| //------------------------------------------------------------------------------------------------------- |
| // Copyright (C) Microsoft Corporation and contributors. All rights reserved. |
| // Licensed under the MIT license. See LICENSE.txt file in the project root for full license information. |
| //------------------------------------------------------------------------------------------------------- |
| #include "Backend.h" |
| #ifdef ENABLE_SCRIPT_DEBUGGING |
| #include "Debug/DebuggingFlags.h" |
| #include "Debug/DiagProbe.h" |
| #include "Debug/DebugManager.h" |
| #endif |
| |
| // Parser includes |
| #include "RegexCommon.h" |
| #include "RegexPattern.h" |
| |
| #include "ExternalLowerer.h" |
| |
| #include "Types/DynamicObjectPropertyEnumerator.h" |
| #include "Types/JavascriptStaticEnumerator.h" |
| #include "Library/ForInObjectEnumerator.h" |
| |
| ///---------------------------------------------------------------------------- |
| /// |
| /// Lowerer::Lower |
| /// |
| /// Lowerer's main entry point. Lowers this function.
| /// |
| ///---------------------------------------------------------------------------- |
| void |
| Lowerer::Lower() |
| { |
| |
| this->m_func->StopMaintainByteCodeOffset(); |
| |
| NoRecoverMemoryJitArenaAllocator localAlloc(_u("BE-Lower"), this->m_func->m_alloc->GetPageAllocator(), Js::Throw::OutOfMemory); |
| this->m_alloc = &localAlloc; |
| BVSparse<JitArenaAllocator> localInitializedTempSym(&localAlloc); |
| this->initializedTempSym = &localInitializedTempSym; |
| BVSparse<JitArenaAllocator> localAddToLiveOnBackEdgeSyms(&localAlloc); |
| this->addToLiveOnBackEdgeSyms = &localAddToLiveOnBackEdgeSyms; |
| Assert(this->m_func->GetCloneMap() == nullptr); |
| |
| m_lowererMD.Init(this); |
| |
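| // Snapshot the function-level fast-path decisions; LowerRange switches to the
| // loop fast-path defaults whenever it reaches a loop-top branch.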
| bool defaultDoFastPath = this->m_func->DoFastPaths(); |
| bool loopFastPath = this->m_func->DoLoopFastPaths(); |
| |
| if (m_func->HasAnyStackNestedFunc()) |
| { |
| EnsureStackFunctionListStackSym(); |
| } |
| if (m_func->DoStackFrameDisplay() && !m_func->IsLoopBody()) |
| { |
| AllocStackClosure(); |
| } |
| |
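| // Reserve stack space for the for-in object enumerators used by this function.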
| AllocStackForInObjectEnumeratorArray(); |
| |
| if (m_func->IsJitInDebugMode()) |
| { |
| // Initialize metadata of local var slots. |
| // We can't wait until the register allocator, as we need the offset when lowering the bailout for the debugger.
| int32 hasLocalVarChangedOffset = m_func->GetHasLocalVarChangedOffset(); |
| if (hasLocalVarChangedOffset != Js::Constants::InvalidOffset) |
| { |
| // MOV [EBP + m_func->GetHasLocalVarChangedOffset()], 0 |
| StackSym* sym = StackSym::New(TyInt8, m_func); |
| sym->m_offset = hasLocalVarChangedOffset; |
| sym->m_allocated = true; |
| IR::Opnd* opnd1 = IR::SymOpnd::New(sym, TyInt8, m_func); |
| IR::Opnd* opnd2 = IR::IntConstOpnd::New(0, TyInt8, m_func); |
| Lowerer::InsertMove(opnd1, opnd2, m_func->GetFunctionEntryInsertionPoint()); |
| |
| #ifdef DBG |
| // Pre-fill all local slots with a pattern; this helps identify uninitialized/garbage var values.
| // Note that at the beginning of the function the bytecode should initialize all locals to undefined.
| uint32 localSlotCount = m_func->GetJITFunctionBody()->GetEndNonTempLocalIndex() - m_func->GetJITFunctionBody()->GetFirstNonTempLocalIndex(); |
| for (uint i = 0; i < localSlotCount; ++i) |
| { |
| int offset = m_func->GetLocalVarSlotOffset(i); |
| |
| IRType opnd1Type; |
| |
| #if defined(TARGET_32) |
| opnd1Type = TyInt32; |
| opnd2 = IR::IntConstOpnd::New(Func::c_debugFillPattern4, opnd1Type, m_func); |
| #else |
| opnd1Type = TyInt64; |
| opnd2 = IR::IntConstOpnd::New(Func::c_debugFillPattern8, opnd1Type, m_func); |
| #endif |
| |
| sym = StackSym::New(opnd1Type, m_func); |
| sym->m_offset = offset; |
| sym->m_allocated = true; |
| opnd1 = IR::SymOpnd::New(sym, opnd1Type, m_func); |
| Lowerer::InsertMove(opnd1, opnd2, m_func->GetFunctionEntryInsertionPoint()); |
| } |
| #endif |
| } |
| |
| Assert(!m_func->HasAnyStackNestedFunc()); |
| } |
| |
| this->LowerRange(m_func->m_headInstr, m_func->m_tailInstr, defaultDoFastPath, loopFastPath); |
| |
| #if DBG && GLOBAL_ENABLE_WRITE_BARRIER |
| // TODO: (leish)(swb) implement for arm |
| #if defined(_M_IX86) || defined(_M_AMD64) |
| if (CONFIG_FLAG(ForceSoftwareWriteBarrier) && CONFIG_FLAG(VerifyBarrierBit)) |
| { |
| // Find all write-barrier-setting instrs and call Recycler::WBSetBit on them for verification purposes.
| // Ideally this would happen in LowererMD::GenerateWriteBarrier; however, we can't insert a call instruction there.
| FOREACH_INSTR_EDITING(instr, instrNext, m_func->m_headInstr) |
| if (instr->m_src1 && instr->m_src1->IsAddrOpnd()) |
| { |
| IR::AddrOpnd* addrOpnd = instr->m_src1->AsAddrOpnd(); |
| if (addrOpnd->GetAddrOpndKind() == IR::AddrOpndKindWriteBarrierCardTable) |
| { |
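| // The barrier sequence ends in LEA / MOV / SHR before the card-table
| // write, so walk back three instructions to recover the LEA'd address
| // (the asserts below verify the expected shape).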
| auto& leaInstr = instr->m_prev->m_prev->m_prev; |
| auto& movInstr = instr->m_prev->m_prev; |
| auto& shrInstr = instr->m_prev; |
| Assert(leaInstr->m_opcode == Js::OpCode::LEA); |
| Assert(movInstr->m_opcode == Js::OpCode::MOV); |
| Assert(shrInstr->m_opcode == Js::OpCode::SHR); |
| m_lowererMD.LoadHelperArgument(movInstr, leaInstr->m_dst); |
| IR::Instr* instrCall = IR::Instr::New(Js::OpCode::Call, m_func); |
| movInstr->InsertBefore(instrCall); |
| m_lowererMD.ChangeToHelperCall(instrCall, IR::HelperWriteBarrierSetVerifyBit); |
| } |
| } |
| NEXT_INSTR_EDITING |
| } |
| #endif |
| #endif |
| |
| this->m_func->ClearCloneMap(); |
| |
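| // Zero the 'next' pointer of the last entry in the stack function list,
| // now that all stack-allocated nested functions have been lowered.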
| if (m_func->HasAnyStackNestedFunc()) |
| { |
| EnsureZeroLastStackFunctionNext(); |
| } |
| |
| if (!m_func->IsSimpleJit()) |
| { |
| #if 0 // TODO michhol oop jit, reenable assert |
| Js::EntryPointInfo* entryPointInfo = this->m_func->m_workItem->GetEntryPoint(); |
| Assert(entryPointInfo->GetJitTransferData() != nullptr && !entryPointInfo->GetJitTransferData()->GetIsReady()); |
| #endif |
| } |
| |
| this->initializedTempSym = nullptr; |
| this->m_alloc = nullptr; |
| |
| this->m_func->DisableConstandAddressLoadHoist(); |
| } |
| |
| void |
| Lowerer::LowerRange(IR::Instr *instrStart, IR::Instr *instrEnd, bool defaultDoFastPath, bool defaultDoLoopFastPath) |
| { |
| bool noMathFastPath; |
| bool noFieldFastPath; |
| bool isStrictMode = this->m_func->GetJITFunctionBody()->IsStrictMode(); |
| noFieldFastPath = !defaultDoFastPath; |
| noMathFastPath = !defaultDoFastPath; |
| |
| #if DBG_DUMP |
| char16 * globOptInstrString = nullptr; |
| #endif |
| |
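| // Walk the instructions backward; handlers that expand an instr into a
| // sequence return (via instrPrev) the instruction preceding the expansion,
| // so the newly generated code is not lowered again.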
| FOREACH_INSTR_BACKWARD_EDITING_IN_RANGE(instr, instrPrev, instrEnd, instrStart) |
| { |
| // Try to peep this instr.
| instr = this->PreLowerPeepInstr(instr, &instrPrev); |
| |
| #if DBG |
| IR::Instr * verifyLegalizeInstrNext = instr->m_next; |
| m_currentInstrOpCode = instr->m_opcode; |
| #endif |
| |
| // If we have a debugger bailout as part of a real instr (not a separate BailForDebugger instr),
| // extract/split the BailOutForDebugger out into a separate instr, if needed.
| // The instr can have just the debugger bailout, or the debugger bailout plus another shared bailout.
| // Note that by the time we get here we should not have an aux-only bailout (globopt promotes it to a normal bailout).
| if (m_func->IsJitInDebugMode() && instr->HasBailOutInfo() && |
| (((instr->GetBailOutKind() & IR::BailOutForDebuggerBits) && instr->m_opcode != Js::OpCode::BailForDebugger) || |
| instr->HasAuxBailOut())) |
| { |
| instr = this->SplitBailForDebugger(instr); // The returned instr is the one we need to lower next.
| instrPrev = instr->m_prev;                 // Re-fetch the predecessor in case instr changed.
| } |
| |
| #if DBG_DUMP |
| if (!instr->IsLowered() && !instr->IsLabelInstr() |
| && (CONFIG_FLAG(ForcePostLowerGlobOptInstrString) || |
| PHASE_DUMP(Js::LowererPhase, m_func) || |
| PHASE_DUMP(Js::LinearScanPhase, m_func) || |
| PHASE_DUMP(Js::RegAllocPhase, m_func) || |
| PHASE_DUMP(Js::PeepsPhase, m_func) || |
| PHASE_DUMP(Js::LayoutPhase, m_func) || |
| PHASE_DUMP(Js::EmitterPhase, m_func) || |
| PHASE_DUMP(Js::EncoderPhase, m_func) || |
| PHASE_DUMP(Js::BackEndPhase, m_func))) |
| { |
| if(instr->m_next && instr->m_next->m_opcode != Js::OpCode::StatementBoundary && !instr->m_next->IsLabelInstr()) |
| { |
| instr->m_next->globOptInstrString = globOptInstrString; |
| } |
| |
| globOptInstrString = instr->DumpString(); |
| } |
| #endif |
| |
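| // On reaching a branch to a loop top, record the outermost loop-top label
| // for this loop nest and switch to the loop fast-path defaults.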
| if (instr->IsBranchInstr() && !instr->AsBranchInstr()->IsMultiBranch() && instr->AsBranchInstr()->GetTarget()->m_isLoopTop) |
| { |
| Loop * loop = instr->AsBranchInstr()->GetTarget()->GetLoop(); |
| if (this->outerMostLoopLabel == nullptr && !loop->isProcessed) |
| { |
| while (loop && loop->GetLoopTopInstr()) // Some loops are optimized away so that they are not loops anymore;
|                                         // they stay in the loop graph but have no loop-top label assigned to them.
| { |
| this->outerMostLoopLabel = loop->GetLoopTopInstr(); |
| Assert(this->outerMostLoopLabel->m_isLoopTop); |
| // landing pad must fall through to the loop |
| Assert(this->outerMostLoopLabel->m_prev->HasFallThrough()); |
| loop = loop->parent; |
| } |
| this->initializedTempSym->ClearAll(); |
| } |
| |
| noFieldFastPath = !defaultDoLoopFastPath; |
| noMathFastPath = !defaultDoLoopFastPath; |
| } |
| |
| #ifdef INLINE_CACHE_STATS |
| if(PHASE_STATS1(Js::PolymorphicInlineCachePhase)) |
| { |
| // Always use the slow path, so we can track property accesses |
| noFieldFastPath = true; |
| } |
| #endif |
| |
| #if DBG |
| if (instr->HasBailOutInfo()) |
| { |
| IR::BailOutKind bailoutKind = instr->GetBailOutKind(); |
| if (BailOutInfo::IsBailOutOnImplicitCalls(bailoutKind)) |
| { |
| this->helperCallCheckState = (HelperCallCheckState)(this->helperCallCheckState | HelperCallCheckState_ImplicitCallsBailout); |
| } |
| |
| if ((bailoutKind & IR::BailOutOnArrayAccessHelperCall) != 0 && |
| instr->m_opcode != Js::OpCode::Memcopy && |
| instr->m_opcode != Js::OpCode::Memset) |
| { |
| this->helperCallCheckState = (HelperCallCheckState)(this->helperCallCheckState | HelperCallCheckState_NoHelperCalls); |
| } |
| } |
| #endif |
| |
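| // Main opcode dispatch: each case either rewrites the instr in place or
| // expands it into a fast path and/or helper call sequence.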
| switch (instr->m_opcode) |
| { |
| case Js::OpCode::LdHandlerScope: |
| this->LowerUnaryHelperMem(instr, IR::HelperScrObj_LdHandlerScope); |
| break; |
| |
| case Js::OpCode::InitSetFld: |
| instrPrev = this->LowerStFld(instr, IR::HelperOP_InitSetter, IR::HelperOP_InitSetter, false); |
| break; |
| |
| case Js::OpCode::InitGetFld: |
| instrPrev = this->LowerStFld(instr, IR::HelperOP_InitGetter, IR::HelperOP_InitGetter, false); |
| break; |
| |
| case Js::OpCode::InitProto: |
| instrPrev = this->LowerStFld(instr, IR::HelperOP_InitProto, IR::HelperOP_InitProto, false); |
| break; |
| |
| case Js::OpCode::LdArgCnt: |
| this->LoadArgumentCount(instr); |
| break; |
| |
| case Js::OpCode::LdStackArgPtr: |
| this->LoadStackArgPtr(instr); |
| break; |
| |
| case Js::OpCode::LdHeapArguments: |
| case Js::OpCode::LdLetHeapArguments: |
| instrPrev = m_lowererMD.LoadHeapArguments(instr); |
| break; |
| |
| case Js::OpCode::LdHeapArgsCached: |
| case Js::OpCode::LdLetHeapArgsCached: |
| m_lowererMD.LoadHeapArgsCached(instr); |
| break; |
| |
| case Js::OpCode::InvalCachedScope: |
| this->LowerBinaryHelper(instr, IR::HelperOP_InvalidateCachedScope); |
| break; |
| |
| case Js::OpCode::InitCachedScope: |
| if (instr->m_func->GetJITFunctionBody()->GetDoScopeObjectCreation() || !instr->m_func->IsStackArgsEnabled()) |
| { |
| instrPrev = this->LowerInitCachedScope(instr); |
| } |
| else |
| { |
| instr->ReplaceSrc1(IR::AddrOpnd::NewNull(instr->m_func)); |
| instr->m_opcode = Js::OpCode::Ld_A; |
| instrPrev = instr; |
| |
| if (PHASE_TRACE1(Js::StackArgFormalsOptPhase)) |
| { |
| Output::Print(_u("StackArgFormals : %s (%d) :Removing Scope object creation in Lowerer and replacing it with MOV NULL. \n"), instr->m_func->GetJITFunctionBody()->GetDisplayName(), instr->m_func->GetFunctionNumber()); |
| Output::Flush(); |
| } |
| } |
| break; |
| case Js::OpCode::NewScopeObject: |
| { |
| Func * currFunc = instr->m_func; |
| if (currFunc->GetJITFunctionBody()->GetDoScopeObjectCreation() || !currFunc->IsStackArgsEnabled()) |
| { |
| // Call the helper that creates the scope object and does the type transition for the formals.
| if (currFunc->IsStackArgsEnabled() && currFunc->GetJITFunctionBody()->GetInParamsCount() != 1) |
| { |
| // s3 = formals are let decls |
| this->m_lowererMD.LoadHelperArgument(instr, IR::IntConstOpnd::New(currFunc->GetHasNonSimpleParams() ? TRUE : FALSE, TyUint8, currFunc)); |
| |
| // s2 = current function. |
| IR::Opnd * paramOpnd = LoadFunctionBodyOpnd(instr); |
| this->m_lowererMD.LoadHelperArgument(instr, paramOpnd); |
| |
| m_lowererMD.ChangeToHelperCallMem(instr, IR::HelperOP_NewScopeObjectWithFormals); |
| } |
| else |
| { |
| m_lowererMD.ChangeToHelperCallMem(instr, IR::HelperOP_NewScopeObject); |
| } |
| } |
| else |
| { |
| instr->SetSrc1(IR::AddrOpnd::NewNull(instr->m_func)); |
| instr->m_opcode = Js::OpCode::Ld_A; |
| instrPrev = instr; |
| |
| if (PHASE_TRACE1(Js::StackArgFormalsOptPhase)) |
| { |
| Output::Print(_u("StackArgFormals : %s (%d) :Removing Scope object creation in Lowerer and replacing it with MOV NULL. \n"), currFunc->GetJITFunctionBody()->GetDisplayName(), currFunc->GetFunctionNumber()); |
| Output::Flush(); |
| } |
| } |
| break; |
| } |
| case Js::OpCode::NewStackScopeSlots: |
| this->LowerNewScopeSlots(instr, m_func->DoStackScopeSlots()); |
| break; |
| |
| case Js::OpCode::NewScopeSlots: |
| this->LowerNewScopeSlots(instr, false); |
| break; |
| |
| case Js::OpCode::InitLocalClosure: |
| // Real initialization of the stack pointers happens on entry to the function, so this instruction |
| // (which exists to provide a def in the IR) can go away. |
| instr->Remove(); |
| break; |
| |
| case Js::OpCode::NewScopeSlotsWithoutPropIds: |
| this->LowerBinaryHelperMemWithFuncBody(instr, IR::HelperOP_NewScopeSlotsWithoutPropIds); |
| break; |
| |
| case Js::OpCode::NewBlockScope: |
| m_lowererMD.ChangeToHelperCallMem(instr, IR::HelperOP_NewBlockScope); |
| break; |
| |
| case Js::OpCode::NewPseudoScope: |
| m_lowererMD.ChangeToHelperCallMem(instr, IR::HelperOP_NewPseudoScope); |
| break; |
| |
| case Js::OpCode::CloneInnerScopeSlots: |
| this->LowerUnaryHelperMem(instr, IR::HelperOP_CloneInnerScopeSlots); |
| break; |
| |
| case Js::OpCode::CloneBlockScope: |
| this->LowerUnaryHelperMem(instr, IR::HelperOP_CloneBlockScope); |
| break; |
| |
| case Js::OpCode::GetCachedFunc: |
| this->LowerGetCachedFunc(instr); |
| break; |
| |
| case Js::OpCode::BrFncCachedScopeEq: |
| case Js::OpCode::BrFncCachedScopeNeq: |
| this->LowerBrFncCachedScopeEq(instr); |
| break; |
| |
| case Js::OpCode::CommitScope: |
| this->LowerCommitScope(instr); |
| break; |
| |
| case Js::OpCode::LdFldForTypeOf: |
| instrPrev = GenerateCompleteLdFld<false>(instr, !noFieldFastPath, IR::HelperOp_PatchGetValueForTypeOf, IR::HelperOp_PatchGetValuePolymorphicForTypeOf, |
| IR::HelperOp_PatchGetValueForTypeOf, IR::HelperOp_PatchGetValuePolymorphicForTypeOf); |
| break; |
| |
| case Js::OpCode::LdFld: |
| case Js::OpCode::LdFldForCallApplyTarget: |
| instrPrev = GenerateCompleteLdFld<false>(instr, !noFieldFastPath, IR::HelperOp_PatchGetValue, IR::HelperOp_PatchGetValuePolymorphic, |
| IR::HelperOp_PatchGetValue, IR::HelperOp_PatchGetValuePolymorphic); |
| break; |
| |
| case Js::OpCode::LdSuperFld: |
| instrPrev = GenerateCompleteLdFld<false>(instr, !noFieldFastPath, IR::HelperOp_PatchGetValueWithThisPtr, IR::HelperOp_PatchGetValuePolymorphicWithThisPtr, |
| IR::HelperOp_PatchGetValueWithThisPtr, IR::HelperOp_PatchGetValuePolymorphicWithThisPtr); |
| break; |
| |
| case Js::OpCode::LdRootFld: |
| instrPrev = GenerateCompleteLdFld<true>(instr, !noFieldFastPath, IR::HelperOp_PatchGetRootValue, IR::HelperOp_PatchGetRootValuePolymorphic, |
| IR::HelperOp_PatchGetRootValue, IR::HelperOp_PatchGetRootValuePolymorphic); |
| break; |
| |
| case Js::OpCode::LdRootFldForTypeOf: |
| instrPrev = GenerateCompleteLdFld<true>(instr, !noFieldFastPath, IR::HelperOp_PatchGetRootValueForTypeOf, IR::HelperOp_PatchGetRootValuePolymorphicForTypeOf, |
| IR::HelperOp_PatchGetRootValueForTypeOf, IR::HelperOp_PatchGetRootValuePolymorphicForTypeOf); |
| break; |
| |
| case Js::OpCode::LdMethodFldPolyInlineMiss: |
| instrPrev = LowerLdFld(instr, IR::HelperOp_PatchGetMethod, IR::HelperOp_PatchGetMethodPolymorphic, true, nullptr, true); |
| break; |
| |
| case Js::OpCode::LdMethodFld: |
| instrPrev = GenerateCompleteLdFld<false>(instr, !noFieldFastPath, IR::HelperOp_PatchGetMethod, IR::HelperOp_PatchGetMethodPolymorphic, |
| IR::HelperOp_PatchGetMethod, IR::HelperOp_PatchGetMethodPolymorphic); |
| break; |
| |
| case Js::OpCode::LdRootMethodFld: |
| instrPrev = GenerateCompleteLdFld<true>(instr, !noFieldFastPath, IR::HelperOp_PatchGetRootMethod, IR::HelperOp_PatchGetRootMethodPolymorphic, |
| IR::HelperOp_PatchGetRootMethod, IR::HelperOp_PatchGetRootMethodPolymorphic); |
| break; |
| |
| case Js::OpCode::ScopedLdMethodFld: |
| // "Scoped" in ScopedLdMethodFld is a bit of a misnomer because it doesn't look through a scope chain. |
| // Instead the op is to allow for either a LdRootMethodFld or LdMethodFld depending on whether the |
| // object is the root object or not. |
| instrPrev = GenerateCompleteLdFld<false>(instr, !noFieldFastPath, IR::HelperOp_ScopedGetMethod, IR::HelperOp_ScopedGetMethodPolymorphic, |
| IR::HelperOp_ScopedGetMethod, IR::HelperOp_ScopedGetMethodPolymorphic); |
| break; |
| |
| case Js::OpCode::LdMethodFromFlags: |
| { |
| Assert(instr->HasBailOutInfo()); |
| bool success = GenerateFastLdMethodFromFlags(instr); |
| AssertMsg(success, "Not expected to generate helper block here"); |
| break; |
| } |
| |
| case Js::OpCode::CheckFixedFld: |
| AssertMsg(!PHASE_OFF(Js::FixedMethodsPhase, instr->m_func) || !PHASE_OFF(Js::UseFixedDataPropsPhase, instr->m_func), "CheckFixedFld with fixed prop(Data|Method) phase disabled?"); |
| this->GenerateCheckFixedFld(instr); |
| break; |
| |
| case Js::OpCode::CheckPropertyGuardAndLoadType: |
| instrPrev = this->GeneratePropertyGuardCheckBailoutAndLoadType(instr); |
| break; |
| |
| case Js::OpCode::CheckObjType: |
| this->GenerateCheckObjType(instr); |
| break; |
| |
| case Js::OpCode::AdjustObjType: |
| case Js::OpCode::AdjustObjTypeReloadAuxSlotPtr: |
| this->LowerAdjustObjType(instr); |
| break; |
| |
| case Js::OpCode::DeleteFld: |
| instrPrev = this->LowerDelFld(instr, IR::HelperOp_DeleteProperty, false, false); |
| break; |
| |
| case Js::OpCode::DeleteRootFld: |
| instrPrev = this->LowerDelFld(instr, IR::HelperOp_DeleteRootProperty, false, false); |
| break; |
| |
| case Js::OpCode::DeleteFldStrict: |
| instrPrev = this->LowerDelFld(instr, IR::HelperOp_DeleteProperty, false, true); |
| break; |
| |
| case Js::OpCode::DeleteRootFldStrict: |
| instrPrev = this->LowerDelFld(instr, IR::HelperOp_DeleteRootProperty, false, true); |
| break; |
| |
| case Js::OpCode::ScopedLdFldForTypeOf: |
| if (!noFieldFastPath) |
| { |
| m_lowererMD.GenerateFastScopedLdFld(instr); |
| } |
| instrPrev = this->LowerScopedLdFld(instr, IR::HelperOp_PatchGetPropertyForTypeOfScoped, true); |
| break; |
| |
| case Js::OpCode::ScopedLdFld: |
| if (!noFieldFastPath) |
| { |
| m_lowererMD.GenerateFastScopedLdFld(instr); |
| } |
| instrPrev = this->LowerScopedLdFld(instr, IR::HelperOp_PatchGetPropertyScoped, true); |
| break; |
| |
| case Js::OpCode::ScopedLdInst: |
| instrPrev = this->LowerScopedLdInst(instr, IR::HelperOp_GetInstanceScoped); |
| break; |
| |
| case Js::OpCode::ScopedDeleteFld: |
| instrPrev = this->LowerScopedDelFld(instr, IR::HelperOp_DeletePropertyScoped, false, false); |
| break; |
| |
| case Js::OpCode::ScopedDeleteFldStrict: |
| instrPrev = this->LowerScopedDelFld(instr, IR::HelperOp_DeletePropertyScoped, false, true); |
| break; |
| |
| case Js::OpCode::NewScFunc: |
| instrPrev = this->LowerNewScFunc(instr); |
| break; |
| |
| case Js::OpCode::NewScFuncHomeObj: |
| instrPrev = this->LowerNewScFuncHomeObj(instr); |
| break; |
| |
| case Js::OpCode::NewScGenFunc: |
| instrPrev = this->LowerNewScGenFunc(instr); |
| break; |
| case Js::OpCode::NewScGenFuncHomeObj: |
| instrPrev = this->LowerNewScGenFuncHomeObj(instr); |
| break; |
| |
| case Js::OpCode::StFld: |
| instrPrev = GenerateCompleteStFld(instr, !noFieldFastPath, IR::HelperOp_PatchPutValueNoLocalFastPath, IR::HelperOp_PatchPutValueNoLocalFastPathPolymorphic, |
| IR::HelperOp_PatchPutValue, IR::HelperOp_PatchPutValuePolymorphic, true, Js::PropertyOperation_None); |
| break; |
| |
| case Js::OpCode::StSuperFld: |
| instrPrev = GenerateCompleteStFld(instr, !noFieldFastPath, IR::HelperOp_PatchPutValueWithThisPtrNoLocalFastPath, IR::HelperOp_PatchPutValueWithThisPtrNoLocalFastPathPolymorphic, |
| IR::HelperOp_PatchPutValueWithThisPtr, IR::HelperOp_PatchPutValueWithThisPtrPolymorphic, true, isStrictMode ? Js::PropertyOperation_StrictMode : Js::PropertyOperation_None); |
| break; |
| |
| case Js::OpCode::StRootFld: |
| instrPrev = GenerateCompleteStFld(instr, !noFieldFastPath, IR::HelperOp_PatchPutRootValueNoLocalFastPath, IR::HelperOp_PatchPutRootValueNoLocalFastPathPolymorphic, |
| IR::HelperOp_PatchPutRootValue, IR::HelperOp_PatchPutRootValuePolymorphic, true, Js::PropertyOperation_Root); |
| break; |
| |
| case Js::OpCode::StFldStrict: |
| instrPrev = GenerateCompleteStFld(instr, !noFieldFastPath, IR::HelperOp_PatchPutValueNoLocalFastPath, IR::HelperOp_PatchPutValueNoLocalFastPathPolymorphic, |
| IR::HelperOp_PatchPutValue, IR::HelperOp_PatchPutValuePolymorphic, true, Js::PropertyOperation_StrictMode); |
| break; |
| |
| case Js::OpCode::StRootFldStrict: |
| instrPrev = GenerateCompleteStFld(instr, !noFieldFastPath, IR::HelperOp_PatchPutRootValueNoLocalFastPath, IR::HelperOp_PatchPutRootValueNoLocalFastPathPolymorphic, |
| IR::HelperOp_PatchPutRootValue, IR::HelperOp_PatchPutRootValuePolymorphic, true, Js::PropertyOperation_StrictModeRoot); |
| break; |
| |
| case Js::OpCode::InitFld: |
| case Js::OpCode::InitRootFld: |
| instrPrev = GenerateCompleteStFld(instr, !noFieldFastPath, IR::HelperOp_PatchInitValue, IR::HelperOp_PatchInitValuePolymorphic, |
| IR::HelperOp_PatchInitValue, IR::HelperOp_PatchInitValuePolymorphic, false, Js::PropertyOperation_None); |
| break; |
| |
| case Js::OpCode::ScopedInitFunc: |
| instrPrev = this->LowerScopedStFld(instr, IR::HelperOp_InitFuncScoped, false); |
| break; |
| |
| case Js::OpCode::ScopedStFld: |
| case Js::OpCode::ScopedStFldStrict: |
| if (!noFieldFastPath) |
| { |
| m_lowererMD.GenerateFastScopedStFld(instr); |
| } |
| instrPrev = this->LowerScopedStFld(instr, IR::HelperOp_PatchSetPropertyScoped, true, true, |
| instr->m_opcode == Js::OpCode::ScopedStFld ? Js::PropertyOperation_None : Js::PropertyOperation_StrictMode); |
| break; |
| |
| case Js::OpCode::ConsoleScopedStFld: |
| case Js::OpCode::ConsoleScopedStFldStrict: |
| { |
| if (!noFieldFastPath) |
| { |
| m_lowererMD.GenerateFastScopedStFld(instr); |
| } |
| Js::PropertyOperationFlags flags = static_cast<Js::PropertyOperationFlags>((instr->m_opcode == Js::OpCode::ConsoleScopedStFld ? Js::PropertyOperation_None : Js::PropertyOperation_StrictMode) | Js::PropertyOperation_AllowUndeclInConsoleScope); |
| instrPrev = this->LowerScopedStFld(instr, IR::HelperOp_ConsolePatchSetPropertyScoped, true, true, flags); |
| break; |
| } |
| |
| case Js::OpCode::LdStr: |
| m_lowererMD.ChangeToAssign(instr); |
| break; |
| |
| case Js::OpCode::CloneStr: |
| { |
| GenerateGetImmutableOrScriptUnreferencedString(instr->GetSrc1()->AsRegOpnd(), instr, IR::HelperOp_CompoundStringCloneForAppending, false); |
| instr->Remove(); |
| break; |
| } |
| |
| case Js::OpCode::NewScObjArray: |
| instrPrev = this->LowerNewScObjArray(instr); |
| break; |
| |
| case Js::OpCode::NewScObject: |
| case Js::OpCode::NewScObjectSpread: |
| case Js::OpCode::NewScObjArraySpread: |
| instrPrev = this->LowerNewScObject(instr, true, true); |
| break; |
| |
| case Js::OpCode::NewScObjectNoCtor: |
| instrPrev = this->LowerNewScObject(instr, false, true); |
| break; |
| |
| case Js::OpCode::NewScObjectNoCtorFull: |
| instrPrev = this->LowerNewScObject(instr, false, true, true); |
| break; |
| |
| case Js::OpCode::GetNewScObject: |
| instrPrev = this->LowerGetNewScObject(instr); |
| break; |
| |
| case Js::OpCode::UpdateNewScObjectCache: |
| instrPrev = instr->m_prev; |
| this->LowerUpdateNewScObjectCache(instr, instr->GetSrc2(), instr->GetSrc1(), true /* isCtorFunction */); |
| instr->Remove(); |
| break; |
| |
| case Js::OpCode::NewScObjectSimple: |
| this->LowerNewScObjectSimple(instr); |
| break; |
| |
| case Js::OpCode::NewScObjectLiteral: |
| this->LowerNewScObjectLiteral(instr); |
| break; |
| |
| case Js::OpCode::LdPropIds: |
| m_lowererMD.ChangeToAssign(instr); |
| break; |
| |
| case Js::OpCode::StArrSegItem_A: |
| instrPrev = this->LowerArraySegmentVars(instr); |
| break; |
| |
| case Js::OpCode::InlineMathAcos: |
| m_lowererMD.GenerateFastInlineBuiltInCall(instr, IR::HelperDirectMath_Acos); |
| break; |
| |
| case Js::OpCode::InlineMathAsin: |
| m_lowererMD.GenerateFastInlineBuiltInCall(instr, IR::HelperDirectMath_Asin); |
| break; |
| |
| case Js::OpCode::InlineMathAtan: |
| m_lowererMD.GenerateFastInlineBuiltInCall(instr, IR::HelperDirectMath_Atan); |
| break; |
| |
| case Js::OpCode::InlineMathAtan2: |
| m_lowererMD.GenerateFastInlineBuiltInCall(instr, IR::HelperDirectMath_Atan2); |
| break; |
| |
| case Js::OpCode::InlineMathCos: |
| m_lowererMD.GenerateFastInlineBuiltInCall(instr, IR::HelperDirectMath_Cos); |
| break; |
| |
| case Js::OpCode::InlineMathExp: |
| m_lowererMD.GenerateFastInlineBuiltInCall(instr, IR::HelperDirectMath_Exp); |
| break; |
| |
| case Js::OpCode::InlineMathLog: |
| m_lowererMD.GenerateFastInlineBuiltInCall(instr, IR::HelperDirectMath_Log); |
| break; |
| |
| case Js::OpCode::InlineMathPow: |
| m_lowererMD.GenerateFastInlineBuiltInCall(instr, IR::HelperDirectMath_Pow); |
| break; |
| |
| case Js::OpCode::InlineMathSin: |
| m_lowererMD.GenerateFastInlineBuiltInCall(instr, IR::HelperDirectMath_Sin); |
| break; |
| |
| case Js::OpCode::InlineMathSqrt: |
| m_lowererMD.GenerateFastInlineBuiltInCall(instr, (IR::JnHelperMethod)0); |
| break; |
| |
| case Js::OpCode::InlineMathTan: |
| m_lowererMD.GenerateFastInlineBuiltInCall(instr, IR::HelperDirectMath_Tan); |
| break; |
| |
| case Js::OpCode::InlineMathFloor: |
| #if defined(ASMJS_PLAT) && (defined(_M_X64) || defined(_M_IX86)) |
| if (!AutoSystemInfo::Data.SSE4_1Available() && instr->m_func->GetJITFunctionBody()->IsAsmJsMode()) |
| { |
| m_lowererMD.HelperCallForAsmMathBuiltin(instr, IR::HelperDirectMath_FloorFlt, IR::HelperDirectMath_FloorDb); |
| break; |
| } |
| #endif |
| m_lowererMD.GenerateFastInlineBuiltInCall(instr, (IR::JnHelperMethod)0); |
| break; |
| |
| case Js::OpCode::InlineMathCeil: |
| #if defined(ASMJS_PLAT) && (defined(_M_X64) || defined(_M_IX86)) |
| if (!AutoSystemInfo::Data.SSE4_1Available() && instr->m_func->GetJITFunctionBody()->IsAsmJsMode()) |
| { |
| m_lowererMD.HelperCallForAsmMathBuiltin(instr, IR::HelperDirectMath_CeilFlt, IR::HelperDirectMath_CeilDb); |
| break; |
| } |
| #endif |
| m_lowererMD.GenerateFastInlineBuiltInCall(instr, (IR::JnHelperMethod)0); |
| break; |
| |
| case Js::OpCode::InlineMathRound: |
| m_lowererMD.GenerateFastInlineBuiltInCall(instr, (IR::JnHelperMethod)0); |
| break; |
| |
| case Js::OpCode::InlineMathAbs: |
| m_lowererMD.GenerateFastInlineBuiltInCall(instr, (IR::JnHelperMethod)0); |
| break; |
| |
| case Js::OpCode::InlineMathImul: |
| GenerateFastInlineMathImul(instr); |
| break; |
| |
| case Js::OpCode::Ctz: |
| GenerateCtz(instr); |
| break; |
| |
| case Js::OpCode::PopCnt: |
| GeneratePopCnt(instr); |
| break; |
| |
| case Js::OpCode::InlineMathClz: |
| GenerateFastInlineMathClz(instr); |
| break; |
| |
| case Js::OpCode::InlineMathFround: |
| GenerateFastInlineMathFround(instr); |
| break; |
| |
| case Js::OpCode::Reinterpret_Prim: |
| LowerReinterpretPrimitive(instr); |
| break; |
| |
| case Js::OpCode::InlineMathMin: |
| case Js::OpCode::InlineMathMax: |
| m_lowererMD.GenerateFastInlineBuiltInCall(instr, (IR::JnHelperMethod)0); |
| break; |
| |
| case Js::OpCode::InlineMathRandom: |
| this->GenerateFastInlineBuiltInMathRandom(instr); |
| break; |
| |
| #ifdef ENABLE_DOM_FAST_PATH |
| case Js::OpCode::DOMFastPathGetter: |
| this->LowerFastInlineDOMFastPathGetter(instr); |
| break; |
| #endif |
| |
| case Js::OpCode::InlineArrayPush: |
| this->GenerateFastInlineArrayPush(instr); |
| break; |
| |
| case Js::OpCode::InlineArrayPop: |
| this->GenerateFastInlineArrayPop(instr); |
| break; |
| |
| // Now retrieve the function object from the ArgOut_A_InlineSpecialized instruction so it can be pushed on the stack after all the other arguments.
| // The lowering of the direct call to the helper is handled by GenerateDirectCall (architecture-specific).
| case Js::OpCode::CallDirect: |
| { |
| IR::Opnd * src1 = instr->GetSrc1(); |
| Assert(src1->IsHelperCallOpnd()); |
| switch (src1->AsHelperCallOpnd()->m_fnHelper) |
| { |
| case IR::JnHelperMethod::HelperString_Split: |
| case IR::JnHelperMethod::HelperString_Match: |
| GenerateFastInlineStringSplitMatch(instr); |
| break; |
| case IR::JnHelperMethod::HelperRegExp_Exec: |
| GenerateFastInlineRegExpExec(instr); |
| break; |
| case IR::JnHelperMethod::HelperGlobalObject_ParseInt: |
| GenerateFastInlineGlobalObjectParseInt(instr); |
| break; |
| case IR::JnHelperMethod::HelperString_FromCharCode: |
| GenerateFastInlineStringFromCharCode(instr); |
| break; |
| case IR::JnHelperMethod::HelperString_FromCodePoint: |
| GenerateFastInlineStringFromCodePoint(instr); |
| break; |
| case IR::JnHelperMethod::HelperString_CharAt: |
| GenerateFastInlineStringCharCodeAt(instr, Js::BuiltinFunction::JavascriptString_CharAt); |
| break; |
| case IR::JnHelperMethod::HelperString_CharCodeAt: |
| GenerateFastInlineStringCharCodeAt(instr, Js::BuiltinFunction::JavascriptString_CharCodeAt); |
| break; |
| case IR::JnHelperMethod::HelperString_Replace: |
| GenerateFastInlineStringReplace(instr); |
| break; |
| case IR::JnHelperMethod::HelperObject_HasOwnProperty: |
| this->GenerateFastInlineHasOwnProperty(instr); |
| break; |
| case IR::JnHelperMethod::HelperArray_IsArray: |
| this->GenerateFastInlineIsArray(instr); |
| break; |
| } |
| instrPrev = LowerCallDirect(instr); |
| break; |
| } |
| |
| case Js::OpCode::CallIDynamic: |
| { |
| Js::CallFlags flags = instr->GetDst() ? Js::CallFlags_Value : Js::CallFlags_NotUsed; |
| instrPrev = this->LowerCallIDynamic(instr, (ushort)flags); |
| break; |
| } |
| case Js::OpCode::CallIDynamicSpread: |
| { |
| Js::CallFlags flags = instr->GetDst() ? Js::CallFlags_Value : Js::CallFlags_NotUsed; |
| instrPrev = this->LowerCallIDynamicSpread(instr, (ushort)flags); |
| break; |
| } |
| |
| case Js::OpCode::CallI: |
| case Js::OpCode::CallINew: |
| case Js::OpCode::CallIFixed: |
| case Js::OpCode::CallINewTargetNew: |
| { |
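| // Compute the call flags: ctor calls and the CallINew* opcodes get
| // CallFlags_New; non-ctor calls also record whether the return value is used.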
| Js::CallFlags flags = Js::CallFlags_None; |
| |
| if (instr->isCtorCall) |
| { |
| flags = Js::CallFlags_New; |
| } |
| else |
| { |
| if (instr->m_opcode == Js::OpCode::CallINew) |
| { |
| flags = Js::CallFlags_New; |
| } |
| else if (instr->m_opcode == Js::OpCode::CallINewTargetNew) |
| { |
| flags = (Js::CallFlags) (Js::CallFlags_New | Js::CallFlags_ExtraArg | Js::CallFlags_NewTarget); |
| } |
| if (instr->GetDst()) |
| { |
| flags = (Js::CallFlags) (flags | Js::CallFlags_Value); |
| } |
| else |
| { |
| flags = (Js::CallFlags) (flags | Js::CallFlags_NotUsed); |
| } |
| } |
| |
| if (!PHASE_OFF(Js::CallFastPathPhase, this->m_func) && !noMathFastPath) |
| { |
| // We shouldn't have turned this instruction into a fixed method call if we're calling one of the |
| // built-ins we still inline in the lowerer. |
| Assert(instr->m_opcode != Js::OpCode::CallIFixed || !Func::IsBuiltInInlinedInLowerer(instr->GetSrc1())); |
| |
| // Disable InlineBuiltInLibraryCall as it does not work well with 2nd chance reg alloc |
| // and may invalidate live on back edge data by introducing refs across loops. See Winblue Bug: 577641 |
| //// Callee may still be a library built-in; if so, generate it inline. |
| //if (this->InlineBuiltInLibraryCall(instr)) |
| //{ |
| // m_lowererMD.LowerCallI(instr, (ushort)flags, true /*isHelper*/); |
| //} |
| //else |
| //{ |
| m_lowererMD.LowerCallI(instr, (ushort)flags); |
| //} |
| } |
| else |
| { |
| m_lowererMD.LowerCallI(instr, (ushort)flags); |
| } |
| break; |
| } |
| case Js::OpCode::AsmJsCallI: |
| instrPrev = m_lowererMD.LowerAsmJsCallI(instr); |
| break; |
| |
| case Js::OpCode::AsmJsCallE: |
| instrPrev = m_lowererMD.LowerAsmJsCallE(instr); |
| break; |
| |
| case Js::OpCode::CallIEval: |
| { |
| Js::CallFlags flags = (Js::CallFlags)(Js::CallFlags_ExtraArg | (instr->GetDst() ? Js::CallFlags_Value : Js::CallFlags_NotUsed)); |
| if (IsSpreadCall(instr)) |
| { |
| instrPrev = LowerSpreadCall(instr, flags); |
| } |
| else |
| { |
| m_lowererMD.LowerCallI(instr, (ushort)flags); |
| } |
| |
| #ifdef PERF_HINT |
| if (PHASE_TRACE1(Js::PerfHintPhase)) |
| { |
| WritePerfHint(PerfHints::CallsEval, this->m_func, instr->GetByteCodeOffset()); |
| } |
| #endif |
| break; |
| } |
| |
| case Js::OpCode::CallHelper: |
| instrPrev = m_lowererMD.LowerCallHelper(instr); |
| break; |
| |
| case Js::OpCode::Ret: |
| if (instr->m_next->m_opcode != Js::OpCode::FunctionExit) |
| { |
| // If this RET isn't at the end of the function, insert a branch to |
| // the epilog. |
| |
| IR::Instr *exitPrev = m_func->m_exitInstr->m_prev; |
| if (!exitPrev->IsLabelInstr()) |
| { |
| exitPrev = IR::LabelInstr::New(Js::OpCode::Label, m_func); |
| m_func->m_exitInstr->InsertBefore(exitPrev); |
| } |
| IR::BranchInstr *exitBr = IR::BranchInstr::New(LowererMD::MDUncondBranchOpcode, |
| exitPrev->AsLabelInstr(), m_func); |
| instr->InsertAfter(exitBr); |
| } |
| |
| m_lowererMD.LowerRet(instr); |
| break; |
| |
| case Js::OpCode::LdArgumentsFromFrame: |
| this->LoadArgumentsFromFrame(instr); |
| break; |
| |
| case Js::OpCode::LdC_A_I4: |
| { |
| IR::Opnd *src1 = instr->UnlinkSrc1(); |
| AssertMsg(src1->IsIntConstOpnd(), "Source of LdC_A_I4 should be an IntConst..."); |
| |
| instrPrev = this->LowerLoadVar(instr, |
| IR::AddrOpnd::NewFromNumber(static_cast<int32>(src1->AsIntConstOpnd()->GetValue()), this->m_func)); |
| src1->Free(this->m_func); |
| break; |
| } |
| |
| case Js::OpCode::LdC_A_R8: |
| { |
| IR::Opnd *src1 = instr->UnlinkSrc1(); |
| AssertMsg(src1->IsFloatConstOpnd(), "Source of LdC_A_R8 should be a FloatConst..."); |
| instrPrev = this->LowerLoadVar(instr, src1->AsFloatConstOpnd()->GetAddrOpnd(this->m_func)); |
| src1->Free(this->m_func); |
| break; |
| } |
| |
| case Js::OpCode::LdC_F8_R8: |
| { |
| IR::Opnd *src1 = instr->UnlinkSrc1(); |
| AssertMsg(src1->IsFloatConstOpnd() || src1->IsFloat32ConstOpnd(), "Source of LdC_F8_R8 should be a FloatConst..."); |
| if (src1->IsFloatConstOpnd()) |
| { |
| instrPrev = m_lowererMD.LoadFloatValue(instr->UnlinkDst()->AsRegOpnd(), src1->AsFloatConstOpnd()->m_value, instr); |
| } |
| else |
| { |
| instrPrev = m_lowererMD.LoadFloatValue(instr->UnlinkDst()->AsRegOpnd(), src1->AsFloat32ConstOpnd()->m_value, instr); |
| } |
| |
| src1->Free(this->m_func); |
| instr->Remove(); |
| |
| break; |
| } |
| |
| case Js::OpCode::NewRegEx: |
| instrPrev = this->LowerNewRegEx(instr); |
| break; |
| |
| case Js::OpCode::Conv_Obj: |
| this->LowerUnaryHelperMem(instr, IR::HelperOp_ConvObject); |
| break; |
| |
| case Js::OpCode::NewUnscopablesWrapperObject: |
| this->LowerUnaryHelperMem(instr, IR::HelperOp_NewUnscopablesWrapperObject); |
| break; |
| |
| case Js::OpCode::LdCustomSpreadIteratorList: |
| this->LowerUnaryHelperMem(instr, IR::HelperOp_ToSpreadedFunctionArgument); |
| break; |
| |
| case Js::OpCode::Conv_Numeric: |
| case Js::OpCode::Conv_Num: |
| this->LowerConvNum(instr, noMathFastPath); |
| break; |
| |
| case Js::OpCode::Incr_Num_A: |
| case Js::OpCode::Incr_A: |
| if (PHASE_OFF(Js::MathFastPathPhase, this->m_func) || noMathFastPath) |
| { |
| this->LowerUnaryHelperMem(instr, IR::HelperOp_Increment); |
| } |
| else |
| { |
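| // Fast path: lower ++x as x + 1 on tagged ints; the helper call below
| // covers the non-int and overflow cases.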
| instr->SetSrc2(IR::AddrOpnd::New(Js::TaggedInt::ToVarUnchecked(1), IR::AddrOpndKindConstantVar, this->m_func)); |
| m_lowererMD.GenerateFastAdd(instr); |
| instr->FreeSrc2(); |
| this->LowerUnaryHelperMemWithTemp2(instr, IR_HELPER_OP_FULL_OR_INPLACE(Increment)); |
| } |
| break; |
| |
| case Js::OpCode::Decr_Num_A: |
| case Js::OpCode::Decr_A: |
| if (PHASE_OFF(Js::MathFastPathPhase, this->m_func) || noMathFastPath) |
| { |
| this->LowerUnaryHelperMem(instr, IR::HelperOp_Decrement); |
| } |
| else |
| { |
| instr->SetSrc2(IR::AddrOpnd::New(Js::TaggedInt::ToVarUnchecked(1), IR::AddrOpndKindConstantVar, this->m_func)); |
| m_lowererMD.GenerateFastSub(instr); |
| instr->FreeSrc2(); |
| this->LowerUnaryHelperMemWithTemp2(instr, IR_HELPER_OP_FULL_OR_INPLACE(Decrement)); |
| } |
| break; |
| |
| case Js::OpCode::Neg_A: |
| if (instr->GetDst()->IsFloat()) |
| { |
| Assert(instr->GetSrc1()->IsFloat()); |
| m_lowererMD.LowerToFloat(instr); |
| } |
| else if (PHASE_OFF(Js::MathFastPathPhase, this->m_func) || noMathFastPath) |
| { |
| this->LowerUnaryHelperMem(instr, IR::HelperOp_Negate); |
| } |
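| // GenerateFastNeg returns true when a helper call is still needed to
| // back the inline fast path.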
| else if (m_lowererMD.GenerateFastNeg(instr)) |
| { |
| this->LowerUnaryHelperMemWithTemp2(instr, IR_HELPER_OP_FULL_OR_INPLACE(Negate)); |
| } |
| break; |
| |
| case Js::OpCode::Not_A: |
| if (PHASE_OFF(Js::BitopsFastPathPhase, this->m_func) || noMathFastPath) |
| { |
| this->LowerUnaryHelperMem(instr, IR::HelperOp_Not); |
| } |
| else if (m_lowererMD.GenerateFastNot(instr)) |
| { |
| this->LowerUnaryHelperMemWithTemp2(instr, IR_HELPER_OP_FULL_OR_INPLACE(Not)); |
| } |
| break; |
| |
| case Js::OpCode::BrEq_I4: |
| case Js::OpCode::BrNeq_I4: |
| case Js::OpCode::BrGt_I4: |
| case Js::OpCode::BrGe_I4: |
| case Js::OpCode::BrLt_I4: |
| case Js::OpCode::BrLe_I4: |
| case Js::OpCode::BrUnGt_I4: |
| case Js::OpCode::BrUnGe_I4: |
| case Js::OpCode::BrUnLt_I4: |
| case Js::OpCode::BrUnLe_I4: |
| { |
| // See calls to MarkOneFltTmpSym under BrSrEq. This handles the case
| // where a branch is type-specialized and uses the result of a float pre-op,
| // which must then be saved to var at the def.
| StackSym *sym = instr->GetSrc1()->GetStackSym(); |
| if (sym) |
| { |
| sym = sym->GetVarEquivSym(nullptr); |
| } |
| sym = instr->GetSrc2()->GetStackSym(); |
| if (sym) |
| { |
| sym = sym->GetVarEquivSym(nullptr); |
| } |
| } |
| // FALLTHROUGH |
| case Js::OpCode::Neg_I4: |
| case Js::OpCode::Not_I4: |
| case Js::OpCode::Add_I4: |
| case Js::OpCode::Sub_I4: |
| case Js::OpCode::Mul_I4: |
| case Js::OpCode::RemU_I4: |
| case Js::OpCode::Rem_I4: |
| case Js::OpCode::Or_I4: |
| case Js::OpCode::Xor_I4: |
| case Js::OpCode::And_I4: |
| case Js::OpCode::Shl_I4: |
| case Js::OpCode::Shr_I4: |
| case Js::OpCode::ShrU_I4: |
| case Js::OpCode::Rol_I4: |
| case Js::OpCode::Ror_I4: |
| case Js::OpCode::BrTrue_I4: |
| case Js::OpCode::BrFalse_I4: |
| #ifdef _M_IX86 |
| if ( |
| instr->GetDst() && instr->GetDst()->IsInt64() || |
| instr->GetSrc1() && instr->GetSrc1()->IsInt64() || |
| instr->GetSrc2() && instr->GetSrc2()->IsInt64() |
| ) |
| { |
| m_lowererMD.EmitInt64Instr(instr); |
| break; |
| } |
| #endif |
| if (instr->HasBailOutInfo()) |
| { |
| const auto bailOutKind = instr->GetBailOutKind(); |
| if (bailOutKind & IR::BailOutOnResultConditions || |
| bailOutKind == IR::BailOutOnFailedHoistedLoopCountBasedBoundCheck) |
| { |
| const auto nonBailOutInstr = SplitBailOnResultCondition(instr); |
| IR::LabelInstr *bailOutLabel, *skipBailOutLabel; |
| LowerBailOnResultCondition(instr, &bailOutLabel, &skipBailOutLabel); |
| LowerInstrWithBailOnResultCondition(nonBailOutInstr, bailOutKind, bailOutLabel, skipBailOutLabel); |
| } |
| else if (bailOutKind == IR::BailOnModByPowerOf2) |
| { |
| Assert(instr->m_opcode == Js::OpCode::Rem_I4); |
| bool fastPath = GenerateSimplifiedInt4Rem(instr); |
| Assert(fastPath); |
| instr->FreeSrc1(); |
| instr->FreeSrc2(); |
| this->GenerateBailOut(instr); |
| } |
| } |
| else |
| { |
| if (instr->m_opcode == Js::OpCode::Rem_I4 || instr->m_opcode == Js::OpCode::RemU_I4) |
| { |
| // fast path |
| this->GenerateSimplifiedInt4Rem(instr); |
| // slow path |
| this->LowerRemI4(instr); |
| } |
| #if defined(_M_IX86) || defined(_M_X64) |
| else if (instr->m_opcode == Js::OpCode::Mul_I4) |
| { |
| if (!LowererMD::GenerateSimplifiedInt4Mul(instr)) |
| { |
| m_lowererMD.EmitInt4Instr(instr); |
| } |
| } |
| #endif |
| else |
| { |
| m_lowererMD.EmitInt4Instr(instr); |
| } |
| } |
| break; |
| case Js::OpCode::TrapIfMinIntOverNegOne: |
| LowerTrapIfMinIntOverNegOne(instr); |
| break; |
| case Js::OpCode::TrapIfTruncOverflow: |
| LowererMD::ChangeToAssign(instr); |
| break; |
| case Js::OpCode::TrapIfZero: |
| LowerTrapIfZero(instr); |
| break; |
| case Js::OpCode::TrapIfUnalignedAccess: |
| instrPrev = LowerTrapIfUnalignedAccess(instr); |
| break; |
| case Js::OpCode::DivU_I4: |
| case Js::OpCode::Div_I4: |
| this->LowerDivI4(instr); |
| break; |
| |
| case Js::OpCode::Typeof: |
| m_lowererMD.LowerTypeof(instr); |
| break; |
| |
| case Js::OpCode::TypeofElem: |
| this->LowerLdElemI(instr, IR::HelperOp_TypeofElem, false); |
| break; |
| |
| case Js::OpCode::LdLen_A: |
| { |
| bool fastPath = !noMathFastPath; |
| if (!fastPath && instr->HasBailOutInfo()) |
| { |
| // Some bailouts are generated around the helper call, and will work even if the fast path is disabled. Other |
| // bailouts require the fast path. |
| const IR::BailOutKind bailOutKind = instr->GetBailOutKind(); |
| if (bailOutKind & IR::BailOutKindBits) |
| { |
| fastPath = true; |
| } |
| else |
| { |
| const IR::BailOutKind bailOutKindMinusBits = bailOutKind & ~IR::BailOutKindBits; |
| fastPath = |
| bailOutKindMinusBits && |
| bailOutKindMinusBits != IR::BailOutOnImplicitCalls && |
| bailOutKindMinusBits != IR::BailOutOnImplicitCallsPreOp; |
| } |
| } |
| |
| bool instrIsInHelperBlock = false; |
| if (!fastPath) |
| { |
| LowerLdLen(instr, false); |
| } |
| else if (GenerateFastLdLen(instr, &instrIsInHelperBlock)) |
| { |
| Assert( |
| !instr->HasBailOutInfo() || |
| (instr->GetBailOutKind() & ~IR::BailOutKindBits) != IR::BailOutOnIrregularLength); |
| LowerLdLen(instr, instrIsInHelperBlock); |
| } |
| break; |
| } |
| |
| case Js::OpCode::LdThis: |
| { |
| if (noFieldFastPath || !GenerateLdThisCheck(instr)) |
| { |
| IR::JnHelperMethod meth; |
| if (instr->IsJitProfilingInstr()) |
| { |
| Assert(instr->AsJitProfilingInstr()->profileId == Js::Constants::NoProfileId); |
| m_lowererMD.LoadHelperArgument(instr, CreateFunctionBodyOpnd(instr->m_func)); |
| meth = IR::HelperSimpleProfiledLdThis; |
| this->LowerBinaryHelper(instr, meth); |
| } |
| else |
| { |
| meth = IR::HelperLdThisNoFastPath; |
| this->LowerBinaryHelperMem(instr, meth); |
| } |
| } |
| else |
| { |
| this->LowerBinaryHelperMem(instr, IR::HelperLdThis); |
| } |
| break; |
| } |
| |
| case Js::OpCode::LdNativeCodeData: |
| Assert(m_func->IsOOPJIT()); |
| instrPrev = LowerLdNativeCodeData(instr); |
| break; |
| case Js::OpCode::StrictLdThis: |
| if (noFieldFastPath) |
| { |
| IR::JnHelperMethod meth; |
| if (instr->IsJitProfilingInstr()) |
| { |
| Assert(instr->AsJitProfilingInstr()->profileId == Js::Constants::NoProfileId); |
| m_lowererMD.LoadHelperArgument(instr, CreateFunctionBodyOpnd(instr->m_func)); |
| meth = IR::HelperSimpleProfiledStrictLdThis; |
| this->LowerUnaryHelper(instr, meth); |
| } |
| else |
| { |
| meth = IR::HelperStrictLdThis; |
| this->LowerUnaryHelperMem(instr, meth); |
| } |
| } |
| else |
| { |
| this->GenerateLdThisStrict(instr); |
| instr->Remove(); |
| } |
| break; |
| |
| case Js::OpCode::CheckThis: |
| GenerateLdThisCheck(instr); |
| instr->FreeSrc1(); |
| this->GenerateBailOut(instr); |
| break; |
| |
| case Js::OpCode::StrictCheckThis: |
| this->GenerateLdThisStrict(instr); |
| instr->FreeSrc1(); |
| this->GenerateBailOut(instr); |
| break; |
| |
| case Js::OpCode::NewScArray: |
| instrPrev = this->LowerNewScArray(instr); |
| break; |
| |
| case Js::OpCode::NewScArrayWithMissingValues: |
| this->LowerUnaryHelperMem(instr, IR::HelperScrArr_OP_NewScArrayWithMissingValues); |
| break; |
| |
| case Js::OpCode::NewScIntArray: |
| instrPrev = this->LowerNewScIntArray(instr); |
| break; |
| |
| case Js::OpCode::NewScFltArray: |
| instrPrev = this->LowerNewScFltArray(instr); |
| break; |
| |
| case Js::OpCode::InitForInEnumerator: |
| this->LowerInitForInEnumerator(instr); |
| break; |
| |
| case Js::OpCode::Add_A: |
| if (instr->GetDst()->IsFloat()) |
| { |
| Assert(instr->GetSrc1()->IsFloat()); |
| Assert(instr->GetSrc2()->IsFloat()); |
| // we don't want to mix float32 and float64 |
| Assert(instr->GetDst()->GetType() == instr->GetSrc1()->GetType()); |
| Assert(instr->GetDst()->GetType() == instr->GetSrc2()->GetType()); |
| m_lowererMD.LowerToFloat(instr); |
| } |
| else if (PHASE_OFF(Js::MathFastPathPhase, this->m_func) || noMathFastPath) |
| { |
| this->LowerBinaryHelperMem(instr, IR::HelperOp_Add); |
| } |
| else if (m_lowererMD.TryGenerateFastMulAdd(instr, &instrPrev)) |
| { |
| } |
| else |
| { |
| m_lowererMD.GenerateFastAdd(instr); |
| this->LowerBinaryHelperMemWithTemp3(instr, IR_HELPER_OP_FULL_OR_INPLACE(Add), IR::HelperOp_AddLeftDead); |
| } |
| break; |
| |
| case Js::OpCode::Div_A: |
| { |
| if (instr->IsJitProfilingInstr())
| {
| LowerProfiledBinaryOp(instr->AsJitProfilingInstr(), IR::HelperSimpleProfiledDivide); |
| } |
| else if (instr->GetDst()->IsFloat()) |
| { |
| Assert(instr->GetSrc1()->IsFloat()); |
| Assert(instr->GetSrc2()->IsFloat()); |
| Assert(instr->GetDst()->GetType() == instr->GetSrc1()->GetType()); |
| Assert(instr->GetDst()->GetType() == instr->GetSrc2()->GetType()); |
| |
| m_lowererMD.LowerToFloat(instr); |
| } |
| else |
| { |
| if (!PHASE_OFF(Js::MathFastPathPhase, this->m_func) && !noMathFastPath) |
| { |
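| // If the divisor is a constant tagged-int power of two, emit the
| // specialized divide-by-power-of-2 fast path before the generic helper.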
| IR::AddrOpnd *src2 = instr->GetSrc2()->IsAddrOpnd() ? instr->GetSrc2()->AsAddrOpnd() : nullptr; |
| if (src2 && src2->IsVar() && Js::TaggedInt::Is(src2->m_address)) |
| { |
| int32 value = Js::TaggedInt::ToInt32(src2->m_address); |
| if (Math::IsPow2(value)) |
| { |
| m_lowererMD.GenerateFastDivByPow2(instr); |
| } |
| } |
| } |
| this->LowerBinaryHelperMemWithTemp2(instr, IR_HELPER_OP_FULL_OR_INPLACE(Divide)); |
| } |
| break; |
| } |
| |
| case Js::OpCode::Expo_A: |
| { |
| if (instr->GetDst()->IsFloat()) |
| { |
| Assert(instr->GetSrc1()->IsFloat()); |
| Assert(instr->GetSrc2()->IsFloat()); |
| Assert(instr->GetDst()->GetType() == instr->GetSrc1()->GetType()); |
| Assert(instr->GetDst()->GetType() == instr->GetSrc2()->GetType()); |
| |
| m_lowererMD.GenerateFastInlineBuiltInCall(instr, IR::HelperDirectMath_Pow); |
| } |
| else |
| { |
| this->LowerBinaryHelperMemWithTemp2(instr, IR_HELPER_OP_FULL_OR_INPLACE(Exponentiation)); |
| } |
| break; |
| } |
| |
| case Js::OpCode::Mul_A: |
| if (instr->GetDst()->IsFloat()) |
| { |
| Assert(instr->GetSrc1()->IsFloat()); |
| Assert(instr->GetSrc2()->IsFloat()); |
| Assert(instr->GetDst()->GetType() == instr->GetSrc1()->GetType()); |
| Assert(instr->GetDst()->GetType() == instr->GetSrc2()->GetType()); |
| |
| m_lowererMD.LowerToFloat(instr); |
| } |
| else if (PHASE_OFF(Js::MathFastPathPhase, this->m_func) || noMathFastPath) |
| { |
| this->LowerBinaryHelperMem(instr, IR::HelperOp_Multiply); |
| } |
| else if (m_lowererMD.GenerateFastMul(instr)) |
| { |
| this->LowerBinaryHelperMemWithTemp2(instr, IR_HELPER_OP_FULL_OR_INPLACE(Multiply)); |
| } |
| break; |
| |
| case Js::OpCode::Rem_A: |
| if (instr->GetDst()->IsFloat64()) |
| { |
| this->LowerRemR8(instr); |
| } |
| else if (instr->IsJitProfilingInstr()) |
| { |
| this->LowerProfiledBinaryOp(instr->AsJitProfilingInstr(), IR::HelperSimpleProfiledRemainder); |
| } |
| else |
| { |
| this->LowerBinaryHelperMemWithTemp2(instr, IR_HELPER_OP_FULL_OR_INPLACE(Modulus)); |
| } |
| break; |
| |
| case Js::OpCode::Sub_A: |
| if (instr->GetDst()->IsFloat()) |
| { |
| Assert(instr->GetSrc1()->IsFloat()); |
| Assert(instr->GetSrc2()->IsFloat()); |
| Assert(instr->GetDst()->GetType() == instr->GetSrc1()->GetType()); |
| Assert(instr->GetDst()->GetType() == instr->GetSrc2()->GetType()); |
| |
| m_lowererMD.LowerToFloat(instr); |
| } |
| else if (PHASE_OFF(Js::MathFastPathPhase, this->m_func) || noMathFastPath) |
| { |
| this->LowerBinaryHelperMem(instr, IR::HelperOp_Subtract); |
| } |
| else if (m_lowererMD.TryGenerateFastMulAdd(instr, &instrPrev)) |
| { |
| } |
| else |
| { |
| m_lowererMD.GenerateFastSub(instr); |
| this->LowerBinaryHelperMemWithTemp2(instr, IR_HELPER_OP_FULL_OR_INPLACE(Subtract)); |
| } |
| break; |
| |
| case Js::OpCode::And_A: |
| if (PHASE_OFF(Js::BitopsFastPathPhase, this->m_func) || noMathFastPath) |
| { |
| this->LowerBinaryHelperMem(instr, IR::HelperOp_And); |
| } |
| else if (m_lowererMD.GenerateFastAnd(instr)) |
| { |
| this->LowerBinaryHelperMemWithTemp2(instr, IR_HELPER_OP_FULL_OR_INPLACE(And)); |
| } |
| break; |
| |
| case Js::OpCode::Or_A: |
| if (PHASE_OFF(Js::BitopsFastPathPhase, this->m_func) || noMathFastPath) |
| { |
| this->LowerBinaryHelperMem(instr, IR::HelperOp_Or); |
| } |
| else if (m_lowererMD.GenerateFastOr(instr)) |
| { |
| this->LowerBinaryHelperMemWithTemp2(instr, IR_HELPER_OP_FULL_OR_INPLACE(Or)); |
| } |
| break; |
| |
| case Js::OpCode::Xor_A: |
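| // GenerateFastXor returns true when the helper call is still required,
| // so the phase-off, no-fast-path, and fast-path-with-fallback cases all
| // share the helper lowering below.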
| if (PHASE_OFF(Js::BitopsFastPathPhase, this->m_func) || noMathFastPath || m_lowererMD.GenerateFastXor(instr)) |
| { |
| this->LowerBinaryHelperMemWithTemp2(instr, IR_HELPER_OP_FULL_OR_INPLACE(Xor)); |
| } |
| break; |
| |
| case Js::OpCode::Shl_A: |
| if (PHASE_OFF(Js::BitopsFastPathPhase, this->m_func) || noMathFastPath || m_lowererMD.GenerateFastShiftLeft(instr)) |
| { |
| this->LowerBinaryHelperMem(instr, IR::HelperOp_ShiftLeft); |
| } |
| break; |
| |
| case Js::OpCode::Shr_A: |
| if (PHASE_OFF(Js::BitopsFastPathPhase, this->m_func) || noMathFastPath || m_lowererMD.GenerateFastShiftRight(instr)) |
| { |
| this->LowerBinaryHelperMem(instr, IR::HelperOp_ShiftRight); |
| } |
| break; |
| |
| case Js::OpCode::ShrU_A: |
| if (PHASE_OFF(Js::BitopsFastPathPhase, this->m_func) || noMathFastPath || m_lowererMD.GenerateFastShiftRight(instr)) |
| { |
| this->LowerBinaryHelperMem(instr, IR::HelperOp_ShiftRightU); |
| } |
| break; |
| |
| case Js::OpCode::CmEq_A: |
| { |
| instrPrev = LowerEqualityCompare(instr, IR::HelperOP_CmEq_A); |
| break; |
| } |
| case Js::OpCode::CmNeq_A: |
| { |
| instrPrev = LowerEqualityCompare(instr, IR::HelperOP_CmNeq_A); |
| break; |
| } |
| case Js::OpCode::CmSrEq_A: |
| instrPrev = LowerEqualityCompare(instr, IR::HelperOP_CmSrEq_A); |
| break; |
| case Js::OpCode::CmSrNeq_A: |
| instrPrev = LowerEqualityCompare(instr, IR::HelperOP_CmSrNeq_A); |
| break; |
| case Js::OpCode::CmGt_A: |
| if (instr->GetSrc1()->IsFloat()) |
| { |
| Assert(instr->GetSrc1()->GetType() == instr->GetSrc2()->GetType()); |
| this->m_lowererMD.GenerateFastCmXxR8(instr); |
| } |
| else if (PHASE_OFF(Js::BranchFastPathPhase, this->m_func) || noMathFastPath || !m_lowererMD.GenerateFastCmXxTaggedInt(instr)) |
| { |
| this->LowerBinaryHelperMem(instr, IR::HelperOP_CmGt_A); |
| } |
| break; |
| |
| case Js::OpCode::CmGe_A: |
| if (instr->GetSrc1()->IsFloat()) |
| { |
| Assert(instr->GetSrc1()->GetType() == instr->GetSrc2()->GetType()); |
| this->m_lowererMD.GenerateFastCmXxR8(instr); |
| } |
| else if (PHASE_OFF(Js::BranchFastPathPhase, this->m_func) || noMathFastPath || !m_lowererMD.GenerateFastCmXxTaggedInt(instr)) |
| { |
| this->LowerBinaryHelperMem(instr, IR::HelperOP_CmGe_A); |
| } |
| break; |
| |
| case Js::OpCode::CmLt_A: |
| if (instr->GetSrc1()->IsFloat()) |
| { |
| Assert(instr->GetSrc1()->GetType() == instr->GetSrc2()->GetType()); |
| this->m_lowererMD.GenerateFastCmXxR8(instr); |
| } |
| else if (PHASE_OFF(Js::BranchFastPathPhase, this->m_func) || noMathFastPath || !m_lowererMD.GenerateFastCmXxTaggedInt(instr)) |
| { |
| this->LowerBinaryHelperMem(instr, IR::HelperOP_CmLt_A); |
| } |
| break; |
| |
| case Js::OpCode::CmLe_A: |
| if (instr->GetSrc1()->IsFloat()) |
| { |
| Assert(instr->GetSrc1()->GetType() == instr->GetSrc2()->GetType()); |
| this->m_lowererMD.GenerateFastCmXxR8(instr); |
| } |
| else if (PHASE_OFF(Js::BranchFastPathPhase, this->m_func) || noMathFastPath || !m_lowererMD.GenerateFastCmXxTaggedInt(instr)) |
| { |
| this->LowerBinaryHelperMem(instr, IR::HelperOP_CmLe_A); |
| } |
| break; |
| |
| case Js::OpCode::CmEq_I4: |
| case Js::OpCode::CmNeq_I4: |
| case Js::OpCode::CmGe_I4: |
| case Js::OpCode::CmGt_I4: |
| case Js::OpCode::CmLe_I4: |
| case Js::OpCode::CmLt_I4: |
| case Js::OpCode::CmUnGe_I4: |
| case Js::OpCode::CmUnGt_I4: |
| case Js::OpCode::CmUnLe_I4: |
| case Js::OpCode::CmUnLt_I4: |
| this->m_lowererMD.GenerateFastCmXxI4(instr); |
| break; |
| |
| case Js::OpCode::Conv_Bool: |
| instrPrev = this->m_lowererMD.GenerateConvBool(instr); |
| break; |
| |
| case Js::OpCode::IsInst: |
| this->GenerateFastIsInst(instr); |
| instrPrev = this->LowerIsInst(instr, IR::HelperScrObj_OP_IsInst); |
| break; |
| |
| case Js::OpCode::IsIn: |
| this->GenerateFastArrayIsIn(instr); |
| this->GenerateFastObjectIsIn(instr); |
| this->LowerBinaryHelperMem(instr, IR::HelperOp_IsIn); |
| break; |
| |
| case Js::OpCode::LdArrViewElem: |
| instrPrev = LowerLdArrViewElem(instr); |
| break; |
| |
| case Js::OpCode::StAtomicWasm: |
| instrPrev = LowerStAtomicsWasm(instr); |
| break; |
| |
| case Js::OpCode::StArrViewElem: |
| instrPrev = LowerStArrViewElem(instr); |
| break; |
| |
| case Js::OpCode::LdAtomicWasm: |
| instrPrev = LowerLdAtomicsWasm(instr); |
| break; |
| |
| case Js::OpCode::LdArrViewElemWasm: |
| instrPrev = LowerLdArrViewElemWasm(instr); |
| break; |
| |
| case Js::OpCode::Memset: |
| case Js::OpCode::Memcopy: |
| { |
| instrPrev = LowerMemOp(instr); |
| break; |
| } |
| |
| case Js::OpCode::ArrayDetachedCheck: |
| instrPrev = LowerArrayDetachedCheck(instr); |
| break; |
| |
| case Js::OpCode::StElemI_A: |
| case Js::OpCode::StElemI_A_Strict: |
| { |
| // Note: under the debugger (Fast F12), don't use GenerateFastStElemI: it calls into ToNumber_Helper,
| // which takes a double, and currently our helper wrapper doesn't support double.
| bool fastPath = !noMathFastPath && !m_func->IsJitInDebugMode(); |
| if (!fastPath && instr->HasBailOutInfo()) |
| { |
| // Some bailouts are generated around the helper call, and will work even if the fast path is disabled. Other |
| // bailouts require the fast path. |
| const IR::BailOutKind bailOutKind = instr->GetBailOutKind(); |
| const IR::BailOutKind bailOutKindBits = bailOutKind & IR::BailOutKindBits; |
| if (bailOutKindBits & ~(IR::BailOutOnMissingValue | IR::BailOutConvertedNativeArray)) |
| { |
| fastPath = true; |
| } |
| else |
| { |
| const IR::BailOutKind bailOutKindMinusBits = bailOutKind & ~IR::BailOutKindBits; |
| fastPath = |
| bailOutKindMinusBits && |
| bailOutKindMinusBits != IR::BailOutOnImplicitCalls && |
| bailOutKindMinusBits != IR::BailOutOnImplicitCallsPreOp; |
| } |
| } |
| |
| IR::Opnd * opnd = instr->GetDst(); |
| IR::Opnd * baseOpnd = opnd->AsIndirOpnd()->GetBaseOpnd(); |
| ValueType profiledBaseValueType = baseOpnd->AsRegOpnd()->GetValueType(); |
| if (profiledBaseValueType.IsUninitialized() && baseOpnd->AsRegOpnd()->m_sym->IsSingleDef()) |
| { |
| baseOpnd->SetValueType(baseOpnd->FindProfiledValueType()); |
| } |
| |
| bool instrIsInHelperBlock = false; |
| if (!fastPath) |
| { |
| this->LowerStElemI( |
| instr, |
| instr->m_opcode == Js::OpCode::StElemI_A ? Js::PropertyOperation_None : Js::PropertyOperation_StrictMode, |
| false); |
| } |
| else if (GenerateFastStElemI(instr, &instrIsInHelperBlock)) |
| { |
| #if DBG |
| if (instr->HasBailOutInfo()) |
| { |
| const IR::BailOutKind bailOutKind = instr->GetBailOutKind(); |
| |
| Assert( |
| (bailOutKind & ~IR::BailOutKindBits) != IR::BailOutConventionalTypedArrayAccessOnly && |
| !( |
| bailOutKind & |
| (IR::BailOutConventionalNativeArrayAccessOnly | IR::BailOutOnArrayAccessHelperCall) |
| )); |
| } |
| #endif |
| this->LowerStElemI( |
| instr, |
| instr->m_opcode == Js::OpCode::StElemI_A ? Js::PropertyOperation_None : Js::PropertyOperation_StrictMode, |
| instrIsInHelperBlock); |
| } |
| break; |
| } |
| |
| case Js::OpCode::LdElemI_A: |
| case Js::OpCode::LdMethodElem: |
| { |
| bool fastPath = |
| !noMathFastPath && |
| ( |
| instr->m_opcode != Js::OpCode::LdMethodElem || |
| instr->GetSrc1()->AsIndirOpnd()->GetBaseOpnd()->GetValueType().IsLikelyObject() |
| ); |
| if (!fastPath && instr->HasBailOutInfo()) |
| { |
| // Some bailouts are generated around the helper call, and will work even if the fast path is disabled. Other |
| // bailouts require the fast path. |
| const IR::BailOutKind bailOutKind = instr->GetBailOutKind(); |
| if (bailOutKind & IR::BailOutKindBits) |
| { |
| fastPath = true; |
| } |
| else |
| { |
| const IR::BailOutKind bailOutKindMinusBits = bailOutKind & ~IR::BailOutKindBits; |
| fastPath = |
| bailOutKindMinusBits && |
| bailOutKindMinusBits != IR::BailOutOnImplicitCalls && |
| bailOutKindMinusBits != IR::BailOutOnImplicitCallsPreOp; |
| } |
| } |
| |
| IR::Opnd * opnd = instr->GetSrc1(); |
| IR::Opnd * baseOpnd = opnd->AsIndirOpnd()->GetBaseOpnd(); |
| ValueType profiledBaseValueType = baseOpnd->AsRegOpnd()->GetValueType(); |
| if (profiledBaseValueType.IsUninitialized() && baseOpnd->AsRegOpnd()->m_sym->IsSingleDef()) |
| { |
| baseOpnd->SetValueType(baseOpnd->FindProfiledValueType()); |
| } |
| |
| bool instrIsInHelperBlock = false; |
| |
| if (!fastPath) |
| { |
| this->LowerLdElemI( |
| instr, |
| instr->m_opcode == Js::OpCode::LdElemI_A ? IR::HelperOp_GetElementI : IR::HelperOp_GetMethodElement, |
| false); |
| } |
| else if (GenerateFastLdElemI(instr, &instrIsInHelperBlock)) |
| { |
| #if DBG |
| if (instr->HasBailOutInfo()) |
| { |
| const IR::BailOutKind bailOutKind = instr->GetBailOutKind(); |
| |
| Assert( |
| (bailOutKind & ~IR::BailOutKindBits) != IR::BailOutConventionalTypedArrayAccessOnly && |
| !( |
| bailOutKind & |
| (IR::BailOutConventionalNativeArrayAccessOnly | IR::BailOutOnArrayAccessHelperCall) |
| )); |
| } |
| #endif |
| this->LowerLdElemI( |
| instr, |
| instr->m_opcode == Js::OpCode::LdElemI_A ? IR::HelperOp_GetElementI : IR::HelperOp_GetMethodElement, |
| instrIsInHelperBlock); |
| } |
| break; |
| } |
| |
| case Js::OpCode::InitSetElemI: |
| instrPrev = this->LowerStElemI(instr, Js::PropertyOperation_None, false, IR::HelperOP_InitElemSetter); |
| break; |
| |
| case Js::OpCode::InitGetElemI: |
| instrPrev = this->LowerStElemI(instr, Js::PropertyOperation_None, false, IR::HelperOP_InitElemGetter); |
| break; |
| |
| case Js::OpCode::InitComputedProperty: |
| instrPrev = this->LowerStElemI(instr, Js::PropertyOperation_None, false, IR::HelperOP_InitComputedProperty); |
| break; |
| |
| case Js::OpCode::Delete_A: |
| this->LowerUnaryHelperMem(instr, IR::HelperOp_Delete); |
| break; |
| |
| case Js::OpCode::DeleteElemI_A: |
| this->LowerDeleteElemI(instr, false); |
| break; |
| |
| case Js::OpCode::DeleteElemIStrict_A: |
| this->LowerDeleteElemI(instr, true); |
| break; |
| |
| case Js::OpCode::BytecodeArgOutCapture: |
| m_lowererMD.ChangeToAssign(instr); |
| break; |
| |
| case Js::OpCode::UnwrapWithObj: |
| this->LowerUnaryHelper(instr, IR::HelperOp_UnwrapWithObj); |
| break; |
| |
| #ifdef ENABLE_WASM |
| case Js::OpCode::CheckWasmSignature: |
| this->LowerCheckWasmSignature(instr); |
| break; |
| case Js::OpCode::LdWasmFunc: |
| instrPrev = this->LowerLdWasmFunc(instr); |
| break; |
| case Js::OpCode::GrowWasmMemory: |
| instrPrev = this->LowerGrowWasmMemory(instr); |
| break; |
| #endif |
| case Js::OpCode::Ld_I4: |
| LowererMD::ChangeToAssign(instr); |
| break; |
| case Js::OpCode::LdAsmJsFunc: |
| if (instr->GetSrc1()->IsIndirOpnd()) |
| { |
| IR::IndirOpnd* indir = instr->GetSrc1()->AsIndirOpnd(); |
| byte scale = m_lowererMD.GetDefaultIndirScale(); |
| if (!indir->GetIndexOpnd()) |
| { |
| // If we have a constant offset, we need to apply the scale now |
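// e.g., with a scale of 3 (8-byte slots), an offset of 5 becomes (1 << 3) * 5 == 40.
// Int32Math::Shl/Mul return true on overflow, in which case we throw a range error.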
| int32 offset; |
| if (Int32Math::Shl(1, scale, &offset) || Int32Math::Mul(offset, indir->GetOffset(), &offset)) |
| { |
| // The constant is too big to offset this array. Throw out of range. |
// TODO: throw a better error message for this scenario
| GenerateRuntimeError(instr, JSERR_ArgumentOutOfRange, IR::HelperOp_RuntimeRangeError); |
| } |
| indir->SetOffset(offset); |
| } |
| else |
| { |
| indir->SetScale(scale); |
| } |
| } |
// fall-through
| case Js::OpCode::Ld_A: |
| case Js::OpCode::InitConst: |
if (instr->IsJitProfilingInstr() && instr->AsJitProfilingInstr()->isBeginSwitch)
{
| LowerProfiledBeginSwitch(instr->AsJitProfilingInstr()); |
| break; |
| } |
| m_lowererMD.ChangeToAssign(instr); |
| if (instr->HasBailOutInfo()) |
| { |
| IR::BailOutKind bailOutKind = instr->GetBailOutKind(); |
| |
| if (bailOutKind == IR::BailOutExpectingString) |
| { |
| this->LowerBailOnNotString(instr); |
| } |
| else |
| { |
// Should not reach here, as BailOutExpectingString is currently the only BailOutKind associated with the load instr
| Assert(false); |
| } |
| } |
| break; |
| |
| case Js::OpCode::LdIndir: |
| Assert(instr->GetDst()); |
| Assert(instr->GetDst()->IsRegOpnd()); |
| Assert(instr->GetSrc1()); |
| Assert(instr->GetSrc1()->IsIndirOpnd()); |
| Assert(!instr->GetSrc2()); |
| m_lowererMD.ChangeToAssign(instr); |
| break; |
| |
| case Js::OpCode::FromVar: |
| Assert(instr->GetSrc1()->GetType() == TyVar); |
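// FromVar unboxes a Var. Int32 destinations may bail out instead of calling a helper;
// float destinations use the inline asm.js load or the number-conversion helper; int64
// and SIMD destinations are wasm-only and always raise an invalid-conversion error.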
| if (instr->GetDst()->GetType() == TyInt32) |
| { |
| if (m_lowererMD.EmitLoadInt32(instr, !(instr->HasBailOutInfo() && (instr->GetBailOutKind() == IR::BailOutOnNotPrimitive)))) |
| { |
| // Bail out instead of calling a helper |
| Assert(instr->GetBailOutKind() == IR::BailOutIntOnly || instr->GetBailOutKind() == IR::BailOutExpectingInteger); |
| Assert(!instr->GetSrc1()->GetValueType().IsInt()); // when we know it's an int, it should not have bailout info, to avoid generating a bailout path that will never be taken |
| instr->UnlinkSrc1(); |
| instr->UnlinkDst(); |
| GenerateBailOut(instr); |
| } |
| } |
| else if (instr->GetDst()->IsFloat()) |
| { |
| if (m_func->GetJITFunctionBody()->IsAsmJsMode()) |
| { |
| m_lowererMD.EmitLoadFloat(instr->GetDst(), instr->GetSrc1(), instr); |
| instr->Remove(); |
| } |
| else |
| { |
| m_lowererMD.EmitLoadFloatFromNumber(instr->GetDst(), instr->GetSrc1(), instr); |
| } |
| } |
| else if (instr->GetDst()->IsInt64()) |
| { |
| Assert(m_func->GetJITFunctionBody()->IsWasmFunction()); |
| GenerateRuntimeError(instr, WASMERR_InvalidTypeConversion); |
| instr->ReplaceSrc1(IR::Int64ConstOpnd::New(0, TyInt64, m_func)); |
| LowererMD::ChangeToAssign(instr); |
| } |
| #ifdef ENABLE_WASM_SIMD |
| else if (instr->GetDst()->IsSimd128()) |
| { |
| Assert(m_func->GetJITFunctionBody()->IsWasmFunction()); |
| GenerateRuntimeError(instr, WASMERR_InvalidTypeConversion); |
| instr->ReplaceSrc1(IR::Simd128ConstOpnd::New({ 0,0,0,0 }, instr->GetDst()->GetType(), m_func)); |
| LowererMD::ChangeToAssign(instr); |
| } |
| #endif |
| else |
| { |
| Assert(UNREACHED); |
| } |
| break; |
| |
| case Js::OpCode::ArgOut_A: |
// Not sure whether this can happen in asm.js mode, but if it can, we might want to handle it differently
| Assert(!m_func->GetJITFunctionBody()->IsAsmJsMode()); |
| // fall-through |
| |
| case Js::OpCode::ArgOut_A_Inline: |
| case Js::OpCode::ArgOut_A_Dynamic: |
| { |
| // ArgOut/StartCall are normally lowered by the lowering of the associated call instr. |
| // If the call becomes unreachable, we could end up with an orphan ArgOut or StartCall. |
| // Change the ArgOut into a store to the stack for bailouts |
| instr->FreeSrc2(); |
| StackSym *argSym = instr->GetDst()->AsSymOpnd()->m_sym->AsStackSym(); |
| argSym->m_offset = this->m_func->StackAllocate(sizeof(Js::Var)); |
| argSym->m_allocated = true; |
| argSym->m_isOrphanedArg = true; |
| this->m_lowererMD.ChangeToAssign(instr); |
| } |
| break; |
| case Js::OpCode::LoweredStartCall: |
| case Js::OpCode::StartCall: |
| // ArgOut/StartCall are normally lowered by the lowering of the associated call instr. |
| // If the call becomes unreachable, we could end up with an orphan ArgOut or StartCall. |
| // We'll just delete these StartCalls during peeps. |
| break; |
| |
| case Js::OpCode::ToVar: |
| Assert(instr->GetDst()->GetType() == TyVar); |
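// ToVar boxes a value: int32 via EmitLoadVar, floats via SaveDoubleToVar (widening
// float32 to float64 first); int64 and SIMD sources are wasm-only and always raise
// an invalid-conversion error.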
| if (instr->GetSrc1()->GetType() == TyInt32) |
| { |
| m_lowererMD.EmitLoadVar(instr); |
| } |
| else if (instr->GetSrc1()->IsFloat()) |
| { |
| Assert(instr->GetSrc1()->IsRegOpnd()); |
| IR::RegOpnd* float64Opnd = instr->GetSrc1()->AsRegOpnd(); |
| if (float64Opnd->IsFloat32()) |
| { |
| IR::RegOpnd* float64ConvOpnd = IR::RegOpnd::New(TyFloat64, m_func); |
| m_lowererMD.EmitFloat32ToFloat64(float64ConvOpnd, float64Opnd, instr); |
| float64Opnd = float64ConvOpnd; |
| } |
| m_lowererMD.SaveDoubleToVar( |
| instr->GetDst()->AsRegOpnd(), |
| float64Opnd, instr, instr); |
| instr->Remove(); |
| } |
| else if (instr->GetSrc1()->IsInt64() || instr->GetSrc1()->IsSimd128()) |
| { |
| Assert(m_func->GetJITFunctionBody()->IsWasmFunction()); |
| GenerateRuntimeError(instr, WASMERR_InvalidTypeConversion); |
| instr->ReplaceSrc1(IR::IntConstOpnd::New(0, TyMachReg, m_func)); |
| LowererMD::ChangeToAssign(instr); |
| } |
| else |
| { |
| Assert(UNREACHED); |
| } |
| break; |
| case Js::OpCode::Conv_Prim_Sat: |
| { |
| GenerateTruncWithCheck<true /* Saturate */>(instr); |
| break; |
| } |
| case Js::OpCode::Conv_Prim: |
| { |
| if (IR::Instr::FindSingleDefInstr(Js::OpCode::TrapIfTruncOverflow, instr->GetSrc1())) |
| { |
| GenerateTruncWithCheck<false /* Saturate */>(instr); |
| break; |
| } |
| |
| if (instr->GetDst()->IsFloat()) |
| { |
| if (instr->GetSrc1()->IsIntConstOpnd()) |
| { |
| LoadFloatFromNonReg(instr->UnlinkSrc1(), instr->UnlinkDst(), instr); |
| } |
| else if (instr->GetSrc1()->IsInt32()) |
| { |
| m_lowererMD.EmitIntToFloat(instr->GetDst(), instr->GetSrc1(), instr); |
| } |
| else if (instr->GetSrc1()->IsUInt32()) |
| { |
| m_lowererMD.EmitUIntToFloat(instr->GetDst(), instr->GetSrc1(), instr); |
| } |
| else if (instr->GetSrc1()->IsInt64()) |
| { |
| m_lowererMD.EmitInt64toFloat(instr->GetDst(), instr->GetSrc1(), instr); |
| } |
| else |
| { |
| Assert(instr->GetDst()->IsFloat64()); |
| Assert(instr->GetSrc1()->IsFloat32()); |
| m_lowererMD.EmitFloat32ToFloat64(instr->GetDst(), instr->GetSrc1(), instr); |
| } |
| } |
| else if (instr->GetDst()->IsInt64()) |
| { |
| if (instr->GetSrc1()->IsInt32()) |
| { |
| m_lowererMD.EmitIntToLong(instr->GetDst(), instr->GetSrc1(), instr); |
| } |
| else if (instr->GetSrc1()->IsUInt32()) |
| { |
| m_lowererMD.EmitUIntToLong(instr->GetDst(), instr->GetSrc1(), instr); |
| } |
| else if (instr->GetSrc1()->IsInt64() && instr->GetSrc2()) |
| { |
| m_lowererMD.EmitSignExtend(instr); |
| } |
| else |
| { |
Assert(UNREACHED);
| } |
| } |
| else |
| { |
| Assert(instr->GetDst()->IsInt32()); |
| if (instr->GetSrc1()->IsInt64()) |
| { |
| m_lowererMD.EmitLongToInt(instr->GetDst(), instr->GetSrc1(), instr); |
| } |
| else if ((instr->GetSrc1()->IsInt32() || instr->GetSrc1()->IsUInt32()) && instr->GetSrc2()) |
| { |
| m_lowererMD.EmitSignExtend(instr); |
| } |
| else |
| { |
| Assert(instr->GetSrc1()->IsFloat()); |
| m_lowererMD.EmitFloatToInt(instr->GetDst(), instr->GetSrc1(), instr); |
| } |
| } |
| instr->Remove(); |
| break; |
| } |
| case Js::OpCode::FunctionExit: |
| LowerFunctionExit(instr); |
| // The rest of Epilog generation happens after reg allocation |
| break; |
| |
| case Js::OpCode::FunctionEntry: |
| LowerFunctionEntry(instr); |
| // The rest of Prolog generation happens after reg allocation |
| break; |
| |
| case Js::OpCode::ArgIn_Rest: |
| case Js::OpCode::ArgIn_A: |
| if (m_func->GetJITFunctionBody()->IsAsmJsMode() && !m_func->IsLoopBody()) |
| { |
| instrPrev = LowerArgInAsmJs(instr); |
| } |
| else |
| { |
| instrPrev = LowerArgIn(instr); |
| } |
| break; |
| |
| case Js::OpCode::Label: |
| if (instr->AsLabelInstr()->m_isLoopTop) |
| { |
| if (this->outerMostLoopLabel == instr) |
| { |
| noFieldFastPath = !defaultDoFastPath; |
| noMathFastPath = !defaultDoFastPath; |
| this->outerMostLoopLabel = nullptr; |
| instr->AsLabelInstr()->GetLoop()->isProcessed = true; |
| } |
| this->m_func->MarkConstantAddressSyms(instr->AsLabelInstr()->GetLoop()->regAlloc.liveOnBackEdgeSyms); |
| instr->AsLabelInstr()->GetLoop()->regAlloc.liveOnBackEdgeSyms->Or(this->addToLiveOnBackEdgeSyms); |
| } |
| break; |
| |
| case Js::OpCode::Br: |
| instr->m_opcode = LowererMD::MDUncondBranchOpcode; |
| break; |
| |
| case Js::OpCode::BrFncEqApply: |
| LowerBrFncApply(instr, IR::HelperOp_OP_BrFncEqApply); |
| break; |
| |
| case Js::OpCode::BrFncNeqApply: |
| LowerBrFncApply(instr, IR::HelperOp_OP_BrFncNeqApply); |
| break; |
| |
| case Js::OpCode::BrHasSideEffects: |
| case Js::OpCode::BrNotHasSideEffects: |
| m_lowererMD.GenerateFastBrS(instr->AsBranchInstr()); |
| break; |
| |
| case Js::OpCode::BrFalse_A: |
| case Js::OpCode::BrTrue_A: |
| if (instr->GetSrc1()->IsFloat()) |
| { |
| GenerateFastBrBool(instr->AsBranchInstr()); |
| } |
| else if (PHASE_OFF(Js::BranchFastPathPhase, this->m_func) || |
| noMathFastPath || |
| GenerateFastBrBool(instr->AsBranchInstr())) |
| { |
| this->LowerBrBMem(instr, IR::HelperConv_ToBoolean); |
| } |
| break; |
| |
| case Js::OpCode::BrOnObject_A: |
| if (PHASE_OFF(Js::BranchFastPathPhase, this->m_func) || noMathFastPath) |
| { |
| this->LowerBrOnObject(instr, IR::HelperOp_IsObject); |
| } |
| else |
| { |
| GenerateFastBrOnObject(instr); |
| } |
| break; |
| |
| case Js::OpCode::BrOnBaseConstructorKind: |
| this->LowerBrOnClassConstructor(instr, IR::HelperOp_IsBaseConstructorKind); |
| break; |
| |
| case Js::OpCode::BrOnClassConstructor: |
| this->LowerBrOnClassConstructor(instr, IR::HelperOp_IsClassConstructor); |
| break; |
| |
| case Js::OpCode::BrAddr_A: |
| case Js::OpCode::BrNotAddr_A: |
| case Js::OpCode::BrNotNull_A: |
| m_lowererMD.LowerCondBranch(instr); |
| break; |
| |
| case Js::OpCode::BrEq_A: |
| case Js::OpCode::BrNotNeq_A: |
| instrPrev = LowerEqualityBranch(instr, IR::HelperOp_Equal); |
| break; |
| |
| case Js::OpCode::BrGe_A: |
| case Js::OpCode::BrNotGe_A: |
| if (instr->GetSrc1()->IsFloat()) |
| { |
| Assert(instr->GetSrc1()->GetType() == instr->GetSrc2()->GetType()); |
| m_lowererMD.LowerToFloat(instr); |
| } |
| else if (!PHASE_OFF(Js::BranchFastPathPhase, this->m_func) && !noMathFastPath) |
| { |
| this->LowerBrCMem(instr, IR::HelperOp_GreaterEqual, false, false /*isHelper*/); |
| } |
| else |
| { |
| this->LowerBrCMem(instr, IR::HelperOp_GreaterEqual, true, false /*isHelper*/); |
| } |
| break; |
| |
| case Js::OpCode::BrGt_A: |
| case Js::OpCode::BrNotGt_A: |
| if (instr->GetSrc1()->IsFloat()) |
| { |
| Assert(instr->GetSrc1()->GetType() == instr->GetSrc2()->GetType()); |
| m_lowererMD.LowerToFloat(instr); |
| } |
| else if (!PHASE_OFF(Js::BranchFastPathPhase, this->m_func) && !noMathFastPath) |
| { |
| this->LowerBrCMem(instr, IR::HelperOp_Greater, false, false /*isHelper*/); |
| } |
| else |
| { |
| this->LowerBrCMem(instr, IR::HelperOp_Greater, true, false /*isHelper*/); |
| } |
| break; |
| |
| case Js::OpCode::BrLt_A: |
| case Js::OpCode::BrNotLt_A: |
| if (instr->GetSrc1()->IsFloat()) |
| { |
| Assert(instr->GetSrc1()->GetType() == instr->GetSrc2()->GetType()); |
| m_lowererMD.LowerToFloat(instr); |
| } |
| else if (!PHASE_OFF(Js::BranchFastPathPhase, this->m_func) && !noMathFastPath) |
| { |
| this->LowerBrCMem(instr, IR::HelperOp_Less, false, false /*isHelper*/); |
| } |
| else |
| { |
| this->LowerBrCMem(instr, IR::HelperOp_Less, true, false /*isHelper*/); |
| } |
| break; |
| |
| case Js::OpCode::BrLe_A: |
| case Js::OpCode::BrNotLe_A: |
| if (instr->GetSrc1()->IsFloat()) |
| { |
| Assert(instr->GetSrc1()->GetType() == instr->GetSrc2()->GetType()); |
| m_lowererMD.LowerToFloat(instr); |
| } |
| else if (!PHASE_OFF(Js::BranchFastPathPhase, this->m_func) && !noMathFastPath) |
| { |
| this->LowerBrCMem(instr, IR::HelperOp_LessEqual, false, false /*isHelper*/); |
| } |
| else |
| { |
| this->LowerBrCMem(instr, IR::HelperOp_LessEqual, true, false /*isHelper*/); |
| } |
| break; |
| |
| case Js::OpCode::BrNeq_A: |
| case Js::OpCode::BrNotEq_A: |
| instrPrev = LowerEqualityBranch(instr, IR::HelperOp_NotEqual); |
| break; |
| |
| case Js::OpCode::MultiBr: |
| { |
| IR::MultiBranchInstr * multiBranchInstr = instr->AsBranchInstr()->AsMultiBrInstr(); |
| switch (multiBranchInstr->m_kind) |
| { |
| case IR::MultiBranchInstr::StrDictionary: |
| this->GenerateSwitchStringLookup(instr); |
| break; |
| case IR::MultiBranchInstr::SingleCharStrJumpTable: |
| this->GenerateSingleCharStrJumpTableLookup(instr); |
| m_func->m_totalJumpTableSizeInBytesForSwitchStatements += (multiBranchInstr->GetBranchJumpTable()->tableSize * sizeof(void*)); |
| break; |
| case IR::MultiBranchInstr::IntJumpTable: |
| this->LowerMultiBr(instr); |
| m_func->m_totalJumpTableSizeInBytesForSwitchStatements += (multiBranchInstr->GetBranchJumpTable()->tableSize * sizeof(void*)); |
| break; |
| default: |
| Assert(false); |
| } |
| break; |
| } |
| |
| case Js::OpCode::BrSrEq_A: |
| case Js::OpCode::BrSrNotNeq_A: |
| instrPrev = LowerEqualityBranch(instr, IR::HelperOp_StrictEqual); |
| break; |
| case Js::OpCode::BrSrNeq_A: |
| case Js::OpCode::BrSrNotEq_A: |
| instrPrev = LowerEqualityBranch(instr, IR::HelperOp_NotStrictEqual); |
| break; |
| |
| case Js::OpCode::BrOnEmpty: |
| case Js::OpCode::BrOnNotEmpty: |
| if (!PHASE_OFF(Js::BranchFastPathPhase, this->m_func)) |
| { |
| this->GenerateFastBrBReturn(instr); |
| this->LowerBrBReturn(instr, IR::HelperOp_OP_BrOnEmpty, true); |
| } |
| else |
| { |
| this->LowerBrBReturn(instr, IR::HelperOp_OP_BrOnEmpty, false); |
| } |
| break; |
| |
| case Js::OpCode::BrOnHasProperty: |
| case Js::OpCode::BrOnNoProperty: |
| this->LowerBrProperty(instr, IR::HelperOp_HasProperty); |
| break; |
| |
| case Js::OpCode::BrOnException: |
| Assert(!this->m_func->DoGlobOpt()); |
| instr->Remove(); |
| break; |
| |
| case Js::OpCode::BrOnNoException: |
| instr->m_opcode = LowererMD::MDUncondBranchOpcode; |
| break; |
| |
| case Js::OpCode::StSlot: |
| { |
| PropertySym *propertySym = instr->GetDst()->AsSymOpnd()->m_sym->AsPropertySym(); |
| instrPrev = AddSlotArrayCheck(propertySym, instr); |
| this->LowerStSlot(instr); |
| break; |
| } |
| |
| case Js::OpCode::StSlotChkUndecl: |
| { |
| PropertySym *propertySym = instr->GetDst()->AsSymOpnd()->m_sym->AsPropertySym(); |
| instrPrev = AddSlotArrayCheck(propertySym, instr); |
| this->LowerStSlotChkUndecl(instr); |
| break; |
| } |
| case Js::OpCode::ProfiledLoopStart: |
| { |
| Assert(m_func->DoSimpleJitDynamicProfile()); |
| Assert(instr->IsJitProfilingInstr()); |
| |
| // Check for the helper instr from IRBuilding (it won't be there if there are no LoopEnds due to an infinite loop) |
| auto prev = instr->m_prev; |
| if (prev->IsJitProfilingInstr() && prev->AsJitProfilingInstr()->isLoopHelper) |
| { |
| auto saveOpnd = prev->UnlinkDst(); |
| instrPrev = prev->m_prev; |
| prev->Remove(); |
| |
| const auto starFlag = GetImplicitCallFlagsOpnd(); |
| IR::AutoReuseOpnd a(starFlag, m_func); |
| this->InsertMove(saveOpnd, starFlag, instr); |
| this->InsertMove(starFlag, CreateClearImplicitCallFlagsOpnd(), instr); |
| } |
| else |
| { |
| #if DBG |
// Double-check that no ProfiledLoopEnd belonging to this same loop appears in the rest of the function
| auto cur = instr; |
| auto loopNumber = instr->AsJitProfilingInstr()->loopNumber; |
| while (cur) |
| { |
Assert(cur->m_opcode != Js::OpCode::ProfiledLoopEnd || (cur->IsJitProfilingInstr() && cur->AsJitProfilingInstr()->loopNumber != loopNumber));
| cur = cur->m_next; |
| } |
| #endif |
| } |
| |
| // If we turned off fulljit, there's no reason to do this. |
| if (PHASE_OFF(Js::FullJitPhase, m_func)) |
| { |
| instr->Remove(); |
| } |
| else |
| { |
| Assert(instr->GetDst()); |
| instr->SetSrc1(IR::HelperCallOpnd::New(IR::HelperSimpleGetScheduledEntryPoint, m_func)); |
| m_lowererMD.LoadHelperArgument(instr, IR::Opnd::CreateUint32Opnd(instr->AsJitProfilingInstr()->loopNumber, m_func)); |
| m_lowererMD.LoadHelperArgument(instr, IR::Opnd::CreateFramePointerOpnd(m_func)); |
| this->m_lowererMD.LowerCall(instr, 0); |
| } |
| break; |
| } |
| case Js::OpCode::ProfiledLoopBodyStart: |
| { |
| Assert(m_func->DoSimpleJitDynamicProfile()); |
| |
| const auto loopNum = instr->AsJitProfilingInstr()->loopNumber; |
| Assert(loopNum < m_func->GetJITFunctionBody()->GetLoopCount()); |
| |
| auto entryPointOpnd = instr->UnlinkSrc1(); |
| auto dobailout = instr->UnlinkDst(); |
| const auto dobailoutType = TyUint8; |
| Assert(dobailout->GetType() == TyUint8 && sizeof(decltype(Js::SimpleJitHelpers::IsLoopCodeGenDone(nullptr))) == 1); |
| |
| m_lowererMD.LoadHelperArgument(instr, IR::IntConstOpnd::New(0, TyUint32, m_func)); // zero indicates that we do not want to add flags back in |
| m_lowererMD.LoadHelperArgument(instr, IR::IntConstOpnd::New(loopNum, TyUint32, m_func)); |
| m_lowererMD.LoadHelperArgument(instr, IR::Opnd::CreateFramePointerOpnd(m_func)); |
| instr->SetSrc1(IR::HelperCallOpnd::New(IR::HelperSimpleRecordLoopImplicitCallFlags, m_func)); |
| m_lowererMD.LowerCall(instr, 0); |
| |
| // Outline of JITed code: |
| // |
| // LoopStart: |
| // entryPoint = GetScheduledEntryPoint(framePtr, loopNum) |
| // LoopBodyStart: |
| // uint8 dobailout; |
| // if (entryPoint) { |
| // dobailout = IsLoopCodeGenDone(entryPoint) |
| // } else { |
| // dobailout = ++interpretCount >= threshold |
| // } |
| // // already exists from IRBuilding: |
| // if (dobailout) { |
| // Bailout |
| // } |
| |
| if (PHASE_OFF(Js::FullJitPhase, m_func) || !m_func->GetJITFunctionBody()->DoJITLoopBody()) |
| { |
// If we're not doing fulljit, we've turned off JIT loop bodies, or we don't have loop headers
// allocated (the function has a Try, etc.), just move false to dobailout
| this->InsertMove(dobailout, IR::IntConstOpnd::New(0, dobailoutType, m_func, true), instr->m_next); |
| } |
| else if (m_func->GetWorkItem()->GetJITTimeInfo()->ForceJITLoopBody()) |
| { |
| // If we're forcing jit loop bodies, move true to dobailout |
| this->InsertMove(dobailout, IR::IntConstOpnd::New(1, dobailoutType, m_func, true), instr->m_next); |
| } |
| else |
| { |
| // Put in the labels |
| auto entryPointIsNull = IR::LabelInstr::New(Js::OpCode::Label, m_func); |
| auto checkDoBailout = IR::LabelInstr::New(Js::OpCode::Label, m_func); |
| instr->InsertAfter(checkDoBailout); |
| instr->InsertAfter(entryPointIsNull); |
| |
| this->InsertCompareBranch(entryPointOpnd, IR::AddrOpnd::New(nullptr, IR::AddrOpndKindDynamicMisc, m_func), Js::OpCode::BrEq_A, false, entryPointIsNull, instr->m_next); |
| |
| // If the entry point is not null |
| auto isCodeGenDone = IR::Instr::New(Js::OpCode::Call, dobailout, IR::HelperCallOpnd::New(IR::HelperSimpleIsLoopCodeGenDone, m_func), m_func); |
| entryPointIsNull->InsertBefore(isCodeGenDone); |
| m_lowererMD.LoadHelperArgument(isCodeGenDone, entryPointOpnd); |
| m_lowererMD.LowerCall(isCodeGenDone, 0); |
| this->InsertBranch(LowererMD::MDUncondBranchOpcode, true, checkDoBailout, entryPointIsNull); |
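// The entry point was null: bump the loop's interpret count and set dobailout once it
// reaches the stored threshold. Roughly:
//   countReg = [loopHeader.interpretCount] + 1
//   [loopHeader.interpretCount] = countReg
//   dobailout = (countReg >= threshold)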
| |
| const auto type = TyUint32; |
| auto countReg = IR::RegOpnd::New(type, m_func); |
| auto countAddr = IR::MemRefOpnd::New(m_func->GetJITFunctionBody()->GetLoopHeaderAddr(loopNum) + Js::LoopHeader::GetOffsetOfInterpretCount(), type, m_func); |
| IR::AutoReuseOpnd a(countReg, m_func), b(countAddr, m_func); |
| this->InsertAdd(false, countReg, countAddr, IR::IntConstOpnd::New(1, type, m_func, true), checkDoBailout); |
| this->InsertMove(countAddr, countReg, checkDoBailout); |
| |
| this->InsertMove(dobailout, IR::IntConstOpnd::New(0, dobailoutType, m_func, true), checkDoBailout); |
| |
| this->InsertCompareBranch(countReg, IR::IntConstOpnd::New(m_func->GetJITFunctionBody()->GetLoopHeaderData(loopNum)->interpretCount, type, m_func), Js::OpCode::BrLt_A, checkDoBailout, checkDoBailout); |
| this->InsertMove(dobailout, IR::IntConstOpnd::New(1, dobailoutType, m_func, true), checkDoBailout); |
| // fallthrough |
| |
| // Label checkDoBailout (inserted above) |
| } |
| } |
| break; |
| |
| case Js::OpCode::ProfiledLoopEnd: |
| { |
| Assert(m_func->DoSimpleJitDynamicProfile()); |
| |
| // This is set up in IRBuilding |
| Assert(instr->GetSrc1()); |
| IR::Opnd* savedFlags = instr->UnlinkSrc1(); |
| |
| m_lowererMD.LoadHelperArgument(instr, savedFlags); |
| m_lowererMD.LoadHelperArgument(instr, IR::Opnd::CreateUint32Opnd(instr->AsJitProfilingInstr()->loopNumber, m_func)); |
| m_lowererMD.LoadHelperArgument(instr, IR::Opnd::CreateFramePointerOpnd(m_func)); |
| instr->SetSrc1(IR::HelperCallOpnd::New(IR::HelperSimpleRecordLoopImplicitCallFlags, m_func)); |
| m_lowererMD.LowerCall(instr, 0); |
| } |
| break; |
| |
| case Js::OpCode::InitLoopBodyCount: |
| Assert(this->m_func->IsLoopBody()); |
| instr->SetSrc1(IR::IntConstOpnd::New(0, TyUint32, this->m_func)); |
| this->m_lowererMD.ChangeToAssign(instr); |
| break; |
| |
| case Js::OpCode::StLoopBodyCount: |
| Assert(this->m_func->IsLoopBody()); |
| this->LowerStLoopBodyCount(instr); |
| break; |
| |
| case Js::OpCode::IncrLoopBodyCount: |
| { |
| Assert(this->m_func->IsLoopBody()); |
| instr->m_opcode = Js::OpCode::Add_I4; |
| instr->SetSrc2(IR::IntConstOpnd::New(1, TyUint32, this->m_func)); |
| this->m_lowererMD.EmitInt4Instr(instr); |
| |
| // Update the jittedLoopIterations field on the entryPointInfo |
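// (store the freshly incremented count so the runtime can see how many iterations
// this jitted loop body has executed since the last bailout)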
| IR::MemRefOpnd *iterationsAddressOpnd = IR::MemRefOpnd::New(this->m_func->GetJittedLoopIterationsSinceLastBailoutAddress(), TyUint32, this->m_func); |
| InsertMove(iterationsAddressOpnd, instr->GetDst(), instr); |
| |
| break; |
| } |
| #if !FLOATVAR |
| case Js::OpCode::StSlotBoxTemp: |
| this->LowerStSlotBoxTemp(instr); |
| break; |
| #endif |
| |
| case Js::OpCode::LdSlot: |
| { |
| PropertySym *propertySym = instr->GetSrc1()->AsSymOpnd()->m_sym->AsPropertySym(); |
| instrPrev = AddSlotArrayCheck(propertySym, instr); |
| } |
| case Js::OpCode::LdSlotArr: |
| { |
| Js::ProfileId profileId; |
| IR::Instr *profileBeforeInstr; |
| if (instr->IsJitProfilingInstr()) |
| { |
| profileId = instr->AsJitProfilingInstr()->profileId; |
| Assert(profileId != Js::Constants::NoProfileId); |
| profileBeforeInstr = instr->m_next; |
| } |
| else |
| { |
| profileId = Js::Constants::NoProfileId; |
| profileBeforeInstr = nullptr; |
| } |
| |
| this->LowerLdSlot(instr); |
| |
| if (profileId != Js::Constants::NoProfileId) |
| { |
| LowerProfileLdSlot(instr->GetDst(), instr->m_func, profileId, profileBeforeInstr); |
| } |
| break; |
| } |
| |
| case Js::OpCode::ChkUndecl: |
| instrPrev = this->LowerChkUndecl(instr); |
| break; |
| |
| case Js::OpCode::LdArrHead: |
| this->LowerLdArrHead(instr); |
| break; |
| |
| case Js::OpCode::StElemC: |
| case Js::OpCode::StArrSegElemC: |
| this->LowerStElemC(instr); |
| break; |
| |
| case Js::OpCode::LdEnv: |
| instrPrev = this->LowerLdEnv(instr); |
| break; |
| |
| case Js::OpCode::LdAsmJsEnv: |
| instrPrev = this->LowerLdAsmJsEnv(instr); |
| break; |
| |
| case Js::OpCode::LdElemUndef: |
| this->LowerLdElemUndef(instr); |
| break; |
| |
| case Js::OpCode::LdElemUndefScoped: |
| this->LowerElementUndefinedScopedMem(instr, IR::HelperOp_LdElemUndefScoped); |
| break; |
| |
| case Js::OpCode::EnsureNoRootFld: |
| this->LowerElementUndefined(instr, IR::HelperOp_EnsureNoRootProperty); |
| break; |
| |
| case Js::OpCode::EnsureNoRootRedeclFld: |
| this->LowerElementUndefined(instr, IR::HelperOp_EnsureNoRootRedeclProperty); |
| break; |
| |
| case Js::OpCode::EnsureCanDeclGloFunc: |
| this->LowerElementUndefined(instr, IR::HelperOp_EnsureCanDeclGloFunc); |
| break; |
| |
| case Js::OpCode::ScopedEnsureNoRedeclFld: |
| this->LowerElementUndefinedScoped(instr, IR::HelperOp_EnsureNoRedeclPropertyScoped); |
| break; |
| |
| case Js::OpCode::LdFuncExpr: |
| // src = function Expression |
| LoadFuncExpression(instr); |
| this->GenerateGetCurrentFunctionObject(instr); |
| break; |
| |
| case Js::OpCode::LdNewTarget: |
| this->GenerateLoadNewTarget(instr); |
| break; |
| |
| case Js::OpCode::ChkNewCallFlag: |
| this->GenerateCheckForCallFlagNew(instr); |
| break; |
| |
| case Js::OpCode::StFuncExpr: |
| // object.propid = src |
| LowerStFld(instr, IR::HelperOp_StFunctionExpression, IR::HelperOp_StFunctionExpression, false); |
| break; |
| |
| case Js::OpCode::InitLetFld: |
| case Js::OpCode::InitRootLetFld: |
| LowerStFld(instr, IR::HelperOp_InitLetFld, IR::HelperOp_InitLetFld, false); |
| break; |
| |
| case Js::OpCode::InitConstFld: |
| case Js::OpCode::InitRootConstFld: |
| LowerStFld(instr, IR::HelperOp_InitConstFld, IR::HelperOp_InitConstFld, false); |
| break; |
| |
| case Js::OpCode::InitUndeclRootLetFld: |
| LowerElementUndefined(instr, IR::HelperOp_InitUndeclRootLetFld); |
| break; |
| |
| case Js::OpCode::InitUndeclRootConstFld: |
| LowerElementUndefined(instr, IR::HelperOp_InitUndeclRootConstFld); |
| break; |
| |
| case Js::OpCode::InitUndeclConsoleLetFld: |
| LowerElementUndefined(instr, IR::HelperOp_InitUndeclConsoleLetFld); |
| break; |
| |
| case Js::OpCode::InitUndeclConsoleConstFld: |
| LowerElementUndefined(instr, IR::HelperOp_InitUndeclConsoleConstFld); |
| break; |
| |
| case Js::OpCode::InitClassMember: |
| LowerStFld(instr, IR::HelperOp_InitClassMember, IR::HelperOp_InitClassMember, false); |
| break; |
| |
| case Js::OpCode::InitClassMemberComputedName: |
| instrPrev = this->LowerStElemI(instr, Js::PropertyOperation_None, false, IR::HelperOp_InitClassMemberComputedName); |
| break; |
| |
| case Js::OpCode::InitClassMemberGetComputedName: |
| instrPrev = this->LowerStElemI(instr, Js::PropertyOperation_None, false, IR::HelperOp_InitClassMemberGetComputedName); |
| break; |
| |
| case Js::OpCode::InitClassMemberSetComputedName: |
| instrPrev = this->LowerStElemI(instr, Js::PropertyOperation_None, false, IR::HelperOp_InitClassMemberSetComputedName); |
| break; |
| |
| case Js::OpCode::InitClassMemberGet: |
| instrPrev = this->LowerStFld(instr, IR::HelperOp_InitClassMemberGet, IR::HelperOp_InitClassMemberGet, false); |
| break; |
| |
| case Js::OpCode::InitClassMemberSet: |
| instrPrev = this->LowerStFld(instr, IR::HelperOp_InitClassMemberSet, IR::HelperOp_InitClassMemberSet, false); |
| break; |
| |
| case Js::OpCode::NewStackFrameDisplay: |
| this->LowerLdFrameDisplay(instr, m_func->DoStackFrameDisplay()); |
| break; |
| |
| case Js::OpCode::LdFrameDisplay: |
| this->LowerLdFrameDisplay(instr, false); |
| break; |
| |
| case Js::OpCode::LdInnerFrameDisplay: |
| this->LowerLdInnerFrameDisplay(instr); |
| break; |
| |
| case Js::OpCode::Throw: |
| case Js::OpCode::InlineThrow: |
| case Js::OpCode::EHThrow: |
| this->LowerUnaryHelperMem(instr, IR::HelperOp_Throw); |
| break; |
| |
| case Js::OpCode::TryCatch: |
| instrPrev = this->LowerTry(instr, true /*try-catch*/); |
| break; |
| |
| case Js::OpCode::TryFinally: |
| instrPrev = this->LowerTry(instr, false /*try-finally*/); |
| break; |
| |
| case Js::OpCode::Catch: |
| instrPrev = this->LowerCatch(instr); |
| break; |
| |
| case Js::OpCode::Finally: |
| instr->Remove(); |
| break; |
| |
| case Js::OpCode::LeaveNull: |
| if (this->m_func->DoOptimizeTry() || (this->m_func->IsSimpleJit() && this->m_func->hasBailout)) |
| { |
| instr->Remove(); |
| } |
| else |
| { |
| instrPrev = m_lowererMD.LowerLeaveNull(instr); |
| } |
| break; |
| |
| case Js::OpCode::Leave: |
| if (this->m_func->HasTry() && this->m_func->DoOptimizeTry()) |
| { |
| // Required in Register Allocator to mark region boundaries |
| break; |
| } |
| instrPrev = this->LowerLeave(instr, instr->AsBranchInstr()->GetTarget(), false /*fromFinalLower*/, instr->AsBranchInstr()->m_isOrphanedLeave); |
| break; |
| |
| case Js::OpCode::BailOnException: |
| instrPrev = this->LowerBailOnException(instr); |
| break; |
| |
| case Js::OpCode::BailOnEarlyExit: |
| instrPrev = this->LowerBailOnEarlyExit(instr); |
| break; |
| |
| case Js::OpCode::RuntimeTypeError: |
| case Js::OpCode::InlineRuntimeTypeError: |
| this->LowerUnaryHelperMem(instr, IR::HelperOp_RuntimeTypeError); |
| break; |
| |
| case Js::OpCode::RuntimeReferenceError: |
| case Js::OpCode::InlineRuntimeReferenceError: |
| this->LowerUnaryHelperMem(instr, IR::HelperOp_RuntimeReferenceError); |
| break; |
| |
| case Js::OpCode::Break: |
| // Inline breakpoint: for now do nothing. |
| break; |
| |
| case Js::OpCode::Nop: |
| // This may need support for debugging the JIT, but for now just remove the instruction. |
| instr->Remove(); |
| break; |
| |
| case Js::OpCode::Unused: |
| // Currently Unused is used with ScopedLdInst to keep the second dst alive, but we don't need to lower it. |
| instr->Remove(); |
| break; |
| |
| case Js::OpCode::StatementBoundary: |
| // This instruction is merely to help convey source info through the IR |
| // and eventually generate the nativeOffset maps. |
| #if DBG_DUMP && DBG |
| // If we have a JITStatementBreakpoint, then we should break on this statement |
| { |
| uint32 statementIndex = instr->AsPragmaInstr()->m_statementIndex; |
| if (Js::Configuration::Global.flags.StatementDebugBreak.Contains(instr->m_func->GetSourceContextId(), instr->m_func->GetLocalFunctionId(), statementIndex)) |
| { |
| IR::Instr* tempinstr = instr; |
| Assert(tempinstr != nullptr); |
| // go past any labels, and then add a debug breakpoint |
| while (tempinstr->m_next != nullptr && tempinstr->m_next->m_opcode == Js::OpCode::Label) |
| { |
| tempinstr = tempinstr->m_next; |
| } |
| this->m_lowererMD.GenerateDebugBreak(tempinstr); |
| } |
| } |
| #endif |
| break; |
| |
| case Js::OpCode::BailOnNotPolymorphicInlinee: |
| instrPrev = LowerBailOnNotPolymorphicInlinee(instr); |
| break; |
| |
| case Js::OpCode::BailOnNoSimdTypeSpec: |
| case Js::OpCode::BailOnNoProfile: |
| this->GenerateBailOut(instr, nullptr, nullptr); |
| break; |
| |
| case Js::OpCode::BailOnNotSpreadable: |
| instrPrev = this->LowerBailOnNotSpreadable(instr); |
| break; |
| |
| case Js::OpCode::BailOnNotStackArgs: |
| instrPrev = this->LowerBailOnNotStackArgs(instr); |
| break; |
| |
| case Js::OpCode::BailOnEqual: |
| case Js::OpCode::BailOnNotEqual: |
| instrPrev = this->LowerBailOnEqualOrNotEqual(instr); |
| break; |
| |
| case Js::OpCode::BailOnNegative: |
| LowerBailOnNegative(instr); |
| break; |
| |
| #ifdef ENABLE_SCRIPT_DEBUGGING |
| case Js::OpCode::BailForDebugger: |
| instrPrev = this->LowerBailForDebugger(instr); |
| break; |
| #endif |
| |
| case Js::OpCode::BailOnNotObject: |
| instrPrev = this->LowerBailOnNotObject(instr); |
| break; |
| |
| case Js::OpCode::CheckIsFuncObj: |
| instrPrev = this->LowerCheckIsFuncObj(instr); |
| break; |
| |
| case Js::OpCode::CheckFuncInfo: |
| instrPrev = this->LowerCheckIsFuncObj(instr, true); |
| break; |
| |
| case Js::OpCode::BailOnNotBuiltIn: |
| instrPrev = this->LowerBailOnNotBuiltIn(instr); |
| break; |
| |
| case Js::OpCode::BailOnNotArray: |
| { |
| IR::Instr *bailOnNotArray = nullptr, *bailOnMissingValue = nullptr; |
| SplitBailOnNotArray(instr, &bailOnNotArray, &bailOnMissingValue); |
| IR::RegOpnd *const arrayOpnd = LowerBailOnNotArray(bailOnNotArray); |
| if (bailOnMissingValue) |
| { |
| LowerBailOnMissingValue(bailOnMissingValue, arrayOpnd); |
| } |
| break; |
| } |
| |
| case Js::OpCode::BoundCheck: |
| case Js::OpCode::UnsignedBoundCheck: |
| LowerBoundCheck(instr); |
| break; |
| |
| case Js::OpCode::BailTarget: |
| instrPrev = this->LowerBailTarget(instr); |
| break; |
| |
| case Js::OpCode::InlineeStart: |
| this->LowerInlineeStart(instr); |
| break; |
| |
| case Js::OpCode::EndCallForPolymorphicInlinee: |
| instr->Remove(); |
| break; |
| |
| case Js::OpCode::InlineeEnd: |
| this->LowerInlineeEnd(instr); |
| break; |
| |
| case Js::OpCode::InlineBuiltInEnd: |
| case Js::OpCode::InlineNonTrackingBuiltInEnd: |
| this->LowerInlineBuiltIn(instr); |
| break; |
| |
| case Js::OpCode::ExtendArg_A: |
| if (instr->GetSrc1()->IsRegOpnd()) |
| { |
| IR::RegOpnd *src1 = instr->GetSrc1()->AsRegOpnd(); |
| this->addToLiveOnBackEdgeSyms->Clear(src1->m_sym->m_id); |
| } |
| instr->Remove(); |
| break; |
| |
| case Js::OpCode::InlineBuiltInStart: |
| case Js::OpCode::BytecodeArgOutUse: |
| case Js::OpCode::ArgOut_A_InlineBuiltIn: |
| instr->Remove(); |
| break; |
| |
| case Js::OpCode::DeadBrEqual: |
| this->LowerBinaryHelperMem(instr, IR::HelperOp_Equal); |
| break; |
| |
| case Js::OpCode::DeadBrSrEqual: |
| this->LowerBinaryHelperMem(instr, IR::HelperOp_StrictEqual); |
| break; |
| |
| case Js::OpCode::DeadBrRelational: |
| this->LowerBinaryHelperMem(instr, IR::HelperOp_Greater); |
| break; |
| |
| case Js::OpCode::DeadBrOnHasProperty: |
| this->LowerUnaryHelperMem(instr, IR::HelperOp_HasProperty); |
| break; |
| |
| case Js::OpCode::DeletedNonHelperBranch: |
| break; |
| |
| case Js::OpCode::InitClass: |
| instrPrev = this->LowerInitClass(instr); |
| break; |
| |
| case Js::OpCode::NewConcatStrMulti: |
| this->LowerNewConcatStrMulti(instr); |
| break; |
| |
| case Js::OpCode::NewConcatStrMultiBE: |
| this->LowerNewConcatStrMultiBE(instr); |
| break; |
| |
| case Js::OpCode::SetConcatStrMultiItem: |
| this->LowerSetConcatStrMultiItem(instr); |
| break; |
| |
| case Js::OpCode::SetConcatStrMultiItemBE: |
| Assert(instr->GetSrc1()->IsRegOpnd()); |
| this->addToLiveOnBackEdgeSyms->Clear(instr->GetSrc1()->GetStackSym()->m_id); |
// Code corresponding to this should already have been generated while lowering NewConcatStrMultiBE
| instr->Remove(); |
| break; |
| |
| case Js::OpCode::Conv_Str: |
| this->LowerConvStr(instr); |
| break; |
| |
| case Js::OpCode::Coerce_Str: |
| this->LowerCoerseStr(instr); |
| break; |
| |
| case Js::OpCode::Coerce_StrOrRegex: |
| this->LowerCoerseStrOrRegex(instr); |
| break; |
| |
| case Js::OpCode::Coerce_Regex: |
| this->LowerCoerseRegex(instr); |
| break; |
| |
| case Js::OpCode::Conv_PrimStr: |
| this->LowerConvPrimStr(instr); |
| break; |
| |
| case Js::OpCode::ClearAttributes: |
| this->LowerBinaryHelper(instr, IR::HelperOP_ClearAttributes); |
| break; |
| |
| case Js::OpCode::SpreadArrayLiteral: |
| this->LowerSpreadArrayLiteral(instr); |
| break; |
| |
| case Js::OpCode::CallIExtended: |
| { |
| // Currently, the only use for CallIExtended is a call that uses spread. |
| Assert(IsSpreadCall(instr)); |
| instrPrev = this->LowerSpreadCall(instr, Js::CallFlags_None); |
| break; |
| } |
| |
| case Js::OpCode::CallIExtendedNew: |
| { |
// Currently, the only use for CallIExtendedNew is a call that uses spread.
| Assert(IsSpreadCall(instr)); |
| instrPrev = this->LowerSpreadCall(instr, Js::CallFlags_New); |
| break; |
| } |
| |
| case Js::OpCode::CallIExtendedNewTargetNew: |
| { |
// Currently, the only use for CallIExtendedNewTargetNew is a call that uses spread.
| Assert(IsSpreadCall(instr)); |
| instrPrev = this->LowerSpreadCall(instr, (Js::CallFlags)(Js::CallFlags_New | Js::CallFlags_ExtraArg | Js::CallFlags_NewTarget)); |
| break; |
| } |
| |
| case Js::OpCode::LdSpreadIndices: |
| instr->Remove(); |
| break; |
| |
| case Js::OpCode::LdHomeObj: |
| this->GenerateLdHomeObj(instr); |
| break; |
| |
| case Js::OpCode::LdHomeObjProto: |
| this->GenerateLdHomeObjProto(instr); |
| break; |
| |
| case Js::OpCode::LdFuncObj: |
| this->GenerateLdFuncObj(instr); |
| break; |
| |
| case Js::OpCode::LdFuncObjProto: |
| this->GenerateLdFuncObjProto(instr); |
| break; |
| |
| case Js::OpCode::ImportCall: |
| { |
| IR::Opnd *src1Opnd = instr->UnlinkSrc1(); |
| IR::Opnd *functionObjOpnd = nullptr; |
| m_lowererMD.LoadFunctionObjectOpnd(instr, functionObjOpnd); |
| |
| LoadScriptContext(instr); |
| m_lowererMD.LoadHelperArgument(instr, src1Opnd); |
| m_lowererMD.LoadHelperArgument(instr, functionObjOpnd); |
| m_lowererMD.ChangeToHelperCall(instr, IR::HelperImportCall); |
| |
| break; |
| } |
| |
| case Js::OpCode::SetComputedNameVar: |
| { |
| IR::Opnd *src2Opnd = instr->UnlinkSrc2(); |
| IR::Opnd *src1Opnd = instr->UnlinkSrc1(); |
| |
| m_lowererMD.LoadHelperArgument(instr, src2Opnd); |
| m_lowererMD.LoadHelperArgument(instr, src1Opnd); |
| |
| m_lowererMD.ChangeToHelperCall(instr, IR::HelperSetComputedNameVar); |
| |
| break; |
| } |
| |
| case Js::OpCode::InlineeMetaArg: |
| { |
| m_lowererMD.ChangeToAssign(instr); |
| break; |
| } |
| |
| case Js::OpCode::Yield: |
| { |
| instr->FreeSrc1(); // Source is not actually used by the backend other than to calculate lifetime |
| IR::Opnd* dstOpnd = instr->UnlinkDst(); |
| |
| // prm2 is the ResumeYieldData pointer per calling convention established in JavascriptGenerator::CallGenerator |
| // This is the value the bytecode expects to be in the dst register of the Yield opcode after resumption. |
| // Load it here after the bail-in. |
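//
// Sketch of what's emitted:
//   <bail out to the interpreter>
// resumeLabel:          (inserted by IRBuilding; asserted below)
//   dst = prm2          (the ResumeYieldData pointer)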
| |
| StackSym *resumeYieldDataSym = StackSym::NewImplicitParamSym(4, m_func); |
| m_func->SetArgOffset(resumeYieldDataSym, (LowererMD::GetFormalParamOffset() + 1) * MachPtr); |
| IR::SymOpnd * resumeYieldDataOpnd = IR::SymOpnd::New(resumeYieldDataSym, TyMachPtr, m_func); |
| |
AssertMsg(instr->m_next->IsLabelInstr(), "Expect the resume label to immediately follow the Yield instruction");
| InsertMove(dstOpnd, resumeYieldDataOpnd, instr->m_next->m_next); |
| |
| GenerateBailOut(instr); |
| |
| break; |
| } |
| |
| case Js::OpCode::ResumeYield: |
| case Js::OpCode::ResumeYieldStar: |
| { |
| IR::Opnd *srcOpnd1 = instr->UnlinkSrc1(); |
| IR::Opnd *srcOpnd2 = instr->m_opcode == Js::OpCode::ResumeYieldStar ? instr->UnlinkSrc2() : IR::AddrOpnd::NewNull(m_func); |
| m_lowererMD.LoadHelperArgument(instr, srcOpnd2); |
| m_lowererMD.LoadHelperArgument(instr, srcOpnd1); |
| m_lowererMD.ChangeToHelperCall(instr, IR::HelperResumeYield); |
| break; |
| } |
| |
| case Js::OpCode::GeneratorResumeJumpTable: |
| { |
| // Lowered in LowerPrologEpilog so that the jumps introduced are not considered to be part of the flow for the RegAlloc phase. |
| |
// Introduce a BailOutNoSave label if there were yield points that were elided due to optimizations. They could still be hit
// if an active generator object had been paused at such a yield point when the function body was JITed. So safeguard such a
// case by having the native code simply jump back to the interpreter for such yield points.
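//
// Sketch of what gets inserted just before the function exit when this label is needed:
//
//       JMP exitTarget
//   bailOutNoSaveLabel:
//       call NoSaveRegistersBailOutForElidedYield(framePointer)
//   exitTarget:
//       <exit>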
| |
| IR::LabelInstr *bailOutNoSaveLabel = nullptr; |
| |
| m_func->MapUntilYieldOffsetResumeLabels([this, &bailOutNoSaveLabel](int, const YieldOffsetResumeLabel& yorl) |
| { |
| if (yorl.Second() == nullptr) |
| { |
| if (bailOutNoSaveLabel == nullptr) |
| { |
| bailOutNoSaveLabel = IR::LabelInstr::New(Js::OpCode::Label, m_func); |
| } |
| |
| return true; |
| } |
| |
| return false; |
| }); |
| |
| // Insert the bailoutnosave label somewhere along with a call to BailOutNoSave helper |
| if (bailOutNoSaveLabel != nullptr) |
| { |
| IR::Instr * exitPrevInstr = this->m_func->m_exitInstr->m_prev; |
| IR::LabelInstr * exitTargetInstr; |
| if (exitPrevInstr->IsLabelInstr()) |
| { |
| exitTargetInstr = exitPrevInstr->AsLabelInstr(); |
| exitPrevInstr = exitPrevInstr->m_prev; |
| } |
| else |
| { |
| exitTargetInstr = IR::LabelInstr::New(Js::OpCode::Label, this->m_func, false); |
| exitPrevInstr->InsertAfter(exitTargetInstr); |
| } |
| |
| bailOutNoSaveLabel->m_hasNonBranchRef = true; |
| bailOutNoSaveLabel->isOpHelper = true; |
| |
| IR::Instr* bailOutCall = IR::Instr::New(Js::OpCode::Call, m_func); |
| |
| exitPrevInstr->InsertAfter(bailOutCall); |
| exitPrevInstr->InsertAfter(bailOutNoSaveLabel); |
| exitPrevInstr->InsertAfter(IR::BranchInstr::New(LowererMD::MDUncondBranchOpcode, exitTargetInstr, m_func)); |
| |
| IR::RegOpnd * frameRegOpnd = IR::RegOpnd::New(nullptr, LowererMD::GetRegFramePointer(), TyMachPtr, m_func); |
| |
| m_lowererMD.LoadHelperArgument(bailOutCall, frameRegOpnd); |
| m_lowererMD.ChangeToHelperCall(bailOutCall, IR::HelperNoSaveRegistersBailOutForElidedYield); |
| |
| m_func->m_bailOutNoSaveLabel = bailOutNoSaveLabel; |
| } |
| |
| break; |
| } |
| |
| case Js::OpCode::FrameDisplayCheck: |
| instrPrev = this->LowerFrameDisplayCheck(instr); |
| break; |
| |
| case Js::OpCode::SlotArrayCheck: |
| instrPrev = this->LowerSlotArrayCheck(instr); |
| break; |
| |
| #if DBG |
| case Js::OpCode::CheckLowerIntBound: |
| instrPrev = this->LowerCheckLowerIntBound(instr); |
| break; |
| |
| case Js::OpCode::CheckUpperIntBound: |
| instrPrev = this->LowerCheckUpperIntBound(instr); |
| break; |
| #endif |
| #ifdef ENABLE_WASM |
| case Js::OpCode::Copysign_A: |
| m_lowererMD.GenerateCopysign(instr); |
| break; |
| |
| case Js::OpCode::Trunc_A: |
| if (!AutoSystemInfo::Data.SSE4_1Available()) |
| { |
| m_lowererMD.HelperCallForAsmMathBuiltin(instr, IR::HelperDirectMath_TruncFlt, IR::HelperDirectMath_TruncDb); |
| break; |
| } |
| |
| m_lowererMD.GenerateFastInlineBuiltInCall(instr, (IR::JnHelperMethod)0); |
| break; |
| |
| case Js::OpCode::Nearest_A: |
| if (!AutoSystemInfo::Data.SSE4_1Available()) |
| { |
| m_lowererMD.HelperCallForAsmMathBuiltin(instr, IR::HelperDirectMath_NearestFlt, IR::HelperDirectMath_NearestDb); |
| break; |
| } |
| |
| m_lowererMD.GenerateFastInlineBuiltInCall(instr, (IR::JnHelperMethod)0); |
| break; |
| |
| case Js::OpCode::ThrowRuntimeError: |
| GenerateThrow(instr->UnlinkSrc1(), instr); |
| instr->Remove(); |
| break; |
| |
| #endif //ENABLE_WASM |
| case Js::OpCode::SpeculatedLoadFence: |
| { |
| AssertOrFailFast(instr->m_kind == IR::InstrKindByteCodeUses); |
| #ifdef _M_ARM |
| AssertOrFailFastMsg(false, "We shouldn't perform this hoisting on ARM"); |
| #else |
| IR::ByteCodeUsesInstr* bcuInstr = static_cast<IR::ByteCodeUsesInstr*>(instr); |
// Most of the time there are no symbols left to mask here, in which case this
// instruction can simply be removed without emitting any masking code.
| if (bcuInstr->GetByteCodeUpwardExposedUsed() != nullptr && !bcuInstr->GetByteCodeUpwardExposedUsed()->IsEmpty()) |
| { |
| // The generated code is: |
| // |
| // cmp rax, rax |
| // for each symbol to mask: |
| // reg(sym) = cmovne reg(sym), reg(sym) |
| IR::RegOpnd* temp = IR::RegOpnd::New(TyUint8, instr->m_func); |
| InsertMove(temp, IR::IntConstOpnd::New(0, TyUint8, instr->m_func), instr); |
| IR::Instr * cmp = IR::Instr::New(Js::OpCode::CMP, instr->m_func); |
| cmp->SetSrc1(temp); |
| cmp->SetSrc2(temp); |
| instr->InsertBefore(cmp); |
| m_lowererMD.Legalize(cmp); |
| FOREACH_BITSET_IN_SPARSEBV(symid, bcuInstr->GetByteCodeUpwardExposedUsed()) |
| { |
| StackSym* thisSym = instr->m_func->m_symTable->Find(symid)->AsStackSym(); |
| IR::RegOpnd* thisSymReg = IR::RegOpnd::New(thisSym, thisSym->GetType(), instr->m_func); |
| Js::OpCode specBlockOp = thisSymReg->IsFloat() ? LowererMD::MDSpecBlockFNEOpcode : LowererMD::MDSpecBlockNEOpcode; |
| IR::Instr* cmov = IR::Instr::New(specBlockOp, thisSymReg, thisSymReg, thisSymReg, instr->m_func); |
| instr->InsertBefore(cmov); |
| m_lowererMD.Legalize(cmov); |
| } NEXT_BITSET_IN_SPARSEBV; |
| } |
| #endif |
| instr->Remove(); |
| break; |
| } |
| |
| case Js::OpCode::SpreadObjectLiteral: |
| this->LowerBinaryHelperMem(instr, IR::HelperSpreadObjectLiteral); |
| break; |
| |
| case Js::OpCode::Restify: |
| instrPrev = this->LowerRestify(instr); |
| break; |
| |
| case Js::OpCode::NewPropIdArrForCompProps: |
| this->LowerUnaryHelperMem(instr, IR::HelperNewPropIdArrForCompProps); |
| break; |
| |
| case Js::OpCode::StPropIdArrFromVar: |
| instrPrev = this->LowerStPropIdArrFromVar(instr); |
| break; |
| |
| default: |
| #ifdef ENABLE_WASM_SIMD |
| if (IsSimd128Opcode(instr->m_opcode)) |
| { |
| instrPrev = m_lowererMD.Simd128Instruction(instr); |
| break; |
| } |
| #endif |
| AssertMsg(instr->IsLowered(), "Unknown opcode"); |
if (!instr->IsLowered())
| { |
| Fatal(); |
| } |
| break; |
| } |
| |
| #if DBG |
| LegalizeVerifyRange(instrPrev ? instrPrev->m_next : instrStart, |
| verifyLegalizeInstrNext ? verifyLegalizeInstrNext->m_prev : nullptr); |
| this->helperCallCheckState = HelperCallCheckState_None; |
| #endif |
| } NEXT_INSTR_BACKWARD_EDITING_IN_RANGE; |
| |
| Assert(this->outerMostLoopLabel == nullptr); |
| } |
| |
| IR::Opnd * |
| Lowerer::LoadFunctionInfoOpnd(IR::Instr * instr) |
| { |
| return IR::AddrOpnd::New(instr->m_func->GetWorkItem()->GetJITTimeInfo()->GetFunctionInfoAddr(), IR::AddrOpndKindDynamicFunctionInfo, instr->m_func); |
| } |
| |
| IR::Instr * |
| Lowerer::LoadFunctionBody(IR::Instr * instr) |
| { |
| return m_lowererMD.LoadHelperArgument(instr, LoadFunctionBodyOpnd(instr)); |
| } |
| |
| IR::Instr * |
| Lowerer::LoadScriptContext(IR::Instr * instr) |
| { |
| return m_lowererMD.LoadHelperArgument(instr, LoadScriptContextOpnd(instr)); |
| } |
| |
| IR::Opnd * |
| Lowerer::LoadFunctionBodyOpnd(IR::Instr * instr) |
| { |
| return IR::AddrOpnd::New(instr->m_func->GetJITFunctionBody()->GetAddr(), IR::AddrOpndKindDynamicFunctionBody, instr->m_func); |
| } |
| |
| IR::Opnd * |
| Lowerer::LoadScriptContextOpnd(IR::Instr * instr) |
| { |
| return IR::AddrOpnd::New(m_func->GetScriptContextInfo()->GetAddr(), IR::AddrOpndKindDynamicScriptContext, this->m_func); |
| } |
| |
| IR::Opnd * |
| Lowerer::LoadScriptContextValueOpnd(IR::Instr * instr, ScriptContextValue valueType) |
| { |
| ScriptContextInfo *scriptContextInfo = instr->m_func->GetScriptContextInfo(); |
| switch (valueType) |
| { |
| case ScriptContextValue::ScriptContextNumberAllocator: |
| return IR::AddrOpnd::New(scriptContextInfo->GetNumberAllocatorAddr(), IR::AddrOpndKindDynamicMisc, instr->m_func); |
| case ScriptContextValue::ScriptContextRecycler: |
| return IR::AddrOpnd::New(scriptContextInfo->GetRecyclerAddr(), IR::AddrOpndKindDynamicMisc, instr->m_func); |
| default: |
| Assert(false); |
| return nullptr; |
| } |
| |
| } |
| |
| IR::Opnd * |
| Lowerer::LoadLibraryValueOpnd(IR::Instr * instr, LibraryValue valueType) |
| { |
| ScriptContextInfo *scriptContextInfo = instr->m_func->GetScriptContextInfo(); |
| switch (valueType) |
| { |
| case LibraryValue::ValueEmptyString: |
| return IR::AddrOpnd::New(scriptContextInfo->GetEmptyStringAddr(), IR::AddrOpndKindDynamicVar, instr->m_func, true); |
| case LibraryValue::ValueUndeclBlockVar: |
| return IR::AddrOpnd::New(scriptContextInfo->GetUndeclBlockVarAddr(), IR::AddrOpndKindDynamicVar, instr->m_func, true); |
| case LibraryValue::ValueUndefined: |
| return IR::AddrOpnd::New(scriptContextInfo->GetUndefinedAddr(), IR::AddrOpndKindDynamicVar, instr->m_func, true); |
| case LibraryValue::ValueNull: |
| return IR::AddrOpnd::New(scriptContextInfo->GetNullAddr(), IR::AddrOpndKindDynamicVar, instr->m_func, true); |
| case LibraryValue::ValueTrue: |
| return IR::AddrOpnd::New(scriptContextInfo->GetTrueAddr(), IR::AddrOpndKindDynamicVar, instr->m_func, true); |
| case LibraryValue::ValueFalse: |
| return IR::AddrOpnd::New(scriptContextInfo->GetFalseAddr(), IR::AddrOpndKindDynamicVar, instr->m_func, true); |
| case LibraryValue::ValueNegativeZero: |
| return IR::AddrOpnd::New(scriptContextInfo->GetNegativeZeroAddr(), IR::AddrOpndKindDynamicVar, instr->m_func, true); |
| case LibraryValue::ValueNumberTypeStatic: |
| return IR::AddrOpnd::New(scriptContextInfo->GetNumberTypeStaticAddr(), IR::AddrOpndKindDynamicType, instr->m_func, true); |
| case LibraryValue::ValueStringTypeStatic: |
| return IR::AddrOpnd::New(scriptContextInfo->GetStringTypeStaticAddr(), IR::AddrOpndKindDynamicType, instr->m_func, true); |
| case LibraryValue::ValueSymbolTypeStatic: |
| return IR::AddrOpnd::New(scriptContextInfo->GetSymbolTypeStaticAddr(), IR::AddrOpndKindDynamicType, instr->m_func, true); |
| case LibraryValue::ValueObjectType: |
| return IR::AddrOpnd::New(scriptContextInfo->GetObjectTypeAddr(), IR::AddrOpndKindDynamicType, instr->m_func); |
| case LibraryValue::ValueObjectHeaderInlinedType: |
| return IR::AddrOpnd::New(scriptContextInfo->GetObjectHeaderInlinedTypeAddr(), IR::AddrOpndKindDynamicType, instr->m_func); |
| case LibraryValue::ValueRegexType: |
| return IR::AddrOpnd::New(scriptContextInfo->GetRegexTypeAddr(), IR::AddrOpndKindDynamicType, instr->m_func); |
| case LibraryValue::ValueArrayConstructor: |
| return IR::AddrOpnd::New(scriptContextInfo->GetArrayConstructorAddr(), IR::AddrOpndKindDynamicVar, instr->m_func); |
| case LibraryValue::ValueJavascriptArrayType: |
| return IR::AddrOpnd::New(scriptContextInfo->GetArrayTypeAddr(), IR::AddrOpndKindDynamicType, instr->m_func); |
| case LibraryValue::ValueNativeIntArrayType: |
| return IR::AddrOpnd::New(scriptContextInfo->GetNativeIntArrayTypeAddr(), IR::AddrOpndKindDynamicType, instr->m_func); |
| case LibraryValue::ValueNativeFloatArrayType: |
| return IR::AddrOpnd::New(scriptContextInfo->GetNativeFloatArrayTypeAddr(), IR::AddrOpndKindDynamicType, instr->m_func); |
| case LibraryValue::ValueConstructorCacheDefaultInstance: |
| return IR::AddrOpnd::New(m_func->GetThreadContextInfo()->GetConstructorCacheDefaultInstanceAddr(), IR::AddrOpndKindDynamicMisc, instr->m_func); |
| case LibraryValue::ValueAbsDoubleCst: |
| return IR::MemRefOpnd::New(m_func->GetThreadContextInfo()->GetAbsDoubleCstAddr(), TyMachDouble, instr->m_func, IR::AddrOpndKindDynamicDoubleRef); |
| case LibraryValue::ValueCharStringCache: |
| return IR::AddrOpnd::New(scriptContextInfo->GetCharStringCacheAddr(), IR::AddrOpndKindDynamicCharStringCache, instr->m_func); |
| default: |
| Assert(UNREACHED); |
| return nullptr; |
| } |
| } |
| |
| IR::Opnd * |
| Lowerer::LoadVTableValueOpnd(IR::Instr * instr, VTableValue vtableType) |
| { |
| return IR::AddrOpnd::New((Js::Var)instr->m_func->GetScriptContextInfo()->GetVTableAddress(vtableType), IR::AddrOpndKindDynamicVtable, this->m_func); |
| } |
| |
| IR::Opnd * |
| Lowerer::LoadOptimizationOverridesValueOpnd(IR::Instr *instr, OptimizationOverridesValue valueType) |
| { |
| switch (valueType) |
| { |
| case OptimizationOverridesValue::OptimizationOverridesSideEffects: |
| return IR::MemRefOpnd::New(m_func->GetScriptContextInfo()->GetSideEffectsAddr(), TyInt32, instr->m_func); |
| case OptimizationOverridesValue::OptimizationOverridesArraySetElementFastPathVtable: |
| return IR::MemRefOpnd::New(m_func->GetScriptContextInfo()->GetArraySetElementFastPathVtableAddr(), TyMachPtr, instr->m_func); |
| case OptimizationOverridesValue::OptimizationOverridesIntArraySetElementFastPathVtable: |
| return IR::MemRefOpnd::New(m_func->GetScriptContextInfo()->GetIntArraySetElementFastPathVtableAddr(), TyMachPtr, instr->m_func); |
| case OptimizationOverridesValue::OptimizationOverridesFloatArraySetElementFastPathVtable: |
| return IR::MemRefOpnd::New(m_func->GetScriptContextInfo()->GetFloatArraySetElementFastPathVtableAddr(), TyMachPtr, instr->m_func); |
| default: |
| Assert(UNREACHED); |
| return nullptr; |
| } |
| } |
| |
| IR::Opnd * |
| Lowerer::LoadNumberAllocatorValueOpnd(IR::Instr *instr, NumberAllocatorValue valueType) |
| { |
| ScriptContextInfo *scriptContext = instr->m_func->GetScriptContextInfo(); |
| bool allowNativeCodeBumpAllocation = scriptContext->GetRecyclerAllowNativeCodeBumpAllocation(); |
| |
| switch (valueType) |
| { |
| case NumberAllocatorValue::NumberAllocatorEndAddress: |
| return IR::MemRefOpnd::New(((char *)scriptContext->GetNumberAllocatorAddr()) + Js::RecyclerJavascriptNumberAllocator::GetEndAddressOffset(), TyMachPtr, instr->m_func); |
| case NumberAllocatorValue::NumberAllocatorFreeObjectList: |
| return IR::MemRefOpnd::New( |
| ((char *)scriptContext->GetNumberAllocatorAddr()) + |
| (allowNativeCodeBumpAllocation ? Js::RecyclerJavascriptNumberAllocator::GetFreeObjectListOffset() : Js::RecyclerJavascriptNumberAllocator::GetEndAddressOffset()), |
| TyMachPtr, instr->m_func); |
| default: |
| Assert(false); |
| return nullptr; |
| } |
| } |
| |
| IR::Opnd * |
| Lowerer::LoadIsInstInlineCacheOpnd(IR::Instr * instr, uint inlineCacheIndex) |
| { |
| intptr_t inlineCache = instr->m_func->GetJITFunctionBody()->GetIsInstInlineCache(inlineCacheIndex); |
| return IR::AddrOpnd::New(inlineCache, IR::AddrOpndKindDynamicInlineCache, this->m_func); |
| } |
| |
| IR::Opnd * |
| Lowerer::LoadRuntimeInlineCacheOpnd(IR::Instr * instr, IR::PropertySymOpnd * propertySymOpnd, bool isHelper) |
| { |
| Assert(propertySymOpnd->m_runtimeInlineCache != 0); |
| IR::Opnd * inlineCacheOpnd = nullptr; |
| if (instr->m_func->GetJITFunctionBody()->HasInlineCachesOnFunctionObject() && !instr->m_func->IsInlinee()) |
| { |
| inlineCacheOpnd = this->GetInlineCacheFromFuncObjectForRuntimeUse(instr, propertySymOpnd, isHelper); |
| } |
| else |
| { |
| intptr_t inlineCache = propertySymOpnd->m_runtimeInlineCache; |
| inlineCacheOpnd = IR::AddrOpnd::New(inlineCache, IR::AddrOpndKindDynamicInlineCache, this->m_func, /* dontEncode */ true); |
| } |
| return inlineCacheOpnd; |
| } |
| |
| bool |
| Lowerer::TryGenerateFastCmSrXx(IR::Instr * instr) |
| { |
| IR::RegOpnd *srcReg1 = instr->GetSrc1()->IsRegOpnd() ? instr->GetSrc1()->AsRegOpnd() : nullptr; |
| IR::RegOpnd *srcReg2 = instr->GetSrc2()->IsRegOpnd() ? instr->GetSrc2()->AsRegOpnd() : nullptr; |
| |
| if (srcReg2 && IsConstRegOpnd(srcReg2)) |
| { |
| return m_lowererMD.GenerateFastCmSrXxConst(instr); |
| } |
| else if (srcReg1 && IsConstRegOpnd(srcReg1)) |
| { |
| instr->SwapOpnds(); |
| return m_lowererMD.GenerateFastCmSrXxConst(instr); |
| } |
| |
| return false; |
| } |
| |
// Generate a fast path for StrictEquals when one of the sources is undefined, null, or a boolean
| bool |
| Lowerer::TryGenerateFastBrSrXx(IR::Instr * instr, IR::RegOpnd * srcReg1, IR::RegOpnd * srcReg2, IR::Instr ** pInstrPrev, bool noMathFastPath) |
| { |
| bool isEqual = !instr->IsNeq(); |
| |
| if (srcReg2 && IsConstRegOpnd(srcReg2)) |
| { |
| this->GenerateFastBrConst(instr->AsBranchInstr(), GetConstRegOpnd(srcReg2, instr), isEqual); |
| instr->Remove(); |
| return true; |
| } |
| else if (srcReg1 && IsConstRegOpnd(srcReg1)) |
| { |
| instr->SwapOpnds(); |
| this->GenerateFastBrConst(instr->AsBranchInstr(), GetConstRegOpnd(srcReg1, instr), isEqual); |
| instr->Remove(); |
| return true; |
| } |
| |
| return false; |
| } |
| |
| |
| ///---------------------------------------------------------------------------- |
| /// |
| /// Lowerer::GenerateFastBrConst |
| /// |
| ///---------------------------------------------------------------------------- |
| IR::BranchInstr * |
| Lowerer::GenerateFastBrConst(IR::BranchInstr *branchInstr, IR::Opnd * constOpnd, bool isEqual) |
| { |
| Assert(constOpnd->IsAddrOpnd() || constOpnd->IsIntConstOpnd()); |
| |
| // |
| // Given: |
| // BrSrXx_A $L1, s1, s2 |
| // where s2 is either 'null', 'undefined', 'true' or 'false' |
| // |
| // Generate: |
| // |
| // CMP s1, s2 |
| // JEQ/JNE $L1 |
| // |
| |
| Assert(IsConstRegOpnd(branchInstr->GetSrc2()->AsRegOpnd())); |
| |
| IR::RegOpnd *opnd = GetRegOpnd(branchInstr->GetSrc1(), branchInstr, m_func, TyVar); |
| |
| IR::BranchInstr *newBranch = InsertCompareBranch(opnd, constOpnd, isEqual ? Js::OpCode::BrEq_A : Js::OpCode::BrNeq_A, branchInstr->GetTarget(), branchInstr); |
| |
| return newBranch; |
| } |
| |
| bool |
| Lowerer::TryGenerateFastBrEq(IR::Instr * instr) |
| { |
| IR::RegOpnd *srcReg1 = instr->GetSrc1()->IsRegOpnd() ? instr->GetSrc1()->AsRegOpnd() : nullptr; |
| IR::RegOpnd *srcReg2 = instr->GetSrc2()->IsRegOpnd() ? instr->GetSrc2()->AsRegOpnd() : nullptr; |
| |
| bool isConst = false; |
| if (srcReg1 && this->IsNullOrUndefRegOpnd(srcReg1)) |
| { |
| instr->SwapOpnds(); |
| isConst = true; |
| } |
| |
| // Fast path for == null or == undefined |
| // if (src == null || src == undefined) |
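| // Roughly, the emitted sequence is (a sketch; GenerateFastBrConst produces each |
| // CMP/branch pair, and the exact encoding is platform-specific): |
| //     CMP src, <library null>         JEQ $target |
| //     CMP src, <library undefined>    JEQ $target |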
| if (isConst || (srcReg2 && this->IsNullOrUndefRegOpnd(srcReg2))) |
| { |
| this->GenerateFastBrConst(instr->AsBranchInstr(), |
| this->LoadLibraryValueOpnd(instr, LibraryValue::ValueNull), |
| true); |
| |
| this->GenerateFastBrConst(instr->AsBranchInstr(), |
| this->LoadLibraryValueOpnd(instr, LibraryValue::ValueUndefined), |
| true); |
| |
| instr->Remove(); |
| return true; |
| } |
| |
| return false; |
| } |
| |
| bool |
| Lowerer::TryGenerateFastBrNeq(IR::Instr * instr) |
| { |
| IR::RegOpnd *srcReg1 = instr->GetSrc1()->IsRegOpnd() ? instr->GetSrc1()->AsRegOpnd() : nullptr; |
| IR::RegOpnd *srcReg2 = instr->GetSrc2()->IsRegOpnd() ? instr->GetSrc2()->AsRegOpnd() : nullptr; |
| |
| bool isConst = false; |
| if (srcReg1 && this->IsNullOrUndefRegOpnd(srcReg1)) |
| { |
| instr->SwapOpnds(); |
| isConst = true; |
| } |
| |
| // Fast path for != null or != undefined |
| // if (src != null && src != undefined) |
| // |
| // That is: |
| //     if (src == null) goto labelEq |
| //     if (src != undefined) goto target |
| // labelEq: |
| |
| if (isConst || (srcReg2 && this->IsNullOrUndefRegOpnd(srcReg2))) |
| { |
| IR::LabelInstr *labelEq = instr->GetOrCreateContinueLabel(); |
| |
| IR::BranchInstr *newBranch = this->GenerateFastBrConst(instr->AsBranchInstr(), |
| this->LoadLibraryValueOpnd(instr, LibraryValue::ValueNull), |
| true); |
| newBranch->SetTarget(labelEq); |
| |
| this->GenerateFastBrConst(instr->AsBranchInstr(), |
| this->LoadLibraryValueOpnd(instr, LibraryValue::ValueUndefined), |
| false); |
| |
| instr->Remove(); |
| return true; |
| } |
| |
| return false; |
| } |
| |
| void |
| Lowerer::GenerateDynamicObjectAlloc(IR::Instr * newObjInstr, uint inlineSlotCount, uint slotCount, IR::RegOpnd * newObjDst, IR::Opnd * typeSrc) |
| { |
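| // The recycler block is laid out as [ DynamicObject header ][ inlineSlotCount inline slots ]; |
| // any slots beyond inlineSlotCount go into a separate aux-slot allocation (see below). |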
| size_t headerAllocSize = sizeof(Js::DynamicObject) + inlineSlotCount * sizeof(Js::Var); |
| IR::SymOpnd * tempObjectSymOpnd; |
| bool isZeroed = GenerateRecyclerOrMarkTempAlloc(newObjInstr, newObjDst, IR::HelperAllocMemForScObject, headerAllocSize, &tempObjectSymOpnd); |
| |
| if (tempObjectSymOpnd && !PHASE_OFF(Js::HoistMarkTempInitPhase, this->m_func) && this->outerMostLoopLabel) |
| { |
| // Hoist the vtable init to the outermost loop top, as it never changes |
| InsertMove(tempObjectSymOpnd, |
| LoadVTableValueOpnd(this->outerMostLoopLabel, VTableValue::VtableDynamicObject), this->outerMostLoopLabel, false); |
| } |
| else |
| { |
| // MOV [newObjDst + offset(vtable)], DynamicObject::vtable |
| GenerateMemInit(newObjDst, 0, LoadVTableValueOpnd(newObjInstr, VTableValue::VtableDynamicObject), newObjInstr, isZeroed); |
| } |
| // MOV [newObjDst + offset(type)], newObjectType |
| GenerateMemInit(newObjDst, Js::DynamicObject::GetOffsetOfType(), typeSrc, newObjInstr, isZeroed); |
| |
| // CALL JavascriptOperators::AllocMemForVarArray((slotCount - inlineSlotCount) * sizeof(Js::Var)) |
| if (slotCount > inlineSlotCount) |
| { |
| size_t auxSlotsAllocSize = (slotCount - inlineSlotCount) * sizeof(Js::Var); |
| IR::RegOpnd* auxSlots = IR::RegOpnd::New(TyMachPtr, m_func); |
| |
| GenerateRecyclerAllocAligned(IR::HelperAllocMemForVarArray, auxSlotsAllocSize, auxSlots, newObjInstr); |
| GenerateMemInit(newObjDst, Js::DynamicObject::GetOffsetOfAuxSlots(), auxSlots, newObjInstr, isZeroed); |
| |
| IR::IndirOpnd* newObjAuxSlots = IR::IndirOpnd::New(newObjDst, Js::DynamicObject::GetOffsetOfAuxSlots(), TyMachPtr, m_func); |
| this->InsertMove(newObjAuxSlots, auxSlots, newObjInstr); |
| } |
| else |
| { |
| GenerateMemInitNull(newObjDst, Js::DynamicObject::GetOffsetOfAuxSlots(), newObjInstr, isZeroed); |
| } |
| |
| GenerateMemInitNull(newObjDst, Js::DynamicObject::GetOffsetOfObjectArray(), newObjInstr, isZeroed); |
| } |
| |
| void |
| Lowerer::LowerNewScObjectSimple(IR::Instr * instr) |
| { |
| GenerateDynamicObjectAlloc( |
| instr, |
| 0, |
| 0, |
| instr->UnlinkDst()->AsRegOpnd(), |
| LoadLibraryValueOpnd( |
| instr, |
| Js::FunctionBody::DoObjectHeaderInliningForEmptyObjects() |
| ? LibraryValue::ValueObjectHeaderInlinedType |
| : LibraryValue::ValueObjectType)); |
| instr->Remove(); |
| } |
| |
| void |
| Lowerer::LowerNewScObjectLiteral(IR::Instr *newObjInstr) |
| { |
| Func * func = m_func; |
| IR::IntConstOpnd * literalObjectIdOpnd = newObjInstr->UnlinkSrc2()->AsIntConstOpnd(); |
| intptr_t literalTypeRef = newObjInstr->m_func->GetJITFunctionBody()->GetObjectLiteralTypeRef(literalObjectIdOpnd->AsUint32()); |
| |
| IR::LabelInstr * helperLabel = nullptr; |
| IR::LabelInstr * allocLabel = nullptr; |
| IR::Opnd * literalTypeRefOpnd; |
| IR::Opnd * literalTypeOpnd; |
| IR::Opnd * propertyArrayOpnd; |
| |
| IR::IntConstOpnd * propertyArrayIdOpnd = newObjInstr->UnlinkSrc1()->AsIntConstOpnd(); |
| const Js::PropertyIdArray * propIds = newObjInstr->m_func->GetJITFunctionBody()->ReadPropertyIdArrayFromAuxData(propertyArrayIdOpnd->AsUint32()); |
| intptr_t propArrayAddr = newObjInstr->m_func->GetJITFunctionBody()->GetAuxDataAddr(propertyArrayIdOpnd->AsUint32()); |
| uint inlineSlotCapacity = Js::JavascriptOperators::GetLiteralInlineSlotCapacity(propIds); |
| uint slotCapacity = Js::JavascriptOperators::GetLiteralSlotCapacity(propIds); |
| IR::RegOpnd * dstOpnd; |
| |
| literalTypeRefOpnd = IR::AddrOpnd::New(literalTypeRef, IR::AddrOpndKindDynamicMisc, this->m_func); |
| propertyArrayOpnd = IR::AddrOpnd::New(propArrayAddr, IR::AddrOpndKindDynamicMisc, this->m_func); |
| |
| // TODO: OOP JIT, obj literal types: should pass the isShared bit through RPC; |
| // enabled for in-proc JIT only for now, to see the perf impact |
| Js::DynamicType * literalType = func->IsOOPJIT() || !CONFIG_FLAG(OOPJITMissingOpts) ? nullptr : *(Js::DynamicType **)literalTypeRef; |
| |
| if (literalType == nullptr || !literalType->GetIsShared()) |
| { |
| helperLabel = IR::LabelInstr::New(Js::OpCode::Label, func, true); |
| allocLabel = IR::LabelInstr::New(Js::OpCode::Label, func); |
| |
| literalTypeOpnd = IR::RegOpnd::New(TyMachPtr, func); |
| InsertMove(literalTypeOpnd, IR::MemRefOpnd::New(literalTypeRef, TyMachPtr, func), newObjInstr); |
| InsertTestBranch(literalTypeOpnd, literalTypeOpnd, |
| Js::OpCode::BrEq_A, helperLabel, newObjInstr); |
| InsertTestBranch(IR::IndirOpnd::New(literalTypeOpnd->AsRegOpnd(), Js::DynamicType::GetOffsetOfIsShared(), TyInt8, func), |
| IR::IntConstOpnd::New(1, TyInt8, func, true), Js::OpCode::BrEq_A, helperLabel, newObjInstr); |
| |
| dstOpnd = newObjInstr->GetDst()->AsRegOpnd(); |
| } |
| else |
| { |
| literalTypeOpnd = IR::AddrOpnd::New(literalType, IR::AddrOpndKindDynamicType, func); |
| dstOpnd = newObjInstr->UnlinkDst()->AsRegOpnd(); |
| Assert(inlineSlotCapacity == literalType->GetTypeHandler()->GetInlineSlotCapacity()); |
| Assert(slotCapacity == (uint)literalType->GetTypeHandler()->GetSlotCapacity()); |
| } |
| |
| if (helperLabel) |
| { |
| InsertBranch(Js::OpCode::Br, allocLabel, newObjInstr); |
| |
| // Slow path to ensure the type is there |
| newObjInstr->InsertBefore(helperLabel); |
| IR::HelperCallOpnd * opndHelper = IR::HelperCallOpnd::New(IR::HelperEnsureObjectLiteralType, func); |
| |
| m_lowererMD.LoadHelperArgument(newObjInstr, literalTypeRefOpnd); |
| m_lowererMD.LoadHelperArgument(newObjInstr, propertyArrayOpnd); |
| LoadScriptContext(newObjInstr); |
| |
| IR::Instr * ensureTypeInstr = IR::Instr::New(Js::OpCode::Call, literalTypeOpnd, opndHelper, func); |
| newObjInstr->InsertBefore(ensureTypeInstr); |
| m_lowererMD.LowerCall(ensureTypeInstr, 0); |
| |
| newObjInstr->InsertBefore(allocLabel); |
| } |
| else |
| { |
| Assert(allocLabel == nullptr); |
| } |
| |
| // For the next call: |
| // inlineSlotCapacity == Number of slots to allocate beyond the DynamicObject header |
| // slotCapacity - inlineSlotCapacity == Number of aux slots to allocate |
| if(Js::FunctionBody::DoObjectHeaderInliningForObjectLiteral(propIds)) |
| { |
| Assert(inlineSlotCapacity >= Js::DynamicTypeHandler::GetObjectHeaderInlinableSlotCapacity()); |
| Assert(inlineSlotCapacity == slotCapacity); |
| slotCapacity = inlineSlotCapacity -= Js::DynamicTypeHandler::GetObjectHeaderInlinableSlotCapacity(); |
| } |
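| // For example (hypothetical numbers): if inlineSlotCapacity == slotCapacity == 6 and the |
| // object-header-inlinable capacity is 2, the first 2 slots live in the object header itself, |
| // so the allocation below only needs 6 - 2 == 4 slots beyond the header. |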
| GenerateDynamicObjectAlloc( |
| newObjInstr, |
| inlineSlotCapacity, |
| slotCapacity, |
| dstOpnd, |
| literalTypeOpnd); |
| |
| newObjInstr->Remove(); |
| } |
| |
| IR::Instr* |
| Lowerer::LowerProfiledNewScArray(IR::JitProfilingInstr* arrInstr) |
| { |
| IR::Instr *instrPrev = arrInstr->m_prev; |
| |
| /* |
| JavascriptArray *ProfilingHelpers::ProfiledNewScArray( |
| const uint length, |
| FunctionBody *const functionBody, |
| const ProfileId profileId) |
| */ |
| |
| m_lowererMD.LoadHelperArgument(arrInstr, IR::Opnd::CreateProfileIdOpnd(arrInstr->profileId, m_func)); |
| m_lowererMD.LoadHelperArgument(arrInstr, CreateFunctionBodyOpnd(arrInstr->m_func)); |
| m_lowererMD.LoadHelperArgument(arrInstr, arrInstr->UnlinkSrc1()); |
| arrInstr->SetSrc1(IR::HelperCallOpnd::New(IR::HelperProfiledNewScArray, m_func)); |
| m_lowererMD.LowerCall(arrInstr, 0); |
| |
| return instrPrev; |
| } |
| |
| IR::Instr * |
| Lowerer::LowerNewScArray(IR::Instr *arrInstr) |
| { |
| if (arrInstr->IsJitProfilingInstr()) |
| { |
| return LowerProfiledNewScArray(arrInstr->AsJitProfilingInstr()); |
| } |
| |
| IR::Instr *instrPrev = arrInstr->m_prev; |
| IR::JnHelperMethod helperMethod = IR::HelperScrArr_OP_NewScArray; |
| |
| if (arrInstr->IsProfiledInstr() && arrInstr->m_func->HasProfileInfo()) |
| { |
| intptr_t weakFuncRef = arrInstr->m_func->GetWeakFuncRef(); |
| Assert(weakFuncRef); |
| |
| Js::ProfileId profileId = static_cast<Js::ProfileId>(arrInstr->AsProfiledInstr()->u.profileId); |
| Js::ArrayCallSiteInfo *arrayInfo = arrInstr->m_func->GetReadOnlyProfileInfo()->GetArrayCallSiteInfo(profileId); |
| intptr_t arrayInfoAddr = arrInstr->m_func->GetReadOnlyProfileInfo()->GetArrayCallSiteInfoAddr(profileId); |
| |
| Assert(arrInstr->GetSrc1()->IsConstOpnd()); |
| GenerateProfiledNewScArrayFastPath(arrInstr, arrayInfo, arrayInfoAddr, weakFuncRef, arrInstr->GetSrc1()->AsIntConstOpnd()->AsUint32()); |
| |
| if (arrInstr->GetDst() && arrInstr->GetDst()->GetValueType().IsLikelyNativeArray()) |
| { |
| m_lowererMD.LoadHelperArgument(arrInstr, IR::AddrOpnd::New(weakFuncRef, IR::AddrOpndKindDynamicFunctionBodyWeakRef, m_func)); |
| m_lowererMD.LoadHelperArgument(arrInstr, IR::AddrOpnd::New(arrayInfoAddr, IR::AddrOpndKindDynamicArrayCallSiteInfo, m_func)); |
| helperMethod = IR::HelperScrArr_ProfiledNewScArray; |
| } |
| } |
| |
| LoadScriptContext(arrInstr); |
| |
| IR::Opnd *src1Opnd = arrInstr->UnlinkSrc1(); |
| m_lowererMD.LoadHelperArgument(arrInstr, src1Opnd); |
| m_lowererMD.ChangeToHelperCall(arrInstr, helperMethod); |
| |
| return instrPrev; |
| } |
| |
| template <typename ArrayType> |
| BOOL Lowerer::IsSmallObject(uint32 length) |
| { |
| if (ArrayType::HasInlineHeadSegment(length)) |
| return true; |
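| // Otherwise the head segment is a separate recycler allocation; the array counts as "small" |
| // only if that aligned segment allocation is within the recycler's small-object size limit. |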
| uint32 alignedHeadSegmentSize = Js::SparseArraySegment<typename ArrayType::TElement>::GetAlignedSize(length); |
| size_t allocSize = sizeof(Js::SparseArraySegment<typename ArrayType::TElement>) + alignedHeadSegmentSize * sizeof(typename ArrayType::TElement); |
| return HeapInfo::IsSmallObject(HeapInfo::GetAlignedSizeNoCheck(allocSize)); |
| } |
| |
| bool |
| Lowerer::GenerateProfiledNewScArrayFastPath(IR::Instr *instr, Js::ArrayCallSiteInfo * arrayInfo, intptr_t arrayInfoAddr, intptr_t weakFuncRef, uint32 length) |
| { |
| if (PHASE_OFF(Js::ArrayCtorFastPathPhase, m_func) || CONFIG_FLAG(ForceES5Array)) |
| { |
| return false; |
| } |
| |
| Func * func = this->m_func; |
| IR::LabelInstr * helperLabel = IR::LabelInstr::New(Js::OpCode::Label, func, true); |
| uint32 size = length; |
| bool isZeroed; |
| IR::RegOpnd *dstOpnd = instr->GetDst()->AsRegOpnd(); |
| IR::RegOpnd *headOpnd; |
| uint32 i = length; |
| |
| if (instr->GetDst() && instr->GetDst()->GetValueType().IsLikelyNativeIntArray()) |
| { |
| if (!IsSmallObject<Js::JavascriptNativeIntArray>(length)) |
| { |
| return false; |
| } |
| GenerateArrayInfoIsNativeIntArrayTest(instr, arrayInfo, arrayInfoAddr, helperLabel); |
| Assert(Js::JavascriptNativeIntArray::GetOffsetOfArrayFlags() + sizeof(uint16) == Js::JavascriptNativeIntArray::GetOffsetOfArrayCallSiteIndex()); |
| headOpnd = GenerateArrayLiteralsAlloc<Js::JavascriptNativeIntArray>(instr, &size, arrayInfo, &isZeroed); |
| const IR::AutoReuseOpnd autoReuseHeadOpnd(headOpnd, func); |
| |
| GenerateMemInit(dstOpnd, Js::JavascriptNativeIntArray::GetOffsetOfWeakFuncRef(), IR::AddrOpnd::New(weakFuncRef, IR::AddrOpndKindDynamicFunctionBodyWeakRef, m_func), instr, isZeroed); |
| for (; i < size; i++) |
| { |
| GenerateMemInit(headOpnd, sizeof(Js::SparseArraySegmentBase) + i * sizeof(int32), |
| Js::JavascriptNativeIntArray::MissingItem, instr, isZeroed); |
| } |
| } |
| else if (instr->GetDst() && instr->GetDst()->GetValueType().IsLikelyNativeFloatArray()) |
| { |
| if (!IsSmallObject<Js::JavascriptNativeFloatArray>(length)) |
| { |
| return false; |
| } |
| GenerateArrayInfoIsNativeFloatAndNotIntArrayTest(instr, arrayInfo, arrayInfoAddr, helperLabel); |
| Assert(Js::JavascriptNativeFloatArray::GetOffsetOfArrayFlags() + sizeof(uint16) == Js::JavascriptNativeFloatArray::GetOffsetOfArrayCallSiteIndex()); |
| headOpnd = GenerateArrayLiteralsAlloc<Js::JavascriptNativeFloatArray>(instr, &size, arrayInfo, &isZeroed); |
| const IR::AutoReuseOpnd autoReuseHeadOpnd(headOpnd, func); |
| |
| GenerateMemInit(dstOpnd, Js::JavascriptNativeFloatArray::GetOffsetOfWeakFuncRef(), IR::AddrOpnd::New(weakFuncRef, IR::AddrOpndKindDynamicFunctionBodyWeakRef, m_func), instr, isZeroed); |
| // Js::JavascriptArray::MissingItem is a Var, so it may be 32-bit or 64-bit. |
| uint const offsetStart = sizeof(Js::SparseArraySegmentBase); |
| for (; i < size; i++) |
| { |
| GenerateMemInit( |
| headOpnd, offsetStart + i * sizeof(double), |
| GetMissingItemOpndForAssignment(TyFloat64, m_func), |
| instr, isZeroed); |
| } |
| } |
| else |
| { |
| if (!IsSmallObject<Js::JavascriptArray>(length)) |
| { |
| return false; |
| } |
| uint const offsetStart = sizeof(Js::SparseArraySegmentBase); |
| headOpnd = GenerateArrayLiteralsAlloc<Js::JavascriptArray>(instr, &size, arrayInfo, &isZeroed); |
| const IR::AutoReuseOpnd autoReuseHeadOpnd(headOpnd, func); |
| for (; i < size; i++) |
| { |
| GenerateMemInit( |
| headOpnd, offsetStart + i * sizeof(Js::Var), |
| GetMissingItemOpndForAssignment(TyVar, m_func), |
| instr, isZeroed); |
| } |
| } |
| |
| // Skip past the helper call |
| IR::LabelInstr * doneLabel = IR::LabelInstr::New(Js::OpCode::Label, func); |
| InsertBranch(Js::OpCode::Br, doneLabel, instr); |
| instr->InsertBefore(helperLabel); |
| |
| instr->InsertAfter(doneLabel); |
| return true; |
| } |
| |
| void |
| Lowerer::GenerateArrayInfoIsNativeIntArrayTest(IR::Instr *instr, Js::ArrayCallSiteInfo * arrayInfo, intptr_t arrayInfoAddr, IR::LabelInstr * helperLabel) |
| { |
| Func * func = this->m_func; |
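| // Roughly: |
| //     TEST [arrayInfoAddr + offsetOfBits], NotNativeIntBit |
| //     JNE $helper    ; profile data says this site is no longer a native int array |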
| InsertTestBranch(IR::MemRefOpnd::New(((char *)arrayInfoAddr) + Js::ArrayCallSiteInfo::GetOffsetOfBits(), TyUint8, func), |
| IR::IntConstOpnd::New(Js::ArrayCallSiteInfo::NotNativeIntBit, TyUint8, func), Js::OpCode::BrNeq_A, helperLabel, instr); |
| } |
| |
| void |
| Lowerer::GenerateArrayInfoIsNativeFloatAndNotIntArrayTest(IR::Instr *instr, Js::ArrayCallSiteInfo * arrayInfo, intptr_t arrayInfoAddr, IR::LabelInstr * helperLabel) |
| { |
| Func * func = this->m_func; |
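| // Roughly (the bits must be exactly NotNativeIntBit, i.e. demoted from int but still |
| // a native float array; anything else goes to the helper): |
| //     CMP [arrayInfoAddr + offsetOfBits], NotNativeIntBit |
| //     JNE $helper |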
| InsertCompareBranch(IR::MemRefOpnd::New(((char *)arrayInfoAddr) + Js::ArrayCallSiteInfo::GetOffsetOfBits(), TyUint8, func), |
| IR::IntConstOpnd::New(Js::ArrayCallSiteInfo::NotNativeIntBit, TyUint8, func), Js::OpCode::BrNeq_A, helperLabel, instr); |
| } |
| |
| template <typename ArrayType> |
| static IR::JnHelperMethod GetArrayAllocMemHelper(); |
| template <> |
| IR::JnHelperMethod GetArrayAllocMemHelper<Js::JavascriptArray>() |
| { |
| return IR::HelperAllocMemForJavascriptArray; |
| } |
| template <> |
| IR::JnHelperMethod GetArrayAllocMemHelper<Js::JavascriptNativeIntArray>() |
| { |
| return IR::HelperAllocMemForJavascriptNativeIntArray; |
| } |
| template <> |
| IR::JnHelperMethod GetArrayAllocMemHelper<Js::JavascriptNativeFloatArray>() |
| { |
| return IR::HelperAllocMemForJavascriptNativeFloatArray; |
| } |
| |
| template <typename ArrayType> |
| IR::RegOpnd * |
| Lowerer::GenerateArrayLiteralsAlloc(IR::Instr *instr, uint32 * psize, Js::ArrayCallSiteInfo * arrayInfo, bool * pIsHeadSegmentZeroed) |
| { |
| return GenerateArrayAllocHelper<ArrayType>(instr, psize, arrayInfo, pIsHeadSegmentZeroed, false /* isArrayObjCtor */, false /* isNoArgs */); |
| } |
| |
| template <typename ArrayType> |
| IR::RegOpnd * |
| Lowerer::GenerateArrayObjectsAlloc(IR::Instr *instr, uint32 * psize, Js::ArrayCallSiteInfo * arrayInfo, bool * pIsHeadSegmentZeroed, bool isNoArgs) |
| { |
| return GenerateArrayAllocHelper<ArrayType>(instr, psize, arrayInfo, pIsHeadSegmentZeroed, true /* isArrayObjCtor */, isNoArgs); |
| } |
| |
| |
| template <typename ArrayType> |
| IR::RegOpnd * |
| Lowerer::GenerateArrayAllocHelper(IR::Instr *instr, uint32 * psize, Js::ArrayCallSiteInfo * arrayInfo, bool * pIsHeadSegmentZeroed, bool isArrayObjCtor, bool isNoArgs) |
| { |
| Func * func = this->m_func; |
| IR::RegOpnd * dstOpnd = instr->GetDst()->AsRegOpnd(); |
| |
| // Generate code as in JavascriptArray::NewLiteral |
| uint32 count = *psize; |
| uint alignedHeadSegmentSize; |
| size_t arrayAllocSize; |
| |
| IR::RegOpnd * headOpnd = IR::RegOpnd::New(TyMachPtr, func); |
| const IR::AutoReuseOpnd autoReuseHeadOpnd(headOpnd, func, false); |
| IR::Instr * leaHeadInstr = nullptr; |
| bool isHeadSegmentZeroed = false; |
| |
| if (ArrayType::HasInlineHeadSegment(count)) |
| { |
| if (isArrayObjCtor) |
| { |
| uint32 allocCount = isNoArgs ? Js::SparseArraySegmentBase::SMALL_CHUNK_SIZE : count; |
| arrayAllocSize = Js::JavascriptArray::DetermineAllocationSizeForArrayObjects<ArrayType, 0>(allocCount, nullptr, &alignedHeadSegmentSize); |
| } |
| else |
| { |
| uint32 allocCount = count == 0 ? Js::SparseArraySegmentBase::SMALL_CHUNK_SIZE : count; |
| arrayAllocSize = Js::JavascriptArray::DetermineAllocationSize<ArrayType, 0>(allocCount, nullptr, &alignedHeadSegmentSize); |
| } |
| |
| // Note that the returned alignedHeadSegmentSize can be greater than INLINE_CHUNK_SIZE because |
| // the *entire* object, including the head segment, is rounded up to the nearest aligned size. |
| // In that case, clamp it back to INLINE_CHUNK_SIZE, since the head segment is still inlined. |
| // This keeps it consistent with the definition of HasInlineHeadSegment, as asserted below. |
| uint inlineChunkSize = Js::SparseArraySegmentBase::INLINE_CHUNK_SIZE; |
| alignedHeadSegmentSize = min(alignedHeadSegmentSize, inlineChunkSize); |
| |
| Assert(ArrayType::HasInlineHeadSegment(alignedHeadSegmentSize)); |
| |
| leaHeadInstr = IR::Instr::New(Js::OpCode::LEA, headOpnd, |
| IR::IndirOpnd::New(dstOpnd, sizeof(ArrayType), TyMachPtr, func), func); |
| isHeadSegmentZeroed = true; |
| } |
| else |
| { |
| // Need to allocate the head segment first so that if it throws, |
| // we don't yet have the memory assigned to dstOpnd |
| |
| // Even if the instruction is marked as dstIsTempObject, we still should not allocate |
| // that large a chunk on the stack. |
| |
| alignedHeadSegmentSize = Js::SparseArraySegment<typename ArrayType::TElement>::GetAlignedSize(count); |
| GenerateRecyclerAlloc( |
| IR::HelperAllocMemForSparseArraySegmentBase, |
| sizeof(Js::SparseArraySegment<typename ArrayType::TElement>) + |
| alignedHeadSegmentSize * sizeof(typename ArrayType::TElement), |
| headOpnd, |
| instr); |
| |
| arrayAllocSize = sizeof(ArrayType); |
| } |
| |
| *psize = alignedHeadSegmentSize; |
| |
| IR::SymOpnd * tempObjectSymOpnd; |
| bool isZeroed = GenerateRecyclerOrMarkTempAlloc(instr, dstOpnd, |
| GetArrayAllocMemHelper<ArrayType>(), arrayAllocSize, &tempObjectSymOpnd); |
| isHeadSegmentZeroed = isHeadSegmentZeroed & isZeroed; |
| if (tempObjectSymOpnd && !PHASE_OFF(Js::HoistMarkTempInitPhase, this->m_func) && this->outerMostLoopLabel) |
| { |
| // Hoist the vtable init to the outermost loop top, as it never changes |
| InsertMove(tempObjectSymOpnd, |
| this->LoadVTableValueOpnd(this->outerMostLoopLabel, ArrayType::VtableHelper()), |
| this->outerMostLoopLabel, false); |
| } |
| else |
| { |
| GenerateMemInit(dstOpnd, 0, this->LoadVTableValueOpnd(instr, ArrayType::VtableHelper()), instr, isZeroed); |
| } |
| GenerateMemInit(dstOpnd, ArrayType::GetOffsetOfType(), this->LoadLibraryValueOpnd(instr, ArrayType::InitialTypeHelper()), instr, isZeroed); |
| GenerateMemInitNull(dstOpnd, ArrayType::GetOffsetOfAuxSlots(), instr, isZeroed); |
| |
| // Emit the flags and call site index together |
| Js::ProfileId arrayCallSiteIndex = (Js::ProfileId)instr->AsProfiledInstr()->u.profileId; |
| #if DBG |
| if (instr->AsProfiledInstr()->u.profileId < Js::Constants::NoProfileId) |
| { |
| Assert((uint32)(arrayInfo - instr->m_func->GetReadOnlyProfileInfo()->GetArrayCallSiteInfo(0)) == arrayCallSiteIndex); |
| } |
| else |
| { |
| Assert(arrayInfo == nullptr); |
| } |
| #endif |
| |
| // This is the same as: |
| // GenerateMemInit(dstOpnd, ArrayType::GetOffsetOfArrayFlags(), (uint16)Js::DynamicObjectFlags::InitialArrayValue, instr, isZeroed); |
| // GenerateMemInit(dstOpnd, ArrayType::GetOffsetOfArrayCallSiteIndex(), arrayCallSiteIndex, instr, isZeroed); |
| GenerateMemInit(dstOpnd, ArrayType::GetOffsetOfArrayFlags(), (uint)Js::DynamicObjectFlags::InitialArrayValue | ((uint)arrayCallSiteIndex << 16), instr, isZeroed); |
| GenerateMemInit(dstOpnd, ArrayType::GetOffsetOfLength(), count, instr, isZeroed); |
| |
| if (leaHeadInstr != nullptr) |
| { |
| instr->InsertBefore(leaHeadInstr); |
| ChangeToLea(leaHeadInstr); |
| } |
| |
| GenerateMemInit(dstOpnd, ArrayType::GetOffsetOfHead(), headOpnd, instr, isZeroed); |
| GenerateMemInit(dstOpnd, ArrayType::GetOffsetOfLastUsedSegmentOrSegmentMap(), headOpnd, instr, isZeroed); |
| |
| // Initialize segment head |
| GenerateMemInit(headOpnd, Js::SparseArraySegmentBase::GetOffsetOfLeft(), 0, instr, isHeadSegmentZeroed); |
| GenerateMemInit(headOpnd, Js::SparseArraySegmentBase::GetOffsetOfLength(), isArrayObjCtor ? 0 : count, instr, isHeadSegmentZeroed); |
| GenerateMemInit(headOpnd, Js::SparseArraySegmentBase::GetOffsetOfSize(), alignedHeadSegmentSize, instr, isHeadSegmentZeroed); |
| GenerateMemInitNull(headOpnd, Js::SparseArraySegmentBase::GetOffsetOfNext(), instr, isHeadSegmentZeroed); |
| |
| *pIsHeadSegmentZeroed = isHeadSegmentZeroed; |
| return headOpnd; |
| } |
| |
| template <typename ArrayType> |
| IR::RegOpnd * |
| Lowerer::GenerateArrayAlloc(IR::Instr *instr, IR::Opnd * arrayLenOpnd, Js::ArrayCallSiteInfo * arrayInfo) |
| { |
| Func * func = this->m_func; |
| IR::RegOpnd * dstOpnd = instr->GetDst()->AsRegOpnd(); |
| |
| IR::RegOpnd * headOpnd = IR::RegOpnd::New(TyMachPtr, func); |
| const IR::AutoReuseOpnd autoReuseHeadOpnd(headOpnd, func, false); |
| IR::Instr * leaHeadInstr = nullptr; |
| |
| IR::Opnd * arraySizeOpnd = IR::RegOpnd::New(TyUint32, func); |
| IR::Opnd * alignedArrayAllocSizeOpnd = IR::RegOpnd::New(TyUint32, func); |
| |
| IR::LabelInstr * doneCalculatingAllocSize = IR::LabelInstr::New(Js::OpCode::Label, func); |
| IR::LabelInstr * skipToNextBucket = nullptr; |
| uint8 bucketsCount = ArrayType::AllocationBucketsCount; |
| |
| Js::JavascriptArray::EnsureCalculationOfAllocationBuckets<ArrayType>(); |
| |
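| // Emits a cascade over the allocation buckets, roughly: |
| //     CMP arrayLen, bucket[i].length |
| //     JGT $checkNextBucket |
| //     MOV arraySizeOpnd, bucket[i].missingElementsCount |
| //     MOV alignedArrayAllocSizeOpnd, bucket[i].allocationSize |
| //     JMP $doneCalculatingAllocSize |
| //   $checkNextBucket: ... |
| // The last bucket is selected unconditionally; callers are expected to have already |
| // diverted lengths above the largest bucket to the helper. |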
| for (uint8 i = 0; i < bucketsCount; i++) |
| { |
| uint elementsCountToInitialize = ArrayType::allocationBuckets[i][Js::JavascriptArray::MissingElementsCountIndex]; |
| uint allocationSize = ArrayType::allocationBuckets[i][Js::JavascriptArray::AllocationSizeIndex]; |
| |
| // Ensure we already have allocation size calculated and within range |
| Assert(elementsCountToInitialize > 0 && elementsCountToInitialize <= ArrayType::allocationBuckets[bucketsCount - 1][Js::JavascriptArray::MissingElementsCountIndex]); |
| Assert(allocationSize > 0 && allocationSize <= ArrayType::allocationBuckets[bucketsCount - 1][Js::JavascriptArray::AllocationSizeIndex]); |
| |
| // CMP arrayLen, currentBucket |
| // JG $checkNextBucket |
| if (i != (bucketsCount - 1)) |
| { |
| Lowerer::InsertCompare(arrayLenOpnd, IR::IntConstOpnd::New((uint16)ArrayType::allocationBuckets[i][Js::JavascriptArray::AllocationBucketIndex], TyUint32, func), instr); |
| |
| skipToNextBucket = IR::LabelInstr::New(Js::OpCode::Label, func); |
| Lowerer::InsertBranch(Js::OpCode::BrGt_A, skipToNextBucket, instr); |
| } |
| |
| // MOV $arrayAlignedSize, <const1> |
| // MOV $arrayAllocSize, <const2> |
| Lowerer::InsertMove(arraySizeOpnd, IR::IntConstOpnd::New((uint16)elementsCountToInitialize, TyUint32, func), instr); |
| Lowerer::InsertMove(alignedArrayAllocSizeOpnd, IR::IntConstOpnd::New((uint16)allocationSize, TyUint32, func), instr); |
| |
| // JMP $doneCalculatingAllocSize |
| if (i != (bucketsCount - 1)) |
| { |
| Lowerer::InsertBranch(Js::OpCode::Br, doneCalculatingAllocSize, instr); |
| instr->InsertBefore(skipToNextBucket); |
| } |
| } |
| |
| instr->InsertBefore(doneCalculatingAllocSize); |
| // ***** Call to allocation helper ***** |
| this->m_lowererMD.LoadHelperArgument(instr, this->LoadScriptContextValueOpnd(instr, ScriptContextValue::ScriptContextRecycler)); |
| this->m_lowererMD.LoadHelperArgument(instr, alignedArrayAllocSizeOpnd); |
| IR::Instr *newObjCall = IR::Instr::New(Js::OpCode::Call, dstOpnd, IR::HelperCallOpnd::New(GetArrayAllocMemHelper<ArrayType>(), func), func); |
| instr->InsertBefore(newObjCall); |
| this->m_lowererMD.LowerCall(newObjCall, 0); |
| |
| // ***** Load headSeg/initialize it ***** |
| leaHeadInstr = IR::Instr::New(Js::OpCode::LEA, headOpnd, |
| IR::IndirOpnd::New(dstOpnd, sizeof(ArrayType), TyMachPtr, func), func); |
| GenerateMemInit(dstOpnd, 0, this->LoadVTableValueOpnd(instr, ArrayType::VtableHelper()), instr, true); |
| GenerateMemInit(dstOpnd, ArrayType::GetOffsetOfType(), this->LoadLibraryValueOpnd(instr, ArrayType::InitialTypeHelper()), instr, true); |
| GenerateMemInitNull(dstOpnd, ArrayType::GetOffsetOfAuxSlots(), instr, true); |
| |
| Js::ProfileId arrayCallSiteIndex = (Js::ProfileId)instr->AsProfiledInstr()->u.profileId; |
| #if DBG |
| if (instr->AsProfiledInstr()->u.profileId < Js::Constants::NoProfileId) |
| { |
| Assert((uint32)(arrayInfo - instr->m_func->GetReadOnlyProfileInfo()->GetArrayCallSiteInfo(0)) == arrayCallSiteIndex); |
| } |
| else |
| { |
| Assert(arrayInfo == nullptr); |
| } |
| #endif |
| |
| // ***** Array object initialization ***** |
| GenerateMemInit(dstOpnd, ArrayType::GetOffsetOfArrayFlags(), IR::IntConstOpnd::New((uint16)Js::DynamicObjectFlags::InitialArrayValue, TyUint16, func), instr, true); |
| GenerateMemInit(dstOpnd, ArrayType::GetOffsetOfLength(), arrayLenOpnd, instr, true); |
| |
| if (leaHeadInstr != nullptr) |
| { |
| instr->InsertBefore(leaHeadInstr); |
| ChangeToLea(leaHeadInstr); |
| } |
| |
| GenerateMemInit(dstOpnd, ArrayType::GetOffsetOfHead(), headOpnd, instr, true); |
| GenerateMemInit(dstOpnd, ArrayType::GetOffsetOfLastUsedSegmentOrSegmentMap(), headOpnd, instr, true); |
| GenerateMemInit(headOpnd, Js::SparseArraySegmentBase::GetOffsetOfLeft(), 0, instr, true); |
| GenerateMemInit(headOpnd, Js::SparseArraySegmentBase::GetOffsetOfLength(), 0, instr, true); // Set head segment length to 0 |
| GenerateMemInit(headOpnd, Js::SparseArraySegmentBase::GetOffsetOfSize(), arraySizeOpnd, instr, true); |
| GenerateMemInitNull(headOpnd, Js::SparseArraySegmentBase::GetOffsetOfNext(), instr, true); |
| |
| return headOpnd; |
| } |
| |
| |
| bool |
| Lowerer::GenerateProfiledNewScObjArrayFastPath(IR::Instr *instr, Js::ArrayCallSiteInfo * arrayInfo, intptr_t arrayInfoAddr, intptr_t weakFuncRef, uint32 length, IR::LabelInstr* labelDone, bool isNoArgs) |
| { |
| if (PHASE_OFF(Js::ArrayCtorFastPathPhase, m_func)) |
| { |
| return false; |
| } |
| |
| Func * func = this->m_func; |
| IR::LabelInstr * helperLabel = IR::LabelInstr::New(Js::OpCode::Label, func, true); |
| uint32 size = length; |
| bool isZeroed = false; |
| IR::RegOpnd *dstOpnd = instr->GetDst()->AsRegOpnd(); |
| IR::RegOpnd *headOpnd; |
| Js::ProfileId profileId = static_cast<Js::ProfileId>(instr->AsProfiledInstr()->u.profileId); |
| if (arrayInfo && arrayInfo->IsNativeIntArray()) |
| { |
| GenerateArrayInfoIsNativeIntArrayTest(instr, arrayInfo, arrayInfoAddr, helperLabel); |
| Assert(Js::JavascriptNativeIntArray::GetOffsetOfArrayFlags() + sizeof(uint16) == Js::JavascriptNativeIntArray::GetOffsetOfArrayCallSiteIndex()); |
| headOpnd = GenerateArrayObjectsAlloc<Js::JavascriptNativeIntArray>(instr, &size, arrayInfo, &isZeroed, isNoArgs); |
| |
| GenerateMemInit(dstOpnd, Js::JavascriptNativeIntArray::GetOffsetOfArrayCallSiteIndex(), IR::IntConstOpnd::New(profileId, TyUint16, func, true), instr, isZeroed); |
| GenerateMemInit(dstOpnd, Js::JavascriptNativeIntArray::GetOffsetOfWeakFuncRef(), IR::AddrOpnd::New(weakFuncRef, IR::AddrOpndKindDynamicFunctionBodyWeakRef, m_func), instr, isZeroed); |
| for (uint i = 0; i < size; i++) |
| { |
| GenerateMemInit(headOpnd, sizeof(Js::SparseArraySegmentBase) + i * sizeof(int32), |
| Js::JavascriptNativeIntArray::MissingItem, instr, isZeroed); |
| } |
| } |
| else if (arrayInfo && arrayInfo->IsNativeFloatArray()) |
| { |
| GenerateArrayInfoIsNativeFloatAndNotIntArrayTest(instr, arrayInfo, arrayInfoAddr, helperLabel); |
| Assert(Js::JavascriptNativeFloatArray::GetOffsetOfArrayFlags() + sizeof(uint16) == Js::JavascriptNativeFloatArray::GetOffsetOfArrayCallSiteIndex()); |
| headOpnd = GenerateArrayObjectsAlloc<Js::JavascriptNativeFloatArray>(instr, &size, arrayInfo, &isZeroed, isNoArgs); |
| |
| GenerateMemInit(dstOpnd, Js::JavascriptNativeFloatArray::GetOffsetOfArrayCallSiteIndex(), IR::IntConstOpnd::New(profileId, TyUint16, func, true), instr, isZeroed); |
| GenerateMemInit(dstOpnd, Js::JavascriptNativeFloatArray::GetOffsetOfWeakFuncRef(), IR::AddrOpnd::New(weakFuncRef, IR::AddrOpndKindDynamicFunctionBodyWeakRef, m_func), instr, isZeroed); |
| |
| // Js::JavascriptArray::MissingItem is a Var, so it may be 32-bit or 64-bit. |
| uint const offsetStart = sizeof(Js::SparseArraySegmentBase); |
| for (uint i = 0; i < size; i++) |
| { |
| GenerateMemInit( |
| headOpnd, offsetStart + i * sizeof(double), |
| GetMissingItemOpndForAssignment(TyFloat64, m_func), |
| instr, isZeroed); |
| } |
| } |
| else |
| { |
| uint const offsetStart = sizeof(Js::SparseArraySegmentBase); |
| headOpnd = GenerateArrayObjectsAlloc<Js::JavascriptArray>(instr, &size, arrayInfo, &isZeroed, isNoArgs); |
| for (uint i = 0; i < size; i++) |
| { |
| GenerateMemInit( |
| headOpnd, offsetStart + i * sizeof(Js::Var), |
| GetMissingItemOpndForAssignment(TyVar, m_func), |
| instr, isZeroed); |
| } |
| } |
| |
| // Skip past the helper call |
| InsertBranch(Js::OpCode::Br, labelDone, instr); |
| instr->InsertBefore(helperLabel); |
| return true; |
| } |
| |
| |
| template <typename ArrayType> |
| bool |
| Lowerer::GenerateProfiledNewScObjArrayFastPath(IR::Instr *instr, Js::ArrayCallSiteInfo * arrayInfo, intptr_t arrayInfoAddr, intptr_t weakFuncRef, IR::LabelInstr* helperLabel, |
| IR::LabelInstr* labelDone, IR::Opnd* lengthOpnd, uint32 offsetOfCallSiteIndex, uint32 offsetOfWeakFuncRef) |
| { |
| if (PHASE_OFF(Js::ArrayCtorFastPathPhase, m_func)) |
| { |
| return false; |
| } |
| |
| Func * func = this->m_func; |
| |
| IR::RegOpnd *dstOpnd = instr->GetDst()->AsRegOpnd(); |
| IR::RegOpnd *headOpnd; |
| Js::ProfileId profileId = static_cast<Js::ProfileId>(instr->AsProfiledInstr()->u.profileId); |
| uint sizeOfElement = 0; |
| uint allocationBucketsCount = ArrayType::AllocationBucketsCount; |
| uint (*allocationBuckets)[Js::JavascriptArray::AllocationBucketsInfoSize] = ArrayType::allocationBuckets; |
| |
| IRType missingItemType = (arrayInfo ? arrayInfo->IsNativeIntArray() ? IRType::TyInt32 : arrayInfo->IsNativeFloatArray() ? IRType::TyFloat64 : IRType::TyVar : IRType::TyVar); |
| IR::LabelInstr * arrayInitDone = IR::LabelInstr::New(Js::OpCode::Label, func); |
| |
| bool isNativeArray = arrayInfo && (arrayInfo->IsNativeIntArray() || arrayInfo->IsNativeFloatArray()); |
| |
| if (arrayInfo && arrayInfo->IsNativeIntArray()) |
| { |
| sizeOfElement = sizeof(int32); |
| GenerateArrayInfoIsNativeIntArrayTest(instr, arrayInfo, arrayInfoAddr, helperLabel); |
| } |
| else if (arrayInfo && arrayInfo->IsNativeFloatArray()) |
| { |
| sizeOfElement = sizeof(double); |
| GenerateArrayInfoIsNativeFloatAndNotIntArrayTest(instr, arrayInfo, arrayInfoAddr, helperLabel); |
| } |
| else |
| { |
| sizeOfElement = sizeof(Js::Var); |
| } |
| |
| lengthOpnd = GenerateUntagVar(lengthOpnd->AsRegOpnd(), helperLabel, instr); |
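| // Fast path only for small lengths: anything above 8 (presumably the largest allocation |
| // bucket) takes the helper. The unsigned compare also sends any value that untags to a |
| // negative int32 to the helper. |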
| IR::Opnd* upperBound = IR::IntConstOpnd::New(8, TyUint8, func, true); |
| InsertCompare(lengthOpnd, upperBound, instr); |
| InsertBranch(Js::OpCode::BrGt_A, true /* isUnsigned */, helperLabel, instr); |
| headOpnd = GenerateArrayAlloc<ArrayType>(instr, lengthOpnd, arrayInfo); |
| |
| if (isNativeArray) |
| { |
| Assert(ArrayType::GetOffsetOfArrayFlags() + sizeof(uint16) == offsetOfCallSiteIndex); |
| Assert(offsetOfWeakFuncRef > 0); |
| GenerateMemInit(dstOpnd, offsetOfCallSiteIndex, IR::IntConstOpnd::New(profileId, TyUint16, func, true), instr, true /* isZeroed */); |
| GenerateMemInit(dstOpnd, offsetOfWeakFuncRef, IR::AddrOpnd::New(weakFuncRef, IR::AddrOpndKindDynamicFunctionBodyWeakRef, m_func), instr, true /* isZeroed */); |
| } |
| |
| uint const offsetStart = sizeof(Js::SparseArraySegmentBase); |
| uint missingItemCount = 0; |
| uint missingItemInitializedSoFar = 0; |
| uint missingItemIndex = 0; |
| uint maxAllocationSize = allocationBuckets[allocationBucketsCount - 1][Js::JavascriptArray::AllocationSizeIndex]; |
| |
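| // Each pass below writes only the missing items that previous buckets have not already |
| // initialized, then branches out once the requested length fits the current bucket, so no |
| // store lands past the chosen bucket's allocation size (asserted per store below). |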
| for (uint8 i = 0; i < allocationBucketsCount; i++) |
| { |
| missingItemCount = allocationBuckets[i][Js::JavascriptArray::MissingElementsCountIndex]; |
| |
| if (i > 0) |
| { |
| // Subtract the missing items that previous buckets have already initialized |
| missingItemCount -= missingItemInitializedSoFar; |
| } |
| |
| // Generate array initialization with MissingItem |
| for (uint j = 0; j < missingItemCount; j++) |
| { |
| // Ensure we don't write missingItems past allocation size |
| Assert(offsetStart + missingItemIndex * sizeOfElement <= maxAllocationSize); |
| GenerateMemInit(headOpnd, offsetStart + missingItemIndex * sizeOfElement, GetMissingItemOpndForAssignment(missingItemType, func), instr, true /*isZeroed*/); |
| missingItemIndex++; |
| } |
| |
| // CMP arrayLen, currentBucket |
| // JG $checkNextBucket |
| if (i != (allocationBucketsCount - 1)) |
| { |
| Lowerer::InsertCompare(lengthOpnd, IR::IntConstOpnd::New(allocationBuckets[i][Js::JavascriptArray::AllocationBucketIndex], TyUint32, func), instr); |
| |
| Lowerer::InsertBranch(Js::OpCode::BrLe_A, arrayInitDone, instr); |
| } |
| missingItemInitializedSoFar += missingItemCount; |
| } |
| |
| // Ensure the number of missing items written matches the number we accumulated |
| Assert(missingItemIndex == missingItemInitializedSoFar); |
| // Ensure the number of missing items matches what is present in allocationBuckets |
| Assert(missingItemIndex == allocationBuckets[allocationBucketsCount - 1][Js::JavascriptArray::MissingElementsCountIndex]); |
| |
| instr->InsertBefore(arrayInitDone); |
| |
| Lowerer::InsertBranch(Js::OpCode::Br, labelDone, instr); |
| instr->InsertBefore(helperLabel); |
| return true; |
| } |
| |
| void |
| Lowerer::GenerateProfiledNewScIntArrayFastPath(IR::Instr *instr, Js::ArrayCallSiteInfo * arrayInfo, intptr_t arrayInfoAddr, intptr_t weakFuncRef) |
| { |
| // The helper will deal with ForceES5Array |
| if (PHASE_OFF(Js::ArrayLiteralFastPathPhase, m_func) || CONFIG_FLAG(ForceES5Array)) |
| { |
| return; |
| } |
| |
| if (!arrayInfo->IsNativeIntArray()) |
| { |
| return; |
| } |
| |
| if (instr->GetSrc1()->AsAddrOpnd()->GetAddrOpndKind() != IR::AddrOpndKindDynamicAuxBufferRef) |
| { |
| return; |
| } |
| |
| Func * func = this->m_func; |
| IR::LabelInstr * helperLabel = IR::LabelInstr::New(Js::OpCode::Label, func, true); |
| |
| GenerateArrayInfoIsNativeIntArrayTest(instr, arrayInfo, arrayInfoAddr, helperLabel); |
| |
| IR::AddrOpnd * elementsOpnd = instr->GetSrc1()->AsAddrOpnd(); |
| Js::AuxArray<int32> * ints = (Js::AuxArray<int32> *)elementsOpnd->m_metadata; |
| uint32 size = ints->count; |
| |
| // Generate code as in JavascriptArray::NewLiteral |
| bool isHeadSegmentZeroed; |
| IR::RegOpnd * dstOpnd = instr->GetDst()->AsRegOpnd(); |
| Assert(Js::JavascriptNativeIntArray::GetOffsetOfArrayFlags() + sizeof(uint16) == Js::JavascriptNativeIntArray::GetOffsetOfArrayCallSiteIndex()); |
| IR::RegOpnd * headOpnd = GenerateArrayLiteralsAlloc<Js::JavascriptNativeIntArray>(instr, &size, arrayInfo, &isHeadSegmentZeroed); |
| const IR::AutoReuseOpnd autoReuseHeadOpnd(headOpnd, func); |
| |
| GenerateMemInit(dstOpnd, Js::JavascriptNativeIntArray::GetOffsetOfWeakFuncRef(), IR::AddrOpnd::New(weakFuncRef, IR::AddrOpndKindDynamicMisc, m_func), instr, isHeadSegmentZeroed); |
| |
| // Initialize the elements |
| uint i = 0; |
| if (ints->count > 16) |
| { |
| // Use memcpy for more than 16 elements |
| IR::RegOpnd * dstElementsOpnd = IR::RegOpnd::New(TyMachPtr, func); |
| const IR::AutoReuseOpnd autoReuseDstElementsOpnd(dstElementsOpnd, func); |
| IR::Opnd * srcOpnd = IR::AddrOpnd::New((intptr_t)elementsOpnd->m_address + Js::AuxArray<int32>::OffsetOfElements(), IR::AddrOpndKindDynamicMisc, func); |
| InsertLea(dstElementsOpnd, IR::IndirOpnd::New(headOpnd, sizeof(Js::SparseArraySegmentBase), TyMachPtr, func), instr); |
| GenerateMemCopy(dstElementsOpnd, srcOpnd, ints->count * sizeof(int32), instr); |
| i = ints->count; |
| } |
| else |
| { |
| for (; i < ints->count; i++) |
| { |
| GenerateMemInit(headOpnd, sizeof(Js::SparseArraySegmentBase) + i * sizeof(int32), |
| ints->elements[i], instr, isHeadSegmentZeroed); |
| } |
| } |
| Assert(i == ints->count); |
| for (; i < size; i++) |
| { |
| GenerateMemInit(headOpnd, sizeof(Js::SparseArraySegmentBase) + i * sizeof(int32), |
| Js::JavascriptNativeIntArray::MissingItem, instr, isHeadSegmentZeroed); |
| } |
| // Skip past the helper call |
| IR::LabelInstr * doneLabel = IR::LabelInstr::New(Js::OpCode::Label, func); |
| InsertBranch(Js::OpCode::Br, doneLabel, instr); |
| instr->InsertBefore(helperLabel); |
| |
| instr->InsertAfter(doneLabel); |
| } |
| |
| void |
| Lowerer::GenerateProfiledNewScFloatArrayFastPath(IR::Instr *instr, Js::ArrayCallSiteInfo * arrayInfo, intptr_t arrayInfoAddr, intptr_t weakFuncRef) |
| { |
| if (PHASE_OFF(Js::ArrayLiteralFastPathPhase, m_func) || CONFIG_FLAG(ForceES5Array)) |
| { |
| return; |
| } |
| |
| if (!arrayInfo->IsNativeFloatArray()) |
| { |
| return; |
| } |
| |
| if (instr->GetSrc1()->AsAddrOpnd()->GetAddrOpndKind() != IR::AddrOpndKindDynamicAuxBufferRef) |
| { |
| return; |
| } |
| |
| Func * func = this->m_func; |
| IR::LabelInstr * helperLabel = IR::LabelInstr::New(Js::OpCode::Label, func, true); |
| |
| // If the array info hasn't been marked as not-int-array yet, go to the helper and mark it. |
| // This is really just for assert purposes in JavascriptNativeFloatArray::ToVarArray |
| GenerateArrayInfoIsNativeFloatAndNotIntArrayTest(instr, arrayInfo, arrayInfoAddr, helperLabel); |
| |
| IR::AddrOpnd * elementsOpnd = instr->GetSrc1()->AsAddrOpnd(); |
| Js::AuxArray<double> * doubles = (Js::AuxArray<double> *)elementsOpnd->m_metadata; |
| uint32 size = doubles->count; |
| |
| // Generate code as in JavascriptArray::NewLiteral |
| bool isHeadSegmentZeroed; |
| IR::RegOpnd * dstOpnd = instr->GetDst()->AsRegOpnd(); |
| Assert(Js::JavascriptNativeFloatArray::GetOffsetOfArrayFlags() + sizeof(uint16) == Js::JavascriptNativeFloatArray::GetOffsetOfArrayCallSiteIndex()); |
| IR::RegOpnd * headOpnd = GenerateArrayLiteralsAlloc<Js::JavascriptNativeFloatArray>(instr, &size, arrayInfo, &isHeadSegmentZeroed); |
| const IR::AutoReuseOpnd autoReuseHeadOpnd(headOpnd, func); |
| |
| GenerateMemInit(dstOpnd, Js::JavascriptNativeFloatArray::GetOffsetOfWeakFuncRef(), IR::AddrOpnd::New(weakFuncRef, IR::AddrOpndKindDynamicFunctionBodyWeakRef, m_func), instr, isHeadSegmentZeroed); |
| |
| // Initialize the elements |
| |
| IR::RegOpnd * dstElementsOpnd = IR::RegOpnd::New(TyMachPtr, func); |
| const IR::AutoReuseOpnd autoReuseDstElementsOpnd(dstElementsOpnd, func); |
| IR::Opnd * srcOpnd = IR::AddrOpnd::New((intptr_t)elementsOpnd->m_address + Js::AuxArray<double>::OffsetOfElements(), IR::AddrOpndKindDynamicMisc, func); |
| InsertLea(dstElementsOpnd, IR::IndirOpnd::New(headOpnd, sizeof(Js::SparseArraySegmentBase), TyMachPtr, func), instr); |
| GenerateMemCopy(dstElementsOpnd, srcOpnd, doubles->count * sizeof(double), instr); |
| |
| // Js::JavascriptArray::MissingItem is a Var, so it may be 32-bit or 64-bit. |
| uint const offsetStart = sizeof(Js::SparseArraySegmentBase) + doubles->count * sizeof(double); |
| uint const missingItemCount = (size - doubles->count); |
| for (uint i = 0; i < missingItemCount; i++) |
| { |
| GenerateMemInit(headOpnd, offsetStart + i * sizeof(double), |
| GetMissingItemOpndForAssignment(TyFloat64, m_func), instr, isHeadSegmentZeroed); |
| } |
| // Skip past the helper call |
| IR::LabelInstr * doneLabel = IR::LabelInstr::New(Js::OpCode::Label, func); |
| InsertBranch(Js::OpCode::Br, doneLabel, instr); |
| instr->InsertBefore(helperLabel); |
| |
| instr->InsertAfter(doneLabel); |
| } |
| |
| IR::Instr * |
| Lowerer::LowerNewScIntArray(IR::Instr *arrInstr) |
| { |
| IR::Instr *instrPrev = arrInstr->m_prev; |
| IR::JnHelperMethod helperMethod = IR::HelperScrArr_OP_NewScIntArray; |
| |
| if ((arrInstr->IsJitProfilingInstr() || arrInstr->IsProfiledInstr()) && arrInstr->m_func->HasProfileInfo()) |
| { |
| intptr_t weakFuncRef = arrInstr->m_func->GetWeakFuncRef(); |
| if (weakFuncRef) |
| { |
| // Technically a load of the same memory address either way. |
| Js::ProfileId profileId = |
| arrInstr->IsJitProfilingInstr() |
| ? arrInstr->AsJitProfilingInstr()->profileId |
| : static_cast<Js::ProfileId>(arrInstr->AsProfiledInstr()->u.profileId); |
| Js::ArrayCallSiteInfo *arrayInfo = arrInstr->m_func->GetReadOnlyProfileInfo()->GetArrayCallSiteInfo(profileId); |
| intptr_t arrayInfoAddr = arrInstr->m_func->GetReadOnlyProfileInfo()->GetArrayCallSiteInfoAddr(profileId); |
| |
| // Only take the fast path if this isn't a JitProfiling instr and not a copy-on-access array |
| if (arrInstr->IsProfiledInstr() |
| #if ENABLE_COPYONACCESS_ARRAY |
| && (PHASE_OFF1(Js::Phase::CopyOnAccessArrayPhase) || arrayInfo->isNotCopyOnAccessArray) && !PHASE_FORCE1(Js::Phase::CopyOnAccessArrayPhase) |
| #endif |
| ) |
| { |
| GenerateProfiledNewScIntArrayFastPath(arrInstr, arrayInfo, arrayInfoAddr, weakFuncRef); |
| } |
| |
| m_lowererMD.LoadHelperArgument(arrInstr, IR::AddrOpnd::New(weakFuncRef, IR::AddrOpndKindDynamicFunctionBodyWeakRef, m_func)); |
| m_lowererMD.LoadHelperArgument(arrInstr, IR::AddrOpnd::New(arrayInfoAddr, IR::AddrOpndKindDynamicArrayCallSiteInfo, m_func)); |
| helperMethod = IR::HelperScrArr_ProfiledNewScIntArray; |
| } |
| } |
| |
| LoadScriptContext(arrInstr); |
| |
| IR::Opnd *elementsOpnd = arrInstr->UnlinkSrc1(); |
| m_lowererMD.LoadHelperArgument(arrInstr, elementsOpnd); |
| m_lowererMD.ChangeToHelperCall(arrInstr, helperMethod); |
| |
| return instrPrev; |
| } |
| |
| IR::Instr * |
| Lowerer::LowerNewScFltArray(IR::Instr *arrInstr) |
| { |
| IR::Instr *instrPrev = arrInstr->m_prev; |
| IR::JnHelperMethod helperMethod = IR::HelperScrArr_OP_NewScFltArray; |
| |
| if ((arrInstr->IsJitProfilingInstr() || arrInstr->IsProfiledInstr()) && arrInstr->m_func->HasProfileInfo()) |
| { |
| intptr_t weakFuncRef = arrInstr->m_func->GetWeakFuncRef(); |
| if (weakFuncRef) |
| { |
| Js::ProfileId profileId = |
| arrInstr->IsJitProfilingInstr() |
| ? arrInstr->AsJitProfilingInstr()->profileId |
| : static_cast<Js::ProfileId>(arrInstr->AsProfiledInstr()->u.profileId); |
| |
| Js::ArrayCallSiteInfo *arrayInfo = arrInstr->m_func->GetReadOnlyProfileInfo()->GetArrayCallSiteInfo(profileId); |
| intptr_t arrayInfoAddr = arrInstr->m_func->GetReadOnlyProfileInfo()->GetArrayCallSiteInfoAddr(profileId); |
| |
| // Only take the fast path if this isn't a JitProfiling instr |
| if (arrInstr->IsProfiledInstr()) |
| { |
| GenerateProfiledNewScFloatArrayFastPath(arrInstr, arrayInfo, arrayInfoAddr, weakFuncRef); |
| } |
| |
| m_lowererMD.LoadHelperArgument(arrInstr, IR::AddrOpnd::New(weakFuncRef, IR::AddrOpndKindDynamicFunctionBodyWeakRef, m_func)); |
| m_lowererMD.LoadHelperArgument(arrInstr, IR::AddrOpnd::New(arrayInfoAddr, IR::AddrOpndKindDynamicArrayCallSiteInfo, m_func)); |
| helperMethod = IR::HelperScrArr_ProfiledNewScFltArray; |
| } |
| } |
| |
| LoadScriptContext(arrInstr); |
| |
| IR::Opnd *elementsOpnd = arrInstr->UnlinkSrc1(); |
| m_lowererMD.LoadHelperArgument(arrInstr, elementsOpnd); |
| m_lowererMD.ChangeToHelperCall(arrInstr, helperMethod); |
| |
| return instrPrev; |
| } |
| |
| IR::Instr * |
| Lowerer::LowerArraySegmentVars(IR::Instr *arrayInstr) |
| { |
| IR::Instr * instrPrev; |
| IR::HelperCallOpnd * opndHelper = IR::HelperCallOpnd::New(IR::HelperArraySegmentVars, m_func); |
| |
| instrPrev = m_lowererMD.LoadHelperArgument(arrayInstr, arrayInstr->UnlinkSrc2()); |
| m_lowererMD.LoadHelperArgument(arrayInstr, arrayInstr->UnlinkSrc1()); |
| |
| arrayInstr->m_opcode = Js::OpCode::Call; |
| arrayInstr->SetSrc1(opndHelper); |
| |
| m_lowererMD.LowerCall(arrayInstr, 0); |
| |
| return instrPrev; |
| } |
| |
| IR::Instr* Lowerer::LowerProfiledNewArray(IR::JitProfilingInstr* instr, bool hasArgs) |
| { |
| // Use the special helper, which checks whether Array has been overwritten by the user and, |
| // if it hasn't, possibly allocates a native array |
| |
| // Insert a temporary label before the instruction we're about to lower, so that we can return |
| // the first instruction above that needs to be lowered after we're done - regardless of argument |
| // list, StartCall, etc. |
| IR::Instr* startMarkerInstr = InsertLoweredRegionStartMarker(instr); |
| |
| Assert(instr->isNewArray); |
| Assert(instr->arrayProfileId != Js::Constants::NoProfileId); |
| Assert(instr->profileId != Js::Constants::NoProfileId); |
| |
| bool isSpreadCall = instr->m_opcode == Js::OpCode::NewScObjectSpread || instr->m_opcode == Js::OpCode::NewScObjArraySpread; |
| |
| m_lowererMD.LoadNewScObjFirstArg(instr, IR::AddrOpnd::New(nullptr, IR::AddrOpndKindConstantVar, m_func, true), isSpreadCall ? 1 : 0); |
| |
| if (isSpreadCall) |
| { |
| this->LowerSpreadCall(instr, Js::CallFlags_New, true); |
| } |
| else |
| { |
| const int32 argCount = m_lowererMD.LowerCallArgs(instr, Js::CallFlags_New, 4); |
| |
| m_lowererMD.LoadHelperArgument(instr, IR::Opnd::CreateProfileIdOpnd(instr->arrayProfileId, m_func)); |
| m_lowererMD.LoadHelperArgument(instr, IR::Opnd::CreateProfileIdOpnd(instr->profileId, m_func)); |
| m_lowererMD.LoadHelperArgument(instr, IR::Opnd::CreateFramePointerOpnd(m_func)); |
| m_lowererMD.LoadHelperArgument(instr, instr->UnlinkSrc1()); |
| |
| instr->SetSrc1(IR::HelperCallOpnd::New(IR::HelperProfiledNewScObjArray, m_func)); |
| m_lowererMD.LowerCall(instr, static_cast<Js::ArgSlot>(argCount)); |
| } |
| |
| return RemoveLoweredRegionStartMarker(startMarkerInstr); |
| } |
| |
| ///---------------------------------------------------------------------------- |
| /// |
| /// Lowerer::LowerNewScObject |
| /// |
| /// Machine independent lowering of a CallI instr. |
| /// |
| ///---------------------------------------------------------------------------- |
| IR::Instr * |
| Lowerer::LowerNewScObject(IR::Instr *newObjInstr, bool callCtor, bool hasArgs, bool isBaseClassConstructorNewScObject) |
| { |
| if (newObjInstr->IsJitProfilingInstr() && newObjInstr->AsJitProfilingInstr()->isNewArray) |
| { |
| Assert(callCtor); |
| return LowerProfiledNewArray(newObjInstr->AsJitProfilingInstr(), hasArgs); |
| } |
| |
| bool isSpreadCall = newObjInstr->m_opcode == Js::OpCode::NewScObjectSpread || |
| newObjInstr->m_opcode == Js::OpCode::NewScObjArraySpread; |
| |
| Func* func = newObjInstr->m_func; |
| |
| // Insert a temporary label before the instruction we're about to lower, so that we can return |
| // the first instruction above that needs to be lowered after we're done - regardless of argument |
| // list, StartCall, etc. |
| IR::Instr* startMarkerInstr = InsertLoweredRegionStartMarker(newObjInstr); |
| |
| IR::Opnd *ctorOpnd = newObjInstr->GetSrc1(); |
| IR::RegOpnd *newObjDst = newObjInstr->GetDst()->AsRegOpnd(); |
| |
| Assert(!callCtor || !hasArgs || (newObjInstr->GetSrc2() != nullptr /*&& newObjInstr->GetSrc2()->IsSymOpnd()*/)); |
| |
| bool skipNewScObj = false; |
| bool returnNewScObj = false; |
| bool emitBailOut = false; |
| // If we haven't yet split NewScObject into NewScObjectNoCtor and CallI, we will need a temporary register |
| // to hold the result of the object allocation. |
| IR::RegOpnd* createObjDst = callCtor ? IR::RegOpnd::New(TyVar, func) : newObjDst; |
| IR::LabelInstr* helperOrBailoutLabel = IR::LabelInstr::New(Js::OpCode::Label, func, /* isOpHelper = */ true); |
| IR::LabelInstr* callCtorLabel = IR::LabelInstr::New(Js::OpCode::Label, func, /* isOpHelper = */ false); |
| |
| // Try to emit the fast allocation and construction path. |
| bool usedFixedCtorCache = TryLowerNewScObjectWithFixedCtorCache(newObjInstr, createObjDst, helperOrBailoutLabel, callCtorLabel, skipNewScObj, returnNewScObj, emitBailOut); |
| |
| AssertMsg(!skipNewScObj || callCtor, "What will we return if we skip the default new object and don't call the ctor?"); |
| Assert(!skipNewScObj || !returnNewScObj); |
| Assert(usedFixedCtorCache || !skipNewScObj); |
| Assert(!usedFixedCtorCache || newObjInstr->HasFixedFunctionAddressTarget()); |
| Assert(!skipNewScObj || !emitBailOut); |
| |
| #if DBG && 0 // TODO: OOP JIT, enable assert |
| if (usedFixedCtorCache) |
| { |
| Js::JavascriptFunction* ctor = newObjInstr->GetFixedFunction(); |
| Js::FunctionInfo* ctorInfo = ctor->GetFunctionInfo(); |
| Assert((ctorInfo->GetAttributes() & Js::FunctionInfo::Attributes::ErrorOnNew) == 0); |
| Assert(!!(ctorInfo->GetAttributes() & Js::FunctionInfo::Attributes::SkipDefaultNewObject) == skipNewScObj); |
| } |
| #endif |
| |
| IR::Instr* startCallInstr = nullptr; |
| if (callCtor && hasArgs) |
| { |
| hasArgs = !newObjInstr->HasEmptyArgOutChain(&startCallInstr); |
| } |
| |
| // If we're not skipping the default new object, let's emit bailout or a call to NewScObject* helper |
| IR::JnHelperMethod newScHelper = IR::HelperInvalid; |
| IR::Instr *newScObjCall = nullptr; |
| if (!skipNewScObj) |
| { |
| // If we emitted the fast path, this block is a helper block. |
| if (usedFixedCtorCache) |
| { |
| newObjInstr->InsertBefore(helperOrBailoutLabel); |
| } |
| |
| if (emitBailOut) |
| { |
| IR::Instr* bailOutInstr = newObjInstr; |
| |
| newObjInstr = IR::Instr::New(newObjInstr->m_opcode, func); |
| bailOutInstr->TransferTo(newObjInstr); |
| bailOutInstr->m_opcode = Js::OpCode::BailOut; |
| bailOutInstr->InsertAfter(newObjInstr); |
| |
| GenerateBailOut(bailOutInstr); |
| } |
| else |
| { |
| Assert(!newObjDst->CanStoreTemp()); |
| // createObjDst = NewScObject...(ctorOpnd) |
| newScHelper = !callCtor ? |
| (isBaseClassConstructorNewScObject ? |
| (hasArgs ? IR::HelperNewScObjectNoCtorFull : IR::HelperNewScObjectNoArgNoCtorFull) : |
| (hasArgs ? IR::HelperNewScObjectNoCtor : IR::HelperNewScObjectNoArgNoCtor)) : |
| (hasArgs || usedFixedCtorCache ? IR::HelperNewScObjectNoCtor : IR::HelperNewScObjectNoArg); |
| |
| LoadScriptContext(newObjInstr); |
| m_lowererMD.LoadHelperArgument(newObjInstr, newObjInstr->GetSrc1()); |
| |
| newScObjCall = IR::Instr::New(Js::OpCode::Call, createObjDst, IR::HelperCallOpnd::New(newScHelper, func), func); |
| newObjInstr->InsertBefore(newScObjCall); |
| m_lowererMD.LowerCall(newScObjCall, 0); |
| } |
| } |
| |
| // If we call HelperNewScObjectNoArg directly, we won't be calling the constructor from here, because the helper will do it. |
| // We could probably avoid this complexity by converting NewScObjectNoArg to NewScObject in the IRBuilder, once we have dedicated |
| // code paths for new Object() and new Array(). |
| callCtor &= hasArgs || usedFixedCtorCache; |
| AssertMsg(!skipNewScObj || callCtor, "What will we return if we skip the default new object and don't call the ctor?"); |
| |
| newObjInstr->InsertBefore(callCtorLabel); |
| |
| if (callCtor && usedFixedCtorCache) |
| { |
| IR::JnHelperMethod ctorHelper = IR::JnHelperMethodCount; |
| |
| // If we have no arguments (i.e. the argument chain is empty), we can recognize a couple of common special cases, such |
| // as new Object() or new Array(), for which we have optimized helpers. |
| FixedFieldInfo* ctor = newObjInstr->GetFixedFunction(); |
| intptr_t ctorInfo = ctor->GetFuncInfoAddr(); |
| if (!hasArgs && (ctorInfo == m_func->GetThreadContextInfo()->GetJavascriptObjectNewInstanceAddr() || ctorInfo == m_func->GetThreadContextInfo()->GetJavascriptArrayNewInstanceAddr())) |
| { |
| if (ctorInfo == m_func->GetThreadContextInfo()->GetJavascriptObjectNewInstanceAddr()) |
| { |
| Assert(skipNewScObj); |
| ctorHelper = IR::HelperNewJavascriptObjectNoArg; |
| callCtor = false; |
| } |
| else if (ctorInfo == m_func->GetThreadContextInfo()->GetJavascriptArrayNewInstanceAddr()) |
| { |
| Assert(skipNewScObj); |
| ctorHelper = IR::HelperNewJavascriptArrayNoArg; |
| callCtor = false; |
| } |
| |
| if (!callCtor) |
| { |
| LoadScriptContext(newObjInstr); |
| |
| IR::Instr *ctorCall = IR::Instr::New(Js::OpCode::Call, newObjDst, IR::HelperCallOpnd::New(ctorHelper, func), func); |
| newObjInstr->InsertBefore(ctorCall); |
| m_lowererMD.LowerCall(ctorCall, 0); |
| } |
| } |
| } |
| |
| IR::AutoReuseOpnd autoReuseSavedCtorOpnd; |
| if (callCtor) |
| { |
| // Load the first argument, which is either the object just created or null. Spread has an extra argument. |
| IR::Instr * argInstr = this->m_lowererMD.LoadNewScObjFirstArg(newObjInstr, createObjDst, isSpreadCall ? 1 : 0); |
| |
| IR::Instr * insertAfterCtorInstr = newObjInstr->m_next; |
| |
| if (skipNewScObj) |
| { |
| // Since we skipped the default new object, we must be returning whatever the constructor returns |
| // (which better be an Object), so let's just use newObjDst directly. |
| // newObjDst = newObjInstr->m_src1(createObjDst, ...) |
| Assert(newObjInstr->GetDst() == newObjDst); |
| if (isSpreadCall) |
| { |
| newObjInstr = this->LowerSpreadCall(newObjInstr, Js::CallFlags_New); |
| } |
| else |
| { |
| newObjInstr = this->m_lowererMD.LowerCallI(newObjInstr, Js::CallFlags_New, false, argInstr); |
| } |
| } |
| else |
| { |
| // We may need to return the default new object or whatever the constructor returns. Let's stash |
| // away the constructor's return in a temporary operand, and do the right check, if necessary. |
| // ctorResultObjOpnd = newObjInstr->m_src1(createObjDst, ...) |
| IR::RegOpnd *ctorResultObjOpnd = IR::RegOpnd::New(TyVar, func); |
| newObjInstr->UnlinkDst(); |
| newObjInstr->SetDst(ctorResultObjOpnd); |
| |
| if (isSpreadCall) |
| { |
| newObjInstr = this->LowerSpreadCall(newObjInstr, Js::CallFlags_New); |
| } |
| else |
| { |
| newObjInstr = this->m_lowererMD.LowerCallI(newObjInstr, Js::CallFlags_New, false, argInstr); |
| } |
| |
| if (returnNewScObj) |
| { |
| // MOV newObjDst, createObjDst |
| this->InsertMove(newObjDst, createObjDst, insertAfterCtorInstr); |
| } |
| else |
| { |
| LowerGetNewScObjectCommon(ctorResultObjOpnd, ctorResultObjOpnd, createObjDst, insertAfterCtorInstr); |
| this->InsertMove(newObjDst, ctorResultObjOpnd, insertAfterCtorInstr); |
| } |
| } |
| |
| // We never need to update the constructor cache if we hard-coded it. Caches requiring an update after the constructor |
| // don't get cloned, and those that don't require an update will never need one. |
| if (!usedFixedCtorCache) |
| { |
| LowerUpdateNewScObjectCache(insertAfterCtorInstr, newObjDst, ctorOpnd, false /* isCtorFunction */); |
| } |
| } |
| else |
| { |
| if (newObjInstr->IsJitProfilingInstr()) |
| { |
| Assert(m_func->IsSimpleJit()); |
| Assert(!CONFIG_FLAG(NewSimpleJit)); |
| |
| // This path skipped calling the Ctor, which skips calling LowerCallI with newObjInstr, meaning that the call will not be profiled. |
| // So we insert it manually here. |
| |
| if(newScHelper == IR::HelperNewScObjectNoArg && |
| newObjDst && |
| ctorOpnd->IsRegOpnd() && |
| newObjDst->AsRegOpnd()->m_sym == ctorOpnd->AsRegOpnd()->m_sym) |
| { |
| Assert(newObjInstr->m_func->IsSimpleJit()); |
| Assert(createObjDst != newObjDst); |
| |
| // The function object sym is going to be overwritten, so save it in a temp for profiling |
| IR::RegOpnd *const savedCtorOpnd = IR::RegOpnd::New(ctorOpnd->GetType(), newObjInstr->m_func); |
| autoReuseSavedCtorOpnd.Initialize(savedCtorOpnd, newObjInstr->m_func); |
| Lowerer::InsertMove(savedCtorOpnd, ctorOpnd, newObjInstr); |
| ctorOpnd = savedCtorOpnd; |
| } |
| |
| // It is a constructor (CallFlags_New) and therefore a single argument (this) would have been given. |
| const auto info = Lowerer::MakeCallInfoConst(Js::CallFlags_New, 1, func); |
| |
| Assert(newScObjCall); |
| IR::JitProfilingInstr *const newObjJitProfilingInstr = newObjInstr->AsJitProfilingInstr(); |
| GenerateCallProfiling( |
| newObjJitProfilingInstr->profileId, |
| newObjJitProfilingInstr->inlineCacheIndex, |
| createObjDst, |
| ctorOpnd, |
| info, |
| false, |
| newScObjCall, |
| newObjInstr); |
| } |
| |
| // MOV newObjDst, createObjDst |
| if (!skipNewScObj && createObjDst != newObjDst) |
| { |
| this->InsertMove(newObjDst, createObjDst, newObjInstr); |
| } |
| newObjInstr->Remove(); |
| } |
| |
| // Return the first instruction above the region we've just lowered. |
| return RemoveLoweredRegionStartMarker(startMarkerInstr); |
| } |
| |
| IR::Instr* |
| Lowerer::GenerateCallProfiling(Js::ProfileId profileId, Js::InlineCacheIndex inlineCacheIndex, IR::Opnd* retval, IR::Opnd* calleeFunctionObjOpnd, IR::Opnd* callInfo, bool returnTypeOnly, IR::Instr* callInstr, IR::Instr* insertAfter) |
| { |
| // This should only ever happen in profiling simplejit |
| Assert(m_func->DoSimpleJitDynamicProfile()); |
| |
| // Make sure they gave us the correct call instruction |
| #if defined(_M_IX86) || defined(_M_X64) |
| Assert(callInstr->m_opcode == Js::OpCode::CALL); |
| #elif defined(_M_ARM) |
| Assert(callInstr->m_opcode == Js::OpCode::BLX); |
| #elif defined(_M_ARM64) |
| Assert(callInstr->m_opcode == Js::OpCode::BLR); |
| #endif |
| Func*const func = insertAfter->m_func; |
| |
| { |
| // First, we should save the implicit call flags |
| const auto starFlag = GetImplicitCallFlagsOpnd(); |
| const auto saveOpnd = IR::RegOpnd::New(starFlag->GetType(), func); |
| |
| IR::AutoReuseOpnd a(starFlag, func), b(saveOpnd, func); |
| // Save the flags (before the call) and restore them (after the call) |
| this->InsertMove(saveOpnd, starFlag, callInstr); |
| // Note: on ARM this is slightly inefficient because it forces a reload of the memory location into a reg (whereas x86 can load straight from hard-coded memory into a reg), |
| // but it works, and avoiding the reload would require more refactoring. |
| this->InsertMove(starFlag, saveOpnd, insertAfter->m_next); |
| } |
| |
| // Profile a call that just happened: push some extra info on the stack and call the helper |
| |
| if (!retval) |
| { |
| if (returnTypeOnly) |
| { |
| // If we are only supposed to profile the return type but don't use the return value, we might |
| // as well do nothing! |
| return insertAfter; |
| } |
| retval = IR::AddrOpnd::NewNull(func); |
| } |
| |
| IR::Instr* profileCall = IR::Instr::New(Js::OpCode::Call, func); |
| |
| bool needInlineCacheIndex; |
| IR::JnHelperMethod helperMethod; |
| if (returnTypeOnly) |
| { |
| needInlineCacheIndex = false; |
| helperMethod = IR::HelperSimpleProfileReturnTypeCall; |
| } |
| else if(inlineCacheIndex == Js::Constants::NoInlineCacheIndex) |
| { |
| needInlineCacheIndex = false; |
| helperMethod = IR::HelperSimpleProfileCall_DefaultInlineCacheIndex; |
| } |
| else |
| { |
| needInlineCacheIndex = true; |
| helperMethod = IR::HelperSimpleProfileCall; |
| } |
| profileCall->SetSrc1(IR::HelperCallOpnd::New(helperMethod, func)); |
| |
| insertAfter->InsertAfter(profileCall); |
| |
| m_lowererMD.LoadHelperArgument(profileCall, callInfo); |
| m_lowererMD.LoadHelperArgument(profileCall, calleeFunctionObjOpnd); |
| m_lowererMD.LoadHelperArgument(profileCall, retval); |
| if(needInlineCacheIndex) |
| { |
| m_lowererMD.LoadHelperArgument(profileCall, IR::Opnd::CreateInlineCacheIndexOpnd(inlineCacheIndex, func)); |
| } |
| m_lowererMD.LoadHelperArgument(profileCall, IR::Opnd::CreateProfileIdOpnd(profileId, func)); |
| // Push the frame pointer so that the profiling call can grab the stack layout |
| m_lowererMD.LoadHelperArgument(profileCall, IR::Opnd::CreateFramePointerOpnd(func)); |
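| // Sketch of the resulting parameter order, assuming the usual LoadHelperArgument convention that the |
| // last-loaded argument becomes the first parameter: framePointer, profileId, [inlineCacheIndex,] |
| // retval, calleeFunctionObj, callInfo. |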
| |
| // No args: the helper is stdcall |
| return m_lowererMD.LowerCall(profileCall, 0); |
| } |
| |
| bool Lowerer::TryLowerNewScObjectWithFixedCtorCache(IR::Instr* newObjInstr, IR::RegOpnd* newObjDst, |
| IR::LabelInstr* helperOrBailoutLabel, IR::LabelInstr* callCtorLabel, bool& skipNewScObj, bool& returnNewScObj, bool& emitBailOut) |
| { |
| skipNewScObj = false; |
| returnNewScObj = false; |
| |
| AssertMsg(!PHASE_OFF(Js::ObjTypeSpecNewObjPhase, this->m_func) || !newObjInstr->HasBailOutInfo(), |
| "Why do we have bailout on NewScObject when ObjTypeSpecNewObj is off?"); |
| |
| if (PHASE_OFF(Js::FixedNewObjPhase, newObjInstr->m_func) && PHASE_OFF(Js::ObjTypeSpecNewObjPhase, this->m_func)) |
| { |
| return false; |
| } |
| |
| JITTimeConstructorCache * ctorCache; |
| |
| if (newObjInstr->HasBailOutInfo() && !newObjInstr->HasLazyBailOut()) |
| { |
| Assert(newObjInstr->IsNewScObjectInstr()); |
| Assert(newObjInstr->IsProfiledInstr()); |
| Assert(newObjInstr->GetBailOutKind() == IR::BailOutFailedCtorGuardCheck || newObjInstr->HasLazyBailOut()); |
| |
| emitBailOut = true; |
| |
| ctorCache = newObjInstr->m_func->GetConstructorCache(static_cast<Js::ProfileId>(newObjInstr->AsProfiledInstr()->u.profileId)); |
| Assert(ctorCache != nullptr); |
| Assert(!ctorCache->SkipNewScObject()); |
| Assert(!ctorCache->IsTypeFinal() || ctorCache->CtorHasNoExplicitReturnValue()); |
| |
| LinkCtorCacheToGuardedProperties(ctorCache); |
| } |
| else |
| { |
| if (newObjInstr->m_opcode == Js::OpCode::NewScObjArray || newObjInstr->m_opcode == Js::OpCode::NewScObjArraySpread) |
| { |
| // These instrs carry a profile that indexes the array call site info, not the ctor cache. |
| return false; |
| } |
| |
| ctorCache = newObjInstr->IsProfiledInstr() ? newObjInstr->m_func->GetConstructorCache(static_cast<Js::ProfileId>(newObjInstr->AsProfiledInstr()->u.profileId)) : nullptr; |
| |
| if (ctorCache == nullptr) |
| { |
| if (PHASE_TRACE(Js::FixedNewObjPhase, newObjInstr->m_func) || PHASE_TESTTRACE(Js::FixedNewObjPhase, newObjInstr->m_func)) |
| { |
| char16 debugStringBuffer[MAX_FUNCTION_BODY_DEBUG_STRING_SIZE]; |
| Output::Print(_u("FixedNewObj: function %s (%s): lowering non-fixed new script object for %s, because %s.\n"), |
| newObjInstr->m_func->GetJITFunctionBody()->GetDisplayName(), newObjInstr->m_func->GetDebugNumberSet(debugStringBuffer), Js::OpCodeUtil::GetOpCodeName(newObjInstr->m_opcode), |
| newObjInstr->IsProfiledInstr() ? _u("constructor cache hasn't been cloned") : _u("instruction is not profiled")); |
| Output::Flush(); |
| } |
| |
| return false; |
| } |
| } |
| |
| Assert(ctorCache != nullptr); |
| |
| // We should only have cloned if the script contexts match. |
| // TODO: oop jit, add ctorCache->scriptContext for tracing assert |
| // Assert(newObjInstr->m_func->GetScriptContextInfo()->GetAddr() == ctorCache->scriptContext); |
| |
| // Built-in constructors don't need a default new object. Since we know which constructor we're calling, we can skip creating a default |
| // object and call a specialized helper (or even the constructor directly), avoiding the checks in generic NewScObjectCommon. |
| if (ctorCache->SkipNewScObject()) |
| { |
| #if 0 // TODO: oop jit, add constructor info for tracing |
| if (PHASE_TRACE(Js::FixedNewObjPhase, newObjInstr->m_func) || PHASE_TESTTRACE(Js::FixedNewObjPhase, newObjInstr->m_func)) |
| { |
| const Js::JavascriptFunction* ctor = ctorCache->constructor; |
| Js::FunctionBody* ctorBody = ctor->GetFunctionInfo()->HasBody() ? ctor->GetFunctionInfo()->GetFunctionBody() : nullptr; |
| const char16* ctorName = ctorBody != nullptr ? ctorBody->GetDisplayName() : _u("<unknown>"); |
| |
| char16 debugStringBuffer[MAX_FUNCTION_BODY_DEBUG_STRING_SIZE]; |
| char16 debugStringBuffer2[MAX_FUNCTION_BODY_DEBUG_STRING_SIZE]; |
| |
| Output::Print(_u("FixedNewObj: function %s (%s): lowering skipped new script object for %s with %s ctor <unknown> (%s %s).\n"), |
| newObjInstr->m_func->GetJITFunctionBody()->GetDisplayName(), newObjInstr->m_func->GetDebugNumberSet(debugStringBuffer2), Js::OpCodeUtil::GetOpCodeName(newObjInstr->m_opcode), |
| newObjInstr->m_opcode == Js::OpCode::NewScObjectNoCtor ? _u("inlined") : _u("called"), |
| ctorName, ctorBody ? ctorBody->GetDebugNumberSet(debugStringBuffer) : _u("(null)")); |
| Output::Flush(); |
| } |
| #endif |
| |
| // All built-in constructors share a special singleton cache that is never checked and never invalidated. It cannot be used |
| // as a guard to protect any property operations downstream from the constructor. If this ever becomes a performance issue, |
| // we could have a dedicated cache for each built-in constructor, populate it and invalidate it as any other constructor cache. |
| AssertMsg(!emitBailOut, "Can't bail out on constructor cache guard for built-in constructors."); |
| |
| skipNewScObj = true; |
| IR::AddrOpnd* zeroOpnd = IR::AddrOpnd::NewNull(this->m_func); |
| this->InsertMove(newObjDst, zeroOpnd, newObjInstr); |
| return true; |
| } |
| |
| AssertMsg(ctorCache->GetType() != nullptr, "Why did we hard-code a mismatched, invalidated or polymorphic constructor cache?"); |
| |
| #if 0 // TODO: oop jit, add constructor info for tracing |
| if (PHASE_TRACE(Js::FixedNewObjPhase, newObjInstr->m_func) || PHASE_TESTTRACE(Js::FixedNewObjPhase, newObjInstr->m_func)) |
| { |
| const Js::JavascriptFunction* constructor = ctorCache->constructor; |
| Js::FunctionBody* constructorBody = constructor->GetFunctionInfo()->HasBody() ? constructor->GetFunctionInfo()->GetFunctionBody() : nullptr; |
| const char16* constructorName = constructorBody != nullptr ? constructorBody->GetDisplayName() : _u("<unknown>"); |
| |
| char16 debugStringBuffer[MAX_FUNCTION_BODY_DEBUG_STRING_SIZE]; |
| char16 debugStringBuffer2[MAX_FUNCTION_BODY_DEBUG_STRING_SIZE]; |
| |
| if (PHASE_TRACE(Js::FixedNewObjPhase, newObjInstr->m_func)) |
| { |
| Output::Print(_u("FixedNewObj: function %s (%s): lowering fixed new script object for %s with %s ctor <unknown> (%s %s): type = %p, slots = %d, inlined slots = %d.\n"), |
| newObjInstr->m_func->GetJITFunctionBody()->GetDisplayName(), newObjInstr->m_func->GetDebugNumberSet(debugStringBuffer2), Js::OpCodeUtil::GetOpCodeName(newObjInstr->m_opcode), |
| newObjInstr->m_opcode == Js::OpCode::NewScObjectNoCtor ? _u("inlined") : _u("called"), |
| constructorName, constructorBody ? constructorBody->GetDebugNumberSet(debugStringBuffer) : _u("(null)"), |
| ctorCache->type, ctorCache->slotCount, ctorCache->inlineSlotCount); |
| } |
| else |
| { |
| Output::Print(_u("FixedNewObj: function %s (%s): lowering fixed new script object for %s with %s ctor <unknown> (%s %s): slots = %d, inlined slots = %d.\n"), |
| newObjInstr->m_func->GetJITFunctionBody()->GetDisplayName(), newObjInstr->m_func->GetDebugNumberSet(debugStringBuffer2), Js::OpCodeUtil::GetOpCodeName(newObjInstr->m_opcode), |
| newObjInstr->m_opcode == Js::OpCode::NewScObjectNoCtor ? _u("inlined") : _u("called"), |
| constructorName, debugStringBuffer, ctorCache->slotCount, ctorCache->inlineSlotCount); |
| } |
| Output::Flush(); |
| } |
| #endif |
| |
| // If the constructor has no return statements, we can safely return the object that was created here. |
| // No need to check what the constructor returned - it must be undefined. |
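| // E.g. for "function Point(x, y) { this.x = x; this.y = y; }", "new Point(1, 2)" always yields the |
| // freshly created object, so the (undefined) return value of the ctor never needs to be inspected. |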
| returnNewScObj = ctorCache->CtorHasNoExplicitReturnValue(); |
| |
| Assert(Js::ConstructorCache::GetSizeOfGuardValue() == static_cast<size_t>(TySize[TyMachPtr])); |
| IR::MemRefOpnd* guardOpnd = IR::MemRefOpnd::New(ctorCache->GetRuntimeCacheGuardAddr(), TyMachReg, this->m_func, |
| IR::AddrOpndKindDynamicGuardValueRef); |
| IR::AddrOpnd* zeroOpnd = IR::AddrOpnd::NewNull(this->m_func); |
| InsertCompareBranch(guardOpnd, zeroOpnd, Js::OpCode::BrEq_A, helperOrBailoutLabel, newObjInstr); |
| |
| // If we are calling new on a class constructor, the contract is that we pass new.target as the 'this' argument. |
| // The function is the constructor on which we called new, which is new.target here. |
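| // E.g. for "class C { }", "new C()" places C itself (new.target) in the 'this' slot; the actual |
| // instance is created later, inside the constructor call path, rather than pre-allocated here. |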
| FixedFieldInfo* ctor = newObjInstr->GetFixedFunction(); |
| |
| if (ctor->IsClassCtor()) |
| { |
| // MOV newObjDst, function |
| this->InsertMove(newObjDst, newObjInstr->GetSrc1(), newObjInstr); |
| } |
| else |
| { |
| JITTypeHolder newObjectType(ctorCache->GetType()); |
| Assert(newObjectType->IsShared()); |
| |
| IR::AddrOpnd* typeSrc = IR::AddrOpnd::New(newObjectType->GetAddr(), IR::AddrOpndKindDynamicType, m_func); |
| |
| // For the next call: |
| // inlineSlotSize == Number of slots to allocate beyond the DynamicObject header |
| // slotSize - inlineSlotSize == Number of aux slots to allocate |
| int inlineSlotSize = ctorCache->GetInlineSlotCount(); |
| int slotSize = ctorCache->GetSlotCount(); |
| if (newObjectType->GetTypeHandler()->IsObjectHeaderInlinedTypeHandler()) |
| { |
| Assert(inlineSlotSize >= Js::DynamicTypeHandler::GetObjectHeaderInlinableSlotCapacity()); |
| Assert(inlineSlotSize == slotSize); |
| slotSize = inlineSlotSize -= Js::DynamicTypeHandler::GetObjectHeaderInlinableSlotCapacity(); |
| } |
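| // Worked example (illustrative, assuming a header-inlinable slot capacity of 2): with |
| // inlineSlotSize == slotSize == 4 and an object-header-inlined type handler, both become 4 - 2 = 2, |
| // i.e. 2 slots are allocated beyond the header and slotSize - inlineSlotSize == 0 aux slots. |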
| GenerateDynamicObjectAlloc(newObjInstr, inlineSlotSize, slotSize, newObjDst, typeSrc); |
| } |
| |
| // JMP $callCtor |
| IR::BranchInstr *callCtorBranch = IR::BranchInstr::New(LowererMD::MDUncondBranchOpcode, callCtorLabel, m_func); |
| newObjInstr->InsertBefore(callCtorBranch); |
| |
| return true; |
| } |
| |
| void |
| Lowerer::GenerateRecyclerAllocAligned(IR::JnHelperMethod allocHelper, size_t allocSize, IR::RegOpnd* newObjDst, IR::Instr* insertionPointInstr, bool inOpHelper) |
| { |
| IR::LabelInstr * allocDoneLabel = nullptr; |
| |
| if (!PHASE_OFF(Js::JitAllocNewObjPhase, insertionPointInstr->m_func) && HeapInfo::IsSmallObject(allocSize)) |
| { |
| IR::LabelInstr * allocHelperLabel = IR::LabelInstr::New(Js::OpCode::Label, this->m_func, true); |
| allocDoneLabel = IR::LabelInstr::New(Js::OpCode::Label, this->m_func, inOpHelper); |
| |
| this->m_lowererMD.GenerateFastRecyclerAlloc(allocSize, newObjDst, insertionPointInstr, allocHelperLabel, allocDoneLabel); |
| |
| // $allocHelper: |
| insertionPointInstr->InsertBefore(allocHelperLabel); |
| } |
| |
| // call JavascriptOperators::AllocMemForScObject(allocSize, scriptContext->GetRecycler()) |
| this->m_lowererMD.LoadHelperArgument(insertionPointInstr, this->LoadScriptContextValueOpnd(insertionPointInstr, ScriptContextValue::ScriptContextRecycler)); |
| this->m_lowererMD.LoadHelperArgument(insertionPointInstr, IR::IntConstOpnd::New((int32)allocSize, TyUint32, m_func, true)); |
| IR::Instr *newObjCall = IR::Instr::New(Js::OpCode::Call, newObjDst, IR::HelperCallOpnd::New(allocHelper, m_func), m_func); |
| insertionPointInstr->InsertBefore(newObjCall); |
| this->m_lowererMD.LowerCall(newObjCall, 0); |
| |
| if (allocDoneLabel != nullptr) |
| { |
| // $allocDone: |
| insertionPointInstr->InsertBefore(allocDoneLabel); |
| } |
| } |
| |
| IR::Instr * |
| Lowerer::LowerGetNewScObject(IR::Instr *instr) |
| { |
| Assert(instr); |
| Assert(instr->m_opcode == Js::OpCode::GetNewScObject); |
| Assert(instr->GetDst()); |
| Assert(instr->GetSrc1()); |
| Assert(instr->GetSrc2()); |
| |
| const auto instrPrev = instr->m_prev; |
| Assert(instrPrev); |
| |
| LowerGetNewScObjectCommon( |
| instr->GetDst()->AsRegOpnd(), |
| instr->GetSrc1()->AsRegOpnd(), |
| instr->GetSrc2()->AsRegOpnd(), |
| instr); |
| instr->Remove(); |
| |
| return instrPrev; |
| } |
| |
| void |
| Lowerer::LowerGetNewScObjectCommon( |
| IR::RegOpnd *const resultObjOpnd, |
| IR::RegOpnd *const constructorReturnOpnd, |
| IR::RegOpnd *const newObjOpnd, |
| IR::Instr *insertBeforeInstr) |
| { |
| Assert(resultObjOpnd); |
| Assert(constructorReturnOpnd); |
| Assert(newObjOpnd); |
| Assert(insertBeforeInstr); |
| |
| // (newObjOpnd == 'this' value passed to constructor) |
| // |
| // if (!IsJsObject(constructorReturnOpnd)) |
| // goto notObjectLabel |
| // newObjOpnd = constructorReturnOpnd |
| // notObjectLabel: |
| // resultObjOpnd = newObjOpnd |
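| // E.g. "function F() { return { a: 1 }; }" makes "new F()" yield the returned object, whereas |
| // "function G() { return 42; }" makes "new G()" yield the default new object, since 42 is not an Object. |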
| |
| if(!constructorReturnOpnd->IsEqual(newObjOpnd)) |
| { |
| // Need to check whether the constructor returned an object |
| |
| IR::LabelInstr *notObjectLabel = IR::LabelInstr::New(Js::OpCode::Label, m_func); |
| Assert(insertBeforeInstr->m_prev); |
| IR::LabelInstr *const doneLabel = IR::LabelInstr::New(Js::OpCode::Label, m_func); |
| insertBeforeInstr->InsertBefore(doneLabel); |
| insertBeforeInstr = doneLabel; |
| |
| #if defined(_M_ARM32_OR_ARM64) |
| m_lowererMD.LoadHelperArgument(insertBeforeInstr, constructorReturnOpnd); |
| |
| IR::Opnd * targetOpnd = IR::RegOpnd::New(StackSym::New(TyInt32,m_func), TyInt32, m_func); |
| IR::Instr * callIsObjectInstr = IR::Instr::New(Js::OpCode::Call, targetOpnd, m_func); |
| insertBeforeInstr->InsertBefore(callIsObjectInstr); |
| this->m_lowererMD.ChangeToHelperCall(callIsObjectInstr, IR::HelperOp_IsObject); |
| |
| InsertTestBranch( targetOpnd, targetOpnd, Js::OpCode::BrEq_A, notObjectLabel,insertBeforeInstr); |
| #else |
| m_lowererMD.GenerateIsJsObjectTest(constructorReturnOpnd, insertBeforeInstr, notObjectLabel); |
| #endif |
| |
| // Value returned by constructor is an object (use constructorReturnOpnd) |
| if(!resultObjOpnd->IsEqual(constructorReturnOpnd)) |
| { |
| this->InsertMove(resultObjOpnd, constructorReturnOpnd, insertBeforeInstr); |
| } |
| insertBeforeInstr->InsertBefore(IR::BranchInstr::New(LowererMD::MDUncondBranchOpcode, doneLabel, m_func)); |
| |
| // Value returned by constructor is not an object (use newObjOpnd) |
| insertBeforeInstr->InsertBefore(notObjectLabel); |
| } |
| |
| if(!resultObjOpnd->IsEqual(newObjOpnd)) |
| { |
| this->InsertMove(resultObjOpnd, newObjOpnd, insertBeforeInstr); |
| } |
| |
| // fall through to insertBeforeInstr or doneLabel |
| } |
| |
| ///---------------------------------------------------------------------------- |
| /// |
| /// Lowerer::LowerUpdateNewScObjectCache |
| /// |
| ///---------------------------------------------------------------------------- |
| IR::Instr * |
| Lowerer::LowerUpdateNewScObjectCache(IR::Instr * insertInstr, IR::Opnd *dst, IR::Opnd *src1, const bool isCtorFunction) |
| { |
| // if (!isCtorFunction) |
| // { |
| // MOV r1, [src1 + offset(type)] -- check base TypeIds_Function |
| // CMP [r1 + offset(typeId)], TypeIds_Function |
| // } |
| // JNE $fallThru |
| // MOV r2, [src1 + offset(constructorCache)] |
| // MOV r3, [r2 + offset(updateAfterCtor)] |
| // TEST r3, r3 -- check if updateAfterCtor is 0 |
| // JEQ $fallThru |
| // CALL UpdateNewScObjectCache(src1, dst, scriptContext) |
| // $fallThru: |
| IR::LabelInstr *labelFallThru = IR::LabelInstr::New(Js::OpCode::Label, m_func); |
| |
| src1 = GetRegOpnd(src1, insertInstr, m_func, TyMachReg); |
| |
| // Check whether the constructor is a function if we don't already know it. |
| if (!isCtorFunction) |
| { |
| IR::RegOpnd* src1RegOpnd = src1->AsRegOpnd(); |
| // MOV r1, [src1 + offset(type)] -- check base TypeIds_Function |
| IR::RegOpnd *r1 = IR::RegOpnd::New(TyMachReg, this->m_func); |
| IR::IndirOpnd *indirOpnd = IR::IndirOpnd::New(src1RegOpnd, Js::RecyclableObject::GetOffsetOfType(), TyMachReg, this->m_func); |
| Lowerer::InsertMove(r1, indirOpnd, insertInstr); |
| |
| // CMP [r1 + offset(typeId)], TypeIds_Function |
| // JNE $fallThru |
| indirOpnd = IR::IndirOpnd::New(r1, Js::Type::GetOffsetOfTypeId(), TyInt32, this->m_func); |
| IR::IntConstOpnd *intOpnd = IR::IntConstOpnd::New(Js::TypeIds_Function, TyInt32, this->m_func, true); |
| IR::BranchInstr* branchInstr = InsertCompareBranch(indirOpnd, intOpnd, Js::OpCode::BrNeq_A, labelFallThru, insertInstr); |
| InsertObjectPoison(src1RegOpnd, branchInstr, insertInstr, false); |
| } |
| |
| // Every function has a constructor cache, even if only the default blank one. |
| // r2 = MOV JavascriptFunction->constructorCache |
| IR::RegOpnd *r2 = IR::RegOpnd::New(TyVar, this->m_func); |
| IR::IndirOpnd *opndIndir = IR::IndirOpnd::New(src1->AsRegOpnd(), Js::JavascriptFunction::GetOffsetOfConstructorCache(), TyMachReg, this->m_func); |
| IR::Instr *instr = Lowerer::InsertMove(r2, opndIndir, insertInstr); |
| |
| // r3 = constructorCache->updateAfterCtor |
| IR::RegOpnd *r3 = IR::RegOpnd::New(TyInt8, this->m_func); |
| IR::IndirOpnd *indirOpnd = IR::IndirOpnd::New(r2, Js::ConstructorCache::GetOffsetOfUpdateAfterCtor(), TyUint8, this->m_func); |
| instr = Lowerer::InsertMove(r3, indirOpnd, insertInstr); |
| |
| // TEST r3, r3 -- check if updateAfterCtor is 0 |
| // JEQ $fallThru |
| InsertTestBranch(r3, r3, Js::OpCode::BrEq_A, labelFallThru, insertInstr); |
| |
| // r2 = UpdateNewScObjectCache(src1, dst, scriptContext) |
| insertInstr->InsertBefore(IR::LabelInstr::New(Js::OpCode::Label, this->m_func, true)); // helper label for uncommon path |
| IR::HelperCallOpnd * opndHelper = IR::HelperCallOpnd::New(IR::HelperUpdateNewScObjectCache, m_func); |
| |
| LoadScriptContext(insertInstr); |
| m_lowererMD.LoadHelperArgument(insertInstr, dst); |
| m_lowererMD.LoadHelperArgument(insertInstr, src1); |
| |
| instr = IR::Instr::New(Js::OpCode::Call, m_func); |
| instr->SetSrc1(opndHelper); |
| insertInstr->InsertBefore(instr); |
| m_lowererMD.LowerCall(instr, 0); |
| |
| // $fallThru: |
| insertInstr->InsertBefore(labelFallThru); |
| |
| return insertInstr; |
| } |
| |
| IR::Instr * |
| Lowerer::LowerNewScObjArray(IR::Instr *newObjInstr) |
| { |
| if (newObjInstr->HasEmptyArgOutChain()) |
| { |
| newObjInstr->FreeSrc2(); |
| return LowerNewScObjArrayNoArg(newObjInstr); |
| } |
| |
| IR::Instr* startMarkerInstr = nullptr; |
| |
| IR::Opnd *targetOpnd = newObjInstr->GetSrc1(); |
| Func *func = newObjInstr->m_func; |
| |
| if (!targetOpnd->IsAddrOpnd()) |
| { |
| if (!newObjInstr->HasBailOutInfo() || newObjInstr->OnlyHasLazyBailOut()) |
| { |
| return this->LowerNewScObject(newObjInstr, true, true); |
| } |
| |
| // Insert a temporary label before the instruction we're about to lower, so that we can return |
| // the first instruction above that needs to be lowered after we're done - regardless of argument |
| // list, StartCall, etc. |
| startMarkerInstr = InsertLoweredRegionStartMarker(newObjInstr); |
| |
| // For whatever reason, we couldn't do a fixed function check on the call target. |
| // Generate a runtime check on the target. |
| Assert( |
| newObjInstr->GetBailOutKind() == IR::BailOutOnNotNativeArray || |
| newObjInstr->GetBailOutKind() == BailOutInfo::WithLazyBailOut(IR::BailOutOnNotNativeArray) |
| ); |
| IR::LabelInstr *labelSkipBailOut = IR::LabelInstr::New(Js::OpCode::Label, func); |
| InsertCompareBranch( |
| targetOpnd, |
| LoadLibraryValueOpnd(newObjInstr, LibraryValue::ValueArrayConstructor), |
| Js::OpCode::BrEq_A, |
| true, |
| labelSkipBailOut, |
| newObjInstr); |
| |
| IR::ProfiledInstr *instrNew = IR::ProfiledInstr::New(newObjInstr->m_opcode, newObjInstr->UnlinkDst(), newObjInstr->UnlinkSrc1(), newObjInstr->UnlinkSrc2(), func); |
| instrNew->u.profileId = newObjInstr->AsProfiledInstr()->u.profileId; |
| newObjInstr->InsertAfter(instrNew); |
| newObjInstr->m_opcode = Js::OpCode::BailOut; |
| GenerateBailOut(newObjInstr); |
| |
| instrNew->InsertBefore(labelSkipBailOut); |
| newObjInstr = instrNew; |
| } |
| else |
| { |
| // Insert a temporary label before the instruction we're about to lower, so that we can return |
| // the first instruction above that needs to be lowered after we're done - regardless of argument |
| // list, StartCall, etc. |
| startMarkerInstr = InsertLoweredRegionStartMarker(newObjInstr); |
| } |
| |
| intptr_t weakFuncRef = 0; |
| Js::ArrayCallSiteInfo *arrayInfo = nullptr; |
| intptr_t arrayInfoAddr = 0; |
| Assert(newObjInstr->IsProfiledInstr()); |
| |
| IR::RegOpnd *resultObjOpnd = newObjInstr->GetDst()->AsRegOpnd(); |
| IR::Instr * insertInstr = newObjInstr->m_next; |
| |
| Js::ProfileId profileId = static_cast<Js::ProfileId>(newObjInstr->AsProfiledInstr()->u.profileId); |
| |
| // We may not have a profileId if we converted a NewScObject to a NewScObjArray |
| if (profileId != Js::Constants::NoProfileId) |
| { |
| arrayInfo = func->GetReadOnlyProfileInfo()->GetArrayCallSiteInfo(profileId); |
| arrayInfoAddr = func->GetReadOnlyProfileInfo()->GetArrayCallSiteInfoAddr(profileId); |
| Assert(arrayInfo); |
| weakFuncRef = func->GetWeakFuncRef(); |
| Assert(weakFuncRef); |
| } |
| |
| IR::LabelInstr * helperLabel = IR::LabelInstr::New(Js::OpCode::Label, func, true); |
| IR::LabelInstr *labelDone = IR::LabelInstr::New(Js::OpCode::Label, func); |
| IR::Opnd *linkOpnd = newObjInstr->GetSrc2(); |
| |
| Assert(linkOpnd->IsSymOpnd()); |
| StackSym *linkSym = linkOpnd->AsSymOpnd()->m_sym->AsStackSym(); |
| Assert(linkSym->IsSingleDef()); |
| IR::Instr* argInstr = linkSym->GetInstrDef(); |
| IR::Opnd *opndOfArrayCtor = argInstr->GetSrc1(); |
| const uint16 upperBoundValue = 8; |
| // Generate the fast path only if all of the following conditions are met: |
| // 1. It is the only parameter and it is a likely int |
| // 2a. If the 1st parameter is a variable, emit the fast path with checks |
| // 2b. If the 1st parameter is a constant, it is in the range 0 to upperBoundValue (inclusive) |
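| // E.g. "new Array(n)" where n is profiled as a likely int can take this fast path; a call with more |
| // than one argument, or with a constant length outside [0, upperBoundValue], falls back to the helper. |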
| if (opndOfArrayCtor->GetValueType().IsLikelyInt() && (opndOfArrayCtor->IsAddrOpnd() || opndOfArrayCtor->IsRegOpnd())) // #1 |
| { |
| if ((linkSym->GetArgSlotNum() == 2)) // 1. It is the only parameter |
| { |
| AssertMsg(linkSym->IsArgSlotSym(), "Not an argSlot symbol..."); |
| linkOpnd = argInstr->GetSrc2(); |
| |
| bool emittedFastPath = false; |
| // 2a. If 1st parameter is a variable, emit fast path with checks |
| if (opndOfArrayCtor->IsRegOpnd()) |
| { |
| if (!opndOfArrayCtor->AsRegOpnd()->IsNotInt()) |
| { |
| // 3. GenerateFastPath |
| if (arrayInfo && arrayInfo->IsNativeIntArray()) |
| { |
| emittedFastPath = GenerateProfiledNewScObjArrayFastPath<Js::JavascriptNativeIntArray>(newObjInstr, arrayInfo, arrayInfoAddr, weakFuncRef, helperLabel, labelDone, opndOfArrayCtor, |
| Js::JavascriptNativeIntArray::GetOffsetOfArrayCallSiteIndex(), |
| Js::JavascriptNativeIntArray::GetOffsetOfWeakFuncRef()); |
| } |
| else if (arrayInfo && arrayInfo->IsNativeFloatArray()) |
| { |
| emittedFastPath = GenerateProfiledNewScObjArrayFastPath<Js::JavascriptNativeFloatArray>(newObjInstr, arrayInfo, arrayInfoAddr, weakFuncRef, helperLabel, labelDone, opndOfArrayCtor, |
| Js::JavascriptNativeFloatArray::GetOffsetOfArrayCallSiteIndex(), |
| Js::JavascriptNativeFloatArray::GetOffsetOfWeakFuncRef()); |
| } |
| else |
| { |
| emittedFastPath = GenerateProfiledNewScObjArrayFastPath<Js::JavascriptArray>(newObjInstr, arrayInfo, arrayInfoAddr, weakFuncRef, helperLabel, labelDone, opndOfArrayCtor, 0, 0); |
| } |
| } |
| } |
| // 2b. If the 1st parameter is a constant, it is in the range 0 to upperBoundValue (inclusive) |
| else |
| { |
| int32 length = linkSym->GetIntConstValue(); |
| if (length >= 0 && length <= upperBoundValue) |
| { |
| emittedFastPath = GenerateProfiledNewScObjArrayFastPath(newObjInstr, arrayInfo, arrayInfoAddr, weakFuncRef, (uint32)length, labelDone, false); |
| } |
| } |
| // If we emitted a fast path above, move the startCall/argOut instructions right before the helper |
| if (emittedFastPath) |
| { |
| linkSym = linkOpnd->AsRegOpnd()->m_sym->AsStackSym(); |
| AssertMsg(!linkSym->IsArgSlotSym() && linkSym->m_isSingleDef, "Arg tree not single def..."); |
| |
| IR::Instr* startCallInstr = linkSym->m_instrDef; |
| AssertMsg(startCallInstr->GetArgOutCount(false) == 2, "Generating ArrayFastPath for more than 1 parameter not allowed."); |
| |
| // Move the startCall/argOut instructions right before the helper |
| startCallInstr->Move(newObjInstr); |
| argInstr->Move(newObjInstr); |
| } |
| } |
| } |
| newObjInstr->UnlinkSrc1(); |
| |
| IR::Opnd *profileOpnd = IR::AddrOpnd::New(arrayInfoAddr, IR::AddrOpndKindDynamicArrayCallSiteInfo, func); |
| this->m_lowererMD.LoadNewScObjFirstArg(newObjInstr, profileOpnd); |
| |
| IR::JnHelperMethod helperMethod = IR::HelperScrArr_ProfiledNewInstance; |
| |
| newObjInstr->SetSrc1(IR::HelperCallOpnd::New(helperMethod, func)); |
| newObjInstr = GenerateDirectCall(newObjInstr, targetOpnd, Js::CallFlags_New); |
| |
| IR::BranchInstr* branchInstr = InsertCompareBranch( |
| IR::IndirOpnd::New(resultObjOpnd, 0, TyMachPtr, func), |
| LoadVTableValueOpnd(insertInstr, VTableValue::VtableJavascriptArray), |
| Js::OpCode::BrEq_A, |
| true, |
| labelDone, |
| insertInstr); |
| |
| InsertObjectPoison(resultObjOpnd, branchInstr, insertInstr, true); |
| // We know we have a native array, so store the weak ref and call site index. |
| InsertMove( |
| IR::IndirOpnd::New(resultObjOpnd, Js::JavascriptNativeArray::GetOffsetOfArrayCallSiteIndex(), TyUint16, func), |
| IR::Opnd::CreateProfileIdOpnd(profileId, func), |
| insertInstr); |
| InsertMove( |
| IR::IndirOpnd::New(resultObjOpnd, Js::JavascriptNativeArray::GetOffsetOfWeakFuncRef(), TyMachReg, func), |
| IR::AddrOpnd::New(weakFuncRef, IR::AddrOpndKindDynamicFunctionBodyWeakRef, func), |
| insertInstr); |
| insertInstr->InsertBefore(labelDone); |
| |
| return RemoveLoweredRegionStartMarker(startMarkerInstr); |
| } |
| |
| IR::Instr * |
| Lowerer::LowerNewScObjArrayNoArg(IR::Instr *newObjInstr) |
| { |
| IR::Opnd *targetOpnd = newObjInstr->GetSrc1(); |
| Func *func = newObjInstr->m_func; |
| |
| IR::Instr* startMarkerInstr = nullptr; |
| |
| if (!targetOpnd->IsAddrOpnd()) |
| { |
| if (!newObjInstr->HasBailOutInfo() || newObjInstr->OnlyHasLazyBailOut()) |
| { |
| return this->LowerNewScObject(newObjInstr, true, false); |
| } |
| |
| // Insert a temporary label before the instruction we're about to lower, so that we can return |
| // the first instruction above that needs to be lowered after we're done - regardless of argument |
| // list, StartCall, etc. |
| startMarkerInstr = InsertLoweredRegionStartMarker(newObjInstr); |
| |
| // For whatever reason, we couldn't do a fixed function check on the call target. |
| // Generate a runtime check on the target. |
| Assert( |
| newObjInstr->GetBailOutKind() == IR::BailOutOnNotNativeArray || |
| newObjInstr->GetBailOutKind() == BailOutInfo::WithLazyBailOut(IR::BailOutOnNotNativeArray) |
| ); |
| IR::LabelInstr *labelSkipBailOut = IR::LabelInstr::New(Js::OpCode::Label, func); |
| InsertCompareBranch( |
| targetOpnd, |
| LoadLibraryValueOpnd(newObjInstr, LibraryValue::ValueArrayConstructor), |
| Js::OpCode::BrEq_A, |
| true, |
| labelSkipBailOut, |
| newObjInstr); |
| |
| IR::ProfiledInstr *instrNew = IR::ProfiledInstr::New(newObjInstr->m_opcode, newObjInstr->UnlinkDst(), newObjInstr->UnlinkSrc1(), func); |
| instrNew->u.profileId = newObjInstr->AsProfiledInstr()->u.profileId; |
| newObjInstr->InsertAfter(instrNew); |
| newObjInstr->m_opcode = Js::OpCode::BailOut; |
| GenerateBailOut(newObjInstr); |
| |
| instrNew->InsertBefore(labelSkipBailOut); |
| newObjInstr = instrNew; |
| } |
| else |
| { |
| // Insert a temporary label before the instruction we're about to lower, so that we can return |
| // the first instruction above that needs to be lowered after we're done - regardless of argument |
| // list, StartCall, etc. |
| startMarkerInstr = InsertLoweredRegionStartMarker(newObjInstr); |
| } |
| |
| Assert(newObjInstr->IsProfiledInstr()); |
| |
| intptr_t weakFuncRef = 0; |
| intptr_t arrayInfoAddr = 0; |
| Js::ArrayCallSiteInfo *arrayInfo = nullptr; |
| Js::ProfileId profileId = static_cast<Js::ProfileId>(newObjInstr->AsProfiledInstr()->u.profileId); |
| if (profileId != Js::Constants::NoProfileId) |
| { |
| arrayInfo = func->GetReadOnlyProfileInfo()->GetArrayCallSiteInfo(profileId); |
| arrayInfoAddr = func->GetReadOnlyProfileInfo()->GetArrayCallSiteInfoAddr(profileId); |
| Assert(arrayInfo); |
| weakFuncRef = func->GetWeakFuncRef(); |
| Assert(weakFuncRef); |
| } |
| |
| IR::LabelInstr *labelDone = IR::LabelInstr::New(Js::OpCode::Label, func); |
| GenerateProfiledNewScObjArrayFastPath(newObjInstr, arrayInfo, arrayInfoAddr, weakFuncRef, 0, labelDone, true); |
| newObjInstr->InsertAfter(labelDone); |
| |
| m_lowererMD.LoadHelperArgument(newObjInstr, IR::AddrOpnd::New(weakFuncRef, IR::AddrOpndKindDynamicFunctionBodyWeakRef, func)); |
| m_lowererMD.LoadHelperArgument(newObjInstr, IR::AddrOpnd::New(arrayInfoAddr, IR::AddrOpndKindDynamicArrayCallSiteInfo, func)); |
| |
| LoadScriptContext(newObjInstr); |
| |
| m_lowererMD.LoadHelperArgument(newObjInstr, targetOpnd); |
| newObjInstr->UnlinkSrc1(); |
| newObjInstr->SetSrc1(IR::HelperCallOpnd::New(IR::HelperScrArr_ProfiledNewInstanceNoArg, func)); |
| m_lowererMD.LowerCall(newObjInstr, 0); |
| |
| return RemoveLoweredRegionStartMarker(startMarkerInstr); |
| } |
| |
| ///---------------------------------------------------------------------------- |
| /// |
| /// Lowerer::LowerPrologEpilog |
| /// |
| ///---------------------------------------------------------------------------- |
| void |
| Lowerer::LowerPrologEpilog() |
| { |
| if (m_func->GetJITFunctionBody()->IsCoroutine()) |
| { |
| LowerGeneratorResumeJumpTable(); |
| } |
| |
| IR::Instr * instr; |
| |
| instr = m_func->m_headInstr; |
| AssertMsg(instr->IsEntryInstr(), "First instr isn't an EntryInstr..."); |
| |
| m_lowererMD.LowerEntryInstr(instr->AsEntryInstr()); |
| |
| instr = m_func->m_exitInstr; |
| AssertMsg(instr->IsExitInstr(), "Last instr isn't an ExitInstr..."); |
| |
| m_lowererMD.LowerExitInstr(instr->AsExitInstr()); |
| } |
| |
| void |
| Lowerer::LowerPrologEpilogAsmJs() |
| { |
| IR::Instr * instr; |
| |
| instr = m_func->m_headInstr; |
| AssertMsg(instr->IsEntryInstr(), "First instr isn't an EntryInstr..."); |
| |
| m_lowererMD.LowerEntryInstr(instr->AsEntryInstr()); |
| |
| instr = m_func->m_exitInstr; |
| AssertMsg(instr->IsExitInstr(), "Last instr isn't an ExitInstr..."); |
| |
| m_lowererMD.LowerExitInstrAsmJs(instr->AsExitInstr()); |
| } |
| |
| void |
| Lowerer::LowerGeneratorResumeJumpTable() |
| { |
| Assert(m_func->GetJITFunctionBody()->IsCoroutine()); |
| |
| IR::Instr * jumpTableInstr = m_func->m_headInstr; |
| AssertMsg(jumpTableInstr->IsEntryInstr(), "First instr isn't an EntryInstr..."); |
| |
| // Hope to do away with this linked-list scan by moving this lowering to a post-prolog-epilog/pre-encoder phase that is common to all architectures (currently such a phase is only available on amd64/arm) |
| while (jumpTableInstr->m_opcode != Js::OpCode::GeneratorResumeJumpTable) |
| { |
| jumpTableInstr = jumpTableInstr->m_next; |
| } |
| |
| IR::Opnd * srcOpnd = jumpTableInstr->UnlinkSrc1(); |
| |
| m_func->MapYieldOffsetResumeLabels([&](int i, const YieldOffsetResumeLabel& yorl) |
| { |
| uint32 offset = yorl.First(); |
| IR::LabelInstr * label = yorl.Second(); |
| |
| if (label != nullptr && label->m_hasNonBranchRef) |
| { |
| // Also fix up the bailout at the label with the jump to epilog that was not emitted in GenerateBailOut() |
| Assert(label->m_prev->HasBailOutInfo()); |
| GenerateJumpToEpilogForBailOut(label->m_prev->GetBailOutInfo(), label->m_prev); |
| } |
| else if (label == nullptr) |
| { |
| label = m_func->m_bailOutNoSaveLabel; |
| } |
| |
| // For each offset label pair, insert a compare of the offset and branch if equal to the label |
| InsertCompareBranch(srcOpnd, IR::IntConstOpnd::New(offset, TyUint32, m_func), Js::OpCode::BrSrEq_A, label, jumpTableInstr); |
| }); |
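| // Net effect at function entry is a linear dispatch sequence (sketch; names are illustrative): |
| //     CMP resumeOffset, yieldOffset_0 |
| //     BrSrEq_A $resumeLabel_0 |
| //     CMP resumeOffset, yieldOffset_1 |
| //     BrSrEq_A $resumeLabel_1 |
| //     ... |
| // falling through to the normal function start when no offset matches. |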
| |
| jumpTableInstr->Remove(); |
| } |
| |
| void |
| Lowerer::DoInterruptProbes() |
| { |
| this->m_func->SetHasInstrNumber(true); |
| uint instrCount = 1; |
| FOREACH_INSTR_IN_FUNC(instr, this->m_func) |
| { |
| instr->SetNumber(instrCount++); |
| if (instr->IsLabelInstr()) |
| { |
| IR::LabelInstr *labelInstr = instr->AsLabelInstr(); |
| if (labelInstr->m_isLoopTop) |
| { |
| // For every loop top label, insert the following: |
| |
| // cmp sp, ThreadContext::stackLimitForCurrentThread |
| // bgt $continue |
| // $helper: |
| // call JavascriptOperators::ScriptAbort |
| // b $exit |
| // $continue: |
| |
| IR::LabelInstr *newLabel = IR::LabelInstr::New(Js::OpCode::Label, this->m_func); |
| labelInstr->InsertAfter(newLabel); |
| this->InsertOneLoopProbe(newLabel, newLabel); |
| } |
| } |
| } |
| NEXT_INSTR_IN_FUNC; |
| } |
| |
| // Insert an interrupt probe at each loop back branch. (Currently uncalled, since we're inserting |
| // probes at loop tops instead of back edges, but kept around because it may prove useful.) |
| uint |
| Lowerer::DoLoopProbeAndNumber(IR::BranchInstr *branchInstr) |
| { |
| IR::LabelInstr *labelInstr = branchInstr->GetTarget(); |
| if (labelInstr == nullptr || labelInstr->GetNumber() == 0) |
| { |
| // Forward branch (possibly an indirect jump after try-catch-finally); nothing to do. |
| return branchInstr->GetNumber() + 1; |
| } |
| |
| Assert(labelInstr->m_isLoopTop); |
| |
| // Insert a stack probe at this branch. Number all the instructions we insert |
| // and return the next instruction number. |
| |
| uint number = branchInstr->GetNumber(); |
| IR::Instr *instrPrev = branchInstr->m_prev; |
| IR::Instr *instrNext = branchInstr->m_next; |
| if (branchInstr->IsUnconditional()) |
| { |
| // B $loop ==> |
| |
| // cmp [], 0 |
| // beq $loop |
| // $helper: |
| // call abort |
| // b $exit |
| |
| this->InsertOneLoopProbe(branchInstr, labelInstr); |
| branchInstr->Remove(); |
| } |
| else |
| { |
| // Bcc $loop ==> |
| |
| // Binv $notloop |
| // cmp [], 0 |
| // beq $loop |
| // $helper: |
| // call abort |
| // b $exit |
| // $notloop: |
| |
| IR::LabelInstr *loopExitLabel = IR::LabelInstr::New(Js::OpCode::Label, this->m_func); |
| branchInstr->SetTarget(loopExitLabel); |
| LowererMD::InvertBranch(branchInstr); |
| branchInstr->InsertAfter(loopExitLabel); |
| |
| this->InsertOneLoopProbe(loopExitLabel, labelInstr); |
| } |
| |
| FOREACH_INSTR_IN_RANGE(instr, instrPrev->m_next, instrNext->m_prev) |
| { |
| instr->SetNumber(number++); |
| } |
| NEXT_INSTR_IN_RANGE; |
| |
| return number; |
| } |
| |
| void |
| Lowerer::InsertOneLoopProbe(IR::Instr *insertInstr, IR::LabelInstr *loopLabel) |
| { |
| // Insert one interrupt probe at the given instruction. Probe the stack and call the abort helper |
| // directly if the probe fails. |
| |
| IR::Opnd *memRefOpnd = IR::MemRefOpnd::New( |
| m_func->GetThreadContextInfo()->GetThreadStackLimitAddr(), |
| TyMachReg, this->m_func); |
| |
| IR::RegOpnd *regStackPointer = IR::RegOpnd::New( |
| NULL, this->m_lowererMD.GetRegStackPointer(), TyMachReg, this->m_func); |
| |
| InsertCompareBranch(regStackPointer, memRefOpnd, Js::OpCode::BrGt_A, loopLabel, insertInstr); |
| |
| IR::LabelInstr *helperLabel = IR::LabelInstr::New(Js::OpCode::Label, this->m_func, true); |
| insertInstr->InsertBefore(helperLabel); |
| |
| IR::HelperCallOpnd *helperOpnd = IR::HelperCallOpnd::New(IR::HelperScriptAbort, this->m_func); |
| IR::Instr *instr = IR::Instr::New(Js::OpCode::Call, this->m_func); |
| instr->SetSrc1(helperOpnd); |
| insertInstr->InsertBefore(instr); |
| this->m_lowererMD.LowerCall(instr, 0); |
| |
| // Jump to the exit after the helper call. This instruction will never be reached, but the jump |
| // indicates that nothing is live after the call (to avoid useless spills in code that will |
| // be executed). |
| instr = this->m_func->m_exitInstr->GetPrevRealInstrOrLabel(); |
| if (instr->IsLabelInstr()) |
| { |
| helperLabel = instr->AsLabelInstr(); |
| } |
| else |
| { |
| helperLabel = IR::LabelInstr::New(Js::OpCode::Label, this->m_func); |
| this->m_func->m_exitInstr->InsertBefore(helperLabel); |
| } |
| |
| instr = IR::BranchInstr::New(LowererMD::MDUncondBranchOpcode, helperLabel, this->m_func); |
| insertInstr->InsertBefore(instr); |
| } |
| |
| ///---------------------------------------------------------------------------- |
| /// |
| /// Lowerer::LoadPropertySymAsArgument |
| /// |
| /// Generate code to pass a fieldSym as argument to a helper. |
| ///---------------------------------------------------------------------------- |
| IR::Instr * |
| Lowerer::LoadPropertySymAsArgument(IR::Instr *instr, IR::Opnd *fieldSrc) |
| { |
| IR::Instr * instrPrev; |
| AssertMsg(fieldSrc->IsSymOpnd() && fieldSrc->AsSymOpnd()->m_sym->IsPropertySym(), "Expected fieldSym as src of LdFld"); |
| |
| IR::SymOpnd *symOpnd = fieldSrc->AsSymOpnd(); |
| PropertySym * fieldSym = symOpnd->m_sym->AsPropertySym(); |
| |
| IR::IntConstOpnd * indexOpnd = IR::IntConstOpnd::New(fieldSym->m_propertyId, TyInt32, m_func, /*dontEncode*/true); |
| instrPrev = m_lowererMD.LoadHelperArgument(instr, indexOpnd); |
| |
| IR::RegOpnd * instanceOpnd = symOpnd->CreatePropertyOwnerOpnd(m_func); |
| m_lowererMD.LoadHelperArgument(instr, instanceOpnd); |
| |
| return instrPrev; |
| } |
| |
| ///---------------------------------------------------------------------------- |
| /// |
| /// Lowerer::LoadFunctionBodyAsArgument |
| /// |
| /// Special case: the "property ID" is a key into the ScriptContext's FunctionBody map |
| ///---------------------------------------------------------------------------- |
| IR::Instr * |
| Lowerer::LoadFunctionBodyAsArgument(IR::Instr *instr, IR::IntConstOpnd * functionBodySlotOpnd, IR::RegOpnd * envOpnd) |
| { |
| IR::Instr * instrPrev; |
| |
| // We need to pass in the function reference; we can't embed the pointer to the function proxy here. |
| // The function proxy may be deferred parsed/serialized, and may 'progress' to a real function body after it is undeferred, |
| // at which point the deferred function proxy may be collected. |
| // Just pass the address where we will find the function proxy/body. |
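| // Sketch of the runtime side (hedged; the helper's exact code lives in the runtime): the helper |
| // dereferences the passed slot at call time, e.g. "functionInfo = *infoRef;", so it always sees |
| // the current proxy/body regardless of any undeferral that happened in between. |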
| |
| Js::FunctionInfoPtrPtr infoRef = instr->m_func->GetJITFunctionBody()->GetNestedFuncRef((uint)functionBodySlotOpnd->GetValue()); |
| AssertMsg(infoRef, "Expected FunctionProxy for index of NewScFunc or NewScGenFunc opnd"); |
| |
| IR::AddrOpnd * indexOpnd = IR::AddrOpnd::New((Js::Var)infoRef, IR::AddrOpndKindDynamicMisc, m_func); |
| instrPrev = m_lowererMD.LoadHelperArgument(instr, indexOpnd); |
| |
| m_lowererMD.LoadHelperArgument(instr, envOpnd); |
| |
| return instrPrev; |
| } |
| |
| IR::Instr * |
| Lowerer::LowerProfiledLdFld(IR::JitProfilingInstr *ldFldInstr) |
| { |
| const auto instrPrev = ldFldInstr->m_prev; |
| |
| auto src = ldFldInstr->UnlinkSrc1(); |
| AssertMsg(src->IsSymOpnd() && src->AsSymOpnd()->m_sym->IsPropertySym(), "Expected property sym as src"); |
| |
| IR::JnHelperMethod helper = IR::HelperInvalid; |
| switch (ldFldInstr->m_opcode) |
| { |
| case Js::OpCode::LdFld: |
| helper = IR::HelperProfiledLdFld; |
| goto ldFldCommon; |
| case Js::OpCode::LdRootFld: |
| helper = IR::HelperProfiledLdRootFld; |
| goto ldFldCommon; |
| case Js::OpCode::LdMethodFld: |
| helper = IR::HelperProfiledLdMethodFld; |
| goto ldFldCommon; |
| case Js::OpCode::LdRootMethodFld: |
| helper = IR::HelperProfiledLdRootMethodFld; |
| goto ldFldCommon; |
| case Js::OpCode::LdFldForCallApplyTarget: |
| helper = IR::HelperProfiledLdFld_CallApplyTarget; |
| goto ldFldCommon; |
| case Js::OpCode::LdFldForTypeOf: |
| helper = IR::HelperProfiledLdFldForTypeOf; |
| goto ldFldCommon; |
| case Js::OpCode::LdRootFldForTypeOf: |
| helper = IR::HelperProfiledLdRootFldForTypeOf; |
| goto ldFldCommon; |
| |
| ldFldCommon: |
| { |
| Assert(ldFldInstr->profileId == Js::Constants::NoProfileId); |
| |
| /* |
| Var ProfilingHelpers::ProfiledLdFld_Jit( |
| const Var instance, |
| const PropertyId propertyId, |
| const InlineCacheIndex inlineCacheIndex, |
| void *const framePointer) |
| */ |
| |
| m_lowererMD.LoadHelperArgument(ldFldInstr, IR::Opnd::CreateFramePointerOpnd(m_func)); |
| m_lowererMD.LoadHelperArgument( |
| ldFldInstr, |
| IR::Opnd::CreateInlineCacheIndexOpnd(src->AsPropertySymOpnd()->m_inlineCacheIndex, m_func)); |
| LoadPropertySymAsArgument(ldFldInstr, src); |
| break; |
| } |
| |
| case Js::OpCode::LdSuperFld: |
| { |
| Assert(ldFldInstr->profileId == Js::Constants::NoProfileId); |
| |
| IR::Opnd * src2 = nullptr; |
| |
| /* |
| Var ProfilingHelpers::ProfiledLdSuperFld_Jit( |
| const Var instance, |
| const PropertyId propertyId, |
| const InlineCacheIndex inlineCacheIndex, |
| void *const framePointer, |
| const Var thisInstance) |
| */ |
| |
| src2 = ldFldInstr->UnlinkSrc2(); |
| |
| m_lowererMD.LoadHelperArgument(ldFldInstr, src2 ); |
| m_lowererMD.LoadHelperArgument(ldFldInstr, IR::Opnd::CreateFramePointerOpnd(m_func)); |
| m_lowererMD.LoadHelperArgument( |
| ldFldInstr, |
| IR::Opnd::CreateInlineCacheIndexOpnd(src->AsPropertySymOpnd()->m_inlineCacheIndex, m_func)); |
| LoadPropertySymAsArgument(ldFldInstr, src); |
| helper = IR::HelperProfiledLdSuperFld; |
| break; |
| } |
| |
| case Js::OpCode::LdLen_A: |
| Assert(ldFldInstr->profileId != Js::Constants::NoProfileId); |
| |
| /* |
| Var ProfilingHelpers::ProfiledLdLen_Jit( |
| const Var instance, |
| const PropertyId propertyId, |
| const InlineCacheIndex inlineCacheIndex, |
| const ProfileId profileId, |
| void *const framePointer) |
| */ |
| |
| m_lowererMD.LoadHelperArgument(ldFldInstr, IR::Opnd::CreateFramePointerOpnd(m_func)); |
| m_lowererMD.LoadHelperArgument(ldFldInstr, IR::Opnd::CreateProfileIdOpnd(ldFldInstr->profileId, m_func)); |
| m_lowererMD.LoadHelperArgument(ldFldInstr, IR::Opnd::CreateInlineCacheIndexOpnd(src->AsPropertySymOpnd()->m_inlineCacheIndex, m_func)); |
| LoadPropertySymAsArgument(ldFldInstr, src); |
| helper = IR::HelperProfiledLdLen; |
| break; |
| |
| default: |
| Assert(false); |
| } |
| |
| ldFldInstr->SetSrc1(IR::HelperCallOpnd::New(helper, m_func)); |
| m_lowererMD.LowerCall(ldFldInstr, 0); |
| |
| return instrPrev; |
| } |
| |
| void |
| Lowerer::GenerateProtoLdFldFromFlagInlineCache( |
| IR::Instr * insertBeforeInstr, |
| IR::Opnd * opndDst, |
| IR::RegOpnd * opndInlineCache, |
| IR::LabelInstr * labelFallThru, |
| bool isInlineSlot) |
| { |
| // Generate: |
| // |
| // s1 = MOV [&(inlineCache->u.accessor.object)] -- load the cached prototype object |
| // s1 = MOV [&s1->slots] -- load the slot array |
| // s2 = MOVZXW [&(inlineCache->u.accessor.slotIndex)] -- load the cached slot index |
| // dst = MOV [s1 + s2*4] |
| // JMP $fallthru |
| |
| IR::Opnd* inlineCacheObjOpnd; |
| IR::IndirOpnd * opndIndir; |
| IR::RegOpnd * opndObjSlots = nullptr; |
| |
| inlineCacheObjOpnd = IR::IndirOpnd::New(opndInlineCache, (int32)offsetof(Js::InlineCache, u.accessor.object), TyMachReg, this->m_func); |
| |
| // s1 = MOV [&(inlineCache->u.accessor.object)] -- load the cached prototype object |
| IR::RegOpnd *opndObject = IR::RegOpnd::New(TyMachReg, this->m_func); |
| InsertMove(opndObject, inlineCacheObjOpnd, insertBeforeInstr, false); |
| |
| if (!isInlineSlot) |
| { |
| // s1 = MOV [&s1->slots] -- load the slot array |
| opndObjSlots = IR::RegOpnd::New(TyMachReg, this->m_func); |
| opndIndir = IR::IndirOpnd::New(opndObject, Js::DynamicObject::GetOffsetOfAuxSlots(), TyMachReg, this->m_func); |
| InsertMove(opndObjSlots, opndIndir, insertBeforeInstr, false); |
| } |
| |
| // s2 = MOVZXW [&(inlineCache->u.accessor.slotIndex)] -- load the cached slot index |
| IR::RegOpnd *opndSlotIndex = IR::RegOpnd::New(TyMachReg, this->m_func); |
| IR::Opnd* slotIndexOpnd = IR::IndirOpnd::New(opndInlineCache, (int32)offsetof(Js::InlineCache, u.accessor.slotIndex), TyUint16, this->m_func); |
| InsertMove(opndSlotIndex, slotIndexOpnd, insertBeforeInstr, false); |
| |
| if (isInlineSlot) |
| { |
| // dst = MOV [s1 + s2*4] |
| opndIndir = IR::IndirOpnd::New(opndObject, opndSlotIndex, m_lowererMD.GetDefaultIndirScale(), TyMachReg, this->m_func); |
| } |
| else |
| { |
| // dst = MOV [s1 + s2*4] |
| opndIndir = IR::IndirOpnd::New(opndObjSlots, opndSlotIndex, m_lowererMD.GetDefaultIndirScale(), TyMachReg, this->m_func); |
| } |
| InsertMove(opndDst, opndIndir, insertBeforeInstr, false); |
| |
| // JMP $fallthru |
| InsertBranch(Js::OpCode::Br, labelFallThru, insertBeforeInstr); |
| } |
| |
| void |
| Lowerer::GenerateLocalLdFldFromFlagInlineCache( |
| IR::Instr * insertBeforeInstr, |
| IR::RegOpnd * opndBase, |
| IR::Opnd * opndDst, |
| IR::RegOpnd * opndInlineCache, |
| IR::LabelInstr * labelFallThru, |
| bool isInlineSlot) |
| { |
| // Generate: |
| // |
| // s1 = MOV [&base->slots] -- load the slot array (aux slots only) |
| // s2 = MOVZXW [&(inlineCache->u.accessor.slotIndex)] -- load the cached slot index |
| // dst = MOV [s1 + s2*4] |
| // JMP $fallthru |
| |
| IR::IndirOpnd * opndIndir; |
| IR::RegOpnd * opndObjSlots = nullptr; |
| |
| if (!isInlineSlot) |
| { |
| // s1 = MOV [&s1->slots] -- load the slot array |
| opndObjSlots = IR::RegOpnd::New(TyMachReg, this->m_func); |
| opndIndir = IR::IndirOpnd::New(opndBase, Js::DynamicObject::GetOffsetOfAuxSlots(), TyMachReg, this->m_func); |
| InsertMove(opndObjSlots, opndIndir, insertBeforeInstr, false); |
| } |
| |
| // s2 = MOVZXW [&(inlineCache->u.accessor.slotIndex)] -- load the cached slot index |
| IR::RegOpnd *opndSlotIndex = IR::RegOpnd::New(TyMachReg, this->m_func); |
| IR::Opnd* slotIndexOpnd = IR::IndirOpnd::New(opndInlineCache, (int32)offsetof(Js::InlineCache, u.accessor.slotIndex), TyUint16, this->m_func); |
| InsertMove(opndSlotIndex, slotIndexOpnd, insertBeforeInstr, false); |
| |
| if (isInlineSlot) |
| { |
| // dst = MOV [s1 + s2*4] |
| opndIndir = IR::IndirOpnd::New(opndBase, opndSlotIndex, m_lowererMD.GetDefaultIndirScale(), TyMachReg, this->m_func); |
| } |
| else |
| { |
| // dst = MOV [s1 + s2*4] |
| opndIndir = IR::IndirOpnd::New(opndObjSlots, opndSlotIndex, m_lowererMD.GetDefaultIndirScale(), TyMachReg, this->m_func); |
| } |
| InsertMove(opndDst, opndIndir, insertBeforeInstr, false); |
| |
| // JMP $fallthru |
| InsertBranch(Js::OpCode::Br, labelFallThru, insertBeforeInstr); |
| } |
| |
| void |
| Lowerer::GenerateFlagProtoCheck( |
| IR::Instr * insertBeforeInstr, |
| IR::RegOpnd * opndInlineCache, |
| IR::LabelInstr * labelNotOnProto) |
| { |
| // Generate: |
| // |
| // TEST [&(inlineCache->u.accessor.isOnProto)], Js::FlagIsOnProto |
| // JEQ $next |
| IR::Opnd* flagsOpnd; |
| flagsOpnd = IR::IndirOpnd::New(opndInlineCache, (int32)offsetof(Js::InlineCache, u.accessor.rawUInt16), TyInt8, insertBeforeInstr->m_func); |
| |
| uint isOnProtoFlagMask = Js::InlineCache::GetIsOnProtoFlagMask(); |
| InsertTestBranch(flagsOpnd, IR::IntConstOpnd::New(isOnProtoFlagMask, TyInt8, this->m_func), Js::OpCode::BrEq_A, labelNotOnProto, insertBeforeInstr); |
| } |
| |
| ///---------------------------------------------------------------------------- |
| /// |
| /// Lowerer::GenerateFastLdMethodFromFlags |
| /// |
| /// Make use of the helper to cache the type and slot index used to do a LdFld |
| /// and do an inline load from the appropriate slot if the type hasn't changed |
| /// since the last time this LdFld was executed. |
| /// |
| ///---------------------------------------------------------------------------- |
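| // Rough shape of the emitted fast path (an illustrative sketch; the code below is authoritative): |
| //     if (type matches the flag cache)        -> proto or local flag-cache load -> $fallthru |
| //     else if (tagged type matches the cache) -> proto or local flag-cache load -> $fallthru |
| //     else                                    -> $bailout |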
| |
| bool |
| Lowerer::GenerateFastLdMethodFromFlags(IR::Instr * instrLdFld) |
| { |
| IR::LabelInstr * labelFallThru; |
| IR::LabelInstr * bailOutLabel; |
| IR::Opnd * opndSrc; |
| IR::Opnd * opndDst; |
| IR::RegOpnd * opndBase; |
| IR::RegOpnd * opndType; |
| IR::RegOpnd * opndInlineCache; |
| |
| opndSrc = instrLdFld->GetSrc1(); |
| |
| AssertMsg(opndSrc->IsSymOpnd() && opndSrc->AsSymOpnd()->IsPropertySymOpnd() && opndSrc->AsSymOpnd()->m_sym->IsPropertySym(), |
| "Expected property sym operand as src of LdFldFlags"); |
| |
| IR::PropertySymOpnd * propertySymOpnd = opndSrc->AsPropertySymOpnd(); |
| |
| Assert(!instrLdFld->DoStackArgsOpt()); |
| |
| if (propertySymOpnd->IsTypeCheckSeqCandidate()) |
| { |
| AssertMsg(propertySymOpnd->HasObjectTypeSym(), "Type optimized property sym operand without a type sym?"); |
| StackSym *typeSym = propertySymOpnd->GetObjectTypeSym(); |
| opndType = IR::RegOpnd::New(typeSym, TyMachReg, this->m_func); |
| } |
| else |
| { |
| opndType = IR::RegOpnd::New(TyMachReg, this->m_func); |
| } |
| |
| opndBase = propertySymOpnd->CreatePropertyOwnerOpnd(m_func); |
| opndDst = instrLdFld->GetDst(); |
| opndInlineCache = IR::RegOpnd::New(TyMachPtr, this->m_func); |
| |
| labelFallThru = IR::LabelInstr::New(Js::OpCode::Label, this->m_func); |
| // Label to jump to (or fall through to) when bailing out |
| bailOutLabel = IR::LabelInstr::New(Js::OpCode::Label, instrLdFld->m_func, true /* isOpHelper */); |
| |
| InsertMove(opndInlineCache, LoadRuntimeInlineCacheOpnd(instrLdFld, propertySymOpnd), instrLdFld); |
| IR::LabelInstr * labelFlagAux = IR::LabelInstr::New(Js::OpCode::Label, this->m_func); |
| |
| // Check the flag cache with the untagged type |
| GenerateObjectTestAndTypeLoad(instrLdFld, opndBase, opndType, bailOutLabel); |
| GenerateFlagInlineCacheCheck(instrLdFld, opndType, opndInlineCache, labelFlagAux); |
| IR::LabelInstr * labelFlagInlineLocal = IR::LabelInstr::New(Js::OpCode::Label, this->m_func); |
| GenerateFlagProtoCheck(instrLdFld, opndInlineCache, labelFlagInlineLocal); |
| GenerateProtoLdFldFromFlagInlineCache(instrLdFld, opndDst, opndInlineCache, labelFallThru, true); |
| instrLdFld->InsertBefore(labelFlagInlineLocal); |
| GenerateLocalLdFldFromFlagInlineCache(instrLdFld, opndBase, opndDst, opndInlineCache, labelFallThru, true); |
| |
| // Check the flag cache with the tagged type |
| instrLdFld->InsertBefore(labelFlagAux); |
| IR::RegOpnd * opndTaggedType = IR::RegOpnd::New(TyMachReg, this->m_func); |
| m_lowererMD.GenerateLoadTaggedType(instrLdFld, opndType, opndTaggedType); |
| GenerateFlagInlineCacheCheck(instrLdFld, opndTaggedType, opndInlineCache, bailOutLabel); |
| IR::LabelInstr * labelFlagAuxLocal = IR::LabelInstr::New(Js::OpCode::Label, this->m_func); |
| GenerateFlagProtoCheck(instrLdFld, opndInlineCache, labelFlagAuxLocal); |
| GenerateProtoLdFldFromFlagInlineCache(instrLdFld, opndDst, opndInlineCache, labelFallThru, false); |
| instrLdFld->InsertBefore(labelFlagAuxLocal); |
| GenerateLocalLdFldFromFlagInlineCache(instrLdFld, opndBase, opndDst, opndInlineCache, labelFallThru, false); |
| |
| instrLdFld->InsertBefore(bailOutLabel); |
| instrLdFld->InsertAfter(labelFallThru); |
| // Generate the bailout helper call. 'instr' will be changed to the CALL into the bailout function, so it can't be used for |
| // ordering instructions anymore. |
| instrLdFld->UnlinkSrc1(); |
| GenerateBailOut(instrLdFld); |
| |
| return true; |
| } |
| |
| ///---------------------------------------------------------------------------- |
| /// |
| /// Lowerer::LowerLdFld |
| /// |
| /// Lower an instruction (LdFld, ScopedLdFld) that takes a property |
| /// reference as a source and puts a result in a register. |
| /// |
| ///---------------------------------------------------------------------------- |
| IR::Instr * |
| Lowerer::LowerLdFld( |
| IR::Instr * ldFldInstr, |
| IR::JnHelperMethod helperMethod, |
| IR::JnHelperMethod polymorphicHelperMethod, |
| bool useInlineCache, |
| IR::LabelInstr *labelBailOut, |
| bool isHelper) |
| { |
| if (ldFldInstr->IsJitProfilingInstr()) |
| { |
| // If we want to profile, do something completely different |
| return this->LowerProfiledLdFld(ldFldInstr->AsJitProfilingInstr()); |
| } |
| |
| IR::Opnd *src; |
| IR::Instr *instrPrev = ldFldInstr->m_prev; |
| |
| src = ldFldInstr->UnlinkSrc1(); |
| if (ldFldInstr->m_opcode == Js::OpCode::LdSuperFld) |
| { |
| IR::Opnd * src2 = nullptr; |
| src2 = ldFldInstr->UnlinkSrc2(); |
| m_lowererMD.LoadHelperArgument(ldFldInstr, src2); |
| } |
| |
| AssertMsg(src->IsSymOpnd() && src->AsSymOpnd()->m_sym->IsPropertySym(), "Expected property sym as src"); |
| |
| if (useInlineCache) |
| { |
| IR::Opnd * inlineCacheOpnd; |
| AssertMsg(src->AsSymOpnd()->IsPropertySymOpnd(), "Need property sym operand to find the inline cache"); |
| if (src->AsPropertySymOpnd()->m_runtimePolymorphicInlineCache && polymorphicHelperMethod != helperMethod) |
| { |
| JITTimePolymorphicInlineCache * polymorphicInlineCache = src->AsPropertySymOpnd()->m_runtimePolymorphicInlineCache; |
| helperMethod = polymorphicHelperMethod; |
| inlineCacheOpnd = IR::AddrOpnd::New(polymorphicInlineCache->GetAddr(), IR::AddrOpndKindDynamicInlineCache, this->m_func); |
| } |
| else |
| { |
| // Need to load the runtime inline cache opnd before loading any helper arguments, |
| // because LoadRuntimeInlineCacheOpnd may create labels marked as helper and cause |
| // op-helper register push/pop saves on x86, clobbering any helper arguments already pushed. |
| inlineCacheOpnd = this->LoadRuntimeInlineCacheOpnd(ldFldInstr, src->AsPropertySymOpnd(), isHelper); |
| } |
| this->LoadPropertySymAsArgument(ldFldInstr, src); |
| this->m_lowererMD.LoadHelperArgument( |
| ldFldInstr, |
| IR::Opnd::CreateInlineCacheIndexOpnd(src->AsPropertySymOpnd()->m_inlineCacheIndex, m_func)); |
| |
| this->m_lowererMD.LoadHelperArgument(ldFldInstr, inlineCacheOpnd); |
| this->m_lowererMD.LoadHelperArgument(ldFldInstr, LoadFunctionBodyOpnd(ldFldInstr)); |
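| // Helper arguments are pushed as they are loaded, so the last one loaded (the function |
| // body) becomes the first parameter; for a PatchGetValue-style helper the call is roughly |
| //     helper(functionBody, inlineCache, inlineCacheIndex, instance, propertyId) |
| // (a sketch; the exact parameter list is whatever helperMethod names). |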
| } |
| else |
| { |
| LoadScriptContext(ldFldInstr); |
| this->LoadPropertySymAsArgument(ldFldInstr, src); |
| } |
| |
| // Do we need to reload the type and slot array after the helper returns? |
| // (We do if there's a propertySymOpnd downstream that needs it, i.e., the type is not dead.) |
| IR::RegOpnd *opndBase = src->AsSymOpnd()->CreatePropertyOwnerOpnd(m_func); |
| m_lowererMD.ChangeToHelperCall(ldFldInstr, helperMethod, labelBailOut, opndBase, src->AsSymOpnd()->IsPropertySymOpnd() ? src->AsSymOpnd()->AsPropertySymOpnd() : nullptr, isHelper); |
| |
| return instrPrev; |
| } |
| |
| bool |
| Lowerer::GenerateLdFldWithCachedType(IR::Instr * instrLdFld, bool* continueAsHelperOut, IR::LabelInstr** labelHelperOut, IR::RegOpnd** typeOpndOut) |
| { |
| IR::Instr *instr; |
| IR::Opnd *opnd; |
| IR::LabelInstr *labelObjCheckFailed = nullptr; |
| IR::LabelInstr *labelTypeCheckFailed = nullptr; |
| IR::LabelInstr *labelDone = nullptr; |
| |
| Assert(continueAsHelperOut != nullptr); |
| *continueAsHelperOut = false; |
| |
| Assert(labelHelperOut != nullptr); |
| *labelHelperOut = nullptr; |
| |
| Assert(typeOpndOut != nullptr); |
| *typeOpndOut = nullptr; |
| |
| Assert(instrLdFld->GetSrc1()->IsSymOpnd()); |
| if (!instrLdFld->GetSrc1()->AsSymOpnd()->IsPropertySymOpnd()) |
| { |
| return false; |
| } |
| |
| IR::PropertySymOpnd *propertySymOpnd = instrLdFld->GetSrc1()->AsPropertySymOpnd(); |
| if (!propertySymOpnd->IsTypeCheckSeqCandidate()) |
| { |
| return false; |
| } |
| |
| AssertMsg(propertySymOpnd->TypeCheckSeqBitsSetOnlyIfCandidate(), "Property sym operand optimized despite not being a candidate?"); |
| |
| if (!propertySymOpnd->IsTypeCheckSeqParticipant() && !propertySymOpnd->NeedsLocalTypeCheck()) |
| { |
| return false; |
| } |
| |
| Assert(!propertySymOpnd->NeedsTypeCheckAndBailOut() || (instrLdFld->HasBailOutInfo() && IR::IsTypeCheckBailOutKind(instrLdFld->GetBailOutKind()))); |
| |
| // In the backwards pass we only add guarded property operations to instructions that are not already |
| // protected by an upstream type check. |
| Assert(!propertySymOpnd->IsTypeCheckProtected() || propertySymOpnd->GetGuardedPropOps() == nullptr); |
| |
| PHASE_PRINT_TESTTRACE( |
| Js::ObjTypeSpecPhase, |
| this->m_func, |
| _u("Field load: %s, property ID: %d, func: %s, cache ID: %d, cloned cache: true, layout: %s, redundant check: %s\n"), |
| Js::OpCodeUtil::GetOpCodeName(instrLdFld->m_opcode), |
| propertySymOpnd->m_sym->AsPropertySym()->m_propertyId, |
| this->m_func->GetJITFunctionBody()->GetDisplayName(), |
| propertySymOpnd->m_inlineCacheIndex, |
| propertySymOpnd->GetCacheLayoutString(), |
| propertySymOpnd->IsTypeChecked() ? _u("true") : _u("false")); |
| |
| if (propertySymOpnd->HasFinalType() && !propertySymOpnd->IsLoadedFromProto()) |
| { |
| propertySymOpnd->UpdateSlotForFinalType(); |
| } |
| |
| // TODO (ObjTypeSpec): If ((PropertySym*)propertySymOpnd->m_sym)->m_stackSym->m_isIntConst consider emitting a direct |
| // jump to helper or bailout. If we have a type check bailout, we could even abort compilation. |
| |
| bool hasTypeCheckBailout = instrLdFld->HasBailOutInfo() && IR::IsTypeCheckBailOutKind(instrLdFld->GetBailOutKind()); |
| |
| // If the hard-coded type is not available here, do a type check, and branch to the helper if the check fails. |
| // In the prototype case, we have to check the type even if it was checked upstream, to cover the case where |
| // the property has been added locally. Note that this is not necessary if the proto chain has been checked, |
| // because then we know there's been no store of the property since the type was checked. |
| bool emitPrimaryTypeCheck = propertySymOpnd->NeedsPrimaryTypeCheck(); |
| bool emitLocalTypeCheck = propertySymOpnd->NeedsLocalTypeCheck(); |
| bool emitLoadFromProtoTypeCheck = propertySymOpnd->NeedsLoadFromProtoTypeCheck(); |
| bool emitTypeCheck = emitPrimaryTypeCheck || emitLocalTypeCheck || emitLoadFromProtoTypeCheck; |
| |
| if (emitTypeCheck) |
| { |
| if (emitLoadFromProtoTypeCheck) |
| { |
| propertySymOpnd->EnsureGuardedPropOps(this->m_func->m_alloc); |
| propertySymOpnd->SetGuardedPropOp(propertySymOpnd->GetObjTypeSpecFldId()); |
| } |
| labelTypeCheckFailed = IR::LabelInstr::New(Js::OpCode::Label, this->m_func, true); |
| labelObjCheckFailed = hasTypeCheckBailout ? labelTypeCheckFailed : IR::LabelInstr::New(Js::OpCode::Label, this->m_func, true); |
| *typeOpndOut = this->GenerateCachedTypeCheck(instrLdFld, propertySymOpnd, labelObjCheckFailed, labelTypeCheckFailed); |
| } |
| |
| IR::Opnd *opndSlotArray; |
| if (propertySymOpnd->IsLoadedFromProto()) |
| { |
| opndSlotArray = this->LoadSlotArrayWithCachedProtoType(instrLdFld, propertySymOpnd); |
| } |
| else |
| { |
| opndSlotArray = this->LoadSlotArrayWithCachedLocalType(instrLdFld, propertySymOpnd); |
| } |
| |
| // Load the value from the slot, getting the slot ID from the cache. |
| uint16 index = propertySymOpnd->GetSlotIndex(); |
| AssertOrFailFast(index != (uint16)-1); |
| |
| if (opndSlotArray->IsRegOpnd()) |
| { |
| opnd = IR::IndirOpnd::New(opndSlotArray->AsRegOpnd(), index * sizeof(Js::Var), TyMachReg, this->m_func); |
| } |
| else |
| { |
| Assert(opndSlotArray->IsMemRefOpnd()); |
| opnd = IR::MemRefOpnd::New((char*)opndSlotArray->AsMemRefOpnd()->GetMemLoc() + (index * sizeof(Js::Var)), TyMachReg, this->m_func, IR::AddrOpndKindDynamicPropertySlotRef); |
| } |
| Lowerer::InsertMove(instrLdFld->GetDst(), opnd, instrLdFld); |
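| // On x64, for instance, this cache-directed load reduces to a single instruction such as |
| //     MOV dst, [slotArray + slotIndex * sizeof(Js::Var)] |
| // (or a load through a baked-in address when the proto slot array is a JIT-time constant). |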
| |
| // At this point either the check was elided (the type is protected upstream), or the type |
| // check above succeeds, or we bail out before the operation. Either delete the original |
| // instruction or replace it with a bailout. |
| if (!emitPrimaryTypeCheck && !emitLocalTypeCheck && !emitLoadFromProtoTypeCheck) |
| { |
| Assert(labelTypeCheckFailed == nullptr); |
| AssertMsg(!instrLdFld->HasBailOutInfo() || instrLdFld->HasLazyBailOut(), "Why does a direct field load have bailout that is not lazy?"); |
| instrLdFld->Remove(); |
| return true; |
| } |
| |
| // Otherwise, branch around the bailout or helper. |
| labelDone = IR::LabelInstr::New(Js::OpCode::Label, this->m_func); |
| instr = IR::BranchInstr::New(LowererMD::MDUncondBranchOpcode, labelDone, this->m_func); |
| instrLdFld->InsertBefore(instr); |
| |
| // Insert the bailout or helper label here. |
| instrLdFld->InsertBefore(labelTypeCheckFailed); |
| instrLdFld->InsertAfter(labelDone); |
| |
| if (hasTypeCheckBailout) |
| { |
| AssertMsg(PHASE_ON1(Js::ObjTypeSpecIsolatedFldOpsWithBailOutPhase) || !propertySymOpnd->IsTypeDead(), |
| "Why does a field load have a type check bailout, if its type is dead?"); |
| |
| // Convert the original instruction to a bailout. |
| if (instrLdFld->GetBailOutInfo()->bailOutInstr != instrLdFld) |
| { |
| // Set the cache index in the bailout info so that the bailout code will write it into the |
| // bailout record at runtime. |
| instrLdFld->GetBailOutInfo()->polymorphicCacheIndex = propertySymOpnd->m_inlineCacheIndex; |
| } |
| instrLdFld->FreeDst(); |
| instrLdFld->FreeSrc1(); |
| instrLdFld->m_opcode = Js::OpCode::BailOut; |
| this->GenerateBailOut(instrLdFld); |
| |
| return true; |
| } |
| else |
| { |
| *continueAsHelperOut = true; |
| Assert(labelObjCheckFailed != nullptr && labelObjCheckFailed != labelTypeCheckFailed); |
| *labelHelperOut = labelObjCheckFailed; |
| return false; |
| } |
| } |
| |
| template<bool isRoot> |
| IR::Instr* Lowerer::GenerateCompleteLdFld(IR::Instr* instr, bool emitFastPath, IR::JnHelperMethod monoHelperAfterFastPath, IR::JnHelperMethod polyHelperAfterFastPath, |
| IR::JnHelperMethod monoHelperWithoutFastPath, IR::JnHelperMethod polyHelperWithoutFastPath) |
| { |
| if(instr->CallsAccessor() && instr->HasBailOutInfo()) |
| { |
| Assert(!BailOutInfo::IsBailOutOnImplicitCalls(instr->GetBailOutKind())); |
| } |
| |
| IR::Instr* prevInstr = instr->m_prev; |
| |
| IR::LabelInstr* labelHelper = nullptr; |
| IR::LabelInstr* labelBailOut = nullptr; |
| bool isHelper = false; |
| IR::RegOpnd* typeOpnd = nullptr; |
| |
| if (isRoot) |
| { |
| // Don't do the fast path here if emitFastPath is false, even if we can. |
| if (emitFastPath && (this->GenerateLdFldWithCachedType(instr, &isHelper, &labelHelper, &typeOpnd) || this->GenerateNonConfigurableLdRootFld(instr))) |
| { |
| Assert(labelHelper == nullptr); |
| return prevInstr; |
| } |
| } |
| else |
| { |
| if (this->GenerateLdFldWithCachedType(instr, &isHelper, &labelHelper, &typeOpnd)) |
| { |
| Assert(labelHelper == nullptr); |
| return prevInstr; |
| } |
| } |
| |
| if (emitFastPath) |
| { |
| if (!GenerateFastLdFld(instr, monoHelperWithoutFastPath, polyHelperWithoutFastPath, &labelBailOut, typeOpnd, &isHelper, &labelHelper)) |
| { |
| if (labelHelper != nullptr) |
| { |
| labelHelper->isOpHelper = isHelper; |
| instr->InsertBefore(labelHelper); |
| } |
| prevInstr = LowerLdFld(instr, monoHelperAfterFastPath, polyHelperAfterFastPath, true, labelBailOut, isHelper); |
| } |
| } |
| else |
| { |
| if (labelHelper != nullptr) |
| { |
| labelHelper->isOpHelper = isHelper; |
| instr->InsertBefore(labelHelper); |
| } |
| prevInstr = LowerLdFld(instr, monoHelperWithoutFastPath, polyHelperWithoutFastPath, true, labelBailOut, isHelper); |
| } |
| |
| return prevInstr; |
| } |
| |
| bool |
| Lowerer::GenerateCheckFixedFld(IR::Instr * instrChkFld) |
| { |
| IR::Instr *instr; |
| IR::LabelInstr *labelBailOut = nullptr; |
| IR::LabelInstr *labelDone = nullptr; |
| |
| AssertMsg(!PHASE_OFF(Js::FixedMethodsPhase, instrChkFld->m_func) || |
| !PHASE_OFF(Js::UseFixedDataPropsPhase, instrChkFld->m_func), "Lowering a check fixed field with fixed data/method phase disabled?"); |
| |
| Assert(instrChkFld->GetSrc1()->IsSymOpnd() && instrChkFld->GetSrc1()->AsSymOpnd()->IsPropertySymOpnd()); |
| IR::PropertySymOpnd *propertySymOpnd = instrChkFld->GetSrc1()->AsPropertySymOpnd(); |
| |
| AssertMsg(propertySymOpnd->TypeCheckSeqBitsSetOnlyIfCandidate(), "Property sym operand optimized despite not being a candidate?"); |
| Assert(propertySymOpnd->MayNeedTypeCheckProtection()); |
| |
| // In the backwards pass we only add guarded property operations to instructions that are not already |
| // protected by an upstream type check. |
| Assert(!propertySymOpnd->IsTypeCheckProtected() || propertySymOpnd->GetGuardedPropOps() == nullptr); |
| |
| // For the non-configurable properties on the global object we do not need a type check. Otherwise, |
| // we need a type check and bailout here unless this operation is part of the type check sequence and |
| // is protected by a type check upstream. |
| bool emitPrimaryTypeCheck = propertySymOpnd->NeedsPrimaryTypeCheck(); |
| // In addition, we may also need a local type check in case the property comes from the prototype and |
| // it may have been overwritten on the instance after the primary type check upstream. If the property |
| // comes from the instance, we must still protect against its value changing after the type check, but |
| // for this a cheaper guard check is sufficient (see below). |
| bool emitFixedFieldTypeCheck = propertySymOpnd->NeedsCheckFixedFieldTypeCheck() && |
| (!propertySymOpnd->IsTypeChecked() || propertySymOpnd->IsLoadedFromProto()); |
| |
| PropertySym * propertySym = propertySymOpnd->m_sym->AsPropertySym(); |
| uint inlineCacheIndex = propertySymOpnd->m_inlineCacheIndex; |
| bool checkFixedDataGenerated = false; |
| bool checkFixedTypeGenerated = false; |
| |
| OUTPUT_TRACE_FUNC( |
| Js::ObjTypeSpecPhase, |
| this->m_func, |
| _u("Fixed field check: %s, property ID: %d, cache ID: %u, cloned cache: true, layout: %s, redundant check: %s count of props: %u \n"), |
| Js::OpCodeUtil::GetOpCodeName(instrChkFld->m_opcode), |
| propertySym->m_propertyId, |
| inlineCacheIndex, propertySymOpnd->GetCacheLayoutString(), propertySymOpnd->IsTypeChecked() ? _u("true") : _u("false"), |
| propertySymOpnd->GetGuardedPropOps() ? propertySymOpnd->GetGuardedPropOps()->Count() : 0); |
| |
| if (emitPrimaryTypeCheck || emitFixedFieldTypeCheck) |
| { |
| labelBailOut = IR::LabelInstr::New(Js::OpCode::Label, this->m_func, true); |
| |
| if(emitFixedFieldTypeCheck && propertySymOpnd->IsRootObjectNonConfigurableFieldLoad()) |
| { |
| AssertMsg(!propertySymOpnd->GetGuardedPropOps() || propertySymOpnd->GetGuardedPropOps()->IsEmpty(), "This property Guard is used only for one property"); |
| // We only need the cheaper guard check if the property belongs to the GlobalObject. |
| checkFixedDataGenerated = this->GenerateFixedFieldGuardCheck(instrChkFld, propertySymOpnd, labelBailOut); |
| } |
| else |
| { |
| if (emitFixedFieldTypeCheck) |
| { |
| propertySymOpnd->EnsureGuardedPropOps(this->m_func->m_alloc); |
| propertySymOpnd->SetGuardedPropOp(propertySymOpnd->GetObjTypeSpecFldId()); |
| } |
| this->GenerateCachedTypeCheck(instrChkFld, propertySymOpnd, labelBailOut, labelBailOut); |
| checkFixedTypeGenerated = true; |
| } |
| } |
| |
| // We may still need this guard if we didn't emit the write protect type check above. This situation arises if we have |
| // a fixed field from the instance (not proto) and a property of the same name has been written somewhere between the |
| // primary type check and here. Note that we don't need a type check, because we know the fixed field exists on the |
| // object even if it has been written since primary type check, but we need to verify the fixed value didn't get overwritten. |
| if (!emitPrimaryTypeCheck && !emitFixedFieldTypeCheck && !propertySymOpnd->IsWriteGuardChecked()) |
| { |
| if (!PHASE_OFF(Js::FixedFieldGuardCheckPhase, this->m_func)) |
| { |
| Assert(labelBailOut == nullptr); |
| labelBailOut = IR::LabelInstr::New(Js::OpCode::Label, this->m_func, true); |
| checkFixedDataGenerated = this->GenerateFixedFieldGuardCheck(instrChkFld, propertySymOpnd, labelBailOut); |
| } |
| } |
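| // Conceptually, the guard check above is a single compare-and-branch on the property |
| // guard's value slot (the guard is invalidated when the fixed value is overwritten), |
| // schematically CMP [guardAddress], <expected> / JNE $bailOut; see |
| // GenerateFixedFieldGuardCheck for the actual emission. |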
| |
| // Note that a type handler holds only a weak reference to the singleton instance it represents, so |
| // it is possible that the instance gets collected before the type and handler do. Hence, the upstream |
| // type check may succeed, even as the original instance no longer exists. However, this would happen |
| // only if another instance reached the same type (otherwise we wouldn't ever pass the type check |
| // upstream). In that case we would have invalidated all fixed fields on that type, and so the type |
| // check (or property guard check, if necessary) above would fail. All in all, we would never attempt |
| // to access a fixed field from an instance that has been collected. |
| |
| if (!emitPrimaryTypeCheck && !emitFixedFieldTypeCheck && propertySymOpnd->IsWriteGuardChecked()) |
| { |
| Assert(labelBailOut == nullptr); |
| AssertMsg(!instrChkFld->HasBailOutInfo(), "Why does a direct fixed field check have bailout?"); |
| if (propertySymOpnd->ProducesAuxSlotPtr()) |
| { |
| this->GenerateAuxSlotPtrLoad(propertySymOpnd, instrChkFld); |
| } |
| instrChkFld->Remove(); |
| return true; |
| } |
| |
| // With lazy bailout, it is possible that no checks at all are generated for CheckFixedFld, in which |
| // case the lowered code would be just an unconditional jmp past the bailout helper block. That case |
| // is new and unexpected: the Layout phase would move the statement boundary preceding the |
| // CheckFixedFld, together with the jmp, to after the function exit, corrupting the source mapping. |
| // Make sure this doesn't happen by not generating the helper block at all when no checks are generated. |
| if (!checkFixedDataGenerated && !checkFixedTypeGenerated) |
| { |
| instrChkFld->Remove(); |
| return true; |
| } |
| |
| labelDone = IR::LabelInstr::New(Js::OpCode::Label, this->m_func); |
| instr = IR::BranchInstr::New(LowererMD::MDUncondBranchOpcode, labelDone, this->m_func); |
| instrChkFld->InsertBefore(instr); |
| |
| // Insert the helper label here. |
| instrChkFld->InsertBefore(labelBailOut); |
| instrChkFld->InsertAfter(labelDone); |
| |
| if (propertySymOpnd->ProducesAuxSlotPtr()) |
| { |
| this->GenerateAuxSlotPtrLoad(propertySymOpnd, labelDone->m_next); |
| } |
| |
| // Convert the original instruction to a bailout. |
| Assert(instrChkFld->HasBailOutInfo()); |
| |
| if (instrChkFld->GetBailOutInfo()->bailOutInstr != instrChkFld) |
| { |
| // Set the cache index in the bailout info so that the bailout code will write it into the |
| // bailout record at runtime. |
| instrChkFld->GetBailOutInfo()->polymorphicCacheIndex = inlineCacheIndex; |
| } |
| |
| instrChkFld->FreeSrc1(); |
| instrChkFld->m_opcode = Js::OpCode::BailOut; |
| this->GenerateBailOut(instrChkFld); |
| |
| return true; |
| } |
| |
| void |
| Lowerer::GenerateCheckObjType(IR::Instr * instrChkObjType) |
| { |
| Assert(instrChkObjType->GetSrc1()->IsSymOpnd() && instrChkObjType->GetSrc1()->AsSymOpnd()->IsPropertySymOpnd()); |
| IR::PropertySymOpnd *propertySymOpnd = instrChkObjType->GetSrc1()->AsPropertySymOpnd(); |
| |
| // Why do we have an explicit type check if the cached type has been checked upstream? The dead store pass should have |
| // removed this instruction. |
| Assert(propertySymOpnd->IsTypeCheckSeqCandidate() && !propertySymOpnd->IsTypeChecked()); |
| // Why do we have an explicit type check on a non-configurable root field load? |
| Assert(!propertySymOpnd->IsRootObjectNonConfigurableFieldLoad()); |
| |
| PropertySym * propertySym = propertySymOpnd->m_sym->AsPropertySym(); |
| uint inlineCacheIndex = propertySymOpnd->m_inlineCacheIndex; |
| |
| PHASE_PRINT_TESTTRACE( |
| Js::ObjTypeSpecPhase, |
| this->m_func, |
| _u("Object type check: %s, property ID: %d, func: %s, cache ID: %d, cloned cache: true, layout: %s, redundant check: %s\n"), |
| Js::OpCodeUtil::GetOpCodeName(instrChkObjType->m_opcode), |
| propertySym->m_propertyId, |
| this->m_func->GetJITFunctionBody()->GetDisplayName(), |
| inlineCacheIndex, propertySymOpnd->GetCacheLayoutString(), _u("false")); |
| |
| IR::LabelInstr* labelBailOut = IR::LabelInstr::New(Js::OpCode::Label, this->m_func, true); |
| |
| this->GenerateCachedTypeCheck(instrChkObjType, propertySymOpnd, labelBailOut, labelBailOut); |
| IR::LabelInstr* labelDone = IR::LabelInstr::New(Js::OpCode::Label, this->m_func); |
| IR::Instr* instr = IR::BranchInstr::New(LowererMD::MDUncondBranchOpcode, labelDone, this->m_func); |
| instrChkObjType->InsertBefore(instr); |
| |
| // Insert the bailout label here. |
| instrChkObjType->InsertBefore(labelBailOut); |
| instrChkObjType->InsertAfter(labelDone); |
| |
| if (propertySymOpnd->ProducesAuxSlotPtr()) |
| { |
| this->GenerateAuxSlotPtrLoad(propertySymOpnd, labelDone->m_next); |
| } |
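| // Net effect (schematic): |
| //     <cached type check on base>   -- mismatch --> $bailOut |
| //     JMP $done |
| // $bailOut: |
| //     <bailout call> |
| // $done: |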
| |
| // Convert the original instruction to a bailout. |
| Assert(instrChkObjType->HasBailOutInfo()); |
| |
| if (instrChkObjType->GetBailOutInfo()->bailOutInstr != instrChkObjType) |
| { |
| // Set the cache index in the bailout info so that the bailout code will write it into the |
| // bailout record at runtime. |
| instrChkObjType->GetBailOutInfo()->polymorphicCacheIndex = inlineCacheIndex; |
| } |
| |
| instrChkObjType->FreeSrc1(); |
| instrChkObjType->m_opcode = Js::OpCode::BailOut; |
| this->GenerateBailOut(instrChkObjType); |
| } |
| |
| void |
| Lowerer::LowerAdjustObjType(IR::Instr * instrAdjustObjType) |
| { |
| IR::AddrOpnd *finalTypeOpnd = instrAdjustObjType->UnlinkDst()->AsAddrOpnd(); |
| IR::AddrOpnd *initialTypeOpnd = instrAdjustObjType->UnlinkSrc2()->AsAddrOpnd(); |
| IR::RegOpnd *baseOpnd = instrAdjustObjType->UnlinkSrc1()->AsRegOpnd(); |
| |
| bool adjusted = this->GenerateAdjustBaseSlots( |
| instrAdjustObjType, baseOpnd, JITTypeHolder((JITType*)initialTypeOpnd->m_metadata), JITTypeHolder((JITType*)finalTypeOpnd->m_metadata)); |
| |
| if (instrAdjustObjType->m_opcode == Js::OpCode::AdjustObjTypeReloadAuxSlotPtr) |
| { |
| Assert(adjusted); |
| |
| // We reallocated the aux slots, so reload them if necessary. |
| StackSym * auxSlotPtrSym = baseOpnd->m_sym->GetAuxSlotPtrSym(); |
| Assert(auxSlotPtrSym); |
| |
| IR::Opnd *opndIndir = IR::IndirOpnd::New(baseOpnd, Js::DynamicObject::GetOffsetOfAuxSlots(), TyMachReg, this->m_func); |
| IR::RegOpnd *regOpnd = IR::RegOpnd::New(auxSlotPtrSym, TyMachReg, this->m_func); |
| regOpnd->SetIsJITOptimizedReg(true); |
| Lowerer::InsertMove(regOpnd, opndIndir, instrAdjustObjType); |
| } |
| |
| this->m_func->PinTypeRef((JITType*)finalTypeOpnd->m_metadata); |
| |
| IR::Opnd *opnd = IR::IndirOpnd::New(baseOpnd, Js::RecyclableObject::GetOffsetOfType(), TyMachReg, instrAdjustObjType->m_func); |
| this->InsertMove(opnd, finalTypeOpnd, instrAdjustObjType); |
| |
| initialTypeOpnd->Free(instrAdjustObjType->m_func); |
| instrAdjustObjType->Remove(); |
| } |
| |
| bool |
| Lowerer::GenerateNonConfigurableLdRootFld(IR::Instr * instrLdFld) |
| { |
| if (!instrLdFld->GetSrc1()->AsSymOpnd()->IsPropertySymOpnd()) |
| { |
| return false; |
| } |
| IR::PropertySymOpnd *propertySymOpnd = instrLdFld->GetSrc1()->AsPropertySymOpnd(); |
| if (!propertySymOpnd->IsRootObjectNonConfigurableFieldLoad()) |
| { |
| return false; |
| } |
| |
| Assert(!PHASE_OFF(Js::RootObjectFldFastPathPhase, this->m_func)); |
| Assert(!instrLdFld->HasBailOutInfo() || instrLdFld->HasLazyBailOut()); |
| |
| if (instrLdFld->HasLazyBailOut()) |
| { |
| instrLdFld->ClearBailOutInfo(); |
| } |
| |
| IR::Opnd * srcOpnd; |
| intptr_t rootObject = this->m_func->GetJITFunctionBody()->GetRootObject(); |
| if (propertySymOpnd->UsesAuxSlot()) |
| { |
| IR::RegOpnd * auxSlotOpnd = IR::RegOpnd::New(TyMachPtr, this->m_func); |
| this->InsertMove(auxSlotOpnd, IR::MemRefOpnd::New((byte *)rootObject + Js::DynamicObject::GetOffsetOfAuxSlots(), |
| TyMachPtr, this->m_func), instrLdFld); |
| |
| srcOpnd = IR::IndirOpnd::New(auxSlotOpnd, propertySymOpnd->GetSlotIndex() * sizeof(Js::Var *), |
| TyVar, this->m_func); |
| } |
| else |
| { |
| srcOpnd = IR::MemRefOpnd::New((Js::Var *)rootObject + propertySymOpnd->GetSlotIndex(), |
| TyVar, this->m_func); |
| } |
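| // The two shapes generated above are, schematically: |
| //     aux slot:    MOV t, [rootObjectAddr + offsetOfAuxSlots] ; MOV dst, [t + slot * sizeof(Var)] |
| //     inline slot: MOV dst, [rootObjectAddr + slot * sizeof(Var)] |
| // where the root object address is a JIT-time constant baked into the operand. |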
| instrLdFld->ReplaceSrc1(srcOpnd); |
| instrLdFld->m_opcode = Js::OpCode::Ld_A; |
| LowererMD::ChangeToAssign(instrLdFld); |
| return true; |
| } |
| |
| IR::Instr * |
| Lowerer::LowerDelFld(IR::Instr *delFldInstr, IR::JnHelperMethod helperMethod, bool useInlineCache, bool strictMode) |
| { |
| IR::Instr *instrPrev; |
| |
| Js::PropertyOperationFlags propertyOperationFlag = Js::PropertyOperation_None; |
| |
| if (strictMode) |
| { |
| propertyOperationFlag = Js::PropertyOperation_StrictMode; |
| } |
| |
| instrPrev = m_lowererMD.LoadHelperArgument(delFldInstr, IR::IntConstOpnd::New((IntConstType)propertyOperationFlag, TyInt32, m_func, true)); |
| |
| LowerLdFld(delFldInstr, helperMethod, helperMethod, useInlineCache); |
| |
| return instrPrev; |
| } |
| |
| IR::Instr * |
| Lowerer::LowerIsInst(IR::Instr * isInstInstr, IR::JnHelperMethod helperMethod) |
| { |
| IR::Instr * instrPrev; |
| IR::Instr * instrArg; |
| IR::RegOpnd * argOpnd; |
| |
| // inlineCache |
| instrPrev = m_lowererMD.LoadHelperArgument(isInstInstr, LoadIsInstInlineCacheOpnd(isInstInstr, isInstInstr->GetSrc1()->AsIntConstOpnd()->AsUint32())); |
| isInstInstr->FreeSrc1(); |
| |
| argOpnd = isInstInstr->UnlinkSrc2()->AsRegOpnd(); |
| Assert(argOpnd->m_sym->m_isSingleDef); |
| instrArg = argOpnd->m_sym->m_instrDef; |
| argOpnd->Free(m_func); |
| |
| // scriptContext |
| LoadScriptContext(isInstInstr); |
| |
| // instance goes last, so remember it now |
| IR::Opnd * instanceOpnd = instrArg->UnlinkSrc1(); |
| argOpnd = instrArg->UnlinkSrc2()->AsRegOpnd(); |
| Assert(argOpnd->m_sym->m_isSingleDef); |
| instrArg->Remove(); |
| instrArg = argOpnd->m_sym->m_instrDef; |
| argOpnd->Free(m_func); |
| |
| // function |
| IR::Opnd *opnd = instrArg->UnlinkSrc1(); |
| m_lowererMD.LoadHelperArgument(isInstInstr, opnd); |
| Assert(instrArg->GetSrc2() == NULL); |
| instrArg->Remove(); |
| |
| // instance |
| m_lowererMD.LoadHelperArgument(isInstInstr, instanceOpnd); |
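| // Arguments were loaded in reverse, so the helper is invoked roughly as |
| //     helper(instance, function, scriptContext, inlineCache) |
| // (a sketch of an OP_IsInst-style entry point; the exact signature is the helper's). |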
| |
| m_lowererMD.ChangeToHelperCall(isInstInstr, helperMethod); |
| |
| return instrPrev; |
| } |
| |
| void |
| Lowerer::GenerateStackScriptFunctionInit(StackSym * stackSym, Js::FunctionInfoPtrPtr nestedInfo) |
| { |
| Func * func = this->m_func; |
| Assert(func->HasAnyStackNestedFunc()); |
| Assert(nextStackFunctionOpnd); |
| |
| IR::Instr * insertBeforeInstr = func->GetFunctionEntryInsertionPoint(); |
| |
| IR::RegOpnd * addressOpnd = IR::RegOpnd::New(TyMachPtr, func); |
| const IR::AutoReuseOpnd autoReuseAddressOpnd(addressOpnd, func); |
| InsertLea(addressOpnd, IR::SymOpnd::New(stackSym, TyMachPtr, func), insertBeforeInstr); |
| |
| // Currently we don't initialize the environment until we actually allocate the function. We also |
| // walk the list of stack functions when we need to box them, so initialize it to NullFrameDisplay here. |
| GenerateStackScriptFunctionInit(addressOpnd, nestedInfo, |
| IR::AddrOpnd::New(func->GetThreadContextInfo()->GetNullFrameDisplayAddr(), IR::AddrOpndKindDynamicMisc, func), insertBeforeInstr); |
| |
| // Establish the next link |
| InsertMove(nextStackFunctionOpnd, addressOpnd, insertBeforeInstr); |
| this->nextStackFunctionOpnd = IR::SymOpnd::New(stackSym, sizeof(Js::StackScriptFunction), TyMachPtr, func); |
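| // Each stack function thus carries a "next" pointer immediately after the |
| // Js::StackScriptFunction itself; the entry-point code threads these into a singly |
| // linked list so the boxing walk can visit every stack-allocated function. |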
| } |
| |
| void |
| Lowerer::GenerateScriptFunctionInit(IR::RegOpnd * regOpnd, IR::Opnd * vtableAddressOpnd, |
| Js::FunctionInfoPtrPtr nestedInfo, IR::Opnd * envOpnd, IR::Instr * insertBeforeInstr, bool isZeroed) |
| { |
| Func * func = this->m_func; |
| |
| IR::Opnd * functionInfoOpnd = IR::RegOpnd::New(TyMachPtr, func); |
| InsertMove(functionInfoOpnd, IR::MemRefOpnd::New(nestedInfo, TyMachPtr, func), insertBeforeInstr); |
| IR::Opnd * functionProxyOpnd = IR::RegOpnd::New(TyMachPtr, func); |
| InsertMove(functionProxyOpnd, IR::IndirOpnd::New(functionInfoOpnd->AsRegOpnd(), Js::FunctionInfo::GetOffsetOfFunctionProxy(), TyMachPtr, func), insertBeforeInstr); |
| IR::Opnd * typeOpnd = IR::RegOpnd::New(TyMachPtr, func); |
| InsertMove(typeOpnd, IR::IndirOpnd::New(functionProxyOpnd->AsRegOpnd(), Js::FunctionProxy::GetOffsetOfDeferredPrototypeType(), |
| TyMachPtr, func), insertBeforeInstr); |
| |
| IR::LabelInstr * labelHelper = IR::LabelInstr::New(Js::OpCode::Label, func, true); |
| InsertTestBranch(typeOpnd, typeOpnd, Js::OpCode::BrEq_A, labelHelper, insertBeforeInstr); |
| IR::LabelInstr * labelDone = IR::LabelInstr::New(Js::OpCode::Label, func, false); |
| InsertBranch(Js::OpCode::Br, labelDone, insertBeforeInstr); |
| insertBeforeInstr->InsertBefore(labelHelper); |
| m_lowererMD.LoadHelperArgument(insertBeforeInstr, functionProxyOpnd); |
| |
| IR::Instr * callHelperInstr = IR::Instr::New(Js::OpCode::Call, typeOpnd, |
| IR::HelperCallOpnd::New(IR::JnHelperMethod::HelperEnsureFunctionProxyDeferredPrototypeType, func), func); |
| insertBeforeInstr->InsertBefore(callHelperInstr); |
| m_lowererMD.LowerCall(callHelperInstr, 0); |
| insertBeforeInstr->InsertBefore(labelDone); |
| |
| GenerateMemInit(regOpnd, 0, vtableAddressOpnd, insertBeforeInstr, isZeroed); |
| GenerateMemInit(regOpnd, Js::ScriptFunction::GetOffsetOfType(), typeOpnd, insertBeforeInstr, isZeroed); |
| GenerateMemInitNull(regOpnd, Js::ScriptFunction::GetOffsetOfAuxSlots(), insertBeforeInstr, isZeroed); |
| GenerateMemInitNull(regOpnd, Js::ScriptFunction::GetOffsetOfObjectArray(), insertBeforeInstr, isZeroed); |
| GenerateMemInit(regOpnd, Js::ScriptFunction::GetOffsetOfConstructorCache(), |
| LoadLibraryValueOpnd(insertBeforeInstr, LibraryValue::ValueConstructorCacheDefaultInstance), |
| insertBeforeInstr, isZeroed); |
| |
| GenerateMemInit(regOpnd, Js::ScriptFunction::GetOffsetOfFunctionInfo(), functionInfoOpnd, insertBeforeInstr, isZeroed); |
| GenerateMemInit(regOpnd, Js::ScriptFunction::GetOffsetOfEnvironment(), envOpnd, insertBeforeInstr, isZeroed); |
| GenerateMemInitNull(regOpnd, Js::ScriptFunction::GetOffsetOfCachedScopeObj(), insertBeforeInstr, isZeroed); |
| GenerateMemInitNull(regOpnd, Js::ScriptFunction::GetOffsetOfHasInlineCaches(), insertBeforeInstr, isZeroed); |
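| // Net result: the ScriptFunction is initialized field by field -- vtable, type (the |
| // deferred prototype type, created via the helper above if absent), null aux slots and |
| // object array, the default constructor cache, function info, environment, and cleared |
| // cached scope object / hasInlineCaches flag. |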
| } |
| |
| void |
| Lowerer::GenerateStackScriptFunctionInit(IR::RegOpnd * regOpnd, Js::FunctionInfoPtrPtr nestedInfo, IR::Opnd * envOpnd, IR::Instr * insertBeforeInstr) |
| { |
| Func * func = this->m_func; |
| GenerateScriptFunctionInit(regOpnd, |
| LoadVTableValueOpnd(insertBeforeInstr, VTableValue::VtableStackScriptFunction), |
| nestedInfo, envOpnd, insertBeforeInstr); |
| InsertMove(IR::IndirOpnd::New(regOpnd, Js::StackScriptFunction::GetOffsetOfBoxedScriptFunction(), TyMachPtr, func), |
| IR::AddrOpnd::NewNull(func), insertBeforeInstr); |
| } |
| |
| void |
| Lowerer::EnsureStackFunctionListStackSym() |
| { |
| Func * func = this->m_func; |
| Assert(func->HasAnyStackNestedFunc()); |
| #if defined(_M_IX86) || defined(_M_X64) |
| Assert(func->m_localStackHeight == (func->HasArgumentSlot() ? MachArgsSlotOffset : 0)); |
| StackSym * stackFunctionListStackSym = StackSym::New(TyMachPtr, func); |
| func->StackAllocate(stackFunctionListStackSym, sizeof(Js::ScriptFunction *)); |
| nextStackFunctionOpnd = IR::SymOpnd::New(stackFunctionListStackSym, TyMachPtr, func); |
| #else |
| Assert(func->m_localStackHeight == 0); |
| nextStackFunctionOpnd = IR::IndirOpnd::New(IR::RegOpnd::New(NULL, FRAME_REG, TyMachReg, func), |
| -(int32)(Js::Constants::StackNestedFuncList * sizeof(Js::Var)), TyMachPtr, func); |
| #endif |
| } |
| |
| void |
| Lowerer::AllocStackClosure() |
| { |
| m_func->StackAllocate(m_func->GetLocalFrameDisplaySym(), sizeof(Js::Var)); |
| m_func->StackAllocate(m_func->GetLocalClosureSym(), sizeof(Js::Var)); |
| } |
| |
| void |
| Lowerer::EnsureZeroLastStackFunctionNext() |
| { |
| Assert(nextStackFunctionOpnd != nullptr); |
| Func * func = this->m_func; |
| IR::Instr * insertBeforeInstr = func->GetFunctionEntryInsertionPoint(); |
| InsertMove(nextStackFunctionOpnd, IR::AddrOpnd::NewNull(func), insertBeforeInstr); |
| } |
| |
| IR::Instr * |
| Lowerer::GenerateNewStackScFunc(IR::Instr * newScFuncInstr, IR::RegOpnd ** ppEnvOpnd) |
| { |
| Assert(newScFuncInstr->m_func->DoStackNestedFunc()); |
| Func * func = newScFuncInstr->m_func; |
| uint index = newScFuncInstr->GetSrc1()->AsIntConstOpnd()->AsUint32(); |
| Assert(index < func->GetJITFunctionBody()->GetNestedCount()); |
| |
| IR::LabelInstr * labelNoStackFunc = IR::LabelInstr::New(Js::OpCode::Label, func, true); |
| IR::LabelInstr * labelDone = IR::LabelInstr::New(Js::OpCode::Label, func); |
| |
| InsertTestBranch(IR::MemRefOpnd::New(func->GetJITFunctionBody()->GetFlagsAddr(), TyInt8, func), |
| IR::IntConstOpnd::New(Js::FunctionBody::Flags_StackNestedFunc, TyInt8, func, true), |
| Js::OpCode::BrEq_A, labelNoStackFunc, newScFuncInstr); |
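| // i.e., schematically: |
| //     TEST byte ptr [functionBody->flags], Flags_StackNestedFunc |
| //     JEQ $noStackFunc    ; flag cleared at runtime => fall back to heap allocation |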
| |
| Js::FunctionInfoPtrPtr nestedInfo = func->GetJITFunctionBody()->GetNestedFuncRef(index); |
| IR::Instr * instrAssignDst; |
| IR::RegOpnd * envOpnd = *ppEnvOpnd; |
| if (!func->IsLoopBody()) |
| { |
| // The StackAllocate call below for this sym passes a size that no IRType represents, hence TyMisc in the constructor. |
| StackSym * stackSym = StackSym::New(TyMisc, func); |
| // ScriptFunction and its next pointer |
| this->m_func->StackAllocate(stackSym, sizeof(Js::StackScriptFunction) + sizeof(Js::StackScriptFunction *)); |
| GenerateStackScriptFunctionInit(stackSym, nestedInfo); |
| |
| InsertMove(IR::SymOpnd::New(stackSym, Js::ScriptFunction::GetOffsetOfEnvironment(), TyMachPtr, func), |
| envOpnd, |
| newScFuncInstr); |
| |
| instrAssignDst = |
| InsertLea(newScFuncInstr->GetDst()->AsRegOpnd(), IR::SymOpnd::New(stackSym, TyMachPtr, func), newScFuncInstr); |
| } |
| else |
| { |
| Assert(func->IsTopFunc()); |
| Assert(func->m_loopParamSym); |
| |
| IR::Instr * envDefInstr = envOpnd->AsRegOpnd()->m_sym->m_instrDef; |
| Assert(envDefInstr && envDefInstr->m_opcode == Js::OpCode::NewScFuncData); |
| IR::RegOpnd * opndFuncPtr = envDefInstr->UnlinkSrc2()->AsRegOpnd(); |
| Assert(opndFuncPtr); |
| envOpnd = envDefInstr->UnlinkSrc1()->AsRegOpnd(); |
| Assert(envOpnd); |
| *ppEnvOpnd = envOpnd; |
| envDefInstr->Remove(); |
| |
| if (index != 0) |
| { |
| IR::RegOpnd * opnd = IR::RegOpnd::New(TyVar, func); |
| InsertAdd(false, opnd, opndFuncPtr, IR::IntConstOpnd::New(index * sizeof(Js::StackScriptFunction), TyMachPtr, func), newScFuncInstr); |
| opndFuncPtr = opnd; |
| } |
| |
| InsertMove(IR::IndirOpnd::New(opndFuncPtr, Js::ScriptFunction::GetOffsetOfEnvironment(), TyMachPtr, func), |
| envOpnd, newScFuncInstr); |
| |
| instrAssignDst = InsertMove(newScFuncInstr->GetDst(), opndFuncPtr, newScFuncInstr); |
| } |
| |
| InsertBranch(Js::OpCode::Br, labelDone, newScFuncInstr); |
| |
| newScFuncInstr->InsertBefore(labelNoStackFunc); |
| newScFuncInstr->InsertAfter(labelDone); |
| |
| return instrAssignDst; |
| } |
| |
| IR::Instr * |
| Lowerer::LowerNewScFunc(IR::Instr * newScFuncInstr) |
| { |
| IR::Instr *stackNewScFuncInstr = nullptr; |
| IR::RegOpnd * envOpnd = newScFuncInstr->UnlinkSrc2()->AsRegOpnd(); |
| |
| if (newScFuncInstr->m_func->DoStackNestedFunc()) |
| { |
| stackNewScFuncInstr = GenerateNewStackScFunc(newScFuncInstr, &envOpnd); |
| } |
| |
| IR::IntConstOpnd * functionBodySlotOpnd = newScFuncInstr->UnlinkSrc1()->AsIntConstOpnd(); |
| |
| IR::Instr * instrPrev = this->LoadFunctionBodyAsArgument(newScFuncInstr, functionBodySlotOpnd, envOpnd); |
| m_lowererMD.ChangeToHelperCall(newScFuncInstr, IR::HelperScrFunc_OP_NewScFunc); |
| |
| return stackNewScFuncInstr == nullptr? instrPrev : stackNewScFuncInstr; |
| } |
| |
| IR::Instr * |
| Lowerer::LowerNewScFuncHomeObj(IR::Instr * newScFuncInstr) |
| { |
| newScFuncInstr->m_opcode = Js::OpCode::CallHelper; |
| IR::HelperCallOpnd *helperOpnd = IR::HelperCallOpnd::New(IR::HelperScrFunc_OP_NewScFuncHomeObj, this->m_func); |
| |
| IR::Opnd * src1 = newScFuncInstr->UnlinkSrc1(); |
| newScFuncInstr->SetSrc1(helperOpnd); |
| newScFuncInstr->SetSrc2(src1); |
| |
| return newScFuncInstr; |
| } |
| |
| IR::Instr * |
| Lowerer::LowerNewScGenFunc(IR::Instr * newScFuncInstr) |
| { |
| IR::IntConstOpnd * functionBodySlotOpnd = newScFuncInstr->UnlinkSrc1()->AsIntConstOpnd(); |
| IR::RegOpnd * envOpnd = newScFuncInstr->UnlinkSrc2()->AsRegOpnd(); |
| |
| IR::Instr * instrPrev = this->LoadFunctionBodyAsArgument(newScFuncInstr, functionBodySlotOpnd, envOpnd); |
| m_lowererMD.ChangeToHelperCall(newScFuncInstr, IR::HelperScrFunc_OP_NewScGenFunc); |
| |
| return instrPrev; |
| } |
| |
| IR::Instr * |
| Lowerer::LowerNewScGenFuncHomeObj(IR::Instr * newScFuncInstr) |
| { |
| newScFuncInstr->m_opcode = Js::OpCode::CallHelper; |
| IR::HelperCallOpnd *helperOpnd = IR::HelperCallOpnd::New(IR::HelperScrFunc_OP_NewScGenFuncHomeObj, this->m_func); |
| |
| IR::Opnd * src1 = newScFuncInstr->UnlinkSrc1(); |
| newScFuncInstr->SetSrc1(helperOpnd); |
| newScFuncInstr->SetSrc2(src1); |
| |
| return newScFuncInstr; |
| } |
| |
| IR::Instr * |
| Lowerer::LowerStPropIdArrFromVar(IR::Instr * stPropIdInstr) |
| { |
| IR::HelperCallOpnd *helperOpnd = IR::HelperCallOpnd::New(IR::HelperStPropIdArrFromVar, this->m_func); |
| |
| IR::Opnd * src1 = stPropIdInstr->UnlinkSrc1(); |
| stPropIdInstr->SetSrc1(helperOpnd); |
| stPropIdInstr->SetSrc2(src1); |
| |
| return m_lowererMD.LowerCallHelper(stPropIdInstr); |
| } |
| |
| IR::Instr * |
| Lowerer::LowerRestify(IR::Instr * newRestInstr) |
| { |
| IR::HelperCallOpnd *helperOpnd = IR::HelperCallOpnd::New(IR::HelperRestify, this->m_func); |
| |
| IR::Opnd * src1 = newRestInstr->UnlinkSrc1(); |
| newRestInstr->SetSrc1(helperOpnd); |
| newRestInstr->SetSrc2(src1); |
| |
| return m_lowererMD.LowerCallHelper(newRestInstr); |
| } |
| |
| ///---------------------------------------------------------------------------- |
| /// |
| /// Lowerer::LowerScopedLdFld |
| /// |
| /// Lower a load instruction that takes an additional instance to use as |
| /// a default if the scope chain provided doesn't contain the property. |
| /// |
| ///---------------------------------------------------------------------------- |
| IR::Instr * |
| Lowerer::LowerScopedLdFld(IR::Instr * ldFldInstr, IR::JnHelperMethod helperMethod, bool withInlineCache) |
| { |
| IR::Opnd *src; |
| IR::Instr *instrPrev = ldFldInstr->m_prev; |
| |
| if(!withInlineCache) |
| { |
| LoadScriptContext(ldFldInstr); |
| } |
| |
| intptr_t rootObject = m_func->GetJITFunctionBody()->GetRootObject(); |
| src = IR::AddrOpnd::New(rootObject, IR::AddrOpndKindDynamicVar, this->m_func, true); |
| instrPrev = m_lowererMD.LoadHelperArgument(ldFldInstr, src); |
| |
| src = ldFldInstr->UnlinkSrc1(); |
| AssertMsg(src->IsSymOpnd() && src->AsSymOpnd()->m_sym->IsPropertySym(), "Expected property sym as src"); |
| this->LoadPropertySymAsArgument(ldFldInstr, src); |
| |
| if (withInlineCache) |
| { |
| AssertMsg(src->AsSymOpnd()->IsPropertySymOpnd(), "Need property sym operand to find the inline cache"); |
| |
| m_lowererMD.LoadHelperArgument( |
| ldFldInstr, |
| IR::Opnd::CreateInlineCacheIndexOpnd(src->AsPropertySymOpnd()->m_inlineCacheIndex, m_func)); |
| |
| // Not using the polymorphic inline cache because the fast path only uses the monomorphic inline cache |
| this->m_lowererMD.LoadHelperArgument(ldFldInstr, this->LoadRuntimeInlineCacheOpnd(ldFldInstr, src->AsPropertySymOpnd())); |
| |
| m_lowererMD.LoadHelperArgument(ldFldInstr, LoadFunctionBodyOpnd(ldFldInstr)); |
| } |
| m_lowererMD.ChangeToHelperCall(ldFldInstr, helperMethod); |
| |
| return instrPrev; |
| } |
| |
| ///---------------------------------------------------------------------------- |
| /// |
| /// Lowerer::LowerScopedLdInst |
| /// |
| /// Lower a load instruction that takes an additional instance to use as |
| /// a default if the scope chain provided doesn't contain the property. |
| /// |
| ///---------------------------------------------------------------------------- |
| IR::Instr * |
| Lowerer::LowerScopedLdInst(IR::Instr *instr, IR::JnHelperMethod helperMethod) |
| { |
| IR::Opnd *src; |
| IR::Instr *instrPrev; |
| |
| // last argument is the scriptContext |
| instrPrev = LoadScriptContext(instr); |
| src = instr->UnlinkSrc2(); |
| AssertMsg(src->IsRegOpnd(), "Expected Reg opnd as src2"); |
| |
| // __out Var*. The StackSym is allocated in the IRBuilder; here we need to insert a LEA to pass its address. |
| StackSym* dstSym = src->GetStackSym(); |
| IR::Instr *load = InsertLoadStackAddress(dstSym, instr); |
| IR::Opnd* tempOpnd = load->GetDst(); |
| m_lowererMD.LoadHelperArgument(instr, tempOpnd); |
| |
| // Now the 3rd-to-last argument is the root object of the function. We need an AddrOpnd |
| // to pass in the address of the rootObject. |
| IR::Opnd * srcOpnd; |
| intptr_t rootObject = m_func->GetJITFunctionBody()->GetRootObject(); |
| srcOpnd = IR::AddrOpnd::New(rootObject, IR::AddrOpndKindDynamicVar, instr->m_func, true); |
| instrPrev = m_lowererMD.LoadHelperArgument(instr, srcOpnd); |
| |
| // No change; use the property field as built by the IRBuilder. |
| src = instr->UnlinkSrc1(); |
| AssertMsg(src->IsSymOpnd() && src->AsSymOpnd()->m_sym->IsPropertySym(), "Expected property sym as src"); |
| this->LoadPropertySymAsArgument(instr, src); |
| |
| instrPrev = m_lowererMD.ChangeToHelperCall(instr, helperMethod); |
| |
| IR::RegOpnd* regOpnd = IR::RegOpnd::New(dstSym, TyVar, m_func); |
| IR::SymOpnd* symOpnd = IR::SymOpnd::New(dstSym, TyVar, m_func); |
| this->InsertMove(regOpnd, symOpnd, instrPrev); |
| |
| return instrPrev; |
| } |
| |
| IR::Instr * |
| Lowerer::LowerScopedDelFld(IR::Instr * delFldInstr, IR::JnHelperMethod helperMethod, bool withInlineCache, bool strictMode) |
| { |
| IR::Instr *instrPrev; |
| |
| Js::PropertyOperationFlags propertyOperationFlag = Js::PropertyOperation_None; |
| |
| if (strictMode) |
| { |
| propertyOperationFlag = Js::PropertyOperation_StrictMode; |
| } |
| |
| instrPrev = m_lowererMD.LoadHelperArgument(delFldInstr, IR::IntConstOpnd::New((IntConstType)propertyOperationFlag, TyInt32, m_func, true)); |
| |
| LowerScopedLdFld(delFldInstr, helperMethod, withInlineCache); |
| |
| return instrPrev; |
| } |
| |
| IR::Instr * |
| Lowerer::LowerProfiledStFld(IR::JitProfilingInstr *stFldInstr, Js::PropertyOperationFlags flags) |
| { |
| Assert(stFldInstr->profileId == Js::Constants::NoProfileId); |
| |
| IR::Instr *const instrPrev = stFldInstr->m_prev; |
| |
| /* |
| void ProfilingHelpers::ProfiledInitFld_Jit( |
| const Var instance, |
| const PropertyId propertyId, |
| const InlineCacheIndex inlineCacheIndex, |
| const Var value, |
| void *const framePointer) |
| |
| void ProfilingHelpers::ProfiledStFld_Jit( |
| const Var instance, |
| const PropertyId propertyId, |
| const InlineCacheIndex inlineCacheIndex, |
| const Var value, |
| void *const framePointer) |
| |
| void ProfilingHelpers::ProfiledStSuperFld_Jit( |
| const Var instance, |
| const PropertyId propertyId, |
| const InlineCacheIndex inlineCacheIndex, |
| const Var value, |
| void *const framePointer, |
| const Var thisInstance) |
| */ |
| |
| m_lowererMD.LoadHelperArgument(stFldInstr, IR::Opnd::CreateFramePointerOpnd(m_func)); |
| |
| if (stFldInstr->m_opcode == Js::OpCode::StSuperFld) |
| { |
| m_lowererMD.LoadHelperArgument(stFldInstr, stFldInstr->UnlinkSrc2()); |
| } |
| |
| m_lowererMD.LoadHelperArgument(stFldInstr, stFldInstr->UnlinkSrc1()); |
| |
| IR::Opnd *dst = stFldInstr->UnlinkDst(); |
| AssertMsg(dst->IsSymOpnd() && dst->AsSymOpnd()->m_sym->IsPropertySym(), "Expected property sym as dst of field store"); |
| m_lowererMD.LoadHelperArgument( |
| stFldInstr, |
| IR::Opnd::CreateInlineCacheIndexOpnd(dst->AsPropertySymOpnd()->m_inlineCacheIndex, m_func)); |
| |
| LoadPropertySymAsArgument(stFldInstr, dst); |
| |
| IR::JnHelperMethod helper; |
| switch (stFldInstr->m_opcode) |
| { |
| case Js::OpCode::InitFld: |
| case Js::OpCode::InitRootFld: |
| helper = IR::HelperProfiledInitFld; |
| break; |
| |
| case Js::OpCode::StSuperFld: |
| helper = IR::HelperProfiledStSuperFld; |
| break; |
| |
| default: |
| helper = |
| flags & Js::PropertyOperation_Root |
| ? flags & Js::PropertyOperation_StrictMode ? IR::HelperProfiledStRootFld_Strict : IR::HelperProfiledStRootFld |
| : flags & Js::PropertyOperation_StrictMode ? IR::HelperProfiledStFld_Strict : IR::HelperProfiledStFld; |
| break; |
| } |
| stFldInstr->SetSrc1(IR::HelperCallOpnd::New(helper, m_func)); |
| m_lowererMD.LowerCall(stFldInstr, 0); |
| |
| return instrPrev; |
| } |
| |
| ///---------------------------------------------------------------------------- |
| /// |
| /// Lowerer::LowerStFld |
| /// |
| ///---------------------------------------------------------------------------- |
| IR::Instr * |
| Lowerer::LowerStFld( |
| IR::Instr * stFldInstr, |
| IR::JnHelperMethod helperMethod, |
| IR::JnHelperMethod polymorphicHelperMethod, |
| bool withInlineCache, |
| IR::LabelInstr *labelBailOut, |
| bool isHelper, |
| bool withPutFlags, |
| Js::PropertyOperationFlags flags) |
| { |
| if (stFldInstr->IsJitProfilingInstr()) |
| { |
| // If we want to profile then do something completely different |
| return this->LowerProfiledStFld(stFldInstr->AsJitProfilingInstr(), flags); |
| } |
| |
| IR::Instr *instrPrev = stFldInstr->m_prev; |
| |
| IR::Opnd *dst = stFldInstr->UnlinkDst(); |
| AssertMsg(dst->IsSymOpnd() && dst->AsSymOpnd()->m_sym->IsPropertySym(), "Expected property sym as dst of field store"); |
| |
| IR::Opnd * inlineCacheOpnd = nullptr; |
| if (withInlineCache) |
| { |
| AssertMsg(dst->AsSymOpnd()->IsPropertySymOpnd(), "Need property sym operand to find the inline cache"); |
| if (dst->AsPropertySymOpnd()->m_runtimePolymorphicInlineCache && polymorphicHelperMethod != helperMethod) |
| { |
| JITTimePolymorphicInlineCache * polymorphicInlineCache = dst->AsPropertySymOpnd()->m_runtimePolymorphicInlineCache; |
| helperMethod = polymorphicHelperMethod; |
| inlineCacheOpnd = IR::AddrOpnd::New(polymorphicInlineCache->GetAddr(), IR::AddrOpndKindDynamicInlineCache, this->m_func); |
| } |
| else |
| { |
| // Need to load the runtime inline cache opnd before loading any helper arguments, |
| // because LoadRuntimeInlineCacheOpnd may create labels marked as helper and cause |
| // op-helper register push/pop saves on x86, clobbering any helper arguments already pushed. |
| inlineCacheOpnd = this->LoadRuntimeInlineCacheOpnd(stFldInstr, dst->AsPropertySymOpnd(), isHelper); |
| } |
| } |
| if (withPutFlags) |
| { |
| m_lowererMD.LoadHelperArgument(stFldInstr, |
| IR::IntConstOpnd::New(static_cast<IntConstType>(flags), IRType::TyInt32, m_func, true)); |
| } |
| |
| IR::Opnd *src = stFldInstr->UnlinkSrc1(); |
| if (stFldInstr->m_opcode == Js::OpCode::StSuperFld) |
| { |
| m_lowererMD.LoadHelperArgument(stFldInstr, stFldInstr->UnlinkSrc2()); |
| } |
| |
| m_lowererMD.LoadHelperArgument(stFldInstr, src); |
| |
| this->LoadPropertySymAsArgument(stFldInstr, dst); |
| |
| if (withInlineCache) |
| { |
| Assert(inlineCacheOpnd != nullptr); |
| this->m_lowererMD.LoadHelperArgument( |
| stFldInstr, |
| IR::Opnd::CreateInlineCacheIndexOpnd(dst->AsPropertySymOpnd()->m_inlineCacheIndex, m_func)); |
| |
| this->m_lowererMD.LoadHelperArgument(stFldInstr, inlineCacheOpnd); |
| this->m_lowererMD.LoadHelperArgument(stFldInstr, LoadFunctionBodyOpnd(stFldInstr)); |
| } |
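| // As in LowerLdFld, arguments are pushed as loaded, so a cached store is invoked roughly as |
| //     helper(functionBody, inlineCache, inlineCacheIndex, instance, propertyId, value[, thisInstance][, flags]) |
| // (a sketch of a PatchPutValue-style entry point; the trailing arguments depend on |
| // withPutFlags and the opcode). |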
| |
| IR::RegOpnd *opndBase = dst->AsSymOpnd()->CreatePropertyOwnerOpnd(m_func); |
| m_lowererMD.ChangeToHelperCall(stFldInstr, helperMethod, labelBailOut, opndBase, dst->AsSymOpnd()->IsPropertySymOpnd() ? dst->AsSymOpnd()->AsPropertySymOpnd() : nullptr, isHelper); |
| |
| return instrPrev; |
| } |
| |
| IR::Instr* Lowerer::GenerateCompleteStFld(IR::Instr* instr, bool emitFastPath, IR::JnHelperMethod monoHelperAfterFastPath, IR::JnHelperMethod polyHelperAfterFastPath, |
| IR::JnHelperMethod monoHelperWithoutFastPath, IR::JnHelperMethod polyHelperWithoutFastPath, bool withPutFlags, Js::PropertyOperationFlags flags) |
| { |
| if(instr->CallsAccessor() && instr->HasBailOutInfo()) |
| { |
| IR::BailOutKind kindMinusBits = instr->GetBailOutKind() & ~IR::BailOutKindBits; |
| Assert(kindMinusBits != IR::BailOutOnImplicitCalls && kindMinusBits != IR::BailOutOnImplicitCallsPreOp); |
| } |
| |
| IR::Instr* prevInstr = instr->m_prev; |
| |
| IR::LabelInstr* labelBailOut = nullptr; |
| IR::LabelInstr* labelHelper = nullptr; |
| bool isHelper = false; |
| IR::RegOpnd* typeOpnd = nullptr; |
| if(emitFastPath && GenerateFastStFldForCustomProperty(instr, &labelHelper)) |
| { |
| if(labelHelper) |
| { |
| Assert(labelHelper->isOpHelper); |
| instr->InsertBefore(labelHelper); |
| prevInstr = this->LowerStFld(instr, monoHelperWithoutFastPath, polyHelperWithoutFastPath, true, labelBailOut, isHelper, withPutFlags, flags); |
| } |
| else |
| { |
| instr->Remove(); |
| return prevInstr; |
| } |
| } |
| else if (this->GenerateStFldWithCachedType(instr, &isHelper, &labelHelper, &typeOpnd)) |
| { |
| Assert(labelHelper == nullptr); |
| return prevInstr; |
| } |
| else if (emitFastPath) |
| { |
| if (!GenerateFastStFld(instr, monoHelperWithoutFastPath, polyHelperWithoutFastPath, &labelBailOut, typeOpnd, &isHelper, &labelHelper, withPutFlags, flags)) |
| { |
| if (labelHelper != nullptr) |
| { |
| labelHelper->isOpHelper = isHelper; |
| instr->InsertBefore(labelHelper); |
| } |
| prevInstr = this->LowerStFld(instr, monoHelperAfterFastPath, polyHelperAfterFastPath, true, labelBailOut, isHelper, withPutFlags, flags); |
| } |
| } |
| else |
| { |
| if (labelHelper != nullptr) |
| { |
| labelHelper->isOpHelper = isHelper; |
| instr->InsertBefore(labelHelper); |
| } |
| prevInstr = this->LowerStFld(instr, monoHelperWithoutFastPath, monoHelperWithoutFastPath, true, labelBailOut, isHelper, withPutFlags, flags); |
| } |
| |
| return prevInstr; |
| } |
| |
| void |
| Lowerer::GenerateDirectFieldStore(IR::Instr* instrStFld, IR::PropertySymOpnd* propertySymOpnd) |
| { |
| Func* func = instrStFld->m_func; |
| |
| IR::Opnd *opndSlotArray = this->LoadSlotArrayWithCachedLocalType(instrStFld, propertySymOpnd); |
| |
| // Store the value to the slot, getting the slot index from the cache. |
| uint16 index = propertySymOpnd->GetSlotIndex(); |
| AssertOrFailFast(index != (uint16)-1); |
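| // Schematically the store is MOV [slotArray + index * sizeof(Js::Var)], value; on x86/x64 |
| // builds with the JIT write barrier enabled it is routed through GenerateWriteBarrierAssign |
| // so the recycler's write-barrier bookkeeping observes the field write. |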
| |
| #if defined(RECYCLER_WRITE_BARRIER_JIT) && (defined(_M_IX86) || defined(_M_AMD64)) |
| if (opndSlotArray->IsRegOpnd()) |
| { |
| IR::IndirOpnd * opndDst = IR::IndirOpnd::New(opndSlotArray->AsRegOpnd(), index * sizeof(Js::Var), TyMachReg, func); |
| this->GetLowererMD()->GenerateWriteBarrierAssign(opndDst, instrStFld->GetSrc1(), instrStFld); |
| } |
| else |
| { |
| Assert(opndSlotArray->IsMemRefOpnd()); |
| IR::MemRefOpnd * opndDst = IR::MemRefOpnd::New((char*)opndSlotArray->AsMemRefOpnd()->GetMemLoc() + (index * sizeof(Js::Var)), TyMachReg, func); |
| this->GetLowererMD()->GenerateWriteBarrierAssign(opndDst, instrStFld->GetSrc1(), instrStFld); |
| } |
| #else |
| IR::Opnd *opnd; |
| |
| if (opndSlotArray->IsRegOpnd()) |
| { |
| opnd = IR::IndirOpnd::New(opndSlotArray->AsRegOpnd(), index * sizeof(Js::Var), TyMachReg, func); |
| } |
| else |
| { |
| opnd = IR::MemRefOpnd::New((char*)opndSlotArray->AsMemRefOpnd()->GetMemLoc() + (index * sizeof(Js::Var)), TyMachReg, func); |
| } |
| |
| this->InsertMove(opnd, instrStFld->GetSrc1(), instrStFld); |
| #endif |
| } |
| |
| bool |
| Lowerer::GenerateStFldWithCachedType(IR::Instr *instrStFld, bool* continueAsHelperOut, IR::LabelInstr** labelHelperOut, IR::RegOpnd** typeOpndOut) |
| { |
| IR::Instr *instr; |
| IR::RegOpnd *typeOpnd = nullptr; |
| IR::LabelInstr* labelObjCheckFailed = nullptr; |
| IR::LabelInstr *labelTypeCheckFailed = nullptr; |
| IR::LabelInstr *labelBothTypeChecksFailed = nullptr; |
| IR::LabelInstr *labelDone = nullptr; |
| |
| Assert(continueAsHelperOut != nullptr); |
| *continueAsHelperOut = false; |
| |
| Assert(labelHelperOut != nullptr); |
| *labelHelperOut = nullptr; |
| |
| Assert(typeOpndOut != nullptr); |
| *typeOpndOut = nullptr; |
| |
| Assert(instrStFld->GetDst()->IsSymOpnd()); |
| if (!instrStFld->GetDst()->AsSymOpnd()->IsPropertySymOpnd() || !instrStFld->GetDst()->AsPropertySymOpnd()->IsTypeCheckSeqCandidate()) |
| { |
| return false; |
| } |
| |
| IR::PropertySymOpnd *propertySymOpnd = instrStFld->GetDst()->AsPropertySymOpnd(); |
| |
| // If we have any object type spec info, we better not believe this is a load from prototype, since this is a store |
| // and we never share inline caches between loads and stores. |
| Assert(!propertySymOpnd->HasObjTypeSpecFldInfo() || !propertySymOpnd->IsLoadedFromProto()); |
| |
| AssertMsg(propertySymOpnd->TypeCheckSeqBitsSetOnlyIfCandidate(), "Property sym operand optimized despite not being a candidate?"); |
| |
| if (!propertySymOpnd->IsTypeCheckSeqCandidate()) |
| { |
| return false; |
| } |
| |
| if (!propertySymOpnd->IsTypeCheckSeqParticipant() && !propertySymOpnd->NeedsLocalTypeCheck()) |
| { |
| return false; |
| } |
| |
| Assert(!propertySymOpnd->NeedsTypeCheckAndBailOut() || (instrStFld->HasBailOutInfo() && IR::IsTypeCheckBailOutKind(instrStFld->GetBailOutKind()))); |
| |
| // In the backwards pass we only add guarded property operations to instructions that are not already |
| // protected by an upstream type check. |
| Assert(!propertySymOpnd->IsTypeCheckProtected() || propertySymOpnd->GetGuardedPropOps() == nullptr); |
| |
| PHASE_PRINT_TESTTRACE( |
| Js::ObjTypeSpecPhase, |
| this->m_func, |
| _u("Field store: %s, property ID: %d, func: %s, cache ID: %d, cloned cache: true, layout: %s, redundant check: %s\n"), |
| Js::OpCodeUtil::GetOpCodeName(instrStFld->m_opcode), |
| propertySymOpnd->m_sym->AsPropertySym()->m_propertyId, |
| this->m_func->GetJITFunctionBody()->GetDisplayName(), |
| propertySymOpnd->m_inlineCacheIndex, propertySymOpnd->GetCacheLayoutString(), |
| propertySymOpnd->IsTypeChecked() ? _u("true") : _u("false")); |
| |
| if (propertySymOpnd->HasFinalType() && !propertySymOpnd->IsLoadedFromProto()) |
| { |
| propertySymOpnd->UpdateSlotForFinalType(); |
| } |
| |
| Func* func = instrStFld->m_func; |
| |
| // TODO (ObjTypeSpec): If ((PropertySym*)propertySymOpnd->m_sym)->m_stackSym->m_isIntConst consider emitting a direct |
| // jump to helper or bailout. If we have a type check bailout, we could even abort compilation. |
| |
| bool hasTypeCheckBailout = instrStFld->HasBailOutInfo() && IR::IsTypeCheckBailOutKind(instrStFld->GetBailOutKind()); |
| |
| // If the type hasn't been checked upstream, see if it makes sense to check it here. |
| bool isTypeChecked = propertySymOpnd->IsTypeChecked(); |
| if (!isTypeChecked) |
| { |
| // If the initial type has been checked, we can do a hard coded type transition without any type checks |
| // (see GenerateStFldWithCachedFinalType), which is always worth doing, even if the type is not needed |
| // downstream. We're not introducing any additional bailouts. |
| if (propertySymOpnd->HasFinalType() && propertySymOpnd->HasInitialType() && !propertySymOpnd->IsTypeDead()) |
| { |
| // We have a final type in hand, so we can JIT (most of) the type transition work. |
| return this->GenerateStFldWithCachedFinalType(instrStFld, propertySymOpnd); |
| } |
| |
| if (propertySymOpnd->HasTypeMismatch()) |
| { |
| // So we have a type mismatch, which happens when the type (and the type without property if ObjTypeSpecStore |
| // is on) on this instruction didn't match the live type value according to the flow. We must have hit some |
| // stale inline cache (perhaps inlined from a different function, or on a code path not taken for a while). |
| // Either way, we know exactly what type the object must have at this point (fully determined by flow), but |
| // we don't know whether that type already has the property we're storing here. All in all, we know exactly |
| // what shape the object will have after this operation, but we're not sure what label (type) to give this |
| // shape. Thus we can simply let the fast path do its thing based on the live inline cache. The downstream |
| // instructions relying only on this shape (loads and stores) are safe, and those that need the next type |
| // (i.e. adds) will do the same thing as this instruction. |
| return false; |
| } |
| |
| // If we're still here then we must need a primary type check on this instruction to protect |
| // a sequence of field operations downstream, or a local type check for an isolated field store. |
| Assert(propertySymOpnd->NeedsPrimaryTypeCheck() || propertySymOpnd->NeedsLocalTypeCheck()); |
| |
| labelTypeCheckFailed = IR::LabelInstr::New(Js::OpCode::Label, func, true); |
| labelBothTypeChecksFailed = IR::LabelInstr::New(Js::OpCode::Label, func, true); |
| labelObjCheckFailed = hasTypeCheckBailout ? labelBothTypeChecksFailed : IR::LabelInstr::New(Js::OpCode::Label, func, true); |
| typeOpnd = this->GenerateCachedTypeCheck(instrStFld, propertySymOpnd, labelObjCheckFailed, labelBothTypeChecksFailed, labelTypeCheckFailed); |
| *typeOpndOut = typeOpnd; |
| } |
| |
| // Either we are protected by a type check upstream or we just emitted a type check above, |
| // now it's time to store the field value. |
| GenerateDirectFieldStore(instrStFld, propertySymOpnd); |
| |
| // If we are protected by a type check upstream, we don't need a bailout or helper here, delete the instruction |
| // and return "true" to indicate that we succeeded in eliminating it. |
| if (isTypeChecked) |
| { |
| Assert(labelTypeCheckFailed == nullptr && labelBothTypeChecksFailed == nullptr); |
| AssertMsg( |
| !instrStFld->HasBailOutInfo() || instrStFld->OnlyHasLazyBailOut(), |
| "Why does a direct field store have bailout that is not lazy?" |
| ); |
| |
| if (propertySymOpnd->HasInitialType() && propertySymOpnd->HasFinalType()) |
| { |
| bool isPrototypeTypeHandler = propertySymOpnd->GetInitialType()->GetTypeHandler()->IsPrototype(); |
| if (isPrototypeTypeHandler) |
| { |
| LoadScriptContext(instrStFld); |
| m_lowererMD.LoadHelperArgument(instrStFld, IR::IntConstOpnd::New(propertySymOpnd->GetPropertyId(), TyInt32, m_func, true)); |
| IR::Instr * invalidateCallInstr = IR::Instr::New(Js::OpCode::Call, m_func); |
| instrStFld->InsertBefore(invalidateCallInstr); |
| m_lowererMD.ChangeToHelperCall(invalidateCallInstr, IR::HelperInvalidateProtoCaches); |
| } |
| } |
| instrStFld->Remove(); |
| return true; |
| } |
| |
| // Otherwise, branch around the helper on successful type check. |
| labelDone = IR::LabelInstr::New(Js::OpCode::Label, func); |
| instr = IR::BranchInstr::New(LowererMD::MDUncondBranchOpcode, labelDone, func); |
| instrStFld->InsertBefore(instr); |
| |
| // On failed type check, try the type without property if we've got one. |
| instrStFld->InsertBefore(labelTypeCheckFailed); |
| |
| // Caution: this is one of the dusty corners of the JIT. We only get here if this is an isolated StFld that adds a property, or |
| // if ObjTypeSpecStore is off. In the former case no downstream operations depend on the final type produced here, so we can fall |
| // back on the live cache and helper if the type doesn't match. In the latter case we may have a cache with a type transition, which |
| // must produce a value for the type after the transition, because that type is consumed downstream. Thus, if the object's type |
| // matches neither the type with nor the type without the property we're storing, we must bail out here. |
| bool emitAddProperty = propertySymOpnd->IsMono() && propertySymOpnd->HasInitialType(); |
| |
| if (emitAddProperty) |
| { |
| GenerateCachedTypeWithoutPropertyCheck(instrStFld, propertySymOpnd, typeOpnd, labelBothTypeChecksFailed); |
| GenerateFieldStoreWithTypeChange(instrStFld, propertySymOpnd, propertySymOpnd->GetInitialType(), propertySymOpnd->GetType()); |
| instr = IR::BranchInstr::New(LowererMD::MDUncondBranchOpcode, labelDone, func); |
| instrStFld->InsertBefore(instr); |
| } |
| |
| instrStFld->InsertBefore(labelBothTypeChecksFailed); |
| instrStFld->InsertAfter(labelDone); |
| |
| if (hasTypeCheckBailout) |
| { |
| AssertMsg(PHASE_ON1(Js::ObjTypeSpecIsolatedFldOpsWithBailOutPhase) || !PHASE_ON(Js::DeadStoreTypeChecksOnStoresPhase, this->m_func) || !propertySymOpnd->IsTypeDead() || propertySymOpnd->TypeCheckRequired(), |
| "Why does a field store have a type check bailout, if its type is dead?"); |
| |
| if (instrStFld->GetBailOutInfo()->bailOutInstr != instrStFld) |
| { |
| // Set the cache index in the bailout info so that the generated code will write it into the |
| // bailout record at runtime. |
| instrStFld->GetBailOutInfo()->polymorphicCacheIndex = propertySymOpnd->m_inlineCacheIndex; |
| } |
| else |
| { |
| Assert(instrStFld->GetBailOutInfo()->polymorphicCacheIndex == propertySymOpnd->m_inlineCacheIndex); |
| } |
| |
| instrStFld->m_opcode = Js::OpCode::BailOut; |
| instrStFld->FreeSrc1(); |
| instrStFld->FreeDst(); |
| |
| this->GenerateBailOut(instrStFld); |
| return true; |
| } |
| else |
| { |
| *continueAsHelperOut = true; |
| Assert(labelObjCheckFailed != nullptr && labelObjCheckFailed != labelBothTypeChecksFailed); |
| *labelHelperOut = labelObjCheckFailed; |
| return false; |
| } |
| } |
| |
| IR::RegOpnd * |
| Lowerer::GenerateCachedTypeCheck(IR::Instr *instrChk, IR::PropertySymOpnd *propertySymOpnd, IR::LabelInstr* labelObjCheckFailed, IR::LabelInstr *labelTypeCheckFailed, IR::LabelInstr *labelSecondChance) |
| { |
| Assert(propertySymOpnd->MayNeedTypeCheckProtection()); |
| |
| Func* func = instrChk->m_func; |
| IR::RegOpnd *regOpnd = propertySymOpnd->CreatePropertyOwnerOpnd(func); |
| regOpnd->SetValueType(propertySymOpnd->GetPropertyOwnerValueType()); |
| |
| if (!regOpnd->IsNotTaggedValue()) |
| { |
| m_lowererMD.GenerateObjectTest(regOpnd, instrChk, labelObjCheckFailed); |
| } |
| |
| // Load the current object type into typeOpnd |
| IR::RegOpnd* typeOpnd = IR::RegOpnd::New(TyMachReg, func); |
| IR::Opnd *sourceType; |
| if (regOpnd->m_sym->IsConst() && !regOpnd->m_sym->IsIntConst() && !regOpnd->m_sym->IsFloatConst()) |
| { |
| sourceType = IR::MemRefOpnd::New((BYTE*)regOpnd->m_sym->GetConstAddress() + |
| Js::RecyclableObject::GetOffsetOfType(), TyMachReg, func, IR::AddrOpndKindDynamicObjectTypeRef); |
| } |
| else |
| { |
| sourceType = IR::IndirOpnd::New(regOpnd, Js::RecyclableObject::GetOffsetOfType(), TyMachReg, func); |
| } |
| InsertMove(typeOpnd, sourceType, instrChk); |
| |
| // Note: don't attempt an equivalent type check if we're doing a final type optimization or if we have a monomorphic |
| // cache and no type check bailout. In the latter case, we can wind up doing expensive failed equivalence checks |
| // repeatedly and never rejit. |
| bool doEquivTypeCheck = |
| instrChk->HasEquivalentTypeCheckBailOut() || |
| (propertySymOpnd->HasEquivalentTypeSet() && |
| !(propertySymOpnd->HasFinalType() && propertySymOpnd->HasInitialType()) && |
| !propertySymOpnd->MustDoMonoCheck() && |
| (propertySymOpnd->IsPoly() || instrChk->HasTypeCheckBailOut())); |
| Assert(doEquivTypeCheck || !instrChk->HasEquivalentTypeCheckBailOut()); |
| |
| // Create and initialize the property guard if required. Note that for non-shared monomorphic checks we can refer |
| // directly to the (pinned) type and not use a guard. |
| Js::PropertyGuard * typeCheckGuard; |
| IR::RegOpnd * polyIndexOpnd = nullptr; |
| JITTypeHolder monoType = nullptr; |
| if (doEquivTypeCheck) |
| { |
| typeCheckGuard = CreateEquivalentTypeGuardAndLinkToGuardedProperties(propertySymOpnd); |
| if (typeCheckGuard->IsPoly()) |
| { |
| Assert(propertySymOpnd->ShouldUsePolyEquivTypeGuard(this->m_func)); |
| polyIndexOpnd = this->GeneratePolymorphicTypeIndex(typeOpnd, typeCheckGuard, instrChk); |
| } |
| } |
| else |
| { |
| monoType = propertySymOpnd->MustDoMonoCheck() ? propertySymOpnd->GetMonoGuardType() : propertySymOpnd->GetType(); |
| typeCheckGuard = this->CreateTypePropertyGuardForGuardedProperties(monoType, propertySymOpnd); |
| } |
| |
| // Create the opnd we will check against the current type. |
| IR::Opnd *expectedTypeOpnd; |
| JITTypeHolder directCheckType = nullptr; |
| if (typeCheckGuard == nullptr) |
| { |
| Assert(monoType != nullptr); |
| expectedTypeOpnd = IR::AddrOpnd::New(monoType->GetAddr(), IR::AddrOpndKindDynamicType, func, true); |
| directCheckType = monoType; |
| } |
| else |
| { |
| Assert(Js::PropertyGuard::GetSizeOfValue() == static_cast<size_t>(TySize[TyMachPtr])); |
| |
| if (this->m_func->IsOOPJIT()) |
| { |
| if (polyIndexOpnd != nullptr) |
| { |
| IR::RegOpnd * baseOpnd = IR::RegOpnd::New(TyMachPtr, func); |
| this->GenerateLeaOfOOPData(baseOpnd, typeCheckGuard, Js::JitPolyEquivalentTypeGuard::GetOffsetOfPolyValues(), instrChk); |
| expectedTypeOpnd = IR::IndirOpnd::New(baseOpnd, polyIndexOpnd, m_lowererMD.GetDefaultIndirScale(), TyMachPtr, func); |
| } |
| else |
| { |
| expectedTypeOpnd = this->GenerateIndirOfOOPData(typeCheckGuard, 0, instrChk); |
| } |
| this->addToLiveOnBackEdgeSyms->Set(func->GetTopFunc()->GetNativeCodeDataSym()->m_id); |
| } |
| else |
| { |
| if (polyIndexOpnd != nullptr) |
| { |
| IR::RegOpnd * baseOpnd = IR::RegOpnd::New(TyMachPtr, func); |
| InsertMove(baseOpnd, IR::AddrOpnd::New((Js::Var)typeCheckGuard->AsPolyTypeCheckGuard()->GetAddressOfPolyValues(), IR::AddrOpndKindDynamicTypeCheckGuard, func, true), instrChk); |
| expectedTypeOpnd = IR::IndirOpnd::New(baseOpnd, polyIndexOpnd, m_lowererMD.GetDefaultIndirScale(), TyMachPtr, func); |
| } |
| else |
| { |
| expectedTypeOpnd = IR::MemRefOpnd::New((void*)(typeCheckGuard->GetAddressOfValue()), TyMachPtr, func, IR::AddrOpndKindDynamicGuardValueRef); |
| } |
| } |
| } |
| |
| if (PHASE_VERBOSE_TRACE(Js::ObjTypeSpecPhase, this->m_func)) |
| { |
| OUTPUT_VERBOSE_TRACE_FUNC(Js::ObjTypeSpecPhase, this->m_func, _u("Emitted %s type check "), |
| directCheckType != nullptr ? _u("direct") : propertySymOpnd->IsPoly() ? _u("equivalent") : _u("indirect")); |
| #if DBG |
| if (propertySymOpnd->GetGuardedPropOps() != nullptr) |
| { |
| Output::Print(_u(" guarding operations:\n ")); |
| propertySymOpnd->GetGuardedPropOps()->Dump(); |
| } |
| else |
| { |
| Output::Print(_u("\n")); |
| } |
| #else |
| Output::Print(_u("\n")); |
| #endif |
| Output::Flush(); |
| } |
| |
| if (doEquivTypeCheck) |
| { |
| // TODO (ObjTypeSpec): For isolated equivalent type checks it would be good to emit a check if the cache is still valid, and |
| // if not go straight to live polymorphic cache. This way we wouldn't have to bail out and re-JIT, and also wouldn't continue |
| // to try the equivalent type cache, miss it and do the slow comparison. This may be as easy as sticking a null on the main |
| // type in the equivalent type cache. |
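| // The equivalence path emitted below looks roughly like this: |
| // |
| //     CMP  typeReg, expectedType                ; quick check against the guarded/cached type |
| //     JNE  $checkEquivalentType |
| //     JMP  $typeCheckSucceeded |
| //   $checkEquivalentType: |
| //     CALL CheckIfTypeIsEquivalent(type, guard) ; (or the poly/fixed-field variant chosen below) |
| //     TEST result, result |
| //     JEQ  $typeCheckFailed |
| //   $typeCheckSucceeded: |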
| IR::LabelInstr* labelCheckEquivalentType = IR::LabelInstr::New(Js::OpCode::Label, func, true); |
| IR::BranchInstr* branchInstr = InsertCompareBranch(typeOpnd, expectedTypeOpnd, Js::OpCode::BrNeq_A, labelCheckEquivalentType, instrChk); |
| |
| InsertObjectPoison(regOpnd, branchInstr, instrChk, false); |
| |
| IR::LabelInstr *labelTypeCheckSucceeded = IR::LabelInstr::New(Js::OpCode::Label, func, false); |
| InsertBranch(Js::OpCode::Br, labelTypeCheckSucceeded, instrChk); |
| |
| instrChk->InsertBefore(labelCheckEquivalentType); |
| |
| IR::Opnd* typeCheckGuardOpnd = nullptr; |
| if (this->m_func->IsOOPJIT()) |
| { |
| typeCheckGuardOpnd = IR::RegOpnd::New(TyMachPtr, func); |
| this->GenerateLeaOfOOPData(typeCheckGuardOpnd->AsRegOpnd(), typeCheckGuard, 0, instrChk); |
| this->addToLiveOnBackEdgeSyms->Set(func->GetTopFunc()->GetNativeCodeDataSym()->m_id); |
| } |
| else |
| { |
| typeCheckGuardOpnd = IR::AddrOpnd::New((Js::Var)typeCheckGuard, IR::AddrOpndKindDynamicTypeCheckGuard, func, true); |
| } |
| |
| IR::JnHelperMethod helperMethod; |
| if (polyIndexOpnd != nullptr) |
| { |
| helperMethod = propertySymOpnd->HasFixedValue() ? IR::HelperCheckIfPolyTypeIsEquivalentForFixedField : IR::HelperCheckIfPolyTypeIsEquivalent; |
| |
| this->m_lowererMD.LoadHelperArgument(instrChk, polyIndexOpnd); |
| } |
| else |
| { |
| helperMethod = propertySymOpnd->HasFixedValue() ? IR::HelperCheckIfTypeIsEquivalentForFixedField : IR::HelperCheckIfTypeIsEquivalent; |
| } |
| this->m_lowererMD.LoadHelperArgument(instrChk, typeCheckGuardOpnd); |
| this->m_lowererMD.LoadHelperArgument(instrChk, typeOpnd); |
| |
| IR::RegOpnd* equivalentTypeCheckResultOpnd = IR::RegOpnd::New(TyUint8, func); |
| IR::HelperCallOpnd* equivalentTypeCheckHelperCallOpnd = IR::HelperCallOpnd::New(helperMethod, func); |
| IR::Instr* equivalentTypeCheckCallInstr = IR::Instr::New(Js::OpCode::Call, equivalentTypeCheckResultOpnd, equivalentTypeCheckHelperCallOpnd, func); |
| instrChk->InsertBefore(equivalentTypeCheckCallInstr); |
| this->m_lowererMD.LowerCall(equivalentTypeCheckCallInstr, 0); |
| |
| InsertTestBranch(equivalentTypeCheckResultOpnd, equivalentTypeCheckResultOpnd, Js::OpCode::BrEq_A, labelTypeCheckFailed, instrChk); |
| |
| // TODO (ObjTypeSpec): Consider emitting a shared bailout to which a specific bailout kind is written at runtime. This would allow us to distinguish |
| // between non-equivalent type and other cases, such as invalidated guard (due to fixed field overwrite, perhaps) or too much thrashing on the |
| // equivalent type cache. We could determine bailout kind based on the value returned by the helper. In the case of cache thrashing we could just |
| // turn off the whole optimization for a given function. |
| |
| instrChk->InsertBefore(labelTypeCheckSucceeded); |
| } |
| else |
| { |
| IR::BranchInstr* branchInstr = InsertCompareBranch(typeOpnd, expectedTypeOpnd, Js::OpCode::BrNeq_A, labelSecondChance != nullptr ? labelSecondChance : labelTypeCheckFailed, instrChk); |
| InsertObjectPoison(regOpnd, branchInstr, instrChk, false); |
| } |
| |
| // Don't pin the type for polymorphic operations. The code can successfully execute even if this type is no longer referenced by any objects, |
| // as long as there are other objects with types equivalent on the properties referenced by this code. The type is kept alive until entry point |
| // installation by the JIT transfer data, and after that by the equivalent type cache, so it will stay alive until it gets evicted |
| // from the cache. |
| if (!doEquivTypeCheck) |
| { |
| Assert(monoType != nullptr); |
| PinTypeRef(monoType, monoType.t, instrChk, propertySymOpnd->m_sym->AsPropertySym()->m_propertyId); |
| } |
| |
| return typeOpnd; |
| } |
| |
| IR::RegOpnd * |
| Lowerer::GeneratePolymorphicTypeIndex(IR::RegOpnd * typeOpnd, Js::PropertyGuard * typeCheckGuard, IR::Instr * instrInsert) |
| { |
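| // Hash the type pointer into a slot index in the poly guard's value table, using the same scheme |
| // as polymorphic inline caches: (type >> PolymorphicInlineCacheShift) & (size - 1). |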
| IR::RegOpnd * resultOpnd = IR::RegOpnd::New(TyMachReg, this->m_func); |
| InsertMove(resultOpnd, typeOpnd, instrInsert); |
| InsertShift(Js::OpCode::ShrU_A, false, resultOpnd, resultOpnd, IR::IntConstOpnd::New(PolymorphicInlineCacheShift, TyInt8, this->m_func, true), instrInsert); |
| InsertAnd(resultOpnd, resultOpnd, IR::IntConstOpnd::New(typeCheckGuard->AsPolyTypeCheckGuard()->GetSize() - 1, TyMachReg, this->m_func, true), instrInsert); |
| |
| return resultOpnd; |
| } |
| |
| void |
| Lowerer::GenerateLeaOfOOPData(IR::RegOpnd * regOpnd, void * address, int32 offset, IR::Instr * instrInsert) |
| { |
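| // In the OOP JIT the data lives in the NativeCodeData block, which is addressed off the |
| // NativeCodeDataSym base at runtime; LEA regOpnd, [base + totalOffset(address) + offset]. |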
| Func * func = instrInsert->m_func; |
| int32 dataOffset; |
| Int32Math::Add(NativeCodeData::GetDataTotalOffset(address), offset, &dataOffset); |
| InsertLea(regOpnd, |
| IR::IndirOpnd::New(IR::RegOpnd::New(func->GetTopFunc()->GetNativeCodeDataSym(), TyVar, m_func), dataOffset, TyMachPtr, |
| #if DBG |
| NativeCodeData::GetDataDescription(address, func->m_alloc), |
| #endif |
| func, true), |
| instrInsert); |
| } |
| |
| IR::Opnd * |
| Lowerer::GenerateIndirOfOOPData(void * address, int32 offset, IR::Instr * instrInsert) |
| { |
| Func * func = instrInsert->m_func; |
| int32 dataOffset; |
| Int32Math::Add(NativeCodeData::GetDataTotalOffset(address), offset, &dataOffset); |
| IR::Opnd * opnd = IR::IndirOpnd::New(IR::RegOpnd::New(func->GetTopFunc()->GetNativeCodeDataSym(), TyVar, m_func), dataOffset, TyMachPtr, |
| #if DBG |
| NativeCodeData::GetDataDescription(address, func->m_alloc), |
| #endif |
| func, true); |
| |
| return opnd; |
| } |
| |
| void |
| Lowerer::InsertObjectPoison(IR::Opnd* poisonedOpnd, IR::BranchInstr* branchInstr, IR::Instr* insertInstr, bool isForStore) |
| { |
| #ifndef _M_ARM |
| LowererMD::InsertObjectPoison(poisonedOpnd, branchInstr, insertInstr, isForStore); |
| #endif |
| } |
| |
| void |
| Lowerer::PinTypeRef(JITTypeHolder type, void* typeRef, IR::Instr* instr, Js::PropertyId propertyId) |
| { |
| this->m_func->PinTypeRef(typeRef); |
| |
| if (PHASE_TRACE(Js::TracePinnedTypesPhase, this->m_func)) |
| { |
| char16 debugStringBuffer[MAX_FUNCTION_BODY_DEBUG_STRING_SIZE]; |
| Output::Print(_u("PinnedTypes: function %s(%s) instr %s property ID %u pinned %s reference 0x%p to type 0x%p.\n"), |
| this->m_func->GetJITFunctionBody()->GetDisplayName(), this->m_func->GetDebugNumberSet(debugStringBuffer), |
| Js::OpCodeUtil::GetOpCodeName(instr->m_opcode), propertyId, |
| typeRef == type.t ? _u("strong") : _u("weak"), typeRef, type.t); |
| Output::Flush(); |
| } |
| } |
| |
| void |
| Lowerer::GenerateCachedTypeWithoutPropertyCheck(IR::Instr *instrInsert, IR::PropertySymOpnd *propertySymOpnd, IR::Opnd *typeOpnd, IR::LabelInstr *labelTypeCheckFailed) |
| { |
| Assert(propertySymOpnd->IsMonoObjTypeSpecCandidate()); |
| Assert(propertySymOpnd->HasInitialType()); |
| |
| JITTypeHolder typeWithoutProperty = propertySymOpnd->GetInitialType(); |
| |
| // We should never add properties to objects of static types. |
| Assert(Js::DynamicType::Is(typeWithoutProperty->GetTypeId())); |
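| // The check emitted below is, roughly: |
| // |
| //     MOV  typeReg, [base + offsetOfType]   ; unless a typeOpnd was passed in |
| //     CMP  typeReg, typeWithoutProperty     ; direct, or indirect through a guard's value slot |
| //     JNE  $typeCheckFailed |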
| |
| if (typeOpnd == nullptr) |
| { |
| // No opnd holding the type was passed in, so we have to load the type here. |
| IR::RegOpnd *baseOpnd = propertySymOpnd->CreatePropertyOwnerOpnd(m_func); |
| if (!baseOpnd->IsNotTaggedValue()) |
| { |
| m_lowererMD.GenerateObjectTest(baseOpnd, instrInsert, labelTypeCheckFailed); |
| } |
| IR::Opnd *opnd = IR::IndirOpnd::New(baseOpnd, Js::RecyclableObject::GetOffsetOfType(), TyMachReg, this->m_func); |
| typeOpnd = IR::RegOpnd::New(TyMachReg, this->m_func); |
| InsertMove(typeOpnd, opnd, instrInsert); |
| } |
| |
| Js::JitTypePropertyGuard* typePropertyGuard = CreateTypePropertyGuardForGuardedProperties(typeWithoutProperty, propertySymOpnd); |
| |
| IR::Opnd *expectedTypeOpnd; |
| |
| if (typePropertyGuard) |
| { |
| // A guard exists, so emit an indirect check through the guard's value slot. |
| Assert(Js::PropertyGuard::GetSizeOfValue() == static_cast<size_t>(TySize[TyMachPtr])); |
| |
| if (this->m_func->IsOOPJIT()) |
| { |
| int typeCheckGuardOffset = NativeCodeData::GetDataTotalOffset(typePropertyGuard); |
| expectedTypeOpnd = IR::IndirOpnd::New(IR::RegOpnd::New(m_func->GetTopFunc()->GetNativeCodeDataSym(), TyVar, m_func), typeCheckGuardOffset, TyMachPtr, |
| #if DBG |
| NativeCodeData::GetDataDescription(typePropertyGuard, this->m_func->m_alloc), |
| #endif |
| this->m_func, true); |
| |
| this->addToLiveOnBackEdgeSyms->Set(m_func->GetTopFunc()->GetNativeCodeDataSym()->m_id); |
| } |
| else |
| { |
| expectedTypeOpnd = IR::MemRefOpnd::New((void*)(typePropertyGuard->GetAddressOfValue()), TyMachPtr, this->m_func, IR::AddrOpndKindDynamicGuardValueRef); |
| } |
| |
| OUTPUT_VERBOSE_TRACE_FUNC(Js::ObjTypeSpecPhase, this->m_func, _u("Emitted indirect type check for type 0x%p.\n"), |
| typeWithoutProperty->GetAddr()); |
| } |
| else |
| { |
| expectedTypeOpnd = IR::AddrOpnd::New(typeWithoutProperty->GetAddr(), IR::AddrOpndKindDynamicType, m_func, true); |
| } |
| |
| InsertCompareBranch(typeOpnd, expectedTypeOpnd, Js::OpCode::BrNeq_A, labelTypeCheckFailed, instrInsert); |
| |
| // Technically, it should be enough to pin the final type, because it should keep all of its predecessors alive, but |
| // just to be extra cautious, let's pin the initial type as well. |
| PinTypeRef(typeWithoutProperty, typeWithoutProperty.t, instrInsert, propertySymOpnd->m_sym->AsPropertySym()->m_propertyId); |
| } |
| |
| bool |
| Lowerer::GenerateFixedFieldGuardCheck(IR::Instr *insertPointInstr, IR::PropertySymOpnd *propertySymOpnd, IR::LabelInstr *labelBailOut) |
| { |
| return this->GeneratePropertyGuardCheck(insertPointInstr, propertySymOpnd, labelBailOut); |
| } |
| |
| Js::JitTypePropertyGuard* |
| Lowerer::CreateTypePropertyGuardForGuardedProperties(JITTypeHolder type, IR::PropertySymOpnd* propertySymOpnd) |
| { |
| // We should always have a list of guarded properties. |
| Assert(propertySymOpnd->GetGuardedPropOps() != nullptr); |
| |
| Js::JitTypePropertyGuard* guard = nullptr; |
| |
| if (m_func->GetWorkItem()->GetJITTimeInfo()->HasSharedPropertyGuards()) |
| { |
| // Consider (ObjTypeSpec): Because we allocate these guards from the JIT thread we can't share guards for the same type across multiple functions. |
| // This leads to proliferation of property guards on the thread context. The alternative would be to pre-allocate shared (by value) guards |
| // from the thread context during work item creation. We would create too many of them (because some types aren't actually used as guards), |
| // but we could share a guard for a given type between functions. This may ultimately be better. |
| |
| LinkGuardToGuardedProperties(propertySymOpnd->GetGuardedPropOps(), [this, type, &guard](Js::PropertyId propertyId) |
| { |
| if (ShouldDoLazyFixedTypeBailout(this->m_func)) |
| { |
| this->m_func->lazyBailoutProperties.Item(propertyId); |
| } |
| else |
| { |
| if (guard == nullptr) |
| { |
| guard = this->m_func->GetOrCreateSingleTypeGuard(type->GetAddr()); |
| } |
| |
| if (PHASE_TRACE(Js::ObjTypeSpecPhase, this->m_func) || PHASE_TRACE(Js::TracePropertyGuardsPhase, this->m_func)) |
| { |
| char16 debugStringBuffer[MAX_FUNCTION_BODY_DEBUG_STRING_SIZE]; |
| Output::Print(_u("ObjTypeSpec: function %s(%s) registered guard 0x%p with value 0x%p for property ID %u.\n"), |
| m_func->GetJITFunctionBody()->GetDisplayName(), this->m_func->GetDebugNumberSet(debugStringBuffer), |
| guard, guard->GetValue(), propertyId); |
| Output::Flush(); |
| } |
| |
| this->m_func->EnsurePropertyGuardsByPropertyId(); |
| this->m_func->LinkGuardToPropertyId(propertyId, guard); |
| } |
| }); |
| } |
| |
| return guard; |
| } |
| |
| Js::JitEquivalentTypeGuard* |
| Lowerer::CreateEquivalentTypeGuardAndLinkToGuardedProperties(IR::PropertySymOpnd* propertySymOpnd) |
| { |
| // We should always have a list of guarded properties. |
| Assert(propertySymOpnd->HasObjTypeSpecFldInfo() && propertySymOpnd->HasEquivalentTypeSet() && propertySymOpnd->GetGuardedPropOps()); |
| |
| Js::JitEquivalentTypeGuard* guard; |
| if (propertySymOpnd->ShouldUsePolyEquivTypeGuard(this->m_func)) |
| { |
| Js::JitPolyEquivalentTypeGuard *polyGuard = this->m_func->CreatePolyEquivalentTypeGuard(propertySymOpnd->GetObjTypeSpecFldId()); |
| |
| // Copy types from the type set to the guard's value locations |
| Js::EquivalentTypeSet* typeSet = propertySymOpnd->GetEquivalentTypeSet(); |
| for (uint16 ti = 0; ti < typeSet->GetCount(); ti++) |
| { |
| intptr_t typeToCache = typeSet->GetType(ti)->GetAddr(); |
| polyGuard->SetPolyValue(typeToCache, polyGuard->GetIndexForValue(typeToCache)); |
| } |
| |
| guard = polyGuard; |
| } |
| else |
| { |
| guard = this->m_func->CreateEquivalentTypeGuard(propertySymOpnd->GetFirstEquivalentType(), propertySymOpnd->GetObjTypeSpecFldId()); |
| } |
| |
| if (m_func->GetWorkItem()->GetJITTimeInfo()->HasSharedPropertyGuards()) |
| { |
| LinkGuardToGuardedProperties(propertySymOpnd->GetGuardedPropOps(), [=](Js::PropertyId propertyId) |
| { |
| if (PHASE_TRACE(Js::ObjTypeSpecPhase, this->m_func) || PHASE_TRACE(Js::TracePropertyGuardsPhase, this->m_func)) |
| { |
| char16 debugStringBuffer[MAX_FUNCTION_BODY_DEBUG_STRING_SIZE]; |
| Output::Print(_u("ObjTypeSpec: function %s(%s) registered equivalent type spec guard 0x%p with value 0x%p for property ID %u.\n"), |
| this->m_func->GetJITFunctionBody()->GetDisplayName(), this->m_func->GetDebugNumberSet(debugStringBuffer), |
| guard, guard->GetValue(), propertyId); |
| Output::Flush(); |
| } |
| |
| this->m_func->EnsurePropertyGuardsByPropertyId(); |
| this->m_func->LinkGuardToPropertyId(propertyId, guard); |
| }); |
| } |
| |
| Assert(guard->GetCache() != nullptr); |
| Js::EquivalentTypeCache* cache = guard->GetCache(); |
| |
| // TODO (ObjTypeSpec): If we delayed populating the types until encoder, we could bulk allocate all equivalent type caches |
| // in one block from the heap. This would allow us to not allocate them from the native code data allocator and free them |
| // when no longer needed. However, we would need to store the global property operation ID in the guard, so we can look up |
| // the info in the encoder. Perhaps we could overload the cache pointer to be the ID until encoder. |
| |
| // Copy types from the type set to the guard's cache |
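| // (The set may hold more types than EQUIVALENT_TYPE_CACHE_SIZE; the extras simply aren't cached |
| // here and are left to the runtime equivalence check.) |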
| Js::EquivalentTypeSet* typeSet = propertySymOpnd->GetEquivalentTypeSet(); |
| uint16 cachedTypeCount = typeSet->GetCount() < EQUIVALENT_TYPE_CACHE_SIZE ? typeSet->GetCount() : EQUIVALENT_TYPE_CACHE_SIZE; |
| for (uint16 ti = 0; ti < cachedTypeCount; ti++) |
| { |
| cache->types[ti] = (Js::Type*)typeSet->GetType(ti)->GetAddr(); |
| } |
| |
| #if DBG |
| bool there_was_a_null_type = false; |
| for (uint16 ti = 0; ti < cachedTypeCount; ti++) |
| { |
| if (cache->types[ti] == nullptr) |
| { |
| there_was_a_null_type = true; |
| } |
| else if (there_was_a_null_type) |
| { |
| AssertMsg(false, "there_was_a_null_type ? something is wrong here."); |
| } |
| } |
| #endif |
| |
| // Populate property ID and slot index arrays on the guard's cache. We iterate over the |
| // bit vector of property operations protected by this guard, but several property operations |
| // may refer to the same property ID (without sharing the same cache). We skip |
| // redundant entries by maintaining a dictionary of property IDs we've already encountered. |
| |
| auto propOps = propertySymOpnd->GetGuardedPropOps(); |
| uint propOpCount = propOps->Count(); |
| |
| bool isTypeStatic = Js::StaticType::Is(propertySymOpnd->GetFirstEquivalentType()->GetTypeId()); |
| JsUtil::BaseDictionary<Js::PropertyId, Js::EquivalentPropertyEntry*, JitArenaAllocator> propIds(this->m_alloc, propOpCount); |
| Js::EquivalentPropertyEntry* properties = AnewArray(this->m_alloc, Js::EquivalentPropertyEntry, propOpCount); |
| uint propIdCount = 0; |
| |
| FOREACH_BITSET_IN_SPARSEBV(propOpId, propOps) |
| { |
| ObjTypeSpecFldInfo* propOpInfo = this->m_func->GetGlobalObjTypeSpecFldInfo(propOpId); |
| Js::PropertyId propertyId = propOpInfo->GetPropertyId(); |
| Js::PropertyIndex propOpIndex = Js::Constants::NoSlot; |
| bool hasFixedValue = propOpInfo->HasFixedValue(); |
| if (hasFixedValue) |
| { |
| cache->SetHasFixedValue(); |
| } |
| bool isLoadedFromProto = propOpInfo->IsLoadedFromProto(); |
| if (isLoadedFromProto) |
| { |
| cache->SetIsLoadedFromProto(); |
| } |
| else |
| { |
| propOpIndex = propOpInfo->GetSlotIndex(); |
| } |
| bool propOpUsesAuxSlot = propOpInfo->UsesAuxSlot(); |
| |
| AssertMsg(!isTypeStatic || !propOpInfo->IsBeingStored(), "Why are we storing a field to an object of static type?"); |
| |
| Js::EquivalentPropertyEntry* entry = nullptr; |
| if (propIds.TryGetValue(propertyId, &entry)) |
| { |
| if (propOpIndex == entry->slotIndex && propOpUsesAuxSlot == entry->isAuxSlot) |
| { |
| entry->mustBeWritable |= propOpInfo->IsBeingStored(); |
| } |
| else |
| { |
| // Due to inline cache sharing we have the same property accessed using different caches |
| // with inconsistent info. This means a guaranteed bailout on the equivalent type check. |
| // We'll just let it happen and turn off the optimization for this function. We could avoid |
| // this problem by tracking property information on the value type in glob opt. |
| if (PHASE_TRACE(Js::EquivObjTypeSpecPhase, this->m_func)) |
| { |
| char16 debugStringBuffer[MAX_FUNCTION_BODY_DEBUG_STRING_SIZE]; |
| Output::Print(_u("EquivObjTypeSpec: top function %s (%s): duplicate property clash on %d \n"), |
| m_func->GetJITFunctionBody()->GetDisplayName(), m_func->GetDebugNumberSet(debugStringBuffer), propertyId); |
| Output::Flush(); |
| } |
| Assert(propIdCount < propOpCount); |
| __analysis_assume(propIdCount < propOpCount); |
| entry = &properties[propIdCount++]; |
| entry->propertyId = propertyId; |
| entry->slotIndex = propOpIndex; |
| entry->isAuxSlot = propOpUsesAuxSlot; |
| entry->mustBeWritable = propOpInfo->IsBeingStored(); |
| } |
| } |
| else |
| { |
| Assert(propIdCount < propOpCount); |
| __analysis_assume(propIdCount < propOpCount); |
| entry = &properties[propIdCount++]; |
| entry->propertyId = propertyId; |
| entry->slotIndex = propOpIndex; |
| entry->isAuxSlot = propOpUsesAuxSlot; |
| entry->mustBeWritable = propOpInfo->IsBeingStored(); |
| propIds.AddNew(propertyId, entry); |
| } |
| } |
| NEXT_BITSET_IN_SPARSEBV; |
| |
| cache->record.propertyCount = propIdCount; |
| // Js::EquivalentPropertyEntry contains no pointers, so no fixup is needed |
| cache->record.properties = NativeCodeDataNewArrayNoFixup(this->m_func->GetNativeCodeDataAllocator(), Js::EquivalentPropertyEntry, propIdCount); |
| |
| memcpy(cache->record.properties, properties, propIdCount * sizeof(Js::EquivalentPropertyEntry)); |
| |
| return guard; |
| } |
| |
| bool |
| Lowerer::LinkCtorCacheToGuardedProperties(JITTimeConstructorCache* ctorCache) |
| { |
| // We do not always have guarded properties. If the constructor is empty and the subsequent code doesn't load or store any of |
| // the constructed object's properties, or if all inline caches are empty, then this ctor cache doesn't guard any properties. |
| if (ctorCache->GetGuardedPropOps() == nullptr) |
| { |
| return false; |
| } |
| |
| bool linked = false; |
| |
| if (this->m_func->GetWorkItem()->GetJITTimeInfo()->HasSharedPropertyGuards()) |
| { |
| linked = LinkGuardToGuardedProperties(ctorCache->GetGuardedPropOps(), [=](Js::PropertyId propertyId) |
| { |
| if (PHASE_TRACE(Js::ObjTypeSpecPhase, this->m_func) || PHASE_TRACE(Js::TracePropertyGuardsPhase, this->m_func)) |
| { |
| char16 debugStringBuffer[MAX_FUNCTION_BODY_DEBUG_STRING_SIZE]; |
| Output::Print(_u("ObjTypeSpec: function %s(%s) registered ctor cache 0x%p with value 0x%p for property %u.\n"), |
| this->m_func->GetJITFunctionBody()->GetDisplayName(), this->m_func->GetDebugNumberSet(debugStringBuffer), |
| ctorCache->GetRuntimeCacheAddr(), ctorCache->GetType()->GetAddr(), propertyId); |
| Output::Flush(); |
| } |
| |
| this->m_func->EnsureCtorCachesByPropertyId(); |
| this->m_func->LinkCtorCacheToPropertyId(propertyId, ctorCache); |
| }); |
| } |
| return linked; |
| } |
| |
| template<typename LinkFunc> |
| bool |
| Lowerer::LinkGuardToGuardedProperties(const BVSparse<JitArenaAllocator>* guardedPropOps, LinkFunc link) |
| { |
| Assert(this->m_func->GetWorkItem()->GetJITTimeInfo()->HasSharedPropertyGuards()); |
| Assert(guardedPropOps != nullptr); |
| bool linked = false; |
| |
| // For every entry in the bit vector, register the guard for the corresponding property ID. |
| FOREACH_BITSET_IN_SPARSEBV(propertyOpId, guardedPropOps) |
| { |
| ObjTypeSpecFldInfo* propertyOpInfo = this->m_func->GetGlobalObjTypeSpecFldInfo(propertyOpId); |
| Js::PropertyId propertyId = propertyOpInfo->GetPropertyId(); |
| |
| // It's okay for an equivalent type check to be registered as a guard against a property becoming read-only. This transpires if there is |
| // a different monomorphic type check upstream, which guarantees the actual type of the object needed for the hard-coded type transition, |
| // but it is later followed by a sequence of polymorphic inline caches, which do not have that type in the type set. At the beginning of |
| // that sequence we'll emit an equivalent type check to verify that the actual type has relevant properties on appropriate slots. Then in |
| // the dead store pass we'll walk upwards and encounter this check first, thus we'll drop the guarded properties accumulated thus far |
| // (including the one being added) on that check. |
| // AssertMsg(!propertyOpInfo->IsBeingAdded() || !isEquivalentTypeGuard, "Why do we have an equivalent type check protecting a property add?"); |
| |
| if (propertyOpInfo->IsBeingAdded() || propertyOpInfo->IsLoadedFromProto() || propertyOpInfo->HasFixedValue()) |
| { |
| // Equivalent object type spec only supports fixed fields on prototypes. This is to simplify the slow type equivalence check. |
| // See JavascriptOperators::CheckIfTypeIsEquivalent. |
| Assert(!propertyOpInfo->IsPoly() || (!propertyOpInfo->HasFixedValue() || propertyOpInfo->IsLoadedFromProto() || propertyOpInfo->UsesAccessor())); |
| |
| if (this->m_func->GetWorkItem()->GetJITTimeInfo()->HasSharedPropertyGuard(propertyId)) |
| { |
| link(propertyId); |
| linked = true; |
| } |
| else |
| { |
| AssertMsg(false, "Did we fail to create a shared property guard for a guarded property?"); |
| } |
| } |
| } |
| NEXT_BITSET_IN_SPARSEBV; |
| |
| return linked; |
| } |
| |
| bool |
| Lowerer::GeneratePropertyGuardCheck(IR::Instr *insertPointInstr, IR::PropertySymOpnd *propertySymOpnd, IR::LabelInstr *labelBailOut) |
| { |
| intptr_t guard = propertySymOpnd->GetPropertyGuardValueAddr(); |
| Assert(guard != 0); |
| |
| if (ShouldDoLazyFixedDataBailout(this->m_func)) |
| { |
| this->m_func->lazyBailoutProperties.Item(propertySymOpnd->GetPropertyId()); |
| return false; |
| } |
| else |
| { |
| Assert(Js::PropertyGuard::GetSizeOfValue() == static_cast<size_t>(TySize[TyMachPtr])); |
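| // The guard is invalidated by zeroing its value, so the emitted check is simply: |
| // |
| //     CMP  [guardValueAddr], 0 |
| //     JE   $bailout |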
| IR::AddrOpnd* zeroOpnd = IR::AddrOpnd::NewNull(this->m_func); |
| IR::MemRefOpnd* guardOpnd = IR::MemRefOpnd::New(guard, TyMachPtr, this->m_func, IR::AddrOpndKindDynamicGuardValueRef); |
| IR::BranchInstr *branchInstr = InsertCompareBranch(guardOpnd, zeroOpnd, Js::OpCode::BrEq_A, labelBailOut, insertPointInstr); |
| IR::RegOpnd *objPtrReg = IR::RegOpnd::New(propertySymOpnd->GetObjectSym(), TyMachPtr, m_func); |
| InsertObjectPoison(objPtrReg, branchInstr, insertPointInstr, false); |
| return true; |
| } |
| } |
| |
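| // Emits the property guard check, then loads the object's type into dst; tagged numbers get the |
| // static number type instead. A failed guard check falls into the BailOut emitted at the end. |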
| IR::Instr* |
| Lowerer::GeneratePropertyGuardCheckBailoutAndLoadType(IR::Instr *insertInstr) |
| { |
| IR::Instr* instrPrev = insertInstr->m_prev; |
| |
| IR::Opnd* numberTypeOpnd = IR::AddrOpnd::New(insertInstr->m_func->GetScriptContextInfo()->GetNumberTypeStaticAddr(), IR::AddrOpndKindDynamicType, insertInstr->m_func); |
| IR::PropertySymOpnd* propertySymOpnd = insertInstr->GetSrc1()->AsPropertySymOpnd(); |
| |
| IR::LabelInstr* labelBailout = IR::LabelInstr::New(Js::OpCode::Label, insertInstr->m_func, true); |
| IR::LabelInstr* labelContinue = IR::LabelInstr::New(Js::OpCode::Label, insertInstr->m_func); |
| IR::LabelInstr* loadNumberTypeLabel = IR::LabelInstr::New(Js::OpCode::Label, insertInstr->m_func, true); |
| |
| GeneratePropertyGuardCheck(insertInstr, propertySymOpnd, labelBailout); |
| |
| IR::RegOpnd *baseOpnd = propertySymOpnd->CreatePropertyOwnerOpnd(m_func); |
| |
| GenerateObjectTestAndTypeLoad(insertInstr, baseOpnd, insertInstr->GetDst()->AsRegOpnd(), loadNumberTypeLabel); |
| |
| insertInstr->InsertBefore(IR::BranchInstr::New(LowererMD::MDUncondBranchOpcode, labelContinue, this->m_func)); |
| |
| insertInstr->InsertBefore(loadNumberTypeLabel); |
| this->InsertMove(insertInstr->GetDst(), numberTypeOpnd, insertInstr); |
| insertInstr->InsertBefore(IR::BranchInstr::New(LowererMD::MDUncondBranchOpcode, labelContinue, this->m_func)); |
| |
| insertInstr->InsertBefore(labelBailout); |
| insertInstr->InsertAfter(labelContinue); |
| |
| insertInstr->FreeSrc1(); |
| insertInstr->m_opcode = Js::OpCode::BailOut; |
| this->GenerateBailOut(insertInstr); |
| |
| return instrPrev; |
| } |
| |
| void |
| Lowerer::GenerateAdjustSlots(IR::Instr *instrInsert, IR::PropertySymOpnd *propertySymOpnd, JITTypeHolder initialType, JITTypeHolder finalType) |
| { |
| IR::RegOpnd *baseOpnd = propertySymOpnd->CreatePropertyOwnerOpnd(m_func); |
| bool adjusted = this->GenerateAdjustBaseSlots(instrInsert, baseOpnd, initialType, finalType); |
| if (!adjusted) |
| { |
| baseOpnd->Free(m_func); |
| } |
| } |
| |
| bool |
| Lowerer::GenerateAdjustBaseSlots(IR::Instr *instrInsert, IR::RegOpnd *baseOpnd, JITTypeHolder initialType, JITTypeHolder finalType) |
| { |
| // Possibly allocate new slot capacity to accommodate a type transition. |
| AssertMsg(JITTypeHandler::IsTypeHandlerCompatibleForObjectHeaderInlining(initialType->GetTypeHandler(), finalType->GetTypeHandler()), |
| "Incompatible typeHandler transition?"); |
| |
| int oldCount = 0; |
| int newCount = 0; |
| Js::PropertyIndex inlineSlotCapacity = 0; |
| Js::PropertyIndex newInlineSlotCapacity = 0; |
| bool needSlotAdjustment = |
| JITTypeHandler::NeedSlotAdjustment(initialType->GetTypeHandler(), finalType->GetTypeHandler(), &oldCount, &newCount, &inlineSlotCapacity, &newInlineSlotCapacity); |
| |
| if (!needSlotAdjustment) |
| { |
| return false; |
| } |
| |
| // Call AdjustSlots using the new counts. Because AdjustSlots uses the "no dispose" flavor of alloc, |
| // no implicit calls are possible, and we don't need an implicit call check and bailout. |
| |
| // CALL AdjustSlots, instance, newInlineSlotCapacity, newAuxSlotCapacity |
| |
| // 3rd param: the new aux slot capacity |
| Assert(newCount > newInlineSlotCapacity); |
| const int newAuxSlotCapacity = newCount - newInlineSlotCapacity; |
| m_lowererMD.LoadHelperArgument(instrInsert, IR::IntConstOpnd::New(newAuxSlotCapacity, TyInt32, this->m_func)); |
| |
| // 2nd param: the new inline slot capacity |
| m_lowererMD.LoadHelperArgument(instrInsert, IR::IntConstOpnd::New(newInlineSlotCapacity, TyUint16, this->m_func)); |
| |
| // 1st param: the instance |
| m_lowererMD.LoadHelperArgument(instrInsert, baseOpnd); |
| |
| // CALL HelperAdjustSlots |
| IR::Opnd *opnd = IR::HelperCallOpnd::New(IR::HelperAdjustSlots, this->m_func); |
| IR::Instr *instr = IR::Instr::New(Js::OpCode::Call, this->m_func); |
| instr->SetSrc1(opnd); |
| instrInsert->InsertBefore(instr); |
| m_lowererMD.LowerCall(instr, 0); |
| |
| return true; |
| } |
| |
| void |
| Lowerer::GenerateFieldStoreWithTypeChange(IR::Instr * instrStFld, IR::PropertySymOpnd *propertySymOpnd, JITTypeHolder initialType, JITTypeHolder finalType) |
| { |
| // Adjust instance slots, if necessary. |
| this->GenerateAdjustSlots(instrStFld, propertySymOpnd, initialType, finalType); |
| |
| // We should never add properties to objects of static types. |
| Assert(Js::DynamicType::Is(finalType->GetTypeId())); |
| |
| // Let's pin the final type to be sure it's alive when we try to do the type transition. |
| PinTypeRef(finalType, finalType.t, instrStFld, propertySymOpnd->m_sym->AsPropertySym()->m_propertyId); |
| IR::Opnd *finalTypeOpnd = IR::AddrOpnd::New(finalType->GetAddr(), IR::AddrOpndKindDynamicType, instrStFld->m_func, true); |
| |
| // Set the new type. |
| IR::RegOpnd *baseOpnd = propertySymOpnd->CreatePropertyOwnerOpnd(instrStFld->m_func); |
| IR::Opnd *opnd = IR::IndirOpnd::New(baseOpnd, Js::RecyclableObject::GetOffsetOfType(), TyMachReg, instrStFld->m_func); |
| this->InsertMove(opnd, finalTypeOpnd, instrStFld); |
| |
| // Now do the store. |
| GenerateDirectFieldStore(instrStFld, propertySymOpnd); |
| |
| bool isPrototypeTypeHandler = initialType->GetTypeHandler()->IsPrototype(); |
| if (isPrototypeTypeHandler) |
| { |
| LoadScriptContext(instrStFld); |
| m_lowererMD.LoadHelperArgument(instrStFld, IR::IntConstOpnd::New(propertySymOpnd->GetPropertyId(), TyInt32, m_func, true)); |
| IR::Instr * invalidateCallInstr = IR::Instr::New(Js::OpCode::Call, m_func); |
| instrStFld->InsertBefore(invalidateCallInstr); |
| m_lowererMD.ChangeToHelperCall(invalidateCallInstr, IR::HelperInvalidateProtoCaches); |
| } |
| } |
| |
| bool |
| Lowerer::GenerateStFldWithCachedFinalType(IR::Instr * instrStFld, IR::PropertySymOpnd *propertySymOpnd) |
| { |
| // This function tries to treat a sequence of add-property stores as a single type transition. |
| Assert(propertySymOpnd == instrStFld->GetDst()->AsPropertySymOpnd()); |
| Assert(propertySymOpnd->IsMonoObjTypeSpecCandidate()); |
| Assert(propertySymOpnd->HasFinalType()); |
| Assert(propertySymOpnd->HasInitialType()); |
| |
| IR::Instr *instr; |
| IR::LabelInstr *labelBailOut = nullptr; |
| |
| AssertMsg(!propertySymOpnd->IsTypeChecked(), "Why are we doing a type transition when we have the type we want?"); |
| |
| // If the initial type must be checked here, do it. |
| Assert(instrStFld->HasBailOutInfo()); |
| labelBailOut = IR::LabelInstr::New(Js::OpCode::Label, this->m_func, true); |
| GenerateCachedTypeWithoutPropertyCheck(instrStFld, propertySymOpnd, nullptr/*typeOpnd*/, labelBailOut); |
| |
| // Do the type transition. |
| GenerateFieldStoreWithTypeChange(instrStFld, propertySymOpnd, propertySymOpnd->GetInitialType(), propertySymOpnd->GetFinalType()); |
| |
| instrStFld->FreeSrc1(); |
| instrStFld->FreeDst(); |
| |
| // Insert the bailout and let the main path branch around it. |
| IR::LabelInstr *labelDone = IR::LabelInstr::New(Js::OpCode::Label, this->m_func); |
| instr = IR::BranchInstr::New(LowererMD::MDUncondBranchOpcode, labelDone, this->m_func); |
| instrStFld->InsertBefore(instr); |
| |
| if (instrStFld->HasBailOutInfo()) |
| { |
| Assert(labelBailOut != nullptr); |
| instrStFld->InsertBefore(labelBailOut); |
| instrStFld->InsertAfter(labelDone); |
| |
| instrStFld->m_opcode = Js::OpCode::BailOut; |
| this->GenerateBailOut(instrStFld); |
| } |
| else |
| { |
| instrStFld->InsertAfter(labelDone); |
| instrStFld->Remove(); |
| } |
| |
| return true; |
| } |
| |
| ///---------------------------------------------------------------------------- |
| /// |
| /// Lowerer::LowerScopedStFld |
| /// |
| ///---------------------------------------------------------------------------- |
| IR::Instr * |
| Lowerer::LowerScopedStFld(IR::Instr * stFldInstr, IR::JnHelperMethod helperMethod, bool withInlineCache, |
| bool withPropertyOperationFlags, Js::PropertyOperationFlags flags) |
| { |
| IR::Instr *instrPrev = stFldInstr->m_prev; |
| |
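| // Helper arguments are loaded in reverse, so the helper receives them in roughly this order |
| // (bracketed ones depend on withInlineCache / withPropertyOperationFlags): |
| //     [functionBody, inlineCache, cacheIndex,] propertySym, value, defaultInstance[, scriptContext][, flags] |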
| if (withPropertyOperationFlags) |
| { |
| m_lowererMD.LoadHelperArgument(stFldInstr, |
| IR::IntConstOpnd::New(static_cast<IntConstType>(flags), IRType::TyInt32, m_func, true)); |
| } |
| |
| if(!withInlineCache) |
| { |
| LoadScriptContext(stFldInstr); |
| } |
| |
| // Pass the default instance |
| IR::Opnd *src = stFldInstr->UnlinkSrc2(); |
| m_lowererMD.LoadHelperArgument(stFldInstr, src); |
| |
| // Pass the value to store |
| src = stFldInstr->UnlinkSrc1(); |
| m_lowererMD.LoadHelperArgument(stFldInstr, src); |
| |
| // Pass the property sym to store to |
| IR::Opnd *dst = stFldInstr->UnlinkDst(); |
| AssertMsg(dst->IsSymOpnd() && dst->AsSymOpnd()->m_sym->IsPropertySym(), "Expected property sym as dst of field store"); |
| |
| this->LoadPropertySymAsArgument(stFldInstr, dst); |
| |
| if (withInlineCache) |
| { |
| AssertMsg(dst->AsSymOpnd()->IsPropertySymOpnd(), "Need property sym operand to find the inline cache"); |
| |
| m_lowererMD.LoadHelperArgument( |
| stFldInstr, |
| IR::Opnd::CreateInlineCacheIndexOpnd(dst->AsPropertySymOpnd()->m_inlineCacheIndex, m_func)); |
| |
| // Not using the polymorphic inline cache because the fast path only uses the monomorphic inline cache |
| this->m_lowererMD.LoadHelperArgument(stFldInstr, this->LoadRuntimeInlineCacheOpnd(stFldInstr, dst->AsPropertySymOpnd())); |
| |
| m_lowererMD.LoadHelperArgument(stFldInstr, LoadFunctionBodyOpnd(stFldInstr)); |
| } |
| |
| m_lowererMD.ChangeToHelperCall(stFldInstr, helperMethod); |
| |
| return instrPrev; |
| } |
| |
| ///---------------------------------------------------------------------------- |
| /// |
| /// Lowerer::LowerLoadVar |
| /// |
| ///---------------------------------------------------------------------------- |
| IR::Instr * |
| Lowerer::LowerLoadVar(IR::Instr *instr, IR::Opnd *opnd) |
| { |
| instr->SetSrc1(opnd); |
| return m_lowererMD.ChangeToAssign(instr); |
| } |
| |
| IR::Instr * |
| Lowerer::LoadHelperTemp(IR::Instr * instr, IR::Instr * instrInsert) |
| { |
| IR::Opnd *tempOpnd; |
| IR::Opnd *dst = instr->GetDst(); |
| AssertMsg(dst != nullptr, "Always expect a dst for these."); |
| AssertMsg(instr->dstIsTempNumber, "Should only be loading temps here"); |
| |
| Assert(dst->IsRegOpnd()); |
| StackSym * tempNumberSym = this->GetTempNumberSym(dst, instr->dstIsTempNumberTransferred); |
| |
| IR::Instr *load = InsertLoadStackAddress(tempNumberSym, instrInsert); |
| tempOpnd = load->GetDst(); |
| m_lowererMD.LoadHelperArgument(instrInsert, tempOpnd); |
| return load; |
| } |
| |
| void |
| Lowerer::LoadArgumentCount(IR::Instr *const instr) |
| { |
| Assert(instr); |
| Assert(instr->GetDst()); |
| Assert(!instr->GetSrc1()); |
| Assert(!instr->GetSrc2()); |
| |
| if(instr->m_func->IsInlinee()) |
| { |
| // Argument count including 'this' |
| instr->SetSrc1(IR::IntConstOpnd::New(instr->m_func->actualCount, TyUint32, instr->m_func, true)); |
| LowererMD::ChangeToAssign(instr); |
| } |
| else if (instr->m_func->GetJITFunctionBody()->IsCoroutine()) |
| { |
| IR::SymOpnd* symOpnd = LoadCallInfo(instr); |
| instr->SetSrc1(symOpnd); |
| LowererMD::ChangeToAssign(instr); |
| } |
| else |
| { |
| m_lowererMD.LoadArgumentCount(instr); |
| } |
| } |
| |
| void |
| Lowerer::LoadStackArgPtr(IR::Instr *const instr) |
| { |
| Assert(instr); |
| Assert(instr->GetDst()); |
| Assert(!instr->GetSrc1()); |
| Assert(!instr->GetSrc2()); |
| |
| if(instr->m_func->IsInlinee()) |
| { |
| // Address of argument after 'this' |
| const auto firstRealArgStackSym = instr->m_func->GetInlineeArgvSlotOpnd()->m_sym->AsStackSym(); |
| this->m_func->SetArgOffset(firstRealArgStackSym, firstRealArgStackSym->m_offset + MachPtr); |
| instr->SetSrc1(IR::SymOpnd::New(firstRealArgStackSym, TyMachPtr, instr->m_func)); |
| ChangeToLea(instr); |
| } |
| else |
| { |
| m_lowererMD.LoadStackArgPtr(instr); |
| } |
| } |
| |
| IR::Instr * |
| Lowerer::InsertLoadStackAddress(StackSym *sym, IR::Instr * instrInsert, IR::RegOpnd *optionalDstOpnd /* = nullptr */) |
| { |
| IR::RegOpnd * regDst = optionalDstOpnd != nullptr ? optionalDstOpnd : IR::RegOpnd::New(TyMachReg, this->m_func); |
| IR::SymOpnd * symSrc = IR::SymOpnd::New(sym, TyMachPtr, this->m_func); |
| return InsertLea(regDst, symSrc, instrInsert); |
| } |
| |
| void |
| Lowerer::LoadArgumentsFromFrame(IR::Instr *const instr) |
| { |
| Assert(instr); |
| Assert(instr->GetDst()); |
| Assert(!instr->GetSrc1()); |
| Assert(!instr->GetSrc2()); |
| |
| if(instr->m_func->IsInlinee()) |
| { |
| // Use the inline object meta arg slot for the arguments object |
| instr->SetSrc1(instr->m_func->GetInlineeArgumentsObjectSlotOpnd()); |
| LowererMD::ChangeToAssign(instr); |
| } |
| else |
| { |
| m_lowererMD.LoadArgumentsFromFrame(instr); |
| } |
| } |
| |
| #ifdef ENABLE_WASM |
| IR::Instr * |
| Lowerer::LowerCheckWasmSignature(IR::Instr * instr) |
| { |
| Assert(m_func->GetJITFunctionBody()->IsWasmFunction()); |
| Assert(instr->GetSrc1()); |
| Assert(instr->GetSrc2()->IsIntConstOpnd()); |
| |
| int sigId = instr->UnlinkSrc2()->AsIntConstOpnd()->AsInt32(); |
| |
| IR::Instr *instrPrev = instr->m_prev; |
| |
| IR::IndirOpnd * actualSig = IR::IndirOpnd::New(instr->UnlinkSrc1()->AsRegOpnd(), Js::WasmScriptFunction::GetOffsetOfSignature(), TyMachReg, m_func); |
| |
| Wasm::WasmSignature * expectedSig = m_func->GetJITFunctionBody()->GetAsmJsInfo()->GetWasmSignature(sigId); |
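| // Signatures with a precomputed short form (GetShortSig() != InvalidSignature) can be checked with |
| // a single inline compare; the rest go through the comparison helper. |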
| if (expectedSig->GetShortSig() == Js::Constants::InvalidSignature) |
| { |
| intptr_t sigAddr = m_func->GetJITFunctionBody()->GetAsmJsInfo()->GetWasmSignatureAddr(sigId); |
| IR::AddrOpnd * expectedOpnd = IR::AddrOpnd::New(sigAddr, IR::AddrOpndKindConstantAddress, m_func); |
| m_lowererMD.LoadHelperArgument(instr, expectedOpnd); |
| m_lowererMD.LoadHelperArgument(instr, actualSig); |
| |
| LoadScriptContext(instr); |
| |
| m_lowererMD.ChangeToHelperCall(instr, IR::HelperOp_CheckWasmSignature); |
| } |
| else |
| { |
| IR::LabelInstr * trapLabel = InsertLabel(true, instr); |
| IR::LabelInstr * labelFallThrough = InsertLabel(false, instr->m_next); |
| IR::RegOpnd * actualRegOpnd = IR::RegOpnd::New(TyMachReg, m_func); |
| InsertMove(actualRegOpnd, actualSig, trapLabel); |
| |
| IR::IndirOpnd * shortSigIndir = IR::IndirOpnd::New(actualRegOpnd, Wasm::WasmSignature::GetOffsetOfShortSig(), TyMachReg, m_func); |
| InsertCompareBranch(shortSigIndir, IR::IntConstOpnd::New(expectedSig->GetShortSig(), TyMachReg, m_func), Js::OpCode::BrNeq_A, trapLabel, trapLabel); |
| |
| InsertBranch(Js::OpCode::Br, labelFallThrough, trapLabel); |
| |
| GenerateThrow(IR::IntConstOpnd::NewFromType(SCODE_CODE(WASMERR_SignatureMismatch), TyInt32, m_func), instr); |
| |
| instr->Remove(); |
| |
| } |
| |
| return instrPrev; |
| } |
| |
| IR::Instr * |
| Lowerer::LowerLdWasmFunc(IR::Instr* instr) |
| { |
| IR::Instr * prev = instr->m_prev; |
| |
| IR::RegOpnd * tableReg = instr->UnlinkSrc1()->AsRegOpnd(); |
| |
| IR::Opnd * indexOpnd = instr->UnlinkSrc2(); |
| IR::Opnd * dst = instr->UnlinkDst(); |
| |
| IR::IndirOpnd * lengthOpnd = IR::IndirOpnd::New(tableReg, Js::WebAssemblyTable::GetOffsetOfCurrentLength(), TyUint32, m_func); |
| IR::IndirOpnd * valuesIndirOpnd = IR::IndirOpnd::New(tableReg, Js::WebAssemblyTable::GetOffsetOfValues(), TyMachPtr, m_func); |
| IR::RegOpnd * valuesRegOpnd = IR::RegOpnd::New(TyMachPtr, m_func); |
| |
| byte scale = m_lowererMD.GetDefaultIndirScale(); |
| IR::IndirOpnd * funcIndirOpnd; |
| if (indexOpnd->IsIntConstOpnd()) |
| { |
| funcIndirOpnd = IR::IndirOpnd::New(valuesRegOpnd, indexOpnd->AsIntConstOpnd()->AsInt32() << scale, TyMachPtr, m_func); |
| } |
| else |
| { |
| Assert(indexOpnd->IsRegOpnd()); |
| funcIndirOpnd = IR::IndirOpnd::New(valuesRegOpnd, indexOpnd->AsRegOpnd(), TyMachPtr, m_func); |
| funcIndirOpnd->SetScale(scale); |
| } |
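| // Emitted sequence: bounds-check the index against the table's current length, load the values |
| // array, load the function entry, and null-check it. Out-of-range and missing entries each trap |
| // with their own error code. |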
| |
| IR::LabelInstr * trapOutOfBoundsLabel = InsertLabel(true, instr); |
| IR::LabelInstr * trapLabel = InsertLabel(true, trapOutOfBoundsLabel); |
| IR::LabelInstr * doneLabel = InsertLabel(false, instr->m_next); |
| InsertCompareBranch(indexOpnd, lengthOpnd, Js::OpCode::BrGe_A, true, trapOutOfBoundsLabel, trapLabel); |
| InsertMove(valuesRegOpnd, valuesIndirOpnd, trapLabel); |
| |
| InsertMove(dst, funcIndirOpnd, trapLabel); |
| |
| InsertCompareBranch(dst, IR::IntConstOpnd::New(0, TyMachPtr, m_func), Js::OpCode::BrEq_A, trapLabel, trapLabel); |
| InsertBranch(Js::OpCode::Br, doneLabel, trapLabel); |
| |
| GenerateThrow(IR::IntConstOpnd::NewFromType(SCODE_CODE(WASMERR_NeedWebAssemblyFunc), TyInt32, m_func), trapOutOfBoundsLabel); |
| GenerateThrow(IR::IntConstOpnd::NewFromType(SCODE_CODE(WASMERR_TableIndexOutOfRange), TyInt32, m_func), instr); |
| |
| instr->Remove(); |
| |
| return prev; |
| } |
| |
| IR::Instr * |
| Lowerer::LowerGrowWasmMemory(IR::Instr* instr) |
| { |
| IR::Instr * instrPrev = m_lowererMD.LoadHelperArgument(instr, instr->UnlinkSrc2()); |
| |
| m_lowererMD.LoadHelperArgument(instr, instr->UnlinkSrc1()); |
| m_lowererMD.ChangeToHelperCall(instr, IR::HelperOp_GrowWasmMemory); |
| |
| return instrPrev; |
| } |
| #endif |
| |
| IR::Instr * |
| Lowerer::LowerUnaryHelper(IR::Instr *instr, IR::JnHelperMethod helperMethod, IR::Opnd* opndBailoutArg) |
| { |
| IR::Instr *instrPrev; |
| |
| IR::Opnd *src1 = instr->UnlinkSrc1(); |
| instrPrev = m_lowererMD.LoadHelperArgument(instr, src1); |
| |
| m_lowererMD.ChangeToHelperCall(instr, helperMethod, nullptr, opndBailoutArg); |
| |
| return instrPrev; |
| } |
| |
| // helper takes memory context as second argument |
| IR::Instr * |
| Lowerer::LowerUnaryHelperMem(IR::Instr *instr, IR::JnHelperMethod helperMethod, IR::Opnd* opndBailoutArg) |
| { |
| IR::Instr *instrPrev; |
| |
| instrPrev = LoadScriptContext(instr); |
| |
| return this->LowerUnaryHelper(instr, helperMethod, opndBailoutArg); |
| } |
| |
| IR::Instr * |
| Lowerer::LowerUnaryHelperMemWithFunctionInfo(IR::Instr *instr, IR::JnHelperMethod helperMethod) |
| { |
| m_lowererMD.LoadHelperArgument(instr, this->LoadFunctionInfoOpnd(instr)); |
| return this->LowerUnaryHelperMem(instr, helperMethod); |
| } |
| |
| IR::Instr * |
| Lowerer::LowerUnaryHelperMemWithFuncBody(IR::Instr *instr, IR::JnHelperMethod helperMethod) |
| { |
| m_lowererMD.LoadHelperArgument(instr, this->LoadFunctionBodyOpnd(instr)); |
| return this->LowerUnaryHelperMem(instr, helperMethod); |
| } |
| |
| IR::Instr * |
| Lowerer::LowerBinaryHelperMemWithFuncBody(IR::Instr *instr, IR::JnHelperMethod helperMethod) |
| { |
| AssertMsg(Js::OpCodeUtil::GetOpCodeLayout(instr->m_opcode) == Js::OpLayoutType::Reg3, "Expected a binary instruction..."); |
| |
| m_lowererMD.LoadHelperArgument(instr, this->LoadFunctionBodyOpnd(instr)); |
| return this->LowerBinaryHelperMem(instr, helperMethod); |
| } |
| |
| IR::Instr * |
| Lowerer::LowerUnaryHelperMemWithTemp(IR::Instr *instr, IR::JnHelperMethod helperMethod) |
| { |
| AssertMsg(Js::OpCodeUtil::GetOpCodeLayout(instr->m_opcode) == Js::OpLayoutType::Reg2, "Expected a unary instruction..."); |
| |
| IR::Instr * instrFirst; |
| IR::Opnd * tempOpnd; |
| if (instr->dstIsTempNumber) |
| { |
| instrFirst = this->LoadHelperTemp(instr, instr); |
| } |
| else |
| { |
| tempOpnd = IR::IntConstOpnd::New(0, TyInt32, this->m_func); |
| instrFirst = m_lowererMD.LoadHelperArgument(instr, tempOpnd); |
| } |
| |
| this->LowerUnaryHelperMem(instr, helperMethod); |
| |
| return instrFirst; |
| } |
| |
| IR::Instr * |
| Lowerer::LowerUnaryHelperMemWithTemp2(IR::Instr *instr, IR::JnHelperMethod helperMethod, IR::JnHelperMethod helperMethodWithTemp) |
| { |
| AssertMsg(Js::OpCodeUtil::GetOpCodeLayout(instr->m_opcode) == Js::OpLayoutType::Reg2, "Expected a unary instruction..."); |
| |
| if (instr->dstIsTempNumber) |
| { |
| IR::Instr * instrFirst = this->LoadHelperTemp(instr, instr); |
| this->LowerUnaryHelperMem(instr, helperMethodWithTemp); |
| return instrFirst; |
| } |
| |
| return this->LowerUnaryHelperMem(instr, helperMethod); |
| } |
| |
| IR::Instr * |
| Lowerer::LowerUnaryHelperMemWithBoolReference(IR::Instr *instr, IR::JnHelperMethod helperMethod, bool useBoolForBailout) |
| { |
| if (!this->m_func->tempSymBool) |
| { |
| this->m_func->tempSymBool = StackSym::New(TyUint8, this->m_func); |
| this->m_func->StackAllocate(this->m_func->tempSymBool, TySize[TyUint8]); |
| } |
| IR::SymOpnd * boolOpnd = IR::SymOpnd::New(this->m_func->tempSymBool, TyUint8, this->m_func); |
| IR::RegOpnd * boolRefOpnd = IR::RegOpnd::New(TyMachReg, this->m_func); |
| InsertLea(boolRefOpnd, boolOpnd, instr); |
| |
| m_lowererMD.LoadHelperArgument(instr, boolRefOpnd); |
| |
| return this->LowerUnaryHelperMem(instr, helperMethod, useBoolForBailout ? boolOpnd : nullptr); |
| } |
| |
| IR::Instr * |
| Lowerer::LowerInitCachedScope(IR::Instr* instr) |
| { |
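| // Rewrite in place as CallHelper: the helper opnd becomes src1 and the original src1 is carried |
| // along as src2, leaving the expansion to the generic CallHelper lowering. |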
| instr->m_opcode = Js::OpCode::CallHelper; |
| IR::HelperCallOpnd *helperOpnd = IR::HelperCallOpnd::New(IR::HelperOP_InitCachedScope, this->m_func); |
| IR::Opnd * src1 = instr->UnlinkSrc1(); |
| instr->SetSrc1(helperOpnd); |
| instr->SetSrc2(src1); |
| return instr; |
| } |
| |
| ///---------------------------------------------------------------------------- |
| /// |
| /// Lowerer::LowerBinaryHelper |
| /// |
| ///---------------------------------------------------------------------------- |
| IR::Instr * |
| Lowerer::LowerBinaryHelper(IR::Instr *instr, IR::JnHelperMethod helperMethod) |
| { |
| // The only case where this would still be null when we return is when |
| // helperMethod == HelperOP_CmSrEq_EmptyString, in which case we ignore |
| // instrPrev. |
| IR::Instr *instrPrev = nullptr; |
| |
| AssertMsg((Js::OpCodeUtil::GetOpCodeLayout(instr->m_opcode) == Js::OpLayoutType::Reg1Unsigned1) || |
| Js::OpCodeUtil::GetOpCodeLayout(instr->m_opcode) == Js::OpLayoutType::Reg3 || |
| Js::OpCodeUtil::GetOpCodeLayout(instr->m_opcode) == Js::OpLayoutType::Reg2 || |
| Js::OpCodeUtil::GetOpCodeLayout(instr->m_opcode) == Js::OpLayoutType::Reg2Int1 || |
| Js::OpCodeUtil::GetOpCodeLayout(instr->m_opcode) == Js::OpLayoutType::ElementU || |
| instr->m_opcode == Js::OpCode::InvalCachedScope, "Expected a binary instruction..."); |
| |
| IR::Opnd *src2 = instr->UnlinkSrc2(); |
| if (helperMethod != IR::HelperOP_CmSrEq_EmptyString) |
| instrPrev = m_lowererMD.LoadHelperArgument(instr, src2); |
| |
| IR::Opnd *src1 = instr->UnlinkSrc1(); |
| m_lowererMD.LoadHelperArgument(instr, src1); |
| |
| m_lowererMD.ChangeToHelperCall(instr, helperMethod); |
| |
| return instrPrev; |
| } |
| |
| // helper takes memory context as third argument |
| IR::Instr * |
| Lowerer::LowerBinaryHelperMem(IR::Instr *instr, IR::JnHelperMethod helperMethod) |
| { |
| IR::Instr *instrPrev; |
| |
| AssertMsg(Js::OpCodeUtil::GetOpCodeLayout(instr->m_opcode) == Js::OpLayoutType::Reg3 || |
| Js::OpCodeUtil::GetOpCodeLayout(instr->m_opcode) == Js::OpLayoutType::Reg2 || |
| Js::OpCodeUtil::GetOpCodeLayout(instr->m_opcode) == Js::OpLayoutType::Reg2Int1 || |
| Js::OpCodeUtil::GetOpCodeLayout(instr->m_opcode) == Js::OpLayoutType::Reg1Unsigned1, "Expected a binary instruction..."); |
| |
| instrPrev = LoadScriptContext(instr); |
| |
| return this->LowerBinaryHelper(instr, helperMethod); |
| } |
| |
| IR::Instr * |
| Lowerer::LowerBinaryHelperMemWithTemp(IR::Instr *instr, IR::JnHelperMethod helperMethod) |
| { |
| AssertMsg(Js::OpCodeUtil::GetOpCodeLayout(instr->m_opcode) == Js::OpLayoutType::Reg3, "Expected a binary instruction..."); |
| |
| IR::Instr * instrFirst; |
| IR::Opnd * tempOpnd; |
| |
| if (instr->dstIsTempNumber) |
| { |
| instrFirst = this->LoadHelperTemp(instr, instr); |
| } |
| else |
| { |
| tempOpnd = IR::IntConstOpnd::New(0, TyInt32, this->m_func); |
| instrFirst = m_lowererMD.LoadHelperArgument(instr, tempOpnd); |
| } |
| |
| this->LowerBinaryHelperMem(instr, helperMethod); |
| |
| return instrFirst; |
| } |
| |
| IR::Instr * |
| Lowerer::LowerBinaryHelperMemWithTemp2( |
| IR::Instr *instr, |
| IR::JnHelperMethod helperMethod, |
| IR::JnHelperMethod helperMethodWithTemp |
| ) |
| { |
| AssertMsg(Js::OpCodeUtil::GetOpCodeLayout(instr->m_opcode) == Js::OpLayoutType::Reg3, "Expected a binary instruction..."); |
| |
| if (instr->dstIsTempNumber && instr->GetDst() && instr->GetDst()->GetValueType().HasBeenNumber()) |
| { |
| IR::Instr * instrFirst = this->LoadHelperTemp(instr, instr); |
| this->LowerBinaryHelperMem(instr, helperMethodWithTemp); |
| return instrFirst; |
| } |
| |
| return this->LowerBinaryHelperMem(instr, helperMethod); |
| } |
| |
| IR::Instr * |
| Lowerer::LowerAddLeftDeadForString(IR::Instr *instr) |
| { |
| IR::Opnd * opndLeft; |
| IR::Opnd * opndRight; |
| |
| opndLeft = instr->GetSrc1(); |
| opndRight = instr->GetSrc2(); |
| |
| Assert(opndLeft && opndRight); |
| |
| bool generateFastPath = this->m_func->DoFastPaths(); |
| if (!generateFastPath |
| || !opndLeft->IsRegOpnd() |
| || !opndRight->IsRegOpnd() |
| || !instr->GetDst()->IsRegOpnd() |
| || !opndLeft->GetValueType().IsLikelyString() |
| || !opndRight->GetValueType().IsLikelyString() |
| || !opndLeft->IsEqual(instr->GetDst()->AsRegOpnd()) |
| || opndLeft->IsEqual(opndRight)) |
| { |
| return this->LowerBinaryHelperMemWithTemp(instr, IR::HelperOp_AddLeftDead); |
| } |
| |
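| // Fast path: left is an in-progress CompoundString (not finalized, owns its last block, stores |
| // direct chars, and has room in the last block) and right is a finalized single-character string. |
| // In that case we can append right's character directly into left's last block. |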
| IR::LabelInstr * labelHelper = IR::LabelInstr::New(Js::OpCode::Label, this->m_func, true); |
| IR::LabelInstr * labelFallThrough = instr->GetOrCreateContinueLabel(false); |
| IR::LabelInstr *insertBeforeInstr = labelHelper; |
| |
| instr->InsertBefore(labelHelper); |
| |
| if (!opndLeft->IsNotTaggedValue()) |
| { |
| this->m_lowererMD.GenerateObjectTest(opndLeft->AsRegOpnd(), insertBeforeInstr, labelHelper); |
| } |
| |
| IR::BranchInstr* branchInstr = InsertCompareBranch( |
| IR::IndirOpnd::New(opndLeft->AsRegOpnd(), 0, TyMachPtr, m_func), |
| this->LoadVTableValueOpnd(insertBeforeInstr, VTableValue::VtableCompoundString), |
| Js::OpCode::BrNeq_A, |
| labelHelper, |
| insertBeforeInstr); |
| |
| InsertObjectPoison(opndLeft->AsRegOpnd(), branchInstr, insertBeforeInstr, false); |
| |
| GenerateStringTest(opndRight->AsRegOpnd(), insertBeforeInstr, labelHelper); |
| |
// left->m_charLength < JavascriptString::MaxCharLength
| IR::IndirOpnd *indirLeftCharLengthOpnd = IR::IndirOpnd::New(opndLeft->AsRegOpnd(), Js::JavascriptString::GetOffsetOfcharLength(), TyUint32, m_func); |
| IR::RegOpnd *regLeftCharLengthOpnd = IR::RegOpnd::New(TyUint32, m_func); |
| InsertMove(regLeftCharLengthOpnd, indirLeftCharLengthOpnd, insertBeforeInstr); |
| InsertCompareBranch( |
| regLeftCharLengthOpnd, |
| IR::IntConstOpnd::New(Js::JavascriptString::MaxCharLength, TyUint32, m_func), |
| Js::OpCode::BrGe_A, |
| labelHelper, |
| insertBeforeInstr); |
| |
| // left->m_pszValue == NULL (!left->IsFinalized()) |
| InsertCompareBranch( |
| IR::IndirOpnd::New(opndLeft->AsRegOpnd(), offsetof(Js::JavascriptString, m_pszValue), TyMachPtr, this->m_func), |
| IR::AddrOpnd::NewNull(m_func), |
| Js::OpCode::BrNeq_A, |
| labelHelper, |
| insertBeforeInstr); |
| |
| // right->m_pszValue != NULL (right->IsFinalized()) |
| InsertCompareBranch( |
| IR::IndirOpnd::New(opndRight->AsRegOpnd(), offsetof(Js::JavascriptString, m_pszValue), TyMachPtr, this->m_func), |
| IR::AddrOpnd::NewNull(m_func), |
| Js::OpCode::BrEq_A, |
| labelHelper, |
| insertBeforeInstr); |
| |
| // if ownsLastBlock != 0 |
| InsertCompareBranch( |
| IR::IndirOpnd::New(opndLeft->AsRegOpnd(), (int32)Js::CompoundString::GetOffsetOfOwnsLastBlock(), TyUint8, m_func), |
| IR::IntConstOpnd::New(0, TyUint8, m_func), |
| Js::OpCode::BrEq_A, |
| labelHelper, |
| insertBeforeInstr); |
| |
| // if right->m_charLength == 1 |
| InsertCompareBranch(IR::IndirOpnd::New(opndRight->AsRegOpnd(), offsetof(Js::JavascriptString, m_charLength), TyUint32, m_func), |
| IR::IntConstOpnd::New(1, TyUint32, m_func), |
| Js::OpCode::BrNeq_A, labelHelper, insertBeforeInstr); |
| |
| // if left->m_directCharLength == -1 |
| InsertCompareBranch(IR::IndirOpnd::New(opndLeft->AsRegOpnd(), (int32)Js::CompoundString::GetOffsetOfDirectCharLength(), TyUint32, m_func), |
| IR::IntConstOpnd::New(UINT32_MAX, TyUint32, m_func), |
| Js::OpCode::BrNeq_A, labelHelper, insertBeforeInstr); |
| |
| // if lastBlockInfo.charLength < lastBlockInfo.charCapacity |
| IR::IndirOpnd *indirCharLength = IR::IndirOpnd::New(opndLeft->AsRegOpnd(), (int32)Js::CompoundString::GetOffsetOfLastBlockInfo() + (int32)Js::CompoundString::GetOffsetOfLastBlockInfoCharLength(), TyUint32, m_func); |
| IR::RegOpnd *charLengthOpnd = IR::RegOpnd::New(TyUint32, this->m_func); |
| InsertMove(charLengthOpnd, indirCharLength, insertBeforeInstr); |
| InsertCompareBranch(charLengthOpnd, IR::IndirOpnd::New(opndLeft->AsRegOpnd(), (int32)Js::CompoundString::GetOffsetOfLastBlockInfo() + (int32)Js::CompoundString::GetOffsetOfLastBlockInfoCharCapacity(), TyUint32, m_func), Js::OpCode::BrGe_A, labelHelper, insertBeforeInstr); |
| |
| // load c = right->m_pszValue[0] |
| IR::RegOpnd *pszValue0Opnd = IR::RegOpnd::New(TyMachPtr, this->m_func); |
| IR::IndirOpnd *indirRightPszOpnd = IR::IndirOpnd::New(opndRight->AsRegOpnd(), offsetof(Js::JavascriptString, m_pszValue), TyMachPtr, this->m_func); |
| InsertMove(pszValue0Opnd, indirRightPszOpnd, insertBeforeInstr); |
| IR::RegOpnd *charResultOpnd = IR::RegOpnd::New(TyUint16, this->m_func); |
| InsertMove(charResultOpnd, IR::IndirOpnd::New(pszValue0Opnd, 0, TyUint16, this->m_func), insertBeforeInstr); |
| |
| // lastBlockInfo.buffer[blockCharLength] = c; |
| IR::RegOpnd *baseOpnd = IR::RegOpnd::New(TyMachPtr, this->m_func); |
| InsertMove(baseOpnd, IR::IndirOpnd::New(opndLeft->AsRegOpnd(), (int32)Js::CompoundString::GetOffsetOfLastBlockInfo() + (int32)Js::CompoundString::GetOffsetOfLastBlockInfoBuffer(), TyMachPtr, m_func), insertBeforeInstr); |
| IR::IndirOpnd *indirBufferToStore = IR::IndirOpnd::New(baseOpnd, charLengthOpnd, (byte)Math::Log2(sizeof(char16)), TyUint16, m_func); |
| InsertMove(indirBufferToStore, charResultOpnd, insertBeforeInstr); |
| |
| // left->m_charLength++ |
| InsertAdd(false, indirLeftCharLengthOpnd, regLeftCharLengthOpnd, IR::IntConstOpnd::New(1, TyUint32, this->m_func), insertBeforeInstr); |
| |
| // lastBlockInfo.charLength++ |
| InsertAdd(false, indirCharLength, indirCharLength, IR::IntConstOpnd::New(1, TyUint32, this->m_func), insertBeforeInstr); |
| |
| InsertBranch(Js::OpCode::Br, labelFallThrough, insertBeforeInstr); |
| |
| return this->LowerBinaryHelperMemWithTemp(instr, IR::HelperOp_AddLeftDead); |
| } |
| |
| IR::Instr * |
| Lowerer::LowerBinaryHelperMemWithTemp3(IR::Instr *instr, IR::JnHelperMethod helperMethod, IR::JnHelperMethod helperMethodWithTemp, IR::JnHelperMethod helperMethodLeftDead) |
| { |
| IR::Opnd *src1 = instr->GetSrc1(); |
| |
| if (src1->IsRegOpnd() && src1->AsRegOpnd()->m_isTempLastUse && !src1->GetValueType().IsNotString()) |
| { |
| Assert(helperMethodLeftDead == IR::HelperOp_AddLeftDead); |
| return LowerAddLeftDeadForString(instr); |
| } |
| else |
| { |
| return this->LowerBinaryHelperMemWithTemp2(instr, helperMethod, helperMethodWithTemp); |
| } |
| } |
| |
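// Returns a stack slot big enough to hold a boxed Js::JavascriptNumber for use as a
// helper's temp-number buffer. If the temp is transferred, a fresh slot is allocated
// each time; otherwise the slot is cached on the operand's sym so repeated lowerings
// of the same sym reuse a single allocation.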
| StackSym * |
| Lowerer::GetTempNumberSym(IR::Opnd * opnd, bool isTempTransferred) |
| { |
| AssertMsg(opnd->IsRegOpnd(), "Expected regOpnd"); |
| |
| if (isTempTransferred) |
| { |
| StackSym * tempNumberSym = StackSym::New(TyMisc, m_func); |
| this->m_func->StackAllocate(tempNumberSym, sizeof(Js::JavascriptNumber)); |
| return tempNumberSym; |
| } |
| StackSym * stackSym = opnd->AsRegOpnd()->m_sym; |
| StackSym * tempNumberSym = stackSym->m_tempNumberSym; |
| |
| if (tempNumberSym == nullptr) |
| { |
| tempNumberSym = StackSym::New(TyMisc, m_func); |
| this->m_func->StackAllocate(tempNumberSym, sizeof(Js::JavascriptNumber)); |
| stackSym->m_tempNumberSym = tempNumberSym; |
| } |
| return tempNumberSym; |
| } |
| |
| void Lowerer::LowerProfiledLdElemI(IR::JitProfilingInstr *const instr) |
| { |
| Assert(instr); |
| |
| /* |
| Var ProfilingHelpers::ProfiledLdElem( |
| const Var base, |
| const Var varIndex, |
| FunctionBody *const functionBody, |
| const ProfileId profileId, |
| bool didArrayAccessHelperCall, |
| bool bailedOutOnArraySpecialization) |
| */ |
| |
| Func *const func = instr->m_func; |
| |
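// Args are pushed in reverse order; the two IntConstOpnd(false) loads below supply
// the trailing didArrayAccessHelperCall and bailedOutOnArraySpecialization booleans.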
| m_lowererMD.LoadHelperArgument(instr, IR::IntConstOpnd::New(false, TyInt8, func)); |
| m_lowererMD.LoadHelperArgument(instr, IR::IntConstOpnd::New(false, TyInt8, func)); |
| m_lowererMD.LoadHelperArgument(instr, IR::Opnd::CreateProfileIdOpnd(instr->profileId, func)); |
| m_lowererMD.LoadHelperArgument(instr, CreateFunctionBodyOpnd(func)); |
| IR::IndirOpnd *const indir = instr->UnlinkSrc1()->AsIndirOpnd(); |
| IR::Opnd *const indexOpnd = indir->UnlinkIndexOpnd(); |
Assert(indexOpnd || (indir->GetOffset() >= 0 && !Js::TaggedInt::IsOverflow(indir->GetOffset())));
| m_lowererMD.LoadHelperArgument( |
| instr, |
| indexOpnd |
| ? static_cast<IR::Opnd *>(indexOpnd) |
| : IR::AddrOpnd::New(Js::TaggedInt::ToVarUnchecked(indir->GetOffset()), IR::AddrOpndKindDynamicVar, func)); |
| m_lowererMD.LoadHelperArgument(instr, indir->UnlinkBaseOpnd()); |
| indir->Free(func); |
| |
| instr->SetSrc1(IR::HelperCallOpnd::New(IR::HelperProfiledLdElem, func)); |
| m_lowererMD.LowerCall(instr, 0); |
| } |
| |
| void Lowerer::LowerProfiledStElemI(IR::JitProfilingInstr *const instr, const Js::PropertyOperationFlags flags) |
| { |
| Assert(instr); |
| |
| /* |
| void ProfilingHelpers::ProfiledStElem( |
| const Var base, |
| const Var varIndex, |
| const Var value, |
| FunctionBody *const functionBody, |
| const ProfileId profileId, |
| const PropertyOperationFlags flags, |
| bool didArrayAccessHelperCall, |
| bool bailedOutOnArraySpecialization) |
| */ |
| |
| Func *const func = instr->m_func; |
| |
| IR::JnHelperMethod helper; |
| if(flags == Js::PropertyOperation_None) |
| { |
| helper = IR::HelperProfiledStElem_DefaultFlags; |
| } |
| else |
| { |
| helper = IR::HelperProfiledStElem; |
| m_lowererMD.LoadHelperArgument(instr, IR::IntConstOpnd::New(false, TyInt8, func)); |
| m_lowererMD.LoadHelperArgument(instr, IR::IntConstOpnd::New(false, TyInt8, func)); |
| m_lowererMD.LoadHelperArgument(instr, IR::IntConstOpnd::New(flags, TyInt32, func, true)); |
| } |
| m_lowererMD.LoadHelperArgument(instr, IR::Opnd::CreateProfileIdOpnd(instr->profileId, func)); |
| m_lowererMD.LoadHelperArgument(instr, CreateFunctionBodyOpnd(func)); |
| m_lowererMD.LoadHelperArgument(instr, instr->UnlinkSrc1()); |
| IR::IndirOpnd *const indir = instr->UnlinkDst()->AsIndirOpnd(); |
| IR::Opnd *const indexOpnd = indir->UnlinkIndexOpnd(); |
Assert(indexOpnd || (indir->GetOffset() >= 0 && !Js::TaggedInt::IsOverflow(indir->GetOffset())));
| m_lowererMD.LoadHelperArgument( |
| instr, |
| indexOpnd |
| ? static_cast<IR::Opnd *>(indexOpnd) |
| : IR::AddrOpnd::New(Js::TaggedInt::ToVarUnchecked(indir->GetOffset()), IR::AddrOpndKindDynamicVar, func)); |
| m_lowererMD.LoadHelperArgument(instr, indir->UnlinkBaseOpnd()); |
| indir->Free(func); |
| |
| instr->SetSrc1(IR::HelperCallOpnd::New(helper, func)); |
| m_lowererMD.LowerCall(instr, 0); |
| } |
| |
| ///---------------------------------------------------------------------------- |
| /// |
| /// Lowerer::LowerStElemI |
| /// |
| ///---------------------------------------------------------------------------- |
| IR::Instr * |
| Lowerer::LowerStElemI(IR::Instr * instr, Js::PropertyOperationFlags flags, bool isHelper, IR::JnHelperMethod helperMethod) |
| { |
| IR::Instr *instrPrev = instr->m_prev; |
| |
| if (instr->IsJitProfilingInstr()) |
| { |
| Assert(!isHelper); |
| LowerProfiledStElemI(instr->AsJitProfilingInstr(), flags); |
| return instrPrev; |
| } |
| |
| IR::Opnd *src1 = instr->GetSrc1(); |
| IR::Opnd *dst = instr->GetDst(); |
| IR::Opnd *newDst = nullptr; |
| IRType srcType = src1->GetType(); |
| |
| AssertMsg(dst->IsIndirOpnd(), "Expected indirOpnd on StElementI"); |
| |
| #if !FLOATVAR |
| if (dst->AsIndirOpnd()->GetBaseOpnd()->GetValueType().IsLikelyOptimizedTypedArray() && src1->IsRegOpnd()) |
| { |
// We allow the source of a typed-array StElem to be marked as temp, since we just need
// the value. However, if the array turns out not to be a typed array, or the index isn't
// valid (the value is then stored as a property), the temp needs to be boxed if it is a
// float. The BoxStackNumber helper boxes JavascriptNumbers that live on the stack.
| |
| // regVar = BoxStackNumber(src1, scriptContext) |
| IR::Instr *newInstr = IR::Instr::New(Js::OpCode::Call, this->m_func); |
| IR::RegOpnd *regVar = IR::RegOpnd::New(TyVar, this->m_func); |
| newInstr->SetDst(regVar); |
| newInstr->SetSrc1(src1); |
| instr->InsertBefore(newInstr); |
| LowerUnaryHelperMem(newInstr, IR::HelperBoxStackNumber); |
| |
| // MOV src1, regVar |
| newInstr = IR::Instr::New(Js::OpCode::Ld_A, src1, regVar, this->m_func); |
| instr->InsertBefore(m_lowererMD.ChangeToAssign(newInstr)); |
| } |
| #endif |
| |
| if(instr->HasBailOutInfo()) |
| { |
| IR::BailOutKind bailOutKind = instr->GetBailOutKind(); |
| if(bailOutKind & IR::BailOutOnInvalidatedArrayHeadSegment) |
| { |
| Assert(!(bailOutKind & IR::BailOutOnMissingValue)); |
| LowerBailOnInvalidatedArrayHeadSegment(instr, isHelper); |
| bailOutKind ^= IR::BailOutOnInvalidatedArrayHeadSegment; |
| Assert(!bailOutKind || instr->GetBailOutKind() == bailOutKind); |
| } |
| else if(bailOutKind & IR::BailOutOnMissingValue) |
| { |
| LowerBailOnCreatedMissingValue(instr, isHelper); |
| bailOutKind ^= IR::BailOutOnMissingValue; |
| Assert(!bailOutKind || instr->GetBailOutKind() == bailOutKind); |
| } |
| if(bailOutKind & IR::BailOutOnInvalidatedArrayLength) |
| { |
| LowerBailOnInvalidatedArrayLength(instr, isHelper); |
| bailOutKind ^= IR::BailOutOnInvalidatedArrayLength; |
| Assert(!bailOutKind || instr->GetBailOutKind() == bailOutKind); |
| } |
| if(bailOutKind & IR::BailOutConvertedNativeArray) |
| { |
| IR::LabelInstr *labelSkipBailOut = IR::LabelInstr::New(Js::OpCode::Label, m_func, isHelper); |
| instr->InsertAfter(labelSkipBailOut); |
| LowerOneBailOutKind(instr, IR::BailOutConvertedNativeArray, isHelper); |
| newDst = IR::RegOpnd::New(TyMachReg, m_func); |
| InsertTestBranch(newDst, newDst, Js::OpCode::BrEq_A, labelSkipBailOut, instr->m_next); |
| } |
| } |
| |
| instr->UnlinkDst(); |
| instr->UnlinkSrc1(); |
| |
| Assert( |
| helperMethod == IR::HelperOP_InitElemGetter || |
| helperMethod == IR::HelperOP_InitElemSetter || |
| helperMethod == IR::HelperOP_InitComputedProperty || |
| helperMethod == IR::HelperOp_SetElementI || |
| helperMethod == IR::HelperOp_InitClassMemberComputedName || |
| helperMethod == IR::HelperOp_InitClassMemberGetComputedName || |
| helperMethod == IR::HelperOp_InitClassMemberSetComputedName |
| ); |
| |
| IR::IndirOpnd* dstIndirOpnd = dst->AsIndirOpnd(); |
| |
| IR::Opnd *indexOpnd = dstIndirOpnd->UnlinkIndexOpnd(); |
| |
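// Pick a helper specialized on the index type (int32/uint32) and the source type
// (var/int32/float64); e.g. a TyInt32 index with a TyVar source selects
// Op_SetElementI_Int32, while a TyInt32 index with a TyInt32 source selects
// Op_SetNativeIntElementI_Int32.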
| if (indexOpnd && indexOpnd->GetType() != TyVar) |
| { |
| Assert( |
| helperMethod != IR::HelperOP_InitElemGetter && |
| helperMethod != IR::HelperOP_InitElemSetter && |
| helperMethod != IR::HelperOp_InitClassMemberGetComputedName && |
| helperMethod != IR::HelperOp_InitClassMemberSetComputedName |
| ); |
| |
| if (indexOpnd->GetType() == TyInt32) |
| { |
| helperMethod = |
| srcType == TyVar ? IR::HelperOp_SetElementI_Int32 : |
| srcType == TyInt32 ? IR::HelperOp_SetNativeIntElementI_Int32 : |
| IR::HelperOp_SetNativeFloatElementI_Int32; |
| } |
| else if (indexOpnd->GetType() == TyUint32) |
| { |
| helperMethod = |
| srcType == TyVar ? IR::HelperOp_SetElementI_UInt32 : |
| srcType == TyInt32 ? IR::HelperOp_SetNativeIntElementI_UInt32 : |
| IR::HelperOp_SetNativeFloatElementI_UInt32; |
| } |
| else |
| { |
Assert(false);
| } |
| } |
| else |
| { |
| if (indexOpnd == nullptr) |
| { |
| // No index; the offset identifies the element. |
| IntConstType offset = (IntConstType)dst->AsIndirOpnd()->GetOffset(); |
| indexOpnd = IR::AddrOpnd::NewFromNumber(offset, m_func); |
| } |
| |
| if (srcType != TyVar) |
| { |
| helperMethod = |
| srcType == TyInt32 ? IR::HelperOp_SetNativeIntElementI : IR::HelperOp_SetNativeFloatElementI; |
| } |
| } |
| |
| if (srcType == TyFloat64) |
| { |
| m_lowererMD.LoadDoubleHelperArgument(instr, src1); |
| } |
| m_lowererMD.LoadHelperArgument(instr, |
| IR::IntConstOpnd::New(static_cast<IntConstType>(flags), IRType::TyInt32, m_func, true)); |
| LoadScriptContext(instr); |
| if (srcType != TyFloat64) |
| { |
| m_lowererMD.LoadHelperArgument(instr, src1); |
| } |
| m_lowererMD.LoadHelperArgument(instr, indexOpnd); |
| |
| IR::Opnd *baseOpnd = dst->AsIndirOpnd()->UnlinkBaseOpnd(); |
| m_lowererMD.LoadHelperArgument(instr, baseOpnd); |
| |
| dst->Free(this->m_func); |
| if (newDst) |
| { |
| instr->SetDst(newDst); |
| } |
| |
| m_lowererMD.ChangeToHelperCall(instr, helperMethod, nullptr, nullptr, nullptr, isHelper); |
| |
| return instrPrev; |
| } |
| |
| ///---------------------------------------------------------------------------- |
| /// |
| /// Lowerer::LowerLdElemI |
| /// |
| ///---------------------------------------------------------------------------- |
| IR::Instr * |
| Lowerer::LowerLdElemI(IR::Instr * instr, IR::JnHelperMethod helperMethod, bool isHelper) |
| { |
| IR::Instr *instrPrev = instr->m_prev; |
| |
| if(instr->IsJitProfilingInstr()) |
| { |
| Assert(helperMethod == IR::HelperOp_GetElementI); |
| Assert(!isHelper); |
| LowerProfiledLdElemI(instr->AsJitProfilingInstr()); |
| return instrPrev; |
| } |
| |
| if (!isHelper && instr->DoStackArgsOpt()) |
| { |
| IR::LabelInstr * labelLdElem = IR::LabelInstr::New(Js::OpCode::Label, instr->m_func); |
| // Pass in null for labelFallThru to only generate the LdHeapArgument call |
| GenerateFastArgumentsLdElemI(instr, nullptr); |
| instr->InsertBefore(labelLdElem); |
| instr->UnlinkSrc1(); |
| instr->UnlinkDst(); |
| Assert(instr->HasBailOutInfo() && instr->GetBailOutKind() == IR::BailOutKind::BailOnStackArgsOutOfActualsRange); |
| instr = GenerateBailOut(instr, nullptr, nullptr); |
| return instrPrev; |
| } |
| |
| IR::Opnd *src1 = instr->UnlinkSrc1(); |
| AssertMsg(src1->IsIndirOpnd(), "Expected indirOpnd"); |
| IR::IndirOpnd *indirOpnd = src1->AsIndirOpnd(); |
| bool loadScriptContext = true; |
| IRType dstType = instr->GetDst()->GetType(); |
| |
| IR::Opnd *indexOpnd = indirOpnd->UnlinkIndexOpnd(); |
| if (indexOpnd && indexOpnd->GetType() != TyVar) |
| { |
| Assert(indexOpnd->GetType() == TyUint32 || indexOpnd->GetType() == TyInt32); |
| switch (helperMethod) |
| { |
| case IR::HelperOp_GetElementI: |
| |
| if (indexOpnd->GetType() == TyUint32) |
| { |
| helperMethod = |
| dstType == TyVar ? IR::HelperOp_GetElementI_UInt32 : |
| dstType == TyInt32 ? IR::HelperOp_GetNativeIntElementI_UInt32 : |
| IR::HelperOp_GetNativeFloatElementI_UInt32; |
| } |
| else |
| { |
| helperMethod = |
| dstType == TyVar ? IR::HelperOp_GetElementI_Int32 : |
| dstType == TyInt32 ? IR::HelperOp_GetNativeIntElementI_Int32 : |
| IR::HelperOp_GetNativeFloatElementI_Int32; |
| } |
| break; |
| |
| case IR::HelperOp_GetMethodElement: |
| |
| Assert(dstType == TyVar); |
| helperMethod = indexOpnd->GetType() == TyUint32? |
| IR::HelperOp_GetMethodElement_UInt32 : IR::HelperOp_GetMethodElement_Int32; |
| break; |
| |
| case IR::HelperOp_TypeofElem: |
| |
| Assert(dstType == TyVar); |
| helperMethod = indexOpnd->GetType() == TyUint32? |
| IR::HelperOp_TypeofElem_UInt32 : IR::HelperOp_TypeofElem_Int32; |
| break; |
| |
| default: |
| Assert(false); |
| } |
| } |
| else |
| { |
| if (indexOpnd == nullptr) |
| { |
| // No index; the offset identifies the element. |
| IntConstType offset = (IntConstType)src1->AsIndirOpnd()->GetOffset(); |
| indexOpnd = IR::AddrOpnd::NewFromNumber(offset, m_func); |
| } |
| |
| if (dstType != TyVar) |
| { |
| loadScriptContext = false; |
| helperMethod = |
| dstType == TyInt32 ? IR::HelperOp_GetNativeIntElementI : IR::HelperOp_GetNativeFloatElementI; |
| } |
| } |
| |
| // Jitted loop bodies have volatile information about values created outside the loop, so don't update array creation site |
| // profile data from jitted loop bodies |
| if(!m_func->IsLoopBody()) |
| { |
| const ValueType baseValueType(indirOpnd->GetBaseOpnd()->GetValueType()); |
| if( baseValueType.IsLikelyObject() && |
| baseValueType.GetObjectType() == ObjectType::Array && |
| !baseValueType.HasIntElements()) |
| { |
| switch(helperMethod) |
| { |
| case IR::HelperOp_GetElementI: |
| helperMethod = |
| baseValueType.HasFloatElements() |
| ? IR::HelperOp_GetElementI_ExpectingNativeFloatArray |
| : IR::HelperOp_GetElementI_ExpectingVarArray; |
| break; |
| |
| case IR::HelperOp_GetElementI_UInt32: |
| helperMethod = |
| baseValueType.HasFloatElements() |
| ? IR::HelperOp_GetElementI_UInt32_ExpectingNativeFloatArray |
| : IR::HelperOp_GetElementI_UInt32_ExpectingVarArray; |
| break; |
| |
| case IR::HelperOp_GetElementI_Int32: |
| helperMethod = |
| baseValueType.HasFloatElements() |
| ? IR::HelperOp_GetElementI_Int32_ExpectingNativeFloatArray |
| : IR::HelperOp_GetElementI_Int32_ExpectingVarArray; |
| break; |
| } |
| } |
| } |
| |
| if (loadScriptContext) |
| { |
| LoadScriptContext(instr); |
| } |
| |
| m_lowererMD.LoadHelperArgument(instr, indexOpnd); |
| |
| IR::Opnd *baseOpnd = indirOpnd->UnlinkBaseOpnd(); |
| m_lowererMD.LoadHelperArgument(instr, baseOpnd); |
| |
| src1->Free(this->m_func); |
| |
| m_lowererMD.ChangeToHelperCall(instr, helperMethod, nullptr, nullptr, nullptr, isHelper); |
| |
| return instrPrev; |
| } |
| |
| void Lowerer::LowerLdLen(IR::Instr *const instr, const bool isHelper) |
| { |
| Assert(instr); |
| Assert(instr->m_opcode == Js::OpCode::LdLen_A); |
| |
| // LdLen has persisted to this point for the sake of pre-lower opts. |
| // Turn it into a LdFld of the "length" property. |
| // This is normally a load of the internal "length" of an Array, so it probably doesn't benefit |
| // from inline caching. |
| |
| if (instr->GetSrc1()->IsRegOpnd()) |
| { |
| IR::RegOpnd * baseOpnd = instr->GetSrc1()->AsRegOpnd(); |
| PropertySym* fieldSym = PropertySym::FindOrCreate(baseOpnd->m_sym->m_id, Js::PropertyIds::length, (uint32)-1, (uint)-1, PropertyKindData, m_func); |
| instr->ReplaceSrc1(IR::SymOpnd::New(fieldSym, TyVar, m_func)); |
| } |
| LowerLdFld(instr, IR::HelperOp_GetProperty, IR::HelperOp_GetProperty, false, nullptr, isHelper); |
| } |
| |
| IR::Instr* InsertMaskableMove(bool isStore, bool generateWriteBarrier, IR::Opnd* dst, IR::Opnd* src1, IR::Opnd* src2, IR::Opnd* indexOpnd, IR::Instr* insertBeforeInstr, Lowerer* lowerer) |
| { |
| Assert(insertBeforeInstr->m_func->GetJITFunctionBody()->IsAsmJsMode()); |
| |
| // Mask with the bounds check operand to avoid speculation issues |
| const bool usesFastArray = insertBeforeInstr->m_func->GetJITFunctionBody()->UsesWAsmJsFastVirtualBuffer(); |
| IR::RegOpnd* mask = nullptr; |
| bool shouldMaskResult = false; |
| if (!usesFastArray) |
| { |
| bool shouldMask = isStore ? CONFIG_FLAG_RELEASE(PoisonTypedArrayStore) : CONFIG_FLAG_RELEASE(PoisonTypedArrayLoad); |
| if (shouldMask && indexOpnd != nullptr) |
| { |
// asm.js indices fit in 32 bits; copy the index into a temp and advance it to the
// last byte accessed (index + elemSize - 1) for the mask computation
| IR::RegOpnd* temp = IR::RegOpnd::New(indexOpnd->GetType(), insertBeforeInstr->m_func); |
| lowerer->InsertMove(temp, indexOpnd, insertBeforeInstr, false); |
| lowerer->InsertAdd(false, temp, temp, IR::IntConstOpnd::New((uint32)src1->GetSize() - 1, temp->GetType(), insertBeforeInstr->m_func, true), insertBeforeInstr); |
| |
| // For native ints and vars, we do the masking after the load; we don't do this for |
| // floats and doubles because the conversion to and from fp regs is slow. |
| shouldMaskResult = (!isStore) && IRType_IsNativeIntOrVar(src1->GetType()) && TySize[dst->GetType()] <= TySize[TyMachReg]; |
| |
| // When we do post-load masking, we AND the mask with dst, so they need to have the |
| // same type, as otherwise we'll hit asserts later on. When we do pre-load masking, |
| // we AND the mask with the index component of the indir opnd for the move from the |
| // array, so we need to align with that type instead. |
| mask = IR::RegOpnd::New((shouldMaskResult ? dst : indexOpnd)->GetType(), insertBeforeInstr->m_func); |
| |
| if (temp->GetSize() != mask->GetSize()) |
| { |
| Assert(mask->GetSize() == MachPtr); |
| Assert(src2->GetType() == TyUint32); |
| temp = temp->UseWithNewType(TyMachPtr, insertBeforeInstr->m_func)->AsRegOpnd(); |
| src2 = src2->UseWithNewType(TyMachPtr, insertBeforeInstr->m_func)->AsRegOpnd(); |
| } |
| |
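// Mask computation (sketch): with temp = index + (elemSize - 1) addressing the last
// byte accessed and src2 holding the buffer length,
//     mask = (temp - src2) >> (width - 1)    ; arithmetic shift
// is all ones when temp < src2 (in bounds) and zero otherwise, so AND-ing the index
// (pre-load) or the loaded value (post-load) with it squashes speculative OOB data.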
| lowerer->InsertSub(false, mask, temp, src2, insertBeforeInstr); |
| lowerer->InsertShift(Js::OpCode::Shr_A, false, mask, mask, IR::IntConstOpnd::New(TySize[mask->GetType()] * 8 - 1, TyInt8, insertBeforeInstr->m_func), insertBeforeInstr); |
| |
| // If we're not masking the result, we're masking the index |
| if (!shouldMaskResult) |
| { |
| lowerer->InsertAnd(indexOpnd, indexOpnd, mask, insertBeforeInstr); |
| } |
| } |
| } |
| IR::Instr* ret = lowerer->InsertMove(dst, src1, insertBeforeInstr, generateWriteBarrier); |
| if(!usesFastArray && shouldMaskResult) |
| { |
| // Mask the result if we didn't use the mask earlier to mask the index |
| lowerer->InsertAnd(dst, dst, mask, insertBeforeInstr); |
| } |
| return ret; |
| } |
| |
| IR::Instr * |
| Lowerer::LowerLdArrViewElem(IR::Instr * instr) |
| { |
| #ifdef ASMJS_PLAT |
| Assert(m_func->GetJITFunctionBody()->IsAsmJsMode()); |
| Assert(instr); |
| Assert(instr->m_opcode == Js::OpCode::LdArrViewElem); |
| |
| IR::Instr * instrPrev = instr->m_prev; |
| |
| IR::RegOpnd * indexOpnd = instr->GetSrc1()->AsIndirOpnd()->GetIndexOpnd(); |
| int32 offset = instr->GetSrc1()->AsIndirOpnd()->GetOffset(); |
| |
| IR::Opnd * dst = instr->GetDst(); |
| IR::Opnd * src1 = instr->GetSrc1(); |
| IR::Opnd * src2 = instr->GetSrc2(); |
| |
| IR::Instr * done; |
| |
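// A negative constant offset is statically out of bounds: asm.js loads then produce
// NaN for float types and 0 for integer types, so just assign the canonical
// out-of-bounds value and skip the memory access entirely.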
| if (offset < 0) |
| { |
| IR::Opnd * oobValue = nullptr; |
| if(dst->IsFloat32()) |
| { |
| oobValue = IR::MemRefOpnd::New(m_func->GetThreadContextInfo()->GetFloatNaNAddr(), TyFloat32, m_func); |
| } |
| else if(dst->IsFloat64()) |
| { |
| oobValue = IR::MemRefOpnd::New(m_func->GetThreadContextInfo()->GetDoubleNaNAddr(), TyFloat64, m_func); |
| } |
| else |
| { |
| oobValue = IR::IntConstOpnd::New(0, dst->GetType(), m_func); |
| } |
| instr->ReplaceSrc1(oobValue); |
| if (src2) |
| { |
| instr->FreeSrc2(); |
| } |
| return m_lowererMD.ChangeToAssign(instr); |
| } |
| if (indexOpnd || m_func->GetJITFunctionBody()->GetAsmJsInfo()->AccessNeedsBoundCheck((uint32)offset)) |
| { |
| // CMP indexOpnd, src2(arrSize) |
| // JA $helper |
| // JMP $load |
| // $helper: |
| // MOV dst, 0 |
| // JMP $done |
| // $load: |
| // MOV dst, src1([arrayBuffer + indexOpnd]) |
| // $done: |
| |
| Assert(!dst->IsFloat32() || src1->IsFloat32()); |
| Assert(!dst->IsFloat64() || src1->IsFloat64()); |
| done = m_lowererMD.LowerAsmJsLdElemHelper(instr); |
| } |
| else |
| { |
// any constant access below 0x10000 is safe, as that is the minimum heap size
| instr->UnlinkDst(); |
| instr->UnlinkSrc1(); |
| if (src2) |
| { |
| instr->FreeSrc2(); |
| } |
| done = instr; |
| } |
| |
| InsertMaskableMove(false, true, dst, src1, src2, indexOpnd, done, this); |
| |
| instr->Remove(); |
| return instrPrev; |
| #else |
| Assert(UNREACHED); |
| return instr; |
| #endif |
| } |
| |
| IR::Instr * |
| Lowerer::LowerWasmArrayBoundsCheck(IR::Instr * instr, IR::Opnd *addrOpnd) |
| { |
| uint32 offset = addrOpnd->AsIndirOpnd()->GetOffset(); |
| |
| // don't encode offset for wasm memory reads/writes |
| addrOpnd->AsIndirOpnd()->m_dontEncode = true; |
| |
| // if offset/size overflow the max length, throw (this also saves us from having to do int64 math) |
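// e.g. a 4-byte access at offset 0xFFFFFFFF yields constOffset == 0x100000003,
// which can never fit in a valid buffer, so we throw unconditionally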
| int64 constOffset = (int64)addrOpnd->GetSize() + (int64)offset; |
| if (constOffset >= Js::ArrayBuffer::MaxArrayBufferLength) |
| { |
| GenerateRuntimeError(instr, WASMERR_ArrayIndexOutOfRange, IR::HelperOp_WebAssemblyRuntimeError); |
| return instr; |
| } |
| else |
| { |
| return m_lowererMD.LowerWasmArrayBoundsCheck(instr, addrOpnd); |
| } |
| } |
| |
| IR::Instr * |
| Lowerer::LowerLdArrViewElemWasm(IR::Instr * instr) |
| { |
| #ifdef ENABLE_WASM |
| Assert(m_func->GetJITFunctionBody()->IsWasmFunction()); |
| Assert(instr); |
| Assert(instr->m_opcode == Js::OpCode::LdArrViewElemWasm); |
| |
| IR::Instr * instrPrev = instr->m_prev; |
| |
| IR::Opnd * dst = instr->GetDst(); |
| IR::Opnd * src1 = instr->GetSrc1(); |
| |
| Assert(!dst->IsFloat32() || src1->IsFloat32()); |
| Assert(!dst->IsFloat64() || src1->IsFloat64()); |
| |
| IR::Instr * done = LowerWasmArrayBoundsCheck(instr, src1); |
| |
| IR::Instr* newMove = InsertMaskableMove(false, true, dst, src1, instr->GetSrc2(), src1->AsIndirOpnd()->GetIndexOpnd(), done, this); |
| |
| if (m_func->GetJITFunctionBody()->UsesWAsmJsFastVirtualBuffer()) |
| { |
// We need to get an AV when accessing out-of-bounds memory, even if the dst is unused.
// Make sure LinearScan doesn't dead-store-eliminate this instruction.
| newMove->hasSideEffects = true; |
| } |
| |
| instr->Remove(); |
| return instrPrev; |
| #else |
| Assert(UNREACHED); |
| return instr; |
| #endif |
| } |
| |
| IR::Instr * |
| Lowerer::LowerMemset(IR::Instr * instr, IR::RegOpnd * helperRet) |
| { |
| IR::Opnd * dst = instr->UnlinkDst(); |
| IR::Opnd * src1 = instr->UnlinkSrc1(); |
| |
| Assert(dst->IsIndirOpnd()); |
| IR::Opnd *baseOpnd = dst->AsIndirOpnd()->UnlinkBaseOpnd(); |
| IR::Opnd *indexOpnd = dst->AsIndirOpnd()->UnlinkIndexOpnd(); |
| |
| IR::Opnd *sizeOpnd = instr->UnlinkSrc2(); |
| |
| Assert(baseOpnd); |
| Assert(sizeOpnd); |
| Assert(indexOpnd); |
| |
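// Args are pushed in reverse, so the helper is invoked as
// Op_Memset(base, index, value, size, scriptContext) and returns a byte in helperRet
// that feeds the BailOutOnMemOpError check in LowerMemOp. A non-var source value is
// first boxed with ToVar, since the helper expects a Var.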
| IR::JnHelperMethod helperMethod = IR::HelperOp_Memset; |
| IR::Instr *instrPrev = nullptr; |
| if (src1->IsRegOpnd() && !src1->IsVar()) |
| { |
| IR::RegOpnd* varOpnd = IR::RegOpnd::New(TyVar, instr->m_func); |
| instrPrev = IR::Instr::New(Js::OpCode::ToVar, varOpnd, src1, instr->m_func); |
| instr->InsertBefore(instrPrev); |
| src1 = varOpnd; |
| } |
| instr->SetDst(helperRet); |
| LoadScriptContext(instr); |
| m_lowererMD.LoadHelperArgument(instr, sizeOpnd); |
| m_lowererMD.LoadHelperArgument(instr, src1); |
| m_lowererMD.LoadHelperArgument(instr, indexOpnd); |
| m_lowererMD.LoadHelperArgument(instr, baseOpnd); |
| m_lowererMD.ChangeToHelperCall(instr, helperMethod); |
| dst->Free(m_func); |
| |
| return instrPrev; |
| } |
| |
| IR::Instr * |
| Lowerer::LowerMemcopy(IR::Instr * instr, IR::RegOpnd * helperRet) |
| { |
| IR::Opnd * dst = instr->UnlinkDst(); |
| IR::Opnd * src = instr->UnlinkSrc1(); |
| |
| Assert(dst->IsIndirOpnd()); |
| Assert(src->IsIndirOpnd()); |
| |
| IR::Opnd *dstBaseOpnd = dst->AsIndirOpnd()->UnlinkBaseOpnd(); |
| IR::Opnd *dstIndexOpnd = dst->AsIndirOpnd()->UnlinkIndexOpnd(); |
| |
| IR::Opnd *srcBaseOpnd = src->AsIndirOpnd()->UnlinkBaseOpnd(); |
| IR::Opnd *srcIndexOpnd = src->AsIndirOpnd()->UnlinkIndexOpnd(); |
| |
| IR::Opnd *sizeOpnd = instr->UnlinkSrc2(); |
| |
| Assert(sizeOpnd); |
| Assert(dstBaseOpnd); |
| Assert(dstIndexOpnd); |
| Assert(srcBaseOpnd); |
| Assert(srcIndexOpnd); |
| |
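// Args are pushed in reverse, so the helper is invoked as
// Op_Memcopy(dstBase, dstIndex, srcBase, srcIndex, size, scriptContext), returning a
// byte in helperRet that feeds the BailOutOnMemOpError check in LowerMemOp.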
| IR::JnHelperMethod helperMethod = IR::HelperOp_Memcopy; |
| |
| instr->SetDst(helperRet); |
| LoadScriptContext(instr); |
| m_lowererMD.LoadHelperArgument(instr, sizeOpnd); |
| m_lowererMD.LoadHelperArgument(instr, srcIndexOpnd); |
| m_lowererMD.LoadHelperArgument(instr, srcBaseOpnd); |
| m_lowererMD.LoadHelperArgument(instr, dstIndexOpnd); |
| m_lowererMD.LoadHelperArgument(instr, dstBaseOpnd); |
| m_lowererMD.ChangeToHelperCall(instr, helperMethod); |
| dst->Free(m_func); |
| src->Free(m_func); |
| |
| return nullptr; |
| } |
| |
| IR::Instr * |
| Lowerer::LowerMemOp(IR::Instr * instr) |
| { |
| Assert(instr->m_opcode == Js::OpCode::Memset || instr->m_opcode == Js::OpCode::Memcopy); |
| IR::Instr *instrPrev = instr->m_prev; |
| |
| IR::RegOpnd* helperRet = IR::RegOpnd::New(TyInt8, instr->m_func); |
| const bool isHelper = false; |
| AssertMsg(instr->HasBailOutInfo(), "Expected bailOut on MemOp instruction"); |
| if (instr->HasBailOutInfo()) |
| { |
| IR::BailOutKind bailOutKind = instr->GetBailOutKind(); |
| if (bailOutKind & IR::BailOutOnInvalidatedArrayHeadSegment) |
| { |
| Assert(!(bailOutKind & IR::BailOutOnMissingValue)); |
| LowerBailOnInvalidatedArrayHeadSegment(instr, isHelper); |
| bailOutKind ^= IR::BailOutOnInvalidatedArrayHeadSegment; |
| Assert(!bailOutKind || instr->GetBailOutKind() == bailOutKind); |
| } |
| else if (bailOutKind & IR::BailOutOnMissingValue) |
| { |
| LowerBailOnCreatedMissingValue(instr, isHelper); |
| bailOutKind ^= IR::BailOutOnMissingValue; |
| Assert(!bailOutKind || instr->GetBailOutKind() == bailOutKind); |
| } |
| if (bailOutKind & IR::BailOutOnInvalidatedArrayLength) |
| { |
| LowerBailOnInvalidatedArrayLength(instr, isHelper); |
| bailOutKind ^= IR::BailOutOnInvalidatedArrayLength; |
| Assert(!bailOutKind || instr->GetBailOutKind() == bailOutKind); |
| } |
| |
| AssertMsg(bailOutKind & IR::BailOutOnMemOpError, "Expected BailOutOnMemOpError on MemOp instruction"); |
| if (bailOutKind & IR::BailOutOnMemOpError) |
| { |
| // Insert or get continue label |
| IR::LabelInstr *const skipBailOutLabel = instr->GetOrCreateContinueLabel(isHelper); |
| Func *const func = instr->m_func; |
| LowerOneBailOutKind(instr, IR::BailOutOnMemOpError, isHelper); |
| IR::Instr *const insertBeforeInstr = instr->m_next; |
| |
// CMP helperRet, 0
// JNE $skipBailOut
| InsertCompareBranch( |
| helperRet, |
| IR::IntConstOpnd::New(0, TyInt8, func), |
| Js::OpCode::BrNeq_A, |
| skipBailOutLabel, |
| insertBeforeInstr); |
| |
| // (Bail out with IR::BailOutOnMemOpError) |
| // $skipBailOut: |
| |
| bailOutKind ^= IR::BailOutOnMemOpError; |
| Assert(!bailOutKind || instr->GetBailOutKind() == bailOutKind); |
| } |
| |
| instr->ClearBailOutInfo(); |
| } |
| |
| IR::Instr* newInstrPrev = nullptr; |
| if (instr->m_opcode == Js::OpCode::Memset) |
| { |
| newInstrPrev = LowerMemset(instr, helperRet); |
| } |
| else if (instr->m_opcode == Js::OpCode::Memcopy) |
| { |
| newInstrPrev = LowerMemcopy(instr, helperRet); |
| } |
| |
| if (newInstrPrev != nullptr) |
| { |
| instrPrev = newInstrPrev; |
| } |
| return instrPrev; |
| } |
| |
| IR::Instr* |
| Lowerer::LowerStAtomicsWasm(IR::Instr* instr) |
| { |
| #ifdef ENABLE_WASM |
| Assert(m_func->GetJITFunctionBody()->IsWasmFunction()); |
| Assert(instr); |
| Assert(instr->m_opcode == Js::OpCode::StAtomicWasm); |
| |
| IR::Instr * instrPrev = instr->m_prev; |
| |
| IR::Opnd * dst = instr->GetDst(); |
| IR::Opnd * src1 = instr->GetSrc1(); |
| |
| Assert(IRType_IsNativeInt(dst->GetType())); |
| |
| IR::Instr * done = LowerWasmArrayBoundsCheck(instr, dst); |
| m_lowererMD.LowerAtomicStore(dst, src1, done); |
| |
| instr->Remove(); |
| return instrPrev; |
| #else |
| Assert(UNREACHED); |
| return instr; |
| #endif |
| } |
| |
| IR::Instr * Lowerer::LowerLdAtomicsWasm(IR::Instr * instr) |
| { |
| #ifdef ENABLE_WASM |
| Assert(m_func->GetJITFunctionBody()->IsWasmFunction()); |
| Assert(instr); |
| Assert(instr->m_opcode == Js::OpCode::LdAtomicWasm); |
| |
| IR::Instr * instrPrev = instr->m_prev; |
| |
| IR::Opnd * dst = instr->GetDst(); |
| IR::Opnd * src1 = instr->GetSrc1(); |
| |
| Assert(IRType_IsNativeInt(dst->GetType())); |
| |
| IR::Instr * done = LowerWasmArrayBoundsCheck(instr, src1); |
| m_lowererMD.LowerAtomicLoad(dst, src1, done); |
| |
| instr->Remove(); |
| return instrPrev; |
| #else |
| Assert(UNREACHED); |
| return instr; |
| #endif |
| } |
| |
| IR::Instr * |
| Lowerer::LowerStArrViewElem(IR::Instr * instr) |
| { |
| #ifdef ASMJS_PLAT |
| Assert(m_func->GetJITFunctionBody()->IsAsmJsMode()); |
| Assert(instr); |
| Assert(instr->m_opcode == Js::OpCode::StArrViewElem); |
| |
| IR::Instr * instrPrev = instr->m_prev; |
| |
| IR::Opnd * dst = instr->GetDst(); |
| IR::Opnd * src1 = instr->GetSrc1(); |
| IR::Opnd * src2 = instr->GetSrc2(); |
| |
// dst's type is the element type of the array
| IR::RegOpnd * indexOpnd = dst->AsIndirOpnd()->GetIndexOpnd(); |
| int32 offset = dst->AsIndirOpnd()->GetOffset(); |
| |
| Assert(!dst->IsFloat32() || src1->IsFloat32()); |
| Assert(!dst->IsFloat64() || src1->IsFloat64()); |
| Assert(!dst->IsInt64() || src1->IsInt64()); |
| |
| IR::Instr * done; |
| |
| if (m_func->GetJITFunctionBody()->IsWasmFunction()) |
| { |
| done = LowerWasmArrayBoundsCheck(instr, dst); |
| } |
| else if (offset < 0) |
| { |
| instr->Remove(); |
| return instrPrev; |
| } |
| else if (indexOpnd || m_func->GetJITFunctionBody()->GetAsmJsInfo()->AccessNeedsBoundCheck((uint32)offset)) |
| { |
| // CMP indexOpnd, src2(arrSize) |
| // JA $helper |
| // JMP $store |
| // $helper: |
| // JMP $done |
| // $store: |
| // MOV dst([arrayBuffer + indexOpnd]), src1 |
| // $done: |
| done = m_lowererMD.LowerAsmJsStElemHelper(instr); |
| } |
| else |
| { |
// any constant access below 0x10000 is safe, as that is the minimum heap size
| instr->UnlinkDst(); |
| instr->UnlinkSrc1(); |
| done = instr; |
| if (src2) |
| { |
| instr->FreeSrc2(); |
| } |
| } |
// The wasm memory buffer is not recycler-allocated, so we shouldn't generate a write barrier
| InsertMaskableMove(true, false, dst, src1, src2, indexOpnd, done, this); |
| |
| instr->Remove(); |
| return instrPrev; |
| #else |
| Assert(UNREACHED); |
| return instr; |
| #endif |
| } |
| |
| IR::Instr * |
| Lowerer::LowerArrayDetachedCheck(IR::Instr * instr) |
| { |
| // TEST isDetached, isDetached |
| // JE Done |
| // Helper: |
| // CALL Js::Throw::OutOfMemory |
| // Done: |
| |
| Assert(m_func->GetJITFunctionBody()->IsAsmJsMode()); |
| IR::Instr * instrPrev = instr->m_prev; |
| |
| IR::Opnd * isDetachedOpnd = instr->UnlinkSrc1(); |
| Assert(isDetachedOpnd->IsIndirOpnd() || isDetachedOpnd->IsMemRefOpnd()); |
| |
| IR::LabelInstr * doneLabel = InsertLabel(false, instr->m_next); |
| IR::LabelInstr * helperLabel = InsertLabel(true, instr); |
| |
| InsertTestBranch(isDetachedOpnd, isDetachedOpnd, Js::OpCode::BrNotNeq_A, doneLabel, helperLabel); |
| |
| m_lowererMD.ChangeToHelperCall(instr, IR::HelperOp_OutOfMemoryError); |
| |
| return instrPrev; |
| } |
| |
| ///---------------------------------------------------------------------------- |
| /// |
| /// Lowerer::LowerDeleteElemI |
| /// |
| ///---------------------------------------------------------------------------- |
| IR::Instr * |
| Lowerer::LowerDeleteElemI(IR::Instr * instr, bool strictMode) |
| { |
| IR::Instr *instrPrev; |
| |
| IR::Opnd *src1 = instr->UnlinkSrc1(); |
| |
| AssertMsg(src1->IsIndirOpnd(), "Expected indirOpnd on DeleteElementI"); |
| |
| Js::PropertyOperationFlags propertyOperationFlag = Js::PropertyOperation_None; |
| |
| if (strictMode) |
| { |
| propertyOperationFlag = Js::PropertyOperation_StrictMode; |
| } |
| |
| instrPrev = instr->m_prev; |
| IR::JnHelperMethod helperMethod = IR::HelperOp_DeleteElementI; |
| IR::Opnd *indexOpnd = src1->AsIndirOpnd()->UnlinkIndexOpnd(); |
| if (indexOpnd) |
| { |
| if (indexOpnd->GetType() == TyInt32) |
| { |
| helperMethod = IR::HelperOp_DeleteElementI_Int32; |
| } |
| else if (indexOpnd->GetType() == TyUint32) |
| { |
| helperMethod = IR::HelperOp_DeleteElementI_UInt32; |
| } |
| else |
| { |
| Assert(indexOpnd->GetType() == TyVar); |
| } |
| } |
| else |
| { |
| // No index; the offset identifies the element. |
| IntConstType offset = (IntConstType)src1->AsIndirOpnd()->GetOffset(); |
| indexOpnd = IR::AddrOpnd::NewFromNumber(offset, m_func); |
| } |
| |
| m_lowererMD.LoadHelperArgument(instr, IR::IntConstOpnd::New((IntConstType)propertyOperationFlag, TyInt32, m_func, true)); |
| LoadScriptContext(instr); |
| m_lowererMD.LoadHelperArgument(instr, indexOpnd); |
| |
| IR::Opnd *baseOpnd = src1->AsIndirOpnd()->UnlinkBaseOpnd(); |
| m_lowererMD.LoadHelperArgument(instr, baseOpnd); |
| |
| src1->Free(this->m_func); |
| |
| m_lowererMD.ChangeToHelperCall(instr, helperMethod); |
| |
| return instrPrev; |
| } |
| |
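// Builds an operand for a field at 'fieldOffset' within a ForInObjectEnumerator,
// preserving the addressing mode of the enumerator operand: a stack slot (SymOpnd)
// stays a SymOpnd at an adjusted offset, and a pointer-relative IndirOpnd stays an
// IndirOpnd off the same base register.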
| IR::Opnd * |
| Lowerer::GetForInEnumeratorFieldOpnd(IR::Opnd * forInEnumeratorOpnd, uint fieldOffset, IRType type) |
| { |
| if (forInEnumeratorOpnd->IsSymOpnd()) |
| { |
| IR::SymOpnd * symOpnd = forInEnumeratorOpnd->AsSymOpnd(); |
| return IR::SymOpnd::New(symOpnd->GetStackSym(), symOpnd->m_offset + fieldOffset, type, this->m_func); |
| } |
| Assert(forInEnumeratorOpnd->IsIndirOpnd()); |
| IR::IndirOpnd * indirOpnd = forInEnumeratorOpnd->AsIndirOpnd(); |
| return IR::IndirOpnd::New(indirOpnd->GetBaseOpnd(), indirOpnd->GetOffset() + fieldOffset, type, this->m_func); |
| } |
| |
| void |
| Lowerer::GenerateFastBrBReturn(IR::Instr * instr) |
| { |
| Assert(instr->m_opcode == Js::OpCode::BrOnEmpty || instr->m_opcode == Js::OpCode::BrOnNotEmpty); |
AssertMsg(instr->GetSrc1() != nullptr && instr->GetSrc2() == nullptr, "Expected 1 src opnd on BrB");
| |
| IR::Opnd * forInEnumeratorOpnd = instr->GetSrc1(); |
| IR::LabelInstr * labelHelper = IR::LabelInstr::New(Js::OpCode::Label, this->m_func, true); |
| IR::LabelInstr * loopBody = IR::LabelInstr::New(Js::OpCode::Label, this->m_func); |
| |
| // CMP forInEnumerator->canUseJitFastPath, 0 |
| // JEQ $helper |
| IR::Opnd * canUseJitFastPathOpnd = GetForInEnumeratorFieldOpnd(forInEnumeratorOpnd, Js::ForInObjectEnumerator::GetOffsetOfCanUseJitFastPath(), TyInt8); |
| InsertCompareBranch(canUseJitFastPathOpnd, IR::IntConstOpnd::New(0, TyInt8, this->m_func), Js::OpCode::BrEq_A, labelHelper, instr); |
| |
| // MOV objectOpnd, forInEnumerator->enumerator.object |
| // MOV cachedDataTypeOpnd, forInEnumerator->enumerator.cachedDataType |
| // CMP cachedDataTypeOpnd, objectOpnd->type |
| // JNE $helper |
| IR::RegOpnd * objectOpnd = IR::RegOpnd::New(TyMachPtr, this->m_func); |
| InsertMove(objectOpnd, |
| GetForInEnumeratorFieldOpnd(forInEnumeratorOpnd, Js::ForInObjectEnumerator::GetOffsetOfEnumeratorObject(), TyMachPtr), instr); |
| IR::RegOpnd * cachedDataTypeOpnd = IR::RegOpnd::New(TyMachPtr, this->m_func); |
| InsertMove(cachedDataTypeOpnd, |
| GetForInEnumeratorFieldOpnd(forInEnumeratorOpnd, Js::ForInObjectEnumerator::GetOffsetOfEnumeratorInitialType(), TyMachPtr), instr); |
| InsertCompareBranch(cachedDataTypeOpnd, IR::IndirOpnd::New(objectOpnd, Js::DynamicObject::GetOffsetOfType(), TyMachPtr, this->m_func), |
| Js::OpCode::BrNeq_A, labelHelper, instr); |
| |
| // MOV cachedDataOpnd, forInEnumeratorOpnd->enumerator.cachedData |
| // MOV enumeratedCountOpnd, forInEnumeratorOpnd->enumerator.enumeratedCount |
| // CMP enumeratedCountOpnd, cachedDataOpnd->cachedCount |
| // JLT $loopBody |
| IR::RegOpnd * cachedDataOpnd = IR::RegOpnd::New(TyMachPtr, m_func); |
| InsertMove(cachedDataOpnd, |
| GetForInEnumeratorFieldOpnd(forInEnumeratorOpnd, Js::ForInObjectEnumerator::GetOffsetOfEnumeratorCachedData(), TyMachPtr), instr); |
| IR::RegOpnd * enumeratedCountOpnd = IR::RegOpnd::New(TyUint32, m_func); |
| InsertMove(enumeratedCountOpnd, |
| GetForInEnumeratorFieldOpnd(forInEnumeratorOpnd, Js::ForInObjectEnumerator::GetOffsetOfEnumeratorEnumeratedCount(), TyUint32), instr); |
| InsertCompareBranch(enumeratedCountOpnd, |
| IR::IndirOpnd::New(cachedDataOpnd, Js::DynamicObjectPropertyEnumerator::GetOffsetOfCachedDataCachedCount(), TyUint32, this->m_func), |
| Js::OpCode::BrLt_A, loopBody, instr); |
| |
// CMP cachedData->completed, 0
// JNE $after (BrOnNotEmpty) or $target (BrOnEmpty)
// JMP $helper
| IR::LabelInstr * labelAfter = instr->GetOrCreateContinueLabel(); |
| InsertCompareBranch( |
| IR::IndirOpnd::New(cachedDataOpnd, Js::DynamicObjectPropertyEnumerator::GetOffsetOfCachedDataCompleted(), TyInt8, this->m_func), |
| IR::IntConstOpnd::New(0, TyInt8, this->m_func), |
| Js::OpCode::BrNeq_A, instr->m_opcode == Js::OpCode::BrOnNotEmpty ? labelAfter : instr->AsBranchInstr()->GetTarget(), instr); |
| InsertBranch(Js::OpCode::Br, labelHelper, instr); |
| |
| // $loopBody: |
| instr->InsertBefore(loopBody); |
| |
| IR::Opnd * opndDst = instr->GetDst(); // ForIn result propertyString |
| Assert(opndDst->IsRegOpnd()); |
| |
| // MOV stringsOpnd, cachedData->strings |
| // MOV opndDst, stringsOpnd[enumeratedCount] |
| IR::RegOpnd * stringsOpnd = IR::RegOpnd::New(TyMachPtr, m_func); |
| InsertMove(stringsOpnd, |
| IR::IndirOpnd::New(cachedDataOpnd, Js::DynamicObjectPropertyEnumerator::GetOffsetOfCachedDataStrings(), TyMachPtr, this->m_func), instr); |
| InsertMove(opndDst, |
| IR::IndirOpnd::New(stringsOpnd, enumeratedCountOpnd, m_lowererMD.GetDefaultIndirScale(), TyVar, this->m_func), instr); |
| |
| // MOV indexesOpnd, cachedData->indexes |
| // MOV objectIndexOpnd, indexesOpnd[enumeratedCount] |
| // MOV forInEnumeratorOpnd->enumerator.objectIndex, objectIndexOpnd |
| IR::RegOpnd * indexesOpnd = IR::RegOpnd::New(TyMachPtr, m_func); |
| InsertMove(indexesOpnd, |
| IR::IndirOpnd::New(cachedDataOpnd, Js::DynamicObjectPropertyEnumerator::GetOffsetOfCachedDataIndexes(), TyMachPtr, this->m_func), instr); |
| IR::RegOpnd * objectIndexOpnd = IR::RegOpnd::New(TyUint32, m_func); |
| InsertMove(objectIndexOpnd, |
| IR::IndirOpnd::New(indexesOpnd, enumeratedCountOpnd, IndirScale4, TyUint32, this->m_func), instr); |
| InsertMove(GetForInEnumeratorFieldOpnd(forInEnumeratorOpnd, Js::ForInObjectEnumerator::GetOffsetOfEnumeratorObjectIndex(), TyUint32), |
| objectIndexOpnd, instr); |
| // INC enumeratedCountOpnd |
| // MOV forInEnumeratorOpnd->enumerator.enumeratedCount, enumeratedCountOpnd |
| InsertAdd(false, enumeratedCountOpnd, enumeratedCountOpnd, IR::IntConstOpnd::New(1, TyUint32, this->m_func), instr); |
| InsertMove(GetForInEnumeratorFieldOpnd(forInEnumeratorOpnd, Js::ForInObjectEnumerator::GetOffsetOfEnumeratorEnumeratedCount(), TyUint32), |
| enumeratedCountOpnd, instr); |
| |
| // We know result propertyString (opndDst) != NULL |
| InsertBranch(Js::OpCode::Br, instr->m_opcode == Js::OpCode::BrOnNotEmpty ? instr->AsBranchInstr()->GetTarget() : labelAfter, instr); |
| |
| // $helper |
| instr->InsertBefore(labelHelper); |
| // $after |
| } |
| |
| ///---------------------------------------------------------------------------- |
| /// |
/// Lowerer::LowerBrBReturn - lower 1-operand (boolean) conditional branch
| /// |
| ///---------------------------------------------------------------------------- |
| IR::Instr * |
| Lowerer::LowerBrBReturn(IR::Instr * instr, IR::JnHelperMethod helperMethod, bool isHelper) |
| { |
| IR::Instr * instrPrev; |
| IR::Instr * instrCall; |
| IR::HelperCallOpnd * opndHelper; |
| IR::Opnd * opndDst; |
| |
AssertMsg(instr->GetSrc1() != nullptr && instr->GetSrc2() == nullptr, "Expected 1 src opnd on BrB");
| Assert(instr->m_opcode == Js::OpCode::BrOnEmpty || instr->m_opcode == Js::OpCode::BrOnNotEmpty); |
| IR::RegOpnd * forInEnumeratorRegOpnd = GenerateForInEnumeratorLoad(instr->UnlinkSrc1(), instr); |
| instrPrev = m_lowererMD.LoadHelperArgument(instr, forInEnumeratorRegOpnd); |
| |
// Generate the helper call; its result (the next property string, or null when
// enumeration is done) drives the branch below
| |
| opndHelper = IR::HelperCallOpnd::New(helperMethod, this->m_func); |
| |
| opndDst = instr->UnlinkDst(); |
| instrCall = IR::Instr::New(Js::OpCode::Call, opndDst, opndHelper, this->m_func); |
| |
| instr->InsertBefore(instrCall); |
| instrCall = m_lowererMD.LowerCall(instrCall, 0); |
| |
| // Branch on the result of the call |
| instr->m_opcode = (instr->m_opcode == Js::OpCode::BrOnNotEmpty? Js::OpCode::BrTrue_A : Js::OpCode::BrFalse_A); |
| instr->SetSrc1(opndDst); |
| IR::Instr *loweredInstr; |
| loweredInstr = this->LowerCondBranchCheckBailOut(instr->AsBranchInstr(), instrCall, isHelper); |
| |
| #if DBG |
| if (isHelper) |
| { |
| if (!loweredInstr->IsBranchInstr()) |
| { |
| loweredInstr = loweredInstr->GetNextBranchOrLabel(); |
| } |
| if (loweredInstr->IsBranchInstr()) |
| { |
| loweredInstr->AsBranchInstr()->m_isHelperToNonHelperBranch = true; |
| } |
| } |
| #endif |
| return instrPrev; |
| } |
| |
| ///---------------------------------------------------------------------------- |
| /// |
| /// Lowerer::LowerMultiBr |
/// - Lowers the instruction for dictionary lookup (string case arms)
| /// |
| ///---------------------------------------------------------------------------- |
| IR::Instr* Lowerer::LowerMultiBr(IR::Instr * instr, IR::JnHelperMethod helperMethod) |
| { |
| IR::Instr * instrPrev = instr->m_prev; |
| IR::Instr * instrCall; |
| IR::HelperCallOpnd * opndHelper; |
| IR::Opnd * opndSrc; |
| IR::Opnd * opndDst; |
| StackSym * symDst; |
| |
| AssertMsg(instr->GetSrc1() != nullptr && instr->GetSrc2() == nullptr, "Expected 1 src opnd on BrB"); |
| |
| // Push the args in reverse order. |
| |
| // The end and start labels for the function are used to guarantee |
| // that the dictionary jump destinations haven't been tampered with, so we |
| // will always jump to some location within this function |
| IR::LabelOpnd * endFuncOpnd = IR::LabelOpnd::New(m_func->EnsureFuncEndLabel(), m_func); |
| m_lowererMD.LoadHelperArgument(instr, endFuncOpnd); |
| |
| IR::LabelOpnd * startFuncOpnd = IR::LabelOpnd::New(m_func->EnsureFuncStartLabel(), m_func); |
| m_lowererMD.LoadHelperArgument(instr, startFuncOpnd); |
| |
// Load the address of the dictionary pair (Js::StringDictionaryWrapper)
| auto dictionary = instr->AsBranchInstr()->AsMultiBrInstr()->GetBranchDictionary(); |
| |
| if (this->m_func->IsOOPJIT()) |
| { |
| auto dictionaryOffset = NativeCodeData::GetDataTotalOffset(dictionary); |
| auto addressRegOpnd = IR::RegOpnd::New(TyMachPtr, m_func); |
| |
| Lowerer::InsertLea(addressRegOpnd, |
| IR::IndirOpnd::New(IR::RegOpnd::New(m_func->GetTopFunc()->GetNativeCodeDataSym(), TyVar, m_func), dictionaryOffset, TyMachPtr, |
| #if DBG |
| NativeCodeData::GetDataDescription(dictionary, this->m_func->m_alloc), |
| #endif |
| this->m_func, true), instr); |
| |
| this->addToLiveOnBackEdgeSyms->Set(m_func->GetTopFunc()->GetNativeCodeDataSym()->m_id); |
| |
| m_lowererMD.LoadHelperArgument(instr, addressRegOpnd); |
| } |
| else |
| { |
| IR::AddrOpnd* nativestringDictionaryOpnd = IR::AddrOpnd::New(dictionary, IR::AddrOpndKindDynamicMisc, this->m_func); |
| m_lowererMD.LoadHelperArgument(instr, nativestringDictionaryOpnd); |
| } |
| |
// Load the string passed in the switch expression for lookup (a JavascriptString)
| opndSrc = instr->UnlinkSrc1(); |
| m_lowererMD.LoadHelperArgument(instr, opndSrc); |
| |
| // Generate helper call for dictionary lookup. |
| |
| opndHelper = IR::HelperCallOpnd::New(helperMethod, this->m_func); |
| |
| symDst = StackSym::New(TyMachPtr,this->m_func); |
| opndDst = IR::RegOpnd::New(symDst, TyMachPtr, this->m_func); |
| |
| instrCall = IR::Instr::New(Js::OpCode::Call, opndDst, opndHelper, this->m_func); |
| |
| instr->InsertBefore(instrCall); |
| instrCall = m_lowererMD.LowerCall(instrCall, 0); |
| |
| instr->SetSrc1(instrCall->GetDst()); |
| instr->m_opcode = LowererMD::MDMultiBranchOpcode; |
| |
| return instrPrev; |
| } |
| |
| void |
| Lowerer::LowerJumpTableMultiBranch(IR::MultiBranchInstr * multiBrInstr, IR::RegOpnd * indexOpnd) |
| { |
| Func * func = this->m_func; |
| IR::Opnd * opndDst = IR::RegOpnd::New(TyMachPtr, func); |
// Move the native address of the jump table into a register
| IR::LabelInstr * nativeJumpTableLabel = IR::LabelInstr::New(Js::OpCode::Label, m_func); |
| nativeJumpTableLabel->m_isDataLabel = true; |
| IR::LabelOpnd * nativeJumpTable = IR::LabelOpnd::New(nativeJumpTableLabel, m_func); |
| IR::RegOpnd * nativeJumpTableReg = IR::RegOpnd::New(TyMachPtr, func); |
| InsertMove(nativeJumpTableReg, nativeJumpTable, multiBrInstr); |
| |
| BranchJumpTableWrapper * branchJumpTable = multiBrInstr->GetBranchJumpTable(); |
AssertMsg(branchJumpTable->labelInstr == nullptr, "Should not already be assigned");
| branchJumpTable->labelInstr = nativeJumpTableLabel; |
| |
// Indirect addressing at the target location in the jump table:
// MOV eax, [nativeJumpTableReg + (index * indirScale)]
| BYTE indirScale = this->m_lowererMD.GetDefaultIndirScale(); |
| IR::Opnd * opndSrc = IR::IndirOpnd::New(nativeJumpTableReg, indexOpnd, indirScale, TyMachReg, this->m_func); |
| |
| IR::Instr * indirInstr = InsertMove(opndDst, opndSrc, multiBrInstr); |
| |
// MultiBr eax
| multiBrInstr->SetSrc1(indirInstr->GetDst()); |
| |
// Jump to the address at the target location in the jump table
| multiBrInstr->m_opcode = LowererMD::MDMultiBranchOpcode; |
| } |
| |
| ///---------------------------------------------------------------------------- |
| /// |
| /// Lowerer::LowerMultiBr |
| /// - Lowers the instruction for jump table(consecutive integer case arms) |
| /// |
| ///---------------------------------------------------------------------------- |
| IR::Instr* Lowerer::LowerMultiBr(IR::Instr * instr) |
| { |
| IR::Instr * instrPrev = instr->m_prev; |
| |
| AssertMsg(instr->GetSrc1() != nullptr && instr->GetSrc2() == nullptr, "Expected 1 src opnd on BrB"); |
| AssertMsg(instr->IsBranchInstr() && instr->AsBranchInstr()->IsMultiBranch(), "Bad Instruction Lowering Call to LowerMultiBr()"); |
| |
| IR::MultiBranchInstr * multiBrInstr = instr->AsBranchInstr()->AsMultiBrInstr(); |
| IR::RegOpnd * offset = instr->UnlinkSrc1()->AsRegOpnd(); |
| LowerJumpTableMultiBranch(multiBrInstr, offset); |
| |
| return instrPrev; |
| } |
| |
| IR::Instr* Lowerer::LowerBrBMem(IR::Instr * instr, IR::JnHelperMethod helperMethod) |
| { |
| IR::Instr * instrPrev; |
| IR::Instr * instrCall; |
| IR::HelperCallOpnd * opndHelper; |
| IR::Opnd * opndSrc; |
| IR::Opnd * opndDst; |
| StackSym * symDst; |
AssertMsg(instr->GetSrc1() != nullptr && instr->GetSrc2() == nullptr, "Expected 1 src opnd on BrB");
| |
| instrPrev = LoadScriptContext(instr); |
| opndSrc = instr->UnlinkSrc1(); |
| m_lowererMD.LoadHelperArgument(instr, opndSrc); |
| |
| // Generate helper call to convert the unknown operand to boolean |
| |
| opndHelper = IR::HelperCallOpnd::New(helperMethod, this->m_func); |
| symDst = StackSym::New(TyVar, this->m_func); |
| opndDst = IR::RegOpnd::New(symDst, TyVar, this->m_func); |
| instrCall = IR::Instr::New(Js::OpCode::Call, opndDst, opndHelper, this->m_func); |
| |
| instr->InsertBefore(instrCall); |
| instrCall = m_lowererMD.LowerCall(instrCall, 0); |
| |
| // Branch on the result of the call |
| |
| instr->SetSrc1(opndDst); |
| m_lowererMD.LowerCondBranch(instr); |
| |
| return instrPrev; |
| } |
| |
| IR::Instr* Lowerer::LowerBrOnObject(IR::Instr * instr, IR::JnHelperMethod helperMethod) |
| { |
| IR::Instr * instrPrev; |
| IR::Instr * instrCall; |
| IR::HelperCallOpnd * opndHelper; |
| IR::Opnd * opndSrc; |
| IR::Opnd * opndDst; |
| StackSym * symDst; |
AssertMsg(instr->GetSrc1() != nullptr && instr->GetSrc2() == nullptr, "Expected 1 src opnd on BrB");
| |
| opndSrc = instr->UnlinkSrc1(); |
| instrPrev = m_lowererMD.LoadHelperArgument(instr, opndSrc); |
| |
| // Generate helper call to check if the operand's type is object |
| |
| opndHelper = IR::HelperCallOpnd::New(helperMethod, this->m_func); |
| symDst = StackSym::New(TyVar, this->m_func); |
| opndDst = IR::RegOpnd::New(symDst, TyVar, this->m_func); |
| instrCall = IR::Instr::New(Js::OpCode::Call, opndDst, opndHelper, this->m_func); |
| |
| instr->InsertBefore(instrCall); |
| instrCall = m_lowererMD.LowerCall(instrCall, 0); |
| |
| // Branch on the result of the call |
| |
| instr->SetSrc1(opndDst); |
| m_lowererMD.LowerCondBranch(instr); |
| |
| return instrPrev; |
| } |
| |
| IR::Instr * Lowerer::LowerBrOnClassConstructor(IR::Instr * instr, IR::JnHelperMethod helperMethod) |
| { |
| IR::Instr * instrPrev; |
| IR::Instr * instrCall; |
| IR::HelperCallOpnd * opndHelper; |
| IR::Opnd * opndSrc; |
| IR::Opnd * opndDst; |
| StackSym * symDst; |
AssertMsg(instr->GetSrc1() != nullptr && instr->GetSrc2() == nullptr, "Expected 1 src opnd on BrB");
| |
| opndSrc = instr->UnlinkSrc1(); |
| instrPrev = m_lowererMD.LoadHelperArgument(instr, opndSrc); |
| |
// Generate helper call to check if the operand is a class constructor
| |
| opndHelper = IR::HelperCallOpnd::New(helperMethod, this->m_func); |
| symDst = StackSym::New(TyVar, this->m_func); |
| opndDst = IR::RegOpnd::New(symDst, TyVar, this->m_func); |
| instrCall = IR::Instr::New(Js::OpCode::Call, opndDst, opndHelper, this->m_func); |
| |
| instr->InsertBefore(instrCall); |
| instrCall = m_lowererMD.LowerCall(instrCall, 0); |
| |
| // Branch on the result of the call |
| |
| instr->SetSrc1(opndDst); |
| m_lowererMD.LowerCondBranch(instr); |
| |
| return instrPrev; |
| } |
| |
| IR::Instr * |
| Lowerer::LowerEqualityCompare(IR::Instr* instr, IR::JnHelperMethod helper) |
| { |
| IR::Instr * instrPrev = instr->m_prev; |
| bool needHelper = true; |
| bool fNoLower = false; |
| bool isStrictCompare = instr->m_opcode == Js::OpCode::CmSrEq_A || instr->m_opcode == Js::OpCode::CmSrNeq_A; |
| |
| if (instr->GetSrc1()->IsFloat()) |
| { |
| Assert(instr->GetSrc1()->GetType() == instr->GetSrc2()->GetType()); |
| this->m_lowererMD.GenerateFastCmXxR8(instr); |
| } |
| else if (PHASE_OFF(Js::BranchFastPathPhase, m_func) || !m_func->DoFastPaths()) |
| { |
| LowerBinaryHelperMem(instr, helper); |
| } |
| else if (TryGenerateFastBrOrCmTypeOf(instr, &instrPrev, instr->IsNeq(), &fNoLower)) |
| { |
| if (!fNoLower) |
| { |
| LowerBinaryHelperMem(instr, helper); |
| } |
| } |
| else if (isStrictCompare && TryGenerateFastCmSrXx(instr)) |
| { |
| } |
| else |
| { |
| if (GenerateFastBrOrCmString(instr)) |
| { |
| LowerBinaryHelperMem(instr, helper); |
| } |
| else if (isStrictCompare && GenerateFastBrOrCmEqDefinite(instr, helper, &needHelper, false, false)) |
| { |
| if (needHelper) |
| { |
| LowerBinaryHelperMem(instr, helper); |
| } |
| } |
| else if(GenerateFastCmEqLikely(instr, &needHelper, false) || GenerateFastEqBoolInt(instr, &needHelper, false)) |
| { |
| if (needHelper) |
| { |
| if (isStrictCompare) |
| { |
| LowerStrictBrOrCm(instr, helper, false, false /* isBranch */, true); |
| } |
| else |
| { |
| LowerBinaryHelperMem(instr, helper); |
| } |
| } |
| } |
| else if (!m_lowererMD.GenerateFastCmXxTaggedInt(instr, false)) |
| { |
| if (isStrictCompare) |
| { |
| LowerStrictBrOrCm(instr, helper, false, false /* isBranch */, false); |
| } |
| else |
| { |
| LowerBinaryHelperMem(instr, helper); |
| } |
| } |
| } |
| if (!needHelper) |
| { |
| instr->Remove(); |
| } |
| return instrPrev; |
| } |
| |
| IR::Instr * |
| Lowerer::LowerEqualityBranch(IR::Instr* instr, IR::JnHelperMethod helper) |
| { |
| IR::RegOpnd *srcReg1 = instr->GetSrc1()->IsRegOpnd() ? instr->GetSrc1()->AsRegOpnd() : nullptr; |
| IR::RegOpnd *srcReg2 = instr->GetSrc2()->IsRegOpnd() ? instr->GetSrc2()->AsRegOpnd() : nullptr; |
| IR::Instr * instrPrev = instr->m_prev; |
| bool fNoLower = false; |
| const bool noFastPath = PHASE_OFF(Js::BranchFastPathPhase, m_func) || !m_func->DoFastPaths(); |
| |
| if (instr->GetSrc1()->IsFloat()) |
| { |
| Assert(instr->GetSrc1()->GetType() == instr->GetSrc2()->GetType()); |
| m_lowererMD.LowerToFloat(instr); |
| return instrPrev; |
| } |
| |
| if (instr->GetSrc2()->IsFloat()) |
| { |
| Assert(instr->GetSrc1()->GetType() == instr->GetSrc2()->GetType()); |
| instr->SwapOpnds(); |
| m_lowererMD.LowerToFloat(instr); |
| return instrPrev; |
| } |
| |
| if (noFastPath) |
| { |
| LowerBrCMem(instr, helper, true, false /*isHelper*/); |
| return instrPrev; |
| } |
| |
| if (TryGenerateFastBrOrCmTypeOf(instr, &instrPrev, instr->IsNeq(), &fNoLower)) |
| { |
| if (!fNoLower) |
| { |
| LowerBrCMem(instr, helper, false, false /*isHelper*/); |
| } |
| return instrPrev; |
| } |
| |
| bool done = false; |
| bool isStrictCompare = false; |
| |
| switch(instr->m_opcode) |
| { |
| case Js::OpCode::BrNeq_A: |
| case Js::OpCode::BrNotEq_A: |
| done = TryGenerateFastBrNeq(instr); |
| break; |
| |
| case Js::OpCode::BrEq_A: |
| case Js::OpCode::BrNotNeq_A: |
| done = TryGenerateFastBrEq(instr); |
| break; |
| |
| case Js::OpCode::BrSrEq_A: |
| case Js::OpCode::BrSrNotNeq_A: |
| case Js::OpCode::BrSrNeq_A: |
| case Js::OpCode::BrSrNotEq_A: |
| isStrictCompare = true; |
| done = TryGenerateFastBrSrXx(instr, srcReg1, srcReg2, &instrPrev, noFastPath); |
| break; |
| default: |
| Assume(UNREACHED); |
| } |
| if (done) |
| { |
| return instrPrev; |
| } |
| |
| bool needHelper = true; |
| bool hasStrFastPath = false; |
| |
| if (GenerateFastBrOrCmString(instr)) |
| { |
| hasStrFastPath = true; |
| LowerBrCMem(instr, helper, false, true); |
| } |
| else if (isStrictCompare && GenerateFastBrOrCmEqDefinite(instr, helper, &needHelper, true, hasStrFastPath)) |
| { |
| if (needHelper) |
| { |
| LowerBrCMem(instr, helper, true /*noMathFastPath*/, hasStrFastPath); |
| } |
| } |
| else if (GenerateFastBrEqLikely(instr->AsBranchInstr(), &needHelper, hasStrFastPath) || GenerateFastEqBoolInt(instr, &needHelper, hasStrFastPath)) |
| { |
| if (needHelper) |
| { |
| if (isStrictCompare) |
| { |
| LowerStrictBrOrCm(instr, helper, false, true /* isBranch */, true); |
| } |
| else |
| { |
| LowerBrCMem(instr, helper, false, hasStrFastPath); |
| } |
| } |
| } |
| else if (needHelper) |
| { |
| if (isStrictCompare) |
| { |
| LowerStrictBrOrCm(instr, helper, false, true /* isBranch */, false); |
| } |
| else |
| { |
| LowerBrCMem(instr, helper, false, hasStrFastPath); |
| } |
| } |
| if (!needHelper) |
| { |
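| // The fast path proved the branch outcome, but don't delete a branch to a loop top; lower it normally so the loop's flow graph stays intact. |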
| if (instr->AsBranchInstr()->GetTarget()->m_isLoopTop) |
| { |
| LowerBrCMem(instr, helper, false, hasStrFastPath); |
| } |
| else |
| { |
| instr->Remove(); |
| } |
| } |
| |
| return instrPrev; |
| } |
| |
| // Generate a fast path for StrictEquals: objects that are not GlobalObject, HostDispatch, or engine-external can be compared by pointer |
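| // Roughly, the emitted fast path (branch form, FLOATVAR build, distinct src syms) is: |
| // <object test src1> -- JMP $helper if src1 is tagged |
| // CMP src1, src2 -- pointer comparison |
| // JEQ $branchSuccess |
| // MOV src1TypeReg, [src1 + offset(type)] |
| // MOV src1TypeIdReg, [src1TypeReg + offset(typeId)] |
| // CMP src1TypeIdReg, TypeIds_HostDispatch |
| // JLE $helper |
| // CMP src1TypeIdReg, TypeIds_GlobalObject |
| // JE $helper |
| // TEST [src1TypeReg + offset(flags)], TypeFlagMask_EngineExternal |
| // JNE $helper |
| // <same external-type check for src2> |
| // JMP $branchFailure |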
| IR::Instr * |
| Lowerer::LowerStrictBrOrCm(IR::Instr * instr, IR::JnHelperMethod helperMethod, bool noMathFastPath, bool isBranch, bool isHelper) |
| { |
| IR::Instr * instrPrev = instr->m_prev; |
| |
| IR::LabelInstr * labelHelper = nullptr; |
| IR::LabelInstr * labelFallThrough = nullptr; |
| IR::LabelInstr * labelBranchSuccess = nullptr; |
| IR::LabelInstr * labelBranchFailure = nullptr; |
| LibraryValue successValueType = ValueInvalid; |
| LibraryValue failureValueType = ValueInvalid; |
| |
| bool isEqual = !instr->IsNeq(); |
| IR::Opnd * src1 = instr->GetSrc1(); |
| IR::Opnd * src2 = instr->GetSrc2(); |
| |
| AssertMsg(src1 != nullptr && src2 != nullptr, "Expected 2 src opnds on BrC"); |
| |
| labelHelper = IR::LabelInstr::New(Js::OpCode::Label, this->m_func, true); |
| |
| if (!noMathFastPath) |
| { |
| labelFallThrough = instr->GetOrCreateContinueLabel(isHelper); |
| |
| if (!isBranch) |
| { |
| labelBranchSuccess = IR::LabelInstr::New(Js::OpCode::Label, this->m_func, isHelper); |
| labelBranchFailure = IR::LabelInstr::New(Js::OpCode::Label, this->m_func, isHelper); |
| successValueType = isEqual ? LibraryValue::ValueTrue : LibraryValue::ValueFalse; |
| failureValueType = isEqual ? LibraryValue::ValueFalse : LibraryValue::ValueTrue; |
| } |
| else |
| { |
| labelBranchSuccess = isEqual ? instr->AsBranchInstr()->GetTarget() : labelFallThrough; |
| labelBranchFailure = isEqual ? labelFallThrough : instr->AsBranchInstr()->GetTarget(); |
| } |
| |
| if (src1->IsEqual(src2)) |
| { |
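| // Same operand on both sides: the result is known statically unless the value might be a float, since NaN !== NaN. |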
| if (instr->GetSrc1()->GetValueType().IsNotFloat()) |
| { |
| if (!isBranch) |
| { |
| InsertMove(instr->GetDst(), LoadLibraryValueOpnd(instr, successValueType), instr); |
| InsertBranch(Js::OpCode::Br, labelFallThrough, instr); |
| } |
| else |
| { |
| IR::BranchInstr * branch = IR::BranchInstr::New(LowererMD::MDUncondBranchOpcode, labelBranchSuccess, this->m_func); |
| instr->InsertBefore(branch); |
| } |
| instr->Remove(); |
| return instrPrev; |
| } |
| #if !FLOATVAR |
| m_lowererMD.GenerateObjectTest(src1->AsRegOpnd(), instr, labelHelper); |
| IR::RegOpnd *src1TypeReg = IR::RegOpnd::New(TyMachReg, this->m_func); |
| Lowerer::InsertMove(src1TypeReg, IR::IndirOpnd::New(src1->AsRegOpnd(), Js::RecyclableObject::GetOffsetOfType(), TyMachReg, this->m_func), instr); |
| |
| // MOV src1TypeIdReg, [src1TypeReg + offset(typeId)] |
| IR::RegOpnd *src1TypeIdReg = IR::RegOpnd::New(TyInt32, this->m_func); |
| Lowerer::InsertMove(src1TypeIdReg, IR::IndirOpnd::New(src1TypeReg, Js::Type::GetOffsetOfTypeId(), TyInt32, this->m_func), instr); |
| |
| // CMP src1TypeIdReg, TypeIds_Number |
| // JEQ $helper |
| IR::IntConstOpnd *numberTypeId = IR::IntConstOpnd::New(Js::TypeIds_Number, TyInt32, this->m_func, true); |
| InsertCompareBranch(src1TypeIdReg, numberTypeId, Js::OpCode::BrEq_A, labelHelper, instr); |
| #else |
| m_lowererMD.GenerateObjectTest(src1->AsRegOpnd(), instr, labelHelper); |
| #endif |
| IR::BranchInstr * branch = IR::BranchInstr::New(LowererMD::MDUncondBranchOpcode, labelBranchSuccess, this->m_func); |
| instr->InsertBefore(branch); |
| } |
| else |
| { |
| m_lowererMD.GenerateObjectTest(src1->AsRegOpnd(), instr, labelHelper); |
| |
| #if !FLOATVAR |
| IR::RegOpnd *src1TypeReg = IR::RegOpnd::New(TyMachReg, this->m_func); |
| Lowerer::InsertMove(src1TypeReg, IR::IndirOpnd::New(src1->AsRegOpnd(), Js::RecyclableObject::GetOffsetOfType(), TyMachReg, this->m_func), instr); |
| |
| // MOV src1TypeIdReg, [src1TypeReg + offset(typeId)] |
| IR::RegOpnd *src1TypeIdReg = IR::RegOpnd::New(TyInt32, this->m_func); |
| Lowerer::InsertMove(src1TypeIdReg, IR::IndirOpnd::New(src1TypeReg, Js::Type::GetOffsetOfTypeId(), TyInt32, this->m_func), instr); |
| |
| // CMP src1TypeIdReg, TypeIds_Number |
| // JEQ $helper |
| IR::IntConstOpnd *numberTypeId = IR::IntConstOpnd::New(Js::TypeIds_Number, TyInt32, this->m_func, true); |
| InsertCompareBranch(src1TypeIdReg, numberTypeId, Js::OpCode::BrEq_A, labelHelper, instr); |
| #endif |
| // CMP src1, src2 - Ptr comparison |
| // JEQ $branchSuccess |
| InsertCompareBranch(src1, src2, Js::OpCode::BrEq_A, labelBranchSuccess, instr); |
| |
| #if FLOATVAR |
| IR::RegOpnd *src1TypeReg = IR::RegOpnd::New(TyMachReg, this->m_func); |
| Lowerer::InsertMove(src1TypeReg, IR::IndirOpnd::New(src1->AsRegOpnd(), Js::RecyclableObject::GetOffsetOfType(), TyMachReg, this->m_func), instr); |
| |
| // MOV src1TypeIdReg, [src1TypeReg + offset(typeId)] |
| IR::RegOpnd *src1TypeIdReg = IR::RegOpnd::New(TyInt32, this->m_func); |
| Lowerer::InsertMove(src1TypeIdReg, IR::IndirOpnd::New(src1TypeReg, Js::Type::GetOffsetOfTypeId(), TyInt32, this->m_func), instr); |
| #endif |
| // CMP src1TypeIdReg, TypeIds_HostDispatch |
| // JLE $helper (le condition covers string, int64, uint64, hostdispatch, as well as undefined, null, boolean) |
| IR::IntConstOpnd *hostDispatchTypeId = IR::IntConstOpnd::New(Js::TypeIds_HostDispatch, TyInt32, this->m_func, true); |
| InsertCompareBranch(src1TypeIdReg, hostDispatchTypeId, Js::OpCode::BrLe_A, labelHelper, instr); |
| |
| // CMP src1TypeIdReg, TypeIds_GlobalObject |
| // JE $helper |
| IR::IntConstOpnd *globalObjectTypeId = IR::IntConstOpnd::New(Js::TypeIds_GlobalObject, TyInt32, this->m_func, true); |
| InsertCompareBranch(src1TypeIdReg, globalObjectTypeId, Js::OpCode::BrEq_A, labelHelper, instr); |
| |
| // TEST src1TypeReg->flags, TypeFlagMask_EngineExternal |
| // JNE $helper |
| |
| IR::Opnd *flags = IR::IndirOpnd::New(src1TypeReg, Js::Type::GetOffsetOfFlags(), TyInt8, this->m_func); |
| InsertTestBranch(flags, IR::IntConstOpnd::New(TypeFlagMask_EngineExternal, TyInt8, this->m_func), Js::OpCode::BrNeq_A, labelHelper, instr); |
| |
| if (src2->IsRegOpnd()) |
| { |
| m_lowererMD.GenerateObjectTest(src2->AsRegOpnd(), instr, labelHelper); |
| // MOV src2TypeReg, [src2 + offset(type)] |
| // TEST [src2TypeReg + offset(flags)], TypeFlagMask_EngineExternal |
| // JNE $helper |
| IR::RegOpnd *src2TypeReg = IR::RegOpnd::New(TyMachReg, this->m_func); |
| IR::IndirOpnd *src2Type = IR::IndirOpnd::New(src2->AsRegOpnd(), Js::RecyclableObject::GetOffsetOfType(), TyMachReg, this->m_func); |
| Lowerer::InsertMove(src2TypeReg, src2Type, instr); |
| IR::Opnd *src2Flags = IR::IndirOpnd::New(src2TypeReg, Js::Type::GetOffsetOfFlags(), TyInt8, this->m_func); |
| InsertTestBranch(src2Flags, IR::IntConstOpnd::New(TypeFlagMask_EngineExternal, TyInt8, this->m_func), Js::OpCode::BrNeq_A, labelHelper, instr); |
| } |
| |
| // JMP $branchFailure |
| IR::BranchInstr * branch = IR::BranchInstr::New(LowererMD::MDUncondBranchOpcode, labelBranchFailure, this->m_func); |
| instr->InsertBefore(branch); |
| } |
| |
| if (!isBranch) |
| { |
| instr->InsertBefore(labelBranchSuccess); |
| InsertMove(instr->GetDst(), LoadLibraryValueOpnd(instr, successValueType), instr); |
| InsertBranch(Js::OpCode::Br, labelFallThrough, instr); |
| |
| instr->InsertBefore(labelBranchFailure); |
| InsertMove(instr->GetDst(), LoadLibraryValueOpnd(instr, failureValueType), instr); |
| InsertBranch(Js::OpCode::Br, labelFallThrough, instr); |
| } |
| } |
| |
| instr->InsertBefore(labelHelper); |
| |
| if (isBranch) |
| { |
| LowerBrCMem(instr, helperMethod, true, true); |
| } |
| else |
| { |
| LowerBinaryHelperMem(instr, helperMethod); |
| } |
| |
| return instrPrev; |
| } |
| |
| IR::Instr * |
| Lowerer::LowerBrCMem(IR::Instr * instr, IR::JnHelperMethod helperMethod, bool noMathFastPath, bool isHelper) |
| { |
| IR::Instr * instrPrev = instr->m_prev; |
| IR::Instr * instrCall; |
| IR::HelperCallOpnd * opndHelper; |
| IR::Opnd * opndSrc; |
| IR::Opnd * opndDst; |
| StackSym * symDst; |
| bool inverted = false; |
| |
| AssertMsg(instr->GetSrc1() != nullptr && instr->GetSrc2() != nullptr, "Expected 2 src opnds on BrC"); |
| |
| if (!noMathFastPath && !this->GenerateFastCondBranch(instr->AsBranchInstr(), &isHelper)) |
| { |
| return instrPrev; |
| } |
| |
| // Push the args in reverse order. |
| const bool loadScriptContext = !(helperMethod == IR::HelperOp_StrictEqualString || helperMethod == IR::HelperOp_StrictEqualEmptyString); |
| const bool loadArg2 = !(helperMethod == IR::HelperOp_StrictEqualEmptyString); |
| |
| if (helperMethod == IR::HelperOp_NotEqual) |
| { |
| // Op_NotEqual() returns !Op_Equal(). It is faster to call Op_Equal() directly. |
| helperMethod = IR::HelperOp_Equal; |
| instr->AsBranchInstr()->Invert(); |
| inverted = true; |
| } |
| else if(helperMethod == IR::HelperOp_NotStrictEqual) |
| { |
| // Op_NotStrictEqual() returns !Op_StrictEqual(). It is faster to call Op_StrictEqual() directly. |
| helperMethod = IR::HelperOp_StrictEqual; |
| instr->AsBranchInstr()->Invert(); |
| inverted = true; |
| } |
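| // E.g., "BrNotEq_A $L a, b" becomes CALL Op_Equal(a, b) followed by a branch to $L on a false result. |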
| |
| if (loadScriptContext) |
| LoadScriptContext(instr); |
| |
| opndSrc = instr->UnlinkSrc2(); |
| if (loadArg2) |
| m_lowererMD.LoadHelperArgument(instr, opndSrc); |
| |
| opndSrc = instr->UnlinkSrc1(); |
| m_lowererMD.LoadHelperArgument(instr, opndSrc); |
| |
| // Generate helper call to compare the source operands. |
| |
| opndHelper = IR::HelperCallOpnd::New(helperMethod, this->m_func); |
| symDst = StackSym::New(TyMachReg, this->m_func); |
| opndDst = IR::RegOpnd::New(symDst, TyMachReg, this->m_func); |
| instrCall = IR::Instr::New(Js::OpCode::Call, opndDst, opndHelper, this->m_func); |
| |
| instr->InsertBefore(instrCall); |
| instrCall = m_lowererMD.LowerCall(instrCall, 0); |
| |
| switch (instr->m_opcode) |
| { |
| case Js::OpCode::BrNotEq_A: |
| case Js::OpCode::BrNotNeq_A: |
| case Js::OpCode::BrSrNotEq_A: |
| case Js::OpCode::BrSrNotNeq_A: |
| if (instr->HasBailOutInfo()) |
| { |
| instr->GetBailOutInfo()->isInvertedBranch = true; |
| } |
| break; |
| |
| case Js::OpCode::BrNotGe_A: |
| case Js::OpCode::BrNotGt_A: |
| case Js::OpCode::BrNotLe_A: |
| case Js::OpCode::BrNotLt_A: |
| inverted = true; |
| break; |
| } |
| |
| // Branch on the result of the call; branch on "false" if we substituted the complementary helper. |
| |
| instr->SetSrc1(opndDst); |
| instr->m_opcode = (inverted ? Js::OpCode::BrFalse_A : Js::OpCode::BrTrue_A); |
| this->LowerCondBranchCheckBailOut(instr->AsBranchInstr(), instrCall, !noMathFastPath && isHelper); |
| |
| return instrPrev; |
| } |
| |
| IR::Instr * |
| Lowerer::LowerBrFncApply(IR::Instr * instr, IR::JnHelperMethod helperMethod) { |
| IR::Instr * instrPrev = instr->m_prev; |
| IR::Instr * instrCall; |
| IR::HelperCallOpnd * opndHelper; |
| IR::Opnd * opndSrc; |
| IR::Opnd * opndDst; |
| StackSym * symDst; |
| |
| AssertMsg(instr->GetSrc1() != nullptr, "Expected 1 src opnd on BrFncApply"); |
| |
| LoadScriptContext(instr); |
| |
| opndSrc = instr->UnlinkSrc1(); |
| m_lowererMD.LoadHelperArgument(instr, opndSrc); |
| |
| // Generate the helper call on the source operand. |
| |
| opndHelper = IR::HelperCallOpnd::New(helperMethod, this->m_func); |
| symDst = StackSym::New(TyMachReg, this->m_func); |
| opndDst = IR::RegOpnd::New(symDst, TyMachReg, this->m_func); |
| instrCall = IR::Instr::New(Js::OpCode::Call, opndDst, opndHelper, this->m_func); |
| |
| instr->InsertBefore(instrCall); |
| instrCall = m_lowererMD.LowerCall(instrCall, 0); |
| |
| // Branch if the result is "true". |
| |
| instr->SetSrc1(opndDst); |
| instr->m_opcode = Js::OpCode::BrTrue_A; |
| m_lowererMD.LowerCondBranch(instr); |
| |
| return instrPrev; |
| } |
| |
| ///---------------------------------------------------------------------------- |
| /// |
| /// Lowerer::LowerBrProperty - lower branch-on-has/no-property |
| /// |
| ///---------------------------------------------------------------------------- |
| IR::Instr * |
| Lowerer::LowerBrProperty(IR::Instr * instr, IR::JnHelperMethod helper) |
| { |
| IR::Instr * instrPrev; |
| IR::Instr * instrCall; |
| IR::HelperCallOpnd * opndHelper; |
| IR::Opnd * opndSrc; |
| IR::Opnd * opndDst; |
| |
| opndSrc = instr->UnlinkSrc1(); |
| AssertMsg(opndSrc->IsSymOpnd() && opndSrc->AsSymOpnd()->m_sym->IsPropertySym(), |
| "Expected propertySym as src of BrProperty"); |
| |
| instrPrev = LoadScriptContext(instr); |
| this->LoadPropertySymAsArgument(instr, opndSrc); |
| |
| opndHelper = IR::HelperCallOpnd::New(helper, this->m_func); |
| opndDst = IR::RegOpnd::New(StackSym::New(TyMachReg, this->m_func), TyMachReg, this->m_func); |
| instrCall = IR::Instr::New(Js::OpCode::Call, opndDst, opndHelper, this->m_func); |
| |
| instr->InsertBefore(instrCall); |
| instrCall = m_lowererMD.LowerCall(instrCall, 0); |
| |
| // Branch on the result of the call |
| |
| instr->SetSrc1(opndDst); |
| switch (instr->m_opcode) |
| { |
| case Js::OpCode::BrOnHasProperty: |
| instr->m_opcode = Js::OpCode::BrTrue_A; |
| break; |
| case Js::OpCode::BrOnNoProperty: |
| instr->m_opcode = Js::OpCode::BrFalse_A; |
| break; |
| default: |
| AssertMsg(0, "Unknown opcode on BrProperty branch"); |
| break; |
| } |
| this->LowerCondBranchCheckBailOut(instr->AsBranchInstr(), instrCall, false); |
| |
| return instrPrev; |
| } |
| |
| ///---------------------------------------------------------------------------- |
| /// |
| /// Lowerer::LowerElementUndefined |
| /// |
| ///---------------------------------------------------------------------------- |
| IR::Instr * |
| Lowerer::LowerElementUndefined(IR::Instr * instr, IR::JnHelperMethod helper) |
| { |
| IR::Opnd *dst = instr->UnlinkDst(); |
| AssertMsg(dst->IsSymOpnd() && dst->AsSymOpnd()->m_sym->IsPropertySym(), "Expected fieldSym as dst of Ld Undefined"); |
| |
| // Pass the property sym to store to |
| this->LoadPropertySymAsArgument(instr, dst); |
| m_lowererMD.ChangeToHelperCall(instr, helper); |
| |
| return instr; |
| } |
| |
| IR::Instr * |
| Lowerer::LowerElementUndefinedMem(IR::Instr * instr, IR::JnHelperMethod helper) |
| { |
| // Pass script context |
| IR::Instr * instrPrev = LoadScriptContext(instr); |
| |
| this->LowerElementUndefined(instr, helper); |
| |
| return instrPrev; |
| } |
| |
| IR::Instr * |
| Lowerer::LowerLdElemUndef(IR::Instr * instr) |
| { |
| if (this->m_func->GetJITFunctionBody()->IsEval()) |
| { |
| return LowerElementUndefinedMem(instr, IR::HelperOp_LdElemUndefDynamic); |
| } |
| else |
| { |
| return LowerElementUndefined(instr, IR::HelperOp_LdElemUndef); |
| } |
| } |
| |
| ///---------------------------------------------------------------------------- |
| /// |
| /// Lowerer::LowerElementUndefinedScoped |
| /// |
| ///---------------------------------------------------------------------------- |
| IR::Instr * |
| Lowerer::LowerElementUndefinedScoped(IR::Instr * instr, IR::JnHelperMethod helper) |
| { |
| IR::Instr * instrPrev = instr->m_prev; |
| |
| // Pass the default instance |
| IR::Opnd *src = instr->UnlinkSrc1(); |
| m_lowererMD.LoadHelperArgument(instr, src); |
| |
| // Pass the property sym to store to |
| IR::Opnd * dst = instr->UnlinkDst(); |
| AssertMsg(dst->IsSymOpnd() && dst->AsSymOpnd()->m_sym->IsPropertySym(), "Expected fieldSym as dst of Ld Undefined Scoped"); |
| |
| this->LoadPropertySymAsArgument(instr, dst); |
| |
| m_lowererMD.ChangeToHelperCall(instr, helper); |
| |
| return instrPrev; |
| } |
| |
| IR::Instr * |
| Lowerer::LowerElementUndefinedScopedMem(IR::Instr * instr, IR::JnHelperMethod helper) |
| { |
| // Pass script context |
| IR::Instr * instrPrev = LoadScriptContext(instr); |
| |
| this->LowerElementUndefinedScoped(instr, helper); |
| |
| return instrPrev; |
| } |
| |
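| // Store the loop body's profiled iteration count directly into the LoopHeader's profiled loop counter field. |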
| void |
| Lowerer::LowerStLoopBodyCount(IR::Instr* instr) |
| { |
| intptr_t header = m_func->m_workItem->GetLoopHeaderAddr(); |
| |
| IR::MemRefOpnd *loopBodyCounterOpnd = IR::MemRefOpnd::New((BYTE*)(header) + Js::LoopHeader::GetOffsetOfProfiledLoopCounter(), TyUint32, this->m_func); |
| instr->SetDst(loopBodyCounterOpnd); |
| instr->ReplaceSrc1(instr->GetSrc1()->AsRegOpnd()->UseWithNewType(TyUint32, this->m_func)); |
| IR::AutoReuseOpnd autoReuse(loopBodyCounterOpnd, this->m_func); |
| m_lowererMD.ChangeToAssign(instr); |
| return; |
| } |
| |
| #if !FLOATVAR |
| IR::Instr * |
| Lowerer::LowerStSlotBoxTemp(IR::Instr *stSlot) |
| { |
| // regVar = BoxStackNumber(src, scriptContext) |
| IR::RegOpnd * regSrc = stSlot->UnlinkSrc1()->AsRegOpnd(); |
| IR::Instr * instr = IR::Instr::New(Js::OpCode::Call, this->m_func); |
| IR::RegOpnd *regVar = IR::RegOpnd::New(TyVar, this->m_func); |
| instr->SetDst(regVar); |
| instr->SetSrc1(regSrc); |
| stSlot->InsertBefore(instr); |
| this->LowerUnaryHelperMem(instr, IR::HelperBoxStackNumber); |
| stSlot->SetSrc1(regVar); |
| return this->LowerStSlot(stSlot); |
| } |
| #endif |
| |
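| // Turn a slot-access SymOpnd into the operand actually used for the load/store: a direct stack access for |
| // stack-allocated closure syms, or [slotArray + slotId * sizeof(slotType)] (in asm.js mode the slot id is |
| // already a byte offset). |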
| IR::Opnd * |
| Lowerer::CreateOpndForSlotAccess(IR::Opnd * opnd) |
| { |
| IR::SymOpnd * symOpnd = opnd->AsSymOpnd(); |
| PropertySym * dstSym = symOpnd->m_sym->AsPropertySym(); |
| |
| if (!m_func->IsLoopBody() && |
| m_func->DoStackFrameDisplay() && |
| (dstSym->m_stackSym == m_func->GetLocalClosureSym() || dstSym->m_stackSym == m_func->GetLocalFrameDisplaySym())) |
| { |
| // Stack closure syms are made to look like slot accesses for the benefit of GlobOpt, so that it can do proper |
| // copy prop and implicit call bailout. But what we really want here is a local stack load/store. |
| // Don't do this for a loop body, though, since we don't have the value saved on the stack. |
| IR::SymOpnd * closureSym = IR::SymOpnd::New(dstSym->m_stackSym, 0, TyMachReg, this->m_func); |
| closureSym->GetStackSym()->m_isClosureSym = true; |
| return closureSym; |
| } |
| |
| int32 offset = dstSym->m_propertyId; |
| if (!m_func->GetJITFunctionBody()->IsAsmJsMode()) |
| { |
| offset = offset * TySize[opnd->GetType()]; |
| } |
| #ifdef ASMJS_PLAT |
| if (m_func->IsTJLoopBody()) |
| { |
| offset = offset - m_func->GetJITFunctionBody()->GetAsmJsInfo()->GetTotalSizeInBytes(); |
| } |
| #endif |
| |
| IR::IndirOpnd *indirOpnd = IR::IndirOpnd::New(symOpnd->CreatePropertyOwnerOpnd(m_func), |
| offset , opnd->GetType(), this->m_func); |
| return indirOpnd; |
| } |
| |
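| // For a jitted loop body, the slot array comes from the interpreter frame and its allocated length isn't known |
| // statically, so insert (or widen) a SlotArrayCheck at the slot array's def to range-check the slot id at run time. |
| // For full-function bodies the allocation count is a known constant and is validated here at JIT time. |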
| IR::Instr* Lowerer::AddSlotArrayCheck(PropertySym *propertySym, IR::Instr* instr) |
| { |
| if (propertySym->m_stackSym != m_func->GetLocalClosureSym() || PHASE_OFF(Js::ClosureRangeCheckPhase, m_func)) |
| { |
| return instr->m_prev; |
| } |
| |
| IR::Instr *instrDef = propertySym->m_stackSym->m_instrDef; |
| |
| bool doDynamicCheck = this->m_func->IsLoopBody(); |
| bool insertSlotArrayCheck = false; |
| uint32 slotId = (uint32)propertySym->m_propertyId; |
| |
| if (instrDef) |
| { |
| switch (instrDef->m_opcode) |
| { |
| case Js::OpCode::NewScopeSlots: |
| case Js::OpCode::NewStackScopeSlots: |
| case Js::OpCode::NewScopeSlotsWithoutPropIds: |
| { |
| IR::Opnd *allocOpnd = instrDef->GetSrc1(); |
| uint32 allocCount = allocOpnd->AsIntConstOpnd()->AsUint32(); |
| |
| if (slotId >= allocCount) |
| { |
| Js::Throw::FatalInternalError(); |
| } |
| break; |
| } |
| case Js::OpCode::ArgIn_A: |
| break; |
| case Js::OpCode::LdSlot: |
| case Js::OpCode::LdSlotArr: |
| { |
| if (doDynamicCheck && slotId > Js::ScopeSlots::FirstSlotIndex) |
| { |
| insertSlotArrayCheck = true; |
| } |
| break; |
| } |
| case Js::OpCode::SlotArrayCheck: |
| { |
| uint32 currentSlotId = instrDef->GetSrc2()->AsIntConstOpnd()->AsInt32(); |
| if (slotId > currentSlotId) |
| { |
| instrDef->ReplaceSrc2(IR::IntConstOpnd::New(slotId, TyUint32, m_func)); |
| } |
| break; |
| } |
| default: |
| Js::Throw::FatalInternalError(); |
| } |
| } |
| if (insertSlotArrayCheck) |
| { |
| IR::Instr *insertInstr = instrDef->m_next; |
| IR::RegOpnd *dstOpnd = instrDef->UnlinkDst()->AsRegOpnd(); |
| IR::Instr *checkInstr = IR::Instr::New(Js::OpCode::SlotArrayCheck, dstOpnd, m_func); |
| |
| dstOpnd = IR::RegOpnd::New(TyVar, m_func); |
| instrDef->SetDst(dstOpnd); |
| checkInstr->SetSrc1(dstOpnd); |
| |
| // Attach the slot ID to the check instruction. |
| IR::IntConstOpnd *slotIdOpnd = IR::IntConstOpnd::New(slotId, TyUint32, m_func); |
| checkInstr->SetSrc2(slotIdOpnd); |
| insertInstr->InsertBefore(checkInstr); |
| } |
| return instr->m_prev; |
| } |
| |
| IR::Instr * |
| Lowerer::LowerStSlot(IR::Instr *instr) |
| { |
| // StSlot stores the nth Var in the buffer pointed to by the property sym's stack sym. |
| |
| IR::Opnd * dstOpnd = instr->UnlinkDst(); |
| AssertMsg(dstOpnd, "Expected dst opnd on StSlot"); |
| IR::Opnd * dstNew = this->CreateOpndForSlotAccess(dstOpnd); |
| dstOpnd->Free(this->m_func); |
| |
| instr->SetDst(dstNew); |
| instr = m_lowererMD.ChangeToWriteBarrierAssign(instr, this->m_func); |
| |
| return instr; |
| } |
| |
| IR::Instr * |
| Lowerer::LowerStSlotChkUndecl(IR::Instr *instrStSlot) |
| { |
| Assert(instrStSlot->GetSrc2() != nullptr); |
| |
| // Src2 is required only to avoid dead store false positives during GlobOpt. |
| instrStSlot->FreeSrc2(); |
| |
| IR::Opnd *dstOpnd = this->CreateOpndForSlotAccess(instrStSlot->GetDst()); |
| IR::Instr *instr = this->LowerStSlot(instrStSlot); |
| |
| this->GenUndeclChk(instr, dstOpnd); |
| |
| return instr; |
| } |
| |
| void Lowerer::LowerProfileLdSlot(IR::Opnd *const valueOpnd, Func *const ldSlotFunc, const Js::ProfileId profileId, IR::Instr *const insertBeforeInstr) |
| { |
| Assert(valueOpnd); |
| Assert(profileId != Js::Constants::NoProfileId); |
| Assert(insertBeforeInstr); |
| |
| Func *const irFunc = insertBeforeInstr->m_func; |
| |
| m_lowererMD.LoadHelperArgument(insertBeforeInstr, IR::Opnd::CreateProfileIdOpnd(profileId, irFunc)); |
| m_lowererMD.LoadHelperArgument(insertBeforeInstr, CreateFunctionBodyOpnd(ldSlotFunc)); |
| m_lowererMD.LoadHelperArgument(insertBeforeInstr, valueOpnd); |
| |
| IR::Instr *const callInstr = IR::Instr::New(Js::OpCode::Call, irFunc); |
| callInstr->SetSrc1(IR::HelperCallOpnd::New(IR::HelperProfileLdSlot, irFunc)); |
| insertBeforeInstr->InsertBefore(callInstr); |
| m_lowererMD.LowerCall(callInstr, 0); |
| } |
| |
| void |
| Lowerer::LowerLdSlot(IR::Instr *instr) |
| { |
| IR::Opnd * srcOpnd = instr->UnlinkSrc1(); |
| AssertMsg(srcOpnd, "Expected src opnd on LdSlot"); |
| IR::Opnd * srcNew = this->CreateOpndForSlotAccess(srcOpnd); |
| srcOpnd->Free(this->m_func); |
| |
| instr->SetSrc1(srcNew); |
| m_lowererMD.ChangeToAssign(instr); |
| } |
| |
| IR::Instr * |
| Lowerer::LowerChkUndecl(IR::Instr *instr) |
| { |
| IR::Instr *instrPrev = instr->m_prev; |
| this->GenUndeclChk(instr, instr->GetSrc1()); |
| instr->Remove(); |
| return instrPrev; |
| } |
| |
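| // Emits, roughly: |
| // CMP opnd, undeclBlockVar |
| // JNE $continue |
| // $throw: |
| // CALL Op_RuntimeReferenceError(JSERR_UseBeforeDeclaration) |
| // $continue: |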
| void |
| Lowerer::GenUndeclChk(IR::Instr *instrInsert, IR::Opnd *opnd) |
| { |
| IR::LabelInstr *labelContinue = IR::LabelInstr::New(Js::OpCode::Label, m_func); |
| |
| InsertCompareBranch( |
| opnd, |
| LoadLibraryValueOpnd(instrInsert, LibraryValue::ValueUndeclBlockVar), |
| Js::OpCode::BrNeq_A, labelContinue, instrInsert); |
| |
| IR::LabelInstr *labelThrow = IR::LabelInstr::New(Js::OpCode::Label, m_func, true); |
| instrInsert->InsertBefore(labelThrow); |
| |
| IR::Instr *instr = IR::Instr::New( |
| Js::OpCode::RuntimeReferenceError, |
| IR::RegOpnd::New(TyMachReg, m_func), |
| IR::IntConstOpnd::New(SCODE_CODE(JSERR_UseBeforeDeclaration), TyInt32, m_func), |
| m_func); |
| instrInsert->InsertBefore(instr); |
| this->LowerUnaryHelperMem(instr, IR::HelperOp_RuntimeReferenceError); |
| |
| instrInsert->InsertBefore(labelContinue); |
| } |
| |
| ///---------------------------------------------------------------------------- |
| /// |
| /// Lowerer::LowerStElemC |
| /// |
| ///---------------------------------------------------------------------------- |
| IR::Instr * |
| Lowerer::LowerStElemC(IR::Instr * stElem) |
| { |
| IR::Instr *instrPrev = stElem->m_prev; |
| IR::IndirOpnd * indirOpnd = stElem->GetDst()->AsIndirOpnd(); |
| IR::RegOpnd *indexOpnd = indirOpnd->UnlinkIndexOpnd(); |
| |
| Assert(!indexOpnd || indexOpnd->m_sym->IsIntConst()); |
| |
| IntConstType value; |
| if (indexOpnd) |
| { |
| value = indexOpnd->AsRegOpnd()->m_sym->GetIntConstValue(); |
| indexOpnd->Free(this->m_func); |
| } |
| else |
| { |
| value = (IntConstType)indirOpnd->GetOffset(); |
| } |
| |
| if (stElem->IsJitProfilingInstr()) |
| { |
| Assert(stElem->AsJitProfilingInstr()->profileId == Js::Constants::NoProfileId); |
| m_lowererMD.LoadHelperArgument(stElem, stElem->UnlinkSrc1()); |
| |
| const auto meth = stElem->m_opcode == Js::OpCode::StElemC ? IR::HelperSimpleStoreArrayHelper : IR::HelperSimpleStoreArraySegHelper; |
| |
| stElem->SetSrc1(IR::HelperCallOpnd::New(meth, m_func)); |
| |
| m_lowererMD.LoadHelperArgument(stElem, IR::IntConstOpnd::New(value, TyUint32, m_func)); |
| m_lowererMD.LoadHelperArgument(stElem, indirOpnd->UnlinkBaseOpnd()); |
| |
| stElem->UnlinkDst()->Free(m_func); |
| |
| m_lowererMD.LowerCall(stElem, 0); |
| return instrPrev; |
| } |
| |
| IntConstType base; |
| IR::RegOpnd *baseOpnd = indirOpnd->GetBaseOpnd(); |
| const ValueType baseValueType(baseOpnd->GetValueType()); |
| if(baseValueType.IsLikelyNativeArray()) |
| { |
| Assert(stElem->m_opcode == Js::OpCode::StElemC); |
| |
| IR::LabelInstr *labelBailOut = nullptr; |
| IR::Instr *instrBailOut = nullptr; |
| |
| if (stElem->HasBailOutInfo()) |
| { |
| labelBailOut = IR::LabelInstr::New(Js::OpCode::Label, m_func, true); |
| instrBailOut = stElem; |
| stElem = IR::Instr::New(instrBailOut->m_opcode, m_func); |
| instrBailOut->TransferTo(stElem); |
| instrBailOut->InsertBefore(stElem); |
| |
| IR::LabelInstr *labelDone = IR::LabelInstr::New(Js::OpCode::Label, m_func); |
| InsertBranch(Js::OpCode::Br, labelDone, instrBailOut); |
| |
| instrBailOut->InsertBefore(labelBailOut); |
| instrBailOut->InsertAfter(labelDone); |
| |
| instrBailOut->m_opcode = Js::OpCode::BailOut; |
| GenerateBailOut(instrBailOut); |
| } |
| |
| if (!baseValueType.IsObject()) |
| { |
| // Likely native array: do a vtable check and bail if it fails. |
| Assert(labelBailOut); |
| GenerateArrayTest(baseOpnd, labelBailOut, labelBailOut, stElem, true); |
| } |
| |
| if (stElem->GetSrc1()->GetType() == TyVar) |
| { |
| // Storing a non-specialized value. This may cause array conversion, which invalidates all the code |
| // that depends on the array check we've already done. |
| // Call a helper that returns the type ID of the resulting array, check it here against the one we |
| // expect, and bail if it fails. |
| |
| Assert(labelBailOut); |
| |
| // Call a helper to (try and) unbox the var and store it. |
| // If we had to convert the array to do the store, we'll bail. |
| LoadScriptContext(stElem); |
| |
| m_lowererMD.LoadHelperArgument(stElem, stElem->UnlinkSrc1()); |
| |
| IR::IntConstOpnd * intConstIndexOpnd = IR::IntConstOpnd::New(value, TyUint32, m_func); |
| |
| m_lowererMD.LoadHelperArgument(stElem, intConstIndexOpnd); |
| m_lowererMD.LoadHelperArgument(stElem, indirOpnd->UnlinkBaseOpnd()); |
| |
| IR::JnHelperMethod helperMethod; |
| if (baseValueType.HasIntElements()) |
| { |
| helperMethod = IR::HelperScrArr_SetNativeIntElementC; |
| } |
| else |
| { |
| helperMethod = IR::HelperScrArr_SetNativeFloatElementC; |
| } |
| |
| IR::Instr *instrInsertBranch = stElem->m_next; |
| IR::RegOpnd *typeIdOpnd = IR::RegOpnd::New(TyUint32, m_func); |
| stElem->ReplaceDst(typeIdOpnd); |
| m_lowererMD.ChangeToHelperCall(stElem, helperMethod); |
| |
| InsertCompareBranch( |
| typeIdOpnd, |
| IR::IntConstOpnd::New( |
| baseValueType.HasIntElements() ? |
| Js::TypeIds_NativeIntArray : Js::TypeIds_NativeFloatArray, TyUint32, m_func), |
| Js::OpCode::BrNeq_A, |
| labelBailOut, |
| instrInsertBranch); |
| |
| return instrPrev; |
| } |
| else if (baseValueType.HasIntElements() && labelBailOut) |
| { |
| Assert(stElem->GetSrc1()->GetType() == GetArrayIndirType(baseValueType)); |
| IR::Opnd* missingElementOpnd = GetMissingItemOpnd(stElem->GetSrc1()->GetType(), m_func); |
| if (!stElem->GetSrc1()->IsEqual(missingElementOpnd)) |
| { |
| InsertMissingItemCompareBranch(stElem->GetSrc1(), Js::OpCode::BrEq_A, labelBailOut, stElem); |
| } |
| else |
| { |
| // It's a missing-value store, and data flow proves that src1 is always the missing value. The array cannot be |
| // an int array in the first place if this code is ever hit. Just bail out; this path will be updated with profile information next time around. |
| InsertBranch(Js::OpCode::Br, labelBailOut, stElem); |
| #if DBG |
| labelBailOut->m_noHelperAssert = true; |
| #endif |
| stElem->Remove(); |
| return instrPrev; |
| } |
| } |
| else |
| { |
| Assert(stElem->GetSrc1()->GetType() == GetArrayIndirType(baseValueType)); |
| } |
| stElem->GetDst()->SetType(stElem->GetSrc1()->GetType()); |
| Assert(value <= Js::SparseArraySegmentBase::INLINE_CHUNK_SIZE); |
| |
| if(baseValueType.HasIntElements()) |
| { |
| base = sizeof(Js::JavascriptNativeIntArray) + offsetof(Js::SparseArraySegment<int32>, elements); |
| } |
| else |
| { |
| base = sizeof(Js::JavascriptNativeFloatArray) + offsetof(Js::SparseArraySegment<double>, elements); |
| } |
| } |
| else if(baseValueType.IsLikelyObject() && baseValueType.GetObjectType() == ObjectType::Array) |
| { |
| Assert(stElem->m_opcode == Js::OpCode::StElemC); |
| Assert(value <= Js::SparseArraySegmentBase::INLINE_CHUNK_SIZE); |
| base = sizeof(Js::JavascriptArray) + offsetof(Js::SparseArraySegment<Js::Var>, elements); |
| } |
| else |
| { |
| Assert(stElem->m_opcode == Js::OpCode::StElemC || stElem->m_opcode == Js::OpCode::StArrSegElemC); |
| Assert(indirOpnd->GetBaseOpnd()->GetType() == TyVar); |
| base = offsetof(Js::SparseArraySegment<Js::Var>, elements); |
| } |
| Assert(value >= 0); |
| |
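| // For example, a Var store to index 3 of a JavascriptArray's inline head segment on a 64-bit target uses |
| // indirScale == 3, so offset = base + (3 << 3) = base + 24 bytes past the array object. |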
| // MOV [r3 + offset(element) + index], src |
| const BYTE indirScale = |
| baseValueType.IsLikelyAnyOptimizedArray() ? GetArrayIndirScale(baseValueType) : m_lowererMD.GetDefaultIndirScale(); |
| IntConstType offset = base + (value << indirScale); |
| Assert(Math::FitsInDWord(offset)); |
| indirOpnd->SetOffset((int32)offset); |
| m_lowererMD.ChangeToWriteBarrierAssign(stElem, this->m_func); |
| |
| return instrPrev; |
| } |
| |
| void Lowerer::LowerLdArrHead(IR::Instr *const instr) |
| { |
| IR::RegOpnd *array = instr->UnlinkSrc1()->AsRegOpnd(); |
| const ValueType arrayValueType(array->GetValueType()); |
| Assert(arrayValueType.IsAnyOptimizedArray()); |
| |
| if(arrayValueType.GetObjectType() == ObjectType::ObjectWithArray) |
| { |
| array = LoadObjectArray(array, instr); |
| } |
| |
| // mov arrayHeadSegment, [array + offset(headSegment)] |
| instr->GetDst()->SetType(TyMachPtr); |
| instr->SetSrc1( |
| IR::IndirOpnd::New( |
| array, |
| GetArrayOffsetOfHeadSegment(arrayValueType), |
| TyMachPtr, |
| instr->m_func)); |
| LowererMD::ChangeToAssign(instr); |
| } |
| |
| // Creates the rest parameter array. |
| // Var JavascriptArray::OP_NewScArrayWithElements( |
| // uint32 elementCount, |
| // Var *elements, |
| // ScriptContext* scriptContext) |
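| // |
| // For example, for "function f(a, ...rest) { }" called as f(1, 2, 3), elementCount is the excess argument |
| // count (2) and elements points at the stack slots holding 2 and 3, producing rest == [2, 3]. |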
| IR::Instr *Lowerer::LowerRestParameter(IR::Opnd *formalsOpnd, IR::Opnd *dstOpnd, IR::Opnd *excessOpnd, IR::Instr *instr, IR::RegOpnd *generatorArgsPtrOpnd) |
| { |
| IR::Instr * helperCallInstr = IR::Instr::New(LowererMD::MDCallOpcode, dstOpnd, instr->m_func); |
| instr->InsertAfter(helperCallInstr); |
| |
| // Var JavascriptArray::OP_NewScArrayWithElements( |
| // uint32 elementCount, |
| // Var *elements, |
| // ScriptContext* scriptContext) |
| IR::JnHelperMethod helperMethod = IR::HelperScrArr_OP_NewScArrayWithElements; |
| |
| LoadScriptContext(helperCallInstr); |
| |
| BOOL isGenerator = this->m_func->GetJITFunctionBody()->IsCoroutine(); |
| |
| // Elements pointer = frame pointer + (formals count + formal param offset) * sizeof(Var) |
| IR::RegOpnd *srcOpnd = isGenerator ? generatorArgsPtrOpnd : IR::Opnd::CreateFramePointerOpnd(this->m_func); |
| uint16 actualOffset = isGenerator ? 0 : GetFormalParamOffset(); // number of Var-sized slots between the frame pointer and the first formal |
| IR::RegOpnd *argPtrOpnd = IR::RegOpnd::New(TyMachPtr, this->m_func); |
| InsertAdd(false, argPtrOpnd, srcOpnd, IR::IntConstOpnd::New((formalsOpnd->AsIntConstOpnd()->GetValue() + actualOffset) * MachPtr, TyMachPtr, this->m_func), helperCallInstr); |
| m_lowererMD.LoadHelperArgument(helperCallInstr, argPtrOpnd); |
| |
| m_lowererMD.LoadHelperArgument(helperCallInstr, excessOpnd); |
| m_lowererMD.ChangeToHelperCall(helperCallInstr, helperMethod); |
| |
| return helperCallInstr; |
| } |
| |
| ///---------------------------------------------------------------------------- |
| /// |
| /// Lowerer::LowerArgIn |
| /// |
| /// This function checks the passed-in argument count against the index of this |
| /// argument and uses null for a param value if the caller didn't explicitly |
| /// pass anything. |
| /// |
| ///---------------------------------------------------------------------------- |
| IR::Instr * |
| Lowerer::LowerArgIn(IR::Instr *instrArgIn) |
| { |
| IR::LabelInstr * labelDone; |
| IR::LabelInstr * labelUndef; |
| IR::LabelInstr * labelNormal; |
| IR::LabelInstr * labelInit; |
| IR::LabelInstr * labelInitNext; |
| IR::BranchInstr * instrBranch; |
| IR::Instr * instrArgInNext; |
| IR::Instr * instrInsert; |
| IR::Instr * instrPrev; |
| IR::Instr * instrResume = nullptr; |
| IR::Opnd * dstOpnd; |
| IR::Opnd * srcOpnd; |
| IR::Opnd * opndUndef; |
| Js::ArgSlot argIndex; |
| StackSym * symParam; |
| BOOLEAN isDuplicate; |
| IR::RegOpnd * generatorArgsPtrOpnd = nullptr; |
| |
| // We start with: |
| // s1 = ArgIn_A param1 |
| // s2 = ArgIn_A param2 |
| // ... |
| // sn = ArgIn_A paramn |
| // |
| // We want to end up with: |
| // |
| // s1 = ArgIn_A param1 -- Note that this is unconditional |
| // count = (load from param area) |
| // BrLt_A $start, count, n -- Forward conditional branch to the uncommon case |
| // Br $Ln |
| // $start: |
| // sn = assign undef |
| // BrGe_A $Ln-1, count, n-1 |
| // sn-1 = assign undef |
| // ... |
| // s2 = assign undef |
| // Br $done |
| // $Ln: |
| // sn = assign paramn |
| // $Ln-1: |
| // sn-1 = assign paramn-1 |
| // ... |
| // s2 = assign param2 |
| // $done: |
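| // |
| // For example, "function f(a, b, c)" called as f(1) takes the undef path: c and b are assigned undefined, |
| // then control joins the normal path so that a is still loaded from the actual argument. |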
| |
| AnalysisAssert(instrArgIn); |
| |
| IR::Opnd *restDst = nullptr; |
| bool hasRest = instrArgIn->m_opcode == Js::OpCode::ArgIn_Rest; |
| if (hasRest) |
| { |
| IR::Instr *restInstr = instrArgIn; |
| restDst = restInstr->UnlinkDst(); |
| if (m_func->GetJITFunctionBody()->HasImplicitArgIns() && m_func->argInsCount > 0) |
| { |
| while (instrArgIn->m_opcode != Js::OpCode::ArgIn_A) |
| { |
| instrArgIn = instrArgIn->m_prev; |
| if (instrResume == nullptr) |
| { |
| instrResume = instrArgIn; |
| } |
| } |
| restInstr->Remove(); |
| } |
| else |
| { |
| Assert(instrArgIn->m_func == this->m_func); |
| IR::Instr * instrCount = m_lowererMD.LoadInputParamCount(instrArgIn, -this->m_func->GetInParamsCount()); |
| IR::Opnd * excessOpnd = instrCount->GetDst(); |
| |
| IR::LabelInstr *createRestArrayLabel = IR::LabelInstr::New(Js::OpCode::Label, this->m_func); |
| |
| // BrGe $createRestArray, excess, 0 |
| InsertCompareBranch(excessOpnd, IR::IntConstOpnd::New(0, TyUint8, this->m_func), Js::OpCode::BrGe_A, createRestArrayLabel, instrArgIn); |
| |
| // MOV excess, 0 |
| InsertMove(excessOpnd, IR::IntConstOpnd::New(0, TyUint8, this->m_func), instrArgIn); |
| |
| // $createRestArray |
| instrArgIn->InsertBefore(createRestArrayLabel); |
| |
| if (m_func->GetJITFunctionBody()->IsCoroutine()) |
| { |
| generatorArgsPtrOpnd = LoadGeneratorArgsPtr(instrArgIn); |
| } |
| |
| IR::IntConstOpnd * formalsOpnd = IR::IntConstOpnd::New(this->m_func->GetInParamsCount(), TyUint32, this->m_func); |
| IR::Instr *prev = LowerRestParameter(formalsOpnd, restDst, excessOpnd, instrArgIn, generatorArgsPtrOpnd); |
| instrArgIn->Remove(); |
| return prev; |
| } |
| } |
| |
| srcOpnd = instrArgIn->GetSrc1(); |
| symParam = srcOpnd->AsSymOpnd()->m_sym->AsStackSym(); |
| |
| argIndex = symParam->GetParamSlotNum(); |
| if (argIndex == 1) |
| { |
| // The "this" argument is not source-dependent and doesn't need to be checked. |
| if (m_func->GetJITFunctionBody()->IsCoroutine()) |
| { |
| generatorArgsPtrOpnd = LoadGeneratorArgsPtr(instrArgIn); |
| ConvertArgOpndIfGeneratorFunction(instrArgIn, generatorArgsPtrOpnd); |
| } |
| |
| m_lowererMD.ChangeToAssign(instrArgIn); |
| return instrResume == nullptr ? instrArgIn->m_prev : instrResume; |
| } |
| |
| Js::ArgSlot formalsCount = this->m_func->GetInParamsCount(); |
| |
| AssertMsg(argIndex <= formalsCount, "Expect to see the ArgIn's within the range of the formals"); |
| |
| // Because there may be instructions between the ArgIn's, such as saves to the frame object, |
| // we find the top of the sequence of ArgIn's and insert everything there. This assumes that |
| // ArgIn's use param symbols as src's and not the results of previous instructions. |
| |
| instrPrev = instrArgIn; |
| Js::ArgSlot currArgInCount = 0; |
| Assert(this->m_func->argInsCount > 0); |
| |
| while (currArgInCount < this->m_func->argInsCount - 1) |
| { |
| instrPrev = instrPrev->m_prev; |
| if (instrPrev->m_opcode == Js::OpCode::ArgIn_A) |
| { |
| srcOpnd = instrPrev->GetSrc1(); |
| symParam = srcOpnd->AsSymOpnd()->m_sym->AsStackSym(); |
| AssertMsg(symParam->GetParamSlotNum() < argIndex, "ArgIn's not in numerical order"); |
| argIndex = symParam->GetParamSlotNum(); |
| currArgInCount++; |
| } |
| else |
| { |
| // Make sure that this instruction gets lowered. |
| if (instrResume == nullptr) |
| { |
| instrResume = instrPrev; |
| } |
| } |
| } |
| // The loading of parameters will be inserted above this instruction. |
| instrInsert = instrPrev; |
| |
| AnalysisAssert(instrInsert); |
| if (instrResume == nullptr) |
| { |
| // We found no intervening non-ArgIn's, so lowering can resume at the previous instruction. |
| instrResume = instrInsert->m_prev; |
| } |
| |
| // Now insert all the checks and undef-assigns. |
| |
| if (m_func->GetJITFunctionBody()->IsCoroutine()) |
| { |
| generatorArgsPtrOpnd = LoadGeneratorArgsPtr(instrInsert); |
| } |
| |
| // excessOpnd = (load from param area) - formalsCount |
| IR::Instr * instrCount = this->m_lowererMD.LoadInputParamCount(instrInsert, -formalsCount, true); |
| IR::Opnd * excessOpnd = instrCount->GetDst(); |
| |
| labelUndef = IR::LabelInstr::New(Js::OpCode::Label, this->m_func, /*helperLabel*/ true); |
| Lowerer::InsertBranch(Js::OpCode::BrLt_A, labelUndef, instrInsert); |
| |
| // Br $Ln |
| |
| labelNormal = IR::LabelInstr::New(Js::OpCode::Label, this->m_func); |
| labelInit = labelNormal; |
| instrBranch = IR::BranchInstr::New(LowererMD::MDUncondBranchOpcode, labelNormal, this->m_func); |
| instrInsert->InsertBefore(instrBranch); |
| |
| // Insert the labels |
| |
| instrInsert->InsertBefore(labelUndef); |
| instrInsert->InsertBefore(labelNormal); |
| |
| // Adjustment for dead store of ArgIn_A |
| Js::ArgSlot highestSlotNum = instrArgIn->GetSrc1()->AsSymOpnd()->m_sym->AsStackSym()->GetParamSlotNum(); |
| Js::ArgSlot missingSlotNums = this->m_func->GetInParamsCount() - highestSlotNum; |
| Assert(missingSlotNums >= 0); |
| while (missingSlotNums > 0) |
| { |
| InsertAdd(true, excessOpnd, excessOpnd, IR::IntConstOpnd::New(1, TyMachReg, this->m_func), labelNormal); |
| Lowerer::InsertBranch(Js::OpCode::BrEq_A, labelNormal, labelNormal); |
| missingSlotNums--; |
| } |
| |
| // MOV undefReg, undefAddress |
| IR::Opnd* opndUndefAddress = this->LoadLibraryValueOpnd(labelNormal, LibraryValue::ValueUndefined); |
| opndUndef = IR::RegOpnd::New(TyMachPtr, this->m_func); |
| Lowerer::InsertMove(opndUndef, opndUndefAddress, labelNormal); |
| |
| BVSparse<JitArenaAllocator> *formalsBv = JitAnew(this->m_alloc, BVSparse<JitArenaAllocator>, this->m_alloc); |
| |
| while (currArgInCount > 0) |
| { |
| dstOpnd = instrArgIn->GetDst(); |
| |
| Assert(dstOpnd->IsRegOpnd()); |
| isDuplicate = formalsBv->TestAndSet(dstOpnd->AsRegOpnd()->m_sym->AsStackSym()->m_id); |
| |
| // Now insert the undef initialization before the "normal" label |
| |
| // sn = assign undef |
| |
| Lowerer::InsertMove(dstOpnd, opndUndef, labelNormal); |
| |
| // INC excessOpnd |
| // BrEq_A $Ln-1 |
| |
| currArgInCount--; |
| |
| labelInitNext = IR::LabelInstr::New(Js::OpCode::Label, this->m_func); |
| |
| // And insert the "normal" initialization before the "done" label |
| |
| // sn = assign paramn |
| // $Ln-1: |
| |
| labelInit->InsertAfter(labelInitNext); |
| labelInit = labelInitNext; |
| |
| instrArgInNext = instrArgIn->m_prev; |
| instrArgIn->Unlink(); |
| |
| Js::ArgSlot prevParamSlotNum = instrArgIn->GetSrc1()->AsSymOpnd()->m_sym->AsStackSym()->GetParamSlotNum(); |
| |
| // function foo(x, x) { use(x); } |
| // Here 'x' refers to the second parameter. Since we process the ArgIns in reverse order, we need to skip |
| // the initialization of the first 'x' so it doesn't overwrite the value of the second. WOOB:1105504 |
| if (isDuplicate) |
| { |
| instrArgIn->Free(); |
| } |
| else |
| { |
| ConvertArgOpndIfGeneratorFunction(instrArgIn, generatorArgsPtrOpnd); |
| labelInit->InsertBefore(instrArgIn); |
| this->m_lowererMD.ChangeToAssign(instrArgIn); |
| } |
| instrArgIn = instrArgInNext; |
| |
| while (instrArgIn->m_opcode != Js::OpCode::ArgIn_A) |
| { |
| instrArgIn = instrArgIn->m_prev; |
| AssertMsg(instrArgIn, "???"); |
| } |
| |
| // Adjustment for dead store of ArgIn_A |
| Js::ArgSlot currParamSlotNum = instrArgIn->GetSrc1()->AsSymOpnd()->m_sym->AsStackSym()->GetParamSlotNum(); |
| Js::ArgSlot diffSlotsNum = prevParamSlotNum - currParamSlotNum; |
| |
| AssertMsg(diffSlotsNum > 0, "ArgIns are not in numerical order by param slot"); |
| |
| while (diffSlotsNum > 0) |
| { |
| InsertAdd(true, excessOpnd, excessOpnd, IR::IntConstOpnd::New(1, TyMachReg, this->m_func), labelNormal); |
| InsertBranch(Js::OpCode::BrEq_A, labelInitNext, labelNormal); |
| diffSlotsNum--; |
| } |
| |
| AssertMsg(instrArgIn->GetSrc1()->AsSymOpnd()->m_sym->AsStackSym()->GetParamSlotNum() <= formalsCount, |
| "Expect all ArgIn's to be in numerical order by param slot"); |
| } |
| |
| // Insert final undef and normal initializations, jumping unconditionally to the end |
| // rather than checking against the decremented formals count as we did inside the loop above. |
| |
| // s2 = assign undef |
| |
| dstOpnd = instrArgIn->GetDst(); |
| Assert(dstOpnd->IsRegOpnd()); |
| isDuplicate = formalsBv->TestAndSet(dstOpnd->AsRegOpnd()->m_sym->AsStackSym()->m_id); |
| |
| Lowerer::InsertMove(dstOpnd, opndUndef, labelNormal); |
| |
| if (hasRest) |
| { |
| InsertMove(excessOpnd, IR::IntConstOpnd::New(0, TyUint8, this->m_func), labelNormal); |
| } |
| |
| // Br $done |
| |
| labelDone = IR::LabelInstr::New(Js::OpCode::Label, this->m_func); |
| instrBranch = IR::BranchInstr::New(LowererMD::MDUncondBranchOpcode, labelDone, this->m_func); |
| labelNormal->InsertBefore(instrBranch); |
| |
| // s2 = assign param2 |
| // $done: |
| |
| labelInit->InsertAfter(labelDone); |
| |
| if (hasRest) |
| { |
| // The formals count has been tainted, so restore it before lowering rest |
| IR::IntConstOpnd * formalsOpnd = IR::IntConstOpnd::New(this->m_func->GetInParamsCount(), TyUint32, this->m_func); |
| LowerRestParameter(formalsOpnd, restDst, excessOpnd, labelDone, generatorArgsPtrOpnd); |
| } |
| |
| instrArgIn->Unlink(); |
| if (isDuplicate) |
| { |
| instrArgIn->Free(); |
| } |
| else |
| { |
| ConvertArgOpndIfGeneratorFunction(instrArgIn, generatorArgsPtrOpnd); |
| labelDone->InsertBefore(instrArgIn); |
| this->m_lowererMD.ChangeToAssign(instrArgIn); |
| } |
| |
| JitAdelete(this->m_alloc, formalsBv); |
| |
| return instrResume; |
| } |
| |
| void |
| Lowerer::ConvertArgOpndIfGeneratorFunction(IR::Instr *instrArgIn, IR::RegOpnd *generatorArgsPtrOpnd) |
| { |
| if (this->m_func->GetJITFunctionBody()->IsCoroutine()) |
| { |
| // Replace stack param operand with offset into arguments array held by |
| // the generator object. |
| IR::Opnd * srcOpnd = instrArgIn->UnlinkSrc1(); |
| StackSym * symParam = srcOpnd->AsSymOpnd()->m_sym->AsStackSym(); |
| Js::ArgSlot argIndex = symParam->GetParamSlotNum(); |
| |
| IR::IndirOpnd * indirOpnd = IR::IndirOpnd::New(generatorArgsPtrOpnd, (argIndex - 1) * MachPtr, TyMachPtr, this->m_func); |
| |
| srcOpnd->Free(this->m_func); |
| instrArgIn->SetSrc1(indirOpnd); |
| } |
| } |
| |
| IR::RegOpnd * |
| Lowerer::LoadGeneratorArgsPtr(IR::Instr *instrInsert) |
| { |
| IR::Instr * instr = LoadGeneratorObject(instrInsert); |
| IR::RegOpnd * generatorRegOpnd = instr->GetDst()->AsRegOpnd(); |
| |
| IR::IndirOpnd * indirOpnd = IR::IndirOpnd::New(generatorRegOpnd, Js::JavascriptGenerator::GetArgsPtrOffset(), TyMachPtr, instrInsert->m_func); |
| IR::RegOpnd * argsPtrOpnd = IR::RegOpnd::New(TyMachReg, instrInsert->m_func); |
| Lowerer::InsertMove(argsPtrOpnd, indirOpnd, instrInsert); |
| return argsPtrOpnd; |
| } |
| |
| IR::Instr * |
| Lowerer::LoadGeneratorObject(IR::Instr * instrInsert) |
| { |
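| // The generator object is passed as the first argument; create an implicit param sym for it and load it from the param area. |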
| StackSym * generatorSym = StackSym::NewImplicitParamSym(3, instrInsert->m_func); |
| instrInsert->m_func->SetArgOffset(generatorSym, LowererMD::GetFormalParamOffset() * MachPtr); |
| IR::SymOpnd * generatorSymOpnd = IR::SymOpnd::New(generatorSym, TyMachPtr, instrInsert->m_func); |
| IR::RegOpnd * generatorRegOpnd = IR::RegOpnd::New(TyMachPtr, instrInsert->m_func); |
| instrInsert->m_func->SetHasImplicitParamLoad(); |
| return Lowerer::InsertMove(generatorRegOpnd, generatorSymOpnd, instrInsert); |
| } |
| |
| IR::Instr * |
| Lowerer::LowerArgInAsmJs(IR::Instr * instr) |
| { |
| Assert(m_func->GetJITFunctionBody()->IsAsmJsMode()); |
| |
| Assert(instr && instr->m_opcode == Js::OpCode::ArgIn_A); |
| IR::Instr* instrPrev = instr->m_prev; |
| m_lowererMD.ChangeToAssign(instr); |
| |
| return instrPrev; |
| } |
| |
| bool |
| Lowerer::InlineBuiltInLibraryCall(IR::Instr *callInstr) |
| { |
| IR::Opnd *src1 = callInstr->GetSrc1(); |
| IR::Opnd *src2 = callInstr->GetSrc2(); |
| |
| // Get the arg count by looking at the slot number of the last arg symbol. |
| |
| if (!src2->IsSymOpnd()) |
| { |
| // No args? Not sure this is possible, but handle it. |
| return false; |
| } |
| |
| StackSym *argLinkSym = src2->AsSymOpnd()->m_sym->AsStackSym(); |
| // Subtract "this" from the arg count. |
| IntConstType argCount = argLinkSym->GetArgSlotNum() - 1; |
| |
| // Find the callee's built-in index (if any). |
| Js::BuiltinFunction index = Func::GetBuiltInIndex(src1); |
| |
| // Warning! |
| // Don't add new built-ins to the following switch; built-ins need to be inlined via the CallDirect path. |
| // The following is only for prejit scenarios, where we don't always get inlining and instead generate the fast path in the lowerer. |
| // Generating the fast path here misses fixed-function and GlobOpt optimizations. |
| switch(index) |
| { |
| case Js::BuiltinFunction::JavascriptString_CharAt: |
| case Js::BuiltinFunction::JavascriptString_CharCodeAt: |
| if (argCount != 1) |
| { |
| return false; |
| } |
| if (!callInstr->GetDst()) |
| { |
| // Optimization of Char[Code]At assumes result is used. |
| return false; |
| } |
| break; |
| case Js::BuiltinFunction::Math_Abs: |
| #ifdef _M_IX86 |
| if (!AutoSystemInfo::Data.SSE2Available()) |
| { |
| return false; |
| } |
| #endif |
| if (argCount != 1) |
| { |
| return false; |
| } |
| if (!callInstr->GetDst()) |
| { |
| // Optimization of Abs assumes result is used. |
| return false; |
| } |
| break; |
| |
| case Js::BuiltinFunction::JavascriptArray_Push: |
| { |
| if (argCount != 1) |
| { |
| return false; |
| } |
| if (callInstr->GetDst()) |
| { |
| // Optimization of push assumes result is unused. |
| return false; |
| } |
| |
| StackSym *linkSym = callInstr->GetSrc2()->AsSymOpnd()->m_sym->AsStackSym(); |
| Assert(linkSym->IsSingleDef()); |
| linkSym = linkSym->m_instrDef->GetSrc2()->AsSymOpnd()->m_sym->AsStackSym(); |
| Assert(linkSym->IsSingleDef()); |
| |
| IR::Opnd *const arrayOpnd = linkSym->m_instrDef->GetSrc1(); |
| if(!arrayOpnd->IsRegOpnd()) |
| { |
| // This should be rare, but needs to be handled. |
| // By now, we've already started some of the inlining. Simply jmp to the helper. |
| // The branch will get peeped later. |
| return false; |
| } |
| |
| if(!ShouldGenerateArrayFastPath(arrayOpnd, false, false, false) || |
| arrayOpnd->GetValueType().IsLikelyNativeArray()) |
| { |
| // Rejecting native array for now, since we have to do a FromVar at the call site and bail out. |
| return false; |
| } |
| |
| break; |
| } |
| |
| case Js::BuiltinFunction::JavascriptString_Replace: |
| { |
| if(argCount != 2) |
| { |
| return false; |
| } |
| |
| if(!ShouldGenerateStringReplaceFastPath(callInstr, argCount)) |
| { |
| return false; |
| } |
| break; |
| } |
| |
| default: |
| return false; |
| } |
| |
| Assert(Func::IsBuiltInInlinedInLowerer(callInstr->GetSrc1())); |
| |
| IR::Opnd *callTargetOpnd = callInstr->GetSrc1(); |
| |
| IR::LabelInstr *labelHelper = IR::LabelInstr::New(Js::OpCode::Label, this->m_func, true); |
| |
| IR::Opnd *objRefOpnd = IR::MemRefOpnd::New((void*)this->GetObjRefForBuiltInTarget(callTargetOpnd->AsRegOpnd()), TyMachReg, this->m_func); |
| |
| InsertCompareBranch(callTargetOpnd, objRefOpnd, Js::OpCode::BrNeq_A, labelHelper, callInstr); |
| |
| callInstr->InsertBefore(labelHelper); |
| |
| Assert(argCount <= 2); |
| |
| IR::Opnd *argsOpnd[3]; |
| IR::Opnd *linkOpnd = callInstr->GetSrc2(); |
| |
| while(linkOpnd->IsSymOpnd()) |
| { |
| IR::SymOpnd * symOpnd = linkOpnd->AsSymOpnd(); |
| StackSym *sym = symOpnd->m_sym->AsStackSym(); |
| Assert(sym->m_isSingleDef); |
| IR::Instr *argInstr = sym->m_instrDef; |
| |
| Assert(argCount >= 0); |
| argsOpnd[argCount] = argInstr->GetSrc1(); |
| argCount--; |
| |
| argInstr->Unlink(); |
| labelHelper->InsertAfter(argInstr); |
| |
| linkOpnd = argInstr->GetSrc2(); |
| } |
| AnalysisAssert(argCount == -1); |
| |
| // Move startcall |
| Assert(linkOpnd->IsRegOpnd()); |
| StackSym *sym = linkOpnd->AsRegOpnd()->m_sym; |
| Assert(sym->m_isSingleDef); |
| IR::Instr *startCall = sym->m_instrDef; |
| Assert(startCall->m_opcode == Js::OpCode::StartCall); |
| startCall->Unlink(); |
| labelHelper->InsertAfter(startCall); |
| |
| // $doneLabel: |
| IR::LabelInstr *doneLabel = IR::LabelInstr::New(Js::OpCode::Label, this->m_func); |
| callInstr->InsertAfter(doneLabel); |
| |
| bool success = true; |
| switch(index) |
| { |
| case Js::BuiltinFunction::Math_Abs: |
| this->m_lowererMD.GenerateFastAbs(callInstr->GetDst(), argsOpnd[1], callInstr, labelHelper, labelHelper, doneLabel); |
| break; |
| |
| case Js::BuiltinFunction::JavascriptString_CharCodeAt: |
| case Js::BuiltinFunction::JavascriptString_CharAt: |
| success = GenerateFastCharAt(index, callInstr->GetDst(), argsOpnd[0], argsOpnd[1], |
| callInstr, labelHelper, labelHelper, doneLabel); |
| break; |
| |
| case Js::BuiltinFunction::JavascriptArray_Push: |
| success = GenerateFastPush(argsOpnd[0], argsOpnd[1], callInstr, labelHelper, labelHelper, nullptr, doneLabel); |
| break; |
| |
| case Js::BuiltinFunction::JavascriptString_Replace: |
| success = GenerateFastReplace(argsOpnd[0], argsOpnd[1], argsOpnd[2], callInstr, labelHelper, labelHelper, doneLabel); |
| break; |
| |
| default: |
| Assert(UNREACHED); |
| } |
| |
| IR::Instr *instr = IR::BranchInstr::New(LowererMD::MDUncondBranchOpcode, doneLabel, this->m_func); |
| labelHelper->InsertBefore(instr); |
| |
| return success; |
| } |
| |
| // Perform the lowerer's part of inlining a built-in function. |
| // For details, see inline.cpp. |
| // |
| // Description of the changes here (note that handling the ArgOuts is similar to InlineeStart): |
| // - Move ArgOut_A_InlineBuiltIn next to the call instr -- used by bailout processing in register allocator. |
| // - Remove StartCall and InlineBuiltInStart for this call. |
| // Before: |
| // StartCall fn |
| // d1 = BIA s1, link1 |
| // ... |
| // InlineBuiltInStart fn, link0 |
| // After: |
| // ... |
| // d1 = BIA s1, NULL |
| void Lowerer::LowerInlineBuiltIn(IR::Instr* builtInEndInstr) |
| { |
| Assert(builtInEndInstr->m_opcode == Js::OpCode::InlineBuiltInEnd || builtInEndInstr->m_opcode == Js::OpCode::InlineNonTrackingBuiltInEnd); |
| IR::Instr* startCallInstr = nullptr; |
| builtInEndInstr->IterateArgInstrs([&](IR::Instr* argInstr) { |
| startCallInstr = argInstr->GetSrc2()->GetStackSym()->m_instrDef; |
| return false; |
| }); |
| // Keep the startCall around as bailout refers to it. Just unlink it for now - do not delete it. |
| startCallInstr->Unlink(); |
| builtInEndInstr->Remove(); |
| } |
| |
| intptr_t |
| Lowerer::GetObjRefForBuiltInTarget(IR::RegOpnd * regOpnd) |
| { |
| intptr_t mathFns = m_func->GetScriptContextInfo()->GetBuiltinFunctionsBaseAddr(); |
| Js::BuiltinFunction index = regOpnd->m_sym->m_builtInIndex; |
| |
| AssertMsg(index < Js::BuiltinFunction::Count, "Invalid built-in index on a call target marked as built-in"); |
| |
| return mathFns + index; |
| } |
| |
| IR::Instr * |
| Lowerer::LowerNewRegEx(IR::Instr * instr) |
| { |
| IR::Opnd *src1 = instr->UnlinkSrc1(); |
| |
| Assert(src1->IsAddrOpnd()); |
| |
| #if ENABLE_REGEX_CONFIG_OPTIONS |
| if (REGEX_CONFIG_FLAG(RegexTracing)) |
| { |
| Assert(!instr->GetDst()->CanStoreTemp()); |
| IR::Instr * instrPrev = LoadScriptContext(instr); |
| instrPrev = m_lowererMD.LoadHelperArgument(instr, src1); |
| m_lowererMD.ChangeToHelperCall(instr, IR::HelperScrRegEx_OP_NewRegEx); |
| return instrPrev; |
| } |
| #endif |
| IR::Instr * instrPrev = instr->m_prev; |
| IR::RegOpnd * dstOpnd = instr->UnlinkDst()->AsRegOpnd(); |
| IR::SymOpnd * tempObjectSymOpnd; |
| bool isZeroed = GenerateRecyclerOrMarkTempAlloc(instr, dstOpnd, IR::HelperAllocMemForJavascriptRegExp, sizeof(Js::JavascriptRegExp), &tempObjectSymOpnd); |
| if (tempObjectSymOpnd && !PHASE_OFF(Js::HoistMarkTempInitPhase, this->m_func) && this->outerMostLoopLabel) |
| { |
| // Hoist the vtable and pattern init to the outermost loop top, as they never change |
| InsertMove(tempObjectSymOpnd, |
| LoadVTableValueOpnd(this->outerMostLoopLabel, VTableValue::VtableJavascriptRegExp), |
| this->outerMostLoopLabel, false); |
| } |
| else |
| { |
| GenerateMemInit(dstOpnd, 0, LoadVTableValueOpnd(instr, VTableValue::VtableJavascriptRegExp), instr, isZeroed); |
| } |
| GenerateMemInit(dstOpnd, Js::JavascriptRegExp::GetOffsetOfType(), |
| this->LoadLibraryValueOpnd(instr, LibraryValue::ValueRegexType), instr, isZeroed); |
| GenerateMemInitNull(dstOpnd, Js::JavascriptRegExp::GetOffsetOfAuxSlots(), instr, isZeroed); |
| GenerateMemInitNull(dstOpnd, Js::JavascriptRegExp::GetOffsetOfObjectArray(), instr, isZeroed); |
| if (tempObjectSymOpnd && !PHASE_OFF(Js::HoistMarkTempInitPhase, this->m_func) && this->outerMostLoopLabel) |
| { |
| InsertMove(IR::SymOpnd::New(tempObjectSymOpnd->m_sym, |
| tempObjectSymOpnd->m_offset + Js::JavascriptRegExp::GetOffsetOfPattern(), TyMachPtr, this->m_func), |
| src1, this->outerMostLoopLabel, false); |
| } |
| else |
| { |
| GenerateMemInit(dstOpnd, Js::JavascriptRegExp::GetOffsetOfPattern(), src1, instr, isZeroed); |
| } |
| GenerateMemInitNull(dstOpnd, Js::JavascriptRegExp::GetOffsetOfSplitPattern(), instr, isZeroed); |
| GenerateMemInitNull(dstOpnd, Js::JavascriptRegExp::GetOffsetOfLastIndexVar(), instr, isZeroed); |
| GenerateMemInit(dstOpnd, Js::JavascriptRegExp::GetOffsetOfLastIndexOrFlag(), 0, instr, isZeroed); |
| instr->Remove(); |
| |
| return instrPrev; |
| } |
| |
| IR::Instr * |
| Lowerer::GenerateRuntimeError(IR::Instr * insertBeforeInstr, Js::MessageId errorCode, IR::JnHelperMethod helper /*= IR::JnHelperMethod::HelperOp_RuntimeTypeError*/) |
| { |
| IR::Instr * runtimeErrorInstr = IR::Instr::New(Js::OpCode::RuntimeTypeError, this->m_func); |
| runtimeErrorInstr->SetSrc1(IR::IntConstOpnd::New(errorCode, TyInt32, this->m_func, true)); |
| insertBeforeInstr->InsertBefore(runtimeErrorInstr); |
| return this->LowerUnaryHelperMem(runtimeErrorInstr, helper); |
| } |
| |
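// Returns true if the operand is known by its value type to be null or undefined.
// Int and float constants can never be null or undefined, so they are rejected up front.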
| bool Lowerer::IsNullOrUndefRegOpnd(IR::RegOpnd *opnd) const |
| { |
| StackSym *sym = opnd->m_sym; |
| |
| if (sym->IsIntConst() || sym->IsFloatConst()) |
| { |
| return false; |
| } |
| |
| return opnd->GetValueType().IsUndefined() || opnd->GetValueType().IsNull(); |
| } |
| |
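// Returns true if the operand holds a constant that GetConstRegOpnd can materialize:
// undefined, null, or a boolean whose sym is a constant. As in IsNullOrUndefRegOpnd,
// int and float constants are excluded.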
| bool Lowerer::IsConstRegOpnd(IR::RegOpnd *opnd) const |
| { |
| StackSym *sym = opnd->m_sym; |
| |
| if (sym->IsIntConst() || sym->IsFloatConst()) |
| { |
| return false; |
| } |
| |
| const auto& vt = opnd->GetValueType(); |
| return vt.IsUndefined() || vt.IsNull() || (sym->m_isConst && vt.IsBoolean()); |
| } |
| |
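// Materializes the constant recognized by IsConstRegOpnd: the library's undefined or null
// singletons, or the address operand from the boolean constant's defining instruction.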
| IR::Opnd * Lowerer::GetConstRegOpnd(IR::RegOpnd *opnd, IR::Instr * instr) |
| { |
| if (opnd->GetValueType().IsUndefined()) |
| { |
| return this->LoadLibraryValueOpnd(instr, LibraryValue::ValueUndefined); |
| } |
| if (opnd->GetValueType().IsNull()) |
| { |
| return this->LoadLibraryValueOpnd(instr, LibraryValue::ValueNull); |
| } |
| |
| Assert(opnd->GetValueType().IsBoolean()); |
| return opnd->GetStackSym()->GetInstrDef()->GetSrc1()->AsAddrOpnd(); |
| } |
| |
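// Calls are conservatively assumed to have side effects, except for a few x86 helpers
// that only convert an int32/uint32 to a tagged value. Non-call instructions defer to
// the instruction's own side-effect query.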
| bool |
| Lowerer::HasSideEffects(IR::Instr *instr) |
| { |
| if (LowererMD::IsCall(instr)) |
| { |
| #ifdef _M_IX86 |
| IR::Opnd *src1 = instr->GetSrc1(); |
| if (src1->IsHelperCallOpnd()) |
| { |
| IR::HelperCallOpnd * helper = src1->AsHelperCallOpnd(); |
| |
| switch(helper->m_fnHelper) |
| { |
| case IR::HelperOp_Int32ToAtomInPlace: |
| case IR::HelperOp_Int32ToAtom: |
| case IR::HelperOp_UInt32ToAtom: |
| return false; |
| } |
| } |
| #endif |
| return true; |
| } |
| return instr->HasAnySideEffects(); |
| } |
| |
| bool Lowerer::IsArgSaveRequired(Func *func) { |
| return (!func->IsTrueLeaf() || func->IsJitInDebugMode() || |
| // GetHasImplicitParamLoad covers generators, asmjs, |
| // and other javascript functions that implicitly read from the arg stack slots |
| func->GetHasThrow() || func->GetHasImplicitParamLoad() || func->HasThis() || func->argInsCount > 0); |
| } |
| |
| IR::Instr* |
| Lowerer::GenerateFastInlineBuiltInMathRandom(IR::Instr* instr) |
| { |
| AssertMsg(instr->GetDst()->IsFloat(), "dst must be float."); |
| IR::Instr* retInstr = instr->m_prev; |
| IR::Opnd* dst = instr->GetDst(); |
| |
| #if defined(_M_X64) |
| if (m_func->GetScriptContextInfo()->IsPRNGSeeded()) |
| { |
| const uint64 mExp = 0x3FF0000000000000; |
| const uint64 mMant = 0x000FFFFFFFFFFFFF; |
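        // The sequence below is one step of an xorshift128+-style PRNG (shift constants 23/17/26).
        // A C-style sketch of the update, using the s0/s1 names from the comments below:
        //
        //     uint64 s0 = seed1, s1 = seed0;
        //     s1 ^= s1 << 23;
        //     s1 ^= s1 >> 17;
        //     s1 ^= s0;
        //     s1 ^= s0 >> 26;
        //     seed0 = s0; seed1 = s1;  // state words are swapped on store
        //     result = s0 + s1;        // mapped into [0.0, 1.0) at the end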
| |
| IR::RegOpnd* r0 = IR::RegOpnd::New(TyUint64, m_func); // s0 |
| IR::RegOpnd* r1 = IR::RegOpnd::New(TyUint64, m_func); // s1 |
| IR::RegOpnd* r3 = IR::RegOpnd::New(TyUint64, m_func); // helper uint64 reg |
| IR::RegOpnd* r4 = IR::RegOpnd::New(TyFloat64, m_func); // helper float64 reg |
| |
| // =========================================================== |
| // s0 = scriptContext->GetLibrary()->GetRandSeed1(); |
| // s1 = scriptContext->GetLibrary()->GetRandSeed0(); |
| // =========================================================== |
| this->InsertMove(r0, |
| IR::MemRefOpnd::New((BYTE*)m_func->GetScriptContextInfo()->GetLibraryAddr() + Js::JavascriptLibrary::GetRandSeed1Offset(), TyUint64, instr->m_func), instr); |
| this->InsertMove(r1, |
| IR::MemRefOpnd::New((BYTE*)m_func->GetScriptContextInfo()->GetLibraryAddr() + Js::JavascriptLibrary::GetRandSeed0Offset(), TyUint64, instr->m_func), instr); |
| |
| // =========================================================== |
| // s1 ^= s1 << 23; |
| // =========================================================== |
| this->InsertMove(r3, r1, instr); |
| this->InsertShift(Js::OpCode::Shl_A, false, r3, r3, IR::IntConstOpnd::New(23, TyInt8, m_func), instr); |
| this->InsertXor(r1, r1, r3, instr); |
| |
| // =========================================================== |
| // s1 ^= s1 >> 17; |
| // =========================================================== |
| this->InsertMove(r3, r1, instr); |
| this->InsertShift(Js::OpCode::ShrU_A, false, r3, r3, IR::IntConstOpnd::New(17, TyInt8, m_func), instr); |
| this->InsertXor(r1, r1, r3, instr); |
| |
| // =========================================================== |
| // s1 ^= s0; |
| // =========================================================== |
| this->InsertXor(r1, r1, r0, instr); |
| |
| // =========================================================== |
| // s1 ^= s0 >> 26; |
| // =========================================================== |
| this->InsertMove(r3, r0, instr); |
| this->InsertShift(Js::OpCode::ShrU_A, false, r3, r3, IR::IntConstOpnd::New(26, TyInt8, m_func), instr); |
| this->InsertXor(r1, r1, r3, instr); |
| |
| // =========================================================== |
| // scriptContext->GetLibrary()->SetRandSeed0(s0); |
| // scriptContext->GetLibrary()->SetRandSeed1(s1); |
| // =========================================================== |
| this->InsertMove( |
| IR::MemRefOpnd::New((BYTE*)m_func->GetScriptContextInfo()->GetLibraryAddr() + Js::JavascriptLibrary::GetRandSeed0Offset(), TyUint64, m_func), r0, instr); |
| this->InsertMove( |
| IR::MemRefOpnd::New((BYTE*)m_func->GetScriptContextInfo()->GetLibraryAddr() + Js::JavascriptLibrary::GetRandSeed1Offset(), TyUint64, m_func), r1, instr); |
| |
| // =========================================================== |
| // dst = bit_cast<float64>(((s0 + s1) & mMant) | mExp); |
| // =========================================================== |
| this->InsertAdd(false, r1, r1, r0, instr); |
| this->InsertMove(r3, IR::IntConstOpnd::New(mMant, TyInt64, m_func, true), instr); |
| this->InsertAnd(r1, r1, r3, instr); |
| this->InsertMove(r3, IR::IntConstOpnd::New(mExp, TyInt64, m_func, true), instr); |
| this->InsertOr(r1, r1, r3, instr); |
| this->InsertMoveBitCast(dst, r1, instr); |
| |
| // =================================================================== |
| // dst -= 1.0; |
| // =================================================================== |
| this->InsertMove(r4, IR::MemRefOpnd::New(m_func->GetThreadContextInfo()->GetDoubleOnePointZeroAddr(), TyFloat64, m_func, IR::AddrOpndKindDynamicDoubleRef), instr); |
| this->InsertSub(false, dst, dst, r4, instr); |
| } |
| else |
| #endif |
| { |
| IR::Opnd* tmpdst = dst; |
| if (!dst->IsRegOpnd()) |
| { |
| tmpdst = IR::RegOpnd::New(dst->GetType(), instr->m_func); |
| } |
| |
| LoadScriptContext(instr); |
| IR::Instr * helperCallInstr = IR::Instr::New(LowererMD::MDCallOpcode, tmpdst, instr->m_func); |
| instr->InsertBefore(helperCallInstr); |
| m_lowererMD.ChangeToHelperCall(helperCallInstr, IR::JnHelperMethod::HelperDirectMath_Random); |
| |
| if (tmpdst != dst) |
| { |
| InsertMove(dst, tmpdst, instr); |
| } |
| } |
| |
| instr->Remove(); |
| return retInstr; |
| } |
| |
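// Lowers CallDirect. The ArgOut_A_InlineSpecialized instr linked through src2 carries the target
// function object in its src1 and the real arg chain in its src2; both are extracted, the extra
// ArgOut is removed, and the call is emitted as a direct call (preceded by an implicit-call
// bailout check when one is attached).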
| IR::Instr * |
| Lowerer::LowerCallDirect(IR::Instr * instr) |
| { |
| IR::Opnd* linkOpnd = instr->UnlinkSrc2(); |
| StackSym *linkSym = linkOpnd->AsSymOpnd()->m_sym->AsStackSym(); |
| IR::Instr* argInstr = linkSym->m_instrDef; |
| Assert(argInstr->m_opcode == Js::OpCode::ArgOut_A_InlineSpecialized); |
| IR::Opnd* funcObj = argInstr->UnlinkSrc1(); |
| instr->SetSrc2(argInstr->UnlinkSrc2()); |
| argInstr->Remove(); |
| |
| if (instr->HasBailOutInfo() && !instr->HasLazyBailOut()) |
| { |
| IR::Instr * bailOutInstr = this->SplitBailOnImplicitCall(instr, instr->m_next, instr->m_next); |
| this->LowerBailOnEqualOrNotEqual(bailOutInstr); |
| } |
| Js::CallFlags flags = instr->GetDst() ? Js::CallFlags_Value : Js::CallFlags_NotUsed; |
| return this->GenerateDirectCall(instr, funcObj, (ushort)flags); |
| } |
| |
| IR::Instr * |
| Lowerer::GenerateDirectCall(IR::Instr* inlineInstr, IR::Opnd* funcObj, ushort callflags) |
| { |
| int32 argCount = m_lowererMD.LowerCallArgs(inlineInstr, callflags); |
| m_lowererMD.LoadHelperArgument(inlineInstr, funcObj); |
    m_lowererMD.LowerCall(inlineInstr, (Js::ArgSlot)argCount); // argCount accounts for the function object and callinfo
| |
| return inlineInstr->m_prev; |
| } |
| |
| /* |
| * GenerateHelperToArrayPushFastPath |
| * Generates Helper Call and pushes arguments to the Push HelperCall |
| */ |
| IR::Instr * |
| Lowerer::GenerateHelperToArrayPushFastPath(IR::Instr * instr, IR::LabelInstr * bailOutLabelHelper) |
| { |
| IR::Opnd * arrayHelperOpnd = instr->UnlinkSrc1(); |
| IR::Opnd * elementHelperOpnd = instr->UnlinkSrc2(); |
| IR::JnHelperMethod helperMethod; |
| |
| if(elementHelperOpnd->IsInt32()) |
| { |
| Assert(arrayHelperOpnd->GetValueType().IsLikelyNativeIntArray()); |
| helperMethod = IR::HelperArray_NativeIntPush; |
| |
| m_lowererMD.LoadHelperArgument(instr, elementHelperOpnd); |
| } |
| else if(elementHelperOpnd->IsFloat()) |
| { |
| Assert(arrayHelperOpnd->GetValueType().IsLikelyNativeFloatArray()); |
| helperMethod = IR::HelperArray_NativeFloatPush; |
| |
| m_lowererMD.LoadDoubleHelperArgument(instr, elementHelperOpnd); |
| } |
| else |
| { |
| helperMethod = IR::HelperArray_VarPush; |
| m_lowererMD.LoadHelperArgument(instr, elementHelperOpnd); |
| } |
| |
| m_lowererMD.LoadHelperArgument(instr, arrayHelperOpnd); |
| LoadScriptContext(instr); |
| return m_lowererMD.ChangeToHelperCall(instr, helperMethod); |
| } |
| |
| /* |
| * GenerateHelperToArrayPopFastPath |
| * Generates Helper Call and pushes arguments to the Pop HelperCall |
| */ |
| IR::Instr * |
| Lowerer::GenerateHelperToArrayPopFastPath(IR::Instr * instr, IR::LabelInstr * doneLabel, IR::LabelInstr * bailOutLabelHelper) |
| { |
| IR::Opnd * arrayHelperOpnd = instr->UnlinkSrc1(); |
| ValueType arrayValueType = arrayHelperOpnd->GetValueType(); |
| |
| IR::JnHelperMethod helperMethod; |
| |
    // Decide which helper to use based on dst availability and whether the array is a native array.
| if(arrayValueType.IsLikelyNativeArray() && !instr->GetDst()) |
| { |
| helperMethod = IR::HelperArray_NativePopWithNoDst; |
| } |
| else if(arrayValueType.IsLikelyNativeIntArray()) |
| { |
| helperMethod = IR::HelperArray_NativeIntPop; |
| } |
| else if(arrayValueType.IsLikelyNativeFloatArray()) |
| { |
| helperMethod = IR::HelperArray_NativeFloatPop; |
| } |
| else |
| { |
| helperMethod = IR::HelperArray_VarPop; |
| } |
| |
| m_lowererMD.LoadHelperArgument(instr, arrayHelperOpnd); |
| |
    // We do not need the script context for the HelperArray_NativePopWithNoDst call.
| if(helperMethod != IR::HelperArray_NativePopWithNoDst) |
| { |
| LoadScriptContext(instr); |
| } |
| |
| IR::Instr * retInstr = m_lowererMD.ChangeToHelperCall(instr, helperMethod, bailOutLabelHelper); |
| |
    // We don't need a missing-item check for var arrays; the helper takes care of it.
| if(arrayValueType.IsLikelyNativeArray()) |
| { |
| if(retInstr->GetDst()) |
| { |
            // Do this check only for native arrays with a dst. For var arrays, the runtime helper handles it.
| InsertMissingItemCompareBranch(retInstr->GetDst(), Js::OpCode::BrNeq_A, doneLabel, bailOutLabelHelper); |
| } |
| else |
| { |
            // We need an unconditional jump to doneLabel if the Pop instr has no dst.
| InsertBranch(Js::OpCode::Br, true, doneLabel, bailOutLabelHelper); |
| } |
| } |
| |
| return retInstr; |
| } |
| |
| IR::Instr * |
| Lowerer::LowerCondBranchCheckBailOut(IR::BranchInstr * branchInstr, IR::Instr * helperCall, bool isHelper) |
| { |
| Assert(branchInstr->m_opcode == Js::OpCode::BrTrue_A || branchInstr->m_opcode == Js::OpCode::BrFalse_A); |
| if (branchInstr->HasBailOutInfo()) |
| { |
| #ifdef ENABLE_SCRIPT_DEBUGGING |
| IR::BailOutKind debuggerBailOutKind = IR::BailOutInvalid; |
| if (branchInstr->HasAuxBailOut()) |
| { |
            // We have a shared debugger bailout. For branches we lower it here, not in SplitBailForDebugger.
| // See SplitBailForDebugger for details. |
| AssertMsg(!(branchInstr->GetBailOutKind() & IR::BailOutForDebuggerBits), "There should be no debugger bits in main bailout kind."); |
| |
| debuggerBailOutKind = branchInstr->GetAuxBailOutKind() & IR::BailOutForDebuggerBits; |
| AssertMsg((debuggerBailOutKind & ~(IR::BailOutIgnoreException | IR::BailOutForceByFlag)) == 0, "Only IR::BailOutIgnoreException|ForceByFlag supported here."); |
| } |
| #endif |
| |
| IR::Instr * bailOutInstr = this->SplitBailOnImplicitCall(branchInstr, helperCall, branchInstr); |
| IR::Instr* prevInstr = this->LowerBailOnEqualOrNotEqual(bailOutInstr, branchInstr, nullptr, nullptr, isHelper); |
| |
| #ifdef ENABLE_SCRIPT_DEBUGGING |
| if (debuggerBailOutKind != IR::BailOutInvalid) |
| { |
            // Note that by this time the implicit calls bailout is already lowered.
            // Here we reuse the same bailout info and lower the debugger bailout, which becomes a shared bailout.
| BailOutInfo* bailOutInfo = bailOutInstr->GetBailOutInfo(); |
| IR::BailOutInstr* debuggerBailoutInstr = IR::BailOutInstr::New( |
| Js::OpCode::BailForDebugger, debuggerBailOutKind, bailOutInfo, bailOutInfo->bailOutFunc); |
| prevInstr->InsertAfter(debuggerBailoutInstr); |
| // The result of that is: |
| // original helper op_* instr, then debugger bailout, then implicit calls bailout/etc with the branch instr. |
| // Example: |
| // s35(eax).i32 = CALL Op_GreaterEqual.u32 # -- original op_* helper |
| // s34.i32 = MOV s35(eax).i32 # |
| // BailForDebugger # Bailout: #0042 (BailOutIgnoreException) -- the debugger bailout |
| // CMP [0x0003BDE0].i8, 1 (0x1).i8 # -- implicit calls check |
| // JEQ $L10 # |
| //$L11: [helper] # |
| // CALL SaveAllRegistersAndBranchBailOut.u32 # Bailout: #0042 (BailOutOnImplicitCalls) |
| // JMP $L5 # |
| //$L10: [helper] # |
| // BrFalse_A $L3, s34.i32 #0034 -- The BrTrue/BrFalse branch (branch instr) |
| //$L6: [helper] #0042 |
| |
| this->LowerBailForDebugger(debuggerBailoutInstr, isHelper); |
            // After lowering this, we will have a check which JMPs to $L11 when the bailout condition holds.
| } |
| #else |
| (prevInstr); |
| #endif |
| } |
| |
| return m_lowererMD.LowerCondBranch(branchInstr); |
| } |
| |
| IR::SymOpnd * |
| Lowerer::LoadCallInfo(IR::Instr * instrInsert) |
| { |
| IR::SymOpnd * srcOpnd; |
| Func * func = instrInsert->m_func; |
| |
| if (func->GetJITFunctionBody()->IsCoroutine()) |
| { |
| // Generator function arguments and ArgumentsInfo are not on the stack. Instead they |
| // are accessed off the generator object (which is prm1). |
| IR::Instr *genLoadInstr = LoadGeneratorObject(instrInsert); |
| IR::RegOpnd * generatorRegOpnd = genLoadInstr->GetDst()->AsRegOpnd(); |
| |
| IR::IndirOpnd * indirOpnd = IR::IndirOpnd::New(generatorRegOpnd, Js::JavascriptGenerator::GetCallInfoOffset(), TyMachPtr, func); |
| IR::Instr * instr = Lowerer::InsertMove(IR::RegOpnd::New(TyMachPtr, func), indirOpnd, instrInsert); |
| |
| StackSym * callInfoSym = StackSym::New(TyMachReg, func); |
| IR::SymOpnd * callInfoSymOpnd = IR::SymOpnd::New(callInfoSym, TyMachReg, func); |
| Lowerer::InsertMove(callInfoSymOpnd, instr->GetDst(), instrInsert); |
| |
| srcOpnd = IR::SymOpnd::New(callInfoSym, TyMachReg, func); |
| } |
| else |
| { |
| // Otherwise callInfo is always the "second" argument. |
| // The stack looks like this: |
| // |
| // script param N |
| // ... |
| // script param 1 |
| // callinfo |
| // function object |
| // return addr |
| // FP -> FP chain |
| |
| StackSym * srcSym = LowererMD::GetImplicitParamSlotSym(1, func); |
| srcOpnd = IR::SymOpnd::New(srcSym, TyMachReg, func); |
| } |
| |
| return srcOpnd; |
| } |
| |
| IR::Instr * |
| Lowerer::LowerBailOnNotStackArgs(IR::Instr * instr) |
| { |
| if (!this->m_func->GetHasStackArgs()) |
| { |
| throw Js::RejitException(RejitReason::InlineApplyDisabled); |
| } |
| |
| IR::Instr * prevInstr = instr->m_prev; |
| |
| // Bail out test |
| // Label to skip Bailout and continue |
| IR::LabelInstr * continueLabelInstr; |
| IR::Instr *instrNext = instr->m_next; |
| if (instrNext->IsLabelInstr()) |
| { |
| continueLabelInstr = instrNext->AsLabelInstr(); |
| } |
| else |
| { |
| continueLabelInstr = IR::LabelInstr::New(Js::OpCode::Label, m_func, false); |
| instr->InsertAfter(continueLabelInstr); |
| } |
| |
| if (!instr->m_func->IsInlinee()) |
| { |
        // Bail out if the number of actuals (excluding "this") reaches Js::InlineeCallInfo::MaxInlineeArgoutCount (15).
| IR::RegOpnd* ldLenDstOpnd = IR::RegOpnd::New(TyUint32, instr->m_func); |
| const IR::AutoReuseOpnd autoReuseldLenDstOpnd(ldLenDstOpnd, instr->m_func); |
| IR::Instr* ldLen = IR::Instr::New(Js::OpCode::LdLen_A, ldLenDstOpnd, instr->m_func); |
        ldLenDstOpnd->SetValueType(ValueType::GetTaggedInt()); // LdLen_A works only on stack arguments
| instr->InsertBefore(ldLen); |
| this->GenerateFastRealStackArgumentsLdLen(ldLen); |
| this->InsertCompareBranch(ldLenDstOpnd, IR::IntConstOpnd::New(Js::InlineeCallInfo::MaxInlineeArgoutCount, TyUint32, m_func, true), Js::OpCode::BrLt_A, true, continueLabelInstr, instr); |
| this->GenerateBailOut(instr, nullptr, nullptr); |
| } |
| else |
| { |
        // For inlined functions, the actuals can't exceed Js::InlineeCallInfo::MaxInlineeArgoutCount (15),
        // so there is no need to bail out.
| instr->Remove(); |
| } |
| |
| return prevInstr; |
| } |
| |
| IR::Instr * |
| Lowerer::LowerBailOnNotSpreadable(IR::Instr *instr) |
| { |
| // We only avoid bailing out / throwing a rejit exception when the array operand is a simple, non-optimized, non-object array. |
| IR::Instr * prevInstr = instr->m_prev; |
| Func *func = instr->m_func; |
| |
| IR::Opnd *arraySrcOpnd = instr->UnlinkSrc1(); |
| IR::RegOpnd *arrayOpnd = GetRegOpnd(arraySrcOpnd, instr, func, TyMachPtr); |
| |
| const ValueType baseValueType(arrayOpnd->GetValueType()); |
| |
| // Check if we can just throw a rejit exception based on valuetype alone instead of bailing out. |
| if (!baseValueType.IsLikelyArray() |
| || baseValueType.IsLikelyAnyOptimizedArray() |
| || (baseValueType.IsLikelyObject() && (baseValueType.GetObjectType() == ObjectType::ObjectWithArray)) |
| |
| // Validate that GenerateArrayTest will not fail. |
| || !(baseValueType.IsUninitialized() || baseValueType.HasBeenObject()) |
| |
| || m_func->IsInlinee()) |
| { |
| throw Js::RejitException(RejitReason::InlineSpreadDisabled); |
| } |
| |
| // Past this point, we will need to use a bailout. |
| IR::LabelInstr *bailOutLabel = IR::LabelInstr::New(Js::OpCode::Label, func, true /* isOpHelper */); |
| |
| // See if we can skip various array checks on value type alone |
| if (!baseValueType.IsArray()) |
| { |
| GenerateArrayTest(arrayOpnd, bailOutLabel, bailOutLabel, instr, false); |
| } |
| if (!(baseValueType.IsArray() && baseValueType.HasNoMissingValues())) |
| { |
| InsertTestBranch( |
| IR::IndirOpnd::New(arrayOpnd, Js::JavascriptArray::GetOffsetOfArrayFlags(), TyUint8, func), |
| IR::IntConstOpnd::New(static_cast<uint8>(Js::DynamicObjectFlags::HasNoMissingValues), TyUint8, func, true), |
| Js::OpCode::BrEq_A, |
| bailOutLabel, |
| instr); |
| } |
| |
| IR::IndirOpnd *arrayLenPtrOpnd = IR::IndirOpnd::New(arrayOpnd, Js::JavascriptArray::GetOffsetOfLength(), TyUint32, func); |
| InsertCompareBranch(arrayLenPtrOpnd, IR::IntConstOpnd::New(Js::InlineeCallInfo::MaxInlineeArgoutCount - 1, TyUint8, func), Js::OpCode::BrGt_A, true, bailOutLabel, instr); |
| |
| IR::LabelInstr *skipBailOutLabel = IR::LabelInstr::New(Js::OpCode::Label, func); |
| InsertBranch(Js::OpCode::Br, skipBailOutLabel, instr); |
| |
| instr->InsertBefore(bailOutLabel); |
| instr->InsertAfter(skipBailOutLabel); |
| |
| GenerateBailOut(instr); |
| |
| return prevInstr; |
| } |
| |
| IR::Instr * |
| Lowerer::LowerBailOnNotPolymorphicInlinee(IR::Instr * instr) |
| { |
| Assert(instr->HasBailOutInfo() && (instr->GetBailOutKind() == IR::BailOutOnFailedPolymorphicInlineTypeCheck || instr->GetBailOutKind() == IR::BailOutOnPolymorphicInlineFunction)); |
| IR::Instr* instrPrev = instr->m_prev; |
| |
| this->GenerateBailOut(instr, nullptr, nullptr); |
| |
| return instrPrev; |
| } |
| |
| void |
| Lowerer::LowerBailoutCheckAndLabel(IR::Instr *instr, bool onEqual, bool isHelper) |
| { |
| // Label to skip Bailout and continue |
| IR::LabelInstr * continueLabelInstr; |
| IR::Instr *instrNext = instr->m_next; |
| if (instrNext->IsLabelInstr()) |
| { |
| continueLabelInstr = instrNext->AsLabelInstr(); |
| } |
| else |
| { |
| continueLabelInstr = IR::LabelInstr::New(Js::OpCode::Label, m_func, isHelper); |
| instr->InsertAfter(continueLabelInstr); |
| } |
| |
| if(instr->GetBailOutKind() == IR::BailOutInjected) |
| { |
| // BailOnEqual 0, 0 |
| Assert(onEqual); |
| Assert(instr->GetSrc1()->IsEqual(instr->GetSrc2())); |
| Assert(instr->GetSrc1()->AsIntConstOpnd()->GetValue() == 0); |
| |
        // The compare-generation code asserts that its two operands are distinct, so hoist src1 into a register first
        // (this bailout kind exists only for testing). Ideally we would create a BailOut instruction that bails out
        // unconditionally, but that appears to be problematic in a non-helper path. The generated sequence is:
| // xor s0, s0 |
| // test s0, s0 |
| // jnz $continue |
| // $bailout: |
| // // bailout |
| // $continue: |
| instr->HoistSrc1(LowererMD::GetLoadOp(instr->GetSrc1()->GetType())); |
| } |
| |
| InsertCompareBranch(instr->UnlinkSrc1(), instr->UnlinkSrc2(), |
| onEqual ? Js::OpCode::BrNeq_A : Js::OpCode::BrEq_A, continueLabelInstr, instr); |
| |
| if (!isHelper) |
| { |
| IR::LabelInstr * helperLabelInstr = IR::LabelInstr::New(Js::OpCode::Label, m_func, true); |
| instr->InsertBefore(helperLabelInstr); |
| #if DBG |
| helperLabelInstr->m_noLazyHelperAssert = true; |
| #endif |
| } |
| } |
| |
| IR::Instr * |
| Lowerer::LowerBailOnEqualOrNotEqual(IR::Instr * instr, |
| IR::BranchInstr *branchInstr, // = nullptr |
| IR::LabelInstr *labelBailOut, // = nullptr |
| IR::PropertySymOpnd * propSymOpnd, // = nullptr |
| bool isHelper) // = false |
| { |
| IR::Instr * prevInstr = instr->m_prev; |
| |
| // Bail out test |
| bool onEqual = instr->m_opcode == Js::OpCode::BailOnEqual; |
| |
| LowerBailoutCheckAndLabel(instr, onEqual, isHelper); |
| |
| // BailOutOnImplicitCalls is a post-op bailout. Since we look at the profile info for LdFld/StFld to decide whether the instruction may or may not call an accessor, |
| // we need to update this profile information on the bailout path for BailOutOnImplicitCalls if the implicit call was an accessor call. |
| if(propSymOpnd && ((instr->GetBailOutKind() & ~IR::BailOutKindBits) == IR::BailOutOnImplicitCalls) && (propSymOpnd->m_inlineCacheIndex != -1) && |
| instr->m_func->HasProfileInfo()) |
| { |
| // result = AND implCallFlags, ~ImplicitCall_None |
| // TST result, ImplicitCall_Accessor |
| // JEQ $bail |
| // OR profiledFlags, ( FldInfo_FromAccessor | FldInfo_Polymorphic ) |
| // $bail |
| |
| IR::Opnd * implicitCallFlags = GetImplicitCallFlagsOpnd(); |
| IR::Opnd * accessorImplicitCall = IR::IntConstOpnd::New(Js::ImplicitCall_Accessor & ~Js::ImplicitCall_None, GetImplicitCallFlagsType(), instr->m_func, true); |
| IR::Opnd * maskNoImplicitCall = IR::IntConstOpnd::New((Js::ImplicitCallFlags)~Js::ImplicitCall_None, GetImplicitCallFlagsType(), instr->m_func, true); |
| IR::Opnd * fldInfoAccessor = IR::IntConstOpnd::New(Js::FldInfo_FromAccessor | Js::FldInfo_Polymorphic, GetFldInfoFlagsType(), instr->m_func, true); |
| IR::LabelInstr * label = IR::LabelInstr::New(Js::OpCode::Label, instr->m_func, true); |
| |
| IR::Instr * andInstr = InsertAnd(IR::RegOpnd::New(GetImplicitCallFlagsType(), instr->m_func), implicitCallFlags, maskNoImplicitCall, instr); |
| InsertTestBranch(andInstr->GetDst(), accessorImplicitCall, Js::OpCode::BrEq_A, label, instr); |
| |
| intptr_t infoAddr = instr->m_func->GetReadOnlyProfileInfo()->GetFldInfoAddr(propSymOpnd->m_inlineCacheIndex); |
| |
| IR::Opnd * profiledFlags = IR::MemRefOpnd::New(infoAddr + Js::FldInfo::GetOffsetOfFlags(), TyInt8, instr->m_func); |
| |
| InsertOr(profiledFlags, profiledFlags, fldInfoAccessor, instr); |
| instr->InsertBefore(label); |
| } |
| |
| this->GenerateBailOut(instr, branchInstr, labelBailOut); |
| return prevInstr; |
| } |
| |
| void Lowerer::LowerBailOnNegative(IR::Instr *const instr) |
| { |
| Assert(instr); |
| Assert(instr->m_opcode == Js::OpCode::BailOnNegative); |
| Assert(instr->HasBailOutInfo()); |
| Assert(!instr->GetDst()); |
| Assert(instr->GetSrc1()); |
| Assert(instr->GetSrc1()->GetType() == TyInt32 || instr->GetSrc1()->GetType() == TyUint32); |
| Assert(!instr->GetSrc2()); |
| |
| IR::LabelInstr *const skipBailOutLabel = instr->GetOrCreateContinueLabel(false); |
| LowerOneBailOutKind(instr, instr->GetBailOutKind(), false); |
| Assert(!instr->HasBailOutInfo()); |
| IR::Instr *insertBeforeInstr = instr->m_next; |
| |
| Func *const func = instr->m_func; |
| |
| // test src, src |
| // jns $skipBailOut |
| InsertCompareBranch( |
| instr->UnlinkSrc1(), |
| IR::IntConstOpnd::New(0, TyInt32, func, true), |
| Js::OpCode::BrGe_A, |
| skipBailOutLabel, |
| insertBeforeInstr); |
| |
| instr->Remove(); |
| } |
| |
| IR::Instr * |
| Lowerer::LowerBailOnNotObject(IR::Instr *instr, |
| IR::BranchInstr *branchInstr /* = nullptr */, |
| IR::LabelInstr *labelBailOut /* = nullptr */) |
| { |
| IR::Instr *prevInstr = instr->m_prev; |
| IR::LabelInstr *continueLabelInstr = IR::LabelInstr::New(Js::OpCode::Label, |
| m_func); |
| instr->InsertAfter(continueLabelInstr); |
| this->m_lowererMD.GenerateObjectTest(instr->UnlinkSrc1(), |
| instr, |
| continueLabelInstr, |
| /* fContinueLabel = */ true); |
| this->GenerateBailOut(instr, branchInstr, labelBailOut); |
| |
| return prevInstr; |
| } |
| |
| IR::Instr* |
| Lowerer::LowerCheckIsFuncObj(IR::Instr *instr, bool checkFuncInfo) |
| { |
    // The CheckIsFuncObj instr and the CheckFuncInfo instr (checkFuncInfo = true) are used to
    // generate bailout instrs that type check a function (and, for CheckFuncInfo, also check the
    // func info). These bailout instrs are created in Lower rather than in Inline because the
    // CheckIsFuncObj/CheckFuncInfo instrs can be hoisted out of loops, so the bailout instrs
    // created here can also live outside of loops.
| |
| IR::RegOpnd *funcOpnd = instr->GetSrc1()->AsRegOpnd(); |
| IR::BailOutKind bailOutKind = instr->GetBailOutKind(); |
| BailOutInfo *bailOutInfo = instr->GetBailOutInfo(); |
| |
| // Check that the property is an object. |
| InsertObjectCheck(funcOpnd, instr, bailOutKind, bailOutInfo); |
| |
| // Check that the object is a function with the correct type ID. |
| IR::Instr *lastInstr = InsertFunctionTypeIdCheck(funcOpnd, instr, bailOutKind, bailOutInfo); |
| |
| if (checkFuncInfo) |
| { |
| // Check that the function body matches the func info. |
| lastInstr = InsertFunctionInfoCheck( |
| funcOpnd, instr, instr->GetSrc2()->AsAddrOpnd(), bailOutKind, bailOutInfo); |
| lastInstr->SetByteCodeOffset(instr); |
| } |
| |
| if (bailOutInfo->bailOutInstr == instr) |
| { |
| // bailOutInstr is currently instr. By changing bailOutInstr to point to lastInstr, the next |
| // instruction to be lowered (lastInstr) will create the bailout target. This is necessary in |
| // cases where instr does not have a shared bailout (ex: instr was not hoisted outside of a loop). |
| bailOutInfo->bailOutInstr = lastInstr; |
| } |
| |
    // The CheckIsFuncObj/CheckFuncInfo instr exists only to create the instrs above, so it is
    // removed here. Its BailOutInfo continues to be used and thus must not be deleted; the bailout
    // flags are cleared to stop Remove() from deleting it.
| instr->hasBailOutInfo = false; |
| instr->hasAuxBailOut = false; |
| instr->Remove(); |
| |
| return lastInstr; |
| } |
| |
| IR::Instr* |
| Lowerer::LowerBailOnTrue(IR::Instr* instr, IR::LabelInstr* labelBailOut /*nullptr*/) |
| { |
| IR::Instr* instrPrev = instr->m_prev; |
| |
| IR::LabelInstr* continueLabel = instr->GetOrCreateContinueLabel(); |
| IR::RegOpnd * regSrc1 = IR::RegOpnd::New(instr->GetSrc1()->GetType(), this->m_func); |
| InsertMove(regSrc1, instr->UnlinkSrc1(), instr); |
| InsertTestBranch(regSrc1, regSrc1, Js::OpCode::BrEq_A, continueLabel, instr); |
| |
| GenerateBailOut(instr, nullptr, labelBailOut); |
| |
| return instrPrev; |
| } |
| |
| IR::Instr * |
| Lowerer::LowerBailOnNotBuiltIn(IR::Instr *instr, |
| IR::BranchInstr *branchInstr /* = nullptr */, |
| IR::LabelInstr *labelBailOut /* = nullptr */) |
| { |
| Assert(instr->GetSrc2()->IsIntConstOpnd()); |
| IR::Instr *prevInstr = instr->m_prev; |
| |
| intptr_t builtInFuncs = m_func->GetScriptContextInfo()->GetBuiltinFunctionsBaseAddr(); |
| Js::BuiltinFunction builtInIndex = instr->UnlinkSrc2()->AsIntConstOpnd()->AsInt32(); |
| |
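    // The call target is compared against the slot builtInFuncs[builtInIndex] of the script context's builtin-function table.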
| IR::Opnd *builtIn = IR::MemRefOpnd::New((void*)(builtInFuncs + builtInIndex * MachPtr), TyMachReg, instr->m_func); |
| |
| #if TESTBUILTINFORNULL |
| IR::LabelInstr * continueAfterTestLabel = IR::LabelInstr::New(Js::OpCode::Label, instr->m_func); |
| InsertTestBranch(builtIn, builtIn, Js::OpCode::BrNeq_A, continueAfterTestLabel, instr); |
| this->m_lowererMD.GenerateDebugBreak(instr); |
| instr->InsertBefore(continueAfterTestLabel); |
| #endif |
| |
| IR::LabelInstr * continueLabel = IR::LabelInstr::New(Js::OpCode::Label, instr->m_func); |
| instr->InsertAfter(continueLabel); |
| InsertCompareBranch(instr->UnlinkSrc1(), builtIn, Js::OpCode::BrEq_A, continueLabel, instr); |
| |
| GenerateBailOut(instr, branchInstr, labelBailOut); |
| |
| return prevInstr; |
| } |
| |
| #ifdef ENABLE_SCRIPT_DEBUGGING |
| IR::Instr * |
| Lowerer::LowerBailForDebugger(IR::Instr* instr, bool isInsideHelper /* = false */) |
| { |
| IR::Instr * prevInstr = instr->m_prev; |
| |
| IR::BailOutKind bailOutKind = instr->GetBailOutKind(); |
| AssertMsg(bailOutKind, "bailOutKind should not be zero at this time."); |
| AssertMsg(!(bailOutKind & IR::BailOutExplicit) || bailOutKind == IR::BailOutExplicit, |
| "BailOutExplicit cannot be combined with any other bailout flags."); |
| |
| IR::LabelInstr* explicitBailOutLabel = nullptr; |
| |
| if (!(bailOutKind & IR::BailOutExplicit)) |
| { |
| intptr_t flags = m_func->GetScriptContextInfo()->GetDebuggingFlagsAddr(); |
| |
| // Check 1 (do we need to bail out?) |
| // JXX bailoutLabel |
| // Check 2 (do we need to bail out?) |
| // JXX bailoutLabel |
| // ... |
| // JMP continueLabel |
| // bailoutDocumentLabel: |
| // (determine if document boundary reached - if not, JMP to continueLabel) |
| // NOTE: THIS BLOCK IS CONDITIONALLY GENERATED BASED ON doGenerateBailOutDocumentBlock |
| // bailoutLabel: |
| // bail out |
| // continueLabel: |
| // ... |
| |
| IR::LabelInstr* bailOutDocumentLabel = IR::LabelInstr::New(Js::OpCode::Label, m_func, /*isOpHelper*/ true); |
| instr->InsertBefore(bailOutDocumentLabel); |
| IR::LabelInstr* bailOutLabel = IR::LabelInstr::New(Js::OpCode::Label, m_func, /*isOpHelper*/ true); |
| instr->InsertBefore(bailOutLabel); |
| IR::LabelInstr* continueLabel = IR::LabelInstr::New(Js::OpCode::Label, m_func, /*isOpHelper*/ isInsideHelper); |
| instr->InsertAfter(continueLabel); |
| IR::BranchInstr* continueBranchInstr = this->InsertBranch(Js::OpCode::Br, continueLabel, bailOutDocumentLabel); // JMP continueLabel. |
| |
| bool doGenerateBailOutDocumentBlock = false; |
| |
| const IR::BailOutKind c_forceAndIgnoreEx = IR::BailOutForceByFlag | IR::BailOutIgnoreException; |
| if ((bailOutKind & c_forceAndIgnoreEx) == c_forceAndIgnoreEx) |
| { |
            // It's faster to check these together with one compare than with two separate checks at run time.
| // CMP [&(flags->m_forceInterpreter, flags->m_isIgnoreException)], 0 |
| // BNE bailout |
| IR::Opnd* opnd1 = IR::MemRefOpnd::New((BYTE*)flags + DebuggingFlags::GetForceInterpreterOffset(), TyInt16, m_func); |
| IR::Opnd* opnd2 = IR::IntConstOpnd::New(0, TyInt16, m_func, /*dontEncode*/ true); |
| InsertCompareBranch(opnd1, opnd2, Js::OpCode::BrNeq_A, bailOutLabel, continueBranchInstr); |
| bailOutKind ^= c_forceAndIgnoreEx; |
| } |
| else |
| { |
| if (bailOutKind & IR::BailOutForceByFlag) |
| { |
| // CMP [&flags->m_forceInterpreter], 0 |
| // BNE bailout |
| IR::Opnd* opnd1 = IR::MemRefOpnd::New((BYTE*)flags + DebuggingFlags::GetForceInterpreterOffset(), TyInt8, m_func); |
| IR::Opnd* opnd2 = IR::IntConstOpnd::New(0, TyInt8, m_func, /*dontEncode*/ true); |
| InsertCompareBranch(opnd1, opnd2, Js::OpCode::BrNeq_A, bailOutLabel, continueBranchInstr); |
| bailOutKind ^= IR::BailOutForceByFlag; |
| } |
| if (bailOutKind & IR::BailOutIgnoreException) |
| { |
| // CMP [&flags->m_byteCodeOffsetAfterIgnoreException], DebuggingFlags::InvalidByteCodeOffset |
| // BNE bailout |
| IR::Opnd* opnd1 = IR::MemRefOpnd::New((BYTE*)flags + DebuggingFlags::GetByteCodeOffsetAfterIgnoreExceptionOffset(), TyInt32, m_func); |
| IR::Opnd* opnd2 = IR::IntConstOpnd::New(DebuggingFlags::InvalidByteCodeOffset, TyInt32, m_func, /*dontEncode*/ true); |
| InsertCompareBranch(opnd1, opnd2, Js::OpCode::BrNeq_A, bailOutLabel, continueBranchInstr); |
| bailOutKind ^= IR::BailOutIgnoreException; |
| } |
| } |
| |
| if (bailOutKind & IR::BailOutBreakPointInFunction) |
| { |
| // CMP [&functionBody->m_sourceInfo.m_probeCount], 0 |
| // BNE bailout |
| IR::Opnd* opnd1 = IR::MemRefOpnd::New(m_func->GetJITFunctionBody()->GetProbeCountAddr(), TyInt32, m_func); |
| IR::Opnd* opnd2 = IR::IntConstOpnd::New(0, TyInt32, m_func, /*dontEncode*/ true); |
| InsertCompareBranch(opnd1, opnd2, Js::OpCode::BrNeq_A, bailOutLabel, continueBranchInstr); |
| bailOutKind ^= IR::BailOutBreakPointInFunction; |
| } |
| |
| // on method entry |
| if(bailOutKind & IR::BailOutStep) |
| { |
| // TEST STEP_BAILOUT, [&stepController->StepType] |
| // BNE BailoutLabel |
| IR::Opnd* opnd1 = IR::MemRefOpnd::New(m_func->GetScriptContextInfo()->GetDebugStepTypeAddr(), TyInt8, m_func); |
| IR::Opnd* opnd2 = IR::IntConstOpnd::New(Js::STEP_BAILOUT, TyInt8, this->m_func, /*dontEncode*/ true); |
| InsertTestBranch(opnd1, opnd2, Js::OpCode::BrNeq_A, bailOutLabel, continueBranchInstr); |
| |
| // CMP STEP_DOCUMENT, [&stepController->StepType] |
| // BEQ BailoutDocumentLabel |
| opnd1 = IR::MemRefOpnd::New(m_func->GetScriptContextInfo()->GetDebugStepTypeAddr(), TyInt8, m_func); |
| opnd2 = IR::IntConstOpnd::New(Js::STEP_DOCUMENT, TyInt8, this->m_func, /*dontEncode*/ true); |
| InsertCompareBranch(opnd1, opnd2, Js::OpCode::BrEq_A, /*isUnsigned*/ true, bailOutDocumentLabel, continueBranchInstr); |
| |
| doGenerateBailOutDocumentBlock = true; |
| |
| bailOutKind ^= IR::BailOutStep; |
| } |
| |
| // on method exit |
| if (bailOutKind & IR::BailOutStackFrameBase) |
| { |
| // CMP EffectiveFrameBase, [&stepController->frameAddrWhenSet] |
| // BA bailoutLabel |
| RegNum effectiveFrameBaseReg; |
| #ifdef _M_X64 |
| effectiveFrameBaseReg = m_lowererMD.GetRegStackPointer(); |
| #else |
| effectiveFrameBaseReg = m_lowererMD.GetRegFramePointer(); |
| #endif |
| IR::Opnd* opnd1 = IR::RegOpnd::New(nullptr, effectiveFrameBaseReg, TyMachReg, m_func); |
| IR::Opnd* opnd2 = IR::MemRefOpnd::New(m_func->GetScriptContextInfo()->GetDebugFrameAddressAddr(), TyMachReg, m_func); |
| this->InsertCompareBranch(opnd1, opnd2, Js::OpCode::BrGt_A, /*isUnsigned*/ true, bailOutLabel, continueBranchInstr); |
| |
| // CMP STEP_DOCUMENT, [&stepController->StepType] |
| // BEQ BailoutDocumentLabel |
| opnd1 = IR::MemRefOpnd::New(m_func->GetScriptContextInfo()->GetDebugStepTypeAddr(), TyInt8, m_func); |
| opnd2 = IR::IntConstOpnd::New(Js::STEP_DOCUMENT, TyInt8, this->m_func, /*dontEncode*/ true); |
| InsertCompareBranch(opnd1, opnd2, Js::OpCode::BrEq_A, /*isUnsigned*/ true, bailOutDocumentLabel, continueBranchInstr); |
| |
| doGenerateBailOutDocumentBlock = true; |
| |
| bailOutKind ^= IR::BailOutStackFrameBase; |
| } |
| |
| if (bailOutKind & IR::BailOutLocalValueChanged) |
| { |
| int32 hasLocalVarChangedOffset = m_func->GetHasLocalVarChangedOffset(); |
| if (hasLocalVarChangedOffset != Js::Constants::InvalidOffset) |
| { |
| // CMP [EBP + hasLocalVarChangedStackOffset], 0 |
| // BNE bailout |
| StackSym* sym = StackSym::New(TyInt8, m_func); |
| sym->m_offset = hasLocalVarChangedOffset; |
| sym->m_allocated = true; |
| IR::Opnd* opnd1 = IR::SymOpnd::New(sym, TyInt8, m_func); |
| IR::Opnd* opnd2 = IR::IntConstOpnd::New(0, TyInt8, m_func); |
| InsertCompareBranch(opnd1, opnd2, Js::OpCode::BrNeq_A, bailOutLabel, continueBranchInstr); |
| } |
| bailOutKind ^= IR::BailOutLocalValueChanged; |
| } |
| |
| if (doGenerateBailOutDocumentBlock) |
| { |
| // GENERATE the BailoutDocumentLabel |
| // bailOutDocumentLabel: |
| // CMP CurrentScriptId, [&stepController->ScriptIdWhenSet] |
| // BEQ ContinueLabel |
| // bailOutLabel: // (fallthrough bailOutLabel) |
| IR::Opnd* opnd1 = IR::MemRefOpnd::New(m_func->GetJITFunctionBody()->GetScriptIdAddr(), TyInt32, m_func); |
| |
| IR::Opnd* opnd2 = IR::MemRefOpnd::New(m_func->GetScriptContextInfo()->GetDebugScriptIdWhenSetAddr(), TyInt32, m_func); |
| IR::RegOpnd* reg1 = IR::RegOpnd::New(TyInt32, m_func); |
| InsertMove(reg1, opnd2, bailOutLabel); |
| |
| InsertCompareBranch(opnd1, reg1, Js::OpCode::BrEq_A, /*isUnsigned*/ true, continueLabel, bailOutLabel); |
| } |
| |
| AssertMsg(bailOutKind == (IR::BailOutKind)0, "Some of the bits in BailOutKind were not processed!"); |
| |
| // Note: at this time the 'instr' is in between bailoutLabel and continueLabel. |
| } |
| else |
| { |
        // For an explicit/unconditional bailout, use a label that is not a helper label; otherwise we would get a helper
        // label in the main code path, which breaks helper-label consistency (a helper block may only be reached via a
        // conditional branch from main code). See DbCheckPostLower.
| explicitBailOutLabel = IR::LabelInstr::New(Js::OpCode::Label, this->m_func, false); |
| } |
| |
| this->GenerateBailOut(instr, nullptr, explicitBailOutLabel); |
| |
| return prevInstr; |
| } |
| #endif |
| |
| IR::Instr* |
| Lowerer::LowerBailOnException(IR::Instr * instr) |
| { |
| Assert(instr->HasBailOutInfo()); |
| IR::Instr * instrPrev = instr->m_prev; |
| |
| this->GenerateBailOut(instr, nullptr, nullptr); |
| |
| return instrPrev; |
| } |
| |
| IR::Instr* |
| Lowerer::LowerBailOnEarlyExit(IR::Instr * instr) |
| { |
| Assert(instr->HasBailOutInfo()); |
| IR::Instr * instrPrev = instr->m_prev; |
| |
| this->GenerateBailOut(instr, nullptr, nullptr); |
| |
| return instrPrev; |
| } |
| |
// Generate a bailout if the value is INT_MIN.
// If it's not INT_MIN, we continue without bailing out.
| IR::Instr * |
| Lowerer::LowerBailOnIntMin(IR::Instr *instr, IR::BranchInstr *branchInstr /* = nullptr */, IR::LabelInstr *labelBailOut /* = nullptr */) |
| { |
| Assert(instr); |
| Assert(instr->GetSrc1()); |
| IR::LabelInstr *continueLabelInstr = IR::LabelInstr::New(Js::OpCode::Label, m_func); |
| instr->InsertAfter(continueLabelInstr); |
| |
| if(!instr->HasBailOutInfo()) |
| { |
| instr->Remove(); |
| } |
| else |
| { |
| Assert(instr->GetBailOutKind() == IR::BailOnIntMin); |
| // Note: src1 must be int32 at this point. |
| if (instr->GetSrc1()->IsIntConstOpnd()) |
| { |
| // For consts we can check the value at JIT time. Note: without this check we'll have to legalize the CMP instr. |
| IR::IntConstOpnd* intConst = instr->UnlinkSrc1()->AsIntConstOpnd(); |
| if (intConst->GetValue() == INT_MIN) |
| { |
| this->GenerateBailOut(instr, branchInstr, labelBailOut); |
| intConst->Free(instr->m_func); |
| } |
| else |
| { |
| instr->Remove(); |
| } |
| } |
| else |
| { |
| InsertCompareBranch(instr->UnlinkSrc1(), IR::IntConstOpnd::New(INT_MIN, TyInt32, this->m_func), Js::OpCode::BrNeq_A, continueLabelInstr, instr); |
| this->GenerateBailOut(instr, branchInstr, labelBailOut); |
| } |
| } |
| |
| return continueLabelInstr; |
| } |
| |
| ///---------------------------------------------------------------------------- |
| /// |
| /// Lowerer::LowerBailOnNotString |
| /// Generate BailOut Lowerer Instruction if not a String |
| /// |
| ///---------------------------------------------------------------------------- |
| void Lowerer::LowerBailOnNotString(IR::Instr *instr) |
| { |
| if (!instr->GetSrc1()->GetValueType().IsString()) |
| { |
        /* Create a MOV instruction */
| IR::Instr * movInstr = IR::Instr::New(instr->m_opcode, instr->UnlinkDst(), instr->UnlinkSrc1(), instr->m_func); |
| instr->InsertBefore(movInstr); |
| |
| IR::LabelInstr *continueLabelInstr = IR::LabelInstr::New(Js::OpCode::Label, m_func); |
| IR::LabelInstr *helperLabelInstr = IR::LabelInstr::New(Js::OpCode::Label, m_func, true); |
| instr->InsertAfter(continueLabelInstr); |
| |
| IR::RegOpnd *srcReg = movInstr->GetSrc1()->IsRegOpnd() ? movInstr->GetSrc1()->AsRegOpnd() : nullptr; |
| |
| this->GenerateStringTest(srcReg, instr, helperLabelInstr, continueLabelInstr); |
| this->GenerateBailOut(instr, nullptr, helperLabelInstr); |
| } |
| else |
| { |
| instr->ClearBailOutInfo(); |
| } |
| } |
| |
| void Lowerer::LowerOneBailOutKind( |
| IR::Instr *const instr, |
| const IR::BailOutKind bailOutKindToLower, |
| const bool isInHelperBlock, |
| const bool preserveBailOutKindInInstr) |
| { |
| Assert(instr); |
| Assert(bailOutKindToLower); |
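    // Either a main bailout kind (no kind bits), or exactly one kind bit (power-of-two check below).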
| Assert(!(bailOutKindToLower & IR::BailOutKindBits) || !(bailOutKindToLower & bailOutKindToLower - 1u)); |
| |
| Func *const func = instr->m_func; |
| |
| // Split bailouts other than the one being lowered here |
| BailOutInfo *const bailOutInfo = instr->GetBailOutInfo(); |
| IR::BailOutKind bailOutKind = instr->GetBailOutKind(); |
| Assert( |
| bailOutKindToLower & IR::BailOutKindBits |
| ? bailOutKind & bailOutKindToLower |
| : (bailOutKind & ~IR::BailOutKindBits) == bailOutKindToLower); |
| if(!preserveBailOutKindInInstr) |
| { |
| bailOutKind -= bailOutKindToLower; |
| } |
| if(bailOutKind) |
| { |
| if(bailOutInfo->bailOutInstr == instr) |
| { |
| // Create a shared bailout point for the split bailout checks |
| IR::Instr *const sharedBail = instr->ShareBailOut(); |
| Assert(sharedBail->GetBailOutInfo() == bailOutInfo); |
| GenerateBailOut(sharedBail); |
| } |
| instr->SetBailOutKind(bailOutKind); |
| } |
| else |
| { |
| instr->UnlinkBailOutInfo(); |
| if(bailOutInfo->bailOutInstr == instr) |
| { |
| bailOutInfo->bailOutInstr = nullptr; |
| } |
| } |
| |
| IR::Instr *const insertBeforeInstr = instr->m_next; |
| |
| // (Bail out with the requested bail out kind) |
| IR::BailOutInstr *const bailOutInstr = IR::BailOutInstr::New(Js::OpCode::BailOut, bailOutKindToLower, bailOutInfo, func); |
| bailOutInstr->SetByteCodeOffset(instr); |
| insertBeforeInstr->InsertBefore(bailOutInstr); |
| GenerateBailOut(bailOutInstr); |
| |
| // The caller is expected to generate code to decide whether to bail out |
| } |
| |
| void Lowerer::SplitBailOnNotArray( |
| IR::Instr *const instr, |
| IR::Instr * *const bailOnNotArrayRef, |
| IR::Instr * *const bailOnMissingValueRef) |
| { |
| Assert(instr); |
| Assert(!instr->GetDst()); |
| Assert(instr->GetSrc1()); |
| Assert(instr->GetSrc1()->IsRegOpnd()); |
| Assert(!instr->GetSrc2()); |
| Assert(bailOnNotArrayRef); |
| Assert(bailOnMissingValueRef); |
| |
| IR::Instr *&bailOnNotArray = *bailOnNotArrayRef; |
| IR::Instr *&bailOnMissingValue = *bailOnMissingValueRef; |
| |
| bailOnNotArray = instr; |
| bailOnMissingValue = nullptr; |
| |
| IR::BailOutKind bailOutKind = instr->GetBailOutKind(); |
| if(bailOutKind == IR::BailOutOnNotArray || |
| bailOutKind == IR::BailOutOnNotNativeArray) |
| { |
| return; |
| } |
| |
| // Split array checks |
| BailOutInfo *const bailOutInfo = instr->GetBailOutInfo(); |
| if(bailOutInfo->bailOutInstr == instr) |
| { |
| // Create a shared bailout point for the split bailout checks |
| IR::Instr *const sharedBail = instr->ShareBailOut(); |
| Assert(sharedBail->GetBailOutInfo() == bailOutInfo); |
| LowerBailTarget(sharedBail); |
| } |
| bailOutKind -= IR::BailOutOnMissingValue; |
| Assert(bailOutKind == IR::BailOutOnNotArray || |
| bailOutKind == IR::BailOutOnNotNativeArray); |
| instr->SetBailOutKind(bailOutKind); |
| |
| Func *const func = bailOutInfo->bailOutFunc; |
| IR::Instr *const insertBeforeInstr = instr->m_next; |
| |
| // Split missing value checks |
| bailOnMissingValue = IR::BailOutInstr::New(Js::OpCode::BailOnNotArray, IR::BailOutOnMissingValue, bailOutInfo, func); |
| bailOnMissingValue->SetByteCodeOffset(instr); |
| insertBeforeInstr->InsertBefore(bailOnMissingValue); |
| } |
| |
| IR::RegOpnd *Lowerer::LowerBailOnNotArray(IR::Instr *const instr) |
| { |
| Assert(instr); |
| Assert(!instr->GetDst()); |
| Assert(instr->GetSrc1()); |
| Assert(instr->GetSrc1()->IsRegOpnd()); |
| Assert(!instr->GetSrc2()); |
| |
| Func *const func = instr->m_func; |
| |
| // Label to jump to (or fall through to) when bailing out |
| const auto bailOutLabel = IR::LabelInstr::New(Js::OpCode::Label, func, true /* isOpHelper */); |
| instr->InsertBefore(bailOutLabel); |
| |
| // Label to jump to when not bailing out |
| const auto skipBailOutLabel = IR::LabelInstr::New(Js::OpCode::Label, func); |
| instr->InsertAfter(skipBailOutLabel); |
| |
| // Do the array tests and jump to bailOutLabel if it's not an array. Fall through if it is an array. |
| IR::RegOpnd *const arrayOpnd = |
| GenerateArrayTest(instr->UnlinkSrc1()->AsRegOpnd(), bailOutLabel, bailOutLabel, bailOutLabel, true); |
| |
| // Skip bail-out when it is an array |
| InsertBranch(Js::OpCode::Br, skipBailOutLabel, bailOutLabel); |
| |
| // Generate the bailout helper call. 'instr' will be changed to the CALL into the bailout function, so it can't be used for |
| // ordering instructions anymore. |
| GenerateBailOut(instr); |
| |
| return arrayOpnd; |
| } |
| |
| void Lowerer::LowerBailOnMissingValue(IR::Instr *const instr, IR::RegOpnd *const arrayOpnd) |
| { |
| Assert(instr); |
| Assert(!instr->GetDst()); |
| Assert(!instr->GetSrc1()); |
| Assert(!instr->GetSrc2()); |
| Assert(arrayOpnd); |
| Assert(arrayOpnd->GetValueType().IsArrayOrObjectWithArray()); |
| |
| Func *const func = instr->m_func; |
| |
| // Label to jump to when not bailing out |
| const auto skipBailOutLabel = IR::LabelInstr::New(Js::OpCode::Label, func); |
| instr->InsertAfter(skipBailOutLabel); |
| |
| // Skip bail-out when the array has no missing values |
| // |
| // test [array + offsetOf(objectArrayOrFlags)], Js::DynamicObjectFlags::HasNoMissingValues |
| // jnz $skipBailOut |
| const IR::AutoReuseOpnd autoReuseArrayOpnd(arrayOpnd, func); |
| CompileAssert( |
| static_cast<Js::DynamicObjectFlags>(static_cast<uint8>(Js::DynamicObjectFlags::HasNoMissingValues)) == |
| Js::DynamicObjectFlags::HasNoMissingValues); |
| InsertTestBranch( |
| IR::IndirOpnd::New(arrayOpnd, Js::JavascriptArray::GetOffsetOfArrayFlags(), TyUint8, func), |
| IR::IntConstOpnd::New(static_cast<uint8>(Js::DynamicObjectFlags::HasNoMissingValues), TyUint8, func, true), |
| Js::OpCode::BrNeq_A, |
| skipBailOutLabel, |
| instr); |
| |
| // Generate the bailout helper call. 'instr' will be changed to the CALL into the bailout function, so it can't be used for |
| // ordering instructions anymore. |
| GenerateBailOut(instr); |
| } |
| |
| void Lowerer::LowerBailOnInvalidatedArrayHeadSegment(IR::Instr *const instr, const bool isInHelperBlock) |
| { |
| /* |
| // Generate checks for whether the head segment or the head segment length changed during the helper call |
| |
| if(!(baseValueType.IsArrayOrObjectWithArray() && arrayOpnd && arrayOpnd.HeadSegmentSym())) |
| { |
| // Record the array head segment before the helper call |
| headSegmentBeforeHelperCall = Js::JavascriptArray::Jit_GetArrayHeadSegmentForArrayOrObjectWithArray(base) |
| } |
| if(!(baseValueType.IsArrayOrObjectWithArray() && arrayOpnd && arrayOpnd.HeadSegmentLengthSym())) |
| { |
| // Record the array head segment length before the helper call |
| if(baseValueType.IsArrayOrObjectWithArray() && arrayOpnd && arrayOpnd.HeadSegmentSym()) |
| { |
| mov headSegmentLengthBeforeHelperCall, [headSegmentBeforeHelperCall + offsetOf(length)] |
| } |
| else |
| { |
| headSegmentLengthBeforeHelperCall = |
| Js::JavascriptArray::Jit_GetArrayHeadSegmentLength(headSegmentBeforeHelperCall) |
| } |
| } |
| |
| helperCall: |
| (Helper call and other bailout checks) |
| |
| // If the array has a different head segment or head segment length after the helper call, then this store needs to bail |
| // out |
| invalidatedHeadSegment = |
| JavascriptArray::Jit_OperationInvalidatedArrayHeadSegment( |
| headSegmentBeforeHelperCall, |
| headSegmentLengthBeforeHelperCall, |
| base) |
| test invalidatedHeadSegment, invalidatedHeadSegment |
| jz $skipBailOut |
| |
| (Bail out with IR::BailOutOnInvalidatedArrayHeadSegment) |
| |
| $skipBailOut: |
| */ |
| |
| Assert(instr); |
| Assert(instr->m_opcode == Js::OpCode::StElemI_A || instr->m_opcode == Js::OpCode::StElemI_A_Strict || instr->m_opcode == Js::OpCode::Memset || instr->m_opcode == Js::OpCode::Memcopy); |
| Assert(instr->GetDst()); |
| Assert(instr->GetDst()->IsIndirOpnd()); |
| |
| Func *const func = instr->m_func; |
| |
| IR::RegOpnd *const baseOpnd = instr->GetDst()->AsIndirOpnd()->GetBaseOpnd(); |
| const ValueType baseValueType(baseOpnd->GetValueType()); |
| Assert(!baseValueType.IsNotArrayOrObjectWithArray()); |
| const bool isArrayOrObjectWithArray = baseValueType.IsArrayOrObjectWithArray(); |
| IR::ArrayRegOpnd *const arrayOpnd = baseOpnd->IsArrayRegOpnd() ? baseOpnd->AsArrayRegOpnd() : nullptr; |
| |
| IR::RegOpnd *headSegmentBeforeHelperCallOpnd; |
| IR::AutoReuseOpnd autoReuseHeadSegmentBeforeHelperCallOpnd; |
| if(isArrayOrObjectWithArray && arrayOpnd && arrayOpnd->HeadSegmentSym()) |
| { |
| headSegmentBeforeHelperCallOpnd = IR::RegOpnd::New(arrayOpnd->HeadSegmentSym(), TyMachPtr, func); |
| autoReuseHeadSegmentBeforeHelperCallOpnd.Initialize(headSegmentBeforeHelperCallOpnd, func); |
| } |
| else |
| { |
| // Record the array head segment before the helper call |
| // headSegmentBeforeHelperCall = Js::JavascriptArray::Jit_GetArrayHeadSegmentForArrayOrObjectWithArray(base) |
| m_lowererMD.LoadHelperArgument(instr, baseOpnd); |
| IR::Instr *const callInstr = IR::Instr::New(Js::OpCode::Call, func); |
| headSegmentBeforeHelperCallOpnd = IR::RegOpnd::New(StackSym::New(TyMachPtr, func), TyMachPtr, func); |
| autoReuseHeadSegmentBeforeHelperCallOpnd.Initialize(headSegmentBeforeHelperCallOpnd, func); |
| callInstr->SetDst(headSegmentBeforeHelperCallOpnd); |
| instr->InsertBefore(callInstr); |
| m_lowererMD.ChangeToHelperCall(callInstr, IR::HelperArray_Jit_GetArrayHeadSegmentForArrayOrObjectWithArray); |
| } |
| |
| IR::RegOpnd *headSegmentLengthBeforeHelperCallOpnd; |
| IR::AutoReuseOpnd autoReuseHeadSegmentLengthBeforeHelperCallOpnd; |
| if(isArrayOrObjectWithArray && arrayOpnd && arrayOpnd->HeadSegmentLengthSym()) |
| { |
| headSegmentLengthBeforeHelperCallOpnd = IR::RegOpnd::New(arrayOpnd->HeadSegmentLengthSym(), TyUint32, func); |
| autoReuseHeadSegmentLengthBeforeHelperCallOpnd.Initialize(headSegmentLengthBeforeHelperCallOpnd, func); |
| } |
| else |
| { |
| headSegmentLengthBeforeHelperCallOpnd = IR::RegOpnd::New(StackSym::New(TyUint32, func), TyUint32, func); |
| autoReuseHeadSegmentLengthBeforeHelperCallOpnd.Initialize(headSegmentLengthBeforeHelperCallOpnd, func); |
| if(isArrayOrObjectWithArray && arrayOpnd && arrayOpnd->HeadSegmentSym()) |
| { |
| // Record the array head segment length before the helper call |
| // mov headSegmentLengthBeforeHelperCall, [headSegmentBeforeHelperCall + offsetOf(length)] |
| InsertMove( |
| headSegmentLengthBeforeHelperCallOpnd, |
| IR::IndirOpnd::New( |
| headSegmentBeforeHelperCallOpnd, |
| Js::SparseArraySegmentBase::GetOffsetOfLength(), |
| TyUint32, |
| func), |
| instr); |
| } |
| else |
| { |
| // Record the array head segment length before the helper call |
| // headSegmentLengthBeforeHelperCall = |
| // Js::JavascriptArray::Jit_GetArrayHeadSegmentLength(headSegmentBeforeHelperCall) |
| m_lowererMD.LoadHelperArgument(instr, headSegmentBeforeHelperCallOpnd); |
| IR::Instr *const callInstr = IR::Instr::New(Js::OpCode::Call, func); |
| callInstr->SetDst(headSegmentLengthBeforeHelperCallOpnd); |
| instr->InsertBefore(callInstr); |
| m_lowererMD.ChangeToHelperCall(callInstr, IR::HelperArray_Jit_GetArrayHeadSegmentLength); |
| } |
| } |
| |
| IR::LabelInstr *const skipBailOutLabel = instr->GetOrCreateContinueLabel(isInHelperBlock); |
| LowerOneBailOutKind(instr, IR::BailOutOnInvalidatedArrayHeadSegment, isInHelperBlock); |
| IR::Instr *const insertBeforeInstr = instr->m_next; |
| |
| // If the array has a different head segment or head segment length after the helper call, then this store needs to bail out |
| // invalidatedHeadSegment = |
| // JavascriptArray::Jit_OperationInvalidatedArrayHeadSegment( |
| // headSegmentBeforeHelperCall, |
| // headSegmentLengthBeforeHelperCall, |
| // base) |
| m_lowererMD.LoadHelperArgument(insertBeforeInstr, baseOpnd); |
| m_lowererMD.LoadHelperArgument(insertBeforeInstr, headSegmentLengthBeforeHelperCallOpnd); |
| m_lowererMD.LoadHelperArgument(insertBeforeInstr, headSegmentBeforeHelperCallOpnd); |
| IR::Instr *const callInstr = IR::Instr::New(Js::OpCode::Call, func); |
| IR::RegOpnd *const invalidatedHeadSegmentOpnd = IR::RegOpnd::New(TyUint8, func); |
| const IR::AutoReuseOpnd autoReuseInvalidatedHeadSegmentOpnd(invalidatedHeadSegmentOpnd, func); |
| callInstr->SetDst(invalidatedHeadSegmentOpnd); |
| insertBeforeInstr->InsertBefore(callInstr); |
| m_lowererMD.ChangeToHelperCall(callInstr, IR::HelperArray_Jit_OperationInvalidatedArrayHeadSegment); |
| |
| // test invalidatedHeadSegment, invalidatedHeadSegment |
| // jz $skipBailOut |
| InsertTestBranch( |
| invalidatedHeadSegmentOpnd, |
| invalidatedHeadSegmentOpnd, |
| Js::OpCode::BrEq_A, |
| skipBailOutLabel, |
| insertBeforeInstr); |
| |
| // (Bail out with IR::BailOutOnInvalidatedArrayHeadSegment) |
| // $skipBailOut: |
| } |
| |
| void Lowerer::LowerBailOnInvalidatedArrayLength(IR::Instr *const instr, const bool isInHelperBlock) |
| { |
| /* |
| // Generate checks for whether the length changed during the helper call |
| |
| if(!(arrayOpnd && arrayOpnd.LengthSym() && arrayOpnd.LengthSym() != arrayOpnd.HeadSegmentLengthSym())) |
| { |
| // Record the array length before the helper call |
| lengthBeforeHelperCall = Js::JavascriptArray::Jit_GetArrayLength(base) |
| } |
| |
| helperCall: |
| (Helper call and other bailout checks) |
| |
| // If the array has a different length after the helper call, then this store needs to bail out |
| invalidatedLength = JavascriptArray::Jit_OperationInvalidatedArrayLength(lengthBeforeHelperCall, base) |
| test invalidatedLength, invalidatedLength |
| jz $skipBailOut |
| |
| (Bail out with IR::BailOutOnInvalidatedArrayLength) |
| |
| $skipBailOut: |
| */ |
| |
| Assert(instr); |
| Assert(instr->m_opcode == Js::OpCode::StElemI_A || instr->m_opcode == Js::OpCode::StElemI_A_Strict || instr->m_opcode == Js::OpCode::Memset || instr->m_opcode == Js::OpCode::Memcopy); |
| Assert(instr->GetDst()); |
| Assert(instr->GetDst()->IsIndirOpnd()); |
| |
| Func *const func = instr->m_func; |
| |
| IR::RegOpnd *const baseOpnd = instr->GetDst()->AsIndirOpnd()->GetBaseOpnd(); |
| const ValueType baseValueType(baseOpnd->GetValueType()); |
| Assert(!baseValueType.IsNotArray()); |
| IR::ArrayRegOpnd *const arrayOpnd = baseOpnd->IsArrayRegOpnd() ? baseOpnd->AsArrayRegOpnd() : nullptr; |
| |
| IR::RegOpnd *lengthBeforeHelperCallOpnd; |
| IR::AutoReuseOpnd autoReuseLengthBeforeHelperCallOpnd; |
| if(arrayOpnd && arrayOpnd->LengthSym() && arrayOpnd->LengthSym() != arrayOpnd->HeadSegmentLengthSym()) |
| { |
| lengthBeforeHelperCallOpnd = IR::RegOpnd::New(arrayOpnd->LengthSym(), arrayOpnd->LengthSym()->GetType(), func); |
| autoReuseLengthBeforeHelperCallOpnd.Initialize(lengthBeforeHelperCallOpnd, func); |
| } |
| else |
| { |
| // Record the array length before the helper call |
| // lengthBeforeHelperCall = Js::JavascriptArray::Jit_GetArrayLength(base) |
| m_lowererMD.LoadHelperArgument(instr, baseOpnd); |
| IR::Instr *const callInstr = IR::Instr::New(Js::OpCode::Call, func); |
| lengthBeforeHelperCallOpnd = IR::RegOpnd::New(TyUint32, func); |
| autoReuseLengthBeforeHelperCallOpnd.Initialize(lengthBeforeHelperCallOpnd, func); |
| callInstr->SetDst(lengthBeforeHelperCallOpnd); |
| instr->InsertBefore(callInstr); |
| m_lowererMD.ChangeToHelperCall(callInstr, IR::HelperArray_Jit_GetArrayLength); |
| } |
| |
| IR::LabelInstr *const skipBailOutLabel = instr->GetOrCreateContinueLabel(isInHelperBlock); |
| LowerOneBailOutKind(instr, IR::BailOutOnInvalidatedArrayLength, isInHelperBlock); |
| IR::Instr *const insertBeforeInstr = instr->m_next; |
| |
| // If the array has a different length after the helper call, then this store needs to bail out |
| // invalidatedLength = JavascriptArray::Jit_OperationInvalidatedArrayLength(lengthBeforeHelperCall, base) |
| m_lowererMD.LoadHelperArgument(insertBeforeInstr, baseOpnd); |
| m_lowererMD.LoadHelperArgument(insertBeforeInstr, lengthBeforeHelperCallOpnd); |
| IR::Instr *const callInstr = IR::Instr::New(Js::OpCode::Call, func); |
| IR::RegOpnd *const invalidatedLengthOpnd = IR::RegOpnd::New(TyUint8, func); |
| const IR::AutoReuseOpnd autoReuseInvalidatedLengthOpnd(invalidatedLengthOpnd, func); |
| callInstr->SetDst(invalidatedLengthOpnd); |
| insertBeforeInstr->InsertBefore(callInstr); |
| m_lowererMD.ChangeToHelperCall(callInstr, IR::HelperArray_Jit_OperationInvalidatedArrayLength); |
| |
| // test invalidatedLength, invalidatedLength |
| // jz $skipBailOut |
| InsertTestBranch( |
| invalidatedLengthOpnd, |
| invalidatedLengthOpnd, |
| Js::OpCode::BrEq_A, |
| skipBailOutLabel, |
| insertBeforeInstr); |
| |
| // (Bail out with IR::BailOutOnInvalidatedArrayLength) |
| // $skipBailOut: |
| } |
| |
| void Lowerer::LowerBailOnCreatedMissingValue(IR::Instr *const instr, const bool isInHelperBlock) |
| { |
| /* |
| // Generate checks for whether the first missing value was created during the helper call |
| |
| if(!(baseValueType.IsArrayOrObjectWithArray() && baseValueType.HasNoMissingValues())) |
| { |
| // Record whether the array has missing values before the helper call |
| arrayFlagsBeforeHelperCall = Js::JavascriptArray::Jit_GetArrayFlagsForArrayOrObjectWithArray(base) |
| } |
| |
| helperCall: |
| (Helper call and other bailout checks) |
| |
| // If the array had no missing values before the helper call, and the array has missing values after the helper |
| // call, then this store created the first missing value in the array and needs to bail out |
| if(baseValueType.IsArrayOrObjectWithArray() && baseValueType.HasNoMissingValues()) |
| (arrayFlagsBeforeHelperCall = Js::DynamicObjectFlags::HasNoMissingValues) |
| createdFirstMissingValue = JavascriptArray::Jit_OperationCreatedFirstMissingValue(arrayFlagsBeforeHelperCall, base) |
| test createdFirstMissingValue, createdFirstMissingValue |
| jz $skipBailOut |
| |
| (Bail out with IR::BailOutOnMissingValue) |
| |
| $skipBailOut: |
| */ |
| |
| Assert(instr); |
| Assert(instr->m_opcode == Js::OpCode::StElemI_A || instr->m_opcode == Js::OpCode::StElemI_A_Strict || instr->m_opcode == Js::OpCode::Memset || instr->m_opcode == Js::OpCode::Memcopy); |
| Assert(instr->GetDst()); |
| Assert(instr->GetDst()->IsIndirOpnd()); |
| |
| Func *const func = instr->m_func; |
| |
| IR::RegOpnd *const baseOpnd = instr->GetDst()->AsIndirOpnd()->GetBaseOpnd(); |
| const ValueType baseValueType(baseOpnd->GetValueType()); |
| Assert(!baseValueType.IsNotArrayOrObjectWithArray()); |
| |
| IR::Opnd *arrayFlagsBeforeHelperCallOpnd = nullptr; |
| IR::AutoReuseOpnd autoReuseArrayFlagsBeforeHelperCallOpnd; |
| const IRType arrayFlagsType = sizeof(uintptr_t) == sizeof(uint32) ? TyUint32 : TyUint64; |
| if(!(baseValueType.IsArrayOrObjectWithArray() && baseValueType.HasNoMissingValues())) |
| { |
| // Record whether the array has missing values before the helper call |
| // arrayFlagsBeforeHelperCall = Js::JavascriptArray::Jit_GetArrayFlagsForArrayOrObjectWithArray(base) |
| m_lowererMD.LoadHelperArgument(instr, baseOpnd); |
| IR::Instr *const callInstr = IR::Instr::New(Js::OpCode::Call, func); |
| arrayFlagsBeforeHelperCallOpnd = IR::RegOpnd::New(arrayFlagsType, func); |
| autoReuseArrayFlagsBeforeHelperCallOpnd.Initialize(arrayFlagsBeforeHelperCallOpnd, func); |
| callInstr->SetDst(arrayFlagsBeforeHelperCallOpnd); |
| instr->InsertBefore(callInstr); |
| m_lowererMD.ChangeToHelperCall(callInstr, IR::HelperArray_Jit_GetArrayFlagsForArrayOrObjectWithArray); |
| } |
| |
| IR::LabelInstr *const skipBailOutLabel = instr->GetOrCreateContinueLabel(isInHelperBlock); |
| LowerOneBailOutKind(instr, IR::BailOutOnMissingValue, isInHelperBlock); |
| IR::Instr *const insertBeforeInstr = instr->m_next; |
| |
| // If the array had no missing values before the helper call, and the array has missing values after the helper |
| // call, then this store created the first missing value in the array and needs to bail out |
| |
| if(baseValueType.IsArrayOrObjectWithArray() && baseValueType.HasNoMissingValues()) |
| { |
| // (arrayFlagsBeforeHelperCall = Js::DynamicObjectFlags::HasNoMissingValues) |
| Assert(!arrayFlagsBeforeHelperCallOpnd); |
| arrayFlagsBeforeHelperCallOpnd = |
| arrayFlagsType == TyUint32 |
| ? static_cast<IR::Opnd *>( |
| IR::IntConstOpnd::New( |
| static_cast<uintptr_t>(Js::DynamicObjectFlags::HasNoMissingValues), |
| arrayFlagsType, |
| func, |
| true)) |
| : IR::AddrOpnd::New( |
| reinterpret_cast<void *>(Js::DynamicObjectFlags::HasNoMissingValues), |
| IR::AddrOpndKindConstantVar, |
| func, |
| true); |
| autoReuseArrayFlagsBeforeHelperCallOpnd.Initialize(arrayFlagsBeforeHelperCallOpnd, func); |
| } |
| else |
| { |
| Assert(arrayFlagsBeforeHelperCallOpnd); |
| } |
| |
| // createdFirstMissingValue = JavascriptArray::Jit_OperationCreatedFirstMissingValue(arrayFlagsBeforeHelperCall, base) |
| m_lowererMD.LoadHelperArgument(insertBeforeInstr, baseOpnd); |
| m_lowererMD.LoadHelperArgument(insertBeforeInstr, arrayFlagsBeforeHelperCallOpnd); |
| IR::Instr *const callInstr = IR::Instr::New(Js::OpCode::Call, func); |
| IR::RegOpnd *const createdFirstMissingValueOpnd = IR::RegOpnd::New(TyUint8, func); |
| IR::AutoReuseOpnd autoReuseCreatedFirstMissingValueOpnd(createdFirstMissingValueOpnd, func); |
| callInstr->SetDst(createdFirstMissingValueOpnd); |
| insertBeforeInstr->InsertBefore(callInstr); |
| m_lowererMD.ChangeToHelperCall(callInstr, IR::HelperArray_Jit_OperationCreatedFirstMissingValue); |
| |
| // test createdFirstMissingValue, createdFirstMissingValue |
| // jz $skipBailOut |
| InsertCompareBranch( |
| createdFirstMissingValueOpnd, |
| IR::IntConstOpnd::New(0, createdFirstMissingValueOpnd->GetType(), func, true), |
| Js::OpCode::BrEq_A, |
| skipBailOutLabel, |
| insertBeforeInstr); |
| |
| // (Bail out with IR::BailOutOnMissingValue) |
| // $skipBailOut: |
| } |
| |
| |
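///----------------------------------------------------------------------------
///
/// Lowerer::GetFuncObjectOpnd
///
///     Get an opnd that holds the current function object: the inlinee
///     function object slot for inlinees, otherwise the implicit function
///     object parameter on the stack. For coroutines, unwrap the
///     GeneratorVirtualScriptFunction to get the real function object.
///
///----------------------------------------------------------------------------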
| IR::Opnd* |
| Lowerer::GetFuncObjectOpnd(IR::Instr* insertBeforeInstr) |
| { |
| Func * func = insertBeforeInstr->m_func; |
| IR::Opnd *paramOpnd = nullptr; |
| if (func->IsInlinee()) |
| { |
| paramOpnd = func->GetInlineeFunctionObjectSlotOpnd(); |
| } |
| else |
| { |
| #if defined(_M_ARM32_OR_ARM64) |
| StackSym * paramSym = this->m_lowererMD.GetImplicitParamSlotSym(0); |
| #else |
| StackSym *paramSym = StackSym::New(TyMachReg, this->m_func); |
| this->m_func->SetArgOffset(paramSym, 2 * MachPtr); |
| this->m_func->SetHasImplicitParamLoad(); |
| #endif |
| paramOpnd = IR::SymOpnd::New(paramSym, TyMachReg, this->m_func); |
| } |
| |
| if (func->GetJITFunctionBody()->IsCoroutine()) |
| { |
// The function object for generator calls is a GeneratorVirtualScriptFunction object,
// but we need to return the real JavascriptGeneratorFunction object, so grab it from
// the virtual function before assigning to the dst.
| Assert(!func->IsInlinee()); |
| IR::RegOpnd *tmpOpnd = IR::RegOpnd::New(TyMachReg, func); |
| Lowerer::InsertMove(tmpOpnd, paramOpnd, insertBeforeInstr); |
| |
| paramOpnd = IR::IndirOpnd::New(tmpOpnd, Js::GeneratorVirtualScriptFunction::GetRealFunctionOffset(), TyMachPtr, func); |
| } |
| return paramOpnd; |
| } |
| |
| ///---------------------------------------------------------------------------- |
| /// |
| /// Lowerer::LoadFuncExpression |
| /// |
| /// Load the function expression to src1 from [ebp + 8] |
| /// |
| ///---------------------------------------------------------------------------- |
| |
| IR::Instr * |
| Lowerer::LoadFuncExpression(IR::Instr *instrFuncExpr) |
| { |
| ASSERT_INLINEE_FUNC(instrFuncExpr); |
| IR::Opnd *paramOpnd = GetFuncObjectOpnd(instrFuncExpr); |
| |
| // mov dst, param |
| instrFuncExpr->SetSrc1(paramOpnd); |
| LowererMD::ChangeToAssign(instrFuncExpr); |
| |
| return instrFuncExpr; |
| } |
| |
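// Lowers BoundCheck / UnsignedBoundCheck, which verify: left <= right + offset (src1 <= src2 + dst).
// Sketch of the generated sequence (the add is elided when the offset is folded into a constant or
// into the branch condition):
//
//     mov temp, right          ; only when (right + offset) must be computed
//     add temp, offset
//     jo $bailOut
//     cmp left, right
//     jl[e] $skipBailOut
//   $bailOut:
//     (bail out with the instr's bailout kind)
//   $skipBailOut: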
| void Lowerer::LowerBoundCheck(IR::Instr *const instr) |
| { |
| Assert(instr); |
| Assert(instr->m_opcode == Js::OpCode::BoundCheck || instr->m_opcode == Js::OpCode::UnsignedBoundCheck); |
| |
| #if DBG |
| if(instr->m_opcode == Js::OpCode::UnsignedBoundCheck) |
| { |
| // UnsignedBoundCheck is currently only supported for the pattern: |
| // UnsignedBoundCheck s1 <= s2 + c, where c == 0 || c == -1 |
| Assert(instr->GetSrc1()->IsRegOpnd()); |
| Assert(instr->GetSrc1()->IsInt32()); |
| Assert(instr->GetSrc2()); |
| Assert(!instr->GetSrc2()->IsIntConstOpnd()); |
| if(instr->GetDst()) |
| { |
| const int32 c = instr->GetDst()->AsIntConstOpnd()->AsInt32(); |
| Assert(c == 0 || c == -1); |
| } |
| } |
| #endif |
| |
| const IR::BailOutKind bailOutKind = instr->GetBailOutKind(); |
| Assert( |
| bailOutKind == IR::BailOutOnArrayAccessHelperCall || |
| bailOutKind == IR::BailOutOnInvalidatedArrayHeadSegment || |
| bailOutKind == IR::BailOutOnFailedHoistedBoundCheck || |
| bailOutKind == IR::BailOutOnFailedHoistedLoopCountBasedBoundCheck); |
| |
| IR::LabelInstr *const skipBailOutLabel = instr->GetOrCreateContinueLabel(false); |
| LowerOneBailOutKind(instr, bailOutKind, false); |
| Assert(!instr->HasBailOutInfo()); |
| IR::Instr *insertBeforeInstr = instr->m_next; |
| |
| #if DBG |
| const auto VerifyLeftOrRightOpnd = [&](IR::Opnd *const opnd, const bool isRightOpnd) |
| { |
| if(!opnd) |
| { |
| Assert(isRightOpnd); |
| return; |
| } |
| if(opnd->IsIntConstOpnd()) |
| { |
| Assert(!isRightOpnd || opnd->AsIntConstOpnd()->GetValue() != 0); |
| return; |
| } |
| Assert(opnd->GetType() == TyInt32 || opnd->GetType() == TyUint32); |
| }; |
| #endif |
| |
| // left <= right + offset (src1 <= src2 + dst) |
| IR::Opnd *leftOpnd = instr->UnlinkSrc1(); |
| DebugOnly(VerifyLeftOrRightOpnd(leftOpnd, false)); |
| IR::Opnd *rightOpnd = instr->UnlinkSrc2(); |
| DebugOnly(VerifyLeftOrRightOpnd(rightOpnd, true)); |
| Assert(!leftOpnd->IsIntConstOpnd() || rightOpnd && !rightOpnd->IsIntConstOpnd()); |
| IR::IntConstOpnd *offsetOpnd = instr->GetDst() ? instr->UnlinkDst()->AsIntConstOpnd() : nullptr; |
| Assert(!offsetOpnd || offsetOpnd->GetValue() != 0); |
| const bool doUnsignedCompare = instr->m_opcode == Js::OpCode::UnsignedBoundCheck; |
| instr->Remove(); |
| |
| Func *const func = insertBeforeInstr->m_func; |
| |
| IntConstType offset = offsetOpnd ? offsetOpnd->GetValue() : 0; |
| Js::OpCode compareOpCode = Js::OpCode::BrLe_A; |
| if(leftOpnd->IsIntConstOpnd() && rightOpnd->IsRegOpnd() && offset != IntConstMin) |
| { |
| // Put the constants together: swap the operands, negate the offset, and invert the branch |
| IR::Opnd *const tempOpnd = leftOpnd; |
| leftOpnd = rightOpnd; |
| rightOpnd = tempOpnd; |
| offset = -offset; |
| compareOpCode = Js::OpCode::BrGe_A; |
| } |
| |
| if(rightOpnd->IsIntConstOpnd()) |
| { |
| // Try to aggregate right + offset into a constant offset |
| IntConstType newOffset; |
| if(!IntConstMath::Add(offset, rightOpnd->AsIntConstOpnd()->GetValue(), TyInt32, &newOffset)) |
| { |
| offset = newOffset; |
| rightOpnd = nullptr; |
| offsetOpnd = nullptr; |
| } |
| } |
| |
| // Determine if the Add for (right + offset) is necessary, and the op code that will be used for the comparison |
| IR::AutoReuseOpnd autoReuseAddResultOpnd; |
| if(offset == -1 && compareOpCode == Js::OpCode::BrLe_A) |
| { |
| offset = 0; |
| compareOpCode = Js::OpCode::BrLt_A; |
| } |
| else if(offset == 1 && compareOpCode == Js::OpCode::BrGe_A) |
| { |
| offset = 0; |
| compareOpCode = Js::OpCode::BrGt_A; |
| } |
| else if(offset != 0 && rightOpnd) |
| { |
| // Need to Add (right + offset). If it overflows, bail out. |
| IR::LabelInstr *const bailOutLabel = insertBeforeInstr->m_prev->GetOrCreateContinueLabel(true); |
| insertBeforeInstr = bailOutLabel; |
| |
| // mov temp, right |
| // add temp, offset |
| // jo $bailOut |
| // $bailOut: (insertBeforeInstr) |
| Assert(!offsetOpnd || offsetOpnd->GetValue() == offset); |
| IR::RegOpnd *const addResultOpnd = IR::RegOpnd::New(TyInt32, func); |
| autoReuseAddResultOpnd.Initialize(addResultOpnd, func); |
| InsertAdd( |
| true, |
| addResultOpnd, |
| rightOpnd, |
| offsetOpnd ? offsetOpnd->UseWithNewType(TyInt32, func) : IR::IntConstOpnd::New(offset, TyInt32, func), |
| insertBeforeInstr); |
| InsertBranch(LowererMD::MDOverflowBranchOpcode, bailOutLabel, insertBeforeInstr); |
| |
| rightOpnd = addResultOpnd; |
| } |
| |
| // cmp left, right |
| // jl[e] $skipBailOut |
| // $bailOut: |
| if(!rightOpnd) |
| { |
| rightOpnd = IR::IntConstOpnd::New(offset, TyInt32, func); |
| } |
| InsertCompareBranch(leftOpnd, rightOpnd, compareOpCode, doUnsignedCompare, skipBailOutLabel, insertBeforeInstr); |
| } |
| |
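// Lowers a bailout target. The normal flow jumps over the bailout code that GenerateBailOut emits in
// the instr's place:
//
//     jmp $continue
//     (bailout call sequence)
//   $continue: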
| IR::Instr * |
| Lowerer::LowerBailTarget(IR::Instr * instr) |
| { |
// This instr is just a bailout target: skip over it in the normal flow, and generate a label before it
// so that other bailouts can jump here.
| IR::Instr * prevInstr = instr->m_prev; |
| |
| IR::LabelInstr * continueLabelInstr = IR::LabelInstr::New(Js::OpCode::Label, m_func); |
| instr->InsertAfter(continueLabelInstr); |
| |
| IR::BranchInstr * skipInstr = IR::BranchInstr::New(LowererMD::MDUncondBranchOpcode, continueLabelInstr, this->m_func); |
| instr->InsertBefore(skipInstr); |
| |
| this->GenerateBailOut(instr); |
| |
| return prevInstr; |
| } |
| |
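// Splits an instr with an implicit-call bailout into the instr itself and a BailOnNotEqual on the
// implicit call flags. Sketch of the result:
//
//     implicitCallFlags = ImplicitCall_None
//     (disable implicit calls)             ; BailOutOnImplicitCallsPreOp only
//     <original instr>
//     (re-enable implicit calls)           ; BailOutOnImplicitCallsPreOp only
//     BailOnNotEqual implicitCallFlags, ImplicitCall_None
//
// Returns the BailOnNotEqual instr; 'instr' is updated to the new copy of the original instr.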
| IR::Instr * |
| Lowerer::SplitBailOnImplicitCall(IR::Instr *& instr) |
| { |
| Assert(instr->IsPlainInstr() || instr->IsProfiledInstr()); |
| |
| const auto bailOutKind = instr->GetBailOutKind(); |
| Assert(BailOutInfo::IsBailOutOnImplicitCalls(bailOutKind)); |
| |
| IR::Opnd * implicitCallFlags = this->GetImplicitCallFlagsOpnd(); |
| const IR::AutoReuseOpnd autoReuseImplicitCallFlags(implicitCallFlags, instr->m_func); |
| IR::IntConstOpnd * noImplicitCall = IR::IntConstOpnd::New(Js::ImplicitCall_None, TyInt8, this->m_func, true); |
| const IR::AutoReuseOpnd autoReuseNoImplicitCall(noImplicitCall, instr->m_func); |
| |
| // Reset the implicit call flag on every helper call |
| Lowerer::InsertMove(implicitCallFlags, noImplicitCall, instr); |
| |
| IR::Instr *disableImplicitCallsInstr = nullptr, *enableImplicitCallsInstr = nullptr; |
| if(BailOutInfo::WithoutLazyBailOut(bailOutKind) == IR::BailOutOnImplicitCallsPreOp) |
| { |
| const auto disableImplicitCallAddress = |
| m_lowererMD.GenerateMemRef( |
| instr->m_func->GetThreadContextInfo()->GetDisableImplicitFlagsAddr(), |
| TyInt8, |
| instr); |
| |
| // Disable implicit calls since they will be called after bailing out |
| disableImplicitCallsInstr = |
| IR::Instr::New( |
| Js::OpCode::Ld_A, |
| disableImplicitCallAddress, |
| IR::IntConstOpnd::New(DisableImplicitCallFlag, TyInt8, instr->m_func, true), |
| instr->m_func); |
| instr->InsertBefore(disableImplicitCallsInstr); |
| |
| // Create instruction for re-enabling implicit calls |
| enableImplicitCallsInstr = |
| IR::Instr::New( |
| Js::OpCode::Ld_A, |
| disableImplicitCallAddress, |
| IR::IntConstOpnd::New(DisableImplicitNoFlag, TyInt8, instr->m_func, true), |
| instr->m_func); |
| |
| #if DBG |
| enableImplicitCallsInstr->m_noLazyHelperAssert = true; |
| #endif |
| } |
| |
| IR::Instr * bailOutInstr = instr; |
| |
| instr = IR::Instr::New(instr->m_opcode, instr->m_func); |
| bailOutInstr->TransferTo(instr); |
| bailOutInstr->InsertBefore(instr); |
| |
| if(disableImplicitCallsInstr) |
| { |
| // Re-enable implicit calls |
| Assert(enableImplicitCallsInstr); |
| bailOutInstr->InsertBefore(enableImplicitCallsInstr); |
| |
| // Lower both instructions. Lowering an instruction may free the instruction's original operands, so do that last. |
| LowererMD::ChangeToAssign(disableImplicitCallsInstr); |
| LowererMD::ChangeToAssign(enableImplicitCallsInstr); |
| } |
| |
| bailOutInstr->m_opcode = Js::OpCode::BailOnNotEqual; |
| |
| bailOutInstr->SetSrc1(implicitCallFlags); |
| bailOutInstr->SetSrc2(noImplicitCall); |
| |
| return bailOutInstr; |
| } |
| |
| IR::Instr * |
| Lowerer::SplitBailOnImplicitCall(IR::Instr * instr, IR::Instr * helperCall, IR::Instr * insertBeforeInstr) |
| { |
| IR::Opnd * implicitCallFlags = this->GetImplicitCallFlagsOpnd(); |
| const IR::AutoReuseOpnd autoReuseImplicitCallFlags(implicitCallFlags, instr->m_func); |
| IR::IntConstOpnd * noImplicitCall = IR::IntConstOpnd::New(Js::ImplicitCall_None, TyInt8, this->m_func, true); |
| const IR::AutoReuseOpnd autoReuseNoImplicitCall(noImplicitCall, instr->m_func); |
| |
| // Reset the implicit call flag on every helper call |
| Lowerer::InsertMove(implicitCallFlags, noImplicitCall, helperCall->m_prev); |
| |
| BailOutInfo * bailOutInfo = instr->GetBailOutInfo(); |
| if (bailOutInfo->bailOutInstr == instr) |
| { |
| bailOutInfo->bailOutInstr = nullptr; |
| } |
| IR::Instr * bailOutInstr = IR::BailOutInstr::New(Js::OpCode::BailOnNotEqual, IR::BailOutOnImplicitCalls, bailOutInfo, bailOutInfo->bailOutFunc); |
| bailOutInstr->SetSrc1(implicitCallFlags); |
| bailOutInstr->SetSrc2(noImplicitCall); |
| |
| insertBeforeInstr->InsertBefore(bailOutInstr); |
| instr->ClearBailOutInfo(); |
| return bailOutInstr; |
| } |
| |
// Split the bailout for the debugger out of a real instr that carries it, into a separate bailout instr.
// Returns the instr that needs to be lowered next, which is normally the last of the split instrs.
// IR on input:
// - Real instr with BailOutInfo, but its opcode is not BailForDebugger.
//   - If the debugger bailout is not shared, it is in instr->GetBailOutKind().
//   - If the debugger bailout is shared, it is in instr->GetAuxBailOutKind().
// IR on output:
// - Either of:
//   - real instr, then debuggerBailout -- when there was only the debugger bailout.
//   - real instr with BailOutInfo w/o debugger bailout, then debuggerBailout, then sharedBailout -- when
//     the debugger bailout was shared with some other bailout.
| IR::Instr* Lowerer::SplitBailForDebugger(IR::Instr* instr) |
| { |
| Assert(m_func->IsJitInDebugMode() && instr->m_opcode != Js::OpCode::BailForDebugger); |
| |
IR::BailOutKind debuggerBailOutKind; // Used for the split instr.
| BailOutInfo* bailOutInfo = instr->GetBailOutInfo(); |
| IR::Instr* sharedBailoutInstr = nullptr; |
| |
| if (instr->GetBailOutKind() & IR::BailOutForDebuggerBits) |
| { |
| // debugger bailout is not shared. |
| Assert(!instr->HasAuxBailOut()); |
| AssertMsg(!(instr->GetBailOutKind() & ~IR::BailOutForDebuggerBits), "There should only be debugger bailout bits in the instr."); |
| |
| debuggerBailOutKind = instr->GetBailOutKind() & IR::BailOutForDebuggerBits; |
| |
// There is no non-debugger bailout in the instr. We still can't deallocate the bailout info,
// as we use it for the split instr, but we need to mark the bailout as not yet generated.
| if (bailOutInfo->bailOutInstr == instr) |
| { |
// The null will be picked up by the BailOutInstr::New below, which will set it to the new bailout instr.
| bailOutInfo->bailOutInstr = nullptr; |
| } |
| |
// Remove the bailout info from the original instr, which from now on is just a regular instr, w/o deallocating the bailout info.
| instr->ClearBailOutInfo(); |
| } |
| else if (instr->IsBranchInstr() && instr->HasBailOutInfo() && instr->HasAuxBailOut()) |
| { |
// Branches with shared bailout are lowered in LowerCondBranchCheckBailOut. We can't do it here
// because we need a BranchBailOutRecord but don't yet know which BrTrue/BrFalse to use for it.
| debuggerBailOutKind = IR::BailOutInvalid; |
| } |
| else if (instr->HasAuxBailOut() && instr->GetAuxBailOutKind() & IR::BailOutForDebuggerBits) |
| { |
| // debugger bailout is shared. |
| AssertMsg(!(instr->GetBailOutKind() & IR::BailOutForDebuggerBits), "There should be no debugger bits in main bailout kind."); |
| |
| debuggerBailOutKind = instr->GetAuxBailOutKind() & IR::BailOutForDebuggerBits; |
| |
| // This will insert SharedBail instr after current instr and set bailOutInfo->bailOutInstr to the shared one. |
| sharedBailoutInstr = instr->ShareBailOut(); |
| |
// As we've extracted the aux bailout, remove all traces of it from the instr.
| instr->ResetAuxBailOut(); |
| } |
| else |
| { |
| AssertMsg(FALSE, "shouldn't get here"); |
| debuggerBailOutKind = IR::BailOutInvalid; |
| } |
| |
| if (debuggerBailOutKind != IR::BailOutInvalid) |
| { |
| IR::BailOutInstr* debuggerBailoutInstr = IR::BailOutInstr::New( |
| Js::OpCode::BailForDebugger, debuggerBailOutKind, bailOutInfo, bailOutInfo->bailOutFunc); |
| instr->InsertAfter(debuggerBailoutInstr); |
| |
// Since we go backwards, we need to process the extracted debugger bailout first.
| instr = sharedBailoutInstr ? sharedBailoutInstr : debuggerBailoutInstr; |
| } |
| |
| return instr; |
| } |
| |
| IR::Instr * |
| Lowerer::SplitBailOnResultCondition(IR::Instr *const instr) const |
| { |
| Assert(instr); |
| Assert(!instr->IsLowered()); |
| Assert( |
| instr->GetBailOutKind() & IR::BailOutOnResultConditions || |
| instr->GetBailOutKind() == IR::BailOutOnFailedHoistedLoopCountBasedBoundCheck); |
| |
| const auto nonBailOutInstr = IR::Instr::New(instr->m_opcode, instr->m_func); |
| instr->TransferTo(nonBailOutInstr); |
| instr->InsertBefore(nonBailOutInstr); |
| return nonBailOutInstr; |
| } |
| |
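// Creates the bailout and skip-bailout labels around 'instr' and generates the bailout helper call in
// its place:
//
//   $bailOut: (helper)
//     (bailout call sequence; 'instr' becomes the CALL)
//   $skipBailOut:
//
// The caller then lowers the arithmetic so that it branches to these labels
// (see LowerInstrWithBailOnResultCondition).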
| void |
| Lowerer::LowerBailOnResultCondition( |
| IR::Instr *const instr, |
| IR::LabelInstr * *const bailOutLabel, |
| IR::LabelInstr * *const skipBailOutLabel) |
| { |
| Assert(instr); |
| Assert( |
| instr->GetBailOutKind() & IR::BailOutOnResultConditions || |
| instr->GetBailOutKind() == IR::BailOutOnFailedHoistedLoopCountBasedBoundCheck); |
| Assert(bailOutLabel); |
| Assert(skipBailOutLabel); |
| |
| // Label to jump to (or fall through to) when bailing out. The actual bailout label |
| // (bailOutInfo->bailOutInstr->AsLabelInstr()) may be shared, and code may be added to restore values before the jump to the |
| // actual bailout label in the cloned bailout case, so always create a new bailout label for this particular path. |
| *bailOutLabel = IR::LabelInstr::New(Js::OpCode::Label, instr->m_func, true /* isOpHelper */); |
| instr->InsertBefore(*bailOutLabel); |
| |
| // Label to jump to when not bailing out |
| *skipBailOutLabel = IR::LabelInstr::New(Js::OpCode::Label, instr->m_func); |
| instr->InsertAfter(*skipBailOutLabel); |
| |
| // Generate the bailout helper call. 'instr' will be changed to the CALL into the bailout function, so it can't be used for |
| // ordering instructions anymore. |
| GenerateBailOut(instr); |
| } |
| |
| void |
| Lowerer::PreserveSourcesForBailOnResultCondition(IR::Instr *const instr, IR::LabelInstr *const skipBailOutLabel) const |
| { |
| Assert(instr); |
| Assert(!instr->IsLowered()); |
| Assert(!instr->HasBailOutInfo()); |
| |
// Since this instruction may bail out, writing to the destination must not overwrite one of the sources,
// or we may lose a source needed to redo the equivalent byte code instruction. Determine whether the
// sources need to be preserved.
| |
| const auto dst = instr->GetDst(); |
| Assert(dst); |
| const auto dstStackSym = dst->GetStackSym(); |
| if(!dstStackSym || !dstStackSym->HasByteCodeRegSlot()) |
| { |
| // We only need to ensure that a byte-code source is not being overwritten |
| return; |
| } |
| |
| switch(instr->m_opcode) |
| { |
| // The sources of these instructions don't need restoring, or will be restored in the bailout path |
| case Js::OpCode::Neg_I4: |
| // In case of overflow or zero, the result is the same as the operand |
| case Js::OpCode::Add_I4: |
| case Js::OpCode::Sub_I4: |
| // In case of overflow, there is always enough information to restore the operands |
| return; |
| } |
| |
| Assert(instr->GetSrc1()); |
| if(!dst->IsEqual(instr->GetSrc1()) && !(instr->GetSrc2() && dst->IsEqual(instr->GetSrc2()))) |
| { |
| // The destination is different from the sources |
| return; |
| } |
| |
| // The destination is the same as one of the sources and the original sources cannot be restored after the instruction, so |
| // use a temporary destination for the result and move it back to the original destination after deciding not to bail out |
| LowererMD::ChangeToAssign(instr->SinkDst(Js::OpCode::Ld_I4, RegNOREG, skipBailOutLabel)); |
| } |
| |
| void |
| Lowerer::LowerInstrWithBailOnResultCondition( |
| IR::Instr *const instr, |
| const IR::BailOutKind bailOutKind, |
| IR::LabelInstr *const bailOutLabel, |
| IR::LabelInstr *const skipBailOutLabel) const |
| { |
| Assert(instr); |
| Assert(!instr->IsLowered()); |
| Assert(!instr->HasBailOutInfo()); |
| Assert(bailOutKind & IR::BailOutOnResultConditions || bailOutKind == IR::BailOutOnFailedHoistedLoopCountBasedBoundCheck); |
| Assert(bailOutLabel); |
| Assert(instr->m_next == bailOutLabel); |
| Assert(skipBailOutLabel); |
| |
| // Preserve sources that are overwritten by the instruction if needed |
| PreserveSourcesForBailOnResultCondition(instr, skipBailOutLabel); |
| |
| // Lower the instruction |
| switch(instr->m_opcode) |
| { |
| case Js::OpCode::Neg_I4: |
| LowererMD::LowerInt4NegWithBailOut(instr, bailOutKind, bailOutLabel, skipBailOutLabel); |
| break; |
| |
| case Js::OpCode::Add_I4: |
| LowererMD::LowerInt4AddWithBailOut(instr, bailOutKind, bailOutLabel, skipBailOutLabel); |
| break; |
| |
| case Js::OpCode::Sub_I4: |
| LowererMD::LowerInt4SubWithBailOut(instr, bailOutKind, bailOutLabel, skipBailOutLabel); |
| break; |
| |
| case Js::OpCode::Mul_I4: |
| LowererMD::LowerInt4MulWithBailOut(instr, bailOutKind, bailOutLabel, skipBailOutLabel); |
| break; |
| |
| case Js::OpCode::Rem_I4: |
| m_lowererMD.LowerInt4RemWithBailOut(instr, bailOutKind, bailOutLabel, skipBailOutLabel); |
| break; |
| default: |
| Assert(false); // not implemented |
| __assume(false); |
| } |
| } |
| |
| void |
| Lowerer::GenerateObjectTestAndTypeLoad(IR::Instr *instrLdSt, IR::RegOpnd *opndBase, IR::RegOpnd *opndType, IR::LabelInstr *labelHelper) |
| { |
| IR::IndirOpnd *opndIndir; |
| |
| if (!opndBase->IsNotTaggedValue()) |
| { |
| m_lowererMD.GenerateObjectTest(opndBase, instrLdSt, labelHelper); |
| } |
| |
| opndIndir = IR::IndirOpnd::New(opndBase, Js::RecyclableObject::GetOffsetOfType(), TyMachReg, this->m_func); |
| InsertMove(opndType, opndIndir, instrLdSt); |
| } |
| |
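// Generates the bailout code for 'instr'. Three cases:
// - The instr is cloned: jump to the bailout label already generated for the clone.
// - The bailout info is shared and already lowered (bailOutInfo->bailOutInstr is a label): record this
//   instr's bailout kind (and, where applicable, the polymorphic cache index and function body) in the
//   shared bailout record, then jump to the shared bailout target.
// - First lowering of this bailout info: create the bailout record, turn 'instr' into a CALL to the
//   SaveAllRegisters(AndBranch)BailOut helper, and jump to the epilog (deferred for Yield bailouts).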
| IR::LabelInstr * |
| Lowerer::GenerateBailOut(IR::Instr * instr, IR::BranchInstr * branchInstr, IR::LabelInstr *bailOutLabel, IR::LabelInstr * collectRuntimeStatsLabel) |
| { |
| BailOutInfo * bailOutInfo = instr->GetBailOutInfo(); |
| IR::Instr * bailOutInstr = bailOutInfo->bailOutInstr; |
| if (instr->IsCloned()) |
| { |
| Assert(bailOutInstr != instr); |
| |
| // Jump to the cloned bail out label |
| IR::LabelInstr * bailOutLabelInstr = bailOutInstr->AsLabelInstr(); |
| IR::BranchInstr * bailOutBranch = IR::BranchInstr::New(LowererMD::MDUncondBranchOpcode, bailOutLabelInstr, this->m_func); |
| instr->InsertBefore(bailOutBranch); |
| instr->Remove(); |
| return bailOutLabel; |
| } |
| |
| // Add helper label to trigger layout. |
| if (!collectRuntimeStatsLabel) |
| { |
| collectRuntimeStatsLabel = IR::LabelInstr::New(Js::OpCode::Label, this->m_func, true); |
| } |
| Assert(!collectRuntimeStatsLabel->IsLinked()); |
| instr->InsertBefore(collectRuntimeStatsLabel); |
| |
| if (bailOutInstr != instr) |
| { |
| // this bailOutInfo is shared, just jump to the bailout target |
| |
| IR::Opnd * indexOpndForBailOutKind = nullptr; |
| |
| int bailOutRecordOffset = 0; |
| if (this->m_func->IsOOPJIT()) |
| { |
| bailOutRecordOffset = NativeCodeData::GetDataTotalOffset(bailOutInfo->bailOutRecord); |
| |
| indexOpndForBailOutKind = IR::IndirOpnd::New(IR::RegOpnd::New(m_func->GetTopFunc()->GetNativeCodeDataSym(), TyVar, m_func), (int)(bailOutRecordOffset + BailOutRecord::GetOffsetOfBailOutKind()), TyUint32, |
| #if DBG |
| NativeCodeData::GetDataDescription(bailOutInfo->bailOutRecord, this->m_func->m_alloc), |
| #endif |
| m_func, true); |
| |
| this->addToLiveOnBackEdgeSyms->Set(m_func->GetTopFunc()->GetNativeCodeDataSym()->m_id); |
| } |
| else |
| { |
| indexOpndForBailOutKind = |
| IR::MemRefOpnd::New((BYTE*)bailOutInfo->bailOutRecord + BailOutRecord::GetOffsetOfBailOutKind(), TyUint32, this->m_func, IR::AddrOpndKindDynamicBailOutKindRef); |
| } |
| |
| InsertMove( |
| indexOpndForBailOutKind, IR::IntConstOpnd::New(instr->GetBailOutKind(), indexOpndForBailOutKind->GetType(), this->m_func), instr, false); |
| |
// No point in doing this for BailOutFailedEquivalentTypeCheck or BailOutFailedEquivalentFixedFieldTypeCheck,
// because the respective inline cache is already polymorphic anyway.
| if (instr->GetBailOutKind() == IR::BailOutFailedTypeCheck || instr->GetBailOutKind() == IR::BailOutFailedFixedFieldTypeCheck) |
| { |
| // We have a type check bailout that shares a bailout record with other instructions. |
| // Generate code to write the cache index into the bailout record before we jump to the call site. |
| Assert(bailOutInfo->polymorphicCacheIndex != (uint)-1); |
| Assert(bailOutInfo->bailOutRecord); |
| IR::Opnd * indexOpnd = nullptr; |
| |
| if (this->m_func->IsOOPJIT()) |
| { |
| indexOpnd = IR::IndirOpnd::New(IR::RegOpnd::New(m_func->GetTopFunc()->GetNativeCodeDataSym(), TyVar, m_func), (int)(bailOutRecordOffset + BailOutRecord::GetOffsetOfPolymorphicCacheIndex()), TyUint32, m_func); |
| } |
| else |
| { |
| indexOpnd = IR::MemRefOpnd::New((BYTE*)bailOutInfo->bailOutRecord + BailOutRecord::GetOffsetOfPolymorphicCacheIndex(), TyUint32, this->m_func); |
| } |
| |
| InsertMove( |
| indexOpnd, IR::IntConstOpnd::New(bailOutInfo->polymorphicCacheIndex, TyUint32, this->m_func), instr, false); |
| } |
| |
| if (bailOutInfo->bailOutRecord->IsShared()) |
| { |
| IR::Opnd *functionBodyOpnd; |
| if (this->m_func->IsOOPJIT()) |
| { |
| functionBodyOpnd = IR::IndirOpnd::New(IR::RegOpnd::New(m_func->GetTopFunc()->GetNativeCodeDataSym(), TyVar, m_func), (int)(bailOutRecordOffset + SharedBailOutRecord::GetOffsetOfFunctionBody()), TyMachPtr, m_func); |
| } |
| else |
| { |
| functionBodyOpnd = IR::MemRefOpnd::New((BYTE*)bailOutInfo->bailOutRecord + SharedBailOutRecord::GetOffsetOfFunctionBody(), TyMachPtr, this->m_func); |
| } |
| InsertMove( |
| functionBodyOpnd, CreateFunctionBodyOpnd(instr->m_func), instr, false); |
| } |
| |
// GenerateBailOut should have replaced this with a label, since we should have already lowered
// the main bailOutInstr.
| IR::LabelInstr * bailOutTargetLabel = bailOutInstr->AsLabelInstr(); |
| #if DBG |
| if (bailOutTargetLabel->m_noHelperAssert) |
| { |
| collectRuntimeStatsLabel->m_noHelperAssert = true; |
| } |
| #endif |
| Assert(bailOutLabel == nullptr || bailOutLabel == bailOutTargetLabel); |
| |
| IR::BranchInstr * newBranchInstr = IR::BranchInstr::New(LowererMD::MDUncondBranchOpcode, bailOutTargetLabel, this->m_func); |
| instr->InsertAfter(newBranchInstr); |
| instr->Remove(); |
| return collectRuntimeStatsLabel ? collectRuntimeStatsLabel : bailOutLabel; |
| } |
| |
| // The bailout hasn't been generated yet. |
| Assert(!bailOutInstr->IsLabelInstr()); |
| |
| // Capture the condition for this bailout |
| if (bailOutLabel == nullptr) |
| { |
// Create a label and place it in the bailout info so that a shared bailout point can jump to this one.
| if (instr->m_prev->IsLabelInstr()) |
| { |
| bailOutLabel = instr->m_prev->AsLabelInstr(); |
| Assert(bailOutLabel->isOpHelper); |
| } |
| else |
| { |
| bailOutLabel = IR::LabelInstr::New(Js::OpCode::Label, this->m_func, true); |
| instr->InsertBefore(bailOutLabel); |
| } |
| } |
| else |
| { |
| instr->InsertBefore(bailOutLabel); |
| } |
| |
| #if DBG |
| bailOutLabel->m_noLazyHelperAssert = true; |
| #endif |
| |
| #if DBG |
| const IR::BailOutKind bailOutKind = bailOutInstr->GetBailOutKind(); |
| |
| if (bailOutInstr->m_opcode == Js::OpCode::BailOnNoSimdTypeSpec || |
| bailOutInstr->m_opcode == Js::OpCode::BailOnNoProfile || |
| bailOutInstr->m_opcode == Js::OpCode::BailOnException || |
| bailOutInstr->m_opcode == Js::OpCode::Yield || |
| bailOutKind & (IR::BailOutConventionalTypedArrayAccessOnly | |
| IR::BailOutConventionalNativeArrayAccessOnly | |
| IR::BailOutOnArrayAccessHelperCall)) |
| { |
| bailOutLabel->m_noHelperAssert = true; |
| } |
| #endif |
| |
| bailOutInfo->bailOutInstr = bailOutLabel; |
| bailOutLabel->m_hasNonBranchRef = true; |
| |
| // Create the bail out record |
| Assert(bailOutInfo->bailOutRecord == nullptr); |
| BailOutRecord * bailOutRecord; |
| IR::JnHelperMethod helperMethod; |
| if (branchInstr != nullptr) |
| { |
| Assert(branchInstr->GetSrc2() == nullptr); |
| Assert(branchInstr->GetDst() == nullptr); |
| |
| IR::LabelInstr * targetLabel = branchInstr->GetTarget(); |
| Assert(targetLabel->GetByteCodeOffset() != Js::Constants::NoByteCodeOffset); |
| |
| uint32 trueOffset; |
| uint32 falseOffset; |
| IR::Opnd *condOpnd = branchInstr->GetSrc1(); |
| bool invertTarget = (branchInstr->m_opcode == Js::OpCode::BrFalse_A); |
| |
| if (bailOutInfo->isInvertedBranch) |
| { |
| // Flip the condition |
| IR::Instr *subInstr = IR::Instr::New(Js::OpCode::Sub_I4, condOpnd, condOpnd, IR::IntConstOpnd::New(1, TyMachReg, instr->m_func), instr->m_func); |
| instr->InsertBefore(subInstr); |
| this->m_lowererMD.EmitInt4Instr(subInstr); |
| // We should really do a DEC/NEG for a full 2's complement flip from 0/1 to 1/0, |
| // but DEC is sufficient to flip from 0/1 to -1/0, which is false/true to true/false... |
| // instr->InsertBefore(IR::Instr::New(Js::OpCode::Neg_I4, condOpnd, condOpnd, instr->m_func)); |
| |
| invertTarget = invertTarget ? false : true; |
| } |
| |
| if (!invertTarget) |
| { |
| trueOffset = targetLabel->GetByteCodeOffset(); |
| falseOffset = bailOutInfo->bailOutOffset; |
| } |
| else |
| { |
| falseOffset = targetLabel->GetByteCodeOffset(); |
| trueOffset = bailOutInfo->bailOutOffset; |
| } |
| |
| bailOutRecord = NativeCodeDataNewZ(this->m_func->GetNativeCodeDataAllocator(), |
| BranchBailOutRecord, trueOffset, falseOffset, branchInstr->GetByteCodeReg(), instr->GetBailOutKind(), bailOutInfo->bailOutFunc); |
| |
| helperMethod = IR::HelperSaveAllRegistersAndBranchBailOut; |
| #ifdef _M_IX86 |
| if(!AutoSystemInfo::Data.SSE2Available()) |
| { |
| helperMethod = IR::HelperSaveAllRegistersNoSse2AndBranchBailOut; |
| } |
| #endif |
| |
| // Save the condition. The register allocator will generate arguments. |
| bailOutInfo->branchConditionOpnd = branchInstr->GetSrc1()->Copy(branchInstr->m_func); |
| } |
| else |
| { |
| if (bailOutInstr->GetBailOutKind() == IR::BailOutShared) |
| { |
| bailOutRecord = NativeCodeDataNewZ(this->m_func->GetNativeCodeDataAllocator(), |
| SharedBailOutRecord, bailOutInfo->bailOutOffset, bailOutInfo->polymorphicCacheIndex, instr->GetBailOutKind(), bailOutInfo->bailOutFunc); |
| if (bailOutInfo->isLoopTopBailOutInfo) |
| { |
| bailOutRecord->SetType(BailOutRecord::BailoutRecordType::SharedForLoopTop); |
| } |
| } |
| else |
| { |
| bailOutRecord = NativeCodeDataNewZ(this->m_func->GetNativeCodeDataAllocator(), |
| BailOutRecord, bailOutInfo->bailOutOffset, bailOutInfo->polymorphicCacheIndex, instr->GetBailOutKind(), bailOutInfo->bailOutFunc); |
| } |
| |
| helperMethod = IR::HelperSaveAllRegistersAndBailOut; |
| #ifdef _M_IX86 |
| if(!AutoSystemInfo::Data.SSE2Available()) |
| { |
| helperMethod = IR::HelperSaveAllRegistersNoSse2AndBailOut; |
| } |
| #endif |
| } |
| |
| // Save the bailout record. The register allocator will generate arguments. |
| bailOutInfo->bailOutRecord = bailOutRecord; |
| |
| #if ENABLE_DEBUG_CONFIG_OPTIONS |
| bailOutRecord->bailOutOpcode = bailOutInfo->bailOutOpcode; |
| #endif |
| |
| if (instr->m_opcode == Js::OpCode::BailOnNotStackArgs && instr->GetSrc1()) |
| { |
| // src1 on BailOnNotStackArgs is helping CSE |
| instr->FreeSrc1(); |
| } |
| |
| if (instr->GetSrc2() != nullptr) |
| { |
// Ideally we should never be in this situation, but in case we reached a
// condition where we didn't free src2, free it here.
| instr->FreeSrc2(); |
| } |
| |
// We do not need the lazy bailout bit on SaveAllRegistersAndBailOut.
| if (instr->HasLazyBailOut()) |
| { |
| instr->ClearLazyBailOut(); |
| Assert(instr->HasBailOutInfo()); |
| } |
| |
| // Call the bail out wrapper |
| instr->m_opcode = Js::OpCode::Call; |
| if(instr->GetDst()) |
| { |
// To facilitate register allocation, don't assign a destination. The result will go into the return
// register anyway, and this way the register allocator does not need to kill that register for the call.
| instr->FreeDst(); |
| } |
| instr->SetSrc1(IR::HelperCallOpnd::New(helperMethod, this->m_func)); |
| m_lowererMD.LowerCall(instr, 0); |
| |
| if (bailOutInstr->GetBailOutKind() != IR::BailOutForGeneratorYield) |
| { |
// For Yield bailouts, defer introducing the JMP to the epilog until the LowerPrologEpilog phase so
// that Yield does not appear to have control flow out of its containing block during the RegAlloc phase.
// Yield is an unconditional bailout, but we want to simulate the flow as if the Yield were
// just like a call.
| GenerateJumpToEpilogForBailOut(bailOutInfo, instr); |
| } |
| |
| return collectRuntimeStatsLabel ? collectRuntimeStatsLabel : bailOutLabel; |
| } |
| |
| void |
| Lowerer::GenerateJumpToEpilogForBailOut(BailOutInfo * bailOutInfo, IR::Instr *instr) |
| { |
| IR::Instr * exitPrevInstr = this->m_func->m_exitInstr->m_prev; |
| // JMP to the epilog |
| IR::LabelInstr * exitTargetInstr; |
| if (exitPrevInstr->IsLabelInstr()) |
| { |
| exitTargetInstr = exitPrevInstr->AsLabelInstr(); |
| } |
| else |
| { |
| exitTargetInstr = IR::LabelInstr::New(Js::OpCode::Label, this->m_func, false); |
| exitPrevInstr->InsertAfter(exitTargetInstr); |
| } |
| |
| exitTargetInstr = m_lowererMD.GetBailOutStackRestoreLabel(bailOutInfo, exitTargetInstr); |
| |
| IR::Instr * instrAfter = instr->m_next; |
| IR::BranchInstr * exitInstr = IR::BranchInstr::New(LowererMD::MDUncondBranchOpcode, exitTargetInstr, this->m_func); |
| instrAfter->InsertBefore(exitInstr); |
| } |
| |
| ///---------------------------------------------------------------------------- |
| /// |
| /// Lowerer::GenerateFastCondBranch |
| /// |
| ///---------------------------------------------------------------------------- |
| bool |
| Lowerer::GenerateFastCondBranch(IR::BranchInstr * instrBranch, bool *pIsHelper) |
| { |
| // The idea is to do an inline compare if we can prove that both sources |
| // are tagged ints |
| // |
| // Given: |
| // |
| // Brxx_A $L, src1, src2 |
| // |
| // Generate: |
| // |
| // (If not Int31's, goto $helper) |
| // Jxx $L, src1, src2 |
| // JMP $fallthru |
| // $helper: |
| // (caller will generate normal helper call sequence) |
| // $fallthru: |
| |
| IR::LabelInstr * labelHelper = nullptr; |
| IR::LabelInstr * labelFallThru; |
| IR::BranchInstr * instr; |
| IR::Opnd * opndSrc1; |
| IR::Opnd * opndSrc2; |
| |
| opndSrc1 = instrBranch->GetSrc1(); |
| opndSrc2 = instrBranch->GetSrc2(); |
| AssertMsg(opndSrc1 && opndSrc2, "BrC expects 2 src operands"); |
| |
| // Not tagged ints? |
| if (opndSrc1->IsRegOpnd() && opndSrc1->AsRegOpnd()->IsNotInt()) |
| { |
| return true; |
| } |
| if (opndSrc2->IsRegOpnd() && opndSrc2->AsRegOpnd()->IsNotInt()) |
| { |
| return true; |
| } |
| |
| // Tagged ints? |
| bool isTaggedInts = false; |
| if (opndSrc1->IsTaggedInt()) |
| { |
| if (opndSrc2->IsTaggedInt()) |
| { |
| isTaggedInts = true; |
| } |
| } |
| |
| if (!isTaggedInts) |
| { |
| labelHelper = IR::LabelInstr::New(Js::OpCode::Label, this->m_func, true); |
| this->m_lowererMD.GenerateSmIntPairTest(instrBranch, opndSrc1, opndSrc2, labelHelper); |
| } |
| |
| // Jxx $L, src1, src2 |
| |
| opndSrc1 = opndSrc1->UseWithNewType(TyInt32, this->m_func); |
| opndSrc2 = opndSrc2->UseWithNewType(TyInt32, this->m_func); |
| |
| instr = IR::BranchInstr::New(instrBranch->m_opcode, instrBranch->GetTarget(), opndSrc1, opndSrc2, this->m_func); |
| instrBranch->InsertBefore(instr); |
| this->m_lowererMD.LowerCondBranch(instr); |
| |
| if (isTaggedInts) |
| { |
| instrBranch->Remove(); |
| |
| // Skip lowering call to helper |
| return false; |
| } |
| |
| // JMP $fallthru |
| |
| IR::Instr *instrNext = instrBranch->GetNextRealInstrOrLabel(); |
| if (instrNext->IsLabelInstr()) |
| { |
| labelFallThru = instrNext->AsLabelInstr(); |
| } |
| else |
| { |
| labelFallThru = IR::LabelInstr::New(Js::OpCode::Label, this->m_func, /**pIsHelper*/FALSE); |
| instrBranch->InsertAfter(labelFallThru); |
| } |
| instr = IR::BranchInstr::New(LowererMD::MDUncondBranchOpcode, labelFallThru, this->m_func); |
| instrBranch->InsertBefore(instr); |
| |
| // $helper: |
| // (caller will generate normal helper call sequence) |
| // $fallthru: |
| |
| AssertMsg(labelHelper, "Should not be NULL"); |
| instrBranch->InsertBefore(labelHelper); |
| |
| *pIsHelper = true; |
| return true; |
| } |
| |
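// Lowers InlineeStart: lowers the inlinee's ArgOuts to moves into their stack slots (or marks them
// ArgOut_A_InlineBuiltIn when argout elimination applies), then materializes the inlinee frame's meta
// args (arg count, function object, etc.) in front of it. The InlineeStart itself is then removed,
// unless m_hasInlineArgsOpt requires keeping it (with its operands freed).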
| IR::Instr * |
| Lowerer::LowerInlineeStart(IR::Instr * inlineeStartInstr) |
| { |
| IR::Opnd *linkOpnd = inlineeStartInstr->GetSrc2(); |
| if (!linkOpnd) |
| { |
| Assert(inlineeStartInstr->m_func->m_hasInlineArgsOpt); |
| return inlineeStartInstr->m_prev; |
| } |
| |
AssertMsg(inlineeStartInstr->m_func->firstActualStackOffset != -1, "This should already have been done in the backward pass");
| |
| IR::Instr *startCall; |
| // Free the argOut links and lower them to MOVs |
| inlineeStartInstr->IterateArgInstrs([&](IR::Instr* argInstr){ |
| Assert(argInstr->m_opcode == Js::OpCode::ArgOut_A || argInstr->m_opcode == Js::OpCode::ArgOut_A_Inline); |
| startCall = argInstr->GetSrc2()->GetStackSym()->m_instrDef; |
| argInstr->FreeSrc2(); |
| #pragma prefast(suppress:6235, "Non-Zero Constant in Condition") |
| if (!PHASE_ON(Js::EliminateArgoutForInlineePhase, this->m_func) || inlineeStartInstr->m_func->GetJITFunctionBody()->HasOrParentHasArguments()) |
| { |
| m_lowererMD.ChangeToAssign(argInstr); |
| } |
| else |
| { |
| argInstr->m_opcode = Js::OpCode::ArgOut_A_InlineBuiltIn; |
| } |
| |
| return false; |
| }); |
| |
| IR::Instr *argInsertInstr = inlineeStartInstr; |
| uint i = 0; |
| inlineeStartInstr->IterateMetaArgs( [&] (IR::Instr* metaArg) |
| { |
| if(i == 0) |
| { |
| Lowerer::InsertMove(metaArg->m_func->GetNextInlineeFrameArgCountSlotOpnd(), |
| IR::AddrOpnd::NewNull(metaArg->m_func), |
| argInsertInstr); |
| } |
| if (i == Js::Constants::InlineeMetaArgIndex_FunctionObject) |
| { |
| metaArg->SetSrc1(inlineeStartInstr->GetSrc1()); |
| } |
| metaArg->Unlink(); |
| argInsertInstr->InsertBefore(metaArg); |
| IR::Instr* prev = metaArg->m_prev; |
| m_lowererMD.ChangeToAssign(metaArg); |
| if (i == Js::Constants::InlineeMetaArgIndex_Argc) |
| { |
| #if defined(_M_IX86) || defined(_M_X64) |
| Assert(metaArg == prev->m_next); |
| #else //defined(_M_ARM) |
| Assert(prev->m_next->m_opcode == Js::OpCode::LDIMM); |
| #endif |
| metaArg = prev->m_next; |
| Assert(metaArg->GetSrc1()->AsIntConstOpnd()->m_dontEncode == true); |
| metaArg->isInlineeEntryInstr = true; |
| LowererMD::Legalize(metaArg); |
| } |
| argInsertInstr = metaArg; |
| i++; |
| return false; |
| }); |
| |
| IR::Instr* prev = inlineeStartInstr->m_prev; |
| |
| if (inlineeStartInstr->m_func->m_hasInlineArgsOpt) |
| { |
| inlineeStartInstr->FreeSrc1(); |
| inlineeStartInstr->FreeSrc2(); |
| inlineeStartInstr->FreeDst(); |
| } |
| else |
| { |
| inlineeStartInstr->Remove(); |
| } |
| return prev; |
| } |
| |
| void |
| Lowerer::LowerInlineeEnd(IR::Instr *instr) |
| { |
| Assert(instr->m_func->IsInlinee()); |
| Assert(m_func->IsTopFunc()); |
| |
| // No need to emit code if the function wasn't marked as having implicit calls or bailout. Dead-Store should have removed inline overhead. |
| if (instr->m_func->GetHasImplicitCalls() || PHASE_OFF(Js::DeadStorePhase, this->m_func)) |
| { |
| Lowerer::InsertMove(instr->m_func->GetInlineeArgCountSlotOpnd(), |
| IR::IntConstOpnd::New(0, TyMachReg, instr->m_func), |
| instr); |
| } |
| |
// If we have optimized the arguments stack, keep InlineeEnd around, as it is used by the register allocator
| if (instr->m_func->m_hasInlineArgsOpt) |
| { |
| instr->FreeSrc1(); |
| } |
| else |
| { |
| instr->Remove(); |
| } |
| } |
| |
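// Loads a float constant into opndDst from a non-register source: a tagged int or JavascriptNumber
// address, an int const (signed or unsigned), or a float const (32- or 64-bit).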
| IR::Instr * |
| Lowerer::LoadFloatFromNonReg(IR::Opnd * opndSrc, IR::Opnd * opndDst, IR::Instr * instrInsert) |
| { |
| double value; |
| |
| if (opndSrc->IsAddrOpnd()) |
| { |
| Js::Var var = opndSrc->AsAddrOpnd()->m_address; |
| if (Js::TaggedInt::Is(var)) |
| { |
| value = Js::TaggedInt::ToDouble(var); |
| } |
| else |
| { |
| value = Js::JavascriptNumber::GetValue(var); |
| } |
| } |
| else if (opndSrc->IsIntConstOpnd()) |
| { |
| if (opndSrc->IsUInt32()) |
| { |
| value = (double)(uint32)opndSrc->AsIntConstOpnd()->GetValue(); |
| } |
| else |
| { |
| value = (double)opndSrc->AsIntConstOpnd()->GetValue(); |
| } |
| } |
| else if (opndSrc->IsFloatConstOpnd()) |
| { |
| value = (double)opndSrc->AsFloatConstOpnd()->m_value; |
| } |
| else if (opndSrc->IsFloat32ConstOpnd()) |
| { |
| float floatValue = opndSrc->AsFloat32ConstOpnd()->m_value; |
| return LowererMD::LoadFloatValue(opndDst, floatValue, instrInsert); |
| } |
| else |
| { |
| AssertMsg(0, "Unexpected opnd type"); |
| value = 0; |
| } |
| |
| return LowererMD::LoadFloatValue(opndDst, value, instrInsert); |
| } |
| |
| void |
| Lowerer::LoadInt32FromUntaggedVar(IR::Instr *const instrLoad) |
| { |
| Assert(instrLoad); |
| Assert(instrLoad->GetDst()); |
| Assert(instrLoad->GetDst()->IsRegOpnd()); |
| Assert(instrLoad->GetDst()->IsInt32()); |
| Assert(instrLoad->GetSrc1()); |
| Assert(instrLoad->GetSrc1()->IsRegOpnd()); |
| Assert(instrLoad->GetSrc1()->IsVar()); |
| Assert(!instrLoad->GetSrc2()); |
| |
| // push src |
| // int32Value = call JavascriptNumber::GetNonzeroInt32Value_NoChecks |
| // test int32Value, int32Value |
| // jne $done |
| // (fall through to 'instrLoad'; caller will generate code here) |
| // $done: |
| // (rest of program) |
| |
| Func *const func = instrLoad->m_func; |
| IR::LabelInstr *const doneLabel = instrLoad->GetOrCreateContinueLabel(); |
| |
| // push src |
| // int32Value = call JavascriptNumber::GetNonzeroInt32Value_NoChecks |
| StackSym *const int32ValueSym = instrLoad->GetDst()->AsRegOpnd()->m_sym; |
| IR::Instr *const instr = |
| IR::Instr::New( |
| Js::OpCode::Call, |
| IR::RegOpnd::New(int32ValueSym, TyInt32, func), |
| instrLoad->GetSrc1()->AsRegOpnd(), |
| func); |
| instrLoad->InsertBefore(instr); |
| LowerUnaryHelper(instr, IR::HelperGetNonzeroInt32Value_NoTaggedIntCheck); |
| |
| // test int32Value, int32Value |
| // jne $done |
| InsertCompareBranch( |
| IR::RegOpnd::New(int32ValueSym, TyInt32, func), |
| IR::IntConstOpnd::New(0, TyInt32, func, true), |
| Js::OpCode::BrNeq_A, |
| doneLabel, |
| instrLoad); |
| } |
| |
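// Extracts a constant index from an indir opnd: the opnd's offset when there is no index opnd, or the
// index sym's constant value. Returns false for a negative index, for which there is no fast path.
// *pValueOpnd is left null when the index is a non-constant register.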
| bool |
| Lowerer::GetValueFromIndirOpnd(IR::IndirOpnd *indirOpnd, IR::Opnd **pValueOpnd, IntConstType *pValue) |
| { |
| IR::RegOpnd *indexOpnd = indirOpnd->GetIndexOpnd(); |
| IR::Opnd* valueOpnd = nullptr; |
| IntConstType value = 0; |
| if (!indexOpnd) |
| { |
| value = (IntConstType)indirOpnd->GetOffset(); |
| if (value < 0) |
| { |
| // Can't do fast path for negative index |
| return false; |
| } |
| valueOpnd = IR::IntConstOpnd::New(value, TyInt32, this->m_func); |
| } |
| else if (indexOpnd->m_sym->IsIntConst()) |
| { |
| value = indexOpnd->AsRegOpnd()->m_sym->GetIntConstValue(); |
| if (value < 0) |
| { |
| // Can't do fast path for negative index |
| return false; |
| } |
| valueOpnd = IR::IntConstOpnd::New(value, TyInt32, this->m_func); |
| } |
| *pValueOpnd = valueOpnd; |
| *pValue = value; |
| return true; |
| } |
| |
| void |
| Lowerer::GenerateFastBrOnObject(IR::Instr *instr) |
| { |
| Assert(instr->m_opcode == Js::OpCode::BrOnObject_A); |
| |
| IR::RegOpnd *object = instr->GetSrc1()->IsRegOpnd() ? instr->GetSrc1()->AsRegOpnd() : nullptr; |
| IR::LabelInstr *done = instr->GetOrCreateContinueLabel(); |
| IR::LabelInstr *target = instr->AsBranchInstr()->GetTarget(); |
| IR::RegOpnd *typeRegOpnd = IR::RegOpnd::New(TyMachReg, m_func); |
| IR::IntConstOpnd *typeIdOpnd = IR::IntConstOpnd::New(Js::TypeIds_LastJavascriptPrimitiveType, TyInt32, instr->m_func); |
| |
| if (!object) |
| { |
| object = IR::RegOpnd::New(TyVar, m_func); |
| Lowerer::InsertMove(object, instr->GetSrc1(), instr); |
| } |
| |
| // TEST object, 1 |
| // JNE $done |
| // MOV typeRegOpnd, [object + offset(Type)] |
| // CMP [typeRegOpnd + offset(TypeId)], TypeIds_LastJavascriptPrimitiveType |
| // JGT $target |
| // $done: |
| |
| m_lowererMD.GenerateObjectTest(object, instr, done); |
| |
| InsertMove(typeRegOpnd, |
| IR::IndirOpnd::New(object, Js::RecyclableObject::GetOffsetOfType(), TyMachReg, m_func), |
| instr); |
| |
| InsertCompareBranch( |
| IR::IndirOpnd::New(typeRegOpnd, Js::Type::GetOffsetOfTypeId(), TyInt32, m_func), |
| typeIdOpnd, Js::OpCode::BrGt_A, target, instr); |
| |
| instr->Remove(); |
| } |
| |
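// Branches to 'target' if the object's inline slots are stored in the object header: loads the type,
// then the type handler, and compares the handler's offsetOfInlineSlots against the
// object-header-inlined slot offset.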
| void Lowerer::GenerateObjectHeaderInliningTest(IR::RegOpnd *baseOpnd, IR::LabelInstr * target,IR::Instr *insertBeforeInstr) |
| { |
| Assert(baseOpnd); |
| Assert(target); |
| AssertMsg( |
| baseOpnd->GetValueType().IsLikelyObject() && |
| baseOpnd->GetValueType().GetObjectType() == ObjectType::ObjectWithArray, |
| "Why are we here, when the object is already known not to have an ObjArray"); |
| Assert(insertBeforeInstr); |
| |
| Func *const func = insertBeforeInstr->m_func; |
| |
| // mov type, [base + offsetOf(type)] |
| IR::RegOpnd *const opnd = IR::RegOpnd::New(TyMachPtr, func); |
| |
| InsertMove( |
| opnd, |
| IR::IndirOpnd::New( |
| baseOpnd, |
| Js::DynamicObject::GetOffsetOfType(), |
| opnd->GetType(), |
| func), |
| insertBeforeInstr); |
| |
| // mov typeHandler, [type + offsetOf(typeHandler)] |
| InsertMove( |
| opnd, |
| IR::IndirOpnd::New( |
| opnd, |
| Js::DynamicType::GetOffsetOfTypeHandler(), |
| opnd->GetType(), |
| func), |
| insertBeforeInstr); |
| |
| IR::IndirOpnd * offsetOfInlineSlotOpnd = IR::IndirOpnd::New(opnd,Js::DynamicTypeHandler::GetOffsetOfOffsetOfInlineSlots(), TyInt16, func); |
| IR::IntConstOpnd * objHeaderInlinedSlotOffset = IR::IntConstOpnd::New(Js::DynamicTypeHandler::GetOffsetOfObjectHeaderInlineSlots(), TyInt16, func); |
| |
| // CMP [typeHandler + offsetOf(offsetOfInlineSlots)], objHeaderInlinedSlotOffset |
| InsertCompareBranch( |
| offsetOfInlineSlotOpnd, |
| objHeaderInlinedSlotOffset, |
| Js::OpCode::BrEq_A, |
| target, |
| insertBeforeInstr); |
| } |
| |
| void Lowerer::GenerateObjectTypeTest(IR::RegOpnd *srcReg, IR::Instr *instrInsert, IR::LabelInstr *labelHelper) |
| { |
| Assert(srcReg); |
| if (!srcReg->IsNotTaggedValue()) |
| { |
| m_lowererMD.GenerateObjectTest(srcReg, instrInsert, labelHelper); |
| } |
| |
| // CMP [srcReg], Js::DynamicObject::`vtable' |
| // JNE $helper |
| IR::BranchInstr *branchInstr = InsertCompareBranch( |
| IR::IndirOpnd::New(srcReg, 0, TyMachPtr, m_func), |
| LoadVTableValueOpnd(instrInsert, VTableValue::VtableDynamicObject), |
| Js::OpCode::BrNeq_A, |
| labelHelper, |
| instrInsert); |
| |
| InsertObjectPoison(srcReg, branchInstr, instrInsert, false); |
| } |
| |
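// Per-ObjectType lookup tables used by the array fast paths: the expected vtable, the offsets of the
// head segment / buffer and length fields, and the element type for indir accesses. Entries are
// VtableInvalid / -1 / TyIllegal for object types that have no array fast path.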
| const VTableValue Lowerer::VtableAddresses[static_cast<ValueType::TSize>(ObjectType::Count)] = |
| { |
| /* ObjectType::UninitializedObject */ VTableValue::VtableInvalid, |
| /* ObjectType::Object */ VTableValue::VtableInvalid, |
| /* ObjectType::RegExp */ VTableValue::VtableInvalid, |
| /* ObjectType::ObjectWithArray */ VTableValue::VtableJavascriptArray, |
| /* ObjectType::Array */ VTableValue::VtableJavascriptArray, |
| /* ObjectType::Int8Array */ VTableValue::VtableInt8Array, |
| /* ObjectType::Uint8Array */ VTableValue::VtableUint8Array, |
| /* ObjectType::Uint8ClampedArray */ VTableValue::VtableUint8ClampedArray, |
| /* ObjectType::Int16Array */ VTableValue::VtableInt16Array, |
| /* ObjectType::Uint16Array */ VTableValue::VtableUint16Array, |
| /* ObjectType::Int32Array */ VTableValue::VtableInt32Array, |
| /* ObjectType::Uint32Array */ VTableValue::VtableUint32Array, |
| /* ObjectType::Float32Array */ VTableValue::VtableFloat32Array, |
| /* ObjectType::Float64Array */ VTableValue::VtableFloat64Array, |
| /* ObjectType::Int8VirtualArray */ VTableValue::VtableInt8VirtualArray, |
| /* ObjectType::Uint8VirtualArray */ VTableValue::VtableUint8VirtualArray, |
| /* ObjectType::Uint8ClampedVirtualArray */ VTableValue::VtableUint8ClampedVirtualArray, |
| /* ObjectType::Int16VirtualArray */ VTableValue::VtableInt16VirtualArray, |
| /* ObjectType::Uint16VirtualArray */ VTableValue::VtableUint16VirtualArray, |
| /* ObjectType::Int32VirtualArray */ VTableValue::VtableInt32VirtualArray, |
| /* ObjectType::Uint32VirtualArray */ VTableValue::VtableUint32VirtualArray, |
| /* ObjectType::Float32VirtualArray */ VTableValue::VtableFloat32VirtualArray, |
| /* ObjectType::Float64VirtualArray */ VTableValue::VtableFloat64VirtualArray, |
| /* ObjectType::Int8MixedArray */ VTableValue::VtableInt8Array, |
| /* ObjectType::Uint8MixedArray */ VTableValue::VtableUint8Array, |
| /* ObjectType::Uint8ClampedMixedArray */ VTableValue::VtableUint8ClampedArray, |
| /* ObjectType::Int16MixedArray */ VTableValue::VtableInt16Array, |
| /* ObjectType::Uint16MixedArray */ VTableValue::VtableUint16Array, |
| /* ObjectType::Int32MixedArray */ VTableValue::VtableInt32Array, |
| /* ObjectType::Uint32MixedArray */ VTableValue::VtableUint32Array, |
| /* ObjectType::Float32MixedArray */ VTableValue::VtableFloat32Array, |
| /* ObjectType::Float64MixedArray */ VTableValue::VtableFloat64Array, |
| /* ObjectType::Int64Array */ VTableValue::VtableInt64Array, |
| /* ObjectType::Uint64Array */ VTableValue::VtableUint64Array, |
| /* ObjectType::BoolArray */ VTableValue::VtableBoolArray, |
/* ObjectType::CharArray */ VTableValue::VtableCharArray
};
| |
| const uint32 Lowerer::OffsetsOfHeadSegment[static_cast<ValueType::TSize>(ObjectType::Count)] = |
| { |
| /* ObjectType::UninitializedObject */ static_cast<uint32>(-1), |
| /* ObjectType::Object */ static_cast<uint32>(-1), |
| /* ObjectType::RegExp */ static_cast<uint32>(-1), |
| /* ObjectType::ObjectWithArray */ Js::JavascriptArray::GetOffsetOfHead(), |
| /* ObjectType::Array */ Js::JavascriptArray::GetOffsetOfHead(), |
| /* ObjectType::Int8Array */ Js::Int8Array::GetOffsetOfBuffer(), |
| /* ObjectType::Uint8Array */ Js::Uint8Array::GetOffsetOfBuffer(), |
| /* ObjectType::Uint8ClampedArray */ Js::Uint8ClampedArray::GetOffsetOfBuffer(), |
| /* ObjectType::Int16Array */ Js::Int16Array::GetOffsetOfBuffer(), |
| /* ObjectType::Uint16Array */ Js::Uint16Array::GetOffsetOfBuffer(), |
| /* ObjectType::Int32Array */ Js::Int32Array::GetOffsetOfBuffer(), |
| /* ObjectType::Uint32Array */ Js::Uint32Array::GetOffsetOfBuffer(), |
| /* ObjectType::Float32Array */ Js::Float32Array::GetOffsetOfBuffer(), |
| /* ObjectType::Float64Array */ Js::Float64Array::GetOffsetOfBuffer(), |
| /* ObjectType::Int8VirtualArray */ Js::Int8VirtualArray::GetOffsetOfBuffer(), |
| /* ObjectType::Uint8VirtualArray */ Js::Uint8VirtualArray::GetOffsetOfBuffer(), |
| /* ObjectType::Uint8ClampedVirtualArray */ Js::Uint8ClampedVirtualArray::GetOffsetOfBuffer(), |
| /* ObjectType::Int16VirtualArray */ Js::Int16VirtualArray::GetOffsetOfBuffer(), |
| /* ObjectType::Uint16VirtualArray */ Js::Uint16VirtualArray::GetOffsetOfBuffer(), |
| /* ObjectType::Int32VirtualArray */ Js::Int32VirtualArray::GetOffsetOfBuffer(), |
| /* ObjectType::Uint32VirtualArray */ Js::Uint32VirtualArray::GetOffsetOfBuffer(), |
| /* ObjectType::Float32VirtualArray */ Js::Float32VirtualArray::GetOffsetOfBuffer(), |
| /* ObjectType::Float64VirtualArray */ Js::Float64VirtualArray::GetOffsetOfBuffer(), |
| /* ObjectType::Int8MixedArray */ Js::Int8Array::GetOffsetOfBuffer(), |
| /* ObjectType::Uint8MixedArray */ Js::Uint8Array::GetOffsetOfBuffer(), |
| /* ObjectType::Uint8ClampedMixedArray */ Js::Uint8ClampedArray::GetOffsetOfBuffer(), |
| /* ObjectType::Int16MixedArray */ Js::Int16Array::GetOffsetOfBuffer(), |
| /* ObjectType::Uint16MixedArray */ Js::Uint16Array::GetOffsetOfBuffer(), |
| /* ObjectType::Int32MixedArray */ Js::Int32Array::GetOffsetOfBuffer(), |
| /* ObjectType::Uint32MixedArray */ Js::Uint32Array::GetOffsetOfBuffer(), |
| /* ObjectType::Float32MixedArray */ Js::Float32Array::GetOffsetOfBuffer(), |
| /* ObjectType::Float64MixedArray */ Js::Float64Array::GetOffsetOfBuffer(), |
| /* ObjectType::Int64Array */ Js::Int64Array::GetOffsetOfBuffer(), |
| /* ObjectType::Uint64Array */ Js::Uint64Array::GetOffsetOfBuffer(), |
| /* ObjectType::BoolArray */ Js::BoolArray::GetOffsetOfBuffer(), |
| /* ObjectType::CharArray */ Js::CharArray::GetOffsetOfBuffer() |
| }; |
| |
| const uint32 Lowerer::OffsetsOfLength[static_cast<ValueType::TSize>(ObjectType::Count)] = |
| { |
| /* ObjectType::UninitializedObject */ static_cast<uint32>(-1), |
| /* ObjectType::Object */ static_cast<uint32>(-1), |
| /* ObjectType::RegExp */ static_cast<uint32>(-1), |
| /* ObjectType::ObjectWithArray */ Js::JavascriptArray::GetOffsetOfLength(), |
| /* ObjectType::Array */ Js::JavascriptArray::GetOffsetOfLength(), |
| /* ObjectType::Int8Array */ Js::Int8Array::GetOffsetOfLength(), |
| /* ObjectType::Uint8Array */ Js::Uint8Array::GetOffsetOfLength(), |
| /* ObjectType::Uint8ClampedArray */ Js::Uint8ClampedArray::GetOffsetOfLength(), |
| /* ObjectType::Int16Array */ Js::Int16Array::GetOffsetOfLength(), |
| /* ObjectType::Uint16Array */ Js::Uint16Array::GetOffsetOfLength(), |
| /* ObjectType::Int32Array */ Js::Int32Array::GetOffsetOfLength(), |
| /* ObjectType::Uint32Array */ Js::Uint32Array::GetOffsetOfLength(), |
| /* ObjectType::Float32Array */ Js::Float32Array::GetOffsetOfLength(), |
| /* ObjectType::Float64Array */ Js::Float64Array::GetOffsetOfLength(), |
| /* ObjectType::Int8VirtualArray */ Js::Int8VirtualArray::GetOffsetOfLength(), |
| /* ObjectType::Uint8VirtualArray */ Js::Uint8VirtualArray::GetOffsetOfLength(), |
| /* ObjectType::Uint8ClampedVirtualArray */ Js::Uint8ClampedVirtualArray::GetOffsetOfLength(), |
| /* ObjectType::Int16VirtualArray */ Js::Int16VirtualArray::GetOffsetOfLength(), |
| /* ObjectType::Uint16VirtualArray */ Js::Uint16VirtualArray::GetOffsetOfLength(), |
| /* ObjectType::Int32VirtualArray */ Js::Int32VirtualArray::GetOffsetOfLength(), |
| /* ObjectType::Uint32VirtualArray */ Js::Uint32VirtualArray::GetOffsetOfLength(), |
| /* ObjectType::Float32VirtualArray */ Js::Float32VirtualArray::GetOffsetOfLength(), |
| /* ObjectType::Float64VirtualArray */ Js::Float64VirtualArray::GetOffsetOfLength(), |
| /* ObjectType::Int8MixedArray */ Js::Int8Array::GetOffsetOfLength(), |
| /* ObjectType::Uint8MixedArray */ Js::Uint8Array::GetOffsetOfLength(), |
| /* ObjectType::Uint8ClampedMixedArray */ Js::Uint8ClampedArray::GetOffsetOfLength(), |
| /* ObjectType::Int16MixedArray */ Js::Int16Array::GetOffsetOfLength(), |
| /* ObjectType::Uint16MixedArray */ Js::Uint16Array::GetOffsetOfLength(), |
| /* ObjectType::Int32MixedArray */ Js::Int32Array::GetOffsetOfLength(), |
| /* ObjectType::Uint32MixedArray */ Js::Uint32Array::GetOffsetOfLength(), |
| /* ObjectType::Float32MixedArray */ Js::Float32Array::GetOffsetOfLength(), |
| /* ObjectType::Float64MixedArray */ Js::Float64Array::GetOffsetOfLength(), |
| /* ObjectType::Int64Array */ Js::Int64Array::GetOffsetOfLength(), |
| /* ObjectType::Uint64Array */ Js::Uint64Array::GetOffsetOfLength(), |
| /* ObjectType::BoolArray */ Js::BoolArray::GetOffsetOfLength(), |
| /* ObjectType::CharArray */ Js::CharArray::GetOffsetOfLength() |
| }; |
| |
| const IRType Lowerer::IndirTypes[static_cast<ValueType::TSize>(ObjectType::Count)] = |
| { |
| /* ObjectType::UninitializedObject */ TyIllegal, |
| /* ObjectType::Object */ TyIllegal, |
| /* ObjectType::RegExp */ TyIllegal, |
| /* ObjectType::ObjectWithArray */ TyVar, |
| /* ObjectType::Array */ TyVar, |
| /* ObjectType::Int8Array */ TyInt8, |
| /* ObjectType::Uint8Array */ TyUint8, |
| /* ObjectType::Uint8ClampedArray */ TyUint8, |
| /* ObjectType::Int16Array */ TyInt16, |
| /* ObjectType::Uint16Array */ TyUint16, |
| /* ObjectType::Int32Array */ TyInt32, |
| /* ObjectType::Uint32Array */ TyUint32, |
| /* ObjectType::Float32Array */ TyFloat32, |
| /* ObjectType::Float64Array */ TyFloat64, |
| /* ObjectType::Int8VirtualArray */ TyInt8, |
| /* ObjectType::Uint8VirtualArray */ TyUint8, |
| /* ObjectType::Uint8ClampedVirtualArray */ TyUint8, |
| /* ObjectType::Int16VirtualArray */ TyInt16, |
    /* ObjectType::Uint16VirtualArray */ TyUint16,
| /* ObjectType::Int32VirtualArray */ TyInt32, |
| /* ObjectType::Uint32VirtualArray */ TyUint32, |
| /* ObjectType::Float32VirtualArray */ TyFloat32, |
| /* ObjectType::Float64VirtualArray */ TyFloat64, |
| /* ObjectType::Int8MixedArray */ TyInt8, |
| /* ObjectType::Uint8MixedArray */ TyUint8, |
| /* ObjectType::Uint8ClampedMixedArray */ TyUint8, |
| /* ObjectType::Int16MixedArray */ TyInt16, |
| /* ObjectType::Uint16MixedArray */ TyUint16, |
| /* ObjectType::Int32MixedArray */ TyInt32, |
| /* ObjectType::Uint32MixedArray */ TyUint32, |
| /* ObjectType::Float32MixedArray */ TyFloat32, |
| /* ObjectType::Float64MixedArray */ TyFloat64, |
| /* ObjectType::Int64Array */ TyInt64, |
| /* ObjectType::Uint64Array */ TyUint64, |
| /* ObjectType::BoolArray */ TyUint8, |
| /* ObjectType::CharArray */ TyUint16 |
| }; |
| |
| const BYTE Lowerer::IndirScales[static_cast<ValueType::TSize>(ObjectType::Count)] = |
| { |
| /* ObjectType::UninitializedObject */ static_cast<BYTE>(-1), |
| /* ObjectType::Object */ static_cast<BYTE>(-1), |
| /* ObjectType::RegExp */ static_cast<BYTE>(-1), |
| /* ObjectType::ObjectWithArray */ LowererMD::GetDefaultIndirScale(), |
| /* ObjectType::Array */ LowererMD::GetDefaultIndirScale(), |
| /* ObjectType::Int8Array */ 0, // log2(sizeof(int8)) |
| /* ObjectType::Uint8Array */ 0, // log2(sizeof(uint8)) |
| /* ObjectType::Uint8ClampedArray */ 0, // log2(sizeof(uint8)) |
| /* ObjectType::Int16Array */ 1, // log2(sizeof(int16)) |
| /* ObjectType::Uint16Array */ 1, // log2(sizeof(uint16)) |
| /* ObjectType::Int32Array */ 2, // log2(sizeof(int32)) |
| /* ObjectType::Uint32Array */ 2, // log2(sizeof(uint32)) |
| /* ObjectType::Float32Array */ 2, // log2(sizeof(float)) |
| /* ObjectType::Float64Array */ 3, // log2(sizeof(double)) |
| /* ObjectType::Int8VirtualArray */ 0, // log2(sizeof(int8)) |
| /* ObjectType::Uint8VirtualArray */ 0, // log2(sizeof(uint8)) |
| /* ObjectType::Uint8ClampedVirtualArray */ 0, // log2(sizeof(uint8)) |
| /* ObjectType::Int16VirtualArray */ 1, // log2(sizeof(int16)) |
| /* ObjectType::Uint16VirtualArray */ 1, // log2(sizeof(uint16)) |
| /* ObjectType::Int32VirtualArray */ 2, // log2(sizeof(int32)) |
| /* ObjectType::Uint32VirtualArray */ 2, // log2(sizeof(uint32)) |
| /* ObjectType::Float32VirtualArray */ 2, // log2(sizeof(float)) |
| /* ObjectType::Float64VirtualArray */ 3, // log2(sizeof(double)) |
| /* ObjectType::Int8MixedArray */ 0, // log2(sizeof(int8)) |
| /* ObjectType::Uint8MixedArray */ 0, // log2(sizeof(uint8)) |
| /* ObjectType::Uint8ClampedMixedArray */ 0, // log2(sizeof(uint8)) |
| /* ObjectType::Int16MixedArray */ 1, // log2(sizeof(int16)) |
| /* ObjectType::Uint16MixedArray */ 1, // log2(sizeof(uint16)) |
| /* ObjectType::Int32MixedArray */ 2, // log2(sizeof(int32)) |
| /* ObjectType::Uint32MixedArray */ 2, // log2(sizeof(uint32)) |
| /* ObjectType::Float32MixedArray */ 2, // log2(sizeof(float)) |
| /* ObjectType::Float64MixedArray */ 3, // log2(sizeof(double)) |
| /* ObjectType::Int64Array */ 3, // log2(sizeof(int64)) |
| /* ObjectType::Uint64Array */ 3, // log2(sizeof(uint64)) |
| /* ObjectType::BoolArray */ 0, // log2(sizeof(bool)) |
| /* ObjectType::CharArray */ 1 // log2(sizeof(char16)) |
| }; |
| |
| VTableValue Lowerer::GetArrayVtableAddress(const ValueType valueType, bool getVirtual) |
| { |
| Assert(valueType.IsLikelyAnyOptimizedArray()); |
| if(valueType.IsLikelyArrayOrObjectWithArray()) |
| { |
| if(valueType.HasIntElements()) |
| { |
| return VTableValue::VtableNativeIntArray; |
| } |
| else if(valueType.HasFloatElements()) |
| { |
| return VTableValue::VtableNativeFloatArray; |
| } |
| } |
| if (getVirtual && valueType.IsLikelyMixedTypedArrayType()) |
| { |
| return VtableAddresses[static_cast<ValueType::TSize>(valueType.GetMixedToVirtualTypedArrayObjectType())]; |
| } |
| return VtableAddresses[static_cast<ValueType::TSize>(valueType.GetObjectType())]; |
| } |
| |
| uint32 Lowerer::GetArrayOffsetOfHeadSegment(const ValueType valueType) |
| { |
| Assert(valueType.IsLikelyAnyOptimizedArray()); |
| return OffsetsOfHeadSegment[static_cast<ValueType::TSize>(valueType.GetObjectType())]; |
| } |
| |
| uint32 Lowerer::GetArrayOffsetOfLength(const ValueType valueType) |
| { |
| Assert(valueType.IsLikelyAnyOptimizedArray()); |
| return OffsetsOfLength[static_cast<ValueType::TSize>(valueType.GetObjectType())]; |
| } |
| |
| IRType Lowerer::GetArrayIndirType(const ValueType valueType) |
| { |
| Assert(valueType.IsLikelyAnyOptimizedArray()); |
| if(valueType.IsLikelyArrayOrObjectWithArray()) |
| { |
| if(valueType.HasIntElements()) |
| { |
| return TyInt32; |
| } |
| else if(valueType.HasFloatElements()) |
| { |
| return TyFloat64; |
| } |
| } |
| |
| return IndirTypes[static_cast<ValueType::TSize>(valueType.GetObjectType())]; |
| } |
| |
| BYTE Lowerer::GetArrayIndirScale(const ValueType valueType) |
| { |
| Assert(valueType.IsLikelyAnyOptimizedArray()); |
| if(valueType.IsLikelyArrayOrObjectWithArray()) |
| { |
| if(valueType.HasIntElements()) |
| { |
| return 2; // log2(sizeof(int32)) |
| } |
| else if(valueType.HasFloatElements()) |
| { |
| return 3; // log2(sizeof(double)) |
| } |
| } |
| |
| return IndirScales[static_cast<ValueType::TSize>(valueType.GetObjectType())]; |
| } |
| |
| int Lowerer::SimdGetElementCountFromBytes(ValueType arrValueType, uint8 dataWidth) |
| { |
| Assert(dataWidth == 4 || dataWidth == 8 || dataWidth == 12 || dataWidth == 16); |
| Assert(arrValueType.IsTypedArray()); |
| BYTE bpe = 1 << Lowerer::GetArrayIndirScale(arrValueType); |
| |
| // round up |
| return (int)::ceil(((float)dataWidth) / bpe); |
| } |
| |
| bool Lowerer::ShouldGenerateArrayFastPath( |
| const IR::Opnd *const arrayOpnd, |
| const bool supportsObjectsWithArrays, |
| const bool supportsTypedArrays, |
| const bool requiresSse2ForFloatArrays) const |
| { |
| Assert(arrayOpnd); |
| |
| const ValueType arrayValueType(arrayOpnd->GetValueType()); |
| if(arrayValueType.IsUninitialized()) |
| { |
| // Don't have info about the value type, better to generate the fast path anyway |
| return true; |
| } |
| if (!arrayValueType.IsLikelyObject()) |
| { |
| if (!arrayValueType.HasBeenObject() || arrayValueType.IsLikelyString()) |
| { |
| return false; |
| } |
        // We have seen an object in this code path at least once; generate the fast path in the hope that the value is an array.
        // It would be nice to have all the attributes set, but ValueType is only 16 bits; consider expanding it.
| return true; |
| } |
| |
| if( (!supportsObjectsWithArrays && arrayValueType.GetObjectType() == ObjectType::ObjectWithArray) || |
| (!supportsTypedArrays && arrayValueType.IsLikelyTypedArray()) ) |
| { |
| // The fast path likely would not hit |
| return false; |
| } |
| if(arrayValueType.GetObjectType() == ObjectType::UninitializedObject) |
| { |
| // Don't have info about the object type, better to generate the fast path anyway |
| return true; |
| } |
| #ifdef _M_IX86 |
| if(requiresSse2ForFloatArrays && |
| ( |
| arrayValueType.GetObjectType() == ObjectType::Float32Array || |
| arrayValueType.GetObjectType() == ObjectType::Float64Array |
| ) && |
| !AutoSystemInfo::Data.SSE2Available()) |
| { |
| // Fast paths for float arrays rely on SSE2 |
| return false; |
| } |
| #endif |
| return !arrayValueType.IsLikelyAnyUnOptimizedArray(); |
| } |
| |
| IR::RegOpnd *Lowerer::LoadObjectArray(IR::RegOpnd *const baseOpnd, IR::Instr *const insertBeforeInstr) |
| { |
| Assert(baseOpnd); |
| Assert( |
| baseOpnd->GetValueType().IsLikelyObject() && |
| baseOpnd->GetValueType().GetObjectType() == ObjectType::ObjectWithArray); |
| Assert(insertBeforeInstr); |
| |
| Func *const func = insertBeforeInstr->m_func; |
| |
| // mov array, [base + offsetOf(objectArrayOrFlags)] |
| IR::RegOpnd *const arrayOpnd = |
| baseOpnd->IsArrayRegOpnd() ? baseOpnd->AsArrayRegOpnd()->CopyAsRegOpnd(func) : baseOpnd->Copy(func)->AsRegOpnd(); |
| arrayOpnd->m_sym = StackSym::New(TyVar, func); |
| arrayOpnd->SetValueType(arrayOpnd->GetValueType().ToArray()); |
| const IR::AutoReuseOpnd autoReuseArrayOpnd(arrayOpnd, func, false /* autoDelete */); |
| InsertMove( |
| arrayOpnd, |
| IR::IndirOpnd::New( |
| baseOpnd, |
| Js::DynamicObject::GetOffsetOfObjectArray(), |
| arrayOpnd->GetType(), |
| func), |
| insertBeforeInstr); |
| |
| return arrayOpnd; |
| } |
| |
| void |
| Lowerer::GenerateIsEnabledArraySetElementFastPathCheck( |
| IR::LabelInstr * isDisabledLabel, |
| IR::Instr * const insertBeforeInstr) |
| { |
| InsertCompareBranch( |
| this->LoadOptimizationOverridesValueOpnd(insertBeforeInstr, OptimizationOverridesValue::OptimizationOverridesArraySetElementFastPathVtable), |
| LoadVTableValueOpnd(insertBeforeInstr, VTableValue::VtableInvalid), |
| Js::OpCode::BrEq_A, |
| isDisabledLabel, |
| insertBeforeInstr); |
| } |
| |
| IR::RegOpnd *Lowerer::GenerateArrayTest( |
| IR::RegOpnd *const baseOpnd, |
| IR::LabelInstr *const isNotObjectLabel, |
| IR::LabelInstr *const isNotArrayLabel, |
| IR::Instr *const insertBeforeInstr, |
| const bool forceFloat, |
| const bool isStore, |
| const bool allowDefiniteArray) |
| { |
| Assert(baseOpnd); |
| |
| const ValueType baseValueType(baseOpnd->GetValueType()); |
| |
| // Shouldn't request to do an array test when it's already known to be an array, or if it's unlikely to be an array |
| Assert(!baseValueType.IsAnyOptimizedArray() || allowDefiniteArray || baseValueType.IsNativeArray()); |
| Assert(baseValueType.IsUninitialized() || baseValueType.HasBeenObject()); |
| |
| Assert(isNotObjectLabel); |
| Assert(isNotArrayLabel); |
| Assert(insertBeforeInstr); |
| |
| Func *const func = insertBeforeInstr->m_func; |
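
    // Overall shape of the emitted test (illustrative; the ObjectWithArray and
    // mixed/virtual typed array paths below add extra checks):
    //     <object test on base>            ; bail to $isNotObjectLabel if tagged
    //     CMP [array], <expected vtable>   ; vtable identity encodes the array type
    //     JNE $isNotArrayLabel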
| |
| IR::RegOpnd *arrayOpnd; |
| IR::AutoReuseOpnd autoReuseArrayOpnd; |
| if(baseValueType.IsLikelyObject() && baseValueType.GetObjectType() == ObjectType::ObjectWithArray) |
| { |
| // Only DynamicObject is allowed (DynamicObject vtable is ensured) because some object types have special handling for |
| // index properties - arguments object, string object, external object, etc. |
        // See JavascriptArray::Jit_TryGetArrayForObjectWithArray as well.
| GenerateObjectTypeTest(baseOpnd, insertBeforeInstr, isNotObjectLabel); |
| GenerateObjectHeaderInliningTest(baseOpnd, isNotArrayLabel, insertBeforeInstr); |
| arrayOpnd = LoadObjectArray(baseOpnd, insertBeforeInstr); |
| autoReuseArrayOpnd.Initialize(arrayOpnd, func, false /* autoDelete */); |
| |
| // test array, array |
| // je $isNotArrayLabel |
| // test array, 1 |
| // jne $isNotArrayLabel |
| InsertTestBranch( |
| arrayOpnd, |
| arrayOpnd, |
| Js::OpCode::BrEq_A, |
| isNotArrayLabel, |
| insertBeforeInstr); |
| InsertTestBranch( |
| arrayOpnd, |
| IR::IntConstOpnd::New(1, TyUint8, func, true), |
| Js::OpCode::BrNeq_A, |
| isNotArrayLabel, |
| insertBeforeInstr); |
| } |
| else |
| { |
| if(!baseOpnd->IsNotTaggedValue()) |
| { |
| m_lowererMD.GenerateObjectTest(baseOpnd, insertBeforeInstr, isNotObjectLabel); |
| } |
| arrayOpnd = baseOpnd->Copy(func)->AsRegOpnd(); |
| if(!baseValueType.IsLikelyAnyOptimizedArray()) |
| { |
| arrayOpnd->SetValueType( |
| ValueType::GetObject(ObjectType::Array) |
| .ToLikely() |
| .SetHasNoMissingValues(false) |
| .SetArrayTypeId(Js::TypeIds_Array)); |
| } |
| autoReuseArrayOpnd.Initialize(arrayOpnd, func, false /* autoDelete */); |
| } |
| |
| VTableValue vtableAddress = baseValueType.IsLikelyAnyOptimizedArray() |
| ? GetArrayVtableAddress(baseValueType) |
| : VTableValue::VtableJavascriptArray; |
| |
| VTableValue virtualVtableAddress = VTableValue::VtableInvalid; |
| if (baseValueType.IsLikelyMixedTypedArrayType()) |
| { |
| virtualVtableAddress = GetArrayVtableAddress(baseValueType, true); |
| } |
| IR::Opnd * vtableOpnd; |
| IR::Opnd * vtableVirtualOpnd = nullptr; |
| if (isStore && |
| (vtableAddress == VTableValue::VtableJavascriptArray || |
| baseValueType.IsLikelyNativeArray())) |
| { |
| vtableOpnd = IR::RegOpnd::New(TyMachPtr, func); |
| if (baseValueType.IsLikelyNativeArray()) |
| { |
| if (baseValueType.HasIntElements()) |
| { |
| InsertMove(vtableOpnd, this->LoadOptimizationOverridesValueOpnd(insertBeforeInstr, OptimizationOverridesValue::OptimizationOverridesIntArraySetElementFastPathVtable), insertBeforeInstr); |
| } |
| else |
| { |
| Assert(baseValueType.HasFloatElements()); |
| InsertMove(vtableOpnd, this->LoadOptimizationOverridesValueOpnd(insertBeforeInstr, OptimizationOverridesValue::OptimizationOverridesFloatArraySetElementFastPathVtable), insertBeforeInstr); |
| } |
| } |
| else |
| { |
| InsertMove(vtableOpnd, this->LoadOptimizationOverridesValueOpnd(insertBeforeInstr, OptimizationOverridesValue::OptimizationOverridesArraySetElementFastPathVtable), insertBeforeInstr); |
| } |
| } |
| else |
| { |
| vtableOpnd = LoadVTableValueOpnd(insertBeforeInstr, vtableAddress); |
| } |
| |
| // cmp [array], vtableAddress |
| // jne $isNotArrayLabel |
| |
| if (forceFloat && baseValueType.IsLikelyNativeFloatArray()) |
| { |
| // We expect a native float array. If we get native int instead, convert it on the spot and bail out afterward. |
| const auto goodArrayLabel = IR::LabelInstr::New(Js::OpCode::Label, m_func); |
| |
| IR::BranchInstr* branchInstr = InsertCompareBranch( |
| IR::IndirOpnd::New(arrayOpnd, 0, TyMachPtr, func), |
| vtableOpnd, |
| Js::OpCode::BrEq_A, |
| goodArrayLabel, |
| insertBeforeInstr); |
| |
| InsertObjectPoison(arrayOpnd, branchInstr, insertBeforeInstr, isStore); |
| |
| IR::LabelInstr *notFloatArrayLabel = IR::LabelInstr::New(Js::OpCode::Label, m_func, true); |
| insertBeforeInstr->InsertBefore(notFloatArrayLabel); |
| |
| if (isStore) |
| { |
| vtableOpnd = IR::RegOpnd::New(TyMachPtr, func); |
| InsertMove(vtableOpnd, IR::MemRefOpnd::New( |
| func->GetScriptContextInfo()->GetIntArraySetElementFastPathVtableAddr(), |
| TyMachPtr, func), insertBeforeInstr); |
| } |
| else |
| { |
| vtableOpnd = LoadVTableValueOpnd(insertBeforeInstr, VTableValue::VtableJavascriptNativeIntArray); |
| } |
| |
| branchInstr = InsertCompareBranch( |
| IR::IndirOpnd::New(arrayOpnd, 0, TyMachPtr, func), |
| vtableOpnd, |
| Js::OpCode::BrNeq_A, |
| isNotArrayLabel, |
| insertBeforeInstr); |
| |
| InsertObjectPoison(arrayOpnd, branchInstr, insertBeforeInstr, isStore); |
| |
| m_lowererMD.LoadHelperArgument(insertBeforeInstr, arrayOpnd); |
| |
| IR::Instr *helperInstr = IR::Instr::New(Js::OpCode::Call, m_func); |
| insertBeforeInstr->InsertBefore(helperInstr); |
| m_lowererMD.ChangeToHelperCall(helperInstr, IR::HelperIntArr_ToNativeFloatArray); |
| |
| // Branch to the (bailout) label, because converting the array may have made our array checks unsafe. |
| InsertBranch(Js::OpCode::Br, isNotArrayLabel, insertBeforeInstr); |
| |
| insertBeforeInstr->InsertBefore(goodArrayLabel); |
| } |
| else |
| { |
| IR::LabelInstr* goodArrayLabel = nullptr; |
| if (baseValueType.IsLikelyMixedTypedArrayType()) |
| { |
| goodArrayLabel = IR::LabelInstr::New(Js::OpCode::Label, m_func); |
| InsertCompareBranch( |
| IR::IndirOpnd::New(arrayOpnd, 0, TyMachPtr, func), |
| vtableOpnd, |
| Js::OpCode::BrEq_A, |
| goodArrayLabel, |
| insertBeforeInstr); |
| Assert(virtualVtableAddress); |
| vtableVirtualOpnd = LoadVTableValueOpnd(insertBeforeInstr, virtualVtableAddress); |
| Assert(vtableVirtualOpnd); |
| IR::BranchInstr* branchInstr = InsertCompareBranch( |
| IR::IndirOpnd::New(arrayOpnd, 0, TyMachPtr, func), |
| vtableVirtualOpnd, |
| Js::OpCode::BrNeq_A, |
| isNotArrayLabel, |
| insertBeforeInstr); |
| |
| InsertObjectPoison(arrayOpnd, branchInstr, insertBeforeInstr, isStore); |
| insertBeforeInstr->InsertBefore(goodArrayLabel); |
| } |
| else |
| { |
| IR::BranchInstr *branchInstr = InsertCompareBranch( |
| IR::IndirOpnd::New(arrayOpnd, 0, TyMachPtr, func), |
| vtableOpnd, |
| Js::OpCode::BrNeq_A, |
| isNotArrayLabel, |
| insertBeforeInstr); |
| |
| InsertObjectPoison(arrayOpnd, branchInstr, insertBeforeInstr, isStore); |
| } |
| |
| } |
| |
| ValueType arrayValueType(arrayOpnd->GetValueType()); |
| if(arrayValueType.IsLikelyArrayOrObjectWithArray() && !arrayValueType.IsObject()) |
| { |
| arrayValueType = arrayValueType.SetHasNoMissingValues(false); |
| } |
| arrayValueType = arrayValueType.ToDefiniteObject(); |
| arrayOpnd->SetValueType(arrayValueType); |
| return arrayOpnd; |
| } |
| |
| ///---------------------------------------------------------------------------- |
| /// |
| /// Lowerer::HoistIndirOffset |
| /// |
| /// Replace the offset of the given indir with a new symbol, which becomes the indir index. |
| /// Assign the new symbol by creating an assignment from the constant offset. |
| /// |
| ///---------------------------------------------------------------------------- |
| |
| IR::Instr *Lowerer::HoistIndirOffset(IR::Instr* instr, IR::IndirOpnd *indirOpnd, RegNum regNum) |
| { |
| int32 offset = indirOpnd->GetOffset(); |
| if (indirOpnd->GetIndexOpnd()) |
| { |
| Assert(indirOpnd->GetBaseOpnd()); |
| return Lowerer::HoistIndirOffsetAsAdd(instr, indirOpnd, indirOpnd->GetBaseOpnd(), offset, regNum); |
| } |
| IR::IntConstOpnd *offsetOpnd = IR::IntConstOpnd::New(offset, TyInt32, instr->m_func); |
| IR::RegOpnd *indexOpnd = IR::RegOpnd::New(StackSym::New(TyMachReg, instr->m_func), regNum, TyMachReg, instr->m_func); |
| |
| #if defined(DBG) && defined(_M_ARM) |
| if (regNum == SCRATCH_REG) |
| { |
        AssertMsg(indirOpnd->GetBaseOpnd()->GetReg() != SCRATCH_REG, "Base register should not also be SCRATCH_REG");
| if (instr->GetSrc1() && instr->GetSrc1()->IsRegOpnd()) |
| { |
| Assert(instr->GetSrc1()->AsRegOpnd()->GetReg() != SCRATCH_REG); |
| } |
| if (instr->GetSrc2() && instr->GetSrc2()->IsRegOpnd()) |
| { |
| Assert(instr->GetSrc2()->AsRegOpnd()->GetReg() != SCRATCH_REG); |
| } |
| if (instr->GetDst() && instr->GetDst()->IsRegOpnd()) |
| { |
| Assert(instr->GetDst()->AsRegOpnd()->GetReg() != SCRATCH_REG); |
| } |
| } |
| #endif |
| // Clear the offset and add a new reg as the index. |
| indirOpnd->SetOffset(0); |
| indirOpnd->SetIndexOpnd(indexOpnd); |
| |
| IR::Instr *instrAssign = Lowerer::InsertMove(indexOpnd, offsetOpnd, instr); |
| indexOpnd->m_sym->SetIsIntConst(offset); |
| return instrAssign; |
| } |
| |
| IR::Instr *Lowerer::HoistIndirOffsetAsAdd(IR::Instr* instr, IR::IndirOpnd *orgOpnd, IR::Opnd *baseOpnd, int offset, RegNum regNum) |
| { |
| IR::RegOpnd *newBaseOpnd = IR::RegOpnd::New(StackSym::New(TyMachPtr, instr->m_func), regNum, TyMachPtr, instr->m_func); |
| |
| IR::IntConstOpnd *src2 = IR::IntConstOpnd::New(offset, TyInt32, instr->m_func); |
| |
| IR::Instr * instrAdd = IR::Instr::New(Js::OpCode::Add_A, newBaseOpnd, baseOpnd, src2, instr->m_func); |
| LowererMD::ChangeToAdd(instrAdd, false); |
| instr->InsertBefore(instrAdd); |
| |
| orgOpnd->ReplaceBaseOpnd(newBaseOpnd); |
| orgOpnd->SetOffset(0); |
| |
| return instrAdd; |
| } |
| |
| IR::Instr *Lowerer::HoistIndirIndexOpndAsAdd(IR::Instr* instr, IR::IndirOpnd *orgOpnd, IR::Opnd *baseOpnd, IR::Opnd *indexOpnd, RegNum regNum) |
| { |
| IR::RegOpnd *newBaseOpnd = IR::RegOpnd::New(StackSym::New(TyMachPtr, instr->m_func), regNum, TyMachPtr, instr->m_func); |
| |
| IR::Instr * instrAdd = IR::Instr::New(Js::OpCode::Add_A, newBaseOpnd, baseOpnd, indexOpnd->UseWithNewType(TyMachPtr, instr->m_func), instr->m_func); |
| LowererMD::ChangeToAdd(instrAdd, false); |
| instr->InsertBefore(instrAdd); |
| |
| orgOpnd->ReplaceBaseOpnd(newBaseOpnd); |
| orgOpnd->SetIndexOpnd(nullptr); |
| |
| return instrAdd; |
| } |
| |
| ///---------------------------------------------------------------------------- |
| /// |
| /// Lowerer::HoistSymOffset |
| /// |
| /// Replace the given sym with an indir using the given base and offset. |
| /// (This is used, for instance, to hoist a sym offset that is too large to encode.) |
| /// |
| ///---------------------------------------------------------------------------- |
| |
| IR::Instr *Lowerer::HoistSymOffset(IR::Instr *instr, IR::SymOpnd *symOpnd, RegNum baseReg, uint32 offset, RegNum regNum) |
| { |
| IR::RegOpnd *baseOpnd = IR::RegOpnd::New(nullptr, baseReg, TyMachPtr, instr->m_func); |
| IR::IndirOpnd *indirOpnd = IR::IndirOpnd::New(baseOpnd, offset, symOpnd->GetType(), instr->m_func); |
| if (symOpnd == instr->GetDst()) |
| { |
| instr->ReplaceDst(indirOpnd); |
| } |
| else |
| { |
| instr->ReplaceSrc(symOpnd, indirOpnd); |
| } |
| |
| return Lowerer::HoistIndirOffset(instr, indirOpnd, regNum); |
| } |
| |
| IR::Instr *Lowerer::HoistSymOffsetAsAdd(IR::Instr* instr, IR::SymOpnd *orgOpnd, IR::Opnd *baseOpnd, int offset, RegNum regNum) |
| { |
| IR::IndirOpnd *newIndirOpnd = IR::IndirOpnd::New(baseOpnd->AsRegOpnd(), 0, TyMachPtr, instr->m_func); |
| instr->Replace(orgOpnd, newIndirOpnd); // Replace SymOpnd with IndirOpnd |
| return Lowerer::HoistIndirOffsetAsAdd(instr, newIndirOpnd, baseOpnd, offset, regNum); |
| } |
| |
| IR::LabelInstr *Lowerer::InsertLabel(const bool isHelper, IR::Instr *const insertBeforeInstr) |
| { |
| Assert(insertBeforeInstr); |
| |
| Func *const func = insertBeforeInstr->m_func; |
| |
| IR::LabelInstr *const instr = IR::LabelInstr::New(Js::OpCode::Label, func, isHelper); |
| |
| insertBeforeInstr->InsertBefore(instr); |
| return instr; |
| } |
| |
| IR::Instr *Lowerer::InsertMoveWithBarrier(IR::Opnd *dst, IR::Opnd *src, IR::Instr *const insertBeforeInstr) |
| { |
| return Lowerer::InsertMove(dst, src, insertBeforeInstr, true); |
| } |
| |
| IR::Instr *Lowerer::InsertMove(IR::Opnd *dst, IR::Opnd *src, IR::Instr *const insertBeforeInstr, bool generateWriteBarrier) |
| { |
| Assert(dst); |
| Assert(src); |
| Assert(insertBeforeInstr); |
| |
| Func *const func = insertBeforeInstr->m_func; |
| |
| if(dst->IsFloat() && src->IsConstOpnd()) |
| { |
| return LoadFloatFromNonReg(src, dst, insertBeforeInstr); |
| } |
| |
| if(TySize[dst->GetType()] < TySize[src->GetType()]) |
| { |
| #if _M_IX86 |
| if (IRType_IsInt64(src->GetType())) |
| { |
| // On x86, if we are trying to move an int64 to a smaller type |
| // Insert a move of the low bits into dst |
| return InsertMove(dst, func->FindOrCreateInt64Pair(src).low, insertBeforeInstr, generateWriteBarrier); |
| } |
| else |
| #endif |
| { |
| src = src->UseWithNewType(dst->GetType(), func); |
| } |
| } |
| IR::Instr * instr = IR::Instr::New(Js::OpCode::Ld_A, dst, src, func); |
| |
| insertBeforeInstr->InsertBefore(instr); |
| if (generateWriteBarrier) |
| { |
| instr = LowererMD::ChangeToWriteBarrierAssign(instr, func); |
| } |
| else |
| { |
| LowererMD::ChangeToAssignNoBarrierCheck(instr); |
| } |
| |
| return instr; |
| } |
| |
| IR::BranchInstr *Lowerer::InsertBranch( |
| const Js::OpCode opCode, |
| IR::LabelInstr *const target, |
| IR::Instr *const insertBeforeInstr) |
| { |
| return InsertBranch(opCode, false /* isUnsigned */, target, insertBeforeInstr); |
| } |
| |
| IR::BranchInstr *Lowerer::InsertBranch( |
| const Js::OpCode opCode, |
| const bool isUnsigned, |
| IR::LabelInstr *const target, |
| IR::Instr *const insertBeforeInstr) |
| { |
| Assert(target); |
| Assert(insertBeforeInstr); |
| |
| Func *const func = insertBeforeInstr->m_func; |
| |
| IR::BranchInstr *const instr = IR::BranchInstr::New(opCode, target, func); |
| if(!instr->IsLowered()) |
| { |
| if(opCode == Js::OpCode::Br) |
| { |
| instr->m_opcode = LowererMD::MDUncondBranchOpcode; |
| } |
| else if(isUnsigned) |
| { |
| instr->m_opcode = LowererMD::MDUnsignedBranchOpcode(opCode); |
| } |
| else |
| { |
| instr->m_opcode = LowererMD::MDBranchOpcode(opCode); |
| } |
| } |
| |
| insertBeforeInstr->InsertBefore(instr); |
| return instr; |
| } |
| |
| IR::Instr *Lowerer::InsertCompare(IR::Opnd *const src1, IR::Opnd *const src2, IR::Instr *const insertBeforeInstr) |
| { |
| Assert(src1); |
| Assert(!src1->IsFloat64()); // not implemented |
| Assert(src2); |
| Assert(!src2->IsFloat64()); // not implemented |
| Assert(!src1->IsEqual(src2)); |
| Assert(insertBeforeInstr); |
| |
| Func *const func = insertBeforeInstr->m_func; |
| |
| IR::Instr *const instr = IR::Instr::New(Js::OpCode::CMP, func); |
| instr->SetSrc1(src1); |
| instr->SetSrc2(src2); |
| |
| insertBeforeInstr->InsertBefore(instr); |
| LowererMD::Legalize(instr); |
| return instr; |
| } |
| |
| IR::BranchInstr *Lowerer::InsertCompareBranch( |
| IR::Opnd *const compareSrc1, |
| IR::Opnd *const compareSrc2, |
| Js::OpCode branchOpCode, |
| IR::LabelInstr *const target, |
| IR::Instr *const insertBeforeInstr, |
| const bool ignoreNaN) |
| { |
| return InsertCompareBranch(compareSrc1, compareSrc2, branchOpCode, false /* isUnsigned */, target, insertBeforeInstr, ignoreNaN); |
| } |
| |
| IR::BranchInstr *Lowerer::InsertCompareBranch( |
| IR::Opnd *compareSrc1, |
| IR::Opnd *compareSrc2, |
| Js::OpCode branchOpCode, |
| const bool isUnsigned, |
| IR::LabelInstr *const target, |
| IR::Instr *const insertBeforeInstr, |
| const bool ignoreNaN) |
| { |
| Assert(compareSrc1); |
| Assert(compareSrc2); |
| |
| Func *const func = insertBeforeInstr->m_func; |
| |
| if(compareSrc1->IsFloat()) |
| { |
| Assert(compareSrc2->IsFloat()); |
| Assert(!isUnsigned); |
| IR::BranchInstr *const instr = IR::BranchInstr::New(branchOpCode, target, compareSrc1, compareSrc2, func); |
| insertBeforeInstr->InsertBefore(instr); |
| return LowererMD::LowerFloatCondBranch(instr, ignoreNaN); |
| } |
| #ifdef _M_IX86 |
| else if (compareSrc1->IsInt64()) |
| { |
| Assert(compareSrc2->IsInt64()); |
| IR::BranchInstr *const instr = IR::BranchInstr::New(branchOpCode, target, compareSrc1, compareSrc2, func); |
| insertBeforeInstr->InsertBefore(instr); |
| m_lowererMD.EmitInt64Instr(instr); |
| return instr; |
| } |
| #endif |
| |
| Js::OpCode swapSrcsBranchOpCode; |
| switch(branchOpCode) |
| { |
| case Js::OpCode::BrEq_A: |
| case Js::OpCode::BrNeq_A: |
| swapSrcsBranchOpCode = branchOpCode; |
| goto Common_BrEqNeqGeGtLeLt; |
| |
| case Js::OpCode::BrGe_A: |
| swapSrcsBranchOpCode = Js::OpCode::BrLe_A; |
| goto Common_BrEqNeqGeGtLeLt; |
| |
| case Js::OpCode::BrGt_A: |
| swapSrcsBranchOpCode = Js::OpCode::BrLt_A; |
| goto Common_BrEqNeqGeGtLeLt; |
| |
| case Js::OpCode::BrLe_A: |
| swapSrcsBranchOpCode = Js::OpCode::BrGe_A; |
| goto Common_BrEqNeqGeGtLeLt; |
| |
| case Js::OpCode::BrLt_A: |
| swapSrcsBranchOpCode = Js::OpCode::BrGt_A; |
| // fall through |
| |
| Common_BrEqNeqGeGtLeLt: |
| // Check if src1 is a constant and src2 is not, and facilitate folding the constant into the Cmp instruction |
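        // e.g. (illustrative): `CMP 5, reg / JGE`, which x86 cannot encode with the
        // constant on the left, becomes `CMP reg, 5 / JLE`.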
| if( ( |
| compareSrc1->IsIntConstOpnd() || |
| ( |
| compareSrc1->IsAddrOpnd() && |
| Math::FitsInDWord(reinterpret_cast<size_t>(compareSrc1->AsAddrOpnd()->m_address)) |
| ) |
| ) && |
| !compareSrc2->IsIntConstOpnd() && |
| !compareSrc2->IsAddrOpnd()) |
| { |
| // Swap the sources and branch |
| IR::Opnd *const tempSrc = compareSrc1; |
| compareSrc1 = compareSrc2; |
| compareSrc2 = tempSrc; |
| branchOpCode = swapSrcsBranchOpCode; |
| } |
| |
| // Check for compare with zero, to prefer using Test instead of Cmp |
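        // e.g. (illustrative): `CMP reg, 0 / JEQ` becomes `TEST reg, reg / JEQ`.
        // Signed `>= 0` / `< 0` checks are additionally remapped below to a
        // sign-flag branch via MDCompareWithZeroBranchOpcode.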
| if( !compareSrc1->IsRegOpnd() || |
| !( |
| (compareSrc2->IsIntConstOpnd() && compareSrc2->AsIntConstOpnd()->GetValue() == 0) || |
| (compareSrc2->IsAddrOpnd() && !compareSrc2->AsAddrOpnd()->m_address) |
| ) || |
| branchOpCode == Js::OpCode::BrGt_A || branchOpCode == Js::OpCode::BrLe_A) |
| { |
| goto Default; |
| } |
| if(branchOpCode == Js::OpCode::BrGe_A || branchOpCode == Js::OpCode::BrLt_A) |
| { |
| if(isUnsigned) |
| { |
| goto Default; |
| } |
| branchOpCode = LowererMD::MDCompareWithZeroBranchOpcode(branchOpCode); |
| } |
| if(!compareSrc2->IsInUse()) |
| { |
| compareSrc2->Free(func); |
| } |
| InsertTest(compareSrc1, compareSrc1, insertBeforeInstr); |
| break; |
| |
| default: |
| Default: |
| InsertCompare(compareSrc1, compareSrc2, insertBeforeInstr); |
| break; |
| } |
| |
| return InsertBranch(branchOpCode, isUnsigned, target, insertBeforeInstr); |
| } |
| |
| IR::Instr *Lowerer::InsertTest(IR::Opnd *const src1, IR::Opnd *const src2, IR::Instr *const insertBeforeInstr) |
| { |
| Assert(src1); |
| Assert(!src1->IsFloat64()); // not implemented |
| Assert(src2); |
| Assert(!src2->IsFloat64()); // not implemented |
| #if !TARGET_64 |
| Assert(!src1->IsInt64()); // not implemented |
| Assert(!src2->IsInt64()); // not implemented |
| #endif |
| Assert(insertBeforeInstr); |
| |
| Func *const func = insertBeforeInstr->m_func; |
| |
| IR::Instr *const instr = IR::Instr::New(LowererMD::MDTestOpcode, func); |
| instr->SetSrc1(src1); |
| instr->SetSrc2(src2); |
| |
| insertBeforeInstr->InsertBefore(instr); |
| LowererMD::Legalize(instr); |
| return instr; |
| } |
| |
| IR::BranchInstr *Lowerer::InsertTestBranch( |
| IR::Opnd *const testSrc1, |
| IR::Opnd *const testSrc2, |
| const Js::OpCode branchOpCode, |
| IR::LabelInstr *const target, |
| IR::Instr *const insertBeforeInstr) |
| { |
| return InsertTestBranch(testSrc1, testSrc2, branchOpCode, false /* isUnsigned */, target, insertBeforeInstr); |
| } |
| |
| IR::BranchInstr *Lowerer::InsertTestBranch( |
| IR::Opnd *const testSrc1, |
| IR::Opnd *const testSrc2, |
| const Js::OpCode branchOpCode, |
| const bool isUnsigned, |
| IR::LabelInstr *const target, |
| IR::Instr *const insertBeforeInstr) |
| { |
| InsertTest(testSrc1, testSrc2, insertBeforeInstr); |
| return InsertBranch(branchOpCode, isUnsigned, target, insertBeforeInstr); |
| } |
| |
/* Inserts an add with an overflow check; the caller inserts its overflow handling
 * (typically throwing OOM) at *onOverflowInsertBeforeInstrRef:
 *     add dst, src
 *     jno $continueLabel
 *     <overflow code>
 *   $continueLabel : fall through
 */
| void Lowerer::InsertAddWithOverflowCheck( |
| const bool needFlags, |
| IR::Opnd *const dst, |
| IR::Opnd *src1, |
| IR::Opnd *src2, |
| IR::Instr *const insertBeforeInstr, |
| IR::Instr **const onOverflowInsertBeforeInstrRef) |
| { |
| Func * func = insertBeforeInstr->m_func; |
| InsertAdd(needFlags, dst, src1, src2, insertBeforeInstr); |
| |
| IR::LabelInstr *const continueLabel = IR::LabelInstr::New(Js::OpCode::Label, func, false); |
| InsertBranch(LowererMD::MDNotOverflowBranchOpcode, continueLabel, insertBeforeInstr); |
| |
| *onOverflowInsertBeforeInstrRef = continueLabel; |
| } |
| |
| IR::Instr *Lowerer::InsertAdd( |
| const bool needFlags, |
| IR::Opnd *const dst, |
| IR::Opnd *src1, |
| IR::Opnd *src2, |
| IR::Instr *const insertBeforeInstr) |
| { |
| Assert(dst); |
| Assert(src1); |
| Assert(src2); |
| Assert(insertBeforeInstr); |
| |
| Func *const func = insertBeforeInstr->m_func; |
| |
| if(src2->IsIntConstOpnd()) |
| { |
| IR::IntConstOpnd *const intConstOpnd = src2->AsIntConstOpnd(); |
| const IntConstType value = intConstOpnd->GetValue(); |
| if(value < 0 && value != IntConstMin) |
| { |
| // Change (s1 = s1 + -5) into (s1 = s1 - 5) |
| IR::IntConstOpnd *const newSrc2 = intConstOpnd->CopyInternal(func); |
| newSrc2->SetValue(-value); |
| return InsertSub(needFlags, dst, src1, newSrc2, insertBeforeInstr); |
| } |
| } |
| else if(src1->IsIntConstOpnd()) |
| { |
| IR::IntConstOpnd *const intConstOpnd = src1->AsIntConstOpnd(); |
| const IntConstType value = intConstOpnd->GetValue(); |
| if(value < 0 && value != IntConstMin) |
| { |
| // Change (s1 = -5 + s1) into (s1 = s1 - 5) |
| IR::Opnd *const newSrc1 = src2; |
| IR::IntConstOpnd *const newSrc2 = intConstOpnd->CopyInternal(func); |
| newSrc2->SetValue(-value); |
| return InsertSub(needFlags, dst, newSrc1, newSrc2, insertBeforeInstr); |
| } |
| } |
| |
| IR::Instr *const instr = IR::Instr::New(Js::OpCode::Add_A, dst, src1, src2, func); |
| |
| insertBeforeInstr->InsertBefore(instr); |
| LowererMD::ChangeToAdd(instr, needFlags); |
| LowererMD::Legalize(instr); |
| return instr; |
| } |
| |
| IR::Instr *Lowerer::InsertSub( |
| const bool needFlags, |
| IR::Opnd *const dst, |
| IR::Opnd *src1, |
| IR::Opnd *src2, |
| IR::Instr *const insertBeforeInstr) |
| { |
| Assert(dst); |
| Assert(src1); |
| Assert(src2); |
| Assert(insertBeforeInstr); |
| |
| Func *const func = insertBeforeInstr->m_func; |
| |
| if(src2->IsIntConstOpnd()) |
| { |
| IR::IntConstOpnd *const intConstOpnd = src2->AsIntConstOpnd(); |
| const IntConstType value = intConstOpnd->GetValue(); |
| if(value < 0 && value != IntConstMin) |
| { |
| // Change (s1 = s1 - -5) into (s1 = s1 + 5) |
| IR::IntConstOpnd *const newSrc2 = intConstOpnd->CopyInternal(func); |
| newSrc2->SetValue(-value); |
| return InsertAdd(needFlags, dst, src1, newSrc2, insertBeforeInstr); |
| } |
| } |
| |
| IR::Instr *const instr = IR::Instr::New(Js::OpCode::Sub_A, dst, src1, src2, func); |
| |
| insertBeforeInstr->InsertBefore(instr); |
| LowererMD::ChangeToSub(instr, needFlags); |
| LowererMD::Legalize(instr); |
| return instr; |
| } |
| |
| IR::Instr *Lowerer::InsertLea(IR::RegOpnd *const dst, IR::Opnd *const src, IR::Instr *const insertBeforeInstr) |
| { |
| Assert(dst); |
| Assert(src); |
| Assert(src->IsIndirOpnd() || src->IsSymOpnd()); |
| Assert(insertBeforeInstr); |
| |
| Func *const func = insertBeforeInstr->m_func; |
| |
| IR::Instr *const instr = IR::Instr::New(LowererMD::MDLea, dst, src, func); |
| |
| insertBeforeInstr->InsertBefore(instr); |
| return ChangeToLea(instr); |
| } |
| |
| IR::Instr * |
| Lowerer::ChangeToLea(IR::Instr * instr) |
| { |
| Assert(instr); |
| Assert(instr->GetDst()); |
| Assert(instr->GetDst()->IsRegOpnd()); |
| Assert(instr->GetSrc1()); |
| Assert(instr->GetSrc1()->IsIndirOpnd() || instr->GetSrc1()->IsSymOpnd()); |
| Assert(!instr->GetSrc2()); |
| |
| instr->m_opcode = LowererMD::MDLea; |
| LowererMD::Legalize(instr); |
| return instr; |
| } |
| |
| #if _M_X64 |
| IR::Instr *Lowerer::InsertMoveBitCast( |
| IR::Opnd *const dst, |
| IR::Opnd *const src1, |
| IR::Instr *const insertBeforeInstr) |
| { |
| Assert(dst); |
| Assert(dst->GetType() == TyFloat64); |
| Assert(src1); |
| Assert(src1->GetType() == TyUint64); |
| Assert(insertBeforeInstr); |
| |
| Func *const func = insertBeforeInstr->m_func; |
| |
| IR::Instr *const instr = IR::Instr::New(LowererMD::MDMovUint64ToFloat64Opcode, dst, src1, func); |
| |
| insertBeforeInstr->InsertBefore(instr); |
| LowererMD::Legalize(instr); |
| return instr; |
| } |
| #endif |
| |
| IR::Instr *Lowerer::InsertXor( |
| IR::Opnd *const dst, |
| IR::Opnd *const src1, |
| IR::Opnd *const src2, |
| IR::Instr *const insertBeforeInstr) |
| { |
| Assert(dst); |
| Assert(src1); |
| Assert(src2); |
| Assert(insertBeforeInstr); |
| |
| Func *const func = insertBeforeInstr->m_func; |
| |
| IR::Instr *const instr = IR::Instr::New(LowererMD::MDXorOpcode, dst, src1, src2, func); |
| |
| insertBeforeInstr->InsertBefore(instr); |
| LowererMD::Legalize(instr); |
| return instr; |
| } |
| |
| IR::Instr *Lowerer::InsertAnd( |
| IR::Opnd *const dst, |
| IR::Opnd *const src1, |
| IR::Opnd *const src2, |
| IR::Instr *const insertBeforeInstr) |
| { |
| Assert(dst); |
| Assert(src1); |
| Assert(src2); |
| Assert(insertBeforeInstr); |
| |
| Func *const func = insertBeforeInstr->m_func; |
| |
| IR::Instr *const instr = IR::Instr::New(Js::OpCode::AND, dst, src1, src2, func); |
| |
| insertBeforeInstr->InsertBefore(instr); |
| LowererMD::Legalize(instr); |
| return instr; |
| } |
| |
| IR::Instr *Lowerer::InsertOr( |
| IR::Opnd *const dst, |
| IR::Opnd *const src1, |
| IR::Opnd *const src2, |
| IR::Instr *const insertBeforeInstr) |
| { |
| Assert(dst); |
| Assert(src1); |
| Assert(src2); |
| Assert(insertBeforeInstr); |
| |
| Func *const func = insertBeforeInstr->m_func; |
| |
| IR::Instr *const instr = IR::Instr::New(LowererMD::MDOrOpcode, dst, src1, src2, func); |
| |
| insertBeforeInstr->InsertBefore(instr); |
| LowererMD::Legalize(instr); |
| return instr; |
| } |
| |
| IR::Instr *Lowerer::InsertShift( |
| const Js::OpCode opCode, |
| const bool needFlags, |
| IR::Opnd *const dst, |
| IR::Opnd *const src1, |
| IR::Opnd *const src2, |
| IR::Instr *const insertBeforeInstr) |
| { |
| Assert(dst); |
| Assert(!dst->IsFloat64()); // not implemented |
| Assert(src1); |
| Assert(!src1->IsFloat64()); // not implemented |
| Assert(src2); |
| Assert(!src2->IsFloat64()); // not implemented |
| Assert(insertBeforeInstr); |
| |
| Func *const func = insertBeforeInstr->m_func; |
| |
| IR::Instr *const instr = IR::Instr::New(opCode, dst, src1, src2, func); |
| |
| insertBeforeInstr->InsertBefore(instr); |
| LowererMD::ChangeToShift(instr, needFlags); |
| LowererMD::Legalize(instr); |
| return instr; |
| } |
| |
| IR::Instr *Lowerer::InsertShiftBranch( |
| const Js::OpCode shiftOpCode, |
| IR::Opnd *const dst, |
| IR::Opnd *const src1, |
| IR::Opnd *const src2, |
| const Js::OpCode branchOpCode, |
| IR::LabelInstr *const target, |
| IR::Instr *const insertBeforeInstr) |
| { |
| return InsertShiftBranch(shiftOpCode, dst, src1, src2, branchOpCode, false /* isUnsigned */, target, insertBeforeInstr); |
| } |
| |
| IR::Instr *Lowerer::InsertShiftBranch( |
| const Js::OpCode shiftOpCode, |
| IR::Opnd *const dst, |
| IR::Opnd *const src1, |
| IR::Opnd *const src2, |
| const Js::OpCode branchOpCode, |
| const bool isUnsigned, |
| IR::LabelInstr *const target, |
| IR::Instr *const insertBeforeInstr) |
| { |
| InsertShift(shiftOpCode, true /* needFlags */, dst, src1, src2, insertBeforeInstr); |
| return InsertBranch(branchOpCode, isUnsigned, target, insertBeforeInstr); |
| } |
| |
| IR::Instr *Lowerer::InsertConvertFloat32ToFloat64( |
| IR::Opnd *const dst, |
| IR::Opnd *const src, |
| IR::Instr *const insertBeforeInstr) |
| { |
| Assert(dst); |
| Assert(dst->IsFloat64()); |
| Assert(src); |
| Assert(src->IsFloat32()); |
| Assert(insertBeforeInstr); |
| |
| Func *const func = insertBeforeInstr->m_func; |
| |
| IR::Instr *const instr = IR::Instr::New(LowererMD::MDConvertFloat32ToFloat64Opcode, dst, src, func); |
| |
| insertBeforeInstr->InsertBefore(instr); |
| LowererMD::Legalize(instr); |
| return instr; |
| } |
| |
| IR::Instr *Lowerer::InsertConvertFloat64ToFloat32( |
| IR::Opnd *const dst, |
| IR::Opnd *const src, |
| IR::Instr *const insertBeforeInstr) |
| { |
| Assert(dst); |
| Assert(dst->IsFloat32()); |
| Assert(src); |
| Assert(src->IsFloat64()); |
| Assert(insertBeforeInstr); |
| |
| Func *const func = insertBeforeInstr->m_func; |
| |
| IR::Instr *const instr = IR::Instr::New(LowererMD::MDConvertFloat64ToFloat32Opcode, dst, src, func); |
| |
| insertBeforeInstr->InsertBefore(instr); |
| LowererMD::Legalize(instr); |
| return instr; |
| } |
| |
| void Lowerer::InsertDecUInt32PreventOverflow( |
| IR::Opnd *const dst, |
| IR::Opnd *const src, |
| IR::Instr *const insertBeforeInstr, |
| IR::Instr * *const onOverflowInsertBeforeInstrRef) |
| { |
| Assert(dst); |
| Assert(dst->GetType() == TyUint32); |
| Assert(src); |
| Assert(src->GetType() == TyUint32); |
| Assert(insertBeforeInstr); |
| |
| Func *const func = insertBeforeInstr->m_func; |
| |
| // Generate: |
| // subs temp, src, 1 |
| // bcs $overflow |
| // mov dst, temp |
| // b $continue |
| // $overflow: |
| // mov dst, 0 |
| // $continue: |
| |
| IR::LabelInstr *const overflowLabel = Lowerer::InsertLabel(false, insertBeforeInstr); |
| |
| // subs temp, src, 1 |
| IR::RegOpnd *const tempOpnd = IR::RegOpnd::New(StackSym::New(TyUint32, func), TyUint32, func); |
| const IR::AutoReuseOpnd autoReuseTempOpnd(tempOpnd, func); |
| Lowerer::InsertSub(true, tempOpnd, src, IR::IntConstOpnd::New(1, TyUint32, func, true), overflowLabel); |
| |
| // bcs $overflow |
| Lowerer::InsertBranch(Js::OpCode::BrLt_A, true, overflowLabel, overflowLabel); |
| |
| // mov dst, temp |
| Lowerer::InsertMove(dst, tempOpnd, overflowLabel); |
| |
| const bool dstEqualsSrc = dst->IsEqual(src); |
| if(!dstEqualsSrc || onOverflowInsertBeforeInstrRef) |
| { |
| // b $continue |
| // $overflow: |
| // mov dst, 0 |
| // $continue: |
| IR::LabelInstr *const continueLabel = Lowerer::InsertLabel(false, insertBeforeInstr); |
| Lowerer::InsertBranch(Js::OpCode::Br, continueLabel, overflowLabel); |
| if(!dstEqualsSrc) |
| { |
| Lowerer::InsertMove(dst, IR::IntConstOpnd::New(0, TyUint32, func, true), continueLabel); |
| } |
| |
| if(onOverflowInsertBeforeInstrRef) |
| { |
| *onOverflowInsertBeforeInstrRef = continueLabel; |
| } |
| } |
| else |
| { |
| // $overflow: |
| } |
| } |
| |
| void Lowerer::InsertFloatCheckForZeroOrNanBranch( |
| IR::Opnd *const src, |
| const bool branchOnZeroOrNan, |
| IR::LabelInstr *const target, |
| IR::LabelInstr *const fallthroughLabel, |
| IR::Instr *const insertBeforeInstr) |
| { |
| Assert(src); |
| Assert(src->IsFloat64()); |
| Assert(target); |
| Assert(!fallthroughLabel || fallthroughLabel != target); |
| Assert(insertBeforeInstr); |
| |
| Func *const func = insertBeforeInstr->m_func; |
| |
| IR::BranchInstr *const branchOnEqualOrNotEqual = |
| InsertCompareBranch( |
| src, |
| IR::MemRefOpnd::New(func->GetThreadContextInfo()->GetDoubleZeroAddr(), TyFloat64, func), |
| branchOnZeroOrNan ? Js::OpCode::BrEq_A : Js::OpCode::BrNeq_A, |
| target, |
| insertBeforeInstr, |
| true /* ignoreNaN */); |
| |
| // x86/x64 |
| // When NaN is ignored, on x86 and x64, JE branches when equal or unordered since an unordered result sets the zero |
| // flag, and JNE branches when not equal and not unordered. By comparing with zero, JE will branch when src is zero or |
| // NaN, and JNE will branch when src is not zero and not NaN. |
| // |
| // ARM |
| // When NaN is ignored, BEQ branches when equal and not unordered, and BNE branches when not equal or unordered. So, |
| // when comparing src with zero, an unordered check needs to be added before the BEQ/BNE. |
| branchOnEqualOrNotEqual; // satisfy the compiler |
| #ifdef _M_ARM32_OR_ARM64 |
| InsertBranch( |
| Js::OpCode::BVS, |
| branchOnZeroOrNan |
| ? target |
| : fallthroughLabel ? fallthroughLabel : insertBeforeInstr->m_prev->GetOrCreateContinueLabel(), |
| branchOnEqualOrNotEqual); |
| #endif |
| } |
| |
| IR::IndirOpnd* |
| Lowerer::GenerateFastElemICommon( |
| _In_ IR::Instr* elemInstr, |
| _In_ bool isStore, |
| _In_ IR::IndirOpnd* indirOpnd, |
| _In_ IR::LabelInstr* labelHelper, |
| _In_ IR::LabelInstr* labelCantUseArray, |
| _In_opt_ IR::LabelInstr* labelFallthrough, |
| _Out_ bool* pIsTypedArrayElement, |
| _Out_ bool* pIsStringIndex, |
| _Out_opt_ bool* emitBailoutRef, |
| _Outptr_opt_result_maybenull_ IR::Opnd** maskOpnd, |
| _Outptr_opt_result_maybenull_ IR::LabelInstr** pLabelSegmentLengthIncreased, // = nullptr |
| _In_ bool checkArrayLengthOverflow, // = true |
| _In_ bool forceGenerateFastPath, // = false |
| _In_ bool returnLength, // = false |
| _In_opt_ IR::LabelInstr* bailOutLabelInstr, // = nullptr |
| _Out_opt_ bool* indirOpndOverflowed, // = nullptr |
| _In_ Js::FldInfoFlags flags) // = Js::FldInfo_NoInfo |
| { |
| *pIsTypedArrayElement = false; |
| *pIsStringIndex = false; |
| if(pLabelSegmentLengthIncreased) |
| { |
| *pLabelSegmentLengthIncreased = nullptr; |
| } |
| if (maskOpnd) |
| { |
| *maskOpnd = nullptr; |
| } |
| if (indirOpndOverflowed) |
| { |
| *indirOpndOverflowed = false; |
| } |
| if (emitBailoutRef) |
| { |
| *emitBailoutRef = false; |
| } |
| IR::RegOpnd *baseOpnd = indirOpnd->GetBaseOpnd(); |
| AssertMsg(baseOpnd, "This shouldn't be NULL"); |
| |
| // Caution: If making changes to the conditions under which we don't emit the typical array checks, make sure |
| // the code in GlobOpt::ShouldAssumeIndirOpndHasNonNegativeIntIndex is updated accordingly. We don't want the |
| // global optimizer to type specialize instructions, for which the lowerer is forced to emit unconditional |
| // bailouts. |
| if (baseOpnd->IsTaggedInt()) |
| { |
        return nullptr;
| } |
| |
| IR::RegOpnd *indexOpnd = indirOpnd->GetIndexOpnd(); |
| if (indexOpnd) |
| { |
| const bool normalLocation = (flags & (Js::FldInfo_FromLocal | Js::FldInfo_FromProto | Js::FldInfo_FromLocalWithoutProperty)) != 0; |
| const bool normalSlots = (flags & (Js::FldInfo_FromAuxSlots | Js::FldInfo_FromInlineSlots)) != 0; |
| const bool generateFastpath = !baseOpnd->GetValueType().IsLikelyOptimizedTypedArray() && normalLocation && normalSlots && flags != Js::FldInfo_NoInfo; |
| if (indexOpnd->GetValueType().IsLikelyString()) |
| { |
| if (generateFastpath) |
| { |
                // If profile data says it's a typed array, do not generate the property string fast path, as the src could be a temp and that would cause a bug.
| *pIsTypedArrayElement = false; |
| *pIsStringIndex = true; |
| return GenerateFastElemIStringIndexCommon(elemInstr, isStore, indirOpnd, labelHelper, flags); |
| } |
| else |
| { |
| // There's no point in generating the int index fast path if we know the index has a string value. |
| return nullptr; |
| } |
| } |
| else if (indexOpnd->GetValueType().IsLikelySymbol()) |
| { |
| if (generateFastpath) |
| { |
                // If profile data says it's a typed array, do not generate the symbol fast path, as the src could be a temp and that would cause a bug.
| return GenerateFastElemISymbolIndexCommon(elemInstr, isStore, indirOpnd, labelHelper, flags); |
| } |
| else |
| { |
| // There's no point in generating the int index fast path if we know the index has a symbol value. |
| return nullptr; |
| } |
| } |
| } |
| return |
| GenerateFastElemIIntIndexCommon( |
| elemInstr, |
| isStore, |
| indirOpnd, |
| labelHelper, |
| labelCantUseArray, |
| labelFallthrough, |
| pIsTypedArrayElement, |
| emitBailoutRef, |
| pLabelSegmentLengthIncreased, |
| checkArrayLengthOverflow, |
| maskOpnd, |
| false, |
| returnLength, |
| bailOutLabelInstr, |
| indirOpndOverflowed); |
| } |
| |
| void |
| Lowerer::GenerateDynamicLoadPolymorphicInlineCacheSlot(IR::Instr * instrInsert, IR::RegOpnd * inlineCacheOpnd, IR::Opnd * objectTypeOpnd) |
| { |
| // Generates: |
| // MOV opndOffset, objectTypeOpnd |
| // SHR opndOffset, PolymorphicInlineCacheShift |
| // MOVZX cacheIndexOpnd, inlineCacheOpnd->size |
| // DEC cacheIndexOpnd |
| // AND opndOffset, cacheIndexOpnd |
| // SHL opndOffset, Math::Log2(sizeof(Js::InlineCache)) |
| // MOV inlineCacheOpnd, inlineCacheOpnd->inlineCaches |
| // LEA inlineCacheOpnd, [inlineCacheOpnd + opndOffset] |
| |
| IntConstType rightShiftAmount = PolymorphicInlineCacheShift; |
| IntConstType leftShiftAmount = Math::Log2(sizeof(Js::InlineCache)); |
| Assert(rightShiftAmount > leftShiftAmount); |
| IR::RegOpnd * opndOffset = IR::RegOpnd::New(TyMachPtr, m_func); |
| InsertShift(Js::OpCode::ShrU_A, false, opndOffset, objectTypeOpnd, IR::IntConstOpnd::New(rightShiftAmount, TyUint8, m_func, true), instrInsert); |
| |
| IR::RegOpnd * cacheIndexOpnd = IR::RegOpnd::New(TyMachPtr, m_func); |
| InsertMove(cacheIndexOpnd, IR::IndirOpnd::New(inlineCacheOpnd, Js::PolymorphicInlineCache::GetOffsetOfSize(), TyUint16, m_func), instrInsert); |
| InsertSub(false, cacheIndexOpnd, cacheIndexOpnd, IR::IntConstOpnd::New(1, TyMachPtr, m_func), instrInsert); |
| InsertAnd(opndOffset, opndOffset, cacheIndexOpnd, instrInsert); |
| InsertShift(Js::OpCode::Shl_A, false, opndOffset, opndOffset, IR::IntConstOpnd::New(leftShiftAmount, TyUint8, m_func), instrInsert); |
| InsertMove(inlineCacheOpnd, IR::IndirOpnd::New(inlineCacheOpnd, Js::PolymorphicInlineCache::GetOffsetOfInlineCaches(), TyMachPtr, m_func), instrInsert); |
| InsertLea(inlineCacheOpnd, IR::IndirOpnd::New(inlineCacheOpnd, opndOffset, TyMachPtr, m_func), instrInsert); |
| } |
| |
| // Test that the operand is a PropertyString, or bail to helper |
| void |
| Lowerer::GeneratePropertyStringTest(IR::RegOpnd *srcReg, IR::Instr *instrInsert, IR::LabelInstr *labelHelper, bool isStore) |
| { |
    // Generates:
    //      StringTest(srcReg, $helper)                                ; verify index is a string
    //      CMP [srcReg], PropertyString::`vtable'                     ; is it already a PropertyString?
    //      JEQ $propStrLoaded
    //      CMP [srcReg], LiteralStringWithPropertyStringPtr::`vtable' ; else require a cached PropertyString
    //      JNE $helper
    //      CMP [srcReg + offsetOf(propertyString)], NULL
    //      JEQ $helper
    //      MOV srcReg, [srcReg + offsetOf(propertyString)]
    // $propStrLoaded:
| |
| GenerateStringTest(srcReg, instrInsert, labelHelper); |
| |
| IR::LabelInstr * notPropStrLabel = IR::LabelInstr::New(Js::OpCode::Label, m_func, true); |
| IR::LabelInstr * propStrLoadedLabel = IR::LabelInstr::New(Js::OpCode::Label, m_func); |
| IR::BranchInstr *branchInstr = InsertCompareBranch( |
| IR::IndirOpnd::New(srcReg, 0, TyMachPtr, m_func), |
| LoadVTableValueOpnd(instrInsert, VTableValue::VtablePropertyString), |
| Js::OpCode::BrNeq_A, notPropStrLabel, instrInsert); |
| |
| InsertObjectPoison(srcReg, branchInstr, instrInsert, isStore); |
| |
    InsertBranch(Js::OpCode::Br, propStrLoadedLabel, instrInsert);
| |
| instrInsert->InsertBefore(notPropStrLabel); |
| |
| branchInstr = InsertCompareBranch( |
| IR::IndirOpnd::New(srcReg, 0, TyMachPtr, m_func), |
| LoadVTableValueOpnd(instrInsert, VTableValue::VtableLiteralStringWithPropertyStringPtr), |
| Js::OpCode::BrNeq_A, labelHelper, instrInsert); |
| |
| InsertObjectPoison(srcReg, branchInstr, instrInsert, isStore); |
| |
| IR::IndirOpnd * propStrOpnd = IR::IndirOpnd::New(srcReg, Js::LiteralStringWithPropertyStringPtr::GetOffsetOfPropertyString(), TyMachPtr, m_func); |
    InsertCompareBranch(propStrOpnd, IR::IntConstOpnd::New(NULL, TyMachPtr, m_func), Js::OpCode::BrEq_A, labelHelper, instrInsert);
| |
| // We don't really own srcReg, but it is fine to update it to be the PropertyString, since that is better to have anyway |
| InsertMove(srcReg, propStrOpnd, instrInsert); |
| |
| instrInsert->InsertBefore(propStrLoadedLabel); |
| } |
| |
| IR::IndirOpnd* |
| Lowerer::GenerateFastElemIStringIndexCommon( |
| _In_ IR::Instr* elemInstr, |
| _In_ bool isStore, |
| _In_ IR::IndirOpnd* indirOpnd, |
| _In_ IR::LabelInstr* labelHelper, |
| _In_ Js::FldInfoFlags flags) |
| { |
| IR::RegOpnd *indexOpnd = indirOpnd->GetIndexOpnd(); |
| IR::RegOpnd *baseOpnd = indirOpnd->GetBaseOpnd(); |
| Assert(baseOpnd != nullptr); |
| Assert(indexOpnd->GetValueType().IsLikelyString()); |
| |
| // Generates: |
| // PropertyStringTest(indexOpnd, $helper) ; verify index is string type |
| // FastElemISymbolOrStringIndexCommon(indexOpnd, baseOpnd, $helper) ; shared code with JavascriptSymbol |
| |
| GeneratePropertyStringTest(indexOpnd, elemInstr, labelHelper, isStore); |
| |
| const uint32 inlineCacheOffset = isStore ? Js::PropertyString::GetOffsetOfStElemInlineCache() : Js::PropertyString::GetOffsetOfLdElemInlineCache(); |
| const uint32 hitRateOffset = Js::PropertyString::GetOffsetOfHitRate(); |
| |
| return GenerateFastElemISymbolOrStringIndexCommon(elemInstr, indexOpnd, baseOpnd, inlineCacheOffset, hitRateOffset, labelHelper, flags); |
| } |
| |
| IR::IndirOpnd* |
| Lowerer::GenerateFastElemISymbolIndexCommon( |
| _In_ IR::Instr* elemInstr, |
| _In_ bool isStore, |
| _In_ IR::IndirOpnd* indirOpnd, |
| _In_ IR::LabelInstr* labelHelper, |
| _In_ Js::FldInfoFlags flags) |
| { |
| IR::RegOpnd *indexOpnd = indirOpnd->GetIndexOpnd(); |
| IR::RegOpnd *baseOpnd = indirOpnd->GetBaseOpnd(); |
| Assert(baseOpnd != nullptr); |
| Assert(indexOpnd->GetValueType().IsLikelySymbol()); |
| |
| // Generates: |
| // SymbolTest(indexOpnd, $helper) ; verify index is symbol type |
| // FastElemISymbolOrStringIndexCommon(indexOpnd, baseOpnd, $helper) ; shared code with PropertyString |
| |
| GenerateSymbolTest(indexOpnd, elemInstr, labelHelper); |
| |
| const uint32 inlineCacheOffset = isStore ? Js::JavascriptSymbol::GetOffsetOfStElemInlineCache() : Js::JavascriptSymbol::GetOffsetOfLdElemInlineCache(); |
| const uint32 hitRateOffset = Js::JavascriptSymbol::GetOffsetOfHitRate(); |
| |
| return GenerateFastElemISymbolOrStringIndexCommon(elemInstr, indexOpnd, baseOpnd, inlineCacheOffset, hitRateOffset, labelHelper, flags); |
| } |
| |
| void |
| Lowerer::GenerateFastIsInSymbolOrStringIndex(IR::Instr * instrInsert, IR::RegOpnd *indexOpnd, IR::RegOpnd *baseOpnd, IR::Opnd *dest, uint32 inlineCacheOffset, const uint32 hitRateOffset, IR::LabelInstr * labelHelper, IR::LabelInstr * labelDone) |
| { |
| // Try to look up the property in the cache, or bail to helper |
| GenerateLookUpInIndexCache(instrInsert, indexOpnd, baseOpnd, nullptr /*opndSlotArray*/, nullptr /*opndSlotIndex*/, inlineCacheOffset, hitRateOffset, labelHelper); |
| |
| // MOV dest, true |
| InsertMove(dest, LoadLibraryValueOpnd(instrInsert, LibraryValue::ValueTrue), instrInsert); |
| |
| // JMP labelDone |
| InsertBranch(Js::OpCode::Br, labelDone, instrInsert); |
| } |
| |
| IR::IndirOpnd* |
| Lowerer::GenerateFastElemISymbolOrStringIndexCommon( |
| _In_ IR::Instr* instrInsert, |
| _In_ IR::RegOpnd* indexOpnd, |
| _In_ IR::RegOpnd* baseOpnd, |
| _In_ const uint32 inlineCacheOffset, |
| _In_ const uint32 hitRateOffset, |
| _In_ IR::LabelInstr* labelHelper, |
| _In_ Js::FldInfoFlags flags) |
| { |
| // Try to look up the property in the cache, or bail to helper |
| IR::RegOpnd * opndSlotArray = IR::RegOpnd::New(TyMachReg, instrInsert->m_func); |
| IR::RegOpnd * opndSlotIndex = IR::RegOpnd::New(TyMachReg, instrInsert->m_func); |
| GenerateLookUpInIndexCache(instrInsert, indexOpnd, baseOpnd, opndSlotArray, opndSlotIndex, inlineCacheOffset, hitRateOffset, labelHelper, flags); |
| |
| // return [opndSlotArray + opndSlotIndex * PtrSize] |
| return IR::IndirOpnd::New(opndSlotArray, opndSlotIndex, m_lowererMD.GetDefaultIndirScale(), TyMachReg, instrInsert->m_func); |
| } |
| |
| // Look up a value from the polymorphic inline cache on a PropertyString or Symbol. Offsets are relative to indexOpnd. |
| // Checks local and/or proto caches based on profile data. If the property is not found, jump to the helper. |
| // opndSlotArray is optional; if provided, it will receive the base address of the slot array that contains the property. |
| // opndSlotIndex is optional; if provided, it will receive the index of the match within the slot array. |
| void |
| Lowerer::GenerateLookUpInIndexCache( |
| _In_ IR::Instr* instrInsert, |
| _In_ IR::RegOpnd* indexOpnd, |
| _In_ IR::RegOpnd* baseOpnd, |
| _In_opt_ IR::RegOpnd* opndSlotArray, |
| _In_opt_ IR::RegOpnd* opndSlotIndex, |
| _In_ const uint32 inlineCacheOffset, |
| _In_ const uint32 hitRateOffset, |
| _In_ IR::LabelInstr* labelHelper, |
| _In_ Js::FldInfoFlags flags) // = Js::FldInfo_NoInfo |
| { |
| // Generates: |
| // MOV inlineCacheOpnd, index->inlineCache |
| // GenerateObjectTest(baseOpnd, $helper) ; verify base is an object |
| // MOV objectTypeOpnd, baseOpnd->type |
| // GenerateDynamicLoadPolymorphicInlineCacheSlot(inlineCacheOpnd, objectTypeOpnd) ; loads inline cache for given type |
| // if (checkLocalInlineSlots) |
| // GenerateLookUpInIndexCacheHelper<CheckLocal, CheckInlineSlot> // checks local inline slots, goes to next on failure |
| // if (checkLocalAuxSlots) |
| // GenerateLookUpInIndexCacheHelper<CheckLocal, CheckAuxSlot> // checks local aux slots, goes to next on failure |
| // if (fromProto && fromInlineSlots) |
| // GenerateLookUpInIndexCacheHelper<CheckProto, CheckInlineSlot> // checks proto inline slots, goes to next on failure |
| // if (fromProto && fromAuxSlots) |
| // GenerateLookUpInIndexCacheHelper<CheckProto, CheckAuxSlot> // checks proto aux slots, goes to next on failure |
| // if (doAdd && fromInlineSlots) |
| // GenerateLookUpInIndexCacheHelper<CheckLocal, CheckInlineSlot, DoAdd> // checks typeWithoutProperty inline slots, goes to next on failure |
| // if (doAdd && fromAuxSlots) |
| // GenerateLookUpInIndexCacheHelper<CheckLocal, CheckAuxSlot, DoAdd> // checks typeWithoutProperty aux slots, goes to helper on failure |
| // $slotIndexLoadedLabel |
| // INC indexOpnd->hitRate |
| |
| const bool fromInlineSlots = (flags & Js::FldInfo_FromInlineSlots) == Js::FldInfo_FromInlineSlots; |
| const bool fromAuxSlots = (flags & Js::FldInfo_FromAuxSlots) == Js::FldInfo_FromAuxSlots; |
| const bool fromLocal = (flags & Js::FldInfo_FromLocal) == Js::FldInfo_FromLocal; |
| const bool fromProto = (flags & Js::FldInfo_FromProto) == Js::FldInfo_FromProto; |
| const bool doAdd = (flags & Js::FldInfo_FromLocalWithoutProperty) == Js::FldInfo_FromLocalWithoutProperty; |
| |
| const bool checkLocalInlineSlots = flags == Js::FldInfo_NoInfo || (fromInlineSlots && fromLocal); |
| const bool checkLocalAuxSlots = flags == Js::FldInfo_NoInfo || (fromAuxSlots && fromLocal); |
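    // For example, flags == (FldInfo_FromLocal | FldInfo_FromInlineSlots) emits only the local-inline-slot
    // check, while FldInfo_NoInfo emits both local checks but no proto or add-property checks.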
| |
| m_lowererMD.GenerateObjectTest(baseOpnd, instrInsert, labelHelper); |
| |
| IR::RegOpnd * objectTypeOpnd = IR::RegOpnd::New(TyMachPtr, m_func); |
| InsertMove(objectTypeOpnd, IR::IndirOpnd::New(baseOpnd, Js::RecyclableObject::GetOffsetOfType(), TyMachPtr, m_func), instrInsert); |
| |
| IR::RegOpnd * inlineCacheOpnd = IR::RegOpnd::New(TyMachPtr, m_func); |
| InsertMove(inlineCacheOpnd, IR::IndirOpnd::New(indexOpnd, inlineCacheOffset, TyMachPtr, m_func), instrInsert); |
| |
| GenerateDynamicLoadPolymorphicInlineCacheSlot(instrInsert, inlineCacheOpnd, objectTypeOpnd); |
| |
| IR::LabelInstr* slotIndexLoadedLabel = IR::LabelInstr::New(Js::OpCode::Label, m_func); |
| |
| IR::BranchInstr* branchToPatch = nullptr; |
| IR::LabelInstr* nextLabel = nullptr; |
| IR::RegOpnd* taggedTypeOpnd = nullptr; |
| if (checkLocalInlineSlots) |
| { |
| GenerateLookUpInIndexCacheHelper<true /* CheckLocal */, true /* CheckInlineSlot */, false /* DoAdd */>( |
| instrInsert, |
| baseOpnd, |
| opndSlotArray, |
| opndSlotIndex, |
| objectTypeOpnd, |
| inlineCacheOpnd, |
| slotIndexLoadedLabel, |
| labelHelper, |
| &nextLabel, |
| &branchToPatch, |
| &taggedTypeOpnd); |
| } |
| if (checkLocalAuxSlots) |
| { |
| GenerateLookUpInIndexCacheHelper<true /* CheckLocal */, false /* CheckInlineSlot */, false /* DoAdd */>( |
| instrInsert, |
| baseOpnd, |
| opndSlotArray, |
| opndSlotIndex, |
| objectTypeOpnd, |
| inlineCacheOpnd, |
| slotIndexLoadedLabel, |
| labelHelper, |
| &nextLabel, |
| &branchToPatch, |
| &taggedTypeOpnd); |
| } |
| |
| if (fromProto) |
| { |
| if (fromInlineSlots) |
| { |
| GenerateLookUpInIndexCacheHelper<false /* CheckLocal */, true /* CheckInlineSlot */, false /* DoAdd */>( |
| instrInsert, |
| baseOpnd, |
| opndSlotArray, |
| opndSlotIndex, |
| objectTypeOpnd, |
| inlineCacheOpnd, |
| slotIndexLoadedLabel, |
| labelHelper, |
| &nextLabel, |
| &branchToPatch, |
| &taggedTypeOpnd); |
| } |
| if (fromAuxSlots) |
| { |
| GenerateLookUpInIndexCacheHelper<false /* CheckLocal */, false /* CheckInlineSlot */, false /* DoAdd */>( |
| instrInsert, |
| baseOpnd, |
| opndSlotArray, |
| opndSlotIndex, |
| objectTypeOpnd, |
| inlineCacheOpnd, |
| slotIndexLoadedLabel, |
| labelHelper, |
| &nextLabel, |
| &branchToPatch, |
| &taggedTypeOpnd); |
| } |
| } |
| if (doAdd) |
| { |
| Assert(opndSlotArray); |
| |
| if (fromInlineSlots) |
| { |
| GenerateLookUpInIndexCacheHelper<true /* CheckLocal */, true /* CheckInlineSlot */, true /* DoAdd */>( |
| instrInsert, |
| baseOpnd, |
| opndSlotArray, |
| opndSlotIndex, |
| objectTypeOpnd, |
| inlineCacheOpnd, |
| slotIndexLoadedLabel, |
| labelHelper, |
| &nextLabel, |
| &branchToPatch, |
| &taggedTypeOpnd); |
| } |
| if (fromAuxSlots) |
| { |
| GenerateLookUpInIndexCacheHelper<true /* CheckLocal */, false /* CheckInlineSlot */, true /* DoAdd */>( |
| instrInsert, |
| baseOpnd, |
| opndSlotArray, |
| opndSlotIndex, |
| objectTypeOpnd, |
| inlineCacheOpnd, |
| slotIndexLoadedLabel, |
| labelHelper, |
| &nextLabel, |
| &branchToPatch, |
| &taggedTypeOpnd); |
| } |
| } |
| Assert(branchToPatch); |
| Assert(nextLabel); |
| Assert(nextLabel->labelRefs.Count() == 1 && nextLabel->labelRefs.Head() == branchToPatch); |
| |
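    // The failure branch of the last check emitted still targets its own (empty) next label;
    // retarget that branch to the helper and remove the now-unreferenced label.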
| branchToPatch->SetTarget(labelHelper); |
| nextLabel->Remove(); |
| |
| instrInsert->InsertBefore(slotIndexLoadedLabel); |
| |
| IR::IndirOpnd * hitRateOpnd = IR::IndirOpnd::New(indexOpnd, hitRateOffset, TyInt32, m_func); |
| IR::IntConstOpnd * incOpnd = IR::IntConstOpnd::New(1, TyInt32, m_func); |
    // Overflow check not needed here: we don't allocate anything based on the hit rate, so wraparound is harmless.
| InsertAdd(false, hitRateOpnd, hitRateOpnd, incOpnd, instrInsert); |
| } |
| |
| template <bool CheckLocal, bool CheckInlineSlot, bool DoAdd> |
| void |
| Lowerer::GenerateLookUpInIndexCacheHelper( |
| _In_ IR::Instr* insertInstr, |
| _In_ IR::RegOpnd* baseOpnd, |
| _In_opt_ IR::RegOpnd* opndSlotArray, |
| _In_opt_ IR::RegOpnd* opndSlotIndex, |
| _In_ IR::RegOpnd* objectTypeOpnd, |
| _In_ IR::RegOpnd* inlineCacheOpnd, |
| _In_ IR::LabelInstr* doneLabel, |
| _In_ IR::LabelInstr* helperLabel, |
| _Outptr_ IR::LabelInstr** nextLabel, |
| _Outptr_ IR::BranchInstr** branchToPatch, |
| _Inout_ IR::RegOpnd** taggedTypeOpnd) |
| { |
| CompileAssert(!DoAdd || CheckLocal); |
| AnalysisAssert(!opndSlotArray || opndSlotIndex); |
| |
| *nextLabel = IR::LabelInstr::New(Js::OpCode::Label, m_func); |
| |
| IR::RegOpnd* typeOpnd = nullptr; |
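    // Aux-slot caches store the object's type with the aux slot tag bit set, so compare against a tagged
    // copy of the type. The tagged copy is created lazily and shared across all aux-slot checks emitted here.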
| if (CheckInlineSlot) |
| { |
| typeOpnd = objectTypeOpnd; |
| } |
| else |
| { |
| if (*taggedTypeOpnd == nullptr) |
| { |
| *taggedTypeOpnd = IR::RegOpnd::New(TyMachReg, m_func); |
| m_lowererMD.GenerateLoadTaggedType(insertInstr, objectTypeOpnd, *taggedTypeOpnd); |
| } |
| typeOpnd = *taggedTypeOpnd; |
| } |
| |
| IR::RegOpnd* objectOpnd = nullptr; |
| if (CheckLocal) |
| { |
| *branchToPatch = GenerateLocalInlineCacheCheck(insertInstr, typeOpnd, inlineCacheOpnd, *nextLabel, DoAdd); |
| if (DoAdd) |
| { |
| if (!CheckInlineSlot) |
| { |
| GenerateAuxSlotAdjustmentRequiredCheck(insertInstr, inlineCacheOpnd, helperLabel); |
| } |
| GenerateSetObjectTypeFromInlineCache(insertInstr, baseOpnd, inlineCacheOpnd, !CheckInlineSlot); |
| } |
| |
| objectOpnd = baseOpnd; |
| } |
| else |
| { |
| *branchToPatch = GenerateProtoInlineCacheCheck(insertInstr, typeOpnd, inlineCacheOpnd, *nextLabel); |
| |
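        // Proto cache hit: the property's slots live on the prototype object recorded in the cache,
        // not on the base object, so load the prototype from the cache's proto entry.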
| IR::RegOpnd* protoOpnd = IR::RegOpnd::New(TyMachReg, m_func); |
| int32 protoObjOffset = (int32)offsetof(Js::InlineCache, u.proto.prototypeObject); |
| IR::IndirOpnd* protoIndir = IR::IndirOpnd::New(inlineCacheOpnd, protoObjOffset, TyMachReg, m_func); |
| InsertMove(protoOpnd, protoIndir, insertInstr); |
| objectOpnd = protoOpnd; |
| } |
| |
| if (opndSlotArray) |
| { |
| if (CheckInlineSlot) |
| { |
| InsertMove(opndSlotArray, objectOpnd, insertInstr); |
| } |
| else |
| { |
| IR::IndirOpnd* auxIndir = IR::IndirOpnd::New(objectOpnd, Js::DynamicObject::GetOffsetOfAuxSlots(), TyMachReg, m_func); |
| InsertMove(opndSlotArray, auxIndir, insertInstr); |
| } |
| |
| size_t slotIndexOffset = CheckLocal ? offsetof(Js::InlineCache, u.local.slotIndex) : offsetof(Js::InlineCache, u.proto.slotIndex); |
| IR::IndirOpnd* slotOffsetIndir = IR::IndirOpnd::New(inlineCacheOpnd, (int32)slotIndexOffset, TyUint16, m_func); |
        // Load the slot index (uint16) from the cache into opndSlotIndex.
| InsertMove(opndSlotIndex, slotOffsetIndir, insertInstr); |
| } |
| |
| InsertBranch(Js::OpCode::Br, doneLabel, insertInstr); |
| |
| insertInstr->InsertBefore(*nextLabel); |
| } |
| |
| IR::IndirOpnd * |
| Lowerer::GenerateFastElemIIntIndexCommon( |
| IR::Instr * instr, |
| bool isStore, |
| IR::IndirOpnd * indirOpnd, |
| IR::LabelInstr * labelHelper, |
| IR::LabelInstr * labelCantUseArray, |
| IR::LabelInstr *labelFallthrough, |
| bool * pIsTypedArrayElement, |
| bool *emitBailoutRef, |
| IR::LabelInstr **pLabelSegmentLengthIncreased, |
| bool checkArrayLengthOverflow /*= true*/, |
| IR::Opnd** maskOpnd, |
| bool forceGenerateFastPath /* = false */, |
| bool returnLength, |
| IR::LabelInstr *bailOutLabelInstr /* = nullptr*/, |
| bool * indirOpndOverflowed /* = nullptr */) |
| { |
| IR::RegOpnd *indexOpnd = indirOpnd->GetIndexOpnd(); |
| IR::RegOpnd *baseOpnd = indirOpnd->GetBaseOpnd(); |
| Assert(!baseOpnd->IsTaggedInt() || (indexOpnd && indexOpnd->IsNotInt())); |
| |
| if (indirOpndOverflowed != nullptr) |
| { |
| *indirOpndOverflowed = false; |
| } |
| |
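    // Default to Var-sized elements; indirScale and indirType are refined below once the base's value type is known.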
| BYTE indirScale = this->m_lowererMD.GetDefaultIndirScale(); |
| IRType indirType = TyVar; |
| const ValueType baseValueType(baseOpnd->GetValueType()); |
| |
| // TEST base, AtomTag -- check base not tagged int |
| // JNE $helper |
| // if (base.GetValueType() != Array) { |
| // CMP [base], JavascriptArray::`vtable' |
| // JNE $helper |
| // } |
| // TEST index, 1 -- index tagged int |
| // JEQ $helper |
| // if (inputIndex is not int const) { |
| // MOV index, inputIndex |
    //     SAR index, Js::VarTag_Shift            -- remove atom tag
| // JS $helper -- exclude negative index |
| // } |
| // MOV headSegment, [base + offset(head)] |
| // CMP [headSegment + offset(length)], index -- bounds check |
| // if (opcode == StElemI_A) { |
    //         JA $done (for typed arrays, JA $toNumberHelper)
| // CMP [headSegment + offset(size)], index -- chunk has room? |
| // JBE $helper |
| // if (index is not int const) { |
| // LEA newLength, [index + 1] |
| // } else { |
| // newLength = index + 1 |
| // } |
| // if(BailOutOnInvalidatedArrayLength) { |
| // CMP [base + offset(length)], newlength |
| // JB $helper |
| // } |
| // MOV [headSegment + offset(length)], newLength -- update length on chunk |
| // CMP [base + offset(length)], newLength |
| // JAE $done |
| // MOV [base + offset(length)], newLength -- update length on array |
| // if(length to be returned){ |
| // SHL newLength, AtomTag |
| // INC newLength |
| // MOV dst, newLength |
| // } |
| // JMP $done |
| // |
| // $toNumberHelper: Call HelperOp_ConvNumber_Full |
| // JMP $done |
| // $done |
    // } else {
| // JBE $helper |
| // } |
| // return [headSegment + offset(elements) + index] |
| |
| // Caution: If making changes to the conditions under which we don't emit the typical array checks, make sure |
| // the code in GlobOpt::ShouldAssumeIndirOpndHasNonNegativeIntIndex is updated accordingly. We don't want the |
| // global optimizer to type specialize instructions, for which the lowerer is forced to emit unconditional |
| // bailouts. |
| bool isIndexNotInt = false; |
| IntConstType value = 0; |
| IR::Opnd * indexValueOpnd = nullptr; |
| bool invertBoundCheckComparison = false; |
| bool checkIndexConstOverflowed = false; |
| |
| if (indirOpnd->TryGetIntConstIndexValue(true, &value, &isIndexNotInt)) |
| { |
| if (value >= 0) |
| { |
| indexValueOpnd = IR::IntConstOpnd::New(value, TyUint32, this->m_func); |
| invertBoundCheckComparison = true; // facilitate folding the constant index into the compare instruction |
| checkIndexConstOverflowed = true; |
| } |
| else |
| { |
| // If the index is a negative int constant we go directly to helper. |
| Assert(!forceGenerateFastPath); |
| return nullptr; |
| } |
| } |
| else if (isIndexNotInt) |
| { |
| // If we know the index is not an int we go directly to helper. |
| Assert(!forceGenerateFastPath); |
| return nullptr; |
| } |
| |
    // At this point indexValueOpnd is either nullptr or the nonnegative constant index operand.
| |
| if(!forceGenerateFastPath && !ShouldGenerateArrayFastPath(baseOpnd, true, true, true)) |
| { |
| return nullptr; |
| } |
| |
| if(baseValueType.IsLikelyAnyOptimizedArray()) |
| { |
| indirScale = GetArrayIndirScale(baseValueType); |
| indirType = GetArrayIndirType(baseValueType); |
| } |
| |
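    // If the scaled constant index would not fit in the 32-bit indir offset, report the overflow
    // and let the caller fall back to the helper.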
| if (checkIndexConstOverflowed && (static_cast<uint64>(value) << indirScale) > INT32_MAX && |
| indirOpndOverflowed != nullptr) |
| { |
| *indirOpndOverflowed = true; |
| return nullptr; |
| } |
| |
| IRType elementType = TyIllegal; |
| IR::Opnd * element = nullptr; |
| |
| if(instr->m_opcode == Js::OpCode::InlineArrayPush) |
| { |
| element = instr->GetSrc2(); |
| elementType = element->GetType(); |
| } |
| else if(isStore && instr->GetSrc1()) |
| { |
| element = instr->GetSrc1(); |
| elementType = element->GetType(); |
| } |
| |
| Assert(isStore || (element == nullptr && elementType == TyIllegal)); |
| |
| if (isStore && baseValueType.IsLikelyNativeArray() && indirType != elementType) |
| { |
| // We're trying to write a value of the wrong type, which should force a conversion of the array. |
| // Go to the helper for that. |
| return nullptr; |
| } |
| |
| IR::RegOpnd *arrayOpnd = baseOpnd; |
| IR::RegOpnd *headSegmentOpnd = nullptr; |
| IR::Opnd *headSegmentLengthOpnd = nullptr; |
| IR::AutoReuseOpnd autoReuseHeadSegmentOpnd, autoReuseHeadSegmentLengthOpnd; |
| bool indexIsNonnegative = indexValueOpnd || indexOpnd->GetType() == TyUint32 || !checkArrayLengthOverflow; |
| bool indexIsLessThanHeadSegmentLength = false; |
| if(!baseValueType.IsAnyOptimizedArray()) |
| { |
| arrayOpnd = GenerateArrayTest(baseOpnd, labelCantUseArray, labelCantUseArray, instr, true, isStore); |
| } |
| else |
| { |
| if(arrayOpnd->IsArrayRegOpnd()) |
| { |
| IR::ArrayRegOpnd *const arrayRegOpnd = arrayOpnd->AsArrayRegOpnd(); |
| if(arrayRegOpnd->HeadSegmentSym()) |
| { |
| headSegmentOpnd = IR::RegOpnd::New(arrayRegOpnd->HeadSegmentSym(), TyMachPtr, m_func); |
| DebugOnly(headSegmentOpnd->FreezeSymValue()); |
| autoReuseHeadSegmentOpnd.Initialize(headSegmentOpnd, m_func); |
| } |
| if(arrayRegOpnd->HeadSegmentLengthSym()) |
| { |
| headSegmentLengthOpnd = IR::RegOpnd::New(arrayRegOpnd->HeadSegmentLengthSym(), TyUint32, m_func); |
| // This value can change over the course of this function |
| //DebugOnly(headSegmentLengthOpnd->AsRegOpnd()->FreezeSymValue()); |
| autoReuseHeadSegmentLengthOpnd.Initialize(headSegmentLengthOpnd, m_func); |
| } |
| if (arrayRegOpnd->EliminatedLowerBoundCheck()) |
| { |
| indexIsNonnegative = true; |
| } |
| if(arrayRegOpnd->EliminatedUpperBoundCheck()) |
| { |
| indexIsLessThanHeadSegmentLength = true; |
| } |
| } |
| } |
| IR::AutoReuseOpnd autoReuseArrayOpnd; |
| if(arrayOpnd->GetValueType().GetObjectType() != ObjectType::ObjectWithArray) |
| { |
| autoReuseArrayOpnd.Initialize(arrayOpnd, m_func); |
| } |
| const auto EnsureObjectArrayLoaded = [&]() |
| { |
| if(arrayOpnd->GetValueType().GetObjectType() != ObjectType::ObjectWithArray) |
| { |
| return; |
| } |
| arrayOpnd = LoadObjectArray(arrayOpnd, instr); |
| autoReuseArrayOpnd.Initialize(arrayOpnd, m_func); |
| }; |
| |
| const bool doUpperBoundCheck = checkArrayLengthOverflow && !indexIsLessThanHeadSegmentLength; |
| if(!indexValueOpnd) |
| { |
| indexValueOpnd = |
| m_lowererMD.LoadNonnegativeIndex( |
| indexOpnd, |
| ( |
| indexIsNonnegative |
| #if !INT32VAR |
| || |
| // On 32-bit platforms, skip the negative check since for now, the unsigned upper bound check covers it |
| doUpperBoundCheck |
| #endif |
| ), |
| labelCantUseArray, |
| labelHelper, |
| instr); |
| } |
| const IR::AutoReuseOpnd autoReuseIndexValueOpnd(indexValueOpnd, m_func); |
| |
| if (baseValueType.IsLikelyTypedArray()) |
| { |
| *pIsTypedArrayElement = true; |
| |
| if(doUpperBoundCheck) |
| { |
| if(!headSegmentLengthOpnd) |
| { |
| // (headSegmentLength = [base + offset(length)]) |
                int lengthOffset = Js::Float64Array::GetOffsetOfLength();
| headSegmentLengthOpnd = IR::IndirOpnd::New(arrayOpnd, lengthOffset, TyUint32, m_func); |
| autoReuseHeadSegmentLengthOpnd.Initialize(headSegmentLengthOpnd, m_func); |
| } |
| |
| // CMP index, headSegmentLength -- upper bound check |
| if(!invertBoundCheckComparison) |
| { |
| InsertCompare(indexValueOpnd, headSegmentLengthOpnd, instr); |
| } |
| else |
| { |
| InsertCompare(headSegmentLengthOpnd, indexValueOpnd, instr); |
| } |
| } |
| } |
| else |
| { |
| *pIsTypedArrayElement = false; |
| |
| if (isStore && |
| baseValueType.IsLikelyNativeIntArray() && |
| (!element->IsIntConstOpnd() || Js::SparseArraySegment<int32>::GetMissingItem() == element->AsIntConstOpnd()->AsInt32())) |
| { |
| Assert(instr->m_opcode != Js::OpCode::InlineArrayPush || bailOutLabelInstr); |
| |
| // Check for a write of the MissingItem value. |
| InsertMissingItemCompareBranch( |
| element, |
| Js::OpCode::BrEq_A, |
| instr->m_opcode == Js::OpCode::InlineArrayPush ? bailOutLabelInstr : labelCantUseArray, |
| instr); |
| } |
| |
| if(!headSegmentOpnd) |
| { |
| EnsureObjectArrayLoaded(); |
| |
| // MOV headSegment, [base + offset(head)] |
| indirOpnd = IR::IndirOpnd::New(arrayOpnd, Js::JavascriptArray::GetOffsetOfHead(), TyMachPtr, this->m_func); |
| headSegmentOpnd = IR::RegOpnd::New(TyMachPtr, this->m_func); |
| autoReuseHeadSegmentOpnd.Initialize(headSegmentOpnd, m_func); |
| InsertMove(headSegmentOpnd, indirOpnd, instr); |
| } |
| |
| if(doUpperBoundCheck) |
| { |
| if(!headSegmentLengthOpnd) |
| { |
| // (headSegmentLength = [headSegment + offset(length)]) |
| headSegmentLengthOpnd = |
| IR::IndirOpnd::New(headSegmentOpnd, Js::SparseArraySegmentBase::GetOffsetOfLength(), TyUint32, m_func); |
| autoReuseHeadSegmentLengthOpnd.Initialize(headSegmentLengthOpnd, m_func); |
| } |
| |
| // CMP index, headSegmentLength -- upper bound check |
| if(!invertBoundCheckComparison) |
| { |
| InsertCompare(indexValueOpnd, headSegmentLengthOpnd, instr); |
| } |
| else |
| { |
| InsertCompare(headSegmentLengthOpnd, indexValueOpnd, instr); |
| } |
| } |
| } |
| |
| const IR::BailOutKind bailOutKind = instr->HasBailOutInfo() ? instr->GetBailOutKind() : IR::BailOutInvalid; |
| const bool needBailOutOnInvalidLength = !!(bailOutKind & (IR::BailOutOnInvalidatedArrayHeadSegment)); |
| const bool needBailOutToHelper = !!(bailOutKind & (IR::BailOutOnArrayAccessHelperCall)); |
| const bool needBailOutOnSegmentLengthCompare = needBailOutToHelper || needBailOutOnInvalidLength; |
| |
| bool usingSegmentLengthIncreasedLabel = false; |
| |
| if(indexIsLessThanHeadSegmentLength || needBailOutOnSegmentLengthCompare) |
| { |
| if (needBailOutOnSegmentLengthCompare) |
| { |
| // The bailout must be pre-op because it will not have completed the operation |
| Assert(instr->GetBailOutInfo()->bailOutOffset == instr->GetByteCodeOffset()); |
| |
            // TODO: Check this with lazy bailout.
            // Verify which other bailout kinds these can be combined with.
| Assert( |
| !( |
| bailOutKind & |
| IR::BailOutKindBits & |
| ~( |
| IR::LazyBailOut | |
| IR::BailOutOnArrayAccessHelperCall | |
| IR::BailOutOnInvalidatedArrayHeadSegment | |
| IR::BailOutOnInvalidatedArrayLength | |
| IR::BailOutConventionalNativeArrayAccessOnly | |
| IR::BailOutOnMissingValue | |
| (bailOutKind & IR::BailOutOnArrayAccessHelperCall ? IR::BailOutInvalid : IR::BailOutConvertedNativeArray) |
| ) |
| ) |
| ); |
| |
| if (bailOutKind & IR::BailOutOnArrayAccessHelperCall) |
| { |
| // Omit the helper call and generate a bailout instead |
| Assert(emitBailoutRef); |
| *emitBailoutRef = true; |
| } |
| } |
| |
| if (indexIsLessThanHeadSegmentLength) |
| { |
| Assert(!(bailOutKind & IR::BailOutOnInvalidatedArrayHeadSegment)); |
| } |
| else |
| { |
| IR::LabelInstr *bailOutLabel; |
| if (needBailOutOnInvalidLength) |
| { |
| Assert(isStore); |
| // Lower a separate (but shared) bailout for this case, and preserve the bailout kind in the instruction if the |
| // helper call is going to be generated, because the bailout kind needs to be lowered again and differently in the |
| // helper call path. |
| // |
| // Generate: |
| // (instr) |
| // jmp $continue |
| // $bailOut: |
| // Bail out with IR::BailOutOnInvalidatedArrayHeadSegment |
| // $continue: |
| LowerOneBailOutKind( |
| instr, |
| IR::BailOutOnInvalidatedArrayHeadSegment, |
| false, |
| !(bailOutKind & IR::BailOutOnArrayAccessHelperCall)); |
| bailOutLabel = instr->GetOrCreateContinueLabel(true); |
| InsertBranch(Js::OpCode::Br, labelFallthrough, bailOutLabel); |
| } |
| else |
| { |
| Assert(needBailOutToHelper); |
| bailOutLabel = labelHelper; |
| } |
| |
| // Bail out if the index is outside the head segment bounds |
| // jae $bailOut |
| Assert(checkArrayLengthOverflow); |
| InsertBranch( |
| !invertBoundCheckComparison ? Js::OpCode::BrGe_A : Js::OpCode::BrLe_A, |
| true /* isUnsigned */, |
| bailOutLabel, |
| instr); |
| } |
| } |
| else if (isStore && !baseValueType.IsLikelyTypedArray()) // #if (opcode == StElemI_A) |
| { |
| IR::LabelInstr *labelDone = IR::LabelInstr::New(Js::OpCode::Label, this->m_func); |
| LABELNAME(labelDone); |
| IR::LabelInstr *labelSegmentLengthIncreased = nullptr; |
| |
| const bool isPush = instr->m_opcode != Js::OpCode::StElemI_A && instr->m_opcode != Js::OpCode::StElemI_A_Strict; |
| |
| // Put the head segment size check and length updates in a helper block since they're not the common path for StElem. |
| // For push, that is the common path so keep it in a non-helper block. |
| const bool isInHelperBlock = !isPush; |
| |
| if(checkArrayLengthOverflow) |
| { |
| if(pLabelSegmentLengthIncreased && |
| !( |
| (baseValueType.IsArrayOrObjectWithArray() && baseValueType.HasNoMissingValues()) || |
| ((instr->m_opcode == Js::OpCode::StElemI_A || instr->m_opcode == Js::OpCode::StElemI_A_Strict) && |
| instr->IsProfiledInstr() && !instr->AsProfiledInstr()->u.stElemInfo->LikelyFillsMissingValue()) |
| )) |
| { |
| // For arrays that are not guaranteed to have no missing values, before storing to an element where |
| // (index < length), the element value needs to be checked to see if it's a missing value, and if so, fall back |
| // to the helper. This is done to keep the missing value tracking precise in arrays. So, create a separate label |
| // for the case where the length was increased (index >= length), and pass it back to GenerateFastStElemI, which |
| // will fill in the rest. |
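                // For example, with a = [1, 2, 3], the store a[5] = 6 grows the segment length from 3 to 6
                // and leaves missing values at indices 3 and 4, so the array can no longer be tracked as
                // having no missing values.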
| labelSegmentLengthIncreased = IR::LabelInstr::New(Js::OpCode::Label, this->m_func, isInHelperBlock); |
| LABELNAME(labelSegmentLengthIncreased); |
| *pLabelSegmentLengthIncreased = labelSegmentLengthIncreased; |
| |
| // Since this is effectively a separate exit point, we need to do the spectre mitigations in this place as well. |
| usingSegmentLengthIncreasedLabel = true; |
| } |
| else |
| { |
| labelSegmentLengthIncreased = labelDone; |
| } |
| |
| // JB $done |
| InsertBranch( |
| !invertBoundCheckComparison ? Js::OpCode::BrLt_A : Js::OpCode::BrGt_A, |
| true /* isUnsigned */, |
| labelDone, |
| instr); |
| } |
| |
| if(isInHelperBlock) |
| { |
| InsertLabel(true /* isHelper */, instr); |
| } |
| |
| EnsureObjectArrayLoaded(); |
| |
| do // while(false); |
| { |
| if(checkArrayLengthOverflow) |
| { |
| if(instr->HasBailOutInfo() && instr->GetBailOutKind() & IR::BailOutOnMissingValue) |
| { |
| // Need to bail out if this store would create a missing value. The store would cause a missing value to be |
| // created if (index > length && index < size). If (index >= size) we would go to helper anyway, and the bailout |
| // handling for this is done after the helper call, so just go to helper if (index > length). |
| // |
| // jne $helper // branch for (cmp index, headSegmentLength) |
| InsertBranch(Js::OpCode::BrNeq_A, labelHelper, instr); |
| } |
| else |
| { |
| // If (index < size) we will not call the helper, so the array flags must be updated to reflect that it no |
| // longer has no missing values. |
| // |
| // jne indexGreaterThanLength // branch for (cmp index, headSegmentLength) |
| // cmp index, [headSegment + offset(size)] |
| // jae $helper |
| // jmp indexLessThanSize |
| // indexGreaterThanLength: |
| // cmp index, [headSegment + offset(size)] |
| // jae $helper |
| // and [array + offsetOf(objectArrayOrFlags)], ~Js::DynamicObjectFlags::HasNoMissingValues |
| // indexLessThanSize: |
| // if(!index->IsConstOpnd()) { |
| // sub temp, index, [headSegment + offset(size)] |
| // sar temp, 31 |
| // and index, temp |
| // } |
| |
| IR::LabelInstr *const indexGreaterThanLengthLabel = InsertLabel(true /* isHelper */, instr); |
| LABELNAME(indexGreaterThanLengthLabel); |
| IR::LabelInstr *const indexLessThanSizeLabel = InsertLabel(isInHelperBlock, instr); |
| LABELNAME(indexLessThanSizeLabel); |
| |
| // jne indexGreaterThanLength // branch for (cmp index, headSegmentLength) |
| InsertBranch(Js::OpCode::BrNeq_A, indexGreaterThanLengthLabel, indexGreaterThanLengthLabel); |
| |
| // cmp index, [headSegment + offset(size)] |
| // jae $helper |
| // jmp indexLessThanSize |
| // indexGreaterThanLength: |
| InsertCompareBranch( |
| indexValueOpnd, |
| IR::IndirOpnd::New(headSegmentOpnd, offsetof(Js::SparseArraySegmentBase, size), TyUint32, m_func), |
| Js::OpCode::BrGe_A, |
| true /* isUnsigned */, |
| labelHelper, |
| indexGreaterThanLengthLabel); |
| |
| InsertBranch(Js::OpCode::Br, indexLessThanSizeLabel, indexGreaterThanLengthLabel); |
| |
| // indexGreaterThanLength: |
| // cmp index, [headSegment + offset(size)] |
| // jae $helper |
| // and [array + offsetOf(objectArrayOrFlags)], ~Js::DynamicObjectFlags::HasNoMissingValues |
| // indexLessThanSize: |
| InsertCompareBranch( |
| indexValueOpnd, |
| IR::IndirOpnd::New(headSegmentOpnd, offsetof(Js::SparseArraySegmentBase, size), TyUint32, m_func), |
| Js::OpCode::BrGe_A, |
| true /* isUnsigned */, |
| labelHelper, |
| indexLessThanSizeLabel); |
| |
| CompileAssert( |
| static_cast<Js::DynamicObjectFlags>(static_cast<uint8>(Js::DynamicObjectFlags::HasNoMissingValues)) == |
| Js::DynamicObjectFlags::HasNoMissingValues); |
| InsertAnd( |
| IR::IndirOpnd::New(arrayOpnd, Js::JavascriptArray::GetOffsetOfArrayFlags(), TyUint8, m_func), |
| IR::IndirOpnd::New(arrayOpnd, Js::JavascriptArray::GetOffsetOfArrayFlags(), TyUint8, m_func), |
| IR::IntConstOpnd::New( |
| static_cast<uint8>(~Js::DynamicObjectFlags::HasNoMissingValues), |
| TyUint8, |
| m_func, |
| true), |
| indexLessThanSizeLabel); |
| |
                    // indexLessThanSize:
                    // In speculative cases, we want to avoid a write to an array setting the length to something huge, which
                    // would then allow subsequent reads to hit arbitrary memory (in the speculative path). This is done with
                    // a mask generated from the difference between the index and the size. Since we should have already gone
                    // to the helper in any case where this would execute, it's a functional no-op.
| |
| // if(!index->IsConstOpnd()) { |
| // sub temp, index, [headSegment + offset(size)] |
| // sar temp, 31 |
| // and index, temp |
| // } |
| if (!indexValueOpnd->IsConstOpnd() |
| && (baseValueType.IsLikelyTypedArray() |
| ? CONFIG_FLAG_RELEASE(PoisonTypedArrayStore) |
| : ((indirType == TyVar && CONFIG_FLAG_RELEASE(PoisonVarArrayStore)) |
| || (IRType_IsNativeInt(indirType) && CONFIG_FLAG_RELEASE(PoisonIntArrayStore)) |
| || (IRType_IsFloat(indirType) && CONFIG_FLAG_RELEASE(PoisonFloatArrayStore))) |
| ) |
| ) |
| { |
| IR::RegOpnd* temp = IR::RegOpnd::New(TyUint32, m_func); |
| InsertSub( |
| false, |
| temp, |
| indexValueOpnd, |
| IR::IndirOpnd::New(headSegmentOpnd, offsetof(Js::SparseArraySegmentBase, size), TyUint32, m_func), |
| instr); |
| InsertShift(Js::OpCode::Shr_A, false, temp, temp, IR::IntConstOpnd::New(31, TyInt8, m_func), instr); |
| InsertAnd(indexValueOpnd, indexValueOpnd, temp, instr); |
| } |
| break; |
| } |
| } |
| |
| // CMP index, [headSegment + offset(size)] |
| // JAE $helper |
| indirOpnd = IR::IndirOpnd::New(headSegmentOpnd, offsetof(Js::SparseArraySegmentBase, size), TyUint32, this->m_func); |
| InsertCompareBranch(indexValueOpnd, indirOpnd, Js::OpCode::BrGe_A, true /* isUnsigned */, labelHelper, instr); |
| } while(false); |
| |
| if(isPush) |
| { |
| IR::LabelInstr *const updateLengthLabel = InsertLabel(isInHelperBlock, instr); |
| LABELNAME(updateLengthLabel); |
| |
| if(!doUpperBoundCheck && !headSegmentLengthOpnd) |
| { |
| // (headSegmentLength = [headSegment + offset(length)]) |
| headSegmentLengthOpnd = |
| IR::IndirOpnd::New(headSegmentOpnd, Js::SparseArraySegmentBase::GetOffsetOfLength(), TyUint32, m_func); |
| autoReuseHeadSegmentLengthOpnd.Initialize(headSegmentLengthOpnd, m_func); |
| } |
| |
| // For push, it is guaranteed that (index >= length). We already know that (index < size), but we need to check if |
| // (index > length) because in that case a missing value will be created and the missing value tracking in the array |
| // needs to be updated. |
| // |
| // cmp index, headSegmentLength |
| // je $updateLength |
| // and [array + offsetOf(objectArrayOrFlags)], ~Js::DynamicObjectFlags::HasNoMissingValues |
| // updateLength: |
| InsertCompareBranch( |
| indexValueOpnd, |
| headSegmentLengthOpnd, |
| Js::OpCode::BrEq_A, |
| updateLengthLabel, |
| updateLengthLabel); |
| CompileAssert( |
| static_cast<Js::DynamicObjectFlags>(static_cast<uint8>(Js::DynamicObjectFlags::HasNoMissingValues)) == |
| Js::DynamicObjectFlags::HasNoMissingValues); |
| InsertAnd( |
| IR::IndirOpnd::New(arrayOpnd, Js::JavascriptArray::GetOffsetOfArrayFlags(), TyUint8, m_func), |
| IR::IndirOpnd::New(arrayOpnd, Js::JavascriptArray::GetOffsetOfArrayFlags(), TyUint8, m_func), |
| IR::IntConstOpnd::New( |
| static_cast<uint8>(~Js::DynamicObjectFlags::HasNoMissingValues), |
| TyUint8, |
| m_func, |
| true), |
| updateLengthLabel); |
| } |
| |
| if (baseValueType.IsArrayOrObjectWithArray()) |
| { |
| // We didn't emit an array check, but if we are going to grow the array |
| // We need to go to helper if there is an ES5 array/objectarray used as prototype |
| GenerateIsEnabledArraySetElementFastPathCheck(labelHelper, instr); |
| } |
| |
| IR::Opnd *newLengthOpnd; |
| IR::AutoReuseOpnd autoReuseNewLengthOpnd; |
| if (indexValueOpnd->IsRegOpnd()) |
| { |
| // LEA newLength, [index + 1] |
| newLengthOpnd = IR::RegOpnd::New(TyUint32, this->m_func); |
| autoReuseNewLengthOpnd.Initialize(newLengthOpnd, m_func); |
| InsertAdd(false /* needFlags */, newLengthOpnd, indexValueOpnd, IR::IntConstOpnd::New(1, TyUint32, m_func), instr); |
| } |
| else |
| { |
| newLengthOpnd = IR::IntConstOpnd::New(value + 1, TyUint32, this->m_func); |
| autoReuseNewLengthOpnd.Initialize(newLengthOpnd, m_func); |
| } |
| |
        // Stores into a preallocated but not-yet-filled head segment are common enough that it's worth checking
        // here and staying on the fast path, taking the helper only when the store would actually grow the array length.
| if (!!(bailOutKind & IR::BailOutOnInvalidatedArrayLength)) |
| { |
| // If we'd increase the array length, go to the helper |
| indirOpnd = IR::IndirOpnd::New(arrayOpnd, Js::JavascriptArray::GetOffsetOfLength(), TyUint32, this->m_func); |
| InsertCompareBranch( |
| newLengthOpnd, |
| indirOpnd, |
| Js::OpCode::BrGt_A, |
| true, |
| labelHelper, |
| instr); |
| } |
| // MOV [headSegment + offset(length)], newLength |
| indirOpnd = IR::IndirOpnd::New(headSegmentOpnd, offsetof(Js::SparseArraySegmentBase, length), TyUint32, this->m_func); |
| InsertMove(indirOpnd, newLengthOpnd, instr); |
| |
| // We've changed the head segment length, so we may need to change the head segment length opnd |
| if (headSegmentLengthOpnd != nullptr && !headSegmentLengthOpnd->IsIndirOpnd()) |
| { |
| InsertMove(headSegmentLengthOpnd, newLengthOpnd, instr); |
| } |
| |
| if (checkArrayLengthOverflow) |
| { |
| // CMP newLength, [base + offset(length)] |
| // JBE $segmentLengthIncreased |
| Assert(labelSegmentLengthIncreased); |
| indirOpnd = IR::IndirOpnd::New(arrayOpnd, Js::JavascriptArray::GetOffsetOfLength(), TyUint32, this->m_func); |
| InsertCompareBranch( |
| newLengthOpnd, |
| indirOpnd, |
| Js::OpCode::BrLe_A, |
| true /* isUnsigned */, |
| labelSegmentLengthIncreased, |
| instr); |
| |
| if(!isInHelperBlock) |
| { |
| InsertLabel(true /* isHelper */, instr); |
| } |
| } |
| |
| // MOV [base + offset(length)], newLength |
| indirOpnd = IR::IndirOpnd::New(arrayOpnd, Js::JavascriptArray::GetOffsetOfLength(), TyUint32, this->m_func); |
| InsertMove(indirOpnd, newLengthOpnd, instr); |
| |
| if(returnLength) |
| { |
| if(newLengthOpnd->GetSize() != MachPtr) |
| { |
| newLengthOpnd = newLengthOpnd->UseWithNewType(TyMachPtr, m_func)->AsRegOpnd(); |
| } |
| |
| // SHL newLength, AtomTag |
| // INC newLength |
| this->m_lowererMD.GenerateInt32ToVarConversion(newLengthOpnd, instr); |
| |
| // MOV dst, newLength |
| InsertMove(instr->GetDst(), newLengthOpnd, instr); |
| } |
| |
| // Calling code assumes that indirOpnd is initialized before labelSegmentLengthIncreased is reached |
| if(labelSegmentLengthIncreased && labelSegmentLengthIncreased != labelDone) |
| { |
| // labelSegmentLengthIncreased: |
| instr->InsertBefore(labelSegmentLengthIncreased); |
| } |
| |
| // $done |
| instr->InsertBefore(labelDone); |
| } |
| else // #else |
| { |
| if (checkArrayLengthOverflow) |
| { |
| if (*pIsTypedArrayElement && isStore) |
| { |
| IR::LabelInstr *labelInlineSet = IR::LabelInstr::New(Js::OpCode::Label, this->m_func); |
| LABELNAME(labelInlineSet); |
| |
                // For a positive index beyond the length, or a negative index, a typed array store is essentially a nop.
| InsertBranch( |
| !invertBoundCheckComparison ? Js::OpCode::BrLt_A : Js::OpCode::BrGt_A, |
| true /* isUnsigned */, |
| labelInlineSet, |
| instr); |
| |
| // For typed array, call ToNumber before we fallThrough. |
| if (instr->GetSrc1()->GetType() == TyVar && !instr->GetSrc1()->GetValueType().IsPrimitive()) |
| { |
| // Enter an ophelper block |
| IR::LabelInstr * opHelper = IR::LabelInstr::New(Js::OpCode::Label, this->m_func, true); |
| LABELNAME(opHelper); |
| instr->InsertBefore(opHelper); |
| |
| IR::Instr *toNumberInstr = IR::Instr::New(Js::OpCode::Call, this->m_func); |
| toNumberInstr->SetSrc1(instr->GetSrc1()); |
| instr->InsertBefore(toNumberInstr); |
| |
| if (BailOutInfo::IsBailOutOnImplicitCalls(bailOutKind)) |
| { |
| // Bail out if this conversion triggers implicit calls. |
| toNumberInstr = this->AddBailoutToHelperCallInstr(toNumberInstr, instr->GetBailOutInfo(), bailOutKind, instr); |
| } |
| |
| LowerUnaryHelperMem(toNumberInstr, IR::HelperOp_ConvNumber_Full); |
| } |
| InsertBranch(Js::OpCode::Br, labelFallthrough, instr); //Jump to fallThrough |
| |
| instr->InsertBefore(labelInlineSet); |
| } |
| else |
| { |
| // JAE $helper |
| InsertBranch( |
| !invertBoundCheckComparison ? Js::OpCode::BrGe_A : Js::OpCode::BrLe_A, |
| true /* isUnsigned */, |
| labelHelper, |
| instr); |
| } |
| } |
| |
| EnsureObjectArrayLoaded(); |
| |
| if (instr->m_opcode == Js::OpCode::InlineArrayPop) |
| { |
| Assert(!baseValueType.IsLikelyTypedArray()); |
| Assert(bailOutLabelInstr); |
| |
| if (indexValueOpnd->IsIntConstOpnd()) |
| { |
| // indirOpnd = [headSegment + index + offset(elements)] |
| IntConstType offset = offsetof(Js::SparseArraySegment<Js::Var>, elements) + (value << indirScale); |
| // TODO: Assert(Math::FitsInDWord(offset)); |
| indirOpnd = IR::IndirOpnd::New(headSegmentOpnd, (int32)offset, indirType, this->m_func); |
| } |
| else |
| { |
| // indirOpnd = [headSegment + offset(elements) + (index << scale)] |
| indirOpnd = IR::IndirOpnd::New(headSegmentOpnd, indexValueOpnd->AsRegOpnd(), indirScale, indirType, this->m_func); |
| indirOpnd->SetOffset(offsetof(Js::SparseArraySegment<Js::Var>, elements)); |
| } |
| |
| IR::Opnd * tmpDst = nullptr; |
| IR::Opnd * dst = instr->GetDst(); |
        // Pop might not have a dst; if not, don't worry about returning the last element. But we still have to
| // worry about gaps, because these force us to access the prototype chain, which may have side-effects. |
| if (dst || !baseValueType.HasNoMissingValues()) |
| { |
| if (!dst) |
| { |
| dst = IR::RegOpnd::New(indirType, this->m_func); |
| } |
| else if (dst->AsRegOpnd()->m_sym == arrayOpnd->m_sym) |
| { |
| tmpDst = IR::RegOpnd::New(TyVar, this->m_func); |
| dst = tmpDst; |
| } |
| |
| // Use a mask to prevent arbitrary speculative reads |
| // If you think this code looks highly similar to the code later in this function, |
| // you'd be right. Unfortunately, I wasn't able to find a way to reduce duplication |
| // here without significantly complicating the code structure. |
| if (!headSegmentLengthOpnd) |
| { |
| headSegmentLengthOpnd = |
| IR::IndirOpnd::New(headSegmentOpnd, Js::SparseArraySegmentBase::GetOffsetOfLength(), TyUint32, m_func); |
| autoReuseHeadSegmentLengthOpnd.Initialize(headSegmentLengthOpnd, m_func); |
| } |
| IR::RegOpnd* localMaskOpnd = nullptr; |
| #if TARGET_64 |
| IR::Opnd* lengthOpnd = nullptr; |
| AnalysisAssert(headSegmentLengthOpnd != nullptr); |
| lengthOpnd = IR::RegOpnd::New(headSegmentLengthOpnd->GetType(), m_func); |
| { |
| IR::Instr * instrMov = IR::Instr::New(Js::OpCode::MOV_TRUNC, lengthOpnd, headSegmentLengthOpnd, m_func); |
| instr->InsertBefore(instrMov); |
| LowererMD::Legalize(instrMov); |
| } |
| |
| if (lengthOpnd->GetSize() != MachPtr) |
| { |
| lengthOpnd = lengthOpnd->UseWithNewType(TyMachPtr, this->m_func)->AsRegOpnd(); |
| } |
| |
            // MOV_TRUNC indexValueReg, indexValue -- copy the index value, truncating to 32 bits
| IR::RegOpnd* indexValueRegOpnd = IR::RegOpnd::New(indexValueOpnd->GetType(), m_func); |
| |
| { |
| IR::Instr * instrMov = IR::Instr::New(Js::OpCode::MOV_TRUNC, indexValueRegOpnd, indexValueOpnd, m_func); |
| instr->InsertBefore(instrMov); |
| LowererMD::Legalize(instrMov); |
| } |
| |
| if (indexValueRegOpnd->GetSize() != MachPtr) |
| { |
| indexValueRegOpnd = indexValueRegOpnd->UseWithNewType(TyMachPtr, this->m_func)->AsRegOpnd(); |
| } |
| |
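            // mask = (index - length) >> 63 (arithmetic): all ones when index < length, zero otherwise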
| localMaskOpnd = IR::RegOpnd::New(TyMachPtr, m_func); |
| InsertSub(false, localMaskOpnd, indexValueRegOpnd, lengthOpnd, instr); |
| InsertShift(Js::OpCode::Shr_A, false, localMaskOpnd, localMaskOpnd, IR::IntConstOpnd::New(63, TyInt8, m_func), instr); |
| #else |
| localMaskOpnd = IR::RegOpnd::New(TyInt32, m_func); |
| InsertSub(false, localMaskOpnd, indexValueOpnd, headSegmentLengthOpnd, instr); |
| InsertShift(Js::OpCode::Shr_A, false, localMaskOpnd, localMaskOpnd, IR::IntConstOpnd::New(31, TyInt8, m_func), instr); |
| #endif |
| |
            // For pop, we always do the masking before the load whenever we actually load a value.
| IR::RegOpnd* loadAddr = IR::RegOpnd::New(TyMachPtr, m_func); |
| |
| #if _M_ARM32_OR_ARM64 |
| if (indirOpnd->GetIndexOpnd() != nullptr && indirOpnd->GetScale() > 0) |
| { |
| // We don't support encoding for LEA with scale on ARM/ARM64, so do the scale calculation as a separate instruction |
| IR::RegOpnd* fullIndexOpnd = IR::RegOpnd::New(indirOpnd->GetIndexOpnd()->GetType(), m_func); |
| InsertShift(Js::OpCode::Shl_A, false, fullIndexOpnd, indirOpnd->GetIndexOpnd(), IR::IntConstOpnd::New(indirOpnd->GetScale(), TyInt8, m_func), instr); |
| IR::IndirOpnd* newIndir = IR::IndirOpnd::New(indirOpnd->GetBaseOpnd(), fullIndexOpnd, indirType, m_func); |
| if (indirOpnd->GetOffset() != 0) |
| { |
| newIndir->SetOffset(indirOpnd->GetOffset()); |
| } |
| indirOpnd = newIndir; |
| } |
| #endif |
| IR::AutoReuseOpnd reuseIndir(indirOpnd, m_func); |
| |
| InsertLea(loadAddr, indirOpnd, instr); |
| InsertAnd(loadAddr, loadAddr, localMaskOpnd, instr); |
| indirOpnd = IR::IndirOpnd::New(loadAddr, 0, indirType, m_func); |
| |
| // MOV dst, [head + offset] |
| InsertMove(dst, indirOpnd, instr); |
| |
            // If the array has missing values, check for one
| if (!baseValueType.HasNoMissingValues()) |
| { |
| InsertMissingItemCompareBranch( |
| dst, |
| Js::OpCode::BrEq_A, |
| bailOutLabelInstr, |
| instr); |
| } |
| } |
| // MOV [head + offset], missing |
| InsertMove(indirOpnd, GetMissingItemOpndForAssignment(indirType, m_func), instr); |
| |
| IR::Opnd *newLengthOpnd; |
| IR::AutoReuseOpnd autoReuseNewLengthOpnd; |
| if (indexValueOpnd->IsRegOpnd()) |
| { |
                // newLength = index
| newLengthOpnd = indexValueOpnd; |
| autoReuseNewLengthOpnd.Initialize(newLengthOpnd, m_func); |
| } |
| else |
| { |
| newLengthOpnd = IR::IntConstOpnd::New(value, TyUint32, this->m_func); |
| autoReuseNewLengthOpnd.Initialize(newLengthOpnd, m_func); |
| } |
| |
        // Update segment length and array length
| // MOV [headSegment + offset(length)], newLength |
| IR::IndirOpnd *lengthIndirOpnd = IR::IndirOpnd::New(headSegmentOpnd, offsetof(Js::SparseArraySegmentBase, length), TyUint32, this->m_func); |
| InsertMove(lengthIndirOpnd, newLengthOpnd, instr); |
| |
| // We've changed the head segment length, so we may need to change the head segment length opnd |
| if (headSegmentLengthOpnd != nullptr && !headSegmentLengthOpnd->IsIndirOpnd()) |
| { |
| InsertMove(headSegmentLengthOpnd, newLengthOpnd, instr); |
| } |
| |
| // MOV [base + offset(length)], newLength |
| lengthIndirOpnd = IR::IndirOpnd::New(arrayOpnd, Js::JavascriptArray::GetOffsetOfLength(), TyUint32, this->m_func); |
| InsertMove(lengthIndirOpnd, newLengthOpnd, instr); |
| |
| if (tmpDst) |
| { |
            // The array opnd and the destination are the same; move the value from the tmp dst
            // to the actual dst.
| InsertMove(instr->GetDst(), tmpDst, instr); |
| } |
| |
| return indirOpnd; |
| } |
| } // #endif |
| |
    // Should we poison the address from which the load, or to which the store, happens?
| bool shouldPoisonLoad = maskOpnd != nullptr |
| && ( |
| (!isStore && (!instr->IsSafeToSpeculate()) && |
| (baseValueType.IsLikelyTypedArray() |
| ? CONFIG_FLAG_RELEASE(PoisonTypedArrayLoad) |
| : ((indirType == TyVar && CONFIG_FLAG_RELEASE(PoisonVarArrayLoad)) |
| || (IRType_IsNativeInt(indirType) && CONFIG_FLAG_RELEASE(PoisonIntArrayLoad)) |
| || (IRType_IsFloat(indirType) && CONFIG_FLAG_RELEASE(PoisonFloatArrayLoad))) |
| ) |
| ) |
| || |
| (isStore && |
| (baseValueType.IsLikelyTypedArray() |
| ? CONFIG_FLAG_RELEASE(PoisonTypedArrayStore) |
| : ((indirType == TyVar && CONFIG_FLAG_RELEASE(PoisonVarArrayStore)) |
| || (IRType_IsNativeInt(indirType) && CONFIG_FLAG_RELEASE(PoisonIntArrayStore)) |
| || (IRType_IsFloat(indirType) && CONFIG_FLAG_RELEASE(PoisonFloatArrayStore))) |
| ) |
| ) |
| ) |
| ; |
| |
    // We have two exit paths for this function in the store case when we might grow the head
    // segment, due to tracking for missing elements. This unfortunately means we need a copy
    // of the poisoning code on the other exit path, since determining which path was taken,
    // and using that determination to decide whether we found the missing value, must happen
    // on opposite sides of the poisoning.
| IR::Instr* insertForSegmentLengthIncreased = nullptr; |
| if (shouldPoisonLoad && usingSegmentLengthIncreasedLabel) |
| { |
| insertForSegmentLengthIncreased = (*pLabelSegmentLengthIncreased)->m_next; |
| } |
| |
| #if TARGET_32 |
| if (shouldPoisonLoad) |
| { |
| // Prevent index from being negative, which would break the poisoning |
| if (indexValueOpnd->IsIntConstOpnd()) |
| { |
| indexValueOpnd = IR::IntConstOpnd::New(value & INT32_MAX, TyUint32, m_func); |
| } |
| else |
| { |
| IR::RegOpnd* newIndexValueOpnd = IR::RegOpnd::New(TyUint32, m_func); |
| InsertAnd(newIndexValueOpnd, indexValueOpnd, IR::IntConstOpnd::New(INT32_MAX, TyUint32, m_func), instr); |
| if(insertForSegmentLengthIncreased != nullptr) |
| { |
| InsertAnd(newIndexValueOpnd, indexValueOpnd, IR::IntConstOpnd::New(INT32_MAX, TyUint32, m_func), insertForSegmentLengthIncreased); |
| } |
| indexValueOpnd = newIndexValueOpnd; |
| } |
| } |
| #endif |
| |
| if (baseValueType.IsLikelyTypedArray()) |
| { |
| if(!headSegmentOpnd) |
| { |
| // MOV headSegment, [base + offset(arrayBuffer)] |
            int bufferOffset = Js::Float64Array::GetOffsetOfBuffer();
| indirOpnd = IR::IndirOpnd::New(arrayOpnd, bufferOffset, TyMachPtr, this->m_func); |
| headSegmentOpnd = IR::RegOpnd::New(TyMachPtr, this->m_func); |
| autoReuseHeadSegmentOpnd.Initialize(headSegmentOpnd, m_func); |
| IR::AutoReuseOpnd reuseIndir(indirOpnd, m_func); |
| InsertMove(headSegmentOpnd, indirOpnd, instr); |
| if(insertForSegmentLengthIncreased != nullptr) |
| { |
| InsertMove(headSegmentOpnd, indirOpnd, insertForSegmentLengthIncreased); |
| } |
| } |
| |
| // indirOpnd = [headSegment + index] |
| if (indexValueOpnd->IsIntConstOpnd()) |
| { |
| IntConstType offset = (value << indirScale); |
| // TODO: Assert(Math::FitsInDWord(offset)); |
| indirOpnd = IR::IndirOpnd::New(headSegmentOpnd, (int32)offset, indirType, this->m_func); |
| } |
| else |
| { |
| indirOpnd = IR::IndirOpnd::New(headSegmentOpnd, indexValueOpnd->AsRegOpnd(), indirScale, indirType, this->m_func); |
| } |
| } |
| else if (indexValueOpnd->IsIntConstOpnd()) |
| { |
| // indirOpnd = [headSegment + index + offset(elements)] |
| IntConstType offset = offsetof(Js::SparseArraySegment<Js::Var>, elements) + (value << indirScale); |
| // TODO: Assert(Math::FitsInDWord(offset)); |
| indirOpnd = IR::IndirOpnd::New(headSegmentOpnd, (int32)offset, indirType, this->m_func); |
| } |
| else |
| { |
| // indirOpnd = [headSegment + offset(elements) + (index << scale)] |
| indirOpnd = IR::IndirOpnd::New(headSegmentOpnd, indexValueOpnd->AsRegOpnd(), indirScale, indirType, this->m_func); |
| indirOpnd->SetOffset(offsetof(Js::SparseArraySegment<Js::Var>, elements)); |
| } |
| |
| if (shouldPoisonLoad) |
| { |
| // Use a mask to prevent arbitrary speculative reads |
| if (!headSegmentLengthOpnd |
| #if ENABLE_FAST_ARRAYBUFFER |
| && !baseValueType.IsLikelyOptimizedVirtualTypedArray() |
| #endif |
| ) |
| { |
| if (baseValueType.IsLikelyTypedArray()) |
| { |
                int lengthOffset = GetArrayOffsetOfLength(baseValueType);
| headSegmentLengthOpnd = IR::IndirOpnd::New(arrayOpnd, lengthOffset, TyUint32, m_func); |
| autoReuseHeadSegmentLengthOpnd.Initialize(headSegmentLengthOpnd, m_func); |
| } |
| else |
| { |
| headSegmentLengthOpnd = |
| IR::IndirOpnd::New(headSegmentOpnd, Js::SparseArraySegmentBase::GetOffsetOfLength(), TyUint32, m_func); |
| autoReuseHeadSegmentLengthOpnd.Initialize(headSegmentLengthOpnd, m_func); |
| } |
| } |
| IR::RegOpnd* localMaskOpnd = nullptr; |
| #if TARGET_64 |
| IR::Opnd* lengthOpnd = nullptr; |
| #if ENABLE_FAST_ARRAYBUFFER |
| if (baseValueType.IsLikelyOptimizedVirtualTypedArray()) |
| { |
| lengthOpnd = IR::IntConstOpnd::New(MAX_ASMJS_ARRAYBUFFER_LENGTH >> indirScale, TyMachReg, m_func); |
| } |
| else |
| #endif |
| { |
| AnalysisAssert(headSegmentLengthOpnd != nullptr); |
| lengthOpnd = IR::RegOpnd::New(headSegmentLengthOpnd->GetType(), m_func); |
| IR::Instr * instrMov = IR::Instr::New(Js::OpCode::MOV_TRUNC, lengthOpnd, headSegmentLengthOpnd, m_func); |
| instr->InsertBefore(instrMov); |
| LowererMD::Legalize(instrMov); |
| if (insertForSegmentLengthIncreased != nullptr) |
| { |
| IR::Instr * instrMov2 = IR::Instr::New(Js::OpCode::MOV_TRUNC, lengthOpnd, headSegmentLengthOpnd, m_func); |
| insertForSegmentLengthIncreased->InsertBefore(instrMov2); |
| LowererMD::Legalize(instrMov2); |
| } |
| |
| if (lengthOpnd->GetSize() != MachPtr) |
| { |
| lengthOpnd = lengthOpnd->UseWithNewType(TyMachPtr, this->m_func)->AsRegOpnd(); |
| } |
| } |
| |
| |
        // MOV_TRUNC indexValueReg, indexValue -- copy the index value, truncating to 32 bits
| IR::RegOpnd* indexValueRegOpnd = IR::RegOpnd::New(indexValueOpnd->GetType(), m_func); |
| |
| IR::Instr * instrMov = IR::Instr::New(Js::OpCode::MOV_TRUNC, indexValueRegOpnd, indexValueOpnd, m_func); |
| instr->InsertBefore(instrMov); |
| LowererMD::Legalize(instrMov); |
| |
| if (insertForSegmentLengthIncreased != nullptr) |
| { |
| IR::Instr * instrMov2 = IR::Instr::New(Js::OpCode::MOV_TRUNC, indexValueRegOpnd, indexValueOpnd, m_func); |
| insertForSegmentLengthIncreased->InsertBefore(instrMov2); |
| LowererMD::Legalize(instrMov2); |
| } |
| |
| if (indexValueRegOpnd->GetSize() != MachPtr) |
| { |
| indexValueRegOpnd = indexValueRegOpnd->UseWithNewType(TyMachPtr, this->m_func)->AsRegOpnd(); |
| } |
| |
| localMaskOpnd = IR::RegOpnd::New(TyMachPtr, m_func); |
| InsertSub(false, localMaskOpnd, indexValueRegOpnd, lengthOpnd, instr); |
| InsertShift(Js::OpCode::Shr_A, false, localMaskOpnd, localMaskOpnd, IR::IntConstOpnd::New(63, TyInt8, m_func), instr); |
| if (insertForSegmentLengthIncreased != nullptr) |
| { |
| InsertSub(false, localMaskOpnd, indexValueRegOpnd, lengthOpnd, insertForSegmentLengthIncreased); |
| InsertShift(Js::OpCode::Shr_A, false, localMaskOpnd, localMaskOpnd, IR::IntConstOpnd::New(63, TyInt8, m_func), insertForSegmentLengthIncreased); |
| } |
| #else |
| localMaskOpnd = IR::RegOpnd::New(TyInt32, m_func); |
| InsertSub(false, localMaskOpnd, indexValueOpnd, headSegmentLengthOpnd, instr); |
| InsertShift(Js::OpCode::Shr_A, false, localMaskOpnd, localMaskOpnd, IR::IntConstOpnd::New(31, TyInt8, m_func), instr); |
| if (insertForSegmentLengthIncreased != nullptr) |
| { |
| InsertSub(false, localMaskOpnd, indexValueOpnd, headSegmentLengthOpnd, insertForSegmentLengthIncreased); |
| InsertShift(Js::OpCode::Shr_A, false, localMaskOpnd, localMaskOpnd, IR::IntConstOpnd::New(31, TyInt8, m_func), insertForSegmentLengthIncreased); |
| } |
| #endif |
| |
| if ((IRType_IsNativeInt(indirType) || indirType == TyVar) && !isStore) |
| { |
| *maskOpnd = localMaskOpnd; |
| } |
| else |
| { |
| // for float values, do the poisoning before the load to avoid needing slow floating point conversions |
| IR::RegOpnd* loadAddr = IR::RegOpnd::New(TyMachPtr, m_func); |
| |
| #if _M_ARM32_OR_ARM64 |
| if (indirOpnd->GetIndexOpnd() != nullptr && indirOpnd->GetScale() > 0) |
| { |
| // We don't support encoding for LEA with scale on ARM/ARM64, so do the scale calculation as a separate instruction |
| IR::RegOpnd* fullIndexOpnd = IR::RegOpnd::New(indirOpnd->GetIndexOpnd()->GetType(), m_func); |
| InsertShift(Js::OpCode::Shl_A, false, fullIndexOpnd, indirOpnd->GetIndexOpnd(), IR::IntConstOpnd::New(indirOpnd->GetScale(), TyInt8, m_func), instr); |
| IR::IndirOpnd* newIndir = IR::IndirOpnd::New(indirOpnd->GetBaseOpnd(), fullIndexOpnd, indirType, m_func); |
| if (insertForSegmentLengthIncreased != nullptr) |
| { |
| InsertShift(Js::OpCode::Shl_A, false, fullIndexOpnd, indirOpnd->GetIndexOpnd(), IR::IntConstOpnd::New(indirOpnd->GetScale(), TyInt8, m_func), insertForSegmentLengthIncreased); |
| } |
| if (indirOpnd->GetOffset() != 0) |
| { |
| newIndir->SetOffset(indirOpnd->GetOffset()); |
| } |
| indirOpnd = newIndir; |
| } |
| #endif |
| IR::AutoReuseOpnd reuseIndir(indirOpnd, m_func); |
| |
| InsertLea(loadAddr, indirOpnd, instr); |
| InsertAnd(loadAddr, loadAddr, localMaskOpnd, instr); |
| if (insertForSegmentLengthIncreased != nullptr) |
| { |
| InsertLea(loadAddr, indirOpnd, insertForSegmentLengthIncreased); |
| InsertAnd(loadAddr, loadAddr, localMaskOpnd, insertForSegmentLengthIncreased); |
| |
| // We want to export a segmentLengthIncreasedLabel to the caller that is after the poisoning |
| // code, since that's also the code that generates indirOpnd in this case. |
| IR::LabelInstr* exportedSegmentLengthIncreasedLabel = IR::LabelInstr::New(Js::OpCode::Label, insertForSegmentLengthIncreased->m_func, (*pLabelSegmentLengthIncreased)->isOpHelper); |
| LABELNAME(exportedSegmentLengthIncreasedLabel); |
| insertForSegmentLengthIncreased->InsertBefore(exportedSegmentLengthIncreasedLabel); |
| *pLabelSegmentLengthIncreased = exportedSegmentLengthIncreasedLabel; |
| } |
| indirOpnd = IR::IndirOpnd::New(loadAddr, 0, indirType, m_func); |
| } |
| } |
| return indirOpnd; |
| } |
| |
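// Compare compareSrc against the missing-item sentinel for its type and branch accordingly. Float64 missing
// items are a distinguished NaN bit pattern, so the machine-dependent lowerer performs that comparison bitwise
// rather than with an ordinary floating-point compare.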
| IR::BranchInstr* |
| Lowerer::InsertMissingItemCompareBranch(IR::Opnd* compareSrc, Js::OpCode opcode, IR::LabelInstr* target, IR::Instr* insertBeforeInstr) |
| { |
| IR::Opnd* missingItemOpnd = GetMissingItemOpndForCompare(compareSrc->GetType(), m_func); |
| if (compareSrc->IsFloat64()) |
| { |
| Assert(compareSrc->IsRegOpnd() || compareSrc->IsIndirOpnd()); |
| return m_lowererMD.InsertMissingItemCompareBranch(compareSrc, missingItemOpnd, opcode, target, insertBeforeInstr); |
| } |
| else |
| { |
| Assert(compareSrc->IsInt32() || compareSrc->IsVar()); |
| return InsertCompareBranch(missingItemOpnd, compareSrc, opcode, target, insertBeforeInstr, true); |
| } |
| } |
| |
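// Untag a var operand into an int32 register. Int constants are materialized directly into a register;
// otherwise emit the machine-dependent untag sequence, with a tag check (branching to labelFail) unless
// the operand is already known to be a tagged int.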
| IR::RegOpnd * |
| Lowerer::GenerateUntagVar(IR::RegOpnd * opnd, IR::LabelInstr * labelFail, IR::Instr * insertBeforeInstr, bool generateTagCheck) |
| { |
| if (!opnd->IsVar()) |
| { |
| AssertMsg(opnd->GetSize() == 4, "This should be 32-bit wide"); |
| return opnd; |
| } |
| AssertMsg(!opnd->IsNotInt(), "An opnd we know is not an int should not try to untag it as it will always fail"); |
| if (opnd->m_sym->IsIntConst()) |
| { |
| int32 constValue = opnd->m_sym->GetIntConstValue(); |
| IR::IntConstOpnd* constOpnd = IR::IntConstOpnd::New(constValue, TyInt32, this->m_func); |
| IR::RegOpnd* regOpnd = IR::RegOpnd::New(TyInt32, this->m_func); |
| InsertMove(regOpnd, constOpnd, insertBeforeInstr); |
| return regOpnd; |
| } |
| return m_lowererMD.GenerateUntagVar(opnd, labelFail, insertBeforeInstr, generateTagCheck && !opnd->IsTaggedInt()); |
| } |
| |
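// Branch to isZeroLabel when opndSrc is zero:
//      TEST opndSrc, opndSrc
//      JEQ isZeroLabel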
| void |
| Lowerer::GenerateNotZeroTest( IR::Opnd * opndSrc, IR::LabelInstr * isZeroLabel, IR::Instr * insertBeforeInstr) |
| { |
| InsertTestBranch(opndSrc, opndSrc, Js::OpCode::BrEq_A, isZeroLabel, insertBeforeInstr); |
| } |
| |
| bool |
| Lowerer::GenerateFastStringLdElem(IR::Instr * ldElem, IR::LabelInstr * labelHelper, IR::LabelInstr * labelFallThru) |
| { |
| IR::IndirOpnd * indirOpnd = ldElem->GetSrc1()->AsIndirOpnd(); |
| IR::RegOpnd * baseOpnd = indirOpnd->GetBaseOpnd(); |
| |
| // don't generate the fast path if the instance is not likely string |
| if (!baseOpnd->GetValueType().IsLikelyString()) |
| { |
| return false; |
| } |
| |
| Assert(!baseOpnd->IsTaggedInt()); |
| |
| IR::RegOpnd * indexOpnd = indirOpnd->GetIndexOpnd(); |
| // Don't generate the fast path if the index operand is not likely int |
| if (indexOpnd && !indexOpnd->GetValueType().IsLikelyInt()) |
| { |
| return false; |
| } |
| |
| // Make sure the instance is a string |
| Assert(!indexOpnd || !indexOpnd->IsNotInt()); |
| GenerateStringTest(baseOpnd, ldElem, labelHelper); |
| |
| IR::Opnd * index32CmpOpnd; |
| IR::RegOpnd * bufferOpnd = IR::RegOpnd::New(TyMachPtr, this->m_func); |
| const IR::AutoReuseOpnd autoReuseBufferOpnd(bufferOpnd, m_func); |
| IR::IndirOpnd * charIndirOpnd; |
| if (indexOpnd) |
| { |
| // Untag the var and generate the indir into the string buffer |
| IR::RegOpnd * index32Opnd = GenerateUntagVar(indexOpnd, labelHelper, ldElem); |
| charIndirOpnd = IR::IndirOpnd::New(bufferOpnd, index32Opnd, 1, TyUint16, this->m_func); |
| index32CmpOpnd = index32Opnd; |
| } |
| else |
| { |
| // Just use the offset to indirect into the string buffer |
| charIndirOpnd = IR::IndirOpnd::New(bufferOpnd, indirOpnd->GetOffset() * sizeof(char16), TyUint16, this->m_func); |
| index32CmpOpnd = IR::IntConstOpnd::New((uint32)indirOpnd->GetOffset(), TyUint32, this->m_func); |
| } |
| |
| // Check if the index is in range of the string length |
| // CMP [baseOpnd + offset(length)], indexOpnd -- string length |
    //      JBE $helper        -- unsigned compare; string lengths are at most INT_MAX - 1,
    //                         -- so even a negative index (viewed as unsigned) fails the check
| IR::RegOpnd* lengthOpnd = IR::RegOpnd::New(TyUint32, m_func); |
| InsertMove(lengthOpnd, IR::IndirOpnd::New(baseOpnd, offsetof(Js::JavascriptString, m_charLength), TyUint32, this->m_func), ldElem); |
| InsertCompareBranch(lengthOpnd, index32CmpOpnd, Js::OpCode::BrLe_A, true, labelHelper, ldElem); |
| |
| // Load the string buffer and make sure it is not null |
| // MOV bufferOpnd, [baseOpnd + offset(m_pszValue)] |
| // TEST bufferOpnd, bufferOpnd |
| // JEQ $labelHelper |
| indirOpnd = IR::IndirOpnd::New(baseOpnd, offsetof(Js::JavascriptString, m_pszValue), TyMachPtr, this->m_func); |
| |
| InsertMove(bufferOpnd, indirOpnd, ldElem); |
| GenerateNotZeroTest(bufferOpnd, labelHelper, ldElem); |
| |
| IR::RegOpnd* maskOpnd = nullptr; |
| if (CONFIG_FLAG_RELEASE(PoisonStringLoad)) |
| { |
| // Mask off the sign before loading so that poisoning will work for negative indices |
| if (index32CmpOpnd->IsIntConstOpnd()) |
| { |
| charIndirOpnd->SetOffset((index32CmpOpnd->AsIntConstOpnd()->AsUint32() & INT32_MAX) * sizeof(char16)); |
| } |
| else |
| { |
| InsertAnd(index32CmpOpnd, index32CmpOpnd, IR::IntConstOpnd::New(INT32_MAX, TyInt32, m_func), ldElem); |
| } |
| |
| // All bits in mask will be 1 for a valid index or 0 for an OOB index |
| maskOpnd = IR::RegOpnd::New(TyInt32, m_func); |
| InsertSub(false, maskOpnd, index32CmpOpnd, lengthOpnd, ldElem); |
| InsertShift(Js::OpCode::Shr_A, false, maskOpnd, maskOpnd, IR::IntConstOpnd::New(31, TyInt8, m_func), ldElem); |
| } |
| |
| // Load the character and check if it is 7-bit ASCII (which we have a cache for)
| // MOV charOpnd, [bufferOpnd + index32Opnd] |
| // CMP charOpnd, 0x80 |
| // JAE $helper |
| IR::RegOpnd * charOpnd = IR::RegOpnd::New(TyUint32, this->m_func); |
| const IR::AutoReuseOpnd autoReuseCharOpnd(charOpnd, m_func); |
| InsertMove(charOpnd, charIndirOpnd, ldElem); |
| |
| if (CONFIG_FLAG_RELEASE(PoisonStringLoad)) |
| { |
| InsertAnd(charOpnd, charOpnd, maskOpnd, ldElem); |
| } |
| |
| InsertCompareBranch(charOpnd, IR::IntConstOpnd::New(Js::CharStringCache::CharStringCacheSize, TyUint16, this->m_func), |
| Js::OpCode::BrGe_A, true, labelHelper, ldElem); |
| |
| // Load the string from the cache
| // MOV charStringCache, <charStringCache address>
| // MOV stringOpnd, [charStringCache + charOpnd * PtrSize]
| |
| IR::RegOpnd * cacheOpnd = IR::RegOpnd::New(TyMachPtr, this->m_func); |
| const IR::AutoReuseOpnd autoReuseCacheOpnd(cacheOpnd, m_func); |
| Assert(Js::JavascriptLibrary::GetCharStringCacheAOffset() == Js::JavascriptLibrary::GetCharStringCacheOffset()); |
| InsertMove(cacheOpnd, this->LoadLibraryValueOpnd(ldElem, LibraryValue::ValueCharStringCache), ldElem); |
| |
| // Check if we have created the string or not |
| // TEST stringOpnd, stringOpnd |
| // JE $helper |
| IR::RegOpnd * stringOpnd = IR::RegOpnd::New(TyMachPtr, this->m_func); |
| const IR::AutoReuseOpnd autoReuseStringOpnd(stringOpnd, m_func); |
| InsertMove(stringOpnd, IR::IndirOpnd::New(cacheOpnd, charOpnd, this->m_lowererMD.GetDefaultIndirScale(), TyVar, this->m_func), ldElem); |
| |
| GenerateNotZeroTest(stringOpnd, labelHelper, ldElem); |
| |
| InsertMove(ldElem->GetDst(), stringOpnd, ldElem); |
| InsertBranch(Js::OpCode::Br, labelFallThru, ldElem); |
| |
| return true; |
| } |
| |
| bool |
| Lowerer::GenerateFastLdElemI(IR::Instr *& ldElem, bool *instrIsInHelperBlockRef) |
| { |
| Assert(instrIsInHelperBlockRef); |
| bool &instrIsInHelperBlock = *instrIsInHelperBlockRef; |
| instrIsInHelperBlock = false; |
| |
| IR::LabelInstr * labelHelper; |
| IR::LabelInstr * labelFallThru; |
| IR::LabelInstr * labelBailOut = nullptr; |
| IR::LabelInstr * labelMissingNative = nullptr; |
| IR::Opnd *src1 = ldElem->GetSrc1(); |
| |
| AssertMsg(src1->IsIndirOpnd(), "Expected indirOpnd on LdElementI"); |
| |
| IR::IndirOpnd * indirOpnd = src1->AsIndirOpnd(); |
| |
| // From FastElemICommon: |
| // TEST base, AtomTag -- check base not tagged int |
| // JNE $helper |
| // MOV r1, [base + offset(type)] -- check base isArray |
| // CMP [r1 + offset(typeId)], TypeIds_Array |
| // JNE $helper |
| // TEST index, 1 -- index tagged int |
| // JEQ $helper |
| // MOV r2, index |
| // SAR r2, Js::VarTag_Shift -- remove atom tag
| // JS $helper -- exclude negative index |
| // MOV r4, [base + offset(head)] |
| // CMP r2, [r4 + offset(length)] -- bounds check |
| // JAE $helper |
| // MOV r3, [r4 + offset(elements)] |
| |
| // Generated here: |
| // MOV dst, [r3 + r2] |
| // TEST dst, dst |
| // JNE $fallthrough |
| |
| if(ldElem->m_opcode == Js::OpCode::LdMethodElem && indirOpnd->GetBaseOpnd()->GetValueType().IsLikelyOptimizedTypedArray()) |
| { |
| // Typed arrays don't return objects, so it's not worth generating a fast path for LdMethodElem. Calling the helper also |
| // generates a better error message. Skip the fast path and just generate a helper call. |
| return true; |
| } |
| |
| labelFallThru = ldElem->GetOrCreateContinueLabel(); |
| labelHelper = IR::LabelInstr::New(Js::OpCode::Label, this->m_func, true); |
| // If we know for sure (based on flow graph) we're loading from the arguments object, then ignore the (path-based) profile info. |
| bool isNativeArrayLoad = !ldElem->DoStackArgsOpt() && indirOpnd->GetBaseOpnd()->GetValueType().IsLikelyNativeArray(); |
| bool needMissingValueCheck = true; |
| bool emittedFastPath = false; |
| bool emitBailout = false; |
| |
| if (ldElem->DoStackArgsOpt()) |
| { |
| emittedFastPath = GenerateFastArgumentsLdElemI(ldElem, labelFallThru); |
| emitBailout = true; |
| } |
| else if (GenerateFastStringLdElem(ldElem, labelHelper, labelFallThru)) |
| { |
| emittedFastPath = true; |
| } |
| else |
| { |
| IR::LabelInstr * labelCantUseArray = labelHelper; |
| if (isNativeArrayLoad) |
| { |
| if (ldElem->GetDst()->GetType() == TyVar) |
| { |
| // Skip the fast path and just generate a helper call |
| return true; |
| } |
| |
| // Specialized native array lowering for LdElem requires that it is profiled. When not profiled, GlobOpt should not |
| // have specialized it. |
| Assert(ldElem->IsProfiledInstr()); |
| |
| labelBailOut = IR::LabelInstr::New(Js::OpCode::Label, this->m_func, true); |
| labelCantUseArray = labelBailOut; |
| } |
| Js::FldInfoFlags flags = Js::FldInfo_NoInfo; |
| if (ldElem->IsProfiledInstr()) |
| { |
| flags = ldElem->AsProfiledInstr()->u.ldElemInfo->flags; |
| } |
| bool isTypedArrayElement, isStringIndex, indirOpndOverflowed = false; |
| IR::Opnd* maskOpnd = nullptr; |
| indirOpnd = |
| GenerateFastElemICommon( |
| ldElem, |
| false, |
| src1->AsIndirOpnd(), |
| labelHelper, |
| labelCantUseArray, |
| labelFallThru, |
| &isTypedArrayElement, |
| &isStringIndex, |
| &emitBailout, |
| &maskOpnd, |
| nullptr, /* pLabelSegmentLengthIncreased */ |
| true, /* checkArrayLengthOverflow */ |
| false, /* forceGenerateFastPath */ |
| false, /* returnLength */ |
| nullptr, /* bailOutLabelInstr */ |
| &indirOpndOverflowed, |
| flags); |
| |
| IR::Opnd *dst = ldElem->GetDst(); |
| IRType dstType = dst->AsRegOpnd()->GetType(); |
| |
| // The index is negative or not int. |
| if (indirOpnd == nullptr) |
| { |
| // could have bailout kind BailOutOnArrayAccessHelperCall if indirOpnd overflows |
| Assert(!(ldElem->HasBailOutInfo() && ldElem->GetBailOutKind() & IR::BailOutOnArrayAccessHelperCall) || indirOpndOverflowed); |
| |
| // Don't take the fast path without bailout info, because the base might not actually be a TypedArray
| if (indirOpndOverflowed && ldElem->HasBailOutInfo()) |
| { |
| bool bailoutForOpndOverflow = false; |
| const IR::BailOutKind bailOutKind = ldElem->GetBailOutKind(); |
| |
| // Return undefined for a typed array when the load's dst is a var; bail out otherwise
| if ((bailOutKind & ~IR::BailOutKindBits) == IR::BailOutConventionalTypedArrayAccessOnly) |
| { |
| if (dst->IsVar()) |
| { |
| // Return undefined when indirOpnd overflows, which is consistent with the interpreter's behavior
| IR::Opnd * undefinedOpnd = this->LoadLibraryValueOpnd(ldElem, LibraryValue::ValueUndefined); |
| InsertMove(dst, undefinedOpnd, ldElem); |
| |
| ldElem->FreeSrc1(); |
| ldElem->FreeDst(); |
| ldElem->Remove(); |
| |
| emittedFastPath = true; |
| } |
| else |
| { |
| bailoutForOpndOverflow = true; |
| } |
| } |
| |
| if (bailoutForOpndOverflow || (bailOutKind & (IR::BailOutConventionalNativeArrayAccessOnly | IR::BailOutOnArrayAccessHelperCall))) |
| { |
| IR::Opnd * constOpnd = nullptr; |
| if (dst->IsFloat()) |
| { |
| constOpnd = IR::FloatConstOpnd::New(Js::JavascriptNumber::NaN, TyFloat64, m_func); |
| } |
| else |
| { |
| constOpnd = IR::IntConstOpnd::New(0, TyInt32, this->m_func, true); |
| } |
| InsertMove(dst, constOpnd, ldElem); |
| |
| ldElem->FreeSrc1(); |
| ldElem->FreeDst(); |
| GenerateBailOut(ldElem, nullptr, nullptr); |
| emittedFastPath = true; |
| } |
| |
| return !emittedFastPath; |
| } |
| // The global optimizer should never type specialize a LdElem whose index is not an int or is an integer constant
| // with a negative value. That would force an unconditional bail out on the main code path.
| else if (dst->IsVar()) |
| { |
| if (PHASE_TRACE(Js::TypedArrayTypeSpecPhase, this->m_func) && PHASE_TRACE(Js::LowererPhase, this->m_func)) |
| { |
| char16 debugStringBuffer[MAX_FUNCTION_BODY_DEBUG_STRING_SIZE]; |
| Output::Print(_u("Typed Array Lowering: function: %s (%s): instr %s, not specialized by glob opt due to negative or not likely int index.\n"), |
| this->m_func->GetJITFunctionBody()->GetDisplayName(), |
| this->m_func->GetDebugNumberSet(debugStringBuffer), |
| Js::OpCodeUtil::GetOpCodeName(ldElem->m_opcode)); |
| Output::Flush(); |
| } |
| |
| // We must be dealing with some unconventional index value. Don't emit fast path, but go directly to helper. |
| emittedFastPath = false; |
| return true; |
| } |
| else |
| { |
| AssertMsg(false, "Global optimizer shouldn't have specialized this instruction."); |
| Assert(dst->IsRegOpnd()); |
| |
| // If global optimizer failed to notice the unconventional index and type specialized the dst, |
| // there is nothing to do but bail out. This could happen if global optimizer's information based |
| // on value tracking fails to recognize a non-integer index or a constant int index that is negative. |
| // The bailout below ensures that we behave correctly in retail builds even under |
| // these (unlikely) conditions. To satisfy the downstream code we must populate the type specialized operand |
| // with some made up values, even though we will unconditionally bail out here and the values will never be |
| // used. |
| IR::IntConstOpnd *constOpnd = IR::IntConstOpnd::New(0, TyInt32, this->m_func, true); |
| InsertMove(dst, constOpnd, ldElem); |
| |
| ldElem->FreeSrc1(); |
| ldElem->FreeDst(); |
| GenerateBailOut(ldElem, nullptr, nullptr); |
| return false; |
| } |
| } |
| |
| const IR::AutoReuseOpnd autoReuseIndirOpnd(indirOpnd, m_func); |
| const ValueType baseValueType(src1->AsIndirOpnd()->GetBaseOpnd()->GetValueType()); |
| |
| if ((ldElem->HasBailOutInfo() && |
| ldElem->GetByteCodeOffset() != Js::Constants::NoByteCodeOffset && |
| ldElem->GetBailOutInfo()->bailOutOffset <= ldElem->GetByteCodeOffset() && |
| dst->IsEqual(src1->AsIndirOpnd()->GetBaseOpnd())) || |
| (src1->AsIndirOpnd()->GetIndexOpnd() && dst->IsEqual(src1->AsIndirOpnd()->GetIndexOpnd()))) |
| { |
| // This is a pre-op bailout where the dst is the same as one of the srcs. The dst may be trashed before bailing out, |
| // but since the operation will be processed again in the interpreter, src values need to be kept intact. Use a |
| // temporary dst until after the operation is complete. |
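| // For example (illustrative IR): "s1 = LdElemI_A [s1 + s2]" becomes "tmp = LdElemI_A [s1 + s2]",
| // with "s1 = Ld_A tmp" placed after the fall-through label, so the base in s1 survives a bailout.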
| IR::Instr *instrSink = ldElem->SinkDst(Js::OpCode::Ld_A); |
| |
| // The sink instruction needs to be on the fall-through path |
| instrSink->Unlink(); |
| labelFallThru->InsertAfter(instrSink); |
| |
| LowererMD::ChangeToAssign(instrSink); |
| dst = ldElem->GetDst(); |
| } |
| |
| if (isTypedArrayElement) |
| { |
| // For typedArrays, convert the loaded element to the appropriate type |
| IR::RegOpnd *reg; |
| IR::AutoReuseOpnd autoReuseReg; |
| |
| Assert(dst->IsRegOpnd()); |
| |
| if(indirOpnd->IsFloat()) |
| { |
| AssertMsg((dstType == TyFloat64) || (dstType == TyVar), "For Float32Array LdElemI's dst should be specialized to TyFloat64 or not at all."); |
| |
| if(indirOpnd->IsFloat32()) |
| { |
| // MOVSS reg32.f32, indirOpnd.f32 |
| IR::RegOpnd *reg32 = IR::RegOpnd::New(TyFloat32, this->m_func); |
| const IR::AutoReuseOpnd autoReuseReg32(reg32, m_func); |
| InsertMove(reg32, indirOpnd, ldElem); |
| |
| // CVTPS2PD dst/reg.f64, reg32.f64 |
| reg = dstType == TyFloat64 ? dst->AsRegOpnd() : IR::RegOpnd::New(TyFloat64, this->m_func); |
| autoReuseReg.Initialize(reg, m_func); |
| InsertConvertFloat32ToFloat64(reg, reg32, ldElem); |
| } |
| else |
| { |
| Assert(indirOpnd->IsFloat64()); |
| |
| // MOVSD dst/reg.f64, indirOpnd.f64 |
| reg = dstType == TyFloat64 ? dst->AsRegOpnd() : IR::RegOpnd::New(TyFloat64, this->m_func); |
| autoReuseReg.Initialize(reg, m_func); |
| InsertMove(reg, indirOpnd, ldElem); |
| } |
| |
| if (dstType != TyFloat64) |
| { |
| // Convert reg.f64 to var |
| m_lowererMD.SaveDoubleToVar(dst->AsRegOpnd(), reg, ldElem, ldElem); |
| } |
| |
| #if FLOATVAR |
| // For NaNs, go to the helper to guarantee we don't have an illegal NaN |
| // TODO(magardn): move this to MD code. |
| #if _M_X64 |
| // UCOMISD reg, reg |
| { |
| IR::Instr *const instr = IR::Instr::New(Js::OpCode::UCOMISD, this->m_func); |
| instr->SetSrc1(reg); |
| instr->SetSrc2(reg); |
| ldElem->InsertBefore(instr); |
| } |
| |
| // JP $helper |
| { |
| IR::Instr *const instr = IR::BranchInstr::New(Js::OpCode::JP, labelHelper, this->m_func); |
| ldElem->InsertBefore(instr); |
| } |
| #elif _M_ARM64 |
| // FCMP reg, reg |
| { |
| IR::Instr *const instr = IR::Instr::New(Js::OpCode::FCMP, this->m_func); |
| instr->SetSrc1(reg); |
| instr->SetSrc2(reg); |
| ldElem->InsertBefore(instr); |
| } |
| |
| // BVS $helper |
| { |
| IR::Instr *const instr = IR::BranchInstr::New(Js::OpCode::BVS, labelHelper, this->m_func); |
| ldElem->InsertBefore(instr); |
| } |
| #endif |
| #endif |
| |
| if(dstType == TyFloat64) |
| { |
| emitBailout = true; |
| } |
| } |
| else |
| { |
| AssertMsg((dstType == TyInt32) || (dstType == TyVar), "For Int/UintArray LdElemI's dst should be specialized to TyInt32 or not at all."); |
| |
| reg = dstType == TyInt32 ? dst->AsRegOpnd() : IR::RegOpnd::New(TyInt32, this->m_func); |
| autoReuseReg.Initialize(reg, m_func); |
| |
| // Int32 and Uint32 arrays could overflow an int31, but the others can't |
| if (indirOpnd->GetType() != TyUint32 |
| #if !INT32VAR |
| && indirOpnd->GetType() != TyInt32 |
| #endif |
| ) |
| { |
| reg->SetValueType(ValueType::GetTaggedInt()); // Fits as a tagged-int |
| } |
| |
| // MOV/MOVZX/MOVSX dst/reg.int32, IndirOpnd.type |
| IR::Instr* instrMov = InsertMove(reg, indirOpnd, ldElem); |
| if (maskOpnd) |
| { |
| #if TARGET_64 |
| if (maskOpnd->GetSize() != reg->GetType()) |
| { |
| maskOpnd = maskOpnd->UseWithNewType(reg->GetType(), m_func)->AsRegOpnd(); |
| } |
| #endif |
| instrMov = InsertAnd(reg, reg, maskOpnd, ldElem); |
| } |
| |
| if (dstType == TyInt32) |
| { |
| instrMov->dstIsTempNumber = ldElem->dstIsTempNumber; |
| instrMov->dstIsTempNumberTransferred = ldElem->dstIsTempNumberTransferred; |
| |
| if (indirOpnd->GetType() == TyUint32) |
| { |
| // TEST dst, dst |
| // JSB $helper (bailout) |
| InsertCompareBranch( |
| reg, |
| IR::IntConstOpnd::New(0, TyUint32, this->m_func, /* dontEncode = */ true), |
| Js::OpCode::BrLt_A, |
| labelHelper, |
| ldElem); |
| } |
| |
| emitBailout = true; |
| } |
| else |
| { |
| // MOV dst, reg |
| IR::Instr *const instr = IR::Instr::New(Js::OpCode::ToVar, dst, reg, this->m_func); |
| instr->dstIsTempNumber = ldElem->dstIsTempNumber; |
| instr->dstIsTempNumberTransferred = ldElem->dstIsTempNumberTransferred; |
| ldElem->InsertBefore(instr); |
| |
| // Convert dst to var |
| m_lowererMD.EmitLoadVar(instr, /* isFromUint32 = */ (indirOpnd->GetType() == TyUint32)); |
| } |
| } |
| |
| // JMP $fallthrough |
| InsertBranch(Js::OpCode::Br, labelFallThru, ldElem); |
| |
| emittedFastPath = true; |
| |
| if (PHASE_TRACE(Js::TypedArrayTypeSpecPhase, this->m_func) && PHASE_TRACE(Js::LowererPhase, this->m_func)) |
| { |
| char baseValueTypeStr[VALUE_TYPE_MAX_STRING_SIZE]; |
| baseValueType.ToString(baseValueTypeStr); |
| char16 debugStringBuffer[MAX_FUNCTION_BODY_DEBUG_STRING_SIZE]; |
| Output::Print(_u("Typed Array Lowering: function: %s (%s), instr: %s, base value type: %S, %s."), |
| this->m_func->GetJITFunctionBody()->GetDisplayName(), |
| this->m_func->GetDebugNumberSet(debugStringBuffer), |
| Js::OpCodeUtil::GetOpCodeName(ldElem->m_opcode), |
| baseValueTypeStr, |
| (!dst->IsVar() ? _u("specialized") : _u("not specialized"))); |
| Output::Print(_u("\n")); |
| Output::Flush(); |
| } |
| } |
| else |
| { |
| // MOV dst, indirOpnd |
| InsertMove(dst, indirOpnd, ldElem); |
| if (maskOpnd) |
| { |
| #if TARGET_64 |
| if (maskOpnd->GetSize() != dst->GetType()) |
| { |
| maskOpnd = maskOpnd->UseWithNewType(dst->GetType(), m_func)->AsRegOpnd(); |
| } |
| #endif |
| InsertAnd(dst, dst, maskOpnd, ldElem); |
| } |
| |
| // The string index fast path does not operate on index properties (we don't get a PropertyString in that case), so |
| // we don't need to do any further checks in that case |
| |
| // For LdMethodElem, if the loaded value is a tagged number, the error message generated by the helper call is |
| // better than if we were to just try to call the number. Also, the call arguments need to be evaluated before |
| // throwing the error, so just test whether it's an object and jump to helper if it's not. |
| const bool needObjectTest = !isStringIndex && !isNativeArrayLoad && ldElem->m_opcode == Js::OpCode::LdMethodElem; |
| needMissingValueCheck = |
| !isStringIndex && !(baseValueType.IsArrayOrObjectWithArray() && baseValueType.HasNoMissingValues()); |
| if(needMissingValueCheck) |
| { |
| // TEST dst, dst |
| // JEQ $helper | JNE $fallthrough |
| InsertMissingItemCompareBranch( |
| dst, |
| needObjectTest ? Js::OpCode::BrEq_A : Js::OpCode::BrNeq_A, |
| needObjectTest ? labelHelper : labelFallThru, |
| ldElem); |
| |
| if (isNativeArrayLoad) |
| { |
| Assert(!needObjectTest); |
| Assert(labelHelper != labelBailOut); |
| if(ldElem->AsProfiledInstr()->u.ldElemInfo->GetElementType().HasBeenUndefined()) |
| { |
| // We're going to bail out trying to load "missing value" into a type-spec'd opnd. |
| // Branch to a point where we'll convert the array so that we don't keep bailing here. |
| // (Gappy arrays are not well-suited to nativeness.) |
| labelMissingNative = IR::LabelInstr::New(Js::OpCode::Label, m_func, true); |
| InsertBranch(Js::OpCode::Br, labelMissingNative, ldElem); |
| } |
| else |
| { |
| // If the value has not been profiled to be undefined at some point, jump directly to bail out |
| InsertBranch(Js::OpCode::Br, labelBailOut, ldElem); |
| } |
| } |
| } |
| if(needObjectTest) |
| { |
| // GenerateObjectTest(dst) |
| // JIsObject $fallthrough |
| m_lowererMD.GenerateObjectTest(dst, ldElem, labelFallThru, true); |
| } |
| else if(!needMissingValueCheck) |
| { |
| // JMP $fallthrough |
| InsertBranch(Js::OpCode::Br, labelFallThru, ldElem); |
| } |
| |
| emittedFastPath = true; |
| } |
| } |
| // $helper: |
| // bailout or caller generated helper call |
| // $fallthru: |
| |
| if (!emittedFastPath) |
| { |
| labelHelper->isOpHelper = false; |
| } |
| |
| ldElem->InsertBefore(labelHelper); |
| instrIsInHelperBlock = true; |
| |
| if (isNativeArrayLoad) |
| { |
| Assert(ldElem->HasBailOutInfo()); |
| Assert(labelHelper != labelBailOut); |
| |
| // Transform the original instr: |
| // |
| // $helper: |
| // dst = LdElemI_A src (BailOut) |
| // $fallthrough: |
| // |
| // to: |
| // |
| // b $fallthrough <--- we get here if we loaded a valid element directly
| // $helper: |
| // dst = LdElemI_A src |
| // cmp dst, MissingItem |
| // bne $fallthrough |
| // $bailout: |
| // BailOut |
| // $fallthrough: |
| |
| LowerOneBailOutKind(ldElem, IR::BailOutConventionalNativeArrayAccessOnly, instrIsInHelperBlock); |
| IR::Instr *const insertBeforeInstr = ldElem->m_next; |
| |
| // Do missing value check on value returned from helper so that we don't have to check the index against |
| // array length. (We already checked it above against the segment length.) |
| |
| bool hasBeenUndefined = ldElem->AsProfiledInstr()->u.ldElemInfo->GetElementType().HasBeenUndefined(); |
| if (hasBeenUndefined) |
| { |
| if(!emitBailout) |
| { |
| if (labelMissingNative == nullptr) |
| { |
| labelMissingNative = IR::LabelInstr::New(Js::OpCode::Label, m_func, true); |
| #if DBG |
| labelMissingNative->m_noLazyHelperAssert = true; |
| #endif |
| } |
| |
| InsertMissingItemCompareBranch(ldElem->GetDst(), Js::OpCode::BrEq_A, labelMissingNative, insertBeforeInstr); |
| } |
| InsertBranch(Js::OpCode::Br, labelFallThru, insertBeforeInstr); |
| if(labelMissingNative) |
| { |
| // We're going to bail out on a load from a gap, but convert the array to Var first, so we don't just |
| // bail here over and over. Gappy arrays are not well suited to nativeness. |
| // NOTE: only emit this call if the profile tells us that this has happened before ("hasBeenUndefined"). |
| // Emitting this in Navier-Stokes brutalizes the score. |
| insertBeforeInstr->InsertBefore(labelMissingNative); |
| IR::JnHelperMethod helperMethod; |
| indirOpnd = ldElem->GetSrc1()->AsIndirOpnd(); |
| if (indirOpnd->GetBaseOpnd()->GetValueType().HasIntElements()) |
| { |
| helperMethod = IR::HelperIntArr_ToVarArray; |
| } |
| else |
| { |
| Assert(indirOpnd->GetBaseOpnd()->GetValueType().HasFloatElements()); |
| helperMethod = IR::HelperFloatArr_ToVarArray; |
| } |
| m_lowererMD.LoadHelperArgument(insertBeforeInstr, indirOpnd->GetBaseOpnd()); |
| IR::Instr *instrHelper = IR::Instr::New(Js::OpCode::Call, m_func); |
| instrHelper->SetSrc1(IR::HelperCallOpnd::New(helperMethod, m_func)); |
| insertBeforeInstr->InsertBefore(instrHelper); |
| m_lowererMD.LowerCall(instrHelper, 0); |
| } |
| } |
| else |
| { |
| if(!emitBailout) |
| { |
| InsertMissingItemCompareBranch(ldElem->GetDst(), Js::OpCode::BrEq_A, labelBailOut, insertBeforeInstr); |
| } |
| |
| InsertBranch(Js::OpCode::Br, labelFallThru, insertBeforeInstr); |
| } |
| |
| insertBeforeInstr->InsertBefore(labelBailOut); |
| } |
| |
| if (emitBailout) |
| { |
| ldElem->UnlinkSrc1(); |
| ldElem->UnlinkDst(); |
| GenerateBailOut(ldElem, nullptr, nullptr); |
| } |
| |
| return !emitBailout; |
| } |
| |
| IR::Opnd * |
| Lowerer::GetMissingItemOpnd(IRType type, Func *func) |
| { |
| if (type == TyVar) |
| { |
| return IR::AddrOpnd::New(Js::JavascriptArray::MissingItem, IR::AddrOpndKindConstantAddress, func, true); |
| } |
| if (type == TyInt32) |
| { |
| return IR::IntConstOpnd::New(Js::JavascriptNativeIntArray::MissingItem, TyInt32, func, true); |
| } |
| AssertMsg(false, "Only expecting TyVar and TyInt32 in Lowerer::GetMissingItemOpnd"); |
| __assume(false); |
| } |
| |
| IR::Opnd* |
| Lowerer::GetMissingItemOpndForAssignment(IRType type, Func *func) |
| { |
| switch (type) |
| { |
| case TyVar: |
| case TyInt32: |
| return GetMissingItemOpnd(type, func); |
| |
| case TyFloat64: |
| return IR::MemRefOpnd::New(func->GetThreadContextInfo()->GetNativeFloatArrayMissingItemAddr(), TyFloat64, func); |
| |
| default: |
| AnalysisAssertMsg(false, "Unexpected type in Lowerer::GetMissingItemOpndForAssignment"); |
| __assume(false); |
| } |
| } |
| |
| IR::Opnd * |
| Lowerer::GetMissingItemOpndForCompare(IRType type, Func *func) |
| { |
| switch (type) |
| { |
| case TyVar: |
| case TyInt32: |
| return GetMissingItemOpnd(type, func); |
| |
| case TyFloat64: |
| #if TARGET_64 |
| return IR::MemRefOpnd::New(func->GetThreadContextInfo()->GetNativeFloatArrayMissingItemAddr(), TyUint64, func); |
| #else |
| return IR::MemRefOpnd::New(func->GetThreadContextInfo()->GetNativeFloatArrayMissingItemAddr(), TyUint32, func); |
| #endif |
| |
| default: |
| AnalysisAssertMsg(false, "Unexpected type in Lowerer::GetMissingItemOpndForCompare"); |
| __assume(false); |
| } |
| } |
| |
| bool |
| Lowerer::GenerateFastStElemI(IR::Instr *& stElem, bool *instrIsInHelperBlockRef) |
| { |
| Assert(instrIsInHelperBlockRef); |
| bool &instrIsInHelperBlock = *instrIsInHelperBlockRef; |
| instrIsInHelperBlock = false; |
| |
| IR::LabelInstr * labelHelper; |
| IR::LabelInstr * labelSegmentLengthIncreased; |
| IR::LabelInstr * labelFallThru; |
| IR::LabelInstr * labelBailOut = nullptr; |
| IR::Opnd *dst = stElem->GetDst(); |
| IR::IndirOpnd * indirOpnd = dst->AsIndirOpnd(); |
| |
| AssertMsg(dst->IsIndirOpnd(), "Expected indirOpnd on StElementI"); |
| |
| // From FastElemICommon: |
| // TEST base, AtomTag -- check base not tagged int |
| // JNE $helper |
| // MOV r1, [base + offset(type)] -- check base isArray |
| // CMP [r1 + offset(typeId)], TypeIds_Array |
| // JNE $helper |
| // TEST index, 1 -- index tagged int |
| // JEQ $helper |
| // MOV r2, index |
| // SAR r2, Js::VarTag_Shift -- remove atom tag |
| // JS $helper -- exclude negative index |
| // MOV r4, [base + offset(head)] |
| // CMP r2, [r4 + offset(length)] -- bounds check |
| // JB $done |
| // CMP r2, [r4 + offset(size)] -- chunk has room? |
| // JAE $helper |
| // LEA r5, [r2 + 1] |
| // MOV [r4 + offset(length)], r5 -- update length on chunk |
| // CMP r5, [base + offset(length)] |
| // JBE $done |
| // MOV [base + offset(length)], r5 -- update length on array |
| // $done |
| // LEA r3, [r4 + offset(elements)] |
| |
| // Generated here. |
| // MOV [r3 + r2], src |
| |
| labelFallThru = stElem->GetOrCreateContinueLabel(); |
| labelHelper = IR::LabelInstr::New(Js::OpCode::Label, this->m_func, true); |
| bool emitBailout = false; |
| |
| bool isNativeArrayStore = indirOpnd->GetBaseOpnd()->GetValueType().IsLikelyNativeArray(); |
| IR::LabelInstr * labelCantUseArray = labelHelper; |
| if (isNativeArrayStore) |
| { |
| if (stElem->GetSrc1()->GetType() != GetArrayIndirType(indirOpnd->GetBaseOpnd()->GetValueType())) |
| { |
| // Skip the fast path and just generate a helper call |
| return true; |
| } |
| |
| if(stElem->HasBailOutInfo()) |
| { |
| const IR::BailOutKind bailOutKind = stElem->GetBailOutKind(); |
| if (bailOutKind & IR::BailOutConventionalNativeArrayAccessOnly) |
| { |
| labelBailOut = IR::LabelInstr::New(Js::OpCode::Label, this->m_func, true); |
| labelCantUseArray = labelBailOut; |
| } |
| } |
| } |
| |
| Js::FldInfoFlags flags = Js::FldInfo_NoInfo; |
| if (stElem->IsProfiledInstr()) |
| { |
| flags = stElem->AsProfiledInstr()->u.stElemInfo->flags; |
| } |
| bool isTypedArrayElement, isStringIndex, indirOpndOverflowed = false; |
| IR::Opnd* maskOpnd = nullptr; |
| indirOpnd = |
| GenerateFastElemICommon( |
| stElem, |
| true, |
| indirOpnd, |
| labelHelper, |
| labelCantUseArray, |
| labelFallThru, |
| &isTypedArrayElement, |
| &isStringIndex, |
| &emitBailout, |
| &maskOpnd, |
| &labelSegmentLengthIncreased, |
| true, /* checkArrayLengthOverflow */ |
| false, /* forceGenerateFastPath */ |
| false, /* returnLength */ |
| nullptr, /* bailOutLabelInstr */ |
| &indirOpndOverflowed, |
| flags); |
| |
| IR::Opnd *src = stElem->GetSrc1(); |
| const IR::AutoReuseOpnd autoReuseSrc(src, m_func); |
| |
| // The index is negative or not int. |
| if (indirOpnd == nullptr) |
| { |
| Assert(!(stElem->HasBailOutInfo() && stElem->GetBailOutKind() & IR::BailOutOnArrayAccessHelperCall) || indirOpndOverflowed); |
| |
| if (indirOpndOverflowed && stElem->HasBailOutInfo()) |
| { |
| bool emittedFastPath = false; |
| |
| const IR::BailOutKind bailOutKind = stElem->GetBailOutKind(); |
| |
| // Ignore the StElemI when indirOpnd overflows, but only for typed arrays, which is consistent with the interpreter's behavior
| if ((bailOutKind & ~IR::BailOutKindBits) == IR::BailOutConventionalTypedArrayAccessOnly) |
| { |
| stElem->FreeSrc1(); |
| stElem->FreeDst(); |
| stElem->Remove(); |
| emittedFastPath = true; |
| } |
| |
| if (!emittedFastPath && (bailOutKind & (IR::BailOutConventionalNativeArrayAccessOnly | IR::BailOutOnArrayAccessHelperCall))) |
| { |
| stElem->FreeSrc1(); |
| stElem->FreeDst(); |
| GenerateBailOut(stElem, nullptr, nullptr); |
| emittedFastPath = true; |
| } |
| |
| return !emittedFastPath; |
| } |
| // The global optimizer should never type specialize a StElem for which we know the index is not an int or is a negative
| // int constant. That would result in an unconditional bailout on the main code path.
| else if (src->IsVar()) |
| { |
| if (PHASE_TRACE(Js::TypedArrayTypeSpecPhase, this->m_func) && PHASE_TRACE(Js::LowererPhase, this->m_func)) |
| { |
| char16 debugStringBuffer[MAX_FUNCTION_BODY_DEBUG_STRING_SIZE]; |
| Output::Print(_u("Typed Array Lowering: function: %s (%s): instr %s, not specialized by glob opt due to negative or not likely int index.\n"), |
| this->m_func->GetJITFunctionBody()->GetDisplayName(), |
| this->m_func->GetDebugNumberSet(debugStringBuffer), |
| Js::OpCodeUtil::GetOpCodeName(stElem->m_opcode)); |
| Output::Flush(); |
| } |
| // We must be dealing with some atypical index value. Don't emit fast path, but go directly to helper. |
| return true; |
| } |
| else |
| { |
| // If global optimizer failed to notice the unconventional index and type specialized the src, |
| // there is nothing to do but bail out. We should never hit this code path, unless the global optimizer's conditions |
| // for not specializing the instruction don't match the lowerer's conditions for not emitting the array checks (see above). |
| // This could happen if global optimizer's information based on value tracking fails to recognize a non-integer index or |
| // a constant int index that is negative. The bailout below ensures that we behave correctly in retail builds even under |
| // these (unlikely) conditions. |
| AssertMsg(false, "Global optimizer shouldn't have specialized this instruction."); |
| |
| stElem->FreeSrc1(); |
| stElem->FreeDst(); |
| GenerateBailOut(stElem, nullptr, nullptr); |
| return false; |
| } |
| } |
| |
| const IR::AutoReuseOpnd autoReuseIndirOpnd(indirOpnd, m_func); |
| |
| const ValueType baseValueType(dst->AsIndirOpnd()->GetBaseOpnd()->GetValueType()); |
| if (isTypedArrayElement) |
| { |
| if (PHASE_TRACE(Js::TypedArrayTypeSpecPhase, this->m_func) && PHASE_TRACE(Js::LowererPhase, this->m_func)) |
| { |
| char baseValueTypeStr[VALUE_TYPE_MAX_STRING_SIZE]; |
| baseValueType.ToString(baseValueTypeStr); |
| char16 debugStringBuffer[MAX_FUNCTION_BODY_DEBUG_STRING_SIZE]; |
| Output::Print(_u("Typed Array Lowering: function: %s (%s), instr: %s, base value type: %S, %s."), |
| this->m_func->GetJITFunctionBody()->GetDisplayName(), |
| this->m_func->GetDebugNumberSet(debugStringBuffer), |
| Js::OpCodeUtil::GetOpCodeName(stElem->m_opcode), |
| baseValueTypeStr, |
| (!src->IsVar() ? _u("specialized") : _u("not specialized"))); |
| Output::Print(_u("\n")); |
| Output::Flush(); |
| } |
| |
| ObjectType objectType = baseValueType.GetObjectType(); |
| |
| if(indirOpnd->IsFloat()) |
| { |
| if (src->GetType() == TyFloat64) |
| { |
| IR::RegOpnd *const regSrc = src->AsRegOpnd(); |
| |
| if (indirOpnd->IsFloat32()) |
| { |
| // CVTSD2SS reg.f32, regSrc.f64 -- Convert regSrc from f64 to f32 |
| IR::RegOpnd *const reg = IR::RegOpnd::New(TyFloat32, this->m_func); |
| const IR::AutoReuseOpnd autoReuseReg(reg, m_func); |
| InsertConvertFloat64ToFloat32(reg, regSrc, stElem); |
| |
| // MOVSS indirOpnd, reg |
| InsertMove(indirOpnd, reg, stElem, false); |
| } |
| else |
| { |
| // MOVSD indirOpnd, regSrc |
| InsertMove(indirOpnd, regSrc, stElem, false); |
| } |
| emitBailout = true; |
| } |
| else |
| { |
| Assert(src->GetType() == TyVar); |
| |
| // MOV reg, src |
| IR::RegOpnd *const reg = IR::RegOpnd::New(TyVar, this->m_func); |
| const IR::AutoReuseOpnd autoReuseReg(reg, m_func); |
| InsertMove(reg, src, stElem); |
| |
| // Convert to float, and assign to indirOpnd |
| if (baseValueType.IsLikelyOptimizedVirtualTypedArray()) |
| { |
| IR::RegOpnd* dstReg = IR::RegOpnd::New(indirOpnd->GetType(), this->m_func); |
| m_lowererMD.EmitLoadFloat(dstReg, reg, stElem, stElem, labelHelper); |
| InsertMove(indirOpnd, dstReg, stElem); |
| } |
| else |
| { |
| m_lowererMD.EmitLoadFloat(indirOpnd, reg, stElem, stElem, labelHelper); |
| } |
| } |
| } |
| else if (objectType == ObjectType::Uint8ClampedArray || objectType == ObjectType::Uint8ClampedVirtualArray || objectType == ObjectType::Uint8ClampedMixedArray) |
| { |
| Assert(indirOpnd->GetType() == TyUint8); |
| |
| IR::RegOpnd *regSrc; |
| IR::AutoReuseOpnd autoReuseRegSrc; |
| if(src->IsRegOpnd()) |
| { |
| regSrc = src->AsRegOpnd(); |
| } |
| else |
| { |
| regSrc = IR::RegOpnd::New(StackSym::New(src->GetType(), m_func), src->GetType(), m_func); |
| autoReuseRegSrc.Initialize(regSrc, m_func); |
| |
| InsertMove(regSrc, src, stElem); |
| } |
| |
| IR::Opnd *bitMaskOpnd; |
| IRType srcType = regSrc->GetType(); |
| |
| if ((srcType == TyFloat64) || (srcType == TyInt32)) |
| { |
| // if (srcType == TyInt32) { |
| // TEST regSrc, ~255 |
| // JE $storeValue |
| // JSB $handleNegative |
| // MOV indirOpnd, 255 |
| // JMP $fallThru |
| // $handleNegative [isHelper = false] |
| // MOV indirOpnd, 0 |
| // JMP $fallThru |
| // $storeValue |
| // MOV indirOpnd, regSrc |
| // } |
| // else { |
| // MOVSD regTmp, regSrc |
| // ADDSD regTmp, 0.5 |
| // CVTTSD2SI regOpnd, regTmp |
| // TEST regOpnd, ~255 |
| // JE $storeValue |
| // $handleOutOfBounds [isHelper = true] |
| // COMISD regSrc, [&FloatZero] |
| // JB $handleNegative |
| // MOV regOpnd, 255 |
| // JMP $storeValue |
| // $handleNegative [isHelper = true] |
| // MOV regOpnd, 0 |
| // $storeValue |
| // MOV indirOpnd, regOpnd |
| // } |
| // $fallThru |
| |
| IR::RegOpnd *regOpnd; |
| IR::AutoReuseOpnd autoReuseRegOpnd; |
| if (srcType == TyInt32) |
| { |
| // When srcType == TyInt32 we will never call the helper and we will never |
| // modify the regOpnd. Therefore, it's okay to use regSrc directly, and it |
| // reduces register pressure. |
| regOpnd = regSrc; |
| } |
| else |
| { |
| #ifdef _M_IX86 |
| AssertMsg(AutoSystemInfo::Data.SSE2Available(), "GlobOpt shouldn't have specialized Uint8ClampedArray StElem to float64 if SSE2 is unavailable."); |
| #endif |
| |
| regOpnd = IR::RegOpnd::New(TyInt32, this->m_func); |
| autoReuseRegOpnd.Initialize(regOpnd, m_func); |
| |
| Assert(objectType == ObjectType::Uint8ClampedArray || objectType == ObjectType::Uint8ClampedVirtualArray || objectType == ObjectType::Uint8ClampedMixedArray); |
| |
| // Uint8ClampedArray follows the IEEE 754 round-half-to-even rule: a tie
| // rounds to the nearest even integer, so 2.5 rounds to 2 and 3.5 rounds to 4.
| // |
| // CVTSD2SI regOpnd, regSrc |
| LowererMD::InsertConvertFloat64ToInt32(RoundModeHalfToEven, regOpnd, regSrc, stElem); |
| } |
| |
| IR::LabelInstr *labelStoreValue = IR::LabelInstr::New(Js::OpCode::Label, this->m_func, false); |
| #ifndef _M_ARM |
| // TEST regOpnd, ~255 |
| // JE $storeValue |
| bitMaskOpnd = IR::IntConstOpnd::New(~255, TyInt32, this->m_func, true); |
| InsertTestBranch(regOpnd, bitMaskOpnd, Js::OpCode::BrEq_A, labelStoreValue, stElem); |
| #else // ARM |
| // Special case for ARM, a shift may be better |
| // |
| // ASRS tempReg, src, 8 |
| // BEQ $inlineSet |
| InsertShiftBranch( |
| Js::OpCode::Shr_A, |
| IR::RegOpnd::New(TyInt32, this->m_func), |
| regOpnd, |
| IR::IntConstOpnd::New(8, TyInt8, this->m_func), |
| Js::OpCode::BrEq_A, |
| labelStoreValue, |
| stElem); |
| #endif |
| |
| IR::LabelInstr *labelHandleNegative = IR::LabelInstr::New(Js::OpCode::Label, this->m_func, srcType == TyFloat64); |
| |
| if (srcType == TyInt32) |
| { |
| // JSB $handleNegativeOrOverflow |
| InsertBranch( |
| LowererMD::MDCompareWithZeroBranchOpcode(Js::OpCode::BrLt_A), |
| labelHandleNegative, |
| stElem); |
| |
| // MOV IndirOpnd.u8, 255 |
| InsertMove(indirOpnd, IR::IntConstOpnd::New(255, TyUint8, this->m_func, true), stElem); |
| |
| // JMP $fallThru |
| InsertBranch(Js::OpCode::Br, labelFallThru, stElem); |
| |
| // $handleNegative [isHelper = false] |
| stElem->InsertBefore(labelHandleNegative); |
| |
| // MOV IndirOpnd.u8, 0 |
| InsertMove(indirOpnd, IR::IntConstOpnd::New(0, TyUint8, this->m_func, true), stElem); |
| |
| // JMP $fallThru |
| InsertBranch(Js::OpCode::Br, labelFallThru, stElem); |
| } |
| else |
| { |
| Assert(regOpnd != regSrc); |
| |
| // This label is just to ensure the following code is moved to the helper block. |
| // $handleOutOfBounds [isHelper = true] |
| IR::LabelInstr *labelHandleOutOfBounds = IR::LabelInstr::New(Js::OpCode::Label, this->m_func, true); |
| stElem->InsertBefore(labelHandleOutOfBounds); |
| |
| // COMISD regSrc, FloatZero |
| // JB labelHandleNegative |
| IR::MemRefOpnd * zeroOpnd = IR::MemRefOpnd::New(this->m_func->GetThreadContextInfo()->GetDoubleZeroAddr(), TyMachDouble, this->m_func); |
| InsertCompareBranch(regSrc, zeroOpnd, Js::OpCode::BrNotGe_A, labelHandleNegative, stElem); |
| |
| // MOV regOpnd, 255 |
| InsertMove(regOpnd, IR::IntConstOpnd::New(255, TyUint8, this->m_func, true), stElem); |
| |
| // JMP $storeValue |
| InsertBranch(Js::OpCode::Br, labelStoreValue, stElem); |
| |
| // $handleNegative [isHelper = true] |
| stElem->InsertBefore(labelHandleNegative); |
| |
| // MOV regOpnd, 0 |
| InsertMove(regOpnd, IR::IntConstOpnd::New(0, TyUint8, this->m_func, true), stElem); |
| } |
| |
| // $storeValue |
| stElem->InsertBefore(labelStoreValue); |
| |
| // MOV IndirOpnd.u8, regOpnd.u8 |
| InsertMove(indirOpnd, regOpnd, stElem); |
| |
| emitBailout = true; |
| } |
| else |
| { |
| Assert(srcType == TyVar); |
| |
| #if INT32VAR |
| bitMaskOpnd = IR::AddrOpnd::New((Js::Var)~(INT_PTR)(Js::TaggedInt::ToVarUnchecked(255)), IR::AddrOpndKindConstantVar, this->m_func, true); |
| #else |
| bitMaskOpnd = IR::IntConstOpnd::New(~(INT_PTR)(Js::TaggedInt::ToVarUnchecked(255)), TyMachReg, this->m_func, true); |
| #endif |
| // Note: We are assuming that if no bits outside of TaggedInt(255)'s bit pattern are set,
| // then we have a tagged int value between 0 and 255.
| // #if INT32VAR |
| // This works for pointers because tagged int bit can't be on, and first 64k are not valid addresses |
| // This works for floats because a valid float would have one of the upper 13 bits on. |
| // #else |
| // Any pointer is larger than 512 because first 64k memory is reserved by the OS |
| // #endif |
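| // Illustrative example (assuming the usual x64 encoding, where AtomTag_IntPtr
| // is bit 48): TaggedInt(200) = 0x00010000000000C8 and ~TaggedInt(255) =
| // 0xFFFEFFFFFFFFFF00, so the TEST below yields zero and we take the inline
| // path; a float, a pointer, or a tagged int above 255 leaves some masked bit
| // set and falls through to the helper call.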
| |
| IR::LabelInstr *labelInlineSet = IR::LabelInstr::New(Js::OpCode::Label, this->m_func); |
| #ifndef _M_ARM |
| // TEST src, ~(TaggedInt(255)) -- Check for tagged int >= 255 and <= 0 |
| // JEQ $inlineSet |
| InsertTestBranch(regSrc, bitMaskOpnd, Js::OpCode::BrEq_A, labelInlineSet, stElem); |
| #else // ARM |
| // Special case for ARM, a shift may be better |
| // |
| // ASRS tempReg, src, 8 |
| // BEQ $inlineSet |
| InsertShiftBranch( |
| Js::OpCode::Shr_A, |
| IR::RegOpnd::New(TyInt32, this->m_func), |
| regSrc, |
| IR::IntConstOpnd::New(8, TyInt8, this->m_func), |
| Js::OpCode::BrEq_A, |
| labelInlineSet, |
| stElem); |
| #endif |
| |
| // Uint8ClampedArray::DirectSetItem(array, index, value); |
| |
| // Inserting a helper call. Make sure it observes the main instruction's requirements regarding implicit calls.
| if (!instrIsInHelperBlock) |
| { |
| stElem->InsertBefore(IR::LabelInstr::New(Js::OpCode::Label, m_func, true)); |
| } |
| |
| if (stElem->HasBailOutInfo() && (stElem->GetBailOutKind() & IR::BailOutOnArrayAccessHelperCall)) |
| { |
| // Bail out instead of doing the helper call. |
| Assert(labelHelper); |
| this->InsertBranch(Js::OpCode::Br, labelHelper, stElem); |
| } |
| else |
| { |
| IR::Instr *instr = IR::Instr::New(Js::OpCode::Call, this->m_func); |
| stElem->InsertBefore(instr); |
| |
| if (stElem->HasBailOutInfo() && BailOutInfo::IsBailOutOnImplicitCalls(stElem->GetBailOutKind())) |
| { |
| // Bail out if this helper triggers implicit calls. |
| instr = this->AddBailoutToHelperCallInstr(instr, stElem->GetBailOutInfo(), stElem->GetBailOutKind(), stElem); |
| } |
| |
| m_lowererMD.LoadHelperArgument(instr, regSrc); |
| IR::Opnd *indexOpnd = indirOpnd->GetIndexOpnd(); |
| if (indexOpnd == nullptr) |
| { |
| if (indirOpnd->GetOffset() == 0) |
| { |
| // There are two ways that we can get an indirOpnd with no index and 0 offset. |
| // The first is that we're storing to element 0 in the array by constant offset. |
| // The second is that we got back a pointer that has spectre masking, so it won't
| // have the appropriate index into the array. In that case, we need to regenerate
| // the index.
| // The plan is |
| // 1. get the backing buffer pointer |
| // 2. subtract that from the indexOpnd to get the numeric index |
| // This is unfortunately slightly worse perf for constant writes of vars to index 0 |
| // of Uint8ClampedArrays, but that's hopefully uncommon enough that the impact will |
| // be minimal |
| |
| // MOV backingBufferOpnd, [base + offset(arrayBuffer)] |
| // SUB indexOpnd, backingBufferOpnd |
| int bufferOffset = GetArrayOffsetOfHeadSegment(baseValueType); |
| IR::IndirOpnd* arrayBufferOpnd = IR::IndirOpnd::New(stElem->GetDst()->AsIndirOpnd()->GetBaseOpnd(), bufferOffset, TyMachPtr, this->m_func); |
| IR::RegOpnd* backingBufferOpnd = IR::RegOpnd::New(TyMachPtr, this->m_func); |
| InsertMove(backingBufferOpnd, arrayBufferOpnd, instr); |
| IR::RegOpnd* tempIndexOpnd = IR::RegOpnd::New(TyMachPtr, this->m_func); |
| InsertSub(false, tempIndexOpnd, indirOpnd->GetBaseOpnd(), backingBufferOpnd, instr); |
| indexOpnd = tempIndexOpnd->UseWithNewType(TyInt32, this->m_func); |
| } |
| else |
| { |
| indexOpnd = IR::IntConstOpnd::New(indirOpnd->GetOffset(), TyInt32, this->m_func); |
| } |
| } |
| else |
| { |
| Assert(indirOpnd->GetOffset() == 0); |
| } |
| m_lowererMD.LoadHelperArgument(instr, indexOpnd); |
| m_lowererMD.LoadHelperArgument(instr, stElem->GetDst()->AsIndirOpnd()->GetBaseOpnd()); |
| |
| Assert(objectType == ObjectType::Uint8ClampedArray || objectType == ObjectType::Uint8ClampedMixedArray || objectType == ObjectType::Uint8ClampedVirtualArray); |
| m_lowererMD.ChangeToHelperCall(instr, IR::JnHelperMethod::HelperUint8ClampedArraySetItem); |
| |
| // JMP $fallThrough |
| InsertBranch(Js::OpCode::Br, labelFallThru, stElem); |
| } |
| |
| //$inlineSet |
| stElem->InsertBefore(labelInlineSet); |
| |
| IR::RegOpnd *regOpnd; |
| IR::AutoReuseOpnd autoReuseRegOpnd; |
| #if INT32VAR |
| regOpnd = regSrc; |
| #else |
| // MOV r1, src |
| // SAR r1, 1 |
| regOpnd = IR::RegOpnd::New(TyInt32, this->m_func); |
| autoReuseRegOpnd.Initialize(regOpnd, m_func); |
| InsertShift( |
| Js::OpCode::Shr_A, |
| false /* needFlags */, |
| regOpnd, |
| regSrc, |
| IR::IntConstOpnd::New(1, TyInt8, this->m_func), |
| stElem); |
| #endif |
| |
| // MOV IndirOpnd.u8, reg.u8 |
| InsertMove(indirOpnd, regOpnd, stElem); |
| } |
| } |
| else |
| { |
| if (src->IsInt32()) |
| { |
| // MOV indirOpnd, src |
| InsertMove(indirOpnd, src, stElem); |
| |
| emitBailout = true; |
| } |
| else if (src->IsFloat64()) |
| { |
| AssertMsg(indirOpnd->GetType() == TyUint32, "Only StElemI to Uint32Array could be specialized to float64."); |
| #ifdef _M_IX86 |
| AssertMsg(AutoSystemInfo::Data.SSE2Available(), "GlobOpt shouldn't have specialized Uint32Array StElemI to float64 if SSE2 is unavailable.");
| #endif |
| |
| bool bailOutOnHelperCall = stElem->HasBailOutInfo() ? !!(stElem->GetBailOutKind() & IR::BailOutOnArrayAccessHelperCall) : false; |
| if (bailOutOnHelperCall) |
| { |
| if(!GlobOpt::DoEliminateArrayAccessHelperCall(this->m_func)) |
| { |
| // Array access helper call removal is already off for some reason. Prevent trying to rejit again |
| // because it won't help and the same thing will happen again. Just abort jitting this function. |
| if(PHASE_TRACE(Js::BailOutPhase, this->m_func)) |
| { |
| Output::Print(_u(" Aborting JIT because EliminateArrayAccessHelperCall is already off\n")); |
| Output::Flush(); |
| } |
| throw Js::OperationAbortedException(); |
| } |
| |
| throw Js::RejitException(RejitReason::ArrayAccessHelperCallEliminationDisabled); |
| } |
| |
| IR::RegOpnd *const reg = IR::RegOpnd::New(TyInt32, this->m_func); |
| const IR::AutoReuseOpnd autoReuseReg(reg, m_func); |
| m_lowererMD.EmitFloatToInt(reg, src, stElem, stElem, labelHelper); |
| |
| // MOV indirOpnd, reg |
| InsertMove(indirOpnd, reg, stElem); |
| |
| emitBailout = true; |
| } |
| else |
| { |
| Assert(src->IsVar()); |
| |
| if(src->IsAddrOpnd()) |
| { |
| IR::AddrOpnd *const addrSrc = src->AsAddrOpnd(); |
| Assert(addrSrc->IsVar()); |
| Assert(Js::TaggedInt::Is(addrSrc->m_address)); |
| |
| // MOV indirOpnd, intValue |
| InsertMove( |
| indirOpnd, |
| IR::IntConstOpnd::New(Js::TaggedInt::ToInt32(addrSrc->m_address), TyInt32, m_func), |
| stElem); |
| } |
| else |
| { |
| IR::RegOpnd *const regSrc = src->AsRegOpnd(); |
| |
| // FromVar reg, Src |
| IR::RegOpnd *const reg = IR::RegOpnd::New(TyInt32, this->m_func); |
| const IR::AutoReuseOpnd autoReuseReg(reg, m_func); |
| IR::Instr * instr = IR::Instr::New(Js::OpCode::FromVar, reg, regSrc, stElem->m_func); |
| stElem->InsertBefore(instr); |
| |
| // Convert reg to int32 |
| // Note: ToUint32 is implemented as (uint32)ToInt32() |
| IR::BailOutKind bailOutKind = stElem->HasBailOutInfo() ? stElem->GetBailOutKind() : IR::BailOutInvalid; |
| if (BailOutInfo::IsBailOutOnImplicitCalls(bailOutKind)) |
| { |
| instr = this->AddBailoutToHelperCallInstr(instr, stElem->GetBailOutInfo(), bailOutKind, stElem); |
| } |
| |
| bool bailOutOnHelperCall = !!(bailOutKind & IR::BailOutOnArrayAccessHelperCall); |
| m_lowererMD.EmitLoadInt32(instr, true /*conversionFromObjectAllowed*/, bailOutOnHelperCall, labelHelper); |
| |
| // MOV indirOpnd, reg |
| InsertMove(indirOpnd, reg, stElem); |
| } |
| } |
| } |
| } |
| else |
| { |
| if(labelSegmentLengthIncreased) |
| { |
| IR::Instr *const insertBeforeInstr = labelSegmentLengthIncreased->m_next; |
| |
| // We might be changing the array to have missing values here, or we might be |
| // changing it to extend it; in either case, we're not going to make it _not_ |
| // have missing values after this operation, so just write and fallthrough. |
| // labelSegmentLengthIncreased: |
| // mov [segment + index], src |
| // jmp $fallThru |
| InsertMove(indirOpnd, src, insertBeforeInstr); |
| InsertBranch(Js::OpCode::Br, labelFallThru, insertBeforeInstr); |
| } |
| |
| if (!(isStringIndex || (baseValueType.IsArrayOrObjectWithArray() && baseValueType.HasNoMissingValues()))) |
| { |
| if(!stElem->IsProfiledInstr() || stElem->AsProfiledInstr()->u.stElemInfo->LikelyFillsMissingValue()) |
| { |
| // Check whether the store is filling a missing value. If so, fall back to the helper so that it can check whether |
| // this store is filling the last missing value in the array. This is necessary to keep the missing value tracking |
| // in arrays precise. The check is omitted when profile data says that the store is likely to create missing values. |
| // |
| // cmp [segment + index], Js::SparseArraySegment::MissingValue |
| // je $helper |
| InsertMissingItemCompareBranch( |
| indirOpnd, |
| Js::OpCode::BrEq_A, |
| labelHelper, |
| stElem); |
| } |
| else |
| { |
| GenerateIsEnabledArraySetElementFastPathCheck(labelHelper, stElem); |
| } |
| } |
| |
| // MOV [r3 + r2], src |
| InsertMoveWithBarrier(indirOpnd, src, stElem); |
| } |
| |
| // JMP $fallThru |
| InsertBranch(Js::OpCode::Br, labelFallThru, stElem); |
| |
| // $helper: |
| // bailout or caller generated helper call |
| // $fallThru: |
| |
| stElem->InsertBefore(labelHelper); |
| instrIsInHelperBlock = true; |
| |
| if (isNativeArrayStore && !isStringIndex) |
| { |
| Assert(stElem->HasBailOutInfo()); |
| Assert(labelHelper != labelBailOut); |
| |
| // Transform the original instr: |
| // |
| // $helper: |
| // dst = LdElemI_A src (BailOut) |
| // $fallthrough: |
| // |
| // to: |
| // |
| // $helper: |
| // dst = LdElemI_A src |
| // b $fallthrough |
| // $bailout: |
| // BailOut |
| // $fallthrough: |
| |
| LowerOneBailOutKind(stElem, IR::BailOutConventionalNativeArrayAccessOnly, instrIsInHelperBlock); |
| IR::Instr *const insertBeforeInstr = stElem->m_next; |
| InsertBranch(Js::OpCode::Br, labelFallThru, insertBeforeInstr); |
| insertBeforeInstr->InsertBefore(labelBailOut); |
| } |
| |
| if (emitBailout) |
| { |
| stElem->FreeSrc1(); |
| stElem->FreeDst(); |
| GenerateBailOut(stElem, nullptr, nullptr); |
| } |
| |
| return !emitBailout; |
| } |
| |
| bool |
| Lowerer::GenerateFastLdLen(IR::Instr *ldLen, bool *instrIsInHelperBlockRef) |
| { |
| Assert(instrIsInHelperBlockRef); |
| bool &instrIsInHelperBlock = *instrIsInHelperBlockRef; |
| instrIsInHelperBlock = false; |
| |
| // TEST src, AtomTag -- check src not tagged int |
| // JNE $helper |
| // CMP [src], JavascriptArray::`vtable' -- check base isArray |
| // JNE $string |
| // MOV length, [src + offset(length)] -- Load array length |
| // JMP $tovar |
| // $string: |
| // CMP [src + offset(type)], static_string_type -- check src isString |
| // JNE $helper |
| // MOV length, [src + offset(length)] -- Load string length |
| // $toVar: |
| // TEST length, 0xC0000000 -- test for overflow of SHL, or negative |
| // JNE $helper |
| // SHL length, Js::VarTag_Shift -- restore the var tag on the result |
| // INC length |
| // MOV dst, length |
| // JMP $fallthru |
| // $helper: |
| // CALL GetProperty(src, length_property_id, scriptContext) |
| // $fallthru: |
| |
| IR::Opnd * opnd = ldLen->GetSrc1(); |
| IR::RegOpnd * dst = ldLen->GetDst()->AsRegOpnd(); |
| const ValueType srcValueType(opnd->GetValueType()); |
| |
| IR::LabelInstr *const labelHelper = IR::LabelInstr::New(Js::OpCode::Label, this->m_func, true); |
| |
| if (ldLen->DoStackArgsOpt()) |
| { |
| GenerateFastArgumentsLdLen(ldLen, ldLen->GetOrCreateContinueLabel()); |
| ldLen->Remove(); |
| return false; |
| } |
| else |
| { |
| const bool arrayFastPath = ShouldGenerateArrayFastPath(opnd, false, true, false); |
| |
| // Use HasBeenString rather than IsLikelyString because the value type could be a merge between StringObject and
| // String, and the profile data cannot tell whether the object is a StringObject or some other object
| const bool stringFastPath = srcValueType.IsUninitialized() || srcValueType.HasBeenString(); |
| |
| if(!(arrayFastPath || stringFastPath)) |
| { |
| return true; |
| } |
| |
| IR::RegOpnd * src; |
| if (opnd->IsRegOpnd()) |
| { |
| src = opnd->AsRegOpnd(); |
| } |
| else |
| { |
| // LdLen has a PropertySymOpnd until globopt where the decision whether to convert it to LdFld is made. If globopt is skipped, the opnd will |
| // still be a PropertySymOpnd here. In that case, do the conversion here. |
| IR::SymOpnd * symOpnd = opnd->AsSymOpnd(); |
| PropertySym * propertySym = symOpnd->m_sym->AsPropertySym(); |
| src = IR::RegOpnd::New(propertySym->m_stackSym, IRType::TyVar, this->m_func); |
| ldLen->ReplaceSrc1(src); |
| opnd = src; |
| } |
| |
| const int32 arrayOffsetOfLength = |
| srcValueType.IsLikelyAnyOptimizedArray() |
| ? GetArrayOffsetOfLength(srcValueType) |
| : Js::JavascriptArray::GetOffsetOfLength(); |
| IR::LabelInstr *labelString = nullptr; |
| IR::RegOpnd *arrayOpnd = src; |
| IR::RegOpnd *arrayLengthOpnd = nullptr; |
| IR::AutoReuseOpnd autoReuseArrayLengthOpnd; |
| if(arrayFastPath) |
| { |
| if(!srcValueType.IsAnyOptimizedArray()) |
| { |
| if(stringFastPath) |
| { |
| // If we don't have info about the src value type or its object type, the array and string fast paths are |
| // generated |
| labelString = IR::LabelInstr::New(Js::OpCode::Label, this->m_func); |
| } |
| |
| arrayOpnd = GenerateArrayTest(src, labelHelper, stringFastPath ? labelString : labelHelper, ldLen, false); |
| } |
| else if(src->IsArrayRegOpnd()) |
| { |
| IR::ArrayRegOpnd *const arrayRegOpnd = src->AsArrayRegOpnd(); |
| if(arrayRegOpnd->LengthSym()) |
| { |
| arrayLengthOpnd = IR::RegOpnd::New(arrayRegOpnd->LengthSym(), TyUint32, m_func); |
| DebugOnly(arrayLengthOpnd->FreezeSymValue()); |
| autoReuseArrayLengthOpnd.Initialize(arrayLengthOpnd, m_func); |
| } |
| } |
| } |
| const IR::AutoReuseOpnd autoReuseArrayOpnd(arrayOpnd, m_func); |
| |
| IR::RegOpnd *lengthOpnd = nullptr; |
| IR::AutoReuseOpnd autoReuseLengthOpnd; |
| const auto EnsureLengthOpnd = [&]() |
| { |
| if(lengthOpnd) |
| { |
| return; |
| } |
| |
| lengthOpnd = IR::RegOpnd::New(TyUint32, m_func); |
| autoReuseLengthOpnd.Initialize(lengthOpnd, m_func); |
| }; |
| |
| if(arrayFastPath) |
| { |
| if(arrayLengthOpnd) |
| { |
| lengthOpnd = arrayLengthOpnd; |
| autoReuseLengthOpnd.Initialize(lengthOpnd, m_func); |
| Assert(!stringFastPath); |
| } |
| else |
| { |
| // MOV length, [array + offset(length)] -- Load array length |
| EnsureLengthOpnd(); |
| IR::IndirOpnd *const indirOpnd = IR::IndirOpnd::New(arrayOpnd, arrayOffsetOfLength, TyUint32, this->m_func); |
| InsertMove(lengthOpnd, indirOpnd, ldLen); |
| } |
| } |
| |
| if(stringFastPath) |
| { |
| IR::LabelInstr *labelToVar = nullptr; |
| if(arrayFastPath) |
| { |
| // JMP $tovar |
| labelToVar = IR::LabelInstr::New(Js::OpCode::Label, this->m_func); |
| InsertBranch(Js::OpCode::Br, labelToVar, ldLen); |
| |
| // $string: |
| ldLen->InsertBefore(labelString); |
| } |
| |
| // CMP [src + offset(type)], static_string_type -- check src isString
| // JNE $helper |
| GenerateStringTest(src, ldLen, labelHelper, nullptr, !arrayFastPath); |
| |
| // MOV length, [src + offset(length)] -- Load string length |
| EnsureLengthOpnd(); |
| IR::IndirOpnd * indirOpnd = IR::IndirOpnd::New(src, offsetof(Js::JavascriptString, m_charLength), TyUint32, this->m_func); |
| InsertMove(lengthOpnd, indirOpnd, ldLen); |
| |
| if(arrayFastPath) |
| { |
| // $toVar: |
| ldLen->InsertBefore(labelToVar); |
| } |
| } |
| |
| Assert(lengthOpnd); |
| |
| if(ldLen->HasBailOutInfo() && (ldLen->GetBailOutKind() & ~IR::BailOutKindBits) == IR::BailOutOnIrregularLength) |
| { |
| Assert(ldLen->GetBailOutKind() == IR::BailOutOnIrregularLength || ldLen->HasLazyBailOut()); |
| Assert(dst->IsInt32()); |
| |
| // Since the length is an unsigned int32, verify that when interpreted as a signed int32, it is not negative |
| // test length, length |
| // js $helper |
| // mov dst, length |
| // jmp $fallthrough |
| InsertCompareBranch( |
| lengthOpnd, |
| IR::IntConstOpnd::New(0, lengthOpnd->GetType(), m_func, true), |
| Js::OpCode::BrLt_A, |
| labelHelper, |
| ldLen); |
| InsertMove(dst, lengthOpnd, ldLen); |
| InsertBranch(Js::OpCode::Br, ldLen->GetOrCreateContinueLabel(), ldLen); |
| |
| // $helper: |
| // (Bail out with IR::BailOutOnIrregularLength) |
| ldLen->InsertBefore(labelHelper); |
| instrIsInHelperBlock = true; |
| ldLen->FreeDst(); |
| ldLen->FreeSrc1(); |
| GenerateBailOut(ldLen); |
| return false; |
| } |
| |
| #if INT32VAR |
| // Since the length is an unsigned int32, verify that when interpreted as a signed int32, it is not negative |
| // test length, length |
| // js $helper |
| InsertCompareBranch( |
| lengthOpnd, |
| IR::IntConstOpnd::New(0, lengthOpnd->GetType(), m_func, true), |
| Js::OpCode::BrLt_A, |
| labelHelper, |
| ldLen); |
| #else |
| // Since the length is an unsigned int32, verify that when interpreted as a signed int32, it is not negative. |
| // Additionally, verify that the signed value's width is not greater than 31 bits, since it needs to be tagged. |
| // test length, 0xC0000000 |
| // jne $helper |
| InsertTestBranch( |
| lengthOpnd, |
| IR::IntConstOpnd::New(0xC0000000, TyUint32, this->m_func, true), |
| Js::OpCode::BrNeq_A, |
| labelHelper, |
| ldLen); |
| #endif |
| |
| #if INT32VAR |
| // |
| // dst_32 = MOV length |
| // dst_64 = OR dst_64, Js::AtomTag_IntPtr |
| // |
| Assert(dst->GetType() == TyVar); |
| |
| IR::Opnd *dst32 = dst->Copy(this->m_func); |
| dst32->SetType(TyInt32); |
| |
| // This will clear the top bits. |
| InsertMove(dst32, lengthOpnd, ldLen); |
| |
| m_lowererMD.GenerateInt32ToVarConversion(dst, ldLen); |
| #else |
| // dst = SHL length, Js::VarTag_Shift -- restore the var tag on the result |
| InsertShift( |
| Js::OpCode::Shl_A, |
| false /* needFlags */, |
| dst, |
| lengthOpnd, |
| IR::IntConstOpnd::New(Js::VarTag_Shift, TyInt8, this->m_func), |
| ldLen); |
| |
| // dst = ADD dst, AtomTag |
| InsertAdd( |
| false /* needFlags */, |
| dst, |
| dst, |
| IR::IntConstOpnd::New(Js::AtomTag_Int32, TyUint32, m_func, true), |
| ldLen); |
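| // Illustrative example (assuming the 32-bit tagged-int encoding, where
| // VarTag_Shift == 1 and the atom tag is 1): length 5 becomes (5 << 1) + 1 = 0xB.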
| #endif |
| |
| // JMP $fallthrough |
| InsertBranch(Js::OpCode::Br, ldLen->GetOrCreateContinueLabel(), ldLen); |
| } |
| |
| // $helper: |
| // (caller generates helper call) |
| ldLen->InsertBefore(labelHelper); |
| instrIsInHelperBlock = true; |
| |
| return true; // fast path was generated, helper call will be in a helper block |
| } |
| |
| void |
| Lowerer::GenerateFastInlineStringCodePointAt(IR::Instr* lastInstr, Func* func, IR::Opnd *strLength, IR::Opnd *srcIndex, IR::RegOpnd *lowerChar, IR::RegOpnd *strPtr) |
| { |
| //// Required State: |
| // strLength - UInt32 |
| // srcIndex - TyVar if not Address |
| // lowerChar - TyMachReg |
| // strPtr - Addr |
| //// Instructions |
| // CMP [strLength], srcIndex + 1 |
| // JBE charCodeAt |
| // CMP lowerChar 0xDC00 |
| // JGE charCodeAt |
| // CMP lowerChar 0xD7FF |
| // JLE charCodeAt |
| // upperChar = MOVZX [strPtr + srcIndex + 1] |
| // CMP upperChar 0xE000 |
| // JGE charCodeAt |
| // CMP lowerChar 0xDBFF |
| // JLE charCodeAt |
| // lowerChar = SUB lowerChar - 0xD800 |
| // lowerChar = SHL lowerChar, 10 |
| // lowerChar = ADD lowerChar + upperChar |
| // lowerChar = ADD lowerChar + 0x2400 |
| // :charCodeAt |
| // :done |
| |
| // Asserts |
| // On ARM, strLength should be changed to TyUint32
| Assert(strLength->GetType() == TyUint32 || strLength->GetType() == TyMachReg); |
| Assert(srcIndex->GetType() == TyVar || srcIndex->IsAddrOpnd()); |
| Assert(lowerChar->GetType() == TyMachReg || lowerChar->GetType() == TyUint32); |
| Assert(strPtr->IsRegOpnd()); |
| |
| IR::RegOpnd *tempReg = IR::RegOpnd::New(TyMachReg, func); |
| IR::LabelInstr *labelCharCodeAt = IR::LabelInstr::New(Js::OpCode::Label, func); |
| IR::IndirOpnd *tempIndirOpnd; |
| |
| if (srcIndex->IsAddrOpnd()) |
| { |
| uint32 length = Js::TaggedInt::ToUInt32(srcIndex->AsAddrOpnd()->m_address) + 1U; |
| InsertCompareBranch(strLength, IR::IntConstOpnd::New(length, TyUint32, func), Js::OpCode::BrLe_A, true, labelCharCodeAt, lastInstr); |
| tempIndirOpnd = IR::IndirOpnd::New(strPtr, (length) * sizeof(char16), TyUint16, func); |
| } |
| else |
| { |
| InsertMove(tempReg, srcIndex, lastInstr); |
| |
| #if INT32VAR |
| IR::Opnd * reg32Bit = tempReg->UseWithNewType(TyInt32, func); |
| InsertMove(tempReg, reg32Bit, lastInstr); |
| tempReg = reg32Bit->AsRegOpnd(); |
| #else |
| InsertShift(Js::OpCode::Shr_A, false, tempReg, tempReg, IR::IntConstOpnd::New(Js::VarTag_Shift, TyInt8, func), lastInstr); |
| #endif |
| InsertAdd(false, tempReg, tempReg, IR::IntConstOpnd::New(1, TyInt32, func), lastInstr); |
| |
| InsertCompareBranch(strLength, tempReg, Js::OpCode::BrLe_A, true, labelCharCodeAt, lastInstr); |
| |
| if(tempReg->GetSize() != MachPtr) |
| { |
| tempReg = tempReg->UseWithNewType(TyMachPtr, func)->AsRegOpnd(); |
| } |
| |
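| // A scale of 1 is a left shift by one, i.e. index * sizeof(char16), matching the constant-index path above (assuming IndirOpnd scales are log2 byte shifts). |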
| tempIndirOpnd = IR::IndirOpnd::New(strPtr, tempReg, 1, TyUint16, func); |
| } |
| |
| // By this point, we have added the instructions before labelCharCodeAt that check for the extra length required by the surrogate pair. |
| // The branching for that is already handled; all that's left is to check for the correct values. |
| // Validate char is in range [D800, DBFF]; otherwise just get a charCodeAt |
| InsertCompareBranch(lowerChar, IR::IntConstOpnd::New(0xDC00, TyUint32, func), Js::OpCode::BrGe_A, labelCharCodeAt, lastInstr); |
| InsertCompareBranch(lowerChar, IR::IntConstOpnd::New(0xD7FF, TyUint32, func), Js::OpCode::BrLe_A, labelCharCodeAt, lastInstr); |
| |
| // upperChar = MOVZX r3, [r1 + r3 * 2] -- this is the value of the upper surrogate pair char |
| IR::RegOpnd *upperChar = IR::RegOpnd::New(TyInt32, func); |
| InsertMove(upperChar, tempIndirOpnd, lastInstr); |
| |
| // Validate upper is in range [DC00, DFFF]; otherwise just get a charCodeAt |
| InsertCompareBranch(upperChar, IR::IntConstOpnd::New(0xE000, TyUint32, func), Js::OpCode::BrGe_A, labelCharCodeAt, lastInstr); |
| InsertCompareBranch(upperChar, IR::IntConstOpnd::New(0xDBFF, TyUint32, func), Js::OpCode::BrLe_A, labelCharCodeAt, lastInstr); |
| |
| // (lower - 0xD800) << 10 + second - 0xDC00 + 0x10000 -- 0x10000 - 0xDC00 = 0x2400 |
| // lowerChar = SUB lowerChar - 0xD800 |
| // lowerChar = SHL lowerChar, 10 |
| // lowerChar = ADD lowerChar + upperChar |
| // lowerChar = ADD lowerChar + 0x2400 |
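| // Example: the surrogate pair 0xD83D 0xDE00 gives (0xD83D - 0xD800) << 10 = 0xF400, then 0xF400 + 0xDE00 + 0x2400 = 0x1F600 (U+1F600). |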
| InsertSub(false, lowerChar, lowerChar, IR::IntConstOpnd::New(0xD800, TyUint32, func), lastInstr); |
| InsertShift(Js::OpCode::Shl_A, false, lowerChar, lowerChar, IR::IntConstOpnd::New(10, TyUint32, func), lastInstr); |
| InsertAdd(false, lowerChar, lowerChar, upperChar, lastInstr); |
| InsertAdd(false, lowerChar, lowerChar, IR::IntConstOpnd::New(0x2400, TyUint32, func), lastInstr); |
| |
| lastInstr->InsertBefore(labelCharCodeAt); |
| } |
| |
| bool |
| Lowerer::GenerateFastInlineStringFromCodePoint(IR::Instr* instr) |
| { |
| Assert(instr->m_opcode == Js::OpCode::CallDirect); |
| |
| // ArgOut sequence |
| // s8.var = StartCall 2 (0x2).i32 #000c |
| // arg1(s9)<0>.var = ArgOut_A s2.var, s8.var #0014 //Implicit this, String object |
| // arg2(s10)<4>.var = ArgOut_A s3.var, arg1(s9)<0>.var #0018 //First argument to FromCodePoint |
| // arg1(s11)<0>.u32 = ArgOut_A_InlineSpecialized 0x012C26C0 (DynamicObject).var, arg2(s10)<4>.var # |
| // s0[LikelyTaggedInt].var = CallDirect String_FromCodePoint.u32, arg1(s11)<0>.u32 #001c |
| |
| IR::Opnd * linkOpnd = instr->GetSrc2(); |
| IR::Instr * tmpInstr = Inline::GetDefInstr(linkOpnd);// linkOpnd->AsSymOpnd()->m_sym->AsStackSym()->m_instrDef; |
| linkOpnd = tmpInstr->GetSrc2(); |
| |
| #if DBG |
| IntConstType argCount = linkOpnd->AsSymOpnd()->m_sym->AsStackSym()->GetArgSlotNum(); |
| Assert(argCount == 2); |
| #endif |
| |
| IR::Instr *argInstr = Inline::GetDefInstr(linkOpnd); |
| Assert(argInstr->m_opcode == Js::OpCode::ArgOut_A); |
| IR::Opnd *src1 = argInstr->GetSrc1(); |
| |
| if (src1->GetValueType().IsLikelyInt()) |
| { |
| //Trying to generate this code |
| // MOV resultOpnd, dst |
| // MOV fromCodePointIntArgOpnd, src1 |
| // SAR fromCodePointIntArgOpnd, Js::VarTag_Shift |
| // JAE $Helper |
| // CMP fromCodePointIntArgOpnd, Js::ScriptContext::CharStringCacheSize |
| // |
| // JAE $labelWCharStringCheck |
| // MOV resultOpnd, GetCharStringCache[fromCodePointIntArgOpnd] |
| // TST resultOpnd, resultOpnd //Check for null |
| // JEQ $helper |
| // JMP $Done |
| // |
| //$labelWCharStringCheck: |
| // resultOpnd = Call HelperGetStringForCharW |
| // JMP $Done |
| //$helper: |
| IR::RegOpnd * resultOpnd = nullptr; |
| if (!instr->GetDst()->IsRegOpnd() || instr->GetDst()->IsEqual(src1)) |
| { |
| resultOpnd = IR::RegOpnd::New(TyVar, this->m_func); |
| } |
| else |
| { |
| resultOpnd = instr->GetDst()->AsRegOpnd(); |
| } |
| |
| IR::LabelInstr *doneLabel = IR::LabelInstr::New(Js::OpCode::Label, this->m_func); |
| IR::LabelInstr *labelHelper = IR::LabelInstr::New(Js::OpCode::Label, this->m_func, true); |
| |
| IR::RegOpnd * fromCodePointIntArgOpnd = IR::RegOpnd::New(TyVar, instr->m_func); |
| IR::AutoReuseOpnd autoReuseFromCodePointIntArgOpnd(fromCodePointIntArgOpnd, instr->m_func); |
| InsertMove(fromCodePointIntArgOpnd, src1, instr); |
| |
| //Check for tagged int and get the untagged version. |
| fromCodePointIntArgOpnd = GenerateUntagVar(fromCodePointIntArgOpnd, labelHelper, instr); |
| |
| GenerateGetSingleCharString(fromCodePointIntArgOpnd, resultOpnd, labelHelper, doneLabel, instr, true); |
| |
| instr->InsertBefore(labelHelper); |
| |
| instr->InsertAfter(doneLabel); |
| |
| RelocateCallDirectToHelperPath(tmpInstr, labelHelper); |
| } |
| return true; |
| } |
| |
| bool |
| Lowerer::GenerateFastInlineStringFromCharCode(IR::Instr* instr) |
| { |
| Assert(instr->m_opcode == Js::OpCode::CallDirect); |
| |
| // ArgOut sequence |
| // s8.var = StartCall 2 (0x2).i32 #000c |
| // arg1(s9)<0>.var = ArgOut_A s2.var, s8.var #0014 //Implicit this, String object |
| // arg2(s10)<4>.var = ArgOut_A s3.var, arg1(s9)<0>.var #0018 //First argument to FromCharCode |
| // arg1(s11)<0>.u32 = ArgOut_A_InlineSpecialized 0x012C26C0 (DynamicObject).var, arg2(s10)<4>.var # |
| // s0[LikelyTaggedInt].var = CallDirect String_FromCharCode.u32, arg1(s11)<0>.u32 #001c |
| |
| IR::Opnd * linkOpnd = instr->GetSrc2(); |
| IR::Instr * tmpInstr = Inline::GetDefInstr(linkOpnd);// linkOpnd->AsSymOpnd()->m_sym->AsStackSym()->m_instrDef; |
| linkOpnd = tmpInstr->GetSrc2(); |
| |
| #if DBG |
| IntConstType argCount = linkOpnd->AsSymOpnd()->m_sym->AsStackSym()->GetArgSlotNum(); |
| Assert(argCount == 2); |
| #endif |
| |
| IR::Instr *argInstr = Inline::GetDefInstr(linkOpnd); |
| Assert(argInstr->m_opcode == Js::OpCode::ArgOut_A); |
| IR::Opnd *src1 = argInstr->GetSrc1(); |
| |
| if (src1->GetValueType().IsLikelyInt()) |
| { |
| //Trying to generate this code |
| // MOV resultOpnd, dst |
| // MOV fromCharCodeIntArgOpnd, src1 |
| // SAR fromCharCodeIntArgOpnd, Js::VarTag_Shift |
| // JAE $Helper |
| // CMP fromCharCodeIntArgOpnd, Js::ScriptContext::CharStringCacheSize |
| // |
| // JAE $labelWCharStringCheck |
| // MOV resultOpnd, GetCharStringCache[fromCharCodeIntArgOpnd] |
| // TST resultOpnd, resultOpnd //Check for null |
| // JEQ $helper |
| // JMP $Done |
| // |
| //$labelWCharStringCheck: |
| // resultOpnd = Call HelperGetStringForCharW |
| // JMP $Done |
| //$helper: |
| IR::RegOpnd * resultOpnd = nullptr; |
| if (!instr->GetDst()->IsRegOpnd() || instr->GetDst()->IsEqual(src1)) |
| { |
| resultOpnd = IR::RegOpnd::New(TyVar, this->m_func); |
| } |
| else |
| { |
| resultOpnd = instr->GetDst()->AsRegOpnd(); |
| } |
| |
| IR::LabelInstr *labelHelper = IR::LabelInstr::New(Js::OpCode::Label, this->m_func, true); |
| |
| IR::RegOpnd * fromCharCodeIntArgOpnd = IR::RegOpnd::New(TyVar, instr->m_func); |
| IR::AutoReuseOpnd autoReuseFromCharCodeIntArgOpnd(fromCharCodeIntArgOpnd, instr->m_func); |
| InsertMove(fromCharCodeIntArgOpnd, src1, instr); |
| |
| //Check for tagged int and get the untagged version. |
| fromCharCodeIntArgOpnd = GenerateUntagVar(fromCharCodeIntArgOpnd, labelHelper, instr); |
| |
| IR::LabelInstr *doneLabel = IR::LabelInstr::New(Js::OpCode::Label, this->m_func); |
| GenerateGetSingleCharString(fromCharCodeIntArgOpnd, resultOpnd, labelHelper, doneLabel, instr, false); |
| |
| instr->InsertBefore(labelHelper); |
| |
| instr->InsertAfter(doneLabel); |
| |
| RelocateCallDirectToHelperPath(tmpInstr, labelHelper); |
| } |
| return true; |
| } |
| |
| void |
| Lowerer::GenerateGetSingleCharString(IR::RegOpnd * charCodeOpnd, IR::Opnd * resultOpnd, IR::LabelInstr * labelHelper, IR::LabelInstr * doneLabel, IR::Instr * instr, bool isCodePoint) |
| { |
| // MOV cacheReg, CharStringCache |
| // CMP charCodeOpnd, Js::ScriptContext::CharStringCacheSize |
| // JAE $labelWCharStringCheck |
| // MOV resultOpnd, cacheReg[charCodeOpnd] |
| // TST resultOpnd, resultOpnd //Check for null |
| // JEQ $helper |
| // JMP $Done |
| // |
| //$labelWCharStringCheck: |
| // Arg1 = charCodeOpnd |
| // Arg0 = cacheReg |
| // resultOpnd = Call HelperGetStringForCharW/CodePoint |
| // JMP $Done |
| //$helper: |
| |
| IR::LabelInstr *labelWCharStringCheck = IR::LabelInstr::New(Js::OpCode::Label, this->m_func); |
| |
| // Try to load from CharStringCacheA |
| IR::RegOpnd *cacheRegOpnd = IR::RegOpnd::New(TyVar, instr->m_func); |
| IR::AutoReuseOpnd autoReuseCacheRegOpnd(cacheRegOpnd, instr->m_func); |
| |
| Assert(Js::JavascriptLibrary::GetCharStringCacheAOffset() == Js::JavascriptLibrary::GetCharStringCacheOffset()); |
| InsertMove(cacheRegOpnd, this->LoadLibraryValueOpnd(instr, LibraryValue::ValueCharStringCache), instr); |
| |
| InsertCompareBranch(charCodeOpnd, IR::IntConstOpnd::New(Js::CharStringCache::CharStringCacheSize, TyUint32, this->m_func), Js::OpCode::BrGe_A, true, labelWCharStringCheck, instr); |
| InsertMove(resultOpnd, IR::IndirOpnd::New(cacheRegOpnd, charCodeOpnd, this->m_lowererMD.GetDefaultIndirScale(), TyVar, instr->m_func), instr); |
| |
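| // Cache entries are created lazily, so a null entry means we must fall back to the helper to build the string. |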
| InsertTestBranch(resultOpnd, resultOpnd, Js::OpCode::BrEq_A, labelHelper, instr); |
| |
| InsertMove(instr->GetDst(), resultOpnd, instr); |
| InsertBranch(Js::OpCode::Br, doneLabel, instr); |
| |
| instr->InsertBefore(labelWCharStringCheck); |
| |
| IR::JnHelperMethod helperMethod; |
| if (isCodePoint) |
| { |
| helperMethod = IR::HelperGetStringForCharCodePoint; |
| } |
| else |
| { |
| InsertMove(charCodeOpnd, charCodeOpnd->UseWithNewType(TyUint16, instr->m_func), instr); |
| helperMethod = IR::HelperGetStringForChar; |
| } |
| |
| // Fall back to CharStringCacheW or CharStringCacheCodePoint; this is a helper call. |
| |
| this->m_lowererMD.LoadHelperArgument(instr, charCodeOpnd); |
| this->m_lowererMD.LoadHelperArgument(instr, cacheRegOpnd); |
| IR::Instr* helperCallInstr = IR::Instr::New(Js::OpCode::Call, resultOpnd, IR::HelperCallOpnd::New(helperMethod, this->m_func), this->m_func); |
| instr->InsertBefore(helperCallInstr); |
| this->m_lowererMD.LowerCall(helperCallInstr, 0); |
| |
| InsertMove(instr->GetDst(), resultOpnd, instr); |
| InsertBranch(Js::OpCode::Br, doneLabel, instr); |
| } |
| |
| bool |
| Lowerer::GenerateFastInlineGlobalObjectParseInt(IR::Instr *instr) |
| { |
| Assert(instr->m_opcode == Js::OpCode::CallDirect); |
| |
| // ArgOut sequence |
| // s8.var = StartCall 2 (0x2).i32 #000c |
| // arg1(s9)<0>.var = ArgOut_A s2.var, s8.var #0014 //Implicit this, global object |
| // arg2(s10)<4>.var = ArgOut_A s3.var, arg1(s9)<0>.var #0018 //First argument to parseInt |
| // arg1(s11)<0>.u32 = ArgOut_A_InlineSpecialized 0x012C26C0 (DynamicObject).var, arg2(s10)<4>.var # |
| // s0[LikelyTaggedInt].var = CallDirect GlobalObject_ParseInt.u32, arg1(s11)<0>.u32 #001c |
| |
| IR::Opnd * linkOpnd = instr->GetSrc2(); |
| IR::Instr * tmpInstr = Inline::GetDefInstr(linkOpnd);// linkOpnd->AsSymOpnd()->m_sym->AsStackSym()->m_instrDef; |
| linkOpnd = tmpInstr->GetSrc2(); |
| |
| #if DBG |
| IntConstType argCount = linkOpnd->AsSymOpnd()->m_sym->AsStackSym()->GetArgSlotNum(); |
| Assert(argCount == 2); |
| #endif |
| |
| IR::Instr *argInstr = Inline::GetDefInstr(linkOpnd); |
| Assert(argInstr->m_opcode == Js::OpCode::ArgOut_A); |
| IR::Opnd *parseIntArgOpnd = argInstr->GetSrc1(); |
| |
| if (parseIntArgOpnd->GetValueType().IsLikelyNumber()) |
| { |
| // If the argument is likely a number, check for a tagged int and set the dst directly |
| IR::LabelInstr *doneLabel = IR::LabelInstr::New(Js::OpCode::Label, this->m_func); |
| IR::LabelInstr *labelHelper = IR::LabelInstr::New(Js::OpCode::Label, this->m_func, true); |
| |
| if (!parseIntArgOpnd->IsTaggedInt()) |
| { |
| this->m_lowererMD.GenerateSmIntTest(parseIntArgOpnd, instr, labelHelper); |
| } |
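| // parseInt of a tagged int is the int itself (this two-argument form passes no radix), so the fast path just copies the value. |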
| if (instr->GetDst()) |
| { |
| this->InsertMove(instr->GetDst(), parseIntArgOpnd, instr); |
| } |
| InsertBranch(Js::OpCode::Br, doneLabel, instr); |
| instr->InsertBefore(labelHelper); |
| instr->InsertAfter(doneLabel); |
| |
| RelocateCallDirectToHelperPath(tmpInstr, labelHelper); |
| } |
| return true; |
| } |
| |
| void |
| Lowerer::GenerateFastInlineArrayPop(IR::Instr * instr) |
| { |
| Assert(instr->m_opcode == Js::OpCode::InlineArrayPop); |
| |
| IR::Opnd *arrayOpnd = instr->GetSrc1(); |
| |
| IR::LabelInstr *bailOutLabelHelper = IR::LabelInstr::New(Js::OpCode::Label, this->m_func, true); |
| IR::LabelInstr *doneLabel = IR::LabelInstr::New(Js::OpCode::Label, this->m_func); |
| |
| bool isLikelyNativeArray = arrayOpnd->GetValueType().IsLikelyNativeArray(); |
| |
| if (ShouldGenerateArrayFastPath(arrayOpnd, false, false, false)) |
| { |
| IR::LabelInstr *labelHelper = IR::LabelInstr::New(Js::OpCode::Label, this->m_func, true); |
| |
| if(isLikelyNativeArray) |
| { |
| // We bail out on cases such as length == 0 or a failing array test (the runtime helper cannot handle these cases) |
| GenerateFastPop(arrayOpnd, instr, labelHelper, doneLabel, bailOutLabelHelper); |
| } |
| else |
| { |
| // We jump to the helper on cases such as length == 0 or a failing array test |
| GenerateFastPop(arrayOpnd, instr, labelHelper, doneLabel, labelHelper); |
| } |
| |
| instr->InsertBefore(labelHelper); |
| |
| // JMP to $doneLabel |
| InsertBranch(Js::OpCode::Br, true, doneLabel, labelHelper); |
| } |
| else |
| { |
| // We assume here that the array is a Var array; the runtime helper calls assume this. |
| Assert(!isLikelyNativeArray); |
| } |
| |
| instr->InsertAfter(doneLabel); |
| |
| if(isLikelyNativeArray) |
| { |
| //Lower IR::BailOutConventionalNativeArrayAccessOnly here. |
| LowerOneBailOutKind(instr, IR::BailOutConventionalNativeArrayAccessOnly, false, false); |
| instr->InsertAfter(bailOutLabelHelper); |
| } |
| |
| GenerateHelperToArrayPopFastPath(instr, doneLabel, bailOutLabelHelper); |
| } |
| |
| void |
| Lowerer::GenerateFastInlineIsArray(IR::Instr * instr) |
| { |
| Assert(instr->m_opcode == Js::OpCode::CallDirect); |
| |
| IR::Opnd * dst = instr->GetDst(); |
| Assert(dst); |
| |
| //CallDirect src2 |
| IR::Opnd * linkOpnd = instr->GetSrc2(); |
| //ArgOut_A_InlineSpecialized |
| IR::Instr * tmpInstr = linkOpnd->AsSymOpnd()->m_sym->AsStackSym()->m_instrDef; |
| |
| IR::Opnd * argsOpnd[2] = { 0 }; |
| bool result = instr->FetchOperands(argsOpnd, 2); |
| Assert(result); |
| AnalysisAssert(argsOpnd[1]); |
| |
| IR::LabelInstr *helperLabel = InsertLabel(true, instr); |
| IR::Instr * insertInstr = helperLabel; |
| IR::LabelInstr *doneLabel = InsertLabel(false, instr->m_next); |
| |
| ValueType valueType = argsOpnd[1]->GetValueType(); |
| IR::RegOpnd * src = GetRegOpnd(argsOpnd[1], insertInstr, m_func, argsOpnd[1]->GetType()); |
| |
| IR::LabelInstr *checkNotArrayLabel = IR::LabelInstr::New(Js::OpCode::Label, m_func, valueType.IsLikelyArray()); |
| IR::LabelInstr *notArrayLabel = IR::LabelInstr::New(Js::OpCode::Label, m_func, valueType.IsLikelyArray()); |
| |
| if (!src->IsNotTaggedValue()) |
| { |
| m_lowererMD.GenerateObjectTest(src, insertInstr, notArrayLabel); |
| } |
| |
| // MOV typeOpnd, [opnd + offset(type)] |
| IR::RegOpnd *typeOpnd = IR::RegOpnd::New(TyMachPtr, m_func); |
| const IR::AutoReuseOpnd autoReuseTypeOpnd(typeOpnd, m_func); |
| IR::IndirOpnd *indirOpnd = IR::IndirOpnd::New(src, Js::RecyclableObject::GetOffsetOfType(), TyMachPtr, m_func); |
| InsertMove(typeOpnd, indirOpnd, insertInstr); |
| |
| // MOV typeIdOpnd, [typeOpnd + offset(typeId)] |
| IR::RegOpnd *typeIdOpnd = IR::RegOpnd::New(TyMachPtr, m_func); |
| const IR::AutoReuseOpnd autoReuseTypeIdOpnd(typeIdOpnd, m_func); |
| indirOpnd = IR::IndirOpnd::New(typeOpnd, Js::Type::GetOffsetOfTypeId(), TyInt32, m_func); |
| InsertMove(typeIdOpnd, indirOpnd, insertInstr); |
| |
| // CMP typeIdOpnd, TypeIds_ArrayFirst |
| // JLT $notArray |
| InsertCompareBranch( |
| typeIdOpnd, |
| IR::IntConstOpnd::New(Js::TypeIds_ArrayFirst, TyInt32, m_func), |
| Js::OpCode::BrLt_A, |
| checkNotArrayLabel, |
| insertInstr); |
| // CMP typeIdOpnd, TypeIds_ArrayLastWithES5 |
| // JGT $notArray |
| InsertCompareBranch( |
| typeIdOpnd, |
| IR::IntConstOpnd::New(Js::TypeIds_ArrayLastWithES5, TyInt32, m_func), |
| Js::OpCode::BrGt_A, |
| notArrayLabel, |
| insertInstr); |
| |
| // MOV dst, True |
| InsertMove(dst, LoadLibraryValueOpnd(instr, LibraryValue::ValueTrue), insertInstr); |
| |
| // JMP $done |
| InsertBranch(Js::OpCode::Br, doneLabel, insertInstr); |
| |
| // $checkNotArray: |
| insertInstr->InsertBefore(checkNotArrayLabel); |
| |
| // CMP typeIdOpnd, TypeIds_Proxy |
| // JEQ $helperLabel |
| InsertCompareBranch( |
| typeIdOpnd, |
| IR::IntConstOpnd::New(Js::TypeIds_Proxy, TyInt32, m_func), |
| Js::OpCode::BrEq_A, |
| helperLabel, |
| insertInstr); |
| CompileAssert(Js::TypeIds_Proxy < Js::TypeIds_ArrayFirst); |
| |
| // CMP typeIdOpnd, TypeIds_HostDispatch |
| // JEQ $helperLabel |
| InsertCompareBranch( |
| typeIdOpnd, |
| IR::IntConstOpnd::New(Js::TypeIds_HostDispatch, TyInt32, m_func), |
| Js::OpCode::BrEq_A, |
| helperLabel, |
| insertInstr); |
| CompileAssert(Js::TypeIds_HostDispatch < Js::TypeIds_ArrayFirst); |
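| // Both Proxy and HostDispatch have type ids below TypeIds_ArrayFirst (guaranteed by the CompileAsserts above), so they reach this path via $checkNotArray; since either may wrap an array, only the helper can answer. Any other type id landing here is definitely not an array. |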
| |
| // $notArrayLabel: |
| insertInstr->InsertBefore(notArrayLabel); |
| |
| // MOV dst, False |
| InsertMove(dst, LoadLibraryValueOpnd(instr, LibraryValue::ValueFalse), insertInstr); |
| |
| InsertBranch(Js::OpCode::Br, doneLabel, insertInstr); |
| |
| RelocateCallDirectToHelperPath(tmpInstr, helperLabel); |
| } |
| |
| void |
| Lowerer::GenerateFastInlineHasOwnProperty(IR::Instr * instr) |
| { |
| Assert(instr->m_opcode == Js::OpCode::CallDirect); |
| |
| //CallDirect src2 |
| IR::Opnd * linkOpnd = instr->GetSrc2(); |
| //ArgOut_A_InlineSpecialized |
| IR::Instr * tmpInstr = linkOpnd->AsSymOpnd()->m_sym->AsStackSym()->m_instrDef; |
| |
| IR::Opnd * argsOpnd[2] = { 0 }; |
| bool result = instr->FetchOperands(argsOpnd, 2); |
| Assert(result); |
| AnalysisAssert(argsOpnd[0] && argsOpnd[1]); |
| |
| if (argsOpnd[1]->GetValueType().IsNotString() |
| || argsOpnd[0]->GetValueType().IsNotObject() |
| || !argsOpnd[0]->IsRegOpnd() |
| || !argsOpnd[1]->IsRegOpnd()) |
| { |
| return; |
| } |
| |
| IR::RegOpnd * thisObj = argsOpnd[0]->AsRegOpnd(); |
| IR::RegOpnd * propOpnd = argsOpnd[1]->AsRegOpnd(); |
| |
| // Fast path for the case where hasOwnProperty is called with a property name loaded by a for-in loop |
| bool generateForInFastpath = propOpnd->GetValueType().IsString() |
| && propOpnd->m_sym->m_isSingleDef |
| && (propOpnd->m_sym->m_instrDef->m_opcode == Js::OpCode::BrOnEmpty |
| || propOpnd->m_sym->m_instrDef->m_opcode == Js::OpCode::BrOnNotEmpty); |
| |
| IR::LabelInstr * doneLabel = InsertLabel(false, instr->m_next); |
| IR::LabelInstr * labelHelper = InsertLabel(true, instr); |
| |
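| // With the for-in fast path, an inline cache miss gets a second chance below; otherwise a miss goes straight to the helper. |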
| IR::LabelInstr * cacheMissLabel = generateForInFastpath ? IR::LabelInstr::New(Js::OpCode::Label, m_func, true) : labelHelper; |
| |
| IR::Instr * insertInstr = labelHelper; |
| |
| // GenerateObjectTest(propOpnd, $labelHelper) |
| // CMP [propOpnd], PropertyString::`vtable' |
| // JNE $helper |
| // GenerateObjectTest(thisObj, $labelHelper) |
| // MOV inlineCacheOpnd, propOpnd->ldElemInlineCache |
| // MOV objectTypeOpnd, thisObj->type |
| // GenerateDynamicLoadPolymorphicInlineCacheSlot(inlineCacheOpnd, objectTypeOpnd) ; loads inline cache for given type |
| // GenerateLocalInlineCacheCheck(objectTypeOpnd, inlineCacheOpnd, $notInlineSlotsLabel) ; check for type in inline slots, jump to $notInlineSlotsLabel on failure |
| // MOV dst, ValueTrue |
| // JMP $done |
| // $notInlineSlotsLabel: |
| // GenerateLoadTaggedType(objectTypeOpnd, opndTaggedType) |
| // GenerateLocalInlineCacheCheck(opndTaggedType, inlineCacheOpnd, $cacheMissLabel) ; check for type in aux slot, jump to $cacheMissLabel on failure |
| // MOV dst, ValueTrue |
| // JMP $done |
| |
| m_lowererMD.GenerateObjectTest(propOpnd, insertInstr, labelHelper); |
| |
| InsertCompareBranch(IR::IndirOpnd::New(propOpnd, 0, TyMachPtr, m_func), LoadVTableValueOpnd(insertInstr, VTableValue::VtablePropertyString), Js::OpCode::BrNeq_A, labelHelper, insertInstr); |
| |
| m_lowererMD.GenerateObjectTest(thisObj, insertInstr, labelHelper); |
| |
| IR::RegOpnd * inlineCacheOpnd = IR::RegOpnd::New(TyMachPtr, m_func); |
| InsertMove(inlineCacheOpnd, IR::IndirOpnd::New(propOpnd, Js::PropertyString::GetOffsetOfLdElemInlineCache(), TyMachPtr, m_func), insertInstr); |
| |
| IR::RegOpnd * objectTypeOpnd = IR::RegOpnd::New(TyMachPtr, m_func); |
| InsertMove(objectTypeOpnd, IR::IndirOpnd::New(thisObj, Js::RecyclableObject::GetOffsetOfType(), TyMachPtr, m_func), insertInstr); |
| |
| GenerateDynamicLoadPolymorphicInlineCacheSlot(insertInstr, inlineCacheOpnd, objectTypeOpnd); |
| |
| IR::LabelInstr * notInlineSlotsLabel = IR::LabelInstr::New(Js::OpCode::Label, m_func); |
| GenerateLocalInlineCacheCheck(insertInstr, objectTypeOpnd, inlineCacheOpnd, notInlineSlotsLabel); |
| |
| InsertMove(instr->GetDst(), LoadLibraryValueOpnd(instr, LibraryValue::ValueTrue), insertInstr); |
| InsertBranch(Js::OpCode::Br, doneLabel, insertInstr); |
| |
| insertInstr->InsertBefore(notInlineSlotsLabel); |
| IR::RegOpnd * opndTaggedType = IR::RegOpnd::New(TyMachReg, m_func); |
| m_lowererMD.GenerateLoadTaggedType(insertInstr, objectTypeOpnd, opndTaggedType); |
| GenerateLocalInlineCacheCheck(insertInstr, opndTaggedType, inlineCacheOpnd, cacheMissLabel); |
| InsertMove(instr->GetDst(), LoadLibraryValueOpnd(instr, LibraryValue::ValueTrue), insertInstr); |
| InsertBranch(Js::OpCode::Br, doneLabel, insertInstr); |
| |
| if (!generateForInFastpath) |
| { |
| RelocateCallDirectToHelperPath(tmpInstr, labelHelper); |
| return; |
| } |
| |
| insertInstr->InsertBefore(cacheMissLabel); |
| |
| // CMP forInEnumeratorOpnd->canUseJitFastPath, 0 |
| // JEQ $labelHelper |
| // MOV cachedDataTypeOpnd, forInEnumeratorOpnd->enumeratorInitialType |
| // CMP thisObj->type, cachedDataTypeOpnd |
| // JNE $labelHelper |
| // CMP forInEnumeratorOpnd->enumeratingPrototype, 0 |
| // JNE $falseLabel |
| // MOV dst, True |
| // JMP $doneLabel |
| // $falseLabel: [helper] |
| // MOV dst, False |
| // JMP $doneLabel |
| // $labelHelper: [helper] |
| // CallDirect code |
| // ... |
| // $doneLabel: |
| |
| IR::Opnd * forInEnumeratorOpnd = argsOpnd[1]->AsRegOpnd()->m_sym->m_instrDef->GetSrc1(); |
| |
| // go to helper if we can't use JIT fastpath |
| IR::Opnd * canUseJitFastPathOpnd = GetForInEnumeratorFieldOpnd(forInEnumeratorOpnd, Js::ForInObjectEnumerator::GetOffsetOfCanUseJitFastPath(), TyInt8); |
| InsertCompareBranch(canUseJitFastPathOpnd, IR::IntConstOpnd::New(0, TyInt8, m_func), Js::OpCode::BrEq_A, labelHelper, insertInstr); |
| |
| // go to helper if the enumerator's initial type is not the same as the type of the object we are querying |
| IR::RegOpnd * cachedDataTypeOpnd = IR::RegOpnd::New(TyMachPtr, m_func); |
| InsertMove(cachedDataTypeOpnd, GetForInEnumeratorFieldOpnd(forInEnumeratorOpnd, Js::ForInObjectEnumerator::GetOffsetOfEnumeratorInitialType(), TyMachPtr), insertInstr); |
| InsertCompareBranch(cachedDataTypeOpnd, IR::IndirOpnd::New(thisObj, Js::DynamicObject::GetOffsetOfType(), TyMachPtr, m_func), Js::OpCode::BrNeq_A, labelHelper, insertInstr); |
| |
| // if we haven't gone to the helper, we can check whether we are enumerating the prototype to decide whether the property is an own property |
| IR::LabelInstr *falseLabel = IR::LabelInstr::New(Js::OpCode::Label, m_func, true); |
| IR::Opnd * enumeratingPrototype = GetForInEnumeratorFieldOpnd(forInEnumeratorOpnd, Js::ForInObjectEnumerator::GetOffsetOfEnumeratingPrototype(), TyInt8); |
| InsertCompareBranch(enumeratingPrototype, IR::IntConstOpnd::New(0, TyInt8, m_func), Js::OpCode::BrNeq_A, falseLabel, insertInstr); |
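| // enumeratingPrototype is set once the enumerator has moved on to the prototype chain, so the current property cannot be an own property. |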
| |
| // assume true is the main path |
| InsertMove(instr->GetDst(), LoadLibraryValueOpnd(instr, LibraryValue::ValueTrue), insertInstr); |
| InsertBranch(Js::OpCode::Br, doneLabel, insertInstr); |
| |
| // load false on helper path |
| insertInstr->InsertBefore(falseLabel); |
| InsertMove(instr->GetDst(), LoadLibraryValueOpnd(instr, LibraryValue::ValueFalse), insertInstr); |
| InsertBranch(Js::OpCode::Br, doneLabel, insertInstr); |
| |
| RelocateCallDirectToHelperPath(tmpInstr, labelHelper); |
| } |
| |
| bool |
| Lowerer::ShouldGenerateStringReplaceFastPath(IR::Instr * callInstr, IntConstType argCount) |
| { |
| // a.replace(b,c) |
| // We want to emit the fast path if 'a' and 'c' are strings and 'b' is a regex |
| // |
| // argout sequence: |
| // arg1(s12)<0>.var = ArgOut_A s2.var, s11.var #0014 <---- a |
| // arg2(s13)<4>.var = ArgOut_A s3.var, arg1(s12)<0>.var #0018 <---- b |
| // arg3(s14)<8>.var = ArgOut_A s4.var, arg2(s13)<4>.var #001c <---- c |
| // s0[LikelyString].var = CallI s5[ffunc].var, arg3(s14)<8>.var #0020 |
| |
| IR::Opnd *linkOpnd = callInstr->GetSrc2(); |
| Assert(argCount == 2); |
| |
| while(linkOpnd->IsSymOpnd()) |
| { |
| IR::SymOpnd *src2 = linkOpnd->AsSymOpnd(); |
| StackSym *sym = src2->m_sym->AsStackSym(); |
| Assert(sym->m_isSingleDef); |
| IR::Instr *argInstr = sym->m_instrDef; |
| |
| Assert(argCount >= 0); |
| // check to see if 'a' and 'c' are likely strings |
| if((argCount == 2 || argCount == 0) && (!argInstr->GetSrc1()->GetValueType().IsLikelyString())) |
| { |
| return false; |
| } |
| // we want 'b' to be regex. Don't generate fastpath if it is a tagged int |
| if((argCount == 1) && (argInstr->GetSrc1()->IsTaggedInt())) |
| { |
| return false; |
| } |
| argCount--; |
| linkOpnd = argInstr->GetSrc2(); |
| } |
| return true; |
| } |
| |
| bool |
| Lowerer::GenerateFastReplace(IR::Opnd* strOpnd, IR::Opnd* src1, IR::Opnd* src2, IR::Instr *callInstr, IR::Instr *insertInstr, IR::LabelInstr *labelHelper, IR::LabelInstr *doneLabel) |
| { |
| // a.replace(b,c) |
| // We want to emit the fast path if 'a' and 'c' are strings and 'b' is a regex |
| // |
| // strOpnd --> a |
| // src1 --> b |
| // src2 --> c |
| |
| IR::Opnd * callDst = callInstr->GetDst(); |
| |
| Assert(strOpnd->GetValueType().IsLikelyString() && src2->GetValueType().IsLikelyString()); |
| |
| if(!strOpnd->GetValueType().IsString()) |
| { |
| strOpnd = GetRegOpnd(strOpnd, insertInstr, m_func, TyVar); |
| |
| this->GenerateStringTest(strOpnd->AsRegOpnd(), insertInstr, labelHelper); |
| } |
| |
| if(!src1->IsNotTaggedValue()) |
| { |
| m_lowererMD.GenerateObjectTest(src1, insertInstr, labelHelper); |
| } |
| |
| IR::Opnd * vtableOpnd = LoadVTableValueOpnd(insertInstr, VTableValue::VtableJavascriptRegExp); |
| |
| // cmp [regex], vtableAddress |
| // jne $labelHelper |
| src1 = GetRegOpnd(src1, insertInstr, m_func, TyVar); |
| InsertCompareBranch( |
| IR::IndirOpnd::New(src1->AsRegOpnd(), 0, TyMachPtr, insertInstr->m_func), |
| vtableOpnd, |
| Js::OpCode::BrNeq_A, |
| labelHelper, |
| insertInstr); |
| |
| if(!src2->GetValueType().IsString()) |
| { |
| src2 = GetRegOpnd(src2, insertInstr, m_func, TyVar); |
| this->GenerateStringTest(src2->AsRegOpnd(), insertInstr, labelHelper); |
| } |
| |
| IR::Instr * helperCallInstr = IR::Instr::New(LowererMD::MDCallOpcode, insertInstr->m_func); |
| if (callDst) |
| { |
| helperCallInstr->SetDst(callDst); |
| } |
| insertInstr->InsertBefore(helperCallInstr); |
| |
| if (insertInstr->HasBailOutInfo() && BailOutInfo::IsBailOutOnImplicitCalls(insertInstr->GetBailOutKind())) |
| { |
| helperCallInstr = AddBailoutToHelperCallInstr(helperCallInstr, insertInstr->GetBailOutInfo(), insertInstr->GetBailOutKind(), insertInstr); |
| } |
| |
| //scriptContext, pRegEx, pThis, pReplace (to be pushed in reverse order) |
| |
| // pReplace, pThis, pRegEx |
| this->m_lowererMD.LoadHelperArgument(helperCallInstr, src2); |
| this->m_lowererMD.LoadHelperArgument(helperCallInstr, strOpnd); |
| this->m_lowererMD.LoadHelperArgument(helperCallInstr, src1); |
| |
| // script context |
| LoadScriptContext(helperCallInstr); |
| |
| if(callDst) |
| { |
| m_lowererMD.ChangeToHelperCall(helperCallInstr, IR::JnHelperMethod::HelperRegExp_ReplaceStringResultUsed); |
| } |
| else |
| { |
| m_lowererMD.ChangeToHelperCall(helperCallInstr, IR::JnHelperMethod::HelperRegExp_ReplaceStringResultNotUsed); |
| } |
| |
| return true; |
| } |
| |
| ///---- |
| |
| void |
| Lowerer::GenerateFastInlineStringSplitMatch(IR::Instr * instr) |
| { |
| // a.split(b,c (optional) ) |
| // We want to emit the fast path when |
| // 1. c is not present, and |
| // 2. 'a' is a string and 'b' is a regex. |
| // |
| // a.match(b) |
| // We want to emit the fast path when 'a' is a string and 'b' is a regex. |
| |
| Assert(instr->m_opcode == Js::OpCode::CallDirect); |
| IR::Opnd * callDst = instr->GetDst(); |
| |
| //helperCallOpnd |
| IR::Opnd * src1 = instr->GetSrc1(); |
| |
| //ArgOut_A_InlineSpecialized |
| IR::Instr * tmpInstr = instr->GetSrc2()->AsSymOpnd()->m_sym->AsStackSym()->m_instrDef; |
| |
| IR::Opnd * argsOpnd[2]; |
| if(!instr->FetchOperands(argsOpnd, 2)) |
| { |
| return; |
| } |
| |
| if(!argsOpnd[0]->GetValueType().IsLikelyString() || argsOpnd[1]->IsTaggedInt()) |
| { |
| return; |
| } |
| |
| IR::LabelInstr *labelHelper = IR::LabelInstr::New(Js::OpCode::Label, this->m_func, true); |
| if(!argsOpnd[0]->GetValueType().IsString()) |
| { |
| argsOpnd[0] = GetRegOpnd(argsOpnd[0], instr, m_func, TyVar); |
| this->GenerateStringTest(argsOpnd[0]->AsRegOpnd(), instr, labelHelper); |
| } |
| |
| if(!argsOpnd[1]->IsNotTaggedValue()) |
| { |
| m_lowererMD.GenerateObjectTest(argsOpnd[1], instr, labelHelper); |
| } |
| |
| IR::Opnd * vtableOpnd = LoadVTableValueOpnd(instr, VTableValue::VtableJavascriptRegExp); |
| |
| // cmp [regex], vtableAddress |
| // jne $labelHelper |
| argsOpnd[1] = GetRegOpnd(argsOpnd[1], instr, m_func, TyVar); |
| InsertCompareBranch( |
| IR::IndirOpnd::New(argsOpnd[1]->AsRegOpnd(), 0, TyMachPtr, instr->m_func), |
| vtableOpnd, |
| Js::OpCode::BrNeq_A, |
| labelHelper, |
| instr); |
| |
| IR::Instr * helperCallInstr = IR::Instr::New(LowererMD::MDCallOpcode, instr->m_func); |
| if (callDst) |
| { |
| helperCallInstr->SetDst(callDst); |
| } |
| instr->InsertBefore(helperCallInstr); |
| if (instr->HasBailOutInfo() && BailOutInfo::IsBailOutOnImplicitCalls(instr->GetBailOutKind())) |
| { |
| helperCallInstr = AddBailoutToHelperCallInstr(helperCallInstr, instr->GetBailOutInfo(), instr->GetBailOutKind(), instr); |
| } |
| |
| // [stackAllocationPointer, ]scriptcontext, regexp, input[, limit] (to be pushed in reverse order) |
| |
| if(src1->AsHelperCallOpnd()->m_fnHelper == IR::JnHelperMethod::HelperString_Split) |
| { |
| //limit |
| // Since we optimize only the two-operand form, pass UINT_MAX as the limit |
| IR::Opnd* limit = IR::IntConstOpnd::New(UINT_MAX, TyUint32, instr->m_func); |
| this->m_lowererMD.LoadHelperArgument(helperCallInstr, limit); |
| } |
| |
| //input, regexp |
| this->m_lowererMD.LoadHelperArgument(helperCallInstr, argsOpnd[0]); |
| this->m_lowererMD.LoadHelperArgument(helperCallInstr, argsOpnd[1]); |
| |
| // script context |
| LoadScriptContext(helperCallInstr); |
| |
| IR::JnHelperMethod helperMethod = IR::JnHelperMethod::HelperInvalid; |
| IR::AutoReuseOpnd autoReuseStackAllocationOpnd; |
| if(callDst && instr->dstIsTempObject) |
| { |
| switch(src1->AsHelperCallOpnd()->m_fnHelper) |
| { |
| case IR::JnHelperMethod::HelperString_Split: |
| helperMethod = IR::JnHelperMethod::HelperRegExp_SplitResultUsedAndMayBeTemp; |
| break; |
| |
| case IR::JnHelperMethod::HelperString_Match: |
| helperMethod = IR::JnHelperMethod::HelperRegExp_MatchResultUsedAndMayBeTemp; |
| break; |
| |
| default: |
| Assert(false); |
| __assume(false); |
| } |
| |
| // Allocate some space on the stack for the result array |
| IR::RegOpnd *const stackAllocationOpnd = IR::RegOpnd::New(TyVar, m_func); |
| autoReuseStackAllocationOpnd.Initialize(stackAllocationOpnd, m_func); |
| stackAllocationOpnd->SetValueType(callDst->GetValueType()); |
| GenerateMarkTempAlloc(stackAllocationOpnd, Js::JavascriptArray::StackAllocationSize, helperCallInstr); |
| m_lowererMD.LoadHelperArgument(helperCallInstr, stackAllocationOpnd); |
| } |
| else |
| { |
| switch(src1->AsHelperCallOpnd()->m_fnHelper) |
| { |
| case IR::JnHelperMethod::HelperString_Split: |
| helperMethod = |
| callDst |
| ? IR::JnHelperMethod::HelperRegExp_SplitResultUsed |
| : IR::JnHelperMethod::HelperRegExp_SplitResultNotUsed; |
| break; |
| |
| case IR::JnHelperMethod::HelperString_Match: |
| helperMethod = |
| callDst |
| ? IR::JnHelperMethod::HelperRegExp_MatchResultUsed |
| : IR::JnHelperMethod::HelperRegExp_MatchResultNotUsed; |
| break; |
| |
| default: |
| Assert(false); |
| __assume(false); |
| } |
| } |
| |
| m_lowererMD.ChangeToHelperCall(helperCallInstr, helperMethod); |
| |
| IR::LabelInstr *doneLabel = IR::LabelInstr::New(Js::OpCode::Label, this->m_func); |
| instr->InsertAfter(doneLabel); |
| instr->InsertBefore(labelHelper); |
| InsertBranch(Js::OpCode::Br, true, doneLabel, labelHelper); |
| |
| RelocateCallDirectToHelperPath(tmpInstr, labelHelper); |
| } |
| |
| void |
| Lowerer::GenerateFastInlineRegExpExec(IR::Instr * instr) |
| { |
| // a.exec(b) |
| // We want to emit the fast path when 'a' is a regex and 'b' is a string |
| |
| Assert(instr->m_opcode == Js::OpCode::CallDirect); |
| IR::Opnd * callDst = instr->GetDst(); |
| |
| //ArgOut_A_InlineSpecialized |
| IR::Instr * tmpInstr = instr->GetSrc2()->AsSymOpnd()->m_sym->AsStackSym()->m_instrDef; |
| |
| IR::Opnd * argsOpnd[2]; |
| if (!instr->FetchOperands(argsOpnd, 2)) |
| { |
| return; |
| } |
| |
| IR::Opnd *opndString = argsOpnd[1]; |
| if(!opndString->GetValueType().IsLikelyString() || argsOpnd[0]->IsTaggedInt()) |
| { |
| return; |
| } |
| |
| IR::LabelInstr *labelHelper = IR::LabelInstr::New(Js::OpCode::Label, this->m_func, true); |
| if(!opndString->GetValueType().IsString()) |
| { |
| opndString = GetRegOpnd(opndString, instr, m_func, TyVar); |
| this->GenerateStringTest(opndString->AsRegOpnd(), instr, labelHelper); |
| } |
| |
| IR::Opnd *opndRegex = argsOpnd[0]; |
| if(!opndRegex->IsNotTaggedValue()) |
| { |
| m_lowererMD.GenerateObjectTest(opndRegex, instr, labelHelper); |
| } |
| |
| IR::Opnd * vtableOpnd = LoadVTableValueOpnd(instr, VTableValue::VtableJavascriptRegExp); |
| |
| // cmp [regex], vtableAddress |
| // jne $labelHelper |
| opndRegex = GetRegOpnd(opndRegex, instr, m_func, TyVar); |
| InsertCompareBranch( |
| IR::IndirOpnd::New(opndRegex->AsRegOpnd(), 0, TyMachPtr, instr->m_func), |
| vtableOpnd, |
| Js::OpCode::BrNeq_A, |
| labelHelper, |
| instr); |
| |
| IR::LabelInstr *doneLabel = IR::LabelInstr::New(Js::OpCode::Label, m_func); |
| |
| if (!PHASE_OFF(Js::ExecBOIFastPathPhase, m_func)) |
| { |
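| // Fast no-match path: if the regex compiled to a begin-of-input two-char literal (BOILiteral2) |
| // and has the global flag, compare the first two char16s of the input (read as a single DWORD) |
| // against the literal. If the string is shorter than two chars or the DWORD differs, there is |
| // no match: reset lastIndex to 0, set the dst to null, and skip the helper call entirely. |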
| // Load pattern from regex operand |
| IR::RegOpnd *opndPattern = IR::RegOpnd::New(TyMachPtr, m_func); |
| Lowerer::InsertMove( |
| opndPattern, |
| IR::IndirOpnd::New(opndRegex->AsRegOpnd(), Js::JavascriptRegExp::GetOffsetOfPattern(), TyMachPtr, m_func), |
| instr); |
| |
| // Load program from pattern |
| IR::RegOpnd *opndProgram = IR::RegOpnd::New(TyMachPtr, m_func); |
| Lowerer::InsertMove( |
| opndProgram, |
| IR::IndirOpnd::New(opndPattern, offsetof(UnifiedRegex::RegexPattern, rep) + offsetof(UnifiedRegex::RegexPattern::UnifiedRep, program), TyMachPtr, m_func), |
| instr); |
| |
| IR::LabelInstr *labelFastHelper = IR::LabelInstr::New(Js::OpCode::Label, m_func); |
| |
| // We want the program's tag to be BOILiteral2Tag |
| InsertCompareBranch( |
| IR::IndirOpnd::New(opndProgram, (int32)UnifiedRegex::Program::GetOffsetOfTag(), TyUint8, m_func), |
| IR::IntConstOpnd::New((IntConstType)UnifiedRegex::Program::GetBOILiteral2Tag(), TyUint8, m_func), |
| Js::OpCode::BrNeq_A, |
| labelFastHelper, |
| instr); |
| |
| // Test the program's flags for "global" |
| InsertTestBranch( |
| IR::IndirOpnd::New(opndProgram, offsetof(UnifiedRegex::Program, flags), TyUint8, m_func), |
| IR::IntConstOpnd::New(UnifiedRegex::GlobalRegexFlag, TyUint8, m_func), |
| Js::OpCode::BrNeq_A, |
| labelFastHelper, |
| instr); |
| |
| IR::LabelInstr *labelNoMatch = IR::LabelInstr::New(Js::OpCode::Label, m_func); |
| |
| // If string length < 2... |
| InsertCompareBranch( |
| IR::IndirOpnd::New(opndString->AsRegOpnd(), offsetof(Js::JavascriptString, m_charLength), TyUint32, m_func), |
| IR::IntConstOpnd::New(2, TyUint32, m_func), |
| Js::OpCode::BrLt_A, |
| labelNoMatch, |
| instr); |
| |
| // ...or the DWORD doesn't match the pattern... |
| IR::RegOpnd *opndBuffer = IR::RegOpnd::New(TyMachReg, m_func); |
| Lowerer::InsertMove( |
| opndBuffer, |
| IR::IndirOpnd::New(opndString->AsRegOpnd(), offsetof(Js::JavascriptString, m_pszValue), TyMachPtr, m_func), |
| instr); |
| |
| IR::LabelInstr *labelGotString = IR::LabelInstr::New(Js::OpCode::Label, m_func); |
| |
| InsertTestBranch(opndBuffer, opndBuffer, Js::OpCode::BrNeq_A, labelGotString, instr); |
| |
| m_lowererMD.LoadHelperArgument(instr, opndString); |
| IR::Instr *instrCall = IR::Instr::New(Js::OpCode::Call, opndBuffer, IR::HelperCallOpnd::New(IR::HelperString_GetSz, m_func), m_func); |
| instr->InsertBefore(instrCall); |
| m_lowererMD.LowerCall(instrCall, 0); |
| |
| instr->InsertBefore(labelGotString); |
| |
| IR::RegOpnd *opndBufferDWORD = IR::RegOpnd::New(TyUint32, m_func); |
| Lowerer::InsertMove( |
| opndBufferDWORD, |
| IR::IndirOpnd::New(opndBuffer, 0, TyUint32, m_func), |
| instr); |
| |
| InsertCompareBranch( |
| IR::IndirOpnd::New(opndProgram, (int32)(UnifiedRegex::Program::GetOffsetOfRep() + UnifiedRegex::Program::GetOffsetOfBOILiteral2Literal()), TyUint32, m_func), |
| opndBufferDWORD, |
| Js::OpCode::BrEq_A, |
| labelFastHelper, |
| instr); |
| |
| // ...then set the last index to 0... |
| instr->InsertBefore(labelNoMatch); |
| |
| Lowerer::InsertMove( |
| IR::IndirOpnd::New(opndRegex->AsRegOpnd(), Js::JavascriptRegExp::GetOffsetOfLastIndexVar(), TyVar, m_func), |
| IR::AddrOpnd::NewNull(m_func), |
| instr); |
| |
| Lowerer::InsertMove( |
| IR::IndirOpnd::New(opndRegex->AsRegOpnd(), Js::JavascriptRegExp::GetOffsetOfLastIndexOrFlag(), TyUint32, m_func), |
| IR::IntConstOpnd::New(0, TyUint32, m_func), |
| instr); |
| |
| // ...and set the dst to null... |
| if (callDst) |
| { |
| Lowerer::InsertMove( |
| callDst, |
| LoadLibraryValueOpnd(instr, LibraryValue::ValueNull), |
| instr); |
| } |
| |
| // ...and we're done. |
| this->InsertBranch(Js::OpCode::Br, doneLabel, instr); |
| |
| instr->InsertBefore(labelFastHelper); |
| } |
| |
| IR::Instr * helperCallInstr = IR::Instr::New(LowererMD::MDCallOpcode, instr->m_func); |
| if (callDst) |
| { |
| helperCallInstr->SetDst(callDst); |
| } |
| instr->InsertBefore(helperCallInstr); |
| if (instr->HasBailOutInfo() && BailOutInfo::IsBailOutOnImplicitCalls(instr->GetBailOutKind())) |
| { |
| helperCallInstr = AddBailoutToHelperCallInstr(helperCallInstr, instr->GetBailOutInfo(), instr->GetBailOutKind(), instr); |
| } |
| // [stackAllocationPointer, ]scriptcontext, regexp, string (to be pushed in reverse order) |
| |
| //string, regexp |
| this->m_lowererMD.LoadHelperArgument(helperCallInstr, opndString); |
| this->m_lowererMD.LoadHelperArgument(helperCallInstr, opndRegex); |
| |
| // script context |
| LoadScriptContext(helperCallInstr); |
| |
| IR::JnHelperMethod helperMethod; |
| IR::AutoReuseOpnd autoReuseStackAllocationOpnd; |
| if (callDst) |
| { |
| if (instr->dstIsTempObject) |
| { |
| helperMethod = IR::JnHelperMethod::HelperRegExp_ExecResultUsedAndMayBeTemp; |
| |
| // Allocate some space on the stack for the result array |
| IR::RegOpnd *const stackAllocationOpnd = IR::RegOpnd::New(TyVar, m_func); |
| autoReuseStackAllocationOpnd.Initialize(stackAllocationOpnd, m_func); |
| stackAllocationOpnd->SetValueType(callDst->GetValueType()); |
| GenerateMarkTempAlloc(stackAllocationOpnd, Js::JavascriptArray::StackAllocationSize, helperCallInstr); |
| m_lowererMD.LoadHelperArgument(helperCallInstr, stackAllocationOpnd); |
| } |
| else |
| { |
| helperMethod = IR::JnHelperMethod::HelperRegExp_ExecResultUsed; |
| } |
| } |
| else |
| { |
| helperMethod = IR::JnHelperMethod::HelperRegExp_ExecResultNotUsed; |
| } |
| |
| m_lowererMD.ChangeToHelperCall(helperCallInstr, helperMethod); |
| |
| instr->InsertAfter(doneLabel); |
| instr->InsertBefore(labelHelper); |
| InsertBranch(Js::OpCode::Br, true, doneLabel, labelHelper); |
| |
| RelocateCallDirectToHelperPath(tmpInstr, labelHelper); |
| } |
| |
| // Generate a fast path for the "in" operator that quickly checks whether we have an array and whether the index is within the array's length. |
| void Lowerer::GenerateFastArrayIsIn(IR::Instr * instr) |
| { |
| // operator "foo in bar" |
| IR::Opnd* src1 = instr->GetSrc1(); // foo |
| IR::Opnd* src2 = instr->GetSrc2(); // bar |
| |
| if ( |
| !src1->GetValueType().IsLikelyInt() || |
| // Do not do a fast path if we know for sure we don't have an int |
| src1->IsNotInt() || |
| !src2->GetValueType().IsLikelyArray() || |
| !src2->GetValueType().HasNoMissingValues()) |
| { |
| return; |
| } |
| |
| IR::LabelInstr* helperLabel = IR::LabelInstr::New(Js::OpCode::Label, m_func, true); |
| IR::LabelInstr* doneLabel = IR::LabelInstr::New(Js::OpCode::Label, m_func); |
| IR::LabelInstr* isArrayLabel = IR::LabelInstr::New(Js::OpCode::Label, m_func); |
| |
| IR::RegOpnd* src1Untagged = GenerateUntagVar(src1->AsRegOpnd(), helperLabel, instr); |
| IR::RegOpnd* src2RegOpnd = IR::RegOpnd::New(TyMachPtr, m_func); |
| InsertMove(src2RegOpnd, src2, instr); |
| |
| |
| IR::AutoReuseOpnd autoReuseArrayOpnd; |
| m_lowererMD.GenerateObjectTest(src2RegOpnd, instr, helperLabel); |
| IR::RegOpnd* arrayOpnd = src2RegOpnd->Copy(instr->m_func)->AsRegOpnd(); |
| autoReuseArrayOpnd.Initialize(arrayOpnd, instr->m_func, false /* autoDelete */); |
| |
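| // Accept exactly the three concrete array vtables (var, native int, native float); anything else goes to the helper. |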
| IR::Opnd* vtableOpnd = LoadVTableValueOpnd(instr, VTableValue::VtableJavascriptArray); |
| InsertCompareBranch( |
| IR::IndirOpnd::New(arrayOpnd, 0, TyMachPtr, instr->m_func), |
| vtableOpnd, |
| Js::OpCode::BrEq_A, |
| isArrayLabel, |
| instr); |
| |
| vtableOpnd = LoadVTableValueOpnd(instr, VTableValue::VtableNativeIntArray); |
| InsertCompareBranch( |
| IR::IndirOpnd::New(arrayOpnd, 0, TyMachPtr, instr->m_func), |
| vtableOpnd, |
| Js::OpCode::BrEq_A, |
| isArrayLabel, |
| instr); |
| |
| vtableOpnd = LoadVTableValueOpnd(instr, VTableValue::VtableNativeFloatArray); |
| InsertCompareBranch( |
| IR::IndirOpnd::New(arrayOpnd, 0, TyMachPtr, instr->m_func), |
| vtableOpnd, |
| Js::OpCode::BrNeq_A, |
| helperLabel, |
| instr); |
| |
| instr->InsertBefore(isArrayLabel); |
| |
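| // The fast path requires an array with no missing values; a hole would force a prototype-chain lookup. |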
| InsertTestBranch( |
| IR::IndirOpnd::New(src2RegOpnd, Js::JavascriptArray::GetOffsetOfArrayFlags(), TyUint8, m_func), |
| IR::IntConstOpnd::New(static_cast<uint8>(Js::DynamicObjectFlags::HasNoMissingValues), TyUint8, m_func, true), |
| Js::OpCode::BrEq_A, |
| helperLabel, |
| instr); |
| |
| IR::AutoReuseOpnd autoReuseHeadSegmentOpnd; |
| IR::AutoReuseOpnd autoReuseHeadSegmentLengthOpnd; |
| IR::IndirOpnd* indirOpnd = IR::IndirOpnd::New(src2RegOpnd, Js::JavascriptArray::GetOffsetOfHead(), TyMachPtr, this->m_func); |
| IR::RegOpnd* headSegmentOpnd = IR::RegOpnd::New(TyMachPtr, this->m_func); |
| autoReuseHeadSegmentOpnd.Initialize(headSegmentOpnd, m_func); |
| InsertMove(headSegmentOpnd, indirOpnd, instr); |
| |
| IR::Opnd* headSegmentLengthOpnd = IR::IndirOpnd::New(headSegmentOpnd, Js::SparseArraySegmentBase::GetOffsetOfLength(), TyUint32, m_func); |
| autoReuseHeadSegmentLengthOpnd.Initialize(headSegmentLengthOpnd, m_func); |
| |
| InsertCompareBranch( |
| src1Untagged, |
| headSegmentLengthOpnd, |
| Js::OpCode::BrGe_A, |
| helperLabel, |
| instr); |
| InsertCompareBranch( |
| src1Untagged, |
| IR::IntConstOpnd::New(0, src1Untagged->GetType(), this->m_func), |
| Js::OpCode::BrLt_A, |
| helperLabel, |
| instr); |
| |
| InsertMove(instr->GetDst(), LoadLibraryValueOpnd(instr, LibraryValue::ValueTrue), instr); |
| InsertBranch(Js::OpCode::Br, doneLabel, instr); |
| |
| instr->InsertBefore(helperLabel); |
| |
| instr->InsertAfter(doneLabel); |
| } |
| |
| // Generate a fast path for the "in" operator to use the cache where the key may be a PropertyString or Symbol. |
| void Lowerer::GenerateFastObjectIsIn(IR::Instr * instr) |
| { |
| IR::RegOpnd* baseOpnd = GetRegOpnd(instr->GetSrc2(), instr, m_func, TyVar); |
| IR::RegOpnd* indexOpnd = GetRegOpnd(instr->GetSrc1(), instr, m_func, TyVar); |
| bool likelyStringIndex = indexOpnd->GetValueType().IsLikelyString(); |
| bool likelySymbolIndex = indexOpnd->GetValueType().IsLikelySymbol(); |
| |
| if (!baseOpnd->GetValueType().IsLikelyObject() || !(likelyStringIndex || likelySymbolIndex)) |
| { |
| return; |
| } |
| |
| IR::LabelInstr* helperLabel = IR::LabelInstr::New(Js::OpCode::Label, m_func, true); |
| IR::LabelInstr* doneLabel = IR::LabelInstr::New(Js::OpCode::Label, m_func); |
| |
| if (likelyStringIndex) |
| { |
| GeneratePropertyStringTest(indexOpnd, instr, helperLabel, false /*isStore*/); |
| |
| const uint32 inlineCacheOffset = Js::PropertyString::GetOffsetOfLdElemInlineCache(); |
| const uint32 hitRateOffset = Js::PropertyString::GetOffsetOfHitRate(); |
| |
| GenerateFastIsInSymbolOrStringIndex(instr, indexOpnd, baseOpnd, instr->GetDst(), inlineCacheOffset, hitRateOffset, helperLabel, doneLabel); |
| } |
| else |
| { |
| Assert(likelySymbolIndex); |
| |
| GenerateSymbolTest(indexOpnd, instr, helperLabel); |
| |
| const uint32 inlineCacheOffset = Js::JavascriptSymbol::GetOffsetOfLdElemInlineCache(); |
| const uint32 hitRateOffset = Js::JavascriptSymbol::GetOffsetOfHitRate(); |
| |
| GenerateFastIsInSymbolOrStringIndex(instr, indexOpnd, baseOpnd, instr->GetDst(), inlineCacheOffset, hitRateOffset, helperLabel, doneLabel); |
| } |
| |
| instr->InsertBefore(helperLabel); |
| instr->InsertAfter(doneLabel); |
| } |
| |
| // Given an operand, return it if it is already a register; otherwise move it into a new register |
| IR::RegOpnd * Lowerer::GetRegOpnd(IR::Opnd* opnd, IR::Instr* insertInstr, Func* func, IRType type) |
| { |
| if (opnd->IsRegOpnd()) |
| { |
| return opnd->AsRegOpnd(); |
| } |
| IR::RegOpnd *regOpnd = IR::RegOpnd::New(type, func); |
| InsertMove(regOpnd, opnd, insertInstr); |
| return regOpnd; |
| } |
| |
| template <bool Saturate> |
| void Lowerer::GenerateTruncWithCheck(_In_ IR::Instr* instr) |
| { |
| |
| Assert(instr->GetSrc1()->IsFloat()); |
| if (instr->GetDst()->IsInt32() || instr->GetDst()->IsUInt32()) |
| { |
| m_lowererMD.GenerateTruncWithCheck<Saturate>(instr); |
| } |
| else |
| { |
| Assert(instr->GetDst()->IsInt64()); |
| LoadScriptContext(instr); |
| |
| if (instr->GetSrc1()->IsFloat32()) |
| { |
| m_lowererMD.LoadFloatHelperArgument(instr, instr->GetSrc1()); |
| } |
| else |
| { |
| m_lowererMD.LoadDoubleHelperArgument(instr, instr->GetSrc1()); |
| } |
| IR::JnHelperMethod helper; |
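| // helperList below is indexed as [source is float64][destination is unsigned]. |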
| if (Saturate) |
| { |
| IR::JnHelperMethod helperList[2][2] = { { IR::HelperF32ToI64Sat, IR::HelperF32ToU64Sat }, { IR::HelperF64ToI64Sat, IR::HelperF64ToU64Sat } }; |
| helper = helperList[instr->GetSrc1()->GetType() != TyFloat32][instr->GetDst()->GetType() == TyUint64]; |
| } |
| else |
| { |
| IR::JnHelperMethod helperList[2][2] = { { IR::HelperF32ToI64, IR::HelperF32ToU64 }, { IR::HelperF64ToI64, IR::HelperF64ToU64 } }; |
| helper = helperList[instr->GetSrc1()->GetType() != TyFloat32][instr->GetDst()->GetType() == TyUint64]; |
| } |
| instr->UnlinkSrc1(); |
| this->m_lowererMD.ChangeToHelperCall(instr, helper); |
| } |
| } |
| |
| void |
| Lowerer::RelocateCallDirectToHelperPath(IR::Instr* argoutInlineSpecialized, IR::LabelInstr* labelHelper) |
| { |
| IR::Opnd *linkOpnd = argoutInlineSpecialized->GetSrc2(); //ArgOut_A_InlineSpecialized src2; link to actual argouts. |
| |
| argoutInlineSpecialized->Unlink(); |
| labelHelper->InsertAfter(argoutInlineSpecialized); |
| |
| while(linkOpnd->IsSymOpnd()) |
| { |
| IR::SymOpnd *src2 = linkOpnd->AsSymOpnd(); |
| StackSym *sym = src2->m_sym->AsStackSym(); |
| Assert(sym->m_isSingleDef); |
| IR::Instr *argInstr = sym->m_instrDef; |
| Assert(argInstr->m_opcode == Js::OpCode::ArgOut_A); |
| |
| argInstr->Unlink(); |
| labelHelper->InsertAfter(argInstr); |
| |
| linkOpnd = argInstr->GetSrc2(); |
| } |
| |
| // Move startcall |
| Assert(linkOpnd->IsRegOpnd()); |
| StackSym *sym = linkOpnd->AsRegOpnd()->m_sym; |
| Assert(sym->m_isSingleDef); |
| IR::Instr *startCall = sym->m_instrDef; |
| Assert(startCall->m_opcode == Js::OpCode::StartCall); |
| startCall->Unlink(); |
| labelHelper->InsertAfter(startCall); |
| } |
| |
| bool |
| Lowerer::GenerateFastInlineStringCharCodeAt(IR::Instr * instr, Js::BuiltinFunction index) |
| { |
| Assert(instr->m_opcode == Js::OpCode::CallDirect); |
| |
| //CallDirect src2 |
| IR::Opnd * linkOpnd = instr->GetSrc2(); |
| //ArgOut_A_InlineSpecialized |
| IR::Instr * tmpInstr = linkOpnd->AsSymOpnd()->m_sym->AsStackSym()->m_instrDef; |
| |
| IR::Opnd * argsOpnd[2] = {0}; |
| bool result = instr->FetchOperands(argsOpnd, 2); |
| Assert(result); |
| |
| IR::LabelInstr *doneLabel = IR::LabelInstr::New(Js::OpCode::Label, this->m_func); |
| instr->InsertAfter(doneLabel); |
| |
| IR::LabelInstr *labelHelper = IR::LabelInstr::New(Js::OpCode::Label, this->m_func, true); |
| |
| bool success = GenerateFastCharAt(index, instr->GetDst(), argsOpnd[0], argsOpnd[1], |
| instr, instr, labelHelper, doneLabel); |
| |
| instr->InsertBefore(labelHelper); |
| if (!success) |
| { |
| return false; |
| } |
| |
| InsertBranch(Js::OpCode::Br, true, doneLabel, labelHelper); |
| |
| RelocateCallDirectToHelperPath(tmpInstr, labelHelper); |
| |
| return true; |
| } |
| |
| void |
| Lowerer::GenerateCtz(IR::Instr* instr) |
| { |
| Assert(instr->GetDst()->IsInt32() || instr->GetDst()->IsInt64()); |
| Assert(instr->GetSrc1()->IsInt32() || instr->GetSrc1()->IsInt64()); |
| m_lowererMD.GenerateCtz(instr); |
| } |
| |
| void |
| Lowerer::GeneratePopCnt(IR::Instr* instr) |
| { |
| Assert(instr->GetSrc1()->IsInt32() || instr->GetSrc1()->IsUInt32() || instr->GetSrc1()->IsInt64()); |
| Assert(instr->GetDst()->IsInt32() || instr->GetDst()->IsUInt32() || instr->GetDst()->IsInt64()); |
| m_lowererMD.GeneratePopCnt(instr); |
| } |
| |
| void |
| Lowerer::GenerateFastInlineMathClz(IR::Instr* instr) |
| { |
| Assert(instr->GetDst()->IsInt32() || instr->GetDst()->IsInt64()); |
| Assert(instr->GetSrc1()->IsInt32() || instr->GetSrc1()->IsInt64()); |
| m_lowererMD.GenerateClz(instr); |
| } |
| |
| void |
| Lowerer::GenerateFastInlineMathImul(IR::Instr* instr) |
| { |
| IR::Opnd* src1 = instr->GetSrc1(); |
| IR::Opnd* src2 = instr->GetSrc2(); |
| IR::Opnd* dst = instr->GetDst(); |
| |
| Assert(dst->IsInt32()); |
| Assert(src1->IsInt32()); |
| Assert(src2->IsInt32()); |
| |
| IR::Instr* imul = IR::Instr::New(LowererMD::MDImulOpcode, dst, src1, src2, instr->m_func); |
| instr->InsertBefore(imul); |
| |
| LowererMD::Legalize(imul); |
| |
| instr->Remove(); |
| } |
| |
| void |
| Lowerer::LowerReinterpretPrimitive(IR::Instr* instr) |
| { |
| Assert(m_func->GetJITFunctionBody()->IsWasmFunction()); |
| IR::Opnd* src1 = instr->GetSrc1(); |
| IR::Opnd* dst = instr->GetDst(); |
| |
| Assert(dst->GetSize() == src1->GetSize()); |
| Assert((dst->IsFloat32() && src1->IsInt32()) || |
| (dst->IsInt32() && src1->IsFloat32()) || |
| (dst->IsInt64() && src1->IsFloat64()) || |
| (dst->IsFloat64() && src1->IsInt64()) ); |
| |
| m_lowererMD.EmitReinterpretPrimitive(dst, src1, instr); |
| instr->Remove(); |
| } |
| |
| void |
| Lowerer::GenerateFastInlineMathFround(IR::Instr* instr) |
| { |
| IR::Opnd* src1 = instr->GetSrc1(); |
| IR::Opnd* dst = instr->GetDst(); |
| |
| Assert(dst->IsFloat()); |
| Assert(src1->IsFloat()); |
| |
| // This function is supposed to convert a float to the closest float32 representation. |
| // However, it is a bit loose about types, which the ARM64 encoder takes issue with. |
| #ifdef _M_ARM64 |
| LowererMD::GenerateFastInlineMathFround(instr); |
| #else |
| IR::Instr* fcvt64to32 = IR::Instr::New(LowererMD::MDConvertFloat64ToFloat32Opcode, dst, src1, instr->m_func); |
| |
| instr->InsertBefore(fcvt64to32); |
| LowererMD::Legalize(fcvt64to32); |
| |
| if (dst->IsFloat64()) |
| { |
| IR::Instr* fcvt32to64 = IR::Instr::New(LowererMD::MDConvertFloat32ToFloat64Opcode, dst, dst, instr->m_func); |
| instr->InsertBefore(fcvt32to64); |
| LowererMD::Legalize(fcvt32to64); |
| } |
| |
| instr->Remove(); |
| #endif |
| return; |
| } |
| |
| bool |
| Lowerer::GenerateFastInlineStringReplace(IR::Instr * instr) |
| { |
| Assert(instr->m_opcode == Js::OpCode::CallDirect); |
| |
| //CallDirect src2 |
| IR::Opnd * linkOpnd = instr->GetSrc2(); |
| //ArgOut_A_InlineSpecialized |
| IR::Instr * tmpInstr = linkOpnd->AsSymOpnd()->m_sym->AsStackSym()->m_instrDef; |
| |
| IR::Opnd * argsOpnd[3] = {0}; |
| bool result = instr->FetchOperands(argsOpnd, 3); |
| Assert(result); |
| AnalysisAssert(argsOpnd[0] && argsOpnd[1] && argsOpnd[2]); |
| |
| if (!argsOpnd[0]->GetValueType().IsLikelyString() |
| || argsOpnd[1]->GetValueType().IsNotObject() |
| || !argsOpnd[2]->GetValueType().IsLikelyString()) |
| { |
| return false; |
| } |
| |
| IR::LabelInstr *doneLabel = IR::LabelInstr::New(Js::OpCode::Label, this->m_func); |
| instr->InsertAfter(doneLabel); |
| |
| IR::LabelInstr *labelHelper = IR::LabelInstr::New(Js::OpCode::Label, this->m_func, true); |
| |
| bool success = this->GenerateFastReplace(argsOpnd[0], argsOpnd[1], argsOpnd[2], |
| instr, instr, labelHelper, doneLabel); |
| |
| instr->InsertBefore(labelHelper); |
| if (!success) |
| { |
| return false; |
| } |
| |
| InsertBranch(Js::OpCode::Br, true, doneLabel, labelHelper); |
| |
| RelocateCallDirectToHelperPath(tmpInstr, labelHelper); |
| |
| return true; |
| } |
| |
| #ifdef ENABLE_DOM_FAST_PATH |
| /* |
| Lower the DOMFastPathGetter opcode |
| We have inliner generated bytecode: |
| (dst)helpArg1: ExtendArg_A (src1)thisObject (src2)null |
| (dst)helpArg2: ExtendArg_A (src1)funcObject (src2)helpArg1 |
| method: DOMFastPathGetter (src1)HelperCall (src2)helpArg2 |
| |
| We'll convert it to a JavascriptFunction entry method call: |
| CALL Helper funcObject CallInfo(CallFlags_Value, 3) thisObj |
| */ |
| void |
| Lowerer::LowerFastInlineDOMFastPathGetter(IR::Instr* instr) |
| { |
| IR::Opnd* helperOpnd = instr->UnlinkSrc1(); |
| Assert(helperOpnd->IsHelperCallOpnd()); |
| |
| IR::Opnd *linkOpnd = instr->UnlinkSrc2(); |
| Assert(linkOpnd->IsRegOpnd()); |
| |
| IR::Instr* prevInstr = linkOpnd->AsRegOpnd()->m_sym->m_instrDef; |
| Assert(prevInstr->m_opcode == Js::OpCode::ExtendArg_A); |
| IR::Opnd* funcObj = prevInstr->GetSrc1(); |
| |
| Assert(funcObj->IsRegOpnd()); |
| // If the ExtendArg_A was CSE'd across a loop or hoisted out of a loop, |
| // adding a new reference down here might cause funcObj to now be liveOnBackEdge. |
| // Use the addToLiveOnBackEdgeSyms bit vector to add it to a loop if we encounter one. |
| // We'll clear it once we reach the Extended arg. |
| this->addToLiveOnBackEdgeSyms->Set(funcObj->AsRegOpnd()->m_sym->m_id); |
| |
| Assert(prevInstr->GetSrc2() != nullptr); |
| prevInstr = prevInstr->GetSrc2()->AsRegOpnd()->m_sym->m_instrDef; |
| Assert(prevInstr->m_opcode == Js::OpCode::ExtendArg_A); |
| IR::Opnd* thisObj = prevInstr->GetSrc1(); |
| Assert(prevInstr->GetSrc2() == nullptr); |
| |
| Assert(thisObj->IsRegOpnd()); |
| this->addToLiveOnBackEdgeSyms->Set(thisObj->AsRegOpnd()->m_sym->m_id); |
| |
| const auto info = Lowerer::MakeCallInfoConst(Js::CallFlags_Value, 1, m_func); |
| |
| m_lowererMD.LoadHelperArgument(instr, thisObj); |
| m_lowererMD.LoadHelperArgument(instr, info); |
| m_lowererMD.LoadHelperArgument(instr, funcObj); |
| |
| instr->m_opcode = Js::OpCode::Call; |
| |
| IR::HelperCallOpnd *helperCallOpnd = Lowerer::CreateHelperCallOpnd(helperOpnd->AsHelperCallOpnd()->m_fnHelper, 3, m_func); |
| instr->SetSrc1(helperCallOpnd); |
| |
| m_lowererMD.LowerCall(instr, 3); // we have funcobj, callInfo, and this. |
| } |
| #endif |
| |
| void |
| Lowerer::GenerateFastInlineArrayPush(IR::Instr * instr) |
| { |
| Assert(instr->m_opcode == Js::OpCode::InlineArrayPush); |
| |
| IR::Opnd * baseOpnd = instr->GetSrc1(); |
| IR::Opnd * srcOpnd = instr->GetSrc2(); |
| |
| bool returnLength = false; |
| if(instr->GetDst()) |
| { |
| returnLength = true; |
| } |
| |
| IR::LabelInstr * bailOutLabelHelper = IR::LabelInstr::New(Js::OpCode::Label, this->m_func, true); |
| IR::LabelInstr *doneLabel = IR::LabelInstr::New(Js::OpCode::Label, this->m_func); |
| instr->InsertAfter(doneLabel); |
| |
| IR::LabelInstr *labelHelper = IR::LabelInstr::New(Js::OpCode::Label, this->m_func, true); |
| |
| // Don't generate the fast path when ShouldGenerateArrayFastPath() says not to, |
| // and don't generate it when the array is likely native but the element is not type-specialized |
| if(ShouldGenerateArrayFastPath(baseOpnd, false, false, false) && |
| !(baseOpnd->GetValueType().IsLikelyNativeArray() && srcOpnd->IsVar())) |
| { |
| GenerateFastPush(baseOpnd, srcOpnd, instr, instr, labelHelper, doneLabel, bailOutLabelHelper, returnLength); |
| instr->InsertBefore(labelHelper); |
| InsertBranch(Js::OpCode::Br, true, doneLabel, labelHelper); |
| } |
| |
| if(baseOpnd->GetValueType().IsLikelyNativeArray()) |
| { |
| //Lower IR::BailOutConventionalNativeArrayAccessOnly here. |
| LowerOneBailOutKind(instr, IR::BailOutConventionalNativeArrayAccessOnly, false, false); |
| instr->InsertAfter(bailOutLabelHelper); |
| InsertBranch(Js::OpCode::Br, doneLabel, bailOutLabelHelper); |
| } |
| |
| GenerateHelperToArrayPushFastPath(instr, bailOutLabelHelper); |
| |
| } |
| |
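// Emit the fast path for Array.prototype.pop: verify the base is an array if its value
// type isn't already known, branch to the bailout on a zero length, decrement the length,
// and load the element at the new length through GenerateFastElemICommon.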
| bool Lowerer::GenerateFastPop(IR::Opnd *baseOpndParam, IR::Instr *callInstr, IR::LabelInstr *labelHelper, IR::LabelInstr *doneLabel, IR::LabelInstr * bailOutLabelHelper) |
| { |
| Assert(ShouldGenerateArrayFastPath(baseOpndParam, false, false, false)); |
| |
| // TEST baseOpnd, AtomTag -- check baseOpnd not tagged int |
| // JNE $helper |
| // CMP [baseOpnd], JavascriptArray::`vtable' -- check baseOpnd isArray |
| // JNE $helper |
| // MOV r2, [baseOpnd + offset(length)] -- Load array length |
| |
| IR::RegOpnd * baseOpnd = baseOpndParam->AsRegOpnd(); |
| const IR::AutoReuseOpnd autoReuseBaseOpnd(baseOpnd, m_func); |
| |
| ValueType arrValueType(baseOpndParam->GetValueType()); |
| IR::RegOpnd *arrayOpnd = baseOpnd; |
| IR::RegOpnd *arrayLengthOpnd = nullptr; |
| IR::AutoReuseOpnd autoReuseArrayLengthOpnd; |
| if(!arrValueType.IsAnyOptimizedArray()) |
| { |
| arrayOpnd = GenerateArrayTest(baseOpnd, bailOutLabelHelper, bailOutLabelHelper, callInstr, false, true); |
| arrValueType = arrayOpnd->GetValueType().ToDefiniteObject().SetHasNoMissingValues(false); |
| } |
| else if(arrayOpnd->IsArrayRegOpnd()) |
| { |
| IR::ArrayRegOpnd *const arrayRegOpnd = arrayOpnd->AsArrayRegOpnd(); |
| if(arrayRegOpnd->LengthSym()) |
| { |
| arrayLengthOpnd = IR::RegOpnd::New(arrayRegOpnd->LengthSym(), arrayRegOpnd->LengthSym()->GetType(), m_func); |
| DebugOnly(arrayLengthOpnd->FreezeSymValue()); |
| autoReuseArrayLengthOpnd.Initialize(arrayLengthOpnd, m_func); |
| } |
| } |
| const IR::AutoReuseOpnd autoReuseArrayOpnd(arrayOpnd, m_func); |
| |
| IR::AutoReuseOpnd autoReuseMutableArrayLengthOpnd; |
| { |
| IR::RegOpnd *const mutableArrayLengthOpnd = IR::RegOpnd::New(TyUint32, m_func); |
| autoReuseMutableArrayLengthOpnd.Initialize(mutableArrayLengthOpnd, m_func); |
| if(arrayLengthOpnd) |
| { |
| // mov mutableArrayLength, arrayLength |
| InsertMove(mutableArrayLengthOpnd, arrayLengthOpnd, callInstr); |
| } |
| else |
| { |
| // MOV mutableArrayLength, [array + offset(length)] -- Load array length |
            // We know this index is safe, so mark it as UInt32 to avoid unnecessary conversions/checks
| InsertMove( |
| mutableArrayLengthOpnd, |
| IR::IndirOpnd::New( |
| arrayOpnd, |
| Js::JavascriptArray::GetOffsetOfLength(), |
| mutableArrayLengthOpnd->GetType(), |
| this->m_func), |
| callInstr); |
| } |
| arrayLengthOpnd = mutableArrayLengthOpnd; |
| } |
| |
| InsertCompareBranch(arrayLengthOpnd, IR::IntConstOpnd::New(0, TyUint32, this->m_func), Js::OpCode::BrEq_A, true, bailOutLabelHelper, callInstr); |
| InsertSub(false, arrayLengthOpnd, arrayLengthOpnd, IR::IntConstOpnd::New(1, TyUint32, this->m_func),callInstr); |
| |
| IR::IndirOpnd *arrayRef = IR::IndirOpnd::New(arrayOpnd, arrayLengthOpnd, TyVar, this->m_func); |
| arrayRef->GetBaseOpnd()->SetValueType(arrValueType); |
| |
    // The length was just checked for zero and decremented, so it can't overflow here;
    // let GenerateFastElemICommon perform its usual Array.length and Segment.length checks.
| bool isTypedArrayElement, isStringIndex; |
| IR::IndirOpnd *const indirOpnd = |
| GenerateFastElemICommon( |
| callInstr, |
| false, |
| arrayRef, |
| labelHelper, |
| labelHelper, |
| nullptr, |
| &isTypedArrayElement, |
| &isStringIndex, |
| nullptr, |
| nullptr, |
| nullptr /*pLabelSegmentLengthIncreased*/, |
| true /*checkArrayLengthOverflow*/, |
| true /* forceGenerateFastPath */, |
| false/* = returnLength */, |
| bailOutLabelHelper /* = bailOutLabelInstr*/); |
| Assert(!isTypedArrayElement); |
| Assert(indirOpnd); |
| return true; |
| } |
| |
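// Emit the fast path for Array.prototype.push with a single argument: load the array
// length, let GenerateFastElemICommon locate the element slot and update the length,
// then store the new element, optionally producing the new length as the result.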
| bool Lowerer::GenerateFastPush(IR::Opnd *baseOpndParam, IR::Opnd *src, IR::Instr *callInstr, |
| IR::Instr *insertInstr, IR::LabelInstr *labelHelper, IR::LabelInstr *doneLabel, IR::LabelInstr * bailOutLabelHelper, bool returnLength) |
| { |
| Assert(ShouldGenerateArrayFastPath(baseOpndParam, false, false, false)); |
| |
| // TEST baseOpnd, AtomTag -- check baseOpnd not tagged int |
| // JNE $helper |
| // CMP [baseOpnd], JavascriptArray::`vtable' -- check baseOpnd isArray |
| // JNE $helper |
| // MOV r2, [baseOpnd + offset(length)] -- Load array length |
| |
| IR::RegOpnd * baseOpnd = baseOpndParam->AsRegOpnd(); |
| const IR::AutoReuseOpnd autoReuseBaseOpnd(baseOpnd, m_func); |
| |
| ValueType arrValueType(baseOpndParam->GetValueType()); |
| IR::RegOpnd *arrayOpnd = baseOpnd; |
| IR::RegOpnd *arrayLengthOpnd = nullptr; |
| IR::AutoReuseOpnd autoReuseArrayLengthOpnd; |
| if(!arrValueType.IsAnyOptimizedArray()) |
| { |
| arrayOpnd = GenerateArrayTest(baseOpnd, labelHelper, labelHelper, insertInstr, false, true); |
| arrValueType = arrayOpnd->GetValueType().ToDefiniteObject().SetHasNoMissingValues(false); |
| } |
| else if(arrayOpnd->IsArrayRegOpnd()) |
| { |
| IR::ArrayRegOpnd *const arrayRegOpnd = arrayOpnd->AsArrayRegOpnd(); |
| if(arrayRegOpnd->LengthSym()) |
| { |
| arrayLengthOpnd = IR::RegOpnd::New(arrayRegOpnd->LengthSym(), arrayRegOpnd->LengthSym()->GetType(), m_func); |
| DebugOnly(arrayLengthOpnd->FreezeSymValue()); |
| autoReuseArrayLengthOpnd.Initialize(arrayLengthOpnd, m_func); |
| } |
| } |
| const IR::AutoReuseOpnd autoReuseArrayOpnd(arrayOpnd, m_func); |
| |
| if(!arrayLengthOpnd) |
| { |
| // MOV arrayLength, [array + offset(length)] -- Load array length |
        // We know this index is safe, so mark it as UInt32 to avoid unnecessary conversions/checks
| arrayLengthOpnd = IR::RegOpnd::New(TyUint32, m_func); |
| autoReuseArrayLengthOpnd.Initialize(arrayLengthOpnd, m_func); |
| InsertMove( |
| arrayLengthOpnd, |
| IR::IndirOpnd::New(arrayOpnd, Js::JavascriptArray::GetOffsetOfLength(), arrayLengthOpnd->GetType(), this->m_func), |
| insertInstr); |
| } |
| |
| IR::IndirOpnd *arrayRef = IR::IndirOpnd::New(arrayOpnd, arrayLengthOpnd, TyVar, this->m_func); |
| arrayRef->GetBaseOpnd()->SetValueType(arrValueType); |
| |
| if (returnLength && src->IsEqual(insertInstr->GetDst())) |
| { |
        // If the dst is the same as the src, dst is going to be overwritten by GenerateFastElemICommon in the process of updating the length.
        // Save the src in a temp register first.
| IR::RegOpnd *opnd = IR::RegOpnd::New(src->GetType(), this->m_func); |
| InsertMove(opnd, src, insertInstr); |
| src = opnd; |
| } |
| |
| //Array length is going to overflow, hence don't check for Array.length and Segment.length overflow. |
| bool isTypedArrayElement, isStringIndex; |
| IR::IndirOpnd *const indirOpnd = |
| GenerateFastElemICommon( |
| insertInstr, |
| true, |
| arrayRef, |
| labelHelper, |
| labelHelper, |
| nullptr, |
| &isTypedArrayElement, |
| &isStringIndex, |
| nullptr, |
| nullptr, |
| nullptr /*pLabelSegmentLengthIncreased*/, |
| false /*checkArrayLengthOverflow*/, |
| true /* forceGenerateFastPath */, |
| returnLength, |
| bailOutLabelHelper); |
| |
| Assert(!isTypedArrayElement); |
| Assert(indirOpnd); |
| |
| // MOV [r3 + r2], src |
| InsertMoveWithBarrier(indirOpnd, src, insertInstr); |
| |
| return true; |
| } |
| |
| bool |
| Lowerer::GenerateFastCharAt(Js::BuiltinFunction index, IR::Opnd *dst, IR::Opnd *srcStr, IR::Opnd *srcIndex, IR::Instr *callInstr, |
| IR::Instr *insertInstr, IR::LabelInstr *labelHelper, IR::LabelInstr *doneLabel) |
| { |
| // if regSrcStr is not object, JMP $helper |
| // CMP [regSrcStr + offset(type)] , static string type -- check base string type |
| // JNE $helper |
| // MOV r1, [regSrcStr + offset(m_pszValue)] |
| // TEST r1, r1 |
| // JEQ $helper |
| // MOV r2, srcIndex |
| // If r2 is not int, JMP $helper |
| // Convert r2 to int |
| // CMP [regSrcStr + offsetof(length)], r2 |
| // JBE $helper |
| // MOVZX r2, [r1 + r2 * 2] |
| // if (charAt) |
| // PUSH r1 |
| // PUSH scriptContext |
| // CALL GetStringFromChar |
| // MOV dst, EAX |
| // else (charCodeAt) |
| // if (codePointAt) |
| // Lowerer.GenerateFastCodePointAt -- Common inline functions |
| // Convert r2 to Var |
| // MOV dst, r2 |
| bool isInt = false; |
| bool isNotTaggedValue = false; |
| |
| if (srcStr->IsRegOpnd()) |
| { |
| if (srcStr->AsRegOpnd()->IsTaggedInt()) |
| { |
| isInt = true; |
| |
| } |
| else if (srcStr->AsRegOpnd()->IsNotTaggedValue()) |
| { |
| isNotTaggedValue = true; |
| } |
| } |
| |
| IR::RegOpnd *regSrcStr = GetRegOpnd(srcStr, insertInstr, m_func, TyVar); |
| |
| if (!isNotTaggedValue) |
| { |
| if (!isInt) |
| { |
| m_lowererMD.GenerateObjectTest(regSrcStr, insertInstr, labelHelper); |
| } |
| else |
| { |
| // Insert delete branch opcode to tell the dbChecks not to assert on this helper label |
| IR::Instr *fakeBr = IR::PragmaInstr::New(Js::OpCode::DeletedNonHelperBranch, 0, this->m_func); |
| insertInstr->InsertBefore(fakeBr); |
| |
| InsertBranch(Js::OpCode::Br, labelHelper, insertInstr); |
| } |
| } |
| |
    // Fall back to the helper if the index is a constant and is less than zero.
| if (srcIndex->IsAddrOpnd() && Js::TaggedInt::ToInt32(srcIndex->AsAddrOpnd()->m_address) < 0) |
| { |
| labelHelper->isOpHelper = false; |
| InsertBranch(Js::OpCode::Br, labelHelper, insertInstr); |
| return false; |
| } |
| |
| GenerateStringTest(regSrcStr, insertInstr, labelHelper, nullptr, false); |
| |
| // r1 contains the value of the char16* pointer inside JavascriptString. |
| // MOV r1, [regSrcStr + offset(m_pszValue)] |
| IR::RegOpnd *r1 = IR::RegOpnd::New(TyMachReg, this->m_func); |
| IR::IndirOpnd * indirOpnd = IR::IndirOpnd::New(regSrcStr->AsRegOpnd(), Js::JavascriptString::GetOffsetOfpszValue(), TyMachPtr, this->m_func); |
| InsertMove(r1, indirOpnd, insertInstr); |
| |
| // TEST r1, r1 -- Null pointer test |
| // JEQ $helper |
| InsertTestBranch(r1, r1, Js::OpCode::BrEq_A, labelHelper, insertInstr); |
| |
| IR::RegOpnd *strLength = IR::RegOpnd::New(TyUint32, m_func); |
| InsertMove(strLength, IR::IndirOpnd::New(regSrcStr, offsetof(Js::JavascriptString, m_charLength), TyUint32, this->m_func), insertInstr); |
| IR::Opnd* indexOpnd = nullptr; |
| if (srcIndex->IsAddrOpnd()) |
| { |
| uint32 indexValue = Js::TaggedInt::ToUInt32(srcIndex->AsAddrOpnd()->m_address); |
| // CMP [regSrcStr + offsetof(length)], index |
        // Use an unsigned compare; this handles negative indexes as well (they become > INT_MAX)
| // JBE $helper |
| InsertCompareBranch(strLength, IR::IntConstOpnd::New(indexValue, TyUint32, m_func), Js::OpCode::BrLe_A, true, labelHelper, insertInstr); |
| |
| // Mask off the sign so that poisoning will work for negative indices |
| #if TARGET_32 |
| uint32 maskedIndex = CONFIG_FLAG_RELEASE(PoisonStringLoad) ? (indexValue & INT32_MAX) : indexValue; |
| #else |
| uint32 maskedIndex = indexValue; |
| #endif |
| indirOpnd = IR::IndirOpnd::New(r1, maskedIndex * sizeof(char16), TyUint16, this->m_func); |
| indexOpnd = IR::IntConstOpnd::New(maskedIndex, TyMachPtr, m_func); |
| } |
| else |
| { |
| IR::RegOpnd *r2 = IR::RegOpnd::New(TyVar, this->m_func); |
| // MOV r2, srcIndex |
| InsertMove(r2, srcIndex, insertInstr); |
| |
| r2 = GenerateUntagVar(r2, labelHelper, insertInstr); |
| |
| // CMP [regSrcStr + offsetof(length)], r2 |
        // Use an unsigned compare; this handles negative indexes as well (they become > INT_MAX)
| // JBE $helper |
| InsertCompareBranch(strLength, r2, Js::OpCode::BrLe_A, true, labelHelper, insertInstr); |
| |
| #if TARGET_32 |
| if (CONFIG_FLAG_RELEASE(PoisonStringLoad)) |
| { |
| // Mask off the sign so that poisoning will work for negative indices |
| InsertAnd(r2, r2, IR::IntConstOpnd::New(INT32_MAX, TyInt32, m_func), insertInstr); |
| } |
| #endif |
| |
| if (r2->GetSize() != MachPtr) |
| { |
| r2 = r2->UseWithNewType(TyMachPtr, this->m_func)->AsRegOpnd(); |
| } |
| indexOpnd = r2; |
| |
| indirOpnd = IR::IndirOpnd::New(r1, r2, 1, TyUint16, this->m_func); |
| } |
| IR::RegOpnd* maskOpnd = nullptr; |
| if (CONFIG_FLAG_RELEASE(PoisonStringLoad)) |
| { |
| maskOpnd = IR::RegOpnd::New(TyMachPtr, m_func); |
| if (strLength->GetSize() != MachPtr) |
| { |
| strLength = strLength->UseWithNewType(TyMachPtr, this->m_func)->AsRegOpnd(); |
| } |
| InsertSub(false, maskOpnd, indexOpnd, strLength, insertInstr); |
| InsertShift(Js::OpCode::Shr_A, false, maskOpnd, maskOpnd, IR::IntConstOpnd::New(MachRegInt * 8 - 1, TyInt8, m_func), insertInstr); |
| |
| if (maskOpnd->GetSize() != TyUint32) |
| { |
| maskOpnd = maskOpnd->UseWithNewType(TyUint32, this->m_func)->AsRegOpnd(); |
| } |
| } |
| // MOVZX charReg, [r1 + r2 * 2] -- this is the value of the char |
| IR::RegOpnd *charReg = IR::RegOpnd::New(TyUint32, this->m_func); |
| InsertMove(charReg, indirOpnd, insertInstr); |
| if (CONFIG_FLAG_RELEASE(PoisonStringLoad)) |
| { |
| InsertAnd(charReg, charReg, maskOpnd, insertInstr); |
| } |
| if (index == Js::BuiltinFunction::JavascriptString_CharAt) |
| { |
| IR::Opnd *resultOpnd; |
| if (dst->IsEqual(srcStr)) |
| { |
| resultOpnd = IR::RegOpnd::New(TyVar, this->m_func); |
| } |
| else |
| { |
| resultOpnd = dst; |
| } |
| GenerateGetSingleCharString(charReg, resultOpnd, labelHelper, doneLabel, insertInstr, false); |
| } |
| else |
| { |
| Assert(index == Js::BuiltinFunction::JavascriptString_CharCodeAt || index == Js::BuiltinFunction::JavascriptString_CodePointAt); |
| |
| if (index == Js::BuiltinFunction::JavascriptString_CodePointAt) |
| { |
| GenerateFastInlineStringCodePointAt(insertInstr, this->m_func, strLength, srcIndex, charReg, r1); |
| } |
| |
| if (charReg->GetSize() != MachPtr) |
| { |
| charReg = charReg->UseWithNewType(TyMachPtr, this->m_func)->AsRegOpnd(); |
| } |
| m_lowererMD.GenerateInt32ToVarConversion(charReg, insertInstr); |
| |
| // MOV dst, charReg |
| InsertMove(dst, charReg, insertInstr); |
| } |
| return true; |
| } |
| |
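// Emit argouts for an inlinee's stack arguments: compute the address of the first real
// argument (just past 'this'), then emit one ArgOut_A_Dynamic per actual, placing the
// ith actual in argslot i + 3 (after the function object, callinfo, and 'this').
// Returns the actual count (which still includes 'this') for callinfo generation.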
| IR::Opnd* |
| Lowerer::GenerateArgOutForInlineeStackArgs(IR::Instr* callInstr, IR::Instr* stackArgsInstr) |
| { |
| Assert(callInstr->m_func->IsInlinee()); |
| Func *func = callInstr->m_func; |
| uint32 actualCount = func->actualCount - 1; // don't count this pointer |
| Assert(actualCount < Js::InlineeCallInfo::MaxInlineeArgoutCount); |
| |
| const auto firstRealArgStackSym = func->GetInlineeArgvSlotOpnd()->m_sym->AsStackSym(); |
| this->m_func->SetArgOffset(firstRealArgStackSym, firstRealArgStackSym->m_offset + MachPtr); //Start after this pointer |
| IR::SymOpnd *firstArg = IR::SymOpnd::New(firstRealArgStackSym, TyMachPtr, func); |
| const IR::AutoReuseOpnd autoReuseFirstArg(firstArg, func); |
| IR::RegOpnd* argInOpnd = IR::RegOpnd::New(TyMachReg, func); |
| const IR::AutoReuseOpnd autoReuseArgInOpnd(argInOpnd, func); |
| InsertLea(argInOpnd, firstArg, callInstr); |
| |
| IR::IndirOpnd *argIndirOpnd = nullptr; |
| IR::Instr* argout = nullptr; |
| |
| #if defined(_M_IX86) |
| // Maintain alignment |
| if ((actualCount & 1) == 0) |
| { |
| IR::Instr *alignPush = IR::Instr::New(Js::OpCode::PUSH, this->m_func); |
| alignPush->SetSrc1(IR::IntConstOpnd::New(1, TyInt32, this->m_func)); |
| callInstr->InsertBefore(alignPush); |
| } |
| #endif |
| |
| for(uint i = actualCount; i > 0; i--) |
| { |
| argIndirOpnd = IR::IndirOpnd::New(argInOpnd, (i - 1) * MachPtr, TyMachReg, func); |
| argout = IR::Instr::New(Js::OpCode::ArgOut_A_Dynamic, func); |
| argout->SetSrc1(argIndirOpnd); |
| callInstr->InsertBefore(argout); |
        // i represents the ith argument from the actuals, which lands in argslot i + 3, counting this, callInfo and the function object
| this->m_lowererMD.LoadDynamicArgument(argout, i + 3); |
| } |
| return IR::IntConstOpnd::New(func->actualCount, TyMachReg, func); |
| } |
| |
| // For AMD64 and ARM only. |
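// Emits a loop that walks the spread array's elements from the highest index down to 1,
// generating an ArgOut_A_Dynamic for each, then emits the element at index 0 directly
// into argslot 4, since that argument is typically passed in a register on these targets.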
| void |
| Lowerer::LowerInlineSpreadArgOutLoopUsingRegisters(IR::Instr *callInstr, IR::RegOpnd *indexOpnd, IR::RegOpnd *arrayElementsStartOpnd) |
| { |
| Func *const func = callInstr->m_func; |
| |
| IR::LabelInstr *oneArgLabel = IR::LabelInstr::New(Js::OpCode::Label, func); |
| InsertCompareBranch(indexOpnd, IR::IntConstOpnd::New(1, TyUint8, func), Js::OpCode::BrEq_A, true, oneArgLabel, callInstr); |
| |
| IR::LabelInstr *startLoopLabel = InsertLoopTopLabel(callInstr); |
| Loop * loop = startLoopLabel->GetLoop(); |
| loop->regAlloc.liveOnBackEdgeSyms->Set(indexOpnd->m_sym->m_id); |
| loop->regAlloc.liveOnBackEdgeSyms->Set(arrayElementsStartOpnd->m_sym->m_id); |
| InsertSub(false, indexOpnd, indexOpnd, IR::IntConstOpnd::New(1, TyInt8, func), callInstr); |
| |
| IR::IndirOpnd *elemPtrOpnd = IR::IndirOpnd::New(arrayElementsStartOpnd, indexOpnd, this->m_lowererMD.GetDefaultIndirScale(), TyMachPtr, func); |
| |
| // Generate argout for n+2 arg (skipping function object + this) |
| IR::Instr *argout = IR::Instr::New(Js::OpCode::ArgOut_A_Dynamic, func); |
| |
| // X64 requires a reg opnd |
| IR::RegOpnd *elemRegOpnd = IR::RegOpnd::New(TyMachPtr, func); |
| Lowerer::InsertMove(elemRegOpnd, elemPtrOpnd, callInstr); |
| argout->SetSrc1(elemRegOpnd); |
| argout->SetSrc2(indexOpnd); |
| callInstr->InsertBefore(argout); |
| this->m_lowererMD.LoadDynamicArgumentUsingLength(argout); |
| |
| InsertCompareBranch(indexOpnd, IR::IntConstOpnd::New(1, TyUint8, func), Js::OpCode::BrNeq_A, true, startLoopLabel, callInstr); |
| |
| // Emit final argument into register 4 on AMD64 and ARM |
| callInstr->InsertBefore(oneArgLabel); |
| argout = IR::Instr::New(Js::OpCode::ArgOut_A_Dynamic, func); |
| argout->SetSrc1(elemPtrOpnd); |
| callInstr->InsertBefore(argout); |
    this->m_lowererMD.LoadDynamicArgument(argout, 4); // 4 denotes the 4th argslot, counting this, callinfo & the function object
| } |
| |
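// Lower CallIDynamicSpread. The incoming arg chain looks roughly like this:
//     s1 = StartCall
//     s2 = ArgOut_A_Dynamic   this, s1
//     s3 = ArgOut_A_SpreadArg array, s2
//          CallIDynamicSpread func, s3
// The spread ArgOut and the StartCall are removed, a loop expands the array elements
// into arg slots (skipped entirely when the array is empty), and the call is then
// lowered as a regular CallIDynamic. Inlinees rejit with inline spread disabled.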
| IR::Instr * |
| Lowerer::LowerCallIDynamicSpread(IR::Instr *callInstr, ushort callFlags) |
| { |
| Assert(callInstr->m_opcode == Js::OpCode::CallIDynamicSpread); |
| |
| IR::Instr * insertBeforeInstrForCFG = nullptr; |
| |
| Func *const func = callInstr->m_func; |
| |
| if (func->IsInlinee()) |
| { |
| throw Js::RejitException(RejitReason::InlineSpreadDisabled); |
| } |
| |
| IR::Instr *spreadArrayInstr = callInstr; |
| IR::SymOpnd *argLinkOpnd = spreadArrayInstr->UnlinkSrc2()->AsSymOpnd(); |
| StackSym *argLinkSym = argLinkOpnd->m_sym->AsStackSym(); |
| AssertMsg(argLinkSym->IsArgSlotSym() && argLinkSym->m_isSingleDef, "Arg tree not single def..."); |
| argLinkOpnd->Free(this->m_func); |
| spreadArrayInstr = argLinkSym->m_instrDef; |
| |
| Assert(spreadArrayInstr->m_opcode == Js::OpCode::ArgOut_A_SpreadArg); |
| |
| IR::Opnd *arraySrcOpnd = spreadArrayInstr->UnlinkSrc1(); |
| IR::RegOpnd *arrayOpnd = GetRegOpnd(arraySrcOpnd, spreadArrayInstr, func, TyMachPtr); |
| |
| argLinkOpnd = spreadArrayInstr->UnlinkSrc2()->AsSymOpnd(); |
| |
| // Walk the arg chain and find the start call |
| argLinkSym = argLinkOpnd->m_sym->AsStackSym(); |
| AssertMsg(argLinkSym->IsArgSlotSym() && argLinkSym->m_isSingleDef, "Arg tree not single def..."); |
| argLinkOpnd->Free(this->m_func); |
| |
| // Nothing to be done for the function object, emit as normal |
| IR::Instr *thisInstr = argLinkSym->m_instrDef; |
| IR::RegOpnd *thisOpnd = thisInstr->UnlinkSrc2()->AsRegOpnd(); |
| argLinkSym = thisOpnd->m_sym->AsStackSym(); |
| thisInstr->Unlink(); |
| thisInstr->FreeDst(); |
| |
| // Remove the array ArgOut instr and StartCall, they are no longer needed |
| spreadArrayInstr->Unlink(); |
| spreadArrayInstr->FreeDst(); |
| IR::Instr *startCallInstr = argLinkSym->m_instrDef; |
| Assert(startCallInstr->m_opcode == Js::OpCode::StartCall); |
| insertBeforeInstrForCFG = startCallInstr->GetNextRealInstr(); |
| startCallInstr->Remove(); |
| |
| IR::RegOpnd *argsLengthOpnd = IR::RegOpnd::New(TyUint32, func); |
| IR::IndirOpnd *arrayLengthPtrOpnd = IR::IndirOpnd::New(arrayOpnd, Js::JavascriptArray::GetOffsetOfLength(), TyUint32, func); |
| Lowerer::InsertMove(argsLengthOpnd, arrayLengthPtrOpnd, callInstr); |
| |
| // Don't bother expanding args if there are zero |
| IR::LabelInstr *zeroArgsLabel = IR::LabelInstr::New(Js::OpCode::Label, func); |
| InsertCompareBranch(argsLengthOpnd, IR::IntConstOpnd::New(0, TyInt8, func), Js::OpCode::BrEq_A, true, zeroArgsLabel, callInstr); |
| |
| IR::RegOpnd *indexOpnd = IR::RegOpnd::New(TyUint32, func); |
| Lowerer::InsertMove(indexOpnd, argsLengthOpnd, callInstr); |
| |
| // Get the array head offset and length |
| IR::IndirOpnd *arrayHeadPtrOpnd = IR::IndirOpnd::New(arrayOpnd, Js::JavascriptArray::GetOffsetOfHead(), TyMachPtr, func); |
| IR::RegOpnd *arrayElementsStartOpnd = IR::RegOpnd::New(TyMachPtr, func); |
| InsertAdd(false, arrayElementsStartOpnd, arrayHeadPtrOpnd, IR::IntConstOpnd::New(offsetof(Js::SparseArraySegment<Js::Var>, elements), TyUint8, func), callInstr); |
| |
| this->m_lowererMD.LowerInlineSpreadArgOutLoop(callInstr, indexOpnd, arrayElementsStartOpnd); |
| |
| // Resume if we have zero args |
| callInstr->InsertBefore(zeroArgsLabel); |
| |
| // Lower call |
| callInstr->m_opcode = Js::OpCode::CallIDynamic; |
| callInstr = m_lowererMD.LowerCallIDynamic(callInstr, thisInstr, argsLengthOpnd, callFlags, insertBeforeInstrForCFG); |
| |
| return callInstr; |
| } |
| |
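// Lower CallIDynamic for the stack-args case (e.g. f.apply(this, arguments)): walk the
// arg chain to find the 'this' ArgOut_A_Dynamic, the ArgOut_A_FromStackArgs, and the
// StartCall; expand the stack arguments into argouts via GenerateArgOutForStackArgs; then
// hand the call off to the machine-dependent LowerCallIDynamic with the argument count.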
| IR::Instr * |
| Lowerer::LowerCallIDynamic(IR::Instr * callInstr, ushort callFlags) |
| { |
| if (!this->m_func->GetHasStackArgs()) |
| { |
| throw Js::RejitException(RejitReason::InlineApplyDisabled); |
| } |
| |
| IR::Instr * insertBeforeInstrForCFG = nullptr; |
| |
| // Lower args and look for StartCall |
| IR::Instr * argInstr = callInstr; |
| IR::SymOpnd * argLinkOpnd = argInstr->UnlinkSrc2()->AsSymOpnd(); |
| StackSym * argLinkSym = argLinkOpnd->m_sym->AsStackSym(); |
| AssertMsg(argLinkSym->IsArgSlotSym() && argLinkSym->m_isSingleDef, "Arg tree not single def..."); |
| argLinkOpnd->Free(this->m_func); |
| argInstr = argLinkSym->m_instrDef; |
| Assert(argInstr->m_opcode == Js::OpCode::ArgOut_A_Dynamic); |
| |
| IR::Instr* saveThisArgOutInstr = argInstr; |
| saveThisArgOutInstr->Unlink(); |
| saveThisArgOutInstr->FreeDst(); |
| |
| argLinkOpnd = argInstr->UnlinkSrc2()->AsSymOpnd(); |
| argLinkSym = argLinkOpnd->m_sym->AsStackSym(); |
| AssertMsg(argLinkSym->IsArgSlotSym() && argLinkSym->m_isSingleDef, "Arg tree not single def..."); |
| argLinkOpnd->Free(this->m_func); |
| argInstr = argLinkSym->m_instrDef; |
| Assert(argInstr->m_opcode == Js::OpCode::ArgOut_A_FromStackArgs); |
| |
| IR::Opnd* argsLength = m_lowererMD.GenerateArgOutForStackArgs(callInstr, argInstr); |
| |
| IR::RegOpnd* startCallDstOpnd = argInstr->UnlinkSrc2()->AsRegOpnd(); |
| argLinkSym = startCallDstOpnd->m_sym->AsStackSym(); |
| startCallDstOpnd->Free(this->m_func); |
| argInstr->Remove();// Remove ArgOut_A_FromStackArgs |
| |
| argInstr = argLinkSym->m_instrDef; |
| Assert(argInstr->m_opcode == Js::OpCode::StartCall); |
| insertBeforeInstrForCFG = argInstr->GetNextRealInstr(); |
| argInstr->Remove(); //Remove start call |
| |
| return m_lowererMD.LowerCallIDynamic(callInstr, saveThisArgOutInstr, argsLength, callFlags, insertBeforeInstrForCFG); |
| } |
| |
| //This is only for x64 & ARM. |
| IR::Opnd* |
| Lowerer::GenerateArgOutForStackArgs(IR::Instr* callInstr, IR::Instr* stackArgsInstr) |
| { |
| |
    // For architectures where we only pass 4 parameters in registers, the
| // generated code looks something like this: |
| // s25.var = LdLen_A s4.var |
| // s26.var = Ld_A s25.var |
| // BrEq_I4 $L3, s25.var,0 // If we have no further arguments to pass, don't pass them |
| // $L2: |
| // BrEq_I4 $L4, s25.var,1 // Loop through the rest of the arguments, putting them on the stack |
| // s25.var = SUB_I4 s25.var, 0x1 |
| // s10.var = LdElemI_A [s4.var+s25.var].var |
| // ArgOut_A_Dynamic s10.var, s25.var |
| // Br $L2 |
| // $L4: |
| // s25.var = LdImm 0 // set s25 to 0, since it'll be 1 on the way into this block |
| // s10.var = LdElemI_A [s4.var + 0 * MachReg].var // The last one has to be put into argslot 4, since this is likely a register, not a stack location. |
| // ArgOut_A_Dynamic s10.var, 4 |
| // $L3: |
| // |
| // Generalizing this for more register-passed parameters gives us code |
| // something like this: |
| // s25.var = LdLen_A s4.var |
| // s26.var = Ld_A s25.var |
| // BrLe_I4 $L3, s25.var,0 // If we have no further arguments to pass, don't pass them |
| // $L2: |
| // BrLe_I4 $L4, s25.var,INT_REG_COUNT-3 // Loop through the rest of the arguments up to the number passed in registers, putting them on the stack |
| // s25.var = SUB_I4 s25.var, 0x1 |
| // s10.var = LdElemI_A [s4.var+s25.var].var |
| // ArgOut_A_Dynamic s10.var, s25.var |
| // Br $L2 |
| // $L4: |
| // foreach of the remaining ones, N going down from (the number we can pass in regs -1) to 1 (0 omitted as we know that it'll be at least one register argument): |
| // BrEq_I4 $L__N, s25.var, N |
| // end foreach |
| // foreach of the remaining ones, N going down from (the number we can pass in regs -1) to 0: |
| // $L__N: |
| // s10.var = LdElemI_A [s4.var + N * MachReg].var // The last one has to be put into argslot 4, since this is likely a register, not a stack location. |
| // ArgOut_A_Dynamic s10.var, N+3 |
| // end foreach |
| // $L3: |
| |
| |
| #if defined(_M_IX86) |
| // We get a compilation error on x86 due to assigning a negative to a uint |
| // TODO: don't even define this function on x86 - we Assert(false) anyway there. |
| // Alternatively, don't define when INT_ARG_REG_COUNT - 4 < 0 |
| AssertOrFailFast(false); |
| return nullptr; |
| #else |
| |
| Assert(stackArgsInstr->m_opcode == Js::OpCode::ArgOut_A_FromStackArgs); |
| Assert(callInstr->m_opcode == Js::OpCode::CallIDynamic); |
| |
| this->m_lowererMD.GenerateFunctionObjectTest(callInstr, callInstr->GetSrc1()->AsRegOpnd(), false); |
| |
| if (callInstr->m_func->IsInlinee()) |
| { |
| return this->GenerateArgOutForInlineeStackArgs(callInstr, stackArgsInstr); |
| } |
| Func *func = callInstr->m_func; |
| IR::RegOpnd* stackArgs = stackArgsInstr->GetSrc1()->AsRegOpnd(); |
| |
| IR::RegOpnd* ldLenDstOpnd = IR::RegOpnd::New(TyMachReg, func); |
| const IR::AutoReuseOpnd autoReuseLdLenDstOpnd(ldLenDstOpnd, func); |
    IR::Instr* ldLen = IR::Instr::New(Js::OpCode::LdLen_A, ldLenDstOpnd, stackArgs, func);
| ldLenDstOpnd->SetValueType(ValueType::GetTaggedInt()); /*LdLen_A works only on stack arguments*/ |
| callInstr->InsertBefore(ldLen); |
| GenerateFastRealStackArgumentsLdLen(ldLen); |
| |
| IR::Instr* saveLenInstr = IR::Instr::New(Js::OpCode::MOV, IR::RegOpnd::New(TyMachReg, func), ldLenDstOpnd, func); |
| saveLenInstr->GetDst()->SetValueType(ValueType::GetTaggedInt()); |
| callInstr->InsertBefore(saveLenInstr); |
| |
| IR::LabelInstr* doneArgs = IR::LabelInstr::New(Js::OpCode::Label, func); |
| IR::Instr* branchDoneArgs = IR::BranchInstr::New(Js::OpCode::BrEq_I4, doneArgs, ldLenDstOpnd, IR::IntConstOpnd::New(0, TyInt8, func),func); |
| callInstr->InsertBefore(branchDoneArgs); |
| this->m_lowererMD.EmitInt4Instr(branchDoneArgs); |
| |
| IR::LabelInstr* startLoop = InsertLoopTopLabel(callInstr); |
| Loop * loop = startLoop->GetLoop(); |
| IR::LabelInstr* endLoop = IR::LabelInstr::New(Js::OpCode::Label, func); |
| |
| IR::Instr* branchOutOfLoop = IR::BranchInstr::New(Js::OpCode::BrLe_I4, endLoop, ldLenDstOpnd, IR::IntConstOpnd::New(INT_ARG_REG_COUNT - 3, TyInt8, func),func); |
| callInstr->InsertBefore(branchOutOfLoop); |
| this->m_lowererMD.EmitInt4Instr(branchOutOfLoop); |
| |
| IR::Instr* subInstr = IR::Instr::New(Js::OpCode::Sub_I4, ldLenDstOpnd, ldLenDstOpnd, IR::IntConstOpnd::New(1, TyMachReg, func),func); |
| callInstr->InsertBefore(subInstr); |
| this->m_lowererMD.EmitInt4Instr(subInstr); |
| |
| IR::IndirOpnd *nthArgument = IR::IndirOpnd::New(stackArgs, ldLenDstOpnd, TyMachReg, func); |
| IR::RegOpnd* ldElemDstOpnd = IR::RegOpnd::New(TyMachReg,func); |
| const IR::AutoReuseOpnd autoReuseldElemDstOpnd(ldElemDstOpnd, func); |
| IR::Instr* ldElem = IR::Instr::New(Js::OpCode::LdElemI_A, ldElemDstOpnd, nthArgument, func); |
| callInstr->InsertBefore(ldElem); |
| GenerateFastStackArgumentsLdElemI(ldElem); |
| |
| IR::Instr* argout = IR::Instr::New(Js::OpCode::ArgOut_A_Dynamic, func); |
| argout->SetSrc1(ldElemDstOpnd); |
| argout->SetSrc2(ldLenDstOpnd); |
| callInstr->InsertBefore(argout); |
| this->m_lowererMD.LoadDynamicArgumentUsingLength(argout); |
| |
| IR::BranchInstr *tailBranch = IR::BranchInstr::New(LowererMD::MDUncondBranchOpcode, startLoop, func); |
| callInstr->InsertBefore(tailBranch); |
| |
| callInstr->InsertBefore(endLoop); |
| |
| loop->regAlloc.liveOnBackEdgeSyms->Set(ldLenDstOpnd->m_sym->m_id); |
| |
    // Note: This loop iteratively adds instructions in two locations: in the block
    // of branches that jump to the "load elements to argOuts" instructions, and in
    // the block of "load elements to argOuts" instructions themselves.
| |
    // 4 denotes the 4th argslot, counting this, callinfo & the function object
| // INT_ARG_REG_COUNT is the number of parameters passed in int regs |
| uint current_reg_pass = INT_ARG_REG_COUNT - 4; |
| |
| do |
| { |
| // If we're on this pass we know we have to do at least one of these, so skip |
| // the branch if we're on the last one. |
| if (current_reg_pass != INT_ARG_REG_COUNT - 4) |
| { |
| IR::LabelInstr* loadBlockLabel = IR::LabelInstr::New(Js::OpCode::Label, func); |
| IR::Instr* branchToBlock = IR::BranchInstr::New(Js::OpCode::BrEq_I4, loadBlockLabel, ldLenDstOpnd, IR::IntConstOpnd::New(current_reg_pass + 1, TyInt8, func), func); |
| endLoop->InsertAfter(branchToBlock); |
| callInstr->InsertBefore(loadBlockLabel); |
| } |
| |
| // TODO: We can further optimize this with a GenerateFastStackArgumentsLdElemI that can |
| // handle us passing along constant argument references and encode them into the offset |
| // instead of having to use an IndirOpnd; this would allow us to save a few bytes here, |
| // and reduce register pressure a hair |
| |
| // stemp.var = LdImm current_reg_pass |
| IR::RegOpnd* localTemp = IR::RegOpnd::New(TyInt32, func); |
| // We need to make it a tagged int because GenerateFastStackArgumentsLdElemI asserts if |
| // it is not. |
| localTemp->SetValueType(ValueType::GetTaggedInt()); |
| const IR::AutoReuseOpnd autoReuseldElemDstOpnd3(localTemp, func); |
| this->InsertMove(localTemp, IR::IntConstOpnd::New(current_reg_pass, TyInt8, func, true), callInstr); |
| |
| // sTemp = LdElem_I [s4.var + current_reg_pass (aka stemp.var) ] |
| nthArgument = IR::IndirOpnd::New(stackArgs, localTemp, TyMachReg, func); |
| ldElemDstOpnd = IR::RegOpnd::New(TyMachReg, func); |
| const IR::AutoReuseOpnd autoReuseldElemDstOpnd2(ldElemDstOpnd, func); |
| ldElem = IR::Instr::New(Js::OpCode::LdElemI_A, ldElemDstOpnd, nthArgument, func); |
| callInstr->InsertBefore(ldElem); |
| GenerateFastStackArgumentsLdElemI(ldElem); |
| |
| argout = IR::Instr::New(Js::OpCode::ArgOut_A_Dynamic, func); |
| argout->SetSrc1(ldElemDstOpnd); |
| callInstr->InsertBefore(argout); |
| this->m_lowererMD.LoadDynamicArgument(argout, current_reg_pass + 4); |
| } |
| while (current_reg_pass-- != 0); |
| |
| callInstr->InsertBefore(doneArgs); |
| |
    /* Return the length, which will be used for callInfo generation & stack allocation */
| return saveLenInstr->GetDst()->AsRegOpnd(); |
| #endif |
| } |
| |
| void |
| Lowerer::GenerateLoadStackArgumentByIndex(IR::Opnd *dst, IR::RegOpnd *indexOpnd, IR::Instr *instr, int32 offset, Func *func) |
| { |
| // Load argument set dst = [ebp + index]. |
| |
| IR::RegOpnd *ebpOpnd = IR::Opnd::CreateFramePointerOpnd(func); |
| IR::IndirOpnd *argIndirOpnd = nullptr; |
| |
| // The stack looks like this: |
| // [new.target or FrameDisplay] <== EBP + formalParamOffset (4) + callInfo.Count |
| // arguments[n] <== EBP + formalParamOffset (4) + n |
| // ... |
| // arguments[1] <== EBP + formalParamOffset (4) + 2 |
| // arguments[0] <== EBP + formalParamOffset (4) + 1 |
| // this or new.target <== EBP + formalParamOffset (4) |
| // callinfo |
| // function object |
| // return addr |
| // EBP-> EBP chain |
| |
| //actual arguments offset is LowererMD::GetFormalParamOffset() + 1 (this) |
| |
| int32 actualOffset = GetFormalParamOffset() + offset; |
| Assert(GetFormalParamOffset() == 4); |
| const BYTE indirScale = this->m_lowererMD.GetDefaultIndirScale(); |
| |
| argIndirOpnd = IR::IndirOpnd::New(ebpOpnd, indexOpnd, indirScale, TyMachReg, this->m_func); |
| argIndirOpnd->SetOffset(actualOffset << indirScale); |
| |
| Lowerer::InsertMove(dst, argIndirOpnd, instr); |
| } |
| |
// This function assumes there is a stack-args bailout and that the index is always in range.
| bool |
| Lowerer::GenerateFastStackArgumentsLdElemI(IR::Instr* ldElem) |
| { |
| // MOV dst, ebp [(valueOpnd + 5) *4] // 5 for the stack layout |
| // |
| |
| IR::IndirOpnd *indirOpnd = ldElem->GetSrc1()->AsIndirOpnd(); |
| // Now load the index and check if it is an integer. |
| IR::RegOpnd *indexOpnd = indirOpnd->GetIndexOpnd(); |
| Assert (indexOpnd && indexOpnd->IsTaggedInt()); |
| |
| if(ldElem->m_func->IsInlinee()) |
| { |
| IR::IndirOpnd *argIndirOpnd = GetArgsIndirOpndForInlinee(ldElem, indexOpnd); |
| |
| Lowerer::InsertMove(ldElem->GetDst(), argIndirOpnd, ldElem); |
| } |
| else |
| { |
| GenerateLoadStackArgumentByIndex(ldElem->GetDst(), indexOpnd, ldElem, indirOpnd->GetOffset() + 1, m_func); // +1 to offset 'this' |
| } |
| |
| ldElem->Remove(); |
| return false; |
| } |
| |
| IR::IndirOpnd* |
| Lowerer::GetArgsIndirOpndForInlinee(IR::Instr* ldElem, IR::Opnd* valueOpnd) |
| { |
| Assert(ldElem->m_func->IsInlinee()); |
| IR::IndirOpnd* argIndirOpnd = nullptr; |
| |
| // Address of argument after 'this' |
| const auto firstRealArgStackSym = ldElem->m_func->GetInlineeArgvSlotOpnd()->m_sym->AsStackSym(); |
| this->m_func->SetArgOffset(firstRealArgStackSym, firstRealArgStackSym->m_offset + MachPtr); //Start after this pointer |
| IR::SymOpnd *firstArg = IR::SymOpnd::New(firstRealArgStackSym, TyMachPtr, ldElem->m_func); |
| const IR::AutoReuseOpnd autoReuseFirstArg(firstArg, m_func); |
| |
| IR::RegOpnd *const baseOpnd = IR::RegOpnd::New(TyMachReg, ldElem->m_func); |
| const IR::AutoReuseOpnd autoReuseBaseOpnd(baseOpnd, m_func); |
| InsertLea(baseOpnd, firstArg, ldElem); |
| |
| if (valueOpnd->IsIntConstOpnd()) |
| { |
| IntConstType offset = valueOpnd->AsIntConstOpnd()->GetValue() * MachPtr; |
| // TODO: Assert(Math::FitsInDWord(offset)); |
| argIndirOpnd = IR::IndirOpnd::New(baseOpnd, (int32)offset, TyMachReg, ldElem->m_func); |
| } |
| else |
| { |
| Assert(valueOpnd->IsRegOpnd()); |
| const BYTE indirScale = this->m_lowererMD.GetDefaultIndirScale(); |
| argIndirOpnd = IR::IndirOpnd::New(baseOpnd, valueOpnd->AsRegOpnd(), indirScale, TyMachReg, ldElem->m_func); |
| } |
| return argIndirOpnd; |
| } |
| |
| IR::IndirOpnd* |
| Lowerer::GetArgsIndirOpndForTopFunction(IR::Instr* ldElem, IR::Opnd* valueOpnd) |
| { |
| // Load argument set dst = [ebp + index] (or grab from the generator object if m_func is a generator function). |
| IR::RegOpnd *baseOpnd = m_func->GetJITFunctionBody()->IsCoroutine() ? LoadGeneratorArgsPtr(ldElem) : IR::Opnd::CreateFramePointerOpnd(m_func); |
| IR::IndirOpnd* argIndirOpnd = nullptr; |
| // The stack looks like this: |
| // ... |
| // arguments[1] |
| // arguments[0] |
| // this |
| // callinfo |
| // function object |
| // return addr |
| // EBP-> EBP chain |
| |
| //actual arguments offset is LowererMD::GetFormalParamOffset() + 1 (this) |
| |
| uint16 actualOffset = m_func->GetJITFunctionBody()->IsCoroutine() ? 1 : GetFormalParamOffset() + 1; //5 |
| Assert(actualOffset == 5 || m_func->GetJITFunctionBody()->IsGenerator()); |
| if (valueOpnd->IsIntConstOpnd()) |
| { |
| IntConstType offset = (valueOpnd->AsIntConstOpnd()->GetValue() + actualOffset) * MachPtr; |
| // TODO: Assert(Math::FitsInDWord(offset)); |
| argIndirOpnd = IR::IndirOpnd::New(baseOpnd, (int32)offset, TyMachReg, this->m_func); |
| } |
| else |
| { |
| const BYTE indirScale = this->m_lowererMD.GetDefaultIndirScale(); |
| argIndirOpnd = IR::IndirOpnd::New(baseOpnd->AsRegOpnd(), valueOpnd->AsRegOpnd(), indirScale, TyMachReg, this->m_func); |
| |
| // Need to offset valueOpnd by 5. Instead of changing valueOpnd, we can just add an offset to the indir. Changing |
| // valueOpnd requires creation of a temp sym (if it's not already a temp) so that the value of the sym that |
| // valueOpnd represents is not changed. |
| argIndirOpnd->SetOffset(actualOffset << indirScale); |
| } |
| return argIndirOpnd; |
| } |
| |
| void |
| Lowerer::GenerateCheckForArgumentsLength(IR::Instr* ldElem, IR::LabelInstr* labelCreateHeapArgs, IR::Opnd* actualParamOpnd, IR::Opnd* valueOpnd, Js::OpCode opcode) |
| { |
| // Check if index < nr_actuals. |
| InsertCompare(actualParamOpnd, valueOpnd, ldElem); |
| // Jump to helper if index >= nr_actuals. |
| // Do an unsigned check here so that a negative index will also fail. |
| // (GenerateLdValueFromCheckedIndexOpnd does not guarantee positive index on x86.) |
| InsertBranch(opcode, true, labelCreateHeapArgs, ldElem); |
| } |
| |
| bool |
| Lowerer::GenerateFastArgumentsLdElemI(IR::Instr* ldElem, IR::LabelInstr *labelFallThru) |
| { |
| // ---GenerateSmIntTest |
| // ---GenerateLdValueFromCheckedIndexOpnd |
| // ---LoadInputParamCount |
| // CMP actualParamOpnd, valueOpnd //Compare between the actual count & the index count (say i in arguments[i]) |
| // JLE $labelCreateHeapArgs |
| // MOV dst, ebp [(valueOpnd + 5) *4] // 5 for the stack layout |
| // JMP $fallthrough |
| // |
| //labelCreateHeapArgs: |
| // ---Bail out to create Heap Arguments object |
| |
| Assert(ldElem->DoStackArgsOpt()); |
| |
| IR::IndirOpnd *indirOpnd = ldElem->GetSrc1()->AsIndirOpnd(); |
| bool isInlinee = ldElem->m_func->IsInlinee(); |
| Func *func = ldElem->m_func; |
| |
| IR::LabelInstr *labelCreateHeapArgs = IR::LabelInstr::New(Js::OpCode::Label, func, true); |
| |
| // Now load the index and check if it is an integer. |
| bool emittedFastPath = false; |
| bool isNotInt = false; |
| IntConstType value = 0; |
| IR::RegOpnd *indexOpnd = indirOpnd->GetIndexOpnd(); |
| IR::Opnd *valueOpnd = nullptr; |
| IR::Opnd *actualParamOpnd = nullptr; |
| |
| bool hasIntConstIndex = indirOpnd->TryGetIntConstIndexValue(true, &value, &isNotInt); |
| |
| if (isNotInt || (isInlinee && hasIntConstIndex && value >= (ldElem->m_func->actualCount - 1))) |
| { |
| //Outside the range of actuals, skip |
| } |
| else if (labelFallThru != nullptr && !(hasIntConstIndex && value < 0)) //if index is not a negative int constant |
| { |
| if (isInlinee) |
| { |
| actualParamOpnd = IR::IntConstOpnd::New(ldElem->m_func->actualCount - 1, TyInt32, func); |
| } |
| else |
| { |
| // Load actuals count, LoadHeapArguments will reuse the generated instructions here |
| IR::Instr *loadInputParamCountInstr = this->m_lowererMD.LoadInputParamCount(ldElem, -1 /* don't include 'this' while counting actuals. */); |
| actualParamOpnd = loadInputParamCountInstr->GetDst()->UseWithNewType(TyInt32,this->m_func); |
| } |
| |
| if (hasIntConstIndex) |
| { |
| //Constant index |
| valueOpnd = IR::IntConstOpnd::New(value, TyInt32, func); |
| } |
| else |
| { |
| //Load valueOpnd from the index |
| valueOpnd = |
| m_lowererMD.LoadNonnegativeIndex( |
| indexOpnd, |
| ( |
| #if INT32VAR |
| indexOpnd->GetType() == TyUint32 |
| #else |
| // On 32-bit platforms, skip the negative check since for now, the unsigned upper bound check covers it |
| true |
| #endif |
| ), |
| labelCreateHeapArgs, |
| labelCreateHeapArgs, |
| ldElem); |
| } |
| |
| if (isInlinee) |
| { |
| if (!hasIntConstIndex) |
| { |
                // Runtime check to make sure the index is within the arguments.length range.
| GenerateCheckForArgumentsLength(ldElem, labelCreateHeapArgs, valueOpnd, actualParamOpnd, Js::OpCode::BrGe_A); |
| } |
| } |
| else |
| { |
| GenerateCheckForArgumentsLength(ldElem, labelCreateHeapArgs, actualParamOpnd, valueOpnd, Js::OpCode::BrLe_A); |
| } |
| |
| IR::Opnd *argIndirOpnd = nullptr; |
| if (isInlinee) |
| { |
| argIndirOpnd = GetArgsIndirOpndForInlinee(ldElem, valueOpnd); |
| } |
| else |
| { |
| argIndirOpnd = GetArgsIndirOpndForTopFunction(ldElem, valueOpnd); |
| } |
| |
| Lowerer::InsertMove(ldElem->GetDst(), argIndirOpnd, ldElem); |
| |
| // JMP $done |
| InsertBranch(Js::OpCode::Br, labelFallThru, ldElem); |
| // $labelCreateHeapArgs: |
| ldElem->InsertBefore(labelCreateHeapArgs); |
| emittedFastPath = true; |
| } |
| |
| if (!emittedFastPath) |
| { |
| throw Js::RejitException(RejitReason::DisableStackArgOpt); |
| } |
| |
| return emittedFastPath; |
| } |
| |
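// Load the untagged arguments length (excluding 'this'): a compile-time constant for
// inlinees, otherwise read from the callinfo via LoadInputParamCount. The original
// LdLen instruction is removed once the move is emitted.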
| bool |
| Lowerer::GenerateFastRealStackArgumentsLdLen(IR::Instr *ldLen) |
| { |
| if(ldLen->m_func->IsInlinee()) |
| { |
| //Get the length of the arguments |
| Lowerer::InsertMove(ldLen->GetDst(), |
| IR::IntConstOpnd::New(ldLen->m_func->actualCount - 1, TyUint32, ldLen->m_func), |
| ldLen); |
| } |
| else |
| { |
| IR::Instr *loadInputParamCountInstr = this->m_lowererMD.LoadInputParamCount(ldLen, -1); |
| IR::RegOpnd *actualCountOpnd = loadInputParamCountInstr->GetDst()->AsRegOpnd(); |
| Lowerer::InsertMove(ldLen->GetDst(), actualCountOpnd, ldLen); |
| } |
| ldLen->Remove(); |
| return false; |
| } |
| |
| bool |
| Lowerer::GenerateFastArgumentsLdLen(IR::Instr *ldLen, IR::LabelInstr* labelFallThru) |
| { |
| // TEST argslot, argslot //Test if the arguments slot is zero |
| // JNE $helper |
| // actualCountOpnd <-LoadInputParamCount fastpath |
| // SHL actualCountOpnd, actualCountOpnd, 1 // Left shift for tagging |
| // INC actualCountOpnd // Tagging |
| // MOV dst, actualCountOpnd |
| // JMP $fallthrough |
| //$helper: |
| |
| Assert(ldLen->DoStackArgsOpt()); |
| |
| if(ldLen->m_func->IsInlinee()) |
| { |
| //Get the length of the arguments |
| Lowerer::InsertMove(ldLen->GetDst(), |
| IR::AddrOpnd::New(Js::TaggedInt::ToVarUnchecked(ldLen->m_func->actualCount - 1), IR::AddrOpndKindConstantVar, ldLen->m_func), // -1 to exclude this pointer |
| ldLen); |
| } |
| else |
| { |
| IR::Instr *loadInputParamCountInstr = this->m_lowererMD.LoadInputParamCount(ldLen, -1); |
| IR::RegOpnd *actualCountOpnd = loadInputParamCountInstr->GetDst()->AsRegOpnd(); |
| |
| this->m_lowererMD.GenerateInt32ToVarConversion(actualCountOpnd, ldLen); |
| Lowerer::InsertMove(ldLen->GetDst(), actualCountOpnd, ldLen); |
| } |
| return true; |
| } |
| |
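// Load the function object's type into a new register. For a fixed (known-address)
// function object, read the type through a direct memory reference; otherwise read it
// indirectly off the function object register at RecyclableObject's type offset.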
| IR::RegOpnd* |
| Lowerer::GenerateFunctionTypeFromFixedFunctionObject(IR::Instr *insertInstrPt, IR::Opnd* functionObjOpnd) |
| { |
| IR::RegOpnd * functionTypeRegOpnd = IR::RegOpnd::New(TyMachReg, this->m_func); |
| IR::Opnd *functionTypeOpnd = nullptr; |
| |
| if(functionObjOpnd->IsAddrOpnd()) |
| { |
| IR::AddrOpnd* functionObjAddrOpnd = functionObjOpnd->AsAddrOpnd(); |
| // functionTypeRegOpnd = MOV [fixed function address + type offset] |
| functionTypeOpnd = IR::MemRefOpnd::New((void *)((intptr_t)functionObjAddrOpnd->m_address + Js::RecyclableObject::GetOffsetOfType()), TyMachPtr, this->m_func, |
| IR::AddrOpndKindDynamicObjectTypeRef); |
| } |
| else |
| { |
| functionTypeOpnd = IR::IndirOpnd::New(functionObjOpnd->AsRegOpnd(), Js::RecyclableObject::GetOffsetOfType(), TyMachPtr, this->m_func); |
| } |
| Lowerer::InsertMove(functionTypeRegOpnd, functionTypeOpnd, insertInstrPt); |
| return functionTypeRegOpnd; |
| } |
| |
| void |
| Lowerer::FinalLower() |
| { |
| this->m_lowererMD.FinalLower(); |
| |
| // We check if there are any lazy bailouts in |
| // LowererMD::FinalLower, so only insert the thunk |
| // if needed |
| if (this->m_func->HasLazyBailOut()) |
| { |
| this->InsertLazyBailOutThunk(); |
| } |
| |
| // Ensure that the StartLabel and EndLabel are inserted |
| // before the prolog and after the epilog respectively |
| IR::LabelInstr * startLabel = m_func->GetFuncStartLabel(); |
| if (startLabel != nullptr) |
| { |
| m_func->m_headInstr->InsertAfter(startLabel); |
| } |
| |
| IR::LabelInstr * endLabel = m_func->GetFuncEndLabel(); |
| if (endLabel != nullptr) |
| { |
| m_func->m_tailInstr->GetPrevRealInstr()->InsertBefore(endLabel); |
| } |
| } |
| |
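// Emit a single shared lazy-bailout thunk at the tail of the top-level function
// (x86/x64 only). On x64 the thunk first spills rax and the argument registers to the
// shadow space; it then re-enables implicit calls, loads the lazy BailOutRecord (in a
// register on x64, pushed on x86), calls SaveAllRegistersAndBailOut, and jumps to the
// function's epilog.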
| void |
| Lowerer::InsertLazyBailOutThunk() |
| { |
| #if defined(_M_IX86) || defined(_M_X64) |
| if (!this->m_func->IsTopFunc()) |
| { |
| return; |
| } |
| |
| Assert(this->m_func->GetLazyBailOutRecordSlot() != nullptr); |
| |
| IR::Instr *tailInstr = this->m_func->m_tailInstr; |
| |
| // Label (LazyBailOutThunk): |
| IR::LabelInstr *lazyBailOutLabel = IR::LabelInstr::New(Js::OpCode::LazyBailOutThunkLabel, this->m_func, true /* isOpHelper */); |
| lazyBailOutLabel->m_hasNonBranchRef = true; // Make sure that this label isn't removed |
| LABELNAMESET(lazyBailOutLabel, "LazyBailOutThunk"); |
| tailInstr->InsertBefore(lazyBailOutLabel); |
| |
| #ifdef _M_X64 |
| // 1. Save registers used for parameters, and rax, if necessary, into the shadow space allocated for register parameters: |
| // mov [rsp + 16], RegArg1 (if branchConditionOpnd) |
| // mov [rsp + 8], RegArg0 |
| // mov [rsp], rax |
| extern const IRType RegTypes[RegNumCount]; |
| const RegNum regs[3] = { RegRAX, RegArg0, RegArg1 }; |
| for (int i = 2; i >= 0; i--) |
| { |
| RegNum reg = regs[i]; |
| const IRType regType = RegTypes[reg]; |
| Lowerer::InsertMove( |
| IR::SymOpnd::New(this->m_func->m_symTable->GetArgSlotSym(static_cast<Js::ArgSlot>(i + 1)), regType, this->m_func), |
| IR::RegOpnd::New(nullptr, reg, regType, this->m_func), |
| tailInstr |
| ); |
| } |
| #endif |
| |
| // 2. Always enable implicit call flag |
| // If StFld/StElem instructions have both LazyBailOut and BailOnImplicitCallPreop and the operation turns out to not |
| // be an implicit call, at that point, we have already disabled the implicit calls flag. We would then do lazy bailout |
| // and not go back to the remaining code. Therefore, we need to re-enable implicit calls again in the thunk. |
| IR::Opnd *disableImplicitCallFlagAddress = this->m_lowererMD.GenerateMemRef( |
| this->m_func->GetThreadContextInfo()->GetDisableImplicitFlagsAddr(), |
| TyInt8, |
| tailInstr /* insertBeforeInstr */ |
| ); |
| |
| #ifdef _M_X64 |
    // On x64, we might decide to load the address of the implicit call flags into a
    // register, but since we are in FinalLower (past RegAlloc), the operands won't have
    // any registers assigned to them. We force them to be rcx (because they are going
    // to be replaced anyway).
    // TODO: This hack doesn't work with ARM/ARM64.
    // Will need to revisit this if we decide to do lazy bailout on those platforms.
| IR::Instr *moveInstr = Lowerer::InsertMove( |
| disableImplicitCallFlagAddress, |
| IR::IntConstOpnd::New(DisableImplicitNoFlag, TyInt8, this->m_func, true), |
| tailInstr /* insertBeforeInstr */ |
| ); |
| |
| if (moveInstr->GetDst()->IsIndirOpnd()) |
| { |
| moveInstr->GetDst()->AsIndirOpnd()->GetBaseOpnd()->AsRegOpnd()->SetReg(RegArg0); |
| } |
| |
| if (moveInstr->m_prev->GetDst()->IsRegOpnd()) |
| { |
| moveInstr->m_prev->GetDst()->AsRegOpnd()->SetReg(RegArg0); |
| } |
| |
| #else |
| Lowerer::InsertMove( |
| disableImplicitCallFlagAddress, |
| IR::IntConstOpnd::New(DisableImplicitNoFlag, TyInt8, this->m_func, true), |
| tailInstr /* insertBeforeInstr */ |
| ); |
| #endif |
| |
| #ifdef _M_X64 |
| // 3. mov rcx, [rbp + offset] ; for bailout record |
| IR::RegOpnd *arg0 = IR::RegOpnd::New(nullptr, RegArg0, TyMachPtr, this->m_func); |
| IR::SymOpnd *bailOutRecordAddr = IR::SymOpnd::New(this->m_func->GetLazyBailOutRecordSlot(), TyMachPtr, this->m_func); |
| Lowerer::InsertMove(arg0, bailOutRecordAddr, tailInstr, false /* generateWriteBarrier */); |
| #else |
| // 3. Put the BailOutRecord on the stack for x86 |
| IR::Instr *const newInstr = IR::Instr::New(Js::OpCode::PUSH, this->m_func); |
| IR::SymOpnd *bailOutRecordAddr = IR::SymOpnd::New(this->m_func->GetLazyBailOutRecordSlot(), TyMachPtr, this->m_func); |
| newInstr->SetSrc1(bailOutRecordAddr); |
| tailInstr->InsertBefore(newInstr); |
| #endif |
| |
| // 4. call SaveAllRegistersAndBailOut |
| IR::Instr *callInstr = IR::Instr::New(Js::OpCode::Call, this->m_func); |
| callInstr->SetSrc1(IR::HelperCallOpnd::New(IR::HelperSaveAllRegistersAndBailOut, this->m_func)); |
| tailInstr->InsertBefore(callInstr); |
| m_lowererMD.LowerCall(callInstr, 0); |
| |
| // 5. jmp to function's epilog |
| IR::LabelInstr *exitLabel = this->m_func->m_exitInstr->GetPrevLabelInstr(); |
| IR::BranchInstr *branchInstr = IR::BranchInstr::New(Js::OpCode::JMP, exitLabel, this->m_func); |
| tailInstr->InsertBefore(branchInstr); |
| |
| #endif |
| } |
| |
| void |
| Lowerer::EHBailoutPatchUp() |
| { |
| Assert(this->m_func->isPostLayout); |
| // 1. Insert return thunks for all the regions. |
| // 2. Set the hasBailedOut bit to true on all bailout paths in EH regions. |
| // 3. Insert code after every bailout in a try or catch region to save the return value on the stack, and jump to the return thunk (See Region.h) of that region. |
    // 4. Insert code right before the epilog, to restore the return value (saved in 3.) from a bailout into eax.
| |
| IR::LabelInstr * restoreReturnValueFromBailoutLabel = IR::LabelInstr::New(Js::OpCode::Label, this->m_func); |
| |
| IR::LabelInstr * epilogLabel; |
| IR::Instr * exitPrevInstr = this->m_func->m_exitInstr->GetPrevRealInstrOrLabel(); |
| if (exitPrevInstr->IsLabelInstr()) |
| { |
| epilogLabel = exitPrevInstr->AsLabelInstr(); |
| } |
| else |
| { |
| epilogLabel = IR::LabelInstr::New(Js::OpCode::Label, this->m_func); |
| this->m_func->m_exitInstr->InsertBefore(epilogLabel); |
| } |
| |
| IR::Instr * tmpInstr = nullptr; |
| bool restoreReturnFromBailoutEmitted = false; |
| FOREACH_INSTR_IN_FUNC_EDITING(instr, instrNext, this->m_func) |
| { |
| if (instr->IsLabelInstr()) |
| { |
| this->currentRegion = instr->AsLabelInstr()->GetRegion(); |
| } |
| |
| // Consider (radua): Assert(this->currentRegion) here? |
| if (this->currentRegion) |
| { |
| RegionType currentRegionType = this->currentRegion->GetType(); |
| if (currentRegionType == RegionTypeTry || currentRegionType == RegionTypeCatch || currentRegionType == RegionTypeFinally) |
| { |
| if (this->currentRegion->IsNonExceptingFinally()) |
| { |
| Region * parent = this->currentRegion->GetParent(); |
| |
| while (parent->IsNonExceptingFinally()) |
| { |
| parent = parent->GetParent(); |
| } |
| if (parent->GetType() == RegionTypeRoot) |
| { |
| continue; |
| } |
| } |
| this->InsertReturnThunkForRegion(this->currentRegion, restoreReturnValueFromBailoutLabel); |
| if (instr->HasBailOutInfo()) |
| { |
| if (instr->GetBailOutInfo()->bailOutFunc == this->m_func) |
| { |
                        // We don't set this bit for inlined code; if there was a bailout in the inlined code
                        // and an exception was thrown, we want the caller's handler to handle the exception accordingly.
                        // TODO : Revisit when we start inlining functions with try-catch/try-finally
| this->SetHasBailedOut(instr); |
| } |
| tmpInstr = this->EmitEHBailoutStackRestore(instr); |
| this->EmitSaveEHBailoutReturnValueAndJumpToRetThunk(tmpInstr); |
| if (!restoreReturnFromBailoutEmitted) |
| { |
| this->EmitRestoreReturnValueFromEHBailout(restoreReturnValueFromBailoutLabel, epilogLabel); |
| restoreReturnFromBailoutEmitted = true; |
| } |
| } |
| } |
| } |
| } |
| NEXT_INSTR_IN_FUNC_EDITING |
| } |
| |
| bool |
| Lowerer::GenerateFastLdFld(IR::Instr * const instrLdFld, IR::JnHelperMethod helperMethod, IR::JnHelperMethod polymorphicHelperMethod, |
| IR::LabelInstr ** labelBailOut, IR::RegOpnd* typeOpnd, bool* pIsHelper, IR::LabelInstr** pLabelHelper) |
| { |
| // Generates: |
| // |
| // r1 = object->type |
| // if (r1 is taggedInt) goto helper |
| // Load inline cache |
| // if monomorphic |
| // r2 = address of the monomorphic inline cache |
| // if polymorphic |
| // r2 = address of the polymorphic inline cache array |
| // r3 = (type >> PIC shift amount) & (PIC size - 1) |
| // r2 = r2 + r3 |
| // Try load property using proto cache (if protoFirst) |
| // Try load property using local cache |
| // Try loading property using proto cache (if !protoFirst) |
| // Try loading property using flags cache |
| // |
| // Loading property using local cache: |
| // if (r1 == r2->u.local.type) |
| // result = load inline slot r2->u.local.slotIndex from r1 |
| // goto fallthru |
| // if ((r1 | InlineCacheAuxSlotTypeTag) == r2->u.local.type) |
| // result = load aux slot r2->u.local.slotIndex from r1 |
| // goto fallthru |
| // |
| // Loading property using proto cache: |
| // if (r1 == r2->u.proto.type) |
| // r3 = r2->u.proto.prototypeObject |
| // result = load inline slot r2->u.proto.slotIndex from r3 |
| // goto fallthru |
    //     if ((r1 | InlineCacheAuxSlotTypeTag) == r2->u.proto.type)
| // r3 = r2->u.proto.prototypeObject |
| // result = load aux slot r2->u.proto.slotIndex from r3 |
| // goto fallthru |
| // |
| // Loading property using flags cache: |
| // if (r2->u.accessor.flags & (Js::InlineCacheGetterFlag | Js::InlineCacheSetterFlag) == 0) |
| // if (r1 == r2->u.accessor.type) |
| // result = load inline slot r2->u.accessor.slotIndex from r1 |
| // goto fallthru |
| // if ((r1 | InlineCacheAuxSlotTypeTag) == r2->u.accessor.type) |
| // result = load aux slot r2->u.accessor.slotIndex from r1 |
| // goto fallthru |
| // |
| // Loading an inline slot: |
| // result = [r1 + slotIndex * sizeof(Var)] |
| // |
| // Loading an aux slot: |
| // slotArray = r1->auxSlots |
| // result = [slotArray + slotIndex * sizeof(Var)] |
| // |
| // We only emit the code block for a type of cache (local/proto/flags) if the profile data |
| // indicates that type of cache was used to load the property in the past. |
| // We don't emit the type check with aux slot tag if the profile data indicates that we didn't |
| // load the property from an aux slot before. |
| // We don't emit the type check without an aux slot tag if the profile data indicates that we didn't |
| // load the property from an inline slot before. |
| |
| IR::Opnd * opndSrc = instrLdFld->GetSrc1(); |
| AssertMsg(opndSrc->IsSymOpnd() && opndSrc->AsSymOpnd()->IsPropertySymOpnd() && opndSrc->AsSymOpnd()->m_sym->IsPropertySym(), "Expected PropertySym as src of LdFld"); |
| |
| Assert(!instrLdFld->DoStackArgsOpt()); |
| |
| IR::PropertySymOpnd * propertySymOpnd = opndSrc->AsPropertySymOpnd(); |
| PropertySym * propertySym = propertySymOpnd->m_sym->AsPropertySym(); |
| |
| PHASE_PRINT_TESTTRACE( |
| Js::ObjTypeSpecPhase, |
| this->m_func, |
| _u("Field load: %s, property ID: %d, func: %s, cache ID: %d, cloned cache: false\n"), |
| Js::OpCodeUtil::GetOpCodeName(instrLdFld->m_opcode), |
| propertySym->m_propertyId, |
| this->m_func->GetJITFunctionBody()->GetDisplayName(), |
| propertySymOpnd->m_inlineCacheIndex); |
| |
| Assert(pIsHelper != nullptr); |
| bool& isHelper = *pIsHelper; |
| |
| Assert(pLabelHelper != nullptr); |
| IR::LabelInstr*& labelHelper = *pLabelHelper; |
| |
| bool doLocal = true; |
| bool doProto = instrLdFld->m_opcode == Js::OpCode::LdMethodFld |
| || instrLdFld->m_opcode == Js::OpCode::LdRootMethodFld |
| || instrLdFld->m_opcode == Js::OpCode::ScopedLdMethodFld; |
| bool doProtoFirst = doProto; |
| bool doInlineSlots = true; |
| bool doAuxSlots = true; |
| if (!PHASE_OFF(Js::ProfileBasedFldFastPathPhase, this->m_func) && instrLdFld->IsProfiledInstr()) |
| { |
| IR::ProfiledInstr * profiledInstrLdFld = instrLdFld->AsProfiledInstr(); |
| if (profiledInstrLdFld->u.FldInfo().flags != Js::FldInfo_NoInfo) |
| { |
| doProto = !!(profiledInstrLdFld->u.FldInfo().flags & Js::FldInfo_FromProto); |
| doLocal = !!(profiledInstrLdFld->u.FldInfo().flags & Js::FldInfo_FromLocal); |
| |
| if ((profiledInstrLdFld->u.FldInfo().flags & (Js::FldInfo_FromInlineSlots | Js::FldInfo_FromAuxSlots)) == Js::FldInfo_FromInlineSlots) |
| { |
| // If the inline slots flag is set and the aux slots flag is not, only generate the inline slots check |
| doAuxSlots = false; |
| } |
| else if ((profiledInstrLdFld->u.FldInfo().flags & (Js::FldInfo_FromInlineSlots | Js::FldInfo_FromAuxSlots)) == Js::FldInfo_FromAuxSlots) |
| { |
| // If the aux slots flag is set and the inline slots flag is not, only generate the aux slots check |
| doInlineSlots = false; |
| } |
| } |
| else if (!profiledInstrLdFld->u.FldInfo().valueType.IsUninitialized()) |
| { |
| // We have value type info about the field but no flags. This means we shouldn't generate any |
| // fast paths for this field load. |
| doLocal = false; |
| doProto = false; |
| } |
| } |
| |
| if (!doLocal && !doProto) |
| { |
| return false; |
| } |
| |
| IR::LabelInstr * labelFallThru = instrLdFld->GetOrCreateContinueLabel(); |
| |
| if (labelHelper == nullptr) |
| { |
| labelHelper = IR::LabelInstr::New(Js::OpCode::Label, this->m_func, isHelper); |
| } |
| |
| IR::RegOpnd * opndBase = propertySymOpnd->CreatePropertyOwnerOpnd(m_func); |
| bool usePolymorphicInlineCache = !!propertySymOpnd->m_runtimePolymorphicInlineCache; |
| |
| IR::RegOpnd * opndInlineCache = IR::RegOpnd::New(TyMachPtr, this->m_func); |
| if (usePolymorphicInlineCache) |
| { |
| Lowerer::InsertMove(opndInlineCache, IR::AddrOpnd::New(propertySymOpnd->m_runtimePolymorphicInlineCache->GetInlineCachesAddr(), IR::AddrOpndKindDynamicInlineCache, this->m_func, true), instrLdFld); |
| } |
| else |
| { |
| Lowerer::InsertMove(opndInlineCache, this->LoadRuntimeInlineCacheOpnd(instrLdFld, propertySymOpnd, isHelper), instrLdFld); |
| } |
| |
| if (typeOpnd == nullptr) |
| { |
| typeOpnd = IR::RegOpnd::New(TyMachPtr, this->m_func); |
| GenerateObjectTestAndTypeLoad(instrLdFld, opndBase, typeOpnd, labelHelper); |
| } |
| |
| if (usePolymorphicInlineCache) |
| { |
| LowererMD::GenerateLoadPolymorphicInlineCacheSlot(instrLdFld, opndInlineCache, typeOpnd, propertySymOpnd->m_runtimePolymorphicInlineCache->GetSize()); |
| } |
| |
| IR::LabelInstr * labelNext = nullptr; |
| IR::Opnd * opndDst = instrLdFld->GetDst(); |
| IR::RegOpnd * opndTaggedType = nullptr; |
| IR::BranchInstr * labelNextBranchToPatch = nullptr; |
| |
| if (doProto && doProtoFirst) |
| { |
| if (doInlineSlots) |
| { |
| labelNext = IR::LabelInstr::New(Js::OpCode::Label, this->m_func, isHelper); |
| labelNextBranchToPatch = GenerateProtoInlineCacheCheck(instrLdFld, typeOpnd, opndInlineCache, labelNext); |
| GenerateLdFldFromProtoInlineCache(instrLdFld, opndBase, opndDst, opndInlineCache, labelFallThru, true); |
| instrLdFld->InsertBefore(labelNext); |
| } |
| if (doAuxSlots) |
| { |
| if (opndTaggedType == nullptr) |
| { |
| opndTaggedType = IR::RegOpnd::New(TyMachPtr, this->m_func); |
| LowererMD::GenerateLoadTaggedType(instrLdFld, typeOpnd, opndTaggedType); |
| } |
| labelNext = IR::LabelInstr::New(Js::OpCode::Label, this->m_func, isHelper); |
| labelNextBranchToPatch = GenerateProtoInlineCacheCheck(instrLdFld, opndTaggedType, opndInlineCache, labelNext); |
| GenerateLdFldFromProtoInlineCache(instrLdFld, opndBase, opndDst, opndInlineCache, labelFallThru, false); |
| instrLdFld->InsertBefore(labelNext); |
| } |
| } |
| if (doLocal) |
| { |
| if (doInlineSlots) |
| { |
| labelNext = IR::LabelInstr::New(Js::OpCode::Label, this->m_func, isHelper); |
| labelNextBranchToPatch = GenerateLocalInlineCacheCheck(instrLdFld, typeOpnd, opndInlineCache, labelNext); |
| GenerateLdFldFromLocalInlineCache(instrLdFld, opndBase, opndDst, opndInlineCache, labelFallThru, true); |
| instrLdFld->InsertBefore(labelNext); |
| } |
| if (doAuxSlots) |
| { |
| if (opndTaggedType == nullptr) |
| { |
| opndTaggedType = IR::RegOpnd::New(TyMachPtr, this->m_func); |
| LowererMD::GenerateLoadTaggedType(instrLdFld, typeOpnd, opndTaggedType); |
| } |
| labelNext = IR::LabelInstr::New(Js::OpCode::Label, this->m_func, isHelper); |
| labelNextBranchToPatch = GenerateLocalInlineCacheCheck(instrLdFld, opndTaggedType, opndInlineCache, labelNext); |
| GenerateLdFldFromLocalInlineCache(instrLdFld, opndBase, opndDst, opndInlineCache, labelFallThru, false); |
| instrLdFld->InsertBefore(labelNext); |
| } |
| } |
| if (doProto && !doProtoFirst) |
| { |
| if (doInlineSlots) |
| { |
| labelNext = IR::LabelInstr::New(Js::OpCode::Label, this->m_func, isHelper); |
| labelNextBranchToPatch = GenerateProtoInlineCacheCheck(instrLdFld, typeOpnd, opndInlineCache, labelNext); |
| GenerateLdFldFromProtoInlineCache(instrLdFld, opndBase, opndDst, opndInlineCache, labelFallThru, true); |
| instrLdFld->InsertBefore(labelNext); |
| } |
| if (doAuxSlots) |
| { |
| if (opndTaggedType == nullptr) |
| { |
| opndTaggedType = IR::RegOpnd::New(TyMachPtr, this->m_func); |
| LowererMD::GenerateLoadTaggedType(instrLdFld, typeOpnd, opndTaggedType); |
| } |
| labelNext = IR::LabelInstr::New(Js::OpCode::Label, this->m_func, isHelper); |
| labelNextBranchToPatch = GenerateProtoInlineCacheCheck(instrLdFld, opndTaggedType, opndInlineCache, labelNext); |
| GenerateLdFldFromProtoInlineCache(instrLdFld, opndBase, opndDst, opndInlineCache, labelFallThru, false); |
| instrLdFld->InsertBefore(labelNext); |
| } |
| } |
| |
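// The last cache-miss branch has no further fast-path case to fall through to:
// retarget it at the helper and drop the now-unreferenced label.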
| Assert(labelNextBranchToPatch); |
| labelNextBranchToPatch->SetTarget(labelHelper); |
| labelNext->Remove(); |
| |
| // $helper: |
| // dst = CALL Helper(inlineCache, base, field, scriptContext) |
| // $fallthru: |
| isHelper = true; |
| |
| // Return false to indicate the original instruction was not lowered. Caller will insert the helper label. |
| return false; |
| } |
| |
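// Check whether the add-property fast path first needs the object's aux slot array to
// be grown: the bits of inlineCache->u.local.rawUInt16 above the cache-layout selector
// hold the required aux slot capacity, so if shifting the selector bit(s) out leaves a
// non-zero value, bail to the helper, which knows how to adjust the aux slots.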
| void |
| Lowerer::GenerateAuxSlotAdjustmentRequiredCheck( |
| IR::Instr * instrToInsertBefore, |
| IR::RegOpnd * opndInlineCache, |
| IR::LabelInstr * labelHelper) |
| { |
| // regSlotCap = MOV [&(inlineCache->u.local.rawUInt16)] // sized to 16 bits |
| IR::RegOpnd * regSlotCap = IR::RegOpnd::New(TyMachReg, instrToInsertBefore->m_func); |
| IR::IndirOpnd * memSlotCap = IR::IndirOpnd::New(opndInlineCache, (int32)offsetof(Js::InlineCache, u.local.rawUInt16), TyUint16, instrToInsertBefore->m_func); |
| InsertMove(regSlotCap, memSlotCap, instrToInsertBefore); |
| |
| IR::IntConstOpnd * constSelectorBitCount = IR::IntConstOpnd::New(Js::InlineCache::CacheLayoutSelectorBitCount, TyUint16, instrToInsertBefore->m_func, /* dontEncode = */ true); |
| |
| #if _M_ARM64 |
| IR::Instr * testBranch = InsertBranch(Js::OpCode::TBZ, labelHelper, instrToInsertBefore); |
| testBranch->SetSrc1(regSlotCap); |
| testBranch->SetSrc2(constSelectorBitCount); |
| #else |
| // SAR regSlotCap, Js::InlineCache::CacheLayoutSelectorBitCount |
| InsertShiftBranch(Js::OpCode::Shr_A, regSlotCap, regSlotCap, constSelectorBitCount, Js::OpCode::BrNeq_A, true, labelHelper, instrToInsertBefore); |
| #endif |
| } |
| |
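// After an add-property fast path succeeds, transition the object to the new type
// recorded in the inline cache (u.local.type). If the cached type pointer carries
// InlineCacheAuxSlotTypeTag, strip the tag before storing it into the object.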
| void |
| Lowerer::GenerateSetObjectTypeFromInlineCache( |
| IR::Instr * instrToInsertBefore, |
| IR::RegOpnd * opndBase, |
| IR::RegOpnd * opndInlineCache, |
| bool isTypeTagged) |
| { |
| // regNewType = MOV [&(inlineCache->u.local.type)] |
| IR::RegOpnd * regNewType = IR::RegOpnd::New(TyMachReg, instrToInsertBefore->m_func); |
| IR::IndirOpnd * memNewType = IR::IndirOpnd::New(opndInlineCache, (int32)offsetof(Js::InlineCache, u.local.type), TyMachReg, instrToInsertBefore->m_func); |
| InsertMove(regNewType, memNewType, instrToInsertBefore); |
| |
| // AND regNewType, ~InlineCacheAuxSlotTypeTag |
| if (isTypeTagged) |
| { |
| // On 64-bit platforms IntConstOpnd isn't big enough to hold TyMachReg values. |
| IR::IntConstOpnd * constTypeTagComplement = IR::IntConstOpnd::New(~InlineCacheAuxSlotTypeTag, TyMachReg, instrToInsertBefore->m_func, /* dontEncode = */ true); |
| InsertAnd(regNewType, regNewType, constTypeTagComplement, instrToInsertBefore); |
| } |
| |
| // MOV base->type, regNewType |
| IR::IndirOpnd * memObjType = IR::IndirOpnd::New(opndBase, Js::RecyclableObject::GetOffsetOfType(), TyMachReg, instrToInsertBefore->m_func); |
| InsertMove(memObjType, regNewType, instrToInsertBefore); |
| } |
| |
| bool |
| Lowerer::GenerateFastStFld(IR::Instr * const instrStFld, IR::JnHelperMethod helperMethod, IR::JnHelperMethod polymorphicHelperMethod, IR::LabelInstr ** labelBailOut, IR::RegOpnd* typeOpnd, |
| bool* pIsHelper, IR::LabelInstr** pLabelHelper, bool withPutFlags, Js::PropertyOperationFlags flags) |
| { |
| // Generates: |
| // |
| // r1 = object->type |
| // if (r1 is taggedInt) goto helper |
| // Load inline cache |
| // if monomorphic |
| // r2 = address of the monomorphic inline cache |
| // if polymorphic |
| // r2 = address of the polymorphic inline cache array |
| // r3 = (type >> PIC shift amount) & (PIC size - 1) |
| // r2 = r2 + r3 |
| // Try store property using local cache |
| // |
    // Storing property using local cache:
| // if (r1 == r2->u.local.type) |
| // store value to inline slot r2->u.local.slotIndex on r1 |
| // goto fallthru |
| // if ((r1 | InlineCacheAuxSlotTypeTag) == r2->u.local.type) |
| // store value to aux slot r2->u.local.slotIndex on r1 |
| // goto fallthru |
| // |
| // Storing to an inline slot: |
| // [r1 + slotIndex * sizeof(Var)] = value |
| // |
| // Storing to an aux slot: |
| // slotArray = r1->auxSlots |
| // [slotArray + slotIndex * sizeof(Var)] = value |
| // |
| // We don't emit the type check with aux slot tag if the profile data indicates that we didn't |
| // store the property to an aux slot before. |
| // We don't emit the type check without an aux slot tag if the profile data indicates that we didn't |
| // store the property to an inline slot before. |
| |
| IR::Opnd * opndSrc = instrStFld->GetSrc1(); |
| IR::Opnd * opndDst = instrStFld->GetDst(); |
| AssertMsg(opndDst->IsSymOpnd() && opndDst->AsSymOpnd()->IsPropertySymOpnd() && opndDst->AsSymOpnd()->m_sym->IsPropertySym(), "Expected PropertySym as dst of StFld"); |
| |
| IR::PropertySymOpnd * propertySymOpnd = opndDst->AsPropertySymOpnd(); |
| PropertySym * propertySym = propertySymOpnd->m_sym->AsPropertySym(); |
| PHASE_PRINT_TESTTRACE( |
| Js::ObjTypeSpecPhase, |
| this->m_func, |
| _u("Field store: %s, property ID: %u, func: %s, cache ID: %d, cloned cache: false\n"), |
| Js::OpCodeUtil::GetOpCodeName(instrStFld->m_opcode), |
| propertySym->m_propertyId, |
| this->m_func->GetJITFunctionBody()->GetDisplayName(), |
| propertySymOpnd->m_inlineCacheIndex); |
| |
| Assert(pIsHelper != nullptr); |
| bool& isHelper = *pIsHelper; |
| |
| Assert(pLabelHelper != nullptr); |
| IR::LabelInstr*& labelHelper = *pLabelHelper; |
| |
| bool doStore = true; |
| bool doAdd = false; |
| bool doInlineSlots = true; |
| bool doAuxSlots = true; |
| if (!PHASE_OFF(Js::ProfileBasedFldFastPathPhase, this->m_func) && instrStFld->IsProfiledInstr()) |
| { |
| IR::ProfiledInstr * profiledInstrStFld = instrStFld->AsProfiledInstr(); |
| if (profiledInstrStFld->u.FldInfo().flags != Js::FldInfo_NoInfo) |
| { |
| if (!(profiledInstrStFld->u.FldInfo().flags & (Js::FldInfo_FromLocal | Js::FldInfo_FromLocalWithoutProperty))) |
| { |
| return false; |
| } |
| |
| if (!PHASE_OFF(Js::AddFldFastPathPhase, this->m_func)) |
| { |
                // We always try the store-field fast path, unless the profile data specifically says we never set an existing property here but always add a new one.
| if ((profiledInstrStFld->u.FldInfo().flags & (Js::FldInfo_FromLocal | Js::FldInfo_FromLocalWithoutProperty)) == Js::FldInfo_FromLocalWithoutProperty) |
| { |
| doStore = false; |
| } |
| |
                // On the other hand, we only emit the add-field fast path if the profile data explicitly says we do add properties here.
| if (!!(profiledInstrStFld->u.FldInfo().flags & Js::FldInfo_FromLocalWithoutProperty)) |
| { |
| doAdd = true; |
| } |
| } |
| else |
| { |
| #if ENABLE_DEBUG_CONFIG_OPTIONS |
| char16 debugStringBuffer[MAX_FUNCTION_BODY_DEBUG_STRING_SIZE]; |
| #endif |
| PHASE_PRINT_TRACE(Js::AddFldFastPathPhase, this->m_func, |
| _u("AddFldFastPath: function: %s(%s) property ID: %u no fast path, because the phase is off.\n"), |
| this->m_func->GetJITFunctionBody()->GetDisplayName(), this->m_func->GetDebugNumberSet(debugStringBuffer), |
| propertySym->m_propertyId); |
| } |
| |
| if ((profiledInstrStFld->u.FldInfo().flags & (Js::FldInfo_FromInlineSlots | Js::FldInfo_FromAuxSlots)) == Js::FldInfo_FromInlineSlots) |
| { |
| // If the inline slots flag is set and the aux slots flag is not, only generate the inline slots check |
| doAuxSlots = false; |
| } |
| else if ((profiledInstrStFld->u.FldInfo().flags & (Js::FldInfo_FromInlineSlots | Js::FldInfo_FromAuxSlots)) == Js::FldInfo_FromAuxSlots) |
| { |
| // If the aux slots flag is set and the inline slots flag is not, only generate the aux slots check |
| doInlineSlots = false; |
| } |
| } |
| else if (!profiledInstrStFld->u.FldInfo().valueType.IsUninitialized()) |
| { |
| // We have value type info about the field but no flags. This means we shouldn't generate any |
| // fast paths for this field store. |
| return false; |
| } |
| } |
| |
| Assert(doStore || doAdd); |
| |
| if (labelHelper == nullptr) |
| { |
| labelHelper = IR::LabelInstr::New(Js::OpCode::Label, this->m_func, true); |
| } |
| |
| IR::LabelInstr * labelFallThru = instrStFld->GetOrCreateContinueLabel(); |
| IR::RegOpnd * opndBase = propertySymOpnd->CreatePropertyOwnerOpnd(m_func); |
| bool usePolymorphicInlineCache = !!propertySymOpnd->m_runtimePolymorphicInlineCache; |
| |
| if (doAdd) |
| { |
| #if ENABLE_DEBUG_CONFIG_OPTIONS |
| char16 debugStringBuffer[MAX_FUNCTION_BODY_DEBUG_STRING_SIZE]; |
| #endif |
| PHASE_PRINT_TRACE(Js::AddFldFastPathPhase, this->m_func, |
| _u("AddFldFastPath: function: %s(%s) property ID: %d %s fast path for %s.\n"), |
| this->m_func->GetJITFunctionBody()->GetDisplayName(), this->m_func->GetDebugNumberSet(debugStringBuffer), |
| propertySym->m_propertyId, |
| usePolymorphicInlineCache ? _u("poly") : _u("mono"), doStore ? _u("store and add") : _u("add only")); |
| } |
| |
| IR::RegOpnd * opndInlineCache = IR::RegOpnd::New(TyMachPtr, this->m_func); |
| if (usePolymorphicInlineCache) |
| { |
| Lowerer::InsertMove(opndInlineCache, IR::AddrOpnd::New(propertySymOpnd->m_runtimePolymorphicInlineCache->GetInlineCachesAddr(), IR::AddrOpndKindDynamicInlineCache, this->m_func, true), instrStFld); |
| } |
| else |
| { |
| Lowerer::InsertMove(opndInlineCache, this->LoadRuntimeInlineCacheOpnd(instrStFld, propertySymOpnd, isHelper), instrStFld); |
| } |
| |
| if (typeOpnd == nullptr) |
| { |
| typeOpnd = IR::RegOpnd::New(TyMachPtr, this->m_func); |
| GenerateObjectTestAndTypeLoad(instrStFld, opndBase, typeOpnd, labelHelper); |
| } |
| |
| if (usePolymorphicInlineCache) |
| { |
| LowererMD::GenerateLoadPolymorphicInlineCacheSlot(instrStFld, opndInlineCache, typeOpnd, propertySymOpnd->m_runtimePolymorphicInlineCache->GetSize()); |
| } |
| |
| IR::LabelInstr * labelNext = nullptr; |
| IR::RegOpnd * opndTaggedType = nullptr; |
| IR::BranchInstr * lastBranchToNext = nullptr; |
| |
| if (doStore) |
| { |
| if (doInlineSlots) |
| { |
| labelNext = IR::LabelInstr::New(Js::OpCode::Label, this->m_func, isHelper); |
| lastBranchToNext = GenerateLocalInlineCacheCheck(instrStFld, typeOpnd, opndInlineCache, labelNext); |
| this->GetLowererMD()->GenerateStFldFromLocalInlineCache(instrStFld, opndBase, opndSrc, opndInlineCache, labelFallThru, true); |
| instrStFld->InsertBefore(labelNext); |
| } |
| if (doAuxSlots) |
| { |
| if (opndTaggedType == nullptr) |
| { |
| opndTaggedType = IR::RegOpnd::New(TyMachPtr, this->m_func); |
| LowererMD::GenerateLoadTaggedType(instrStFld, typeOpnd, opndTaggedType); |
| } |
| labelNext = IR::LabelInstr::New(Js::OpCode::Label, this->m_func, isHelper); |
| lastBranchToNext = GenerateLocalInlineCacheCheck(instrStFld, opndTaggedType, opndInlineCache, labelNext); |
| this->GetLowererMD()->GenerateStFldFromLocalInlineCache(instrStFld, opndBase, opndSrc, opndInlineCache, labelFallThru, false); |
| instrStFld->InsertBefore(labelNext); |
| } |
| } |
| |
| if (doAdd) |
| { |
| if (doInlineSlots) |
| { |
| labelNext = IR::LabelInstr::New(Js::OpCode::Label, this->m_func, isHelper); |
| lastBranchToNext = GenerateLocalInlineCacheCheck(instrStFld, typeOpnd, opndInlineCache, labelNext, true); |
| GenerateSetObjectTypeFromInlineCache(instrStFld, opndBase, opndInlineCache, false); |
| this->GetLowererMD()->GenerateStFldFromLocalInlineCache(instrStFld, opndBase, opndSrc, opndInlineCache, labelFallThru, true); |
| instrStFld->InsertBefore(labelNext); |
| } |
| if (doAuxSlots) |
| { |
| if (opndTaggedType == nullptr) |
| { |
| opndTaggedType = IR::RegOpnd::New(TyMachPtr, this->m_func); |
| LowererMD::GenerateLoadTaggedType(instrStFld, typeOpnd, opndTaggedType); |
| } |
| labelNext = IR::LabelInstr::New(Js::OpCode::Label, this->m_func); |
| lastBranchToNext = GenerateLocalInlineCacheCheck(instrStFld, opndTaggedType, opndInlineCache, labelNext, true); |
| GenerateAuxSlotAdjustmentRequiredCheck(instrStFld, opndInlineCache, labelHelper); |
| GenerateSetObjectTypeFromInlineCache(instrStFld, opndBase, opndInlineCache, true); |
| this->GetLowererMD()->GenerateStFldFromLocalInlineCache(instrStFld, opndBase, opndSrc, opndInlineCache, labelFallThru, false); |
| instrStFld->InsertBefore(labelNext); |
| } |
| } |
| |
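    // Retarget the final cache-miss branch at the helper and drop the unused label.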
| Assert(lastBranchToNext); |
| lastBranchToNext->SetTarget(labelHelper); |
| labelNext->Remove(); |
| |
| // $helper: |
| // CALL Helper(inlineCache, base, field, src, scriptContext) |
| // $fallthru: |
| isHelper = true; |
| |
| // Return false to indicate the original instruction was not lowered. Caller will insert the helper label. |
| return false; |
| } |
| |
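// Fast path for the one custom property we special-case: storing to a RegExp's
// lastIndex. If the base is (or can be proven here to be) a JavascriptRegExp, write
// the value straight into its lastIndexVar field and invalidate the cached integer
// index by storing JavascriptRegExp::NotCachedValue into lastIndexOrFlag.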
| bool Lowerer::GenerateFastStFldForCustomProperty(IR::Instr *const instr, IR::LabelInstr * *const labelHelperRef) |
| { |
| Assert(instr); |
| Assert(labelHelperRef); |
| Assert(!*labelHelperRef); |
| |
| switch(instr->m_opcode) |
| { |
| case Js::OpCode::StFld: |
| case Js::OpCode::StFldStrict: |
| break; |
| |
| default: |
| return false; |
| } |
| |
| IR::SymOpnd *const symOpnd = instr->GetDst()->AsSymOpnd(); |
| PropertySym *const propertySym = symOpnd->m_sym->AsPropertySym(); |
| if(propertySym->m_propertyId != Js::PropertyIds::lastIndex || !symOpnd->IsPropertySymOpnd()) |
| { |
| return false; |
| } |
| |
| const ValueType objectValueType(symOpnd->GetPropertyOwnerValueType()); |
| if(!objectValueType.IsLikelyRegExp()) |
| { |
| return false; |
| } |
| |
| if(instr->HasBailOutInfo()) |
| { |
| const IR::BailOutKind bailOutKind = instr->GetBailOutKind(); |
| if(!BailOutInfo::IsBailOutOnImplicitCalls(bailOutKind) || bailOutKind & IR::BailOutKindBits) |
| { |
| // Other bailout kinds will likely need bailout checks that would not be generated here. In particular, if a type |
| // check is necessary here to guard against downstream property accesses on the same object, the type check will |
| // fail and cause a bailout if the object is a RegExp object since the "lastIndex" property accesses are not cached. |
| return false; |
| } |
| } |
| |
| Func *const func = instr->m_func; |
| |
| IR::RegOpnd *const objectOpnd = symOpnd->CreatePropertyOwnerOpnd(func); |
| const IR::AutoReuseOpnd autoReuseObjectOpnd(objectOpnd, func); |
| |
| IR::LabelInstr *labelHelper = nullptr; |
| if(!objectOpnd->IsNotTaggedValue()) |
| { |
| // test object, 1 |
| // jnz $helper |
| if(!labelHelper) |
| { |
| *labelHelperRef = labelHelper = IR::LabelInstr::New(Js::OpCode::Label, func, true); |
| } |
| m_lowererMD.GenerateObjectTest(objectOpnd, instr, labelHelper); |
| } |
| |
| if(!objectValueType.IsObject()) |
| { |
| // cmp [object], Js::JavascriptRegExp::vtable |
| // jne $helper |
| if(!labelHelper) |
| { |
| *labelHelperRef = labelHelper = IR::LabelInstr::New(Js::OpCode::Label, func, true); |
| } |
| InsertCompareBranch( |
| IR::IndirOpnd::New(objectOpnd, 0, TyMachPtr, func), |
| LoadVTableValueOpnd(instr, VTableValue::VtableJavascriptRegExp), |
| Js::OpCode::BrNeq_A, |
| labelHelper, |
| instr); |
| objectOpnd->SetValueType(objectValueType.ToDefiniteObject()); |
| } |
| |
| // mov [object + offset(lastIndexVar)], src |
| // mov [object + offset(lastIndexOrFlag)], Js::JavascriptRegExp::NotCachedValue |
| // jmp $done |
| InsertMove( |
| IR::IndirOpnd::New(objectOpnd, Js::JavascriptRegExp::GetOffsetOfLastIndexVar(), TyVar, func), |
| instr->GetSrc1(), |
| instr); |
| InsertMove( |
| IR::IndirOpnd::New(objectOpnd, Js::JavascriptRegExp::GetOffsetOfLastIndexOrFlag(), TyUint32, func), |
| IR::IntConstOpnd::New(Js::JavascriptRegExp::NotCachedValue, TyUint32, func, true), |
| instr); |
| InsertBranch(Js::OpCode::Br, instr->GetOrCreateContinueLabel(), instr); |
| |
| return true; |
| } |
| |
| IR::RegOpnd * |
| Lowerer::GenerateIsBuiltinRecyclableObject(IR::RegOpnd *regOpnd, IR::Instr *insertInstr, IR::LabelInstr *labelHelper, bool checkObjectAndDynamicObject, IR::LabelInstr *labelContinue, bool isInHelper) |
| { |
    // CMP [srcReg], Js::DynamicObject::`vtable'
    // JEQ $fallThrough
    // MOV r1, [src1 + offset(type)]   -- load the type
    // MOV r1, [r1 + offset(typeId)]   -- load the type id
    // ADD r1, ~TypeIds_LastStaticType -- r1 = typeId - TypeIds_LastStaticType - 1
    // CMP r1, (TypeIds_LastBuiltinDynamicObject - TypeIds_LastStaticType - 1)
    // JA $helper                      -- unsigned compare: taken unless
    //                                 -- TypeIds_LastStaticType < typeId <= TypeIds_LastBuiltinDynamicObject
    // $fallThrough:
| |
| IR::LabelInstr *labelFallthrough = IR::LabelInstr::New(Js::OpCode::Label, this->m_func, isInHelper); |
| |
| if (checkObjectAndDynamicObject) |
| { |
| if (!regOpnd->IsNotTaggedValue()) |
| { |
| m_lowererMD.GenerateObjectTest(regOpnd, insertInstr, labelHelper); |
| } |
| |
| GenerateIsDynamicObject(regOpnd, insertInstr, labelFallthrough, true); |
| } |
| |
| IR::RegOpnd * typeRegOpnd = IR::RegOpnd::New(TyMachReg, this->m_func); |
| IR::RegOpnd * typeIdRegOpnd = IR::RegOpnd::New(TyInt32, this->m_func); |
| IR::IndirOpnd *indirOpnd; |
| |
| // MOV typeRegOpnd, [src1 + offset(type)] |
| indirOpnd = IR::IndirOpnd::New(regOpnd, Js::RecyclableObject::GetOffsetOfType(), TyMachReg, this->m_func); |
| InsertMove(typeRegOpnd, indirOpnd, insertInstr); |
| |
| // MOV typeIdRegOpnd, [typeRegOpnd + offset(typeId)] |
| indirOpnd = IR::IndirOpnd::New(typeRegOpnd, Js::Type::GetOffsetOfTypeId(), TyInt32, this->m_func); |
| InsertMove(typeIdRegOpnd, indirOpnd, insertInstr); |
| |
| // ADD typeIdRegOpnd, ~TypeIds_LastStaticType |
| InsertAdd(false, typeIdRegOpnd, typeIdRegOpnd, |
| IR::IntConstOpnd::New(~Js::TypeIds_LastStaticType, TyInt32, this->m_func, true), insertInstr); |
| |
| // CMP typeIdRegOpnd, (TypeIds_LastBuiltinDynamicObject - TypeIds_LastStaticType - 1) |
| InsertCompare( |
| typeIdRegOpnd, |
| IR::IntConstOpnd::New(Js::TypeIds_LastBuiltinDynamicObject - Js::TypeIds_LastStaticType - 1, TyInt32, this->m_func), |
| insertInstr); |
| |
| if (labelContinue) |
| { |
| // On success, go to continuation label. |
| InsertBranch(Js::OpCode::BrLe_A, true, labelContinue, insertInstr); |
| } |
| else |
| { |
| // On failure, go to helper. |
| InsertBranch(Js::OpCode::BrGt_A, true, labelHelper, insertInstr); |
| } |
| |
| // $fallThrough |
| insertInstr->InsertBefore(labelFallthrough); |
| |
| return typeRegOpnd; |
| } |
| |
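// Compare the object's vtable against Js::DynamicObject's. When fContinueLabel is
// true the supplied label is really a continue label and is taken on a match;
// otherwise it is a helper label taken on a mismatch.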
| void Lowerer::GenerateIsDynamicObject(IR::RegOpnd *regOpnd, IR::Instr *insertInstr, IR::LabelInstr *labelHelper, bool fContinueLabel) |
| { |
| // CMP [srcReg], Js::DynamicObject::`vtable' |
| InsertCompare( |
| IR::IndirOpnd::New(regOpnd, 0, TyMachPtr, m_func), |
| LoadVTableValueOpnd(insertInstr, VTableValue::VtableDynamicObject), |
| insertInstr); |
| |
| if (fContinueLabel) |
| { |
        // JEQ $fallThrough
| Lowerer::InsertBranch(Js::OpCode::BrEq_A, labelHelper, insertInstr); |
| } |
| else |
| { |
| // JNE $helper |
| Lowerer::InsertBranch(Js::OpCode::BrNeq_A, labelHelper, insertInstr); |
| } |
| } |
| |
| void Lowerer::GenerateIsRecyclableObject(IR::RegOpnd *regOpnd, IR::Instr *insertInstr, IR::LabelInstr *labelHelper, bool checkObjectAndDynamicObject) |
| { |
    // CMP [srcReg], Js::DynamicObject::`vtable'
    // JEQ $fallThrough
    // MOV r1, [src1 + offset(type)]   -- load the type
    // MOV r1, [r1 + offset(typeId)]   -- load the type id
    // ADD r1, ~TypeIds_LastJavascriptPrimitiveType -- r1 = typeId - TypeIds_LastJavascriptPrimitiveType - 1
    // CMP r1, (TypeIds_LastTrueJavascriptObjectType - TypeIds_LastJavascriptPrimitiveType - 1)
    // JA $helper                      -- unsigned compare: taken unless
    //                                 -- TypeIds_LastJavascriptPrimitiveType < typeId <= TypeIds_LastTrueJavascriptObjectType
    // $fallThrough:
| |
| IR::LabelInstr *labelFallthrough = IR::LabelInstr::New(Js::OpCode::Label, this->m_func); |
| |
| if (checkObjectAndDynamicObject) |
| { |
| if (!regOpnd->IsNotTaggedValue()) |
| { |
| m_lowererMD.GenerateObjectTest(regOpnd, insertInstr, labelHelper); |
| } |
| |
| this->GenerateIsDynamicObject(regOpnd, insertInstr, labelFallthrough, true); |
| } |
| |
| IR::RegOpnd * typeRegOpnd = IR::RegOpnd::New(TyMachReg, this->m_func); |
| IR::RegOpnd * typeIdRegOpnd = IR::RegOpnd::New(TyInt32, this->m_func); |
| |
| // MOV r1, [src1 + offset(type)] |
| InsertMove(typeRegOpnd, IR::IndirOpnd::New(regOpnd, Js::RecyclableObject::GetOffsetOfType(), TyMachReg, this->m_func), insertInstr); |
| |
| // MOV r1, [r1 + offset(typeId)] |
| InsertMove(typeIdRegOpnd, IR::IndirOpnd::New(typeRegOpnd, Js::Type::GetOffsetOfTypeId(), TyInt32, this->m_func), insertInstr); |
| |
| // ADD r1, ~TypeIds_LastJavascriptPrimitiveType |
| InsertAdd(false, typeIdRegOpnd, typeIdRegOpnd, IR::IntConstOpnd::New(~Js::TypeIds_LastJavascriptPrimitiveType, TyInt32, this->m_func, true), insertInstr); |
| |
| // CMP r1, (TypeIds_LastTrueJavascriptObjectType - TypeIds_LastJavascriptPrimitiveType - 1) |
| InsertCompare( |
| typeIdRegOpnd, |
| IR::IntConstOpnd::New(Js::TypeIds_LastTrueJavascriptObjectType - Js::TypeIds_LastJavascriptPrimitiveType - 1, TyInt32, this->m_func), |
| insertInstr); |
| |
    // JA $helper
    InsertBranch(Js::OpCode::BrGt_A, true, labelHelper, insertInstr);
| |
| // $fallThrough |
| insertInstr->InsertBefore(labelFallthrough); |
| } |
| |
| bool |
| Lowerer::GenerateLdThisCheck(IR::Instr * instr) |
| { |
| // |
| // If not a recyclable object, jump to $helper |
| // MOV dst, src1 -- return the object itself |
| // JMP $fallthrough |
| // $helper: |
| // (caller generates helper call) |
| // $fallthrough: |
| // |
| IR::RegOpnd * src1 = instr->GetSrc1()->AsRegOpnd(); |
| IR::LabelInstr * helper = IR::LabelInstr::New(Js::OpCode::Label, m_func, true); |
| IR::LabelInstr * fallthrough = IR::LabelInstr::New(Js::OpCode::Label, m_func); |
| |
| GenerateIsRecyclableObject(src1, instr, helper); |
| |
| // MOV dst, src1 |
| if (instr->GetDst() && !instr->GetDst()->IsEqual(src1)) |
| { |
| InsertMove(instr->GetDst(), src1, instr); |
| } |
| |
| // JMP $fallthrough |
| InsertBranch(Js::OpCode::Br, fallthrough, instr); |
| |
| // $helper: |
| // (caller generates helper call) |
| // $fallthrough: |
| instr->InsertBefore(helper); |
| instr->InsertAfter(fallthrough); |
| |
| return true; |
| } |
| |
| // |
| // TEST src, Js::AtomTag |
| // JNE $done |
| // MOV typeReg, objectSrc + offsetof(RecyclableObject::type) |
| // CMP [typeReg + offsetof(Type::typeid)], TypeIds_ActivationObject |
| // JEQ $helper |
| // $done: |
| // MOV dst, src |
| // JMP $fallthru |
| // helper: |
| // MOV dst, undefined |
| // $fallthru: |
| bool |
| Lowerer::GenerateLdThisStrict(IR::Instr* instr) |
| { |
| IR::RegOpnd * src1 = instr->GetSrc1()->AsRegOpnd(); |
| IR::RegOpnd * typeReg = IR::RegOpnd::New(TyMachReg, this->m_func); |
| IR::LabelInstr * done = IR::LabelInstr::New(Js::OpCode::Label, m_func); |
| IR::LabelInstr * fallthru = IR::LabelInstr::New(Js::OpCode::Label, m_func); |
| IR::LabelInstr * helper = IR::LabelInstr::New(Js::OpCode::Label, m_func, /*helper*/true); |
| |
| bool assign = instr->GetDst() && !instr->GetDst()->IsEqual(src1); |
| if (!src1->IsNotTaggedValue()) |
| { |
| // TEST src1, Js::AtomTag |
| // JNE $done |
| this->m_lowererMD.GenerateObjectTest(src1, instr, assign ? done : fallthru); |
| } |
| |
| IR::IndirOpnd * indirOpnd = IR::IndirOpnd::New(src1, Js::RecyclableObject::GetOffsetOfType(), TyMachReg, this->m_func); |
| Lowerer::InsertMove(typeReg, indirOpnd, instr); |
| |
| IR::IndirOpnd * typeID = IR::IndirOpnd::New(typeReg, Js::Type::GetOffsetOfTypeId(), TyInt32, this->m_func); |
| IR::Opnd * activationObject = IR::IntConstOpnd::New(Js::TypeIds_ActivationObject, TyMachReg, this->m_func); |
| Lowerer::InsertCompare(typeID, activationObject, instr); |
| |
| // JEQ $helper |
| Lowerer::InsertBranch(Js::OpCode::BrEq_A, helper, instr); |
| |
| if (assign) |
| { |
| // $done: |
| instr->InsertBefore(done); |
| |
| // MOV dst, src |
| Lowerer::InsertMove(instr->GetDst(), src1, instr); |
| } |
| |
| // JMP $fallthru |
| Lowerer::InsertBranch(Js::OpCode::Br, fallthru, instr); |
| |
| instr->InsertBefore(helper); |
| if (instr->GetDst()) |
| { |
| // MOV dst, undefined |
| Lowerer::InsertMove(instr->GetDst(), LoadLibraryValueOpnd(instr, LibraryValue::ValueUndefined), instr); |
| } |
| // $fallthru: |
| instr->InsertAfter(fallthru); |
| |
| return true; |
| } |
| |
// Given "object instanceof function": functionReg is a register holding the function,
// objectReg is a register holding the instance, and inlineCache is an IsInstInlineCache.
// We want to generate:
//
// assume the result is false
// MOV dst, Js::false
//
// fall back on the helper (which will patch the inline cache) if the function does not match the cache
// CMP functionReg, [&(inlineCache->function)]
// JNE $helper
//
// done if the object is a tagged int (a primitive is never an instance)
// TEST objectReg, Js::AtomTag
// JNE $done
//
// done (the result stays false) if the object is a primitive
// MOV typeReg, [objectReg + offsetof(RecyclableObject::type)]
// CMP [typeReg + offsetof(Type::typeId)], TypeIds_LastJavascriptPrimitiveType
// JLE $done
//
// fall back on the helper if the object's type is not the cached type
// CMP typeReg, [&(inlineCache->type)]
// JNE $helper
//
// use the cached result and fall through
// MOV dst, [&(inlineCache->result)]
// JMP $done
//
// $helper:
// $done:
| bool |
| Lowerer::GenerateFastIsInst(IR::Instr * instr) |
| { |
| IR::LabelInstr * helper = IR::LabelInstr::New(Js::OpCode::Label, m_func, true); |
| IR::LabelInstr * done = IR::LabelInstr::New(Js::OpCode::Label, m_func); |
| IR::RegOpnd * typeReg = IR::RegOpnd::New(TyMachReg, this->m_func); |
| IR::Opnd * objectSrc; |
| IR::Opnd * functionSrc; |
| intptr_t inlineCache; |
| IR::Instr * instrArg; |
| |
| // We are going to use the extra ArgOut_A instructions to lower the helper call later, |
| // so we leave them alone here and clean them up then. |
| inlineCache = instr->m_func->GetJITFunctionBody()->GetIsInstInlineCache(instr->GetSrc1()->AsIntConstOpnd()->AsUint32()); |
| Assert(instr->GetSrc2()->AsRegOpnd()->m_sym->m_isSingleDef); |
| instrArg = instr->GetSrc2()->AsRegOpnd()->m_sym->m_instrDef; |
| |
| objectSrc = instrArg->GetSrc1(); |
| Assert(instrArg->GetSrc2()->AsRegOpnd()->m_sym->m_isSingleDef); |
| instrArg = instrArg->GetSrc2()->AsRegOpnd()->m_sym->m_instrDef; |
| |
| functionSrc = instrArg->GetSrc1(); |
| Assert(instrArg->GetSrc2() == nullptr); |
| |
| // MOV dst, Js::false |
| InsertMove(instr->GetDst(), LoadLibraryValueOpnd(instr, LibraryValue::ValueFalse), instr); |
| |
| IR::RegOpnd * functionReg = GetRegOpnd(functionSrc, instr, m_func, TyMachReg); |
| |
| // CMP functionReg, [&(inlineCache->function)] |
| { |
| IR::Opnd* cacheFunction = IR::MemRefOpnd::New(inlineCache + Js::IsInstInlineCache::OffsetOfFunction(), TyMachReg, m_func, IR::AddrOpndKindDynamicIsInstInlineCacheFunctionRef); |
| InsertCompare(functionReg, cacheFunction, instr); |
| } |
| |
| // JNE helper |
| InsertBranch(Js::OpCode::BrNeq_A, helper, instr); |
| |
| IR::RegOpnd * objectReg = GetRegOpnd(objectSrc, instr, m_func, TyMachReg); |
| |
| // TEST objectReg, Js::AtomTag |
| // JNE done |
| m_lowererMD.GenerateObjectTest(objectReg, instr, done); |
| |
    // MOV typeReg, [objectReg + offsetof(RecyclableObject::type)]
| InsertMove(typeReg, IR::IndirOpnd::New(objectReg, Js::RecyclableObject::GetOffsetOfType(), TyMachReg, m_func), instr); |
| |
| // CMP [typeReg + offsetof(Type::typeid)], TypeIds_LastJavascriptPrimitiveType |
| { |
| IR::IndirOpnd * typeId = IR::IndirOpnd::New(typeReg, Js::Type::GetOffsetOfTypeId(), TyInt32, m_func); |
| IR::IntConstOpnd * lastPrimitive = IR::IntConstOpnd::New(Js::TypeId::TypeIds_LastJavascriptPrimitiveType, TyInt32, m_func); |
| InsertCompare(typeId, lastPrimitive, instr); |
| } |
| |
| // JLE done |
| InsertBranch(Js::OpCode::BrLe_A, done, instr); |
| |
    // CMP typeReg, [&(inlineCache->type)]
| { |
| IR::Opnd * cacheType = IR::MemRefOpnd::New(inlineCache + Js::IsInstInlineCache::OffsetOfType(), TyMachReg, m_func, IR::AddrOpndKindDynamicIsInstInlineCacheTypeRef); |
| InsertCompare(typeReg, cacheType, instr); |
| } |
| |
| // JNE helper |
| InsertBranch(Js::OpCode::BrNeq_A, helper, instr); |
| |
| // MOV dst, [&(inlineCache->result)] |
| { |
| IR::Opnd * cacheResult = IR::MemRefOpnd::New(inlineCache + Js::IsInstInlineCache::OffsetOfResult(), TyMachReg, m_func, IR::AddrOpndKindDynamicIsInstInlineCacheResultRef); |
| InsertMove(instr->GetDst(), cacheResult, instr); |
| } |
| |
| // JMP done |
| InsertBranch(Js::OpCode::Br, done, instr); |
| |
| // LABEL helper |
| instr->InsertBefore(helper); |
| |
| instr->InsertAfter(done); |
| |
| return true; |
| } |
| |
| void Lowerer::GenerateBooleanNegate(IR::Instr * instr, IR::Opnd * srcBool, IR::Opnd * dst) |
| { |
| // dst = src |
| // dst = dst ^ (true ^ false) (= !src) |
| Lowerer::InsertMove(dst, srcBool, instr); |
| ScriptContextInfo* sci = instr->m_func->GetScriptContextInfo(); |
| IR::AddrOpnd* xorval = IR::AddrOpnd::New(sci->GetTrueAddr() ^ sci->GetFalseAddr(), IR::AddrOpndKindDynamicMisc, instr->m_func, true); |
| InsertXor(dst, dst, xorval, instr); |
| } |
| |
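// Emit a vtable check that regSrc holds a JavascriptBoolean, returning true if a
// runtime test was actually emitted. If the value is statically known to be a
// boolean, no test is needed: we either fall through (fContinueLabel == false) or
// jump straight to the target. Otherwise the branch is taken on a match when
// fContinueLabel is true, and on a mismatch (typically to a helper) when it is false.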
| bool Lowerer::GenerateJSBooleanTest(IR::RegOpnd * regSrc, IR::Instr * insertInstr, IR::LabelInstr * labelTarget, bool fContinueLabel) |
| { |
| if (regSrc->GetValueType().IsBoolean()) |
| { |
| if (fContinueLabel) |
| { |
| // JMP $labelTarget |
| InsertBranch(Js::OpCode::Br, labelTarget, insertInstr); |
| #if DBG |
| if (labelTarget->isOpHelper) |
| { |
| labelTarget->m_noHelperAssert = true; |
| } |
| #endif |
| } |
| return false; |
| } |
| |
| IR::IndirOpnd * vtablePtrOpnd = IR::IndirOpnd::New(regSrc, 0, TyMachPtr, this->m_func); |
| IR::Opnd * jsBooleanVTable = LoadVTableValueOpnd(insertInstr, VTableValue::VtableJavascriptBoolean); |
| InsertCompare(vtablePtrOpnd, jsBooleanVTable, insertInstr); |
| |
| if (fContinueLabel) |
| { |
| // JEQ $labelTarget |
| InsertBranch(Js::OpCode::BrEq_A, labelTarget, insertInstr); |
| |
| // $helper |
| InsertLabel(true, insertInstr); |
| } |
| else |
| { |
| // JNE $labelTarget |
| InsertBranch(Js::OpCode::BrNeq_A, labelTarget, insertInstr); |
| } |
| return true; |
| } |
| |
| bool Lowerer::GenerateFastEqBoolInt(IR::Instr * instr, bool *pNeedHelper, bool isInHelper) |
| { |
| Assert(instr); |
| |
    // There are 8 modes for this function, inferred from the opcode:
    // branch vs. compare, strict vs. loose, and equality vs. inequality.
| bool isBranchNotCompare = instr->IsBranchInstr(); |
| bool isStrict = false; |
| bool isNegOp = false; |
| |
| switch (instr->m_opcode) |
| { |
| case Js::OpCode::BrSrEq_A: |
| case Js::OpCode::BrSrNotNeq_A: |
| case Js::OpCode::BrSrNeq_A: |
| case Js::OpCode::BrSrNotEq_A: |
| case Js::OpCode::CmSrEq_A: |
| case Js::OpCode::CmSrNeq_A: |
| isStrict = true; |
| break; |
| default: |
| break; |
| } |
| switch (instr->m_opcode) |
| { |
| case Js::OpCode::BrSrEq_A: |
| case Js::OpCode::BrSrNotNeq_A: |
| case Js::OpCode::CmSrEq_A: |
| case Js::OpCode::BrEq_A: |
| case Js::OpCode::BrNotNeq_A: |
| case Js::OpCode::CmEq_A: |
| isNegOp = false; |
| break; |
| case Js::OpCode::BrSrNeq_A: |
| case Js::OpCode::BrSrNotEq_A: |
| case Js::OpCode::CmSrNeq_A: |
| case Js::OpCode::BrNeq_A: |
| case Js::OpCode::BrNotEq_A: |
| case Js::OpCode::CmNeq_A: |
| isNegOp = true; |
| break; |
| default: |
| // This opcode is not one of the ones that should be handled here. |
| return false; |
| break; |
| } |
| |
| IR::Opnd *src1 = instr->GetSrc1(); |
| IR::Opnd *src2 = instr->GetSrc2(); |
| |
    // The instructions given to this _should_ all be 2-arg.
| Assert(src1 && src2); |
| if (!(src1 && src2)) |
| { |
| return false; |
| } |
| |
| // If it's a branch instruction, we'll want these to be defined |
| IR::LabelInstr *targetInstr = nullptr; |
| IR::LabelInstr *labelFallthrough = nullptr; |
| |
| if (isBranchNotCompare) |
| { |
| IR::BranchInstr * instrBranch = instr->AsBranchInstr(); |
| targetInstr = instrBranch->GetTarget(); |
| labelFallthrough = instrBranch->GetOrCreateContinueLabel(isInHelper); |
| } |
| |
| // Assume we need the helper until we can show otherwise. |
| *pNeedHelper = true; |
| // If we don't know the final types well enough at JIT time, a helper block to set |
| // the inputs to the correct types will be needed. |
| IR::LabelInstr *labelHelper = nullptr; |
| // If we're doing a compare and can handle it early, then we want to skip the helper |
| IR::LabelInstr *labelDone = instr->GetOrCreateContinueLabel(isInHelper); |
| |
    // Normalize the operand ordering
| IR::Opnd *srcBool = nullptr; |
| IR::Opnd *srcInt = nullptr; |
| if (src1->GetValueType().IsLikelyBoolean() && src2->GetValueType().IsLikelyTaggedInt()) |
| { |
| srcBool = src1; |
| srcInt = src2; |
| } |
| else if (src1->GetValueType().IsLikelyTaggedInt() && src2->GetValueType().IsLikelyBoolean()) |
| { |
| srcInt = src1; |
| srcBool = src2; |
| } |
| else |
| { |
| return false; |
| } |
| |
    // If either operand is constant, we can simplify the check. If both are constant, we can eliminate it.
| bool srcIntConst = false; |
| bool srcIntConstVal = false; |
| // If we're comparing with a number that is not 0 or 1, then the two are inequal by default |
| bool srcIntIsBoolable = false; |
| bool srcBoolConst = false; |
| bool srcBoolConstVal = false; |
| if (srcInt->IsIntConstOpnd()) |
| { |
| IR::IntConstOpnd * constSrcInt = srcInt->AsIntConstOpnd(); |
| IntConstType constIntVal = constSrcInt->GetValue(); |
| srcIntConst = true; |
| if (constIntVal == 0) |
| { |
| srcIntConstVal = false; |
| srcIntIsBoolable = true; |
| } |
| else if (constIntVal == 1) |
| { |
| srcIntConstVal = true; |
| srcIntIsBoolable = true; |
| } |
| } |
| else if (srcInt->IsAddrOpnd()) |
| { |
| IR::AddrOpnd * addrSrcInt = srcInt->AsAddrOpnd(); |
| if (!(addrSrcInt && addrSrcInt->IsVar() && Js::TaggedInt::Is(addrSrcInt->m_address))) |
| { |
| return false; |
| } |
| int32 constIntVal = Js::TaggedInt::ToInt32(addrSrcInt->m_address); |
| srcIntConst = true; |
| if (constIntVal == 0) |
| { |
| srcIntConstVal = false; |
| srcIntIsBoolable = true; |
| } |
| else if (constIntVal == 1) |
| { |
| srcIntConstVal = true; |
| srcIntIsBoolable = true; |
| } |
| } |
| else if (srcInt->IsConstOpnd()) |
| { |
| // Not handled yet |
| return false; |
| } |
| if (srcBool->IsIntConstOpnd()) |
| { |
| IR::IntConstOpnd * constSrcBool = srcBool->AsIntConstOpnd(); |
| IntConstType constIntVal = constSrcBool->GetValue(); |
| srcBoolConst = true; |
| srcBoolConstVal = constIntVal != 0; |
| } |
| else if (srcBool->IsAddrOpnd()) |
| { |
        IR::AddrOpnd * addrSrcBool = srcBool->AsAddrOpnd();
| if (!(addrSrcBool && addrSrcBool->IsVar() && Js::TaggedInt::Is(addrSrcBool->m_address))) |
| { |
| return false; |
| } |
| int32 value = Js::TaggedInt::ToInt32(addrSrcBool->m_address); |
| srcBoolConst = true; |
| srcBoolConstVal = value != 0; |
| } |
| else if (srcBool->IsConstOpnd()) |
| { |
| // Not handled yet |
| return false; |
| } |
| |
| // Do these checks here, since that way we avoid emitting instructions before exiting earlier |
    if (srcInt->GetValueType().IsTaggedInt() && srcBool->GetValueType().IsBoolean())
    {
| // ok, we know the types, so no helper needed |
| *pNeedHelper = false; |
| } |
| else |
| { |
| labelHelper = IR::LabelInstr::New(Js::OpCode::Label, this->m_func, true); |
| // check the types and jump to the helper if incorrect |
| if (!srcInt->IsConstOpnd() && !srcInt->GetValueType().IsTaggedInt()) |
| { |
| this->m_lowererMD.GenerateSmIntTest(srcInt->AsRegOpnd(), instr, labelHelper); |
| } |
| if (!srcBool->IsConstOpnd() && !srcBool->GetValueType().IsBoolean()) |
| { |
| if (!srcBool->GetValueType().IsObject()) |
| { |
| this->m_lowererMD.GenerateObjectTest(srcBool->AsRegOpnd(), instr, labelHelper, false); |
| } |
| GenerateJSBooleanTest(srcBool->AsRegOpnd(), instr, labelHelper, false); |
| } |
| } |
| |
| // At this point, we know both which operand is an integer and which is a boolean, |
| // whether either operand is constant, and what the constant true/false values are |
| // for any constant operands. This should allow us to emit some decent code. |
| |
| LibraryValue equalResultValue = !isNegOp ? LibraryValue::ValueTrue : LibraryValue::ValueFalse; |
| LibraryValue inequalResultValue = !isNegOp ? LibraryValue::ValueFalse : LibraryValue::ValueTrue; |
| IR::LabelInstr *equalResultTarget = !isNegOp ? targetInstr : labelFallthrough; |
| IR::LabelInstr *inequalResultTarget = !isNegOp ? labelFallthrough : targetInstr; |
| |
| // For the Sr instructions, we now know that the types are different, so we can immediately |
| // decide what the result will be. |
| if (isStrict) |
| { |
| if (isBranchNotCompare) |
| { |
| instr->InsertBefore(IR::BranchInstr::New(LowererMD::MDUncondBranchOpcode, inequalResultTarget, this->m_func)); |
| #if DBG |
| // Since we're not making a non-helper path to one of the branches, we need to tell |
| // DbCheckPostLower that we are going to have a non-helper label without non-helper |
| // branches. |
| // Note: this following line isn't good practice in general |
| equalResultTarget->m_noHelperAssert = true; |
| #endif |
| } |
| else |
| { |
| Lowerer::InsertMove(instr->GetDst(), this->LoadLibraryValueOpnd(instr, inequalResultValue), instr); |
| instr->InsertBefore(IR::BranchInstr::New(LowererMD::MDUncondBranchOpcode, labelDone, this->m_func)); |
| } |
| } |
| // Now that we've checked the types, we can lower some instructions to quickly do the check |
| // in the case that it's not a type-strict strict equality/inequality check. |
| else if (srcIntConst && srcBoolConst) |
| { |
| // If both arguments are constant, we can statically determine the result. |
| bool sameVal = srcIntConstVal == srcBoolConstVal; |
| if (isBranchNotCompare) |
| { |
| // For constant branches, branch to the target |
| Assert(instr); |
| IR::LabelInstr * target = sameVal && srcIntIsBoolable ? equalResultTarget : inequalResultTarget; |
| instr->InsertBefore(IR::BranchInstr::New(LowererMD::MDUncondBranchOpcode, target, this->m_func)); |
| #if DBG |
| // Since we're not making a non-helper path to one of the branches, we need to tell |
| // DbCheckPostLower that we are going to have a non-helper label without non-helper |
| // branches. |
| // Note: this following line isn't good practice in general |
| (sameVal && srcIntIsBoolable ? inequalResultTarget : equalResultTarget)->m_noHelperAssert = true; |
| #endif |
| } |
| else |
| { |
| // For constant compares, load the constant result |
| Lowerer::InsertMove(instr->GetDst(), this->LoadLibraryValueOpnd(instr, sameVal && srcIntIsBoolable ? equalResultValue : inequalResultValue), instr); |
| instr->InsertBefore(IR::BranchInstr::New(LowererMD::MDUncondBranchOpcode, labelDone, this->m_func)); |
| } |
| } |
| else if (!srcIntConst && !srcBoolConst) |
| { |
| // If neither is constant, we can still do a bit better than loading the helper |
| IR::LabelInstr * firstFalse = IR::LabelInstr::New(Js::OpCode::Label, this->m_func); |
| IR::LabelInstr * forceInequal = IR::LabelInstr::New(Js::OpCode::Label, this->m_func); |
| // We branch based on the zero-ness of the integer argument to two checks against the boolean argument |
| this->m_lowererMD.GenerateTaggedZeroTest(srcInt->AsRegOpnd(), instr, firstFalse); |
| // If it's not zero, then it's either 1, in which case it's true, or it's something else, in which |
| // case the two will compare as inequal |
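        // CMP srcInt, tagged(1) -- ((1 << VarTag_Shift) + AtomTag) is the tagged-int encoding of the integer 1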
| InsertCompareBranch( |
| IR::IntConstOpnd::New((((IntConstType)1) << Js::VarTag_Shift) + Js::AtomTag, IRType::TyVar, this->m_func, true), |
| srcInt->AsRegOpnd(), |
| Js::OpCode::BrNeq_A, |
| isBranchNotCompare ? inequalResultTarget : forceInequal, // in the case of branching, we can go straight to the inequal target; for compares, we need to load the value |
| instr, |
| true); |
| if (isBranchNotCompare) |
| { |
| // if the int evaluates to 1 (true) |
| InsertCompareBranch( |
| srcBool, |
| LoadLibraryValueOpnd(instr, LibraryValue::ValueTrue), |
| instr->m_opcode, |
| targetInstr, |
| instr); |
| instr->InsertBefore(IR::BranchInstr::New(LowererMD::MDUncondBranchOpcode, labelFallthrough, this->m_func)); |
| |
| // if the int evaluates to 0 (false) |
| instr->InsertBefore(firstFalse); |
| InsertCompareBranch( |
| srcBool, |
| LoadLibraryValueOpnd(instr, LibraryValue::ValueFalse), |
| instr->m_opcode, |
| targetInstr, |
| instr); |
| instr->InsertBefore(IR::BranchInstr::New(LowererMD::MDUncondBranchOpcode, labelFallthrough, this->m_func)); |
| } |
| else |
| { |
| // the int resolves to 1 (true) |
| // Load either the bool or its complement into the dst reg, depending on the opcode |
| if (isNegOp) |
| { |
| GenerateBooleanNegate(instr, srcBool, instr->GetDst()); |
| } |
| else |
| { |
| this->InsertMove(instr->GetDst(), srcBool, instr); |
| } |
| instr->InsertBefore(IR::BranchInstr::New(LowererMD::MDUncondBranchOpcode, labelDone, this->m_func)); |
| |
| // the int resolves to 0 (false) |
| // Handle the complement case |
| instr->InsertBefore(firstFalse); |
| if (!isNegOp) |
| { |
| GenerateBooleanNegate(instr, srcBool, instr->GetDst()); |
| } |
| else |
| { |
| this->InsertMove(instr->GetDst(), srcBool, instr); |
| } |
| instr->InsertBefore(IR::BranchInstr::New(LowererMD::MDUncondBranchOpcode, labelDone, this->m_func)); |
| |
| // the int resolves to something other than 0 or 1 (inequal to a bool) |
| instr->InsertBefore(forceInequal); |
| Lowerer::InsertMove(instr->GetDst(), this->LoadLibraryValueOpnd(instr, inequalResultValue), instr); |
| instr->InsertBefore(IR::BranchInstr::New(LowererMD::MDUncondBranchOpcode, labelDone, this->m_func)); |
| } |
| } |
| else if (srcIntConst) |
| { |
| if (isBranchNotCompare) |
| { |
| if (srcIntIsBoolable) |
| { |
| LibraryValue intval = srcIntConstVal ? LibraryValue::ValueTrue : LibraryValue::ValueFalse; |
| InsertCompareBranch( |
| srcBool, |
| LoadLibraryValueOpnd(instr, intval), |
| instr->m_opcode, |
| targetInstr, |
| instr); |
| instr->InsertBefore(IR::BranchInstr::New(LowererMD::MDUncondBranchOpcode, labelFallthrough, this->m_func)); |
| } |
| else |
| { |
| // Since a constant int that isn't 0 or 1 will always be inequal to bools, just jump to the inequal result |
| instr->InsertBefore(IR::BranchInstr::New(LowererMD::MDUncondBranchOpcode, inequalResultTarget, this->m_func)); |
| #if DBG |
| // Since we're not making a non-helper path to one of the branches, we need to tell |
| // DbCheckPostLower that we are going to have a non-helper label without non-helper |
| // branches. |
| // Note: this following line isn't good practice in general |
| equalResultTarget->m_noHelperAssert = true; |
| #endif |
| } |
| } |
| else |
| { |
| if (srcIntIsBoolable) |
| { |
| bool directPassthrough = isNegOp != srcIntConstVal; |
| if (directPassthrough) |
| { |
| // If this case is hit, the result value is the same as the value in srcBool |
| this->InsertMove(instr->GetDst(), srcBool, instr); |
| } |
| else |
| { |
| // Otherwise, the result value is the negation of the value in srcBool |
| GenerateBooleanNegate(instr, srcBool, instr->GetDst()); |
| } |
| instr->InsertBefore(IR::BranchInstr::New(LowererMD::MDUncondBranchOpcode, labelDone, this->m_func)); |
| } |
| else |
| { |
| Lowerer::InsertMove(instr->GetDst(), this->LoadLibraryValueOpnd(instr, inequalResultValue), instr); |
| instr->InsertBefore(IR::BranchInstr::New(LowererMD::MDUncondBranchOpcode, labelDone, this->m_func)); |
| } |
| } |
| } |
| else if (srcBoolConst) |
| { |
| if (isBranchNotCompare) |
| { |
| this->m_lowererMD.GenerateTaggedZeroTest(srcInt->AsRegOpnd(), instr, srcBoolConstVal ? inequalResultTarget : equalResultTarget); |
| if (srcBoolConstVal) |
| { |
                // If it's not zero, then it's either 1, which matches true, or some other
                // non-zero int, which compares unequal to any boolean.
| InsertCompareBranch(IR::IntConstOpnd::New((((IntConstType)1) << Js::VarTag_Shift) + Js::AtomTag, IRType::TyVar, this->m_func), srcInt->AsRegOpnd(), Js::OpCode::BrNeq_A, inequalResultTarget, instr, true); |
| } |
| instr->InsertBefore(IR::BranchInstr::New(LowererMD::MDUncondBranchOpcode, srcBoolConstVal ? equalResultTarget : inequalResultTarget, this->m_func)); |
| } |
| else |
| { |
| IR::LabelInstr* isNonZero = IR::LabelInstr::New(Js::OpCode::Label, this->m_func); |
| IR::LabelInstr* isZero = IR::LabelInstr::New(Js::OpCode::Label, this->m_func); |
| this->m_lowererMD.GenerateTaggedZeroTest(srcInt->AsRegOpnd(), instr, isZero); |
| if (srcBoolConstVal) |
| { |
                // If it's not zero, then it's either 1, which matches true, or some other
                // non-zero int, which compares unequal to any boolean.
| InsertCompareBranch(IR::IntConstOpnd::New((((IntConstType)1) << Js::VarTag_Shift) + Js::AtomTag, IRType::TyVar, this->m_func), srcInt->AsRegOpnd(), Js::OpCode::BrNeq_A, isZero, instr, true); |
| } |
| instr->InsertBefore(isNonZero); |
| Lowerer::InsertMove(instr->GetDst(), this->LoadLibraryValueOpnd(instr, srcBoolConstVal ? equalResultValue : inequalResultValue), instr); |
| instr->InsertBefore(IR::BranchInstr::New(LowererMD::MDUncondBranchOpcode, labelDone, this->m_func)); |
| instr->InsertBefore(isZero); |
| Lowerer::InsertMove(instr->GetDst(), this->LoadLibraryValueOpnd(instr, !srcBoolConstVal ? equalResultValue : inequalResultValue), instr); |
| instr->InsertBefore(IR::BranchInstr::New(LowererMD::MDUncondBranchOpcode, labelDone, this->m_func)); |
| } |
| } |
| if (*pNeedHelper) |
| { |
| instr->InsertBefore(labelHelper); |
| } |
| return true; |
| } |
| |
// Generate a fast path for StrictEquals when one of the sources has a definite ValueType
| bool Lowerer::GenerateFastBrOrCmEqDefinite(IR::Instr * instr, IR::JnHelperMethod helperMethod, bool *pNeedHelper, bool isBranch, bool isInHelper) |
| { |
| IR::Opnd *src1 = instr->GetSrc1(); |
| IR::Opnd *src2 = instr->GetSrc2(); |
| |
| if (!src1->GetValueType().IsDefinite() && !src2->GetValueType().IsDefinite()) |
| { |
| return false; |
| } |
| if (src1->IsEqual(src2)) |
| { |
| return false; |
| } |
| if (src1->GetValueType().IsDefinite() && src2->GetValueType().IsDefinite()) |
| { |
| if (src1->IsTaggedValue() || src2->IsTaggedValue()) |
| { |
| return true; |
| } |
| } |
| |
| IR::LabelInstr * labelBranchSuccess = nullptr; |
| IR::LabelInstr * labelBranchFailure = nullptr; |
| IR::LabelInstr * labelFallThrough = instr->GetOrCreateContinueLabel(); |
| IR::LabelInstr * labelHelper = IR::LabelInstr::New(Js::OpCode::Label, this->m_func, isInHelper); |
| |
| LibraryValue successValueType = ValueInvalid; |
| LibraryValue failureValueType = ValueInvalid; |
| |
| IR::Opnd * definiteSrc = src1->GetValueType().IsDefinite() ? src1 : src2; |
| IR::Opnd * likelySrc = src1->GetValueType().IsDefinite() ? src2 : src1; |
| |
| bool isEqual = !instr->IsNeq(); |
| |
| if (!isBranch) |
| { |
| labelBranchSuccess = IR::LabelInstr::New(Js::OpCode::Label, this->m_func, false); |
| labelBranchFailure = IR::LabelInstr::New(Js::OpCode::Label, this->m_func, false); |
| successValueType = isEqual ? LibraryValue::ValueTrue : LibraryValue::ValueFalse; |
| failureValueType = isEqual ? LibraryValue::ValueFalse : LibraryValue::ValueTrue; |
| } |
| else |
| { |
| labelBranchSuccess = isEqual ? instr->AsBranchInstr()->GetTarget() : labelFallThrough; |
| labelBranchFailure = isEqual ? labelFallThrough : instr->AsBranchInstr()->GetTarget(); |
| } |
| |
| Assert(likelySrc->IsRegOpnd()); |
| |
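    // For these definite value types, a raw Var comparison decides the answer: equal
    // pointers branch to success, anything else to failure, and no helper is needed.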
| if (definiteSrc->GetValueType().IsAnyArray() || definiteSrc->GetValueType().IsSymbol() || definiteSrc->GetValueType().IsBoolean() || definiteSrc->GetValueType().IsPrimitiveOrObject()) |
| { |
| InsertCompareBranch(src1, src2, Js::OpCode::BrEq_A, labelBranchSuccess, instr); |
| IR::BranchInstr * branch = IR::BranchInstr::New(LowererMD::MDUncondBranchOpcode, labelBranchFailure, this->m_func); |
| instr->InsertBefore(branch); |
| *pNeedHelper = false; |
| } |
| else if (definiteSrc->GetValueType().IsObject() && !CONFIG_FLAG(ESBigInt)) |
| { |
| InsertCompareBranch(src1, src2, Js::OpCode::BrEq_A, labelBranchSuccess, instr); |
| |
| if (!likelySrc->GetValueType().IsDefinite()) |
| { |
| m_lowererMD.GenerateObjectTest(likelySrc->AsRegOpnd(), instr, labelBranchFailure); |
| IR::RegOpnd * likelyTypeReg = IR::RegOpnd::New(TyMachReg, this->m_func); |
| IR::IndirOpnd * likelyType = IR::IndirOpnd::New(likelySrc->AsRegOpnd(), Js::RecyclableObject::GetOffsetOfType(), TyMachReg, this->m_func); |
| Lowerer::InsertMove(likelyTypeReg, likelyType, instr); |
| IR::Opnd *likelyFlags = IR::IndirOpnd::New(likelyTypeReg, Js::Type::GetOffsetOfFlags(), TyInt8, this->m_func); |
| InsertTestBranch(likelyFlags, IR::IntConstOpnd::New(TypeFlagMask_EngineExternal, TyInt8, this->m_func), Js::OpCode::BrNeq_A, labelHelper, instr); |
| } |
| else |
| { |
| *pNeedHelper = false; |
| } |
| |
| IR::BranchInstr * branch = IR::BranchInstr::New(LowererMD::MDUncondBranchOpcode, labelBranchFailure, this->m_func); |
| instr->InsertBefore(branch); |
| } |
| else if (definiteSrc->IsTaggedInt()) |
| { |
| InsertCompareBranch(src1, src2, Js::OpCode::BrEq_A, labelBranchSuccess, instr); |
| IR::BranchInstr * branch = IR::BranchInstr::New(LowererMD::MDUncondBranchOpcode, labelHelper, this->m_func); |
| instr->InsertBefore(branch); |
| } |
| else |
| { |
| return true; |
| } |
| |
| if (!isBranch) |
| { |
| instr->InsertBefore(labelBranchSuccess); |
| InsertMove(instr->GetDst(), LoadLibraryValueOpnd(instr, successValueType), instr); |
| InsertBranch(Js::OpCode::Br, labelFallThrough, instr); |
| |
| instr->InsertBefore(labelBranchFailure); |
| InsertMove(instr->GetDst(), LoadLibraryValueOpnd(instr, failureValueType), instr); |
| InsertBranch(Js::OpCode::Br, labelFallThrough, instr); |
| } |
| |
| instr->InsertBefore(labelHelper); |
| |
| return true; |
| } |
| |
| // Generate fast path for Strict Equals when both sources are likely boolean/likely object/likely symbol |
| bool Lowerer::GenerateFastBrEqLikely(IR::BranchInstr * instrBranch, bool *pNeedHelper, bool isInHelper) |
| { |
| IR::Opnd *src1 = instrBranch->GetSrc1(); |
| IR::Opnd *src2 = instrBranch->GetSrc2(); |
| IR::LabelInstr *targetInstr = instrBranch->GetTarget(); |
| |
| IR::LabelInstr *labelEqualLikely = IR::LabelInstr::New(Js::OpCode::Label, this->m_func, isInHelper); |
| IR::LabelInstr *labelTrue = instrBranch->GetOrCreateContinueLabel(isInHelper); |
| IR::LabelInstr *labelHelper = IR::LabelInstr::New(Js::OpCode::Label, this->m_func, true); |
| |
| *pNeedHelper = true; |
| |
| if (!this->GenerateFastBooleanAndObjectEqLikely(instrBranch, src1, src2, labelHelper, labelEqualLikely, pNeedHelper, isInHelper)) |
| { |
| return false; |
| } |
| |
| instrBranch->InsertBefore(labelEqualLikely); |
| |
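    // Both operands passed the type screen; re-emit the original conditional branch on
    // the raw Vars and lower it, falling through to the continue label when not taken.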
| IR::BranchInstr *newBranch = IR::BranchInstr::New(instrBranch->m_opcode, targetInstr, src1, src2, this->m_func); |
| instrBranch->InsertBefore(newBranch); |
| |
| this->m_lowererMD.LowerCondBranch(newBranch); |
| |
| newBranch = IR::BranchInstr::New(LowererMD::MDUncondBranchOpcode, labelTrue, this->m_func); |
| instrBranch->InsertBefore(newBranch); |
| |
| instrBranch->InsertBefore(labelHelper); |
| |
| return true; |
| } |
| |
| bool Lowerer::GenerateFastBooleanAndObjectEqLikely(IR::Instr * instr, IR::Opnd *src1, IR::Opnd *src2, IR::LabelInstr * labelHelper, IR::LabelInstr * labelEqualLikely, bool *pNeedHelper, bool isInHelper) |
| { |
| *pNeedHelper = true; |
| |
| if (!src1 || !src2) |
| { |
| return false; |
| } |
| |
| bool isStrictCompare = false; |
| bool isStrictMode = this->m_func->GetJITFunctionBody()->IsStrictMode(); |
| |
| switch (instr->m_opcode) |
| { |
| case Js::OpCode::BrSrEq_A: |
| case Js::OpCode::BrSrNotNeq_A: |
| case Js::OpCode::BrSrNeq_A: |
| case Js::OpCode::BrSrNotEq_A: |
| case Js::OpCode::CmSrEq_A: |
| case Js::OpCode::CmSrNeq_A: |
| isStrictCompare = true; |
| break; |
| } |
| |
| if (src1->GetValueType().IsLikelyBoolean() && src2->GetValueType().IsLikelyBoolean()) |
| { |
| // |
| // Booleans |
| // |
| if (isStrictCompare) |
| { |
| if (!src1->GetValueType().IsBoolean() && !src2->GetValueType().IsBoolean()) |
| { |
| this->m_lowererMD.GenerateObjectTest(src2->AsRegOpnd(), instr, labelHelper, false); |
| if (GenerateJSBooleanTest(src2->AsRegOpnd(), instr, labelEqualLikely, true)) |
| { |
| instr->InsertBefore(IR::BranchInstr::New(LowererMD::MDUncondBranchOpcode, labelHelper, this->m_func)); |
| } |
| } |
| else |
| { |
| *pNeedHelper = false; |
| } |
| } |
| else |
| { |
| this->m_lowererMD.GenerateObjectTest(src1->AsRegOpnd(), instr, labelHelper, false); |
| GenerateJSBooleanTest(src1->AsRegOpnd(), instr, labelHelper, false); |
| this->m_lowererMD.GenerateObjectTest(src2->AsRegOpnd(), instr, labelHelper, false); |
| if (GenerateJSBooleanTest(src2->AsRegOpnd(), instr, labelEqualLikely, true)) |
| { |
| instr->InsertBefore(IR::BranchInstr::New(LowererMD::MDUncondBranchOpcode, labelHelper, this->m_func)); |
| } |
| } |
| } |
| else if (src1->GetValueType().HasBeenObject() && src2->GetValueType().HasBeenObject()) |
| { |
| // |
| // Objects |
| // |
| IR::LabelInstr *labelTypeIdCheck = IR::LabelInstr::New(Js::OpCode::Label, this->m_func, isInHelper); |
| |
| if (!isStrictCompare) |
| { |
| // If not strictBr, verify both sides are dynamic objects |
| this->m_lowererMD.GenerateObjectTest(src1->AsRegOpnd(), instr, labelHelper, false); |
| this->m_lowererMD.GenerateObjectTest(src2->AsRegOpnd(), instr, labelHelper, false); |
| GenerateIsDynamicObject(src1->AsRegOpnd(), instr, labelTypeIdCheck, false); |
| } |
| else |
| { |
| this->m_lowererMD.GenerateObjectTest(src2->AsRegOpnd(), instr, labelHelper, false); |
| } |
| GenerateIsDynamicObject(src2->AsRegOpnd(), instr, labelEqualLikely, true); |
| |
| instr->InsertBefore(labelTypeIdCheck); |
| |
| if (isStrictMode) |
| { |
| labelTypeIdCheck->isOpHelper = true; |
| IR::BranchInstr *branchToHelper = IR::BranchInstr::New(LowererMD::MDUncondBranchOpcode, labelHelper, this->m_func); |
| instr->InsertBefore(branchToHelper); |
| } |
| else |
| { |
| if (!ExternalLowerer::TryGenerateFastExternalEqTest(src1, src2, instr, labelHelper, labelEqualLikely, this, isStrictCompare, isInHelper)) |
| { |
| if (!isStrictCompare) |
| { |
| GenerateIsBuiltinRecyclableObject(src1->AsRegOpnd(), instr, labelHelper, false /*checkObjectAndDynamicObject*/, nullptr /*labelContinue*/, isInHelper); |
| } |
| GenerateIsBuiltinRecyclableObject(src2->AsRegOpnd(), instr, labelHelper, false /*checkObjectAndDynamicObject*/, nullptr /*labelContinue*/, isInHelper); |
| } |
| } |
| } |
| else if (src1->GetValueType().IsLikelySymbol() && src2->GetValueType().IsLikelySymbol()) |
| { |
| this->GenerateSymbolTest(src1->AsRegOpnd(), instr, labelHelper, nullptr, true); |
| this->GenerateSymbolTest(src2->AsRegOpnd(), instr, labelHelper, nullptr, true); |
| } |
| else |
| { |
| return false; |
| } |
| |
| return true; |
| } |
| |
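| // Fast path for CmEq_A / CmNeq_A / CmSrEq_A / CmSrNeq_A over likely-boolean/object/symbol operands: emits |
| // the type checks via GenerateFastBooleanAndObjectEqLikely, then materializes true/false into dst from a |
| // direct comparison of the sources. Returns false if no fast path could be generated. |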
| bool Lowerer::GenerateFastCmEqLikely(IR::Instr * instr, bool *pNeedHelper, bool isInHelper) |
| { |
| *pNeedHelper = false; |
| |
| Assert(instr->m_opcode == Js::OpCode::CmSrEq_A || |
| instr->m_opcode == Js::OpCode::CmSrNeq_A || |
| instr->m_opcode == Js::OpCode::CmEq_A || |
| instr->m_opcode == Js::OpCode::CmNeq_A); |
| |
| bool isNegOp = false; |
| bool isStrict = false; |
| switch (instr->m_opcode) |
| { |
| case Js::OpCode::CmSrEq_A: |
| isStrict = true; |
| break; |
| |
| case Js::OpCode::CmSrNeq_A: |
| isStrict = true; |
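| // Fall through. |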
| case Js::OpCode::CmNeq_A: |
| isNegOp = true; |
| break; |
| } |
| |
| IR::Opnd *src1 = instr->GetSrc1(); |
| IR::Opnd *src2 = instr->GetSrc2(); |
| |
| IR::LabelInstr *labelEqualLikely = IR::LabelInstr::New(Js::OpCode::Label, this->m_func, isInHelper); |
| IR::LabelInstr *labelDone = IR::LabelInstr::New(Js::OpCode::Label, this->m_func, isInHelper); |
| IR::LabelInstr *labelHelper = IR::LabelInstr::New(Js::OpCode::Label, this->m_func, true); |
| |
| if (!this->GenerateFastBooleanAndObjectEqLikely(instr, src1, src2, labelHelper, labelEqualLikely, pNeedHelper, isInHelper)) |
| { |
| return false; |
| } |
| |
| instr->InsertBefore(labelEqualLikely); |
| |
| // $labelEqualLikely |
| // |
| // We only come here when: |
| //    src2 is a dynamic object (matches the Js::DynamicObject vtable); for a non-strict Cm both src1 and src2 must be dynamic objects |
| //    or src2 is a built-in recyclable object (typeId > TypeIds_LastStaticType && typeId <= TypeIds_LastBuiltinDynamicObject) |
| //    or src2 is a CustomExternalType with no operations usage flags |
| // |
| // if src1->IsEqual(src2) |
| //     MOV DST SUCCESS |
| //     JMP $DONE |
| // else |
| //     CMP src1, src2 |
| //     JEQ $cmEqual |
| //     MOV DST FAILURE |
| //     JMP $DONE |
| // $cmEqual: |
| //     MOV DST SUCCESS |
| //     JMP $DONE |
| |
| LibraryValue successValueType = !isNegOp ? LibraryValue::ValueTrue : LibraryValue::ValueFalse; |
| LibraryValue failureValueType = !isNegOp ? LibraryValue::ValueFalse : LibraryValue::ValueTrue; |
| |
| if (src1->IsEqual(src2)) |
| { |
| Lowerer::InsertMove(instr->GetDst(), this->LoadLibraryValueOpnd(instr, successValueType), instr); |
| instr->InsertBefore(IR::BranchInstr::New(this->m_lowererMD.MDUncondBranchOpcode, labelDone, this->m_func)); |
| } |
| else |
| { |
| IR::LabelInstr *cmEqual = IR::LabelInstr::New(Js::OpCode::Label, this->m_func, isInHelper); |
| this->InsertCompareBranch(src1, src2, isStrict ? Js::OpCode::BrSrEq_A : Js::OpCode::BrEq_A, cmEqual, instr); |
| Lowerer::InsertMove(instr->GetDst(), this->LoadLibraryValueOpnd(instr, failureValueType), instr); |
| |
| instr->InsertBefore(IR::BranchInstr::New(this->m_lowererMD.MDUncondBranchOpcode, labelDone, this->m_func)); |
| |
| instr->InsertBefore(cmEqual); |
| Lowerer::InsertMove(instr->GetDst(), this->LoadLibraryValueOpnd(instr, successValueType), instr); |
| |
| instr->InsertBefore(IR::BranchInstr::New(this->m_lowererMD.MDUncondBranchOpcode, labelDone, this->m_func)); |
| } |
| |
| instr->InsertBefore(labelHelper); |
| instr->InsertAfter(labelDone); |
| |
| return true; |
| } |
| |
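| // Fast path for equality branches/compares when the operands look like strings. Returns false (no fast |
| // path) unless both sources are register operands that are not tagged ints and carry string-ish value |
| // types; otherwise emits an inline string equality check (GenerateFastStringCheck) with the helper call as |
| // the slow path. |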
| bool |
| Lowerer::GenerateFastBrOrCmString(IR::Instr* instr) |
| { |
| IR::RegOpnd *srcReg1 = instr->GetSrc1()->IsRegOpnd() ? instr->GetSrc1()->AsRegOpnd() : nullptr; |
| IR::RegOpnd *srcReg2 = instr->GetSrc2()->IsRegOpnd() ? instr->GetSrc2()->AsRegOpnd() : nullptr; |
| |
| if (!srcReg1 || |
| !srcReg2 || |
| srcReg1->IsTaggedInt() || |
| srcReg2->IsTaggedInt() || |
| (!srcReg1->GetValueType().HasHadStringTag() && !srcReg2->GetValueType().IsString()) || |
| (!srcReg2->GetValueType().HasHadStringTag() && !srcReg1->GetValueType().IsString())) |
| { |
| return false; |
| } |
| |
| IR::LabelInstr *labelHelper = IR::LabelInstr::New(Js::OpCode::Label, this->m_func, true); |
| IR::LabelInstr *labelBranchFail = nullptr; |
| IR::LabelInstr *labelBranchSuccess = nullptr; |
| |
| bool isEqual = false; |
| bool isStrict = false; |
| bool isBranch = true; |
| bool isCmNegOp = false; |
| |
| switch (instr->m_opcode) |
| { |
| case Js::OpCode::BrSrEq_A: |
| case Js::OpCode::BrSrNotNeq_A: |
| isStrict = true; |
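| // Fall through. |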
| case Js::OpCode::BrEq_A: |
| case Js::OpCode::BrNotNeq_A: |
| labelBranchFail = IR::LabelInstr::New(Js::OpCode::Label, this->m_func); |
| labelBranchSuccess = instr->AsBranchInstr()->GetTarget(); |
| instr->InsertAfter(labelBranchFail); |
| isEqual = true; |
| break; |
| |
| case Js::OpCode::BrSrNeq_A: |
| case Js::OpCode::BrSrNotEq_A: |
| isStrict = true; |
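| // Fall through. |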
| case Js::OpCode::BrNeq_A: |
| case Js::OpCode::BrNotEq_A: |
| labelBranchSuccess = IR::LabelInstr::New(Js::OpCode::Label, this->m_func); |
| labelBranchFail = instr->AsBranchInstr()->GetTarget(); |
| instr->InsertAfter(labelBranchSuccess); |
| isEqual = false; |
| break; |
| |
| case Js::OpCode::CmSrEq_A: |
| isStrict = true; |
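| // Fall through. |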
| case Js::OpCode::CmEq_A: |
| isEqual = true; |
| isBranch = false; |
| break; |
| |
| case Js::OpCode::CmSrNeq_A: |
| isStrict = true; |
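| // Fall through. |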
| case Js::OpCode::CmNeq_A: |
| isEqual = false; |
| isBranch = false; |
| isCmNegOp = true; |
| break; |
| |
| default: |
| Assume(UNREACHED); |
| } |
| |
| if (!isBranch) |
| { |
| labelBranchSuccess = IR::LabelInstr::New(Js::OpCode::Label, this->m_func); |
| labelBranchFail = IR::LabelInstr::New(Js::OpCode::Label, this->m_func); |
| } |
| |
| GenerateFastStringCheck(instr, srcReg1, srcReg2, isEqual, isStrict, labelHelper, labelBranchSuccess, labelBranchFail); |
| |
| IR::LabelInstr *labelFallthrough = IR::LabelInstr::New(Js::OpCode::Label, this->m_func); |
| |
| if (!isBranch) |
| { |
| const LibraryValue successValueType = !isCmNegOp ? LibraryValue::ValueTrue : LibraryValue::ValueFalse; |
| const LibraryValue failureValueType = !isCmNegOp ? LibraryValue::ValueFalse : LibraryValue::ValueTrue; |
| |
| instr->InsertBefore(labelBranchSuccess); |
| InsertMove(instr->GetDst(), LoadLibraryValueOpnd(instr, successValueType), instr); |
| InsertBranch(Js::OpCode::Br, labelFallthrough, instr); |
| |
| instr->InsertBefore(labelBranchFail); |
| InsertMove(instr->GetDst(), LoadLibraryValueOpnd(instr, failureValueType), instr); |
| InsertBranch(Js::OpCode::Br, labelFallthrough, instr); |
| } |
| |
| instr->InsertBefore(labelHelper); |
| |
| instr->InsertAfter(labelFallthrough); |
| |
| #if DBG |
| // The fast-path for strings assumes the case where 2 strings are equal is rare, and marks that path as 'helper'. |
| // This breaks the helper label DbChecks, as it can result in non-helper blocks being reachable only from helper blocks. |
| // Use m_isHelperToNonHelperBranch and m_noHelperAssert to fix this. |
| IR::Instr *blockEndInstr; |
| |
| if (isEqual) |
| { |
| blockEndInstr = labelHelper->GetNextBranchOrLabel(); |
| } |
| else |
| { |
| blockEndInstr = instr->GetNextBranchOrLabel(); |
| } |
| |
| if (blockEndInstr->IsBranchInstr()) |
| { |
| blockEndInstr->AsBranchInstr()->m_isHelperToNonHelperBranch = true; |
| } |
| |
| labelFallthrough->m_noHelperAssert = true; |
| #endif |
| |
| return true; |
| } |
| |
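| // Emits the inline string equality sequence sketched below. The caller supplies the success/fail/helper |
| // labels and is responsible for the code behind them; this only emits the checks and the wmemcmp call. |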
| bool |
| Lowerer::GenerateFastStringCheck(IR::Instr *instr, IR::RegOpnd *srcReg1, IR::RegOpnd *srcReg2, bool isEqual, bool isStrict, IR::LabelInstr *labelHelper, IR::LabelInstr *labelBranchSuccess, IR::LabelInstr *labelBranchFail) |
| { |
| Assert(instr->m_opcode == Js::OpCode::BrSrEq_A || |
| instr->m_opcode == Js::OpCode::BrSrNeq_A || |
| instr->m_opcode == Js::OpCode::BrEq_A || |
| instr->m_opcode == Js::OpCode::BrNeq_A || |
| instr->m_opcode == Js::OpCode::BrSrNotEq_A || |
| instr->m_opcode == Js::OpCode::BrSrNotNeq_A || |
| instr->m_opcode == Js::OpCode::BrNotEq_A || |
| instr->m_opcode == Js::OpCode::BrNotNeq_A || |
| instr->m_opcode == Js::OpCode::CmEq_A || |
| instr->m_opcode == Js::OpCode::CmNeq_A || |
| instr->m_opcode == Js::OpCode::CmSrEq_A || |
| instr->m_opcode == Js::OpCode::CmSrNeq_A); |
| |
| // if src1 is not a string |
| //     generate an object test; if it fails, jump to $helper |
| //     compare the type against the string type; if it doesn't match, jump to $helper |
| // |
| // if the src1 and src2 pointers match, goto $success |
| // |
| // if this is a strict compare, generate the same string test for src2 and jump to $failure on any failure |
| // else (loose compare), generate the string test for src2 and jump to $helper on any failure |
| // |
| // compare the lengths of src1 and src2; if they are not equal, goto $failure |
| // |
| // if src1 is not a flat string, jump to $helper |
| // if src2 is not a flat string, jump to $helper |
| // |
| // if the first characters of src1 and src2 don't match, goto $failure |
| // |
| // eax = wmemcmp(src1 flat string, src2 flat string, length) ; length is in char16 units |
| // |
| // test eax (the result of wmemcmp) |
| // if zero jump to $success, else to $failure |
| // |
| // $success |
| //     jmp to $fallthrough |
| // $failure |
| //     jmp to $fallthrough |
| // $helper |
| // |
| // $fallthrough |
| |
| // Generates: |
| // GenerateObjectTest(src1); |
| // CMP srcReg1, srcReg2 |
| // JEQ $success |
| // MOV s1, [srcReg1 + offset(Type)] |
| // CMP type, static_string_type |
| // JNE $helper |
| // GenerateObjectTest(src2); |
| // MOV s2, [srcReg2 + offset(Type)] |
| // CMP type, static_string_type |
| // JNE $fail ; if src1 is a string but src2 is not, then src1 !== src2 under a strict compare |
| // MOV s3, [srcReg1,offset(m_charLength)] |
| // CMP [srcReg2,offset(m_charLength)], s3 |
| // JNE $fail <--- length check done |
| // MOV s4, [srcReg1,offset(m_pszValue)] |
| // CMP s4, 0 |
| // JEQ $helper |
| // MOV s5, [srcReg2,offset(m_pszValue)] |
| // CMP s5, 0 |
| // JEQ $helper |
| // MOV s6,[s4] |
| // CMP [s5], s6 -First character comparison |
| // JNE $fail |
| // eax = wmemcmp(src1String, src2String, length) ; length is in char16 units |
| // TEST eax, eax |
| // JEQ $success |
| // JMP $fail |
| IR::Instr* instrInsert = instr; |
| |
| GenerateStringTest(srcReg1, instrInsert, labelHelper); |
| |
| if (srcReg1->IsEqual(srcReg2)) |
| { |
| InsertBranch(Js::OpCode::Br, labelBranchSuccess, instrInsert); |
| #if DBG |
| if (instr->IsBranchInstr()) |
| { |
| // we might have other cases on the helper path that generate a branch to the target |
| instr->AsBranchInstr()->GetTarget()->m_noHelperAssert = true; |
| } |
| #endif |
| return true; |
| } |
| // CMP srcReg1, srcReg2 - Ptr comparison |
| // JEQ $branchSuccess |
| InsertCompareBranch(srcReg1, srcReg2, Js::OpCode::BrEq_A, labelBranchSuccess, instrInsert); |
| |
| if (isStrict) |
| { |
| GenerateStringTest(srcReg2, instrInsert, labelBranchFail); |
| } |
| else |
| { |
| GenerateStringTest(srcReg2, instrInsert, labelHelper); |
| } |
| |
| if (isStrict && (srcReg1->m_sym->m_isStrEmpty || srcReg2->m_sym->m_isStrEmpty)) |
| { |
| IR::RegOpnd* otherOpnd = srcReg1->m_sym->m_isStrEmpty ? srcReg2 : srcReg1; |
| InsertCompareBranch(IR::IndirOpnd::New(otherOpnd, Js::JavascriptString::GetOffsetOfcharLength(), TyUint32, m_func), IR::IntConstOpnd::New(0, TyUint32, this->m_func, true), Js::OpCode::BrNeq_A, labelBranchFail, instrInsert); |
| return true; |
| } |
| |
| // MOV s3, [srcReg1,offset(m_charLength)] |
| // CMP [srcReg2,offset(m_charLength)], s3 |
| // JNE $branchfail |
| |
| IR::RegOpnd * src1LengthOpnd = IR::RegOpnd::New(TyUint32, m_func); |
| InsertMove(src1LengthOpnd, IR::IndirOpnd::New(srcReg1, Js::JavascriptString::GetOffsetOfcharLength(), TyUint32, m_func), instrInsert); |
| InsertCompareBranch(IR::IndirOpnd::New(srcReg2, Js::JavascriptString::GetOffsetOfcharLength(), TyUint32, m_func), src1LengthOpnd, Js::OpCode::BrNeq_A, labelBranchFail, instrInsert); |
| |
| // MOV s4, [src1,offset(m_pszValue)] |
| // CMP s4, 0 |
| // JEQ $helper |
| // MOV s5, [src2,offset(m_pszValue)] |
| // CMP s5, 0 |
| // JEQ $helper |
| |
| IR::RegOpnd * src1FlatString = IR::RegOpnd::New(TyMachPtr, m_func); |
| InsertMove(src1FlatString, IR::IndirOpnd::New(srcReg1, Js::JavascriptString::GetOffsetOfpszValue(), TyMachPtr, m_func), instrInsert); |
| InsertCompareBranch(src1FlatString, IR::IntConstOpnd::New(0, TyMachPtr, m_func), Js::OpCode::BrEq_A, labelHelper, instrInsert); |
| |
| IR::RegOpnd * src2FlatString = IR::RegOpnd::New(TyMachPtr, m_func); |
| InsertMove(src2FlatString, IR::IndirOpnd::New(srcReg2, Js::JavascriptString::GetOffsetOfpszValue(), TyMachPtr, m_func), instrInsert); |
| InsertCompareBranch(src2FlatString, IR::IntConstOpnd::New(0, TyMachPtr, m_func), Js::OpCode::BrEq_A, labelHelper, instrInsert); |
| |
| // MOV s6,[s4] |
| // CMP [s5], s6 -First character comparison |
| // JNE $branchfail |
| |
| IR::RegOpnd * src1FirstChar = IR::RegOpnd::New(TyUint16, m_func); |
| InsertMove(src1FirstChar, IR::IndirOpnd::New(src1FlatString, 0, TyUint16, m_func), instrInsert); |
| InsertCompareBranch(IR::IndirOpnd::New(src2FlatString, 0, TyUint16, m_func), src1FirstChar, Js::OpCode::BrNeq_A, labelBranchFail, instrInsert); |
| |
| // eax = wmemcmp(src1String, src2String, length) |
| |
| m_lowererMD.LoadHelperArgument(instr, src1LengthOpnd); |
| m_lowererMD.LoadHelperArgument(instr, src1FlatString); |
| m_lowererMD.LoadHelperArgument(instr, src2FlatString); |
| IR::RegOpnd *dstOpnd = IR::RegOpnd::New(TyInt32, this->m_func); |
| IR::Instr *instrCall = IR::Instr::New(Js::OpCode::Call, dstOpnd, IR::HelperCallOpnd::New(IR::HelperWMemCmp, m_func), m_func); |
| instr->InsertBefore(instrCall); |
| m_lowererMD.LowerCall(instrCall, 3); |
| |
| // TEST eax, eax |
| // JEQ success |
| InsertTestBranch(dstOpnd, dstOpnd, Js::OpCode::BrEq_A, labelBranchSuccess, instrInsert); |
| // JMP fail |
| InsertBranch(Js::OpCode::Br, labelBranchFail, instrInsert); |
| |
| return true; |
| } |
| |
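| // Lowers BrTrue_A / BrFalse_A with inline ToBoolean fast paths for null, undefined, tagged ints, floats, |
| // booleans, strings, and objects; anything left over falls to the JavascriptConversion::ToBoolean helper |
| // call emitted at the end. Always returns false to tell the caller that no further lowering is needed. |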
| bool Lowerer::GenerateFastBrBool(IR::BranchInstr *const instr) |
| { |
| Assert(instr); |
| Assert(instr->m_opcode == Js::OpCode::BrFalse_A || instr->m_opcode == Js::OpCode::BrTrue_A); |
| |
| Func *const func = instr->m_func; |
| |
| if(!instr->GetSrc1()->IsRegOpnd()) |
| { |
| LowererMD::ChangeToAssign(instr->HoistSrc1(Js::OpCode::Ld_A)); |
| } |
| IR::RegOpnd *const src = instr->GetSrc1()->Copy(func)->AsRegOpnd(); |
| const IR::AutoReuseOpnd autoReuseSrc(src, func); |
| const ValueType srcOriginalValueType(src->GetValueType()); |
| ValueType srcValueType(srcOriginalValueType); |
| |
| IR::LabelInstr *const labelTarget = instr->GetTarget(); |
| IR::LabelInstr *const labelFallthrough = instr->GetOrCreateContinueLabel(); |
| if(labelTarget == labelFallthrough) |
| { |
| // Nothing to do |
| instr->Remove(); |
| return false; |
| } |
| |
| const bool branchOnFalse = instr->m_opcode == Js::OpCode::BrFalse_A; |
| IR::LabelInstr *const labelFalse = branchOnFalse ? labelTarget : labelFallthrough; |
| IR::LabelInstr *const labelTrue = branchOnFalse ? labelFallthrough : labelTarget; |
| const Js::OpCode compareWithFalseBranchToTargetOpCode = branchOnFalse ? Js::OpCode::BrEq_A : Js::OpCode::BrNeq_A; |
| IR::LabelInstr *lastLabelBeforeHelper = nullptr; |
| |
| /// Typespec'd float |
| if (instr->GetSrc1()->GetType() == TyFloat64) |
| { |
| InsertFloatCheckForZeroOrNanBranch(instr->GetSrc1(), branchOnFalse, labelTarget, labelFallthrough, instr); |
| Lowerer::InsertBranch(Js::OpCode::Br, labelFallthrough, instr); |
| instr->Remove(); |
| return false; |
| } |
| |
| //////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// |
| // Null fast path |
| |
| if (srcValueType.HasBeenNull() || srcOriginalValueType.IsUninitialized()) |
| { |
| if(srcValueType.IsNull()) |
| { |
| // jmp $false |
| InsertBranch(Js::OpCode::Br, labelFalse, instr); |
| |
| // Skip lowering call to helper |
| Assert(instr->m_prev->IsBranchInstr()); |
| instr->Remove(); |
| return false; |
| } |
| |
| // cmp src, null |
| // je $false |
| InsertCompareBranch( |
| src, |
| LoadLibraryValueOpnd(instr, LibraryValue::ValueNull), |
| Js::OpCode::BrEq_A, |
| labelFalse, |
| instr); |
| |
| src->SetValueType(srcValueType = srcValueType.SetIsNotAnyOf(ValueType::Null)); |
| } |
| |
| //////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// |
| // Undefined fast path |
| |
| if(srcValueType.HasBeenUndefined() || srcOriginalValueType.IsUninitialized()) |
| { |
| if(srcValueType.IsUndefined()) |
| { |
| // jmp $false |
| InsertBranch(Js::OpCode::Br, labelFalse, instr); |
| |
| // Skip lowering call to helper |
| Assert(instr->m_prev->IsBranchInstr()); |
| instr->Remove(); |
| return false; |
| } |
| |
| // cmp src, undefined |
| // je $false |
| InsertCompareBranch( |
| src, |
| LoadLibraryValueOpnd(instr, LibraryValue::ValueUndefined), |
| Js::OpCode::BrEq_A, |
| labelFalse, |
| instr); |
| |
| src->SetValueType(srcValueType = srcValueType.SetIsNotAnyOf(ValueType::Undefined)); |
| } |
| |
| //////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// |
| // Tagged int fast path |
| |
| const bool isNotInt = src->IsNotInt(); |
| bool checkedForTaggedInt = isNotInt; |
| if( ( |
| srcValueType.HasBeenInt() || |
| srcValueType.HasBeenUnknownNumber() || |
| srcOriginalValueType.IsUninitialized() |
| ) && !isNotInt) |
| { |
| checkedForTaggedInt = true; |
| IR::LabelInstr *notTaggedIntLabel = nullptr; |
| if(!src->IsTaggedInt()) |
| { |
| // test src, 1 |
| // jz $notTaggedInt |
| notTaggedIntLabel = IR::LabelInstr::New(Js::OpCode::Label, func); |
| m_lowererMD.GenerateSmIntTest(src, instr, notTaggedIntLabel); |
| } |
| |
| // cmp src, tag(0) |
| // je/jne $target |
| m_lowererMD.GenerateTaggedZeroTest(src, instr); |
| Lowerer::InsertBranch(compareWithFalseBranchToTargetOpCode, labelTarget, instr); |
| |
| if(src->IsTaggedInt()) |
| { |
| // Skip lowering call to helper |
| Assert(instr->m_prev->IsBranchInstr()); |
| instr->Remove(); |
| return false; |
| } |
| |
| // jmp $fallthrough |
| Lowerer::InsertBranch(Js::OpCode::Br, labelFallthrough, instr); |
| |
| // $notTaggedInt: |
| if(notTaggedIntLabel) |
| { |
| instr->InsertBefore(notTaggedIntLabel); |
| lastLabelBeforeHelper = notTaggedIntLabel; |
| } |
| } |
| |
| //////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// |
| // Float fast path |
| |
| bool generateFloatTest = srcValueType.IsLikelyFloat(); |
| #ifdef _M_IX86 |
| if (!AutoSystemInfo::Data.SSE2Available()) |
| { |
| generateFloatTest = false; |
| } |
| #endif |
| bool checkedForTaggedFloat = |
| #if FLOATVAR |
| srcValueType.IsNotNumber(); |
| #else |
| true; // there are no tagged floats, indicate that it has been checked |
| #endif |
| if (generateFloatTest) |
| { |
| // if(srcValueType.IsFloat()) // skip tagged int check? |
| // |
| // ValueType::IsFloat() does not guarantee that the storage is not in a tagged int. |
| // The tagged int check is necessary. It does, however, guarantee that as long as the value is not |
| // stored in a tagged int, it is definitely stored in a JavascriptNumber/TaggedFloat. |
| |
| IR::LabelInstr *const notFloatLabel = IR::LabelInstr::New(Js::OpCode::Label, func); |
| if(!checkedForTaggedInt) |
| { |
| checkedForTaggedInt = true; |
| m_lowererMD.GenerateSmIntTest(src, instr, notFloatLabel, nullptr, true); |
| } |
| |
| // cmp [src], JavascriptNumber::vtable |
| // jne $notFloat |
| #if FLOATVAR |
| checkedForTaggedFloat = true; |
| IR::RegOpnd *const floatOpnd = m_lowererMD.CheckFloatAndUntag(src, instr, notFloatLabel); |
| #else |
| m_lowererMD.GenerateFloatTest(src, instr, notFloatLabel); |
| IR::IndirOpnd *const floatOpnd = IR::IndirOpnd::New(src, Js::JavascriptNumber::GetValueOffset(), TyMachDouble, func); |
| #endif |
| |
| // cmp src, 0.0 |
| // jp $false |
| // je/jne $target |
| // jmp $fallthrough |
| InsertFloatCheckForZeroOrNanBranch(floatOpnd, branchOnFalse, labelTarget, labelFallthrough, instr); |
| Lowerer::InsertBranch(Js::OpCode::Br, labelFallthrough, instr); |
| |
| // $notFloat: |
| instr->InsertBefore(notFloatLabel); |
| lastLabelBeforeHelper = notFloatLabel; |
| |
| src->SetValueType(srcValueType = srcValueType.SetIsNotAnyOf(ValueType::AnyNumber)); |
| } |
| |
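| // EnsureObjectTest lazily emits a single tagged-value test before the first fast path that inspects the |
| // object's type, creating the shared helper label on demand. It is a no-op once both the tagged-int and |
| // tagged-float possibilities have been ruled out above. |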
| IR::LabelInstr *labelHelper = nullptr; |
| bool _didObjectTest = checkedForTaggedInt && checkedForTaggedFloat; |
| const auto EnsureObjectTest = [&]() |
| { |
| if(_didObjectTest) |
| { |
| return; |
| } |
| if(!labelHelper) |
| { |
| labelHelper = IR::LabelInstr::New(Js::OpCode::Label, func, true); |
| } |
| m_lowererMD.GenerateObjectTest(src, instr, labelHelper); |
| _didObjectTest = true; |
| }; |
| |
| //////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// |
| // Boolean fast path |
| |
| if (srcValueType.HasBeenBoolean() || srcOriginalValueType.IsUninitialized()) |
| { |
| IR::LabelInstr *notBooleanLabel = nullptr; |
| if (!srcValueType.IsBoolean()) |
| { |
| EnsureObjectTest(); |
| |
| // cmp [src], JavascriptBoolean::vtable |
| // jne $notBoolean |
| notBooleanLabel = IR::LabelInstr::New(Js::OpCode::Label, func); |
| InsertCompareBranch( |
| IR::IndirOpnd::New(src, 0, TyMachPtr, func), |
| LoadVTableValueOpnd(instr, VTableValue::VtableJavascriptBoolean), |
| Js::OpCode::BrNeq_A, |
| notBooleanLabel, |
| instr); |
| } |
| |
| // cmp src, false |
| // je/jne $target |
| InsertCompareBranch( |
| src, |
| LoadLibraryValueOpnd(instr, LibraryValue::ValueFalse), |
| compareWithFalseBranchToTargetOpCode, |
| labelTarget, |
| instr); |
| |
| if (srcValueType.IsBoolean()) |
| { |
| // Skip lowering call to helper |
| Assert(!labelHelper); |
| Assert(instr->m_prev->IsBranchInstr()); |
| instr->Remove(); |
| return false; |
| } |
| // jmp $fallthrough |
| Lowerer::InsertBranch(Js::OpCode::Br, labelFallthrough, instr); |
| |
| if (notBooleanLabel) |
| { |
| instr->InsertBefore(notBooleanLabel); |
| lastLabelBeforeHelper = notBooleanLabel; |
| |
| } |
| |
| src->SetValueType(srcValueType = srcValueType.SetIsNotAnyOf(ValueType::Boolean)); |
| } |
| |
| //////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// |
| // String fast path |
| |
| if(srcValueType.HasBeenString()) |
| { |
| IR::LabelInstr *notStringLabel = nullptr; |
| if(!srcValueType.IsString()) |
| { |
| EnsureObjectTest(); |
| |
| notStringLabel = IR::LabelInstr::New(Js::OpCode::Label, func); |
| GenerateStringTest(src, instr, notStringLabel, nullptr, false); |
| } |
| |
| // cmp [src + offset(length)], 0 |
| // jeq/jne $target |
| InsertCompareBranch( |
| IR::IndirOpnd::New(src, Js::JavascriptString::GetOffsetOfcharLength(), TyUint32, func), |
| IR::IntConstOpnd::New(0, TyUint32, func, true), |
| compareWithFalseBranchToTargetOpCode, |
| labelTarget, |
| instr); |
| |
| if(srcValueType.IsString()) |
| { |
| // Skip lowering call to helper |
| Assert(!labelHelper); |
| Assert(instr->m_prev->IsBranchInstr()); |
| instr->Remove(); |
| return false; |
| } |
| |
| // jmp $fallthrough |
| Lowerer::InsertBranch(Js::OpCode::Br, labelFallthrough, instr); |
| |
| if(notStringLabel) |
| { |
| instr->InsertBefore(notStringLabel); |
| lastLabelBeforeHelper = notStringLabel; |
| } |
| |
| src->SetValueType(srcValueType = srcValueType.SetIsNotAnyOf(ValueType::String)); |
| } |
| |
| //////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// |
| // Object fast path |
| |
| if (srcValueType.IsLikelyObject()) |
| { |
| if(srcValueType.IsObject()) |
| { |
| if(srcValueType.GetObjectType() > ObjectType::Object) |
| { |
| // Specific object types that are tracked are equivalent to 'true' |
| // jmp $true |
| InsertBranch(Js::OpCode::Br, labelTrue, instr); |
| |
| // Skip lowering call to helper |
| Assert(!labelHelper); |
| Assert(instr->m_prev->IsBranchInstr()); |
| instr->Remove(); |
| return false; |
| } |
| } |
| else |
| { |
| EnsureObjectTest(); |
| } |
| |
| // mov srcType, [src + offset(type)] -- load type |
| IR::RegOpnd *const srcType = IR::RegOpnd::New(TyMachPtr, func); |
| const IR::AutoReuseOpnd autoReuseR1(srcType, func); |
| InsertMove(srcType, IR::IndirOpnd::New(src, Js::RecyclableObject::GetOffsetOfType(), TyMachPtr, func), instr); |
| |
| // test [srcType + offset(flags)], TypeFlagMask_IsFalsy -- check if falsy |
| // jnz $false |
| InsertTestBranch( |
| IR::IndirOpnd::New(srcType, Js::Type::GetOffsetOfFlags(), TyUint8, func), |
| IR::IntConstOpnd::New(TypeFlagMask_IsFalsy, TyUint8, func), |
| Js::OpCode::BrNeq_A, |
| labelFalse, |
| instr); |
| |
| // cmp [srcType + offset(typeId)], TypeIds_LastJavascriptPrimitiveType -- check against the last primitive typeId |
| // ja $true |
| InsertCompareBranch( |
| IR::IndirOpnd::New(srcType, Js::Type::GetOffsetOfTypeId(), TyInt32, func), |
| IR::IntConstOpnd::New(Js::TypeIds_LastJavascriptPrimitiveType, TyInt32, func), |
| Js::OpCode::BrGt_A, |
| true /* isUnsigned */, |
| labelTrue, |
| instr); |
| |
| if(!labelHelper) |
| { |
| labelHelper = IR::LabelInstr::New(Js::OpCode::Label, func, true); |
| } |
| lastLabelBeforeHelper = nullptr; |
| } |
| |
| //////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// |
| // Helper call |
| |
| // $helper: |
| if(lastLabelBeforeHelper) |
| { |
| Assert(instr->m_prev == lastLabelBeforeHelper); |
| lastLabelBeforeHelper->isOpHelper = true; |
| } |
| if (labelHelper) |
| { |
| Assert(labelHelper->isOpHelper); |
| instr->InsertBefore(labelHelper); |
| } |
| |
| // call JavascriptConversion::ToBoolean |
| IR::RegOpnd *const toBoolDst = IR::RegOpnd::New(TyInt32, func); |
| const IR::AutoReuseOpnd autoReuseToBoolDst(toBoolDst, func); |
| IR::Instr *const callInstr = IR::Instr::New(Js::OpCode::Call, toBoolDst, instr->GetSrc1(), func); |
| instr->InsertBefore(callInstr); |
| LowerUnaryHelperMem(callInstr, IR::HelperConv_ToBoolean); |
| |
| // test eax, eax |
| InsertTest(toBoolDst, toBoolDst, instr); |
| |
| // je/jne $target |
| Assert(instr->IsBranchInstr()); |
| instr->FreeSrc1(); |
| instr->m_opcode = LowererMD::MDBranchOpcode(compareWithFalseBranchToTargetOpCode); |
| Assert(instr->AsBranchInstr()->GetTarget() == labelTarget); |
| |
| // Skip lowering another call to helper |
| return false; |
| } |
| |
| // Helper method used in LowerMD by all platforms. |
| // Creates HelperCallOpnd or DiagHelperCallOpnd, based on helperMethod and state. |
| // static |
| IR::HelperCallOpnd* |
| Lowerer::CreateHelperCallOpnd(IR::JnHelperMethod helperMethod, int helperArgCount, Func* func) |
| { |
| Assert(func); |
| |
| IR::HelperCallOpnd* helperCallOpnd; |
| if (CONFIG_FLAG(EnableContinueAfterExceptionWrappersForHelpers) && |
| func->IsJitInDebugMode() && |
| HelperMethodAttributes::CanThrow(helperMethod)) |
| { |
| // Create DiagHelperCallOpnd to indicate that it's needed to wrap original helper with try-catch wrapper, |
| // so that we can ignore exception and bailout to next stmt in debugger. |
| // For details, see: Lib\Runtime\Debug\DiagHelperMethodWrapper.{h,cpp}. |
| helperCallOpnd = IR::DiagHelperCallOpnd::New(helperMethod, func, helperArgCount); |
| } |
| else |
| { |
| helperCallOpnd = IR::HelperCallOpnd::New(helperMethod, func); |
| } |
| |
| return helperCallOpnd; |
| } |
| |
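| // Tries to fuse a Typeof with the (Br/Cm)(Sr)(Not)(Eq/Neq) that consumes it. When the other operand is a |
| // known type-name string constant ("undefined", "object", "boolean", "number", "string", "function"), the |
| // string compare is replaced by a typeId check (GenerateFastBrTypeOf / GenerateFastCmTypeOf). A comparison |
| // of two Typeof results is reduced to a direct comparison of the cached type-name string pointers. Returns |
| // true if the instruction was handled; *pfNoLower is set when no further lowering is required. |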
| bool |
| Lowerer::TryGenerateFastBrOrCmTypeOf(IR::Instr *instr, IR::Instr **prev, bool isNeqOp, bool *pfNoLower) |
| { |
| Assert(prev); |
| Assert(instr->m_opcode == Js::OpCode::BrSrEq_A || |
| instr->m_opcode == Js::OpCode::BrSrNeq_A || |
| instr->m_opcode == Js::OpCode::BrSrNotEq_A || |
| instr->m_opcode == Js::OpCode::BrSrNotNeq_A || |
| instr->m_opcode == Js::OpCode::CmSrEq_A || |
| instr->m_opcode == Js::OpCode::CmSrNeq_A || |
| instr->m_opcode == Js::OpCode::BrEq_A || |
| instr->m_opcode == Js::OpCode::BrNeq_A || |
| instr->m_opcode == Js::OpCode::BrNotEq_A || |
| instr->m_opcode == Js::OpCode::BrNotNeq_A || |
| instr->m_opcode == Js::OpCode::CmEq_A || |
| instr->m_opcode == Js::OpCode::CmNeq_A); |
| |
| // |
| // instr - (Br/Cm)(Sr)(N(ot))eq_A |
| // instr->m_prev - typeOf |
| // |
| IR::Instr *instrLd = instr->GetPrevRealInstrOrLabel(); |
| bool skippedLoads = false; |
| // Skip intermediate Ld_A instructions that flow graph peeps might have inserted |
| while (instrLd && instrLd->m_opcode == Js::OpCode::Ld_A) |
| { |
| if (!(instrLd->GetDst()->IsRegOpnd() && instrLd->GetDst()->AsRegOpnd()->m_fgPeepTmp)) |
| { |
| return false; |
| } |
| if (instrLd->HasBailOutInfo()) |
| { |
| return false; |
| } |
| instrLd = instrLd->GetPrevRealInstrOrLabel(); |
| skippedLoads = true; |
| } |
| |
| IR::Instr *typeOf = instrLd; |
| |
| IR::RegOpnd *instrSrc1 = instr->GetSrc1()->IsRegOpnd() ? instr->GetSrc1()->AsRegOpnd() : nullptr; |
| IR::RegOpnd *instrSrc2 = instr->GetSrc2()->IsRegOpnd() ? instr->GetSrc2()->AsRegOpnd() : nullptr; |
| if (typeOf && (typeOf->m_opcode == Js::OpCode::Typeof)) |
| { |
| IR::RegOpnd *typeOfDst = typeOf->GetDst()->IsRegOpnd() ? typeOf->GetDst()->AsRegOpnd() : nullptr; |
| |
| if (typeOfDst && instrSrc1 && instrSrc2) |
| { |
| do |
| { |
| IR::RegOpnd *typeOpnd = nullptr; |
| IR::RegOpnd *idOpnd = nullptr; |
| if (instrSrc1->m_sym == typeOfDst->m_sym) |
| { |
| typeOpnd = instrSrc1; |
| idOpnd = instrSrc2; |
| } |
| else if (instrSrc2->m_sym == typeOfDst->m_sym) |
| { |
| typeOpnd = instrSrc2; |
| idOpnd = instrSrc1; |
| } |
| else |
| { |
| // Neither source turned out to be the typeOpnd |
| break; |
| } |
| |
| if (!typeOpnd->m_isTempLastUse) |
| { |
| break; |
| } |
| |
| if (!(idOpnd->m_sym->m_isSingleDef && idOpnd->m_sym->m_isStrConst)) |
| { |
| return false; |
| } |
| |
| // The second argument to [Cm|Br]TypeOf is the typeid. |
| IR::IntConstOpnd *typeIdOpnd = nullptr; |
| |
| Assert(idOpnd->m_sym->m_isSingleDef); |
| Assert(idOpnd->m_sym->m_instrDef->GetSrc1()->IsAddrOpnd()); |
| |
| // We can't optimize non-JavaScript type strings. |
| JITJavascriptString *typeNameJsString = JITJavascriptString::FromVar(idOpnd->m_sym->m_instrDef->GetSrc1()->AsAddrOpnd()->m_localAddress); |
| const char16 *typeName = typeNameJsString->GetString(); |
| |
| Js::InternalString typeNameString(typeName, typeNameJsString->GetLength()); |
| if (Js::InternalStringComparer::Equals(typeNameString, Js::Type::UndefinedTypeNameString)) |
| { |
| typeIdOpnd = IR::IntConstOpnd::New(Js::TypeIds_Undefined, TyInt32, instr->m_func); |
| } |
| else if (Js::InternalStringComparer::Equals(typeNameString, Js::Type::ObjectTypeNameString)) |
| { |
| typeIdOpnd = IR::IntConstOpnd::New(Js::TypeIds_Object, TyInt32, instr->m_func); |
| } |
| else if (Js::InternalStringComparer::Equals(typeNameString, Js::Type::BooleanTypeNameString)) |
| { |
| typeIdOpnd = IR::IntConstOpnd::New(Js::TypeIds_Boolean, TyInt32, instr->m_func); |
| } |
| else if (Js::InternalStringComparer::Equals(typeNameString, Js::Type::NumberTypeNameString)) |
| { |
| typeIdOpnd = IR::IntConstOpnd::New(Js::TypeIds_Number, TyInt32, instr->m_func); |
| } |
| else if (Js::InternalStringComparer::Equals(typeNameString, Js::Type::StringTypeNameString)) |
| { |
| typeIdOpnd = IR::IntConstOpnd::New(Js::TypeIds_String, TyInt32, instr->m_func); |
| } |
| else if (Js::InternalStringComparer::Equals(typeNameString, Js::Type::FunctionTypeNameString)) |
| { |
| typeIdOpnd = IR::IntConstOpnd::New(Js::TypeIds_Function, TyInt32, instr->m_func); |
| } |
| else |
| { |
| return false; |
| } |
| |
| if (skippedLoads) |
| { |
| // validate that no dst of the skipped Ld_A instructions overlaps with the Typeof src or dst |
| IR::Opnd* typeOfSrc = typeOf->GetSrc1(); |
| instrLd = typeOf->GetNextRealInstr(); |
| while (instrLd != instr) |
| { |
| if (instrLd->GetDst()->IsEqual(typeOfDst) || instrLd->GetDst()->IsEqual(typeOfSrc)) |
| { |
| return false; |
| } |
| instrLd = instrLd->GetNextRealInstr(); |
| } |
| typeOf->Unlink(); |
| instr->InsertBefore(typeOf); |
| } |
| // The first argument to [Cm|Br]TypeOf is the first arg to the TypeOf instruction. |
| IR::Opnd *objectOpnd = typeOf->GetSrc1(); |
| Assert(objectOpnd->IsRegOpnd()); |
| |
| // Now emit this instruction and remove the ldstr and typeOf. |
| *prev = typeOf->m_prev; |
| *pfNoLower = false; |
| if (instr->IsBranchInstr()) |
| { |
| GenerateFastBrTypeOf(instr, objectOpnd->AsRegOpnd(), typeIdOpnd, typeOf, pfNoLower, isNeqOp); |
| } |
| else |
| { |
| GenerateFastCmTypeOf(instr, objectOpnd->AsRegOpnd(), typeIdOpnd, typeOf, pfNoLower, isNeqOp); |
| } |
| |
| return true; |
| } while (false); |
| } |
| } |
| |
| if (instrSrc1 && instrSrc1->GetStackSym()->IsSingleDef() && instrSrc2 && instrSrc2->GetStackSym()->IsSingleDef() && |
| ( |
| ((instrSrc1->GetStackSym()->GetInstrDef()->m_opcode == Js::OpCode::Typeof) && |
| ((instrSrc2->GetStackSym()->GetInstrDef()->m_opcode == Js::OpCode::Typeof) || instrSrc2->GetStackSym()->GetIsStrConst())) |
| || |
| ((instrSrc2->GetStackSym()->GetInstrDef()->m_opcode == Js::OpCode::Typeof) && |
| ((instrSrc1->GetStackSym()->GetInstrDef()->m_opcode == Js::OpCode::Typeof) || instrSrc1->GetStackSym()->GetIsStrConst())) |
| ) |
| ) |
| { |
| *pfNoLower = true; |
| if (instr->IsBranchInstr()) |
| { |
| InsertCompareBranch(instrSrc1, instrSrc2, isNeqOp ? Js::OpCode::BrNeq_A : Js::OpCode::BrEq_A, instr->AsBranchInstr()->GetTarget(), instr); |
| instr->Remove(); |
| } |
| else |
| { |
| if (instrSrc1->IsEqual(instrSrc2)) |
| { |
| InsertMove(instr->GetDst(), LoadLibraryValueOpnd(instr, isNeqOp ? LibraryValue::ValueFalse : LibraryValue::ValueTrue), instr); |
| } |
| else |
| { |
| // t1 = typeof o1 |
| // t2 = typeof o2 |
| // dst = t1 == t2 |
| |
| // MOV dst, true |
| // CMP t1, t2 |
| |
| // x86, amd64 |
| // CMOVNE dst, false |
| |
| // arm |
| // BEQ $done |
| // MOV dst, false |
| // $done |
| |
| if (instr->GetDst()->IsEqual(instrSrc1)) |
| { |
| IR::Instr* hoistInstr = m_lowererMD.ChangeToAssign(instr->HoistSrc1(Js::OpCode::Ld_A)); |
| instrSrc1 = hoistInstr->GetDst()->AsRegOpnd(); |
| } |
| if (instr->GetDst()->IsEqual(instrSrc2)) |
| { |
| IR::Instr* hoistInstr = m_lowererMD.ChangeToAssign(instr->HoistSrc2(Js::OpCode::Ld_A)); |
| instrSrc2 = hoistInstr->GetDst()->AsRegOpnd(); |
| } |
| |
| InsertMove(instr->GetDst(), LoadLibraryValueOpnd(instr, LibraryValue::ValueTrue), instr); |
| |
| #if defined(_M_ARM32_OR_ARM64) |
| IR::LabelInstr * doneLabel = IR::LabelInstr::New(Js::OpCode::Label, instr->m_func); |
| InsertCompareBranch(instrSrc1, instrSrc2, isNeqOp ? Js::OpCode::BrNeq_A : Js::OpCode::BrEq_A, doneLabel, instr); |
| InsertMove(instr->GetDst(), LoadLibraryValueOpnd(instr, LibraryValue::ValueFalse), instr); |
| instr->InsertBefore(doneLabel); |
| #else |
| InsertCompare(instrSrc1, instrSrc2, instr); |
| LowererMD::InsertCmovCC(isNeqOp ? Js::OpCode::CMOVE : Js::OpCode::CMOVNE, instr->GetDst(), LoadLibraryValueOpnd(instr, LibraryValue::ValueFalse), instr); |
| #endif |
| } |
| instr->Remove(); |
| } |
| return true; |
| } |
| |
| return false; |
| } |
| |
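| // Branches to falsyLabel when the object's type has TypeFlagMask_IsFalsy set (objects that must behave |
| // like 'undefined', e.g. document.all emulation): |
| //     TEST [typeOpnd + offset(flags)], TypeFlagMask_IsFalsy |
| //     JNE $falsyLabel |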
| void |
| Lowerer::GenerateFalsyObjectTest(IR::Instr * insertInstr, IR::RegOpnd * typeOpnd, IR::LabelInstr * falsyLabel) |
| { |
| IR::Opnd *flagsOpnd = IR::IndirOpnd::New(typeOpnd, Js::Type::GetOffsetOfFlags(), TyInt32, this->m_func); |
| InsertTestBranch(flagsOpnd, IR::IntConstOpnd::New(TypeFlagMask_IsFalsy, TyInt32, this->m_func), Js::OpCode::BrNeq_A, falsyLabel, insertInstr); |
| } |
| |
| void |
| Lowerer::GenerateFalsyObjectTest(IR::Instr *insertInstr, IR::RegOpnd *typeOpnd, Js::TypeId typeIdToCheck, IR::LabelInstr* target, IR::LabelInstr* done, bool isNeqOp) |
| { |
| if (!this->m_func->GetThreadContextInfo()->CanBeFalsy(typeIdToCheck) && typeIdToCheck != Js::TypeIds_Undefined) |
| { |
| // No falsy check needed; the typeId we are looking for can never belong to a falsy object |
| return; |
| } |
| |
| IR::Opnd *flagsOpnd = IR::IndirOpnd::New(typeOpnd, Js::Type::GetOffsetOfFlags(), TyInt32, this->m_func); |
| InsertTest(flagsOpnd, IR::IntConstOpnd::New(TypeFlagMask_IsFalsy, TyInt32, this->m_func), insertInstr); |
| |
| if (typeIdToCheck == Js::TypeIds_Undefined) |
| { |
| // A falsy object reports "undefined", so ((typeof falsyObj) == "undefined") is true |
| InsertBranch(Js::OpCode::BrNeq_A, true, isNeqOp ? done : target, insertInstr); |
| } |
| else |
| { |
| // A falsy object reports "undefined", so it compares false against every other type name ((typeof falsyObj) != "function") |
| InsertBranch(Js::OpCode::BrNeq_A, true, isNeqOp ? target : done, insertInstr); |
| } |
| } |
| |
| ///---------------------------------------------------------------------------- |
| /// |
| /// Lowerer::GenerateFastBrTypeOf |
| /// |
| ///---------------------------------------------------------------------------- |
| void |
| Lowerer::GenerateFastBrTypeOf(IR::Instr *branch, IR::RegOpnd *object, IR::IntConstOpnd *typeIdOpnd, IR::Instr *typeOf, bool *pfNoLower, bool isNeqOp) |
| { |
| Js::TypeId typeId = static_cast<Js::TypeId>(typeIdOpnd->GetValue()); |
| IR::LabelInstr *target = branch->AsBranchInstr()->GetTarget(); |
| IR::LabelInstr *done = IR::LabelInstr::New(Js::OpCode::Label, m_func, false); |
| IR::LabelInstr *helper = IR::LabelInstr::New(Js::OpCode::Label, m_func, true); |
| IR::RegOpnd *typeRegOpnd = IR::RegOpnd::New(TyMachReg, m_func); |
| |
| switch(branch->m_opcode) |
| { |
| case Js::OpCode::BrSrNeq_A: |
| case Js::OpCode::BrNeq_A: |
| case Js::OpCode::BrSrNotEq_A: |
| case Js::OpCode::BrNotEq_A: |
| case Js::OpCode::BrSrEq_A: |
| case Js::OpCode::BrEq_A: |
| case Js::OpCode::BrSrNotNeq_A: |
| case Js::OpCode::BrNotNeq_A: |
| break; |
| default: |
| Assert(UNREACHED); |
| __assume(UNREACHED); |
| } |
| // JNE/BNE (typeId == Js::TypeIds_Number) ? $target : $done |
| IR::LabelInstr *label = (typeId == Js::TypeIds_Number) ? target : done; |
| if (isNeqOp) |
| label = (label == target) ? done : target; |
| |
| m_lowererMD.GenerateObjectTest(object, branch, label); |
| |
| // MOV typeRegOpnd, [object + offset(Type)] |
| InsertMove(typeRegOpnd, |
| IR::IndirOpnd::New(object, Js::RecyclableObject::GetOffsetOfType(), TyMachReg, m_func), |
| branch); |
| |
| GenerateFalsyObjectTest(branch, typeRegOpnd, typeId, target, done, isNeqOp); |
| |
| // MOV objTypeId, [typeRegOpnd + offset(TypeId)] |
| IR::RegOpnd* objTypeIdOpnd = IR::RegOpnd::New(TyInt32, m_func); |
| InsertMove(objTypeIdOpnd, |
| IR::IndirOpnd::New(typeRegOpnd, Js::Type::GetOffsetOfTypeId(), TyInt32, m_func), |
| branch); |
| |
| // CMP objTypeId, typeId |
| // JEQ/JGE isNeqOp ? $done : $target |
| if (typeId == Js::TypeIds_Object) |
| { |
| InsertCompareBranch(objTypeIdOpnd, typeIdOpnd, Js::OpCode::BrGe_A, isNeqOp ? done : target, branch); |
| } |
| else if (typeId == Js::TypeIds_Function) |
| { |
| InsertCompareBranch(objTypeIdOpnd, typeIdOpnd, Js::OpCode::BrEq_A, isNeqOp ? done : target, branch); |
| } |
| else if (typeId == Js::TypeIds_Number) |
| { |
| // Unsigned range check for TypeIds_FirstNumberType <= typeId <= TypeIds_LastNumberType: |
| // (typeId - TypeIds_FirstNumberType) is compared unsigned against (TypeIds_LastNumberType - TypeIds_FirstNumberType) |
| InsertSub(false, objTypeIdOpnd, objTypeIdOpnd, IR::IntConstOpnd::New(Js::TypeIds_FirstNumberType, TyInt32, branch->m_func), branch); |
| |
| InsertCompare(objTypeIdOpnd, IR::IntConstOpnd::New(Js::TypeIds_LastNumberType - Js::TypeIds_FirstNumberType, TyInt32, branch->m_func), branch); |
| InsertBranch(isNeqOp ? Js::OpCode::BrGt_A : Js::OpCode::BrLe_A, true, target, branch); |
| } |
| else |
| { |
| InsertCompare(objTypeIdOpnd, typeIdOpnd, branch); |
| InsertBranch(isNeqOp ? Js::OpCode::BrNeq_A : Js::OpCode::BrEq_A, target, branch); |
| } |
| |
| // This could be 'null' which, for historical reasons, has a TypeId < TypeIds_Object but |
| // is still a Javascript "object." |
| if (typeId == Js::TypeIds_Object) |
| { |
| // CMP object, 0xXXXXXXXX |
| // JEQ isNeqOp ? $done : $target |
| InsertCompareBranch(object, |
| LoadLibraryValueOpnd(branch, LibraryValue::ValueNull), |
| Js::OpCode::BrEq_A, |
| isNeqOp ? done : target, |
| branch); |
| } |
| |
| branch->InsertAfter(done); // Get this label first |
| |
| // "object" or "function" may come from HostDispatch. Needs helper if that's the case. |
| if (typeId == Js::TypeIds_Object || typeId == Js::TypeIds_Function) |
| { |
| // typeof for a proxy could be 'object' or 'function' depending on the proxy's target |
| // CMP objTypeId, TypeIds_Proxy |
| // JEQ $helper |
| InsertCompareBranch(objTypeIdOpnd, |
| IR::IntConstOpnd::New(Js::TypeIds_Proxy, TyInt32, m_func), |
| Js::OpCode::BrEq_A, |
| helper, |
| branch); |
| |
| // CMP objTypeId, TypeIds_HostDispatch |
| // JNE isNeqOp ? $target : $done |
| InsertCompareBranch(objTypeIdOpnd, |
| IR::IntConstOpnd::New(Js::TypeIds_HostDispatch, TyInt32, m_func), |
| Js::OpCode::BrNeq_A, |
| isNeqOp ? target : done, |
| branch); |
| |
| // Now emit Typeof and lower it like we would've for the helper call. |
| { |
| branch->InsertBefore(helper); |
| typeOf->Unlink(); |
| branch->InsertBefore(typeOf); |
| if (branch->HasBailOutInfo() && BailOutInfo::IsBailOutOnImplicitCalls(branch->GetBailOutKind()) && |
| (!typeOf->HasBailOutInfo() || !BailOutInfo::IsBailOutOnImplicitCalls(typeOf->GetBailOutKind()))) |
| { |
| typeOf = AddBailoutToHelperCallInstr(typeOf, branch->GetBailOutInfo(), branch->GetBailOutKind(), branch); |
| } |
| LowerUnaryHelperMem(typeOf, IR::HelperOp_Typeof); |
| } |
| } |
| else // Other primitive types don't need helper |
| { |
| typeOf->Remove(); |
| branch->Remove(); |
| *pfNoLower = true; |
| } |
| |
| // $done: |
| } |
| |
| ///---------------------------------------------------------------------------- |
| /// |
| /// Lowerer::GenerateFastCmTypeOf |
| /// |
| ///---------------------------------------------------------------------------- |
| void |
| Lowerer::GenerateFastCmTypeOf(IR::Instr *compare, IR::RegOpnd *object, IR::IntConstOpnd *typeIdOpnd, IR::Instr *typeOf, bool *pfNoLower, bool isNeqOp) |
| { |
| Assert(compare->m_opcode == Js::OpCode::CmSrEq_A || |
| compare->m_opcode == Js::OpCode::CmEq_A || |
| compare->m_opcode == Js::OpCode::CmSrNeq_A || |
| compare->m_opcode == Js::OpCode::CmNeq_A); |
| |
| Js::TypeId typeId = static_cast<Js::TypeId>(typeIdOpnd->GetValue()); |
| IR::LabelInstr *movFalse = IR::LabelInstr::New(Js::OpCode::Label, m_func, false); |
| IR::LabelInstr *done = IR::LabelInstr::New(Js::OpCode::Label, m_func, false); |
| IR::LabelInstr *helper = IR::LabelInstr::New(Js::OpCode::Label, m_func, true); |
| IR::RegOpnd *dst = compare->GetDst()->IsRegOpnd() ? compare->GetDst()->AsRegOpnd() : nullptr; |
| IR::RegOpnd *typeRegOpnd = IR::RegOpnd::New(TyMachReg, m_func); |
| |
| Assert(dst); |
| |
| if (dst->IsEqual(object)) |
| { |
| // dst is the same as the src of the Typeof. Since we move 'true' into dst first, save the src to a new opnd |
| IR::RegOpnd *newObject = IR::RegOpnd::New(object->GetType(), m_func); |
| InsertMove(newObject, object, compare); //Save src |
| object = newObject; |
| } |
| |
| // mov dst, 'true' |
| InsertMove(dst, |
| LoadLibraryValueOpnd(compare, LibraryValue::ValueTrue), |
| compare); |
| |
| // TEST object, 1 |
| // JNE (typeId == Js::TypeIds_Number) ? $done : $movFalse |
| IR::LabelInstr *target = (typeId == Js::TypeIds_Number) ? done : movFalse; |
| if (isNeqOp) |
| { |
| target = (target == done) ? movFalse : done; |
| } |
| |
| m_lowererMD.GenerateObjectTest(object, compare, target); |
| |
| // MOV typeRegOpnd, [object + offset(Type)] |
| InsertMove(typeRegOpnd, |
| IR::IndirOpnd::New(object, Js::RecyclableObject::GetOffsetOfType(), TyMachReg, m_func), |
| compare); |
| |
| GenerateFalsyObjectTest(compare, typeRegOpnd, typeId, done, movFalse, isNeqOp); |
| |
| // MOV objTypeId, [typeRegOpnd + offset(TypeId)] |
| IR::RegOpnd* objTypeIdOpnd = IR::RegOpnd::New(TyInt32, m_func); |
| InsertMove(objTypeIdOpnd, |
| IR::IndirOpnd::New(typeRegOpnd, Js::Type::GetOffsetOfTypeId(), TyInt32, m_func), |
| compare); |
| |
| // CMP objTypeId, typeId |
| // JEQ/JGE isNeqOp ? $movFalse : $done |
| if (typeId == Js::TypeIds_Object) |
| { |
| InsertCompareBranch(objTypeIdOpnd, typeIdOpnd, Js::OpCode::BrGe_A, isNeqOp ? movFalse : done, compare); |
| } |
| else if (typeId == Js::TypeIds_Function) |
| { |
| InsertCompareBranch(objTypeIdOpnd, typeIdOpnd, Js::OpCode::BrEq_A, isNeqOp ? movFalse : done, compare); |
| } |
| else if (typeId == Js::TypeIds_Number) |
| { |
| // Check that the typeId is in the range TypeIds_FirstNumberType <= typeId <= TypeIds_LastNumberType |
| InsertCompareBranch(objTypeIdOpnd, |
| IR::IntConstOpnd::New(Js::TypeIds_LastNumberType, TyInt32, compare->m_func), |
| Js::OpCode::BrGt_A, |
| isNeqOp ? done : movFalse, |
| compare); |
| |
| InsertCompareBranch(objTypeIdOpnd, |
| IR::IntConstOpnd::New(Js::TypeIds_FirstNumberType, TyInt32, compare->m_func), |
| isNeqOp? Js::OpCode::BrLt_A : Js::OpCode::BrGe_A, |
| done, |
| compare); |
| } |
| else |
| { |
| InsertCompareBranch(objTypeIdOpnd, typeIdOpnd, isNeqOp ? Js::OpCode::BrNeq_A : Js::OpCode::BrEq_A, done, compare); |
| } |
| |
| // This could be 'null' which, for historical reasons, has a TypeId < TypeIds_Object but |
| // is still a Javascript "object." |
| if (typeId == Js::TypeIds_Object) |
| { |
| // CMP object, 0xXXXXXXXX |
| // JEQ isNeqOp ? $movFalse : $done |
| InsertCompareBranch(object, |
| LoadLibraryValueOpnd(compare, LibraryValue::ValueNull), |
| Js::OpCode::BrEq_A, |
| isNeqOp ? movFalse : done, |
| compare); |
| } |
| |
| compare->InsertAfter(done); // Get this label first |
| |
| // "object" or "function" may come from HostDispatch. Needs helper if that's the case. |
| if (typeId == Js::TypeIds_Object || typeId == Js::TypeIds_Function) |
| { |
| // typeof for a proxy could be 'object' or 'function' depending on the proxy's target |
| // CMP objTypeId, TypeIds_Proxy |
| // JEQ $helper |
| InsertCompareBranch(objTypeIdOpnd, |
| IR::IntConstOpnd::New(Js::TypeIds_Proxy, TyInt32, m_func), |
| Js::OpCode::BrEq_A, |
| helper, |
| compare); |
| |
| // CMP objTypeId, TypeIds_HostDispatch |
| // JNE isNeqOp ? $done : $movFalse |
| InsertCompareBranch(objTypeIdOpnd, |
| IR::IntConstOpnd::New(Js::TypeIds_HostDispatch, TyInt32, m_func), |
| Js::OpCode::BrNeq_A, |
| isNeqOp ? done : movFalse, |
| compare); |
| |
| // Now emit Typeof like we would've for the helper call. |
| { |
| compare->InsertBefore(helper); |
| typeOf->Unlink(); |
| compare->InsertBefore(typeOf); |
| if (compare->HasBailOutInfo() && BailOutInfo::IsBailOutOnImplicitCalls(compare->GetBailOutKind()) && |
| (!typeOf->HasBailOutInfo() || !BailOutInfo::IsBailOutOnImplicitCalls(typeOf->GetBailOutKind()))) |
| { |
| typeOf = AddBailoutToHelperCallInstr(typeOf, compare->GetBailOutInfo(), compare->GetBailOutKind(), compare); |
| } |
| LowerUnaryHelperMem(typeOf, IR::HelperOp_Typeof); |
| } |
| |
| // JMP/B $done |
| InsertBranch(Js::OpCode::Br, done, done); |
| } |
| else // Other primitive types don't need helper |
| { |
| typeOf->Remove(); |
| dst = compare->UnlinkDst()->AsRegOpnd(); |
| compare->Remove(); |
| *pfNoLower = true; |
| } |
| |
| // $movFalse: (insert before $done) |
| done->InsertBefore(movFalse); |
| |
| // MOV dst, 'false' |
| InsertMove(dst, LoadLibraryValueOpnd(done, LibraryValue::ValueFalse), done); |
| |
| // $done: |
| } |
| |
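| // Emits the prologue check that a class constructor was invoked with 'new': loads the call info from the |
| // frame, tests Js::CallFlags_New (shifted past the count bits), and throws |
| // JSERR_ClassConstructorCannotBeCalledWithoutNew when the flag is absent. |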
| void |
| Lowerer::GenerateCheckForCallFlagNew(IR::Instr* instrInsert) |
| { |
| Func *func = instrInsert->m_func; |
| IR::LabelInstr * labelDone = IR::LabelInstr::New(Js::OpCode::Label, func, false); |
| |
| Assert(!func->IsInlinee()); |
| |
| // MOV s1, [ebp + 4] // s1 = call info |
| // AND s2, s1, Js::CallFlags_New // s2 = s1 & Js::CallFlags_New |
| // CMP s2, 0 |
| // JNE $Done |
| // CALL RuntimeTypeError |
| // $Done |
| |
| IR::SymOpnd* callInfoOpnd = Lowerer::LoadCallInfo(instrInsert); |
| Assert(Js::CallInfo::ksizeofCount == 24); |
| |
| IR::RegOpnd* isNewFlagSetRegOpnd = IR::RegOpnd::New(TyMachReg, func); |
| |
| InsertAnd(isNewFlagSetRegOpnd, callInfoOpnd, IR::IntConstOpnd::New((IntConstType)Js::CallFlags_New << Js::CallInfo::ksizeofCount, TyMachReg, func, true), instrInsert); |
| InsertTestBranch(isNewFlagSetRegOpnd, isNewFlagSetRegOpnd, Js::OpCode::BrNeq_A, labelDone, instrInsert); |
| |
| IR::Instr *throwInstr = IR::Instr::New( |
| Js::OpCode::RuntimeTypeError, |
| IR::RegOpnd::New(TyMachReg, m_func), |
| IR::IntConstOpnd::New(SCODE_CODE(JSERR_ClassConstructorCannotBeCalledWithoutNew), TyInt32, m_func), |
| m_func); |
| instrInsert->InsertBefore(throwInstr); |
| this->LowerUnaryHelperMem(throwInstr, IR::HelperOp_RuntimeTypeError); |
| |
| instrInsert->InsertBefore(labelDone); |
| instrInsert->Remove(); |
| } |
| |
| void |
| Lowerer::GenerateJavascriptOperatorsIsConstructorGotoElse(IR::Instr *instrInsert, IR::RegOpnd *instanceRegOpnd, IR::LabelInstr *labelReturnTrue, IR::LabelInstr *labelReturnFalse) |
| { |
| // $ProxyLoop: |
| // // if (!VarIs<RecyclableObject>(instance)) { goto $ReturnFalse }; // omitted: VarIs<RecyclableObject>(instance) always true |
| // MOV s0, instance->type |
| // MOV s1, s0->typeId |
| // CMP s1, TypeIds_Proxy |
| // JNE $NotProxy |
| // |
| // MOV instance, instance->target |
| // JMP $ProxyLoop |
| // |
| // $NotProxy: |
| // CMP s1, TypeIds_Function |
| // JNE $ReturnFalse // external |
| // |
| // MOV s0, instance->functionInfo |
| // MOV s1, s0->attributes |
| // TEST s1, ErrorOnNew |
| // JNE $ReturnFalse // external |
| // |
| // JMP $ReturnTrue // external |
| |
| Func *func = instrInsert->m_func; |
| |
| IR::LabelInstr *labelProxyLoop = InsertLoopTopLabel(instrInsert); |
| |
| IR::LabelInstr *labelNotProxy = IR::LabelInstr::New(Js::OpCode::Label, func, false); |
| |
| IR::RegOpnd *indir0RegOpnd = IR::RegOpnd::New(TyMachPtr, func); |
| IR::RegOpnd *indir1RegOpnd = IR::RegOpnd::New(TyUint32, func); |
| |
| Loop * loop = labelProxyLoop->GetLoop(); |
| loop->regAlloc.liveOnBackEdgeSyms->Set(instanceRegOpnd->m_sym->m_id); |
| |
| IR::IndirOpnd *indirOpnd = IR::IndirOpnd::New(instanceRegOpnd, Js::RecyclableObject::GetOffsetOfType(), TyMachPtr, func); |
| Lowerer::InsertMove(indir0RegOpnd, indirOpnd, instrInsert); |
| |
| indirOpnd = IR::IndirOpnd::New(indir0RegOpnd, Js::Type::GetOffsetOfTypeId(), TyUint32, func); |
| Lowerer::InsertMove(indir1RegOpnd, indirOpnd, instrInsert); |
| |
| InsertCompareBranch(indir1RegOpnd, IR::IntConstOpnd::New(Js::TypeIds_Proxy, TyUint32, func, true), Js::OpCode::BrNeq_A, labelNotProxy, instrInsert); |
| |
| indirOpnd = IR::IndirOpnd::New(instanceRegOpnd, Js::JavascriptProxy::GetOffsetOfTarget(), TyMachPtr, func); |
| Lowerer::InsertMove(instanceRegOpnd, indirOpnd, instrInsert); |
| |
| InsertBranch(Js::OpCode::Br, labelProxyLoop, instrInsert); |
| |
| instrInsert->InsertBefore(labelNotProxy); |
| |
| InsertCompareBranch(indir1RegOpnd, IR::IntConstOpnd::New(Js::TypeIds_Function, TyUint32, func, true), Js::OpCode::BrNeq_A, labelReturnFalse, instrInsert); |
| |
| indirOpnd = IR::IndirOpnd::New(instanceRegOpnd, Js::JavascriptFunction::GetOffsetOfFunctionInfo(), TyMachPtr, func); |
| Lowerer::InsertMove(indir0RegOpnd, indirOpnd, instrInsert); |
| |
| indirOpnd = IR::IndirOpnd::New(indir0RegOpnd, Js::FunctionInfo::GetAttributesOffset(), TyUint32, func); |
| Lowerer::InsertMove(indir1RegOpnd, indirOpnd, instrInsert); |
| |
| InsertTestBranch(indir1RegOpnd, IR::IntConstOpnd::New(Js::FunctionInfo::Attributes::ErrorOnNew, TyUint32, func, true), Js::OpCode::BrNeq_A, labelReturnFalse, instrInsert); |
| |
| InsertBranch(Js::OpCode::Br, labelReturnTrue, instrInsert); |
| } |
| |
| void |
| Lowerer::GenerateRecyclableObjectGetPrototypeNullptrGoto(IR::Instr *instrInsert, IR::RegOpnd *instanceRegOpnd, IR::LabelInstr *labelReturnNullptr) |
| { |
| // MOV instance, instance->type |
| // MOV flags, instance->flags |
| // TEST flags, TypeFlagMask_HasSpecialPrototype |
| // JNE $ReturnNullptr // external, bypassing nullptr check |
| // MOV instance, instance->prototype |
| |
| Func *func = instrInsert->m_func; |
| |
| IR::RegOpnd *flagsRegOpnd = IR::RegOpnd::New(TyUint32, func); |
| |
| IR::IndirOpnd *indirOpnd = IR::IndirOpnd::New(instanceRegOpnd, Js::RecyclableObject::GetOffsetOfType(), TyMachPtr, func); |
| Lowerer::InsertMove(instanceRegOpnd, indirOpnd, instrInsert); |
| |
| indirOpnd = IR::IndirOpnd::New(instanceRegOpnd, Js::Type::GetOffsetOfFlags(), TyUint32, func); |
| Lowerer::InsertMove(flagsRegOpnd, indirOpnd, instrInsert); |
| |
| InsertTestBranch(flagsRegOpnd, IR::IntConstOpnd::New(TypeFlagMask_HasSpecialPrototype, TyUint32, func, true), Js::OpCode::BrNeq_A, labelReturnNullptr, instrInsert); |
| |
| indirOpnd = IR::IndirOpnd::New(instanceRegOpnd, Js::Type::GetOffsetOfPrototype(), TyMachPtr, func); |
| Lowerer::InsertMove(instanceRegOpnd, indirOpnd, instrInsert); |
| } |
| |
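| // Inline tag check corresponding to VarIs<RecyclableObject>: branches to labelFalse when the instance is a |
| // tagged value rather than a RecyclableObject pointer (upper tag bits under INT32VAR, the AtomTag bit otherwise). |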
| void |
| Lowerer::GenerateRecyclableObjectIsElse(IR::Instr *instrInsert, IR::RegOpnd *instanceRegOpnd, IR::LabelInstr *labelFalse) |
| { |
| Func *func = instrInsert->m_func; |
| |
| #if INT32VAR |
| InsertTestBranch(instanceRegOpnd, IR::AddrOpnd::New((Js::Var)0xffff000000000000, IR::AddrOpndKindConstantVar, func, true), Js::OpCode::BrNeq_A, labelFalse, instrInsert); |
| #else |
| InsertTestBranch(instanceRegOpnd, IR::IntConstOpnd::New(Js::AtomTag, TyUint32, func, true), Js::OpCode::BrNeq_A, labelFalse, instrInsert); |
| #endif |
| } |
| |
| void |
| Lowerer::GenerateLdHomeObj(IR::Instr* instr) |
| { |
| // MOV dst, undefined |
| // MOV instance, functionObject // functionObject through stack params or src1 |
| // CMP [instance], VtableStackScriptFunction |
| // JE $Done |
| // MOV instance, instance->homeObj |
| // TEST instance, instance |
| // JZ $Done |
| // MOV dst, instance |
| // $Done: |
| |
| Func *func = instr->m_func; |
| |
| IR::LabelInstr *labelDone = IR::LabelInstr::New(Js::OpCode::Label, func, false); |
| IR::LabelInstr *labelInlineFunc = IR::LabelInstr::New(Js::OpCode::Label, func, false); |
| IR::LabelInstr *testLabel = IR::LabelInstr::New(Js::OpCode::Label, func, false); |
| IR::LabelInstr *scriptFuncLabel = IR::LabelInstr::New(Js::OpCode::Label, func, false); |
| IR::Opnd *opndUndefAddress = this->LoadLibraryValueOpnd(instr, LibraryValue::ValueUndefined); |
| |
| IR::RegOpnd *instanceRegOpnd = IR::RegOpnd::New(TyMachPtr, func); |
| |
| IR::Opnd *dstOpnd = instr->GetDst(); |
| Assert(dstOpnd->IsRegOpnd()); |
| Lowerer::InsertMove(dstOpnd, opndUndefAddress, instr); |
| |
| IR::Opnd * functionObjOpnd = nullptr; |
| m_lowererMD.LoadFunctionObjectOpnd(instr, functionObjOpnd); |
| Lowerer::InsertMove(instanceRegOpnd, functionObjOpnd, instr); |
| |
| IR::Opnd * vtableAddressOpnd = this->LoadVTableValueOpnd(instr, VTableValue::VtableStackScriptFunction); |
| IR::BranchInstr* branchInstr = InsertCompareBranch(IR::IndirOpnd::New(instanceRegOpnd, 0, TyMachPtr, func), vtableAddressOpnd, |
| Js::OpCode::BrEq_A, true, labelDone, instr); |
| |
| InsertObjectPoison(instanceRegOpnd, branchInstr, instr, false); |
| |
| if (func->GetJITFunctionBody()->HasHomeObj()) |
| { |
| // Is this a function with an inline cache and a home object? |
| IR::Opnd * vtableAddressInlineFuncHomObjOpnd = this->LoadVTableValueOpnd(instr, VTableValue::VtableScriptFunctionWithInlineCacheAndHomeObj); |
| IR::BranchInstr* inlineFuncHomObjOpndBr = InsertCompareBranch(IR::IndirOpnd::New(instanceRegOpnd, 0, TyMachPtr, func), vtableAddressInlineFuncHomObjOpnd, Js::OpCode::BrNeq_A, labelInlineFunc, instr); |
| InsertObjectPoison(instanceRegOpnd, inlineFuncHomObjOpndBr, instr, false); |
| IR::IndirOpnd *indirInlineFuncHomeObjOpnd = IR::IndirOpnd::New(instanceRegOpnd, Js::FunctionWithHomeObj<Js::ScriptFunctionWithInlineCache>::GetOffsetOfHomeObj(), TyMachPtr, func); |
| Lowerer::InsertMove(instanceRegOpnd, indirInlineFuncHomeObjOpnd, instr); |
| InsertBranch(Js::OpCode::Br, testLabel, instr); |
| |
| instr->InsertBefore(labelInlineFunc); |
| |
| // Is this a function with an inline cache, a home object, and a computed name? |
| IR::Opnd * vtableAddressInlineFuncHomObjCompNameOpnd = this->LoadVTableValueOpnd(instr, VTableValue::VtableScriptFunctionWithInlineCacheHomeObjAndComputedName); |
| IR::BranchInstr* inlineFuncHomObjCompNameBr = InsertCompareBranch(IR::IndirOpnd::New(instanceRegOpnd, 0, TyMachPtr, func), vtableAddressInlineFuncHomObjCompNameOpnd, Js::OpCode::BrNeq_A, scriptFuncLabel, instr); |
| InsertObjectPoison(instanceRegOpnd, inlineFuncHomObjCompNameBr, instr, false); |
| IR::IndirOpnd *indirInlineFuncHomeObjCompNameOpnd = IR::IndirOpnd::New(instanceRegOpnd, Js::FunctionWithComputedName<Js::FunctionWithHomeObj<Js::ScriptFunctionWithInlineCache>>::GetOffsetOfHomeObj(), TyMachPtr, func); |
| Lowerer::InsertMove(instanceRegOpnd, indirInlineFuncHomeObjCompNameOpnd, instr); |
| InsertBranch(Js::OpCode::Br, testLabel, instr); |
| |
| instr->InsertBefore(scriptFuncLabel); |
| IR::IndirOpnd *indirOpnd = IR::IndirOpnd::New(instanceRegOpnd, Js::ScriptFunctionWithHomeObj::GetOffsetOfHomeObj(), TyMachPtr, func); |
| Lowerer::InsertMove(instanceRegOpnd, indirOpnd, instr); |
| } |
| else |
| { |
        // Even if the function does not have a home object (e.g., in eval cases), the LdHomeObj opcode can still occur.
| InsertBranch(Js::OpCode::Br, labelDone, instr); |
| } |
| |
| instr->InsertBefore(testLabel); |
| InsertTestBranch(instanceRegOpnd, instanceRegOpnd, Js::OpCode::BrEq_A, labelDone, instr); |
| |
| Lowerer::InsertMove(dstOpnd, instanceRegOpnd, instr); |
| |
| instr->InsertBefore(labelDone); |
| instr->Remove(); |
| } |
| |
| void |
| Lowerer::GenerateLdHomeObjProto(IR::Instr* instr) |
| { |
| // MOV dst, undefined |
| // MOV instance, src1 // homeObj |
| // TEST instance, instance |
| // JZ $Done |
| // |
| // if (!VarIs<RecyclableObject>(instance)) goto $Done |
| // MOV type, [instance+Offset(type)] |
| // MOV typeId, [type+Offset(typeId)] |
| // CMP typeId, TypeIds_Null |
| // JEQ $Err |
| // CMP typeId, TypeIds_Undefined |
| // JNE $NoErr |
| // |
| // $Err: |
| // ThrowRuntimeReferenceError(JSERR_BadSuperReference); |
| // |
| // $NoErr: |
| // instance = ((RecyclableObject*)instance)->GetPrototype(); |
| // if (instance == nullptr) goto $Done; |
| // |
| // if (!VarIs<RecyclableObject>(instance)) goto $Done |
| // |
| // MOV dst, instance |
| // $Done: |
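    //
    // Illustrative: for a `super.x` load in a method of `class C extends B`,
    // the home object is C.prototype, so GetPrototype() yields B.prototype,
    // which is where the property lookup for `super.x` must begin.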
| |
| Func *func = instr->m_func; |
| IR::Opnd *src1Opnd = instr->UnlinkSrc1(); |
| |
| IR::LabelInstr *labelDone = IR::LabelInstr::New(Js::OpCode::Label, func, false); |
| IR::LabelInstr *labelErr = IR::LabelInstr::New(Js::OpCode::Label, func, false); |
| IR::LabelInstr *labelNoErr = IR::LabelInstr::New(Js::OpCode::Label, func, false); |
| |
| IR::Opnd *opndUndefAddress = this->LoadLibraryValueOpnd(instr, LibraryValue::ValueUndefined); |
| IR::RegOpnd *instanceRegOpnd = IR::RegOpnd::New(TyMachPtr, func); |
| IR::RegOpnd *typeRegOpnd = IR::RegOpnd::New(TyMachPtr, func); |
| IR::RegOpnd *typeIdRegOpnd = IR::RegOpnd::New(TyUint32, func); |
| |
| IR::Opnd *dstOpnd = instr->GetDst(); |
| Assert(dstOpnd->IsRegOpnd()); |
| Lowerer::InsertMove(dstOpnd, opndUndefAddress, instr); |
| Lowerer::InsertMove(instanceRegOpnd, src1Opnd, instr); |
| |
| InsertTestBranch(instanceRegOpnd, instanceRegOpnd, Js::OpCode::BrEq_A, labelDone, instr); |
| this->GenerateRecyclableObjectIsElse(instr, instanceRegOpnd, labelDone); |
| |
| IR::IndirOpnd *indirOpnd = IR::IndirOpnd::New(instanceRegOpnd, Js::RecyclableObject::GetOffsetOfType(), TyMachPtr, func); |
| Lowerer::InsertMove(typeRegOpnd, indirOpnd, instr); |
| |
| indirOpnd = IR::IndirOpnd::New(typeRegOpnd, Js::Type::GetOffsetOfTypeId(), TyUint32, func); |
| Lowerer::InsertMove(typeIdRegOpnd, indirOpnd, instr); |
| |
| InsertCompareBranch(typeIdRegOpnd, IR::IntConstOpnd::New(Js::TypeId::TypeIds_Null, TyUint32, func, true), Js::OpCode::BrEq_A, labelErr, instr); |
| InsertCompareBranch(typeIdRegOpnd, IR::IntConstOpnd::New(Js::TypeId::TypeIds_Undefined, TyUint32, func, true), Js::OpCode::BrNeq_A, labelNoErr, instr); |
| |
| instr->InsertBefore(labelErr); |
| this->GenerateRuntimeError(instr, JSERR_BadSuperReference, IR::HelperOp_RuntimeReferenceError); |
| |
| instr->InsertBefore(labelNoErr); |
| |
| this->GenerateRecyclableObjectGetPrototypeNullptrGoto(instr, instanceRegOpnd, labelDone); |
| this->GenerateRecyclableObjectIsElse(instr, instanceRegOpnd, labelDone); |
| |
| Lowerer::InsertMove(dstOpnd, instanceRegOpnd, instr); |
| |
| instr->InsertBefore(labelDone); |
| instr->Remove(); |
| } |
| |
| void |
| Lowerer::GenerateLdFuncObj(IR::Instr* instr) |
| { |
| // MOV dst, functionObject // functionObject through stack params or src1 |
| |
| IR::Opnd *dstOpnd = instr->GetDst(); |
| IR::Opnd *functionObjOpnd = nullptr; |
| |
| m_lowererMD.LoadFunctionObjectOpnd(instr, functionObjOpnd); |
| Lowerer::InsertMove(dstOpnd, functionObjOpnd, instr); |
| instr->Remove(); |
| } |
| |
| void |
| Lowerer::GenerateLdFuncObjProto(IR::Instr* instr) |
| { |
| // MOV instance, src1 |
| // |
| // instance = ((RecyclableObject*)instance)->GetPrototype(); |
| // if (instance == nullptr) goto $ThrowTypeError; |
| // |
| // MOV dst, instance |
| // |
| // if (!JavascriptOperators::IsConstructor(instance)) |
| // goto $ThrowTypeError; |
| // else |
| // goto $Done; |
| // |
| // $helperLabelThrowTypeError: |
| // ThrowRuntimeTypeError(JSERR_NotAConstructor); |
| // |
| // $Done: |
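    //
    // Illustrative: this feeds a derived-class `super(...)` call. For
    // `class C extends B`, the [[Prototype]] of C is B, so GetPrototype()
    // yields the base constructor, which must itself be a constructor or we
    // throw JSERR_NotAConstructor.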
| |
| Func *func = instr->m_func; |
| IR::Opnd *src1Opnd = instr->UnlinkSrc1(); |
| |
| IR::LabelInstr *labelDone = IR::LabelInstr::New(Js::OpCode::Label, func, false); |
| IR::LabelInstr *helperLabelThrowTypeError = IR::LabelInstr::New(Js::OpCode::Label, func, false); |
| |
| IR::RegOpnd *instanceRegOpnd = IR::RegOpnd::New(TyMachPtr, func); |
| IR::Opnd *dstOpnd = instr->GetDst(); |
| |
| Lowerer::InsertMove(instanceRegOpnd, src1Opnd, instr); |
| |
| this->GenerateRecyclableObjectGetPrototypeNullptrGoto(instr, instanceRegOpnd, helperLabelThrowTypeError); |
| |
| Lowerer::InsertMove(dstOpnd, instanceRegOpnd, instr); |
| |
| this->GenerateJavascriptOperatorsIsConstructorGotoElse(instr, instanceRegOpnd, labelDone, helperLabelThrowTypeError); |
| |
| instr->InsertBefore(helperLabelThrowTypeError); |
| this->GenerateRuntimeError(instr, JSERR_NotAConstructor, IR::HelperOp_RuntimeTypeError); |
| |
| instr->InsertBefore(labelDone); |
| instr->Remove(); |
| } |
| |
| void |
| Lowerer::GenerateLoadNewTarget(IR::Instr* instrInsert) |
| { |
| Func *func = instrInsert->m_func; |
| |
| IR::LabelInstr * labelDone = IR::LabelInstr::New(Js::OpCode::Label, func, false); |
| IR::LabelInstr * labelLoadArgNewTarget = IR::LabelInstr::New(Js::OpCode::Label, func, false); |
| IR::Opnd* opndUndefAddress = this->LoadLibraryValueOpnd(instrInsert, LibraryValue::ValueUndefined); |
| |
| Assert(!func->IsInlinee()); |
| |
| if (func->GetJITFunctionBody()->IsCoroutine()) |
| { |
| instrInsert->SetSrc1(opndUndefAddress); |
| LowererMD::ChangeToAssign(instrInsert); |
| return; |
| } |
| |
| // MOV dst, undefined // dst = undefined |
| // MOV s1, callInfo // s1 = callInfo |
| // TEST s1, Js::CallFlags_NewTarget << 24 // if (callInfo.Flags & Js::CallFlags_NewTarget) |
| // JNE $LoadLastArgument // goto $LoadLastArgument |
| // TEST s1, Js::CallFlags_New << 24 // if (!(callInfo.Flags & Js::CallFlags_New)) |
| // JE $Done // goto $Done |
| // MOV dst, functionObject // dst = functionObject |
| // JMP $Done // goto $Done |
| // $LoadLastArgument |
    // AND s1, s1, 0x00FFFFFF                 // s1 = callInfo.Count (== arguments.length + 2)
| // MOV dst, [ebp + (s1 - 1) * sizeof(Var) + formalParamOffset * sizeof(Var) ] // points to new.target |
| // $Done |
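    //
    // CallInfo layout note: the low 24 bits (Js::CallInfo::ksizeofCount) hold
    // the argument count and the call flags sit above them, which is why the
    // flag constants below are shifted left by ksizeofCount before the TEST.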
| |
| IR::Opnd *dstOpnd = instrInsert->GetDst(); |
| Assert(dstOpnd->IsRegOpnd()); |
| Lowerer::InsertMove(dstOpnd, opndUndefAddress, instrInsert); |
| |
| IR::SymOpnd *callInfoOpnd = Lowerer::LoadCallInfo(instrInsert); |
| Assert(Js::CallInfo::ksizeofCount == 24); |
| |
| IR::RegOpnd *s1 = IR::RegOpnd::New(TyUint32, func); |
| Lowerer::InsertMove(s1, callInfoOpnd, instrInsert); |
| |
| InsertTestBranch(s1, IR::IntConstOpnd::New((IntConstType)Js::CallFlags_NewTarget << Js::CallInfo::ksizeofCount, TyUint32, func, true), Js::OpCode::BrNeq_A, labelLoadArgNewTarget, instrInsert); |
| |
| InsertTestBranch(s1, IR::IntConstOpnd::New((IntConstType)Js::CallFlags_New << Js::CallInfo::ksizeofCount, TyUint32, func, true), Js::OpCode::BrEq_A, labelDone, instrInsert); |
| |
| IR::Instr* loadFuncInstr = IR::Instr::New(Js::OpCode::AND, func); |
| loadFuncInstr->SetDst(instrInsert->GetDst()); |
| LoadFuncExpression(loadFuncInstr); |
| |
| instrInsert->InsertBefore(loadFuncInstr); |
| InsertBranch(Js::OpCode::Br, labelDone, instrInsert); |
| |
| instrInsert->InsertBefore(labelLoadArgNewTarget); |
| |
| InsertAnd(s1, s1, IR::IntConstOpnd::New(0x00FFFFFF, TyUint32, func, true), instrInsert); // callInfo.Count |
| |
| // [formalOffset (4) + callInfo.Count] points to 'new.target' - see diagram in GenerateLoadStackArgumentByIndex() |
| GenerateLoadStackArgumentByIndex(dstOpnd, s1, instrInsert, 0, m_func); |
| |
| instrInsert->InsertBefore(labelDone); |
| instrInsert->Remove(); |
| } |
| |
| void |
| Lowerer::GenerateGetCurrentFunctionObject(IR::Instr * instr) |
| { |
| Func * func = this->m_func; |
| IR::Instr * insertBeforeInstr = instr->m_next; |
| IR::RegOpnd * functionObjectOpnd = instr->GetDst()->AsRegOpnd(); |
| IR::Opnd * vtableAddressOpnd = this->LoadVTableValueOpnd(insertBeforeInstr, VTableValue::VtableStackScriptFunction); |
| IR::LabelInstr * labelDone = IR::LabelInstr::New(Js::OpCode::Label, func, false); |
| IR::BranchInstr *branchInstr = InsertCompareBranch(IR::IndirOpnd::New(functionObjectOpnd, 0, TyMachPtr, func), vtableAddressOpnd, |
| Js::OpCode::BrNeq_A, true, labelDone, insertBeforeInstr); |
| InsertObjectPoison(functionObjectOpnd, branchInstr, insertBeforeInstr, false); |
| IR::RegOpnd * boxedFunctionObjectOpnd = IR::RegOpnd::New(TyMachPtr, func); |
| InsertMove(boxedFunctionObjectOpnd, IR::IndirOpnd::New(functionObjectOpnd, |
| Js::StackScriptFunction::GetOffsetOfBoxedScriptFunction(), TyMachPtr, func), insertBeforeInstr); |
| InsertTestBranch(boxedFunctionObjectOpnd, boxedFunctionObjectOpnd, Js::OpCode::BrEq_A, true, labelDone, insertBeforeInstr); |
| InsertMove(functionObjectOpnd, boxedFunctionObjectOpnd, insertBeforeInstr); |
| insertBeforeInstr->InsertBefore(labelDone); |
| } |
| |
| IR::Opnd * |
| Lowerer::GetInlineCacheFromFuncObjectForRuntimeUse(IR::Instr * instr, IR::PropertySymOpnd * propSymOpnd, bool isHelper) |
| { |
| // MOV s1, [ebp + 8] //s1 = function object |
| // MOV s2, [s1 + offset(hasInlineCaches)] |
| // TEST s2, s2 |
| // JE $L1 |
| // MOV s3, [s1 + offset(m_inlineCaches)] //s3 = inlineCaches from function object |
| // MOV s4, [s3 + index*scale] //s4 = inlineCaches[index] |
| // JMP $L2 |
| // $L1 |
| // MOV s3, propSym->m_runtimeCache |
| // $L2 |
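    //
    // That is, prefer the function object's own inline cache array when
    // hasInlineCaches is set, and fall back to the runtime cache recorded on
    // the property sym operand otherwise.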
| |
| byte indirScale = this->m_lowererMD.GetDefaultIndirScale(); |
| |
| IR::RegOpnd * funcObjOpnd = IR::RegOpnd::New(TyMachPtr, this->m_func); |
| IR::Instr * funcObjInstr = IR::Instr::New(Js::OpCode::Ld_A, funcObjOpnd, instr->m_func); |
| instr->InsertBefore(funcObjInstr); |
| LoadFuncExpression(funcObjInstr); |
| |
| IR::RegOpnd * funcObjHasInlineCachesOpnd = IR::RegOpnd::New(TyMachPtr, instr->m_func); |
| this->InsertMove(funcObjHasInlineCachesOpnd, IR::IndirOpnd::New(funcObjOpnd, Js::ScriptFunction::GetOffsetOfHasInlineCaches(), TyUint8, instr->m_func), instr); |
| |
| IR::LabelInstr * inlineCachesNullLabel = IR::LabelInstr::New(Js::OpCode::Label, instr->m_func, isHelper); |
| InsertTestBranch(funcObjHasInlineCachesOpnd, funcObjHasInlineCachesOpnd, Js::OpCode::BrEq_A, inlineCachesNullLabel, instr); |
| |
| IR::RegOpnd * inlineCachesOpnd = IR::RegOpnd::New(TyMachPtr, instr->m_func); |
| Lowerer::InsertMove(inlineCachesOpnd, IR::IndirOpnd::New(funcObjOpnd, Js::ScriptFunctionWithInlineCache::GetOffsetOfInlineCaches(), TyMachPtr, instr->m_func), instr); |
| |
| IR::RegOpnd * inlineCacheOpnd = IR::RegOpnd::New(TyMachPtr, instr->m_func); |
| IR::RegOpnd * indexOpnd = IR::RegOpnd::New(TyMachReg, instr->m_func); |
| int inlineCacheOffset; |
| if (!Int32Math::Mul(sizeof(Js::InlineCache *), propSymOpnd->m_inlineCacheIndex, &inlineCacheOffset)) |
| { |
| Lowerer::InsertMove(inlineCacheOpnd, IR::IndirOpnd::New(inlineCachesOpnd, inlineCacheOffset, TyMachPtr, instr->m_func), instr); |
| } |
| else |
| { |
| Lowerer::InsertMove(indexOpnd, IR::IntConstOpnd::New(propSymOpnd->m_inlineCacheIndex, TyUint32, instr->m_func), instr); |
| Lowerer::InsertMove(inlineCacheOpnd, IR::IndirOpnd::New(inlineCachesOpnd, indexOpnd, indirScale, TyMachPtr, instr->m_func), instr); |
| } |
| |
| IR::LabelInstr * continueLabel = IR::LabelInstr::New(Js::OpCode::Label, instr->m_func, isHelper); |
| InsertBranch(LowererMD::MDUncondBranchOpcode, continueLabel, instr); |
| |
| IR::Instr * ldCacheFromPropSymOpndInstr = this->InsertMove(inlineCacheOpnd, IR::AddrOpnd::New(propSymOpnd->m_runtimeInlineCache, IR::AddrOpndKindDynamicInlineCache, this->m_func), instr); |
| ldCacheFromPropSymOpndInstr->InsertBefore(inlineCachesNullLabel); |
| |
| ldCacheFromPropSymOpndInstr->InsertAfter(continueLabel); |
| |
| return inlineCacheOpnd; |
| } |
| |
| IR::Instr * |
| Lowerer::LowerInitClass(IR::Instr * instr) |
| { |
| // scriptContext |
| IR::Instr * prevInstr = LoadScriptContext(instr); |
| |
| // extends |
| if (instr->GetSrc2() != nullptr) |
| { |
| IR::Opnd * extendsOpnd = instr->UnlinkSrc2(); |
| m_lowererMD.LoadHelperArgument(instr, extendsOpnd); |
| } |
| else |
| { |
| IR::AddrOpnd* extendsOpnd = IR::AddrOpnd::NewNull(this->m_func); |
| m_lowererMD.LoadHelperArgument(instr, extendsOpnd); |
| } |
| |
| // constructor |
| IR::Opnd * ctorOpnd = instr->UnlinkSrc1(); |
| m_lowererMD.LoadHelperArgument(instr, ctorOpnd); |
| |
| // call |
| m_lowererMD.ChangeToHelperCall(instr, IR::HelperOP_InitClass); |
| |
| return prevInstr; |
| } |
| |
| void |
| Lowerer::LowerNewConcatStrMulti(IR::Instr * instr) |
| { |
| IR::IntConstOpnd * countOpnd = instr->UnlinkSrc1()->AsIntConstOpnd(); |
| IR::RegOpnd * dstOpnd = instr->UnlinkDst()->AsRegOpnd(); |
| uint8 count = (uint8)countOpnd->GetValue(); |
| |
| Assert(dstOpnd->GetValueType().IsString()); |
| |
| GenerateRecyclerAlloc(IR::HelperAllocMemForConcatStringMulti, Js::ConcatStringMulti::GetAllocSize(count), dstOpnd, instr); |
| |
| GenerateRecyclerMemInit(dstOpnd, 0, this->LoadVTableValueOpnd(instr, VTableValue::VtableConcatStringMulti), instr); |
| GenerateRecyclerMemInit(dstOpnd, Js::ConcatStringMulti::GetOffsetOfType(), |
| this->LoadLibraryValueOpnd(instr, LibraryValue::ValueStringTypeStatic), instr); |
| GenerateRecyclerMemInitNull(dstOpnd, Js::ConcatStringMulti::GetOffsetOfpszValue(), instr); |
| GenerateRecyclerMemInit(dstOpnd, Js::ConcatStringMulti::GetOffsetOfcharLength(), 0, instr); |
| GenerateRecyclerMemInit(dstOpnd, Js::ConcatStringMulti::GetOffsetOfSlotCount(), countOpnd->AsUint32(), instr); |
| |
| instr->Remove(); |
| } |
| |
| void |
| Lowerer::LowerNewConcatStrMultiBE(IR::Instr * instr) |
| { |
| // Lower |
| // t1 = SetConcatStrMultiBE s1 |
| // t2 = SetConcatStrMultiBE s2, t1 |
| // t3 = SetConcatStrMultiBE s3, t2 |
| // s = NewConcatStrMultiBE 3, t3 |
| // to |
| // s = new concat string |
| // s+0 = s1 |
| // s+1 = s2 |
| // s+2 = s3 |
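    // Illustrative JS shape: `s = s1 + s2 + s3` with string operands. The item
    // chain is walked from the last index (count - 1) down to index 0 below.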
| Assert(instr->GetSrc1()->IsConstOpnd()); |
| Assert(instr->GetDst()->IsRegOpnd()); |
| |
| IR::RegOpnd * newString = instr->GetDst()->AsRegOpnd(); |
| |
| IR::Opnd * newConcatItemOpnd = nullptr; |
| uint index = instr->GetSrc1()->AsIntConstOpnd()->AsUint32() - 1; |
| IR::Instr * concatItemInstr = nullptr; |
| IR::Opnd * linkOpnd = instr->GetSrc2(); |
| while (linkOpnd) |
| { |
| Assert(linkOpnd->IsRegOpnd()); |
| concatItemInstr = linkOpnd->GetStackSym()->GetInstrDef(); |
| Assert(concatItemInstr->m_opcode == Js::OpCode::SetConcatStrMultiItemBE); |
| |
| IR::Opnd * concatItemOpnd = concatItemInstr->GetSrc1(); |
| Assert(concatItemOpnd->IsRegOpnd()); |
| |
| // If one of the concat items is equal to the dst of the concat expressions (s = s + a + b), |
| // hoist the load of that item to before the setting of the new string to the dst. |
| if (concatItemOpnd->IsEqual(newString)) |
| { |
| if (!newConcatItemOpnd) |
| { |
| IR::Instr * hoistSrcInstr = concatItemInstr->HoistSrc1(Js::OpCode::Ld_A); |
| newConcatItemOpnd = hoistSrcInstr->GetDst(); |
| } |
| concatItemOpnd = newConcatItemOpnd; |
| } |
| else |
| { |
| // If only some of the SetConcatStrMultiItemBE instructions were CSE'd and the rest, along with the NewConcatStrMultiBE |
| // instruction, were in a loop, the strings on the CSE'd Set*BE instructions will become live on back edge. Add them to |
| // addToLiveOnBackEdgeSyms here and clear when we reach the Set*BE instruction. |
| |
            // Note that we do this only for string opnds that are not the same as the dst of the concat expression. The
            // reasoning is that if a loop has a concat expression with one of its sources the same as the dst, the Set*BE
            // instruction for the dst wouldn't have been CSE'd, since the dst's value changes in the loop, and the backward
            // pass should already have marked the symbol as live on back edge.
| this->addToLiveOnBackEdgeSyms->Set(concatItemOpnd->GetStackSym()->m_id); |
| } |
| IR::Instr * newConcatItemInstr = IR::Instr::New(Js::OpCode::SetConcatStrMultiItem, |
| IR::IndirOpnd::New(newString, index, TyVar, instr->m_func), |
| concatItemOpnd, |
| instr->m_func); |
| instr->InsertAfter(newConcatItemInstr); |
| this->LowerSetConcatStrMultiItem(newConcatItemInstr); |
| |
| linkOpnd = concatItemInstr->GetSrc2(); |
| index--; |
| } |
    Assert(index == (uint)-1); // the uint index wraps past zero once all items have been processed
| this->LowerNewConcatStrMulti(instr); |
| } |
| |
| void |
| Lowerer::LowerSetConcatStrMultiItem(IR::Instr * instr) |
| { |
| Func * func = this->m_func; |
| IR::IndirOpnd * dstOpnd = instr->GetDst()->AsIndirOpnd(); |
| IR::RegOpnd * concatStrOpnd = dstOpnd->GetBaseOpnd(); |
| IR::RegOpnd * srcOpnd = instr->UnlinkSrc1()->AsRegOpnd(); |
| |
| Assert(concatStrOpnd->GetValueType().IsString()); |
| Assert(srcOpnd->GetValueType().IsString()); |
| srcOpnd = GenerateGetImmutableOrScriptUnreferencedString(srcOpnd, instr, IR::HelperOp_CompoundStringCloneForConcat); |
| instr->SetSrc1(srcOpnd); |
| |
| IR::IndirOpnd * dstLength = IR::IndirOpnd::New(concatStrOpnd, Js::ConcatStringMulti::GetOffsetOfcharLength(), TyUint32, func); |
| IR::Opnd * srcLength; |
| |
| if (srcOpnd->m_sym->m_isStrConst) |
| { |
| srcLength = IR::IntConstOpnd::New(JITJavascriptString::FromVar(srcOpnd->m_sym->GetConstAddress(true))->GetLength(), TyUint32, func); |
| } |
| else |
| { |
| srcLength = IR::RegOpnd::New(TyUint32, func); |
| InsertMove(srcLength, IR::IndirOpnd::New(srcOpnd, Js::ConcatStringMulti::GetOffsetOfcharLength(), TyUint32, func), instr); |
| } |
| |
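    // The combined character length accumulates in the concat string's charLength
    // field; if the uint32 addition overflows, we fall into a call to
    // HelperOp_OutOfMemoryError, since the string would exceed the length limit.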
| IR::Instr *onOverflowInsertBeforeInstr; |
| InsertAddWithOverflowCheck(false, dstLength, dstLength, srcLength, instr, &onOverflowInsertBeforeInstr); |
| IR::Instr* callInstr = IR::Instr::New(Js::OpCode::Call, func); |
| callInstr->SetSrc1(IR::HelperCallOpnd::New(IR::HelperOp_OutOfMemoryError, func)); |
| |
| instr->InsertBefore(onOverflowInsertBeforeInstr); |
| onOverflowInsertBeforeInstr->InsertBefore(callInstr); |
| this->m_lowererMD.LowerCall(callInstr, 0); |
| dstOpnd->SetOffset(dstOpnd->GetOffset() * sizeof(Js::JavascriptString *) + Js::ConcatStringMulti::GetOffsetOfSlots()); |
| |
| LowererMD::ChangeToWriteBarrierAssign(instr, func); |
| } |
| |
| IR::RegOpnd * |
| Lowerer::GenerateGetImmutableOrScriptUnreferencedString(IR::RegOpnd * strOpnd, IR::Instr * insertBeforeInstr, IR::JnHelperMethod helperMethod, bool reloadDst) |
| { |
| if (strOpnd->m_sym->m_isStrConst) |
| { |
| return strOpnd; |
| } |
| |
| Func * const func = this->m_func; |
    IR::RegOpnd *dstOpnd = reloadDst ? IR::RegOpnd::New(TyVar, func) : strOpnd;
| IR::LabelInstr * helperLabel = IR::LabelInstr::New(Js::OpCode::Label, func, true); |
| IR::LabelInstr * doneLabel = IR::LabelInstr::New(Js::OpCode::Label, func); |
| |
| if (!strOpnd->IsNotTaggedValue()) |
| { |
| this->m_lowererMD.GenerateObjectTest(strOpnd, insertBeforeInstr, doneLabel); |
| } |
| // CMP [strOpnd], Js::CompoundString::`vtable' |
| // JEQ $helper |
| InsertCompareBranch( |
| IR::IndirOpnd::New(strOpnd, 0, TyMachPtr, func), |
| this->LoadVTableValueOpnd(insertBeforeInstr, VTableValue::VtableCompoundString), |
| Js::OpCode::BrEq_A, |
| helperLabel, |
| insertBeforeInstr); |
| |
| if (reloadDst) |
| { |
| InsertMove(dstOpnd, strOpnd, insertBeforeInstr); |
| } |
| |
| InsertBranch(Js::OpCode::Br, doneLabel, insertBeforeInstr); |
| insertBeforeInstr->InsertBefore(helperLabel); |
| |
| this->m_lowererMD.LoadHelperArgument(insertBeforeInstr, strOpnd); |
| IR::Instr* callInstr = IR::Instr::New(Js::OpCode::Call, dstOpnd, func); |
| callInstr->SetSrc1(IR::HelperCallOpnd::New(helperMethod, func)); |
| insertBeforeInstr->InsertBefore(callInstr); |
| this->m_lowererMD.LowerCall(callInstr, 0); |
| |
| insertBeforeInstr->InsertBefore(doneLabel); |
| |
| return dstOpnd; |
| } |
| |
| void |
| Lowerer::LowerConvStrCommon(IR::JnHelperMethod helper, IR::Instr * instr) |
| { |
| IR::RegOpnd * src1Opnd = instr->UnlinkSrc1()->AsRegOpnd(); |
| if (!src1Opnd->GetValueType().IsNotString()) |
| { |
| IR::LabelInstr * helperLabel = IR::LabelInstr::New(Js::OpCode::Label, m_func, true); |
| IR::LabelInstr * doneLabel = IR::LabelInstr::New(Js::OpCode::Label, m_func); |
| this->GenerateStringTest(src1Opnd, instr, helperLabel); |
| InsertMove(instr->GetDst(), src1Opnd, instr); |
| InsertBranch(Js::OpCode::Br, doneLabel, instr); |
| instr->InsertBefore(helperLabel); |
| instr->InsertAfter(doneLabel); |
| } |
| if (instr->GetSrc2()) |
| { |
| this->m_lowererMD.LoadHelperArgument(instr, instr->UnlinkSrc2()); |
| } |
| |
| this->LoadScriptContext(instr); |
| this->m_lowererMD.LoadHelperArgument(instr, src1Opnd); |
| this->m_lowererMD.ChangeToHelperCall(instr, helper); |
| } |
| |
| void |
| Lowerer::LowerConvStr(IR::Instr * instr) |
| { |
| LowerConvStrCommon(IR::HelperOp_ConvString, instr); |
| } |
| |
| void |
| Lowerer::LowerCoerseStr(IR::Instr* instr) |
| { |
| LowerConvStrCommon(IR::HelperOp_CoerseString, instr); |
| } |
| |
| ///---------------------------------------------------------------------------- |
| /// |
/// Lowerer::LowerCoerseStrOrRegex - This method is used for String.Replace(arg1, arg2),
///                         where arg1 is a regex or a string;
///                         if arg1 is not a regex, then do String.Replace(CoerseStr(arg1), arg2):
///
///                      CoerseStrOrRegex arg1
///
///                      if (value == regex) goto $done
///                      else
/// helper:
///                      ConvStr value
/// done:
| ///---------------------------------------------------------------------------- |
| void |
| Lowerer::LowerCoerseStrOrRegex(IR::Instr* instr) |
| { |
| IR::RegOpnd * src1Opnd = instr->GetSrc1()->AsRegOpnd(); |
| |
| IR::LabelInstr * helperLabel = IR::LabelInstr::New(Js::OpCode::Label, m_func, true); |
| IR::LabelInstr * doneLabel = IR::LabelInstr::New(Js::OpCode::Label, m_func); |
| |
    // if (value == regex) goto $done
| if (!src1Opnd->IsNotTaggedValue()) |
| { |
| this->m_lowererMD.GenerateObjectTest(src1Opnd, instr, helperLabel); |
| } |
| |
| IR::Opnd * vtableOpnd = LoadVTableValueOpnd(instr, VTableValue::VtableJavascriptRegExp); |
| |
| InsertCompareBranch(IR::IndirOpnd::New(src1Opnd, 0, TyMachPtr, instr->m_func), |
| vtableOpnd, Js::OpCode::BrNeq_A, helperLabel, instr); |
| |
| InsertMove(instr->GetDst(), src1Opnd, instr); |
| InsertBranch(Js::OpCode::Br, doneLabel, instr); |
| instr->InsertBefore(helperLabel); |
| |
| instr->InsertAfter(doneLabel); |
| |
| // helper: ConvStr value |
| LowerConvStr(instr); |
| } |
| |
| ///---------------------------------------------------------------------------- |
| /// |
/// Lowerer::LowerCoerseRegex - This method is used for String.Match(arg1):
///                          if arg1 is already a regex, pass it through as-is;
///                          otherwise call the helper to create a regex from it
| /// |
| ///---------------------------------------------------------------------------- |
| void |
| Lowerer::LowerCoerseRegex(IR::Instr* instr) |
| { |
| IR::LabelInstr * helperLabel = IR::LabelInstr::New(Js::OpCode::Label, m_func, true); |
| IR::LabelInstr * doneLabel = IR::LabelInstr::New(Js::OpCode::Label, m_func); |
| IR::RegOpnd * src1Opnd = instr->UnlinkSrc1()->AsRegOpnd(); |
| if (!src1Opnd->IsNotTaggedValue()) |
| { |
| this->m_lowererMD.GenerateObjectTest(src1Opnd, instr, helperLabel); |
| } |
| |
| IR::Opnd * vtableOpnd = LoadVTableValueOpnd(instr, VTableValue::VtableJavascriptRegExp); |
| |
| InsertCompareBranch(IR::IndirOpnd::New(src1Opnd, 0, TyMachPtr, instr->m_func), |
| vtableOpnd, Js::OpCode::BrNeq_A, helperLabel, instr); |
| |
| InsertMove(instr->GetDst(), src1Opnd, instr); |
| InsertBranch(Js::OpCode::Br, doneLabel, instr); |
| instr->InsertBefore(helperLabel); |
| instr->InsertAfter(doneLabel); |
| |
| this->LoadScriptContext(instr); |
| this->m_lowererMD.LoadHelperArgument(instr, IR::AddrOpnd::NewNull(instr->m_func)); // option |
| this->m_lowererMD.LoadHelperArgument(instr, src1Opnd); // regex |
| this->m_lowererMD.ChangeToHelperCall(instr, IR::HelperOp_CoerseRegex); |
| } |
| |
| void |
| Lowerer::LowerConvPrimStr(IR::Instr * instr) |
| { |
| LowerConvStrCommon(IR::HelperOp_ConvPrimitiveString, instr); |
| } |
| |
| void |
| Lowerer::GenerateRecyclerAlloc(IR::JnHelperMethod allocHelper, size_t allocSize, IR::RegOpnd* newObjDst, IR::Instr* insertionPointInstr, bool inOpHelper) |
| { |
| size_t alignedSize = HeapInfo::GetAlignedSizeNoCheck(allocSize); |
| this->GenerateRecyclerAllocAligned(allocHelper, alignedSize, newObjDst, insertionPointInstr, inOpHelper); |
| } |
| |
| void |
| Lowerer::GenerateMemInit(IR::RegOpnd * opnd, int32 offset, int32 value, IR::Instr * insertBeforeInstr, bool isZeroed) |
| { |
| IRType type = TyInt32; |
| if (isZeroed) |
| { |
| if (value == 0) |
| { |
            // Recycler memory is zero-initialized
| return; |
| } |
| |
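        // Example: value 0x01 needs only an 8-bit store and 0x0101 only a
        // 16-bit store, since the remaining upper bytes are already zero.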
| if (value > 0 && value <= USHORT_MAX) |
| { |
            // Recycler memory is zero-initialized, so we only need to store the low 8 or 16 bits of the value
| type = (value <= UCHAR_MAX)? TyUint8 : TyUint16; |
| } |
| } |
| Func * func = this->m_func; |
| InsertMove(IR::IndirOpnd::New(opnd, offset, type, func), IR::IntConstOpnd::New(value, type, func), insertBeforeInstr); |
| } |
| |
| void |
| Lowerer::GenerateMemInit(IR::RegOpnd * opnd, int32 offset, uint32 value, IR::Instr * insertBeforeInstr, bool isZeroed) |
| { |
| IRType type = TyUint32; |
| if (isZeroed) |
| { |
| if (value == 0) |
| { |
            // Recycler memory is zero-initialized
| return; |
| } |
| |
| if (value <= USHORT_MAX) |
| { |
            // Recycler memory is zero-initialized, so we only need to store the low 8 or 16 bits of the value
| type = (value <= UCHAR_MAX)? TyUint8 : TyUint16; |
| } |
| } |
| |
| Func * func = this->m_func; |
| InsertMove(IR::IndirOpnd::New(opnd, offset, type, func), IR::IntConstOpnd::New(value, type, func), insertBeforeInstr); |
| } |
| |
| void |
| Lowerer::GenerateMemInitNull(IR::RegOpnd * opnd, int32 offset, IR::Instr * insertBeforeInstr, bool isZeroed) |
| { |
| if (isZeroed) |
| { |
| return; |
| } |
| GenerateMemInit(opnd, offset, IR::AddrOpnd::NewNull(m_func), insertBeforeInstr); |
| } |
| |
| void |
| Lowerer::GenerateMemInit(IR::RegOpnd * opnd, int32 offset, IR::Opnd * value, IR::Instr * insertBeforeInstr, bool isZeroed) |
| { |
| IRType type = value->GetType(); |
| |
| Func * func = this->m_func; |
| InsertMove(IR::IndirOpnd::New(opnd, offset, type, func), value, insertBeforeInstr); |
| } |
| |
| void |
| Lowerer::GenerateMemInit(IR::RegOpnd * opnd, IR::RegOpnd * offset, IR::Opnd * value, IR::Instr * insertBeforeInstr, bool isZeroed) |
| { |
| IRType type = value->GetType(); |
| |
| Func * func = this->m_func; |
| InsertMove(IR::IndirOpnd::New(opnd, offset, type, func), value, insertBeforeInstr); |
| } |
| |
| void |
| Lowerer::GenerateRecyclerMemInit(IR::RegOpnd * opnd, int32 offset, int32 value, IR::Instr * insertBeforeInstr) |
| { |
| GenerateMemInit(opnd, offset, value, insertBeforeInstr, true); |
| } |
| |
| void |
| Lowerer::GenerateRecyclerMemInit(IR::RegOpnd * opnd, int32 offset, uint32 value, IR::Instr * insertBeforeInstr) |
| { |
| GenerateMemInit(opnd, offset, value, insertBeforeInstr, true); |
| } |
| |
| void |
| Lowerer::GenerateRecyclerMemInitNull(IR::RegOpnd * opnd, int32 offset, IR::Instr * insertBeforeInstr) |
| { |
| GenerateMemInitNull(opnd, offset, insertBeforeInstr, true); |
| } |
| |
| void |
| Lowerer::GenerateRecyclerMemInit(IR::RegOpnd * opnd, int32 offset, IR::Opnd * value, IR::Instr * insertBeforeInstr) |
| { |
| GenerateMemInit(opnd, offset, value, insertBeforeInstr, true); |
| } |
| |
| void |
| Lowerer::GenerateMemCopy(IR::Opnd * dst, IR::Opnd * src, uint32 size, IR::Instr * insertBeforeInstr) |
| { |
| Func * func = this->m_func; |
| this->m_lowererMD.LoadHelperArgument(insertBeforeInstr, IR::IntConstOpnd::New(size, TyUint32, func)); |
| this->m_lowererMD.LoadHelperArgument(insertBeforeInstr, src); |
| this->m_lowererMD.LoadHelperArgument(insertBeforeInstr, dst); |
| IR::Instr * memcpyInstr = IR::Instr::New(Js::OpCode::Call, func); |
| memcpyInstr->SetSrc1(IR::HelperCallOpnd::New(IR::HelperMemCpy, func)); |
| insertBeforeInstr->InsertBefore(memcpyInstr); |
| m_lowererMD.LowerCall(memcpyInstr, 3); |
| } |
| |
| bool |
| Lowerer::GenerateSimplifiedInt4Rem( |
| IR::Instr *const remInstr, |
| IR::LabelInstr *const skipBailOutLabel) const |
| { |
| Assert(remInstr); |
| Assert(remInstr->m_opcode == Js::OpCode::Rem_I4 || remInstr->m_opcode == Js::OpCode::RemU_I4); |
| |
| auto *dst = remInstr->GetDst(), *src1 = remInstr->GetSrc1(), *src2 = remInstr->GetSrc2(); |
| |
| Assert(src1 && src2); |
| Assert(dst->IsRegOpnd()); |
| |
| bool isModByPowerOf2 = (remInstr->HasBailOutInfo() && remInstr->GetBailOutKind() == IR::BailOnModByPowerOf2); |
| |
| if (PHASE_OFF(Js::Phase::MathFastPathPhase, remInstr->m_func->GetTopFunc()) && !isModByPowerOf2) |
| return false; |
| |
| if (!(src2->IsIntConstOpnd() && Math::IsPow2(src2->AsIntConstOpnd()->AsInt32())) && !isModByPowerOf2) |
| { |
| return false; |
| } |
| // We have: |
| // s3 = s1 % s2 , where s2 = +2^i |
| // |
| // Generate: |
| // test s1, s1 |
    //   jsb $slowPathLabel
| // s3 = and s1, 0x00..fff (2^i - 1) |
| // jmp $doneLabel |
| // $slowPathLabel: |
| // (Slow path) |
| // (Neg zero check) |
| // (Bailout code) |
| // $doneLabel: |
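    //
    // Worked example: for s1 % 8 with s1 >= 0, the fast path computes s1 & 7
    // directly; a negative dividend takes the slow path because the mask alone
    // would not produce the correctly signed remainder.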
| |
| IR::LabelInstr *doneLabel = skipBailOutLabel, *slowPathLabel; |
| |
| if (!doneLabel) |
| { |
| doneLabel = IR::LabelInstr::New(Js::OpCode::Label, remInstr->m_func); |
| remInstr->InsertAfter(doneLabel); |
| } |
| slowPathLabel = IR::LabelInstr::New(Js::OpCode::Label, remInstr->m_func, isModByPowerOf2); |
| remInstr->InsertBefore(slowPathLabel); |
| |
| // test s1, s1 |
| InsertTest(src1, src1, slowPathLabel); |
| |
| // jsb $slowPathLabel |
| InsertBranch(LowererMD::MDCompareWithZeroBranchOpcode(Js::OpCode::BrLt_A), slowPathLabel, slowPathLabel); |
| |
| // s3 = and s1, 0x00..fff (2^i - 1) |
| IR::Opnd* maskOpnd; |
| |
| if(isModByPowerOf2) |
| { |
| Assert(isModByPowerOf2); |
| maskOpnd = IR::RegOpnd::New(TyInt32, remInstr->m_func); |
| |
| // mov maskOpnd, s2 |
| InsertMove(maskOpnd, src2, slowPathLabel); |
| |
| // dec maskOpnd |
| InsertSub(/*needFlags*/ true, maskOpnd, maskOpnd, IR::IntConstOpnd::New(1, TyInt32, this->m_func, /*dontEncode*/true), slowPathLabel); |
| |
| // maskOpnd < 0 goto $slowPath |
| InsertBranch(LowererMD::MDCompareWithZeroBranchOpcode(Js::OpCode::BrLt_A), slowPathLabel, slowPathLabel); |
| |
| // TEST src2, maskOpnd |
| InsertTestBranch(src2, maskOpnd, Js::OpCode::BrNeq_A, slowPathLabel, slowPathLabel); |
| } |
| else |
| { |
| Assert(src2->IsIntConstOpnd()); |
| int32 mask = src2->AsIntConstOpnd()->AsInt32() - 1; |
| maskOpnd = IR::IntConstOpnd::New(mask, TyInt32, remInstr->m_func); |
| } |
| |
| // dst = src1 & maskOpnd |
| InsertAnd(dst, src1, maskOpnd, slowPathLabel); |
| |
| // jmp $doneLabel |
| InsertBranch(Js::OpCode::Br, doneLabel, slowPathLabel); |
| return true; |
| } |
| |
| #if DBG |
| bool |
| Lowerer::ValidOpcodeAfterLower(IR::Instr* instr, Func * func) |
| { |
| Js::OpCode opcode = instr->m_opcode; |
| if (opcode > Js::OpCode::MDStart) |
| { |
| return true; |
| } |
| switch (opcode) |
| { |
| case Js::OpCode::Ret: |
| case Js::OpCode::Label: |
| case Js::OpCode::StatementBoundary: |
| case Js::OpCode::DeletedNonHelperBranch: |
| case Js::OpCode::FunctionEntry: |
| case Js::OpCode::FunctionExit: |
| case Js::OpCode::TryCatch: |
| case Js::OpCode::TryFinally: |
| case Js::OpCode::Catch: |
| case Js::OpCode::GeneratorResumeJumpTable: |
| |
| case Js::OpCode::Break: |
| |
| #ifdef _M_X64 |
| case Js::OpCode::PrologStart: |
| case Js::OpCode::PrologEnd: |
| #endif |
| #ifdef _M_IX86 |
| case Js::OpCode::BailOutStackRestore: |
| #endif |
| return true; |
| |
| case Js::OpCode::RestoreOutParam: |
| Assert(func->isPostRegAlloc); |
| return true; |
| |
| // These may be removed by peep |
| case Js::OpCode::StartCall: |
| case Js::OpCode::LoweredStartCall: |
| case Js::OpCode::Nop: |
| case Js::OpCode::ArgOut_A_InlineBuiltIn: |
| return func && !func->isPostPeeps; |
| |
| case Js::OpCode::InlineeStart: |
| case Js::OpCode::InlineeEnd: |
| return instr->m_func->m_hasInlineArgsOpt; |
| #ifdef _M_X64 |
| case Js::OpCode::LdArgSize: |
| case Js::OpCode::LdSpillSize: |
| return func && !func->isPostFinalLower; |
| #endif |
| |
| case Js::OpCode::Leave: |
| Assert(!func->IsLoopBodyInTry()); |
| Assert(func->HasTry() && func->DoOptimizeTry()); |
| return func && !func->isPostFinalLower; //Lowered in FinalLower phase |
| |
| case Js::OpCode::LazyBailOutThunkLabel: |
| return func && func->HasLazyBailOut() && func->isPostFinalLower; //Lowered in FinalLower phase |
| }; |
| |
    return false;
}
| #endif |
| |
| void Lowerer::LowerProfiledBeginSwitch(IR::JitProfilingInstr* instr) |
| { |
| Assert(instr->isBeginSwitch); |
| |
| m_lowererMD.LoadHelperArgument(instr, instr->UnlinkSrc1()); |
| m_lowererMD.LoadHelperArgument(instr, IR::Opnd::CreateProfileIdOpnd(instr->profileId, m_func)); |
| m_lowererMD.LoadHelperArgument(instr, CreateFunctionBodyOpnd(instr->m_func)); |
| instr->SetSrc1(IR::HelperCallOpnd::New(IR::HelperSimpleProfiledSwitch, m_func)); |
| m_lowererMD.LowerCall(instr, 0); |
| } |
| |
| void Lowerer::LowerProfiledBinaryOp(IR::JitProfilingInstr* instr, IR::JnHelperMethod meth) |
| { |
| m_lowererMD.LoadHelperArgument(instr, instr->UnlinkSrc2()); |
| m_lowererMD.LoadHelperArgument(instr, instr->UnlinkSrc1()); |
| m_lowererMD.LoadHelperArgument(instr, IR::Opnd::CreateProfileIdOpnd(instr->profileId, m_func)); |
| m_lowererMD.LoadHelperArgument(instr, CreateFunctionBodyOpnd(instr->m_func)); |
| instr->SetSrc1(IR::HelperCallOpnd::New(meth, m_func)); |
| m_lowererMD.LowerCall(instr, 0); |
| } |
| |
| void Lowerer::GenerateNullOutGeneratorFrame(IR::Instr* insertInstr) |
| { |
    // Null out the frame pointer on the generator object to signal completion to JavascriptGenerator::CallGenerator
| // s = MOV prm1 |
| // s[offset of JavascriptGenerator::frame] = MOV nullptr |
| StackSym *symSrc = StackSym::NewImplicitParamSym(3, m_func); |
| m_func->SetArgOffset(symSrc, LowererMD::GetFormalParamOffset() * MachPtr); |
| IR::SymOpnd *srcOpnd = IR::SymOpnd::New(symSrc, TyMachPtr, m_func); |
| IR::RegOpnd *dstOpnd = IR::RegOpnd::New(TyMachReg, m_func); |
| InsertMove(dstOpnd, srcOpnd, insertInstr); |
| |
| IR::IndirOpnd *indirOpnd = IR::IndirOpnd::New(dstOpnd, Js::JavascriptGenerator::GetFrameOffset(), TyMachPtr, m_func); |
| IR::AddrOpnd *addrOpnd = IR::AddrOpnd::NewNull(m_func); |
| InsertMove(indirOpnd, addrOpnd, insertInstr); |
| } |
| |
| void Lowerer::LowerFunctionExit(IR::Instr* funcExit) |
| { |
| if (m_func->GetJITFunctionBody()->IsCoroutine()) |
| { |
| GenerateNullOutGeneratorFrame(funcExit->m_prev); |
| } |
| |
| if (!m_func->DoSimpleJitDynamicProfile()) |
| { |
| return; |
| } |
| |
| IR::Instr* callInstr = IR::Instr::New(Js::OpCode::Call, m_func); |
| callInstr->SetSrc1(IR::HelperCallOpnd::New(IR::HelperSimpleCleanImplicitCallFlags, m_func)); |
| funcExit->m_prev->InsertBefore(callInstr); |
| |
| m_lowererMD.LoadHelperArgument(callInstr, CreateFunctionBodyOpnd(funcExit->m_func)); |
| m_lowererMD.LowerCall(callInstr, 0); |
| } |
| |
| void Lowerer::LowerFunctionEntry(IR::Instr* funcEntry) |
| { |
| Assert(funcEntry->m_opcode == Js::OpCode::FunctionEntry); |
| |
    // Don't do a function-body call count increment for loop bodies or asm.js
| if (m_func->IsLoopBody() || m_func->GetJITFunctionBody()->IsAsmJsMode()) |
| { |
| return; |
| } |
| |
| IR::Instr *const insertBeforeInstr = this->m_func->GetFunctionEntryInsertionPoint(); |
| |
| LowerFunctionBodyCallCountChange(insertBeforeInstr); |
| |
| if (m_func->DoSimpleJitDynamicProfile()) |
| { |
| // Only generate the argument profiling if the function expects to have some arguments to profile and only if |
| // it has implicit ArgIns (the latter is a restriction imposed by the Interpreter, so it is mirrored in SimpleJit) |
| |
| if (m_func->GetJITFunctionBody()->GetInParamsCount() > 1 && m_func->GetJITFunctionBody()->HasImplicitArgIns()) |
| { |
| // Call out to the argument profiling helper |
| IR::Instr* callInstr = IR::Instr::New(Js::OpCode::Call, m_func); |
| callInstr->SetSrc1(IR::HelperCallOpnd::New(IR::HelperSimpleProfileParameters, m_func)); |
| insertBeforeInstr->InsertBefore(callInstr); |
| m_lowererMD.LoadHelperArgument(callInstr, IR::Opnd::CreateFramePointerOpnd(m_func)); |
| m_lowererMD.LowerCall(callInstr, 0); |
| } |
| |
| // Clear existing ImplicitCallFlags |
| const auto starFlag = GetImplicitCallFlagsOpnd(); |
| this->InsertMove(starFlag, CreateClearImplicitCallFlagsOpnd(), insertBeforeInstr); |
| } |
| } |
| |
| void Lowerer::LowerFunctionBodyCallCountChange(IR::Instr *const insertBeforeInstr) |
| { |
| Assert(insertBeforeInstr); |
| |
| Func *const func = insertBeforeInstr->m_func; |
| const bool isSimpleJit = func->IsSimpleJit(); |
| |
| if ((isSimpleJit && PHASE_OFF(Js::FullJitPhase, m_func))) |
| { |
| return; |
| } |
| |
| // mov countAddress, <countAddress> |
| IR::RegOpnd *const countAddressOpnd = IR::RegOpnd::New(StackSym::New(TyMachPtr, func), TyMachPtr, func); |
| const IR::AutoReuseOpnd autoReuseCountAddressOpnd(countAddressOpnd, func); |
| InsertMove( |
| countAddressOpnd, |
| IR::AddrOpnd::New((Js::Var)func->GetWorkItem()->GetCallsCountAddress(), IR::AddrOpndKindDynamicMisc, func, true), |
| insertBeforeInstr); |
| |
| IR::IndirOpnd *const countOpnd = IR::IndirOpnd::New(countAddressOpnd, 0, TyUint32, func); |
| const IR::AutoReuseOpnd autoReuseCountOpnd(countOpnd, func); |
| if(!isSimpleJit) |
| { |
| InsertAdd(false, countOpnd, countOpnd, IR::IntConstOpnd::New(1, TyUint32, func), insertBeforeInstr); |
| return; |
| } |
| |
| IR::Instr *onOverflowInsertBeforeInstr; |
| InsertDecUInt32PreventOverflow( |
| countOpnd, |
| countOpnd, |
| insertBeforeInstr, |
| &onOverflowInsertBeforeInstr); |
| |
| // ($overflow:) |
| // TransitionFromSimpleJit(framePointer) |
| m_lowererMD.LoadHelperArgument(onOverflowInsertBeforeInstr, IR::Opnd::CreateFramePointerOpnd(func)); |
| IR::Instr *const callInstr = IR::Instr::New(Js::OpCode::Call, func); |
| callInstr->SetSrc1(IR::HelperCallOpnd::New(IR::HelperTransitionFromSimpleJit, func)); |
| onOverflowInsertBeforeInstr->InsertBefore(callInstr); |
| m_lowererMD.LowerCall(callInstr, 0); |
| } |
| |
| IR::Opnd* |
| Lowerer::GetImplicitCallFlagsOpnd() |
| { |
| return GetImplicitCallFlagsOpnd(m_func); |
| } |
| |
| IR::Opnd* |
| Lowerer::GetImplicitCallFlagsOpnd(Func * func) |
| { |
| return IR::MemRefOpnd::New(func->GetThreadContextInfo()->GetImplicitCallFlagsAddr(), GetImplicitCallFlagsType(), func); |
| } |
| |
| IR::Opnd* |
| Lowerer::CreateClearImplicitCallFlagsOpnd() |
| { |
| return IR::IntConstOpnd::New(Js::ImplicitCall_None, GetImplicitCallFlagsType(), m_func); |
| } |
| |
| void |
| Lowerer::GenerateFlagInlineCacheCheckForGetterSetter( |
| IR::Instr * insertBeforeInstr, |
| IR::RegOpnd * opndInlineCache, |
| IR::LabelInstr * labelNext) |
| { |
| uint accessorFlagMask; |
| if (PHASE_OFF(Js::InlineGettersPhase, insertBeforeInstr->m_func)) |
| { |
| accessorFlagMask = Js::InlineCache::GetSetterFlagMask(); |
| } |
| else if (PHASE_OFF(Js::InlineSettersPhase, insertBeforeInstr->m_func)) |
| { |
| accessorFlagMask = Js::InlineCache::GetGetterFlagMask(); |
| } |
| else |
| { |
| accessorFlagMask = Js::InlineCache::GetGetterSetterFlagMask(); |
| } |
| |
| // Generate: |
| // |
| // TEST [&(inlineCache->u.accessor.flags)], Js::InlineCacheGetterFlag | Js::InlineCacheSetterFlag |
| // JEQ $next |
| |
| IR::Opnd * flagsOpnd = IR::IndirOpnd::New(opndInlineCache, (int32)offsetof(Js::InlineCache, u.accessor.rawUInt16), TyInt8, insertBeforeInstr->m_func); |
| IR::Opnd * accessorOpnd = IR::IntConstOpnd::New(accessorFlagMask, TyInt8, this->m_func); |
| InsertTestBranch(flagsOpnd, accessorOpnd, Js::OpCode::BrEq_A, labelNext, insertBeforeInstr); |
| } |
| |
| IR::BranchInstr * |
| Lowerer::GenerateLocalInlineCacheCheck( |
| IR::Instr * instrLdSt, |
| IR::RegOpnd * opndType, |
| IR::RegOpnd * inlineCache, |
| IR::LabelInstr * labelNext, |
| bool checkTypeWithoutProperty) |
| { |
| // Generate: |
| // |
| // CMP s1, [&(inlineCache->u.local.type/typeWithoutProperty)] |
| // JNE $next |
| |
| IR::Opnd* typeOpnd; |
| if (checkTypeWithoutProperty) |
| { |
| typeOpnd = IR::IndirOpnd::New(inlineCache, (int32)offsetof(Js::InlineCache, u.local.typeWithoutProperty), TyMachReg, instrLdSt->m_func); |
| } |
| else |
| { |
| typeOpnd = IR::IndirOpnd::New(inlineCache, (int32)offsetof(Js::InlineCache, u.local.type), TyMachReg, instrLdSt->m_func); |
| } |
| |
| InsertCompare(opndType, typeOpnd, instrLdSt); |
| return InsertBranch(Js::OpCode::BrNeq_A, labelNext, instrLdSt); |
| } |
| |
| IR::BranchInstr * |
| Lowerer::GenerateProtoInlineCacheCheck( |
| IR::Instr * instrLdSt, |
| IR::RegOpnd * opndType, |
| IR::RegOpnd * inlineCache, |
| IR::LabelInstr * labelNext) |
| { |
| // Generate: |
| // |
| // CMP s1, [&(inlineCache->u.proto.type)] |
| // JNE $next |
| |
| IR::Opnd* typeOpnd = IR::IndirOpnd::New(inlineCache, (int32)offsetof(Js::InlineCache, u.proto.type), TyMachReg, instrLdSt->m_func); |
| |
| InsertCompare(opndType, typeOpnd, instrLdSt); |
| return InsertBranch(Js::OpCode::BrNeq_A, labelNext, instrLdSt); |
| } |
| |
| void |
| Lowerer::GenerateFlagInlineCacheCheck( |
| IR::Instr * instrLdSt, |
| IR::RegOpnd * opndType, |
| IR::RegOpnd * opndInlineCache, |
| IR::LabelInstr * labelNext) |
| { |
| // Generate: |
| // |
| // CMP s1, [&(inlineCache->u.accessor.type)] |
| // JNE $next |
| |
| IR::Opnd* typeOpnd = IR::IndirOpnd::New(opndInlineCache, (int32)offsetof(Js::InlineCache, u.accessor.type), TyMachReg, instrLdSt->m_func); |
| |
    // CMP s1, [&(inlineCache->u.accessor.type)]
| InsertCompareBranch(opndType, typeOpnd, Js::OpCode::BrNeq_A, labelNext, instrLdSt); |
| } |
| |
| void |
| Lowerer::GenerateLdFldFromLocalInlineCache( |
| IR::Instr * instrLdFld, |
| IR::RegOpnd * opndBase, |
| IR::Opnd * opndDst, |
| IR::RegOpnd * opndInlineCache, |
| IR::LabelInstr * labelFallThru, |
| bool isInlineSlot) |
| { |
| // Generate: |
| // |
| // s1 = MOV base->slots -- load the slot array |
| // s2 = MOVZXw [&(inlineCache->u.local.slotIndex)] -- load the cached slot index |
| // dst = MOV [s1 + s2 * Scale] -- load the value directly from the slot |
| // JMP $fallthru |
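    //
    // Inline slots live directly inside the object, so the base register can be
    // indexed directly; aux slots live in a separate array reached through the
    // object's auxSlots field, which is why the !isInlineSlot path loads the
    // slot array first.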
| |
| IR::IndirOpnd * opndIndir = nullptr; |
| IR::RegOpnd * opndSlotArray = nullptr; |
| |
| if (!isInlineSlot) |
| { |
| opndSlotArray = IR::RegOpnd::New(TyMachReg, instrLdFld->m_func); |
| opndIndir = IR::IndirOpnd::New(opndBase, Js::DynamicObject::GetOffsetOfAuxSlots(), TyMachReg, instrLdFld->m_func); |
| InsertMove(opndSlotArray, opndIndir, instrLdFld); |
| } |
| |
| // s2 = MOVZXw [&(inlineCache->u.local.slotIndex)] -- load the cached slot index |
| IR::RegOpnd * opndReg2 = IR::RegOpnd::New(TyMachReg, instrLdFld->m_func); |
| opndIndir = IR::IndirOpnd::New(opndInlineCache, (int32)offsetof(Js::InlineCache, u.local.slotIndex), TyUint16, instrLdFld->m_func); |
| InsertMove(opndReg2, opndIndir, instrLdFld); |
| |
| if (isInlineSlot) |
| { |
| // dst = MOV [base + s2 * Scale] -- load the value directly from the slot |
| opndIndir = IR::IndirOpnd::New(opndBase, opndReg2, LowererMD::GetDefaultIndirScale(), TyMachReg, instrLdFld->m_func); |
| InsertMove(opndDst, opndIndir, instrLdFld); |
| } |
| else |
| { |
| // dst = MOV [s1 + s2 * Scale] -- load the value directly from the slot |
| opndIndir = IR::IndirOpnd::New(opndSlotArray, opndReg2, LowererMD::GetDefaultIndirScale(), TyMachReg, instrLdFld->m_func); |
| InsertMove(opndDst, opndIndir, instrLdFld); |
| } |
| |
| // JMP $fallthru |
| InsertBranch(Js::OpCode::Br, labelFallThru, instrLdFld); |
| } |
| |
| void |
| Lowerer::GenerateLdFldFromProtoInlineCache( |
| IR::Instr * instrLdFld, |
| IR::RegOpnd * opndBase, |
| IR::Opnd * opndDst, |
| IR::RegOpnd * inlineCache, |
| IR::LabelInstr * labelFallThru, |
| bool isInlineSlot) |
| { |
| // Generate: |
| // |
| // s1 = MOV [&(inlineCache->u.proto.prototypeObject)] -- load the cached prototype object |
| // s1 = MOV [&s1->slots] -- load the slot array |
| // s2 = MOVZXW [&(inlineCache->u.proto.slotIndex)] -- load the cached slot index |
| // dst = MOV [s1 + s2*4] |
| // JMP $fallthru |
| |
| IR::IndirOpnd * opndIndir = nullptr; |
| IR::RegOpnd * opndProtoSlots = nullptr; |
| |
| // s1 = MOV [&(inlineCache->u.proto.prototypeObject)] -- load the cached prototype object |
| IR::RegOpnd * opndProto = IR::RegOpnd::New(TyMachReg, instrLdFld->m_func); |
| opndIndir = IR::IndirOpnd::New(inlineCache, (int32)offsetof(Js::InlineCache, u.proto.prototypeObject), TyMachReg, instrLdFld->m_func); |
| InsertMove(opndProto, opndIndir, instrLdFld); |
| |
| if (!isInlineSlot) |
| { |
| // s1 = MOV [&s1->slots] -- load the slot array |
| opndProtoSlots = IR::RegOpnd::New(TyMachReg, instrLdFld->m_func); |
| opndIndir = IR::IndirOpnd::New(opndProto, Js::DynamicObject::GetOffsetOfAuxSlots(), TyMachReg, instrLdFld->m_func); |
| InsertMove(opndProtoSlots, opndIndir, instrLdFld); |
| } |
| |
| // s2 = MOVZXW [&(inlineCache->u.proto.slotIndex)] -- load the cached slot index |
| IR::RegOpnd * opndSlotIndex = IR::RegOpnd::New(TyMachReg, instrLdFld->m_func); |
| opndIndir = IR::IndirOpnd::New(inlineCache, (int32)offsetof(Js::InlineCache, u.proto.slotIndex), TyUint16, instrLdFld->m_func); |
| InsertMove(opndSlotIndex, opndIndir, instrLdFld); |
| |
| if (isInlineSlot) |
| { |
| // dst = MOV [s1 + s2*4] |
| opndIndir = IR::IndirOpnd::New(opndProto, opndSlotIndex, LowererMD::GetDefaultIndirScale(), TyMachReg, instrLdFld->m_func); |
| InsertMove(opndDst, opndIndir, instrLdFld); |
| } |
| else |
| { |
| // dst = MOV [s1 + s2*4] |
| opndIndir = IR::IndirOpnd::New(opndProtoSlots, opndSlotIndex, LowererMD::GetDefaultIndirScale(), TyMachReg, instrLdFld->m_func); |
| InsertMove(opndDst, opndIndir, instrLdFld); |
| } |
| |
| // JMP $fallthru |
| InsertBranch(Js::OpCode::Br, labelFallThru, instrLdFld); |
| } |
| |
| void |
| Lowerer::GenerateLdFldFromFlagInlineCache( |
| IR::Instr * insertBeforeInstr, |
| IR::RegOpnd * opndBase, |
| IR::Opnd * opndDst, |
| IR::RegOpnd * opndInlineCache, |
| IR::LabelInstr * labelFallThru, |
| bool isInlineSlot) |
| { |
| // Generate: |
| // |
| // s1 = MOV [&(inlineCache->u.accessor.object)] -- load the cached prototype object |
| // s1 = MOV [&s1->slots] -- load the slot array |
| // s2 = MOVZXW [&(inlineCache->u.accessor.slotIndex)] -- load the cached slot index |
| // dst = MOV [s1 + s2 * 4] |
| // JMP $fallthru |
| |
| IR::IndirOpnd * opndIndir = nullptr; |
| IR::RegOpnd * opndObjSlots = nullptr; |
| |
| // s1 = MOV [&(inlineCache->u.accessor.object)] -- load the cached prototype object |
| IR::RegOpnd * opndObject = IR::RegOpnd::New(TyMachReg, this->m_func); |
| opndIndir = IR::IndirOpnd::New(opndInlineCache, (int32)offsetof(Js::InlineCache, u.accessor.object), TyMachReg, this->m_func); |
| InsertMove(opndObject, opndIndir, insertBeforeInstr); |
| |
| if (!isInlineSlot) |
| { |
| // s1 = MOV [&s1->slots] -- load the slot array |
| opndObjSlots = IR::RegOpnd::New(TyMachReg, this->m_func); |
| opndIndir = IR::IndirOpnd::New(opndObject, Js::DynamicObject::GetOffsetOfAuxSlots(), TyMachReg, this->m_func); |
| InsertMove(opndObjSlots, opndIndir, insertBeforeInstr); |
| } |
| |
| // s2 = MOVZXW [&(inlineCache->u.accessor.slotIndex)] -- load the cached slot index |
| IR::RegOpnd * opndSlotIndex = IR::RegOpnd::New(TyMachReg, this->m_func); |
| opndIndir = IR::IndirOpnd::New(opndInlineCache, (int32)offsetof(Js::InlineCache, u.accessor.slotIndex), TyUint16, this->m_func); |
| InsertMove(opndSlotIndex, opndIndir, insertBeforeInstr); |
| |
| if (isInlineSlot) |
| { |
| // dst = MOV [s1 + s2 * 4] |
| opndIndir = IR::IndirOpnd::New(opndObject, opndSlotIndex, this->m_lowererMD.GetDefaultIndirScale(), TyMachReg, this->m_func); |
| InsertMove(opndDst, opndIndir, insertBeforeInstr); |
| } |
| else |
| { |
| // dst = MOV [s1 + s2 * 4] |
| opndIndir = IR::IndirOpnd::New(opndObjSlots, opndSlotIndex, this->m_lowererMD.GetDefaultIndirScale(), TyMachReg, this->m_func); |
| InsertMove(opndDst, opndIndir, insertBeforeInstr); |
| } |
| |
| // JMP $fallthru |
| InsertBranch(Js::OpCode::Br, labelFallThru, insertBeforeInstr); |
| } |
| |
| void |
| Lowerer::LowerSpreadArrayLiteral(IR::Instr *instr) |
| { |
| LoadScriptContext(instr); |
| |
| IR::Opnd *src2Opnd = instr->UnlinkSrc2(); |
| m_lowererMD.LoadHelperArgument(instr, src2Opnd); |
| |
| IR::Opnd *src1Opnd = instr->UnlinkSrc1(); |
| m_lowererMD.LoadHelperArgument(instr, src1Opnd); |
| |
| this->m_lowererMD.ChangeToHelperCall(instr, IR::HelperSpreadArrayLiteral); |
| } |
| |
| IR::Instr * |
| Lowerer::LowerSpreadCall(IR::Instr *instr, Js::CallFlags callFlags, bool setupProfiledVersion) |
| { |
| // Get the target function object, and emit function object test. |
| IR::RegOpnd * functionObjOpnd = instr->UnlinkSrc1()->AsRegOpnd(); |
| functionObjOpnd->m_isCallArg = true; |
| |
| if (!(callFlags & Js::CallFlags_New) && !setupProfiledVersion) |
| { |
| IR::LabelInstr* continueAfterExLabel = InsertContinueAfterExceptionLabelForDebugger(m_func, instr, false); |
| this->m_lowererMD.GenerateFunctionObjectTest(instr, functionObjOpnd, false, continueAfterExLabel); |
| } |
| |
| IR::Instr *spreadIndicesInstr; |
| |
| spreadIndicesInstr = GetLdSpreadIndicesInstr(instr); |
| Assert(spreadIndicesInstr->m_opcode == Js::OpCode::LdSpreadIndices); |
| |
| // Get AuxArray |
| IR::Opnd *spreadIndicesOpnd = spreadIndicesInstr->UnlinkSrc1(); |
| // Remove LdSpreadIndices from the argument chain |
| instr->ReplaceSrc2(spreadIndicesInstr->UnlinkSrc2()); |
| |
| // Emit the normal args |
| if (!(callFlags & Js::CallFlags_New)) |
| { |
| callFlags = (Js::CallFlags)(callFlags | (instr->GetDst() ? Js::CallFlags_Value : Js::CallFlags_NotUsed)); |
| } |
| |
    // The profiled helper call requires three more parameters: arrayProfileId, profileId, and the frame pointer.
    // This follows the convention of the HelperProfiledNewScObjArray call.
| const unsigned short extraArgsCount = setupProfiledVersion ? 5 : 2; // function object and AuxArray |
| int32 argCount = this->m_lowererMD.LowerCallArgs(instr, (ushort)callFlags, extraArgsCount); |
| |
| // Emit our extra (first) args for the Spread helper in reverse order |
| if (setupProfiledVersion) |
| { |
| IR::JitProfilingInstr* jitInstr = (IR::JitProfilingInstr*)instr; |
| m_lowererMD.LoadHelperArgument(instr, IR::Opnd::CreateProfileIdOpnd(jitInstr->arrayProfileId, m_func)); |
| m_lowererMD.LoadHelperArgument(instr, IR::Opnd::CreateProfileIdOpnd(jitInstr->profileId, m_func)); |
| m_lowererMD.LoadHelperArgument(instr, IR::Opnd::CreateFramePointerOpnd(m_func)); |
| } |
| |
| m_lowererMD.LoadHelperArgument(instr, functionObjOpnd); |
| m_lowererMD.LoadHelperArgument(instr, spreadIndicesOpnd); |
| |
| // Change the call target to our helper |
| IR::HelperCallOpnd *helperOpnd = IR::HelperCallOpnd::New(setupProfiledVersion ? IR::HelperProfiledNewScObjArraySpread : IR::HelperSpreadCall, this->m_func); |
| instr->SetSrc1(helperOpnd); |
| |
| return this->m_lowererMD.LowerCall(instr, (Js::ArgSlot)argCount); |
| } |
| |
| void |
| Lowerer::LowerDivI4Common(IR::Instr * instr) |
| { |
| Assert(instr); |
| Assert((instr->m_opcode == Js::OpCode::Rem_I4 || instr->m_opcode == Js::OpCode::Div_I4) || |
| (instr->m_opcode == Js::OpCode::RemU_I4 || instr->m_opcode == Js::OpCode::DivU_I4)); |
| Assert(m_func->GetJITFunctionBody()->IsAsmJsMode()); |
| |
| const bool isRem = instr->m_opcode == Js::OpCode::Rem_I4 || instr->m_opcode == Js::OpCode::RemU_I4; |
| // MIN_INT/-1 path is only needed for signed operations |
| |
| // TEST src2, src2 |
| // JEQ $div0 |
| // CMP src1, MIN_INT |
| // JEQ $minInt |
| // JMP $div |
| // $div0: [helper] |
| // MOV dst, 0 |
| // JMP $done |
| // $minInt: [helper] |
| // CMP src2, -1 |
| // JNE $div |
    //    dst = MOV src1 (for Div) or 0 (for Rem)
| // JMP $done |
| // $div: |
| // dst = IDIV src2, src1 |
| // $done: |
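    //
    // This matches asm.js semantics: integer division (and remainder) by zero
    // yields 0 rather than trapping, MIN_INT / -1 yields MIN_INT, and
    // MIN_INT % -1 yields 0, so both cases are handled inline instead of throwing.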
| |
| IR::LabelInstr * div0Label = InsertLabel(true, instr); |
| IR::LabelInstr * divLabel = InsertLabel(false, instr); |
| IR::LabelInstr * doneLabel = InsertLabel(false, instr->m_next); |
| IR::Opnd * dst = instr->GetDst(); |
| IR::Opnd * src1 = instr->GetSrc1(); |
| IR::Opnd * src2 = instr->GetSrc2(); |
| |
| bool isWasm = m_func->GetJITFunctionBody()->IsWasmFunction(); |
| Assert(!isWasm || isRem); |
| |
| if (!isWasm) |
| { |
| InsertTestBranch(src2, src2, Js::OpCode::BrEq_A, div0Label, div0Label); |
| InsertMove(dst, IR::IntConstOpnd::NewFromType(0, dst->GetType(), m_func), divLabel); |
| InsertBranch(Js::OpCode::Br, doneLabel, divLabel); |
| } |
| |
| if (instr->GetSrc1()->IsSigned()) |
| { |
| IR::LabelInstr * minIntLabel = nullptr; |
| // we need to check for INT_MIN/-1 if divisor is either -1 or variable, and dividend is either INT_MIN or variable |
| int64 intMin = IRType_IsInt64(src1->GetType()) ? LONGLONG_MIN : INT_MIN; |
| bool needsMinOverNeg1Check = !(src2->IsImmediateOpnd() && src2->GetImmediateValue(m_func) != -1); |
| if (src1->IsImmediateOpnd()) |
| { |
| if (needsMinOverNeg1Check && src1->GetImmediateValue(m_func) == intMin) |
| { |
| minIntLabel = InsertLabel(true, divLabel); |
| InsertBranch(Js::OpCode::Br, minIntLabel, div0Label); |
| } |
| else |
| { |
| needsMinOverNeg1Check = false; |
| } |
| } |
| else if(needsMinOverNeg1Check) |
| { |
| minIntLabel = InsertLabel(true, divLabel); |
| InsertCompareBranch(src1, IR::IntConstOpnd::NewFromType(intMin, src1->GetType(), m_func), Js::OpCode::BrEq_A, minIntLabel, div0Label); |
| } |
| if (needsMinOverNeg1Check) |
| { |
| Assert(minIntLabel); |
| Assert(!src2->IsImmediateOpnd() || src2->GetImmediateValue(m_func) == -1); |
| if (!src2->IsImmediateOpnd()) |
| { |
| InsertCompareBranch(src2, IR::IntConstOpnd::NewFromType(-1, src2->GetType(), m_func), Js::OpCode::BrNeq_A, divLabel, divLabel); |
| } |
| |
| InsertMove(dst, !isRem ? src1 : IR::IntConstOpnd::NewFromType(0, dst->GetType(), m_func), divLabel); |
| InsertBranch(Js::OpCode::Br, doneLabel, divLabel); |
| } |
| } |
| InsertBranch(Js::OpCode::Br, divLabel, div0Label); |
| |
| m_lowererMD.EmitInt4Instr(instr); |
| } |
| |
| void |
| Lowerer::LowerRemI4(IR::Instr * instr) |
| { |
| Assert(instr); |
| Assert(instr->m_opcode == Js::OpCode::Rem_I4 || instr->m_opcode == Js::OpCode::RemU_I4); |
| //Generate fast path for const divisors |
| if (m_lowererMD.GenerateFastDivAndRem(instr)) |
| { |
| return; |
| } |
| |
| if (m_func->GetJITFunctionBody()->IsAsmJsMode()) |
| { |
| LowerDivI4Common(instr); |
| } |
| else |
| { |
| m_lowererMD.EmitInt4Instr(instr); |
| } |
| } |
| |
| void |
| Lowerer::LowerTrapIfZero(IR::Instr * const instr) |
| { |
| Assert(instr); |
| Assert(instr->m_opcode == Js::OpCode::TrapIfZero); |
| Assert(instr->GetSrc1()); |
| Assert(m_func->GetJITFunctionBody()->IsWasmFunction()); |
| |
| IR::Opnd * src1 = instr->GetSrc1(); |
| if (src1->IsImmediateOpnd()) |
| { |
| if (src1->GetImmediateValue(m_func) == 0) |
| { |
| GenerateThrow(IR::IntConstOpnd::NewFromType(SCODE_CODE(WASMERR_DivideByZero), TyInt32, m_func), instr); |
| } |
| } |
| else |
| { |
| IR::LabelInstr * doneLabel = InsertLabel(false, instr->m_next); |
| InsertCompareBranch(src1, IR::IntConstOpnd::NewFromType(0, src1->GetType(), m_func), Js::OpCode::BrNeq_A, doneLabel, doneLabel); |
| InsertLabel(true, doneLabel); |
| GenerateThrow(IR::IntConstOpnd::NewFromType(SCODE_CODE(WASMERR_DivideByZero), TyInt32, m_func), doneLabel); |
| } |
| LowererMD::ChangeToAssign(instr); |
| } |
| |
| IR::Instr* |
| Lowerer::LowerTrapIfUnalignedAccess(IR::Instr * const instr) |
| { |
    Assert(instr);
    Assert(instr->m_opcode == Js::OpCode::TrapIfUnalignedAccess);
    IR::Opnd* dst = instr->UnlinkDst();
    IR::Opnd* src1 = instr->UnlinkSrc1();
    IR::Opnd* src2 = instr->GetSrc2();
    Assert(src1 && !src1->IsVar());
    Assert(src2 && src2->IsImmediateOpnd());
    Assert(src2->GetSize() > 1);
| |
| uint32 mask = src2->GetSize() - 1; |
| uint32 cmpValue = (uint32)src2->GetImmediateValue(m_func); |
| |
| InsertMove(dst, src1, instr); |
| IR::IntConstOpnd* maskOpnd = IR::IntConstOpnd::New(mask, src1->GetType(), m_func); |
| IR::RegOpnd* maskedOpnd = IR::RegOpnd::New(src1->GetType(), m_func); |
| IR::Instr* maskInstr = IR::Instr::New(Js::OpCode::And_I4, maskedOpnd, src1, maskOpnd, m_func); |
| instr->InsertBefore(maskInstr); |
| |
| IR::IntConstOpnd* cmpOpnd = IR::IntConstOpnd::New(cmpValue, maskedOpnd->GetType(), m_func, true); |
| IR::LabelInstr* alignedLabel = IR::LabelInstr::New(Js::OpCode::Label, m_func); |
| IR::Instr* branch = IR::BranchInstr::New(Js::OpCode::BrEq_I4, alignedLabel, maskedOpnd, cmpOpnd, m_func); |
| instr->InsertBefore(branch); |
| InsertLabel(true, instr); |
| GenerateThrow(IR::IntConstOpnd::NewFromType(SCODE_CODE(WASMERR_UnalignedAtomicAccess), TyInt32, m_func), instr); |
| instr->InsertBefore(alignedLabel); |
| |
| instr->Remove(); |
| // The check and branch are not fully lowered yet; let them go through the lowering loop.
| return branch; |
| } |
| |
| void |
| Lowerer::LowerTrapIfMinIntOverNegOne(IR::Instr * const instr) |
| { |
| Assert(instr); |
| Assert(instr->m_opcode == Js::OpCode::TrapIfMinIntOverNegOne); |
| Assert(instr->GetSrc1()); |
| Assert(instr->GetSrc2()); |
| Assert(m_func->GetJITFunctionBody()->IsWasmFunction()); |
| |
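| // For non-constant operands, this emits roughly (a sketch; label names illustrative):
| //
| // MOV dst, src1 (the trap itself becomes a plain assign)
| // CMP src1, INT_MIN (LONGLONG_MIN for int64 operands)
| // JNE $done
| // CMP src2, -1
| // JNE $done
| // $helper:
| // CALL Op_WebAssemblyRuntimeError(VBSERR_Overflow)
| // $done: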
| IR::LabelInstr * doneLabel = InsertLabel(false, instr->m_next); |
| IR::Opnd * src1 = instr->GetSrc1(); |
| IR::Opnd * src2 = instr->UnlinkSrc2(); |
| |
| int64 intMin = src1->IsInt64() ? LONGLONG_MIN : INT_MIN; |
| if (src1->IsImmediateOpnd()) |
| { |
| if (src1->GetImmediateValue(m_func) != intMin) |
| { |
| // Const value not min int, will not trap |
| doneLabel->Remove(); |
| src2->Free(m_func); |
| LowererMD::ChangeToAssign(instr); |
| return; |
| } |
| // src1 is min int; no need to emit the check
| } |
| else |
| { |
| InsertCompareBranch(src1, IR::IntConstOpnd::NewFromType(intMin, src1->GetType(), m_func), Js::OpCode::BrNeq_A, doneLabel, doneLabel); |
| } |
| if (src2->IsImmediateOpnd()) |
| { |
| if (src2->GetImmediateValue(m_func) != -1) |
| { |
| // Const value not -1, will not trap
| doneLabel->Remove(); |
| src2->Free(m_func); |
| LowererMD::ChangeToAssign(instr); |
| return; |
| } |
| // src2 is -1; no need to emit the check, just free the operand
| src2->Free(m_func); |
| } |
| else |
| { |
| InsertCompareBranch(src2, IR::IntConstOpnd::NewFromType(-1, src2->GetType(), m_func), Js::OpCode::BrNeq_A, doneLabel, doneLabel); |
| } |
| InsertLabel(true, doneLabel); |
| GenerateThrow(IR::IntConstOpnd::NewFromType(SCODE_CODE(VBSERR_Overflow), TyInt32, m_func), doneLabel); |
| LowererMD::ChangeToAssign(instr); |
| } |
| |
| void |
| Lowerer::GenerateThrow(IR::Opnd* errorCode, IR::Instr * instr) |
| { |
| IR::Instr *throwInstr = IR::Instr::New(Js::OpCode::RuntimeTypeError, IR::RegOpnd::New(TyMachReg, m_func), errorCode, m_func); |
| instr->InsertBefore(throwInstr); |
| const bool isWasm = m_func->GetJITFunctionBody() && m_func->GetJITFunctionBody()->IsWasmFunction(); |
| LowerUnaryHelperMem(throwInstr, isWasm ? IR::HelperOp_WebAssemblyRuntimeError : IR::HelperOp_RuntimeTypeError); |
| } |
| |
| void |
| Lowerer::LowerDivI4(IR::Instr * instr) |
| { |
| Assert(instr); |
| Assert(instr->m_opcode == Js::OpCode::Div_I4 || instr->m_opcode == Js::OpCode::DivU_I4); |
| |
| #ifdef _M_IX86 |
| if ( |
| instr->GetDst() && instr->GetDst()->IsInt64() || |
| instr->GetSrc1() && instr->GetSrc1()->IsInt64() || |
| instr->GetSrc2() && instr->GetSrc2()->IsInt64() |
| ) |
| { |
| m_lowererMD.EmitInt64Instr(instr); |
| return; |
| } |
| #endif |
| |
| Assert(instr->GetSrc2()); |
| if (m_func->GetJITFunctionBody()->IsWasmFunction()) |
| { |
| if (!m_lowererMD.GenerateFastDivAndRem(instr)) |
| { |
| m_lowererMD.EmitInt4Instr(instr); |
| } |
| return; |
| } |
| |
| if (m_func->GetJITFunctionBody()->IsAsmJsMode()) |
| { |
| if (!m_lowererMD.GenerateFastDivAndRem(instr)) |
| { |
| LowerDivI4Common(instr); |
| } |
| return; |
| } |
| |
| if(!instr->HasBailOutInfo()) |
| { |
| if (!m_lowererMD.GenerateFastDivAndRem(instr)) |
| { |
| m_lowererMD.EmitInt4Instr(instr); |
| } |
| return; |
| } |
| |
| Assert(!(instr->GetBailOutKind() & ~(IR::BailOnDivResultNotInt | IR::BailOutOnNegativeZero | IR::BailOutOnDivByZero | IR::BailOutOnDivOfMinInt))); |
| |
| IR::BailOutKind bailOutKind = instr->GetBailOutKind(); |
| |
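| // The overall shape of the lowered code is roughly as follows; which guards are
| // emitted depends on bailOutKind (a sketch; label names illustrative):
| //
| // (min-int / div-by-zero guards) -> $bailOut
| // dst = DIV src1, src2
| // (remainder / negative-zero guards) -> $bailOut
| // JMP $done
| // $bailOut: (helper)
| // CALL SaveAllRegistersAndBailOut
| // $done: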
| // Split out and generate the bailout instruction |
| const auto nonBailOutInstr = IR::Instr::New(instr->m_opcode, instr->m_func); |
| instr->TransferTo(nonBailOutInstr); |
| instr->InsertBefore(nonBailOutInstr); |
| |
| IR::LabelInstr * doneLabel = IR::LabelInstr::New(Js::OpCode::Label, instr->m_func); |
| instr->InsertAfter(doneLabel); |
| |
| // Generate the bailout helper call. 'instr' will be changed to the CALL into the bailout function, so it can't be used for |
| // ordering instructions anymore. |
| IR::LabelInstr * bailOutLabel = GenerateBailOut(instr); |
| |
| IR::Opnd * denominatorOpnd = nonBailOutInstr->GetSrc2(); |
| IR::Opnd * nominatorOpnd = nonBailOutInstr->GetSrc1(); |
| bool isFastDiv = false; |
| if (bailOutKind & IR::BailOutOnDivOfMinInt) |
| { |
| // Bail out if the numerator is MIN_INT (we could also check for the denominator being -1
| // before bailing out, but that does not seem worth the extra code.)
| InsertCompareBranch(nominatorOpnd, IR::IntConstOpnd::New(INT32_MIN, TyInt32, this->m_func, true), Js::OpCode::BrEq_A, bailOutLabel, nonBailOutInstr); |
| } |
| if (denominatorOpnd->IsIntConstOpnd() && Math::IsPow2(denominatorOpnd->AsIntConstOpnd()->AsInt32()))
| {
| Assert((bailOutKind & (IR::BailOutOnNegativeZero | IR::BailOutOnDivByZero)) == 0);
| // Power-of-2 constant divisor: bail out unless the numerator divides evenly,
| // then strength-reduce the div to a shift.
| int pow2 = denominatorOpnd->AsIntConstOpnd()->AsInt32();
| InsertTestBranch(nominatorOpnd, IR::IntConstOpnd::New(pow2 - 1, TyInt32, this->m_func),
| Js::OpCode::BrNeq_A, bailOutLabel, nonBailOutInstr);
| nonBailOutInstr->m_opcode = Js::OpCode::Shr_A;
| nonBailOutInstr->ReplaceSrc2(IR::IntConstOpnd::New(Math::Log2(pow2), TyInt32, this->m_func));
| LowererMD::ChangeToShift(nonBailOutInstr, false);
| LowererMD::Legalize(nonBailOutInstr);
| isFastDiv = true;
| }
| if (!isFastDiv) |
| { |
| if (bailOutKind & IR::BailOutOnDivByZero) |
| { |
| // Bailout if denominator is 0 |
| InsertTestBranch(denominatorOpnd, denominatorOpnd, Js::OpCode::BrEq_A, bailOutLabel, nonBailOutInstr); |
| } |
| |
| // Lower the div and bail out if there is a remainder (machine specific)
| IR::Instr * insertBeforeInstr = m_lowererMD.LowerDivI4AndBailOnReminder(nonBailOutInstr, bailOutLabel); |
| |
| IR::Opnd * resultOpnd = nonBailOutInstr->GetDst(); |
| if (bailOutKind & IR::BailOutOnNegativeZero) |
| { |
| // TEST result, result |
| // JNE skipNegDenominatorCheckLabel // Result not 0 |
| // TEST denominator, denominator |
| // JSB/BMI bailout // bail if negative
| // skipNegDenominatorCheckLabel: |
| |
| IR::LabelInstr * skipNegDenominatorCheckLabel = IR::LabelInstr::New(Js::OpCode::Label, m_func); |
| // Skip negative denominator check if the result is not 0 |
| InsertTestBranch(resultOpnd, resultOpnd, Js::OpCode::BrNeq_A, skipNegDenominatorCheckLabel, insertBeforeInstr); |
| |
| IR::LabelInstr * negDenominatorCheckLabel = IR::LabelInstr::New(Js::OpCode::Label, m_func, true); |
| insertBeforeInstr->InsertBefore(negDenominatorCheckLabel); |
| // Bail out if the denominator is negative
| InsertTestBranch(denominatorOpnd, denominatorOpnd, |
| LowererMD::MDCompareWithZeroBranchOpcode(Js::OpCode::BrLt_A), bailOutLabel, insertBeforeInstr); |
| insertBeforeInstr->InsertBefore(skipNegDenominatorCheckLabel); |
| } |
| } |
| |
| // We are all fine, jump around the bailout to done |
| InsertBranch(Js::OpCode::Br, doneLabel, bailOutLabel); |
| } |
| |
| void |
| Lowerer::LowerRemR8(IR::Instr * instr) |
| { |
| Assert(instr); |
| Assert(instr->m_opcode == Js::OpCode::Rem_A); |
| Assert(m_func->GetJITFunctionBody()->IsAsmJsMode()); |
| |
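| // Double remainder is lowered to a helper call, roughly:
| //
| // dst = CALL Op_Rem_Double(src1, src2)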
| m_lowererMD.LoadDoubleHelperArgument(instr, instr->UnlinkSrc2()); |
| m_lowererMD.LoadDoubleHelperArgument(instr, instr->UnlinkSrc1()); |
| instr->SetSrc1(IR::HelperCallOpnd::New(IR::JnHelperMethod::HelperOp_Rem_Double, m_func)); |
| m_lowererMD.LowerCall(instr, 0); |
| } |
| |
| void |
| Lowerer::LowerNewScopeSlots(IR::Instr * instr, bool doStackSlots) |
| { |
| Func * func = m_func; |
| if (PHASE_OFF(Js::NewScopeSlotFastPathPhase, func)) |
| { |
| this->LowerUnaryHelperMemWithFunctionInfo(instr, IR::HelperOP_NewScopeSlots); |
| return; |
| } |
| |
| uint const count = instr->GetSrc1()->AsIntConstOpnd()->AsUint32(); |
| uint const allocSize = count * sizeof(Js::Var); |
| uint const actualSlotCount = count - Js::ScopeSlots::FirstSlotIndex; |
| |
| IR::RegOpnd * dst = instr->UnlinkDst()->AsRegOpnd(); |
| |
| // dst = RecyclerAlloc(allocSize) |
| // dst[EncodedSlotCountSlotIndex] = min(actualSlotCount, MaxEncodedSlotCount); |
| // dst[ScopeMetadataSlotIndex] = FunctionBody; |
| // mov undefinedOpnd, undefined |
| // dst[FirstSlotIndex..count] = undefinedOpnd; |
| |
| // Note: stack allocation of both scope slots and frame display are done together |
| // in lowering of NewStackFrameDisplay |
| if (!doStackSlots) |
| { |
| GenerateRecyclerAlloc(IR::HelperAllocMemForVarArray, allocSize, dst, instr); |
| } |
| |
| m_lowererMD.GenerateMemInit(dst, Js::ScopeSlots::EncodedSlotCountSlotIndex * sizeof(Js::Var), |
| (size_t)min<uint>(actualSlotCount, Js::ScopeSlots::MaxEncodedSlotCount), instr, !doStackSlots); |
| |
| IR::Opnd * functionInfoOpnd = this->LoadFunctionInfoOpnd(instr); |
| GenerateMemInit(dst, Js::ScopeSlots::ScopeMetadataSlotIndex * sizeof(Js::Var), |
| functionInfoOpnd, instr, !doStackSlots); |
| |
| IR::Opnd * undefinedOpnd = this->LoadLibraryValueOpnd(instr, LibraryValue::ValueUndefined); |
| const IR::AutoReuseOpnd autoReuseUndefinedOpnd(undefinedOpnd, func); |
| |
| // Avoid using a register for the undefined pointer if we are only going to do 1 or 2 assignments
| |
| if (actualSlotCount > 2) |
| { |
| undefinedOpnd = GetRegOpnd(undefinedOpnd, instr, func, TyVar); |
| } |
| |
| int const loopUnrollCount = 8; |
| |
| if (actualSlotCount <= loopUnrollCount * 2) |
| { |
| // Just generate all the assignment in straight line code |
| // mov[dst + Js::FirstSlotIndex], undefinedOpnd |
| // ... |
| // mov[dst + count - 1], undefinedOpnd |
| for (unsigned int i = Js::ScopeSlots::FirstSlotIndex; i < count; i++) |
| { |
| GenerateMemInit(dst, sizeof(Js::Var) * i, undefinedOpnd, instr, !doStackSlots); |
| } |
| } |
| else |
| { |
| // Just generate all the assignment in loop of loopUnrollCount and the rest as straight line code |
| // |
| // lea currOpnd, [dst + sizeof(Var) * (loopAssignCount + Js::ScopeSlots::FirstSlotIndex - loopUnrollCount)]; |
| // mov [currOpnd + loopUnrollCount + leftOverAssignCount - 1] , undefinedOpnd |
| // mov [currOpnd + loopUnrollCount + leftOverAssignCount - 2] , undefinedOpnd |
| // ... |
| // mov [currOpnd + loopUnrollCount], undefinedOpnd |
| // $LoopTop: |
| // mov [currOpnd + loopUnrollCount - 1], undefinedOpnd |
| // mov [currOpnd + loopUnrollCount - 2], undefinedOpnd |
| // ... |
| // mov [currOpnd], undefinedOpnd |
| // lea currOpnd, [currOpnd - loopUnrollCount] |
| // cmp dst, currOpnd |
| // jlt $LoopTop
| |
| uint nLoop = actualSlotCount / loopUnrollCount; |
| uint loopAssignCount = nLoop * loopUnrollCount; |
| uint leftOverAssignCount = actualSlotCount - loopAssignCount; // The left over assignments |
| |
| IR::RegOpnd * currOpnd = IR::RegOpnd::New(TyMachPtr, func); |
| const IR::AutoReuseOpnd autoReuseCurrOpnd(currOpnd, m_func); |
| InsertLea( |
| currOpnd, |
| IR::IndirOpnd::New( |
| dst, |
| sizeof(Js::Var) * (loopAssignCount + Js::ScopeSlots::FirstSlotIndex - loopUnrollCount), |
| TyMachPtr, |
| func), |
| instr); |
| |
| for (unsigned int i = 0; i < leftOverAssignCount; i++) |
| { |
| GenerateMemInit(currOpnd, sizeof(Js::Var) * (loopUnrollCount + leftOverAssignCount - i - 1), undefinedOpnd, instr, !doStackSlots); |
| } |
| |
| IR::LabelInstr * loopTop = InsertLoopTopLabel(instr); |
| Loop * loop = loopTop->GetLoop(); |
| |
| for (unsigned int i = 0; i < loopUnrollCount; i++) |
| { |
| GenerateMemInit(currOpnd, sizeof(Js::Var) * (loopUnrollCount - i - 1), undefinedOpnd, instr, !doStackSlots); |
| } |
| InsertLea(currOpnd, IR::IndirOpnd::New(currOpnd, -((int)sizeof(Js::Var) * loopUnrollCount), TyMachPtr, func), instr); |
| |
| InsertCompareBranch(dst, currOpnd, Js::OpCode::BrLt_A, true, loopTop, instr); |
| |
| loop->regAlloc.liveOnBackEdgeSyms->Set(currOpnd->m_sym->m_id); |
| loop->regAlloc.liveOnBackEdgeSyms->Set(dst->m_sym->m_id); |
| loop->regAlloc.liveOnBackEdgeSyms->Set(undefinedOpnd->AsRegOpnd()->m_sym->m_id); |
| } |
| |
| if (!doStackSlots) |
| { |
| InsertMove(IR::RegOpnd::New(instr->m_func->GetLocalClosureSym(), TyMachPtr, func), dst, instr); |
| } |
| instr->Remove(); |
| } |
| |
| void Lowerer::LowerLdInnerFrameDisplay(IR::Instr *instr) |
| { |
| bool isStrict = instr->m_func->GetJITFunctionBody()->IsStrictMode(); |
| if (isStrict) |
| { |
| if (instr->GetSrc2()) |
| { |
| this->LowerBinaryHelperMem(instr, IR::HelperScrObj_LdStrictInnerFrameDisplay); |
| } |
| else |
| { |
| #if DBG |
| instr->m_opcode = Js::OpCode::LdInnerFrameDisplayNoParent; |
| #endif |
| this->LowerUnaryHelperMem(instr, IR::HelperScrObj_LdStrictInnerFrameDisplayNoParent); |
| } |
| } |
| else |
| { |
| if (instr->GetSrc2()) |
| { |
| this->LowerBinaryHelperMem(instr, IR::HelperScrObj_LdInnerFrameDisplay); |
| } |
| else |
| { |
| #if DBG |
| instr->m_opcode = Js::OpCode::LdInnerFrameDisplayNoParent; |
| #endif |
| this->LowerUnaryHelperMem(instr, IR::HelperScrObj_LdInnerFrameDisplayNoParent); |
| } |
| } |
| } |
| |
| void Lowerer::LowerLdFrameDisplay(IR::Instr *instr, bool doStackFrameDisplay) |
| { |
| bool isStrict = instr->m_func->GetJITFunctionBody()->IsStrictMode(); |
| uint16 envDepth = instr->m_func->GetJITFunctionBody()->GetEnvDepth(); |
| Func *func = this->m_func; |
| |
| // envDepth of -1 indicates unknown depth (eval expression or HTML event handler). |
| // We could still fast-path these by generating a loop over the (dynamically loaded) scope chain length, |
| // but I doubt it's worth it. |
| // If the dst opnd is a byte code temp, that indicates we're prepending a block scope or some such and |
| // shouldn't attempt to do this. |
| if (envDepth == (uint16)-1 || |
| (!doStackFrameDisplay && (instr->isNonFastPathFrameDisplay || instr->GetDst()->AsRegOpnd()->m_sym->IsTempReg(instr->m_func))) || |
| PHASE_OFF(Js::FrameDisplayFastPathPhase, func)) |
| { |
| if (isStrict) |
| { |
| if (instr->GetSrc2()) |
| { |
| this->LowerBinaryHelperMem(instr, IR::HelperScrObj_LdStrictFrameDisplay); |
| } |
| else |
| { |
| #if DBG |
| instr->m_opcode = Js::OpCode::LdFrameDisplayNoParent; |
| #endif |
| this->LowerUnaryHelperMem(instr, IR::HelperScrObj_LdStrictFrameDisplayNoParent); |
| } |
| } |
| else |
| { |
| if (instr->GetSrc2()) |
| { |
| this->LowerBinaryHelperMem(instr, IR::HelperScrObj_LdFrameDisplay); |
| } |
| else |
| { |
| #if DBG |
| instr->m_opcode = Js::OpCode::LdFrameDisplayNoParent; |
| #endif |
| this->LowerUnaryHelperMem(instr, IR::HelperScrObj_LdFrameDisplayNoParent); |
| } |
| } |
| return; |
| } |
| |
| uint16 frameDispLength = envDepth + 1; |
| Assert(frameDispLength > 0); |
| |
| IR::RegOpnd *dstOpnd = instr->UnlinkDst()->AsRegOpnd(); |
| IR::RegOpnd *currentFrameOpnd = instr->UnlinkSrc1()->AsRegOpnd(); |
| |
| uint allocSize = sizeof(Js::FrameDisplay) + (frameDispLength * sizeof(Js::Var)); |
| if (doStackFrameDisplay) |
| { |
| IR::Instr *insertInstr = func->GetFunctionEntryInsertionPoint(); |
| |
| // Initialize stack pointers for scope slots and frame display together at the top of the function |
| // (in case we bail out before executing the instructions). |
| IR::LabelInstr *labelNoStackFunc = IR::LabelInstr::New(Js::OpCode::Label, m_func, true); |
| IR::LabelInstr *labelDone = IR::LabelInstr::New(Js::OpCode::Label, m_func); |
| |
| // Check whether stack functions have been disabled since we jitted. |
| // If they have, then we must allocate closure memory on the heap. |
| InsertTestBranch(IR::MemRefOpnd::New(m_func->GetJITFunctionBody()->GetFlagsAddr(), TyInt8, m_func), |
| IR::IntConstOpnd::New(Js::FunctionBody::Flags_StackNestedFunc, TyInt8, m_func, true), |
| Js::OpCode::BrEq_A, labelNoStackFunc, insertInstr); |
| // allocSize is greater than a machine pointer, so create the sym as TyMisc and size it explicitly via StackAllocate
| StackSym * stackSym = StackSym::New(TyMisc, instr->m_func); |
| m_func->StackAllocate(stackSym, allocSize); |
| |
| InsertLea(dstOpnd, IR::SymOpnd::New(stackSym, TyMachPtr, func), insertInstr); |
| |
| uint scopeSlotAllocSize = |
| (m_func->GetJITFunctionBody()->GetScopeSlotArraySize() + Js::ScopeSlots::FirstSlotIndex) * sizeof(Js::Var); |
| |
| stackSym = StackSym::New(TyMisc, instr->m_func); |
| m_func->StackAllocate(stackSym, scopeSlotAllocSize); |
| |
| InsertLea(currentFrameOpnd, IR::SymOpnd::New(stackSym, TyMachPtr, func), insertInstr); |
| InsertBranch(Js::OpCode::Br, labelDone, insertInstr); |
| |
| insertInstr->InsertBefore(labelNoStackFunc); |
| GenerateRecyclerAlloc(IR::HelperAllocMemForFrameDisplay, allocSize, dstOpnd, insertInstr, true); |
| GenerateRecyclerAlloc(IR::HelperAllocMemForVarArray, scopeSlotAllocSize, currentFrameOpnd, insertInstr, true); |
| |
| insertInstr->InsertBefore(labelDone); |
| InsertMove(IR::SymOpnd::New(m_func->GetLocalFrameDisplaySym(), 0, TyMachReg, m_func), dstOpnd, insertInstr); |
| InsertMove(IR::SymOpnd::New(m_func->GetLocalClosureSym(), 0, TyMachReg, m_func), currentFrameOpnd, insertInstr); |
| } |
| else |
| { |
| GenerateRecyclerAlloc(IR::HelperAllocMemForFrameDisplay, allocSize, dstOpnd, instr); |
| } |
| |
| // Copy contents of environment |
| // Work back to front to leave the head element(s) in cache |
| if (envDepth > 0) |
| { |
| IR::RegOpnd *envOpnd = instr->UnlinkSrc2()->AsRegOpnd(); |
| for (uint16 i = envDepth; i >= 1; i--) |
| { |
| IR::Opnd *scopeOpnd = IR::RegOpnd::New(TyMachReg, func); |
| IR::Opnd *envLoadOpnd = |
| IR::IndirOpnd::New(envOpnd, Js::FrameDisplay::GetOffsetOfScopes() + ((i - 1) * sizeof(Js::Var)), TyMachReg, func); |
| InsertMove(scopeOpnd, envLoadOpnd, instr); |
| |
| IR::Opnd *dstStoreOpnd = |
| IR::IndirOpnd::New(dstOpnd, Js::FrameDisplay::GetOffsetOfScopes() + (i * sizeof(Js::Var)), TyMachReg, func); |
| InsertMove(dstStoreOpnd, scopeOpnd, instr); |
| } |
| } |
| |
| // Assign current element. |
| InsertMove( |
| IR::IndirOpnd::New(dstOpnd, Js::FrameDisplay::GetOffsetOfScopes(), TyMachReg, func), |
| currentFrameOpnd, |
| instr); |
| |
| // Combine tag, strict mode flag, and length |
| uintptr_t bits = 1 | |
| (isStrict << (Js::FrameDisplay::GetOffsetOfStrictMode() * 8)) | |
| (frameDispLength << (Js::FrameDisplay::GetOffsetOfLength() * 8)); |
| InsertMove( |
| IR::IndirOpnd::New(dstOpnd, 0, TyMachReg, func), |
| IR::IntConstOpnd::New(bits, TyMachReg, func, true), |
| instr); |
| |
| instr->Remove(); |
| } |
| |
| IR::AddrOpnd *Lowerer::CreateFunctionBodyOpnd(Func *const func) const |
| { |
| return IR::AddrOpnd::New(func->GetJITFunctionBody()->GetAddr(), IR::AddrOpndKindDynamicFunctionBody, m_func, true); |
| } |
| |
| IR::AddrOpnd *Lowerer::CreateFunctionBodyOpnd(Js::FunctionBody *const functionBody) const |
| { |
| // TODO: OOP JIT, CreateFunctionBodyOpnd |
| Assert(!m_func->IsOOPJIT()); |
| return IR::AddrOpnd::New(functionBody, IR::AddrOpndKindDynamicFunctionBody, m_func, true); |
| } |
| |
| bool |
| Lowerer::GenerateRecyclerOrMarkTempAlloc(IR::Instr * instr, IR::RegOpnd * dstOpnd, IR::JnHelperMethod allocHelper, size_t allocSize, IR::SymOpnd ** tempObjectSymOpnd) |
| { |
| if (instr->dstIsTempObject) |
| { |
| *tempObjectSymOpnd = GenerateMarkTempAlloc(dstOpnd, allocSize, instr); |
| return false; |
| } |
| |
| this->GenerateRecyclerAlloc(allocHelper, allocSize, dstOpnd, instr); |
| *tempObjectSymOpnd = nullptr; |
| return true; |
| } |
| |
| IR::SymOpnd * |
| Lowerer::GenerateMarkTempAlloc(IR::RegOpnd *const dstOpnd, const size_t allocSize, IR::Instr *const insertBeforeInstr) |
| { |
| Assert(dstOpnd); |
| Assert(allocSize != 0); |
| Assert(insertBeforeInstr); |
| |
| Func *const func = insertBeforeInstr->m_func; |
| |
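| // Roughly, this emits (a sketch; the slot below the returned address holds the boxed instance):
| //
| // LEA dst, [tempObjectSlot + sizeof(void*)]
| // MOV [dst - sizeof(void*)], nullptr (this init may instead be hoisted to the outermost loop top)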
| // Allocate stack space for the reg exp instance, and a slot for the boxed value |
| StackSym *const tempObjectSym = StackSym::New(TyMisc, func); |
| m_func->StackAllocate(tempObjectSym, (int)(allocSize + sizeof(void *))); |
| IR::SymOpnd * tempObjectOpnd = IR::SymOpnd::New(tempObjectSym, sizeof(void *), TyVar, func); |
| InsertLea(dstOpnd, tempObjectOpnd, insertBeforeInstr); |
| |
| // Initialize the boxed instance slot |
| if (this->outerMostLoopLabel == nullptr) |
| { |
| GenerateMemInit(dstOpnd, -(int)sizeof(void *), IR::AddrOpnd::NewNull(func), insertBeforeInstr, false); |
| } |
| else if (!PHASE_OFF(Js::HoistMarkTempInitPhase, this->m_func)) |
| { |
| InsertMove(IR::SymOpnd::New(tempObjectSym, TyMachPtr, func), IR::AddrOpnd::NewNull(func), this->outerMostLoopLabel, false); |
| } |
| return tempObjectOpnd; |
| } |
| |
| void Lowerer::LowerBrFncCachedScopeEq(IR::Instr *instr) |
| { |
| Assert(instr->m_opcode == Js::OpCode::BrFncCachedScopeEq || instr->m_opcode == Js::OpCode::BrFncCachedScopeNeq); |
| Js::OpCode opcode = (instr->m_opcode == Js::OpCode::BrFncCachedScopeEq ? Js::OpCode::BrEq_A : Js::OpCode::BrNeq_A); |
| |
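| // Roughly, this emits:
| //
| // CMP [src1 + offsetof(ScriptFunction, cachedScopeObj)], src2
| // JEQ/JNE $target (JEQ for BrFncCachedScopeEq, JNE for BrFncCachedScopeNeq)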
| IR::RegOpnd *src1Reg = instr->UnlinkSrc1()->AsRegOpnd(); |
| |
| IR::IndirOpnd *indirOpnd = IR::IndirOpnd::New(src1Reg, Js::ScriptFunction::GetOffsetOfCachedScopeObj(), TyMachReg, this->m_func); |
| this->InsertCompareBranch(indirOpnd, instr->UnlinkSrc2(), opcode, false, instr->AsBranchInstr()->GetTarget(), instr->m_next); |
| |
| instr->Remove(); |
| } |
| |
| IR::Instr* Lowerer::InsertLoweredRegionStartMarker(IR::Instr* instrToInsertBefore) |
| { |
| AssertMsg(instrToInsertBefore->m_prev != nullptr, "Can't insert lowered region start marker as the first instr in the func."); |
| IR::LabelInstr* startMarkerLabel = IR::LabelInstr::New(Js::OpCode::Label, instrToInsertBefore->m_func); |
| instrToInsertBefore->InsertBefore(startMarkerLabel); |
| return startMarkerLabel; |
| } |
| |
| IR::Instr* Lowerer::RemoveLoweredRegionStartMarker(IR::Instr* startMarkerInstr) |
| { |
| AssertMsg(startMarkerInstr->m_prev != nullptr, "Lowered region start marker became the first instruction in the func after lowering?"); |
| IR::Instr* prevInstr = startMarkerInstr->m_prev; |
| startMarkerInstr->Remove(); |
| return prevInstr; |
| } |
| |
| IR::Instr* Lowerer::GetLdSpreadIndicesInstr(IR::Instr *instr) |
| { |
| IR::Opnd *src2 = instr->GetSrc2(); |
| if (!src2->IsSymOpnd()) |
| { |
| return nullptr; |
| } |
| |
| IR::SymOpnd * argLinkOpnd = src2->AsSymOpnd(); |
| StackSym * argLinkSym = argLinkOpnd->m_sym->AsStackSym(); |
| |
| Assert(argLinkSym->IsSingleDef()); |
| |
| return argLinkSym->m_instrDef; |
| } |
| |
| bool Lowerer::IsSpreadCall(IR::Instr *instr) |
| { |
| IR::Instr *lastInstr = GetLdSpreadIndicesInstr(instr); |
| return lastInstr && lastInstr->m_opcode == Js::OpCode::LdSpreadIndices; |
| } |
| |
| // When under debugger, generate a new label to be used as safe place to jump after ignore exception, |
| // insert it after insertAfterInstr, and return the label inserted. |
| // Returns nullptr/NoOP for non-debugger code path. |
| //static |
| IR::LabelInstr* Lowerer::InsertContinueAfterExceptionLabelForDebugger(Func* func, IR::Instr* insertAfterInstr, bool isHelper) |
| { |
| Assert(func); |
| Assert(insertAfterInstr); |
| |
| IR::LabelInstr* continueAfterExLabel = nullptr; |
| if (func->IsJitInDebugMode()) |
| { |
| continueAfterExLabel = IR::LabelInstr::New(Js::OpCode::Label, func, isHelper); |
| insertAfterInstr->InsertAfter(continueAfterExLabel); |
| } |
| return continueAfterExLabel; |
| } |
| |
| void Lowerer::GenerateSingleCharStrJumpTableLookup(IR::Instr * instr) |
| { |
| IR::MultiBranchInstr * multiBrInstr = instr->AsBranchInstr()->AsMultiBrInstr(); |
| Func * func = instr->m_func; |
| IR::LabelInstr * helperLabel = IR::LabelInstr::New(Js::OpCode::Label, func, true); |
| IR::LabelInstr * continueLabel = IR::LabelInstr::New(Js::OpCode::Label, func); |
| |
| // MOV strLengthOpnd, str->length |
| IR::RegOpnd * strLengthOpnd = IR::RegOpnd::New(TyUint32, func); |
| InsertMove(strLengthOpnd, IR::IndirOpnd::New(instr->GetSrc1()->AsRegOpnd(), Js::JavascriptString::GetOffsetOfcharLength(), TyUint32, func), instr); |
| |
| // CMP strLengthOpnd, 1 |
| // JNE defaultLabel |
| IR::LabelInstr * defaultLabelInstr = (IR::LabelInstr *)multiBrInstr->GetBranchJumpTable()->defaultTarget; |
| InsertCompareBranch(strLengthOpnd, IR::IntConstOpnd::New(1, TyUint32, func), Js::OpCode::BrNeq_A, defaultLabelInstr, instr); |
| |
| // MOV strBuffer, str->psz |
| IR::RegOpnd * strBufferOpnd = IR::RegOpnd::New(TyMachPtr, func); |
| InsertMove(strBufferOpnd, IR::IndirOpnd::New(instr->GetSrc1()->AsRegOpnd(), Js::JavascriptString::GetOffsetOfpszValue(), TyMachPtr, func), instr); |
| |
| // TST strBuffer, strBuffer |
| // JNE $continue |
| InsertTestBranch(strBufferOpnd, strBufferOpnd, Js::OpCode::BrNeq_A, continueLabel, instr); |
| |
| // $helper: |
| // PUSH str |
| // CALL JavascriptString::GetSzHelper |
| // MOV strBuffer, eax |
| // $continue: |
| instr->InsertBefore(helperLabel); |
| m_lowererMD.LoadHelperArgument(instr, instr->GetSrc1()); |
| IR::Instr * instrCall = IR::Instr::New(Js::OpCode::Call, strBufferOpnd, IR::HelperCallOpnd::New(IR::HelperString_GetSz, func), func); |
| instr->InsertBefore(instrCall); |
| m_lowererMD.LowerCall(instrCall, 0); |
| instr->InsertBefore(continueLabel); |
| |
| // MOV charOpnd, [strBuffer] |
| IR::RegOpnd * charOpnd = IR::RegOpnd::New(TyUint32, func); |
| InsertMove(charOpnd, IR::IndirOpnd::New(strBufferOpnd, 0, TyUint16, func), instr); |
| |
| if (multiBrInstr->m_baseCaseValue != 0) |
| { |
| // SUB charOpnd, baseIndex |
| InsertSub(false, charOpnd, charOpnd, IR::IntConstOpnd::New(multiBrInstr->m_baseCaseValue, TyUint32, func), instr); |
| } |
| |
| // CMP charOpnd, lastCaseIndex - baseCaseIndex |
| // JA defaultLabel |
| InsertCompareBranch(charOpnd, IR::IntConstOpnd::New(multiBrInstr->m_lastCaseValue - multiBrInstr->m_baseCaseValue, TyUint32, func), |
| Js::OpCode::BrGt_A, true, defaultLabelInstr, instr); |
| |
| instr->UnlinkSrc1(); |
| LowerJumpTableMultiBranch(multiBrInstr, charOpnd); |
| } |
| |
| void Lowerer::GenerateSwitchStringLookup(IR::Instr * instr) |
| { |
| /* Collect information about the string lengths across all cases */
| charcount_t minLength = UINT_MAX; |
| charcount_t maxLength = 0; |
| BVUnit32 bvLength; |
| instr->AsBranchInstr()->AsMultiBrInstr()->GetBranchDictionary()->dictionary.Map([&](JITJavascriptString * str, void *) |
| { |
| charcount_t len = str->GetLength(); |
| minLength = min(minLength, str->GetLength()); |
| maxLength = max(maxLength, str->GetLength()); |
| if (len < 32) |
| { |
| bvLength.Set(len); |
| } |
| }); |
| |
| Func * func = instr->m_func; |
| IR::RegOpnd * strLengthOpnd = IR::RegOpnd::New(TyUint32, func); |
| InsertMove(strLengthOpnd, IR::IndirOpnd::New(instr->GetSrc1()->AsRegOpnd(), Js::JavascriptString::GetOffsetOfcharLength(), TyUint32, func), instr); |
| IR::LabelInstr * defaultLabelInstr = (IR::LabelInstr *)instr->AsBranchInstr()->AsMultiBrInstr()->GetBranchDictionary()->defaultTarget; |
| if (minLength == maxLength) |
| { |
| // Generate single length filter |
| InsertCompareBranch(strLengthOpnd, IR::IntConstOpnd::New(minLength, TyUint32, func), Js::OpCode::BrNeq_A, defaultLabelInstr, instr); |
| } |
| else if (maxLength < 32) |
| { |
| // Generate bit filter |
| |
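| // e.g. if the case lengths are {2, 5}, bvLength is 0b100100: any string whose
| // length bit is clear, or whose length is >= 32, cannot match any case and
| // goes straight to the default target.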
| // Jump to the default label if the bit for (length % 32) is not set
| IR::IntConstOpnd * lenBitMaskOpnd = IR::IntConstOpnd::New(bvLength.GetWord(), TyUint32, func); |
| InsertBitTestBranch(lenBitMaskOpnd, strLengthOpnd, false, defaultLabelInstr, instr); |
| // Jump to the default label if the length is >= 32 (the bit filter only covers lengths 0..31)
| InsertTestBranch(strLengthOpnd, IR::IntConstOpnd::New(UINT32_MAX ^ 31, TyUint32, func), Js::OpCode::BrNeq_A, defaultLabelInstr, instr); |
| } |
| else |
| { |
| // CONSIDER: Generate range filter |
| } |
| this->LowerMultiBr(instr, IR::HelperOp_SwitchStringLookUp); |
| } |
| |
| IR::Instr * |
| Lowerer::LowerGetCachedFunc(IR::Instr *instr) |
| { |
| // src1 is an ActivationObjectEx, and we want to get the function object identified by the index (src2) |
| // dst = MOV (src1)->GetFuncCacheEntry(src2)->func |
| // |
| // => [src1 + (offsetof(src1, cache) + (src2 * sizeof(FuncCacheEntry)) + offsetof(FuncCacheEntry, func))] |
| |
| IR::IntConstOpnd *src2Opnd = instr->UnlinkSrc2()->AsIntConstOpnd(); |
| IR::RegOpnd *src1Opnd = instr->UnlinkSrc1()->AsRegOpnd(); |
| IR::Instr *instrPrev = instr->m_prev; |
| |
| instr->SetSrc1(IR::IndirOpnd::New(src1Opnd, int32((src2Opnd->GetValue() * sizeof(Js::FuncCacheEntry)) + Js::ActivationObjectEx::GetOffsetOfCache() + offsetof(Js::FuncCacheEntry, func)), TyVar, this->m_func)); |
| |
| this->m_lowererMD.ChangeToAssign(instr); |
| |
| src2Opnd->Free(this->m_func); |
| |
| return instrPrev; |
| } |
| |
| IR::Instr * |
| Lowerer::LowerCommitScope(IR::Instr *instrCommit) |
| { |
| IR::Instr *instrPrev = instrCommit->m_prev; |
| IR::RegOpnd *baseOpnd = instrCommit->UnlinkSrc1()->AsRegOpnd(); |
| IR::Opnd *opnd; |
| IR::Instr * insertInstr = instrCommit->m_next; |
| |
| // Write undef to all the local var slots. |
| |
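| // Roughly, this emits:
| //
| // MOV [base + offsetOfCommitFlag], 1
| // MOV undef, undefined
| // MOV slotBase, [base + offsetOfAuxSlots]
| // MOV [slotBase + i * ptrSize], undef (for each formal var slot)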
| opnd = IR::IndirOpnd::New(baseOpnd, Js::ActivationObjectEx::GetOffsetOfCommitFlag(), TyInt8, this->m_func); |
| instrCommit->SetDst(opnd); |
| instrCommit->SetSrc1(IR::IntConstOpnd::New(1, TyInt8, this->m_func)); |
| LowererMD::ChangeToAssign(instrCommit); |
| |
| const Js::PropertyIdArray *propIds = instrCommit->m_func->GetJITFunctionBody()->GetFormalsPropIdArray(); |
| |
| uint firstVarSlot = (uint)Js::ActivationObjectEx::GetFirstVarSlot(propIds); |
| if (firstVarSlot < propIds->count) |
| { |
| // Instead of re-using the address of "undefined" for each store, put the address in a register and re-use that. |
| IR::RegOpnd *undefOpnd = IR::RegOpnd::New(TyMachReg, this->m_func); |
| InsertMove(undefOpnd, LoadLibraryValueOpnd(insertInstr, LibraryValue::ValueUndefined), insertInstr); |
| |
| IR::RegOpnd *slotBaseOpnd = IR::RegOpnd::New(TyMachReg, this->m_func); |
| |
| // Load a pointer to the aux slots. We assume that all ActivationObject's have only aux slots. |
| |
| opnd = IR::IndirOpnd::New(baseOpnd, Js::DynamicObject::GetOffsetOfAuxSlots(), TyMachReg, this->m_func); |
| InsertMove(slotBaseOpnd, opnd, insertInstr); |
| |
| for (uint i = firstVarSlot; i < propIds->count; i++) |
| { |
| opnd = IR::IndirOpnd::New(slotBaseOpnd, i << this->m_lowererMD.GetDefaultIndirScale(), TyMachReg, this->m_func); |
| InsertMove(opnd, undefOpnd, insertInstr); |
| } |
| } |
| |
| return instrPrev; |
| } |
| |
| IR::Instr * |
| Lowerer::LowerTry(IR::Instr* instr, bool tryCatch) |
| { |
| if (this->m_func->hasBailout) |
| { |
| this->EnsureBailoutReturnValueSym(); |
| } |
| this->EnsureHasBailedOutSym(); |
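| // Roughly, this emits (the call itself is machine-specific; see LowererMD::LowerTry):
| //
| // MOV hasBailedOut, 0
| // CALL Op_TryCatch / Op_TryFinally / Op_TryFinallyNoOpt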
| IR::SymOpnd * hasBailedOutOpnd = IR::SymOpnd::New(this->m_func->m_hasBailedOutSym, TyUint32, this->m_func); |
| IR::Instr * setInstr = IR::Instr::New(LowererMD::GetStoreOp(TyUint32), hasBailedOutOpnd, IR::IntConstOpnd::New(0, TyUint32, this->m_func), this->m_func); |
| instr->InsertBefore(setInstr); |
| LowererMD::Legalize(setInstr); |
| |
| return m_lowererMD.LowerTry(instr, tryCatch ? IR::HelperOp_TryCatch : ((this->m_func->DoOptimizeTry() || (this->m_func->IsSimpleJit() && this->m_func->hasBailout))? IR::HelperOp_TryFinally : IR::HelperOp_TryFinallyNoOpt)); |
| } |
| |
| IR::Instr * |
| Lowerer::LowerCatch(IR::Instr * instr) |
| { |
| // t1 = catch => t2 = catch |
| // => t1 = t2 |
| |
| IR::Opnd *catchObj = instr->UnlinkDst(); |
| IR::RegOpnd *catchParamReg = IR::RegOpnd::New(TyMachPtr, this->m_func); |
| catchParamReg->SetReg(CATCH_OBJ_REG); |
| |
| instr->SetDst(catchParamReg); |
| |
| IR::Instr * mov = IR::Instr::New(Js::OpCode::Ld_A, catchObj, catchParamReg, this->m_func); |
| this->m_lowererMD.ChangeToAssign(mov); |
| instr->InsertAfter(mov); |
| |
| return instr->m_prev; |
| } |
| |
| IR::Instr * |
| Lowerer::LowerLeave(IR::Instr * leaveInstr, IR::LabelInstr * targetInstr, bool fromFinalLower, bool isOrphanedLeave) |
| { |
| if (isOrphanedLeave) |
| { |
| Assert(this->m_func->IsLoopBodyInTry()); |
| leaveInstr->m_opcode = LowererMD::MDUncondBranchOpcode; |
| return leaveInstr->m_prev; |
| } |
| |
| IR::Instr * instrPrev = leaveInstr->m_prev; |
| IR::LabelOpnd *labelOpnd = IR::LabelOpnd::New(targetInstr, this->m_func); |
| m_lowererMD.LowerEHRegionReturn(leaveInstr, labelOpnd); |
| |
| if (fromFinalLower) |
| { |
| instrPrev = leaveInstr->m_prev; |
| } |
| leaveInstr->Remove(); |
| return instrPrev; |
| } |
| |
| void |
| Lowerer::EnsureBailoutReturnValueSym() |
| { |
| if (this->m_func->m_bailoutReturnValueSym == nullptr) |
| { |
| this->m_func->m_bailoutReturnValueSym = StackSym::New(TyVar, this->m_func); |
| this->m_func->StackAllocate(this->m_func->m_bailoutReturnValueSym, sizeof(Js::Var)); |
| } |
| } |
| |
| void |
| Lowerer::EnsureHasBailedOutSym() |
| { |
| if (this->m_func->m_hasBailedOutSym == nullptr) |
| { |
| this->m_func->m_hasBailedOutSym = StackSym::New(TyUint32, this->m_func); |
| this->m_func->StackAllocate(this->m_func->m_hasBailedOutSym, MachRegInt); |
| } |
| } |
| |
| void |
| Lowerer::InsertReturnThunkForRegion(Region* region, IR::LabelInstr* restoreLabel) |
| { |
| Assert(this->m_func->isPostLayout); |
| Assert(region->GetType() == RegionTypeTry || region->GetType() == RegionTypeCatch || region->GetType() == RegionTypeFinally); |
| |
| if (!region->returnThunkEmitted) |
| { |
| this->m_func->m_exitInstr->InsertAfter(region->GetBailoutReturnThunkLabel()); |
| |
| bool newLastInstrInserted = false; |
| IR::Instr * insertBeforeInstr = region->GetBailoutReturnThunkLabel()->m_next; |
| if (insertBeforeInstr == nullptr) |
| { |
| Assert(this->m_func->m_exitInstr == this->m_func->m_tailInstr); |
| insertBeforeInstr = IR::Instr::New(Js::OpCode::Nop, this->m_func); |
| newLastInstrInserted = true; |
| region->GetBailoutReturnThunkLabel()->InsertAfter(insertBeforeInstr); |
| this->m_func->m_tailInstr = insertBeforeInstr; |
| } |
| |
| IR::LabelOpnd * continuationAddr; |
| // We insert a jump to the return thunk label of the region's parent.
| // Non-exception finallys do not need a return thunk of their own,
| // because we do not call non-exception finallys from within amd64_callWithFakeFrame.
| // But a non-exception finally may be nested within other EH regions that do need one.
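| // The thunk itself, emitted at the end of the function, is roughly:
| //
| // $region->bailoutReturnThunkLabel:
| // (machine-specific EH-region return that continues at continuationAddr)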
| if (region->IsNonExceptingFinally()) |
| { |
| Assert(region->GetParent()->GetType() != RegionTypeRoot); |
| Region *ancestor = region->GetParent()->GetFirstAncestorOfNonExceptingFinallyParent(); |
| Assert(ancestor && !ancestor->IsNonExceptingFinally()); |
| if (ancestor->GetType() != RegionTypeRoot) |
| { |
| continuationAddr = IR::LabelOpnd::New(ancestor->GetBailoutReturnThunkLabel(), this->m_func); |
| } |
| else |
| { |
| continuationAddr = IR::LabelOpnd::New(restoreLabel, this->m_func); |
| } |
| } |
| else if (region->GetParent()->IsNonExceptingFinally()) |
| { |
| Region *ancestor = region->GetFirstAncestorOfNonExceptingFinally(); |
| if (ancestor && ancestor->GetType() != RegionTypeRoot) |
| { |
| continuationAddr = IR::LabelOpnd::New(ancestor->GetBailoutReturnThunkLabel(), this->m_func); |
| } |
| else |
| { |
| continuationAddr = IR::LabelOpnd::New(restoreLabel, this->m_func); |
| } |
| } |
| else if (region->GetParent()->GetType() != RegionTypeRoot) |
| { |
| continuationAddr = IR::LabelOpnd::New(region->GetParent()->GetBailoutReturnThunkLabel(), this->m_func); |
| } |
| else |
| { |
| continuationAddr = IR::LabelOpnd::New(restoreLabel, this->m_func); |
| } |
| |
| IR::Instr * lastInstr = m_lowererMD.LowerEHRegionReturn(insertBeforeInstr, continuationAddr); |
| if (newLastInstrInserted) |
| { |
| Assert(this->m_func->m_tailInstr == insertBeforeInstr); |
| insertBeforeInstr->Remove(); |
| this->m_func->m_tailInstr = lastInstr; |
| } |
| |
| region->returnThunkEmitted = true; |
| } |
| } |
| |
| void |
| Lowerer::SetHasBailedOut(IR::Instr * bailoutInstr) |
| { |
| Assert(this->m_func->isPostLayout); |
| IR::SymOpnd * hasBailedOutOpnd = IR::SymOpnd::New(this->m_func->m_hasBailedOutSym, TyUint32, this->m_func); |
| IR::Instr * setInstr = IR::Instr::New(LowererMD::GetStoreOp(TyUint32), hasBailedOutOpnd, IR::IntConstOpnd::New(1, TyUint32, this->m_func), this->m_func); |
| bailoutInstr->InsertBefore(setInstr); |
| LowererMD::Legalize(setInstr); |
| } |
| |
| IR::Instr* |
| Lowerer::EmitEHBailoutStackRestore(IR::Instr * bailoutInstr) |
| { |
| Assert(this->m_func->isPostLayout); |
| |
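| // On x86, the out params pushed for StartCalls that were live at the bailout point
| // are still on the stack after the bailout call; pop them all at once, roughly:
| //
| // LEA esp, [esp + totalStackToBeRestored]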
| #ifdef _M_IX86 |
| BailOutInfo * bailoutInfo = bailoutInstr->GetBailOutInfo(); |
| uint totalLiveArgCount = 0; |
| if (bailoutInfo->startCallCount != 0) |
| { |
| uint totalStackToBeRestored = 0; |
| uint stackAlignmentAdjustment = 0; |
| for (uint i = 0; i < bailoutInfo->startCallCount; i++) |
| { |
| uint startCallLiveArgCount = bailoutInfo->startCallInfo[i].isOrphanedCall ? 0 : bailoutInfo->GetStartCallOutParamCount(i); |
| if ((Math::Align<int32>(startCallLiveArgCount * MachPtr, MachStackAlignment) - (startCallLiveArgCount * MachPtr)) != 0) |
| { |
| stackAlignmentAdjustment++; |
| } |
| totalLiveArgCount += startCallLiveArgCount; |
| } |
| totalStackToBeRestored = (totalLiveArgCount + stackAlignmentAdjustment) * MachPtr; |
| |
| IR::RegOpnd * espOpnd = IR::RegOpnd::New(NULL, LowererMD::GetRegStackPointer(), TyMachReg, this->m_func); |
| IR::Opnd * opnd = IR::IndirOpnd::New(espOpnd, totalStackToBeRestored, TyMachReg, this->m_func); |
| IR::Instr * stackRestoreInstr = IR::Instr::New(Js::OpCode::LEA, espOpnd, opnd, this->m_func); |
| |
| bailoutInstr->InsertAfter(stackRestoreInstr); |
| return stackRestoreInstr; |
| } |
| #endif |
| |
| return bailoutInstr; |
| } |
| |
| void |
| Lowerer::EmitSaveEHBailoutReturnValueAndJumpToRetThunk(IR::Instr * insertAfterInstr) |
| { |
| Assert(this->m_func->isPostLayout); |
| // After the CALL SaveAllRegistersAndBailout instruction, emit |
| // |
| // MOV bailoutReturnValueSym, eax |
| // JMP $currentRegion->bailoutReturnThunkLabel |
| |
| IR::SymOpnd * bailoutReturnValueSymOpnd = IR::SymOpnd::New(this->m_func->m_bailoutReturnValueSym, TyVar, this->m_func); |
| IR::RegOpnd *eaxOpnd = IR::RegOpnd::New(NULL, LowererMD::GetRegReturn(TyMachReg), TyMachReg, this->m_func); |
| IR::Instr * movInstr = IR::Instr::New(LowererMD::GetStoreOp(TyVar), bailoutReturnValueSymOpnd, eaxOpnd, this->m_func); |
| insertAfterInstr->InsertAfter(movInstr); |
| LowererMD::Legalize(movInstr); |
| |
| IR::BranchInstr * jumpInstr = IR::BranchInstr::New(LowererMD::MDUncondBranchOpcode, this->currentRegion->GetBailoutReturnThunkLabel(), this->m_func); |
| movInstr->InsertAfter(jumpInstr); |
| } |
| |
| void |
| Lowerer::EmitRestoreReturnValueFromEHBailout(IR::LabelInstr * restoreLabel, IR::LabelInstr * epilogLabel) |
| { |
| Assert(this->m_func->isPostLayout); |
| // JMP $epilog |
| // $restore: |
| // MOV eax, bailoutReturnValueSym |
| // $epilog: |
| |
| IR::SymOpnd * bailoutReturnValueSymOpnd = IR::SymOpnd::New(this->m_func->m_bailoutReturnValueSym, TyVar, this->m_func); |
| IR::RegOpnd * eaxOpnd = IR::RegOpnd::New(NULL, LowererMD::GetRegReturn(TyMachReg), TyMachReg, this->m_func); |
| |
| IR::Instr * movInstr = IR::Instr::New(LowererMD::GetLoadOp(TyVar), eaxOpnd, bailoutReturnValueSymOpnd, this->m_func); |
| |
| epilogLabel->InsertBefore(restoreLabel); |
| epilogLabel->InsertBefore(movInstr); |
| LowererMD::Legalize(movInstr); |
| restoreLabel->InsertBefore(IR::BranchInstr::New(LowererMD::MDUncondBranchOpcode, epilogLabel, this->m_func)); |
| } |
| |
| void |
| Lowerer::InsertBitTestBranch(IR::Opnd * bitMaskOpnd, IR::Opnd * bitIndex, bool jumpIfBitOn, IR::LabelInstr * targetLabel, IR::Instr * insertBeforeInstr) |
| { |
| #if defined(_M_IX86) || defined(_M_AMD64) |
| // Generate bit test and branch |
| // BT bitMaskOpnd, bitIndex |
| // JB/JAE targetLabel |
| Func * func = this->m_func; |
| IR::Instr * instr = IR::Instr::New(Js::OpCode::BT, func); |
| instr->SetSrc1(bitMaskOpnd); |
| instr->SetSrc2(bitIndex); |
| insertBeforeInstr->InsertBefore(instr); |
| |
| if (!(bitMaskOpnd->IsRegOpnd() || bitMaskOpnd->IsIndirOpnd() || bitMaskOpnd->IsMemRefOpnd())) |
| { |
| instr->HoistSrc1(Js::OpCode::MOV); |
| } |
| |
| InsertBranch(jumpIfBitOn ? Js::OpCode::JB : Js::OpCode::JAE, targetLabel, insertBeforeInstr); |
| #elif defined(_M_ARM) |
| // ARM doesn't have a bit test instruction, so generate:
| // MOV r1, 1 |
| // SHL r1, bitIndex |
| // TEST bitMaskOpnd, r1 |
| // BEQ/BNEQ targetLabel |
| Func * func = this->m_func; |
| IR::RegOpnd * lenBitOpnd = IR::RegOpnd::New(TyUint32, func); |
| InsertMove(lenBitOpnd, IR::IntConstOpnd::New(1, TyUint32, this->m_func), insertBeforeInstr); |
| InsertShift(Js::OpCode::Shl_I4, false, lenBitOpnd, lenBitOpnd, bitIndex, insertBeforeInstr); |
| InsertTestBranch(lenBitOpnd, bitMaskOpnd, jumpIfBitOn ? Js::OpCode::BrNeq_A : Js::OpCode::BrEq_A, targetLabel, insertBeforeInstr); |
| #elif defined(_M_ARM64) |
| |
| if (bitIndex->IsImmediateOpnd()) |
| { |
| // TBZ/TBNZ bitMaskOpnd, bitIndex, targetLabel |
| IR::Instr* branchInstr = InsertBranch(jumpIfBitOn ? Js::OpCode::TBNZ : Js::OpCode::TBZ, targetLabel, insertBeforeInstr); |
| branchInstr->SetSrc1(bitMaskOpnd); |
| branchInstr->SetSrc2(bitIndex); |
| } |
| else |
| { |
| // TBZ/TBNZ require an immediate for the bit to test, so shift the mask to place the bit we want to test at bit zero, and then test bit zero. |
| Func * func = this->m_func; |
| IR::RegOpnd * maskOpnd = IR::RegOpnd::New(TyUint32, func); |
| InsertShift(Js::OpCode::Shr_I4, false, maskOpnd, bitMaskOpnd, bitIndex, insertBeforeInstr); |
| |
| IR::Instr* branchInstr = InsertBranch(jumpIfBitOn ? Js::OpCode::TBNZ : Js::OpCode::TBZ, targetLabel, insertBeforeInstr); |
| branchInstr->SetSrc1(maskOpnd); |
| branchInstr->SetSrc2(IR::IntConstOpnd::New(0, TyUint32, this->m_func)); |
| } |
| |
| |
| #else |
| AssertMsg(false, "Not implemented"); |
| #endif |
| } |
| |
| // |
| // Generates an object test and then a string test with the static string type |
| // |
| void |
| Lowerer::GenerateStringTest(IR::RegOpnd *srcReg, IR::Instr *insertInstr, IR::LabelInstr *labelHelper, IR::LabelInstr * continueLabel, bool generateObjectCheck) |
| { |
| Assert(srcReg); |
| if (!srcReg->GetValueType().IsString()) |
| { |
| if (generateObjectCheck && !srcReg->IsNotTaggedValue()) |
| { |
| this->m_lowererMD.GenerateObjectTest(srcReg, insertInstr, labelHelper); |
| } |
| |
| // CMP [regSrcStr + offset(type)] , static string type -- check base string type |
| // BrEq/BrNeq labelHelper. |
| IR::IndirOpnd * src1 = IR::IndirOpnd::New(srcReg, Js::RecyclableObject::GetOffsetOfType(), TyMachReg, m_func); |
| IR::Opnd * src2 = this->LoadLibraryValueOpnd(insertInstr, LibraryValue::ValueStringTypeStatic); |
| IR::BranchInstr* branchInstr = nullptr; |
| if (continueLabel) |
| { |
| branchInstr = InsertCompareBranch(src1, src2, Js::OpCode::BrEq_A, continueLabel, insertInstr); |
| } |
| else |
| { |
| branchInstr = InsertCompareBranch(src1, src2, Js::OpCode::BrNeq_A, labelHelper, insertInstr); |
| } |
| InsertObjectPoison(srcReg, branchInstr, insertInstr, false); |
| } |
| } |
| |
| // |
| // Generates an object test and then a symbol test with the static symbol type |
| // |
| void |
| Lowerer::GenerateSymbolTest(IR::RegOpnd *srcReg, IR::Instr *insertInstr, IR::LabelInstr *labelHelper, IR::LabelInstr * continueLabel, bool generateObjectCheck) |
| { |
| Assert(srcReg); |
| if (!srcReg->GetValueType().IsSymbol()) |
| { |
| if (generateObjectCheck && !srcReg->IsNotTaggedValue()) |
| { |
| this->m_lowererMD.GenerateObjectTest(srcReg, insertInstr, labelHelper); |
| } |
| |
| // CMP [regSrcStr + offset(type)] , static symbol type -- check base symbol type |
| // BrEq/BrNeq labelHelper. |
| IR::IndirOpnd * src1 = IR::IndirOpnd::New(srcReg, Js::RecyclableObject::GetOffsetOfType(), TyMachReg, m_func); |
| IR::Opnd * src2 = this->LoadLibraryValueOpnd(insertInstr, LibraryValue::ValueSymbolTypeStatic); |
| if (continueLabel) |
| { |
| InsertCompareBranch(src1, src2, Js::OpCode::BrEq_A, continueLabel, insertInstr); |
| } |
| else |
| { |
| InsertCompareBranch(src1, src2, Js::OpCode::BrNeq_A, labelHelper, insertInstr); |
| } |
| } |
| } |
| |
| void |
| Lowerer::LowerConvNum(IR::Instr *instrLoad, bool noMathFastPath) |
| { |
| if (PHASE_OFF(Js::OtherFastPathPhase, this->m_func) || noMathFastPath || !instrLoad->GetSrc1()->IsRegOpnd()) |
| { |
| this->LowerUnaryHelperMemWithTemp2(instrLoad, IR_HELPER_OP_FULL_OR_INPLACE(ConvNumber)); |
| return; |
| } |
| |
| // MOV dst, src1 |
| // TEST src1, 1 |
| // JNE $done |
| // call ToNumber |
| //$done: |
| |
| bool isInt = false; |
| bool isNotInt = false; |
| IR::RegOpnd *src1 = instrLoad->GetSrc1()->AsRegOpnd(); |
| IR::LabelInstr *labelDone = NULL; |
| IR::Instr *instr; |
| |
| if (src1->IsTaggedInt()) |
| { |
| isInt = true; |
| } |
| else if (src1->IsNotInt()) |
| { |
| isNotInt = true; |
| } |
| if (!isNotInt) |
| { |
| // MOV dst, src1 |
| |
| instr = Lowerer::InsertMove(instrLoad->GetDst(), src1, instrLoad); |
| |
| if (!isInt) |
| { |
| labelDone = IR::LabelInstr::New(Js::OpCode::Label, this->m_func); |
| bool didTest = m_lowererMD.GenerateObjectTest(src1, instrLoad, labelDone); |
| |
| if (didTest) |
| { |
| // This label is needed only to mark the helper block |
| IR::LabelInstr * labelHelper = IR::LabelInstr::New(Js::OpCode::Label, this->m_func, true); |
| instrLoad->InsertBefore(labelHelper); |
| } |
| } |
| } |
| |
| if (!isInt) |
| { |
| if (labelDone) |
| { |
| instrLoad->InsertAfter(labelDone); |
| } |
| this->LowerUnaryHelperMemWithTemp2(instrLoad, IR_HELPER_OP_FULL_OR_INPLACE(ConvNumber)); |
| } |
| else |
| { |
| instrLoad->Remove(); |
| } |
| } |
| |
| IR::Opnd * |
| Lowerer::LoadSlotArrayWithCachedLocalType(IR::Instr * instrInsert, IR::PropertySymOpnd *propertySymOpnd) |
| { |
| IR::RegOpnd *opndBase = propertySymOpnd->CreatePropertyOwnerOpnd(m_func); |
| if (propertySymOpnd->UsesAuxSlot()) |
| { |
| // If we use the auxiliary slot array, load it and return it |
| IR::RegOpnd * opndSlotArray; |
| if (propertySymOpnd->IsAuxSlotPtrSymAvailable() || propertySymOpnd->ProducesAuxSlotPtr()) |
| { |
| // We want to reload and/or reuse the shared aux slot ptr sym |
| StackSym * auxSlotPtrSym = propertySymOpnd->GetAuxSlotPtrSym(); |
| Assert(auxSlotPtrSym != nullptr); |
| |
| opndSlotArray = IR::RegOpnd::New(auxSlotPtrSym, TyMachReg, this->m_func); |
| opndSlotArray->SetIsJITOptimizedReg(true); |
| if (!propertySymOpnd->ProducesAuxSlotPtr()) |
| { |
| // No need to reload |
| return opndSlotArray; |
| } |
| } |
| else |
| { |
| opndSlotArray = IR::RegOpnd::New(TyMachReg, this->m_func); |
| } |
| IR::Opnd *opndIndir = IR::IndirOpnd::New(opndBase, Js::DynamicObject::GetOffsetOfAuxSlots(), TyMachReg, this->m_func); |
| Lowerer::InsertMove(opndSlotArray, opndIndir, instrInsert); |
| |
| return opndSlotArray; |
| } |
| else |
| { |
| // If we use inline slot return the address to the object header |
| return opndBase; |
| } |
| } |
| |
| IR::Opnd * |
| Lowerer::LoadSlotArrayWithCachedProtoType(IR::Instr * instrInsert, IR::PropertySymOpnd *propertySymOpnd) |
| { |
| // Get the prototype object from the cache |
| intptr_t prototypeObject = propertySymOpnd->GetProtoObject(); |
| Assert(prototypeObject != 0); |
| |
| if (propertySymOpnd->UsesAuxSlot()) |
| { |
| // If we use the auxiliary slot array, load it from the prototype object and return it |
| IR::RegOpnd *opndSlotArray = IR::RegOpnd::New(TyMachReg, this->m_func); |
| IR::Opnd *opnd = IR::MemRefOpnd::New((char*)prototypeObject + Js::DynamicObject::GetOffsetOfAuxSlots(), TyMachReg, this->m_func, IR::AddrOpndKindDynamicAuxSlotArrayRef); |
| Lowerer::InsertMove(opndSlotArray, opnd, instrInsert); |
| return opndSlotArray; |
| } |
| else |
| { |
| // If we use inline slot return the address of the prototype object |
| return IR::MemRefOpnd::New(prototypeObject, TyMachReg, this->m_func); |
| } |
| } |
| |
| IR::Instr * |
| Lowerer::LowerLdAsmJsEnv(IR::Instr * instr) |
| { |
| Assert(m_func->GetJITFunctionBody()->IsAsmJsMode()); |
| IR::Opnd * functionObjOpnd; |
| IR::Instr * instrPrev = this->m_lowererMD.LoadFunctionObjectOpnd(instr, functionObjOpnd); |
| Assert(!instr->GetSrc1()); |
| IR::IndirOpnd *indirOpnd = IR::IndirOpnd::New(functionObjOpnd->AsRegOpnd(), Js::AsmJsScriptFunction::GetOffsetOfModuleMemory(), TyMachPtr, m_func); |
| instr->SetSrc1(indirOpnd); |
| LowererMD::ChangeToAssign(instr); |
| |
| return instrPrev; |
| } |
| |
| IR::Instr * |
| Lowerer::LowerLdNativeCodeData(IR::Instr * instr) |
| { |
| Assert(!instr->GetSrc1()); |
| Assert(m_func->IsTopFunc()); |
| IR::Instr * instrPrev = instr->m_prev; |
| instr->SetSrc1(IR::MemRefOpnd::New((void*)m_func->GetWorkItem()->GetWorkItemData()->nativeDataAddr, TyMachPtr, m_func, IR::AddrOpndKindDynamicNativeCodeDataRef)); |
| |
| LowererMD::ChangeToAssign(instr); |
| |
| return instrPrev; |
| } |
| |
| IR::Instr * |
| Lowerer::LowerLdEnv(IR::Instr * instr) |
| { |
| IR::Opnd * src1 = instr->GetSrc1(); |
| IR::Opnd * functionObjOpnd; |
| IR::Instr * instrPrev = this->m_lowererMD.LoadFunctionObjectOpnd(instr, functionObjOpnd); |
| Assert(!instr->GetSrc1()); |
| if (src1 == nullptr || functionObjOpnd->IsRegOpnd()) |
| { |
| IR::IndirOpnd *indirOpnd = IR::IndirOpnd::New(functionObjOpnd->AsRegOpnd(), |
| Js::ScriptFunction::GetOffsetOfEnvironment(), TyMachPtr, m_func); |
| instr->SetSrc1(indirOpnd); |
| } |
| else |
| { |
| Assert(functionObjOpnd->IsAddrOpnd()); |
| IR::AddrOpnd* functionObjAddrOpnd = functionObjOpnd->AsAddrOpnd(); |
| IR::MemRefOpnd* functionEnvMemRefOpnd = IR::MemRefOpnd::New((void *)((intptr_t)functionObjAddrOpnd->m_address + Js::ScriptFunction::GetOffsetOfEnvironment()), |
| TyMachPtr, this->m_func, IR::AddrOpndKindDynamicFunctionEnvironmentRef); |
| instr->SetSrc1(functionEnvMemRefOpnd); |
| } |
| |
| LowererMD::ChangeToAssign(instr); |
| return instrPrev; |
| } |
| |
| IR::Instr * |
| Lowerer::LowerLdSuper(IR::Instr *instr, IR::JnHelperMethod helperOpCode) |
| { |
| IR::Opnd * functionObjOpnd; |
| IR::Instr * instrPrev = m_lowererMD.LoadFunctionObjectOpnd(instr, functionObjOpnd); |
| |
| LoadScriptContext(instr); |
| m_lowererMD.LoadHelperArgument(instr, functionObjOpnd); |
| m_lowererMD.ChangeToHelperCall(instr, helperOpCode); |
| |
| return instrPrev; |
| } |
| |
| |
| IR::Instr * |
| Lowerer::LowerFrameDisplayCheck(IR::Instr * instr) |
| { |
| IR::Instr *instrPrev = instr->m_prev; |
| IR::Instr *insertInstr = instr->m_next; |
| IR::AddrOpnd *addrOpnd = instr->UnlinkSrc2()->AsAddrOpnd(); |
| FrameDisplayCheckRecord *record = (FrameDisplayCheckRecord*)addrOpnd->m_address; |
| |
| IR::LabelInstr *errorLabel = nullptr; |
| IR::LabelInstr *continueLabel = nullptr; |
| IR::RegOpnd *envOpnd = instr->GetDst()->AsRegOpnd(); |
| uint32 frameDisplayOffset = Js::FrameDisplay::GetOffsetOfScopes()/sizeof(Js::Var); |
| |
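| // Each check emits roughly the following (a sketch; label names illustrative):
| //
| // CMP [env + offsetOfLength], slotId - frameDisplayOffset (unsigned)
| // JBE $error
| // (same pattern for each slot array length checked via record->table)
| // JMP $continue
| // $error: (helper)
| // CALL Op_FatalInternalError
| // $continue: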
| if (record->slotId != (uint32)-1 && record->slotId > frameDisplayOffset) |
| { |
| // Check that the frame display has enough scopes in it to satisfy the code. |
| errorLabel = IR::LabelInstr::New(Js::OpCode::Label, m_func, true); |
| continueLabel = IR::LabelInstr::New(Js::OpCode::Label, m_func); |
| |
| IR::IndirOpnd * indirOpnd = IR::IndirOpnd::New(envOpnd, |
| Js::FrameDisplay::GetOffsetOfLength(), |
| TyUint16, m_func, true); |
| |
| IR::IntConstOpnd *slotIdOpnd = IR::IntConstOpnd::New(record->slotId - frameDisplayOffset, TyUint16, m_func); |
| InsertCompareBranch(indirOpnd, slotIdOpnd, Js::OpCode::BrLe_A, true, errorLabel, insertInstr); |
| } |
| |
| if (record->table) |
| { |
| // Check the size of each of the slot arrays in the scope chain. |
| FOREACH_HASHTABLE_ENTRY(uint32, bucket, record->table) |
| { |
| uint32 slotId = bucket.element; |
| if (slotId != (uint32)-1 && slotId > Js::ScopeSlots::FirstSlotIndex) |
| { |
| if (errorLabel == nullptr) |
| { |
| errorLabel = IR::LabelInstr::New(Js::OpCode::Label, m_func, true); |
| continueLabel = IR::LabelInstr::New(Js::OpCode::Label, m_func); |
| } |
| |
| IR::IndirOpnd * indirOpnd = IR::IndirOpnd::New(envOpnd, |
| bucket.value * sizeof(Js::Var), |
| TyVar, m_func, true); |
| IR::RegOpnd * slotArrayOpnd = IR::RegOpnd::New(TyVar, m_func); |
| InsertMove(slotArrayOpnd, indirOpnd, insertInstr); |
| |
| indirOpnd = IR::IndirOpnd::New(slotArrayOpnd, |
| Js::ScopeSlots::EncodedSlotCountSlotIndex * sizeof(Js::Var), |
| TyVar, m_func, true); |
| |
| IR::IntConstOpnd * slotIdOpnd = IR::IntConstOpnd::New(slotId - Js::ScopeSlots::FirstSlotIndex, |
| TyUint32, m_func); |
| InsertCompareBranch(indirOpnd, slotIdOpnd, Js::OpCode::BrLe_A, true, errorLabel, insertInstr); |
| } |
| } |
| NEXT_HASHTABLE_ENTRY; |
| } |
| |
| if (errorLabel) |
| { |
| InsertBranch(Js::OpCode::Br, continueLabel, insertInstr); |
| |
| insertInstr->InsertBefore(errorLabel); |
| IR::Instr * instrHelper = IR::Instr::New(Js::OpCode::Call, m_func); |
| insertInstr->InsertBefore(instrHelper); |
| m_lowererMD.ChangeToHelperCall(instrHelper, IR::HelperOp_FatalInternalError); |
| insertInstr->InsertBefore(continueLabel); |
| } |
| |
| m_lowererMD.ChangeToAssign(instr); |
| |
| return instrPrev; |
| } |
| |
| IR::Instr * |
| Lowerer::LowerSlotArrayCheck(IR::Instr * instr) |
| { |
| IR::Instr *instrPrev = instr->m_prev; |
| IR::Instr *insertInstr = instr->m_next; |
| |
| IR::RegOpnd *slotArrayOpnd = instr->GetDst()->AsRegOpnd(); |
| StackSym *stackSym = slotArrayOpnd->m_sym; |
| |
| IR::IntConstOpnd *slotIdOpnd = instr->UnlinkSrc2()->AsIntConstOpnd(); |
| uint32 slotId = (uint32)slotIdOpnd->GetValue(); |
| Assert(slotId != (uint32)-1 && slotId >= Js::ScopeSlots::FirstSlotIndex); |
| |
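| // For a slotId beyond the first slot, this emits roughly (a sketch; label names illustrative):
| //
| // CMP [slotArray + EncodedSlotCountSlotIndex * ptrSize], slotId - FirstSlotIndex
| // JA $continue (unsigned)
| // $error: (helper)
| // CALL Op_FatalInternalError
| // $continue: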
| if (slotId > Js::ScopeSlots::FirstSlotIndex) |
| { |
| if (m_func->DoStackFrameDisplay() && stackSym->m_id == m_func->GetLocalClosureSym()->m_id) |
| { |
| // The pointer we loaded points to the reserved/known address where the slot array can be boxed. |
| // Deref to get the real value. |
| IR::IndirOpnd * srcOpnd = IR::IndirOpnd::New(IR::RegOpnd::New(stackSym, TyVar, m_func), 0, TyVar, m_func); |
| IR::RegOpnd * dstOpnd = IR::RegOpnd::New(TyVar, m_func); |
| InsertMove(dstOpnd, srcOpnd, insertInstr); |
| stackSym = dstOpnd->m_sym; |
| } |
| |
| IR::LabelInstr *errorLabel = IR::LabelInstr::New(Js::OpCode::Label, m_func, true); |
| IR::LabelInstr *continueLabel = IR::LabelInstr::New(Js::OpCode::Label, m_func); |
| |
| IR::IndirOpnd * indirOpnd = IR::IndirOpnd::New(IR::RegOpnd::New(stackSym, TyVar, m_func), |
| Js::ScopeSlots::EncodedSlotCountSlotIndex * sizeof(Js::Var), |
| TyVar, m_func, true); |
| |
| slotIdOpnd->SetValue(slotId - Js::ScopeSlots::FirstSlotIndex); |
| InsertCompareBranch(indirOpnd, slotIdOpnd, Js::OpCode::BrGt_A, true, continueLabel, insertInstr); |
| |
| insertInstr->InsertBefore(errorLabel); |
| IR::Instr * instrHelper = IR::Instr::New(Js::OpCode::Call, m_func); |
| insertInstr->InsertBefore(instrHelper); |
| m_lowererMD.ChangeToHelperCall(instrHelper, IR::HelperOp_FatalInternalError); |
| insertInstr->InsertBefore(continueLabel); |
| } |
| |
| m_lowererMD.ChangeToAssign(instr); |
| |
| return instrPrev; |
| } |
| |
| IR::RegOpnd * |
| Lowerer::LoadIndexFromLikelyFloat( |
| IR::RegOpnd *indexOpnd, |
| const bool skipNegativeCheck, |
| IR::LabelInstr *const notIntLabel, |
| IR::LabelInstr *const negativeLabel, |
| IR::Instr *const insertBeforeInstr) |
| { |
| #ifdef _M_IX86 |
| // We should only generate this if sse2 is available |
| Assert(AutoSystemInfo::Data.SSE2Available()); |
| #endif |
| |
| Func *func = insertBeforeInstr->m_func; |
| |
| IR::LabelInstr * fallThrough = IR::LabelInstr::New(Js::OpCode::Label, func); |
| |
| IR::RegOpnd *int32IndexOpnd = nullptr; |
| // If we know for sure that it's not an int, do not check to see if it's a tagged int |
| if (indexOpnd->IsNotInt()) |
| { |
| int32IndexOpnd = IR::RegOpnd::New(TyInt32, func); |
| } |
| else |
| { |
| IR::LabelInstr * convertToUint = IR::LabelInstr::New(Js::OpCode::Label, func); |
| // First generate a tagged-int test even though profile data says the index is likely a float: indices are usually ints, and we need a fast path before we try to convert float to int.
| |
| // mov intIndex, index |
| // sar intIndex, 1 |
| // jae convertToUint
| int32IndexOpnd = GenerateUntagVar(indexOpnd, convertToUint, insertBeforeInstr, !indexOpnd->IsTaggedInt()); |
| |
| if (!skipNegativeCheck) |
| { |
| // test index, index |
| // js $notTaggedIntOrNegative |
| InsertTestBranch(int32IndexOpnd, int32IndexOpnd, LowererMD::MDCompareWithZeroBranchOpcode(Js::OpCode::BrLt_A), negativeLabel, insertBeforeInstr); |
| } |
| InsertBranch(Js::OpCode::Br, fallThrough, insertBeforeInstr); |
| |
| insertBeforeInstr->InsertBefore(convertToUint); |
| } |
| |
| // try to convert float to int in a fast path |
| #if FLOATVAR |
| IR::RegOpnd* floatIndexOpnd = m_lowererMD.CheckFloatAndUntag(indexOpnd, insertBeforeInstr, notIntLabel); |
| #else |
| m_lowererMD.GenerateFloatTest(indexOpnd, insertBeforeInstr, notIntLabel); |
| IR::IndirOpnd * floatIndexOpnd = IR::IndirOpnd::New(indexOpnd, Js::JavascriptNumber::GetValueOffset(), TyMachDouble, this->m_func); |
| #endif |
| |
| IR::LabelInstr * doneConvUint32 = IR::LabelInstr::New(Js::OpCode::Label, func); |
| IR::LabelInstr * helperConvUint32 = IR::LabelInstr::New(Js::OpCode::Label, func, true /*helper*/); |
| m_lowererMD.ConvertFloatToInt32(int32IndexOpnd, floatIndexOpnd, helperConvUint32, doneConvUint32, insertBeforeInstr); |
| |
| // helper path |
| insertBeforeInstr->InsertBefore(helperConvUint32); |
| m_lowererMD.LoadDoubleHelperArgument(insertBeforeInstr, floatIndexOpnd); |
| |
| IR::Instr * helperCall = IR::Instr::New(Js::OpCode::Call, int32IndexOpnd, this->m_func); |
| insertBeforeInstr->InsertBefore(helperCall); |
| |
| #if DBG |
| // This call to Conv_ToUInt32Core won't be reentrant, as we only call it for floats
| this->ClearAndSaveImplicitCallCheckOnHelperCallCheckState(); |
| #endif |
| m_lowererMD.ChangeToHelperCall(helperCall, IR::HelperConv_ToUInt32Core); |
| #if DBG |
| this->RestoreImplicitCallCheckOnHelperCallCheckState(); |
| #endif |
| |
| // main path |
| insertBeforeInstr->InsertBefore(doneConvUint32); |
| |
| // Convert the uint32 back to float to check that the conversion was indeed lossless
| IR::RegOpnd *floatOpndFromUint32 = IR::RegOpnd::New(TyFloat64, func); |
| m_lowererMD.EmitUIntToFloat(floatOpndFromUint32, int32IndexOpnd->UseWithNewType(TyUint32, this->m_func), insertBeforeInstr); |
| |
| // Compare with the float from the original indexOpnd; we need floatIndex == (float64)(uint32)floatIndex
| InsertCompareBranch(floatOpndFromUint32, floatIndexOpnd, Js::OpCode::BrNeq_A, notIntLabel, insertBeforeInstr, false); |
| |
| insertBeforeInstr->InsertBefore(fallThrough); |
| return int32IndexOpnd; |
| } |
| |
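| // Reserve one contiguous stack area for all the for-in enumerators the function (including inlinees)
| // can have live at once. Conceptually:
| //
| //     m_forInEnumeratorArrayOffset = StackAllocate(m_forInLoopMaxDepth * sizeof(Js::ForInObjectEnumerator));
| //
| // Enumerator i then lives at m_forInEnumeratorArrayOffset + i * sizeof(Js::ForInObjectEnumerator)
| // (see GenerateForInEnumeratorLoad below).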
| void |
| Lowerer::AllocStackForInObjectEnumeratorArray() |
| { |
| Func * func = this->m_func; |
| Assert(func->IsTopFunc()); |
| if (func->m_forInLoopMaxDepth) |
| { |
| func->m_forInEnumeratorArrayOffset = func->StackAllocate(sizeof(Js::ForInObjectEnumerator) * this->m_func->m_forInLoopMaxDepth); |
| } |
| } |
| |
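| // Resolve a for-in enumerator operand to a machine pointer. For a stack-allocated enumerator (sym
| // operand), assign the sym its slot in the array reserved above, using the sym's offset as the loop
| // nesting level; for an indir operand, reuse the base register when the offset is 0, otherwise LEA
| // the effective address.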
| IR::RegOpnd * |
| Lowerer::GenerateForInEnumeratorLoad(IR::Opnd * forInEnumeratorOpnd, IR::Instr * insertBeforeInstr) |
| { |
| Func * func = insertBeforeInstr->m_func; |
| |
| if (forInEnumeratorOpnd->IsSymOpnd()) |
| { |
| StackSym * stackSym = forInEnumeratorOpnd->AsSymOpnd()->GetStackSym(); |
| Assert(!stackSym->m_allocated); |
| uint forInLoopLevel = stackSym->m_offset; |
| Assert(func->m_forInLoopBaseDepth + forInLoopLevel < this->m_func->m_forInLoopMaxDepth); |
| stackSym->m_offset = this->m_func->m_forInEnumeratorArrayOffset + ((func->m_forInLoopBaseDepth + forInLoopLevel) * sizeof(Js::ForInObjectEnumerator)); |
| stackSym->m_allocated = true; |
| } |
| else |
| { |
| Assert(forInEnumeratorOpnd->IsIndirOpnd()); |
| if (forInEnumeratorOpnd->AsIndirOpnd()->GetOffset() == 0) |
| { |
| return forInEnumeratorOpnd->AsIndirOpnd()->GetBaseOpnd(); |
| } |
| } |
| IR::RegOpnd * forInEnumeratorRegOpnd = IR::RegOpnd::New(TyMachPtr, func); |
| InsertLea(forInEnumeratorRegOpnd, forInEnumeratorOpnd, insertBeforeInstr); |
| return forInEnumeratorRegOpnd; |
| } |
| |
| void |
| Lowerer::GenerateHasObjectArrayCheck(IR::RegOpnd * objectOpnd, IR::RegOpnd * typeOpnd, IR::LabelInstr * hasObjectArrayLabel, IR::Instr * insertBeforeInstr) |
| { |
| // CMP [objectOpnd + offset(objectArray)], nullptr
| // JEQ $noObjectArrayLabel
| // TEST [objectOpnd + offset(objectArray)], ObjectArrayFlagsTag (the slot holds flags, not an array)
| // JNE $noObjectArrayLabel
| // MOV typeHandlerOpnd, [typeOpnd + offset(typeHandler)]
| // CMP [typeHandlerOpnd + offset(offsetOfInlineSlots)], Js::DynamicTypeHandler::GetOffsetOfObjectHeaderInlineSlots()
| // JNE $hasObjectArrayLabel
| // $noObjectArrayLabel: (fall through)
| |
| Func * func = this->m_func; |
| IR::LabelInstr * noObjectArrayLabel = IR::LabelInstr::New(Js::OpCode::Label, func); |
| IR::IndirOpnd * objectArrayOpnd = IR::IndirOpnd::New(objectOpnd, Js::DynamicObject::GetOffsetOfObjectArray(), TyMachPtr, func); |
| InsertCompareBranch(objectArrayOpnd, IR::AddrOpnd::NewNull(func), Js::OpCode::BrEq_A, noObjectArrayLabel, insertBeforeInstr); |
| InsertTestBranch(objectArrayOpnd, IR::IntConstOpnd::New((uint32)Js::DynamicObjectFlags::ObjectArrayFlagsTag, TyUint8, func), |
| Js::OpCode::BrNeq_A, noObjectArrayLabel, insertBeforeInstr); |
| |
| IR::RegOpnd * typeHandlerOpnd = IR::RegOpnd::New(TyMachPtr, func); |
| InsertMove(typeHandlerOpnd, IR::IndirOpnd::New(typeOpnd, Js::DynamicType::GetOffsetOfTypeHandler(), TyMachPtr, func), insertBeforeInstr); |
| InsertCompareBranch(IR::IndirOpnd::New(typeHandlerOpnd, Js::DynamicTypeHandler::GetOffsetOfOffsetOfInlineSlots(), TyUint16, func), |
| IR::IntConstOpnd::New(Js::DynamicTypeHandler::GetOffsetOfObjectHeaderInlineSlots(), TyUint16, func), |
| Js::OpCode::BrNeq_A, hasObjectArrayLabel, insertBeforeInstr); |
| |
| insertBeforeInstr->InsertBefore(noObjectArrayLabel); |
| } |
| |
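| // Fast path for InitForInEnumerator: if the object's type matches the cached type, no object array
| // is found, and no prototype has enumerable properties, fill in the ForInObjectEnumerator fields
| // inline instead of calling the helper. Rough shape (labels illustrative):
| //
| //     <object test / dynamic-object test>    -> $helper on failure
| //     <type check against forInCache->type>  -> $helper on mismatch
| //     <cached-data / object-array / prototype-chain checks>
| //     <store enumerator fields>
| //     JMP $done
| //     $helper: (the caller emits the OP_InitForInEnumerator helper call here)
| //     $done: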
| void |
| Lowerer::GenerateInitForInEnumeratorFastPath(IR::Instr * instr, Js::EnumeratorCache * forInCache) |
| { |
| Func * func = this->m_func; |
| |
| IR::LabelInstr * helperLabel = IR::LabelInstr::New(Js::OpCode::Label, func, true); |
| IR::RegOpnd * objectOpnd = instr->GetSrc1()->AsRegOpnd(); |
| |
| // Tagged check and object check |
| m_lowererMD.GenerateObjectTest(objectOpnd, instr, helperLabel); |
| GenerateIsDynamicObject(objectOpnd, instr, helperLabel); |
| |
| // Type check with cache |
| // |
| // MOV typeOpnd, [objectOpnd + offset(type)] |
| // CMP [&forInCache->type], typeOpnd |
| // JNE $helper |
| |
| IR::RegOpnd * typeOpnd = IR::RegOpnd::New(TyMachPtr, func); |
| InsertMove(typeOpnd, IR::IndirOpnd::New(objectOpnd, Js::DynamicObject::GetOffsetOfType(), TyMachPtr, func), instr); |
| InsertCompareBranch(IR::MemRefOpnd::New(&forInCache->type, TyMachPtr, func, IR::AddrOpndKindForInCacheType), typeOpnd, Js::OpCode::BrNeq_A, helperLabel, instr); |
| |
| // Check forInCacheData->EnumNonEnumerable == false |
| // |
| // MOV forInCacheDataOpnd, [&forInCache->data] |
| // CMP forInCacheDataOpnd->enumNonEnumerable, 0 |
| // JNE $helper |
| |
| IR::RegOpnd * forInCacheDataOpnd = IR::RegOpnd::New(TyMachPtr, func); |
| InsertMove(forInCacheDataOpnd, IR::MemRefOpnd::New(&forInCache->data, TyMachPtr, func, IR::AddrOpndKindForInCacheData), instr); |
| InsertCompareBranch(IR::IndirOpnd::New(forInCacheDataOpnd, Js::DynamicObjectPropertyEnumerator::GetOffsetOfCachedDataEnumNonEnumerable(), TyUint8, func), |
| IR::IntConstOpnd::New(0, TyUint8, func), Js::OpCode::BrNeq_A, helperLabel, instr); |
| |
| // Check has object array |
| GenerateHasObjectArrayCheck(objectOpnd, typeOpnd, helperLabel, instr); |
| |
| // Check first prototype with enumerable properties |
| // |
| // MOV prototypeObjectOpnd, [type + offset(prototype)] |
| // MOV prototypeTypeOpnd, [prototypeObjectOpnd + offset(type)] |
| // CMP [prototypeTypeOpnd + offset(typeId)], TypeIds_Null |
| // JEQ $noPrototypeWithEnumerablePropertiesLabel |
| // |
| // $checkFirstPrototypeLoopTopLabel: |
| // CMP [prototypeTypeOpnd + offset(typeId)], TypeIds_LastStaticType |
| // JLE $helper |
| // CMP [prototypeTypeOpnd + offset(hasNoEnumerableProperties)], 0
| // JEQ $helper |
| // <hasObjectArrayCheck prototypeObjectOpnd, prototypeTypeOpnd> |
| // |
| // MOV prototypeObjectOpnd, [prototypeTypeOpnd + offset(prototype)] (load next prototype)
| // |
| // MOV prototypeTypeOpnd, [prototypeObjectOpnd + offset(type)] (tail dup TypeIds_Null check) |
| // CMP [prototypeTypeOpnd + offset(typeId)], TypeIds_Null |
| // JNE $checkFirstPrototypeLoopTopLabel |
| // |
| // $noPrototypeWithEnumerablePropertiesLabel: |
| // |
| IR::LabelInstr * noPrototypeWithEnumerablePropertiesLabel = IR::LabelInstr::New(Js::OpCode::Label, func); |
| |
| IR::RegOpnd * prototypeObjectOpnd = IR::RegOpnd::New(TyMachPtr, func); |
| IR::RegOpnd * prototypeTypeOpnd = IR::RegOpnd::New(TyMachPtr, func); |
| IR::IndirOpnd * prototypeTypeIdOpnd = IR::IndirOpnd::New(prototypeTypeOpnd, Js::DynamicType::GetOffsetOfTypeId(), TyUint32, func); |
| |
| InsertMove(prototypeObjectOpnd, IR::IndirOpnd::New(typeOpnd, Js::DynamicType::GetOffsetOfPrototype(), TyMachPtr, func), instr); |
| InsertMove(prototypeTypeOpnd, IR::IndirOpnd::New(prototypeObjectOpnd, Js::DynamicObject::GetOffsetOfType(), TyMachPtr, func), instr); |
| InsertCompareBranch(prototypeTypeIdOpnd, IR::IntConstOpnd::New(Js::TypeId::TypeIds_Null, TyUint32, func), Js::OpCode::BrEq_A, noPrototypeWithEnumerablePropertiesLabel, instr); |
| |
| IR::LabelInstr * checkFirstPrototypeLoopTopLabel = InsertLoopTopLabel(instr); |
| Loop * loop = checkFirstPrototypeLoopTopLabel->GetLoop(); |
| loop->regAlloc.liveOnBackEdgeSyms->Set(prototypeObjectOpnd->m_sym->m_id); |
| loop->regAlloc.liveOnBackEdgeSyms->Set(prototypeTypeOpnd->m_sym->m_id); |
| |
| InsertCompareBranch(prototypeTypeIdOpnd, IR::IntConstOpnd::New(Js::TypeId::TypeIds_LastStaticType, TyUint32, func), Js::OpCode::BrLe_A, helperLabel, instr); |
| // No need to call EnsureObjectReady here: a deferred-init type may not have this bit set, so we will go to the helper, which calls EnsureObjectReady
| InsertCompareBranch(IR::IndirOpnd::New(prototypeTypeOpnd, Js::DynamicType::GetOffsetOfHasNoEnumerableProperties(), TyUint8, func), |
| IR::IntConstOpnd::New(0, TyUint8, func), Js::OpCode::BrEq_A, helperLabel, instr); |
| GenerateHasObjectArrayCheck(prototypeObjectOpnd, prototypeTypeOpnd, helperLabel, instr); |
| InsertMove(prototypeObjectOpnd, IR::IndirOpnd::New(prototypeTypeOpnd, Js::DynamicType::GetOffsetOfPrototype(), TyMachPtr, func), instr); |
| |
| // Tail dup the TypeIds_Null check |
| InsertMove(prototypeTypeOpnd, IR::IndirOpnd::New(prototypeObjectOpnd, Js::DynamicObject::GetOffsetOfType(), TyMachPtr, func), instr); |
| InsertCompareBranch(prototypeTypeIdOpnd, IR::IntConstOpnd::New(Js::TypeId::TypeIds_Null, TyUint32, func), Js::OpCode::BrNeq_A, checkFirstPrototypeLoopTopLabel, instr); |
| |
| instr->InsertBefore(noPrototypeWithEnumerablePropertiesLabel); |
| |
| // Initialize DynamicObjectPropertyEnumerator fields |
| IR::Opnd * forInEnumeratorOpnd = instr->GetSrc2(); |
| InsertMove(GetForInEnumeratorFieldOpnd(forInEnumeratorOpnd, Js::ForInObjectEnumerator::GetOffsetOfEnumeratorScriptContext(), TyMachPtr), |
| LoadScriptContextOpnd(instr), instr); |
| InsertMove(GetForInEnumeratorFieldOpnd(forInEnumeratorOpnd, Js::ForInObjectEnumerator::GetOffsetOfEnumeratorObject(), TyMachPtr), |
| objectOpnd, instr); |
| InsertMove(GetForInEnumeratorFieldOpnd(forInEnumeratorOpnd, Js::ForInObjectEnumerator::GetOffsetOfEnumeratorInitialType(), TyMachPtr), |
| typeOpnd, instr); |
| InsertMove(GetForInEnumeratorFieldOpnd(forInEnumeratorOpnd, Js::ForInObjectEnumerator::GetOffsetOfEnumeratorObjectIndex(), TyInt32), |
| IR::IntConstOpnd::New(Js::Constants::NoBigSlot, TyInt32, func), instr); |
| |
| IR::RegOpnd * initialPropertyCountOpnd = IR::RegOpnd::New(TyInt32, func); |
| InsertMove(initialPropertyCountOpnd, |
| IR::IndirOpnd::New(forInCacheDataOpnd, Js::DynamicObjectPropertyEnumerator::GetOffsetOfCachedDataPropertyCount(), TyInt32, func), instr); |
| InsertMove(GetForInEnumeratorFieldOpnd(forInEnumeratorOpnd, Js::ForInObjectEnumerator::GetOffsetOfEnumeratorInitialPropertyCount(), TyInt32), |
| initialPropertyCountOpnd, instr); |
| InsertMove(GetForInEnumeratorFieldOpnd(forInEnumeratorOpnd, Js::ForInObjectEnumerator::GetOffsetOfEnumeratorEnumeratedCount(), TyInt32), |
| IR::IntConstOpnd::New(0, TyInt32, func), instr); |
| |
| InsertMove(GetForInEnumeratorFieldOpnd(forInEnumeratorOpnd, Js::ForInObjectEnumerator::GetOffsetOfEnumeratorFlags(), TyUint8), |
| IR::IntConstOpnd::New((uint8)(Js::EnumeratorFlags::UseCache | Js::EnumeratorFlags::SnapShotSemantics), TyUint8, func), instr); |
| InsertMove(GetForInEnumeratorFieldOpnd(forInEnumeratorOpnd, Js::ForInObjectEnumerator::GetOffsetOfEnumeratorCachedData(), TyMachPtr), |
| forInCacheDataOpnd, instr); |
| |
| // Initialize the rest of the JavascriptStaticEnumerator fields
| InsertMove(GetForInEnumeratorFieldOpnd(forInEnumeratorOpnd, Js::ForInObjectEnumerator::GetOffsetOfEnumeratorCurrentEnumerator(), TyMachPtr), |
| IR::AddrOpnd::NewNull(func), instr); |
| InsertMove(GetForInEnumeratorFieldOpnd(forInEnumeratorOpnd, Js::ForInObjectEnumerator::GetOffsetOfEnumeratorPrefixEnumerator(), TyMachPtr), |
| IR::AddrOpnd::NewNull(func), instr); |
| InsertMove(GetForInEnumeratorFieldOpnd(forInEnumeratorOpnd, Js::ForInObjectEnumerator::GetOffsetOfEnumeratorArrayEnumerator(), TyMachPtr), |
| IR::AddrOpnd::NewNull(func), instr); |
| |
| // Initialize the rest of the ForInObjectEnumerator fields
| InsertMove(GetForInEnumeratorFieldOpnd(forInEnumeratorOpnd, Js::ForInObjectEnumerator::GetOffsetOfShadowData(), TyMachPtr), |
| IR::AddrOpnd::NewNull(func), instr); |
| // Initialize canUseJitFastPath = true and enumeratingPrototype = false in a single store.
| InsertMove(GetForInEnumeratorFieldOpnd(forInEnumeratorOpnd, Js::ForInObjectEnumerator::GetOffsetOfStates(), TyUint16), |
| IR::IntConstOpnd::New(1, TyUint16, func, true), instr); |
| |
| IR::LabelInstr* doneLabel = IR::LabelInstr::New(Js::OpCode::Label, func); |
| InsertBranch(Js::OpCode::Br, doneLabel, instr); |
| instr->InsertBefore(helperLabel); |
| instr->InsertAfter(doneLabel); |
| } |
| |
| void |
| Lowerer::LowerInitForInEnumerator(IR::Instr * instr) |
| { |
| Js::EnumeratorCache * forInCache = nullptr; |
| Func * func = instr->m_func; |
| if (instr->IsProfiledInstr()) |
| { |
| uint profileId = instr->AsProfiledInstr()->u.profileId; |
| forInCache = instr->m_func->GetJITFunctionBody()->GetForInCache(profileId); |
| Assert(forInCache != nullptr); |
| |
| if (!func->IsSimpleJit() |
| #if ENABLE_TTD |
| && (func->IsOOPJIT() || !func->GetScriptContext()->GetThreadContext()->IsRuntimeInTTDMode()) |
| // TODO: We will need to enable OOP JIT info to exclude this if we have a TTD runtime
| #endif |
| ) |
| { |
| GenerateInitForInEnumeratorFastPath(instr, forInCache); |
| } |
| } |
| |
| IR::RegOpnd * forInEnumeratorRegOpnd = GenerateForInEnumeratorLoad(instr->UnlinkSrc2(), instr); |
| instr->SetSrc2(forInEnumeratorRegOpnd); |
| m_lowererMD.LoadHelperArgument(instr, IR::AddrOpnd::New(forInCache, IR::AddrOpndKindForInCache, func)); |
| this->LowerBinaryHelperMem(instr, IR::HelperOp_OP_InitForInEnumerator); |
| } |
| |
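| // Create a loop-top label with an attached Loop structure so the register allocator sees the
| // JIT-generated loop (e.g. the prototype walk above) and keeps the right syms live on the back edge.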
| IR::LabelInstr * |
| Lowerer::InsertLoopTopLabel(IR::Instr * insertBeforeInstr) |
| { |
| Func * func = this->m_func; |
| IR::LabelInstr * loopTopLabel = IR::LabelInstr::New(Js::OpCode::Label, func); |
| loopTopLabel->m_isLoopTop = true; |
| |
| Loop *loop = JitAnew(func->m_alloc, Loop, func->m_alloc, func); |
| loopTopLabel->SetLoop(loop); |
| loop->SetLoopTopInstr(loopTopLabel); |
| loop->regAlloc.liveOnBackEdgeSyms = AllocatorNew(JitArenaAllocator, func->m_alloc, BVSparse<JitArenaAllocator>, func->m_alloc); |
| |
| insertBeforeInstr->InsertBefore(loopTopLabel); |
| return loopTopLabel; |
| } |
| |
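| // Attach bailout info to a helper call instruction. If the info is still owned by the primary
| // bailout instruction, share it first so both instructions can restore the same interpreter state.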
| IR::Instr * |
| Lowerer::AddBailoutToHelperCallInstr(IR::Instr * helperCallInstr, BailOutInfo * bailoutInfo, IR::BailOutKind bailoutKind, IR::Instr * primaryBailoutInstr) |
| { |
| helperCallInstr = helperCallInstr->ConvertToBailOutInstr(bailoutInfo, bailoutKind); |
| if (bailoutInfo->bailOutInstr == primaryBailoutInstr) |
| { |
| IR::Instr * instrShare = primaryBailoutInstr->ShareBailOut(); |
| LowerBailTarget(instrShare); |
| } |
| return helperCallInstr; |
| } |
| |
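| // Load the aux slot array pointer into its dedicated sym:
| //
| //     MOV auxSlotPtrSym, [propertyOwner + offset(auxSlots)]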
| void |
| Lowerer::GenerateAuxSlotPtrLoad(IR::PropertySymOpnd *propertySymOpnd, IR::Instr * instrInsert) |
| { |
| StackSym * auxSlotPtrSym = propertySymOpnd->GetAuxSlotPtrSym(); |
| Assert(auxSlotPtrSym); |
| Func * func = instrInsert->m_func; |
| |
| IR::Opnd *opndIndir = IR::IndirOpnd::New(propertySymOpnd->CreatePropertyOwnerOpnd(func), Js::DynamicObject::GetOffsetOfAuxSlots(), TyMachReg, func); |
| IR::RegOpnd *regOpnd = IR::RegOpnd::New(auxSlotPtrSym, TyMachReg, func); |
| regOpnd->SetIsJITOptimizedReg(true); |
| InsertMove(regOpnd, opndIndir, instrInsert); |
| } |
| |
| void |
| Lowerer::InsertAndLegalize(IR::Instr * instr, IR::Instr* insertBeforeInstr) |
| { |
| insertBeforeInstr->InsertBefore(instr); |
| LowererMD::Legalize(instr); |
| } |
| |
| IR::Instr* |
| Lowerer::InsertObjectCheck(IR::RegOpnd *funcOpnd, IR::Instr *insertBeforeInstr, IR::BailOutKind bailOutKind, BailOutInfo *bailOutInfo) |
| { |
| IR::Instr *bailOutIfNotObject = IR::BailOutInstr::New(Js::OpCode::BailOnNotObject, bailOutKind, bailOutInfo, bailOutInfo->bailOutFunc); |
| |
| // Bailout when funcOpnd is not an object. |
| bailOutIfNotObject->SetSrc1(funcOpnd); |
| bailOutIfNotObject->SetByteCodeOffset(insertBeforeInstr); |
| insertBeforeInstr->InsertBefore(bailOutIfNotObject); |
| |
| return bailOutIfNotObject; |
| } |
| |
| IR::Instr* |
| Lowerer::InsertFunctionTypeIdCheck(IR::RegOpnd * funcOpnd, IR::Instr* insertBeforeInstr, IR::BailOutKind bailOutKind, BailOutInfo *bailOutInfo) |
| { |
| IR::Instr *bailOutIfNotFunction = IR::BailOutInstr::New(Js::OpCode::BailOnNotEqual, bailOutKind, bailOutInfo, bailOutInfo->bailOutFunc); |
| |
| // functionTypeRegOpnd = Ld functionRegOpnd->type |
| IR::IndirOpnd *functionTypeIndirOpnd = IR::IndirOpnd::New(funcOpnd, Js::RecyclableObject::GetOffsetOfType(), TyMachPtr, insertBeforeInstr->m_func); |
| IR::RegOpnd *functionTypeRegOpnd = IR::RegOpnd::New(TyVar, insertBeforeInstr->m_func->GetTopFunc()); |
| IR::Instr *instr = IR::Instr::New(Js::OpCode::Ld_A, functionTypeRegOpnd, functionTypeIndirOpnd, insertBeforeInstr->m_func); |
| if (instr->m_func->HasByteCodeOffset()) |
| { |
| instr->SetByteCodeOffset(insertBeforeInstr); |
| } |
| insertBeforeInstr->InsertBefore(instr); |
| |
| CompileAssert(sizeof(Js::TypeId) == sizeof(int32)); |
| // Bail out if functionTypeRegOpnd->typeId != TypeIds_Function
| // BailOnNotEqual functionTypeRegOpnd->typeId, TypeIds_Function
| IR::IndirOpnd *functionTypeIdIndirOpnd = IR::IndirOpnd::New(functionTypeRegOpnd, Js::Type::GetOffsetOfTypeId(), TyInt32, insertBeforeInstr->m_func); |
| IR::IntConstOpnd *typeIdFunctionConstOpnd = IR::IntConstOpnd::New(Js::TypeIds_Function, TyInt32, insertBeforeInstr->m_func); |
| bailOutIfNotFunction->SetSrc1(functionTypeIdIndirOpnd); |
| bailOutIfNotFunction->SetSrc2(typeIdFunctionConstOpnd); |
| insertBeforeInstr->InsertBefore(bailOutIfNotFunction); |
| |
| return bailOutIfNotFunction; |
| } |
| |
| IR::Instr* |
| Lowerer::InsertFunctionInfoCheck(IR::RegOpnd * funcOpnd, IR::Instr *insertBeforeInstr, IR::AddrOpnd* inlinedFuncInfo, IR::BailOutKind bailOutKind, BailOutInfo *bailOutInfo) |
| { |
| IR::Instr *bailOutIfWrongFuncInfo = IR::BailOutInstr::New(Js::OpCode::BailOnNotEqual, bailOutKind, bailOutInfo, bailOutInfo->bailOutFunc); |
| |
| // Bail out if VarTo<JavascriptFunction>(funcOpnd)->functionInfo != inlinedFuncInfo
| // BailOnNotEqual funcOpnd->functionInfo, inlinedFuncInfo
| IR::IndirOpnd* opndFuncInfo = IR::IndirOpnd::New(funcOpnd, Js::JavascriptFunction::GetOffsetOfFunctionInfo(), TyMachPtr, insertBeforeInstr->m_func); |
| bailOutIfWrongFuncInfo->SetSrc1(opndFuncInfo); |
| bailOutIfWrongFuncInfo->SetSrc2(inlinedFuncInfo); |
| |
| insertBeforeInstr->InsertBefore(bailOutIfWrongFuncInfo); |
| |
| return bailOutIfWrongFuncInfo; |
| } |
| |
| #if DBG |
| void |
| Lowerer::LegalizeVerifyRange(IR::Instr * instrStart, IR::Instr * instrLast) |
| { |
| FOREACH_INSTR_IN_RANGE(verifyLegalizeInstr, instrStart, instrLast) |
| { |
| LowererMD::Legalize<true>(verifyLegalizeInstr); |
| } |
| NEXT_INSTR_IN_RANGE; |
| } |
| |
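| // Debug-only consistency check for helper calls: a reentrant helper must carry a
| // BailOutOnImplicitCalls when one is required, and the current opcode's attributes
| // must admit implicit calls.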
| void |
| Lowerer::ReconcileWithLowererStateOnHelperCall(IR::Instr * callInstr, IR::JnHelperMethod helperMethod) |
| { |
| AssertMsg((this->helperCallCheckState & HelperCallCheckState_NoHelperCalls) == 0, "Emitting a helper call when we didn't allow helper calls");
| if (HelperMethodAttributes::CanBeReentrant(helperMethod)) |
| { |
| if (this->helperCallCheckState & HelperCallCheckState_ImplicitCallsBailout) |
| { |
| if (!callInstr->HasBailOutInfo() || |
| !BailOutInfo::IsBailOutOnImplicitCalls(callInstr->GetBailOutKind())) |
| { |
| Output::Print(_u("HelperMethod : %s\n"), IR::GetMethodName(helperMethod)); |
| AssertMsg(false, "Helper call doesn't have BailOutOnImplicitCalls when it should"); |
| } |
| } |
| |
| if (!OpCodeAttr::HasImplicitCall(m_currentInstrOpCode) && !OpCodeAttr::OpndHasImplicitCall(m_currentInstrOpCode) |
| // Special case: we allow implicit calls here even though FromVar's opcode attributes say it has no implicit calls
| && m_currentInstrOpCode != Js::OpCode::FromVar |
| ) |
| { |
| Output::Print(_u("HelperMethod : %s, OpCode: %s"), IR::GetMethodName(helperMethod), Js::OpCodeUtil::GetOpCodeName(m_currentInstrOpCode)); |
| callInstr->DumpByteCodeOffset(); |
| Output::Print(_u("\n")); |
| AssertMsg(false, "OpCode and Helper implicit call attribute mismatch"); |
| } |
| } |
| } |
| |
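| // The next two methods bracket helper calls that are known not to reenter script (see the
| // Conv_ToUInt32Core call above), temporarily suppressing the implicit-call bailout check.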
| void |
| Lowerer::ClearAndSaveImplicitCallCheckOnHelperCallCheckState() |
| { |
| this->oldHelperCallCheckState = this->helperCallCheckState; |
| this->helperCallCheckState = HelperCallCheckState(this->helperCallCheckState & ~HelperCallCheckState_ImplicitCallsBailout); |
| } |
| |
| void |
| Lowerer::RestoreImplicitCallCheckOnHelperCallCheckState() |
| { |
| if (this->oldHelperCallCheckState & HelperCallCheckState_ImplicitCallsBailout) |
| { |
| this->helperCallCheckState = HelperCallCheckState(this->helperCallCheckState | HelperCallCheckState_ImplicitCallsBailout); |
| this->oldHelperCallCheckState = HelperCallCheckState_None; |
| } |
| } |
| |
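| // CheckLowerIntBound lowers to (pseudocode; label illustrative):
| //
| //     CMP  src1, src2
| //     JGE  $continue
| //     CALL IntRangeCheckFailure
| //     $continue: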
| IR::Instr* |
| Lowerer::LowerCheckLowerIntBound(IR::Instr * instr) |
| { |
| IR::Instr * instrPrev = instr->m_prev; |
| |
| IR::LabelInstr * continueLabel = IR::LabelInstr::New(Js::OpCode::Label, instr->m_func, false /*isOpHelper*/); |
| |
| Assert(instr->GetSrc1()->IsInt32() || instr->GetSrc1()->IsUInt32()); |
| InsertCompareBranch(instr->GetSrc1(), instr->GetSrc2(), Js::OpCode::BrGe_A, continueLabel, instr); |
| |
| IR::Instr* helperCallInstr = IR::Instr::New(LowererMD::MDCallOpcode, instr->m_func); |
| instr->InsertBefore(helperCallInstr); |
| m_lowererMD.ChangeToHelperCall(helperCallInstr, IR::HelperIntRangeCheckFailure); |
| |
| instr->InsertAfter(continueLabel); |
| |
| instr->Remove(); |
| |
| return instrPrev; |
| } |
| |
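| // CheckUpperIntBound lowers to (pseudocode; labels illustrative). Standalone it is just
| // CMP/JLE $continue/CALL; when immediately preceded by a CheckLowerIntBound, the two are
| // fused so both bounds share one failure call:
| //
| //     CMP  upperSrc1, upperSrc2
| //     JGT  $helper
| //     CMP  lowerSrc1, lowerSrc2
| //     JGE  $continue
| //     $helper:
| //     CALL IntRangeCheckFailure
| //     $continue: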
| IR::Instr* |
| Lowerer::LowerCheckUpperIntBound(IR::Instr * instr) |
| { |
| bool lowerBoundCheckPresent = instr->m_prev->m_opcode == Js::OpCode::CheckLowerIntBound; |
| IR::Instr * instrPrev = lowerBoundCheckPresent ? instr->m_prev->m_prev : instr->m_prev; |
| |
| IR::Instr * lowerBoundCheckInstr = lowerBoundCheckPresent ? instr->m_prev : nullptr; |
| |
| IR::LabelInstr * continueLabel = IR::LabelInstr::New(Js::OpCode::Label, instr->m_func, false /*isOpHelper*/); |
| IR::LabelInstr * helperLabel = IR::LabelInstr::New(Js::OpCode::Label, instr->m_func, true /*isOpHelper*/); |
| |
| Assert(instr->GetSrc1()->IsInt32() || instr->GetSrc1()->IsUInt32()); |
| if (lowerBoundCheckInstr) |
| { |
| InsertCompareBranch(instr->UnlinkSrc1(), instr->UnlinkSrc2(), Js::OpCode::BrGt_A, helperLabel, instr); |
| |
| Assert(lowerBoundCheckInstr->GetSrc1()->IsInt32() || lowerBoundCheckInstr->GetSrc1()->IsUInt32()); |
| InsertCompareBranch(lowerBoundCheckInstr->UnlinkSrc1(), lowerBoundCheckInstr->UnlinkSrc2(), Js::OpCode::BrGe_A, continueLabel, instr); |
| } |
| else |
| { |
| InsertCompareBranch(instr->UnlinkSrc1(), instr->UnlinkSrc2(), Js::OpCode::BrLe_A, continueLabel, instr); |
| } |
| |
| instr->InsertBefore(helperLabel); |
| IR::Instr* helperCallInstr = IR::Instr::New(LowererMD::MDCallOpcode, instr->m_func); |
| instr->InsertBefore(helperCallInstr); |
| m_lowererMD.ChangeToHelperCall(helperCallInstr, IR::HelperIntRangeCheckFailure); |
| |
| instr->InsertAfter(continueLabel); |
| |
| instr->Remove(); |
| if (lowerBoundCheckInstr) |
| { |
| lowerBoundCheckInstr->Remove(); |
| } |
| |
| return instrPrev; |
| } |
| #endif |