| // Copyright 2022 the V8 project authors. All rights reserved. |
| // Use of this source code is governed by a BSD-style license that can be |
| // found in the LICENSE file. |
| |
| #include "src/base/logging.h" |
| #include "src/codegen/arm64/assembler-arm64-inl.h" |
| #include "src/codegen/arm64/register-arm64.h" |
| #include "src/codegen/interface-descriptors-inl.h" |
| #include "src/maglev/arm64/maglev-assembler-arm64-inl.h" |
| #include "src/maglev/maglev-assembler-inl.h" |
| #include "src/maglev/maglev-graph-processor.h" |
| #include "src/maglev/maglev-graph.h" |
| #include "src/maglev/maglev-ir-inl.h" |
| #include "src/maglev/maglev-ir.h" |
| #include "src/objects/feedback-cell.h" |
| #include "src/objects/js-function.h" |
| |
| namespace v8 { |
| namespace internal { |
| namespace maglev { |
| |
| #define __ masm-> |
| |
| namespace { |
| |
| std::optional<int32_t> TryGetAddImmediateInt32ConstantInput(Node* node, |
| int index) { |
| if (auto res = node->TryGetInt32ConstantInput(index)) { |
| if (MacroAssemblerBase::IsImmAddSub(*res)) { |
| return res; |
| } |
| } |
| return {}; |
| } |
| |
| std::optional<int32_t> TryGetLogicalImmediateInt32ConstantInput(Node* node, |
| int index) { |
| if (auto res = node->TryGetInt32ConstantInput(index)) { |
| if (*res <= 0) { |
| return {}; |
| } |
| unsigned u1, u2, u3; |
| if (MacroAssemblerBase::IsImmLogical(*res, 32, &u1, &u2, &u3)) { |
| return res; |
| } |
| } |
| return {}; |
| } |
| |
| } // namespace |
| |
void Int32NegateWithOverflow::SetValueLocationConstraints() {
  UseRegister(value_input());
  DefineAsRegister(this);
}

// result = -value. Deopts when value is 0 (result would be -0, which is not
// representable as an int32) or when the negation overflows (value ==
// kMinInt, signalled by the V flag from Negs).
void Int32NegateWithOverflow::GenerateCode(MaglevAssembler* masm,
                                           const ProcessingState& state) {
  Register value = ToRegister(value_input()).W();
  Register out = ToRegister(result()).W();

  // Deopt when result would be -0.
  static_assert(Int32NegateWithOverflow::kProperties.can_eager_deopt());
  Label* fail = __ GetDeoptLabel(this, DeoptimizeReason::kOverflow);
  __ RecordComment("-- Jump to eager deopt");
  __ Cbz(value, fail);

  __ Negs(out, value);
  // Output register must not be a register input into the eager deopt info.
  DCHECK_REGLIST_EMPTY(RegList{out} &
                       GetGeneralRegistersUsedAsInputs(eager_deopt_info()));
  __ EmitEagerDeoptIf(vs, DeoptimizeReason::kOverflow, this);
}
| |
// result = |input|. The input aliases the output register (checked below, the
// constraints are set up elsewhere). Non-negative values pass through; for
// negative values we negate, deopting on overflow (input == kMinInt).
void Int32AbsWithOverflow::GenerateCode(MaglevAssembler* masm,
                                        const ProcessingState& state) {
  Register out = ToRegister(result()).W();
  Label done;
  DCHECK(ToRegister(input()).W().Aliases(out));
  __ Cmp(out, Immediate(0));
  __ JumpIf(ge, &done);
  __ Negs(out, out);
  // Output register must not be a register input into the eager deopt info.
  DCHECK_REGLIST_EMPTY(RegList{out} &
                       GetGeneralRegistersUsedAsInputs(eager_deopt_info()));
  __ EmitEagerDeoptIf(vs, DeoptimizeReason::kOverflow, this);
  __ bind(&done);
}
| |
void Int32IncrementWithOverflow::SetValueLocationConstraints() {
  UseRegister(value_input());
  DefineAsRegister(this);
}

// result = value + 1, deopting if the addition overflows int32 range (the V
// flag set by Adds).
void Int32IncrementWithOverflow::GenerateCode(MaglevAssembler* masm,
                                              const ProcessingState& state) {
  Register value = ToRegister(value_input()).W();
  Register out = ToRegister(result()).W();
  __ Adds(out, value, Immediate(1));
  // Output register must not be a register input into the eager deopt info.
  DCHECK_REGLIST_EMPTY(RegList{out} &
                       GetGeneralRegistersUsedAsInputs(eager_deopt_info()));
  __ EmitEagerDeoptIf(vs, DeoptimizeReason::kOverflow, this);
}
| |
void Int32DecrementWithOverflow::SetValueLocationConstraints() {
  UseRegister(value_input());
  DefineAsRegister(this);
}

// result = value - 1, deopting if the subtraction overflows int32 range (the
// V flag set by Subs).
void Int32DecrementWithOverflow::GenerateCode(MaglevAssembler* masm,
                                              const ProcessingState& state) {
  Register value = ToRegister(value_input()).W();
  Register out = ToRegister(result()).W();
  __ Subs(out, value, Immediate(1));
  // Output register must not be a register input into the eager deopt info.
  DCHECK_REGLIST_EMPTY(RegList{out} &
                       GetGeneralRegistersUsedAsInputs(eager_deopt_info()));
  __ EmitEagerDeoptIf(vs, DeoptimizeReason::kOverflow, this);
}
| |
int BuiltinStringFromCharCode::MaxCallStackArgs() const {
  return AllocateDescriptor::GetStackParameterCount();
}
void BuiltinStringFromCharCode::SetValueLocationConstraints() {
  if (code_input().node()->Is<Int32Constant>()) {
    // Constant char codes are handled inline, no register needed.
    UseAny(code_input());
  } else {
    UseAndClobberRegister(code_input());
  }
  set_temporaries_needed(1);
  DefineAsRegister(this);
}
// Creates a one-character string from a char code. Constant one-byte codes
// are fetched from the single-character string table; other constants get a
// freshly allocated two-byte string. Non-constant codes go through the
// generic StringFromCharCode helper with the 0xFFFF mask applied there.
void BuiltinStringFromCharCode::GenerateCode(MaglevAssembler* masm,
                                             const ProcessingState& state) {
  MaglevAssembler::TemporaryRegisterScope temps(masm);
  Register scratch = temps.Acquire();
  Register result_string = ToRegister(result());
  if (Int32Constant* constant = code_input().node()->TryCast<Int32Constant>()) {
    // Only the low 16 bits of the char code are relevant.
    int32_t char_code = constant->value() & 0xFFFF;
    // NOTE(review): strict `<` sends kMaxOneByteCharCode itself down the
    // two-byte path — presumably intentional; confirm against the
    // single-character string table bounds.
    if (0 <= char_code && char_code < String::kMaxOneByteCharCode) {
      __ LoadSingleCharacterString(result_string, char_code);
    } else {
      // Allocate a length-1 two-byte string and store the char code into its
      // first (and only) 16-bit element.
      __ AllocateTwoByteString(register_snapshot(), result_string, 1);
      __ Move(scratch, char_code);
      __ Strh(scratch.W(),
              FieldMemOperand(result_string,
                              OFFSET_OF_DATA_START(SeqTwoByteString)));
    }
  } else {
    __ StringFromCharCode(register_snapshot(), nullptr, result_string,
                          ToRegister(code_input()), scratch,
                          MaglevAssembler::CharCodeMaskMode::kMustApplyMask);
  }
}
| |
// InlinedAllocation: the object's address is the enclosing allocation block's
// base plus a static offset. At offset 0 the result simply reuses the input
// register, so no code is emitted.
void InlinedAllocation::SetValueLocationConstraints() {
  UseRegister(allocation_block_input());
  if (offset() == 0) {
    DefineSameAsFirst(this);
  } else {
    DefineAsRegister(this);
  }
}

void InlinedAllocation::GenerateCode(MaglevAssembler* masm,
                                     const ProcessingState& state) {
  // offset() == 0 needs no code: the result aliases the input (see
  // SetValueLocationConstraints).
  if (offset() != 0) {
    __ Add(ToRegister(result()), ToRegister(allocation_block_input()),
           offset());
  }
}
| |
void ArgumentsLength::SetValueLocationConstraints() { DefineAsRegister(this); }

// Loads the actual argument count from the current frame and subtracts one
// for the receiver.
void ArgumentsLength::GenerateCode(MaglevAssembler* masm,
                                   const ProcessingState& state) {
  Register argc = ToRegister(result());
  __ Ldr(argc, MemOperand(fp, StandardFrameConstants::kArgCOffset));
  __ Sub(argc, argc, 1);  // Remove receiver.
}
| |
void RestLength::SetValueLocationConstraints() { DefineAsRegister(this); }

// rest length = max(argc - formal_parameter_count - 1, 0), Smi-tagged.
void RestLength::GenerateCode(MaglevAssembler* masm,
                              const ProcessingState& state) {
  Register length = ToRegister(result());
  Label done;
  __ Ldr(length, MemOperand(fp, StandardFrameConstants::kArgCOffset));
  // Subtract the formal parameters plus the receiver; the flags tell us
  // whether the difference went negative.
  __ Subs(length, length, formal_parameter_count() + 1);
  __ B(kGreaterThanEqual, &done);
  // Fewer actual than formal arguments: the rest array is empty.
  __ Move(length, 0);
  __ Bind(&done);
  __ UncheckedSmiTagInt32(length);
}
| |
int CheckedObjectToIndex::MaxCallStackArgs() const { return 0; }

void CheckedIntPtrToInt32::SetValueLocationConstraints() {
  UseRegister(input());
  DefineSameAsFirst(this);
}

// Deopts with kNotInt32 when the 64-bit input lies outside the int32 range;
// in-range values pass through unchanged (result aliases the input).
void CheckedIntPtrToInt32::GenerateCode(MaglevAssembler* masm,
                                        const ProcessingState& state) {
  Register input_reg = ToRegister(input());
  __ CompareAndBranch(input_reg.X(),
                      Immediate(std::numeric_limits<int32_t>::max()), gt,
                      __ GetDeoptLabel(this, DeoptimizeReason::kNotInt32));
  __ CompareAndBranch(input_reg.X(),
                      Immediate(std::numeric_limits<int32_t>::min()), lt,
                      __ GetDeoptLabel(this, DeoptimizeReason::kNotInt32));
}
| |
void Int32AddWithOverflow::SetValueLocationConstraints() {
  UseRegister(left_input());
  // A constant right-hand side that fits the add/sub immediate encoding does
  // not need a register.
  if (TryGetAddImmediateInt32ConstantInput(this, kRightIndex)) {
    UseAny(right_input());
  } else {
    UseRegister(right_input());
  }
  DefineAsRegister(this);
}

// result = left + right, deopting on signed overflow (V flag from Adds).
void Int32AddWithOverflow::GenerateCode(MaglevAssembler* masm,
                                        const ProcessingState& state) {
  Register left = ToRegister(left_input()).W();
  Register out = ToRegister(result()).W();
  if (!right_input().operand().IsRegister()) {
    // Non-register operand implies the constant-immediate case set up in
    // SetValueLocationConstraints.
    auto right_const = TryGetInt32ConstantInput(kRightIndex);
    DCHECK(right_const);
    __ Adds(out, left, *right_const);
  } else {
    Register right = ToRegister(right_input()).W();
    __ Adds(out, left, right);
  }
  // The output register shouldn't be a register input into the eager deopt
  // info.
  DCHECK_REGLIST_EMPTY(RegList{out} &
                       GetGeneralRegistersUsedAsInputs(eager_deopt_info()));
  __ EmitEagerDeoptIf(vs, DeoptimizeReason::kOverflow, this);
}
| |
void Int32SubtractWithOverflow::SetValueLocationConstraints() {
  UseRegister(left_input());
  // A constant right-hand side that fits the add/sub immediate encoding does
  // not need a register.
  if (TryGetAddImmediateInt32ConstantInput(this, kRightIndex)) {
    UseAny(right_input());
  } else {
    UseRegister(right_input());
  }
  DefineAsRegister(this);
}
// result = left - right, deopting on signed overflow (V flag from Subs).
void Int32SubtractWithOverflow::GenerateCode(MaglevAssembler* masm,
                                             const ProcessingState& state) {
  Register left = ToRegister(left_input()).W();
  Register out = ToRegister(result()).W();
  if (!right_input().operand().IsRegister()) {
    // Non-register operand implies the constant-immediate case set up in
    // SetValueLocationConstraints.
    auto right_const = TryGetInt32ConstantInput(kRightIndex);
    DCHECK(right_const);
    __ Subs(out, left, *right_const);
  } else {
    Register right = ToRegister(right_input()).W();
    __ Subs(out, left, right);
  }
  // The output register shouldn't be a register input into the eager deopt
  // info.
  DCHECK_REGLIST_EMPTY(RegList{out} &
                       GetGeneralRegistersUsedAsInputs(eager_deopt_info()));
  __ EmitEagerDeoptIf(vs, DeoptimizeReason::kOverflow, this);
}
| |
void Int32MultiplyWithOverflow::SetValueLocationConstraints() {
  UseRegister(left_input());
  UseRegister(right_input());
  DefineAsRegister(this);
}
// result = left * right via a 64-bit Smull. Deopts when the product does not
// fit in 32 bits, or when the product is 0 with a negative operand (which
// would be -0, not representable as an int32).
void Int32MultiplyWithOverflow::GenerateCode(MaglevAssembler* masm,
                                             const ProcessingState& state) {
  Register left = ToRegister(left_input()).W();
  Register right = ToRegister(right_input()).W();
  Register out = ToRegister(result()).W();

  // TODO(leszeks): peephole optimise multiplication by a constant.

  MaglevAssembler::TemporaryRegisterScope temps(masm);
  // If the output aliases an input, compute into a scratch register so the
  // inputs stay intact for the -0 check below.
  bool out_alias_input = out == left || out == right;
  Register res = out.X();
  if (out_alias_input) {
    res = temps.AcquireScratch();
  }

  __ Smull(res, left, right);

  // if res != (res[0:31] sign extended to 64 bits), then the multiplication
  // result is too large for 32 bits.
  __ Cmp(res, Operand(res.W(), SXTW));
  __ EmitEagerDeoptIf(ne, DeoptimizeReason::kOverflow, this);

  // If the result is zero, check if either lhs or rhs is negative.
  Label end;
  __ CompareAndBranch(res, Immediate(0), ne, &end);
  {
    MaglevAssembler::TemporaryRegisterScope temps(masm);
    Register temp = temps.AcquireScratch().W();
    __ Orr(temp, left, right);
    // If one of them is negative, we must have a -0 result, which is non-int32,
    // so deopt.
    // TODO(leszeks): Consider splitting these deopts to have distinct deopt
    // reasons. Otherwise, the reason has to match the above.
    __ RecordComment("-- Jump to eager deopt if the result is negative zero");
    __ Tbnz(temp, temp.SizeInBits() - 1,
            __ GetDeoptLabel(this, DeoptimizeReason::kOverflow));
  }
  __ Bind(&end);
  if (out_alias_input) {
    __ Move(out, res.W());
  }
}
| |
void Int32DivideWithOverflow::SetValueLocationConstraints() {
  UseRegister(left_input());
  UseRegister(right_input());
  DefineAsRegister(this);
}
// result = left / right, deopting (kNotInt32) on division by zero, -0
// results, kMinInt / -1 overflow, and non-integer quotients (checked via the
// remainder at the end).
void Int32DivideWithOverflow::GenerateCode(MaglevAssembler* masm,
                                           const ProcessingState& state) {
  Register left = ToRegister(left_input()).W();
  Register right = ToRegister(right_input()).W();
  Register out = ToRegister(result()).W();

  // TODO(leszeks): peephole optimise division by a constant.

  // Pre-check for overflow, since idiv throws a division exception on overflow
  // rather than setting the overflow flag. Logic copied from
  // effect-control-linearizer.cc

  // Check if {right} is positive (and not zero).
  __ Cmp(right, Immediate(0));
  ZoneLabelRef done(masm);
  __ JumpToDeferredIf(
      le,
      [](MaglevAssembler* masm, ZoneLabelRef done, Register left,
         Register right, Int32DivideWithOverflow* node) {
        // {right} is negative or zero.

        // TODO(leszeks): Using kNotInt32 here, but in same places
        // kDivisionByZerokMinusZero/kMinusZero/kOverflow would be better. Right
        // now all eager deopts in a node have to be the same -- we should allow
        // a node to emit multiple eager deopts with different reasons.
        Label* deopt = __ GetDeoptLabel(node, DeoptimizeReason::kNotInt32);

        // Check if {right} is zero.
        // We've already done the compare and flags won't be cleared yet.
        __ JumpIf(eq, deopt);

        // Check if {left} is zero, as that would produce minus zero.
        __ CompareAndBranch(left, Immediate(0), eq, deopt);

        // Check if {left} is kMinInt and {right} is -1, in which case we'd have
        // to return -kMinInt, which is not representable as Int32.
        __ Cmp(left, Immediate(kMinInt));
        __ JumpIf(ne, *done);
        __ Cmp(right, Immediate(-1));
        __ JumpIf(ne, *done);
        __ JumpToDeopt(deopt);
      },
      done, left, right, this);
  __ Bind(*done);

  // Perform the actual integer division.
  MaglevAssembler::TemporaryRegisterScope temps(masm);
  // If the output aliases an input, divide into a scratch register so the
  // inputs survive for the remainder check below.
  bool out_alias_input = out == left || out == right;
  Register res = out;
  if (out_alias_input) {
    res = temps.AcquireScratch().W();
  }
  __ Sdiv(res, left, right);

  // Check that the remainder is zero.
  Register temp = temps.AcquireScratch().W();
  __ Msub(temp, res, right, left);
  __ CompareAndBranch(temp, Immediate(0), ne,
                      __ GetDeoptLabel(this, DeoptimizeReason::kNotInt32));

  __ Move(out, res);
}
| |
void Int32ModulusWithOverflow::SetValueLocationConstraints() {
  // Both inputs are clobbered by the algorithm below (rhs may be negated,
  // lhs may be negated in the deferred path).
  UseAndClobberRegister(left_input());
  UseAndClobberRegister(right_input());
  DefineAsRegister(this);
}
// result = lhs % rhs with JS semantics (sign follows the dividend). Deopts
// on rhs == 0 and on -0 results; the power-of-two rhs case is reduced to a
// mask.
void Int32ModulusWithOverflow::GenerateCode(MaglevAssembler* masm,
                                            const ProcessingState& state) {
  // If AreAliased(lhs, rhs):
  //   deopt if lhs < 0  // Minus zero.
  //   0
  //
  // Using same algorithm as in EffectControlLinearizer:
  //   if rhs <= 0 then
  //     rhs = -rhs
  //     deopt if rhs == 0
  //   if lhs < 0 then
  //     let lhs_abs = -lsh in
  //     let res = lhs_abs % rhs in
  //     deopt if res == 0
  //     -res
  //   else
  //     let msk = rhs - 1 in
  //     if rhs & msk == 0 then
  //       lhs & msk
  //     else
  //       lhs % rhs

  Register lhs = ToRegister(left_input()).W();
  Register rhs = ToRegister(right_input()).W();
  Register out = ToRegister(result()).W();

  static constexpr DeoptimizeReason deopt_reason =
      DeoptimizeReason::kDivisionByZero;

  if (lhs == rhs) {
    // For the modulus algorithm described above, lhs and rhs must not alias
    // each other.
    __ Tst(lhs, lhs);
    // TODO(victorgomes): This ideally should be kMinusZero, but Maglev only
    // allows one deopt reason per IR.
    __ EmitEagerDeoptIf(mi, deopt_reason, this);
    __ Move(ToRegister(result()), 0);
    return;
  }

  DCHECK(!AreAliased(lhs, rhs));

  ZoneLabelRef done(masm);
  ZoneLabelRef rhs_checked(masm);
  // Deferred path: rhs <= 0. Negate it (x % -y == x % y) and deopt if it was
  // zero.
  __ Cmp(rhs, Immediate(0));
  __ JumpToDeferredIf(
      le,
      [](MaglevAssembler* masm, ZoneLabelRef rhs_checked, Register rhs,
         Int32ModulusWithOverflow* node) {
        __ Negs(rhs, rhs);
        __ B(*rhs_checked, ne);
        __ EmitEagerDeopt(node, deopt_reason);
      },
      rhs_checked, rhs, this);
  __ Bind(*rhs_checked);

  // Deferred path: lhs < 0. Compute the modulus on -lhs and negate the
  // result; a zero result would be -0, so deopt in that case.
  __ Cmp(lhs, Immediate(0));
  __ JumpToDeferredIf(
      lt,
      [](MaglevAssembler* masm, ZoneLabelRef done, Register lhs, Register rhs,
         Register out, Int32ModulusWithOverflow* node) {
        MaglevAssembler::TemporaryRegisterScope temps(masm);
        Register res = temps.AcquireScratch().W();
        __ Neg(lhs, lhs);
        __ Udiv(res, lhs, rhs);
        __ Msub(out, res, rhs, lhs);
        __ Negs(out, out);
        __ B(*done, ne);
        // TODO(victorgomes): This ideally should be kMinusZero, but Maglev
        // only allows one deopt reason per IR.
        __ EmitEagerDeopt(node, deopt_reason);
      },
      done, lhs, rhs, out, this);

  Label rhs_not_power_of_2;
  MaglevAssembler::TemporaryRegisterScope temps(masm);
  Register mask = temps.AcquireScratch().W();
  __ Add(mask, rhs, Immediate(-1));
  __ Tst(mask, rhs);
  __ JumpIf(ne, &rhs_not_power_of_2);

  // {rhs} is power of 2.
  __ And(out, mask, lhs);
  __ Jump(*done);

  __ Bind(&rhs_not_power_of_2);

  // We store the result of the Udiv in a temporary register in case {out} is
  // the same as {lhs} or {rhs}: we'll still need those 2 registers intact to
  // get the remainder.
  Register res = mask;
  __ Udiv(res, lhs, rhs);
  __ Msub(out, res, rhs, lhs);

  __ Bind(*done);
}
| |
// Defines SetValueLocationConstraints/GenerateCode for the int32 bitwise
// binops (AND/OR/XOR). A constant right-hand side is emitted as an immediate
// when it fits the arm64 logical (bitmask) immediate encoding; otherwise both
// operands live in registers. No flags or deopts are involved.
#define DEF_BITWISE_BINOP(Instruction, opcode)                   \
  void Instruction::SetValueLocationConstraints() {              \
    UseRegister(left_input());                                   \
    if (TryGetLogicalImmediateInt32ConstantInput(this, kRightIndex)) { \
      UseAny(right_input());                                     \
    } else {                                                     \
      UseRegister(right_input());                                \
    }                                                            \
    DefineAsRegister(this);                                      \
  }                                                              \
                                                                 \
  void Instruction::GenerateCode(MaglevAssembler* masm,          \
                                 const ProcessingState& state) { \
    Register left = ToRegister(left_input()).W();                \
    Register out = ToRegister(result()).W();                     \
    if (!right_input().operand().IsRegister()) {                 \
      auto right_const = TryGetInt32ConstantInput(kRightIndex);  \
      DCHECK(right_const);                                       \
      __ opcode(out, left, *right_const);                        \
    } else {                                                     \
      Register right = ToRegister(right_input()).W();            \
      __ opcode(out, left, right);                               \
    }                                                            \
  }
DEF_BITWISE_BINOP(Int32BitwiseAnd, and_)
DEF_BITWISE_BINOP(Int32BitwiseOr, orr)
DEF_BITWISE_BINOP(Int32BitwiseXor, eor)
#undef DEF_BITWISE_BINOP
| |
// Defines SetValueLocationConstraints/GenerateCode for the int32 shifts
// (LSL/ASR/LSR). A constant shift amount is masked to 5 bits (mod 32, per JS
// shift semantics); a resulting shift of zero is just a register move. A
// variable shift amount uses the register form (opcode##v, i.e.
// lslv/asrv/lsrv).
#define DEF_SHIFT_BINOP(Instruction, opcode)                     \
  void Instruction::SetValueLocationConstraints() {              \
    UseRegister(left_input());                                   \
    if (TryGetInt32ConstantInput(kRightIndex)) {                 \
      UseAny(right_input());                                     \
    } else {                                                     \
      UseRegister(right_input());                                \
    }                                                            \
    DefineAsRegister(this);                                      \
  }                                                              \
                                                                 \
  void Instruction::GenerateCode(MaglevAssembler* masm,          \
                                 const ProcessingState& state) { \
    Register out = ToRegister(result()).W();                     \
    Register left = ToRegister(left_input()).W();                \
    if (auto right_const = TryGetInt32ConstantInput(kRightIndex)) { \
      int right = *right_const & 31;                             \
      if (right == 0) {                                          \
        __ Move(out, left);                                      \
      } else {                                                   \
        __ opcode(out, left, right);                             \
      }                                                          \
    } else {                                                     \
      Register right = ToRegister(right_input()).W();            \
      __ opcode##v(out, left, right);                            \
    }                                                            \
  }
DEF_SHIFT_BINOP(Int32ShiftLeft, lsl)
DEF_SHIFT_BINOP(Int32ShiftRight, asr)
DEF_SHIFT_BINOP(Int32ShiftRightLogical, lsr)
#undef DEF_SHIFT_BINOP
| |
void Int32BitwiseNot::SetValueLocationConstraints() {
  UseRegister(value_input());
  DefineAsRegister(this);
}
| |
| void Int32BitwiseNot::GenerateCode(MaglevAssembler* masm, |
| const ProcessingState& state) { |
| Register value = ToRegister(value_input()).W(); |
| Register out = ToRegister(result()).W(); |
| __ Mvn(out, value); |
| } |
| |
void Float64Add::SetValueLocationConstraints() {
  UseRegister(left_input());
  UseRegister(right_input());
  DefineAsRegister(this);
}
| |
| void Float64Add::GenerateCode(MaglevAssembler* masm, |
| const ProcessingState& state) { |
| DoubleRegister left = ToDoubleRegister(left_input()); |
| DoubleRegister right = ToDoubleRegister(right_input()); |
| DoubleRegister out = ToDoubleRegister(result()); |
| __ Fadd(out, left, right); |
| } |
| |
void Float64Subtract::SetValueLocationConstraints() {
  UseRegister(left_input());
  UseRegister(right_input());
  DefineAsRegister(this);
}
| |
| void Float64Subtract::GenerateCode(MaglevAssembler* masm, |
| const ProcessingState& state) { |
| DoubleRegister left = ToDoubleRegister(left_input()); |
| DoubleRegister right = ToDoubleRegister(right_input()); |
| DoubleRegister out = ToDoubleRegister(result()); |
| __ Fsub(out, left, right); |
| } |
| |
void Float64Multiply::SetValueLocationConstraints() {
  UseRegister(left_input());
  UseRegister(right_input());
  DefineAsRegister(this);
}
| |
| void Float64Multiply::GenerateCode(MaglevAssembler* masm, |
| const ProcessingState& state) { |
| DoubleRegister left = ToDoubleRegister(left_input()); |
| DoubleRegister right = ToDoubleRegister(right_input()); |
| DoubleRegister out = ToDoubleRegister(result()); |
| __ Fmul(out, left, right); |
| } |
| |
void Float64Divide::SetValueLocationConstraints() {
  UseRegister(left_input());
  UseRegister(right_input());
  DefineAsRegister(this);
}
| |
| void Float64Divide::GenerateCode(MaglevAssembler* masm, |
| const ProcessingState& state) { |
| DoubleRegister left = ToDoubleRegister(left_input()); |
| DoubleRegister right = ToDoubleRegister(right_input()); |
| DoubleRegister out = ToDoubleRegister(result()); |
| __ Fdiv(out, left, right); |
| } |
| |
int Float64Modulus::MaxCallStackArgs() const { return 0; }
// Operands are pinned to v0/v1, the first two FP argument registers; the
// result comes back in v0 (hence DefineSameAsFirst).
void Float64Modulus::SetValueLocationConstraints() {
  UseFixed(left_input(), v0);
  UseFixed(right_input(), v1);
  DefineSameAsFirst(this);
}
void Float64Modulus::GenerateCode(MaglevAssembler* masm,
                                  const ProcessingState& state) {
  AllowExternalCallThatCantCauseGC scope(masm);
  // C call: 0 general-purpose arguments, 2 double arguments.
  __ CallCFunction(ExternalReference::mod_two_doubles_operation(), 0, 2);
}
| |
void Float64Negate::SetValueLocationConstraints() {
  UseRegister(input());
  DefineAsRegister(this);
}
| void Float64Negate::GenerateCode(MaglevAssembler* masm, |
| const ProcessingState& state) { |
| DoubleRegister value = ToDoubleRegister(input()); |
| DoubleRegister out = ToDoubleRegister(result()); |
| __ Fneg(out, value); |
| } |
| |
| void Float64Abs::GenerateCode(MaglevAssembler* masm, |
| const ProcessingState& state) { |
| DoubleRegister in = ToDoubleRegister(input()); |
| DoubleRegister out = ToDoubleRegister(result()); |
| __ Fabs(out, in); |
| } |
| |
// Rounds a double according to kind_: kNearest uses Frintn plus a fix-up for
// JS's round-half-towards-+Infinity tie rule, kCeil uses Frintp, kFloor uses
// Frintm.
void Float64Round::GenerateCode(MaglevAssembler* masm,
                                const ProcessingState& state) {
  DoubleRegister in = ToDoubleRegister(input());
  DoubleRegister out = ToDoubleRegister(result());
  if (kind_ == Kind::kNearest) {
    MaglevAssembler::TemporaryRegisterScope temps(masm);
    DoubleRegister temp = temps.AcquireScratchDouble();
    DoubleRegister half_one = temps.AcquireScratchDouble();
    // Preserve the input in case in and out alias.
    __ Move(temp, in);
    // Frintn rounds to even on tie, while JS expects it to round towards
    // +Infinity. Fix the difference by checking if we rounded down by exactly
    // 0.5, and if so, round to the other side.
    __ Frintn(out, in);
    __ Fsub(temp, temp, out);
    __ Move(half_one, 0.5);
    __ Fcmp(temp, half_one);
    Label done;
    __ JumpIf(ne, &done, Label::kNear);
    // Fix wrong tie-to-even by adding 0.5 twice.
    __ Fadd(out, out, half_one);
    __ Fadd(out, out, half_one);
    __ bind(&done);
  } else if (kind_ == Kind::kCeil) {
    __ Frintp(out, in);
  } else if (kind_ == Kind::kFloor) {
    __ Frintm(out, in);
  }
}
| |
int Float64Exponentiate::MaxCallStackArgs() const { return 0; }
// Operands are pinned to v0/v1, the first two FP argument registers; the
// result comes back in v0 (hence DefineSameAsFirst).
void Float64Exponentiate::SetValueLocationConstraints() {
  UseFixed(left_input(), v0);
  UseFixed(right_input(), v1);
  DefineSameAsFirst(this);
}
void Float64Exponentiate::GenerateCode(MaglevAssembler* masm,
                                       const ProcessingState& state) {
  AllowExternalCallThatCantCauseGC scope(masm);
  // C call with 2 double arguments.
  __ CallCFunction(ExternalReference::ieee754_pow_function(), 2);
}
| |
int Float64Ieee754Unary::MaxCallStackArgs() const { return 0; }
// The single operand is pinned to v0, the first FP argument register; the
// result comes back in v0 (hence DefineSameAsFirst).
void Float64Ieee754Unary::SetValueLocationConstraints() {
  UseFixed(input(), v0);
  DefineSameAsFirst(this);
}
void Float64Ieee754Unary::GenerateCode(MaglevAssembler* masm,
                                       const ProcessingState& state) {
  AllowExternalCallThatCantCauseGC scope(masm);
  // C call with 1 double argument; the target function is selected per-node.
  __ CallCFunction(ieee_function_ref(), 1);
}
| |
void LoadTypedArrayLength::SetValueLocationConstraints() {
  UseRegister(receiver_input());
  DefineAsRegister(this);
}
// Loads the typed array's (bounds-checked) byte length and converts it to an
// element count by shifting right by the element size's log2.
void LoadTypedArrayLength::GenerateCode(MaglevAssembler* masm,
                                        const ProcessingState& state) {
  Register object = ToRegister(receiver_input());
  Register result_register = ToRegister(result());
  if (v8_flags.debug_code) {
    __ AssertObjectType(object, JS_TYPED_ARRAY_TYPE,
                        AbortReason::kUnexpectedValue);
  }
  __ LoadBoundedSizeFromObject(result_register, object,
                               JSTypedArray::kRawByteLengthOffset);
  int shift_size = ElementsKindToShiftSize(elements_kind_);
  if (shift_size > 0) {
    // TODO(leszeks): Merge this shift with the one in LoadBoundedSize.
    DCHECK(shift_size == 1 || shift_size == 2 || shift_size == 3);
    __ Lsr(result_register, result_register, shift_size);
  }
}
| |
int CheckJSDataViewBounds::MaxCallStackArgs() const { return 1; }
void CheckJSDataViewBounds::SetValueLocationConstraints() {
  UseRegister(receiver_input());
  UseRegister(index_input());
  set_temporaries_needed(1);
}
// Deopts with kOutOfBounds unless [index, index + element_size) lies within
// the DataView's byte length.
void CheckJSDataViewBounds::GenerateCode(MaglevAssembler* masm,
                                         const ProcessingState& state) {
  MaglevAssembler::TemporaryRegisterScope temps(masm);
  Register object = ToRegister(receiver_input());
  Register index = ToRegister(index_input());
  if (v8_flags.debug_code) {
    __ AssertObjectType(object, JS_DATA_VIEW_TYPE,
                        AbortReason::kUnexpectedValue);
  }

  // Normal DataView (backed by AB / SAB) or non-length tracking backed by GSAB.
  Register byte_length = temps.Acquire();
  __ LoadBoundedSizeFromObject(byte_length, object,
                               JSDataView::kRawByteLengthOffset);

  int element_size = compiler::ExternalArrayElementSize(element_type_);
  if (element_size > 1) {
    // Shrink the valid index range so that the whole element fits; deopt if
    // the byte length itself is smaller than one element (result negative).
    __ Subs(byte_length, byte_length, Immediate(element_size - 1));
    __ EmitEagerDeoptIf(mi, DeoptimizeReason::kOutOfBounds, this);
  }
  // Unsigned comparison: index must be strictly below the adjusted length.
  __ Cmp(index, byte_length);
  __ EmitEagerDeoptIf(hs, DeoptimizeReason::kOutOfBounds, this);
}
| |
void HoleyFloat64ToMaybeNanFloat64::SetValueLocationConstraints() {
  UseRegister(input());
  DefineAsRegister(this);
}
void HoleyFloat64ToMaybeNanFloat64::GenerateCode(MaglevAssembler* masm,
                                                 const ProcessingState& state) {
  // The hole value is a signalling NaN, so just silence it to get the float64
  // value.
  __ CanonicalizeNaN(ToDoubleRegister(result()), ToDoubleRegister(input()));
}
| |
| namespace { |
| |
| enum class ReduceInterruptBudgetType { kLoop, kReturn }; |
| |
// Deferred-code helper, invoked when the interrupt budget has been exhausted.
// For loops it first checks the interrupt stack limit; if an interrupt is
// pending it calls the runtime with a stack check (which may lazy-deopt).
// Otherwise (and always for returns) it only notifies the TieringManager via
// a runtime call that must not lazy-deopt. `scratch0` is reused for both the
// stack limit and the function load.
void HandleInterruptsAndTiering(MaglevAssembler* masm, ZoneLabelRef done,
                                Node* node, ReduceInterruptBudgetType type,
                                Register scratch0) {
  // For loops, first check for interrupts. Don't do this for returns, as we
  // can't lazy deopt to the end of a return.
  if (type == ReduceInterruptBudgetType::kLoop) {
    Label next;
    // Here, we only care about interrupts since we've already guarded against
    // real stack overflows on function entry.
    {
      Register stack_limit = scratch0;
      __ LoadStackLimit(stack_limit, StackLimitKind::kInterruptStackLimit);
      __ Cmp(sp, stack_limit);
      __ B(&next, hi);
    }

    // An interrupt has been requested and we must call into runtime to handle
    // it; since we already pay the call cost, combine with the TieringManager
    // call.
    {
      SaveRegisterStateForCall save_register_state(masm,
                                                   node->register_snapshot());
      Register function = scratch0;
      __ Ldr(function, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
      __ Push(function);
      // Move into kContextRegister after the load into scratch0, just in case
      // scratch0 happens to be kContextRegister.
      __ Move(kContextRegister, masm->native_context().object());
      __ CallRuntime(Runtime::kBytecodeBudgetInterruptWithStackCheck_Maglev, 1);
      save_register_state.DefineSafepointWithLazyDeopt(node->lazy_deopt_info());
    }
    __ B(*done);  // All done, continue.
    __ Bind(&next);
  }

  // No pending interrupts. Call into the TieringManager if needed.
  {
    SaveRegisterStateForCall save_register_state(masm,
                                                 node->register_snapshot());
    Register function = scratch0;
    __ Ldr(function, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
    __ Push(function);
    // Move into kContextRegister after the load into scratch0, just in case
    // scratch0 happens to be kContextRegister.
    __ Move(kContextRegister, masm->native_context().object());
    // Note: must not cause a lazy deopt!
    __ CallRuntime(Runtime::kBytecodeBudgetInterrupt_Maglev, 1);
    save_register_state.DefineSafepoint();
  }
  __ B(*done);
}
| |
// Decrements the feedback cell's interrupt budget by `amount` and, when the
// budget goes negative, jumps to deferred code that handles interrupts and
// tiering (see HandleInterruptsAndTiering).
void GenerateReduceInterruptBudget(MaglevAssembler* masm, Node* node,
                                   Register feedback_cell,
                                   ReduceInterruptBudgetType type, int amount) {
  MaglevAssembler::TemporaryRegisterScope temps(masm);
  Register scratch = temps.Acquire();
  Register budget = scratch.W();
  __ Ldr(budget,
         FieldMemOperand(feedback_cell, FeedbackCell::kInterruptBudgetOffset));
  __ Subs(budget, budget, Immediate(amount));
  __ Str(budget,
         FieldMemOperand(feedback_cell, FeedbackCell::kInterruptBudgetOffset));
  ZoneLabelRef done(masm);
  // lt: the subtraction above took the budget below zero.
  __ JumpToDeferredIf(lt, HandleInterruptsAndTiering, done, node, type,
                      scratch);
  __ Bind(*done);
}
| |
| } // namespace |
| |
int ReduceInterruptBudgetForLoop::MaxCallStackArgs() const { return 1; }
void ReduceInterruptBudgetForLoop::SetValueLocationConstraints() {
  UseRegister(feedback_cell());
  set_temporaries_needed(1);
}
// Loop back-edge budget check; may lazy-deopt via the interrupt runtime call.
void ReduceInterruptBudgetForLoop::GenerateCode(MaglevAssembler* masm,
                                                const ProcessingState& state) {
  GenerateReduceInterruptBudget(masm, this, ToRegister(feedback_cell()),
                                ReduceInterruptBudgetType::kLoop, amount());
}
| |
int ReduceInterruptBudgetForReturn::MaxCallStackArgs() const { return 1; }
void ReduceInterruptBudgetForReturn::SetValueLocationConstraints() {
  UseRegister(feedback_cell());
  set_temporaries_needed(1);
}
// Function-exit budget check; never performs the interrupt stack check (we
// cannot lazy-deopt at a return).
void ReduceInterruptBudgetForReturn::GenerateCode(
    MaglevAssembler* masm, const ProcessingState& state) {
  GenerateReduceInterruptBudget(masm, this, ToRegister(feedback_cell()),
                                ReduceInterruptBudgetType::kReturn, amount());
}
| |
| // --- |
| // Control nodes |
| // --- |
void Return::SetValueLocationConstraints() {
  UseFixed(value_input(), kReturnRegister0);
}
// Tears down the Maglev frame and returns, dropping max(actual, formal)
// parameters plus the receiver from the stack.
void Return::GenerateCode(MaglevAssembler* masm, const ProcessingState& state) {
  DCHECK_EQ(ToRegister(value_input()), kReturnRegister0);
  // Read the formal number of parameters from the top level compilation unit
  // (i.e. the outermost, non inlined function).
  int formal_params_size =
      masm->compilation_info()->toplevel_compilation_unit()->parameter_count();

  // We're not going to continue execution, so we can use an arbitrary register
  // here instead of relying on temporaries from the register allocator.
  // We cannot use scratch registers, since they're used in LeaveFrame and
  // DropArguments.
  Register actual_params_size = x9;
  Register params_size = x10;

  // Compute the size of the actual parameters + receiver (in bytes).
  // TODO(leszeks): Consider making this an input into Return to reuse the
  // incoming argc's register (if it's still valid).
  __ Ldr(actual_params_size,
         MemOperand(fp, StandardFrameConstants::kArgCOffset));
  __ Mov(params_size, Immediate(formal_params_size));

  // If actual is bigger than formal, then we should use it to free up the stack
  // arguments.
  Label corrected_args_count;
  __ CompareAndBranch(params_size, actual_params_size, ge,
                      &corrected_args_count);
  __ Mov(params_size, actual_params_size);
  __ Bind(&corrected_args_count);

  // Leave the frame.
  __ LeaveFrame(StackFrame::MAGLEV);

  // Drop receiver + arguments according to dynamic arguments size.
  __ DropArguments(params_size);
  __ Ret();
}
| |
| } // namespace maglev |
| } // namespace internal |
| } // namespace v8 |