| // Copyright 2016 the V8 project authors. All rights reserved. |
| // Use of this source code is governed by a BSD-style license that can be |
| // found in the LICENSE file. |
| #include "src/code-stub-assembler.h" |
| #include "src/code-factory.h" |
| #include "src/frames-inl.h" |
| #include "src/frames.h" |
| |
| namespace v8 { |
| namespace internal { |
| |
| using compiler::Node; |
| |
| CodeStubAssembler::CodeStubAssembler(compiler::CodeAssemblerState* state) |
| : compiler::CodeAssembler(state) { |
| if (DEBUG_BOOL && FLAG_csa_trap_on_node != nullptr) { |
| HandleBreakOnNode(); |
| } |
| } |
| |
| void CodeStubAssembler::HandleBreakOnNode() { |
| // FLAG_csa_trap_on_node should be in the form "STUB,NODE", where STUB is |
| // the name of a stub and NODE is the id of the node to break on. |
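| // For example, --csa-trap-on-node=LoadIC,42 (stub name chosen purely for |
| // illustration) requests a break when node #42 of the LoadIC stub is |
| // created. |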
| const char* name = state()->name(); |
| size_t name_length = strlen(name); |
| if (strncmp(FLAG_csa_trap_on_node, name, name_length) != 0) { |
| // Different name. |
| return; |
| } |
| size_t option_length = strlen(FLAG_csa_trap_on_node); |
| if (option_length < name_length + 2 || |
| FLAG_csa_trap_on_node[name_length] != ',') { |
| // Option is too short or not of the form "STUB,NODE". |
| return; |
| } |
| const char* start = &FLAG_csa_trap_on_node[name_length + 1]; |
| char* end; |
| int node_id = static_cast<int>(strtol(start, &end, 10)); |
| if (start == end) { |
| // Bad node id. |
| return; |
| } |
| BreakOnNode(node_id); |
| } |
| |
| void CodeStubAssembler::Assert(const NodeGenerator& condition_body, |
| const char* message, const char* file, |
| int line) { |
| #if defined(DEBUG) |
| if (FLAG_debug_code) { |
| Label ok(this); |
| Label not_ok(this, Label::kDeferred); |
| if (message != nullptr && FLAG_code_comments) { |
| Comment("[ Assert: %s", message); |
| } else { |
| Comment("[ Assert"); |
| } |
| Node* condition = condition_body(); |
| DCHECK_NOT_NULL(condition); |
| Branch(condition, &ok, ¬_ok); |
| BIND(¬_ok); |
| if (message != nullptr) { |
| char chars[1024]; |
| Vector<char> buffer(chars); |
| if (file != nullptr) { |
| SNPrintF(buffer, "CSA_ASSERT failed: %s [%s:%d]\n", message, file, |
| line); |
| } else { |
| SNPrintF(buffer, "CSA_ASSERT failed: %s\n", message); |
| } |
| CallRuntime( |
| Runtime::kGlobalPrint, SmiConstant(Smi::kZero), |
| HeapConstant(factory()->NewStringFromAsciiChecked(&(buffer[0])))); |
| } |
| DebugBreak(); |
| Goto(&ok); |
| BIND(&ok); |
| Comment("] Assert"); |
| } |
| #endif |
| } |
| |
| Node* CodeStubAssembler::Select(Node* condition, const NodeGenerator& true_body, |
| const NodeGenerator& false_body, |
| MachineRepresentation rep) { |
| VARIABLE(value, rep); |
| Label vtrue(this), vfalse(this), end(this); |
| Branch(condition, &vtrue, &vfalse); |
| |
| BIND(&vtrue); |
| { |
| value.Bind(true_body()); |
| Goto(&end); |
| } |
| BIND(&vfalse); |
| { |
| value.Bind(false_body()); |
| Goto(&end); |
| } |
| |
| BIND(&end); |
| return value.value(); |
| } |
| |
| Node* CodeStubAssembler::SelectConstant(Node* condition, Node* true_value, |
| Node* false_value, |
| MachineRepresentation rep) { |
| return Select(condition, [=] { return true_value; }, |
| [=] { return false_value; }, rep); |
| } |
| |
| Node* CodeStubAssembler::SelectInt32Constant(Node* condition, int true_value, |
| int false_value) { |
| return SelectConstant(condition, Int32Constant(true_value), |
| Int32Constant(false_value), |
| MachineRepresentation::kWord32); |
| } |
| |
| Node* CodeStubAssembler::SelectIntPtrConstant(Node* condition, int true_value, |
| int false_value) { |
| return SelectConstant(condition, IntPtrConstant(true_value), |
| IntPtrConstant(false_value), |
| MachineType::PointerRepresentation()); |
| } |
| |
| Node* CodeStubAssembler::SelectBooleanConstant(Node* condition) { |
| return SelectConstant(condition, TrueConstant(), FalseConstant(), |
| MachineRepresentation::kTagged); |
| } |
| |
| Node* CodeStubAssembler::SelectTaggedConstant(Node* condition, Node* true_value, |
| Node* false_value) { |
| return SelectConstant(condition, true_value, false_value, |
| MachineRepresentation::kTagged); |
| } |
| |
| Node* CodeStubAssembler::SelectSmiConstant(Node* condition, Smi* true_value, |
| Smi* false_value) { |
| return SelectConstant(condition, SmiConstant(true_value), |
| SmiConstant(false_value), |
| MachineRepresentation::kTaggedSigned); |
| } |
| |
| Node* CodeStubAssembler::NoContextConstant() { return NumberConstant(0); } |
| |
| #define HEAP_CONSTANT_ACCESSOR(rootName, name) \ |
| Node* CodeStubAssembler::name##Constant() { \ |
| return LoadRoot(Heap::k##rootName##RootIndex); \ |
| } |
| HEAP_CONSTANT_LIST(HEAP_CONSTANT_ACCESSOR); |
| #undef HEAP_CONSTANT_ACCESSOR |
| |
| #define HEAP_CONSTANT_TEST(rootName, name) \ |
| Node* CodeStubAssembler::Is##name(Node* value) { \ |
| return WordEqual(value, name##Constant()); \ |
| } |
| HEAP_CONSTANT_LIST(HEAP_CONSTANT_TEST); |
| #undef HEAP_CONSTANT_TEST |
| |
| Node* CodeStubAssembler::HashSeed() { |
| return LoadAndUntagToWord32Root(Heap::kHashSeedRootIndex); |
| } |
| |
| Node* CodeStubAssembler::StaleRegisterConstant() { |
| return LoadRoot(Heap::kStaleRegisterRootIndex); |
| } |
| |
| Node* CodeStubAssembler::IntPtrOrSmiConstant(int value, ParameterMode mode) { |
| if (mode == SMI_PARAMETERS) { |
| return SmiConstant(Smi::FromInt(value)); |
| } else { |
| DCHECK_EQ(INTPTR_PARAMETERS, mode); |
| return IntPtrConstant(value); |
| } |
| } |
| |
| bool CodeStubAssembler::IsIntPtrOrSmiConstantZero(Node* test) { |
| int32_t constant_test; |
| Smi* smi_test; |
| if ((ToInt32Constant(test, constant_test) && constant_test == 0) || |
| (ToSmiConstant(test, smi_test) && smi_test->value() == 0)) { |
| return true; |
| } |
| return false; |
| } |
| |
| Node* CodeStubAssembler::IntPtrRoundUpToPowerOfTwo32(Node* value) { |
| Comment("IntPtrRoundUpToPowerOfTwo32"); |
| CSA_ASSERT(this, UintPtrLessThanOrEqual(value, IntPtrConstant(0x80000000u))); |
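| // Classic round-up-to-power-of-two: decrement, OR the value with copies of |
| // itself shifted right by 1, 2, 4, 8 and 16 so that every bit below the |
| // highest set bit becomes one, then increment. For example 5 -> 4 -> 7 -> 8. |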
| value = IntPtrSub(value, IntPtrConstant(1)); |
| for (int i = 1; i <= 16; i *= 2) { |
| value = WordOr(value, WordShr(value, IntPtrConstant(i))); |
| } |
| return IntPtrAdd(value, IntPtrConstant(1)); |
| } |
| |
| Node* CodeStubAssembler::WordIsPowerOfTwo(Node* value) { |
| // value && !(value & (value - 1)) |
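| // A power of two has exactly one bit set, so value & (value - 1) is zero |
| // for it (e.g. 8 & 7 == 0b1000 & 0b0111 == 0). The Select below maps a |
| // zero {value} to a non-zero word so that zero is not reported as a power |
| // of two. |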
| return WordEqual( |
| Select( |
| WordEqual(value, IntPtrConstant(0)), |
| [=] { return IntPtrConstant(1); }, |
| [=] { return WordAnd(value, IntPtrSub(value, IntPtrConstant(1))); }, |
| MachineType::PointerRepresentation()), |
| IntPtrConstant(0)); |
| } |
| |
| Node* CodeStubAssembler::Float64Round(Node* x) { |
| Node* one = Float64Constant(1.0); |
| Node* one_half = Float64Constant(0.5); |
| |
| Label return_x(this); |
| |
| // Round up {x} towards Infinity. |
| VARIABLE(var_x, MachineRepresentation::kFloat64, Float64Ceil(x)); |
| |
| GotoIf(Float64LessThanOrEqual(Float64Sub(var_x.value(), one_half), x), |
| &return_x); |
| var_x.Bind(Float64Sub(var_x.value(), one)); |
| Goto(&return_x); |
| |
| BIND(&return_x); |
| return var_x.value(); |
| } |
| |
| Node* CodeStubAssembler::Float64Ceil(Node* x) { |
| if (IsFloat64RoundUpSupported()) { |
| return Float64RoundUp(x); |
| } |
| |
| Node* one = Float64Constant(1.0); |
| Node* zero = Float64Constant(0.0); |
| Node* two_52 = Float64Constant(4503599627370496.0E0); |
| Node* minus_two_52 = Float64Constant(-4503599627370496.0E0); |
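| // Doubles in [2^52, 2^53) are spaced exactly 1.0 apart, so adding 2^52 to |
| // a positive x < 2^52 and subtracting it again rounds x to a nearby |
| // integer; the branches below then adjust that result by one where needed. |
| // The same trick is used in Float64Floor and Float64Trunc below. |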
| |
| VARIABLE(var_x, MachineRepresentation::kFloat64, x); |
| Label return_x(this), return_minus_x(this); |
| |
| // Check if {x} is greater than zero. |
| Label if_xgreaterthanzero(this), if_xnotgreaterthanzero(this); |
| Branch(Float64GreaterThan(x, zero), &if_xgreaterthanzero, |
| &if_xnotgreaterthanzero); |
| |
| BIND(&if_xgreaterthanzero); |
| { |
| // Just return {x} unless it's in the range ]0,2^52[. |
| GotoIf(Float64GreaterThanOrEqual(x, two_52), &return_x); |
| |
| // Round positive {x} towards Infinity. |
| var_x.Bind(Float64Sub(Float64Add(two_52, x), two_52)); |
| GotoIfNot(Float64LessThan(var_x.value(), x), &return_x); |
| var_x.Bind(Float64Add(var_x.value(), one)); |
| Goto(&return_x); |
| } |
| |
| BIND(&if_xnotgreaterthanzero); |
| { |
| // Just return {x} unless it's in the range ]-2^52,0[ |
| GotoIf(Float64LessThanOrEqual(x, minus_two_52), &return_x); |
| GotoIfNot(Float64LessThan(x, zero), &return_x); |
| |
| // Round the negated {x} towards -Infinity (i.e. compute Floor(-x)) and |
| // return the result negated, which yields Ceil(x). |
| Node* minus_x = Float64Neg(x); |
| var_x.Bind(Float64Sub(Float64Add(two_52, minus_x), two_52)); |
| GotoIfNot(Float64GreaterThan(var_x.value(), minus_x), &return_minus_x); |
| var_x.Bind(Float64Sub(var_x.value(), one)); |
| Goto(&return_minus_x); |
| } |
| |
| BIND(&return_minus_x); |
| var_x.Bind(Float64Neg(var_x.value())); |
| Goto(&return_x); |
| |
| BIND(&return_x); |
| return var_x.value(); |
| } |
| |
| Node* CodeStubAssembler::Float64Floor(Node* x) { |
| if (IsFloat64RoundDownSupported()) { |
| return Float64RoundDown(x); |
| } |
| |
| Node* one = Float64Constant(1.0); |
| Node* zero = Float64Constant(0.0); |
| Node* two_52 = Float64Constant(4503599627370496.0E0); |
| Node* minus_two_52 = Float64Constant(-4503599627370496.0E0); |
| |
| VARIABLE(var_x, MachineRepresentation::kFloat64, x); |
| Label return_x(this), return_minus_x(this); |
| |
| // Check if {x} is greater than zero. |
| Label if_xgreaterthanzero(this), if_xnotgreaterthanzero(this); |
| Branch(Float64GreaterThan(x, zero), &if_xgreaterthanzero, |
| &if_xnotgreaterthanzero); |
| |
| BIND(&if_xgreaterthanzero); |
| { |
| // Just return {x} unless it's in the range ]0,2^52[. |
| GotoIf(Float64GreaterThanOrEqual(x, two_52), &return_x); |
| |
| // Round positive {x} towards -Infinity. |
| var_x.Bind(Float64Sub(Float64Add(two_52, x), two_52)); |
| GotoIfNot(Float64GreaterThan(var_x.value(), x), &return_x); |
| var_x.Bind(Float64Sub(var_x.value(), one)); |
| Goto(&return_x); |
| } |
| |
| BIND(&if_xnotgreaterthanzero); |
| { |
| // Just return {x} unless it's in the range ]-2^52,0[ |
| GotoIf(Float64LessThanOrEqual(x, minus_two_52), &return_x); |
| GotoIfNot(Float64LessThan(x, zero), &return_x); |
| |
| // Round the negated {x} towards Infinity (i.e. compute Ceil(-x)) and |
| // return the result negated, which yields Floor(x). |
| Node* minus_x = Float64Neg(x); |
| var_x.Bind(Float64Sub(Float64Add(two_52, minus_x), two_52)); |
| GotoIfNot(Float64LessThan(var_x.value(), minus_x), &return_minus_x); |
| var_x.Bind(Float64Add(var_x.value(), one)); |
| Goto(&return_minus_x); |
| } |
| |
| BIND(&return_minus_x); |
| var_x.Bind(Float64Neg(var_x.value())); |
| Goto(&return_x); |
| |
| BIND(&return_x); |
| return var_x.value(); |
| } |
| |
| Node* CodeStubAssembler::Float64RoundToEven(Node* x) { |
| if (IsFloat64RoundTiesEvenSupported()) { |
| return Float64RoundTiesEven(x); |
| } |
| // See ES#sec-touint8clamp for details. |
| Node* f = Float64Floor(x); |
| Node* f_and_half = Float64Add(f, Float64Constant(0.5)); |
| |
| VARIABLE(var_result, MachineRepresentation::kFloat64); |
| Label return_f(this), return_f_plus_one(this), done(this); |
| |
| GotoIf(Float64LessThan(f_and_half, x), &return_f_plus_one); |
| GotoIf(Float64LessThan(x, f_and_half), &return_f); |
| { |
| Node* f_mod_2 = Float64Mod(f, Float64Constant(2.0)); |
| Branch(Float64Equal(f_mod_2, Float64Constant(0.0)), &return_f, |
| &return_f_plus_one); |
| } |
| |
| BIND(&return_f); |
| var_result.Bind(f); |
| Goto(&done); |
| |
| BIND(&return_f_plus_one); |
| var_result.Bind(Float64Add(f, Float64Constant(1.0))); |
| Goto(&done); |
| |
| BIND(&done); |
| return var_result.value(); |
| } |
| |
| Node* CodeStubAssembler::Float64Trunc(Node* x) { |
| if (IsFloat64RoundTruncateSupported()) { |
| return Float64RoundTruncate(x); |
| } |
| |
| Node* one = Float64Constant(1.0); |
| Node* zero = Float64Constant(0.0); |
| Node* two_52 = Float64Constant(4503599627370496.0E0); |
| Node* minus_two_52 = Float64Constant(-4503599627370496.0E0); |
| |
| VARIABLE(var_x, MachineRepresentation::kFloat64, x); |
| Label return_x(this), return_minus_x(this); |
| |
| // Check if {x} is greater than 0. |
| Label if_xgreaterthanzero(this), if_xnotgreaterthanzero(this); |
| Branch(Float64GreaterThan(x, zero), &if_xgreaterthanzero, |
| &if_xnotgreaterthanzero); |
| |
| BIND(&if_xgreaterthanzero); |
| { |
| if (IsFloat64RoundDownSupported()) { |
| var_x.Bind(Float64RoundDown(x)); |
| } else { |
| // Just return {x} unless it's in the range ]0,2^52[. |
| GotoIf(Float64GreaterThanOrEqual(x, two_52), &return_x); |
| |
| // Round positive {x} towards -Infinity. |
| var_x.Bind(Float64Sub(Float64Add(two_52, x), two_52)); |
| GotoIfNot(Float64GreaterThan(var_x.value(), x), &return_x); |
| var_x.Bind(Float64Sub(var_x.value(), one)); |
| } |
| Goto(&return_x); |
| } |
| |
| BIND(&if_xnotgreaterthanzero); |
| { |
| if (IsFloat64RoundUpSupported()) { |
| var_x.Bind(Float64RoundUp(x)); |
| Goto(&return_x); |
| } else { |
| // Just return {x} unless it's in the range ]-2^52,0[. |
| GotoIf(Float64LessThanOrEqual(x, minus_two_52), &return_x); |
| GotoIfNot(Float64LessThan(x, zero), &return_x); |
| |
| // Round the negated {x} towards -Infinity (i.e. compute Floor(-x)) and |
| // return the result negated, which truncates {x} towards zero. |
| Node* minus_x = Float64Neg(x); |
| var_x.Bind(Float64Sub(Float64Add(two_52, minus_x), two_52)); |
| GotoIfNot(Float64GreaterThan(var_x.value(), minus_x), &return_minus_x); |
| var_x.Bind(Float64Sub(var_x.value(), one)); |
| Goto(&return_minus_x); |
| } |
| } |
| |
| BIND(&return_minus_x); |
| var_x.Bind(Float64Neg(var_x.value())); |
| Goto(&return_x); |
| |
| BIND(&return_x); |
| return var_x.value(); |
| } |
| |
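| // A Smi is the integer value shifted left by kSmiShiftSize + kSmiTagSize, |
| // leaving a zero tag bit in the least significant position. On typical |
| // 64-bit configurations (kSmiShiftSize == 31, kSmiTagSize == 1) the payload |
| // occupies the upper 32 bits of the word, e.g. SmiTag(5) produces |
| // 0x0000000500000000; on 32-bit configurations the shift is a single bit |
| // and SmiTag(5) produces 0xa (illustrative values). |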
| Node* CodeStubAssembler::SmiShiftBitsConstant() { |
| return IntPtrConstant(kSmiShiftSize + kSmiTagSize); |
| } |
| |
| Node* CodeStubAssembler::SmiFromWord32(Node* value) { |
| value = ChangeInt32ToIntPtr(value); |
| return BitcastWordToTaggedSigned(WordShl(value, SmiShiftBitsConstant())); |
| } |
| |
| Node* CodeStubAssembler::SmiTag(Node* value) { |
| int32_t constant_value; |
| if (ToInt32Constant(value, constant_value) && Smi::IsValid(constant_value)) { |
| return SmiConstant(Smi::FromInt(constant_value)); |
| } |
| return BitcastWordToTaggedSigned(WordShl(value, SmiShiftBitsConstant())); |
| } |
| |
| Node* CodeStubAssembler::SmiUntag(Node* value) { |
| return WordSar(BitcastTaggedToWord(value), SmiShiftBitsConstant()); |
| } |
| |
| Node* CodeStubAssembler::SmiToWord32(Node* value) { |
| Node* result = SmiUntag(value); |
| return TruncateWordToWord32(result); |
| } |
| |
| Node* CodeStubAssembler::SmiToFloat64(Node* value) { |
| return ChangeInt32ToFloat64(SmiToWord32(value)); |
| } |
| |
| Node* CodeStubAssembler::SmiMax(Node* a, Node* b) { |
| return SelectTaggedConstant(SmiLessThan(a, b), b, a); |
| } |
| |
| Node* CodeStubAssembler::SmiMin(Node* a, Node* b) { |
| return SelectTaggedConstant(SmiLessThan(a, b), a, b); |
| } |
| |
| Node* CodeStubAssembler::SmiMod(Node* a, Node* b) { |
| VARIABLE(var_result, MachineRepresentation::kTagged); |
| Label return_result(this, &var_result), |
| return_minuszero(this, Label::kDeferred), |
| return_nan(this, Label::kDeferred); |
| |
| // Untag {a} and {b}. |
| a = SmiToWord32(a); |
| b = SmiToWord32(b); |
| |
| // Return NaN if {b} is zero. |
| GotoIf(Word32Equal(b, Int32Constant(0)), &return_nan); |
| |
| // Check if {a} is non-negative. |
| Label if_aisnotnegative(this), if_aisnegative(this, Label::kDeferred); |
| Branch(Int32LessThanOrEqual(Int32Constant(0), a), &if_aisnotnegative, |
| &if_aisnegative); |
| |
| BIND(&if_aisnotnegative); |
| { |
| // Fast case, don't need to check any other edge cases. |
| Node* r = Int32Mod(a, b); |
| var_result.Bind(SmiFromWord32(r)); |
| Goto(&return_result); |
| } |
| |
| BIND(&if_aisnegative); |
| { |
| if (SmiValuesAre32Bits()) { |
| // Check if {a} is kMinInt and {b} is -1 (only relevant when kMinInt is |
| // actually representable as a Smi, i.e. for 32-bit Smis). |
| Label join(this); |
| GotoIfNot(Word32Equal(a, Int32Constant(kMinInt)), &join); |
| GotoIf(Word32Equal(b, Int32Constant(-1)), &return_minuszero); |
| Goto(&join); |
| BIND(&join); |
| } |
| |
| // Perform the integer modulus operation. |
| Node* r = Int32Mod(a, b); |
| |
| // Check if {r} is zero, and if so return -0, because we have to |
| // take the sign of the left hand side {a}, which is negative. |
| GotoIf(Word32Equal(r, Int32Constant(0)), &return_minuszero); |
| |
| // The remainder {r} can be outside the valid Smi range on 32-bit |
| // architectures, so we cannot just say SmiFromWord32(r) here. |
| var_result.Bind(ChangeInt32ToTagged(r)); |
| Goto(&return_result); |
| } |
| |
| BIND(&return_minuszero); |
| var_result.Bind(MinusZeroConstant()); |
| Goto(&return_result); |
| |
| BIND(&return_nan); |
| var_result.Bind(NanConstant()); |
| Goto(&return_result); |
| |
| BIND(&return_result); |
| return var_result.value(); |
| } |
| |
| Node* CodeStubAssembler::SmiMul(Node* a, Node* b) { |
| VARIABLE(var_result, MachineRepresentation::kTagged); |
| VARIABLE(var_lhs_float64, MachineRepresentation::kFloat64); |
| VARIABLE(var_rhs_float64, MachineRepresentation::kFloat64); |
| Label return_result(this, &var_result); |
| |
| // Both {a} and {b} are Smis. Convert them to integers and multiply. |
| Node* lhs32 = SmiToWord32(a); |
| Node* rhs32 = SmiToWord32(b); |
| Node* pair = Int32MulWithOverflow(lhs32, rhs32); |
| |
| Node* overflow = Projection(1, pair); |
| |
| // Check if the multiplication overflowed. |
| Label if_overflow(this, Label::kDeferred), if_notoverflow(this); |
| Branch(overflow, &if_overflow, &if_notoverflow); |
| BIND(&if_notoverflow); |
| { |
| // If the answer is zero, we may need to return -0.0, depending on the |
| // input. |
| Label answer_zero(this), answer_not_zero(this); |
| Node* answer = Projection(0, pair); |
| Node* zero = Int32Constant(0); |
| Branch(Word32Equal(answer, zero), &answer_zero, &answer_not_zero); |
| BIND(&answer_not_zero); |
| { |
| var_result.Bind(ChangeInt32ToTagged(answer)); |
| Goto(&return_result); |
| } |
| BIND(&answer_zero); |
| { |
| Node* or_result = Word32Or(lhs32, rhs32); |
| Label if_should_be_negative_zero(this), if_should_be_zero(this); |
| Branch(Int32LessThan(or_result, zero), &if_should_be_negative_zero, |
| &if_should_be_zero); |
| BIND(&if_should_be_negative_zero); |
| { |
| var_result.Bind(MinusZeroConstant()); |
| Goto(&return_result); |
| } |
| BIND(&if_should_be_zero); |
| { |
| var_result.Bind(SmiConstant(0)); |
| Goto(&return_result); |
| } |
| } |
| } |
| BIND(&if_overflow); |
| { |
| var_lhs_float64.Bind(SmiToFloat64(a)); |
| var_rhs_float64.Bind(SmiToFloat64(b)); |
| Node* value = Float64Mul(var_lhs_float64.value(), var_rhs_float64.value()); |
| Node* result = AllocateHeapNumberWithValue(value); |
| var_result.Bind(result); |
| Goto(&return_result); |
| } |
| |
| BIND(&return_result); |
| return var_result.value(); |
| } |
| |
| Node* CodeStubAssembler::TrySmiDiv(Node* dividend, Node* divisor, |
| Label* bailout) { |
| // Both {dividend} and {divisor} are Smis. Bail out to floating point |
| // division if {divisor} is zero. |
| GotoIf(WordEqual(divisor, SmiConstant(0)), bailout); |
| |
| // Do floating point division if {dividend} is zero and {divisor} is |
| // negative. |
| Label dividend_is_zero(this), dividend_is_not_zero(this); |
| Branch(WordEqual(dividend, SmiConstant(0)), ÷nd_is_zero, |
| ÷nd_is_not_zero); |
| |
| Bind(÷nd_is_zero); |
| { |
| GotoIf(SmiLessThan(divisor, SmiConstant(0)), bailout); |
| Goto(÷nd_is_not_zero); |
| } |
| Bind(÷nd_is_not_zero); |
| |
| Node* untagged_divisor = SmiToWord32(divisor); |
| Node* untagged_dividend = SmiToWord32(dividend); |
| |
| // Do floating point division if {dividend} is kMinInt (or kMinInt >> 1 |
| // when Smis are 31 bits wide) and {divisor} is -1. |
| Label divisor_is_minus_one(this), divisor_is_not_minus_one(this); |
| Branch(Word32Equal(untagged_divisor, Int32Constant(-1)), |
| &divisor_is_minus_one, &divisor_is_not_minus_one); |
| |
| BIND(&divisor_is_minus_one); |
| { |
| GotoIf(Word32Equal( |
| untagged_dividend, |
| Int32Constant(kSmiValueSize == 32 ? kMinInt : (kMinInt >> 1))), |
| bailout); |
| Goto(&divisor_is_not_minus_one); |
| } |
| BIND(&divisor_is_not_minus_one); |
| |
| Node* untagged_result = Int32Div(untagged_dividend, untagged_divisor); |
| Node* truncated = Int32Mul(untagged_result, untagged_divisor); |
| |
| // Do floating point division if the remainder is not 0. |
| GotoIf(Word32NotEqual(untagged_dividend, truncated), bailout); |
| |
| return SmiFromWord32(untagged_result); |
| } |
| |
| Node* CodeStubAssembler::TruncateWordToWord32(Node* value) { |
| if (Is64()) { |
| return TruncateInt64ToInt32(value); |
| } |
| return value; |
| } |
| |
| Node* CodeStubAssembler::TaggedIsSmi(Node* a) { |
| return WordEqual(WordAnd(BitcastTaggedToWord(a), IntPtrConstant(kSmiTagMask)), |
| IntPtrConstant(0)); |
| } |
| |
| Node* CodeStubAssembler::TaggedIsNotSmi(Node* a) { |
| return WordNotEqual( |
| WordAnd(BitcastTaggedToWord(a), IntPtrConstant(kSmiTagMask)), |
| IntPtrConstant(0)); |
| } |
| |
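| // A positive Smi has both the tag bit and the sign bit clear, so a single |
| // mask with kSmiTagMask | kSmiSignMask tests both properties at once. |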
| Node* CodeStubAssembler::TaggedIsPositiveSmi(Node* a) { |
| return WordEqual(WordAnd(BitcastTaggedToWord(a), |
| IntPtrConstant(kSmiTagMask | kSmiSignMask)), |
| IntPtrConstant(0)); |
| } |
| |
| Node* CodeStubAssembler::WordIsWordAligned(Node* word) { |
| return WordEqual(IntPtrConstant(0), |
| WordAnd(word, IntPtrConstant((1 << kPointerSizeLog2) - 1))); |
| } |
| |
| void CodeStubAssembler::BranchIfPrototypesHaveNoElements( |
| Node* receiver_map, Label* definitely_no_elements, |
| Label* possibly_elements) { |
| VARIABLE(var_map, MachineRepresentation::kTagged, receiver_map); |
| Label loop_body(this, &var_map); |
| Node* empty_elements = LoadRoot(Heap::kEmptyFixedArrayRootIndex); |
| Goto(&loop_body); |
| |
| BIND(&loop_body); |
| { |
| Node* map = var_map.value(); |
| Node* prototype = LoadMapPrototype(map); |
| GotoIf(WordEqual(prototype, NullConstant()), definitely_no_elements); |
| Node* prototype_map = LoadMap(prototype); |
| // Pessimistically assume elements if a Proxy, Special API Object, |
| // or JSValue wrapper is found on the prototype chain. After this |
| // instance type check, it's not necessary to check for interceptors or |
| // access checks. |
| GotoIf(Int32LessThanOrEqual(LoadMapInstanceType(prototype_map), |
| Int32Constant(LAST_CUSTOM_ELEMENTS_RECEIVER)), |
| possibly_elements); |
| GotoIf(WordNotEqual(LoadElements(prototype), empty_elements), |
| possibly_elements); |
| var_map.Bind(prototype_map); |
| Goto(&loop_body); |
| } |
| } |
| |
| void CodeStubAssembler::BranchIfJSReceiver(Node* object, Label* if_true, |
| Label* if_false) { |
| GotoIf(TaggedIsSmi(object), if_false); |
| STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE); |
| Branch(Int32GreaterThanOrEqual(LoadInstanceType(object), |
| Int32Constant(FIRST_JS_RECEIVER_TYPE)), |
| if_true, if_false); |
| } |
| |
| void CodeStubAssembler::BranchIfJSObject(Node* object, Label* if_true, |
| Label* if_false) { |
| GotoIf(TaggedIsSmi(object), if_false); |
| STATIC_ASSERT(LAST_JS_OBJECT_TYPE == LAST_TYPE); |
| Branch(Int32GreaterThanOrEqual(LoadInstanceType(object), |
| Int32Constant(FIRST_JS_OBJECT_TYPE)), |
| if_true, if_false); |
| } |
| |
| void CodeStubAssembler::BranchIfFastJSArray( |
| Node* object, Node* context, CodeStubAssembler::FastJSArrayAccessMode mode, |
| Label* if_true, Label* if_false) { |
| // Bail out if the receiver is a Smi. |
| GotoIf(TaggedIsSmi(object), if_false); |
| |
| Node* map = LoadMap(object); |
| |
| // Bail out if the instance type is not JS_ARRAY_TYPE. |
| GotoIf(Word32NotEqual(LoadMapInstanceType(map), Int32Constant(JS_ARRAY_TYPE)), |
| if_false); |
| |
| Node* elements_kind = LoadMapElementsKind(map); |
| |
| // Bail out if the receiver has slow elements. |
| GotoIfNot(IsFastElementsKind(elements_kind), if_false); |
| |
| // Check prototype chain if receiver does not have packed elements. |
| if (mode == FastJSArrayAccessMode::INBOUNDS_READ) { |
| GotoIfNot(IsHoleyFastElementsKind(elements_kind), if_true); |
| } |
| BranchIfPrototypesHaveNoElements(map, if_true, if_false); |
| } |
| |
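| // Inline bump-pointer allocation: load the current allocation top and |
| // limit, bump the top by the (possibly alignment-adjusted) size, and fall |
| // back to a runtime call when the limit would be exceeded or when a large |
| // object allocation was requested. |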
| Node* CodeStubAssembler::AllocateRaw(Node* size_in_bytes, AllocationFlags flags, |
| Node* top_address, Node* limit_address) { |
| Node* top = Load(MachineType::Pointer(), top_address); |
| Node* limit = Load(MachineType::Pointer(), limit_address); |
| |
| // If there's not enough space, call the runtime. |
| VARIABLE(result, MachineRepresentation::kTagged); |
| Label runtime_call(this, Label::kDeferred), no_runtime_call(this); |
| Label merge_runtime(this, &result); |
| |
| bool needs_double_alignment = flags & kDoubleAlignment; |
| |
| if (flags & kAllowLargeObjectAllocation) { |
| Label next(this); |
| GotoIf(IsRegularHeapObjectSize(size_in_bytes), &next); |
| |
| Node* runtime_flags = SmiConstant( |
| Smi::FromInt(AllocateDoubleAlignFlag::encode(needs_double_alignment) | |
| AllocateTargetSpace::encode(AllocationSpace::LO_SPACE))); |
| Node* const runtime_result = |
| CallRuntime(Runtime::kAllocateInTargetSpace, NoContextConstant(), |
| SmiTag(size_in_bytes), runtime_flags); |
| result.Bind(runtime_result); |
| Goto(&merge_runtime); |
| |
| BIND(&next); |
| } |
| |
| VARIABLE(adjusted_size, MachineType::PointerRepresentation(), size_in_bytes); |
| |
| if (needs_double_alignment) { |
| Label not_aligned(this), done_alignment(this, &adjusted_size); |
| |
| Branch(WordAnd(top, IntPtrConstant(kDoubleAlignmentMask)), ¬_aligned, |
| &done_alignment); |
| |
| BIND(¬_aligned); |
| Node* not_aligned_size = IntPtrAdd(size_in_bytes, IntPtrConstant(4)); |
| adjusted_size.Bind(not_aligned_size); |
| Goto(&done_alignment); |
| |
| BIND(&done_alignment); |
| } |
| |
| Node* new_top = IntPtrAdd(top, adjusted_size.value()); |
| |
| Branch(UintPtrGreaterThanOrEqual(new_top, limit), &runtime_call, |
| &no_runtime_call); |
| |
| BIND(&runtime_call); |
| Node* runtime_result; |
| if (flags & kPretenured) { |
| Node* runtime_flags = SmiConstant( |
| Smi::FromInt(AllocateDoubleAlignFlag::encode(needs_double_alignment) | |
| AllocateTargetSpace::encode(AllocationSpace::OLD_SPACE))); |
| runtime_result = |
| CallRuntime(Runtime::kAllocateInTargetSpace, NoContextConstant(), |
| SmiTag(size_in_bytes), runtime_flags); |
| } else { |
| runtime_result = CallRuntime(Runtime::kAllocateInNewSpace, |
| NoContextConstant(), SmiTag(size_in_bytes)); |
| } |
| result.Bind(runtime_result); |
| Goto(&merge_runtime); |
| |
| // When there is enough space, return `top' and bump it up. |
| BIND(&no_runtime_call); |
| Node* no_runtime_result = top; |
| StoreNoWriteBarrier(MachineType::PointerRepresentation(), top_address, |
| new_top); |
| |
| VARIABLE(address, MachineType::PointerRepresentation(), no_runtime_result); |
| |
| if (needs_double_alignment) { |
| Label needs_filler(this), done_filling(this, &address); |
| Branch(IntPtrEqual(adjusted_size.value(), size_in_bytes), &done_filling, |
| &needs_filler); |
| |
| BIND(&needs_filler); |
| // Store a filler and increase the address by kPointerSize. |
| StoreNoWriteBarrier(MachineRepresentation::kTagged, top, |
| LoadRoot(Heap::kOnePointerFillerMapRootIndex)); |
| address.Bind(IntPtrAdd(no_runtime_result, IntPtrConstant(4))); |
| |
| Goto(&done_filling); |
| |
| BIND(&done_filling); |
| } |
| |
| no_runtime_result = BitcastWordToTagged( |
| IntPtrAdd(address.value(), IntPtrConstant(kHeapObjectTag))); |
| |
| result.Bind(no_runtime_result); |
| Goto(&merge_runtime); |
| |
| BIND(&merge_runtime); |
| return result.value(); |
| } |
| |
| Node* CodeStubAssembler::AllocateRawUnaligned(Node* size_in_bytes, |
| AllocationFlags flags, |
| Node* top_address, |
| Node* limit_address) { |
| DCHECK((flags & kDoubleAlignment) == 0); |
| return AllocateRaw(size_in_bytes, flags, top_address, limit_address); |
| } |
| |
| Node* CodeStubAssembler::AllocateRawDoubleAligned(Node* size_in_bytes, |
| AllocationFlags flags, |
| Node* top_address, |
| Node* limit_address) { |
| #if defined(V8_HOST_ARCH_32_BIT) |
| return AllocateRaw(size_in_bytes, flags | kDoubleAlignment, top_address, |
| limit_address); |
| #elif defined(V8_HOST_ARCH_64_BIT) |
| // Allocations on 64-bit machines are naturally double-aligned. |
| return AllocateRaw(size_in_bytes, flags & ~kDoubleAlignment, top_address, |
| limit_address); |
| #else |
| #error Architecture not supported |
| #endif |
| } |
| |
| Node* CodeStubAssembler::AllocateInNewSpace(Node* size_in_bytes, |
| AllocationFlags flags) { |
| DCHECK(flags == kNone || flags == kDoubleAlignment); |
| CSA_ASSERT(this, IsRegularHeapObjectSize(size_in_bytes)); |
| return Allocate(size_in_bytes, flags); |
| } |
| |
| Node* CodeStubAssembler::Allocate(Node* size_in_bytes, AllocationFlags flags) { |
| Comment("Allocate"); |
| bool const new_space = !(flags & kPretenured); |
| Node* top_address = ExternalConstant( |
| new_space |
| ? ExternalReference::new_space_allocation_top_address(isolate()) |
| : ExternalReference::old_space_allocation_top_address(isolate())); |
| DCHECK_EQ(kPointerSize, |
| ExternalReference::new_space_allocation_limit_address(isolate()) |
| .address() - |
| ExternalReference::new_space_allocation_top_address(isolate()) |
| .address()); |
| DCHECK_EQ(kPointerSize, |
| ExternalReference::old_space_allocation_limit_address(isolate()) |
| .address() - |
| ExternalReference::old_space_allocation_top_address(isolate()) |
| .address()); |
| Node* limit_address = IntPtrAdd(top_address, IntPtrConstant(kPointerSize)); |
| |
| if (flags & kDoubleAlignment) { |
| return AllocateRawDoubleAligned(size_in_bytes, flags, top_address, |
| limit_address); |
| } else { |
| return AllocateRawUnaligned(size_in_bytes, flags, top_address, |
| limit_address); |
| } |
| } |
| |
| Node* CodeStubAssembler::AllocateInNewSpace(int size_in_bytes, |
| AllocationFlags flags) { |
| CHECK(flags == kNone || flags == kDoubleAlignment); |
| DCHECK_LE(size_in_bytes, kMaxRegularHeapObjectSize); |
| return CodeStubAssembler::Allocate(IntPtrConstant(size_in_bytes), flags); |
| } |
| |
| Node* CodeStubAssembler::Allocate(int size_in_bytes, AllocationFlags flags) { |
| return CodeStubAssembler::Allocate(IntPtrConstant(size_in_bytes), flags); |
| } |
| |
| Node* CodeStubAssembler::InnerAllocate(Node* previous, Node* offset) { |
| return BitcastWordToTagged(IntPtrAdd(BitcastTaggedToWord(previous), offset)); |
| } |
| |
| Node* CodeStubAssembler::InnerAllocate(Node* previous, int offset) { |
| return InnerAllocate(previous, IntPtrConstant(offset)); |
| } |
| |
| Node* CodeStubAssembler::IsRegularHeapObjectSize(Node* size) { |
| return UintPtrLessThanOrEqual(size, |
| IntPtrConstant(kMaxRegularHeapObjectSize)); |
| } |
| |
| void CodeStubAssembler::BranchIfToBooleanIsTrue(Node* value, Label* if_true, |
| Label* if_false) { |
| Label if_valueissmi(this), if_valueisnotsmi(this), |
| if_valueisheapnumber(this, Label::kDeferred); |
| |
| // Rule out false {value}. |
| GotoIf(WordEqual(value, BooleanConstant(false)), if_false); |
| |
| // Check if {value} is a Smi or a HeapObject. |
| Branch(TaggedIsSmi(value), &if_valueissmi, &if_valueisnotsmi); |
| |
| BIND(&if_valueissmi); |
| { |
| // The {value} is a Smi, only need to check against zero. |
| BranchIfSmiEqual(value, SmiConstant(0), if_false, if_true); |
| } |
| |
| BIND(&if_valueisnotsmi); |
| { |
| // Check if {value} is the empty string. |
| GotoIf(IsEmptyString(value), if_false); |
| |
| // The {value} is a HeapObject, load its map. |
| Node* value_map = LoadMap(value); |
| |
| // Only null, undefined and document.all have the undetectable bit set, |
| // so we can return false immediately when that bit is set. |
| Node* value_map_bitfield = LoadMapBitField(value_map); |
| Node* value_map_undetectable = |
| Word32And(value_map_bitfield, Int32Constant(1 << Map::kIsUndetectable)); |
| |
| // Check if the {value} is undetectable. |
| GotoIfNot(Word32Equal(value_map_undetectable, Int32Constant(0)), if_false); |
| |
| // We still need to handle numbers specially, but all other {value}s |
| // that make it here yield true. |
| Branch(IsHeapNumberMap(value_map), &if_valueisheapnumber, if_true); |
| |
| BIND(&if_valueisheapnumber); |
| { |
| // Load the floating point value of {value}. |
| Node* value_value = LoadObjectField(value, HeapNumber::kValueOffset, |
| MachineType::Float64()); |
| |
| // Check if the floating point {value} is neither 0.0, -0.0 nor NaN. |
| Branch(Float64LessThan(Float64Constant(0.0), Float64Abs(value_value)), |
| if_true, if_false); |
| } |
| } |
| } |
| |
| Node* CodeStubAssembler::LoadFromFrame(int offset, MachineType rep) { |
| Node* frame_pointer = LoadFramePointer(); |
| return Load(rep, frame_pointer, IntPtrConstant(offset)); |
| } |
| |
| Node* CodeStubAssembler::LoadFromParentFrame(int offset, MachineType rep) { |
| Node* frame_pointer = LoadParentFramePointer(); |
| return Load(rep, frame_pointer, IntPtrConstant(offset)); |
| } |
| |
| Node* CodeStubAssembler::LoadBufferObject(Node* buffer, int offset, |
| MachineType rep) { |
| return Load(rep, buffer, IntPtrConstant(offset)); |
| } |
| |
| Node* CodeStubAssembler::LoadObjectField(Node* object, int offset, |
| MachineType rep) { |
| return Load(rep, object, IntPtrConstant(offset - kHeapObjectTag)); |
| } |
| |
| Node* CodeStubAssembler::LoadObjectField(Node* object, Node* offset, |
| MachineType rep) { |
| return Load(rep, object, IntPtrSub(offset, IntPtrConstant(kHeapObjectTag))); |
| } |
| |
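| // On 64-bit targets a Smi keeps its payload in the upper 32 bits of the |
| // word, so the untagged value can be read with a single 32-bit load of |
| // that half (at offset + kPointerSize / 2 on little-endian targets); |
| // 32-bit targets load the whole tagged word and untag it. |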
| Node* CodeStubAssembler::LoadAndUntagObjectField(Node* object, int offset) { |
| if (Is64()) { |
| #if V8_TARGET_LITTLE_ENDIAN |
| offset += kPointerSize / 2; |
| #endif |
| return ChangeInt32ToInt64( |
| LoadObjectField(object, offset, MachineType::Int32())); |
| } else { |
| return SmiToWord(LoadObjectField(object, offset, MachineType::AnyTagged())); |
| } |
| } |
| |
| Node* CodeStubAssembler::LoadAndUntagToWord32ObjectField(Node* object, |
| int offset) { |
| if (Is64()) { |
| #if V8_TARGET_LITTLE_ENDIAN |
| offset += kPointerSize / 2; |
| #endif |
| return LoadObjectField(object, offset, MachineType::Int32()); |
| } else { |
| return SmiToWord32( |
| LoadObjectField(object, offset, MachineType::AnyTagged())); |
| } |
| } |
| |
| Node* CodeStubAssembler::LoadAndUntagSmi(Node* base, int index) { |
| if (Is64()) { |
| #if V8_TARGET_LITTLE_ENDIAN |
| index += kPointerSize / 2; |
| #endif |
| return ChangeInt32ToInt64( |
| Load(MachineType::Int32(), base, IntPtrConstant(index))); |
| } else { |
| return SmiToWord( |
| Load(MachineType::AnyTagged(), base, IntPtrConstant(index))); |
| } |
| } |
| |
| Node* CodeStubAssembler::LoadAndUntagToWord32Root( |
| Heap::RootListIndex root_index) { |
| Node* roots_array_start = |
| ExternalConstant(ExternalReference::roots_array_start(isolate())); |
| int index = root_index * kPointerSize; |
| if (Is64()) { |
| #if V8_TARGET_LITTLE_ENDIAN |
| index += kPointerSize / 2; |
| #endif |
| return Load(MachineType::Int32(), roots_array_start, IntPtrConstant(index)); |
| } else { |
| return SmiToWord32(Load(MachineType::AnyTagged(), roots_array_start, |
| IntPtrConstant(index))); |
| } |
| } |
| |
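| // Stores an untagged integer as a Smi field. On 64-bit targets this is |
| // done with two 32-bit stores, zeroing the tag half of the field and |
| // writing the payload into the other half; 32-bit targets store the |
| // SmiTag'ed value directly. |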
| Node* CodeStubAssembler::StoreAndTagSmi(Node* base, int offset, Node* value) { |
| if (Is64()) { |
| int zero_offset = offset + kPointerSize / 2; |
| int payload_offset = offset; |
| #if V8_TARGET_LITTLE_ENDIAN |
| std::swap(zero_offset, payload_offset); |
| #endif |
| StoreNoWriteBarrier(MachineRepresentation::kWord32, base, |
| IntPtrConstant(zero_offset), Int32Constant(0)); |
| return StoreNoWriteBarrier(MachineRepresentation::kWord32, base, |
| IntPtrConstant(payload_offset), |
| TruncateInt64ToInt32(value)); |
| } else { |
| return StoreNoWriteBarrier(MachineRepresentation::kTaggedSigned, base, |
| IntPtrConstant(offset), SmiTag(value)); |
| } |
| } |
| |
| Node* CodeStubAssembler::LoadHeapNumberValue(Node* object) { |
| return LoadObjectField(object, HeapNumber::kValueOffset, |
| MachineType::Float64()); |
| } |
| |
| Node* CodeStubAssembler::LoadMap(Node* object) { |
| return LoadObjectField(object, HeapObject::kMapOffset); |
| } |
| |
| Node* CodeStubAssembler::LoadInstanceType(Node* object) { |
| return LoadMapInstanceType(LoadMap(object)); |
| } |
| |
| Node* CodeStubAssembler::HasInstanceType(Node* object, |
| InstanceType instance_type) { |
| return Word32Equal(LoadInstanceType(object), Int32Constant(instance_type)); |
| } |
| |
| Node* CodeStubAssembler::DoesntHaveInstanceType(Node* object, |
| InstanceType instance_type) { |
| return Word32NotEqual(LoadInstanceType(object), Int32Constant(instance_type)); |
| } |
| |
| Node* CodeStubAssembler::LoadProperties(Node* object) { |
| return LoadObjectField(object, JSObject::kPropertiesOffset); |
| } |
| |
| Node* CodeStubAssembler::LoadElements(Node* object) { |
| return LoadObjectField(object, JSObject::kElementsOffset); |
| } |
| |
| Node* CodeStubAssembler::LoadJSArrayLength(Node* array) { |
| CSA_ASSERT(this, IsJSArray(array)); |
| return LoadObjectField(array, JSArray::kLengthOffset); |
| } |
| |
| Node* CodeStubAssembler::LoadFixedArrayBaseLength(Node* array) { |
| return LoadObjectField(array, FixedArrayBase::kLengthOffset); |
| } |
| |
| Node* CodeStubAssembler::LoadAndUntagFixedArrayBaseLength(Node* array) { |
| return LoadAndUntagObjectField(array, FixedArrayBase::kLengthOffset); |
| } |
| |
| Node* CodeStubAssembler::LoadMapBitField(Node* map) { |
| CSA_SLOW_ASSERT(this, IsMap(map)); |
| return LoadObjectField(map, Map::kBitFieldOffset, MachineType::Uint8()); |
| } |
| |
| Node* CodeStubAssembler::LoadMapBitField2(Node* map) { |
| CSA_SLOW_ASSERT(this, IsMap(map)); |
| return LoadObjectField(map, Map::kBitField2Offset, MachineType::Uint8()); |
| } |
| |
| Node* CodeStubAssembler::LoadMapBitField3(Node* map) { |
| CSA_SLOW_ASSERT(this, IsMap(map)); |
| return LoadObjectField(map, Map::kBitField3Offset, MachineType::Uint32()); |
| } |
| |
| Node* CodeStubAssembler::LoadMapInstanceType(Node* map) { |
| return LoadObjectField(map, Map::kInstanceTypeOffset, MachineType::Uint8()); |
| } |
| |
| Node* CodeStubAssembler::LoadMapElementsKind(Node* map) { |
| CSA_SLOW_ASSERT(this, IsMap(map)); |
| Node* bit_field2 = LoadMapBitField2(map); |
| return DecodeWord32<Map::ElementsKindBits>(bit_field2); |
| } |
| |
| Node* CodeStubAssembler::LoadMapDescriptors(Node* map) { |
| CSA_SLOW_ASSERT(this, IsMap(map)); |
| return LoadObjectField(map, Map::kDescriptorsOffset); |
| } |
| |
| Node* CodeStubAssembler::LoadMapPrototype(Node* map) { |
| CSA_SLOW_ASSERT(this, IsMap(map)); |
| return LoadObjectField(map, Map::kPrototypeOffset); |
| } |
| |
| Node* CodeStubAssembler::LoadMapPrototypeInfo(Node* map, |
| Label* if_no_proto_info) { |
| CSA_ASSERT(this, IsMap(map)); |
| Node* prototype_info = |
| LoadObjectField(map, Map::kTransitionsOrPrototypeInfoOffset); |
| GotoIf(TaggedIsSmi(prototype_info), if_no_proto_info); |
| GotoIfNot(WordEqual(LoadMap(prototype_info), |
| LoadRoot(Heap::kPrototypeInfoMapRootIndex)), |
| if_no_proto_info); |
| return prototype_info; |
| } |
| |
| Node* CodeStubAssembler::LoadMapInstanceSize(Node* map) { |
| CSA_SLOW_ASSERT(this, IsMap(map)); |
| return ChangeUint32ToWord( |
| LoadObjectField(map, Map::kInstanceSizeOffset, MachineType::Uint8())); |
| } |
| |
| Node* CodeStubAssembler::LoadMapInobjectProperties(Node* map) { |
| CSA_SLOW_ASSERT(this, IsMap(map)); |
| // See Map::GetInObjectProperties() for details. |
| STATIC_ASSERT(LAST_JS_OBJECT_TYPE == LAST_TYPE); |
| CSA_ASSERT(this, |
| Int32GreaterThanOrEqual(LoadMapInstanceType(map), |
| Int32Constant(FIRST_JS_OBJECT_TYPE))); |
| return ChangeUint32ToWord(LoadObjectField( |
| map, Map::kInObjectPropertiesOrConstructorFunctionIndexOffset, |
| MachineType::Uint8())); |
| } |
| |
| Node* CodeStubAssembler::LoadMapConstructorFunctionIndex(Node* map) { |
| CSA_SLOW_ASSERT(this, IsMap(map)); |
| // See Map::GetConstructorFunctionIndex() for details. |
| STATIC_ASSERT(FIRST_PRIMITIVE_TYPE == FIRST_TYPE); |
| CSA_ASSERT(this, Int32LessThanOrEqual(LoadMapInstanceType(map), |
| Int32Constant(LAST_PRIMITIVE_TYPE))); |
| return ChangeUint32ToWord(LoadObjectField( |
| map, Map::kInObjectPropertiesOrConstructorFunctionIndexOffset, |
| MachineType::Uint8())); |
| } |
| |
| Node* CodeStubAssembler::LoadMapConstructor(Node* map) { |
| CSA_SLOW_ASSERT(this, IsMap(map)); |
| VARIABLE(result, MachineRepresentation::kTagged, |
| LoadObjectField(map, Map::kConstructorOrBackPointerOffset)); |
| |
| Label done(this), loop(this, &result); |
| Goto(&loop); |
| BIND(&loop); |
| { |
| GotoIf(TaggedIsSmi(result.value()), &done); |
| Node* is_map_type = |
| Word32Equal(LoadInstanceType(result.value()), Int32Constant(MAP_TYPE)); |
| GotoIfNot(is_map_type, &done); |
| result.Bind( |
| LoadObjectField(result.value(), Map::kConstructorOrBackPointerOffset)); |
| Goto(&loop); |
| } |
| BIND(&done); |
| return result.value(); |
| } |
| |
| Node* CodeStubAssembler::LoadSharedFunctionInfoSpecialField( |
| Node* shared, int offset, ParameterMode mode) { |
| if (Is64()) { |
| Node* result = LoadObjectField(shared, offset, MachineType::Int32()); |
| if (mode == SMI_PARAMETERS) { |
| result = SmiTag(result); |
| } else { |
| result = ChangeUint32ToWord(result); |
| } |
| return result; |
| } else { |
| Node* result = LoadObjectField(shared, offset); |
| if (mode != SMI_PARAMETERS) { |
| result = SmiUntag(result); |
| } |
| return result; |
| } |
| } |
| |
| Node* CodeStubAssembler::LoadNameHashField(Node* name) { |
| CSA_ASSERT(this, IsName(name)); |
| return LoadObjectField(name, Name::kHashFieldOffset, MachineType::Uint32()); |
| } |
| |
| Node* CodeStubAssembler::LoadNameHash(Node* name, Label* if_hash_not_computed) { |
| Node* hash_field = LoadNameHashField(name); |
| if (if_hash_not_computed != nullptr) { |
| GotoIf(Word32Equal( |
| Word32And(hash_field, Int32Constant(Name::kHashNotComputedMask)), |
| Int32Constant(0)), |
| if_hash_not_computed); |
| } |
| return Word32Shr(hash_field, Int32Constant(Name::kHashShift)); |
| } |
| |
| Node* CodeStubAssembler::LoadStringLength(Node* object) { |
| CSA_ASSERT(this, IsString(object)); |
| return LoadObjectField(object, String::kLengthOffset); |
| } |
| |
| Node* CodeStubAssembler::LoadJSValueValue(Node* object) { |
| CSA_ASSERT(this, IsJSValue(object)); |
| return LoadObjectField(object, JSValue::kValueOffset); |
| } |
| |
| Node* CodeStubAssembler::LoadWeakCellValueUnchecked(Node* weak_cell) { |
| // TODO(ishell): fix callers. |
| return LoadObjectField(weak_cell, WeakCell::kValueOffset); |
| } |
| |
| Node* CodeStubAssembler::LoadWeakCellValue(Node* weak_cell, Label* if_cleared) { |
| CSA_ASSERT(this, IsWeakCell(weak_cell)); |
| Node* value = LoadWeakCellValueUnchecked(weak_cell); |
| if (if_cleared != nullptr) { |
| GotoIf(WordEqual(value, IntPtrConstant(0)), if_cleared); |
| } |
| return value; |
| } |
| |
| Node* CodeStubAssembler::LoadFixedArrayElement(Node* object, Node* index_node, |
| int additional_offset, |
| ParameterMode parameter_mode) { |
| int32_t header_size = |
| FixedArray::kHeaderSize + additional_offset - kHeapObjectTag; |
| Node* offset = ElementOffsetFromIndex(index_node, FAST_HOLEY_ELEMENTS, |
| parameter_mode, header_size); |
| return Load(MachineType::AnyTagged(), object, offset); |
| } |
| |
| Node* CodeStubAssembler::LoadFixedTypedArrayElement( |
| Node* data_pointer, Node* index_node, ElementsKind elements_kind, |
| ParameterMode parameter_mode) { |
| Node* offset = |
| ElementOffsetFromIndex(index_node, elements_kind, parameter_mode, 0); |
| MachineType type; |
| switch (elements_kind) { |
| case UINT8_ELEMENTS: /* fall through */ |
| case UINT8_CLAMPED_ELEMENTS: |
| type = MachineType::Uint8(); |
| break; |
| case INT8_ELEMENTS: |
| type = MachineType::Int8(); |
| break; |
| case UINT16_ELEMENTS: |
| type = MachineType::Uint16(); |
| break; |
| case INT16_ELEMENTS: |
| type = MachineType::Int16(); |
| break; |
| case UINT32_ELEMENTS: |
| type = MachineType::Uint32(); |
| break; |
| case INT32_ELEMENTS: |
| type = MachineType::Int32(); |
| break; |
| case FLOAT32_ELEMENTS: |
| type = MachineType::Float32(); |
| break; |
| case FLOAT64_ELEMENTS: |
| type = MachineType::Float64(); |
| break; |
| default: |
| UNREACHABLE(); |
| } |
| return Load(type, data_pointer, offset); |
| } |
| |
| Node* CodeStubAssembler::LoadFixedTypedArrayElementAsTagged( |
| Node* data_pointer, Node* index_node, ElementsKind elements_kind, |
| ParameterMode parameter_mode) { |
| Node* value = LoadFixedTypedArrayElement(data_pointer, index_node, |
| elements_kind, parameter_mode); |
| switch (elements_kind) { |
| case ElementsKind::INT8_ELEMENTS: |
| case ElementsKind::UINT8_CLAMPED_ELEMENTS: |
| case ElementsKind::UINT8_ELEMENTS: |
| case ElementsKind::INT16_ELEMENTS: |
| case ElementsKind::UINT16_ELEMENTS: |
| return SmiFromWord32(value); |
| case ElementsKind::INT32_ELEMENTS: |
| return ChangeInt32ToTagged(value); |
| case ElementsKind::UINT32_ELEMENTS: |
| return ChangeUint32ToTagged(value); |
| case ElementsKind::FLOAT32_ELEMENTS: |
| return AllocateHeapNumberWithValue(ChangeFloat32ToFloat64(value)); |
| case ElementsKind::FLOAT64_ELEMENTS: |
| return AllocateHeapNumberWithValue(value); |
| default: |
| UNREACHABLE(); |
| return nullptr; |
| } |
| } |
| |
| Node* CodeStubAssembler::LoadAndUntagToWord32FixedArrayElement( |
| Node* object, Node* index_node, int additional_offset, |
| ParameterMode parameter_mode) { |
| int32_t header_size = |
| FixedArray::kHeaderSize + additional_offset - kHeapObjectTag; |
| #if V8_TARGET_LITTLE_ENDIAN |
| if (Is64()) { |
| header_size += kPointerSize / 2; |
| } |
| #endif |
| Node* offset = ElementOffsetFromIndex(index_node, FAST_HOLEY_ELEMENTS, |
| parameter_mode, header_size); |
| if (Is64()) { |
| return Load(MachineType::Int32(), object, offset); |
| } else { |
| return SmiToWord32(Load(MachineType::AnyTagged(), object, offset)); |
| } |
| } |
| |
| Node* CodeStubAssembler::LoadFixedDoubleArrayElement( |
| Node* object, Node* index_node, MachineType machine_type, |
| int additional_offset, ParameterMode parameter_mode, Label* if_hole) { |
| CSA_ASSERT(this, IsFixedDoubleArray(object)); |
| int32_t header_size = |
| FixedDoubleArray::kHeaderSize + additional_offset - kHeapObjectTag; |
| Node* offset = ElementOffsetFromIndex(index_node, FAST_HOLEY_DOUBLE_ELEMENTS, |
| parameter_mode, header_size); |
| return LoadDoubleWithHoleCheck(object, offset, if_hole, machine_type); |
| } |
| |
| Node* CodeStubAssembler::LoadDoubleWithHoleCheck(Node* base, Node* offset, |
| Label* if_hole, |
| MachineType machine_type) { |
| if (if_hole) { |
| // TODO(ishell): Compare only the upper part for the hole once the |
| // compiler is able to fold addition of already complex |offset| with |
| // |kIeeeDoubleExponentWordOffset| into one addressing mode. |
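| // The hole is a dedicated NaN bit pattern (kHoleNanInt64), so it is |
| // detected by comparing raw bits (just the upper word on 32-bit targets) |
| // rather than with a floating point comparison. |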
| if (Is64()) { |
| Node* element = Load(MachineType::Uint64(), base, offset); |
| GotoIf(Word64Equal(element, Int64Constant(kHoleNanInt64)), if_hole); |
| } else { |
| Node* element_upper = Load( |
| MachineType::Uint32(), base, |
| IntPtrAdd(offset, IntPtrConstant(kIeeeDoubleExponentWordOffset))); |
| GotoIf(Word32Equal(element_upper, Int32Constant(kHoleNanUpper32)), |
| if_hole); |
| } |
| } |
| if (machine_type.IsNone()) { |
| // This means the actual value is not needed. |
| return nullptr; |
| } |
| return Load(machine_type, base, offset); |
| } |
| |
| Node* CodeStubAssembler::LoadContextElement(Node* context, int slot_index) { |
| int offset = Context::SlotOffset(slot_index); |
| return Load(MachineType::AnyTagged(), context, IntPtrConstant(offset)); |
| } |
| |
| Node* CodeStubAssembler::LoadContextElement(Node* context, Node* slot_index) { |
| Node* offset = |
| IntPtrAdd(WordShl(slot_index, kPointerSizeLog2), |
| IntPtrConstant(Context::kHeaderSize - kHeapObjectTag)); |
| return Load(MachineType::AnyTagged(), context, offset); |
| } |
| |
| Node* CodeStubAssembler::StoreContextElement(Node* context, int slot_index, |
| Node* value) { |
| int offset = Context::SlotOffset(slot_index); |
| return Store(context, IntPtrConstant(offset), value); |
| } |
| |
| Node* CodeStubAssembler::StoreContextElement(Node* context, Node* slot_index, |
| Node* value) { |
| Node* offset = |
| IntPtrAdd(WordShl(slot_index, kPointerSizeLog2), |
| IntPtrConstant(Context::kHeaderSize - kHeapObjectTag)); |
| return Store(context, offset, value); |
| } |
| |
| Node* CodeStubAssembler::StoreContextElementNoWriteBarrier(Node* context, |
| int slot_index, |
| Node* value) { |
| int offset = Context::SlotOffset(slot_index); |
| return StoreNoWriteBarrier(MachineRepresentation::kTagged, context, |
| IntPtrConstant(offset), value); |
| } |
| |
| Node* CodeStubAssembler::LoadNativeContext(Node* context) { |
| return LoadContextElement(context, Context::NATIVE_CONTEXT_INDEX); |
| } |
| |
| Node* CodeStubAssembler::LoadJSArrayElementsMap(ElementsKind kind, |
| Node* native_context) { |
| CSA_ASSERT(this, IsNativeContext(native_context)); |
| return LoadContextElement(native_context, Context::ArrayMapIndex(kind)); |
| } |
| |
| Node* CodeStubAssembler::StoreHeapNumberValue(Node* object, Node* value) { |
| return StoreObjectFieldNoWriteBarrier(object, HeapNumber::kValueOffset, value, |
| MachineRepresentation::kFloat64); |
| } |
| |
| Node* CodeStubAssembler::StoreObjectField( |
| Node* object, int offset, Node* value) { |
| DCHECK_NE(HeapObject::kMapOffset, offset); // Use StoreMap instead. |
| return Store(object, IntPtrConstant(offset - kHeapObjectTag), value); |
| } |
| |
| Node* CodeStubAssembler::StoreObjectField(Node* object, Node* offset, |
| Node* value) { |
| int const_offset; |
| if (ToInt32Constant(offset, const_offset)) { |
| return StoreObjectField(object, const_offset, value); |
| } |
| return Store(object, IntPtrSub(offset, IntPtrConstant(kHeapObjectTag)), |
| value); |
| } |
| |
| Node* CodeStubAssembler::StoreObjectFieldNoWriteBarrier( |
| Node* object, int offset, Node* value, MachineRepresentation rep) { |
| return StoreNoWriteBarrier(rep, object, |
| IntPtrConstant(offset - kHeapObjectTag), value); |
| } |
| |
| Node* CodeStubAssembler::StoreObjectFieldNoWriteBarrier( |
| Node* object, Node* offset, Node* value, MachineRepresentation rep) { |
| int const_offset; |
| if (ToInt32Constant(offset, const_offset)) { |
| return StoreObjectFieldNoWriteBarrier(object, const_offset, value, rep); |
| } |
| return StoreNoWriteBarrier( |
| rep, object, IntPtrSub(offset, IntPtrConstant(kHeapObjectTag)), value); |
| } |
| |
| Node* CodeStubAssembler::StoreMap(Node* object, Node* map) { |
| CSA_SLOW_ASSERT(this, IsMap(map)); |
| return StoreWithMapWriteBarrier( |
| object, IntPtrConstant(HeapObject::kMapOffset - kHeapObjectTag), map); |
| } |
| |
| Node* CodeStubAssembler::StoreMapNoWriteBarrier( |
| Node* object, Heap::RootListIndex map_root_index) { |
| return StoreMapNoWriteBarrier(object, LoadRoot(map_root_index)); |
| } |
| |
| Node* CodeStubAssembler::StoreMapNoWriteBarrier(Node* object, Node* map) { |
| CSA_SLOW_ASSERT(this, IsMap(map)); |
| return StoreNoWriteBarrier( |
| MachineRepresentation::kTagged, object, |
| IntPtrConstant(HeapObject::kMapOffset - kHeapObjectTag), map); |
| } |
| |
| Node* CodeStubAssembler::StoreObjectFieldRoot(Node* object, int offset, |
| Heap::RootListIndex root_index) { |
| if (Heap::RootIsImmortalImmovable(root_index)) { |
| return StoreObjectFieldNoWriteBarrier(object, offset, LoadRoot(root_index)); |
| } else { |
| return StoreObjectField(object, offset, LoadRoot(root_index)); |
| } |
| } |
| |
| Node* CodeStubAssembler::StoreFixedArrayElement(Node* object, Node* index_node, |
| Node* value, |
| WriteBarrierMode barrier_mode, |
| int additional_offset, |
| ParameterMode parameter_mode) { |
| DCHECK(barrier_mode == SKIP_WRITE_BARRIER || |
| barrier_mode == UPDATE_WRITE_BARRIER); |
| int header_size = |
| FixedArray::kHeaderSize + additional_offset - kHeapObjectTag; |
| Node* offset = ElementOffsetFromIndex(index_node, FAST_HOLEY_ELEMENTS, |
| parameter_mode, header_size); |
| if (barrier_mode == SKIP_WRITE_BARRIER) { |
| return StoreNoWriteBarrier(MachineRepresentation::kTagged, object, offset, |
| value); |
| } else { |
| return Store(object, offset, value); |
| } |
| } |
| |
| Node* CodeStubAssembler::StoreFixedDoubleArrayElement( |
| Node* object, Node* index_node, Node* value, ParameterMode parameter_mode) { |
| CSA_ASSERT(this, IsFixedDoubleArray(object)); |
| Node* offset = |
| ElementOffsetFromIndex(index_node, FAST_DOUBLE_ELEMENTS, parameter_mode, |
| FixedArray::kHeaderSize - kHeapObjectTag); |
| MachineRepresentation rep = MachineRepresentation::kFloat64; |
| return StoreNoWriteBarrier(rep, object, offset, value); |
| } |
| |
| Node* CodeStubAssembler::EnsureArrayPushable(Node* receiver, Label* bailout) { |
| // Disallow pushing onto prototypes. It might be the JSArray prototype. |
| // Disallow pushing onto non-extensible objects. |
| Comment("Disallow pushing onto prototypes"); |
| Node* map = LoadMap(receiver); |
| Node* bit_field2 = LoadMapBitField2(map); |
| int mask = static_cast<int>(Map::IsPrototypeMapBits::kMask) | |
| (1 << Map::kIsExtensible); |
| Node* test = Word32And(bit_field2, Int32Constant(mask)); |
| GotoIf(Word32NotEqual(test, Int32Constant(1 << Map::kIsExtensible)), bailout); |
| |
| // Disallow pushing onto arrays in dictionary named property mode. We need |
| // to figure out whether the length property is still writable. |
| Comment("Disallow pushing onto arrays in dictionary named property mode"); |
| GotoIf(IsDictionaryMap(map), bailout); |
| |
| // Check whether the length property is writable. The length property is the |
| // only default named property on arrays. It's nonconfigurable, hence is |
| // guaranteed to stay the first property. |
| Node* descriptors = LoadMapDescriptors(map); |
| Node* details = |
| LoadFixedArrayElement(descriptors, DescriptorArray::ToDetailsIndex(0)); |
| GotoIf(IsSetSmi(details, PropertyDetails::kAttributesReadOnlyMask), bailout); |
| |
| Node* kind = DecodeWord32<Map::ElementsKindBits>(bit_field2); |
| return kind; |
| } |
| |
| void CodeStubAssembler::PossiblyGrowElementsCapacity( |
| ParameterMode mode, ElementsKind kind, Node* array, Node* length, |
| Variable* var_elements, Node* growth, Label* bailout) { |
| Label fits(this, var_elements); |
| Node* capacity = |
| TaggedToParameter(LoadFixedArrayBaseLength(var_elements->value()), mode); |
| // The {length} and {growth} nodes are already in the representation |
| // selected by {mode}. |
| Node* new_length = IntPtrOrSmiAdd(growth, length, mode); |
| GotoIfNot(IntPtrOrSmiGreaterThan(new_length, capacity, mode), &fits); |
| Node* new_capacity = CalculateNewElementsCapacity(new_length, mode); |
| var_elements->Bind(GrowElementsCapacity(array, var_elements->value(), kind, |
| kind, capacity, new_capacity, mode, |
| bailout)); |
| Goto(&fits); |
| BIND(&fits); |
| } |
| |
| Node* CodeStubAssembler::BuildAppendJSArray(ElementsKind kind, Node* array, |
| CodeStubArguments& args, |
| Variable& arg_index, |
| Label* bailout) { |
| Comment("BuildAppendJSArray: %s", ElementsKindToString(kind)); |
| Label pre_bailout(this); |
| Label success(this); |
| VARIABLE(var_tagged_length, MachineRepresentation::kTagged); |
| ParameterMode mode = OptimalParameterMode(); |
| VARIABLE(var_length, OptimalParameterRepresentation(), |
| TaggedToParameter(LoadJSArrayLength(array), mode)); |
| VARIABLE(var_elements, MachineRepresentation::kTagged, LoadElements(array)); |
| |
| // Grow the capacity of the backing fixed array if the pushed arguments do |
| // not fit. |
| Node* first = arg_index.value(); |
| Node* growth = WordToParameter(IntPtrSub(args.GetLength(), first), mode); |
| PossiblyGrowElementsCapacity(mode, kind, array, var_length.value(), |
| &var_elements, growth, &pre_bailout); |
| |
| // Push each argument onto the end of the array now that there is enough |
| // capacity. |
| CodeStubAssembler::VariableList push_vars({&var_length}, zone()); |
| Node* elements = var_elements.value(); |
| args.ForEach( |
| push_vars, |
| [this, kind, mode, elements, &var_length, &pre_bailout](Node* arg) { |
| TryStoreArrayElement(kind, mode, &pre_bailout, elements, |
| var_length.value(), arg); |
| Increment(var_length, 1, mode); |
| }, |
| first, nullptr); |
| { |
| Node* length = ParameterToTagged(var_length.value(), mode); |
| var_tagged_length.Bind(length); |
| StoreObjectFieldNoWriteBarrier(array, JSArray::kLengthOffset, length); |
| Goto(&success); |
| } |
| |
| BIND(&pre_bailout); |
| { |
| Node* length = ParameterToTagged(var_length.value(), mode); |
| var_tagged_length.Bind(length); |
| Node* diff = SmiSub(length, LoadJSArrayLength(array)); |
| StoreObjectFieldNoWriteBarrier(array, JSArray::kLengthOffset, length); |
| arg_index.Bind(IntPtrAdd(arg_index.value(), SmiUntag(diff))); |
| Goto(bailout); |
| } |
| |
| BIND(&success); |
| return var_tagged_length.value(); |
| } |
| |
| void CodeStubAssembler::TryStoreArrayElement(ElementsKind kind, |
| ParameterMode mode, Label* bailout, |
| Node* elements, Node* index, |
| Node* value) { |
| if (IsFastSmiElementsKind(kind)) { |
| GotoIf(TaggedIsNotSmi(value), bailout); |
| } else if (IsFastDoubleElementsKind(kind)) { |
| GotoIfNotNumber(value, bailout); |
| } |
| if (IsFastDoubleElementsKind(kind)) { |
| Node* double_value = ChangeNumberToFloat64(value); |
| StoreFixedDoubleArrayElement(elements, index, |
| Float64SilenceNaN(double_value), mode); |
| } else { |
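| // Storing a Smi never requires a write barrier; for other tagged values |
| // we use the full barrier. |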
| WriteBarrierMode barrier_mode = |
| IsFastSmiElementsKind(kind) ? SKIP_WRITE_BARRIER : UPDATE_WRITE_BARRIER; |
| StoreFixedArrayElement(elements, index, value, barrier_mode, 0, mode); |
| } |
| } |
| |
| void CodeStubAssembler::BuildAppendJSArray(ElementsKind kind, Node* array, |
| Node* value, Label* bailout) { |
| Comment("BuildAppendJSArray: %s", ElementsKindToString(kind)); |
| ParameterMode mode = OptimalParameterMode(); |
| VARIABLE(var_length, OptimalParameterRepresentation(), |
| TaggedToParameter(LoadJSArrayLength(array), mode)); |
| VARIABLE(var_elements, MachineRepresentation::kTagged, LoadElements(array)); |
| |
| // Grow the elements backing store if the new length does not fit. |
| Node* growth = IntPtrOrSmiConstant(1, mode); |
| PossiblyGrowElementsCapacity(mode, kind, array, var_length.value(), |
| &var_elements, growth, bailout); |
| |
| // Push the value onto the end of the array now that there is enough |
| // capacity. |
| TryStoreArrayElement(kind, mode, bailout, var_elements.value(), |
| var_length.value(), value); |
| Increment(var_length, 1, mode); |
| |
| Node* length = ParameterToTagged(var_length.value(), mode); |
| StoreObjectFieldNoWriteBarrier(array, JSArray::kLengthOffset, length); |
| } |
| |
| Node* CodeStubAssembler::AllocateHeapNumber(MutableMode mode) { |
| Node* result = Allocate(HeapNumber::kSize, kNone); |
| Heap::RootListIndex heap_map_index = |
| mode == IMMUTABLE ? Heap::kHeapNumberMapRootIndex |
| : Heap::kMutableHeapNumberMapRootIndex; |
| StoreMapNoWriteBarrier(result, heap_map_index); |
| return result; |
| } |
| |
| Node* CodeStubAssembler::AllocateHeapNumberWithValue(Node* value, |
| MutableMode mode) { |
| Node* result = AllocateHeapNumber(mode); |
| StoreHeapNumberValue(result, value); |
| return result; |
| } |
| |
| Node* CodeStubAssembler::AllocateSeqOneByteString(int length, |
| AllocationFlags flags) { |
| Comment("AllocateSeqOneByteString"); |
| if (length == 0) { |
| return LoadRoot(Heap::kempty_stringRootIndex); |
| } |
| Node* result = Allocate(SeqOneByteString::SizeFor(length), flags); |
| DCHECK(Heap::RootIsImmortalImmovable(Heap::kOneByteStringMapRootIndex)); |
| StoreMapNoWriteBarrier(result, Heap::kOneByteStringMapRootIndex); |
| StoreObjectFieldNoWriteBarrier(result, SeqOneByteString::kLengthOffset, |
| SmiConstant(Smi::FromInt(length))); |
| // Initialize both used and unused parts of hash field slot at once. |
| StoreObjectFieldNoWriteBarrier(result, SeqOneByteString::kHashFieldSlot, |
| IntPtrConstant(String::kEmptyHashField), |
| MachineType::PointerRepresentation()); |
| return result; |
| } |
| |
| Node* CodeStubAssembler::AllocateSeqOneByteString(Node* context, Node* length, |
| ParameterMode mode, |
| AllocationFlags flags) { |
| Comment("AllocateSeqOneByteString"); |
| VARIABLE(var_result, MachineRepresentation::kTagged); |
| |
| // Compute the SeqOneByteString size and check if it fits into new space. |
| Label if_lengthiszero(this), if_sizeissmall(this), |
| if_notsizeissmall(this, Label::kDeferred), if_join(this); |
| GotoIf(WordEqual(length, IntPtrOrSmiConstant(0, mode)), &if_lengthiszero); |
| |
| Node* raw_size = GetArrayAllocationSize( |
| length, UINT8_ELEMENTS, mode, |
| SeqOneByteString::kHeaderSize + kObjectAlignmentMask); |
| Node* size = WordAnd(raw_size, IntPtrConstant(~kObjectAlignmentMask)); |
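| // The string can be allocated in new space only if its total size does |
| // not exceed the largest regular heap object size. |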
| Branch(IntPtrLessThanOrEqual(size, IntPtrConstant(kMaxRegularHeapObjectSize)), |
| &if_sizeissmall, &if_notsizeissmall); |
| |
| BIND(&if_sizeissmall); |
| { |
| // Just allocate the SeqOneByteString in new space. |
| Node* result = AllocateInNewSpace(size, flags); |
| DCHECK(Heap::RootIsImmortalImmovable(Heap::kOneByteStringMapRootIndex)); |
| StoreMapNoWriteBarrier(result, Heap::kOneByteStringMapRootIndex); |
| StoreObjectFieldNoWriteBarrier(result, SeqOneByteString::kLengthOffset, |
| ParameterToTagged(length, mode)); |
| // Initialize both used and unused parts of hash field slot at once. |
| StoreObjectFieldNoWriteBarrier(result, SeqOneByteString::kHashFieldSlot, |
| IntPtrConstant(String::kEmptyHashField), |
| MachineType::PointerRepresentation()); |
| var_result.Bind(result); |
| Goto(&if_join); |
| } |
| |
| BIND(&if_notsizeissmall); |
| { |
| // We might need to allocate in large object space, go to the runtime. |
| Node* result = CallRuntime(Runtime::kAllocateSeqOneByteString, context, |
| ParameterToTagged(length, mode)); |
| var_result.Bind(result); |
| Goto(&if_join); |
| } |
| |
| BIND(&if_lengthiszero); |
| { |
| var_result.Bind(LoadRoot(Heap::kempty_stringRootIndex)); |
| Goto(&if_join); |
| } |
| |
| BIND(&if_join); |
| return var_result.value(); |
| } |
| |
| Node* CodeStubAssembler::AllocateSeqTwoByteString(int length, |
| AllocationFlags flags) { |
| Comment("AllocateSeqTwoByteString"); |
| if (length == 0) { |
| return LoadRoot(Heap::kempty_stringRootIndex); |
| } |
| Node* result = Allocate(SeqTwoByteString::SizeFor(length), flags); |
| DCHECK(Heap::RootIsImmortalImmovable(Heap::kStringMapRootIndex)); |
| StoreMapNoWriteBarrier(result, Heap::kStringMapRootIndex); |
| StoreObjectFieldNoWriteBarrier(result, SeqTwoByteString::kLengthOffset, |
| SmiConstant(Smi::FromInt(length))); |
| // Initialize both used and unused parts of hash field slot at once. |
| StoreObjectFieldNoWriteBarrier(result, SeqTwoByteString::kHashFieldSlot, |
| IntPtrConstant(String::kEmptyHashField), |
| MachineType::PointerRepresentation()); |
| return result; |
| } |
| |
| Node* CodeStubAssembler::AllocateSeqTwoByteString(Node* context, Node* length, |
| ParameterMode mode, |
| AllocationFlags flags) { |
| Comment("AllocateSeqTwoByteString"); |
| VARIABLE(var_result, MachineRepresentation::kTagged); |
| |
| // Compute the SeqTwoByteString size and check if it fits into new space. |
| Label if_lengthiszero(this), if_sizeissmall(this), |
| if_notsizeissmall(this, Label::kDeferred), if_join(this); |
| GotoIf(WordEqual(length, IntPtrOrSmiConstant(0, mode)), &if_lengthiszero); |
| |
| Node* raw_size = GetArrayAllocationSize( |
| length, UINT16_ELEMENTS, mode, |
| SeqTwoByteString::kHeaderSize + kObjectAlignmentMask); |
| Node* size = WordAnd(raw_size, IntPtrConstant(~kObjectAlignmentMask)); |
| Branch(IntPtrLessThanOrEqual(size, IntPtrConstant(kMaxRegularHeapObjectSize)), |
| &if_sizeissmall, &if_notsizeissmall); |
| |
| BIND(&if_sizeissmall); |
| { |
| // Just allocate the SeqTwoByteString in new space. |
| Node* result = AllocateInNewSpace(size, flags); |
| DCHECK(Heap::RootIsImmortalImmovable(Heap::kStringMapRootIndex)); |
| StoreMapNoWriteBarrier(result, Heap::kStringMapRootIndex); |
| StoreObjectFieldNoWriteBarrier( |
| result, SeqTwoByteString::kLengthOffset, |
| mode == SMI_PARAMETERS ? length : SmiFromWord(length)); |
| // Initialize both used and unused parts of hash field slot at once. |
| StoreObjectFieldNoWriteBarrier(result, SeqTwoByteString::kHashFieldSlot, |
| IntPtrConstant(String::kEmptyHashField), |
| MachineType::PointerRepresentation()); |
| var_result.Bind(result); |
| Goto(&if_join); |
| } |
| |
| BIND(&if_notsizeissmall); |
| { |
| // We might need to allocate in large object space, go to the runtime. |
| Node* result = |
| CallRuntime(Runtime::kAllocateSeqTwoByteString, context, |
| mode == SMI_PARAMETERS ? length : SmiFromWord(length)); |
| var_result.Bind(result); |
| Goto(&if_join); |
| } |
| |
| BIND(&if_lengthiszero); |
| { |
| var_result.Bind(LoadRoot(Heap::kempty_stringRootIndex)); |
| Goto(&if_join); |
| } |
| |
| BIND(&if_join); |
| return var_result.value(); |
| } |
| |
| Node* CodeStubAssembler::AllocateSlicedString( |
| Heap::RootListIndex map_root_index, Node* length, Node* parent, |
| Node* offset) { |
| CSA_ASSERT(this, TaggedIsSmi(length)); |
| Node* result = Allocate(SlicedString::kSize); |
| DCHECK(Heap::RootIsImmortalImmovable(map_root_index)); |
| StoreMapNoWriteBarrier(result, map_root_index); |
| StoreObjectFieldNoWriteBarrier(result, SlicedString::kLengthOffset, length, |
| MachineRepresentation::kTagged); |
| // Initialize both used and unused parts of hash field slot at once. |
| StoreObjectFieldNoWriteBarrier(result, SlicedString::kHashFieldSlot, |
| IntPtrConstant(String::kEmptyHashField), |
| MachineType::PointerRepresentation()); |
| StoreObjectFieldNoWriteBarrier(result, SlicedString::kParentOffset, parent, |
| MachineRepresentation::kTagged); |
| StoreObjectFieldNoWriteBarrier(result, SlicedString::kOffsetOffset, offset, |
| MachineRepresentation::kTagged); |
| return result; |
| } |
| |
| Node* CodeStubAssembler::AllocateSlicedOneByteString(Node* length, Node* parent, |
| Node* offset) { |
| return AllocateSlicedString(Heap::kSlicedOneByteStringMapRootIndex, length, |
| parent, offset); |
| } |
| |
| Node* CodeStubAssembler::AllocateSlicedTwoByteString(Node* length, Node* parent, |
| Node* offset) { |
| return AllocateSlicedString(Heap::kSlicedStringMapRootIndex, length, parent, |
| offset); |
| } |
| |
| Node* CodeStubAssembler::AllocateConsString(Heap::RootListIndex map_root_index, |
| Node* length, Node* first, |
| Node* second, |
| AllocationFlags flags) { |
| CSA_ASSERT(this, TaggedIsSmi(length)); |
| Node* result = Allocate(ConsString::kSize, flags); |
| DCHECK(Heap::RootIsImmortalImmovable(map_root_index)); |
| StoreMapNoWriteBarrier(result, map_root_index); |
| StoreObjectFieldNoWriteBarrier(result, ConsString::kLengthOffset, length, |
| MachineRepresentation::kTagged); |
| // Initialize both used and unused parts of hash field slot at once. |
| StoreObjectFieldNoWriteBarrier(result, ConsString::kHashFieldSlot, |
| IntPtrConstant(String::kEmptyHashField), |
| MachineType::PointerRepresentation()); |
| bool const new_space = !(flags & kPretenured); |
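| // For new-space allocations the field stores can skip the write barrier; |
| // pretenured (old-space) cons strings use StoreObjectField, which keeps |
| // the barrier. |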
| if (new_space) { |
| StoreObjectFieldNoWriteBarrier(result, ConsString::kFirstOffset, first, |
| MachineRepresentation::kTagged); |
| StoreObjectFieldNoWriteBarrier(result, ConsString::kSecondOffset, second, |
| MachineRepresentation::kTagged); |
| } else { |
| StoreObjectField(result, ConsString::kFirstOffset, first); |
| StoreObjectField(result, ConsString::kSecondOffset, second); |
| } |
| return result; |
| } |
| |
| Node* CodeStubAssembler::AllocateOneByteConsString(Node* length, Node* first, |
| Node* second, |
| AllocationFlags flags) { |
| return AllocateConsString(Heap::kConsOneByteStringMapRootIndex, length, first, |
| second, flags); |
| } |
| |
| Node* CodeStubAssembler::AllocateTwoByteConsString(Node* length, Node* first, |
| Node* second, |
| AllocationFlags flags) { |
| return AllocateConsString(Heap::kConsStringMapRootIndex, length, first, |
| second, flags); |
| } |
| |
| Node* CodeStubAssembler::NewConsString(Node* context, Node* length, Node* left, |
| Node* right, AllocationFlags flags) { |
| CSA_ASSERT(this, TaggedIsSmi(length)); |
| // The newly created string can be a cons string. |
| Comment("Allocating ConsString"); |
| Node* left_instance_type = LoadInstanceType(left); |
| Node* right_instance_type = LoadInstanceType(right); |
| |
| // Compute intersection and difference of instance types. |
| Node* anded_instance_types = |
| Word32And(left_instance_type, right_instance_type); |
| Node* xored_instance_types = |
| Word32Xor(left_instance_type, right_instance_type); |
| |
| // We create a one-byte cons string if |
| // 1. both strings are one-byte, or |
| // 2. at least one of the strings is two-byte, but happens to contain only |
| // one-byte characters. |
| // To do this, we check |
| // 1. if both strings are one-byte, or if the one-byte data hint is set in |
| // both strings, or |
| // 2. if one of the strings has the one-byte data hint set and the other |
| // string is one-byte. |
| STATIC_ASSERT(kOneByteStringTag != 0); |
| STATIC_ASSERT(kOneByteDataHintTag != 0); |
| Label one_byte_map(this); |
| Label two_byte_map(this); |
| VARIABLE(result, MachineRepresentation::kTagged); |
| Label done(this, &result); |
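| // Check 1: both strings are one-byte, or the one-byte data hint is set in |
| // both of them. |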
| GotoIf(Word32NotEqual(Word32And(anded_instance_types, |
| Int32Constant(kStringEncodingMask | |
| kOneByteDataHintTag)), |
| Int32Constant(0)), |
| &one_byte_map); |
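| // Check 2: one string is one-byte and the other has the one-byte data |
| // hint set. |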
| Branch(Word32NotEqual(Word32And(xored_instance_types, |
| Int32Constant(kStringEncodingMask | |
| kOneByteDataHintMask)), |
| Int32Constant(kOneByteStringTag | kOneByteDataHintTag)), |
| &two_byte_map, &one_byte_map); |
| |
| BIND(&one_byte_map); |
| Comment("One-byte ConsString"); |
| result.Bind(AllocateOneByteConsString(length, left, right, flags)); |
| Goto(&done); |
| |
| BIND(&two_byte_map); |
| Comment("Two-byte ConsString"); |
| result.Bind(AllocateTwoByteConsString(length, left, right, flags)); |
| Goto(&done); |
| |
| BIND(&done); |
| |
| return result.value(); |
| } |
| |
| Node* CodeStubAssembler::AllocateRegExpResult(Node* context, Node* length, |
| Node* index, Node* input) { |
| Node* const max_length = |
| SmiConstant(Smi::FromInt(JSArray::kInitialMaxFastElementArray)); |
| CSA_ASSERT(this, SmiLessThanOrEqual(length, max_length)); |
| USE(max_length); |
| |
| // Allocate the JSRegExpResult. |
| // TODO(jgruber): Fold JSArray and FixedArray allocations, then remove |
| // unneeded store of elements. |
| Node* const result = Allocate(JSRegExpResult::kSize); |
| |
| // TODO(jgruber): Store map as Heap constant? |
| Node* const native_context = LoadNativeContext(context); |
| Node* const map = |
| LoadContextElement(native_context, Context::REGEXP_RESULT_MAP_INDEX); |
| StoreMapNoWriteBarrier(result, map); |
| |
| // Initialize the header before allocating the elements. |
| Node* const empty_array = EmptyFixedArrayConstant(); |
| DCHECK(Heap::RootIsImmortalImmovable(Heap::kEmptyFixedArrayRootIndex)); |
| StoreObjectFieldNoWriteBarrier(result, JSArray::kPropertiesOffset, |
| empty_array); |
| StoreObjectFieldNoWriteBarrier(result, JSArray::kElementsOffset, empty_array); |
| StoreObjectFieldNoWriteBarrier(result, JSArray::kLengthOffset, length); |
| |
| StoreObjectFieldNoWriteBarrier(result, JSRegExpResult::kIndexOffset, index); |
| StoreObjectField(result, JSRegExpResult::kInputOffset, input); |
| |
| Node* const zero = IntPtrConstant(0); |
| Node* const length_intptr = SmiUntag(length); |
| const ElementsKind elements_kind = FAST_ELEMENTS; |
| |
| Node* const elements = AllocateFixedArray(elements_kind, length_intptr); |
| StoreObjectField(result, JSArray::kElementsOffset, elements); |
| |
| // Fill in the elements with undefined. |
| FillFixedArrayWithValue(elements_kind, elements, zero, length_intptr, |
| Heap::kUndefinedValueRootIndex); |
| |
| return result; |
| } |
| |
| Node* CodeStubAssembler::AllocateNameDictionary(int at_least_space_for) { |
| return AllocateNameDictionary(IntPtrConstant(at_least_space_for)); |
| } |
| |
| Node* CodeStubAssembler::AllocateNameDictionary(Node* at_least_space_for) { |
| CSA_ASSERT(this, UintPtrLessThanOrEqual( |
| at_least_space_for, |
| IntPtrConstant(NameDictionary::kMaxCapacity))); |
| |
| Node* capacity = HashTableComputeCapacity(at_least_space_for); |
| CSA_ASSERT(this, WordIsPowerOfTwo(capacity)); |
| |
| Node* length = EntryToIndex<NameDictionary>(capacity); |
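| // Backing store size in bytes: the FixedArray header plus one pointer per |
| // slot. |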
| Node* store_size = |
| IntPtrAdd(WordShl(length, IntPtrConstant(kPointerSizeLog2)), |
| IntPtrConstant(NameDictionary::kHeaderSize)); |
| |
| Node* result = AllocateInNewSpace(store_size); |
| Comment("Initialize NameDictionary"); |
| // Initialize FixedArray fields. |
| DCHECK(Heap::RootIsImmortalImmovable(Heap::kHashTableMapRootIndex)); |
| StoreMapNoWriteBarrier(result, Heap::kHashTableMapRootIndex); |
| StoreObjectFieldNoWriteBarrier(result, FixedArray::kLengthOffset, |
| SmiFromWord(length)); |
| // Initialize HashTable fields. |
| Node* zero = SmiConstant(0); |
| StoreFixedArrayElement(result, NameDictionary::kNumberOfElementsIndex, zero, |
| SKIP_WRITE_BARRIER); |
| StoreFixedArrayElement(result, NameDictionary::kNumberOfDeletedElementsIndex, |
| zero, SKIP_WRITE_BARRIER); |
| StoreFixedArrayElement(result, NameDictionary::kCapacityIndex, |
| SmiTag(capacity), SKIP_WRITE_BARRIER); |
| // Initialize Dictionary fields. |
| Node* filler = LoadRoot(Heap::kUndefinedValueRootIndex); |
| StoreFixedArrayElement(result, NameDictionary::kMaxNumberKeyIndex, filler, |
| SKIP_WRITE_BARRIER); |
| StoreFixedArrayElement(result, NameDictionary::kNextEnumerationIndexIndex, |
| SmiConstant(PropertyDetails::kInitialIndex), |
| SKIP_WRITE_BARRIER); |
| |
| // Initialize NameDictionary elements. |
| Node* result_word = BitcastTaggedToWord(result); |
| Node* start_address = IntPtrAdd( |
| result_word, IntPtrConstant(NameDictionary::OffsetOfElementAt( |
| NameDictionary::kElementsStartIndex) - |
| kHeapObjectTag)); |
| Node* end_address = IntPtrAdd( |
| result_word, IntPtrSub(store_size, IntPtrConstant(kHeapObjectTag))); |
| StoreFieldsNoWriteBarrier(start_address, end_address, filler); |
| return result; |
| } |
| |
| Node* CodeStubAssembler::AllocateJSObjectFromMap(Node* map, Node* properties, |
| Node* elements, |
| AllocationFlags flags) { |
| CSA_ASSERT(this, IsMap(map)); |
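| // The instance size loaded from the map is in words; scale it to bytes. |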
| Node* size = |
| IntPtrMul(LoadMapInstanceSize(map), IntPtrConstant(kPointerSize)); |
| Node* object = AllocateInNewSpace(size, flags); |
| StoreMapNoWriteBarrier(object, map); |
| InitializeJSObjectFromMap(object, map, size, properties, elements); |
| return object; |
| } |
| |
| void CodeStubAssembler::InitializeJSObjectFromMap(Node* object, Node* map, |
| Node* size, Node* properties, |
| Node* elements) { |
| // This helper assumes that the object is in new-space, as guarded by the |
| // check in AllocateJSObjectFromMap. |
| if (properties == nullptr) { |
| CSA_ASSERT(this, Word32BinaryNot(IsDictionaryMap(map))); |
| StoreObjectFieldRoot(object, JSObject::kPropertiesOffset, |
| Heap::kEmptyFixedArrayRootIndex); |
| } else { |
| StoreObjectFieldNoWriteBarrier(object, JSObject::kPropertiesOffset, |
| properties); |
| } |
| if (elements == nullptr) { |
| StoreObjectFieldRoot(object, JSObject::kElementsOffset, |
| Heap::kEmptyFixedArrayRootIndex); |
| } else { |
| StoreObjectFieldNoWriteBarrier(object, JSObject::kElementsOffset, elements); |
| } |
| InitializeJSObjectBody(object, map, size, JSObject::kHeaderSize); |
| } |
| |
| void CodeStubAssembler::InitializeJSObjectBody(Node* object, Node* map, |
| Node* size, int start_offset) { |
| // TODO(cbruni): activate in-object slack tracking machinery. |
| Comment("InitializeJSObjectBody"); |
| Node* filler = LoadRoot(Heap::kUndefinedValueRootIndex); |
| // Calculate the untagged field addresses. |
| object = BitcastTaggedToWord(object); |
| Node* start_address = |
| IntPtrAdd(object, IntPtrConstant(start_offset - kHeapObjectTag)); |
| Node* end_address = |
| IntPtrSub(IntPtrAdd(object, size), IntPtrConstant(kHeapObjectTag)); |
| StoreFieldsNoWriteBarrier(start_address, end_address, filler); |
| } |
| |
| void CodeStubAssembler::StoreFieldsNoWriteBarrier(Node* start_address, |
| Node* end_address, |
| Node* value) { |
| Comment("StoreFieldsNoWriteBarrier"); |
| CSA_ASSERT(this, WordIsWordAligned(start_address)); |
| CSA_ASSERT(this, WordIsWordAligned(end_address)); |
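| // Fill every pointer-sized slot in [start_address, end_address) with the |
| // given value. |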
| BuildFastLoop(start_address, end_address, |
| [this, value](Node* current) { |
| StoreNoWriteBarrier(MachineRepresentation::kTagged, current, |
| value); |
| }, |
| kPointerSize, INTPTR_PARAMETERS, IndexAdvanceMode::kPost); |
| } |
| |
| Node* CodeStubAssembler::AllocateUninitializedJSArrayWithoutElements( |
| ElementsKind kind, Node* array_map, Node* length, Node* allocation_site) { |
| Comment("begin allocation of JSArray without elements"); |
| int base_size = JSArray::kSize; |
| if (allocation_site != nullptr) { |
| base_size += AllocationMemento::kSize; |
| } |
| |
| Node* size = IntPtrConstant(base_size); |
| Node* array = AllocateUninitializedJSArray(kind, array_map, length, |
| allocation_site, size); |
| return array; |
| } |
| |
| std::pair<Node*, Node*> |
| CodeStubAssembler::AllocateUninitializedJSArrayWithElements( |
| ElementsKind kind, Node* array_map, Node* length, Node* allocation_site, |
| Node* capacity, ParameterMode capacity_mode) { |
| Comment("begin allocation of JSArray with elements"); |
| int base_size = JSArray::kSize; |
| |
| if (allocation_site != nullptr) { |
| base_size += AllocationMemento::kSize; |
| } |
| |
| int elements_offset = base_size; |
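| // The elements FixedArray is carved out of the same allocation, starting |
| // at elements_offset past the JSArray (and optional AllocationMemento). |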
| |
| // Compute space for the elements. |
| base_size += FixedArray::kHeaderSize; |
| Node* size = ElementOffsetFromIndex(capacity, kind, capacity_mode, base_size); |
| |
| Node* array = AllocateUninitializedJSArray(kind, array_map, length, |
| allocation_site, size); |
| |
| Node* elements = InnerAllocate(array, elements_offset); |
| StoreObjectFieldNoWriteBarrier(array, JSObject::kElementsOffset, elements); |
| |
| return {array, elements}; |
| } |
| |
| Node* CodeStubAssembler::AllocateUninitializedJSArray(ElementsKind kind, |
| Node* array_map, |
| Node* length, |
| Node* allocation_site, |
| Node* size_in_bytes) { |
| // Allocate space for the JSArray and the elements FixedArray in one go. |
| Node* array = AllocateInNewSpace(size_in_bytes); |
| |
| Comment("write JSArray headers"); |
| StoreMapNoWriteBarrier(array, array_map); |
| |
| CSA_ASSERT(this, TaggedIsSmi(length)); |
| StoreObjectFieldNoWriteBarrier(array, JSArray::kLengthOffset, length); |
| |
| StoreObjectFieldRoot(array, JSArray::kPropertiesOffset, |
| Heap::kEmptyFixedArrayRootIndex); |
| |
| if (allocation_site != nullptr) { |
| InitializeAllocationMemento(array, JSArray::kSize, allocation_site); |
| } |
| return array; |
| } |
| |
| Node* CodeStubAssembler::AllocateJSArray(ElementsKind kind, Node* array_map, |
| Node* capacity, Node* length, |
| Node* allocation_site, |
| ParameterMode capacity_mode) { |
| Node *array = nullptr, *elements = nullptr; |
| if (IsIntPtrOrSmiConstantZero(capacity)) { |
| // Array is empty. Use the shared empty fixed array instead of allocating a |
| // new one. |
| array = AllocateUninitializedJSArrayWithoutElements(kind, array_map, length, |
| nullptr); |
| StoreObjectFieldRoot(array, JSArray::kElementsOffset, |
| Heap::kEmptyFixedArrayRootIndex); |
| } else { |
| // Allocate both array and elements object, and initialize the JSArray. |
| std::tie(array, elements) = AllocateUninitializedJSArrayWithElements( |
| kind, array_map, length, allocation_site, capacity, capacity_mode); |
| // Set up the elements object. |
| Heap::RootListIndex elements_map_index = |
| IsFastDoubleElementsKind(kind) ? Heap::kFixedDoubleArrayMapRootIndex |
| : Heap::kFixedArrayMapRootIndex; |
| DCHECK(Heap::RootIsImmortalImmovable(elements_map_index)); |
| StoreMapNoWriteBarrier(elements, elements_map_index); |
| StoreObjectFieldNoWriteBarrier(elements, FixedArray::kLengthOffset, |
| ParameterToTagged(capacity, capacity_mode)); |
| // Fill in the elements with holes. |
| FillFixedArrayWithValue(kind, elements, |
| IntPtrOrSmiConstant(0, capacity_mode), capacity, |
| Heap::kTheHoleValueRootIndex, capacity_mode); |
| } |
| |
| return array; |
| } |
| |
| Node* CodeStubAssembler::AllocateFixedArray(ElementsKind kind, |
| Node* capacity_node, |
| ParameterMode mode, |
| AllocationFlags flags) { |
| CSA_ASSERT(this, IntPtrOrSmiGreaterThan(capacity_node, |
| IntPtrOrSmiConstant(0, mode), mode)); |
| Node* total_size = GetFixedArrayAllocationSize(capacity_node, kind, mode); |
| |
| // Allocate the FixedArray and initialize its map and length. |
| Node* array = Allocate(total_size, flags); |
| Heap::RootListIndex map_index = IsFastDoubleElementsKind(kind) |
| ? Heap::kFixedDoubleArrayMapRootIndex |
| : Heap::kFixedArrayMapRootIndex; |
| DCHECK(Heap::RootIsImmortalImmovable(map_index)); |
| StoreMapNoWriteBarrier(array, map_index); |
| StoreObjectFieldNoWriteBarrier(array, FixedArray::kLengthOffset, |
| ParameterToTagged(capacity_node, mode)); |
| return array; |
| } |
| |
| void CodeStubAssembler::FillFixedArrayWithValue( |
| ElementsKind kind, Node* array, Node* from_node, Node* to_node, |
| Heap::RootListIndex value_root_index, ParameterMode mode) { |
| bool is_double = IsFastDoubleElementsKind(kind); |
| DCHECK(value_root_index == Heap::kTheHoleValueRootIndex || |
| value_root_index == Heap::kUndefinedValueRootIndex); |
| DCHECK_IMPLIES(is_double, value_root_index == Heap::kTheHoleValueRootIndex); |
| STATIC_ASSERT(kHoleNanLower32 == kHoleNanUpper32); |
| Node* double_hole = |
| Is64() ? Int64Constant(kHoleNanInt64) : Int32Constant(kHoleNanLower32); |
| Node* value = LoadRoot(value_root_index); |
| |
| BuildFastFixedArrayForEach( |
| array, kind, from_node, to_node, |
| [this, value, is_double, double_hole](Node* array, Node* offset) { |
| if (is_double) { |
| // Don't use doubles to store the hole double, since manipulating the |
| // signaling NaN used for the hole in C++, e.g. with bit_cast, will |
| // change its value on ia32 (the x87 stack is used to return values |
| // and stores to the stack silently clear the signalling bit). |
| // |
| // TODO(danno): When we have a Float32/Float64 wrapper class that |
| // preserves double bits during manipulation, remove this code/change |
| // this to an indexed Float64 store. |
| if (Is64()) { |
| StoreNoWriteBarrier(MachineRepresentation::kWord64, array, offset, |
| double_hole); |
| } else { |
| StoreNoWriteBarrier(MachineRepresentation::kWord32, array, offset, |
| double_hole); |
| StoreNoWriteBarrier(MachineRepresentation::kWord32, array, |
| IntPtrAdd(offset, IntPtrConstant(kPointerSize)), |
| double_hole); |
| } |
| } else { |
| StoreNoWriteBarrier(MachineRepresentation::kTagged, array, offset, |
| value); |
| } |
| }, |
| mode); |
| } |
| |
| void CodeStubAssembler::CopyFixedArrayElements( |
| ElementsKind from_kind, Node* from_array, ElementsKind to_kind, |
| Node* to_array, Node* element_count, Node* capacity, |
| WriteBarrierMode barrier_mode, ParameterMode mode) { |
| STATIC_ASSERT(FixedArray::kHeaderSize == FixedDoubleArray::kHeaderSize); |
| const int first_element_offset = FixedArray::kHeaderSize - kHeapObjectTag; |
| Comment("[ CopyFixedArrayElements"); |
| |
| // Typed array elements are not supported. |
| DCHECK(!IsFixedTypedArrayElementsKind(from_kind)); |
| DCHECK(!IsFixedTypedArrayElementsKind(to_kind)); |
| |
| Label done(this); |
| bool from_double_elements = IsFastDoubleElementsKind(from_kind); |
| bool to_double_elements = IsFastDoubleElementsKind(to_kind); |
| bool element_size_matches = |
| Is64() || |
| IsFastDoubleElementsKind(from_kind) == IsFastDoubleElementsKind(to_kind); |
| bool doubles_to_objects_conversion = |
| IsFastDoubleElementsKind(from_kind) && IsFastObjectElementsKind(to_kind); |
| bool needs_write_barrier = |
| doubles_to_objects_conversion || (barrier_mode == UPDATE_WRITE_BARRIER && |
| IsFastObjectElementsKind(to_kind)); |
| Node* double_hole = |
| Is64() ? Int64Constant(kHoleNanInt64) : Int32Constant(kHoleNanLower32); |
| |
| if (doubles_to_objects_conversion) { |
| // If the copy might trigger a GC, make sure that the FixedArray is |
| // pre-initialized with holes so that it is always in a consistent state. |
| FillFixedArrayWithValue(to_kind, to_array, IntPtrOrSmiConstant(0, mode), |
| capacity, Heap::kTheHoleValueRootIndex, mode); |
| } else if (element_count != capacity) { |
| FillFixedArrayWithValue(to_kind, to_array, element_count, capacity, |
| Heap::kTheHoleValueRootIndex, mode); |
| } |
| |
| Node* limit_offset = ElementOffsetFromIndex( |
| IntPtrOrSmiConstant(0, mode), from_kind, mode, first_element_offset); |
| VARIABLE(var_from_offset, MachineType::PointerRepresentation(), |
| ElementOffsetFromIndex(element_count, from_kind, mode, |
| first_element_offset)); |
| // This second variable is used only when the element sizes of source and |
| // destination arrays do not match. |
| VARIABLE(var_to_offset, MachineType::PointerRepresentation()); |
| if (element_size_matches) { |
| var_to_offset.Bind(var_from_offset.value()); |
| } else { |
| var_to_offset.Bind(ElementOffsetFromIndex(element_count, to_kind, mode, |
| first_element_offset)); |
| } |
| |
| Variable* vars[] = {&var_from_offset, &var_to_offset}; |
| Label decrement(this, 2, vars); |
| |
| Branch(WordEqual(var_from_offset.value(), limit_offset), &done, &decrement); |
| |
| BIND(&decrement); |
| { |
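| // Walk both offsets backwards one element at a time until the from-offset |
| // reaches the offset of element 0. |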
| Node* from_offset = IntPtrSub( |
| var_from_offset.value(), |
| IntPtrConstant(from_double_elements ? kDoubleSize : kPointerSize)); |
| var_from_offset.Bind(from_offset); |
| |
| Node* to_offset; |
| if (element_size_matches) { |
| to_offset = from_offset; |
| } else { |
| to_offset = IntPtrSub( |
| var_to_offset.value(), |
| IntPtrConstant(to_double_elements ? kDoubleSize : kPointerSize)); |
| var_to_offset.Bind(to_offset); |
| } |
| |
| Label next_iter(this), store_double_hole(this); |
| Label* if_hole; |
| if (doubles_to_objects_conversion) { |
| // The target elements array is already preinitialized with holes, so we |
| // can just proceed with the next iteration. |
| if_hole = &next_iter; |
| } else if (IsFastDoubleElementsKind(to_kind)) { |
| if_hole = &store_double_hole; |
| } else { |
| // In all the other cases don't check for holes and copy the data as is. |
| if_hole = nullptr; |
| } |
| |
| Node* value = LoadElementAndPrepareForStore( |
| from_array, var_from_offset.value(), from_kind, to_kind, if_hole); |
| |
| if (needs_write_barrier) { |
| Store(to_array, to_offset, value); |
| } else if (to_double_elements) { |
| StoreNoWriteBarrier(MachineRepresentation::kFloat64, to_array, to_offset, |
| value); |
| } else { |
| StoreNoWriteBarrier(MachineRepresentation::kTagged, to_array, to_offset, |
| value); |
| } |
| Goto(&next_iter); |
| |
| if (if_hole == &store_double_hole) { |
| BIND(&store_double_hole); |
| // Don't use doubles to store the hole double, since manipulating the |
| // signaling NaN used for the hole in C++, e.g. with bit_cast, will |
| // change its value on ia32 (the x87 stack is used to return values |
| // and stores to the stack silently clear the signalling bit). |
| // |
| // TODO(danno): When we have a Float32/Float64 wrapper class that |
| // preserves double bits during manipulation, remove this code/change |
| // this to an indexed Float64 store. |
| if (Is64()) { |
| StoreNoWriteBarrier(MachineRepresentation::kWord64, to_array, to_offset, |
| double_hole); |
| } else { |
| StoreNoWriteBarrier(MachineRepresentation::kWord32, to_array, to_offset, |
| double_hole); |
| StoreNoWriteBarrier(MachineRepresentation::kWord32, to_array, |
| IntPtrAdd(to_offset, IntPtrConstant(kPointerSize)), |
| double_hole); |
| } |
| Goto(&next_iter); |
| } |
| |
| BIND(&next_iter); |
| Node* compare = WordNotEqual(from_offset, limit_offset); |
| Branch(compare, &decrement, &done); |
| } |
| |
| BIND(&done); |
| IncrementCounter(isolate()->counters()->inlined_copied_elements(), 1); |
| Comment("] CopyFixedArrayElements"); |
| } |
| |
| void CodeStubAssembler::CopyStringCharacters(Node* from_string, Node* to_string, |
| Node* from_index, Node* to_index, |
| Node* character_count, |
| String::Encoding from_encoding, |
| String::Encoding to_encoding, |
| ParameterMode mode) { |
| bool from_one_byte = from_encoding == String::ONE_BYTE_ENCODING; |
| bool to_one_byte = to_encoding == String::ONE_BYTE_ENCODING; |
| DCHECK_IMPLIES(to_one_byte, from_one_byte); |
| Comment("CopyStringCharacters %s -> %s", |
| from_one_byte ? "ONE_BYTE_ENCODING" : "TWO_BYTE_ENCODING", |
| to_one_byte ? "ONE_BYTE_ENCODING" : "TWO_BYTE_ENCODING"); |
| |
| ElementsKind from_kind = from_one_byte ? UINT8_ELEMENTS : UINT16_ELEMENTS; |
| ElementsKind to_kind = to_one_byte ? UINT8_ELEMENTS : UINT16_ELEMENTS; |
| STATIC_ASSERT(SeqOneByteString::kHeaderSize == SeqTwoByteString::kHeaderSize); |
| int header_size = SeqOneByteString::kHeaderSize - kHeapObjectTag; |
| Node* from_offset = |
| ElementOffsetFromIndex(from_index, from_kind, mode, header_size); |
| Node* to_offset = |
| ElementOffsetFromIndex(to_index, to_kind, mode, header_size); |
| Node* byte_count = ElementOffsetFromIndex(character_count, from_kind, mode); |
| Node* limit_offset = IntPtrAdd(from_offset, byte_count); |
| |
| // Prepare the fast copy loop. |
| MachineType type = |
| from_one_byte ? MachineType::Uint8() : MachineType::Uint16(); |
| MachineRepresentation rep = to_one_byte ? MachineRepresentation::kWord8 |
| : MachineRepresentation::kWord16; |
| int from_increment = 1 << ElementsKindToShiftSize(from_kind); |
| int to_increment = 1 << ElementsKindToShiftSize(to_kind); |
| |
| VARIABLE(current_to_offset, MachineType::PointerRepresentation(), to_offset); |
| VariableList vars({¤t_to_offset}, zone()); |
| int to_index_constant = 0, from_index_constant = 0; |
| Smi* to_index_smi = nullptr; |
| Smi* from_index_smi = nullptr; |
| bool index_same = (from_encoding == to_encoding) && |
| (from_index == to_index || |
| (ToInt32Constant(from_index, from_index_constant) && |
| ToInt32Constant(to_index, to_index_constant) && |
| from_index_constant == to_index_constant) || |
| (ToSmiConstant(from_index, from_index_smi) && |
| ToSmiConstant(to_index, to_index_smi) && |
| to_index_smi == from_index_smi)); |
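| // When the encodings match and the start indices are known to be equal, |
| // the same offset addresses both strings, so the to-offset variable never |
| // needs to advance. |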
| BuildFastLoop(vars, from_offset, limit_offset, |
| [this, from_string, to_string, ¤t_to_offset, to_increment, |
| type, rep, index_same](Node* offset) { |
| Node* value = Load(type, from_string, offset); |
| StoreNoWriteBarrier( |
| rep, to_string, |
| index_same ? offset : current_to_offset.value(), value); |
| if (!index_same) { |
| Increment(current_to_offset, to_increment); |
| } |
| }, |
| from_increment, INTPTR_PARAMETERS, IndexAdvanceMode::kPost); |
| } |
| |
| Node* CodeStubAssembler::LoadElementAndPrepareForStore(Node* array, |
| Node* offset, |
| ElementsKind from_kind, |
| ElementsKind to_kind, |
| Label* if_hole) { |
| if (IsFastDoubleElementsKind(from_kind)) { |
| Node* value = |
| LoadDoubleWithHoleCheck(array, offset, if_hole, MachineType::Float64()); |
| if (!IsFastDoubleElementsKind(to_kind)) { |
| value = AllocateHeapNumberWithValue(value); |
| } |
| return value; |
| |
| } else { |
| Node* value = Load(MachineType::AnyTagged(), array, offset); |
| if (if_hole) { |
| GotoIf(WordEqual(value, TheHoleConstant()), if_hole); |
| } |
| if (IsFastDoubleElementsKind(to_kind)) { |
| if (IsFastSmiElementsKind(from_kind)) { |
| value = SmiToFloat64(value); |
| } else { |
| value = LoadHeapNumberValue(value); |
| } |
| } |
| return value; |
| } |
| } |
| |
| Node* CodeStubAssembler::CalculateNewElementsCapacity(Node* old_capacity, |
| ParameterMode mode) { |
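| // Grow by 50% and add a padding of 16 elements: |
| // new_capacity = old_capacity + old_capacity / 2 + 16. |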
| Node* half_old_capacity = WordOrSmiShr(old_capacity, 1, mode); |
| Node* new_capacity = IntPtrOrSmiAdd(half_old_capacity, old_capacity, mode); |
| Node* padding = IntPtrOrSmiConstant(16, mode); |
| return IntPtrOrSmiAdd(new_capacity, padding, mode); |
| } |
| |
| Node* CodeStubAssembler::TryGrowElementsCapacity(Node* object, Node* elements, |
| ElementsKind kind, Node* key, |
| Label* bailout) { |
| Node* capacity = LoadFixedArrayBaseLength(elements); |
| |
| ParameterMode mode = OptimalParameterMode(); |
| capacity = TaggedToParameter(capacity, mode); |
| key = TaggedToParameter(key, mode); |
| |
| return TryGrowElementsCapacity(object, elements, kind, key, capacity, mode, |
| bailout); |
| } |
| |
| Node* CodeStubAssembler::TryGrowElementsCapacity(Node* object, Node* elements, |
| ElementsKind kind, Node* key, |
| Node* capacity, |
| ParameterMode mode, |
| Label* bailout) { |
| Comment("TryGrowElementsCapacity"); |
| |
| // If the gap growth is too big, fall back to the runtime. |
| Node* max_gap = IntPtrOrSmiConstant(JSObject::kMaxGap, mode); |
| Node* max_capacity = IntPtrOrSmiAdd(capacity, max_gap, mode); |
| GotoIf(UintPtrOrSmiGreaterThanOrEqual(key, max_capacity, mode), bailout); |
| |
| // Calculate the capacity of the new backing store. |
| Node* new_capacity = CalculateNewElementsCapacity( |
| IntPtrOrSmiAdd(key, IntPtrOrSmiConstant(1, mode), mode), mode); |
| return GrowElementsCapacity(object, elements, kind, kind, capacity, |
| new_capacity, mode, bailout); |
| } |
| |
| Node* CodeStubAssembler::GrowElementsCapacity( |
| Node* object, Node* elements, ElementsKind from_kind, ElementsKind to_kind, |
| Node* capacity, Node* new_capacity, ParameterMode mode, Label* bailout) { |
| Comment("[ GrowElementsCapacity"); |
| // If the size of the allocation for the new capacity doesn't fit in a page |
| // that we can bump-pointer allocate from, fall back to the runtime. |
| int max_size = FixedArrayBase::GetMaxLengthForNewSpaceAllocation(to_kind); |
| GotoIf(UintPtrOrSmiGreaterThanOrEqual( |
| new_capacity, IntPtrOrSmiConstant(max_size, mode), mode), |
| bailout); |
| |
| // Allocate the new backing store. |
| Node* new_elements = AllocateFixedArray(to_kind, new_capacity, mode); |
| |
| // Copy the elements from the old backing store to the new one. The size |
| // check above guarantees that |new_elements| is allocated in new space, |
| // so we can skip the write barrier. |
| CopyFixedArrayElements(from_kind, elements, to_kind, new_elements, capacity, |
| new_capacity, SKIP_WRITE_BARRIER, mode); |
| |
| StoreObjectField(object, JSObject::kElementsOffset, new_elements); |
| Comment("] GrowElementsCapacity"); |
| return new_elements; |
| } |
| |
| void CodeStubAssembler::InitializeAllocationMemento(Node* base_allocation, |
| int base_allocation_size, |
| Node* allocation_site) { |
| StoreObjectFieldNoWriteBarrier( |
| base_allocation, AllocationMemento::kMapOffset + base_allocation_size, |
| HeapConstant(Handle<Map>(isolate()->heap()->allocation_memento_map()))); |
| StoreObjectFieldNoWriteBarrier( |
| base_allocation, |
| AllocationMemento::kAllocationSiteOffset + base_allocation_size, |
| allocation_site); |
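| // With allocation-site pretenuring enabled, bump the site's creation count |
| // so the pretenuring heuristic can take this allocation into account. |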
| if (FLAG_allocation_site_pretenuring) { |
| Node* count = LoadObjectField(allocation_site, |
| AllocationSite::kPretenureCreateCountOffset); |
| Node* incremented_count = SmiAdd(count, SmiConstant(Smi::FromInt(1))); |
| StoreObjectFieldNoWriteBarrier(allocation_site, |
| AllocationSite::kPretenureCreateCountOffset, |
| incremented_count); |
| } |
| } |
| |
| Node* CodeStubAssembler::TryTaggedToFloat64(Node* value, |
| Label* if_valueisnotnumber) { |
| Label out(this); |
| VARIABLE(var_result, MachineRepresentation::kFloat64); |
| |
| // Check if the {value} is a Smi or a HeapObject. |
| Label if_valueissmi(this), if_valueisnotsmi(this); |
| Branch(TaggedIsSmi(value), &if_valueissmi, &if_valueisnotsmi); |
| |
| BIND(&if_valueissmi); |
| { |
| // Convert the Smi {value}. |
| var_result.Bind(SmiToFloat64(value)); |
| Goto(&out); |
| } |
| |
| BIND(&if_valueisnotsmi); |
| { |
| // Check if {value} is a HeapNumber. |
| Label if_valueisheapnumber(this); |
| Branch(IsHeapNumberMap(LoadMap(value)), &if_valueisheapnumber, |
| if_valueisnotnumber); |
| |
| BIND(&if_valueisheapnumber); |
| { |
| // Load the floating point value. |
| var_result.Bind(LoadHeapNumberValue(value)); |
| Goto(&out); |
| } |
| } |
| BIND(&out); |
| return var_result.value(); |
| } |
| |
| Node* CodeStubAssembler::TruncateTaggedToFloat64(Node* context, Node* value) { |
| // We might need to loop once due to ToNumber conversion. |
| VARIABLE(var_value, MachineRepresentation::kTagged); |
| VARIABLE(var_result, MachineRepresentation::kFloat64); |
| Label loop(this, &var_value), done_loop(this, &var_result); |
| var_value.Bind(value); |
| Goto(&loop); |
| BIND(&loop); |
| { |
| Label if_valueisnotnumber(this, Label::kDeferred); |
| |
| // Load the current {value}. |
| value = var_value.value(); |
| |
| // If {value} is a number, convert it to a Float64; otherwise convert it |
| // to a Number first and loop. |
| Node* const result = TryTaggedToFloat64(value, &if_valueisnotnumber); |
| var_result.Bind(result); |
| Goto(&done_loop); |
| |
| BIND(&if_valueisnotnumber); |
| { |
| // Convert the {value} to a Number first. |
| Callable callable = CodeFactory::NonNumberToNumber(isolate()); |
| var_value.Bind(CallStub(callable, context, value)); |
| Goto(&loop); |
| } |
| } |
| BIND(&done_loop); |
| return var_result.value(); |
| } |
| |
| Node* CodeStubAssembler::TruncateTaggedToWord32(Node* context, Node* value) { |
| // We might need to loop once due to ToNumber conversion. |
| VARIABLE(var_value, MachineRepresentation::kTagged, value); |
| VARIABLE(var_result, MachineRepresentation::kWord32); |
| Label loop(this, &var_value), done_loop(this, &var_result); |
| Goto(&loop); |
| BIND(&loop); |
| { |
| // Load the current {value}. |
| value = var_value.value(); |
| |
| // Check if the {value} is a Smi or a HeapObject. |
| Label if_valueissmi(this), if_valueisnotsmi(this); |
| Branch(TaggedIsSmi(value), &if_valueissmi, &if_valueisnotsmi); |
| |
| BIND(&if_valueissmi); |
| { |
| // Convert the Smi {value}. |
| var_result.Bind(SmiToWord32(value)); |
| Goto(&done_loop); |
| } |
| |
| BIND(&if_valueisnotsmi); |
| { |
| // Check if {value} is a HeapNumber. |
| Label if_valueisheapnumber(this), |
| if_valueisnotheapnumber(this, Label::kDeferred); |
| Branch(IsHeapNumberMap(LoadMap(value)), &if_valueisheapnumber, |
| &if_valueisnotheapnumber); |
| |
| BIND(&if_valueisheapnumber); |
| { |
| // Truncate the floating point value. |
| var_result.Bind(TruncateHeapNumberValueToWord32(value)); |
| Goto(&done_loop); |
| } |
| |
| BIND(&if_valueisnotheapnumber); |
| { |
| // Convert the {value} to a Number first. |
| Callable callable = CodeFactory::NonNumberToNumber(isolate()); |
| var_value.Bind(CallStub(callable, context, value)); |
| Goto(&loop); |
| } |
| } |
| } |
| BIND(&done_loop); |
| return var_result.value(); |
| } |
| |
| Node* CodeStubAssembler::TruncateHeapNumberValueToWord32(Node* object) { |
| Node* value = LoadHeapNumberValue(object); |
| return TruncateFloat64ToWord32(value); |
| } |
| |
| Node* CodeStubAssembler::ChangeFloat64ToTagged(Node* value) { |
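| // Round-trip the value through int32; if the result converts back to the |
| // same double, the value may fit in a Smi. |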
| Node* value32 = RoundFloat64ToInt32(value); |
| Node* value64 = ChangeInt32ToFloat64(value32); |
| |
| Label if_valueisint32(this), if_valueisheapnumber(this), if_join(this); |
| |
| Label if_valueisequal(this), if_valueisnotequal(this); |
| Branch(Float64Equal(value, value64), &if_valueisequal, &if_valueisnotequal); |
| BIND(&if_valueisequal); |
| { |
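| // The int32 round-trip is exact. A zero still needs its sign bit checked: |
| // -0.0 cannot be represented as a Smi and must be boxed as a HeapNumber. |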
| GotoIfNot(Word32Equal(value32, Int32Constant(0)), &if_valueisint32); |
| Branch(Int32LessThan(Float64ExtractHighWord32(value), Int32Constant(0)), |
| &if_valueisheapnumber, &if_valueisint32); |
| } |
| BIND(&if_valueisnotequal); |
| Goto(&if_valueisheapnumber); |
| |
| VARIABLE(var_result, MachineRepresentation::kTagged); |
| BIND(&if_valueisint32); |
| { |
| if (Is64()) { |
| Node* result = SmiTag(ChangeInt32ToInt64(value32)); |
| var_result.Bind(result); |
| Goto(&if_join); |
| } else { |
| Node* pair = Int32AddWithOverflow(value32, value32); |
| Node* overflow = Projection(1, pair); |
| Label if_overflow(this, Label::kDeferred), if_notoverflow(this); |
| |