// Copyright 2016 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/code-stub-assembler.h"
#include "src/code-factory.h"
#include "src/frames-inl.h"
#include "src/frames.h"
namespace v8 {
namespace internal {
using compiler::Node;
CodeStubAssembler::CodeStubAssembler(compiler::CodeAssemblerState* state)
: compiler::CodeAssembler(state) {
if (DEBUG_BOOL && FLAG_csa_trap_on_node != nullptr) {
HandleBreakOnNode();
}
}
void CodeStubAssembler::HandleBreakOnNode() {
// FLAG_csa_trap_on_node should be in the form "STUB,NODE", where STUB is a
// string specifying the name of a stub and NODE is a number specifying the
// node id.
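// For example (illustrative values), passing "ArrayPush,142" breaks into the
// debugger as soon as node #142 of the ArrayPush stub is created.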
const char* name = state()->name();
size_t name_length = strlen(name);
if (strncmp(FLAG_csa_trap_on_node, name, name_length) != 0) {
// Different name.
return;
}
size_t option_length = strlen(FLAG_csa_trap_on_node);
if (option_length < name_length + 2 ||
FLAG_csa_trap_on_node[name_length] != ',') {
// Option is too short.
return;
}
const char* start = &FLAG_csa_trap_on_node[name_length + 1];
char* end;
int node_id = static_cast<int>(strtol(start, &end, 10));
if (start == end) {
// Bad node id.
return;
}
BreakOnNode(node_id);
}
void CodeStubAssembler::Assert(const NodeGenerator& condition_body,
const char* message, const char* file,
int line) {
#if defined(DEBUG)
Label ok(this);
Label not_ok(this, Label::kDeferred);
if (message != nullptr && FLAG_code_comments) {
Comment("[ Assert: %s", message);
} else {
Comment("[ Assert");
}
Node* condition = condition_body();
DCHECK_NOT_NULL(condition);
Branch(condition, &ok, &not_ok);
Bind(&not_ok);
if (message != nullptr) {
char chars[1024];
Vector<char> buffer(chars);
if (file != nullptr) {
SNPrintF(buffer, "CSA_ASSERT failed: %s [%s:%d]\n", message, file, line);
} else {
SNPrintF(buffer, "CSA_ASSERT failed: %s\n", message);
}
CallRuntime(
Runtime::kGlobalPrint, SmiConstant(Smi::kZero),
HeapConstant(factory()->NewStringFromAsciiChecked(&(buffer[0]))));
}
DebugBreak();
Goto(&ok);
Bind(&ok);
Comment("] Assert");
#endif
}
Node* CodeStubAssembler::Select(Node* condition, const NodeGenerator& true_body,
const NodeGenerator& false_body,
MachineRepresentation rep) {
Variable value(this, rep);
Label vtrue(this), vfalse(this), end(this);
Branch(condition, &vtrue, &vfalse);
Bind(&vtrue);
{
value.Bind(true_body());
Goto(&end);
}
Bind(&vfalse);
{
value.Bind(false_body());
Goto(&end);
}
Bind(&end);
return value.value();
}
Node* CodeStubAssembler::SelectConstant(Node* condition, Node* true_value,
Node* false_value,
MachineRepresentation rep) {
return Select(condition, [=] { return true_value; },
[=] { return false_value; }, rep);
}
Node* CodeStubAssembler::SelectInt32Constant(Node* condition, int true_value,
int false_value) {
return SelectConstant(condition, Int32Constant(true_value),
Int32Constant(false_value),
MachineRepresentation::kWord32);
}
Node* CodeStubAssembler::SelectIntPtrConstant(Node* condition, int true_value,
int false_value) {
return SelectConstant(condition, IntPtrConstant(true_value),
IntPtrConstant(false_value),
MachineType::PointerRepresentation());
}
Node* CodeStubAssembler::SelectBooleanConstant(Node* condition) {
return SelectConstant(condition, TrueConstant(), FalseConstant(),
MachineRepresentation::kTagged);
}
Node* CodeStubAssembler::SelectTaggedConstant(Node* condition, Node* true_value,
Node* false_value) {
return SelectConstant(condition, true_value, false_value,
MachineRepresentation::kTagged);
}
Node* CodeStubAssembler::NoContextConstant() { return NumberConstant(0); }
#define HEAP_CONSTANT_ACCESSOR(rootName, name) \
Node* CodeStubAssembler::name##Constant() { \
return LoadRoot(Heap::k##rootName##RootIndex); \
}
HEAP_CONSTANT_LIST(HEAP_CONSTANT_ACCESSOR);
#undef HEAP_CONSTANT_ACCESSOR
#define HEAP_CONSTANT_TEST(rootName, name) \
Node* CodeStubAssembler::Is##name(Node* value) { \
return WordEqual(value, name##Constant()); \
}
HEAP_CONSTANT_LIST(HEAP_CONSTANT_TEST);
#undef HEAP_CONSTANT_TEST
Node* CodeStubAssembler::HashSeed() {
return LoadAndUntagToWord32Root(Heap::kHashSeedRootIndex);
}
Node* CodeStubAssembler::StaleRegisterConstant() {
return LoadRoot(Heap::kStaleRegisterRootIndex);
}
Node* CodeStubAssembler::IntPtrOrSmiConstant(int value, ParameterMode mode) {
if (mode == SMI_PARAMETERS) {
return SmiConstant(Smi::FromInt(value));
} else {
DCHECK_EQ(INTPTR_PARAMETERS, mode);
return IntPtrConstant(value);
}
}
Node* CodeStubAssembler::IntPtrAddFoldConstants(Node* left, Node* right) {
int32_t left_constant;
bool is_left_constant = ToInt32Constant(left, left_constant);
int32_t right_constant;
bool is_right_constant = ToInt32Constant(right, right_constant);
if (is_left_constant) {
if (is_right_constant) {
return IntPtrConstant(left_constant + right_constant);
}
if (left_constant == 0) {
return right;
}
} else if (is_right_constant) {
if (right_constant == 0) {
return left;
}
}
return IntPtrAdd(left, right);
}
Node* CodeStubAssembler::IntPtrSubFoldConstants(Node* left, Node* right) {
int32_t left_constant;
bool is_left_constant = ToInt32Constant(left, left_constant);
int32_t right_constant;
bool is_right_constant = ToInt32Constant(right, right_constant);
if (is_left_constant) {
if (is_right_constant) {
return IntPtrConstant(left_constant - right_constant);
}
} else if (is_right_constant) {
if (right_constant == 0) {
return left;
}
}
return IntPtrSub(left, right);
}
Node* CodeStubAssembler::IntPtrRoundUpToPowerOfTwo32(Node* value) {
Comment("IntPtrRoundUpToPowerOfTwo32");
CSA_ASSERT(this, UintPtrLessThanOrEqual(value, IntPtrConstant(0x80000000u)));
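// Classic bit-smearing trick: decrement, OR the value with successively larger
// right-shifts of itself so that every bit below the leading one becomes set,
// then increment. E.g. 37 -> 36 (0b100100) -> 63 (0b111111) -> 64.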
value = IntPtrSub(value, IntPtrConstant(1));
for (int i = 1; i <= 16; i *= 2) {
value = WordOr(value, WordShr(value, IntPtrConstant(i)));
}
return IntPtrAdd(value, IntPtrConstant(1));
}
Node* CodeStubAssembler::WordIsPowerOfTwo(Node* value) {
// value && !(value & (value - 1))
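// The zero case is handled via the Select below: for value == 0 a non-zero
// dummy is substituted, so zero is not reported as a power of two.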
return WordEqual(
Select(
WordEqual(value, IntPtrConstant(0)),
[=] { return IntPtrConstant(1); },
[=] { return WordAnd(value, IntPtrSub(value, IntPtrConstant(1))); },
MachineType::PointerRepresentation()),
IntPtrConstant(0));
}
Node* CodeStubAssembler::Float64Round(Node* x) {
Node* one = Float64Constant(1.0);
Node* one_half = Float64Constant(0.5);
Variable var_x(this, MachineRepresentation::kFloat64);
Label return_x(this);
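// Rounds to the nearest integer with ties rounding up (like Math.round):
// start from Float64Ceil(x) and subtract one whenever ceil(x) - 0.5 > x.
// E.g. x = 2.4: ceil = 3.0 and 2.5 > 2.4, so the result is 2.0;
// x = 2.5: 2.5 <= 2.5, so the result stays at 3.0.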
// Round up {x} towards Infinity.
var_x.Bind(Float64Ceil(x));
GotoIf(Float64LessThanOrEqual(Float64Sub(var_x.value(), one_half), x),
&return_x);
var_x.Bind(Float64Sub(var_x.value(), one));
Goto(&return_x);
Bind(&return_x);
return var_x.value();
}
Node* CodeStubAssembler::Float64Ceil(Node* x) {
if (IsFloat64RoundUpSupported()) {
return Float64RoundUp(x);
}
Node* one = Float64Constant(1.0);
Node* zero = Float64Constant(0.0);
Node* two_52 = Float64Constant(4503599627370496.0E0);
Node* minus_two_52 = Float64Constant(-4503599627370496.0E0);
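// Rounding trick: for 0 < x < 2^52, (2^52 + x) - 2^52 rounds {x} to the
// nearest integer, because the sum leaves no mantissa bits for a fraction.
// E.g. x = 1.3: (2^52 + 1.3) rounds to 2^52 + 1, giving 1.0; since 1.0 < 1.3,
// the code below adds one to obtain ceil(1.3) = 2.0.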
Variable var_x(this, MachineRepresentation::kFloat64);
Label return_x(this), return_minus_x(this);
var_x.Bind(x);
// Check if {x} is greater than zero.
Label if_xgreaterthanzero(this), if_xnotgreaterthanzero(this);
Branch(Float64GreaterThan(x, zero), &if_xgreaterthanzero,
&if_xnotgreaterthanzero);
Bind(&if_xgreaterthanzero);
{
// Just return {x} unless it's in the range ]0,2^52[.
GotoIf(Float64GreaterThanOrEqual(x, two_52), &return_x);
// Round positive {x} towards Infinity.
var_x.Bind(Float64Sub(Float64Add(two_52, x), two_52));
GotoUnless(Float64LessThan(var_x.value(), x), &return_x);
var_x.Bind(Float64Add(var_x.value(), one));
Goto(&return_x);
}
Bind(&if_xnotgreaterthanzero);
{
// Just return {x} unless it's in the range ]-2^52,0[
GotoIf(Float64LessThanOrEqual(x, minus_two_52), &return_x);
GotoUnless(Float64LessThan(x, zero), &return_x);
// Round negated {x} towards Infinity and return the result negated.
Node* minus_x = Float64Neg(x);
var_x.Bind(Float64Sub(Float64Add(two_52, minus_x), two_52));
GotoUnless(Float64GreaterThan(var_x.value(), minus_x), &return_minus_x);
var_x.Bind(Float64Sub(var_x.value(), one));
Goto(&return_minus_x);
}
Bind(&return_minus_x);
var_x.Bind(Float64Neg(var_x.value()));
Goto(&return_x);
Bind(&return_x);
return var_x.value();
}
Node* CodeStubAssembler::Float64Floor(Node* x) {
if (IsFloat64RoundDownSupported()) {
return Float64RoundDown(x);
}
Node* one = Float64Constant(1.0);
Node* zero = Float64Constant(0.0);
Node* two_52 = Float64Constant(4503599627370496.0E0);
Node* minus_two_52 = Float64Constant(-4503599627370496.0E0);
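// Same 2^52 rounding trick as in Float64Ceil above, with the correction going
// the other way: if the rounded value exceeds {x}, subtract one.
// E.g. x = 1.7 rounds to 2.0, and 2.0 > 1.7 yields floor(1.7) = 1.0.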
Variable var_x(this, MachineRepresentation::kFloat64);
Label return_x(this), return_minus_x(this);
var_x.Bind(x);
// Check if {x} is greater than zero.
Label if_xgreaterthanzero(this), if_xnotgreaterthanzero(this);
Branch(Float64GreaterThan(x, zero), &if_xgreaterthanzero,
&if_xnotgreaterthanzero);
Bind(&if_xgreaterthanzero);
{
// Just return {x} unless it's in the range ]0,2^52[.
GotoIf(Float64GreaterThanOrEqual(x, two_52), &return_x);
// Round positive {x} towards -Infinity.
var_x.Bind(Float64Sub(Float64Add(two_52, x), two_52));
GotoUnless(Float64GreaterThan(var_x.value(), x), &return_x);
var_x.Bind(Float64Sub(var_x.value(), one));
Goto(&return_x);
}
Bind(&if_xnotgreaterthanzero);
{
// Just return {x} unless it's in the range ]-2^52,0[
GotoIf(Float64LessThanOrEqual(x, minus_two_52), &return_x);
GotoUnless(Float64LessThan(x, zero), &return_x);
// Round negated {x} towards -Infinity and return the result negated.
Node* minus_x = Float64Neg(x);
var_x.Bind(Float64Sub(Float64Add(two_52, minus_x), two_52));
GotoUnless(Float64LessThan(var_x.value(), minus_x), &return_minus_x);
var_x.Bind(Float64Add(var_x.value(), one));
Goto(&return_minus_x);
}
Bind(&return_minus_x);
var_x.Bind(Float64Neg(var_x.value()));
Goto(&return_x);
Bind(&return_x);
return var_x.value();
}
Node* CodeStubAssembler::Float64RoundToEven(Node* x) {
if (IsFloat64RoundTiesEvenSupported()) {
return Float64RoundTiesEven(x);
}
// See ES#sec-touint8clamp for details.
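// Rounds half to even: values strictly above f + 0.5 round up, values strictly
// below round down, and exact ties pick whichever of f and f + 1 is even.
// E.g. 2.5 -> 2.0 (2 is even) while 3.5 -> 4.0.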
Node* f = Float64Floor(x);
Node* f_and_half = Float64Add(f, Float64Constant(0.5));
Variable var_result(this, MachineRepresentation::kFloat64);
Label return_f(this), return_f_plus_one(this), done(this);
GotoIf(Float64LessThan(f_and_half, x), &return_f_plus_one);
GotoIf(Float64LessThan(x, f_and_half), &return_f);
{
Node* f_mod_2 = Float64Mod(f, Float64Constant(2.0));
Branch(Float64Equal(f_mod_2, Float64Constant(0.0)), &return_f,
&return_f_plus_one);
}
Bind(&return_f);
var_result.Bind(f);
Goto(&done);
Bind(&return_f_plus_one);
var_result.Bind(Float64Add(f, Float64Constant(1.0)));
Goto(&done);
Bind(&done);
return var_result.value();
}
Node* CodeStubAssembler::Float64Trunc(Node* x) {
if (IsFloat64RoundTruncateSupported()) {
return Float64RoundTruncate(x);
}
Node* one = Float64Constant(1.0);
Node* zero = Float64Constant(0.0);
Node* two_52 = Float64Constant(4503599627370496.0E0);
Node* minus_two_52 = Float64Constant(-4503599627370496.0E0);
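// Truncation rounds towards zero: floor for positive {x}, ceil for negative
// {x}. Each branch below uses the machine rounding instruction when available
// and otherwise falls back to the 2^52 trick from Float64Ceil/Float64Floor.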
Variable var_x(this, MachineRepresentation::kFloat64);
Label return_x(this), return_minus_x(this);
var_x.Bind(x);
// Check if {x} is greater than 0.
Label if_xgreaterthanzero(this), if_xnotgreaterthanzero(this);
Branch(Float64GreaterThan(x, zero), &if_xgreaterthanzero,
&if_xnotgreaterthanzero);
Bind(&if_xgreaterthanzero);
{
if (IsFloat64RoundDownSupported()) {
var_x.Bind(Float64RoundDown(x));
} else {
// Just return {x} unless it's in the range ]0,2^52[.
GotoIf(Float64GreaterThanOrEqual(x, two_52), &return_x);
// Round positive {x} towards -Infinity.
var_x.Bind(Float64Sub(Float64Add(two_52, x), two_52));
GotoUnless(Float64GreaterThan(var_x.value(), x), &return_x);
var_x.Bind(Float64Sub(var_x.value(), one));
}
Goto(&return_x);
}
Bind(&if_xnotgreaterthanzero);
{
if (IsFloat64RoundUpSupported()) {
var_x.Bind(Float64RoundUp(x));
Goto(&return_x);
} else {
// Just return {x} unless it's in the range ]-2^52,0[.
GotoIf(Float64LessThanOrEqual(x, minus_two_52), &return_x);
GotoUnless(Float64LessThan(x, zero), &return_x);
// Round negated {x} towards -Infinity (this truncates {x} towards zero) and
// return the result negated.
Node* minus_x = Float64Neg(x);
var_x.Bind(Float64Sub(Float64Add(two_52, minus_x), two_52));
GotoUnless(Float64GreaterThan(var_x.value(), minus_x), &return_minus_x);
var_x.Bind(Float64Sub(var_x.value(), one));
Goto(&return_minus_x);
}
}
Bind(&return_minus_x);
var_x.Bind(Float64Neg(var_x.value()));
Goto(&return_x);
Bind(&return_x);
return var_x.value();
}
Node* CodeStubAssembler::SmiShiftBitsConstant() {
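// kSmiShiftSize + kSmiTagSize is 32 on 64-bit targets, where the 32-bit smi
// payload lives in the upper half of the word, and 1 on 32-bit targets, where
// only the low tag bit is shifted in.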
return IntPtrConstant(kSmiShiftSize + kSmiTagSize);
}
Node* CodeStubAssembler::SmiFromWord32(Node* value) {
value = ChangeInt32ToIntPtr(value);
return BitcastWordToTaggedSigned(WordShl(value, SmiShiftBitsConstant()));
}
Node* CodeStubAssembler::SmiTag(Node* value) {
int32_t constant_value;
if (ToInt32Constant(value, constant_value) && Smi::IsValid(constant_value)) {
return SmiConstant(Smi::FromInt(constant_value));
}
return BitcastWordToTaggedSigned(WordShl(value, SmiShiftBitsConstant()));
}
Node* CodeStubAssembler::SmiUntag(Node* value) {
return WordSar(BitcastTaggedToWord(value), SmiShiftBitsConstant());
}
Node* CodeStubAssembler::SmiToWord32(Node* value) {
Node* result = SmiUntag(value);
return TruncateWordToWord32(result);
}
Node* CodeStubAssembler::SmiToFloat64(Node* value) {
return ChangeInt32ToFloat64(SmiToWord32(value));
}
Node* CodeStubAssembler::SmiMax(Node* a, Node* b) {
return SelectTaggedConstant(SmiLessThan(a, b), b, a);
}
Node* CodeStubAssembler::SmiMin(Node* a, Node* b) {
return SelectTaggedConstant(SmiLessThan(a, b), a, b);
}
Node* CodeStubAssembler::SmiMod(Node* a, Node* b) {
Variable var_result(this, MachineRepresentation::kTagged);
Label return_result(this, &var_result),
return_minuszero(this, Label::kDeferred),
return_nan(this, Label::kDeferred);
// Untag {a} and {b}.
a = SmiToWord32(a);
b = SmiToWord32(b);
// Return NaN if {b} is zero.
GotoIf(Word32Equal(b, Int32Constant(0)), &return_nan);
// Check if {a} is non-negative.
Label if_aisnotnegative(this), if_aisnegative(this, Label::kDeferred);
Branch(Int32LessThanOrEqual(Int32Constant(0), a), &if_aisnotnegative,
&if_aisnegative);
Bind(&if_aisnotnegative);
{
// Fast case, don't need to check any other edge cases.
Node* r = Int32Mod(a, b);
var_result.Bind(SmiFromWord32(r));
Goto(&return_result);
}
Bind(&if_aisnegative);
{
if (SmiValuesAre32Bits()) {
// Check if {a} is kMinInt and {b} is -1 (only relevant if kMinInt is
// actually representable as a Smi).
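// kMinInt % -1 is special-cased here because the machine-level modulus may
// overflow; the mathematical remainder is 0, and since the dividend {a} is
// negative the JavaScript result must be -0.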
Label join(this);
GotoUnless(Word32Equal(a, Int32Constant(kMinInt)), &join);
GotoIf(Word32Equal(b, Int32Constant(-1)), &return_minuszero);
Goto(&join);
Bind(&join);
}
// Perform the integer modulus operation.
Node* r = Int32Mod(a, b);
// Check if {r} is zero, and if so return -0, because we have to
// take the sign of the left hand side {a}, which is negative.
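// For example, (-6) % 3 is -0 in JavaScript even though the integer
// remainder is 0.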
GotoIf(Word32Equal(r, Int32Constant(0)), &return_minuszero);
// The remainder {r} can be outside the valid Smi range on 32bit
// architectures, so we cannot just say SmiFromWord32(r) here.
var_result.Bind(ChangeInt32ToTagged(r));
Goto(&return_result);
}
Bind(&return_minuszero);
var_result.Bind(MinusZeroConstant());
Goto(&return_result);
Bind(&return_nan);
var_result.Bind(NanConstant());
Goto(&return_result);
Bind(&return_result);
return var_result.value();
}
Node* CodeStubAssembler::SmiMul(Node* a, Node* b) {
Variable var_result(this, MachineRepresentation::kTagged);
Variable var_lhs_float64(this, MachineRepresentation::kFloat64),
var_rhs_float64(this, MachineRepresentation::kFloat64);
Label return_result(this, &var_result);
// Both {a} and {b} are Smis. Convert them to integers and multiply.
Node* lhs32 = SmiToWord32(a);
Node* rhs32 = SmiToWord32(b);
Node* pair = Int32MulWithOverflow(lhs32, rhs32);
Node* overflow = Projection(1, pair);
// Check if the multiplication overflowed.
Label if_overflow(this, Label::kDeferred), if_notoverflow(this);
Branch(overflow, &if_overflow, &if_notoverflow);
Bind(&if_notoverflow);
{
// If the answer is zero, we may need to return -0.0, depending on the
// input.
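// E.g. -3 * 0 and 0 * -3 both evaluate to -0 in JavaScript, which is why the
// sign of (lhs | rhs) is inspected below.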
Label answer_zero(this), answer_not_zero(this);
Node* answer = Projection(0, pair);
Node* zero = Int32Constant(0);
Branch(Word32Equal(answer, zero), &answer_zero, &answer_not_zero);
Bind(&answer_not_zero);
{
var_result.Bind(ChangeInt32ToTagged(answer));
Goto(&return_result);
}
Bind(&answer_zero);
{
Node* or_result = Word32Or(lhs32, rhs32);
Label if_should_be_negative_zero(this), if_should_be_zero(this);
Branch(Int32LessThan(or_result, zero), &if_should_be_negative_zero,
&if_should_be_zero);
Bind(&if_should_be_negative_zero);
{
var_result.Bind(MinusZeroConstant());
Goto(&return_result);
}
Bind(&if_should_be_zero);
{
var_result.Bind(SmiConstant(0));
Goto(&return_result);
}
}
}
Bind(&if_overflow);
{
var_lhs_float64.Bind(SmiToFloat64(a));
var_rhs_float64.Bind(SmiToFloat64(b));
Node* value = Float64Mul(var_lhs_float64.value(), var_rhs_float64.value());
Node* result = AllocateHeapNumberWithValue(value);
var_result.Bind(result);
Goto(&return_result);
}
Bind(&return_result);
return var_result.value();
}
Node* CodeStubAssembler::TruncateWordToWord32(Node* value) {
if (Is64()) {
return TruncateInt64ToInt32(value);
}
return value;
}
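// A smi has the low tag bit clear (kSmiTag == 0), so masking a tagged value
// with kSmiTagMask and comparing against zero distinguishes smis from heap
// object pointers, whose low bit is set (kHeapObjectTag == 1).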
Node* CodeStubAssembler::TaggedIsSmi(Node* a) {
return WordEqual(WordAnd(BitcastTaggedToWord(a), IntPtrConstant(kSmiTagMask)),
IntPtrConstant(0));
}
Node* CodeStubAssembler::TaggedIsNotSmi(Node* a) {
return WordNotEqual(
WordAnd(BitcastTaggedToWord(a), IntPtrConstant(kSmiTagMask)),
IntPtrConstant(0));
}
Node* CodeStubAssembler::TaggedIsPositiveSmi(Node* a) {
return WordEqual(WordAnd(BitcastTaggedToWord(a),
IntPtrConstant(kSmiTagMask | kSmiSignMask)),
IntPtrConstant(0));
}
Node* CodeStubAssembler::WordIsWordAligned(Node* word) {
return WordEqual(IntPtrConstant(0),
WordAnd(word, IntPtrConstant((1 << kPointerSizeLog2) - 1)));
}
void CodeStubAssembler::BranchIfSimd128Equal(Node* lhs, Node* lhs_map,
Node* rhs, Node* rhs_map,
Label* if_equal,
Label* if_notequal) {
Label if_mapsame(this), if_mapnotsame(this);
Branch(WordEqual(lhs_map, rhs_map), &if_mapsame, &if_mapnotsame);
Bind(&if_mapsame);
{
// Both {lhs} and {rhs} are Simd128Values with the same map; Float32x4 needs
// special handling because of NaN comparisons.
Label if_float32x4(this), if_notfloat32x4(this);
Node* float32x4_map = HeapConstant(factory()->float32x4_map());
Branch(WordEqual(lhs_map, float32x4_map), &if_float32x4, &if_notfloat32x4);
Bind(&if_float32x4);
{
// Both {lhs} and {rhs} are Float32x4, compare the lanes individually
// using a floating point comparison.
for (int offset = Float32x4::kValueOffset - kHeapObjectTag;
offset < Float32x4::kSize - kHeapObjectTag;
offset += sizeof(float)) {
// Load the floating point values for {lhs} and {rhs}.
Node* lhs_value =
Load(MachineType::Float32(), lhs, IntPtrConstant(offset));
Node* rhs_value =
Load(MachineType::Float32(), rhs, IntPtrConstant(offset));
// Perform a floating point comparison.
Label if_valueequal(this), if_valuenotequal(this);
Branch(Float32Equal(lhs_value, rhs_value), &if_valueequal,
&if_valuenotequal);
Bind(&if_valuenotequal);
Goto(if_notequal);
Bind(&if_valueequal);
}
// All 4 lanes match, {lhs} and {rhs} considered equal.
Goto(if_equal);
}
Bind(&if_notfloat32x4);
{
// For other Simd128Values we just perform a bitwise comparison.
for (int offset = Simd128Value::kValueOffset - kHeapObjectTag;
offset < Simd128Value::kSize - kHeapObjectTag;
offset += kPointerSize) {
// Load the word values for {lhs} and {rhs}.
Node* lhs_value =
Load(MachineType::Pointer(), lhs, IntPtrConstant(offset));
Node* rhs_value =
Load(MachineType::Pointer(), rhs, IntPtrConstant(offset));
// Perform a bitwise word-comparison.
Label if_valueequal(this), if_valuenotequal(this);
Branch(WordEqual(lhs_value, rhs_value), &if_valueequal,
&if_valuenotequal);
Bind(&if_valuenotequal);
Goto(if_notequal);
Bind(&if_valueequal);
}
// Bitwise comparison succeeded, {lhs} and {rhs} considered equal.
Goto(if_equal);
}
}
Bind(&if_mapnotsame);
Goto(if_notequal);
}
void CodeStubAssembler::BranchIfPrototypesHaveNoElements(
Node* receiver_map, Label* definitely_no_elements,
Label* possibly_elements) {
Variable var_map(this, MachineRepresentation::kTagged);
var_map.Bind(receiver_map);
Label loop_body(this, &var_map);
Node* empty_elements = LoadRoot(Heap::kEmptyFixedArrayRootIndex);
Goto(&loop_body);
Bind(&loop_body);
{
Node* map = var_map.value();
Node* prototype = LoadMapPrototype(map);
GotoIf(WordEqual(prototype, NullConstant()), definitely_no_elements);
Node* prototype_map = LoadMap(prototype);
// Pessimistically assume elements if a Proxy, Special API Object,
// or JSValue wrapper is found on the prototype chain. After this
// instance type check, it's not necessary to check for interceptors or
// access checks.
GotoIf(Int32LessThanOrEqual(LoadMapInstanceType(prototype_map),
Int32Constant(LAST_CUSTOM_ELEMENTS_RECEIVER)),
possibly_elements);
GotoIf(WordNotEqual(LoadElements(prototype), empty_elements),
possibly_elements);
var_map.Bind(prototype_map);
Goto(&loop_body);
}
}
void CodeStubAssembler::BranchIfJSReceiver(Node* object, Label* if_true,
Label* if_false) {
GotoIf(TaggedIsSmi(object), if_false);
STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
Branch(Int32GreaterThanOrEqual(LoadInstanceType(object),
Int32Constant(FIRST_JS_RECEIVER_TYPE)),
if_true, if_false);
}
void CodeStubAssembler::BranchIfJSObject(Node* object, Label* if_true,
Label* if_false) {
GotoIf(TaggedIsSmi(object), if_false);
STATIC_ASSERT(LAST_JS_OBJECT_TYPE == LAST_TYPE);
Branch(Int32GreaterThanOrEqual(LoadInstanceType(object),
Int32Constant(FIRST_JS_OBJECT_TYPE)),
if_true, if_false);
}
void CodeStubAssembler::BranchIfFastJSArray(
Node* object, Node* context, CodeStubAssembler::FastJSArrayAccessMode mode,
Label* if_true, Label* if_false) {
// Bailout if receiver is a Smi.
GotoIf(TaggedIsSmi(object), if_false);
Node* map = LoadMap(object);
// Bailout if instance type is not JS_ARRAY_TYPE.
GotoIf(Word32NotEqual(LoadMapInstanceType(map), Int32Constant(JS_ARRAY_TYPE)),
if_false);
Node* elements_kind = LoadMapElementsKind(map);
// Bailout if receiver has slow elements.
GotoUnless(IsFastElementsKind(elements_kind), if_false);
// Check prototype chain if receiver does not have packed elements.
if (mode == FastJSArrayAccessMode::INBOUNDS_READ) {
GotoUnless(IsHoleyFastElementsKind(elements_kind), if_true);
}
BranchIfPrototypesHaveNoElements(map, if_true, if_false);
}
Node* CodeStubAssembler::AllocateRawUnaligned(Node* size_in_bytes,
AllocationFlags flags,
Node* top_address,
Node* limit_address) {
Node* top = Load(MachineType::Pointer(), top_address);
Node* limit = Load(MachineType::Pointer(), limit_address);
// If there's not enough space, call the runtime.
Variable result(this, MachineRepresentation::kTagged);
Label runtime_call(this, Label::kDeferred), no_runtime_call(this);
Label merge_runtime(this, &result);
if (flags & kAllowLargeObjectAllocation) {
Label next(this);
GotoIf(IsRegularHeapObjectSize(size_in_bytes), &next);
Node* runtime_flags = SmiConstant(
Smi::FromInt(AllocateDoubleAlignFlag::encode(false) |
AllocateTargetSpace::encode(AllocationSpace::LO_SPACE)));
Node* const runtime_result =
CallRuntime(Runtime::kAllocateInTargetSpace, NoContextConstant(),
SmiTag(size_in_bytes), runtime_flags);
result.Bind(runtime_result);
Goto(&merge_runtime);
Bind(&next);
}
Node* new_top = IntPtrAdd(top, size_in_bytes);
Branch(UintPtrGreaterThanOrEqual(new_top, limit), &runtime_call,
&no_runtime_call);
Bind(&runtime_call);
Node* runtime_result;
if (flags & kPretenured) {
Node* runtime_flags = SmiConstant(
Smi::FromInt(AllocateDoubleAlignFlag::encode(false) |
AllocateTargetSpace::encode(AllocationSpace::OLD_SPACE)));
runtime_result =
CallRuntime(Runtime::kAllocateInTargetSpace, NoContextConstant(),
SmiTag(size_in_bytes), runtime_flags);
} else {
runtime_result = CallRuntime(Runtime::kAllocateInNewSpace,
NoContextConstant(), SmiTag(size_in_bytes));
}
result.Bind(runtime_result);
Goto(&merge_runtime);
// When there is enough space, return `top' and bump it up.
Bind(&no_runtime_call);
Node* no_runtime_result = top;
StoreNoWriteBarrier(MachineType::PointerRepresentation(), top_address,
new_top);
no_runtime_result = BitcastWordToTagged(
IntPtrAdd(no_runtime_result, IntPtrConstant(kHeapObjectTag)));
result.Bind(no_runtime_result);
Goto(&merge_runtime);
Bind(&merge_runtime);
return result.value();
}
Node* CodeStubAssembler::AllocateRawAligned(Node* size_in_bytes,
AllocationFlags flags,
Node* top_address,
Node* limit_address) {
Node* top = Load(MachineType::Pointer(), top_address);
Node* limit = Load(MachineType::Pointer(), limit_address);
Variable adjusted_size(this, MachineType::PointerRepresentation());
adjusted_size.Bind(size_in_bytes);
if (flags & kDoubleAlignment) {
// TODO(epertoso): Simd128 alignment.
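// When the current top is not double aligned, one extra pointer-sized word is
// requested; after the unaligned allocation below, a one-pointer filler is
// stored at the old top and the returned address is bumped past it, so the
// object itself starts on a double-aligned boundary.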
Label aligned(this), not_aligned(this), merge(this, &adjusted_size);
Branch(WordAnd(top, IntPtrConstant(kDoubleAlignmentMask)), &not_aligned,
&aligned);
Bind(&not_aligned);
Node* not_aligned_size =
IntPtrAdd(size_in_bytes, IntPtrConstant(kPointerSize));
adjusted_size.Bind(not_aligned_size);
Goto(&merge);
Bind(&aligned);
Goto(&merge);
Bind(&merge);
}
Variable address(this, MachineRepresentation::kTagged);
address.Bind(AllocateRawUnaligned(adjusted_size.value(), kNone, top, limit));
Label needs_filler(this), doesnt_need_filler(this),
merge_address(this, &address);
Branch(IntPtrEqual(adjusted_size.value(), size_in_bytes), &doesnt_need_filler,
&needs_filler);
Bind(&needs_filler);
// Store a filler and increase the address by kPointerSize.
// TODO(epertoso): this code assumes that we only align to kDoubleSize. Change
// it when Simd128 alignment is supported.
StoreNoWriteBarrier(MachineType::PointerRepresentation(), top,
LoadRoot(Heap::kOnePointerFillerMapRootIndex));
address.Bind(BitcastWordToTagged(
IntPtrAdd(address.value(), IntPtrConstant(kPointerSize))));
Goto(&merge_address);
Bind(&doesnt_need_filler);
Goto(&merge_address);
Bind(&merge_address);
// Update the top.
StoreNoWriteBarrier(MachineType::PointerRepresentation(), top_address,
IntPtrAdd(top, adjusted_size.value()));
return address.value();
}
Node* CodeStubAssembler::Allocate(Node* size_in_bytes, AllocationFlags flags) {
Comment("Allocate");
bool const new_space = !(flags & kPretenured);
Node* top_address = ExternalConstant(
new_space
? ExternalReference::new_space_allocation_top_address(isolate())
: ExternalReference::old_space_allocation_top_address(isolate()));
Node* limit_address = ExternalConstant(
new_space
? ExternalReference::new_space_allocation_limit_address(isolate())
: ExternalReference::old_space_allocation_limit_address(isolate()));
#ifdef V8_HOST_ARCH_32_BIT
if (flags & kDoubleAlignment) {
return AllocateRawAligned(size_in_bytes, flags, top_address, limit_address);
}
#endif
return AllocateRawUnaligned(size_in_bytes, flags, top_address, limit_address);
}
Node* CodeStubAssembler::Allocate(int size_in_bytes, AllocationFlags flags) {
return CodeStubAssembler::Allocate(IntPtrConstant(size_in_bytes), flags);
}
Node* CodeStubAssembler::InnerAllocate(Node* previous, Node* offset) {
return BitcastWordToTagged(IntPtrAdd(BitcastTaggedToWord(previous), offset));
}
Node* CodeStubAssembler::InnerAllocate(Node* previous, int offset) {
return InnerAllocate(previous, IntPtrConstant(offset));
}
Node* CodeStubAssembler::IsRegularHeapObjectSize(Node* size) {
return UintPtrLessThanOrEqual(size,
IntPtrConstant(kMaxRegularHeapObjectSize));
}
void CodeStubAssembler::BranchIfToBooleanIsTrue(Node* value, Label* if_true,
Label* if_false) {
Label if_valueissmi(this), if_valueisnotsmi(this), if_valueisstring(this),
if_valueisheapnumber(this), if_valueisother(this);
// Fast check for Boolean {value}s (common case).
GotoIf(WordEqual(value, BooleanConstant(true)), if_true);
GotoIf(WordEqual(value, BooleanConstant(false)), if_false);
// Check if {value} is a Smi or a HeapObject.
Branch(TaggedIsSmi(value), &if_valueissmi, &if_valueisnotsmi);
Bind(&if_valueissmi);
{
// The {value} is a Smi, only need to check against zero.
BranchIfSmiEqual(value, SmiConstant(0), if_false, if_true);
}
Bind(&if_valueisnotsmi);
{
// The {value} is a HeapObject, load its map.
Node* value_map = LoadMap(value);
// Load the {value}s instance type.
Node* value_instance_type = LoadMapInstanceType(value_map);
// Dispatch based on the instance type; we distinguish all String instance
// types, the HeapNumber type and everything else.
GotoIf(Word32Equal(value_instance_type, Int32Constant(HEAP_NUMBER_TYPE)),
&if_valueisheapnumber);
Branch(IsStringInstanceType(value_instance_type), &if_valueisstring,
&if_valueisother);
Bind(&if_valueisstring);
{
// Load the string length field of the {value}.
Node* value_length = LoadObjectField(value, String::kLengthOffset);
// Check if the {value} is the empty string.
BranchIfSmiEqual(value_length, SmiConstant(0), if_false, if_true);
}
Bind(&if_valueisheapnumber);
{
// Load the floating point value of {value}.
Node* value_value = LoadObjectField(value, HeapNumber::kValueOffset,
MachineType::Float64());
// Check if the floating point {value} is neither 0.0, -0.0 nor NaN.
Branch(Float64LessThan(Float64Constant(0.0), Float64Abs(value_value)),
if_true, if_false);
}
Bind(&if_valueisother);
{
// Load the bit field from the {value}s map. The {value} is now either
// Null or Undefined, which have the undetectable bit set (so we always
// return false for those), or a Symbol or Simd128Value, whose maps never
// have the undetectable bit set (so we always return true for those), or
// a JSReceiver, which may or may not have the undetectable bit set.
Node* value_map_bitfield = LoadMapBitField(value_map);
Node* value_map_undetectable = Word32And(
value_map_bitfield, Int32Constant(1 << Map::kIsUndetectable));
// Check if the {value} is undetectable.
Branch(Word32Equal(value_map_undetectable, Int32Constant(0)), if_true,
if_false);
}
}
}
Node* CodeStubAssembler::LoadFromFrame(int offset, MachineType rep) {
Node* frame_pointer = LoadFramePointer();
return Load(rep, frame_pointer, IntPtrConstant(offset));
}
Node* CodeStubAssembler::LoadFromParentFrame(int offset, MachineType rep) {
Node* frame_pointer = LoadParentFramePointer();
return Load(rep, frame_pointer, IntPtrConstant(offset));
}
Node* CodeStubAssembler::LoadBufferObject(Node* buffer, int offset,
MachineType rep) {
return Load(rep, buffer, IntPtrConstant(offset));
}
Node* CodeStubAssembler::LoadObjectField(Node* object, int offset,
MachineType rep) {
return Load(rep, object, IntPtrConstant(offset - kHeapObjectTag));
}
Node* CodeStubAssembler::LoadObjectField(Node* object, Node* offset,
MachineType rep) {
return Load(rep, object, IntPtrSub(offset, IntPtrConstant(kHeapObjectTag)));
}
Node* CodeStubAssembler::LoadAndUntagObjectField(Node* object, int offset) {
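// On 64-bit targets a smi keeps its 32-bit payload in the upper half of the
// tagged word, so on little-endian machines the payload can be read directly
// as an Int32 at offset + kPointerSize / 2, avoiding the untagging shift.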
if (Is64()) {
#if V8_TARGET_LITTLE_ENDIAN
offset += kPointerSize / 2;
#endif
return ChangeInt32ToInt64(
LoadObjectField(object, offset, MachineType::Int32()));
} else {
return SmiToWord(LoadObjectField(object, offset, MachineType::AnyTagged()));
}
}
Node* CodeStubAssembler::LoadAndUntagToWord32ObjectField(Node* object,
int offset) {
if (Is64()) {
#if V8_TARGET_LITTLE_ENDIAN
offset += kPointerSize / 2;
#endif
return LoadObjectField(object, offset, MachineType::Int32());
} else {
return SmiToWord32(
LoadObjectField(object, offset, MachineType::AnyTagged()));
}
}
Node* CodeStubAssembler::LoadAndUntagSmi(Node* base, int index) {
if (Is64()) {
#if V8_TARGET_LITTLE_ENDIAN
index += kPointerSize / 2;
#endif
return ChangeInt32ToInt64(
Load(MachineType::Int32(), base, IntPtrConstant(index)));
} else {
return SmiToWord(
Load(MachineType::AnyTagged(), base, IntPtrConstant(index)));
}
}
Node* CodeStubAssembler::LoadAndUntagToWord32Root(
Heap::RootListIndex root_index) {
Node* roots_array_start =
ExternalConstant(ExternalReference::roots_array_start(isolate()));
int index = root_index * kPointerSize;
if (Is64()) {
#if V8_TARGET_LITTLE_ENDIAN
index += kPointerSize / 2;
#endif
return Load(MachineType::Int32(), roots_array_start, IntPtrConstant(index));
} else {
return SmiToWord32(Load(MachineType::AnyTagged(), roots_array_start,
IntPtrConstant(index)));
}
}
Node* CodeStubAssembler::LoadHeapNumberValue(Node* object) {
return LoadObjectField(object, HeapNumber::kValueOffset,
MachineType::Float64());
}
Node* CodeStubAssembler::LoadMap(Node* object) {
return LoadObjectField(object, HeapObject::kMapOffset);
}
Node* CodeStubAssembler::LoadInstanceType(Node* object) {
return LoadMapInstanceType(LoadMap(object));
}
Node* CodeStubAssembler::HasInstanceType(Node* object,
InstanceType instance_type) {
return Word32Equal(LoadInstanceType(object), Int32Constant(instance_type));
}
Node* CodeStubAssembler::LoadProperties(Node* object) {
return LoadObjectField(object, JSObject::kPropertiesOffset);
}
Node* CodeStubAssembler::LoadElements(Node* object) {
return LoadObjectField(object, JSObject::kElementsOffset);
}
Node* CodeStubAssembler::LoadJSArrayLength(Node* array) {
CSA_ASSERT(this, IsJSArray(array));
return LoadObjectField(array, JSArray::kLengthOffset);
}
Node* CodeStubAssembler::LoadFixedArrayBaseLength(Node* array) {
return LoadObjectField(array, FixedArrayBase::kLengthOffset);
}
Node* CodeStubAssembler::LoadAndUntagFixedArrayBaseLength(Node* array) {
return LoadAndUntagObjectField(array, FixedArrayBase::kLengthOffset);
}
Node* CodeStubAssembler::LoadMapBitField(Node* map) {
CSA_SLOW_ASSERT(this, IsMap(map));
return LoadObjectField(map, Map::kBitFieldOffset, MachineType::Uint8());
}
Node* CodeStubAssembler::LoadMapBitField2(Node* map) {
CSA_SLOW_ASSERT(this, IsMap(map));
return LoadObjectField(map, Map::kBitField2Offset, MachineType::Uint8());
}
Node* CodeStubAssembler::LoadMapBitField3(Node* map) {
CSA_SLOW_ASSERT(this, IsMap(map));
return LoadObjectField(map, Map::kBitField3Offset, MachineType::Uint32());
}
Node* CodeStubAssembler::LoadMapInstanceType(Node* map) {
return LoadObjectField(map, Map::kInstanceTypeOffset, MachineType::Uint8());
}
Node* CodeStubAssembler::LoadMapElementsKind(Node* map) {
CSA_SLOW_ASSERT(this, IsMap(map));
Node* bit_field2 = LoadMapBitField2(map);
return DecodeWord32<Map::ElementsKindBits>(bit_field2);
}
Node* CodeStubAssembler::LoadMapDescriptors(Node* map) {
CSA_SLOW_ASSERT(this, IsMap(map));
return LoadObjectField(map, Map::kDescriptorsOffset);
}
Node* CodeStubAssembler::LoadMapPrototype(Node* map) {
CSA_SLOW_ASSERT(this, IsMap(map));
return LoadObjectField(map, Map::kPrototypeOffset);
}
Node* CodeStubAssembler::LoadMapPrototypeInfo(Node* map,
Label* if_no_proto_info) {
CSA_ASSERT(this, IsMap(map));
Node* prototype_info =
LoadObjectField(map, Map::kTransitionsOrPrototypeInfoOffset);
GotoIf(TaggedIsSmi(prototype_info), if_no_proto_info);
GotoUnless(WordEqual(LoadMap(prototype_info),
LoadRoot(Heap::kPrototypeInfoMapRootIndex)),
if_no_proto_info);
return prototype_info;
}
Node* CodeStubAssembler::LoadMapInstanceSize(Node* map) {
CSA_SLOW_ASSERT(this, IsMap(map));
return ChangeUint32ToWord(
LoadObjectField(map, Map::kInstanceSizeOffset, MachineType::Uint8()));
}
Node* CodeStubAssembler::LoadMapInobjectProperties(Node* map) {
CSA_SLOW_ASSERT(this, IsMap(map));
// See Map::GetInObjectProperties() for details.
STATIC_ASSERT(LAST_JS_OBJECT_TYPE == LAST_TYPE);
CSA_ASSERT(this,
Int32GreaterThanOrEqual(LoadMapInstanceType(map),
Int32Constant(FIRST_JS_OBJECT_TYPE)));
return ChangeUint32ToWord(LoadObjectField(
map, Map::kInObjectPropertiesOrConstructorFunctionIndexOffset,
MachineType::Uint8()));
}
Node* CodeStubAssembler::LoadMapConstructorFunctionIndex(Node* map) {
CSA_SLOW_ASSERT(this, IsMap(map));
// See Map::GetConstructorFunctionIndex() for details.
STATIC_ASSERT(FIRST_PRIMITIVE_TYPE == FIRST_TYPE);
CSA_ASSERT(this, Int32LessThanOrEqual(LoadMapInstanceType(map),
Int32Constant(LAST_PRIMITIVE_TYPE)));
return ChangeUint32ToWord(LoadObjectField(
map, Map::kInObjectPropertiesOrConstructorFunctionIndexOffset,
MachineType::Uint8()));
}
Node* CodeStubAssembler::LoadMapConstructor(Node* map) {
CSA_SLOW_ASSERT(this, IsMap(map));
Variable result(this, MachineRepresentation::kTagged);
result.Bind(LoadObjectField(map, Map::kConstructorOrBackPointerOffset));
Label done(this), loop(this, &result);
Goto(&loop);
Bind(&loop);
{
GotoIf(TaggedIsSmi(result.value()), &done);
Node* is_map_type =
Word32Equal(LoadInstanceType(result.value()), Int32Constant(MAP_TYPE));
GotoUnless(is_map_type, &done);
result.Bind(
LoadObjectField(result.value(), Map::kConstructorOrBackPointerOffset));
Goto(&loop);
}
Bind(&done);
return result.value();
}
Node* CodeStubAssembler::LoadNameHashField(Node* name) {
CSA_ASSERT(this, IsName(name));
return LoadObjectField(name, Name::kHashFieldOffset, MachineType::Uint32());
}
Node* CodeStubAssembler::LoadNameHash(Node* name, Label* if_hash_not_computed) {
Node* hash_field = LoadNameHashField(name);
if (if_hash_not_computed != nullptr) {
GotoIf(Word32Equal(
Word32And(hash_field, Int32Constant(Name::kHashNotComputedMask)),
Int32Constant(0)),
if_hash_not_computed);
}
return Word32Shr(hash_field, Int32Constant(Name::kHashShift));
}
Node* CodeStubAssembler::LoadStringLength(Node* object) {
CSA_ASSERT(this, IsString(object));
return LoadObjectField(object, String::kLengthOffset);
}
Node* CodeStubAssembler::LoadJSValueValue(Node* object) {
CSA_ASSERT(this, IsJSValue(object));
return LoadObjectField(object, JSValue::kValueOffset);
}
Node* CodeStubAssembler::LoadWeakCellValueUnchecked(Node* weak_cell) {
// TODO(ishell): fix callers.
return LoadObjectField(weak_cell, WeakCell::kValueOffset);
}
Node* CodeStubAssembler::LoadWeakCellValue(Node* weak_cell, Label* if_cleared) {
CSA_ASSERT(this, IsWeakCell(weak_cell));
Node* value = LoadWeakCellValueUnchecked(weak_cell);
if (if_cleared != nullptr) {
GotoIf(WordEqual(value, IntPtrConstant(0)), if_cleared);
}
return value;
}
Node* CodeStubAssembler::LoadFixedArrayElement(Node* object, Node* index_node,
int additional_offset,
ParameterMode parameter_mode) {
int32_t header_size =
FixedArray::kHeaderSize + additional_offset - kHeapObjectTag;
Node* offset = ElementOffsetFromIndex(index_node, FAST_HOLEY_ELEMENTS,
parameter_mode, header_size);
return Load(MachineType::AnyTagged(), object, offset);
}
Node* CodeStubAssembler::LoadFixedTypedArrayElement(
Node* data_pointer, Node* index_node, ElementsKind elements_kind,
ParameterMode parameter_mode) {
Node* offset =
ElementOffsetFromIndex(index_node, elements_kind, parameter_mode, 0);
MachineType type;
switch (elements_kind) {
case UINT8_ELEMENTS: /* fall through */
case UINT8_CLAMPED_ELEMENTS:
type = MachineType::Uint8();
break;
case INT8_ELEMENTS:
type = MachineType::Int8();
break;
case UINT16_ELEMENTS:
type = MachineType::Uint16();
break;
case INT16_ELEMENTS:
type = MachineType::Int16();
break;
case UINT32_ELEMENTS:
type = MachineType::Uint32();
break;
case INT32_ELEMENTS:
type = MachineType::Int32();
break;
case FLOAT32_ELEMENTS:
type = MachineType::Float32();
break;
case FLOAT64_ELEMENTS:
type = MachineType::Float64();
break;
default:
UNREACHABLE();
}
return Load(type, data_pointer, offset);
}
Node* CodeStubAssembler::LoadAndUntagToWord32FixedArrayElement(
Node* object, Node* index_node, int additional_offset,
ParameterMode parameter_mode) {
int32_t header_size =
FixedArray::kHeaderSize + additional_offset - kHeapObjectTag;
#if V8_TARGET_LITTLE_ENDIAN
if (Is64()) {
header_size += kPointerSize / 2;
}
#endif
Node* offset = ElementOffsetFromIndex(index_node, FAST_HOLEY_ELEMENTS,
parameter_mode, header_size);
if (Is64()) {
return Load(MachineType::Int32(), object, offset);
} else {
return SmiToWord32(Load(MachineType::AnyTagged(), object, offset));
}
}
Node* CodeStubAssembler::LoadFixedDoubleArrayElement(
Node* object, Node* index_node, MachineType machine_type,
int additional_offset, ParameterMode parameter_mode, Label* if_hole) {
CSA_ASSERT(this, IsFixedDoubleArray(object));
int32_t header_size =
FixedDoubleArray::kHeaderSize + additional_offset - kHeapObjectTag;
Node* offset = ElementOffsetFromIndex(index_node, FAST_HOLEY_DOUBLE_ELEMENTS,
parameter_mode, header_size);
return LoadDoubleWithHoleCheck(object, offset, if_hole, machine_type);
}
Node* CodeStubAssembler::LoadDoubleWithHoleCheck(Node* base, Node* offset,
Label* if_hole,
MachineType machine_type) {
if (if_hole) {
// TODO(ishell): Compare only the upper part for the hole once the
// compiler is able to fold addition of already complex |offset| with
// |kIeeeDoubleExponentWordOffset| into one addressing mode.
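// The hole is stored as a dedicated NaN bit pattern (kHoleNanInt64), so it can
// be detected by comparing raw bits: the full 64-bit word on 64-bit targets,
// or just the upper (exponent) word against kHoleNanUpper32 on 32-bit targets.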
if (Is64()) {
Node* element = Load(MachineType::Uint64(), base, offset);
GotoIf(Word64Equal(element, Int64Constant(kHoleNanInt64)), if_hole);
} else {
Node* element_upper = Load(
MachineType::Uint32(), base,
IntPtrAdd(offset, IntPtrConstant(kIeeeDoubleExponentWordOffset)));
GotoIf(Word32Equal(element_upper, Int32Constant(kHoleNanUpper32)),
if_hole);
}
}
if (machine_type.IsNone()) {
// This means the actual value is not needed.
return nullptr;
}
return Load(machine_type, base, offset);
}
Node* CodeStubAssembler::LoadContextElement(Node* context, int slot_index) {
int offset = Context::SlotOffset(slot_index);
return Load(MachineType::AnyTagged(), context, IntPtrConstant(offset));
}
Node* CodeStubAssembler::LoadContextElement(Node* context, Node* slot_index) {
Node* offset =
IntPtrAdd(WordShl(slot_index, kPointerSizeLog2),
IntPtrConstant(Context::kHeaderSize - kHeapObjectTag));
return Load(MachineType::AnyTagged(), context, offset);
}
Node* CodeStubAssembler::StoreContextElement(Node* context, int slot_index,
Node* value) {
int offset = Context::SlotOffset(slot_index);
return Store(context, IntPtrConstant(offset), value);
}
Node* CodeStubAssembler::StoreContextElement(Node* context, Node* slot_index,
Node* value) {
Node* offset =
IntPtrAdd(WordShl(slot_index, kPointerSizeLog2),
IntPtrConstant(Context::kHeaderSize - kHeapObjectTag));
return Store(context, offset, value);
}
Node* CodeStubAssembler::StoreContextElementNoWriteBarrier(Node* context,
int slot_index,
Node* value) {
int offset = Context::SlotOffset(slot_index);
return StoreNoWriteBarrier(MachineRepresentation::kTagged, context,
IntPtrConstant(offset), value);
}
Node* CodeStubAssembler::LoadNativeContext(Node* context) {
return LoadContextElement(context, Context::NATIVE_CONTEXT_INDEX);
}
Node* CodeStubAssembler::LoadJSArrayElementsMap(ElementsKind kind,
Node* native_context) {
CSA_ASSERT(this, IsNativeContext(native_context));
return LoadContextElement(native_context, Context::ArrayMapIndex(kind));
}
Node* CodeStubAssembler::StoreHeapNumberValue(Node* object, Node* value) {
return StoreObjectFieldNoWriteBarrier(object, HeapNumber::kValueOffset, value,
MachineRepresentation::kFloat64);
}
Node* CodeStubAssembler::StoreObjectField(
Node* object, int offset, Node* value) {
DCHECK_NE(HeapObject::kMapOffset, offset); // Use StoreMap instead.
return Store(object, IntPtrConstant(offset - kHeapObjectTag), value);
}
Node* CodeStubAssembler::StoreObjectField(Node* object, Node* offset,
Node* value) {
int const_offset;
if (ToInt32Constant(offset, const_offset)) {
return StoreObjectField(object, const_offset, value);
}
return Store(object, IntPtrSub(offset, IntPtrConstant(kHeapObjectTag)),
value);
}
Node* CodeStubAssembler::StoreObjectFieldNoWriteBarrier(
Node* object, int offset, Node* value, MachineRepresentation rep) {
return StoreNoWriteBarrier(rep, object,
IntPtrConstant(offset - kHeapObjectTag), value);
}
Node* CodeStubAssembler::StoreObjectFieldNoWriteBarrier(
Node* object, Node* offset, Node* value, MachineRepresentation rep) {
int const_offset;
if (ToInt32Constant(offset, const_offset)) {
return StoreObjectFieldNoWriteBarrier(object, const_offset, value, rep);
}
return StoreNoWriteBarrier(
rep, object, IntPtrSub(offset, IntPtrConstant(kHeapObjectTag)), value);
}
Node* CodeStubAssembler::StoreMap(Node* object, Node* map) {
CSA_SLOW_ASSERT(this, IsMap(map));
return StoreWithMapWriteBarrier(
object, IntPtrConstant(HeapObject::kMapOffset - kHeapObjectTag), map);
}
Node* CodeStubAssembler::StoreMapNoWriteBarrier(
Node* object, Heap::RootListIndex map_root_index) {
return StoreMapNoWriteBarrier(object, LoadRoot(map_root_index));
}
Node* CodeStubAssembler::StoreMapNoWriteBarrier(Node* object, Node* map) {
CSA_SLOW_ASSERT(this, IsMap(map));
return StoreNoWriteBarrier(
MachineRepresentation::kTagged, object,
IntPtrConstant(HeapObject::kMapOffset - kHeapObjectTag), map);
}
Node* CodeStubAssembler::StoreObjectFieldRoot(Node* object, int offset,
Heap::RootListIndex root_index) {
if (Heap::RootIsImmortalImmovable(root_index)) {
return StoreObjectFieldNoWriteBarrier(object, offset, LoadRoot(root_index));
} else {
return StoreObjectField(object, offset, LoadRoot(root_index));
}
}
Node* CodeStubAssembler::StoreFixedArrayElement(Node* object, Node* index_node,
Node* value,
WriteBarrierMode barrier_mode,
int additional_offset,
ParameterMode parameter_mode) {
DCHECK(barrier_mode == SKIP_WRITE_BARRIER ||
barrier_mode == UPDATE_WRITE_BARRIER);
int header_size =
FixedArray::kHeaderSize + additional_offset - kHeapObjectTag;
Node* offset = ElementOffsetFromIndex(index_node, FAST_HOLEY_ELEMENTS,
parameter_mode, header_size);
if (barrier_mode == SKIP_WRITE_BARRIER) {
return StoreNoWriteBarrier(MachineRepresentation::kTagged, object, offset,
value);
} else {
return Store(object, offset, value);
}
}
Node* CodeStubAssembler::StoreFixedDoubleArrayElement(
Node* object, Node* index_node, Node* value, ParameterMode parameter_mode) {
CSA_ASSERT(this, IsFixedDoubleArray(object));
Node* offset =
ElementOffsetFromIndex(index_node, FAST_DOUBLE_ELEMENTS, parameter_mode,
FixedArray::kHeaderSize - kHeapObjectTag);
MachineRepresentation rep = MachineRepresentation::kFloat64;
return StoreNoWriteBarrier(rep, object, offset, value);
}
Node* CodeStubAssembler::BuildAppendJSArray(ElementsKind kind, Node* context,
Node* array,
CodeStubArguments& args,
Variable& arg_index,
Label* bailout) {
Comment("BuildAppendJSArray: %s", ElementsKindToString(kind));
Label pre_bailout(this);
Label success(this);
Variable var_elements(this, MachineRepresentation::kTagged);
Variable var_tagged_length(this, MachineRepresentation::kTagged);
ParameterMode mode = OptimalParameterMode();
Variable var_length(this, OptimalParameterRepresentation());
var_length.Bind(TaggedToParameter(LoadJSArrayLength(array), mode));
var_elements.Bind(LoadElements(array));
Node* capacity =
TaggedToParameter(LoadFixedArrayBaseLength(var_elements.value()), mode);
// Resize the capacity of the fixed array if it doesn't fit.
Label fits(this, &var_elements);
Node* first = arg_index.value();
Node* growth = IntPtrSubFoldConstants(args.GetLength(), first);
Node* new_length =
IntPtrOrSmiAdd(WordToParameter(growth, mode), var_length.value(), mode);
GotoUnless(IntPtrOrSmiGreaterThanOrEqual(new_length, capacity, mode), &fits);
Node* new_capacity = CalculateNewElementsCapacity(
IntPtrOrSmiAdd(new_length, IntPtrOrSmiConstant(1, mode), mode), mode);
var_elements.Bind(GrowElementsCapacity(array, var_elements.value(), kind,
kind, capacity, new_capacity, mode,
&pre_bailout));
Goto(&fits);
Bind(&fits);
Node* elements = var_elements.value();
// Push each argument onto the end of the array now that there is enough
// capacity.
CodeStubAssembler::VariableList push_vars({&var_length}, zone());
args.ForEach(
push_vars,
[this, kind, mode, elements, &var_length, &pre_bailout](Node* arg) {
if (IsFastSmiElementsKind(kind)) {
GotoIf(TaggedIsNotSmi(arg), &pre_bailout);
} else if (IsFastDoubleElementsKind(kind)) {
GotoIfNotNumber(arg, &pre_bailout);
}
if (IsFastDoubleElementsKind(kind)) {
Node* double_value = ChangeNumberToFloat64(arg);
StoreFixedDoubleArrayElement(elements, var_length.value(),
Float64SilenceNaN(double_value), mode);
} else {
WriteBarrierMode barrier_mode = IsFastSmiElementsKind(kind)
? SKIP_WRITE_BARRIER
: UPDATE_WRITE_BARRIER;
StoreFixedArrayElement(elements, var_length.value(), arg,
barrier_mode, 0, mode);
}
Increment(var_length, 1, mode);
},
first, nullptr);
{
Node* length = ParameterToTagged(var_length.value(), mode);
var_tagged_length.Bind(length);
StoreObjectFieldNoWriteBarrier(array, JSArray::kLengthOffset, length);
Goto(&success);
}
Bind(&pre_bailout);
{
Node* length = ParameterToTagged(var_length.value(), mode);
var_tagged_length.Bind(length);
Node* diff = SmiSub(length, LoadJSArrayLength(array));
StoreObjectFieldNoWriteBarrier(array, JSArray::kLengthOffset, length);
arg_index.Bind(IntPtrAdd(arg_index.value(), SmiUntag(diff)));
Goto(bailout);
}
Bind(&success);
return var_tagged_length.value();
}
Node* CodeStubAssembler::AllocateHeapNumber(MutableMode mode) {
Node* result = Allocate(HeapNumber::kSize, kNone);
Heap::RootListIndex heap_map_index =
mode == IMMUTABLE ? Heap::kHeapNumberMapRootIndex
: Heap::kMutableHeapNumberMapRootIndex;
StoreMapNoWriteBarrier(result, heap_map_index);
return result;
}
Node* CodeStubAssembler::AllocateHeapNumberWithValue(Node* value,
MutableMode mode) {
Node* result = AllocateHeapNumber(mode);
StoreHeapNumberValue(result, value);
return result;
}
Node* CodeStubAssembler::AllocateSeqOneByteString(int length,
AllocationFlags flags) {
Comment("AllocateSeqOneByteString");
Node* result = Allocate(SeqOneByteString::SizeFor(length), flags);
DCHECK(Heap::RootIsImmortalImmovable(Heap::kOneByteStringMapRootIndex));
StoreMapNoWriteBarrier(result, Heap::kOneByteStringMapRootIndex);
StoreObjectFieldNoWriteBarrier(result, SeqOneByteString::kLengthOffset,
SmiConstant(Smi::FromInt(length)));
// Initialize both used and unused parts of hash field slot at once.
StoreObjectFieldNoWriteBarrier(result, SeqOneByteString::kHashFieldSlot,
IntPtrConstant(String::kEmptyHashField),
MachineType::PointerRepresentation());
return result;
}
Node* CodeStubAssembler::AllocateSeqOneByteString(Node* context, Node* length,
ParameterMode mode,
AllocationFlags flags) {
Comment("AllocateSeqOneByteString");
Variable var_result(this, MachineRepresentation::kTagged);
// Compute the SeqOneByteString size and check if it fits into new space.
Label if_sizeissmall(this), if_notsizeissmall(this, Label::kDeferred),
if_join(this);
Node* raw_size = GetArrayAllocationSize(
length, UINT8_ELEMENTS, mode,
SeqOneByteString::kHeaderSize + kObjectAlignmentMask);
Node* size = WordAnd(raw_size, IntPtrConstant(~kObjectAlignmentMask));
Branch(IntPtrLessThanOrEqual(size, IntPtrConstant(kMaxRegularHeapObjectSize)),
&if_sizeissmall, &if_notsizeissmall);
Bind(&if_sizeissmall);
{
// Just allocate the SeqOneByteString in new space.
Node* result = Allocate(size, flags);
DCHECK(Heap::RootIsImmortalImmovable(Heap::kOneByteStringMapRootIndex));
StoreMapNoWriteBarrier(result, Heap::kOneByteStringMapRootIndex);
StoreObjectFieldNoWriteBarrier(result, SeqOneByteString::kLengthOffset,
ParameterToTagged(length, mode));
// Initialize both used and unused parts of hash field slot at once.
StoreObjectFieldNoWriteBarrier(result, SeqOneByteString::kHashFieldSlot,
IntPtrConstant(String::kEmptyHashField),
MachineType::PointerRepresentation());
var_result.Bind(result);
Goto(&if_join);
}
Bind(&if_notsizeissmall);
{
// We might need to allocate in large object space, so go to the runtime.
Node* result = CallRuntime(Runtime::kAllocateSeqOneByteString, context,
ParameterToTagged(length, mode));
var_result.Bind(result);
Goto(&if_join);
}
Bind(&if_join);
return var_result.value();
}
Node* CodeStubAssembler::AllocateSeqTwoByteString(int length,
AllocationFlags flags) {
Comment("AllocateSeqTwoByteString");
Node* result = Allocate(SeqTwoByteString::SizeFor(length), flags);
DCHECK(Heap::RootIsImmortalImmovable(Heap::kStringMapRootIndex));
StoreMapNoWriteBarrier(result, Heap::kStringMapRootIndex);
StoreObjectFieldNoWriteBarrier(result, SeqTwoByteString::kLengthOffset,
SmiConstant(Smi::FromInt(length)));
// Initialize both used and unused parts of hash field slot at once.
StoreObjectFieldNoWriteBarrier(result, SeqTwoByteString::kHashFieldSlot,
IntPtrConstant(String::kEmptyHashField),
MachineType::PointerRepresentation());
return result;
}
Node* CodeStubAssembler::AllocateSeqTwoByteString(Node* context, Node* length,
ParameterMode mode,
AllocationFlags flags) {
Comment("AllocateSeqTwoByteString");
Variable var_result(this, MachineRepresentation::kTagged);
// Compute the SeqTwoByteString size and check if it fits into new space.
Label if_sizeissmall(this), if_notsizeissmall(this, Label::kDeferred),
if_join(this);
Node* raw_size = GetArrayAllocationSize(
length, UINT16_ELEMENTS, mode,
SeqTwoByteString::kHeaderSize + kObjectAlignmentMask);
Node* size = WordAnd(raw_size, IntPtrConstant(~kObjectAlignmentMask));
Branch(IntPtrLessThanOrEqual(size, IntPtrConstant(kMaxRegularHeapObjectSize)),
&if_sizeissmall, &if_notsizeissmall);
Bind(&if_sizeissmall);
{
// Just allocate the SeqTwoByteString in new space.
Node* result = Allocate(size, flags);
DCHECK(Heap::RootIsImmortalImmovable(Heap::kStringMapRootIndex));
StoreMapNoWriteBarrier(result, Heap::kStringMapRootIndex);
StoreObjectFieldNoWriteBarrier(
result, SeqTwoByteString::kLengthOffset,
mode == SMI_PARAMETERS ? length : SmiFromWord(length));
// Initialize both used and unused parts of hash field slot at once.
StoreObjectFieldNoWriteBarrier(result, SeqTwoByteString::kHashFieldSlot,
IntPtrConstant(String::kEmptyHashField),
MachineType::PointerRepresentation());
var_result.Bind(result);
Goto(&if_join);
}
Bind(&if_notsizeissmall);
{
// We might need to allocate in large object space, so go to the runtime.
Node* result =
CallRuntime(Runtime::kAllocateSeqTwoByteString, context,
mode == SMI_PARAMETERS ? length : SmiFromWord(length));
var_result.Bind(result);
Goto(&if_join);
}
Bind(&if_join);
return var_result.value();
}
Node* CodeStubAssembler::AllocateSlicedString(
Heap::RootListIndex map_root_index, Node* length, Node* parent,
Node* offset) {
CSA_ASSERT(this, TaggedIsSmi(length));
Node* result = Allocate(SlicedString::kSize);
DCHECK(Heap::RootIsImmortalImmovable(map_root_index));
StoreMapNoWriteBarrier(result, map_root_index);
StoreObjectFieldNoWriteBarrier(result, SlicedString::kLengthOffset, length,
MachineRepresentation::kTagged);
// Initialize both used and unused parts of hash field slot at once.
StoreObjectFieldNoWriteBarrier(result, SlicedString::kHashFieldSlot,
IntPtrConstant(String::kEmptyHashField),
MachineType::PointerRepresentation());
StoreObjectFieldNoWriteBarrier(result, SlicedString::kParentOffset, parent,
MachineRepresentation::kTagged);
StoreObjectFieldNoWriteBarrier(result, SlicedString::kOffsetOffset, offset,
MachineRepresentation::kTagged);
return result;
}
Node* CodeStubAssembler::AllocateSlicedOneByteString(Node* length, Node* parent,
Node* offset) {
return AllocateSlicedString(Heap::kSlicedOneByteStringMapRootIndex, length,
parent, offset);
}
Node* CodeStubAssembler::AllocateSlicedTwoByteString(Node* length, Node* parent,
Node* offset) {
return AllocateSlicedString(Heap::kSlicedStringMapRootIndex, length, parent,
offset);
}
Node* CodeStubAssembler::AllocateConsString(Heap::RootListIndex map_root_index,
Node* length, Node* first,
Node* second,
AllocationFlags flags) {
CSA_ASSERT(this, TaggedIsSmi(length));
Node* result = Allocate(ConsString::kSize, flags);
DCHECK(Heap::RootIsImmortalImmovable(map_root_index));
StoreMapNoWriteBarrier(result, map_root_index);
StoreObjectFieldNoWriteBarrier(result, ConsString::kLengthOffset, length,
MachineRepresentation::kTagged);
// Initialize both used and unused parts of hash field slot at once.
StoreObjectFieldNoWriteBarrier(result, ConsString::kHashFieldSlot,
IntPtrConstant(String::kEmptyHashField),
MachineType::PointerRepresentation());
bool const new_space = !(flags & kPretenured);
if (new_space) {
StoreObjectFieldNoWriteBarrier(result, ConsString::kFirstOffset, first,
MachineRepresentation::kTagged);
StoreObjectFieldNoWriteBarrier(result, ConsString::kSecondOffset, second,
MachineRepresentation::kTagged);
} else {
StoreObjectField(result, ConsString::kFirstOffset, first);
StoreObjectField(result, ConsString::kSecondOffset, second);
}
return result;
}
Node* CodeStubAssembler::AllocateOneByteConsString(Node* length, Node* first,
Node* second,
AllocationFlags flags) {
return AllocateConsString(Heap::kConsOneByteStringMapRootIndex, length, first,
second, flags);
}
Node* CodeStubAssembler::AllocateTwoByteConsString(Node* length, Node* first,
Node* second,
AllocationFlags flags) {
return AllocateConsString(Heap::kConsStringMapRootIndex, length, first,
second, flags);
}
Node* CodeStubAssembler::NewConsString(Node* context, Node* length, Node* left,
Node* right, AllocationFlags flags) {
CSA_ASSERT(this, TaggedIsSmi(length));
// The resulting (concatenated) string can be a cons string.
Comment("Allocating ConsString");
Node* left_instance_type = LoadInstanceType(left);
Node* right_instance_type = LoadInstanceType(right);
// Compute intersection and difference of instance types.
Node* anded_instance_types =
Word32And(left_instance_type, right_instance_type);
Node* xored_instance_types =
Word32Xor(left_instance_type, right_instance_type);
// We create a one-byte cons string if
// 1. both strings are one-byte, or
// 2. at least one of the strings is two-byte, but happens to contain only
// one-byte characters.
// To do this, we check
// 1. if both strings are one-byte, or if the one-byte data hint is set in
// both strings, or
// 2. if one of the strings has the one-byte data hint set and the other
// string is one-byte.
STATIC_ASSERT(kOneByteStringTag != 0);
STATIC_ASSERT(kOneByteDataHintTag != 0);
Label one_byte_map(this);
Label two_byte_map(this);
Variable result(this, MachineRepresentation::kTagged);
Label done(this, &result);
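// Check 1: both strings are one-byte, or the one-byte data hint is set in
// both strings.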
GotoIf(Word32NotEqual(Word32And(anded_instance_types,
Int32Constant(kStringEncodingMask |
kOneByteDataHintTag)),
Int32Constant(0)),
&one_byte_map);
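// Check 2: the encoding bits and the data hint bits differ, i.e. one string
// is one-byte and the other carries the one-byte data hint.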
Branch(Word32NotEqual(Word32And(xored_instance_types,
Int32Constant(kStringEncodingMask |
kOneByteDataHintMask)),
Int32Constant(kOneByteStringTag | kOneByteDataHintTag)),
&two_byte_map, &one_byte_map);
Bind(&one_byte_map);
Comment("One-byte ConsString");
result.Bind(AllocateOneByteConsString(length, left, right, flags));
Goto(&done);
Bind(&two_byte_map);
Comment("Two-byte ConsString");
result.Bind(AllocateTwoByteConsString(length, left, right, flags));
Goto(&done);
Bind(&done);
return result.value();
}
Node* CodeStubAssembler::AllocateRegExpResult(Node* context, Node* length,
Node* index, Node* input) {
Node* const max_length =
SmiConstant(Smi::FromInt(JSArray::kInitialMaxFastElementArray));
CSA_ASSERT(this, SmiLessThanOrEqual(length, max_length));
USE(max_length);
// Allocate the JSRegExpResult.
// TODO(jgruber): Fold JSArray and FixedArray allocations, then remove
// unneeded store of elements.
Node* const result = Allocate(JSRegExpResult::kSize);
// TODO(jgruber): Store map as Heap constant?
Node* const native_context = LoadNativeContext(context);
Node* const map =
LoadContextElement(native_context, Context::REGEXP_RESULT_MAP_INDEX);
StoreMapNoWriteBarrier(result, map);
// Initialize the header before allocating the elements.
Node* const empty_array = EmptyFixedArrayConstant();
DCHECK(Heap::RootIsImmortalImmovable(Heap::kEmptyFixedArrayRootIndex));
StoreObjectFieldNoWriteBarrier(result, JSArray::kPropertiesOffset,
empty_array);
StoreObjectFieldNoWriteBarrier(result, JSArray::kElementsOffset, empty_array);
StoreObjectFieldNoWriteBarrier(result, JSArray::kLengthOffset, length);
StoreObjectFieldNoWriteBarrier(result, JSRegExpResult::kIndexOffset, index);
StoreObjectField(result, JSRegExpResult::kInputOffset, input);
Node* const zero = IntPtrConstant(0);
Node* const length_intptr = SmiUntag(length);
const ElementsKind elements_kind = FAST_ELEMENTS;
Node* const elements = AllocateFixedArray(elements_kind, length_intptr);
StoreObjectField(result, JSArray::kElementsOffset, elements);
// Fill in the elements with undefined.
FillFixedArrayWithValue(elements_kind, elements, zero, length_intptr,
Heap::kUndefinedValueRootIndex);
return result;
}
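// Example (illustrative only): a stub that needs a fresh dictionary-mode
// property store with room for a few entries could call the constant overload
// below, e.g. AllocateNameDictionary(4); the requested size must not exceed
// NameDictionary::kMaxCapacity.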
Node* CodeStubAssembler::AllocateNameDictionary(int at_least_space_for) {
return AllocateNameDictionary(IntPtrConstant(at_least_space_for));
}
Node* CodeStubAssembler::AllocateNameDictionary(Node* at_least_space_for) {
CSA_ASSERT(this, UintPtrLessThanOrEqual(
at_least_space_for,
IntPtrConstant(NameDictionary::kMaxCapacity)));
Node* capacity = HashTableComputeCapacity(at_least_space_for);
CSA_ASSERT(this, WordIsPowerOfTwo(capacity));
Node* length = EntryToIndex<NameDictionary>(capacity);
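// Total size in bytes: the FixedArray header plus one pointer per slot
// (|length| slots in total).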
Node* store_size =
IntPtrAddFoldConstants(WordShl(length, IntPtrConstant(kPointerSizeLog2)),
IntPtrConstant(NameDictionary::kHeaderSize));
Node* result = Allocate(store_size);
Comment("Initialize NameDictionary");
// Initialize FixedArray fields.
DCHECK(Heap::RootIsImmortalImmovable(Heap::kHashTableMapRootIndex));
StoreMapNoWriteBarrier(result, Heap::kHashTableMapRootIndex);
StoreObjectFieldNoWriteBarrier(result, FixedArray::kLengthOffset,
SmiFromWord(length));
// Initialize HashTable fields.
Node* zero = SmiConstant(0);
StoreFixedArrayElement(result, NameDictionary::kNumberOfElementsIndex, zero,
SKIP_WRITE_BARRIER);
StoreFixedArrayElement(result, NameDictionary::kNumberOfDeletedElementsIndex,
zero, SKIP_WRITE_BARRIER);
StoreFixedArrayElement(result, NameDictionary::kCapacityIndex,
SmiTag(capacity), SKIP_WRITE_BARRIER);
// Initialize Dictionary fields.
Node* filler = LoadRoot(Heap::kUndefinedValueRootIndex);
StoreFixedArrayElement(result, NameDictionary::kMaxNumberKeyIndex, filler,
SKIP_WRITE_BARRIER);
StoreFixedArrayElement(result, NameDictionary::kNextEnumerationIndexIndex,
SmiConstant(PropertyDetails::kInitialIndex),
SKIP_WRITE_BARRIER);
// Initialize NameDictionary elements.
Node* result_word = BitcastTaggedToWord(result);
Node* start_address = IntPtrAdd(
result_word, IntPtrConstant(NameDictionary::OffsetOfElementAt(
NameDictionary::kElementsStartIndex) -
kHeapObjectTag));
Node* end_address = IntPtrAdd(
result_word,
IntPtrSubFoldConstants(store_size, IntPtrConstant(kHeapObjectTag)));
StoreFieldsNoWriteBarrier(start_address, end_address, filler);
return result;
}
Node* CodeStubAssembler::AllocateJSObjectFromMap(Node* map, Node* properties,
Node* elements,
AllocationFlags flags) {
CSA_ASSERT(this, IsMap(map));
Node* size =
IntPtrMul(LoadMapInstanceSize(map), IntPtrConstant(kPointerSize));
CSA_ASSERT(this, IsRegularHeapObjectSize(size));
Node* object = Allocate(size, flags);
StoreMapNoWriteBarrier(object, map);
InitializeJSObjectFromMap(object, map, size, properties, elements);
return object;
}
void CodeStubAssembler::InitializeJSObjectFromMap(Node* object, Node* map,
Node* size, Node* properties,
Node* elements) {
// This helper assumes that the object is in new-space, as guarded by the
// check in AllocateJSObjectFromMap.
if (properties == nullptr) {
CSA_ASSERT(this, Word32BinaryNot(IsDictionaryMap((map))));
StoreObjectFieldRoot(object, JSObject::kPropertiesOffset,
Heap::kEmptyFixedArrayRootIndex);
} else {
StoreObjectFieldNoWriteBarrier(object, JSObject::kPropertiesOffset,
properties);
}
if (elements == nullptr) {
StoreObjectFieldRoot(object, JSObject::kElementsOffset,
Heap::kEmptyFixedArrayRootIndex);
} else {
StoreObjectFieldNoWriteBarrier(object, JSObject::kElementsOffset, elements);
}
InitializeJSObjectBody(object, map, size, JSObject::kHeaderSize);
}
void CodeStubAssembler::InitializeJSObjectBody(Node* object, Node* map,
Node* size, int start_offset) {
// TODO(cbruni): activate in-object slack tracking machinery.
Comment("InitializeJSObjectBody");
Node* filler = LoadRoot(Heap::kUndefinedValueRootIndex);
// Calculate the untagged field addresses.
object = BitcastTaggedToWord(object);
Node* start_address =
IntPtrAdd(object, IntPtrConstant(start_offset - kHeapObjectTag));
Node* end_address =
IntPtrSub(IntPtrAdd(object, size), IntPtrConstant(kHeapObjectTag));
StoreFieldsNoWriteBarrier(start_address, end_address, filler);
}
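// Stores |value| into every pointer-sized slot in the half-open range
// [start_address, end_address), without write barriers.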
void CodeStubAssembler::StoreFieldsNoWriteBarrier(Node* start_address,
Node* end_address,
Node* value) {
Comment("StoreFieldsNoWriteBarrier");
CSA_ASSERT(this, WordIsWordAligned(start_address));
CSA_ASSERT(this, WordIsWordAligned(end_address));
BuildFastLoop(
MachineType::PointerRepresentation(), start_address, end_address,
[this, value](Node* current) {
StoreNoWriteBarrier(MachineRepresentation::kTagged, current, value);
},
kPointerSize, IndexAdvanceMode::kPost);
}
Node* CodeStubAssembler::AllocateUninitializedJSArrayWithoutElements(
ElementsKind kind, Node* array_map, Node* length, Node* allocation_site) {
Comment("begin allocation of JSArray without elements");
int base_size = JSArray::kSize;
if (allocation_site != nullptr) {
base_size += AllocationMemento::kSize;
}
Node* size = IntPtrConstant(base_size);
Node* array = AllocateUninitializedJSArray(kind, array_map, length,
allocation_site, size);
return array;
}
std::pair<Node*, Node*>
CodeStubAssembler::AllocateUninitializedJSArrayWithElements(
ElementsKind kind, Node* array_map, Node* length, Node* allocation_site,
Node* capacity, ParameterMode capacity_mode) {
Comment("begin allocation of JSArray with elements");
int base_size = JSArray::kSize;
if (allocation_site != nullptr) {
base_size += AllocationMemento::kSize;
}
int elements_offset = base_size;
// Compute space for the elements.
base_size += FixedArray::kHeaderSize;
Node* size = ElementOffsetFromIndex(capacity, kind, capacity_mode, base_size);
Node* array = AllocateUninitializedJSArray(kind, array_map, length,
allocation_site, size);
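// The elements FixedArray is folded into the same allocation and placed
// directly after the JSArray (and the AllocationMemento, if present).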
Node* elements = InnerAllocate(array, elements_offset);
StoreObjectField(array, JSObject::kElementsOffset, elements);
return {array, elements};
}
Node* CodeStubAssembler::AllocateUninitializedJSArray(ElementsKind kind,
Node* array_map,
Node* length,
Node* allocation_site,
Node* size_in_bytes) {
Node* array = Allocate(size_in_bytes);
Comment("write JSArray headers");
StoreMapNoWriteBarrier(array, array_map);
CSA_ASSERT(this, TaggedIsSmi(length));
StoreObjectFieldNoWriteBarrier(array, JSArray::kLengthOffset, length);
StoreObjectFieldRoot(array, JSArray::kPropertiesOffset,
Heap::kEmptyFixedArrayRootIndex);
if (allocation_site != nullptr) {
InitializeAllocationMemento(array, JSArray::kSize, allocation_site);
}
return array;
}
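// Example (illustrative only): a stub creating a small FAST_ELEMENTS array
// might call
//   AllocateJSArray(FAST_ELEMENTS, array_map, IntPtrConstant(3),
//                   SmiConstant(3), nullptr, INTPTR_PARAMETERS);
// where |array_map| is the matching JSArray map (typically loaded from the
// native context) and the Smi length corresponds to the intptr capacity.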
Node* CodeStubAssembler::AllocateJSArray(ElementsKind kind, Node* array_map,
Node* capacity, Node* length,
Node* allocation_site,
ParameterMode capacity_mode) {
// Allocate both array and elements object, and initialize the JSArray.
Node *array, *elements;
std::tie(array, elements) = AllocateUninitializedJSArrayWithElements(
kind, array_map, length, allocation_site, capacity, capacity_mode);
// Set up the elements object.
Heap::RootListIndex elements_map_index =
IsFastDoubleElementsKind(kind) ? Heap::kFixedDoubleArrayMapRootIndex
: Heap::kFixedArrayMapRootIndex;
DCHECK(Heap::RootIsImmortalImmovable(elements_map_index));
StoreMapNoWriteBarrier(elements, elements_map_index);
StoreObjectFieldNoWriteBarrier(elements, FixedArray::kLengthOffset,
ParameterToTagged(capacity, capacity_mode));
// Fill in the elements with holes.
FillFixedArrayWithValue(kind, elements, IntPtrOrSmiConstant(0, capacity_mode),
capacity, Heap::kTheHoleValueRootIndex,
capacity_mode);
return array;
}
Node* CodeStubAssembler::AllocateFixedArray(ElementsKind kind,
Node* capacity_node,
ParameterMode mode,
AllocationFlags flags) {
CSA_ASSERT(this, IntPtrOrSmiGreaterThan(capacity_node,
IntPtrOrSmiConstant(0, mode), mode));
Node* total_size = GetFixedArrayAllocationSize(capacity_node, kind, mode);
// Allocate the FixedArray; its map and length are initialized below.
Node* array = Allocate(total_size, flags);
Heap::RootListIndex map_index = IsFastDoubleElementsKind(kind)
? Heap::kFixedDoubleArrayMapRootIndex
: Heap::kFixedArrayMapRootIndex;
DCHECK(Heap::RootIsImmortalImmovable(map_index));
StoreMapNoWriteBarrier(array, map_index);
StoreObjectFieldNoWriteBarrier(array, FixedArray::kLengthOffset,
ParameterToTagged(capacity_node, mode));
return array;
}
void CodeStubAssembler::FillFixedArrayWithValue(
ElementsKind kind, Node* array, Node* from_node, Node* to_node,
Heap::RootListIndex value_root_index, ParameterMode mode) {
bool is_double = IsFastDoubleElementsKind(kind);
DCHECK(value_root_index == Heap::kTheHoleValueRootIndex ||
value_root_index == Heap::kUndefinedValueRootIndex);
DCHECK_IMPLIES(is_double, value_root_index == Heap::kTheHoleValueRootIndex);
STATIC_ASSERT(kHoleNanLower32 == kHoleNanUpper32);
Node* double_hole =
Is64() ? Int64Constant(kHoleNanInt64) : Int32Constant(kHoleNanLower32);
Node* value = LoadRoot(value_root_index);
BuildFastFixedArrayForEach(
array, kind, from_node, to_node,
[this, value, is_double, double_hole](Node* array, Node* offset) {
if (is_double) {
// Don't use doubles to store the hole double, since manipulating the
// signaling NaN used for the hole in C++, e.g. with bit_cast, will
// change its value on ia32 (the x87 stack is used to return values
// and stores to the stack silently clear the signaling bit).
//
// TODO(danno): When we have a Float32/Float64 wrapper class that
// preserves double bits during manipulation, remove this code/change
// this to an indexed Float64 store.
if (Is64()) {
StoreNoWriteBarrier(MachineRepresentation::kWord64, array, offset,
double_hole);
} else {
StoreNoWriteBarrier(MachineRepresentation::kWord32, array, offset,
double_hole);
StoreNoWriteBarrier(MachineRepresentation::kWord32, array,
IntPtrAdd(offset, IntPtrConstant(kPointerSize)),
double_hole);
}
} else {
StoreNoWriteBarrier(MachineRepresentation::kTagged, array, offset,
value);
}
},
mode);
}
void CodeStubAssembler::CopyFixedArrayElements(
ElementsKind from_kind, Node* from_array, ElementsKind to_kind,
Node* to_array, Node* element_count, Node* capacity,
WriteBarrierMode barrier_mode, ParameterMode mode) {
STATIC_ASSERT(FixedArray::kHeaderSize == FixedDoubleArray::kHeaderSize);
const int first_element_offset = FixedArray::kHeaderSize - kHeapObjectTag;
Comment("[ CopyFixedArrayElements");
// Typed array elements are not supported.
DCHECK(!IsFixedTypedArrayElementsKind(from_kind));
DCHECK(!IsFixedTypedArrayElementsKind(to_kind));
Label done(this);
bool from_double_elements = IsFastDoubleElementsKind(from_kind);
bool to_double_elements = IsFastDoubleElementsKind(to_kind);
bool element_size_matches =
Is64() ||
IsFastDoubleElementsKind(from_kind) == IsFastDoubleElementsKind(to_kind);
bool doubles_to_objects_conversion =
IsFastDoubleElementsKind(from_kind) && IsFastObjectElementsKind(to_kind);
bool needs_write_barrier =
doubles_to_objects_conversion || (barrier_mode == UPDATE_WRITE_BARRIER &&
IsFastObjectElementsKind(to_kind));
Node* double_hole =
Is64() ? Int64Constant(kHoleNanInt64) : Int32Constant(kHoleNanLower32);
if (doubles_to_objects_conversion) {
// If the copy might trigger a GC, pre-initialize the FixedArray with holes
// so that it is always in a consistent state.
FillFixedArrayWithValue(to_kind, to_array, IntPtrOrSmiConstant(0, mode),
capacity, Heap::kTheHoleValueRootIndex, mode);
} else if (element_count != capacity) {
FillFixedArrayWithValue(to_kind, to_array, element_count, capacity,
Heap::kTheHoleValueRootIndex, mode);
}
Node* limit_offset = ElementOffsetFromIndex(
IntPtrOrSmiConstant(0, mode), from_kind, mode, first_element_offset);
Variable var_from_offset(this, MachineType::PointerRepresentation());
var_from_offset.Bind(ElementOffsetFromIndex(element_count, from_kind, mode,
first_element_offset));
// This second variable is used only when the element sizes of source and
// destination arrays do not match.
Variable var_to_offset(this, MachineType::PointerRepresentation());
if (element_size_matches) {
var_to_offset.Bind(var_from_offset.value());
} else {
var_to_offset.Bind(ElementOffsetFromIndex(element_count, to_kind, mode,
first_element_offset));
}
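// The copy runs backwards: the offsets start just past the last element to
// copy and are decremented until they reach the offset of element 0.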
Variable* vars[] = {&var_from_offset, &var_to_offset};
Label decrement(this, 2, vars);
Branch(WordEqual(var_from_offset.value(), limit_offset), &done, &decrement);
Bind(&decrement);
{
Node* from_offset = IntPtrSub(
var_from_offset.value(),
IntPtrConstant(from_double_elements ? kDoubleSize : kPointerSize));
var_from_offset.Bind(from_offset);
Node* to_offset;
if (element_size_matches) {
to_offset = from_offset;
} else {
to_offset = IntPtrSub(
var_to_offset.value(),
IntPtrConstant(to_double_elements ? kDoubleSize : kPointerSize));
var_to_offset.Bind(to_offset);
}
Label next_iter(this), store_double_hole(this);
Label* if_hole;
if (doubles_to_objects_conversion) {
// The target elements array is already preinitialized with holes, so we
// can just proceed with the next iteration.
if_hole = &next_iter;
} else if (IsFastDoubleElementsKind(to_kind)) {
if_hole = &store_double_hole;
} else {
// In all other cases, don't check for holes; copy the data as is.
if_hole = nullptr;
}
Node* value = LoadElementAndPrepareForStore(
from_array, var_from_offset.value(), from_kind, to_kind, if_hole);
if (needs_write_barrier) {
Store(to_array, to_offset, value);
} else if (to_double_elements) {
StoreNoWriteBarrier(MachineRepresentation::kFloat64, to_array, to_offset,
value);
} else {
StoreNoWriteBarrier(MachineRepresentation::kTagged, to_array, to_offset,
value);
}
Goto(&next_iter);
if (if_hole == &store_double_hole) {
Bind(&store_double_hole);
// Don't use doubles to store the hole double, since manipulating the
// signaling NaN used for the hole in C++, e.g. with bit_cast, will
// change its value on ia32 (the x87 stack is used to return values
// and stores to the stack silently clear the signaling bit).
//
// TODO(danno): When we have a Float32/Float64 wrapper class that
// preserves double bits during manipulation, remove this code/change
// this to an indexed Float64 store.
if (Is64()) {
StoreNoWriteBarrier(MachineRepresentation::kWord64, to_array, to_offset,
double_hole);
} else {
StoreNoWriteBarrier(MachineRepresentation::kWord32, to_array, to_offset,
double_hole);
StoreNoWriteBarrier(MachineRepresentation::kWord32, to_array,
IntPtrAdd(to_offset, IntPtrConstant(kPointerSize)),
double_hole);
}
Goto(&next_iter);
}
Bind(&next_iter);
Node* compare = WordNotEqual(from_offset, limit_offset);
Branch(compare, &decrement, &done);
}
Bind(&done);
IncrementCounter(isolate()->counters()->inlined_copied_elements(), 1);
Comment("] CopyFixedArrayElements");
}
void CodeStubAssembler::CopyStringCharacters(Node* from_string, Node* to_string,
Node* from_index, Node* to_index,
Node* character_count,
String::Encoding from_encoding,
String::Encoding to_encoding,
ParameterMode mode) {
bool from_one_byte = from_encoding == String::ONE_BYTE_ENCODING;
bool to_one_byte = to_encoding == String::ONE_BYTE_ENCODING;
DCHECK_IMPLIES(to_one_byte, from_one_byte);
Comment("CopyStringCharacters %s -> %s",
from_one_byte ? "ONE_BYTE_ENCODING" : "TWO_BYTE_ENCODING",
to_one_byte ? "ONE_BYTE_ENCODING" : "TWO_BYTE_ENCODING");
ElementsKind from_kind = from_one_byte ? UINT8_ELEMENTS : UINT16_ELEMENTS;
ElementsKind to_kind = to_one_byte ? UINT8_ELEMENTS : UINT16_ELEMENTS;
STATIC_ASSERT(SeqOneByteString::kHeaderSize == SeqTwoByteString::kHeaderSize);
int header_size = SeqOneByteString::kHeaderSize - kHeapObjectTag;
Node* from_offset =
ElementOffsetFromIndex(from_index, from_kind, mode, header_size);
Node* to_offset =
ElementOffsetFromIndex(to_index, to_kind, mode, header_size);
Node* byte_count = ElementOffsetFromIndex(character_count, from_kind, mode);
Node* limit_offset = IntPtrAddFoldConstants(from_offset, byte_count);
// Prepare the fast loop.
MachineType type =
from_one_byte ? MachineType::Uint8() : MachineType::Uint16();
MachineRepresentation rep = to_one_byte ? MachineRepresentation::kWord8
: MachineRepresentation::kWord16;
int from_increment = 1 << ElementsKindToShiftSize(from_kind);
int to_increment = 1 << ElementsKindToShiftSize(to_kind);
Variable current_to_offset(this, MachineType::PointerRepresentation());
VariableList vars({&current_to_offset}, zone());
current_to_offset.Bind(to_offset);
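// Determine whether the source and destination offsets advance in lockstep:
// same encoding and identical start indices (the same node, equal int32
// constants, or equal Smi constants). If so, a single offset variable is
// enough.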
int to_index_constant = 0, from_index_constant = 0;
Smi* to_index_smi = nullptr;
Smi* from_index_smi = nullptr;
bool index_same = (from_encoding == to_encoding) &&
(from_index == to_index ||
(ToInt32Constant(from_index, from_index_constant) &&
ToInt32Constant(to_index, to_index_constant) &&
from_index_constant == to_index_constant) ||
(ToSmiConstant(from_index, from_index_smi) &&
ToSmiConstant(to_index, to_index_smi) &&
to_index_smi == from_index_smi));
BuildFastLoop(vars, MachineType::PointerRepresentation(), from_offset,
limit_offset,
[this, from_string, to_string, &current_to_offset, to_increment,
type, rep, index_same](Node* offset) {
Node* value = Load(type, from_string, offset);
StoreNoWriteBarrier(
rep, to_string,
index_same ? offset : current_to_offset.value(), value);
if (!index_same) {
Increment(current_to_offset, to_increment);
}
},
from_increment, IndexAdvanceMode::kPost);
}
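// Loads the element at |offset| from |array| (which has |from_kind|
// representation) and converts it to the representation required by
// |to_kind|. If |if_hole| is non-null, jumps there when the loaded value is
// the hole.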
Node* CodeStubAssembler::LoadElementAndPrepareForStore(Node* array,
Node* offset,
ElementsKind from_kind,
ElementsKind to_kind,
Label* if_hole) {
if (IsFastDoubleElementsKind(from_kind)) {
Node* value =
LoadDoubleWithHoleCheck(array, offset, if_hole, MachineType::Float64());
if (!IsFastDoubleElementsKind(to_kind)) {
value = AllocateHeapNumberWithValue(value);
}
return value;
} else {
Node* value = Load(MachineType::AnyTagged(), array, offset);
if (if_hole) {
GotoIf(WordEqual(value, TheHoleConstant()), if_hole);
}
if (IsFastDoubleElementsKind(to_kind)) {
if (IsFastSmiElementsKind(from_kind)) {
value = SmiToFloat64(value);
} else {
value = LoadHeapNumberValue(value);
}
}
return value;
}
}
Node* CodeStubAssembler::CalculateNewElementsCapacity(Node* old_capacity,
ParameterMode mode) {
if (mode == SMI_PARAMETERS) {
old_capacity = BitcastTaggedToWord(old_capacity);
}
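// Grow by roughly 50% of the old capacity plus a fixed headroom. In
// SMI_PARAMETERS mode the arithmetic below operates on the tagged word, and
// the final SmiAnd clears the low bit again so the result is a valid Smi.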
Node* half_old_capacity = WordShr(old_capacity, IntPtrConstant(1));
Node* new_capacity = IntPtrAdd(half_old_capacity, old_capacity);
Node* unconditioned_result = IntPtrAdd(new_capacity, IntPtrConstant(16));
if (mode == SMI_PARAMETERS) {
return SmiAnd(BitcastWordToTaggedSigned(unconditioned_result),
SmiConstant(-1));
} else {
return unconditioned_result;
}
}
Node* CodeStubAssembler::TryGrowElementsCapacity(Node* object, Node* elements,
ElementsKind kind, Node* key,
Label* bailout) {
Node* capacity = LoadFixedArrayBaseLength(elements);
ParameterMode mode = OptimalParameterMode();
capacity = TaggedToParameter(capacity, mode);
key = TaggedToParameter(key, mode);
return TryGrowElementsCapacity(object, elements, kind, key, capacity, mode,
bailout);
}
Node* CodeStubAssembler::TryGrowElementsCapacity(Node* object, Node* elements,
ElementsKind kind, Node* key,
Node* capacity,
ParameterMode mode,
Label* bailout) {
Comment("TryGrowElementsCapacity");
// If the gap growth is too big, fall back to the runtime.
Node* max_gap = IntPtrOrSmiConstant(JSObject::kMaxGap, mode);
Node* max_capacity = IntPtrOrSmiAdd(capacity, max_gap, mode);
GotoIf(UintPtrOrSmiGreaterThanOrEqual(key, max_capacity, mode), bailout);
// Calculate the capacity of the new backing store.
Node* new_capacity = CalculateNewElementsCapacity(
IntPtrOrSmiAdd(key, IntPtrOrSmiConstant(1, mode), mode), mode);
return GrowElementsCapacity(object, elements, kind, kind, capacity,
new_capacity, mode, bailout);
}
Node* CodeStubAssembler::GrowElementsCapacity(
Node* object, Node* elements, ElementsKind from_kind, ElementsKind to_kind,
Node* capacity, Node* new_capacity, ParameterMode mode, Label* bailout) {
Comment("[ GrowElementsCapacity");
// If the size of the allocation for the new capacity doesn't fit in a page
// that we can bump-pointer allocate from, fall back to the runtime.
int max_size = FixedArrayBase::GetMaxLengthForNewSpaceAllocation(to_kind);
GotoIf(UintPtrOrSmiGreaterThanOrEqual(
new_capacity, IntPtrOrSmiConstant(max_size, mode), mode),
bailout);
// Allocate the new backing store.
Node* new_elements = AllocateFixedArray(to_kind, new_capacity, mode);
// Copy the elements from the old backing store to the new one.
// The size check above guarantees that |new_elements| is allocated in new
// space, so we can skip the write barrier.
CopyFixedArrayElements(from_kind, elements, to_kind, new_elements, capacity,
new_capacity, SKIP_WRITE_BARRIER, mode);
StoreObjectField(object, JSObject::kElementsOffset, new_elements);
Comment("] GrowElementsCapacity");
return new_elements;
}
void CodeStubAssembler::InitializeAllocationMemento(Node* base_allocation,
int base_allocation_size,
Node* allocation_site) {
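// The memento is placed directly after the object, i.e. at offset
// |base_allocation_size| from |base_allocation|.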
StoreObjectFieldNoWriteBarrier(
base_allocation, AllocationMemento::kMapOffset + base_allocation_size,
HeapConstant(Handle<Map>(isolate()->heap()->allocation_memento_map())));
StoreObjectFieldNoWriteBarrier(
base_allocation,
AllocationMemento::kAllocationSiteOffset + base_allocation_size,
allocation_site);
if (FLAG_allocation_site_pretenuring) {
Node* count = LoadObjectField(allocation_site,
AllocationSite::kPretenureCreateCountOffset);
Node* incremented_count = SmiAdd(count, SmiConstant(Smi::FromInt(1)));
StoreObjectFieldNoWriteBarrier(allocation_site,
AllocationSite::kPretenureCreateCountOffset,
incremented_count);
}
}
Node* CodeStubAssembler::TryTaggedToFloat64(Node* value,
Label* if_valueisnotnumber) {