// Copyright 2016 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/code-stub-assembler.h"
#include "src/code-factory.h"
#include "src/frames-inl.h"
#include "src/frames.h"
#include "src/objects/api-callbacks.h"
#include "src/objects/descriptor-array.h"
#include "src/objects/ordered-hash-table-inl.h"
#include "src/wasm/wasm-objects.h"
namespace v8 {
namespace internal {
using compiler::Node;
template <class T>
using TNode = compiler::TNode<T>;
template <class T>
using SloppyTNode = compiler::SloppyTNode<T>;
CodeStubAssembler::CodeStubAssembler(compiler::CodeAssemblerState* state)
: compiler::CodeAssembler(state) {
if (DEBUG_BOOL && FLAG_csa_trap_on_node != nullptr) {
HandleBreakOnNode();
}
}
void CodeStubAssembler::HandleBreakOnNode() {
  // FLAG_csa_trap_on_node should be in the form "STUB,NODE", where STUB is a
  // string specifying the name of a stub and NODE is a number specifying the
  // node id.
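  // Example (stub name hypothetical): running with
  // --csa-trap-on-node="MyStub,42" calls BreakOnNode(42) while MyStub is
  // being built, so the debugger stops when node #42 is created.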
const char* name = state()->name();
size_t name_length = strlen(name);
if (strncmp(FLAG_csa_trap_on_node, name, name_length) != 0) {
// Different name.
return;
}
size_t option_length = strlen(FLAG_csa_trap_on_node);
if (option_length < name_length + 2 ||
FLAG_csa_trap_on_node[name_length] != ',') {
    // Option is too short or lacks the ',' separator.
return;
}
const char* start = &FLAG_csa_trap_on_node[name_length + 1];
char* end;
int node_id = static_cast<int>(strtol(start, &end, 10));
if (start == end) {
// Bad node id.
return;
}
BreakOnNode(node_id);
}
void CodeStubAssembler::Assert(const BranchGenerator& branch,
const char* message, const char* file, int line,
Node* extra_node1, const char* extra_node1_name,
Node* extra_node2, const char* extra_node2_name,
Node* extra_node3, const char* extra_node3_name,
Node* extra_node4, const char* extra_node4_name,
Node* extra_node5,
const char* extra_node5_name) {
#if defined(DEBUG)
if (FLAG_debug_code) {
Check(branch, message, file, line, extra_node1, extra_node1_name,
extra_node2, extra_node2_name, extra_node3, extra_node3_name,
extra_node4, extra_node4_name, extra_node5, extra_node5_name);
}
#endif
}
void CodeStubAssembler::Assert(const NodeGenerator& condition_body,
const char* message, const char* file, int line,
Node* extra_node1, const char* extra_node1_name,
Node* extra_node2, const char* extra_node2_name,
Node* extra_node3, const char* extra_node3_name,
Node* extra_node4, const char* extra_node4_name,
Node* extra_node5,
const char* extra_node5_name) {
#if defined(DEBUG)
if (FLAG_debug_code) {
Check(condition_body, message, file, line, extra_node1, extra_node1_name,
extra_node2, extra_node2_name, extra_node3, extra_node3_name,
extra_node4, extra_node4_name, extra_node5, extra_node5_name);
}
#endif
}
#ifdef DEBUG
namespace {
void MaybePrintNodeWithName(CodeStubAssembler* csa, Node* node,
const char* node_name) {
if (node != nullptr) {
csa->CallRuntime(Runtime::kPrintWithNameForAssert, csa->SmiConstant(0),
csa->StringConstant(node_name), node);
}
}
} // namespace
#endif
void CodeStubAssembler::Check(const BranchGenerator& branch,
const char* message, const char* file, int line,
Node* extra_node1, const char* extra_node1_name,
Node* extra_node2, const char* extra_node2_name,
Node* extra_node3, const char* extra_node3_name,
Node* extra_node4, const char* extra_node4_name,
Node* extra_node5, const char* extra_node5_name) {
Label ok(this);
Label not_ok(this, Label::kDeferred);
if (message != nullptr && FLAG_code_comments) {
Comment("[ Assert: %s", message);
} else {
Comment("[ Assert");
}
branch(&ok, &not_ok);
BIND(&not_ok);
DCHECK_NOT_NULL(message);
char chars[1024];
Vector<char> buffer(chars);
if (file != nullptr) {
SNPrintF(buffer, "CSA_ASSERT failed: %s [%s:%d]\n", message, file, line);
} else {
SNPrintF(buffer, "CSA_ASSERT failed: %s\n", message);
}
Node* message_node = StringConstant(&(buffer[0]));
#ifdef DEBUG
// Only print the extra nodes in debug builds.
MaybePrintNodeWithName(this, extra_node1, extra_node1_name);
MaybePrintNodeWithName(this, extra_node2, extra_node2_name);
MaybePrintNodeWithName(this, extra_node3, extra_node3_name);
MaybePrintNodeWithName(this, extra_node4, extra_node4_name);
MaybePrintNodeWithName(this, extra_node5, extra_node5_name);
#endif
DebugAbort(message_node);
Unreachable();
BIND(&ok);
Comment("] Assert");
}
void CodeStubAssembler::Check(const NodeGenerator& condition_body,
const char* message, const char* file, int line,
Node* extra_node1, const char* extra_node1_name,
Node* extra_node2, const char* extra_node2_name,
Node* extra_node3, const char* extra_node3_name,
Node* extra_node4, const char* extra_node4_name,
Node* extra_node5, const char* extra_node5_name) {
BranchGenerator branch = [=](Label* ok, Label* not_ok) {
Node* condition = condition_body();
DCHECK_NOT_NULL(condition);
Branch(condition, ok, not_ok);
};
Check(branch, message, file, line, extra_node1, extra_node1_name, extra_node2,
extra_node2_name, extra_node3, extra_node3_name, extra_node4,
extra_node4_name, extra_node5, extra_node5_name);
}
Node* CodeStubAssembler::SelectImpl(TNode<BoolT> condition,
const NodeGenerator& true_body,
const NodeGenerator& false_body,
MachineRepresentation rep) {
VARIABLE(value, rep);
Label vtrue(this), vfalse(this), end(this);
Branch(condition, &vtrue, &vfalse);
BIND(&vtrue);
{
value.Bind(true_body());
Goto(&end);
}
BIND(&vfalse);
{
value.Bind(false_body());
Goto(&end);
}
BIND(&end);
return value.value();
}
TNode<Int32T> CodeStubAssembler::SelectInt32Constant(
SloppyTNode<BoolT> condition, int true_value, int false_value) {
return SelectConstant<Int32T>(condition, Int32Constant(true_value),
Int32Constant(false_value));
}
TNode<IntPtrT> CodeStubAssembler::SelectIntPtrConstant(
SloppyTNode<BoolT> condition, int true_value, int false_value) {
return SelectConstant<IntPtrT>(condition, IntPtrConstant(true_value),
IntPtrConstant(false_value));
}
TNode<Oddball> CodeStubAssembler::SelectBooleanConstant(
SloppyTNode<BoolT> condition) {
return SelectConstant<Oddball>(condition, TrueConstant(), FalseConstant());
}
TNode<Smi> CodeStubAssembler::SelectSmiConstant(SloppyTNode<BoolT> condition,
Smi* true_value,
Smi* false_value) {
return SelectConstant<Smi>(condition, SmiConstant(true_value),
SmiConstant(false_value));
}
TNode<Object> CodeStubAssembler::NoContextConstant() {
return SmiConstant(Context::kNoContext);
}
#define HEAP_CONSTANT_ACCESSOR(rootIndexName, rootAccessorName, name) \
compiler::TNode<std::remove_reference<decltype( \
*std::declval<Heap>().rootAccessorName())>::type> \
CodeStubAssembler::name##Constant() { \
return UncheckedCast<std::remove_reference<decltype( \
*std::declval<Heap>().rootAccessorName())>::type>( \
LoadRoot(Heap::k##rootIndexName##RootIndex)); \
}
HEAP_MUTABLE_IMMOVABLE_OBJECT_LIST(HEAP_CONSTANT_ACCESSOR);
#undef HEAP_CONSTANT_ACCESSOR
#define HEAP_CONSTANT_ACCESSOR(rootIndexName, rootAccessorName, name) \
compiler::TNode<std::remove_reference<decltype( \
*std::declval<ReadOnlyRoots>().rootAccessorName())>::type> \
CodeStubAssembler::name##Constant() { \
return UncheckedCast<std::remove_reference<decltype( \
*std::declval<ReadOnlyRoots>().rootAccessorName())>::type>( \
LoadRoot(Heap::k##rootIndexName##RootIndex)); \
}
HEAP_IMMUTABLE_IMMOVABLE_OBJECT_LIST(HEAP_CONSTANT_ACCESSOR);
#undef HEAP_CONSTANT_ACCESSOR
#define HEAP_CONSTANT_TEST(rootIndexName, rootAccessorName, name) \
compiler::TNode<BoolT> CodeStubAssembler::Is##name( \
SloppyTNode<Object> value) { \
return WordEqual(value, name##Constant()); \
} \
compiler::TNode<BoolT> CodeStubAssembler::IsNot##name( \
SloppyTNode<Object> value) { \
return WordNotEqual(value, name##Constant()); \
}
HEAP_IMMOVABLE_OBJECT_LIST(HEAP_CONSTANT_TEST);
#undef HEAP_CONSTANT_TEST
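// The hash seed is stored in a ByteArray root as a raw 64-bit value.
// HashSeed() reads it whole on 64-bit targets, while HashSeedHigh() and
// HashSeedLow() read the endianness-dependent 32-bit halves on 32-bit ones.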
TNode<Int64T> CodeStubAssembler::HashSeed() {
DCHECK(Is64());
TNode<HeapObject> hash_seed_root =
TNode<HeapObject>::UncheckedCast(LoadRoot(Heap::kHashSeedRootIndex));
return TNode<Int64T>::UncheckedCast(LoadObjectField(
hash_seed_root, ByteArray::kHeaderSize, MachineType::Int64()));
}
TNode<Int32T> CodeStubAssembler::HashSeedHigh() {
DCHECK(!Is64());
#ifdef V8_TARGET_BIG_ENDIAN
  const int kOffset = 0;
#else
  const int kOffset = kInt32Size;
#endif
TNode<HeapObject> hash_seed_root =
TNode<HeapObject>::UncheckedCast(LoadRoot(Heap::kHashSeedRootIndex));
return TNode<Int32T>::UncheckedCast(LoadObjectField(
hash_seed_root, ByteArray::kHeaderSize + kOffset, MachineType::Int32()));
}
TNode<Int32T> CodeStubAssembler::HashSeedLow() {
DCHECK(!Is64());
#ifdef V8_TARGET_BIG_ENDIAN
  const int kOffset = kInt32Size;
#else
  const int kOffset = 0;
#endif
TNode<HeapObject> hash_seed_root =
TNode<HeapObject>::UncheckedCast(LoadRoot(Heap::kHashSeedRootIndex));
return TNode<Int32T>::UncheckedCast(LoadObjectField(
hash_seed_root, ByteArray::kHeaderSize + kOffset, MachineType::Int32()));
}
Node* CodeStubAssembler::IntPtrOrSmiConstant(int value, ParameterMode mode) {
if (mode == SMI_PARAMETERS) {
return SmiConstant(value);
} else {
DCHECK_EQ(INTPTR_PARAMETERS, mode);
return IntPtrConstant(value);
}
}
bool CodeStubAssembler::IsIntPtrOrSmiConstantZero(Node* test,
ParameterMode mode) {
int32_t constant_test;
Smi* smi_test;
if (mode == INTPTR_PARAMETERS) {
if (ToInt32Constant(test, constant_test) && constant_test == 0) {
return true;
}
} else {
DCHECK_EQ(mode, SMI_PARAMETERS);
if (ToSmiConstant(test, smi_test) && smi_test->value() == 0) {
return true;
}
}
return false;
}
bool CodeStubAssembler::TryGetIntPtrOrSmiConstantValue(Node* maybe_constant,
int* value,
ParameterMode mode) {
int32_t int32_constant;
if (mode == INTPTR_PARAMETERS) {
if (ToInt32Constant(maybe_constant, int32_constant)) {
*value = int32_constant;
return true;
}
} else {
DCHECK_EQ(mode, SMI_PARAMETERS);
Smi* smi_constant;
if (ToSmiConstant(maybe_constant, smi_constant)) {
*value = Smi::ToInt(smi_constant);
return true;
}
}
return false;
}
TNode<IntPtrT> CodeStubAssembler::IntPtrRoundUpToPowerOfTwo32(
TNode<IntPtrT> value) {
Comment("IntPtrRoundUpToPowerOfTwo32");
CSA_ASSERT(this, UintPtrLessThanOrEqual(value, IntPtrConstant(0x80000000u)));
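  // Classic round-up-to-power-of-two bit trick: decrement, then OR in
  // right-shifted copies (by 1, 2, 4, 8, 16) so that every bit below the
  // highest set bit becomes one, then increment. E.g. 40 = 0b101000 ->
  // 39 = 0b100111 -> smeared to 0b111111 -> +1 -> 64 = 0b1000000.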
value = Signed(IntPtrSub(value, IntPtrConstant(1)));
for (int i = 1; i <= 16; i *= 2) {
value = Signed(WordOr(value, WordShr(value, IntPtrConstant(i))));
}
return Signed(IntPtrAdd(value, IntPtrConstant(1)));
}
Node* CodeStubAssembler::MatchesParameterMode(Node* value, ParameterMode mode) {
if (mode == SMI_PARAMETERS) {
return TaggedIsSmi(value);
} else {
return Int32Constant(1);
}
}
TNode<BoolT> CodeStubAssembler::WordIsPowerOfTwo(SloppyTNode<IntPtrT> value) {
// value && !(value & (value - 1))
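  // The Select maps {value} == 0 to the nonzero constant 1 so the final
  // WordEqual(..., 0) yields false for zero; for nonzero {value}, clearing
  // the lowest set bit via value & (value - 1) leaves 0 iff exactly one bit
  // was set.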
return WordEqual(
Select<IntPtrT>(
WordEqual(value, IntPtrConstant(0)),
[=] { return IntPtrConstant(1); },
[=] { return WordAnd(value, IntPtrSub(value, IntPtrConstant(1))); }),
IntPtrConstant(0));
}
TNode<Float64T> CodeStubAssembler::Float64Round(SloppyTNode<Float64T> x) {
Node* one = Float64Constant(1.0);
Node* one_half = Float64Constant(0.5);
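  // Strategy: start from Float64Ceil(x); if ceil(x) - 0.5 > x, the fractional
  // part of {x} was below one half, so step back down by 1.0. Exact ties stay
  // at ceil(x), i.e. halves round towards +Infinity as Math.round requires.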
Label return_x(this);
// Round up {x} towards Infinity.
VARIABLE(var_x, MachineRepresentation::kFloat64, Float64Ceil(x));
GotoIf(Float64LessThanOrEqual(Float64Sub(var_x.value(), one_half), x),
&return_x);
var_x.Bind(Float64Sub(var_x.value(), one));
Goto(&return_x);
BIND(&return_x);
return TNode<Float64T>::UncheckedCast(var_x.value());
}
TNode<Float64T> CodeStubAssembler::Float64Ceil(SloppyTNode<Float64T> x) {
if (IsFloat64RoundUpSupported()) {
return Float64RoundUp(x);
}
Node* one = Float64Constant(1.0);
Node* zero = Float64Constant(0.0);
Node* two_52 = Float64Constant(4503599627370496.0E0);
Node* minus_two_52 = Float64Constant(-4503599627370496.0E0);
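  // 2^52 is the smallest power of two whose double ULP is 1.0: adding it to a
  // positive x < 2^52 rounds away x's fractional bits (round-to-nearest), and
  // subtracting it again leaves an integral double near x. The compare-and-
  // adjust steps below correct the cases where that rounding went the wrong
  // way for ceil.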
VARIABLE(var_x, MachineRepresentation::kFloat64, x);
Label return_x(this), return_minus_x(this);
// Check if {x} is greater than zero.
Label if_xgreaterthanzero(this), if_xnotgreaterthanzero(this);
Branch(Float64GreaterThan(x, zero), &if_xgreaterthanzero,
&if_xnotgreaterthanzero);
BIND(&if_xgreaterthanzero);
{
// Just return {x} unless it's in the range ]0,2^52[.
GotoIf(Float64GreaterThanOrEqual(x, two_52), &return_x);
// Round positive {x} towards Infinity.
var_x.Bind(Float64Sub(Float64Add(two_52, x), two_52));
GotoIfNot(Float64LessThan(var_x.value(), x), &return_x);
var_x.Bind(Float64Add(var_x.value(), one));
Goto(&return_x);
}
BIND(&if_xnotgreaterthanzero);
{
    // Just return {x} unless it's in the range ]-2^52,0[.
GotoIf(Float64LessThanOrEqual(x, minus_two_52), &return_x);
GotoIfNot(Float64LessThan(x, zero), &return_x);
// Round negated {x} towards Infinity and return the result negated.
Node* minus_x = Float64Neg(x);
var_x.Bind(Float64Sub(Float64Add(two_52, minus_x), two_52));
GotoIfNot(Float64GreaterThan(var_x.value(), minus_x), &return_minus_x);
var_x.Bind(Float64Sub(var_x.value(), one));
Goto(&return_minus_x);
}
BIND(&return_minus_x);
var_x.Bind(Float64Neg(var_x.value()));
Goto(&return_x);
BIND(&return_x);
return TNode<Float64T>::UncheckedCast(var_x.value());
}
TNode<Float64T> CodeStubAssembler::Float64Floor(SloppyTNode<Float64T> x) {
if (IsFloat64RoundDownSupported()) {
return Float64RoundDown(x);
}
Node* one = Float64Constant(1.0);
Node* zero = Float64Constant(0.0);
Node* two_52 = Float64Constant(4503599627370496.0E0);
Node* minus_two_52 = Float64Constant(-4503599627370496.0E0);
VARIABLE(var_x, MachineRepresentation::kFloat64, x);
Label return_x(this), return_minus_x(this);
// Check if {x} is greater than zero.
Label if_xgreaterthanzero(this), if_xnotgreaterthanzero(this);
Branch(Float64GreaterThan(x, zero), &if_xgreaterthanzero,
&if_xnotgreaterthanzero);
BIND(&if_xgreaterthanzero);
{
// Just return {x} unless it's in the range ]0,2^52[.
GotoIf(Float64GreaterThanOrEqual(x, two_52), &return_x);
// Round positive {x} towards -Infinity.
var_x.Bind(Float64Sub(Float64Add(two_52, x), two_52));
GotoIfNot(Float64GreaterThan(var_x.value(), x), &return_x);
var_x.Bind(Float64Sub(var_x.value(), one));
Goto(&return_x);
}
BIND(&if_xnotgreaterthanzero);
{
    // Just return {x} unless it's in the range ]-2^52,0[.
GotoIf(Float64LessThanOrEqual(x, minus_two_52), &return_x);
GotoIfNot(Float64LessThan(x, zero), &return_x);
// Round negated {x} towards -Infinity and return the result negated.
Node* minus_x = Float64Neg(x);
var_x.Bind(Float64Sub(Float64Add(two_52, minus_x), two_52));
GotoIfNot(Float64LessThan(var_x.value(), minus_x), &return_minus_x);
var_x.Bind(Float64Add(var_x.value(), one));
Goto(&return_minus_x);
}
BIND(&return_minus_x);
var_x.Bind(Float64Neg(var_x.value()));
Goto(&return_x);
BIND(&return_x);
return TNode<Float64T>::UncheckedCast(var_x.value());
}
TNode<Float64T> CodeStubAssembler::Float64RoundToEven(SloppyTNode<Float64T> x) {
if (IsFloat64RoundTiesEvenSupported()) {
return Float64RoundTiesEven(x);
}
// See ES#sec-touint8clamp for details.
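  // f = Float64Floor(x); values strictly below f + 0.5 round down to f,
  // values strictly above round up to f + 1, and exact ties pick whichever
  // of the two is even, decided by checking f mod 2 against zero.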
Node* f = Float64Floor(x);
Node* f_and_half = Float64Add(f, Float64Constant(0.5));
VARIABLE(var_result, MachineRepresentation::kFloat64);
Label return_f(this), return_f_plus_one(this), done(this);
GotoIf(Float64LessThan(f_and_half, x), &return_f_plus_one);
GotoIf(Float64LessThan(x, f_and_half), &return_f);
{
Node* f_mod_2 = Float64Mod(f, Float64Constant(2.0));
Branch(Float64Equal(f_mod_2, Float64Constant(0.0)), &return_f,
&return_f_plus_one);
}
BIND(&return_f);
var_result.Bind(f);
Goto(&done);
BIND(&return_f_plus_one);
var_result.Bind(Float64Add(f, Float64Constant(1.0)));
Goto(&done);
BIND(&done);
return TNode<Float64T>::UncheckedCast(var_result.value());
}
TNode<Float64T> CodeStubAssembler::Float64Trunc(SloppyTNode<Float64T> x) {
if (IsFloat64RoundTruncateSupported()) {
return Float64RoundTruncate(x);
}
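  // Truncation rounds towards zero: round positive {x} down and negative {x}
  // up, using Float64RoundDown/Float64RoundUp where the machine supports them
  // and the +/-2^52 trick (see Float64Ceil above) otherwise.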
Node* one = Float64Constant(1.0);
Node* zero = Float64Constant(0.0);
Node* two_52 = Float64Constant(4503599627370496.0E0);
Node* minus_two_52 = Float64Constant(-4503599627370496.0E0);
VARIABLE(var_x, MachineRepresentation::kFloat64, x);
Label return_x(this), return_minus_x(this);
// Check if {x} is greater than 0.
Label if_xgreaterthanzero(this), if_xnotgreaterthanzero(this);
Branch(Float64GreaterThan(x, zero), &if_xgreaterthanzero,
&if_xnotgreaterthanzero);
BIND(&if_xgreaterthanzero);
{
if (IsFloat64RoundDownSupported()) {
var_x.Bind(Float64RoundDown(x));
} else {
// Just return {x} unless it's in the range ]0,2^52[.
GotoIf(Float64GreaterThanOrEqual(x, two_52), &return_x);
// Round positive {x} towards -Infinity.
var_x.Bind(Float64Sub(Float64Add(two_52, x), two_52));
GotoIfNot(Float64GreaterThan(var_x.value(), x), &return_x);
var_x.Bind(Float64Sub(var_x.value(), one));
}
Goto(&return_x);
}
BIND(&if_xnotgreaterthanzero);
{
if (IsFloat64RoundUpSupported()) {
var_x.Bind(Float64RoundUp(x));
Goto(&return_x);
} else {
      // Just return {x} unless it's in the range ]-2^52,0[.
GotoIf(Float64LessThanOrEqual(x, minus_two_52), &return_x);
GotoIfNot(Float64LessThan(x, zero), &return_x);
// Round negated {x} towards -Infinity and return result negated.
Node* minus_x = Float64Neg(x);
var_x.Bind(Float64Sub(Float64Add(two_52, minus_x), two_52));
GotoIfNot(Float64GreaterThan(var_x.value(), minus_x), &return_minus_x);
var_x.Bind(Float64Sub(var_x.value(), one));
Goto(&return_minus_x);
}
}
BIND(&return_minus_x);
var_x.Bind(Float64Neg(var_x.value()));
Goto(&return_x);
BIND(&return_x);
return TNode<Float64T>::UncheckedCast(var_x.value());
}
TNode<BoolT> CodeStubAssembler::IsValidSmi(TNode<Smi> smi) {
if (SmiValuesAre31Bits() && kPointerSize == kInt64Size) {
// Check that the Smi value is properly sign-extended.
TNode<IntPtrT> value = Signed(BitcastTaggedToWord(smi));
return WordEqual(value, ChangeInt32ToIntPtr(TruncateIntPtrToInt32(value)));
}
return Int32TrueConstant();
}
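// A Smi is stored as value << (kSmiShiftSize + kSmiTagSize) with a zero tag
// bit: the shift is 32 on 64-bit targets with 32-bit payloads (the payload
// occupies the upper word) and 1 when payloads are 31 bits wide.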
Node* CodeStubAssembler::SmiShiftBitsConstant() {
return IntPtrConstant(kSmiShiftSize + kSmiTagSize);
}
TNode<Smi> CodeStubAssembler::SmiFromInt32(SloppyTNode<Int32T> value) {
TNode<IntPtrT> value_intptr = ChangeInt32ToIntPtr(value);
TNode<Smi> smi =
BitcastWordToTaggedSigned(WordShl(value_intptr, SmiShiftBitsConstant()));
#if V8_COMPRESS_POINTERS
CSA_ASSERT(this, IsValidSmi(smi));
#endif
return smi;
}
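// A single unsigned comparison rejects both negative inputs and inputs above
// Smi::kMaxValue: reinterpreted as uintptr_t, every negative intptr_t value
// compares greater than Smi::kMaxValue.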
TNode<BoolT> CodeStubAssembler::IsValidPositiveSmi(TNode<IntPtrT> value) {
intptr_t constant_value;
if (ToIntPtrConstant(value, constant_value)) {
return (static_cast<uintptr_t>(constant_value) <=
static_cast<uintptr_t>(Smi::kMaxValue))
? Int32TrueConstant()
: Int32FalseConstant();
}
return UintPtrLessThanOrEqual(value, IntPtrConstant(Smi::kMaxValue));
}
TNode<Smi> CodeStubAssembler::SmiTag(SloppyTNode<IntPtrT> value) {
int32_t constant_value;
if (ToInt32Constant(value, constant_value) && Smi::IsValid(constant_value)) {
return SmiConstant(constant_value);
}
TNode<Smi> smi =
BitcastWordToTaggedSigned(WordShl(value, SmiShiftBitsConstant()));
#if V8_COMPRESS_POINTERS
CSA_ASSERT(this, IsValidSmi(smi));
#endif
return smi;
}
TNode<IntPtrT> CodeStubAssembler::SmiUntag(SloppyTNode<Smi> value) {
#if V8_COMPRESS_POINTERS
CSA_ASSERT(this, IsValidSmi(value));
#endif
intptr_t constant_value;
if (ToIntPtrConstant(value, constant_value)) {
return IntPtrConstant(constant_value >> (kSmiShiftSize + kSmiTagSize));
}
return Signed(WordSar(BitcastTaggedToWord(value), SmiShiftBitsConstant()));
}
TNode<Int32T> CodeStubAssembler::SmiToInt32(SloppyTNode<Smi> value) {
TNode<IntPtrT> result = SmiUntag(value);
return TruncateIntPtrToInt32(result);
}
TNode<Float64T> CodeStubAssembler::SmiToFloat64(SloppyTNode<Smi> value) {
return ChangeInt32ToFloat64(SmiToInt32(value));
}
TNode<Smi> CodeStubAssembler::SmiMax(TNode<Smi> a, TNode<Smi> b) {
return SelectConstant<Smi>(SmiLessThan(a, b), b, a);
}
TNode<Smi> CodeStubAssembler::SmiMin(TNode<Smi> a, TNode<Smi> b) {
return SelectConstant<Smi>(SmiLessThan(a, b), a, b);
}
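// Smi arithmetic can be performed directly on the tagged representation: the
// tag bits of both operands are zero, so they add without carrying into the
// payload. With 31-bit payloads the tagged value fits in 32 bits, allowing a
// cheaper overflow-checked 32-bit add.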
TNode<Smi> CodeStubAssembler::TrySmiAdd(TNode<Smi> lhs, TNode<Smi> rhs,
Label* if_overflow) {
if (SmiValuesAre32Bits()) {
TNode<PairT<IntPtrT, BoolT>> pair = IntPtrAddWithOverflow(
BitcastTaggedToWord(lhs), BitcastTaggedToWord(rhs));
TNode<BoolT> overflow = Projection<1>(pair);
GotoIf(overflow, if_overflow);
TNode<IntPtrT> result = Projection<0>(pair);
return BitcastWordToTaggedSigned(result);
} else {
DCHECK(SmiValuesAre31Bits());
TNode<PairT<Int32T, BoolT>> pair =
Int32AddWithOverflow(TruncateIntPtrToInt32(BitcastTaggedToWord(lhs)),
TruncateIntPtrToInt32(BitcastTaggedToWord(rhs)));
TNode<BoolT> overflow = Projection<1>(pair);
GotoIf(overflow, if_overflow);
TNode<Int32T> result = Projection<0>(pair);
return BitcastWordToTaggedSigned(ChangeInt32ToIntPtr(result));
}
}
TNode<Smi> CodeStubAssembler::TrySmiSub(TNode<Smi> lhs, TNode<Smi> rhs,
Label* if_overflow) {
if (SmiValuesAre32Bits()) {
TNode<PairT<IntPtrT, BoolT>> pair = IntPtrSubWithOverflow(
BitcastTaggedToWord(lhs), BitcastTaggedToWord(rhs));
TNode<BoolT> overflow = Projection<1>(pair);
GotoIf(overflow, if_overflow);
TNode<IntPtrT> result = Projection<0>(pair);
return BitcastWordToTaggedSigned(result);
} else {
DCHECK(SmiValuesAre31Bits());
TNode<PairT<Int32T, BoolT>> pair =
Int32SubWithOverflow(TruncateIntPtrToInt32(BitcastTaggedToWord(lhs)),
TruncateIntPtrToInt32(BitcastTaggedToWord(rhs)));
TNode<BoolT> overflow = Projection<1>(pair);
GotoIf(overflow, if_overflow);
TNode<Int32T> result = Projection<0>(pair);
return BitcastWordToTaggedSigned(ChangeInt32ToIntPtr(result));
}
}
TNode<Object> CodeStubAssembler::NumberMax(SloppyTNode<Object> a,
SloppyTNode<Object> b) {
// TODO(danno): This could be optimized by specifically handling smi cases.
VARIABLE(result, MachineRepresentation::kTagged);
Label done(this), greater_than_equal_a(this), greater_than_equal_b(this);
GotoIfNumberGreaterThanOrEqual(a, b, &greater_than_equal_a);
GotoIfNumberGreaterThanOrEqual(b, a, &greater_than_equal_b);
result.Bind(NanConstant());
Goto(&done);
BIND(&greater_than_equal_a);
result.Bind(a);
Goto(&done);
BIND(&greater_than_equal_b);
result.Bind(b);
Goto(&done);
BIND(&done);
return TNode<Object>::UncheckedCast(result.value());
}
TNode<Object> CodeStubAssembler::NumberMin(SloppyTNode<Object> a,
SloppyTNode<Object> b) {
// TODO(danno): This could be optimized by specifically handling smi cases.
VARIABLE(result, MachineRepresentation::kTagged);
Label done(this), greater_than_equal_a(this), greater_than_equal_b(this);
GotoIfNumberGreaterThanOrEqual(a, b, &greater_than_equal_a);
GotoIfNumberGreaterThanOrEqual(b, a, &greater_than_equal_b);
result.Bind(NanConstant());
Goto(&done);
BIND(&greater_than_equal_a);
result.Bind(b);
Goto(&done);
BIND(&greater_than_equal_b);
result.Bind(a);
Goto(&done);
BIND(&done);
return TNode<Object>::UncheckedCast(result.value());
}
TNode<IntPtrT> CodeStubAssembler::ConvertToRelativeIndex(
TNode<Context> context, TNode<Object> index, TNode<IntPtrT> length) {
TVARIABLE(IntPtrT, result);
TNode<Number> const index_int =
ToInteger_Inline(context, index, CodeStubAssembler::kTruncateMinusZero);
TNode<IntPtrT> zero = IntPtrConstant(0);
Label done(this);
Label if_issmi(this), if_isheapnumber(this, Label::kDeferred);
Branch(TaggedIsSmi(index_int), &if_issmi, &if_isheapnumber);
BIND(&if_issmi);
{
TNode<Smi> const index_smi = CAST(index_int);
result = Select<IntPtrT>(
IntPtrLessThan(SmiUntag(index_smi), zero),
[=] { return IntPtrMax(IntPtrAdd(length, SmiUntag(index_smi)), zero); },
[=] { return IntPtrMin(SmiUntag(index_smi), length); });
Goto(&done);
}
BIND(&if_isheapnumber);
{
    // If {index} is a heap number, it is definitely out of bounds. If it is
    // negative, {index} = max({length} + {index}, 0) = 0, so set {index} to
    // zero. If it is positive, set {index} to {length}.
TNode<HeapNumber> const index_hn = CAST(index_int);
TNode<Float64T> const float_zero = Float64Constant(0.);
TNode<Float64T> const index_float = LoadHeapNumberValue(index_hn);
result = SelectConstant<IntPtrT>(Float64LessThan(index_float, float_zero),
zero, length);
Goto(&done);
}
BIND(&done);
return result.value();
}
TNode<Number> CodeStubAssembler::SmiMod(TNode<Smi> a, TNode<Smi> b) {
TVARIABLE(Number, var_result);
Label return_result(this, &var_result),
return_minuszero(this, Label::kDeferred),
return_nan(this, Label::kDeferred);
// Untag {a} and {b}.
TNode<Int32T> int_a = SmiToInt32(a);
TNode<Int32T> int_b = SmiToInt32(b);
// Return NaN if {b} is zero.
GotoIf(Word32Equal(int_b, Int32Constant(0)), &return_nan);
// Check if {a} is non-negative.
Label if_aisnotnegative(this), if_aisnegative(this, Label::kDeferred);
Branch(Int32LessThanOrEqual(Int32Constant(0), int_a), &if_aisnotnegative,
&if_aisnegative);
BIND(&if_aisnotnegative);
{
    // Fast case; there are no other edge cases to check.
TNode<Int32T> r = Int32Mod(int_a, int_b);
var_result = SmiFromInt32(r);
Goto(&return_result);
}
BIND(&if_aisnegative);
{
if (SmiValuesAre32Bits()) {
// Check if {a} is kMinInt and {b} is -1 (only relevant if the
// kMinInt is actually representable as a Smi).
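      // kMinInt % -1 is mathematically 0, but JavaScript requires -0 because
      // the dividend is negative; branching out here also keeps Int32Mod from
      // seeing a pair whose matching division, kMinInt / -1, overflows int32.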
Label join(this);
GotoIfNot(Word32Equal(int_a, Int32Constant(kMinInt)), &join);
GotoIf(Word32Equal(int_b, Int32Constant(-1)), &return_minuszero);
Goto(&join);
BIND(&join);
}
// Perform the integer modulus operation.
TNode<Int32T> r = Int32Mod(int_a, int_b);
// Check if {r} is zero, and if so return -0, because we have to
// take the sign of the left hand side {a}, which is negative.
GotoIf(Word32Equal(r, Int32Constant(0)), &return_minuszero);
    // The remainder {r} can be outside the valid Smi range on 32-bit
// architectures, so we cannot just say SmiFromInt32(r) here.
var_result = ChangeInt32ToTagged(r);
Goto(&return_result);
}
BIND(&return_minuszero);
var_result = MinusZeroConstant();
Goto(&return_result);
BIND(&return_nan);
var_result = NanConstant();
Goto(&return_result);
BIND(&return_result);
return var_result.value();
}
TNode<Number> CodeStubAssembler::SmiMul(TNode<Smi> a, TNode<Smi> b) {
TVARIABLE(Number, var_result);
VARIABLE(var_lhs_float64, MachineRepresentation::kFloat64);
VARIABLE(var_rhs_float64, MachineRepresentation::kFloat64);
Label return_result(this, &var_result);
// Both {a} and {b} are Smis. Convert them to integers and multiply.
Node* lhs32 = SmiToInt32(a);
Node* rhs32 = SmiToInt32(b);
Node* pair = Int32MulWithOverflow(lhs32, rhs32);
Node* overflow = Projection(1, pair);
// Check if the multiplication overflowed.
Label if_overflow(this, Label::kDeferred), if_notoverflow(this);
Branch(overflow, &if_overflow, &if_notoverflow);
BIND(&if_notoverflow);
{
// If the answer is zero, we may need to return -0.0, depending on the
// input.
Label answer_zero(this), answer_not_zero(this);
Node* answer = Projection(0, pair);
Node* zero = Int32Constant(0);
Branch(Word32Equal(answer, zero), &answer_zero, &answer_not_zero);
BIND(&answer_not_zero);
{
var_result = ChangeInt32ToTagged(answer);
Goto(&return_result);
}
BIND(&answer_zero);
{
Node* or_result = Word32Or(lhs32, rhs32);
Label if_should_be_negative_zero(this), if_should_be_zero(this);
Branch(Int32LessThan(or_result, zero), &if_should_be_negative_zero,
&if_should_be_zero);
BIND(&if_should_be_negative_zero);
{
var_result = MinusZeroConstant();
Goto(&return_result);
}
BIND(&if_should_be_zero);
{
var_result = SmiConstant(0);
Goto(&return_result);
}
}
}
BIND(&if_overflow);
{
var_lhs_float64.Bind(SmiToFloat64(a));
var_rhs_float64.Bind(SmiToFloat64(b));
Node* value = Float64Mul(var_lhs_float64.value(), var_rhs_float64.value());
var_result = AllocateHeapNumberWithValue(value);
Goto(&return_result);
}
BIND(&return_result);
return var_result.value();
}
TNode<Smi> CodeStubAssembler::TrySmiDiv(TNode<Smi> dividend, TNode<Smi> divisor,
Label* bailout) {
  // Both {dividend} and {divisor} are Smis. Bail out to floating point
  // division if {divisor} is zero.
GotoIf(WordEqual(divisor, SmiConstant(0)), bailout);
// Do floating point division if {dividend} is zero and {divisor} is
// negative.
Label dividend_is_zero(this), dividend_is_not_zero(this);
Branch(WordEqual(dividend, SmiConstant(0)), &dividend_is_zero,
&dividend_is_not_zero);
BIND(&dividend_is_zero);
{
GotoIf(SmiLessThan(divisor, SmiConstant(0)), bailout);
Goto(&dividend_is_not_zero);
}
BIND(&dividend_is_not_zero);
TNode<Int32T> untagged_divisor = SmiToInt32(divisor);
TNode<Int32T> untagged_dividend = SmiToInt32(dividend);
  // Do floating point division if {dividend} is kMinInt (or kMinInt >> 1,
  // the minimum 31-bit Smi payload, if the Smi size is 31) and {divisor}
  // is -1.
Label divisor_is_minus_one(this), divisor_is_not_minus_one(this);
Branch(Word32Equal(untagged_divisor, Int32Constant(-1)),
&divisor_is_minus_one, &divisor_is_not_minus_one);
BIND(&divisor_is_minus_one);
{
GotoIf(Word32Equal(
untagged_dividend,
Int32Constant(kSmiValueSize == 32 ? kMinInt : (kMinInt >> 1))),
bailout);
Goto(&divisor_is_not_minus_one);
}
BIND(&divisor_is_not_minus_one);
TNode<Int32T> untagged_result = Int32Div(untagged_dividend, untagged_divisor);
TNode<Int32T> truncated = Signed(Int32Mul(untagged_result, untagged_divisor));
// Do floating point division if the remainder is not 0.
GotoIf(Word32NotEqual(untagged_dividend, truncated), bailout);
return SmiFromInt32(untagged_result);
}
TNode<Int32T> CodeStubAssembler::TruncateIntPtrToInt32(
SloppyTNode<IntPtrT> value) {
if (Is64()) {
return TruncateInt64ToInt32(ReinterpretCast<Int64T>(value));
}
return ReinterpretCast<Int32T>(value);
}
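// Smis carry a zero low tag bit (kSmiTag == 0, kSmiTagMask == 1), while heap
// object pointers end in kHeapObjectTag == 1, so masking the bitcast word
// with kSmiTagMask and comparing against zero identifies Smis.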
TNode<BoolT> CodeStubAssembler::TaggedIsSmi(SloppyTNode<Object> a) {
return WordEqual(WordAnd(BitcastTaggedToWord(a), IntPtrConstant(kSmiTagMask)),
IntPtrConstant(0));
}
TNode<BoolT> CodeStubAssembler::TaggedIsSmi(TNode<MaybeObject> a) {
return WordEqual(
WordAnd(BitcastMaybeObjectToWord(a), IntPtrConstant(kSmiTagMask)),
IntPtrConstant(0));
}
TNode<BoolT> CodeStubAssembler::TaggedIsNotSmi(SloppyTNode<Object> a) {
return WordNotEqual(
WordAnd(BitcastTaggedToWord(a), IntPtrConstant(kSmiTagMask)),
IntPtrConstant(0));
}
TNode<BoolT> CodeStubAssembler::TaggedIsPositiveSmi(SloppyTNode<Object> a) {
return WordEqual(WordAnd(BitcastTaggedToWord(a),
IntPtrConstant(kSmiTagMask | kSmiSignMask)),
IntPtrConstant(0));
}
TNode<BoolT> CodeStubAssembler::WordIsWordAligned(SloppyTNode<WordT> word) {
return WordEqual(IntPtrConstant(0),
WordAnd(word, IntPtrConstant(kPointerSize - 1)));
}
#if DEBUG
void CodeStubAssembler::Bind(Label* label, AssemblerDebugInfo debug_info) {
CodeAssembler::Bind(label, debug_info);
}
#else
void CodeStubAssembler::Bind(Label* label) { CodeAssembler::Bind(label); }
#endif // DEBUG
TNode<Float64T> CodeStubAssembler::LoadDoubleWithHoleCheck(
TNode<FixedDoubleArray> array, TNode<Smi> index, Label* if_hole) {
return LoadFixedDoubleArrayElement(array, index, MachineType::Float64(), 0,
SMI_PARAMETERS, if_hole);
}
void CodeStubAssembler::BranchIfPrototypesHaveNoElements(
Node* receiver_map, Label* definitely_no_elements,
Label* possibly_elements) {
CSA_SLOW_ASSERT(this, IsMap(receiver_map));
VARIABLE(var_map, MachineRepresentation::kTagged, receiver_map);
Label loop_body(this, &var_map);
Node* empty_fixed_array = LoadRoot(Heap::kEmptyFixedArrayRootIndex);
Node* empty_slow_element_dictionary =
LoadRoot(Heap::kEmptySlowElementDictionaryRootIndex);
Goto(&loop_body);
BIND(&loop_body);
{
Node* map = var_map.value();
Node* prototype = LoadMapPrototype(map);
GotoIf(IsNull(prototype), definitely_no_elements);
Node* prototype_map = LoadMap(prototype);
TNode<Int32T> prototype_instance_type = LoadMapInstanceType(prototype_map);
// Pessimistically assume elements if a Proxy, Special API Object,
// or JSValue wrapper is found on the prototype chain. After this
// instance type check, it's not necessary to check for interceptors or
// access checks.
Label if_custom(this, Label::kDeferred), if_notcustom(this);
Branch(IsCustomElementsReceiverInstanceType(prototype_instance_type),
&if_custom, &if_notcustom);
BIND(&if_custom);
{
// For string JSValue wrappers we still support the checks as long
// as they wrap the empty string.
GotoIfNot(InstanceTypeEqual(prototype_instance_type, JS_VALUE_TYPE),
possibly_elements);
Node* prototype_value = LoadJSValueValue(prototype);
Branch(IsEmptyString(prototype_value), &if_notcustom, possibly_elements);
}
BIND(&if_notcustom);
{
Node* prototype_elements = LoadElements(prototype);
var_map.Bind(prototype_map);
GotoIf(WordEqual(prototype_elements, empty_fixed_array), &loop_body);
Branch(WordEqual(prototype_elements, empty_slow_element_dictionary),
&loop_body, possibly_elements);
}
}
}
void CodeStubAssembler::BranchIfJSReceiver(Node* object, Label* if_true,
Label* if_false) {
GotoIf(TaggedIsSmi(object), if_false);
STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
Branch(IsJSReceiver(object), if_true, if_false);
}
TNode<BoolT> CodeStubAssembler::IsFastJSArray(SloppyTNode<Object> object,
SloppyTNode<Context> context) {
Label if_true(this), if_false(this, Label::kDeferred), exit(this);
BranchIfFastJSArray(object, context, &if_true, &if_false);
TVARIABLE(BoolT, var_result);
BIND(&if_true);
{
var_result = Int32TrueConstant();
Goto(&exit);
}
BIND(&if_false);
{
var_result = Int32FalseConstant();
Goto(&exit);
}
BIND(&exit);
return var_result.value();
}
TNode<BoolT> CodeStubAssembler::IsFastJSArrayWithNoCustomIteration(
TNode<Object> object, TNode<Context> context,
TNode<Context> native_context) {
Label if_false(this, Label::kDeferred), if_fast(this), exit(this);
TVARIABLE(BoolT, var_result);
GotoIfForceSlowPath(&if_false);
BranchIfFastJSArray(object, context, &if_fast, &if_false);
BIND(&if_fast);
{
// Check that the Array.prototype hasn't been modified in a way that would
// affect iteration.
Node* protector_cell = LoadRoot(Heap::kArrayIteratorProtectorRootIndex);
DCHECK(isolate()->heap()->array_iterator_protector()->IsPropertyCell());
var_result =
WordEqual(LoadObjectField(protector_cell, PropertyCell::kValueOffset),
SmiConstant(Isolate::kProtectorValid));
Goto(&exit);
}
BIND(&if_false);
{
var_result = Int32FalseConstant();
Goto(&exit);
}
BIND(&exit);
return var_result.value();
}
void CodeStubAssembler::BranchIfFastJSArray(Node* object, Node* context,
Label* if_true, Label* if_false) {
GotoIfForceSlowPath(if_false);
  // Bail out if the receiver is a Smi.
GotoIf(TaggedIsSmi(object), if_false);
Node* map = LoadMap(object);
GotoIfNot(IsJSArrayMap(map), if_false);
  // Bail out if the receiver has slow elements.
Node* elements_kind = LoadMapElementsKind(map);
GotoIfNot(IsFastElementsKind(elements_kind), if_false);
// Verify that our prototype is the initial array prototype.
GotoIfNot(IsPrototypeInitialArrayPrototype(context, map), if_false);
Branch(IsNoElementsProtectorCellInvalid(), if_false, if_true);
}
void CodeStubAssembler::BranchIfFastJSArrayForCopy(Node* object, Node* context,
Label* if_true,
Label* if_false) {
GotoIf(IsArraySpeciesProtectorCellInvalid(), if_false);
BranchIfFastJSArray(object, context, if_true, if_false);
}
void CodeStubAssembler::GotoIfForceSlowPath(Label* if_true) {
#ifdef V8_ENABLE_FORCE_SLOW_PATH
Node* const force_slow_path_addr =
ExternalConstant(ExternalReference::force_slow_path(isolate()));
Node* const force_slow = Load(MachineType::Uint8(), force_slow_path_addr);
GotoIf(force_slow, if_true);
#endif
}
Node* CodeStubAssembler::AllocateRaw(Node* size_in_bytes, AllocationFlags flags,
Node* top_address, Node* limit_address) {
// TODO(jgruber, chromium:848672): TNodeify AllocateRaw.
// TODO(jgruber, chromium:848672): Call FatalProcessOutOfMemory if this fails.
{
intptr_t constant_value;
if (ToIntPtrConstant(size_in_bytes, constant_value)) {
CHECK(Internals::IsValidSmi(constant_value));
CHECK_GT(constant_value, 0);
} else {
CSA_CHECK(this,
IsValidPositiveSmi(UncheckedCast<IntPtrT>(size_in_bytes)));
}
}
Node* top = Load(MachineType::Pointer(), top_address);
Node* limit = Load(MachineType::Pointer(), limit_address);
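  // Fast-path bump-pointer allocation: {top} is the current allocation
  // pointer and {limit} the end of the linear allocation area. If the bumped
  // pointer stays below {limit}, the bytes are claimed simply by storing the
  // new top; otherwise we fall back to the runtime below.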
// If there's not enough space, call the runtime.
VARIABLE(result, MachineRepresentation::kTagged);
Label runtime_call(this, Label::kDeferred), no_runtime_call(this);
Label merge_runtime(this, &result);
bool needs_double_alignment = flags & kDoubleAlignment;
if (flags & kAllowLargeObjectAllocation) {
Label next(this);
GotoIf(IsRegularHeapObjectSize(size_in_bytes), &next);
Node* runtime_flags = SmiConstant(
Smi::FromInt(AllocateDoubleAlignFlag::encode(needs_double_alignment) |
AllocateTargetSpace::encode(AllocationSpace::LO_SPACE)));
Node* const runtime_result =
CallRuntime(Runtime::kAllocateInTargetSpace, NoContextConstant(),
SmiTag(size_in_bytes), runtime_flags);
result.Bind(runtime_result);
Goto(&merge_runtime);
BIND(&next);
}
VARIABLE(adjusted_size, MachineType::PointerRepresentation(), size_in_bytes);
if (needs_double_alignment) {
Label not_aligned(this), done_alignment(this, &adjusted_size);
Branch(WordAnd(top, IntPtrConstant(kDoubleAlignmentMask)), &not_aligned,
&done_alignment);
BIND(&not_aligned);
Node* not_aligned_size = IntPtrAdd(size_in_bytes, IntPtrConstant(4));
adjusted_size.Bind(not_aligned_size);
Goto(&done_alignment);
BIND(&done_alignment);
}
Node* new_top = IntPtrAdd(top, adjusted_size.value());
Branch(UintPtrGreaterThanOrEqual(new_top, limit), &runtime_call,
&no_runtime_call);
BIND(&runtime_call);
Node* runtime_result;
if (flags & kPretenured) {
Node* runtime_flags = SmiConstant(
Smi::FromInt(AllocateDoubleAlignFlag::encode(needs_double_alignment) |
AllocateTargetSpace::encode(AllocationSpace::OLD_SPACE)));
runtime_result =
CallRuntime(Runtime::kAllocateInTargetSpace, NoContextConstant(),
SmiTag(size_in_bytes), runtime_flags);
} else {
runtime_result = CallRuntime(Runtime::kAllocateInNewSpace,
NoContextConstant(), SmiTag(size_in_bytes));
}
result.Bind(runtime_result);
Goto(&merge_runtime);
  // When there is enough space, return {top} and bump it up.
BIND(&no_runtime_call);
Node* no_runtime_result = top;
StoreNoWriteBarrier(MachineType::PointerRepresentation(), top_address,
new_top);
VARIABLE(address, MachineType::PointerRepresentation(), no_runtime_result);
if (needs_double_alignment) {
Label needs_filler(this), done_filling(this, &address);
Branch(IntPtrEqual(adjusted_size.value(), size_in_bytes), &done_filling,
&needs_filler);
BIND(&needs_filler);
    // Store a one-pointer filler and advance the address by kPointerSize
    // (4 bytes on the 32-bit targets that require double alignment).
StoreNoWriteBarrier(MachineRepresentation::kTagged, top,
LoadRoot(Heap::kOnePointerFillerMapRootIndex));
address.Bind(IntPtrAdd(no_runtime_result, IntPtrConstant(4)));
Goto(&done_filling);
BIND(&done_filling);
}
no_runtime_result = BitcastWordToTagged(
IntPtrAdd(address.value(), IntPtrConstant(kHeapObjectTag)));
result.Bind(no_runtime_result);
Goto(&merge_runtime);
BIND(&merge_runtime);
return result.value();
}
Node* CodeStubAssembler::AllocateRawUnaligned(Node* size_in_bytes,
AllocationFlags flags,
Node* top_address,
Node* limit_address) {
DCHECK_EQ(flags & kDoubleAlignment, 0);
return AllocateRaw(size_in_bytes, flags, top_address, limit_address);
}
Node* CodeStubAssembler::AllocateRawDoubleAligned(Node* size_in_bytes,
AllocationFlags flags,
Node* top_address,
Node* limit_address) {
#if defined(V8_HOST_ARCH_32_BIT)
return AllocateRaw(size_in_bytes, flags | kDoubleAlignment, top_address,
limit_address);
#elif defined(V8_HOST_ARCH_64_BIT)
  // Allocation on a 64-bit machine is naturally double-aligned.
return AllocateRaw(size_in_bytes, flags & ~kDoubleAlignment, top_address,
limit_address);
#else
#error Architecture not supported
#endif
}
Node* CodeStubAssembler::AllocateInNewSpace(Node* size_in_bytes,
AllocationFlags flags) {
DCHECK(flags == kNone || flags == kDoubleAlignment);
CSA_ASSERT(this, IsRegularHeapObjectSize(size_in_bytes));
return Allocate(size_in_bytes, flags);
}
Node* CodeStubAssembler::Allocate(Node* size_in_bytes, AllocationFlags flags) {
Comment("Allocate");
bool const new_space = !(flags & kPretenured);
Node* top_address = ExternalConstant(
new_space
? ExternalReference::new_space_allocation_top_address(isolate())
: ExternalReference::old_space_allocation_top_address(isolate()));
DCHECK_EQ(kPointerSize,
ExternalReference::new_space_allocation_limit_address(isolate())
.address() -
ExternalReference::new_space_allocation_top_address(isolate())
.address());
DCHECK_EQ(kPointerSize,
ExternalReference::old_space_allocation_limit_address(isolate())
.address() -
ExternalReference::old_space_allocation_top_address(isolate())
.address());
Node* limit_address = IntPtrAdd(top_address, IntPtrConstant(kPointerSize));
if (flags & kDoubleAlignment) {
return AllocateRawDoubleAligned(size_in_bytes, flags, top_address,
limit_address);
} else {
return AllocateRawUnaligned(size_in_bytes, flags, top_address,
limit_address);
}
}
Node* CodeStubAssembler::AllocateInNewSpace(int size_in_bytes,
AllocationFlags flags) {
CHECK(flags == kNone || flags == kDoubleAlignment);
DCHECK_LE(size_in_bytes, kMaxRegularHeapObjectSize);
return CodeStubAssembler::Allocate(IntPtrConstant(size_in_bytes), flags);
}
Node* CodeStubAssembler::Allocate(int size_in_bytes, AllocationFlags flags) {
return CodeStubAssembler::Allocate(IntPtrConstant(size_in_bytes), flags);
}
Node* CodeStubAssembler::InnerAllocate(Node* previous, Node* offset) {
return BitcastWordToTagged(IntPtrAdd(BitcastTaggedToWord(previous), offset));
}
Node* CodeStubAssembler::InnerAllocate(Node* previous, int offset) {
return InnerAllocate(previous, IntPtrConstant(offset));
}
Node* CodeStubAssembler::IsRegularHeapObjectSize(Node* size) {
return UintPtrLessThanOrEqual(size,
IntPtrConstant(kMaxRegularHeapObjectSize));
}
void CodeStubAssembler::BranchIfToBooleanIsTrue(Node* value, Label* if_true,
Label* if_false) {
Label if_smi(this), if_notsmi(this), if_heapnumber(this, Label::kDeferred),
if_bigint(this, Label::kDeferred);
// Rule out false {value}.
GotoIf(WordEqual(value, FalseConstant()), if_false);
// Check if {value} is a Smi or a HeapObject.
Branch(TaggedIsSmi(value), &if_smi, &if_notsmi);
BIND(&if_smi);
{
    // The {value} is a Smi; we only need to check it against zero.
BranchIfSmiEqual(CAST(value), SmiConstant(0), if_false, if_true);
}
BIND(&if_notsmi);
{
// Check if {value} is the empty string.
GotoIf(IsEmptyString(value), if_false);
// The {value} is a HeapObject, load its map.
Node* value_map = LoadMap(value);
// Only null, undefined and document.all have the undetectable bit set,
// so we can return false immediately when that bit is set.
GotoIf(IsUndetectableMap(value_map), if_false);
// We still need to handle numbers specially, but all other {value}s
// that make it here yield true.
GotoIf(IsHeapNumberMap(value_map), &if_heapnumber);
Branch(IsBigInt(value), &if_bigint, if_true);
BIND(&if_heapnumber);
{
// Load the floating point value of {value}.
Node* value_value = LoadObjectField(value, HeapNumber::kValueOffset,
MachineType::Float64());
// Check if the floating point {value} is neither 0.0, -0.0 nor NaN.
Branch(Float64LessThan(Float64Constant(0.0), Float64Abs(value_value)),
if_true, if_false);
}
BIND(&if_bigint);
{
Node* result =
CallRuntime(Runtime::kBigIntToBoolean, NoContextConstant(), value);
CSA_ASSERT(this, IsBoolean(result));
Branch(WordEqual(result, TrueConstant()), if_true, if_false);
}
}
}
Node* CodeStubAssembler::LoadFromFrame(int offset, MachineType rep) {
Node* frame_pointer = LoadFramePointer();
return Load(rep, frame_pointer, IntPtrConstant(offset));
}
Node* CodeStubAssembler::LoadFromParentFrame(int offset, MachineType rep) {
Node* frame_pointer = LoadParentFramePointer();
return Load(rep, frame_pointer, IntPtrConstant(offset));
}
TNode<JSFunction> CodeStubAssembler::LoadTargetFromFrame() {
DCHECK(IsJSFunctionCall());
return CAST(LoadFromFrame(StandardFrameConstants::kFunctionOffset,
MachineType::TaggedPointer()));
}
Node* CodeStubAssembler::LoadBufferObject(Node* buffer, int offset,
MachineType rep) {
return Load(rep, buffer, IntPtrConstant(offset));
}
Node* CodeStubAssembler::LoadObjectField(SloppyTNode<HeapObject> object,
int offset, MachineType rep) {
CSA_ASSERT(this, IsStrongHeapObject(object));
return Load(rep, object, IntPtrConstant(offset - kHeapObjectTag));
}
Node* CodeStubAssembler::LoadObjectField(SloppyTNode<HeapObject> object,
SloppyTNode<IntPtrT> offset,
MachineType rep) {
CSA_ASSERT(this, IsStrongHeapObject(object));
return Load(rep, object, IntPtrSub(offset, IntPtrConstant(kHeapObjectTag)));
}
TNode<IntPtrT> CodeStubAssembler::LoadAndUntagObjectField(
SloppyTNode<HeapObject> object, int offset) {
if (SmiValuesAre32Bits()) {
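    // With 32-bit Smi payloads on a 64-bit target the payload occupies the
    // upper half of the tagged word, so on little-endian machines the 32-bit
    // load below is offset by half a pointer to address the payload.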
#if V8_TARGET_LITTLE_ENDIAN
offset += kPointerSize / 2;
#endif
return ChangeInt32ToIntPtr(
LoadObjectField(object, offset, MachineType::Int32()));
} else {
return SmiToIntPtr(
LoadObjectField(object, offset, MachineType::AnyTagged()));
}
}
TNode<Int32T> CodeStubAssembler::LoadAndUntagToWord32ObjectField(Node* object,
int offset) {
if (SmiValuesAre32Bits()) {
#if V8_TARGET_LITTLE_ENDIAN
offset += kPointerSize / 2;
#endif
return UncheckedCast<Int32T>(
LoadObjectField(object, offset, MachineType::Int32()));
} else {
return SmiToInt32(
LoadObjectField(object, offset, MachineType::AnyTagged()));
}
}
TNode<IntPtrT> CodeStubAssembler::LoadAndUntagSmi(Node* base, int index) {
if (SmiValuesAre32Bits()) {
#if V8_TARGET_LITTLE_ENDIAN
index += kPointerSize / 2;
#endif
return ChangeInt32ToIntPtr(
Load(MachineType::Int32(), base, IntPtrConstant(index)));
} else {
return SmiToIntPtr(
Load(MachineType::AnyTagged(), base, IntPtrConstant(index)));
}
}
TNode<Int32T> CodeStubAssembler::LoadAndUntagToWord32Root(
Heap::RootListIndex root_index) {
Node* roots_array_start =
ExternalConstant(ExternalReference::roots_array_start(isolate()));
int index = root_index * kPointerSize;
if (SmiValuesAre32Bits()) {
#if V8_TARGET_LITTLE_ENDIAN
index += kPointerSize / 2;
#endif
return UncheckedCast<Int32T>(
Load(MachineType::Int32(), roots_array_start, IntPtrConstant(index)));
} else {
return SmiToInt32(Load(MachineType::AnyTagged(), roots_array_start,
IntPtrConstant(index)));
}
}
Node* CodeStubAssembler::StoreAndTagSmi(Node* base, int offset, Node* value) {
if (SmiValuesAre32Bits()) {
int zero_offset = offset + kPointerSize / 2;
int payload_offset = offset;
#if V8_TARGET_LITTLE_ENDIAN
std::swap(zero_offset, payload_offset);
#endif
StoreNoWriteBarrier(MachineRepresentation::kWord32, base,
IntPtrConstant(zero_offset), Int32Constant(0));
return StoreNoWriteBarrier(MachineRepresentation::kWord32, base,
IntPtrConstant(payload_offset),
TruncateInt64ToInt32(value));
} else {
return StoreNoWriteBarrier(MachineRepresentation::kTaggedSigned, base,
IntPtrConstant(offset), SmiTag(value));
}
}
TNode<Float64T> CodeStubAssembler::LoadHeapNumberValue(
SloppyTNode<HeapNumber> object) {
return TNode<Float64T>::UncheckedCast(LoadObjectField(
object, HeapNumber::kValueOffset, MachineType::Float64()));
}
TNode<Map> CodeStubAssembler::LoadMap(SloppyTNode<HeapObject> object) {
return UncheckedCast<Map>(LoadObjectField(object, HeapObject::kMapOffset));
}
TNode<Int32T> CodeStubAssembler::LoadInstanceType(
SloppyTNode<HeapObject> object) {
return LoadMapInstanceType(LoadMap(object));
}
TNode<BoolT> CodeStubAssembler::HasInstanceType(SloppyTNode<HeapObject> object,
InstanceType instance_type) {
return InstanceTypeEqual(LoadInstanceType(object), instance_type);
}
TNode<BoolT> CodeStubAssembler::DoesntHaveInstanceType(
SloppyTNode<HeapObject> object, InstanceType instance_type) {
return Word32NotEqual(LoadInstanceType(object), Int32Constant(instance_type));
}
TNode<BoolT> CodeStubAssembler::TaggedDoesntHaveInstanceType(
SloppyTNode<HeapObject> any_tagged, InstanceType type) {
/* return Phi <TaggedIsSmi(val), DoesntHaveInstanceType(val, type)> */
TNode<BoolT> tagged_is_smi = TaggedIsSmi(any_tagged);
return Select<BoolT>(
tagged_is_smi, [=]() { return tagged_is_smi; },
[=]() { return DoesntHaveInstanceType(any_tagged, type); });
}
TNode<HeapObject> CodeStubAssembler::LoadFastProperties(
SloppyTNode<JSObject> object) {
CSA_SLOW_ASSERT(this, Word32Not(IsDictionaryMap(LoadMap(object))));
TNode<Object> properties =
LoadObjectField(object, JSObject::kPropertiesOrHashOffset);
return Select<HeapObject>(TaggedIsSmi(properties),
[=] { return EmptyFixedArrayConstant(); },
[=] { return CAST(properties); });
}
TNode<HeapObject> CodeStubAssembler::LoadSlowProperties(
SloppyTNode<JSObject> object) {
CSA_SLOW_ASSERT(this, IsDictionaryMap(LoadMap(object)));
TNode<Object> properties =
LoadObjectField(object, JSObject::kPropertiesOrHashOffset);
return Select<HeapObject>(TaggedIsSmi(properties),
[=] { return EmptyPropertyDictionaryConstant(); },
[=] { return CAST(properties); });
}
TNode<FixedArrayBase> CodeStubAssembler::LoadElements(
SloppyTNode<JSObject> object) {
return CAST(LoadObjectField(object, JSObject::kElementsOffset));
}
TNode<Number> CodeStubAssembler::LoadJSArrayLength(SloppyTNode<JSArray> array) {
CSA_ASSERT(this, IsJSArray(array));
return CAST(LoadObjectField(array, JSArray::kLengthOffset));
}
TNode<Smi> CodeStubAssembler::LoadFastJSArrayLength(
SloppyTNode<JSArray> array) {
TNode<Object> length = LoadJSArrayLength(array);
CSA_ASSERT(this, IsFastElementsKind(LoadElementsKind(array)));
// JSArray length is always a positive Smi for fast arrays.
CSA_SLOW_ASSERT(this, TaggedIsPositiveSmi(length));
return UncheckedCast<Smi>(length);
}
TNode<Smi> CodeStubAssembler::LoadFixedArrayBaseLength(
SloppyTNode<FixedArrayBase> array) {
CSA_SLOW_ASSERT(this, IsNotWeakFixedArraySubclass(array));
return CAST(LoadObjectField(array, FixedArrayBase::kLengthOffset));
}
TNode<IntPtrT> CodeStubAssembler::LoadAndUntagFixedArrayBaseLength(
SloppyTNode<FixedArrayBase> array) {
return LoadAndUntagObjectField(array, FixedArrayBase::kLengthOffset);
}
TNode<IntPtrT> CodeStubAssembler::LoadFeedbackVectorLength(
TNode<FeedbackVector> vector) {
return ChangeInt32ToIntPtr(
LoadObjectField<Int32T>(vector, FeedbackVector::kLengthOffset));
}
TNode<Smi> CodeStubAssembler::LoadWeakFixedArrayLength(
TNode<WeakFixedArray> array) {
return CAST(LoadObjectField(array, WeakFixedArray::kLengthOffset));
}
TNode<IntPtrT> CodeStubAssembler::LoadAndUntagWeakFixedArrayLength(
SloppyTNode<WeakFixedArray> array) {
return LoadAndUntagObjectField(array, WeakFixedArray::kLengthOffset);
}
TNode<Smi> CodeStubAssembler::LoadTypedArrayLength(
TNode<JSTypedArray> typed_array) {
return CAST(LoadObjectField(typed_array, JSTypedArray::kLengthOffset));
}
TNode<Int32T> CodeStubAssembler::LoadMapBitField(SloppyTNode<Map> map) {
CSA_SLOW_ASSERT(this, IsMap(map));
return UncheckedCast<Int32T>(
LoadObjectField(map, Map::kBitFieldOffset, MachineType::Uint8()));
}
TNode<Int32T> CodeStubAssembler::LoadMapBitField2(SloppyTNode<Map> map) {
CSA_SLOW_ASSERT(this, IsMap(map));
return UncheckedCast<Int32T>(
LoadObjectField(map, Map::kBitField2Offset, MachineType::Uint8()));
}
TNode<Uint32T> CodeStubAssembler::LoadMapBitField3(SloppyTNode<Map> map) {
CSA_SLOW_ASSERT(this, IsMap(map));
return UncheckedCast<Uint32T>(
LoadObjectField(map, Map::kBitField3Offset, MachineType::Uint32()));
}
TNode<Int32T> CodeStubAssembler::LoadMapInstanceType(SloppyTNode<Map> map) {
return UncheckedCast<Int32T>(
LoadObjectField(map, Map::kInstanceTypeOffset, MachineType::Uint16()));
}
TNode<Int32T> CodeStubAssembler::LoadMapElementsKind(SloppyTNode<Map> map) {
CSA_SLOW_ASSERT(this, IsMap(map));
Node* bit_field2 = LoadMapBitField2(map);
return Signed(DecodeWord32<Map::ElementsKindBits>(bit_field2));
}
TNode<Int32T> CodeStubAssembler::LoadElementsKind(
SloppyTNode<HeapObject> object) {
return LoadMapElementsKind(LoadMap(object));
}
TNode<DescriptorArray> CodeStubAssembler::LoadMapDescriptors(
SloppyTNode<Map> map) {
CSA_SLOW_ASSERT(this, IsMap(map));
return CAST(LoadObjectField(map, Map::kDescriptorsOffset));
}
TNode<HeapObject> CodeStubAssembler::LoadMapPrototype(SloppyTNode<Map> map) {
CSA_SLOW_ASSERT(this, IsMap(map));
return CAST(LoadObjectField(map, Map::kPrototypeOffset));
}
TNode<PrototypeInfo> CodeStubAssembler::LoadMapPrototypeInfo(
SloppyTNode<Map> map, Label* if_no_proto_info) {
Label if_strong_heap_object(this);
CSA_ASSERT(this, IsMap(map));
TNode<MaybeObject> maybe_prototype_info =
LoadMaybeWeakObjectField(map, Map::kTransitionsOrPrototypeInfoOffset);
TVARIABLE(Object, prototype_info);
DispatchMaybeObject(maybe_prototype_info, if_no_proto_info, if_no_proto_info,
if_no_proto_info, &if_strong_heap_object,
&prototype_info);
BIND(&if_strong_heap_object);
GotoIfNot(WordEqual(LoadMap(CAST(prototype_info.value())),
LoadRoot(Heap::kPrototypeInfoMapRootIndex)),
if_no_proto_info);
return CAST(prototype_info.value());
}
TNode<IntPtrT> CodeStubAssembler::LoadMapInstanceSizeInWords(
SloppyTNode<Map> map) {
CSA_SLOW_ASSERT(this, IsMap(map));
return ChangeInt32ToIntPtr(LoadObjectField(
map, Map::kInstanceSizeInWordsOffset, MachineType::Uint8()));
}
TNode<IntPtrT> CodeStubAssembler::LoadMapInobjectPropertiesStartInWords(
SloppyTNode<Map> map) {
CSA_SLOW_ASSERT(this, IsMap(map));
// See Map::GetInObjectPropertiesStartInWords() for details.
CSA_ASSERT(this, IsJSObjectMap(map));
return ChangeInt32ToIntPtr(LoadObjectField(
map, Map::kInObjectPropertiesStartOrConstructorFunctionIndexOffset,
MachineType::Uint8()));
}
TNode<IntPtrT> CodeStubAssembler::LoadMapConstructorFunctionIndex(
SloppyTNode<Map> map) {
CSA_SLOW_ASSERT(this, IsMap(map));
// See Map::GetConstructorFunctionIndex() for details.
CSA_ASSERT(this, IsPrimitiveInstanceType(LoadMapInstanceType(map)));
return ChangeInt32ToIntPtr(LoadObjectField(
map, Map::kInObjectPropertiesStartOrConstructorFunctionIndexOffset,
MachineType::Uint8()));
}
TNode<Object> CodeStubAssembler::LoadMapConstructor(SloppyTNode<Map> map) {
CSA_SLOW_ASSERT(this, IsMap(map));
TVARIABLE(Object, result,
LoadObjectField(map, Map::kConstructorOrBackPointerOffset));
Label done(this), loop(this, &result);
Goto(&loop);
BIND(&loop);
{
GotoIf(TaggedIsSmi(result.value()), &done);
Node* is_map_type =
InstanceTypeEqual(LoadInstanceType(CAST(result.value())), MAP_TYPE);
GotoIfNot(is_map_type, &done);
result = LoadObjectField(CAST(result.value()),
Map::kConstructorOrBackPointerOffset);
Goto(&loop);
}
BIND(&done);
return result.value();
}
Node* CodeStubAssembler::LoadMapEnumLength(SloppyTNode<Map> map) {
CSA_SLOW_ASSERT(this, IsMap(map));
Node* bit_field3 = LoadMapBitField3(map);
return DecodeWordFromWord32<Map::EnumLengthBits>(bit_field3);
}
TNode<Object> CodeStubAssembler::LoadMapBackPointer(SloppyTNode<Map> map) {
TNode<HeapObject> object =
CAST(LoadObjectField(map, Map::kConstructorOrBackPointerOffset));
return Select<Object>(IsMap(object), [=] { return object; },
[=] { return UndefinedConstant(); });
}
TNode<IntPtrT> CodeStubAssembler::LoadJSReceiverIdentityHash(
SloppyTNode<Object> receiver, Label* if_no_hash) {
TVARIABLE(IntPtrT, var_hash);
Label done(this), if_smi(this), if_property_array(this),
if_property_dictionary(this), if_fixed_array(this);
TNode<Object> properties_or_hash =
LoadObjectField(TNode<HeapObject>::UncheckedCast(receiver),
JSReceiver::kPropertiesOrHashOffset);
GotoIf(TaggedIsSmi(properties_or_hash), &if_smi);
TNode<HeapObject> properties =
TNode<HeapObject>::UncheckedCast(properties_or_hash);
TNode<Int32T> properties_instance_type = LoadInstanceType(properties);
GotoIf(InstanceTypeEqual(properties_instance_type, PROPERTY_ARRAY_TYPE),
&if_property_array);
Branch(InstanceTypeEqual(properties_instance_type, NAME_DICTIONARY_TYPE),
&if_property_dictionary, &if_fixed_array);
BIND(&if_fixed_array);
{
var_hash = IntPtrConstant(PropertyArray::kNoHashSentinel);
Goto(&done);
}
BIND(&if_smi);
{
var_hash = SmiUntag(TNode<Smi>::UncheckedCast(properties_or_hash));
Goto(&done);
}
BIND(&if_property_array);
{
TNode<IntPtrT> length_and_hash = LoadAndUntagObjectField(
properties, PropertyArray::kLengthAndHashOffset);
var_hash = TNode<IntPtrT>::UncheckedCast(
DecodeWord<PropertyArray::HashField>(length_and_hash));
Goto(&done);
}
BIND(&if_property_dictionary);
{
var_hash = SmiUntag(CAST(LoadFixedArrayElement(
CAST(properties), NameDictionary::kObjectHashIndex)));
Goto(&done);
}
BIND(&done);
if (if_no_hash != nullptr) {
GotoIf(IntPtrEqual(var_hash.value(),
IntPtrConstant(PropertyArray::kNoHashSentinel)),
if_no_hash);
}
return var_hash.value();
}
TNode<Uint32T> CodeStubAssembler::LoadNameHashField(SloppyTNode<Name> name) {
CSA_ASSERT(this, IsName(name));
return LoadObjectField<Uint32T>(name, Name::kHashFieldOffset);
}
TNode<Uint32T> CodeStubAssembler::LoadNameHash(SloppyTNode<Name> name,
Label* if_hash_not_computed) {
TNode<Uint32T> hash_field = LoadNameHashField(name);
if (if_hash_not_computed != nullptr) {
GotoIf(IsSetWord32(hash_field, Name::kHashNotComputedMask),
if_hash_not_computed);
}
return Unsigned(Word32Shr(hash_field, Int32Constant(Name::kHashShift)));
}
TNode<IntPtrT> CodeStubAssembler::LoadStringLengthAsWord(
SloppyTNode<String> object) {
return SmiUntag(LoadStringLengthAsSmi(object));
}
TNode<Smi> CodeStubAssembler::LoadStringLengthAsSmi(
SloppyTNode<String> object) {
CSA_ASSERT(this, IsString(object));
return CAST(LoadObjectField(object, String::kLengthOffset,
MachineType::TaggedPointer()));
}
Node* CodeStubAssembler::PointerToSeqStringData(Node* seq_string) {
CSA_ASSERT(this, IsString(seq_string));
CSA_ASSERT(this,
IsSequentialStringInstanceType(LoadInstanceType(seq_string)));
STATIC_ASSERT(SeqOneByteString::kHeaderSize == SeqTwoByteString::kHeaderSize);
return IntPtrAdd(
BitcastTaggedToWord(seq_string),
IntPtrConstant(SeqOneByteString::kHeaderSize - kHeapObjectTag));
}
Node* CodeStubAssembler::LoadJSValueValue(Node* object) {
CSA_ASSERT(this, IsJSValue(object));
return LoadObjectField(object, JSValue::kValueOffset);
}
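// A MaybeObject encodes its kind in the low two bits of the word: Smis end
// in 0, strong heap object pointers in 01 (kHeapObjectTag) and weak
// references in 11 (kWeakHeapObjectTag); cleared weak references are a
// dedicated bit pattern. Masking with kHeapObjectTagMask classifies the
// word, and clearing kWeakHeapObjectMask strips the weak bit to recover the
// referenced object.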
void CodeStubAssembler::DispatchMaybeObject(TNode<MaybeObject> maybe_object,
Label* if_smi, Label* if_cleared,
Label* if_weak, Label* if_strong,
TVariable<Object>* extracted) {
Label inner_if_smi(this), inner_if_strong(this);
GotoIf(TaggedIsSmi(maybe_object), &inner_if_smi);
GotoIf(WordEqual(BitcastMaybeObjectToWord(maybe_object),
IntPtrConstant(reinterpret_cast<intptr_t>(
HeapObjectReference::ClearedValue()))),
if_cleared);
GotoIf(WordEqual(WordAnd(BitcastMaybeObjectToWord(maybe_object),
IntPtrConstant(kHeapObjectTagMask)),
IntPtrConstant(kHeapObjectTag)),
&inner_if_strong);
*extracted =
BitcastWordToTagged(WordAnd(BitcastMaybeObjectToWord(maybe_object),
IntPtrConstant(~kWeakHeapObjectMask)));
Goto(if_weak);
BIND(&inner_if_smi);
*extracted = CAST(maybe_object);
Goto(if_smi);
BIND(&inner_if_strong);
*extracted = CAST(maybe_object);
Goto(if_strong);
}
TNode<BoolT> CodeStubAssembler::IsStrongHeapObject(TNode<MaybeObject> value) {
return WordEqual(WordAnd(BitcastMaybeObjectToWord(value),
IntPtrConstant(kHeapObjectTagMask)),
IntPtrConstant(kHeapObjectTag));
}
TNode<HeapObject> CodeStubAssembler::ToStrongHeapObject(
TNode<MaybeObject> value, Label* if_not_strong) {
GotoIfNot(IsStrongHeapObject(value), if_not_strong);
return CAST(value);
}
TNode<BoolT> CodeStubAssembler::IsWeakOrClearedHeapObject(
TNode<MaybeObject> value) {
return WordEqual(WordAnd(BitcastMaybeObjectToWord(value),
IntPtrConstant(kHeapObjectTagMask)),
IntPtrConstant(kWeakHeapObjectTag));
}
TNode<BoolT> CodeStubAssembler::IsClearedWeakHeapObject(
TNode<MaybeObject> value) {
return WordEqual(BitcastMaybeObjectToWord(value),
IntPtrConstant(kClearedWeakHeapObject));
}
TNode<BoolT> CodeStubAssembler::IsNotClearedWeakHeapObject(
TNode<MaybeObject> value) {
return WordNotEqual(BitcastMaybeObjectToWord(value),
IntPtrConstant(kClearedWeakHeapObject));
}
TNode<HeapObject> CodeStubAssembler::ToWeakHeapObject(
TNode<MaybeObject> value) {
CSA_ASSERT(this, IsWeakOrClearedHeapObject(value));
CSA_ASSERT(this, IsNotClearedWeakHeapObject(value));
return UncheckedCast<HeapObject>(BitcastWordToTagged(WordAnd(
BitcastMaybeObjectToWord(value), IntPtrConstant(~kWeakHeapObjectMask))));
}
TNode<HeapObject> CodeStubAssembler::ToWeakHeapObject(TNode<MaybeObject> value,
Label* if_cleared) {
GotoIf(IsClearedWeakHeapObject(value), if_cleared);
return ToWeakHeapObject(value);
}
TNode<BoolT> CodeStubAssembler::IsWeakReferenceTo(TNode<MaybeObject> object,
TNode<Object> value) {
return WordEqual(WordAnd(BitcastMaybeObjectToWord(object),
IntPtrConstant(~kWeakHeapObjectMask)),
BitcastTaggedToWord(value));
}
TNode<BoolT> CodeStubAssembler::IsStrongReferenceTo(TNode<MaybeObject> object,
TNode<Object> value) {
return WordEqual(BitcastMaybeObjectToWord(object),
BitcastTaggedToWord(value));
}
TNode<BoolT> CodeStubAssembler::IsNotWeakReferenceTo(TNode<MaybeObject> object,
TNode<Object> value) {
return WordNotEqual(WordAnd(BitcastMaybeObjectToWord(object),
IntPtrConstant(~kWeakHeapObjectMask)),
BitcastTaggedToWord(value));
}
TNode<MaybeObject> CodeStubAssembler::MakeWeak(TNode<HeapObject> value) {
return ReinterpretCast<MaybeObject>(BitcastWordToTagged(
WordOr(BitcastTaggedToWord(value), IntPtrConstant(kWeakHeapObjectTag))));
}
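
// Usage sketch for the MaybeObject helpers above (hypothetical caller code;
// |entry| and the label are illustrative only):
//
//   TNode<MaybeObject> entry = LoadWeakFixedArrayElement(array, index);
//   Label if_cleared(this);
//   TNode<HeapObject> target = ToWeakHeapObject(entry, &if_cleared);
//   // |target| is a strong reference from here on.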
TNode<MaybeObject> CodeStubAssembler::LoadArrayElement(
SloppyTNode<HeapObject> array, int array_header_size, Node* index_node,
int additional_offset, ParameterMode parameter_mode,
LoadSensitivity needs_poisoning) {
CSA_ASSERT(this, IntPtrGreaterThanOrEqual(
ParameterToIntPtr(index_node, parameter_mode),
IntPtrConstant(0)));
DCHECK_EQ(additional_offset % kPointerSize, 0);
int32_t header_size = array_header_size + additional_offset - kHeapObjectTag;
TNode<IntPtrT> offset = ElementOffsetFromIndex(index_node, HOLEY_ELEMENTS,
parameter_mode, header_size);
STATIC_ASSERT(FixedArrayBase::kLengthOffset == WeakFixedArray::kLengthOffset);
STATIC_ASSERT(FixedArrayBase::kLengthOffset ==
PropertyArray::kLengthAndHashOffset);
  // Check that index_node (offset by additional_offset) addresses a valid
  // element, i.e. stays below the array's length.
// TODO(cbruni): Use proper LoadXXLength helpers
CSA_ASSERT(
this,
IsOffsetInBounds(
offset,
Select<IntPtrT>(
IsPropertyArray(array),
[=] {
TNode<IntPtrT> length_and_hash = LoadAndUntagObjectField(
array, PropertyArray::kLengthAndHashOffset);
return TNode<IntPtrT>::UncheckedCast(
DecodeWord<PropertyArray::LengthField>(length_and_hash));
},
[=] {
return LoadAndUntagObjectField(array,
FixedArrayBase::kLengthOffset);
}),
FixedArray::kHeaderSize));
return UncheckedCast<MaybeObject>(
Load(MachineType::AnyTagged(), array, offset, needs_poisoning));
}
TNode<Object> CodeStubAssembler::LoadFixedArrayElement(
TNode<FixedArray> object, Node* index_node, int additional_offset,
ParameterMode parameter_mode, LoadSensitivity needs_poisoning) {
CSA_ASSERT(this, IsFixedArraySubclass(object));
CSA_ASSERT(this, IsNotWeakFixedArraySubclass(object));
TNode<MaybeObject> element =
LoadArrayElement(object, FixedArray::kHeaderSize, index_node,
additional_offset, parameter_mode, needs_poisoning);
return CAST(element);
}
TNode<Object> CodeStubAssembler::LoadPropertyArrayElement(
SloppyTNode<PropertyArray> object, SloppyTNode<IntPtrT> index) {
int additional_offset = 0;
ParameterMode parameter_mode = INTPTR_PARAMETERS;
LoadSensitivity needs_poisoning = LoadSensitivity::kSafe;
STATIC_ASSERT(PropertyArray::kHeaderSize == FixedArray::kHeaderSize);
return CAST(LoadArrayElement(object, PropertyArray::kHeaderSize, index,
additional_offset, parameter_mode,
needs_poisoning));
}
TNode<IntPtrT> CodeStubAssembler::LoadPropertyArrayLength(
TNode<PropertyArray> object) {
TNode<IntPtrT> value =
LoadAndUntagObjectField(object, PropertyArray::kLengthAndHashOffset);
return Signed(DecodeWord<PropertyArray::LengthField>(value));
}
TNode<RawPtrT> CodeStubAssembler::LoadFixedTypedArrayBackingStore(
TNode<FixedTypedArrayBase> typed_array) {
// Backing store = external_pointer + base_pointer.
Node* external_pointer =
LoadObjectField(typed_array, FixedTypedArrayBase::kExternalPointerOffset,
MachineType::Pointer());
Node* base_pointer =
LoadObjectField(typed_array, FixedTypedArrayBase::kBasePointerOffset);
return UncheckedCast<RawPtrT>(
IntPtrAdd(external_pointer, BitcastTaggedToWord(base_pointer)));
}
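
// For on-heap typed arrays the base pointer is the elements object itself
// and the external pointer is a small adjustment; for off-heap arrays the
// base pointer is Smi zero and the external pointer is the raw backing
// store address, so the sum above is valid in both cases.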
Node* CodeStubAssembler::LoadFixedBigInt64ArrayElementAsTagged(
Node* data_pointer, Node* offset) {
TVARIABLE(BigInt, var_result);
Label done(this), if_zero(this);
if (Is64()) {
TNode<IntPtrT> value = UncheckedCast<IntPtrT>(
Load(MachineType::IntPtr(), data_pointer, offset));
Label if_positive(this), if_negative(this);
GotoIf(IntPtrEqual(value, IntPtrConstant(0)), &if_zero);
var_result = AllocateRawBigInt(IntPtrConstant(1));
Branch(IntPtrGreaterThan(value, IntPtrConstant(0)), &if_positive,
&if_negative);
BIND(&if_positive);
{
StoreBigIntBitfield(var_result.value(),
IntPtrConstant(BigInt::SignBits::encode(false) |
BigInt::LengthBits::encode(1)));
StoreBigIntDigit(var_result.value(), 0, Unsigned(value));
Goto(&done);
}
BIND(&if_negative);
{
StoreBigIntBitfield(var_result.value(),
IntPtrConstant(BigInt::SignBits::encode(true) |
BigInt::LengthBits::encode(1)));
StoreBigIntDigit(var_result.value(), 0,
Unsigned(IntPtrSub(IntPtrConstant(0), value)));
Goto(&done);
}
} else {
DCHECK(!Is64());
TVARIABLE(WordT, var_sign, IntPtrConstant(BigInt::SignBits::encode(false)));
TVARIABLE(IntPtrT, var_low);
TVARIABLE(IntPtrT, var_high);
#if defined(V8_TARGET_BIG_ENDIAN)
var_high = UncheckedCast<IntPtrT>(
Load(MachineType::UintPtr(), data_pointer, offset));
var_low = UncheckedCast<IntPtrT>(
Load(MachineType::UintPtr(), data_pointer,
Int32Add(offset, Int32Constant(kPointerSize))));
#else
var_low = UncheckedCast<IntPtrT>(
Load(MachineType::UintPtr(), data_pointer, offset));
var_high = UncheckedCast<IntPtrT>(
Load(MachineType::UintPtr(), data_pointer,
Int32Add(offset, Int32Constant(kPointerSize))));
#endif
Label high_zero(this), negative(this), allocate_one_digit(this),
allocate_two_digits(this);
GotoIf(WordEqual(var_high.value(), IntPtrConstant(0)), &high_zero);
Branch(IntPtrLessThan(var_high.value(), IntPtrConstant(0)), &negative,
&allocate_two_digits);
BIND(&high_zero);
Branch(WordEqual(var_low.value(), IntPtrConstant(0)), &if_zero,
&allocate_one_digit);
BIND(&negative);
{
var_sign = IntPtrConstant(BigInt::SignBits::encode(true));
// We must negate the value by computing "0 - (high|low)", performing
// both parts of the subtraction separately and manually taking care
// of the carry bit (which is 1 iff low != 0).
var_high = IntPtrSub(IntPtrConstant(0), var_high.value());
Label carry(this), no_carry(this);
Branch(WordEqual(var_low.value(), IntPtrConstant(0)), &no_carry, &carry);
BIND(&carry);
var_high = IntPtrSub(var_high.value(), IntPtrConstant(1));
Goto(&no_carry);
BIND(&no_carry);
var_low = IntPtrSub(IntPtrConstant(0), var_low.value());
// var_high was non-zero going into this block, but subtracting the
// carry bit from it could bring us back onto the "one digit" path.
Branch(WordEqual(var_high.value(), IntPtrConstant(0)),
&allocate_one_digit, &allocate_two_digits);
}
BIND(&allocate_one_digit);
{
var_result = AllocateRawBigInt(IntPtrConstant(1));
StoreBigIntBitfield(
var_result.value(),
WordOr(var_sign.value(),
IntPtrConstant(BigInt::LengthBits::encode(1))));
StoreBigIntDigit(var_result.value(), 0, Unsigned(var_low.value()));
Goto(&done);
}
BIND(&allocate_two_digits);
{
var_result = AllocateRawBigInt(IntPtrConstant(2));
StoreBigIntBitfield(
var_result.value(),
WordOr(var_sign.value(),
IntPtrConstant(BigInt::LengthBits::encode(2))));
StoreBigIntDigit(var_result.value(), 0, Unsigned(var_low.value()));
StoreBigIntDigit(var_result.value(), 1, Unsigned(var_high.value()));
Goto(&done);
}
}
BIND(&if_zero);
var_result = AllocateBigInt(IntPtrConstant(0));
Goto(&done);
BIND(&done);
return var_result.value();
}
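
// The unsigned 64-bit loader below is the simpler sibling of the signed one
// above: no sign handling is needed, only length canonicalization (zero
// digits for 0; on 32-bit targets one digit when the high word is zero,
// otherwise two).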
Node* CodeStubAssembler::LoadFixedBigUint64ArrayElementAsTagged(
Node* data_pointer, Node* offset) {
TVARIABLE(BigInt, var_result);
Label if_zero(this), done(this);
if (Is64()) {
TNode<UintPtrT> value = UncheckedCast<UintPtrT>(
Load(MachineType::UintPtr(), data_pointer, offset));
GotoIf(IntPtrEqual(value, IntPtrConstant(0)), &if_zero);
var_result = AllocateBigInt(IntPtrConstant(1));
StoreBigIntDigit(var_result.value(), 0, value);
Goto(&done);
} else {
DCHECK(!Is64());
Label high_zero(this);
#if defined(V8_TARGET_BIG_ENDIAN)
TNode<UintPtrT> high = UncheckedCast<UintPtrT>(
Load(MachineType::UintPtr(), data_pointer, offset));
TNode<UintPtrT> low = UncheckedCast<UintPtrT>(
Load(MachineType::UintPtr(), data_pointer,
Int32Add(offset, Int32Constant(kPointerSize))));
#else
TNode<UintPtrT> low = UncheckedCast<UintPtrT>(
Load(MachineType::UintPtr(), data_pointer, offset));
TNode<UintPtrT> high = UncheckedCast<UintPtrT>(
Load(MachineType::UintPtr(), data_pointer,
Int32Add(offset, Int32Constant(kPointerSize))));
#endif
GotoIf(WordEqual(high, IntPtrConstant(0)), &high_zero);
var_result = AllocateBigInt(IntPtrConstant(2));
StoreBigIntDigit(var_result.value(), 0, low);
StoreBigIntDigit(var_result.value(), 1, high);
Goto(&done);
BIND(&high_zero);
GotoIf(WordEqual(low, IntPtrConstant(0)), &if_zero);
var_result = AllocateBigInt(IntPtrConstant(1));
StoreBigIntDigit(var_result.value(), 0, low);
Goto(&done);
}
BIND(&if_zero);
var_result = AllocateBigInt(IntPtrConstant(0));
Goto(&done);
BIND(&done);
return var_result.value();
}
Node* CodeStubAssembler::LoadFixedTypedArrayElementAsTagged(
Node* data_pointer, Node* index_node, ElementsKind elements_kind,
ParameterMode parameter_mode) {
Node* offset =
ElementOffsetFromIndex(index_node, elements_kind, parameter_mode, 0);
switch (elements_kind) {
case UINT8_ELEMENTS: /* fall through */
case UINT8_CLAMPED_ELEMENTS:
return SmiFromInt32(Load(MachineType::Uint8(), data_pointer, offset));
case INT8_ELEMENTS:
return SmiFromInt32(Load(MachineType::Int8(), data_pointer, offset));
case UINT16_ELEMENTS:
return SmiFromInt32(Load(MachineType::Uint16(), data_pointer, offset));
case INT16_ELEMENTS:
return SmiFromInt32(Load(MachineType::Int16(), data_pointer, offset));
case UINT32_ELEMENTS:
return ChangeUint32ToTagged(
Load(MachineType::Uint32(), data_pointer, offset));
case INT32_ELEMENTS:
return ChangeInt32ToTagged(
Load(MachineType::Int32(), data_pointer, offset));
case FLOAT32_ELEMENTS:
return AllocateHeapNumberWithValue(ChangeFloat32ToFloat64(
Load(MachineType::Float32(), data_pointer, offset)));
case FLOAT64_ELEMENTS:
return AllocateHeapNumberWithValue(
Load(MachineType::Float64(), data_pointer, offset));
case BIGINT64_ELEMENTS:
return LoadFixedBigInt64ArrayElementAsTagged(data_pointer, offset);
case BIGUINT64_ELEMENTS:
return LoadFixedBigUint64ArrayElementAsTagged(data_pointer, offset);
default:
UNREACHABLE();
}
}
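
// The store path below mirrors the tagged load path above: small integer
// kinds pass through Smi/Int32 conversion, 32-bit kinds truncate the tagged
// number (TruncateTaggedToWord32 handles HeapNumbers too), float kinds unbox
// a HeapNumber, and the BigInt kinds delegate to
// EmitBigTypedArrayElementStore.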
void CodeStubAssembler::StoreFixedTypedArrayElementFromTagged(
TNode<Context> context, TNode<FixedTypedArrayBase> elements,
TNode<Object> index_node, TNode<Object> value, ElementsKind elements_kind,
ParameterMode parameter_mode) {
TNode<RawPtrT> data_pointer = LoadFixedTypedArrayBackingStore(elements);
switch (elements_kind) {
case UINT8_ELEMENTS:
case UINT8_CLAMPED_ELEMENTS:
case INT8_ELEMENTS:
case UINT16_ELEMENTS:
case INT16_ELEMENTS:
StoreElement(data_pointer, elements_kind, index_node,
SmiToInt32(CAST(value)), parameter_mode);
break;
case UINT32_ELEMENTS:
case INT32_ELEMENTS:
StoreElement(data_pointer, elements_kind, index_node,
TruncateTaggedToWord32(context, value), parameter_mode);
break;
case FLOAT32_ELEMENTS:
StoreElement(data_pointer, elements_kind, index_node,
TruncateFloat64ToFloat32(LoadHeapNumberValue(CAST(value))),
parameter_mode);
break;
case FLOAT64_ELEMENTS:
StoreElement(data_pointer, elements_kind, index_node,
LoadHeapNumberValue(CAST(value)), parameter_mode);
break;
case BIGUINT64_ELEMENTS:
case BIGINT64_ELEMENTS: {
TNode<IntPtrT> offset =
ElementOffsetFromIndex(index_node, elements_kind, parameter_mode, 0);
EmitBigTypedArrayElementStore(elements, data_pointer, offset,
CAST(value));
break;
}
default:
UNREACHABLE();
}
}
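
// Feedback vector slots hold MaybeObjects: sentinels and Smis are stored
// strongly, while cached maps and other heap objects may be held weakly, so
// callers must dispatch the result with the MaybeObject helpers above.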
TNode<MaybeObject> CodeStubAssembler::LoadFeedbackVectorSlot(
Node* object, Node* slot_index_node, int additional_offset,
ParameterMode parameter_mode) {
CSA_SLOW_ASSERT(this, IsFeedbackVector(object));
CSA_SLOW_ASSERT(this, MatchesParameterMode(slot_index_node, parameter_mode));
int32_t header_size =
FeedbackVector::kFeedbackSlotsOffset + additional_offset - kHeapObjectTag;
Node* offset = ElementOffsetFromIndex(slot_index_node, HOLEY_ELEMENTS,
parameter_mode, header_size);
CSA_SLOW_ASSERT(
this, IsOffsetInBounds(offset, LoadFeedbackVectorLength(CAST(object)),
FeedbackVector::kHeaderSize));
return UncheckedCast<MaybeObject>(
Load(MachineType::AnyTagged(), object, offset));
}
TNode<Int32T> CodeStubAssembler::LoadAndUntagToWord32ArrayElement(
SloppyTNode<HeapObject> object, int array_header_size, Node* index_node,
int additional_offset, ParameterMode parameter_mode) {
CSA_SLOW_ASSERT(this, MatchesParameterMode(index_node, parameter_mode));
DCHECK_EQ(additional_offset % kPointerSize, 0);
int endian_correction = 0;
#if V8_TARGET_LITTLE_ENDIAN
if (SmiValuesAre32Bits()) endian_correction = kPointerSize / 2;
#endif
int32_t header_size = array_header_size + additional_offset - kHeapObjectTag +
endian_correction;
Node* offset = ElementOffsetFromIndex(index_node, HOLEY_ELEMENTS,
parameter_mode, header_size);
STATIC_ASSERT(FixedArrayBase::kLengthOffset == WeakFixedArray::kLengthOffset);
  // Check that index_node (offset by additional_offset) addresses a valid
  // element, i.e. stays below the array's length.
// TODO(cbruni): Use proper LoadXXLength helpers
CSA_ASSERT(this,
IsOffsetInBounds(
offset,
LoadAndUntagObjectField(object, FixedArrayBase::kLengthOffset),
FixedArray::kHeaderSize + endian_correction));
if (SmiValuesAre32Bits()) {
return UncheckedCast<Int32T>(Load(MachineType::Int32(), object, offset));
} else {
return SmiToInt32(Load(MachineType::AnyTagged(), object, offset));
}
}
TNode<Int32T> CodeStubAssembler::LoadAndUntagToWord32FixedArrayElement(
SloppyTNode<HeapObject> object, Node* index_node, int additional_offset,
ParameterMode parameter_mode) {
CSA_SLOW_ASSERT(this, IsFixedArraySubclass(object));
return LoadAndUntagToWord32ArrayElement(object, FixedArray::kHeaderSize,
index_node, additional_offset,
parameter_mode);
}
TNode<MaybeObject> CodeStubAssembler::LoadWeakFixedArrayElement(
TNode<WeakFixedArray> object, Node* index, int additional_offset,
ParameterMode parameter_mode, LoadSensitivity needs_poisoning) {
return LoadArrayElement(object, WeakFixedArray::kHeaderSize, index,
additional_offset, parameter_mode, needs_poisoning);
}
TNode<Float64T> CodeStubAssembler::LoadFixedDoubleArrayElement(
SloppyTNode<FixedDoubleArray> object, Node* index_node,
MachineType machine_type, int additional_offset,
ParameterMode parameter_mode, Label* if_hole) {
CSA_ASSERT(this, IsFixedDoubleArray(object));
DCHECK_EQ(additional_offset % kPointerSize, 0);
CSA_SLOW_ASSERT(this, MatchesParameterMode(index_node, parameter_mode));
int32_t header_size =
FixedDoubleArray::kHeaderSize + additional_offset - kHeapObjectTag;
TNode<IntPtrT> offset = ElementOffsetFromIndex(
index_node, HOLEY_DOUBLE_ELEMENTS, parameter_mode, header_size);
CSA_ASSERT(this, IsOffsetInBounds(
offset, LoadAndUntagFixedArrayBaseLength(object),
FixedDoubleArray::kHeaderSize, HOLEY_DOUBLE_ELEMENTS));
return LoadDoubleWithHoleCheck(object, offset, if_hole, machine_type);
}
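
// Holes in double arrays are a single reserved NaN bit pattern
// (kHoleNanInt64) that no arithmetic operation produces, so a raw bit
// comparison suffices; on 32-bit targets comparing the upper (exponent) word
// against kHoleNanUpper32 is enough.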
TNode<Float64T> CodeStubAssembler::LoadDoubleWithHoleCheck(
SloppyTNode<Object> base, SloppyTNode<IntPtrT> offset, Label* if_hole,
MachineType machine_type) {
if (if_hole) {
// TODO(ishell): Compare only the upper part for the hole once the
// compiler is able to fold addition of already complex |offset| with
// |kIeeeDoubleExponentWordOffset| into one addressing mode.
if (Is64()) {
Node* element = Load(MachineType::Uint64(), base, offset);
GotoIf(Word64Equal(element, Int64Constant(kHoleNanInt64)), if_hole);
} else {
Node* element_upper = Load(
MachineType::Uint32(), base,
IntPtrAdd(offset, IntPtrConstant(kIeeeDoubleExponentWordOffset)));
GotoIf(Word32Equal(element_upper, Int32Constant(kHoleNanUpper32)),
if_hole);
}
}
if (machine_type.IsNone()) {
// This means the actual value is not needed.
return TNode<Float64T>();
}
return UncheckedCast<Float64T>(Load(machine_type, base, offset));
}
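
// Context slots form a flat array of tagged values behind the Context
// header: slot i lives at Context::kHeaderSize + i * kPointerSize (minus
// kHeapObjectTag). Context::SlotOffset computes this for constant indices;
// the dynamic-index overloads below recompute it with TimesPointerSize.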
TNode<Object> CodeStubAssembler::LoadContextElement(
SloppyTNode<Context> context, int slot_index) {
int offset = Context::SlotOffset(slot_index);
return UncheckedCast<Object>(
Load(MachineType::AnyTagged(), context, IntPtrConstant(offset)));
}
TNode<Object> CodeStubAssembler::LoadContextElement(
SloppyTNode<Context> context, SloppyTNode<IntPtrT> slot_index) {
Node* offset =
IntPtrAdd(TimesPointerSize(slot_index),
IntPtrConstant(Context::kHeaderSize - kHeapObjectTag));
return UncheckedCast<Object>(Load(MachineType::AnyTagged(), context, offset));
}
void CodeStubAssembler::StoreContextElement(SloppyTNode<Context> context,
int slot_index,
SloppyTNode<Object> value) {
int offset = Context::SlotOffset(slot_index);
Store(context, IntPtrConstant(offset), value);
}
void CodeStubAssembler::StoreContextElement(SloppyTNode<Context> context,
SloppyTNode<IntPtrT> slot_index,
SloppyTNode<Object> value) {
Node* offset =
IntPtrAdd(TimesPointerSize(slot_index),
IntPtrConstant(Context::kHeaderSize - kHeapObjectTag));
Store(context, offset, value);
}
void CodeStubAssembler::StoreContextElementNoWriteBarrier(
SloppyTNode<Context> context, int slot_index, SloppyTNode<Object> value) {
int offset = Context::SlotOffset(slot_index);
StoreNoWriteBarrier(MachineRepresentation::kTagged, context,
IntPtrConstant(offset), value);
}
TNode<Context> CodeStubAssembler::LoadNativeContext(
SloppyTNode<Context> context) {
return UncheckedCast<Context>(
LoadContextElement(context, Context::NATIVE_CONTEXT_INDEX));
}
TNode<Context> CodeStubAssembler::LoadModuleContext(
SloppyTNode<Context> context) {
Node* module_map = LoadRoot(Heap::kModuleContextMapRootIndex);
Variable cur_context(this, MachineRepresentation::kTaggedPointer);
cur_context.Bind(context);
Label context_found(this);
Variable* context_search_loop_variables[1] = {&cur_context};
Label context_search(this, 1, context_search_loop_variables);
// Loop until cur_context->map() is module_map.
Goto(&context_search);
BIND(&context_search);
{
CSA_ASSERT(this, Word32BinaryNot(IsNativeContext(cur_context.value())));
GotoIf(WordEqual(LoadMap(cur_context.value()), module_map), &context_found);
cur_context.Bind(
LoadContextElement(cur_context.value(), Context::PREVIOUS_INDEX));
Goto(&context_search);
}
BIND(&context_found);
return UncheckedCast<Context>(cur_context.value());
}
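
// The native context stores one JSArray map per fast ElementsKind in
// consecutive slots starting at Context::FIRST_JS_ARRAY_MAP_SLOT, so the
// loaders below can index the table either with a dynamic kind (slot
// arithmetic) or a static one (Context::ArrayMapIndex).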
TNode<Map> CodeStubAssembler::LoadJSArrayElementsMap(
SloppyTNode<Int32T> kind, SloppyTNode<Context> native_context) {
CSA_ASSERT(this, IsFastElementsKind(kind));
CSA_ASSERT(this, IsNativeContext(native_context));
Node* offset = IntPtrAdd(IntPtrConstant(Context::FIRST_JS_ARRAY_MAP_SLOT),
ChangeInt32ToIntPtr(kind));
return UncheckedCast<Map>(LoadContextElement(native_context, offset));
}
TNode<Map> CodeStubAssembler::LoadJSArrayElementsMap(
ElementsKind kind, SloppyTNode<Context> native_context) {
CSA_ASSERT(this, IsNativeContext(native_context));
return UncheckedCast<Map>(
LoadContextElement(native_context, Context::ArrayMapIndex(kind)));
}
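
// FunctionKind has four generator-like kinds (sync/async crossed with
// declaration/concise method); the predicate below tests each explicitly
// with Word32 comparisons rather than relying on a contiguous enum range.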
TNode<Word32T> CodeStubAssembler::IsGeneratorFunction(
TNode<JSFunction> function) {
TNode<SharedFunctionInfo> const shared_function_info =
CAST(LoadObjectField(function, JSFunction::kSharedFunctionInfoOffset));
TNode<Uint32T> const function_kind =
DecodeWord32<SharedFunctionInfo::FunctionKindBits>(LoadObjectField(
shared_function_info, SharedFunctionInfo::kFlagsOffset,
MachineType::Uint32()));
return Word32Or(
Word32Or(
Word32Or(
Word32Equal(function_kind,
Int32Constant(FunctionKind::kAsyncGeneratorFunction)),
Word32Equal(
function_kind,
Int32Constant(FunctionKind::kAsyncConciseGeneratorMethod))),
Word32Equal(function_kind,
Int32Constant(FunctionKind::kGeneratorFunction))),
Word32Equal(function_kind,
Int32Constant(FunctionKind::kConciseGeneratorMethod)));
}
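
// A function exposes a "prototype" property when its map has both the
// prototype slot and the constructor bit set, or when it is a generator
// (generator functions carry a prototype for their generator objects even
// though they are not constructors).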
TNode<Word32T> CodeStubAssembler::HasPrototypeProperty(
TNode<JSFunction> function) {
TNode<Int32T> mask = Int32Constant(Map::HasPrototypeSlotBit::kMask |
Map::IsConstructorBit::kMask);
return Word32Or(
Word32Equal(Word32And(LoadMapBitField(LoadMap(function)), mask), mask),
      IsGeneratorFunction(function));
}