| // Copyright 2016 the V8 project authors. All rights reserved. |
| // Use of this source code is governed by a BSD-style license that can be |
| // found in the LICENSE file. |
| |
| #include "src/codegen/code-stub-assembler.h" |
| |
| #include <functional> |
| |
| #include "include/v8-internal.h" |
| #include "src/base/macros.h" |
| #include "src/codegen/code-factory.h" |
| #include "src/codegen/tnode.h" |
| #include "src/common/globals.h" |
| #include "src/execution/frames-inl.h" |
| #include "src/execution/frames.h" |
| #include "src/execution/protectors.h" |
| #include "src/heap/heap-inl.h" // For MemoryChunk. TODO(jkummerow): Drop. |
| #include "src/heap/memory-chunk.h" |
| #include "src/logging/counters.h" |
| #include "src/numbers/integer-literal-inl.h" |
| #include "src/objects/api-callbacks.h" |
| #include "src/objects/cell.h" |
| #include "src/objects/descriptor-array.h" |
| #include "src/objects/function-kind.h" |
| #include "src/objects/heap-number.h" |
| #include "src/objects/instance-type.h" |
| #include "src/objects/js-generator.h" |
| #include "src/objects/oddball.h" |
| #include "src/objects/ordered-hash-table-inl.h" |
| #include "src/objects/property-cell.h" |
| #include "src/roots/roots.h" |
| |
| #if V8_ENABLE_WEBASSEMBLY |
| #include "src/wasm/wasm-objects.h" |
| #endif // V8_ENABLE_WEBASSEMBLY |
| |
| namespace v8 { |
| namespace internal { |
| |
| CodeStubAssembler::CodeStubAssembler(compiler::CodeAssemblerState* state) |
| : compiler::CodeAssembler(state), |
| TorqueGeneratedExportedMacrosAssembler(state) { |
| if (DEBUG_BOOL && FLAG_csa_trap_on_node != nullptr) { |
| HandleBreakOnNode(); |
| } |
| } |
| |
| void CodeStubAssembler::HandleBreakOnNode() { |
| // FLAG_csa_trap_on_node should be of the form "STUB,NODE", where STUB is a |
| // string specifying the name of a stub and NODE is a number specifying the |
| // node id. |
| const char* name = state()->name(); |
| size_t name_length = strlen(name); |
| if (strncmp(FLAG_csa_trap_on_node, name, name_length) != 0) { |
| // Different name. |
| return; |
| } |
| size_t option_length = strlen(FLAG_csa_trap_on_node); |
| if (option_length < name_length + 2 || |
| FLAG_csa_trap_on_node[name_length] != ',') { |
| // Option is too short. |
| return; |
| } |
| const char* start = &FLAG_csa_trap_on_node[name_length + 1]; |
| char* end; |
| int node_id = static_cast<int>(strtol(start, &end, 10)); |
| if (start == end) { |
| // Bad node id. |
| return; |
| } |
| BreakOnNode(node_id); |
| } |
| |
| void CodeStubAssembler::Dcheck(const BranchGenerator& branch, |
| const char* message, const char* file, int line, |
| std::initializer_list<ExtraNode> extra_nodes) { |
| #if defined(DEBUG) |
| if (FLAG_debug_code) { |
| Check(branch, message, file, line, extra_nodes); |
| } |
| #endif |
| } |
| |
| void CodeStubAssembler::Dcheck(const NodeGenerator<BoolT>& condition_body, |
| const char* message, const char* file, int line, |
| std::initializer_list<ExtraNode> extra_nodes) { |
| #if defined(DEBUG) |
| if (FLAG_debug_code) { |
| Check(condition_body, message, file, line, extra_nodes); |
| } |
| #endif |
| } |
| |
| void CodeStubAssembler::Dcheck(TNode<Word32T> condition_node, |
| const char* message, const char* file, int line, |
| std::initializer_list<ExtraNode> extra_nodes) { |
| #if defined(DEBUG) |
| if (FLAG_debug_code) { |
| Check(condition_node, message, file, line, extra_nodes); |
| } |
| #endif |
| } |
| |
| void CodeStubAssembler::Check(const BranchGenerator& branch, |
| const char* message, const char* file, int line, |
| std::initializer_list<ExtraNode> extra_nodes) { |
| Label ok(this); |
| Label not_ok(this, Label::kDeferred); |
| if (message != nullptr) { |
| Comment("[ Assert: ", message); |
| } else { |
| Comment("[ Assert"); |
| } |
| branch(&ok, &not_ok); |
| |
| BIND(&not_ok); |
| std::vector<FileAndLine> file_and_line; |
| if (file != nullptr) { |
| file_and_line.push_back({file, line}); |
| } |
| FailAssert(message, file_and_line, extra_nodes); |
| |
| BIND(&ok); |
| Comment("] Assert"); |
| } |
| |
| void CodeStubAssembler::Check(const NodeGenerator<BoolT>& condition_body, |
| const char* message, const char* file, int line, |
| std::initializer_list<ExtraNode> extra_nodes) { |
| BranchGenerator branch = [=](Label* ok, Label* not_ok) { |
| TNode<BoolT> condition = condition_body(); |
| Branch(condition, ok, not_ok); |
| }; |
| |
| Check(branch, message, file, line, extra_nodes); |
| } |
| |
| void CodeStubAssembler::Check(TNode<Word32T> condition_node, |
| const char* message, const char* file, int line, |
| std::initializer_list<ExtraNode> extra_nodes) { |
| BranchGenerator branch = [=](Label* ok, Label* not_ok) { |
| Branch(condition_node, ok, not_ok); |
| }; |
| |
| Check(branch, message, file, line, extra_nodes); |
| } |
| |
| void CodeStubAssembler::IncrementCallCount( |
| TNode<FeedbackVector> feedback_vector, TNode<UintPtrT> slot_id) { |
| Comment("increment call count"); |
| TNode<Smi> call_count = |
| CAST(LoadFeedbackVectorSlot(feedback_vector, slot_id, kTaggedSize)); |
| // The lowest {FeedbackNexus::CallCountField::kShift} bits of the call |
| // count are used as flags. To increment the call count by 1 we hence |
| // have to increment by 1 << {FeedbackNexus::CallCountField::kShift}. |
| TNode<Smi> new_count = SmiAdd( |
| call_count, SmiConstant(1 << FeedbackNexus::CallCountField::kShift)); |
| // Count is Smi, so we don't need a write barrier. |
| StoreFeedbackVectorSlot(feedback_vector, slot_id, new_count, |
| SKIP_WRITE_BARRIER, kTaggedSize); |
| } |
| |
| void CodeStubAssembler::FastCheck(TNode<BoolT> condition) { |
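| // A lightweight form of Check(): if {condition} is false, execution simply |
| // reaches Unreachable() on a deferred path, with no message or runtime call. |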
| Label ok(this), not_ok(this, Label::kDeferred); |
| Branch(condition, &ok, &not_ok); |
| BIND(&not_ok); |
| Unreachable(); |
| BIND(&ok); |
| } |
| |
| void CodeStubAssembler::FailAssert( |
| const char* message, const std::vector<FileAndLine>& files_and_lines, |
| std::initializer_list<ExtraNode> extra_nodes) { |
| DCHECK_NOT_NULL(message); |
| base::EmbeddedVector<char, 1024> chars; |
| std::stringstream stream; |
| for (auto it = files_and_lines.rbegin(); it != files_and_lines.rend(); ++it) { |
| if (it->first != nullptr) { |
| stream << " [" << it->first << ":" << it->second << "]"; |
| #ifndef DEBUG |
| // To limit the size of these strings in release builds, we include only |
| // the innermost macro's file name and line number. |
| break; |
| #endif |
| } |
| } |
| std::string files_and_lines_text = stream.str(); |
| if (files_and_lines_text.size() != 0) { |
| SNPrintF(chars, "%s%s", message, files_and_lines_text.c_str()); |
| message = chars.begin(); |
| } |
| TNode<String> message_node = StringConstant(message); |
| |
| #ifdef DEBUG |
| // Only print the extra nodes in debug builds. |
| for (auto& node : extra_nodes) { |
| CallRuntime(Runtime::kPrintWithNameForAssert, SmiConstant(0), |
| StringConstant(node.second), node.first); |
| } |
| #endif |
| |
| AbortCSADcheck(message_node); |
| Unreachable(); |
| } |
| |
| TNode<Int32T> CodeStubAssembler::SelectInt32Constant(TNode<BoolT> condition, |
| int true_value, |
| int false_value) { |
| return SelectConstant<Int32T>(condition, Int32Constant(true_value), |
| Int32Constant(false_value)); |
| } |
| |
| TNode<IntPtrT> CodeStubAssembler::SelectIntPtrConstant(TNode<BoolT> condition, |
| int true_value, |
| int false_value) { |
| return SelectConstant<IntPtrT>(condition, IntPtrConstant(true_value), |
| IntPtrConstant(false_value)); |
| } |
| |
| TNode<Oddball> CodeStubAssembler::SelectBooleanConstant( |
| TNode<BoolT> condition) { |
| return SelectConstant<Oddball>(condition, TrueConstant(), FalseConstant()); |
| } |
| |
| TNode<Smi> CodeStubAssembler::SelectSmiConstant(TNode<BoolT> condition, |
| Smi true_value, |
| Smi false_value) { |
| return SelectConstant<Smi>(condition, SmiConstant(true_value), |
| SmiConstant(false_value)); |
| } |
| |
| TNode<Smi> CodeStubAssembler::NoContextConstant() { |
| return SmiConstant(Context::kNoContext); |
| } |
| |
| #define HEAP_CONSTANT_ACCESSOR(rootIndexName, rootAccessorName, name) \ |
| TNode<std::remove_pointer<std::remove_reference<decltype( \ |
| std::declval<Heap>().rootAccessorName())>::type>::type> \ |
| CodeStubAssembler::name##Constant() { \ |
| return UncheckedCast<std::remove_pointer<std::remove_reference<decltype( \ |
| std::declval<Heap>().rootAccessorName())>::type>::type>( \ |
| LoadRoot(RootIndex::k##rootIndexName)); \ |
| } |
| HEAP_MUTABLE_IMMOVABLE_OBJECT_LIST(HEAP_CONSTANT_ACCESSOR) |
| #undef HEAP_CONSTANT_ACCESSOR |
| |
| #define HEAP_CONSTANT_ACCESSOR(rootIndexName, rootAccessorName, name) \ |
| TNode<std::remove_pointer<std::remove_reference<decltype( \ |
| std::declval<ReadOnlyRoots>().rootAccessorName())>::type>::type> \ |
| CodeStubAssembler::name##Constant() { \ |
| return UncheckedCast<std::remove_pointer<std::remove_reference<decltype( \ |
| std::declval<ReadOnlyRoots>().rootAccessorName())>::type>::type>( \ |
| LoadRoot(RootIndex::k##rootIndexName)); \ |
| } |
| HEAP_IMMUTABLE_IMMOVABLE_OBJECT_LIST(HEAP_CONSTANT_ACCESSOR) |
| #undef HEAP_CONSTANT_ACCESSOR |
| |
| #define HEAP_CONSTANT_TEST(rootIndexName, rootAccessorName, name) \ |
| TNode<BoolT> CodeStubAssembler::Is##name(TNode<Object> value) { \ |
| return TaggedEqual(value, name##Constant()); \ |
| } \ |
| TNode<BoolT> CodeStubAssembler::IsNot##name(TNode<Object> value) { \ |
| return TaggedNotEqual(value, name##Constant()); \ |
| } |
| HEAP_IMMOVABLE_OBJECT_LIST(HEAP_CONSTANT_TEST) |
| #undef HEAP_CONSTANT_TEST |
| |
| TNode<BInt> CodeStubAssembler::BIntConstant(int value) { |
| #if defined(BINT_IS_SMI) |
| return SmiConstant(value); |
| #elif defined(BINT_IS_INTPTR) |
| return IntPtrConstant(value); |
| #else |
| #error Unknown architecture. |
| #endif |
| } |
| |
| template <> |
| TNode<Smi> CodeStubAssembler::IntPtrOrSmiConstant<Smi>(int value) { |
| return SmiConstant(value); |
| } |
| |
| template <> |
| TNode<IntPtrT> CodeStubAssembler::IntPtrOrSmiConstant<IntPtrT>(int value) { |
| return IntPtrConstant(value); |
| } |
| |
| template <> |
| TNode<UintPtrT> CodeStubAssembler::IntPtrOrSmiConstant<UintPtrT>(int value) { |
| return Unsigned(IntPtrConstant(value)); |
| } |
| |
| template <> |
| TNode<RawPtrT> CodeStubAssembler::IntPtrOrSmiConstant<RawPtrT>(int value) { |
| return ReinterpretCast<RawPtrT>(IntPtrConstant(value)); |
| } |
| |
| bool CodeStubAssembler::TryGetIntPtrOrSmiConstantValue( |
| TNode<Smi> maybe_constant, int* value) { |
| Smi smi_constant; |
| if (TryToSmiConstant(maybe_constant, &smi_constant)) { |
| *value = Smi::ToInt(smi_constant); |
| return true; |
| } |
| return false; |
| } |
| |
| bool CodeStubAssembler::TryGetIntPtrOrSmiConstantValue( |
| TNode<IntPtrT> maybe_constant, int* value) { |
| int32_t int32_constant; |
| if (TryToInt32Constant(maybe_constant, &int32_constant)) { |
| *value = int32_constant; |
| return true; |
| } |
| return false; |
| } |
| |
| TNode<IntPtrT> CodeStubAssembler::IntPtrRoundUpToPowerOfTwo32( |
| TNode<IntPtrT> value) { |
| Comment("IntPtrRoundUpToPowerOfTwo32"); |
| CSA_DCHECK(this, UintPtrLessThanOrEqual(value, IntPtrConstant(0x80000000u))); |
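| // Classic bit-smearing: subtract one, OR in progressively shifted copies so |
| // every bit below the leading one becomes set, then add one to reach the |
| // next power of two (values that are already powers of two stay unchanged). |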
| value = Signed(IntPtrSub(value, IntPtrConstant(1))); |
| for (int i = 1; i <= 16; i *= 2) { |
| value = Signed(WordOr(value, WordShr(value, IntPtrConstant(i)))); |
| } |
| return Signed(IntPtrAdd(value, IntPtrConstant(1))); |
| } |
| |
| TNode<BoolT> CodeStubAssembler::WordIsPowerOfTwo(TNode<IntPtrT> value) { |
| intptr_t constant; |
| if (TryToIntPtrConstant(value, &constant)) { |
| return BoolConstant(base::bits::IsPowerOfTwo(constant)); |
| } |
| // value && !(value & (value - 1)) |
| return IntPtrEqual( |
| Select<IntPtrT>( |
| IntPtrEqual(value, IntPtrConstant(0)), |
| [=] { return IntPtrConstant(1); }, |
| [=] { return WordAnd(value, IntPtrSub(value, IntPtrConstant(1))); }), |
| IntPtrConstant(0)); |
| } |
| |
| TNode<Float64T> CodeStubAssembler::Float64Round(TNode<Float64T> x) { |
| TNode<Float64T> one = Float64Constant(1.0); |
| TNode<Float64T> one_half = Float64Constant(0.5); |
| |
| Label return_x(this); |
| |
| // Round up {x} towards Infinity. |
| TVARIABLE(Float64T, var_x, Float64Ceil(x)); |
| |
| GotoIf(Float64LessThanOrEqual(Float64Sub(var_x.value(), one_half), x), |
| &return_x); |
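| // {var_x} (the rounded-up value) is more than 0.5 above {x}, so rounding |
| // down is closer; exact ties keep the rounded-up value, matching the |
| // round-half-towards-+Infinity behavior of Math.round. |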
| var_x = Float64Sub(var_x.value(), one); |
| Goto(&return_x); |
| |
| BIND(&return_x); |
| return var_x.value(); |
| } |
| |
| TNode<Float64T> CodeStubAssembler::Float64Ceil(TNode<Float64T> x) { |
| if (IsFloat64RoundUpSupported()) { |
| return Float64RoundUp(x); |
| } |
| |
| TNode<Float64T> one = Float64Constant(1.0); |
| TNode<Float64T> zero = Float64Constant(0.0); |
| TNode<Float64T> two_52 = Float64Constant(4503599627370496.0E0); |
| TNode<Float64T> minus_two_52 = Float64Constant(-4503599627370496.0E0); |
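| // Doubles in [2^52, 2^53[ have no fractional bits, so adding 2^52 and then |
| // subtracting it again rounds {x} to a nearby integer; the code below then |
| // adjusts by one whenever that integer landed on the wrong side of {x}. |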
| |
| TVARIABLE(Float64T, var_x, x); |
| Label return_x(this), return_minus_x(this); |
| |
| // Check if {x} is greater than zero. |
| Label if_xgreaterthanzero(this), if_xnotgreaterthanzero(this); |
| Branch(Float64GreaterThan(x, zero), &if_xgreaterthanzero, |
| &if_xnotgreaterthanzero); |
| |
| BIND(&if_xgreaterthanzero); |
| { |
| // Just return {x} unless it's in the range ]0,2^52[. |
| GotoIf(Float64GreaterThanOrEqual(x, two_52), &return_x); |
| |
| // Round positive {x} towards Infinity. |
| var_x = Float64Sub(Float64Add(two_52, x), two_52); |
| GotoIfNot(Float64LessThan(var_x.value(), x), &return_x); |
| var_x = Float64Add(var_x.value(), one); |
| Goto(&return_x); |
| } |
| |
| BIND(&if_xnotgreaterthanzero); |
| { |
| // Just return {x} unless it's in the range ]-2^52,0[ |
| GotoIf(Float64LessThanOrEqual(x, minus_two_52), &return_x); |
| GotoIfNot(Float64LessThan(x, zero), &return_x); |
| |
| // Round negated {x} towards Infinity and return the result negated. |
| TNode<Float64T> minus_x = Float64Neg(x); |
| var_x = Float64Sub(Float64Add(two_52, minus_x), two_52); |
| GotoIfNot(Float64GreaterThan(var_x.value(), minus_x), &return_minus_x); |
| var_x = Float64Sub(var_x.value(), one); |
| Goto(&return_minus_x); |
| } |
| |
| BIND(&return_minus_x); |
| var_x = Float64Neg(var_x.value()); |
| Goto(&return_x); |
| |
| BIND(&return_x); |
| return var_x.value(); |
| } |
| |
| TNode<Float64T> CodeStubAssembler::Float64Floor(TNode<Float64T> x) { |
| if (IsFloat64RoundDownSupported()) { |
| return Float64RoundDown(x); |
| } |
| |
| TNode<Float64T> one = Float64Constant(1.0); |
| TNode<Float64T> zero = Float64Constant(0.0); |
| TNode<Float64T> two_52 = Float64Constant(4503599627370496.0E0); |
| TNode<Float64T> minus_two_52 = Float64Constant(-4503599627370496.0E0); |
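| // Same 2^52 add/subtract trick as in Float64Ceil above, with the final |
| // adjustment mirrored so that rounding goes towards -Infinity. |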
| |
| TVARIABLE(Float64T, var_x, x); |
| Label return_x(this), return_minus_x(this); |
| |
| // Check if {x} is greater than zero. |
| Label if_xgreaterthanzero(this), if_xnotgreaterthanzero(this); |
| Branch(Float64GreaterThan(x, zero), &if_xgreaterthanzero, |
| &if_xnotgreaterthanzero); |
| |
| BIND(&if_xgreaterthanzero); |
| { |
| // Just return {x} unless it's in the range ]0,2^52[. |
| GotoIf(Float64GreaterThanOrEqual(x, two_52), &return_x); |
| |
| // Round positive {x} towards -Infinity. |
| var_x = Float64Sub(Float64Add(two_52, x), two_52); |
| GotoIfNot(Float64GreaterThan(var_x.value(), x), &return_x); |
| var_x = Float64Sub(var_x.value(), one); |
| Goto(&return_x); |
| } |
| |
| BIND(&if_xnotgreaterthanzero); |
| { |
| // Just return {x} unless it's in the range ]-2^52,0[ |
| GotoIf(Float64LessThanOrEqual(x, minus_two_52), &return_x); |
| GotoIfNot(Float64LessThan(x, zero), &return_x); |
| |
| // Round negated {x} towards -Infinity and return the result negated. |
| TNode<Float64T> minus_x = Float64Neg(x); |
| var_x = Float64Sub(Float64Add(two_52, minus_x), two_52); |
| GotoIfNot(Float64LessThan(var_x.value(), minus_x), &return_minus_x); |
| var_x = Float64Add(var_x.value(), one); |
| Goto(&return_minus_x); |
| } |
| |
| BIND(&return_minus_x); |
| var_x = Float64Neg(var_x.value()); |
| Goto(&return_x); |
| |
| BIND(&return_x); |
| return var_x.value(); |
| } |
| |
| TNode<Float64T> CodeStubAssembler::Float64RoundToEven(TNode<Float64T> x) { |
| if (IsFloat64RoundTiesEvenSupported()) { |
| return Float64RoundTiesEven(x); |
| } |
| // See ES#sec-touint8clamp for details. |
| TNode<Float64T> f = Float64Floor(x); |
| TNode<Float64T> f_and_half = Float64Add(f, Float64Constant(0.5)); |
| |
| TVARIABLE(Float64T, var_result); |
| Label return_f(this), return_f_plus_one(this), done(this); |
| |
| GotoIf(Float64LessThan(f_and_half, x), &return_f_plus_one); |
| GotoIf(Float64LessThan(x, f_and_half), &return_f); |
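| // {x} is exactly halfway between {f} and {f} + 1: break the tie towards the |
| // even neighbour. |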
| { |
| TNode<Float64T> f_mod_2 = Float64Mod(f, Float64Constant(2.0)); |
| Branch(Float64Equal(f_mod_2, Float64Constant(0.0)), &return_f, |
| &return_f_plus_one); |
| } |
| |
| BIND(&return_f); |
| var_result = f; |
| Goto(&done); |
| |
| BIND(&return_f_plus_one); |
| var_result = Float64Add(f, Float64Constant(1.0)); |
| Goto(&done); |
| |
| BIND(&done); |
| return var_result.value(); |
| } |
| |
| TNode<Float64T> CodeStubAssembler::Float64Trunc(TNode<Float64T> x) { |
| if (IsFloat64RoundTruncateSupported()) { |
| return Float64RoundTruncate(x); |
| } |
| |
| TNode<Float64T> one = Float64Constant(1.0); |
| TNode<Float64T> zero = Float64Constant(0.0); |
| TNode<Float64T> two_52 = Float64Constant(4503599627370496.0E0); |
| TNode<Float64T> minus_two_52 = Float64Constant(-4503599627370496.0E0); |
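| // Truncation rounds towards zero: positive {x} is rounded towards -Infinity |
| // and negative {x} towards +Infinity. |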
| |
| TVARIABLE(Float64T, var_x, x); |
| Label return_x(this), return_minus_x(this); |
| |
| // Check if {x} is greater than 0. |
| Label if_xgreaterthanzero(this), if_xnotgreaterthanzero(this); |
| Branch(Float64GreaterThan(x, zero), &if_xgreaterthanzero, |
| &if_xnotgreaterthanzero); |
| |
| BIND(&if_xgreaterthanzero); |
| { |
| if (IsFloat64RoundDownSupported()) { |
| var_x = Float64RoundDown(x); |
| } else { |
| // Just return {x} unless it's in the range ]0,2^52[. |
| GotoIf(Float64GreaterThanOrEqual(x, two_52), &return_x); |
| |
| // Round positive {x} towards -Infinity. |
| var_x = Float64Sub(Float64Add(two_52, x), two_52); |
| GotoIfNot(Float64GreaterThan(var_x.value(), x), &return_x); |
| var_x = Float64Sub(var_x.value(), one); |
| } |
| Goto(&return_x); |
| } |
| |
| BIND(&if_xnotgreaterthanzero); |
| { |
| if (IsFloat64RoundUpSupported()) { |
| var_x = Float64RoundUp(x); |
| Goto(&return_x); |
| } else { |
| // Just return {x} unless it's in the range ]-2^52,0[. |
| GotoIf(Float64LessThanOrEqual(x, minus_two_52), &return_x); |
| GotoIfNot(Float64LessThan(x, zero), &return_x); |
| |
| // Round negated {x} towards -Infinity and return result negated. |
| TNode<Float64T> minus_x = Float64Neg(x); |
| var_x = Float64Sub(Float64Add(two_52, minus_x), two_52); |
| GotoIfNot(Float64GreaterThan(var_x.value(), minus_x), &return_minus_x); |
| var_x = Float64Sub(var_x.value(), one); |
| Goto(&return_minus_x); |
| } |
| } |
| |
| BIND(&return_minus_x); |
| var_x = Float64Neg(var_x.value()); |
| Goto(&return_x); |
| |
| BIND(&return_x); |
| return var_x.value(); |
| } |
| |
| TNode<IntPtrT> CodeStubAssembler::PopulationCountFallback( |
| TNode<UintPtrT> value) { |
| // Taken from the slow path of base::bits::CountPopulation; the comments |
| // below show the corresponding C++ code for reference. |
| // Fall back to divide-and-conquer popcount (see "Hacker's Delight" by Henry |
| // S. Warren, Jr.), chapter 5-1. |
| constexpr uintptr_t mask[] = {static_cast<uintptr_t>(0x5555555555555555), |
| static_cast<uintptr_t>(0x3333333333333333), |
| static_cast<uintptr_t>(0x0f0f0f0f0f0f0f0f)}; |
| |
| TNode<UintPtrT> lhs, rhs; |
| |
| // Start with 64 buckets of 1 bits, holding values from [0,1]. |
| // {value = ((value >> 1) & mask[0]) + (value & mask[0])} |
| lhs = WordAnd(WordShr(value, UintPtrConstant(1)), UintPtrConstant(mask[0])); |
| rhs = WordAnd(value, UintPtrConstant(mask[0])); |
| value = UintPtrAdd(lhs, rhs); |
| |
| // Having 32 buckets of 2 bits, holding values from [0,2] now. |
| // {value = ((value >> 2) & mask[1]) + (value & mask[1])} |
| lhs = WordAnd(WordShr(value, UintPtrConstant(2)), UintPtrConstant(mask[1])); |
| rhs = WordAnd(value, UintPtrConstant(mask[1])); |
| value = UintPtrAdd(lhs, rhs); |
| |
| // Having 16 buckets of 4 bits, holding values from [0,4] now. |
| // {value = ((value >> 4) & mask[2]) + (value & mask[2])} |
| lhs = WordAnd(WordShr(value, UintPtrConstant(4)), UintPtrConstant(mask[2])); |
| rhs = WordAnd(value, UintPtrConstant(mask[2])); |
| value = UintPtrAdd(lhs, rhs); |
| |
| // Having 8 buckets of 8 bits, holding values from [0,8] now. |
| // From this point on, the buckets are bigger than the number of bits |
| // required to hold the values, and the buckets are bigger than the maximum |
| // result, so there's no need to mask value anymore, since there's no |
| // more risk of overflow between buckets. |
| // {value = (value >> 8) + value} |
| lhs = WordShr(value, UintPtrConstant(8)); |
| value = UintPtrAdd(lhs, value); |
| |
| // Having 4 buckets of 16 bits, holding values from [0,16] now. |
| // {value = (value >> 16) + value} |
| lhs = WordShr(value, UintPtrConstant(16)); |
| value = UintPtrAdd(lhs, value); |
| |
| if (Is64()) { |
| // Having 2 buckets of 32 bits, holding values from [0,32] now. |
| // {value = (value >> 32) + value} |
| lhs = WordShr(value, UintPtrConstant(32)); |
| value = UintPtrAdd(lhs, value); |
| } |
| |
| // Having 1 bucket of sizeof(intptr_t) bits, holding values from [0,64] now. |
| // {return static_cast<unsigned>(value & 0xff)} |
| return Signed(WordAnd(value, UintPtrConstant(0xff))); |
| } |
| |
| TNode<Int64T> CodeStubAssembler::PopulationCount64(TNode<Word64T> value) { |
| if (IsWord64PopcntSupported()) { |
| return Word64Popcnt(value); |
| } |
| |
| if (Is32()) { |
| // Unsupported. |
| UNREACHABLE(); |
| } |
| |
| return ReinterpretCast<Int64T>( |
| PopulationCountFallback(ReinterpretCast<UintPtrT>(value))); |
| } |
| |
| TNode<Int32T> CodeStubAssembler::PopulationCount32(TNode<Word32T> value) { |
| if (IsWord32PopcntSupported()) { |
| return Word32Popcnt(value); |
| } |
| |
| if (Is32()) { |
| TNode<IntPtrT> res = |
| PopulationCountFallback(ReinterpretCast<UintPtrT>(value)); |
| return ReinterpretCast<Int32T>(res); |
| } else { |
| TNode<IntPtrT> res = PopulationCountFallback( |
| ReinterpretCast<UintPtrT>(ChangeUint32ToUint64(value))); |
| return TruncateInt64ToInt32(ReinterpretCast<Int64T>(res)); |
| } |
| } |
| |
| TNode<Int64T> CodeStubAssembler::CountTrailingZeros64(TNode<Word64T> value) { |
| if (IsWord64CtzSupported()) { |
| return Word64Ctz(value); |
| } |
| |
| if (Is32()) { |
| // Unsupported. |
| UNREACHABLE(); |
| } |
| |
| // Same fallback as in base::bits::CountTrailingZeros. |
| // Fall back to popcount (see "Hacker's Delight" by Henry S. Warren, Jr.), |
| // chapter 5-4. On x64 this is faster than counting in a loop and faster |
| // than doing a binary search. |
| TNode<Word64T> lhs = Word64Not(value); |
| TNode<Word64T> rhs = Uint64Sub(Unsigned(value), Uint64Constant(1)); |
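| // {~value & (value - 1)} has a one bit at exactly the trailing-zero |
| // positions of {value}, so its population count equals the trailing-zero |
| // count (and yields 64 for a zero input). |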
| return PopulationCount64(Word64And(lhs, rhs)); |
| } |
| |
| TNode<Int32T> CodeStubAssembler::CountTrailingZeros32(TNode<Word32T> value) { |
| if (IsWord32CtzSupported()) { |
| return Word32Ctz(value); |
| } |
| |
| if (Is32()) { |
| // Same fallback as in Word64CountTrailingZeros. |
| TNode<Word32T> lhs = Word32BitwiseNot(value); |
| TNode<Word32T> rhs = Int32Sub(Signed(value), Int32Constant(1)); |
| return PopulationCount32(Word32And(lhs, rhs)); |
| } else { |
| TNode<Int64T> res64 = CountTrailingZeros64(ChangeUint32ToUint64(value)); |
| return TruncateInt64ToInt32(Signed(res64)); |
| } |
| } |
| |
| TNode<Int64T> CodeStubAssembler::CountLeadingZeros64(TNode<Word64T> value) { |
| return Word64Clz(value); |
| } |
| |
| TNode<Int32T> CodeStubAssembler::CountLeadingZeros32(TNode<Word32T> value) { |
| return Word32Clz(value); |
| } |
| |
| template <> |
| TNode<Smi> CodeStubAssembler::TaggedToParameter(TNode<Smi> value) { |
| return value; |
| } |
| |
| template <> |
| TNode<IntPtrT> CodeStubAssembler::TaggedToParameter(TNode<Smi> value) { |
| return SmiUntag(value); |
| } |
| |
| TNode<IntPtrT> CodeStubAssembler::TaggedIndexToIntPtr( |
| TNode<TaggedIndex> value) { |
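| // TaggedIndex values are encoded with only the Smi tag shift (kSmiTagSize), |
| // independently of kSmiShiftSize, so one arithmetic shift recovers the index. |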
| return Signed(WordSarShiftOutZeros(BitcastTaggedToWordForTagAndSmiBits(value), |
| IntPtrConstant(kSmiTagSize))); |
| } |
| |
| TNode<TaggedIndex> CodeStubAssembler::IntPtrToTaggedIndex( |
| TNode<IntPtrT> value) { |
| return ReinterpretCast<TaggedIndex>( |
| BitcastWordToTaggedSigned(WordShl(value, IntPtrConstant(kSmiTagSize)))); |
| } |
| |
| TNode<Smi> CodeStubAssembler::TaggedIndexToSmi(TNode<TaggedIndex> value) { |
| if (SmiValuesAre32Bits()) { |
| DCHECK_EQ(kSmiShiftSize, 31); |
| return BitcastWordToTaggedSigned( |
| WordShl(BitcastTaggedToWordForTagAndSmiBits(value), |
| IntPtrConstant(kSmiShiftSize))); |
| } |
| DCHECK(SmiValuesAre31Bits()); |
| DCHECK_EQ(kSmiShiftSize, 0); |
| return ReinterpretCast<Smi>(value); |
| } |
| |
| TNode<TaggedIndex> CodeStubAssembler::SmiToTaggedIndex(TNode<Smi> value) { |
| if (kSystemPointerSize == kInt32Size) { |
| return ReinterpretCast<TaggedIndex>(value); |
| } |
| if (SmiValuesAre32Bits()) { |
| DCHECK_EQ(kSmiShiftSize, 31); |
| return ReinterpretCast<TaggedIndex>(BitcastWordToTaggedSigned( |
| WordSar(BitcastTaggedToWordForTagAndSmiBits(value), |
| IntPtrConstant(kSmiShiftSize)))); |
| } |
| DCHECK(SmiValuesAre31Bits()); |
| DCHECK_EQ(kSmiShiftSize, 0); |
| // Just sign-extend the lower 32 bits. |
| TNode<Int32T> raw = |
| TruncateWordToInt32(BitcastTaggedToWordForTagAndSmiBits(value)); |
| return ReinterpretCast<TaggedIndex>( |
| BitcastWordToTaggedSigned(ChangeInt32ToIntPtr(raw))); |
| } |
| |
| TNode<Smi> CodeStubAssembler::NormalizeSmiIndex(TNode<Smi> smi_index) { |
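| // With pointer compression the upper half of a word holding a Smi is not |
| // guaranteed to be canonical; re-sign-extend from the lower 32 bits so the |
| // value is usable as a full-width tagged word. |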
| if (COMPRESS_POINTERS_BOOL) { |
| TNode<Int32T> raw = |
| TruncateWordToInt32(BitcastTaggedToWordForTagAndSmiBits(smi_index)); |
| smi_index = BitcastWordToTaggedSigned(ChangeInt32ToIntPtr(raw)); |
| } |
| return smi_index; |
| } |
| |
| TNode<Smi> CodeStubAssembler::SmiFromInt32(TNode<Int32T> value) { |
| if (COMPRESS_POINTERS_BOOL) { |
| static_assert(!COMPRESS_POINTERS_BOOL || (kSmiShiftSize + kSmiTagSize == 1), |
| "Use shifting instead of add"); |
| return BitcastWordToTaggedSigned( |
| ChangeUint32ToWord(Int32Add(value, value))); |
| } |
| return SmiTag(ChangeInt32ToIntPtr(value)); |
| } |
| |
| TNode<Smi> CodeStubAssembler::SmiFromUint32(TNode<Uint32T> value) { |
| CSA_DCHECK(this, IntPtrLessThan(ChangeUint32ToWord(value), |
| IntPtrConstant(Smi::kMaxValue))); |
| return SmiFromInt32(Signed(value)); |
| } |
| |
| TNode<BoolT> CodeStubAssembler::IsValidPositiveSmi(TNode<IntPtrT> value) { |
| intptr_t constant_value; |
| if (TryToIntPtrConstant(value, &constant_value)) { |
| return (static_cast<uintptr_t>(constant_value) <= |
| static_cast<uintptr_t>(Smi::kMaxValue)) |
| ? Int32TrueConstant() |
| : Int32FalseConstant(); |
| } |
| |
| return UintPtrLessThanOrEqual(value, IntPtrConstant(Smi::kMaxValue)); |
| } |
| |
| TNode<Smi> CodeStubAssembler::SmiTag(TNode<IntPtrT> value) { |
| int32_t constant_value; |
| if (TryToInt32Constant(value, &constant_value) && |
| Smi::IsValid(constant_value)) { |
| return SmiConstant(constant_value); |
| } |
| if (COMPRESS_POINTERS_BOOL) { |
| return SmiFromInt32(TruncateIntPtrToInt32(value)); |
| } |
| TNode<Smi> smi = |
| BitcastWordToTaggedSigned(WordShl(value, SmiShiftBitsConstant())); |
| return smi; |
| } |
| |
| TNode<IntPtrT> CodeStubAssembler::SmiUntag(TNode<Smi> value) { |
| intptr_t constant_value; |
| if (TryToIntPtrConstant(value, &constant_value)) { |
| return IntPtrConstant(constant_value >> (kSmiShiftSize + kSmiTagSize)); |
| } |
| TNode<IntPtrT> raw_bits = BitcastTaggedToWordForTagAndSmiBits(value); |
| if (COMPRESS_POINTERS_BOOL) { |
| // Clear the upper half using sign-extension. |
| raw_bits = ChangeInt32ToIntPtr(TruncateIntPtrToInt32(raw_bits)); |
| } |
| return Signed(WordSarShiftOutZeros(raw_bits, SmiShiftBitsConstant())); |
| } |
| |
| TNode<Int32T> CodeStubAssembler::SmiToInt32(TNode<Smi> value) { |
| if (COMPRESS_POINTERS_BOOL) { |
| return Signed(Word32SarShiftOutZeros( |
| TruncateIntPtrToInt32(BitcastTaggedToWordForTagAndSmiBits(value)), |
| SmiShiftBitsConstant32())); |
| } |
| TNode<IntPtrT> result = SmiUntag(value); |
| return TruncateIntPtrToInt32(result); |
| } |
| |
| TNode<Float64T> CodeStubAssembler::SmiToFloat64(TNode<Smi> value) { |
| return ChangeInt32ToFloat64(SmiToInt32(value)); |
| } |
| |
| TNode<Smi> CodeStubAssembler::SmiMax(TNode<Smi> a, TNode<Smi> b) { |
| return SelectConstant<Smi>(SmiLessThan(a, b), b, a); |
| } |
| |
| TNode<Smi> CodeStubAssembler::SmiMin(TNode<Smi> a, TNode<Smi> b) { |
| return SelectConstant<Smi>(SmiLessThan(a, b), a, b); |
| } |
| |
| TNode<IntPtrT> CodeStubAssembler::TryIntPtrAdd(TNode<IntPtrT> a, |
| TNode<IntPtrT> b, |
| Label* if_overflow) { |
| TNode<PairT<IntPtrT, BoolT>> pair = IntPtrAddWithOverflow(a, b); |
| TNode<BoolT> overflow = Projection<1>(pair); |
| GotoIf(overflow, if_overflow); |
| return Projection<0>(pair); |
| } |
| |
| TNode<IntPtrT> CodeStubAssembler::TryIntPtrSub(TNode<IntPtrT> a, |
| TNode<IntPtrT> b, |
| Label* if_overflow) { |
| TNode<PairT<IntPtrT, BoolT>> pair = IntPtrSubWithOverflow(a, b); |
| TNode<BoolT> overflow = Projection<1>(pair); |
| GotoIf(overflow, if_overflow); |
| return Projection<0>(pair); |
| } |
| |
| TNode<Int32T> CodeStubAssembler::TryInt32Mul(TNode<Int32T> a, TNode<Int32T> b, |
| Label* if_overflow) { |
| TNode<PairT<Int32T, BoolT>> pair = Int32MulWithOverflow(a, b); |
| TNode<BoolT> overflow = Projection<1>(pair); |
| GotoIf(overflow, if_overflow); |
| return Projection<0>(pair); |
| } |
| |
| TNode<Smi> CodeStubAssembler::TrySmiAdd(TNode<Smi> lhs, TNode<Smi> rhs, |
| Label* if_overflow) { |
| if (SmiValuesAre32Bits()) { |
| return BitcastWordToTaggedSigned( |
| TryIntPtrAdd(BitcastTaggedToWordForTagAndSmiBits(lhs), |
| BitcastTaggedToWordForTagAndSmiBits(rhs), if_overflow)); |
| } else { |
| DCHECK(SmiValuesAre31Bits()); |
| TNode<PairT<Int32T, BoolT>> pair = Int32AddWithOverflow( |
| TruncateIntPtrToInt32(BitcastTaggedToWordForTagAndSmiBits(lhs)), |
| TruncateIntPtrToInt32(BitcastTaggedToWordForTagAndSmiBits(rhs))); |
| TNode<BoolT> overflow = Projection<1>(pair); |
| GotoIf(overflow, if_overflow); |
| TNode<Int32T> result = Projection<0>(pair); |
| return BitcastWordToTaggedSigned(ChangeInt32ToIntPtr(result)); |
| } |
| } |
| |
| TNode<Smi> CodeStubAssembler::TrySmiSub(TNode<Smi> lhs, TNode<Smi> rhs, |
| Label* if_overflow) { |
| if (SmiValuesAre32Bits()) { |
| TNode<PairT<IntPtrT, BoolT>> pair = |
| IntPtrSubWithOverflow(BitcastTaggedToWordForTagAndSmiBits(lhs), |
| BitcastTaggedToWordForTagAndSmiBits(rhs)); |
| TNode<BoolT> overflow = Projection<1>(pair); |
| GotoIf(overflow, if_overflow); |
| TNode<IntPtrT> result = Projection<0>(pair); |
| return BitcastWordToTaggedSigned(result); |
| } else { |
| DCHECK(SmiValuesAre31Bits()); |
| TNode<PairT<Int32T, BoolT>> pair = Int32SubWithOverflow( |
| TruncateIntPtrToInt32(BitcastTaggedToWordForTagAndSmiBits(lhs)), |
| TruncateIntPtrToInt32(BitcastTaggedToWordForTagAndSmiBits(rhs))); |
| TNode<BoolT> overflow = Projection<1>(pair); |
| GotoIf(overflow, if_overflow); |
| TNode<Int32T> result = Projection<0>(pair); |
| return BitcastWordToTaggedSigned(ChangeInt32ToIntPtr(result)); |
| } |
| } |
| |
| TNode<Smi> CodeStubAssembler::TrySmiAbs(TNode<Smi> a, Label* if_overflow) { |
| if (SmiValuesAre32Bits()) { |
| TNode<PairT<IntPtrT, BoolT>> pair = |
| IntPtrAbsWithOverflow(BitcastTaggedToWordForTagAndSmiBits(a)); |
| TNode<BoolT> overflow = Projection<1>(pair); |
| GotoIf(overflow, if_overflow); |
| TNode<IntPtrT> result = Projection<0>(pair); |
| return BitcastWordToTaggedSigned(result); |
| } else { |
| CHECK(SmiValuesAre31Bits()); |
| CHECK(IsInt32AbsWithOverflowSupported()); |
| TNode<PairT<Int32T, BoolT>> pair = Int32AbsWithOverflow( |
| TruncateIntPtrToInt32(BitcastTaggedToWordForTagAndSmiBits(a))); |
| TNode<BoolT> overflow = Projection<1>(pair); |
| GotoIf(overflow, if_overflow); |
| TNode<Int32T> result = Projection<0>(pair); |
| return BitcastWordToTaggedSigned(ChangeInt32ToIntPtr(result)); |
| } |
| } |
| |
| TNode<Number> CodeStubAssembler::NumberMax(TNode<Number> a, TNode<Number> b) { |
| // TODO(danno): This could be optimized by specifically handling smi cases. |
| TVARIABLE(Number, result); |
| Label done(this), greater_than_equal_a(this), greater_than_equal_b(this); |
| GotoIfNumberGreaterThanOrEqual(a, b, &greater_than_equal_a); |
| GotoIfNumberGreaterThanOrEqual(b, a, &greater_than_equal_b); |
| result = NanConstant(); |
| Goto(&done); |
| BIND(&greater_than_equal_a); |
| result = a; |
| Goto(&done); |
| BIND(&greater_than_equal_b); |
| result = b; |
| Goto(&done); |
| BIND(&done); |
| return result.value(); |
| } |
| |
| TNode<Number> CodeStubAssembler::NumberMin(TNode<Number> a, TNode<Number> b) { |
| // TODO(danno): This could be optimized by specifically handling smi cases. |
| TVARIABLE(Number, result); |
| Label done(this), greater_than_equal_a(this), greater_than_equal_b(this); |
| GotoIfNumberGreaterThanOrEqual(a, b, &greater_than_equal_a); |
| GotoIfNumberGreaterThanOrEqual(b, a, &greater_than_equal_b); |
| result = NanConstant(); |
| Goto(&done); |
| BIND(&greater_than_equal_a); |
| result = b; |
| Goto(&done); |
| BIND(&greater_than_equal_b); |
| result = a; |
| Goto(&done); |
| BIND(&done); |
| return result.value(); |
| } |
| |
| TNode<Number> CodeStubAssembler::SmiMod(TNode<Smi> a, TNode<Smi> b) { |
| TVARIABLE(Number, var_result); |
| Label return_result(this, &var_result), |
| return_minuszero(this, Label::kDeferred), |
| return_nan(this, Label::kDeferred); |
| |
| // Untag {a} and {b}. |
| TNode<Int32T> int_a = SmiToInt32(a); |
| TNode<Int32T> int_b = SmiToInt32(b); |
| |
| // Return NaN if {b} is zero. |
| GotoIf(Word32Equal(int_b, Int32Constant(0)), &return_nan); |
| |
| // Check if {a} is non-negative. |
| Label if_aisnotnegative(this), if_aisnegative(this, Label::kDeferred); |
| Branch(Int32LessThanOrEqual(Int32Constant(0), int_a), &if_aisnotnegative, |
| &if_aisnegative); |
| |
| BIND(&if_aisnotnegative); |
| { |
| // Fast case, don't need to check any other edge cases. |
| TNode<Int32T> r = Int32Mod(int_a, int_b); |
| var_result = SmiFromInt32(r); |
| Goto(&return_result); |
| } |
| |
| BIND(&if_aisnegative); |
| { |
| if (SmiValuesAre32Bits()) { |
| // Check if {a} is kMinInt and {b} is -1 (only relevant if kMinInt is |
| // actually representable as a Smi). |
| Label join(this); |
| GotoIfNot(Word32Equal(int_a, Int32Constant(kMinInt)), &join); |
| GotoIf(Word32Equal(int_b, Int32Constant(-1)), &return_minuszero); |
| Goto(&join); |
| BIND(&join); |
| } |
| |
| // Perform the integer modulus operation. |
| TNode<Int32T> r = Int32Mod(int_a, int_b); |
| |
| // Check if {r} is zero, and if so return -0, because we have to |
| // take the sign of the left hand side {a}, which is negative. |
| GotoIf(Word32Equal(r, Int32Constant(0)), &return_minuszero); |
| |
| // The remainder {r} can be outside the valid Smi range on 32bit |
| // architectures, so we cannot just say SmiFromInt32(r) here. |
| var_result = ChangeInt32ToTagged(r); |
| Goto(&return_result); |
| } |
| |
| BIND(&return_minuszero); |
| var_result = MinusZeroConstant(); |
| Goto(&return_result); |
| |
| BIND(&return_nan); |
| var_result = NanConstant(); |
| Goto(&return_result); |
| |
| BIND(&return_result); |
| return var_result.value(); |
| } |
| |
| TNode<Number> CodeStubAssembler::SmiMul(TNode<Smi> a, TNode<Smi> b) { |
| TVARIABLE(Number, var_result); |
| TVARIABLE(Float64T, var_lhs_float64); |
| TVARIABLE(Float64T, var_rhs_float64); |
| Label return_result(this, &var_result); |
| |
| // Both {a} and {b} are Smis. Convert them to integers and multiply. |
| TNode<Int32T> lhs32 = SmiToInt32(a); |
| TNode<Int32T> rhs32 = SmiToInt32(b); |
| auto pair = Int32MulWithOverflow(lhs32, rhs32); |
| |
| TNode<BoolT> overflow = Projection<1>(pair); |
| |
| // Check if the multiplication overflowed. |
| Label if_overflow(this, Label::kDeferred), if_notoverflow(this); |
| Branch(overflow, &if_overflow, &if_notoverflow); |
| BIND(&if_notoverflow); |
| { |
| // If the answer is zero, we may need to return -0.0, depending on the |
| // input. |
| Label answer_zero(this), answer_not_zero(this); |
| TNode<Int32T> answer = Projection<0>(pair); |
| TNode<Int32T> zero = Int32Constant(0); |
| Branch(Word32Equal(answer, zero), &answer_zero, &answer_not_zero); |
| BIND(&answer_not_zero); |
| { |
| var_result = ChangeInt32ToTagged(answer); |
| Goto(&return_result); |
| } |
| BIND(&answer_zero); |
| { |
| TNode<Int32T> or_result = Word32Or(lhs32, rhs32); |
| Label if_should_be_negative_zero(this), if_should_be_zero(this); |
| Branch(Int32LessThan(or_result, zero), &if_should_be_negative_zero, |
| &if_should_be_zero); |
| BIND(&if_should_be_negative_zero); |
| { |
| var_result = MinusZeroConstant(); |
| Goto(&return_result); |
| } |
| BIND(&if_should_be_zero); |
| { |
| var_result = SmiConstant(0); |
| Goto(&return_result); |
| } |
| } |
| } |
| BIND(&if_overflow); |
| { |
| var_lhs_float64 = SmiToFloat64(a); |
| var_rhs_float64 = SmiToFloat64(b); |
| TNode<Float64T> value = |
| Float64Mul(var_lhs_float64.value(), var_rhs_float64.value()); |
| var_result = AllocateHeapNumberWithValue(value); |
| Goto(&return_result); |
| } |
| |
| BIND(&return_result); |
| return var_result.value(); |
| } |
| |
| TNode<Smi> CodeStubAssembler::TrySmiDiv(TNode<Smi> dividend, TNode<Smi> divisor, |
| Label* bailout) { |
| // Both {dividend} and {divisor} are Smis. Bail out to floating point |
| // division if {divisor} is zero. |
| GotoIf(TaggedEqual(divisor, SmiConstant(0)), bailout); |
| |
| // Do floating point division if {dividend} is zero and {divisor} is |
| // negative. |
| Label dividend_is_zero(this), dividend_is_not_zero(this); |
| Branch(TaggedEqual(dividend, SmiConstant(0)), &dividend_is_zero, |
| &dividend_is_not_zero); |
| |
| BIND(&dividend_is_zero); |
| { |
| GotoIf(SmiLessThan(divisor, SmiConstant(0)), bailout); |
| Goto(&dividend_is_not_zero); |
| } |
| BIND(&dividend_is_not_zero); |
| |
| TNode<Int32T> untagged_divisor = SmiToInt32(divisor); |
| TNode<Int32T> untagged_dividend = SmiToInt32(dividend); |
| |
| // Do floating point division if {dividend} is kMinInt (or kMinInt >> 1, the |
| // minimal 31-bit Smi, if the Smi size is 31) and {divisor} is -1. |
| Label divisor_is_minus_one(this), divisor_is_not_minus_one(this); |
| Branch(Word32Equal(untagged_divisor, Int32Constant(-1)), |
| &divisor_is_minus_one, &divisor_is_not_minus_one); |
| |
| BIND(&divisor_is_minus_one); |
| { |
| GotoIf(Word32Equal( |
| untagged_dividend, |
| Int32Constant(kSmiValueSize == 32 ? kMinInt : (kMinInt >> 1))), |
| bailout); |
| Goto(&divisor_is_not_minus_one); |
| } |
| BIND(&divisor_is_not_minus_one); |
| |
| TNode<Int32T> untagged_result = Int32Div(untagged_dividend, untagged_divisor); |
| TNode<Int32T> truncated = Int32Mul(untagged_result, untagged_divisor); |
| |
| // Do floating point division if the remainder is not 0. |
| GotoIf(Word32NotEqual(untagged_dividend, truncated), bailout); |
| |
| return SmiFromInt32(untagged_result); |
| } |
| |
| TNode<Smi> CodeStubAssembler::SmiLexicographicCompare(TNode<Smi> x, |
| TNode<Smi> y) { |
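| // Delegates to a C++ helper via CallCFunction; the comparison result comes |
| // back as a tagged Smi. |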
| TNode<ExternalReference> smi_lexicographic_compare = |
| ExternalConstant(ExternalReference::smi_lexicographic_compare_function()); |
| TNode<ExternalReference> isolate_ptr = |
| ExternalConstant(ExternalReference::isolate_address(isolate())); |
| return CAST(CallCFunction(smi_lexicographic_compare, MachineType::AnyTagged(), |
| std::make_pair(MachineType::Pointer(), isolate_ptr), |
| std::make_pair(MachineType::AnyTagged(), x), |
| std::make_pair(MachineType::AnyTagged(), y))); |
| } |
| |
| TNode<Int32T> CodeStubAssembler::TruncateWordToInt32(TNode<WordT> value) { |
| if (Is64()) { |
| return TruncateInt64ToInt32(ReinterpretCast<Int64T>(value)); |
| } |
| return ReinterpretCast<Int32T>(value); |
| } |
| |
| TNode<Int32T> CodeStubAssembler::TruncateIntPtrToInt32(TNode<IntPtrT> value) { |
| if (Is64()) { |
| return TruncateInt64ToInt32(ReinterpretCast<Int64T>(value)); |
| } |
| return ReinterpretCast<Int32T>(value); |
| } |
| |
| TNode<BoolT> CodeStubAssembler::TaggedIsSmi(TNode<MaybeObject> a) { |
| STATIC_ASSERT(kSmiTagMask < kMaxUInt32); |
| return Word32Equal( |
| Word32And(TruncateIntPtrToInt32(BitcastTaggedToWordForTagAndSmiBits(a)), |
| Int32Constant(kSmiTagMask)), |
| Int32Constant(0)); |
| } |
| |
| TNode<BoolT> CodeStubAssembler::TaggedIsNotSmi(TNode<MaybeObject> a) { |
| return Word32BinaryNot(TaggedIsSmi(a)); |
| } |
| |
| TNode<BoolT> CodeStubAssembler::TaggedIsPositiveSmi(TNode<Object> a) { |
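| // A positive Smi has both the tag bit and the sign bit cleared. With 31-bit |
| // Smis (and on 32-bit hosts) the sign bit lies within the low 32 bits, so a |
| // 32-bit test suffices; otherwise the full word has to be checked. |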
| #if defined(V8_HOST_ARCH_32_BIT) || defined(V8_31BIT_SMIS_ON_64BIT_ARCH) |
| return Word32Equal( |
| Word32And( |
| TruncateIntPtrToInt32(BitcastTaggedToWordForTagAndSmiBits(a)), |
| Uint32Constant(static_cast<uint32_t>(kSmiTagMask | kSmiSignMask))), |
| Int32Constant(0)); |
| #else |
| return WordEqual(WordAnd(BitcastTaggedToWordForTagAndSmiBits(a), |
| IntPtrConstant(kSmiTagMask | kSmiSignMask)), |
| IntPtrConstant(0)); |
| #endif |
| } |
| |
| TNode<BoolT> CodeStubAssembler::WordIsAligned(TNode<WordT> word, |
| size_t alignment) { |
| DCHECK(base::bits::IsPowerOfTwo(alignment)); |
| DCHECK_LE(alignment, kMaxUInt32); |
| return Word32Equal( |
| Int32Constant(0), |
| Word32And(TruncateWordToInt32(word), |
| Uint32Constant(static_cast<uint32_t>(alignment) - 1))); |
| } |
| |
| #if DEBUG |
| void CodeStubAssembler::Bind(Label* label, AssemblerDebugInfo debug_info) { |
| CodeAssembler::Bind(label, debug_info); |
| } |
| #endif // DEBUG |
| |
| void CodeStubAssembler::Bind(Label* label) { CodeAssembler::Bind(label); } |
| |
| TNode<Float64T> CodeStubAssembler::LoadDoubleWithHoleCheck( |
| TNode<FixedDoubleArray> array, TNode<IntPtrT> index, Label* if_hole) { |
| return LoadFixedDoubleArrayElement(array, index, if_hole); |
| } |
| |
| void CodeStubAssembler::BranchIfJSReceiver(TNode<Object> object, Label* if_true, |
| Label* if_false) { |
| GotoIf(TaggedIsSmi(object), if_false); |
| STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE); |
| Branch(IsJSReceiver(CAST(object)), if_true, if_false); |
| } |
| |
| void CodeStubAssembler::GotoIfForceSlowPath(Label* if_true) { |
| #ifdef V8_ENABLE_FORCE_SLOW_PATH |
| const TNode<ExternalReference> force_slow_path_addr = |
| ExternalConstant(ExternalReference::force_slow_path(isolate())); |
| const TNode<Uint8T> force_slow = Load<Uint8T>(force_slow_path_addr); |
| |
| GotoIf(force_slow, if_true); |
| #endif |
| } |
| |
| TNode<HeapObject> CodeStubAssembler::AllocateRaw(TNode<IntPtrT> size_in_bytes, |
| AllocationFlags flags, |
| TNode<RawPtrT> top_address, |
| TNode<RawPtrT> limit_address) { |
| Label if_out_of_memory(this, Label::kDeferred); |
| |
| // TODO(jgruber,jkummerow): Extract the slow paths (= probably everything |
| // but bump pointer allocation) into a builtin to save code space. The |
| // size_in_bytes check may be moved there as well since a non-smi |
| // size_in_bytes probably doesn't fit into the bump pointer region |
| // (double-check that). |
| |
| intptr_t size_in_bytes_constant; |
| bool size_in_bytes_is_constant = false; |
| if (TryToIntPtrConstant(size_in_bytes, &size_in_bytes_constant)) { |
| size_in_bytes_is_constant = true; |
| CHECK(Internals::IsValidSmi(size_in_bytes_constant)); |
| CHECK_GT(size_in_bytes_constant, 0); |
| } else { |
| GotoIfNot(IsValidPositiveSmi(size_in_bytes), &if_out_of_memory); |
| } |
| |
| TNode<RawPtrT> top = Load<RawPtrT>(top_address); |
| TNode<RawPtrT> limit = Load<RawPtrT>(limit_address); |
| |
| // If there's not enough space, call the runtime. |
| TVARIABLE(Object, result); |
| Label runtime_call(this, Label::kDeferred), no_runtime_call(this), out(this); |
| |
| bool needs_double_alignment = flags & AllocationFlag::kDoubleAlignment; |
| bool allow_large_object_allocation = |
| flags & AllocationFlag::kAllowLargeObjectAllocation; |
| |
| if (allow_large_object_allocation) { |
| Label next(this); |
| GotoIf(IsRegularHeapObjectSize(size_in_bytes), &next); |
| |
| TNode<Smi> runtime_flags = SmiConstant(Smi::FromInt( |
| AllocateDoubleAlignFlag::encode(needs_double_alignment) | |
| AllowLargeObjectAllocationFlag::encode(allow_large_object_allocation))); |
| result = |
| CallRuntime(Runtime::kAllocateInYoungGeneration, NoContextConstant(), |
| SmiTag(size_in_bytes), runtime_flags); |
| Goto(&out); |
| |
| BIND(&next); |
| } |
| |
| TVARIABLE(IntPtrT, adjusted_size, size_in_bytes); |
| |
| if (needs_double_alignment) { |
| Label next(this); |
| GotoIfNot(WordAnd(top, IntPtrConstant(kDoubleAlignmentMask)), &next); |
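| // {top} is not double aligned: reserve 4 extra bytes so that a one-word |
| // filler can be stored at {top} and the object itself starts double aligned. |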
| |
| adjusted_size = IntPtrAdd(size_in_bytes, IntPtrConstant(4)); |
| Goto(&next); |
| |
| BIND(&next); |
| } |
| |
| TNode<IntPtrT> new_top = |
| IntPtrAdd(UncheckedCast<IntPtrT>(top), adjusted_size.value()); |
| |
| Branch(UintPtrGreaterThanOrEqual(new_top, limit), &runtime_call, |
| &no_runtime_call); |
| |
| BIND(&runtime_call); |
| { |
| TNode<Smi> runtime_flags = SmiConstant(Smi::FromInt( |
| AllocateDoubleAlignFlag::encode(needs_double_alignment) | |
| AllowLargeObjectAllocationFlag::encode(allow_large_object_allocation))); |
| if (flags & AllocationFlag::kPretenured) { |
| result = |
| CallRuntime(Runtime::kAllocateInOldGeneration, NoContextConstant(), |
| SmiTag(size_in_bytes), runtime_flags); |
| } else { |
| result = |
| CallRuntime(Runtime::kAllocateInYoungGeneration, NoContextConstant(), |
| SmiTag(size_in_bytes), runtime_flags); |
| } |
| Goto(&out); |
| } |
| |
| // When there is enough space, return `top' and bump it up. |
| BIND(&no_runtime_call); |
| { |
| StoreNoWriteBarrier(MachineType::PointerRepresentation(), top_address, |
| new_top); |
| |
| TVARIABLE(IntPtrT, address, UncheckedCast<IntPtrT>(top)); |
| |
| if (needs_double_alignment) { |
| Label next(this); |
| GotoIf(IntPtrEqual(adjusted_size.value(), size_in_bytes), &next); |
| |
| // Store a filler and increase the address by 4. |
| StoreNoWriteBarrier(MachineRepresentation::kTagged, top, |
| OnePointerFillerMapConstant()); |
| address = IntPtrAdd(UncheckedCast<IntPtrT>(top), IntPtrConstant(4)); |
| Goto(&next); |
| |
| BIND(&next); |
| } |
| |
| result = BitcastWordToTagged( |
| IntPtrAdd(address.value(), IntPtrConstant(kHeapObjectTag))); |
| Goto(&out); |
| } |
| |
| if (!size_in_bytes_is_constant) { |
| BIND(&if_out_of_memory); |
| CallRuntime(Runtime::kFatalProcessOutOfMemoryInAllocateRaw, |
| NoContextConstant()); |
| Unreachable(); |
| } |
| |
| BIND(&out); |
| return UncheckedCast<HeapObject>(result.value()); |
| } |
| |
| TNode<HeapObject> CodeStubAssembler::AllocateRawUnaligned( |
| TNode<IntPtrT> size_in_bytes, AllocationFlags flags, |
| TNode<RawPtrT> top_address, TNode<RawPtrT> limit_address) { |
| DCHECK_EQ(flags & AllocationFlag::kDoubleAlignment, 0); |
| return AllocateRaw(size_in_bytes, flags, top_address, limit_address); |
| } |
| |
| TNode<HeapObject> CodeStubAssembler::AllocateRawDoubleAligned( |
| TNode<IntPtrT> size_in_bytes, AllocationFlags flags, |
| TNode<RawPtrT> top_address, TNode<RawPtrT> limit_address) { |
| #if defined(V8_HOST_ARCH_32_BIT) |
| return AllocateRaw(size_in_bytes, flags | AllocationFlag::kDoubleAlignment, |
| top_address, limit_address); |
| #elif defined(V8_HOST_ARCH_64_BIT) |
| #ifdef V8_COMPRESS_POINTERS |
| // TODO(ishell, v8:8875): Consider using aligned allocations once the |
| // allocation alignment inconsistency is fixed. For now we keep using |
| // unaligned access since both x64 and arm64 architectures (where pointer |
| // compression is supported) allow unaligned access to doubles and full words. |
| #endif // V8_COMPRESS_POINTERS |
| // Allocation on a 64 bit machine is naturally double aligned. |
| return AllocateRaw(size_in_bytes, flags & ~AllocationFlag::kDoubleAlignment, |
| top_address, limit_address); |
| #else |
| #error Architecture not supported |
| #endif |
| } |
| |
| TNode<HeapObject> CodeStubAssembler::AllocateInNewSpace( |
| TNode<IntPtrT> size_in_bytes, AllocationFlags flags) { |
| DCHECK(flags == AllocationFlag::kNone || |
| flags == AllocationFlag::kDoubleAlignment); |
| CSA_DCHECK(this, IsRegularHeapObjectSize(size_in_bytes)); |
| return Allocate(size_in_bytes, flags); |
| } |
| |
| TNode<HeapObject> CodeStubAssembler::Allocate(TNode<IntPtrT> size_in_bytes, |
| AllocationFlags flags) { |
| Comment("Allocate"); |
| if (FLAG_single_generation) flags |= AllocationFlag::kPretenured; |
| bool const new_space = !(flags & AllocationFlag::kPretenured); |
| bool const allow_large_objects = |
| flags & AllocationFlag::kAllowLargeObjectAllocation; |
| if (!allow_large_objects) { |
| intptr_t size_constant; |
| if (TryToIntPtrConstant(size_in_bytes, &size_constant)) { |
| CHECK_LE(size_constant, kMaxRegularHeapObjectSize); |
| } else { |
| CSA_DCHECK(this, IsRegularHeapObjectSize(size_in_bytes)); |
| } |
| } |
| if (!(flags & AllocationFlag::kDoubleAlignment)) { |
| return OptimizedAllocate( |
| size_in_bytes, |
| new_space ? AllocationType::kYoung : AllocationType::kOld, |
| allow_large_objects ? AllowLargeObjects::kTrue |
| : AllowLargeObjects::kFalse); |
| } |
| TNode<ExternalReference> top_address = ExternalConstant( |
| new_space |
| ? ExternalReference::new_space_allocation_top_address(isolate()) |
| : ExternalReference::old_space_allocation_top_address(isolate())); |
| |
| #ifdef DEBUG |
| // New space is optional; if it is disabled, both the top and the limit |
| // addresses are kNullAddress. |
| if (ExternalReference::new_space_allocation_top_address(isolate()) |
| .address() != kNullAddress) { |
| Address raw_top_address = |
| ExternalReference::new_space_allocation_top_address(isolate()) |
| .address(); |
| Address raw_limit_address = |
| ExternalReference::new_space_allocation_limit_address(isolate()) |
| .address(); |
| |
| CHECK_EQ(kSystemPointerSize, raw_limit_address - raw_top_address); |
| } |
| |
| DCHECK_EQ(kSystemPointerSize, |
| ExternalReference::old_space_allocation_limit_address(isolate()) |
| .address() - |
| ExternalReference::old_space_allocation_top_address(isolate()) |
| .address()); |
| #endif |
| |
| TNode<IntPtrT> limit_address = |
| IntPtrAdd(ReinterpretCast<IntPtrT>(top_address), |
| IntPtrConstant(kSystemPointerSize)); |
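| // Note: the limit address is stored kSystemPointerSize after the top |
| // address (verified by the checks in the DEBUG block above), which is what |
| // makes the constant-offset computation above correct. |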
| |
| if (flags & AllocationFlag::kDoubleAlignment) { |
| return AllocateRawDoubleAligned(size_in_bytes, flags, |
| ReinterpretCast<RawPtrT>(top_address), |
| ReinterpretCast<RawPtrT>(limit_address)); |
| } else { |
| return AllocateRawUnaligned(size_in_bytes, flags, |
| ReinterpretCast<RawPtrT>(top_address), |
| ReinterpretCast<RawPtrT>(limit_address)); |
| } |
| } |
| |
| TNode<HeapObject> CodeStubAssembler::AllocateInNewSpace(int size_in_bytes, |
| AllocationFlags flags) { |
| CHECK(flags == AllocationFlag::kNone || |
| flags == AllocationFlag::kDoubleAlignment); |
| DCHECK_LE(size_in_bytes, kMaxRegularHeapObjectSize); |
| return CodeStubAssembler::Allocate(IntPtrConstant(size_in_bytes), flags); |
| } |
| |
| TNode<HeapObject> CodeStubAssembler::Allocate(int size_in_bytes, |
| AllocationFlags flags) { |
| return CodeStubAssembler::Allocate(IntPtrConstant(size_in_bytes), flags); |
| } |
| |
| TNode<BoolT> CodeStubAssembler::IsRegularHeapObjectSize(TNode<IntPtrT> size) { |
| return UintPtrLessThanOrEqual(size, |
| IntPtrConstant(kMaxRegularHeapObjectSize)); |
| } |
| |
| #if V8_ENABLE_WEBASSEMBLY |
| TNode<HeapObject> CodeStubAssembler::AllocateWasmArray( |
| TNode<IntPtrT> size_in_bytes, int initialization) { |
| TNode<HeapObject> array = |
| Allocate(size_in_bytes, AllocationFlag::kAllowLargeObjectAllocation); |
| if (initialization == kUninitialized) return array; |
| |
| TNode<IntPtrT> array_address = BitcastTaggedToWord(array); |
| TNode<IntPtrT> start = IntPtrAdd( |
| array_address, IntPtrConstant(WasmArray::kHeaderSize - kHeapObjectTag)); |
| TNode<IntPtrT> limit = IntPtrAdd( |
| array_address, IntPtrSub(size_in_bytes, IntPtrConstant(kHeapObjectTag))); |
| |
| TNode<Object> value; |
| if (initialization == kInitializeToZero) { |
| // A pointer-sized zero pattern is just what we need for numeric Wasm |
| // arrays (their object size is rounded up to a multiple of kPointerSize). |
| value = SmiConstant(0); |
| } else if (initialization == kInitializeToNull) { |
| value = NullConstant(); |
| } else { |
| UNREACHABLE(); |
| } |
| StoreFieldsNoWriteBarrier(start, limit, value); |
| return array; |
| } |
| #endif // V8_ENABLE_WEBASSEMBLY |
| |
| void CodeStubAssembler::BranchIfToBooleanIsTrue(TNode<Object> value, |
| Label* if_true, |
| Label* if_false) { |
| Label if_smi(this), if_notsmi(this), if_heapnumber(this, Label::kDeferred), |
| if_bigint(this, Label::kDeferred); |
| // Rule out false {value}. |
| GotoIf(TaggedEqual(value, FalseConstant()), if_false); |
| |
| // Check if {value} is a Smi or a HeapObject. |
| Branch(TaggedIsSmi(value), &if_smi, &if_notsmi); |
| |
| BIND(&if_smi); |
| { |
| // The {value} is a Smi, only need to check against zero. |
| BranchIfSmiEqual(CAST(value), SmiConstant(0), if_false, if_true); |
| } |
| |
| BIND(&if_notsmi); |
| { |
| TNode<HeapObject> value_heapobject = CAST(value); |
| |
| // Check if {value} is the empty string. |
| GotoIf(IsEmptyString(value_heapobject), if_false); |
| |
| // The {value} is a HeapObject, load its map. |
| TNode<Map> value_map = LoadMap(value_heapobject); |
| |
| // Only null, undefined and document.all have the undetectable bit set, |
| // so we can return false immediately when that bit is set. |
| GotoIf(IsUndetectableMap(value_map), if_false); |
| |
| // We still need to handle numbers specially, but all other {value}s |
| // that make it here yield true. |
| GotoIf(IsHeapNumberMap(value_map), &if_heapnumber); |
| Branch(IsBigInt(value_heapobject), &if_bigint, if_true); |
| |
| BIND(&if_heapnumber); |
| { |
| // Load the floating point value of {value}. |
| TNode<Float64T> value_value = |
| LoadObjectField<Float64T>(value_heapobject, HeapNumber::kValueOffset); |
| |
| // Check if the floating point {value} is neither 0.0, -0.0 nor NaN. |
| Branch(Float64LessThan(Float64Constant(0.0), Float64Abs(value_value)), |
| if_true, if_false); |
| } |
| |
| BIND(&if_bigint); |
| { |
| TNode<BigInt> bigint = CAST(value); |
| TNode<Word32T> bitfield = LoadBigIntBitfield(bigint); |
| TNode<Uint32T> length = DecodeWord32<BigIntBase::LengthBits>(bitfield); |
| Branch(Word32Equal(length, Int32Constant(0)), if_false, if_true); |
| } |
| } |
| } |
| |
| TNode<RawPtrT> CodeStubAssembler::LoadSandboxedPointerFromObject( |
| TNode<HeapObject> object, TNode<IntPtrT> field_offset) { |
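| // Under V8_SANDBOXED_POINTERS the field holds the pointer in its sandboxed |
| // (offset-based) encoding and the SandboxedPtrT load decodes it back to a |
| // raw pointer; without the sandbox it is stored as a plain raw pointer. |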
| #ifdef V8_SANDBOXED_POINTERS |
| return ReinterpretCast<RawPtrT>( |
| LoadObjectField<SandboxedPtrT>(object, field_offset)); |
| #else |
| return LoadObjectField<RawPtrT>(object, field_offset); |
| #endif // V8_SANDBOXED_POINTERS |
| } |
| |
| void CodeStubAssembler::StoreSandboxedPointerToObject(TNode<HeapObject> object, |
| TNode<IntPtrT> offset, |
| TNode<RawPtrT> pointer) { |
| #ifdef V8_SANDBOXED_POINTERS |
| TNode<SandboxedPtrT> sbx_ptr = ReinterpretCast<SandboxedPtrT>(pointer); |
| |
| // Ensure pointer points into the sandbox. |
| TNode<ExternalReference> sandbox_base_address = |
| ExternalConstant(ExternalReference::sandbox_base_address()); |
| TNode<ExternalReference> sandbox_end_address = |
| ExternalConstant(ExternalReference::sandbox_end_address()); |
| TNode<UintPtrT> sandbox_base = Load<UintPtrT>(sandbox_base_address); |
| TNode<UintPtrT> sandbox_end = Load<UintPtrT>(sandbox_end_address); |
| CSA_CHECK(this, UintPtrGreaterThanOrEqual(sbx_ptr, sandbox_base)); |
| CSA_CHECK(this, UintPtrLessThan(sbx_ptr, sandbox_end)); |
| |
| StoreObjectFieldNoWriteBarrier<SandboxedPtrT>(object, offset, sbx_ptr); |
| #else |
| StoreObjectFieldNoWriteBarrier<RawPtrT>(object, offset, pointer); |
| #endif // V8_SANDBOXED_POINTERS |
| } |
| |
| TNode<RawPtrT> CodeStubAssembler::EmptyBackingStoreBufferConstant() { |
| #ifdef V8_SANDBOXED_POINTERS |
| // TODO(chromium:1218005) consider creating a LoadSandboxedPointerConstant() |
| // if more of these constants are required later on. |
| TNode<ExternalReference> empty_backing_store_buffer = |
| ExternalConstant(ExternalReference::empty_backing_store_buffer()); |
| return Load<RawPtrT>(empty_backing_store_buffer); |
| #else |
| return ReinterpretCast<RawPtrT>(IntPtrConstant(0)); |
| #endif // V8_SANDBOXED_POINTERS |
| } |
| |
| #ifdef V8_SANDBOXED_EXTERNAL_POINTERS |
| TNode<ExternalPointerT> CodeStubAssembler::ChangeIndexToExternalPointer( |
| TNode<Uint32T> index) { |
| DCHECK_EQ(kExternalPointerSize, kUInt32Size); |
| TNode<Uint32T> shifted_index = |
| Word32Shl(index, Uint32Constant(kExternalPointerIndexShift)); |
| return ReinterpretCast<ExternalPointerT>(shifted_index); |
| } |
| |
| TNode<Uint32T> CodeStubAssembler::ChangeExternalPointerToIndex( |
| TNode<ExternalPointerT> external_pointer) { |
| DCHECK_EQ(kExternalPointerSize, kUInt32Size); |
| TNode<Uint32T> shifted_index = ReinterpretCast<Uint32T>(external_pointer); |
| return Word32Shr(shifted_index, Uint32Constant(kExternalPointerIndexShift)); |
| } |
| #endif // V8_SANDBOXED_EXTERNAL_POINTERS |
| |
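| // Allocates a fresh entry in the external pointer table via a C call and
| // stores the encoded index of that entry into the field at {offset}. When
| // sandboxed external pointers are disabled this is a no-op.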
| void CodeStubAssembler::InitializeExternalPointerField(TNode<HeapObject> object, |
| TNode<IntPtrT> offset) { |
| #ifdef V8_SANDBOXED_EXTERNAL_POINTERS |
| TNode<ExternalReference> external_pointer_table_address = ExternalConstant( |
| ExternalReference::external_pointer_table_address(isolate())); |
| |
| // We could implement the fast path for allocating from the freelist here;
| // however, this logic needs to be atomic and so requires CSA to expose
| // atomic operations.
| TNode<ExternalReference> table_allocate_function = ExternalConstant( |
| ExternalReference::external_pointer_table_allocate_entry()); |
| TNode<Uint32T> index = UncheckedCast<Uint32T>(CallCFunction( |
| table_allocate_function, MachineType::Uint32(), |
| std::make_pair(MachineType::Pointer(), external_pointer_table_address))); |
| |
| // Currently, we assume that the caller will immediately initialize the entry
| // through StoreExternalPointerToObject after allocating it. That way, we
| // avoid initializing the entry twice (once with nullptr, then again with the
| // real value).
| // TODO(saelo): initialize the entry with zero here and switch callers to a
| // version that initializes the entry with a given pointer.
| |
| TNode<ExternalPointerT> pointer = ChangeIndexToExternalPointer(index); |
| StoreObjectFieldNoWriteBarrier<ExternalPointerT>(object, offset, pointer); |
| #endif |
| } |
| |
| TNode<RawPtrT> CodeStubAssembler::LoadExternalPointerFromObject( |
| TNode<HeapObject> object, TNode<IntPtrT> offset, |
| ExternalPointerTag external_pointer_tag) { |
| #ifdef V8_SANDBOXED_EXTERNAL_POINTERS |
| TNode<ExternalReference> external_pointer_table_address = ExternalConstant( |
| ExternalReference::external_pointer_table_address(isolate())); |
| TNode<RawPtrT> table = UncheckedCast<RawPtrT>( |
| Load(MachineType::Pointer(), external_pointer_table_address, |
| UintPtrConstant(Internals::kExternalPointerTableBufferOffset))); |
| |
| TNode<ExternalPointerT> encoded = |
| LoadObjectField<ExternalPointerT>(object, offset); |
| TNode<Uint32T> index = ChangeExternalPointerToIndex(encoded); |
| // TODO(v8:10391): consider updating ElementOffsetFromIndex to generate code |
| // that does one shift right instead of two shifts (right and then left). |
| TNode<IntPtrT> table_offset = ElementOffsetFromIndex( |
| ChangeUint32ToWord(index), SYSTEM_POINTER_ELEMENTS, 0); |
| |
| TNode<UintPtrT> entry = Load<UintPtrT>(table, table_offset); |
| if (external_pointer_tag != 0) { |
| TNode<UintPtrT> tag = UintPtrConstant(~external_pointer_tag); |
| entry = UncheckedCast<UintPtrT>(WordAnd(entry, tag)); |
| } |
| return UncheckedCast<RawPtrT>(UncheckedCast<WordT>(entry)); |
| #else |
| return LoadObjectField<RawPtrT>(object, offset); |
| #endif // V8_SANDBOXED_EXTERNAL_POINTERS |
| } |
| |
| void CodeStubAssembler::StoreExternalPointerToObject( |
| TNode<HeapObject> object, TNode<IntPtrT> offset, TNode<RawPtrT> pointer, |
| ExternalPointerTag external_pointer_tag) { |
| #ifdef V8_SANDBOXED_EXTERNAL_POINTERS |
| TNode<ExternalReference> external_pointer_table_address = ExternalConstant( |
| ExternalReference::external_pointer_table_address(isolate())); |
| TNode<RawPtrT> table = UncheckedCast<RawPtrT>( |
| Load(MachineType::Pointer(), external_pointer_table_address, |
| UintPtrConstant(Internals::kExternalPointerTableBufferOffset))); |
| |
| TNode<ExternalPointerT> encoded = |
| LoadObjectField<ExternalPointerT>(object, offset); |
| TNode<Uint32T> index = ChangeExternalPointerToIndex(encoded); |
| // TODO(v8:10391): consider updating ElementOffsetFromIndex to generate code |
| // that does one shift right instead of two shifts (right and then left). |
| TNode<IntPtrT> table_offset = ElementOffsetFromIndex( |
| ChangeUint32ToWord(index), SYSTEM_POINTER_ELEMENTS, 0); |
| |
| TNode<UintPtrT> value = UncheckedCast<UintPtrT>(pointer); |
| if (external_pointer_tag != 0) { |
| TNode<UintPtrT> tag = UintPtrConstant(external_pointer_tag); |
| value = UncheckedCast<UintPtrT>(WordOr(pointer, tag)); |
| } |
| StoreNoWriteBarrier(MachineType::PointerRepresentation(), table, table_offset, |
| value); |
| #else |
| StoreObjectFieldNoWriteBarrier<RawPtrT>(object, offset, pointer); |
| #endif // V8_SANDBOXED_EXTERNAL_POINTERS |
| } |
| |
| TNode<Object> CodeStubAssembler::LoadFromParentFrame(int offset) { |
| TNode<RawPtrT> frame_pointer = LoadParentFramePointer(); |
| return LoadFullTagged(frame_pointer, IntPtrConstant(offset)); |
| } |
| |
| TNode<Uint8T> CodeStubAssembler::LoadUint8Ptr(TNode<RawPtrT> ptr, |
| TNode<IntPtrT> offset) { |
| return Load<Uint8T>(IntPtrAdd(ReinterpretCast<IntPtrT>(ptr), offset)); |
| } |
| |
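| // When Smi values are 32 bits wide, the payload lives in the upper half of
| // the tagged word, so the untagged value can be read directly from that half
| // (offset + 4 on little-endian targets) instead of loading the full word and
| // shifting.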
| TNode<IntPtrT> CodeStubAssembler::LoadAndUntagObjectField( |
| TNode<HeapObject> object, int offset) { |
| // To load the map, please use LoadMap(object) instead.
| DCHECK_NE(offset, HeapObject::kMapOffset); |
| if (SmiValuesAre32Bits()) { |
| #if V8_TARGET_LITTLE_ENDIAN |
| offset += 4; |
| #endif |
| return ChangeInt32ToIntPtr(LoadObjectField<Int32T>(object, offset)); |
| } else { |
| return SmiToIntPtr(LoadObjectField<Smi>(object, offset)); |
| } |
| } |
| |
| TNode<Int32T> CodeStubAssembler::LoadAndUntagToWord32ObjectField( |
| TNode<HeapObject> object, int offset) { |
| // To load the map, please use LoadMap(object) instead.
| DCHECK_NE(offset, HeapObject::kMapOffset); |
| if (SmiValuesAre32Bits()) { |
| #if V8_TARGET_LITTLE_ENDIAN |
| offset += 4; |
| #endif |
| return LoadObjectField<Int32T>(object, offset); |
| } else { |
| return SmiToInt32(LoadObjectField<Smi>(object, offset)); |
| } |
| } |
| |
| TNode<Float64T> CodeStubAssembler::LoadHeapNumberValue( |
| TNode<HeapObject> object) { |
| CSA_DCHECK(this, Word32Or(IsHeapNumber(object), IsOddball(object))); |
| STATIC_ASSERT(HeapNumber::kValueOffset == Oddball::kToNumberRawOffset); |
| return LoadObjectField<Float64T>(object, HeapNumber::kValueOffset); |
| } |
| |
| TNode<Map> CodeStubAssembler::GetInstanceTypeMap(InstanceType instance_type) { |
| Handle<Map> map_handle( |
| Map::GetInstanceTypeMap(ReadOnlyRoots(isolate()), instance_type), |
| isolate()); |
| return HeapConstant(map_handle); |
| } |
| |
| TNode<Map> CodeStubAssembler::LoadMap(TNode<HeapObject> object) { |
| TNode<Map> map = LoadObjectField<Map>(object, HeapObject::kMapOffset); |
| #ifdef V8_MAP_PACKING |
| // Check that the loaded map is unpacked, i.e. that the lowest two bits != 0b10.
| CSA_DCHECK(this, |
| WordNotEqual(WordAnd(BitcastTaggedToWord(map), |
| IntPtrConstant(Internals::kMapWordXorMask)), |
| IntPtrConstant(Internals::kMapWordSignature))); |
| #endif |
| return map; |
| } |
| |
| TNode<Uint16T> CodeStubAssembler::LoadInstanceType(TNode<HeapObject> object) { |
| return LoadMapInstanceType(LoadMap(object)); |
| } |
| |
| TNode<BoolT> CodeStubAssembler::HasInstanceType(TNode<HeapObject> object, |
| InstanceType instance_type) { |
| return InstanceTypeEqual(LoadInstanceType(object), instance_type); |
| } |
| |
| TNode<BoolT> CodeStubAssembler::DoesntHaveInstanceType( |
| TNode<HeapObject> object, InstanceType instance_type) { |
| return Word32NotEqual(LoadInstanceType(object), Int32Constant(instance_type)); |
| } |
| |
| TNode<BoolT> CodeStubAssembler::TaggedDoesntHaveInstanceType( |
| TNode<HeapObject> any_tagged, InstanceType type) { |
| /* return Phi <TaggedIsSmi(val), DoesntHaveInstanceType(val, type)> */ |
| TNode<BoolT> tagged_is_smi = TaggedIsSmi(any_tagged); |
| return Select<BoolT>( |
| tagged_is_smi, [=]() { return tagged_is_smi; }, |
| [=]() { return DoesntHaveInstanceType(any_tagged, type); }); |
| } |
| |
| TNode<BoolT> CodeStubAssembler::IsSpecialReceiverMap(TNode<Map> map) { |
| TNode<BoolT> is_special = |
| IsSpecialReceiverInstanceType(LoadMapInstanceType(map)); |
| uint32_t mask = Map::Bits1::HasNamedInterceptorBit::kMask | |
| Map::Bits1::IsAccessCheckNeededBit::kMask; |
| USE(mask); |
| // Interceptors or access checks imply special receiver. |
| CSA_DCHECK(this, |
| SelectConstant<BoolT>(IsSetWord32(LoadMapBitField(map), mask), |
| is_special, Int32TrueConstant())); |
| return is_special; |
| } |
| |
| TNode<Word32T> CodeStubAssembler::IsStringWrapperElementsKind(TNode<Map> map) { |
| TNode<Int32T> kind = LoadMapElementsKind(map); |
| return Word32Or( |
| Word32Equal(kind, Int32Constant(FAST_STRING_WRAPPER_ELEMENTS)), |
| Word32Equal(kind, Int32Constant(SLOW_STRING_WRAPPER_ELEMENTS))); |
| } |
| |
| void CodeStubAssembler::GotoIfMapHasSlowProperties(TNode<Map> map, |
| Label* if_slow) { |
| GotoIf(IsStringWrapperElementsKind(map), if_slow); |
| GotoIf(IsSpecialReceiverMap(map), if_slow); |
| GotoIf(IsDictionaryMap(map), if_slow); |
| } |
| |
| TNode<HeapObject> CodeStubAssembler::LoadFastProperties( |
| TNode<JSReceiver> object) { |
| CSA_SLOW_DCHECK(this, Word32BinaryNot(IsDictionaryMap(LoadMap(object)))); |
| TNode<Object> properties = LoadJSReceiverPropertiesOrHash(object); |
| return Select<HeapObject>( |
| TaggedIsSmi(properties), [=] { return EmptyFixedArrayConstant(); }, |
| [=] { return CAST(properties); }); |
| } |
| |
| TNode<HeapObject> CodeStubAssembler::LoadSlowProperties( |
| TNode<JSReceiver> object) { |
| CSA_SLOW_DCHECK(this, IsDictionaryMap(LoadMap(object))); |
| TNode<Object> properties = LoadJSReceiverPropertiesOrHash(object); |
| NodeGenerator<HeapObject> make_empty = [=]() -> TNode<HeapObject> { |
| if (V8_ENABLE_SWISS_NAME_DICTIONARY_BOOL) { |
| return EmptySwissPropertyDictionaryConstant(); |
| } else { |
| return EmptyPropertyDictionaryConstant(); |
| } |
| }; |
| NodeGenerator<HeapObject> cast_properties = [=] { |
| TNode<HeapObject> dict = CAST(properties); |
| if (V8_ENABLE_SWISS_NAME_DICTIONARY_BOOL) { |
| CSA_DCHECK(this, Word32Or(IsSwissNameDictionary(dict), |
| IsGlobalDictionary(dict))); |
| } else { |
| CSA_DCHECK(this, |
| Word32Or(IsNameDictionary(dict), IsGlobalDictionary(dict))); |
| } |
| return dict; |
| }; |
| return Select<HeapObject>(TaggedIsSmi(properties), make_empty, |
| cast_properties); |
| } |
| |
| TNode<Object> CodeStubAssembler::LoadJSArgumentsObjectLength( |
| TNode<Context> context, TNode<JSArgumentsObject> array) { |
| CSA_DCHECK(this, IsJSArgumentsObjectWithLength(context, array)); |
| constexpr int offset = JSStrictArgumentsObject::kLengthOffset; |
| STATIC_ASSERT(offset == JSSloppyArgumentsObject::kLengthOffset); |
| return LoadObjectField(array, offset); |
| } |
| |
| TNode<Smi> CodeStubAssembler::LoadFastJSArrayLength(TNode<JSArray> array) { |
| TNode<Number> length = LoadJSArrayLength(array); |
| CSA_DCHECK(this, Word32Or(IsFastElementsKind(LoadElementsKind(array)), |
| IsElementsKindInRange( |
| LoadElementsKind(array), |
| FIRST_ANY_NONEXTENSIBLE_ELEMENTS_KIND, |
| LAST_ANY_NONEXTENSIBLE_ELEMENTS_KIND))); |
| // JSArray length is always a positive Smi for fast arrays. |
| CSA_SLOW_DCHECK(this, TaggedIsPositiveSmi(length)); |
| return CAST(length); |
| } |
| |
| TNode<Smi> CodeStubAssembler::LoadFixedArrayBaseLength( |
| TNode<FixedArrayBase> array) { |
| CSA_SLOW_DCHECK(this, IsNotWeakFixedArraySubclass(array)); |
| return LoadObjectField<Smi>(array, FixedArrayBase::kLengthOffset); |
| } |
| |
| TNode<IntPtrT> CodeStubAssembler::LoadAndUntagFixedArrayBaseLength( |
| TNode<FixedArrayBase> array) { |
| return LoadAndUntagObjectField(array, FixedArrayBase::kLengthOffset); |
| } |
| |
| TNode<IntPtrT> CodeStubAssembler::LoadFeedbackVectorLength( |
| TNode<FeedbackVector> vector) { |
| return ChangeInt32ToIntPtr( |
| LoadObjectField<Int32T>(vector, FeedbackVector::kLengthOffset)); |
| } |
| |
| TNode<Smi> CodeStubAssembler::LoadWeakFixedArrayLength( |
| TNode<WeakFixedArray> array) { |
| return LoadObjectField<Smi>(array, WeakFixedArray::kLengthOffset); |
| } |
| |
| TNode<IntPtrT> CodeStubAssembler::LoadAndUntagWeakFixedArrayLength( |
| TNode<WeakFixedArray> array) { |
| return LoadAndUntagObjectField(array, WeakFixedArray::kLengthOffset); |
| } |
| |
| TNode<Int32T> CodeStubAssembler::LoadNumberOfDescriptors( |
| TNode<DescriptorArray> array) { |
| return UncheckedCast<Int32T>(LoadObjectField<Int16T>( |
| array, DescriptorArray::kNumberOfDescriptorsOffset)); |
| } |
| |
| TNode<Int32T> CodeStubAssembler::LoadNumberOfOwnDescriptors(TNode<Map> map) { |
| TNode<Uint32T> bit_field3 = LoadMapBitField3(map); |
| return UncheckedCast<Int32T>( |
| DecodeWord32<Map::Bits3::NumberOfOwnDescriptorsBits>(bit_field3)); |
| } |
| |
| TNode<Int32T> CodeStubAssembler::LoadMapBitField(TNode<Map> map) { |
| return UncheckedCast<Int32T>( |
| LoadObjectField<Uint8T>(map, Map::kBitFieldOffset)); |
| } |
| |
| TNode<Int32T> CodeStubAssembler::LoadMapBitField2(TNode<Map> map) { |
| return UncheckedCast<Int32T>( |
| LoadObjectField<Uint8T>(map, Map::kBitField2Offset)); |
| } |
| |
| TNode<Uint32T> CodeStubAssembler::LoadMapBitField3(TNode<Map> map) { |
| return LoadObjectField<Uint32T>(map, Map::kBitField3Offset); |
| } |
| |
| TNode<Uint16T> CodeStubAssembler::LoadMapInstanceType(TNode<Map> map) { |
| return LoadObjectField<Uint16T>(map, Map::kInstanceTypeOffset); |
| } |
| |
| TNode<Int32T> CodeStubAssembler::LoadMapElementsKind(TNode<Map> map) { |
| TNode<Int32T> bit_field2 = LoadMapBitField2(map); |
| return Signed(DecodeWord32<Map::Bits2::ElementsKindBits>(bit_field2)); |
| } |
| |
| TNode<Int32T> CodeStubAssembler::LoadElementsKind(TNode<HeapObject> object) { |
| return LoadMapElementsKind(LoadMap(object)); |
| } |
| |
| TNode<DescriptorArray> CodeStubAssembler::LoadMapDescriptors(TNode<Map> map) { |
| return LoadObjectField<DescriptorArray>(map, Map::kInstanceDescriptorsOffset); |
| } |
| |
| TNode<HeapObject> CodeStubAssembler::LoadMapPrototype(TNode<Map> map) { |
| return LoadObjectField<HeapObject>(map, Map::kPrototypeOffset); |
| } |
| |
| TNode<IntPtrT> CodeStubAssembler::LoadMapInstanceSizeInWords(TNode<Map> map) { |
| return ChangeInt32ToIntPtr( |
| LoadObjectField<Uint8T>(map, Map::kInstanceSizeInWordsOffset)); |
| } |
| |
| TNode<IntPtrT> CodeStubAssembler::LoadMapInobjectPropertiesStartInWords( |
| TNode<Map> map) { |
| // See Map::GetInObjectPropertiesStartInWords() for details. |
| CSA_DCHECK(this, IsJSObjectMap(map)); |
| return ChangeInt32ToIntPtr(LoadObjectField<Uint8T>( |
| map, Map::kInobjectPropertiesStartOrConstructorFunctionIndexOffset)); |
| } |
| |
| TNode<IntPtrT> CodeStubAssembler::LoadMapConstructorFunctionIndex( |
| TNode<Map> map) { |
| // See Map::GetConstructorFunctionIndex() for details. |
| CSA_DCHECK(this, IsPrimitiveInstanceType(LoadMapInstanceType(map))); |
| return ChangeInt32ToIntPtr(LoadObjectField<Uint8T>( |
| map, Map::kInobjectPropertiesStartOrConstructorFunctionIndexOffset)); |
| } |
| |
| TNode<Object> CodeStubAssembler::LoadMapConstructor(TNode<Map> map) { |
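| // Follow back pointers: while the field holds a map, keep loading the same
| // field from that map, and return the first value found that is not a map.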
| TVARIABLE(Object, result, |
| LoadObjectField( |
| map, Map::kConstructorOrBackPointerOrNativeContextOffset)); |
| |
| Label done(this), loop(this, &result); |
| Goto(&loop); |
| BIND(&loop); |
| { |
| GotoIf(TaggedIsSmi(result.value()), &done); |
| TNode<BoolT> is_map_type = |
| InstanceTypeEqual(LoadInstanceType(CAST(result.value())), MAP_TYPE); |
| GotoIfNot(is_map_type, &done); |
| result = |
| LoadObjectField(CAST(result.value()), |
| Map::kConstructorOrBackPointerOrNativeContextOffset); |
| Goto(&loop); |
| } |
| BIND(&done); |
| return result.value(); |
| } |
| |
| TNode<WordT> CodeStubAssembler::LoadMapEnumLength(TNode<Map> map) { |
| TNode<Uint32T> bit_field3 = LoadMapBitField3(map); |
| return DecodeWordFromWord32<Map::Bits3::EnumLengthBits>(bit_field3); |
| } |
| |
| TNode<Object> CodeStubAssembler::LoadMapBackPointer(TNode<Map> map) { |
| TNode<HeapObject> object = CAST(LoadObjectField( |
| map, Map::kConstructorOrBackPointerOrNativeContextOffset)); |
| return Select<Object>( |
| IsMap(object), [=] { return object; }, |
| [=] { return UndefinedConstant(); }); |
| } |
| |
| TNode<Uint32T> CodeStubAssembler::EnsureOnlyHasSimpleProperties( |
| TNode<Map> map, TNode<Int32T> instance_type, Label* bailout) { |
| // This check can have false positives, since it applies to any |
| // JSPrimitiveWrapper type. |
| GotoIf(IsCustomElementsReceiverInstanceType(instance_type), bailout); |
| |
| TNode<Uint32T> bit_field3 = LoadMapBitField3(map); |
| GotoIf(IsSetWord32(bit_field3, Map::Bits3::IsDictionaryMapBit::kMask), |
| bailout); |
| |
| return bit_field3; |
| } |
| |
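| // The identity hash of a JSReceiver is stored in different places depending
| // on its properties backing store: directly as a Smi in the
| // properties-or-hash field, in the length-and-hash word of a PropertyArray,
| // in the hash field of a SwissNameDictionary, in a dedicated slot of a
| // NameDictionary, or nowhere (kNoHashSentinel) when the backing store is a
| // plain FixedArray.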
| TNode<IntPtrT> CodeStubAssembler::LoadJSReceiverIdentityHash( |
| TNode<JSReceiver> receiver, Label* if_no_hash) { |
| TVARIABLE(IntPtrT, var_hash); |
| Label done(this), if_smi(this), if_property_array(this), |
| if_swiss_property_dictionary(this), if_property_dictionary(this), |
| if_fixed_array(this); |
| |
| TNode<Object> properties_or_hash = |
| LoadObjectField(receiver, JSReceiver::kPropertiesOrHashOffset); |
| GotoIf(TaggedIsSmi(properties_or_hash), &if_smi); |
| |
| TNode<HeapObject> properties = CAST(properties_or_hash); |
| TNode<Uint16T> properties_instance_type = LoadInstanceType(properties); |
| |
| GotoIf(InstanceTypeEqual(properties_instance_type, PROPERTY_ARRAY_TYPE), |
| &if_property_array); |
| if (V8_ENABLE_SWISS_NAME_DICTIONARY_BOOL) { |
| GotoIf( |
| InstanceTypeEqual(properties_instance_type, SWISS_NAME_DICTIONARY_TYPE), |
| &if_swiss_property_dictionary); |
| } |
| Branch(InstanceTypeEqual(properties_instance_type, NAME_DICTIONARY_TYPE), |
| &if_property_dictionary, &if_fixed_array); |
| |
| BIND(&if_fixed_array); |
| { |
| var_hash = IntPtrConstant(PropertyArray::kNoHashSentinel); |
| Goto(&done); |
| } |
| |
| BIND(&if_smi); |
| { |
| var_hash = SmiUntag(CAST(properties_or_hash)); |
| Goto(&done); |
| } |
| |
| BIND(&if_property_array); |
| { |
| TNode<IntPtrT> length_and_hash = LoadAndUntagObjectField( |
| properties, PropertyArray::kLengthAndHashOffset); |
| var_hash = Signed(DecodeWord<PropertyArray::HashField>(length_and_hash)); |
| Goto(&done); |
| } |
| if (V8_ENABLE_SWISS_NAME_DICTIONARY_BOOL) { |
| BIND(&if_swiss_property_dictionary); |
| { |
| var_hash = Signed( |
| ChangeUint32ToWord(LoadSwissNameDictionaryHash(CAST(properties)))); |
| Goto(&done); |
| } |
| } |
| |
| BIND(&if_property_dictionary); |
| { |
| var_hash = SmiUntag(CAST(LoadFixedArrayElement( |
| CAST(properties), NameDictionary::kObjectHashIndex))); |
| Goto(&done); |
| } |
| |
| BIND(&done); |
| if (if_no_hash != nullptr) { |
| GotoIf(IntPtrEqual(var_hash.value(), |
| IntPtrConstant(PropertyArray::kNoHashSentinel)), |
| if_no_hash); |
| } |
| return var_hash.value(); |
| } |
| |
| TNode<Uint32T> CodeStubAssembler::LoadNameHashAssumeComputed(TNode<Name> name) { |
| TNode<Uint32T> hash_field = LoadNameRawHashField(name); |
| CSA_DCHECK(this, IsClearWord32(hash_field, Name::kHashNotComputedMask)); |
| return DecodeWord32<Name::HashBits>(hash_field); |
| } |
| |
| TNode<Uint32T> CodeStubAssembler::LoadNameHash(TNode<Name> name, |
| Label* if_hash_not_computed) { |
| TNode<Uint32T> raw_hash_field = LoadNameRawHashField(name); |
| if (if_hash_not_computed != nullptr) { |
| GotoIf(IsSetWord32(raw_hash_field, Name::kHashNotComputedMask), |
| if_hash_not_computed); |
| } |
| return DecodeWord32<Name::HashBits>(raw_hash_field); |
| } |
| |
| TNode<Smi> CodeStubAssembler::LoadStringLengthAsSmi(TNode<String> string) { |
| return SmiFromIntPtr(LoadStringLengthAsWord(string)); |
| } |
| |
| TNode<IntPtrT> CodeStubAssembler::LoadStringLengthAsWord(TNode<String> string) { |
| return Signed(ChangeUint32ToWord(LoadStringLengthAsWord32(string))); |
| } |
| |
| TNode<Uint32T> CodeStubAssembler::LoadStringLengthAsWord32( |
| TNode<String> string) { |
| return LoadObjectField<Uint32T>(string, String::kLengthOffset); |
| } |
| |
| TNode<Object> CodeStubAssembler::LoadJSPrimitiveWrapperValue( |
| TNode<JSPrimitiveWrapper> object) { |
| return LoadObjectField(object, JSPrimitiveWrapper::kValueOffset); |
| } |
| |
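| // Dispatches on the kind of {maybe_object}: Smi, cleared weak reference,
| // strong reference, or (non-cleared) weak reference. For the Smi, strong,
| // and weak cases *extracted receives the unwrapped object.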
| void CodeStubAssembler::DispatchMaybeObject(TNode<MaybeObject> maybe_object, |
| Label* if_smi, Label* if_cleared, |
| Label* if_weak, Label* if_strong, |
| TVariable<Object>* extracted) { |
| Label inner_if_smi(this), inner_if_strong(this); |
| |
| GotoIf(TaggedIsSmi(maybe_object), &inner_if_smi); |
| |
| GotoIf(IsCleared(maybe_object), if_cleared); |
| |
| GotoIf(IsStrong(maybe_object), &inner_if_strong); |
| |
| *extracted = GetHeapObjectAssumeWeak(maybe_object); |
| Goto(if_weak); |
| |
| BIND(&inner_if_smi); |
| *extracted = CAST(maybe_object); |
| Goto(if_smi); |
| |
| BIND(&inner_if_strong); |
| *extracted = CAST(maybe_object); |
| Goto(if_strong); |
| } |
| |
| void CodeStubAssembler::DcheckHasValidMap(TNode<HeapObject> object) { |
| #ifdef V8_MAP_PACKING |
| // Check that the map is unpacked and is a valid map.
| CSA_DCHECK(this, IsMap(LoadMap(object))); |
| #endif |
| } |
| |
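| // The predicates below classify a MaybeObject by its low tag bits: a strong
| // reference carries kHeapObjectTag, a weak reference carries
| // kWeakHeapObjectTag, and a cleared weak reference is recognized by its
| // lower 32 bits being kClearedWeakHeapObjectLower32. Inspecting the lower 32
| // bits is sufficient because the tag occupies the least significant bits.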
| TNode<BoolT> CodeStubAssembler::IsStrong(TNode<MaybeObject> value) { |
| return Word32Equal(Word32And(TruncateIntPtrToInt32( |
| BitcastTaggedToWordForTagAndSmiBits(value)), |
| Int32Constant(kHeapObjectTagMask)), |
| Int32Constant(kHeapObjectTag)); |
| } |
| |
| TNode<HeapObject> CodeStubAssembler::GetHeapObjectIfStrong( |
| TNode<MaybeObject> value, Label* if_not_strong) { |
| GotoIfNot(IsStrong(value), if_not_strong); |
| return CAST(value); |
| } |
| |
| TNode<BoolT> CodeStubAssembler::IsWeakOrCleared(TNode<MaybeObject> value) { |
| return Word32Equal(Word32And(TruncateIntPtrToInt32( |
| BitcastTaggedToWordForTagAndSmiBits(value)), |
| Int32Constant(kHeapObjectTagMask)), |
| Int32Constant(kWeakHeapObjectTag)); |
| } |
| |
| TNode<BoolT> CodeStubAssembler::IsCleared(TNode<MaybeObject> value) { |
| return Word32Equal(TruncateIntPtrToInt32(BitcastMaybeObjectToWord(value)), |
| Int32Constant(kClearedWeakHeapObjectLower32)); |
| } |
| |
| TNode<HeapObject> CodeStubAssembler::GetHeapObjectAssumeWeak( |
| TNode<MaybeObject> value) { |
| CSA_DCHECK(this, IsWeakOrCleared(value)); |
| CSA_DCHECK(this, IsNotCleared(value)); |
| return UncheckedCast<HeapObject>(BitcastWordToTagged(WordAnd( |
| BitcastMaybeObjectToWord(value), IntPtrConstant(~kWeakHeapObjectMask)))); |
| } |
| |
| TNode<HeapObject> CodeStubAssembler::GetHeapObjectAssumeWeak( |
| TNode<MaybeObject> value, Label* if_cleared) { |
| GotoIf(IsCleared(value), if_cleared); |
| return GetHeapObjectAssumeWeak(value); |
| } |
| |
| // This version generates |
| // (maybe_object & ~mask) == value |
| // It works for non-Smi |maybe_object| and for both Smi and HeapObject values |
| // but requires a big constant for ~mask. |
| TNode<BoolT> CodeStubAssembler::IsWeakReferenceToObject( |
| TNode<MaybeObject> maybe_object, TNode<Object> value) { |
| CSA_DCHECK(this, TaggedIsNotSmi(maybe_object)); |
| if (COMPRESS_POINTERS_BOOL) { |
| return Word32Equal( |
| Word32And(TruncateWordToInt32(BitcastMaybeObjectToWord(maybe_object)), |
| Uint32Constant(~static_cast<uint32_t>(kWeakHeapObjectMask))), |
| TruncateWordToInt32(BitcastTaggedToWord(value))); |
| } else { |
| return WordEqual(WordAnd(BitcastMaybeObjectToWord(maybe_object), |
| IntPtrConstant(~kWeakHeapObjectMask)), |
| BitcastTaggedToWord(value)); |
| } |
| } |
| |
| // This version generates |
| // maybe_object == (heap_object | mask) |
| // It works for any |maybe_object| value and generates better code because it
| // uses a small constant for the mask.
| TNode<BoolT> CodeStubAssembler::IsWeakReferenceTo( |
| TNode<MaybeObject> maybe_object, TNode<HeapObject> heap_object) { |
| if (COMPRESS_POINTERS_BOOL) { |
| return Word32Equal( |
| TruncateWordToInt32(BitcastMaybeObjectToWord(maybe_object)), |
| Word32Or(TruncateWordToInt32(BitcastTaggedToWord(heap_object)), |
| Int32Constant(kWeakHeapObjectMask))); |
| } else { |
| return WordEqual(BitcastMaybeObjectToWord(maybe_object), |
| WordOr(BitcastTaggedToWord(heap_object), |
| IntPtrConstant(kWeakHeapObjectMask))); |
| } |
| } |
| |
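| // Tags {value} as a weak reference by setting the weak tag bit.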
| TNode<MaybeObject> CodeStubAssembler::MakeWeak(TNode<HeapObject> value) { |
| return ReinterpretCast<MaybeObject>(BitcastWordToTagged( |
| WordOr(BitcastTaggedToWord(value), IntPtrConstant(kWeakHeapObjectTag)))); |
| } |
| |
| template <> |
| TNode<IntPtrT> CodeStubAssembler::LoadArrayLength(TNode<FixedArray> array) { |
| return LoadAndUntagFixedArrayBaseLength(array); |
| } |
| |
| template <> |
| TNode<IntPtrT> CodeStubAssembler::LoadArrayLength(TNode<WeakFixedArray> array) { |
| return LoadAndUntagWeakFixedArrayLength(array); |
| } |
| |
| template <> |
| TNode<IntPtrT> CodeStubAssembler::LoadArrayLength(TNode<PropertyArray> array) { |
| return LoadPropertyArrayLength(array); |
| } |
| |
| template <> |
| TNode<IntPtrT> CodeStubAssembler::LoadArrayLength( |
| TNode<DescriptorArray> array) { |
| return IntPtrMul(ChangeInt32ToIntPtr(LoadNumberOfDescriptors(array)), |
| IntPtrConstant(DescriptorArray::kEntrySize)); |
| } |
| |
| template <> |
| TNode<IntPtrT> CodeStubAssembler::LoadArrayLength( |
| TNode<TransitionArray> array) { |
| return LoadAndUntagWeakFixedArrayLength(array); |
| } |
| |
| template <typename Array, typename TIndex, typename TValue> |
| TNode<TValue> CodeStubAssembler::LoadArrayElement(TNode<Array> array, |
| int array_header_size, |
| TNode<TIndex> index_node, |
| int additional_offset) { |
| // TODO(v8:9708): Do we want to keep both IntPtrT and UintPtrT variants? |
| static_assert(std::is_same<TIndex, Smi>::value || |
| std::is_same<TIndex, UintPtrT>::value || |
| std::is_same<TIndex, IntPtrT>::value, |
| "Only Smi, UintPtrT or IntPtrT indices are allowed"); |
| CSA_DCHECK(this, IntPtrGreaterThanOrEqual(ParameterToIntPtr(index_node), |
| IntPtrConstant(0))); |
| DCHECK(IsAligned(additional_offset, kTaggedSize)); |
| int32_t header_size = array_header_size + additional_offset - kHeapObjectTag; |
| TNode<IntPtrT> offset = |
| ElementOffsetFromIndex(index_node, HOLEY_ELEMENTS, header_size); |
| CSA_DCHECK(this, IsOffsetInBounds(offset, LoadArrayLength(array), |
| array_header_size)); |
| constexpr MachineType machine_type = MachineTypeOf<TValue>::value; |
| return UncheckedCast<TValue>(LoadFromObject(machine_type, array, offset)); |
| } |
| |
| template V8_EXPORT_PRIVATE TNode<MaybeObject> |
| CodeStubAssembler::LoadArrayElement<TransitionArray, IntPtrT>( |
| TNode<TransitionArray>, int, TNode<IntPtrT>, int); |
| |
| template <typename TIndex> |
| TNode<Object> CodeStubAssembler::LoadFixedArrayElement( |
| TNode<FixedArray> object, TNode<TIndex> index, int additional_offset, |
| CheckBounds check_bounds) { |
| // TODO(v8:9708): Do we want to keep both IntPtrT and UintPtrT variants? |
| static_assert(std::is_same<TIndex, Smi>::value || |
| std::is_same<TIndex, UintPtrT>::value || |
| std::is_same<TIndex, IntPtrT>::value, |
| "Only Smi, UintPtrT or IntPtrT indexes are allowed"); |
| CSA_DCHECK(this, IsFixedArraySubclass(object)); |
| CSA_DCHECK(this, IsNotWeakFixedArraySubclass(object)); |
| |
| if (NeedsBoundsCheck(check_bounds)) { |
| FixedArrayBoundsCheck(object, index, additional_offset); |
| } |
| TNode<MaybeObject> element = LoadArrayElement(object, FixedArray::kHeaderSize, |
| index, additional_offset); |
| return CAST(element); |
| } |
| |
| template V8_EXPORT_PRIVATE TNode<Object> |
| CodeStubAssembler::LoadFixedArrayElement<Smi>(TNode<FixedArray>, TNode<Smi>, |
| int, CheckBounds); |
| template V8_EXPORT_PRIVATE TNode<Object> |
| CodeStubAssembler::LoadFixedArrayElement<UintPtrT>(TNode<FixedArray>, |
| TNode<UintPtrT>, int, |
| CheckBounds); |
| template V8_EXPORT_PRIVATE TNode<Object> |
| CodeStubAssembler::LoadFixedArrayElement<IntPtrT>(TNode<FixedArray>, |
| TNode<IntPtrT>, int, |
| CheckBounds); |
| |
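| // The bounds checks below fold the constant {additional_offset} (given in
| // bytes) into the index, in element units, and compare the result against
| // the array length. They are only emitted when FLAG_fixed_array_bounds_checks
| // is enabled.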
| void CodeStubAssembler::FixedArrayBoundsCheck(TNode<FixedArrayBase> array, |
| TNode<Smi> index, |
| int additional_offset) { |
| if (!FLAG_fixed_array_bounds_checks) return; |
| DCHECK(IsAligned(additional_offset, kTaggedSize)); |
| TNode<Smi> effective_index; |
| Smi constant_index; |
| bool index_is_constant = TryToSmiConstant(index, &constant_index); |
| if (index_is_constant) { |
| effective_index = SmiConstant(Smi::ToInt(constant_index) + |
| additional_offset / kTaggedSize); |
| } else { |
| effective_index = |
| SmiAdd(index, SmiConstant(additional_offset / kTaggedSize)); |
| } |
| CSA_CHECK(this, SmiBelow(effective_index, LoadFixedArrayBaseLength(array))); |
| } |
| |
| void CodeStubAssembler::FixedArrayBoundsCheck(TNode<FixedArrayBase> array, |
| TNode<IntPtrT> index, |
| int additional_offset) { |
| if (!FLAG_fixed_array_bounds_checks) return; |
| DCHECK(IsAligned(additional_offset, kTaggedSize)); |
| // IntPtrAdd does constant-folding automatically. |
| TNode<IntPtrT> effective_index = |
| IntPtrAdd(index, IntPtrConstant(additional_offset / kTaggedSize)); |
| CSA_CHECK(this, UintPtrLessThan(effective_index, |
| LoadAndUntagFixedArrayBaseLength(array))); |
| } |
| |
| TNode<Object> CodeStubAssembler::LoadPropertyArrayElement( |
| TNode<PropertyArray> object, TNode<IntPtrT> index) { |
| int additional_offset = 0; |
| return CAST(LoadArrayElement(object, PropertyArray::kHeaderSize, index, |
| additional_offset)); |
| } |
| |
| TNode<IntPtrT> CodeStubAssembler::LoadPropertyArrayLength( |
| TNode<PropertyArray> object) { |
| TNode<IntPtrT> value = |
| LoadAndUntagObjectField(object, PropertyArray::kLengthAndHashOffset); |
| return Signed(DecodeWord<PropertyArray::LengthField>(value)); |
| } |
| |
| TNode<RawPtrT> CodeStubAssembler::LoadJSTypedArrayDataPtr( |
| TNode<JSTypedArray> typed_array) { |
| // Data pointer = external_pointer + static_cast<Tagged_t>(base_pointer). |
| TNode<RawPtrT> external_pointer = |
| LoadJSTypedArrayExternalPointerPtr(typed_array); |
| |
| TNode<IntPtrT> base_pointer; |
| if (COMPRESS_POINTERS_BOOL) { |
| TNode<Int32T> compressed_base = |
| LoadObjectField<Int32T>(typed_array, JSTypedArray::kBasePointerOffset); |
| // Zero-extend TaggedT to WordT according to the current compression scheme
| // so that the addition with |external_pointer| (which already contains the
| // compensated offset value) below will decompress the tagged value.
| // See JSTypedArray::ExternalPointerCompensationForOnHeapArray() for
| // details.
| base_pointer = Signed(ChangeUint32ToWord(compressed_base)); |
| } else { |
| base_pointer = |
| LoadObjectField<IntPtrT>(typed_array, JSTypedArray::kBasePointerOffset); |
| } |
| return RawPtrAdd(external_pointer, base_pointer); |
| } |
| |
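| // On 64-bit targets the element is loaded as a single word; on 32-bit
| // targets it is loaded as two 32-bit halves in the target's byte order and
| // combined into a BigInt.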
| TNode<BigInt> CodeStubAssembler::LoadFixedBigInt64ArrayElementAsTagged( |
| TNode<RawPtrT> data_pointer, TNode<IntPtrT> offset) { |
| if (Is64()) { |
| TNode<IntPtrT> value = Load<IntPtrT>(data_pointer, offset); |
| return BigIntFromInt64(value); |
| } else { |
| DCHECK(!Is64()); |
| #if defined(V8_TARGET_BIG_ENDIAN) |
| TNode<IntPtrT> high = Load<IntPtrT>(data_pointer, offset); |
| TNode<IntPtrT> low = Load<IntPtrT>( |
| data_pointer, IntPtrAdd(offset, IntPtrConstant(kSystemPointerSize))); |
| #else |
| TNode<IntPtrT> low = Load<IntPtrT>(data_pointer, offset); |
| TNode<IntPtrT> high = Load<IntPtrT>( |
| data_pointer, IntPtrAdd(offset, IntPtrConstant(kSystemPointerSize))); |
| #endif |
| return BigIntFromInt32Pair(low, high); |
| } |
| } |
| |
| TNode<BigInt> CodeStubAssembler::BigIntFromInt32Pair(TNode<IntPtrT> low, |
| TNode<IntPtrT> high) { |
| DCHECK(!Is64()); |
| TVARIABLE(BigInt, var_result); |
| TVARIABLE(Word32T, var_sign, Int32Constant(BigInt::SignBits::encode(false))); |
| TVARIABLE(IntPtrT, var_high, high); |
| TVARIABLE(IntPtrT, var_low, low); |
| Label high_zero(this), negative(this), allocate_one_digit(this), |
| allocate_two_digits(this), if_zero(this), done(this); |
| |
| GotoIf(IntPtrEqual(var_high.value(), IntPtrConstant(0)), &high_zero); |
| Branch(IntPtrLessThan(var_high.value(), IntPtrConstant(0)), &negative, |
| &allocate_two_digits); |
| |
| BIND(&high_zero); |
| Branch(IntPtrEqual(var_low.value(), IntPtrConstant(0)), &if_zero, |
| &allocate_one_digit); |
| |
| BIND(&negative); |
| { |
| var_sign = Int32Constant(BigInt::SignBits::encode(true)); |
| // We must negate the value by computing "0 - (high|low)", performing |
| // both parts of the subtraction separately and manually taking care |
| // of the carry bit (which is 1 iff low != 0). |
| var_high = IntPtrSub(IntPtrConstant(0), var_high.value()); |
| Label carry(this), no_carry(this); |
| Branch(IntPtrEqual(var_low.value(), IntPtrConstant(0)), &no_carry, &carry); |
| BIND(&carry); |
| var_high = IntPtrSub(var_high.value(), IntPtrConstant(1)); |
| Goto(&no_carry); |
| BIND(&no_carry); |
| var_low = IntPtrSub(IntPtrConstant(0), var_low.value()); |
| // var_high was non-zero going into this block, but subtracting the |
| // carry bit from it could bring us back onto the "one digit" path. |
| Branch(IntPtrEqual(var_high.value(), IntPtrConstant(0)), |
| &allocate_one_digit, &allocate_two_digits); |
| } |
| |
| BIND(&allocate_one_digit); |
| { |
| var_result = AllocateRawBigInt(IntPtrConstant(1)); |
| StoreBigIntBitfield(var_result.value(), |
| Word32Or(var_sign.value(), |
| Int32Constant(BigInt::LengthBits::encode(1)))); |
| StoreBigIntDigit(var_result.value(), 0, Unsigned(var_low.value())); |
| Goto(&done); |
| } |
| |
| BIND(&allocate_two_digits); |
| { |
| var_result = AllocateRawBigInt(IntPtrConstant(2)); |
| StoreBigIntBitfield(var_result.value(), |
| Word32Or(var_sign.value(), |
| Int32Constant(BigInt::LengthBits::encode(2)))); |
| StoreBigIntDigit(var_result.value(), 0, Unsigned(var_low.value())); |
| StoreBigIntDigit(var_result.value(), 1, Unsigned(var_high.value())); |
| Goto(&done); |
| } |
| |
| BIND(&if_zero); |
| var_result = AllocateBigInt(IntPtrConstant(0)); |
| Goto(&done); |
| |
| BIND(&done); |
| return var_result.value(); |
| } |
| |
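| // Builds a BigInt from a 64-bit signed value: zero yields the canonical zero
| // BigInt, and any other value yields a one-digit BigInt that stores the sign
| // bit and the magnitude separately.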
| TNode<BigInt> CodeStubAssembler::BigIntFromInt64(TNode<IntPtrT> value) { |
| DCHECK(Is64()); |
| TVARIABLE(BigInt, var_result); |
| Label done(this), if_positive(this), if_negative(this), if_zero(this); |
| GotoIf(IntPtrEqual(value, IntPtrConstant(0)), &if_zero); |
| var_result = AllocateRawBigInt(IntPtrConstant(1)); |
| Branch(IntPtrGreaterThan(value, IntPtrConstant(0)), &if_positive, |
| &if_negative); |
| |
| BIND(&if_positive); |
| { |
| StoreBigIntBitfield(var_result.value(), |
| Int32Constant(BigInt::SignBits::encode(false) | |
| BigInt::LengthBits::encode(1))); |
| StoreBigIntDigit(var_result.value(), 0, Unsigned(value)); |
| Goto(&done); |
| } |
| |
| BIND(&if_negative); |
| { |
| StoreBigIntBitfield(var_result.value(), |
| Int32Constant(BigInt::SignBits::encode(true) | |
| BigInt::LengthBits::encode(1))); |
| StoreBigIntDigit(var_result.value(), 0, |
| Unsigned(IntPtrSub(IntPtrConstant(0), value))); |
| Goto(&done); |
| } |
| |
| BIND(&if_zero); |
| { |
| var_result = AllocateBigInt(IntPtrConstant(0)); |
| Goto(&done); |
| } |
| |
| BIND(&done); |
| return var_result.value(); |
| } |
| |
| TNode<BigInt> CodeStubAssembler::LoadFixedBigUint64ArrayElementAsTagged( |
| TNode<RawPtrT> data_pointer, TNode<IntPtrT> offset) { |
| Label if_zero(this), done(this); |
| if (Is64()) { |
| TNode<UintPtrT> value = Load<UintPtrT>(data_pointer, offset); |
| return BigIntFromUint64(value); |
| } else { |
| DCHECK(!Is64()); |
| #if defined(V8_TARGET_BIG_ENDIAN) |
| TNode<UintPtrT> high = Load<UintPtrT>(data_pointer, offset); |
| TNode<UintPtrT> low = Load<UintPtrT>( |
| data_pointer, IntPtrAdd(offset, IntPtrConstant(kSystemPointerSize))); |
| #else |
| TNode<UintPtrT> low = Load<UintPtrT>(data_pointer, offset); |
| TNode<UintPtrT> high = Load<UintPtrT>( |
| data_pointer, IntPtrAdd(offset, IntPtrConstant(kSystemPointerSize))); |
| #endif |
| return BigIntFromUint32Pair(low, high); |
| } |
| } |
| |
| TNode<BigInt> CodeStubAssembler::BigIntFromUint32Pair(TNode<UintPtrT> low, |
| TNode<UintPtrT> high) { |
| DCHECK(!Is64()); |
| TVARIABLE(BigInt, var_result); |
| Label high_zero(this), if_zero(this), done(this); |
| |
| GotoIf(IntPtrEqual(high, IntPtrConstant(0)), &high_zero); |
| var_result = AllocateBigInt(IntPtrConstant(2)); |
| StoreBigIntDigit(var_result.value(), 0, low); |
| StoreBigIntDigit(var_result.value(), 1, high); |
| Goto(&done); |
| |
| BIND(&high_zero); |
| GotoIf(IntPtrEqual(low, IntPtrConstant(0)), &if_zero); |
| var_result = AllocateBigInt(IntPtrConstant(1)); |
| StoreBigIntDigit(var_result.value(), 0, low); |
| Goto(&done); |
| |
| BIND(&if_zero); |
| var_result = AllocateBigInt(IntPtrConstant(0)); |
| Goto(&done); |
| |
| BIND(&done); |
| return var_result.value(); |
| } |
| |
| TNode<BigInt> CodeStubAssembler::BigIntFromUint64(TNode<UintPtrT> value) { |
| DCHECK(Is64()); |
| TVARIABLE(BigInt, var_result); |
| Label done(this), if_zero(this); |
| GotoIf(IntPtrEqual(value, IntPtrConstant(0)), &if_zero); |
| var_result = AllocateBigInt(IntPtrConstant(1)); |
| StoreBigIntDigit(var_result.value(), 0, value); |
| Goto(&done); |
| |
| BIND(&if_zero); |
| var_result = AllocateBigInt(IntPtrConstant(0)); |
| Goto(&done); |
| BIND(&done); |
| return var_result.value(); |
| } |
| |
| TNode<Numeric> CodeStubAssembler::LoadFixedTypedArrayElementAsTagged( |
| TNode<RawPtrT> data_pointer, TNode<UintPtrT> index, |
| ElementsKind elements_kind) { |
| TNode<IntPtrT> offset = |
| ElementOffsetFromIndex(Signed(index), elements_kind, 0); |
| switch (elements_kind) { |
| case UINT8_ELEMENTS: /* fall through */ |
| case UINT8_CLAMPED_ELEMENTS: |
| return SmiFromInt32(Load<Uint8T>(data_pointer, offset)); |
| case INT8_ELEMENTS: |
| return SmiFromInt32(Load<Int8T>(data_pointer, offset)); |
| case UINT16_ELEMENTS: |
| return SmiFromInt32(Load<Uint16T>(data_pointer, offset)); |
| case INT16_ELEMENTS: |
| return SmiFromInt32(Load<Int16T>(data_pointer, offset)); |
| case UINT32_ELEMENTS: |
| return ChangeUint32ToTagged(Load<Uint32T>(data_pointer, offset)); |
| case INT32_ELEMENTS: |
| return ChangeInt32ToTagged(Load<Int32T>(data_pointer, offset)); |
| case FLOAT32_ELEMENTS: |
| return AllocateHeapNumberWithValue( |
| ChangeFloat32ToFloat64(Load<Float32T>(data_pointer, offset))); |
| case FLOAT64_ELEMENTS: |
| return AllocateHeapNumberWithValue(Load<Float64T>(data_pointer, offset)); |
| case BIGINT64_ELEMENTS: |
| return LoadFixedBigInt64ArrayElementAsTagged(data_pointer, offset); |
| case BIGUINT64_ELEMENTS: |
| return LoadFixedBigUint64ArrayElementAsTagged(data_pointer, offset); |
| default: |
| UNREACHABLE(); |
| } |
| } |
| |
| TNode<Numeric> CodeStubAssembler::LoadFixedTypedArrayElementAsTagged( |
| TNode<RawPtrT> data_pointer, TNode<UintPtrT> index, |
| TNode<Int32T> elements_kind) { |
| TVARIABLE(Numeric, var_result); |
| Label done(this), if_unknown_type(this, Label::kDeferred); |
| int32_t elements_kinds[] = { |
| #define TYPED_ARRAY_CASE(Type, type, TYPE, ctype) TYPE##_ELEMENTS, |
| TYPED_ARRAYS(TYPED_ARRAY_CASE) RAB_GSAB_TYPED_ARRAYS(TYPED_ARRAY_CASE) |
| #undef TYPED_ARRAY_CASE |
| }; |
| |
| #define TYPED_ARRAY_CASE(Type, type, TYPE, ctype) Label if_##type##array(this); |
| TYPED_ARRAYS(TYPED_ARRAY_CASE) |
| #undef TYPED_ARRAY_CASE |
| |
| Label* elements_kind_labels[] = { |
| #define TYPED_ARRAY_CASE(Type, type, TYPE, ctype) &if_##type##array, |
| TYPED_ARRAYS(TYPED_ARRAY_CASE) |
| // The same labels again for RAB / GSAB. We dispatch RAB / GSAB elements |
| // kinds to the corresponding non-RAB / GSAB elements kinds. |
| TYPED_ARRAYS(TYPED_ARRAY_CASE) |
| #undef TYPED_ARRAY_CASE |
| }; |
| STATIC_ASSERT(arraysize(elements_kinds) == arraysize(elements_kind_labels)); |
| |
| Switch(elements_kind, &if_unknown_type, elements_kinds, elements_kind_labels, |
| arraysize(elements_kinds)); |
| |
| BIND(&if_unknown_type); |
| Unreachable(); |
| |
| #define TYPED_ARRAY_CASE(Type, type, TYPE, ctype) \ |
| BIND(&if_##type##array); \ |
| { \ |
| var_result = LoadFixedTypedArrayElementAsTagged(data_pointer, index, \ |
| TYPE##_ELEMENTS); \ |
| Goto(&done); \ |
| } |
| TYPED_ARRAYS(TYPED_ARRAY_CASE) |
| #undef TYPED_ARRAY_CASE |
| |
| BIND(&done); |
| return var_result.value(); |
| } |
| |
| template <typename TIndex> |
| TNode<MaybeObject> CodeStubAssembler::LoadFeedbackVectorSlot( |
| TNode<FeedbackVector> feedback_vector, TNode<TIndex> slot, |
| int additional_offset) { |
| int32_t header_size = FeedbackVector::kRawFeedbackSlotsOffset + |
| additional_offset - kHeapObjectTag; |
| TNode<IntPtrT> offset = |
| ElementOffsetFromIndex(slot, HOLEY_ELEMENTS, header_size); |
| CSA_SLOW_DCHECK( |
| this, IsOffsetInBounds(offset, LoadFeedbackVectorLength(feedback_vector), |
| FeedbackVector::kHeaderSize)); |
| return Load<MaybeObject>(feedback_vector, offset); |
| } |
| |
| template TNode<MaybeObject> CodeStubAssembler::LoadFeedbackVectorSlot( |
| TNode<FeedbackVector> feedback_vector, TNode<TaggedIndex> slot, |
| int additional_offset); |
| template TNode<MaybeObject> CodeStubAssembler::LoadFeedbackVectorSlot( |
| TNode<FeedbackVector> feedback_vector, TNode<IntPtrT> slot, |
| int additional_offset); |
| template TNode<MaybeObject> CodeStubAssembler::LoadFeedbackVectorSlot( |
| TNode<FeedbackVector> feedback_vector, TNode<UintPtrT> slot, |
| int additional_offset); |
| |
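| // As in LoadAndUntagToWord32ObjectField, 32-bit Smi payloads are read
| // directly from the upper half of the tagged word (offset + 4 on
| // little-endian targets).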
| template <typename Array> |
| TNode<Int32T> CodeStubAssembler::LoadAndUntagToWord32ArrayElement( |
| TNode<Array> object, int array_header_size, TNode<IntPtrT> index, |
| int additional_offset) { |
| DCHECK(IsAligned(additional_offset, kTaggedSize)); |
| int endian_correction = 0; |
| #if V8_TARGET_LITTLE_ENDIAN |
| if (SmiValuesAre32Bits()) endian_correction = 4; |
| #endif |
| int32_t header_size = array_header_size + additional_offset - kHeapObjectTag + |
| endian_correction; |
| TNode<IntPtrT> offset = |
| ElementOffsetFromIndex(index, HOLEY_ELEMENTS, header_size); |
| CSA_DCHECK(this, IsOffsetInBounds(offset, LoadArrayLength(object), |
| array_header_size + endian_correction)); |
| if (SmiValuesAre32Bits()) { |
| return Load<Int32T>(object, offset); |
| } else { |
| return SmiToInt32(Load<Smi>(object, offset)); |
| } |
| } |
| |
| TNode<Int32T> CodeStubAssembler::LoadAndUntagToWord32FixedArrayElement( |
| TNode<FixedArray> object, TNode<IntPtrT> index, int additional_offset) { |
| CSA_SLOW_DCHECK(this, IsFixedArraySubclass(object)); |
| return LoadAndUntagToWord32ArrayElement(object, FixedArray::kHeaderSize, |
| index, additional_offset); |
| } |
| |
| TNode<MaybeObject> CodeStubAssembler::LoadWeakFixedArrayElement( |
| TNode<WeakFixedArray> object, TNode<IntPtrT> index, int additional_offset) { |
| return LoadArrayElement(object, WeakFixedArray::kHeaderSize, index, |
| additional_offset); |
| } |
| |
| TNode<Float64T> CodeStubAssembler::LoadFixedDoubleArrayElement( |
| TNode<FixedDoubleArray> object, TNode<IntPtrT> index, Label* if_hole, |
| MachineType machine_type) { |
| int32_t header_size = FixedDoubleArray::kHeaderSize - kHeapObjectTag; |
| TNode<IntPtrT> offset = |
| ElementOffsetFromIndex(index, HOLEY_DOUBLE_ELEMENTS, header_size); |
| CSA_DCHECK(this, IsOffsetInBounds( |
| offset, LoadAndUntagFixedArrayBaseLength(object), |
| FixedDoubleArray::kHeaderSize, HOLEY_DOUBLE_ELEMENTS)); |
| return LoadDoubleWithHoleCheck(object, offset, if_hole, machine_type); |
| } |
| |
| TNode<Object> CodeStubAssembler::LoadFixedArrayBaseElementAsTagged( |
| TNode<FixedArrayBase> elements, TNode<IntPtrT> index, |
| TNode<Int32T> elements_kind, Label* if_accessor, Label* if_hole) { |
| TVARIABLE(Object, var_result); |
| Label done(this), if_packed(this), if_holey(this), if_packed_double(this), |
| if_holey_double(this), if_dictionary(this, Label::kDeferred); |
| |
| int32_t kinds[] = { |