// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/v8.h"
#if V8_TARGET_ARCH_MIPS
#include "src/base/bits.h"
#include "src/bootstrapper.h"
#include "src/code-stubs.h"
#include "src/codegen.h"
#include "src/ic/handler-compiler.h"
#include "src/ic/ic.h"
#include "src/isolate.h"
#include "src/jsregexp.h"
#include "src/regexp-macro-assembler.h"
#include "src/runtime/runtime.h"
namespace v8 {
namespace internal {
static void InitializeArrayConstructorDescriptor(
Isolate* isolate, CodeStubDescriptor* descriptor,
int constant_stack_parameter_count) {
Address deopt_handler = Runtime::FunctionForId(
Runtime::kArrayConstructor)->entry;
if (constant_stack_parameter_count == 0) {
descriptor->Initialize(deopt_handler, constant_stack_parameter_count,
JS_FUNCTION_STUB_MODE);
} else {
descriptor->Initialize(a0, deopt_handler, constant_stack_parameter_count,
JS_FUNCTION_STUB_MODE, PASS_ARGUMENTS);
}
}
static void InitializeInternalArrayConstructorDescriptor(
Isolate* isolate, CodeStubDescriptor* descriptor,
int constant_stack_parameter_count) {
Address deopt_handler = Runtime::FunctionForId(
Runtime::kInternalArrayConstructor)->entry;
if (constant_stack_parameter_count == 0) {
descriptor->Initialize(deopt_handler, constant_stack_parameter_count,
JS_FUNCTION_STUB_MODE);
} else {
descriptor->Initialize(a0, deopt_handler, constant_stack_parameter_count,
JS_FUNCTION_STUB_MODE, PASS_ARGUMENTS);
}
}
void ArrayNoArgumentConstructorStub::InitializeDescriptor(
CodeStubDescriptor* descriptor) {
InitializeArrayConstructorDescriptor(isolate(), descriptor, 0);
}
void ArraySingleArgumentConstructorStub::InitializeDescriptor(
CodeStubDescriptor* descriptor) {
InitializeArrayConstructorDescriptor(isolate(), descriptor, 1);
}
void ArrayNArgumentsConstructorStub::InitializeDescriptor(
CodeStubDescriptor* descriptor) {
InitializeArrayConstructorDescriptor(isolate(), descriptor, -1);
}
void InternalArrayNoArgumentConstructorStub::InitializeDescriptor(
CodeStubDescriptor* descriptor) {
InitializeInternalArrayConstructorDescriptor(isolate(), descriptor, 0);
}
void InternalArraySingleArgumentConstructorStub::InitializeDescriptor(
CodeStubDescriptor* descriptor) {
InitializeInternalArrayConstructorDescriptor(isolate(), descriptor, 1);
}
void InternalArrayNArgumentsConstructorStub::InitializeDescriptor(
CodeStubDescriptor* descriptor) {
InitializeInternalArrayConstructorDescriptor(isolate(), descriptor, -1);
}
#define __ ACCESS_MASM(masm)
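// The "__" shorthand expands to ACCESS_MASM(masm), so the code-generation
// sequences below read like assembly listings.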
static void EmitIdenticalObjectComparison(MacroAssembler* masm,
Label* slow,
Condition cc);
static void EmitSmiNonsmiComparison(MacroAssembler* masm,
Register lhs,
Register rhs,
Label* rhs_not_nan,
Label* slow,
bool strict);
static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
Register lhs,
Register rhs);
void HydrogenCodeStub::GenerateLightweightMiss(MacroAssembler* masm,
ExternalReference miss) {
// Update the static counter each time a new code stub is generated.
isolate()->counters()->code_stubs()->Increment();
CallInterfaceDescriptor descriptor = GetCallInterfaceDescriptor();
int param_count = descriptor.GetEnvironmentParameterCount();
{
// Call the runtime system in a fresh internal frame.
FrameScope scope(masm, StackFrame::INTERNAL);
DCHECK(param_count == 0 ||
a0.is(descriptor.GetEnvironmentParameterRegister(param_count - 1)));
// Push arguments, adjust sp.
__ Subu(sp, sp, Operand(param_count * kPointerSize));
for (int i = 0; i < param_count; ++i) {
// Store argument to stack.
__ sw(descriptor.GetEnvironmentParameterRegister(i),
MemOperand(sp, (param_count - 1 - i) * kPointerSize));
}
__ CallExternalReference(miss, param_count);
}
__ Ret();
}
void DoubleToIStub::Generate(MacroAssembler* masm) {
Label out_of_range, only_low, negate, done;
Register input_reg = source();
Register result_reg = destination();
int double_offset = offset();
// Account for saved regs if input is sp.
if (input_reg.is(sp)) double_offset += 3 * kPointerSize;
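// (The three scratch registers pushed below move sp by 3 * kPointerSize.)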
Register scratch =
GetRegisterThatIsNotOneOf(input_reg, result_reg);
Register scratch2 =
GetRegisterThatIsNotOneOf(input_reg, result_reg, scratch);
Register scratch3 =
GetRegisterThatIsNotOneOf(input_reg, result_reg, scratch, scratch2);
DoubleRegister double_scratch = kLithiumScratchDouble;
__ Push(scratch, scratch2, scratch3);
if (!skip_fastpath()) {
// Load double input.
__ ldc1(double_scratch, MemOperand(input_reg, double_offset));
// Clear cumulative exception flags and save the FCSR.
__ cfc1(scratch2, FCSR);
__ ctc1(zero_reg, FCSR);
// Try a conversion to a signed integer.
__ Trunc_w_d(double_scratch, double_scratch);
// Move the converted value into the result register.
__ mfc1(scratch3, double_scratch);
// Retrieve and restore the FCSR.
__ cfc1(scratch, FCSR);
__ ctc1(scratch2, FCSR);
// Check for overflow and NaNs.
__ And(
scratch, scratch,
kFCSROverflowFlagMask | kFCSRUnderflowFlagMask
| kFCSRInvalidOpFlagMask);
// If we had no exceptions then set result_reg and we are done.
Label error;
__ Branch(&error, ne, scratch, Operand(zero_reg));
__ Move(result_reg, scratch3);
__ Branch(&done);
__ bind(&error);
}
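// Manual truncation works directly on the IEEE 754 bit pattern: 1 sign bit,
// 11 exponent bits and 52 mantissa bits. Conceptually, with e the unbiased
// exponent and m the mantissa including its implicit leading 1,
//   low32(|x|) = (e <= 52) ? m >> (52 - e) : m << (e - 52),
// computed below across the two 32-bit halves of the double.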
// Load the double value and perform a manual truncation.
Register input_high = scratch2;
Register input_low = scratch3;
__ lw(input_low,
MemOperand(input_reg, double_offset + Register::kMantissaOffset));
__ lw(input_high,
MemOperand(input_reg, double_offset + Register::kExponentOffset));
Label normal_exponent, restore_sign;
// Extract the biased exponent in result.
__ Ext(result_reg,
input_high,
HeapNumber::kExponentShift,
HeapNumber::kExponentBits);
// Check for Infinity and NaNs, which should return 0.
__ Subu(scratch, result_reg, HeapNumber::kExponentMask);
__ Movz(result_reg, zero_reg, scratch);
__ Branch(&done, eq, scratch, Operand(zero_reg));
// Express exponent as delta to (number of mantissa bits + 31).
__ Subu(result_reg,
result_reg,
Operand(HeapNumber::kExponentBias + HeapNumber::kMantissaBits + 31));
// If the delta is strictly positive, all bits would be shifted away,
// which means that we can return 0.
__ Branch(&normal_exponent, le, result_reg, Operand(zero_reg));
__ mov(result_reg, zero_reg);
__ Branch(&done);
__ bind(&normal_exponent);
const int kShiftBase = HeapNumber::kNonMantissaBitsInTopWord - 1;
// Calculate shift.
__ Addu(scratch, result_reg, Operand(kShiftBase + HeapNumber::kMantissaBits));
// Save the sign.
Register sign = result_reg;
result_reg = no_reg;
__ And(sign, input_high, Operand(HeapNumber::kSignMask));
// On ARM shifts > 31 bits are valid and will result in zero. On MIPS we need
// to check for this specific case.
Label high_shift_needed, high_shift_done;
__ Branch(&high_shift_needed, lt, scratch, Operand(32));
__ mov(input_high, zero_reg);
__ Branch(&high_shift_done);
__ bind(&high_shift_needed);
// Set the implicit 1 before the mantissa part in input_high.
__ Or(input_high,
input_high,
Operand(1 << HeapNumber::kMantissaBitsInTopWord));
// Shift the mantissa bits to the correct position.
// We don't need to clear non-mantissa bits as they will be shifted away.
// If they weren't, it would mean that the answer is in the 32bit range.
__ sllv(input_high, input_high, scratch);
__ bind(&high_shift_done);
// Replace the shifted bits with bits from the lower mantissa word.
Label pos_shift, shift_done;
__ li(at, 32);
__ subu(scratch, at, scratch);
__ Branch(&pos_shift, ge, scratch, Operand(zero_reg));
// Negate scratch.
__ Subu(scratch, zero_reg, scratch);
__ sllv(input_low, input_low, scratch);
__ Branch(&shift_done);
__ bind(&pos_shift);
__ srlv(input_low, input_low, scratch);
__ bind(&shift_done);
__ Or(input_high, input_high, Operand(input_low));
// Restore sign if necessary.
__ mov(scratch, sign);
result_reg = sign;
sign = no_reg;
__ Subu(result_reg, zero_reg, input_high);
__ Movz(result_reg, input_high, scratch);
__ bind(&done);
__ Pop(scratch, scratch2, scratch3);
__ Ret();
}
// Handle the case where the lhs and rhs are the same object.
// Equality is almost reflexive (everything but NaN), so this is a test
// for "identity and not NaN".
static void EmitIdenticalObjectComparison(MacroAssembler* masm,
Label* slow,
Condition cc) {
Label not_identical;
Label heap_number, return_equal;
Register exp_mask_reg = t5;
__ Branch(&not_identical, ne, a0, Operand(a1));
__ li(exp_mask_reg, Operand(HeapNumber::kExponentMask));
// Test for NaN. Sadly, we can't just compare to Factory::nan_value(),
// so we do the second best thing - test it ourselves.
// The operands are identical, and the caller has already excluded the case
// where both are Smis, so neither of them is a Smi. If it's not a heap
// number, then return equal.
if (cc == less || cc == greater) {
__ GetObjectType(a0, t4, t4);
__ Branch(slow, greater, t4, Operand(FIRST_SPEC_OBJECT_TYPE));
} else {
__ GetObjectType(a0, t4, t4);
__ Branch(&heap_number, eq, t4, Operand(HEAP_NUMBER_TYPE));
// Comparing JS objects with <=, >= is complicated.
if (cc != eq) {
__ Branch(slow, greater, t4, Operand(FIRST_SPEC_OBJECT_TYPE));
// Normally here we fall through to return_equal, but undefined is
// special: (undefined == undefined) == true, but
// (undefined <= undefined) == false! See ECMAScript 11.8.5.
if (cc == less_equal || cc == greater_equal) {
__ Branch(&return_equal, ne, t4, Operand(ODDBALL_TYPE));
__ LoadRoot(t2, Heap::kUndefinedValueRootIndex);
__ Branch(&return_equal, ne, a0, Operand(t2));
DCHECK(is_int16(GREATER) && is_int16(LESS));
__ Ret(USE_DELAY_SLOT);
if (cc == le) {
// undefined <= undefined should fail.
__ li(v0, Operand(GREATER));
} else {
// undefined >= undefined should fail.
__ li(v0, Operand(LESS));
}
}
}
}
__ bind(&return_equal);
DCHECK(is_int16(GREATER) && is_int16(LESS));
__ Ret(USE_DELAY_SLOT);
if (cc == less) {
__ li(v0, Operand(GREATER)); // Things aren't less than themselves.
} else if (cc == greater) {
__ li(v0, Operand(LESS)); // Things aren't greater than themselves.
} else {
__ mov(v0, zero_reg); // Things are <=, >=, ==, === themselves.
}
// For less and greater we don't have to check for NaN since the result of
// x < x is false regardless. For the others here is some code to check
// for NaN.
if (cc != lt && cc != gt) {
__ bind(&heap_number);
// It is a heap number, so return non-equal if it's NaN and equal if it's
// not NaN.
// The representation of NaN values has all exponent bits (52..62) set,
// and not all mantissa bits (0..51) clear.
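// For example, the canonical quiet NaN 0x7FF8000000000000 has all exponent
// bits set and a non-zero mantissa, while +Infinity 0x7FF0000000000000 has
// a zero mantissa.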
// Read top bits of double representation (second word of value).
__ lw(t2, FieldMemOperand(a0, HeapNumber::kExponentOffset));
// Test that exponent bits are all set.
__ And(t3, t2, Operand(exp_mask_reg));
// If all bits not set (ne cond), then not a NaN, objects are equal.
__ Branch(&return_equal, ne, t3, Operand(exp_mask_reg));
// Shift out flag and all exponent bits, retaining only mantissa.
__ sll(t2, t2, HeapNumber::kNonMantissaBitsInTopWord);
// Or with all low-bits of mantissa.
__ lw(t3, FieldMemOperand(a0, HeapNumber::kMantissaOffset));
__ Or(v0, t3, Operand(t2));
// For equal we already have the right value in v0: Return zero (equal)
// if all bits in mantissa are zero (it's an Infinity) and non-zero if
// not (it's a NaN). For <= and >= we need to load v0 with the failing
// value if it's a NaN.
if (cc != eq) {
// All-zero means Infinity means equal.
__ Ret(eq, v0, Operand(zero_reg));
DCHECK(is_int16(GREATER) && is_int16(LESS));
__ Ret(USE_DELAY_SLOT);
if (cc == le) {
__ li(v0, Operand(GREATER)); // NaN <= NaN should fail.
} else {
__ li(v0, Operand(LESS)); // NaN >= NaN should fail.
}
}
}
// No fall through here.
__ bind(&not_identical);
}
static void EmitSmiNonsmiComparison(MacroAssembler* masm,
Register lhs,
Register rhs,
Label* both_loaded_as_doubles,
Label* slow,
bool strict) {
DCHECK((lhs.is(a0) && rhs.is(a1)) ||
(lhs.is(a1) && rhs.is(a0)));
Label lhs_is_smi;
__ JumpIfSmi(lhs, &lhs_is_smi);
// Rhs is a Smi.
// Check whether the non-smi is a heap number.
__ GetObjectType(lhs, t4, t4);
if (strict) {
// If lhs was not a number and rhs was a Smi then strict equality cannot
// succeed. Return non-equal (lhs is already not zero).
__ Ret(USE_DELAY_SLOT, ne, t4, Operand(HEAP_NUMBER_TYPE));
__ mov(v0, lhs);
} else {
// Smi compared non-strictly with a non-Smi non-heap-number. Call
// the runtime.
__ Branch(slow, ne, t4, Operand(HEAP_NUMBER_TYPE));
}
// Rhs is a smi, lhs is a number.
// Convert smi rhs to double.
__ sra(at, rhs, kSmiTagSize);
__ mtc1(at, f14);
__ cvt_d_w(f14, f14);
__ ldc1(f12, FieldMemOperand(lhs, HeapNumber::kValueOffset));
// We now have both loaded as doubles.
__ jmp(both_loaded_as_doubles);
__ bind(&lhs_is_smi);
// Lhs is a Smi. Check whether the non-smi is a heap number.
__ GetObjectType(rhs, t4, t4);
if (strict) {
// If rhs was not a number and lhs was a Smi then strict equality cannot
// succeed. Return non-equal.
__ Ret(USE_DELAY_SLOT, ne, t4, Operand(HEAP_NUMBER_TYPE));
__ li(v0, Operand(1));
} else {
// Smi compared non-strictly with a non-Smi non-heap-number. Call
// the runtime.
__ Branch(slow, ne, t4, Operand(HEAP_NUMBER_TYPE));
}
// Lhs is a smi, rhs is a number.
// Convert smi lhs to double.
__ sra(at, lhs, kSmiTagSize);
__ mtc1(at, f12);
__ cvt_d_w(f12, f12);
__ ldc1(f14, FieldMemOperand(rhs, HeapNumber::kValueOffset));
// Fall through to both_loaded_as_doubles.
}
static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
Register lhs,
Register rhs) {
// If either operand is a JS object or an oddball value, then they are
// not equal since their pointers are different.
// There is no test for undetectability in strict equality.
STATIC_ASSERT(LAST_TYPE == LAST_SPEC_OBJECT_TYPE);
Label first_non_object;
// Get the type of the first operand into a2 and compare it with
// FIRST_SPEC_OBJECT_TYPE.
__ GetObjectType(lhs, a2, a2);
__ Branch(&first_non_object, less, a2, Operand(FIRST_SPEC_OBJECT_TYPE));
// Return non-zero.
Label return_not_equal;
__ bind(&return_not_equal);
__ Ret(USE_DELAY_SLOT);
__ li(v0, Operand(1));
__ bind(&first_non_object);
// Check for oddballs: true, false, null, undefined.
__ Branch(&return_not_equal, eq, a2, Operand(ODDBALL_TYPE));
__ GetObjectType(rhs, a3, a3);
__ Branch(&return_not_equal, greater, a3, Operand(FIRST_SPEC_OBJECT_TYPE));
// Check for oddballs: true, false, null, undefined.
__ Branch(&return_not_equal, eq, a3, Operand(ODDBALL_TYPE));
// Now that we have the types we might as well check for
// internalized-internalized.
STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
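// Because both tags are zero, OR-ing the two instance types and masking with
// (kIsNotStringMask | kIsNotInternalizedMask) tests in one step that both
// operands are internalized strings.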
__ Or(a2, a2, Operand(a3));
__ And(at, a2, Operand(kIsNotStringMask | kIsNotInternalizedMask));
__ Branch(&return_not_equal, eq, at, Operand(zero_reg));
}
static void EmitCheckForTwoHeapNumbers(MacroAssembler* masm,
Register lhs,
Register rhs,
Label* both_loaded_as_doubles,
Label* not_heap_numbers,
Label* slow) {
__ GetObjectType(lhs, a3, a2);
__ Branch(not_heap_numbers, ne, a2, Operand(HEAP_NUMBER_TYPE));
__ lw(a2, FieldMemOperand(rhs, HeapObject::kMapOffset));
// If first was a heap number & second wasn't, go to slow case.
__ Branch(slow, ne, a3, Operand(a2));
// Both are heap numbers. Load them up then jump to the code we have
// for that.
__ ldc1(f12, FieldMemOperand(lhs, HeapNumber::kValueOffset));
__ ldc1(f14, FieldMemOperand(rhs, HeapNumber::kValueOffset));
__ jmp(both_loaded_as_doubles);
}
// Fast negative check for internalized-to-internalized equality.
static void EmitCheckForInternalizedStringsOrObjects(MacroAssembler* masm,
Register lhs,
Register rhs,
Label* possible_strings,
Label* not_both_strings) {
DCHECK((lhs.is(a0) && rhs.is(a1)) ||
(lhs.is(a1) && rhs.is(a0)));
// a2 is object type of rhs.
Label object_test;
STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
__ And(at, a2, Operand(kIsNotStringMask));
__ Branch(&object_test, ne, at, Operand(zero_reg));
__ And(at, a2, Operand(kIsNotInternalizedMask));
__ Branch(possible_strings, ne, at, Operand(zero_reg));
__ GetObjectType(rhs, a3, a3);
__ Branch(not_both_strings, ge, a3, Operand(FIRST_NONSTRING_TYPE));
__ And(at, a3, Operand(kIsNotInternalizedMask));
__ Branch(possible_strings, ne, at, Operand(zero_reg));
// Both are internalized strings. We already checked they weren't the same
// pointer so they are not equal.
__ Ret(USE_DELAY_SLOT);
__ li(v0, Operand(1)); // Non-zero indicates not equal.
__ bind(&object_test);
__ Branch(not_both_strings, lt, a2, Operand(FIRST_SPEC_OBJECT_TYPE));
__ GetObjectType(rhs, a2, a3);
__ Branch(not_both_strings, lt, a3, Operand(FIRST_SPEC_OBJECT_TYPE));
// If both objects are undetectable, they are equal. Otherwise, they
// are not equal, since they are different objects and an object is not
// equal to undefined.
__ lw(a3, FieldMemOperand(lhs, HeapObject::kMapOffset));
__ lbu(a2, FieldMemOperand(a2, Map::kBitFieldOffset));
__ lbu(a3, FieldMemOperand(a3, Map::kBitFieldOffset));
__ and_(a0, a2, a3);
__ And(a0, a0, Operand(1 << Map::kIsUndetectable));
__ Ret(USE_DELAY_SLOT);
__ xori(v0, a0, 1 << Map::kIsUndetectable);
}
static void CompareICStub_CheckInputType(MacroAssembler* masm, Register input,
Register scratch,
CompareICState::State expected,
Label* fail) {
Label ok;
if (expected == CompareICState::SMI) {
__ JumpIfNotSmi(input, fail);
} else if (expected == CompareICState::NUMBER) {
__ JumpIfSmi(input, &ok);
__ CheckMap(input, scratch, Heap::kHeapNumberMapRootIndex, fail,
DONT_DO_SMI_CHECK);
}
// We could be strict about internalized/string here, but as long as
// hydrogen doesn't care, the stub doesn't have to care either.
__ bind(&ok);
}
// On entry a1 (lhs) and a0 (rhs) are the values to be compared.
// On exit v0 is 0, positive or negative to indicate the result of
// the comparison.
void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
Register lhs = a1;
Register rhs = a0;
Condition cc = GetCondition();
Label miss;
CompareICStub_CheckInputType(masm, lhs, a2, left(), &miss);
CompareICStub_CheckInputType(masm, rhs, a3, right(), &miss);
Label slow; // Call builtin.
Label not_smis, both_loaded_as_doubles;
Label not_two_smis, smi_done;
__ Or(a2, a1, a0);
__ JumpIfNotSmi(a2, &not_two_smis);
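// Smis are 31-bit integers tagged with a 0 low bit (kSmiTag == 0,
// kSmiTagSize == 1), so an arithmetic right shift by one untags them and
// the integer difference below directly encodes less/equal/greater.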
__ sra(a1, a1, 1);
__ sra(a0, a0, 1);
__ Ret(USE_DELAY_SLOT);
__ subu(v0, a1, a0);
__ bind(&not_two_smis);
// NOTICE! This code is only reached after a smi-fast-case check, so
// it is certain that at least one operand isn't a smi.
// Handle the case where the objects are identical. Either returns the answer
// or goes to slow. Only falls through if the objects were not identical.
EmitIdenticalObjectComparison(masm, &slow, cc);
// If either is a Smi (we know that not both are), then they can only
// be strictly equal if the other is a HeapNumber.
STATIC_ASSERT(kSmiTag == 0);
DCHECK_EQ(0, Smi::FromInt(0));
__ And(t2, lhs, Operand(rhs));
__ JumpIfNotSmi(t2, &not_smis, t0);
// One operand is a smi. EmitSmiNonsmiComparison generates code that can:
// 1) Return the answer.
// 2) Go to slow.
// 3) Jump or fall through to both_loaded_as_doubles.
// In case 3 we have found out we were dealing with a number-number
// comparison and the numbers have been loaded into f12 and f14 as doubles.
EmitSmiNonsmiComparison(masm, lhs, rhs,
&both_loaded_as_doubles, &slow, strict());
__ bind(&both_loaded_as_doubles);
// f12 and f14 are the double representations of the left hand side
// and the right hand side.
Label nan;
__ li(t0, Operand(LESS));
__ li(t1, Operand(GREATER));
__ li(t2, Operand(EQUAL));
// Check if either rhs or lhs is NaN.
__ BranchF(NULL, &nan, eq, f12, f14);
// Check if LESS condition is satisfied. If true, move conditionally
// result to v0.
if (!IsMipsArchVariant(kMips32r6)) {
__ c(OLT, D, f12, f14);
__ Movt(v0, t0);
// Use the previous check to conditionally store the opposite result
// (GREATER) to v0. If rhs is equal to lhs, this will be corrected in the
// next check.
__ Movf(v0, t1);
// Check if EQUAL condition is satisfied. If true, move conditionally
// result to v0.
__ c(EQ, D, f12, f14);
__ Movt(v0, t2);
} else {
Label skip;
__ BranchF(USE_DELAY_SLOT, &skip, NULL, lt, f12, f14);
__ mov(v0, t0); // Return LESS as result.
__ BranchF(USE_DELAY_SLOT, &skip, NULL, eq, f12, f14);
__ mov(v0, t2); // Return EQUAL as result.
__ mov(v0, t1); // Return GREATER as result.
__ bind(&skip);
}
__ Ret();
__ bind(&nan);
// NaN comparisons always fail.
// Load whatever we need in v0 to make the comparison fail.
DCHECK(is_int16(GREATER) && is_int16(LESS));
__ Ret(USE_DELAY_SLOT);
if (cc == lt || cc == le) {
__ li(v0, Operand(GREATER));
} else {
__ li(v0, Operand(LESS));
}
__ bind(&not_smis);
// At this point we know we are dealing with two different objects,
// and neither of them is a Smi. The objects are in lhs and rhs.
if (strict()) {
// This returns non-equal for some object types, or falls through if it
// was not lucky.
EmitStrictTwoHeapObjectCompare(masm, lhs, rhs);
}
Label check_for_internalized_strings;
Label flat_string_check;
// Check for heap-number-heap-number comparison. Can jump to slow case,
// or load both doubles and jump to the code that handles
// that case. If the inputs are not doubles then jumps to
// check_for_internalized_strings.
// In this case a2 will contain the type of lhs.
EmitCheckForTwoHeapNumbers(masm,
lhs,
rhs,
&both_loaded_as_doubles,
&check_for_internalized_strings,
&flat_string_check);
__ bind(&check_for_internalized_strings);
if (cc == eq && !strict()) {
// Returns an answer for two internalized strings or two
// detectable objects.
// Otherwise jumps to string case or not both strings case.
// Assumes that a2 is the type of lhs on entry.
EmitCheckForInternalizedStringsOrObjects(
masm, lhs, rhs, &flat_string_check, &slow);
}
// Check for both being sequential one-byte strings,
// and inline if that is the case.
__ bind(&flat_string_check);
__ JumpIfNonSmisNotBothSequentialOneByteStrings(lhs, rhs, a2, a3, &slow);
__ IncrementCounter(isolate()->counters()->string_compare_native(), 1, a2,
a3);
if (cc == eq) {
StringHelper::GenerateFlatOneByteStringEquals(masm, lhs, rhs, a2, a3, t0);
} else {
StringHelper::GenerateCompareFlatOneByteStrings(masm, lhs, rhs, a2, a3, t0,
t1);
}
// Never falls through to here.
__ bind(&slow);
// Prepare for call to builtin. Push object pointers, a0 (lhs) first,
// a1 (rhs) second.
__ Push(lhs, rhs);
// Figure out which native to call and set up the arguments.
Builtins::JavaScript native;
if (cc == eq) {
native = strict() ? Builtins::STRICT_EQUALS : Builtins::EQUALS;
} else {
native = Builtins::COMPARE;
int ncr; // NaN compare result.
if (cc == lt || cc == le) {
ncr = GREATER;
} else {
DCHECK(cc == gt || cc == ge); // Remaining cases.
ncr = LESS;
}
__ li(a0, Operand(Smi::FromInt(ncr)));
__ push(a0);
}
// Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
// tagged as a small integer.
__ InvokeBuiltin(native, JUMP_FUNCTION);
__ bind(&miss);
GenerateMiss(masm);
}
void StoreRegistersStateStub::Generate(MacroAssembler* masm) {
__ mov(t9, ra);
__ pop(ra);
__ PushSafepointRegisters();
__ Jump(t9);
}
void RestoreRegistersStateStub::Generate(MacroAssembler* masm) {
__ mov(t9, ra);
__ pop(ra);
__ PopSafepointRegisters();
__ Jump(t9);
}
void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
// We don't allow a GC during a store buffer overflow so there is no need to
// store the registers in any particular way, but we do have to store and
// restore them.
__ MultiPush(kJSCallerSaved | ra.bit());
if (save_doubles()) {
__ MultiPushFPU(kCallerSavedFPU);
}
const int argument_count = 1;
const int fp_argument_count = 0;
const Register scratch = a1;
AllowExternalCallThatCantCauseGC scope(masm);
__ PrepareCallCFunction(argument_count, fp_argument_count, scratch);
__ li(a0, Operand(ExternalReference::isolate_address(isolate())));
__ CallCFunction(
ExternalReference::store_buffer_overflow_function(isolate()),
argument_count);
if (save_doubles()) {
__ MultiPopFPU(kCallerSavedFPU);
}
__ MultiPop(kJSCallerSaved | ra.bit());
__ Ret();
}
void MathPowStub::Generate(MacroAssembler* masm) {
const Register base = a1;
const Register exponent = MathPowTaggedDescriptor::exponent();
DCHECK(exponent.is(a2));
const Register heapnumbermap = t1;
const Register heapnumber = v0;
const DoubleRegister double_base = f2;
const DoubleRegister double_exponent = f4;
const DoubleRegister double_result = f0;
const DoubleRegister double_scratch = f6;
const FPURegister single_scratch = f8;
const Register scratch = t5;
const Register scratch2 = t3;
Label call_runtime, done, int_exponent;
if (exponent_type() == ON_STACK) {
Label base_is_smi, unpack_exponent;
// The exponent and base are supplied as arguments on the stack.
// This can only happen if the stub is called from non-optimized code.
// Load input parameters from stack to double registers.
__ lw(base, MemOperand(sp, 1 * kPointerSize));
__ lw(exponent, MemOperand(sp, 0 * kPointerSize));
__ LoadRoot(heapnumbermap, Heap::kHeapNumberMapRootIndex);
__ UntagAndJumpIfSmi(scratch, base, &base_is_smi);
__ lw(scratch, FieldMemOperand(base, JSObject::kMapOffset));
__ Branch(&call_runtime, ne, scratch, Operand(heapnumbermap));
__ ldc1(double_base, FieldMemOperand(base, HeapNumber::kValueOffset));
__ jmp(&unpack_exponent);
__ bind(&base_is_smi);
__ mtc1(scratch, single_scratch);
__ cvt_d_w(double_base, single_scratch);
__ bind(&unpack_exponent);
__ UntagAndJumpIfSmi(scratch, exponent, &int_exponent);
__ lw(scratch, FieldMemOperand(exponent, JSObject::kMapOffset));
__ Branch(&call_runtime, ne, scratch, Operand(heapnumbermap));
__ ldc1(double_exponent,
FieldMemOperand(exponent, HeapNumber::kValueOffset));
} else if (exponent_type() == TAGGED) {
// Base is already in double_base.
__ UntagAndJumpIfSmi(scratch, exponent, &int_exponent);
__ ldc1(double_exponent,
FieldMemOperand(exponent, HeapNumber::kValueOffset));
}
if (exponent_type() != INTEGER) {
Label int_exponent_convert;
// Detect integer exponents stored as double.
__ EmitFPUTruncate(kRoundToMinusInf,
scratch,
double_exponent,
at,
double_scratch,
scratch2,
kCheckForInexactConversion);
// scratch2 == 0 means there was no conversion error.
__ Branch(&int_exponent_convert, eq, scratch2, Operand(zero_reg));
if (exponent_type() == ON_STACK) {
// Detect square root case. Crankshaft detects constant +/-0.5 at
// compile time and uses DoMathPowHalf instead. We then skip this check
// for non-constant cases of +/-0.5 as these hardly occur.
Label not_plus_half;
// Test for 0.5.
__ Move(double_scratch, 0.5);
__ BranchF(USE_DELAY_SLOT,
&not_plus_half,
NULL,
ne,
double_exponent,
double_scratch);
// double_scratch can be overwritten in the delay slot.
// Calculates square root of base. Check for the special case of
// Math.pow(-Infinity, 0.5) == Infinity (ECMA spec, 15.8.2.13).
__ Move(double_scratch, -V8_INFINITY);
__ BranchF(USE_DELAY_SLOT, &done, NULL, eq, double_base, double_scratch);
__ neg_d(double_result, double_scratch);
// Add +0 to convert -0 to +0.
__ add_d(double_scratch, double_base, kDoubleRegZero);
__ sqrt_d(double_result, double_scratch);
__ jmp(&done);
__ bind(&not_plus_half);
__ Move(double_scratch, -0.5);
__ BranchF(USE_DELAY_SLOT,
&call_runtime,
NULL,
ne,
double_exponent,
double_scratch);
// double_scratch can be overwritten in the delay slot.
// Calculates the reciprocal of the square root of base. Check for the
// special case of Math.pow(-Infinity, -0.5) == 0 (ECMA spec, 15.8.2.13).
__ Move(double_scratch, -V8_INFINITY);
__ BranchF(USE_DELAY_SLOT, &done, NULL, eq, double_base, double_scratch);
__ Move(double_result, kDoubleRegZero);
// Add +0 to convert -0 to +0.
__ add_d(double_scratch, double_base, kDoubleRegZero);
__ Move(double_result, 1);
__ sqrt_d(double_scratch, double_scratch);
__ div_d(double_result, double_result, double_scratch);
__ jmp(&done);
}
__ push(ra);
{
AllowExternalCallThatCantCauseGC scope(masm);
__ PrepareCallCFunction(0, 2, scratch2);
__ MovToFloatParameters(double_base, double_exponent);
__ CallCFunction(
ExternalReference::power_double_double_function(isolate()),
0, 2);
}
__ pop(ra);
__ MovFromFloatResult(double_result);
__ jmp(&done);
__ bind(&int_exponent_convert);
}
// Calculate power with integer exponent.
__ bind(&int_exponent);
// Get two copies of exponent in the registers scratch and exponent.
if (exponent_type() == INTEGER) {
__ mov(scratch, exponent);
} else {
// Exponent has previously been stored into scratch as untagged integer.
__ mov(exponent, scratch);
}
__ mov_d(double_scratch, double_base); // Back up base.
__ Move(double_result, 1.0);
// Get absolute value of exponent.
Label positive_exponent;
__ Branch(&positive_exponent, ge, scratch, Operand(zero_reg));
__ Subu(scratch, zero_reg, scratch);
__ bind(&positive_exponent);
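// Exponentiation by squaring: consume the exponent bit by bit, squaring
// double_scratch each round and multiplying it into double_result when the
// current low bit is set. E.g. an exponent of 5 (0b101) multiplies in
// base^1 and base^4 and skips base^2.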
Label while_true, no_carry, loop_end;
__ bind(&while_true);
__ And(scratch2, scratch, 1);
__ Branch(&no_carry, eq, scratch2, Operand(zero_reg));
__ mul_d(double_result, double_result, double_scratch);
__ bind(&no_carry);
__ sra(scratch, scratch, 1);
__ Branch(&loop_end, eq, scratch, Operand(zero_reg));
__ mul_d(double_scratch, double_scratch, double_scratch);
__ Branch(&while_true);
__ bind(&loop_end);
__ Branch(&done, ge, exponent, Operand(zero_reg));
__ Move(double_scratch, 1.0);
__ div_d(double_result, double_scratch, double_result);
// Test whether result is zero. Bail out to check for subnormal result.
// Due to subnormals, x^-y == (1/x)^y does not hold in all cases.
__ BranchF(&done, NULL, ne, double_result, kDoubleRegZero);
// double_exponent may not contain the exponent value if the input was a
// smi. We set it with exponent value before bailing out.
__ mtc1(exponent, single_scratch);
__ cvt_d_w(double_exponent, single_scratch);
// Returning or bailing out.
Counters* counters = isolate()->counters();
if (exponent_type() == ON_STACK) {
// The arguments are still on the stack.
__ bind(&call_runtime);
__ TailCallRuntime(Runtime::kMathPowRT, 2, 1);
// The stub is called from non-optimized code, which expects the result
// as heap number in exponent.
__ bind(&done);
__ AllocateHeapNumber(
heapnumber, scratch, scratch2, heapnumbermap, &call_runtime);
__ sdc1(double_result,
FieldMemOperand(heapnumber, HeapNumber::kValueOffset));
DCHECK(heapnumber.is(v0));
__ IncrementCounter(counters->math_pow(), 1, scratch, scratch2);
__ DropAndRet(2);
} else {
__ push(ra);
{
AllowExternalCallThatCantCauseGC scope(masm);
__ PrepareCallCFunction(0, 2, scratch);
__ MovToFloatParameters(double_base, double_exponent);
__ CallCFunction(
ExternalReference::power_double_double_function(isolate()),
0, 2);
}
__ pop(ra);
__ MovFromFloatResult(double_result);
__ bind(&done);
__ IncrementCounter(counters->math_pow(), 1, scratch, scratch2);
__ Ret();
}
}
bool CEntryStub::NeedsImmovableCode() {
return true;
}
void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) {
CEntryStub::GenerateAheadOfTime(isolate);
StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(isolate);
StubFailureTrampolineStub::GenerateAheadOfTime(isolate);
ArrayConstructorStubBase::GenerateStubsAheadOfTime(isolate);
CreateAllocationSiteStub::GenerateAheadOfTime(isolate);
BinaryOpICStub::GenerateAheadOfTime(isolate);
StoreRegistersStateStub::GenerateAheadOfTime(isolate);
RestoreRegistersStateStub::GenerateAheadOfTime(isolate);
BinaryOpICWithAllocationSiteStub::GenerateAheadOfTime(isolate);
}
void StoreRegistersStateStub::GenerateAheadOfTime(Isolate* isolate) {
StoreRegistersStateStub stub(isolate);
stub.GetCode();
}
void RestoreRegistersStateStub::GenerateAheadOfTime(Isolate* isolate) {
RestoreRegistersStateStub stub(isolate);
stub.GetCode();
}
void CodeStub::GenerateFPStubs(Isolate* isolate) {
// Generate if not already in cache.
SaveFPRegsMode mode = kSaveFPRegs;
CEntryStub(isolate, 1, mode).GetCode();
StoreBufferOverflowStub(isolate, mode).GetCode();
isolate->set_fp_stubs_generated(true);
}
void CEntryStub::GenerateAheadOfTime(Isolate* isolate) {
CEntryStub stub(isolate, 1, kDontSaveFPRegs);
stub.GetCode();
}
void CEntryStub::Generate(MacroAssembler* masm) {
// Called from JavaScript; parameters are on stack as if calling JS function
// a0: number of arguments including receiver
// a1: pointer to builtin function
// fp: frame pointer (restored after C call)
// sp: stack pointer (restored as callee's sp after C call)
// cp: current context (C callee-saved)
ProfileEntryHookStub::MaybeCallEntryHook(masm);
// Compute the argv pointer in a callee-saved register.
__ sll(s1, a0, kPointerSizeLog2);
__ Addu(s1, sp, s1);
__ Subu(s1, s1, kPointerSize);
// Enter the exit frame that transitions from JavaScript to C++.
FrameScope scope(masm, StackFrame::MANUAL);
__ EnterExitFrame(save_doubles());
// s0: number of arguments including receiver (C callee-saved)
// s1: pointer to first argument (C callee-saved)
// s2: pointer to builtin function (C callee-saved)
// Prepare arguments for C routine.
// a0 = argc
__ mov(s0, a0);
__ mov(s2, a1);
// a1 = argv (set in the delay slot after find_ra below).
// We are calling compiled C/C++ code. a0 and a1 hold our two arguments. We
// also need to reserve the 4 argument slots on the stack.
__ AssertStackIsAligned();
__ li(a2, Operand(ExternalReference::isolate_address(isolate())));
// To let the GC traverse the return address of the exit frames, we need to
// know where the return address is. The CEntryStub is unmovable, so
// we can store the address on the stack to be able to find it again and
// we never have to restore it, because it will not change.
{ Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm);
// This branch-and-link sequence is needed to find the current PC on mips,
// saved to the ra register.
// Use masm-> here instead of the double-underscore macro since extra
// coverage code can interfere with the proper calculation of ra.
Label find_ra;
masm->bal(&find_ra); // bal exposes branch delay slot.
masm->mov(a1, s1);
masm->bind(&find_ra);
// Adjust the value in ra to point to the correct return location, 2nd
// instruction past the real call into C code (the jalr(t9)), and push it.
// This is the return address of the exit frame.
const int kNumInstructionsToJump = 5;
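// Counting from find_ra: Addu, sw, mov, jalr and its delay-slot addiu are
// the five instructions (4 bytes each on MIPS32) emitted before control
// returns from the C call; the DCHECK below verifies the count.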
masm->Addu(ra, ra, kNumInstructionsToJump * kPointerSize);
masm->sw(ra, MemOperand(sp)); // This spot was reserved in EnterExitFrame.
// Stack space reservation moved to the branch delay slot below.
// Stack is still aligned.
// Call the C routine.
masm->mov(t9, s2); // Function pointer to t9 to conform to ABI for PIC.
masm->jalr(t9);
// Set up sp in the delay slot.
masm->addiu(sp, sp, -kCArgsSlotsSize);
// Make sure the stored 'ra' points to this position.
DCHECK_EQ(kNumInstructionsToJump,
masm->InstructionsGeneratedSince(&find_ra));
}
// Runtime functions should not return 'the hole'. Allowing it to escape may
// lead to crashes in the IC code later.
if (FLAG_debug_code) {
Label okay;
__ LoadRoot(t0, Heap::kTheHoleValueRootIndex);
__ Branch(&okay, ne, v0, Operand(t0));
__ stop("The hole escaped");
__ bind(&okay);
}
// Check result for exception sentinel.
Label exception_returned;
__ LoadRoot(t0, Heap::kExceptionRootIndex);
__ Branch(&exception_returned, eq, t0, Operand(v0));
ExternalReference pending_exception_address(
Isolate::kPendingExceptionAddress, isolate());
// Check that there is no pending exception, otherwise we
// should have returned the exception sentinel.
if (FLAG_debug_code) {
Label okay;
__ li(a2, Operand(pending_exception_address));
__ lw(a2, MemOperand(a2));
__ LoadRoot(t0, Heap::kTheHoleValueRootIndex);
// Cannot use check here as it attempts to generate call into runtime.
__ Branch(&okay, eq, t0, Operand(a2));
__ stop("Unexpected pending exception");
__ bind(&okay);
}
// Exit C frame and return.
// v0:v1: result
// sp: stack pointer
// fp: frame pointer
// s0: still holds argc (callee-saved).
__ LeaveExitFrame(save_doubles(), s0, true, EMIT_RETURN);
// Handling of exception.
__ bind(&exception_returned);
// Retrieve the pending exception.
__ li(a2, Operand(pending_exception_address));
__ lw(v0, MemOperand(a2));
// Clear the pending exception.
__ li(a3, Operand(isolate()->factory()->the_hole_value()));
__ sw(a3, MemOperand(a2));
// Special handling of termination exceptions which are uncatchable
// by javascript code.
Label throw_termination_exception;
__ LoadRoot(t0, Heap::kTerminationExceptionRootIndex);
__ Branch(&throw_termination_exception, eq, v0, Operand(t0));
// Handle normal exception.
__ Throw(v0);
__ bind(&throw_termination_exception);
__ ThrowUncatchable(v0);
}
void JSEntryStub::Generate(MacroAssembler* masm) {
Label invoke, handler_entry, exit;
Isolate* isolate = masm->isolate();
// Registers:
// a0: entry address
// a1: function
// a2: receiver
// a3: argc
//
// Stack:
// 4 args slots
// args
ProfileEntryHookStub::MaybeCallEntryHook(masm);
// Save callee saved registers on the stack.
__ MultiPush(kCalleeSaved | ra.bit());
// Save callee-saved FPU registers.
__ MultiPushFPU(kCalleeSavedFPU);
// Set up the reserved register for 0.0.
__ Move(kDoubleRegZero, 0.0);
// Load argv in s0 register.
int offset_to_argv = (kNumCalleeSaved + 1) * kPointerSize;
offset_to_argv += kNumCalleeSavedFPU * kDoubleSize;
__ InitializeRootRegister();
__ lw(s0, MemOperand(sp, offset_to_argv + kCArgsSlotsSize));
// We build an EntryFrame.
__ li(t3, Operand(-1)); // Push a bad frame pointer to fail if it is used.
int marker = type();
__ li(t2, Operand(Smi::FromInt(marker)));
__ li(t1, Operand(Smi::FromInt(marker)));
__ li(t0, Operand(ExternalReference(Isolate::kCEntryFPAddress,
isolate)));
__ lw(t0, MemOperand(t0));
__ Push(t3, t2, t1, t0);
// Set up frame pointer for the frame to be pushed.
__ addiu(fp, sp, -EntryFrameConstants::kCallerFPOffset);
// Registers:
// a0: entry_address
// a1: function
// a2: receiver_pointer
// a3: argc
// s0: argv
//
// Stack:
// caller fp |
// function slot | entry frame
// context slot |
// bad fp (0xff...f) |
// callee saved registers + ra
// 4 args slots
// args
// If this is the outermost JS call, set js_entry_sp value.
Label non_outermost_js;
ExternalReference js_entry_sp(Isolate::kJSEntrySPAddress, isolate);
__ li(t1, Operand(ExternalReference(js_entry_sp)));
__ lw(t2, MemOperand(t1));
__ Branch(&non_outermost_js, ne, t2, Operand(zero_reg));
__ sw(fp, MemOperand(t1));
__ li(t0, Operand(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME)));
Label cont;
__ b(&cont);
__ nop(); // Branch delay slot nop.
__ bind(&non_outermost_js);
__ li(t0, Operand(Smi::FromInt(StackFrame::INNER_JSENTRY_FRAME)));
__ bind(&cont);
__ push(t0);
// Jump to a faked try block that does the invoke, with a faked catch
// block that sets the pending exception.
__ jmp(&invoke);
__ bind(&handler_entry);
handler_offset_ = handler_entry.pos();
// Caught exception: Store result (exception) in the pending exception
// field in the JSEnv and return a failure sentinel. Coming in here the
// fp will be invalid because the PushTryHandler below sets it to 0 to
// signal the existence of the JSEntry frame.
__ li(t0, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
isolate)));
__ sw(v0, MemOperand(t0)); // We come back from 'invoke'. result is in v0.
__ LoadRoot(v0, Heap::kExceptionRootIndex);
__ b(&exit); // b exposes branch delay slot.
__ nop(); // Branch delay slot nop.
// Invoke: Link this frame into the handler chain. There's only one
// handler block in this code object, so its index is 0.
__ bind(&invoke);
__ PushTryHandler(StackHandler::JS_ENTRY, 0);
// If an exception not caught by another handler occurs, this handler
// returns control to the code after the jmp(&invoke) above, which
// restores all kCalleeSaved registers (including cp and fp) to their
// saved values before returning a failure to C.
// Clear any pending exceptions.
__ LoadRoot(t1, Heap::kTheHoleValueRootIndex);
__ li(t0, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
isolate)));
__ sw(t1, MemOperand(t0));
// Invoke the function by calling through JS entry trampoline builtin.
// Notice that we cannot store a reference to the trampoline code directly in
// this stub, because runtime stubs are not traversed when doing GC.
// Registers:
// a0: entry_address
// a1: function
// a2: receiver_pointer
// a3: argc
// s0: argv
//
// Stack:
// handler frame
// entry frame
// callee saved registers + ra
// 4 args slots
// args
if (type() == StackFrame::ENTRY_CONSTRUCT) {
ExternalReference construct_entry(Builtins::kJSConstructEntryTrampoline,
isolate);
__ li(t0, Operand(construct_entry));
} else {
ExternalReference entry(Builtins::kJSEntryTrampoline, masm->isolate());
__ li(t0, Operand(entry));
}
__ lw(t9, MemOperand(t0)); // Deref address.
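// t9 now holds a tagged Code object pointer; its first instruction is
// Code::kHeaderSize - kHeapObjectTag bytes past that pointer.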
// Call JSEntryTrampoline.
__ addiu(t9, t9, Code::kHeaderSize - kHeapObjectTag);
__ Call(t9);
// Unlink this frame from the handler chain.
__ PopTryHandler();
__ bind(&exit); // v0 holds result
// Check if the current stack frame is marked as the outermost JS frame.
Label non_outermost_js_2;
__ pop(t1);
__ Branch(&non_outermost_js_2,
ne,
t1,
Operand(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME)));
__ li(t1, Operand(ExternalReference(js_entry_sp)));
__ sw(zero_reg, MemOperand(t1));
__ bind(&non_outermost_js_2);
// Restore the top frame descriptors from the stack.
__ pop(t1);
__ li(t0, Operand(ExternalReference(Isolate::kCEntryFPAddress,
isolate)));
__ sw(t1, MemOperand(t0));
// Reset the stack to the callee saved registers.
__ addiu(sp, sp, -EntryFrameConstants::kCallerFPOffset);
// Restore callee-saved fpu registers.
__ MultiPopFPU(kCalleeSavedFPU);
// Restore callee saved registers from the stack.
__ MultiPop(kCalleeSaved | ra.bit());
// Return.
__ Jump(ra);
}
void LoadIndexedStringStub::Generate(MacroAssembler* masm) {
// Return address is in ra.
Label miss;
Register receiver = LoadDescriptor::ReceiverRegister();
Register index = LoadDescriptor::NameRegister();
Register scratch = a3;
Register result = v0;
DCHECK(!scratch.is(receiver) && !scratch.is(index));
StringCharAtGenerator char_at_generator(receiver, index, scratch, result,
&miss, // When not a string.
&miss, // When not a number.
&miss, // When index out of range.
STRING_INDEX_IS_ARRAY_INDEX,
RECEIVER_IS_STRING);
char_at_generator.GenerateFast(masm);
__ Ret();
StubRuntimeCallHelper call_helper;
char_at_generator.GenerateSlow(masm, call_helper);
__ bind(&miss);
PropertyAccessCompiler::TailCallBuiltin(
masm, PropertyAccessCompiler::MissBuiltin(Code::KEYED_LOAD_IC));
}
// Uses registers a0 to t0.
// Expected input (depending on whether args are in registers or on the stack):
// * object: a0 or at sp + 1 * kPointerSize.
// * function: a1 or at sp.
//
// An inlined call site may have been generated before calling this stub.
// In this case the offset to the inline site to patch is passed on the stack,
// in the safepoint slot for register t0.
void InstanceofStub::Generate(MacroAssembler* masm) {
// Call site inlining and patching implies arguments in registers.
DCHECK(HasArgsInRegisters() || !HasCallSiteInlineCheck());
// Fixed register usage throughout the stub:
const Register object = a0; // Object (lhs).
Register map = a3; // Map of the object.
const Register function = a1; // Function (rhs).
const Register prototype = t0; // Prototype of the function.
const Register inline_site = t5;
const Register scratch = a2;
const int32_t kDeltaToLoadBoolResult = 5 * kPointerSize;
Label slow, loop, is_instance, is_not_instance, not_js_object;
if (!HasArgsInRegisters()) {
__ lw(object, MemOperand(sp, 1 * kPointerSize));
__ lw(function, MemOperand(sp, 0));
}
// Check that the left hand is a JS object and load map.
__ JumpIfSmi(object, &not_js_object);
__ IsObjectJSObjectType(object, map, scratch, &not_js_object);
// If there is a call site cache don't look in the global cache, but do the
// real lookup and update the call site cache.
if (!HasCallSiteInlineCheck() && !ReturnTrueFalseObject()) {
Label miss;
__ LoadRoot(at, Heap::kInstanceofCacheFunctionRootIndex);
__ Branch(&miss, ne, function, Operand(at));
__ LoadRoot(at, Heap::kInstanceofCacheMapRootIndex);
__ Branch(&miss, ne, map, Operand(at));
__ LoadRoot(v0, Heap::kInstanceofCacheAnswerRootIndex);
__ DropAndRet(HasArgsInRegisters() ? 0 : 2);
__ bind(&miss);
}
// Get the prototype of the function.
__ TryGetFunctionPrototype(function, prototype, scratch, &slow, true);
// Check that the function prototype is a JS object.
__ JumpIfSmi(prototype, &slow);
__ IsObjectJSObjectType(prototype, scratch, scratch, &slow);
// Update the global instanceof or call site inlined cache with the current
// map and function. The cached answer will be set when it is known below.
if (!HasCallSiteInlineCheck()) {
__ StoreRoot(function, Heap::kInstanceofCacheFunctionRootIndex);
__ StoreRoot(map, Heap::kInstanceofCacheMapRootIndex);
} else {
DCHECK(HasArgsInRegisters());
// Patch the (relocated) inlined map check.
// The offset was stored in the t0 safepoint slot.
// (See LCodeGen::DoDeferredLInstanceOfKnownGlobal).
__ LoadFromSafepointRegisterSlot(scratch, t0);
__ Subu(inline_site, ra, scratch);
// Get the map location in scratch and patch it.
__ GetRelocatedValue(inline_site, scratch, v1); // v1 used as scratch.
__ sw(map, FieldMemOperand(scratch, Cell::kValueOffset));
}
// Register mapping: a3 is object map and t0 is function prototype.
// Get prototype of object into a2.
__ lw(scratch, FieldMemOperand(map, Map::kPrototypeOffset));
// We don't need map any more. Use it as a scratch register.
Register scratch2 = map;
map = no_reg;
// Loop through the prototype chain looking for the function prototype.
__ LoadRoot(scratch2, Heap::kNullValueRootIndex);
__ bind(&loop);
__ Branch(&is_instance, eq, scratch, Operand(prototype));
__ Branch(&is_not_instance, eq, scratch, Operand(scratch2));
__ lw(scratch, FieldMemOperand(scratch, HeapObject::kMapOffset));
__ lw(scratch, FieldMemOperand(scratch, Map::kPrototypeOffset));
__ Branch(&loop);
__ bind(&is_instance);
DCHECK(Smi::FromInt(0) == 0);
if (!HasCallSiteInlineCheck()) {
__ mov(v0, zero_reg);
__ StoreRoot(v0, Heap::kInstanceofCacheAnswerRootIndex);
if (ReturnTrueFalseObject()) {
__ LoadRoot(v0, Heap::kTrueValueRootIndex);
}
} else {
// Patch the call site to return true.
__ LoadRoot(v0, Heap::kTrueValueRootIndex);
__ Addu(inline_site, inline_site, Operand(kDeltaToLoadBoolResult));
// Get the boolean result location in scratch and patch it.
__ PatchRelocatedValue(inline_site, scratch, v0);
if (!ReturnTrueFalseObject()) {
DCHECK_EQ(Smi::FromInt(0), 0);
__ mov(v0, zero_reg);
}
}
__ DropAndRet(HasArgsInRegisters() ? 0 : 2);
__ bind(&is_not_instance);
if (!HasCallSiteInlineCheck()) {
__ li(v0, Operand(Smi::FromInt(1)));
__ StoreRoot(v0, Heap::kInstanceofCacheAnswerRootIndex);
if (ReturnTrueFalseObject()) {
__ LoadRoot(v0, Heap::kFalseValueRootIndex);
}
} else {
// Patch the call site to return false.
__ LoadRoot(v0, Heap::kFalseValueRootIndex);
__ Addu(inline_site, inline_site, Operand(kDeltaToLoadBoolResult));
// Get the boolean result location in scratch and patch it.
__ PatchRelocatedValue(inline_site, scratch, v0);
if (!ReturnTrueFalseObject()) {
__ li(v0, Operand(Smi::FromInt(1)));
}
}
__ DropAndRet(HasArgsInRegisters() ? 0 : 2);
Label object_not_null, object_not_null_or_smi;
__ bind(&not_js_object);
// Before null, smi and string value checks, check that the rhs is a
// function; for a non-function rhs an exception needs to be thrown.
__ JumpIfSmi(function, &slow);
__ GetObjectType(function, scratch2, scratch);
__ Branch(&slow, ne, scratch, Operand(JS_FUNCTION_TYPE));
// Null is not instance of anything.
__ Branch(&object_not_null, ne, object,
Operand(isolate()->factory()->null_value()));
if (ReturnTrueFalseObject()) {
__ LoadRoot(v0, Heap::kFalseValueRootIndex);
} else {
__ li(v0, Operand(Smi::FromInt(1)));
}
__ DropAndRet(HasArgsInRegisters() ? 0 : 2);
__ bind(&object_not_null);
// Smi values are not instances of anything.
__ JumpIfNotSmi(object, &object_not_null_or_smi);
if (ReturnTrueFalseObject()) {
__ LoadRoot(v0, Heap::kFalseValueRootIndex);
} else {
__ li(v0, Operand(Smi::FromInt(1)));
}
__ DropAndRet(HasArgsInRegisters() ? 0 : 2);
__ bind(&object_not_null_or_smi);
// String values are not instances of anything.
__ IsObjectJSStringType(object, scratch, &slow);
if (ReturnTrueFalseObject()) {
__ LoadRoot(v0, Heap::kFalseValueRootIndex);
} else {
__ li(v0, Operand(Smi::FromInt(1)));
}
__ DropAndRet(HasArgsInRegisters() ? 0 : 2);
// Slow-case. Tail call builtin.
__ bind(&slow);
if (!ReturnTrueFalseObject()) {
if (HasArgsInRegisters()) {
__ Push(a0, a1);
}
__ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_FUNCTION);
} else {
{
FrameScope scope(masm, StackFrame::INTERNAL);
__ Push(a0, a1);
__ InvokeBuiltin(Builtins::INSTANCE_OF, CALL_FUNCTION);
}
__ mov(a0, v0);
__ LoadRoot(v0, Heap::kTrueValueRootIndex);
__ DropAndRet(HasArgsInRegisters() ? 0 : 2, eq, a0, Operand(zero_reg));
__ LoadRoot(v0, Heap::kFalseValueRootIndex);
__ DropAndRet(HasArgsInRegisters() ? 0 : 2);
}
}
void FunctionPrototypeStub::Generate(MacroAssembler* masm) {
Label miss;
Register receiver = LoadDescriptor::ReceiverRegister();
NamedLoadHandlerCompiler::GenerateLoadFunctionPrototype(masm, receiver, a3,
t0, &miss);
__ bind(&miss);
PropertyAccessCompiler::TailCallBuiltin(
masm, PropertyAccessCompiler::MissBuiltin(Code::LOAD_IC));
}
void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
// The displacement is the offset of the last parameter (if any)
// relative to the frame pointer.
const int kDisplacement =
StandardFrameConstants::kCallerSPOffset - kPointerSize;
DCHECK(a1.is(ArgumentsAccessReadDescriptor::index()));
DCHECK(a0.is(ArgumentsAccessReadDescriptor::parameter_count()));
// Check that the key is a smi.
Label slow;
__ JumpIfNotSmi(a1, &slow);
// Check if the calling frame is an arguments adaptor frame.
Label adaptor;
__ lw(a2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
__ lw(a3, MemOperand(a2, StandardFrameConstants::kContextOffset));
__ Branch(&adaptor,
eq,
a3,
Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
// Check index (a1) against formal parameters count limit passed in
// through register a0. Use unsigned comparison to get negative
// check for free.
__ Branch(&slow, hs, a1, Operand(a0));
// Read the argument from the stack and return it.
__ subu(a3, a0, a1);
__ sll(t3, a3, kPointerSizeLog2 - kSmiTagSize);
__ Addu(a3, fp, Operand(t3));
__ Ret(USE_DELAY_SLOT);
__ lw(v0, MemOperand(a3, kDisplacement));
// Arguments adaptor case: Check index (a1) against actual arguments
// limit found in the arguments adaptor frame. Use unsigned
// comparison to get negative check for free.
__ bind(&adaptor);
__ lw(a0, MemOperand(a2, ArgumentsAdaptorFrameConstants::kLengthOffset));
__ Branch(&slow, Ugreater_equal, a1, Operand(a0));
// Read the argument from the adaptor frame and return it.
__ subu(a3, a0, a1);
__ sll(t3, a3, kPointerSizeLog2 - kSmiTagSize);
__ Addu(a3, a2, Operand(t3));
__ Ret(USE_DELAY_SLOT);
__ lw(v0, MemOperand(a3, kDisplacement));
// Slow-case: Handle non-smi or out-of-bounds access to arguments
// by calling the runtime system.
__ bind(&slow);
__ push(a1);
__ TailCallRuntime(Runtime::kGetArgumentsProperty, 1, 1);
}
void ArgumentsAccessStub::GenerateNewSloppySlow(MacroAssembler* masm) {
// sp[0] : number of parameters
// sp[4] : receiver displacement
// sp[8] : function
// Check if the calling frame is an arguments adaptor frame.
Label runtime;
__ lw(a3, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
__ lw(a2, MemOperand(a3, StandardFrameConstants::kContextOffset));
__ Branch(&runtime,
ne,
a2,
Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
// Patch the arguments.length and the parameters pointer in the current frame.
__ lw(a2, MemOperand(a3, ArgumentsAdaptorFrameConstants::kLengthOffset));
__ sw(a2, MemOperand(sp, 0 * kPointerSize));
__ sll(t3, a2, 1);
__ Addu(a3, a3, Operand(t3));
__ addiu(a3, a3, StandardFrameConstants::kCallerSPOffset);
__ sw(a3, MemOperand(sp, 1 * kPointerSize));
__ bind(&runtime);
__ TailCallRuntime(Runtime::kNewSloppyArguments, 3, 1);
}
void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
// Stack layout:
// sp[0] : number of parameters (tagged)
// sp[4] : address of receiver argument
// sp[8] : function
// Registers used over whole function:
// t2 : allocated object (tagged)
// t5 : mapped parameter count (tagged)
__ lw(a1, MemOperand(sp, 0 * kPointerSize));
// a1 = parameter count (tagged)
// Check if the calling frame is an arguments adaptor frame.
Label runtime;
Label adaptor_frame, try_allocate;
__ lw(a3, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
__ lw(a2, MemOperand(a3, StandardFrameConstants::kContextOffset));
__ Branch(&adaptor_frame,
eq,
a2,
Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
// No adaptor, parameter count = argument count.
__ mov(a2, a1);
__ b(&try_allocate);
__ nop(); // Branch delay slot nop.
// We have an adaptor frame. Patch the parameters pointer.
__ bind(&adaptor_frame);
__ lw(a2, MemOperand(a3, ArgumentsAdaptorFrameConstants::kLengthOffset));
__ sll(t6, a2, 1);
__ Addu(a3, a3, Operand(t6));
__ Addu(a3, a3, Operand(StandardFrameConstants::kCallerSPOffset));
__ sw(a3, MemOperand(sp, 1 * kPointerSize));
// a1 = parameter count (tagged)
// a2 = argument count (tagged)
// Compute the mapped parameter count = min(a1, a2) in a1.
Label skip_min;
__ Branch(&skip_min, lt, a1, Operand(a2));
__ mov(a1, a2);
__ bind(&skip_min);
__ bind(&try_allocate);
// Compute the sizes of backing store, parameter map, and arguments object.
// 1. Parameter map, has 2 extra words containing context and backing store.
const int kParameterMapHeaderSize =
FixedArray::kHeaderSize + 2 * kPointerSize;
// If there are no mapped parameters, we do not need the parameter_map.
Label param_map_size;
DCHECK_EQ(0, Smi::FromInt(0));
__ Branch(USE_DELAY_SLOT, &param_map_size, eq, a1, Operand(zero_reg));
__ mov(t5, zero_reg); // In delay slot: param map size = 0 when a1 == 0.
__ sll(t5, a1, 1);
__ addiu(t5, t5, kParameterMapHeaderSize);
__ bind(&param_map_size);
// 2. Backing store.
__ sll(t6, a2, 1);
__ Addu(t5, t5, Operand(t6));
__ Addu(t5, t5, Operand(FixedArray::kHeaderSize));
// 3. Arguments object.
__ Addu(t5, t5, Operand(Heap::kSloppyArgumentsObjectSize));
// Do the allocation of all three objects in one go.
__ Allocate(t5, v0, a3, t0, &runtime, TAG_OBJECT);
// v0 = address of new object(s) (tagged)
// a2 = argument count (smi-tagged)
// Get the arguments boilerplate from the current native context into t0.
const int kNormalOffset =
Context::SlotOffset(Context::SLOPPY_ARGUMENTS_MAP_INDEX);
const int kAliasedOffset =
Context::SlotOffset(Context::ALIASED_ARGUMENTS_MAP_INDEX);
__ lw(t0, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
__ lw(t0, FieldMemOperand(t0, GlobalObject::kNativeContextOffset));
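// If there are no mapped parameters (a1 == 0), use the plain sloppy
// arguments map; otherwise use the aliased arguments map, whose elements
// carry the parameter map created below.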
Label skip2_ne, skip2_eq;
__ Branch(&skip2_ne, ne, a1, Operand(zero_reg));
__ lw(t0, MemOperand(t0, kNormalOffset));
__ bind(&skip2_ne);
__ Branch(&skip2_eq, eq, a1, Operand(zero_reg));
__ lw(t0, MemOperand(t0, kAliasedOffset));
__ bind(&skip2_eq);
// v0 = address of new object (tagged)
// a1 = mapped parameter count (tagged)
// a2 = argument count (smi-tagged)
// t0 = address of arguments map (tagged)
__ sw(t0, FieldMemOperand(v0, JSObject::kMapOffset));
__ LoadRoot(a3, Heap::kEmptyFixedArrayRootIndex);
__ sw(a3, FieldMemOperand(v0, JSObject::kPropertiesOffset));
__ sw(a3, FieldMemOperand(v0, JSObject::kElementsOffset));
// Set up the callee in-object property.
STATIC_ASSERT(Heap::kArgumentsCalleeIndex == 1);
__ lw(a3, MemOperand(sp, 2 * kPointerSize));
__ AssertNotSmi(a3);
const int kCalleeOffset = JSObject::kHeaderSize +
Heap::kArgumentsCalleeIndex * kPointerSize;
__ sw(a3, FieldMemOperand(v0, kCalleeOffset));
// Use the length (smi tagged) and set that as an in-object property too.
__ AssertSmi(a2);
STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
const int kLengthOffset = JSObject::kHeaderSize +
Heap::kArgumentsLengthIndex * kPointerSize;
__ sw(a2, FieldMemOperand(v0, kLengthOffset));
// Set up the elements pointer in the allocated arguments object.
// If we allocated a parameter map, t0 will point there, otherwise
// it will point to the backing store.
__ Addu(t0, v0, Operand(Heap::kSloppyArgumentsObjectSize));
__ sw(t0, FieldMemOperand(v0, JSObject::kElementsOffset));
// v0 = address of new object (tagged)
// a1 = mapped parameter count (tagged)
// a2 = argument count (tagged)
// t0 = address of parameter map or backing store (tagged)
// Initialize parameter map. If there are no mapped arguments, we're done.
Label skip_parameter_map;
Label skip3;
__ Branch(&skip3, ne, a1, Operand(Smi::FromInt(0)));
// Move backing store address to a3, because it is
// expected there when filling in the unmapped arguments.
__ mov(a3, t0);
__ bind(&skip3);
__ Branch(&skip_parameter_map, eq, a1, Operand(Smi::FromInt(0)));
__ LoadRoot(t2, Heap::kSloppyArgumentsElementsMapRootIndex);
__ sw(t2, FieldMemOperand(t0, FixedArray::kMapOffset));
__ Addu(t2, a1, Operand(Smi::FromInt(2)));
__ sw(t2, FieldMemOperand(t0, FixedArray::kLengthOffset));
__ sw(cp, FieldMemOperand(t0, FixedArray::kHeaderSize + 0 * kPointerSize));
__ sll(t6, a1, 1);
__ Addu(t2, t0, Operand(t6));
__ Addu(t2, t2, Operand(kParameterMapHeaderSize));
__ sw(t2, FieldMemOperand(t0, FixedArray::kHeaderSize + 1 * kPointerSize));
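// The parameter map header is now complete. Layout sketch:
//   map    : sloppy arguments elements map
//   length : mapped parameter count + 2 (smi)
//   slot 0 : context (cp)
//   slot 1 : pointer to the backing store
// Slots 2 .. mapped parameter count + 1 receive the context indices below.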
// Copy the parameter slots and the holes in the arguments.
// We need to fill in mapped_parameter_count slots. They index the context,
// where parameters are stored in reverse order, at
// MIN_CONTEXT_SLOTS .. MIN_CONTEXT_SLOTS+parameter_count-1
// The mapped parameters thus need to get indices
// MIN_CONTEXT_SLOTS+parameter_count-1 ..
// MIN_CONTEXT_SLOTS+parameter_count-mapped_parameter_count
// We loop from right to left.
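// For example, with parameter_count == 3 and mapped_parameter_count == 2,
// parameter 0 maps to context slot MIN_CONTEXT_SLOTS + 2 and parameter 1 to
// MIN_CONTEXT_SLOTS + 1.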
Label parameters_loop, parameters_test;
__ mov(t2, a1);
__ lw(t5, MemOperand(sp, 0 * kPointerSize));
__ Addu(t5, t5, Operand(Smi::FromInt(Context::MIN_CONTEXT_SLOTS)));
__ Subu(t5, t5, Operand(a1));
__ LoadRoot(t3, Heap::kTheHoleValueRootIndex);
__ sll(t6, t2, 1);
__ Addu(a3, t0, Operand(t6));
__ Addu(a3, a3, Operand(kParameterMapHeaderSize));
// t2 = loop variable (tagged)
// a1 = mapping index (tagged)
// a3 = address of backing store (tagged)
// t0 = address of parameter map (tagged)
// t1 = temporary scratch (a.o., for address calculation)
// t3 = the hole value
__ jmp(&parameters_test);
__ bind(&parameters_loop);
__ Subu(t2, t2, Operand(Smi::FromInt(1)));
__ sll(t1, t2, 1);
__ Addu(t1, t1, Operand(kParameterMapHeaderSize - kHeapObjectTag));
__ Addu(t6, t0, t1);
__ sw(t5, MemOperand(t6));
__ Subu(t1, t1, Operand(kParameterMapHeaderSize - FixedArray::kHeaderSize));
__ Addu(t6, a3, t1);
__ sw(t3, MemOperand(t6));
__ Addu(t5, t5, Operand(Smi::FromInt(1)));
__ bind(&parameters_test);
__ Branch(&parameters_loop, ne, t2, Operand(Smi::FromInt(0)));
__ bind(&skip_parameter_map);
// a2 = argument count (tagged)
// a3 = address of backing store (tagged)
// t1 = scratch
// Copy arguments header and remaining slots (if there are any).
__ LoadRoot(t1, Heap::kFixedArrayMapRootIndex);
__ sw(t1, FieldMemOperand(a3, FixedArray::kMapOffset));
__ sw(a2, FieldMemOperand(a3, FixedArray::kLengthOffset));
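// The loop below copies the unmapped arguments (indices in the range
// [mapped parameter count, argument count)) from the stack into the backing
// store; the mapped slots were already filled with the hole above.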
Label arguments_loop, arguments_test;
__ mov(t5, a1);
__ lw(t0, MemOperand(sp, 1 * kPointerSize));
__ sll(t6, t5, 1);
__ Subu(t0, t0, Operand(t6));
__ jmp(&arguments_test);
__ bind(&arguments_loop);
__ Subu(t0, t0, Operand(kPointerSize));
__ lw(t2, MemOperand(t0, 0));
__ sll(t6, t5, 1);
__ Addu(t1, a3, Operand(t6));
__ sw(t2, FieldMemOperand(t1, FixedArray::kHeaderSize));
__ Addu(t5, t5, Operand(Smi::FromInt(1)));
__ bind(&arguments_test);
__ Branch(&arguments_loop, lt, t5, Operand(a2));
// Return and remove the on-stack parameters.
__ DropAndRet(3);
// Do the runtime call to allocate the arguments object.
// a2 = argument count (tagged)
__ bind(&runtime);
__ sw(a2, MemOperand(sp, 0 * kPointerSize)); // Patch argument count.
__ TailCallRuntime(Runtime::kNewSloppyArguments, 3, 1);
}
void LoadIndexedInterceptorStub::Generate(MacroAssembler* masm) {
// Return address is in ra.
Label slow;
Register receiver = LoadDescriptor::ReceiverRegister();
Register key = LoadDescriptor::NameRegister();
// Check that the key is an array index, that is, a Uint32.
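// kSmiTagMask | kSmiSignMask tests both conditions at once: the value is a
// smi (tag bit clear) and non-negative (sign bit clear), i.e. a valid
// Uint32 array index.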
__ And(t0, key, Operand(kSmiTagMask | kSmiSignMask));
__ Branch(&slow, ne, t0, Operand(zero_reg));
// Everything is fine, call runtime.
__ Push(receiver, key); // Receiver, key.
// Perform tail call to the entry.
__ TailCallExternalReference(
ExternalReference(IC_Utility(IC::kLoadElementWithInterceptor),
masm->isolate()),
2, 1);
__ bind(&slow);
PropertyAccessCompiler::TailCallBuiltin(
masm, PropertyAccessCompiler::MissBuiltin(Code::KEYED_LOAD_IC));
}
void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
// sp[0] : number of parameters
// sp[4] : receiver displacement
// sp[8] : function
// Check if the calling frame is an arguments adaptor frame.
Label adaptor_frame, try_allocate, runtime;
__ lw(a2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
__ lw(a3, MemOperand(a2, StandardFrameConstants::kContextOffset));
__ Branch(&adaptor_frame,
eq,
a3,
Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
// Get the length from the frame.
__ lw(a1, MemOperand(sp, 0));
__ Branch(&try_allocate);
// Patch the arguments.length and the parameters pointer.
__ bind(&adaptor_frame);
__ lw(a1, MemOperand(a2, ArgumentsAdaptorFrameConstants::kLengthOffset));
__ sw(a1, MemOperand(sp, 0));
__ sll(at, a1, kPointerSizeLog2 - kSmiTagSize);
__ Addu(a3, a2, Operand(at));
__ Addu(a3, a3, Operand(StandardFrameConstants::kCallerSPOffset));
__ sw(a3, MemOperand(sp, 1 * kPointerSize));
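// a3 is now the parameters pointer into the adaptor frame; it was patched
// over sp[4] so the copy loop below reads the arguments from there.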
// Try the new space allocation. Start out with computing the size
// of the arguments object and the elements array in words.
Label add_arguments_object;
__ bind(&try_allocate);
__ Branch(&add_arguments_object, eq, a1, Operand(zero_reg));
__ srl(a1, a1, kSmiTagSize);
__ Addu(a1, a1, Operand(FixedArray::kHeaderSize / kPointerSize));
__ bind(&add_arguments_object);
__ Addu(a1, a1, Operand(Heap::kStrictArgumentsObjectSize / kPointerSize));
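// a1 now holds the allocation size in words: the arguments object itself
// plus, when the argument count is non-zero, the FixedArray header and one
// slot per argument.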
// Do the allocation of both objects in one go.
__ Allocate(a1, v0, a2, a3, &runtime,
static_cast<AllocationFlags>(TAG_OBJECT | SIZE_IN_WORDS));
// Get the arguments boilerplate from the current native context.
__ lw(t0, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
__ lw(t0, FieldMemOperand(t0, GlobalObject::kNativeContextOffset));
__ lw(t0, MemOperand(
t0, Context::SlotOffset(Context::STRICT_ARGUMENTS_MAP_INDEX)));
__ sw(t0, FieldMemOperand(v0, JSObject::kMapOffset));
__ LoadRoot(a3, Heap::kEmptyFixedArrayRootIndex);
__ sw(a3, FieldMemOperand(v0, JSObject::kPropertiesOffset));
__ sw(a3, FieldMemOperand(v0, JSObject::kElementsOffset));
// Get the length (smi tagged) and set that as an in-object property too.
STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
__ lw(a1, MemOperand(sp, 0 * kPointerSize));
__ AssertSmi(a1);
__ sw(a1, FieldMemOperand(v0, JSObject::kHeaderSize +
Heap::kArgumentsLengthIndex * kPointerSize));
Label done;
__ Branch(&done, eq, a1, Operand(zero_reg));
// Get the parameters pointer from the stack.
__ lw(a2, MemOperand(sp, 1 * kPointerSize));
// Set up the elements pointer in the allocated arguments object and
// initialize the header in the elements fixed array.
__ Addu(t0, v0, Operand(Heap::kStrictArgumentsObjectSize));
__ sw(t0, FieldMemOperand(v0, JSObject::kElementsOffset));
__ LoadRoot(a3, Heap::kFixedArrayMapRootIndex);
__ sw(a3, FieldMemOperand(t0, FixedArray::kMapOffset));
__ sw(a1, FieldMemOperand(t0, FixedArray::kLengthOffset));
// Untag the length for the loop.
__ srl(a1, a1, kSmiTagSize);
// Copy the fixed array slots.
Label loop;
// Set up t0 to point to the first array slot.
__ Addu(t0, t0, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
__ bind(&loop);
// Pre-decrement a2 with kPointerSize on each iteration; pre-decrementing
// skips the receiver.
__ Addu(a2, a2, Operand(-kPointerSize));
__ lw(a3, MemOperand(a2));
// Post-increment t0 with kPointerSize on each iteration.
__ sw(a3, MemOperand(t0));
__ Addu(t0, t0, Operand(kPointerSize));
__ Subu(a1, a1, Operand(1));
__ Branch(&loop, ne, a1, Operand(zero_reg));
// Return and remove the on-stack parameters.
__ bind(&done);
__ DropAndRet(3);
// Do the runtime call to allocate the arguments object.
__ bind(&runtime);
__ TailCallRuntime(Runtime::kNewStrictArguments, 3, 1);
}
void RegExpExecStub::Generate(MacroAssembler* masm) {
// Jump straight to the runtime if native RegExp is not selected at compile
// time, or if the RegExp entry in generated code has been disabled at
// runtime.
#ifdef V8_INTERPRETED_REGEXP
__ TailCallRuntime(Runtime::kRegExpExecRT, 4, 1);
#else // V8_INTERPRETED_REGEXP
// Stack frame on entry.
// sp[0]: last_match_info (expected JSArray)
// sp[4]: previous index
// sp[8]: subject string
// sp[12]: JSRegExp object
const int kLastMatchInfoOffset = 0 * kPointerSize;
const int kPreviousIndexOffset = 1 * kPointerSize;
const int kSubjectOffset = 2 * kPointerSize;
const int kJSRegExpOffset = 3 * kPointerSize;
Label runtime;
// Allocation of registers for this function. These are in callee save
// registers and will be preserved by the call to the native RegExp code, as
// this code is called using the normal C calling convention. When calling
// directly from generated code the native RegExp code will not do a GC and
// therefore the contents of these registers are safe to use after the call.
// MIPS - using s0..s2, since we are not using CEntry Stub.
Register subject = s0;
Register regexp_data = s1;
Register last_match_info_elements = s2;
// Ensure that a RegExp stack is allocated.
ExternalReference address_of_regexp_stack_memory_address =
ExternalReference::address_of_regexp_stack_memory_address(
isolate());
ExternalReference address_of_regexp_stack_memory_size =
ExternalReference::address_of_regexp_stack_memory_size(isolate());
__ li(a0, Operand(address_of_regexp_stack_memory_size));
__ lw(a0, MemOperand(a0, 0));
__ Branch(&runtime, eq, a0, Operand(zero_reg));
// Check that the first argument is a JSRegExp object.
__ lw(a0, MemOperand(sp, kJSRegExpOffset));
STATIC_ASSERT(kSmiTag == 0);
__ JumpIfSmi(a0, &runtime);
__ GetObjectType(a0, a1, a1);
__ Branch(&runtime, ne, a1, Operand(JS_REGEXP_TYPE));
// Check that the RegExp has been compiled (data contains a fixed array).
__ lw(regexp_data, FieldMemOperand(a0, JSRegExp::kDataOffset));
if (FLAG_debug_code) {
__ SmiTst(regexp_data, t0);
__ Check(nz,
kUnexpectedTypeForRegExpDataFixedArrayExpected,
t0,
Operand(zero_reg));
__ GetObjectType(regexp_data, a0, a0);
__ Check(eq,
kUnexpectedTypeForRegExpDataFixedArrayExpected,
a0,
Operand(FIXED_ARRAY_TYPE));
}
// regexp_data: RegExp data (FixedArray)
// Check the type of the RegExp. Only continue if type is JSRegExp::IRREGEXP.
__ lw(a0, FieldMemOperand(regexp_data, JSRegExp::kDataTagOffset));
__ Branch(&runtime, ne, a0, Operand(Smi::FromInt(JSRegExp::IRREGEXP)));
// regexp_data: RegExp data (FixedArray)
// Check that the number of captures fits in the static offsets vector buffer.
__ lw(a2,
FieldMemOperand(regexp_data, JSRegExp::kIrregexpCaptureCountOffset));
// Check (number_of_captures + 1) * 2 <= offsets vector size
// Or number_of_captures * 2 <= offsets vector size - 2
// Multiplying by 2 comes for free since a2 is smi-tagged.
STATIC_ASSERT(kSmiTag == 0);
STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
STATIC_ASSERT(Isolate::kJSRegexpStaticOffsetsVectorSize >= 2);
__ Branch(
&runtime, hi, a2, Operand(Isolate::kJSRegexpStaticOffsetsVectorSize - 2));
// Reset offset for possibly sliced string.
__ mov(t0, zero_reg);
__ lw(subject, MemOperand(sp, kSubjectOffset));
__ JumpIfSmi(subject, &runtime);
__ mov(a3, subject); // Make a copy of the original subject string.
__ lw(a0, FieldMemOperand(subject, HeapObject::kMapOffset));
__ lbu(a0, FieldMemOperand(a0, Map::kInstanceTypeOffset));
// subject: subject string
// a3: subject string
// a0: subject string instance type
// regexp_data: RegExp data (FixedArray)
// Handle subject string according to its encoding and representation:
// (1) Sequential string? If yes, go to (5).
// (2) Anything but sequential or cons? If yes, go to (6).
// (3) Cons string. If the string is flat, replace subject with first string.
// Otherwise bailout.
// (4) Is subject external? If yes, go to (7).
// (5) Sequential string. Load regexp code according to encoding.
// (E) Carry on.
/// [...]
// Deferred code at the end of the stub:
// (6) Not a long external string? If yes, go to (8).
// (7) External string. Make it, offset-wise, look like a sequential string.
// Go to (5).
// (8) Short external string or not a string? If yes, bail out to runtime.
// (9) Sliced string. Replace subject with parent. Go to (4).
Label seq_string /* 5 */, external_string /* 7 */,
check_underlying /* 4 */, not_seq_nor_cons /* 6 */,
not_long_external /* 8 */;
// (1) Sequential string? If yes, go to (5).
__ And(a1,
a0,
Operand(kIsNotStringMask |
kStringRepresentationMask |
kShortExternalStringMask));
STATIC_ASSERT((kStringTag | kSeqStringTag) == 0);
__ Branch(&seq_string, eq, a1, Operand(zero_reg)); // Go to (5).
// (2) Anything but sequential or cons? If yes, go to (6).
STATIC_ASSERT(kConsStringTag < kExternalStringTag);
STATIC_ASSERT(kSlicedStringTag > kExternalStringTag);
STATIC_ASSERT(kIsNotStringMask > kExternalStringTag);
STATIC_ASSERT(kShortExternalStringTag > kExternalStringTag);
// Go to (6).
__ Branch(&not_seq_nor_cons, ge, a1, Operand(kExternalStringTag));
// (3) Cons string. Check that it's flat.
// Replace subject with first string and reload instance type.
__ lw(a0, FieldMemOperand(subject, ConsString::kSecondOffset));
__ LoadRoot(a1, Heap::kempty_stringRootIndex);
__ Branch(&runtime, ne, a0, Operand(a1));
__ lw(subject, FieldMemOperand(subject, ConsString::kFirstOffset));
// (4) Is subject external? If yes, go to (7).
__ bind(&check_underlying);
__ lw(a0, FieldMemOperand(subject, HeapObject::kMapOffset));
__ lbu(a0, FieldMemOperand(a0, Map::kInstanceTypeOffset));
STATIC_ASSERT(kSeqStringTag == 0);
__ And(at, a0, Operand(kStringRepresentationMask));
// The underlying external string is never a short external string.
STATIC_ASSERT(ExternalString::kMaxShortLength < ConsString::kMinLength);
STATIC_ASSERT(ExternalString::kMaxShortLength < SlicedString::kMinLength);
__ Branch(&external_string, ne, at, Operand(zero_reg)); // Go to (7).
// (5) Sequential string. Load regexp code according to encoding.
__ bind(&seq_string);
// subject: sequential subject string (or look-alike, external string)
// a3: original subject string
// Load previous index and check range before a3 is overwritten. We have to
// use a3 instead of subject here because subject might have been only made
// to look like a sequential string when it actually is an external string.
__ lw(a1, MemOperand(sp, kPreviousIndexOffset));
__ JumpIfNotSmi(a1, &runtime);
__ lw(a3, FieldMemOperand(a3, String::kLengthOffset));
__ Branch(&runtime, ls, a3, Operand(a1));
__ sra(a1, a1, kSmiTagSize); // Untag the Smi.
STATIC_ASSERT(kStringEncodingMask == 4);
STATIC_ASSERT(kOneByteStringTag == 4);
STATIC_ASSERT(kTwoByteStringTag == 0);
__ And(a0, a0, Operand(kStringEncodingMask)); // Non-zero for one-byte.
__ lw(t9, FieldMemOperand(regexp_data, JSRegExp::kDataOneByteCodeOffset));
__ sra(a3, a0, 2); // a3 is 1 for one-byte, 0 for two-byte (used below).
__ lw(t1, FieldMemOperand(regexp_data, JSRegExp::kDataUC16CodeOffset));
__ Movz(t9, t1, a0); // If UC16 (a0 is 0), replace t9 with the UC16 code in t1.
// (E) Carry on. String handling is done.
// t9: irregexp code
// Check that the irregexp code has been generated for the actual string
// encoding. If it has, the field contains a code object; otherwise it
// contains a smi (code flushing support).
__ JumpIfSmi(t9, &runtime);
// a1: previous index
// a3: encoding of subject string (1 if one_byte, 0 if two_byte);
// t9: code
// subject: Subject string
// regexp_data: RegExp data (FixedArray)
// All checks done. Now push arguments for native regexp code.
__ IncrementCounter(isolate()->counters()->regexp_entry_native(),
1, a0, a2);
// Isolates: note we add an additional parameter here (isolate pointer).
const int kRegExpExecuteArguments = 9;
const int kParameterRegisters = 4;
__ EnterExitFrame(false, kRegExpExecuteArguments - kParameterRegisters);
// Stack pointer now points to cell where return address is to be written.
// Arguments are before that on the stack or in registers, meaning we
// treat the return address as argument 5. Thus every argument after that
// needs to be shifted back by 1. Since DirectCEntryStub will handle
// allocating space for the c argument slots, we don't need to calculate
// that into the argument positions on the stack. This is how the stack will
// look (sp meaning the value of sp at this moment):
// [sp + 5] - Argument 9
// [sp + 4] - Argument 8
// [sp + 3] - Argument 7
// [sp + 2] - Argument 6
// [sp + 1] - Argument 5
// [sp + 0] - saved ra
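// Arguments 1..4 travel in registers a0..a3 per the MIPS o32 calling
// convention; only arguments 5..9 need the stack slots shown above.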
// Argument 9: Pass current isolate address.
// CFunctionArgumentOperand handles MIPS stack argument slots.
__ li(a0, Operand(ExternalReference::isolate_address(isolate())));
__ sw(a0, MemOperand(sp, 5 * kPointerSize));
// Argument 8: Indicate that this is a direct call from JavaScript.
__ li(a0, Operand(1));
__ sw(a0, MemOperand(sp, 4 * kPointerSize));
// Argument 7: Start (high end) of backtracking stack memory area.
__ li(a0, Operand(address_of_regexp_stack_memory_address));
__ lw(a0, MemOperand(a0, 0));
__ li(a2, Operand(address_of_regexp_stack_memory_size));
__ lw(a2, MemOperand(a2, 0));
__ addu(a0, a0, a2);
__ sw(a0, MemOperand(sp, 3 * kPointerSize));
// Argument 6: Set the number of capture registers to zero to force global
// regexps to behave as non-global. This does not affect non-global regexps.
__ mov(a0, zero_reg);
__ sw(a0, MemOperand(sp, 2 * kPointerSize));
// Argument 5: static offsets vector buffer.
__ li(a0, Operand(
ExternalReference::address_of_static_offsets_vector(isolate())));
__ sw(a0, MemOperand(sp, 1 * kPointerSize));
// For arguments 4 and 3, get the string length, calculate the start of the
// string data, and calculate the shift of the index (0 for one-byte and 1
// for two-byte).
__ Addu(t2, subject, Operand(SeqString::kHeaderSize - kHeapObjectTag));
__ Xor(a3, a3, Operand(1)); // 1 for 2-byte str, 0 for 1-byte.
// Load the length from the original subject string from the previous stack
// frame. Therefore we have to use fp, which points exactly to two pointer
// sizes below the previous sp. (Because creating a new stack frame pushes
// the previous fp onto the stack and moves up sp by 2 * kPointerSize.)
__ lw(subject, MemOperand(fp, kSubjectOffset + 2 * kPointerSize));
// If slice offset is not 0, load the length from the original sliced string.
// Argument 4, a3: End of string data
// Argument 3, a2: Start of string data
// Prepare start and end index of the input.
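// With the shift in a3 (0 for one-byte, 1 for two-byte) and the slice
// offset (in characters) in t0, the computation below is in effect:
//   data  = subject_data + (slice_offset   << shift)   // t0
//   start = data         + (previous_index << shift)   // a2, argument 3
//   end   = data         + (subject_length << shift)   // a3, argument 4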
__ sllv(t1, t0, a3);
__ addu(t0, t2, t1);
__ sllv(t1, a1, a3);
__ addu(a2, t0, t1);
__ lw(t2, FieldMemOperand(subject, String::kLengthOffset));
__ sra(t2, t2, kSmiTagSize);
__ sllv(t1, t2, a3);
__ addu(a3, t0, t1);
// Argument 2 (a1): Previous index.
// Already there
// Argument 1 (a0): Subject string.
__ mov(a0, subject);
// Locate the code entry and call it.
__ Addu(t9, t9, Operand(Code::kHeaderSize - kHeapObjectTag));
DirectCEntryStub stub(isolate());
stub.GenerateCall(masm, t9);
__ LeaveExitFrame(false, no_reg, true);
// v0: result
// subject: subject string (callee saved)
// regexp_data: RegExp data (callee saved)
// last_match_info_elements: Last match info elements (callee saved)
// Check the result.
Label success;
__ Branch(&success, eq, v0, Operand(1));
// We expect exactly one result since we force the called regexp to behave
// as non-global.
Label failure;
__ Branch(&failure, eq, v0, Operand(NativeRegExpMacroAssembler::FAILURE));
// If not an exception, it can only be retry. Handle that in the runtime
// system.
__ Branch(&runtime, ne, v0, Operand(NativeRegExpMacroAssembler::EXCEPTION));
// Result must now be exception. If there is no pending exception already, a
// stack overflow (on the backtrack stack) was detected in RegExp code but
// the exception has not yet been created. Handle that in the runtime system.
// TODO(592): Rerun the RegExp to get the stack overflow exception.
__ li(a1, Operand(isolate()->factory()->the_hole_value()));
__ li(a2, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
isolate())));
__ lw(v0, MemOperand(a2, 0));
__ Branch(&runtime, eq, v0, Operand(a1));
__ sw(a1, MemOperand(a2, 0)); // Clear pending exception.
// Check if the exception is a termination. If so, throw as uncatchable.
__ LoadRoot(a0, Heap::kTerminationExceptionRootIndex);
Label termination_exception;
__ Branch(&termination_exception, eq, v0, Operand(a0));
__ Throw(v0);
__ bind(&termination_exception);
__ ThrowUncatchable(v0);
__ bind(&failure);
// For failure and exception, return null.
__ li(v0, Operand(isolate()->factory()->null_value()));
__ DropAndRet(4);
// Process the result from the native regexp code.
__ bind(&success);
__ lw(a1,
FieldMemOperand(regexp_data, JSRegExp::kIrregexpCaptureCountOffset));
// Calculate number of capture registers (number_of_captures + 1) * 2.
// Multiplying by 2 comes for free since a1 is smi-tagged.
STATIC_ASSERT(kSmiTag == 0);
STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
__ Addu(a1, a1, Operand(2)); // a1 was a smi.
__ lw(a0, MemOperand(sp, kLastMatchInfoOffset));
__ JumpIfSmi(a0, &runtime);
__ GetObjectType(a0, a2, a2);
__ Branch(&runtime, ne, a2, Operand(JS_ARRAY_TYPE));
// Check that the JSArray is in fast case.
__ lw(last_match_info_elements,
FieldMemOperand(a0, JSArray::kElementsOffset));
__ lw(a0, FieldMemOperand(last_match_info_elements, HeapObject::kMapOffset));
__ LoadRoot(at, Heap::kFixedArrayMapRootIndex);
__ Branch(&runtime, ne, a0, Operand(at));
// Check that the last match info has space for the capture registers and the
// additional information.
__ lw(a0,
FieldMemOperand(last_match_info_elements, FixedArray::kLengthOffset));
__ Addu(a2, a1, Operand(RegExpImpl::kLastMatchOverhead));
__ sra(at, a0, kSmiTagSize);
__ Branch(&runtime, gt, a2, Operand(at));
// a1: number of capture registers
// subject: subject string
// Store the capture count.
__ sll(a2, a1, kSmiTagSize + kSmiShiftSize); // To smi.
__ sw(a2, FieldMemOperand(last_match_info_elements,
RegExpImpl::kLastCaptureCountOffset));
// Store last subject and last input.
__ sw(subject,
FieldMemOperand(last_match_info_elements,
RegExpImpl::kLastSubjectOffset));
__ mov(a2, subject);
__ RecordWriteField(last_match_info_elements,
RegExpImpl::kLastSubjectOffset,
subject,
t3,
kRAHasNotBeenSaved,
kDontSaveFPRegs);
__ mov(subject, a2);
__ sw(subject,
FieldMemOperand(last_match_info_elements,
RegExpImpl::kLastInputOffset));
__ RecordWriteField(last_match_info_elements,
RegExpImpl::kLastInputOffset,
subject,
t3,
kRAHasNotBeenSaved,
kDontSaveFPRegs);
// Get the static offsets vector filled by the native regexp code.
ExternalReference address_of_static_offsets_vector =
ExternalReference::address_of_static_offsets_vector(isolate());
__ li(a2, Operand(address_of_static_offsets_vector));
// a1: number of capture registers
// a2: offsets vector
Label next_capture, done;
// Capture register counter starts from number of capture registers and
// counts down until it goes below zero.
__ Addu(a0,
last_match_info_elements,
Operand(RegExpImpl::kFirstCaptureOffset - kHeapObjectTag));
__ bind(&next_capture);
__ Subu(a1, a1, Operand(1));
__ Branch(&done, lt, a1, Operand(zero_reg));
// Read the value from the static offsets vector buffer.
__ lw(a3, MemOperand(a2, 0));
__ addiu(a2, a2, kPointerSize);
// Store the smi value in the last match info.
__ sll(a3, a3, kSmiTagSize); // Convert to Smi.
__ sw(a3, MemOperand(a0, 0));
__ Branch(&next_capture, USE_DELAY_SLOT);
__ addiu(a0, a0, kPointerSize); // In branch delay slot.
__ bind(&done);
// Return last match info.
__ lw(v0, MemOperand(sp, kLastMatchInfoOffset));
__ DropAndRet(4);
// Do the runtime call to execute the regexp.
__ bind(&runtime);
__ TailCallRuntime(Runtime::kRegExpExecRT, 4, 1);
// Deferred code for string handling.
// (6) Not a long external string? If yes, go to (8).
__ bind(&not_seq_nor_cons);
// Go to (8).
__ Branch(&not_long_external, gt, a1, Operand(kExternalStringTag));
// (7) External string. Make it, offset-wise, look like a sequential string.
__ bind(&external_string);
__ lw(a0, FieldMemOperand(subject, HeapObject::kMapOffset));
__ lbu(a0, FieldMemOperand(a0, Map::kInstanceTypeOffset));
if (FLAG_debug_code) {
// Assert that we do not have a cons or slice (indirect strings) here.
// Sequential strings have already been ruled out.
__ And(at, a0, Operand(kIsIndirectStringMask));
__ Assert(eq,
kExternalStringExpectedButNotFound,
at,
Operand(zero_reg));
}
__ lw(subject,
FieldMemOperand(subject, ExternalString::kResourceDataOffset));
// Move the pointer so that offset-wise, it looks like a sequential string.
STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
__ Subu(subject,
subject,
SeqTwoByteString::kHeaderSize - kHeapObjectTag);
__ jmp(&seq_string); // Go to (5).
// (8) Short external string or not a string? If yes, bail out to runtime.
__ bind(&not_long_external);
STATIC_ASSERT(kNotStringTag != 0 && kShortExternalStringTag != 0);
__ And(at, a1, Operand(kIsNotStringMask | kShortExternalStringMask));
__ Branch(&runtime, ne, at, Operand(zero_reg));
// (9) Sliced string. Replace subject with parent. Go to (4).
// Load offset into t0 and replace subject string with parent.
__ lw(t0, FieldMemOperand(subject, SlicedString::kOffsetOffset));
__ sra(t0, t0, kSmiTagSize);
__ lw(subject, FieldMemOperand(subject, SlicedString::kParentOffset));
__ jmp(&check_underlying); // Go to (4).
#endif // V8_INTERPRETED_REGEXP
}
static void GenerateRecordCallTarget(MacroAssembler* masm) {
// Cache the called function in a feedback vector slot. Cache states
// are uninitialized, monomorphic (indicated by a JSFunction), and
// megamorphic.
// a0 : number of arguments to the construct function
// a1 : the function to call
// a2 : Feedback vector
// a3 : slot in feedback vector (Smi)
Label initialize, done, miss, megamorphic, not_array_function;
DCHECK_EQ(*TypeFeedbackVector::MegamorphicSentinel(masm->isolate()),
masm->isolate()->heap()->megamorphic_symbol());
DCHECK_EQ(*TypeFeedbackVector::UninitializedSentinel(masm->isolate()),
masm->isolate()->heap()->uninitialized_symbol());
// Load the cache state into t0.
__ sll(t0, a3, kPointerSizeLog2 - kSmiTagSize);
__ Addu(t0, a2, Operand(t0));
__ lw(t0, FieldMemOperand(t0, FixedArray::kHeaderSize));
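// t0 now holds the feedback slot contents: the target JSFunction
// (monomorphic), an AllocationSite (Array function case), the megamorphic
// symbol, or the uninitialized symbol.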
// A monomorphic cache hit or an already megamorphic state: invoke the
// function without changing the state.
__ Branch(&done, eq, t0, Operand(a1));
if (!FLAG_pretenuring_call_new) {
// If we came here, we need to see if we are the array function.
// If we didn't have a matching function, and we didn't find the megamorphic
// sentinel, then we have in the slot either some other function or an
// AllocationSite. Do a map check on the object in t0.
__ lw(t1, FieldMemOperand(t0, 0));
__ LoadRoot(at, Heap::kAllocationSiteMapRootIndex);
__ Branch(&miss, ne, t1, Operand(at));
// Make sure the function is the Array() function
__ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, t0);
__ Branch(&megamorphic, ne, a1, Operand(t0));
__ jmp(&done);
}
__ bind(&miss);
// A monomorphic miss (i.e., here the cache is not uninitialized) goes
// megamorphic.
__ LoadRoot(at, Heap::kuninitialized_symbolRootIndex);
__ Branch(&initialize, eq, t0, Operand(at));
// MegamorphicSentinel is an immortal immovable object (undefined) so no
// write-barrier is needed.
__ bind(&megamorphic);
__ sll(t0, a3, kPointerSizeLog2 - kSmiTagSize);
__ Addu(t0, a2, Operand(t0));
__ LoadRoot(at, Heap::kmegamorphic_symbolRootIndex);
__ sw(at, FieldMemOperand(t0, FixedArray::kHeaderSize));
__ jmp(&done);
// An uninitialized cache is patched with the function.
__ bind(&initialize);
if (!FLAG_pretenuring_call_new) {
// Make sure the function is the Array() function.
__ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, t0);
__ Branch(&not_array_function, ne, a1, Operand(t0));
// The target function is the Array constructor. Create an AllocationSite
// if we don't already have it, and store it in the slot.
{
FrameScope scope(masm, StackFrame::INTERNAL);
const RegList kSavedRegs =
1 << 4 | // a0
1 << 5 | // a1
1 << 6 | // a2
1 << 7; // a3
// Arguments register must be smi-tagged to call out.
__ SmiTag(a0);
__ MultiPush(kSavedRegs);
CreateAllocationSiteStub create_stub(masm->isolate());
__ CallStub(&create_stub);
__ MultiPop(kSavedRegs);
__ SmiUntag(a0);
}
__ Branch(&done);
__ bind(&not_array_function);
}
__ sll(t0, a3, kPointerSizeLog2 - kSmiTagSize);
__ Addu(t0, a2, Operand(t0));
__ Addu(t0, t0, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
__ sw(a1, MemOperand(t0, 0));
__ Push(t0, a2, a1);
__ RecordWrite(a2, t0, a1, kRAHasNotBeenSaved, kDontSaveFPRegs,
EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
__ Pop(t0, a2, a1);
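// Unlike the megamorphic symbol stored above, the function written here is
// a movable heap object, so a write barrier is required.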
__ bind(&done);
}
static void EmitContinueIfStrictOrNative(MacroAssembler* masm, Label* cont) {
__ lw(a3, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
__ lw(t0, FieldMemOperand(a3, SharedFunctionInfo::kCompilerHintsOffset));
// Do not transform the receiver for strict mode functions.
int32_t strict_mode_function_mask =
1 << (SharedFunctionInfo::kStrictModeFunction + kSmiTagSize);
// Do not transform the receiver for natives (compiler hints already in t0).
int32_t native_mask = 1 << (SharedFunctionInfo::kNative + kSmiTagSize);
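// Both flag bits live in the smi-tagged compiler hints word, hence the
// kSmiTagSize adjustment; a single masked test checks them together.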
__ And(at, t0, Operand(strict_mode_function_mask | native_mask));
__ Branch(cont, ne, at, Operand(zero_reg));
}
static void EmitSlowCase(MacroAssembler* masm,
int argc,
Label* non_function) {
// Check for function proxy.
__ Branch(non_function, ne, t0, Operand(JS_FUNCTION_PROXY_TYPE));
__ push(a1); // put proxy as additional argument
__ li(a0, Operand(argc + 1, RelocInfo::NONE32));
__ mov(a2, zero_reg);
__ GetBuiltinFunction(a1, Builtins::CALL_FUNCTION_PROXY);
{
Handle<Code> adaptor =
masm->isolate()->builtins()->ArgumentsAdaptorTrampoline();
__ Jump(adaptor, RelocInfo::CODE_TARGET);
}
// CALL_NON_FUNCTION expects the non-function callee as receiver (instead
// of the original receiver from the call site).
__ bind(non_function);
__ sw(a1, MemOperand(sp, argc * kPointerSize));
__ li(a0, Operand(argc)); // Set up the number of arguments.
__ mov(a2, zero_reg);
__ GetBuiltinFunction(a1, Builtins::CALL_NON_FUNCTION);
__ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
RelocInfo::CODE_TARGET);
}
static void EmitWrapCase(MacroAssembler* masm, int argc, Label* cont) {
// Wrap the receiver and patch it back onto the stack.
{ FrameScope frame_scope(masm, StackFrame::INTERNAL);
__ Push(a1, a3);
__ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
__ pop(a1);
}
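// The store below executes in the branch delay slot, so the wrapped
// receiver is patched onto the stack before control reaches cont.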
__ Branch(USE_DELAY_SLOT, cont);
__ sw(v0, MemOperand(sp, argc * kPointerSize));
}
static void CallFunctionNoFeedback(MacroAssembler* masm,
int argc, bool needs_checks,
bool call_as_method) {
// a1 : the function to call
Label slow, non_function, wrap, cont;
if (needs_checks) {
// Check that the function is really a JavaScript function.
// a1: pushed function (to be verified)
__ JumpIfSmi(a1, &non_function);
// Goto slow case if we do not have a function.
__ GetObjectType(a1, t0, t0);
__ Branch(&slow, ne, t0, Operand(JS_FUNCTION_TYPE));
}
// Fast-case: Invoke the function now.
// a1: pushed function
ParameterCount actual(argc);
if (call_as_method) {
if (needs_checks) {
EmitContinueIfStrictOrNative(masm, &cont);
}
// Compute the receiver in sloppy mode.
__ lw(a3, MemOperand(sp, argc * kPointerSize));
if (needs_checks) {
__ JumpIfSmi(a3, &wrap);
__ GetObjectType(a3, t0, t0);
__ Branch(&wrap, lt, t0, Operand(FIRST_SPEC_OBJECT_TYPE));