blob: 9e44af91445914c4f11f30bbb7d7dd960f454217 [file] [log] [blame]
// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include <limits.h> // For LONG_MIN, LONG_MAX.
#if V8_TARGET_ARCH_MIPS64
#include "src/base/division-by-constant.h"
#include "src/bootstrapper.h"
#include "src/codegen.h"
#include "src/debug/debug.h"
#include "src/mips64/macro-assembler-mips64.h"
#include "src/register-configuration.h"
#include "src/runtime/runtime.h"
namespace v8 {
namespace internal {
// Floating point constants.
// Bit masks and shifts used to inspect IEEE-754 values directly in integer
// registers: sign-bit masks and the masks/shifts used for NaN detection in
// both double (64-bit) and single (32-bit) precision.
const uint64_t kDoubleSignMask = Double::kSignMask;
const uint32_t kDoubleExponentShift = HeapNumber::kMantissaBits;
// Shift of the top mantissa bit (the quiet-NaN bit) for doubles.
const uint32_t kDoubleNaNShift = kDoubleExponentShift - 1;
const uint64_t kDoubleNaNMask = Double::kExponentMask | (1L << kDoubleNaNShift);
const uint32_t kSingleSignMask = kBinary32SignMask;
const uint32_t kSingleExponentMask = kBinary32ExponentMask;
const uint32_t kSingleExponentShift = kBinary32ExponentShift;
// Shift of the top mantissa bit (the quiet-NaN bit) for singles.
const uint32_t kSingleNaNShift = kSingleExponentShift - 1;
const uint32_t kSingleNaNMask = kSingleExponentMask | (1 << kSingleNaNShift);
// Constructs a MacroAssembler emitting into |buffer| of |size| bytes.
// When |create_code_object| is kYes, code_object_ is seeded with a handle to
// the undefined value so it can later be replaced by the actual code object.
MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size,
                               CodeObjectRequired create_code_object)
    : Assembler(arg_isolate, buffer, size),
      generating_stub_(false),
      has_frame_(false),
      has_double_zero_reg_set_(false) {
  if (create_code_object == CodeObjectRequired::kYes) {
    code_object_ =
        Handle<Object>::New(isolate()->heap()->undefined_value(), isolate());
  }
}
// Loads a value of representation |r| from |src| into |dst|, picking the
// correctly sized (and correctly signed) load instruction. Double values are
// not handled here.
void MacroAssembler::Load(Register dst,
                          const MemOperand& src,
                          Representation r) {
  DCHECK(!r.IsDouble());
  if (r.IsInteger8()) {
    lb(dst, src);
  } else if (r.IsUInteger8()) {
    lbu(dst, src);
  } else if (r.IsInteger16()) {
    lh(dst, src);
  } else if (r.IsUInteger16()) {
    lhu(dst, src);
  } else if (r.IsInteger32()) {
    lw(dst, src);
  } else {
    // All remaining representations use a full 64-bit load.
    ld(dst, src);
  }
}
// Stores |src| to |dst| using the store instruction matching representation
// |r|. In debug code, tagged stores assert that the value matches the
// declared representation (heap object vs. smi). Doubles are not handled.
void MacroAssembler::Store(Register src,
                           const MemOperand& dst,
                           Representation r) {
  DCHECK(!r.IsDouble());
  if (r.IsInteger8() || r.IsUInteger8()) {
    sb(src, dst);
  } else if (r.IsInteger16() || r.IsUInteger16()) {
    sh(src, dst);
  } else if (r.IsInteger32()) {
    sw(src, dst);
  } else {
    if (r.IsHeapObject()) {
      AssertNotSmi(src);
    } else if (r.IsSmi()) {
      AssertSmi(src);
    }
    // Full 64-bit store for tagged/word-sized values.
    sd(src, dst);
  }
}
// Loads the root-list entry at |index| into |destination|. s6 holds the
// roots array base pointer.
void MacroAssembler::LoadRoot(Register destination,
                              Heap::RootListIndex index) {
  ld(destination, MemOperand(s6, index << kPointerSizeLog2));
}
// Conditional variant: performs the root load only if |cond| holds for
// (src1, src2). The negated branch skips over the single load instruction.
void MacroAssembler::LoadRoot(Register destination,
                              Heap::RootListIndex index,
                              Condition cond,
                              Register src1, const Operand& src2) {
  Branch(2, NegateCondition(cond), src1, src2);
  ld(destination, MemOperand(s6, index << kPointerSizeLog2));
}
// Stores |source| into the root-list entry at |index| (s6 holds the roots
// array base). Only roots that may be written after initialization are legal.
void MacroAssembler::StoreRoot(Register source,
                               Heap::RootListIndex index) {
  DCHECK(Heap::RootCanBeWrittenAfterInitialization(index));
  sd(source, MemOperand(s6, index << kPointerSizeLog2));
}
// Conditional variant: performs the root store only if |cond| holds for
// (src1, src2). The negated branch skips over the single store instruction.
void MacroAssembler::StoreRoot(Register source,
                               Heap::RootListIndex index,
                               Condition cond,
                               Register src1, const Operand& src2) {
  DCHECK(Heap::RootCanBeWrittenAfterInitialization(index));
  Branch(2, NegateCondition(cond), src1, src2);
  sd(source, MemOperand(s6, index << kPointerSizeLog2));
}
// Pushes ra and fp (plus an optional frame marker) and makes fp point at the
// saved-fp slot, establishing a common frame.
void MacroAssembler::PushCommonFrame(Register marker_reg) {
  if (marker_reg.is_valid()) {
    Push(ra, fp, marker_reg);
    // fp must point at the saved fp, one slot above the marker.
    Daddu(fp, sp, Operand(kPointerSize));
  } else {
    Push(ra, fp);
    mov(fp, sp);
  }
}
// Pops the frame established by PushCommonFrame, including the marker slot
// when a marker register is supplied.
void MacroAssembler::PopCommonFrame(Register marker_reg) {
  if (!marker_reg.is_valid()) {
    Pop(ra, fp);
    return;
  }
  Pop(ra, fp, marker_reg);
}
// Pushes a standard frame: ra, fp, context, and optionally the function
// register, then sets fp to the standard frame-pointer position.
void MacroAssembler::PushStandardFrame(Register function_reg) {
  int offset = -StandardFrameConstants::kContextOffset;
  if (function_reg.is_valid()) {
    Push(ra, fp, cp, function_reg);
    offset += kPointerSize;  // Account for the extra function slot below fp.
  } else {
    Push(ra, fp, cp);
  }
  Daddu(fp, sp, Operand(offset));
}
// Push and pop all registers that can hold pointers.
// Pushes the safepoint-saved registers, padding the stack so the block
// layout matches kNumSafepointRegisters slots.
void MacroAssembler::PushSafepointRegisters() {
  // Safepoints expect a block of kNumSafepointRegisters values on the
  // stack, so adjust the stack for unsaved registers.
  const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
  DCHECK(num_unsaved >= 0);
  if (num_unsaved > 0) {
    // Reserve slots for the registers that are not actually saved.
    Dsubu(sp, sp, Operand(num_unsaved * kPointerSize));
  }
  MultiPush(kSafepointSavedRegisters);
}
// Reverses PushSafepointRegisters: restores the saved registers and then
// releases the padding slots reserved for unsaved registers.
void MacroAssembler::PopSafepointRegisters() {
  const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
  MultiPop(kSafepointSavedRegisters);
  if (num_unsaved > 0) {
    Daddu(sp, sp, Operand(num_unsaved * kPointerSize));
  }
}
// Stores |src| into the stack slot where register |dst| was saved by
// PushSafepointRegisters.
void MacroAssembler::StoreToSafepointRegisterSlot(Register src, Register dst) {
  sd(src, SafepointRegisterSlot(dst));
}
// Loads into |dst| the value saved for register |src| by
// PushSafepointRegisters.
void MacroAssembler::LoadFromSafepointRegisterSlot(Register dst, Register src) {
  ld(dst, SafepointRegisterSlot(src));
}
// Maps a register code to its index in the safepoint register block.
int MacroAssembler::SafepointRegisterStackIndex(int reg_code) {
  // The registers are pushed starting with the highest encoding,
  // which means that lowest encodings are closest to the stack pointer.
  return kSafepointRegisterStackIndexMap[reg_code];
}
// Returns the stack slot (relative to sp) holding |reg|'s safepoint value.
MemOperand MacroAssembler::SafepointRegisterSlot(Register reg) {
  return MemOperand(sp, SafepointRegisterStackIndex(reg.code()) * kPointerSize);
}
// Slot for |reg| when doubles are also saved. Not implemented on MIPS64 —
// UNIMPLEMENTED_MIPS() aborts; the computation below is the intended layout
// (GP registers pushed above the doubles block).
MemOperand MacroAssembler::SafepointRegistersAndDoublesSlot(Register reg) {
  UNIMPLEMENTED_MIPS();
  // General purpose registers are pushed last on the stack.
  int doubles_size = DoubleRegister::kMaxNumRegisters * kDoubleSize;
  int register_offset = SafepointRegisterStackIndex(reg.code()) * kPointerSize;
  return MemOperand(sp, doubles_size + register_offset);
}
// Branches to |branch| depending on whether |object| is in new space:
// cc == eq branches when it is, cc == ne when it is not. Clobbers |scratch|.
void MacroAssembler::InNewSpace(Register object,
                                Register scratch,
                                Condition cc,
                                Label* branch) {
  DCHECK(cc == eq || cc == ne);
  CheckPageFlag(object, scratch, MemoryChunk::kIsInNewSpaceMask, cc, branch);
}
// Clobbers object, dst, value, and ra, if (ra_status == kRAHasBeenSaved)
// The register 'object' contains a heap object pointer. The heap object
// tag is shifted away.
// Emits the write barrier for a store of |value| into the field at |offset|
// within |object|. |dst| is clobbered with the field's address. Delegates to
// RecordWrite after the smi fast-path check.
void MacroAssembler::RecordWriteField(
    Register object,
    int offset,
    Register value,
    Register dst,
    RAStatus ra_status,
    SaveFPRegsMode save_fp,
    RememberedSetAction remembered_set_action,
    SmiCheck smi_check,
    PointersToHereCheck pointers_to_here_check_for_value) {
  DCHECK(!AreAliased(value, dst, t8, object));
  // First, check if a write barrier is even needed. The tests below
  // catch stores of Smis.
  Label done;
  // Skip barrier if writing a smi.
  if (smi_check == INLINE_SMI_CHECK) {
    JumpIfSmi(value, &done);
  }
  // Although the object register is tagged, the offset is relative to the
  // start of the object, so the offset must be a multiple of kPointerSize.
  DCHECK(IsAligned(offset, kPointerSize));
  // Compute the untagged address of the field being written.
  Daddu(dst, object, Operand(offset - kHeapObjectTag));
  if (emit_debug_code()) {
    Label ok;
    And(t8, dst, Operand((1 << kPointerSizeLog2) - 1));
    Branch(&ok, eq, t8, Operand(zero_reg));
    stop("Unaligned cell in write barrier");
    bind(&ok);
  }
  RecordWrite(object,
              dst,
              value,
              ra_status,
              save_fp,
              remembered_set_action,
              OMIT_SMI_CHECK,  // Any requested smi check was done above.
              pointers_to_here_check_for_value);
  bind(&done);
  // Clobber clobbered input registers when running with the debug-code flag
  // turned on to provoke errors.
  if (emit_debug_code()) {
    li(value, Operand(bit_cast<int64_t>(kZapValue + 4)));
    li(dst, Operand(bit_cast<int64_t>(kZapValue + 8)));
  }
}
// Clobbers object, dst, map, and ra, if (ra_status == kRAHasBeenSaved)
// Emits the write barrier for storing |map| into |object|'s map slot.
// |dst| is clobbered with the map slot's address. Only the incremental
// marking barrier is needed (maps are never in new space), so the
// remembered set is always omitted.
void MacroAssembler::RecordWriteForMap(Register object,
                                       Register map,
                                       Register dst,
                                       RAStatus ra_status,
                                       SaveFPRegsMode fp_mode) {
  if (emit_debug_code()) {
    // Verify that |map| really is a map (its own map is the meta map).
    DCHECK(!dst.is(at));
    ld(dst, FieldMemOperand(map, HeapObject::kMapOffset));
    Check(eq,
          kWrongAddressOrValuePassedToRecordWrite,
          dst,
          Operand(isolate()->factory()->meta_map()));
  }
  if (!FLAG_incremental_marking) {
    return;
  }
  if (emit_debug_code()) {
    // Verify that |object|'s map slot already contains |map|.
    ld(at, FieldMemOperand(object, HeapObject::kMapOffset));
    Check(eq,
          kWrongAddressOrValuePassedToRecordWrite,
          map,
          Operand(at));
  }
  Label done;
  // A single check of the map's pages interesting flag suffices, since it is
  // only set during incremental collection, and then it's also guaranteed that
  // the from object's page's interesting flag is also set. This optimization
  // relies on the fact that maps can never be in new space.
  CheckPageFlag(map,
                map, // Used as scratch.
                MemoryChunk::kPointersToHereAreInterestingMask,
                eq,
                &done);
  // Compute the untagged address of the map slot.
  Daddu(dst, object, Operand(HeapObject::kMapOffset - kHeapObjectTag));
  if (emit_debug_code()) {
    Label ok;
    And(at, dst, Operand((1 << kPointerSizeLog2) - 1));
    Branch(&ok, eq, at, Operand(zero_reg));
    stop("Unaligned cell in write barrier");
    bind(&ok);
  }
  // Record the actual write.
  if (ra_status == kRAHasNotBeenSaved) {
    push(ra);
  }
  RecordWriteStub stub(isolate(), object, map, dst, OMIT_REMEMBERED_SET,
                       fp_mode);
  CallStub(&stub);
  if (ra_status == kRAHasNotBeenSaved) {
    pop(ra);
  }
  bind(&done);
  // Count number of write barriers in generated code.
  isolate()->counters()->write_barriers_static()->Increment();
  IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1, at, dst);
  // Clobber clobbered registers when running with the debug-code flag
  // turned on to provoke errors.
  if (emit_debug_code()) {
    li(dst, Operand(bit_cast<int64_t>(kZapValue + 12)));
    li(map, Operand(bit_cast<int64_t>(kZapValue + 16)));
  }
}
// Clobbers object, address, value, and ra, if (ra_status == kRAHasBeenSaved)
// The register 'object' contains a heap object pointer. The heap object
// tag is shifted away.
// Emits the generic write barrier: |value| was stored at |address| inside
// |object|. Skips the barrier for smis and for stores where neither the
// value's nor the object's page flags require one, otherwise calls
// RecordWriteStub. Clobbers address and value in debug code.
void MacroAssembler::RecordWrite(
    Register object,
    Register address,
    Register value,
    RAStatus ra_status,
    SaveFPRegsMode fp_mode,
    RememberedSetAction remembered_set_action,
    SmiCheck smi_check,
    PointersToHereCheck pointers_to_here_check_for_value) {
  DCHECK(!AreAliased(object, address, value, t8));
  DCHECK(!AreAliased(object, address, value, t9));
  if (emit_debug_code()) {
    // Verify that *address currently holds |value|.
    ld(at, MemOperand(address));
    Assert(
        eq, kWrongAddressOrValuePassedToRecordWrite, at, Operand(value));
  }
  if (remembered_set_action == OMIT_REMEMBERED_SET &&
      !FLAG_incremental_marking) {
    // Neither barrier component is needed.
    return;
  }
  // First, check if a write barrier is even needed. The tests below
  // catch stores of smis and stores into the young generation.
  Label done;
  if (smi_check == INLINE_SMI_CHECK) {
    DCHECK_EQ(0, kSmiTag);
    JumpIfSmi(value, &done);
  }
  if (pointers_to_here_check_for_value != kPointersToHereAreAlwaysInteresting) {
    CheckPageFlag(value,
                  value, // Used as scratch.
                  MemoryChunk::kPointersToHereAreInterestingMask,
                  eq,
                  &done);
  }
  CheckPageFlag(object,
                value, // Used as scratch.
                MemoryChunk::kPointersFromHereAreInterestingMask,
                eq,
                &done);
  // Record the actual write.
  if (ra_status == kRAHasNotBeenSaved) {
    push(ra);
  }
  RecordWriteStub stub(isolate(), object, value, address, remembered_set_action,
                       fp_mode);
  CallStub(&stub);
  if (ra_status == kRAHasNotBeenSaved) {
    pop(ra);
  }
  bind(&done);
  // Count number of write barriers in generated code.
  isolate()->counters()->write_barriers_static()->Increment();
  IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1, at,
                   value);
  // Clobber clobbered registers when running with the debug-code flag
  // turned on to provoke errors.
  if (emit_debug_code()) {
    li(address, Operand(bit_cast<int64_t>(kZapValue + 12)));
    li(value, Operand(bit_cast<int64_t>(kZapValue + 16)));
  }
}
// Write barrier for a store into a JSFunction's code-entry field. Uses a
// C call into the incremental marker instead of RecordWriteStub. Register
// assignment is fixed: js_function = a1, code_entry = a4, scratch = a5.
void MacroAssembler::RecordWriteCodeEntryField(Register js_function,
                                               Register code_entry,
                                               Register scratch) {
  const int offset = JSFunction::kCodeEntryOffset;
  // Since a code entry (value) is always in old space, we don't need to update
  // remembered set. If incremental marking is off, there is nothing for us to
  // do.
  if (!FLAG_incremental_marking) return;
  DCHECK(js_function.is(a1));
  DCHECK(code_entry.is(a4));
  DCHECK(scratch.is(a5));
  AssertNotSmi(js_function);
  if (emit_debug_code()) {
    // Verify that the field currently holds |code_entry|.
    Daddu(scratch, js_function, Operand(offset - kHeapObjectTag));
    ld(at, MemOperand(scratch));
    Assert(eq, kWrongAddressOrValuePassedToRecordWrite, at,
           Operand(code_entry));
  }
  // First, check if a write barrier is even needed. The tests below
  // catch stores of Smis and stores into young gen.
  Label done;
  CheckPageFlag(code_entry, scratch,
                MemoryChunk::kPointersToHereAreInterestingMask, eq, &done);
  CheckPageFlag(js_function, scratch,
                MemoryChunk::kPointersFromHereAreInterestingMask, eq, &done);
  // Address of the code-entry field, passed to the C function below.
  const Register dst = scratch;
  Daddu(dst, js_function, Operand(offset - kHeapObjectTag));
  // Save caller-saved registers. js_function and code_entry are in the
  // caller-saved register list.
  DCHECK(kJSCallerSaved & js_function.bit());
  DCHECK(kJSCallerSaved & code_entry.bit());
  MultiPush(kJSCallerSaved | ra.bit());
  int argument_count = 3;
  PrepareCallCFunction(argument_count, code_entry);
  Move(a0, js_function);
  Move(a1, dst);
  li(a2, Operand(ExternalReference::isolate_address(isolate())));
  {
    AllowExternalCallThatCantCauseGC scope(this);
    CallCFunction(
        ExternalReference::incremental_marking_record_write_code_entry_function(
            isolate()),
        argument_count);
  }
  // Restore caller-saved registers.
  MultiPop(kJSCallerSaved | ra.bit());
  bind(&done);
}
// Appends |address| to the store buffer and, on overflow, calls the
// StoreBufferOverflowStub. Depending on |and_then|, either falls through at
// the end or returns. Clobbers scratch and t8.
void MacroAssembler::RememberedSetHelper(Register object, // For debug tests.
                                         Register address,
                                         Register scratch,
                                         SaveFPRegsMode fp_mode,
                                         RememberedSetFinalAction and_then) {
  Label done;
  if (emit_debug_code()) {
    // The remembered set should only hold old-space addresses.
    Label ok;
    JumpIfNotInNewSpace(object, scratch, &ok);
    stop("Remembered set pointer is in new space");
    bind(&ok);
  }
  // Load store buffer top.
  ExternalReference store_buffer =
      ExternalReference::store_buffer_top(isolate());
  li(t8, Operand(store_buffer));
  ld(scratch, MemOperand(t8));
  // Store pointer to buffer and increment buffer top.
  sd(address, MemOperand(scratch));
  Daddu(scratch, scratch, kPointerSize);
  // Write back new top of buffer.
  sd(scratch, MemOperand(t8));
  // Call stub on end of buffer.
  // Check for end of buffer.
  And(t8, scratch, Operand(StoreBuffer::kStoreBufferMask));
  DCHECK(!scratch.is(t8));
  if (and_then == kFallThroughAtEnd) {
    Branch(&done, ne, t8, Operand(zero_reg));
  } else {
    DCHECK(and_then == kReturnAtEnd);
    // Not at the buffer end yet: return to the caller directly.
    Ret(ne, t8, Operand(zero_reg));
  }
  // Buffer overflowed: flush it via the stub.
  push(ra);
  StoreBufferOverflowStub store_buffer_overflow(isolate(), fp_mode);
  CallStub(&store_buffer_overflow);
  pop(ra);
  bind(&done);
  if (and_then == kReturnAtEnd) {
    Ret();
  }
}
// -----------------------------------------------------------------------------
// Allocation support.
// Compute the hash code from the untagged key. This must be kept in sync with
// ComputeIntegerHash in utils.h and KeyedLoadGenericStub in
// code-stub-hydrogen.cc
// Computes the integer hash of the untagged key in |reg0| (in place),
// seeded with the heap's hash seed. Clobbers |scratch| and at.
void MacroAssembler::GetNumberHash(Register reg0, Register scratch) {
  // First of all we assign the hash seed to scratch.
  LoadRoot(scratch, Heap::kHashSeedRootIndex);
  SmiUntag(scratch);
  // Xor original key with a seed.
  xor_(reg0, reg0, scratch);
  // Compute the hash code from the untagged key. This must be kept in sync
  // with ComputeIntegerHash in utils.h.
  //
  // hash = ~hash + (hash << 15);
  // The algorithm uses 32-bit integer values.
  nor(scratch, reg0, zero_reg);
  Lsa(reg0, scratch, reg0, 15);
  // hash = hash ^ (hash >> 12);
  srl(at, reg0, 12);
  xor_(reg0, reg0, at);
  // hash = hash + (hash << 2);
  Lsa(reg0, reg0, reg0, 2);
  // hash = hash ^ (hash >> 4);
  srl(at, reg0, 4);
  xor_(reg0, reg0, at);
  // hash = hash * 2057;  (2057 = 1 + (1 << 3) + (1 << 11))
  sll(scratch, reg0, 11);
  Lsa(reg0, reg0, reg0, 3);
  addu(reg0, reg0, scratch);
  // hash = hash ^ (hash >> 16);
  srl(at, reg0, 16);
  xor_(reg0, reg0, at);
  // Keep only the low 30 bits of the hash.
  And(reg0, reg0, Operand(0x3fffffff));
}
// ---------------------------------------------------------------------------
// Instruction macros.
// 32-bit add: rd = rs + rt. Uses addiu when the immediate fits in 16 signed
// bits and needs no relocation; otherwise materializes the operand in 'at'.
void MacroAssembler::Addu(Register rd, Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    addu(rd, rs, rt.rm());
  } else {
    if (is_int16(rt.imm64_) && !MustUseReg(rt.rmode_)) {
      addiu(rd, rs, static_cast<int32_t>(rt.imm64_));
    } else {
      // li handles the relocation.
      DCHECK(!rs.is(at));
      li(at, rt);
      addu(rd, rs, at);
    }
  }
}
// 64-bit add: rd = rs + rt. Uses daddiu when the immediate fits in 16 signed
// bits and needs no relocation; otherwise materializes the operand in 'at'.
void MacroAssembler::Daddu(Register rd, Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    daddu(rd, rs, rt.rm());
  } else {
    if (is_int16(rt.imm64_) && !MustUseReg(rt.rmode_)) {
      daddiu(rd, rs, static_cast<int32_t>(rt.imm64_));
    } else {
      // li handles the relocation.
      DCHECK(!rs.is(at));
      li(at, rt);
      daddu(rd, rs, at);
    }
  }
}
// 32-bit subtract: rd = rs - rt. There is no subiu instruction, so an
// immediate subtrahend is emitted as addiu(rd, rs, -imm); otherwise the
// operand is materialized in 'at' and subu is used.
void MacroAssembler::Subu(Register rd, Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    subu(rd, rs, rt.rm());
  } else {
    // The *negated* immediate must also fit the signed 16-bit field of
    // addiu. imm == -32768 passes is_int16 but negates to +32768, which
    // does not fit, so it must take the li + subu path below.
    if (is_int16(rt.imm64_) && rt.imm64_ != -32768 && !MustUseReg(rt.rmode_)) {
      addiu(rd, rs, static_cast<int32_t>(
                        -rt.imm64_)); // No subiu instr, use addiu(x, y, -imm).
    } else {
      // li handles the relocation.
      DCHECK(!rs.is(at));
      li(at, rt);
      subu(rd, rs, at);
    }
  }
}
// 64-bit subtract: rd = rs - rt. There is no dsubiu instruction, so an
// immediate subtrahend is emitted as daddiu(rd, rs, -imm); otherwise the
// operand is materialized in 'at' and dsubu is used.
void MacroAssembler::Dsubu(Register rd, Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    dsubu(rd, rs, rt.rm());
  } else {
    // The *negated* immediate must also fit the signed 16-bit field of
    // daddiu. imm == -32768 passes is_int16 but negates to +32768, which
    // does not fit, so it must take the li + dsubu path below.
    if (is_int16(rt.imm64_) && rt.imm64_ != -32768 && !MustUseReg(rt.rmode_)) {
      daddiu(rd, rs,
             static_cast<int32_t>(
                 -rt.imm64_)); // No dsubiu instr, use daddiu(x, y, -imm).
    } else {
      // li handles the relocation.
      DCHECK(!rs.is(at));
      li(at, rt);
      dsubu(rd, rs, at);
    }
  }
}
// 32-bit multiply, low word: rd = rs * rt.
void MacroAssembler::Mul(Register rd, Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    mul(rd, rs, rt.rm());
    return;
  }
  // Immediate operand: materialize it in 'at' first (li handles relocation).
  DCHECK(!rs.is(at));
  li(at, rt);
  mul(rd, rs, at);
}
// Signed 32-bit multiply, high word: rd = high32(rs * rt). Pre-r6 uses
// mult/mfhi; r6 has the single muh instruction.
void MacroAssembler::Mulh(Register rd, Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    if (kArchVariant != kMips64r6) {
      mult(rs, rt.rm());
      mfhi(rd);
    } else {
      muh(rd, rs, rt.rm());
    }
  } else {
    // li handles the relocation.
    DCHECK(!rs.is(at));
    li(at, rt);
    if (kArchVariant != kMips64r6) {
      mult(rs, at);
      mfhi(rd);
    } else {
      muh(rd, rs, at);
    }
  }
}
// Unsigned 32-bit multiply, high word: rd = high32(rs * rt). Pre-r6 uses
// multu/mfhi; r6 has the single muhu instruction.
void MacroAssembler::Mulhu(Register rd, Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    if (kArchVariant != kMips64r6) {
      multu(rs, rt.rm());
      mfhi(rd);
    } else {
      muhu(rd, rs, rt.rm());
    }
  } else {
    // li handles the relocation.
    DCHECK(!rs.is(at));
    li(at, rt);
    if (kArchVariant != kMips64r6) {
      multu(rs, at);
      mfhi(rd);
    } else {
      muhu(rd, rs, at);
    }
  }
}
// 64-bit multiply, low word: rd = rs * rt. r6 has the single dmul
// instruction; pre-r6 uses dmult/mflo.
void MacroAssembler::Dmul(Register rd, Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    if (kArchVariant == kMips64r6) {
      dmul(rd, rs, rt.rm());
    } else {
      dmult(rs, rt.rm());
      mflo(rd);
    }
  } else {
    // li handles the relocation.
    DCHECK(!rs.is(at));
    li(at, rt);
    if (kArchVariant == kMips64r6) {
      dmul(rd, rs, at);
    } else {
      dmult(rs, at);
      mflo(rd);
    }
  }
}
// Signed 64-bit multiply, high word: rd = high64(rs * rt). r6 has the single
// dmuh instruction; pre-r6 uses dmult/mfhi.
void MacroAssembler::Dmulh(Register rd, Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    if (kArchVariant == kMips64r6) {
      dmuh(rd, rs, rt.rm());
    } else {
      dmult(rs, rt.rm());
      mfhi(rd);
    }
  } else {
    // li handles the relocation.
    DCHECK(!rs.is(at));
    li(at, rt);
    if (kArchVariant == kMips64r6) {
      dmuh(rd, rs, at);
    } else {
      dmult(rs, at);
      mfhi(rd);
    }
  }
}
// 32-bit multiply into HI/LO: HI/LO = rs * rt.
void MacroAssembler::Mult(Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    mult(rs, rt.rm());
    return;
  }
  // Immediate operand: materialize it in 'at' first (li handles relocation).
  DCHECK(!rs.is(at));
  li(at, rt);
  mult(rs, at);
}
// 64-bit multiply into HI/LO: HI/LO = rs * rt.
void MacroAssembler::Dmult(Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    dmult(rs, rt.rm());
    return;
  }
  // Immediate operand: materialize it in 'at' first (li handles relocation).
  DCHECK(!rs.is(at));
  li(at, rt);
  dmult(rs, at);
}
// Unsigned 32-bit multiply into HI/LO: HI/LO = rs * rt.
void MacroAssembler::Multu(Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    multu(rs, rt.rm());
    return;
  }
  // Immediate operand: materialize it in 'at' first (li handles relocation).
  DCHECK(!rs.is(at));
  li(at, rt);
  multu(rs, at);
}
// Unsigned 64-bit multiply into HI/LO: HI/LO = rs * rt.
void MacroAssembler::Dmultu(Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    dmultu(rs, rt.rm());
    return;
  }
  // Immediate operand: materialize it in 'at' first (li handles relocation).
  DCHECK(!rs.is(at));
  li(at, rt);
  dmultu(rs, at);
}
// 32-bit divide into HI/LO: LO = rs / rt, HI = rs % rt.
void MacroAssembler::Div(Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    div(rs, rt.rm());
    return;
  }
  // Immediate operand: materialize it in 'at' first (li handles relocation).
  DCHECK(!rs.is(at));
  li(at, rt);
  div(rs, at);
}
// 32-bit quotient: res = rs / rt. Pre-r6 uses div/mflo; r6 has the
// three-operand div instruction.
void MacroAssembler::Div(Register res, Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    if (kArchVariant != kMips64r6) {
      div(rs, rt.rm());
      mflo(res);
    } else {
      div(res, rs, rt.rm());
    }
  } else {
    // li handles the relocation.
    DCHECK(!rs.is(at));
    li(at, rt);
    if (kArchVariant != kMips64r6) {
      div(rs, at);
      mflo(res);
    } else {
      div(res, rs, at);
    }
  }
}
// Signed 32-bit remainder: rd = rs % rt. Pre-r6 uses div/mfhi; r6 has the
// single mod instruction.
void MacroAssembler::Mod(Register rd, Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    if (kArchVariant != kMips64r6) {
      div(rs, rt.rm());
      mfhi(rd);
    } else {
      mod(rd, rs, rt.rm());
    }
  } else {
    // li handles the relocation.
    DCHECK(!rs.is(at));
    li(at, rt);
    if (kArchVariant != kMips64r6) {
      div(rs, at);
      mfhi(rd);
    } else {
      mod(rd, rs, at);
    }
  }
}
// Unsigned 32-bit remainder: rd = rs % rt. Pre-r6 uses divu/mfhi; r6 has the
// single modu instruction.
void MacroAssembler::Modu(Register rd, Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    if (kArchVariant != kMips64r6) {
      divu(rs, rt.rm());
      mfhi(rd);
    } else {
      modu(rd, rs, rt.rm());
    }
  } else {
    // li handles the relocation.
    DCHECK(!rs.is(at));
    li(at, rt);
    if (kArchVariant != kMips64r6) {
      divu(rs, at);
      mfhi(rd);
    } else {
      modu(rd, rs, at);
    }
  }
}
// 64-bit divide into HI/LO: LO = rs / rt, HI = rs % rt.
void MacroAssembler::Ddiv(Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    ddiv(rs, rt.rm());
    return;
  }
  // Immediate operand: materialize it in 'at' first (li handles relocation).
  DCHECK(!rs.is(at));
  li(at, rt);
  ddiv(rs, at);
}
// 64-bit quotient: rd = rs / rt. Pre-r6 uses ddiv/mflo; r6 has the
// three-operand ddiv instruction.
void MacroAssembler::Ddiv(Register rd, Register rs, const Operand& rt) {
  if (kArchVariant != kMips64r6) {
    if (rt.is_reg()) {
      ddiv(rs, rt.rm());
      mflo(rd);
    } else {
      // li handles the relocation.
      DCHECK(!rs.is(at));
      li(at, rt);
      ddiv(rs, at);
      mflo(rd);
    }
  } else {
    if (rt.is_reg()) {
      ddiv(rd, rs, rt.rm());
    } else {
      // li handles the relocation.
      DCHECK(!rs.is(at));
      li(at, rt);
      ddiv(rd, rs, at);
    }
  }
}
// Unsigned 32-bit divide into HI/LO: LO = rs / rt, HI = rs % rt.
void MacroAssembler::Divu(Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    divu(rs, rt.rm());
    return;
  }
  // Immediate operand: materialize it in 'at' first (li handles relocation).
  DCHECK(!rs.is(at));
  li(at, rt);
  divu(rs, at);
}
// Unsigned 32-bit quotient: res = rs / rt. Pre-r6 uses divu/mflo; r6 has the
// three-operand divu instruction.
void MacroAssembler::Divu(Register res, Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    if (kArchVariant != kMips64r6) {
      divu(rs, rt.rm());
      mflo(res);
    } else {
      divu(res, rs, rt.rm());
    }
  } else {
    // li handles the relocation.
    DCHECK(!rs.is(at));
    li(at, rt);
    if (kArchVariant != kMips64r6) {
      divu(rs, at);
      mflo(res);
    } else {
      divu(res, rs, at);
    }
  }
}
// Unsigned 64-bit divide into HI/LO: LO = rs / rt, HI = rs % rt.
void MacroAssembler::Ddivu(Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    ddivu(rs, rt.rm());
    return;
  }
  // Immediate operand: materialize it in 'at' first (li handles relocation).
  DCHECK(!rs.is(at));
  li(at, rt);
  ddivu(rs, at);
}
// Unsigned 64-bit quotient: res = rs / rt. Pre-r6 uses ddivu/mflo; r6 has
// the three-operand ddivu instruction.
void MacroAssembler::Ddivu(Register res, Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    if (kArchVariant != kMips64r6) {
      ddivu(rs, rt.rm());
      mflo(res);
    } else {
      ddivu(res, rs, rt.rm());
    }
  } else {
    // li handles the relocation.
    DCHECK(!rs.is(at));
    li(at, rt);
    if (kArchVariant != kMips64r6) {
      ddivu(rs, at);
      mflo(res);
    } else {
      ddivu(res, rs, at);
    }
  }
}
// Signed 64-bit remainder: rd = rs % rt. Pre-r6 uses ddiv/mfhi; r6 has the
// single dmod instruction.
void MacroAssembler::Dmod(Register rd, Register rs, const Operand& rt) {
  if (kArchVariant != kMips64r6) {
    if (rt.is_reg()) {
      ddiv(rs, rt.rm());
      mfhi(rd);
    } else {
      // li handles the relocation.
      DCHECK(!rs.is(at));
      li(at, rt);
      ddiv(rs, at);
      mfhi(rd);
    }
  } else {
    if (rt.is_reg()) {
      dmod(rd, rs, rt.rm());
    } else {
      // li handles the relocation.
      DCHECK(!rs.is(at));
      li(at, rt);
      dmod(rd, rs, at);
    }
  }
}
// Unsigned 64-bit remainder: rd = rs % rt. Pre-r6 uses ddivu/mfhi; r6 has
// the single dmodu instruction.
void MacroAssembler::Dmodu(Register rd, Register rs, const Operand& rt) {
  if (kArchVariant != kMips64r6) {
    if (rt.is_reg()) {
      ddivu(rs, rt.rm());
      mfhi(rd);
    } else {
      // li handles the relocation.
      DCHECK(!rs.is(at));
      li(at, rt);
      ddivu(rs, at);
      mfhi(rd);
    }
  } else {
    if (rt.is_reg()) {
      dmodu(rd, rs, rt.rm());
    } else {
      // li handles the relocation.
      DCHECK(!rs.is(at));
      li(at, rt);
      dmodu(rd, rs, at);
    }
  }
}
// Bitwise AND: rd = rs & rt. Uses andi for immediates that fit 16 unsigned
// bits (andi zero-extends); otherwise materializes the operand in 'at'.
void MacroAssembler::And(Register rd, Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    and_(rd, rs, rt.rm());
  } else {
    if (is_uint16(rt.imm64_) && !MustUseReg(rt.rmode_)) {
      andi(rd, rs, static_cast<int32_t>(rt.imm64_));
    } else {
      // li handles the relocation.
      DCHECK(!rs.is(at));
      li(at, rt);
      and_(rd, rs, at);
    }
  }
}
// Bitwise OR: rd = rs | rt. Uses ori for immediates that fit 16 unsigned
// bits (ori zero-extends); otherwise materializes the operand in 'at'.
void MacroAssembler::Or(Register rd, Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    or_(rd, rs, rt.rm());
  } else {
    if (is_uint16(rt.imm64_) && !MustUseReg(rt.rmode_)) {
      ori(rd, rs, static_cast<int32_t>(rt.imm64_));
    } else {
      // li handles the relocation.
      DCHECK(!rs.is(at));
      li(at, rt);
      or_(rd, rs, at);
    }
  }
}
// Bitwise XOR: rd = rs ^ rt. Uses xori for immediates that fit 16 unsigned
// bits (xori zero-extends); otherwise materializes the operand in 'at'.
void MacroAssembler::Xor(Register rd, Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    xor_(rd, rs, rt.rm());
  } else {
    if (is_uint16(rt.imm64_) && !MustUseReg(rt.rmode_)) {
      xori(rd, rs, static_cast<int32_t>(rt.imm64_));
    } else {
      // li handles the relocation.
      DCHECK(!rs.is(at));
      li(at, rt);
      xor_(rd, rs, at);
    }
  }
}
// Bitwise NOR: rd = ~(rs | rt).
void MacroAssembler::Nor(Register rd, Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    nor(rd, rs, rt.rm());
    return;
  }
  // Immediate operand: materialize it in 'at' first (li handles relocation).
  DCHECK(!rs.is(at));
  li(at, rt);
  nor(rd, rs, at);
}
// Bitwise NOT of rt into rs, implemented as xor with an all-ones value
// materialized in 'at'. rt must be a register and neither operand may be at.
// NOTE(review): despite the name this computes ~rt, not -rt — confirm with
// callers before relying on arithmetic-negate semantics.
void MacroAssembler::Neg(Register rs, const Operand& rt) {
  DCHECK(rt.is_reg());
  DCHECK(!at.is(rs));
  DCHECK(!at.is(rt.rm()));
  li(at, -1);
  xor_(rs, rt.rm(), at);
}
// Set-on-less-than (signed): rd = (rs < rt) ? 1 : 0. Uses slti for 16-bit
// signed immediates; otherwise materializes the operand in 'at'.
void MacroAssembler::Slt(Register rd, Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    slt(rd, rs, rt.rm());
  } else {
    if (is_int16(rt.imm64_) && !MustUseReg(rt.rmode_)) {
      slti(rd, rs, static_cast<int32_t>(rt.imm64_));
    } else {
      // li handles the relocation.
      DCHECK(!rs.is(at));
      li(at, rt);
      slt(rd, rs, at);
    }
  }
}
// Set-on-less-than (unsigned): rd = (rs < rt) ? 1 : 0. sltiu sign-extends
// its 16-bit immediate before the unsigned compare, so only two immediate
// ranges can be encoded directly; everything else goes through 'at'.
void MacroAssembler::Sltu(Register rd, Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    sltu(rd, rs, rt.rm());
  } else {
    const uint64_t int16_min = std::numeric_limits<int16_t>::min();
    if (is_uint15(rt.imm64_) && !MustUseReg(rt.rmode_)) {
      // Imm range is: [0, 32767].
      sltiu(rd, rs, static_cast<int32_t>(rt.imm64_));
    } else if (is_uint15(rt.imm64_ - int16_min) && !MustUseReg(rt.rmode_)) {
      // Imm range is: [max_unsigned-32767,max_unsigned].
      sltiu(rd, rs, static_cast<uint16_t>(rt.imm64_));
    } else {
      // li handles the relocation.
      DCHECK(!rs.is(at));
      li(at, rt);
      sltu(rd, rs, at);
    }
  }
}
// 32-bit rotate right: rd = rs rotated right by rt bits.
void MacroAssembler::Ror(Register rd, Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    rotrv(rd, rs, rt.rm());
    return;
  }
  // Normalize the immediate into [0, 31]. C++ '%' can yield a negative
  // remainder for negative immediates, hence the add-and-remod.
  int64_t amount = ((rt.imm64_ % 32) + 32) % 32;
  rotr(rd, rs, amount);
}
// 64-bit rotate right: rd = rs rotated right by rt bits.
void MacroAssembler::Dror(Register rd, Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    drotrv(rd, rs, rt.rm());
    return;
  }
  // Normalize the immediate into [0, 63]. C++ '%' can yield a negative
  // remainder for negative immediates, hence the add-and-remod.
  int64_t amount = ((rt.imm64_ % 64) + 64) % 64;
  if (amount <= 31) {
    drotr(rd, rs, amount);
  } else {
    // drotr only encodes 0-31; drotr32 covers 32-63.
    drotr32(rd, rs, amount - 32);
  }
}
// Emits a prefetch hint for the memory at |rs|.
void MacroAssembler::Pref(int32_t hint, const MemOperand& rs) {
  pref(hint, rs);
}
// Left-shift-add (32-bit): rd = rt + (rs << sa). Uses the single lsa
// instruction on r6 for sa <= 4 (lsa encodes sa-1); otherwise emits a
// shift into a temporary followed by an add. |scratch| is used only when
// rd aliases rt.
void MacroAssembler::Lsa(Register rd, Register rt, Register rs, uint8_t sa,
                         Register scratch) {
  DCHECK(sa >= 1 && sa <= 31);
  if (kArchVariant == kMips64r6 && sa <= 4) {
    lsa(rd, rt, rs, sa - 1);
  } else {
    Register tmp = rd.is(rt) ? scratch : rd;
    DCHECK(!tmp.is(rt));
    sll(tmp, rs, sa);
    Addu(rd, rt, tmp);
  }
}
// Left-shift-add (64-bit): rd = rt + (rs << sa). Uses the single dlsa
// instruction on r6 for sa <= 4 (dlsa encodes sa-1); otherwise emits a
// shift into a temporary followed by an add. |scratch| is used only when
// rd aliases rt.
void MacroAssembler::Dlsa(Register rd, Register rt, Register rs, uint8_t sa,
                          Register scratch) {
  DCHECK(sa >= 1 && sa <= 31);
  if (kArchVariant == kMips64r6 && sa <= 4) {
    dlsa(rd, rt, rs, sa - 1);
  } else {
    Register tmp = rd.is(rt) ? scratch : rd;
    DCHECK(!tmp.is(rt));
    dsll(tmp, rs, sa);
    Daddu(rd, rt, tmp);
  }
}
// Branch-on-overflow (r6 compact branch). If a trampoline has been emitted,
// the label may be out of bovc's range, so invert the test and use a long
// branch instead.
void MacroAssembler::Bovc(Register rs, Register rt, Label* L) {
  if (is_trampoline_emitted()) {
    Label skip;
    bnvc(rs, rt, &skip);
    BranchLong(L, PROTECT);
    bind(&skip);
  } else {
    bovc(rs, rt, L);
  }
}
// Branch-on-no-overflow (r6 compact branch). If a trampoline has been
// emitted, the label may be out of bnvc's range, so invert the test and use
// a long branch instead.
void MacroAssembler::Bnvc(Register rs, Register rt, Label* L) {
  if (is_trampoline_emitted()) {
    Label skip;
    bovc(rs, rt, &skip);
    BranchLong(L, PROTECT);
    bind(&skip);
  } else {
    bnvc(rs, rt, L);
  }
}
// ------------Pseudo-instructions-------------
// Change endianness
// Byte-swaps a signed value of |operand_size| bytes from |src| into |dest|
// using dsbh (swap bytes within halfwords) + dshd (swap halfwords within
// doublewords). Sizes 1/2/4 are first sign-extended/truncated in place, so
// |src| is clobbered for those sizes.
void MacroAssembler::ByteSwapSigned(Register dest, Register src,
                                    int operand_size) {
  DCHECK(operand_size == 1 || operand_size == 2 || operand_size == 4 ||
         operand_size == 8);
  DCHECK(kArchVariant == kMips64r6 || kArchVariant == kMips64r2);
  if (operand_size == 1) {
    seb(src, src);       // Sign-extend the byte.
    sll(src, src, 0);    // Sign-extend bits 31..0 to 64 bits.
    dsbh(dest, src);
    dshd(dest, dest);
  } else if (operand_size == 2) {
    seh(src, src);       // Sign-extend the halfword.
    sll(src, src, 0);    // Sign-extend bits 31..0 to 64 bits.
    dsbh(dest, src);
    dshd(dest, dest);
  } else if (operand_size == 4) {
    sll(src, src, 0);    // Sign-extend bits 31..0 to 64 bits.
    dsbh(dest, src);
    dshd(dest, dest);
  } else {
    // Full 64-bit swap; src is left untouched.
    dsbh(dest, src);
    dshd(dest, dest);
  }
}
// Byte-swaps an unsigned value of |operand_size| bytes (1, 2 or 4 only; no
// 8-byte variant here) from |src| into |dest|. The value is zero-extended
// in place first — clobbering |src| — then swapped with dsbh + dshd.
void MacroAssembler::ByteSwapUnsigned(Register dest, Register src,
                                      int operand_size) {
  DCHECK(operand_size == 1 || operand_size == 2 || operand_size == 4);
  if (operand_size == 1) {
    andi(src, src, 0xFF);     // Zero-extend the byte.
    dsbh(dest, src);
    dshd(dest, dest);
  } else if (operand_size == 2) {
    andi(src, src, 0xFFFF);   // Zero-extend the halfword.
    dsbh(dest, src);
    dshd(dest, dest);
  } else {
    // Zero-extend the low 32 bits via shift-up/shift-down.
    dsll32(src, src, 0);
    dsrl32(src, src, 0);
    dsbh(dest, src);
    dshd(dest, dest);
  }
}
// Unaligned 32-bit load. r6 handles unaligned accesses in hardware; r2 uses
// the lwr/lwl pair, going through 'at' when rd aliases the base register or
// when the adjusted offsets do not fit 16 bits.
void MacroAssembler::Ulw(Register rd, const MemOperand& rs) {
  DCHECK(!rd.is(at));
  DCHECK(!rs.rm().is(at));
  if (kArchVariant == kMips64r6) {
    lw(rd, rs);
  } else {
    DCHECK(kArchVariant == kMips64r2);
    if (is_int16(rs.offset() + kMipsLwrOffset) &&
        is_int16(rs.offset() + kMipsLwlOffset)) {
      if (!rd.is(rs.rm())) {
        lwr(rd, MemOperand(rs.rm(), rs.offset() + kMipsLwrOffset));
        lwl(rd, MemOperand(rs.rm(), rs.offset() + kMipsLwlOffset));
      } else {
        // rd aliases the base register: assemble in 'at' first.
        lwr(at, MemOperand(rs.rm(), rs.offset() + kMipsLwrOffset));
        lwl(at, MemOperand(rs.rm(), rs.offset() + kMipsLwlOffset));
        mov(rd, at);
      }
    } else { // Offset > 16 bits, use multiple instructions to load.
      LoadRegPlusOffsetToAt(rs);
      lwr(rd, MemOperand(at, kMipsLwrOffset));
      lwl(rd, MemOperand(at, kMipsLwlOffset));
    }
  }
}
// Unaligned 32-bit load, zero-extended. On r2 this is Ulw followed by a
// zero-extension of the low 32 bits via Dext.
void MacroAssembler::Ulwu(Register rd, const MemOperand& rs) {
  if (kArchVariant == kMips64r6) {
    lwu(rd, rs);
  } else {
    DCHECK(kArchVariant == kMips64r2);
    Ulw(rd, rs);
    Dext(rd, rd, 0, 32);
  }
}
// Unaligned 32-bit store. r6 handles unaligned accesses in hardware; r2
// uses the swr/swl pair, computing the address in 'at' when the adjusted
// offsets do not fit 16 bits.
void MacroAssembler::Usw(Register rd, const MemOperand& rs) {
  DCHECK(!rd.is(at));
  DCHECK(!rs.rm().is(at));
  if (kArchVariant == kMips64r6) {
    sw(rd, rs);
  } else {
    DCHECK(kArchVariant == kMips64r2);
    if (is_int16(rs.offset() + kMipsSwrOffset) &&
        is_int16(rs.offset() + kMipsSwlOffset)) {
      swr(rd, MemOperand(rs.rm(), rs.offset() + kMipsSwrOffset));
      swl(rd, MemOperand(rs.rm(), rs.offset() + kMipsSwlOffset));
    } else {
      // Offset > 16 bits: compute the address in 'at' first.
      LoadRegPlusOffsetToAt(rs);
      swr(rd, MemOperand(at, kMipsSwrOffset));
      swl(rd, MemOperand(at, kMipsSwlOffset));
    }
  }
}
// Unaligned signed 16-bit load. On r2 the two bytes are loaded separately
// (sign byte with lb into rd, other byte with lbu into at) and combined
// with a shift and or. Clobbers at.
void MacroAssembler::Ulh(Register rd, const MemOperand& rs) {
  DCHECK(!rd.is(at));
  DCHECK(!rs.rm().is(at));
  if (kArchVariant == kMips64r6) {
    lh(rd, rs);
  } else {
    DCHECK(kArchVariant == kMips64r2);
    if (is_int16(rs.offset()) && is_int16(rs.offset() + 1)) {
#if defined(V8_TARGET_LITTLE_ENDIAN)
      lbu(at, rs);
      lb(rd, MemOperand(rs.rm(), rs.offset() + 1));
#elif defined(V8_TARGET_BIG_ENDIAN)
      lbu(at, MemOperand(rs.rm(), rs.offset() + 1));
      lb(rd, rs);
#endif
    } else { // Offset > 16 bits, use multiple instructions to load.
      LoadRegPlusOffsetToAt(rs);
#if defined(V8_TARGET_LITTLE_ENDIAN)
      lb(rd, MemOperand(at, 1));
      lbu(at, MemOperand(at, 0));
#elif defined(V8_TARGET_BIG_ENDIAN)
      lb(rd, MemOperand(at, 0));
      lbu(at, MemOperand(at, 1));
#endif
    }
    // Combine: rd = (sign-extended high byte << 8) | low byte.
    dsll(rd, rd, 8);
    or_(rd, rd, at);
  }
}
// Unaligned unsigned 16-bit load. On r2 both bytes are loaded with lbu and
// combined with a shift and or. Clobbers at.
void MacroAssembler::Ulhu(Register rd, const MemOperand& rs) {
  DCHECK(!rd.is(at));
  DCHECK(!rs.rm().is(at));
  if (kArchVariant == kMips64r6) {
    lhu(rd, rs);
  } else {
    DCHECK(kArchVariant == kMips64r2);
    if (is_int16(rs.offset()) && is_int16(rs.offset() + 1)) {
#if defined(V8_TARGET_LITTLE_ENDIAN)
      lbu(at, rs);
      lbu(rd, MemOperand(rs.rm(), rs.offset() + 1));
#elif defined(V8_TARGET_BIG_ENDIAN)
      lbu(at, MemOperand(rs.rm(), rs.offset() + 1));
      lbu(rd, rs);
#endif
    } else { // Offset > 16 bits, use multiple instructions to load.
      LoadRegPlusOffsetToAt(rs);
#if defined(V8_TARGET_LITTLE_ENDIAN)
      lbu(rd, MemOperand(at, 1));
      lbu(at, MemOperand(at, 0));
#elif defined(V8_TARGET_BIG_ENDIAN)
      lbu(rd, MemOperand(at, 0));
      lbu(at, MemOperand(at, 1));
#endif
    }
    // Combine: rd = (high byte << 8) | low byte.
    dsll(rd, rd, 8);
    or_(rd, rd, at);
  }
}
// Unaligned 16-bit store. On r2 the halfword is stored byte by byte.
// Clobbers |scratch| (and therefore rd, when scratch aliases rd).
void MacroAssembler::Ush(Register rd, const MemOperand& rs, Register scratch) {
  DCHECK(!rd.is(at));
  DCHECK(!rs.rm().is(at));
  DCHECK(!rs.rm().is(scratch));
  DCHECK(!scratch.is(at));
  if (kArchVariant == kMips64r6) {
    sh(rd, rs);
  } else {
    DCHECK(kArchVariant == kMips64r2);
    MemOperand source = rs;
    // If offset > 16 bits, load address to at with offset 0.
    if (!is_int16(rs.offset()) || !is_int16(rs.offset() + 1)) {
      LoadRegPlusOffsetToAt(rs);
      source = MemOperand(at, 0);
    }
    if (!scratch.is(rd)) {
      mov(scratch, rd);
    }
    // Store the low byte at the lower address and the high byte at the
    // higher address according to target endianness.
#if defined(V8_TARGET_LITTLE_ENDIAN)
    sb(scratch, source);
    srl(scratch, scratch, 8);
    sb(scratch, MemOperand(source.rm(), source.offset() + 1));
#elif defined(V8_TARGET_BIG_ENDIAN)
    sb(scratch, MemOperand(source.rm(), source.offset() + 1));
    srl(scratch, scratch, 8);
    sb(scratch, source);
#endif
  }
}
// Unaligned 64-bit load. r6 handles unaligned accesses in hardware; r2 uses
// the ldr/ldl pair, going through 'at' when rd aliases the base register or
// when the adjusted offsets do not fit 16 bits.
void MacroAssembler::Uld(Register rd, const MemOperand& rs) {
  DCHECK(!rd.is(at));
  DCHECK(!rs.rm().is(at));
  if (kArchVariant == kMips64r6) {
    ld(rd, rs);
  } else {
    DCHECK(kArchVariant == kMips64r2);
    if (is_int16(rs.offset() + kMipsLdrOffset) &&
        is_int16(rs.offset() + kMipsLdlOffset)) {
      if (!rd.is(rs.rm())) {
        ldr(rd, MemOperand(rs.rm(), rs.offset() + kMipsLdrOffset));
        ldl(rd, MemOperand(rs.rm(), rs.offset() + kMipsLdlOffset));
      } else {
        // rd aliases the base register: assemble in 'at' first.
        ldr(at, MemOperand(rs.rm(), rs.offset() + kMipsLdrOffset));
        ldl(at, MemOperand(rs.rm(), rs.offset() + kMipsLdlOffset));
        mov(rd, at);
      }
    } else { // Offset > 16 bits, use multiple instructions to load.
      LoadRegPlusOffsetToAt(rs);
      ldr(rd, MemOperand(at, kMipsLdrOffset));
      ldl(rd, MemOperand(at, kMipsLdlOffset));
    }
  }
}
// Load a consecutive 32-bit word pair into the 64-bit register |rd|: the
// first word lands in the low 32 bits (zero-extended lwu), the second word
// in the high 32 bits. |scratch| is clobbered. Offsets must stay in range
// for both accesses.
void MacroAssembler::LoadWordPair(Register rd, const MemOperand& rs,
                                  Register scratch) {
  lwu(rd, rs);
  lw(scratch, MemOperand(rs.rm(), rs.offset() + kPointerSize / 2));
  dsll32(scratch, scratch, 0);  // Move second word into bits 63..32.
  Daddu(rd, rd, scratch);
}
// Store the 64-bit word in |rd| to a possibly-unaligned address |rs|.
// r6 supports unaligned sd in hardware; r2 uses the sdr/sdl pair (through
// |at| when the offsets exceed 16 bits). |rd| is preserved.
void MacroAssembler::Usd(Register rd, const MemOperand& rs) {
  DCHECK(!rd.is(at));
  DCHECK(!rs.rm().is(at));
  if (kArchVariant == kMips64r6) {
    sd(rd, rs);
  } else {
    DCHECK(kArchVariant == kMips64r2);
    if (is_int16(rs.offset() + kMipsSdrOffset) &&
        is_int16(rs.offset() + kMipsSdlOffset)) {
      sdr(rd, MemOperand(rs.rm(), rs.offset() + kMipsSdrOffset));
      sdl(rd, MemOperand(rs.rm(), rs.offset() + kMipsSdlOffset));
    } else {  // Offset > 16 bits, use multiple instructions to store.
      LoadRegPlusOffsetToAt(rs);
      sdr(rd, MemOperand(at, kMipsSdrOffset));
      sdl(rd, MemOperand(at, kMipsSdlOffset));
    }
  }
}
// Do a 64-bit store as two consecutive 32-bit stores (e.g. to a word-aligned
// but not doubleword-aligned address): low word first, high word at +4.
// |scratch| is clobbered; |rd| is preserved.
void MacroAssembler::StoreWordPair(Register rd, const MemOperand& rs,
                                   Register scratch) {
  sw(rd, rs);
  dsrl32(scratch, rd, 0);  // scratch = high 32 bits of rd.
  sw(scratch, MemOperand(rs.rm(), rs.offset() + kPointerSize / 2));
}
// Load a possibly-unaligned single-precision value into |fd|, shuttling the
// bits through GPR |scratch| on r2 (which clobbers it, plus |at| via Ulw).
void MacroAssembler::Ulwc1(FPURegister fd, const MemOperand& rs,
                           Register scratch) {
  if (kArchVariant == kMips64r6) {
    lwc1(fd, rs);
  } else {
    DCHECK(kArchVariant == kMips64r2);
    Ulw(scratch, rs);
    mtc1(scratch, fd);
  }
}
// Store the single-precision value in |fd| to a possibly-unaligned address,
// shuttling the bits through GPR |scratch| on r2 (which clobbers it).
void MacroAssembler::Uswc1(FPURegister fd, const MemOperand& rs,
                           Register scratch) {
  if (kArchVariant == kMips64r6) {
    swc1(fd, rs);
  } else {
    DCHECK(kArchVariant == kMips64r2);
    mfc1(scratch, fd);
    Usw(scratch, rs);
  }
}
// Load a possibly-unaligned double-precision value into |fd|, shuttling the
// 64 bits through GPR |scratch| on r2 (which clobbers it, plus |at| via Uld).
void MacroAssembler::Uldc1(FPURegister fd, const MemOperand& rs,
                           Register scratch) {
  DCHECK(!scratch.is(at));
  if (kArchVariant == kMips64r6) {
    ldc1(fd, rs);
  } else {
    DCHECK(kArchVariant == kMips64r2);
    Uld(scratch, rs);
    dmtc1(scratch, fd);
  }
}
// Store the double-precision value in |fd| to a possibly-unaligned address,
// shuttling the 64 bits through GPR |scratch| on r2 (which clobbers it).
void MacroAssembler::Usdc1(FPURegister fd, const MemOperand& rs,
                           Register scratch) {
  DCHECK(!scratch.is(at));
  if (kArchVariant == kMips64r6) {
    sdc1(fd, rs);
  } else {
    DCHECK(kArchVariant == kMips64r2);
    dmfc1(scratch, fd);
    Usd(scratch, rs);
  }
}
// Load a heap-object handle into |dst| by forwarding to the Operand overload
// (which records relocation info as needed).
void MacroAssembler::li(Register dst, Handle<Object> value, LiFlags mode) {
  li(dst, Operand(value), mode);
}
// Arithmetically shift |imm| right by |bitnum|, adding 1 when the top bit of
// the shifted-out portion (bit bitnum-1) is set. This compensates for the
// sign extension performed by the preceding lui/ori/dahi when computing the
// next dahi/dati payload.
static inline int64_t ShiftAndFixSignExtension(int64_t imm, int bitnum) {
  const bool carry_needed = ((imm >> (bitnum - 1)) & 0x1) != 0;
  const int64_t shifted = imm >> bitnum;
  return carry_needed ? shifted + 1 : shifted;
}
// Load the low 32 bits of |j.imm64_| into |rd| with the shortest sequence
// (one daddiu/ori/lui, or lui+ori). Returns true when the emitted lui leaves
// sign-extended 1s in rd's upper 32 bits (top bit of the lui immediate set),
// so the caller knows dahi/dati corrections are required.
bool MacroAssembler::LiLower32BitHelper(Register rd, Operand j) {
  bool higher_bits_sign_extended = false;
  if (is_int16(j.imm64_)) {
    daddiu(rd, zero_reg, (j.imm64_ & kImm16Mask));
  } else if (!(j.imm64_ & kHiMask)) {
    ori(rd, zero_reg, (j.imm64_ & kImm16Mask));
  } else if (!(j.imm64_ & kImm16Mask)) {
    lui(rd, (j.imm64_ >> kLuiShift) & kImm16Mask);
    if ((j.imm64_ >> (kLuiShift + 15)) & 0x1) {
      higher_bits_sign_extended = true;
    }
  } else {
    lui(rd, (j.imm64_ >> kLuiShift) & kImm16Mask);
    ori(rd, rd, (j.imm64_ & kImm16Mask));
    if ((j.imm64_ >> (kLuiShift + 15)) & 0x1) {
      higher_bits_sign_extended = true;
    }
  }
  return higher_bits_sign_extended;
}
// Load the 64-bit immediate (or relocatable value) in |j| into |rd|.
// Three regimes:
//  - OPTIMIZE_SIZE + no reloc info: shortest possible sequence for the value
//    (r6 uses lui/ori then dahi/dati; r2 builds the value with lui/ori/dsll
//    chains sized to 32/48/64 significant bits).
//  - Relocatable values and ADDRESS_LOAD: a fixed 4-instruction sequence so
//    the code can later be patched with a different value.
//  - Otherwise (CONSTANT_SIZE): a fixed maximal-length sequence.
void MacroAssembler::li(Register rd, Operand j, LiFlags mode) {
  DCHECK(!j.is_reg());
  BlockTrampolinePoolScope block_trampoline_pool(this);
  if (!MustUseReg(j.rmode_) && mode == OPTIMIZE_SIZE) {
    // Normal load of an immediate value which does not need Relocation Info.
    if (is_int32(j.imm64_)) {
      LiLower32BitHelper(rd, j);
    } else {
      if (kArchVariant == kMips64r6) {
        int64_t imm = j.imm64_;
        bool higher_bits_sign_extended = LiLower32BitHelper(rd, j);
        imm = ShiftAndFixSignExtension(imm, 32);
        // If LUI writes 1s to higher bits, we need both DAHI/DATI.
        if ((imm & kImm16Mask) ||
            (higher_bits_sign_extended && (j.imm64_ > 0))) {
          dahi(rd, imm & kImm16Mask);
        }
        imm = ShiftAndFixSignExtension(imm, 16);
        if ((!is_int48(j.imm64_) && (imm & kImm16Mask)) ||
            (higher_bits_sign_extended && (j.imm64_ > 0))) {
          dati(rd, imm & kImm16Mask);
        }
      } else {
        if (is_int48(j.imm64_)) {
          // 33..48 significant bits: build bits 47..16, shift, then or in
          // the low halfword if non-zero.
          if ((j.imm64_ >> 32) & kImm16Mask) {
            lui(rd, (j.imm64_ >> 32) & kImm16Mask);
            if ((j.imm64_ >> 16) & kImm16Mask) {
              ori(rd, rd, (j.imm64_ >> 16) & kImm16Mask);
            }
          } else {
            ori(rd, zero_reg, (j.imm64_ >> 16) & kImm16Mask);
          }
          dsll(rd, rd, 16);
          if (j.imm64_ & kImm16Mask) {
            ori(rd, rd, j.imm64_ & kImm16Mask);
          }
        } else {
          // Full 64-bit value: assemble halfword by halfword from the top,
          // skipping ori for zero halfwords (the shifts still happen).
          lui(rd, (j.imm64_ >> 48) & kImm16Mask);
          if ((j.imm64_ >> 32) & kImm16Mask) {
            ori(rd, rd, (j.imm64_ >> 32) & kImm16Mask);
          }
          if ((j.imm64_ >> 16) & kImm16Mask) {
            dsll(rd, rd, 16);
            ori(rd, rd, (j.imm64_ >> 16) & kImm16Mask);
            if (j.imm64_ & kImm16Mask) {
              dsll(rd, rd, 16);
              ori(rd, rd, j.imm64_ & kImm16Mask);
            } else {
              dsll(rd, rd, 16);
            }
          } else {
            if (j.imm64_ & kImm16Mask) {
              dsll32(rd, rd, 0);
              ori(rd, rd, j.imm64_ & kImm16Mask);
            } else {
              dsll32(rd, rd, 0);
            }
          }
        }
      }
    }
  } else if (MustUseReg(j.rmode_)) {
    // Relocatable value: emit the fixed patchable 4-instruction pattern.
    RecordRelocInfo(j.rmode_, j.imm64_);
    lui(rd, (j.imm64_ >> 32) & kImm16Mask);
    ori(rd, rd, (j.imm64_ >> 16) & kImm16Mask);
    dsll(rd, rd, 16);
    ori(rd, rd, j.imm64_ & kImm16Mask);
  } else if (mode == ADDRESS_LOAD) {
    // We always need the same number of instructions as we may need to patch
    // this code to load another value which may need all 4 instructions.
    lui(rd, (j.imm64_ >> 32) & kImm16Mask);
    ori(rd, rd, (j.imm64_ >> 16) & kImm16Mask);
    dsll(rd, rd, 16);
    ori(rd, rd, j.imm64_ & kImm16Mask);
  } else {
    // CONSTANT_SIZE: fixed maximal-length sequence.
    if (kArchVariant == kMips64r6) {
      int64_t imm = j.imm64_;
      lui(rd, (imm >> kLuiShift) & kImm16Mask);
      if (imm & kImm16Mask) {
        ori(rd, rd, (imm & kImm16Mask));
      }
      // Round the halves the same way ShiftAndFixSignExtension does.
      if ((imm >> 31) & 0x1) {
        imm = (imm >> 32) + 1;
      } else {
        imm = imm >> 32;
      }
      dahi(rd, imm & kImm16Mask);
      if ((imm >> 15) & 0x1) {
        imm = (imm >> 16) + 1;
      } else {
        imm = imm >> 16;
      }
      dati(rd, imm & kImm16Mask);
    } else {
      lui(rd, (j.imm64_ >> 48) & kImm16Mask);
      ori(rd, rd, (j.imm64_ >> 32) & kImm16Mask);
      dsll(rd, rd, 16);
      ori(rd, rd, (j.imm64_ >> 16) & kImm16Mask);
      dsll(rd, rd, 16);
      ori(rd, rd, j.imm64_ & kImm16Mask);
    }
  }
}
// Push every GPR whose bit is set in |regs|. The lowest-numbered register
// ends up at the lowest address (i.e. at sp); inverse of MultiPop.
void MacroAssembler::MultiPush(RegList regs) {
  int16_t num_to_push = NumberOfBitsSet(regs);
  int16_t stack_offset = num_to_push * kPointerSize;
  Dsubu(sp, sp, Operand(stack_offset));
  for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
    if ((regs & (1 << i)) != 0) {
      stack_offset -= kPointerSize;
      sd(ToRegister(i), MemOperand(sp, stack_offset));
    }
  }
}
// Push every GPR whose bit is set in |regs| in reversed order: the
// highest-numbered register ends up at the lowest address (at sp);
// inverse of MultiPopReversed.
void MacroAssembler::MultiPushReversed(RegList regs) {
  int16_t num_to_push = NumberOfBitsSet(regs);
  int16_t stack_offset = num_to_push * kPointerSize;
  Dsubu(sp, sp, Operand(stack_offset));
  for (int16_t i = 0; i < kNumRegisters; i++) {
    if ((regs & (1 << i)) != 0) {
      stack_offset -= kPointerSize;
      sd(ToRegister(i), MemOperand(sp, stack_offset));
    }
  }
}
// Pop the GPRs in |regs|, matching the layout produced by MultiPush,
// then release the stack space in one daddiu.
void MacroAssembler::MultiPop(RegList regs) {
  int16_t stack_offset = 0;
  for (int16_t i = 0; i < kNumRegisters; i++) {
    if ((regs & (1 << i)) != 0) {
      ld(ToRegister(i), MemOperand(sp, stack_offset));
      stack_offset += kPointerSize;
    }
  }
  daddiu(sp, sp, stack_offset);
}
// Pop the GPRs in |regs|, matching the layout produced by MultiPushReversed,
// then release the stack space in one daddiu.
void MacroAssembler::MultiPopReversed(RegList regs) {
  int16_t stack_offset = 0;
  for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
    if ((regs & (1 << i)) != 0) {
      ld(ToRegister(i), MemOperand(sp, stack_offset));
      stack_offset += kPointerSize;
    }
  }
  daddiu(sp, sp, stack_offset);
}
// FPU analogue of MultiPush: push each double register whose bit is set in
// |regs|; lowest-numbered register at the lowest address.
void MacroAssembler::MultiPushFPU(RegList regs) {
  int16_t num_to_push = NumberOfBitsSet(regs);
  int16_t stack_offset = num_to_push * kDoubleSize;
  Dsubu(sp, sp, Operand(stack_offset));
  for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
    if ((regs & (1 << i)) != 0) {
      stack_offset -= kDoubleSize;
      sdc1(FPURegister::from_code(i), MemOperand(sp, stack_offset));
    }
  }
}
// FPU analogue of MultiPushReversed: highest-numbered double register at
// the lowest address.
void MacroAssembler::MultiPushReversedFPU(RegList regs) {
  int16_t num_to_push = NumberOfBitsSet(regs);
  int16_t stack_offset = num_to_push * kDoubleSize;
  Dsubu(sp, sp, Operand(stack_offset));
  for (int16_t i = 0; i < kNumRegisters; i++) {
    if ((regs & (1 << i)) != 0) {
      stack_offset -= kDoubleSize;
      sdc1(FPURegister::from_code(i), MemOperand(sp, stack_offset));
    }
  }
}
// Pop the double registers in |regs|, matching MultiPushFPU's layout.
void MacroAssembler::MultiPopFPU(RegList regs) {
  int16_t stack_offset = 0;
  for (int16_t i = 0; i < kNumRegisters; i++) {
    if ((regs & (1 << i)) != 0) {
      ldc1(FPURegister::from_code(i), MemOperand(sp, stack_offset));
      stack_offset += kDoubleSize;
    }
  }
  daddiu(sp, sp, stack_offset);
}
// Pop the double registers in |regs|, matching MultiPushReversedFPU's layout.
void MacroAssembler::MultiPopReversedFPU(RegList regs) {
  int16_t stack_offset = 0;
  for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
    if ((regs & (1 << i)) != 0) {
      ldc1(FPURegister::from_code(i), MemOperand(sp, stack_offset));
      stack_offset += kDoubleSize;
    }
  }
  daddiu(sp, sp, stack_offset);
}
// Extract |size| bits starting at bit |pos| from rs into rt (32-bit ext);
// the field must lie entirely within bits 0..31.
void MacroAssembler::Ext(Register rt,
                         Register rs,
                         uint16_t pos,
                         uint16_t size) {
  DCHECK(pos < 32);
  DCHECK(pos + size < 33);
  ext_(rt, rs, pos, size);
}
// Extract an arbitrary bit field from a 64-bit register, dispatching to the
// dext/dextm/dextu variant whose pos/size encoding range covers it.
void MacroAssembler::ExtractBits(Register rt, Register rs, uint16_t pos,
                                 uint16_t size) {
  DCHECK(pos < 64);
  DCHECK(size > 0 && size <= 64);
  DCHECK(pos + size <= 64);
  if (pos < 32) {
    if (size <= 32) {
      Dext(rt, rs, pos, size);    // pos < 32, size <= 32.
    } else {
      Dextm(rt, rs, pos, size);   // pos < 32, size > 32.
    }
  } else if (pos < 64) {
    DCHECK(size <= 32);
    Dextu(rt, rs, pos, size);     // pos >= 32.
  }
}
// dext wrapper: field starts below bit 32 and is at most 32 bits wide.
void MacroAssembler::Dext(Register rt, Register rs, uint16_t pos,
                          uint16_t size) {
  DCHECK(pos < 32);
  DCHECK(size > 0 && size <= 32);
  dext_(rt, rs, pos, size);
}
// dextm wrapper: field starts below bit 32 and is wider than 32 bits.
void MacroAssembler::Dextm(Register rt, Register rs, uint16_t pos,
                           uint16_t size) {
  DCHECK(pos < 32);
  DCHECK(size > 32 && size <= 64);
  DCHECK((pos + size) > 32 && (pos + size) <= 64);
  dextm(rt, rs, pos, size);
}
// dextu wrapper: field starts at bit 32 or above, at most 32 bits wide.
void MacroAssembler::Dextu(Register rt, Register rs, uint16_t pos,
                           uint16_t size) {
  DCHECK(pos >= 32 && pos < 64);
  DCHECK(size > 0 && size <= 32);
  DCHECK((pos + size) > 32 && (pos + size) <= 64);
  dextu(rt, rs, pos, size);
}
// Insert the low |size| bits of rs into rt at bit |pos| (64-bit dins);
// field must lie within bits 0..31 for this encoding.
void MacroAssembler::Dins(Register rt, Register rs, uint16_t pos,
                          uint16_t size) {
  DCHECK(pos < 32);
  DCHECK(pos + size <= 32);
  DCHECK(size != 0);
  dins_(rt, rs, pos, size);
}
// Insert the low |size| bits of rs into rt at bit |pos| (32-bit ins).
void MacroAssembler::Ins(Register rt,
                         Register rs,
                         uint16_t pos,
                         uint16_t size) {
  DCHECK(pos < 32);
  DCHECK(pos + size <= 32);
  DCHECK(size != 0);
  ins_(rt, rs, pos, size);
}
// Negate the single-precision value fs into fd, flipping the sign even for
// NaN inputs. On r2 this requires flipping the sign bit manually for NaNs;
// clobbers t8 and t9 on that path.
void MacroAssembler::Neg_s(FPURegister fd, FPURegister fs) {
  if (kArchVariant == kMips64r6) {
    // r6 neg_s changes the sign for NaN-like operands as well.
    neg_s(fd, fs);
  } else {
    DCHECK(kArchVariant == kMips64r2);
    Label is_nan, done;
    Register scratch1 = t8;
    Register scratch2 = t9;
    // fs != fs iff fs is NaN.
    BranchF32(nullptr, &is_nan, eq, fs, fs);
    Branch(USE_DELAY_SLOT, &done);
    // For NaN input, neg_s will return the same NaN value,
    // while the sign has to be changed separately.
    neg_s(fd, fs);  // In delay slot.
    bind(&is_nan);
    // Rebuild the value with its sign bit flipped.
    mfc1(scratch1, fs);
    And(scratch2, scratch1, Operand(~kBinary32SignMask));
    And(scratch1, scratch1, Operand(kBinary32SignMask));
    Xor(scratch1, scratch1, Operand(kBinary32SignMask));
    Or(scratch2, scratch2, scratch1);
    mtc1(scratch2, fd);
    bind(&done);
  }
}
// Negate the double-precision value fs into fd, flipping the sign even for
// NaN inputs. On r2 this requires flipping the sign bit manually for NaNs;
// clobbers t8 and t9 on that path.
void MacroAssembler::Neg_d(FPURegister fd, FPURegister fs) {
  if (kArchVariant == kMips64r6) {
    // r6 neg_d changes the sign for NaN-like operands as well.
    neg_d(fd, fs);
  } else {
    DCHECK(kArchVariant == kMips64r2);
    Label is_nan, done;
    Register scratch1 = t8;
    Register scratch2 = t9;
    // fs != fs iff fs is NaN.
    BranchF64(nullptr, &is_nan, eq, fs, fs);
    Branch(USE_DELAY_SLOT, &done);
    // For NaN input, neg_d will return the same NaN value,
    // while the sign has to be changed separately.
    neg_d(fd, fs);  // In delay slot.
    bind(&is_nan);
    // Rebuild the value with its sign bit flipped.
    dmfc1(scratch1, fs);
    And(scratch2, scratch1, Operand(~Double::kSignMask));
    And(scratch1, scratch1, Operand(Double::kSignMask));
    Xor(scratch1, scratch1, Operand(Double::kSignMask));
    Or(scratch2, scratch2, scratch1);
    dmtc1(scratch2, fd);
    bind(&done);
  }
}
// Convert the uint32 held in fs to a double in fd. Clobbers t8 and t9.
void MacroAssembler::Cvt_d_uw(FPURegister fd, FPURegister fs) {
  // Move the data from fs to t8.
  mfc1(t8, fs);
  Cvt_d_uw(fd, t8);
}
// Convert the uint32 in rs to a double in fd by zero-extending to 64 bits
// and using the (exact) int64->double conversion. Clobbers t9.
void MacroAssembler::Cvt_d_uw(FPURegister fd, Register rs) {
  // Convert rs to a FP value in fd.
  DCHECK(!rs.is(t9));
  DCHECK(!rs.is(at));
  // Zero extend int32 in rs.
  Dext(t9, rs, 0, 32);
  dmtc1(t9, fd);
  cvt_d_l(fd, fd);
}
// Convert the uint64 held in fs to a double in fd. Clobbers t8 and t9.
void MacroAssembler::Cvt_d_ul(FPURegister fd, FPURegister fs) {
  // Move the data from fs to t8.
  dmfc1(t8, fs);
  Cvt_d_ul(fd, t8);
}
// Convert the uint64 in rs to a double in fd. Values with the MSB set (which
// would look negative to cvt_d_l) are halved with the lost bit or'ed back in
// (sticky bit for correct rounding), converted, then doubled. Clobbers t9.
// NOTE(review): the MSB-set path shifts |rs| in place (dsrl rs, rs, 1),
// clobbering the caller's register — confirm callers treat rs as clobbered.
void MacroAssembler::Cvt_d_ul(FPURegister fd, Register rs) {
  // Convert rs to a FP value in fd.
  DCHECK(!rs.is(t9));
  DCHECK(!rs.is(at));
  Label msb_clear, conversion_done;
  Branch(&msb_clear, ge, rs, Operand(zero_reg));
  // Rs >= 2^63
  andi(t9, rs, 1);
  dsrl(rs, rs, 1);
  or_(t9, t9, rs);
  dmtc1(t9, fd);
  cvt_d_l(fd, fd);
  Branch(USE_DELAY_SLOT, &conversion_done);
  add_d(fd, fd, fd);  // In delay slot.
  bind(&msb_clear);
  // Rs < 2^63, we can do simple conversion.
  dmtc1(rs, fd);
  cvt_d_l(fd, fd);
  bind(&conversion_done);
}
// Convert the uint32 held in fs to a float in fd. Clobbers t8 and t9.
void MacroAssembler::Cvt_s_uw(FPURegister fd, FPURegister fs) {
  // Move the data from fs to t8.
  mfc1(t8, fs);
  Cvt_s_uw(fd, t8);
}
// Convert the uint32 in rs to a float in fd by zero-extending to 64 bits
// and using the int64->float conversion. Clobbers t9.
void MacroAssembler::Cvt_s_uw(FPURegister fd, Register rs) {
  // Convert rs to a FP value in fd.
  DCHECK(!rs.is(t9));
  DCHECK(!rs.is(at));
  // Zero extend int32 in rs.
  Dext(t9, rs, 0, 32);
  dmtc1(t9, fd);
  cvt_s_l(fd, fd);
}
// Convert the uint64 held in fs to a float in fd. Clobbers t8 and t9.
void MacroAssembler::Cvt_s_ul(FPURegister fd, FPURegister fs) {
  // Move the data from fs to t8.
  dmfc1(t8, fs);
  Cvt_s_ul(fd, t8);
}
// Convert the uint64 in rs to a float in fd. MSB-set values are halved with
// the lost bit or'ed back in (sticky bit), converted, then doubled.
// Clobbers t9.
// NOTE(review): the MSB-set path shifts |rs| in place (dsrl rs, rs, 1),
// clobbering the caller's register — confirm callers treat rs as clobbered.
void MacroAssembler::Cvt_s_ul(FPURegister fd, Register rs) {
  // Convert rs to a FP value in fd.
  DCHECK(!rs.is(t9));
  DCHECK(!rs.is(at));
  Label positive, conversion_done;
  Branch(&positive, ge, rs, Operand(zero_reg));
  // Rs >= 2^31.
  andi(t9, rs, 1);
  dsrl(rs, rs, 1);
  or_(t9, t9, rs);
  dmtc1(t9, fd);
  cvt_s_l(fd, fd);
  Branch(USE_DELAY_SLOT, &conversion_done);
  add_s(fd, fd, fd);  // In delay slot.
  bind(&positive);
  // Rs < 2^31, we can do simple conversion.
  dmtc1(rs, fd);
  cvt_s_l(fd, fd);
  bind(&conversion_done);
}
// Round double fs to the nearest 64-bit integer value in fd.
void MacroAssembler::Round_l_d(FPURegister fd, FPURegister fs) {
  round_l_d(fd, fs);
}
// Round double fs toward -infinity to a 64-bit integer value in fd.
void MacroAssembler::Floor_l_d(FPURegister fd, FPURegister fs) {
  floor_l_d(fd, fs);
}
// Round double fs toward +infinity to a 64-bit integer value in fd.
void MacroAssembler::Ceil_l_d(FPURegister fd, FPURegister fs) {
  ceil_l_d(fd, fs);
}
// Truncate double fs toward zero to a 64-bit integer value in fd.
void MacroAssembler::Trunc_l_d(FPURegister fd, FPURegister fs) {
  trunc_l_d(fd, fs);
}
// Truncate the double in fs to a 64-bit integer in fd after clearing the
// sign bit. Clobbers t8 and at.
// NOTE(review): the sign-cleared value is written back into |fs| (dmtc1),
// clobbering the input register — confirm callers expect this. The |scratch|
// parameter is unused here.
void MacroAssembler::Trunc_l_ud(FPURegister fd,
                                FPURegister fs,
                                FPURegister scratch) {
  // Load to GPR.
  dmfc1(t8, fs);
  // Reset sign bit.
  li(at, 0x7fffffffffffffff);
  and_(t8, t8, at);
  dmtc1(t8, fs);
  trunc_l_d(fd, fs);
}
// Truncate the double in fs to an unsigned 32-bit integer, leaving the
// result bits in fd. Clobbers t8 (and at via the register overload).
void MacroAssembler::Trunc_uw_d(FPURegister fd,
                                FPURegister fs,
                                FPURegister scratch) {
  Trunc_uw_d(fs, t8, scratch);
  mtc1(t8, fd);
}
// Truncate the float in fs to an unsigned 32-bit integer, leaving the
// result bits in fd. Clobbers t8 (and at via the register overload).
void MacroAssembler::Trunc_uw_s(FPURegister fd, FPURegister fs,
                                FPURegister scratch) {
  Trunc_uw_s(fs, t8, scratch);
  mtc1(t8, fd);
}
// Truncate the double in fs to an unsigned 64-bit integer, leaving the
// result bits in fd. Clobbers t8 (and at via the register overload).
void MacroAssembler::Trunc_ul_d(FPURegister fd, FPURegister fs,
                                FPURegister scratch, Register result) {
  Trunc_ul_d(fs, t8, scratch, result);
  dmtc1(t8, fd);
}
// Truncate the float in fs to an unsigned 64-bit integer, leaving the
// result bits in fd. Clobbers t8 (and at via the register overload).
void MacroAssembler::Trunc_ul_s(FPURegister fd, FPURegister fs,
                                FPURegister scratch, Register result) {
  Trunc_ul_s(fs, t8, scratch, result);
  dmtc1(t8, fd);
}
// Truncate double fs toward zero to a 32-bit integer value in fd.
void MacroAssembler::Trunc_w_d(FPURegister fd, FPURegister fs) {
  trunc_w_d(fd, fs);
}
// Round double fs to the nearest 32-bit integer value in fd.
void MacroAssembler::Round_w_d(FPURegister fd, FPURegister fs) {
  round_w_d(fd, fs);
}
// Round double fs toward -infinity to a 32-bit integer value in fd.
void MacroAssembler::Floor_w_d(FPURegister fd, FPURegister fs) {
  floor_w_d(fd, fs);
}
// Round double fs toward +infinity to a 32-bit integer value in fd.
void MacroAssembler::Ceil_w_d(FPURegister fd, FPURegister fs) {
  ceil_w_d(fd, fs);
}
// Truncate the double in |fd| to an unsigned 32-bit integer in GPR |rs|.
// Values >= 2^31 (which trunc_w_d cannot represent) are handled by
// subtracting 2^31 first and or-ing the bit back afterwards. Clobbers
// |scratch| and |at|.
void MacroAssembler::Trunc_uw_d(FPURegister fd,
                                Register rs,
                                FPURegister scratch) {
  DCHECK(!fd.is(scratch));
  DCHECK(!rs.is(at));
  // Load 2^31 into scratch as its double representation
  // (high word 0x41E00000, low word 0).
  li(at, 0x41E00000);
  mtc1(zero_reg, scratch);
  mthc1(at, scratch);
  // Test if scratch > fd.
  // If fd < 2^31 we can convert it normally.
  Label simple_convert;
  BranchF(&simple_convert, NULL, lt, fd, scratch);
  // First we subtract 2^31 from fd, then trunc it to rs
  // and add 2^31 to rs.
  sub_d(scratch, fd, scratch);
  trunc_w_d(scratch, scratch);
  mfc1(rs, scratch);
  Or(rs, rs, 1 << 31);
  Label done;
  Branch(&done);
  // Simple conversion.
  bind(&simple_convert);
  trunc_w_d(scratch, fd);
  mfc1(rs, scratch);
  bind(&done);
}
// Truncate the float in |fd| to an unsigned 32-bit integer in GPR |rs|.
// Values >= 2^31 are handled by subtracting 2^31 first and or-ing the bit
// back afterwards. Clobbers |scratch| and |at|.
void MacroAssembler::Trunc_uw_s(FPURegister fd, Register rs,
                                FPURegister scratch) {
  DCHECK(!fd.is(scratch));
  DCHECK(!rs.is(at));
  // Load 2^31 into scratch as its float representation (0x4F000000).
  li(at, 0x4F000000);
  mtc1(at, scratch);
  // Test if scratch > fd.
  // If fd < 2^31 we can convert it normally.
  Label simple_convert;
  BranchF32(&simple_convert, NULL, lt, fd, scratch);
  // First we subtract 2^31 from fd, then trunc it to rs
  // and add 2^31 to rs.
  sub_s(scratch, fd, scratch);
  trunc_w_s(scratch, scratch);
  mfc1(rs, scratch);
  Or(rs, rs, 1 << 31);
  Label done;
  Branch(&done);
  // Simple conversion.
  bind(&simple_convert);
  trunc_w_s(scratch, fd);
  mfc1(rs, scratch);
  bind(&done);
}
// Truncate the double in |fd| to an unsigned 64-bit integer in GPR |rs|.
// Values >= 2^63 are handled by subtracting 2^63 first and or-ing the top
// bit back in. When |result| is a valid register it is set to 1 on success
// and 0 when the input was <= -1, unordered, or the truncation produced the
// invalid-result pattern. Clobbers |scratch| and |at|.
void MacroAssembler::Trunc_ul_d(FPURegister fd, Register rs,
                                FPURegister scratch, Register result) {
  DCHECK(!fd.is(scratch));
  DCHECK(!AreAliased(rs, result, at));
  Label simple_convert, done, fail;
  if (result.is_valid()) {
    mov(result, zero_reg);
    Move(scratch, -1.0);
    // If fd =< -1 or unordered, then the conversion fails.
    BranchF(&fail, &fail, le, fd, scratch);
  }
  // Load 2^63 into scratch as its double representation.
  li(at, 0x43e0000000000000);
  dmtc1(at, scratch);
  // Test if scratch > fd.
  // If fd < 2^63 we can convert it normally.
  BranchF(&simple_convert, nullptr, lt, fd, scratch);
  // First we subtract 2^63 from fd, then trunc it to rs
  // and add 2^63 to rs.
  sub_d(scratch, fd, scratch);
  trunc_l_d(scratch, scratch);
  dmfc1(rs, scratch);
  Or(rs, rs, Operand(1UL << 63));
  Branch(&done);
  // Simple conversion.
  bind(&simple_convert);
  trunc_l_d(scratch, fd);
  dmfc1(rs, scratch);
  bind(&done);
  if (result.is_valid()) {
    // Conversion is failed if the result is negative.
    addiu(at, zero_reg, -1);
    dsrl(at, at, 1);  // at = 0x7fffffffffffffff (max int64).
    dmfc1(result, scratch);
    xor_(result, result, at);
    // result = 1 iff (trunc result XOR max-int64) > 0.
    Slt(result, zero_reg, result);
  }
  bind(&fail);
}
// Truncate the float in |fd| to an unsigned 64-bit integer in GPR |rs|.
// Values >= 2^63 are handled by subtracting 2^63 first and or-ing the top
// bit back in. When |result| is a valid register it is set to 1 on success
// and 0 when the input was <= -1, unordered, or the truncation produced the
// invalid-result pattern. Clobbers |scratch| and |at|.
void MacroAssembler::Trunc_ul_s(FPURegister fd, Register rs,
                                FPURegister scratch, Register result) {
  DCHECK(!fd.is(scratch));
  DCHECK(!AreAliased(rs, result, at));
  Label simple_convert, done, fail;
  if (result.is_valid()) {
    mov(result, zero_reg);
    Move(scratch, -1.0f);
    // If fd =< -1 or unordered, then the conversion fails.
    BranchF32(&fail, &fail, le, fd, scratch);
  }
  // Load 2^63 into scratch as its float representation.
  li(at, 0x5f000000);
  mtc1(at, scratch);
  // Test if scratch > fd.
  // If fd < 2^63 we can convert it normally.
  BranchF32(&simple_convert, nullptr, lt, fd, scratch);
  // First we subtract 2^63 from fd, then trunc it to rs
  // and add 2^63 to rs.
  sub_s(scratch, fd, scratch);
  trunc_l_s(scratch, scratch);
  dmfc1(rs, scratch);
  Or(rs, rs, Operand(1UL << 63));
  Branch(&done);
  // Simple conversion.
  bind(&simple_convert);
  trunc_l_s(scratch, fd);
  dmfc1(rs, scratch);
  bind(&done);
  if (result.is_valid()) {
    // Conversion is failed if the result is negative or unordered.
    addiu(at, zero_reg, -1);
    dsrl(at, at, 1);  // at = 0x7fffffffffffffff (max int64).
    dmfc1(result, scratch);
    xor_(result, result, at);
    // result = 1 iff (trunc result XOR max-int64) > 0.
    Slt(result, zero_reg, result);
  }
  bind(&fail);
}
// fd = fr + fs * ft, computed as separate mul_d + add_d (two roundings, not
// a fused madd). The fused madd_d arm is disabled pending proper
// architecture-variant detection.
void MacroAssembler::Madd_d(FPURegister fd, FPURegister fr, FPURegister fs,
                            FPURegister ft, FPURegister scratch) {
  if (0) {  // TODO(plind): find reasonable arch-variant symbol names.
    madd_d(fd, fr, fs, ft);
  } else {
    // Can not change source regs's value.
    DCHECK(!fr.is(scratch) && !fs.is(scratch) && !ft.is(scratch));
    mul_d(scratch, fs, ft);
    add_d(fd, fr, scratch);
  }
}
// Shared FP branch helper: branch to |nan| when cmp1/cmp2 compare unordered,
// then to |target| when |cond| holds. Either label may be null (but not
// both). Emits long-branch sequences when a label is out of short-branch
// range; uses c/bc1t-style compares on pre-r6 and cmp/bc1nez (through
// kDoubleCompareReg) on r6.
void MacroAssembler::BranchFCommon(SecondaryField sizeField, Label* target,
                                   Label* nan, Condition cond, FPURegister cmp1,
                                   FPURegister cmp2, BranchDelaySlot bd) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  if (cond == al) {
    // Unconditional: no FP compare needed.
    Branch(bd, target);
    return;
  }
  if (kArchVariant == kMips64r6) {
    sizeField = sizeField == D ? L : W;
  }
  DCHECK(nan || target);
  // Check for unordered (NaN) cases.
  if (nan) {
    bool long_branch =
        nan->is_bound() ? !is_near(nan) : is_trampoline_emitted();
    if (kArchVariant != kMips64r6) {
      if (long_branch) {
        // Invert the short branch to skip over a long jump.
        Label skip;
        c(UN, sizeField, cmp1, cmp2);
        bc1f(&skip);
        nop();
        BranchLong(nan, bd);
        bind(&skip);
      } else {
        c(UN, sizeField, cmp1, cmp2);
        bc1t(nan);
        if (bd == PROTECT) {
          nop();
        }
      }
    } else {
      // Use kDoubleCompareReg for comparison result. It has to be unavailable
      // to lithium
      // register allocator.
      DCHECK(!cmp1.is(kDoubleCompareReg) && !cmp2.is(kDoubleCompareReg));
      if (long_branch) {
        Label skip;
        cmp(UN, sizeField, kDoubleCompareReg, cmp1, cmp2);
        bc1eqz(&skip, kDoubleCompareReg);
        nop();
        BranchLong(nan, bd);
        bind(&skip);
      } else {
        cmp(UN, sizeField, kDoubleCompareReg, cmp1, cmp2);
        bc1nez(nan, kDoubleCompareReg);
        if (bd == PROTECT) {
          nop();
        }
      }
    }
  }
  if (target) {
    bool long_branch =
        target->is_bound() ? !is_near(target) : is_trampoline_emitted();
    if (long_branch) {
      // Branch on the negated condition over a long jump to |target|.
      Label skip;
      Condition neg_cond = NegateFpuCondition(cond);
      BranchShortF(sizeField, &skip, neg_cond, cmp1, cmp2, bd);
      BranchLong(target, bd);
      bind(&skip);
    } else {
      BranchShortF(sizeField, target, cond, cmp1, cmp2, bd);
    }
  }
}
// Emit a short FP branch on |cc| for cmp1/cmp2. Pre-r6 maps each condition to
// a c(COND) compare plus bc1t/bc1f (conditions without a direct encoding use
// the complementary compare and the opposite branch, e.g. gt == !ULE); r6
// uses cmp into kDoubleCompareReg plus bc1nez/bc1eqz. NaN handling is the
// caller's responsibility.
void MacroAssembler::BranchShortF(SecondaryField sizeField, Label* target,
                                  Condition cc, FPURegister cmp1,
                                  FPURegister cmp2, BranchDelaySlot bd) {
  if (kArchVariant != kMips64r6) {
    BlockTrampolinePoolScope block_trampoline_pool(this);
    if (target) {
      // Here NaN cases were either handled by this function or are assumed to
      // have been handled by the caller.
      switch (cc) {
        case lt:
          c(OLT, sizeField, cmp1, cmp2);
          bc1t(target);
          break;
        case ult:
          c(ULT, sizeField, cmp1, cmp2);
          bc1t(target);
          break;
        case gt:
          c(ULE, sizeField, cmp1, cmp2);
          bc1f(target);
          break;
        case ugt:
          c(OLE, sizeField, cmp1, cmp2);
          bc1f(target);
          break;
        case ge:
          c(ULT, sizeField, cmp1, cmp2);
          bc1f(target);
          break;
        case uge:
          c(OLT, sizeField, cmp1, cmp2);
          bc1f(target);
          break;
        case le:
          c(OLE, sizeField, cmp1, cmp2);
          bc1t(target);
          break;
        case ule:
          c(ULE, sizeField, cmp1, cmp2);
          bc1t(target);
          break;
        case eq:
          c(EQ, sizeField, cmp1, cmp2);
          bc1t(target);
          break;
        case ueq:
          c(UEQ, sizeField, cmp1, cmp2);
          bc1t(target);
          break;
        case ne:  // Unordered or not equal.
          c(EQ, sizeField, cmp1, cmp2);
          bc1f(target);
          break;
        case ogl:
          c(UEQ, sizeField, cmp1, cmp2);
          bc1f(target);
          break;
        default:
          CHECK(0);
      }
    }
  } else {
    BlockTrampolinePoolScope block_trampoline_pool(this);
    if (target) {
      // Here NaN cases were either handled by this function or are assumed to
      // have been handled by the caller.
      // Unsigned conditions are treated as their signed counterpart.
      // Use kDoubleCompareReg for comparison result, it is valid in fp64 (FR =
      // 1) mode.
      DCHECK(!cmp1.is(kDoubleCompareReg) && !cmp2.is(kDoubleCompareReg));
      switch (cc) {
        case lt:
          cmp(OLT, sizeField, kDoubleCompareReg, cmp1, cmp2);
          bc1nez(target, kDoubleCompareReg);
          break;
        case ult:
          cmp(ULT, sizeField, kDoubleCompareReg, cmp1, cmp2);
          bc1nez(target, kDoubleCompareReg);
          break;
        case gt:
          cmp(ULE, sizeField, kDoubleCompareReg, cmp1, cmp2);
          bc1eqz(target, kDoubleCompareReg);
          break;
        case ugt:
          cmp(OLE, sizeField, kDoubleCompareReg, cmp1, cmp2);
          bc1eqz(target, kDoubleCompareReg);
          break;
        case ge:
          cmp(ULT, sizeField, kDoubleCompareReg, cmp1, cmp2);
          bc1eqz(target, kDoubleCompareReg);
          break;
        case uge:
          cmp(OLT, sizeField, kDoubleCompareReg, cmp1, cmp2);
          bc1eqz(target, kDoubleCompareReg);
          break;
        case le:
          cmp(OLE, sizeField, kDoubleCompareReg, cmp1, cmp2);
          bc1nez(target, kDoubleCompareReg);
          break;
        case ule:
          cmp(ULE, sizeField, kDoubleCompareReg, cmp1, cmp2);
          bc1nez(target, kDoubleCompareReg);
          break;
        case eq:
          cmp(EQ, sizeField, kDoubleCompareReg, cmp1, cmp2);
          bc1nez(target, kDoubleCompareReg);
          break;
        case ueq:
          cmp(UEQ, sizeField, kDoubleCompareReg, cmp1, cmp2);
          bc1nez(target, kDoubleCompareReg);
          break;
        case ne:
          cmp(EQ, sizeField, kDoubleCompareReg, cmp1, cmp2);
          bc1eqz(target, kDoubleCompareReg);
          break;
        case ogl:
          cmp(UEQ, sizeField, kDoubleCompareReg, cmp1, cmp2);
          bc1eqz(target, kDoubleCompareReg);
          break;
        default:
          CHECK(0);
      }
    }
  }
  if (bd == PROTECT) {
    nop();
  }
}
// Replace the low 32 bits of FPU register |dst| with |src_low|, preserving
// the high 32 bits (saved in and restored from |at|).
void MacroAssembler::FmoveLow(FPURegister dst, Register src_low) {
  DCHECK(!src_low.is(at));
  mfhc1(at, dst);
  mtc1(src_low, dst);
  mthc1(at, dst);
}
// Materialize the float immediate |imm| into |dst| via its bit pattern.
// Clobbers |at|.
void MacroAssembler::Move(FPURegister dst, float imm) {
  li(at, Operand(bit_cast<int32_t>(imm)));
  mtc1(at, dst);
}
// Materialize the double immediate |imm| into |dst|. +-0.0 reuse
// kDoubleRegZero once it has been set up; otherwise the value is built from
// its low/high 32-bit halves with the shortest lui/ori sequence per half.
// Clobbers |at|. Records when kDoubleRegZero itself has been loaded.
void MacroAssembler::Move(FPURegister dst, double imm) {
  int64_t imm_bits = bit_cast<int64_t>(imm);
  // Handle special values first.
  if (imm_bits == bit_cast<int64_t>(0.0) && has_double_zero_reg_set_) {
    mov_d(dst, kDoubleRegZero);
  } else if (imm_bits == bit_cast<int64_t>(-0.0) && has_double_zero_reg_set_) {
    Neg_d(dst, kDoubleRegZero);
  } else {
    uint32_t lo, hi;
    DoubleAsTwoUInt32(imm, &lo, &hi);
    // Move the low part of the double into the lower bits of the corresponding
    // FPU register.
    if (lo != 0) {
      if (!(lo & kImm16Mask)) {
        lui(at, (lo >> kLuiShift) & kImm16Mask);
        mtc1(at, dst);
      } else if (!(lo & kHiMask)) {
        ori(at, zero_reg, lo & kImm16Mask);
        mtc1(at, dst);
      } else {
        lui(at, (lo >> kLuiShift) & kImm16Mask);
        ori(at, at, lo & kImm16Mask);
        mtc1(at, dst);
      }
    } else {
      mtc1(zero_reg, dst);
    }
    // Move the high part of the double into the high bits of the corresponding
    // FPU register.
    if (hi != 0) {
      if (!(hi & kImm16Mask)) {
        lui(at, (hi >> kLuiShift) & kImm16Mask);
        mthc1(at, dst);
      } else if (!(hi & kHiMask)) {
        ori(at, zero_reg, hi & kImm16Mask);
        mthc1(at, dst);
      } else {
        lui(at, (hi >> kLuiShift) & kImm16Mask);
        ori(at, at, hi & kImm16Mask);
        mthc1(at, dst);
      }
    } else {
      mthc1(zero_reg, dst);
    }
    if (dst.is(kDoubleRegZero)) has_double_zero_reg_set_ = true;
  }
}
// Conditional move: rd = rs if rt == 0. r6 dropped the movz instruction, so
// it is emulated with a branch there.
void MacroAssembler::Movz(Register rd, Register rs, Register rt) {
  if (kArchVariant == kMips64r6) {
    Label done;
    Branch(&done, ne, rt, Operand(zero_reg));
    mov(rd, rs);
    bind(&done);
  } else {
    movz(rd, rs, rt);
  }
}
// Conditional move: rd = rs if rt != 0. r6 dropped the movn instruction, so
// it is emulated with a branch there.
void MacroAssembler::Movn(Register rd, Register rs, Register rt) {
  if (kArchVariant == kMips64r6) {
    Label done;
    Branch(&done, eq, rt, Operand(zero_reg));
    mov(rd, rs);
    bind(&done);
  } else {
    movn(rd, rs, rt);
  }
}
// rd = rs if FP condition code |cc| is true (movt wrapper).
void MacroAssembler::Movt(Register rd, Register rs, uint16_t cc) {
  movt(rd, rs, cc);
}
// rd = rs if FP condition code |cc| is false (movf wrapper).
void MacroAssembler::Movf(Register rd, Register rs, uint16_t cc) {
  movf(rd, rs, cc);
}
#define __ masm->
static bool ZeroHelper_d(MacroAssembler* masm, MaxMinKind kind, FPURegister dst,
FPURegister src1, FPURegister src2, Label* equal) {
if (src1.is(src2)) {
__ Move(dst, src1);
return true;
}
Label other, compare_not_equal;
FPURegister left, right;
if (kind == MaxMinKind::kMin) {
left = src1;
right = src2;
} else {
left = src2;
right = src1;
}
__ BranchF64(&compare_not_equal, nullptr, ne, src1, src2);
// Left and right hand side are equal, check for -0 vs. +0.
__ dmfc1(t8, src1);
__ Branch(&other, eq, t8, Operand(0x8000000000000000));
__ Move_d(dst