| // Copyright 2013 the V8 project authors. All rights reserved. |
| // |
| // Redistribution and use in source and binary forms, with or without |
| // modification, are permitted provided that the following conditions are |
| // met: |
| // |
| // * Redistributions of source code must retain the above copyright |
| // notice, this list of conditions and the following disclaimer. |
| // * Redistributions in binary form must reproduce the above |
| // copyright notice, this list of conditions and the following |
| // disclaimer in the documentation and/or other materials provided |
| // with the distribution. |
| // * Neither the name of Google Inc. nor the names of its |
| // contributors may be used to endorse or promote products derived |
| // from this software without specific prior written permission. |
| // |
| // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS |
| // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT |
| // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR |
| // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT |
| // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, |
| // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT |
| // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
| // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
| // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
| // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE |
| // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
| |
| #if V8_TARGET_ARCH_ARM64 |
| |
| #include "src/codegen/arm64/assembler-arm64.h" |
| |
| #include "src/base/bits.h" |
| #include "src/base/cpu.h" |
| #include "src/base/small-vector.h" |
| #include "src/codegen/arm64/assembler-arm64-inl.h" |
| #include "src/codegen/register-configuration.h" |
| #include "src/codegen/safepoint-table.h" |
| #include "src/codegen/string-constants.h" |
| #include "src/execution/frame-constants.h" |
| |
| namespace v8 { |
| namespace internal { |
| |
| namespace { |
| |
| #ifdef USE_SIMULATOR |
| unsigned SimulatorFeaturesFromCommandLine() { |
| if (strcmp(FLAG_sim_arm64_optional_features, "none") == 0) { |
| return 0; |
| } |
| if (strcmp(FLAG_sim_arm64_optional_features, "all") == 0) { |
| return (1u << NUMBER_OF_CPU_FEATURES) - 1; |
| } |
| fprintf( |
| stderr, |
| "Error: unrecognised value for --sim-arm64-optional-features ('%s').\n", |
| FLAG_sim_arm64_optional_features); |
| fprintf(stderr, |
| "Supported values are: none\n" |
| " all\n"); |
| FATAL("sim-arm64-optional-features"); |
| } |
| #endif // USE_SIMULATOR |
| |
| constexpr unsigned CpuFeaturesFromCompiler() { |
| unsigned features = 0; |
| #if defined(__ARM_FEATURE_JCVT) |
| features |= 1u << JSCVT; |
| #endif |
| return features; |
| } |
| |
| constexpr unsigned CpuFeaturesFromTargetOS() { |
| unsigned features = 0; |
| #if defined(V8_TARGET_OS_MACOS) |
| features |= 1u << JSCVT; |
| #endif |
| return features; |
| } |
| |
| } // namespace |
| |
| // ----------------------------------------------------------------------------- |
| // CpuFeatures implementation. |
| bool CpuFeatures::SupportsWasmSimd128() { return true; } |
| |
| void CpuFeatures::ProbeImpl(bool cross_compile) { |
| // Only use statically determined features for cross compile (snapshot). |
| if (cross_compile) { |
| supported_ |= CpuFeaturesFromCompiler(); |
| supported_ |= CpuFeaturesFromTargetOS(); |
| return; |
| } |
| |
| // We used to probe for coherent cache support, but on older CPUs it |
| // causes crashes (crbug.com/524337), and newer CPUs don't even have |
| // the feature any more. |
| |
| #ifdef USE_SIMULATOR |
| supported_ |= SimulatorFeaturesFromCommandLine(); |
| #else |
| // Probe for additional features at runtime. |
| base::CPU cpu; |
| unsigned runtime = 0; |
| if (cpu.has_jscvt()) { |
| runtime |= 1u << JSCVT; |
| } |
| |
| // Use the best of the features found by CPU detection and those inferred from |
| // the build system. |
| supported_ |= CpuFeaturesFromCompiler(); |
| supported_ |= runtime; |
| #endif // USE_SIMULATOR |
| |
// Set a static value on whether Simd is supported.
// This variable is only used for certain archs to query
// SupportsWasmSimd128() at runtime in builtins using an extern ref. Other
// callers should use CpuFeatures::SupportsWasmSimd128().
| CpuFeatures::supports_wasm_simd_128_ = CpuFeatures::SupportsWasmSimd128(); |
| } |
| |
| void CpuFeatures::PrintTarget() {} |
| void CpuFeatures::PrintFeatures() {} |
| |
| // ----------------------------------------------------------------------------- |
| // CPURegList utilities. |
| |
| CPURegister CPURegList::PopLowestIndex() { |
| if (IsEmpty()) { |
| return NoCPUReg; |
| } |
| int index = base::bits::CountTrailingZeros(list_); |
| DCHECK((1LL << index) & list_); |
| Remove(index); |
| return CPURegister::Create(index, size_, type_); |
| } |
| |
| CPURegister CPURegList::PopHighestIndex() { |
| if (IsEmpty()) { |
| return NoCPUReg; |
| } |
| int index = CountLeadingZeros(list_, kRegListSizeInBits); |
| index = kRegListSizeInBits - 1 - index; |
| DCHECK((1LL << index) & list_); |
| Remove(index); |
| return CPURegister::Create(index, size_, type_); |
| } |
| |
| void CPURegList::Align() { |
| // Use padreg, if necessary, to maintain stack alignment. |
| if (Count() % 2 != 0) { |
| if (IncludesAliasOf(padreg)) { |
| Remove(padreg); |
| } else { |
| Combine(padreg); |
| } |
| } |
| |
| DCHECK_EQ(Count() % 2, 0); |
| } |
| |
| CPURegList CPURegList::GetCalleeSaved(int size) { |
| return CPURegList(CPURegister::kRegister, size, 19, 28); |
| } |
| |
| CPURegList CPURegList::GetCalleeSavedV(int size) { |
| return CPURegList(CPURegister::kVRegister, size, 8, 15); |
| } |
| |
| CPURegList CPURegList::GetCallerSaved(int size) { |
| // x18 is the platform register and is reserved for the use of platform ABIs. |
| // Registers x0-x17 are caller-saved. |
| CPURegList list = CPURegList(CPURegister::kRegister, size, 0, 17); |
| return list; |
| } |
| |
| CPURegList CPURegList::GetCallerSavedV(int size) { |
| // Registers d0-d7 and d16-d31 are caller-saved. |
| CPURegList list = CPURegList(CPURegister::kVRegister, size, 0, 7); |
| list.Combine(CPURegList(CPURegister::kVRegister, size, 16, 31)); |
| return list; |
| } |
| |
| // ----------------------------------------------------------------------------- |
| // Implementation of RelocInfo |
| |
| const int RelocInfo::kApplyMask = |
| RelocInfo::ModeMask(RelocInfo::CODE_TARGET) | |
| RelocInfo::ModeMask(RelocInfo::RUNTIME_ENTRY) | |
| RelocInfo::ModeMask(RelocInfo::INTERNAL_REFERENCE); |
| |
| bool RelocInfo::IsCodedSpecially() { |
| // The deserializer needs to know whether a pointer is specially coded. Being |
| // specially coded on ARM64 means that it is an immediate branch. |
| Instruction* instr = reinterpret_cast<Instruction*>(pc_); |
| if (instr->IsLdrLiteralX()) { |
| return false; |
| } else { |
| DCHECK(instr->IsBranchAndLink() || instr->IsUnconditionalBranch()); |
| return true; |
| } |
| } |
| |
| bool RelocInfo::IsInConstantPool() { |
| Instruction* instr = reinterpret_cast<Instruction*>(pc_); |
| DCHECK_IMPLIES(instr->IsLdrLiteralW(), COMPRESS_POINTERS_BOOL); |
| return instr->IsLdrLiteralX() || |
| (COMPRESS_POINTERS_BOOL && instr->IsLdrLiteralW()); |
| } |
| |
| uint32_t RelocInfo::wasm_call_tag() const { |
| DCHECK(rmode_ == WASM_CALL || rmode_ == WASM_STUB_CALL); |
| Instruction* instr = reinterpret_cast<Instruction*>(pc_); |
| if (instr->IsLdrLiteralX()) { |
| return static_cast<uint32_t>( |
| Memory<Address>(Assembler::target_pointer_address_at(pc_))); |
| } else { |
| DCHECK(instr->IsBranchAndLink() || instr->IsUnconditionalBranch()); |
| return static_cast<uint32_t>(instr->ImmPCOffset() / kInstrSize); |
| } |
| } |
| |
| bool AreAliased(const CPURegister& reg1, const CPURegister& reg2, |
| const CPURegister& reg3, const CPURegister& reg4, |
| const CPURegister& reg5, const CPURegister& reg6, |
| const CPURegister& reg7, const CPURegister& reg8) { |
| int number_of_valid_regs = 0; |
| int number_of_valid_fpregs = 0; |
| |
| uint64_t unique_regs = 0; |
| uint64_t unique_fpregs = 0; |
| |
| const CPURegister regs[] = {reg1, reg2, reg3, reg4, reg5, reg6, reg7, reg8}; |
| |
| for (unsigned i = 0; i < arraysize(regs); i++) { |
| if (regs[i].IsRegister()) { |
| number_of_valid_regs++; |
| unique_regs |= (uint64_t{1} << regs[i].code()); |
| } else if (regs[i].IsVRegister()) { |
| number_of_valid_fpregs++; |
| unique_fpregs |= (uint64_t{1} << regs[i].code()); |
| } else { |
| DCHECK(!regs[i].is_valid()); |
| } |
| } |
| |
| int number_of_unique_regs = |
| CountSetBits(unique_regs, sizeof(unique_regs) * kBitsPerByte); |
| int number_of_unique_fpregs = |
| CountSetBits(unique_fpregs, sizeof(unique_fpregs) * kBitsPerByte); |
| |
| DCHECK(number_of_valid_regs >= number_of_unique_regs); |
| DCHECK(number_of_valid_fpregs >= number_of_unique_fpregs); |
| |
| return (number_of_valid_regs != number_of_unique_regs) || |
| (number_of_valid_fpregs != number_of_unique_fpregs); |
| } |
| |
| bool AreSameSizeAndType(const CPURegister& reg1, const CPURegister& reg2, |
| const CPURegister& reg3, const CPURegister& reg4, |
| const CPURegister& reg5, const CPURegister& reg6, |
| const CPURegister& reg7, const CPURegister& reg8) { |
| DCHECK(reg1.is_valid()); |
| bool match = true; |
| match &= !reg2.is_valid() || reg2.IsSameSizeAndType(reg1); |
| match &= !reg3.is_valid() || reg3.IsSameSizeAndType(reg1); |
| match &= !reg4.is_valid() || reg4.IsSameSizeAndType(reg1); |
| match &= !reg5.is_valid() || reg5.IsSameSizeAndType(reg1); |
| match &= !reg6.is_valid() || reg6.IsSameSizeAndType(reg1); |
| match &= !reg7.is_valid() || reg7.IsSameSizeAndType(reg1); |
| match &= !reg8.is_valid() || reg8.IsSameSizeAndType(reg1); |
| return match; |
| } |
| |
| bool AreSameFormat(const VRegister& reg1, const VRegister& reg2, |
| const VRegister& reg3, const VRegister& reg4) { |
| DCHECK(reg1.is_valid()); |
| return (!reg2.is_valid() || reg2.IsSameFormat(reg1)) && |
| (!reg3.is_valid() || reg3.IsSameFormat(reg1)) && |
| (!reg4.is_valid() || reg4.IsSameFormat(reg1)); |
| } |
| |
| bool AreConsecutive(const VRegister& reg1, const VRegister& reg2, |
| const VRegister& reg3, const VRegister& reg4) { |
| DCHECK(reg1.is_valid()); |
| if (!reg2.is_valid()) { |
| DCHECK(!reg3.is_valid() && !reg4.is_valid()); |
| return true; |
| } else if (reg2.code() != ((reg1.code() + 1) % kNumberOfVRegisters)) { |
| return false; |
| } |
| |
| if (!reg3.is_valid()) { |
| DCHECK(!reg4.is_valid()); |
| return true; |
| } else if (reg3.code() != ((reg2.code() + 1) % kNumberOfVRegisters)) { |
| return false; |
| } |
| |
| if (!reg4.is_valid()) { |
| return true; |
| } else if (reg4.code() != ((reg3.code() + 1) % kNumberOfVRegisters)) { |
| return false; |
| } |
| |
| return true; |
| } |
| |
| bool Operand::NeedsRelocation(const Assembler* assembler) const { |
| RelocInfo::Mode rmode = immediate_.rmode(); |
| |
| if (RelocInfo::IsOnlyForSerializer(rmode)) { |
| return assembler->options().record_reloc_info_for_serialization; |
| } |
| |
| return !RelocInfo::IsNoInfo(rmode); |
| } |
| |
| // Assembler |
| Assembler::Assembler(const AssemblerOptions& options, |
| std::unique_ptr<AssemblerBuffer> buffer) |
| : AssemblerBase(options, std::move(buffer)), |
| unresolved_branches_(), |
| constpool_(this) { |
| veneer_pool_blocked_nesting_ = 0; |
| Reset(); |
| |
| #if defined(V8_OS_WIN) |
| if (options.collect_win64_unwind_info) { |
| xdata_encoder_ = std::make_unique<win64_unwindinfo::XdataEncoder>(*this); |
| } |
| #endif |
| } |
| |
| Assembler::~Assembler() { |
| DCHECK(constpool_.IsEmpty()); |
| DCHECK_EQ(veneer_pool_blocked_nesting_, 0); |
| } |
| |
| void Assembler::AbortedCodeGeneration() { constpool_.Clear(); } |
| |
| void Assembler::Reset() { |
| #ifdef DEBUG |
| DCHECK((pc_ >= buffer_start_) && (pc_ < buffer_start_ + buffer_->size())); |
| DCHECK_EQ(veneer_pool_blocked_nesting_, 0); |
| DCHECK(unresolved_branches_.empty()); |
| memset(buffer_start_, 0, pc_ - buffer_start_); |
| #endif |
| pc_ = buffer_start_; |
| reloc_info_writer.Reposition(buffer_start_ + buffer_->size(), pc_); |
| constpool_.Clear(); |
| next_veneer_pool_check_ = kMaxInt; |
| } |
| |
| #if defined(V8_OS_WIN) |
| win64_unwindinfo::BuiltinUnwindInfo Assembler::GetUnwindInfo() const { |
| DCHECK(options().collect_win64_unwind_info); |
| DCHECK_NOT_NULL(xdata_encoder_); |
| return xdata_encoder_->unwinding_info(); |
| } |
| #endif |
| |
| void Assembler::AllocateAndInstallRequestedHeapObjects(Isolate* isolate) { |
| DCHECK_IMPLIES(isolate == nullptr, heap_object_requests_.empty()); |
| for (auto& request : heap_object_requests_) { |
| Address pc = reinterpret_cast<Address>(buffer_start_) + request.offset(); |
| switch (request.kind()) { |
| case HeapObjectRequest::kHeapNumber: { |
| Handle<HeapObject> object = |
| isolate->factory()->NewHeapNumber<AllocationType::kOld>( |
| request.heap_number()); |
| EmbeddedObjectIndex index = AddEmbeddedObject(object); |
| set_embedded_object_index_referenced_from(pc, index); |
| break; |
| } |
| case HeapObjectRequest::kStringConstant: { |
| const StringConstantBase* str = request.string(); |
| CHECK_NOT_NULL(str); |
| EmbeddedObjectIndex index = |
| AddEmbeddedObject(str->AllocateStringConstant(isolate)); |
| set_embedded_object_index_referenced_from(pc, index); |
| break; |
| } |
| } |
| } |
| } |
| |
| void Assembler::GetCode(Isolate* isolate, CodeDesc* desc, |
| SafepointTableBuilder* safepoint_table_builder, |
| int handler_table_offset) { |
| // As a crutch to avoid having to add manual Align calls wherever we use a |
| // raw workflow to create Code objects (mostly in tests), add another Align |
| // call here. It does no harm - the end of the Code object is aligned to the |
// (larger) kCodeAlignment anyway.
| // TODO(jgruber): Consider moving responsibility for proper alignment to |
| // metadata table builders (safepoint, handler, constant pool, code |
| // comments). |
| DataAlign(Code::kMetadataAlignment); |
| |
| // Emit constant pool if necessary. |
| ForceConstantPoolEmissionWithoutJump(); |
| DCHECK(constpool_.IsEmpty()); |
| |
| int code_comments_size = WriteCodeComments(); |
| |
| AllocateAndInstallRequestedHeapObjects(isolate); |
| |
| // Set up code descriptor. |
| // TODO(jgruber): Reconsider how these offsets and sizes are maintained up to |
| // this point to make CodeDesc initialization less fiddly. |
| |
| static constexpr int kConstantPoolSize = 0; |
| const int instruction_size = pc_offset(); |
| const int code_comments_offset = instruction_size - code_comments_size; |
| const int constant_pool_offset = code_comments_offset - kConstantPoolSize; |
| const int handler_table_offset2 = (handler_table_offset == kNoHandlerTable) |
| ? constant_pool_offset |
| : handler_table_offset; |
| const int safepoint_table_offset = |
| (safepoint_table_builder == kNoSafepointTable) |
| ? handler_table_offset2 |
| : safepoint_table_builder->safepoint_table_offset(); |
| const int reloc_info_offset = |
| static_cast<int>(reloc_info_writer.pos() - buffer_->start()); |
| CodeDesc::Initialize(desc, this, safepoint_table_offset, |
| handler_table_offset2, constant_pool_offset, |
| code_comments_offset, reloc_info_offset); |
| } |
| |
| void Assembler::Align(int m) { |
// pc_offset() must already be kInstrSize-aligned; if not, the loop below
// won't terminate.
| DCHECK(IsAligned(pc_offset(), kInstrSize)); |
| DCHECK(m >= kInstrSize && base::bits::IsPowerOfTwo(m)); |
| while ((pc_offset() & (m - 1)) != 0) { |
| nop(); |
| } |
| } |
| |
| void Assembler::CodeTargetAlign() { |
| // Preferred alignment of jump targets on some ARM chips. |
| Align(8); |
| } |
| |
| void Assembler::CheckLabelLinkChain(Label const* label) { |
| #ifdef DEBUG |
| if (label->is_linked()) { |
static const int kMaxLinksToCheck = 64;  // Avoid O(n^2) behaviour.
| int links_checked = 0; |
| int64_t linkoffset = label->pos(); |
| bool end_of_chain = false; |
| while (!end_of_chain) { |
| if (++links_checked > kMaxLinksToCheck) break; |
| Instruction* link = InstructionAt(linkoffset); |
| int64_t linkpcoffset = link->ImmPCOffset(); |
| int64_t prevlinkoffset = linkoffset + linkpcoffset; |
| |
| end_of_chain = (linkoffset == prevlinkoffset); |
| linkoffset = linkoffset + linkpcoffset; |
| } |
| } |
| #endif |
| } |
| |
| void Assembler::RemoveBranchFromLabelLinkChain(Instruction* branch, |
| Label* label, |
| Instruction* label_veneer) { |
| DCHECK(label->is_linked()); |
| |
| CheckLabelLinkChain(label); |
| |
| Instruction* link = InstructionAt(label->pos()); |
| Instruction* prev_link = link; |
| Instruction* next_link; |
| bool end_of_chain = false; |
| |
| while (link != branch && !end_of_chain) { |
| next_link = link->ImmPCOffsetTarget(); |
| end_of_chain = (link == next_link); |
| prev_link = link; |
| link = next_link; |
| } |
| |
| DCHECK(branch == link); |
| next_link = branch->ImmPCOffsetTarget(); |
| |
| if (branch == prev_link) { |
| // The branch is the first instruction in the chain. |
| if (branch == next_link) { |
| // It is also the last instruction in the chain, so it is the only branch |
| // currently referring to this label. |
| label->Unuse(); |
| } else { |
| label->link_to( |
| static_cast<int>(reinterpret_cast<byte*>(next_link) - buffer_start_)); |
| } |
| |
| } else if (branch == next_link) { |
| // The branch is the last (but not also the first) instruction in the chain. |
| prev_link->SetImmPCOffsetTarget(options(), prev_link); |
| |
| } else { |
| // The branch is in the middle of the chain. |
| if (prev_link->IsTargetInImmPCOffsetRange(next_link)) { |
| prev_link->SetImmPCOffsetTarget(options(), next_link); |
| } else if (label_veneer != nullptr) { |
| // Use the veneer for all previous links in the chain. |
| prev_link->SetImmPCOffsetTarget(options(), prev_link); |
| |
| end_of_chain = false; |
| link = next_link; |
| while (!end_of_chain) { |
| next_link = link->ImmPCOffsetTarget(); |
| end_of_chain = (link == next_link); |
| link->SetImmPCOffsetTarget(options(), label_veneer); |
| link = next_link; |
| } |
| } else { |
| // The assert below will fire. |
| // Some other work could be attempted to fix up the chain, but it would be |
// rather complicated. If we crash here, we may want to consider using a
// mechanism other than a chain of branches.
| // |
| // Note that this situation currently should not happen, as we always call |
| // this function with a veneer to the target label. |
| // However this could happen with a MacroAssembler in the following state: |
| // [previous code] |
| // B(label); |
| // [20KB code] |
| // Tbz(label); // First tbz. Pointing to unconditional branch. |
| // [20KB code] |
| // Tbz(label); // Second tbz. Pointing to the first tbz. |
| // [more code] |
| // and this function is called to remove the first tbz from the label link |
| // chain. Since tbz has a range of +-32KB, the second tbz cannot point to |
| // the unconditional branch. |
| CHECK(prev_link->IsTargetInImmPCOffsetRange(next_link)); |
| UNREACHABLE(); |
| } |
| } |
| |
| CheckLabelLinkChain(label); |
| } |
| |
| void Assembler::bind(Label* label) { |
| // Bind label to the address at pc_. All instructions (most likely branches) |
| // that are linked to this label will be updated to point to the newly-bound |
| // label. |
| |
| DCHECK(!label->is_near_linked()); |
| DCHECK(!label->is_bound()); |
| |
| DeleteUnresolvedBranchInfoForLabel(label); |
| |
| // If the label is linked, the link chain looks something like this: |
| // |
| // |--I----I-------I-------L |
| // |---------------------->| pc_offset |
| // |-------------->| linkoffset = label->pos() |
| // |<------| link->ImmPCOffset() |
| // |------>| prevlinkoffset = linkoffset + link->ImmPCOffset() |
| // |
| // On each iteration, the last link is updated and then removed from the |
| // chain until only one remains. At that point, the label is bound. |
| // |
| // If the label is not linked, no preparation is required before binding. |
| while (label->is_linked()) { |
| int linkoffset = label->pos(); |
| Instruction* link = InstructionAt(linkoffset); |
| int prevlinkoffset = linkoffset + static_cast<int>(link->ImmPCOffset()); |
| |
| CheckLabelLinkChain(label); |
| |
| DCHECK_GE(linkoffset, 0); |
| DCHECK(linkoffset < pc_offset()); |
| DCHECK((linkoffset > prevlinkoffset) || |
| (linkoffset - prevlinkoffset == kStartOfLabelLinkChain)); |
| DCHECK_GE(prevlinkoffset, 0); |
| |
| // Update the link to point to the label. |
| if (link->IsUnresolvedInternalReference()) { |
| // Internal references do not get patched to an instruction but directly |
| // to an address. |
| internal_reference_positions_.push_back(linkoffset); |
| memcpy(link, &pc_, kSystemPointerSize); |
| } else { |
| link->SetImmPCOffsetTarget(options(), |
| reinterpret_cast<Instruction*>(pc_)); |
| } |
| |
| // Link the label to the previous link in the chain. |
| if (linkoffset - prevlinkoffset == kStartOfLabelLinkChain) { |
| // We hit kStartOfLabelLinkChain, so the chain is fully processed. |
| label->Unuse(); |
| } else { |
| // Update the label for the next iteration. |
| label->link_to(prevlinkoffset); |
| } |
| } |
| label->bind_to(pc_offset()); |
| |
| DCHECK(label->is_bound()); |
| DCHECK(!label->is_linked()); |
| } |
| |
| int Assembler::LinkAndGetByteOffsetTo(Label* label) { |
| DCHECK_EQ(sizeof(*pc_), 1); |
| CheckLabelLinkChain(label); |
| |
| int offset; |
| if (label->is_bound()) { |
| // The label is bound, so it does not need to be updated. Referring |
| // instructions must link directly to the label as they will not be |
| // updated. |
| // |
| // In this case, label->pos() returns the offset of the label from the |
| // start of the buffer. |
| // |
| // Note that offset can be zero for self-referential instructions. (This |
| // could be useful for ADR, for example.) |
| offset = label->pos() - pc_offset(); |
| DCHECK_LE(offset, 0); |
| } else { |
| if (label->is_linked()) { |
| // The label is linked, so the referring instruction should be added onto |
| // the end of the label's link chain. |
| // |
| // In this case, label->pos() returns the offset of the last linked |
| // instruction from the start of the buffer. |
| offset = label->pos() - pc_offset(); |
| DCHECK_NE(offset, kStartOfLabelLinkChain); |
| // Note that the offset here needs to be PC-relative only so that the |
| // first instruction in a buffer can link to an unbound label. Otherwise, |
| // the offset would be 0 for this case, and 0 is reserved for |
| // kStartOfLabelLinkChain. |
| } else { |
| // The label is unused, so it now becomes linked and the referring |
| // instruction is at the start of the new link chain. |
| offset = kStartOfLabelLinkChain; |
| } |
| // The instruction at pc is now the last link in the label's chain. |
| label->link_to(pc_offset()); |
| } |
| |
| return offset; |
| } |
| |
| void Assembler::DeleteUnresolvedBranchInfoForLabelTraverse(Label* label) { |
| DCHECK(label->is_linked()); |
| CheckLabelLinkChain(label); |
| |
| int link_offset = label->pos(); |
| int link_pcoffset; |
| bool end_of_chain = false; |
| |
| while (!end_of_chain) { |
| Instruction* link = InstructionAt(link_offset); |
| link_pcoffset = static_cast<int>(link->ImmPCOffset()); |
| |
| // ADR instructions are not handled by veneers. |
| if (link->IsImmBranch()) { |
| int max_reachable_pc = |
| static_cast<int>(InstructionOffset(link) + |
| Instruction::ImmBranchRange(link->BranchType())); |
| using unresolved_info_it = std::multimap<int, FarBranchInfo>::iterator; |
| std::pair<unresolved_info_it, unresolved_info_it> range; |
| range = unresolved_branches_.equal_range(max_reachable_pc); |
| unresolved_info_it it; |
| for (it = range.first; it != range.second; ++it) { |
| if (it->second.pc_offset_ == link_offset) { |
| unresolved_branches_.erase(it); |
| break; |
| } |
| } |
| } |
| |
| end_of_chain = (link_pcoffset == 0); |
| link_offset = link_offset + link_pcoffset; |
| } |
| } |
| |
| void Assembler::DeleteUnresolvedBranchInfoForLabel(Label* label) { |
| if (unresolved_branches_.empty()) { |
| DCHECK_EQ(next_veneer_pool_check_, kMaxInt); |
| return; |
| } |
| |
| if (label->is_linked()) { |
| // Branches to this label will be resolved when the label is bound, normally |
| // just after all the associated info has been deleted. |
| DeleteUnresolvedBranchInfoForLabelTraverse(label); |
| } |
| if (unresolved_branches_.empty()) { |
| next_veneer_pool_check_ = kMaxInt; |
| } else { |
| next_veneer_pool_check_ = |
| unresolved_branches_first_limit() - kVeneerDistanceCheckMargin; |
| } |
| } |
| |
| bool Assembler::IsConstantPoolAt(Instruction* instr) { |
| // The constant pool marker is made of two instructions. These instructions |
| // will never be emitted by the JIT, so checking for the first one is enough: |
| // 0: ldr xzr, #<size of pool> |
| bool result = instr->IsLdrLiteralX() && (instr->Rt() == kZeroRegCode); |
| |
| // It is still worth asserting the marker is complete. |
| // 4: blr xzr |
| DCHECK(!result || (instr->following()->IsBranchAndLinkToRegister() && |
| instr->following()->Rn() == kZeroRegCode)); |
| |
| return result; |
| } |
| |
| int Assembler::ConstantPoolSizeAt(Instruction* instr) { |
| #ifdef USE_SIMULATOR |
| // Assembler::debug() embeds constants directly into the instruction stream. |
| // Although this is not a genuine constant pool, treat it like one to avoid |
| // disassembling the constants. |
| if ((instr->Mask(ExceptionMask) == HLT) && |
| (instr->ImmException() == kImmExceptionIsDebug)) { |
| const char* message = reinterpret_cast<const char*>( |
| instr->InstructionAtOffset(kDebugMessageOffset)); |
| int size = static_cast<int>(kDebugMessageOffset + strlen(message) + 1); |
| return RoundUp(size, kInstrSize) / kInstrSize; |
| } |
| // Same for printf support, see MacroAssembler::CallPrintf(). |
| if ((instr->Mask(ExceptionMask) == HLT) && |
| (instr->ImmException() == kImmExceptionIsPrintf)) { |
| return kPrintfLength / kInstrSize; |
| } |
| #endif |
| if (IsConstantPoolAt(instr)) { |
| return instr->ImmLLiteral(); |
| } else { |
| return -1; |
| } |
| } |
| |
| void Assembler::EmitPoolGuard() { |
| // We must generate only one instruction as this is used in scopes that |
| // control the size of the code generated. |
| Emit(BLR | Rn(xzr)); |
| } |
| |
| void Assembler::StartBlockVeneerPool() { ++veneer_pool_blocked_nesting_; } |
| |
| void Assembler::EndBlockVeneerPool() { |
| if (--veneer_pool_blocked_nesting_ == 0) { |
| // Check the veneer pool hasn't been blocked for too long. |
| DCHECK(unresolved_branches_.empty() || |
| (pc_offset() < unresolved_branches_first_limit())); |
| } |
| } |
| |
| void Assembler::br(const Register& xn) { |
| DCHECK(xn.Is64Bits()); |
| Emit(BR | Rn(xn)); |
| } |
| |
| void Assembler::blr(const Register& xn) { |
| DCHECK(xn.Is64Bits()); |
| // The pattern 'blr xzr' is used as a guard to detect when execution falls |
| // through the constant pool. It should not be emitted. |
| DCHECK_NE(xn, xzr); |
| Emit(BLR | Rn(xn)); |
| } |
| |
| void Assembler::ret(const Register& xn) { |
| DCHECK(xn.Is64Bits()); |
| Emit(RET | Rn(xn)); |
| } |
| |
| void Assembler::b(int imm26) { Emit(B | ImmUncondBranch(imm26)); } |
| |
| void Assembler::b(Label* label) { b(LinkAndGetInstructionOffsetTo(label)); } |
| |
| void Assembler::b(int imm19, Condition cond) { |
| Emit(B_cond | ImmCondBranch(imm19) | cond); |
| } |
| |
| void Assembler::b(Label* label, Condition cond) { |
| b(LinkAndGetInstructionOffsetTo(label), cond); |
| } |
| |
| void Assembler::bl(int imm26) { Emit(BL | ImmUncondBranch(imm26)); } |
| |
| void Assembler::bl(Label* label) { bl(LinkAndGetInstructionOffsetTo(label)); } |
| |
| void Assembler::cbz(const Register& rt, int imm19) { |
| Emit(SF(rt) | CBZ | ImmCmpBranch(imm19) | Rt(rt)); |
| } |
| |
| void Assembler::cbz(const Register& rt, Label* label) { |
| cbz(rt, LinkAndGetInstructionOffsetTo(label)); |
| } |
| |
| void Assembler::cbnz(const Register& rt, int imm19) { |
| Emit(SF(rt) | CBNZ | ImmCmpBranch(imm19) | Rt(rt)); |
| } |
| |
| void Assembler::cbnz(const Register& rt, Label* label) { |
| cbnz(rt, LinkAndGetInstructionOffsetTo(label)); |
| } |
| |
| void Assembler::tbz(const Register& rt, unsigned bit_pos, int imm14) { |
| DCHECK(rt.Is64Bits() || (rt.Is32Bits() && (bit_pos < kWRegSizeInBits))); |
| Emit(TBZ | ImmTestBranchBit(bit_pos) | ImmTestBranch(imm14) | Rt(rt)); |
| } |
| |
| void Assembler::tbz(const Register& rt, unsigned bit_pos, Label* label) { |
| tbz(rt, bit_pos, LinkAndGetInstructionOffsetTo(label)); |
| } |
| |
| void Assembler::tbnz(const Register& rt, unsigned bit_pos, int imm14) { |
| DCHECK(rt.Is64Bits() || (rt.Is32Bits() && (bit_pos < kWRegSizeInBits))); |
| Emit(TBNZ | ImmTestBranchBit(bit_pos) | ImmTestBranch(imm14) | Rt(rt)); |
| } |
| |
| void Assembler::tbnz(const Register& rt, unsigned bit_pos, Label* label) { |
| tbnz(rt, bit_pos, LinkAndGetInstructionOffsetTo(label)); |
| } |
| |
| void Assembler::adr(const Register& rd, int imm21) { |
| DCHECK(rd.Is64Bits()); |
| Emit(ADR | ImmPCRelAddress(imm21) | Rd(rd)); |
| } |
| |
| void Assembler::adr(const Register& rd, Label* label) { |
| adr(rd, LinkAndGetByteOffsetTo(label)); |
| } |
| |
| void Assembler::nop(NopMarkerTypes n) { |
| DCHECK((FIRST_NOP_MARKER <= n) && (n <= LAST_NOP_MARKER)); |
| mov(Register::XRegFromCode(n), Register::XRegFromCode(n)); |
| } |
| |
| void Assembler::add(const Register& rd, const Register& rn, |
| const Operand& operand) { |
| AddSub(rd, rn, operand, LeaveFlags, ADD); |
| } |
| |
| void Assembler::adds(const Register& rd, const Register& rn, |
| const Operand& operand) { |
| AddSub(rd, rn, operand, SetFlags, ADD); |
| } |
| |
| void Assembler::cmn(const Register& rn, const Operand& operand) { |
| Register zr = AppropriateZeroRegFor(rn); |
| adds(zr, rn, operand); |
| } |
| |
| void Assembler::sub(const Register& rd, const Register& rn, |
| const Operand& operand) { |
| AddSub(rd, rn, operand, LeaveFlags, SUB); |
| } |
| |
| void Assembler::subs(const Register& rd, const Register& rn, |
| const Operand& operand) { |
| AddSub(rd, rn, operand, SetFlags, SUB); |
| } |
| |
| void Assembler::cmp(const Register& rn, const Operand& operand) { |
| Register zr = AppropriateZeroRegFor(rn); |
| subs(zr, rn, operand); |
| } |
| |
| void Assembler::neg(const Register& rd, const Operand& operand) { |
| Register zr = AppropriateZeroRegFor(rd); |
| sub(rd, zr, operand); |
| } |
| |
| void Assembler::negs(const Register& rd, const Operand& operand) { |
| Register zr = AppropriateZeroRegFor(rd); |
| subs(rd, zr, operand); |
| } |
| |
| void Assembler::adc(const Register& rd, const Register& rn, |
| const Operand& operand) { |
| AddSubWithCarry(rd, rn, operand, LeaveFlags, ADC); |
| } |
| |
| void Assembler::adcs(const Register& rd, const Register& rn, |
| const Operand& operand) { |
| AddSubWithCarry(rd, rn, operand, SetFlags, ADC); |
| } |
| |
| void Assembler::sbc(const Register& rd, const Register& rn, |
| const Operand& operand) { |
| AddSubWithCarry(rd, rn, operand, LeaveFlags, SBC); |
| } |
| |
| void Assembler::sbcs(const Register& rd, const Register& rn, |
| const Operand& operand) { |
| AddSubWithCarry(rd, rn, operand, SetFlags, SBC); |
| } |
| |
| void Assembler::ngc(const Register& rd, const Operand& operand) { |
| Register zr = AppropriateZeroRegFor(rd); |
| sbc(rd, zr, operand); |
| } |
| |
| void Assembler::ngcs(const Register& rd, const Operand& operand) { |
| Register zr = AppropriateZeroRegFor(rd); |
| sbcs(rd, zr, operand); |
| } |
| |
| // Logical instructions. |
| void Assembler::and_(const Register& rd, const Register& rn, |
| const Operand& operand) { |
| Logical(rd, rn, operand, AND); |
| } |
| |
| void Assembler::ands(const Register& rd, const Register& rn, |
| const Operand& operand) { |
| Logical(rd, rn, operand, ANDS); |
| } |
| |
| void Assembler::tst(const Register& rn, const Operand& operand) { |
| ands(AppropriateZeroRegFor(rn), rn, operand); |
| } |
| |
| void Assembler::bic(const Register& rd, const Register& rn, |
| const Operand& operand) { |
| Logical(rd, rn, operand, BIC); |
| } |
| |
| void Assembler::bics(const Register& rd, const Register& rn, |
| const Operand& operand) { |
| Logical(rd, rn, operand, BICS); |
| } |
| |
| void Assembler::orr(const Register& rd, const Register& rn, |
| const Operand& operand) { |
| Logical(rd, rn, operand, ORR); |
| } |
| |
| void Assembler::orn(const Register& rd, const Register& rn, |
| const Operand& operand) { |
| Logical(rd, rn, operand, ORN); |
| } |
| |
| void Assembler::eor(const Register& rd, const Register& rn, |
| const Operand& operand) { |
| Logical(rd, rn, operand, EOR); |
| } |
| |
| void Assembler::eon(const Register& rd, const Register& rn, |
| const Operand& operand) { |
| Logical(rd, rn, operand, EON); |
| } |
| |
| void Assembler::lslv(const Register& rd, const Register& rn, |
| const Register& rm) { |
| DCHECK(rd.SizeInBits() == rn.SizeInBits()); |
| DCHECK(rd.SizeInBits() == rm.SizeInBits()); |
| Emit(SF(rd) | LSLV | Rm(rm) | Rn(rn) | Rd(rd)); |
| } |
| |
| void Assembler::lsrv(const Register& rd, const Register& rn, |
| const Register& rm) { |
| DCHECK(rd.SizeInBits() == rn.SizeInBits()); |
| DCHECK(rd.SizeInBits() == rm.SizeInBits()); |
| Emit(SF(rd) | LSRV | Rm(rm) | Rn(rn) | Rd(rd)); |
| } |
| |
| void Assembler::asrv(const Register& rd, const Register& rn, |
| const Register& rm) { |
| DCHECK(rd.SizeInBits() == rn.SizeInBits()); |
| DCHECK(rd.SizeInBits() == rm.SizeInBits()); |
| Emit(SF(rd) | ASRV | Rm(rm) | Rn(rn) | Rd(rd)); |
| } |
| |
| void Assembler::rorv(const Register& rd, const Register& rn, |
| const Register& rm) { |
| DCHECK(rd.SizeInBits() == rn.SizeInBits()); |
| DCHECK(rd.SizeInBits() == rm.SizeInBits()); |
| Emit(SF(rd) | RORV | Rm(rm) | Rn(rn) | Rd(rd)); |
| } |
| |
| // Bitfield operations. |
| void Assembler::bfm(const Register& rd, const Register& rn, int immr, |
| int imms) { |
| DCHECK(rd.SizeInBits() == rn.SizeInBits()); |
| Instr N = SF(rd) >> (kSFOffset - kBitfieldNOffset); |
| Emit(SF(rd) | BFM | N | ImmR(immr, rd.SizeInBits()) | |
| ImmS(imms, rn.SizeInBits()) | Rn(rn) | Rd(rd)); |
| } |
| |
| void Assembler::sbfm(const Register& rd, const Register& rn, int immr, |
| int imms) { |
| DCHECK(rd.Is64Bits() || rn.Is32Bits()); |
| Instr N = SF(rd) >> (kSFOffset - kBitfieldNOffset); |
| Emit(SF(rd) | SBFM | N | ImmR(immr, rd.SizeInBits()) | |
| ImmS(imms, rn.SizeInBits()) | Rn(rn) | Rd(rd)); |
| } |
| |
| void Assembler::ubfm(const Register& rd, const Register& rn, int immr, |
| int imms) { |
| DCHECK(rd.SizeInBits() == rn.SizeInBits()); |
| Instr N = SF(rd) >> (kSFOffset - kBitfieldNOffset); |
| Emit(SF(rd) | UBFM | N | ImmR(immr, rd.SizeInBits()) | |
| ImmS(imms, rn.SizeInBits()) | Rn(rn) | Rd(rd)); |
| } |
| |
| void Assembler::extr(const Register& rd, const Register& rn, const Register& rm, |
| int lsb) { |
| DCHECK(rd.SizeInBits() == rn.SizeInBits()); |
| DCHECK(rd.SizeInBits() == rm.SizeInBits()); |
| Instr N = SF(rd) >> (kSFOffset - kBitfieldNOffset); |
| Emit(SF(rd) | EXTR | N | Rm(rm) | ImmS(lsb, rn.SizeInBits()) | Rn(rn) | |
| Rd(rd)); |
| } |
| |
| void Assembler::csel(const Register& rd, const Register& rn, const Register& rm, |
| Condition cond) { |
| ConditionalSelect(rd, rn, rm, cond, CSEL); |
| } |
| |
| void Assembler::csinc(const Register& rd, const Register& rn, |
| const Register& rm, Condition cond) { |
| ConditionalSelect(rd, rn, rm, cond, CSINC); |
| } |
| |
| void Assembler::csinv(const Register& rd, const Register& rn, |
| const Register& rm, Condition cond) { |
| ConditionalSelect(rd, rn, rm, cond, CSINV); |
| } |
| |
| void Assembler::csneg(const Register& rd, const Register& rn, |
| const Register& rm, Condition cond) { |
| ConditionalSelect(rd, rn, rm, cond, CSNEG); |
| } |
| |
| void Assembler::cset(const Register& rd, Condition cond) { |
| DCHECK((cond != al) && (cond != nv)); |
| Register zr = AppropriateZeroRegFor(rd); |
| csinc(rd, zr, zr, NegateCondition(cond)); |
| } |
| |
| void Assembler::csetm(const Register& rd, Condition cond) { |
| DCHECK((cond != al) && (cond != nv)); |
| Register zr = AppropriateZeroRegFor(rd); |
| csinv(rd, zr, zr, NegateCondition(cond)); |
| } |
| |
| void Assembler::cinc(const Register& rd, const Register& rn, Condition cond) { |
| DCHECK((cond != al) && (cond != nv)); |
| csinc(rd, rn, rn, NegateCondition(cond)); |
| } |
| |
| void Assembler::cinv(const Register& rd, const Register& rn, Condition cond) { |
| DCHECK((cond != al) && (cond != nv)); |
| csinv(rd, rn, rn, NegateCondition(cond)); |
| } |
| |
| void Assembler::cneg(const Register& rd, const Register& rn, Condition cond) { |
| DCHECK((cond != al) && (cond != nv)); |
| csneg(rd, rn, rn, NegateCondition(cond)); |
| } |
| |
| void Assembler::ConditionalSelect(const Register& rd, const Register& rn, |
| const Register& rm, Condition cond, |
| ConditionalSelectOp op) { |
| DCHECK(rd.SizeInBits() == rn.SizeInBits()); |
| DCHECK(rd.SizeInBits() == rm.SizeInBits()); |
| Emit(SF(rd) | op | Rm(rm) | Cond(cond) | Rn(rn) | Rd(rd)); |
| } |
| |
| void Assembler::ccmn(const Register& rn, const Operand& operand, |
| StatusFlags nzcv, Condition cond) { |
| ConditionalCompare(rn, operand, nzcv, cond, CCMN); |
| } |
| |
| void Assembler::ccmp(const Register& rn, const Operand& operand, |
| StatusFlags nzcv, Condition cond) { |
| ConditionalCompare(rn, operand, nzcv, cond, CCMP); |
| } |
| |
| void Assembler::DataProcessing3Source(const Register& rd, const Register& rn, |
| const Register& rm, const Register& ra, |
| DataProcessing3SourceOp op) { |
| Emit(SF(rd) | op | Rm(rm) | Ra(ra) | Rn(rn) | Rd(rd)); |
| } |
| |
| void Assembler::mul(const Register& rd, const Register& rn, |
| const Register& rm) { |
| DCHECK(AreSameSizeAndType(rd, rn, rm)); |
| Register zr = AppropriateZeroRegFor(rn); |
| DataProcessing3Source(rd, rn, rm, zr, MADD); |
| } |
| |
| void Assembler::madd(const Register& rd, const Register& rn, const Register& rm, |
| const Register& ra) { |
| DCHECK(AreSameSizeAndType(rd, rn, rm, ra)); |
| DataProcessing3Source(rd, rn, rm, ra, MADD); |
| } |
| |
| void Assembler::mneg(const Register& rd, const Register& rn, |
| const Register& rm) { |
| DCHECK(AreSameSizeAndType(rd, rn, rm)); |
| Register zr = AppropriateZeroRegFor(rn); |
| DataProcessing3Source(rd, rn, rm, zr, MSUB); |
| } |
| |
| void Assembler::msub(const Register& rd, const Register& rn, const Register& rm, |
| const Register& ra) { |
| DCHECK(AreSameSizeAndType(rd, rn, rm, ra)); |
| DataProcessing3Source(rd, rn, rm, ra, MSUB); |
| } |
| |
| void Assembler::smaddl(const Register& rd, const Register& rn, |
| const Register& rm, const Register& ra) { |
| DCHECK(rd.Is64Bits() && ra.Is64Bits()); |
| DCHECK(rn.Is32Bits() && rm.Is32Bits()); |
| DataProcessing3Source(rd, rn, rm, ra, SMADDL_x); |
| } |
| |
| void Assembler::smsubl(const Register& rd, const Register& rn, |
| const Register& rm, const Register& ra) { |
| DCHECK(rd.Is64Bits() && ra.Is64Bits()); |
| DCHECK(rn.Is32Bits() && rm.Is32Bits()); |
| DataProcessing3Source(rd, rn, rm, ra, SMSUBL_x); |
| } |
| |
| void Assembler::umaddl(const Register& rd, const Register& rn, |
| const Register& rm, const Register& ra) { |
| DCHECK(rd.Is64Bits() && ra.Is64Bits()); |
| DCHECK(rn.Is32Bits() && rm.Is32Bits()); |
| DataProcessing3Source(rd, rn, rm, ra, UMADDL_x); |
| } |
| |
| void Assembler::umsubl(const Register& rd, const Register& rn, |
| const Register& rm, const Register& ra) { |
| DCHECK(rd.Is64Bits() && ra.Is64Bits()); |
| DCHECK(rn.Is32Bits() && rm.Is32Bits()); |
| DataProcessing3Source(rd, rn, rm, ra, UMSUBL_x); |
| } |
| |
| void Assembler::smull(const Register& rd, const Register& rn, |
| const Register& rm) { |
| DCHECK(rd.Is64Bits()); |
| DCHECK(rn.Is32Bits() && rm.Is32Bits()); |
| DataProcessing3Source(rd, rn, rm, xzr, SMADDL_x); |
| } |
| |
| void Assembler::smulh(const Register& rd, const Register& rn, |
| const Register& rm) { |
| DCHECK(AreSameSizeAndType(rd, rn, rm)); |
| DataProcessing3Source(rd, rn, rm, xzr, SMULH_x); |
| } |
| |
| void Assembler::sdiv(const Register& rd, const Register& rn, |
| const Register& rm) { |
| DCHECK(rd.SizeInBits() == rn.SizeInBits()); |
| DCHECK(rd.SizeInBits() == rm.SizeInBits()); |
| Emit(SF(rd) | SDIV | Rm(rm) | Rn(rn) | Rd(rd)); |
| } |
| |
| void Assembler::udiv(const Register& rd, const Register& rn, |
| const Register& rm) { |
| DCHECK(rd.SizeInBits() == rn.SizeInBits()); |
| DCHECK(rd.SizeInBits() == rm.SizeInBits()); |
| Emit(SF(rd) | UDIV | Rm(rm) | Rn(rn) | Rd(rd)); |
| } |
| |
| void Assembler::rbit(const Register& rd, const Register& rn) { |
| DataProcessing1Source(rd, rn, RBIT); |
| } |
| |
| void Assembler::rev16(const Register& rd, const Register& rn) { |
| DataProcessing1Source(rd, rn, REV16); |
| } |
| |
| void Assembler::rev32(const Register& rd, const Register& rn) { |
| DCHECK(rd.Is64Bits()); |
| DataProcessing1Source(rd, rn, REV); |
| } |
| |
| void Assembler::rev(const Register& rd, const Register& rn) { |
| DataProcessing1Source(rd, rn, rd.Is64Bits() ? REV_x : REV_w); |
| } |
| |
| void Assembler::clz(const Register& rd, const Register& rn) { |
| DataProcessing1Source(rd, rn, CLZ); |
| } |
| |
| void Assembler::cls(const Register& rd, const Register& rn) { |
| DataProcessing1Source(rd, rn, CLS); |
| } |
| |
| void Assembler::pacib1716() { Emit(PACIB1716); } |
| void Assembler::autib1716() { Emit(AUTIB1716); } |
| void Assembler::pacibsp() { Emit(PACIBSP); } |
| void Assembler::autibsp() { Emit(AUTIBSP); } |
| |
| void Assembler::bti(BranchTargetIdentifier id) { |
| SystemHint op; |
| switch (id) { |
| case BranchTargetIdentifier::kBti: |
| op = BTI; |
| break; |
| case BranchTargetIdentifier::kBtiCall: |
| op = BTI_c; |
| break; |
| case BranchTargetIdentifier::kBtiJump: |
| op = BTI_j; |
| break; |
| case BranchTargetIdentifier::kBtiJumpCall: |
| op = BTI_jc; |
| break; |
| case BranchTargetIdentifier::kNone: |
| case BranchTargetIdentifier::kPacibsp: |
| // We always want to generate a BTI instruction here, so disallow |
| // skipping its generation or generating a PACIBSP instead. |
| UNREACHABLE(); |
| } |
| hint(op); |
| } |
| |
| void Assembler::ldp(const CPURegister& rt, const CPURegister& rt2, |
| const MemOperand& src) { |
| LoadStorePair(rt, rt2, src, LoadPairOpFor(rt, rt2)); |
| } |
| |
| void Assembler::stp(const CPURegister& rt, const CPURegister& rt2, |
| const MemOperand& dst) { |
| LoadStorePair(rt, rt2, dst, StorePairOpFor(rt, rt2)); |
| |
| #if defined(V8_OS_WIN) |
| if (xdata_encoder_ && rt == x29 && rt2 == lr && dst.base().IsSP()) { |
| xdata_encoder_->onSaveFpLr(); |
| } |
| #endif |
| } |
| |
| void Assembler::ldpsw(const Register& rt, const Register& rt2, |
| const MemOperand& src) { |
| DCHECK(rt.Is64Bits()); |
| LoadStorePair(rt, rt2, src, LDPSW_x); |
| } |
| |
| void Assembler::LoadStorePair(const CPURegister& rt, const CPURegister& rt2, |
| const MemOperand& addr, LoadStorePairOp op) { |
| // 'rt' and 'rt2' can only be aliased for stores. |
| DCHECK(((op & LoadStorePairLBit) == 0) || rt != rt2); |
| DCHECK(AreSameSizeAndType(rt, rt2)); |
| DCHECK(IsImmLSPair(addr.offset(), CalcLSPairDataSize(op))); |
| int offset = static_cast<int>(addr.offset()); |
| |
| Instr memop = op | Rt(rt) | Rt2(rt2) | RnSP(addr.base()) | |
| ImmLSPair(offset, CalcLSPairDataSize(op)); |
| |
| Instr addrmodeop; |
| if (addr.IsImmediateOffset()) { |
| addrmodeop = LoadStorePairOffsetFixed; |
| } else { |
| // Pre-index and post-index modes. |
| DCHECK_NE(rt, addr.base()); |
| DCHECK_NE(rt2, addr.base()); |
| DCHECK_NE(addr.offset(), 0); |
| if (addr.IsPreIndex()) { |
| addrmodeop = LoadStorePairPreIndexFixed; |
| } else { |
| DCHECK(addr.IsPostIndex()); |
| addrmodeop = LoadStorePairPostIndexFixed; |
| } |
| } |
| Emit(addrmodeop | memop); |
| } |
| |
| // Memory instructions. |
| void Assembler::ldrb(const Register& rt, const MemOperand& src) { |
| LoadStore(rt, src, LDRB_w); |
| } |
| |
| void Assembler::strb(const Register& rt, const MemOperand& dst) { |
| LoadStore(rt, dst, STRB_w); |
| } |
| |
| void Assembler::ldrsb(const Register& rt, const MemOperand& src) { |
| LoadStore(rt, src, rt.Is64Bits() ? LDRSB_x : LDRSB_w); |
| } |
| |
| void Assembler::ldrh(const Register& rt, const MemOperand& src) { |
| LoadStore(rt, src, LDRH_w); |
| } |
| |
| void Assembler::strh(const Register& rt, const MemOperand& dst) { |
| LoadStore(rt, dst, STRH_w); |
| } |
| |
| void Assembler::ldrsh(const Register& rt, const MemOperand& src) { |
| LoadStore(rt, src, rt.Is64Bits() ? LDRSH_x : LDRSH_w); |
| } |
| |
| void Assembler::ldr(const CPURegister& rt, const MemOperand& src) { |
| LoadStore(rt, src, LoadOpFor(rt)); |
| } |
| |
| void Assembler::str(const CPURegister& rt, const MemOperand& src) { |
| LoadStore(rt, src, StoreOpFor(rt)); |
| } |
| |
| void Assembler::ldrsw(const Register& rt, const MemOperand& src) { |
| DCHECK(rt.Is64Bits()); |
| LoadStore(rt, src, LDRSW_x); |
| } |
| |
| void Assembler::ldr_pcrel(const CPURegister& rt, int imm19) { |
| // The pattern 'ldr xzr, #offset' is used to indicate the beginning of a |
| // constant pool. It should not be emitted. |
| DCHECK(!rt.IsZero()); |
| Emit(LoadLiteralOpFor(rt) | ImmLLiteral(imm19) | Rt(rt)); |
| } |
| |
| Operand Operand::EmbeddedNumber(double number) { |
| int32_t smi; |
| if (DoubleToSmiInteger(number, &smi)) { |
| return Operand(Immediate(Smi::FromInt(smi))); |
| } |
| Operand result(0, RelocInfo::FULL_EMBEDDED_OBJECT); |
| result.heap_object_request_.emplace(number); |
| DCHECK(result.IsHeapObjectRequest()); |
| return result; |
| } |
| |
| Operand Operand::EmbeddedStringConstant(const StringConstantBase* str) { |
| Operand result(0, RelocInfo::FULL_EMBEDDED_OBJECT); |
| result.heap_object_request_.emplace(str); |
| DCHECK(result.IsHeapObjectRequest()); |
| return result; |
| } |
| |
| void Assembler::ldr(const CPURegister& rt, const Operand& operand) { |
| if (operand.IsHeapObjectRequest()) { |
| BlockPoolsScope no_pool_before_ldr_of_heap_object_request(this); |
| RequestHeapObject(operand.heap_object_request()); |
| ldr(rt, operand.immediate_for_heap_object_request()); |
| } else { |
| ldr(rt, operand.immediate()); |
| } |
| } |
| |
| void Assembler::ldr(const CPURegister& rt, const Immediate& imm) { |
| BlockPoolsScope no_pool_before_ldr_pcrel_instr(this); |
| RecordRelocInfo(imm.rmode(), imm.value()); |
// The load will be patched when the constant pool is emitted; the patching
// code expects a load literal with offset 0.
| ldr_pcrel(rt, 0); |
| } |
| |
| void Assembler::ldar(const Register& rt, const Register& rn) { |
| DCHECK(rn.Is64Bits()); |
| LoadStoreAcquireReleaseOp op = rt.Is32Bits() ? LDAR_w : LDAR_x; |
| Emit(op | Rs(x31) | Rt2(x31) | RnSP(rn) | Rt(rt)); |
| } |
| |
| void Assembler::ldaxr(const Register& rt, const Register& rn) { |
| DCHECK(rn.Is64Bits()); |
| LoadStoreAcquireReleaseOp op = rt.Is32Bits() ? LDAXR_w : LDAXR_x; |
| Emit(op | Rs(x31) | Rt2(x31) | RnSP(rn) | Rt(rt)); |
| } |
| |
| void Assembler::stlr(const Register& rt, const Register& rn) { |
| DCHECK(rn.Is64Bits()); |
| LoadStoreAcquireReleaseOp op = rt.Is32Bits() ? STLR_w : STLR_x; |
| Emit(op | Rs(x31) | Rt2(x31) | RnSP(rn) | Rt(rt)); |
| } |
| |
| void Assembler::stlxr(const Register& rs, const Register& rt, |
| const Register& rn) { |
| DCHECK(rn.Is64Bits()); |
| DCHECK(rs != rt && rs != rn); |
| LoadStoreAcquireReleaseOp op = rt.Is32Bits() ? STLXR_w : STLXR_x; |
| Emit(op | Rs(rs) | Rt2(x31) | RnSP(rn) | Rt(rt)); |
| } |
| |
| void Assembler::ldarb(const Register& rt, const Register& rn) { |
| DCHECK(rt.Is32Bits()); |
| DCHECK(rn.Is64Bits()); |
| Emit(LDAR_b | Rs(x31) | Rt2(x31) | RnSP(rn) | Rt(rt)); |
| } |
| |
| void Assembler::ldaxrb(const Register& rt, const Register& rn) { |
| DCHECK(rt.Is32Bits()); |
| DCHECK(rn.Is64Bits()); |
| Emit(LDAXR_b | Rs(x31) | Rt2(x31) | RnSP(rn) | Rt(rt)); |
| } |
| |
| void Assembler::stlrb(const Register& rt, const Register& rn) { |
| DCHECK(rt.Is32Bits()); |
| DCHECK(rn.Is64Bits()); |
| Emit(STLR_b | Rs(x31) | Rt2(x31) | RnSP(rn) | Rt(rt)); |
| } |
| |
| void Assembler::stlxrb(const Register& rs, const Register& rt, |
| const Register& rn) { |
| DCHECK(rs.Is32Bits()); |
| DCHECK(rt.Is32Bits()); |
| DCHECK(rn.Is64Bits()); |
| DCHECK(rs != rt && rs != rn); |
| Emit(STLXR_b | Rs(rs) | Rt2(x31) | RnSP(rn) | Rt(rt)); |
| } |
| |
| void Assembler::ldarh(const Register& rt, const Register& rn) { |
| DCHECK(rt.Is32Bits()); |
| DCHECK(rn.Is64Bits()); |
| Emit(LDAR_h | Rs(x31) | Rt2(x31) | RnSP(rn) | Rt(rt)); |
| } |
| |
| void Assembler::ldaxrh(const Register& rt, const Register& rn) { |
| DCHECK(rt.Is32Bits()); |
| DCHECK(rn.Is64Bits()); |
| Emit(LDAXR_h | Rs(x31) | Rt2(x31) | RnSP(rn) | Rt(rt)); |
| } |
| |
| void Assembler::stlrh(const Register& rt, const Register& rn) { |
| DCHECK(rt.Is32Bits()); |
| DCHECK(rn.Is64Bits()); |
| Emit(STLR_h | Rs(x31) | Rt2(x31) | RnSP(rn) | Rt(rt)); |
| } |
| |
| void Assembler::stlxrh(const Register& rs, const Register& rt, |
| const Register& rn) { |
| DCHECK(rs.Is32Bits()); |
| DCHECK(rt.Is32Bits()); |
| DCHECK(rn.Is64Bits()); |
| DCHECK(rs != rt && rs != rn); |
| Emit(STLXR_h | Rs(rs) | Rt2(x31) | RnSP(rn) | Rt(rt)); |
| } |
| |
| void Assembler::NEON3DifferentL(const VRegister& vd, const VRegister& vn, |
| const VRegister& vm, NEON3DifferentOp vop) { |
| DCHECK(AreSameFormat(vn, vm)); |
| DCHECK((vn.Is1H() && vd.Is1S()) || (vn.Is1S() && vd.Is1D()) || |
| (vn.Is8B() && vd.Is8H()) || (vn.Is4H() && vd.Is4S()) || |
| (vn.Is2S() && vd.Is2D()) || (vn.Is16B() && vd.Is8H()) || |
| (vn.Is8H() && vd.Is4S()) || (vn.Is4S() && vd.Is2D())); |
| Instr format, op = vop; |
| if (vd.IsScalar()) { |
| op |= NEON_Q | NEONScalar; |
| format = SFormat(vn); |
| } else { |
| format = VFormat(vn); |
| } |
| Emit(format | op | Rm(vm) | Rn(vn) | Rd(vd)); |
| } |
| |
| void Assembler::NEON3DifferentW(const VRegister& vd, const VRegister& vn, |
| const VRegister& vm, NEON3DifferentOp vop) { |
| DCHECK(AreSameFormat(vd, vn)); |
| DCHECK((vm.Is8B() && vd.Is8H()) || (vm.Is4H() && vd.Is4S()) || |
| (vm.Is2S() && vd.Is2D()) || (vm.Is16B() && vd.Is8H()) || |
| (vm.Is8H() && vd.Is4S()) || (vm.Is4S() && vd.Is2D())); |
| Emit(VFormat(vm) | vop | Rm(vm) | Rn(vn) | Rd(vd)); |
| } |
| |
| void Assembler::NEON3DifferentHN(const VRegister& vd, const VRegister& vn, |
| const VRegister& vm, NEON3DifferentOp vop) { |
| DCHECK(AreSameFormat(vm, vn)); |
| DCHECK((vd.Is8B() && vn.Is8H()) || (vd.Is4H() && vn.Is4S()) || |
| (vd.Is2S() && vn.Is2D()) || (vd.Is16B() && vn.Is8H()) || |
| (vd.Is8H() && vn.Is4S()) || (vd.Is4S() && vn.Is2D())); |
| Emit(VFormat(vd) | vop | Rm(vm) | Rn(vn) | Rd(vd)); |
| } |
| |
| #define NEON_3DIFF_LONG_LIST(V) \ |
| V(pmull, NEON_PMULL, vn.IsVector() && vn.Is8B()) \ |
| V(pmull2, NEON_PMULL2, vn.IsVector() && vn.Is16B()) \ |
| V(saddl, NEON_SADDL, vn.IsVector() && vn.IsD()) \ |
| V(saddl2, NEON_SADDL2, vn.IsVector() && vn.IsQ()) \ |
| V(sabal, NEON_SABAL, vn.IsVector() && vn.IsD()) \ |
| V(sabal2, NEON_SABAL2, vn.IsVector() && vn.IsQ()) \ |
| V(uabal, NEON_UABAL, vn.IsVector() && vn.IsD()) \ |
| V(uabal2, NEON_UABAL2, vn.IsVector() && vn.IsQ()) \ |
| V(sabdl, NEON_SABDL, vn.IsVector() && vn.IsD()) \ |
| V(sabdl2, NEON_SABDL2, vn.IsVector() && vn.IsQ()) \ |
| V(uabdl, NEON_UABDL, vn.IsVector() && vn.IsD()) \ |
| V(uabdl2, NEON_UABDL2, vn.IsVector() && vn.IsQ()) \ |
| V(smlal, NEON_SMLAL, vn.IsVector() && vn.IsD()) \ |
| V(smlal2, NEON_SMLAL2, vn.IsVector() && vn.IsQ()) \ |
| V(umlal, NEON_UMLAL, vn.IsVector() && vn.IsD()) \ |
| V(umlal2, NEON_UMLAL2, vn.IsVector() && vn.IsQ()) \ |
| V(smlsl, NEON_SMLSL, vn.IsVector() && vn.IsD()) \ |
| V(smlsl2, NEON_SMLSL2, vn.IsVector() && vn.IsQ()) \ |
| V(umlsl, NEON_UMLSL, vn.IsVector() && vn.IsD()) \ |
| V(umlsl2, NEON_UMLSL2, vn.IsVector() && vn.IsQ()) \ |
| V(smull, NEON_SMULL, vn.IsVector() && vn.IsD()) \ |
| V(smull2, NEON_SMULL2, vn.IsVector() && vn.IsQ()) \ |
| V(umull, NEON_UMULL, vn.IsVector() && vn.IsD()) \ |
| V(umull2, NEON_UMULL2, vn.IsVector() && vn.IsQ()) \ |
| V(ssubl, NEON_SSUBL, vn.IsVector() && vn.IsD()) \ |
| V(ssubl2, NEON_SSUBL2, vn.IsVector() && vn.IsQ()) \ |
| V(uaddl, NEON_UADDL, vn.IsVector() && vn.IsD()) \ |
| V(uaddl2, NEON_UADDL2, vn.IsVector() && vn.IsQ()) \ |
| V(usubl, NEON_USUBL, vn.IsVector() && vn.IsD()) \ |
| V(usubl2, NEON_USUBL2, vn.IsVector() && vn.IsQ()) \ |
| V(sqdmlal, NEON_SQDMLAL, vn.Is1H() || vn.Is1S() || vn.Is4H() || vn.Is2S()) \ |
| V(sqdmlal2, NEON_SQDMLAL2, vn.Is1H() || vn.Is1S() || vn.Is8H() || vn.Is4S()) \ |
| V(sqdmlsl, NEON_SQDMLSL, vn.Is1H() || vn.Is1S() || vn.Is4H() || vn.Is2S()) \ |
| V(sqdmlsl2, NEON_SQDMLSL2, vn.Is1H() || vn.Is1S() || vn.Is8H() || vn.Is4S()) \ |
| V(sqdmull, NEON_SQDMULL, vn.Is1H() || vn.Is1S() || vn.Is4H() || vn.Is2S()) \ |
| V(sqdmull2, NEON_SQDMULL2, vn.Is1H() || vn.Is1S() || vn.Is8H() || vn.Is4S()) |
| |
| #define DEFINE_ASM_FUNC(FN, OP, AS) \ |
| void Assembler::FN(const VRegister& vd, const VRegister& vn, \ |
| const VRegister& vm) { \ |
| DCHECK(AS); \ |
| NEON3DifferentL(vd, vn, vm, OP); \ |
| } |
| NEON_3DIFF_LONG_LIST(DEFINE_ASM_FUNC) |
| #undef DEFINE_ASM_FUNC |
| |
| #define NEON_3DIFF_HN_LIST(V) \ |
| V(addhn, NEON_ADDHN, vd.IsD()) \ |
| V(addhn2, NEON_ADDHN2, vd.IsQ()) \ |
| V(raddhn, NEON_RADDHN, vd.IsD()) \ |
| V(raddhn2, NEON_RADDHN2, vd.IsQ()) \ |
| V(subhn, NEON_SUBHN, vd.IsD()) \ |
| V(subhn2, NEON_SUBHN2, vd.IsQ()) \ |
| V(rsubhn, NEON_RSUBHN, vd.IsD()) \ |
| V(rsubhn2, NEON_RSUBHN2, vd.IsQ()) |
| |
| #define DEFINE_ASM_FUNC(FN, OP, AS) \ |
| void Assembler::FN(const VRegister& vd, const VRegister& vn, \ |
| const VRegister& vm) { \ |
| DCHECK(AS); \ |
| NEON3DifferentHN(vd, vn, vm, OP); \ |
| } |
| NEON_3DIFF_HN_LIST(DEFINE_ASM_FUNC) |
| #undef DEFINE_ASM_FUNC |
| |
| void Assembler::NEONPerm(const VRegister& vd, const VRegister& vn, |
| const VRegister& vm, NEONPermOp op) { |
| DCHECK(AreSameFormat(vd, vn, vm)); |
| DCHECK(!vd.Is1D()); |
| Emit(VFormat(vd) | op | Rm(vm) | Rn(vn) | Rd(vd)); |
| } |
| |
| void Assembler::trn1(const VRegister& vd, const VRegister& vn, |
| const VRegister& vm) { |
| NEONPerm(vd, vn, vm, NEON_TRN1); |
| } |
| |
| void Assembler::trn2(const VRegister& vd, const VRegister& vn, |
| const VRegister& vm) { |
| NEONPerm(vd, vn, vm, NEON_TRN2); |
| } |
| |
| void Assembler::uzp1(const VRegister& vd, const VRegister& vn, |
| const VRegister& vm) { |
| NEONPerm(vd, vn, vm, NEON_UZP1); |
| } |
| |
| void Assembler::uzp2(const VRegister& vd, const VRegister& vn, |
| const VRegister& vm) { |
| NEONPerm(vd, vn, vm, NEON_UZP2); |
| } |
| |
| void Assembler::zip1(const VRegister& vd, const VRegister& vn, |
| const VRegister& vm) { |
| NEONPerm(vd, vn, vm, NEON_ZIP1); |
| } |
| |
| void Assembler::zip2(const VRegister& vd, const VRegister& vn, |
| const VRegister& vm) { |
| NEONPerm(vd, vn, vm, NEON_ZIP2); |
| } |
| |
| void Assembler::NEONShiftImmediate(const VRegister& vd, const VRegister& vn, |
| NEONShiftImmediateOp op, int immh_immb) { |
| DCHECK(AreSameFormat(vd, vn)); |
| Instr q, scalar; |
| if (vn.IsScalar()) { |
| q = NEON_Q; |
| scalar = NEONScalar; |
| } else { |
| q = vd.IsD() ? 0 : NEON_Q; |
| scalar = 0; |
| } |
| Emit(q | op | scalar | immh_immb | Rn(vn) | Rd(vd)); |
| } |
| |
| void Assembler::NEONShiftLeftImmediate(const VRegister& vd, const VRegister& vn, |
| int shift, NEONShiftImmediateOp op) { |
| int laneSizeInBits = vn.LaneSizeInBits(); |
| DCHECK((shift >= 0) && (shift < laneSizeInBits)); |
| NEONShiftImmediate(vd, vn, op, (laneSizeInBits + shift) << 16); |
| } |
| |
| void Assembler::NEONShiftRightImmediate(const VRegister& vd, |
| const VRegister& vn, int shift, |
| NEONShiftImmediateOp op) { |
| int laneSizeInBits = vn.LaneSizeInBits(); |
| DCHECK((shift >= 1) && (shift <= laneSizeInBits)); |
| NEONShiftImmediate(vd, vn, op, ((2 * laneSizeInBits) - shift) << 16); |
| } |
| |
| void Assembler::NEONShiftImmediateL(const VRegister& vd, const VRegister& vn, |
| int shift, NEONShiftImmediateOp op) { |
| int laneSizeInBits = vn.LaneSizeInBits(); |
| DCHECK((shift >= 0) && (shift < laneSizeInBits)); |
| int immh_immb = (laneSizeInBits + shift) << 16; |
| |
| DCHECK((vn.Is8B() && vd.Is8H()) || (vn.Is4H() && vd.Is4S()) || |
| (vn.Is2S() && vd.Is2D()) || (vn.Is16B() && vd.Is8H()) || |
| (vn.Is8H() && vd.Is4S()) || (vn.Is4S() && vd.Is2D())); |
| Instr q; |
| q = vn.IsD() ? 0 : NEON_Q; |
| Emit(q | op | immh_immb | Rn(vn) | Rd(vd)); |
| } |
| |
| void Assembler::NEONShiftImmediateN(const VRegister& vd, const VRegister& vn, |
| int shift, NEONShiftImmediateOp op) { |
| Instr q, scalar; |
| int laneSizeInBits = vd.LaneSizeInBits(); |
| DCHECK((shift >= 1) && (shift <= laneSizeInBits)); |
| int immh_immb = (2 * laneSizeInBits - shift) << 16; |
| |
| if (vn.IsScalar()) { |
| DCHECK((vd.Is1B() && vn.Is1H()) || (vd.Is1H() && vn.Is1S()) || |
| (vd.Is1S() && vn.Is1D())); |
| q = NEON_Q; |
| scalar = NEONScalar; |
| } else { |
| DCHECK((vd.Is8B() && vn.Is8H()) || (vd.Is4H() && vn.Is4S()) || |
| (vd.Is2S() && vn.Is2D()) || (vd.Is16B() && vn.Is8H()) || |
| (vd.Is8H() && vn.Is4S()) || (vd.Is4S() && vn.Is2D())); |
| scalar = 0; |
| q = vd.IsD() ? 0 : NEON_Q; |
| } |
| Emit(q | op | scalar | immh_immb | Rn(vn) | Rd(vd)); |
| } |
| |
| void Assembler::shl(const VRegister& vd, const VRegister& vn, int shift) { |
| DCHECK(vd.IsVector() || vd.Is1D()); |
| NEONShiftLeftImmediate(vd, vn, shift, NEON_SHL); |
| } |
| |
| void Assembler::sli(const VRegister& vd, const VRegister& vn, int shift) { |
| DCHECK(vd.IsVector() || vd.Is1D()); |
| NEONShiftLeftImmediate(vd, vn, shift, NEON_SLI); |
| } |
| |
| void Assembler::sqshl(const VRegister& vd, const VRegister& vn, int shift) { |
| NEONShiftLeftImmediate(vd, vn, shift, NEON_SQSHL_imm); |
| } |
| |
| void Assembler::sqshlu(const VRegister& vd, const VRegister& vn, int shift) { |
| NEONShiftLeftImmediate(vd, vn, shift, NEON_SQSHLU); |
| } |
| |
| void Assembler::uqshl(const VRegister& vd, const VRegister& vn, int shift) { |
| NEONShiftLeftImmediate(vd, vn, shift, NEON_UQSHL_imm); |
| } |
| |
| void Assembler::sshll(const VRegister& vd, const VRegister& vn, int shift) { |
| DCHECK(vn.IsD()); |
| NEONShiftImmediateL(vd, vn, shift, NEON_SSHLL); |
| } |
| |
| void Assembler::sshll2(const VRegister& vd, const VRegister& vn, int shift) { |
| DCHECK(vn.IsQ()); |
| NEONShiftImmediateL(vd, vn, shift, NEON_SSHLL); |
| } |
| |
| void Assembler::sxtl(const VRegister& vd, const VRegister& vn) { |
| sshll(vd, vn, 0); |
| } |
| |
| void Assembler::sxtl2(const VRegister& vd, const VRegister& vn) { |
| sshll2(vd, vn, 0); |
| } |
| |
| void Assembler::ushll(const VRegister& vd, const VRegister& vn, int shift) { |
| DCHECK(vn.IsD()); |
| NEONShiftImmediateL(vd, vn, shift, NEON_USHLL); |
| } |
| |
| void Assembler::ushll2(const VRegister& vd, const VRegister& vn, int shift) { |
| DCHECK(vn.IsQ()); |
| NEONShiftImmediateL(vd, vn, shift, NEON_USHLL); |
| } |
| |
| void Assembler::uxtl(const VRegister& vd, const VRegister& vn) { |
| ushll(vd, vn, 0); |
| } |
| |
| void Assembler::uxtl2(const VRegister& vd, const VRegister& vn) { |
| ushll2(vd, vn, 0); |
| } |
| |
| void Assembler::sri(const VRegister& vd, const VRegister& vn, int shift) { |
| DCHECK(vd.IsVector() || vd.Is1D()); |
| NEONShiftRightImmediate(vd, vn, shift, NEON_SRI); |
| } |
| |
| void Assembler::sshr(const VRegister& vd, const VRegister& vn, int shift) { |
| DCHECK(vd.IsVector() || vd.Is1D()); |
| NEONShiftRightImmediate(vd, vn, shift, NEON_SSHR); |
| } |
| |
| void Assembler::ushr(const VRegister& vd, const VRegister& vn, int shift) { |
| DCHECK(vd.IsVector() || vd.Is1D()); |
| NEONShiftRightImmediate(vd, vn, shift, NEON_USHR); |
| } |
| |
| void Assembler::srshr(const VRegister& vd, const VRegister& vn, int shift) { |
| DCHECK(vd.IsVector() || vd.Is1D()); |
| NEONShiftRightImmediate(vd, vn, shift, NEON_SRSHR); |
| } |
| |
| void Assembler::urshr(const VRegister& vd, const VRegister& vn, int shift) { |
| DCHECK(vd.IsVector() || vd.Is1D()); |
| NEONShiftRightImmediate(vd, vn, shift, NEON_URSHR); |
| } |
| |
| void Assembler::ssra(const VRegister& vd, const VRegister& vn, int shift) { |
| DCHECK(vd.IsVector() || vd.Is1D()); |
| NEONShiftRightImmediate(vd, vn, shift, NEON_SSRA); |
| } |
| |
| void Assembler::usra(const VRegister& vd, const VRegister& vn, int shift) { |
| DCHECK(vd.IsVector() || vd.Is1D()); |
| NEONShiftRightImmediate(vd, vn, shift, NEON_USRA); |
| } |
| |
| void Assembler::srsra(const VRegister& vd, const VRegister& vn, int shift) { |
| DCHECK(vd.IsVector() || vd.Is1D()); |
| NEONShiftRightImmediate(vd, vn, shift, NEON_SRSRA); |
| } |
| |
| void Assembler::ursra(const VRegister& vd, const VRegister& vn, int shift) { |
| DCHECK(vd.IsVector() || vd.Is1D()); |
| NEONShiftRightImmediate(vd, vn, shift, NEON_URSRA); |
| } |
| |
| void Assembler::shrn(const VRegister& vd, const VRegister& vn, int shift) { |
| DCHECK(vn.IsVector() && vd.IsD()); |
| NEONShiftImmediateN(vd, vn, shift, NEON_SHRN); |
| } |
| |
| void Assembler::shrn2(const VRegister& vd, const VRegister& vn, int shift) { |
| DCHECK(vn.IsVector() && vd.IsQ()); |
| NEONShiftImmediateN(vd, vn, shift, NEON_SHRN); |
| } |
| |
| void Assembler::rshrn(const VRegister& vd, const VRegister& vn, int shift) { |
| DCHECK(vn.IsVector() && vd.IsD()); |
| NEONShiftImmediateN(vd, vn, shift, NEON_RSHRN); |
| } |
| |
| void Assembler::rshrn2(const VRegister& vd, const VRegister& vn, int shift) { |
| DCHECK(vn.IsVector() && vd.IsQ()); |
| NEONShiftImmediateN(vd, vn, shift, NEON_RSHRN); |
| } |
| |
| void Assembler::sqshrn(const VRegister& vd, const VRegister& vn, int shift) { |
| DCHECK(vd.IsD() || (vn.IsScalar() && vd.IsScalar())); |
| NEONShiftImmediateN(vd, vn, shift, NEON_SQSHRN); |
| } |
| |
| void Assembler::sqshrn2(const VRegister& vd, const VRegister& vn, int shift) { |
| DCHECK(vn.IsVector() && vd.IsQ()); |
| NEONShiftImmediateN(vd, vn, shift, NEON_SQSHRN); |
| } |
| |
| void Assembler::sqrshrn(const VRegister& vd, const VRegister& vn, int shift) { |
| DCHECK(vd.IsD() || (vn.IsScalar() && vd.IsScalar())); |
| NEONShiftImmediateN(vd, vn, shift, NEON_SQRSHRN); |
| } |
| |
| void Assembler::sqrshrn2(const VRegister& vd, const VRegister& vn, int shift) { |
| DCHECK(vn.IsVector() && vd.IsQ()); |
| NEONShiftImmediateN(vd, vn, shift, NEON_SQRSHRN); |
| } |
| |
| void Assembler::sqshrun(const VRegister& vd, const VRegister& vn, int shift) { |
| DCHECK(vd.IsD() || (vn.IsScalar() && vd.IsScalar())); |
| NEONShiftImmediateN(vd, vn, shift, NEON_SQSHRUN); |
| } |
| |
| void Assembler::sqshrun2(const VRegister& vd, const VRegister& vn, int shift) { |
| DCHECK(vn.IsVector() && vd.IsQ()); |
| NEONShiftImmediateN(vd, vn, shift, NEON_SQSHRUN); |
| } |
| |
| void Assembler::sqrshrun(const VRegister& vd, const VRegister& vn, int shift) { |
| DCHECK(vd.IsD() || (vn.IsScalar() && vd.IsScalar())); |
| NEONShiftImmediateN(vd, vn, shift, NEON_SQRSHRUN); |
| } |
| |
| void Assembler::sqrshrun2(const VRegister& vd, const VRegister& vn, int shift) { |
| DCHECK(vn.IsVector() && vd.IsQ()); |
| NEONShiftImmediateN(vd, vn, shift, NEON_SQRSHRUN); |
| } |
| |
| void Assembler::uqshrn(const VRegister& vd, const VRegister& vn, int shift) { |
| DCHECK(vd.IsD() || (vn.IsScalar() && vd.IsScalar())); |
| NEONShiftImmediateN(vd, vn, shift, NEON_UQSHRN); |
| } |
| |
| void Assembler::uqshrn2(const VRegister& vd, const VRegister& vn, int shift) { |
| DCHECK(vn.IsVector() && vd.IsQ()); |
| NEONShiftImmediateN(vd, vn, shift, NEON_UQSHRN); |
| } |
| |
| void Assembler::uqrshrn(const VRegister& vd, const VRegister& vn, int shift) { |
| DCHECK(vd.IsD() || (vn.IsScalar() && vd.IsScalar())); |
| NEONShiftImmediateN(vd, vn, shift, NEON_UQRSHRN); |
| } |
| |
| void Assembler::uqrshrn2(const VRegister& vd, const VRegister& vn, int shift) { |
| DCHECK(vn.IsVector() && vd.IsQ()); |
| NEONShiftImmediateN(vd, vn, shift, NEON_UQRSHRN); |
| } |
| |
| void Assembler::uaddw(const VRegister& vd, const VRegister& vn, |
| const VRegister& vm) { |
| DCHECK(vm.IsD()); |
| NEON3DifferentW(vd, vn, vm, NEON_UADDW); |
| } |
| |
| void Assembler::uaddw2(const VRegister& vd, const VRegister& vn, |
| const VRegister& vm) { |
| DCHECK(vm.IsQ()); |
| NEON3DifferentW(vd, vn, vm, NEON_UADDW2); |
| } |
| |
| void Assembler::saddw(const VRegister& vd, const VRegister& vn, |
| const VRegister& vm) { |
| DCHECK(vm.IsD()); |
| NEON3DifferentW(vd, vn, vm, NEON_SADDW); |
| } |
| |
| void Assembler::saddw2(const VRegister& vd, const VRegister& vn, |
| const VRegister& vm) { |
| DCHECK(vm.IsQ()); |
| NEON3DifferentW(vd, vn, vm, NEON_SADDW2); |
| } |
| |
| void Assembler::usubw(const VRegister& vd, const VRegister& vn, |
| const VRegister& vm) { |
| DCHECK(vm.IsD()); |
| NEON3DifferentW(vd, vn, vm, NEON_USUBW); |
| } |
| |
| void Assembler::usubw2(const VRegister& vd, const VRegister& vn, |
| const VRegister& vm) { |
| DCHECK(vm.IsQ()); |
| NEON3DifferentW(vd, vn, vm, NEON_USUBW2); |
| } |
| |
| void Assembler::ssubw(const VRegister& vd, const VRegister& vn, |
| const VRegister& vm) { |
| DCHECK(vm.IsD()); |
| NEON3DifferentW(vd, vn, vm, NEON_SSUBW); |
| } |
| |
| void Assembler::ssubw2(const VRegister& vd, const VRegister& vn, |
| const VRegister& vm) { |
| DCHECK(vm.IsQ()); |
| NEON3DifferentW(vd, vn, vm, NEON_SSUBW2); |
| } |
| |
| void Assembler::mov(const Register& rd, const Register& rm) { |
  // Moves involving the stack pointer are encoded as an add immediate with
  // a zero second operand; otherwise, an orr with the zero register as the
  // first operand is used, because the orr encoding cannot name sp.
| if (rd.IsSP() || rm.IsSP()) { |
| add(rd, rm, 0); |
| } else { |
| orr(rd, AppropriateZeroRegFor(rd), rm); |
| } |
| } |
| |
| void Assembler::ins(const VRegister& vd, int vd_index, const Register& rn) { |
| // We support vd arguments of the form vd.VxT() or vd.T(), where x is the |
| // number of lanes, and T is b, h, s or d. |
| int lane_size = vd.LaneSizeInBytes(); |
| NEONFormatField format; |
| switch (lane_size) { |
| case 1: |
| format = NEON_16B; |
| DCHECK(rn.IsW()); |
| break; |
| case 2: |
| format = NEON_8H; |
| DCHECK(rn.IsW()); |
| break; |
| case 4: |
| format = NEON_4S; |
| DCHECK(rn.IsW()); |
| break; |
| default: |
| DCHECK_EQ(lane_size, 8); |
| DCHECK(rn.IsX()); |
| format = NEON_2D; |
| break; |
| } |
| |
| DCHECK((0 <= vd_index) && |
| (vd_index < LaneCountFromFormat(static_cast<VectorFormat>(format)))); |
| Emit(NEON_INS_GENERAL | ImmNEON5(format, vd_index) | Rn(rn) | Rd(vd)); |
| } |
| |
| void Assembler::mov(const Register& rd, const VRegister& vn, int vn_index) { |
| DCHECK_GE(vn.SizeInBytes(), 4); |
| umov(rd, vn, vn_index); |
| } |
| |
| void Assembler::smov(const Register& rd, const VRegister& vn, int vn_index) { |
| // We support vn arguments of the form vn.VxT() or vn.T(), where x is the |
  // number of lanes, and T is b, h or s.
| int lane_size = vn.LaneSizeInBytes(); |
| NEONFormatField format; |
| switch (lane_size) { |
| case 1: |
| format = NEON_16B; |
| break; |
| case 2: |
| format = NEON_8H; |
| break; |
| default: |
| DCHECK_EQ(lane_size, 4); |
| DCHECK(rd.IsX()); |
| format = NEON_4S; |
| break; |
| } |
  Instr q = rd.IsW() ? 0 : NEON_Q;
| DCHECK((0 <= vn_index) && |
| (vn_index < LaneCountFromFormat(static_cast<VectorFormat>(format)))); |
| Emit(q | NEON_SMOV | ImmNEON5(format, vn_index) | Rn(vn) | Rd(rd)); |
| } |
| |
| void Assembler::cls(const VRegister& vd, const VRegister& vn) { |
| DCHECK(AreSameFormat(vd, vn)); |
| DCHECK(!vd.Is1D() && !vd.Is2D()); |
| Emit(VFormat(vn) | NEON_CLS | Rn(vn) | Rd(vd)); |
| } |
| |
| void Assembler::clz(const VRegister& vd, const VRegister& vn) { |
| DCHECK(AreSameFormat(vd, vn)); |
| DCHECK(!vd.Is1D() && !vd.Is2D()); |
| Emit(VFormat(vn) | NEON_CLZ | Rn(vn) | Rd(vd)); |
| } |
| |
| void Assembler::cnt(const VRegister& vd, const VRegister& vn) { |
| DCHECK(AreSameFormat(vd, vn)); |
| DCHECK(vd.Is8B() || vd.Is16B()); |
| Emit(VFormat(vn) | NEON_CNT | Rn(vn) | Rd(vd)); |
| } |
| |
| void Assembler::rev16(const VRegister& vd, const VRegister& vn) { |
| DCHECK(AreSameFormat(vd, vn)); |
| DCHECK(vd.Is8B() || vd.Is16B()); |
| Emit(VFormat(vn) | NEON_REV16 | Rn(vn) | Rd(vd)); |
| } |
| |
| void Assembler::rev32(const VRegister& vd, const VRegister& vn) { |
| DCHECK(AreSameFormat(vd, vn)); |
| DCHECK(vd.Is8B() || vd.Is16B() || vd.Is4H() || vd.Is8H()); |
| Emit(VFormat(vn) | NEON_REV32 | Rn(vn) | Rd(vd)); |
| } |
| |
| void Assembler::rev64(const VRegister& vd, const VRegister& vn) { |
| DCHECK(AreSameFormat(vd, vn)); |
| DCHECK(!vd.Is1D() && !vd.Is2D()); |
| Emit(VFormat(vn) | NEON_REV64 | Rn(vn) | Rd(vd)); |
| } |
| |
| void Assembler::ursqrte(const VRegister& vd, const VRegister& vn) { |
| DCHECK(AreSameFormat(vd, vn)); |
| DCHECK(vd.Is2S() || vd.Is4S()); |
| Emit(VFormat(vn) | NEON_URSQRTE | Rn(vn) | Rd(vd)); |
| } |
| |
| void Assembler::urecpe(const VRegister& vd, const VRegister& vn) { |
| DCHECK(AreSameFormat(vd, vn)); |
| DCHECK(vd.Is2S() || vd.Is4S()); |
| Emit(VFormat(vn) | NEON_URECPE | Rn(vn) | Rd(vd)); |
| } |
| |
| void Assembler::NEONAddlp(const VRegister& vd, const VRegister& vn, |
| NEON2RegMiscOp op) { |
| DCHECK((op == NEON_SADDLP) || (op == NEON_UADDLP) || (op == NEON_SADALP) || |
| (op == NEON_UADALP)); |
| |
| DCHECK((vn.Is8B() && vd.Is4H()) || (vn.Is4H() && vd.Is2S()) || |
| (vn.Is2S() && vd.Is1D()) || (vn.Is16B() && vd.Is8H()) || |
| (vn.Is8H() && vd.Is4S()) || (vn.Is4S() && vd.Is2D())); |
| Emit(VFormat(vn) | op | Rn(vn) | Rd(vd)); |
| } |
| |
| void Assembler::saddlp(const VRegister& vd, const VRegister& vn) { |
| NEONAddlp(vd, vn, NEON_SADDLP); |
| } |
| |
| void Assembler::uaddlp(const VRegister& vd, const VRegister& vn) { |
| NEONAddlp(vd, vn, NEON_UADDLP); |
| } |
| |
| void Assembler::sadalp(const VRegister& vd, const VRegister& vn) { |
| NEONAddlp(vd, vn, NEON_SADALP); |
| } |
| |
| void Assembler::uadalp(const VRegister& vd, const VRegister& vn) { |
| NEONAddlp(vd, vn, NEON_UADALP); |
| } |
| |
| void Assembler::NEONAcrossLanesL(const VRegister& vd, const VRegister& vn, |
| NEONAcrossLanesOp op) { |
| DCHECK((vn.Is8B() && vd.Is1H()) || (vn.Is16B() && vd.Is1H()) || |
| (vn.Is4H() && vd.Is1S()) || (vn.Is8H() && vd.Is1S()) || |
| (vn.Is4S() && vd.Is1D())); |
| Emit(VFormat(vn) | op | Rn(vn) | Rd(vd)); |
| } |
| |
| void Assembler::saddlv(const VRegister& vd, const VRegister& vn) { |
| NEONAcrossLanesL(vd, vn, NEON_SADDLV); |
| } |
| |
| void Assembler::uaddlv(const VRegister& vd, const VRegister& vn) { |
| NEONAcrossLanesL(vd, vn, NEON_UADDLV); |
| } |
| |
| void Assembler::NEONAcrossLanes(const VRegister& vd, const VRegister& vn, |
| NEONAcrossLanesOp op) { |
| DCHECK((vn.Is8B() && vd.Is1B()) || (vn.Is16B() && vd.Is1B()) || |
| (vn.Is4H() && vd.Is1H()) || (vn.Is8H() && vd.Is1H()) || |
| (vn.Is4S() && vd.Is1S())); |
| if ((op & NEONAcrossLanesFPFMask) == NEONAcrossLanesFPFixed) { |
| Emit(FPFormat(vn) | op | Rn(vn) | Rd(vd)); |
| } else { |
| Emit(VFormat(vn) | op | Rn(vn) | Rd(vd)); |
| } |
| } |
| |
| #define NEON_ACROSSLANES_LIST(V) \ |
| V(fmaxv, NEON_FMAXV, vd.Is1S()) \ |
| V(fminv, NEON_FMINV, vd.Is1S()) \ |
| V(fmaxnmv, NEON_FMAXNMV, vd.Is1S()) \ |
| V(fminnmv, NEON_FMINNMV, vd.Is1S()) \ |
| V(addv, NEON_ADDV, true) \ |
| V(smaxv, NEON_SMAXV, true) \ |
| V(sminv, NEON_SMINV, true) \ |
| V(umaxv, NEON_UMAXV, true) \ |
| V(uminv, NEON_UMINV, true) |
| |
| #define DEFINE_ASM_FUNC(FN, OP, AS) \ |
| void Assembler::FN(const VRegister& vd, const VRegister& vn) { \ |
| DCHECK(AS); \ |
| NEONAcrossLanes(vd, vn, OP); \ |
| } |
| NEON_ACROSSLANES_LIST(DEFINE_ASM_FUNC) |
| #undef DEFINE_ASM_FUNC |
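
// Usage sketch (illustrative operands):
//   addv(b0, v1.V16B());  // Sum all sixteen byte lanes into scalar b0.
//   fmaxv(s0, v2.V4S());  // Maximum of the four float lanes.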
| |
| void Assembler::mov(const VRegister& vd, int vd_index, const Register& rn) { |
| ins(vd, vd_index, rn); |
| } |
| |
| void Assembler::umov(const Register& rd, const VRegister& vn, int vn_index) { |
| // We support vn arguments of the form vn.VxT() or vn.T(), where x is the |
| // number of lanes, and T is b, h, s or d. |
| int lane_size = vn.LaneSizeInBytes(); |
| NEONFormatField format; |
| Instr q = 0; |
| switch (lane_size) { |
| case 1: |
| format = NEON_16B; |
| DCHECK(rd.IsW()); |
| break; |
| case 2: |
| format = NEON_8H; |
| DCHECK(rd.IsW()); |
| break; |
| case 4: |
| format = NEON_4S; |
| DCHECK(rd.IsW()); |
| break; |
| default: |
| DCHECK_EQ(lane_size, 8); |
| DCHECK(rd.IsX()); |
| format = NEON_2D; |
| q = NEON_Q; |
| break; |
| } |
| |
| DCHECK((0 <= vn_index) && |
| (vn_index < LaneCountFromFormat(static_cast<VectorFormat>(format)))); |
| Emit(q | NEON_UMOV | ImmNEON5(format, vn_index) | Rn(vn) | Rd(rd)); |
| } |
| |
| void Assembler::mov(const VRegister& vd, const VRegister& vn, int vn_index) { |
| DCHECK(vd.IsScalar()); |
| dup(vd, vn, vn_index); |
| } |
| |
| void Assembler::dup(const VRegister& vd, const Register& rn) { |
| DCHECK(!vd.Is1D()); |
| DCHECK_EQ(vd.Is2D(), rn.IsX()); |
| Instr q = vd.IsD() ? 0 : NEON_Q; |
| Emit(q | NEON_DUP_GENERAL | ImmNEON5(VFormat(vd), 0) | Rn(rn) | Rd(vd)); |
| } |
| |
| void Assembler::ins(const VRegister& vd, int vd_index, const VRegister& vn, |
| int vn_index) { |
| DCHECK(AreSameFormat(vd, vn)); |
| // We support vd arguments of the form vd.VxT() or vd.T(), where x is the |
| // number of lanes, and T is b, h, s or d. |
| int lane_size = vd.LaneSizeInBytes(); |
| NEONFormatField format; |
| switch (lane_size) { |
| case 1: |
| format = NEON_16B; |
| break; |
| case 2: |
| format = NEON_8H; |
| break; |
| case 4: |
| format = NEON_4S; |
| break; |
| default: |
| DCHECK_EQ(lane_size, 8); |
| format = NEON_2D; |
| break; |
| } |
| |
| DCHECK((0 <= vd_index) && |
| (vd_index < LaneCountFromFormat(static_cast<VectorFormat>(format)))); |
| DCHECK((0 <= vn_index) && |
| (vn_index < LaneCountFromFormat(static_cast<VectorFormat>(format)))); |
| Emit(NEON_INS_ELEMENT | ImmNEON5(format, vd_index) | |
| ImmNEON4(format, vn_index) | Rn(vn) | Rd(vd)); |
| } |
| |
| void Assembler::NEONTable(const VRegister& vd, const VRegister& vn, |
| const VRegister& vm, NEONTableOp op) { |
| DCHECK(vd.Is16B() || vd.Is8B()); |
| DCHECK(vn.Is16B()); |
| DCHECK(AreSameFormat(vd, vm)); |
| Emit(op | (vd.IsQ() ? NEON_Q : 0) | Rm(vm) | Rn(vn) | Rd(vd)); |
| } |
| |
| void Assembler::tbl(const VRegister& vd, const VRegister& vn, |
| const VRegister& vm) { |
| NEONTable(vd, vn, vm, NEON_TBL_1v); |
| } |
| |
| void Assembler::tbl(const VRegister& vd, const VRegister& vn, |
| const VRegister& vn2, const VRegister& vm) { |
| USE(vn2); |
| DCHECK(AreSameFormat(vn, vn2)); |
| DCHECK(AreConsecutive(vn, vn2)); |
| NEONTable(vd, vn, vm, NEON_TBL_2v); |
| } |
| |
| void Assembler::tbl(const VRegister& vd, const VRegister& vn, |
| const VRegister& vn2, const VRegister& vn3, |
| const VRegister& vm) { |
| USE(vn2); |
| USE(vn3); |
| DCHECK(AreSameFormat(vn, vn2, vn3)); |
| DCHECK(AreConsecutive(vn, vn2, vn3)); |
| NEONTable(vd, vn, vm, NEON_TBL_3v); |
| } |
| |
| void Assembler::tbl(const VRegister& vd, const VRegister& vn, |
| const VRegister& vn2, const VRegister& vn3, |
| const VRegister& vn4, const VRegister& vm) { |
| USE(vn2); |
| USE(vn3); |
| USE(vn4); |
| DCHECK(AreSameFormat(vn, vn2, vn3, vn4)); |
| DCHECK(AreConsecutive(vn, vn2, vn3, vn4)); |
| NEONTable(vd, vn, vm, NEON_TBL_4v); |
| } |
| |
| void Assembler::tbx(const VRegister& vd, const VRegister& vn, |
| const VRegister& vm) { |
| NEONTable(vd, vn, vm, NEON_TBX_1v); |
| } |
| |
| void Assembler::tbx(const VRegister& vd, const VRegister& vn, |
| const VRegister& vn2, const VRegister& vm) { |
| USE(vn2); |
| DCHECK(AreSameFormat(vn, vn2)); |
| DCHECK(AreConsecutive(vn, vn2)); |
| NEONTable(vd, vn, vm, NEON_TBX_2v); |
| } |
| |
| void Assembler::tbx(const VRegister& vd, const VRegister& vn, |
| const VRegister& vn2, const VRegister& vn3, |
| const VRegister& vm) { |
| USE(vn2); |
| USE(vn3); |
| DCHECK(AreSameFormat(vn, vn2, vn3)); |
| DCHECK(AreConsecutive(vn, vn2, vn3)); |
| NEONTable(vd, vn, vm, NEON_TBX_3v); |
| } |
| |
| void Assembler::tbx(const VRegister& vd, const VRegister& vn, |
| const VRegister& vn2, const VRegister& vn3, |
| const VRegister& vn4, const VRegister& vm) { |
| USE(vn2); |
| USE(vn3); |
| USE(vn4); |
| DCHECK(AreSameFormat(vn, vn2, vn3, vn4)); |
| DCHECK(AreConsecutive(vn, vn2, vn3, vn4)); |
| NEONTable(vd, vn, vm, NEON_TBX_4v); |
| } |
| |
| void Assembler::mov(const VRegister& vd, int vd_index, const VRegister& vn, |
| int vn_index) { |
| ins(vd, vd_index, vn, vn_index); |
| } |
| |
| void Assembler::mvn(const Register& rd, const Operand& operand) { |
| orn(rd, AppropriateZeroRegFor(rd), operand); |
| } |
| |
| void Assembler::mrs(const Register& rt, SystemRegister sysreg) { |
| DCHECK(rt.Is64Bits()); |
| Emit(MRS | ImmSystemRegister(sysreg) | Rt(rt)); |
| } |
| |
| void Assembler::msr(SystemRegister sysreg, const Register& rt) { |
| DCHECK(rt.Is64Bits()); |
| Emit(MSR | Rt(rt) | ImmSystemRegister(sysreg)); |
| } |
| |
| void Assembler::hint(SystemHint code) { Emit(HINT | ImmHint(code) | Rt(xzr)); } |
| |
| // NEON structure loads and stores. |
| Instr Assembler::LoadStoreStructAddrModeField(const MemOperand& addr) { |
| Instr addr_field = RnSP(addr.base()); |
| |
| if (addr.IsPostIndex()) { |
| static_assert(NEONLoadStoreMultiStructPostIndex == |
| static_cast<NEONLoadStoreMultiStructPostIndexOp>( |
| NEONLoadStoreSingleStructPostIndex), |
| "Opcodes must match for NEON post index memop."); |
| |
| addr_field |= NEONLoadStoreMultiStructPostIndex; |
| if (addr.offset() == 0) { |
| addr_field |= RmNot31(addr.regoffset()); |
| } else { |
      // The immediate post-index addressing mode is indicated by rm = 31.
      // The immediate is implied by the number of vector registers used.
| addr_field |= (0x1F << Rm_offset); |
| } |
| } else { |
| DCHECK(addr.IsImmediateOffset() && (addr.offset() == 0)); |
| } |
| return addr_field; |
| } |
| |
| void Assembler::LoadStoreStructVerify(const VRegister& vt, |
| const MemOperand& addr, Instr op) { |
| #ifdef DEBUG |
  // Assert that the addressing mode is either offset (with an immediate of
  // zero), post-index by an immediate equal to the size of the register
  // list, or post-index by a value in a core register.
| if (addr.IsImmediateOffset()) { |
| DCHECK_EQ(addr.offset(), 0); |
| } else { |
| int offset = vt.SizeInBytes(); |
| switch (op) { |
| case NEON_LD1_1v: |
| case NEON_ST1_1v: |
| offset *= 1; |
| break; |
| case NEONLoadStoreSingleStructLoad1: |
| case NEONLoadStoreSingleStructStore1: |
| case NEON_LD1R: |
| offset = (offset / vt.LaneCount()) * 1; |
| break; |
| |
| case NEON_LD1_2v: |
| case NEON_ST1_2v: |
| case NEON_LD2: |
| case NEON_ST2: |
| offset *= 2; |
| break; |
| case NEONLoadStoreSingleStructLoad2: |
| case NEONLoadStoreSingleStructStore2: |
| case NEON_LD2R: |
| offset = (offset / vt.LaneCount()) * 2; |
| break; |
| |
| case NEON_LD1_3v: |
| case NEON_ST1_3v: |
| case NEON_LD3: |
| case NEON_ST3: |
| offset *= 3; |
| break; |
| case NEONLoadStoreSingleStructLoad3: |
| case NEONLoadStoreSingleStructStore3: |
| case NEON_LD3R: |
| offset = (offset / vt.LaneCount()) * 3; |
| break; |
| |
| case NEON_LD1_4v: |
| case NEON_ST1_4v: |
| case NEON_LD4: |
| case NEON_ST4: |
| offset *= 4; |
| break; |
| case NEONLoadStoreSingleStructLoad4: |
| case NEONLoadStoreSingleStructStore4: |
| case NEON_LD4R: |
| offset = (offset / vt.LaneCount()) * 4; |
| break; |
| default: |
| UNREACHABLE(); |
| } |
| DCHECK(addr.regoffset() != NoReg || addr.offset() == offset); |
| } |
| #else |
| USE(vt); |
| USE(addr); |
| USE(op); |
| #endif |
| } |
| |
| void Assembler::LoadStoreStruct(const VRegister& vt, const MemOperand& addr, |
| NEONLoadStoreMultiStructOp op) { |
| LoadStoreStructVerify(vt, addr, op); |
| DCHECK(vt.IsVector() || vt.Is1D()); |
| Emit(op | LoadStoreStructAddrModeField(addr) | LSVFormat(vt) | Rt(vt)); |
| } |
| |
| void Assembler::LoadStoreStructSingleAllLanes(const VRegister& vt, |
| const MemOperand& addr, |
| NEONLoadStoreSingleStructOp op) { |
| LoadStoreStructVerify(vt, addr, op); |
| Emit(op | LoadStoreStructAddrModeField(addr) | LSVFormat(vt) | Rt(vt)); |
| } |
| |
| void Assembler::ld1(const VRegister& vt, const MemOperand& src) { |
| LoadStoreStruct(vt, src, NEON_LD1_1v); |
| } |
| |
| void Assembler::ld1(const VRegister& vt, const VRegister& vt2, |
| const MemOperand& src) { |
| USE(vt2); |
| DCHECK(AreSameFormat(vt, vt2)); |
| DCHECK(AreConsecutive(vt, vt2)); |
| LoadStoreStruct(vt, src, NEON_LD1_2v); |
| } |
| |
| void Assembler::ld1(const VRegister& vt, const VRegister& vt2, |
| const VRegister& vt3, const MemOperand& src) { |
| USE(vt2); |
| USE(vt3); |
| DCHECK(AreSameFormat(vt, vt2, vt3)); |
| DCHECK(AreConsecutive(vt, vt2, vt3)); |
| LoadStoreStruct(vt, src, NEON_LD1_3v); |
| } |
| |
| void Assembler::ld1(const VRegister& vt, const VRegister& vt2, |
| const VRegister& vt3, const VRegister& vt4, |
| const MemOperand& src) { |
| USE(vt2); |
| USE(vt3); |
| USE(vt4); |
| DCHECK(AreSameFormat(vt, vt2, vt3, vt4)); |
| DCHECK(AreConsecutive(vt, vt2, vt3, vt4)); |
| LoadStoreStruct(vt, src, NEON_LD1_4v); |
| } |
| |
| void Assembler::ld2(const VRegister& vt, const VRegister& vt2, |
| const MemOperand& src) { |
| USE(vt2); |
| DCHECK(AreSameFormat(vt, vt2)); |
| DCHECK(AreConsecutive(vt, vt2)); |
| LoadStoreStruct(vt, src, NEON_LD2); |
| } |
| |
| void Assembler::ld2(const VRegister& vt, const VRegister& vt2, int lane, |
| const MemOperand& src) { |
| USE(vt2); |
| DCHECK(AreSameFormat(vt, vt2)); |
| DCHECK(AreConsecutive(vt, vt2)); |
| LoadStoreStructSingle(vt, lane, src, NEONLoadStoreSingleStructLoad2); |
| } |
| |
| void Assembler::ld2r(const VRegister& vt, const VRegister& vt2, |
| const MemOperand& src) { |
| USE(vt2); |
| DCHECK(AreSameFormat(vt, vt2)); |
| DCHECK(AreConsecutive(vt, vt2)); |
| LoadStoreStructSingleAllLanes(vt, src, NEON_LD2R); |
| } |
| |
| void Assembler::ld3(const VRegister& vt, const VRegister& vt2, |
| const VRegister& vt3, const MemOperand& src) { |
| USE(vt2); |
| USE(vt3); |
| DCHECK(AreSameFormat(vt, vt2, vt3)); |
| DCHECK(AreConsecutive(vt, vt2, vt3)); |
| LoadStoreStruct(vt, src, NEON_LD3); |
| } |
| |
| void Assembler::ld3(const VRegister& vt, const VRegister& vt2, |
| const VRegister& vt3, int lane, const MemOperand& src) { |
| USE(vt2); |
| USE(vt3); |
| DCHECK(AreSameFormat(vt, vt2, vt3)); |
| DCHECK(AreConsecutive(vt, vt2, vt3)); |
| LoadStoreStructSingle(vt, lane, src, NEONLoadStoreSingleStructLoad3); |
| } |
| |
| void Assembler::ld3r(const VRegister& vt, const VRegister& vt2, |
| const VRegister& vt3, const MemOperand& src) { |
| USE(vt2); |
| USE(vt3); |
| DCHECK(AreSameFormat(vt, vt2, vt3)); |
| DCHECK(AreConsecutive(vt, vt2, vt3)); |
| LoadStoreStructSingleAllLanes(vt, src, NEON_LD3R); |
| } |
| |
| void Assembler::ld4(const VRegister& vt, const VRegister& vt2, |
| const VRegister& vt3, const VRegister& vt4, |
| const MemOperand& src) { |
| USE(vt2); |
| USE(vt3); |
| USE(vt4); |
| DCHECK(AreSameFormat(vt, vt2, vt3, vt4)); |
| DCHECK(AreConsecutive(vt, vt2, vt3, vt4)); |
| LoadStoreStruct(vt, src, NEON_LD4); |
| } |
| |
| void Assembler::ld4(const VRegister& vt, const VRegister& vt2, |
| const VRegister& vt3, const VRegister& vt4, int lane, |
| const MemOperand& src) { |
| USE(vt2); |
| USE(vt3); |
| USE(vt4); |
| DCHECK(AreSameFormat(vt, vt2, vt3, vt4)); |
| DCHECK(AreConsecutive(vt, vt2, vt3, vt4)); |
| LoadStoreStructSingle(vt, lane, src, NEONLoadStoreSingleStructLoad4); |
| } |
| |
| void Assembler::ld4r(const VRegister& vt, const VRegister& vt2, |
| const VRegister& vt3, const VRegister& vt4, |
| const MemOperand& src) { |
| USE(vt2); |
| USE(vt3); |
| USE(vt4); |
| DCHECK(AreSameFormat(vt, vt2, vt3, vt4)); |
| DCHECK(AreConsecutive(vt, vt2, vt3, vt4)); |
| LoadStoreStructSingleAllLanes(vt, src, NEON_LD4R); |
| } |
| |
| void Assembler::st1(const VRegister& vt, const MemOperand& src) { |
| LoadStoreStruct(vt, src, NEON_ST1_1v); |
| } |
| |
| void Assembler::st1(const VRegister& vt, const VRegister& vt2, |
| const MemOperand& src) { |
| USE(vt2); |
| DCHECK(AreSameFormat(vt, vt2)); |
| DCHECK(AreConsecutive(vt, vt2)); |
| LoadStoreStruct(vt, src, NEON_ST1_2v); |
| } |
| |
| void Assembler::st1(const VRegister& vt, const VRegister& vt2, |
| const VRegister& vt3, const MemOperand& src) { |
| USE(vt2); |
| USE(vt3); |
| DCHECK(AreSameFormat(vt, vt2, vt3)); |
| DCHECK(AreConsecutive(vt, vt2, vt3)); |
| LoadStoreStruct(vt, src, NEON_ST1_3v); |
| } |
| |
| void Assembler::st1(const VRegister& vt, const VRegister& vt2, |
| const VRegister& vt3, const VRegister& vt4, |
| const MemOperand& src) { |
| USE(vt2); |
| USE(vt3); |
| USE(vt4); |
| DCHECK(AreSameFormat(vt, vt2, vt3, vt4)); |
  DCHECK(AreConsecutive(vt, vt2, vt3, vt4));
  LoadStoreStruct(vt, src, NEON_ST1_4v);
}