[riscv64][register-alloc] Implement independent vector register allocation

The vector registers use a separate register file from the float registers in the RISC-V 64 RVV extension.
So this CL adds a third FP aliasing kind, kIndependent, to allocate SIMD registers independently.

Bug: v8:11976

doc: https://docs.google.com/document/d/1UwmUwOI3eeIMYzZFRmeXmfyNXRFHNZAQ4BcN0ODdMmo/edit?usp=sharing

Change-Id: I0fb8901294b4bc44b0bee55e630b60460e42bef2
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3383513
Reviewed-by: Nico Hartmann <nicohartmann@chromium.org>
Reviewed-by: Clemens Backes <clemensb@chromium.org>
Auto-Submit: Yahan Lu <yahan@iscas.ac.cn>
Commit-Queue: Yahan Lu <yahan@iscas.ac.cn>
Cr-Commit-Position: refs/heads/main@{#79449}
diff --git a/src/codegen/arm/register-arm.h b/src/codegen/arm/register-arm.h
index 2ea62a1..7a186f5 100644
--- a/src/codegen/arm/register-arm.h
+++ b/src/codegen/arm/register-arm.h
@@ -6,6 +6,7 @@
 #define V8_CODEGEN_ARM_REGISTER_ARM_H_
 
 #include "src/codegen/register-base.h"
+#include "src/codegen/register-configuration.h"
 #include "src/codegen/reglist.h"
 
 namespace v8 {
@@ -125,7 +126,7 @@
   return 0;
 }
 
-constexpr bool kSimpleFPAliasing = false;
+constexpr AliasingKind kFPAliasing = AliasingKind::kCombine;
 constexpr bool kSimdMaskRegisters = false;
 
 enum SwVfpRegisterCode {
diff --git a/src/codegen/arm64/register-arm64.h b/src/codegen/arm64/register-arm64.h
index 57322f0..7fdd2ee 100644
--- a/src/codegen/arm64/register-arm64.h
+++ b/src/codegen/arm64/register-arm64.h
@@ -7,6 +7,7 @@
 
 #include "src/codegen/arm64/utils-arm64.h"
 #include "src/codegen/register-base.h"
+#include "src/codegen/register-configuration.h"
 #include "src/codegen/reglist.h"
 #include "src/common/globals.h"
 
@@ -276,7 +277,7 @@
   return argument_count & alignment_mask;
 }
 
-constexpr bool kSimpleFPAliasing = true;
+constexpr AliasingKind kFPAliasing = AliasingKind::kOverlap;
 constexpr bool kSimdMaskRegisters = false;
 
 enum DoubleRegisterCode {
diff --git a/src/codegen/ia32/register-ia32.h b/src/codegen/ia32/register-ia32.h
index 8a65e4e..9337fdf 100644
--- a/src/codegen/ia32/register-ia32.h
+++ b/src/codegen/ia32/register-ia32.h
@@ -6,6 +6,7 @@
 #define V8_CODEGEN_IA32_REGISTER_IA32_H_
 
 #include "src/codegen/register-base.h"
+#include "src/codegen/register-configuration.h"
 #include "src/codegen/reglist.h"
 
 namespace v8 {
@@ -82,7 +83,7 @@
   return 0;
 }
 
-constexpr bool kSimpleFPAliasing = true;
+constexpr AliasingKind kFPAliasing = AliasingKind::kOverlap;
 constexpr bool kSimdMaskRegisters = false;
 
 enum DoubleCode {
diff --git a/src/codegen/loong64/register-loong64.h b/src/codegen/loong64/register-loong64.h
index 6b3da7a..7a94340 100644
--- a/src/codegen/loong64/register-loong64.h
+++ b/src/codegen/loong64/register-loong64.h
@@ -7,6 +7,7 @@
 
 #include "src/codegen/loong64/constants-loong64.h"
 #include "src/codegen/register-base.h"
+#include "src/codegen/register-configuration.h"
 #include "src/codegen/reglist.h"
 
 namespace v8 {
@@ -177,7 +178,7 @@
   return 0;
 }
 
-constexpr bool kSimpleFPAliasing = true;
+constexpr AliasingKind kFPAliasing = AliasingKind::kOverlap;
 constexpr bool kSimdMaskRegisters = false;
 
 enum DoubleRegisterCode {
diff --git a/src/codegen/machine-type.h b/src/codegen/machine-type.h
index e513b84..2a16e0d 100644
--- a/src/codegen/machine-type.h
+++ b/src/codegen/machine-type.h
@@ -340,6 +340,10 @@
   return rep >= MachineRepresentation::kFirstFPRepresentation;
 }
 
+inline bool IsSimd128(MachineRepresentation rep) {
+  return rep == MachineRepresentation::kSimd128;
+}
+
 inline bool CanBeTaggedPointer(MachineRepresentation rep) {
   return rep == MachineRepresentation::kTagged ||
          rep == MachineRepresentation::kTaggedPointer ||
diff --git a/src/codegen/mips/register-mips.h b/src/codegen/mips/register-mips.h
index a33400b..c194a0b 100644
--- a/src/codegen/mips/register-mips.h
+++ b/src/codegen/mips/register-mips.h
@@ -7,6 +7,7 @@
 
 #include "src/codegen/mips/constants-mips.h"
 #include "src/codegen/register-base.h"
+#include "src/codegen/register-configuration.h"
 #include "src/codegen/reglist.h"
 
 namespace v8 {
@@ -209,7 +210,7 @@
   return 0;
 }
 
-constexpr bool kSimpleFPAliasing = true;
+constexpr AliasingKind kFPAliasing = AliasingKind::kOverlap;
 constexpr bool kSimdMaskRegisters = false;
 
 enum DoubleRegisterCode {
diff --git a/src/codegen/mips64/register-mips64.h b/src/codegen/mips64/register-mips64.h
index 33d02a9..23e1f49 100644
--- a/src/codegen/mips64/register-mips64.h
+++ b/src/codegen/mips64/register-mips64.h
@@ -7,6 +7,7 @@
 
 #include "src/codegen/mips64/constants-mips64.h"
 #include "src/codegen/register-base.h"
+#include "src/codegen/register-configuration.h"
 #include "src/codegen/reglist.h"
 
 namespace v8 {
@@ -209,7 +210,7 @@
   return 0;
 }
 
-constexpr bool kSimpleFPAliasing = true;
+constexpr AliasingKind kFPAliasing = AliasingKind::kOverlap;
 constexpr bool kSimdMaskRegisters = false;
 
 enum MSARegisterCode {
diff --git a/src/codegen/ppc/register-ppc.h b/src/codegen/ppc/register-ppc.h
index 6a67c07..fa1a265 100644
--- a/src/codegen/ppc/register-ppc.h
+++ b/src/codegen/ppc/register-ppc.h
@@ -6,6 +6,7 @@
 #define V8_CODEGEN_PPC_REGISTER_PPC_H_
 
 #include "src/codegen/register-base.h"
+#include "src/codegen/register-configuration.h"
 #include "src/codegen/reglist.h"
 
 namespace v8 {
@@ -219,7 +220,7 @@
   return 0;
 }
 
-constexpr bool kSimpleFPAliasing = true;
+constexpr AliasingKind kFPAliasing = AliasingKind::kOverlap;
 constexpr bool kSimdMaskRegisters = false;
 
 enum DoubleRegisterCode {
diff --git a/src/codegen/register-configuration.cc b/src/codegen/register-configuration.cc
index 5a469a2..a78d3bf 100644
--- a/src/codegen/register-configuration.cc
+++ b/src/codegen/register-configuration.cc
@@ -19,6 +19,10 @@
     ALLOCATABLE_GENERAL_REGISTERS(REGISTER_COUNT) 0;
 static const int kMaxAllocatableDoubleRegisterCount =
     ALLOCATABLE_DOUBLE_REGISTERS(REGISTER_COUNT) 0;
+#if V8_TARGET_ARCH_RISCV64
+static const int kMaxAllocatableSIMD128RegisterCount =
+    ALLOCATABLE_SIMD128_REGISTERS(REGISTER_COUNT) 0;
+#endif
 
 static const int kAllocatableGeneralCodes[] = {
 #define REGISTER_CODE(R) kRegCode_##R,
@@ -34,6 +38,13 @@
 #endif  // V8_TARGET_ARCH_ARM
 #undef REGISTER_CODE
 
+#if V8_TARGET_ARCH_RISCV64
+static const int kAllocatableSIMD128Codes[] = {
+#define REGISTER_CODE(R) kVRCode_##R,
+    ALLOCATABLE_SIMD128_REGISTERS(REGISTER_CODE)};
+#undef REGISTER_CODE
+#endif  // V8_TARGET_ARCH_RISCV64
+
 STATIC_ASSERT(RegisterConfiguration::kMaxGeneralRegisters >=
               Register::kNumRegisters);
 STATIC_ASSERT(RegisterConfiguration::kMaxFPRegisters >=
@@ -43,6 +54,15 @@
 STATIC_ASSERT(RegisterConfiguration::kMaxFPRegisters >=
               Simd128Register::kNumRegisters);
 
+static int get_num_simd128_registers() {
+  return
+#if V8_TARGET_ARCH_RISCV64
+      Simd128Register::kNumRegisters;
+#else
+      0;
+#endif  // V8_TARGET_ARCH_RISCV64
+}
+
 // Callers on architectures other than Arm expect this to be be constant
 // between build and runtime. Avoid adding variability on other platforms.
 static int get_num_allocatable_double_registers() {
@@ -78,6 +98,15 @@
 
 #undef REGISTER_COUNT
 
+static int get_num_allocatable_simd128_registers() {
+  return
+#if V8_TARGET_ARCH_RISCV64
+      kMaxAllocatableSIMD128RegisterCount;
+#else
+      0;
+#endif
+}
+
 // Callers on architectures other than Arm expect this to be be constant
 // between build and runtime. Avoid adding variability on other platforms.
 static const int* get_allocatable_double_codes() {
@@ -90,16 +119,24 @@
 #endif
 }
 
+static const int* get_allocatable_simd128_codes() {
+  return
+#if V8_TARGET_ARCH_RISCV64
+      kAllocatableSIMD128Codes;
+#else
+      kAllocatableDoubleCodes;
+#endif
+}
+
 class ArchDefaultRegisterConfiguration : public RegisterConfiguration {
  public:
   ArchDefaultRegisterConfiguration()
       : RegisterConfiguration(
-            Register::kNumRegisters, DoubleRegister::kNumRegisters,
-            kMaxAllocatableGeneralRegisterCount,
-            get_num_allocatable_double_registers(), kAllocatableGeneralCodes,
-            get_allocatable_double_codes(),
-            kSimpleFPAliasing ? AliasingKind::OVERLAP : AliasingKind::COMBINE) {
-  }
+            kFPAliasing, Register::kNumRegisters, DoubleRegister::kNumRegisters,
+            get_num_simd128_registers(), kMaxAllocatableGeneralRegisterCount,
+            get_num_allocatable_double_registers(),
+            get_num_allocatable_simd128_registers(), kAllocatableGeneralCodes,
+            get_allocatable_double_codes(), get_allocatable_simd128_codes()) {}
 };
 
 DEFINE_LAZY_LEAKY_OBJECT_GETTER(ArchDefaultRegisterConfiguration,
@@ -115,12 +152,12 @@
       std::unique_ptr<int[]> allocatable_general_register_codes,
       std::unique_ptr<char const*[]> allocatable_general_register_names)
       : RegisterConfiguration(
-            Register::kNumRegisters, DoubleRegister::kNumRegisters,
-            num_allocatable_general_registers,
+            kFPAliasing, Register::kNumRegisters, DoubleRegister::kNumRegisters,
+            get_num_simd128_registers(), num_allocatable_general_registers,
             get_num_allocatable_double_registers(),
+            get_num_allocatable_simd128_registers(),
             allocatable_general_register_codes.get(),
-            get_allocatable_double_codes(),
-            kSimpleFPAliasing ? AliasingKind::OVERLAP : AliasingKind::COMBINE),
+            get_allocatable_double_codes(), get_allocatable_simd128_codes()),
         allocatable_general_register_codes_(
             std::move(allocatable_general_register_codes)),
         allocatable_general_register_names_(
@@ -172,18 +209,20 @@
 }
 
 RegisterConfiguration::RegisterConfiguration(
-    int num_general_registers, int num_double_registers,
+    AliasingKind fp_aliasing_kind, int num_general_registers,
+    int num_double_registers, int num_simd128_registers,
     int num_allocatable_general_registers, int num_allocatable_double_registers,
-    const int* allocatable_general_codes, const int* allocatable_double_codes,
-    AliasingKind fp_aliasing_kind)
+    int num_allocatable_simd128_registers, const int* allocatable_general_codes,
+    const int* allocatable_double_codes,
+    const int* independent_allocatable_simd128_codes)
     : num_general_registers_(num_general_registers),
       num_float_registers_(0),
       num_double_registers_(num_double_registers),
-      num_simd128_registers_(0),
+      num_simd128_registers_(num_simd128_registers),
       num_allocatable_general_registers_(num_allocatable_general_registers),
       num_allocatable_float_registers_(0),
       num_allocatable_double_registers_(num_allocatable_double_registers),
-      num_allocatable_simd128_registers_(0),
+      num_allocatable_simd128_registers_(num_allocatable_simd128_registers),
       allocatable_general_codes_mask_(0),
       allocatable_float_codes_mask_(0),
       allocatable_double_codes_mask_(0),
@@ -201,7 +240,7 @@
     allocatable_double_codes_mask_ |= (1 << allocatable_double_codes_[i]);
   }
 
-  if (fp_aliasing_kind_ == COMBINE) {
+  if (fp_aliasing_kind_ == AliasingKind::kCombine) {
     num_float_registers_ = num_double_registers_ * 2 <= kMaxFPRegisters
                                ? num_double_registers_ * 2
                                : kMaxFPRegisters;
@@ -228,8 +267,7 @@
       }
       last_simd128_code = next_simd128_code;
     }
-  } else {
-    DCHECK(fp_aliasing_kind_ == OVERLAP);
+  } else if (fp_aliasing_kind_ == AliasingKind::kOverlap) {
     num_float_registers_ = num_simd128_registers_ = num_double_registers_;
     num_allocatable_float_registers_ = num_allocatable_simd128_registers_ =
         num_allocatable_double_registers_;
@@ -239,6 +277,21 @@
     }
     allocatable_float_codes_mask_ = allocatable_simd128_codes_mask_ =
         allocatable_double_codes_mask_;
+  } else {
+    DCHECK_EQ(fp_aliasing_kind_, AliasingKind::kIndependent);
+    DCHECK_NE(independent_allocatable_simd128_codes, nullptr);
+    num_float_registers_ = num_double_registers_;
+    num_allocatable_float_registers_ = num_allocatable_double_registers_;
+    for (int i = 0; i < num_allocatable_float_registers_; ++i) {
+      allocatable_float_codes_[i] = allocatable_double_codes_[i];
+    }
+    allocatable_float_codes_mask_ = allocatable_double_codes_mask_;
+    for (int i = 0; i < num_allocatable_simd128_registers; i++) {
+      allocatable_simd128_codes_[i] = independent_allocatable_simd128_codes[i];
+    }
+    for (int i = 0; i < num_allocatable_simd128_registers_; ++i) {
+      allocatable_simd128_codes_mask_ |= (1 << allocatable_simd128_codes_[i]);
+    }
   }
 }
 
@@ -251,7 +304,7 @@
 int RegisterConfiguration::GetAliases(MachineRepresentation rep, int index,
                                       MachineRepresentation other_rep,
                                       int* alias_base_index) const {
-  DCHECK(fp_aliasing_kind_ == COMBINE);
+  DCHECK(fp_aliasing_kind_ == AliasingKind::kCombine);
   DCHECK(IsFloatingPoint(rep) && IsFloatingPoint(other_rep));
   if (rep == other_rep) {
     *alias_base_index = index;
@@ -277,7 +330,7 @@
 bool RegisterConfiguration::AreAliases(MachineRepresentation rep, int index,
                                        MachineRepresentation other_rep,
                                        int other_index) const {
-  DCHECK(fp_aliasing_kind_ == COMBINE);
+  DCHECK(fp_aliasing_kind_ == AliasingKind::kCombine);
   DCHECK(IsFloatingPoint(rep) && IsFloatingPoint(other_rep));
   if (rep == other_rep) {
     return index == other_index;
diff --git a/src/codegen/register-configuration.h b/src/codegen/register-configuration.h
index cdf9dda..4f39ac9 100644
--- a/src/codegen/register-configuration.h
+++ b/src/codegen/register-configuration.h
@@ -16,15 +16,17 @@
 
 // An architecture independent representation of the sets of registers available
 // for instruction creation.
+enum class AliasingKind {
+  // Registers alias a single register of every other size (e.g. Intel).
+  kOverlap,
+  // Registers alias two registers of the next smaller size (e.g. ARM).
+  kCombine,
+  // SIMD128 Registers are independent of every other size (e.g Riscv)
+  kIndependent
+};
+
 class V8_EXPORT_PRIVATE RegisterConfiguration {
  public:
-  enum AliasingKind {
-    // Registers alias a single register of every other size (e.g. Intel).
-    OVERLAP,
-    // Registers alias two registers of the next smaller size (e.g. ARM).
-    COMBINE
-  };
-
   // Architecture independent maxes.
   static constexpr int kMaxGeneralRegisters = 32;
   static constexpr int kMaxFPRegisters = 32;
@@ -40,12 +42,14 @@
   static const RegisterConfiguration* RestrictGeneralRegisters(
       RegList registers);
 
-  RegisterConfiguration(int num_general_registers, int num_double_registers,
-                        int num_allocatable_general_registers,
-                        int num_allocatable_double_registers,
-                        const int* allocatable_general_codes,
-                        const int* allocatable_double_codes,
-                        AliasingKind fp_aliasing_kind);
+  RegisterConfiguration(
+      AliasingKind fp_aliasing_kind, int num_general_registers,
+      int num_double_registers, int num_simd128_registers,
+      int num_allocatable_general_registers,
+      int num_allocatable_double_registers,
+      int num_allocatable_simd128_registers,
+      const int* allocatable_general_codes, const int* allocatable_double_codes,
+      const int* independent_allocatable_simd128_codes = nullptr);
 
   int num_general_registers() const { return num_general_registers_; }
   int num_float_registers() const { return num_float_registers_; }
diff --git a/src/codegen/riscv64/register-riscv64.h b/src/codegen/riscv64/register-riscv64.h
index 42337ee..e8ff722 100644
--- a/src/codegen/riscv64/register-riscv64.h
+++ b/src/codegen/riscv64/register-riscv64.h
@@ -6,6 +6,7 @@
 #define V8_CODEGEN_RISCV64_REGISTER_RISCV64_H_
 
 #include "src/codegen/register-base.h"
+#include "src/codegen/register-configuration.h"
 #include "src/codegen/reglist.h"
 #include "src/codegen/riscv64/constants-riscv64.h"
 
@@ -55,10 +56,11 @@
   V(v16) V(v17) V(v18) V(v19) V(v20) V(v21) V(v22) V(v23) \
   V(v24) V(v25) V(v26) V(v27) V(v28) V(v29) V(v30) V(v31)
 
-#define UNALLOACTABLE_VECTOR_REGISTERS(V)                 \
-  V(v9)  V(v10) V(v11) V(v12) V(v13) V(v14) V(v15)        \
-  V(v18) V(v19) V(v20) V(v21) V(v22) V(v23)               \
-  V(v24) V(v25)
+#define ALLOCATABLE_SIMD128_REGISTERS(V)            \
+  V(v1)  V(v2)  V(v3)  V(v4)  V(v5)  V(v6)  V(v7)   \
+  V(v10) V(v11) V(v12) V(v13) V(v14) V(v15) V(v16)  \
+  V(v17) V(v18) V(v19) V(v20) V(v21) V(v22) V(v26)  \
+  V(v27) V(v28) V(v29) V(v30) V(v31)
 
 #define ALLOCATABLE_DOUBLE_REGISTERS(V)                              \
   V(ft1)  V(ft2) V(ft3) V(ft4)  V(ft5) V(ft6) V(ft7) V(ft8)          \
@@ -253,7 +255,7 @@
 Register ToRegister(int num);
 
 constexpr bool kPadArguments = false;
-constexpr bool kSimpleFPAliasing = true;
+constexpr AliasingKind kFPAliasing = AliasingKind::kIndependent;
 constexpr bool kSimdMaskRegisters = false;
 
 enum DoubleRegisterCode {
@@ -299,11 +301,6 @@
   // register and floating point register are shared.
   VRegister toV() const {
     DCHECK(base::IsInRange(code(), 0, kVRAfterLast - 1));
-    // FIXME(riscv): Because V0 is a special mask reg, so can't allocate it.
-    // And v8 is unallocated so we replace v0 with v8
-    if (code() == 0) {
-      return VRegister(8);
-    }
     return VRegister(code());
   }
 
diff --git a/src/codegen/s390/register-s390.h b/src/codegen/s390/register-s390.h
index 0295f8a..4007afd 100644
--- a/src/codegen/s390/register-s390.h
+++ b/src/codegen/s390/register-s390.h
@@ -6,6 +6,7 @@
 #define V8_CODEGEN_S390_REGISTER_S390_H_
 
 #include "src/codegen/register-base.h"
+#include "src/codegen/register-configuration.h"
 #include "src/codegen/reglist.h"
 
 namespace v8 {
@@ -173,7 +174,7 @@
   return 0;
 }
 
-constexpr bool kSimpleFPAliasing = true;
+constexpr AliasingKind kFPAliasing = AliasingKind::kOverlap;
 constexpr bool kSimdMaskRegisters = false;
 
 enum DoubleRegisterCode {
diff --git a/src/codegen/x64/register-x64.h b/src/codegen/x64/register-x64.h
index 88aabe7..261be62 100644
--- a/src/codegen/x64/register-x64.h
+++ b/src/codegen/x64/register-x64.h
@@ -6,6 +6,7 @@
 #define V8_CODEGEN_X64_REGISTER_X64_H_
 
 #include "src/codegen/register-base.h"
+#include "src/codegen/register-configuration.h"
 #include "src/codegen/reglist.h"
 
 namespace v8 {
@@ -176,7 +177,7 @@
   return 0;
 }
 
-constexpr bool kSimpleFPAliasing = true;
+constexpr AliasingKind kFPAliasing = AliasingKind::kOverlap;
 constexpr bool kSimdMaskRegisters = false;
 
 enum DoubleRegisterCode {
diff --git a/src/compiler/backend/gap-resolver.cc b/src/compiler/backend/gap-resolver.cc
index e9aeb2f..d6c3d00 100644
--- a/src/compiler/backend/gap-resolver.cc
+++ b/src/compiler/backend/gap-resolver.cc
@@ -22,7 +22,7 @@
 // aliasing, and makes swaps much easier to implement.
 MoveOperands* Split(MoveOperands* move, MachineRepresentation smaller_rep,
                     ParallelMove* moves) {
-  DCHECK(!kSimpleFPAliasing);
+  DCHECK(kFPAliasing == AliasingKind::kCombine);
   // Splitting is only possible when the slot size is the same as float size.
   DCHECK_EQ(kSystemPointerSize, kFloatSize);
   const LocationOperand& src_loc = LocationOperand::cast(move->source());
@@ -104,7 +104,8 @@
     i++;
     source_kinds.Add(GetKind(move->source()));
     destination_kinds.Add(GetKind(move->destination()));
-    if (!kSimpleFPAliasing && move->destination().IsFPRegister()) {
+    if (kFPAliasing == AliasingKind::kCombine &&
+        move->destination().IsFPRegister()) {
       fp_reps |= RepresentationBit(
           LocationOperand::cast(move->destination()).representation());
     }
@@ -119,7 +120,7 @@
     return;
   }
 
-  if (!kSimpleFPAliasing) {
+  if (kFPAliasing == AliasingKind::kCombine) {
     if (fp_reps && !base::bits::IsPowerOfTwo(fp_reps)) {
       // Start with the smallest FP moves, so we never encounter smaller moves
       // in the middle of a cycle of larger moves.
@@ -166,8 +167,8 @@
   move->SetPending();
 
   // We may need to split moves between FP locations differently.
-  const bool is_fp_loc_move =
-      !kSimpleFPAliasing && destination.IsFPLocationOperand();
+  const bool is_fp_loc_move = kFPAliasing == AliasingKind::kCombine &&
+                              destination.IsFPLocationOperand();
 
   // Perform a depth-first traversal of the move graph to resolve dependencies.
   // Any unperformed, unpending move with a source the same as this one's
diff --git a/src/compiler/backend/instruction.cc b/src/compiler/backend/instruction.cc
index da51a3b..45d4de7 100644
--- a/src/compiler/backend/instruction.cc
+++ b/src/compiler/backend/instruction.cc
@@ -81,13 +81,13 @@
 }
 
 bool InstructionOperand::InterferesWith(const InstructionOperand& other) const {
-  const bool kComplexFPAliasing = !kSimpleFPAliasing &&
+  const bool kCombineFPAliasing = kFPAliasing == AliasingKind::kCombine &&
                                   this->IsFPLocationOperand() &&
                                   other.IsFPLocationOperand();
   const bool kComplexS128SlotAliasing =
       (this->IsSimd128StackSlot() && other.IsAnyStackSlot()) ||
       (other.IsSimd128StackSlot() && this->IsAnyStackSlot());
-  if (!kComplexFPAliasing && !kComplexS128SlotAliasing) {
+  if (!kCombineFPAliasing && !kComplexS128SlotAliasing) {
     return EqualsCanonicalized(other);
   }
   const LocationOperand& loc = *LocationOperand::cast(this);
@@ -98,7 +98,7 @@
   MachineRepresentation rep = loc.representation();
   MachineRepresentation other_rep = other_loc.representation();
 
-  if (kComplexFPAliasing && !kComplexS128SlotAliasing) {
+  if (kCombineFPAliasing && !kComplexS128SlotAliasing) {
     if (rep == other_rep) return EqualsCanonicalized(other);
     if (kind == LocationOperand::REGISTER) {
       // FP register-register interference.
@@ -126,7 +126,7 @@
 bool LocationOperand::IsCompatible(LocationOperand* op) {
   if (IsRegister() || IsStackSlot()) {
     return op->IsRegister() || op->IsStackSlot();
-  } else if (kSimpleFPAliasing) {
+  } else if (kFPAliasing != AliasingKind::kCombine) {
     // A backend may choose to generate the same instruction sequence regardless
     // of the FP representation. As a result, we can relax the compatibility and
     // allow a Double to be moved in a Float for example. However, this is only
@@ -162,8 +162,11 @@
                     << ")";
         case UnallocatedOperand::FIXED_FP_REGISTER:
           return os << "(="
-                    << DoubleRegister::from_code(
-                           unalloc->fixed_register_index())
+                    << (unalloc->IsSimd128Register()
+                            ? i::RegisterName((Simd128Register::from_code(
+                                  unalloc->fixed_register_index())))
+                            : i::RegisterName(DoubleRegister::from_code(
+                                  unalloc->fixed_register_index())))
                     << ")";
         case UnallocatedOperand::MUST_HAVE_REGISTER:
           return os << "(R)";
@@ -296,8 +299,8 @@
 
 void ParallelMove::PrepareInsertAfter(
     MoveOperands* move, ZoneVector<MoveOperands*>* to_eliminate) const {
-  bool no_aliasing =
-      kSimpleFPAliasing || !move->destination().IsFPLocationOperand();
+  bool no_aliasing = kFPAliasing != AliasingKind::kCombine ||
+                     !move->destination().IsFPLocationOperand();
   MoveOperands* replacement = nullptr;
   MoveOperands* eliminated = nullptr;
   for (MoveOperands* curr : *this) {
diff --git a/src/compiler/backend/instruction.h b/src/compiler/backend/instruction.h
index b56d74b..89394b2 100644
--- a/src/compiler/backend/instruction.h
+++ b/src/compiler/backend/instruction.h
@@ -695,12 +695,19 @@
   if (IsAnyLocationOperand()) {
     MachineRepresentation canonical = MachineRepresentation::kNone;
     if (IsFPRegister()) {
-      if (kSimpleFPAliasing) {
+      if (kFPAliasing == AliasingKind::kOverlap) {
         // We treat all FP register operands the same for simple aliasing.
         canonical = MachineRepresentation::kFloat64;
+      } else if (kFPAliasing == AliasingKind::kIndependent) {
+        if (IsSimd128Register()) {
+          canonical = MachineRepresentation::kSimd128;
+        } else {
+          canonical = MachineRepresentation::kFloat64;
+        }
       } else {
         // We need to distinguish FP register operands of different reps when
-        // aliasing is not simple (e.g. ARM).
+        // aliasing is AliasingKind::kCombine (e.g. ARM).
+        DCHECK_EQ(kFPAliasing, AliasingKind::kCombine);
         canonical = LocationOperand::cast(this)->representation();
       }
     }
@@ -1696,6 +1703,12 @@
     return (representation_mask() & kFPRepMask) != 0;
   }
 
+  bool HasSimd128VirtualRegisters() const {
+    constexpr int kSimd128RepMask =
+        RepresentationBit(MachineRepresentation::kSimd128);
+    return (representation_mask() & kSimd128RepMask) != 0;
+  }
+
   Instruction* GetBlockStart(RpoNumber rpo) const;
 
   using const_iterator = InstructionDeque::const_iterator;
diff --git a/src/compiler/backend/mid-tier-register-allocator.cc b/src/compiler/backend/mid-tier-register-allocator.cc
index 6dc10cf..6d70841 100644
--- a/src/compiler/backend/mid-tier-register-allocator.cc
+++ b/src/compiler/backend/mid-tier-register-allocator.cc
@@ -78,6 +78,7 @@
  private:
   RegisterState* general_registers_in_state_;
   RegisterState* double_registers_in_state_;
+  RegisterState* simd128_registers_in_state_;
 
   DeferredBlocksRegion* deferred_blocks_region_;
 
@@ -92,6 +93,8 @@
       return general_registers_in_state_;
     case RegisterKind::kDouble:
       return double_registers_in_state_;
+    case RegisterKind::kSimd128:
+      return simd128_registers_in_state_;
   }
 }
 
@@ -106,6 +109,10 @@
       DCHECK_NULL(double_registers_in_state_);
       double_registers_in_state_ = register_state;
       break;
+    case RegisterKind::kSimd128:
+      DCHECK_NULL(simd128_registers_in_state_);
+      simd128_registers_in_state_ = register_state;
+      break;
   }
 }
 
@@ -180,7 +187,8 @@
   }
 
   uintptr_t ToBit(MachineRepresentation rep) const {
-    if (kSimpleFPAliasing || rep != MachineRepresentation::kSimd128) {
+    if (kFPAliasing != AliasingKind::kCombine ||
+        rep != MachineRepresentation::kSimd128) {
       return 1ull << ToInt();
     } else {
       DCHECK_EQ(rep, MachineRepresentation::kSimd128);
@@ -1526,11 +1534,11 @@
   bool VirtualRegisterIsUnallocatedOrInReg(int virtual_register,
                                            RegisterIndex reg);
 
-  // If {!kSimpleFPAliasing}, two FP registers alias one SIMD register. This
-  // returns the index of the higher aliasing FP register from the SIMD register
-  // index (which is the same as the lower register index).
+  // If {if kFPAliasing kind is COMBINE}, two FP registers alias one SIMD
+  // register. This returns the index of the higher aliasing FP register from
+  // the SIMD register index (which is the same as the lower register index).
   RegisterIndex simdSibling(RegisterIndex reg) const {
-    CHECK(!kSimpleFPAliasing);  // Statically evaluated.
+    CHECK_EQ(kFPAliasing, AliasingKind::kCombine);  // Statically evaluated.
     RegisterIndex sibling = RegisterIndex{reg.ToInt() + 1};
 #ifdef DEBUG
     // Check that {reg} is indeed the lower SIMD half and {sibling} is the
@@ -1581,7 +1589,7 @@
   RegisterBitVector allocated_registers_bits_;
   RegisterBitVector same_input_output_registers_bits_;
 
-  // These fields are only used when kSimpleFPAliasing == false.
+  // These fields are only used when kFPAliasing == COMBINE.
   base::Optional<ZoneVector<RegisterIndex>> float32_reg_code_to_index_;
   base::Optional<ZoneVector<int>> index_to_float32_reg_code_;
   base::Optional<ZoneVector<RegisterIndex>> simd128_reg_code_to_index_;
@@ -1612,9 +1620,9 @@
     reg_code_to_index_[reg_code] = RegisterIndex(i);
   }
 
-  // If the architecture has non-simple FP aliasing, initialize float and
+  // If the architecture has COMBINE FP aliasing, initialize float and
   // simd128 specific register details.
-  if (!kSimpleFPAliasing && kind == RegisterKind::kDouble) {
+  if (kFPAliasing == AliasingKind::kCombine && kind == RegisterKind::kDouble) {
     const RegisterConfiguration* config = data->config();
 
     //  Float registers.
@@ -1784,15 +1792,17 @@
         processed_regs.Add(reg, rep);
 
         bool reg_in_use = register_state_->IsAllocated(reg);
-        // For non-simple FP aliasing, the register is also "in use" if the
+        // For COMBINE FP aliasing, the register is also "in use" if the
         // FP register for the upper half is allocated.
-        if (!kSimpleFPAliasing && rep == MachineRepresentation::kSimd128) {
+        if (kFPAliasing == AliasingKind::kCombine &&
+            rep == MachineRepresentation::kSimd128) {
           reg_in_use |= register_state_->IsAllocated(simdSibling(reg));
         }
         // Similarly (but the other way around), the register might be the upper
         // half of a SIMD register that is allocated.
-        if (!kSimpleFPAliasing && (rep == MachineRepresentation::kFloat64 ||
-                                   rep == MachineRepresentation::kFloat32)) {
+        if (kFPAliasing == AliasingKind::kCombine &&
+            (rep == MachineRepresentation::kFloat64 ||
+             rep == MachineRepresentation::kFloat32)) {
           int simd_reg_code;
           CHECK_EQ(1, data_->config()->GetAliases(
                           rep, ToRegCode(reg, rep),
@@ -1881,7 +1891,8 @@
     reg_state->Spill(reg, allocated, current_block_, data_);
   }
   // Also spill the "simd sibling" register if we want to use {reg} for SIMD.
-  if (!kSimpleFPAliasing && rep == MachineRepresentation::kSimd128) {
+  if (kFPAliasing == AliasingKind::kCombine &&
+      rep == MachineRepresentation::kSimd128) {
     RegisterIndex sibling = simdSibling(reg);
     if (reg_state->IsAllocated(sibling)) {
       int virtual_register = reg_state->VirtualRegisterForRegister(sibling);
@@ -1893,8 +1904,9 @@
     }
   }
   // Similarly, spill the whole SIMD register if we want to use a part of it.
-  if (!kSimpleFPAliasing && (rep == MachineRepresentation::kFloat64 ||
-                             rep == MachineRepresentation::kFloat32)) {
+  if (kFPAliasing == AliasingKind::kCombine &&
+      (rep == MachineRepresentation::kFloat64 ||
+       rep == MachineRepresentation::kFloat32)) {
     int simd_reg_code;
     CHECK_EQ(1, data_->config()->GetAliases(rep, ToRegCode(reg, rep),
                                             MachineRepresentation::kSimd128,
@@ -1980,7 +1992,8 @@
 
 RegisterIndex SinglePassRegisterAllocator::FromRegCode(
     int reg_code, MachineRepresentation rep) const {
-  if (!kSimpleFPAliasing && kind() == RegisterKind::kDouble) {
+  if (kFPAliasing == AliasingKind::kCombine &&
+      kind() == RegisterKind::kDouble) {
     if (rep == MachineRepresentation::kFloat32) {
       return RegisterIndex(float32_reg_code_to_index_->at(reg_code));
     } else if (rep == MachineRepresentation::kSimd128) {
@@ -1994,7 +2007,8 @@
 
 int SinglePassRegisterAllocator::ToRegCode(RegisterIndex reg,
                                            MachineRepresentation rep) const {
-  if (!kSimpleFPAliasing && kind() == RegisterKind::kDouble) {
+  if (kFPAliasing == AliasingKind::kCombine &&
+      kind() == RegisterKind::kDouble) {
     if (rep == MachineRepresentation::kFloat32) {
       DCHECK_NE(-1, index_to_float32_reg_code_->at(reg.ToInt()));
       return index_to_float32_reg_code_->at(reg.ToInt());
@@ -2129,7 +2143,8 @@
 
 bool SinglePassRegisterAllocator::IsValidForRep(RegisterIndex reg,
                                                 MachineRepresentation rep) {
-  if (kSimpleFPAliasing || kind() == RegisterKind::kGeneral) {
+  if (kFPAliasing != AliasingKind::kCombine ||
+      kind() == RegisterKind::kGeneral) {
     return true;
   } else {
     switch (rep) {
@@ -2157,7 +2172,8 @@
 RegisterIndex SinglePassRegisterAllocator::ChooseFreeRegister(
     const RegisterBitVector& allocated_regs, MachineRepresentation rep) {
   RegisterIndex chosen_reg = RegisterIndex::Invalid();
-  if (kSimpleFPAliasing || kind() == RegisterKind::kGeneral) {
+  if (kFPAliasing != AliasingKind::kCombine ||
+      kind() == RegisterKind::kGeneral) {
     chosen_reg = allocated_regs.GetFirstCleared(num_allocatable_registers_);
   } else {
     // If we don't have simple fp aliasing, we need to check each register
@@ -2195,8 +2211,11 @@
     if (!IsValidForRep(reg, rep) || in_use.Contains(reg, rep)) continue;
     // With non-simple FP aliasing, a SIMD register might block more than one FP
     // register.
-    DCHECK_IMPLIES(kSimpleFPAliasing, register_state_->IsAllocated(reg));
-    if (!kSimpleFPAliasing && !register_state_->IsAllocated(reg)) continue;
+    DCHECK_IMPLIES(kFPAliasing != AliasingKind::kCombine,
+                   register_state_->IsAllocated(reg));
+    if (kFPAliasing == AliasingKind::kCombine &&
+        !register_state_->IsAllocated(reg))
+      continue;
 
     VirtualRegisterData& vreg_data =
         VirtualRegisterDataFor(VirtualRegisterForRegister(reg));
@@ -2245,7 +2264,8 @@
     RegisterIndex reg, MachineRepresentation rep) {
   SpillRegister(reg);
 
-  if (!kSimpleFPAliasing && rep == MachineRepresentation::kSimd128) {
+  if (kFPAliasing == AliasingKind::kCombine &&
+      rep == MachineRepresentation::kSimd128) {
     SpillRegister(simdSibling(reg));
   }
 }
@@ -2636,7 +2656,8 @@
   }
   // Also potentially spill the "sibling SIMD register" on architectures where a
   // SIMD register aliases two FP registers.
-  if (!kSimpleFPAliasing && rep == MachineRepresentation::kSimd128) {
+  if (kFPAliasing == AliasingKind::kCombine &&
+      rep == MachineRepresentation::kSimd128) {
     if (register_state_->IsAllocated(simdSibling(reg)) &&
         !DefinedAfter(virtual_register, instr_index, pos)) {
       SpillRegister(simdSibling(reg));
@@ -2644,8 +2665,9 @@
   }
   // Similarly (but the other way around), spill a SIMD register that (partly)
   // overlaps with a fixed FP register.
-  if (!kSimpleFPAliasing && (rep == MachineRepresentation::kFloat64 ||
-                             rep == MachineRepresentation::kFloat32)) {
+  if (kFPAliasing == AliasingKind::kCombine &&
+      (rep == MachineRepresentation::kFloat64 ||
+       rep == MachineRepresentation::kFloat32)) {
     int simd_reg_code;
     CHECK_EQ(
         1, data_->config()->GetAliases(
diff --git a/src/compiler/backend/move-optimizer.cc b/src/compiler/backend/move-optimizer.cc
index 88a34c8..8544259 100644
--- a/src/compiler/backend/move-optimizer.cc
+++ b/src/compiler/backend/move-optimizer.cc
@@ -38,7 +38,7 @@
   void InsertOp(const InstructionOperand& op) {
     set_->push_back(op);
 
-    if (!kSimpleFPAliasing && op.IsFPRegister())
+    if (kFPAliasing == AliasingKind::kCombine && op.IsFPRegister())
       fp_reps_ |= RepresentationBit(LocationOperand::cast(op).representation());
   }
 
@@ -52,7 +52,7 @@
   bool ContainsOpOrAlias(const InstructionOperand& op) const {
     if (Contains(op)) return true;
 
-    if (!kSimpleFPAliasing && op.IsFPRegister()) {
+    if (kFPAliasing == AliasingKind::kCombine && op.IsFPRegister()) {
       // Platforms where FP registers have complex aliasing need extra checks.
       const LocationOperand& loc = LocationOperand::cast(op);
       MachineRepresentation rep = loc.representation();
diff --git a/src/compiler/backend/register-allocation.h b/src/compiler/backend/register-allocation.h
index 33a0854..4c0bfe1 100644
--- a/src/compiler/backend/register-allocation.h
+++ b/src/compiler/backend/register-allocation.h
@@ -12,7 +12,7 @@
 namespace internal {
 namespace compiler {
 
-enum class RegisterKind { kGeneral, kDouble };
+enum class RegisterKind { kGeneral, kDouble, kSimd128 };
 
 inline int GetRegisterCount(const RegisterConfiguration* config,
                             RegisterKind kind) {
@@ -21,6 +21,8 @@
       return config->num_general_registers();
     case RegisterKind::kDouble:
       return config->num_double_registers();
+    case RegisterKind::kSimd128:
+      return config->num_simd128_registers();
   }
 }
 
@@ -31,6 +33,8 @@
       return config->num_allocatable_general_registers();
     case RegisterKind::kDouble:
       return config->num_allocatable_double_registers();
+    case RegisterKind::kSimd128:
+      return config->num_allocatable_simd128_registers();
   }
 }
 
@@ -41,6 +45,8 @@
       return config->allocatable_general_codes();
     case RegisterKind::kDouble:
       return config->allocatable_double_codes();
+    case RegisterKind::kSimd128:
+      return config->allocatable_simd128_codes();
   }
 }
 
diff --git a/src/compiler/backend/register-allocator.cc b/src/compiler/backend/register-allocator.cc
index 6b2f1e2..f0bf4e2 100644
--- a/src/compiler/backend/register-allocator.cc
+++ b/src/compiler/backend/register-allocator.cc
@@ -379,8 +379,13 @@
 }
 
 RegisterKind LiveRange::kind() const {
-  return IsFloatingPoint(representation()) ? RegisterKind::kDouble
-                                           : RegisterKind::kGeneral;
+  if (kFPAliasing == AliasingKind::kIndependent &&
+      IsSimd128(representation())) {
+    return RegisterKind::kSimd128;
+  } else {
+    return IsFloatingPoint(representation()) ? RegisterKind::kDouble
+                                             : RegisterKind::kGeneral;
+  }
 }
 
 UsePosition* LiveRange::FirstHintPosition(int* register_index) {
@@ -1321,7 +1326,7 @@
       flags_(flags),
       tick_counter_(tick_counter),
       slot_for_const_range_(zone) {
-  if (!kSimpleFPAliasing) {
+  if (kFPAliasing == AliasingKind::kCombine) {
     fixed_float_live_ranges_.resize(
         kNumberOfFixedRangesPerRegister * this->config()->num_float_registers(),
         nullptr);
@@ -1329,6 +1334,11 @@
         kNumberOfFixedRangesPerRegister *
             this->config()->num_simd128_registers(),
         nullptr);
+  } else if (kFPAliasing == AliasingKind::kIndependent) {
+    fixed_simd128_live_ranges_.resize(
+        kNumberOfFixedRangesPerRegister *
+            this->config()->num_simd128_registers(),
+        nullptr);
   }
 
   assigned_registers_ = code_zone()->New<BitVector>(
@@ -1339,6 +1349,12 @@
       this->config()->num_general_registers(), code_zone());
   fixed_fp_register_use_ = code_zone()->New<BitVector>(
       this->config()->num_double_registers(), code_zone());
+  if (kFPAliasing == AliasingKind::kIndependent) {
+    assigned_simd128_registers_ = code_zone()->New<BitVector>(
+        this->config()->num_simd128_registers(), code_zone());
+    fixed_simd128_register_use_ = code_zone()->New<BitVector>(
+        this->config()->num_simd128_registers(), code_zone());
+  }
 
   this->frame()->SetAllocatedRegisters(assigned_registers_);
   this->frame()->SetAllocatedDoubleRegisters(assigned_double_registers_);
@@ -1477,8 +1493,14 @@
   switch (rep) {
     case MachineRepresentation::kFloat32:
     case MachineRepresentation::kSimd128:
-      if (kSimpleFPAliasing) {
+      if (kFPAliasing == AliasingKind::kOverlap) {
         fixed_fp_register_use_->Add(index);
+      } else if (kFPAliasing == AliasingKind::kIndependent) {
+        if (rep == MachineRepresentation::kFloat32) {
+          fixed_fp_register_use_->Add(index);
+        } else {
+          fixed_simd128_register_use_->Add(index);
+        }
       } else {
         int alias_base_index = -1;
         int aliases = config()->GetAliases(
@@ -1505,19 +1527,26 @@
   switch (rep) {
     case MachineRepresentation::kFloat32:
     case MachineRepresentation::kSimd128: {
-      if (kSimpleFPAliasing) {
+      if (kFPAliasing == AliasingKind::kOverlap) {
         return fixed_fp_register_use_->Contains(index);
+      } else if (kFPAliasing == AliasingKind::kIndependent) {
+        if (rep == MachineRepresentation::kFloat32) {
+          return fixed_fp_register_use_->Contains(index);
+        } else {
+          return fixed_simd128_register_use_->Contains(index);
+        }
+      } else {
+        int alias_base_index = -1;
+        int aliases = config()->GetAliases(
+            rep, index, MachineRepresentation::kFloat64, &alias_base_index);
+        DCHECK(aliases > 0 || (aliases == 0 && alias_base_index == -1));
+        bool result = false;
+        while (aliases-- && !result) {
+          int aliased_reg = alias_base_index + aliases;
+          result |= fixed_fp_register_use_->Contains(aliased_reg);
+        }
+        return result;
       }
-      int alias_base_index = -1;
-      int aliases = config()->GetAliases(
-          rep, index, MachineRepresentation::kFloat64, &alias_base_index);
-      DCHECK(aliases > 0 || (aliases == 0 && alias_base_index == -1));
-      bool result = false;
-      while (aliases-- && !result) {
-        int aliased_reg = alias_base_index + aliases;
-        result |= fixed_fp_register_use_->Contains(aliased_reg);
-      }
-      return result;
     }
     case MachineRepresentation::kFloat64:
       return fixed_fp_register_use_->Contains(index);
@@ -1532,8 +1561,14 @@
   switch (rep) {
     case MachineRepresentation::kFloat32:
     case MachineRepresentation::kSimd128:
-      if (kSimpleFPAliasing) {
+      if (kFPAliasing == AliasingKind::kOverlap) {
         assigned_double_registers_->Add(index);
+      } else if (kFPAliasing == AliasingKind::kIndependent) {
+        if (rep == MachineRepresentation::kFloat32) {
+          assigned_double_registers_->Add(index);
+        } else {
+          assigned_simd128_registers_->Add(index);
+        }
       } else {
         int alias_base_index = -1;
         int aliases = config()->GetAliases(
@@ -1946,7 +1981,7 @@
   int num_regs = config()->num_double_registers();
   ZoneVector<TopLevelLiveRange*>* live_ranges =
       &data()->fixed_double_live_ranges();
-  if (!kSimpleFPAliasing) {
+  if (kFPAliasing == AliasingKind::kCombine) {
     switch (rep) {
       case MachineRepresentation::kFloat32:
         num_regs = config()->num_float_registers();
@@ -1979,6 +2014,32 @@
   return result;
 }
 
+TopLevelLiveRange* LiveRangeBuilder::FixedSIMD128LiveRangeFor(
+    int index, SpillMode spill_mode) {
+  DCHECK_EQ(kFPAliasing, AliasingKind::kIndependent);
+  int num_regs = config()->num_simd128_registers();
+  ZoneVector<TopLevelLiveRange*>* live_ranges =
+      &data()->fixed_simd128_live_ranges();
+  int offset = spill_mode == SpillMode::kSpillAtDefinition ? 0 : num_regs;
+
+  DCHECK(index < num_regs);
+  USE(num_regs);
+  TopLevelLiveRange* result = (*live_ranges)[offset + index];
+  if (result == nullptr) {
+    result = data()->NewLiveRange(
+        FixedFPLiveRangeID(offset + index, MachineRepresentation::kSimd128),
+        MachineRepresentation::kSimd128);
+    DCHECK(result->IsFixed());
+    result->set_assigned_register(index);
+    data()->MarkAllocated(MachineRepresentation::kSimd128, index);
+    if (spill_mode == SpillMode::kSpillDeferred) {
+      result->set_deferred_fixed();
+    }
+    (*live_ranges)[offset + index] = result;
+  }
+  return result;
+}
+
 TopLevelLiveRange* LiveRangeBuilder::LiveRangeFor(InstructionOperand* operand,
                                                   SpillMode spill_mode) {
   if (operand->IsUnallocated()) {
@@ -1992,6 +2053,10 @@
         LocationOperand::cast(operand)->GetRegister().code(), spill_mode);
   } else if (operand->IsFPRegister()) {
     LocationOperand* op = LocationOperand::cast(operand);
+    if (kFPAliasing == AliasingKind::kIndependent &&
+        op->representation() == MachineRepresentation::kSimd128) {
+      return FixedSIMD128LiveRangeFor(op->register_code(), spill_mode);
+    }
     return FixedFPLiveRangeFor(op->register_code(), op->representation(),
                                spill_mode);
   } else {
@@ -2055,10 +2120,13 @@
       LifetimePosition::GapFromInstructionIndex(block_start);
   bool fixed_float_live_ranges = false;
   bool fixed_simd128_live_ranges = false;
-  if (!kSimpleFPAliasing) {
+  if (kFPAliasing == AliasingKind::kCombine) {
     int mask = data()->code()->representation_mask();
     fixed_float_live_ranges = (mask & kFloat32Bit) != 0;
     fixed_simd128_live_ranges = (mask & kSimd128Bit) != 0;
+  } else if (kFPAliasing == AliasingKind::kIndependent) {
+    int mask = data()->code()->representation_mask();
+    fixed_simd128_live_ranges = (mask & kSimd128Bit) != 0;
   }
   SpillMode spill_mode = SpillModeForBlock(block);
 
@@ -2120,7 +2188,7 @@
                               allocation_zone(), data()->is_trace_alloc());
       }
       // Clobber fixed float registers on archs with non-simple aliasing.
-      if (!kSimpleFPAliasing) {
+      if (kFPAliasing == AliasingKind::kCombine) {
         if (fixed_float_live_ranges) {
           for (int i = 0; i < config()->num_allocatable_float_registers();
                ++i) {
@@ -2143,6 +2211,17 @@
                                   allocation_zone(), data()->is_trace_alloc());
           }
         }
+      } else if (kFPAliasing == AliasingKind::kIndependent) {
+        if (fixed_simd128_live_ranges) {
+          for (int i = 0; i < config()->num_allocatable_simd128_registers();
+               ++i) {
+            int code = config()->GetAllocatableSimd128Code(i);
+            TopLevelLiveRange* range =
+                FixedSIMD128LiveRangeFor(code, spill_mode);
+            range->AddUseInterval(curr_position, curr_position.End(),
+                                  allocation_zone(), data()->is_trace_alloc());
+          }
+        }
       }
     }
 
@@ -2715,7 +2794,7 @@
       allocatable_register_codes_(
           GetAllocatableRegisterCodes(data->config(), kind)),
       check_fp_aliasing_(false) {
-  if (!kSimpleFPAliasing && kind == RegisterKind::kDouble) {
+  if (kFPAliasing == AliasingKind::kCombine && kind == RegisterKind::kDouble) {
     check_fp_aliasing_ = (data->code()->representation_mask() &
                           (kFloat32Bit | kSimd128Bit)) != 0;
   }
@@ -2923,9 +3002,14 @@
 
 const char* RegisterAllocator::RegisterName(int register_code) const {
   if (register_code == kUnassignedRegister) return "unassigned";
-  return mode() == RegisterKind::kGeneral
-             ? i::RegisterName(Register::from_code(register_code))
-             : i::RegisterName(DoubleRegister::from_code(register_code));
+  switch (mode()) {
+    case RegisterKind::kGeneral:
+      return i::RegisterName(Register::from_code(register_code));
+    case RegisterKind::kDouble:
+      return i::RegisterName(DoubleRegister::from_code(register_code));
+    case RegisterKind::kSimd128:
+      return i::RegisterName(Simd128Register::from_code(register_code));
+  }
 }
 
 LinearScanAllocator::LinearScanAllocator(TopTierRegisterAllocationData* data,
@@ -3077,11 +3161,12 @@
   // intersection for the entire future.
   LifetimePosition new_end = range->End();
   for (int cur_reg = 0; cur_reg < num_registers(); ++cur_reg) {
-    if ((kSimpleFPAliasing || !check_fp_aliasing()) && cur_reg != reg) {
+    if ((kFPAliasing != AliasingKind::kCombine || !check_fp_aliasing()) &&
+        cur_reg != reg) {
       continue;
     }
     for (const LiveRange* cur_inactive : inactive_live_ranges(cur_reg)) {
-      if (!kSimpleFPAliasing && check_fp_aliasing() &&
+      if (kFPAliasing == AliasingKind::kCombine && check_fp_aliasing() &&
           !data()->config()->AreAliases(cur_inactive->representation(), cur_reg,
                                         range->representation(), reg)) {
         continue;
@@ -3293,7 +3378,8 @@
                             std::function<bool(TopLevelLiveRange*)> filter,
                             RangeWithRegisterSet* to_be_live,
                             bool* taken_registers) {
-    bool check_aliasing = !kSimpleFPAliasing && check_fp_aliasing();
+    bool check_aliasing =
+        kFPAliasing == AliasingKind::kCombine && check_fp_aliasing();
     for (const auto& val : counts) {
       if (!filter(val.first)) continue;
       if (val.second.count >= majority) {
@@ -3365,7 +3451,7 @@
                                                update_caches) {
         if (other->TopLevel()->IsFixed()) return;
         int reg = range->assigned_register();
-        if (kSimpleFPAliasing || !check_fp_aliasing()) {
+        if (kFPAliasing != AliasingKind::kCombine || !check_fp_aliasing()) {
           if (other->assigned_register() != reg) {
             return;
           }
@@ -3411,7 +3497,7 @@
         });
       }
       for (int reg = 0; reg < num_registers(); ++reg) {
-        if ((kSimpleFPAliasing || !check_fp_aliasing()) &&
+        if ((kFPAliasing != AliasingKind::kCombine || !check_fp_aliasing()) &&
             reg != range->assigned_register()) {
           continue;
         }
@@ -3432,7 +3518,7 @@
           }
         }
       }
-    } else {
+    } else if (mode() == RegisterKind::kDouble) {
       for (TopLevelLiveRange* current : data()->fixed_double_live_ranges()) {
         if (current != nullptr) {
           if (current->IsDeferredFixed()) {
@@ -3440,7 +3526,7 @@
           }
         }
       }
-      if (!kSimpleFPAliasing && check_fp_aliasing()) {
+      if (kFPAliasing == AliasingKind::kCombine && check_fp_aliasing()) {
         for (TopLevelLiveRange* current : data()->fixed_float_live_ranges()) {
           if (current != nullptr) {
             if (current->IsDeferredFixed()) {
@@ -3456,6 +3542,15 @@
           }
         }
       }
+    } else {
+      DCHECK_EQ(mode(), RegisterKind::kSimd128);
+      for (TopLevelLiveRange* current : data()->fixed_simd128_live_ranges()) {
+        if (current != nullptr) {
+          if (current->IsDeferredFixed()) {
+            add_to_inactive(current);
+          }
+        }
+      }
     }
   } else {
     // Remove all ranges.
@@ -3528,14 +3623,14 @@
         AddToInactive(current);
       }
     }
-  } else {
+  } else if (mode() == RegisterKind::kDouble) {
     for (TopLevelLiveRange* current : data()->fixed_double_live_ranges()) {
       if (current != nullptr) {
         if (current->IsDeferredFixed()) continue;
         AddToInactive(current);
       }
     }
-    if (!kSimpleFPAliasing && check_fp_aliasing()) {
+    if (kFPAliasing == AliasingKind::kCombine && check_fp_aliasing()) {
       for (TopLevelLiveRange* current : data()->fixed_float_live_ranges()) {
         if (current != nullptr) {
           if (current->IsDeferredFixed()) continue;
@@ -3549,6 +3644,14 @@
         }
       }
     }
+  } else {
+    DCHECK(mode() == RegisterKind::kSimd128);
+    for (TopLevelLiveRange* current : data()->fixed_simd128_live_ranges()) {
+      if (current != nullptr) {
+        if (current->IsDeferredFixed()) continue;
+        AddToInactive(current);
+      }
+    }
   }
 
   RpoNumber last_block = RpoNumber::FromInt(0);
@@ -3893,7 +3996,7 @@
 void LinearScanAllocator::GetFPRegisterSet(MachineRepresentation rep,
                                            int* num_regs, int* num_codes,
                                            const int** codes) const {
-  DCHECK(!kSimpleFPAliasing);
+  DCHECK_EQ(kFPAliasing, AliasingKind::kCombine);
   if (rep == MachineRepresentation::kFloat32) {
     *num_regs = data()->config()->num_float_registers();
     *num_codes = data()->config()->num_allocatable_float_registers();
@@ -3907,15 +4010,29 @@
   }
 }
 
+void LinearScanAllocator::GetSIMD128RegisterSet(int* num_regs, int* num_codes,
+                                                const int** codes) const {
+  DCHECK_EQ(kFPAliasing, AliasingKind::kIndependent);
+
+  *num_regs = data()->config()->num_simd128_registers();
+  *num_codes = data()->config()->num_allocatable_simd128_registers();
+  *codes = data()->config()->allocatable_simd128_codes();
+}
+
 void LinearScanAllocator::FindFreeRegistersForRange(
     LiveRange* range, base::Vector<LifetimePosition> positions) {
   int num_regs = num_registers();
   int num_codes = num_allocatable_registers();
   const int* codes = allocatable_register_codes();
   MachineRepresentation rep = range->representation();
-  if (!kSimpleFPAliasing && (rep == MachineRepresentation::kFloat32 ||
-                             rep == MachineRepresentation::kSimd128))
+  if (kFPAliasing == AliasingKind::kCombine &&
+      (rep == MachineRepresentation::kFloat32 ||
+       rep == MachineRepresentation::kSimd128)) {
     GetFPRegisterSet(rep, &num_regs, &num_codes, &codes);
+  } else if (kFPAliasing == AliasingKind::kIndependent &&
+             (rep == MachineRepresentation::kSimd128)) {
+    GetSIMD128RegisterSet(&num_regs, &num_codes, &codes);
+  }
   DCHECK_GE(positions.length(), num_regs);
 
   for (int i = 0; i < num_regs; ++i) {
@@ -3924,7 +4041,7 @@
 
   for (LiveRange* cur_active : active_live_ranges()) {
     int cur_reg = cur_active->assigned_register();
-    if (kSimpleFPAliasing || !check_fp_aliasing()) {
+    if (kFPAliasing != AliasingKind::kCombine || !check_fp_aliasing()) {
       positions[cur_reg] = LifetimePosition::GapFromInstructionIndex(0);
       TRACE("Register %s is free until pos %d (1) due to %d\n",
             RegisterName(cur_reg),
@@ -3949,7 +4066,7 @@
       // No need to carry out intersections, when this register won't be
       // interesting to this range anyway.
       // TODO(mtrofin): extend to aliased ranges, too.
-      if ((kSimpleFPAliasing || !check_fp_aliasing()) &&
+      if ((kFPAliasing != AliasingKind::kCombine || !check_fp_aliasing()) &&
           (positions[cur_reg] <= cur_inactive->NextStart() ||
            range->End() <= cur_inactive->NextStart())) {
         break;
@@ -3957,7 +4074,7 @@
       LifetimePosition next_intersection =
           cur_inactive->FirstIntersection(range);
       if (!next_intersection.IsValid()) continue;
-      if (kSimpleFPAliasing || !check_fp_aliasing()) {
+      if (kFPAliasing != AliasingKind::kCombine || !check_fp_aliasing()) {
         positions[cur_reg] = std::min(positions[cur_reg], next_intersection);
         TRACE("Register %s is free until pos %d (2)\n", RegisterName(cur_reg),
               positions[cur_reg].value());
@@ -4029,9 +4146,13 @@
   int num_codes = num_allocatable_registers();
   const int* codes = allocatable_register_codes();
   MachineRepresentation rep = current->representation();
-  if (!kSimpleFPAliasing && (rep == MachineRepresentation::kFloat32 ||
-                             rep == MachineRepresentation::kSimd128)) {
+  if (kFPAliasing == AliasingKind::kCombine &&
+      (rep == MachineRepresentation::kFloat32 ||
+       rep == MachineRepresentation::kSimd128)) {
     GetFPRegisterSet(rep, &num_regs, &num_codes, &codes);
+  } else if (kFPAliasing == AliasingKind::kIndependent &&
+             (rep == MachineRepresentation::kSimd128)) {
+    GetSIMD128RegisterSet(&num_regs, &num_codes, &codes);
   }
 
   DCHECK_GE(free_until_pos.length(), num_codes);
@@ -4137,7 +4258,7 @@
     int cur_reg = range->assigned_register();
     bool is_fixed_or_cant_spill =
         range->TopLevel()->IsFixed() || !range->CanBeSpilled(current->Start());
-    if (kSimpleFPAliasing || !check_fp_aliasing()) {
+    if (kFPAliasing != AliasingKind::kCombine || !check_fp_aliasing()) {
       if (is_fixed_or_cant_spill) {
         block_pos[cur_reg] = use_pos[cur_reg] =
             LifetimePosition::GapFromInstructionIndex(0);
@@ -4176,7 +4297,7 @@
       // Don't perform costly intersections if they are guaranteed to not update
       // block_pos or use_pos.
       // TODO(mtrofin): extend to aliased ranges, too.
-      if ((kSimpleFPAliasing || !check_fp_aliasing())) {
+      if ((kFPAliasing != AliasingKind::kCombine || !check_fp_aliasing())) {
         DCHECK_LE(use_pos[cur_reg], block_pos[cur_reg]);
         if (block_pos[cur_reg] <= range->NextStart()) break;
         if (!is_fixed && use_pos[cur_reg] <= range->NextStart()) continue;
@@ -4185,7 +4306,7 @@
       LifetimePosition next_intersection = range->FirstIntersection(current);
       if (!next_intersection.IsValid()) continue;
 
-      if (kSimpleFPAliasing || !check_fp_aliasing()) {
+      if (kFPAliasing != AliasingKind::kCombine || !check_fp_aliasing()) {
         if (is_fixed) {
           block_pos[cur_reg] = std::min(block_pos[cur_reg], next_intersection);
           use_pos[cur_reg] = std::min(block_pos[cur_reg], use_pos[cur_reg]);
@@ -4284,7 +4405,7 @@
   for (auto it = active_live_ranges().begin();
        it != active_live_ranges().end();) {
     LiveRange* range = *it;
-    if (kSimpleFPAliasing || !check_fp_aliasing()) {
+    if (kFPAliasing != AliasingKind::kCombine || !check_fp_aliasing()) {
       if (range->assigned_register() != reg) {
         ++it;
         continue;
@@ -4323,13 +4444,13 @@
   }
 
   for (int cur_reg = 0; cur_reg < num_registers(); ++cur_reg) {
-    if (kSimpleFPAliasing || !check_fp_aliasing()) {
+    if (kFPAliasing != AliasingKind::kCombine || !check_fp_aliasing()) {
       if (cur_reg != reg) continue;
     }
     for (auto it = inactive_live_ranges(cur_reg).begin();
          it != inactive_live_ranges(cur_reg).end();) {
       LiveRange* range = *it;
-      if (!kSimpleFPAliasing && check_fp_aliasing() &&
+      if (kFPAliasing == AliasingKind::kCombine && check_fp_aliasing() &&
           !data()->config()->AreAliases(current->representation(), reg,
                                         range->representation(), cur_reg)) {
         ++it;
diff --git a/src/compiler/backend/register-allocator.h b/src/compiler/backend/register-allocator.h
index 2a9e6dd..adb0dbd 100644
--- a/src/compiler/backend/register-allocator.h
+++ b/src/compiler/backend/register-allocator.h
@@ -372,8 +372,10 @@
   DelayedReferences delayed_references_;
   BitVector* assigned_registers_;
   BitVector* assigned_double_registers_;
+  BitVector* assigned_simd128_registers_;
   BitVector* fixed_register_use_;
   BitVector* fixed_fp_register_use_;
+  BitVector* fixed_simd128_register_use_;
   int virtual_register_count_;
   RangesWithPreassignedSlots preassigned_slot_ranges_;
   ZoneVector<ZoneVector<LiveRange*>> spill_state_;
@@ -1244,6 +1246,7 @@
   TopLevelLiveRange* FixedLiveRangeFor(int index, SpillMode spill_mode);
   TopLevelLiveRange* FixedFPLiveRangeFor(int index, MachineRepresentation rep,
                                          SpillMode spill_mode);
+  TopLevelLiveRange* FixedSIMD128LiveRangeFor(int index, SpillMode spill_mode);
 
   void MapPhiHint(InstructionOperand* operand, UsePosition* use_pos);
   void ResolvePhiHint(InstructionOperand* operand, UsePosition* use_pos);
@@ -1484,6 +1487,8 @@
       LiveRange* range, const base::Vector<LifetimePosition>& free_until_pos);
   void GetFPRegisterSet(MachineRepresentation rep, int* num_regs,
                         int* num_codes, const int** codes) const;
+  void GetSIMD128RegisterSet(int* num_regs, int* num_codes,
+                             const int** codes) const;
   void FindFreeRegistersForRange(LiveRange* range,
                                  base::Vector<LifetimePosition> free_until_pos);
   void ProcessCurrentRange(LiveRange* current, SpillMode spill_mode);
diff --git a/src/compiler/backend/riscv64/instruction-selector-riscv64.cc b/src/compiler/backend/riscv64/instruction-selector-riscv64.cc
index b6ced36..24593d8 100644
--- a/src/compiler/backend/riscv64/instruction-selector-riscv64.cc
+++ b/src/compiler/backend/riscv64/instruction-selector-riscv64.cc
@@ -3073,7 +3073,7 @@
 void InstructionSelector::VisitI32x4DotI16x8S(Node* node) {
   RiscvOperandGenerator g(this);
   InstructionOperand temp = g.TempFpRegister(v16);
-  InstructionOperand temp1 = g.TempFpRegister(v17);
+  InstructionOperand temp1 = g.TempFpRegister(v14);
   InstructionOperand temp2 = g.TempFpRegister(v30);
   InstructionOperand dst = g.DefineAsRegister(node);
   this->Emit(kRiscvVwmul, temp, g.UseRegister(node->InputAt(0)),
diff --git a/src/compiler/pipeline.cc b/src/compiler/pipeline.cc
index 5780ace..9f99917 100644
--- a/src/compiler/pipeline.cc
+++ b/src/compiler/pipeline.cc
@@ -2262,6 +2262,17 @@
   }
 };
 
+template <typename RegAllocator>
+struct AllocateSimd128RegistersPhase {
+  DECL_PIPELINE_PHASE_CONSTANTS(AllocateSIMD128Registers)
+
+  void Run(PipelineData* data, Zone* temp_zone) {
+    RegAllocator allocator(data->top_tier_register_allocation_data(),
+                           RegisterKind::kSimd128, temp_zone);
+    allocator.AllocateRegisters();
+  }
+};
+
 struct DecideSpillingModePhase {
   DECL_PIPELINE_PHASE_CONSTANTS(DecideSpillingMode)
 
@@ -3734,6 +3745,11 @@
     Run<AllocateFPRegistersPhase<LinearScanAllocator>>();
   }
 
+  if (data->sequence()->HasSimd128VirtualRegisters() &&
+      (kFPAliasing == AliasingKind::kIndependent)) {
+    Run<AllocateSimd128RegistersPhase<LinearScanAllocator>>();
+  }
+
   Run<DecideSpillingModePhase>();
   Run<AssignSpillSlotsPhase>();
   Run<CommitAssignmentPhase>();
diff --git a/src/logging/runtime-call-stats.h b/src/logging/runtime-call-stats.h
index 3453faa..ff2893f 100644
--- a/src/logging/runtime-call-stats.h
+++ b/src/logging/runtime-call-stats.h
@@ -317,8 +317,8 @@
   ADD_THREAD_SPECIFIC_COUNTER(V, Compile, ScopeAnalysis)                    \
   ADD_THREAD_SPECIFIC_COUNTER(V, Compile, Script)                           \
   ADD_THREAD_SPECIFIC_COUNTER(V, Compile, CompileTask)                      \
-                                                                            \
   ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, AllocateFPRegisters)             \
+  ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, AllocateSIMD128Registers)        \
   ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, AllocateGeneralRegisters)        \
   ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, AssembleCode)                    \
   ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, AssignSpillSlots)                \
diff --git a/src/wasm/baseline/liftoff-register.h b/src/wasm/baseline/liftoff-register.h
index a566ea7..a31570a 100644
--- a/src/wasm/baseline/liftoff-register.h
+++ b/src/wasm/baseline/liftoff-register.h
@@ -17,7 +17,7 @@
 namespace wasm {
 
 static constexpr bool kNeedI64RegPair = kSystemPointerSize == 4;
-static constexpr bool kNeedS128RegPair = !kSimpleFPAliasing;
+static constexpr bool kNeedS128RegPair = kFPAliasing == AliasingKind::kCombine;
 
 enum RegClass : uint8_t {
   kGpReg,
@@ -190,7 +190,7 @@
   // LiftoffRegister.
   static LiftoffRegister from_external_code(RegClass rc, ValueKind kind,
                                             int code) {
-    if (!kSimpleFPAliasing && kind == kF32) {
+    if (kFPAliasing == AliasingKind::kCombine && kind == kF32) {
       // Liftoff assumes a one-to-one mapping between float registers and
       // double registers, and so does not distinguish between f32 and f64
       // registers. The f32 register code must therefore be halved in order
diff --git a/test/cctest/cctest.status b/test/cctest/cctest.status
index 2481e45..b0ff48c 100644
--- a/test/cctest/cctest.status
+++ b/test/cctest/cctest.status
@@ -428,11 +428,6 @@
   'test-cpu-profiler/CrossScriptInliningCallerLineNumbers2': [SKIP],
 
   # SIMD not fully implemented yet.
-  'test-run-wasm-relaxed-simd/*': [SKIP],
-  'test-run-wasm-simd/RunWasm_F64x2ExtractLaneWithI64x2_liftoff': [SKIP],
-  'test-run-wasm-simd/RunWasm_I64x2ExtractWithF64x2_liftoff': [SKIP],
-  'test-run-wasm-simd-liftoff/*': [SKIP],
-  'test-run-wasm-simd/*':[SKIP],
   'test-gc/RunWasmLiftoff_RefTrivialCasts': [SKIP],
   'test-gc/RunWasmTurbofan_RefTrivialCasts': [SKIP],
 
diff --git a/test/cctest/compiler/test-code-generator.cc b/test/cctest/compiler/test-code-generator.cc
index f830373..f1658ab 100644
--- a/test/cctest/compiler/test-code-generator.cc
+++ b/test/cctest/compiler/test-code-generator.cc
@@ -460,7 +460,7 @@
         ((kDoubleRegisterCount % 2) == 0) && ((kDoubleRegisterCount % 3) == 0),
         "kDoubleRegisterCount should be a multiple of two and three.");
     for (int i = 0; i < kDoubleRegisterCount; i += 2) {
-      if (kSimpleFPAliasing) {
+      if (kFPAliasing != AliasingKind::kCombine) {
         // Allocate three registers at once if kSimd128 is supported, else
         // allocate in pairs.
         AddRegister(&test_signature, MachineRepresentation::kFloat32,
diff --git a/test/cctest/compiler/test-gap-resolver.cc b/test/cctest/compiler/test-gap-resolver.cc
index faa0367..664e41f 100644
--- a/test/cctest/compiler/test-gap-resolver.cc
+++ b/test/cctest/compiler/test-gap-resolver.cc
@@ -17,7 +17,7 @@
 // simplify ParallelMove equivalence testing.
 void GetCanonicalOperands(const InstructionOperand& op,
                           std::vector<InstructionOperand>* fragments) {
-  CHECK(!kSimpleFPAliasing);
+  CHECK_EQ(kFPAliasing, AliasingKind::kCombine);
   CHECK(op.IsFPLocationOperand());
   const LocationOperand& loc = LocationOperand::cast(op);
   MachineRepresentation rep = loc.representation();
@@ -51,7 +51,7 @@
       CHECK(!m->IsRedundant());
       const InstructionOperand& src = m->source();
       const InstructionOperand& dst = m->destination();
-      if (!kSimpleFPAliasing && src.IsFPLocationOperand() &&
+      if (kFPAliasing == AliasingKind::kCombine && src.IsFPLocationOperand() &&
           dst.IsFPLocationOperand()) {
         // Canonicalize FP location-location moves by fragmenting them into
         // an equivalent sequence of float32 moves, to simplify state
@@ -137,8 +137,15 @@
       // Preserve FP representation when FP register aliasing is complex.
       // Otherwise, canonicalize to kFloat64.
       if (IsFloatingPoint(loc_op.representation())) {
-        rep = kSimpleFPAliasing ? MachineRepresentation::kFloat64
-                                : loc_op.representation();
+        if (kFPAliasing == AliasingKind::kIndependent) {
+          rep = IsSimd128(loc_op.representation())
+                    ? MachineRepresentation::kSimd128
+                    : MachineRepresentation::kFloat64;
+        } else if (kFPAliasing == AliasingKind::kOverlap) {
+          rep = MachineRepresentation::kFloat64;
+        } else {
+          rep = loc_op.representation();
+        }
       }
       if (loc_op.IsAnyRegister()) {
         index = loc_op.register_code();
@@ -234,7 +241,8 @@
       // On architectures where FP register aliasing is non-simple, update the
       // destinations set with the float equivalents of the operand and check
       // that all destinations are unique and do not alias each other.
-      if (!kSimpleFPAliasing && mo.destination().IsFPLocationOperand()) {
+      if (kFPAliasing == AliasingKind::kCombine &&
+          mo.destination().IsFPLocationOperand()) {
         std::vector<InstructionOperand> dst_fragments;
         GetCanonicalOperands(dst, &dst_fragments);
         CHECK(!dst_fragments.empty());
@@ -383,7 +391,7 @@
 
 TEST(Aliasing) {
   // On platforms with simple aliasing, these parallel moves are ill-formed.
-  if (kSimpleFPAliasing) return;
+  if (kFPAliasing != AliasingKind::kCombine) return;
 
   ParallelMoveCreator pmc;
   Zone* zone = pmc.main_zone();
diff --git a/test/unittests/codegen/register-configuration-unittest.cc b/test/unittests/codegen/register-configuration-unittest.cc
index 060370b..cd96cfa 100644
--- a/test/unittests/codegen/register-configuration-unittest.cc
+++ b/test/unittests/codegen/register-configuration-unittest.cc
@@ -26,10 +26,10 @@
   int general_codes[kNumAllocatableGeneralRegs] = {1, 2};
   int double_codes[kNumAllocatableDoubleRegs] = {2, 3};
 
-  RegisterConfiguration test(kNumGeneralRegs, kNumDoubleRegs,
-                             kNumAllocatableGeneralRegs,
-                             kNumAllocatableDoubleRegs, general_codes,
-                             double_codes, RegisterConfiguration::OVERLAP);
+  RegisterConfiguration test(AliasingKind::kOverlap, kNumGeneralRegs,
+                             kNumDoubleRegs, 0, kNumAllocatableGeneralRegs,
+                             kNumAllocatableDoubleRegs, 0, general_codes,
+                             double_codes);
 
   EXPECT_EQ(test.num_general_registers(), kNumGeneralRegs);
   EXPECT_EQ(test.num_double_registers(), kNumDoubleRegs);
@@ -62,10 +62,10 @@
   int general_codes[] = {1, 2};
   int double_codes[] = {2, 3, 16};  // reg 16 should not alias registers 32, 33.
 
-  RegisterConfiguration test(kNumGeneralRegs, kNumDoubleRegs,
-                             kNumAllocatableGeneralRegs,
-                             kNumAllocatableDoubleRegs, general_codes,
-                             double_codes, RegisterConfiguration::COMBINE);
+  RegisterConfiguration test(AliasingKind::kCombine, kNumGeneralRegs,
+                             kNumDoubleRegs, 0, kNumAllocatableGeneralRegs,
+                             kNumAllocatableDoubleRegs, 0, general_codes,
+                             double_codes);
 
   // There are 3 allocatable double regs, but only 2 can alias float regs.
   EXPECT_EQ(test.num_allocatable_float_registers(), 4);
diff --git a/test/unittests/compiler/backend/instruction-sequence-unittest.cc b/test/unittests/compiler/backend/instruction-sequence-unittest.cc
index d644906..5d049e0 100644
--- a/test/unittests/compiler/backend/instruction-sequence-unittest.cc
+++ b/test/unittests/compiler/backend/instruction-sequence-unittest.cc
@@ -24,6 +24,7 @@
     : sequence_(nullptr),
       num_general_registers_(Register::kNumRegisters),
       num_double_registers_(DoubleRegister::kNumRegisters),
+      num_simd128_registers_(Simd128Register::kNumRegisters),
       instruction_blocks_(zone()),
       current_block_(nullptr),
       block_returns_(false) {}
@@ -69,11 +70,10 @@
 const RegisterConfiguration* InstructionSequenceTest::config() {
   if (!config_) {
     config_.reset(new RegisterConfiguration(
-        num_general_registers_, num_double_registers_, num_general_registers_,
-        num_double_registers_, kAllocatableCodes.data(),
-        kAllocatableCodes.data(),
-        kSimpleFPAliasing ? RegisterConfiguration::OVERLAP
-                          : RegisterConfiguration::COMBINE));
+        kFPAliasing, num_general_registers_, num_double_registers_,
+        num_simd128_registers_, num_general_registers_, num_double_registers_,
+        num_simd128_registers_, kAllocatableCodes.data(),
+        kAllocatableCodes.data(), kAllocatableCodes.data()));
   }
   return config_.get();
 }
diff --git a/test/unittests/compiler/backend/instruction-sequence-unittest.h b/test/unittests/compiler/backend/instruction-sequence-unittest.h
index 0a8768d..f624b91 100644
--- a/test/unittests/compiler/backend/instruction-sequence-unittest.h
+++ b/test/unittests/compiler/backend/instruction-sequence-unittest.h
@@ -279,6 +279,7 @@
   InstructionSequence* sequence_;
   int num_general_registers_;
   int num_double_registers_;
+  int num_simd128_registers_;
 
   // Block building state.
   InstructionBlocks instruction_blocks_;
diff --git a/test/unittests/compiler/backend/instruction-unittest.cc b/test/unittests/compiler/backend/instruction-unittest.cc
index 0a36179..2cbc5fc 100644
--- a/test/unittests/compiler/backend/instruction-unittest.cc
+++ b/test/unittests/compiler/backend/instruction-unittest.cc
@@ -85,7 +85,7 @@
     EXPECT_TRUE(Interfere(LocationOperand::STACK_SLOT, kDouble, i, kDouble, i));
   }
 
-  if (kSimpleFPAliasing) {
+  if (kFPAliasing != AliasingKind::kCombine) {
     // Simple FP aliasing: interfering registers of different reps have the same
     // index.
     for (int i = 0; i < RegisterConfiguration::kMaxFPRegisters; ++i) {
@@ -162,7 +162,7 @@
     CHECK(Contains(&to_eliminate, d2, d0));
   }
 
-  if (!kSimpleFPAliasing) {
+  if (kFPAliasing == AliasingKind::kCombine) {
     // Moves inserted after should cause all interfering moves to be eliminated.
     auto s0 = AllocatedOperand(LocationOperand::REGISTER,
                                MachineRepresentation::kFloat32, 0);
diff --git a/test/unittests/compiler/regalloc/move-optimizer-unittest.cc b/test/unittests/compiler/regalloc/move-optimizer-unittest.cc
index 344ea3d..4a26bbc 100644
--- a/test/unittests/compiler/regalloc/move-optimizer-unittest.cc
+++ b/test/unittests/compiler/regalloc/move-optimizer-unittest.cc
@@ -360,7 +360,7 @@
   EmitNop();
   Instruction* first_instr = LastInstruction();
   AddMove(first_instr, FPReg(4, kFloat64), FPReg(1, kFloat64));
-  if (!kSimpleFPAliasing) {
+  if (kFPAliasing == AliasingKind::kCombine) {
     // We clobber q0 below. This is aliased by d0, d1, s0, s1, s2, and s3.
     // Add moves to registers s2 and s3.
     AddMove(first_instr, FPReg(10, kFloat32), FPReg(0, kFloat32));