[msvc] Minor MSVC-friendly changes

Replaced the GCC-specific __sync_synchronize() with the more general C++11
std::atomic_thread_fence(std::memory_order_seq_cst).

Replaced the GCC-specific __attribute__((aligned(n))) with the more general
C++11 alignas(n).

Change-Id: Ie59df1b74305e2afdda8b23c9d5cd9a57bd97eef
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/4403215
Commit-Queue: Jakob Kummerow <jkummerow@chromium.org>
Reviewed-by: Jakob Kummerow <jkummerow@chromium.org>
Cr-Commit-Position: refs/heads/main@{#87033}
diff --git a/AUTHORS b/AUTHORS
index d84c722..14febd2 100644
--- a/AUTHORS
+++ b/AUTHORS
@@ -243,6 +243,7 @@
 Shawn Anastasio <shawnanastasio@gmail.com>
 Shawn Presser <shawnpresser@gmail.com>
 Stefan Penner <stefan.penner@gmail.com>
+Stefan Stojanovic <stefko.stojanovic@gmail.com>
 Stephan Hartmann <stha09@googlemail.com>
 Stephen Belanger <stephen.belanger@datadoghq.com>
 Sylvestre Ledru <sledru@mozilla.com>
diff --git a/src/execution/arm64/simulator-arm64.cc b/src/execution/arm64/simulator-arm64.cc
index 4fcb07c..829638b 100644
--- a/src/execution/arm64/simulator-arm64.cc
+++ b/src/execution/arm64/simulator-arm64.cc
@@ -2554,7 +2554,7 @@
   T data = MemoryRead<T>(address);
   if (is_acquire) {
     // Approximate load-acquire by issuing a full barrier after the load.
-    __sync_synchronize();
+    std::atomic_thread_fence(std::memory_order_seq_cst);
   }
 
   if (data == comparevalue) {
@@ -2564,7 +2564,7 @@
       local_monitor_.NotifyStore();
       GlobalMonitor::Get()->NotifyStore_Locked(&global_monitor_processor_);
       // Approximate store-release by issuing a full barrier before the store.
-      __sync_synchronize();
+      std::atomic_thread_fence(std::memory_order_seq_cst);
     }
 
     MemoryWrite<T>(address, newvalue);
@@ -2610,7 +2610,7 @@
 
   if (is_acquire) {
     // Approximate load-acquire by issuing a full barrier after the load.
-    __sync_synchronize();
+    std::atomic_thread_fence(std::memory_order_seq_cst);
   }
 
   bool same =
@@ -2622,7 +2622,7 @@
       local_monitor_.NotifyStore();
       GlobalMonitor::Get()->NotifyStore_Locked(&global_monitor_processor_);
       // Approximate store-release by issuing a full barrier before the store.
-      __sync_synchronize();
+      std::atomic_thread_fence(std::memory_order_seq_cst);
     }
 
     MemoryWrite<T>(address, newvalue_low);
@@ -2666,7 +2666,7 @@
 
   if (is_acquire) {
     // Approximate load-acquire by issuing a full barrier after the load.
-    __sync_synchronize();
+    std::atomic_thread_fence(std::memory_order_seq_cst);
   }
 
   T result = 0;
@@ -2703,7 +2703,7 @@
     local_monitor_.NotifyStore();
     GlobalMonitor::Get()->NotifyStore_Locked(&global_monitor_processor_);
     // Approximate store-release by issuing a full barrier before the store.
-    __sync_synchronize();
+    std::atomic_thread_fence(std::memory_order_seq_cst);
   }
 
   MemoryWrite<T>(address, result);
@@ -2734,7 +2734,7 @@
   T data = MemoryRead<T>(address);
   if (is_acquire) {
     // Approximate load-acquire by issuing a full barrier after the load.
-    __sync_synchronize();
+    std::atomic_thread_fence(std::memory_order_seq_cst);
   }
 
   if (is_release) {
@@ -2742,7 +2742,7 @@
     local_monitor_.NotifyStore();
     GlobalMonitor::Get()->NotifyStore_Locked(&global_monitor_processor_);
     // Approximate store-release by issuing a full barrier before the store.
-    __sync_synchronize();
+    std::atomic_thread_fence(std::memory_order_seq_cst);
   }
   MemoryWrite<T>(address, reg<T>(rs));
 
@@ -3739,11 +3739,7 @@
         UNIMPLEMENTED();
     }
   } else if (instr->Mask(MemBarrierFMask) == MemBarrierFixed) {
-#if defined(V8_OS_WIN)
-    MemoryBarrier();
-#else
-    __sync_synchronize();
-#endif
+    std::atomic_thread_fence(std::memory_order_seq_cst);
   } else {
     UNIMPLEMENTED();
   }
diff --git a/test/cctest/test-assembler-arm64.cc b/test/cctest/test-assembler-arm64.cc
index b227225..7553884 100644
--- a/test/cctest/test-assembler-arm64.cc
+++ b/test/cctest/test-assembler-arm64.cc
@@ -14771,12 +14771,12 @@
                                 AtomicMemoryStoreSignature* store_funcs,
                                 uint64_t arg1, uint64_t arg2, uint64_t expected,
                                 uint64_t result_mask) {
-  uint64_t data0[] __attribute__((aligned(kXRegSize * 2))) = {arg2, 0};
-  uint64_t data1[] __attribute__((aligned(kXRegSize * 2))) = {arg2, 0};
-  uint64_t data2[] __attribute__((aligned(kXRegSize * 2))) = {arg2, 0};
-  uint64_t data3[] __attribute__((aligned(kXRegSize * 2))) = {arg2, 0};
-  uint64_t data4[] __attribute__((aligned(kXRegSize * 2))) = {arg2, 0};
-  uint64_t data5[] __attribute__((aligned(kXRegSize * 2))) = {arg2, 0};
+  alignas(kXRegSize * 2) uint64_t data0[] = {arg2, 0};
+  alignas(kXRegSize * 2) uint64_t data1[] = {arg2, 0};
+  alignas(kXRegSize * 2) uint64_t data2[] = {arg2, 0};
+  alignas(kXRegSize * 2) uint64_t data3[] = {arg2, 0};
+  alignas(kXRegSize * 2) uint64_t data4[] = {arg2, 0};
+  alignas(kXRegSize * 2) uint64_t data5[] = {arg2, 0};
 
   SETUP();
   SETUP_FEATURE(LSE);
@@ -14838,12 +14838,12 @@
                                 AtomicMemoryStoreSignature* store_funcs,
                                 uint64_t arg1, uint64_t arg2,
                                 uint64_t expected) {
-  uint64_t data0[] __attribute__((aligned(kXRegSize * 2))) = {arg2, 0};
-  uint64_t data1[] __attribute__((aligned(kXRegSize * 2))) = {arg2, 0};
-  uint64_t data2[] __attribute__((aligned(kXRegSize * 2))) = {arg2, 0};
-  uint64_t data3[] __attribute__((aligned(kXRegSize * 2))) = {arg2, 0};
-  uint64_t data4[] __attribute__((aligned(kXRegSize * 2))) = {arg2, 0};
-  uint64_t data5[] __attribute__((aligned(kXRegSize * 2))) = {arg2, 0};
+  alignas(kXRegSize * 2) uint64_t data0[] = {arg2, 0};
+  alignas(kXRegSize * 2) uint64_t data1[] = {arg2, 0};
+  alignas(kXRegSize * 2) uint64_t data2[] = {arg2, 0};
+  alignas(kXRegSize * 2) uint64_t data3[] = {arg2, 0};
+  alignas(kXRegSize * 2) uint64_t data4[] = {arg2, 0};
+  alignas(kXRegSize * 2) uint64_t data5[] = {arg2, 0};
 
   SETUP();
   SETUP_FEATURE(LSE);