third_party/tcmalloc: Remove unused atomics Acquire_Store/Release_Load

https://github.com/gperftools/gperftools/pull/1249

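Acquire_Store and Release_Load are the barrier-flavored store/load
variants that nothing in tcmalloc uses any more. This removes them from
every per-architecture atomicops implementation, from the AtomicWord
wrappers in atomicops.h, and from atomicops_unittest.cc.

For reference, a sketch of the removed variants in their generic form
(as most of the per-architecture headers implemented them), next to the
Release_Store/Acquire_Load pair that stays. The Atomic32 overloads are
shown; the Atomic64 ones mirror them, and all names below come from the
implementations deleted in the diff:

  // Removed: store first, then a full barrier.
  inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
    *ptr = value;
    MemoryBarrier();
  }

  // Removed: full barrier first, then the load.
  inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
    MemoryBarrier();
    return *ptr;
  }

  // Kept: barrier before the store ...
  inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
    MemoryBarrier();
    *ptr = value;
  }

  // ... and barrier after the load.
  inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
    Atomic32 value = *ptr;
    MemoryBarrier();
    return value;
  }

If the removed ordering is ever needed again, an explicit
MemoryBarrier() next to a NoBarrier_Store or NoBarrier_Load gives the
same effect.
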
Bug: 420970
Change-Id: I5fcdec030be36894644f3b098943e6017a743f70
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/2689003
Commit-Queue: Venkatesh Srinivas <venkateshs@chromium.org>
Reviewed-by: Alexander Potapenko <glider@chromium.org>
Reviewed-by: Primiano Tucci <primiano@chromium.org>
Reviewed-by: Will Harris <wfh@chromium.org>
Cr-Commit-Position: refs/heads/master@{#854094}
GitOrigin-RevId: bf1568c318feea74ae2f2f946bf52901e7df1315
diff --git a/src/base/atomicops-internals-arm-generic.h b/src/base/atomicops-internals-arm-generic.h
index 2256bbc..c12a84d 100644
--- a/src/base/atomicops-internals-arm-generic.h
+++ b/src/base/atomicops-internals-arm-generic.h
@@ -122,11 +122,6 @@
   pLinuxKernelMemoryBarrier();
 }
 
-inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
-  *ptr = value;
-  MemoryBarrier();
-}
-
 inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
   MemoryBarrier();
   *ptr = value;
@@ -142,11 +137,6 @@
   return value;
 }
 
-inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
-  MemoryBarrier();
-  return *ptr;
-}
-
 
 // 64-bit versions are not implemented yet.
 
@@ -185,10 +175,6 @@
   NotImplementedFatalError("NoBarrier_Store");
 }
 
-inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
-  NotImplementedFatalError("Acquire_Store64");
-}
-
 inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
   NotImplementedFatalError("Release_Store");
 }
@@ -203,11 +189,6 @@
   return 0;
 }
 
-inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
-  NotImplementedFatalError("Atomic64 Release_Load");
-  return 0;
-}
-
 inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
                                        Atomic64 old_value,
                                        Atomic64 new_value) {
diff --git a/src/base/atomicops-internals-arm-v6plus.h b/src/base/atomicops-internals-arm-v6plus.h
index a7df765..afcd79c 100644
--- a/src/base/atomicops-internals-arm-v6plus.h
+++ b/src/base/atomicops-internals-arm-v6plus.h
@@ -137,11 +137,6 @@
   *ptr = value;
 }
 
-inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
-  *ptr = value;
-  MemoryBarrier();
-}
-
 inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
   MemoryBarrier();
   *ptr = value;
@@ -157,11 +152,6 @@
   return value;
 }
 
-inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
-  MemoryBarrier();
-  return *ptr;
-}
-
 // 64-bit versions are only available if LDREXD and STREXD instructions
 // are available.
 #ifdef BASE_ATOMICOPS_HAS_LDREXD_AND_STREXD
@@ -289,11 +279,6 @@
 
 #endif // BASE_ATOMICOPS_HAS_LDREXD_AND_STREXD
 
-inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
-  NoBarrier_Store(ptr, value);
-  MemoryBarrier();
-}
-
 inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
   MemoryBarrier();
   NoBarrier_Store(ptr, value);
@@ -305,11 +290,6 @@
   return value;
 }
 
-inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
-  MemoryBarrier();
-  return NoBarrier_Load(ptr);
-}
-
 inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
                                        Atomic64 old_value,
                                        Atomic64 new_value) {
diff --git a/src/base/atomicops-internals-gcc.h b/src/base/atomicops-internals-gcc.h
index f8d2786..0dcf03e 100644
--- a/src/base/atomicops-internals-gcc.h
+++ b/src/base/atomicops-internals-gcc.h
@@ -99,11 +99,6 @@
   *ptr = value;
 }
 
-inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
-  *ptr = value;
-  MemoryBarrier();
-}
-
 inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
   MemoryBarrier();
   *ptr = value;
@@ -119,11 +114,6 @@
   return value;
 }
 
-inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
-  MemoryBarrier();
-  return *ptr;
-}
-
 // 64-bit versions
 
 inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
@@ -172,11 +162,6 @@
   *ptr = value;
 }
 
-inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
-  *ptr = value;
-  MemoryBarrier();
-}
-
 inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
   MemoryBarrier();
   *ptr = value;
@@ -192,11 +177,6 @@
   return value;
 }
 
-inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
-  MemoryBarrier();
-  return *ptr;
-}
-
 }  // namespace base::subtle
 }  // namespace base
 
diff --git a/src/base/atomicops-internals-linuxppc.h b/src/base/atomicops-internals-linuxppc.h
index b52fdf0..73aa156 100644
--- a/src/base/atomicops-internals-linuxppc.h
+++ b/src/base/atomicops-internals-linuxppc.h
@@ -359,14 +359,6 @@
   *ptr = value;
 }
 
-inline void Acquire_Store(volatile Atomic32 *ptr, Atomic32 value) {
-  *ptr = value;
-  // This can't be _lwsync(); we need to order the immediately
-  // preceding stores against any load that may follow, but lwsync
-  // doesn't guarantee that.
-  _sync();
-}
-
 inline void Release_Store(volatile Atomic32 *ptr, Atomic32 value) {
   _lwsync();
   *ptr = value;
@@ -382,14 +374,6 @@
   return value;
 }
 
-inline Atomic32 Release_Load(volatile const Atomic32 *ptr) {
-  // This can't be _lwsync(); we need to order the immediately
-  // preceding stores against any load that may follow, but lwsync
-  // doesn't guarantee that.
-  _sync();
-  return *ptr;
-}
-
 #ifdef __PPC64__
 
 // 64-bit Versions.
@@ -398,14 +382,6 @@
   *ptr = value;
 }
 
-inline void Acquire_Store(volatile Atomic64 *ptr, Atomic64 value) {
-  *ptr = value;
-  // This can't be _lwsync(); we need to order the immediately
-  // preceding stores against any load that may follow, but lwsync
-  // doesn't guarantee that.
-  _sync();
-}
-
 inline void Release_Store(volatile Atomic64 *ptr, Atomic64 value) {
   _lwsync();
   *ptr = value;
@@ -421,14 +397,6 @@
   return value;
 }
 
-inline Atomic64 Release_Load(volatile const Atomic64 *ptr) {
-  // This can't be _lwsync(); we need to order the immediately
-  // preceding stores against any load that may follow, but lwsync
-  // doesn't guarantee that.
-  _sync();
-  return *ptr;
-}
-
 #endif
 
 }   // namespace base::subtle
diff --git a/src/base/atomicops-internals-macosx.h b/src/base/atomicops-internals-macosx.h
index b5130d4..c21f606 100644
--- a/src/base/atomicops-internals-macosx.h
+++ b/src/base/atomicops-internals-macosx.h
@@ -172,11 +172,6 @@
   *ptr = value;
 }
 
-inline void Acquire_Store(volatile Atomic32 *ptr, Atomic32 value) {
-  *ptr = value;
-  MemoryBarrier();
-}
-
 inline void Release_Store(volatile Atomic32 *ptr, Atomic32 value) {
   MemoryBarrier();
   *ptr = value;
@@ -192,11 +187,6 @@
   return value;
 }
 
-inline Atomic32 Release_Load(volatile const Atomic32 *ptr) {
-  MemoryBarrier();
-  return *ptr;
-}
-
 // 64-bit version
 
 inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64 *ptr,
@@ -268,11 +258,6 @@
   *ptr = value;
 }
 
-inline void Acquire_Store(volatile Atomic64 *ptr, Atomic64 value) {
-  *ptr = value;
-  MemoryBarrier();
-}
-
 inline void Release_Store(volatile Atomic64 *ptr, Atomic64 value) {
   MemoryBarrier();
   *ptr = value;
@@ -288,11 +273,6 @@
   return value;
 }
 
-inline Atomic64 Release_Load(volatile const Atomic64 *ptr) {
-  MemoryBarrier();
-  return *ptr;
-}
-
 #else
 
 // 64-bit implementation on 32-bit platform
@@ -342,11 +322,6 @@
 #endif
 
 
-inline void Acquire_Store(volatile Atomic64 *ptr, Atomic64 value) {
-  NoBarrier_Store(ptr, value);
-  MemoryBarrier();
-}
-
 inline void Release_Store(volatile Atomic64 *ptr, Atomic64 value) {
   MemoryBarrier();
   NoBarrier_Store(ptr, value);
@@ -358,10 +333,6 @@
   return value;
 }
 
-inline Atomic64 Release_Load(volatile const Atomic64 *ptr) {
-  MemoryBarrier();
-  return NoBarrier_Load(ptr);
-}
 #endif  // __LP64__
 
 }   // namespace base::subtle
diff --git a/src/base/atomicops-internals-mips.h b/src/base/atomicops-internals-mips.h
index 4bfd7f6..58e0f14 100644
--- a/src/base/atomicops-internals-mips.h
+++ b/src/base/atomicops-internals-mips.h
@@ -161,12 +161,6 @@
     return NoBarrier_AtomicExchange(ptr, new_value);
 }
 
-inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value)
-{
-    *ptr = value;
-    MemoryBarrier();
-}
-
 inline void Release_Store(volatile Atomic32* ptr, Atomic32 value)
 {
     MemoryBarrier();
@@ -185,12 +179,6 @@
     return value;
 }
 
-inline Atomic32 Release_Load(volatile const Atomic32* ptr)
-{
-    MemoryBarrier();
-    return *ptr;
-}
-
 #if (_MIPS_ISA == _MIPS_ISA_MIPS64) || (_MIPS_SIM == _MIPS_SIM_ABI64)
 
 typedef int64_t Atomic64;
@@ -285,12 +273,6 @@
     return NoBarrier_AtomicExchange(ptr, new_value);
 }
 
-inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value)
-{
-    *ptr = value;
-    MemoryBarrier();
-}
-
 inline void Release_Store(volatile Atomic64* ptr, Atomic64 value)
 {
     MemoryBarrier();
@@ -309,12 +291,6 @@
     return value;
 }
 
-inline Atomic64 Release_Load(volatile const Atomic64* ptr)
-{
-    MemoryBarrier();
-    return *ptr;
-}
-
 #endif
 
 }   // namespace base::subtle
diff --git a/src/base/atomicops-internals-windows.h b/src/base/atomicops-internals-windows.h
index 9d0d806..ebd5742 100644
--- a/src/base/atomicops-internals-windows.h
+++ b/src/base/atomicops-internals-windows.h
@@ -189,10 +189,6 @@
   *ptr = value;
 }
 
-inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
-  Acquire_AtomicExchange(ptr, value);
-}
-
 inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
   *ptr = value; // works w/o barrier for current Intel chips as of June 2005
   // See comments in Atomic64 version of Release_Store() below.
@@ -207,11 +203,6 @@
   return value;
 }
 
-inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
-  MemoryBarrier();
-  return *ptr;
-}
-
 // 64-bit operations
 
 #if defined(_WIN64) || defined(__MINGW64__)
@@ -299,11 +290,6 @@
   *ptr = value;
 }
 
-inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
-  NoBarrier_AtomicExchange(ptr, value);
-              // acts as a barrier in this implementation
-}
-
 inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
   *ptr = value; // works w/o barrier for current Intel chips as of June 2005
 
@@ -324,11 +310,6 @@
   return value;
 }
 
-inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
-  MemoryBarrier();
-  return *ptr;
-}
-
 #else  // defined(_WIN64) || defined(__MINGW64__)
 
 // 64-bit low-level operations on 32-bit platform
@@ -394,11 +375,6 @@
   	}
 }
 
-inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
-  NoBarrier_AtomicExchange(ptr, value);
-              // acts as a barrier in this implementation
-}
-
 inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
   NoBarrier_Store(ptr, value);
 }
@@ -420,11 +396,6 @@
   return value;
 }
 
-inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
-  MemoryBarrier();
-  return NoBarrier_Load(ptr);
-}
-
 #endif  // defined(_WIN64) || defined(__MINGW64__)
 
 
diff --git a/src/base/atomicops-internals-x86.h b/src/base/atomicops-internals-x86.h
index e441ac7..4eadacb 100644
--- a/src/base/atomicops-internals-x86.h
+++ b/src/base/atomicops-internals-x86.h
@@ -128,11 +128,6 @@
   __asm__ __volatile__("mfence" : : : "memory");
 }
 
-inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
-  *ptr = value;
-  MemoryBarrier();
-}
-
 #else
 
 inline void MemoryBarrier() {
@@ -144,14 +139,6 @@
   }
 }
 
-inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
-  if (AtomicOps_Internalx86CPUFeatures.has_sse2) {
-    *ptr = value;
-    __asm__ __volatile__("mfence" : : : "memory");
-  } else {
-    Acquire_AtomicExchange(ptr, value);
-  }
-}
 #endif
 
 inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
@@ -171,11 +158,6 @@
   return value;
 }
 
-inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
-  MemoryBarrier();
-  return *ptr;
-}
-
 #if defined(__x86_64__)
 
 // 64-bit low-level operations on 64-bit platform.
@@ -216,11 +198,6 @@
   *ptr = value;
 }
 
-inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
-  *ptr = value;
-  MemoryBarrier();
-}
-
 inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
   ATOMICOPS_COMPILER_BARRIER();
 
@@ -254,11 +231,6 @@
   return value;
 }
 
-inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
-  MemoryBarrier();
-  return *ptr;
-}
-
 #else // defined(__x86_64__)
 
 // 64-bit low-level operations on 32-bit platform.
@@ -333,11 +305,6 @@
                          "mm2", "mm3", "mm4", "mm5", "mm6", "mm7");
 }
 
-inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
-  NoBarrier_Store(ptr, value);
-  MemoryBarrier();
-}
-
 inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
   ATOMICOPS_COMPILER_BARRIER();
   NoBarrier_Store(ptr, value);
@@ -363,11 +330,6 @@
   return value;
 }
 
-inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
-  MemoryBarrier();
-  return NoBarrier_Load(ptr);
-}
-
 #endif // defined(__x86_64__)
 
 inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
diff --git a/src/base/atomicops.h b/src/base/atomicops.h
index dac95be..2f0bc5b 100644
--- a/src/base/atomicops.h
+++ b/src/base/atomicops.h
@@ -205,11 +205,6 @@
       reinterpret_cast<volatile AtomicWordCastType*>(ptr), value);
 }
 
-inline void Acquire_Store(volatile AtomicWord* ptr, AtomicWord value) {
-  return base::subtle::Acquire_Store(
-      reinterpret_cast<volatile AtomicWordCastType*>(ptr), value);
-}
-
 inline void Release_Store(volatile AtomicWord* ptr, AtomicWord value) {
   return base::subtle::Release_Store(
       reinterpret_cast<volatile AtomicWordCastType*>(ptr), value);
@@ -225,11 +220,6 @@
       reinterpret_cast<volatile const AtomicWordCastType*>(ptr));
 }
 
-inline AtomicWord Release_Load(volatile const AtomicWord* ptr) {
-  return base::subtle::Release_Load(
-      reinterpret_cast<volatile const AtomicWordCastType*>(ptr));
-}
-
 }  // namespace base::subtle
 }  // namespace base
 #endif  // AtomicWordCastType
@@ -268,11 +258,9 @@
                                 Atomic32 old_value,
                                 Atomic32 new_value);
 void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value);
-void Acquire_Store(volatile Atomic32* ptr, Atomic32 value);
 void Release_Store(volatile Atomic32* ptr, Atomic32 value);
 Atomic32 NoBarrier_Load(volatile const Atomic32* ptr);
 Atomic32 Acquire_Load(volatile const Atomic32* ptr);
-Atomic32 Release_Load(volatile const Atomic32* ptr);
 
 // Corresponding operations on Atomic64
 Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
@@ -289,11 +277,9 @@
                                 Atomic64 old_value,
                                 Atomic64 new_value);
 void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value);
-void Acquire_Store(volatile Atomic64* ptr, Atomic64 value);
 void Release_Store(volatile Atomic64* ptr, Atomic64 value);
 Atomic64 NoBarrier_Load(volatile const Atomic64* ptr);
 Atomic64 Acquire_Load(volatile const Atomic64* ptr);
-Atomic64 Release_Load(volatile const Atomic64* ptr);
 }  // namespace base::subtle
 }  // namespace base
 
@@ -321,10 +307,6 @@
   return base::subtle::Release_CompareAndSwap(ptr, old_value, new_value);
 }
 
-inline void Acquire_Store(volatile AtomicWord* ptr, AtomicWord value) {
-  return base::subtle::Acquire_Store(ptr, value);
-}
-
 inline void Release_Store(volatile AtomicWord* ptr, AtomicWord value) {
   return base::subtle::Release_Store(ptr, value);
 }
@@ -332,10 +314,6 @@
 inline AtomicWord Acquire_Load(volatile const AtomicWord* ptr) {
   return base::subtle::Acquire_Load(ptr);
 }
-
-inline AtomicWord Release_Load(volatile const AtomicWord* ptr) {
-  return base::subtle::Release_Load(ptr);
-}
 #endif  // AtomicWordCastType
 
 // 32-bit Acquire/Release operations to be deprecated.
@@ -350,18 +328,12 @@
                                        Atomic32 new_value) {
   return base::subtle::Release_CompareAndSwap(ptr, old_value, new_value);
 }
-inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
-  base::subtle::Acquire_Store(ptr, value);
-}
 inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
   return base::subtle::Release_Store(ptr, value);
 }
 inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
   return base::subtle::Acquire_Load(ptr);
 }
-inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
-  return base::subtle::Release_Load(ptr);
-}
 
 #ifdef BASE_HAS_ATOMIC64
 
@@ -377,10 +349,6 @@
     base::subtle::Atomic64 old_value, base::subtle::Atomic64 new_value) {
   return base::subtle::Release_CompareAndSwap(ptr, old_value, new_value);
 }
-inline void Acquire_Store(
-    volatile base::subtle::Atomic64* ptr, base::subtle::Atomic64 value) {
-  base::subtle::Acquire_Store(ptr, value);
-}
 inline void Release_Store(
     volatile base::subtle::Atomic64* ptr, base::subtle::Atomic64 value) {
   return base::subtle::Release_Store(ptr, value);
@@ -389,10 +357,6 @@
     volatile const base::subtle::Atomic64* ptr) {
   return base::subtle::Acquire_Load(ptr);
 }
-inline base::subtle::Atomic64 Release_Load(
-    volatile const base::subtle::Atomic64* ptr) {
-  return base::subtle::Release_Load(ptr);
-}
 
 #endif  // BASE_HAS_ATOMIC64
 
diff --git a/src/tests/atomicops_unittest.cc b/src/tests/atomicops_unittest.cc
index aa82a6b..76aae2e 100644
--- a/src/tests/atomicops_unittest.cc
+++ b/src/tests/atomicops_unittest.cc
@@ -104,11 +104,6 @@
   base::subtle::NoBarrier_Store(&value, kVal2);
   ASSERT_EQ(kVal2, value);
 
-  base::subtle::Acquire_Store(&value, kVal1);
-  ASSERT_EQ(kVal1, value);
-  base::subtle::Acquire_Store(&value, kVal2);
-  ASSERT_EQ(kVal2, value);
-
   base::subtle::Release_Store(&value, kVal1);
   ASSERT_EQ(kVal1, value);
   base::subtle::Release_Store(&value, kVal2);
@@ -133,11 +128,6 @@
   ASSERT_EQ(kVal1, base::subtle::Acquire_Load(&value));
   value = kVal2;
   ASSERT_EQ(kVal2, base::subtle::Acquire_Load(&value));
-
-  value = kVal1;
-  ASSERT_EQ(kVal1, base::subtle::Release_Load(&value));
-  value = kVal2;
-  ASSERT_EQ(kVal2, base::subtle::Release_Load(&value));
 }
 
 template <class AtomicType>