tcmalloc: Use C++11 atomics where appropriate.

Reland now that we have a newer sysroot.

Ports these CLs to tcmalloc:
https://codereview.chromium.org/636783002/
https://codereview.chromium.org/1466833002/ (except mac)

No intended behavior change, but it should remove
the static initializer in atomicops_internals_x86_gcc.h
on Linux.  It's also less code.

BUG=94925,559247,572525

Committed: https://crrev.com/e13537fe418eff11d3cab9077f6a647d7c74f103
Cr-Original-Commit-Position: refs/heads/master@{#464440}
Review-Url: https://codereview.chromium.org/1549913002
Cr-Original-Original-Commit-Position: refs/heads/master@{#366904}
Cr-Mirrored-From: https://chromium.googlesource.com/chromium/src
Cr-Mirrored-Commit: 2f6d8f01d9087e8bebab5b7d2d25b28d657dbbb7
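
For reference: the portable header added at the end of this diff maps the
legacy atomicops API onto std::atomic member functions through transitional
casts. A minimal sketch of the approach (the AsAtomic helper name is
illustrative, not the actual code):

  #include <atomic>
  #include <stdint.h>

  typedef int32_t Atomic32;

  // Transitional cast: treat the plain word as a std::atomic of the same
  // width. Valid only if the location is never accessed non-atomically.
  inline std::atomic<Atomic32>* AsAtomic(volatile Atomic32* ptr) {
    return reinterpret_cast<std::atomic<Atomic32>*>(
        const_cast<Atomic32*>(ptr));
  }

  inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
                                           Atomic32 old_value,
                                           Atomic32 new_value) {
    // compare_exchange_strong writes the observed value back into
    // old_value on failure, matching the legacy "return previous value"
    // convention.
    AsAtomic(ptr)->compare_exchange_strong(old_value, new_value,
                                           std::memory_order_relaxed,
                                           std::memory_order_relaxed);
    return old_value;
  }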
diff --git a/src/base/atomicops-internals-arm-generic.h b/src/base/atomicops-internals-arm-generic.h
deleted file mode 100644
index e083f8d..0000000
--- a/src/base/atomicops-internals-arm-generic.h
+++ /dev/null
@@ -1,235 +0,0 @@
-// Copyright (c) 2003, Google Inc.
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-//     * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// ---
-//
-// Author: Lei Zhang, Sasha Levitskiy
-//
-// This file is an internal atomic implementation, use base/atomicops.h instead.
-//
-// LinuxKernelCmpxchg and Barrier_AtomicIncrement are from Google Gears.
-
-#ifndef BASE_ATOMICOPS_INTERNALS_ARM_GENERIC_H_
-#define BASE_ATOMICOPS_INTERNALS_ARM_GENERIC_H_
-
-#include <stdio.h>
-#include "base/abort.h"
-#include "base/basictypes.h"
-
-typedef int32_t Atomic32;
-
-namespace base {
-namespace subtle {
-
-typedef int64_t Atomic64;
-
-// 0xffff0fc0 is the hard coded address of a function provided by
-// the kernel which implements an atomic compare-exchange. On older
-// ARM architecture revisions (pre-v6) this may be implemented using
-// a syscall. This address is stable, and in active use (hard coded)
-// by at least glibc-2.7 and the Android C library.
-// pLinuxKernelCmpxchg has both acquire and release barrier semantics.
-typedef Atomic32 (*LinuxKernelCmpxchgFunc)(Atomic32 old_value,
-                                           Atomic32 new_value,
-                                           volatile Atomic32* ptr);
-LinuxKernelCmpxchgFunc pLinuxKernelCmpxchg ATTRIBUTE_WEAK =
-    (LinuxKernelCmpxchgFunc) 0xffff0fc0;
-
-typedef void (*LinuxKernelMemoryBarrierFunc)(void);
-LinuxKernelMemoryBarrierFunc pLinuxKernelMemoryBarrier ATTRIBUTE_WEAK =
-    (LinuxKernelMemoryBarrierFunc) 0xffff0fa0;
-
-
-inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
-                                         Atomic32 old_value,
-                                         Atomic32 new_value) {
-  Atomic32 prev_value = *ptr;
-  do {
-    if (!pLinuxKernelCmpxchg(old_value, new_value,
-                             const_cast<Atomic32*>(ptr))) {
-      return old_value;
-    }
-    prev_value = *ptr;
-  } while (prev_value == old_value);
-  return prev_value;
-}
-
-inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
-                                         Atomic32 new_value) {
-  Atomic32 old_value;
-  do {
-    old_value = *ptr;
-  } while (pLinuxKernelCmpxchg(old_value, new_value,
-                               const_cast<Atomic32*>(ptr)));
-  return old_value;
-}
-
-inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
-                                        Atomic32 increment) {
-  for (;;) {
-    // Atomic exchange the old value with an incremented one.
-    Atomic32 old_value = *ptr;
-    Atomic32 new_value = old_value + increment;
-    if (pLinuxKernelCmpxchg(old_value, new_value,
-                            const_cast<Atomic32*>(ptr)) == 0) {
-      // The exchange took place as expected.
-      return new_value;
-    }
-    // Otherwise, *ptr changed mid-loop and we need to retry.
-  }
-}
-
-inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
-                                          Atomic32 increment) {
-  return Barrier_AtomicIncrement(ptr, increment);
-}
-
-inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
-                                       Atomic32 old_value,
-                                       Atomic32 new_value) {
-  return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
-}
-
-inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
-                                       Atomic32 old_value,
-                                       Atomic32 new_value) {
-  return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
-}
-
-inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
-  *ptr = value;
-}
-
-inline void MemoryBarrier() {
-  pLinuxKernelMemoryBarrier();
-}
-
-inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
-  *ptr = value;
-  MemoryBarrier();
-}
-
-inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
-  MemoryBarrier();
-  *ptr = value;
-}
-
-inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
-  return *ptr;
-}
-
-inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
-  Atomic32 value = *ptr;
-  MemoryBarrier();
-  return value;
-}
-
-inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
-  MemoryBarrier();
-  return *ptr;
-}
-
-
-// 64-bit versions are not implemented yet.
-
-inline void NotImplementedFatalError(const char *function_name) {
-  fprintf(stderr, "64-bit %s() not implemented on this platform\n",
-          function_name);
-  tcmalloc::Abort();
-}
-
-inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
-                                         Atomic64 old_value,
-                                         Atomic64 new_value) {
-  NotImplementedFatalError("NoBarrier_CompareAndSwap");
-  return 0;
-}
-
-inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
-                                         Atomic64 new_value) {
-  NotImplementedFatalError("NoBarrier_AtomicExchange");
-  return 0;
-}
-
-inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
-                                          Atomic64 increment) {
-  NotImplementedFatalError("NoBarrier_AtomicIncrement");
-  return 0;
-}
-
-inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
-                                        Atomic64 increment) {
-  NotImplementedFatalError("Barrier_AtomicIncrement");
-  return 0;
-}
-
-inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
-  NotImplementedFatalError("NoBarrier_Store");
-}
-
-inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
-  NotImplementedFatalError("Acquire_Store64");
-}
-
-inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
-  NotImplementedFatalError("Release_Store");
-}
-
-inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
-  NotImplementedFatalError("NoBarrier_Load");
-  return 0;
-}
-
-inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
-  NotImplementedFatalError("Atomic64 Acquire_Load");
-  return 0;
-}
-
-inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
-  NotImplementedFatalError("Atomic64 Release_Load");
-  return 0;
-}
-
-inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
-                                       Atomic64 old_value,
-                                       Atomic64 new_value) {
-  NotImplementedFatalError("Atomic64 Acquire_CompareAndSwap");
-  return 0;
-}
-
-inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
-                                       Atomic64 old_value,
-                                       Atomic64 new_value) {
-  NotImplementedFatalError("Atomic64 Release_CompareAndSwap");
-  return 0;
-}
-
-}  // namespace base::subtle
-}  // namespace base
-
-#endif  // BASE_ATOMICOPS_INTERNALS_ARM_GENERIC_H_
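
With C++11 atomics the kernel-helper retry loops in the file above collapse
into single calls. A hedged sketch using the same transitional cast as the
new portable header (the compiler is expected to emit a suitable
LDREX/STREX or kernel-helper sequence for the target):

  #include <atomic>
  #include <stdint.h>

  typedef int32_t Atomic32;

  // Replaces the pLinuxKernelCmpxchg retry loop above.
  inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
                                           Atomic32 new_value) {
    return reinterpret_cast<std::atomic<Atomic32>*>(
               const_cast<Atomic32*>(ptr))
        ->exchange(new_value, std::memory_order_relaxed);
  }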
diff --git a/src/base/atomicops-internals-arm-v6plus.h b/src/base/atomicops-internals-arm-v6plus.h
deleted file mode 100644
index dc06987..0000000
--- a/src/base/atomicops-internals-arm-v6plus.h
+++ /dev/null
@@ -1,369 +0,0 @@
-// Copyright (c) 2011, Google Inc.
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-//     * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// ---
-//
-// Author: Sasha Levitskiy
-// based on atomicops-internals by Sanjay Ghemawat
-//
-// This file is an internal atomic implementation, use base/atomicops.h instead.
-//
-// This code implements ARM atomics for architectures V6 and newer.
-
-#ifndef BASE_ATOMICOPS_INTERNALS_ARM_V6PLUS_H_
-#define BASE_ATOMICOPS_INTERNALS_ARM_V6PLUS_H_
-
-#include <stdio.h>
-#include <stdlib.h>
-#include "base/abort.h"
-#include "base/basictypes.h"  // For COMPILE_ASSERT
-
-// The LDREXD and STREXD instructions are available in all ARM v7 variants
-// and above.  In v6, only some variants support them.  For simplicity, we
-// only use exclusive 64-bit load/store in V7 or above.
-#if defined(ARMV7)
-# define BASE_ATOMICOPS_HAS_LDREXD_AND_STREXD
-#endif
-
-typedef int32_t Atomic32;
-
-namespace base {
-namespace subtle {
-
-typedef int64_t Atomic64;
-
-// 32-bit low-level ops
-
-inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
-                                         Atomic32 old_value,
-                                         Atomic32 new_value) {
-  Atomic32 oldval, res;
-  do {
-    __asm__ __volatile__(
-    "ldrex   %1, [%3]\n"
-    "mov     %0, #0\n"
-    "teq     %1, %4\n"
-    // The following IT (if-then) instruction is needed for the subsequent
-    // conditional instruction STREXEQ when compiling in THUMB mode.
-    // In ARM mode, the compiler/assembler will not generate any code for it.
-    "it      eq\n"
-    "strexeq %0, %5, [%3]\n"
-        : "=&r" (res), "=&r" (oldval), "+Qo" (*ptr)
-        : "r" (ptr), "Ir" (old_value), "r" (new_value)
-        : "cc");
-  } while (res);
-  return oldval;
-}
-
-inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
-                                         Atomic32 new_value) {
-  Atomic32 tmp, old;
-  __asm__ __volatile__(
-      "1:\n"
-      "ldrex  %1, [%2]\n"
-      "strex  %0, %3, [%2]\n"
-      "teq    %0, #0\n"
-      "bne    1b"
-      : "=&r" (tmp), "=&r" (old)
-      : "r" (ptr), "r" (new_value)
-      : "cc", "memory");
-  return old;
-}
-
-inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
-                                          Atomic32 increment) {
-  Atomic32 tmp, res;
-  __asm__ __volatile__(
-      "1:\n"
-      "ldrex  %1, [%2]\n"
-      "add    %1, %1, %3\n"
-      "strex  %0, %1, [%2]\n"
-      "teq    %0, #0\n"
-      "bne    1b"
-      : "=&r" (tmp), "=&r"(res)
-      : "r" (ptr), "r"(increment)
-      : "cc", "memory");
-  return res;
-}
-
-inline void MemoryBarrier() {
-  __asm__ __volatile__("dmb" : : : "memory");
-}
-
-inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
-                                        Atomic32 increment) {
-  Atomic32 tmp, res;
-  __asm__ __volatile__(
-      "1:\n"
-      "ldrex  %1, [%2]\n"
-      "add    %1, %1, %3\n"
-      "dmb\n"
-      "strex  %0, %1, [%2]\n"
-      "teq    %0, #0\n"
-      "bne    1b"
-      : "=&r" (tmp), "=&r"(res)
-      : "r" (ptr), "r"(increment)
-      : "cc", "memory");
-  return res;
-}
-
-inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
-                                       Atomic32 old_value,
-                                       Atomic32 new_value) {
-  Atomic32 value = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
-  MemoryBarrier();
-  return value;
-}
-
-inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
-                                       Atomic32 old_value,
-                                       Atomic32 new_value) {
-  MemoryBarrier();
-  return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
-}
-
-inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
-  *ptr = value;
-}
-
-inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
-  *ptr = value;
-  MemoryBarrier();
-}
-
-inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
-  MemoryBarrier();
-  *ptr = value;
-}
-
-inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
-  return *ptr;
-}
-
-inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
-  Atomic32 value = *ptr;
-  MemoryBarrier();
-  return value;
-}
-
-inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
-  MemoryBarrier();
-  return *ptr;
-}
-
-// 64-bit versions are only available if LDREXD and STREXD instructions
-// are available.
-#ifdef BASE_ATOMICOPS_HAS_LDREXD_AND_STREXD
-
-#define BASE_HAS_ATOMIC64 1
-
-inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
-                                         Atomic64 old_value,
-                                         Atomic64 new_value) {
-  Atomic64 oldval, res;
-  do {
-    __asm__ __volatile__(
-    "ldrexd   %1, [%3]\n"
-    "mov      %0, #0\n"
-    "teq      %Q1, %Q4\n"
-    // The following IT (if-then) instructions are needed for the subsequent
-    // conditional instructions when compiling in THUMB mode.
-    // In ARM mode, the compiler/assembler will not generate any code for them.
-    "it       eq\n"
-    "teqeq    %R1, %R4\n"
-    "it       eq\n"
-    "strexdeq %0, %5, [%3]\n"
-        : "=&r" (res), "=&r" (oldval), "+Q" (*ptr)
-        : "r" (ptr), "Ir" (old_value), "r" (new_value)
-        : "cc");
-  } while (res);
-  return oldval;
-}
-
-inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
-                                         Atomic64 new_value) {
-  int store_failed;
-  Atomic64 old;
-  __asm__ __volatile__(
-      "1:\n"
-      "ldrexd  %1, [%2]\n"
-      "strexd  %0, %3, [%2]\n"
-      "teq     %0, #0\n"
-      "bne     1b"
-      : "=&r" (store_failed), "=&r" (old)
-      : "r" (ptr), "r" (new_value)
-      : "cc", "memory");
-  return old;
-}
-
-inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
-                                          Atomic64 increment) {
-  int store_failed;
-  Atomic64 res;
-  __asm__ __volatile__(
-      "1:\n"
-      "ldrexd  %1, [%2]\n"
-      "adds    %Q1, %Q1, %Q3\n"
-      "adc     %R1, %R1, %R3\n"
-      "strexd  %0, %1, [%2]\n"
-      "teq     %0, #0\n"
-      "bne     1b"
-      : "=&r" (store_failed), "=&r"(res)
-      : "r" (ptr), "r"(increment)
-      : "cc", "memory");
-  return res;
-}
-
-inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
-                                        Atomic64 increment) {
-  int store_failed;
-  Atomic64 res;
-  __asm__ __volatile__(
-      "1:\n"
-      "ldrexd  %1, [%2]\n"
-      "adds    %Q1, %Q1, %Q3\n"
-      "adc     %R1, %R1, %R3\n"
-      "dmb\n"
-      "strexd  %0, %1, [%2]\n"
-      "teq     %0, #0\n"
-      "bne     1b"
-      : "=&r" (store_failed), "=&r"(res)
-      : "r" (ptr), "r"(increment)
-      : "cc", "memory");
-  return res;
-}
-
-inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
-  int store_failed;
-  Atomic64 dummy;
-  __asm__ __volatile__(
-      "1:\n"
-      // Dummy load to lock cache line.
-      "ldrexd  %1, [%3]\n"
-      "strexd  %0, %2, [%3]\n"
-      "teq     %0, #0\n"
-      "bne     1b"
-      : "=&r" (store_failed), "=&r"(dummy)
-      : "r"(value), "r" (ptr)
-      : "cc", "memory");
-}
-
-inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
-  Atomic64 res;
-  __asm__ __volatile__(
-  "ldrexd   %0, [%1]\n"
-  "clrex\n"
-      : "=r" (res)
-      : "r"(ptr), "Q"(*ptr));
-  return res;
-}
-
-#else // BASE_ATOMICOPS_HAS_LDREXD_AND_STREXD
-
-inline void NotImplementedFatalError(const char *function_name) {
-  fprintf(stderr, "64-bit %s() not implemented on this platform\n",
-          function_name);
-  tcmalloc::Abort();
-}
-
-inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
-                                         Atomic64 old_value,
-                                         Atomic64 new_value) {
-  NotImplementedFatalError("NoBarrier_CompareAndSwap");
-  return 0;
-}
-
-inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
-                                         Atomic64 new_value) {
-  NotImplementedFatalError("NoBarrier_AtomicExchange");
-  return 0;
-}
-
-inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
-                                          Atomic64 increment) {
-  NotImplementedFatalError("NoBarrier_AtomicIncrement");
-  return 0;
-}
-
-inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
-                                        Atomic64 increment) {
-  NotImplementedFatalError("Barrier_AtomicIncrement");
-  return 0;
-}
-
-inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
-  NotImplementedFatalError("NoBarrier_Store");
-}
-
-inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
-  NotImplementedFatalError("NoBarrier_Load");
-  return 0;
-}
-
-#endif // BASE_ATOMICOPS_HAS_LDREXD_AND_STREXD
-
-inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
-  NoBarrier_Store(ptr, value);
-  MemoryBarrier();
-}
-
-inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
-  MemoryBarrier();
-  NoBarrier_Store(ptr, value);
-}
-
-inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
-  Atomic64 value = NoBarrier_Load(ptr);
-  MemoryBarrier();
-  return value;
-}
-
-inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
-  MemoryBarrier();
-  return NoBarrier_Load(ptr);
-}
-
-inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
-                                       Atomic64 old_value,
-                                       Atomic64 new_value) {
-  Atomic64 value = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
-  MemoryBarrier();
-  return value;
-}
-
-inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
-                                       Atomic64 old_value,
-                                       Atomic64 new_value) {
-  MemoryBarrier();
-  return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
-}
-
-}  // namespace subtle ends
-}  // namespace base ends
-
-#endif  // BASE_ATOMICOPS_INTERNALS_ARM_V6PLUS_H_
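
The LDREX/STREX increment loops in the file above likewise reduce to a
fetch_add. Since C++11 fetch_add returns the pre-increment value, the
increment is applied once more to match the legacy post-incremented return
(a sketch under the same casting assumption):

  #include <atomic>
  #include <stdint.h>

  typedef int32_t Atomic32;

  inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
                                          Atomic32 increment) {
    // fetch_add returns the old value; add increment again so the caller
    // sees the post-incremented value.
    return increment +
           reinterpret_cast<std::atomic<Atomic32>*>(
               const_cast<Atomic32*>(ptr))
               ->fetch_add(increment, std::memory_order_seq_cst);
  }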
diff --git a/src/base/atomicops-internals-linuxppc.h b/src/base/atomicops-internals-linuxppc.h
deleted file mode 100644
index 7e49560..0000000
--- a/src/base/atomicops-internals-linuxppc.h
+++ /dev/null
@@ -1,416 +0,0 @@
-/* Copyright (c) 2008, Google Inc.
- * All rights reserved.
- * 
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met:
- * 
- *     * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- *     * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following disclaimer
- * in the documentation and/or other materials provided with the
- * distribution.
- *     * Neither the name of Google Inc. nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- * 
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * ---
- */
-
-// Implementation of atomic operations for ppc-linux.  This file should not
-// be included directly.  Clients should instead include
-// "base/atomicops.h".
-
-#ifndef BASE_ATOMICOPS_INTERNALS_LINUXPPC_H_
-#define BASE_ATOMICOPS_INTERNALS_LINUXPPC_H_
-
-typedef int32_t Atomic32;
-
-#ifdef __PPC64__
-#define BASE_HAS_ATOMIC64 1
-#endif
-
-namespace base {
-namespace subtle {
-
-static inline void _sync(void) {
-  __asm__ __volatile__("sync": : : "memory");
-}
-
-static inline void _lwsync(void) {
-  // gcc defines __NO_LWSYNC__ when appropriate; see
-  //    http://gcc.gnu.org/ml/gcc-patches/2006-11/msg01238.html
-#ifdef __NO_LWSYNC__
-  __asm__ __volatile__("msync": : : "memory");
-#else
-  __asm__ __volatile__("lwsync": : : "memory");
-#endif
-}
-
-static inline void _isync(void) {
-  __asm__ __volatile__("isync": : : "memory");
-}
-
-static inline Atomic32 OSAtomicAdd32(Atomic32 amount, Atomic32 *value) {
-  Atomic32 t;
-  __asm__ __volatile__(
-"1:		lwarx   %0,0,%3\n\
-		add     %0,%2,%0\n\
-		stwcx.  %0,0,%3 \n\
-		bne-    1b"
-		: "=&r" (t), "+m" (*value)
-		: "r" (amount), "r" (value)
-                : "cc");
-  return t;
-}
-
-static inline Atomic32 OSAtomicAdd32Barrier(Atomic32 amount, Atomic32 *value) {
-  Atomic32 t;
-  _lwsync();
-  t = OSAtomicAdd32(amount, value);
-  // This is based on the code snippet in the architecture manual (Vol
-  // 2, Appendix B).  It's a little tricky: correctness depends on the
-  // fact that the code right before this (in OSAtomicAdd32) has a
-  // conditional branch with a data dependency on the update.
-  // Otherwise, we'd have to use sync.
-  _isync();
-  return t;
-}
-
-static inline bool OSAtomicCompareAndSwap32(Atomic32 old_value,
-                                            Atomic32 new_value,
-                                            Atomic32 *value) {
-  Atomic32 prev;
-  __asm__ __volatile__(
-"1:		lwarx   %0,0,%2\n\
-		cmpw    0,%0,%3\n\
-		bne-    2f\n\
-		stwcx.  %4,0,%2\n\
-		bne-    1b\n\
-2:"
-                : "=&r" (prev), "+m" (*value)
-                : "r" (value), "r" (old_value), "r" (new_value)
-                : "cc");
-  return prev == old_value;
-}
-
-static inline Atomic32 OSAtomicCompareAndSwap32Acquire(Atomic32 old_value,
-                                                       Atomic32 new_value,
-                                                       Atomic32 *value) {
-  Atomic32 t;
-  t = OSAtomicCompareAndSwap32(old_value, new_value, value);
-  // This is based on the code snippet in the architecture manual (Vol
-  // 2, Appendix B).  It's a little tricky: correctness depends on the
-  // fact that the code right before this (in
-  // OSAtomicCompareAndSwap32) has a conditional branch with a data
-  // dependency on the update.  Otherwise, we'd have to use sync.
-  _isync();
-  return t;
-}
-
-static inline Atomic32 OSAtomicCompareAndSwap32Release(Atomic32 old_value,
-                                                       Atomic32 new_value,
-                                                       Atomic32 *value) {
-  _lwsync();
-  return OSAtomicCompareAndSwap32(old_value, new_value, value);
-}
-
-typedef int64_t Atomic64;
-
-inline void MemoryBarrier() {
-  // This can't be _lwsync(); we need to order the immediately
-  // preceding stores against any load that may follow, but lwsync
-  // doesn't guarantee that.
-  _sync();
-}
-
-// 32-bit Versions.
-
-inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32 *ptr,
-                                         Atomic32 old_value,
-                                         Atomic32 new_value) {
-  Atomic32 prev_value;
-  do {
-    if (OSAtomicCompareAndSwap32(old_value, new_value,
-                                 const_cast<Atomic32*>(ptr))) {
-      return old_value;
-    }
-    prev_value = *ptr;
-  } while (prev_value == old_value);
-  return prev_value;
-}
-
-inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32 *ptr,
-                                         Atomic32 new_value) {
-  Atomic32 old_value;
-  do {
-    old_value = *ptr;
-  } while (!OSAtomicCompareAndSwap32(old_value, new_value,
-                                     const_cast<Atomic32*>(ptr)));
-  return old_value;
-}
-
-inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32 *ptr,
-                                          Atomic32 increment) {
-  return OSAtomicAdd32(increment, const_cast<Atomic32*>(ptr));
-}
-
-inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32 *ptr,
-                                        Atomic32 increment) {
-  return OSAtomicAdd32Barrier(increment, const_cast<Atomic32*>(ptr));
-}
-
-inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32 *ptr,
-                                       Atomic32 old_value,
-                                       Atomic32 new_value) {
-  Atomic32 prev_value;
-  do {
-    if (OSAtomicCompareAndSwap32Acquire(old_value, new_value,
-                                        const_cast<Atomic32*>(ptr))) {
-      return old_value;
-    }
-    prev_value = *ptr;
-  } while (prev_value == old_value);
-  return prev_value;
-}
-
-inline Atomic32 Release_CompareAndSwap(volatile Atomic32 *ptr,
-                                       Atomic32 old_value,
-                                       Atomic32 new_value) {
-  Atomic32 prev_value;
-  do {
-    if (OSAtomicCompareAndSwap32Release(old_value, new_value,
-                                        const_cast<Atomic32*>(ptr))) {
-      return old_value;
-    }
-    prev_value = *ptr;
-  } while (prev_value == old_value);
-  return prev_value;
-}
-
-#ifdef __PPC64__
-
-// 64-bit Versions.
-
-static inline Atomic64 OSAtomicAdd64(Atomic64 amount, Atomic64 *value) {
-  Atomic64 t;
-  __asm__ __volatile__(
-"1:		ldarx   %0,0,%3\n\
-		add     %0,%2,%0\n\
-		stdcx.  %0,0,%3 \n\
-		bne-    1b"
-		: "=&r" (t), "+m" (*value)
-		: "r" (amount), "r" (value)
-                : "cc");
-  return t;
-}
-
-static inline Atomic64 OSAtomicAdd64Barrier(Atomic64 amount, Atomic64 *value) {
-  Atomic64 t;
-  _lwsync();
-  t = OSAtomicAdd64(amount, value);
-  // This is based on the code snippet in the architecture manual (Vol
-  // 2, Appendix B).  It's a little tricky: correctness depends on the
-  // fact that the code right before this (in OSAtomicAdd64) has a
-  // conditional branch with a data dependency on the update.
-  // Otherwise, we'd have to use sync.
-  _isync();
-  return t;
-}
-
-static inline bool OSAtomicCompareAndSwap64(Atomic64 old_value,
-                                            Atomic64 new_value,
-                                            Atomic64 *value) {
-  Atomic64 prev;
-  __asm__ __volatile__(
-"1:		ldarx   %0,0,%2\n\
-		cmpw    0,%0,%3\n\
-		bne-    2f\n\
-		stdcx.  %4,0,%2\n\
-		bne-    1b\n\
-2:"
-                : "=&r" (prev), "+m" (*value)
-                : "r" (value), "r" (old_value), "r" (new_value)
-                : "cc");
-  return prev == old_value;
-}
-
-static inline Atomic64 OSAtomicCompareAndSwap64Acquire(Atomic64 old_value,
-                                                       Atomic64 new_value,
-                                                       Atomic64 *value) {
-  Atomic64 t;
-  t = OSAtomicCompareAndSwap64(old_value, new_value, value);
-  // This is based on the code snippet in the architecture manual (Vol
-  // 2, Appendix B).  It's a little tricky: correctness depends on the
-  // fact that the code right before this (in
-  // OSAtomicCompareAndSwap64) has a conditional branch with a data
-  // dependency on the update.  Otherwise, we'd have to use sync.
-  _isync();
-  return t;
-}
-
-static inline Atomic64 OSAtomicCompareAndSwap64Release(Atomic64 old_value,
-                                                       Atomic64 new_value,
-                                                       Atomic64 *value) {
-  _lwsync();
-  return OSAtomicCompareAndSwap64(old_value, new_value, value);
-}
-
-
-inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64 *ptr,
-                                         Atomic64 old_value,
-                                         Atomic64 new_value) {
-  Atomic64 prev_value;
-  do {
-    if (OSAtomicCompareAndSwap64(old_value, new_value,
-                                 const_cast<Atomic64*>(ptr))) {
-      return old_value;
-    }
-    prev_value = *ptr;
-  } while (prev_value == old_value);
-  return prev_value;
-}
-
-inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64 *ptr,
-                                         Atomic64 new_value) {
-  Atomic64 old_value;
-  do {
-    old_value = *ptr;
-  } while (!OSAtomicCompareAndSwap64(old_value, new_value,
-                                     const_cast<Atomic64*>(ptr)));
-  return old_value;
-}
-
-inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64 *ptr,
-                                          Atomic64 increment) {
-  return OSAtomicAdd64(increment, const_cast<Atomic64*>(ptr));
-}
-
-inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64 *ptr,
-                                        Atomic64 increment) {
-  return OSAtomicAdd64Barrier(increment, const_cast<Atomic64*>(ptr));
-}
-
-inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64 *ptr,
-                                       Atomic64 old_value,
-                                       Atomic64 new_value) {
-  Atomic64 prev_value;
-  do {
-    if (OSAtomicCompareAndSwap64Acquire(old_value, new_value,
-                                        const_cast<Atomic64*>(ptr))) {
-      return old_value;
-    }
-    prev_value = *ptr;
-  } while (prev_value == old_value);
-  return prev_value;
-}
-
-inline Atomic64 Release_CompareAndSwap(volatile Atomic64 *ptr,
-                                       Atomic64 old_value,
-                                       Atomic64 new_value) {
-  Atomic64 prev_value;
-  do {
-    if (OSAtomicCompareAndSwap64Release(old_value, new_value,
-                                        const_cast<Atomic64*>(ptr))) {
-      return old_value;
-    }
-    prev_value = *ptr;
-  } while (prev_value == old_value);
-  return prev_value;
-}
-
-#endif
-
-inline void NoBarrier_Store(volatile Atomic32 *ptr, Atomic32 value) {
-  *ptr = value;
-}
-
-inline void Acquire_Store(volatile Atomic32 *ptr, Atomic32 value) {
-  *ptr = value;
-  // This can't be _lwsync(); we need to order the immediately
-  // preceding stores against any load that may follow, but lwsync
-  // doesn't guarantee that.
-  _sync();
-}
-
-inline void Release_Store(volatile Atomic32 *ptr, Atomic32 value) {
-  _lwsync();
-  *ptr = value;
-}
-
-inline Atomic32 NoBarrier_Load(volatile const Atomic32 *ptr) {
-  return *ptr;
-}
-
-inline Atomic32 Acquire_Load(volatile const Atomic32 *ptr) {
-  Atomic32 value = *ptr;
-  _lwsync();
-  return value;
-}
-
-inline Atomic32 Release_Load(volatile const Atomic32 *ptr) {
-  // This can't be _lwsync(); we need to order the immediately
-  // preceding stores against any load that may follow, but lwsync
-  // doesn't guarantee that.
-  _sync();
-  return *ptr;
-}
-
-#ifdef __PPC64__
-
-// 64-bit Versions.
-
-inline void NoBarrier_Store(volatile Atomic64 *ptr, Atomic64 value) {
-  *ptr = value;
-}
-
-inline void Acquire_Store(volatile Atomic64 *ptr, Atomic64 value) {
-  *ptr = value;
-  // This can't be _lwsync(); we need to order the immediately
-  // preceding stores against any load that may follow, but lwsync
-  // doesn't guarantee that.
-  _sync();
-}
-
-inline void Release_Store(volatile Atomic64 *ptr, Atomic64 value) {
-  _lwsync();
-  *ptr = value;
-}
-
-inline Atomic64 NoBarrier_Load(volatile const Atomic64 *ptr) {
-  return *ptr;
-}
-
-inline Atomic64 Acquire_Load(volatile const Atomic64 *ptr) {
-  Atomic64 value = *ptr;
-  _lwsync();
-  return value;
-}
-
-inline Atomic64 Release_Load(volatile const Atomic64 *ptr) {
-  // This can't be _lwsync(); we need to order the immediately
-  // preceding stores against any load that may follow, but lwsync
-  // doesn't guarantee that.
-  _sync();
-  return *ptr;
-}
-
-#endif
-
-}   // namespace base::subtle
-}   // namespace base
-
-#endif  // BASE_ATOMICOPS_INTERNALS_LINUXPPC_H_
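
The explicit lwsync/isync/sync choreography above becomes declarative in
C++11: the ordering is named and the compiler chooses the fences. A sketch
under the same casting assumption:

  #include <atomic>
  #include <stdint.h>

  typedef int32_t Atomic32;

  inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
    // On PPC this typically lowers to lwsync + store, like the
    // hand-written version above.
    reinterpret_cast<std::atomic<Atomic32>*>(const_cast<Atomic32*>(ptr))
        ->store(value, std::memory_order_release);
  }

  inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
    return reinterpret_cast<const std::atomic<Atomic32>*>(
               const_cast<const Atomic32*>(ptr))
        ->load(std::memory_order_acquire);
  }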
diff --git a/src/base/atomicops-internals-x86.cc b/src/base/atomicops-internals-x86.cc
deleted file mode 100644
index 4f75d47..0000000
--- a/src/base/atomicops-internals-x86.cc
+++ /dev/null
@@ -1,125 +0,0 @@
-/* Copyright (c) 2007, Google Inc.
- * All rights reserved.
- * 
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met:
- * 
- *     * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- *     * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following disclaimer
- * in the documentation and/or other materials provided with the
- * distribution.
- *     * Neither the name of Google Inc. nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- * 
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * ---
- * This module gets enough CPU information to optimize the
- * atomicops module on x86.
- */
-
-#include "base/atomicops.h"
-#include "base/basictypes.h"
-#include "base/googleinit.h"
-#include "base/logging.h"
-#include <string.h>
-
-// This file only makes sense with atomicops-internals-x86.h -- it
-// depends on structs that are defined in that file.  If atomicops.h
-// doesn't sub-include that file, then we aren't needed, and shouldn't
-// try to do anything.
-#ifdef BASE_ATOMICOPS_INTERNALS_X86_H_
-
-// Inline cpuid instruction.  In PIC compilations, %ebx contains the address
-// of the global offset table.  To avoid breaking such executables, this code
-// must preserve that register's value across cpuid instructions.
-#if defined(__i386__)
-#define cpuid(a, b, c, d, inp) \
-  asm ("mov %%ebx, %%edi\n"    \
-       "cpuid\n"               \
-       "xchg %%edi, %%ebx\n"   \
-       : "=a" (a), "=D" (b), "=c" (c), "=d" (d) : "a" (inp))
-#elif defined (__x86_64__)
-#define cpuid(a, b, c, d, inp) \
-  asm ("mov %%rbx, %%rdi\n"    \
-       "cpuid\n"               \
-       "xchg %%rdi, %%rbx\n"   \
-       : "=a" (a), "=D" (b), "=c" (c), "=d" (d) : "a" (inp))
-#endif
-
-#if defined(cpuid)        // initialize the struct only on x86
-
-// Set the flags so that code will run correctly and conservatively
-// until InitGoogle() is called.
-struct AtomicOps_x86CPUFeatureStruct AtomicOps_Internalx86CPUFeatures = {
-  false,          // bug can't exist before process spawns multiple threads
-  false,          // no SSE2
-  false,          // no cmpxchg16b
-};
-
-// Initialize the AtomicOps_Internalx86CPUFeatures struct.
-static void AtomicOps_Internalx86CPUFeaturesInit() {
-  uint32 eax;
-  uint32 ebx;
-  uint32 ecx;
-  uint32 edx;
-
-  // Get vendor string (issue CPUID with eax = 0)
-  cpuid(eax, ebx, ecx, edx, 0);
-  char vendor[13];
-  memcpy(vendor, &ebx, 4);
-  memcpy(vendor + 4, &edx, 4);
-  memcpy(vendor + 8, &ecx, 4);
-  vendor[12] = 0;
-
-  // get feature flags in ecx/edx, and family/model in eax
-  cpuid(eax, ebx, ecx, edx, 1);
-
-  int family = (eax >> 8) & 0xf;        // family and model fields
-  int model = (eax >> 4) & 0xf;
-  if (family == 0xf) {                  // use extended family and model fields
-    family += (eax >> 20) & 0xff;
-    model += ((eax >> 16) & 0xf) << 4;
-  }
-
-  // Opteron Rev E has a bug in which on very rare occasions a locked
-  // instruction doesn't act as a read-acquire barrier if followed by a
-  // non-locked read-modify-write instruction.  Rev F has this bug in 
-  // pre-release versions, but not in versions released to customers,
-  // so we test only for Rev E, which is family 15, model 32..63 inclusive.
-  if (strcmp(vendor, "AuthenticAMD") == 0 &&       // AMD
-      family == 15 &&
-      32 <= model && model <= 63) {
-    AtomicOps_Internalx86CPUFeatures.has_amd_lock_mb_bug = true;
-  } else {
-    AtomicOps_Internalx86CPUFeatures.has_amd_lock_mb_bug = false;
-  }
-
-  // edx bit 26 is SSE2, which we use to tell us whether we can use mfence
-  AtomicOps_Internalx86CPUFeatures.has_sse2 = ((edx >> 26) & 1);
-
-  // ecx bit 13 indicates whether the cmpxchg16b instruction is supported
-  AtomicOps_Internalx86CPUFeatures.has_cmpxchg16b = ((ecx >> 13) & 1);
-}
-
-REGISTER_MODULE_INITIALIZER(atomicops_x86, {
-  AtomicOps_Internalx86CPUFeaturesInit();
-});
-
-#endif
-
-#endif  /* ifdef BASE_ATOMICOPS_INTERNALS_X86_H_ */
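
Deleting this file is what removes the static initializer mentioned above:
REGISTER_MODULE_INITIALIZER ran cpuid before main() to fill in the feature
struct. std::atomic needs no startup probe; if lock-freedom ever needs
checking, it can be queried directly (illustrative helper, not part of this
change):

  #include <atomic>
  #include <stdint.h>

  // Illustrative only: the compiler selects the right instruction
  // sequences at compile time, so there is no feature struct to fill in.
  bool Atomic64IsLockFree() {
    std::atomic<int64_t> probe(0);
    return probe.is_lock_free();
  }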
diff --git a/src/base/atomicops-internals-x86.h b/src/base/atomicops-internals-x86.h
deleted file mode 100644
index c34aa5c..0000000
--- a/src/base/atomicops-internals-x86.h
+++ /dev/null
@@ -1,428 +0,0 @@
-/* Copyright (c) 2006, Google Inc.
- * All rights reserved.
- * 
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met:
- * 
- *     * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- *     * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following disclaimer
- * in the documentation and/or other materials provided with the
- * distribution.
- *     * Neither the name of Google Inc. nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- * 
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * ---
- * Author: Sanjay Ghemawat
- */
-
-// Implementation of atomic operations for x86.  This file should not
-// be included directly.  Clients should instead include
-// "base/atomicops.h".
-
-#ifndef BASE_ATOMICOPS_INTERNALS_X86_H_
-#define BASE_ATOMICOPS_INTERNALS_X86_H_
-
-typedef int32_t Atomic32;
-#define BASE_HAS_ATOMIC64 1  // Use only in tests and base/atomic*
-
-
-// NOTE(vchen): x86 does not need to define AtomicWordCastType, because it
-// already matches Atomic32 or Atomic64, depending on the platform.
-
-
-// This struct is not part of the public API of this module; clients may not
-// use it.
-// Features of this x86.  Values may not be correct before main() is run,
-// but are set conservatively.
-struct AtomicOps_x86CPUFeatureStruct {
-  bool has_amd_lock_mb_bug; // Processor has AMD memory-barrier bug; do lfence
-                            // after acquire compare-and-swap.
-  bool has_sse2;            // Processor has SSE2.
-  bool has_cmpxchg16b;      // Processor supports cmpxchg16b instruction.
-};
-extern struct AtomicOps_x86CPUFeatureStruct AtomicOps_Internalx86CPUFeatures;
-
-
-#define ATOMICOPS_COMPILER_BARRIER() __asm__ __volatile__("" : : : "memory")
-
-
-namespace base {
-namespace subtle {
-
-typedef int64_t Atomic64;
-
-// 32-bit low-level operations on any platform.
-
-inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
-                                         Atomic32 old_value,
-                                         Atomic32 new_value) {
-  Atomic32 prev;
-  __asm__ __volatile__("lock; cmpxchgl %1,%2"
-                       : "=a" (prev)
-                       : "q" (new_value), "m" (*ptr), "0" (old_value)
-                       : "memory");
-  return prev;
-}
-
-inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
-                                         Atomic32 new_value) {
-  __asm__ __volatile__("xchgl %1,%0"  // The lock prefix is implicit for xchg.
-                       : "=r" (new_value)
-                       : "m" (*ptr), "0" (new_value)
-                       : "memory");
-  return new_value;  // Now it's the previous value.
-}
-
-inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
-                                          Atomic32 increment) {
-  Atomic32 temp = increment;
-  __asm__ __volatile__("lock; xaddl %0,%1"
-                       : "+r" (temp), "+m" (*ptr)
-                       : : "memory");
-  // temp now holds the old value of *ptr
-  return temp + increment;
-}
-
-inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
-                                        Atomic32 increment) {
-  Atomic32 temp = increment;
-  __asm__ __volatile__("lock; xaddl %0,%1"
-                       : "+r" (temp), "+m" (*ptr)
-                       : : "memory");
-  // temp now holds the old value of *ptr
-  if (AtomicOps_Internalx86CPUFeatures.has_amd_lock_mb_bug) {
-    __asm__ __volatile__("lfence" : : : "memory");
-  }
-  return temp + increment;
-}
-
-inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
-                                       Atomic32 old_value,
-                                       Atomic32 new_value) {
-  Atomic32 x = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
-  if (AtomicOps_Internalx86CPUFeatures.has_amd_lock_mb_bug) {
-    __asm__ __volatile__("lfence" : : : "memory");
-  }
-  return x;
-}
-
-inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
-                                       Atomic32 old_value,
-                                       Atomic32 new_value) {
-  return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
-}
-
-inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
-  *ptr = value;
-}
-
-#if defined(__x86_64__)
-
-// 64-bit implementations of memory barrier can be simpler, because
-// "mfence" is guaranteed to exist.
-inline void MemoryBarrier() {
-  __asm__ __volatile__("mfence" : : : "memory");
-}
-
-inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
-  *ptr = value;
-  MemoryBarrier();
-}
-
-#else
-
-inline void MemoryBarrier() {
-  if (AtomicOps_Internalx86CPUFeatures.has_sse2) {
-    __asm__ __volatile__("mfence" : : : "memory");
-  } else { // mfence is faster but not present on PIII
-    Atomic32 x = 0;
-    NoBarrier_AtomicExchange(&x, 0);  // acts as a barrier on PIII
-  }
-}
-
-inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
-  if (AtomicOps_Internalx86CPUFeatures.has_sse2) {
-    *ptr = value;
-    __asm__ __volatile__("mfence" : : : "memory");
-  } else {
-    NoBarrier_AtomicExchange(ptr, value);
-                          // acts as a barrier on PIII
-  }
-}
-#endif
-
-inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
-  ATOMICOPS_COMPILER_BARRIER();
-  *ptr = value; // An x86 store acts as a release barrier.
-  // See comments in Atomic64 version of Release_Store(), below.
-}
-
-inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
-  return *ptr;
-}
-
-inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
-  Atomic32 value = *ptr; // An x86 load acts as an acquire barrier.
-  // See comments in Atomic64 version of Release_Store(), below.
-  ATOMICOPS_COMPILER_BARRIER();
-  return value;
-}
-
-inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
-  MemoryBarrier();
-  return *ptr;
-}
-
-#if defined(__x86_64__)
-
-// 64-bit low-level operations on 64-bit platform.
-
-inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
-                                         Atomic64 old_value,
-                                         Atomic64 new_value) {
-  Atomic64 prev;
-  __asm__ __volatile__("lock; cmpxchgq %1,%2"
-                       : "=a" (prev)
-                       : "q" (new_value), "m" (*ptr), "0" (old_value)
-                       : "memory");
-  return prev;
-}
-
-inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
-                                         Atomic64 new_value) {
-  __asm__ __volatile__("xchgq %1,%0"  // The lock prefix is implicit for xchg.
-                       : "=r" (new_value)
-                       : "m" (*ptr), "0" (new_value)
-                       : "memory");
-  return new_value;  // Now it's the previous value.
-}
-
-inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
-                                          Atomic64 increment) {
-  Atomic64 temp = increment;
-  __asm__ __volatile__("lock; xaddq %0,%1"
-                       : "+r" (temp), "+m" (*ptr)
-                       : : "memory");
-  // temp now contains the previous value of *ptr
-  return temp + increment;
-}
-
-inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
-                                        Atomic64 increment) {
-  Atomic64 temp = increment;
-  __asm__ __volatile__("lock; xaddq %0,%1"
-                       : "+r" (temp), "+m" (*ptr)
-                       : : "memory");
-  // temp now contains the previous value of *ptr
-  if (AtomicOps_Internalx86CPUFeatures.has_amd_lock_mb_bug) {
-    __asm__ __volatile__("lfence" : : : "memory");
-  }
-  return temp + increment;
-}
-
-inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
-  *ptr = value;
-}
-
-inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
-  *ptr = value;
-  MemoryBarrier();
-}
-
-inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
-  ATOMICOPS_COMPILER_BARRIER();
-
-  *ptr = value; // An x86 store acts as a release barrier
-                // for current AMD/Intel chips as of Jan 2008.
-                // See also Acquire_Load(), below.
-
-  // When new chips come out, check:
-  //  IA-32 Intel Architecture Software Developer's Manual, Volume 3:
-  //  System Programming Guide, Chapter 7: Multiple-processor management,
-  //  Section 7.2, Memory Ordering.
-  // Last seen at:
-  //   http://developer.intel.com/design/pentium4/manuals/index_new.htm
-  //
-  // x86 stores/loads fail to act as barriers for a few instructions (clflush
-  // maskmovdqu maskmovq movntdq movnti movntpd movntps movntq) but these are
-  // not generated by the compiler, and are rare.  Users of these instructions
-  // need to know about cache behaviour in any case since all of these involve
-  // either flushing cache lines or non-temporal cache hints.
-}
-
-inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
-  return *ptr;
-}
-
-inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
-  Atomic64 value = *ptr; // An x86 load acts as an acquire barrier,
-                         // for current AMD/Intel chips as of Jan 2008.
-                         // See also Release_Store(), above.
-  ATOMICOPS_COMPILER_BARRIER();
-  return value;
-}
-
-inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
-  MemoryBarrier();
-  return *ptr;
-}
-
-#else // defined(__x86_64__)
-
-// 64-bit low-level operations on 32-bit platform.
-
-#if !((__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 1))
-// For compilers older than gcc 4.1, we use inline asm.
-//
-// Potential pitfalls:
-//
-// 1. %ebx points to Global offset table (GOT) with -fPIC.
-//    We need to preserve this register.
-// 2. When explicit registers are used in inline asm, the
-//    compiler may not be aware of it and might try to reuse
-//    the same register for another argument which has constraints
-//    that allow it ("r" for example).
-
-inline Atomic64 __sync_val_compare_and_swap(volatile Atomic64* ptr,
-                                            Atomic64 old_value,
-                                            Atomic64 new_value) {
-  Atomic64 prev;
-  __asm__ __volatile__("push %%ebx\n\t"
-                       "movl (%3), %%ebx\n\t"    // Move 64-bit new_value into
-                       "movl 4(%3), %%ecx\n\t"   // ecx:ebx
-                       "lock; cmpxchg8b (%1)\n\t"// If edx:eax (old_value) same
-                       "pop %%ebx\n\t"
-                       : "=A" (prev)             // as contents of ptr:
-                       : "D" (ptr),              //   ecx:ebx => ptr
-                         "0" (old_value),        // else:
-                         "S" (&new_value)        //   old *ptr => edx:eax
-                       : "memory", "%ecx");
-  return prev;
-}
-#endif  // Compiler < gcc-4.1
-
-inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
-                                         Atomic64 old_val,
-                                         Atomic64 new_val) {
-  return __sync_val_compare_and_swap(ptr, old_val, new_val);
-}
-
-inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
-                                         Atomic64 new_val) {
-  Atomic64 old_val;
-
-  do {
-    old_val = *ptr;
-  } while (__sync_val_compare_and_swap(ptr, old_val, new_val) != old_val);
-
-  return old_val;
-}
-
-inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
-                                          Atomic64 increment) {
-  Atomic64 old_val, new_val;
-
-  do {
-    old_val = *ptr;
-    new_val = old_val + increment;
-  } while (__sync_val_compare_and_swap(ptr, old_val, new_val) != old_val);
-
-  return old_val + increment;
-}
-
-inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
-                                        Atomic64 increment) {
-  Atomic64 new_val = NoBarrier_AtomicIncrement(ptr, increment);
-  if (AtomicOps_Internalx86CPUFeatures.has_amd_lock_mb_bug) {
-    __asm__ __volatile__("lfence" : : : "memory");
-  }
-  return new_val;
-}
-
-inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
-  __asm__ __volatile__("movq %1, %%mm0\n\t"  // Use mmx reg for 64-bit atomic
-                       "movq %%mm0, %0\n\t"  // moves (ptr could be read-only)
-                       "emms\n\t"            // Empty mmx state/Reset FP regs
-                       : "=m" (*ptr)
-                       : "m" (value)
-                       : // mark the FP stack and mmx registers as clobbered
-			 "st", "st(1)", "st(2)", "st(3)", "st(4)",
-                         "st(5)", "st(6)", "st(7)", "mm0", "mm1",
-                         "mm2", "mm3", "mm4", "mm5", "mm6", "mm7");
-}
-
-inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
-  NoBarrier_Store(ptr, value);
-  MemoryBarrier();
-}
-
-inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
-  ATOMICOPS_COMPILER_BARRIER();
-  NoBarrier_Store(ptr, value);
-}
-
-inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
-  Atomic64 value;
-  __asm__ __volatile__("movq %1, %%mm0\n\t"  // Use mmx reg for 64-bit atomic
-                       "movq %%mm0, %0\n\t"  // moves (ptr could be read-only)
-                       "emms\n\t"            // Empty mmx state/Reset FP regs
-                       : "=m" (value)
-                       : "m" (*ptr)
-                       : // mark the FP stack and mmx registers as clobbered
-                         "st", "st(1)", "st(2)", "st(3)", "st(4)",
-                         "st(5)", "st(6)", "st(7)", "mm0", "mm1",
-                         "mm2", "mm3", "mm4", "mm5", "mm6", "mm7");
-  return value;
-}
-
-inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
-  Atomic64 value = NoBarrier_Load(ptr);
-  ATOMICOPS_COMPILER_BARRIER();
-  return value;
-}
-
-inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
-  MemoryBarrier();
-  return NoBarrier_Load(ptr);
-}
-
-#endif // defined(__x86_64__)
-
-inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
-                                       Atomic64 old_value,
-                                       Atomic64 new_value) {
-  Atomic64 x = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
-  if (AtomicOps_Internalx86CPUFeatures.has_amd_lock_mb_bug) {
-    __asm__ __volatile__("lfence" : : : "memory");
-  }
-  return x;
-}
-
-inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
-                                       Atomic64 old_value,
-                                       Atomic64 new_value) {
-  return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
-}
-
-} // namespace base::subtle
-} // namespace base
-
-#undef ATOMICOPS_COMPILER_BARRIER
-
-#endif  // BASE_ATOMICOPS_INTERNALS_X86_H_
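
In the portable header, acquire ordering on compare-and-swap is expressed
with the memory-order arguments instead of an explicit lfence (the Opteron
Rev E workaround above is dropped). A sketch under the same casting
assumption, with the failure order matching the success order as the new
header documents:

  #include <atomic>
  #include <stdint.h>

  typedef int64_t Atomic64;

  inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
                                         Atomic64 old_value,
                                         Atomic64 new_value) {
    reinterpret_cast<std::atomic<Atomic64>*>(const_cast<Atomic64*>(ptr))
        ->compare_exchange_strong(old_value, new_value,
                                  std::memory_order_acquire,
                                  std::memory_order_acquire);
    return old_value;  // Holds the observed value on failure.
  }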
diff --git a/src/base/atomicops.h b/src/base/atomicops.h
index 17e8a27..dcb273d 100644
--- a/src/base/atomicops.h
+++ b/src/base/atomicops.h
@@ -31,10 +31,6 @@
  * Author: Sanjay Ghemawat
  */
 
-// For atomic operations on statistics counters, see atomic_stats_counter.h.
-// For atomic operations on sequence numbers, see atomic_sequence_num.h.
-// For atomic operations on reference counts, see atomic_refcount.h.
-
 // Some fast atomic operations -- typically with machine-dependent
 // implementations.  This file may need editing as Google code is
 // ported to different architectures.
@@ -83,30 +79,14 @@
 // #else
 // # define AtomicWordCastType Atomic32
 // #endif
-// TODO(csilvers): figure out ARCH_PIII/ARCH_K8 (perhaps via ./configure?)
 // ------------------------------------------------------------------------
 
-#include "base/arm_instruction_set_select.h"
-
-// TODO(csilvers): match piii, not just __i386.  Also, match k8
 #if defined(__MACH__) && defined(__APPLE__)
 #include "base/atomicops-internals-macosx.h"
-#elif defined(__GNUC__) && defined(ARMV6)
-#include "base/atomicops-internals-arm-v6plus.h"
-#elif defined(ARMV3)
-#include "base/atomicops-internals-arm-generic.h"
 #elif defined(_WIN32)
 #include "base/atomicops-internals-windows.h"
-#elif defined(__GNUC__) && (defined(__i386) || defined(__x86_64__))
-#include "base/atomicops-internals-x86.h"
-#elif defined(__linux__) && defined(__PPC__)
-#include "base/atomicops-internals-linuxppc.h"
 #else
-// Assume x86 for now.  If you need to support a new architecture and
-// don't know how to implement atomic ops, you can probably get away
-// with using pthreads, since atomicops is only used by spinlock.h/cc
-//#error You need to implement atomic operations for this architecture
-#include "base/atomicops-internals-x86.h"
+#include "base/atomicops_internals_portable.h"
 #endif
 
 // Signed type that can hold a pointer and supports the atomic ops below, as
diff --git a/src/base/atomicops_internals_portable.h b/src/base/atomicops_internals_portable.h
new file mode 100644
index 0000000..e853c61
--- /dev/null
+++ b/src/base/atomicops_internals_portable.h
@@ -0,0 +1,219 @@
+// Copyright (c) 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file is an internal atomic implementation; use atomicops.h instead.
+//
+// This implementation uses C++11 atomics' member functions. The code base is
+// currently written assuming that atomicity is a property of individual
+// accesses rather than of C++11 memory locations (atomic objects). The burden
+// is on the programmer to ensure that a memory location accessed atomically is
+// never also accessed non-atomically (tsan should help with this).
+//
+// TODO(jfb) Modify the atomicops.h API and user code to declare atomic
+//           locations as truly atomic. See the static_assert below.
+//
+// Of note in this implementation:
+//  * All NoBarrier variants are implemented as relaxed.
+//  * All Barrier variants are implemented as sequentially-consistent.
+//  * Compare exchange's failure ordering is always the same as the success one
+//    (except for release, which fails as relaxed): using a weaker ordering is
+//    only valid under certain uses of compare exchange.
+//  * Acquire store doesn't exist in the C11 memory model; it is instead
+//    implemented as a relaxed store followed by a sequentially-consistent
+//    fence.
+//  * Release load doesn't exist in the C11 memory model; it is instead
+//    implemented as a sequentially-consistent fence followed by a relaxed
+//    load.
+//  * Atomic increment is expected to return the post-incremented value,
+//    whereas C11 fetch_add returns the previous value. The implementation
+//    therefore adds the increment to the fetched value before returning it
+//    (an extra add the compiler should be able to detect and optimize away).
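+//
+// Illustrative use only (hypothetical caller code, not part of this header):
+// a release store paired with an acquire load publishes data across threads.
+// Here g_ready is an Atomic32 flag and g_data a plain int:
+//
+//   // Producer thread: write the data, then signal that it is ready.
+//   g_data = 42;
+//   base::subtle::Release_Store(&g_ready, 1);
+//
+//   // Consumer thread: once the flag is observed, the data is visible.
+//   while (base::subtle::Acquire_Load(&g_ready) == 0) {}
+//   assert(g_data == 42);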
+
+#ifndef BASE_ATOMICOPS_INTERNALS_PORTABLE_H_
+#define BASE_ATOMICOPS_INTERNALS_PORTABLE_H_
+
+#include <stdint.h>
+#include <atomic>
+
+typedef int32_t Atomic32;
+#define BASE_HAS_ATOMIC64 1  // Use only in tests and base/atomic*
+
+#include "build/build_config.h"
+
+namespace base {
+namespace subtle {
+
+// This implementation is transitional and maintains the original API for
+// atomicops.h. This requires casting memory locations to the atomic types, and
+// assumes that the API and the C++11 implementation are layout-compatible,
+// which isn't true for all implementations or hardware platforms. The static
+// assertions should detect this issue; if one fires, this header must not be
+// used.
+//
+// TODO(jfb) If this header manages to stay committed, then the API should be
+//           modified, and all call sites updated.
+typedef volatile std::atomic<Atomic32>* AtomicLocation32;
+static_assert(sizeof(*(AtomicLocation32) nullptr) == sizeof(Atomic32),
+              "incompatible 32-bit atomic layout");
+
+inline void MemoryBarrier() {
+#if defined(__GLIBCXX__)
+  // Work around libstdc++ bug 51038 where atomic_thread_fence was declared but
+  // not defined, leading to the linker complaining about undefined references.
+  __atomic_thread_fence(std::memory_order_seq_cst);
+#else
+  std::atomic_thread_fence(std::memory_order_seq_cst);
+#endif
+}
+
+inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
+                                         Atomic32 old_value,
+                                         Atomic32 new_value) {
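+  // compare_exchange_strong takes |old_value| by reference and replaces it
+  // with the value actually observed when the exchange fails, so returning
+  // |old_value| yields the location's previous value in either case.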
+  ((AtomicLocation32)ptr)
+      ->compare_exchange_strong(old_value, new_value, std::memory_order_relaxed,
+                                std::memory_order_relaxed);
+  return old_value;
+}
+
+inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
+                                         Atomic32 new_value) {
+  return ((AtomicLocation32)ptr)
+      ->exchange(new_value, std::memory_order_relaxed);
+}
+
+inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
+                                          Atomic32 increment) {
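+  // fetch_add returns the pre-increment value; add |increment| back so the
+  // caller sees the post-increment value (see the file comment above).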
+  return increment + ((AtomicLocation32)ptr)
+                         ->fetch_add(increment, std::memory_order_relaxed);
+}
+
+inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
+                                        Atomic32 increment) {
+  return increment + ((AtomicLocation32)ptr)->fetch_add(increment);
+}
+
+inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
+                                       Atomic32 old_value,
+                                       Atomic32 new_value) {
+  ((AtomicLocation32)ptr)
+      ->compare_exchange_strong(old_value, new_value, std::memory_order_acquire,
+                                std::memory_order_acquire);
+  return old_value;
+}
+
+inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
+                                       Atomic32 old_value,
+                                       Atomic32 new_value) {
+  ((AtomicLocation32)ptr)
+      ->compare_exchange_strong(old_value, new_value, std::memory_order_release,
+                                std::memory_order_relaxed);
+  return old_value;
+}
+
+inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
+  ((AtomicLocation32)ptr)->store(value, std::memory_order_relaxed);
+}
+
+inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
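+  // C++11 has no acquire store: emulated as a relaxed store followed by a
+  // sequentially-consistent fence (see the file comment above).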
+  ((AtomicLocation32)ptr)->store(value, std::memory_order_relaxed);
+  MemoryBarrier();
+}
+
+inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
+  ((AtomicLocation32)ptr)->store(value, std::memory_order_release);
+}
+
+inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
+  return ((AtomicLocation32)ptr)->load(std::memory_order_relaxed);
+}
+
+inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
+  return ((AtomicLocation32)ptr)->load(std::memory_order_acquire);
+}
+
+inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
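+  // C++11 has no release load: emulated as a sequentially-consistent fence
+  // followed by a relaxed load (see the file comment above).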
+  MemoryBarrier();
+  return ((AtomicLocation32)ptr)->load(std::memory_order_relaxed);
+}
+
+#if defined(BASE_HAS_ATOMIC64)
+typedef int64_t Atomic64;
+
+typedef volatile std::atomic<Atomic64>* AtomicLocation64;
+static_assert(sizeof(*(AtomicLocation64) nullptr) == sizeof(Atomic64),
+              "incompatible 64-bit atomic layout");
+
+inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
+                                         Atomic64 old_value,
+                                         Atomic64 new_value) {
+  ((AtomicLocation64)ptr)
+      ->compare_exchange_strong(old_value, new_value, std::memory_order_relaxed,
+                                std::memory_order_relaxed);
+  return old_value;
+}
+
+inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
+                                         Atomic64 new_value) {
+  return ((AtomicLocation64)ptr)
+      ->exchange(new_value, std::memory_order_relaxed);
+}
+
+inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
+                                          Atomic64 increment) {
+  return increment + ((AtomicLocation64)ptr)
+                         ->fetch_add(increment, std::memory_order_relaxed);
+}
+
+inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
+                                        Atomic64 increment) {
+  return increment + ((AtomicLocation64)ptr)->fetch_add(increment);
+}
+
+inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
+                                       Atomic64 old_value,
+                                       Atomic64 new_value) {
+  ((AtomicLocation64)ptr)
+      ->compare_exchange_strong(old_value, new_value, std::memory_order_acquire,
+                                std::memory_order_acquire);
+  return old_value;
+}
+
+inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
+                                       Atomic64 old_value,
+                                       Atomic64 new_value) {
+  ((AtomicLocation64)ptr)
+      ->compare_exchange_strong(old_value, new_value, std::memory_order_release,
+                                std::memory_order_relaxed);
+  return old_value;
+}
+
+inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
+  ((AtomicLocation64)ptr)->store(value, std::memory_order_relaxed);
+}
+
+inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
+  ((AtomicLocation64)ptr)->store(value, std::memory_order_relaxed);
+  MemoryBarrier();
+}
+
+inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
+  ((AtomicLocation64)ptr)->store(value, std::memory_order_release);
+}
+
+inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
+  return ((AtomicLocation64)ptr)->load(std::memory_order_relaxed);
+}
+
+inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
+  return ((AtomicLocation64)ptr)->load(std::memory_order_acquire);
+}
+
+inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
+  MemoryBarrier();
+  return ((AtomicLocation64)ptr)->load(std::memory_order_relaxed);
+}
+#endif  // defined(BASE_HAS_ATOMIC64)
+}  // namespace subtle
+}  // namespace base
+
+#endif  // BASE_ATOMICOPS_INTERNALS_PORTABLE_H_