// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

// This file is an internal atomic implementation; use atomicops.h instead.
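//
// Callers are expected to go through that dispatching header rather than
// include this file directly.  A minimal usage sketch (illustrative only,
// assuming V8's usual include paths):
//
//   #include "atomicops.h"
//
//   v8::internal::Atomic32 counter = 0;
//   v8::internal::Barrier_AtomicIncrement(&counter, 1);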

#ifndef V8_ATOMICOPS_INTERNALS_X86_MACOSX_H_
#define V8_ATOMICOPS_INTERNALS_X86_MACOSX_H_

#include <libkern/OSAtomic.h>

namespace v8 {
namespace internal {

inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
                                         Atomic32 old_value,
                                         Atomic32 new_value) {
  Atomic32 prev_value;
  do {
    if (OSAtomicCompareAndSwap32(old_value, new_value,
                                 const_cast<Atomic32*>(ptr))) {
      // The swap took place, so the previous value was old_value.
      return old_value;
    }
    // The swap failed, but OSAtomicCompareAndSwap32 only reports a boolean,
    // not the value it observed.  Re-read *ptr; if it no longer equals
    // old_value we can return that as the previous value.  If it equals
    // old_value again (another thread changed it and changed it back),
    // returning it would falsely signal success, so retry the swap instead.
    prev_value = *ptr;
  } while (prev_value == old_value);
  return prev_value;
}
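
// Usage sketch (illustrative only; Claimed and claim_word are hypothetical
// names, not part of this interface): a caller detects success by comparing
// the returned value with old_value.
//
//   bool Claimed(volatile Atomic32* claim_word) {
//     // Returns old_value (0) exactly when this thread performed the swap.
//     return NoBarrier_CompareAndSwap(claim_word, 0, 1) == 0;
//   }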

inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
                                         Atomic32 new_value) {
  // libkern has no plain atomic-exchange primitive, so emulate one with a
  // compare-and-swap loop: keep re-reading the current value until the swap
  // from that value to new_value succeeds.
  Atomic32 old_value;
  do {
    old_value = *ptr;
  } while (!OSAtomicCompareAndSwap32(old_value, new_value,
                                     const_cast<Atomic32*>(ptr)));
  return old_value;
}

inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
                                          Atomic32 increment) {
  // OSAtomicAdd32 returns the new (post-increment) value, which is what the
  // AtomicIncrement contract requires.
  return OSAtomicAdd32(increment, const_cast<Atomic32*>(ptr));
}

inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
                                        Atomic32 increment) {
  return OSAtomicAdd32Barrier(increment, const_cast<Atomic32*>(ptr));
}

inline void MemoryBarrier() {
  // OSMemoryBarrier is a full barrier: it orders both loads and stores.
  OSMemoryBarrier();
}

inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
                                       Atomic32 old_value,
                                       Atomic32 new_value) {
  Atomic32 prev_value;
  do {
    if (OSAtomicCompareAndSwap32Barrier(old_value, new_value,
                                        const_cast<Atomic32*>(ptr))) {
      return old_value;
    }
    // See the comment in NoBarrier_CompareAndSwap for why a failed swap is
    // retried while *ptr still reads as old_value.
    prev_value = *ptr;
  } while (prev_value == old_value);
  return prev_value;
}

inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
                                       Atomic32 old_value,
                                       Atomic32 new_value) {
  // The libkern interface does not distinguish between Acquire and Release
  // memory barriers; they are equivalent, so reuse the Acquire version.
  return Acquire_CompareAndSwap(ptr, old_value, new_value);
}

inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
  *ptr = value;
}

// libkern exposes no acquire-only or release-only fence, so the ordered
// stores and loads below conservatively pair the plain access with a full
// MemoryBarrier().

inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
  *ptr = value;
  MemoryBarrier();
}

inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
  MemoryBarrier();
  *ptr = value;
}

inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
  return *ptr;
}

inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
  Atomic32 value = *ptr;
  MemoryBarrier();
  return value;
}

inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
  MemoryBarrier();
  return *ptr;
}
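
// Usage sketch (illustrative only; TryAcquireLock and ReleaseLock are
// hypothetical names, not part of this interface): the Acquire/Release
// operations above are exactly what a simple spinlock needs.  Acquiring must
// not let later accesses float above the lock acquisition, and releasing
// must not let earlier accesses sink below the unlocking store.
//
//   bool TryAcquireLock(volatile Atomic32* lock_word) {
//     return Acquire_CompareAndSwap(lock_word, 0, 1) == 0;
//   }
//
//   void ReleaseLock(volatile Atomic32* lock_word) {
//     Release_Store(lock_word, 0);
//   }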

#ifdef __LP64__

// 64-bit implementation on 64-bit platforms.  These mirror the Atomic32
// routines above, built on the 64-bit libkern primitives.

inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
                                         Atomic64 old_value,
                                         Atomic64 new_value) {
  Atomic64 prev_value;
  do {
    if (OSAtomicCompareAndSwap64(old_value, new_value,
                                 const_cast<Atomic64*>(ptr))) {
      return old_value;
    }
    // See the comment in the 32-bit NoBarrier_CompareAndSwap.
    prev_value = *ptr;
  } while (prev_value == old_value);
  return prev_value;
}

inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
                                         Atomic64 new_value) {
  Atomic64 old_value;
  do {
    old_value = *ptr;
  } while (!OSAtomicCompareAndSwap64(old_value, new_value,
                                     const_cast<Atomic64*>(ptr)));
  return old_value;
}

inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
                                          Atomic64 increment) {
  return OSAtomicAdd64(increment, const_cast<Atomic64*>(ptr));
}

inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
                                        Atomic64 increment) {
  return OSAtomicAdd64Barrier(increment, const_cast<Atomic64*>(ptr));
}

inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
                                       Atomic64 old_value,
                                       Atomic64 new_value) {
  Atomic64 prev_value;
  do {
    if (OSAtomicCompareAndSwap64Barrier(old_value, new_value,
                                        const_cast<Atomic64*>(ptr))) {
      return old_value;
    }
    prev_value = *ptr;
  } while (prev_value == old_value);
  return prev_value;
}

inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
                                       Atomic64 old_value,
                                       Atomic64 new_value) {
  // The libkern interface does not distinguish between Acquire and Release
  // memory barriers; they are equivalent, so reuse the Acquire version.
  return Acquire_CompareAndSwap(ptr, old_value, new_value);
}

inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
  *ptr = value;
}

inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
  *ptr = value;
  MemoryBarrier();
}

inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
  MemoryBarrier();
  *ptr = value;
}

inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
  return *ptr;
}

inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
  Atomic64 value = *ptr;
  MemoryBarrier();
  return value;
}

inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
  MemoryBarrier();
  return *ptr;
}

#endif  // defined(__LP64__)

// MacOS uses long for intptr_t, so AtomicWord and Atomic32 are always
// different types on the Mac, even when they are the same size.  We need to
// explicitly cast from AtomicWord to Atomic32/64 to implement the AtomicWord
// interface.
#ifdef __LP64__
#define AtomicWordCastType Atomic64
#else
#define AtomicWordCastType Atomic32
#endif
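
// Sanity sketch (hypothetical, not part of this file): the reinterpret_casts
// below are only sound because AtomicWord and AtomicWordCastType have the
// same size and representation.  A compile-time check could spell that out:
//
//   STATIC_ASSERT(sizeof(AtomicWord) == sizeof(AtomicWordCastType));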

inline AtomicWord NoBarrier_CompareAndSwap(volatile AtomicWord* ptr,
                                           AtomicWord old_value,
                                           AtomicWord new_value) {
  return NoBarrier_CompareAndSwap(
      reinterpret_cast<volatile AtomicWordCastType*>(ptr),
      old_value, new_value);
}

inline AtomicWord NoBarrier_AtomicExchange(volatile AtomicWord* ptr,
                                           AtomicWord new_value) {
  return NoBarrier_AtomicExchange(
      reinterpret_cast<volatile AtomicWordCastType*>(ptr), new_value);
}

inline AtomicWord NoBarrier_AtomicIncrement(volatile AtomicWord* ptr,
                                            AtomicWord increment) {
  return NoBarrier_AtomicIncrement(
      reinterpret_cast<volatile AtomicWordCastType*>(ptr), increment);
}

inline AtomicWord Barrier_AtomicIncrement(volatile AtomicWord* ptr,
                                          AtomicWord increment) {
  return Barrier_AtomicIncrement(
      reinterpret_cast<volatile AtomicWordCastType*>(ptr), increment);
}

inline AtomicWord Acquire_CompareAndSwap(volatile AtomicWord* ptr,
                                         AtomicWord old_value,
                                         AtomicWord new_value) {
  return v8::internal::Acquire_CompareAndSwap(
      reinterpret_cast<volatile AtomicWordCastType*>(ptr),
      old_value, new_value);
}

inline AtomicWord Release_CompareAndSwap(volatile AtomicWord* ptr,
                                         AtomicWord old_value,
                                         AtomicWord new_value) {
  return v8::internal::Release_CompareAndSwap(
      reinterpret_cast<volatile AtomicWordCastType*>(ptr),
      old_value, new_value);
}

inline void NoBarrier_Store(volatile AtomicWord* ptr, AtomicWord value) {
  NoBarrier_Store(
      reinterpret_cast<volatile AtomicWordCastType*>(ptr), value);
}

inline void Acquire_Store(volatile AtomicWord* ptr, AtomicWord value) {
  return v8::internal::Acquire_Store(
      reinterpret_cast<volatile AtomicWordCastType*>(ptr), value);
}

inline void Release_Store(volatile AtomicWord* ptr, AtomicWord value) {
  return v8::internal::Release_Store(
      reinterpret_cast<volatile AtomicWordCastType*>(ptr), value);
}

inline AtomicWord NoBarrier_Load(volatile const AtomicWord* ptr) {
  return NoBarrier_Load(
      reinterpret_cast<volatile const AtomicWordCastType*>(ptr));
}

inline AtomicWord Acquire_Load(volatile const AtomicWord* ptr) {
  return v8::internal::Acquire_Load(
      reinterpret_cast<volatile const AtomicWordCastType*>(ptr));
}

inline AtomicWord Release_Load(volatile const AtomicWord* ptr) {
  return v8::internal::Release_Load(
      reinterpret_cast<volatile const AtomicWordCastType*>(ptr));
}

#undef AtomicWordCastType

} }  // namespace v8::internal

#endif  // V8_ATOMICOPS_INTERNALS_X86_MACOSX_H_