// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.

// AtomicPointer provides storage for a lock-free pointer.
// Platform-dependent implementation of AtomicPointer:
// - If the platform provides a cheap barrier, we use it with raw pointers
// - If cstdatomic is present (on newer versions of gcc, it is), we use
//   a cstdatomic-based AtomicPointer. However we prefer the memory
//   barrier based version, because at least on a gcc 4.4 32-bit build
//   on linux, we have encountered a buggy <cstdatomic>
//   implementation. Also, some <cstdatomic> implementations are much
//   slower than a memory-barrier based implementation (~16ns for
//   <cstdatomic> based acquire-load vs. ~1ns for a barrier based
//   acquire-load).
// This code is based on atomicops-internals-* in Google's perftools:
// http://code.google.com/p/google-perftools/source/browse/#svn%2Ftrunk%2Fsrc%2Fbase

#ifndef PORT_ATOMIC_POINTER_H_
#define PORT_ATOMIC_POINTER_H_

#include <stdint.h>
#ifdef LEVELDB_CSTDATOMIC_PRESENT
#include <cstdatomic>
#endif
#ifdef OS_WIN
#include <windows.h>
#endif
#ifdef OS_MACOSX
#include <libkern/OSAtomic.h>
#endif

#if defined(_M_X64) || defined(__x86_64__)
#define ARCH_CPU_X86_FAMILY 1
#elif defined(_M_IX86) || defined(__i386__) || defined(__i386)
#define ARCH_CPU_X86_FAMILY 1
#elif defined(__ARMEL__)
#define ARCH_CPU_ARM_FAMILY 1
#elif defined(__ppc__) || defined(__powerpc__) || defined(__powerpc64__)
#define ARCH_CPU_PPC_FAMILY 1
#endif
namespace leveldb {
namespace port {

// Define MemoryBarrier() if available
// Windows on x86
#if defined(OS_WIN) && defined(COMPILER_MSVC) && defined(ARCH_CPU_X86_FAMILY)
// windows.h already provides a MemoryBarrier(void) macro
// http://msdn.microsoft.com/en-us/library/ms684208(v=vs.85).aspx
#define LEVELDB_HAVE_MEMORY_BARRIER

// Gcc on x86
#elif defined(ARCH_CPU_X86_FAMILY) && defined(__GNUC__)
inline void MemoryBarrier() {
  // See http://gcc.gnu.org/ml/gcc/2003-04/msg01180.html for a discussion on
  // this idiom. Also see http://en.wikipedia.org/wiki/Memory_ordering.
  __asm__ __volatile__("" : : : "memory");
}
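// Note: on x86 this empty asm is only a compiler barrier. That is enough for
// the Acquire_Load/Release_Store pair below because x86 does not reorder
// load-load, load-store, or store-store accesses; the only hardware
// reordering (store-load) is not one that acquire/release semantics forbid.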
#define LEVELDB_HAVE_MEMORY_BARRIER

// Sun Studio
#elif defined(ARCH_CPU_X86_FAMILY) && defined(__SUNPRO_CC)
inline void MemoryBarrier() {
  // See http://gcc.gnu.org/ml/gcc/2003-04/msg01180.html for a discussion on
  // this idiom. Also see http://en.wikipedia.org/wiki/Memory_ordering.
  asm volatile("" : : : "memory");
}
#define LEVELDB_HAVE_MEMORY_BARRIER

// Mac OS
#elif defined(OS_MACOSX)
inline void MemoryBarrier() {
  OSMemoryBarrier();
}
#define LEVELDB_HAVE_MEMORY_BARRIER

// ARM Linux
#elif defined(ARCH_CPU_ARM_FAMILY) && defined(__linux__)
typedef void (*LinuxKernelMemoryBarrierFunc)(void);
// The Linux ARM kernel provides a highly optimized device-specific memory
// barrier function at a fixed memory address that is mapped in every
// user-level process.
//
// This beats using CPU-specific instructions which are, on single-core
// devices, unnecessary and very costly (e.g. ARMv7-A "dmb" takes more
// than 180ns on a Cortex-A8 like the one on a Nexus One). Benchmarking
// shows that the extra function call cost is completely negligible on
// multi-core devices.
//
inline void MemoryBarrier() {
  (*(LinuxKernelMemoryBarrierFunc)0xffff0fa0)();
}
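// The fixed address 0xffff0fa0 is the kernel's "__kuser_memory_barrier"
// entry in the ARM user-helper (vector) page; the kernel fills it with the
// cheapest barrier sequence appropriate for the CPU it is running on.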
#define LEVELDB_HAVE_MEMORY_BARRIER

// PPC
#elif defined(ARCH_CPU_PPC_FAMILY) && defined(__GNUC__)
inline void MemoryBarrier() {
  // TODO for some powerpc expert: is there a cheaper suitable variant?
  // Perhaps by having separate barriers for acquire and release ops.
  asm volatile("sync" : : : "memory");
}
#define LEVELDB_HAVE_MEMORY_BARRIER

#endif

// AtomicPointer built using platform-specific MemoryBarrier()
#if defined(LEVELDB_HAVE_MEMORY_BARRIER)
class AtomicPointer {
 private:
  void* rep_;
 public:
  AtomicPointer() { }
  explicit AtomicPointer(void* p) : rep_(p) {}
  inline void* NoBarrier_Load() const { return rep_; }
  inline void NoBarrier_Store(void* v) { rep_ = v; }
  inline void* Acquire_Load() const {
    void* result = rep_;
    MemoryBarrier();
    return result;
  }
  inline void Release_Store(void* v) {
    MemoryBarrier();
    rep_ = v;
  }
};
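// Illustrative sketch (not part of the original header) of the intended
// publish/consume pattern: a writer fully initializes an object and then
// publishes it with Release_Store(); a reader that observes the pointer via
// Acquire_Load() also observes the initialized contents. "Widget" and the
// two functions below are hypothetical names used only for illustration.
//
//   struct Widget { int payload; };
//   AtomicPointer shared(NULL);
//
//   void Publisher() {
//     Widget* w = new Widget;
//     w->payload = 42;          // plain writes to the object first...
//     shared.Release_Store(w);  // ...then publish the pointer
//   }
//
//   void Consumer() {
//     Widget* w = reinterpret_cast<Widget*>(shared.Acquire_Load());
//     if (w != NULL) {
//       // Safe: the acquire load orders this read after the release store
//       // that published the pointer.
//       int v = w->payload;
//       (void)v;
//     }
//   }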
// AtomicPointer based on <cstdatomic>
#elif defined(LEVELDB_CSTDATOMIC_PRESENT)
class AtomicPointer {
 private:
  std::atomic<void*> rep_;
 public:
  AtomicPointer() { }
  explicit AtomicPointer(void* v) : rep_(v) { }
  inline void* Acquire_Load() const {
    return rep_.load(std::memory_order_acquire);
  }
  inline void Release_Store(void* v) {
    rep_.store(v, std::memory_order_release);
  }
  inline void* NoBarrier_Load() const {
    return rep_.load(std::memory_order_relaxed);
  }
  inline void NoBarrier_Store(void* v) {
    rep_.store(v, std::memory_order_relaxed);
  }
};
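// Note: <cstdatomic> was the pre-C++11 (GCC 4.4-era) name of this header;
// C++11 standardized it as <atomic> with the same std::memory_order
// constants, so the class above maps directly onto the standard API on
// modern toolchains.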
// Atomic pointer based on sparc memory barriers
#elif defined(__sparcv9) && defined(__GNUC__)
class AtomicPointer {
 private:
  void* rep_;
 public:
  AtomicPointer() { }
  explicit AtomicPointer(void* v) : rep_(v) { }
  inline void* Acquire_Load() const {
    void* val;
    __asm__ __volatile__ (
        "ldx [%[rep_]], %[val] \n\t"
        "membar #LoadLoad|#LoadStore \n\t"
        : [val] "=r" (val)
        : [rep_] "r" (&rep_)
        : "memory");
    return val;
  }
  inline void Release_Store(void* v) {
    __asm__ __volatile__ (
        "membar #LoadStore|#StoreStore \n\t"
        "stx %[v], [%[rep_]] \n\t"
        :
        : [rep_] "r" (&rep_), [v] "r" (v)
        : "memory");
  }
  inline void* NoBarrier_Load() const { return rep_; }
  inline void NoBarrier_Store(void* v) { rep_ = v; }
};
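// In the SPARC version above, "membar #LoadLoad|#LoadStore" after the load
// keeps later loads and stores from moving ahead of it (acquire semantics),
// while "membar #LoadStore|#StoreStore" before the store keeps earlier loads
// and stores from moving past it (release semantics).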
// Atomic pointer based on ia64 acq/rel
#elif defined(__ia64) && defined(__GNUC__)
class AtomicPointer {
 private:
  void* rep_;
 public:
  AtomicPointer() { }
  explicit AtomicPointer(void* v) : rep_(v) { }
  inline void* Acquire_Load() const {
    void* val;
    __asm__ __volatile__ (
        "ld8.acq %[val] = [%[rep_]] \n\t"
        : [val] "=r" (val)
        : [rep_] "r" (&rep_)
        : "memory"
        );
    return val;
  }
  inline void Release_Store(void* v) {
    __asm__ __volatile__ (
        "st8.rel [%[rep_]] = %[v] \n\t"
        :
        : [rep_] "r" (&rep_), [v] "r" (v)
        : "memory"
        );
  }
  inline void* NoBarrier_Load() const { return rep_; }
  inline void NoBarrier_Store(void* v) { rep_ = v; }
};
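// On IA-64, ld8.acq and st8.rel are the architecture's native acquire-load
// and release-store forms of the 8-byte load/store, so no separate fence
// instruction is needed in this variant.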
// We have neither MemoryBarrier(), nor <cstdatomic>
#else
#error Please implement AtomicPointer for this platform.

#endif

#undef LEVELDB_HAVE_MEMORY_BARRIER
#undef ARCH_CPU_X86_FAMILY
#undef ARCH_CPU_ARM_FAMILY
#undef ARCH_CPU_PPC_FAMILY

}  // namespace port
}  // namespace leveldb

#endif  // PORT_ATOMIC_POINTER_H_