00001
00002
00003
00004
00005
00006
00007
00008
00009
00010
00011
00012
00013
00014
00015
00016
00017
00018
00019
00020
00021
00022
00023
00024
00025
00026
00027
00028
00029
00030
00031
00032
00033
00034
00035
00036
00037
00038 #ifndef BASE_ATOMICOPS_INTERNALS_X86_H__
00039 #define BASE_ATOMICOPS_INTERNALS_X86_H__
00040
// AtomicWord is a machine-pointer-sized integer: 64-bit on x86-64,
// 32-bit on ia32.  Atomic32 is always exactly 32 bits.
typedef intptr_t AtomicWord;
typedef int32_t Atomic32;
00043
00044
00045
00046
// Instruction-operand-size suffix used to build the asm mnemonics for
// AtomicWord-sized operations: "q" (quadword) on x86-64, "l" (doubleword)
// on ia32.  #undef'd again after the AtomicWord operations below.
#if defined(__x86_64__)
#define ATOMICOPS_WORD_SUFFIX "q"
#else
#define ATOMICOPS_WORD_SUFFIX "l"
#endif
00052
00053
00054
00055
00056
// CPU feature flags consulted by the operations below to pick an
// instruction sequence.  Presumably detected once at startup in the
// translation unit that defines AtomicOps_Internalx86CPUFeatures
// (not visible here — confirm in the corresponding .cc).
struct AtomicOps_x86CPUFeatureStruct {
  bool has_amd_lock_mb_bug;  // if set, an lfence is issued after locked ops
                             // (AMD erratum workaround; see Acquire_CompareAndSwap)
  bool has_sse2;             // mfence requires SSE2 on ia32
  bool has_cmpxchg16b;       // cmpxchg16b available (not used in this chunk)
};
extern struct AtomicOps_x86CPUFeatureStruct AtomicOps_Internalx86CPUFeatures;
00064
// Atomically: if (*ptr == old_value) *ptr = new_value.  Returns the value
// *ptr held before the operation — equal to old_value iff the swap
// happened.  Ordering beyond the lock prefix itself is the caller's
// problem; see the Acquire_/Release_ wrappers below.
inline AtomicWord CompareAndSwap(volatile AtomicWord* ptr,
                                 AtomicWord old_value,
                                 AtomicWord new_value) {
  AtomicWord prev;
  // cmpxchg compares the accumulator (old_value, tied to %eax/%rax via
  // the "0" constraint) with *ptr; on match it stores new_value.  Either
  // way the original *ptr ends up in the accumulator, captured as prev
  // through "=a".  The "memory" clobber stands in for the write to *ptr.
  __asm__ __volatile__("lock; cmpxchg" ATOMICOPS_WORD_SUFFIX " %1,%2"
                       : "=a" (prev)
                       : "q" (new_value), "m" (*ptr), "0" (old_value)
                       : "memory");
  return prev;
}
00075
// Atomically stores new_value into *ptr and returns the previous *ptr.
// xchg with a memory operand locks the bus implicitly, so no explicit
// lock prefix is needed.
inline AtomicWord AtomicExchange(volatile AtomicWord* ptr,
                                 AtomicWord new_value) {
  // new_value is both input ("0") and output ("=r"): the register is
  // swapped with *ptr, leaving the old memory value in it.
  __asm__ __volatile__("xchg" ATOMICOPS_WORD_SUFFIX " %1,%0"
                       : "=r" (new_value)
                       : "m" (*ptr), "0" (new_value)
                       : "memory");
  return new_value;
}
00084
// Atomically adds increment to *ptr and returns the NEW value of *ptr.
// (xadd leaves the pre-increment value in temp, hence temp + increment.)
inline AtomicWord AtomicIncrement(volatile AtomicWord* ptr, AtomicWord increment) {
  AtomicWord temp = increment;
  __asm__ __volatile__("lock; xadd" ATOMICOPS_WORD_SUFFIX " %0,%1"
                       : "+r" (temp), "+m" (*ptr)
                       : : "memory");
  // temp now holds the old *ptr value.
  return temp + increment;
}
00093
00094 #undef ATOMICOPS_WORD_SUFFIX
00095
00096
00097 inline AtomicWord Acquire_CompareAndSwap(volatile AtomicWord* ptr,
00098 AtomicWord old_value,
00099 AtomicWord new_value) {
00100 AtomicWord x = CompareAndSwap(ptr, old_value, new_value);
00101 if (AtomicOps_Internalx86CPUFeatures.has_amd_lock_mb_bug) {
00102 __asm__ __volatile__("lfence" : : : "memory");
00103 }
00104 return x;
00105 }
00106
00107 inline AtomicWord Release_CompareAndSwap(volatile AtomicWord* ptr,
00108 AtomicWord old_value,
00109 AtomicWord new_value) {
00110 return CompareAndSwap(ptr, old_value, new_value);
00111 }
00112
// Compiler-only barrier: forbids the compiler from reordering memory
// accesses across this point.  Emits no machine instructions.
#define ATOMICOPS_COMPILER_BARRIER() __asm__ __volatile__("" : : : "memory")
00114
#if defined(__x86_64__)

// All x86-64 processors have SSE2, so mfence is always available.
inline void MemoryBarrier() {
  __asm__ __volatile__("mfence" : : : "memory");
}

// Plain store followed by a full barrier.
inline void Acquire_Store(volatile AtomicWord* ptr, AtomicWord value) {
  *ptr = value;
  MemoryBarrier();
}

#else

// ia32: mfence needs SSE2.  On pre-SSE2 CPUs fall back to a locked
// xchg on a dummy local, which is also a full memory barrier.
inline void MemoryBarrier() {
  if (AtomicOps_Internalx86CPUFeatures.has_sse2) {
    __asm__ __volatile__("mfence" : : : "memory");
  } else {
    AtomicWord x = 0;
    AtomicExchange(&x, 0);  // xchg locks the bus -> acts as mfence
  }
}

inline void Acquire_Store(volatile AtomicWord* ptr, AtomicWord value) {
  if (AtomicOps_Internalx86CPUFeatures.has_sse2) {
    *ptr = value;
    __asm__ __volatile__("mfence" : : : "memory");
  } else {
    AtomicExchange(ptr, value);  // store + full barrier in one locked op
  }
}

#endif
00147
// Store with release semantics.  On x86 an ordinary store already has
// release ordering at the hardware level (stores are not reordered with
// earlier loads or stores), so only a compiler barrier is needed to keep
// the compiler from sinking preceding accesses below the store.
inline void Release_Store(volatile AtomicWord* ptr, AtomicWord value) {
  ATOMICOPS_COMPILER_BARRIER();
  *ptr = value;
  // NOTE(review): the run of blank lines here looks like an excised
  // explanatory comment from the original source.  No trailing fence is
  // required: x86's memory model makes the plain store above sufficient
  // for release ordering.
}
00159
00160 inline AtomicWord Acquire_Load(volatile const AtomicWord* ptr) {
00161 AtomicWord value = *ptr;
00162 MemoryBarrier();
00163 return value;
00164 }
00165
00166 inline AtomicWord Release_Load(volatile const AtomicWord* ptr) {
00167 MemoryBarrier();
00168 return *ptr;
00169 }
00170
00171
00172
00173
// On platforms where AtomicWord is wider than 32 bits (x86-64), provide
// a parallel family of Atomic32 overloads.  When the types coincide,
// the AtomicWord operations above already cover Atomic32.
#ifndef INT32_EQUALS_INTPTR

// 32-bit compare-and-swap; mirrors the AtomicWord overload above, with
// the operand size hard-coded to "l" (doubleword).
inline Atomic32 CompareAndSwap(volatile Atomic32* ptr,
                               Atomic32 old_value,
                               Atomic32 new_value) {
  Atomic32 prev;
  // old_value goes in %eax ("0"); cmpxchgl swaps in new_value on match
  // and the pre-operation *ptr comes back in %eax as prev ("=a").
  __asm__ __volatile__("lock; cmpxchgl %1,%2"
                       : "=a" (prev)
                       : "q" (new_value), "m" (*ptr), "0" (old_value)
                       : "memory");
  return prev;
}
00186
// Atomically stores new_value into *ptr and returns the previous *ptr.
// xchg with a memory operand is implicitly locked.
inline Atomic32 AtomicExchange(volatile Atomic32* ptr,
                               Atomic32 new_value) {
  __asm__ __volatile__("xchgl %1,%0"
                       : "=r" (new_value)
                       : "m" (*ptr), "0" (new_value)
                       : "memory");
  return new_value;  // now holds the old *ptr value
}
00195
// Atomically adds increment to *ptr and returns the NEW value of *ptr
// (xadd leaves the old value in temp, hence temp + increment).
inline Atomic32 AtomicIncrement(volatile Atomic32* ptr, Atomic32 increment) {
  Atomic32 temp = increment;
  __asm__ __volatile__("lock; xaddl %0,%1"
                       : "+r" (temp), "+m" (*ptr)
                       : : "memory");
  // temp now holds the old *ptr value.
  return temp + increment;
}
00204
00205 inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
00206 Atomic32 old_value,
00207 Atomic32 new_value) {
00208 Atomic32 x = CompareAndSwap(ptr, old_value, new_value);
00209 if (AtomicOps_Internalx86CPUFeatures.has_amd_lock_mb_bug) {
00210 __asm__ __volatile__("lfence" : : : "memory");
00211 }
00212 return x;
00213 }
00214
00215 inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
00216 Atomic32 old_value,
00217 Atomic32 new_value) {
00218 return CompareAndSwap(ptr, old_value, new_value);
00219 }
00220
// 32-bit store followed by a full barrier; the order of the two
// statements is the contract — do not reorder.
inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
  *ptr = value;
  MemoryBarrier();
}
00225
// 32-bit store with release semantics.  As with the AtomicWord variant,
// x86 stores already have hardware release ordering, so a compiler
// barrier before the store is all that is required.
inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
  ATOMICOPS_COMPILER_BARRIER();
  *ptr = value;
  // NOTE(review): blank lines below appear to be an excised comment
  // from the original source; no trailing fence is needed because x86
  // does not reorder a store with earlier stores.
}
00237
00238 inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
00239 Atomic32 value = *ptr;
00240 MemoryBarrier();
00241 return value;
00242 }
00243
00244 inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
00245 MemoryBarrier();
00246 return *ptr;
00247 }
00248
00249 #endif
00250
00251 #undef ATOMICOPS_COMPILER_BARRIER
00252
00253 #endif // BASE_ATOMICOPS_INTERNALS_X86_H__