atomic.h

00001 /*
00002     Copyright 2005-2011 Intel Corporation.  All Rights Reserved.
00003 
00004     The source code contained or described herein and all documents related
00005     to the source code ("Material") are owned by Intel Corporation or its
00006     suppliers or licensors.  Title to the Material remains with Intel
00007     Corporation or its suppliers and licensors.  The Material is protected
00008     by worldwide copyright laws and treaty provisions.  No part of the
00009     Material may be used, copied, reproduced, modified, published, uploaded,
00010     posted, transmitted, distributed, or disclosed in any way without
00011     Intel's prior express written permission.
00012 
00013     No license under any patent, copyright, trade secret or other
00014     intellectual property right is granted to or conferred upon you by
00015     disclosure or delivery of the Materials, either expressly, by
00016     implication, inducement, estoppel or otherwise.  Any license under such
00017     intellectual property rights must be express and approved by Intel in
00018     writing.
00019 */
00020 
00021 #ifndef __TBB_atomic_H
00022 #define __TBB_atomic_H
00023 
00024 #include <cstddef>
00025 #include "tbb_stddef.h"
00026 
00027 #if _MSC_VER 
00028 #define __TBB_LONG_LONG __int64
00029 #else
00030 #define __TBB_LONG_LONG long long
00031 #endif /* _MSC_VER */
00032 
00033 #include "tbb_machine.h"
00034 
00035 #if defined(_MSC_VER) && !defined(__INTEL_COMPILER)
00036     // Workaround for overzealous compiler warnings 
00037     #pragma warning (push)
00038     #pragma warning (disable: 4244 4267)
00039 #endif
00040 
00041 namespace tbb {
00042 
//! Specifies the memory fencing applied around an atomic operation.
enum memory_semantics {
    //! Sequentially consistent: full fence on both sides of the operation.
    __TBB_full_fence,
    //! Acquire: subsequent loads/stores may not be reordered before the operation.
    acquire,
    //! Release: prior loads/stores may not be reordered after the operation.
    release
};
00052 
00054 namespace internal {
00055 
// __TBB_DECL_ATOMIC_FIELD(t,f,a) declares a field f of type t forced to
// alignment a.  Natural alignment is required so that the hardware atomic
// instructions used by tbb_machine.h operate on the field correctly.
#if __GNUC__ || __SUNPRO_CC || __IBMCPP__
#define __TBB_DECL_ATOMIC_FIELD(t,f,a) t f  __attribute__ ((aligned(a)));
#elif defined(__INTEL_COMPILER)||_MSC_VER >= 1300
// NOTE: if _MSC_VER is undefined here the preprocessor evaluates it as 0,
// so the comparison is well-defined.
#define __TBB_DECL_ATOMIC_FIELD(t,f,a) __declspec(align(a)) t f;
#else 
#error Do not know syntax for forcing alignment.
#endif /* __GNUC__ */
00063 
//! Maps an operand size S (in bytes) to suitably-aligned storage plus the
//! integral "word" type that the machine-level atomic primitives operate on.
//! Only sizes 1, 2, 4 and (optionally) 8 are supported; any other size
//! fails to compile because the primary template is never defined.
template<size_t S>
struct atomic_rep;           // Primary template declared, but never defined.

template<>
struct atomic_rep<1> {       // Specialization
    typedef int8_t word;
    int8_t value;            // single bytes are always suitably aligned
};
template<>
struct atomic_rep<2> {       // Specialization
    typedef int16_t word;
    __TBB_DECL_ATOMIC_FIELD(int16_t,value,2)
};
template<>
struct atomic_rep<4> {       // Specialization
#if _MSC_VER && __TBB_WORDSIZE==4
    // Work-around that avoids spurious /Wp64 warnings
    typedef intptr_t word;
#else
    typedef int32_t word;
#endif
    __TBB_DECL_ATOMIC_FIELD(int32_t,value,4)
};
#if __TBB_64BIT_ATOMICS
template<>
struct atomic_rep<8> {       // Specialization
    typedef int64_t word;
    __TBB_DECL_ATOMIC_FIELD(int64_t,value,8)
};
#endif
00094 
//! Maps (operand size, memory_semantics) to the machine-level atomic
//! primitives declared in tbb_machine.h.
template<size_t Size, memory_semantics M>
struct atomic_traits;        // Primary template declared, but not defined.

// Expands to a full specialization of atomic_traits for size S and fence M,
// forwarding each operation to the fenced machine primitive __TBB_<op><S><M>
// built by token pasting.  Used when the machine layer provides per-fence
// variants of the primitives.
#define __TBB_DECL_FENCED_ATOMIC_PRIMITIVES(S,M)                         \
    template<> struct atomic_traits<S,M> {                               \
        typedef atomic_rep<S>::word word;                               \
        inline static word compare_and_swap( volatile void* location, word new_value, word comparand ) {\
            return __TBB_CompareAndSwap##S##M(location,new_value,comparand);    \
        }                                                                       \
        inline static word fetch_and_add( volatile void* location, word addend ) { \
            return __TBB_FetchAndAdd##S##M(location,addend);                    \
        }                                                                       \
        inline static word fetch_and_store( volatile void* location, word value ) {\
            return __TBB_FetchAndStore##S##M(location,value);                   \
        }                                                                       \
    };

// Expands to a partial specialization for size S that ignores the requested
// fence M, forwarding to the full-fence-only primitives __TBB_<op><S>.
// Used when the machine layer does not distinguish fences (every operation
// then gets at least the requested semantics).
#define __TBB_DECL_ATOMIC_PRIMITIVES(S)                                  \
    template<memory_semantics M>                                         \
    struct atomic_traits<S,M> {                                          \
        typedef atomic_rep<S>::word word;                               \
        inline static word compare_and_swap( volatile void* location, word new_value, word comparand ) {\
            return __TBB_CompareAndSwap##S(location,new_value,comparand);       \
        }                                                                       \
        inline static word fetch_and_add( volatile void* location, word addend ) { \
            return __TBB_FetchAndAdd##S(location,addend);                       \
        }                                                                       \
        inline static word fetch_and_store( volatile void* location, word value ) {\
            return __TBB_FetchAndStore##S(location,value);                      \
        }                                                                       \
    };

#if __TBB_DECL_FENCED_ATOMICS
// Machine layer has per-fence primitives: instantiate each supported
// (size, semantics) combination explicitly.
__TBB_DECL_FENCED_ATOMIC_PRIMITIVES(1,__TBB_full_fence)
__TBB_DECL_FENCED_ATOMIC_PRIMITIVES(2,__TBB_full_fence)
__TBB_DECL_FENCED_ATOMIC_PRIMITIVES(4,__TBB_full_fence)
__TBB_DECL_FENCED_ATOMIC_PRIMITIVES(8,__TBB_full_fence)
__TBB_DECL_FENCED_ATOMIC_PRIMITIVES(1,acquire)
__TBB_DECL_FENCED_ATOMIC_PRIMITIVES(2,acquire)
__TBB_DECL_FENCED_ATOMIC_PRIMITIVES(4,acquire)
__TBB_DECL_FENCED_ATOMIC_PRIMITIVES(1,release)
__TBB_DECL_FENCED_ATOMIC_PRIMITIVES(2,release)
__TBB_DECL_FENCED_ATOMIC_PRIMITIVES(4,release)
#if __TBB_64BIT_ATOMICS
__TBB_DECL_FENCED_ATOMIC_PRIMITIVES(8,acquire)
__TBB_DECL_FENCED_ATOMIC_PRIMITIVES(8,release)
#endif
#else
// Machine layer has only full-fence primitives: one partial specialization
// per size covers all memory_semantics values.
__TBB_DECL_ATOMIC_PRIMITIVES(1)
__TBB_DECL_ATOMIC_PRIMITIVES(2)
__TBB_DECL_ATOMIC_PRIMITIVES(4)
#if __TBB_64BIT_ATOMICS
__TBB_DECL_ATOMIC_PRIMITIVES(8)
#endif
#endif
00150 
00152 
00154 #define __TBB_MINUS_ONE(T) (T(T(0)-T(1)))
00155 
00157 
//! Base functionality shared by all atomic<T> specializations: atomic load,
//! store, fetch_and_store and compare_and_swap, for any T whose size has an
//! atomic_rep specialization (1, 2, 4 or 8 bytes).
template<typename T>
struct atomic_impl {
protected:
    // Suitably aligned storage for the value, as required by the machine primitives.
    atomic_rep<sizeof(T)> rep;
private:
    //! Union used to pun between T and the integral machine word without
    //! aliasing casts (the machine primitives take/return words, not T).
    union converter {
        T value;
        typename atomic_rep<sizeof(T)>::word bits;
    };
public:
    typedef T value_type;

    //! Atomically replace the stored value with 'value', applying fence M;
    //! returns the value that was replaced.
    template<memory_semantics M>
    value_type fetch_and_store( value_type value ) {
        converter u, w;
        u.value = value;
        w.bits = internal::atomic_traits<sizeof(value_type),M>::fetch_and_store(&rep.value,u.bits);
        return w.value;
    }

    //! fetch_and_store with the default full fence.
    value_type fetch_and_store( value_type value ) {
        return fetch_and_store<__TBB_full_fence>(value);
    }

    //! Atomically: if the stored value equals 'comparand', replace it with
    //! 'value'.  Returns the value observed before the operation, with fence M.
    template<memory_semantics M>
    value_type compare_and_swap( value_type value, value_type comparand ) {
        converter u, v, w;
        u.value = value;
        v.value = comparand;
        w.bits = internal::atomic_traits<sizeof(value_type),M>::compare_and_swap(&rep.value,u.bits,v.bits);
        return w.value;
    }

    //! compare_and_swap with the default full fence.
    value_type compare_and_swap( value_type value, value_type comparand ) {
        return compare_and_swap<__TBB_full_fence>(value,comparand);
    }

    //! Atomic load with acquire semantics.
    operator value_type() const volatile {                // volatile qualifier here for backwards compatibility 
        converter w;
        w.bits = __TBB_load_with_acquire( rep.value );
        return w.value;
    }

protected:
    //! Atomic store with release semantics; returns the stored value so that
    //! derived operator= can chain assignments.
    value_type store_with_release( value_type rhs ) {
        converter u;
        u.value = rhs;
        __TBB_store_with_release(rep.value,u.bits);
        return rhs;
    }
};
00211 
00213 
//! Extends atomic_impl with fetch-and-add style arithmetic.
/** I is the stored type, D is the addend type, and StepType scales the
    addend: integral specializations use StepType char (step 1), while the
    pointer specialization uses StepType T so additions advance by whole
    objects (addend * sizeof(T) bytes). */
template<typename I, typename D, typename StepType>
struct atomic_impl_with_arithmetic: atomic_impl<I> {
public:
    typedef I value_type;

    //! Atomically add 'addend' (scaled by sizeof(StepType)) with fence M;
    //! returns the value held before the addition.
    template<memory_semantics M>
    value_type fetch_and_add( D addend ) {
        return value_type(internal::atomic_traits<sizeof(value_type),M>::fetch_and_add( &this->rep.value, addend*sizeof(StepType) ));
    }

    //! fetch_and_add with the default full fence.
    value_type fetch_and_add( D addend ) {
        return fetch_and_add<__TBB_full_fence>(addend);
    }

    //! Atomically increment; returns the pre-increment value, with fence M.
    template<memory_semantics M>
    value_type fetch_and_increment() {
        return fetch_and_add<M>(1);
    }

    //! fetch_and_increment with the default full fence.
    value_type fetch_and_increment() {
        return fetch_and_add(1);
    }

    //! Atomically decrement; returns the pre-decrement value, with fence M.
    template<memory_semantics M>
    value_type fetch_and_decrement() {
        return fetch_and_add<M>(__TBB_MINUS_ONE(D));
    }

    //! fetch_and_decrement with the default full fence.
    value_type fetch_and_decrement() {
        return fetch_and_add(__TBB_MINUS_ONE(D));
    }

public:
    //! Atomic +=; returns the new value.
    value_type operator+=( D addend ) {
        return fetch_and_add(addend)+addend;
    }

    //! Atomic -=; returns the new value.
    value_type operator-=( D addend ) {
        // Additive inverse of addend computed using binary minus,
        // instead of unary minus, for sake of avoiding compiler warnings.
        return operator+=(D(0)-addend);    
    }

    //! Atomic pre-increment; returns the new value.
    value_type operator++() {
        return fetch_and_add(1)+1;
    }

    //! Atomic pre-decrement; returns the new value.
    value_type operator--() {
        return fetch_and_add(__TBB_MINUS_ONE(D))-1;
    }

    //! Atomic post-increment; returns the old value.
    value_type operator++(int) {
        return fetch_and_add(1);
    }

    //! Atomic post-decrement; returns the old value.
    value_type operator--(int) {
        return fetch_and_add(__TBB_MINUS_ONE(D));
    }
};
00275 
00276 } /* Internal */
00278 
00280 
00282 template<typename T>
00283 struct atomic: internal::atomic_impl<T> {
00284     T operator=( T rhs ) {
00285         // "this" required here in strict ISO C++ because store_with_release is a dependent name
00286         return this->store_with_release(rhs);
00287     }
00288     atomic<T>& operator=( const atomic<T>& rhs ) {this->store_with_release(rhs); return *this;}
00289 };
00290 
// Generates a full specialization of atomic<T> for an integral type T with
// fetch-and-add arithmetic.  StepType char gives a step of sizeof(char)==1,
// so operator+= advances by the numeric addend.
#define __TBB_DECL_ATOMIC(T) \
    template<> struct atomic<T>: internal::atomic_impl_with_arithmetic<T,T,char> {  \
        T operator=( T rhs ) {return store_with_release(rhs);}  \
        atomic<T>& operator=( const atomic<T>& rhs ) {store_with_release(rhs); return *this;}  \
    };
00296 
#if __TBB_64BIT_ATOMICS
// otherwise size is verified by test_atomic
__TBB_DECL_ATOMIC(__TBB_LONG_LONG)
__TBB_DECL_ATOMIC(unsigned __TBB_LONG_LONG)
#endif
__TBB_DECL_ATOMIC(long)
__TBB_DECL_ATOMIC(unsigned long)

#if defined(_MSC_VER) && __TBB_WORDSIZE==4
/* Special version of __TBB_DECL_ATOMIC that avoids gratuitous warnings from cl /Wp64 option. 
   It is identical to __TBB_DECL_ATOMIC(unsigned) except that it replaces operator=(T) 
   with an operator=(U) that explicitly converts the U to a T.  Types T and U should be
   type synonyms on the platform.  Type U should be the wider variant of T from the
   perspective of /Wp64. */
#define __TBB_DECL_ATOMIC_ALT(T,U) \
    template<> struct atomic<T>: internal::atomic_impl_with_arithmetic<T,T,char> {  \
        T operator=( U rhs ) {return store_with_release(T(rhs));}  \
        atomic<T>& operator=( const atomic<T>& rhs ) {store_with_release(rhs); return *this;}  \
    };
__TBB_DECL_ATOMIC_ALT(unsigned,size_t)
__TBB_DECL_ATOMIC_ALT(int,ptrdiff_t)
#else
__TBB_DECL_ATOMIC(unsigned)
__TBB_DECL_ATOMIC(int)
#endif /* defined(_MSC_VER) && __TBB_WORDSIZE==4 */

__TBB_DECL_ATOMIC(unsigned short)
__TBB_DECL_ATOMIC(short)
__TBB_DECL_ATOMIC(char)
__TBB_DECL_ATOMIC(signed char)
__TBB_DECL_ATOMIC(unsigned char)

// On MSVC without /Zc:wchar_t, wchar_t is a typedef for unsigned short and
// the specialization above already covers it.
#if !defined(_MSC_VER)||defined(_NATIVE_WCHAR_T_DEFINED) 
__TBB_DECL_ATOMIC(wchar_t)
#endif /* !defined(_MSC_VER)||defined(_NATIVE_WCHAR_T_DEFINED) */
00332 
//! Specialization for atomic pointers.  StepType T makes arithmetic scale
//! addends by sizeof(T), so += advances by whole objects like ordinary
//! pointer arithmetic.
template<typename T> struct atomic<T*>: internal::atomic_impl_with_arithmetic<T*,ptrdiff_t,T> {
    T* operator=( T* rhs ) {
        // "this" required here in strict ISO C++ because store_with_release is a dependent name
        return this->store_with_release(rhs);
    }
    atomic<T*>& operator=( const atomic<T*>& rhs ) {
        this->store_with_release(rhs); return *this;
    }
    T* operator->() const {
        // (*this) invokes the conversion to T* (an acquire load); the caller
        // then dereferences the loaded pointer.
        return (*this);
    }
};
00346 
//! Specialization for atomic<void*>: load/store only, since pointer
//! arithmetic on void* is ill-formed.
template<> struct atomic<void*>: internal::atomic_impl<void*> {
    void* operator=( void* rhs ) {
        // "this" required here in strict ISO C++ because store_with_release is a dependent name
        return this->store_with_release(rhs);
    }
    atomic<void*>& operator=( const atomic<void*>& rhs ) {
        this->store_with_release(rhs); return *this;
    }
};
00357 
00358 } // namespace tbb
00359 
00360 #if defined(_MSC_VER) && !defined(__INTEL_COMPILER)
00361     #pragma warning (pop)
00362 #endif // warnings 4244, 4267 are back
00363 
00364 #endif /* __TBB_atomic_H */

Copyright © 2005-2011 Intel Corporation. All Rights Reserved.

Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are registered trademarks or trademarks of Intel Corporation or its subsidiaries in the United States and other countries.

* Other names and brands may be claimed as the property of others.