enumerable_thread_specific.h

00001 /*
00002     Copyright 2005-2011 Intel Corporation.  All Rights Reserved.
00003 
00004     The source code contained or described herein and all documents related
00005     to the source code ("Material") are owned by Intel Corporation or its
00006     suppliers or licensors.  Title to the Material remains with Intel
00007     Corporation or its suppliers and licensors.  The Material is protected
00008     by worldwide copyright laws and treaty provisions.  No part of the
00009     Material may be used, copied, reproduced, modified, published, uploaded,
00010     posted, transmitted, distributed, or disclosed in any way without
00011     Intel's prior express written permission.
00012 
00013     No license under any patent, copyright, trade secret or other
00014     intellectual property right is granted to or conferred upon you by
00015     disclosure or delivery of the Materials, either expressly, by
00016     implication, inducement, estoppel or otherwise.  Any license under such
00017     intellectual property rights must be express and approved by Intel in
00018     writing.
00019 */
00020 
00021 #ifndef __TBB_enumerable_thread_specific_H
00022 #define __TBB_enumerable_thread_specific_H
00023 
00024 #include "concurrent_vector.h"
00025 #include "tbb_thread.h"
00026 #include "cache_aligned_allocator.h"
00027 #include "aligned_space.h"
00028 #include <string.h>  // for memcpy
00029 
00030 #if _WIN32||_WIN64
00031 #include "machine/windows_api.h"
00032 #else
00033 #include <pthread.h>
00034 #endif
00035 
00036 namespace tbb {
00037 
00039 enum ets_key_usage_type { ets_key_per_instance, ets_no_key };
00040 
00041 namespace interface6 {
00042  
00044     namespace internal { 
00045 
00046         template<ets_key_usage_type ETS_key_type>
        // Common base for enumerable_thread_specific: a lock-free, open-addressed
        // hash table mapping a per-thread key to a pointer at the thread's local
        // element.  Growth never moves entries: each resize prepends a new, larger
        // `array` to a singly linked list (my_root), and old generations are kept
        // until table_clear().
00047         class ets_base: tbb::internal::no_copy {
00048         protected:
00049 #if _WIN32||_WIN64
00050             typedef DWORD key_type;
00051 #else
00052             typedef pthread_t key_type;
00053 #endif
00054 #if __TBB_GCC_3_3_PROTECTED_BROKEN
00055         public:
00056 #endif
00057             struct slot;
00058 
            // One generation of the hash table.  The slot storage immediately
            // follows the array header in the same allocation (see at()).
00059             struct array {
00060                 array* next;
00061                 size_t lg_size;
00062                 slot& at( size_t k ) {
                    // Slots live directly after this header: index into (this+1).
00063                     return ((slot*)(void*)(this+1))[k];
00064                 }
00065                 size_t size() const {return (size_t)1<<lg_size;}
00066                 size_t mask() const {return size()-1;}
00067                 size_t start( size_t h ) const {
                    // Use the *upper* bits of the hash (see hash() below).
00068                     return h>>(8*sizeof(size_t)-lg_size);
00069                 }
00070             };
            // One table entry; key==0 means empty (claimed atomically via CAS).
00071             struct slot {
00072                 key_type key;
00073                 void* ptr;
00074                 bool empty() const {return !key;}
00075                 bool match( key_type k ) const {return key==k;}
00076                 bool claim( key_type k ) {
00077                     __TBB_ASSERT(sizeof(tbb::atomic<key_type>)==sizeof(key_type), NULL);
                    // Atomically claim an empty slot; succeeds iff key was still 0.
00078                     return tbb::internal::punned_cast<tbb::atomic<key_type>*>(&key)->compare_and_swap(k,0)==0;
00079                 }
00080             };
00081 #if __TBB_GCC_3_3_PROTECTED_BROKEN
00082         protected:
00083 #endif
00084         
            // Derive the table key from the current thread's id by bitwise copy.
            // NOTE(review): assumes the id's object representation fits key_type —
            // confirm on platforms where pthread_t is not integral.
00085             static key_type key_of_current_thread() {
00086                tbb::tbb_thread::id id = tbb::this_tbb_thread::get_id();
00087                key_type k;
00088                memcpy( &k, &id, sizeof(k) );
00089                return k;
00090             }
00091 
00093 
            // Head of the generation list (newest first) and number of distinct
            // keys ever inserted; both updated concurrently.
00095             atomic<array*> my_root;
00096             atomic<size_t> my_count;
            // Hooks implemented by enumerable_thread_specific (storage policy).
00097             virtual void* create_local() = 0;
00098             virtual void* create_array(size_t _size) = 0;  // _size in bytes
00099             virtual void free_array(void* ptr, size_t _size) = 0; // _size in bytes
            // Allocate one generation with 2^lg_size zero-initialized slots.
00100             array* allocate( size_t lg_size ) {
00101                 size_t n = 1<<lg_size;  
00102                 array* a = static_cast<array*>(create_array( sizeof(array)+n*sizeof(slot) ));
00103                 a->lg_size = lg_size;
00104                 std::memset( a+1, 0, n*sizeof(slot) );
00105                 return a;
00106             }
00107             void free(array* a) {
00108                 size_t n = 1<<(a->lg_size);  
00109                 free_array( (void *)a, size_t(sizeof(array)+n*sizeof(slot)) );
00110             }
00111             static size_t hash( key_type k ) {
00112                 // Multiplicative hashing.  Client should use *upper* bits.
00113                 // casts required for Mac gcc4.* compiler
00114 #if __TBB_WORDSIZE == 4
00115                 return uintptr_t(k)*0x9E3779B9;
00116 #else
00117                 return uintptr_t(k)*0x9E3779B97F4A7C15;
00118 #endif 
00119             } 
00120         
00121             ets_base() {my_root=NULL; my_count=0;}
00122             virtual ~ets_base();  // g++ complains if this is not virtual...
00123             void* table_lookup( bool& exists );
00124             void table_clear();
            // Linear-probe the newest generation for key k; returns the matching
            // slot, or the first empty slot if k is absent.  Requires my_root!=NULL.
00125             slot& table_find( key_type k ) {
00126                 size_t h = hash(k);
00127                 array* r = my_root;
00128                 size_t mask = r->mask();
00129                 for(size_t i = r->start(h);;i=(i+1)&mask) {
00130                     slot& s = r->at(i);
00131                     if( s.empty() || s.match(k) )
00132                         return s;
00133                 }
00134             }
            // Pre-size an empty table to match `other` so a subsequent copy does
            // not need to grow.  Must be called on a fresh (empty) table.
00135             void table_reserve_for_copy( const ets_base& other ) {
00136                 __TBB_ASSERT(!my_root,NULL);
00137                 __TBB_ASSERT(!my_count,NULL);
00138                 if( other.my_root ) {
00139                     array* a = allocate(other.my_root->lg_size);
00140                     a->next = NULL;
00141                     my_root = a;
00142                     my_count = other.my_count;
00143                 }
00144             }
00145         };
00146 
00147         template<ets_key_usage_type ETS_key_type>
        // The derived class is expected to have called table_clear() already;
        // destruction with a live table would leak, hence the assertion.
00148         ets_base<ETS_key_type>::~ets_base() {
00149             __TBB_ASSERT(!my_root, NULL);
00150         }
00151 
00152         template<ets_key_usage_type ETS_key_type>
        // Free every generation of the table and reset the key count.
        // NOTE(review): walks my_root without synchronization — presumably the
        // caller guarantees no concurrent lookups; confirm at call sites.
00153         void ets_base<ETS_key_type>::table_clear() {
00154             while( array* r = my_root ) {
00155                 my_root = r->next;
00156                 free(r);
00157             }
00158             my_count = 0;
00159         }
00160                 
00161         template<ets_key_usage_type ETS_key_type>
        // Find (or lazily create) the current thread's local element.
        // Sets `exists` to true if the element already existed.  Lock-free:
        // searches every generation; on a miss, creates the element, grows the
        // table if more than half full (CAS-publishing a new generation), then
        // claims a slot in the newest generation.
00162         void* ets_base<ETS_key_type>::table_lookup( bool& exists ) {
00163             const key_type k = key_of_current_thread(); 
00164 
00165             __TBB_ASSERT(k!=0,NULL);
00166             void* found;
00167             size_t h = hash(k);
            // Phase 1: probe each generation, newest first.
00168             for( array* r=my_root; r; r=r->next ) {
00169                 size_t mask=r->mask();
00170                 for(size_t i = r->start(h); ;i=(i+1)&mask) {
00171                     slot& s = r->at(i);
00172                     if( s.empty() ) break;
00173                     if( s.match(k) ) {
00174                         if( r==my_root ) {
00175                             // Success at top level
00176                             exists = true;
00177                             return s.ptr;
00178                         } else {
00179                             // Success at some other level.  Need to insert at top level.
00180                             exists = true;
00181                             found = s.ptr;
00182                             goto insert;
00183                         }
00184                     }
00185                 }
00186             }
00187             // Key does not yet exist
00188             exists = false;
00189             found = create_local();
00190             {
00191                 size_t c = ++my_count;
00192                 array* r = my_root;
                // Grow when the newest generation would exceed 50% load.
00193                 if( !r || c>r->size()/2 ) {
00194                     size_t s = r ? r->lg_size : 2;
00195                     while( c>size_t(1)<<(s-1) ) ++s;
00196                     array* a = allocate(s);
                    // CAS loop: publish our array unless a competitor already
                    // published one at least as large.
00197                     for(;;) {
00198                         a->next = my_root;
00199                         array* new_r = my_root.compare_and_swap(a,r);
00200                         if( new_r==r ) break;
00201                         if( new_r->lg_size>=s ) {
00202                             // Another thread inserted an equal or bigger array, so our array is superfluous.
00203                             free(a);
00204                             break;
00205                         }
00206                         r = new_r;
00207                     }
00208                 }
00209             }
00210         insert:
00211             // Guaranteed to be room for it, and it is not present, so search for empty slot and grab it.
00212             array* ir = my_root;
00213             size_t mask = ir->mask();
00214             for(size_t i = ir->start(h);;i=(i+1)&mask) {
00215                 slot& s = ir->at(i);
00216                 if( s.empty() ) {
                    // claim() may fail if another thread grabbed this slot; keep probing.
00217                     if( s.claim(k) ) {
00218                         s.ptr = found;
00219                         return found;
00220                     }
00221                 }
00222             }
00223         }
00224 
00226         template <>
        // Specialization that fronts the generic table with a native TLS key:
        // the fast path is a single TLS read, falling back to the base-class
        // hash table on first access by a thread.
00227         class ets_base<ets_key_per_instance>: protected ets_base<ets_no_key> {
00228             typedef ets_base<ets_no_key> super;
00229 #if _WIN32||_WIN64
00230             typedef DWORD tls_key_t;
00231             void create_key() { my_key = TlsAlloc(); }
00232             void destroy_key() { TlsFree(my_key); }
            // NOTE(review): the Windows accessors are non-const while the pthread
            // ones below are const — harmless here, but inconsistent.
00233             void set_tls(void * value) { TlsSetValue(my_key, (LPVOID)value); }
00234             void* get_tls() { return (void *)TlsGetValue(my_key); }
00235 #else
00236             typedef pthread_key_t tls_key_t;
00237             void create_key() { pthread_key_create(&my_key, NULL); }
00238             void destroy_key() { pthread_key_delete(my_key); }
00239             void set_tls( void * value ) const { pthread_setspecific(my_key, value); }
00240             void* get_tls() const { return pthread_getspecific(my_key); }
00241 #endif
00242             tls_key_t my_key;
            // Re-declared pure so this class also remains abstract.
00243             virtual void* create_local() = 0;
00244             virtual void* create_array(size_t _size) = 0;  // _size in bytes
00245             virtual void free_array(void* ptr, size_t _size) = 0; // size in bytes
00246         public:
00247             ets_base() {create_key();}
00248             ~ets_base() {destroy_key();}
            // Fast path: cached TLS value.  Slow path: base-table lookup, then
            // cache the result in TLS for subsequent calls by this thread.
00249             void* table_lookup( bool& exists ) {
00250                 void* found = get_tls();
00251                 if( found ) {
00252                     exists=true;
00253                 } else {
00254                     found = super::table_lookup(exists);
00255                     set_tls(found);
00256                 }
00257                 return found; 
00258             }
            // Recycling the key invalidates every thread's cached TLS value.
00259             void table_clear() {
00260                 destroy_key();
00261                 create_key(); 
00262                 super::table_clear();
00263             }
00264         };
00265 
00267         template< typename Container, typename Value >
        // Random-access iterator over the flattened per-thread elements.
        // Caches a pointer to the current element (my_value); every mutation of
        // my_index resets the cache to NULL so operator* re-derives it.
00268         class enumerable_thread_specific_iterator 
00269 #if defined(_WIN64) && defined(_MSC_VER) 
00270             // Ensure that Microsoft's internal template function _Val_type works correctly.
00271             : public std::iterator<std::random_access_iterator_tag,Value>
00272 #endif /* defined(_WIN64) && defined(_MSC_VER) */
00273         {
00275         
00276             Container *my_container;
00277             typename Container::size_type my_index;
00278             mutable Value *my_value;
00279         
00280             template<typename C, typename T>
00281             friend enumerable_thread_specific_iterator<C,T> operator+( ptrdiff_t offset, 
00282                                                                        const enumerable_thread_specific_iterator<C,T>& v );
00283         
00284             template<typename C, typename T, typename U>
00285             friend bool operator==( const enumerable_thread_specific_iterator<C,T>& i, 
00286                                     const enumerable_thread_specific_iterator<C,U>& j );
00287         
00288             template<typename C, typename T, typename U>
00289             friend bool operator<( const enumerable_thread_specific_iterator<C,T>& i, 
00290                                    const enumerable_thread_specific_iterator<C,U>& j );
00291         
00292             template<typename C, typename T, typename U>
00293             friend ptrdiff_t operator-( const enumerable_thread_specific_iterator<C,T>& i, const enumerable_thread_specific_iterator<C,U>& j );
00294             
00295             template<typename C, typename U> 
00296             friend class enumerable_thread_specific_iterator;
00297         
00298             public:
00299         
            // const_cast allows one iterator template to serve both the mutable
            // and const iterator typedefs of the container.
00300             enumerable_thread_specific_iterator( const Container &container, typename Container::size_type index ) : 
00301                 my_container(&const_cast<Container &>(container)), my_index(index), my_value(NULL) {}
00302         
00304             enumerable_thread_specific_iterator() : my_container(NULL), my_index(0), my_value(NULL) {}
00305         
            // Converting constructor (e.g. iterator -> const_iterator).
00306             template<typename U>
00307             enumerable_thread_specific_iterator( const enumerable_thread_specific_iterator<Container, U>& other ) :
00308                     my_container( other.my_container ), my_index( other.my_index), my_value( const_cast<Value *>(other.my_value) ) {}
00309         
00310             enumerable_thread_specific_iterator operator+( ptrdiff_t offset ) const {
00311                 return enumerable_thread_specific_iterator(*my_container, my_index + offset);
00312             }
00313         
00314             enumerable_thread_specific_iterator &operator+=( ptrdiff_t offset ) {
00315                 my_index += offset;
00316                 my_value = NULL;
00317                 return *this;
00318             }
00319         
00320             enumerable_thread_specific_iterator operator-( ptrdiff_t offset ) const {
00321                 return enumerable_thread_specific_iterator( *my_container, my_index-offset );
00322             }
00323         
00324             enumerable_thread_specific_iterator &operator-=( ptrdiff_t offset ) {
00325                 my_index -= offset;
00326                 my_value = NULL;
00327                 return *this;
00328             }
00329         
            // Dereference via the cached pointer; populate the cache on first use.
00330             Value& operator*() const {
00331                 Value* value = my_value;
00332                 if( !value ) {
00333                     value = my_value = reinterpret_cast<Value *>(&(*my_container)[my_index].value);
00334                 }
00335                 __TBB_ASSERT( value==reinterpret_cast<Value *>(&(*my_container)[my_index].value), "corrupt cache" );
00336                 return *value;
00337             }
00338         
00339             Value& operator[]( ptrdiff_t k ) const {
00340                return (*my_container)[my_index + k].value;
00341             }
00342         
00343             Value* operator->() const {return &operator*();}
00344         
00345             enumerable_thread_specific_iterator& operator++() {
00346                 ++my_index;
00347                 my_value = NULL;
00348                 return *this;
00349             }
00350         
00351             enumerable_thread_specific_iterator& operator--() {
00352                 --my_index;
00353                 my_value = NULL;
00354                 return *this;
00355             }
00356         
00358             enumerable_thread_specific_iterator operator++(int) {
00359                 enumerable_thread_specific_iterator result = *this;
00360                 ++my_index;
00361                 my_value = NULL;
00362                 return result;
00363             }
00364         
00366             enumerable_thread_specific_iterator operator--(int) {
00367                 enumerable_thread_specific_iterator result = *this;
00368                 --my_index;
00369                 my_value = NULL;
00370                 return result;
00371             }
00372         
00373             // STL support
00374             typedef ptrdiff_t difference_type;
00375             typedef Value value_type;
00376             typedef Value* pointer;
00377             typedef Value& reference;
00378             typedef std::random_access_iterator_tag iterator_category;
00379         };
00380         
00381         template<typename Container, typename T>
        // Enables the commuted form "offset + iterator"; equivalent to the
        // member operator+.
00382         enumerable_thread_specific_iterator<Container,T> operator+( ptrdiff_t offset, 
00383                                                                     const enumerable_thread_specific_iterator<Container,T>& v ) {
            // Fix: my_container is a Container*, but the iterator constructor
            // takes const Container& (see its declaration above), so the pointer
            // must be dereferenced; the un-starred form fails to compile when
            // this overload is instantiated.
00384             return enumerable_thread_specific_iterator<Container,T>( *v.my_container, v.my_index + offset );
00385         }
00386         
        // Relational and difference operators for enumerable_thread_specific_iterator.
        // Equality compares both index and container; the ordering operators only
        // compare indices, which assumes both iterators refer to the same container.
00387         template<typename Container, typename T, typename U>
00388         bool operator==( const enumerable_thread_specific_iterator<Container,T>& i, 
00389                          const enumerable_thread_specific_iterator<Container,U>& j ) {
00390             return i.my_index==j.my_index && i.my_container == j.my_container;
00391         }
00392         
00393         template<typename Container, typename T, typename U>
00394         bool operator!=( const enumerable_thread_specific_iterator<Container,T>& i, 
00395                          const enumerable_thread_specific_iterator<Container,U>& j ) {
00396             return !(i==j);
00397         }
00398         
00399         template<typename Container, typename T, typename U>
00400         bool operator<( const enumerable_thread_specific_iterator<Container,T>& i, 
00401                         const enumerable_thread_specific_iterator<Container,U>& j ) {
00402             return i.my_index<j.my_index;
00403         }
00404         
        // The remaining orderings are derived from operator< in the usual way.
00405         template<typename Container, typename T, typename U>
00406         bool operator>( const enumerable_thread_specific_iterator<Container,T>& i, 
00407                         const enumerable_thread_specific_iterator<Container,U>& j ) {
00408             return j<i;
00409         }
00410         
00411         template<typename Container, typename T, typename U>
00412         bool operator>=( const enumerable_thread_specific_iterator<Container,T>& i, 
00413                          const enumerable_thread_specific_iterator<Container,U>& j ) {
00414             return !(i<j);
00415         }
00416         
00417         template<typename Container, typename T, typename U>
00418         bool operator<=( const enumerable_thread_specific_iterator<Container,T>& i, 
00419                          const enumerable_thread_specific_iterator<Container,U>& j ) {
00420             return !(j<i);
00421         }
00422         
00423         template<typename Container, typename T, typename U>
00424         ptrdiff_t operator-( const enumerable_thread_specific_iterator<Container,T>& i, 
00425                              const enumerable_thread_specific_iterator<Container,U>& j ) {
00426             return i.my_index-j.my_index;
00427         }
00428 
00429     template<typename SegmentedContainer, typename Value >
        // Input iterator that walks a container of containers as one flat
        // sequence, skipping empty inner containers.  Invariant: whenever
        // outer_iter != end, inner_iter is valid within *outer_iter.
00430         class segmented_iterator
00431 #if defined(_WIN64) && defined(_MSC_VER)
00432         : public std::iterator<std::input_iterator_tag, Value>
00433 #endif
00434         {
00435             template<typename C, typename T, typename U>
00436             friend bool operator==(const segmented_iterator<C,T>& i, const segmented_iterator<C,U>& j);
00437 
00438             template<typename C, typename T, typename U>
00439             friend bool operator!=(const segmented_iterator<C,T>& i, const segmented_iterator<C,U>& j);
00440             
00441             template<typename C, typename U> 
00442             friend class segmented_iterator;
00443 
00444             public:
00445 
                // my_segcont==NULL denotes a default-constructed (unusable) iterator.
00446                 segmented_iterator() {my_segcont = NULL;}
00447 
                // Constructs an end-of-sequence iterator for the given container.
00448                 segmented_iterator( const SegmentedContainer& _segmented_container ) : 
00449                     my_segcont(const_cast<SegmentedContainer*>(&_segmented_container)),
00450                     outer_iter(my_segcont->end()) { }
00451 
00452                 ~segmented_iterator() {}
00453 
00454                 typedef typename SegmentedContainer::iterator outer_iterator;
00455                 typedef typename SegmentedContainer::value_type InnerContainer;
00456                 typedef typename InnerContainer::iterator inner_iterator;
00457 
00458                 // STL support
00459                 typedef ptrdiff_t difference_type;
00460                 typedef Value value_type;
00461                 typedef typename SegmentedContainer::size_type size_type;
00462                 typedef Value* pointer;
00463                 typedef Value& reference;
00464                 typedef std::input_iterator_tag iterator_category;
00465 
00466                 // Copy Constructor
00467                 template<typename U>
00468                 segmented_iterator(const segmented_iterator<SegmentedContainer, U>& other) :
00469                     my_segcont(other.my_segcont),
00470                     outer_iter(other.outer_iter),
00471                     // can we assign a default-constructed iterator to inner if we're at the end?
00472                     inner_iter(other.inner_iter)
00473                 {}
00474 
00475                 // assignment
00476                 template<typename U>
00477                 segmented_iterator& operator=( const segmented_iterator<SegmentedContainer, U>& other) {
                    // inner_iter is only meaningful (and only copied) when not at end.
00478                     if(this != &other) {
00479                         my_segcont = other.my_segcont;
00480                         outer_iter = other.outer_iter;
00481                         if(outer_iter != my_segcont->end()) inner_iter = other.inner_iter;
00482                     }
00483                     return *this;
00484                 }
00485 
00486                 // allow assignment of outer iterator to segmented iterator.  Once it is
00487                 // assigned, move forward until a non-empty inner container is found or
00488                 // the end of the outer container is reached.
00489                 segmented_iterator& operator=(const outer_iterator& new_outer_iter) {
00490                     __TBB_ASSERT(my_segcont != NULL, NULL);
00491                     // check that this iterator points to something inside the segmented container
00492                     for(outer_iter = new_outer_iter ;outer_iter!=my_segcont->end(); ++outer_iter) {
00493                         if( !outer_iter->empty() ) {
00494                             inner_iter = outer_iter->begin();
00495                             break;
00496                         }
00497                     }
00498                     return *this;
00499                 }
00500 
00501                 // pre-increment
00502                 segmented_iterator& operator++() {
00503                     advance_me();
00504                     return *this;
00505                 }
00506 
00507                 // post-increment
00508                 segmented_iterator operator++(int) {
00509                     segmented_iterator tmp = *this;
00510                     operator++();
00511                     return tmp;
00512                 }
00513 
                // Comparison against an outer iterator: equal only when positioned
                // at its beginning (or both at end).
00514                 bool operator==(const outer_iterator& other_outer) const {
00515                     __TBB_ASSERT(my_segcont != NULL, NULL);
00516                     return (outer_iter == other_outer &&
00517                             (outer_iter == my_segcont->end() || inner_iter == outer_iter->begin()));
00518                 }
00519 
00520                 bool operator!=(const outer_iterator& other_outer) const {
00521                     return !operator==(other_outer);
00522 
00523                 }
00524 
00525                 // (i)* RHS
00526                 reference operator*() const {
00527                     __TBB_ASSERT(my_segcont != NULL, NULL);
00528                     __TBB_ASSERT(outer_iter != my_segcont->end(), "Dereferencing a pointer at end of container");
00529                     __TBB_ASSERT(inner_iter != outer_iter->end(), NULL); // should never happen
00530                     return *inner_iter;
00531                 }
00532 
00533                 // i->
00534                 pointer operator->() const { return &operator*();}
00535 
00536             private:
00537                 SegmentedContainer*             my_segcont;
00538                 outer_iterator outer_iter;
00539                 inner_iterator inner_iter;
00540 
                // Step to the next element, hopping over empty inner containers.
00541                 void advance_me() {
00542                     __TBB_ASSERT(my_segcont != NULL, NULL);
00543                     __TBB_ASSERT(outer_iter != my_segcont->end(), NULL); // not true if there are no inner containers
00544                     __TBB_ASSERT(inner_iter != outer_iter->end(), NULL); // not true if the inner containers are all empty.
00545                     ++inner_iter;
00546                     while(inner_iter == outer_iter->end() && ++outer_iter != my_segcont->end()) {
00547                         inner_iter = outer_iter->begin();
00548                     }
00549                 }
00550         };    // segmented_iterator
00551 
        // Equality for two segmented_iterators: compares container, then outer
        // position, then inner position (inner is irrelevant at end).
00552         template<typename SegmentedContainer, typename T, typename U>
00553         bool operator==( const segmented_iterator<SegmentedContainer,T>& i, 
00554                          const segmented_iterator<SegmentedContainer,U>& j ) {
00555             if(i.my_segcont != j.my_segcont) return false;
00556             if(i.my_segcont == NULL) return true;
00557             if(i.outer_iter != j.outer_iter) return false;
00558             if(i.outer_iter == i.my_segcont->end()) return true;
00559             return i.inner_iter == j.inner_iter;
00560         }
00561 
00562         // !=
00563         template<typename SegmentedContainer, typename T, typename U>
00564         bool operator!=( const segmented_iterator<SegmentedContainer,T>& i, 
00565                          const segmented_iterator<SegmentedContainer,U>& j ) {
00566             return !(i==j);
00567         }
00568 
        // Holder that destroys, but never constructs, a T in aligned storage;
        // used to run a destructor on storage constructed elsewhere.
00569         template<typename T>
00570         struct destruct_only: tbb::internal::no_copy {
00571             tbb::aligned_space<T,1> value;
00572             ~destruct_only() {value.begin()[0].~T();}
00573         };
00574 
        // Construction policies used by callback_leaf (below): default-construct,
        // copy an exemplar, or call a user functor for the initial value.
00575         template<typename T>
00576         struct construct_by_default: tbb::internal::no_assign {
00577             void construct(void*where) {new(where) T();} // C++ note: the () in T() ensure zero initialization.
00578             construct_by_default( int ) {}
00579         };
00580 
00581         template<typename T>
00582         struct construct_by_exemplar: tbb::internal::no_assign {
00583             const T exemplar;
00584             void construct(void*where) {new(where) T(exemplar);}
00585             construct_by_exemplar( const T& t ) : exemplar(t) {}
00586         };
00587 
00588         template<typename T, typename Finit>
00589         struct construct_by_finit: tbb::internal::no_assign {
00590             Finit f;
00591             void construct(void* where) {new(where) T(f());}
00592             construct_by_finit( const Finit& f_ ) : f(f_) {}
00593         };
00594 
00595         // storage for initialization function pointer
        // Type-erased interface over the construct_by_* policies so that
        // enumerable_thread_specific can hold any initializer uniformly.
00596         template<typename T>
00597         class callback_base {
00598         public:
00599             // Clone *this
00600             virtual callback_base* clone() = 0;
00601             // Destruct and free *this
00602             virtual void destroy() = 0;
00603             // Need virtual destructor to satisfy GCC compiler warning
00604             virtual ~callback_base() { }
00605             // Construct T at where
00606             virtual void construct(void* where) = 0;
00607         };
00608 
        // Concrete callback: pairs the interface with one Constructor policy.
        // Instances are created/destroyed exclusively through tbb_allocator
        // (make/clone/destroy); the constructor is private to enforce this.
00609         template <typename T, typename Constructor>
00610         class callback_leaf: public callback_base<T>, Constructor {
00611             template<typename X> callback_leaf( const X& x ) : Constructor(x) {}
00612 
00613             typedef typename tbb::tbb_allocator<callback_leaf> my_allocator_type;
00614 
00615             /*override*/ callback_base<T>* clone() {
00616                 void* where = my_allocator_type().allocate(1);
00617                 return new(where) callback_leaf(*this);
00618             }
00619 
00620             /*override*/ void destroy() {
00621                 my_allocator_type().destroy(this);
00622                 my_allocator_type().deallocate(this,1);
00623             }
00624 
00625             /*override*/ void construct(void* where) {
00626                 Constructor::construct(where);
00627             }  
00628         public:
            // Factory: allocate and placement-construct a leaf from initializer x.
00629             template<typename X>
00630             static callback_base<T>* make( const X& x ) {
00631                 void* where = my_allocator_type().allocate(1);
00632                 return new(where) callback_leaf(x);
00633             }
00634         };
00635 
00637 
00642         template<typename U, size_t ModularSize>
        // Raw storage for one thread-local U, padded up to the next cache-line
        // multiple (ModularSize == sizeof(U) % NFS_MaxLineSize) to avoid false
        // sharing between adjacent elements.  Construction happens elsewhere
        // via placement new; unconstruct() runs the destructor in place.
00643         struct ets_element {
00644             char value[ModularSize==0 ? sizeof(U) : sizeof(U)+(tbb::internal::NFS_MaxLineSize-ModularSize)];
00645             void unconstruct() {
00646                 tbb::internal::punned_cast<U*>(&value)->~U();
00647             }
00648         };
00649 
00650     } // namespace internal
00652 
00654 
00673     template <typename T, 
00674               typename Allocator=cache_aligned_allocator<T>, 
00675               ets_key_usage_type ETS_key_type=ets_no_key > 
00676     class enumerable_thread_specific: internal::ets_base<ETS_key_type> { 
00677 
00678         template<typename U, typename A, ets_key_usage_type C> friend class enumerable_thread_specific;
00679     
00680         typedef internal::ets_element<T,sizeof(T)%tbb::internal::NFS_MaxLineSize> padded_element;
00681 
00683         template<typename I>
00684         class generic_range_type: public blocked_range<I> {
00685         public:
00686             typedef T value_type;
00687             typedef T& reference;
00688             typedef const T& const_reference;
00689             typedef I iterator;
00690             typedef ptrdiff_t difference_type;
00691             generic_range_type( I begin_, I end_, size_t grainsize_ = 1) : blocked_range<I>(begin_,end_,grainsize_) {} 
00692             template<typename U>
00693             generic_range_type( const generic_range_type<U>& r) : blocked_range<I>(r.begin(),r.end(),r.grainsize()) {} 
00694             generic_range_type( generic_range_type& r, split ) : blocked_range<I>(r,split()) {}
00695         };
00696     
00697         typedef typename Allocator::template rebind< padded_element >::other padded_allocator_type;
00698         typedef tbb::concurrent_vector< padded_element, padded_allocator_type > internal_collection_type;
00699         
00700         internal::callback_base<T> *my_construct_callback;
00701 
00702         internal_collection_type my_locals;
00703    
        // Append fresh padded storage to the concurrent vector and construct the
        // thread's T in it via the stored callback; returns the element address.
00704         /*override*/ void* create_local() {
00705 #if TBB_DEPRECATED
            // Old push_back returned an index; new push_back returns an iterator.
00706             void* lref = &my_locals[my_locals.push_back(padded_element())];
00707 #else
00708             void* lref = &*my_locals.push_back(padded_element());
00709 #endif
00710             my_construct_callback->construct(lref);
00711             return lref;
00712         } 
00713 
        // Run the destructor of every constructed local element; the vector's
        // raw storage itself is released separately by the container.
00714         void unconstruct_locals() {
00715             for(typename internal_collection_type::iterator cvi = my_locals.begin(); cvi != my_locals.end(); ++cvi) {
00716                 cvi->unconstruct();
00717             }
00718         }
00719 
        // Allocator for the raw lookup-table arrays managed by ets_base;
        // allocations are made in uintptr_t-sized units.
        typedef typename Allocator::template rebind< uintptr_t >::other array_allocator_type;

        // _size is in bytes
00723         /*override*/ void* create_array(size_t _size) {
00724             size_t nelements = (_size + sizeof(uintptr_t) -1) / sizeof(uintptr_t);
00725             return array_allocator_type().allocate(nelements);
00726         }
00727 
00728         /*override*/ void free_array( void* _ptr, size_t _size) {
00729             size_t nelements = (_size + sizeof(uintptr_t) -1) / sizeof(uintptr_t);
00730             array_allocator_type().deallocate( reinterpret_cast<uintptr_t *>(_ptr),nelements);
00731         }
00732    
    public:
    
        //! Basic STL-style container types.
        typedef Allocator allocator_type;
        typedef T value_type;
        typedef T& reference;
        typedef const T& const_reference;
        typedef T* pointer;
        typedef const T* const_pointer;
        typedef typename internal_collection_type::size_type size_type;
        typedef typename internal_collection_type::difference_type difference_type;
    
        // Iterator types: iterate over the flat collection of thread locals.
        typedef typename internal::enumerable_thread_specific_iterator< internal_collection_type, value_type > iterator;
        typedef typename internal::enumerable_thread_specific_iterator< internal_collection_type, const value_type > const_iterator;

        // Parallel range types, for use with parallel_for / parallel_reduce.
        typedef generic_range_type< iterator > range_type;
        typedef generic_range_type< const_iterator > const_range_type;
00752     
        //! Default constructor.  Each thread's local is default-constructed on first access.
        enumerable_thread_specific() : 
            my_construct_callback( internal::callback_leaf<T,internal::construct_by_default<T> >::make(/*dummy argument*/0) ) 
        {}

        //! Constructor taking a factory functor; each thread's local is initialized from finit().
        template <typename Finit>
        enumerable_thread_specific( Finit finit ) : 
            my_construct_callback( internal::callback_leaf<T,internal::construct_by_finit<T,Finit> >::make( finit ) ) 
        {}
    
        //! Constructor taking an exemplar; each thread's local is copy-constructed from it.
        enumerable_thread_specific(const T& exemplar) : 
            my_construct_callback( internal::callback_leaf<T,internal::construct_by_exemplar<T> >::make( exemplar ) )
        {}
    
        //! Destructor: releases the construct callback, then all locals and tables.
        ~enumerable_thread_specific() { 
            my_construct_callback->destroy();
            this->clear();  // deallocation before the derived class is finished destructing
            // So free(array *) is still accessible
        }
00775       
00777         reference local() {
00778             bool exists;
00779             return local(exists);
00780         }
00781 
00783         reference local(bool& exists)  {
00784             void* ptr = this->table_lookup(exists);
00785             return *(T*)ptr;
00786         }
00787 
        //! Number of local copies that currently exist.
        size_type size() const { return my_locals.size(); }
    
        //! True if no thread has created a local copy yet.
        bool empty() const { return my_locals.empty(); }
    
        //! Begin iterator over the local copies.
        iterator begin() { return iterator( my_locals, 0 ); }
        //! End iterator over the local copies.
        iterator end() { return iterator(my_locals, my_locals.size() ); }
    
        //! Begin const iterator over the local copies.
        const_iterator begin() const { return const_iterator(my_locals, 0); }
    
        //! End const iterator over the local copies.
        const_iterator end() const { return const_iterator(my_locals, my_locals.size()); }

        //! Range over the locals, for parallel algorithms.
        range_type range( size_t grainsize=1 ) { return range_type( begin(), end(), grainsize ); } 
        
        //! Const range over the locals, for parallel algorithms.
        const_range_type range( size_t grainsize=1 ) const { return const_range_type( begin(), end(), grainsize ); }
00810 
        //! Destroy all local copies and the per-thread lookup tables.
        /** The construct callback is kept, so locals can be re-created on the
            next access. */
        void clear() {
            unconstruct_locals();       // destroy the T objects first
            my_locals.clear();          // then drop their storage
            this->table_clear();        // finally forget the thread->slot mapping
            // callback is not destroyed
            // exemplar is not destroyed
        }
00819 
    private:

        // Shared implementation of the copy constructors and assignments;
        // defined after the class body.  Needs the friend declaration above to
        // read other's private tables when U/A2/C2 differ from ours.
        template<typename U, typename A2, ets_key_usage_type C2>
        void internal_copy( const enumerable_thread_specific<U, A2, C2>& other);

    public:

        //! Cross-type copy constructor (each local T is copy-constructed from
        //! other's U under the same thread key).
        template<typename U, typename Alloc, ets_key_usage_type Cachetype>
        enumerable_thread_specific( const enumerable_thread_specific<U, Alloc, Cachetype>& other ) : internal::ets_base<ETS_key_type> ()
        {
            internal_copy(other);
        }

        //! Copy constructor.
        enumerable_thread_specific( const enumerable_thread_specific& other ) : internal::ets_base<ETS_key_type> ()
        {
            internal_copy(other);
        }
00837 
    private:

        // Common implementation of both operator= overloads.  Self-assignment
        // is detected by address identity (compared through void* because the
        // other instantiation is a different type).  The old callback is
        // destroyed and zeroed before internal_copy installs a clone of
        // other's callback, so a throwing copy cannot leave a dangling one.
        template<typename U, typename A2, ets_key_usage_type C2>
        enumerable_thread_specific &
        internal_assign(const enumerable_thread_specific<U, A2, C2>& other) {
            if(static_cast<void *>( this ) != static_cast<const void *>( &other )) {
                this->clear(); 
                my_construct_callback->destroy();
                my_construct_callback = 0;
                internal_copy( other );
            }
            return *this;
        }
00851 
    public:

        //! Assignment.
        enumerable_thread_specific& operator=(const enumerable_thread_specific& other) {
            return internal_assign(other);
        }

        //! Cross-type assignment (locals copy-constructed as T from other's U).
        template<typename U, typename Alloc, ets_key_usage_type Cachetype>
        enumerable_thread_specific& operator=(const enumerable_thread_specific<U, Alloc, Cachetype>& other)
        {
            return internal_assign(other);
        }
00864 
00865         // combine_func_t has signature T(T,T) or T(const T&, const T&)
00866         template <typename combine_func_t>
00867         T combine(combine_func_t f_combine) {
00868             if(begin() == end()) {
00869                 internal::destruct_only<T> location;
00870                 my_construct_callback->construct(location.value.begin());
00871                 return *location.value.begin();
00872             }
00873             const_iterator ci = begin();
00874             T my_result = *ci;
00875             while(++ci != end()) 
00876                 my_result = f_combine( my_result, *ci );
00877             return my_result;
00878         }
00879 
00880         // combine_func_t has signature void(T) or void(const T&)
00881         template <typename combine_func_t>
00882         void combine_each(combine_func_t f_combine) {
00883             for(const_iterator ci = begin(); ci != end(); ++ci) {
00884                 f_combine( *ci );
00885             }
00886         }
00887 
00888     }; // enumerable_thread_specific
00889 
    //! Copy the state of 'other' into freshly constructed/cleared *this.
    /** Clones the construct callback, then copies each distinct (key, value)
        pair found in other's lookup tables, copy-constructing our T locals
        from other's U locals. */
    template <typename T, typename Allocator, ets_key_usage_type ETS_key_type> 
    template<typename U, typename A2, ets_key_usage_type C2>
    void enumerable_thread_specific<T,Allocator,ETS_key_type>::internal_copy( const enumerable_thread_specific<U, A2, C2>& other) {
        // Initialize my_construct_callback first, so that it is valid even if rest of this routine throws an exception.
        my_construct_callback = other.my_construct_callback->clone();

        typedef internal::ets_base<ets_no_key> base;
        __TBB_ASSERT(my_locals.size()==0,NULL);
        this->table_reserve_for_copy( other );
        // Walk other's chain of table arrays (my_root -> next -> ...).  A key
        // may occur in more than one array of the chain, so a value is copied
        // only the first time its key is seen (table_find returns an empty
        // slot for unseen keys).
        for( base::array* r=other.my_root; r; r=r->next ) {
            for( size_t i=0; i<r->size(); ++i ) {
                base::slot& s1 = r->at(i);
                if( !s1.empty() ) {
                    base::slot& s2 = this->table_find(s1.key);
                    if( s2.empty() ) { 
#if TBB_DEPRECATED
                        void* lref = &my_locals[my_locals.push_back(padded_element())];
#else
                        // push_back returns an iterator to the new slot.
                        void* lref = &*my_locals.push_back(padded_element());
#endif
                        // Placement-construct T from other's U local, then
                        // record it under the same key.
                        s2.ptr = new(lref) T(*(U*)s1.ptr);
                        s2.key = s1.key;
                    } else {
                        // Skip the duplicate
                    } 
                }
            }
        }
    }
00919 
00920     template< typename Container >
00921     class flattened2d {
00922 
00923         // This intermediate typedef is to address issues with VC7.1 compilers
00924         typedef typename Container::value_type conval_type;
00925 
00926     public:
00927 
00929         typedef typename conval_type::size_type size_type;
00930         typedef typename conval_type::difference_type difference_type;
00931         typedef typename conval_type::allocator_type allocator_type;
00932         typedef typename conval_type::value_type value_type;
00933         typedef typename conval_type::reference reference;
00934         typedef typename conval_type::const_reference const_reference;
00935         typedef typename conval_type::pointer pointer;
00936         typedef typename conval_type::const_pointer const_pointer;
00937 
00938         typedef typename internal::segmented_iterator<Container, value_type> iterator;
00939         typedef typename internal::segmented_iterator<Container, const value_type> const_iterator;
00940 
00941         flattened2d( const Container &c, typename Container::const_iterator b, typename Container::const_iterator e ) : 
00942             my_container(const_cast<Container*>(&c)), my_begin(b), my_end(e) { }
00943 
00944         flattened2d( const Container &c ) : 
00945             my_container(const_cast<Container*>(&c)), my_begin(c.begin()), my_end(c.end()) { }
00946 
00947         iterator begin() { return iterator(*my_container) = my_begin; }
00948         iterator end() { return iterator(*my_container) = my_end; }
00949         const_iterator begin() const { return const_iterator(*my_container) = my_begin; }
00950         const_iterator end() const { return const_iterator(*my_container) = my_end; }
00951 
00952         size_type size() const {
00953             size_type tot_size = 0;
00954             for(typename Container::const_iterator i = my_begin; i != my_end; ++i) {
00955                 tot_size += i->size();
00956             }
00957             return tot_size;
00958         }
00959 
00960     private:
00961 
00962         Container *my_container;
00963         typename Container::const_iterator my_begin;
00964         typename Container::const_iterator my_end;
00965 
00966     };
00967 
00968     template <typename Container>
00969     flattened2d<Container> flatten2d(const Container &c, const typename Container::const_iterator b, const typename Container::const_iterator e) {
00970         return flattened2d<Container>(c, b, e);
00971     }
00972 
00973     template <typename Container>
00974     flattened2d<Container> flatten2d(const Container &c) {
00975         return flattened2d<Container>(c);
00976     }
00977 
00978 } // interface6
00979 
namespace internal {
// Make segmented_iterator visible in tbb::internal as well.
using interface6::internal::segmented_iterator;
}

// Export the current-interface names into namespace tbb.
using interface6::enumerable_thread_specific;
using interface6::flattened2d;
using interface6::flatten2d;
00987 
00988 } // namespace tbb
00989 
00990 #endif

Copyright © 2005-2011 Intel Corporation. All Rights Reserved.

Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are registered trademarks or trademarks of Intel Corporation or its subsidiaries in the United States and other countries.

* Other names and brands may be claimed as the property of others.