enumerable_thread_specific.h

00001 /*
00002     Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
00003 
00004     The source code contained or described herein and all documents related
00005     to the source code ("Material") are owned by Intel Corporation or its
00006     suppliers or licensors.  Title to the Material remains with Intel
00007     Corporation or its suppliers and licensors.  The Material is protected
00008     by worldwide copyright laws and treaty provisions.  No part of the
00009     Material may be used, copied, reproduced, modified, published, uploaded,
00010     posted, transmitted, distributed, or disclosed in any way without
00011     Intel's prior express written permission.
00012 
00013     No license under any patent, copyright, trade secret or other
00014     intellectual property right is granted to or conferred upon you by
00015     disclosure or delivery of the Materials, either expressly, by
00016     implication, inducement, estoppel or otherwise.  Any license under such
00017     intellectual property rights must be express and approved by Intel in
00018     writing.
00019 */
00020 
00021 #ifndef __TBB_enumerable_thread_specific_H
00022 #define __TBB_enumerable_thread_specific_H
00023 
00024 #include "concurrent_vector.h"
00025 #include "tbb_thread.h"
00026 #include "cache_aligned_allocator.h"
00027 #include "aligned_space.h"
00028 #if __SUNPRO_CC
00029 #include <string.h>  // for memcpy
00030 #endif
00031 
00032 #if _WIN32||_WIN64
00033 #include <windows.h>
00034 #else
00035 #include <pthread.h>
00036 #endif
00037 
00038 namespace tbb {
00039 
// Whether an enumerable_thread_specific instance allocates its own native TLS
// key (ets_key_per_instance) or relies solely on the shared hash table (ets_no_key).
enum ets_key_usage_type { ets_key_per_instance, ets_no_key };
00042 
00043 namespace interface6 {
00044  
00046     namespace internal { 
00047 
00048         template<ets_key_usage_type ETS_key_type>
00049         class ets_base: tbb::internal::no_copy {
00050         protected:
00051 #if _WIN32||_WIN64
00052             typedef DWORD key_type;
00053 #else
00054             typedef pthread_t key_type;
00055 #endif
00056 #if __TBB_GCC_3_3_PROTECTED_BROKEN
00057         public:
00058 #endif
00059             struct slot;
00060 
00061             struct array {
00062                 array* next;
00063                 size_t lg_size;
00064                 slot& at( size_t k ) {
00065                     return ((slot*)(void*)(this+1))[k];
00066                 }
00067                 size_t size() const {return (size_t)1<<lg_size;}
00068                 size_t mask() const {return size()-1;}
00069                 size_t start( size_t h ) const {
00070                     return h>>(8*sizeof(size_t)-lg_size);
00071                 }
00072             };
00073             struct slot {
00074                 key_type key;
00075                 void* ptr;
00076                 bool empty() const {return !key;}
00077                 bool match( key_type k ) const {return key==k;}
00078                 bool claim( key_type k ) {
00079                     __TBB_ASSERT(sizeof(tbb::atomic<key_type>)==sizeof(key_type), NULL);
00080                     return tbb::internal::punned_cast<tbb::atomic<key_type>*>(&key)->compare_and_swap(k,0)==0;
00081                 }
00082             };
00083 #if __TBB_GCC_3_3_PROTECTED_BROKEN
00084         protected:
00085 #endif
00086         
00087             static key_type key_of_current_thread() {
00088                tbb::tbb_thread::id id = tbb::this_tbb_thread::get_id();
00089                key_type k;
00090                memcpy( &k, &id, sizeof(k) );
00091                return k;
00092             }
00093 
00095 
00097             atomic<array*> my_root;
00098             atomic<size_t> my_count;
00099             virtual void* create_local() = 0;
00100             virtual void* create_array(size_t _size) = 0;  // _size in bytes
00101             virtual void free_array(void* ptr, size_t _size) = 0; // _size in bytes
00102             array* allocate( size_t lg_size ) {
00103                 size_t n = 1<<lg_size;  
00104                 array* a = static_cast<array*>(create_array( sizeof(array)+n*sizeof(slot) ));
00105                 a->lg_size = lg_size;
00106                 std::memset( a+1, 0, n*sizeof(slot) );
00107                 return a;
00108             }
00109             void free(array* a) {
00110                 size_t n = 1<<(a->lg_size);  
00111                 free_array( (void *)a, size_t(sizeof(array)+n*sizeof(slot)) );
00112             }
00113             static size_t hash( key_type k ) {
00114                 // Multiplicative hashing.  Client should use *upper* bits.
00115                 // casts required for Mac gcc4.* compiler
00116 #if __TBB_WORDSIZE == 4
00117                 return uintptr_t(k)*0x9E3779B9;
00118 #else
00119                 return uintptr_t(k)*0x9E3779B97F4A7C15;
00120 #endif 
00121             } 
00122         
00123             ets_base() {my_root=NULL; my_count=0;}
00124             virtual ~ets_base();  // g++ complains if this is not virtual...
00125             void* table_lookup( bool& exists );
00126             void table_clear();
00127             slot& table_find( key_type k ) {
00128                 size_t h = hash(k);
00129                 array* r = my_root;
00130                 size_t mask = r->mask();
00131                 for(size_t i = r->start(h);;i=(i+1)&mask) {
00132                     slot& s = r->at(i);
00133                     if( s.empty() || s.match(k) )
00134                         return s;
00135                 }
00136             }
00137             void table_reserve_for_copy( const ets_base& other ) {
00138                 __TBB_ASSERT(!my_root,NULL);
00139                 __TBB_ASSERT(!my_count,NULL);
00140                 if( other.my_root ) {
00141                     array* a = allocate(other.my_root->lg_size);
00142                     a->next = NULL;
00143                     my_root = a;
00144                     my_count = other.my_count;
00145                 }
00146             }
00147         };
00148 
        // Out-of-line virtual destructor.  The derived class must have called
        // table_clear() already, so the table is expected to be empty here.
        template<ets_key_usage_type ETS_key_type>
        ets_base<ETS_key_type>::~ets_base() {
            __TBB_ASSERT(!my_root, NULL);
        }
00153 
00154         template<ets_key_usage_type ETS_key_type>
00155         void ets_base<ETS_key_type>::table_clear() {
00156             while( array* r = my_root ) {
00157                 my_root = r->next;
00158                 free(r);
00159             }
00160             my_count = 0;
00161         }
00162                 
        //! Find (or lazily create) the calling thread's element.
        /** Sets exists=true if the element was already present, false if this call
            created it.  Returns a pointer to the element. */
        template<ets_key_usage_type ETS_key_type>
        void* ets_base<ETS_key_type>::table_lookup( bool& exists ) {
            const key_type k = key_of_current_thread(); 

            __TBB_ASSERT(k!=0,NULL);
            void* found;
            size_t h = hash(k);
            // Search every array, newest first.  A hit in an older array is
            // re-published in the top-level array so later lookups are fast.
            for( array* r=my_root; r; r=r->next ) {
                size_t mask=r->mask();
                // Linear probing from the key's preferred slot.
                for(size_t i = r->start(h); ;i=(i+1)&mask) {
                    slot& s = r->at(i);
                    if( s.empty() ) break;   // key is not in this array
                    if( s.match(k) ) {
                        if( r==my_root ) {
                            // Success at top level
                            exists = true;
                            return s.ptr;
                        } else {
                            // Success at some other level.  Need to insert at top level.
                            exists = true;
                            found = s.ptr;
                            goto insert;
                        }
                    }
                }
            }
            // Key does not yet exist
            exists = false;
            found = create_local();
            {
                size_t c = ++my_count;
                array* r = my_root;
                // Grow when load factor would exceed 1/2, keeping probe chains short.
                if( !r || c>r->size()/2 ) {
                    size_t s = r ? r->lg_size : 2;
                    while( c>size_t(1)<<(s-1) ) ++s;
                    array* a = allocate(s);
                    // Race to publish the new array at the head of the chain.
                    for(;;) {
                        a->next = my_root;
                        array* new_r = my_root.compare_and_swap(a,r);
                        if( new_r==r ) break;
                        if( new_r->lg_size>=s ) {
                            // Another thread inserted an equal or  bigger array, so our array is superfluous.
                            free(a);
                            break;
                        }
                        r = new_r;
                    }
                }
            }
        insert:
            // Guaranteed to be room for it, and it is not present, so search for empty slot and grab it.
            array* ir = my_root;
            size_t mask = ir->mask();
            for(size_t i = ir->start(h);;i=(i+1)&mask) {
                slot& s = ir->at(i);
                if( s.empty() ) {
                    if( s.claim(k) ) {
                        s.ptr = found;
                        return found;
                    }
                }
            }
        }
00226 
        //! Specialization that uses a native TLS slot as a cache in front of the table.
        template <>
        class ets_base<ets_key_per_instance>: protected ets_base<ets_no_key> {
            typedef ets_base<ets_no_key> super;
#if _WIN32||_WIN64
            typedef DWORD tls_key_t;
            void create_key() { my_key = TlsAlloc(); }
            void destroy_key() { TlsFree(my_key); }
            void set_tls(void * value) { TlsSetValue(my_key, (LPVOID)value); }
            void* get_tls() { return (void *)TlsGetValue(my_key); }
#else
            typedef pthread_key_t tls_key_t;
            // NOTE(review): the return codes of pthread_key_create (and TlsAlloc above)
            // are ignored, so key exhaustion would go undetected — confirm acceptable.
            void create_key() { pthread_key_create(&my_key, NULL); }
            void destroy_key() { pthread_key_delete(my_key); }
            void set_tls( void * value ) const { pthread_setspecific(my_key, value); }
            void* get_tls() const { return pthread_getspecific(my_key); }
#endif
            tls_key_t my_key;   // one native TLS key per container instance
            // Re-declared pure: this class stays abstract; the concrete container overrides.
            virtual void* create_local() = 0;
            virtual void* create_array(size_t _size) = 0;  // _size in bytes
            virtual void free_array(void* ptr, size_t _size) = 0; // size in bytes
        public:
            ets_base() {create_key();}
            ~ets_base() {destroy_key();}
            //! Fast path: consult native TLS first, fall back to the shared table.
            void* table_lookup( bool& exists ) {
                void* found = get_tls();
                if( found ) {
                    exists=true;
                } else {
                    found = super::table_lookup(exists);
                    set_tls(found);   // cache for subsequent lookups by this thread
                }
                return found; 
            }
            //! Recycle the TLS key so every thread's cached pointer is dropped, then clear the table.
            void table_clear() {
                destroy_key();
                create_key(); 
                super::table_clear();
            }
        };
00267 
        //! Random-access iterator over the elements of an enumerable_thread_specific.
        /** Value may be const- or non-const-qualified.  The iterator lazily caches a
            pointer to the current element in my_value; the cache is reset to NULL
            whenever the position changes. */
        template< typename Container, typename Value >
        class enumerable_thread_specific_iterator 
#if defined(_WIN64) && defined(_MSC_VER) 
            // Ensure that Microsoft's internal template function _Val_type works correctly.
            : public std::iterator<std::random_access_iterator_tag,Value>
#endif /* defined(_WIN64) && defined(_MSC_VER) */
        {
        
            Container *my_container;                 // underlying element vector
            typename Container::size_type my_index;  // current position
            mutable Value *my_value;                 // cached element address, NULL when stale
        
            template<typename C, typename T>
            friend enumerable_thread_specific_iterator<C,T> operator+( ptrdiff_t offset, 
                                                                       const enumerable_thread_specific_iterator<C,T>& v );
        
            template<typename C, typename T, typename U>
            friend bool operator==( const enumerable_thread_specific_iterator<C,T>& i, 
                                    const enumerable_thread_specific_iterator<C,U>& j );
        
            template<typename C, typename T, typename U>
            friend bool operator<( const enumerable_thread_specific_iterator<C,T>& i, 
                                   const enumerable_thread_specific_iterator<C,U>& j );
        
            template<typename C, typename T, typename U>
            friend ptrdiff_t operator-( const enumerable_thread_specific_iterator<C,T>& i, const enumerable_thread_specific_iterator<C,U>& j );
            
            template<typename C, typename U> 
            friend class enumerable_thread_specific_iterator;
        
            public:
        
            enumerable_thread_specific_iterator( const Container &container, typename Container::size_type index ) : 
                my_container(&const_cast<Container &>(container)), my_index(index), my_value(NULL) {}
        
            //! Default constructor: a singular iterator.
            enumerable_thread_specific_iterator() : my_container(NULL), my_index(0), my_value(NULL) {}
        
            //! Converting constructor, e.g. iterator -> const_iterator.
            template<typename U>
            enumerable_thread_specific_iterator( const enumerable_thread_specific_iterator<Container, U>& other ) :
                    my_container( other.my_container ), my_index( other.my_index), my_value( const_cast<Value *>(other.my_value) ) {}
        
            enumerable_thread_specific_iterator operator+( ptrdiff_t offset ) const {
                return enumerable_thread_specific_iterator(*my_container, my_index + offset);
            }
        
            enumerable_thread_specific_iterator &operator+=( ptrdiff_t offset ) {
                my_index += offset;
                my_value = NULL;   // position changed: invalidate cache
                return *this;
            }
        
            enumerable_thread_specific_iterator operator-( ptrdiff_t offset ) const {
                return enumerable_thread_specific_iterator( *my_container, my_index-offset );
            }
        
            enumerable_thread_specific_iterator &operator-=( ptrdiff_t offset ) {
                my_index -= offset;
                my_value = NULL;   // position changed: invalidate cache
                return *this;
            }
        
            //! Dereference, caching the element address on first use at a position.
            Value& operator*() const {
                Value* value = my_value;
                if( !value ) {
                    // Elements are stored as padded raw bytes; cast to the element type.
                    value = my_value = reinterpret_cast<Value *>(&(*my_container)[my_index].value);
                }
                __TBB_ASSERT( value==reinterpret_cast<Value *>(&(*my_container)[my_index].value), "corrupt cache" );
                return *value;
            }
        
            // NOTE(review): unlike operator*, this returns the raw .value storage
            // without a cast to Value — presumably never instantiated; verify before use.
            Value& operator[]( ptrdiff_t k ) const {
               return (*my_container)[my_index + k].value;
            }
        
            Value* operator->() const {return &operator*();}
        
            enumerable_thread_specific_iterator& operator++() {
                ++my_index;
                my_value = NULL;   // position changed: invalidate cache
                return *this;
            }
        
            enumerable_thread_specific_iterator& operator--() {
                --my_index;
                my_value = NULL;   // position changed: invalidate cache
                return *this;
            }
        
            //! Post increment
            enumerable_thread_specific_iterator operator++(int) {
                enumerable_thread_specific_iterator result = *this;
                ++my_index;
                my_value = NULL;
                return result;
            }
        
            //! Post decrement
            enumerable_thread_specific_iterator operator--(int) {
                enumerable_thread_specific_iterator result = *this;
                --my_index;
                my_value = NULL;
                return result;
            }
        
            // STL support
            typedef ptrdiff_t difference_type;
            typedef Value value_type;
            typedef Value* pointer;
            typedef Value& reference;
            typedef std::random_access_iterator_tag iterator_category;
        };
00382         
00383         template<typename Container, typename T>
00384         enumerable_thread_specific_iterator<Container,T> operator+( ptrdiff_t offset, 
00385                                                                     const enumerable_thread_specific_iterator<Container,T>& v ) {
00386             return enumerable_thread_specific_iterator<Container,T>( v.my_container, v.my_index + offset );
00387         }
00388         
00389         template<typename Container, typename T, typename U>
00390         bool operator==( const enumerable_thread_specific_iterator<Container,T>& i, 
00391                          const enumerable_thread_specific_iterator<Container,U>& j ) {
00392             return i.my_index==j.my_index && i.my_container == j.my_container;
00393         }
00394         
00395         template<typename Container, typename T, typename U>
00396         bool operator!=( const enumerable_thread_specific_iterator<Container,T>& i, 
00397                          const enumerable_thread_specific_iterator<Container,U>& j ) {
00398             return !(i==j);
00399         }
00400         
00401         template<typename Container, typename T, typename U>
00402         bool operator<( const enumerable_thread_specific_iterator<Container,T>& i, 
00403                         const enumerable_thread_specific_iterator<Container,U>& j ) {
00404             return i.my_index<j.my_index;
00405         }
00406         
00407         template<typename Container, typename T, typename U>
00408         bool operator>( const enumerable_thread_specific_iterator<Container,T>& i, 
00409                         const enumerable_thread_specific_iterator<Container,U>& j ) {
00410             return j<i;
00411         }
00412         
00413         template<typename Container, typename T, typename U>
00414         bool operator>=( const enumerable_thread_specific_iterator<Container,T>& i, 
00415                          const enumerable_thread_specific_iterator<Container,U>& j ) {
00416             return !(i<j);
00417         }
00418         
00419         template<typename Container, typename T, typename U>
00420         bool operator<=( const enumerable_thread_specific_iterator<Container,T>& i, 
00421                          const enumerable_thread_specific_iterator<Container,U>& j ) {
00422             return !(j<i);
00423         }
00424         
00425         template<typename Container, typename T, typename U>
00426         ptrdiff_t operator-( const enumerable_thread_specific_iterator<Container,T>& i, 
00427                              const enumerable_thread_specific_iterator<Container,U>& j ) {
00428             return i.my_index-j.my_index;
00429         }
00430 
00431     template<typename SegmentedContainer, typename Value >
00432         class segmented_iterator
00433 #if defined(_WIN64) && defined(_MSC_VER)
00434         : public std::iterator<std::input_iterator_tag, Value>
00435 #endif
00436         {
00437             template<typename C, typename T, typename U>
00438             friend bool operator==(const segmented_iterator<C,T>& i, const segmented_iterator<C,U>& j);
00439 
00440             template<typename C, typename T, typename U>
00441             friend bool operator!=(const segmented_iterator<C,T>& i, const segmented_iterator<C,U>& j);
00442             
00443             template<typename C, typename U> 
00444             friend class segmented_iterator;
00445 
00446             public:
00447 
00448                 segmented_iterator() {my_segcont = NULL;}
00449 
00450                 segmented_iterator( const SegmentedContainer& _segmented_container ) : 
00451                     my_segcont(const_cast<SegmentedContainer*>(&_segmented_container)),
00452                     outer_iter(my_segcont->end()) { }
00453 
00454                 ~segmented_iterator() {}
00455 
00456                 typedef typename SegmentedContainer::iterator outer_iterator;
00457                 typedef typename SegmentedContainer::value_type InnerContainer;
00458                 typedef typename InnerContainer::iterator inner_iterator;
00459 
00460                 // STL support
00461                 typedef ptrdiff_t difference_type;
00462                 typedef Value value_type;
00463                 typedef typename SegmentedContainer::size_type size_type;
00464                 typedef Value* pointer;
00465                 typedef Value& reference;
00466                 typedef std::input_iterator_tag iterator_category;
00467 
00468                 // Copy Constructor
00469                 template<typename U>
00470                 segmented_iterator(const segmented_iterator<SegmentedContainer, U>& other) :
00471                     my_segcont(other.my_segcont),
00472                     outer_iter(other.outer_iter),
00473                     // can we assign a default-constructed iterator to inner if we're at the end?
00474                     inner_iter(other.inner_iter)
00475                 {}
00476 
00477                 // assignment
00478                 template<typename U>
00479                 segmented_iterator& operator=( const segmented_iterator<SegmentedContainer, U>& other) {
00480                     if(this != &other) {
00481                         my_segcont = other.my_segcont;
00482                         outer_iter = other.outer_iter;
00483                         if(outer_iter != my_segcont->end()) inner_iter = other.inner_iter;
00484                     }
00485                     return *this;
00486                 }
00487 
00488                 // allow assignment of outer iterator to segmented iterator.  Once it is
00489                 // assigned, move forward until a non-empty inner container is found or
00490                 // the end of the outer container is reached.
00491                 segmented_iterator& operator=(const outer_iterator& new_outer_iter) {
00492                     __TBB_ASSERT(my_segcont != NULL, NULL);
00493                     // check that this iterator points to something inside the segmented container
00494                     for(outer_iter = new_outer_iter ;outer_iter!=my_segcont->end(); ++outer_iter) {
00495                         if( !outer_iter->empty() ) {
00496                             inner_iter = outer_iter->begin();
00497                             break;
00498                         }
00499                     }
00500                     return *this;
00501                 }
00502 
00503                 // pre-increment
00504                 segmented_iterator& operator++() {
00505                     advance_me();
00506                     return *this;
00507                 }
00508 
00509                 // post-increment
00510                 segmented_iterator operator++(int) {
00511                     segmented_iterator tmp = *this;
00512                     operator++();
00513                     return tmp;
00514                 }
00515 
00516                 bool operator==(const outer_iterator& other_outer) const {
00517                     __TBB_ASSERT(my_segcont != NULL, NULL);
00518                     return (outer_iter == other_outer &&
00519                             (outer_iter == my_segcont->end() || inner_iter == outer_iter->begin()));
00520                 }
00521 
00522                 bool operator!=(const outer_iterator& other_outer) const {
00523                     return !operator==(other_outer);
00524 
00525                 }
00526 
00527                 // (i)* RHS
00528                 reference operator*() const {
00529                     __TBB_ASSERT(my_segcont != NULL, NULL);
00530                     __TBB_ASSERT(outer_iter != my_segcont->end(), "Dereferencing a pointer at end of container");
00531                     __TBB_ASSERT(inner_iter != outer_iter->end(), NULL); // should never happen
00532                     return *inner_iter;
00533                 }
00534 
00535                 // i->
00536                 pointer operator->() const { return &operator*();}
00537 
00538             private:
00539                 SegmentedContainer*             my_segcont;
00540                 outer_iterator outer_iter;
00541                 inner_iterator inner_iter;
00542 
00543                 void advance_me() {
00544                     __TBB_ASSERT(my_segcont != NULL, NULL);
00545                     __TBB_ASSERT(outer_iter != my_segcont->end(), NULL); // not true if there are no inner containers
00546                     __TBB_ASSERT(inner_iter != outer_iter->end(), NULL); // not true if the inner containers are all empty.
00547                     ++inner_iter;
00548                     while(inner_iter == outer_iter->end() && ++outer_iter != my_segcont->end()) {
00549                         inner_iter = outer_iter->begin();
00550                     }
00551                 }
00552         };    // segmented_iterator
00553 
00554         template<typename SegmentedContainer, typename T, typename U>
00555         bool operator==( const segmented_iterator<SegmentedContainer,T>& i, 
00556                          const segmented_iterator<SegmentedContainer,U>& j ) {
00557             if(i.my_segcont != j.my_segcont) return false;
00558             if(i.my_segcont == NULL) return true;
00559             if(i.outer_iter != j.outer_iter) return false;
00560             if(i.outer_iter == i.my_segcont->end()) return true;
00561             return i.inner_iter == j.inner_iter;
00562         }
00563 
00564         // !=
00565         template<typename SegmentedContainer, typename T, typename U>
00566         bool operator!=( const segmented_iterator<SegmentedContainer,T>& i, 
00567                          const segmented_iterator<SegmentedContainer,U>& j ) {
00568             return !(i==j);
00569         }
00570 
00571         template<typename T>
00572         struct destruct_only: tbb::internal::no_copy {
00573             tbb::aligned_space<T,1> value;
00574             ~destruct_only() {value.begin()[0].~T();}
00575         };
00576 
00577         template<typename T>
00578         struct construct_by_default: tbb::internal::no_assign {
00579             void construct(void*where) {new(where) T();} // C++ note: the () in T() ensure zero initialization.
00580             construct_by_default( int ) {}
00581         };
00582 
00583         template<typename T>
00584         struct construct_by_exemplar: tbb::internal::no_assign {
00585             const T exemplar;
00586             void construct(void*where) {new(where) T(exemplar);}
00587             construct_by_exemplar( const T& t ) : exemplar(t) {}
00588         };
00589 
00590         template<typename T, typename Finit>
00591         struct construct_by_finit: tbb::internal::no_assign {
00592             Finit f;
00593             void construct(void* where) {new(where) T(f());}
00594             construct_by_finit( const Finit& f_ ) : f(f_) {}
00595         };
00596 
        // storage for initialization function pointer
        //! Type-erased factory interface used to construct thread-local elements.
        /** Instances are managed through clone()/destroy(), never deleted directly. */
        template<typename T>
        class callback_base {
        public:
            // Clone *this
            virtual callback_base* clone() = 0;
            // Destruct and free *this
            virtual void destroy() = 0;
            // Need virtual destructor to satisfy GCC compiler warning
            virtual ~callback_base() { }
            // Construct T at where
            virtual void construct(void* where) = 0;
        };
00610 
        //! Concrete callback that delegates element construction to a Constructor policy.
        /** Instances live only on the heap: created via make()/clone() with the
            tbb_allocator and released via destroy(). */
        template <typename T, typename Constructor>
        class callback_leaf: public callback_base<T>, Constructor {
            template<typename X> callback_leaf( const X& x ) : Constructor(x) {}

            typedef typename tbb::tbb_allocator<callback_leaf> my_allocator_type;

            /*override*/ callback_base<T>* clone() {
                // Copy-construct a new leaf in freshly allocated storage.
                void* where = my_allocator_type().allocate(1);
                return new(where) callback_leaf(*this);
            }

            /*override*/ void destroy() {
                // Run the destructor, then return the storage to the allocator.
                my_allocator_type().destroy(this);
                my_allocator_type().deallocate(this,1);
            }

            /*override*/ void construct(void* where) {
                Constructor::construct(where);
            }  
        public:
            //! Sole way to create a callback_leaf; x initializes the Constructor policy.
            template<typename X>
            static callback_base<T>* make( const X& x ) {
                void* where = my_allocator_type().allocate(1);
                return new(where) callback_leaf(x);
            }
        };
00637 
00639 
        //! Raw storage for a U, padded so its size is a multiple of the cache line size.
        /** ModularSize is sizeof(U) % NFS_MaxLineSize; 0 means U already fits evenly.
            The U is constructed into value externally and must be destroyed via
            unconstruct(). */
        template<typename U, size_t ModularSize>
        struct ets_element {
            char value[ModularSize==0 ? sizeof(U) : sizeof(U)+(tbb::internal::NFS_MaxLineSize-ModularSize)];
            //! Explicitly run the destructor of the U stored in value.
            void unconstruct() {
                tbb::internal::punned_cast<U*>(&value)->~U();
            }
        };
00651 
00652     } // namespace internal
00654 
00656 
00675     template <typename T, 
00676               typename Allocator=cache_aligned_allocator<T>, 
00677               ets_key_usage_type ETS_key_type=ets_no_key > 
00678     class enumerable_thread_specific: internal::ets_base<ETS_key_type> { 
00679 
00680         template<typename U, typename A, ets_key_usage_type C> friend class enumerable_thread_specific;
00681     
00682         typedef internal::ets_element<T,sizeof(T)%tbb::internal::NFS_MaxLineSize> padded_element;
00683 
        //! A blocked_range over iterators that also exposes the element value/reference types.
        template<typename I>
        class generic_range_type: public blocked_range<I> {
        public:
            typedef T value_type;
            typedef T& reference;
            typedef const T& const_reference;
            typedef I iterator;
            typedef ptrdiff_t difference_type;
            generic_range_type( I begin_, I end_, size_t grainsize_ = 1) : blocked_range<I>(begin_,end_,grainsize_) {} 
            //! Conversion between compatible ranges, e.g. non-const to const.
            template<typename U>
            generic_range_type( const generic_range_type<U>& r) : blocked_range<I>(r.begin(),r.end(),r.grainsize()) {} 
            //! Splitting constructor used by parallel algorithms.
            generic_range_type( generic_range_type& r, split ) : blocked_range<I>(r,split()) {}
        };
00698     
00699         typedef typename Allocator::template rebind< padded_element >::other padded_allocator_type;
00700         typedef tbb::concurrent_vector< padded_element, padded_allocator_type > internal_collection_type;
00701         
00702         internal::callback_base<T> *my_construct_callback;
00703 
00704         internal_collection_type my_locals;
00705    
        //! Append a new padded element for the calling thread and construct its T in place.
        /*override*/ void* create_local() {
#if TBB_DEPRECATED
            // Deprecated push_back returns an index.
            void* lref = &my_locals[my_locals.push_back(padded_element())];
#else
            // Current push_back returns an iterator.
            void* lref = &*my_locals.push_back(padded_element());
#endif
            my_construct_callback->construct(lref);
            return lref;
        } 
00715 
00716         void unconstruct_locals() {
00717             for(typename internal_collection_type::iterator cvi = my_locals.begin(); cvi != my_locals.end(); ++cvi) {
00718                 cvi->unconstruct();
00719             }
00720         }
00721 
        // User allocator rebound to machine words, used for the base class's
        // key->element lookup arrays (see create_array/free_array).
        typedef typename Allocator::template rebind< uintptr_t >::other array_allocator_type;
00724         // _size is in bytes
00725         /*override*/ void* create_array(size_t _size) {
00726             size_t nelements = (_size + sizeof(uintptr_t) -1) / sizeof(uintptr_t);
00727             return array_allocator_type().allocate(nelements);
00728         }
00729 
00730         /*override*/ void free_array( void* _ptr, size_t _size) {
00731             size_t nelements = (_size + sizeof(uintptr_t) -1) / sizeof(uintptr_t);
00732             array_allocator_type().deallocate( reinterpret_cast<uintptr_t *>(_ptr),nelements);
00733         }
00734    
    public:
    
        //! Basic types, mirroring standard container conventions.
        typedef Allocator allocator_type;
        typedef T value_type;
        typedef T& reference;
        typedef const T& const_reference;
        typedef T* pointer;
        typedef const T* const_pointer;
        typedef typename internal_collection_type::size_type size_type;
        typedef typename internal_collection_type::difference_type difference_type;
    
        // Iterator types - traverse the set of already-created thread-local elements.
        typedef typename internal::enumerable_thread_specific_iterator< internal_collection_type, value_type > iterator;
        typedef typename internal::enumerable_thread_specific_iterator< internal_collection_type, const value_type > const_iterator;

        // Parallel range types - let the element set feed parallel algorithms.
        typedef generic_range_type< iterator > range_type;
        typedef generic_range_type< const_iterator > const_range_type;
        //! Default constructor.  Each thread's element is default-constructed
        //! on that thread's first call to local().
        enumerable_thread_specific() : 
            my_construct_callback( internal::callback_leaf<T,internal::construct_by_default<T> >::make(/*dummy argument*/0) ) 
        {}
00759 
        //! Constructor with initializer functor.  Each thread's element is
        //! initialized with the value returned by finit().
        template <typename Finit>
        enumerable_thread_specific( Finit finit ) : 
            my_construct_callback( internal::callback_leaf<T,internal::construct_by_finit<T,Finit> >::make( finit ) ) 
        {}
00765     
        //! Constructor with exemplar.  Each thread's element is copy-constructed
        //! from a copy of exemplar held by the callback.
        enumerable_thread_specific(const T& exemplar) : 
            my_construct_callback( internal::callback_leaf<T,internal::construct_by_exemplar<T> >::make( exemplar ) )
        {}
00770     
        //! Destructor.  Destroys the construct callback, then all elements and
        //! the lookup table, while the virtual create/free overrides above are
        //! still reachable.
        ~enumerable_thread_specific() { 
            my_construct_callback->destroy();
            this->clear();  // deallocation before the derived class is finished destructing
            // So free(array *) is still accessible
        }
00777       
00779         reference local() {
00780             bool exists;
00781             return local(exists);
00782         }
00783 
00785         reference local(bool& exists)  {
00786             __TBB_ASSERT(ETS_key_type==ets_no_key,"ets_key_per_instance not yet implemented"); 
00787             void* ptr = this->table_lookup(exists);
00788             return *(T*)ptr;
00789         }
00790 
        //! Number of thread-local elements created so far (not the number of live threads).
        size_type size() const { return my_locals.size(); }
    
        //! True if no thread has created an element yet.
        bool empty() const { return my_locals.empty(); }
00796     
        //! Begin iterator over all created thread-local elements.
        iterator begin() { return iterator( my_locals, 0 ); }
        //! End iterator.
        iterator end() { return iterator(my_locals, my_locals.size() ); }
    
        //! Begin const iterator.
        const_iterator begin() const { return const_iterator(my_locals, 0); }
    
        //! End const iterator.
        const_iterator end() const { return const_iterator(my_locals, my_locals.size()); }
00807 
        //! Range over the elements, for use with parallel algorithms.
        range_type range( size_t grainsize=1 ) { return range_type( begin(), end(), grainsize ); } 
        
        //! Const version of range().
        const_range_type range( size_t grainsize=1 ) const { return const_range_type( begin(), end(), grainsize ); }
00813 
        //! Destroy all elements and discard the key->element table.
        /** Elements are destructed before their storage is released, and the
            construct callback is retained so the container can be repopulated.
            NOTE(review): appears unsafe to run concurrently with local() on
            other threads — confirm against TBB's documented guarantees. */
        void clear() {
            unconstruct_locals();
            my_locals.clear();
            this->table_clear();
            // callback is not destroyed
            // exemplar is not destroyed
        }
00822 
    private:

        // Deep-copy helper shared by the copy constructors and internal_assign;
        // defined out of line after the class body.
        template<typename U, typename A2, ets_key_usage_type C2>
        void internal_copy( const enumerable_thread_specific<U, A2, C2>& other);

    public:
00829 
        //! Converting copy constructor: T must be constructible from U.
        template<typename U, typename Alloc, ets_key_usage_type Cachetype>
        enumerable_thread_specific( const enumerable_thread_specific<U, Alloc, Cachetype>& other ) : internal::ets_base<ETS_key_type> ()
        {
            internal_copy(other);
        }

        //! Copy constructor.
        enumerable_thread_specific( const enumerable_thread_specific& other ) : internal::ets_base<ETS_key_type> ()
        {
            internal_copy(other);
        }
00840 
    private:

        // Common implementation of both operator= overloads.
        template<typename U, typename A2, ets_key_usage_type C2>
        enumerable_thread_specific &
        internal_assign(const enumerable_thread_specific<U, A2, C2>& other) {
            // Compare addresses as void* so self-assignment is detected even
            // through the cross-type overload.
            if(static_cast<void *>( this ) != static_cast<const void *>( &other )) {
                this->clear(); 
                my_construct_callback->destroy();
                // Null the callback before internal_copy installs the clone, so
                // an exception during the copy cannot leave a dangling pointer.
                my_construct_callback = 0;
                internal_copy( other );
            }
            return *this;
        }
00854 
    public:

        //! Assignment.
        enumerable_thread_specific& operator=(const enumerable_thread_specific& other) {
            return internal_assign(other);
        }

        //! Converting assignment: T must be constructible from U.
        template<typename U, typename Alloc, ets_key_usage_type Cachetype>
        enumerable_thread_specific& operator=(const enumerable_thread_specific<U, Alloc, Cachetype>& other)
        {
            return internal_assign(other);
        }
00867 
        // combine_func_t has signature T(T,T) or T(const T&, const T&)
        //! Fold all thread-local elements into a single value with f_combine.
        /** If no elements exist yet, returns a value produced by the stored
            construct callback without adding it to the container.  The order in
            which elements are combined is an implementation detail. */
        template <typename combine_func_t>
        T combine(combine_func_t f_combine) {
            if(begin() == end()) {
                // destruct_only provides raw aligned storage for one T and
                // (judging by its name) destructs it on scope exit — confirm
                // in the destruct_only definition.
                internal::destruct_only<T> location;
                my_construct_callback->construct(location.value.begin());
                return *location.value.begin();
            }
            // Seed the accumulator with the first element, then fold the rest.
            const_iterator ci = begin();
            T my_result = *ci;
            while(++ci != end()) 
                my_result = f_combine( my_result, *ci );
            return my_result;
        }
00882 
00883         // combine_func_t has signature void(T) or void(const T&)
00884         template <typename combine_func_t>
00885         void combine_each(combine_func_t f_combine) {
00886             for(const_iterator ci = begin(); ci != end(); ++ci) {
00887                 f_combine( *ci );
00888             }
00889         }
00890 
00891     }; // enumerable_thread_specific
00892 
    // Deep-copy other's lookup table and elements into *this.
    // Precondition: *this has no elements (asserted below) and no live callback.
    template <typename T, typename Allocator, ets_key_usage_type ETS_key_type> 
    template<typename U, typename A2, ets_key_usage_type C2>
    void enumerable_thread_specific<T,Allocator,ETS_key_type>::internal_copy( const enumerable_thread_specific<U, A2, C2>& other) {
        // Initialize my_construct_callback first, so that it is valid even if rest of this routine throws an exception.
        my_construct_callback = other.my_construct_callback->clone();

        // NOTE(review): hard-codes ets_no_key, consistent with the assertion in
        // local() that ets_key_per_instance is not yet implemented.
        typedef internal::ets_base<ets_no_key> base;
        __TBB_ASSERT(my_locals.size()==0,NULL);
        this->table_reserve_for_copy( other );
        // Walk every array in other's table chain; older arrays in the chain
        // may hold entries for keys already copied, which are skipped below.
        for( base::array* r=other.my_root; r; r=r->next ) {
            for( size_t i=0; i<r->size(); ++i ) {
                base::slot& s1 = r->at(i);
                if( !s1.empty() ) {
                    base::slot& s2 = this->table_find(s1.key);
                    if( s2.empty() ) { 
#if TBB_DEPRECATED
                        void* lref = &my_locals[my_locals.push_back(padded_element())];
#else
                        void* lref = &*my_locals.push_back(padded_element());
#endif
                        // Copy-construct T in place from other's element
                        // (requires T constructible from U).
                        s2.ptr = new(lref) T(*(U*)s1.ptr);
                        s2.key = s1.key;
                    } else {
                        // Skip the duplicate
                    } 
                }
            }
        }
    }
00922 
00923     template< typename Container >
00924     class flattened2d {
00925 
00926         // This intermediate typedef is to address issues with VC7.1 compilers
00927         typedef typename Container::value_type conval_type;
00928 
00929     public:
00930 
00932         typedef typename conval_type::size_type size_type;
00933         typedef typename conval_type::difference_type difference_type;
00934         typedef typename conval_type::allocator_type allocator_type;
00935         typedef typename conval_type::value_type value_type;
00936         typedef typename conval_type::reference reference;
00937         typedef typename conval_type::const_reference const_reference;
00938         typedef typename conval_type::pointer pointer;
00939         typedef typename conval_type::const_pointer const_pointer;
00940 
00941         typedef typename internal::segmented_iterator<Container, value_type> iterator;
00942         typedef typename internal::segmented_iterator<Container, const value_type> const_iterator;
00943 
00944         flattened2d( const Container &c, typename Container::const_iterator b, typename Container::const_iterator e ) : 
00945             my_container(const_cast<Container*>(&c)), my_begin(b), my_end(e) { }
00946 
00947         flattened2d( const Container &c ) : 
00948             my_container(const_cast<Container*>(&c)), my_begin(c.begin()), my_end(c.end()) { }
00949 
00950         iterator begin() { return iterator(*my_container) = my_begin; }
00951         iterator end() { return iterator(*my_container) = my_end; }
00952         const_iterator begin() const { return const_iterator(*my_container) = my_begin; }
00953         const_iterator end() const { return const_iterator(*my_container) = my_end; }
00954 
00955         size_type size() const {
00956             size_type tot_size = 0;
00957             for(typename Container::const_iterator i = my_begin; i != my_end; ++i) {
00958                 tot_size += i->size();
00959             }
00960             return tot_size;
00961         }
00962 
00963     private:
00964 
00965         Container *my_container;
00966         typename Container::const_iterator my_begin;
00967         typename Container::const_iterator my_end;
00968 
00969     };
00970 
00971     template <typename Container>
00972     flattened2d<Container> flatten2d(const Container &c, const typename Container::const_iterator b, const typename Container::const_iterator e) {
00973         return flattened2d<Container>(c, b, e);
00974     }
00975 
00976     template <typename Container>
00977     flattened2d<Container> flatten2d(const Container &c) {
00978         return flattened2d<Container>(c);
00979     }
00980 
} // namespace interface6

namespace internal {
// Re-export so other headers can name tbb::internal::segmented_iterator.
using interface6::internal::segmented_iterator;
}

// Lift the versioned-namespace public names to tbb scope for users.
using interface6::enumerable_thread_specific;
using interface6::flattened2d;
using interface6::flatten2d;

} // namespace tbb
00992 
00993 #endif

Copyright © 2005-2010 Intel Corporation. All Rights Reserved.

Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are registered trademarks or trademarks of Intel Corporation or its subsidiaries in the United States and other countries.

* Other names and brands may be claimed as the property of others.