#ifndef __TBB_concurrent_hash_map_H
#define __TBB_concurrent_hash_map_H

#include "tbb_stddef.h"

#if !TBB_USE_EXCEPTIONS && _MSC_VER
// Suppress "C++ exception handler used, but unwind semantics are not enabled" warning in STL headers
#pragma warning (push)
#pragma warning (disable: 4530)
#endif

#include <iterator>
#include <utility>      // Need std::pair
#include <cstring>      // Need std::memset

#if !TBB_USE_EXCEPTIONS && _MSC_VER
#pragma warning (pop)
#endif

#include "cache_aligned_allocator.h"
#include "tbb_allocator.h"
#include "spin_rw_mutex.h"
#include "atomic.h"
#include "aligned_space.h"
#include "tbb_exception.h"
#include "_concurrent_unordered_internal.h" // Need tbb_hasher
#if TBB_USE_PERFORMANCE_WARNINGS
#include <typeinfo>
#endif

namespace tbb {

namespace internal {
    //! Routine that loads pointer from location pointed to by src with acquire fence.
    void* __TBB_EXPORTED_FUNC itt_load_pointer_with_acquire_v3( const void* src );
    //! Routine that stores pointer into location pointed to by dst with release fence.
    void __TBB_EXPORTED_FUNC itt_store_pointer_with_release_v3( void* dst, void* src );
    //! Routine that loads pointer from location pointed to by src without acquire fence.
    void* __TBB_EXPORTED_FUNC itt_load_pointer_v3( const void* src );
}

//! hash_compare that is default argument for concurrent_hash_map
template<typename Key>
struct tbb_hash_compare {
    static size_t hash( const Key& a ) { return tbb_hasher(a); }
    static bool equal( const Key& a, const Key& b ) { return a == b; }
};
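
// Illustrative sketch (not part of the original header): any replacement for
// tbb_hash_compare must provide the same two static members, hash() and
// equal(). The struct below is a hypothetical example for case-insensitive
// std::string keys; it assumes <string> and <cctype> are included.
//
//     struct CaseInsensitiveHashCompare {
//         static size_t hash( const std::string& s ) {
//             size_t h = 0;
//             for( size_t i = 0; i < s.size(); ++i )
//                 h = h*31 + size_t( std::tolower( (unsigned char)s[i] ) );
//             return h;
//         }
//         static bool equal( const std::string& a, const std::string& b ) {
//             if( a.size() != b.size() ) return false;
//             for( size_t i = 0; i < a.size(); ++i )
//                 if( std::tolower( (unsigned char)a[i] ) != std::tolower( (unsigned char)b[i] ) )
//                     return false;
//             return true;
//         }
//     };
//     // Hypothetical usage: tbb::concurrent_hash_map<std::string,int,CaseInsensitiveHashCompare>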

namespace interface4 {

template<typename Key, typename T, typename HashCompare = tbb_hash_compare<Key>, typename A = tbb_allocator<std::pair<Key, T> > >
class concurrent_hash_map;

//! @cond INTERNAL
namespace internal {

//! Type of a hash code.
typedef size_t hashcode_t;

//! Node base type
struct hash_map_node_base : tbb::internal::no_copy {
    //! Mutex type
    typedef spin_rw_mutex mutex_t;
    //! Scoped lock type for mutex
    typedef mutex_t::scoped_lock scoped_t;
    //! Next node in chain
    hash_map_node_base *next;
    mutex_t mutex;
};

//! Incompleteness flag value: marks a bucket that still needs rehashing (see is_valid below)
static hash_map_node_base *const rehash_req = reinterpret_cast<hash_map_node_base*>(size_t(3));
//! Rehashed empty bucket flag
static hash_map_node_base *const empty_rehashed = reinterpret_cast<hash_map_node_base*>(size_t(0));

//! Base class of concurrent_hash_map
class hash_map_base {
public:
    //! Size type
    typedef size_t size_type;
    //! Type of a hash code.
    typedef size_t hashcode_t;
    //! Segment index type
    typedef size_t segment_index_t;
    //! Node base type
    typedef hash_map_node_base node_base;
    //! Bucket type
    struct bucket : tbb::internal::no_copy {
        //! Mutex type for buckets
        typedef spin_rw_mutex mutex_t;
        //! Scoped lock type for mutex
        typedef mutex_t::scoped_lock scoped_t;
        mutex_t mutex;
        node_base *node_list;
    };
    //! Count of embedded blocks
    static size_type const embedded_block = 1;
    //! Count of buckets in the embedded segments
    static size_type const embedded_buckets = 1<<embedded_block;
    //! Count of segments in the first allocation block, including the embedded ones
    static size_type const first_block = 8;
    //! Size of the segment pointer table: one segment per bit of segment_index_t
    static size_type const pointers_per_table = sizeof(segment_index_t) * 8;
    //! Segment pointer
    typedef bucket *segment_ptr_t;
    //! Segment pointers table type
    typedef segment_ptr_t segments_table_t[pointers_per_table];
    //! Hash mask = sum of allocated segment sizes - 1
    atomic<hashcode_t> my_mask;
    //! Segment pointers table. Also prevents false sharing between my_mask and my_size
    segments_table_t my_table;
    //! Size of container in stored items
    atomic<size_type> my_size;
    //! Zero segment: buckets embedded in the table object itself
    bucket my_embedded_segment[embedded_buckets];

    //! Constructor
    hash_map_base() {
        std::memset( this, 0, pointers_per_table*sizeof(segment_ptr_t)
            + sizeof(my_size) + sizeof(my_mask)
            + embedded_buckets*sizeof(bucket) );
        for( size_type i = 0; i < embedded_block; i++ ) // fill the table
            my_table[i] = my_embedded_segment + segment_base(i);
        my_mask = embedded_buckets - 1;
        __TBB_ASSERT( embedded_block <= first_block, "The first block number must include embedded blocks");
    }

    //! @return segment index of given index in the array
    static segment_index_t segment_index_of( size_type index ) {
        return segment_index_t( __TBB_Log2( index|1 ) );
    }

    //! @return the first array index of given segment
    static segment_index_t segment_base( segment_index_t k ) {
        return (segment_index_t(1)<<k & ~segment_index_t(1));
    }

    //! @return segment size except for @arg k == 0
    static size_type segment_size( segment_index_t k ) {
        return size_type(1)<<k; // fake value for k == 0
    }
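
    // Worked example of the segment arithmetic above (derived from the code,
    // for orientation only). Bucket storage is split into lazily allocated
    // segments of doubling size; with embedded_block == 1:
    //
    //     bucket index i:      0  1 | 2  3 | 4..7 | 8..15 | ...
    //     segment_index_of(i): 0  0 | 1  1 | 2    | 3     | ...
    //     segment_base(k):     k=0 -> 0, k=1 -> 2, k=2 -> 4, k=3 -> 8
    //     segment_size(k):     k=0 -> 1 (fake value; segment 0 actually spans
    //                          buckets 0..1), k=1 -> 2, k=2 -> 4, k=3 -> 8
    //
    // Hence bucket i lives at my_table[s][i - segment_base(s)] with
    // s = segment_index_of(i), which is exactly what get_bucket() computes.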

    //! True if ptr is a valid node pointer rather than a small-integer flag value
    static bool is_valid( void *ptr ) {
        return reinterpret_cast<size_t>(ptr) > size_t(63);
    }

    //! Initialize buckets in a newly enabled segment
    static void init_buckets( segment_ptr_t ptr, size_type sz, bool is_initial ) {
        if( is_initial ) std::memset(ptr, 0, sz*sizeof(bucket) );
        else for(size_type i = 0; i < sz; i++, ptr++) {
            *reinterpret_cast<intptr_t*>(&ptr->mutex) = 0;
            ptr->node_list = rehash_req; // contents must be moved in from the parent bucket on demand
        }
    }

    //! Add node @arg n to bucket @arg b
    static void add_to_bucket( bucket *b, node_base *n ) {
        __TBB_ASSERT(b->node_list != rehash_req, NULL);
        n->next = b->node_list;
        b->node_list = n; // bucket is locked and not marked for rehashing
    }

    //! Exception safety helper: resets the segment pointer if enable_segment() throws
    struct enable_segment_failsafe {
        segment_ptr_t *my_segment_ptr;
        enable_segment_failsafe(segments_table_t &table, segment_index_t k) : my_segment_ptr(&table[k]) {}
        ~enable_segment_failsafe() {
            if( my_segment_ptr ) *my_segment_ptr = 0; // indicate no allocation in progress
        }
    };

    //! Enable segment
    void enable_segment( segment_index_t k, bool is_initial = false ) {
        __TBB_ASSERT( k, "Zero segment must be embedded" );
        enable_segment_failsafe watchdog( my_table, k );
        cache_aligned_allocator<bucket> alloc;
        size_type sz;
        __TBB_ASSERT( !is_valid(my_table[k]), "Wrong concurrent assignment");
        if( k >= first_block ) { // big segment: allocate it as a whole
            sz = segment_size( k );
            segment_ptr_t ptr = alloc.allocate( sz );
            init_buckets( ptr, sz, is_initial );
#if TBB_USE_THREADING_TOOLS
            // publish the segment with a release fence so racing readers see initialized buckets
            itt_store_pointer_with_release_v3( my_table + k, ptr );
#else
            my_table[k] = ptr;
#endif
            sz <<= 1; // double it to get the total bucket count of the container
        } else { // the first block: allocate the remaining first_block segments at once
            __TBB_ASSERT( k == embedded_block, "Wrong segment index" );
            sz = segment_size( first_block );
            segment_ptr_t ptr = alloc.allocate( sz - embedded_buckets );
            init_buckets( ptr, sz - embedded_buckets, is_initial );
            ptr -= segment_base(embedded_block); // bias the pointer so segment_base() offsets apply
            for(segment_index_t i = embedded_block; i < first_block; i++) // calculate the offsets
#if TBB_USE_THREADING_TOOLS
                itt_store_pointer_with_release_v3( my_table + i, ptr + segment_base(i) );
#else
                my_table[i] = ptr + segment_base(i);
#endif
        }
#if TBB_USE_THREADING_TOOLS
        itt_store_pointer_with_release_v3( &my_mask, (void*)(sz-1) );
#else
        my_mask = sz - 1; // publish the mask only after the new buckets are reachable
#endif
        watchdog.my_segment_ptr = 0; // keep the allocated segment
    }

    //! Get bucket by (masked) hashcode
    bucket *get_bucket( hashcode_t h ) const throw() {
        segment_index_t s = segment_index_of( h );
        h -= segment_base(s);
        segment_ptr_t seg = my_table[s];
        __TBB_ASSERT( is_valid(seg), "hashcode must be cut by valid mask for allocated segments" );
        return &seg[h];
    }

    //! Mark the corresponding bucket in each bigger segment as rehashed as well
    void mark_rehashed_levels( hashcode_t h ) throw () {
        segment_index_t s = segment_index_of( h );
        while( segment_ptr_t seg = my_table[++s] )
            if( seg[h].node_list == rehash_req ) {
                seg[h].node_list = empty_rehashed;
                mark_rehashed_levels( h + segment_base(s) );
            }
    }

    //! Check for mask race
    // Splitting into two functions should help inlining
    inline bool check_mask_race( const hashcode_t h, hashcode_t &m ) const {
        hashcode_t m_now, m_old = m;
#if TBB_USE_THREADING_TOOLS
        m_now = (hashcode_t) itt_load_pointer_with_acquire_v3( &my_mask );
#else
        m_now = my_mask;
#endif
        if( m_old != m_now )
            return check_rehashing_collision( h, m_old, m = m_now );
        return false;
    }

    //! Process mask race, check for rehashing collision
    bool check_rehashing_collision( const hashcode_t h, hashcode_t m_old, hashcode_t m ) const {
        __TBB_ASSERT(m_old != m, NULL);
        if( (h & m_old) != (h & m) ) { // mask changed for this hashcode: rare event
            // the condition above proves that 'h' has some bits set beyond 'm_old';
            // find the next applicable mask after m_old
            for( ++m_old; !(h & m_old); m_old <<= 1 ) // at most a few rounds depending on the first block size
                ;
            m_old = (m_old<<1) - 1; // get full mask from a bit
            __TBB_ASSERT((m_old&(m_old+1))==0 && m_old <= m, NULL);
            // check whether the bucket that h was split into has been rehashed already
#if TBB_USE_THREADING_TOOLS
            if( itt_load_pointer_with_acquire_v3(&( get_bucket(h & m_old)->node_list )) != rehash_req )
#else
            if( __TBB_load_with_acquire(get_bucket( h & m_old )->node_list) != rehash_req )
#endif
                return true; // the item may have migrated: the caller must restart the search
        }
        return false;
    }
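
    // Worked example of the collision check above (derived from the code):
    // suppose h = 0110b and the mask grew from m_old = 0011b to m = 0111b.
    // Then h & m_old == 0010b but h & m == 0110b, so a concurrent rehash may
    // have moved the key. The loop recovers the smallest grown mask that
    // changes h's bucket (here 0111b). If the new bucket (h & 0111b) is
    // already rehashed, the key may have migrated there and the caller must
    // restart; while it is still marked rehash_req, the key, if present, is
    // still reachable through the old bucket's chain.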

    //! Insert a node and check for load factor. @return segment index to enable if a new segment is needed but not allocated yet
    segment_index_t insert_new_node( bucket *b, node_base *n, hashcode_t mask ) {
        size_type sz = ++my_size; // prefix form enforces allocation after the first item is inserted
        add_to_bucket( b, n );
        // check load factor
        if( sz >= mask ) {
            segment_index_t new_seg = segment_index_of( mask+1 );
            __TBB_ASSERT( is_valid(my_table[new_seg-1]), "new allocations must not publish new mask until segment has allocated");
#if TBB_USE_THREADING_TOOLS
            if( !itt_load_pointer_v3(my_table+new_seg)
#else
            if( !my_table[new_seg]
#endif
              && __TBB_CompareAndSwapW(&my_table[new_seg], 2, 0) == 0 )
                return new_seg; // the CAS won: the caller must allocate and publish the new segment
        }
        return 0;
    }

    //! Prepare enough segments for the given number of buckets
    void reserve(size_type buckets) {
        if( !buckets-- ) return;
        bool is_initial = !my_size;
        for( size_type m = my_mask; buckets > m; m = my_mask )
            enable_segment( segment_index_of( m+1 ), is_initial );
    }
    //! Swap hash_map_bases
    void internal_swap(hash_map_base &table) {
        std::swap(this->my_mask, table.my_mask);
        std::swap(this->my_size, table.my_size);
        for(size_type i = 0; i < embedded_buckets; i++)
            std::swap(this->my_embedded_segment[i].node_list, table.my_embedded_segment[i].node_list);
        for(size_type i = embedded_block; i < pointers_per_table; i++)
            std::swap(this->my_table[i], table.my_table[i]);
    }
};

template<typename Iterator>
class hash_map_range;

//! Meets requirements of a forward iterator for STL
/** Value is either the T or const T type of the container. */
template<typename Container, typename Value>
class hash_map_iterator
    : public std::iterator<std::forward_iterator_tag,Value>
{
    typedef Container map_type;
    typedef typename Container::node node;
    typedef hash_map_base::node_base node_base;
    typedef hash_map_base::bucket bucket;

    template<typename C, typename T, typename U>
    friend bool operator==( const hash_map_iterator<C,T>& i, const hash_map_iterator<C,U>& j );

    template<typename C, typename T, typename U>
    friend bool operator!=( const hash_map_iterator<C,T>& i, const hash_map_iterator<C,U>& j );

    template<typename C, typename T, typename U>
    friend ptrdiff_t operator-( const hash_map_iterator<C,T>& i, const hash_map_iterator<C,U>& j );

    template<typename C, typename U>
    friend class hash_map_iterator;

    template<typename I>
    friend class hash_map_range;

    void advance_to_next_bucket() {
        size_t k = my_index+1;
        while( my_bucket && k <= my_map->my_mask ) {
            // k & (k-2) is nonzero iff k is not the first bucket of a segment
            if( k & (k-2) )
                ++my_bucket; // next bucket is adjacent in the same segment
            else my_bucket = my_map->get_bucket( k );
            my_node = static_cast<node*>( my_bucket->node_list );
            if( hash_map_base::is_valid(my_node) ) {
                my_index = k; return;
            }
            ++k;
        }
        my_bucket = 0; my_node = 0; my_index = k; // the end
    }
#if !defined(_MSC_VER) || defined(__INTEL_COMPILER)
    template<typename Key, typename T, typename HashCompare, typename A>
    friend class interface4::concurrent_hash_map;
#else
public: // workaround
#endif
    //! concurrent_hash_map over which we are iterating.
    const Container *my_map;

    //! Index in hash table for current item
    size_t my_index;

    //! Pointer to bucket
    const bucket *my_bucket;

    //! Pointer to node that has current item
    node *my_node;

    hash_map_iterator( const Container &map, size_t index, const bucket *b, node_base *n );

public:
    //! Construct undefined iterator
    hash_map_iterator() {}
    hash_map_iterator( const hash_map_iterator<Container,typename Container::value_type> &other ) :
        my_map(other.my_map),
        my_index(other.my_index),
        my_bucket(other.my_bucket),
        my_node(other.my_node)
    {}
    Value& operator*() const {
        __TBB_ASSERT( hash_map_base::is_valid(my_node), "iterator uninitialized or at end of container?" );
        return my_node->item;
    }
    Value* operator->() const {return &operator*();}
    hash_map_iterator& operator++();

    //! Post increment
    Value* operator++(int) {
        Value* result = &operator*();
        operator++();
        return result;
    }
};

template<typename Container, typename Value>
hash_map_iterator<Container,Value>::hash_map_iterator( const Container &map, size_t index, const bucket *b, node_base *n ) :
    my_map(&map),
    my_index(index),
    my_bucket(b),
    my_node( static_cast<node*>(n) )
{
    if( b && !hash_map_base::is_valid(n) )
        advance_to_next_bucket();
}

template<typename Container, typename Value>
hash_map_iterator<Container,Value>& hash_map_iterator<Container,Value>::operator++() {
    my_node = static_cast<node*>( my_node->next );
    if( !my_node ) advance_to_next_bucket();
    return *this;
}

template<typename Container, typename T, typename U>
bool operator==( const hash_map_iterator<Container,T>& i, const hash_map_iterator<Container,U>& j ) {
    return i.my_node == j.my_node && i.my_map == j.my_map;
}

template<typename Container, typename T, typename U>
bool operator!=( const hash_map_iterator<Container,T>& i, const hash_map_iterator<Container,U>& j ) {
    return i.my_node != j.my_node || i.my_map != j.my_map;
}

//! Range class used with concurrent_hash_map
template<typename Iterator>
class hash_map_range {
    typedef typename Iterator::map_type map_type;
    Iterator my_begin;
    Iterator my_end;
    mutable Iterator my_midpoint;
    size_t my_grainsize;
    //! Set my_midpoint to point approximately half way between my_begin and my_end.
    void set_midpoint() const;
    template<typename U> friend class hash_map_range;
public:
    //! Type for size of a range
    typedef std::size_t size_type;
    typedef typename Iterator::value_type value_type;
    typedef typename Iterator::reference reference;
    typedef typename Iterator::difference_type difference_type;
    typedef Iterator iterator;

    //! True if range is empty.
    bool empty() const {return my_begin==my_end;}

    //! True if range can be partitioned into two subranges.
    bool is_divisible() const {
        return my_midpoint!=my_end;
    }
    //! Split range.
    hash_map_range( hash_map_range& r, split ) :
        my_end(r.my_end),
        my_grainsize(r.my_grainsize)
    {
        r.my_end = my_begin = r.my_midpoint;
        __TBB_ASSERT( !empty(), "Splitting despite the range not being divisible" );
        __TBB_ASSERT( !r.empty(), "Splitting despite the range not being divisible" );
        set_midpoint();
        r.set_midpoint();
    }
    //! Type conversion
    template<typename U>
    hash_map_range( hash_map_range<U>& r) :
        my_begin(r.my_begin),
        my_end(r.my_end),
        my_midpoint(r.my_midpoint),
        my_grainsize(r.my_grainsize)
    {}
#if TBB_DEPRECATED
    //! Init range with iterators and grainsize specified
    hash_map_range( const Iterator& begin_, const Iterator& end_, size_type grainsize_ = 1 ) :
        my_begin(begin_),
        my_end(end_),
        my_grainsize(grainsize_)
    {
        if(!my_end.my_index && !my_end.my_bucket) // end iterator
            my_end.my_index = my_end.my_map->my_mask + 1;
        set_midpoint();
        __TBB_ASSERT( grainsize_>0, "grainsize must be positive" );
    }
#endif
    //! Init range with container and grainsize specified
    hash_map_range( const map_type &map, size_type grainsize_ = 1 ) :
        my_begin( Iterator( map, 0, map.my_embedded_segment, map.my_embedded_segment->node_list ) ),
        my_end( Iterator( map, map.my_mask + 1, 0, 0 ) ),
        my_grainsize( grainsize_ )
    {
        __TBB_ASSERT( grainsize_>0, "grainsize must be positive" );
        set_midpoint();
    }
    const Iterator& begin() const {return my_begin;}
    const Iterator& end() const {return my_end;}
    //! The grain size for this range.
    size_type grainsize() const {return my_grainsize;}
};

template<typename Iterator>
void hash_map_range<Iterator>::set_midpoint() const {
    // Split by groups of nodes
    size_t m = my_end.my_index-my_begin.my_index;
    if( m > my_grainsize ) {
        m = my_begin.my_index + m/2u;
        hash_map_base::bucket *b = my_begin.my_map->get_bucket(m);
        my_midpoint = Iterator(*my_begin.my_map,m,b,b->node_list);
    } else {
        my_midpoint = my_end;
    }
    __TBB_ASSERT( my_begin.my_index <= my_midpoint.my_index,
        "my_begin is after my_midpoint" );
    __TBB_ASSERT( my_midpoint.my_index <= my_end.my_index,
        "my_midpoint is after my_end" );
    __TBB_ASSERT( my_begin != my_midpoint || my_begin == my_end,
        "[my_begin, my_midpoint) range should not be empty" );
}

} // namespace internal
//! @endcond

//! Unordered map from Key to T with concurrent access.
/** Items are stored in buckets protected by read-write spin locks; the
    accessor classes pin an item while a thread reads or updates it. */
template<typename Key, typename T, typename HashCompare, typename Allocator>
class concurrent_hash_map : protected internal::hash_map_base {
    template<typename Container, typename Value>
    friend class internal::hash_map_iterator;

    template<typename I>
    friend class internal::hash_map_range;

public:
    typedef Key key_type;
    typedef T mapped_type;
    typedef std::pair<const Key,T> value_type;
    typedef hash_map_base::size_type size_type;
    typedef ptrdiff_t difference_type;
    typedef value_type *pointer;
    typedef const value_type *const_pointer;
    typedef value_type &reference;
    typedef const value_type &const_reference;
    typedef internal::hash_map_iterator<concurrent_hash_map,value_type> iterator;
    typedef internal::hash_map_iterator<concurrent_hash_map,const value_type> const_iterator;
    typedef internal::hash_map_range<iterator> range_type;
    typedef internal::hash_map_range<const_iterator> const_range_type;
    typedef Allocator allocator_type;

protected:
    friend class const_accessor;
    struct node;
    typedef typename Allocator::template rebind<node>::other node_allocator_type;
    node_allocator_type my_allocator;
    HashCompare my_hash_compare;

    struct node : public node_base {
        value_type item;
        node( const Key &key ) : item(key, T()) {}
        node( const Key &key, const T &t ) : item(key, t) {}
        // exception-safe allocation through the rebound node allocator
        void *operator new( size_t, node_allocator_type &a ) {
            void *ptr = a.allocate(1);
            if(!ptr)
                tbb::internal::throw_exception(tbb::internal::eid_bad_alloc);
            return ptr;
        }
        // matching placement delete, called if an exception is thrown in the constructor
        void operator delete( void *ptr, node_allocator_type &a ) {return a.deallocate(static_cast<node*>(ptr),1); }
    };

    void delete_node( node_base *n ) {
        my_allocator.destroy( static_cast<node*>(n) );
        my_allocator.deallocate( static_cast<node*>(n), 1);
    }

    node *search_bucket( const key_type &key, bucket *b ) const {
        node *n = static_cast<node*>( b->node_list );
        while( is_valid(n) && !my_hash_compare.equal(key, n->item.first) )
            n = static_cast<node*>( n->next );
        __TBB_ASSERT(n != internal::rehash_req, "Search can be executed only for rehashed bucket");
        return n;
    }

    //! Bucket accessor: finds a bucket by masked hashcode, rehashes it if needed, and acquires its lock
    class bucket_accessor : public bucket::scoped_t {
        bool my_is_writer;
        bucket *my_b;
    public:
        bucket_accessor( concurrent_hash_map *base, const hashcode_t h, bool writer = false ) { acquire( base, h, writer ); }
        //! Find a bucket by masked hashcode, optionally rehash it, and acquire the lock
        inline void acquire( concurrent_hash_map *base, const hashcode_t h, bool writer = false ) {
            my_b = base->get_bucket( h );
#if TBB_USE_THREADING_TOOLS
            if( itt_load_pointer_with_acquire_v3(&my_b->node_list) == internal::rehash_req
#else
            if( __TBB_load_with_acquire(my_b->node_list) == internal::rehash_req
#endif
                && try_acquire( my_b->mutex, /*write=*/true ) )
            {
                if( my_b->node_list == internal::rehash_req ) base->rehash_bucket( my_b, h ); // recursive rehashing
                my_is_writer = true;
            }
            else bucket::scoped_t::acquire( my_b->mutex, /*write=*/my_is_writer = writer );
            __TBB_ASSERT( my_b->node_list != internal::rehash_req, NULL);
        }
        //! Check whether bucket is locked for write
        bool is_writer() { return my_is_writer; }
        //! Get bucket pointer
        bucket *operator() () { return my_b; }
        bool upgrade_to_writer() { my_is_writer = true; return bucket::scoped_t::upgrade_to_writer(); }
    };

    //! Internal serial rehashing helper
    void rehash_bucket( bucket *b_new, const hashcode_t h ) {
        __TBB_ASSERT( *(intptr_t*)(&b_new->mutex), "b_new must be locked (for write)");
        __TBB_ASSERT( h > 1, "The lowermost buckets can't be rehashed" );
        __TBB_store_with_release(b_new->node_list, internal::empty_rehashed); // mark rehashed
        hashcode_t mask = ( 1u<<__TBB_Log2( h ) ) - 1; // get parent mask from the topmost bit

        bucket_accessor b_old( this, h & mask ); // lock the parent bucket

        mask = (mask<<1) | 1; // get full mask for new bucket
        __TBB_ASSERT( (mask&(mask+1))==0 && (h & mask) == h, NULL );
    restart:
        for( node_base **p = &b_old()->node_list, *n = __TBB_load_with_acquire(*p); is_valid(n); n = *p ) {
            hashcode_t c = my_hash_compare.hash( static_cast<node*>(n)->item.first );
#if TBB_USE_ASSERT
            hashcode_t bmask = h & (mask>>1);
            bmask = bmask==0? 1 : ( 1u<<(__TBB_Log2( bmask )+1 ) ) - 1; // minimal mask of the parent bucket
            __TBB_ASSERT( (c & bmask) == (h & bmask), "hash() function changed for key in table" );
#endif
            if( (c & mask) == h ) {
                if( !b_old.is_writer() )
                    if( !b_old.upgrade_to_writer() ) {
                        goto restart; // node pointer can be invalid due to concurrent erase
                    }
                *p = n->next; // exclude from b_old
                add_to_bucket( b_new, n );
            } else p = &n->next; // iterate to next item
        }
    }

public:

    class accessor;
    //! Combines data access, locking, and garbage collection.
    class const_accessor {
        friend class concurrent_hash_map<Key,T,HashCompare,Allocator>;
        friend class accessor;
        void operator=( const accessor & ) const; // Deny access
        const_accessor( const accessor & );       // Deny access
    public:
        //! Type of value
        typedef const typename concurrent_hash_map::value_type value_type;

        //! True if result is empty.
        bool empty() const {return !my_node;}

        //! Set to null
        void release() {
            if( my_node ) {
                my_lock.release();
                my_node = 0;
            }
        }

        //! Return reference to associated value in hash table.
        const_reference operator*() const {
            __TBB_ASSERT( my_node, "attempt to dereference empty accessor" );
            return my_node->item;
        }

        //! Return pointer to associated value in hash table.
        const_pointer operator->() const {
            return &operator*();
        }

        //! Create empty result
        const_accessor() : my_node(NULL) {}

        //! Destroy result after releasing the underlying reference.
        ~const_accessor() {
            my_node = NULL; // the scoped lock is released by its own destructor
        }
    private:
        node *my_node;
        typename node::scoped_t my_lock;
        hashcode_t my_hash;
    };

    //! Allows write access to elements and combines data access, locking, and garbage collection.
    class accessor: public const_accessor {
    public:
        //! Type of value
        typedef typename concurrent_hash_map::value_type value_type;

        //! Return reference to associated value in hash table.
        reference operator*() const {
            __TBB_ASSERT( this->my_node, "attempt to dereference empty accessor" );
            return this->my_node->item;
        }

        //! Return pointer to associated value in hash table.
        pointer operator->() const {
            return &operator*();
        }
    };
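
    // Minimal usage sketch for the accessor classes (illustrative only; the
    // table object below is hypothetical, and <cstdio>/<string> are assumed):
    //
    //     concurrent_hash_map<int,std::string> table;
    //     {
    //         concurrent_hash_map<int,std::string>::accessor a;        // write access
    //         if( table.find( a, 42 ) )
    //             a->second = "updated";           // safe: the item stays locked
    //     }   // lock released when the accessor leaves scope
    //     {
    //         concurrent_hash_map<int,std::string>::const_accessor ca; // read access
    //         if( table.find( ca, 42 ) )
    //             std::printf( "%s\n", ca->second.c_str() );
    //     }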

    //! Construct empty table.
    concurrent_hash_map(const allocator_type &a = allocator_type())
        : internal::hash_map_base(), my_allocator(a)
    {}

    //! Construct empty table with n preallocated buckets. This number serves also as initial concurrency level.
    concurrent_hash_map(size_type n, const allocator_type &a = allocator_type())
        : my_allocator(a)
    {
        reserve( n );
    }

    //! Copy constructor
    concurrent_hash_map( const concurrent_hash_map& table, const allocator_type &a = allocator_type())
        : internal::hash_map_base(), my_allocator(a)
    {
        internal_copy(table);
    }

    //! Construction with copying iteration range and given allocator instance
    template<typename I>
    concurrent_hash_map(I first, I last, const allocator_type &a = allocator_type())
        : my_allocator(a)
    {
        reserve( std::distance(first, last) );
        internal_copy(first, last);
    }

    //! Assignment
    concurrent_hash_map& operator=( const concurrent_hash_map& table ) {
        if( this!=&table ) {
            clear();
            internal_copy(table);
        }
        return *this;
    }


    //! Rehashes and optionally resizes the whole table.
    /** Useful to optimize performance before or after concurrent operations. */
    void rehash(size_type n = 0);

    //! Clear table
    void clear();

    //! Clear table and destroy it.
    ~concurrent_hash_map() { clear(); }

    //------------------------------------------------------------------------
    // Parallel algorithm support
    //------------------------------------------------------------------------
    range_type range( size_type grainsize=1 ) {
        return range_type( *this, grainsize );
    }
    const_range_type range( size_type grainsize=1 ) const {
        return const_range_type( *this, grainsize );
    }
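
    // Illustrative sketch: these ranges plug into tbb::parallel_for, which
    // splits the bucket array across worker threads. The body type below
    // (SumValues) and the typedef are hypothetical, and "tbb/parallel_for.h"
    // must be included separately:
    //
    //     typedef concurrent_hash_map<int,int> IntTable;
    //     struct SumValues {
    //         tbb::atomic<long>& my_sum;
    //         SumValues( tbb::atomic<long>& s ) : my_sum(s) {}
    //         void operator()( const IntTable::range_type& r ) const {
    //             long local = 0;
    //             for( IntTable::range_type::iterator i = r.begin(); i != r.end(); ++i )
    //                 local += i->second;
    //             my_sum += local;                   // atomic accumulation
    //         }
    //     };
    //     // tbb::atomic<long> sum; sum = 0;
    //     // tbb::parallel_for( table.range(), SumValues(sum) );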

    //------------------------------------------------------------------------
    // STL support - not thread-safe methods
    //------------------------------------------------------------------------
    iterator begin() {return iterator(*this,0,my_embedded_segment,my_embedded_segment->node_list);}
    iterator end() {return iterator(*this,0,0,0);}
    const_iterator begin() const {return const_iterator(*this,0,my_embedded_segment,my_embedded_segment->node_list);}
    const_iterator end() const {return const_iterator(*this,0,0,0);}
    std::pair<iterator, iterator> equal_range( const Key& key ) { return internal_equal_range(key, end()); }
    std::pair<const_iterator, const_iterator> equal_range( const Key& key ) const { return internal_equal_range(key, end()); }

    //! Number of items in table.
    size_type size() const { return my_size; }

    //! True if size()==0.
    bool empty() const { return my_size == 0; }

    //! Upper bound on size.
    size_type max_size() const {return (~size_type(0))/sizeof(node);}

    //! Returns the current number of buckets
    size_type bucket_count() const { return my_mask+1; }

    //! Return allocator object
    allocator_type get_allocator() const { return this->my_allocator; }

    //! Swap two instances. Iterators are invalidated
    void swap(concurrent_hash_map &table);

    //------------------------------------------------------------------------
    // Concurrent map operations
    //------------------------------------------------------------------------

    //! Return count of items (0 or 1)
    size_type count( const Key &key ) const {
        return const_cast<concurrent_hash_map*>(this)->lookup(/*insert*/false, key, NULL, NULL, /*write=*/false );
    }

    //! Find item and acquire a read lock on the item.
    /** Return true if item is found, false otherwise. */
    bool find( const_accessor &result, const Key &key ) const {
        result.release();
        return const_cast<concurrent_hash_map*>(this)->lookup(/*insert*/false, key, NULL, &result, /*write=*/false );
    }

    //! Find item and acquire a write lock on the item.
    /** Return true if item is found, false otherwise. */
    bool find( accessor &result, const Key &key ) {
        result.release();
        return lookup(/*insert*/false, key, NULL, &result, /*write=*/true );
    }

    //! Insert item (if not already present) and acquire a read lock on the item.
    /** Returns true if item is new. */
    bool insert( const_accessor &result, const Key &key ) {
        result.release();
        return lookup(/*insert*/true, key, NULL, &result, /*write=*/false );
    }

    //! Insert item (if not already present) and acquire a write lock on the item.
    /** Returns true if item is new. */
    bool insert( accessor &result, const Key &key ) {
        result.release();
        return lookup(/*insert*/true, key, NULL, &result, /*write=*/true );
    }

    //! Insert item by copying if there is no such key present already and acquire a read lock on the item.
    /** Returns true if item is new. */
    bool insert( const_accessor &result, const value_type &value ) {
        result.release();
        return lookup(/*insert*/true, value.first, &value.second, &result, /*write=*/false );
    }

    //! Insert item by copying if there is no such key present already and acquire a write lock on the item.
    /** Returns true if item is new. */
    bool insert( accessor &result, const value_type &value ) {
        result.release();
        return lookup(/*insert*/true, value.first, &value.second, &result, /*write=*/true );
    }

    //! Insert item by copying if there is no such key present already
    /** Returns true if item is inserted. */
    bool insert( const value_type &value ) {
        return lookup(/*insert*/true, value.first, &value.second, NULL, /*write=*/false );
    }

    //! Insert range [first, last)
    template<typename I>
    void insert(I first, I last) {
        for(; first != last; ++first)
            insert( *first );
    }

    //! Erase item.
    /** Return true if item was erased by particularly this call. */
    bool erase( const Key& key );

    //! Erase item by const_accessor.
    /** Return true if item was erased by particularly this call. */
    bool erase( const_accessor& item_accessor ) {
        return exclude( item_accessor, /*readonly=*/true );
    }

    //! Erase item by accessor.
    /** Return true if item was erased by particularly this call. */
    bool erase( accessor& item_accessor ) {
        return exclude( item_accessor, /*readonly=*/false );
    }
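
    // Illustrative sketch of the insert/erase contract above (hypothetical
    // table object; derived from the declarations, not additional API):
    //
    //     concurrent_hash_map<int,int> table;
    //     bool is_new = table.insert( std::make_pair(1, 100) ); // true: inserted
    //     is_new = table.insert( std::make_pair(1, 200) );      // false: key exists, value stays 100
    //
    //     concurrent_hash_map<int,int>::accessor a;
    //     table.insert( a, 2 );   // inserts (2, int()) and write-locks the item
    //     a->second = 300;
    //     a.release();            // unlock before erasing the same key to avoid self-deadlock
    //
    //     bool erased = table.erase( 1 );  // true: this call removed key 1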

protected:
    //! Insert or find item and optionally acquire a lock on the item.
    bool lookup( bool op_insert, const Key &key, const T *t, const_accessor *result, bool write );

    //! Delete item by accessor
    bool exclude( const_accessor &item_accessor, bool readonly );

    //! Implementation of equal_range: returns the range of items matching the key (at most one)
    template<typename I>
    std::pair<I, I> internal_equal_range( const Key& key, I end ) const;

    //! Copy "source" to *this, where *this must start out empty.
    void internal_copy( const concurrent_hash_map& source );

    template<typename I>
    void internal_copy(I first, I last);

    //! Fast find when no concurrent erasure is used. For internal use only!
    /** Return pointer to item with given key, or NULL if no such item exists.
        Must not be called concurrently with erasure operations. */
    const_pointer internal_fast_find( const Key& key ) const {
        hashcode_t h = my_hash_compare.hash( key );
#if TBB_USE_THREADING_TOOLS
        hashcode_t m = (hashcode_t) itt_load_pointer_with_acquire_v3( &my_mask );
#else
        hashcode_t m = my_mask;
#endif
        node *n;
    restart:
        __TBB_ASSERT((m&(m+1))==0, NULL);
        bucket *b = get_bucket( h & m );
#if TBB_USE_THREADING_TOOLS
        // double-check with acquire semantics whether the bucket still needs rehashing
        if( itt_load_pointer_with_acquire_v3(&b->node_list) == internal::rehash_req )
#else
        if( __TBB_load_with_acquire(b->node_list) == internal::rehash_req )
#endif
        {
            bucket::scoped_t lock;
            if( lock.try_acquire( b->mutex, /*write=*/true ) ) {
                if( b->node_list == internal::rehash_req)
                    const_cast<concurrent_hash_map*>(this)->rehash_bucket( b, h & m ); // recursive rehashing
            }
            else lock.acquire( b->mutex, /*write=*/false ); // wait for the concurrent rehasher to finish
            __TBB_ASSERT(b->node_list!=internal::rehash_req,NULL);
        }
        n = search_bucket( key, b );
        if( n )
            return &n->item;
        else if( check_mask_race( h, m ) )
            goto restart;
        return 0;
    }
};

#if _MSC_VER && !defined(__INTEL_COMPILER)
// Suppress "conditional expression is constant" warning.
#pragma warning( push )
#pragma warning( disable: 4127 )
#endif

template<typename Key, typename T, typename HashCompare, typename A>
bool concurrent_hash_map<Key,T,HashCompare,A>::lookup( bool op_insert, const Key &key, const T *t, const_accessor *result, bool write ) {
    __TBB_ASSERT( !result || !result->my_node, NULL );
    segment_index_t grow_segment;
    bool return_value;
    node *n, *tmp_n = 0;
    hashcode_t const h = my_hash_compare.hash( key );
#if TBB_USE_THREADING_TOOLS
    hashcode_t m = (hashcode_t) itt_load_pointer_with_acquire_v3( &my_mask );
#else
    hashcode_t m = my_mask;
#endif
    restart:
    {//lock scope
        __TBB_ASSERT((m&(m+1))==0, NULL);
        return_value = false;
        // get bucket
        bucket_accessor b( this, h & m );

        // find a node
        n = search_bucket( key, b() );
        if( op_insert ) {
            // [opt] insert a key
            if( !n ) {
                if( !tmp_n ) {
                    if(t) tmp_n = new( my_allocator ) node(key, *t);
                    else  tmp_n = new( my_allocator ) node(key);
                }
                if( !b.is_writer() && !b.upgrade_to_writer() ) {
                    // Rerun search_bucket, in case another thread inserted the item during the upgrade
                    n = search_bucket( key, b() );
                    if( is_valid(n) ) { // unfortunately, it did
                        b.downgrade_to_reader();
                        goto exists;
                    }
                }
                if( check_mask_race(h, m) )
                    goto restart; // b.release() is done in ~b()
                // insert and set flag to grow the container
                grow_segment = insert_new_node( b(), n = tmp_n, m );
                tmp_n = 0;
                return_value = true;
            } else {
        exists: grow_segment = 0;
            }
        } else { // find or count
            if( !n ) {
                if( check_mask_race( h, m ) )
                    goto restart; // b.release() is done in ~b()
                return false;
            }
            return_value = true;
            grow_segment = 0;
        }
        if( !result ) goto check_growth;
        // acquire the item
        if( !result->my_lock.try_acquire( n->mutex, write ) ) {
            // we are unlucky, prepare for longer wait
            tbb::internal::atomic_backoff trials;
            do {
                if( !trials.bounded_pause() ) {
                    // the wait takes really long, restart the operation
                    b.release();
                    __TBB_ASSERT( !op_insert || !return_value, "Can't acquire new item in locked bucket?" );
                    __TBB_Yield();
#if TBB_USE_THREADING_TOOLS
                    m = (hashcode_t) itt_load_pointer_with_acquire_v3( &my_mask );
#else
                    m = my_mask;
#endif
                    goto restart;
                }
            } while( !result->my_lock.try_acquire( n->mutex, write ) );
        }
    }//lock scope
    result->my_node = n;
    result->my_hash = h;
check_growth:
    // [opt] grow the container
    if( grow_segment )
        enable_segment( grow_segment );
    if( tmp_n ) // if op_insert only
        delete_node( tmp_n );
    return return_value;
}

template<typename Key, typename T, typename HashCompare, typename A>
template<typename I>
std::pair<I, I> concurrent_hash_map<Key,T,HashCompare,A>::internal_equal_range( const Key& key, I end_ ) const {
    hashcode_t h = my_hash_compare.hash( key );
    hashcode_t m = my_mask;
    __TBB_ASSERT((m&(m+1))==0, NULL);
    h &= m;
    bucket *b = get_bucket( h );
    while( b->node_list == internal::rehash_req ) {
        m = ( 1u<<__TBB_Log2( h ) ) - 1; // get parent mask from the topmost bit
        b = get_bucket( h &= m );
    }
    node *n = search_bucket( key, b );
    if( !n )
        return std::make_pair(end_, end_);
    iterator lower(*this, h, b, n), upper(lower);
    return std::make_pair(lower, ++upper);
}

template<typename Key, typename T, typename HashCompare, typename A>
bool concurrent_hash_map<Key,T,HashCompare,A>::exclude( const_accessor &item_accessor, bool readonly ) {
    __TBB_ASSERT( item_accessor.my_node, NULL );
    node_base *const n = item_accessor.my_node;
    item_accessor.my_node = NULL; // the accessor's reference is released regardless of the outcome
    hashcode_t const h = item_accessor.my_hash;
#if TBB_USE_THREADING_TOOLS
    hashcode_t m = (hashcode_t) itt_load_pointer_with_acquire_v3( &my_mask );
#else
    hashcode_t m = my_mask;
#endif
    do {
        // get and lock the bucket for write
        bucket_accessor b( this, h & m, /*writer=*/true );
        node_base **p = &b()->node_list;
        while( *p && *p != n )
            p = &(*p)->next;
        if( !*p ) { // the node was not found in this bucket
            if( check_mask_race( h, m ) )
                continue; // the mask grew: retry with the new bucket
            item_accessor.my_lock.release();
            return false;
        }
        __TBB_ASSERT( *p == n, NULL );
        *p = n->next; // remove from container
        my_size--;
        break;
    } while(true);
    if( readonly ) // need the exclusive lock on the item before deleting it
        item_accessor.my_lock.upgrade_to_writer();
    item_accessor.my_lock.release();
    delete_node( n ); // only one thread can delete it due to the write lock on the bucket
    return true;
}

template<typename Key, typename T, typename HashCompare, typename A>
bool concurrent_hash_map<Key,T,HashCompare,A>::erase( const Key &key ) {
    node_base *n;
    hashcode_t const h = my_hash_compare.hash( key );
#if TBB_USE_THREADING_TOOLS
    hashcode_t m = (hashcode_t) itt_load_pointer_with_acquire_v3( &my_mask );
#else
    hashcode_t m = my_mask;
#endif
restart:
    {//lock scope
        // get and lock the bucket
        bucket_accessor b( this, h & m );
    search:
        node_base **p = &b()->node_list;
        n = *p;
        while( is_valid(n) && !my_hash_compare.equal(key, static_cast<node*>(n)->item.first ) ) {
            p = &n->next;
            n = *p;
        }
        if( !n ) { // not found, but the mask could have changed
            if( check_mask_race( h, m ) )
                goto restart;
            return false;
        }
        else if( !b.is_writer() && !b.upgrade_to_writer() ) {
            if( check_mask_race( h, m ) ) // contended upgrade, check the mask
                goto restart;
            goto search;
        }
        *p = n->next; // remove from container
        my_size--;
    }
    {
        typename node::scoped_t item_locker( n->mutex, /*write=*/true );
    }
    // note: there should be no threads trying to acquire this mutex again; do not try to upgrade const_accessor!
    delete_node( n ); // only one thread can delete it
    return true;
}

template<typename Key, typename T, typename HashCompare, typename A>
void concurrent_hash_map<Key,T,HashCompare,A>::swap(concurrent_hash_map<Key,T,HashCompare,A> &table) {
    std::swap(this->my_allocator, table.my_allocator);
    std::swap(this->my_hash_compare, table.my_hash_compare);
    internal_swap(table);
}

template<typename Key, typename T, typename HashCompare, typename A>
void concurrent_hash_map<Key,T,HashCompare,A>::rehash(size_type sz) {
    reserve( sz );
    hashcode_t mask = my_mask;
    hashcode_t b = (mask+1)>>1; // first index of the last segment
    __TBB_ASSERT((b&(b-1))==0, NULL); // zero or power of 2
    bucket *bp = get_bucket( b ); // only the last segment should be scanned for rehashing
    for(; b <= mask; b++, bp++ ) {
        node_base *n = bp->node_list;
        __TBB_ASSERT( is_valid(n) || n == internal::empty_rehashed || n == internal::rehash_req, "Broken internal structure" );
        __TBB_ASSERT( *reinterpret_cast<intptr_t*>(&bp->mutex) == 0, "concurrent or unexpectedly terminated operation during rehash() execution" );
        if( n == internal::rehash_req ) { // rehash this bucket
            hashcode_t h = b; bucket *b_old = bp;
            do { // find the root rehashed bucket
                __TBB_ASSERT( h > 1, "The lowermost buckets can't be rehashed" );
                hashcode_t m = ( 1u<<__TBB_Log2( h ) ) - 1; // get parent mask from the topmost bit
                b_old = get_bucket( h &= m );
            } while( b_old->node_list == internal::rehash_req );
            // now h is the index of the root rehashed bucket b_old
            mark_rehashed_levels( h ); // mark all non-rehashed children recursively across all segments
            for( node_base **p = &b_old->node_list, *q = *p; is_valid(q); q = *p ) {
                hashcode_t c = my_hash_compare.hash( static_cast<node*>(q)->item.first );
                if( (c & mask) != h ) { // should be rehashed
                    *p = q->next; // exclude from b_old
                    bucket *b_new = get_bucket( c & mask );
                    __TBB_ASSERT( b_new->node_list != internal::rehash_req, "hash() function changed for key in table or internal error" );
                    add_to_bucket( b_new, q );
                } else p = &q->next; // iterate to next item
            }
        }
    }
#if TBB_USE_PERFORMANCE_WARNINGS
    int current_size = int(my_size), buckets = int(mask)+1, empty_buckets = 0, overpopulated_buckets = 0;
    static bool reported = false;
#endif
#if TBB_USE_ASSERT || TBB_USE_PERFORMANCE_WARNINGS
    for( b = 0; b <= mask; b++ ) { // verify the whole table
        if( b & (b-2) ) ++bp; // not the beginning of a segment
        else bp = get_bucket( b );
        node_base *n = bp->node_list;
        __TBB_ASSERT( *reinterpret_cast<intptr_t*>(&bp->mutex) == 0, "concurrent or unexpectedly terminated operation during rehash() execution" );
        __TBB_ASSERT( is_valid(n) || n == internal::empty_rehashed, "Broken internal structure" );
#if TBB_USE_PERFORMANCE_WARNINGS
        if( n == internal::empty_rehashed ) empty_buckets++;
        else if( n->next ) overpopulated_buckets++;
#endif
#if TBB_USE_ASSERT
        for( ; is_valid(n); n = n->next ) {
            hashcode_t h = my_hash_compare.hash( static_cast<node*>(n)->item.first ) & mask;
            __TBB_ASSERT( h == b, "hash() function changed for key in table or internal error" );
        }
#endif
    }
#endif // TBB_USE_ASSERT || TBB_USE_PERFORMANCE_WARNINGS
#if TBB_USE_PERFORMANCE_WARNINGS
    if( buckets > current_size ) empty_buckets -= buckets - current_size;
    else overpopulated_buckets -= current_size - buckets;
    if( !reported && buckets >= 512 && ( 2*empty_buckets > current_size || 2*overpopulated_buckets > current_size ) ) {
        tbb::internal::runtime_warning(
            "Performance is not optimal because the hash function produces bad randomness in lower bits in %s.\nSize: %d Empties: %d Overlaps: %d",
            typeid(*this).name(), current_size, empty_buckets, overpopulated_buckets );
        reported = true;
    }
#endif
}

template<typename Key, typename T, typename HashCompare, typename A>
void concurrent_hash_map<Key,T,HashCompare,A>::clear() {
    hashcode_t m = my_mask;
    __TBB_ASSERT((m&(m+1))==0, NULL);
#if TBB_USE_ASSERT || TBB_USE_PERFORMANCE_WARNINGS
#if TBB_USE_PERFORMANCE_WARNINGS
    int current_size = int(my_size), buckets = int(m)+1, empty_buckets = 0, overpopulated_buckets = 0;
    static bool reported = false;
#endif
    bucket *bp = 0;
    // check consistency
    for( segment_index_t b = 0; b <= m; b++ ) {
        if( b & (b-2) ) ++bp; // not the beginning of a segment
        else bp = get_bucket( b );
        node_base *n = bp->node_list;
        __TBB_ASSERT( is_valid(n) || n == internal::empty_rehashed || n == internal::rehash_req, "Broken internal structure" );
        __TBB_ASSERT( *reinterpret_cast<intptr_t*>(&bp->mutex) == 0, "concurrent or unexpectedly terminated operation during clear() execution" );
#if TBB_USE_PERFORMANCE_WARNINGS
        if( n == internal::empty_rehashed ) empty_buckets++;
        else if( n == internal::rehash_req ) buckets--;
        else if( n->next ) overpopulated_buckets++;
#endif
#if __TBB_EXTRA_DEBUG
        for(; is_valid(n); n = n->next ) {
            hashcode_t h = my_hash_compare.hash( static_cast<node*>(n)->item.first );
            h &= m;
            __TBB_ASSERT( h == b || get_bucket(h)->node_list == internal::rehash_req, "hash() function changed for key in table or internal error" );
        }
#endif
    }
#if TBB_USE_PERFORMANCE_WARNINGS
    if( buckets > current_size ) empty_buckets -= buckets - current_size;
    else overpopulated_buckets -= current_size - buckets;
    if( !reported && buckets >= 512 && ( 2*empty_buckets > current_size || 2*overpopulated_buckets > current_size ) ) {
        tbb::internal::runtime_warning(
            "Performance is not optimal because the hash function produces bad randomness in lower bits in %s.\nSize: %d Empties: %d Overlaps: %d",
            typeid(*this).name(), current_size, empty_buckets, overpopulated_buckets );
        reported = true;
    }
#endif
#endif // TBB_USE_ASSERT || TBB_USE_PERFORMANCE_WARNINGS
    my_size = 0;
    segment_index_t s = segment_index_of( m );
    __TBB_ASSERT( s+1 == pointers_per_table || !my_table[s+1], "wrong mask or concurrent grow" );
    cache_aligned_allocator<bucket> alloc;
    do { // walk segments from the last to the first, deleting nodes and deallocating segments
        __TBB_ASSERT( is_valid( my_table[s] ), "wrong mask or concurrent grow" );
        segment_ptr_t buckets_ptr = my_table[s];
        size_type sz = segment_size( s ? s : 1 );
        for( segment_index_t i = 0; i < sz; i++ )
            for( node_base *n = buckets_ptr[i].node_list; is_valid(n); n = buckets_ptr[i].node_list ) {
                buckets_ptr[i].node_list = n->next;
                delete_node( n );
            }
        if( s >= first_block ) // a separately allocated segment
            alloc.deallocate( buckets_ptr, sz );
        else if( s == embedded_block && embedded_block != first_block ) // the first non-embedded allocation
            alloc.deallocate( buckets_ptr, segment_size(first_block)-embedded_buckets );
        if( s >= embedded_block ) my_table[s] = 0;
    } while(s-- > 0);
    my_mask = embedded_buckets - 1;
}

template<typename Key, typename T, typename HashCompare, typename A>
void concurrent_hash_map<Key,T,HashCompare,A>::internal_copy( const concurrent_hash_map& source ) {
    reserve( source.my_size );
    hashcode_t mask = source.my_mask;
    if( my_mask == mask ) { // optimized version: copy bucket by bucket
        bucket *dst = 0, *src = 0;
        bool rehash_required = false;
        for( hashcode_t k = 0; k <= mask; k++ ) {
            if( k & (k-2) ) ++dst, ++src; // not the beginning of a segment
            else { dst = get_bucket( k ); src = source.get_bucket( k ); }
            __TBB_ASSERT( dst->node_list != internal::rehash_req, "Invalid bucket in destination table");
            node *n = static_cast<node*>( src->node_list );
            if( n == internal::rehash_req ) { // source bucket is not rehashed, copy the flag
                rehash_required = true;
                dst->node_list = internal::rehash_req;
            } else for(; n; n = static_cast<node*>( n->next ) ) {
                add_to_bucket( dst, new( my_allocator ) node(n->item.first, n->item.second) );
                ++my_size;
            }
        }
        if( rehash_required ) rehash();
    } else internal_copy( source.begin(), source.end() );
}

template<typename Key, typename T, typename HashCompare, typename A>
template<typename I>
void concurrent_hash_map<Key,T,HashCompare,A>::internal_copy(I first, I last) {
    hashcode_t m = my_mask;
    for(; first != last; ++first) {
        hashcode_t h = my_hash_compare.hash( first->first );
        bucket *b = get_bucket( h & m );
        __TBB_ASSERT( b->node_list != internal::rehash_req, "Invalid bucket in destination table");
        node *n = new( my_allocator ) node(first->first, first->second);
        add_to_bucket( b, n );
        ++my_size;
    }
}

} // namespace interface4

using interface4::concurrent_hash_map;


template<typename Key, typename T, typename HashCompare, typename A1, typename A2>
inline bool operator==(const concurrent_hash_map<Key, T, HashCompare, A1> &a, const concurrent_hash_map<Key, T, HashCompare, A2> &b) {
    if(a.size() != b.size()) return false;
    typename concurrent_hash_map<Key, T, HashCompare, A1>::const_iterator i(a.begin()), i_end(a.end());
    typename concurrent_hash_map<Key, T, HashCompare, A2>::const_iterator j, j_end(b.end());
    for(; i != i_end; ++i) {
        j = b.equal_range(i->first).first;
        if( j == j_end || !(i->second == j->second) ) return false;
    }
    return true;
}

template<typename Key, typename T, typename HashCompare, typename A1, typename A2>
inline bool operator!=(const concurrent_hash_map<Key, T, HashCompare, A1> &a, const concurrent_hash_map<Key, T, HashCompare, A2> &b)
{ return !(a == b); }

template<typename Key, typename T, typename HashCompare, typename A>
inline void swap(concurrent_hash_map<Key, T, HashCompare, A> &a, concurrent_hash_map<Key, T, HashCompare, A> &b)
{ a.swap( b ); }
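
// End-to-end sketch of typical use (illustrative only; a standalone program
// would include "tbb/concurrent_hash_map.h", <cstdio>, and <utility>):
//
//     int main() {
//         tbb::concurrent_hash_map<int,int> table; // safe for concurrent insert/find
//         for( int i = 0; i < 100; ++i )
//             table.insert( std::make_pair(i, i*i) );
//         tbb::concurrent_hash_map<int,int>::const_accessor ca;
//         if( table.find( ca, 7 ) )
//             std::printf( "7 -> %d\n", ca->second );
//         return 0;
//     }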

#if _MSC_VER && !defined(__INTEL_COMPILER)
#pragma warning( pop )
#endif // warning 4127 is back

} // namespace tbb

#endif /* __TBB_concurrent_hash_map_H */