_concurrent_unordered_internal.h

/*
    Copyright 2005-2011 Intel Corporation.  All Rights Reserved.

    The source code contained or described herein and all documents related
    to the source code ("Material") are owned by Intel Corporation or its
    suppliers or licensors.  Title to the Material remains with Intel
    Corporation or its suppliers and licensors.  The Material is protected
    by worldwide copyright laws and treaty provisions.  No part of the
    Material may be used, copied, reproduced, modified, published, uploaded,
    posted, transmitted, distributed, or disclosed in any way without
    Intel's prior express written permission.

    No license under any patent, copyright, trade secret or other
    intellectual property right is granted to or conferred upon you by
    disclosure or delivery of the Materials, either expressly, by
    implication, inducement, estoppel or otherwise.  Any license under such
    intellectual property rights must be express and approved by Intel in
    writing.
*/

/* Container implementations in this header are based on PPL implementations
   provided by Microsoft. */

#ifndef __TBB_concurrent_unordered_internal_H
#define __TBB_concurrent_unordered_internal_H

#include "tbb_stddef.h"

#if !TBB_USE_EXCEPTIONS && _MSC_VER
    // Suppress "C++ exception handler used, but unwind semantics are not enabled" warning in STL headers
    #pragma warning (push)
    #pragma warning (disable: 4530)
#endif

#include <iterator>
#include <utility>      // Need std::pair
#include <functional>
#include <string>       // For tbb_hasher
#include <cstring>      // Need std::memset

#if !TBB_USE_EXCEPTIONS && _MSC_VER
    #pragma warning (pop)
#endif

#include "atomic.h"
#include "tbb_exception.h"
#include "tbb_allocator.h"

namespace tbb {
namespace interface5 {
namespace internal {

template <typename T, typename Allocator>
class split_ordered_list;
template <typename Traits>
class concurrent_unordered_base;

// Forward list iterators (without skipping dummy elements)
template<class Solist, typename Value>
class flist_iterator : public std::iterator<std::forward_iterator_tag, Value>
{
    template <typename T, typename Allocator>
    friend class split_ordered_list;
    template <typename Traits>
    friend class concurrent_unordered_base;
    template<class M, typename V>
    friend class flist_iterator;

    typedef typename Solist::nodeptr_t nodeptr_t;
public:
    typedef typename Solist::value_type value_type;
    typedef typename Solist::difference_type difference_type;
    typedef typename Solist::pointer pointer;
    typedef typename Solist::reference reference;

    flist_iterator() : my_node_ptr(0) {}
    flist_iterator( const flist_iterator<Solist, typename Solist::value_type> &other )
        : my_node_ptr(other.my_node_ptr) {}

    reference operator*() const { return my_node_ptr->my_element; }
    pointer operator->() const { return &**this; }

    flist_iterator& operator++() {
        my_node_ptr = my_node_ptr->my_next;
        return *this;
    }

    flist_iterator operator++(int) {
        flist_iterator tmp = *this;
        ++*this;
        return tmp;
    }

protected:
    flist_iterator(nodeptr_t pnode) : my_node_ptr(pnode) {}
    nodeptr_t get_node_ptr() const { return my_node_ptr; }

    nodeptr_t my_node_ptr;

    template<typename M, typename T, typename U>
    friend bool operator==( const flist_iterator<M,T> &i, const flist_iterator<M,U> &j );
    template<typename M, typename T, typename U>
    friend bool operator!=( const flist_iterator<M,T>& i, const flist_iterator<M,U>& j );
};

template<typename Solist, typename T, typename U>
bool operator==( const flist_iterator<Solist,T> &i, const flist_iterator<Solist,U> &j ) {
    return i.my_node_ptr == j.my_node_ptr;
}
template<typename Solist, typename T, typename U>
bool operator!=( const flist_iterator<Solist,T>& i, const flist_iterator<Solist,U>& j ) {
    return i.my_node_ptr != j.my_node_ptr;
}

// Split-ordered list iterators, which skip dummy elements
template<class Solist, typename Value>
class solist_iterator : public flist_iterator<Solist, Value>
{
    typedef flist_iterator<Solist, Value> base_type;
    typedef typename Solist::nodeptr_t nodeptr_t;
    using base_type::get_node_ptr;
    template <typename T, typename Allocator>
    friend class split_ordered_list;
    template<class M, typename V>
    friend class solist_iterator;
    template<typename M, typename T, typename U>
    friend bool operator==( const solist_iterator<M,T> &i, const solist_iterator<M,U> &j );
    template<typename M, typename T, typename U>
    friend bool operator!=( const solist_iterator<M,T>& i, const solist_iterator<M,U>& j );

    const Solist *my_list_ptr;
    solist_iterator(nodeptr_t pnode, const Solist *plist) : base_type(pnode), my_list_ptr(plist) {}

public:
    typedef typename Solist::value_type value_type;
    typedef typename Solist::difference_type difference_type;
    typedef typename Solist::pointer pointer;
    typedef typename Solist::reference reference;

    solist_iterator() {}
    solist_iterator(const solist_iterator<Solist, typename Solist::value_type> &other )
        : base_type(other), my_list_ptr(other.my_list_ptr) {}

    reference operator*() const {
        return this->base_type::operator*();
    }

    pointer operator->() const {
        return (&**this);
    }

    solist_iterator& operator++() {
        do ++(*(base_type *)this);
        while (get_node_ptr() != NULL && get_node_ptr()->is_dummy());

        return (*this);
    }

    solist_iterator operator++(int) {
        solist_iterator tmp = *this;
        do ++*this;
        while (get_node_ptr() != NULL && get_node_ptr()->is_dummy());

        return (tmp);
    }
};

template<typename Solist, typename T, typename U>
bool operator==( const solist_iterator<Solist,T> &i, const solist_iterator<Solist,U> &j ) {
    return i.my_node_ptr == j.my_node_ptr && i.my_list_ptr == j.my_list_ptr;
}
template<typename Solist, typename T, typename U>
bool operator!=( const solist_iterator<Solist,T>& i, const solist_iterator<Solist,U>& j ) {
    return i.my_node_ptr != j.my_node_ptr || i.my_list_ptr != j.my_list_ptr;
}

// Forward type and class definitions
typedef size_t sokey_t;

// Forward list in which elements are sorted in a split-order
template <typename T, typename Allocator>
class split_ordered_list
{
public:
    typedef split_ordered_list<T, Allocator> self_type;
    typedef typename Allocator::template rebind<T>::other allocator_type;
    struct node;
    typedef node *nodeptr_t;

    typedef typename allocator_type::size_type size_type;
    typedef typename allocator_type::difference_type difference_type;
    typedef typename allocator_type::pointer pointer;
    typedef typename allocator_type::const_pointer const_pointer;
    typedef typename allocator_type::reference reference;
    typedef typename allocator_type::const_reference const_reference;
    typedef typename allocator_type::value_type value_type;

    typedef solist_iterator<self_type, const value_type> const_iterator;
    typedef solist_iterator<self_type, value_type> iterator;
    typedef flist_iterator<self_type, const value_type> raw_const_iterator;
    typedef flist_iterator<self_type, value_type> raw_iterator;

    // Node that holds the element in a split-ordered list
    struct node : tbb::internal::no_assign
    {
        // Initialize the node with the given order key
        void init(sokey_t order_key) {
            my_order_key = order_key;
            my_next = NULL;
        }

        // Return the order key (needed for hashing)
        sokey_t get_order_key() const { // TODO: remove
            return my_order_key;
        }

        // Atomically links a new element after this node with a single
        // compare-and-swap, but only if my_next still points to the cached successor
        nodeptr_t atomic_set_next(nodeptr_t new_node, nodeptr_t current_node)
        {
            // Try to change the next pointer on the current element to a new element, only if it still points to the cached next
            nodeptr_t exchange_node = (nodeptr_t) __TBB_CompareAndSwapW((void *) &my_next, (uintptr_t)new_node, (uintptr_t)current_node);

            if (exchange_node == current_node) // TODO: why this branch?
            {
                // Operation succeeded, return the new node
                return new_node;
            }
            else
            {
                // Operation failed, return the "interfering" node
                return exchange_node;
            }
        }

        // Checks if this element in the list is a dummy, order-enforcing node. Dummy nodes are used by buckets
        // in the hash table to quickly index into the right subsection of the split-ordered list.
        bool is_dummy() const {
            return (my_order_key & 0x1) == 0;
        }


        nodeptr_t  my_next;      // Next element in the list
        value_type my_element;   // Element storage
        sokey_t    my_order_key; // Order key for this element
    };
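
    /* Illustrative sketch (not part of the original header): the lock-free
       link step used throughout this list.  A caller caches the current
       successor, prepares the new node, then publishes it with one CAS; on
       contention the CAS reports the interfering node and the caller
       re-scans:

           nodeptr_t next = prev->my_next;     // cached successor
           new_node->my_next = next;
           if (prev->atomic_set_next(new_node, next) != new_node)
               ;   // another thread linked first -- retry the search
    */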

    // Allocate a new node with the given order key and value
    nodeptr_t create_node(sokey_t order_key, const T &value) {
        nodeptr_t pnode = my_node_allocator.allocate(1);

        __TBB_TRY {
            new(static_cast<void*>(&pnode->my_element)) T(value);
            pnode->init(order_key);
        } __TBB_CATCH(...) {
            my_node_allocator.deallocate(pnode, 1);
            __TBB_RETHROW();
        }

        return (pnode);
    }

    // Allocate a new node with the given order key; used to allocate dummy nodes
    nodeptr_t create_node(sokey_t order_key) {
        nodeptr_t pnode = my_node_allocator.allocate(1);

        __TBB_TRY {
            new(static_cast<void*>(&pnode->my_element)) T();
            pnode->init(order_key);
        } __TBB_CATCH(...) {
            my_node_allocator.deallocate(pnode, 1);
            __TBB_RETHROW();
        }

        return (pnode);
    }

    split_ordered_list(allocator_type a = allocator_type())
        : my_node_allocator(a), my_element_count(0)
    {
        // Immediately allocate a dummy node with order key of 0. This node
        // will always be the head of the list.
        my_head = create_node(0);
    }

    ~split_ordered_list()
    {
        // Clear the list
        clear();

        // Remove the head element which is not cleared by clear()
        nodeptr_t pnode = my_head;
        my_head = NULL;

        __TBB_ASSERT(pnode != NULL && pnode->my_next == NULL, "Invalid head list node");

        destroy_node(pnode);
    }

    // Common forward list functions

    allocator_type get_allocator() const {
        return (my_node_allocator);
    }

    void clear() {
        nodeptr_t pnext;
        nodeptr_t pnode = my_head;

        __TBB_ASSERT(my_head != NULL, "Invalid head list node");
        pnext = pnode->my_next;
        pnode->my_next = NULL;
        pnode = pnext;

        while (pnode != NULL)
        {
            pnext = pnode->my_next;
            destroy_node(pnode);
            pnode = pnext;
        }

        my_element_count = 0;
    }

    // Returns the first non-dummy element in the SOL
    iterator begin() {
        return first_real_iterator(raw_begin());
    }

    // Returns the first non-dummy element in the SOL
    const_iterator begin() const {
        return first_real_iterator(raw_begin());
    }

    iterator end() {
        return (iterator(0, this));
    }

    const_iterator end() const {
        return (const_iterator(0, this));
    }

    const_iterator cbegin() const {
        return (((const self_type *)this)->begin());
    }

    const_iterator cend() const {
        return (((const self_type *)this)->end());
    }

    // Checks if the number of elements (non-dummy) is 0
    bool empty() const {
        return (my_element_count == 0);
    }

    // Returns the number of non-dummy elements in the list
    size_type size() const {
        return my_element_count;
    }

    // Returns the maximum size of the list, determined by the allocator
    size_type max_size() const {
        return my_node_allocator.max_size();
    }

    // Swaps 'this' list with the passed in one
    void swap(self_type& other)
    {
        if (this == &other)
        {
            // Nothing to do
            return;
        }

        std::swap(my_element_count, other.my_element_count);
        std::swap(my_head, other.my_head);
    }

    // Split-ordered list functions

    // Returns the first element in the SOL, which is always a dummy
    raw_iterator raw_begin() {
        return raw_iterator(my_head);
    }

    // Returns the first element in the SOL, which is always a dummy
    raw_const_iterator raw_begin() const {
        return raw_const_iterator(my_head);
    }

    raw_iterator raw_end() {
        return raw_iterator(0);
    }

    raw_const_iterator raw_end() const {
        return raw_const_iterator(0);
    }

    static sokey_t get_order_key(const raw_const_iterator& it) {
        return it.get_node_ptr()->get_order_key();
    }

    static sokey_t get_safe_order_key(const raw_const_iterator& it) {
        if( !it.get_node_ptr() ) return sokey_t(~0U);
        return it.get_node_ptr()->get_order_key();
    }

    // Returns a public iterator version of the internal iterator. The public
    // iterator must not point to a dummy node.
    iterator get_iterator(raw_iterator it) {
        __TBB_ASSERT(it.get_node_ptr() == NULL || !it.get_node_ptr()->is_dummy(), "Invalid user node (dummy)");
        return iterator(it.get_node_ptr(), this);
    }

    // Returns a public iterator version of the internal iterator. The public
    // iterator must not point to a dummy node.
    const_iterator get_iterator(raw_const_iterator it) const {
        __TBB_ASSERT(it.get_node_ptr() == NULL || !it.get_node_ptr()->is_dummy(), "Invalid user node (dummy)");
        return const_iterator(it.get_node_ptr(), this);
    }

    // Returns a non-const version of the raw_iterator
    raw_iterator get_iterator(raw_const_iterator it) {
        return raw_iterator(it.get_node_ptr());
    }

    // Returns a non-const version of the iterator
    static iterator get_iterator(const_iterator it) {
        return iterator(it.my_node_ptr, it.my_list_ptr);
    }

    // Returns a public iterator version of the first non-dummy internal iterator at or after
    // the passed-in internal iterator.
    iterator first_real_iterator(raw_iterator it)
    {
        // Skip all dummy, internal-only nodes
        while (it != raw_end() && it.get_node_ptr()->is_dummy())
            ++it;

        return iterator(it.get_node_ptr(), this);
    }

    // Returns a public iterator version of the first non-dummy internal iterator at or after
    // the passed-in internal iterator.
    const_iterator first_real_iterator(raw_const_iterator it) const
    {
        // Skip all dummy, internal-only nodes
        while (it != raw_end() && it.get_node_ptr()->is_dummy())
            ++it;

        return const_iterator(it.get_node_ptr(), this);
    }

    // Erase an element using the allocator
    void destroy_node(nodeptr_t pnode) {
        my_node_allocator.destroy(pnode);
        my_node_allocator.deallocate(pnode, 1);
    }

    // Try to insert a new element in the list between previous and current_node.
    // On success returns new_node; on failure returns the interfering node that
    // was linked after previous instead.
    nodeptr_t try_insert(nodeptr_t previous, nodeptr_t new_node, nodeptr_t current_node) {
        new_node->my_next = current_node;
        return previous->atomic_set_next(new_node, current_node);
    }

    // Insert a new element between passed in iterators
    std::pair<iterator, bool> try_insert(raw_iterator it, raw_iterator next, const value_type &value, sokey_t order_key, size_type *new_count)
    {
        nodeptr_t pnode = create_node(order_key, value);
        nodeptr_t inserted_node = try_insert(it.get_node_ptr(), pnode, next.get_node_ptr());

        if (inserted_node == pnode)
        {
            // If the insert succeeded, check that the order is correct and increment the element count
            check_range();
            *new_count = __TBB_FetchAndAddW((uintptr_t*)&my_element_count, uintptr_t(1));
            return std::pair<iterator, bool>(iterator(pnode, this), true);
        }
        else
        {
            // If the insert failed (element already there), then delete the new one
            destroy_node(pnode);
            return std::pair<iterator, bool>(end(), false);
        }
    }

    // Insert a new dummy element, starting search at a parent dummy element
    raw_iterator insert_dummy(raw_iterator it, sokey_t order_key)
    {
        raw_iterator last = raw_end();
        raw_iterator where = it;

        __TBB_ASSERT(where != last, "Invalid head node");

        ++where;

        // Create a dummy element up front, even though it may be discarded (due to concurrent insertion)
        nodeptr_t dummy_node = create_node(order_key);

        for (;;)
        {
            __TBB_ASSERT(it != last, "Invalid head list node");

            // If the head iterator is at the end of the list, or past the point where this dummy
            // node needs to be inserted, then try to insert it.
            if (where == last || get_order_key(where) > order_key)
            {
                __TBB_ASSERT(get_order_key(it) < order_key, "Invalid node order in the list");

                // Try to insert it in the right place
                nodeptr_t inserted_node = try_insert(it.get_node_ptr(), dummy_node, where.get_node_ptr());

                if (inserted_node == dummy_node)
                {
                    // Insertion succeeded, check the list for order violations
                    check_range();
                    return raw_iterator(dummy_node);
                }
                else
                {
                    // Insertion failed: either dummy node was inserted by another thread, or
                    // a real element was inserted at exactly the same place as dummy node.
                    // Proceed with the search from the previous location where order key was
                    // known to be larger (note: this is legal only because there is no safe
                    // concurrent erase operation supported).
                    where = it;
                    ++where;
                    continue;
                }
            }
            else if (get_order_key(where) == order_key)
            {
                // Another dummy node with the same value found, discard the new one.
                destroy_node(dummy_node);
                return where;
            }

            // Move the iterator forward
            it = where;
            ++where;
        }
    }

    // This erase function can handle both real and dummy nodes
    void erase_node(raw_iterator previous, raw_const_iterator& where)
    {
        nodeptr_t pnode = (where++).get_node_ptr();
        nodeptr_t prevnode = previous.get_node_ptr();
        __TBB_ASSERT(prevnode->my_next == pnode, "Erase must take consecutive iterators");
        prevnode->my_next = pnode->my_next;

        destroy_node(pnode);
    }

    // Erase the element (previous node needs to be passed because this is a forward only list)
    iterator erase_node(raw_iterator previous, const_iterator where)
    {
        raw_const_iterator it = where;
        erase_node(previous, it);
        my_element_count--;

        return get_iterator(first_real_iterator(it));
    }

    // Move all elements from the passed in split-ordered list to this one
    void move_all(self_type& source)
    {
        raw_const_iterator first = source.raw_begin();
        raw_const_iterator last = source.raw_end();

        if (first == last)
            return;

        nodeptr_t previous_node = my_head;
        raw_const_iterator begin_iterator = first++;

        // Move all elements one by one, including dummy ones
        for (raw_const_iterator it = first; it != last;)
        {
            nodeptr_t pnode = it.get_node_ptr();

            // Clone the node (dummy or real) and append the copy to this list
            nodeptr_t new_node = pnode->is_dummy() ? create_node(pnode->get_order_key()) : create_node(pnode->get_order_key(), pnode->my_element);
            previous_node = try_insert(previous_node, new_node, NULL);
            __TBB_ASSERT(previous_node != NULL, "Insertion must succeed");
            raw_const_iterator where = it++;
            source.erase_node(get_iterator(begin_iterator), where);
        }
        check_range();
    }


private:

    // Check the list for order violations
    void check_range()
    {
#if TBB_USE_ASSERT
        for (raw_iterator it = raw_begin(); it != raw_end(); ++it)
        {
            raw_iterator next_iterator = it;
            ++next_iterator;

            __TBB_ASSERT(next_iterator == raw_end() || next_iterator.get_node_ptr()->get_order_key() >= it.get_node_ptr()->get_order_key(), "!!! List order inconsistency !!!");
        }
#endif
    }

    typename allocator_type::template rebind<node>::other my_node_allocator;  // allocator object for nodes
    size_type                                             my_element_count;   // Total item count, not counting dummy nodes
    nodeptr_t                                             my_head;            // pointer to head node
};
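
/* Conceptual picture (illustrative, not part of the original header): the
   split-ordered list keeps dummy nodes (D) and real elements (e) in one
   forward list, sorted by bit-reversed order keys.  With four buckets the
   dummies appear in bit-reversed bucket order 0, 2, 1, 3:

       head(D0) -> e -> D2 -> e -> e -> D1 -> D3 -> e

   Each bucket merely remembers an iterator to its dummy node, so doubling
   the bucket count never moves existing elements.
*/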

// Template class for hash compare
template<typename Key, typename Hasher, typename Key_equality>
class hash_compare
{
public:
    hash_compare() {}

    hash_compare(Hasher a_hasher) : my_hash_object(a_hasher) {}

    hash_compare(Hasher a_hasher, Key_equality a_keyeq) : my_hash_object(a_hasher), my_key_compare_object(a_keyeq) {}

    size_t operator()(const Key& key) const {
        return ((size_t)my_hash_object(key));
    }

    // Note: returns true when the keys are NOT equal
    bool operator()(const Key& key1, const Key& key2) const {
        return (!my_key_compare_object(key1, key2));
    }

    Hasher       my_hash_object;        // The hash object
    Key_equality my_key_compare_object; // The equality comparator object
};
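
/* Usage sketch (illustrative): hash_compare bundles the hash functor and the
   key-equality functor behind two operator() overloads; the two-argument
   form returns true when the keys differ.  The functor choices below are
   hypothetical:

       hash_compare<int, tbb::tbb_hash<int>, std::equal_to<int> > hc;
       size_t h    = hc(42);     // hash value of the key
       bool differ = hc(1, 2);   // true: 1 and 2 are not equal
*/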

#if _MSC_VER
#pragma warning(push)
#pragma warning(disable: 4127) // warning 4127 -- while (true) has a constant expression in it (for allow_multimapping)
#endif

template <typename Traits>
class concurrent_unordered_base : public Traits
{
protected:
    // Type definitions
    typedef concurrent_unordered_base<Traits> self_type;
    typedef typename Traits::value_type value_type;
    typedef typename Traits::key_type key_type;
    typedef typename Traits::hash_compare hash_compare;
    typedef typename Traits::value_compare value_compare;
    typedef typename Traits::allocator_type allocator_type;
    typedef typename allocator_type::pointer pointer;
    typedef typename allocator_type::const_pointer const_pointer;
    typedef typename allocator_type::reference reference;
    typedef typename allocator_type::const_reference const_reference;
    typedef typename allocator_type::size_type size_type;
    typedef typename allocator_type::difference_type difference_type;
    typedef split_ordered_list<value_type, typename Traits::allocator_type> solist_t;
    typedef typename solist_t::nodeptr_t nodeptr_t;
    // Iterators that walk the entire split-order list, including dummy nodes
    typedef typename solist_t::raw_iterator raw_iterator;
    typedef typename solist_t::raw_const_iterator raw_const_iterator;
    typedef typename solist_t::iterator iterator; // TODO: restore const iterator for unordered_sets
    typedef typename solist_t::const_iterator const_iterator;
    typedef iterator local_iterator;
    typedef const_iterator const_local_iterator;
    using Traits::my_hash_compare;
    using Traits::get_key;
    using Traits::allow_multimapping;

private:
    typedef std::pair<iterator, iterator> pairii_t;
    typedef std::pair<const_iterator, const_iterator> paircc_t;

    static size_type const pointers_per_table = sizeof(size_type) * 8;              // One bucket segment per bit
    static const size_type initial_bucket_number = 8;                               // Initial number of buckets
    static const size_type initial_bucket_load = 4;                                 // Initial maximum number of elements per bucket
protected:
    // Constructors/Destructors
    concurrent_unordered_base(size_type n_of_buckets = initial_bucket_number,
        const hash_compare& hc = hash_compare(), const allocator_type& a = allocator_type())
        : Traits(hc), my_solist(a),
          my_allocator(a), my_maximum_bucket_size((float) initial_bucket_load)
    {
        if( n_of_buckets == 0) ++n_of_buckets;
        my_number_of_buckets = 1<<__TBB_Log2((uintptr_t)n_of_buckets*2-1); // round up to power of 2
        internal_init();
    }

    concurrent_unordered_base(const concurrent_unordered_base& right, const allocator_type& a)
        : Traits(right.my_hash_compare), my_solist(a), my_allocator(a)
    {
        internal_init();
        internal_copy(right);
    }

    concurrent_unordered_base(const concurrent_unordered_base& right)
        : Traits(right.my_hash_compare), my_solist(right.get_allocator()), my_allocator(right.get_allocator())
    {
        internal_init();
        internal_copy(right);
    }

    concurrent_unordered_base& operator=(const concurrent_unordered_base& right) {
        if (this != &right)
            internal_copy(right);
        return (*this);
    }

    ~concurrent_unordered_base() {
        // Delete all node segments
        internal_clear();
    }

public:
    allocator_type get_allocator() const {
        return my_solist.get_allocator();
    }

    // Size and capacity functions
    bool empty() const {
        return my_solist.empty();
    }

    size_type size() const {
        return my_solist.size();
    }

    size_type max_size() const {
        return my_solist.max_size();
    }

    // Iterators
    iterator begin() {
        return my_solist.begin();
    }

    const_iterator begin() const {
        return my_solist.begin();
    }

    iterator end() {
        return my_solist.end();
    }

    const_iterator end() const {
        return my_solist.end();
    }

    const_iterator cbegin() const {
        return my_solist.cbegin();
    }

    const_iterator cend() const {
        return my_solist.cend();
    }

    // Parallel traversal support
    class const_range_type : tbb::internal::no_assign {
        const concurrent_unordered_base &my_table;
        raw_const_iterator my_begin_node;
        raw_const_iterator my_end_node;
        mutable raw_const_iterator my_midpoint_node;
    public:
        //! Type definitions
        typedef typename concurrent_unordered_base::size_type size_type;
        typedef typename concurrent_unordered_base::value_type value_type;
        typedef typename concurrent_unordered_base::reference reference;
        typedef typename concurrent_unordered_base::difference_type difference_type;
        typedef typename concurrent_unordered_base::const_iterator iterator;

        //! True if range is empty.
        bool empty() const {return my_begin_node == my_end_node;}

        //! True if range can be partitioned into two subranges.
        bool is_divisible() const {
            return my_midpoint_node != my_end_node;
        }
        //! Split range.
        const_range_type( const_range_type &r, split ) :
            my_table(r.my_table), my_end_node(r.my_end_node)
        {
            r.my_end_node = my_begin_node = r.my_midpoint_node;
            __TBB_ASSERT( !empty(), "Splitting despite the range not being divisible" );
            __TBB_ASSERT( !r.empty(), "Splitting despite the range not being divisible" );
            set_midpoint();
            r.set_midpoint();
        }
        //! Init range with container and grainsize specified
        const_range_type( const concurrent_unordered_base &a_table ) :
            my_table(a_table), my_begin_node(a_table.my_solist.begin()),
            my_end_node(a_table.my_solist.end())
        {
            set_midpoint();
        }
        iterator begin() const { return my_table.my_solist.get_iterator(my_begin_node); }
        iterator end() const { return my_table.my_solist.get_iterator(my_end_node); }
        //! The grain size for this range.
        size_type grainsize() const { return 1; }

        //! Set my_midpoint_node to point approximately half way between my_begin_node and my_end_node.
        void set_midpoint() const {
            if( my_begin_node == my_end_node ) // not divisible
                my_midpoint_node = my_end_node;
            else {
                sokey_t begin_key = solist_t::get_safe_order_key(my_begin_node);
                sokey_t end_key = solist_t::get_safe_order_key(my_end_node);
                size_t mid_bucket = __TBB_ReverseBits( begin_key + (end_key-begin_key)/2 ) % my_table.my_number_of_buckets;
                while ( !my_table.is_initialized(mid_bucket) ) mid_bucket = my_table.get_parent(mid_bucket);
                my_midpoint_node = my_table.my_solist.first_real_iterator(my_table.get_bucket( mid_bucket ));
                if( my_midpoint_node == my_begin_node )
                    my_midpoint_node = my_end_node;
#if TBB_USE_ASSERT
                else {
                    sokey_t mid_key = solist_t::get_safe_order_key(my_midpoint_node);
                    __TBB_ASSERT( begin_key < mid_key, "my_begin_node is after my_midpoint_node" );
                    __TBB_ASSERT( mid_key <= end_key, "my_midpoint_node is after my_end_node" );
                }
#endif // TBB_USE_ASSERT
            }
        }
    };

    class range_type : public const_range_type {
    public:
        typedef typename concurrent_unordered_base::iterator iterator;
        //! Split range.
        range_type( range_type &r, split ) : const_range_type( r, split() ) {}
        //! Init range with container and grainsize specified
        range_type( const concurrent_unordered_base &a_table ) : const_range_type(a_table) {}

        iterator begin() const { return solist_t::get_iterator( const_range_type::begin() ); }
        iterator end() const { return solist_t::get_iterator( const_range_type::end() ); }
    };

    range_type range() {
        return range_type( *this );
    }

    const_range_type range() const {
        return const_range_type( *this );
    }
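
    /* Usage sketch (illustrative): range() makes containers built on this
       base work with tbb::parallel_for.  The container alias and the loop
       body are hypothetical, and the lambda requires a C++0x-capable
       compiler:

           typedef tbb::concurrent_unordered_map<int,int> table_t;
           table_t table;
           // ... populate table ...
           tbb::parallel_for( table.range(),
               []( const table_t::range_type &r ) {
                   for( table_t::range_type::iterator i = r.begin(); i != r.end(); ++i )
                       ++i->second;   // process each element
               } );
    */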

    // Modifiers
    std::pair<iterator, bool> insert(const value_type& value) {
        return internal_insert(value);
    }

    iterator insert(const_iterator, const value_type& value) {
        // Ignore hint
        return insert(value).first;
    }

    template<class Iterator>
    void insert(Iterator first, Iterator last) {
        for (Iterator it = first; it != last; ++it)
            insert(*it);
    }

    iterator unsafe_erase(const_iterator where) {
        return internal_erase(where);
    }

    iterator unsafe_erase(const_iterator first, const_iterator last) {
        while (first != last)
            unsafe_erase(first++);
        return my_solist.get_iterator(first);
    }

    size_type unsafe_erase(const key_type& key) {
        pairii_t where = equal_range(key);
        size_type item_count = internal_distance(where.first, where.second);
        unsafe_erase(where.first, where.second);
        return item_count;
    }

    void swap(concurrent_unordered_base& right) {
        if (this != &right) {
            std::swap(my_hash_compare, right.my_hash_compare); // TODO: check what ADL meant here
            my_solist.swap(right.my_solist);
            internal_swap_buckets(right);
            std::swap(my_number_of_buckets, right.my_number_of_buckets);
            std::swap(my_maximum_bucket_size, right.my_maximum_bucket_size);
        }
    }

    // Observers
    void clear() {
        // Clear list
        my_solist.clear();

        // Clear buckets
        internal_clear();

        // Initialize bucket 0
        __TBB_ASSERT(my_buckets[0] == NULL, NULL);
        raw_iterator dummy_node = my_solist.raw_begin();
        set_bucket(0, dummy_node);
    }

    // Lookup
    iterator find(const key_type& key) {
        return internal_find(key);
    }

    const_iterator find(const key_type& key) const {
        return const_cast<self_type*>(this)->internal_find(key);
    }

    size_type count(const key_type& key) const {
        if(allow_multimapping) {
            paircc_t answer = equal_range(key);
            size_type item_count = internal_distance(answer.first, answer.second);
            return item_count;
        } else {
            return const_cast<self_type*>(this)->internal_find(key) == end() ? 0 : 1;
        }
    }

    std::pair<iterator, iterator> equal_range(const key_type& key) {
        return internal_equal_range(key);
    }

    std::pair<const_iterator, const_iterator> equal_range(const key_type& key) const {
        return const_cast<self_type*>(this)->internal_equal_range(key);
    }

    // Bucket interface - for debugging
    size_type unsafe_bucket_count() const {
        return my_number_of_buckets;
    }

    size_type unsafe_max_bucket_count() const {
        return segment_size(pointers_per_table-1);
    }

    size_type unsafe_bucket_size(size_type bucket) {
        size_type item_count = 0;
        if (is_initialized(bucket)) {
            raw_iterator it = get_bucket(bucket);
            ++it;
            for (; it != my_solist.raw_end() && !it.get_node_ptr()->is_dummy(); ++it)
                ++item_count;
        }
        return item_count;
    }

    size_type unsafe_bucket(const key_type& key) const {
        sokey_t order_key = (sokey_t) my_hash_compare(key);
        size_type bucket = order_key % my_number_of_buckets;
        return bucket;
    }

    // If the bucket is initialized, return the first non-dummy element in it
    local_iterator unsafe_begin(size_type bucket) {
        if (!is_initialized(bucket))
            return end();

        raw_iterator it = get_bucket(bucket);
        return my_solist.first_real_iterator(it);
    }

    // If the bucket is initialized, return the first non-dummy element in it
    const_local_iterator unsafe_begin(size_type bucket) const
    {
        if (!is_initialized(bucket))
            return end();

        raw_const_iterator it = get_bucket(bucket);
        return my_solist.first_real_iterator(it);
    }

    // @REVIEW: Takes O(n)
    // Returns the iterator after the last non-dummy element in the bucket
    local_iterator unsafe_end(size_type bucket)
    {
        if (!is_initialized(bucket))
            return end();

        raw_iterator it = get_bucket(bucket);

        // Find the end of the bucket, denoted by the dummy element
        do ++it;
        while(it != my_solist.raw_end() && !it.get_node_ptr()->is_dummy());

        // Return the first real element past the end of the bucket
        return my_solist.first_real_iterator(it);
    }

    // @REVIEW: Takes O(n)
    // Returns the iterator after the last non-dummy element in the bucket
    const_local_iterator unsafe_end(size_type bucket) const
    {
        if (!is_initialized(bucket))
            return end();

        raw_const_iterator it = get_bucket(bucket);

        // Find the end of the bucket, denoted by the dummy element
        do ++it;
        while(it != my_solist.raw_end() && !it.get_node_ptr()->is_dummy());

        // Return the first real element past the end of the bucket
        return my_solist.first_real_iterator(it);
    }

    const_local_iterator unsafe_cbegin(size_type bucket) const {
        // Delegate to the bucket-local begin; the original returned begin() of
        // the whole container and ignored the bucket argument.
        return ((const self_type *) this)->unsafe_begin(bucket);
    }

    const_local_iterator unsafe_cend(size_type bucket) const {
        // Delegate to the bucket-local end; the original returned end() of
        // the whole container and ignored the bucket argument.
        return ((const self_type *) this)->unsafe_end(bucket);
    }

    // Hash policy
    float load_factor() const {
        return (float) size() / (float) unsafe_bucket_count();
    }

    float max_load_factor() const {
        return my_maximum_bucket_size;
    }

    void max_load_factor(float newmax) {
        if (newmax != newmax || newmax < 0) // newmax != newmax catches NaN
            tbb::internal::throw_exception(tbb::internal::eid_invalid_load_factor);
        my_maximum_bucket_size = newmax;
    }

    // This function does not move any elements: the underlying split-ordered
    // list is already sorted, so it only raises the bucket count, and the new
    // buckets take effect lazily the next time they are touched.
    void rehash(size_type buckets) {
        size_type current_buckets = my_number_of_buckets;
        if (current_buckets >= buckets)
            return;
        my_number_of_buckets = 1<<__TBB_Log2((uintptr_t)buckets*2-1); // round up to power of 2
    }

private:

    // Initialize the hash and keep the first bucket open
    void internal_init() {
        // Zero the array of segment pointers
        std::memset(my_buckets, 0, pointers_per_table * sizeof(void *));

        // Initialize bucket 0
        raw_iterator dummy_node = my_solist.raw_begin();
        set_bucket(0, dummy_node);
    }

    void internal_clear() {
        for (size_type index = 0; index < pointers_per_table; ++index) {
            if (my_buckets[index] != NULL) {
                size_type sz = segment_size(index);
                for (size_type index2 = 0; index2 < sz; ++index2)
                    my_allocator.destroy(&my_buckets[index][index2]);
                my_allocator.deallocate(my_buckets[index], sz);
                my_buckets[index] = 0;
            }
        }
    }

    void internal_copy(const self_type& right) {
        clear();

        my_maximum_bucket_size = right.my_maximum_bucket_size;
        my_number_of_buckets = right.my_number_of_buckets;

        __TBB_TRY {
            insert(right.begin(), right.end());
            my_hash_compare = right.my_hash_compare;
        } __TBB_CATCH(...) {
            my_solist.clear();
            __TBB_RETHROW();
        }
    }

    void internal_swap_buckets(concurrent_unordered_base& right)
    {
        // Swap all node segments
        for (size_type index = 0; index < pointers_per_table; ++index)
        {
            raw_iterator * iterator_pointer = my_buckets[index];
            my_buckets[index] = right.my_buckets[index];
            right.my_buckets[index] = iterator_pointer;
        }
    }

    // Hash APIs
    size_type internal_distance(const_iterator first, const_iterator last) const
    {
        size_type num = 0;

        for (const_iterator it = first; it != last; ++it)
            ++num;

        return num;
    }

    // Insert an element in the hash given its value
    std::pair<iterator, bool> internal_insert(const value_type& value)
    {
        sokey_t order_key = (sokey_t) my_hash_compare(get_key(value));
        size_type bucket = order_key % my_number_of_buckets;

        // If bucket is empty, initialize it first
        if (!is_initialized(bucket))
            init_bucket(bucket);

        size_type new_count;
        order_key = split_order_key_regular(order_key);
        raw_iterator it = get_bucket(bucket);
        raw_iterator last = my_solist.raw_end();
        raw_iterator where = it;

        __TBB_ASSERT(where != last, "Invalid head node");

        // First node is a dummy node
        ++where;

        for (;;)
        {
            if (where == last || solist_t::get_order_key(where) > order_key)
            {
                // Try to insert it in the right place
                std::pair<iterator, bool> result = my_solist.try_insert(it, where, value, order_key, &new_count);

                if (result.second)
                {
                    // Insertion succeeded, adjust the table size, if needed
                    adjust_table_size(new_count, my_number_of_buckets);
                    return result;
                }
                else
                {
                    // Insertion failed: either the same node was inserted by another thread, or
                    // another element was inserted at exactly the same place as this node.
                    // Proceed with the search from the previous location where order key was
                    // known to be larger (note: this is legal only because there is no safe
                    // concurrent erase operation supported).
                    where = it;
                    ++where;
                    continue;
                }
            }
            else if (!allow_multimapping && solist_t::get_order_key(where) == order_key && my_hash_compare(get_key(*where), get_key(value)) == 0)
            {
                // Element already in the list, return it
                return std::pair<iterator, bool>(my_solist.get_iterator(where), false);
            }

            // Move the iterator forward
            it = where;
            ++where;
        }
    }

    // Find the element in the split-ordered list
    iterator internal_find(const key_type& key)
    {
        sokey_t order_key = (sokey_t) my_hash_compare(key);
        size_type bucket = order_key % my_number_of_buckets;

        // If bucket is empty, initialize it first
        if (!is_initialized(bucket))
            init_bucket(bucket);

        order_key = split_order_key_regular(order_key);
        raw_iterator last = my_solist.raw_end();

        for (raw_iterator it = get_bucket(bucket); it != last; ++it)
        {
            if (solist_t::get_order_key(it) > order_key)
            {
                // The current node's order key is larger than the sought one,
                // so the element is not in the hash.
                return end();
            }
            else if (solist_t::get_order_key(it) == order_key)
            {
                // The fact that order keys match does not mean that the element is found.
                // Key function comparison has to be performed to check whether this is the
                // right element. If not, keep searching while order key is the same.
                if (!my_hash_compare(get_key(*it), key))
                    return my_solist.get_iterator(it);
            }
        }

        return end();
    }

    // Erase an element from the list. This is not a concurrency safe function.
    iterator internal_erase(const_iterator it)
    {
        key_type key = get_key(*it);
        sokey_t order_key = (sokey_t) my_hash_compare(key);
        size_type bucket = order_key % my_number_of_buckets;

        // If bucket is empty, initialize it first
        if (!is_initialized(bucket))
            init_bucket(bucket);

        order_key = split_order_key_regular(order_key);

        raw_iterator previous = get_bucket(bucket);
        raw_iterator last = my_solist.raw_end();
        raw_iterator where = previous;

        __TBB_ASSERT(where != last, "Invalid head node");

        // First node is a dummy node
        ++where;

        for (;;) {
            if (where == last)
                return end();
            else if (my_solist.get_iterator(where) == it)
                return my_solist.erase_node(previous, it);

            // Move the iterator forward
            previous = where;
            ++where;
        }
    }

    // Return the [begin, end) pair of iterators with the same key values.
    // This operation makes sense only if mapping is many-to-one.
    pairii_t internal_equal_range(const key_type& key)
    {
        sokey_t order_key = (sokey_t) my_hash_compare(key);
        size_type bucket = order_key % my_number_of_buckets;

        // If bucket is empty, initialize it first
        if (!is_initialized(bucket))
            init_bucket(bucket);

        order_key = split_order_key_regular(order_key);
        raw_iterator end_it = my_solist.raw_end();

        for (raw_iterator it = get_bucket(bucket); it != end_it; ++it)
        {
            if (solist_t::get_order_key(it) > order_key)
            {
                // There is no element with the given key
                return pairii_t(end(), end());
            }
            else if (solist_t::get_order_key(it) == order_key && !my_hash_compare(get_key(*it), key))
            {
                iterator first = my_solist.get_iterator(it);
                iterator last = first;
                do ++last; while( allow_multimapping && last != end() && !my_hash_compare(get_key(*last), key) );
                return pairii_t(first, last);
            }
        }

        return pairii_t(end(), end());
    }

    // Bucket APIs
    void init_bucket(size_type bucket)
    {
        // Bucket 0 has no parent.
        __TBB_ASSERT( bucket != 0, "The first bucket must always be initialized");

        size_type parent_bucket = get_parent(bucket);

        // All parent_bucket buckets have to be initialized before this bucket is
        if (!is_initialized(parent_bucket))
            init_bucket(parent_bucket);

        raw_iterator parent = get_bucket(parent_bucket);

        // Create a dummy first node in this bucket
        raw_iterator dummy_node = my_solist.insert_dummy(parent, split_order_key_dummy(bucket));
        set_bucket(bucket, dummy_node);
    }
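
    /* Worked example (illustrative): init_bucket(6) first needs bucket 2
       (the parent of 6 = 110b with its top set bit cleared -> 010b), which
       in turn needs bucket 0; each level then splices one dummy node into
       the list via insert_dummy() and publishes it with set_bucket(). */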

    void adjust_table_size(size_type total_elements, size_type current_size)
    {
        // Grow the table by a factor of 2 if possible and needed
        if ( ((float) total_elements / (float) current_size) > my_maximum_bucket_size )
        {
            // Double the size of the hash only if size has not changed in between loads
            __TBB_CompareAndSwapW((uintptr_t*)&my_number_of_buckets, uintptr_t(2u*current_size), uintptr_t(current_size) );
            // A simple "my_number_of_buckets.compare_and_swap( current_size<<1, current_size );" does not work for VC8
            // due to overzealous compiler warnings in /Wp64 mode
        }
    }

    size_type get_parent(size_type bucket) const
    {
        // Clears the most significant set bit of bucket
        size_type msb = __TBB_Log2((uintptr_t)bucket);
        return bucket & ~(size_type(1) << msb);
    }
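
    /* Worked example (illustrative): bucket 6 is 110 in binary; its most
       significant set bit is 100b, so get_parent(6) == 6 & ~4 == 2 (010b).
       Likewise get_parent(2) == 0, and bucket 0 is always initialized. */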


    // Dynamic sized array (segments)
    //! @return segment index of given index in the array
    static size_type segment_index_of( size_type index ) {
        return size_type( __TBB_Log2( uintptr_t(index|1) ) );
    }

    //! @return the first array index of given segment
    static size_type segment_base( size_type k ) {
        return (size_type(1)<<k & ~size_type(1));
    }

    //! @return segment size
    static size_type segment_size( size_type k ) {
        return k? size_type(1)<<k : 2;
    }
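
    /* Worked mapping (illustrative): buckets live in lazily allocated
       power-of-two segments --
           buckets 0..1  -> segment 0 (size 2)
           buckets 2..3  -> segment 1 (size 2)
           buckets 4..7  -> segment 2 (size 4)
           buckets 8..15 -> segment 3 (size 8)
       e.g. segment_index_of(5) == 2 and segment_base(2) == 4, so bucket 5
       is stored at my_buckets[2][1]. */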

    raw_iterator get_bucket(size_type bucket) const {
        size_type segment = segment_index_of(bucket);
        bucket -= segment_base(segment);
        __TBB_ASSERT( my_buckets[segment], "bucket must be in an allocated segment" );
        return my_buckets[segment][bucket];
    }

    void set_bucket(size_type bucket, raw_iterator dummy_head) {
        size_type segment = segment_index_of(bucket);
        bucket -= segment_base(segment);

        if (my_buckets[segment] == NULL) {
            size_type sz = segment_size(segment);
            raw_iterator * new_segment = my_allocator.allocate(sz);
            std::memset(new_segment, 0, sz*sizeof(raw_iterator));

            if (__TBB_CompareAndSwapW((void *) &my_buckets[segment], (uintptr_t)new_segment, 0) != 0)
                my_allocator.deallocate(new_segment, sz);
        }

        my_buckets[segment][bucket] = dummy_head;
    }

    bool is_initialized(size_type bucket) const {
        size_type segment = segment_index_of(bucket);
        bucket -= segment_base(segment);

        if (my_buckets[segment] == NULL)
            return false;

        raw_iterator it = my_buckets[segment][bucket];
        return (it.get_node_ptr() != NULL);
    }

    // Utilities for keys

    // A regular order key has its original hash value reversed and the last bit set
    sokey_t split_order_key_regular(sokey_t order_key) const {
        return __TBB_ReverseBits(order_key) | 0x1;
    }

    // A dummy order key has its original hash value reversed and the last bit unset
    sokey_t split_order_key_dummy(sokey_t order_key) const {
        return __TBB_ReverseBits(order_key) & ~(0x1);
    }
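
    /* Worked example (illustrative, shown on 8-bit keys for brevity; the
       real code reverses all bits of a size_t): hash value 6 is 00000110b,
       and reversing gives 01100000b.  The dummy key for bucket 6 clears the
       low bit (01100000b) while a regular element with hash 6 sets it
       (01100001b), so each bucket's dummy sorts just before the elements it
       guards. */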

    // Shared variables
    atomic<size_type>                                             my_number_of_buckets;       // Current table size
    solist_t                                                      my_solist;                  // List where all the elements are kept
    typename allocator_type::template rebind<raw_iterator>::other my_allocator;               // Allocator object for segments
    float                                                         my_maximum_bucket_size;     // Maximum size of the bucket
    atomic<raw_iterator*>                                         my_buckets[pointers_per_table]; // The segment table
};
#if _MSC_VER
#pragma warning(pop) // warning 4127 -- while (true) has a constant expression in it
#endif

//! Hash multiplier (2^n divided by the golden ratio, for multiplicative hashing)
static const size_t hash_multiplier = sizeof(size_t)==4? 2654435769U : 11400714819323198485ULL;
} // namespace internal
//! Hasher functions
template<typename T>
inline size_t tbb_hasher( const T& t ) {
    return static_cast<size_t>( t ) * internal::hash_multiplier;
}
template<typename P>
inline size_t tbb_hasher( P* ptr ) {
    size_t const h = reinterpret_cast<size_t>( ptr );
    return (h >> 3) ^ h;
}
template<typename E, typename S, typename A>
inline size_t tbb_hasher( const std::basic_string<E,S,A>& s ) {
    size_t h = 0;
    for( const E* c = s.c_str(); *c; ++c )
        h = static_cast<size_t>(*c) ^ (h * internal::hash_multiplier);
    return h;
}
template<typename F, typename S>
inline size_t tbb_hasher( const std::pair<F,S>& p ) {
    return tbb_hasher(p.first) ^ tbb_hasher(p.second);
}
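
/* Usage sketch (illustrative): tbb_hasher can be called directly on
   integral values, pointers, strings, and pairs:

       size_t h1 = tbb::tbb_hasher( 42 );                   // integral
       size_t h2 = tbb::tbb_hasher( std::string("abc") );   // string
       size_t h3 = tbb::tbb_hasher( std::make_pair(1, 2) ); // pair
*/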
} // namespace interface5
using interface5::tbb_hasher;
} // namespace tbb
#endif // __TBB_concurrent_unordered_internal_H
