mt_allocator.h

// MT-optimized allocator -*- C++ -*-

// Copyright (C) 2003, 2004, 2005 Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library.  This library is free
// software; you can redistribute it and/or modify it under the
// terms of the GNU General Public License as published by the
// Free Software Foundation; either version 2, or (at your option)
// any later version.

// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU General Public License for more details.

// You should have received a copy of the GNU General Public License along
// with this library; see the file COPYING.  If not, write to the Free
// Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307,
// USA.

// As a special exception, you may use this file as part of a free software
// library without restriction.  Specifically, if other files instantiate
// templates or use macros or inline functions from this file, or you compile
// this file and link it with other files to produce an executable, this
// file does not by itself cause the resulting executable to be covered by
// the GNU General Public License.  This exception does not however
// invalidate any other reasons why the executable file might be covered by
// the GNU General Public License.

/** @file ext/mt_allocator.h
 *  This file is a GNU extension to the Standard C++ Library.
 */

#ifndef _MT_ALLOCATOR_H
#define _MT_ALLOCATOR_H 1

#include <new>
#include <cstdlib>
#include <bits/functexcept.h>
#include <bits/gthr.h>
#include <bits/atomicity.h>

namespace __gnu_cxx
{
  typedef void (*__destroy_handler)(void*);
  typedef void (*__create_handler)(void);

  /// @brief  Base class for pool object.
  struct __pool_base
  {
    // Using short int as type for the binmap implies we are never
    // caching blocks larger than 65535 with this allocator.
    typedef unsigned short int _Binmap_type;

    // Variables used to configure the behavior of the allocator,
    // assigned and explained in detail below.
    struct _Tune
    {
      // Compile time constants for the default _Tune values.
      enum { _S_align = 8 };
      enum { _S_max_bytes = 128 };
      enum { _S_min_bin = 8 };
      enum { _S_chunk_size = 4096 - 4 * sizeof(void*) };
      enum { _S_max_threads = 4096 };
      enum { _S_freelist_headroom = 10 };

      // Alignment needed.
      // NB: In any case must be >= sizeof(_Block_record), that
      // is 4 on 32 bit machines and 8 on 64 bit machines.
      size_t    _M_align;

      // Allocation requests (after round-up to power of 2) below
      // this value will be handled by the allocator. A raw call to
      // new will be used for requests larger than this value.
      size_t    _M_max_bytes;

      // Size in bytes of the smallest bin.
      // NB: Must be a power of 2 and >= _M_align.
      size_t    _M_min_bin;

      // In order to avoid fragmentation and minimize the number of
      // new() calls we always request new memory using this
      // value. Based on previous discussions on the libstdc++
      // mailing list we have chosen the value below.
      // See http://gcc.gnu.org/ml/libstdc++/2001-07/msg00077.html
      size_t    _M_chunk_size;

      // The maximum number of supported threads. For
      // single-threaded operation, use one. Maximum values will
      // vary depending on details of the underlying system. (For
      // instance, Linux 2.4.18 reports 4070 in
      // /proc/sys/kernel/threads-max, while Linux 2.6.6 reports
      // 65534.)
      size_t    _M_max_threads;

      // Each time a deallocation occurs in a threaded application
      // we make sure that there are no more than
      // _M_freelist_headroom % of used memory on the freelist. If
      // the number of additional records is more than
      // _M_freelist_headroom % of the freelist, we move these
      // records back to the global pool.
      size_t    _M_freelist_headroom;

      // When set to true, forces all allocations to use new().
      bool      _M_force_new;

      explicit
      _Tune()
      : _M_align(_S_align), _M_max_bytes(_S_max_bytes), _M_min_bin(_S_min_bin),
      _M_chunk_size(_S_chunk_size), _M_max_threads(_S_max_threads),
      _M_freelist_headroom(_S_freelist_headroom),
      _M_force_new(getenv("GLIBCXX_FORCE_NEW") ? true : false)
      { }

      explicit
      _Tune(size_t __align, size_t __maxb, size_t __minbin, size_t __chunk,
            size_t __maxthreads, size_t __headroom, bool __force)
      : _M_align(__align), _M_max_bytes(__maxb), _M_min_bin(__minbin),
      _M_chunk_size(__chunk), _M_max_threads(__maxthreads),
      _M_freelist_headroom(__headroom), _M_force_new(__force)
      { }
    };
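
    // Illustrative sketch, not part of the original header: constructing
    // a custom _Tune by hand. The seven arguments map, in order, onto the
    // members documented above; the particular values are assumptions
    // chosen only for demonstration, respecting the stated constraints
    // (powers of 2, _M_min_bin >= _M_align).
    //
    //   __gnu_cxx::__pool_base::_Tune __t(16,      // _M_align
    //                                     256,     // _M_max_bytes
    //                                     16,      // _M_min_bin
    //                                     8192,    // _M_chunk_size
    //                                     1,       // _M_max_threads
    //                                     10,      // _M_freelist_headroom
    //                                     false);  // _M_force_new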

    struct _Block_address
    {
      void*             _M_initial;
      _Block_address*   _M_next;
    };

    const _Tune&
    _M_get_options() const
    { return _M_options; }

    void
    _M_set_options(_Tune __t)
    {
      if (!_M_init)
        _M_options = __t;
    }

    bool
    _M_check_threshold(size_t __bytes)
    { return __bytes > _M_options._M_max_bytes || _M_options._M_force_new; }

    size_t
    _M_get_binmap(size_t __bytes)
    { return _M_binmap[__bytes]; }

    const size_t
    _M_get_align()
    { return _M_options._M_align; }

    explicit
    __pool_base()
    : _M_options(_Tune()), _M_binmap(NULL), _M_init(false) { }

    explicit
    __pool_base(const _Tune& __options)
    : _M_options(__options), _M_binmap(NULL), _M_init(false) { }

  private:
    explicit
    __pool_base(const __pool_base&);

    __pool_base&
    operator=(const __pool_base&);

  protected:
    // Configuration options.
    _Tune             _M_options;

    _Binmap_type*     _M_binmap;

    // Configuration of the pool object via _M_options can happen
    // after construction but before initialization. After
    // initialization is complete, this variable is set to true.
    bool              _M_init;
  };


  /**
   *  @brief  Data describing the underlying memory pool, parameterized on
   *  threading support.
   */
  template<bool _Thread>
    class __pool;

  template<>
    class __pool<true>;

  template<>
    class __pool<false>;

  /// Specialization for single thread.
  template<>
    class __pool<false> : public __pool_base
    {
    public:
      union _Block_record
      {
        // Points to the block_record of the next free block.
        _Block_record* volatile   _M_next;
      };

      struct _Bin_record
      {
        // An "array" of pointers to the first free block.
        _Block_record** volatile  _M_first;

        // A list of the initial addresses of all allocated blocks.
        _Block_address*           _M_address;
      };

      void
      _M_initialize_once()
      {
        if (__builtin_expect(_M_init == false, false))
          _M_initialize();
      }

      void
      _M_destroy() throw();

      char*
      _M_reserve_block(size_t __bytes, const size_t __thread_id);

      void
      _M_reclaim_block(char* __p, size_t __bytes);

      size_t
      _M_get_thread_id() { return 0; }

      const _Bin_record&
      _M_get_bin(size_t __which)
      { return _M_bin[__which]; }

      void
      _M_adjust_freelist(const _Bin_record&, _Block_record*, size_t)
      { }

      explicit __pool()
      : _M_bin(NULL), _M_bin_size(1) { }

      explicit __pool(const __pool_base::_Tune& __tune)
      : __pool_base(__tune), _M_bin(NULL), _M_bin_size(1) { }

    private:
      // An "array" of bin_records each of which represents a specific
      // power of 2 size. Memory to this "array" is allocated in
      // _M_initialize().
      _Bin_record* volatile     _M_bin;

      // Actual value calculated in _M_initialize().
      size_t                    _M_bin_size;

      void
      _M_initialize();
    };

#ifdef __GTHREADS
  /// Specialization for thread enabled, via gthreads.h.
  template<>
    class __pool<true> : public __pool_base
    {
    public:
      // Each requesting thread is assigned an id ranging from 1 to
      // _S_max_threads. Thread id 0 is used as a global memory pool.
      // In order to get constant performance on the thread assignment
      // routine, we keep a list of free ids. When a thread first
      // requests memory we remove the first record in this list and
      // store the address in a __gthread_key. When initializing the
      // __gthread_key we specify a destructor. When this destructor
      // is called (i.e. when the thread dies), we return the thread
      // id to the front of this list.
      struct _Thread_record
      {
        // Points to next free thread id record. NULL if last record in list.
        _Thread_record* volatile  _M_next;

        // Thread id ranging from 1 to _S_max_threads.
        size_t                    _M_id;
      };

      union _Block_record
      {
        // Points to the block_record of the next free block.
        _Block_record* volatile   _M_next;

        // The thread id of the thread which has requested this block.
        size_t                    _M_thread_id;
      };

      struct _Bin_record
      {
        // An "array" of pointers to the first free block for each
        // thread id. Memory to this "array" is allocated in
        // _S_initialize() for _S_max_threads + global pool 0.
        _Block_record** volatile  _M_first;

        // A list of the initial addresses of all allocated blocks.
        _Block_address*           _M_address;

        // An "array" of counters used to keep track of the number of
        // blocks that are on the freelist/used for each thread id.
        // Memory to these "arrays" is allocated in _S_initialize() for
        // _S_max_threads + global pool 0.
        size_t* volatile          _M_free;
        size_t* volatile          _M_used;

        // Each bin has its own mutex which is used to ensure data
        // integrity while changing "ownership" on a block.  The mutex
        // is initialized in _S_initialize().
        __gthread_mutex_t*        _M_mutex;
      };

      void
      _M_initialize(__destroy_handler __d);

      void
      _M_initialize_once(__create_handler __c)
      {
        // Although the test in __gthread_once() would suffice, we
        // wrap the test of the once condition in our own unlocked
        // check. This saves one function call to pthread_once()
        // (which itself only tests the once value unlocked anyway
        // and immediately returns if it is set).
        if (__builtin_expect(_M_init == false, false))
          {
            if (__gthread_active_p())
              __gthread_once(&_M_once, __c);
            if (!_M_init)
              __c();
          }
      }

      void
      _M_destroy() throw();

      char*
      _M_reserve_block(size_t __bytes, const size_t __thread_id);

      void
      _M_reclaim_block(char* __p, size_t __bytes);

      const _Bin_record&
      _M_get_bin(size_t __which)
      { return _M_bin[__which]; }

      void
      _M_adjust_freelist(const _Bin_record& __bin, _Block_record* __block,
                         size_t __thread_id)
      {
        if (__gthread_active_p())
          {
            __block->_M_thread_id = __thread_id;
            --__bin._M_free[__thread_id];
            ++__bin._M_used[__thread_id];
          }
      }

      void
      _M_destroy_thread_key(void* __freelist_pos);

      size_t
      _M_get_thread_id();

      explicit __pool()
      : _M_bin(NULL), _M_bin_size(1), _M_thread_freelist(NULL)
      {
        // On some platforms, __gthread_once_t is an aggregate.
        __gthread_once_t __tmp = __GTHREAD_ONCE_INIT;
        _M_once = __tmp;
      }

      explicit __pool(const __pool_base::_Tune& __tune)
      : __pool_base(__tune), _M_bin(NULL), _M_bin_size(1),
      _M_thread_freelist(NULL)
      {
        // On some platforms, __gthread_once_t is an aggregate.
        __gthread_once_t __tmp = __GTHREAD_ONCE_INIT;
        _M_once = __tmp;
      }

    private:
      // An "array" of bin_records each of which represents a specific
      // power of 2 size. Memory to this "array" is allocated in
      // _M_initialize().
      _Bin_record* volatile     _M_bin;

      // Actual value calculated in _M_initialize().
      size_t                    _M_bin_size;

      __gthread_once_t          _M_once;

      _Thread_record*           _M_thread_freelist;
      void*                     _M_thread_freelist_initial;
    };
#endif


  /// @brief  Policy for shared __pool objects.
  template<template <bool> class _PoolTp, bool _Thread>
    struct __common_pool_policy;

  /// Partial specialization for single thread.
  template<template <bool> class _PoolTp>
    struct __common_pool_policy<_PoolTp, false>
    {
      typedef _PoolTp<false> pool_type;

      template<typename _Tp1, template <bool> class _PoolTp1 = _PoolTp,
               bool _Thread1 = false>
        struct _M_rebind
        { typedef __common_pool_policy<_PoolTp1, _Thread1> other; };

      static pool_type&
      _S_get_pool()
      {
        static pool_type _S_pool;
        return _S_pool;
      }

      static void
      _S_initialize_once()
      {
        static bool __init;
        if (__builtin_expect(__init == false, false))
          {
            _S_get_pool()._M_initialize_once();
            __init = true;
          }
      }
    };

#ifdef __GTHREADS
  /// Partial specialization for thread enabled, via gthreads.h.
  template<template <bool> class _PoolTp>
    struct __common_pool_policy<_PoolTp, true>
    {
      typedef _PoolTp<true> pool_type;

      template<typename _Tp1, template <bool> class _PoolTp1 = _PoolTp,
               bool _Thread1 = true>
        struct _M_rebind
        { typedef __common_pool_policy<_PoolTp1, _Thread1> other; };

      static pool_type&
      _S_get_pool()
      {
        static pool_type _S_pool;
        return _S_pool;
      }

      static void
      _S_initialize_once()
      {
        static bool __init;
        if (__builtin_expect(__init == false, false))
          {
            _S_get_pool()._M_initialize_once(_S_initialize);
            __init = true;
          }
      }

    private:
      static void
      _S_destroy_thread_key(void* __freelist_pos)
      { _S_get_pool()._M_destroy_thread_key(__freelist_pos); }

      static void
      _S_initialize()
      { _S_get_pool()._M_initialize(_S_destroy_thread_key); }
    };
#endif
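
  // Illustrative sketch, not part of the original header: the policy's
  // second template parameter selects the __pool specialization, so the
  // single-thread pool can be requested explicitly even in a threaded
  // build. The typedef name is an assumption for demonstration.
  //
  //   typedef __gnu_cxx::__common_pool_policy<__gnu_cxx::__pool, false>
  //     st_policy;  // uses __pool<false>: no locking, thread id always 0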


  /// @brief  Policy for individual __pool objects.
  template<typename _Tp, template <bool> class _PoolTp, bool _Thread>
    struct __per_type_pool_policy;

  /// Partial specialization for single thread.
  template<typename _Tp, template <bool> class _PoolTp>
    struct __per_type_pool_policy<_Tp, _PoolTp, false>
    {
      typedef _Tp value_type;
      typedef _PoolTp<false> pool_type;

      template<typename _Tp1, template <bool> class _PoolTp1 = _PoolTp,
               bool _Thread1 = false>
        struct _M_rebind
        { typedef __per_type_pool_policy<_Tp1, _PoolTp1, _Thread1> other; };

      static pool_type&
      _S_get_pool()
      {
        // Sane defaults for the _PoolTp.
        typedef typename pool_type::_Block_record _Block_record;
        const static size_t __align = (__alignof__(_Tp) >= sizeof(_Block_record)
                                       ? __alignof__(_Tp)
                                       : sizeof(_Block_record));

        typedef typename __pool_base::_Tune _Tune;
        static _Tune _S_tune(__align, sizeof(_Tp) * 64,
                             sizeof(_Tp) * 2 >= __align ? sizeof(_Tp) * 2
                                                        : __align,
                             sizeof(_Tp) * _Tune::_S_chunk_size,
                             _Tune::_S_max_threads,
                             _Tune::_S_freelist_headroom,
                             getenv("GLIBCXX_FORCE_NEW") ? true : false);
        static pool_type _S_pool(_S_tune);
        return _S_pool;
      }

      static void
      _S_initialize_once()
      {
        static bool __init;
        if (__builtin_expect(__init == false, false))
          {
            _S_get_pool()._M_initialize_once();
            __init = true;
          }
      }
    };

#ifdef __GTHREADS
  /// Partial specialization for thread enabled, via gthreads.h.
  template<typename _Tp, template <bool> class _PoolTp>
    struct __per_type_pool_policy<_Tp, _PoolTp, true>
    {
      typedef _Tp value_type;
      typedef _PoolTp<true> pool_type;

      template<typename _Tp1, template <bool> class _PoolTp1 = _PoolTp,
               bool _Thread1 = true>
        struct _M_rebind
        { typedef __per_type_pool_policy<_Tp1, _PoolTp1, _Thread1> other; };

      static pool_type&
      _S_get_pool()
      {
        // Sane defaults for the _PoolTp.
        typedef typename pool_type::_Block_record _Block_record;
        const static size_t __align = (__alignof__(_Tp) >= sizeof(_Block_record)
                                       ? __alignof__(_Tp)
                                       : sizeof(_Block_record));

        typedef typename __pool_base::_Tune _Tune;
        static _Tune _S_tune(__align, sizeof(_Tp) * 64,
                             sizeof(_Tp) * 2 >= __align ? sizeof(_Tp) * 2
                                                        : __align,
                             sizeof(_Tp) * _Tune::_S_chunk_size,
                             _Tune::_S_max_threads,
                             _Tune::_S_freelist_headroom,
                             getenv("GLIBCXX_FORCE_NEW") ? true : false);
        static pool_type _S_pool(_S_tune);
        return _S_pool;
      }

      static void
      _S_initialize_once()
      {
        static bool __init;
        if (__builtin_expect(__init == false, false))
          {
            _S_get_pool()._M_initialize_once(_S_initialize);
            __init = true;
          }
      }

    private:
      static void
      _S_destroy_thread_key(void* __freelist_pos)
      { _S_get_pool()._M_destroy_thread_key(__freelist_pos); }

      static void
      _S_initialize()
      { _S_get_pool()._M_initialize(_S_destroy_thread_key); }
    };
#endif
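
  // Illustrative sketch, not part of the original header: the two policies
  // differ in pool sharing. __common_pool_policy hands every type the same
  // pool, while __per_type_pool_policy builds one pool per _Tp with
  // defaults derived from sizeof(_Tp). Assumes a threaded build
  // (__GTHREADS); the typedef names are invented for demonstration.
  //
  //   typedef __gnu_cxx::__common_pool_policy<__gnu_cxx::__pool, true>
  //     shared_pol;
  //   typedef __gnu_cxx::__per_type_pool_policy<int, __gnu_cxx::__pool, true>
  //     per_type_pol;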

  /// @brief  Base class for _Tp dependent member functions.
  template<typename _Tp>
    class __mt_alloc_base
    {
    public:
      typedef size_t              size_type;
      typedef ptrdiff_t           difference_type;
      typedef _Tp*                pointer;
      typedef const _Tp*          const_pointer;
      typedef _Tp&                reference;
      typedef const _Tp&          const_reference;
      typedef _Tp                 value_type;

      pointer
      address(reference __x) const
      { return &__x; }

      const_pointer
      address(const_reference __x) const
      { return &__x; }

      size_type
      max_size() const throw()
      { return size_t(-1) / sizeof(_Tp); }

      // _GLIBCXX_RESOLVE_LIB_DEFECTS
      // 402. wrong new expression in [some_] allocator::construct
      void
      construct(pointer __p, const _Tp& __val)
      { ::new(__p) _Tp(__val); }

      void
      destroy(pointer __p) { __p->~_Tp(); }
    };

#ifdef __GTHREADS
#define __thread_default true
#else
#define __thread_default false
#endif

  /**
   *  @brief  This is a fixed size (power of 2) allocator which - when
   *  compiled with thread support - will maintain one freelist per
   *  size per thread plus a "global" one. Steps are taken to limit
   *  the per thread freelist sizes (by returning excess back to
   *  the "global" list).
   *
   *  Further details:
   *  http://gcc.gnu.org/onlinedocs/libstdc++/ext/mt_allocator.html
   */
  template<typename _Tp,
           typename _Poolp = __common_pool_policy<__pool, __thread_default> >
    class __mt_alloc : public __mt_alloc_base<_Tp>
    {
    public:
      typedef size_t                     size_type;
      typedef ptrdiff_t                  difference_type;
      typedef _Tp*                       pointer;
      typedef const _Tp*                 const_pointer;
      typedef _Tp&                       reference;
      typedef const _Tp&                 const_reference;
      typedef _Tp                        value_type;
      typedef _Poolp                     __policy_type;
      typedef typename _Poolp::pool_type __pool_type;

      template<typename _Tp1, typename _Poolp1 = _Poolp>
        struct rebind
        {
          typedef typename _Poolp1::template _M_rebind<_Tp1>::other pol_type;
          typedef __mt_alloc<_Tp1, pol_type> other;
        };

      __mt_alloc() throw()
      { __policy_type::_S_get_pool(); }

      __mt_alloc(const __mt_alloc&) throw()
      { __policy_type::_S_get_pool(); }

      template<typename _Tp1, typename _Poolp1>
        __mt_alloc(const __mt_alloc<_Tp1, _Poolp1>& obj) throw()
        { __policy_type::_S_get_pool(); }

      ~__mt_alloc() throw() { }

      pointer
      allocate(size_type __n, const void* = 0);

      void
      deallocate(pointer __p, size_type __n);

      const __pool_base::_Tune
      _M_get_options()
      {
        // Return a copy, not a reference, for external consumption.
        return __policy_type::_S_get_pool()._M_get_options();
      }

      void
      _M_set_options(__pool_base::_Tune __t)
      { __policy_type::_S_get_pool()._M_set_options(__t); }
    };
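
  // Illustrative sketch, not part of the original header: __mt_alloc as a
  // drop-in container allocator, plus reading back the active tunables.
  // Assumes a translation unit that includes <vector> and this header;
  // variable names are invented for demonstration.
  //
  //   typedef __gnu_cxx::__mt_alloc<int> int_alloc;
  //   std::vector<int, int_alloc> __v;
  //   __v.push_back(1);   // block requests <= _M_max_bytes hit the pool
  //
  //   int_alloc __a;
  //   __gnu_cxx::__pool_base::_Tune __t = __a._M_get_options();
  //   size_t __max = __t._M_max_bytes;   // 128 by default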

  template<typename _Tp, typename _Poolp>
    typename __mt_alloc<_Tp, _Poolp>::pointer
    __mt_alloc<_Tp, _Poolp>::
    allocate(size_type __n, const void*)
    {
      if (__builtin_expect(__n > this->max_size(), false))
        std::__throw_bad_alloc();

      __policy_type::_S_initialize_once();

      // Requests larger than _M_max_bytes are handled by operator
      // new/delete directly.
      __pool_type& __pool = __policy_type::_S_get_pool();
      const size_t __bytes = __n * sizeof(_Tp);
      if (__pool._M_check_threshold(__bytes))
        {
          void* __ret = ::operator new(__bytes);
          return static_cast<_Tp*>(__ret);
        }

      // Round up to power of 2 and figure out which bin to use.
      const size_t __which = __pool._M_get_binmap(__bytes);
      const size_t __thread_id = __pool._M_get_thread_id();

      // Find out if we have blocks on our freelist.  If so, go ahead
      // and use them directly without having to lock anything.
      char* __c;
      typedef typename __pool_type::_Bin_record _Bin_record;
      const _Bin_record& __bin = __pool._M_get_bin(__which);
      if (__bin._M_first[__thread_id])
        {
          // Already reserved.
          typedef typename __pool_type::_Block_record _Block_record;
          _Block_record* __block = __bin._M_first[__thread_id];
          __bin._M_first[__thread_id] = __block->_M_next;

          __pool._M_adjust_freelist(__bin, __block, __thread_id);
          __c = reinterpret_cast<char*>(__block) + __pool._M_get_align();
        }
      else
        {
          // Null, reserve.
          __c = __pool._M_reserve_block(__bytes, __thread_id);
        }
      return static_cast<_Tp*>(static_cast<void*>(__c));
    }

  template<typename _Tp, typename _Poolp>
    void
    __mt_alloc<_Tp, _Poolp>::
    deallocate(pointer __p, size_type __n)
    {
      if (__builtin_expect(__p != 0, true))
        {
          // Requests larger than _M_max_bytes are handled by
          // operators new/delete directly.
          __pool_type& __pool = __policy_type::_S_get_pool();
          const size_t __bytes = __n * sizeof(_Tp);
          if (__pool._M_check_threshold(__bytes))
            ::operator delete(__p);
          else
            __pool._M_reclaim_block(reinterpret_cast<char*>(__p), __bytes);
        }
    }

  template<typename _Tp, typename _Poolp>
    inline bool
    operator==(const __mt_alloc<_Tp, _Poolp>&, const __mt_alloc<_Tp, _Poolp>&)
    { return true; }

  template<typename _Tp, typename _Poolp>
    inline bool
    operator!=(const __mt_alloc<_Tp, _Poolp>&, const __mt_alloc<_Tp, _Poolp>&)
    { return false; }

#undef __thread_default
} // namespace __gnu_cxx

#endif
