libstdc++
// Allocators -*- C++ -*-

// Copyright (C) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009
// Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library.  This library is free
// software; you can redistribute it and/or modify it under the
// terms of the GNU General Public License as published by the
// Free Software Foundation; either version 3, or (at your option)
// any later version.

// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU General Public License for more details.

// Under Section 7 of GPL version 3, you are granted additional
// permissions described in the GCC Runtime Library Exception, version
// 3.1, as published by the Free Software Foundation.

// You should have received a copy of the GNU General Public License and
// a copy of the GCC Runtime Library Exception along with this program;
// see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
// <http://www.gnu.org/licenses/>.

/*
 * Copyright (c) 1996-1997
 * Silicon Graphics Computer Systems, Inc.
 *
 * Permission to use, copy, modify, distribute and sell this software
 * and its documentation for any purpose is hereby granted without fee,
 * provided that the above copyright notice appear in all copies and
 * that both that copyright notice and this permission notice appear
 * in supporting documentation.  Silicon Graphics makes no
 * representations about the suitability of this software for any
 * purpose.  It is provided "as is" without express or implied warranty.
 */

/** @file ext/pool_allocator.h
 *  This file is a GNU extension to the Standard C++ Library.
 */

#ifndef _POOL_ALLOCATOR_H
#define _POOL_ALLOCATOR_H 1

#include <bits/c++config.h>
#include <cstdlib>
#include <new>
#include <bits/functexcept.h>
#include <ext/atomicity.h>
#include <ext/concurrence.h>
#include <bits/move.h>

_GLIBCXX_BEGIN_NAMESPACE(__gnu_cxx)

  using std::size_t;
  using std::ptrdiff_t;

  /**
   *  @brief  Base class for __pool_alloc.
   *
   *  Uses various allocators to fulfill underlying requests (and makes as
   *  few requests as possible when in default high-speed pool mode).
   *
   *  Important implementation properties:
   *  0. If globally mandated, then allocate objects from new.
   *  1. If the client requests an object of size > _S_max_bytes, the
   *     resulting object will be obtained directly from new.
   *  2. In all other cases, we allocate an object of size exactly
   *     _M_round_up(requested_size).  Thus the client has enough size
   *     information that we can return the object to the proper free list
   *     without permanently losing part of the object.
   */
  class __pool_alloc_base
  {
  protected:

    enum { _S_align = 8 };
    enum { _S_max_bytes = 128 };
    enum { _S_free_list_size = (size_t)_S_max_bytes / (size_t)_S_align };

    union _Obj
    {
      union _Obj* _M_free_list_link;
      char        _M_client_data[1];    // The client sees this.
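      // Free chunks are threaded through _M_free_list_link; once a chunk
      // is handed to a client, the same bytes are reused as object
      // storage, so the pool keeps no per-object bookkeeping overhead.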
    };

    static _Obj* volatile _S_free_list[_S_free_list_size];

    // Chunk allocation state.
    static char*  _S_start_free;
    static char*  _S_end_free;
    static size_t _S_heap_size;

    size_t
    _M_round_up(size_t __bytes)
    { return ((__bytes + (size_t)_S_align - 1) & ~((size_t)_S_align - 1)); }

    _Obj* volatile*
    _M_get_free_list(size_t __bytes);

    __mutex&
    _M_get_mutex();

    // Returns an object of size __n, and optionally adds entries to the
    // size-__n free list.
    void*
    _M_refill(size_t __n);

    // Allocates a chunk for __nobjs objects of size __n.  __nobjs may be
    // reduced if it is inconvenient to allocate the requested number.
    char*
    _M_allocate_chunk(size_t __n, int& __nobjs);
  };


  /**
   *  @brief  Allocator using a memory pool with a single lock.
   *  @ingroup allocators
   */
  template<typename _Tp>
    class __pool_alloc : private __pool_alloc_base
    {
    private:
      static _Atomic_word _S_force_new;

    public:
      typedef size_t     size_type;
      typedef ptrdiff_t  difference_type;
      typedef _Tp*       pointer;
      typedef const _Tp* const_pointer;
      typedef _Tp&       reference;
      typedef const _Tp& const_reference;
      typedef _Tp        value_type;

      template<typename _Tp1>
        struct rebind
        { typedef __pool_alloc<_Tp1> other; };

      __pool_alloc() throw() { }

      __pool_alloc(const __pool_alloc&) throw() { }

      template<typename _Tp1>
        __pool_alloc(const __pool_alloc<_Tp1>&) throw() { }

      ~__pool_alloc() throw() { }

      pointer
      address(reference __x) const { return &__x; }

      const_pointer
      address(const_reference __x) const { return &__x; }

      size_type
      max_size() const throw()
      { return size_t(-1) / sizeof(_Tp); }

      // _GLIBCXX_RESOLVE_LIB_DEFECTS
      // 402. wrong new expression in [some_] allocator::construct
      void
      construct(pointer __p, const _Tp& __val)
      { ::new((void *)__p) _Tp(__val); }

#ifdef __GXX_EXPERIMENTAL_CXX0X__
      template<typename... _Args>
        void
        construct(pointer __p, _Args&&... __args)
        { ::new((void *)__p) _Tp(std::forward<_Args>(__args)...); }
#endif

      void
      destroy(pointer __p) { __p->~_Tp(); }

      pointer
      allocate(size_type __n, const void* = 0);

      void
      deallocate(pointer __p, size_type __n);
    };

  template<typename _Tp>
    inline bool
    operator==(const __pool_alloc<_Tp>&, const __pool_alloc<_Tp>&)
    { return true; }

  template<typename _Tp>
    inline bool
    operator!=(const __pool_alloc<_Tp>&, const __pool_alloc<_Tp>&)
    { return false; }

  template<typename _Tp>
    _Atomic_word
    __pool_alloc<_Tp>::_S_force_new;

  template<typename _Tp>
    _Tp*
    __pool_alloc<_Tp>::allocate(size_type __n, const void*)
    {
      pointer __ret = 0;
      if (__builtin_expect(__n != 0, true))
        {
          if (__builtin_expect(__n > this->max_size(), false))
            std::__throw_bad_alloc();

          // If there is a race through here, assume answer from getenv
          // will resolve in same direction.  Inspired by techniques
          // to efficiently support threading found in basic_string.h.
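          // _S_force_new is effectively tri-state: 0 means the environment
          // has not been checked yet, a positive value means
          // GLIBCXX_FORCE_NEW was set (requests go straight to operator
          // new), and a negative value selects the pooled path below.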
          if (_S_force_new == 0)
            {
              if (std::getenv("GLIBCXX_FORCE_NEW"))
                __atomic_add_dispatch(&_S_force_new, 1);
              else
                __atomic_add_dispatch(&_S_force_new, -1);
            }

          const size_t __bytes = __n * sizeof(_Tp);
          if (__bytes > size_t(_S_max_bytes) || _S_force_new > 0)
            __ret = static_cast<_Tp*>(::operator new(__bytes));
          else
            {
              _Obj* volatile* __free_list = _M_get_free_list(__bytes);

              __scoped_lock sentry(_M_get_mutex());
              _Obj* __restrict__ __result = *__free_list;
              if (__builtin_expect(__result == 0, 0))
                __ret = static_cast<_Tp*>(_M_refill(_M_round_up(__bytes)));
              else
                {
                  *__free_list = __result->_M_free_list_link;
                  __ret = reinterpret_cast<_Tp*>(__result);
                }
              if (__builtin_expect(__ret == 0, 0))
                std::__throw_bad_alloc();
            }
        }
      return __ret;
    }

  template<typename _Tp>
    void
    __pool_alloc<_Tp>::deallocate(pointer __p, size_type __n)
    {
      if (__builtin_expect(__n != 0 && __p != 0, true))
        {
          const size_t __bytes = __n * sizeof(_Tp);
          if (__bytes > static_cast<size_t>(_S_max_bytes) || _S_force_new > 0)
            ::operator delete(__p);
          else
            {
              _Obj* volatile* __free_list = _M_get_free_list(__bytes);
              _Obj* __q = reinterpret_cast<_Obj*>(__p);

              __scoped_lock sentry(_M_get_mutex());
              __q->_M_free_list_link = *__free_list;
              *__free_list = __q;
            }
        }
    }

_GLIBCXX_END_NAMESPACE

#endif
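A minimal usage sketch follows (illustrative only, not part of the header above). __pool_alloc meets the C++03 allocator requirements, so it can be passed as the allocator argument of any standard container; node-based containers such as std::list benefit most, because every node stays below _S_max_bytes (128 bytes) and is therefore recycled through the per-size free lists. The file name below is hypothetical. Setting the GLIBCXX_FORCE_NEW environment variable before the first allocation makes allocate() bypass the pool and call operator new instead, as the code above shows.

// pool_alloc_example.cc -- illustrative sketch, not part of libstdc++.
// Build:                       g++ pool_alloc_example.cc -o example
// Run with the pool disabled:  GLIBCXX_FORCE_NEW=1 ./example
#include <ext/pool_allocator.h>
#include <list>

int main()
{
  // Each std::list<int> node is far smaller than _S_max_bytes, so after
  // the first _M_refill the nodes are recycled through a per-size free
  // list; e.g. a 20-byte request is rounded up to 24, the next multiple
  // of _S_align (8).
  std::list<int, __gnu_cxx::__pool_alloc<int> > values;
  for (int i = 0; i < 1000; ++i)
    values.push_back(i);
  return 0;
}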