// Allocators -*- C++ -*-

// Copyright (C) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009,
// 2010, 2011
// Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library.  This library is free
// software; you can redistribute it and/or modify it under the
// terms of the GNU General Public License as published by the
// Free Software Foundation; either version 3, or (at your option)
// any later version.

// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU General Public License for more details.

// Under Section 7 of GPL version 3, you are granted additional
// permissions described in the GCC Runtime Library Exception, version
// 3.1, as published by the Free Software Foundation.

// You should have received a copy of the GNU General Public License and
// a copy of the GCC Runtime Library Exception along with this program;
// see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
// <http://www.gnu.org/licenses/>.

/*
 * Copyright (c) 1996-1997
 * Silicon Graphics Computer Systems, Inc.
 *
 * Permission to use, copy, modify, distribute and sell this software
 * and its documentation for any purpose is hereby granted without fee,
 * provided that the above copyright notice appear in all copies and
 * that both that copyright notice and this permission notice appear
 * in supporting documentation.  Silicon Graphics makes no
 * representations about the suitability of this software for any
 * purpose.  It is provided "as is" without express or implied warranty.
 */

/** @file ext/pool_allocator.h
 *  This file is a GNU extension to the Standard C++ Library.
 */

#ifndef _POOL_ALLOCATOR_H
#define _POOL_ALLOCATOR_H 1

#include <bits/c++config.h>
#include <cstdlib>
#include <new>
#include <bits/functexcept.h>
#include <ext/atomicity.h>
#include <ext/concurrence.h>
#include <bits/move.h>

namespace __gnu_cxx _GLIBCXX_VISIBILITY(default)
{
_GLIBCXX_BEGIN_NAMESPACE_VERSION

  using std::size_t;
  using std::ptrdiff_t;

  /**
   *  @brief  Base class for __pool_alloc.
   *
   *  Uses various allocators to fulfill underlying requests (and makes as
   *  few requests as possible when in default high-speed pool mode).
   *
   *  Important implementation properties:
   *  0. If globally mandated, then allocate objects from new
   *  1. If the clients request an object of size > _S_max_bytes, the
   *     resulting object will be obtained directly from new
   *  2. In all other cases, we allocate an object of size exactly
   *     _S_round_up(requested_size).  Thus the client has enough size
   *     information that we can return the object to the proper free list
   *     without permanently losing part of the object.
   */
  class __pool_alloc_base
  {
  protected:

    enum { _S_align = 8 };
    enum { _S_max_bytes = 128 };
    enum { _S_free_list_size = (size_t)_S_max_bytes / (size_t)_S_align };

    union _Obj
    {
      union _Obj* _M_free_list_link;
      char        _M_client_data[1];    // The client sees this.
    };

    static _Obj* volatile _S_free_list[_S_free_list_size];

    // Chunk allocation state.
    static char*  _S_start_free;
    static char*  _S_end_free;
    static size_t _S_heap_size;

    size_t
    _M_round_up(size_t __bytes)
    { return ((__bytes + (size_t)_S_align - 1) & ~((size_t)_S_align - 1)); }

    _GLIBCXX_CONST _Obj* volatile*
    _M_get_free_list(size_t __bytes) throw ();

    __mutex&
    _M_get_mutex() throw ();

    // Returns an object of size __n, and optionally adds to size __n
    // free list.
    void*
    _M_refill(size_t __n);

    // Allocates a chunk for nobjs of size size.  nobjs may be reduced
    // if it is inconvenient to allocate the requested number.
    char*
    _M_allocate_chunk(size_t __n, int& __nobjs);
  };


  /**
   * @brief  Allocator using a memory pool with a single lock.
   * @ingroup allocators
   */
  template<typename _Tp>
    class __pool_alloc : private __pool_alloc_base
    {
    private:
      static _Atomic_word _S_force_new;

    public:
      typedef size_t     size_type;
      typedef ptrdiff_t  difference_type;
      typedef _Tp*       pointer;
      typedef const _Tp* const_pointer;
      typedef _Tp&       reference;
      typedef const _Tp& const_reference;
      typedef _Tp        value_type;

      template<typename _Tp1>
        struct rebind
        { typedef __pool_alloc<_Tp1> other; };

      __pool_alloc() _GLIBCXX_USE_NOEXCEPT { }

      __pool_alloc(const __pool_alloc&) _GLIBCXX_USE_NOEXCEPT { }

      template<typename _Tp1>
        __pool_alloc(const __pool_alloc<_Tp1>&) _GLIBCXX_USE_NOEXCEPT { }

      ~__pool_alloc() _GLIBCXX_USE_NOEXCEPT { }

      pointer
      address(reference __x) const _GLIBCXX_NOEXCEPT
      { return std::__addressof(__x); }

      const_pointer
      address(const_reference __x) const _GLIBCXX_NOEXCEPT
      { return std::__addressof(__x); }

      size_type
      max_size() const _GLIBCXX_USE_NOEXCEPT
      { return size_t(-1) / sizeof(_Tp); }

#ifdef __GXX_EXPERIMENTAL_CXX0X__
      template<typename _Up, typename... _Args>
        void
        construct(_Up* __p, _Args&&... __args)
        { ::new((void *)__p) _Up(std::forward<_Args>(__args)...); }

      template<typename _Up>
        void
        destroy(_Up* __p) { __p->~_Up(); }
#else
      // _GLIBCXX_RESOLVE_LIB_DEFECTS
      // 402. wrong new expression in [some_] allocator::construct
      void
      construct(pointer __p, const _Tp& __val)
      { ::new((void *)__p) _Tp(__val); }

      void
      destroy(pointer __p) { __p->~_Tp(); }
#endif

      pointer
      allocate(size_type __n, const void* = 0);

      void
      deallocate(pointer __p, size_type __n);
    };

  template<typename _Tp>
    inline bool
    operator==(const __pool_alloc<_Tp>&, const __pool_alloc<_Tp>&)
    { return true; }

  template<typename _Tp>
    inline bool
    operator!=(const __pool_alloc<_Tp>&, const __pool_alloc<_Tp>&)
    { return false; }

  template<typename _Tp>
    _Atomic_word
    __pool_alloc<_Tp>::_S_force_new;

  template<typename _Tp>
    _Tp*
    __pool_alloc<_Tp>::allocate(size_type __n, const void*)
    {
      pointer __ret = 0;
      if (__builtin_expect(__n != 0, true))
        {
          if (__n > this->max_size())
            std::__throw_bad_alloc();

          // If there is a race through here, assume answer from getenv
          // will resolve in same direction.  Inspired by techniques
          // to efficiently support threading found in basic_string.h.
          if (_S_force_new == 0)
            {
              if (std::getenv("GLIBCXX_FORCE_NEW"))
                __atomic_add_dispatch(&_S_force_new, 1);
              else
                __atomic_add_dispatch(&_S_force_new, -1);
            }

          const size_t __bytes = __n * sizeof(_Tp);
          if (__bytes > size_t(_S_max_bytes) || _S_force_new > 0)
            __ret = static_cast<_Tp*>(::operator new(__bytes));
          else
            {
              _Obj* volatile* __free_list = _M_get_free_list(__bytes);

              __scoped_lock sentry(_M_get_mutex());
              _Obj* __restrict__ __result = *__free_list;
              if (__builtin_expect(__result == 0, 0))
                __ret = static_cast<_Tp*>(_M_refill(_M_round_up(__bytes)));
              else
                {
                  *__free_list = __result->_M_free_list_link;
                  __ret = reinterpret_cast<_Tp*>(__result);
                }
              if (__ret == 0)
                std::__throw_bad_alloc();
            }
        }
      return __ret;
    }

  template<typename _Tp>
    void
    __pool_alloc<_Tp>::deallocate(pointer __p, size_type __n)
    {
      if (__builtin_expect(__n != 0 && __p != 0, true))
        {
          const size_t __bytes = __n * sizeof(_Tp);
          if (__bytes > static_cast<size_t>(_S_max_bytes) || _S_force_new > 0)
            ::operator delete(__p);
          else
            {
              _Obj* volatile* __free_list = _M_get_free_list(__bytes);
              _Obj* __q = reinterpret_cast<_Obj*>(__p);

              __scoped_lock sentry(_M_get_mutex());
              __q->_M_free_list_link = *__free_list;
              *__free_list = __q;
            }
        }
    }

_GLIBCXX_END_NAMESPACE_VERSION
} // namespace

#endif
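
Example usage (a minimal sketch, not part of the header above): __pool_alloc satisfies the usual allocator requirements, so it can be named as the allocator argument of a standard container. Requests of at most _S_max_bytes (128 bytes) are served from the shared free lists under the single pool mutex; larger requests, or any request once the GLIBCXX_FORCE_NEW environment variable has been seen, go straight to ::operator new. The container choices and sizes below are purely illustrative.

#include <ext/pool_allocator.h>
#include <list>
#include <vector>
#include <iostream>

int main()
{
  // Node-based containers benefit most: each list node is a small,
  // fixed-size allocation that lands on one of the pool's free lists.
  std::list<int, __gnu_cxx::__pool_alloc<int> > pooled_list;
  for (int i = 0; i < 1000; ++i)
    pooled_list.push_back(i);

  // Contiguous containers work as well, but a buffer larger than
  // _S_max_bytes bypasses the pool and comes from ::operator new.
  std::vector<double, __gnu_cxx::__pool_alloc<double> > pooled_vector(256, 1.0);

  std::cout << pooled_list.size() << ' ' << pooled_vector.size() << '\n';
  return 0;
}

Setting GLIBCXX_FORCE_NEW in the environment before the first allocation makes every request use ::operator new instead of the pool, which can be convenient when running under memory-checking tools.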