libstdc++
mt_allocator.h
Go to the documentation of this file.
00001 // MT-optimized allocator -*- C++ -*-
00002 
00003 // Copyright (C) 2003-2013 Free Software Foundation, Inc.
00004 //
00005 // This file is part of the GNU ISO C++ Library.  This library is free
00006 // software; you can redistribute it and/or modify it under the
00007 // terms of the GNU General Public License as published by the
00008 // Free Software Foundation; either version 3, or (at your option)
00009 // any later version.
00010 
00011 // This library is distributed in the hope that it will be useful,
00012 // but WITHOUT ANY WARRANTY; without even the implied warranty of
00013 // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
00014 // GNU General Public License for more details.
00015 
00016 // Under Section 7 of GPL version 3, you are granted additional
00017 // permissions described in the GCC Runtime Library Exception, version
00018 // 3.1, as published by the Free Software Foundation.
00019 
00020 // You should have received a copy of the GNU General Public License and
00021 // a copy of the GCC Runtime Library Exception along with this program;
00022 // see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
00023 // <http://www.gnu.org/licenses/>.
00024 
00025 /** @file ext/mt_allocator.h
00026  *  This file is a GNU extension to the Standard C++ Library.
00027  */
00028 
00029 #ifndef _MT_ALLOCATOR_H
00030 #define _MT_ALLOCATOR_H 1
00031 
00032 #include <new>
00033 #include <cstdlib>
00034 #include <bits/functexcept.h>
00035 #include <ext/atomicity.h>
00036 #include <bits/move.h>
00037 #if __cplusplus >= 201103L
00038 #include <type_traits>
00039 #endif
00040 
00041 namespace __gnu_cxx _GLIBCXX_VISIBILITY(default)
00042 {
00043 _GLIBCXX_BEGIN_NAMESPACE_VERSION
00044 
  using std::size_t;
  using std::ptrdiff_t;

  // Signature of a per-thread cleanup callback.  Only referenced by the
  // deprecated _M_initialize(__destroy_handler) overload marked
  // "XXX GLIBCXX_ABI Deprecated" further down — presumably retained for
  // ABI compatibility; new code paths do not use it.
  typedef void (*__destroy_handler)(void*);
00049 
  /// Base class for pool object: holds the tunable parameters, the
  /// size-to-bin lookup table, and the initialized flag shared by both
  /// __pool specializations.
  struct __pool_base
  {
    // Using short int as type for the binmap implies we are never
    // caching blocks larger than 32768 with this allocator.
    typedef unsigned short int _Binmap_type;

    // Variables used to configure the behavior of the allocator,
    // assigned and explained in detail below.
    struct _Tune
    {
      // Compile time constants for the default _Tune values.
      enum { _S_align = 8 };
      enum { _S_max_bytes = 128 };
      enum { _S_min_bin = 8 };
      enum { _S_chunk_size = 4096 - 4 * sizeof(void*) };
      enum { _S_max_threads = 4096 };
      enum { _S_freelist_headroom = 10 };

      // Alignment needed.
      // NB: In any case must be >= sizeof(_Block_record), that
      // is 4 on 32 bit machines and 8 on 64 bit machines.
      size_t    _M_align;

      // Allocation requests (after round-up to power of 2) below
      // this value will be handled by the allocator. A raw new/
      // call will be used for requests larger than this value.
      // NB: Must be much smaller than _M_chunk_size and in any
      // case <= 32768.
      size_t    _M_max_bytes;

      // Size in bytes of the smallest bin.
      // NB: Must be a power of 2 and >= _M_align (and of course
      // much smaller than _M_max_bytes).
      size_t    _M_min_bin;

      // In order to avoid fragmenting and minimize the number of
      // new() calls we always request new memory using this
      // value. Based on previous discussions on the libstdc++
      // mailing list we have chosen the value below.
      // See http://gcc.gnu.org/ml/libstdc++/2001-07/msg00077.html
      // NB: At least one order of magnitude > _M_max_bytes.
      size_t    _M_chunk_size;

      // The maximum number of supported threads. For
      // single-threaded operation, use one. Maximum values will
      // vary depending on details of the underlying system. (For
      // instance, Linux 2.4.18 reports 4070 in
      // /proc/sys/kernel/threads-max, while Linux 2.6.6 reports
      // 65534)
      size_t    _M_max_threads;

      // Each time a deallocation occurs in a threaded application
      // we make sure that there are no more than
      // _M_freelist_headroom % of used memory on the freelist. If
      // the number of additional records is more than
      // _M_freelist_headroom % of the freelist, we move these
      // records back to the global pool.
      size_t    _M_freelist_headroom;

      // Set to true forces all allocations to use new().
      bool  _M_force_new;

      // Default configuration: the _S_* constants above, with
      // _M_force_new honoring the GLIBCXX_FORCE_NEW environment
      // variable so pooling can be disabled without recompiling.
      explicit
      _Tune()
      : _M_align(_S_align), _M_max_bytes(_S_max_bytes), _M_min_bin(_S_min_bin),
      _M_chunk_size(_S_chunk_size), _M_max_threads(_S_max_threads),
      _M_freelist_headroom(_S_freelist_headroom),
      _M_force_new(std::getenv("GLIBCXX_FORCE_NEW") ? true : false)
      { }

      // Explicit configuration: the caller supplies every knob and is
      // responsible for honoring the NB constraints documented above.
      explicit
      _Tune(size_t __align, size_t __maxb, size_t __minbin, size_t __chunk,
        size_t __maxthreads, size_t __headroom, bool __force)
      : _M_align(__align), _M_max_bytes(__maxb), _M_min_bin(__minbin),
      _M_chunk_size(__chunk), _M_max_threads(__maxthreads),
      _M_freelist_headroom(__headroom), _M_force_new(__force)
      { }
    };

    // Node in a singly-linked list remembering the initial address of
    // each raw chunk obtained from the system, so it can be released
    // later (see the _M_address members of the _Bin_record types).
    struct _Block_address
    {
      void*             _M_initial;
      _Block_address*       _M_next;
    };

    // Read-only access to the current configuration.
    const _Tune&
    _M_get_options() const
    { return _M_options; }

    // Replace the configuration.  Silently ignored once the pool has
    // been initialized (_M_init set), since the layout derived from the
    // options is already fixed at that point.
    void
    _M_set_options(_Tune __t)
    {
      if (!_M_init)
    _M_options = __t;
    }

    // True when a request of __bytes must bypass the pool and go
    // straight to operator new: either it exceeds _M_max_bytes or
    // GLIBCXX_FORCE_NEW was set.
    bool
    _M_check_threshold(size_t __bytes)
    { return __bytes > _M_options._M_max_bytes || _M_options._M_force_new; }

    // Map a byte count to its bin index.  No bounds check is performed;
    // callers are expected to have rejected over-threshold sizes first
    // via _M_check_threshold.
    size_t
    _M_get_binmap(size_t __bytes)
    { return _M_binmap[__bytes]; }

    size_t
    _M_get_align()
    { return _M_options._M_align; }

    explicit
    __pool_base()
    : _M_options(_Tune()), _M_binmap(0), _M_init(false) { }

    explicit
    __pool_base(const _Tune& __options)
    : _M_options(__options), _M_binmap(0), _M_init(false) { }

  private:
    // Non-copyable: declared but intentionally not defined.
    explicit
    __pool_base(const __pool_base&);

    __pool_base&
    operator=(const __pool_base&);

  protected:
    // Configuration options.
    _Tune               _M_options;

    // Size-to-bin lookup table; null until initialization (set up
    // elsewhere — not shown in this header).
    _Binmap_type*       _M_binmap;

    // Configuration of the pool object via _M_options can happen
    // after construction but before initialization. After
    // initialization is complete, this variable is set to true.
    bool            _M_init;
  };
00185 
00186 
  /**
   *  @brief  Data describing the underlying memory pool, parameterized on
   *  threading support.
   */
  template<bool _Thread>
    class __pool;

  /// Specialization for single thread.  All requests are served out of
  /// a single set of freelists ("thread" id 0); no locking and no
  /// per-thread bookkeeping is needed.
  template<>
    class __pool<false> : public __pool_base
    {
    public:
      // Overlaid on freed storage: a freed block is reinterpreted as a
      // _Block_record and linked into its bin's freelist.
      union _Block_record
      {
    // Points to the block_record of the next free block.
    _Block_record*          _M_next;
      };

      struct _Bin_record
      {
    // An "array" of pointers to the first free block.
    _Block_record**         _M_first;

    // A list of the initial addresses of all allocated blocks.
    _Block_address*             _M_address;
      };

      // Lazily set up the bins on first use; a cheap, branch-predicted
      // no-op on every subsequent call.
      void
      _M_initialize_once()
      {
    if (__builtin_expect(_M_init == false, false))
      _M_initialize();
      }

      // Tear down the pool.  Defined out of line.
      void
      _M_destroy() throw();

      // Obtain a fresh block of at least __bytes for the given thread
      // id (always 0 here).  Defined out of line.
      char*
      _M_reserve_block(size_t __bytes, const size_t __thread_id);

      // Return a block to the pool.  Defined out of line.
      void
      _M_reclaim_block(char* __p, size_t __bytes) throw ();

      // Single-threaded: everything belongs to pseudo-thread 0.
      size_t
      _M_get_thread_id() { return 0; }

      const _Bin_record&
      _M_get_bin(size_t __which)
      { return _M_bin[__which]; }

      // No per-thread free/used counters to maintain without threads.
      void
      _M_adjust_freelist(const _Bin_record&, _Block_record*, size_t)
      { }

      explicit __pool()
      : _M_bin(0), _M_bin_size(1) { }

      explicit __pool(const __pool_base::_Tune& __tune)
      : __pool_base(__tune), _M_bin(0), _M_bin_size(1) { }

    private:
      // An "array" of bin_records each of which represents a specific
      // power of 2 size. Memory to this "array" is allocated in
      // _M_initialize().
      _Bin_record*       _M_bin;

      // Actual value calculated in _M_initialize().
      size_t                    _M_bin_size;

      void
      _M_initialize();
  };
00259  
00260 #ifdef __GTHREADS
  /// Specialization for thread enabled, via gthreads.h.
  template<>
    class __pool<true> : public __pool_base
    {
    public:
      // Each requesting thread is assigned an id ranging from 1 to
      // _S_max_threads. Thread id 0 is used as a global memory pool.
      // In order to get constant performance on the thread assignment
      // routine, we keep a list of free ids. When a thread first
      // requests memory we remove the first record in this list and
      // stores the address in a __gthread_key. When initializing the
      // __gthread_key we specify a destructor. When this destructor
      // (i.e. the thread dies) is called, we return the thread id to
      // the front of this list.
      struct _Thread_record
      {
    // Points to next free thread id record. NULL if last record in list.
    _Thread_record*         _M_next;

    // Thread id ranging from 1 to _S_max_threads.
    size_t                          _M_id;
      };

      // Overlaid on block storage: while free, the link to the next
      // free block; while allocated, the owning thread's id.
      union _Block_record
      {
    // Points to the block_record of the next free block.
    _Block_record*          _M_next;

    // The thread id of the thread which has requested this block.
    size_t                          _M_thread_id;
      };

      struct _Bin_record
      {
    // An "array" of pointers to the first free block for each
    // thread id. Memory to this "array" is allocated in
    // _S_initialize() for _S_max_threads + global pool 0.
    _Block_record**         _M_first;

    // A list of the initial addresses of all allocated blocks.
    _Block_address*             _M_address;

    // An "array" of counters used to keep track of the amount of
    // blocks that are on the freelist/used for each thread id.
    // - Note that the second part of the allocated _M_used "array"
    //   actually hosts (atomic) counters of reclaimed blocks:  in
    //   _M_reserve_block and in _M_reclaim_block those numbers are
    //   subtracted from the first ones to obtain the actual size
    //   of the "working set" of the given thread.
    // - Memory to these "arrays" is allocated in _S_initialize()
    //   for _S_max_threads + global pool 0.
    size_t*             _M_free;
    size_t*                 _M_used;

    // Each bin has its own mutex which is used to ensure data
    // integrity while changing "ownership" on a block.  The mutex
    // is initialized in _S_initialize().
    __gthread_mutex_t*              _M_mutex;
      };

      // XXX GLIBCXX_ABI Deprecated
      // Overload taking an explicit thread-exit handler; presumably
      // retained only so the exported symbol stays available.
      void
      _M_initialize(__destroy_handler);

      // Lazily set up the bins on first use; a cheap, branch-predicted
      // no-op on every subsequent call.
      void
      _M_initialize_once()
      {
    if (__builtin_expect(_M_init == false, false))
      _M_initialize();
      }

      // Tear down the pool.  Defined out of line.
      void
      _M_destroy() throw();

      // Obtain a fresh block of at least __bytes on behalf of
      // __thread_id.  Defined out of line.
      char*
      _M_reserve_block(size_t __bytes, const size_t __thread_id);

      // Return a block to the pool, possibly migrating excess records
      // back to the global freelist.  Defined out of line.
      void
      _M_reclaim_block(char* __p, size_t __bytes) throw ();

      const _Bin_record&
      _M_get_bin(size_t __which)
      { return _M_bin[__which]; }

      // Record __block as owned by __thread_id and update that
      // thread's free/used counters.  Skipped entirely when the
      // process is not actually running multiple threads.
      void
      _M_adjust_freelist(const _Bin_record& __bin, _Block_record* __block,
             size_t __thread_id)
      {
    if (__gthread_active_p())
      {
        __block->_M_thread_id = __thread_id;
        --__bin._M_free[__thread_id];
        ++__bin._M_used[__thread_id];
      }
      }

      // XXX GLIBCXX_ABI Deprecated
      _GLIBCXX_CONST void
      _M_destroy_thread_key(void*) throw ();

      // Return the calling thread's pool id.  Defined out of line.
      size_t
      _M_get_thread_id();

      explicit __pool()
      : _M_bin(0), _M_bin_size(1), _M_thread_freelist(0)
      { }

      explicit __pool(const __pool_base::_Tune& __tune)
      : __pool_base(__tune), _M_bin(0), _M_bin_size(1),
    _M_thread_freelist(0)
      { }

    private:
      // An "array" of bin_records each of which represents a specific
      // power of 2 size. Memory to this "array" is allocated in
      // _M_initialize().
      _Bin_record*      _M_bin;

      // Actual value calculated in _M_initialize().
      size_t                    _M_bin_size;

      // Head of the free thread-id list, and the raw storage backing
      // the _Thread_record nodes.
      _Thread_record*       _M_thread_freelist;
      void*         _M_thread_freelist_initial;

      void
      _M_initialize();
    };
00388 #endif
00389 
00390   template<template <bool> class _PoolTp, bool _Thread>
00391     struct __common_pool
00392     {
00393       typedef _PoolTp<_Thread>      pool_type;
00394       
00395       static pool_type&
00396       _S_get_pool()
00397       { 
00398     static pool_type _S_pool;
00399     return _S_pool;
00400       }
00401     };
00402 
00403   template<template <bool> class _PoolTp, bool _Thread>
00404     struct __common_pool_base;
00405 
00406   template<template <bool> class _PoolTp>
00407     struct __common_pool_base<_PoolTp, false> 
00408     : public __common_pool<_PoolTp, false>
00409     {
00410       using  __common_pool<_PoolTp, false>::_S_get_pool;
00411 
00412       static void
00413       _S_initialize_once()
00414       {
00415     static bool __init;
00416     if (__builtin_expect(__init == false, false))
00417       {
00418         _S_get_pool()._M_initialize_once(); 
00419         __init = true;
00420       }
00421       }
00422     };
00423 
00424 #ifdef __GTHREADS
  // Threaded flavour: initialization must be safe against concurrent
  // first calls, so it funnels through __gthread_once when threads are
  // active.
  template<template <bool> class _PoolTp>
    struct __common_pool_base<_PoolTp, true>
    : public __common_pool<_PoolTp, true>
    {
      using  __common_pool<_PoolTp, true>::_S_get_pool;

      // Plain function usable as a __gthread_once callback.
      static void
      _S_initialize()
      { _S_get_pool()._M_initialize_once(); }

      static void
      _S_initialize_once()
      {
    static bool __init;
    if (__builtin_expect(__init == false, false))
      {
        if (__gthread_active_p())
          {
        // On some platforms, __gthread_once_t is an aggregate.
        static __gthread_once_t __once = __GTHREAD_ONCE_INIT;
        __gthread_once(&__once, _S_initialize);
          }

        // Double check initialization. May be necessary on some
        // systems for proper construction when not compiling with
        // thread flags.
        _S_get_pool()._M_initialize_once();
        __init = true;
      }
      }
    };
00456 #endif
00457 
  /// Policy for shared __pool objects.
  template<template <bool> class _PoolTp, bool _Thread>
    struct __common_pool_policy : public __common_pool_base<_PoolTp, _Thread>
    {
      // Rebind to another value_type.  The pool is shared, so only the
      // pool kind and threading flag carry over; the element type does
      // not participate in the policy.
      template<typename _Tp1, template <bool> class _PoolTp1 = _PoolTp,
           bool _Thread1 = _Thread>
        struct _M_rebind
        { typedef __common_pool_policy<_PoolTp1, _Thread1> other; };

      using  __common_pool_base<_PoolTp, _Thread>::_S_get_pool;
      using  __common_pool_base<_PoolTp, _Thread>::_S_initialize_once;
  };
00470  
00471 
  // Per-type pool helper: each value_type _Tp gets its own pool,
  // tuned to _Tp's size and alignment in _S_get_pool below.
  template<typename _Tp, template <bool> class _PoolTp, bool _Thread>
    struct __per_type_pool
    {
      typedef _Tp           value_type;
      typedef _PoolTp<_Thread>      pool_type;

      static pool_type&
      _S_get_pool()
      {
    // Sane defaults for the _PoolTp.
    typedef typename pool_type::_Block_record _Block_record;
    // Alignment must be at least sizeof(_Block_record), since freed
    // storage is overlaid with a _Block_record.
    const static size_t __a = (__alignof__(_Tp) >= sizeof(_Block_record)
                   ? __alignof__(_Tp) : sizeof(_Block_record));

    typedef typename __pool_base::_Tune _Tune;
    // Scale the generic defaults by sizeof(_Tp): cache requests up
    // to 64 objects, smallest bin two objects (but never below the
    // alignment), chunks of _S_chunk_size objects.  NB: the relative
    // order of these two local statics matters - _S_tune must be
    // constructed before _S_pool, which copies it.
    static _Tune _S_tune(__a, sizeof(_Tp) * 64,
                 sizeof(_Tp) * 2 >= __a ? sizeof(_Tp) * 2 : __a,
                 sizeof(_Tp) * size_t(_Tune::_S_chunk_size),
                 _Tune::_S_max_threads,
                 _Tune::_S_freelist_headroom,
                 std::getenv("GLIBCXX_FORCE_NEW") ? true : false);
    static pool_type _S_pool(_S_tune);
    return _S_pool;
      }
    };
00497 
00498   template<typename _Tp, template <bool> class _PoolTp, bool _Thread>
00499     struct __per_type_pool_base;
00500 
00501   template<typename _Tp, template <bool> class _PoolTp>
00502     struct __per_type_pool_base<_Tp, _PoolTp, false> 
00503     : public __per_type_pool<_Tp, _PoolTp, false> 
00504     {
00505       using  __per_type_pool<_Tp, _PoolTp, false>::_S_get_pool;
00506 
00507       static void
00508       _S_initialize_once()
00509       {
00510     static bool __init;
00511     if (__builtin_expect(__init == false, false))
00512       {
00513         _S_get_pool()._M_initialize_once(); 
00514         __init = true;
00515       }
00516       }
00517     };
00518 
00519  #ifdef __GTHREADS
  // Threaded flavour: initialization of this type's pool must be safe
  // against concurrent first calls, so it funnels through
  // __gthread_once when threads are active.
  template<typename _Tp, template <bool> class _PoolTp>
    struct __per_type_pool_base<_Tp, _PoolTp, true>
    : public __per_type_pool<_Tp, _PoolTp, true>
    {
      using  __per_type_pool<_Tp, _PoolTp, true>::_S_get_pool;

      // Plain function usable as a __gthread_once callback.
      static void
      _S_initialize()
      { _S_get_pool()._M_initialize_once(); }

      static void
      _S_initialize_once()
      {
    static bool __init;
    if (__builtin_expect(__init == false, false))
      {
        if (__gthread_active_p())
          {
        // On some platforms, __gthread_once_t is an aggregate.
        static __gthread_once_t __once = __GTHREAD_ONCE_INIT;
        __gthread_once(&__once, _S_initialize);
          }

        // Double check initialization. May be necessary on some
        // systems for proper construction when not compiling with
        // thread flags.
        _S_get_pool()._M_initialize_once();
        __init = true;
      }
      }
    };
00551 #endif
00552 
  /// Policy for individual __pool objects.
  template<typename _Tp, template <bool> class _PoolTp, bool _Thread>
    struct __per_type_pool_policy
    : public __per_type_pool_base<_Tp, _PoolTp, _Thread>
    {
      // Rebind to another value_type.  Unlike __common_pool_policy the
      // element type is itself part of the policy, so each _Tp1 gets a
      // distinct pool.
      template<typename _Tp1, template <bool> class _PoolTp1 = _PoolTp,
           bool _Thread1 = _Thread>
        struct _M_rebind
        { typedef __per_type_pool_policy<_Tp1, _PoolTp1, _Thread1> other; };

      using  __per_type_pool_base<_Tp, _PoolTp, _Thread>::_S_get_pool;
      using  __per_type_pool_base<_Tp, _PoolTp, _Thread>::_S_initialize_once;
  };
00566 
00567 
  /// Base class for _Tp dependent member functions: the standard
  /// allocator boilerplate that does not touch the pool itself.
  template<typename _Tp>
    class __mt_alloc_base
    {
    public:
      typedef size_t                    size_type;
      typedef ptrdiff_t                 difference_type;
      typedef _Tp*                      pointer;
      typedef const _Tp*                const_pointer;
      typedef _Tp&                      reference;
      typedef const _Tp&                const_reference;
      typedef _Tp                       value_type;

#if __cplusplus >= 201103L
      // _GLIBCXX_RESOLVE_LIB_DEFECTS
      // 2103. propagate_on_container_move_assignment
      typedef std::true_type propagate_on_container_move_assignment;
#endif

      // Return the true address of __x, even if _Tp overloads
      // operator&.
      pointer
      address(reference __x) const _GLIBCXX_NOEXCEPT
      { return std::__addressof(__x); }

      const_pointer
      address(const_reference __x) const _GLIBCXX_NOEXCEPT
      { return std::__addressof(__x); }

      // Largest element count acceptable to allocate().
      size_type
      max_size() const _GLIBCXX_USE_NOEXCEPT
      { return size_t(-1) / sizeof(_Tp); }

#if __cplusplus >= 201103L
      // C++11 and later: perfect-forwarding construct/destroy for any
      // object type.
      template<typename _Up, typename... _Args>
        void
        construct(_Up* __p, _Args&&... __args)
    { ::new((void *)__p) _Up(std::forward<_Args>(__args)...); }

      template<typename _Up>
        void
        destroy(_Up* __p) { __p->~_Up(); }
#else
      // _GLIBCXX_RESOLVE_LIB_DEFECTS
      // 402. wrong new expression in [some_] allocator::construct
      void
      construct(pointer __p, const _Tp& __val)
      { ::new((void *)__p) _Tp(__val); }

      void
      destroy(pointer __p) { __p->~_Tp(); }
#endif
    };
00619 
00620 #ifdef __GTHREADS
00621 #define __thread_default true
00622 #else
00623 #define __thread_default false
00624 #endif
00625 
00626   /**
00627    *  @brief  This is a fixed size (power of 2) allocator which - when
00628    *  compiled with thread support - will maintain one freelist per
00629    *  size per thread plus a @a global one. Steps are taken to limit
00630    *  the per thread freelist sizes (by returning excess back to
00631    *  the @a global list).
00632    *  @ingroup allocators
00633    *
00634    *  Further details:
00635    *  http://gcc.gnu.org/onlinedocs/libstdc++/manual/bk01pt12ch32.html
00636    */
00637   template<typename _Tp, 
00638        typename _Poolp = __common_pool_policy<__pool, __thread_default> >
00639     class __mt_alloc : public __mt_alloc_base<_Tp>
00640     {
00641     public:
00642       typedef size_t                        size_type;
00643       typedef ptrdiff_t                     difference_type;
00644       typedef _Tp*                          pointer;
00645       typedef const _Tp*                    const_pointer;
00646       typedef _Tp&                          reference;
00647       typedef const _Tp&                    const_reference;
00648       typedef _Tp                           value_type;
00649       typedef _Poolp                __policy_type;
00650       typedef typename _Poolp::pool_type    __pool_type;
00651 
00652       template<typename _Tp1, typename _Poolp1 = _Poolp>
00653         struct rebind
00654         { 
00655       typedef typename _Poolp1::template _M_rebind<_Tp1>::other pol_type;
00656       typedef __mt_alloc<_Tp1, pol_type> other;
00657     };
00658 
00659       __mt_alloc() _GLIBCXX_USE_NOEXCEPT { }
00660 
00661       __mt_alloc(const __mt_alloc&) _GLIBCXX_USE_NOEXCEPT { }
00662 
00663       template<typename _Tp1, typename _Poolp1>
00664         __mt_alloc(const __mt_alloc<_Tp1, _Poolp1>&) _GLIBCXX_USE_NOEXCEPT { }
00665 
00666       ~__mt_alloc() _GLIBCXX_USE_NOEXCEPT { }
00667 
00668       pointer
00669       allocate(size_type __n, const void* = 0);
00670 
00671       void
00672       deallocate(pointer __p, size_type __n);
00673 
00674       const __pool_base::_Tune
00675       _M_get_options()
00676       { 
00677     // Return a copy, not a reference, for external consumption.
00678     return __policy_type::_S_get_pool()._M_get_options();
00679       }
00680       
00681       void
00682       _M_set_options(__pool_base::_Tune __t)
00683       { __policy_type::_S_get_pool()._M_set_options(__t); }
00684     };
00685 
00686   template<typename _Tp, typename _Poolp>
00687     typename __mt_alloc<_Tp, _Poolp>::pointer
00688     __mt_alloc<_Tp, _Poolp>::
00689     allocate(size_type __n, const void*)
00690     {
00691       if (__n > this->max_size())
00692     std::__throw_bad_alloc();
00693 
00694       __policy_type::_S_initialize_once();
00695 
00696       // Requests larger than _M_max_bytes are handled by operator
00697       // new/delete directly.
00698       __pool_type& __pool = __policy_type::_S_get_pool();
00699       const size_t __bytes = __n * sizeof(_Tp);
00700       if (__pool._M_check_threshold(__bytes))
00701     {
00702       void* __ret = ::operator new(__bytes);
00703       return static_cast<_Tp*>(__ret);
00704     }
00705       
00706       // Round up to power of 2 and figure out which bin to use.
00707       const size_t __which = __pool._M_get_binmap(__bytes);
00708       const size_t __thread_id = __pool._M_get_thread_id();
00709       
00710       // Find out if we have blocks on our freelist.  If so, go ahead
00711       // and use them directly without having to lock anything.
00712       char* __c;
00713       typedef typename __pool_type::_Bin_record _Bin_record;
00714       const _Bin_record& __bin = __pool._M_get_bin(__which);
00715       if (__bin._M_first[__thread_id])
00716     {
00717       // Already reserved.
00718       typedef typename __pool_type::_Block_record _Block_record;
00719       _Block_record* __block = __bin._M_first[__thread_id];
00720       __bin._M_first[__thread_id] = __block->_M_next;
00721       
00722       __pool._M_adjust_freelist(__bin, __block, __thread_id);
00723       __c = reinterpret_cast<char*>(__block) + __pool._M_get_align();
00724     }
00725       else
00726     {
00727       // Null, reserve.
00728       __c = __pool._M_reserve_block(__bytes, __thread_id);
00729     }
00730       return static_cast<_Tp*>(static_cast<void*>(__c));
00731     }
00732   
00733   template<typename _Tp, typename _Poolp>
00734     void
00735     __mt_alloc<_Tp, _Poolp>::
00736     deallocate(pointer __p, size_type __n)
00737     {
00738       if (__builtin_expect(__p != 0, true))
00739     {
00740       // Requests larger than _M_max_bytes are handled by
00741       // operators new/delete directly.
00742       __pool_type& __pool = __policy_type::_S_get_pool();
00743       const size_t __bytes = __n * sizeof(_Tp);
00744       if (__pool._M_check_threshold(__bytes))
00745         ::operator delete(__p);
00746       else
00747         __pool._M_reclaim_block(reinterpret_cast<char*>(__p), __bytes);
00748     }
00749     }
00750   
  // All __mt_alloc instances of the same instantiation are
  // interchangeable (the allocator is stateless), so they always
  // compare equal.
  template<typename _Tp, typename _Poolp>
    inline bool
    operator==(const __mt_alloc<_Tp, _Poolp>&, const __mt_alloc<_Tp, _Poolp>&)
    { return true; }
00755   
  // Mirror of operator==: stateless allocators are never unequal.
  template<typename _Tp, typename _Poolp>
    inline bool
    operator!=(const __mt_alloc<_Tp, _Poolp>&, const __mt_alloc<_Tp, _Poolp>&)
    { return false; }
00760 
00761 #undef __thread_default
00762 
00763 _GLIBCXX_END_NAMESPACE_VERSION
00764 } // namespace
00765 
00766 #endif