mt_allocator.h

// MT-optimized allocator -*- C++ -*-

// Copyright (C) 2003, 2004 Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library.  This library is free
// software; you can redistribute it and/or modify it under the
// terms of the GNU General Public License as published by the
// Free Software Foundation; either version 2, or (at your option)
// any later version.

// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU General Public License for more details.

// You should have received a copy of the GNU General Public License along
// with this library; see the file COPYING.  If not, write to the Free
// Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307,
// USA.

// As a special exception, you may use this file as part of a free software
// library without restriction.  Specifically, if other files instantiate
// templates or use macros or inline functions from this file, or you compile
// this file and link it with other files to produce an executable, this
// file does not by itself cause the resulting executable to be covered by
// the GNU General Public License.  This exception does not however
// invalidate any other reasons why the executable file might be covered by
// the GNU General Public License.

/** @file mt_allocator.h
 *  This file is a GNU extension to the Standard C++ Library.
 */

#ifndef _MT_ALLOCATOR_H
#define _MT_ALLOCATOR_H 1

#include <new>
#include <cstdlib>
#include <bits/functexcept.h>
#include <bits/gthr.h>
#include <bits/atomicity.h>
namespace __gnu_cxx
{
  /**
   *  This is a fixed-size (power of 2) allocator which - when
   *  compiled with thread support - maintains one freelist per
   *  size per thread plus a "global" one. Steps are taken to limit
   *  the per-thread freelist sizes (by returning excess blocks to
   *  the "global" list).
   *
   *  Further details:
   *  http://gcc.gnu.org/onlinedocs/libstdc++/ext/mt_allocator.html
   */
  template<typename _Tp>
    class __mt_alloc
    {
    public:
      typedef size_t                    size_type;
      typedef ptrdiff_t                 difference_type;
      typedef _Tp*                      pointer;
      typedef const _Tp*                const_pointer;
      typedef _Tp&                      reference;
      typedef const _Tp&                const_reference;
      typedef _Tp                       value_type;

      template<typename _Tp1>
        struct rebind
        { typedef __mt_alloc<_Tp1> other; };

      __mt_alloc() throw()
      {
        // XXX
      }

      __mt_alloc(const __mt_alloc&) throw()
      {
        // XXX
      }

      template<typename _Tp1>
        __mt_alloc(const __mt_alloc<_Tp1>&) throw()
        {
          // XXX
        }

      ~__mt_alloc() throw() { }

      pointer
      address(reference __x) const
      { return &__x; }

      const_pointer
      address(const_reference __x) const
      { return &__x; }

      size_type
      max_size() const throw()
      { return size_t(-1) / sizeof(_Tp); }

      // _GLIBCXX_RESOLVE_LIB_DEFECTS
      // 402. wrong new expression in [some_] allocator::construct
      void
      construct(pointer __p, const _Tp& __val)
      { ::new(__p) _Tp(__val); }

      void
      destroy(pointer __p) { __p->~_Tp(); }

      pointer
      allocate(size_type __n, const void* = 0);

      void
      deallocate(pointer __p, size_type __n);

      // Variables used to configure the behavior of the allocator,
      // assigned and explained in detail below.
      struct _Tune
      {
        // Alignment needed.
        // NB: In any case this must be >= sizeof(_Block_record), that
        // is 4 on 32-bit machines and 8 on 64-bit machines.
        size_t  _M_align;

        // Allocation requests (after round-up to power of 2) below
        // this value will be handled by the allocator. A raw call to
        // operator new will be used for requests larger than this
        // value.
        size_t  _M_max_bytes;

        // Size in bytes of the smallest bin.
        // NB: Must be a power of 2 and >= _M_align.
        size_t  _M_min_bin;

        // In order to avoid fragmentation and to minimize the number
        // of calls to operator new, we always request new memory
        // using this value. Based on previous discussions on the
        // libstdc++ mailing list we have chosen the value below.
        // See http://gcc.gnu.org/ml/libstdc++/2001-07/msg00077.html
        size_t  _M_chunk_size;

        // The maximum number of supported threads. For reference,
        // our Linux 2.4.18 reports 4070 in
        // /proc/sys/kernel/threads-max.
        size_t  _M_max_threads;

        // Each time a deallocation occurs in a threaded application
        // we make sure that there are no more than
        // _M_freelist_headroom % of used memory on the freelist. If
        // the number of additional records is more than
        // _M_freelist_headroom % of the freelist, we move these
        // records back to the global pool.
        size_t  _M_freelist_headroom;

        // When set to true, forces all allocations to use operator new.
        bool    _M_force_new;

        explicit
        _Tune()
        : _M_align(8), _M_max_bytes(128), _M_min_bin(8),
          _M_chunk_size(4096 - 4 * sizeof(void*)),
          _M_max_threads(4096), _M_freelist_headroom(10),
          _M_force_new(getenv("GLIBCXX_FORCE_NEW") ? true : false)
        { }

        explicit
        _Tune(size_t __align, size_t __maxb, size_t __minbin,
              size_t __chunk, size_t __maxthreads, size_t __headroom,
              bool __force)
        : _M_align(__align), _M_max_bytes(__maxb), _M_min_bin(__minbin),
          _M_chunk_size(__chunk), _M_max_threads(__maxthreads),
          _M_freelist_headroom(__headroom), _M_force_new(__force)
        { }
      };
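
      // Illustration (a sketch, not part of the original header):
      // the second constructor packs all of the knobs above, so
      //
      //   _Tune __t(8, 128, 8, 4096 - 4 * sizeof(void*), 4096, 10, false);
      //
      // reproduces the defaults of the first constructor (modulo the
      // GLIBCXX_FORCE_NEW environment check).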

    private:
      // We need to create the initial lists and set up some variables
      // before we can answer the first request for memory.
#ifdef __GTHREADS
      static __gthread_once_t           _S_once;
#endif
      static bool                       _S_init;

      static void
      _S_initialize();

      // Configuration options.
      static _Tune                      _S_options;

      static const _Tune
      _S_get_options()
      { return _S_options; }

      static void
      _S_set_options(_Tune __t)
      {
        if (!_S_init)
          _S_options = __t;
      }

      // Using a short int as the binmap type implies we are never
      // caching blocks larger than 65535 bytes with this allocator.
      typedef unsigned short int        _Binmap_type;
      static _Binmap_type*              _S_binmap;
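
      // For example, with the default _Tune the binmap built in
      // _S_initialize() maps request sizes to bin indices as
      // _S_binmap[1..8] == 0, _S_binmap[9..16] == 1, ...,
      // _S_binmap[65..128] == 4.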

      // Each requesting thread is assigned an id ranging from 1 to
      // _S_options._M_max_threads. Thread id 0 is used as the global
      // memory pool. In order to get constant performance on the
      // thread assignment routine, we keep a list of free ids. When
      // a thread first requests memory we remove the first record in
      // this list and store its address in a __gthread_key. When
      // initializing the __gthread_key we specify a destructor. When
      // this destructor is called (i.e. when the thread dies), we
      // return the thread id to the front of this list.
#ifdef __GTHREADS
      struct _Thread_record
      {
        // Points to the next free thread id record. NULL if this is
        // the last record in the list.
        _Thread_record* volatile        _M_next;

        // Thread id ranging from 1 to _M_max_threads.
        size_t                          _M_id;
      };

      static _Thread_record* volatile   _S_thread_freelist_first;
      static __gthread_mutex_t          _S_thread_freelist_mutex;
      static __gthread_key_t            _S_thread_key;

      static void
      _S_destroy_thread_key(void* __freelist_pos);
#endif

      static size_t
      _S_get_thread_id();

      union _Block_record
      {
        // Points to the _Block_record of the next free block.
        _Block_record* volatile         _M_next;

#ifdef __GTHREADS
        // The thread id of the thread which has requested this block.
        size_t                          _M_thread_id;
#endif
      };
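
      // NB: A block's storage does double duty: while the block sits
      // on a freelist its first bytes hold _M_next, and while it is
      // handed out (in the threaded case) they hold the owner's
      // _M_thread_id. User data starts _M_align bytes past the
      // _Block_record, so it never overlaps either field.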

      struct _Bin_record
      {
        // An "array" of pointers to the first free block for each
        // thread id. Memory for this "array" is allocated in
        // _S_initialize() with room for _M_max_threads + 1 entries
        // (one per thread id plus global pool 0).
        _Block_record** volatile        _M_first;

#ifdef __GTHREADS
        // "Arrays" of counters used to keep track of the number of
        // blocks that are on the freelist/in use for each thread id.
        // Memory for these "arrays" is allocated in _S_initialize()
        // with room for _M_max_threads + 1 entries as well.
        size_t* volatile                _M_free;
        size_t* volatile                _M_used;

        // Each bin has its own mutex which is used to ensure data
        // integrity while changing "ownership" of a block.  The mutex
        // is initialized in _S_initialize().
        __gthread_mutex_t*              _M_mutex;
#endif
      };

      // An "array" of _Bin_records, each of which represents a
      // specific power of 2 size. Memory for this "array" is
      // allocated in _S_initialize().
      static _Bin_record* volatile      _S_bin;

      // Actual value calculated in _S_initialize().
      static size_t                     _S_bin_size;
    };
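
  // A minimal usage sketch (illustration only, not part of the
  // original header): __mt_alloc satisfies the standard allocator
  // requirements, so a container can simply name it as its
  // allocator argument.
  //
  //   #include <vector>
  //   #include <ext/mt_allocator.h>
  //
  //   typedef __gnu_cxx::__mt_alloc<int> allocator_type;
  //   std::vector<int, allocator_type> __v(1024);
  //
  // Requests above _M_max_bytes (128 by default) bypass the pools
  // and go straight to operator new.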

  template<typename _Tp>
    typename __mt_alloc<_Tp>::pointer
    __mt_alloc<_Tp>::
    allocate(size_type __n, const void*)
    {
      // Although the test in __gthread_once() would suffice, we wrap
      // the test of the once condition in our own unlocked check.
      // This saves one function call to pthread_once() (which itself
      // only tests for the once value unlocked anyway and
      // immediately returns if set).
      if (!_S_init)
        {
#ifdef __GTHREADS
          if (__gthread_active_p())
            __gthread_once(&_S_once, _S_initialize);
#endif
          if (!_S_init)
            _S_initialize();
        }

      // Requests larger than _M_max_bytes are handled by new/delete
      // directly.
      const size_t __bytes = __n * sizeof(_Tp);
      if (__bytes > _S_options._M_max_bytes || _S_options._M_force_new)
        {
          void* __ret = ::operator new(__bytes);
          return static_cast<_Tp*>(__ret);
        }

      // Round up to power of 2 and figure out which bin to use.
      const size_t __which = _S_binmap[__bytes];
      const size_t __thread_id = _S_get_thread_id();

      // Find out if we have blocks on our freelist.  If so, go ahead
      // and use them directly without having to lock anything.
      const _Bin_record& __bin = _S_bin[__which];
      _Block_record* __block = NULL;
      if (__bin._M_first[__thread_id] == NULL)
        {
          // NB: For alignment reasons, we can't use the first _M_align
          // bytes, even when sizeof(_Block_record) < _M_align.
          const size_t __bin_size = ((_S_options._M_min_bin << __which)
                                     + _S_options._M_align);
          size_t __block_count = _S_options._M_chunk_size / __bin_size;

          // Are we using threads?
          // - Yes: check whether there are free blocks on the global
          //   list. If so, grab up to __block_count blocks under one
          //   lock and change their ownership. If the global list is
          //   empty, allocate a new chunk and add its blocks directly
          //   to our own freelist (with us as owner).
          // - No: all operations are made directly on global pool 0;
          //   there is no need to lock or change ownership. Since the
          //   global list is empty at this point, add a new chunk of
          //   blocks and take the first one.
#ifdef __GTHREADS
          if (__gthread_active_p())
            {
              __gthread_mutex_lock(__bin._M_mutex);
              if (__bin._M_first[0] == NULL)
                {
                  // No need to hold the lock when we are adding a
                  // whole chunk to our own list.
                  __gthread_mutex_unlock(__bin._M_mutex);

                  void* __v = ::operator new(_S_options._M_chunk_size);
                  __bin._M_first[__thread_id] = static_cast<_Block_record*>(__v);
                  __bin._M_free[__thread_id] = __block_count;

                  --__block_count;
                  __block = __bin._M_first[__thread_id];
                  while (__block_count-- > 0)
                    {
                      char* __c = reinterpret_cast<char*>(__block) + __bin_size;
                      __block->_M_next = reinterpret_cast<_Block_record*>(__c);
                      __block = __block->_M_next;
                    }
                  __block->_M_next = NULL;
                }
              else
                {
                  // Is the number of required blocks greater than or
                  // equal to the number that can be provided by the
                  // global free list?
                  __bin._M_first[__thread_id] = __bin._M_first[0];
                  if (__block_count >= __bin._M_free[0])
                    {
                      __bin._M_free[__thread_id] = __bin._M_free[0];
                      __bin._M_free[0] = 0;
                      __bin._M_first[0] = NULL;
                    }
                  else
                    {
                      __bin._M_free[__thread_id] = __block_count;
                      __bin._M_free[0] -= __block_count;
                      --__block_count;
                      __block = __bin._M_first[0];
                      while (__block_count-- > 0)
                        __block = __block->_M_next;
                      __bin._M_first[0] = __block->_M_next;
                      __block->_M_next = NULL;
                    }
                  __gthread_mutex_unlock(__bin._M_mutex);
                }
            }
          else
#endif
            {
              void* __v = ::operator new(_S_options._M_chunk_size);
              __bin._M_first[0] = static_cast<_Block_record*>(__v);

              --__block_count;
              __block = __bin._M_first[0];
              while (__block_count-- > 0)
                {
                  char* __c = reinterpret_cast<char*>(__block) + __bin_size;
                  __block->_M_next = reinterpret_cast<_Block_record*>(__c);
                  __block = __block->_M_next;
                }
              __block->_M_next = NULL;
            }
        }

      __block = __bin._M_first[__thread_id];
      __bin._M_first[__thread_id] = __bin._M_first[__thread_id]->_M_next;
#ifdef __GTHREADS
      if (__gthread_active_p())
        {
          __block->_M_thread_id = __thread_id;
          --__bin._M_free[__thread_id];
          ++__bin._M_used[__thread_id];
        }
#endif

      char* __c = reinterpret_cast<char*>(__block) + _S_options._M_align;
      return static_cast<_Tp*>(static_cast<void*>(__c));
    }
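
  // Worked example (illustration only, with the default _Tune on a
  // 64-bit machine): allocate(5) from an int allocator requests 20
  // bytes, so _S_binmap[20] == 2 and the 32-byte bin is used. Each
  // block then occupies __bin_size = (8 << 2) + 8 = 40 bytes, and a
  // fresh chunk of 4096 - 4 * sizeof(void*) = 4064 bytes is carved
  // into 4064 / 40 = 101 blocks.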

  template<typename _Tp>
    void
    __mt_alloc<_Tp>::
    deallocate(pointer __p, size_type __n)
    {
      // Requests larger than _M_max_bytes are handled by operators
      // new/delete directly.
      const size_t __bytes = __n * sizeof(_Tp);
      if (__bytes > _S_options._M_max_bytes || _S_options._M_force_new)
        {
          ::operator delete(__p);
          return;
        }

      // Round up to power of 2 and figure out which bin to use.
      const size_t __which = _S_binmap[__bytes];
      const _Bin_record& __bin = _S_bin[__which];

      char* __c = reinterpret_cast<char*>(__p) - _S_options._M_align;
      _Block_record* __block = reinterpret_cast<_Block_record*>(__c);

#ifdef __GTHREADS
      if (__gthread_active_p())
        {
          // Calculate the number of records to remove from our
          // freelist: in order to avoid too much contention we wait
          // until the number of records is "high enough".
          const size_t __thread_id = _S_get_thread_id();

          long __remove = ((__bin._M_free[__thread_id]
                            * _S_options._M_freelist_headroom)
                           - __bin._M_used[__thread_id]);
          if (__remove > static_cast<long>(100 * (_S_bin_size - __which)
                                           * _S_options._M_freelist_headroom)
              && __remove > static_cast<long>(__bin._M_free[__thread_id]))
            {
              _Block_record* __tmp = __bin._M_first[__thread_id];
              _Block_record* __first = __tmp;
              __remove /= _S_options._M_freelist_headroom;
              const long __removed = __remove;
              --__remove;
              while (__remove-- > 0)
                __tmp = __tmp->_M_next;
              __bin._M_first[__thread_id] = __tmp->_M_next;
              __bin._M_free[__thread_id] -= __removed;

              __gthread_mutex_lock(__bin._M_mutex);
              __tmp->_M_next = __bin._M_first[0];
              __bin._M_first[0] = __first;
              __bin._M_free[0] += __removed;
              __gthread_mutex_unlock(__bin._M_mutex);
            }

          // Return this block to our list and update counters and
          // owner id as needed.
          --__bin._M_used[__block->_M_thread_id];

          __block->_M_next = __bin._M_first[__thread_id];
          __bin._M_first[__thread_id] = __block;

          ++__bin._M_free[__thread_id];
        }
      else
#endif
        {
          // Single threaded application - return to global pool.
          __block->_M_next = __bin._M_first[0];
          __bin._M_first[0] = __block;
        }
    }
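
  // Worked example of the headroom check (illustration only, default
  // _Tune, _S_bin_size == 5): a thread holding 600 free blocks and 0
  // used blocks in bin 0 gives __remove = 600 * 10 - 0 = 6000, which
  // exceeds both 100 * (5 - 0) * 10 = 5000 and 600, so 6000 / 10 =
  // 600 blocks are moved back to the global pool under the bin mutex.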

  template<typename _Tp>
    void
    __mt_alloc<_Tp>::
    _S_initialize()
    {
      // This method is called on the first allocation (when _S_init
      // is still false) to create the bins.

      // Ensure that the static initialization of _S_options has
      // happened.  This depends on (a) _M_align == 0 being an invalid
      // value that is only present at startup, and (b) the real
      // static initialization that happens later not actually
      // changing anything.
      if (_S_options._M_align == 0)
        new (&_S_options) _Tune;

      // _M_force_new must not change after the first allocate(),
      // which in turn calls this method, so if it's false, it's false
      // forever and we don't need to return here ever again.
      if (_S_options._M_force_new)
        {
          _S_init = true;
          return;
        }

      // Calculate the number of bins required based on _M_max_bytes.
      // _S_bin_size is statically initialized to one.
      size_t __bin_size = _S_options._M_min_bin;
      while (_S_options._M_max_bytes > __bin_size)
        {
          __bin_size <<= 1;
          ++_S_bin_size;
        }
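
      // With the default _Tune this loop leaves _S_bin_size == 5,
      // i.e. bins of 8, 16, 32, 64 and 128 bytes.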

      // Set up the bin map for quick lookup of the relevant bin.
      const size_t __j = (_S_options._M_max_bytes + 1) * sizeof(_Binmap_type);
      _S_binmap = static_cast<_Binmap_type*>(::operator new(__j));

      _Binmap_type* __bp = _S_binmap;
      _Binmap_type __bin_max = _S_options._M_min_bin;
      _Binmap_type __bint = 0;
      for (_Binmap_type __ct = 0; __ct <= _S_options._M_max_bytes; ++__ct)
        {
          if (__ct > __bin_max)
            {
              __bin_max <<= 1;
              ++__bint;
            }
          *__bp++ = __bint;
        }

      // Initialize _S_bin and its members.
      void* __v = ::operator new(sizeof(_Bin_record) * _S_bin_size);
      _S_bin = static_cast<_Bin_record*>(__v);

      // If __gthread_active_p(), create and initialize the list of
      // free thread ids. Single threaded applications use thread id 0
      // directly and have no need for this.
#ifdef __GTHREADS
      if (__gthread_active_p())
        {
          const size_t __k = sizeof(_Thread_record) * _S_options._M_max_threads;
          __v = ::operator new(__k);
          _S_thread_freelist_first = static_cast<_Thread_record*>(__v);

          // NOTE! The first assignable thread id is 1 since the
          // global pool uses id 0.
          size_t __i;
          for (__i = 1; __i < _S_options._M_max_threads; ++__i)
            {
              _Thread_record& __tr = _S_thread_freelist_first[__i - 1];
              __tr._M_next = &_S_thread_freelist_first[__i];
              __tr._M_id = __i;
            }

          // Set last record.
          _S_thread_freelist_first[__i - 1]._M_next = NULL;
          _S_thread_freelist_first[__i - 1]._M_id = __i;

          // Make sure this is initialized.
#ifndef __GTHREAD_MUTEX_INIT
          __GTHREAD_MUTEX_INIT_FUNCTION(&_S_thread_freelist_mutex);
#endif
          // Initialize the per-thread key used to hold a pointer
          // into the thread freelist.
          __gthread_key_create(&_S_thread_key, _S_destroy_thread_key);

          const size_t __max_threads = _S_options._M_max_threads + 1;
          for (size_t __n = 0; __n < _S_bin_size; ++__n)
            {
              _Bin_record& __bin = _S_bin[__n];
              __v = ::operator new(sizeof(_Block_record*) * __max_threads);
              __bin._M_first = static_cast<_Block_record**>(__v);

              __v = ::operator new(sizeof(size_t) * __max_threads);
              __bin._M_free = static_cast<size_t*>(__v);

              __v = ::operator new(sizeof(size_t) * __max_threads);
              __bin._M_used = static_cast<size_t*>(__v);

              __v = ::operator new(sizeof(__gthread_mutex_t));
              __bin._M_mutex = static_cast<__gthread_mutex_t*>(__v);

#ifdef __GTHREAD_MUTEX_INIT
              {
                // Do not copy a POSIX/gthr mutex once in use.
                __gthread_mutex_t __tmp = __GTHREAD_MUTEX_INIT;
                *__bin._M_mutex = __tmp;
              }
#else
              { __GTHREAD_MUTEX_INIT_FUNCTION(__bin._M_mutex); }
#endif

              for (size_t __threadn = 0; __threadn < __max_threads;
                   ++__threadn)
                {
                  __bin._M_first[__threadn] = NULL;
                  __bin._M_free[__threadn] = 0;
                  __bin._M_used[__threadn] = 0;
                }
            }
        }
      else
#endif
        for (size_t __n = 0; __n < _S_bin_size; ++__n)
          {
            _Bin_record& __bin = _S_bin[__n];
            __v = ::operator new(sizeof(_Block_record*));
            __bin._M_first = static_cast<_Block_record**>(__v);
            __bin._M_first[0] = NULL;
          }

      _S_init = true;
    }

  template<typename _Tp>
    size_t
    __mt_alloc<_Tp>::
    _S_get_thread_id()
    {
#ifdef __GTHREADS
      // If we have thread support and it's active, we check the
      // thread key value and return its id. If the key is not yet
      // set, we take the first record from _S_thread_freelist, set
      // the key, and return that record's id.
      if (__gthread_active_p())
        {
          _Thread_record* __freelist_pos =
            static_cast<_Thread_record*>(__gthread_getspecific(_S_thread_key));
          if (__freelist_pos == NULL)
            {
              // Since _S_options._M_max_threads must be larger than
              // the theoretical maximum number of threads of the OS,
              // the list can never be empty.
              __gthread_mutex_lock(&_S_thread_freelist_mutex);
              __freelist_pos = _S_thread_freelist_first;
              _S_thread_freelist_first = _S_thread_freelist_first->_M_next;
              __gthread_mutex_unlock(&_S_thread_freelist_mutex);

              __gthread_setspecific(_S_thread_key,
                                    static_cast<void*>(__freelist_pos));
            }
          return __freelist_pos->_M_id;
        }
#endif
      // Otherwise (no thread support or inactive) all requests are
      // served from the global pool 0.
      return 0;
    }

#ifdef __GTHREADS
  template<typename _Tp>
    void
    __mt_alloc<_Tp>::
    _S_destroy_thread_key(void* __freelist_pos)
    {
      // Return this thread id record to the front of thread_freelist.
      __gthread_mutex_lock(&_S_thread_freelist_mutex);
      _Thread_record* __tr = static_cast<_Thread_record*>(__freelist_pos);
      __tr->_M_next = _S_thread_freelist_first;
      _S_thread_freelist_first = __tr;
      __gthread_mutex_unlock(&_S_thread_freelist_mutex);
    }
#endif

  template<typename _Tp>
    inline bool
    operator==(const __mt_alloc<_Tp>&, const __mt_alloc<_Tp>&)
    { return true; }

  template<typename _Tp>
    inline bool
    operator!=(const __mt_alloc<_Tp>&, const __mt_alloc<_Tp>&)
    { return false; }

  template<typename _Tp>
    bool __mt_alloc<_Tp>::_S_init = false;

  template<typename _Tp>
    typename __mt_alloc<_Tp>::_Tune __mt_alloc<_Tp>::_S_options;

  template<typename _Tp>
    typename __mt_alloc<_Tp>::_Binmap_type* __mt_alloc<_Tp>::_S_binmap;

  template<typename _Tp>
    typename __mt_alloc<_Tp>::_Bin_record* volatile __mt_alloc<_Tp>::_S_bin;

  template<typename _Tp>
    size_t __mt_alloc<_Tp>::_S_bin_size = 1;

  // Actual initialization in _S_initialize().
#ifdef __GTHREADS
  template<typename _Tp>
    __gthread_once_t __mt_alloc<_Tp>::_S_once = __GTHREAD_ONCE_INIT;

  template<typename _Tp>
    typename __mt_alloc<_Tp>::_Thread_record*
    volatile __mt_alloc<_Tp>::_S_thread_freelist_first = NULL;

  template<typename _Tp>
    __gthread_key_t __mt_alloc<_Tp>::_S_thread_key;

  template<typename _Tp>
    __gthread_mutex_t
#ifdef __GTHREAD_MUTEX_INIT
    __mt_alloc<_Tp>::_S_thread_freelist_mutex = __GTHREAD_MUTEX_INIT;
#else
    __mt_alloc<_Tp>::_S_thread_freelist_mutex;
#endif
#endif
} // namespace __gnu_cxx

#endif
