bitmap_allocator.h

00001 // Bitmapped Allocator. -*- C++ -*-
00002 
00003 // Copyright (C) 2004 Free Software Foundation, Inc.
00004 //
00005 // This file is part of the GNU ISO C++ Library.  This library is free
00006 // software; you can redistribute it and/or modify it under the
00007 // terms of the GNU General Public License as published by the
00008 // Free Software Foundation; either version 2, or (at your option)
00009 // any later version.
00010 
00011 // This library is distributed in the hope that it will be useful,
00012 // but WITHOUT ANY WARRANTY; without even the implied warranty of
00013 // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
00014 // GNU General Public License for more details.
00015 
00016 // You should have received a copy of the GNU General Public License along
00017 // with this library; see the file COPYING.  If not, write to the Free
00018 // Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307,
00019 // USA.
00020 
00021 // As a special exception, you may use this file as part of a free software
00022 // library without restriction.  Specifically, if other files instantiate
00023 // templates or use macros or inline functions from this file, or you compile
00024 // this file and link it with other files to produce an executable, this
00025 // file does not by itself cause the resulting executable to be covered by
00026 // the GNU General Public License.  This exception does not however
00027 // invalidate any other reasons why the executable file might be covered by
00028 // the GNU General Public License.
00029 
00030 
00031 
00032 #if !defined _BITMAP_ALLOCATOR_H
00033 #define _BITMAP_ALLOCATOR_H 1
00034 
00035 #include <cstddef>
00036 //For std::size_t, and ptrdiff_t.
00037 #include <utility>
00038 //For std::pair.
00039 #include <algorithm>
00040 //For std::find_if and std::lower_bound.
00041 #include <vector>
00042 //For the free list of exponentially growing memory blocks. At most,
00043 //the size of the vector should be no more than the number of bits in
00044 //an unsigned integer.
00045 #include <functional>
00046 //For greater_equal, and less_equal.
00047 #include <new>
00048 //For operator new.
00049 #include <bits/gthr.h>
00050 //For __gthread_mutex_t, __gthread_mutex_lock and __gthread_mutex_unlock.
00051 #include <ext/new_allocator.h>
00052 //For __gnu_cxx::new_allocator for std::vector.
00053 
00054 #include <cassert>
00055 #define NDEBUG
00056 
00057 //#define CHECK_FOR_ERRORS
00058 //#define __CPU_HAS_BACKWARD_BRANCH_PREDICTION
00059 
00060 namespace __gnu_cxx
00061 {
00062   namespace {
00063 #if defined __GTHREADS
00064     bool const __threads_enabled = __gthread_active_p();
00065 #endif
00066 
00067   }
00068 
00069 #if defined __GTHREADS
00070   class _Mutex {
00071     __gthread_mutex_t _M_mut;
00072     //Prevent Copying and assignment.
00073     _Mutex (_Mutex const&);
00074     _Mutex& operator= (_Mutex const&);
00075   public:
00076     _Mutex ()
00077     {
00078       if (__threads_enabled)
00079     {
00080 #if !defined __GTHREAD_MUTEX_INIT
00081       __GTHREAD_MUTEX_INIT_FUNCTION(&_M_mut);
00082 #else
00083       __gthread_mutex_t __mtemp = __GTHREAD_MUTEX_INIT;
00084       _M_mut = __mtemp;
00085 #endif
00086     }
00087     }
00088     ~_Mutex ()
00089     {
00090       //Gthreads does not define a Mutex Destruction Function.
00091     }
00092     __gthread_mutex_t *_M_get() { return &_M_mut; }
00093   };
00094 
00095   class _Lock {
00096     _Mutex* _M_pmt;
00097     bool _M_locked;
00098     //Prevent Copying and assignment.
00099     _Lock (_Lock const&);
00100     _Lock& operator= (_Lock const&);
00101   public:
00102     _Lock(_Mutex* __mptr)
00103       : _M_pmt(__mptr), _M_locked(false)
00104     { this->_M_lock(); }
00105     void _M_lock()
00106     {
00107       if (__threads_enabled)
00108     {
00109       _M_locked = true;
00110       __gthread_mutex_lock(_M_pmt->_M_get());
00111     }
00112     }
00113     void _M_unlock()
00114     {
00115       if (__threads_enabled)
00116     {
00117       if (__builtin_expect(_M_locked, true))
00118         {
00119           __gthread_mutex_unlock(_M_pmt->_M_get());
00120           _M_locked = false;
00121         }
00122     }
00123     }
00124     ~_Lock() { this->_M_unlock(); }
00125   };
00126 #endif
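  //Illustrative sketch: _Mutex and _Lock together implement scoped
  //locking, e.g.
  //
  //  _Mutex __m;
  //  {
  //    _Lock __guard(&__m);   //locked in the constructor
  //    //...critical section...
  //  }                        //unlocked in ~_Lock()
  //
  //This mirrors how _S_bfl_mutex and _S_mut are used further down in
  //this file.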
00127 
00128 
00129 
00130   namespace __aux_balloc {
00131     static const unsigned int _Bits_Per_Byte = 8;
00132     static const unsigned int _Bits_Per_Block = sizeof(unsigned int) * _Bits_Per_Byte;
00133 
00134     template <typename _Addr_Pair_t>
00135     inline size_t __balloc_num_blocks (_Addr_Pair_t __ap)
00136     {
00137       return (__ap.second - __ap.first) + 1;
00138     }
00139 
00140     template <typename _Addr_Pair_t>
00141     inline size_t __balloc_num_bit_maps (_Addr_Pair_t __ap)
00142     {
00143       return __balloc_num_blocks(__ap) / _Bits_Per_Block;
00144     }
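    //For example, assuming a 32-bit unsigned int (so _Bits_Per_Block
    //== 32), a _Block_pair spanning 64 objects yields
    //__balloc_num_blocks(__ap) == 64 and __balloc_num_bit_maps(__ap)
    //== 64 / 32 == 2.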
00145 
00146     //_Tp should be a pointer type.
00147     template <typename _Tp>
00148     class _Inclusive_between : public std::unary_function<typename std::pair<_Tp, _Tp>, bool> {
00149       typedef _Tp pointer;
00150       pointer _M_ptr_value;
00151       typedef typename std::pair<_Tp, _Tp> _Block_pair;
00152 
00153     public:
00154       _Inclusive_between (pointer __ptr) : _M_ptr_value(__ptr) { }
00155       bool operator () (_Block_pair __bp) const throw ()
00156       {
00157     if (std::less_equal<pointer> ()(_M_ptr_value, __bp.second) && 
00158         std::greater_equal<pointer> ()(_M_ptr_value, __bp.first))
00159       return true;
00160     else
00161       return false;
00162       }
00163     };
00164   
00165     //Used to pass a Functor to functions by reference.
00166     template <typename _Functor>
00167     class _Functor_Ref : 
00168       public std::unary_function<typename _Functor::argument_type, typename _Functor::result_type> {
00169       _Functor& _M_fref;
00170     
00171     public:
00172       typedef typename _Functor::argument_type argument_type;
00173       typedef typename _Functor::result_type result_type;
00174 
00175       _Functor_Ref (_Functor& __fref) : _M_fref(__fref) { }
00176       result_type operator() (argument_type __arg) { return _M_fref (__arg); }
00177     };
00178 
00179 
00180     //_Tp should be a pointer type, and _Alloc is the allocator for the vector.
00181     template <typename _Tp, typename _Alloc>
00182     class _Ffit_finder 
00183       : public std::unary_function<typename std::pair<_Tp, _Tp>, bool> {
00184       typedef typename std::vector<std::pair<_Tp, _Tp>, _Alloc> _BPVector;
00185       typedef typename _BPVector::difference_type _Counter_type;
00186       typedef typename std::pair<_Tp, _Tp> _Block_pair;
00187 
00188       unsigned int *_M_pbitmap;
00189       unsigned int _M_data_offset;
00190 
00191     public:
00192       _Ffit_finder () 
00193     : _M_pbitmap (0), _M_data_offset (0)
00194       { }
00195 
00196       bool operator() (_Block_pair __bp) throw()
00197       {
00198     //Set __rover to the last unsigned integer, which is the
00199     //bitmap covering the first blocks. Thus, the bitmaps are in exact
00200     //reverse order of the actual memory layout, so counting down
00201     //the bitmaps is the same as moving up through memory.
00202 
00203     //If the used count stored at the start of the Bit Map headers
00204     //is equal to the number of Objects that the current Block can
00205     //store, then there is definitely no space for another single
00206     //object, so just return false.
00207     _Counter_type __diff = __gnu_cxx::__aux_balloc::__balloc_num_bit_maps (__bp);
00208 
00209     assert (*(reinterpret_cast<unsigned int*>(__bp.first) - (__diff + 1)) <= 
00210         __gnu_cxx::__aux_balloc::__balloc_num_blocks (__bp));
00211 
00212     if (*(reinterpret_cast<unsigned int*>(__bp.first) - (__diff + 1)) == 
00213         __gnu_cxx::__aux_balloc::__balloc_num_blocks (__bp))
00214       return false;
00215 
00216     unsigned int *__rover = reinterpret_cast<unsigned int*>(__bp.first) - 1;
00217     for (_Counter_type __i = 0; __i < __diff; ++__i)
00218       {
00219         _M_data_offset = __i;
00220         if (*__rover)
00221           {
00222         _M_pbitmap = __rover;
00223         return true;
00224           }
00225         --__rover;
00226       }
00227     return false;
00228       }
00229     
00230       unsigned int *_M_get () { return _M_pbitmap; }
00231       unsigned int _M_offset () { return _M_data_offset * _Bits_Per_Block; }
00232     };
00233   
00234     //_Tp should be a pointer type.
00235     template <typename _Tp, typename _Alloc>
00236     class _Bit_map_counter {
00237     
00238       typedef typename std::vector<std::pair<_Tp, _Tp>, _Alloc> _BPVector;
00239       typedef typename _BPVector::size_type _Index_type;
00240       typedef _Tp pointer;
00241     
00242       _BPVector& _M_vbp;
00243       unsigned int *_M_curr_bmap;
00244       unsigned int *_M_last_bmap_in_block;
00245       _Index_type _M_curr_index;
00246     
00247     public:
00248       //Use the 2nd parameter with care. Make sure that such an entry
00249       //exists in the vector before passing that particular index to
00250       //this ctor.
00251       _Bit_map_counter (_BPVector& Rvbp, int __index = -1) 
00252     : _M_vbp(Rvbp)
00253       {
00254     this->_M_reset(__index);
00255       }
00256     
00257       void _M_reset (int __index = -1) throw()
00258       {
00259     if (__index == -1)
00260       {
00261         _M_curr_bmap = 0;
00262         _M_curr_index = (_Index_type)-1;
00263         return;
00264       }
00265 
00266     _M_curr_index = __index;
00267     _M_curr_bmap = reinterpret_cast<unsigned int*>(_M_vbp[_M_curr_index].first) - 1;
00268 
00269     assert (__index <= (int)_M_vbp.size() - 1);
00270     
00271     _M_last_bmap_in_block = _M_curr_bmap - 
00272       ((_M_vbp[_M_curr_index].second - _M_vbp[_M_curr_index].first + 1) / _Bits_Per_Block - 1);
00273       }
00274     
00275       //Dangerous function! Use with extreme care. Pass to this
00276       //function ONLY values that are known to be correct,
00277       //otherwise the counter will be left in an invalid state.
00278       void _M_set_internal_bit_map (unsigned int *__new_internal_marker) throw()
00279       {
00280     _M_curr_bmap = __new_internal_marker;
00281       }
00282     
00283       bool _M_finished () const throw()
00284       {
00285     return (_M_curr_bmap == 0);
00286       }
00287     
00288       _Bit_map_counter& operator++ () throw()
00289       {
00290     if (_M_curr_bmap == _M_last_bmap_in_block)
00291       {
00292         if (++_M_curr_index == _M_vbp.size())
00293           {
00294         _M_curr_bmap = 0;
00295           }
00296         else
00297           {
00298         this->_M_reset (_M_curr_index);
00299           }
00300       }
00301     else
00302       {
00303         --_M_curr_bmap;
00304       }
00305     return *this;
00306       }
00307     
00308       unsigned int *_M_get ()
00309       {
00310     return _M_curr_bmap;
00311       }
00312     
00313       pointer _M_base () { return _M_vbp[_M_curr_index].first; }
00314       unsigned int _M_offset ()
00315       {
00316     return _Bits_Per_Block * ((reinterpret_cast<unsigned int*>(this->_M_base()) - _M_curr_bmap) - 1);
00317       }
00318     
00319       unsigned int _M_where () { return _M_curr_index; }
00320     };
00321   }
00322 
00323   //Generic Version of the bsf instruction.
00324   typedef unsigned int _Bit_map_type;
00325   static inline unsigned int _Bit_scan_forward (register _Bit_map_type __num)
00326   {
00327     return static_cast<unsigned int>(__builtin_ctz(__num));
00328   }
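  //For example, _Bit_scan_forward(0x8) == 3 and _Bit_scan_forward(0x6)
  //== 1: the index of the lowest set bit, i.e. the first free block
  //recorded in a bitmap. The argument must be non-zero, since
  //__builtin_ctz(0) is undefined.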
00329 
00330   struct _OOM_handler {
00331     static std::new_handler _S_old_handler;
00332     static bool _S_handled_oom;
00333     typedef void (*_FL_clear_proc)(void);
00334     static _FL_clear_proc _S_oom_fcp;
00335     
00336     _OOM_handler (_FL_clear_proc __fcp)
00337     {
00338       _S_oom_fcp = __fcp;
00339       _S_old_handler = std::set_new_handler (_S_handle_oom_proc);
00340       _S_handled_oom = false;
00341     }
00342 
00343     static void _S_handle_oom_proc()
00344     {
00345       _S_oom_fcp();
00346       std::set_new_handler (_S_old_handler);
00347       _S_handled_oom = true;
00348     }
00349 
00350     ~_OOM_handler ()
00351     {
00352       if (!_S_handled_oom)
00353     std::set_new_handler (_S_old_handler);
00354     }
00355   };
00356   
00357   std::new_handler _OOM_handler::_S_old_handler;
00358   bool _OOM_handler::_S_handled_oom = false;
00359   _OOM_handler::_FL_clear_proc _OOM_handler::_S_oom_fcp = 0;
00360   
00361 
00362   class _BA_free_list_store {
00363     struct _LT_pointer_compare {
00364       template <typename _Tp>
00365       bool operator() (_Tp* __pt, _Tp const& __crt) const throw()
00366       {
00367     return *__pt < __crt;
00368       }
00369     };
00370 
00371 #if defined __GTHREADS
00372     static _Mutex _S_bfl_mutex;
00373 #endif
00374     static std::vector<unsigned int*> _S_free_list;
00375     typedef std::vector<unsigned int*>::iterator _FLIter;
00376 
00377     static void _S_validate_free_list(unsigned int *__addr) throw()
00378     {
00379       const unsigned int __max_size = 64;
00380       if (_S_free_list.size() >= __max_size)
00381     {
00382       //Ok, the threshold value has been reached.
00383       //We determine which block to remove from the list of free
00384       //blocks.
00385       if (*__addr >= *_S_free_list.back())
00386         {
00387           //Ok, the new block is greater than or equal to the last
00388           //block in the list of free blocks. We just free the new
00389           //block.
00390           operator delete((void*)__addr);
00391           return;
00392         }
00393       else
00394         {
00395           //Deallocate the last block in the list of free blocks, and
00396           //insert the new one in its correct position.
00397           operator delete((void*)_S_free_list.back());
00398           _S_free_list.pop_back();
00399         }
00400     }
00401       
00402       //Just add the block to the list of free blocks
00403       //unconditionally.
00404       _FLIter __temp = std::lower_bound(_S_free_list.begin(), _S_free_list.end(), 
00405                     *__addr, _LT_pointer_compare ());
00406       //We may insert the new free block before __temp.
00407       _S_free_list.insert(__temp, __addr);
00408     }
00409 
00410     static bool _S_should_i_give(unsigned int __block_size, unsigned int __required_size) throw()
00411     {
00412       const unsigned int __max_wastage_percentage = 36;
00413       if (__block_size >= __required_size && 
00414       (((__block_size - __required_size) * 100 / __block_size) < __max_wastage_percentage))
00415     return true;
00416       else
00417     return false;
00418     }
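    //For example, with __block_size == 100 and __required_size == 70
    //the wastage is ((100 - 70) * 100) / 100 == 30%, which is below
    //the 36% threshold, so the block is handed out; with
    //__required_size == 60 the wastage would be 40% and the block is
    //kept on the free list instead.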
00419 
00420   public:
00421     typedef _BA_free_list_store _BFL_type;
00422 
00423     static inline void _S_insert_free_list(unsigned int *__addr) throw()
00424     {
00425 #if defined __GTHREADS
00426       _Lock __bfl_lock(&_S_bfl_mutex);
00427 #endif
00428       //Call _S_validate_free_list to decide what should be done with this
00429       //particular free list.
00430       _S_validate_free_list(--__addr);
00431     }
00432     
00433     static unsigned int *_S_get_free_list(unsigned int __sz) throw (std::bad_alloc)
00434     {
00435 #if defined __GTHREADS
00436       _Lock __bfl_lock(&_S_bfl_mutex);
00437 #endif
00438       _FLIter __temp = std::lower_bound(_S_free_list.begin(), _S_free_list.end(), 
00439                     __sz, _LT_pointer_compare());
00440       if (__temp == _S_free_list.end() || !_S_should_i_give (**__temp, __sz))
00441     {
00442       //We hold the lock because the OOM_Handler is a stateless
00443       //entity.
00444       _OOM_handler __set_handler(_BFL_type::_S_clear);
00445       unsigned int *__ret_val = reinterpret_cast<unsigned int*>
00446         (operator new (__sz + sizeof(unsigned int)));
00447       *__ret_val = __sz;
00448       return ++__ret_val;
00449     }
00450       else
00451     {
00452       unsigned int* __ret_val = *__temp;
00453       _S_free_list.erase (__temp);
00454       return ++__ret_val;
00455     }
00456     }
00457 
00458     //This function just clears the internal Free List, and gives back
00459     //all the memory to the OS.
00460     static void _S_clear()
00461     {
00462 #if defined __GTHREADS
00463       _Lock __bfl_lock(&_S_bfl_mutex);
00464 #endif
00465       _FLIter __iter = _S_free_list.begin();
00466       while (__iter != _S_free_list.end())
00467     {
00468       operator delete((void*)*__iter);
00469       ++__iter;
00470     }
00471       _S_free_list.clear();
00472     }
00473 
00474   };
00475 
00476 #if defined __GTHREADS
00477   _Mutex _BA_free_list_store::_S_bfl_mutex;
00478 #endif
00479   std::vector<unsigned int*> _BA_free_list_store::_S_free_list;
00480 
00481   template <typename _Tp> class bitmap_allocator;
00482   // specialize for void:
00483   template <> class bitmap_allocator<void> {
00484   public:
00485     typedef void*       pointer;
00486     typedef const void* const_pointer;
00487     //  reference-to-void members are impossible.
00488     typedef void  value_type;
00489     template <typename _Tp1> struct rebind { typedef bitmap_allocator<_Tp1> other; };
00490   };
00491 
00492   template <typename _Tp> class bitmap_allocator : private _BA_free_list_store {
00493   public:
00494     typedef size_t    size_type;
00495     typedef ptrdiff_t difference_type;
00496     typedef _Tp*        pointer;
00497     typedef const _Tp*  const_pointer;
00498     typedef _Tp&        reference;
00499     typedef const _Tp&  const_reference;
00500     typedef _Tp         value_type;
00501     template <typename _Tp1> struct rebind { typedef bitmap_allocator<_Tp1> other; };
00502 
00503   private:
00504     static const unsigned int _Bits_Per_Byte = 8;
00505     static const unsigned int _Bits_Per_Block = sizeof(unsigned int) * _Bits_Per_Byte;
00506 
00507     static inline void _S_bit_allocate(unsigned int *__pbmap, unsigned int __pos) throw()
00508     {
00509       unsigned int __mask = 1 << __pos;
00510       __mask = ~__mask;
00511       *__pbmap &= __mask;
00512     }
00513   
00514     static inline void _S_bit_free(unsigned int *__pbmap, unsigned int __pos) throw()
00515     {
00516       unsigned int __mask = 1 << __pos;
00517       *__pbmap |= __mask;
00518     }
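    //In the bitmaps a set bit (1) marks a free block and a cleared
    //bit (0) marks an allocated one. For example, assuming a 32-bit
    //unsigned int, _S_bit_allocate(&__b, 2) turns a bitmap __b of
    //0xFFFFFFFF into 0xFFFFFFFB, and a later _S_bit_free(&__b, 2)
    //restores it to 0xFFFFFFFF.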
00519 
00520     static inline void *_S_memory_get(size_t __sz) throw (std::bad_alloc)
00521     {
00522       return operator new(__sz);
00523     }
00524 
00525     static inline void _S_memory_put(void *__vptr) throw ()
00526     {
00527       operator delete(__vptr);
00528     }
00529 
00530     typedef typename std::pair<pointer, pointer> _Block_pair;
00531     typedef typename __gnu_cxx::new_allocator<_Block_pair> _BPVec_allocator_type;
00532     typedef typename std::vector<_Block_pair, _BPVec_allocator_type> _BPVector;
00533 
00534 
00535 #if defined CHECK_FOR_ERRORS
00536     //Complexity: O(lg(N)), where N is the number of blocks of size
00537     //sizeof(value_type).
00538     static void _S_check_for_free_blocks() throw()
00539     {
00540       typedef typename __gnu_cxx::__aux_balloc::_Ffit_finder<pointer, _BPVec_allocator_type> _FFF;
00541       _FFF __fff;
00542       typedef typename _BPVector::iterator _BPiter;
00543       _BPiter __bpi = std::find_if(_S_mem_blocks.begin(), _S_mem_blocks.end(), 
00544                    __gnu_cxx::__aux_balloc::_Functor_Ref<_FFF>(__fff));
00545       assert(__bpi == _S_mem_blocks.end());
00546     }
00547 #endif
00548 
00549 
00550     //Complexity: O(1), but internally depends upon the complexity of
00551     //the function _BA_free_list_store::_S_get_free_list. The part
00552     //where the bitmap headers are written has a worst case complexity
00553     //of O(X), where X is the number of blocks of size sizeof(value_type)
00554     //within the newly acquired block; this bound is tight.
00555     static void _S_refill_pool() throw (std::bad_alloc)
00556     {
00557 #if defined CHECK_FOR_ERRORS
00558       _S_check_for_free_blocks();
00559 #endif
00560 
00561       const unsigned int __num_bit_maps = _S_block_size / _Bits_Per_Block;
00562       const unsigned int __size_to_allocate = sizeof(unsigned int) + 
00563     _S_block_size * sizeof(value_type) + __num_bit_maps*sizeof(unsigned int);
00564 
00565       unsigned int *__temp = 
00566     reinterpret_cast<unsigned int*>(_BA_free_list_store::_S_get_free_list(__size_to_allocate));
00567       *__temp = 0;
00568       ++__temp;
00569 
00570       //The Header information goes at the Beginning of the Block.
00571       _Block_pair __bp = std::make_pair(reinterpret_cast<pointer>(__temp + __num_bit_maps), 
00572                        reinterpret_cast<pointer>(__temp + __num_bit_maps) 
00573                     + _S_block_size - 1);
00574 
00575       //Fill the Vector with this information.
00576       _S_mem_blocks.push_back(__bp);
00577 
00578       unsigned int __bit_mask = 0; //0 Indicates all Allocated.
00579       __bit_mask = ~__bit_mask; //1 Indicates all Free.
00580 
00581       for (unsigned int __i = 0; __i < __num_bit_maps; ++__i)
00582     __temp[__i] = __bit_mask;
00583 
00584       //On some implementations, operator new might throw bad_alloc, or
00585       //malloc might fail if the size passed is too large; therefore, we
00586       //limit the size passed to malloc or operator new.
00587       _S_block_size *= 2;
00588     }
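    //Sketch of the layout of a newly acquired block, low addresses on
    //the left (assuming a 32-bit unsigned int, so each bitmap word
    //tracks 32 objects):
    //
    //  [use count][bitmap N-1]...[bitmap 1][bitmap 0][obj 0]...[obj _S_block_size-1]
    //
    //where N == __num_bit_maps and bitmap 0, the word just below
    //__bp.first, covers objects 0 through 31. The bitmaps are thus in
    //reverse order relative to the objects they track, which is why
    //_Ffit_finder and _S_deallocate_single_object walk downwards from
    //__bp.first - 1.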
00589 
00590     static _BPVector _S_mem_blocks;
00591     static unsigned int _S_block_size;
00592     static __gnu_cxx::__aux_balloc::_Bit_map_counter<pointer, _BPVec_allocator_type> _S_last_request;
00593     static typename _BPVector::size_type _S_last_dealloc_index;
00594 #if defined __GTHREADS
00595     static _Mutex _S_mut;
00596 #endif
00597 
00598     //Complexity: The worst case complexity is O(N), but that case is
00599     //hardly ever hit. If and when this particular case is encountered,
00600     //the next few calls are guaranteed to have a worst case
00601     //complexity of O(1). That is why this function performs very well
00602     //on average; its cost is commonly referred to as amortized
00603     //constant time.
00604     static pointer _S_allocate_single_object()
00605     {
00606 #if defined __GTHREADS
00607       _Lock __bit_lock(&_S_mut);
00608 #endif
00609 
00610       //The algorithm works as follows: the _S_last_request variable
00611       //points to the last accessed bitmap. Starting there, we try to
00612       //find a free block in the current bitmap, or in the
00613       //succeeding bitmaps until the last bitmap is reached. If no free
00614       //block turns up, we resort to the First Fit method.
00615 
00616       //WARNING: Do not re-order the condition in the while statement
00617       //below, because it relies on C++'s short-circuit
00618       //evaluation. The value returned by _S_last_request._M_get() is
00619       //NOT dereferenceable if _S_last_request._M_finished() returns
00620       //true. Tinkering with this would inevitably lead to a NULL
00621       //pointer dereference.
00622       while (_S_last_request._M_finished() == false && (*(_S_last_request._M_get()) == 0))
00623     {
00624       _S_last_request.operator++();
00625     }
00626 
00627       if (__builtin_expect(_S_last_request._M_finished() == true, false))
00628     {
00629       //Fall Back to First Fit algorithm.
00630       typedef typename __gnu_cxx::__aux_balloc::_Ffit_finder<pointer, _BPVec_allocator_type> _FFF;
00631       _FFF __fff;
00632       typedef typename _BPVector::iterator _BPiter;
00633       _BPiter __bpi = std::find_if(_S_mem_blocks.begin(), _S_mem_blocks.end(), 
00634                       __gnu_cxx::__aux_balloc::_Functor_Ref<_FFF>(__fff));
00635 
00636       if (__bpi != _S_mem_blocks.end())
00637         {
00638           //Search was successful. Ok, now mark the first bit from
00639           //the right as 0, meaning Allocated. This bit is obtained
00640           //by calling _M_get() on __fff.
00641           unsigned int __nz_bit = _Bit_scan_forward(*__fff._M_get());
00642           _S_bit_allocate(__fff._M_get(), __nz_bit);
00643 
00644           _S_last_request._M_reset(__bpi - _S_mem_blocks.begin());
00645 
00646           //Now, get the address of the bit we marked as allocated.
00647           pointer __ret_val = __bpi->first + __fff._M_offset() + __nz_bit;
00648           unsigned int *__puse_count = reinterpret_cast<unsigned int*>(__bpi->first) - 
00649         (__gnu_cxx::__aux_balloc::__balloc_num_bit_maps(*__bpi) + 1);
00650           ++(*__puse_count);
00651           return __ret_val;
00652         }
00653       else
00654         {
00655           //Search was unsuccessful. We add more memory to the pool
00656           //by calling _S_refill_pool().
00657           _S_refill_pool();
00658 
00659           //Reset the _S_last_request structure to the first free
00660           //block's bitmap.
00661           _S_last_request._M_reset(_S_mem_blocks.size() - 1);
00662 
00663           //Now, mark that bit as allocated.
00664         }
00665     }
00666       //_S_last_request holds a pointer to a valid bitmap that points
00667       //to a free block in memory.
00668       unsigned int __nz_bit = _Bit_scan_forward(*_S_last_request._M_get());
00669       _S_bit_allocate(_S_last_request._M_get(), __nz_bit);
00670 
00671       pointer __ret_val = _S_last_request._M_base() + _S_last_request._M_offset() + __nz_bit;
00672 
00673       unsigned int *__puse_count = reinterpret_cast<unsigned int*>
00674     (_S_mem_blocks[_S_last_request._M_where()].first) - 
00675     (__gnu_cxx::__aux_balloc::__balloc_num_bit_maps(_S_mem_blocks[_S_last_request._M_where()]) + 1);
00676       ++(*__puse_count);
00677       return __ret_val;
00678     }
00679 
00680     //Complexity: O(lg(N)), but the worst case is hit quite often! I
00681     //need to do something about this. I'll be able to work on it, only
00682     //when I have some solid figures from a few real apps.
00683     static void _S_deallocate_single_object(pointer __p) throw()
00684     {
00685 #if defined __GTHREADS
00686       _Lock __bit_lock(&_S_mut);
00687 #endif
00688 
00689       typedef typename _BPVector::iterator _Iterator;
00690       typedef typename _BPVector::difference_type _Difference_type;
00691 
00692       _Difference_type __diff;
00693       int __displacement;
00694 
00695       assert(_S_last_dealloc_index >= 0);
00696 
00697       if (__gnu_cxx::__aux_balloc::_Inclusive_between<pointer>(__p)(_S_mem_blocks[_S_last_dealloc_index]))
00698     {
00699       assert(_S_last_dealloc_index <= _S_mem_blocks.size() - 1);
00700 
00701       //Initial Assumption was correct!
00702       __diff = _S_last_dealloc_index;
00703       __displacement = __p - _S_mem_blocks[__diff].first;
00704     }
00705       else
00706     {
00707       _Iterator _iter = (std::find_if(_S_mem_blocks.begin(), _S_mem_blocks.end(), 
00708                       __gnu_cxx::__aux_balloc::_Inclusive_between<pointer>(__p)));
00709       assert(_iter != _S_mem_blocks.end());
00710 
00711       __diff = _iter - _S_mem_blocks.begin();
00712       __displacement = __p - _S_mem_blocks[__diff].first;
00713       _S_last_dealloc_index = __diff;
00714     }
00715 
00716       //Get the position of the iterator that has been found.
00717       const unsigned int __rotate = __displacement % _Bits_Per_Block;
00718       unsigned int *__bit_mapC = reinterpret_cast<unsigned int*>(_S_mem_blocks[__diff].first) - 1;
00719       __bit_mapC -= (__displacement / _Bits_Per_Block);
00720       
00721       _S_bit_free(__bit_mapC, __rotate);
00722       unsigned int *__puse_count = reinterpret_cast<unsigned int*>
00723     (_S_mem_blocks[__diff].first) - 
00724     (__gnu_cxx::__aux_balloc::__balloc_num_bit_maps(_S_mem_blocks[__diff]) + 1);
00725 
00726       assert(*__puse_count != 0);
00727 
00728       --(*__puse_count);
00729 
00730       if (__builtin_expect(*__puse_count == 0, false))
00731     {
00732       _S_block_size /= 2;
00733       
00734       //We may safely remove this block.
00735       _Block_pair __bp = _S_mem_blocks[__diff];
00736       _S_insert_free_list(__puse_count);
00737       _S_mem_blocks.erase(_S_mem_blocks.begin() + __diff);
00738 
00739       //We reset the _S_last_request variable to reflect the erased
00740       //block. We do this to protect future requests: once the last
00741       //block in a particular memory chunk has been freed, the chunk
00742       //is returned to the free list and its entry is erased from the
00743       //vector, so the size of the vector shrinks by 1 and
00744       //_S_last_request could otherwise refer to a stale index.
00745       if ((_Difference_type)_S_last_request._M_where() >= __diff--)
00746         {
00747           _S_last_request._M_reset(__diff);
00748           //          assert(__diff >= 0);
00749         }
00750 
00751       //The index into the vector of the region of memory that might
00752       //hold the next address passed to deallocate may have been
00753       //invalidated by the above erase procedure being called on the
00754       //vector, so we try to restore this invariant here as
00755       //well.
00756       if (_S_last_dealloc_index >= _S_mem_blocks.size())
00757         {
00758           _S_last_dealloc_index = (__diff != -1 ? __diff : 0);
00759           assert(_S_last_dealloc_index >= 0);
00760         }
00761     }
00762     }
00763 
00764   public:
00765     bitmap_allocator() throw()
00766     { }
00767 
00768     bitmap_allocator(const bitmap_allocator&) { }
00769 
00770     template <typename _Tp1> bitmap_allocator(const bitmap_allocator<_Tp1>&) throw()
00771     { }
00772 
00773     ~bitmap_allocator() throw()
00774     { }
00775 
00776     //Complexity: O(1), but internally the complexity depends upon the
00777     //complexity of the function(s) _S_allocate_single_object and
00778     //_S_memory_get.
00779     pointer allocate(size_type __n)
00780     {
00781       if (__builtin_expect(__n == 1, true))
00782     return _S_allocate_single_object();
00783       else
00784     return reinterpret_cast<pointer>(_S_memory_get(__n * sizeof(value_type)));
00785     }
00786 
00787     //Complexity: Worst case complexity is O(N) where N is the number of
00788     //blocks of size sizeof(value_type) within the free lists that the
00789     //allocator holds. However, this worst case is hit only when the
00790     //user supplies a bogus argument to hint. If the hint argument is
00791     //sensible, then the complexity drops to O(lg(N)), and in extreme
00792     //cases, even drops to as low as O(1). So, if the user supplied
00793     //argument is good, then this function performs very well.
00794     pointer allocate(size_type __n, typename bitmap_allocator<void>::const_pointer)
00795     {
00796       return allocate(__n);
00797     }
00798 
00799     void deallocate(pointer __p, size_type __n) throw()
00800     {
00801       if (__builtin_expect(__n == 1, true))
00802     _S_deallocate_single_object(__p);
00803       else
00804     _S_memory_put(__p);
00805     }
00806 
00807     pointer address(reference r) const { return &r; }
00808     const_pointer address(const_reference r) const { return &r; }
00809 
00810     size_type max_size(void) const throw() { return (size_type()-1)/sizeof(value_type); }
00811 
00812     void construct (pointer p, const_reference __data)
00813     {
00814       ::new(p) value_type(__data);
00815     }
00816 
00817     void destroy (pointer p)
00818     {
00819       p->~value_type();
00820     }
00821 
00822   };
00823 
00824   template <typename _Tp>
00825   typename bitmap_allocator<_Tp>::_BPVector bitmap_allocator<_Tp>::_S_mem_blocks;
00826 
00827   template <typename _Tp>
00828   unsigned int bitmap_allocator<_Tp>::_S_block_size = bitmap_allocator<_Tp>::_Bits_Per_Block;
00829 
00830   template <typename _Tp>
00831   typename __gnu_cxx::bitmap_allocator<_Tp>::_BPVector::size_type 
00832   bitmap_allocator<_Tp>::_S_last_dealloc_index = 0;
00833 
00834   template <typename _Tp>
00835   __gnu_cxx::__aux_balloc::_Bit_map_counter 
00836   <typename bitmap_allocator<_Tp>::pointer, typename bitmap_allocator<_Tp>::_BPVec_allocator_type> 
00837   bitmap_allocator<_Tp>::_S_last_request(_S_mem_blocks);
00838 
00839 #if defined __GTHREADS
00840   template <typename _Tp>
00841   __gnu_cxx::_Mutex
00842   bitmap_allocator<_Tp>::_S_mut;
00843 #endif
00844 
00845   template <typename _Tp1, typename _Tp2>
00846   bool operator== (const bitmap_allocator<_Tp1>&, const bitmap_allocator<_Tp2>&) throw()
00847   {
00848     return true;
00849   }
00850   
00851   template <typename _Tp1, typename _Tp2>
00852   bool operator!= (const bitmap_allocator<_Tp1>&, const bitmap_allocator<_Tp2>&) throw()
00853   {
00854     return false;
00855   }
00856 }
00857 
00858 
00859 #endif //_BITMAP_ALLOCATOR_H
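A minimal usage sketch (assuming the header is installed as <ext/bitmap_allocator.h>): the allocator is tuned for containers that allocate one object at a time, since allocate(1) takes the bitmapped fast path while larger requests simply fall back to operator new.

  #include <list>
  #include <ext/bitmap_allocator.h>

  int main()
  {
    // Each push_back allocates a single list node, exercising
    // _S_allocate_single_object(); destroying the list hands the
    // nodes back through _S_deallocate_single_object().
    std::list<int, __gnu_cxx::bitmap_allocator<int> > __lst;
    for (int __i = 0; __i < 1024; ++__i)
      __lst.push_back(__i);
    return 0;
  }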
