base/malloc/malloc.c

00001 /*!\file malloc.c
00002  * \brief Dynamic memory allocation services.
00003  *
00004  * Copyright (C) 2007 Paolo Mantegazza <mantegazza@aero.polimi.it>.
00005  * Specific parts below are copyrighted/licensed by their respective authors.
00006  *
00007  * RTAI is free software; you can redistribute it and/or modify it
00008  * under the terms of the GNU General Public License as published by
00009  * the Free Software Foundation; either version 2 of the License, or
00010  * (at your option) any later version.
00011  *
00012  * RTAI is distributed in the hope that it will be useful, but WITHOUT
00013  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
00014  * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public
00015  * License for more details.
00016  *
00017  * You should have received a copy of the GNU General Public License
00018  * along with RTAI; if not, write to the Free Software Foundation,
00019  * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
00020  *
00021  * \ingroup shm
00022  */
00023 
00024 /*!
00025  * @addtogroup shm
00026  *@{*/
00027 
00028 #include <linux/module.h>
00029 #include <linux/kernel.h>
00030 #include <linux/version.h>
00031 #include <linux/slab.h>
00032 
00033 #include <rtai_config.h>
00034 #include <asm/rtai.h>
00035 #include <rtai_malloc.h>
00036 #include <rtai_shm.h>
00037 
00038 int rtai_global_heap_size = RTHEAP_GLOBALSZ;
00039 RTAI_MODULE_PARM(rtai_global_heap_size, int);
00040 
00041 void *rtai_global_heap_adr = NULL;
00042 
00043 rtheap_t rtai_global_heap;  /* Global system heap */
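/*
 * The global heap size defaults to RTHEAP_GLOBALSZ; the RTAI_MODULE_PARM
 * declaration above lets it be overridden at load time, for example
 * (module name assumed here for illustration only, it is not defined in
 * this file):
 *
 *     insmod rtai_malloc.ko rtai_global_heap_size=4194304
 *
 * __rtai_heap_init() below rounds the value up to a multiple of PAGE_SIZE
 * before the heap is created.
 */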
00044 
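/*
 * Extent back-ends: with suprt == 0 an extent comes from vmalloc() and each
 * page is marked reserved (the usual prerequisite for remapping it to user
 * space); a non-zero suprt is passed straight through as the GFP flags of
 * kmalloc(), with __get_free_pages() used above KMALLOC_LIMIT.
 */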
00045 static void *alloc_extent(u_long size, int suprt)
00046 {
00047     caddr_t p;
00048     if (!suprt) {
00049         if ((p = (caddr_t)vmalloc(size))) {
00050             unsigned long _p = (unsigned long)p;
00051 //          printk("RTAI[malloc]: vmalloced extent %p, size %lu.\n", p, size);
00052             for (; size > 0; size -= PAGE_SIZE, _p += PAGE_SIZE) {
00053 //              mem_map_reserve(virt_to_page(UVIRT_TO_KVA(_p)));
00054                 SetPageReserved(vmalloc_to_page((void *)_p));
00055             }
00056         }
00057     } else {
00058         if (size <= KMALLOC_LIMIT) {
00059             p = kmalloc(size, suprt);
00060         } else {
00061             p = (void*)__get_free_pages(suprt, get_order(size));
00062         }
00063 //      p = (caddr_t)kmalloc(size, suprt);
00064 //      printk("RTAI[malloc]: kmalloced extent %p, size %lu.\n", p, size);
00065     }
00066     if (p) {
00067         memset(p, 0, size);
00068     }
00069     return p;
00070 }
00071 
00072 static void free_extent(void *p, u_long size, int suprt)
00073 {
00074     if (!suprt) {
00075         unsigned long _p = (unsigned long)p;
00076 
00077 //      printk("RTAI[malloc]: vfreed extent %p, size %lu.\n", p, size);
00078         for (; size > 0; size -= PAGE_SIZE, _p += PAGE_SIZE) {
00079 //          mem_map_unreserve(virt_to_page(UVIRT_TO_KVA(_p)));
00080             ClearPageReserved(vmalloc_to_page((void *)_p));
00081         }
00082         vfree(p);
00083     } else {
00084 //      printk("RTAI[malloc]: kfreed extent %p, size %lu.\n", p, size);
00085 //      kfree(p);
00086         if (size <= KMALLOC_LIMIT) {
00087             kfree(p);
00088         } else {
00089             free_pages((unsigned long)p, get_order(size));
00090         }
00091     }
00092 }
00093 
00094 #ifndef CONFIG_RTAI_USE_TLSF
00095 #define CONFIG_RTAI_USE_TLSF 0
00096 #endif
00097 
00098 #if CONFIG_RTAI_USE_TLSF
00099 
00100 static size_t init_memory_pool(size_t mem_pool_size, void *mem_pool);
00101 static inline void *malloc_ex(size_t size, void *mem_pool);
00102 static inline void free_ex(void *ptr, void *mem_pool);
00103 
00104 static void init_extent(rtheap_t *heap, rtextent_t *extent)
00105 {
00106     INIT_LIST_HEAD(&extent->link);
00107     extent->membase = (caddr_t)extent + sizeof(rtextent_t);
00108     extent->memlim  = (caddr_t)extent + heap->extentsize;
00109 }
00110 
00111 int rtheap_init(rtheap_t *heap, void *heapaddr, u_long heapsize, u_long pagesize, int suprt)
00112 {
00113     rtextent_t *extent;
00114 
00115     INIT_LIST_HEAD(&heap->extents);
00116     spin_lock_init(&heap->lock);
00117 
00118     heap->extentsize = heapsize;
00119     if (!heapaddr && suprt) {
00120         if (heapsize <= KMALLOC_LIMIT || (heapaddr = alloc_extent(heapsize, suprt)) == NULL) {
00121             heap->extentsize = KMALLOC_LIMIT;
00122             heapaddr = NULL;
00123         }
00124     }
00125 
00126     if (heapaddr) {
00127         extent = (rtextent_t *)heapaddr;
00128         init_extent(heap, extent);
00129         list_add_tail(&extent->link, &heap->extents);
00130         return init_memory_pool(heapsize - sizeof(rtextent_t), heapaddr + sizeof(rtextent_t)) == (size_t)-1 ? RTHEAP_NOMEM : 0;
00131     } else {
00132         u_long init_size = 0;
00133         while (init_size < heapsize) {
00134             if (!(extent = (rtextent_t *)alloc_extent(heap->extentsize, suprt)) || init_memory_pool(heap->extentsize - sizeof(rtextent_t), (void *)extent + sizeof(rtextent_t)) == (size_t)-1) {
00135                 struct list_head *holder, *nholder;
00136                 list_for_each_safe(holder, nholder, &heap->extents) {
00137                     extent = list_entry(holder, rtextent_t, link);
00138                     free_extent(extent, heap->extentsize, suprt);
00139                 }
00140                 return RTHEAP_NOMEM;
00141             }
00142             init_extent(heap, extent);
00143             list_add_tail(&extent->link, &heap->extents);
00144             init_size += heap->extentsize;
00145         }
00146     }
00147     return 0;
00148 }
00149 
00150 void rtheap_destroy(rtheap_t *heap, int suprt)
00151 {
00152     struct list_head *holder, *nholder;
00153     list_for_each_safe(holder, nholder, &heap->extents) {
00154         free_extent(list_entry(holder, rtextent_t, link), heap->extentsize, suprt);
00155     }
00156 }
00157 
00158 void *rtheap_alloc(rtheap_t *heap, u_long size, int mode)
00159 {
00160     void *adr = NULL;
00161     struct list_head *holder;
00162     unsigned long flags;
00163 
00164     if (!size) {
00165         return NULL;
00166     }
00167 
00168     flags = rt_spin_lock_irqsave(&heap->lock);
00169     list_for_each(holder, &heap->extents) {
00170         if ((adr = malloc_ex(size, list_entry(holder, rtextent_t, link)->membase)) != NULL) {
00171             break;
00172         }
00173     }
00174     rt_spin_unlock_irqrestore(flags, &heap->lock);
00175     return adr;
00176 }
00177 
00178 int rtheap_free(rtheap_t *heap, void *block)
00179 {
00180     unsigned long flags;
00181     struct list_head *holder;
00182 
00183     flags = rt_spin_lock_irqsave(&heap->lock);
00184     list_for_each(holder, &heap->extents) {
00185         rtextent_t *extent;
00186         extent = list_entry(holder, rtextent_t, link);
00187         if ((caddr_t)block < extent->memlim && (caddr_t)block >= extent->membase) {
00188             free_ex(block, extent->membase);
00189             break;
00190         }
00191     }
00192     rt_spin_unlock_irqrestore(flags, &heap->lock);
00193     return 0;
00194 }
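/*
 * A minimal usage sketch of the rtheap front-end above; the buffer name,
 * sizes and calling context are illustrative only:
 *
 * \code
 * static char heapmem[64 * 1024] __attribute__((aligned(16)));
 * static rtheap_t myheap;
 *
 * int my_heap_demo(void)
 * {
 *     void *p;
 *     int err = rtheap_init(&myheap, heapmem, sizeof(heapmem), 512, 0);
 *     if (err) {
 *         return err;                // RTHEAP_PARAM or RTHEAP_NOMEM
 *     }
 *     p = rtheap_alloc(&myheap, 128, 0);
 *     if (p) {
 *         rtheap_free(&myheap, p);
 *     }
 *     rtheap_destroy(&myheap, 0);
 *     return 0;
 * }
 * \endcode
 */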
00195 
00196 /******************************** BEGIN TLSF ********************************/
00197 
00198 /* 
00199  * Two Levels Segregate Fit memory allocator (TLSF)
00200  * Version 2.4.2
00201  *
00202  * Written by Miguel Masmano Tello <mimastel@doctor.upv.es>
00203  *
00204  * Thanks to Ismael Ripoll for his suggestions and reviews
00205  *
00206  * Copyright (C) 2008, 2007, 2006, 2005, 2004
00207  *
00208  * This code is released using a dual license strategy: GPL/LGPL
00209  * You can choose the licence that better fits your requirements.
00210  *
00211  * Released under the terms of the GNU General Public License Version 2.0
00212  * Released under the terms of the GNU Lesser General Public License Version 2.1
00213  *
00214  * Adaptation to RTAI by Paolo Mantegazza <mantegazza@aero.polimi.it>,
00215  * with TLSF downloaded from: http://rtportal.upv.es/rtmalloc/.
00216  *
00217  */
00218 
00219 /*
00220  * Code contributions:
00221  *
00222  * (Jul 28 2007)  Herman ten Brugge <hermantenbrugge@home.nl>:
00223  *
00224  * - Add 64 bit support. It now runs on x86_64 and solaris64.
00225  * - I also tested this on vxworks/32, solaris/32 and i386/32 processors.
00226  * - Remove assembly code. I could not measure any performance difference 
00227  *   on my core2 processor. This also makes the code more portable.
00228  * - Moved defines/typedefs from tlsf.h to tlsf.c
00229  * - Changed MIN_BLOCK_SIZE to sizeof (free_ptr_t) and BHDR_OVERHEAD to
00230  *   (sizeof (bhdr_t) - MIN_BLOCK_SIZE). This does not change the fact
00231  *   that the minimum size is still sizeof (bhdr_t).
00232  *
00233  * - Changed all C++ comment style to C style. (// -> /.* ... *./)
00234  * - Used ls_bit instead of ffs and ms_bit instead of fls. I did this to 
00235  *   avoid confusion with the standard ffs function which returns 
00236  *   different values.
00237  * - Created set_bit/clear_bit functions because they are not present 
00238  *   on x86_64.
00239  * - Added locking support + extra file target.h to show how to use it.
00240  * - Added get_used_size function (REMOVED in 2.4)
00241  * - Added rtl_realloc and rtl_calloc function
00242  * - Implemented realloc clever support.
00243  * - Added some test code in the example directory.
00244  *        
00245  *
00246  * (Oct 23 2006) Adam Scislowicz: 
00247  *
00248  * - Support for ARMv5 implemented
00249  *
00250  */
00251 
00252 /*#define USE_SBRK        (0) */
00253 /*#define USE_MMAP        (0) */
00254 
00255 #ifndef TLSF_USE_LOCKS
00256 #define TLSF_USE_LOCKS  (0)
00257 #endif
00258 
00259 #ifndef TLSF_STATISTIC
00260 #define TLSF_STATISTIC  (1)
00261 #endif
00262 
00263 #ifndef USE_MMAP
00264 #define USE_MMAP    (0)
00265 #endif
00266 
00267 #ifndef USE_SBRK
00268 #define USE_SBRK    (0)
00269 #endif
00270 
00271 
00272 #if TLSF_USE_LOCKS
00273 #include "target.h"
00274 #else
00275 #define TLSF_CREATE_LOCK(_unused_)   do{}while(0)
00276 #define TLSF_DESTROY_LOCK(_unused_)  do{}while(0) 
00277 #define TLSF_ACQUIRE_LOCK(_unused_)  do{}while(0)
00278 #define TLSF_RELEASE_LOCK(_unused_)  do{}while(0)
00279 #endif
00280 
00281 #if TLSF_STATISTIC
00282 #define TLSF_ADD_SIZE(tlsf, b) do {                                 \
00283         tlsf->used_size += (b->size & BLOCK_SIZE) + BHDR_OVERHEAD;  \
00284         if (tlsf->used_size > tlsf->max_size) {                     \
00285             tlsf->max_size = tlsf->used_size;                       \
00286         }                                       \
00287     } while(0)
00288 
00289 #define TLSF_REMOVE_SIZE(tlsf, b) do {                              \
00290         tlsf->used_size -= (b->size & BLOCK_SIZE) + BHDR_OVERHEAD;  \
00291     } while(0)
00292 #else
00293 #define TLSF_ADD_SIZE(tlsf, b)       do{}while(0)
00294 #define TLSF_REMOVE_SIZE(tlsf, b)    do{}while(0)
00295 #endif
00296 
00297 #if USE_MMAP || USE_SBRK
00298 #include <unistd.h>
00299 #endif
00300 
00301 #if USE_MMAP
00302 #include <sys/mman.h>
00303 #endif
00304 
00305 #if !defined(__GNUC__)
00306 #ifndef __inline__
00307 #define __inline__
00308 #endif
00309 #endif
00310 
00311 /* The debug functions can only be used when _DEBUG_TLSF_ is set. */
00312 #ifndef _DEBUG_TLSF_
00313 #define _DEBUG_TLSF_  (0)
00314 #endif
00315 
00316 /*************************************************************************/
00317 /* Definition of the structures used by TLSF */
00318 
00319 
00320 /* Some IMPORTANT TLSF parameters */
00321 /* Unlike previous TLSF versions, these are now static */
00322 #define BLOCK_ALIGN (sizeof(void *) * 2)
00323 
00324 #define MAX_FLI     (30)
00325 #define MAX_LOG2_SLI    (5)
00326 #define MAX_SLI     (1 << MAX_LOG2_SLI)     /* MAX_SLI = 2^MAX_LOG2_SLI */
00327 
00328 #define FLI_OFFSET  (6)     /* the tlsf structure will only manage blocks */
00329 /* bigger than 128 bytes */
00330 #define SMALL_BLOCK (128)
00331 #define REAL_FLI    (MAX_FLI - FLI_OFFSET)
00332 #define MIN_BLOCK_SIZE  (sizeof (free_ptr_t))
00333 #define BHDR_OVERHEAD   (sizeof (bhdr_t) - MIN_BLOCK_SIZE)
00334 #define TLSF_SIGNATURE  (0x2A59FA59)
00335 
00336 #define PTR_MASK    (sizeof(void *) - 1)
00337 #undef BLOCK_SIZE
00338 #define BLOCK_SIZE  (0xFFFFFFFF - PTR_MASK)
00339 
00340 #define GET_NEXT_BLOCK(_addr, _r) ((bhdr_t *) ((char *) (_addr) + (_r)))
00341 #define MEM_ALIGN         ((BLOCK_ALIGN) - 1)
00342 #define ROUNDUP_SIZE(_r)          (((_r) + MEM_ALIGN) & ~MEM_ALIGN)
00343 #define ROUNDDOWN_SIZE(_r)        ((_r) & ~MEM_ALIGN)
00344 #define ROUNDUP(_x, _v)           ((((~(_x)) + 1) & ((_v)-1)) + (_x))
00345 
00346 #define BLOCK_STATE (0x1)
00347 #define PREV_STATE  (0x2)
00348 
00349 /* bit 0 of the block size */
00350 #define FREE_BLOCK  (0x1)
00351 #define USED_BLOCK  (0x0)
00352 
00353 /* bit 1 of the block size */
00354 #define PREV_FREE   (0x2)
00355 #define PREV_USED   (0x0)
00356 
00357 
00358 #define DEFAULT_AREA_SIZE (1024*10)
00359 
00360 #if USE_MMAP
00361 #define PAGE_SIZE (getpagesize())
00362 #endif
00363 
00364 #define PRINT_MSG(fmt, args...) rt_printk(fmt, ## args)
00365 #define ERROR_MSG(fmt, args...) rt_printk(fmt, ## args)
00366 
00367 typedef unsigned int u32_t;     /* NOTE: Make sure that this type is 4 bytes long on your computer */
00368 typedef unsigned char u8_t;     /* NOTE: Make sure that this type is 1 byte on your computer */
00369 
00370 typedef struct free_ptr_struct {
00371     struct bhdr_struct *prev;
00372     struct bhdr_struct *next;
00373 } free_ptr_t;
00374 
00375 typedef struct bhdr_struct {
00376     /* This pointer is just valid if the first bit of size is set */
00377     struct bhdr_struct *prev_hdr;
00378     /* The size is stored in bytes */
00379     size_t size;                /* bit 0 indicates whether the block is used and */
00380     /* bit 1 allows to know whether the previous block is free */
00381     union {
00382         struct free_ptr_struct free_ptr;
00383         u8_t buffer[1];         /*sizeof(struct free_ptr_struct)]; */
00384     } ptr;
00385 } bhdr_t;
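/*
 * Size field encoding example: a free 304-byte block whose physically
 * preceding block is in use stores size = 304 | FREE_BLOCK | PREV_USED,
 * i.e. 0x131; size & BLOCK_SIZE recovers the 304 bytes, while
 * size & BLOCK_STATE and size & PREV_STATE test the two flag bits.
 */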
00386 
00387 /* This structure is embedded at the beginning of each area, giving us
00388  * enough information to cope with a set of areas */
00389 
00390 typedef struct area_info_struct {
00391     bhdr_t *end;
00392     struct area_info_struct *next;
00393 } area_info_t;
00394 
00395 typedef struct TLSF_struct {
00396     /* the TLSF's structure signature */
00397     u32_t tlsf_signature;
00398 
00399 #if TLSF_USE_LOCKS
00400     TLSF_MLOCK_T lock;
00401 #endif
00402 
00403 #if TLSF_STATISTIC
00404     /* These can not be calculated outside tlsf because we
00405      * do not know the sizes when freeing/reallocing memory. */
00406     size_t used_size;
00407     size_t max_size;
00408 #endif
00409 
00410     /* A linked list holding all the existing areas */
00411     area_info_t *area_head;
00412 
00413     /* the first-level bitmap */
00414     /* This array should have a size of REAL_FLI bits */
00415     u32_t fl_bitmap;
00416 
00417     /* the second-level bitmap */
00418     u32_t sl_bitmap[REAL_FLI];
00419 
00420     bhdr_t *matrix[REAL_FLI][MAX_SLI];
00421 } tlsf_t;
00422 
00423 
00424 /******************************************************************/
00425 /**************     Helping functions    **************************/
00426 /******************************************************************/
00427 static __inline__ void tlsf_set_bit(int nr, u32_t * addr);
00428 static __inline__ void tlsf_clear_bit(int nr, u32_t * addr);
00429 static __inline__ int ls_bit(int x);
00430 static __inline__ int ms_bit(int x);
00431 static __inline__ void MAPPING_SEARCH(size_t * _r, int *_fl, int *_sl);
00432 static __inline__ void MAPPING_INSERT(size_t _r, int *_fl, int *_sl);
00433 static __inline__ bhdr_t *FIND_SUITABLE_BLOCK(tlsf_t * _tlsf, int *_fl, int *_sl);
00434 static __inline__ bhdr_t *process_area(void *area, size_t size);
00435 #if USE_SBRK || USE_MMAP
00436 static __inline__ void *get_new_area(size_t * size);
00437 #endif
00438 
00439 static const int table[] = {
00440     -1, 0, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4,
00441     4, 4,
00442     4, 4, 4, 4, 4, 4, 4,
00443     5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
00444     5,
00445     5, 5, 5, 5, 5, 5, 5,
00446     6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
00447     6,
00448     6, 6, 6, 6, 6, 6, 6,
00449     6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
00450     6,
00451     6, 6, 6, 6, 6, 6, 6,
00452     7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
00453     7,
00454     7, 7, 7, 7, 7, 7, 7,
00455     7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
00456     7,
00457     7, 7, 7, 7, 7, 7, 7,
00458     7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
00459     7,
00460     7, 7, 7, 7, 7, 7, 7,
00461     7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
00462     7,
00463     7, 7, 7, 7, 7, 7, 7
00464 };
00465 
00466 static __inline__ int ls_bit(int i)
00467 {
00468     unsigned int a;
00469     unsigned int x = i & -i;
00470 
00471     a = x <= 0xffff ? (x <= 0xff ? 0 : 8) : (x <= 0xffffff ? 16 : 24);
00472     return table[x >> a] + a;
00473 }
00474 
00475 static __inline__ int ms_bit(int i)
00476 {
00477     unsigned int a;
00478     unsigned int x = (unsigned int) i;
00479 
00480     a = x <= 0xffff ? (x <= 0xff ? 0 : 8) : (x <= 0xffffff ? 16 : 24);
00481     return table[x >> a] + a;
00482 }
00483 
00484 static __inline__ void tlsf_set_bit(int nr, u32_t * addr)
00485 {
00486     addr[nr >> 5] |= 1 << (nr & 0x1f);
00487 }
00488 
00489 static __inline__ void tlsf_clear_bit(int nr, u32_t * addr)
00490 {
00491     addr[nr >> 5] &= ~(1 << (nr & 0x1f));
00492 }
00493 
00494 static __inline__ void MAPPING_SEARCH(size_t * _r, int *_fl, int *_sl)
00495 {
00496     int _t;
00497 
00498     if (*_r < SMALL_BLOCK) {
00499         *_fl = 0;
00500         *_sl = *_r / (SMALL_BLOCK / MAX_SLI);
00501     } else {
00502         _t = (1 << (ms_bit(*_r) - MAX_LOG2_SLI)) - 1;
00503         *_r = *_r + _t;
00504         *_fl = ms_bit(*_r);
00505         *_sl = (*_r >> (*_fl - MAX_LOG2_SLI)) - MAX_SLI;
00506         *_fl -= FLI_OFFSET;
00507         /*if ((*_fl -= FLI_OFFSET) < 0) // FL will always be > 0!
00508          *_fl = *_sl = 0;
00509          */
00510         *_r &= ~_t;
00511     }
00512 }
00513 
00514 static __inline__ void MAPPING_INSERT(size_t _r, int *_fl, int *_sl)
00515 {
00516     if (_r < SMALL_BLOCK) {
00517         *_fl = 0;
00518         *_sl = _r / (SMALL_BLOCK / MAX_SLI);
00519     } else {
00520         *_fl = ms_bit(_r);
00521         *_sl = (_r >> (*_fl - MAX_LOG2_SLI)) - MAX_SLI;
00522         *_fl -= FLI_OFFSET;
00523     }
00524 }
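/*
 * Worked example of the two-level mapping (SMALL_BLOCK = 128, FLI_OFFSET = 6,
 * MAX_LOG2_SLI = 5): a 60-byte block maps to fl = 0, sl = 60 / 4 = 15 via
 * MAPPING_INSERT; a 460-byte block has ms_bit(460) = 8, so fl = 8 - 6 = 2 and
 * sl = (460 >> 3) - 32 = 25.  MAPPING_SEARCH additionally rounds a request up
 * to the next list boundary: a 460-byte request becomes (460 + 7) & ~7 = 464
 * and maps to fl = 2, sl = 26.
 */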
00525 
00526 
00527 static __inline__ bhdr_t *FIND_SUITABLE_BLOCK(tlsf_t * _tlsf, int *_fl, int *_sl)
00528 {
00529     u32_t _tmp = _tlsf->sl_bitmap[*_fl] & (~0 << *_sl);
00530     bhdr_t *_b = NULL;
00531 
00532     if (_tmp) {
00533         *_sl = ls_bit(_tmp);
00534         _b = _tlsf->matrix[*_fl][*_sl];
00535     } else {
00536         *_fl = ls_bit(_tlsf->fl_bitmap & (~0 << (*_fl + 1)));
00537         if (*_fl > 0) {         /* likely */
00538             *_sl = ls_bit(_tlsf->sl_bitmap[*_fl]);
00539             _b = _tlsf->matrix[*_fl][*_sl];
00540         }
00541     }
00542     return _b;
00543 }
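/*
 * FIND_SUITABLE_BLOCK first looks for a non-empty free list at or above sl
 * within the requested first level; failing that, it falls back to the
 * smallest list of the next non-empty first level, so the returned block is
 * always large enough for the (already rounded up) request.
 */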
00544 
00545 
00546 #define EXTRACT_BLOCK_HDR(_b, _tlsf, _fl, _sl) {                    \
00547         _tlsf -> matrix [_fl] [_sl] = _b -> ptr.free_ptr.next;      \
00548         if (_tlsf -> matrix[_fl][_sl])                              \
00549             _tlsf -> matrix[_fl][_sl] -> ptr.free_ptr.prev = NULL;  \
00550         else {                                                      \
00551             tlsf_clear_bit (_sl, &_tlsf -> sl_bitmap [_fl]);                \
00552             if (!_tlsf -> sl_bitmap [_fl])                          \
00553                 tlsf_clear_bit (_fl, &_tlsf -> fl_bitmap);              \
00554         }                                                           \
00555         _b -> ptr.free_ptr = (free_ptr_t) {NULL, NULL};             \
00556     }
00557 
00558 
00559 #define EXTRACT_BLOCK(_b, _tlsf, _fl, _sl) {                            \
00560         if (_b -> ptr.free_ptr.next)                                    \
00561             _b -> ptr.free_ptr.next -> ptr.free_ptr.prev = _b -> ptr.free_ptr.prev; \
00562         if (_b -> ptr.free_ptr.prev)                                    \
00563             _b -> ptr.free_ptr.prev -> ptr.free_ptr.next = _b -> ptr.free_ptr.next; \
00564         if (_tlsf -> matrix [_fl][_sl] == _b) {                         \
00565             _tlsf -> matrix [_fl][_sl] = _b -> ptr.free_ptr.next;       \
00566             if (!_tlsf -> matrix [_fl][_sl]) {                          \
00567                 tlsf_clear_bit (_sl, &_tlsf -> sl_bitmap[_fl]);             \
00568                 if (!_tlsf -> sl_bitmap [_fl])                          \
00569                     tlsf_clear_bit (_fl, &_tlsf -> fl_bitmap);              \
00570             }                                                           \
00571         }                                                               \
00572         _b -> ptr.free_ptr = (free_ptr_t) {NULL, NULL};                 \
00573     }
00574 
00575 #define INSERT_BLOCK(_b, _tlsf, _fl, _sl) {                             \
00576         _b -> ptr.free_ptr = (free_ptr_t) {NULL, _tlsf -> matrix [_fl][_sl]}; \
00577         if (_tlsf -> matrix [_fl][_sl])                                 \
00578             _tlsf -> matrix [_fl][_sl] -> ptr.free_ptr.prev = _b;       \
00579         _tlsf -> matrix [_fl][_sl] = _b;                                \
00580         tlsf_set_bit (_sl, &_tlsf -> sl_bitmap [_fl]);                      \
00581         tlsf_set_bit (_fl, &_tlsf -> fl_bitmap);                                \
00582     }
00583 
00584 #if USE_SBRK || USE_MMAP
00585 static __inline__ void *get_new_area(size_t * size) 
00586 {
00587     void *area;
00588 
00589 #if USE_SBRK
00590     area = sbrk(0);
00591     if (sbrk(*size) != ((void *) ~0))
00592         return area;
00593 #endif
00594 
00595 #if USE_MMAP
00596     *size = ROUNDUP(*size, PAGE_SIZE);
00597     if ((area = mmap(0, *size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0)) != MAP_FAILED)
00598         return area;
00599 #endif
00600     return ((void *) ~0);
00601 }
00602 #endif
00603 
00604 static __inline__ bhdr_t *process_area(void *area, size_t size)
00605 {
00606     bhdr_t *b, *lb, *ib;
00607     area_info_t *ai;
00608 
00609     ib = (bhdr_t *) area;
00610     ib->size = (sizeof(area_info_t) < MIN_BLOCK_SIZE)
00611         ? MIN_BLOCK_SIZE
00612         : ROUNDUP_SIZE(sizeof(area_info_t)) | USED_BLOCK | PREV_USED;
00613     b = (bhdr_t *) GET_NEXT_BLOCK(ib->ptr.buffer, ib->size & BLOCK_SIZE);
00614     b->size = ROUNDDOWN_SIZE(size - 3 * BHDR_OVERHEAD - (ib->size & BLOCK_SIZE)) | USED_BLOCK | PREV_USED;
00615     b->ptr.free_ptr.prev = b->ptr.free_ptr.next = 0;
00616     lb = GET_NEXT_BLOCK(b->ptr.buffer, b->size & BLOCK_SIZE);
00617     lb->prev_hdr = b;
00618     lb->size = 0 | USED_BLOCK | PREV_FREE;
00619     ai = (area_info_t *) ib->ptr.buffer;
00620     ai->next = 0;
00621     ai->end = lb;
00622     return ib;
00623 }
00624 
00625 /******************************************************************/
00626 /******************** Begin of the allocator code *****************/
00627 /******************************************************************/
00628 
00629 /******************************************************************/
00630 size_t init_memory_pool(size_t mem_pool_size, void *mem_pool)
00631 {
00632 /******************************************************************/
00633     char *mp = NULL;
00634     tlsf_t *tlsf;
00635     bhdr_t *b, *ib;
00636 
00637     if (!mem_pool || !mem_pool_size || mem_pool_size < sizeof(tlsf_t) + BHDR_OVERHEAD * 8) {
00638         ERROR_MSG("init_memory_pool (): memory_pool invalid\n");
00639         return -1;
00640     }
00641 
00642     if (((unsigned long) mem_pool & PTR_MASK)) {
00643         ERROR_MSG("init_memory_pool (): mem_pool must be aligned to a word\n");
00644         return -1;
00645     }
00646     tlsf = (tlsf_t *) mem_pool;
00647     /* Check if already initialised */
00648     if (tlsf->tlsf_signature == TLSF_SIGNATURE) {
00649         mp = mem_pool;
00650         b = GET_NEXT_BLOCK(mp, ROUNDUP_SIZE(sizeof(tlsf_t)));
00651         return b->size & BLOCK_SIZE;
00652     }
00653 
00654     mp = mem_pool;
00655 
00656     /* Zeroing the memory pool */
00657     memset(mem_pool, 0, sizeof(tlsf_t));
00658 
00659     tlsf->tlsf_signature = TLSF_SIGNATURE;
00660 
00661     TLSF_CREATE_LOCK(&tlsf->lock);
00662 
00663     ib = process_area(GET_NEXT_BLOCK
00664                       (mem_pool, ROUNDUP_SIZE(sizeof(tlsf_t))), ROUNDDOWN_SIZE(mem_pool_size - sizeof(tlsf_t)));
00665     b = GET_NEXT_BLOCK(ib->ptr.buffer, ib->size & BLOCK_SIZE);
00666     free_ex(b->ptr.buffer, tlsf);
00667     tlsf->area_head = (area_info_t *) ib->ptr.buffer;
00668 
00669 #if TLSF_STATISTIC
00670     tlsf->used_size = mem_pool_size - (b->size & BLOCK_SIZE);
00671     tlsf->max_size = tlsf->used_size;
00672 #endif
00673 
00674     return (b->size & BLOCK_SIZE);
00675 }
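/*
 * A minimal sketch of driving the pool primitives directly (pool name and
 * size are illustrative; within RTAI these calls are normally reached via
 * rtheap_alloc()/rtheap_free() above):
 *
 * \code
 * static char pool[32 * 1024] __attribute__((aligned(sizeof(void *))));
 *
 * void pool_demo(void)
 * {
 *     void *p;
 *
 *     if (init_memory_pool(sizeof(pool), pool) == (size_t)-1)
 *         return;                    // pool too small or misaligned
 *     p = malloc_ex(100, pool);      // internally rounded up and split
 *     if (p)
 *         free_ex(p, pool);          // coalesces with free neighbours
 * }
 * \endcode
 */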
00676 
00677 /******************************************************************/
00678 void *malloc_ex(size_t size, void *mem_pool)
00679 {
00680 /******************************************************************/
00681     tlsf_t *tlsf = (tlsf_t *) mem_pool;
00682     bhdr_t *b, *b2, *next_b;
00683     int fl, sl;
00684     size_t tmp_size;
00685 
00686     size = (size < MIN_BLOCK_SIZE) ? MIN_BLOCK_SIZE : ROUNDUP_SIZE(size);
00687 
00688     /* Rounding up the requested size and calculating fl and sl */
00689     MAPPING_SEARCH(&size, &fl, &sl);
00690 
00691     /* Searching a free block, recall that this function changes the values of fl and sl,
00692        so they are no longer valid when the function fails */
00693     b = FIND_SUITABLE_BLOCK(tlsf, &fl, &sl);
00694 #if USE_MMAP || USE_SBRK
00695     if (!b) {
00696         size_t area_size;
00697         void *area;
00698         /* Growing the pool size when needed */
00699         area_size = size + BHDR_OVERHEAD * 8;   /* size plus enough room for the required headers. */
00700         area_size = (area_size > DEFAULT_AREA_SIZE) ? area_size : DEFAULT_AREA_SIZE;
00701         area = get_new_area(&area_size);        /* Call sbrk or mmap */
00702         if (area == ((void *) ~0))
00703             return NULL;        /* Not enough system memory */
00704         add_new_area(area, area_size, mem_pool);
00705         /* Rounding up the requested size and calculating fl and sl */
00706         MAPPING_SEARCH(&size, &fl, &sl);
00707         /* Searching a free block */
00708         b = FIND_SUITABLE_BLOCK(tlsf, &fl, &sl);
00709     }
00710 #endif
00711     if (!b)
00712         return NULL;            /* Not found */
00713 
00714     EXTRACT_BLOCK_HDR(b, tlsf, fl, sl);
00715 
00716     /*-- found: */
00717     next_b = GET_NEXT_BLOCK(b->ptr.buffer, b->size & BLOCK_SIZE);
00718     /* Should the block be split? */
00719     tmp_size = (b->size & BLOCK_SIZE) - size;
00720     if (tmp_size >= sizeof(bhdr_t)) {
00721         tmp_size -= BHDR_OVERHEAD;
00722         b2 = GET_NEXT_BLOCK(b->ptr.buffer, size);
00723         b2->size = tmp_size | FREE_BLOCK | PREV_USED;
00724         next_b->prev_hdr = b2;
00725         MAPPING_INSERT(tmp_size, &fl, &sl);
00726         INSERT_BLOCK(b2, tlsf, fl, sl);
00727 
00728         b->size = size | (b->size & PREV_STATE);
00729     } else {
00730         next_b->size &= (~PREV_FREE);
00731         b->size &= (~FREE_BLOCK);       /* Now it's used */
00732     }
00733 
00734     TLSF_ADD_SIZE(tlsf, b);
00735 
00736     return (void *) b->ptr.buffer;
00737 }
00738 
00739 /******************************************************************/
00740 void free_ex(void *ptr, void *mem_pool)
00741 {
00742 /******************************************************************/
00743     tlsf_t *tlsf = (tlsf_t *) mem_pool;
00744     bhdr_t *b, *tmp_b;
00745     int fl = 0, sl = 0;
00746 
00747     if (!ptr) {
00748         return;
00749     }
00750     b = (bhdr_t *) ((char *) ptr - BHDR_OVERHEAD);
00751     b->size |= FREE_BLOCK;
00752 
00753     TLSF_REMOVE_SIZE(tlsf, b);
00754 
00755     b->ptr.free_ptr = (free_ptr_t) { NULL, NULL};
00756     tmp_b = GET_NEXT_BLOCK(b->ptr.buffer, b->size & BLOCK_SIZE);
00757     if (tmp_b->size & FREE_BLOCK) {
00758         MAPPING_INSERT(tmp_b->size & BLOCK_SIZE, &fl, &sl);
00759         EXTRACT_BLOCK(tmp_b, tlsf, fl, sl);
00760         b->size += (tmp_b->size & BLOCK_SIZE) + BHDR_OVERHEAD;
00761     }
00762     if (b->size & PREV_FREE) {
00763         tmp_b = b->prev_hdr;
00764         MAPPING_INSERT(tmp_b->size & BLOCK_SIZE, &fl, &sl);
00765         EXTRACT_BLOCK(tmp_b, tlsf, fl, sl);
00766         tmp_b->size += (b->size & BLOCK_SIZE) + BHDR_OVERHEAD;
00767         b = tmp_b;
00768     }
00769     MAPPING_INSERT(b->size & BLOCK_SIZE, &fl, &sl);
00770     INSERT_BLOCK(b, tlsf, fl, sl);
00771 
00772     tmp_b = GET_NEXT_BLOCK(b->ptr.buffer, b->size & BLOCK_SIZE);
00773     tmp_b->size |= PREV_FREE;
00774     tmp_b->prev_hdr = b;
00775 }
00776 
00777 unsigned long tlsf_get_used_size(rtheap_t *heap) {
00778 #if TLSF_STATISTIC
00779     struct list_head *holder;
00780     list_for_each(holder, &heap->extents) { break; }
00781     return ((tlsf_t *)(list_entry(holder, rtextent_t, link)->membase))->used_size;
00782 #else
00783     return 0;
00784 #endif
00785 }
00786 
00787 /******************************** END TLSF ********************************/
00788 
00789 #else /* !CONFIG_RTAI_USE_TLSF */
00790 
00791 /***************************** BEGIN BSD GPMA ********************************/
00792 
00793 /*!\file malloc.c
00794  * \brief Dynamic memory allocation services.
00795  *
00796  * Copyright (C) 2001,2002,2003,2004 Philippe Gerum <rpm@xenomai.org>.
00797  *
00798  * RTAI is free software; you can redistribute it and/or modify it
00799  * under the terms of the GNU General Public License as published by
00800  * the Free Software Foundation; either version 2 of the License, or
00801  * (at your option) any later version.
00802  *
00803  * RTAI is distributed in the hope that it will be useful, but WITHOUT
00804  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
00805  * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public
00806  * License for more details.
00807  *
00808  * You should have received a copy of the GNU General Public License
00809  * along with RTAI; if not, write to the Free Software Foundation,
00810  * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
00811  *
00812  * Dynamic memory allocation services lifted and adapted from the
00813  * Xenomai nucleus.
00814  *
00815  * This file implements the RTAI dynamic memory allocator based on the
00816  * algorithm described in "Design of a General Purpose Memory
00817  * Allocator for the 4.3BSD Unix Kernel" by Marshall K. McKusick and
00818  * Michael J. Karels.
00819  *
00820  * \ingroup shm
00821  */
00822 
00823 /*!
00824  * @addtogroup shm
00825  *@{*/
00826 
00827 static void init_extent (rtheap_t *heap, rtextent_t *extent)
00828 {
00829     caddr_t freepage;
00830     int n, lastpgnum;
00831 
00832     INIT_LIST_HEAD(&extent->link);
00833 
00834     /* The page area starts right after the (aligned) header. */
00835     extent->membase = (caddr_t)extent + heap->hdrsize;
00836     lastpgnum = heap->npages - 1;
00837 
00838     /* Mark each page as free in the page map. */
00839     for (n = 0, freepage = extent->membase; n < lastpgnum; n++, freepage += heap->pagesize) {
00840         *((caddr_t *)freepage) = freepage + heap->pagesize;
00841         extent->pagemap[n] = RTHEAP_PFREE;
00842     }
00843     *((caddr_t *)freepage) = NULL;
00844     extent->pagemap[lastpgnum] = RTHEAP_PFREE;
00845     extent->memlim = freepage + heap->pagesize;
00846 
00847     /* The first page starts the free list of a new extent. */
00848     extent->freelist = extent->membase;
00849 }
00850 
00851 /*! 
00852  * \fn int rtheap_init(rtheap_t *heap,
00853  *                     void *heapaddr,
00854  *                     u_long heapsize,
00855  *                     u_long pagesize,
00856  *                     int suprt);
00857  * \brief Initialize a memory heap.
00858  *
00859  * Initializes a memory heap suitable for dynamic memory allocation
00860  * requests.  The heap manager can operate in two modes: time-bounded,
00861  * if the heap storage area and size are statically defined at
00862  * initialization time, or dynamically extendable, at the expense of
00863  * less deterministic behaviour.
00864  *
00865  * @param heap The address of a heap descriptor the memory manager
00866  * will use to store the allocation data.  This descriptor must always
00867  * be valid while the heap is active therefore it must be allocated in
00868  * permanent memory.
00869  *
00870  * @param heapaddr The address of a statically-defined heap storage
00871  * area. If this parameter is non-zero, all allocations will be made
00872  * from the given area in fully time-bounded mode. In such a case, the
00873  * heap is non-extendable. If a null address is passed, the heap
00874  * manager will attempt to extend the heap each time a memory
00875  * starvation is encountered. In the latter case, the heap manager
00876  * will request additional chunks of core memory from Linux when needed,
00877  * voiding the real-time guarantee for the caller.
00878  *
00879  * @param heapsize If heapaddr is non-zero, heapsize gives the size in
00880  * bytes of the statically-defined storage area. Otherwise, heapsize
00881  * defines the standard length of each extent that will be requested
00882  * from Linux when a memory starvation is encountered for the heap.
00883  * heapsize must be a multiple of pagesize and lower than 16
00884  * Mbytes. Depending on the Linux allocation service used, requests
00885  * for extent memory might be limited in size. For instance, heapsize
00886  * must be lower than 128Kb for kmalloc()-based allocations. In the
00887  * current implementation, heapsize must be large enough to contain an
00888  * internal header. The following formula gives the size of this
00889  * header: hdrsize = (sizeof(rtextent_t) + ((heapsize -
00890  * sizeof(rtextent_t))) / (pagesize + 1) + 15) & ~15;
00891  *
00892  * @param pagesize The size in bytes of the fundamental memory page
00893  * which will be used to subdivide the heap internally. Choosing the
00894  * right page size is important regarding performance and memory
00895  * fragmentation issues, so it might be a good idea to take a look at
00896  * http://docs.FreeBSD.org/44doc/papers/kernmalloc.pdf to pick the
00897  * best one for your needs. In the current implementation, pagesize
00898  * must be a power of two in the range [ 8 .. 32768] inclusive.
00899  *
00900  * @return 0 is returned upon success, or one of the following
00901  * error codes:
00902  * - RTHEAP_PARAM is returned whenever a parameter is invalid.
00903  * - RTHEAP_NOMEM is returned if no initial extent can be allocated
00904  * for a dynamically extendable heap (i.e. heapaddr == NULL).
00905  *
00906  * Side-effect: This routine does not call the rescheduling procedure.
00907  *
00908  * Context: This routine must be called on behalf of a thread context.
00909  */
00910 
00911 int rtheap_init (rtheap_t *heap, void *heapaddr, u_long heapsize, u_long pagesize, int suprt)
00912 {
00913     u_long hdrsize, pmapsize, shiftsize, pageshift;
00914     rtextent_t *extent;
00915     int n;
00916 
00917     /*
00918      * Perform some parameter checks first.
00919      * Constraints are:
00920      * PAGESIZE must be >= 2 ** MINLOG2.
00921      * PAGESIZE must be <= 2 ** MAXLOG2.
00922      * PAGESIZE must be a power of 2.
00923      * HEAPSIZE must be large enough to contain the static part of an
00924      * extent header.
00925      * HEAPSIZE must be a multiple of PAGESIZE.
00926      * HEAPSIZE must be lower than RTHEAP_MAXEXTSZ.
00927      */
00928     if ((pagesize < (1 << RTHEAP_MINLOG2)) ||
00929         (pagesize > (1 << RTHEAP_MAXLOG2)) ||
00930         (pagesize & (pagesize - 1)) != 0 ||
00931         heapsize <= sizeof(rtextent_t) ||
00932         heapsize > RTHEAP_MAXEXTSZ ||
00933         (heapsize & (pagesize - 1)) != 0) {
00934         return RTHEAP_PARAM;
00935     }
00936 
00937     /* Determine the page map overhead inside the given extent
00938        size. We need to reserve a byte in a page map for each page
00939        which is addressable into this extent. The page map is itself
00940        stored in the extent space, right after the static part of its
00941        header, and before the first allocatable page. */
00942     pmapsize = ((heapsize - sizeof(rtextent_t)) * sizeof(u_char)) / (pagesize + sizeof(u_char));
00943 
00944     /* The overall header size is: static_part + page_map rounded to
00945        the minimum alignment size. */
00946     hdrsize = (sizeof(rtextent_t) + pmapsize + RTHEAP_MINALIGNSZ - 1) & ~(RTHEAP_MINALIGNSZ - 1);
00947 
00948     /* An extent must contain at least two addressable pages to cope
00949        with allocation sizes between pagesize and 2 * pagesize. */
00950     if (hdrsize + 2 * pagesize > heapsize) {
00951         return RTHEAP_PARAM;
00952     }
00953 
00954     /* Compute the page shiftmask from the page size (i.e. log2 value). */
00955     for (pageshift = 0, shiftsize = pagesize; shiftsize > 1; shiftsize >>= 1, pageshift++);
00956 
00957     heap->pagesize   = pagesize;
00958     heap->pageshift  = pageshift;
00959     heap->hdrsize    = hdrsize;
00960 
00961     heap->extentsize = heapsize;
00962     if (!heapaddr && suprt) {
00963         if (heapsize <= KMALLOC_LIMIT || (heapaddr = alloc_extent(heapsize, suprt)) == NULL) {
00964             heap->extentsize = KMALLOC_LIMIT;
00965             heapaddr = NULL;
00966         }
00967     }
00968 
00969     heap->npages     = (heap->extentsize - hdrsize) >> pageshift;
00970     heap->maxcont    = heap->npages*pagesize;
00971     heap->flags      =
00972     heap->ubytes     = 0;
00973     INIT_LIST_HEAD(&heap->extents);
00974     spin_lock_init(&heap->lock);
00975 
00976     for (n = 0; n < RTHEAP_NBUCKETS; n++) {
00977         heap->buckets[n] = NULL;
00978     }
00979 
00980     if (heapaddr) {
00981         extent = (rtextent_t *)heapaddr;
00982         init_extent(heap, extent);
00983         list_add_tail(&extent->link, &heap->extents);
00984     } else {
00985         u_long init_size = 0;
00986         while (init_size < heapsize) {
00987             if (!(extent = (rtextent_t *)alloc_extent(heap->extentsize, suprt))) {
00988                 struct list_head *holder, *nholder;
00989                 list_for_each_safe(holder, nholder, &heap->extents) {
00990                     extent = list_entry(holder, rtextent_t, link);
00991                     free_extent(extent, heap->extentsize, suprt);
00992                 }
00993                 return RTHEAP_NOMEM;
00994             }
00995             init_extent(heap, extent);
00996             list_add_tail(&extent->link, &heap->extents);
00997             init_size += heap->extentsize;
00998         }
00999     }
01000     return 0;
01001 }
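/*
 * Worked example of the sizing above, assuming sizeof(rtextent_t) == 64 and
 * RTHEAP_MINALIGNSZ == 16 purely for illustration: with heapsize = 64 KB and
 * pagesize = 512, pmapsize = (65536 - 64) / 513 = 127, hdrsize =
 * (64 + 127 + 15) & ~15 = 192, hence npages = (65536 - 192) / 512 = 127
 * allocatable pages and maxcont = 127 * 512 = 65024 bytes.
 */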
01002 
01003 /*! 
01004  * \fn void rtheap_destroy(rtheap_t *heap);
01005  * \brief Destroys a memory heap.
01006  *
01007  * Destroys a memory heap. Dynamically allocated extents are returned
01008  * to Linux.
01009  *
01010  * @param heap The descriptor address of the destroyed heap.
01011  *
01012  * Side-effect: This routine does not call the rescheduling procedure.
01013  *
01014  * Context: This routine must be called on behalf of a thread context.
01015  */
01016 
01017 void rtheap_destroy (rtheap_t *heap, int suprt)
01018 {
01019     struct list_head *holder, *nholder;
01020 
01021     list_for_each_safe(holder, nholder, &heap->extents) {
01022         free_extent(list_entry(holder, rtextent_t, link), heap->extentsize, suprt);
01023     }
01024 }
01025 
01026 /*
01027  * get_free_range() -- Obtain a range of contiguous free pages to
01028  * fulfill an allocation of 2 ** log2size. Each extent is searched,
01029  * and a new one is allocated if needed, provided the heap is
01030  * extendable. Must be called with the heap lock set.
01031  */
01032 
01033 static caddr_t get_free_range (rtheap_t *heap,
01034                    u_long bsize,
01035                    int log2size,
01036                    int mode)
01037 {
01038     caddr_t block, eblock, freepage, lastpage, headpage, freehead = NULL;
01039     u_long pagenum, pagecont, freecont;
01040     struct list_head *holder;
01041     rtextent_t *extent;
01042 
01043     list_for_each(holder,&heap->extents) {
01044 
01045     extent = list_entry(holder,rtextent_t,link);
01046     freepage = extent->freelist;
01047 
01048     while (freepage != NULL)
01049         {
01050         headpage = freepage;
01051             freecont = 0;
01052 
01053         /* Search for a range of contiguous pages in the free page
01054            list of the current extent. The range must be 'bsize'
01055            long. */
01056         do
01057         {
01058         lastpage = freepage;
01059         freepage = *((caddr_t *)freepage);
01060         freecont += heap->pagesize;
01061         }
01062         while (freepage == lastpage + heap->pagesize && freecont < bsize);
01063 
01064         if (freecont >= bsize)
01065         {
01066         /* Ok, got it. Just update the extent's free page
01067            list, then proceed to the next step. */
01068 
01069         if (headpage == extent->freelist)
01070             extent->freelist = *((caddr_t *)lastpage);
01071         else   
01072             *((caddr_t *)freehead) = *((caddr_t *)lastpage);
01073 
01074         goto splitpage;
01075         }
01076 
01077         freehead = lastpage;
01078         }
01079     }
01080 
01081     /* No available free range in the existing extents so far. If we
01082        cannot extend the heap, we have failed and we are done with
01083        this request. */
01084 
01085     return NULL;
01086 
01087 splitpage:
01088 
01089     /* At this point, headpage is valid and points to the first page
01090        of a range of contiguous free pages larger or equal than
01091        'bsize'. */
01092 
01093     if (bsize < heap->pagesize)
01094     {
01095     /* If the allocation size is smaller than the standard page
01096        size, split the page in smaller blocks of this size,
01097        building a free list of free blocks. */
01098 
01099     for (block = headpage, eblock = headpage + heap->pagesize - bsize;
01100          block < eblock; block += bsize)
01101         *((caddr_t *)block) = block + bsize;
01102 
01103     *((caddr_t *)eblock) = NULL;
01104     }
01105     else   
01106         *((caddr_t *)headpage) = NULL;
01107 
01108     pagenum = (headpage - extent->membase) >> heap->pageshift;
01109 
01110     /* Update the extent's page map.  If log2size is non-zero
01111        (i.e. bsize <= 2 * pagesize), store it in the first page's slot
01112        to record the exact block size (which is a power of
01113        two). Otherwise, store the special marker RTHEAP_PLIST,
01114        indicating the start of a block whose size is a multiple of the
01115        standard page size, but not necessarily a power of two.  In any
01116        case, the following pages slots are marked as 'continued'
01117        (PCONT). */
01118 
01119     extent->pagemap[pagenum] = log2size ? log2size : RTHEAP_PLIST;
01120 
01121     for (pagecont = bsize >> heap->pageshift; pagecont > 1; pagecont--)
01122     extent->pagemap[pagenum + pagecont - 1] = RTHEAP_PCONT;
01123 
01124     return headpage;
01125 }
01126 
01127 /*! 
01128  * \fn void *rtheap_alloc(rtheap_t *heap, u_long size, int flags);
01129  * \brief Allocate a memory block from a memory heap.
01130  *
01131  * Allocates a contiguous region of memory from an active memory heap.
01132  * Such allocation is guaranteed to be time-bounded if the heap is
01133  * non-extendable (see rtheap_init()). Otherwise, it might trigger a
01134  * dynamic extension of the storage area through an internal request
01135  * to the Linux allocation service (kmalloc/vmalloc).
01136  *
01137  * @param heap The descriptor address of the heap to get memory from.
01138  *
01139  * @param size The size in bytes of the requested block. Sizes lower
01140  * or equal to the page size are rounded either to the minimum
01141  * allocation size if lower than this value, or to the minimum
01142  * alignment size if greater or equal to this value. In the current
01143  * implementation, with MINALLOC = 16 and MINALIGN = 16, a 15 bytes
01144  * request will be rounded to 16 bytes, and a 17 bytes request will be
01145  * rounded to 32.
01146  *
01147  * @param flags A set of flags affecting the operation. Unless
01148  * RTHEAP_EXTEND is passed and the heap is extendable, this service
01149  * will return NULL without attempting to extend the heap dynamically
01150  * upon memory starvation.
01151  *
01152  * @return The address of the allocated region upon success, or NULL
01153  * if no memory is available from the specified non-extendable heap,
01154  * or no memory can be obtained from Linux to extend the heap.
01155  *
01156  * Side-effect: This routine does not call the rescheduling procedure.
01157  *
01158  * Context: This routine can always be called on behalf of a thread
01159  * context. It can also be called on behalf of an IST context if the
01160  * heap storage area has been statically-defined at initialization
01161  * time (see rtheap_init()).
01162  */
01163 
01164 void *rtheap_alloc (rtheap_t *heap, u_long size, int mode)
01165 
01166 {
01167     u_long bsize, flags;
01168     caddr_t block;
01169     int log2size;
01170 
01171     if (size == 0)
01172     return NULL;
01173 
01174     if (size <= heap->pagesize)
01175     /* Sizes lower or equal to the page size are rounded either to
01176        the minimum allocation size if lower than this value, or to
01177        the minimum alignment size if greater or equal to this
01178        value. In other words, with MINALLOC = 15 and MINALIGN =
01179        16, a 15 bytes request will be rounded to 16 bytes, and a
01180        17 bytes request will be rounded to 32. */
01181     {
01182     if (size <= RTHEAP_MINALIGNSZ)
01183         size = (size + RTHEAP_MINALLOCSZ - 1) & ~(RTHEAP_MINALLOCSZ - 1);
01184     else
01185         size = (size + RTHEAP_MINALIGNSZ - 1) & ~(RTHEAP_MINALIGNSZ - 1);
01186     }
01187     else
01188     /* Sizes greater than the page size are rounded to a multiple
01189        of the page size. */
01190     size = (size + heap->pagesize - 1) & ~(heap->pagesize - 1);
01191 
01192     /* It becomes more space efficient to directly allocate pages from
01193        the free page list whenever the requested size is greater than
01194        2 times the page size. Otherwise, use the bucketed memory
01195        blocks. */
01196 
01197     if (size <= heap->pagesize * 2)
01198     {
01199     /* Find the first power of two greater or equal to the rounded
01200        size. The log2 value of this size is also computed. */
01201 
01202     for (bsize = (1 << RTHEAP_MINLOG2), log2size = RTHEAP_MINLOG2;
01203          bsize < size; bsize <<= 1, log2size++)
01204         ; /* Loop */
01205 
01206     flags = rt_spin_lock_irqsave(&heap->lock);
01207 
01208     block = heap->buckets[log2size - RTHEAP_MINLOG2];
01209 
01210     if (block == NULL)
01211         {
01212         block = get_free_range(heap,bsize,log2size,mode);
01213 
01214         if (block == NULL)
01215         goto release_and_exit;
01216         }
01217 
01218     heap->buckets[log2size - RTHEAP_MINLOG2] = *((caddr_t *)block);
01219     heap->ubytes += bsize;
01220     }
01221     else
01222         {
01223         if (size > heap->maxcont)
01224             return NULL;
01225 
01226     flags = rt_spin_lock_irqsave(&heap->lock);
01227 
01228     /* Directly request a free page range. */
01229     block = get_free_range(heap,size,0,mode);
01230 
01231     if (block)   
01232         heap->ubytes += size;
01233     }
01234 
01235 release_and_exit:
01236 
01237     rt_spin_unlock_irqrestore(flags,&heap->lock);
01238 
01239     return block;
01240 }
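/*
 * Example of the rounding and bucketing above (MINALLOC/MINALIGN of 16 bytes
 * as stated in the comments): a 100-byte request on a 512-byte-page heap is
 * rounded to 112 bytes and served from the 128-byte bucket (bsize = 128,
 * log2size = 7); a 1500-byte request exceeds 2 * pagesize, is rounded to
 * 1536 bytes and taken directly from the free page list by get_free_range().
 */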
01241 
01242 /*! 
01243  * \fn int rtheap_free(rtheap_t *heap, void *block);
01244  * \brief Release a memory block to a memory heap.
01245  *
01246  * Releases a memory region to the memory heap it was previously
01247  * allocated from.
01248  *
01249  * @param heap The descriptor address of the heap to release memory
01250  * to.
01251  *
01252  * @param block The address of the region to release returned by a
01253  * previous call to rtheap_alloc().
01254  *
01255  * @return 0 is returned upon success, or RTHEAP_PARAM is returned
01256  * whenever the block is not a valid region of the specified heap.
01257  *
01258  * Side-effect: This routine does not call the rescheduling procedure.
01259  *
01260  * Context: This routine can be called on behalf of a thread or IST
01261  * context
01262  */
01263 
01264 int rtheap_free (rtheap_t *heap, void *block)
01265 
01266 {
01267     u_long pagenum, pagecont, boffset, bsize, flags;
01268     caddr_t freepage, lastpage, nextpage, tailpage;
01269     rtextent_t *extent = NULL;
01270     struct list_head *holder;
01271     int log2size, npages;
01272 
01273     flags = rt_spin_lock_irqsave(&heap->lock);
01274 
01275     /* Find the extent from which the returned block is
01276        originating. If the heap is non-extendable, then a single
01277        extent is scanned at most. */
01278 
01279     list_for_each(holder,&heap->extents) {
01280 
01281         extent = list_entry(holder,rtextent_t,link);
01282 
01283     if ((caddr_t)block >= extent->membase &&
01284         (caddr_t)block < extent->memlim)
01285         break;
01286     }
01287 
01288     if (holder == &heap->extents) /* Block not found in any extent. */
01289     goto unlock_and_fail;
01290 
01291     /* Compute the heading page number in the page map. */
01292     pagenum = ((caddr_t)block - extent->membase) >> heap->pageshift;
01293     boffset = ((caddr_t)block - (extent->membase + (pagenum << heap->pageshift)));
01294 
01295     switch (extent->pagemap[pagenum])
01296     {
01297     case RTHEAP_PFREE: /* Unallocated page? */
01298     case RTHEAP_PCONT:  /* Not a range heading page? */
01299 
01300 unlock_and_fail:
01301 
01302         rt_spin_unlock_irqrestore(flags,&heap->lock);
01303         return RTHEAP_PARAM;
01304 
01305     case RTHEAP_PLIST:
01306 
01307         npages = 1;
01308 
01309         while (npages < heap->npages &&
01310            extent->pagemap[pagenum + npages] == RTHEAP_PCONT)
01311         npages++;
01312 
01313         bsize = npages * heap->pagesize;
01314 
01315         /* Link all freed pages in a single sub-list. */
01316 
01317         for (freepage = (caddr_t)block,
01318              tailpage = (caddr_t)block + bsize - heap->pagesize;
01319          freepage < tailpage; freepage += heap->pagesize)
01320         *((caddr_t *)freepage) = freepage + heap->pagesize;
01321 
01322         /* Mark the released pages as free in the extent's page map. */
01323 
01324         for (pagecont = 0; pagecont < npages; pagecont++)
01325         extent->pagemap[pagenum + pagecont] = RTHEAP_PFREE;
01326 
01327         /* Return the sub-list to the free page list, keeping
01328            an increasing address order to favor coalescence. */
01329     
01330         for (nextpage = extent->freelist, lastpage = NULL;
01331          nextpage != NULL && nextpage < (caddr_t)block;
01332          lastpage = nextpage, nextpage = *((caddr_t *)nextpage))
01333         ; /* Loop */
01334 
01335         *((caddr_t *)tailpage) = nextpage;
01336 
01337         if (lastpage)
01338         *((caddr_t *)lastpage) = (caddr_t)block;
01339         else
01340         extent->freelist = (caddr_t)block;
01341 
01342         break;
01343 
01344     default:
01345 
01346         log2size = extent->pagemap[pagenum];
01347         bsize = (1 << log2size);
01348 
01349         if ((boffset & (bsize - 1)) != 0) /* Not a block start? */
01350         goto unlock_and_fail;
01351 
01352         /* Return the block to the bucketed memory space. */
01353 
01354         *((caddr_t *)block) = heap->buckets[log2size - RTHEAP_MINLOG2];
01355         heap->buckets[log2size - RTHEAP_MINLOG2] = block;
01356 
01357         break;
01358     }
01359 
01360     heap->ubytes -= bsize;
01361 
01362     rt_spin_unlock_irqrestore(flags,&heap->lock);
01363 
01364     return 0;
01365 }
01366 
01367 /*
01368  * IMPLEMENTATION NOTES:
01369  *
01370  * The implementation follows the algorithm described in a USENIX
01371  * 1988 paper called "Design of a General Purpose Memory Allocator for
01372  * the 4.3BSD Unix Kernel" by Marshall K. McKusick and Michael
01373  * J. Karels. You can find it at various locations on the net,
01374  * including http://docs.FreeBSD.org/44doc/papers/kernmalloc.pdf.
01375  * A minor variation allows this implementation to have 'extendable'
01376  * heaps when needed, with multiple memory extents providing autonomous
01377  * page address spaces. When the non-extendable form is used, the heap
01378  * management routines show bounded worst-case execution time.
01379  *
01380  * The data structures hierarchy is as follows:
01381  *
01382  * HEAP {
01383  *      block_buckets[]
01384  *      extent_queue -------+
01385  * }                        |
01386  *                          V
01387  *                       EXTENT #1 {
01388  *                              <static header>
01389  *                              page_map[npages]
01390  *                              page_array[npages][pagesize]
01391  *                       } -+
01392  *                          |
01393  *                          |
01394  *                          V
01395  *                       EXTENT #n {
01396  *                              <static header>
01397  *                              page_map[npages]
01398  *                              page_array[npages][pagesize]
01399  *                       }
01400  */
01401 
01402 /*@}*/
01403 
01404 /****************************** END BSD GPMA ********************************/
01405 
01406 #endif /* CONFIG_RTAI_USE_TLSF */
01407 
01408 int __rtai_heap_init (void)
01409 {
01410     rtai_global_heap_size = (rtai_global_heap_size + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1);
01411     if (rtheap_init(&rtai_global_heap, NULL, rtai_global_heap_size, PAGE_SIZE, 0)) {
01412         printk(KERN_INFO "RTAI[malloc]: failed to initialize the global heap (size=%d bytes).\n", rtai_global_heap_size);
01413         return 1;
01414     }
01415     rtai_global_heap_adr = rtai_global_heap.extents.next;
01416     printk(KERN_INFO "RTAI[malloc]: global heap size = %d bytes, <%s>.\n", rtai_global_heap_size, CONFIG_RTAI_USE_TLSF ? "TLSF" : "BSD");
01417     return 0;
01418 }
01419 
01420 void __rtai_heap_exit (void)
01421 {
01422     rtheap_destroy(&rtai_global_heap, 0);
01423     printk("RTAI[malloc]: unloaded.\n");
01424 }
01425 
01426 #ifndef CONFIG_RTAI_MALLOC_BUILTIN
01427 module_init(__rtai_heap_init);
01428 module_exit(__rtai_heap_exit);
01429 #endif /* !CONFIG_RTAI_MALLOC_BUILTIN */
01430 
01431 #ifndef CONFIG_KBUILD
01432 #define CONFIG_KBUILD
01433 #endif
01434 
01435 #ifdef CONFIG_KBUILD
01436 EXPORT_SYMBOL(rtheap_init);
01437 EXPORT_SYMBOL(rtheap_destroy);
01438 EXPORT_SYMBOL(rtheap_alloc);
01439 EXPORT_SYMBOL(rtheap_free);
01440 EXPORT_SYMBOL(rtai_global_heap);
01441 EXPORT_SYMBOL(rtai_global_heap_adr);
01442 EXPORT_SYMBOL(rtai_global_heap_size);
01443 #endif /* CONFIG_KBUILD */