00001
00002
00003
00004
00005
00006
00007
00008
00009
00010
00011
00012
00013
00014
00015
00016
00017
00018
00019
00020
00021
00022
00023
00024
00025
00026
00027
00028 #include <linux/module.h>
00029 #include <linux/kernel.h>
00030 #include <linux/version.h>
00031 #include <linux/slab.h>
00032
00033 #include <rtai_config.h>
00034 #include <asm/rtai.h>
00035 #include <rtai_malloc.h>
00036 #include <rtai_shm.h>
00037
/* Size in bytes of the global real-time heap; overridable at load time. */
int rtai_global_heap_size = RTHEAP_GLOBALSZ;
RTAI_MODULE_PARM(rtai_global_heap_size, int);

/* Address of the global heap's first extent link (set in __rtai_heap_init). */
void *rtai_global_heap_adr = NULL;

/* The heap descriptor shared by all RTAI global allocations. */
rtheap_t rtai_global_heap;
00044
00045 static void *alloc_extent(u_long size, int suprt)
00046 {
00047 caddr_t p;
00048 if (!suprt) {
00049 if ((p = (caddr_t)vmalloc(size))) {
00050 unsigned long _p = (unsigned long)p;
00051
00052 for (; size > 0; size -= PAGE_SIZE, _p += PAGE_SIZE) {
00053
00054 SetPageReserved(vmalloc_to_page((void *)_p));
00055 }
00056 }
00057 } else {
00058 if (size <= KMALLOC_LIMIT) {
00059 p = kmalloc(size, suprt);
00060 } else {
00061 p = (void*)__get_free_pages(suprt, get_order(size));
00062 }
00063
00064
00065 }
00066 if (p) {
00067 memset(p, 0, size);
00068 }
00069 return p;
00070 }
00071
00072 static void free_extent(void *p, u_long size, int suprt)
00073 {
00074 if (!suprt) {
00075 unsigned long _p = (unsigned long)p;
00076
00077
00078 for (; size > 0; size -= PAGE_SIZE, _p += PAGE_SIZE) {
00079
00080 ClearPageReserved(vmalloc_to_page((void *)_p));
00081 }
00082 vfree(p);
00083 } else {
00084
00085
00086 if (size <= KMALLOC_LIMIT) {
00087 kfree(p);
00088 } else {
00089 free_pages((unsigned long)p, get_order(size));
00090 }
00091 }
00092 }
00093
/* Select the allocator back-end: TLSF when configured, BSD-style otherwise. */
#ifndef CONFIG_RTAI_USE_TLSF
#define CONFIG_RTAI_USE_TLSF 0
#endif
00097
00098 #if CONFIG_RTAI_USE_TLSF
00099
/* Forward declarations for the TLSF core implemented later in this file. */
static size_t init_memory_pool(size_t mem_pool_size, void *mem_pool);
static inline void *malloc_ex(size_t size, void *mem_pool);
static inline void free_ex(void *ptr, void *mem_pool);
00103
00104 static void init_extent(rtheap_t *heap, rtextent_t *extent)
00105 {
00106 INIT_LIST_HEAD(&extent->link);
00107 extent->membase = (caddr_t)extent + sizeof(rtextent_t);
00108 extent->memlim = (caddr_t)extent + heap->extentsize;
00109 }
00110
00111 int rtheap_init(rtheap_t *heap, void *heapaddr, u_long heapsize, u_long pagesize, int suprt)
00112 {
00113 rtextent_t *extent;
00114
00115 INIT_LIST_HEAD(&heap->extents);
00116 spin_lock_init(&heap->lock);
00117
00118 heap->extentsize = heapsize;
00119 if (!heapaddr && suprt) {
00120 if (heapsize <= KMALLOC_LIMIT || (heapaddr = alloc_extent(heapsize, suprt)) == NULL) {
00121 heap->extentsize = KMALLOC_LIMIT;
00122 heapaddr = NULL;
00123 }
00124 }
00125
00126 if (heapaddr) {
00127 extent = (rtextent_t *)heapaddr;
00128 init_extent(heap, extent);
00129 list_add_tail(&extent->link, &heap->extents);
00130 return init_memory_pool(heapsize - sizeof(rtextent_t), heapaddr + sizeof(rtextent_t)) < 0 ? RTHEAP_NOMEM : 0;
00131 } else {
00132 u_long init_size = 0;
00133 while (init_size < heapsize) {
00134 if (!(extent = (rtextent_t *)alloc_extent(heap->extentsize, suprt)) || init_memory_pool(heap->extentsize - sizeof(rtextent_t), (void *)extent + sizeof(rtextent_t)) < 0) {
00135 struct list_head *holder, *nholder;
00136 list_for_each_safe(holder, nholder, &heap->extents) {
00137 extent = list_entry(holder, rtextent_t, link);
00138 free_extent(extent, heap->extentsize, suprt);
00139 }
00140 return RTHEAP_NOMEM;
00141 }
00142 init_extent(heap, extent);
00143 list_add_tail(&extent->link, &heap->extents);
00144 init_size += heap->extentsize;
00145 }
00146 }
00147 return 0;
00148 }
00149
00150 void rtheap_destroy(rtheap_t *heap, int suprt)
00151 {
00152 struct list_head *holder, *nholder;
00153 list_for_each_safe(holder, nholder, &heap->extents) {
00154 free_extent(list_entry(holder, rtextent_t, link), heap->extentsize, suprt);
00155 }
00156 }
00157
00158 void *rtheap_alloc(rtheap_t *heap, u_long size, int mode)
00159 {
00160 void *adr = NULL;
00161 struct list_head *holder;
00162 unsigned long flags;
00163
00164 if (!size) {
00165 return NULL;
00166 }
00167
00168 flags = rt_spin_lock_irqsave(&heap->lock);
00169 list_for_each(holder, &heap->extents) {
00170 if ((adr = malloc_ex(size, list_entry(holder, rtextent_t, link)->membase)) != NULL) {
00171 break;
00172 }
00173 }
00174 rt_spin_unlock_irqrestore(flags, &heap->lock);
00175 return adr;
00176 }
00177
00178 int rtheap_free(rtheap_t *heap, void *block)
00179 {
00180 unsigned long flags;
00181 struct list_head *holder;
00182
00183 flags = rt_spin_lock_irqsave(&heap->lock);
00184 list_for_each(holder, &heap->extents) {
00185 rtextent_t *extent;
00186 extent = list_entry(holder, rtextent_t, link);
00187 if ((caddr_t)block < extent->memlim && (caddr_t)block >= extent->membase) {
00188 free_ex(block, extent->membase);
00189 break;
00190 }
00191 }
00192 rt_spin_unlock_irqrestore(flags, &heap->lock);
00193 return 0;
00194 }
00195
00196
00197
00198
00199
00200
00201
00202
00203
00204
00205
00206
00207
00208
00209
00210
00211
00212
00213
00214
00215
00216
00217
00218
00219
00220
00221
00222
00223
00224
00225
00226
00227
00228
00229
00230
00231
00232
00233
00234
00235
00236
00237
00238
00239
00240
00241
00242
00243
00244
00245
00246
00247
00248
00249
00250
00251
00252
00253
00254
/*
 * ---- TLSF build-time configuration ----
 * Locking is off: serialization is provided by the enclosing rtheap
 * spinlock, not by TLSF itself.
 */
#ifndef TLSF_USE_LOCKS
#define TLSF_USE_LOCKS (0)
#endif

/* Keep used/peak byte counters in the pool header (read by tlsf_get_used_size). */
#ifndef TLSF_STATISTIC
#define TLSF_STATISTIC (1)
#endif

/* Pool growth via mmap()/sbrk() is a user-space feature; disabled in-kernel. */
#ifndef USE_MMAP
#define USE_MMAP (0)
#endif

#ifndef USE_SBRK
#define USE_SBRK (0)
#endif


#if TLSF_USE_LOCKS
#include "target.h"
#else
/* No-op lock stubs for the lock-free configuration. */
#define TLSF_CREATE_LOCK(_unused_) do{}while(0)
#define TLSF_DESTROY_LOCK(_unused_) do{}while(0)
#define TLSF_ACQUIRE_LOCK(_unused_) do{}while(0)
#define TLSF_RELEASE_LOCK(_unused_) do{}while(0)
#endif

#if TLSF_STATISTIC
/* Account a block (payload + header overhead) as used, tracking the peak. */
#define TLSF_ADD_SIZE(tlsf, b) do { \
	tlsf->used_size += (b->size & BLOCK_SIZE) + BHDR_OVERHEAD; \
	if (tlsf->used_size > tlsf->max_size) { \
		tlsf->max_size = tlsf->used_size; \
	} \
} while(0)

/* Remove a block from the used-byte count. */
#define TLSF_REMOVE_SIZE(tlsf, b) do { \
	tlsf->used_size -= (b->size & BLOCK_SIZE) + BHDR_OVERHEAD; \
} while(0)
#else
#define TLSF_ADD_SIZE(tlsf, b) do{}while(0)
#define TLSF_REMOVE_SIZE(tlsf, b) do{}while(0)
#endif

#if USE_MMAP || USE_SBRK
#include <unistd.h>
#endif

#if USE_MMAP
#include <sys/mman.h>
#endif

/* Non-GCC compilers may not know __inline__; make it a no-op there. */
#if !defined(__GNUC__)
#ifndef __inline__
#define __inline__
#endif
#endif


#ifndef _DEBUG_TLSF_
#define _DEBUG_TLSF_ (0)
#endif
00315
00316
00317
00318
00319
00320
00321
/* Blocks are aligned to two pointers (16 bytes on 64-bit targets). */
#define BLOCK_ALIGN (sizeof(void *) * 2)

#define MAX_FLI (30)	/* largest first-level index: up to 2^30-byte blocks */
#define MAX_LOG2_SLI (5)	/* log2 of the second-level subdivision count */
#define MAX_SLI (1 << MAX_LOG2_SLI)	/* 32 second-level lists per first level */

#define FLI_OFFSET (6)	/* first levels 0..5 fold into the small-block range */

#define SMALL_BLOCK (128)	/* below this, sizes map linearly to level 0 */
#define REAL_FLI (MAX_FLI - FLI_OFFSET)	/* first-level rows actually stored */
#define MIN_BLOCK_SIZE (sizeof (free_ptr_t))
#define BHDR_OVERHEAD (sizeof (bhdr_t) - MIN_BLOCK_SIZE)
#define TLSF_SIGNATURE (0x2A59FA59)	/* magic marking an initialized pool */

#define PTR_MASK (sizeof(void *) - 1)
/* The kernel defines BLOCK_SIZE elsewhere; redefine it as the mask that
   extracts the size bits (everything above the flag bits) from bhdr_t.size. */
#undef BLOCK_SIZE
#define BLOCK_SIZE (0xFFFFFFFF - PTR_MASK)

/* Physical successor of a block given its payload size. */
#define GET_NEXT_BLOCK(_addr, _r) ((bhdr_t *) ((char *) (_addr) + (_r)))
#define MEM_ALIGN ((BLOCK_ALIGN) - 1)
#define ROUNDUP_SIZE(_r) (((_r) + MEM_ALIGN) & ~MEM_ALIGN)
#define ROUNDDOWN_SIZE(_r) ((_r) & ~MEM_ALIGN)
#define ROUNDUP(_x, _v) ((((~(_x)) + 1) & ((_v)-1)) + (_x))

/* Low two bits of bhdr_t.size carry state flags. */
#define BLOCK_STATE (0x1)	/* this block free/used */
#define PREV_STATE (0x2)	/* physically preceding block free/used */


#define FREE_BLOCK (0x1)
#define USED_BLOCK (0x0)


#define PREV_FREE (0x2)
#define PREV_USED (0x0)


#define DEFAULT_AREA_SIZE (1024*10)

#if USE_MMAP
#define PAGE_SIZE (getpagesize())
#endif

/* Route TLSF diagnostics through RTAI's real-time-safe printk. */
#define PRINT_MSG(fmt, args...) rt_printk(fmt, ## args)
#define ERROR_MSG(fmt, args...) rt_printk(fmt, ## args)
00366
typedef unsigned int u32_t;
typedef unsigned char u8_t;

/* Doubly-linked free-list pointers stored inside a free block's payload. */
typedef struct free_ptr_struct {
	struct bhdr_struct *prev;
	struct bhdr_struct *next;
} free_ptr_t;

/* Header preceding every TLSF block. */
typedef struct bhdr_struct {
	/* Valid only while the physically preceding block is free (PREV_FREE). */
	struct bhdr_struct *prev_hdr;
	/* Payload size with BLOCK_STATE/PREV_STATE flags in the low bits. */
	size_t size;
	/* Free blocks keep their list links here; used blocks start data here. */
	union {
		struct free_ptr_struct free_ptr;
		u8_t buffer[1];
	} ptr;
} bhdr_t;


/* Per-area record stored in the first block of each memory area. */
typedef struct area_info_struct {
	bhdr_t *end;	/* sentinel block terminating the area */
	struct area_info_struct *next;	/* next area backing the same pool */
} area_info_t;

/* Pool header placed at the start of each TLSF memory pool. */
typedef struct TLSF_struct {
	/* set to TLSF_SIGNATURE once the pool is initialized */
	u32_t tlsf_signature;

#if TLSF_USE_LOCKS
	TLSF_MLOCK_T lock;
#endif

#if TLSF_STATISTIC
	/* current and peak used byte counts */
	size_t used_size;
	size_t max_size;
#endif

	/* list of memory areas backing this pool */
	area_info_t *area_head;

	/* one bit per non-empty first level */
	u32_t fl_bitmap;

	/* one bit per non-empty second-level list within each first level */
	u32_t sl_bitmap[REAL_FLI];

	/* segregated free lists, indexed [first level][second level] */
	bhdr_t *matrix[REAL_FLI][MAX_SLI];
} tlsf_t;
00422
00423
00424
00425
00426
/* TLSF internal helper prototypes. */
static __inline__ void tlsf_set_bit(int nr, u32_t * addr);
static __inline__ void tlsf_clear_bit(int nr, u32_t * addr);
static __inline__ int ls_bit(int x);
static __inline__ int ms_bit(int x);
static __inline__ void MAPPING_SEARCH(size_t * _r, int *_fl, int *_sl);
static __inline__ void MAPPING_INSERT(size_t _r, int *_fl, int *_sl);
static __inline__ bhdr_t *FIND_SUITABLE_BLOCK(tlsf_t * _tlsf, int *_fl, int *_sl);
static __inline__ bhdr_t *process_area(void *area, size_t size);
#if USE_SBRK || USE_MMAP
static __inline__ void *get_new_area(size_t * size);
#endif
00438
/*
 * 256-entry lookup table: table[i] == floor(log2(i)) for i in 1..255,
 * and table[0] == -1.  Used to resolve bit scans one byte at a time.
 */
static const int table[] = {
	-1, 0, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3,
	4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
	5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
	5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
	6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
	6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
	6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
	6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
	7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
	7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
	7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
	7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
	7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
	7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
	7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
	7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7
};

/* Index of the least-significant set bit of i (-1 when i == 0). */
static __inline__ int ls_bit(int i)
{
	unsigned int lowest = i & -i;	/* isolate the lowest set bit */
	unsigned int shift;

	if (lowest > 0xffff)
		shift = (lowest > 0xffffff) ? 24 : 16;
	else
		shift = (lowest > 0xff) ? 8 : 0;
	return table[lowest >> shift] + shift;
}

/* Index of the most-significant set bit of i (-1 when i == 0). */
static __inline__ int ms_bit(int i)
{
	unsigned int x = (unsigned int) i;
	unsigned int shift;

	if (x > 0xffff)
		shift = (x > 0xffffff) ? 24 : 16;
	else
		shift = (x > 0xff) ? 8 : 0;
	return table[x >> shift] + shift;
}
00483
00484 static __inline__ void tlsf_set_bit(int nr, u32_t * addr)
00485 {
00486 addr[nr >> 5] |= 1 << (nr & 0x1f);
00487 }
00488
00489 static __inline__ void tlsf_clear_bit(int nr, u32_t * addr)
00490 {
00491 addr[nr >> 5] &= ~(1 << (nr & 0x1f));
00492 }
00493
/*
 * Map a request size to (first, second) level indices for a *search*.
 * The size is rounded up to the next second-level boundary so that any
 * block found in the selected list is guaranteed to fit the request.
 */
static __inline__ void MAPPING_SEARCH(size_t * _r, int *_fl, int *_sl)
{
	int _t;

	if (*_r < SMALL_BLOCK) {
		/* small sizes map linearly into first level 0 */
		*_fl = 0;
		*_sl = *_r / (SMALL_BLOCK / MAX_SLI);
	} else {
		/* mask covering everything below the second-level granularity */
		_t = (1 << (ms_bit(*_r) - MAX_LOG2_SLI)) - 1;
		*_r = *_r + _t;
		*_fl = ms_bit(*_r);
		*_sl = (*_r >> (*_fl - MAX_LOG2_SLI)) - MAX_SLI;
		*_fl -= FLI_OFFSET;

		/* strip the rounding remainder from the adjusted size */
		*_r &= ~_t;
	}
}
00513
/*
 * Map a block size to the (first, second) level indices of the free list
 * it belongs to -- no rounding, used when inserting a block.
 */
static __inline__ void MAPPING_INSERT(size_t _r, int *_fl, int *_sl)
{
	if (_r < SMALL_BLOCK) {
		/* small sizes map linearly into first level 0 */
		*_fl = 0;
		*_sl = _r / (SMALL_BLOCK / MAX_SLI);
	} else {
		*_fl = ms_bit(_r);
		*_sl = (_r >> (*_fl - MAX_LOG2_SLI)) - MAX_SLI;
		*_fl -= FLI_OFFSET;
	}
}
00525
00526
/*
 * Pick a free block from the smallest non-empty list at or above
 * (*_fl, *_sl), updating the indices to the list actually used.
 * Returns NULL when no suitable list has any blocks.
 */
static __inline__ bhdr_t *FIND_SUITABLE_BLOCK(tlsf_t * _tlsf, int *_fl, int *_sl)
{
	u32_t _tmp = _tlsf->sl_bitmap[*_fl] & (~0 << *_sl);
	bhdr_t *_b = NULL;

	if (_tmp) {
		/* a big-enough list exists within the same first level */
		*_sl = ls_bit(_tmp);
		_b = _tlsf->matrix[*_fl][*_sl];
	} else {
		/* fall back to the next non-empty first level, if any;
		   ls_bit() yields -1 when no higher level remains */
		*_fl = ls_bit(_tlsf->fl_bitmap & (~0 << (*_fl + 1)));
		if (*_fl > 0) {
			*_sl = ls_bit(_tlsf->sl_bitmap[*_fl]);
			_b = _tlsf->matrix[*_fl][*_sl];
		}
	}
	return _b;
}
00544
00545
/* Unlink _b from the head of free list matrix[_fl][_sl], clearing the
   bitmap bits when the list becomes empty.  _b must be the list head. */
#define EXTRACT_BLOCK_HDR(_b, _tlsf, _fl, _sl) { \
	_tlsf -> matrix [_fl] [_sl] = _b -> ptr.free_ptr.next; \
	if (_tlsf -> matrix[_fl][_sl]) \
		_tlsf -> matrix[_fl][_sl] -> ptr.free_ptr.prev = NULL; \
	else { \
		tlsf_clear_bit (_sl, &_tlsf -> sl_bitmap [_fl]); \
		if (!_tlsf -> sl_bitmap [_fl]) \
			tlsf_clear_bit (_fl, &_tlsf -> fl_bitmap); \
	} \
	_b -> ptr.free_ptr = (free_ptr_t) {NULL, NULL}; \
}


/* Unlink _b from anywhere inside free list matrix[_fl][_sl]. */
#define EXTRACT_BLOCK(_b, _tlsf, _fl, _sl) { \
	if (_b -> ptr.free_ptr.next) \
		_b -> ptr.free_ptr.next -> ptr.free_ptr.prev = _b -> ptr.free_ptr.prev; \
	if (_b -> ptr.free_ptr.prev) \
		_b -> ptr.free_ptr.prev -> ptr.free_ptr.next = _b -> ptr.free_ptr.next; \
	if (_tlsf -> matrix [_fl][_sl] == _b) { \
		_tlsf -> matrix [_fl][_sl] = _b -> ptr.free_ptr.next; \
		if (!_tlsf -> matrix [_fl][_sl]) { \
			tlsf_clear_bit (_sl, &_tlsf -> sl_bitmap[_fl]); \
			if (!_tlsf -> sl_bitmap [_fl]) \
				tlsf_clear_bit (_fl, &_tlsf -> fl_bitmap); \
		} \
	} \
	_b -> ptr.free_ptr = (free_ptr_t) {NULL, NULL}; \
}

/* Push _b onto the head of free list matrix[_fl][_sl] and set the bits. */
#define INSERT_BLOCK(_b, _tlsf, _fl, _sl) { \
	_b -> ptr.free_ptr = (free_ptr_t) {NULL, _tlsf -> matrix [_fl][_sl]}; \
	if (_tlsf -> matrix [_fl][_sl]) \
		_tlsf -> matrix [_fl][_sl] -> ptr.free_ptr.prev = _b; \
	_tlsf -> matrix [_fl][_sl] = _b; \
	tlsf_set_bit (_sl, &_tlsf -> sl_bitmap [_fl]); \
	tlsf_set_bit (_fl, &_tlsf -> fl_bitmap); \
}
00583
#if USE_SBRK || USE_MMAP
/*
 * Grow the pool from the OS (user-space builds only; compiled out
 * in-kernel).  On success returns the new area and may round *size up;
 * on failure returns (void *)~0.
 */
static __inline__ void *get_new_area(size_t * size)
{
	void *area;

#if USE_SBRK
	area = sbrk(0);
	if (sbrk(*size) != ((void *) ~0))
		return area;
#endif

#if USE_MMAP
	*size = ROUNDUP(*size, PAGE_SIZE);
	if ((area = mmap(0, *size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0)) != MAP_FAILED)
		return area;
#endif
	return ((void *) ~0);	/* sentinel: no memory obtained */
}
#endif
00603
/*
 * Carve a raw memory area into the initial TLSF layout:
 * [area_info header block][one big block][zero-sized end sentinel].
 * Returns the header block; the caller links its area_info into the pool.
 */
static __inline__ bhdr_t *process_area(void *area, size_t size)
{
	bhdr_t *b, *lb, *ib;
	area_info_t *ai;

	ib = (bhdr_t *) area;
	/* NOTE(review): '|' binds tighter than '?:', so the flag bits attach
	   only to the ROUNDUP_SIZE branch -- harmless, as USED_BLOCK and
	   PREV_USED are both 0. */
	ib->size =
	    (sizeof(area_info_t) <
	     MIN_BLOCK_SIZE) ? MIN_BLOCK_SIZE : ROUNDUP_SIZE(sizeof(area_info_t)) | USED_BLOCK | PREV_USED;
	b = (bhdr_t *) GET_NEXT_BLOCK(ib->ptr.buffer, ib->size & BLOCK_SIZE);
	b->size = ROUNDDOWN_SIZE(size - 3 * BHDR_OVERHEAD - (ib->size & BLOCK_SIZE)) | USED_BLOCK | PREV_USED;
	b->ptr.free_ptr.prev = b->ptr.free_ptr.next = 0;
	lb = GET_NEXT_BLOCK(b->ptr.buffer, b->size & BLOCK_SIZE);
	lb->prev_hdr = b;
	/* zero-sized sentinel marks the end of the area */
	lb->size = 0 | USED_BLOCK | PREV_FREE;
	ai = (area_info_t *) ib->ptr.buffer;
	ai->next = 0;
	ai->end = lb;
	return ib;
}
00624
00625
00626
00627
00628
00629
/*
 * Initialize a TLSF pool inside 'mem_pool' / 'mem_pool_size' bytes.
 * Returns the size of the initial free block, or -1 on error -- note the
 * return type is size_t, so the error value is really (size_t)-1 and
 * callers must not test "< 0".
 */
size_t init_memory_pool(size_t mem_pool_size, void *mem_pool)
{

	char *mp = NULL;
	tlsf_t *tlsf;
	bhdr_t *b, *ib;

	if (!mem_pool || !mem_pool_size || mem_pool_size < sizeof(tlsf_t) + BHDR_OVERHEAD * 8) {
		ERROR_MSG("init_memory_pool (): memory_pool invalid\n");
		return -1;
	}

	if (((unsigned long) mem_pool & PTR_MASK)) {
		ERROR_MSG("init_memory_pool (): mem_pool must be aligned to a word\n");
		return -1;
	}
	tlsf = (tlsf_t *) mem_pool;

	/* Already initialized? Just report the first block's size. */
	if (tlsf->tlsf_signature == TLSF_SIGNATURE) {
		mp = mem_pool;
		b = GET_NEXT_BLOCK(mp, ROUNDUP_SIZE(sizeof(tlsf_t)));
		return b->size & BLOCK_SIZE;
	}

	mp = mem_pool;

	/* Zero only the pool header; process_area() lays out the rest. */
	memset(mem_pool, 0, sizeof(tlsf_t));

	tlsf->tlsf_signature = TLSF_SIGNATURE;

	TLSF_CREATE_LOCK(&tlsf->lock);

	ib = process_area(GET_NEXT_BLOCK
			  (mem_pool, ROUNDUP_SIZE(sizeof(tlsf_t))), ROUNDDOWN_SIZE(mem_pool_size - sizeof(tlsf_t)));
	b = GET_NEXT_BLOCK(ib->ptr.buffer, ib->size & BLOCK_SIZE);
	/* Release the big block into the free lists. */
	free_ex(b->ptr.buffer, tlsf);
	tlsf->area_head = (area_info_t *) ib->ptr.buffer;

#if TLSF_STATISTIC
	/* everything except the big free block counts as overhead/used */
	tlsf->used_size = mem_pool_size - (b->size & BLOCK_SIZE);
	tlsf->max_size = tlsf->used_size;
#endif

	return (b->size & BLOCK_SIZE);
}
00676
00677
/*
 * Allocate 'size' bytes from the TLSF pool at 'mem_pool'.
 * Returns a pointer to the payload or NULL when no block fits.
 */
void *malloc_ex(size_t size, void *mem_pool)
{

	tlsf_t *tlsf = (tlsf_t *) mem_pool;
	bhdr_t *b, *b2, *next_b;
	int fl, sl;
	size_t tmp_size;

	size = (size < MIN_BLOCK_SIZE) ? MIN_BLOCK_SIZE : ROUNDUP_SIZE(size);

	/* Round up and locate the first list guaranteed to fit the request. */
	MAPPING_SEARCH(&size, &fl, &sl);

	b = FIND_SUITABLE_BLOCK(tlsf, &fl, &sl);
#if USE_MMAP || USE_SBRK
	if (!b) {
		/* user-space builds can grow the pool from the OS */
		size_t area_size;
		void *area;

		area_size = size + BHDR_OVERHEAD * 8;
		area_size = (area_size > DEFAULT_AREA_SIZE) ? area_size : DEFAULT_AREA_SIZE;
		area = get_new_area(&area_size);
		if (area == ((void *) ~0))
			return NULL;
		add_new_area(area, area_size, mem_pool);

		MAPPING_SEARCH(&size, &fl, &sl);

		b = FIND_SUITABLE_BLOCK(tlsf, &fl, &sl);
	}
#endif
	if (!b)
		return NULL;

	EXTRACT_BLOCK_HDR(b, tlsf, fl, sl);

	next_b = GET_NEXT_BLOCK(b->ptr.buffer, b->size & BLOCK_SIZE);

	/* Split off the remainder if it can hold a block of its own. */
	tmp_size = (b->size & BLOCK_SIZE) - size;
	if (tmp_size >= sizeof(bhdr_t)) {
		tmp_size -= BHDR_OVERHEAD;
		b2 = GET_NEXT_BLOCK(b->ptr.buffer, size);
		b2->size = tmp_size | FREE_BLOCK | PREV_USED;
		next_b->prev_hdr = b2;
		MAPPING_INSERT(tmp_size, &fl, &sl);
		INSERT_BLOCK(b2, tlsf, fl, sl);

		b->size = size | (b->size & PREV_STATE);
	} else {
		/* remainder too small: hand out the whole block */
		next_b->size &= (~PREV_FREE);
		b->size &= (~FREE_BLOCK);
	}

	TLSF_ADD_SIZE(tlsf, b);

	return (void *) b->ptr.buffer;
}
00738
00739
/*
 * Return 'ptr' to the TLSF pool at 'mem_pool', coalescing with the
 * physically adjacent free blocks on both sides.  NULL is a no-op.
 */
void free_ex(void *ptr, void *mem_pool)
{

	tlsf_t *tlsf = (tlsf_t *) mem_pool;
	bhdr_t *b, *tmp_b;
	int fl = 0, sl = 0;

	if (!ptr) {
		return;
	}
	/* step back from the payload to the block header */
	b = (bhdr_t *) ((char *) ptr - BHDR_OVERHEAD);
	b->size |= FREE_BLOCK;

	TLSF_REMOVE_SIZE(tlsf, b);

	b->ptr.free_ptr = (free_ptr_t) { NULL, NULL};
	/* merge with the following block if it is free */
	tmp_b = GET_NEXT_BLOCK(b->ptr.buffer, b->size & BLOCK_SIZE);
	if (tmp_b->size & FREE_BLOCK) {
		MAPPING_INSERT(tmp_b->size & BLOCK_SIZE, &fl, &sl);
		EXTRACT_BLOCK(tmp_b, tlsf, fl, sl);
		b->size += (tmp_b->size & BLOCK_SIZE) + BHDR_OVERHEAD;
	}
	/* merge with the preceding block if it is free */
	if (b->size & PREV_FREE) {
		tmp_b = b->prev_hdr;
		MAPPING_INSERT(tmp_b->size & BLOCK_SIZE, &fl, &sl);
		EXTRACT_BLOCK(tmp_b, tlsf, fl, sl);
		tmp_b->size += (b->size & BLOCK_SIZE) + BHDR_OVERHEAD;
		b = tmp_b;
	}
	MAPPING_INSERT(b->size & BLOCK_SIZE, &fl, &sl);
	INSERT_BLOCK(b, tlsf, fl, sl);

	/* update the successor's back-pointer and PREV_FREE flag */
	tmp_b = GET_NEXT_BLOCK(b->ptr.buffer, b->size & BLOCK_SIZE);
	tmp_b->size |= PREV_FREE;
	tmp_b->prev_hdr = b;
}
00776
00777 unsigned long tlsf_get_used_size(rtheap_t *heap) {
00778 #if TLSF_STATISTIC
00779 struct list_head *holder;
00780 list_for_each(holder, &heap->extents) { break; }
00781 return ((tlsf_t *)(list_entry(holder, rtextent_t, link)->membase))->used_size;
00782 #else
00783 return 0;
00784 #endif
00785 }
00786
00787
00788
00789 #else
00790
00791
00792
00793
00794
00795
00796
00797
00798
00799
00800
00801
00802
00803
00804
00805
00806
00807
00808
00809
00810
00811
00812
00813
00814
00815
00816
00817
00818
00819
00820
00821
00822
00823
00824
00825
00826
/*
 * Build an extent's free-page list: each free page stores the address of
 * the next free page in its first word, terminated by NULL.
 */
static void init_extent (rtheap_t *heap, rtextent_t *extent)
{
	caddr_t freepage;
	int n, lastpgnum;

	INIT_LIST_HEAD(&extent->link);

	/* payload starts after the header (which includes the page map) */
	extent->membase = (caddr_t)extent + heap->hdrsize;
	lastpgnum = heap->npages - 1;

	/* chain every page to its successor and mark it free */
	for (n = 0, freepage = extent->membase; n < lastpgnum; n++, freepage += heap->pagesize) {
		*((caddr_t *)freepage) = freepage + heap->pagesize;
		extent->pagemap[n] = RTHEAP_PFREE;
	}
	*((caddr_t *)freepage) = NULL;	/* last page ends the list */
	extent->pagemap[lastpgnum] = RTHEAP_PFREE;
	extent->memlim = freepage + heap->pagesize;

	/* the free list initially holds the whole extent */
	extent->freelist = extent->membase;
}
00850
00851
00852
00853
00854
00855
00856
00857
00858
00859
00860
00861
00862
00863
00864
00865
00866
00867
00868
00869
00870
00871
00872
00873
00874
00875
00876
00877
00878
00879
00880
00881
00882
00883
00884
00885
00886
00887
00888
00889
00890
00891
00892
00893
00894
00895
00896
00897
00898
00899
00900
00901
00902
00903
00904
00905
00906
00907
00908
00909
00910
/*
 * Initialize a BSD-style heap of 'heapsize' bytes managed in pages of
 * 'pagesize' bytes (a power of two in [2^RTHEAP_MINLOG2, 2^RTHEAP_MAXLOG2]).
 *
 * @heapaddr: caller-provided buffer, or NULL to allocate extents here.
 * @suprt:    nonzero for kernel-space memory, 0 for vmalloc'ed memory.
 *
 * Returns 0, RTHEAP_PARAM on bad arguments, or RTHEAP_NOMEM.
 */
int rtheap_init (rtheap_t *heap, void *heapaddr, u_long heapsize, u_long pagesize, int suprt)
{
	u_long hdrsize, pmapsize, shiftsize, pageshift;
	rtextent_t *extent;
	int n;

	/* Validate: page size in range and a power of two, heap size larger
	   than the header, bounded, and page-aligned. */
	if ((pagesize < (1 << RTHEAP_MINLOG2)) ||
	    (pagesize > (1 << RTHEAP_MAXLOG2)) ||
	    (pagesize & (pagesize - 1)) != 0 ||
	    heapsize <= sizeof(rtextent_t) ||
	    heapsize > RTHEAP_MAXEXTSZ ||
	    (heapsize & (pagesize - 1)) != 0) {
		return RTHEAP_PARAM;
	}

	/* One pagemap byte is needed per page of payload; solve for the
	   number of bytes available to the map. */
	pmapsize = ((heapsize - sizeof(rtextent_t)) * sizeof(u_char)) / (pagesize + sizeof(u_char));

	/* Header (struct + pagemap) rounded up to the minimum alignment. */
	hdrsize = (sizeof(rtextent_t) + pmapsize + RTHEAP_MINALIGNSZ - 1) & ~(RTHEAP_MINALIGNSZ - 1);

	/* At least two payload pages must remain. */
	if (hdrsize + 2 * pagesize > heapsize) {
		return RTHEAP_PARAM;
	}

	/* pageshift = log2(pagesize) */
	for (pageshift = 0, shiftsize = pagesize; shiftsize > 1; shiftsize >>= 1, pageshift++);

	heap->pagesize = pagesize;
	heap->pageshift = pageshift;
	heap->hdrsize = hdrsize;

	heap->extentsize = heapsize;
	/* Kernel-space heap with no caller buffer: try one big extent, else
	   fall back to chaining KMALLOC_LIMIT-sized extents. */
	if (!heapaddr && suprt) {
		if (heapsize <= KMALLOC_LIMIT || (heapaddr = alloc_extent(heapsize, suprt)) == NULL) {
			heap->extentsize = KMALLOC_LIMIT;
			heapaddr = NULL;
		}
	}

	heap->npages = (heap->extentsize - hdrsize) >> pageshift;
	heap->maxcont = heap->npages*pagesize;
	heap->flags =
	heap->ubytes = 0;
	INIT_LIST_HEAD(&heap->extents);
	spin_lock_init(&heap->lock);

	/* All size-class buckets start out empty. */
	for (n = 0; n < RTHEAP_NBUCKETS; n++) {
		heap->buckets[n] = NULL;
	}

	if (heapaddr) {
		extent = (rtextent_t *)heapaddr;
		init_extent(heap, extent);
		list_add_tail(&extent->link, &heap->extents);
	} else {
		u_long init_size = 0;
		while (init_size < heapsize) {
			if (!(extent = (rtextent_t *)alloc_extent(heap->extentsize, suprt))) {
				/* roll back every extent created so far */
				struct list_head *holder, *nholder;
				list_for_each_safe(holder, nholder, &heap->extents) {
					extent = list_entry(holder, rtextent_t, link);
					free_extent(extent, heap->extentsize, suprt);
				}
				return RTHEAP_NOMEM;
			}
			init_extent(heap, extent);
			list_add_tail(&extent->link, &heap->extents);
			init_size += heap->extentsize;
		}
	}
	return 0;
}
01002
01003
01004
01005
01006
01007
01008
01009
01010
01011
01012
01013
01014
01015
01016
01017 void rtheap_destroy (rtheap_t *heap, int suprt)
01018 {
01019 struct list_head *holder, *nholder;
01020
01021 list_for_each_safe(holder, nholder, &heap->extents) {
01022 free_extent(list_entry(holder, rtextent_t, link), heap->extentsize, suprt);
01023 }
01024 }
01025
01026
01027
01028
01029
01030
01031
01032
/*
 * Find 'bsize' physically contiguous free bytes in some extent, unlink
 * them from the extent's free-page list, and mark them in the pagemap.
 * When log2size is nonzero the first page is split into 2^log2size-byte
 * blocks chained for the corresponding bucket.  Caller holds heap->lock.
 * Returns the first page, or NULL when no contiguous run is big enough.
 */
static caddr_t get_free_range (rtheap_t *heap,
			       u_long bsize,
			       int log2size,
			       int mode)
{
	caddr_t block, eblock, freepage, lastpage, headpage, freehead = NULL;
	u_long pagenum, pagecont, freecont;
	struct list_head *holder;
	rtextent_t *extent;

	list_for_each(holder,&heap->extents) {

		extent = list_entry(holder,rtextent_t,link);
		freepage = extent->freelist;

		while (freepage != NULL)
		{
			headpage = freepage;
			freecont = 0;

			/* Walk the (address-ordered) free list, counting how
			   far the run of physically consecutive pages goes. */
			do
			{
				lastpage = freepage;
				freepage = *((caddr_t *)freepage);
				freecont += heap->pagesize;
			}
			while (freepage == lastpage + heap->pagesize && freecont < bsize);

			if (freecont >= bsize)
			{
				/* Unlink the run: either it started at the
				   list head, or 'freehead' is the page whose
				   link must now skip it. */
				if (headpage == extent->freelist)
					extent->freelist = *((caddr_t *)lastpage);
				else
					*((caddr_t *)freehead) = *((caddr_t *)lastpage);

				goto splitpage;
			}

			freehead = lastpage;
		}
	}

	/* no extent holds a big-enough contiguous run */
	return NULL;

splitpage:

	/* For sub-page blocks, chain the first page into 2^log2size-byte
	   blocks; otherwise just terminate the removed run. */
	if (bsize < heap->pagesize)
	{
		for (block = headpage, eblock = headpage + heap->pagesize - bsize;
		     block < eblock; block += bsize)
			*((caddr_t *)block) = block + bsize;

		*((caddr_t *)eblock) = NULL;
	}
	else
		*((caddr_t *)headpage) = NULL;

	pagenum = (headpage - extent->membase) >> heap->pageshift;

	/* Record the block size class on the first page (RTHEAP_PLIST for
	   page-multiple blocks) and RTHEAP_PCONT on continuation pages, so
	   rtheap_free() can recover the block size from any pointer. */
	extent->pagemap[pagenum] = log2size ? log2size : RTHEAP_PLIST;

	for (pagecont = bsize >> heap->pageshift; pagecont > 1; pagecont--)
		extent->pagemap[pagenum + pagecont - 1] = RTHEAP_PCONT;

	return headpage;
}
01126
01127
01128
01129
01130
01131
01132
01133
01134
01135
01136
01137
01138
01139
01140
01141
01142
01143
01144
01145
01146
01147
01148
01149
01150
01151
01152
01153
01154
01155
01156
01157
01158
01159
01160
01161
01162
01163
/*
 * Allocate 'size' bytes from the BSD-style heap.  Small requests
 * (<= 2 pages) are served from power-of-two buckets; larger requests get
 * whole contiguous page runs.  'mode' is passed through to
 * get_free_range().  Returns NULL on failure or when size is 0.
 */
void *rtheap_alloc (rtheap_t *heap, u_long size, int mode)

{
	u_long bsize, flags;
	caddr_t block;
	int log2size;

	if (size == 0)
		return NULL;

	if (size <= heap->pagesize)
		/* Sub-page: round up to the allocation/alignment granularity. */
	{
		if (size <= RTHEAP_MINALIGNSZ)
			size = (size + RTHEAP_MINALLOCSZ - 1) & ~(RTHEAP_MINALLOCSZ - 1);
		else
			size = (size + RTHEAP_MINALIGNSZ - 1) & ~(RTHEAP_MINALIGNSZ - 1);
	}
	else
		/* Page-multiple: round up to a whole number of pages. */
		size = (size + heap->pagesize - 1) & ~(heap->pagesize - 1);

	if (size <= heap->pagesize * 2)
	{
		/* Bucketed path: find the power-of-two class holding 'size'. */
		for (bsize = (1 << RTHEAP_MINLOG2), log2size = RTHEAP_MINLOG2;
		     bsize < size; bsize <<= 1, log2size++)
			;

		flags = rt_spin_lock_irqsave(&heap->lock);

		block = heap->buckets[log2size - RTHEAP_MINLOG2];

		if (block == NULL)
		{
			/* bucket empty: carve fresh blocks out of free pages */
			block = get_free_range(heap,bsize,log2size,mode);

			if (block == NULL)
				goto release_and_exit;
		}

		/* pop the block off the bucket's free chain */
		heap->buckets[log2size - RTHEAP_MINLOG2] = *((caddr_t *)block);
		heap->ubytes += bsize;
	}
	else
	{
		if (size > heap->maxcont)
			return NULL;	/* cannot fit in any single extent */

		flags = rt_spin_lock_irqsave(&heap->lock);

		/* page-run path: log2size == 0 marks a RTHEAP_PLIST block */
		block = get_free_range(heap,size,0,mode);

		if (block)
			heap->ubytes += size;
	}

release_and_exit:

	rt_spin_unlock_irqrestore(flags,&heap->lock);

	return block;
}
01241
01242
01243
01244
01245
01246
01247
01248
01249
01250
01251
01252
01253
01254
01255
01256
01257
01258
01259
01260
01261
01262
01263
01264 int rtheap_free (rtheap_t *heap, void *block)
01265
01266 {
01267 u_long pagenum, pagecont, boffset, bsize, flags;
01268 caddr_t freepage, lastpage, nextpage, tailpage;
01269 rtextent_t *extent = NULL;
01270 struct list_head *holder;
01271 int log2size, npages;
01272
01273 flags = rt_spin_lock_irqsave(&heap->lock);
01274
01275
01276
01277
01278
01279 list_for_each(holder,&heap->extents) {
01280
01281 extent = list_entry(holder,rtextent_t,link);
01282
01283 if ((caddr_t)block >= extent->membase &&
01284 (caddr_t)block < extent->memlim)
01285 break;
01286 }
01287
01288 if (!holder)
01289 goto unlock_and_fail;
01290
01291
01292 pagenum = ((caddr_t)block - extent->membase) >> heap->pageshift;
01293 boffset = ((caddr_t)block - (extent->membase + (pagenum << heap->pageshift)));
01294
01295 switch (extent->pagemap[pagenum])
01296 {
01297 case RTHEAP_PFREE:
01298 case RTHEAP_PCONT:
01299
01300 unlock_and_fail:
01301
01302 rt_spin_unlock_irqrestore(flags,&heap->lock);
01303 return RTHEAP_PARAM;
01304
01305 case RTHEAP_PLIST:
01306
01307 npages = 1;
01308
01309 while (npages < heap->npages &&
01310 extent->pagemap[pagenum + npages] == RTHEAP_PCONT)
01311 npages++;
01312
01313 bsize = npages * heap->pagesize;
01314
01315
01316
01317 for (freepage = (caddr_t)block,
01318 tailpage = (caddr_t)block + bsize - heap->pagesize;
01319 freepage < tailpage; freepage += heap->pagesize)
01320 *((caddr_t *)freepage) = freepage + heap->pagesize;
01321
01322
01323
01324 for (pagecont = 0; pagecont < npages; pagecont++)
01325 extent->pagemap[pagenum + pagecont] = RTHEAP_PFREE;
01326
01327
01328
01329
01330 for (nextpage = extent->freelist, lastpage = NULL;
01331 nextpage != NULL && nextpage < (caddr_t)block;
01332 lastpage = nextpage, nextpage = *((caddr_t *)nextpage))
01333 ;
01334
01335 *((caddr_t *)tailpage) = nextpage;
01336
01337 if (lastpage)
01338 *((caddr_t *)lastpage) = (caddr_t)block;
01339 else
01340 extent->freelist = (caddr_t)block;
01341
01342 break;
01343
01344 default:
01345
01346 log2size = extent->pagemap[pagenum];
01347 bsize = (1 << log2size);
01348
01349 if ((boffset & (bsize - 1)) != 0)
01350 goto unlock_and_fail;
01351
01352
01353
01354 *((caddr_t *)block) = heap->buckets[log2size - RTHEAP_MINLOG2];
01355 heap->buckets[log2size - RTHEAP_MINLOG2] = block;
01356
01357 break;
01358 }
01359
01360 heap->ubytes -= bsize;
01361
01362 rt_spin_unlock_irqrestore(flags,&heap->lock);
01363
01364 return 0;
01365 }
01366
01367
01368
01369
01370
01371
01372
01373
01374
01375
01376
01377
01378
01379
01380
01381
01382
01383
01384
01385
01386
01387
01388
01389
01390
01391
01392
01393
01394
01395
01396
01397
01398
01399
01400
01401
01402
01403
01404
01405
01406 #endif
01407
/*
 * Module init: create the global heap (page-aligned size, vmalloc-backed
 * since suprt == 0).  Returns 0 on success, 1 on failure.
 */
int __rtai_heap_init (void)
{
	/* round the configured size up to a whole number of pages */
	rtai_global_heap_size = (rtai_global_heap_size + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1);
	if (rtheap_init(&rtai_global_heap, NULL, rtai_global_heap_size, PAGE_SIZE, 0)) {
		printk(KERN_INFO "RTAI[malloc]: failed to initialize the global heap (size=%d bytes).\n", rtai_global_heap_size);
		return 1;
	}
	/* publish the first extent's link address for the shm layer */
	rtai_global_heap_adr = rtai_global_heap.extents.next;
	printk(KERN_INFO "RTAI[malloc]: global heap size = %d bytes, <%s>.\n", rtai_global_heap_size, CONFIG_RTAI_USE_TLSF ? "TLSF" : "BSD");
	return 0;
}
01419
/* Module exit: tear down the global heap and all its extents. */
void __rtai_heap_exit (void)
{
	rtheap_destroy(&rtai_global_heap, 0);
	printk("RTAI[malloc]: unloaded.\n");
}
01425
/* Register init/exit only when built as a standalone module; when built
   into the RTAI core they are invoked directly. */
#ifndef CONFIG_RTAI_MALLOC_BUILTIN
module_init(__rtai_heap_init);
module_exit(__rtai_heap_exit);
#endif

/* Force the export block below to be compiled in unconditionally. */
#ifndef CONFIG_KBUILD
#define CONFIG_KBUILD
#endif

#ifdef CONFIG_KBUILD
EXPORT_SYMBOL(rtheap_init);
EXPORT_SYMBOL(rtheap_destroy);
EXPORT_SYMBOL(rtheap_alloc);
EXPORT_SYMBOL(rtheap_free);
EXPORT_SYMBOL(rtai_global_heap);
EXPORT_SYMBOL(rtai_global_heap_adr);
EXPORT_SYMBOL(rtai_global_heap_size);
#endif