00001
00002
00003
00004
00005
00006
00007
00008
00009
00010
00011
00012
00013
00014
00015
00016
00017
00018
00019
00020
00021
00022
00023
00024
00025
00026
00027
00028
00029
00030
00031
00032
00033
00034
00035
00036
#include <linux/module.h>
00037
#include <linux/kernel.h>
00038
#include <linux/version.h>
00039
#include <rtai_config.h>
00040
#include <asm/rtai.h>
00041
#ifdef CONFIG_RTAI_MALLOC_VMALLOC
00042
#include <rtai_shm.h>
00043
#else
00044
#include <linux/slab.h>
00045
#endif
00046
#include <rtai_malloc.h>
00047
00048
/*
 * Size in bytes of the global RTAI heap.  Settable at insmod time
 * ("rtai_global_heap_size=<n>"); rounded up to a whole number of pages
 * in __rtai_heap_init() before use.
 */
MODULE_PARM(rtai_global_heap_size, "i");

int rtai_global_heap_size = RTHEAP_GLOBALSZ;

/* Address of the global heap's first extent; set by __rtai_heap_init(). */
void *rtai_global_heap_adr = NULL;

/* Descriptor of the single global heap shared by RTAI users. */
rtheap_t rtai_global_heap;
00055
00056 static void *
alloc_extent (u_long size)
00057 {
00058 caddr_t p;
00059
#ifdef CONFIG_RTAI_MALLOC_VMALLOC
00060
caddr_t _p;
00061
00062 p = _p = (caddr_t)vmalloc(size);
00063
if (p) {
00064
printk(
"RTAI[malloc]: vmalloced extent %p, size %lu.\n", p, size);
00065
for (; size > 0; size -= PAGE_SIZE, _p += PAGE_SIZE) {
00066 mem_map_reserve(virt_to_page(__va(
kvirt_to_pa((u_long)_p))));
00067 }
00068 }
00069
#else
00070 p = (caddr_t)kmalloc(size,GFP_KERNEL);
00071
printk(
"RTAI[malloc]: kmalloced extent %p, size %lu.\n", p, size);
00072
#endif
00073
if (p) {
00074 memset(p, 0, size);
00075 }
00076
return p;
00077 }
00078
00079 static void free_extent (
void *p, u_long size)
00080 {
00081
#ifdef CONFIG_RTAI_MALLOC_VMALLOC
00082
caddr_t _p = (caddr_t)p;
00083
00084
printk(
"RTAI[malloc]: vfreed extent %p, size %lu.\n", p, size);
00085
for (; size > 0; size -= PAGE_SIZE, _p += PAGE_SIZE) {
00086 mem_map_unreserve(virt_to_page(__va(
kvirt_to_pa((u_long)_p))));
00087 }
00088 vfree(p);
00089
#else
00090
printk(
"RTAI[malloc]: kfreed extent %p, size %lu.\n", p, size);
00091 kfree(p);
00092
#endif
00093 }
00094
00095 static void init_extent (rtheap_t *heap, rtextent_t *extent)
00096 {
00097 caddr_t freepage;
00098
int n, lastpgnum;
00099
00100 INIT_LIST_HEAD(&extent->link);
00101
00102
00103 extent->membase = (caddr_t)extent + heap->hdrsize;
00104 lastpgnum = heap->npages - 1;
00105
00106
00107
for (n = 0, freepage = extent->membase; n < lastpgnum; n++, freepage += heap->pagesize) {
00108 *((caddr_t *)freepage) = freepage + heap->pagesize;
00109 extent->pagemap[n] = RTHEAP_PFREE;
00110 }
00111 *((caddr_t *)freepage) = NULL;
00112 extent->pagemap[lastpgnum] = RTHEAP_PFREE;
00113 extent->memlim = freepage + heap->pagesize;
00114
00115
00116 extent->freelist = extent->membase;
00117 }
00118
00119
00120
00121
00122
00123
00124
00125
00126
00127
00128
00129
00130
00131
00132
00133
00134
00135
00136
00137
00138
00139
00140
00141
00142
00143
00144
00145
00146
00147
00148
00149
00150
00151
00152
00153
00154
00155
00156
00157
00158
00159
00160
00161
00162
00163
00164
00165
00166
00167
00168
00169
00170
00171
00172
00173
00174
00175
00176
00177
/*
 * rtheap_init - initialize a heap over <heapsize> bytes split into pages
 * of <pagesize> bytes each.
 *
 * If <heapaddr> is non-NULL, that caller-supplied memory becomes the
 * heap's single extent; otherwise extents of heap->extentsize bytes are
 * allocated from the kernel until <heapsize> bytes are covered.
 *
 * Returns 0 on success, RTHEAP_PARAM on invalid parameters, or
 * RTHEAP_NOMEM if extent allocation failed (any extents already obtained
 * are released before returning).
 */
int rtheap_init (rtheap_t *heap, void *heapaddr, u_long heapsize, u_long pagesize)
{
	u_long hdrsize, pmapsize, shiftsize, pageshift;
	rtextent_t *extent;
	int n;

	/*
	 * Sanity checks: pagesize must be a power of two within
	 * [2^RTHEAP_MINLOG2, 2^RTHEAP_MAXLOG2]; heapsize must exceed the
	 * extent header, stay under RTHEAP_MAXEXTSZ, and be a multiple of
	 * pagesize.
	 */
	if ((pagesize < (1 << RTHEAP_MINLOG2)) ||
	    (pagesize > (1 << RTHEAP_MAXLOG2)) ||
	    (pagesize & (pagesize - 1)) != 0 ||
	    heapsize <= sizeof(rtextent_t) ||
	    heapsize > RTHEAP_MAXEXTSZ ||
	    (heapsize & (pagesize - 1)) != 0) {
		return RTHEAP_PARAM;
	}

	/*
	 * Size of the page map: one u_char per page, where each page also
	 * costs <pagesize> bytes of payload — hence the division by
	 * (pagesize + sizeof(u_char)).
	 */
	pmapsize = ((heapsize - sizeof(rtextent_t)) * sizeof(u_char)) / (pagesize + sizeof(u_char));

	/* Per-extent header (descriptor + page map), alignment-rounded. */
	hdrsize = (sizeof(rtextent_t) + pmapsize + RTHEAP_MINALIGNSZ - 1) & ~(RTHEAP_MINALIGNSZ - 1);

	/* Require room for at least two usable pages past the header. */
	if (hdrsize + 2 * pagesize > heapsize) {
		return RTHEAP_PARAM;
	}

	/* pageshift = log2(pagesize); pagesize is a power of two here. */
	for (pageshift = 0, shiftsize = pagesize; shiftsize > 1; shiftsize >>= 1, pageshift++);

	heap->pagesize = pagesize;
	heap->pageshift = pageshift;
	heap->hdrsize = hdrsize;
#ifdef CONFIG_RTAI_MALLOC_VMALLOC
	/* vmalloc can serve the whole heap as a single extent. */
	heap->extentsize = heapsize;
#else
	/* kmalloc is size-limited, so cap each extent at KMALLOC_LIMIT. */
	heap->extentsize = heapsize > KMALLOC_LIMIT ? KMALLOC_LIMIT : heapsize;
#endif
	heap->npages = (heap->extentsize - hdrsize) >> pageshift;
	heap->maxcont = heap->npages*pagesize;
	heap->flags =
	heap->ubytes = 0;
	INIT_LIST_HEAD(&heap->extents);
	spin_lock_init(&heap->lock);

	/* Empty all size-class buckets used for sub-page allocations. */
	for (n = 0; n < RTHEAP_NBUCKETS; n++) {
		heap->buckets[n] = NULL;
	}

	if (heapaddr) {
		/* Caller-provided memory: single pre-sized extent. */
		extent = (rtextent_t *)heapaddr;
		init_extent(heap, extent);
		list_add_tail(&extent->link, &heap->extents);
	}
	else {
		u_long init_size = 0;
		/* Allocate extents until the requested size is covered. */
		while (init_size < heapsize) {
			if (!(extent = (rtextent_t *)alloc_extent(heap->extentsize))) {
				/* Roll back: free every extent obtained so far. */
				struct list_head *holder, *nholder;
				list_for_each_safe(holder, nholder, &heap->extents) {
					extent = list_entry(holder, rtextent_t, link);
					free_extent(extent, heap->extentsize);
				}
				return RTHEAP_NOMEM;
			}
			init_extent(heap, extent);
			list_add_tail(&extent->link, &heap->extents);
			init_size += heap->extentsize;
		}
	}

	return 0;
}
00265
00266
00267
00268
00269
00270
00271
00272
00273
00274
00275
00276
00277
00278
00279
00280 void rtheap_destroy (rtheap_t *heap)
00281 {
00282
struct list_head *holder, *nholder;
00283
00284 list_for_each_safe(holder, nholder, &heap->extents) {
00285
free_extent(list_entry(holder, rtextent_t, link), heap->extentsize);
00286 }
00287 }
00288
00289
00290
00291
00292
00293
00294
00295
/*
 * get_free_range - carve a contiguous run of free pages totalling
 * <bsize> bytes out of some extent's free-page list.
 *
 * If <log2size> is non-zero the first page is stamped with that bucket
 * exponent (sub-page block page); otherwise it is stamped RTHEAP_PLIST
 * and the following pages RTHEAP_PCONT (multi-page block).  When
 * bsize < pagesize, the claimed page is pre-split into a chain of
 * bsize-sized blocks for the caller's bucket.
 *
 * Must be called with heap->lock held.  Returns the block address or
 * NULL if no suitable run exists.  <mode> is accepted but not used here.
 */
static caddr_t get_free_range (rtheap_t *heap,
			       u_long bsize,
			       int log2size,
			       int mode)
{
	caddr_t block, eblock, freepage, lastpage, headpage, freehead = NULL;
	u_long pagenum, pagecont, freecont;
	struct list_head *holder;
	rtextent_t *extent;

	list_for_each(holder,&heap->extents) {
		extent = list_entry(holder,rtextent_t,link);
		freepage = extent->freelist;

		while (freepage != NULL)
		{
			headpage = freepage;
			freecont = 0;

			/*
			 * Measure the contiguous run starting at headpage:
			 * follow the free list while each next free page is
			 * physically adjacent to the previous one, stopping
			 * as soon as <bsize> bytes are covered.
			 */
			do
			{
				lastpage = freepage;
				freepage = *((caddr_t *)freepage);
				freecont += heap->pagesize;
			}
			while (freepage == lastpage + heap->pagesize && freecont < bsize);

			if (freecont >= bsize)
			{
				/*
				 * Unlink the run [headpage..lastpage] from the
				 * free list.  If the run starts at the list
				 * head, advance the head; otherwise splice via
				 * <freehead>, the last page of the previous
				 * (too-short) run.  Note the first run in each
				 * extent always has headpage == freelist, so
				 * <freehead> is only dereferenced after being
				 * set within the same extent's walk.
				 */
				if (headpage == extent->freelist)
					extent->freelist = *((caddr_t *)lastpage);
				else
					*((caddr_t *)freehead) = *((caddr_t *)lastpage);

				goto splitpage;
			}

			freehead = lastpage;
		}
	}

	/* No extent holds a contiguous run of <bsize> bytes. */
	return NULL;

splitpage:

	/*
	 * Shape the claimed memory for the caller: a sub-page request gets
	 * the page pre-chopped into a NULL-terminated chain of bsize-sized
	 * blocks; a page-multiple request just gets a NULL link word.
	 */
	if (bsize < heap->pagesize)
	{
		for (block = headpage, eblock = headpage + heap->pagesize - bsize;
		     block < eblock; block += bsize)
			*((caddr_t *)block) = block + bsize;

		*((caddr_t *)eblock) = NULL;
	}
	else
		*((caddr_t *)headpage) = NULL;

	pagenum = (headpage - extent->membase) >> heap->pageshift;

	/*
	 * Record the allocation in the page map: first page carries the
	 * bucket exponent (or RTHEAP_PLIST for page-multiple blocks),
	 * remaining pages are marked as continuations so rtheap_free()
	 * can recover the block length.
	 */
	extent->pagemap[pagenum] = log2size ? log2size : RTHEAP_PLIST;

	for (pagecont = bsize >> heap->pageshift; pagecont > 1; pagecont--)
		extent->pagemap[pagenum + pagecont - 1] = RTHEAP_PCONT;

	return headpage;
}
00389
00390
00391
00392
00393
00394
00395
00396
00397
00398
00399
00400
00401
00402
00403
00404
00405
00406
00407
00408
00409
00410
00411
00412
00413
00414
00415
00416
00417
00418
00419
00420
00421
00422
00423
00424
00425
00426
/*
 * rtheap_alloc - allocate <size> bytes from <heap>.
 *
 * Requests up to two pages are rounded to a power-of-two bucket size and
 * served from the per-size-class bucket lists (refilled from the page
 * free list on miss); larger requests are rounded to a page multiple and
 * carved directly from the free-page list.  Takes heap->lock with IRQs
 * disabled around all free-list manipulation.
 *
 * Returns the block address, or NULL if size is 0, exceeds the largest
 * contiguous span (heap->maxcont), or no memory is available.
 * <mode> is forwarded to get_free_range(), which currently ignores it.
 */
void *rtheap_alloc (rtheap_t *heap, u_long size, int mode)
{
	u_long bsize, flags;
	caddr_t block;
	int log2size;

	if (size == 0)
		return NULL;

	/*
	 * Round the request up: tiny requests to RTHEAP_MINALLOCSZ
	 * granularity, sub-page requests to RTHEAP_MINALIGNSZ, and
	 * larger ones to a whole number of pages.
	 */
	if (size <= heap->pagesize)
	{
		if (size <= RTHEAP_MINALIGNSZ)
			size = (size + RTHEAP_MINALLOCSZ - 1) & ~(RTHEAP_MINALLOCSZ - 1);
		else
			size = (size + RTHEAP_MINALIGNSZ - 1) & ~(RTHEAP_MINALIGNSZ - 1);
	}
	else
		size = (size + heap->pagesize - 1) & ~(heap->pagesize - 1);

	if (size <= heap->pagesize * 2)
	{
		/* Find the smallest power-of-two bucket >= size. */
		for (bsize = (1 << RTHEAP_MINLOG2), log2size = RTHEAP_MINLOG2;
		     bsize < size; bsize <<= 1, log2size++)
			;

		flags = rt_spin_lock_irqsave(&heap->lock);

		/* Pop from the bucket; refill from the page pool on miss. */
		block = heap->buckets[log2size - RTHEAP_MINLOG2];

		if (block == NULL)
		{
			block = get_free_range(heap,bsize,log2size,mode);

			/* block is NULL here, so the pop below is skipped. */
			if (block == NULL)
				goto release_and_exit;
		}

		heap->buckets[log2size - RTHEAP_MINLOG2] = *((caddr_t *)block);
		heap->ubytes += bsize;
	}
	else
	{
		/* Too big for any single contiguous run; lock not yet taken. */
		if (size > heap->maxcont)
			return NULL;

		flags = rt_spin_lock_irqsave(&heap->lock);

		block = get_free_range(heap,size,0,mode);

		if (block)
			heap->ubytes += size;
	}

release_and_exit:

	rt_spin_unlock_irqrestore(flags,&heap->lock);

	return block;
}
00504
00505
00506
00507
00508
00509
00510
00511
00512
00513
00514
00515
00516
00517
00518
00519
00520
00521
00522
00523
00524
00525
00526
00527 int rtheap_free (rtheap_t *heap,
void *block)
00528
00529 {
00530 u_long pagenum, pagecont, boffset, bsize,
flags;
00531 caddr_t freepage, lastpage, nextpage, tailpage;
00532 rtextent_t *extent = NULL;
00533
struct list_head *holder;
00534
int log2size, npages;
00535
00536
flags =
rt_spin_lock_irqsave(&heap->lock);
00537
00538
00539
00540
00541
00542 list_for_each(holder,&heap->extents) {
00543
00544 extent = list_entry(holder,rtextent_t,link);
00545
00546
if ((caddr_t)block >= extent->membase &&
00547 (caddr_t)block < extent->memlim)
00548
break;
00549 }
00550
00551
if (!holder)
00552
goto unlock_and_fail;
00553
00554
00555 pagenum = ((caddr_t)block - extent->membase) >> heap->pageshift;
00556 boffset = ((caddr_t)block - (extent->membase + (pagenum << heap->pageshift)));
00557
00558
switch (extent->pagemap[pagenum])
00559 {
00560
case RTHEAP_PFREE:
00561
case RTHEAP_PCONT:
00562
00563 unlock_and_fail:
00564
00565
rt_spin_unlock_irqrestore(
flags,&heap->lock);
00566
return RTHEAP_PARAM;
00567
00568
case RTHEAP_PLIST:
00569
00570 npages = 1;
00571
00572
while (npages < heap->npages &&
00573 extent->pagemap[pagenum + npages] == RTHEAP_PCONT)
00574 npages++;
00575
00576 bsize = npages * heap->pagesize;
00577
00578
00579
00580
for (freepage = (caddr_t)block,
00581 tailpage = (caddr_t)block + bsize - heap->pagesize;
00582 freepage < tailpage; freepage += heap->pagesize)
00583 *((caddr_t *)freepage) = freepage + heap->pagesize;
00584
00585
00586
00587
for (pagecont = 0; pagecont < npages; pagecont++)
00588 extent->pagemap[pagenum + pagecont] = RTHEAP_PFREE;
00589
00590
00591
00592
00593
for (nextpage = extent->freelist, lastpage = NULL;
00594 nextpage != NULL && nextpage < (caddr_t)block;
00595 lastpage = nextpage, nextpage = *((caddr_t *)nextpage))
00596 ;
00597
00598 *((caddr_t *)tailpage) = nextpage;
00599
00600
if (lastpage)
00601 *((caddr_t *)lastpage) = (caddr_t)block;
00602
else
00603 extent->freelist = (caddr_t)block;
00604
00605
break;
00606
00607
default:
00608
00609 log2size = extent->pagemap[pagenum];
00610 bsize = (1 << log2size);
00611
00612
if ((boffset & (bsize - 1)) != 0)
00613
goto unlock_and_fail;
00614
00615
00616
00617 *((caddr_t *)block) = heap->buckets[log2size - RTHEAP_MINLOG2];
00618 heap->buckets[log2size - RTHEAP_MINLOG2] = block;
00619
00620
break;
00621 }
00622
00623 heap->ubytes -= bsize;
00624
00625
rt_spin_unlock_irqrestore(
flags,&heap->lock);
00626
00627
return 0;
00628 }
00629
00630 int __rtai_heap_init (
void)
00631 {
00632
rtai_global_heap_size = (
rtai_global_heap_size + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1);
00633
if (
rtheap_init(&
rtai_global_heap, NULL,
rtai_global_heap_size, PAGE_SIZE)) {
00634
printk(KERN_INFO
"RTAI[malloc]: failed to initialize the global heap (size=%d bytes).\n",
rtai_global_heap_size);
00635
return 1;
00636 }
00637
rtai_global_heap_adr =
rtai_global_heap.extents.next;
00638
printk(KERN_INFO
"RTAI[malloc]: loaded (global heap size=%d bytes).\n",
rtai_global_heap_size);
00639
return 0;
00640 }
00641
/* Module exit: release every extent of the global heap and log unload. */
void __rtai_heap_exit (void)
{
	rtheap_destroy(&rtai_global_heap);
	printk("RTAI[malloc]: unloaded.\n");
}
00647
00648
00649
00650
00651
00652
00653
00654
00655
00656
00657
00658
00659
00660
00661
00662
00663
00664
00665
00666
00667
00668
00669
00670
00671
00672
00673
00674
00675
00676
00677
00678
00679
00680
00681
00682
00683
00684
00685
#ifndef CONFIG_RTAI_MALLOC_BUILTIN
/* Built as a standalone module: wire up the init/exit handlers. */
module_init(__rtai_heap_init);
module_exit(__rtai_heap_exit);
#endif

#ifdef CONFIG_KBUILD
/* Export the heap API and the global heap objects to other RTAI modules. */
EXPORT_SYMBOL(rtheap_init);
EXPORT_SYMBOL(rtheap_destroy);
EXPORT_SYMBOL(rtheap_alloc);
EXPORT_SYMBOL(rtheap_free);
EXPORT_SYMBOL(rtai_global_heap);
EXPORT_SYMBOL(rtai_global_heap_adr);
EXPORT_SYMBOL(rtai_global_heap_size);
#endif