00001
00002
00003
00004
00005
00006
00007
00008
00009
00010
00011
00012
00013
00014
00015
00016
00017
00018
00019
00020
00021
00022
00023
00024
00025
00026
00027
00028
00029
00030
00031
00032 #define RTAI_SHM_MISC_MINOR 254 // The same minor used to mknod for major 10.
00033
00034
#include <linux/version.h>
00035
#include <linux/module.h>
00036
#include <linux/config.h>
00037
#include <linux/errno.h>
00038
#include <linux/mm.h>
00039
#include <linux/miscdevice.h>
00040
00041
#include <rtai_trace.h>
00042
#include <rtai_schedcore.h>
00043
#include <rtai_registry.h>
00044
#include "rtai_shm.h"
00045
00046
MODULE_LICENSE(
"GPL");
00047
/* Round an address up to the next page boundary (PAGE_ALIGN rounds up). */
#define ALIGN2PAGE(adr) ((void *)PAGE_ALIGN((unsigned long)adr))

/* Housekeeping (alloc/register/free) is only permitted from soft (Linux) context. */
#define RT_SHM_OP_PERM() (!(_rt_whoami()->is_hard))

/* Maps the 'suprt' argument onto kmalloc GFP flags; index 0 means "use rvmalloc". */
static int SUPRT[] = { 0, GFP_KERNEL, GFP_ATOMIC, GFP_DMA };
00052
/*
 * Core named shared-memory allocator.
 *
 * If 'name' is already registered, rt_get_adr_cnt() bumps its use count and
 * the existing block is returned.  Otherwise (and only from soft context,
 * see RT_SHM_OP_PERM) 'size' is rounded up to whole pages and the block is
 * carved with rkmalloc (kernel heap, suprt != 0, flags from SUPRT[]) or
 * rvmalloc (suprt == 0).  The block is registered under 'name'; a negative
 * registered size marks kmalloc'ed blocks so the free path can pick the
 * matching deallocator.  On registration failure the block is released
 * again and 0 is returned.  Freshly allocated blocks are zeroed.
 *
 * Returns the page-aligned address of the block, or 0 on failure.
 */
static inline void *_rt_shm_alloc(unsigned long name, int size, int suprt)
{
	void *adr;

	if (!(adr = rt_get_adr_cnt(name)) && size > 0 && suprt >= 0 && RT_SHM_OP_PERM()) {
		/* Round up to an integral number of pages. */
		size = ((size - 1) & PAGE_MASK) + PAGE_SIZE;
		if ((adr = suprt ? rkmalloc(&size, SUPRT[suprt]) : rvmalloc(size))) {
			if (!rt_register(name, adr, suprt ? -size : size, 0)) {
				/* Roll back with the allocator that produced the block. */
				if (suprt) {
					rkfree(adr, size);
				} else {
					rvfree(adr, size);
				}
				return 0;
			}
			memset(ALIGN2PAGE(adr), 0, size);
		}
	}
	/* ALIGN2PAGE(NULL) is still NULL/0, so the failure path is preserved. */
	return ALIGN2PAGE(adr);
}
00074
/*
 * Core named shared-memory release.
 *
 * 'size' is the registered type/size of the block (negative for kmalloc'ed
 * blocks, see _rt_shm_alloc).  Drops one reference via rt_drg_on_name_cnt();
 * only when the last reference is gone — and the block is not the global
 * heap — is the memory actually returned, with the deallocator matching the
 * sign of 'size'.  Freeing is skipped from hard context (RT_SHM_OP_PERM),
 * but the reference accounting result is still reported.
 *
 * Returns abs(size) when the block existed, 0 otherwise.
 */
static inline int _rt_shm_free(unsigned long name, int size)
{
	void *adr;

	if (size && (adr = rt_get_adr(name))) {
		if (RT_SHM_OP_PERM()) {
			if (!rt_drg_on_name_cnt(name) && name != GLOBAL_HEAP_ID) {
				if (size < 0) {
					rkfree(adr, -size);
				} else {
					rvfree(adr, size);
				}
			}
		}
		return abs(size);
	}
	return 0;
}
00093
00094
00095
00096
00097
00098
00099
00100
00101
00102
00103
00104
00105
00106
00107
00108
00109
00110
00111
00112
00113
00114
00115
00116
00117
00118
00119
00120
00121
00122
00123
00124
00125
00126
00127
00128
00129
00130 void *
rt_shm_alloc(
unsigned long name,
int size,
int suprt)
00131 {
00132
TRACE_RTAI_SHM(TRACE_RTAI_EV_SHM_KMALLOC, name, size, 0);
00133
return _rt_shm_alloc(name, size, suprt);
00134 }
00135
/*
 * User-space allocation helper (reached via ioctl/LXRT).
 *
 * Allocates/attaches the named block, then stashes 'name' in the
 * vm_private_data of the FIRST VMA of the calling process; the subsequent
 * mmap() (rtai_shm_f_mmap) reads it back from there to know which block to
 * map.  NOTE(review): this uses (current->mm)->mmap as a per-process
 * mailbox between two syscalls — confirm no other mapping can intervene.
 *
 * Returns the registered block size (positive) on success, 0 on failure.
 */
static int rt_shm_alloc_usp(unsigned long name, int size, int suprt)
{
	TRACE_RTAI_SHM(TRACE_RTAI_EV_SHM_MALLOC, name, size, current->pid);

	if (_rt_shm_alloc(name, size, suprt)) {
		((current->mm)->mmap)->vm_private_data = (void *)name;
		return abs(rt_get_type(name));
	}
	return 0;
}
00146
00147
00148
00149
00150
00151
00152
00153
00154
00155
00156
00157
00158
00159
00160
00161
00162
00163
00164
00165
00166
00167
int rt_shm_free(
unsigned long name)
00168 {
00169
TRACE_RTAI_SHM(TRACE_RTAI_EV_SHM_KFREE, name, 0, 0);
00170
return _rt_shm_free(name,
rt_get_type(name));
00171 }
00172
/*
 * Report the size and user address of a mapped block.
 *
 * Scans the calling process's VMA list for a mapping tagged (via
 * vm_private_data) with the name in *arg and whose extent matches the
 * registered size.  On a hit, *arg is overwritten with the mapping's start
 * address and the size is returned; 0 means the block is not mapped here.
 */
static int rt_shm_size(unsigned long *arg)
{
	int size;
	struct vm_area_struct *vma;

	size = abs(rt_get_type(*arg));
	for (vma = (current->mm)->mmap; vma; vma = vma->vm_next) {
		if (vma->vm_private_data == (void *)*arg && (vma->vm_end - vma->vm_start) == size) {
			*arg = vma->vm_start;
			return size;
		}
	}
	return 0;
}
00187
/*
 * VMA open hook: when the kernel duplicates/splits a mapping (e.g. fork),
 * bump the registry use count of the backing block so the matching
 * vm_close can drop it without freeing memory still in use.
 */
static void rtai_shm_vm_open(struct vm_area_struct *vma)
{
	rt_get_adr_cnt((unsigned long)vma->vm_private_data);
}
00192
00193 static void rtai_shm_vm_close(
struct vm_area_struct *vma)
00194 {
00195
_rt_shm_free((
unsigned long)vma->vm_private_data,
rt_get_type((
unsigned long)vma->vm_private_data));
00196 }
00197
00198 static struct vm_operations_struct
rtai_shm_vm_ops = {
00199 open:
rtai_shm_vm_open,
00200 close:
rtai_shm_vm_close
00201 };
00202
00203
static void rt_set_heap(
unsigned long,
void *);
00204
/*
 * Misc-device ioctl: dispatches the user-space SHM and heap services.
 *
 * For SHM_ALLOC/SHM_SIZE/HEAP_SET, 'arg' is dereferenced directly as a
 * pointer to the caller's argument array (no copy_from_user) —
 * NOTE(review): this assumes the user buffer is directly addressable in
 * this context; confirm against the RTAI syscall path.  For SHM_FREE,
 * 'arg' is the block name itself, not a pointer.  Unknown commands
 * return 0.
 */
static int rtai_shm_f_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg)
{
	switch (cmd) {
		case SHM_ALLOC: {
			/* arg -> { name, size, suprt } */
			TRACE_RTAI_SHM(TRACE_RTAI_EV_SHM_MALLOC, ((unsigned long *)arg)[0], cmd, current->pid);
			return rt_shm_alloc_usp(((unsigned long *)arg)[0], ((int *)arg)[1], ((int *)arg)[2]);
		}
		case SHM_FREE: {
			/* arg is the name directly. */
			TRACE_RTAI_SHM(TRACE_RTAI_EV_SHM_FREE, arg, cmd, current->pid);
			return _rt_shm_free(arg, rt_get_type(arg));
		}
		case SHM_SIZE: {
			/* arg -> { pointer to name; overwritten with the map address }. */
			TRACE_RTAI_SHM(TRACE_RTAI_EV_SHM_GET_SIZE, arg, cmd, current->pid);
			return rt_shm_size((unsigned long *)((unsigned long *)arg)[0]);
		}
		case HEAP_SET: {
			/* arg -> { name, user address of the mapped heap }. */
			rt_set_heap(((unsigned long *)arg)[0], (void *)((unsigned long *)arg)[1]);
			return 0;
		}
	}
	return 0;
}
00227
/*
 * Misc-device mmap: maps the block selected by the preceding SHM_ALLOC.
 *
 * The block name is retrieved from the first-VMA "mailbox" filled by
 * rt_shm_alloc_usp(), moved into this VMA's vm_private_data (for the
 * open/close refcount hooks), and the mailbox is cleared.  A negative
 * registered size selects rkmmap (kmalloc'ed block), positive selects
 * rvmmap (vmalloc'ed block).  A VMA that already has vm_ops did not come
 * through the expected alloc-then-mmap sequence: -EFAULT.
 */
static int rtai_shm_f_mmap(struct file *file, struct vm_area_struct *vma)
{
	unsigned long name;
	int size;

	if (!vma->vm_ops) {
		vma->vm_ops = &rtai_shm_vm_ops;
		vma->vm_flags |= VM_LOCKED;	/* keep shared pages resident */
		name = (unsigned long)(vma->vm_private_data = ((current->mm)->mmap)->vm_private_data);
		((current->mm)->mmap)->vm_private_data = NULL;
		return (size = rt_get_type(name)) < 0 ?
			rkmmap(ALIGN2PAGE(rt_get_adr(name)), -size, vma) :
			rvmmap(rt_get_adr(name), size, vma);
	}
	return -EFAULT;
}
00241
00242 static struct file_operations
rtai_shm_fops = {
00243 ioctl:
rtai_shm_f_ioctl,
00244 mmap:
rtai_shm_f_mmap
00245 };
00246
00247 static struct miscdevice
rtai_shm_dev =
00248 {
RTAI_SHM_MISC_MINOR,
"RTAI_SHM", &
rtai_shm_fops };
00249
/*
 * Allocate 'size' bytes from a task heap and translate the result from the
 * kernel view to the caller's view: the same physical heap is mapped at
 * heap->kadr in kernel space and heap->uadr in the caller's space, so the
 * offset within the heap is rebased onto uadr.  Returns NULL on failure.
 */
static inline void *_rt_halloc(int size, struct rt_heap_t *heap)
{
	void *mem_ptr = NULL;

	if ((mem_ptr = rtheap_alloc(heap->heap, size, 0))) {
		mem_ptr = heap->uadr + (mem_ptr - heap->kadr);
	}
	return mem_ptr;
}
00259
00260 static inline void _rt_hfree(
void *addr,
struct rt_heap_t *heap)
00261 {
00262
rtheap_free(heap->heap, heap->kadr + (addr - heap->uadr));
00263 }
00264
00265 #define GLOBAL 0
00266 #define SPECIFIC 1
00267
00268
00269
00270
00271
00272
00273
00274
00275
00276
00277
00278
00279
00280
00281
00282
00283
00284
00285
00286
00287
00288
00289
00290
00291
00292
00293
00294
00295
00296
00297
00298
00299
00300
00301
00302
00303
00304
00305
00306
00307
00308
00309
00310
00311
00312
00313
00314
00315
00316
00317
00318
00319
00320
00321
00322
00323
00324
/*
 * Kernel-space named allocation from the global heap.
 *
 * If 'name' is registered already, bump its count and return the existing
 * block; otherwise carve it from the Linux task's GLOBAL heap and register
 * it (IS_HPCK).  NOTE(review): the registration-failure rollback calls
 * rt_hfree(), which frees via the CALLER task's SPECIFIC heap, while the
 * block came from the Linux task's GLOBAL heap — verify both resolve to
 * the same heap for every kernel-space caller.
 *
 * Returns the block address, or NULL on failure.
 */
void *rt_named_malloc(unsigned long name, int size)
{
	void *mem_ptr;

	if ((mem_ptr = rt_get_adr_cnt(name))) {
		return mem_ptr;
	}
	if ((mem_ptr = _rt_halloc(size, &rt_smp_linux_task->heap[GLOBAL]))) {
		if (rt_register(name, mem_ptr, IS_HPCK, 0)) {
			return mem_ptr;
		}
		rt_hfree(mem_ptr);
	}
	return NULL;
}
00339
00340
00341
00342
00343
00344
00345
00346
00347
00348
00349
00350
00351
00352
00353
00354
00355
00356
00357
00358
00359
/*
 * Kernel-space named free: look up the name registered for 'adr', drop one
 * reference, and return the block to the Linux task's GLOBAL heap only
 * when the last reference is gone.
 */
void rt_named_free(void *adr)
{
	unsigned long name;

	name = rt_get_name(adr);
	if (!rt_drg_on_name_cnt(name)) {
		_rt_hfree(adr, &rt_smp_linux_task->heap[GLOBAL]);
	}
}
00368
00369
00370
00371
00372
/*
 * Resolve the RTAI task whose heaps the heap services should use, into the
 * local variable 'task' (which the caller must declare): a hard real-time
 * caller is _rt_whoami() itself; a soft (Linux) caller is mapped to the
 * RTAI task stored in current->rtai_tskext(TSKEXT0) (presumably its LXRT
 * buddy task — verify).  Executes 'return_instr' when the caller has no
 * associated RTAI task.
 */
#define RTAI_TASK(return_instr) \
	do { \
		if (!(task = _rt_whoami())->is_hard) { \
			if (!(task = current->rtai_tskext(TSKEXT0))) { \
				return_instr; \
			} \
		} \
	} while (0)
00381
/*
 * Allocate from the calling task's heap of the given kind (GLOBAL or
 * SPECIFIC).  Returns NULL if the caller has no RTAI task or the heap
 * allocation fails.
 */
static inline void *rt_halloc_typed(int size, int htype)
{
	RT_TASK *task;

	RTAI_TASK(return NULL);
	return _rt_halloc(size, &task->heap[htype]);
}
00389
/*
 * Free a block back to the calling task's heap of the given kind
 * (GLOBAL or SPECIFIC).  Silently a no-op if the caller has no RTAI task.
 */
static inline void rt_hfree_typed(void *addr, int htype)
{
	RT_TASK *task;

	RTAI_TASK(return);
	_rt_hfree(addr, &task->heap[htype]);
}
00397
/*
 * Named allocation from the calling task's heap of the given kind.
 *
 * The registry stores KERNEL-view addresses, so an existing block found by
 * rt_get_adr_cnt() is translated to the caller's view (uadr base) before
 * being returned, and a fresh block — which _rt_halloc returns in the
 * caller's view — is translated back to the kernel view for registration.
 * On registration failure the block is returned to the heap.
 *
 * Returns the caller-view address, or NULL on failure.
 */
static inline void *rt_named_halloc_typed(unsigned long name, int size, int htype)
{
	RT_TASK *task;
	void *mem_ptr;

	RTAI_TASK(return NULL);
	if ((mem_ptr = rt_get_adr_cnt(name))) {
		return task->heap[htype].uadr + (mem_ptr - task->heap[htype].kadr);
	}
	if ((mem_ptr = _rt_halloc(size, &task->heap[htype]))) {
		if (rt_register(name, task->heap[htype].kadr + (mem_ptr - task->heap[htype].uadr), IS_HPCK, 0)) {
			return mem_ptr;
		}
		_rt_hfree(mem_ptr, &task->heap[htype]);
	}
	return NULL;
}
00415
/*
 * Named free to the calling task's heap of the given kind: translate the
 * caller-view address to the kernel view to find the registered name, drop
 * one reference, and free only when the last reference is gone.
 */
static inline void rt_named_hfree_typed(void *adr, int htype)
{
	RT_TASK *task;
	unsigned long name;

	RTAI_TASK(return);
	name = rt_get_name(task->heap[htype].kadr + (adr - task->heap[htype].uadr));
	if (!rt_drg_on_name_cnt(name)) {
		_rt_hfree(adr, &task->heap[htype]);
	}
}
00427
00428
00429
00430
00431
00432
00433
00434
00435
00436
00437
00438
00439
00440
00441
00442
00443
00444
00445
00446
00447
00448
void *
rt_halloc(
int size)
00449 {
00450
return rt_halloc_typed(size, SPECIFIC);
00451 }
00452
00453
00454
00455
00456
00457
00458
00459
00460
00461
00462
00463
00464
00465
void rt_hfree(
void *adr)
00466 {
00467
rt_hfree_typed(adr, SPECIFIC);
00468 }
00469
00470
00471
00472
00473
00474
00475
00476
00477
00478
00479
00480
00481
00482
00483
00484
00485
00486
00487
00488
00489
00490
00491
00492
00493
00494
00495
00496
00497
00498
00499
00500
00501
void *
rt_named_halloc(
unsigned long name,
int size)
00502 {
00503
return rt_named_halloc_typed(name, size, SPECIFIC);
00504 }
00505
00506
00507
00508
00509
00510
00511
00512
00513
00514
00515
00516
00517
00518
00519
00520
00521
00522
00523
00524
00525
void rt_named_hfree(
void *adr)
00526 {
00527
rt_named_hfree_typed(adr, SPECIFIC);
00528 }
00529
00530
extern rtheap_t
rtai_global_heap;
00531
extern void *
rtai_global_heap_adr;
00532
extern int rtai_global_heap_size;
00533
00534 static void *
rt_malloc_usp(
int size)
00535 {
00536
return rtai_global_heap_adr ?
rt_halloc_typed(size,
GLOBAL) : NULL;
00537 }
00538
00539 static void rt_free_usp(
void *adr)
00540 {
00541
if (
rtai_global_heap_adr) {
00542
rt_hfree_typed(adr,
GLOBAL);
00543 }
00544 }
00545
00546 static void *
rt_named_malloc_usp(
unsigned long name,
int size)
00547 {
00548
return rtai_global_heap_adr ?
rt_named_halloc_typed(name, size,
GLOBAL) : NULL;
00549 }
00550
00551 static void rt_named_free_usp(
void *adr)
00552 {
00553
if (
rtai_global_heap_adr) {
00554
rt_named_hfree_typed(adr,
GLOBAL);
00555 }
00556 }
00557
/*
 * Attach the calling task to a shared heap previously created with
 * rt_heap_open()/rt_shm_alloc().
 *
 * Layout of the shared block: [hptr, hptr+size) is the heap memory, with
 * the rtheap_t descriptor placed right after it; 'size' is the registered
 * block size minus the descriptor and any pre-page-alignment slack,
 * rounded down to whole pages.  The first process to get here wins the
 * atomic_cmpxchg on the heap's first word (0 -> name) and initializes the
 * heap; later attachers skip initialization.  Finally the task's heap slot
 * records the kernel address (kadr) and this caller's mapped address
 * (uadr) so _rt_halloc/_rt_hfree can translate between views.
 * GLOBAL_HEAP_ID binds the GLOBAL slot to the preexisting global heap
 * instead.
 */
static void rt_set_heap(unsigned long name, void *adr)
{
	void *heap, *hptr;
	int size;
	RT_TASK *task;

	heap = rt_get_adr(name);
	hptr = ALIGN2PAGE(heap);
	size = ((abs(rt_get_type(name)) - sizeof(rtheap_t) - (hptr - heap)) & PAGE_MASK);
	heap = hptr + size;	/* rtheap_t descriptor lives at the end */
	if (!atomic_cmpxchg((int *)hptr, 0, name)) {
		/* First attacher: set up the heap over [hptr, hptr+size). */
		rtheap_init(heap, hptr, size, PAGE_SIZE);
	}
	RTAI_TASK(return);
	if (name == GLOBAL_HEAP_ID) {
		task->heap[GLOBAL].heap = &rtai_global_heap;
		task->heap[GLOBAL].kadr = rtai_global_heap_adr;
		task->heap[GLOBAL].uadr = adr;
	} else {
		task->heap[SPECIFIC].heap = heap;
		task->heap[SPECIFIC].kadr = hptr;
		task->heap[SPECIFIC].uadr = adr;
	}
}
00582
00583
00584
00585
00586
00587
00588
00589
00590
00591
00592
00593
00594
00595
00596
00597
00598
00599
00600
00601
00602
00603
00604
00605
00606
00607
00608
00609
00610
00611
00612
00613
00614
00615
00616
00617
/*
 * Kernel-space creation/attachment of a named shared heap: allocate a
 * shared block large enough for 'size' bytes rounded to pages plus the
 * rtheap_t descriptor, then bind the calling task to it via rt_set_heap().
 *
 * Returns the block address, or 0 on allocation failure.
 */
void *rt_heap_open(unsigned long name, int size, int suprt)
{
	void *adr;

	if ((adr = rt_shm_alloc(name, ((size - 1) & PAGE_MASK) + PAGE_SIZE + sizeof(rtheap_t), suprt))) {
		rt_set_heap(name, adr);
		return adr;
	}
	return 0;
}
00627
/*
 * Native-function table exporting the SHM/heap services to the RTAI
 * syscall layer; each entry pairs a handler with its request index.
 * Terminated by a null entry.
 */
struct rt_native_fun_entry rt_shm_entries[] = {
	{ { 0, rt_shm_alloc_usp }, SHM_ALLOC },
	{ { 0, rt_shm_free }, SHM_FREE },
	{ { 0, rt_shm_size }, SHM_SIZE },
	{ { 0, rt_set_heap }, HEAP_SET },
	{ { 0, rt_halloc }, HEAP_ALLOC },
	{ { 0, rt_hfree }, HEAP_FREE },
	{ { 0, rt_named_halloc }, HEAP_NAMED_ALLOC },
	{ { 0, rt_named_hfree }, HEAP_NAMED_FREE },
	{ { 0, rt_malloc_usp }, MALLOC },
	{ { 0, rt_free_usp }, FREE },
	{ { 0, rt_named_malloc_usp }, NAMED_MALLOC },
	{ { 0, rt_named_free_usp }, NAMED_FREE },
	{ { 0, 0 }, 000 }
};
00643
00644
extern int set_rt_fun_entries(
struct rt_native_fun_entry *entry);
00645
extern void reset_rt_fun_entries(
struct rt_native_fun_entry *entry);
00646
/*
 * Module init: register the SHM misc device, publish the global heap in
 * the registry (only possible when RTAI malloc is vmalloc-based, i.e. the
 * heap is page-mappable to user space), set up the Linux task's GLOBAL
 * heap slot with identical kernel/user views, and install the native
 * function table.  Returns 0 on success, -EBUSY if the misc device cannot
 * be registered, or set_rt_fun_entries()'s status.
 */
int __rtai_shm_init (void)
{
	if (misc_register(&rtai_shm_dev) < 0) {
		printk("***** UNABLE TO REGISTER THE SHARED MEMORY DEVICE (miscdev minor: %d) *****\n", RTAI_SHM_MISC_MINOR);
		return -EBUSY;
	}
#ifndef CONFIG_RTAI_MALLOC_VMALLOC
	printk("***** WARNING: GLOBAL HEAP NEITHER SHARABLE NOR USABLE FROM USER SPACE (use the vmalloc option for RTAI malloc) *****\n");
#else
	rt_register(GLOBAL_HEAP_ID, rtai_global_heap_adr, rtai_global_heap_size, 0);
	rt_smp_linux_task->heap[GLOBAL].heap = &rtai_global_heap;
	/* In kernel space the heap is mapped at one address only: kadr == uadr. */
	rt_smp_linux_task->heap[GLOBAL].kadr =
	rt_smp_linux_task->heap[GLOBAL].uadr = rtai_global_heap_adr;
#endif
	return set_rt_fun_entries(rt_shm_entries);
}
00663
/*
 * Module exit: drop the global heap's registry entry, then sweep the whole
 * registry and force-release every leftover object whose registered size
 * is at least one page (heuristic for "this is one of our shared blocks"
 * — NOTE(review): confirm no other registry type can reach PAGE_SIZE).
 * _rt_shm_free is looped until all outstanding references are drained.
 * Finally the native function table and the misc device are torn down.
 */
void __rtai_shm_exit (void)
{
	extern int max_slots;
	int slot;
	struct rt_registry_entry entry;

	rt_drg_on_name_cnt(GLOBAL_HEAP_ID);
	for (slot = 1; slot <= max_slots; slot++) {
		if (rt_get_registry_slot(slot, &entry)) {
			if (abs(entry.type) >= PAGE_SIZE) {
				char name[8];
				/* Drain every remaining reference so the block is really freed. */
				while (_rt_shm_free(entry.name, entry.type));
				num2nam(entry.name, name);
				rt_printk("\nSHM_CLEANUP_MODULE releases: '%s':0x%lx:%lu (%d).\n", name, entry.name, entry.name, entry.type);
			}
		}
	}
	reset_rt_fun_entries(rt_shm_entries);
	misc_deregister(&rtai_shm_dev);
	return;
}
00685
00686
00687
00688
#ifndef CONFIG_RTAI_SHM_BUILTIN
00689
module_init(__rtai_shm_init);
00690
module_exit(__rtai_shm_exit);
00691
#endif
00692
00693
#ifdef CONFIG_KBUILD
00694
EXPORT_SYMBOL(rt_shm_alloc);
00695
EXPORT_SYMBOL(rt_shm_free);
00696
EXPORT_SYMBOL(rt_named_malloc);
00697
EXPORT_SYMBOL(rt_named_free);
00698
EXPORT_SYMBOL(rt_halloc);
00699
EXPORT_SYMBOL(rt_hfree);
00700
EXPORT_SYMBOL(rt_named_halloc);
00701
EXPORT_SYMBOL(rt_named_hfree);
00702
EXPORT_SYMBOL(rt_heap_open);
00703
#endif