00001
00002
00003
00004
00005
00006
00007
00008
00009
00010
00011
00012
00013
00014
00015
00016
00017
00018
00019
00020
00021
00022
00023
00024
00025
00026
00027
00028
00029
00030
00031
00032
00033
00034
00035
00036
00037
00038
00039
00040
00041
#include <linux/version.h>
00042
#include <linux/config.h>
00043
#include <linux/module.h>
00044
#include <linux/init.h>
00045
#include <linux/stddef.h>
00046
#include <linux/timer.h>
00047
#include <linux/interrupt.h>
00048
#include <asm/mach/irq.h>
00049
#include <asm/proc/ptrace.h>
00050 #define __RTAI_HAL__
00051
#include <asm/rtai_hal.h>
00052
#include <asm/rtai_lxrt.h>
00053
#include <asm/rtai_usi.h>
00054
#ifdef CONFIG_PROC_FS
00055
#include <linux/proc_fs.h>
00056
#include <rtai_proc_fs.h>
00057
#endif
00058
#include <rtai_version.h>
00059
00060
MODULE_LICENSE("GPL");

/* Signature of the optional scheduler ISR hook (see rt_set_ihook()). */
typedef void (*isr_hook_t)(int);

/* Per-IRQ dispatch table for real-time handlers installed through
   rt_request_irq(); cache-line aligned to limit false sharing. */
struct {
	rt_irq_handler_t handler;
	void *cookie;
	int retmode;
} rtai_realtime_irq[NR_IRQS] __attribute__((__aligned__(L1_CACHE_BYTES)));

adomain_t rtai_domain;				/* The RTAI Adeos domain descriptor. */
struct rt_times rt_times;
struct rt_times rt_smp_times[RTAI_NR_CPUS] = { { 0 } };
struct rtai_switch_data rtai_linux_context[RTAI_NR_CPUS];
struct calibration_data rtai_tunables;
volatile unsigned long rtai_cpu_realtime;
volatile unsigned long rtai_cpu_lock;
int rtai_adeos_ptdbase = -1;			/* Base of the two Adeos per-thread data keys. */
/* LXRT dispatcher entry; NULL until the LXRT module installs it. */
long long (*rtai_lxrt_invoke_entry)(unsigned long, void *);
/* Per-CPU ISR scheduling lock state (see rtai_irq_trampoline()). */
struct { volatile int locked, rqsted; } rt_scheduling[RTAI_NR_CPUS];

#ifdef CONFIG_PROC_FS
struct proc_dir_entry *rtai_proc_root = NULL;
#endif

/* Book-keeping for IRQs shared back with Linux via rt_request_linux_irq():
   original action flags and a share count per IRQ. */
static struct {
	unsigned long flags;
	int count;
} rtai_linux_irq[NR_IRQS];

/* System request (SRQ) table: kernel- and user-side handlers plus a
   user-visible label per slot. Slots 0 and 1 are reserved, hence the
   initial bitmap value of 3 below. */
static struct {
	void (*k_handler)(void);
	long long (*u_handler)(unsigned);
	unsigned label;
} rtai_sysreq_table[RTAI_NR_SRQS];

static unsigned rtai_sysreq_virq;		/* Adeos virtual IRQ used to pend SRQs to Linux. */
static unsigned long rtai_sysreq_map = 3;	/* Bitmap of allocated SRQ slots. */
static unsigned long rtai_sysreq_pending;	/* SRQs waiting to run in Linux. */
static unsigned long rtai_sysreq_running;	/* SRQs currently executing. */
static spinlock_t rtai_ssrq_lock = SPIN_LOCK_UNLOCKED;
static volatile int rtai_sync_level;
static atomic_t rtai_sync_count = ATOMIC_INIT(1);
static RT_TRAP_HANDLER rtai_trap_handler;	/* Optional trap handler (rt_set_trap_handler()). */
static int (*saved_adeos_syscall_handler)(struct pt_regs *regs);

#ifdef CONFIG_RTAI_SCHED_ISR_LOCK
static isr_hook_t rtai_isr_hook;
#endif
00110
00111
unsigned long
00112 rtai_critical_enter(
void (*synch)(
void))
00113 {
00114
unsigned long flags = adeos_critical_enter(synch);
00115
00116
if (
atomic_dec_and_test(&
rtai_sync_count))
00117
rtai_sync_level = 0;
00118
else if (synch != NULL)
00119
printk(KERN_INFO
"RTAI[hal]: warning: nested sync will fail.\n");
00120
00121
return flags;
00122 }
00123
00124
/*
 * Leave a critical section previously entered with
 * rtai_critical_enter(): rearm the sync counter, then let Adeos
 * restore the saved hardware state.
 */
void rtai_critical_exit (unsigned long flags)
{
	atomic_inc(&rtai_sync_count);
	adeos_critical_exit(flags);
}
00130
00131
int
00132 rt_request_irq(
unsigned irq, rt_irq_handler_t handler,
void *cookie,
int retmode)
00133 {
00134
unsigned long flags;
00135
00136
if (
handler == NULL || irq >= NR_IRQS)
00137
return -EINVAL;
00138
00139
if (
rtai_realtime_irq[irq].handler != NULL)
00140
return -EBUSY;
00141
00142
flags =
rtai_critical_enter(NULL);
00143
rtai_realtime_irq[irq].handler =
handler;
00144
rtai_realtime_irq[irq].cookie =
cookie;
00145
rtai_critical_exit(
flags);
00146
00147
return 0;
00148 }
00149
00150
int
00151 rt_release_irq(
unsigned irq)
00152 {
00153
unsigned long flags;
00154
00155
if (irq >= NR_IRQS || !
rtai_realtime_irq[irq].handler)
00156
return -EINVAL;
00157
00158
flags =
rtai_critical_enter(NULL);
00159
rtai_realtime_irq[irq].handler = NULL;
00160
rtai_critical_exit(
flags);
00161
00162
return 0;
00163 }
00164
00165
void
00166 rt_set_irq_cookie(
unsigned irq,
void *cookie)
00167 {
00168
if (irq < NR_IRQS)
00169
rtai_realtime_irq[irq].cookie =
cookie;
00170 }
00171
00172
00173
00174
00175
00176
00177
00178
00179
00180
00181
00182
00183
00184
00185
00186
00187
00188
00189
00190
00191
00192
00193
00194
00195
00196
00197
00198
00199
00200
00201
00202
unsigned
00203 rt_startup_irq(
unsigned irq)
00204 {
00205
struct irqdesc *
id = &irq_desc[irq];
00206
id->probing = 0;
00207
id->triggered = 0;
00208
id->disable_depth = 0;
00209
id->unmask(irq);
00210
return 0;
00211 }
00212
00213
void
00214 rt_shutdown_irq(
unsigned irq)
00215 {
00216
struct irqdesc *
id = &irq_desc[irq];
00217
id->disable_depth = (
unsigned int)-1;
00218
id->mask(irq);
00219 }
00220
00221
void
00222 rt_enable_irq(
unsigned irq)
00223 {
00224
struct irqdesc *
id = &irq_desc[irq];
00225
if (
id->disable_depth == 0) {
00226
printk(KERN_ERR
"RTAI[hal]: %s(%u) unbalanced from %p\n",
00227 __func__, irq, __builtin_return_address(0));
00228 }
else if (--
id->disable_depth == 0) {
00229
id->probing = 0;
00230
id->unmask(irq);
00231 }
00232 }
00233
00234
void
00235 rt_disable_irq(
unsigned irq)
00236 {
00237
struct irqdesc *
id = &irq_desc[irq];
00238
if (
id->disable_depth++ == 0)
00239
id->mask(irq);
00240 }
00241
00242
/* Mask and acknowledge @irq at the interrupt controller in one step. */
void rt_mask_and_ack_irq (unsigned irq)
{
	irq_desc[irq].mask_ack(irq);
}
00247
00248
/* Unmask @irq at the interrupt controller. */
void rt_unmask_irq (unsigned irq)
{
	irq_desc[irq].unmask(irq);
}
00253
00254
void
00255 rt_ack_irq(
unsigned irq)
00256 {
00257
00258
struct irqdesc *
id = &irq_desc[irq];
00259
id->mask_ack(irq);
00260
id->unmask(irq);
00261 }
00262
00263
00264
00265
00266
00267
00268
00269
00270
00271
00272
00273
00274
00275
00276
00277
00278
00279
00280
00281
00282
00283
00284
00285
00286
00287
int
00288 rt_request_linux_irq(
unsigned irq,
00289 irqreturn_t (*handler)(
int irq,
void *dev_id,
struct pt_regs *regs),
00290
char *name,
00291
void *dev_id)
00292 {
00293
unsigned long flags;
00294
00295
if (irq >= NR_IRQS || !
handler)
00296
return -EINVAL;
00297
00298
rtai_save_flags_and_cli(
flags);
00299
00300
if (
rtai_linux_irq[irq].count++ == 0 && irq_desc[irq].action) {
00301
rtai_linux_irq[irq].flags = irq_desc[irq].action->flags;
00302 irq_desc[irq].action->flags |= SA_SHIRQ;
00303 }
00304
00305
rtai_restore_flags(
flags);
00306
00307 request_irq(irq,
handler, SA_SHIRQ, name, dev_id);
00308
00309
return 0;
00310 }
00311
00312
00313
00314
00315
00316
00317
00318
00319
00320
00321
00322
00323
int
00324 rt_free_linux_irq(
unsigned irq,
void *dev_id)
00325 {
00326
unsigned long flags;
00327
00328
if (irq >= NR_IRQS ||
rtai_linux_irq[irq].count == 0)
00329
return -EINVAL;
00330
00331
rtai_save_flags_and_cli(
flags);
00332
00333 free_irq(irq, dev_id);
00334
00335
if (--
rtai_linux_irq[irq].count == 0 && irq_desc[irq].action)
00336 irq_desc[irq].action->flags =
rtai_linux_irq[irq].flags;
00337
00338
rtai_restore_flags(
flags);
00339
00340
return 0;
00341 }
00342
00343
00344
00345
00346
00347
00348
00349
00350
00351
/* Pend @irq down the Adeos pipeline for Linux to handle. */
void rt_pend_linux_irq (unsigned irq)
{
	adeos_propagate_irq(irq);
}
00356
00357
00358
00359
00360
00361
00362
00363
00364
00365
00366
00367
00368
00369
00370
00371
00372
00373
00374
00375
00376
int
00377 rt_request_srq(
unsigned label,
void (*k_handler)(
void),
long long (*u_handler)(
unsigned))
00378 {
00379
unsigned long flags;
00380
int srq;
00381
00382
if (
k_handler == NULL)
00383
return -EINVAL;
00384
00385
rtai_save_flags_and_cli(
flags);
00386
00387
if (
rtai_sysreq_map != ~0) {
00388
srq = ffz(
rtai_sysreq_map);
00389 set_bit(
srq, &
rtai_sysreq_map);
00390
rtai_sysreq_table[
srq].k_handler =
k_handler;
00391
rtai_sysreq_table[
srq].u_handler =
u_handler;
00392
rtai_sysreq_table[
srq].label =
label;
00393 }
else
00394
srq = -EBUSY;
00395
00396
rtai_restore_flags(
flags);
00397
00398
return srq;
00399 }
00400
00401
00402
00403
00404
00405
00406
00407
00408
00409
int
00410 rt_free_srq(
unsigned srq)
00411 {
00412
return (srq < 2 || srq >= RTAI_NR_SRQS || !test_and_clear_bit(
srq, &
rtai_sysreq_map))
00413 ? -EINVAL
00414 : 0;
00415 }
00416
00417
00418
00419
00420
00421
00422
00423
00424
00425
00426
00427
void
00428 rt_pend_linux_srq(
unsigned srq)
00429 {
00430
int cpuid;
00431
if (
srq > 0 &&
srq < RTAI_NR_SRQS) {
00432 set_bit(
srq, &
rtai_sysreq_pending);
00433
cpuid = rtai_cpuid();
00434
if (adp_cpu_current[
cpuid] == &
rtai_domain)
00435 adeos_propagate_irq(
rtai_sysreq_virq);
00436
else
00437 adeos_schedule_irq(
rtai_sysreq_virq);
00438 }
00439 }
00440
00441
00442
00443
00444
00445
00446
00447
00448
00449
00450
00451
00452
RT_TRAP_HANDLER
00453 rt_set_trap_handler(
RT_TRAP_HANDLER handler)
00454 {
00455
return (
RT_TRAP_HANDLER)xchg(&
rtai_trap_handler,
handler);
00456 }
00457
00458
/*
 * Dispatch an IRQ intercepted in the RTAI domain. If a real-time
 * handler was installed via rt_request_irq() it is invoked with its
 * cookie; otherwise the IRQ is propagated down the Adeos pipeline to
 * Linux. With CONFIG_RTAI_SCHED_ISR_LOCK, handler invocations are
 * bracketed by a per-CPU nesting counter so the scheduler hook runs
 * only once, after the outermost handler returns.
 */
static void rtai_irq_trampoline (unsigned irq)
{
	TRACE_RTAI_GLOBAL_IRQ_ENTRY(irq, 0);

	if (rtai_realtime_irq[irq].handler)
	{
#ifdef CONFIG_RTAI_SCHED_ISR_LOCK
		adeos_declare_cpuid;
		adeos_load_cpuid();
		/* Outermost nesting level: clear the reschedule request. */
		if (!rt_scheduling[cpuid].locked++)
			rt_scheduling[cpuid].rqsted = 0;
#endif
		rtai_realtime_irq[irq].handler(irq, rtai_realtime_irq[irq].cookie);
#ifdef CONFIG_RTAI_SCHED_ISR_LOCK
		/* Leaving the outermost level: fire the scheduler hook if
		   a reschedule was requested while ISRs were locked. */
		if (rt_scheduling[cpuid].locked && !(--rt_scheduling[cpuid].locked))
			if (rt_scheduling[cpuid].rqsted > 0 && rtai_isr_hook)
				rtai_isr_hook(cpuid);
#endif
	}
	else
		adeos_propagate_irq(irq);

	TRACE_RTAI_GLOBAL_IRQ_EXIT();
}
00483
00484
/*
 * Adeos fault/trap event handler. Traps raised while the RTAI domain
 * is current are offered to the registered RT_TRAP_HANDLER (if any);
 * unhandled traps, and any trap originating in another domain, are
 * propagated to the next domain in the pipeline (ultimately Linux).
 */
static void rtai_trap_fault (adevinfo_t *evinfo)
{
	adeos_declare_cpuid;

	TRACE_RTAI_TRAP_ENTRY(evinfo->event, 0);

	if (evinfo->domid != RTAI_DOMAIN_ID)
		goto propagate;

	adeos_load_cpuid();

	/* A non-zero return means the handler dealt with the fault;
	   do not pass it down the pipeline. */
	if (rtai_trap_handler != NULL
	    && rtai_trap_handler(evinfo->event, evinfo->event, (struct pt_regs *)evinfo->evdata,
				 (void *)cpuid) != 0)
		goto endtrap;

propagate:
	adeos_propagate_event(evinfo);

endtrap:
	TRACE_RTAI_TRAP_EXIT();
}
00514
00515
/*
 * Virtual-IRQ handler running SRQ kernel-side handlers in the Linux
 * domain. Each pending SRQ is marked running (so it is not re-entered)
 * and its pending bit cleared; the lock is dropped around the handler
 * call so handlers may themselves pend further SRQs.
 */
static void rtai_ssrq_trampoline (unsigned virq)
{
	unsigned long pending;

	spin_lock(&rtai_ssrq_lock);

	while ((pending = rtai_sysreq_pending & ~rtai_sysreq_running) != 0) {
		unsigned srq = ffnz(pending);
		set_bit(srq, &rtai_sysreq_running);
		clear_bit(srq, &rtai_sysreq_pending);
		spin_unlock(&rtai_ssrq_lock);

		/* The slot may have been freed meanwhile; check the map. */
		if (test_bit(srq, &rtai_sysreq_map))
			rtai_sysreq_table[srq].k_handler();

		clear_bit(srq, &rtai_sysreq_running);
		spin_lock(&rtai_ssrq_lock);
	}

	spin_unlock(&rtai_ssrq_lock);
}
00537
00538
extern inline long long
00539 rtai_usrq_trampoline(
unsigned long srq,
unsigned long label)
00540 {
00541
long long r = 0;
00542
00543
TRACE_RTAI_SRQ_ENTRY(
srq,
label);
00544
00545
if (
srq > 1 &&
srq < RTAI_NR_SRQS
00546 && test_bit(
srq, &
rtai_sysreq_map)
00547 &&
rtai_sysreq_table[
srq].u_handler != NULL)
00548 r =
rtai_sysreq_table[
srq].u_handler(
label);
00549
else
00550
for (
srq = 2;
srq < RTAI_NR_SRQS;
srq++)
00551
if (test_bit(
srq, &
rtai_sysreq_map)
00552 &&
rtai_sysreq_table[
srq].label ==
label)
00553 r = (
long long)
srq;
00554
00555
TRACE_RTAI_SRQ_EXIT();
00556
00557
return r;
00558 }
00559
00560
00561
00562
/*
 * Adeos syscall entry installed by __rtai_hal_init(). Decodes the
 * request number from r0 and its argument from r1, routes it either to
 * the LXRT dispatcher (numbers above the SRQ range) or to the SRQ
 * trampoline, and stores the 64-bit result back over r0/r1.
 * Returns 1 (syscall fully handled) when the CPU is in hard real-time
 * mode, 0 otherwise after re-enabling local interrupts.
 */
static int rtai_syscall_trampoline (struct pt_regs *regs)
{
	unsigned long srq = regs->ARM_r0;
	unsigned long arg = regs->ARM_r1;

#ifdef USI_SRQ_MASK
	IF_IS_A_USI_SRQ_CALL_IT();
#endif

	{
		long long r = srq > RTAI_NR_SRQS
			? rtai_lxrt_invoke_entry != NULL
				? rtai_lxrt_invoke_entry(srq, (void *)arg)
				: -ENODEV
			: rtai_usrq_trampoline(srq, arg);
		/* 64-bit return value goes back in the r0/r1 pair. */
		*(long long*)&regs->ARM_r0 = r;
	}

	if (in_hrt_mode(rtai_cpuid()))
		return 1;
	local_irq_enable();
	return 0;
}
00585
00586
/*
 * Atomically install @hookfn as the scheduler ISR hook and return the
 * previous hook. Only effective with CONFIG_RTAI_SCHED_ISR_LOCK;
 * otherwise a no-op returning NULL.
 */
isr_hook_t rt_set_ihook (isr_hook_t hookfn)
{
#ifdef CONFIG_RTAI_SCHED_ISR_LOCK
	return (isr_hook_t)xchg(&rtai_isr_hook, hookfn);
#else
	return NULL;
#endif
}
00595
00596
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
/*
 * Set the scheduling policy and real-time priority of a Linux task by
 * poking the 2.4 task fields directly, then flag the current task for
 * rescheduling so the change takes effect.
 */
void rtai_set_linux_task_priority (struct task_struct *task, int policy, int prio)
{
	task->policy = policy;
	task->rt_priority = prio;
	set_tsk_need_resched(current);
}
#else
#error "Sorry, Kernels >= 2.6.0 not supported (yet)"
#endif
00607
00608
#ifdef CONFIG_PROC_FS
00609
00610
/*
 * /proc/rtai/rtai read callback. Prints HAL version and timer
 * calibration data, then the list of IRQs with real-time handlers and
 * the SRQ slots in use. Uses the PROC_PRINT_* macros from
 * rtai_proc_fs.h, which manage the page buffer and return value.
 */
static int rtai_read_proc (char *page, char **start, off_t off, int count, int *eof, void *data)
{
	PROC_PRINT_VARS;
	int i, none;

	PROC_PRINT("\n** RTAI/ARM %s over Adeos %s\n\n", RTAI_RELEASE, ADEOS_VERSION_STRING);
	PROC_PRINT(" TSC frequency: %d Hz\n", RTAI_TSC_FREQ);
	PROC_PRINT(" Timer frequency: %d Hz\n", RTAI_TIMER_FREQ);
	PROC_PRINT(" Timer latency: %d ns, %d TSC ticks\n", RTAI_TIMER_LATENCY,
		   rtai_imuldiv(RTAI_TIMER_LATENCY, RTAI_TSC_FREQ, 1000000000));
	PROC_PRINT(" Timer setup: %d ns\n", RTAI_TIMER_SETUP_TIME);
	PROC_PRINT(" Timer setup: %d TSC ticks, %d IRQ-timer ticks\n",
		   rtai_imuldiv(RTAI_TIMER_SETUP_TIME, RTAI_TSC_FREQ, 1000000000),
		   rtai_imuldiv(RTAI_TIMER_SETUP_TIME, RTAI_TIMER_FREQ, 1000000000));
	none = 1;

	PROC_PRINT("\n** Real-time IRQs used by RTAI: ");

	for (i = 0; i < NR_IRQS; i++) {
		if (rtai_realtime_irq[i].handler) {
			if (none) {
				PROC_PRINT("\n");
				none = 0;
			}
			PROC_PRINT("\n #%d at %p", i, rtai_realtime_irq[i].handler);
		}
	}

	if (none)
		PROC_PRINT("none");

	PROC_PRINT("\n\n");

	PROC_PRINT("** RTAI extension traps: \n\n");
	PROC_PRINT(" SYSREQ=0x%x\n", RTAI_SYS_VECTOR);
#if 0
	PROC_PRINT(" SHM=0x%x\n", RTAI_SHM_VECTOR);
#endif
	PROC_PRINT("\n");

	none = 1;
	PROC_PRINT("** RTAI SYSREQs in use: ");

	for (i = 0; i < RTAI_NR_SRQS; i++) {
		if (rtai_sysreq_table[i].k_handler || rtai_sysreq_table[i].u_handler) {
			PROC_PRINT("#%d ", i);
			none = 0;
		}
	}

	if (none)
		PROC_PRINT("none");

	PROC_PRINT("\n\n");

	PROC_PRINT_DONE;
}
00669
00670
static int
00671 rtai_proc_register(
void)
00672 {
00673
struct proc_dir_entry *ent;
00674
00675
rtai_proc_root = create_proc_entry(
"rtai", S_IFDIR, 0);
00676
00677
if (!
rtai_proc_root) {
00678
printk(KERN_ERR
"RTAI[hal]: Unable to initialize /proc/rtai.\n");
00679
return -1;
00680 }
00681
00682
rtai_proc_root->owner = THIS_MODULE;
00683
00684 ent = create_proc_entry(
"rtai", S_IFREG|S_IRUGO|S_IWUSR, rtai_proc_root);
00685
00686
if (!ent) {
00687
printk(KERN_ERR
"RTAI[hal]: Unable to initialize /proc/rtai/rtai.\n");
00688
return -1;
00689 }
00690
00691 ent->read_proc = rtai_read_proc;
00692
00693
return 0;
00694 }
00695
00696
/* Remove /proc/rtai/rtai, then the /proc/rtai directory itself. */
static void rtai_proc_unregister (void)
{
	remove_proc_entry("rtai", rtai_proc_root);
	remove_proc_entry("rtai", 0);
}
00702
00703
#endif
00704
00705
/*
 * Entry point of the RTAI Adeos domain. On the initialization pass
 * (iflag != 0) it grabs every hardware IRQ and every fault event for
 * the RTAI domain. With CONFIG_ADEOS_THREADS the domain runs as a
 * thread and must never return, so it then suspends forever.
 */
static void rtai_domain_entry (int iflag)
{
	unsigned irq, trapnr;

	if (iflag) {
		/* Intercept all hardware IRQs in this domain. */
		for (irq = 0; irq < NR_IRQS; irq++)
			adeos_virtualize_irq(irq,
					     &rtai_irq_trampoline,
					     NULL,
					     IPIPE_DYNAMIC_MASK);

		/* Catch every fault/trap event as well. */
		for (trapnr = 0; trapnr < ADEOS_NR_FAULTS; trapnr++)
			adeos_catch_event(trapnr, &rtai_trap_fault);

		printk(KERN_INFO "RTAI[hal]: %s mounted over Adeos %s.\n", PACKAGE_VERSION, ADEOS_VERSION_STRING);
		printk(KERN_INFO "RTAI[hal]: compiled with %s.\n", CONFIG_RTAI_COMPILER);
	}

#ifdef CONFIG_ADEOS_THREADS
	for (;;)
		adeos_suspend_domain();
#endif
}
00729
00730
int
00731 __rtai_hal_init(
void)
00732 {
00733
unsigned long flags;
00734 adattr_t attr;
00735
00736
00737
00738
rtai_sysreq_virq = adeos_alloc_irq();
00739
00740
if (!
rtai_sysreq_virq) {
00741
printk(KERN_ERR
"RTAI[hal]: no virtual interrupt available.\n");
00742
return 1;
00743 }
00744
00745
00746
00747
00748
00749
00750
00751
flags =
rtai_critical_enter(NULL);
00752
rtai_adeos_ptdbase = adeos_alloc_ptdkey();
00753
if (adeos_alloc_ptdkey() !=
rtai_adeos_ptdbase + 1) {
00754
rtai_critical_exit(
flags);
00755
printk(KERN_ERR
"RTAI[hal]: per-thread keys not available.\n");
00756
return 1;
00757 }
00758
00759
rtai_lxrt_invoke_entry = NULL;
00760
saved_adeos_syscall_handler = xchg(&adeos_syscall_entry,
rtai_syscall_trampoline);
00761
rtai_critical_exit(
flags);
00762
00763 adeos_virtualize_irq(
rtai_sysreq_virq,
00764 &
rtai_ssrq_trampoline,
00765 NULL,
00766 IPIPE_HANDLE_MASK);
00767
00768
00769
rtai_tunables.cpu_freq =
RTAI_TSC_FREQ;
00770
00771
#ifdef CONFIG_PROC_FS
00772
rtai_proc_register();
00773
#endif
00774
00775
00776
rtai_archdep_init();
00777
00778
00779 adeos_init_attr(&attr);
00780 attr.name =
"RTAI";
00781 attr.domid = RTAI_DOMAIN_ID;
00782 attr.entry = &
rtai_domain_entry;
00783 attr.priority = ADEOS_ROOT_PRI + 100;
00784
00785
printk(KERN_INFO
"RTAI[hal]: mounted (PIPED).\n");
00786
00787
return adeos_register_domain(&
rtai_domain, &attr);
00788 }
00789
00790
/*
 * Module teardown: undo everything __rtai_hal_init() set up —
 * arch-dependent state, /proc entries, the SRQ virtual IRQ, the
 * syscall trampoline, the per-thread keys and the domain itself.
 */
void __rtai_hal_exit (void)
{
	unsigned long flags;

	rtai_archdep_exit();

#ifdef CONFIG_PROC_FS
	rtai_proc_unregister();
#endif

	/* Detach the SRQ virtual IRQ and give it back to Adeos. */
	adeos_virtualize_irq(rtai_sysreq_virq, NULL, NULL, 0);
	adeos_free_irq(rtai_sysreq_virq);

	/* Restore the original Adeos syscall entry point. */
	flags = rtai_critical_enter(NULL);
	adeos_syscall_entry = saved_adeos_syscall_handler;
	rtai_lxrt_invoke_entry = NULL;
	rtai_critical_exit(flags);

	adeos_free_ptdkey(rtai_adeos_ptdbase);
	adeos_free_ptdkey(rtai_adeos_ptdbase + 1);
	adeos_unregister_domain(&rtai_domain);

	printk(KERN_INFO "RTAI[hal]: unmounted.\n");
}
00817
00818
module_init(__rtai_hal_init);
module_exit(__rtai_hal_exit);

/* HAL entry points and shared state exported to other RTAI modules
   (schedulers, LXRT, drivers). */
EXPORT_SYMBOL(rt_request_irq);
EXPORT_SYMBOL(rt_release_irq);
EXPORT_SYMBOL(rt_set_irq_cookie);
EXPORT_SYMBOL(rt_startup_irq);
EXPORT_SYMBOL(rt_shutdown_irq);
EXPORT_SYMBOL(rt_enable_irq);
EXPORT_SYMBOL(rt_disable_irq);
EXPORT_SYMBOL(rt_mask_and_ack_irq);
EXPORT_SYMBOL(rt_unmask_irq);
EXPORT_SYMBOL(rt_ack_irq);
EXPORT_SYMBOL(rt_request_linux_irq);
EXPORT_SYMBOL(rt_free_linux_irq);
EXPORT_SYMBOL(rt_pend_linux_irq);
EXPORT_SYMBOL(rt_request_srq);
EXPORT_SYMBOL(rt_free_srq);
EXPORT_SYMBOL(rt_pend_linux_srq);
EXPORT_SYMBOL(rt_request_timer);
EXPORT_SYMBOL(rt_free_timer);
EXPORT_SYMBOL(rt_set_trap_handler);
EXPORT_SYMBOL(rt_set_ihook);
EXPORT_SYMBOL(rtai_critical_enter);
EXPORT_SYMBOL(rtai_critical_exit);
EXPORT_SYMBOL(rtai_set_linux_task_priority);
EXPORT_SYMBOL(rtai_linux_context);
EXPORT_SYMBOL(rtai_domain);
EXPORT_SYMBOL(rtai_proc_root);
EXPORT_SYMBOL(rtai_tunables);
EXPORT_SYMBOL(rtai_cpu_lock);
EXPORT_SYMBOL(rtai_cpu_realtime);
EXPORT_SYMBOL(rt_times);
EXPORT_SYMBOL(rt_smp_times);
EXPORT_SYMBOL(rtai_lxrt_invoke_entry);
EXPORT_SYMBOL(rtai_realtime_irq);
EXPORT_SYMBOL(rt_scheduling);