00001
00002
00003
00004
00005
00006
00007
00008
00009
00010
00011
00012
00013
00014
00015
00016
00017
00018
00019
00020
00021
00022
00023
00024
00025
00026
00027
00028
00029
00030
00031
00032
00033
00034
00035
00036
00037
00038
00039
00040
00041
00042
00043 #include <linux/module.h>
00044 #include <linux/delay.h>
00045
00046 MODULE_LICENSE("GPL");
00047
00048 #include <asm/rtai_hal.h>
00049
00050 #undef INCLUDED_BY_HAL_C
00051 #define INCLUDED_BY_HAL_C
00052
00053 #define CHECK_STACK_IN_IRQ 0
00054
00055 #include <linux/version.h>
00056 #include <linux/slab.h>
00057 #include <linux/errno.h>
00058 #include <linux/module.h>
00059 #include <linux/interrupt.h>
00060
00061 #include <linux/console.h>
00062 #include <asm/system.h>
00063
00064
00065 #include <asm/machdep.h>
00066 #include <asm/io.h>
00067 #include <asm/mmu_context.h>
00068 #include <asm/uaccess.h>
00069 #include <asm/unistd.h>
00070 #include <asm/mcfsim.h>
00071 #define __RTAI_HAL__
00072 #include <asm/rtai_hal.h>
00073 #include <asm/rtai_lxrt.h>
00074 #ifdef CONFIG_PROC_FS
00075 #include <linux/stat.h>
00076 #include <linux/proc_fs.h>
00077 #include <rtai_proc_fs.h>
00078 #endif
00079 #include <stdarg.h>
00080
/* CPU clock frequency (Hz); defaults to the board clock and may be
 * overridden as a module parameter or refined from HAL sysinfo in
 * rtai_install_archdep(). */
static unsigned long rtai_cpufreq_arg = CONFIG_CLOCK_FREQ;
RTAI_MODULE_PARM(rtai_cpufreq_arg, ulong);

#define RTAI_NR_IRQS IPIPE_NR_XIRQS

/* No local APIC on Coldfire: the x86-derived APIC hooks compile away. */
#define rtai_setup_periodic_apic(count, vector)

#define rtai_setup_oneshot_apic(count, vector)

#define __ack_APIC_irq()

/* Per-CPU scheduler ISR-lock state used by RTAI_SCHED_ISR_{LOCK,UNLOCK}. */
struct { volatile int locked, rqsted; } rt_scheduling[RTAI_NR_CPUS];

#ifdef CONFIG_RTAI_SCHED_ISR_LOCK
/* Optional scheduler hook called when a request was queued under ISR lock. */
static void (*rtai_isr_hook)(int cpuid);
#endif

/* The RTAI I-pipe domain, registered in __rtai_hal_init(). */
struct hal_domain_struct rtai_domain;

/* Per-IRQ real-time handler/cookie/ack bookkeeping. */
struct rtai_realtime_irq_s rtai_realtime_irq[RTAI_NR_IRQS];

/* Per-IRQ Linux-side sharing state: saved action flags and user count. */
static struct {
	unsigned long flags;
	int count;
} rtai_linux_irq[RTAI_NR_IRQS];

/* System request (SRQ) table: kernel handler, user handler, user label. */
static struct {
	void (*k_handler)(void);
	long long (*u_handler)(unsigned long);
	unsigned long label;
} rtai_sysreq_table[RTAI_NR_SRQS];

/* Virtual IRQ used to funnel pended SRQs into Linux context. */
static unsigned rtai_sysreq_virq;

/* Bit 0 is pre-set so valid SRQ identifiers start at 1. */
static unsigned long rtai_sysreq_map = 1;

/* SRQs posted but not yet dispatched. */
static unsigned long rtai_sysreq_pending;

/* SRQs whose kernel handler is currently executing. */
static unsigned long rtai_sysreq_running;

/* Guards the two SRQ bitmasks above during dispatch. */
static spinlock_t rtai_lsrq_lock = SPIN_LOCK_UNLOCKED;

static volatile int rtai_sync_level;

/* Starts at 1: first rtai_critical_enter() decrements it to zero. */
static atomic_t rtai_sync_count = ATOMIC_INIT(1);

/* Saved exception vectors, restored by rtai_uninstall_archdep(). */
static struct desc_struct rtai_sysvec;

static struct desc_struct rtai_cmpxchg_trap_vec;
static struct desc_struct rtai_xchg_trap_vec;

/* Hook invoked by rtai_trap_fault() for faults taken in hard RT mode. */
static RT_TRAP_HANDLER rtai_trap_handler;

struct rt_times rt_times;

struct rt_times rt_smp_times[RTAI_NR_CPUS];

/* Per-CPU Linux/RTAI context-switch bookkeeping. */
struct rtai_switch_data rtai_linux_context[RTAI_NR_CPUS];

#if LINUX_VERSION_CODE < RTAI_LT_KERNEL_VERSION_FOR_NONPERCPU
/* Cached pointers to the root domain's per-CPU status words. */
volatile unsigned long *ipipe_root_status[RTAI_NR_CPUS];
#endif

struct calibration_data rtai_tunables;

/* Bitmask of CPUs currently running in hard real-time mode. */
volatile unsigned long rtai_cpu_realtime;

volatile unsigned long rtai_cpu_lock[2];
00153
00154 unsigned long rtai_critical_enter (void (*synch)(void))
00155 {
00156 unsigned long flags;
00157
00158 flags = hal_critical_enter(synch);
00159 if (atomic_dec_and_test(&rtai_sync_count)) {
00160 rtai_sync_level = 0;
00161 } else if (synch != NULL) {
00162 printk(KERN_INFO "RTAI[hal]: warning: nested sync will fail.\n");
00163 }
00164 return flags;
00165 }
00166
/*
 * rtai_critical_exit - leave a critical section opened by rtai_critical_enter().
 * @flags: the value returned by the matching rtai_critical_enter() call.
 */
void rtai_critical_exit (unsigned long flags)
{
	atomic_inc(&rtai_sync_count);
	hal_critical_exit(flags);
}
00172
/* CPUs reserved for real-time IRQs; unused on this UP port (forced to 0
 * at init time). */
unsigned long IsolCpusMask = 0;
RTAI_MODULE_PARM(IsolCpusMask, ulong);
00175
00176 int rt_request_irq (unsigned irq, int (*handler)(unsigned irq, void *cookie), void *cookie, int retmode)
00177 {
00178 unsigned long flags;
00179
00180 if (handler == NULL || irq >= RTAI_NR_IRQS) {
00181 return -EINVAL;
00182 }
00183 if (rtai_realtime_irq[irq].handler != NULL) {
00184 return -EBUSY;
00185 }
00186 flags = rtai_critical_enter(NULL);
00187 rtai_realtime_irq[irq].handler = (void *)handler;
00188 rtai_realtime_irq[irq].cookie = cookie;
00189 rtai_realtime_irq[irq].retmode = retmode ? 1 : 0;
00190 rtai_realtime_irq[irq].irq_ack = hal_root_domain->irqs[irq].acknowledge;
00191 rtai_critical_exit(flags);
00192 if (IsolCpusMask && irq < IPIPE_NR_XIRQS) {
00193 rtai_realtime_irq[irq].cpumask = rt_assign_irq_to_cpu(irq, IsolCpusMask);
00194 }
00195 return 0;
00196 }
00197
/*
 * rt_release_irq - remove the real-time handler of @irq.
 *
 * Restores the root-domain acknowledge routine and, when CPU isolation
 * is active, the IRQ affinity saved by rt_request_irq().
 * Returns 0 on success, -EINVAL if @irq is out of range or unhandled.
 */
int rt_release_irq (unsigned irq)
{
	unsigned long flags;
	if (irq >= RTAI_NR_IRQS || !rtai_realtime_irq[irq].handler) {
		return -EINVAL;
	}
	flags = rtai_critical_enter(NULL);
	rtai_realtime_irq[irq].handler = NULL;
	rtai_realtime_irq[irq].irq_ack = hal_root_domain->irqs[irq].acknowledge;
	rtai_critical_exit(flags);
	if (IsolCpusMask && irq < IPIPE_NR_XIRQS) {
		/* Restore the affinity recorded at request time. */
		rt_assign_irq_to_cpu(irq, rtai_realtime_irq[irq].cpumask);
	}
	return 0;
}
00213
00214
00215 #include <asm/coldfire.h>
00216 #include <asm/mcftimer.h>
00217
/* Non-zero while the single timer channel is claimed by rt_request_timer(). */
static int timer_inuse=0;
00219
/*
 * rt_ack_tmr - acknowledge a Coldfire timer interrupt.
 *
 * Clears the capture/reference event bits in the timer event register,
 * then reads the counter (the read appears to be part of the ack
 * sequence on this hardware -- NOTE(review): confirm against the MCF
 * timer manual). Always returns 0.
 */
int rt_ack_tmr(unsigned int irq)
{

	__raw_writeb(MCFTIMER_TER_CAP | MCFTIMER_TER_REF, TA(MCFTIMER_TER));


	read_timer_cnt();
	return 0;
}
00229
/*
 * rt_ack_uart - acknowledge a UART interrupt by masking it at the
 * interrupt controller; the handler is expected to re-enable it.
 * Always returns 0.
 */
int rt_ack_uart(unsigned int irq)
{
	mcf_disable_irq0(irq);
	return 0;
}
00235
00236 int rt_set_irq_ack(unsigned irq, int (*irq_ack)(unsigned int))
00237 {
00238 if (irq >= RTAI_NR_IRQS) {
00239 return -EINVAL;
00240 }
00241 rtai_realtime_irq[irq].irq_ack = irq_ack ? irq_ack : hal_root_domain->irqs[irq].acknowledge;
00242 return 0;
00243 }
00244
00245 void rt_set_irq_cookie (unsigned irq, void *cookie)
00246 {
00247 if (irq < RTAI_NR_IRQS) {
00248 rtai_realtime_irq[irq].cookie = cookie;
00249 }
00250 }
00251
00252 void rt_set_irq_retmode (unsigned irq, int retmode)
00253 {
00254 if (irq < RTAI_NR_IRQS) {
00255 rtai_realtime_irq[irq].retmode = retmode ? 1 : 0;
00256 }
00257 }
00258
00259 extern unsigned long io_apic_irqs;
00260
00261 #if LINUX_VERSION_CODE >= RTAI_LT_KERNEL_VERSION_FOR_IRQDESC
00262 #define rtai_irq_desc(irq) (irq_desc[irq].chip)
00263 #endif
00264
00265 #define BEGIN_PIC()
00266 #define END_PIC()
00267 #undef hal_lock_irq
00268 #undef hal_unlock_irq
00269 #define hal_lock_irq(x, y, z)
00270 #define hal_unlock_irq(x, y)
00271
00272
00273
00274
00275
00276
00277
00278
00279
00280
00281
00282
00283
00284
00285
00286
00287
00288
00289
00290
00291
00292
00293
00294
00295
00296
00297
00298
/*
 * rt_startup_irq - start up @irq at the interrupt controller.
 *
 * On recent kernels this unlocks the IRQ in the root domain and calls
 * the chip's startup routine; on older kernels it is a no-op returning 0.
 * BEGIN_PIC/END_PIC and hal_unlock_irq are empty on this port.
 */
unsigned rt_startup_irq (unsigned irq)
{
#if LINUX_VERSION_CODE >= RTAI_LT_KERNEL_VERSION_FOR_IRQDESC
	int retval;

	BEGIN_PIC();
	hal_unlock_irq(hal_root_domain, irq);
	retval = rtai_irq_desc(irq)->startup(irq);
	END_PIC();
	return retval;
#else
	return 0;
#endif
}
00313
00314
00315
00316
00317
00318
00319
00320
00321
00322
00323
00324
00325
00326
00327
00328
00329
00330
00331
00332
00333
00334
00335
00336
00337
00338
00339
00340
00341
00342
/*
 * rt_shutdown_irq - shut down @irq via the chip's shutdown routine and,
 * on older kernels, clear any pending instance in the root domain.
 * No-op on kernels without irq_desc chip operations.
 */
void rt_shutdown_irq (unsigned irq)
{
#if LINUX_VERSION_CODE >= RTAI_LT_KERNEL_VERSION_FOR_IRQDESC
	BEGIN_PIC();
	rtai_irq_desc(irq)->shutdown(irq);
#if LINUX_VERSION_CODE < RTAI_LT_KERNEL_VERSION_FOR_NONPERCPU
	hal_clear_irq(hal_root_domain, irq);
#endif
	END_PIC();
#endif
}
00354
/*
 * _rt_enable_irq - common helper: unlock @irq in the root domain and
 * enable it at the interrupt chip. Shared by rt_enable_irq() and
 * rt_ack_irq().
 */
static inline void _rt_enable_irq (unsigned irq)
{
#if LINUX_VERSION_CODE >= RTAI_LT_KERNEL_VERSION_FOR_IRQDESC
	BEGIN_PIC();
	hal_unlock_irq(hal_root_domain, irq);
	rtai_irq_desc(irq)->enable(irq);
	END_PIC();
#endif
}
00364
00365
00366
00367
00368
00369
00370
00371
00372
00373
00374
00375
00376
00377
00378
00379
00380
00381
00382
00383
00384
00385
00386
00387
00388
00389
00390
00391
/* rt_enable_irq - public wrapper around _rt_enable_irq(). */
void rt_enable_irq (unsigned irq)
{
	_rt_enable_irq(irq);
}
00396
00397
00398
00399
00400
00401
00402
00403
00404
00405
00406
00407
00408
00409
00410
00411
00412
00413
00414
00415
00416
00417
00418
00419
00420
00421
00422
00423
/*
 * rt_disable_irq - disable @irq at the chip and lock it in the root
 * domain so Linux does not re-enable it behind RTAI's back.
 * (hal_lock_irq is compiled away on this port.)
 */
void rt_disable_irq (unsigned irq)
{
#if LINUX_VERSION_CODE >= RTAI_LT_KERNEL_VERSION_FOR_IRQDESC
	BEGIN_PIC();
	rtai_irq_desc(irq)->disable(irq);
	hal_lock_irq(hal_root_domain, 0, irq);
	END_PIC();
#endif
}
00433
00434
00435
00436
00437
00438
00439
00440
00441
00442
00443
00444
00445
00446
00447
00448
00449
00450
00451
00452
00453
00454
00455
00456
00457
00458
00459
00460
00461
00462
00463
00464
/* rt_mask_and_ack_irq - delegate to the chip's ack routine, which masks
 * and acknowledges the interrupt at the PIC level. */
void rt_mask_and_ack_irq (unsigned irq)
{
#if LINUX_VERSION_CODE >= RTAI_LT_KERNEL_VERSION_FOR_IRQDESC
	rtai_irq_desc(irq)->ack(irq);
#endif
}
00471
/*
 * _rt_end_irq - common helper finishing IRQ processing: unlock the IRQ
 * in the root domain unless Linux still has it disabled or in progress,
 * then call the chip's end routine. Shared by rt_unmask_irq(),
 * rt_end_irq() and rt_eoi_irq().
 */
static inline void _rt_end_irq (unsigned irq)
{
#if LINUX_VERSION_CODE >= RTAI_LT_KERNEL_VERSION_FOR_IRQDESC
	BEGIN_PIC();
	if (
	    !(irq_desc[irq].status & (IRQ_DISABLED | IRQ_INPROGRESS))) {
		hal_unlock_irq(hal_root_domain, irq);
	}
	rtai_irq_desc(irq)->end(irq);
	END_PIC();
#endif
}
00484
00485
00486
00487
00488
00489
00490
00491
00492
00493
00494
00495
00496
00497
00498
00499
00500
00501
00502
00503
00504
00505
00506
00507
00508
00509
00510
00511
00512
00513
00514
/* rt_unmask_irq - public wrapper around _rt_end_irq(). */
void rt_unmask_irq (unsigned irq)
{
	_rt_end_irq(irq);
}
00519
00520
00521
00522
00523
00524
00525
00526
00527
00528
00529
00530
00531
00532
00533
00534
00535
00536
00537
00538
00539
00540
00541
00542
00543
00544
00545
00546
00547
00548
00549
/* rt_ack_irq - acknowledge by re-enabling: on this port the ack path
 * simply unlocks and enables the IRQ again via _rt_enable_irq(). */
void rt_ack_irq (unsigned irq)
{
	_rt_enable_irq(irq);
}
00554
/* rt_end_irq - public wrapper around _rt_end_irq(). */
void rt_end_irq (unsigned irq)
{
	_rt_end_irq(irq);
}
00559
/*
 * rt_eoi_irq - end-of-interrupt processing for @irq.
 *
 * The body was a verbatim copy of _rt_end_irq(); delegate to it instead
 * so the unlock-and-end sequence is maintained in a single place
 * (rt_end_irq() and rt_unmask_irq() already do the same).
 */
void rt_eoi_irq (unsigned irq)
{
	_rt_end_irq(irq);
}
00572
00573
00574
00575
00576
00577
00578
00579
00580
00581
00582
00583
00584
00585
00586
00587
00588
00589
00590
00591
00592
00593
00594
00595
00596
00597 int rt_request_linux_irq (unsigned irq, void *handler, char *name, void *dev_id)
00598 {
00599 unsigned long flags;
00600 int retval;
00601
00602 if (irq >= RTAI_NR_IRQS || !handler) {
00603 return -EINVAL;
00604 }
00605
00606 rtai_save_flags_and_cli(flags);
00607 spin_lock(&irq_desc[irq].lock);
00608 if (rtai_linux_irq[irq].count++ == 0 && irq_desc[irq].action) {
00609 rtai_linux_irq[irq].flags = irq_desc[irq].action->flags;
00610 irq_desc[irq].action->flags |= IRQF_SHARED;
00611 }
00612 spin_unlock(&irq_desc[irq].lock);
00613 rtai_restore_flags(flags);
00614
00615 retval = request_irq(irq, handler, IRQF_SHARED, name, dev_id);
00616
00617 return retval;
00618 }
00619
00620
00621
00622
00623
00624
00625
00626
00627
00628
00629
00630
00631 int rt_free_linux_irq (unsigned irq, void *dev_id)
00632 {
00633 unsigned long flags;
00634
00635 if (irq >= RTAI_NR_IRQS || rtai_linux_irq[irq].count == 0) {
00636 return -EINVAL;
00637 }
00638
00639 rtai_save_flags_and_cli(flags);
00640 free_irq(irq,dev_id);
00641 --rtai_linux_irq[irq].count;
00642 rtai_restore_flags(flags);
00643
00644 return 0;
00645 }
00646
00647
00648
00649
00650
00651
00652
00653
00654
/*
 * rt_pend_linux_irq - mark @irq pending for Linux in the I-pipe log,
 * to be delivered when the root domain is next flushed.
 */
void rt_pend_linux_irq (unsigned irq)
{
	unsigned long flags;
	rtai_save_flags_and_cli(flags);
	hal_pend_uncond(irq, rtai_cpuid());
	rtai_restore_flags(flags);
}
00662
00663 RTAI_SYSCALL_MODE void usr_rt_pend_linux_irq (unsigned irq)
00664 {
00665 unsigned long flags;
00666 rtai_save_flags_and_cli(flags);
00667 hal_pend_uncond(irq, rtai_cpuid());
00668 rtai_restore_flags(flags);
00669 }
00670
00671
00672
00673
00674
00675
00676
00677
00678
00679
00680
00681
00682
00683
00684
00685
00686
00687
00688
00689
/*
 * rt_request_srq - allocate a system request (SRQ) slot.
 * @label:     user-space lookup key (see rtai_usrq_dispatcher()).
 * @k_handler: kernel-side handler, mandatory.
 * @u_handler: optional user-space handler.
 *
 * Returns the allocated SRQ id (>= 1, since bit 0 of the map is
 * reserved), -EINVAL without a kernel handler, or -EBUSY when the
 * map is full.
 */
int rt_request_srq (unsigned label, void (*k_handler)(void), long long (*u_handler)(unsigned long))
{
	unsigned long flags;
	int srq;

	if (k_handler == NULL) {
		return -EINVAL;
	}

	rtai_save_flags_and_cli(flags);
	if (rtai_sysreq_map != ~0) {
		/* ffz() finds the lowest free slot; bit 0 is pre-set at init. */
		set_bit(srq = ffz(rtai_sysreq_map), &rtai_sysreq_map);
		rtai_sysreq_table[srq].k_handler = k_handler;
		rtai_sysreq_table[srq].u_handler = u_handler;
		rtai_sysreq_table[srq].label = label;
	} else {
		srq = -EBUSY;
	}
	rtai_restore_flags(flags);

	return srq;
}
00712
00713
00714
00715
00716
00717
00718
00719
00720
00721 int rt_free_srq (unsigned srq)
00722 {
00723 return (srq < 1 || srq >= RTAI_NR_SRQS || !test_and_clear_bit(srq, &rtai_sysreq_map)) ? -EINVAL : 0;
00724 }
00725
00726
00727
00728
00729
00730
00731
00732
00733
00734
00735
/*
 * rt_pend_linux_srq - post SRQ @srq for execution in Linux context by
 * pending the dedicated virtual IRQ. Invalid ids are silently ignored.
 */
void rt_pend_linux_srq (unsigned srq)
{
	if (srq > 0 && srq < RTAI_NR_SRQS) {
		unsigned long flags;
		set_bit(srq, &rtai_sysreq_pending);
		rtai_save_flags_and_cli(flags);
		hal_pend_uncond(rtai_sysreq_virq, rtai_cpuid());
		rtai_restore_flags(flags);
	}
}
00746
/* No local APIC timers on this port: the broadcast handler is a stub. */
irqreturn_t rtai_broadcast_to_local_timers (int irq, void *dev_id, struct pt_regs *regs)
{
	return RTAI_LINUX_IRQ_HANDLED;
}
00751
00752 #define REQUEST_LINUX_IRQ_BROADCAST_TO_APIC_TIMERS() 0
00753
00754 #define FREE_LINUX_IRQ_BROADCAST_TO_APIC_TIMERS();
00755
#ifdef CONFIG_RTAI_SCHED_ISR_LOCK
/* Take the UP scheduler ISR lock: on first entry clear any queued
 * scheduling request. */
#define RTAI_SCHED_ISR_LOCK() \
	do { \
		if (!rt_scheduling[0].locked++) { \
			rt_scheduling[0].rqsted = 0; \
		} \
	} while (0)
/* Drop the ISR lock; when the outermost level is released and a
 * scheduling request arrived meanwhile, invoke the scheduler hook. */
#define RTAI_SCHED_ISR_UNLOCK() \
	do { \
		if (rt_scheduling[0].locked && !(--rt_scheduling[0].locked)) { \
			if (rt_scheduling[0].rqsted > 0 && rtai_isr_hook) { \
				rtai_isr_hook(0); \
			} \
		} \
	} while (0)
#else
#define RTAI_SCHED_ISR_LOCK() \
	do { cpuid = 0; } while (0)
#define RTAI_SCHED_ISR_UNLOCK() \
	do { } while (0)
#endif

#define HAL_TICK_REGS hal_tick_regs[cpuid]

#ifdef LOCKED_LINUX_IN_IRQ_HANDLER
/* Switch the CPU to hard real-time mode around a handler and back. */
#define HAL_LOCK_LINUX()  do { sflags = rt_save_switch_to_real_time(cpuid); } while (0)
#define HAL_UNLOCK_LINUX() do { rtai_cli(); rt_restore_switch_to_linux(sflags, cpuid); } while (0)
#else
/* Stall the root domain around a handler, saving/restoring its status. */
#define HAL_LOCK_LINUX()  do { sflags = xchg(ROOT_STATUS_ADR(cpuid), (1 << IPIPE_STALL_FLAG)); } while (0)
#define HAL_UNLOCK_LINUX() do { rtai_cli(); ROOT_STATUS_VAL(cpuid) = sflags; } while (0)
#endif
00787
00788 #ifndef STR
00789 #define __STR(x) #x
00790 #define STR(x) __STR(x)
00791 #endif
00792
00793 #ifndef SYMBOL_NAME_STR
00794 #define SYMBOL_NAME_STR(X) #X
00795 #endif
00796
00797 #define SAVE_REG \
00798 "move #0x2700,%sr\n\t" \
00799 "btst #5,%sp@(2)\n\t" \
00800 "bnes 6f\n\t" \
00801 "movel %sp,sw_usp\n\t" \
00802 "addql #8,sw_usp\n\t" \
00803 "movel sw_ksp,%sp\n\t" \
00804 "subql #8,%sp\n\t" \
00805 "clrl %sp@-\n\t" \
00806 "movel %d0,%sp@-\n\t" \
00807 "movel %d0,%sp@-\n\t" \
00808 "lea %sp@(-32),%sp\n\t" \
00809 "moveml %d1-%d5/%a0-%a2,%sp@\n\t" \
00810 "movel sw_usp,%a0\n\t" \
00811 "movel %a0@-,%sp@(48)\n\t" \
00812 "movel %a0@-,%sp@(44)\n\t" \
00813 "bra 7f\n\t" \
00814 "6:\n\t" \
00815 "clrl %sp@-\n\t" \
00816 "movel %d0,%sp@-\n\t" \
00817 "movel %d0,%sp@-\n\t" \
00818 "lea %sp@(-32),%sp\n\t" \
00819 "moveml %d1-%d5/%a0-%a2,%sp@\n\t" \
00820 "7:\n\t" \
00821 "move #0x2000,%sr\n\t"
00822
00823 #define RSTR_REG \
00824 "btst #5,%sp@(46)\n\t" \
00825 "bnes 8f\n\t" \
00826 "move #0x2700,%sr\n\t" \
00827 "movel sw_usp,%a0\n\t" \
00828 "movel %sp@(48),%a0@-\n\t" \
00829 "movel %sp@(44),%a0@-\n\t" \
00830 "moveml %sp@,%d1-%d5/%a0-%a2\n\t" \
00831 "lea %sp@(32),%sp\n\t" \
00832 "movel %sp@+,%d0\n\t" \
00833 "addql #4,%sp\n\t" \
00834 "addl %sp@+,%sp\n\t" \
00835 "addql #8,%sp\n\t" \
00836 "movel %sp,sw_ksp\n\t" \
00837 "subql #8,sw_usp\n\t" \
00838 "movel sw_usp,%sp\n\t" \
00839 "rte\n\t" \
00840 "8:\n\t" \
00841 "moveml %sp@,%d1-%d5/%a0-%a2\n\t" \
00842 "lea %sp@(32),%sp\n\t" \
00843 "movel %sp@+,%d0\n\t" \
00844 "addql #4,%sp\n\t" \
00845 "addl %sp@+,%sp\n\t" \
00846 "rte"
00847
00848 #define DEFINE_VECTORED_ISR(name, fun) \
00849 __asm__ ( \
00850 SYMBOL_NAME_STR(name) ":\n\t" \
00851 SAVE_REG \
00852 "jsr "SYMBOL_NAME_STR(fun)"\n\t" \
00853 RSTR_REG);
00854
00855 #define rtai_critical_sync NULL
00856
/*
 * rt_assign_irq_to_cpu - IRQ affinity is not supported on this UP
 * Coldfire port; accept the request and report success so generic
 * callers keep working.
 */
int rt_assign_irq_to_cpu (int irq, unsigned long cpus_mask)
{
	(void)irq;
	(void)cpus_mask;
	return 0;
}
00861
/*
 * rt_reset_irq_to_sym_mode - no-op on this UP port (no affinity to
 * reset); always reports success.
 */
int rt_reset_irq_to_sym_mode (int irq)
{
	(void)irq;
	return 0;
}
00866
00867 extern void mcf_settimericr(int timer, int level);
00868
00869
00870
00871
00872
00873
00874
00875
00876
00877
00878
00879 int rt_request_timer (void (*handler)(void), unsigned tick, int unused)
00880 {
00881 unsigned long flags;
00882 int retval;
00883
00884 TRACE_RTAI_TIMER(TRACE_RTAI_EV_TIMER_REQUEST,handler,tick);
00885 if (timer_inuse) return -EINVAL;
00886 timer_inuse = 1;
00887
00888 rtai_save_flags_and_cli(flags);
00889
00890
00891 if (tick > 0)
00892 {
00893 rt_times.linux_tick = LATCH;
00894 rt_times.tick_time = read_timer_cnt();
00895 rt_times.intr_time = rt_times.tick_time + tick;
00896 rt_times.linux_time = rt_times.tick_time + rt_times.linux_tick;
00897 rt_times.periodic_tick = tick;
00898
00899 rt_set_timer_delay(tick);
00900 }
00901 else
00902 {
00903 rt_times.tick_time = rdtsc();
00904 rt_times.linux_tick = imuldiv(LATCH,rtai_tunables.cpu_freq,RTAI_FREQ_8254);
00905 rt_times.intr_time = rt_times.tick_time + rt_times.linux_tick;
00906 rt_times.linux_time = rt_times.tick_time + rt_times.linux_tick;
00907 rt_times.periodic_tick = rt_times.linux_tick;
00908
00909 rt_set_timer_delay(LATCH);
00910 }
00911
00912 retval = rt_request_global_irq(RT_TIMER_IRQ, handler);
00913 rt_set_irq_ack(RT_TIMER_IRQ, rt_ack_tmr);
00914 rtai_restore_flags(flags);
00915 return retval;
00916 }
00917
00918
00919
00920
00921
00922
/*
 * rt_free_timer - release the timer claimed by rt_request_timer() and
 * detach the handler from RT_TIMER_IRQ.
 */
void rt_free_timer (void)
{
	unsigned long flags;

	TRACE_RTAI_TIMER(TRACE_RTAI_EV_TIMER_FREE,0,0);

	rtai_save_flags_and_cli(flags);
	timer_inuse = 0;

	rt_free_global_irq(RT_TIMER_IRQ);

	rtai_restore_flags(flags);
}
00936
/*
 * rdtsc - emulate a time-stamp counter from the hardware timer count
 * scaled to CPU-clock units.
 * NOTE(review): assumes cpu_freq is an exact multiple of TIMER_FREQ;
 * any remainder is truncated by the integer division -- confirm.
 */
long long rdtsc()
{
	return read_timer_cnt() * (tuned.cpu_freq / TIMER_FREQ);
}
00941
/*
 * rt_set_trap_handler - atomically install @handler as the RTAI trap
 * hook (see rtai_trap_fault()) and return the previous one.
 */
RT_TRAP_HANDLER rt_set_trap_handler (RT_TRAP_HANDLER handler)
{
	return (RT_TRAP_HANDLER)xchg(&rtai_trap_handler, handler);
}
00946
00947 #define CHECK_KERCTX();
00948
00949 int rtai_8254_timer_handler(struct pt_regs regs);
00950
/*
 * rtai_hirq_dispatcher - top-level hardware IRQ dispatcher, installed
 * in place of the I-pipe's hal_irq_handler.
 *
 * Three paths:
 *  - RT timer IRQ: run the RT handler under the ISR lock with Linux
 *    stalled, then flush the pipeline if the root domain is runnable;
 *  - other RT IRQs: ack, run the handler (retmode decides whether its
 *    return value suppresses Linux propagation);
 *  - non-RT IRQs: ack and pend to Linux through the pipeline.
 * Returns 1 when the root stage was flushed, 0 otherwise.
 */
static int rtai_hirq_dispatcher (unsigned irq, struct pt_regs *regs)
{
	unsigned long cpuid = 0;

	CHECK_KERCTX();

	if (rtai_realtime_irq[irq].handler) {
		unsigned long sflags;
		if (irq == RT_TIMER_IRQ)
		{
			/* Inner sflags intentionally shadows the outer one; the
			 * HAL_LOCK/UNLOCK macros operate on the nearest binding. */
			unsigned long sflags = 0;
			HAL_LOCK_LINUX();
			RTAI_SCHED_ISR_LOCK();
			if (rtai_realtime_irq[RT_TIMER_IRQ].irq_ack)
				rtai_realtime_irq[RT_TIMER_IRQ].irq_ack(RT_TIMER_IRQ);
			((void (*)(void))rtai_realtime_irq[RT_TIMER_IRQ].handler)();
			RTAI_SCHED_ISR_UNLOCK();
			HAL_UNLOCK_LINUX();
			if (!test_bit(IPIPE_STALL_FLAG, ROOT_STATUS_ADR(cpuid))) {
				rtai_sti();
#if LINUX_VERSION_CODE < RTAI_LT_KERNEL_VERSION_FOR_NONPERCPU
				/* Fake tick registers so Linux profiling sees the trap site. */
				HAL_TICK_REGS.sr = regs->sr;
				HAL_TICK_REGS.pc = regs->pc;
#else
				__raw_get_cpu_var(__ipipe_tick_regs).sr = regs->sr;
				__raw_get_cpu_var(__ipipe_tick_regs).pc = regs->pc;
#endif
				hal_fast_flush_pipeline(cpuid);
				return 1;
			}
			return 0;
		}

		HAL_LOCK_LINUX();
		if (rtai_realtime_irq[irq].irq_ack)
			rtai_realtime_irq[irq].irq_ack(irq);
		mb();
		RTAI_SCHED_ISR_LOCK();
		/* retmode: a non-zero handler return ends processing here. */
		if (rtai_realtime_irq[irq].retmode && rtai_realtime_irq[irq].handler(irq, rtai_realtime_irq[irq].cookie)) {
			RTAI_SCHED_ISR_UNLOCK();
			HAL_UNLOCK_LINUX();
			return 0;
		} else {
			rtai_realtime_irq[irq].handler(irq, rtai_realtime_irq[irq].cookie);
			RTAI_SCHED_ISR_UNLOCK();
			HAL_UNLOCK_LINUX();
			if (test_bit(IPIPE_STALL_FLAG, ROOT_STATUS_ADR(cpuid))) {
				return 0;
			}
		}
	} else {
		/* No RT handler: ack and pend the IRQ to Linux with the root
		 * domain temporarily stalled. */
		unsigned long lflags;
		cpuid = rtai_cpuid();

		lflags = ROOT_STATUS_VAL(cpuid);
		ROOT_STATUS_VAL(cpuid) = (1 << IPIPE_STALL_FLAG);
		if (rtai_realtime_irq[irq].irq_ack)
			rtai_realtime_irq[irq].irq_ack(irq);
		mb();
		hal_pend_uncond(irq, cpuid);
		ROOT_STATUS_VAL(cpuid) = lflags;

		if (test_bit(IPIPE_STALL_FLAG, &lflags)) {
			return 0;
		}
	}

	if (irq == hal_tick_irq) {
#if LINUX_VERSION_CODE < RTAI_LT_KERNEL_VERSION_FOR_NONPERCPU
		HAL_TICK_REGS.sr = regs->sr;
		HAL_TICK_REGS.pc = regs->pc;
#else
		__raw_get_cpu_var(__ipipe_tick_regs).sr = regs->sr;
		__raw_get_cpu_var(__ipipe_tick_regs).pc = regs->pc;
#endif
	}
	rtai_sti();
	hal_fast_flush_pipeline(cpuid);
	return 1;
}
01031
01032 #ifdef HINT_DIAG_ECHO
01033 #define HINT_DIAG_MSG(x) x
01034 #else
01035 #define HINT_DIAG_MSG(x)
01036 #endif
01037
/* Module parameters controlling diagnostic printing for FPU traps and
 * lazy FPU initialisation (read by the scheduler/trap code). */
static int PrintFpuTrap = 0;
RTAI_MODULE_PARM(PrintFpuTrap, int);
static int PrintFpuInit = 0;
RTAI_MODULE_PARM(PrintFpuInit, int);
01042
01043 static int rtai_trap_fault (unsigned event, void *evdata)
01044 {
01045 #ifdef HINT_DIAG_TRAPS
01046 static unsigned long traps_in_hard_intr = 0;
01047 do {
01048 unsigned long flags;
01049 rtai_save_flags_and_cli(flags);
01050 if (flags & ~ALLOWINT) {
01051 if (!test_and_set_bit(event, &traps_in_hard_intr)) {
01052 HINT_DIAG_MSG(rt_printk("TRAP %d HAS INTERRUPT DISABLED (TRAPS PICTURE %lx).\n", event, traps_in_hard_intr););
01053 }
01054 }
01055 } while (0);
01056 #endif
01057
01058 static const int trap2sig[] = {
01059 0,
01060 0,
01061 SIGSEGV,
01062 SIGBUS,
01063 SIGILL,
01064 SIGFPE,
01065 SIGFPE,
01066 SIGFPE,
01067 SIGILL,
01068 SIGTRAP,
01069 SIGILL,
01070 SIGILL,
01071 SIGILL,
01072 SIGILL,
01073 SIGILL,
01074 SIGILL,
01075 SIGILL,
01076 SIGILL,
01077 SIGILL,
01078 SIGILL,
01079 SIGILL,
01080 SIGILL,
01081 SIGILL,
01082 SIGILL,
01083 SIGILL,
01084 0,
01085 0,
01086 0,
01087 0,
01088 0,
01089 0,
01090 0,
01091 0,
01092 SIGTRAP,
01093 SIGILL,
01094 SIGILL,
01095 SIGILL,
01096 SIGILL,
01097 SIGILL,
01098 SIGILL,
01099 SIGILL,
01100 SIGILL,
01101 SIGILL,
01102 SIGILL,
01103 SIGILL,
01104 SIGILL,
01105 SIGILL,
01106 SIGTRAP,
01107 SIGFPE,
01108 SIGFPE,
01109 SIGFPE,
01110 SIGFPE,
01111 SIGFPE,
01112 SIGFPE,
01113 SIGFPE,
01114 SIGILL,
01115 SIGILL,
01116 SIGILL,
01117 SIGILL,
01118 SIGILL,
01119 SIGILL,
01120 SIGILL,
01121 SIGILL,
01122 SIGFPE
01123 };
01124
01125 TRACE_RTAI_TRAP_ENTRY(evinfo->event, 0);
01126
01127 if (!in_hrt_mode(rtai_cpuid())) {
01128 goto propagate;
01129 }
01130
01131 if (rtai_trap_handler && rtai_trap_handler(event, trap2sig[event], (struct pt_regs *)evdata, NULL)) {
01132 goto endtrap;
01133 }
01134 propagate:
01135 return 0;
01136 endtrap:
01137 TRACE_RTAI_TRAP_EXIT();
01138 return 1;
01139 }
01140
/*
 * rtai_lsrq_dispatcher - Linux-side handler for the SRQ virtual IRQ.
 *
 * Drains all pending, not-currently-running SRQs, dropping the lock
 * while each kernel handler runs so handlers may pend further SRQs.
 */
static void rtai_lsrq_dispatcher (unsigned virq)
{
	unsigned long pending, srq;

	spin_lock(&rtai_lsrq_lock);
	while ((pending = rtai_sysreq_pending & ~rtai_sysreq_running)) {
		set_bit(srq = ffnz(pending), &rtai_sysreq_running);
		clear_bit(srq, &rtai_sysreq_pending);
		spin_unlock(&rtai_lsrq_lock);
		/* Slot may have been freed since it was pended. */
		if (test_bit(srq, &rtai_sysreq_map)) {
			rtai_sysreq_table[srq].k_handler();
		}
		clear_bit(srq, &rtai_sysreq_running);
		spin_lock(&rtai_lsrq_lock);
	}
	spin_unlock(&rtai_lsrq_lock);
}
01158
/*
 * rtai_usrq_dispatcher - run a user-space SRQ request.
 *
 * For a valid @srq with a user handler, invokes it with @label.
 * Otherwise @label is treated as a lookup key and the matching SRQ id
 * is returned, or 0 when nothing matches.
 * NOTE(review): the exit trace is skipped on both return-from-handler
 * paths -- presumably tolerated, but confirm against the trace design.
 */
static inline long long rtai_usrq_dispatcher (unsigned long srq, unsigned long label)
{
	TRACE_RTAI_SRQ_ENTRY(srq);
	if (srq > 0 && srq < RTAI_NR_SRQS && test_bit(srq, &rtai_sysreq_map) && rtai_sysreq_table[srq].u_handler) {
		return rtai_sysreq_table[srq].u_handler(label);
	} else {
		for (srq = 1; srq < RTAI_NR_SRQS; srq++) {
			if (test_bit(srq, &rtai_sysreq_map) && rtai_sysreq_table[srq].label == label) {
				return (long long)srq;
			}
		}
	}
	TRACE_RTAI_SRQ_EXIT();
	return 0LL;
}
01174
01175 #include <asm/rtai_usi.h>
/* LXRT syscall dispatcher, installed by the scheduler module. */
long long (*rtai_lxrt_dispatcher)(unsigned long, unsigned long);

/* Scheduler's syscall-prologue hook, installed via rtai_catch_event(). */
static int (*sched_intercept_syscall_prologue)(struct pt_regs *);
01179
/*
 * intercept_syscall_prologue - HAL_SYSCALL_PROLOGUE hook.
 *
 * RTAI-range syscalls are dispatched to the SRQ or LXRT dispatcher and
 * their 64-bit result stored through the user-supplied pointer; the
 * return value 1 absorbs the syscall when running in hard RT mode.
 * Non-RTAI syscalls fall through to the scheduler's own hook, if any.
 */
static int intercept_syscall_prologue(unsigned long event, struct pt_regs *regs)
{
	if (likely(regs->LINUX_SYSCALL_NR >= RTAI_SYSCALL_NR)) {
		unsigned long srq = regs->LINUX_SYSCALL_REG1;
		IF_IS_A_USI_SRQ_CALL_IT(srq, regs->LINUX_SYSCALL_REG2, (long long *)regs->LINUX_SYSCALL_REG3, regs->LINUX_SYSCALL_FLAGS, 1);
		*((long long *)regs->LINUX_SYSCALL_REG3) = srq > RTAI_NR_SRQS ? rtai_lxrt_dispatcher(srq, regs->LINUX_SYSCALL_REG2) : rtai_usrq_dispatcher(srq, regs->LINUX_SYSCALL_REG2);
		/* srq is reused as the cpu id below. */
		if (!in_hrt_mode(srq = rtai_cpuid())) {
			hal_test_and_fast_flush_pipeline(srq);
			return 0;
		}
		return 1;
	}
	return likely(sched_intercept_syscall_prologue != NULL) ? sched_intercept_syscall_prologue(regs) : 0;
}
01194
/*
 * usi_SRQ_call - handle a USI (user-space interrupt) SRQ if @srq is one;
 * the macro returns from this function with 1 in that case, otherwise
 * fall through and report 0 (not a USI call).
 */
inline int usi_SRQ_call(unsigned long srq, unsigned long args, long long* result, unsigned long lsr)
{
	IF_IS_A_USI_SRQ_CALL_IT(srq, args, result, lsr, 1);
	return 0;
}
01200
01201
/*
 * rtai_syscall_dispatcher - C body of the RTAI syscall trap vector
 * (entered through rtai_uvec_handler). d0 holds the SRQ/LXRT id,
 * d1 the argument; the 64-bit result is returned in d2/d3.
 * Returns 1 when the pipeline was flushed for Linux, 0 otherwise.
 */
asmlinkage int rtai_syscall_dispatcher (__volatile struct pt_regs pt)
{
	int cpuid;

	long long result;

	if (usi_SRQ_call(pt.d0, pt.d1, &result, pt.sr))
		return 0;
	result = pt.d0 > RTAI_NR_SRQS ? rtai_lxrt_dispatcher(pt.d0, pt.d1) : rtai_usrq_dispatcher(pt.d0, pt.d1);
	/* Split the 64-bit result across d2 (low) and d3 (high). */
	pt.d2 = result & 0xFFFFFFFF;
	pt.d3 = (result >> 32);
	if (!in_hrt_mode(cpuid = rtai_cpuid())) {
		hal_test_and_fast_flush_pipeline(cpuid);
		return 1;
	}
	return 0;
}
01219
/* Assembly entry stub for the RTAI syscall vector, generated from the
 * SAVE_REG/RSTR_REG template around rtai_syscall_dispatcher(). */
void rtai_uvec_handler(void);
DEFINE_VECTORED_ISR(rtai_uvec_handler, rtai_syscall_dispatcher);

/* Base of the exception vector table (provided by the kernel). */
extern void *_ramvec;
01224
/*
 * rtai_set_gate_vector - install @handler in exception vector @vector,
 * returning the previous descriptor for later restoration.
 * @type and @dpl are accepted for x86 interface compatibility but are
 * unused on this port.
 */
struct desc_struct rtai_set_gate_vector (unsigned vector, int type, int dpl, void *handler)
{
	struct desc_struct* vector_table = (struct desc_struct*)_ramvec;
	struct desc_struct idt_element = vector_table[vector];
	vector_table[vector].a = handler;
	return idt_element;
}
01232
01233 void rtai_cmpxchg_trap_handler(void);
01234 __asm__ ( \
01235 "rtai_cmpxchg_trap_handler:\n\t" \
01236 "move #0x2700,%sr\n\t" \
01237 "movel %a1@, %d0\n\t" \
01238 "cmpl %d0,%d2\n\t" \
01239 "bnes 1f\n\t" \
01240 "movel %d3,%a1@\n\t" \
01241 "1:\n\t" \
01242 "rte");
01243
01244 void rtai_xchg_trap_handler(void);
01245 __asm__ ( \
01246 "rtai_xchg_trap_handler:\n\t" \
01247 "move #0x2700,%sr\n\t" \
01248 "movel %a1@, %d0\n\t" \
01249 "movel %d2,%a1@\n\t" \
01250 "rte");
01251
01252 void rtai_reset_gate_vector (unsigned vector, struct desc_struct e)
01253 {
01254 struct desc_struct* vector_table = (struct desc_struct*)_ramvec;
01255 vector_table[vector] = e;
01256 }
01257
/*
 * rtai_install_archdep - architecture-dependent setup: hook the RTAI
 * syscall and cmpxchg/xchg trap vectors (saving the old descriptors),
 * install the syscall-prologue interceptor, and establish the CPU
 * frequency (module parameter, or HAL sysinfo when left at 0).
 */
static void rtai_install_archdep (void)
{

	unsigned long flags;

	flags = rtai_critical_enter(NULL);

	rtai_sysvec = rtai_set_gate_vector(RTAI_SYS_VECTOR, 15, 3, &rtai_uvec_handler);
	rtai_cmpxchg_trap_vec = rtai_set_gate_vector(RTAI_CMPXCHG_TRAP_SYS_VECTOR, 15, 3, &rtai_cmpxchg_trap_handler);
	rtai_xchg_trap_vec = rtai_set_gate_vector(RTAI_XCHG_TRAP_SYS_VECTOR, 15, 3, &rtai_xchg_trap_handler);
	rtai_critical_exit(flags);

	hal_catch_event(hal_root_domain, HAL_SYSCALL_PROLOGUE, (void *)intercept_syscall_prologue);

	if (rtai_cpufreq_arg == 0) {
		/* Parameter not set: ask the HAL for the CPU frequency. */
		struct hal_sysinfo_struct sysinfo;
		hal_get_sysinfo(&sysinfo);
		rtai_cpufreq_arg = (unsigned long)sysinfo.cpufreq;
	}
	rtai_tunables.cpu_freq = rtai_cpufreq_arg;
}
01279
/*
 * rtai_uninstall_archdep - undo rtai_install_archdep(): restore the
 * saved exception vectors and drop the syscall-prologue hook.
 */
static void rtai_uninstall_archdep(void)
{
	unsigned long flags;

	flags = rtai_critical_enter(NULL);
	rtai_reset_gate_vector(RTAI_SYS_VECTOR, rtai_sysvec);
	rtai_reset_gate_vector(RTAI_CMPXCHG_TRAP_SYS_VECTOR, rtai_cmpxchg_trap_vec);
	rtai_reset_gate_vector(RTAI_XCHG_TRAP_SYS_VECTOR, rtai_xchg_trap_vec);
	rtai_critical_exit(flags);

	hal_catch_event(hal_root_domain, HAL_SYSCALL_PROLOGUE, NULL);
}
01292
01293
/* rtai_calibrate_8254 - x86 interface stub: there is no 8254 PIT on
 * Coldfire, so just warn and report 0. */
int rtai_calibrate_8254 (void)
{
	rt_printk("RTAI WARNING: rtai_calibrate_8254() isn't implemented for Coldfire\n");
	return 0;
}
01299
/*
 * rt_set_ihook - atomically install @hookfn as the scheduler ISR hook
 * and return the previous one; NULL when ISR locking is compiled out.
 */
void (*rt_set_ihook (void (*hookfn)(int)))(int)
{
#ifdef CONFIG_RTAI_SCHED_ISR_LOCK
	return (void (*)(int))xchg(&rtai_isr_hook, hookfn);
#else
	return NULL;
#endif
}
01308
/*
 * rtai_set_linux_task_priority - change a Linux task's scheduling
 * policy/priority through the HAL, logging when the change did not
 * take effect.
 */
void rtai_set_linux_task_priority (struct task_struct *task, int policy, int prio)
{
	hal_set_linux_task_priority(task, policy, prio);
	if (task->rt_priority != prio || task->policy != policy) {
		printk("RTAI[hal]: sched_setscheduler(policy = %d, prio = %d) failed, (%s -- pid = %d)\n", policy, prio, task->comm, task->pid);
	}
}
01316
01317 #ifdef CONFIG_PROC_FS
01318
/* The /proc/rtai directory, created by rtai_proc_register(). */
struct proc_dir_entry *rtai_proc_root = NULL;
01320
/*
 * rtai_read_proc - read handler for /proc/rtai/hal: lists real-time
 * IRQs with handlers, the RTAI trap vector, and SRQ slots in use.
 */
static int rtai_read_proc (char *page, char **start, off_t off, int count, int *eof, void *data)
{
	PROC_PRINT_VARS;
	int i, none;

	PROC_PRINT("\n** RTAI/m68knommu:\n\n");
	none = 1;
	PROC_PRINT("\n** Real-time IRQs used by RTAI: ");
	for (i = 0; i < RTAI_NR_IRQS; i++) {
		if (rtai_realtime_irq[i].handler) {
			if (none) {
				PROC_PRINT("\n");
				none = 0;
			}
			PROC_PRINT("\n    #%d at %p", i, rtai_realtime_irq[i].handler);
		}
	}
	if (none) {
		PROC_PRINT("none");
	}
	PROC_PRINT("\n\n");

	PROC_PRINT("** RTAI extension traps: \n\n");
	PROC_PRINT("    SYSREQ=0x%x\n\n", RTAI_SYS_VECTOR);

	none = 1;
	PROC_PRINT("** RTAI SYSREQs in use: ");
	for (i = 0; i < RTAI_NR_SRQS; i++) {
		if (rtai_sysreq_table[i].k_handler || rtai_sysreq_table[i].u_handler) {
			PROC_PRINT("#%d ", i);
			none = 0;
		}
	}
	if (none) {
		PROC_PRINT("none");
	}
	PROC_PRINT("\n\n");

	PROC_PRINT_DONE;
}
01361
01362 static int rtai_proc_register (void)
01363 {
01364 struct proc_dir_entry *ent;
01365
01366 rtai_proc_root = create_proc_entry("rtai",S_IFDIR, 0);
01367 if (!rtai_proc_root) {
01368 printk(KERN_ERR "Unable to initialize /proc/rtai.\n");
01369 return -1;
01370 }
01371 rtai_proc_root->owner = THIS_MODULE;
01372 ent = create_proc_entry("hal",S_IFREG|S_IRUGO|S_IWUSR,rtai_proc_root);
01373 if (!ent) {
01374 printk(KERN_ERR "Unable to initialize /proc/rtai/hal.\n");
01375 return -1;
01376 }
01377 ent->read_proc = rtai_read_proc;
01378
01379 return 0;
01380 }
01381
/* rtai_proc_unregister - remove /proc/rtai/hal and /proc/rtai. */
static void rtai_proc_unregister (void)
{
	remove_proc_entry("hal",rtai_proc_root);
	remove_proc_entry("rtai",0);
}
01387
01388 #endif
01389
/* Entry point of the RTAI I-pipe domain: print the banner once, then
 * park forever with hardware interrupts off -- all real work happens
 * through the IRQ/event hooks, not this context. */
FIRST_LINE_OF_RTAI_DOMAIN_ENTRY
{
	{
		rt_printk(KERN_INFO "RTAI[hal]: <%s> mounted over %s %s.\n", PACKAGE_VERSION, HAL_TYPE, HAL_VERSION_STRING);
		rt_printk(KERN_INFO "RTAI[hal]: compiled with %s.\n", CONFIG_RTAI_COMPILER);
	}
	local_irq_disable_hw();
	for (;;) hal_suspend_domain();
}
LAST_LINE_OF_RTAI_DOMAIN_ENTRY
01400
01401 long rtai_catch_event (struct hal_domain_struct *from, unsigned long event, int (*handler)(unsigned long, void *))
01402 {
01403 if (event == HAL_SYSCALL_PROLOGUE) {
01404 sched_intercept_syscall_prologue = (void *)handler;
01405 return 0;
01406 }
01407 return (long)hal_catch_event(from, event, (void *)handler);
01408 }
01409
/* Original I-pipe IRQ handler, saved at init so it can be restored
 * when this module unloads. */
static void *saved_hal_irq_handler;
extern void *hal_irq_handler;

/* Override the kernel helper so spurious vectors are only logged. */
#undef ack_bad_irq
void ack_bad_irq(unsigned int irq)
{
	printk("unexpected IRQ trap at vector %02x\n", irq);
}
01418
01419 int __rtai_hal_init (void)
01420 {
01421 int trapnr, halinv = 0;
01422 struct hal_attr_struct attr;
01423
01424 for (halinv = trapnr = 0; trapnr < HAL_NR_EVENTS; trapnr++) {
01425 if (hal_root_domain->hal_event_handler_fun(trapnr)) {
01426 halinv = 1;
01427 printk("EVENT %d INVALID.\n", trapnr);
01428 }
01429 }
01430 if (halinv) {
01431 printk(KERN_ERR "RTAI[hal]: HAL IMMEDIATE EVENT DISPATCHING BROKEN.\n");
01432 }
01433
01434 if (!(rtai_sysreq_virq = hal_alloc_irq())) {
01435 printk(KERN_ERR "RTAI[hal]: NO VIRTUAL INTERRUPT AVAILABLE.\n");
01436 halinv = 1;
01437 }
01438
01439 if (halinv) {
01440 return -1;
01441 }
01442
01443 for (trapnr = 0; trapnr < RTAI_NR_IRQS; trapnr++) {
01444 rtai_realtime_irq[trapnr].irq_ack = hal_root_domain->irqs[trapnr].acknowledge;
01445 }
01446
01447
01448 #if LINUX_VERSION_CODE < RTAI_LT_KERNEL_VERSION_FOR_NONPERCPU
01449 for (trapnr = 0; trapnr < num_online_cpus(); trapnr++) {
01450 ipipe_root_status[trapnr] = &hal_root_domain->cpudata[trapnr].status;
01451 }
01452 #endif
01453
01454 hal_virtualize_irq(hal_root_domain, rtai_sysreq_virq, &rtai_lsrq_dispatcher, NULL, IPIPE_HANDLE_MASK);
01455 saved_hal_irq_handler = hal_irq_handler;
01456 hal_irq_handler = rtai_hirq_dispatcher;
01457
01458 rtai_install_archdep();
01459
01460 #ifdef CONFIG_PROC_FS
01461 rtai_proc_register();
01462 #endif
01463
01464 hal_init_attr(&attr);
01465 attr.name = "RTAI";
01466 attr.domid = RTAI_DOMAIN_ID;
01467 attr.entry = (void *)rtai_domain_entry;
01468 attr.priority = get_domain_pointer(1)->priority + 100;
01469 hal_register_domain(&rtai_domain, &attr);
01470 for (trapnr = 0; trapnr < HAL_NR_FAULTS; trapnr++) {
01471 hal_catch_event(hal_root_domain, trapnr, (void *)rtai_trap_fault);
01472 }
01473 rtai_init_taskpri_irqs();
01474
01475 IsolCpusMask = 0;
01476
01477 printk(KERN_INFO "RTAI[hal]: mounted (%s, IMMEDIATE (INTERNAL IRQs VECTORED)).\n", HAL_TYPE);
01478
01479 printk("PIPELINE layers:\n");
01480 for (trapnr = 1; ; trapnr++) {
01481 struct hal_domain_struct *next_domain;
01482 next_domain = get_domain_pointer(trapnr);
01483 if ((unsigned long)next_domain < 10) break;
01484 printk("%p %x %s %d\n", next_domain, next_domain->domid, next_domain->name, next_domain->priority);
01485 }
01486
01487
01488 return 0;
01489 }
01490
/*
 * Module exit: undo __rtai_hal_init() in reverse order — restore the
 * pipeline IRQ handler, unregister the RTAI domain, drop all fault
 * traps, release the SRQ virtual interrupt and the arch hooks.
 */
void __rtai_hal_exit (void)
{
	int trapnr;
#ifdef CONFIG_PROC_FS
	rtai_proc_unregister();
#endif
	/* Put back the trampoline saved at init time before tearing the
	 * domain down, so no IRQ is dispatched through RTAI afterwards. */
	hal_irq_handler = saved_hal_irq_handler;
	hal_unregister_domain(&rtai_domain);
	/* A NULL handler detaches the fault trap installed at init. */
	for (trapnr = 0; trapnr < HAL_NR_FAULTS; trapnr++) {
		hal_catch_event(hal_root_domain, trapnr, NULL);
	}
	hal_virtualize_irq(hal_root_domain, rtai_sysreq_virq, NULL, NULL, 0);
	hal_free_irq(rtai_sysreq_virq);
	rtai_uninstall_archdep();

	/* If any IRQ was pinned to isolated CPUs, restore its symmetric
	 * distribution. */
	if (IsolCpusMask) {
		for (trapnr = 0; trapnr < IPIPE_NR_XIRQS; trapnr++) {
			rt_reset_irq_to_sym_mode(trapnr);
		}
	}

	printk(KERN_INFO "RTAI[hal]: unmounted.\n");
}
01514
/* Standard module entry/exit hooks. */
module_init(__rtai_hal_init);
module_exit(__rtai_hal_exit);
01517
01518 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,60,0)
01519 asmlinkage int rt_printk(const char *fmt, ...)
01520 {
01521 va_list args;
01522 int r;
01523
01524 va_start(args, fmt);
01525 r = vprintk(fmt, args);
01526 va_end(args);
01527
01528 return r;
01529 }
01530
01531 asmlinkage int rt_sync_printk(const char *fmt, ...)
01532 {
01533 va_list args;
01534 int r;
01535
01536 va_start(args, fmt);
01537 hal_set_printk_sync(&rtai_domain);
01538 r = vprintk(fmt, args);
01539 hal_set_printk_async(&rtai_domain);
01540 va_end(args);
01541
01542 return r;
01543 }
01544 #else
01545 #define VSNPRINTF_BUF 256
01546 asmlinkage int rt_printk(const char *fmt, ...)
01547 {
01548 char buf[VSNPRINTF_BUF];
01549 va_list args;
01550
01551 va_start(args, fmt);
01552 vsnprintf(buf, VSNPRINTF_BUF, fmt, args);
01553 va_end(args);
01554 return printk("%s", buf);
01555 }
01556
01557 asmlinkage int rt_sync_printk(const char *fmt, ...)
01558 {
01559 char buf[VSNPRINTF_BUF];
01560 va_list args;
01561 int r;
01562
01563 va_start(args, fmt);
01564 vsnprintf(buf, VSNPRINTF_BUF, fmt, args);
01565 va_end(args);
01566 hal_set_printk_sync(&rtai_domain);
01567 r = printk("%s", buf);
01568 hal_set_printk_async(&rtai_domain);
01569
01570 return r;
01571 }
01572 #endif
01573
01574
01575
01576
01577
/*
 * Convert a signed 64-bit value to its decimal ASCII representation in
 * the caller-supplied buffer s, returning s.  Digits are generated in
 * reverse into a[] (a[0] = 0 acts as the copy terminator) and then
 * copied back out in the right order.  The caller must provide room for
 * up to 19 digits, an optional '-', and the NUL.
 */
void *ll2a (long long ll, char *s)
{
	unsigned long i, k, ul;
	char a[20];

	/* The sign is parked as a boolean in s[0] (1 = negative) and only
	 * turned into a real '-' once the digits are known.
	 * NOTE(review): ll = -ll overflows for LLONG_MIN — confirm callers
	 * never pass it. */
	if (ll < 0) {
		s[0] = 1;
		ll = -ll;
	} else {
		s[0] = 0;
	}
	i = 0;
	/* Peel digits with 64-bit division only while the value does not
	 * fit in 32 bits; rtai_ulldiv() returns the quotient and stores
	 * the remainder in k. */
	while (ll > 0xFFFFFFFF) {
		ll = rtai_ulldiv(ll, 10, &k);
		a[++i] = k + '0';
	}
	/* Remaining value fits in 32 bits: grab the low word directly to
	 * finish with cheap 32-bit divisions.
	 * NOTE(review): word index [1] assumes 32-bit big-endian longs
	 * (m68k); this type-pun is not portable — confirm per-arch. */
	ul = ((unsigned long *)&ll)[1];
	do {
		ul = (k = ul)/10;
		a[++i] = k - ul*10 + '0';
	} while (ul);
	/* Emit the sign (if flagged above) and set the copy start offset. */
	if (s[0]) {
		k = 1;
		s[0] = '-';
	} else {
		k = 0;
	}
	/* a[0] = 0 terminates the reverse copy below and becomes the
	 * string's NUL terminator. */
	a[0] = 0;
	while ((s[k++] = a[i--]));
	return s;
}
01609
/* Real-time IRQ management API. */
EXPORT_SYMBOL(rtai_realtime_irq);
EXPORT_SYMBOL(rt_request_irq);
EXPORT_SYMBOL(rt_release_irq);
EXPORT_SYMBOL(rt_set_irq_cookie);
EXPORT_SYMBOL(rt_set_irq_retmode);
EXPORT_SYMBOL(rt_startup_irq);
EXPORT_SYMBOL(rt_shutdown_irq);
EXPORT_SYMBOL(rt_enable_irq);
EXPORT_SYMBOL(rt_disable_irq);
EXPORT_SYMBOL(rt_mask_and_ack_irq);
EXPORT_SYMBOL(rt_unmask_irq);
EXPORT_SYMBOL(rt_ack_irq);
EXPORT_SYMBOL(rt_end_irq);
EXPORT_SYMBOL(rt_eoi_irq);
/* Linux-domain IRQ and SRQ services. */
EXPORT_SYMBOL(rt_request_linux_irq);
EXPORT_SYMBOL(rt_free_linux_irq);
EXPORT_SYMBOL(rt_pend_linux_irq);
EXPORT_SYMBOL(usr_rt_pend_linux_irq);
EXPORT_SYMBOL(rt_request_srq);
EXPORT_SYMBOL(rt_free_srq);
EXPORT_SYMBOL(rt_pend_linux_srq);
/* CPU affinity, timer and hook services. */
EXPORT_SYMBOL(rt_assign_irq_to_cpu);
EXPORT_SYMBOL(rt_reset_irq_to_sym_mode);
EXPORT_SYMBOL(rt_request_timer);
EXPORT_SYMBOL(rt_free_timer);
EXPORT_SYMBOL(rdtsc);
EXPORT_SYMBOL(rt_set_trap_handler);
EXPORT_SYMBOL(rt_set_ihook);
EXPORT_SYMBOL(rt_set_irq_ack);

/* Arch-dependent helpers. */
EXPORT_SYMBOL(rtai_calibrate_8254);
EXPORT_SYMBOL(rtai_broadcast_to_local_timers);
EXPORT_SYMBOL(rtai_critical_enter);
EXPORT_SYMBOL(rtai_critical_exit);
EXPORT_SYMBOL(rtai_set_linux_task_priority);

/* Shared state used by the schedulers and LXRT. */
EXPORT_SYMBOL(rtai_linux_context);
EXPORT_SYMBOL(rtai_domain);
EXPORT_SYMBOL(rtai_proc_root);
EXPORT_SYMBOL(rtai_tunables);
EXPORT_SYMBOL(rtai_cpu_lock);
EXPORT_SYMBOL(rtai_cpu_realtime);
EXPORT_SYMBOL(rt_times);
EXPORT_SYMBOL(rt_smp_times);

/* Printing helpers. */
EXPORT_SYMBOL(rt_printk);
EXPORT_SYMBOL(rt_sync_printk);
EXPORT_SYMBOL(ll2a);

/* Vector/event hooks. */
EXPORT_SYMBOL(rtai_set_gate_vector);
EXPORT_SYMBOL(rtai_reset_gate_vector);
EXPORT_SYMBOL(rtai_catch_event);

EXPORT_SYMBOL(rtai_lxrt_dispatcher);
EXPORT_SYMBOL(rt_scheduling);
#if LINUX_VERSION_CODE < RTAI_LT_KERNEL_VERSION_FOR_NONPERCPU
EXPORT_SYMBOL(ipipe_root_status);
#endif
01668
01669
01670
/* Hook pointers for Linux high-resolution-timer integration; filled in
 * by the scheduler/timer modules, left NULL here. */
void (*rt_linux_hrt_set_mode)(int clock_event_mode, void *);
int (*rt_linux_hrt_next_shot)(unsigned long, void *);

EXPORT_SYMBOL(rt_linux_hrt_set_mode);
EXPORT_SYMBOL(rt_linux_hrt_next_shot);
01680
/* RTC teardown stub: no RTC support on this architecture, nothing to
 * release. */
void rt_release_rtc(void)
{
}
01685
/* RTC setup stub: no RTC support on this architecture; the requested
 * frequency and handler are ignored. */
void rt_request_rtc(long rtc_freq, void *handler)
{
	(void)rtc_freq;
	(void)handler;
}
01690
/* Export the RTC stubs so scheduler modules link regardless of arch. */
EXPORT_SYMBOL(rt_request_rtc);
EXPORT_SYMBOL(rt_release_rtc);