00001
00002
00003
00004
00005
00006
00007
00008
00009
00010
00011
00012
00013
00014
00015
00016
00017
00018
00019
00020
00021
00022
00023
00024
00025
00026
00027
00028
00029
00030
00031
00032
00033
00034 #include <linux/version.h>
00035 #include <linux/slab.h>
00036 #include <linux/errno.h>
00037 #include <linux/module.h>
00038 #include <linux/init.h>
00039 #include <linux/interrupt.h>
00040 #include <linux/irq.h>
00041 #include <linux/console.h>
00042
00043 #include <asm/system.h>
00044 #include <asm/hw_irq.h>
00045 #include <asm/irq.h>
00046 #include <asm/io.h>
00047 #include <asm/mmu_context.h>
00048 #include <asm/uaccess.h>
00049 #include <asm/time.h>
00050 #include <asm/types.h>
00051 #include <asm/machdep.h>
00052
00053 #define __RTAI_HAL__
00054 #include <asm/rtai_hal.h>
00055 #include <asm/rtai_lxrt.h>
00056
00057
00058 #ifdef CONFIG_PROC_FS
00059 #include <linux/stat.h>
00060 #include <linux/proc_fs.h>
00061 #include <rtai_proc_fs.h>
00062 #endif
00063
00064 #include <stdarg.h>
00065
00066
00067 MODULE_LICENSE("GPL");
00068
/* PowerPC exception-vector slots taken over by RTAI: external interrupt
 * and decrementer. */
#define INTR_VECTOR 5
#define DECR_VECTOR 9

/* CPU (time base) frequency; 0 lets rtai_install_archdep() read it from
 * the HAL sysinfo. */
static unsigned long rtai_cpufreq_arg = RTAI_CALIBRATED_CPU_FREQ;
RTAI_MODULE_PARM(rtai_cpufreq_arg, ulong);

#define RTAI_NR_IRQS IPIPE_NR_XIRQS

/* Verbosity switches for FPU trap/init diagnostics (module parameters). */
static int PrintFpuTrap = 0;
RTAI_MODULE_PARM(PrintFpuTrap, int);
static int PrintFpuInit = 0;
RTAI_MODULE_PARM(PrintFpuInit, int);

/* Mask of CPUs real-time irqs are bound to when CPU isolation is used. */
unsigned long IsolCpusMask = 0;
RTAI_MODULE_PARM(IsolCpusMask, ulong);

/* Per-CPU scheduler ISR lock: 'locked' nests, 'rqsted' records a
 * reschedule request raised while locked (served via rtai_isr_hook). */
struct { volatile int locked, rqsted; } rt_scheduling[RTAI_NR_CPUS];
00085
#ifdef CONFIG_RTAI_SCHED_ISR_LOCK
/* Hook run when a reschedule was requested while the ISR lock was held;
 * installed/cleared via rt_set_ihook(). */
static void (*rtai_isr_hook)(int cpuid);
#endif

#ifdef CONFIG_RTAI_SCHED_ISR_LOCK
/* Nestable per-cpu lock deferring scheduler activity while real-time
 * irqs are dispatched; the final UNLOCK runs the hook if a request was
 * recorded meanwhile. Both macros expect 'cpuid' in scope. */
#define RTAI_SCHED_ISR_LOCK() \
	do { \
		if (!rt_scheduling[cpuid].locked++) { \
			rt_scheduling[cpuid].rqsted = 0; \
		} \
	} while (0)
#define RTAI_SCHED_ISR_UNLOCK() \
	do { \
		if (rt_scheduling[cpuid].locked && !(--rt_scheduling[cpuid].locked)) { \
			if (rt_scheduling[cpuid].rqsted > 0 && rtai_isr_hook) { \
				rtai_isr_hook(cpuid); \
			} \
		} \
	} while (0)
#else
/* Without ISR locking, LOCK only resolves the current cpu id — callers
 * rely on this side effect on 'cpuid'. */
#define RTAI_SCHED_ISR_LOCK() \
	do { cpuid = rtai_cpuid(); } while (0)
#define RTAI_SCHED_ISR_UNLOCK() \
	do { } while (0)
#endif
00111
/* Kernel ranges where hal_tick_regs is a per-cpu array. */
#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,4,31) && LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)) || LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,9)
#define HAL_TICK_REGS hal_tick_regs[cpuid]
#else
#define HAL_TICK_REGS hal_tick_regs
#endif

/* Lock Linux out (stall the root stage) around real-time dispatching.
 * Both variants expect 'sflags' and 'cpuid' in scope; the first variant
 * assigns 'cpuid' itself. NOTE(review): the non-LOCKED variant reads
 * 'cpuid' before anything here assigns it — presumably the caller (or
 * configuration) guarantees it; confirm per-arch config. */
#ifdef LOCKED_LINUX_IN_IRQ_HANDLER
#define HAL_LOCK_LINUX() do { sflags = rt_save_switch_to_real_time(cpuid = rtai_cpuid()); } while (0)
#define HAL_UNLOCK_LINUX() do { rtai_cli(); rt_restore_switch_to_linux(sflags, cpuid); } while (0)
#else
#define HAL_LOCK_LINUX() do { sflags = xchg((unsigned long *)ROOT_STATUS_ADR(cpuid), (1 << IPIPE_STALL_FLAG)); } while (0)
#define HAL_UNLOCK_LINUX() do { rtai_cli(); ROOT_STATUS_VAL(cpuid) = sflags; } while (0)
#endif

/* Acknowledge an irq through its recorded ack routine; the callback
 * prototype gained a descriptor argument after 2.6.26. */
#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,26)

#define RTAI_IRQ_ACK(irq) \
	do { \
		rtai_realtime_irq[irq].irq_ack(irq, irq_desc + irq); \
	} while (0)

#else

#define RTAI_IRQ_ACK(irq) \
	do { \
		((void (*)(unsigned int))rtai_realtime_irq[irq].irq_ack)(irq); \
	} while (0)

#endif
00141
/* Placeholder: no kernel-context check on this port. */
#define CHECK_KERCTX()

#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,11)

/* The PIC-method container in irq_desc was renamed handler -> chip in
 * 2.6.18. */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18)
#define rtai_irq_desc(irq) (irq_desc[irq].handler)
#else
#define rtai_irq_desc(irq) (irq_desc[irq].chip)
#endif

/* Modern kernels need no extra bracketing or pipeline irq locking. */
#define BEGIN_PIC()
#define END_PIC()
#undef hal_lock_irq
#undef hal_unlock_irq
#define hal_lock_irq(x, y, z)
#define hal_unlock_irq(x, y)

#else

extern struct hw_interrupt_type hal_std_irq_dtype[];
#define rtai_irq_desc(irq) (&hal_std_irq_dtype[irq])

/* Bracket PIC manipulation: hard-disable irqs, stall the root stage and
 * freeze the preemption count; END_PIC() undoes all three. Declares
 * flags/pflags/cpuid used by the bracketed code. */
#define BEGIN_PIC() \
do { \
	unsigned long flags, pflags, cpuid; \
	rtai_save_flags_and_cli(flags); \
	cpuid = rtai_cpuid(); \
	pflags = xchg((unsigned long *)ROOT_STATUS_ADR(cpuid), 1 << IPIPE_STALL_FLAG); \
	rtai_save_and_lock_preempt_count()

#define END_PIC() \
	rtai_restore_preempt_count(); \
	ROOT_STATUS_VAL(cpuid) = pflags; \
	rtai_restore_flags(flags); \
} while (0)

#endif
00179
00180
00181
00182 static atomic_t rtai_sync_count = ATOMIC_INIT(1);
00183 static volatile int rtai_sync_level;
00184 static unsigned rtai_sysreq_virq;
00185 struct rtai_realtime_irq_s rtai_realtime_irq[RTAI_NR_IRQS];
00186
00187 #if LINUX_VERSION_CODE < RTAI_LT_KERNEL_VERSION_FOR_NONPERCPU
00188 volatile unsigned long *ipipe_root_status[RTAI_NR_CPUS];
00189 #endif
00190
00191 struct calibration_data rtai_tunables;
00192
00193 extern void *hal_syscall_handler;
00194
00195 static RT_TRAP_HANDLER rtai_trap_handler;
00196 extern struct machdep_calls ppc_md;
00197 static unsigned rtai_sysreq_virq;
00198 static unsigned long rtai_sysreq_map = 1;
00199 static unsigned long rtai_sysreq_pending;
00200 static unsigned long rtai_sysreq_running;
00201 static spinlock_t rtai_lsrq_lock = SPIN_LOCK_UNLOCKED;
00202
00203 static struct {
00204 unsigned long flags;
00205 int count;
00206 } rtai_linux_irq[RTAI_NR_IRQS];
00207
00208 static struct {
00209 void (*k_handler)(void);
00210 long long (*u_handler)(unsigned long);
00211 unsigned long label;
00212 } rtai_sysreq_table[RTAI_NR_SRQS];
00213
00214 volatile unsigned long rtai_cpu_lock[2];
00215 struct hal_domain_struct rtai_domain;
00216 volatile unsigned long rtai_cpu_realtime;
00217 struct rt_times rt_times;
00218 struct rtai_switch_data rtai_linux_context[RTAI_NR_CPUS];
00219 struct rt_times rt_smp_times[RTAI_NR_CPUS];
00220
00221
00222
00223
00224
00225
/*
 * Enter a pipeline-wide critical section through the HAL, optionally
 * spinning the other CPUs in 'synch'. Returns the flags word to hand
 * back to rtai_critical_exit(). Nesting is tracked via rtai_sync_count;
 * a nested entry with a non-NULL 'synch' cannot be honored and is only
 * reported.
 */
unsigned long rtai_critical_enter (void (*synch)(void))
{
	unsigned long flags;

	flags = hal_critical_enter(synch);
	if (atomic_dec_and_test(&rtai_sync_count)) {
		rtai_sync_level = 0;
	} else if (synch != NULL) {
		printk(KERN_INFO "RTAI[hal]: warning: nested sync will fail.\n");
	}
	return flags;
}
00238
00239
00240
00241
00242
00243
/* Leave a critical section opened by rtai_critical_enter(). */
void rtai_critical_exit (unsigned long flags)
{
	atomic_inc(&rtai_sync_count);
	hal_critical_exit(flags);
}
00249
00250
00251
00252
00253
00254
/*
 * Install a real-time interrupt handler.
 * @irq:     interrupt number (< RTAI_NR_IRQS)
 * @handler: called as handler(irq, cookie) from the RTAI domain
 * @cookie:  opaque argument stored for the handler
 * @retmode: non-zero makes the dispatcher return right after the handler
 *           instead of falling through to the Linux stage
 * Returns 0, -EINVAL on bad arguments, -EBUSY if the irq is taken.
 */
int rt_request_irq(unsigned irq, int (*handler)(unsigned irq, void *cookie), void *cookie, int retmode)
{
	unsigned long flags;

	if (handler == NULL || irq >= RTAI_NR_IRQS) {
		return -EINVAL;
	}
	if (rtai_realtime_irq[irq].handler != NULL) {
		return -EBUSY;
	}

	/* Publish the descriptor atomically w.r.t. all domains. */
	flags = rtai_critical_enter(NULL);
	rtai_realtime_irq[irq].handler = (void *)handler;
	rtai_realtime_irq[irq].cookie = cookie;
	rtai_realtime_irq[irq].retmode = retmode ? 1 : 0;
	rtai_realtime_irq[irq].irq_ack = (void *)hal_root_domain->irqs[irq].acknowledge;
	rtai_critical_exit(flags);

	if (IsolCpusMask && irq < IPIPE_NR_XIRQS) {
		/* Bind the irq to the isolated CPUs; the previous affinity is
		 * saved for restoration at release time. */
		rtai_realtime_irq[irq].cpumask = rt_assign_irq_to_cpu(irq, IsolCpusMask);
	}

	return 0;
}
00279
00280
00281
00282
00283
00284
/*
 * Detach the real-time handler of 'irq' and restore the default ack
 * routine and, if CPU isolation was used, the previous cpu affinity.
 * Returns 0, or -EINVAL if the irq is out of range or not taken.
 */
int rt_release_irq (unsigned irq)
{
	unsigned long flags;
	if (irq >= RTAI_NR_IRQS || !rtai_realtime_irq[irq].handler) {
		return -EINVAL;
	}

	flags = rtai_critical_enter(NULL);
	rtai_realtime_irq[irq].handler = NULL;
	rtai_realtime_irq[irq].irq_ack = (void *)hal_root_domain->irqs[irq].acknowledge;
	rtai_critical_exit(flags);

	if (IsolCpusMask && irq < IPIPE_NR_XIRQS) {
		/* Restore the affinity saved by rt_request_irq(). */
		rt_assign_irq_to_cpu(irq, rtai_realtime_irq[irq].cpumask);
	}

	return 0;
}
00303
00304
00305
00306
00307
00308
/*
 * Override the acknowledge routine used when dispatching 'irq'; a NULL
 * 'irq_ack' restores the root domain's default acknowledge.
 * Returns 0, or -EINVAL if the irq is out of range.
 */
int rt_set_irq_ack(unsigned irq, int (*irq_ack)(unsigned int))
{
	if (irq >= RTAI_NR_IRQS) {
		return -EINVAL;
	}
	rtai_realtime_irq[irq].irq_ack = irq_ack ? irq_ack : (void *)hal_root_domain->irqs[irq].acknowledge;
	return 0;
}
00317
00318
00319
00320
00321
00322
00323 void rt_set_irq_cookie (unsigned irq, void *cookie)
00324 {
00325 if (irq < RTAI_NR_IRQS) {
00326 rtai_realtime_irq[irq].cookie = cookie;
00327 }
00328 }
00329
00330
00331
00332
00333
00334
00335 void rt_set_irq_retmode (unsigned irq, int retmode)
00336 {
00337 if (irq < RTAI_NR_IRQS) {
00338 rtai_realtime_irq[irq].retmode = retmode ? 1 : 0;
00339 }
00340 }
00341
00342
00343
00344
00345
00346
/*
 * Unlock 'irq' in the pipeline and start it at the PIC level; returns
 * whatever the PIC's startup routine returns.
 */
unsigned rt_startup_irq (unsigned irq)
{
	int retval;

	BEGIN_PIC();
	hal_unlock_irq(hal_root_domain, irq);
	retval = rtai_irq_desc(irq)->startup(irq);
	END_PIC();
	return retval;
}
00357
00358
00359
00360
00361
00362
/*
 * Shut 'irq' down at the PIC level; on older kernels also clear any
 * logged-but-undelivered instance from the root domain's irq log.
 */
void rt_shutdown_irq (unsigned irq)
{
	BEGIN_PIC();
	rtai_irq_desc(irq)->shutdown(irq);
#if LINUX_VERSION_CODE < RTAI_LT_KERNEL_VERSION_FOR_NONPERCPU
	hal_clear_irq(hal_root_domain, irq);
#endif
	END_PIC();
}
00372
00373
00374
00375
00376
/* Unlock 'irq' in the pipeline and enable it at the PIC. */
static inline void _rt_enable_irq (unsigned irq)
{
	BEGIN_PIC();
	hal_unlock_irq(hal_root_domain, irq);
	rtai_irq_desc(irq)->enable(irq);
	END_PIC();
}
00384
00385
00386
00387
00388
00389
/* Disable 'irq' at the PIC and lock it out of the pipeline. */
void rt_disable_irq (unsigned irq)
{
	BEGIN_PIC();
	rtai_irq_desc(irq)->disable(irq);
	hal_lock_irq(hal_root_domain, cpuid, irq);	/* 'cpuid' is declared by BEGIN_PIC() */
	END_PIC();
}
00397
00398
00399
00400
00401
00402
/*
 * End-of-interrupt: unlock 'irq' unless it is disabled or still being
 * handled, then run the PIC's end() routine.
 */
static inline void _rt_end_irq (unsigned irq)
{
	BEGIN_PIC();
	if (!(irq_desc[irq].status & (IRQ_DISABLED | IRQ_INPROGRESS))) {
		hal_unlock_irq(hal_root_domain, irq);
	}
	rtai_irq_desc(irq)->end(irq);
	END_PIC();
}
00412
00413
00414
/* Thin public wrappers over the PIC helpers above.
 * NOTE(review): rt_ack_irq maps to _rt_enable_irq (same as
 * rt_enable_irq) rather than to a plain acknowledge — presumably
 * intentional for this PIC; confirm against other RTAI ports. */
void rt_mask_and_ack_irq (unsigned irq) { rtai_irq_desc(irq)->ack(irq); }

void rt_enable_irq (unsigned irq) { _rt_enable_irq(irq); }

void rt_unmask_irq (unsigned irq) { _rt_end_irq(irq); }

void rt_ack_irq (unsigned irq) { _rt_enable_irq(irq); }

void rt_end_irq (unsigned irq) { _rt_end_irq(irq); }
00424
00425
00426
00427
00428
00429
00430 int rt_request_linux_irq (unsigned irq, void *handler, char *name, void *dev_id)
00431 {
00432 unsigned long flags;
00433 int retval;
00434
00435 if (irq >= RTAI_NR_IRQS || !handler) {
00436 return -EINVAL;
00437 }
00438
00439 rtai_save_flags_and_cli(flags);
00440 spin_lock(&irq_desc[irq].lock);
00441 if (rtai_linux_irq[irq].count++ == 0 && irq_desc[irq].action) {
00442 rtai_linux_irq[irq].flags = irq_desc[irq].action->flags;
00443 irq_desc[irq].action->flags |= IRQF_SHARED;
00444 }
00445 spin_unlock(&irq_desc[irq].lock);
00446 rtai_restore_flags(flags);
00447
00448 retval = request_irq(irq, handler, IRQF_SHARED, name, dev_id);
00449
00450 return 0;
00451 }
00452
00453
00454
00455
00456
00457
/*
 * Release a Linux-side handler obtained via rt_request_linux_irq();
 * when the last RTAI user goes away, the action flags saved at request
 * time are restored. Returns 0, or -EINVAL when the irq is out of
 * range or was never requested through this API.
 */
int rt_free_linux_irq (unsigned irq, void *dev_id)
{
	unsigned long flags;

	if (irq >= RTAI_NR_IRQS || rtai_linux_irq[irq].count == 0) {
		return -EINVAL;
	}

	rtai_save_flags_and_cli(flags);
	free_irq(irq, dev_id);

	spin_lock(&irq_desc[irq].lock);
	if (--rtai_linux_irq[irq].count == 0 && irq_desc[irq].action) {
		irq_desc[irq].action->flags = rtai_linux_irq[irq].flags;
	}
	spin_unlock(&irq_desc[irq].lock);

	rtai_restore_flags(flags);

	return 0;
}
00479
00480
00481
00482
00483
00484
/* Log 'irq' into the root domain so Linux handles it when it next runs. */
void rt_pend_linux_irq (unsigned irq)
{
	unsigned long flags;
	rtai_save_flags_and_cli(flags);
	hal_pend_uncond(irq, rtai_cpuid());
	rtai_restore_flags(flags);
}
00492
00493
00494
00495
00496
00497
/* Syscall-reachable variant of rt_pend_linux_irq(). */
RTAI_SYSCALL_MODE void usr_rt_pend_linux_irq (unsigned irq)
{
	unsigned long flags;
	rtai_save_flags_and_cli(flags);
	hal_pend_uncond(irq, rtai_cpuid());
	rtai_restore_flags(flags);
}
00505
00506
00507
00508
00509
00510
/*
 * Allocate a system request (SRQ) slot.
 * @label:     user-visible tag, searchable by rtai_usrq_dispatcher()
 * @k_handler: mandatory kernel-side handler, run in Linux context
 * @u_handler: optional user-side handler
 * Returns the srq number, -EINVAL without k_handler, -EBUSY when full.
 * NOTE(review): ffz() scans the whole word — this assumes RTAI_NR_SRQS
 * equals the bit width of rtai_sysreq_map so the index cannot exceed
 * rtai_sysreq_table[]; confirm.
 */
int rt_request_srq (unsigned label, void (*k_handler)(void), long long (*u_handler)(unsigned long))
{
	unsigned long flags;
	int srq;

	if (k_handler == NULL) {
		return -EINVAL;
	}

	rtai_save_flags_and_cli(flags);

	if (rtai_sysreq_map != ~0) {
		/* Claim the first free bit (bit 0 is pre-set, reserving srq 0). */
		set_bit(srq = ffz(rtai_sysreq_map), &rtai_sysreq_map);
		rtai_sysreq_table[srq].k_handler = k_handler;
		rtai_sysreq_table[srq].u_handler = u_handler;
		rtai_sysreq_table[srq].label = label;
	} else {
		srq = -EBUSY;
	}
	rtai_restore_flags(flags);

	return srq;
}
00534
00535
00536
00537
00538
00539
00540 int rt_free_srq (unsigned srq)
00541 {
00542 return (srq < 1 || srq >= RTAI_NR_SRQS || !test_and_clear_bit(srq, &rtai_sysreq_map)) ? -EINVAL : 0;
00543 }
00544
00545
00546
00547
00548
00549
/*
 * Mark 'srq' pending and kick the SRQ virtual irq so Linux runs its
 * k_handler soon; srq 0 and out-of-range values are silently ignored.
 */
void rt_pend_linux_srq (unsigned srq)
{
	if (srq > 0 && srq < RTAI_NR_SRQS) {
		unsigned long flags;
		set_bit(srq, &rtai_sysreq_pending);
		rtai_save_flags_and_cli(flags);
		hal_pend_uncond(rtai_sysreq_virq, rtai_cpuid());
		rtai_restore_flags(flags);
	}
}
00560
/* Number of low-level exception vectors that can be intercepted. */
#define NR_EXCEPT 48

/* One entry per vector: the intercept handler and the handler run on
 * return; intercept_table itself is provided by the patched kernel. */
struct intercept_entry { unsigned long handler, rethandler; };
extern struct intercept_entry *intercept_table[];
/* Originals saved by rtai_set_gate_vector() for later restoration. */
static struct intercept_entry old_intercept_table[NR_EXCEPT];
00566
00567
00568
00569
00570
00571 struct intercept_entry rtai_set_gate_vector(unsigned vector, void *handler, void *rethandler)
00572 {
00573 old_intercept_table[vector].handler = intercept_table[vector]->handler;
00574 old_intercept_table[vector].rethandler = intercept_table[vector]->rethandler;
00575 if (handler) {
00576 intercept_table[vector]->handler = (unsigned long)handler;
00577 }
00578 if (rethandler) {
00579 intercept_table[vector]->rethandler = (unsigned long)rethandler;
00580 }
00581 return old_intercept_table[vector];
00582 }
00583
00584
00585
00586
00587
00588 void rtai_reset_gate_vector (unsigned vector, unsigned long handler, unsigned long rethandler)
00589 {
00590 if (!((handler | old_intercept_table[vector].handler) && (rethandler | old_intercept_table[vector].rethandler))) {
00591 return;
00592 }
00593 intercept_table[vector]->handler = handler ? handler : old_intercept_table[vector].handler;
00594 intercept_table[vector]->rethandler = rethandler ? rethandler : old_intercept_table[vector].rethandler;
00595 }
00596
/* Current RTAI decrementer handler, installed by rt_request_timer(). */
static void (*decr_timer_handler)(void);

/*
 * Decrementer exception entry (installed on DECR_VECTOR): run the RTAI
 * timer handler with Linux locked out, then flush the pipeline to
 * Linux unless the root stage is stalled. Returns 1 when Linux may run
 * now, 0 otherwise. NOTE: 'cpuid' is assigned as a side effect of
 * HAL_LOCK_LINUX()/RTAI_SCHED_ISR_LOCK().
 */
int rtai_decr_timer_handler(struct pt_regs *regs)
{
	unsigned long cpuid;
	unsigned long sflags;

	HAL_LOCK_LINUX();
	RTAI_SCHED_ISR_LOCK();
	decr_timer_handler();
	RTAI_SCHED_ISR_UNLOCK();
	HAL_UNLOCK_LINUX();
	if (!test_bit(IPIPE_STALL_FLAG, ROOT_STATUS_ADR(cpuid))) {
		rtai_sti();
		hal_fast_flush_pipeline(cpuid);
		return 1;
	}
	return 0;
}
00617
00618
00619
00620
00621
00622
/* APIC/SMP-oriented services kept only for API compatibility: this
 * PowerPC port has no APIC timers and no irq/cpu re-binding, so these
 * are no-ops (rt_free_apic_timers maps onto the single timer). */
void rt_request_apic_timers (void (*handler)(void), struct apic_timer_setup_data *tmdata) { return; }
void rt_free_apic_timers(void) { rt_free_timer(); }
int rt_assign_irq_to_cpu (int irq, unsigned long cpus_mask) { return 0; }
int rt_reset_irq_to_sym_mode (int irq) { return 0; }
00627
00628
00629 static int rtai_request_tickdev(void);
00630
00631 static void rtai_release_tickdev(void);
00632
00633
00634
00635
00636
/*
 * Program the decrementer as the RTAI timer and install 'handler'.
 * @tick: period in time-base ticks; > 0 selects periodic mode (clamped
 *        to one Linux jiffy), 0 selects oneshot mode paced at the
 *        Linux tick rate. 'use_apic' is ignored on this port.
 * Always returns 0.
 */
int rt_request_timer (void (*handler)(void), unsigned tick, int use_apic)
{
	unsigned long flags;

	rtai_save_flags_and_cli(flags);

	/* Seed the RTAI time base from the current time-base counter. */
	rt_times.tick_time = rtai_rdtsc();
	rt_times.linux_tick = tb_ticks_per_jiffy;
	if (tick > 0) {
		/* Periodic mode: never slower than the Linux jiffy. */
		if (tick > tb_ticks_per_jiffy) {
			tick = tb_ticks_per_jiffy;
		}
		rt_times.intr_time = rt_times.tick_time + tick;
		rt_times.linux_time = rt_times.tick_time + rt_times.linux_tick;
		rt_times.periodic_tick = tick;
#ifdef CONFIG_40x
		/* 40x: auto-reload the PIT with the chosen period. */
		mtspr(SPRN_TCR, mfspr(SPRN_TCR) | TCR_ARE);

		mtspr(SPRN_PIT, tick);
#endif
	} else {
		/* Oneshot mode: first interrupts paced at the Linux tick rate. */
		rt_times.intr_time = rt_times.tick_time + rt_times.linux_tick;
		rt_times.linux_time = rt_times.tick_time + rt_times.linux_tick;
		rt_times.periodic_tick = rt_times.linux_tick;
#ifdef CONFIG_40x
		/* 40x: disable PIT auto-reload in oneshot mode. */
		mtspr(SPRN_TCR, mfspr(SPRN_TCR) & ~TCR_ARE);
#endif
	}

	/* Take over the decrementer irq and exception vector. */
	rt_release_irq(RTAI_TIMER_DECR_IRQ);
	decr_timer_handler = handler;

	rtai_disarm_decr(rtai_cpuid(), 1);
	rt_set_timer_delay(rt_times.periodic_tick);
	rtai_set_gate_vector(DECR_VECTOR, rtai_decr_timer_handler, 0);

	rtai_request_tickdev();
	rtai_restore_flags(flags);
	return 0;
}
00685
00686
00687
00688
00689
00690
/*
 * Give the decrementer back to Linux: release the tick device, restore
 * the original exception vector, and (on 40x) re-arm PIT auto-reload
 * with the Linux jiffy period.
 */
void rt_free_timer (void)
{
	unsigned long flags;

	rtai_save_flags_and_cli(flags);
	rtai_release_tickdev();
#ifdef CONFIG_40x
	mtspr(SPRN_TCR, mfspr(SPRN_TCR) | TCR_ARE);

	mtspr(SPRN_PIT, tb_ticks_per_jiffy);
#endif
	rtai_reset_gate_vector(DECR_VECTOR, 0, 0);
	rtai_disarm_decr(rtai_cpuid(), 0);
	rtai_restore_flags(flags);
}
00707
/* RTC-based timing is not available on this architecture; both entry
 * points exist only for API compatibility and log a notice. */
void rt_request_rtc(long rtc_freq, void *handler)
{
	rt_printk("*** RTC NOT IMPLEMENTED YET ON THIS ARCH ***\n");
}

void rt_release_rtc(void)
{
	rt_printk("*** RTC NOT IMPLEMENTED YET ON THIS ARCH ***\n");
}
00717
00718
00719
00720
00721
00722
/* Count of interrupts whose number came back out of range. */
static int spurious_interrupts;

/*
 * Primary external-interrupt dispatcher, installed on INTR_VECTOR.
 * Real-time irqs are acknowledged and run immediately in the RTAI
 * domain; all others are acknowledged and logged for Linux. Returns 1
 * when the Linux stage of the pipeline should run now, 0 otherwise.
 */
static int rtai_hirq_dispatcher(struct pt_regs *regs)
{
	unsigned long cpuid;
	int irq;

/* ppc_md.get_irq() lost its regs argument after 2.6.14. */
#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,14)
	if ((irq = ppc_md.get_irq()) >= RTAI_NR_IRQS) {
#else
	if ((irq = ppc_md.get_irq(regs)) >= RTAI_NR_IRQS) {
#endif
		spurious_interrupts++;
		return 0;
	}

	if (rtai_realtime_irq[irq].handler) {
		unsigned long sflags;

		/* NOTE: HAL_LOCK_LINUX() assigns 'cpuid' as a side effect. */
		HAL_LOCK_LINUX();
		RTAI_IRQ_ACK(irq);

		RTAI_SCHED_ISR_LOCK();
		rtai_realtime_irq[irq].handler(irq, rtai_realtime_irq[irq].cookie);
		RTAI_SCHED_ISR_UNLOCK();
		HAL_UNLOCK_LINUX();

		/* retmode irqs (or a stalled root stage) skip the Linux stage. */
		if (rtai_realtime_irq[irq].retmode || test_bit(IPIPE_STALL_FLAG, ROOT_STATUS_ADR(cpuid))) {
			return 0;
		}
	} else {
		unsigned long lflags;
		/* Stall the root stage, acknowledge, log the irq for Linux. */
		lflags = xchg((unsigned long *)ROOT_STATUS_ADR(cpuid = rtai_cpuid()), (1 << IPIPE_STALL_FLAG));
		RTAI_IRQ_ACK(irq);

		hal_pend_uncond(irq, cpuid);
		ROOT_STATUS_VAL(cpuid) = lflags;
		if (test_bit(IPIPE_STALL_FLAG, &lflags)) {
			return 0;
		}
	}
	rtai_sti();
	hal_fast_flush_pipeline(cpuid);
	return 1;
}
00768
00769
00770
00771
00772
00773
/* Atomically install 'handler' as the RTAI trap handler and return the
 * previous one; NULL disables RTAI trap handling. */
RT_TRAP_HANDLER rt_set_trap_handler (RT_TRAP_HANDLER handler)
{
	return (RT_TRAP_HANDLER)xchg(&rtai_trap_handler, handler);
}
00778
00779
00780
00781
00782
00783
/*
 * Fault/trap event handler installed on every root-domain fault.
 * Returns 1 when the trap was fully consumed here (Linux must not see
 * it), 0 to propagate it to the Linux handlers.
 */
static int rtai_trap_fault (unsigned event, void *evdata)
{
#ifdef HINT_DIAG_TRAPS
	/* Diagnostic: report (once per event) traps taken with hard
	 * interrupts disabled. */
	static unsigned long traps_in_hard_intr = 0;
	do {
		unsigned long flags;
		rtai_save_flags_and_cli(flags);
		if (!test_bit(RTAI_IFLAG, &flags)) {
			if (!test_and_set_bit(event, &traps_in_hard_intr)) {
				HINT_DIAG_MSG(rt_printk("TRAP %d HAS INTERRUPT DISABLED (TRAPS PICTURE %lx).\n", event, traps_in_hard_intr););
			}
		}
	} while (0);
#endif

	/* Signal number reported to the RTAI trap handler, indexed by
	 * event number. */
	static const int trap2sig[] = {
		SIGSEGV,
		SIGBUS,
		SIGFPE,
		SIGFPE,
		SIGFPE,
		SIGFPE,
		SIGTRAP,
		SIGFPE,
		SIGTRAP,
		SIGSEGV,
		SIGILL,
		SIGTRAP,
		SIGSEGV,
		SIGFPE,
		0,
		0, 0, 0, 0, 0,
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
		0, 0
	};

	TRACE_RTAI_TRAP_ENTRY(evdata->event, 0);

	/* Traps taken outside hard real-time mode go straight to Linux. */
	if (!in_hrt_mode(rtai_cpuid())) {
		goto propagate;
	}

	/* Event 2 is swallowed without action; the original handling that
	 * once lived here was elided — presumably FPU-related, confirm
	 * before relying on this path. */
	if (event == 2) {
		goto endtrap;
	}

	/* Give the registered RTAI trap handler first refusal. */
	if (rtai_trap_handler && rtai_trap_handler(event, trap2sig[event], (struct pt_regs *)evdata, NULL)) {
		goto endtrap;
	}

propagate:
	return 0;

endtrap:
	TRACE_RTAI_TRAP_EXIT();
	return 1;
}
00856
00857
00858
00859
00860
00861
/*
 * Linux-domain handler of the SRQ virtual irq: drain all pending
 * system requests, running each k_handler at most once concurrently
 * (guarded by rtai_sysreq_running).
 */
static void rtai_lsrq_dispatcher (unsigned virq)
{
	unsigned long pending, srq;

	spin_lock(&rtai_lsrq_lock);
	while ((pending = rtai_sysreq_pending & ~rtai_sysreq_running)) {
		set_bit(srq = ffnz(pending), &rtai_sysreq_running);
		clear_bit(srq, &rtai_sysreq_pending);
		spin_unlock(&rtai_lsrq_lock);

		/* Run the handler unlocked; skip SRQs freed in the meantime. */
		if (test_bit(srq, &rtai_sysreq_map)) {
			rtai_sysreq_table[srq].k_handler();
		}

		clear_bit(srq, &rtai_sysreq_running);
		spin_lock(&rtai_lsrq_lock);
	}
	spin_unlock(&rtai_lsrq_lock);
}
00881
00882
00883
00884
00885
00886
/*
 * User-space SRQ dispatcher: a valid 'srq' with a u_handler is invoked
 * with 'label'; otherwise 'label' is looked up among the registered
 * SRQs and the matching srq number is returned (0LL when none).
 * NOTE(review): TRACE_RTAI_SRQ_EXIT() only runs on the fall-through
 * path — entry/exit tracing is unbalanced; confirm this is intended.
 */
static inline long long rtai_usrq_dispatcher (unsigned long srq, unsigned long label)
{
	TRACE_RTAI_SRQ_ENTRY(srq);

	if (srq > 0 && srq < RTAI_NR_SRQS && test_bit(srq, &rtai_sysreq_map) && rtai_sysreq_table[srq].u_handler) {
		return rtai_sysreq_table[srq].u_handler(label);
	} else {
		for (srq = 1; srq < RTAI_NR_SRQS; srq++) {
			if (test_bit(srq, &rtai_sysreq_map) && rtai_sysreq_table[srq].label == label) {
				return (long long)srq;
			}
		}
	}

	TRACE_RTAI_SRQ_EXIT();

	return 0LL;
}
00905
00906
00907
00908
00909
00910
00911
00912
00913
00914
00915
00916
00917
00918
00919
#include <asm/rtai_usi.h>

/* LXRT syscall dispatcher, installed by the LXRT module when loaded. */
long long (*rtai_lxrt_dispatcher)(unsigned long, unsigned long, void *);

/* Scheduler-installed hook for non-RTAI syscalls (see rtai_catch_event). */
static int (*sched_intercept_syscall_prologue)(struct pt_regs *);

/*
 * HAL_SYSCALL_PROLOGUE event handler: RTAI syscalls (gpr[0] >=
 * RTAI_SYSCALL_NR) are routed to the LXRT/SRQ dispatchers, storing the
 * 64-bit result through the user pointer in gpr[5]; anything else goes
 * to the scheduler hook, if any. Returns 1 to hijack the syscall from
 * Linux, 0 to let Linux proceed.
 */
static int intercept_syscall_prologue(unsigned long event, struct pt_regs *regs){
	if (likely(regs->gpr[0] >= RTAI_SYSCALL_NR)) {
		unsigned long srq = regs->gpr[3];
		IF_IS_A_USI_SRQ_CALL_IT(srq, regs->gpr[4], (long long *)regs->gpr[5], regs->msr, 1);
		*((long long *)regs->gpr[5]) = srq > RTAI_NR_SRQS ? rtai_lxrt_dispatcher(srq, regs->gpr[4], regs) : rtai_usrq_dispatcher(srq, regs->gpr[4]);
		/* 'srq' is reused as the cpu id from here on. */
		if (!in_hrt_mode(srq = rtai_cpuid())) {
			hal_test_and_fast_flush_pipeline(srq);
			return 0;
		}
		return 1;
	}
	return likely(sched_intercept_syscall_prologue != NULL) ? sched_intercept_syscall_prologue(regs) : 0;
}
00938
00939
/*
 * Direct RTAI syscall entry (assembly-invoked): gpr[0] carries the
 * request, gpr[4] the argument, and the 64-bit result is stored
 * through the user pointer in gpr[3]. Returns 1 when Linux may run
 * afterwards, 0 while in hard real-time mode.
 */
asmlinkage int rtai_syscall_dispatcher (struct pt_regs *regs)
{
	unsigned long srq = regs->gpr[0];

	IF_IS_A_USI_SRQ_CALL_IT(srq, regs->gpr[4], (long long *)regs->gpr[5], regs->msr, 1);

	*((long long *)regs->gpr[3]) = srq > RTAI_NR_SRQS ? rtai_lxrt_dispatcher(srq, regs->gpr[4], regs) : rtai_usrq_dispatcher(srq, regs->gpr[4]);

	/* 'srq' is reused as the cpu id from here on. */
	if (!in_hrt_mode(srq = rtai_cpuid())) {
		hal_test_and_fast_flush_pipeline(srq);
		return 1;
	}
	return 0;
}
00954
00955
00956
00957
00958
/*
 * Arch-dependent installation: hook the syscall prologue event, verify
 * the HAL timer irq is the decrementer, and latch the CPU (time base)
 * frequency from the HAL unless overridden by module parameter.
 */
static void rtai_install_archdep (void)
{
	struct hal_sysinfo_struct sysinfo;

#if !defined(USE_LINUX_SYSCALL) && !defined(CONFIG_RTAI_LXRT_USE_LINUX_SYSCALL)

#endif

	hal_catch_event(hal_root_domain, HAL_SYSCALL_PROLOGUE, (void *)intercept_syscall_prologue);

	hal_get_sysinfo(&sysinfo);

	if (sysinfo.archdep.tmirq != RTAI_TIMER_DECR_IRQ) {
		printk("RTAI/ipipe: the timer interrupt %d is not supported\n", sysinfo.archdep.tmirq);
	}

	/* 0 means "use the HAL-reported frequency". */
	if (rtai_cpufreq_arg == 0) {
		rtai_cpufreq_arg = (unsigned long)sysinfo.cpufreq;
	}
	rtai_tunables.cpu_freq = rtai_cpufreq_arg;
}
00980
00981
00982
00983
00984
00985
/* Undo rtai_install_archdep(): detach the syscall prologue hook. */
static void rtai_uninstall_archdep (void)
{
	hal_catch_event(hal_root_domain, HAL_SYSCALL_PROLOGUE, NULL);
}
00991
00992
00993
00994
00995
00996
/*
 * Install (or clear, with NULL) the ISR scheduling hook, returning the
 * previous one; without CONFIG_RTAI_SCHED_ISR_LOCK this is a no-op
 * that returns NULL.
 */
void (*rt_set_ihook (void (*hookfn)(int)))(int)
{
#ifdef CONFIG_RTAI_SCHED_ISR_LOCK
	return (void (*)(int))xchg(&rtai_isr_hook, hookfn);
#else
	return NULL;
#endif
}
01005
01006
01007
01008
01009
01010
/* Apply a Linux scheduling policy/priority to 'task' through the HAL,
 * logging when the change did not take effect. */
void rtai_set_linux_task_priority (struct task_struct *task, int policy, int prio)
{
	hal_set_linux_task_priority(task, policy, prio);
	if (task->rt_priority != prio || task->policy != policy) {
		printk("RTAI[hal]: sched_setscheduler(policy = %d, prio = %d) failed, (%s -- pid = %d)\n", policy, prio, task->comm, task->pid);
	}
}
01018
01019 #ifdef CONFIG_PROC_FS
01020
/* /proc/rtai directory entry (exported; other RTAI modules add to it). */
struct proc_dir_entry *rtai_proc_root = NULL;

/*
 * /proc/rtai/hal read callback: dump frequency/latency settings, the
 * real-time irqs in use, the spurious interrupt count and the active
 * SRQs. The PROC_PRINT* macros come from rtai_proc_fs.h and manage
 * the page buffer (including early return when it fills).
 */
static int rtai_read_proc (char *page, char **start, off_t off, int count, int *eof, void *data)
{
	PROC_PRINT_VARS;
	int i, none;

	PROC_PRINT("\n** RTAI/ppc over ADEOS/ipipe:\n\n");
	PROC_PRINT("    Decr. Frequency: %lu\n", rtai_tunables.cpu_freq);
	PROC_PRINT("    Decr. Latency: %d ns\n", RTAI_LATENCY_8254);
	PROC_PRINT("    Decr. Setup Time: %d ns\n", RTAI_SETUP_TIME_8254);

	none = 1;
	PROC_PRINT("\n** Real-time IRQs used by RTAI: ");
	for (i = 0; i < RTAI_NR_IRQS; i++) {
		if (rtai_realtime_irq[i].handler) {
			if (none) {
				PROC_PRINT("\n");
				none = 0;
			}
			PROC_PRINT("\n    #%d at %p", i, rtai_realtime_irq[i].handler);
		}
	}
	if (none) {
		PROC_PRINT("none");
	}
	PROC_PRINT("\n\n");

	PROC_PRINT("** RTAI extension traps: \n\n");
	PROC_PRINT("    SYSREQ=0x%x\n", 0xC00);

	PROC_PRINT("    IRQ spurious = %d\n", spurious_interrupts);
	PROC_PRINT("\n");

	none = 1;
	PROC_PRINT("** RTAI SYSREQs in use: \n");
	for (i = 0; i < RTAI_NR_SRQS; i++) {
		if (rtai_sysreq_table[i].k_handler || rtai_sysreq_table[i].u_handler || rtai_sysreq_table[i].label) {
			PROC_PRINT("    #%d label:%lu\n", i, rtai_sysreq_table[i].label);
			none = 0;
		}
	}

	if (none) {
		PROC_PRINT("    none");
	}
	PROC_PRINT("\n\n");

	PROC_PRINT_DONE;
}
01076
01077
01078
01079
01080
01081
01082 static int rtai_proc_register (void)
01083 {
01084 struct proc_dir_entry *ent;
01085
01086 rtai_proc_root = create_proc_entry("rtai", S_IFDIR, 0);
01087 if (!rtai_proc_root) {
01088 printk(KERN_ERR "Unable to initialize /proc/rtai.\n");
01089 return -1;
01090 }
01091 rtai_proc_root->owner = THIS_MODULE;
01092 ent = create_proc_entry("hal", S_IFREG|S_IRUGO|S_IWUSR, rtai_proc_root);
01093 if (!ent) {
01094 printk(KERN_ERR "Unable to initialize /proc/rtai/hal.\n");
01095 return -1;
01096 }
01097
01098 ent->read_proc = rtai_read_proc;
01099
01100 return 0;
01101 }
01102
01103
01104
01105
01106
01107
/* Remove /proc/rtai/hal and /proc/rtai (reverse of registration). */
static void rtai_proc_unregister (void)
{
	remove_proc_entry("hal", rtai_proc_root);
	remove_proc_entry("rtai", 0);
}
01113
01114 #endif
01115
01116
01117
01118
01119
01120
/*
 * Entry point of the registered RTAI domain: print the banner once
 * (when entered with 'iflag' set) and then park forever — all real
 * work happens through events and irqs, not in this context.
 */
static void rtai_domain_entry (int iflag)
{
	if (iflag) {
		rt_printk(KERN_INFO "RTAI[hal]: <%s> mounted over %s %s.\n", PACKAGE_VERSION, HAL_TYPE, HAL_VERSION_STRING);
		rt_printk(KERN_INFO "RTAI[hal]: compiled with %s.\n", CONFIG_RTAI_COMPILER);
	}
	for (;;) hal_suspend_domain();
}
01129
01130
01131
01132
01133
01134
/*
 * RTAI-side wrapper of hal_catch_event(): HAL_SYSCALL_PROLOGUE is kept
 * local (stored in sched_intercept_syscall_prologue, since RTAI owns
 * that event itself); every other event is forwarded to the HAL.
 */
long rtai_catch_event (struct hal_domain_struct *from, unsigned long event, int (*handler)(unsigned long, void *)) {
	if (event == HAL_SYSCALL_PROLOGUE) {
		sched_intercept_syscall_prologue = (void *)handler;
		return 0;
	}
	return (long)hal_catch_event(from, event, (void *)handler);
}
01142
01143
01144
01145
01146
01147
/* When set, root-domain events are diverted to RTAI's dispatching. */
extern int ipipe_events_diverted;

/*
 * Module init: sanity-check the HAL event dispatcher, allocate the SRQ
 * virtual irq, hook the external-interrupt vector, install the arch
 * hooks and /proc entries, register the RTAI domain above Linux, and
 * catch all root-domain faults. Returns 0 on success, -1 on failure.
 */
int __rtai_hal_init (void)
{
	int trapnr, halinv;
	struct hal_attr_struct attr;

	/* Refuse to mount if any HAL event slot is already taken. */
	for (halinv = trapnr = 0; trapnr < HAL_NR_EVENTS; trapnr++) {
		if (hal_root_domain->hal_event_handler_fun(trapnr)) {
			halinv = 1;
			printk("EVENT %d INVALID\n", trapnr);
		}
	}
	if (halinv) {
		printk(KERN_ERR "RTAI[hal]: HAL IMMEDIATE EVENT DISPATCHING BROKEN\n");
		return -1;
	}

	/* Virtual irq used to defer SRQ handling to Linux context. */
	if (!(rtai_sysreq_virq = hal_alloc_irq())) {
		printk(KERN_ERR "RTAI[hal]: no virtual interrupt available.\n");
		return -1;
	}

	/* Default every irq's ack routine to the root domain's. */
	for (trapnr = 0; trapnr < RTAI_NR_IRQS; trapnr++) {
		rtai_realtime_irq[trapnr].irq_ack = (void *)hal_root_domain->irqs[trapnr].acknowledge;
	}
#if LINUX_VERSION_CODE < RTAI_LT_KERNEL_VERSION_FOR_NONPERCPU
	for (trapnr = 0; trapnr < RTAI_NR_CPUS; trapnr++) {
		ipipe_root_status[trapnr] = &hal_root_domain->cpudata[trapnr].status;
	}
#endif

	hal_virtualize_irq(hal_root_domain, rtai_sysreq_virq, &rtai_lsrq_dispatcher, NULL, IPIPE_HANDLE_MASK);

	/* Take over the external-interrupt exception vector. */
	rtai_set_gate_vector(INTR_VECTOR, rtai_hirq_dispatcher, 0);

	ipipe_events_diverted = 1;
	rtai_install_archdep();

#ifdef CONFIG_PROC_FS
	rtai_proc_register();
#endif

	/* Register the RTAI domain above Linux in the pipeline. */
	hal_init_attr(&attr);
	attr.name = "RTAI";
	attr.domid = RTAI_DOMAIN_ID;
	attr.entry = (void *)rtai_domain_entry;
	attr.priority = get_domain_pointer(1)->priority + 100;
	hal_register_domain(&rtai_domain, &attr);

	/* Catch all root-domain faults. */
	for (trapnr = 0; trapnr < HAL_NR_FAULTS; trapnr++) {
		hal_catch_event(hal_root_domain, trapnr, (void *)rtai_trap_fault);
	}

	printk(KERN_INFO "RTAI[hal]: mounted (%s, IMMEDIATE (INTERNAL IRQs %s).\n", HAL_TYPE, CONFIG_RTAI_DONT_DISPATCH_CORE_IRQS ? "VECTORED" : "DISPATCHED");

	/* Dump the pipeline layout for diagnostics. */
	printk("PIPELINE layers:\n");
	for (trapnr = 1; ; trapnr++) {
		struct hal_domain_struct *next_domain;
		next_domain = get_domain_pointer(trapnr);
		/* Small integers mark the end of the domain list. */
		if ((unsigned long)next_domain < 10) break;
		printk("%p %x %s %d\n", next_domain, next_domain->domid, next_domain->name, next_domain->priority);
	}

	return 0;
}
01224
01225
01226
01227
01228
01229
/* Module teardown: undo everything __rtai_hal_init() set up, in
 * reverse order. */
void __rtai_hal_exit (void)
{
	int trapnr;
	unsigned long flags;

#ifdef CONFIG_PROC_FS
	rtai_proc_unregister();
#endif

	/* Restore the original external-interrupt vector. */
	rtai_save_flags_and_cli(flags);
	rtai_reset_gate_vector(INTR_VECTOR, 0, 0);
	rtai_restore_flags(flags);

	hal_unregister_domain(&rtai_domain);

	/* Drop all fault hooks. */
	for (trapnr = 0; trapnr < HAL_NR_FAULTS; trapnr++) {
		hal_catch_event(hal_root_domain, trapnr, NULL);
	}

	/* Release the SRQ virtual irq. */
	hal_virtualize_irq(hal_root_domain, rtai_sysreq_virq, NULL, NULL, 0);
	hal_free_irq(rtai_sysreq_virq);

	rtai_uninstall_archdep();
	ipipe_events_diverted = 0;

	printk(KERN_INFO "RTAI[hal]: unmounted.\n");
}
01263
01264 module_init(__rtai_hal_init);
01265 module_exit(__rtai_hal_exit);
01266
01267
01268
01269
01270
01271
01272 #define LINE_LENGTH 200
01273
01274 asmlinkage int rt_printk(const char *fmt, ...)
01275 {
01276 char line[LINE_LENGTH];
01277 va_list args;
01278 int r;
01279
01280 va_start(args, fmt);
01281 r = vsnprintf(line, LINE_LENGTH, fmt, args);
01282 va_end(args);
01283 printk("%s", line);
01284
01285 return r;
01286 }
01287
01288
01289
01290
01291
01292
01293 asmlinkage int rt_sync_printk(const char *fmt, ...)
01294 {
01295 char line[LINE_LENGTH];
01296 va_list args;
01297 int r;
01298
01299 va_start(args, fmt);
01300 r = vsnprintf(line, LINE_LENGTH, fmt, args);
01301 va_end(args);
01302 hal_set_printk_sync(&rtai_domain);
01303 printk("%s", line);
01304 hal_set_printk_async(&rtai_domain);
01305
01306 return r;
01307 }
01308
01309
01310
01311
01312
/*
 * Convert a signed 64-bit value to decimal ASCII in the caller-supplied
 * buffer 's' (must hold up to 19 digits plus sign and NUL); returns 's'.
 * NOTE(review): 'll = -ll' is UB for LLONG_MIN — confirm callers never
 * pass it. The [LOW] index picks the low 32-bit word of the 64-bit
 * value (LOW is arch/endian-defined elsewhere).
 */
void *ll2a (long long ll, char *s)
{
	unsigned long i, k, ul;
	char a[20];

	if (ll < 0) {
		s[0] = 1;	/* temporary sign flag, rewritten to '-' below */
		ll = -ll;
	} else {
		s[0] = 0;
	}
	i = 0;
	/* Peel digits with the 64-bit helper while the value needs it. */
	while (ll > 0xFFFFFFFF) {
		ll = rtai_ulldiv(ll, 10, &k);
		a[++i] = k + '0';
	}
	/* Remainder fits in 32 bits: switch to native division. */
	ul = ((unsigned long *)&ll)[LOW];
	do {
		ul = (k = ul)/10;
		a[++i] = k - ul*10 + '0';
	} while (ul);
	if (s[0]) {
		k = 1;
		s[0] = '-';
	} else {
		k = 0;
	}
	a[0] = 0;
	/* Digits were stacked in reverse; unwind them, ending at a[0]'s NUL. */
	while ((s[k++] = a[i--]));
	return s;
}
01344
01345
01346
01347
01348
01349
/* Exported HAL API — real-time IRQ handler management. */
EXPORT_SYMBOL(rtai_realtime_irq);

EXPORT_SYMBOL(rt_request_irq);
EXPORT_SYMBOL(rt_release_irq);
EXPORT_SYMBOL(rt_set_irq_cookie);
EXPORT_SYMBOL(rt_set_irq_retmode);
EXPORT_SYMBOL(rt_set_irq_ack);

/* Exported HAL API — low-level IRQ line control (PIC operations). */
EXPORT_SYMBOL(rt_startup_irq);
EXPORT_SYMBOL(rt_shutdown_irq);
EXPORT_SYMBOL(rt_enable_irq);
EXPORT_SYMBOL(rt_disable_irq);
EXPORT_SYMBOL(rt_mask_and_ack_irq);
EXPORT_SYMBOL(rt_unmask_irq);
EXPORT_SYMBOL(rt_ack_irq);

/* Exported HAL API — Linux-domain IRQ sharing and pending. */
EXPORT_SYMBOL(rt_request_linux_irq);
EXPORT_SYMBOL(rt_free_linux_irq);
EXPORT_SYMBOL(rt_pend_linux_irq);
EXPORT_SYMBOL(usr_rt_pend_linux_irq);

/* Exported HAL API — system requests (srq). */
EXPORT_SYMBOL(rt_request_srq);
EXPORT_SYMBOL(rt_free_srq);
EXPORT_SYMBOL(rt_pend_linux_srq);

/* Exported HAL API — IRQ/CPU affinity and timer management. */
EXPORT_SYMBOL(rt_assign_irq_to_cpu);
EXPORT_SYMBOL(rt_reset_irq_to_sym_mode);
EXPORT_SYMBOL(rt_request_apic_timers);
EXPORT_SYMBOL(rt_free_apic_timers);

EXPORT_SYMBOL(rt_request_timer);
EXPORT_SYMBOL(rt_free_timer);
EXPORT_SYMBOL(rt_request_rtc);
EXPORT_SYMBOL(rt_release_rtc);

/* Exported HAL API — trap handling and scheduler ISR hook. */
EXPORT_SYMBOL(rt_set_trap_handler);
EXPORT_SYMBOL(rt_set_ihook);

EXPORT_SYMBOL(rtai_critical_enter);
EXPORT_SYMBOL(rtai_critical_exit);

/* Exported HAL state shared with the schedulers and LXRT. */
EXPORT_SYMBOL(rtai_set_linux_task_priority);
EXPORT_SYMBOL(rtai_linux_context);
EXPORT_SYMBOL(rtai_domain);
EXPORT_SYMBOL(rtai_proc_root);
EXPORT_SYMBOL(rtai_tunables);
EXPORT_SYMBOL(rtai_cpu_lock);
EXPORT_SYMBOL(rtai_cpu_realtime);
EXPORT_SYMBOL(rt_times);
EXPORT_SYMBOL(rt_smp_times);

/* Exported printing helpers (defined above). */
EXPORT_SYMBOL(rt_printk);
EXPORT_SYMBOL(rt_sync_printk);
EXPORT_SYMBOL(ll2a);

EXPORT_SYMBOL(rtai_set_gate_vector);
EXPORT_SYMBOL(rtai_reset_gate_vector);
EXPORT_SYMBOL(rtai_catch_event);

EXPORT_SYMBOL(rtai_lxrt_dispatcher);
EXPORT_SYMBOL(rt_scheduling);
#if LINUX_VERSION_CODE < RTAI_LT_KERNEL_VERSION_FOR_NONPERCPU
EXPORT_SYMBOL(ipipe_root_status);
#endif

/* Assembly task-switch entry point, defined in the arch support code. */
void up_task_sw(void *, void *);
EXPORT_SYMBOL(up_task_sw);

#ifdef CONFIG_RTAI_FPU_SUPPORT
/* FPU context save/restore primitives, defined in the arch support code. */
void __save_fpenv(void *fpenv);
EXPORT_SYMBOL(__save_fpenv);
void __restore_fpenv(void *fpenv);
EXPORT_SYMBOL(__restore_fpenv);
#endif

EXPORT_SYMBOL(IsolCpusMask);
01426
#ifdef CONFIG_GENERIC_CLOCKEVENTS

#include <linux/clockchips.h>
#include <linux/ipipe_tickdev.h>

/*
 * Hooks a scheduler may install to service the Linux clockevents device
 * while RTAI owns the hardware timer.  When not installed, the default
 * _rt_linux_hrt_* handlers below are registered instead (see
 * rtai_request_tickdev()).
 */
void (*rt_linux_hrt_set_mode)(enum clock_event_mode, struct ipipe_tick_device *);
int (*rt_linux_hrt_next_shot)(unsigned long, struct ipipe_tick_device *);
01439
01440 static void _rt_linux_hrt_set_mode(enum clock_event_mode mode, struct ipipe_tick_device *hrt_dev)
01441 {
01442 if (mode == CLOCK_EVT_MODE_ONESHOT || mode == CLOCK_EVT_MODE_SHUTDOWN) {
01443 rt_times.linux_tick = 0;
01444 } else if (mode == CLOCK_EVT_MODE_PERIODIC) {
01445 rt_times.linux_tick = rtai_llimd((1000000000 + HZ/2)/HZ, TIMER_FREQ, 1000000000);
01446 }
01447 }
01448
01449 static int _rt_linux_hrt_next_shot(unsigned long delay, struct ipipe_tick_device *hrt_dev)
01450 {
01451 rt_times.linux_time = rt_times.tick_time + rtai_llimd(delay, TIMER_FREQ, 1000000000);
01452 return 0;
01453 }
01454
/*
 * Compatibility wrapper: newer I-pipe patches (flagged by
 * __IPIPE_FEATURE_REQUEST_TICKDEV) added a fifth <timer_freq> output
 * argument to ipipe_request_tickdev(); older ones take only four.
 */
#ifdef __IPIPE_FEATURE_REQUEST_TICKDEV
#define IPIPE_REQUEST_TICKDEV(a, b, c, d, e) ipipe_request_tickdev(a, (void *)(b), (void *)(c), d, e)
#else
#define IPIPE_REQUEST_TICKDEV(a, b, c, d, e) ipipe_request_tickdev(a, b, c, d)
#endif
01460
/*
 * Take over the "decrementer" clockevents device of every online CPU
 * through the I-pipe, installing either the scheduler-provided
 * rt_linux_hrt_* hooks or the default handlers above.
 * Returns 0 on success, or the offending device mode otherwise.
 */
static int rtai_request_tickdev(void)
{
	int mode, cpuid;
	unsigned long timer_freq;
	for (cpuid = 0; cpuid < num_online_cpus(); cpuid++) {
		/* NOTE(review): the pointer comparison presumably means "both
		   hooks installed": the two pointers are equal only when both
		   are still NULL (they can never legitimately point to the
		   same function) — confirm against the scheduler code. */
		if ((void *)rt_linux_hrt_set_mode != (void *)rt_linux_hrt_next_shot) {
			mode = IPIPE_REQUEST_TICKDEV("decrementer", rt_linux_hrt_set_mode, rt_linux_hrt_next_shot, cpuid, &timer_freq);
		} else {
			mode = IPIPE_REQUEST_TICKDEV("decrementer", _rt_linux_hrt_set_mode, _rt_linux_hrt_next_shot, cpuid, &timer_freq);
		}
		/* UNUSED/ONESHOT: no periodic Linux tick to emulate; PERIODIC
		   is fine as-is; anything else aborts the takeover. */
		if (mode == CLOCK_EVT_MODE_UNUSED || mode == CLOCK_EVT_MODE_ONESHOT) {
			rt_times.linux_tick = 0;
		} else if (mode != CLOCK_EVT_MODE_PERIODIC) {
			return mode;
		}
	}
	return 0;
}
01479
/*
 * Give the clockevents device of every online CPU back to Linux,
 * undoing rtai_request_tickdev().
 */
static void rtai_release_tickdev(void)
{
	int cpu = 0;

	while (cpu < num_online_cpus()) {
		ipipe_release_tickdev(cpu);
		cpu++;
	}
}
01487
#else

/*
 * Without CONFIG_GENERIC_CLOCKEVENTS there is no tick device to grab:
 * keep the hook pointers (with opaque types, since clockchips.h is not
 * available) and reduce the request/release helpers to no-op stubs.
 */
void (*rt_linux_hrt_set_mode)(int clock_event_mode, void *);
int (*rt_linux_hrt_next_shot)(unsigned long, void *);

static int rtai_request_tickdev(void) { return 0; }

static void rtai_release_tickdev(void) { return; }

#endif
01498
/* Export the hook pointers so a scheduler module can install its handlers. */
EXPORT_SYMBOL(rt_linux_hrt_set_mode);
EXPORT_SYMBOL(rt_linux_hrt_next_shot);