00001
00002
00003
00004
00005
00006
00007
00008
00009
00010
00011
00012
00013
00014
00015
00016
00017
00018
00019
00020
00021
00022
00023
00024
00025
00026
00027
00028
00029
00030
00031
00032
00033
00034
00035
00036
00037
00038
00039
00040
00041
00042 #ifndef _RTAI_ASM_X8664_HAL_H
00043 #define _RTAI_ASM_X8664_HAL_H
00044
00045 #define RTAI_SYSCALL_MODE //__attribute__((regparm(0)))
00046
00047 #define LOCKED_LINUX_IN_IRQ_HANDLER
00048 #define UNWRAPPED_CATCH_EVENT
00049
00050 #include <rtai_hal_names.h>
00051 #include <asm/rtai_vectors.h>
00052 #include <rtai_types.h>
00053
00054 #ifdef CONFIG_SMP
00055 #define RTAI_NR_CPUS CONFIG_RTAI_CPUS
00056 #else
00057 #define RTAI_NR_CPUS 1
00058 #endif
00059
/* Find the index of the least-significant set bit of 'word' (x86-64 BSF).
 * The result is undefined when word == 0 — callers must pass a non-zero
 * value ("ffnz" = find first non-zero). */
static __inline__ unsigned long ffnz (unsigned long word) {

	__asm__("bsfq %1, %0"
	: "=r" (word)
	: "r" (word));
	return word;
}
00067
/* Divide the 64-bit unsigned value 'ull' by 'uld', optionally storing the
 * remainder through 'r' (ignored when r is NULL), and return the quotient.
 * On x86-64 the compiler emits a native 64-bit divide, so the historical
 * hand-coded long-division fallback (previously parked under "#if 0") has
 * been removed as dead code. */
static inline unsigned long long rtai_ulldiv(unsigned long long ull, unsigned long uld, unsigned long *r)
{
	if (r) {
		*r = ull%uld;
	}
	return ull/uld;
}
00102
/* Return i*mult/div computed with a full 128-bit intermediate product:
 * RDX:RAX = i*mult (mulq), then RAX = RDX:RAX / div (divq).  This avoids
 * the overflow that plain C (i*mult)/div would suffer in the intermediate.
 * NOTE(review): divq faults if div == 0 or the quotient does not fit in
 * 64 bits — callers must guarantee sane operands. */
static inline long rtai_imuldiv (long i, long mult, long div) {

	/* 'dummy' only absorbs the RDX output so the compiler knows the
	 * register is clobbered by mulq/divq. */
	int dummy;

	__asm__ __volatile__ ( \
		"mulq %%rdx\t\n" \
		"divq %%rcx\t\n" \
		: "=a" (i), "=d" (dummy)
		: "a" (i), "d" (mult), "c" (div));

	return i;
}
00117
/* 64-bit scaling helper: ll*mult/div.  On x86-64 'long' is already 64 bits
 * wide, so forwarding to rtai_imuldiv() loses no precision. */
static inline long long rtai_llimd(long long ll, long mult, long div) {
	return rtai_imuldiv(ll, mult, div);
}
00121
00122
00123
00124
00125
00126
/* Divide the 64-bit value 'a' by the 32-bit divisor 'b'; the remainder is
 * stored through 'r' when it is non-NULL.  Returns the quotient.
 *
 * Rewritten in plain C: the previous inline-assembly version was broken on
 * x86-64 — it split 'a' through a union of one unsigned long long and two
 * unsigned longs (both 8 bytes here, so ul[1] lay past the value and held
 * garbage), and it returned the untouched argument 'a' instead of the
 * quotient computed into the union.  The compiler emits a native 64-bit
 * divide for this anyway. */
static inline unsigned long long rtai_u64div32c(unsigned long long a,
						unsigned long b,
						int *r) {
	if (r) {
		*r = (int)(a % b);
	}
	return a / b;
}
00148
00149 #if defined(__KERNEL__) && !defined(__cplusplus)
00150 #include <linux/sched.h>
00151 #include <linux/interrupt.h>
00152 #include <asm/desc.h>
00153 #include <asm/system.h>
00154 #include <asm/io.h>
00155 #include <asm/rtai_atomic.h>
00156 #include <asm/rtai_fpu.h>
00157 #ifdef CONFIG_X86_LOCAL_APIC
00158 #include <asm/fixmap.h>
00159 #include <asm/apic.h>
00160 #endif
00161 #include <rtai_trace.h>
00162
/* Per-IRQ descriptor for a real-time interrupt handler. */
struct rtai_realtime_irq_s {
	int (*handler)(unsigned irq, void *cookie); /* RT handler, receives 'cookie' */
	void *cookie;                               /* opaque user argument */
	int retmode;                                /* handler return mode (see rt_set_irq_retmode) */
	int cpumask;                                /* CPUs this IRQ may be assigned to */
	int (*irq_ack)(unsigned int, void *);       /* optional custom acknowledge hook */
};
00170
00171
00172
00173
00174
00175
00176
/* Map a legacy external IRQ number to its interrupt vector.  IRQ2 is the
 * 8259 cascade line and has no vector of its own, hence -EINVAL. */
#ifdef CONFIG_X86_IO_APIC
static inline int ext_irq_vector(int irq)
{
	if (irq != 2) {
		/* With the IO-APIC, device vectors are spaced 8 apart from
		 * FIRST_DEVICE_VECTOR; IRQs above 2 shift down by the missing
		 * cascade entry. */
		return (FIRST_DEVICE_VECTOR + 8*(irq < 2 ? irq : irq - 1));
	}
	return -EINVAL;
}
#else
static inline int ext_irq_vector(int irq)
{
	if (irq != 2) {
		/* Without the IO-APIC, vectors simply follow the PIC layout. */
		return (FIRST_EXTERNAL_VECTOR + irq);
	}
	return -EINVAL;
}
#endif
00194
00195 #define RTAI_DOMAIN_ID 0x9ac15d93 // nam2num("rtai_d")
00196 #define RTAI_NR_TRAPS HAL_NR_FAULTS
00197 #define RTAI_NR_SRQS 32
00198
00199 #define RTAI_APIC_TIMER_VECTOR RTAI_APIC_HIGH_VECTOR
00200 #define RTAI_APIC_TIMER_IPI RTAI_APIC_HIGH_IPI
00201 #define RTAI_SMP_NOTIFY_VECTOR RTAI_APIC_LOW_VECTOR
00202 #define RTAI_SMP_NOTIFY_IPI RTAI_APIC_LOW_IPI
00203
00204 #define RTAI_TIMER_8254_IRQ 0
00205 #define RTAI_FREQ_8254 1193180
00206 #define RTAI_APIC_ICOUNT ((RTAI_FREQ_APIC + HZ/2)/HZ)
00207 #define RTAI_COUNTER_2_LATCH 0xfffe
00208 #define RTAI_LATENCY_8254 CONFIG_RTAI_SCHED_8254_LATENCY
00209 #define RTAI_SETUP_TIME_8254 2011
00210
00211 #define RTAI_CALIBRATED_APIC_FREQ 0
00212 #define RTAI_FREQ_APIC (rtai_tunables.apic_freq)
00213 #define RTAI_LATENCY_APIC CONFIG_RTAI_SCHED_APIC_LATENCY
00214 #define RTAI_SETUP_TIME_APIC 1000
00215
00216 #define RTAI_TIME_LIMIT 0x7000000000000000LL
00217
00218 #define RTAI_IFLAG 9
00219
00220 #define rtai_cpuid() hal_processor_id()
00221 #define rtai_tskext(idx) hal_tskext[idx]
00222
00223
00224 #define rtai_hw_cli() hal_hw_cli()
00225 #define rtai_hw_sti() hal_hw_sti()
00226 #define rtai_hw_save_flags_and_cli(x) hal_hw_local_irq_save(x)
00227 #define rtai_hw_restore_flags(x) hal_hw_local_irq_restore(x)
00228 #define rtai_hw_save_flags(x) hal_hw_local_irq_flags(x)
00229
00230
00231 #define rtai_cli() hal_hw_cli()
00232 #define rtai_sti() hal_hw_sti()
00233 #define rtai_save_flags_and_cli(x) hal_hw_local_irq_save(x)
00234 #define rtai_restore_flags(x) hal_hw_local_irq_restore(x)
00235 #define rtai_save_flags(x) hal_hw_local_irq_flags(x)
00236
/* Return the n-th domain (1-based) in the Adeos/I-pipe pipeline list.
 * If fewer than n domains exist, the walk falls off the list and the
 * final count is returned cast to a pointer — NOTE(review): such a return
 * value is a small integer, not a valid pointer; callers must compare it,
 * never dereference it. */
static inline struct hal_domain_struct *get_domain_pointer(int n)
{
	struct list_head *p = hal_pipeline.next;
	struct hal_domain_struct *d;
	unsigned long i = 0;
	while (p != &hal_pipeline) {
		d = list_entry(p, struct hal_domain_struct, p_link);
		if (++i == n) {
			return d;
		}
		p = d->p_link.next;
	}
	return (struct hal_domain_struct *)i;
}
00251
00252 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22)
00253
00254 #define ROOT_STATUS_ADR(cpuid) (ipipe_root_status[cpuid])
00255 #define ROOT_STATUS_VAL(cpuid) (*ipipe_root_status[cpuid])
00256
00257 #define hal_pend_domain_uncond(irq, domain, cpuid) \
00258 do { \
00259 hal_irq_hits_pp(irq, domain, cpuid); \
00260 if (likely(!test_bit(IPIPE_LOCK_FLAG, &(domain)->irqs[irq].control))) { \
00261 __set_bit((irq) & IPIPE_IRQ_IMASK, &(domain)->cpudata[cpuid].irq_pending_lo[(irq) >> IPIPE_IRQ_ISHIFT]); \
00262 __set_bit((irq) >> IPIPE_IRQ_ISHIFT, &(domain)->cpudata[cpuid].irq_pending_hi); \
00263 } \
00264 } while (0)
00265
00266 #define hal_fast_flush_pipeline(cpuid) \
00267 do { \
00268 if (hal_root_domain->cpudata[cpuid].irq_pending_hi != 0) { \
00269 rtai_cli(); \
00270 hal_sync_stage(IPIPE_IRQMASK_ANY); \
00271 } \
00272 } while (0)
00273
00274 #else
00275
00276 #define ROOT_STATUS_ADR(cpuid) (&ipipe_cpudom_var(hal_root_domain, status))
00277 #define ROOT_STATUS_VAL(cpuid) (ipipe_cpudom_var(hal_root_domain, status))
00278
00279 #define hal_pend_domain_uncond(irq, domain, cpuid) \
00280 do { \
00281 if (likely(!test_bit(IPIPE_LOCK_FLAG, &(domain)->irqs[irq].control))) { \
00282 __set_bit((irq) & IPIPE_IRQ_IMASK, &ipipe_cpudom_var(domain, irqpend_lomask)[(irq) >> IPIPE_IRQ_ISHIFT]); \
00283 __set_bit((irq) >> IPIPE_IRQ_ISHIFT, &ipipe_cpudom_var(domain, irqpend_himask)); \
00284 } else { \
00285 __set_bit((irq) & IPIPE_IRQ_IMASK, &ipipe_cpudom_var(domain, irqheld_mask)[(irq) >> IPIPE_IRQ_ISHIFT]); \
00286 } \
00287 ipipe_cpudom_var(domain, irqall)[irq]++; \
00288 } while (0)
00289
00290 #define hal_fast_flush_pipeline(cpuid) \
00291 do { \
00292 if (ipipe_cpudom_var(hal_root_domain, irqpend_himask) != 0) { \
00293 rtai_cli(); \
00294 hal_sync_stage(IPIPE_IRQMASK_ANY); \
00295 } \
00296 } while (0)
00297
00298 #endif
00299
00300 #define hal_pend_uncond(irq, cpuid) hal_pend_domain_uncond(irq, hal_root_domain, cpuid)
00301
00302 extern volatile unsigned long *ipipe_root_status[];
00303
00304 #define hal_test_and_fast_flush_pipeline(cpuid) \
00305 do { \
00306 if (!test_bit(IPIPE_STALL_FLAG, ROOT_STATUS_ADR(cpuid))) { \
00307 hal_fast_flush_pipeline(cpuid); \
00308 rtai_sti(); \
00309 } \
00310 } while (0)
00311
00312 #ifdef CONFIG_PREEMPT
00313 #define rtai_save_and_lock_preempt_count() \
00314 do { int *prcntp, prcnt; prcnt = xchg(prcntp = &preempt_count(), 1);
00315 #define rtai_restore_preempt_count() \
00316 *prcntp = prcnt; } while (0)
00317 #else
00318 #define rtai_save_and_lock_preempt_count();
00319 #define rtai_restore_preempt_count();
00320 #endif
00321
00322 typedef int (*rt_irq_handler_t)(unsigned irq, void *cookie);
00323
00324 #define RTAI_CALIBRATED_CPU_FREQ 0
00325 #define RTAI_CPU_FREQ (rtai_tunables.cpu_freq)
00326
/* Read the CPU time-stamp counter (RDTSC) and combine EDX:EAX into one
 * 64-bit value.  RDTSC is not serializing: the CPU may reorder it with
 * respect to surrounding instructions. */
static inline unsigned long long rtai_rdtsc (void)
{
	unsigned int __a,__d;
	asm volatile("rdtsc" : "=a" (__a), "=d" (__d));
	return ((unsigned long)__a) | (((unsigned long)__d)<<32);
}
00333
/* Timer/CPU calibration results, filled at HAL init (see rtai_tunables). */
struct calibration_data {

	unsigned long cpu_freq;           /* measured TSC frequency (Hz) */
	unsigned long apic_freq;          /* measured local-APIC timer frequency (Hz) */
	int latency;                      /* scheduling latency correction, CPU units */
	int setup_time_TIMER_CPUNIT;      /* timer programming cost, CPU units */
	int setup_time_TIMER_UNIT;        /* timer programming cost, timer units */
	int timers_tol[RTAI_NR_CPUS];     /* per-CPU timer tolerance */
};
00343
/* Per-CPU APIC timer configuration passed to rt_request_apic_timers(). */
struct apic_timer_setup_data {

	int mode;    /* 0 = oneshot, non-zero = periodic */
	int count;   /* initial count / period, APIC timer units */
};
00349
00350 extern struct rt_times rt_times;
00351
00352 extern struct rt_times rt_smp_times[RTAI_NR_CPUS];
00353
00354 extern struct calibration_data rtai_tunables;
00355
00356 extern volatile unsigned long rtai_cpu_lock[];
00357
00358 #if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,18) && LINUX_VERSION_CODE < KERNEL_VERSION(2,6,25)) || LINUX_VERSION_CODE > KERNEL_VERSION(2,6,26)
00359 #define apic_write_around apic_write
00360 #endif
00361
00362
00363 #if defined(CONFIG_X86_LOCAL_APIC) && defined(RTAI_TASKPRI)
00364 #define SET_TASKPRI(cpuid) \
00365 if (!rtai_linux_context[cpuid].set_taskpri) { \
00366 apic_write_around(APIC_TASKPRI, ((apic_read(APIC_TASKPRI) & ~APIC_TPRI_MASK) | RTAI_TASKPRI)); \
00367 rtai_linux_context[cpuid].set_taskpri = 1; \
00368 }
00369 #define CLR_TASKPRI(cpuid) \
00370 if (rtai_linux_context[cpuid].set_taskpri) { \
00371 apic_write_around(APIC_TASKPRI, (apic_read(APIC_TASKPRI) & ~APIC_TPRI_MASK)); \
00372 rtai_linux_context[cpuid].set_taskpri = 0; \
00373 }
00374 #else
00375 #define SET_TASKPRI(cpuid)
00376 #define CLR_TASKPRI(cpuid)
00377 #endif
00378
/* Per-CPU bookkeeping for switches between the Linux and RTAI domains. */
extern struct rtai_switch_data {
	volatile unsigned long sflags;   /* non-zero while this CPU runs in hard RT mode */
	volatile unsigned long lflags;   /* Linux root-domain status word saved at switch */
#if defined(CONFIG_X86_LOCAL_APIC) && defined(RTAI_TASKPRI)
	volatile unsigned long set_taskpri; /* set when APIC TASKPRI has been raised */
#endif
} rtai_linux_context[RTAI_NR_CPUS];
00386
00387 irqreturn_t rtai_broadcast_to_local_timers(int irq,
00388 void *dev_id,
00389 struct pt_regs *regs);
00390
00391 static inline unsigned long rtai_save_flags_irqbit(void)
00392 {
00393 unsigned long flags;
00394 rtai_save_flags(flags);
00395 return flags & (1 << RTAI_IFLAG);
00396 }
00397
00398 static inline unsigned long rtai_save_flags_irqbit_and_cli(void)
00399 {
00400 unsigned long flags;
00401 rtai_save_flags_and_cli(flags);
00402 return flags & (1 << RTAI_IFLAG);
00403 }
00404
00405 #ifdef CONFIG_SMP
00406
00407 #define SCHED_VECTOR RTAI_SMP_NOTIFY_VECTOR
00408 #define SCHED_IPI RTAI_SMP_NOTIFY_IPI
00409
00410 #define _send_sched_ipi(dest) \
00411 do { \
00412 apic_wait_icr_idle(); \
00413 apic_write_around(APIC_ICR2, (int)SET_APIC_DEST_FIELD((unsigned int)dest)); \
00414 apic_write_around(APIC_ICR, APIC_DEST_LOGICAL | SCHED_VECTOR); \
00415 } while (0)
00416
00417 #ifdef CONFIG_PREEMPT
00418 #define rt_spin_lock(lock) do { barrier(); _raw_spin_lock(lock); barrier(); } while (0)
00419 #define rt_spin_unlock(lock) do { barrier(); _raw_spin_unlock(lock); barrier(); } while (0)
00420 #else
00421 #define rt_spin_lock(lock) spin_lock(lock)
00422 #define rt_spin_unlock(lock) spin_unlock(lock)
00423 #endif
00424
/* Acquire 'lock' with hardware interrupts disabled (no flag save). */
static inline void rt_spin_lock_hw_irq(spinlock_t *lock)
{
	rtai_hw_cli();
	rt_spin_lock(lock);
}
00430
/* Release 'lock' and unconditionally re-enable hardware interrupts. */
static inline void rt_spin_unlock_hw_irq(spinlock_t *lock)
{
	rt_spin_unlock(lock);
	rtai_hw_sti();
}
00436
/* Acquire 'lock' with hardware interrupts disabled, returning the
 * previous flag state for the matching irqrestore call. */
static inline unsigned long rt_spin_lock_hw_irqsave(spinlock_t *lock)
{
	unsigned long flags;
	rtai_hw_save_flags_and_cli(flags);
	rt_spin_lock(lock);
	return flags;
}
00444
/* Release 'lock' and restore the hardware interrupt state saved by
 * rt_spin_lock_hw_irqsave(). */
static inline void rt_spin_unlock_hw_irqrestore(unsigned long flags, spinlock_t *lock)
{
	rt_spin_unlock(lock);
	rtai_hw_restore_flags(flags);
}
00450
/* Acquire 'lock' with interrupts disabled via the HAL cli. */
static inline void rt_spin_lock_irq(spinlock_t *lock) {

	rtai_cli();
	rt_spin_lock(lock);
}
00456
/* Release 'lock' and re-enable interrupts via the HAL sti. */
static inline void rt_spin_unlock_irq(spinlock_t *lock) {

	rt_spin_unlock(lock);
	rtai_sti();
}
00462
/* Acquire 'lock' with interrupts disabled, returning the saved flags
 * for rt_spin_unlock_irqrestore(). */
static inline unsigned long rt_spin_lock_irqsave(spinlock_t *lock) {

	unsigned long flags;
	rtai_save_flags_and_cli(flags);
	rt_spin_lock(lock);
	return flags;
}
00470
/* Release 'lock' and restore the interrupt flags saved at lock time. */
static inline void rt_spin_unlock_irqrestore(unsigned long flags, spinlock_t *lock)
{
	rt_spin_unlock(lock);
	rtai_restore_flags(flags);
}
00476
00477 #if RTAI_NR_CPUS > 2
00478
00479
00480
/* Global-lock slow path for >2 CPUs: a byte-wide ticket lock kept in
 * lock[1].  xaddw atomically takes a ticket in the high byte; the loop
 * spins until the "now serving" low byte matches that ticket. */
static inline void rtai_spin_glock(volatile unsigned long *lock)
{
	short inc = 0x0100;
	__asm__ __volatile__ (
	LOCK_PREFIX "xaddw %w0, %1\n"
	"1:\t"
	"cmpb %h0, %b0\n\t"
	"je 2f\n\t"
	"rep; nop\n\t"      /* PAUSE hint while spinning */
	"movb %1, %b0\n\t"
	"jmp 1b\n"
	"2:"
	:"+Q" (inc), "+m" (lock[1])
	:
	:"memory", "cc");
}
00497
/* Release the ticket lock taken by rtai_spin_glock(): bump the
 * "now serving" byte so the next waiter may proceed. */
static inline void rtai_spin_gunlock(volatile unsigned long *lock)
{
	__asm__ __volatile__(
	LOCK_PREFIX "incb %0"
	:"+m" (lock[1])
	:
	:"memory", "cc");
}
00506
00507 #else
00508
/* Global-lock slow path for <= 2 CPUs: simple test-and-set spin on
 * bit 31 of the lock word. */
static inline void rtai_spin_glock(volatile unsigned long *lock)
{
	while (test_and_set_bit(31, lock)) {
		cpu_relax();
	}
	barrier();
}
00516
/* Release the bit-31 spin lock taken by rtai_spin_glock(). */
static inline void rtai_spin_gunlock(volatile unsigned long *lock)
{
	test_and_clear_bit(31, lock);
	cpu_relax();
}
00522
00523 #endif
00524
/* Disable interrupts and acquire the RTAI global lock.  Each CPU first
 * marks itself in rtai_cpu_lock[0]; only the first transition spins for
 * the inter-CPU lock, making the call recursion-safe per CPU. */
static inline void rt_get_global_lock(void)
{
	barrier();
	rtai_cli();
	if (!test_and_set_bit(hal_processor_id(), &rtai_cpu_lock[0])) {
		rtai_spin_glock(&rtai_cpu_lock[0]);
	}
	barrier();
}
00534
/* Release this CPU's claim on the RTAI global lock.  Interrupts remain
 * disabled — pair with rtai_sti() / rt_global_sti() to re-enable. */
static inline void rt_release_global_lock(void)
{
	barrier();
	rtai_cli();
	if (test_and_clear_bit(hal_processor_id(), &rtai_cpu_lock[0])) {
		rtai_spin_gunlock(&rtai_cpu_lock[0]);
	}
	barrier();
}
00544
00545
00546
00547
00548
00549
00550
00551
00552
00553
00554
00555
00556
/* Disable interrupts on this CPU and take the global lock. */
static inline void rt_global_cli(void)
{
	rt_get_global_lock();
}
00561
00562
00563
00564
00565
00566
00567
/* Drop the global lock and re-enable interrupts on this CPU. */
static inline void rt_global_sti(void)
{
	rt_release_global_lock();
	rtai_sti();
}
00573
00574
00575
00576
00577
00578
00579
/* Disable interrupts, take the global lock, and return a flags word for
 * rt_global_restore_flags(): bit RTAI_IFLAG holds the previous interrupt
 * state, and bit 0 is set iff this call actually acquired the global
 * lock (i.e. this CPU did not already hold it). */
static inline int rt_global_save_flags_and_cli(void)
{
	unsigned long flags;

	barrier();
	flags = rtai_save_flags_irqbit_and_cli();
	if (!test_and_set_bit(hal_processor_id(), &rtai_cpu_lock[0])) {
		rtai_spin_glock(&rtai_cpu_lock[0]);
		barrier();
		return flags | 1;
	}
	barrier();
	return flags;
}
00594
00595
00596
00597
00598
00599
00600
00601
/* Report this CPU's state without changing it: bit RTAI_IFLAG of *flags
 * is the interrupt-enable state, and bit 0 is set iff this CPU does NOT
 * hold the global lock (matching the encoding consumed by
 * rt_global_restore_flags()).  Interrupts are briefly disabled so both
 * facts are sampled atomically, then restored if they were enabled. */
static inline void rt_global_save_flags(unsigned long *flags)
{
	unsigned long hflags = rtai_save_flags_irqbit_and_cli();

	*flags = test_bit(hal_processor_id(), &rtai_cpu_lock[0]) ? hflags : hflags | 1;
	if (hflags) {
		rtai_sti();
	}
}
00611
00612
00613
00614
00615
00616
00617
00618
/* Undo rt_global_save_flags_and_cli(): bit 0 of 'flags' says whether the
 * global lock must be released (set) or (re)acquired (clear), and the
 * remaining RTAI_IFLAG bit says whether interrupts are re-enabled. */
static inline void rt_global_restore_flags(unsigned long flags)
{
	barrier();
	if (test_and_clear_bit(0, &flags)) {
		rt_release_global_lock();
	} else {
		rt_get_global_lock();
	}
	if (flags) {
		rtai_sti();
	}
	barrier();
}
00632
00633 #else
00634
00635 #define _send_sched_ipi(dest)
00636
00637 #define rt_spin_lock(lock)
00638 #define rt_spin_unlock(lock)
00639
00640 #define rt_spin_lock_irq(lock) do { rtai_cli(); } while (0)
00641 #define rt_spin_unlock_irq(lock) do { rtai_sti(); } while (0)
00642
/* UP build: no real spin lock is needed — just save the flags and
 * disable interrupts on this CPU. */
static inline unsigned long rt_spin_lock_irqsave(spinlock_t *lock)
{
	unsigned long flags;
	rtai_save_flags_and_cli(flags);
	return flags;
}
00649 #define rt_spin_unlock_irqrestore(flags, lock) do { rtai_restore_flags(flags); } while (0)
00650
00651 #define rt_get_global_lock() do { rtai_cli(); } while (0)
00652 #define rt_release_global_lock()
00653
00654 #define rt_global_cli() do { rtai_cli(); } while (0)
00655 #define rt_global_sti() do { rtai_sti(); } while (0)
00656
/* UP build: the "global" lock degenerates to a local interrupt disable. */
static inline unsigned long rt_global_save_flags_and_cli(void)
{
	unsigned long flags;
	rtai_save_flags_and_cli(flags);
	return flags;
}
00663 #define rt_global_restore_flags(flags) do { rtai_restore_flags(flags); } while (0)
00664
00665 #define rt_global_save_flags(flags) do { rtai_save_flags(*flags); } while (0)
00666
00667 #endif
00668
00669 asmlinkage int rt_printk(const char *format, ...);
00670 asmlinkage int rt_printk_sync(const char *format, ...);
00671
00672 extern struct hal_domain_struct rtai_domain;
00673
00674 #define _rt_switch_to_real_time(cpuid) \
00675 do { \
00676 rtai_linux_context[cpuid].lflags = xchg(ROOT_STATUS_ADR(cpuid), (1 << IPIPE_STALL_FLAG)); \
00677 rtai_linux_context[cpuid].sflags = 1; \
00678 hal_current_domain(cpuid) = &rtai_domain; \
00679 } while (0)
00680
00681 #define rt_switch_to_linux(cpuid) \
00682 do { \
00683 if (rtai_linux_context[cpuid].sflags) { \
00684 hal_current_domain(cpuid) = hal_root_domain; \
00685 ROOT_STATUS_VAL(cpuid) = rtai_linux_context[cpuid].lflags; \
00686 rtai_linux_context[cpuid].sflags = 0; \
00687 CLR_TASKPRI(cpuid); \
00688 } \
00689 } while (0)
00690
00691 #define rt_switch_to_real_time(cpuid) \
00692 do { \
00693 if (!rtai_linux_context[cpuid].sflags) { \
00694 _rt_switch_to_real_time(cpuid); \
00695 } \
00696 } while (0)
00697
00698 #define rtai_get_intr_handler(v) \
00699 ((((unsigned long)idt_table[v].offset_high) << 32) | (((unsigned long)idt_table[v].offset_middle) << 16) | ((unsigned long)idt_table[v].offset_low))
00700 #define ack_bad_irq hal_ack_system_irq // linux does not export ack_bad_irq
00701
00702 #define rtai_init_taskpri_irqs() \
00703 do { \
00704 int v; \
00705 for (v = SPURIOUS_APIC_VECTOR + 1; v < 256; v++) { \
00706 hal_virtualize_irq(hal_root_domain, v - FIRST_EXTERNAL_VECTOR, (void (*)(unsigned))rtai_get_intr_handler(v), (void *)ack_bad_irq, IPIPE_HANDLE_MASK); \
00707 } \
00708 } while (0)
00709
/* Switch this CPU into the RTAI domain if it is still in Linux mode.
 * Returns the previous mode (0 = was Linux, 1 = already hard RT) for
 * later use with rt_restore_switch_to_linux(). */
static inline int rt_save_switch_to_real_time(int cpuid)
{
	SET_TASKPRI(cpuid);
	if (!rtai_linux_context[cpuid].sflags) {
		_rt_switch_to_real_time(cpuid);
		return 0;
	}
	return 1;
}
00719
00720 #define rt_restore_switch_to_linux(sflags, cpuid) \
00721 do { \
00722 if (!sflags) { \
00723 rt_switch_to_linux(cpuid); \
00724 } else if (!rtai_linux_context[cpuid].sflags) { \
00725 SET_TASKPRI(cpuid); \
00726 _rt_switch_to_real_time(cpuid); \
00727 } \
00728 } while (0)
00729
00730 #define in_hrt_mode(cpuid) (rtai_linux_context[cpuid].sflags)
00731
00732 #if defined(CONFIG_X86_LOCAL_APIC)
/* Program the local APIC task-priority register with 'taskpri' and
 * return the value it held before (pair with restore_taskpri()). */
static inline unsigned long save_and_set_taskpri(unsigned long taskpri)
{
	unsigned long saved_taskpri = apic_read(APIC_TASKPRI);
	apic_write(APIC_TASKPRI, taskpri);
	return saved_taskpri;
}
00739
00740 #define restore_taskpri(taskpri) \
00741 do { apic_write_around(APIC_TASKPRI, taskpri); } while (0)
00742 #endif
00743
/* Arm the one-shot timer to fire 'delay' ticks from now; a zero delay
 * leaves the current programming untouched (e.g. a running periodic
 * setup).  Uses the local APIC timer when configured, otherwise
 * reprograms 8254 channel 0. */
static inline void rt_set_timer_delay (int delay) {

	if (delay) {
		unsigned long flags;
		rtai_hw_save_flags_and_cli(flags);
#ifdef CONFIG_X86_LOCAL_APIC
		apic_write_around(APIC_TMICT, delay);
#else
		/* 8254: write LSB then MSB of the 16-bit count to channel 0 */
		outb(delay & 0xff,0x40);
		outb(delay >> 8,0x40);
#endif
		rtai_hw_restore_flags(flags);
	}
}
00758
00759
00760
00761 unsigned long rtai_critical_enter(void (*synch)(void));
00762
00763 void rtai_critical_exit(unsigned long flags);
00764
00765 int rtai_calibrate_8254(void);
00766
00767 void rtai_set_linux_task_priority(struct task_struct *task,
00768 int policy,
00769 int prio);
00770
00771 long rtai_catch_event (struct hal_domain_struct *domain, unsigned long event, int (*handler)(unsigned long, void *));
00772
00773 #endif
00774
00775
00776
00777 #ifdef __KERNEL__
00778
00779 #include <linux/kernel.h>
00780
00781 #define rtai_print_to_screen rt_printk
00782
00783 void *ll2a(long long ll, char *s);
00784
00785 #ifdef __cplusplus
00786 extern "C" {
00787 #endif
00788
00789 int rt_request_irq(unsigned irq,
00790 int (*handler)(unsigned irq, void *cookie),
00791 void *cookie,
00792 int retmode);
00793
00794 int rt_release_irq(unsigned irq);
00795
00796 int rt_set_irq_ack(unsigned int irq, int (*irq_ack)(unsigned int, void *));
00797
/* Install an RT interrupt handler and, on success, also attach the given
 * custom acknowledge hook.  Returns the rt_request_irq() error unchanged
 * on failure, otherwise the result of rt_set_irq_ack(). */
static inline int rt_request_irq_wack(unsigned irq, int (*handler)(unsigned irq, void *cookie), void *cookie, int retmode, int (*irq_ack)(unsigned int, void *))
{
	int ret = rt_request_irq(irq, handler, cookie, retmode);
	if (ret >= 0) {
		ret = rt_set_irq_ack(irq, irq_ack);
	}
	return ret;
}
00806
00807 void rt_set_irq_cookie(unsigned irq, void *cookie);
00808
00809 void rt_set_irq_retmode(unsigned irq, int fastret);
00810
00811
00812
00813
00814
00815 unsigned rt_startup_irq(unsigned irq);
00816
00817 void rt_shutdown_irq(unsigned irq);
00818
00819 void rt_enable_irq(unsigned irq);
00820
00821 void rt_disable_irq(unsigned irq);
00822
00823 void rt_mask_and_ack_irq(unsigned irq);
00824
00825 void rt_unmask_irq(unsigned irq);
00826
00827 void rt_ack_irq(unsigned irq);
00828
00829
00830
00831 int rt_request_linux_irq(unsigned irq,
00832 void *handler,
00833 char *name,
00834 void *dev_id);
00835
00836 int rt_free_linux_irq(unsigned irq,
00837 void *dev_id);
00838
00839 void rt_pend_linux_irq(unsigned irq);
00840
00841 RTAI_SYSCALL_MODE void usr_rt_pend_linux_irq(unsigned irq);
00842
00843 void rt_pend_linux_srq(unsigned srq);
00844
00845 int rt_request_srq(unsigned label,
00846 void (*k_handler)(void),
00847 long long (*u_handler)(unsigned long));
00848
00849 int rt_free_srq(unsigned srq);
00850
00851 int rt_assign_irq_to_cpu(int irq,
00852 unsigned long cpus_mask);
00853
00854 int rt_reset_irq_to_sym_mode(int irq);
00855
00856 void rt_request_timer_cpuid(void (*handler)(void),
00857 unsigned tick,
00858 int cpuid);
00859
00860 void rt_request_apic_timers(void (*handler)(void),
00861 struct apic_timer_setup_data *tmdata);
00862
00863 void rt_free_apic_timers(void);
00864
00865 int rt_request_timer(void (*handler)(void), unsigned tick, int use_apic);
00866
00867 void rt_free_timer(void);
00868
00869 RT_TRAP_HANDLER rt_set_trap_handler(RT_TRAP_HANDLER handler);
00870
00871 void rt_release_rtc(void);
00872
00873 void rt_request_rtc(long rtc_freq, void *handler);
00874
00875 #define rt_mount()
00876
00877 #define rt_umount()
00878
00879 RTIME rd_8254_ts(void);
00880
00881 void rt_setup_8254_tsc(void);
00882
00883 void (*rt_set_ihook(void (*hookfn)(int)))(int);
00884
00885
00886
/* Legacy API: install 'handler' (taking no arguments) as the RT handler
 * for 'irq', with no cookie and fast-return mode disabled. */
static inline int rt_request_global_irq(unsigned irq, void (*handler)(void))
{
	return rt_request_irq(irq, (int (*)(unsigned,void *))handler, 0, 0);
}
00891
/* Legacy API: as rt_request_global_irq(), but passes 'cookie' to the
 * handler and requests the extended (retmode = 1) calling convention. */
static inline int rt_request_global_irq_ext(unsigned irq, void (*handler)(void), unsigned long cookie)
{
	return rt_request_irq(irq, (int (*)(unsigned,void *))handler, (void *)cookie, 1);
}
00896
/* Legacy API: update the cookie of an already installed handler.
 * NOTE(review): the 'ext' argument is accepted but ignored here. */
static inline void rt_set_global_irq_ext(unsigned irq, int ext, unsigned long cookie)
{
	rt_set_irq_cookie(irq, (void *)cookie);
}
00901
/* Legacy API: uninstall the RT handler for 'irq'. */
static inline int rt_free_global_irq(unsigned irq)
{
	return rt_release_irq(irq);
}
00906
00907 #ifdef __cplusplus
00908 }
00909 #endif
00910
00911 #endif
00912
00913 #include <asm/rtai_oldnames.h>
00914 #include <asm/rtai_emulate_tsc.h>
00915
00916 #define RTAI_DEFAULT_TICK 100000
00917 #ifdef CONFIG_RTAI_TRACE
00918 #define RTAI_DEFAULT_STACKSZ 8192
00919 #else
00920 #define RTAI_DEFAULT_STACKSZ 1024
00921 #endif
00922
00923
00924
00925 #endif
00926
00927 #ifndef _RTAI_HAL_XN_H
00928 #define _RTAI_HAL_XN_H
00929
00930 #define __range_ok(addr, size) (__range_not_ok(addr,size) == 0)
00931
00932 #define NON_RTAI_SCHEDULE(cpuid) do { schedule(); } while (0)
00933
00934 #endif
00935