00001
00002
00003
00004
00005
00006
00007
00008
00009
00010
00011
00012
00013
00014
00015
00016
00017
00018
00019
00020
00021
00022
00023
00024
00025
00026
00027
00028
00029
00030
00031
00032
00033
00034
00035
00036
00037
00038
#ifndef _RTAI_ASM_I386_HAL_H
#define _RTAI_ASM_I386_HAL_H

/* Build-mode selection: RTAI_DUOSS (dual OS: RTAI + Linux) is the default;
 * if it is ever disabled, RTAI_TRIOSS (triple OS, with a fusion/Xenomai
 * domain) is selected instead. */
#define RTAI_DUOSS
#ifndef RTAI_DUOSS
#define RTAI_TRIOSS
#endif
#define LOCKED_LINUX_IN_IRQ_HANDLER
#define UNWRAPPED_CATCH_EVENT
/* Domain whose stall flag is toggled when switching to/from real time
 * (only meaningful under RTAI_TRIOSS; fusion_domain is declared later). */
#define DOMAIN_TO_STALL (fusion_domain)
00049
#include <asm/rtai_vectors.h>
00050
#include <rtai_types.h>
00051
#include <rtai_hal_names.h>
00052
00053
/* Number of CPUs RTAI is compiled for: configurable on SMP, 1 otherwise. */
#ifdef CONFIG_SMP
#define RTAI_NR_CPUS CONFIG_RTAI_CPUS
#else
#define RTAI_NR_CPUS 1
#endif
00058
00059
#ifndef _RTAI_FUSION_H
/*
 * ffnz - find the index of the first (least significant) non-zero bit.
 * Uses the x86 BSF instruction; the result is undefined if word == 0,
 * so callers must pass a non-zero value.
 */
static __inline__ unsigned long ffnz (unsigned long word) {
	__asm__("bsfl %1, %0" : "=r" (word) : "r" (word));
	return word;
}
#endif
00068
00069
/*
 * rtai_ulldiv - divide an unsigned 64 bit value by an unsigned 32 bit one,
 * using only 32 bit divisions (for CPUs without a native 64/32 divide path).
 *
 * @ull: 64 bit dividend.
 * @uld: 32 bit divisor (must be non-zero; a zero divisor is undefined).
 * @r:   optional out-parameter receiving the remainder (may be NULL).
 *
 * Returns the 64 bit quotient.
 *
 * NOTE(review): the union overlay assumes sizeof(unsigned long) == 4
 * (i386/ILP32); on LP64 targets ul[1] would not alias the high half of ull.
 */
static inline unsigned long long rtai_ulldiv (unsigned long long ull, unsigned long uld, unsigned long *r) {
	unsigned long long qf, rf;
	unsigned long tq, rh;
	/* p holds the running remainder, q accumulates the quotient;
	 * ul[0]/ul[1] are the low/high 32 bit halves of ull. */
	union { unsigned long long ull; unsigned long ul[2]; } p, q;

	p.ull = ull;
	q.ull = 0;
	/* qf = floor(2^32 - 1 / uld); rf = 2^32 - qf*uld, i.e. the quotient and
	 * residue of one "radix step", reused every iteration below. */
	rf = 0x100000000ULL - (qf = 0xFFFFFFFFUL / uld) * uld;

	/* Each pass reduces the high half via 32 bit divides until the
	 * remainder fits below the divisor. */
	while (p.ull >= uld) {
		q.ul[1] += (tq = p.ul[1] / uld);
		rh = p.ul[1] - tq * uld;
		q.ull += rh * qf + (tq = p.ul[0] / uld);
		p.ull = rh * rf + (p.ul[0] - tq * uld);
	}

	if (r)
		*r = p.ull;

	return q.ull;
}
00098
00099
/*
 * rtai_imuldiv - compute i*mult/div keeping the 64 bit intermediate
 * product, so the multiplication cannot overflow 32 bits.
 *
 * Implemented with MUL (32x32 -> 64 in edx:eax) followed by a 64/32 DIV.
 * The quotient must fit in 32 bits and div must be non-zero, otherwise
 * the DIV instruction faults. i386 only (32 bit register constraints).
 */
static inline int rtai_imuldiv (int i, int mult, int div) {
	int dummy;

	__asm__ __volatile__ ( \
		"mull %%edx\t\n" \
		"div %%ecx\t\n" \
		: "=a" (i), "=d" (dummy)
		: "a" (i), "d" (mult), "c" (div));

	return i;
}
00113
00114
/*
 * rtai_llimd - compute ll*mult/div with a 96 bit intermediate product,
 * rounding the result to nearest (the sal/cmp/jge tail adds one when the
 * final remainder is >= div/2).
 *
 * @ll:   64 bit multiplicand (in edx:eax via the "A" constraint).
 * @mult: 32 bit multiplier (esi).
 * @div:  32 bit divisor (edi); must be non-zero.
 *
 * i386 only: relies on the edx:eax "A" constraint and 32 bit MUL/DIV.
 * Clobbers ebx/ecx as declared.
 */
static inline long long rtai_llimd(long long ll, int mult, int div) {

	__asm__ __volatile ( \
		"movl %%edx,%%ecx\t\n" \
		"mull %%esi\t\n" \
		"movl %%eax,%%ebx\n\t" \
		"movl %%ecx,%%eax\t\n" \
		"movl %%edx,%%ecx\t\n" \
		"mull %%esi\n\t" \
		"addl %%ecx,%%eax\t\n" \
		"adcl $0,%%edx\t\n" \
		"divl %%edi\n\t" \
		"movl %%eax,%%ecx\t\n" \
		"movl %%ebx,%%eax\t\n" \
		"divl %%edi\n\t" \
		"sal $1,%%edx\t\n" \
		"cmpl %%edx,%%edi\t\n" \
		"movl %%ecx,%%edx\n\t" \
		"jge 1f\t\n" \
		"addl $1,%%eax\t\n" \
		"adcl $0,%%edx\t\n" \
		"1:\t\n" \
		: "=A" (ll) \
		: "A" (ll), "S" (mult), "D" (div) \
		: "%ebx", "%ecx");

	return ll;
}
00144
00145
00146
00147
00148
00149
00150
/*
 * rtai_u64div32c - divide a 64 bit value by a 32 bit constant divisor
 * using two chained 32 bit DIVs (high half first, its remainder feeding
 * the low-half divide).
 *
 * @a: 64 bit dividend; the quotient is returned in its place.
 * @b: 32 bit divisor (must be non-zero).
 * @r: NOTE(review): never written — the remainder is left in %ecx by the
 *     asm but not stored through r; confirm no caller relies on *r.
 *
 * i386 only: the operands alias the halves of 'a' through unsigned long
 * casts, which assumes 32 bit longs.
 */
static inline unsigned long long rtai_u64div32c(unsigned long long a, unsigned long b, int *r) {
	__asm__ __volatile(
		"\n movl %%eax,%%ebx"
		"\n movl %%edx,%%eax"
		"\n xorl %%edx,%%edx"
		"\n divl %%ecx"
		"\n xchgl %%eax,%%ebx"
		"\n divl %%ecx"
		"\n movl %%edx,%%ecx"
		"\n movl %%ebx,%%edx"
		: "=a" (((unsigned long *)((void *)&a))[0]), "=d" (((unsigned long *)((void *)&a))[1])
		: "a" (((unsigned long *)((void *)&a))[0]), "d" (((unsigned long *)((void *)&a))[1]), "c" (b)
		: "%ebx");

	return a;
}
00169
00170
#if defined(__KERNEL__) && !defined(__cplusplus)
00171
#include <linux/sched.h>
00172
#include <linux/interrupt.h>
00173
#include <asm/desc.h>
00174
#include <asm/system.h>
00175
#include <asm/io.h>
00176
#include <asm/rtai_atomic.h>
00177
#include <asm/rtai_fpu.h>
00178
#ifdef CONFIG_X86_LOCAL_APIC
00179
#include <asm/fixmap.h>
00180
#include <asm/apic.h>
00181
#endif
00182
#include <rtai_trace.h>
00183
00184
00185
00186
00187
00188
00189
00190
#ifdef CONFIG_X86_IO_APIC
00191
static inline int ext_irq_vector(
int irq)
00192 {
00193
if (irq != 2) {
00194
return (FIRST_DEVICE_VECTOR + 8*(irq < 2 ? irq : irq - 1));
00195 }
00196
return -EINVAL;
00197 }
00198
#else
00199
static inline int ext_irq_vector(
int irq)
00200 {
00201
if (irq != 2) {
00202
return (FIRST_EXTERNAL_VECTOR + irq);
00203 }
00204
return -EINVAL;
00205 }
00206
#endif
00207
00208
/* Identity and table sizes for the RTAI I-pipe domain. */
#define RTAI_DOMAIN_ID 0x9ac15d93 // nam2num("rtai_d")
#define RTAI_NR_TRAPS HAL_NR_FAULTS
#define RTAI_NR_SRQS 32

/* APIC vectors/IPIs reserved for the RTAI timer and cross-CPU notification. */
#define RTAI_APIC_TIMER_VECTOR RTAI_APIC_HIGH_VECTOR
#define RTAI_APIC_TIMER_IPI RTAI_APIC_HIGH_IPI
#define RTAI_SMP_NOTIFY_VECTOR RTAI_APIC_LOW_VECTOR
#define RTAI_SMP_NOTIFY_IPI RTAI_APIC_LOW_IPI

/* i8254 PIT parameters: IRQ0, the 1.193180 MHz input clock, and tuning
 * constants (latency/setup times presumably in CPU cycles — see the
 * configured CONFIG_RTAI_SCHED_* values). */
#define RTAI_TIMER_8254_IRQ 0
#define RTAI_FREQ_8254 1193180
#define RTAI_APIC_ICOUNT ((RTAI_FREQ_APIC + HZ/2)/HZ)
#define RTAI_COUNTER_2_LATCH 0xfffe
#define RTAI_LATENCY_8254 CONFIG_RTAI_SCHED_8254_LATENCY
#define RTAI_SETUP_TIME_8254 2011

/* Local APIC timer parameters; the frequency is runtime-calibrated. */
#define RTAI_CALIBRATED_APIC_FREQ 0
#define RTAI_FREQ_APIC (rtai_tunables.apic_freq)
#define RTAI_LATENCY_APIC CONFIG_RTAI_SCHED_APIC_LATENCY
#define RTAI_SETUP_TIME_APIC 1000

/* Largest representable signed 64 bit time value. */
#define RTAI_TIME_LIMIT 0x7FFFFFFFFFFFFFFFLL

/* Bit index of the IF (interrupt enable) flag in EFLAGS. */
#define RTAI_IFLAG 9

#define rtai_cpuid() hal_processor_id()
#define rtai_tskext(idx) hal_tskext[idx]

/* "hw" variants act on the real CPU interrupt flag via the HAL. */
#define rtai_hw_cli() hal_hw_cli()
#define rtai_hw_sti() hal_hw_sti()
#define rtai_hw_save_flags_and_cli(x) hal_hw_local_irq_save(x)
#define rtai_hw_restore_flags(x) hal_hw_local_irq_restore(x)
#define rtai_hw_save_flags(x) hal_hw_local_irq_flags(x)

/* In this configuration the plain variants alias the hardware ones. */
#define rtai_cli() hal_hw_cli()
#define rtai_sti() hal_hw_sti()
#define rtai_save_flags_and_cli(x) hal_hw_local_irq_save(x)
#define rtai_restore_flags(x) hal_hw_local_irq_restore(x)
#define rtai_save_flags(x) hal_hw_local_irq_flags(x)

/* Per-CPU bitmask of pending (deferred) interrupt work; bits are set by
 * the hal_pend_* macros below. */
extern volatile unsigned long hal_pended;
00252
static inline struct hal_domain_struct *get_domain_pointer(
int n)
00253 {
00254
struct list_head *p =
hal_pipeline.next;
00255
struct hal_domain_struct *d;
00256
unsigned long i = 0;
00257
while (p != &
hal_pipeline) {
00258 d = list_entry(p,
struct hal_domain_struct, p_link);
00259
if (++i == n) {
00260
return d;
00261 }
00262 p = d->p_link.next;
00263 }
00264
return (
struct hal_domain_struct *)i;
00265 }
00266
00267
/* Mark 'irq' pending for 'domain' on 'cpuid': bump the hit counter, set
 * the two-level pending bitmaps, and flag the CPU in hal_pended. */
#define hal_pend_domain_uncond(irq, domain, cpuid) \
do { \
	hal_irq_hits_pp(irq, domain, cpuid); \
	__set_bit(irq & IPIPE_IRQ_IMASK, &domain->cpudata[cpuid].irq_pending_lo[irq >> IPIPE_IRQ_ISHIFT]); \
	__set_bit(irq >> IPIPE_IRQ_ISHIFT, &domain->cpudata[cpuid].irq_pending_hi); \
	test_and_set_bit(cpuid, &hal_pended); \
} while (0)

/* Shorthand: pend to the Linux (root) domain. */
#define hal_pend_uncond(irq, cpuid) hal_pend_domain_uncond(irq, hal_root_domain, cpuid)

/* Synchronize pending root-domain interrupts, if any, with hard
 * interrupts disabled. */
#define hal_fast_flush_pipeline(cpuid) \
do { \
	if (hal_root_domain->cpudata[cpuid].irq_pending_hi != 0) { \
		rtai_cli(); \
		hal_sync_stage(IPIPE_IRQMASK_ANY); \
	} \
} while (0)

/* Flush only when the target domain is not stalled; the TRIOSS variant
 * unstalls from the fusion domain instead of syncing the root stage. */
#ifdef RTAI_TRIOSS
#define hal_test_and_fast_flush_pipeline(cpuid) \
do { \
	if (!test_bit(IPIPE_STALL_FLAG, &DOMAIN_TO_STALL->cpudata[cpuid].status)) { \
		rtai_sti(); \
		hal_unstall_pipeline_from(DOMAIN_TO_STALL); \
	} \
} while (0)
#else
#define hal_test_and_fast_flush_pipeline(cpuid) \
do { \
	if (!test_bit(IPIPE_STALL_FLAG, &hal_root_domain->cpudata[cpuid].status)) { \
		hal_fast_flush_pipeline(cpuid); \
		rtai_sti(); \
	} \
} while (0)
#endif

/* Deliberately unbalanced macro pair: the first opens a do{ scope that
 * the second closes, so they must be used together in the same block.
 * They swap the preempt counter to 1 and restore it afterwards. */
#ifdef CONFIG_PREEMPT
#define rtai_save_and_lock_preempt_count() \
	do { int *prcntp, prcnt; prcnt = xchg(prcntp = &preempt_count(), 1);
#define rtai_restore_preempt_count() \
	*prcntp = prcnt; } while (0)
#else
#define rtai_save_and_lock_preempt_count();
#define rtai_restore_preempt_count();
#endif
00313
00314
/* Signature of an RTAI real-time interrupt handler. */
typedef int (*rt_irq_handler_t)(unsigned irq, void *cookie);

#ifdef CONFIG_X86_TSC

/* CPU frequency is runtime-calibrated when a TSC is present. */
#define RTAI_CALIBRATED_CPU_FREQ 0
#define RTAI_CPU_FREQ (rtai_tunables.cpu_freq)

#if 0
/* Disabled alternative kept for reference: function-wrapped RDTSC. */
static inline unsigned long long _rtai_hidden_rdtsc (void) {
	unsigned long long t;
	__asm__ __volatile__( "rdtsc" : "=A" (t));
	return t;
}
#define rtai_rdtsc() _rtai_hidden_rdtsc()
#else
/* Read the 64 bit time stamp counter (edx:eax via "=A"). */
#define rtai_rdtsc() ({ unsigned long long t; __asm__ __volatile__( "rdtsc" : "=A" (t)); t; })
#endif

#else

/* No TSC: fall back to the emulated 8254-based timestamp. */
#define RTAI_CPU_FREQ RTAI_FREQ_8254
#define RTAI_CALIBRATED_CPU_FREQ RTAI_FREQ_8254
#define rtai_rdtsc() rd_8254_ts()

#endif
00340
00341
/* Timer calibration results shared with the schedulers. */
struct calibration_data {
	unsigned long cpu_freq;           /* calibrated CPU (TSC) frequency */
	unsigned long apic_freq;          /* calibrated local APIC frequency */
	int latency;                      /* scheduling latency correction */
	int setup_time_TIMER_CPUNIT;      /* timer programming cost, CPU units */
	int setup_time_TIMER_UNIT;        /* timer programming cost, timer units */
	int timers_tol[RTAI_NR_CPUS];     /* per-CPU timer tolerance */
};

/* Per-CPU APIC timer programming request (mode + initial count). */
struct apic_timer_setup_data {
	int mode;
	int count;
};

extern struct rt_times rt_times;
extern struct rt_times rt_smp_times[RTAI_NR_CPUS];
extern struct calibration_data rtai_tunables;
extern volatile unsigned long rtai_cpu_realtime;
/* Bitmask used as the RTAI global spinlock: one bit per CPU + bit 31. */
extern volatile unsigned long rtai_cpu_lock;
00367
00368
/* Per-CPU bookkeeping for switching between Linux and hard real-time
 * mode; 'depth' counts nested switches, 'oldflags' saves the stalled
 * domain status to restore on the way back. */
extern struct rtai_switch_data {
	volatile unsigned long depth;
	volatile unsigned long oldflags;
#ifdef RTAI_TRIOSS
	volatile struct hal_domain_struct *oldomain;   /* domain active before the switch */
#endif
#if defined(CONFIG_X86_LOCAL_APIC) && defined(RTAI_TASKPRI)
	volatile unsigned long pridepth;               /* nesting of APIC TASKPRI raises */
#endif
} rtai_linux_context[RTAI_NR_CPUS];

irqreturn_t rtai_broadcast_to_local_timers(int irq, void *dev_id, struct pt_regs *regs);
00383
00384
#ifdef CONFIG_SMP

#define SCHED_VECTOR RTAI_SMP_NOTIFY_VECTOR
#define SCHED_IPI RTAI_SMP_NOTIFY_IPI

/* Send the scheduler IPI to the CPUs in 'dest' (logical destination mode)
 * by programming the local APIC ICR directly. */
#define _send_sched_ipi(dest) \
do { \
	apic_wait_icr_idle(); \
	apic_write_around(APIC_ICR2, SET_APIC_DEST_FIELD(dest)); \
	apic_write_around(APIC_ICR, APIC_DEST_LOGICAL | SCHED_VECTOR); \
} while (0)

/* Under CONFIG_PREEMPT use the raw spinlock ops (bracketed by compiler
 * barriers) to bypass the preemption bookkeeping of spin_lock(). */
#ifdef CONFIG_PREEMPT
#define rt_spin_lock(lock) do { barrier(); _raw_spin_lock(lock); barrier(); } while (0)
#define rt_spin_unlock(lock) do { barrier(); _raw_spin_unlock(lock); barrier(); } while (0)
#else
#define rt_spin_lock(lock) spin_lock(lock)
#define rt_spin_unlock(lock) spin_unlock(lock)
#endif
00403
00404
/* Take 'lock' with hard interrupts disabled (no flags saved). */
static inline void rt_spin_lock_hw_irq(spinlock_t *lock)
{
	rtai_hw_cli();
	rt_spin_lock(lock);
}

/* Release 'lock' and unconditionally re-enable hard interrupts. */
static inline void rt_spin_unlock_hw_irq(spinlock_t *lock)
{
	rt_spin_unlock(lock);
	rtai_hw_sti();
}

/* Take 'lock' with hard interrupts disabled, returning the saved flags. */
static inline unsigned long rt_spin_lock_hw_irqsave(spinlock_t *lock)
{
	unsigned long flags;
	rtai_hw_save_flags_and_cli(flags);
	rt_spin_lock(lock);
	return flags;
}

/* Release 'lock' and restore the hard interrupt flags saved above. */
static inline void rt_spin_unlock_hw_irqrestore(unsigned long flags, spinlock_t *lock)
{
	rt_spin_unlock(lock);
	rtai_hw_restore_flags(flags);
}

/* Non-"hw" variants: same shape, using the rtai_cli/sti macro family
 * (which alias the hardware ones in this configuration). */
static inline void rt_spin_lock_irq(spinlock_t *lock) {
	rtai_cli();
	rt_spin_lock(lock);
}

static inline void rt_spin_unlock_irq(spinlock_t *lock) {
	rt_spin_unlock(lock);
	rtai_sti();
}

static inline unsigned long rt_spin_lock_irqsave(spinlock_t *lock) {
	unsigned long flags;
	rtai_save_flags_and_cli(flags);
	rt_spin_lock(lock);
	return flags;
}

static inline void rt_spin_unlock_irqrestore(unsigned long flags, spinlock_t *lock)
{
	rt_spin_unlock(lock);
	rtai_restore_flags(flags);
}
00455
00456
/*
 * rt_get_global_lock - acquire the RTAI global lock with interrupts off.
 * Each CPU marks its own bit in rtai_cpu_lock; only the first (non-nested)
 * acquisition on this CPU spins for the shared lock bit 31. Re-entrant
 * per CPU by construction.
 */
static inline void rt_get_global_lock(void)
{
	barrier();
	rtai_cli();
	if (!test_and_set_bit(hal_processor_id(), &rtai_cpu_lock)) {
		while (test_and_set_bit(31, &rtai_cpu_lock)) {
			cpu_relax();
		}
	}
	barrier();
}

/*
 * rt_release_global_lock - drop this CPU's claim on the global lock,
 * releasing the shared bit 31 only if this CPU actually held its bit.
 * Interrupts are left disabled; callers re-enable as appropriate.
 */
static inline void rt_release_global_lock(void)
{
#if 0
	/* Disabled alternative: clear CPU bit and bit 31 in one atomic mask. */
	barrier();
	rtai_cli();
	atomic_clear_mask((0xFFFF0001 << hal_processor_id()), (atomic_t *)&rtai_cpu_lock);
	cpu_relax();
	barrier();
#else
	barrier();
	rtai_cli();
	if (test_and_clear_bit(hal_processor_id(), &rtai_cpu_lock)) {
		test_and_clear_bit(31, &rtai_cpu_lock);
		cpu_relax();
	}
	barrier();
#endif
}
00486
00487
00488
00489
00490
00491
00492
00493
00494
00495
00496
00497
00498
00499
/* Disable interrupts globally: taking the global lock also does the cli. */
static inline void rt_global_cli(void)
{
	rt_get_global_lock();
}

/* Release the global lock, then re-enable local interrupts. */
static inline void rt_global_sti(void)
{
	rt_release_global_lock();
	rtai_sti();
}
00515
00516
static volatile inline unsigned long rtai_save_flags_irqbit(
void)
00517 {
00518
unsigned long flags;
00519 rtai_save_flags(flags);
00520
return flags & (1 << RTAI_IFLAG);
00521 }
00522
static volatile inline unsigned long rtai_save_flags_irqbit_and_cli(
void)
00523 {
00524
unsigned long flags;
00525
rtai_save_flags_and_cli(flags);
00526
return flags & (1 << RTAI_IFLAG);
00527 }
00528
00529
00530
00531
00532
00533
00534
00535
/*
 * rt_global_save_flags_and_cli - acquire the global lock with interrupts
 * off, returning the previous state encoded as: IF bit from EFLAGS, plus
 * bit 0 set iff this CPU did NOT already hold its lock bit (so the
 * matching restore knows whether to release).
 */
static inline int rt_global_save_flags_and_cli(void)
{
	unsigned long flags;

	barrier();
	flags = rtai_save_flags_irqbit_and_cli();
	if (!test_and_set_bit(hal_processor_id(), &rtai_cpu_lock)) {
		while (test_and_set_bit(31, &rtai_cpu_lock)) {
			cpu_relax();
		}
		barrier();
		return flags | 1;   /* first acquisition on this CPU */
	}
	barrier();
	return flags;           /* nested: lock bit was already ours */
}
00551
00552
00553
00554
00555
00556
00557
00558
00559
/*
 * rt_global_save_flags - sample the global-lock state without changing it:
 * *flags gets the IF bit plus bit 0 set iff this CPU does not hold its
 * lock bit. Interrupts are briefly disabled for an atomic sample, then
 * restored if they were enabled.
 */
static inline void rt_global_save_flags(unsigned long *flags)
{
	unsigned long hflags = rtai_save_flags_irqbit_and_cli();

	*flags = test_bit(hal_processor_id(), &rtai_cpu_lock) ? hflags : hflags | 1;
	if (hflags) {
		rtai_sti();
	}
}
00568
00569
00570
00571
00572
00573
00574
00575
00576
/*
 * rt_global_restore_flags - undo rt_global_save_flags_and_cli: bit 0 of
 * 'flags' says whether this CPU owned the acquisition (release) or was
 * nested (re-acquire); the remaining IF bit re-enables interrupts if
 * they were enabled at save time. Note bit 0 is cleared on the local
 * copy only.
 */
static inline void rt_global_restore_flags(unsigned long flags)
{
	barrier();
	if (test_and_clear_bit(0, &flags)) {
		rt_release_global_lock();
	} else {
		rt_get_global_lock();
	}
	if (flags) {
		rtai_sti();
	}
	barrier();
}
00589
00590
#else

/* Uniprocessor build: no IPIs, and the "global" lock degenerates to
 * plain interrupt disabling on the single CPU. */
#define _send_sched_ipi(dest)

#define rt_spin_lock(lock)
#define rt_spin_unlock(lock)

#define rt_spin_lock_irq(lock) do { rtai_cli(); } while (0)
#define rt_spin_unlock_irq(lock) do { rtai_sti(); } while (0)

/* UP irqsave: just save flags and disable; there is no lock to take. */
static inline unsigned long rt_spin_lock_irqsave(spinlock_t *lock)
{
	unsigned long flags;
	rtai_save_flags_and_cli(flags);
	return flags;
}
#define rt_spin_unlock_irqrestore(flags, lock) do { rtai_restore_flags(flags); } while (0)

#define rt_get_global_lock() do { rtai_cli(); } while (0)
#define rt_release_global_lock()

#define rt_global_cli() do { rtai_cli(); } while (0)
#define rt_global_sti() do { rtai_sti(); } while (0)

/* NOTE(review): returns unsigned long here but int in the SMP variant;
 * callers appear to treat it as a flags word either way — confirm. */
static inline unsigned long rt_global_save_flags_and_cli(void)
{
	unsigned long flags;
	rtai_save_flags_and_cli(flags);
	return flags;
}
#define rt_global_restore_flags(flags) do { rtai_restore_flags(flags); } while (0)

#define rt_global_save_flags(flags) do { rtai_save_flags(*flags); } while (0)

#endif
00625
00626
/* printk-style output safe to call from real-time context; the _sync
 * variant presumably flushes synchronously — confirm in the C file. */
int rt_printk(const char *format, ...);
int rt_printk_sync(const char *format, ...);

/* The RTAI I-pipe domain and, under TRIOSS, the fusion (Xenomai) domain. */
extern struct hal_domain_struct rtai_domain;
extern struct hal_domain_struct *fusion_domain;
00631
00632
/*
 * rt_switch_to_real_time_notskpri - enter hard real-time mode on 'cpuid'
 * (without touching the APIC task priority). Only the outermost call
 * (depth 0 -> 1) stalls the Linux/fusion domain, saves its previous
 * status flags, and makes rtai_domain the current I-pipe domain.
 */
static inline void rt_switch_to_real_time_notskpri(int cpuid)
{
	TRACE_RTAI_SWITCHTO_RT(cpuid);
	if (!rtai_linux_context[cpuid].depth++) {
#ifdef RTAI_TRIOSS
		rtai_linux_context[cpuid].oldflags = xchg(&DOMAIN_TO_STALL->cpudata[cpuid].status, (1 << IPIPE_STALL_FLAG));
		rtai_linux_context[cpuid].oldomain = hal_current_domain[cpuid];
#else
		rtai_linux_context[cpuid].oldflags = xchg(&hal_root_domain->cpudata[cpuid].status, (1 << IPIPE_STALL_FLAG));
#endif
		hal_current_domain[cpuid] = &rtai_domain;
	}
}
00646
00647
/*
 * rt_switch_to_linux_notskpri - leave hard real-time mode on 'cpuid'
 * (without touching the APIC task priority). Only the outermost unnest
 * (depth 1 -> 0) restores the previous current domain and its saved
 * status flags. An unbalanced call (depth already 0) is reported, not
 * crashed on.
 */
static inline void rt_switch_to_linux_notskpri(int cpuid)
{
	TRACE_RTAI_SWITCHTO_LINUX(cpuid);
	if (rtai_linux_context[cpuid].depth) {
		if (!--rtai_linux_context[cpuid].depth) {
#ifdef RTAI_TRIOSS
			hal_current_domain[cpuid] = (void *)rtai_linux_context[cpuid].oldomain;
			DOMAIN_TO_STALL->cpudata[cpuid].status = rtai_linux_context[cpuid].oldflags;
#else
			hal_current_domain[cpuid] = hal_root_domain;
			hal_root_domain->cpudata[cpuid].status = rtai_linux_context[cpuid].oldflags;
#endif
		}
		return;
	}
	rt_printk("*** ERROR: EXCESS LINUX_UNLOCK ***\n");
}
00665
00666
/* Reassemble the 32 bit handler address from the split halves of an IDT
 * gate descriptor (high word in .b, low word in .a). */
#define rtai_get_intr_handler(v) \
	((idt_table[v].b & 0xFFFF0000) | (idt_table[v].a & 0x0000FFFF))

#define ack_bad_irq hal_ack_system_irq // linux does not export ack_bad_irq

/* Virtualize every vector above the spurious-APIC one through the root
 * domain, pointing each at the handler currently installed in the IDT. */
#define rtai_init_taskpri_irqs() \
do { \
	int v; \
	for (v = SPURIOUS_APIC_VECTOR + 1; v < 256; v++) { \
		hal_virtualize_irq(hal_root_domain, v - FIRST_EXTERNAL_VECTOR, (void (*)(unsigned))rtai_get_intr_handler(v), ack_bad_irq, IPIPE_HANDLE_MASK); \
	} \
} while (0)
00677
00678
#if defined(CONFIG_X86_LOCAL_APIC) && defined(RTAI_TASKPRI)
/*
 * rt_switch_to_real_time - taskpri-aware wrapper: on the outermost entry
 * raise the APIC task priority (masking lower-priority vectors), then do
 * the normal domain switch.
 */
static inline void rt_switch_to_real_time(int cpuid)
{
	TRACE_RTAI_SWITCHTO_RT(cpuid);
	if (!rtai_linux_context[cpuid].pridepth++) {
		apic_write_around(APIC_TASKPRI, RTAI_TASKPRI);
	}
	rt_switch_to_real_time_notskpri(cpuid);
}

/*
 * rt_switch_to_linux - taskpri-aware wrapper: on the outermost exit drop
 * the APIC task priority back to 0, then do the normal domain switch.
 */
static inline void rt_switch_to_linux(int cpuid)
{
	TRACE_RTAI_SWITCHTO_LINUX(cpuid);
	if (rtai_linux_context[cpuid].pridepth) {
		if (!--rtai_linux_context[cpuid].pridepth) {
			apic_write_around(APIC_TASKPRI, 0);
		}
	}
	rt_switch_to_linux_notskpri(cpuid);
}
#else
/* Without APIC taskpri support the plain variants are used directly. */
#define rt_switch_to_real_time rt_switch_to_real_time_notskpri
#define rt_switch_to_linux rt_switch_to_linux_notskpri
#endif
00704
00705
00706
/* Non-zero while 'cpuid' is in hard real-time mode (nesting depth > 0). */
#define in_hrt_mode(cpuid) (rtai_linux_context[cpuid].depth)

#if defined(CONFIG_X86_LOCAL_APIC) && defined(RTAI_TASKPRI)
/* Swap in a new APIC task priority, returning the previous one. */
static inline unsigned long save_and_set_taskpri(unsigned long taskpri)
{
	unsigned long saved_taskpri = apic_read(APIC_TASKPRI);
	apic_write(APIC_TASKPRI, taskpri);
	return saved_taskpri;
}

/* Restore a task priority previously returned by save_and_set_taskpri. */
#define restore_taskpri(taskpri) \
	do { apic_write_around(APIC_TASKPRI, taskpri); } while (0)
#endif
00719
00720
/*
 * rt_set_timer_delay - (re)program the scheduling timer to fire after
 * 'delay' timer ticks; a zero delay is a no-op (the timer keeps its
 * current programming). Runs with hard interrupts disabled.
 */
static inline void rt_set_timer_delay (int delay) {
	if (delay) {
		unsigned long flags;
		rtai_hw_save_flags_and_cli(flags);
#ifdef CONFIG_X86_LOCAL_APIC
		/* APIC: load the initial-count register directly. */
		apic_write_around(APIC_TMICT, delay);
#else
		/* 8254 PIT: write the 16 bit count LSB-then-MSB to channel 0. */
		outb(delay & 0xff,0x40);
		outb(delay >> 8,0x40);
#endif
		rtai_hw_restore_flags(flags);
	}
}
00735
00736
00737
00738
/* Enter/leave a section synchronized across all CPUs; 'synch' is invoked
 * on the other CPUs while they spin — confirm exact semantics in hal.c. */
unsigned long rtai_critical_enter(void (*synch)(void));
void rtai_critical_exit(unsigned long flags);

/* Calibrate the 8254-based timing constants. */
int rtai_calibrate_8254(void);

/* Adjust a Linux task's scheduling policy/priority on RTAI's behalf. */
void rtai_set_linux_task_priority(struct task_struct *task, int policy, int prio);

/* Install a handler for an I-pipe event on the given domain. */
int rtai_catch_event (struct hal_domain_struct *ipd, unsigned long event, int (*handler)(unsigned long, void *));

#endif
00751
00752
00753
00754
#ifdef __KERNEL__

#include <linux/kernel.h>

#define rtai_print_to_screen rt_printk

/* Format a long long into the caller-supplied buffer 's'. */
void *ll2a(long long ll, char *s);

#ifdef __cplusplus
extern "C" {
#endif

/* --- Real-time interrupt management ----------------------------------- */

/* Install/remove a real-time handler for 'irq'; 'cookie' is passed back
 * to the handler, 'retmode' selects the fast-return path. */
int rt_request_irq(unsigned irq, int (*handler)(unsigned irq, void *cookie), void *cookie, int retmode);
int rt_release_irq(unsigned irq);
void rt_set_irq_cookie(unsigned irq, void *cookie);
void rt_set_irq_retmode(unsigned irq, int fastret);

/* PIC/APIC level controls for an IRQ line. */
unsigned rt_startup_irq(unsigned irq);
void rt_shutdown_irq(unsigned irq);
void rt_enable_irq(unsigned irq);
void rt_disable_irq(unsigned irq);
void rt_mask_and_ack_irq(unsigned irq);
void rt_unmask_irq(unsigned irq);
void rt_ack_irq(unsigned irq);

/* Dispatch a soft interrupt through an x86 gate: emulate the INT
 * sequence by pushing EFLAGS and CS before calling the handler. */
#define rtai_do_x86int(irq, handler) \
do { \
	__asm__ __volatile__ ( "pushfl; push %%cs; call *%1": : "a" (irq), "m" (handler)); \
} while (0)

/* Replace an IDT gate, returning the previous descriptor for later restore. */
struct desc_struct rtai_set_gate_vector (unsigned vector, int type, int dpl, void *handler);
void rtai_reset_gate_vector(unsigned vector, struct desc_struct e);

void rt_do_irq(unsigned irq);

/* --- Linux-domain interrupt services ----------------------------------- */

int rt_request_linux_irq(unsigned irq, irqreturn_t (*handler)(int irq, void *dev_id, struct pt_regs *regs), char *name, void *dev_id);
int rt_free_linux_irq(unsigned irq, void *dev_id);
void rt_pend_linux_irq(unsigned irq);
void rt_pend_linux_srq(unsigned srq);

/* Service requests (soft IRQs): kernel-side handler plus the handler
 * invoked on behalf of user space. */
int rt_request_srq(unsigned label, void (*k_handler)(void), long long (*u_handler)(unsigned long));
int rt_free_srq(unsigned srq);

int rt_assign_irq_to_cpu(int irq, unsigned long cpus_mask);
int rt_reset_irq_to_sym_mode(int irq);

/* --- Timer management --------------------------------------------------- */

void rt_request_timer_cpuid(void (*handler)(void), unsigned tick, int cpuid);
void rt_request_apic_timers(void (*handler)(void), struct apic_timer_setup_data *tmdata);
void rt_free_apic_timers(void);
int rt_request_timer(void (*handler)(void), unsigned tick, int use_apic);
void rt_free_timer(void);

RT_TRAP_HANDLER rt_set_trap_handler(RT_TRAP_HANDLER handler);

void rt_release_rtc(void);
void rt_request_rtc(long rtc_freq, void *handler);

/* Mount/umount are no-ops kept for API compatibility. */
#define rt_mount()
#define rt_umount()

/* 8254-based timestamp emulation (used when no TSC is available). */
RTIME rd_8254_ts(void);
void rt_setup_8254_tsc(void);

/* Install an interrupt hook, returning the previous hook function. */
void (*rt_set_ihook(void (*hookfn)(int)))(int);
00863
00864
00865
00866
/*
 * Legacy "global irq" API, kept as thin wrappers over the rt_*_irq
 * services. The void(*)(void) handlers are cast to the cookie-taking
 * signature expected by rt_request_irq, exactly as callers always did.
 */
static inline int rt_request_global_irq(unsigned irq, void (*handler)(void))
{
	int (*wrapped)(unsigned, void *) = (int (*)(unsigned, void *))handler;
	return rt_request_irq(irq, wrapped, 0, 0);
}

static inline int rt_request_global_irq_ext(unsigned irq, void (*handler)(void), unsigned long cookie)
{
	int (*wrapped)(unsigned, void *) = (int (*)(unsigned, void *))handler;
	return rt_request_irq(irq, wrapped, (void *)cookie, 1);
}

/* 'ext' is accepted for API compatibility but ignored. */
static inline void rt_set_global_irq_ext(unsigned irq, int ext, unsigned long cookie)
{
	rt_set_irq_cookie(irq, (void *)cookie);
}

static inline int rt_free_global_irq(unsigned irq)
{
	return rt_release_irq(irq);
}
00885
00886
#ifdef __cplusplus
}
#endif

#endif /* __KERNEL__ */

#include <asm/rtai_oldnames.h>
#include <asm/rtai_emulate_tsc.h>

/* Default periodic tick (presumably nanoseconds — confirm with callers). */
#define RTAI_DEFAULT_TICK 100000
/* Tracing enlarges stack usage, so give tasks a bigger default stack. */
#ifdef CONFIG_RTAI_TRACE
#define RTAI_DEFAULT_STACKSZ 8192
#else
#define RTAI_DEFAULT_STACKSZ 1024
#endif

#endif /* _RTAI_ASM_I386_HAL_H */
00905
00906
00907
/* Glue for coexistence with the fusion/Xenomai nucleus (RTAI_TRIOSS). */
#ifndef _RTAI_HAL_XN_H
#define _RTAI_HAL_XN_H

#if defined(__KERNEL__) && defined(RTAI_TRIOSS)

extern void xnpod_schedule(void);

/* Mirror of the nucleus XNTIMED status bit. */
#define XNTIMED 0x00000004

extern unsigned long *nkpod;
extern int fusion_timer_running;

/* Latch whether the fusion pod currently has its timer armed. */
#define SET_FUSION_TIMER_RUNNING() \
	do { fusion_timer_running = !!(*nkpod & XNTIMED); } while (0)

#define CLEAR_FUSION_TIMER_RUNNING() \
	do { fusion_timer_running = 0; } while (0)

#define IS_FUSION_TIMER_RUNNING() (fusion_timer_running)

/* Reschedule the domain we are currently in: Linux's scheduler in the
 * root domain, the fusion nucleus scheduler otherwise. */
#define NON_RTAI_SCHEDULE(cpuid) \
do { \
	if (hal_current_domain[cpuid] == hal_root_domain) { \
		schedule(); \
	} else { \
		xnpod_schedule(); \
	} \
} while (0)

#else

/* Without TRIOSS these degenerate to no-ops / plain Linux scheduling. */
#define SET_FUSION_TIMER_RUNNING()
#define CLEAR_FUSION_TIMER_RUNNING()
#define IS_FUSION_TIMER_RUNNING() (0)
#define NON_RTAI_SCHEDULE(cpuid) do { schedule(); } while (0)

#endif

#endif /* _RTAI_HAL_XN_H */