base/include/asm/rtai_hal.h

Go to the documentation of this file.
00001 /**
00002  *   @ingroup hal
00003  *   @file
00004  *
00005  *   ARTI -- RTAI-compatible Adeos-based Real-Time Interface. Based on
00006  *   the original RTAI layer for x86.
00007  *
00008  *   Original RTAI/x86 layer implementation: \n
00009  *   Copyright © 2000 Paolo Mantegazza, \n
00010  *   Copyright © 2000 Steve Papacharalambous, \n
00011  *   Copyright © 2000 Stuart Hughes, \n
00012  *   and others.
00013  *
00014  *   RTAI/x86 rewrite over Adeos: \n
00015  *   Copyright © 2002 Philippe Gerum.
00016  *
00017  *   This program is free software; you can redistribute it and/or modify
00018  *   it under the terms of the GNU General Public License as published by
00019  *   the Free Software Foundation, Inc., 675 Mass Ave, Cambridge MA 02139,
00020  *   USA; either version 2 of the License, or (at your option) any later
00021  *   version.
00022  *
00023  *   This program is distributed in the hope that it will be useful,
00024  *   but WITHOUT ANY WARRANTY; without even the implied warranty of
00025  *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
00026  *   GNU General Public License for more details.
00027  *
00028  *   You should have received a copy of the GNU General Public License
00029  *   along with this program; if not, write to the Free Software
00030  *   Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
00031  */
00032 
00033 /**
00034  * @addtogroup hal
00035  *@{*/
00036 
00037 
00038 #ifndef _RTAI_ASM_I386_HAL_H
00039 #define _RTAI_ASM_I386_HAL_H
00040 
00041 #include <linux/version.h>
00042 
00043 #if defined(CONFIG_REGPARM) || LINUX_VERSION_CODE > KERNEL_VERSION(2,6,19)
00044 #define RTAI_SYSCALL_MODE __attribute__((regparm(0)))
00045 #else
00046 #define RTAI_SYSCALL_MODE
00047 #endif
00048 
00049 #define RTAI_DUOSS
00050 #ifndef RTAI_DUOSS
00051 #define RTAI_TRIOSS
00052 #endif
00053 #define LOCKED_LINUX_IN_IRQ_HANDLER
00054 #define DOMAIN_TO_STALL  (fusion_domain)
00055 
00056 #include <rtai_hal_names.h>
00057 #include <asm/rtai_vectors.h>
00058 #include <rtai_types.h>
00059 
00060 #ifdef CONFIG_SMP
00061 #define RTAI_NR_CPUS  CONFIG_RTAI_CPUS
00062 #else /* !CONFIG_SMP */
00063 #define RTAI_NR_CPUS  1
00064 #endif /* CONFIG_SMP */
00065 
00066 #ifndef _RTAI_FUSION_H
/*
 * ffnz - return the 0-based index of the least significant set bit.
 * Precondition: word != 0. BSF leaves its destination undefined when the
 * source is zero (Intel SDM), so callers must never pass 0.
 */
00067 static __inline__ unsigned long ffnz (unsigned long word) {
00068     /* Derived from bitops.h's ffs() */
00069     __asm__("bsfl %1, %0"
00070         : "=r" (word)
00071         : "r"  (word));
00072     return word;
00073 }
00074 #endif
00075 
00076 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
00077 
00078 /*** 2.4.xx missing bitops, lifted from Linux ***/
00079 
/*
 * __ffs - 0-based index of the least significant set bit (2.4 compat shim).
 * Same BSF caveat as ffnz above: result is undefined for word == 0.
 */
00080 static inline unsigned long __ffs(unsigned long word)
00081 {
00082         __asm__("bsfl %1,%0"
00083                 :"=r" (word)
0084                 :"rm" (word));
00085         return word;
00086 }
00087 
/*
 * __find_first_bit - index of the first set bit in a bitmap of `size` bits.
 * Scans word by word; returns a value >= size when no bit is set (the
 * returned index may overshoot `size` up to the next word boundary, as in
 * the Linux original this was lifted from).
 */
00088 static inline unsigned __find_first_bit(const unsigned long *addr, unsigned size)
00089 {
00090         unsigned x = 0;
00091 
00092         while (x < size) {
00093                 unsigned long val = *addr++;
00094                 if (val)
00095                         return __ffs(val) + x;
00096                 x += (sizeof(*addr)<<3);
00097         }
00098         return x;
00099 }
00100 
/*
 * find_next_bit - find the first set bit at or after `offset` in a bitmap of
 * `size` bits (2.4 compat shim lifted from Linux). Assumes 32-bit words
 * (offset >> 5, offset & 31). The inline asm searches the partial first
 * word: BSF sets ZF when the shifted word is zero, in which case the
 * "movl $32" path reports "no bit in this word".
 */
00101 static inline int find_next_bit(const unsigned long *addr, int size, int offset)
00102 {
00103     const unsigned long *p = addr + (offset >> 5);
00104     int set = 0, bit = offset & 31, res;
00105 
00106     if (bit) {
00107         /*
00108          * Look for nonzero in the first 32 bits:
00109          */
00110         __asm__("bsfl %1,%0\n\t"
00111             "jne 1f\n\t"
00112             "movl $32, %0\n"
00113             "1:"
00114             : "=r" (set)
00115             : "r" (*p >> bit));
00116         if (set < (32 - bit))
00117             return set + offset;
00118         set = 32 - bit;
00119         p++;
00120     }
00121     /*
00122      * No set bit yet, search remaining full words for a bit
00123      */
00124     res = __find_first_bit (p, size - 32 * (p - addr));
00125     return (offset + set + res);
00126 }
00127 
00128 #define find_first_bit(addr, size) __find_first_bit((addr), (size))
00129 
00130 #endif
00131 
00132 #if 0
/*
 * Reference implementation of rtai_ulldiv: 64/32 division by iterated
 * 32-bit divisions. Compiled out (#if 0) in favour of the do_div()-based
 * version below; kept for documentation purposes only.
 */
00133 static inline unsigned long long rtai_ulldiv (unsigned long long ull,
00134                           unsigned long uld,
00135                           unsigned long *r) {
00136     /*
00137      * Fixed by Marco Morandini <morandini@aero.polimi.it> to work
00138      * with the -fnostrict-aliasing and -O2 combination using GCC
00139      * 3.x.
00140      */
00141 
00142     unsigned long long qf, rf;
00143     unsigned long tq, rh;
00144     union { unsigned long long ull; unsigned long ul[2]; } p, q;
00145 
00146     p.ull = ull;
00147     q.ull = 0;
00148     rf = 0x100000000ULL - (qf = 0xFFFFFFFFUL / uld) * uld;
00149 
00150     while (p.ull >= uld) {
00151         q.ul[1] += (tq = p.ul[1] / uld);
00152     rh = p.ul[1] - tq * uld;
00153     q.ull  += rh * qf + (tq = p.ul[0] / uld);
00154     p.ull   = rh * rf + (p.ul[0] - tq * uld);
00155     }
00156 
00157     if (r)
00158     *r = p.ull;
00159 
00160     return q.ull;
00161 }
00162 #else
00163 
00164 /* do_div below taken from Linux-2.6.20 */
/*
 * do_div(n, base): divide the 64-bit lvalue n in place by the 32-bit base;
 * the macro's value is the 32-bit remainder. i386 version taken from
 * Linux 2.6.20, used only if the kernel headers did not already define it.
 * The "A" constraint binds n to the edx:eax pair; the high word is reduced
 * first so the final divl cannot overflow.
 */
00165 #ifndef do_div
00166 #define do_div(n,base) ({ \
00167         unsigned long __upper, __low, __high, __mod, __base; \
00168         __base = (base); \
00169         asm("":"=a" (__low), "=d" (__high):"A" (n)); \
00170         __upper = __high; \
00171         if (__high) { \
00172                 __upper = __high % (__base); \
00173                 __high = __high / (__base); \
00174         } \
00175         asm("divl %2":"=a" (__low), "=d" (__mod):"rm" (__base), "0" (__low), "1" (__upper)); \
00176         asm("":"=A" (n):"a" (__low),"d" (__high)); \
00177         __mod; \
00178 })
00179 #endif
00180 
/*
 * rtai_ulldiv - divide the 64-bit value `ull` by the 32-bit value `uld`.
 * Returns the quotient; when `r` is non-NULL the 32-bit remainder is
 * stored through it. Thin wrapper around the kernel's do_div(), which
 * updates its first argument in place and evaluates to the remainder.
 */
static inline unsigned long long rtai_ulldiv (unsigned long long ull, unsigned long uld, unsigned long *r)
{
    unsigned long rem = do_div(ull, uld); /* ull now holds the quotient */

    if (r) {
        *r = rem;
    }
    return ull;
}
00190 #endif
00191 
00192 static inline int rtai_imuldiv (int i, int mult, int div) {
00193 
00194     /* Returns (int)i = (int)i*(int)(mult)/(int)div. */
00195     
/*
 * The product is formed in full 64-bit precision (mull -> edx:eax) before
 * the division, so i*mult cannot overflow the intermediate. A divide
 * fault (#DE) is still raised if the final quotient does not fit in 32
 * bits or if div == 0. `dummy` only captures the edx clobber.
 */
00196     int dummy;
00197 
00198     __asm__ __volatile__ ( \
00199     "mull %%edx\t\n" \
00200     "div %%ecx\t\n" \
00201     : "=a" (i), "=d" (dummy)
00202         : "a" (i), "d" (mult), "c" (div));
00203 
00204     return i;
00205 }
00206 
00207 static inline long long rtai_llimd(long long ll, int mult, int div) {
00208 
00209     /* Returns (long long)ll = (int)ll*(int)(mult)/(int)div. */
00210 
/*
 * 64x32/32 scaling kept entirely in registers: the two 32-bit halves of
 * ll are multiplied by mult separately, the partial results are combined,
 * and two chained divls produce the 64-bit quotient. The sal/cmpl/adcl
 * tail rounds the result to nearest (adds one when twice the final
 * remainder reaches div). ebx/ecx are scratch and declared as clobbers.
 */
00211     __asm__ __volatile ( \
00212     "movl %%edx,%%ecx\t\n" \
00213     "mull %%esi\t\n" \
00214     "movl %%eax,%%ebx\n\t" \
00215     "movl %%ecx,%%eax\t\n" \
00216         "movl %%edx,%%ecx\t\n" \
00217         "mull %%esi\n\t" \
00218     "addl %%ecx,%%eax\t\n" \
00219     "adcl $0,%%edx\t\n" \
00220         "divl %%edi\n\t" \
00221         "movl %%eax,%%ecx\t\n" \
00222         "movl %%ebx,%%eax\t\n" \
00223     "divl %%edi\n\t" \
00224     "sal $1,%%edx\t\n" \
00225         "cmpl %%edx,%%edi\t\n" \
00226         "movl %%ecx,%%edx\n\t" \
00227     "jge 1f\t\n" \
00228         "addl $1,%%eax\t\n" \
00229         "adcl $0,%%edx\t\n" \
00230     "1:\t\n" \
00231     : "=A" (ll) \
00232     : "A" (ll), "S" (mult), "D" (div) \
00233     : "%ebx", "%ecx");
00234 
00235     return ll;
00236 }
00237 
00238 /*
00239  *  u64div32c.c is a helper function provided, 2003-03-03, by:
00240  *  Copyright (C) 2003 Nils Hagge <hagge@rts.uni-hannover.de>
00241  */
00242 
/**
 * rtai_u64div32c - divide the 64-bit value @a by the 32-bit divisor @b.
 *
 * Classic two-step long division: the high word is divided first, then its
 * remainder is carried into the division of the low word, leaving the
 * 64-bit quotient in edx:eax (captured back into u.ull).
 *
 * NOTE(review): the @r parameter is accepted for interface compatibility
 * but is never written by this implementation (the remainder is left in
 * ecx inside the asm and not captured) — confirm no caller relies on it.
 *
 * Fix: the previous version returned the unmodified input @a; the quotient
 * computed by the asm lives in u.ull (the union was introduced for
 * strict-aliasing safety, but the return statement was not updated).
 */
static inline unsigned long long rtai_u64div32c(unsigned long long a,
                        unsigned long b,
                        int *r) {

    union { unsigned long long ull; unsigned long ul[2]; } u;
    u.ull = a;
    __asm__ __volatile(
    "\n        movl    %%eax,%%ebx"   /* save the low dividend word */
    "\n        movl    %%edx,%%eax"   /* divide the high word first */
    "\n        xorl    %%edx,%%edx"
    "\n        divl    %%ecx"         /* eax = high quotient, edx = carry */
    "\n        xchgl   %%eax,%%ebx"   /* bring back the low word */
    "\n        divl    %%ecx"         /* eax = low quotient */
    "\n        movl    %%edx,%%ecx"
    "\n        movl    %%ebx,%%edx"   /* edx:eax = 64-bit quotient */
    : "=a" (u.ul[0]), "=d" (u.ul[1])
    : "a"  (u.ul[0]), "d"  (u.ul[1]), "c" (b)
    : "%ebx" );

    return u.ull;
}
00264 
00265 #if defined(__KERNEL__) && !defined(__cplusplus)
00266 #include <linux/sched.h>
00267 #include <linux/interrupt.h>
00268 #include <asm/desc.h>
00269 #include <asm/system.h>
00270 #include <asm/io.h>
00271 #include <asm/rtai_atomic.h>
00272 #include <asm/rtai_fpu.h>
00273 #ifdef CONFIG_X86_LOCAL_APIC
00274 #include <asm/fixmap.h>
00275 #include <asm/apic.h>
00276 #endif /* CONFIG_X86_LOCAL_APIC */
00277 #include <rtai_trace.h>
00278 
/* Per-IRQ bookkeeping for interrupts handed over to RTAI. */
00279 struct rtai_realtime_irq_s {
00280     int (*handler)(unsigned irq, void *cookie);   /* real-time ISR */
00281     void *cookie;                                 /* opaque arg passed to handler */
00282     int retmode;                                  /* handler return-mode flag (see rt_set_irq_retmode) */
00283     int cpumask;                                  /* CPUs allowed to take this IRQ */
00284     int (*irq_ack)(unsigned int, void *);         /* optional custom acknowledge routine */
00285 };
00286 
00287 /* 
00288  * Linux has this information in io_apic.c, but it does not export it;
00289  * on the other hand it should be fairly stable this way and so we try
00290  * to avoid putting something else in our patch.
00291  */
00292 
00293 #ifdef CONFIG_X86_IO_APIC
/*
 * Map an external IRQ number to its interrupt vector, IO-APIC layout.
 * Mirrors the (unexported) vector assignment in Linux's io_apic.c: vectors
 * are spaced 8 apart starting at FIRST_DEVICE_VECTOR, with IRQ0 taking the
 * slot of IRQ2. IRQ2 itself is the 8259 cascade and has no vector.
 */
00294 static inline int ext_irq_vector(int irq)
00295 {
00296     if (irq != 2) {
00297         return (FIRST_DEVICE_VECTOR + 8*(irq < 2 ? irq : irq - 1));
00298     }
00299     return -EINVAL;
00300 }
00301 #else
/*
 * Map an external IRQ number to its interrupt vector, legacy 8259 layout:
 * vectors are contiguous from FIRST_EXTERNAL_VECTOR. IRQ2 is the cascade
 * and has no vector of its own.
 */
00302 static inline int ext_irq_vector(int irq)
00303 {
00304     if (irq != 2) {
00305         return (FIRST_EXTERNAL_VECTOR + irq);
00306     }
00307     return -EINVAL;
00308 }
00309 #endif
00310 
00311 #define RTAI_DOMAIN_ID  0x9ac15d93  // nam2num("rtai_d")
00312 #define RTAI_NR_TRAPS   HAL_NR_FAULTS
00313 #define RTAI_NR_SRQS    32
00314 
00315 #define RTAI_APIC_TIMER_VECTOR    RTAI_APIC_HIGH_VECTOR
00316 #define RTAI_APIC_TIMER_IPI       RTAI_APIC_HIGH_IPI
00317 #define RTAI_SMP_NOTIFY_VECTOR    RTAI_APIC_LOW_VECTOR
00318 #define RTAI_SMP_NOTIFY_IPI       RTAI_APIC_LOW_IPI
00319 
00320 #define RTAI_TIMER_8254_IRQ       0
00321 #define RTAI_FREQ_8254            1193180
00322 #define RTAI_APIC_ICOUNT      ((RTAI_FREQ_APIC + HZ/2)/HZ)
00323 #define RTAI_COUNTER_2_LATCH      0xfffe
00324 #define RTAI_LATENCY_8254         CONFIG_RTAI_SCHED_8254_LATENCY
00325 #define RTAI_SETUP_TIME_8254      2011 
00326 
00327 #define RTAI_CALIBRATED_APIC_FREQ 0
00328 #define RTAI_FREQ_APIC            (rtai_tunables.apic_freq)
00329 #define RTAI_LATENCY_APIC         CONFIG_RTAI_SCHED_APIC_LATENCY
00330 #define RTAI_SETUP_TIME_APIC      1000
00331 
00332 #define RTAI_TIME_LIMIT            0x7000000000000000LL
00333 
00334 #define RTAI_IFLAG  9
00335 
00336 #define rtai_cpuid()      hal_processor_id()
00337 #define rtai_tskext(idx)  hal_tskext[idx]
00338 
00339 /* Use these to grant atomic protection when accessing the hardware */
00340 #define rtai_hw_cli()                  hal_hw_cli()
00341 #define rtai_hw_sti()                  hal_hw_sti()
00342 #define rtai_hw_save_flags_and_cli(x)  hal_hw_local_irq_save(x)
00343 #define rtai_hw_restore_flags(x)       hal_hw_local_irq_restore(x)
00344 #define rtai_hw_save_flags(x)          hal_hw_local_irq_flags(x)
00345 
00346 /* Use these to grant atomic protection in hard real time code */
00347 #define rtai_cli()                  hal_hw_cli()
00348 #define rtai_sti()                  hal_hw_sti()
00349 #define rtai_save_flags_and_cli(x)  hal_hw_local_irq_save(x)
00350 #define rtai_restore_flags(x)       hal_hw_local_irq_restore(x)
00351 #define rtai_save_flags(x)          hal_hw_local_irq_flags(x)
00352 
/*
 * get_domain_pointer - return the n-th domain (1-based) in the Adeos
 * interrupt pipeline, walking hal_pipeline in priority order.
 * NOTE(review): when fewer than n domains exist, the domain COUNT is cast
 * to a pointer and returned (NULL only for an empty pipeline). This looks
 * like a deliberate non-NULL "not found" sentinel — confirm against the
 * callers before changing it.
 */
00353 static inline struct hal_domain_struct *get_domain_pointer(int n)
00354 {
00355     struct list_head *p = hal_pipeline.next;
00356     struct hal_domain_struct *d;
00357     unsigned long i = 0;
00358     while (p != &hal_pipeline) {
00359         d = list_entry(p, struct hal_domain_struct, p_link);
00360         if (++i == n) {
00361             return d;
00362         }
00363         p = d->p_link.next;
00364     }
00365     return (struct hal_domain_struct *)i;
00366 }
00367 
00368 #define RTAI_LT_KERNEL_VERSION_FOR_NONPERCPU  KERNEL_VERSION(2,6,20)
00369 
00370 #if LINUX_VERSION_CODE < RTAI_LT_KERNEL_VERSION_FOR_NONPERCPU
00371 
00372 #define ROOT_STATUS_ADR(cpuid)  (ipipe_root_status[cpuid])
00373 #define ROOT_STATUS_VAL(cpuid)  (*ipipe_root_status[cpuid])
00374 
00375 #define hal_pend_domain_uncond(irq, domain, cpuid) \
00376 do { \
00377     hal_irq_hits_pp(irq, domain, cpuid); \
00378     if (likely(!test_bit(IPIPE_LOCK_FLAG, &(domain)->irqs[irq].control))) { \
00379         __set_bit((irq) & IPIPE_IRQ_IMASK, &(domain)->cpudata[cpuid].irq_pending_lo[(irq) >> IPIPE_IRQ_ISHIFT]); \
00380         __set_bit((irq) >> IPIPE_IRQ_ISHIFT, &(domain)->cpudata[cpuid].irq_pending_hi); \
00381     } \
00382 } while (0)
00383 
00384 #define hal_fast_flush_pipeline(cpuid) \
00385 do { \
00386     if (hal_root_domain->cpudata[cpuid].irq_pending_hi != 0) { \
00387         rtai_cli(); \
00388         hal_sync_stage(IPIPE_IRQMASK_ANY); \
00389     } \
00390 } while (0)
00391 
00392 #else
00393 
00394 #define ROOT_STATUS_ADR(cpuid)  (&ipipe_cpudom_var(hal_root_domain, status))
00395 #define ROOT_STATUS_VAL(cpuid)  (ipipe_cpudom_var(hal_root_domain, status))
00396 
00397 #define hal_pend_domain_uncond(irq, domain, cpuid) \
00398 do { \
00399     if (likely(!test_bit(IPIPE_LOCK_FLAG, &(domain)->irqs[irq].control))) { \
00400         __set_bit((irq) & IPIPE_IRQ_IMASK, &ipipe_cpudom_var(domain, irqpend_lomask)[(irq) >> IPIPE_IRQ_ISHIFT]); \
00401         __set_bit((irq) >> IPIPE_IRQ_ISHIFT, &ipipe_cpudom_var(domain, irqpend_himask)); \
00402     } else { \
00403         __set_bit((irq) & IPIPE_IRQ_IMASK, &ipipe_cpudom_var(domain, irqheld_mask)[(irq) >> IPIPE_IRQ_ISHIFT]); \
00404     } \
00405     ipipe_cpudom_var(domain, irqall)[irq]++; \
00406 } while (0)
00407 
00408 #define hal_fast_flush_pipeline(cpuid) \
00409 do { \
00410     if (ipipe_cpudom_var(hal_root_domain, irqpend_himask) != 0) { \
00411         rtai_cli(); \
00412         hal_sync_stage(IPIPE_IRQMASK_ANY); \
00413     } \
00414 } while (0)
00415 
00416 #endif
00417 
00418 #define hal_pend_uncond(irq, cpuid)  hal_pend_domain_uncond(irq, hal_root_domain, cpuid)
00419 
00420 extern volatile unsigned long *ipipe_root_status[];
00421 
00422 #ifdef RTAI_TRIOSS
00423 #define hal_test_and_fast_flush_pipeline(cpuid) \
00424 do { \
00425     if (!test_bit(IPIPE_STALL_FLAG, &DOMAIN_TO_STALL->cpudata[cpuid].status)) { \
00426         rtai_sti(); \
00427         hal_unstall_pipeline_from(DOMAIN_TO_STALL); \
00428     } \
00429 } while (0)
00430 #else
00431 #define hal_test_and_fast_flush_pipeline(cpuid) \
00432 do { \
00433     if (!test_bit(IPIPE_STALL_FLAG, ROOT_STATUS_ADR(cpuid))) { \
00434         hal_fast_flush_pipeline(cpuid); \
00435         rtai_sti(); \
00436     } \
00437 } while (0)
00438 #endif
00439 
00440 
00441 #ifdef CONFIG_PREEMPT
00442 #define rtai_save_and_lock_preempt_count() \
00443     do { int *prcntp, prcnt; prcnt = xchg(prcntp = &preempt_count(), 1);
00444 #define rtai_restore_preempt_count() \
00445          *prcntp = prcnt; } while (0)
00446 #else
00447 #define rtai_save_and_lock_preempt_count();
00448 #define rtai_restore_preempt_count();
00449 #endif
00450 
00451 typedef int (*rt_irq_handler_t)(unsigned irq, void *cookie);
00452 
00453 #ifdef CONFIG_X86_TSC
00454 
00455 #define RTAI_CALIBRATED_CPU_FREQ   0
00456 #define RTAI_CPU_FREQ              (rtai_tunables.cpu_freq)
00457 
00458 #if 0
00459 
00460 static inline unsigned long long _rtai_hidden_rdtsc (void) {
00461     unsigned long long t;
00462     __asm__ __volatile__( "rdtsc" : "=A" (t));
00463     return t;
00464 }
00465 #define rtai_rdtsc() _rtai_hidden_rdtsc()
00466 
00467 #else
00468 
00469 //#define CONFIG_RTAI_DIAG_TSC_SYNC
00470 #if defined(CONFIG_SMP) && defined(CONFIG_RTAI_DIAG_TSC_SYNC) && defined(CONFIG_RTAI_TUNE_TSC_SYNC)
00471 extern volatile long rtai_tsc_ofst[];
00472 #define rtai_rdtsc() ({ unsigned long long t; __asm__ __volatile__( "rdtsc" : "=A" (t)); t - rtai_tsc_ofst[rtai_cpuid()]; })
00473 #else
00474 #define rtai_rdtsc() ({ unsigned long long t; __asm__ __volatile__( "rdtsc" : "=A" (t)); t; })
00475 #endif
00476 
00477 #endif
00478 
00479 #else  /* !CONFIG_X86_TSC */
00480 
00481 #define RTAI_CPU_FREQ             RTAI_FREQ_8254
00482 #define RTAI_CALIBRATED_CPU_FREQ  RTAI_FREQ_8254
00483 
00484 #define rtai_rdtsc() rd_8254_ts()
00485 
00486 #endif /* CONFIG_X86_TSC */
00487 
00488 struct calibration_data {
00489 
00490     unsigned long cpu_freq;
00491     unsigned long apic_freq;
00492     int latency;
00493     int setup_time_TIMER_CPUNIT;
00494     int setup_time_TIMER_UNIT;
00495     int timers_tol[RTAI_NR_CPUS];
00496 };
00497 
00498 struct apic_timer_setup_data {
00499 
00500     int mode;
00501     int count;
00502 };
00503 
00504 extern struct rt_times rt_times;
00505 
00506 extern struct rt_times rt_smp_times[RTAI_NR_CPUS];
00507 
00508 extern struct calibration_data rtai_tunables;
00509 
00510 extern volatile unsigned long rtai_cpu_lock[];
00511 
00512 #if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,26)
00513 #define apic_write_around apic_write
00514 #endif
00515 
00516 //#define RTAI_TASKPRI 0xf0  // simplest usage without changing Linux code base
00517 #if defined(CONFIG_X86_LOCAL_APIC) && defined(RTAI_TASKPRI)
00518 #define SET_TASKPRI(cpuid) \
00519     if (!rtai_linux_context[cpuid].set_taskpri) { \
00520         apic_write_around(APIC_TASKPRI, ((apic_read(APIC_TASKPRI) & ~APIC_TPRI_MASK) | RTAI_TASKPRI)); \
00521         rtai_linux_context[cpuid].set_taskpri = 1; \
00522     }
00523 #define CLR_TASKPRI(cpuid) \
00524     if (rtai_linux_context[cpuid].set_taskpri) { \
00525         apic_write_around(APIC_TASKPRI, (apic_read(APIC_TASKPRI) & ~APIC_TPRI_MASK)); \
00526         rtai_linux_context[cpuid].set_taskpri = 0; \
00527     }
00528 #else
00529 #define SET_TASKPRI(cpuid)
00530 #define CLR_TASKPRI(cpuid)
00531 #endif
00532 
00533 extern struct rtai_switch_data {
00534     volatile unsigned long sflags;
00535     volatile unsigned long lflags;
00536 #if defined(CONFIG_X86_LOCAL_APIC) && defined(RTAI_TASKPRI)
00537     volatile unsigned long set_taskpri;
00538 #endif
00539 #ifdef RTAI_TRIOSS
00540     volatile struct hal_domain_struct *oldomain;
00541 #endif
00542 } rtai_linux_context[RTAI_NR_CPUS];
00543 
00544 irqreturn_t rtai_broadcast_to_local_timers(int irq,
00545                        void *dev_id,
00546                        struct pt_regs *regs);
00547 
/* Return the state of the hard interrupt-enable flag (EFLAGS.IF, bit 9),
 * leaving the interrupt state unchanged. Non-zero means IRQs were on. */
00548 static inline unsigned long rtai_save_flags_irqbit(void)
00549 {
00550     unsigned long flags;
00551     rtai_save_flags(flags);
00552     return flags & (1 << RTAI_IFLAG);
00553 }
00554 
/* Same as above, but also hard-disables interrupts before returning. */
00555 static inline unsigned long rtai_save_flags_irqbit_and_cli(void)
00556 {
00557     unsigned long flags;
00558     rtai_save_flags_and_cli(flags);
00559     return flags & (1 << RTAI_IFLAG);
00560 }
00561 
00562 #ifdef CONFIG_SMP
00563 
00564 #define SCHED_VECTOR  RTAI_SMP_NOTIFY_VECTOR
00565 #define SCHED_IPI     RTAI_SMP_NOTIFY_IPI
00566 
00567 #define _send_sched_ipi(dest) \
00568 do { \
00569     apic_wait_icr_idle(); \
00570     apic_write_around(APIC_ICR2, SET_APIC_DEST_FIELD(dest)); \
00571     apic_write_around(APIC_ICR, APIC_DEST_LOGICAL | SCHED_VECTOR); \
00572 } while (0)
00573 
00574 #ifdef CONFIG_PREEMPT
00575 #define rt_spin_lock(lock)    do { barrier(); _raw_spin_lock(lock); barrier(); } while (0)
00576 #define rt_spin_unlock(lock)  do { barrier(); _raw_spin_unlock(lock); barrier(); } while (0)
00577 #else /* !CONFIG_PREEMPT */
00578 #define rt_spin_lock(lock)    spin_lock(lock)
00579 #define rt_spin_unlock(lock)  spin_unlock(lock)
00580 #endif /* CONFIG_PREEMPT */
00581 
/* Take `lock` with hardware interrupts hard-disabled (no save/restore). */
00582 static inline void rt_spin_lock_hw_irq(spinlock_t *lock)
00583 {
00584     rtai_hw_cli();
00585     rt_spin_lock(lock);
00586 }
00587 
/* Release `lock` and unconditionally hard-enable interrupts. */
00588 static inline void rt_spin_unlock_hw_irq(spinlock_t *lock)
00589 {
00590     rt_spin_unlock(lock);
00591     rtai_hw_sti();
00592 }
00593 
/* Take `lock` with interrupts hard-disabled, returning the previous
 * hardware flags for a later rt_spin_unlock_hw_irqrestore(). */
00594 static inline unsigned long rt_spin_lock_hw_irqsave(spinlock_t *lock)
00595 {
00596     unsigned long flags;
00597     rtai_hw_save_flags_and_cli(flags);
00598     rt_spin_lock(lock);
00599     return flags;
00600 }
00601 
/* Release `lock`, then restore the saved hardware interrupt flags. */
00602 static inline void rt_spin_unlock_hw_irqrestore(unsigned long flags, spinlock_t *lock)
00603 {
00604     rt_spin_unlock(lock);
00605     rtai_hw_restore_flags(flags);
00606 }
00607 
/* Real-time variants: same pattern through the rtai_* wrappers. */
00608 static inline void rt_spin_lock_irq(spinlock_t *lock) {
00609 
00610     rtai_cli();
00611     rt_spin_lock(lock);
00612 }
00613 
00614 static inline void rt_spin_unlock_irq(spinlock_t *lock) {
00615 
00616     rt_spin_unlock(lock);
00617     rtai_sti();
00618 }
00619 
/* Take `lock` with IRQs disabled, returning the previous flags. */
00620 static inline unsigned long rt_spin_lock_irqsave(spinlock_t *lock) {
00621 
00622     unsigned long flags;
00623     rtai_save_flags_and_cli(flags);
00624     rt_spin_lock(lock);
00625     return flags;
00626 }
00627 
/* Release `lock`, then restore the saved interrupt flags. */
00628 static inline void rt_spin_unlock_irqrestore(unsigned long flags, spinlock_t *lock)
00629 {
00630     rt_spin_unlock(lock);
00631     rtai_restore_flags(flags);
00632 }
00633 
00634 #if RTAI_NR_CPUS > 2
00635 
00636 // taken from Linux, see the related code there for an explanation
00637 
/*
 * Acquire the global lock's ticket in lock[1] (>2-CPU configuration).
 * Ticket-lock fast path taken from Linux: xaddw atomically fetches our
 * ticket (high byte of inc) while bumping the next-ticket counter, then we
 * spin (rep;nop = pause) until the owner byte reaches our ticket.
 */
00638 static inline void rtai_spin_glock(volatile unsigned long *lock)
00639 {
00640  short inc = 0x0100;
00641  __asm__ __volatile__ (
00642  LOCK_PREFIX "xaddw %w0, %1\n"
00643  "1:\t"
00644  "cmpb %h0, %b0\n\t"
00645  "je 2f\n\t"
00646  "rep; nop\n\t"
00647  "movb %1, %b0\n\t"
00648  "jmp 1b\n"
00649  "2:"
00650  :"+Q" (inc), "+m" (lock[1])
00651  :
00652  :"memory", "cc");
00653 }
00654 
00655 #if defined(CONFIG_X86_OOSTORE) || defined(CONFIG_X86_PPRO_FENCE)
00656 # define UNLOCK_LOCK_PREFIX LOCK_PREFIX
00657 #else
00658 # define UNLOCK_LOCK_PREFIX
00659 #endif
00660 
/*
 * Release the ticket lock: advance the owner byte so the next waiter's
 * ticket matches. UNLOCK_LOCK_PREFIX is only a real LOCK on CPUs with
 * weak store ordering (OOSTORE / PPro fence errata).
 */
00661 static inline void rtai_spin_gunlock(volatile unsigned long *lock)
00662 {
00663  __asm__ __volatile__(
00664  UNLOCK_LOCK_PREFIX "incb %0"
00665  :"+m" (lock[1])
00666  :
00667  :"memory", "cc");
00668 }
00669 
00670 #else
00671 
/*
 * 1-2 CPU variant: the global lock is just bit 31 of the lock word,
 * acquired with an atomic test-and-set spin.
 */
00672 static inline void rtai_spin_glock(volatile unsigned long *lock)
00673 {
00674     while (test_and_set_bit(31, lock)) {
00675         cpu_relax();
00676     }
00677     barrier();
00678 }
00679 
/* Drop bit 31; the test_and_clear keeps the operation atomic. */
00680 static inline void rtai_spin_gunlock(volatile unsigned long *lock)
00681 {
00682     test_and_clear_bit(31, lock);
00683     cpu_relax(); 
00684 }
00685 
00686 #endif
00687 
/*
 * Hard-disable IRQs and acquire the global inter-CPU lock. The per-CPU
 * bit in rtai_cpu_lock[0] makes the lock recursive per CPU: only the
 * first (outermost) acquisition on this CPU takes the spin lock, so
 * nested rt_global_cli() calls cannot self-deadlock.
 */
00688 static inline void rt_get_global_lock(void)
00689 {
00690     barrier();
00691     rtai_cli();
00692     if (!test_and_set_bit(hal_processor_id(), &rtai_cpu_lock[0])) {
00693         rtai_spin_glock(&rtai_cpu_lock[0]);
00694     }
00695     barrier();
00696 }
00697 
/*
 * Release the global lock if this CPU holds it. IRQs are left hard
 * disabled (rtai_cli); re-enabling is the caller's business, e.g.
 * rt_global_sti().
 */
00698 static inline void rt_release_global_lock(void)
00699 {
00700     barrier();
00701     rtai_cli();
00702     if (test_and_clear_bit(hal_processor_id(), &rtai_cpu_lock[0])) {
00703         rtai_spin_gunlock(&rtai_cpu_lock[0]);
00704     }
00705     barrier();
00706 }
00707 
00708 /**
00709  * Disable interrupts across all CPUs
00710  *
00711  * rt_global_cli hard disables interrupts (cli) on the requesting CPU and
00712  * acquires the global spinlock to the calling CPU so that any other CPU
00713  * synchronized by this method is blocked. Nested calls to rt_global_cli within
00714  * the owner CPU will not cause a deadlock on the global spinlock, as it would
00715  * happen for a normal spinlock.
00716  *
00717  * rt_global_sti hard enables interrupts (sti) on the calling CPU and releases
00718  * the global lock.
00719  */
00720 static inline void rt_global_cli(void)
00721 {
00722     rt_get_global_lock();   /* hard-disables IRQs and takes the global lock */
00723 }
00724 
00725 /**
00726  * Enable interrupts across all CPUs
00727  *
00728  * rt_global_sti hard enables interrupts (sti) on the calling CPU and releases
00729  * the global lock.
00730  */
00731 static inline void rt_global_sti(void)
00732 {
00733     rt_release_global_lock();
00734     rtai_sti();
00735 }
00736 
00737 /**
00738  * Save CPU flags
00739  *
00740  * rt_global_save_flags_and_cli combines rt_global_save_flags() and
00741  * rt_global_cli().
00742  */
/*
 * Returned value encodes two things: bit 9 (RTAI_IFLAG) is the previous
 * hard IF state, and bit 0 is set only when THIS call actually acquired
 * the global lock (i.e. the outermost nesting level). A later
 * rt_global_restore_flags() uses bit 0 to decide whether to release.
 */
00743 static inline int rt_global_save_flags_and_cli(void)
00744 {
00745     unsigned long flags;
00746 
00747     barrier();
00748     flags = rtai_save_flags_irqbit_and_cli();
00749     if (!test_and_set_bit(hal_processor_id(), &rtai_cpu_lock[0])) {
00750         rtai_spin_glock(&rtai_cpu_lock[0]);
00751         barrier();
00752         return flags | 1;
00753     }
00754     barrier();
00755     return flags;
00756 }
00757 
00758 /**
00759  * Save CPU flags
00760  *
00761  * rt_global_save_flags saves the CPU interrupt flag (IF) bit 9 of @a flags and
00762  * ORs the global lock flag in the first 8 bits of flags. From that you can
00763  * rightly infer that RTAI does not support more than 8 CPUs.
00764  */
/*
 * Bit 0 of *flags is set when this CPU does NOT hold the global lock
 * (matching the rt_global_restore_flags() convention: bit 0 set means
 * "release on restore"). The IF state is probed with a momentary cli and
 * re-enabled if it was previously on.
 */
00765 static inline void rt_global_save_flags(unsigned long *flags)
00766 {
00767     unsigned long hflags = rtai_save_flags_irqbit_and_cli();
00768 
00769     *flags = test_bit(hal_processor_id(), &rtai_cpu_lock[0]) ? hflags : hflags | 1;
00770     if (hflags) {
00771         rtai_sti();
00772     }
00773 }
00774 
00775 /**
00776  * Restore CPU flags
00777  *
00778  * rt_global_restore_flags restores the CPU hard interrupt flag (IF)
00779  * and the state of the global inter-CPU lock, according to the state
00780  * given by flags.
00781  */
00782 static inline void rt_global_restore_flags(unsigned long flags)
00783 {
00784     barrier();
/* Bit 0 set => this nesting level owned the lock: release it; otherwise
 * re-acquire so the lock state matches the point where flags were saved. */
00785     if (test_and_clear_bit(0, &flags)) {
00786         rt_release_global_lock();
00787     } else {
00788         rt_get_global_lock();
00789     }
/* Remaining non-zero bits carry the saved IF state (bit 9). */
00790     if (flags) {
00791         rtai_sti();
00792     }
00793     barrier();
00794 }
00795 
00796 #else /* !CONFIG_SMP */
00797 
00798 #define _send_sched_ipi(dest)
00799 
00800 #define rt_spin_lock(lock)
00801 #define rt_spin_unlock(lock)
00802 
00803 #define rt_spin_lock_irq(lock)    do { rtai_cli(); } while (0)
00804 #define rt_spin_unlock_irq(lock)  do { rtai_sti(); } while (0)
00805 
/* Uniprocessor build: no spinning needed, just save flags and disable IRQs. */
00806 static inline unsigned long rt_spin_lock_irqsave(spinlock_t *lock)
00807 {
00808     unsigned long flags;
00809     rtai_save_flags_and_cli(flags);
00810     return flags;
00811 }
00812 #define rt_spin_unlock_irqrestore(flags, lock)  do { rtai_restore_flags(flags); } while (0)
00813 
00814 #define rt_get_global_lock()      do { rtai_cli(); } while (0)
00815 #define rt_release_global_lock()
00816 
00817 #define rt_global_cli()  do { rtai_cli(); } while (0)
00818 #define rt_global_sti()  do { rtai_sti(); } while (0)
00819 
/* Uniprocessor build: the "global" lock degenerates to a plain cli. */
00820 static inline unsigned long rt_global_save_flags_and_cli(void)
00821 {
00822     unsigned long flags;
00823     rtai_save_flags_and_cli(flags);
00824     return flags;
00825 }
00826 #define rt_global_restore_flags(flags)  do { rtai_restore_flags(flags); } while (0)
00827 
00828 #define rt_global_save_flags(flags)     do { rtai_save_flags(*flags); } while (0)
00829 
00830 #endif
00831 
00832 asmlinkage int rt_printk(const char *format, ...);
00833 asmlinkage int rt_sync_printk(const char *format, ...);
00834 
00835 extern struct hal_domain_struct rtai_domain;
00836 extern struct hal_domain_struct *fusion_domain;
00837 
00838 #ifdef RTAI_TRIOSS
00839 
00840 #define _rt_switch_to_real_time(cpuid) \
00841 do { \
00842     rtai_linux_context[cpuid].lflags = xchg(&DOMAIN_TO_STALL->cpudata[cpuid].status, (1 << IPIPE_STALL_FLAG)); \
00843     rtai_linux_context[cpuid].oldomain = hal_current_domain(cpuid); \
00844     rtai_linux_context[cpuid].sflags = 1; \
00845     hal_current_domain(cpuid) = &rtai_domain; \
00846 } while (0)
00847 
00848 #define rt_switch_to_linux(cpuid) \
00849 do { \
00850     if (rtai_linux_context[cpuid].sflags) { \
00851         hal_current_domain(cpuid) = (void *)rtai_linux_context[cpuid].oldomain; \
00852         DOMAIN_TO_STALL->cpudata[cpuid].status = rtai_linux_context[cpuid].lflags; \
00853         rtai_linux_context[cpuid].sflags = 0; \
00854         CLR_TASKPRI(cpuid); \
00855     } \
00856 } while (0)
00857 
00858 #else
00859 
00860 #define _rt_switch_to_real_time(cpuid) \
00861 do { \
00862     rtai_linux_context[cpuid].lflags = xchg(ROOT_STATUS_ADR(cpuid), (1 << IPIPE_STALL_FLAG)); \
00863     rtai_linux_context[cpuid].sflags = 1; \
00864     hal_current_domain(cpuid) = &rtai_domain; \
00865 } while (0)
00866 
00867 #define rt_switch_to_linux(cpuid) \
00868 do { \
00869     if (rtai_linux_context[cpuid].sflags) { \
00870         hal_current_domain(cpuid) = hal_root_domain; \
00871         ROOT_STATUS_VAL(cpuid) = rtai_linux_context[cpuid].lflags; \
00872         rtai_linux_context[cpuid].sflags = 0; \
00873         CLR_TASKPRI(cpuid); \
00874     } \
00875 } while (0)
00876 
00877 #endif
00878 
00879 #define rt_switch_to_real_time(cpuid) \
00880 do { \
00881     if (!rtai_linux_context[cpuid].sflags) { \
00882         _rt_switch_to_real_time(cpuid); \
00883     } \
00884 } while (0)
00885 
00886 #define rtai_get_intr_handler(v) \
00887     ((idt_table[v].b & 0xFFFF0000) | (idt_table[v].a & 0x0000FFFF))
00888 #define ack_bad_irq hal_ack_system_irq // linux does not export ack_bad_irq
00889 
00890 #define rtai_init_taskpri_irqs() \
00891 do { \
00892     int v; \
00893     for (v = SPURIOUS_APIC_VECTOR + 1; v < 256; v++) { \
00894         hal_virtualize_irq(hal_root_domain, v - FIRST_EXTERNAL_VECTOR, (void (*)(unsigned))rtai_get_intr_handler(v), (void *)ack_bad_irq, IPIPE_HANDLE_MASK); \
00895     } \
00896 } while (0)
00897 
/*
 * Enter hard real-time mode on `cpuid` if not already there.
 * Returns 0 when this call performed the switch, 1 when the CPU was
 * already in real-time mode (sflags set) — the caller feeds this value
 * back to rt_restore_switch_to_linux().
 */
00898 static inline int rt_save_switch_to_real_time(int cpuid)
00899 {
00900     SET_TASKPRI(cpuid);
00901     if (!rtai_linux_context[cpuid].sflags) {
00902         _rt_switch_to_real_time(cpuid);
00903         return 0;
00904     } 
00905     return 1;
00906 }
00907 
00908 #define rt_restore_switch_to_linux(sflags, cpuid) \
00909 do { \
00910     if (!sflags) { \
00911         rt_switch_to_linux(cpuid); \
00912     } else if (!rtai_linux_context[cpuid].sflags) { \
00913         SET_TASKPRI(cpuid); \
00914         _rt_switch_to_real_time(cpuid); \
00915     } \
00916 } while (0)
00917 
00918 #define in_hrt_mode(cpuid)  (rtai_linux_context[cpuid].sflags)
00919 
00920 #if defined(CONFIG_X86_LOCAL_APIC)
/* Program the local APIC task-priority register, returning its previous
 * value so the caller can restore_taskpri() it later. */
00921 static inline unsigned long save_and_set_taskpri(unsigned long taskpri)
00922 {
00923     unsigned long saved_taskpri = apic_read(APIC_TASKPRI);
00924     apic_write(APIC_TASKPRI, taskpri);
00925     return saved_taskpri;
00926 }
00927 
00928 #define restore_taskpri(taskpri) \
00929     do { apic_write_around(APIC_TASKPRI, taskpri); } while (0)
00930 #endif
00931 
/*
 * Re-arm the timer with `delay` ticks: APIC initial-count register when a
 * local APIC is used, otherwise the 8254 PIT channel 0 (LSB then MSB to
 * port 0x40). A zero delay is a no-op — the previously programmed value
 * (e.g. a periodic count) is left in place. Runs with hard IRQs disabled
 * to keep the two-byte PIT write atomic.
 */
00932 static inline void rt_set_timer_delay (int delay) {
00933 
00934     if (delay) {
00935         unsigned long flags;
00936         rtai_hw_save_flags_and_cli(flags);
00937 #ifdef CONFIG_X86_LOCAL_APIC
00938     apic_write_around(APIC_TMICT, delay);
00939 #else /* !CONFIG_X86_LOCAL_APIC */
00940     outb(delay & 0xff,0x40);
00941     outb(delay >> 8,0x40);
00942 #endif /* CONFIG_X86_LOCAL_APIC */
00943         rtai_hw_restore_flags(flags);
00944     }
00945 }
00946 
00947     /* Private interface -- Internal use only */
00948 
00949 unsigned long rtai_critical_enter(void (*synch)(void));
00950 
00951 void rtai_critical_exit(unsigned long flags);
00952 
00953 int rtai_calibrate_8254(void);
00954 
00955 void rtai_set_linux_task_priority(struct task_struct *task,
00956                   int policy,
00957                   int prio);
00958 
00959 long rtai_catch_event (struct hal_domain_struct *domain, unsigned long event, int (*handler)(unsigned long, void *));
00960 
00961 #endif /* __KERNEL__ && !__cplusplus */
00962 
00963     /* Public interface */
00964 
00965 #ifdef __KERNEL__
00966 
#include <linux/kernel.h>

/* Printing from real-time context is routed through rt_printk(). */
#define rtai_print_to_screen  rt_printk

/* Render the 64-bit value @a ll as text into caller-supplied buffer @a s;
   returns a pointer into/related to @a s (implemented elsewhere --
   confirm exact return contract there). */
void *ll2a(long long ll, char *s);
00972 
00973 #ifdef __cplusplus
00974 extern "C" {
00975 #endif /* __cplusplus */
00976 
/* Install @a handler as the real-time handler of @a irq; @a cookie is
   passed back to the handler, @a retmode selects the legacy return
   convention (see rt_set_irq_retmode()).  Returns 0 on success or a
   negative error code. */
int rt_request_irq(unsigned irq,
           int (*handler)(unsigned irq, void *cookie),
           void *cookie,
           int retmode);

/* Detach the real-time handler previously installed on @a irq. */
int rt_release_irq(unsigned irq);

/* Acknowledge @a irq at the legacy 8259A PIC. */
int ack_8259A_irq(unsigned int irq);

/* Attach acknowledge routine @a irq_ack to @a irq. */
int rt_set_irq_ack(unsigned int irq, int (*irq_ack)(unsigned int, void *));
00987 
/**
 * Install a real-time interrupt handler together with its acknowledge
 * routine in a single call: first @a handler is registered through
 * rt_request_irq(); only on success is @a irq_ack attached through
 * rt_set_irq_ack().  Returns the rt_request_irq() error (< 0) on
 * failure, otherwise whatever rt_set_irq_ack() returns.
 */
static inline int rt_request_irq_wack(unsigned irq, int (*handler)(unsigned irq, void *cookie), void *cookie, int retmode, int (*irq_ack)(unsigned int, void *))
{
    int err = rt_request_irq(irq, handler, cookie, retmode);
    if (err < 0) {
        return err;
    }
    return rt_set_irq_ack(irq, irq_ack);
}
00996 
/* Replace the cookie passed to the real-time handler of @a irq. */
void rt_set_irq_cookie(unsigned irq, void *cookie);

/* Select the handler return convention for @a irq (@a fastret non-zero
   selects the "fast return" mode -- see the HAL implementation). */
void rt_set_irq_retmode(unsigned irq, int fastret);

/**
 * @name Programmable Interrupt Controllers (PIC) management functions.
 *
 *@{*/
/* Start up @a irq at the PIC; returns a PIC-dependent status word. */
unsigned rt_startup_irq(unsigned irq);

/* Shut down @a irq at the PIC. */
void rt_shutdown_irq(unsigned irq);

/* Enable delivery of @a irq. */
void rt_enable_irq(unsigned irq);

/* Disable delivery of @a irq. */
void rt_disable_irq(unsigned irq);

/* Mask @a irq and acknowledge it at the PIC. */
void rt_mask_and_ack_irq(unsigned irq);

/* Unmask @a irq at the PIC. */
void rt_unmask_irq(unsigned irq);

/* Acknowledge @a irq at the PIC. */
void rt_ack_irq(unsigned irq);

/*@}*/
01020 
// this is machine dominance and must stay in our hands, long live DOS!
/* Dispatch x86 vector @a handler as if a software interrupt had fired:
   push EFLAGS and CS, then CALL through @a handler, so the routine sees
   the stack frame of an interrupt gate and can return with "iret".
   The interrupt number is passed in %eax ("a" constraint). */
#define rtai_do_x86int(irq, handler) \
do { \
    __asm__ __volatile__ ( "pushfl; push %%cs; call *%1": : "a" (irq), "m" (handler)); \
} while (0)

/* Install @a handler in IDT slot @a vector with the given gate @a type
   and privilege level @a dpl; returns the previous gate descriptor so
   it can be restored with rtai_reset_gate_vector(). */
struct desc_struct rtai_set_gate_vector (unsigned vector, int type, int dpl, void *handler);

/* Put back a gate descriptor saved by rtai_set_gate_vector(). */
void rtai_reset_gate_vector(unsigned vector, struct desc_struct e);
// end of machine dominance
01031 
/* Run the real-time dispatch path for @a irq directly. */
void rt_do_irq(unsigned irq);

/* Install a Linux-domain handler for @a irq (handler/dev_id follow the
   Linux request_irq() conventions -- confirm exact prototype in the
   implementation). */
int rt_request_linux_irq(unsigned irq,
             void *handler,
             char *name,
             void *dev_id);

/* Release a Linux-domain handler installed by rt_request_linux_irq(). */
int rt_free_linux_irq(unsigned irq,
              void *dev_id);

/* Mark @a irq pending for the Linux domain so Linux services it when it
   next runs. */
void rt_pend_linux_irq(unsigned irq);

/* User-space callable variant of rt_pend_linux_irq() (regparm(0) entry). */
RTAI_SYSCALL_MODE void usr_rt_pend_linux_irq(unsigned irq);
01045 
/* Mark service request @a srq pending for the Linux domain. */
void rt_pend_linux_srq(unsigned srq);

/* Register a service request: @a k_handler runs in kernel context when
   the srq is pended, @a u_handler serves user-space invocations.
   Returns the allocated srq id or a negative error -- confirm in the
   implementation. */
int rt_request_srq(unsigned label,
           void (*k_handler)(void),
           long long (*u_handler)(unsigned long));

/* Release a service request obtained from rt_request_srq(). */
int rt_free_srq(unsigned srq);

/* Bind delivery of @a irq to the CPUs in @a cpus_mask. */
int rt_assign_irq_to_cpu(int irq,
             unsigned long cpus_mask);

/* Restore symmetric (default) CPU distribution for @a irq. */
int rt_reset_irq_to_sym_mode(int irq);
01058 
/* Install periodic timer @a handler with period @a tick on CPU @a cpuid. */
void rt_request_timer_cpuid(void (*handler)(void),
                unsigned tick,
                int cpuid);

/* Install @a handler on the per-CPU local APIC timers, configured from
   the @a tmdata array. */
void rt_request_apic_timers(void (*handler)(void),
                struct apic_timer_setup_data *tmdata);

/* Release the APIC timers taken by rt_request_apic_timers(). */
void rt_free_apic_timers(void);

/* Install timer @a handler with period @a tick; @a use_apic selects the
   local APIC timer over the 8254 PIT. */
int rt_request_timer(void (*handler)(void), unsigned tick, int use_apic);

/* Release the timer taken by rt_request_timer(). */
void rt_free_timer(void);
01071 
/* Install @a handler for CPU traps/faults; returns the handler that was
   previously installed. */
RT_TRAP_HANDLER rt_set_trap_handler(RT_TRAP_HANDLER handler);

/* Release the RTC taken by rt_request_rtc(). */
void rt_release_rtc(void);

/* Take over the CMOS RTC at @a rtc_freq Hz, dispatching to @a handler. */
void rt_request_rtc(long rtc_freq, void *handler);

/* Legacy mount/umount entry points; now empty no-ops kept for source
   compatibility. */
#define rt_mount()

#define rt_umount()

/* Read the 8254-based timestamp counter emulation. */
RTIME rd_8254_ts(void);

/* Initialize the 8254-based TSC emulation. */
void rt_setup_8254_tsc(void);

/* Install interrupt hook @a hookfn; returns the previous hook
   (presumably -- confirm in the implementation). */
void (*rt_set_ihook(void (*hookfn)(int)))(int);
01087 
01088 /* Deprecated calls. */
01089 
/* Deprecated: register @a handler on @a irq with no cookie and retmode 0.
   The cast from void (*)(void) relies on the historical RTAI handler
   calling convention; kept for source compatibility only -- use
   rt_request_irq() in new code. */
static inline int rt_request_global_irq(unsigned irq, void (*handler)(void))
{
    return rt_request_irq(irq, (int (*)(unsigned,void *))handler, 0, 0);
}
01094 
/* Deprecated: like rt_request_global_irq() but passes @a cookie to the
   handler and selects retmode 1.  Same legacy handler-cast caveat --
   use rt_request_irq() in new code. */
static inline int rt_request_global_irq_ext(unsigned irq, void (*handler)(void), unsigned long cookie)
{
    return rt_request_irq(irq, (int (*)(unsigned,void *))handler, (void *)cookie, 1);
}
01099 
/* Deprecated: attach @a cookie to @a irq.
   NOTE(review): the @a ext argument is accepted but ignored; it exists
   only for old-API source compatibility. */
static inline void rt_set_global_irq_ext(unsigned irq, int ext, unsigned long cookie)
{
    rt_set_irq_cookie(irq, (void *)cookie);
}
01104 
/* Deprecated alias for rt_release_irq(). */
static inline int rt_free_global_irq(unsigned irq)
{
    return rt_release_irq(irq);
}
01109 
01110 #ifdef __cplusplus
01111 }
01112 #endif /* __cplusplus */
01113 
01114 #endif /* __KERNEL__ */
01115 
01116 #include <asm/rtai_oldnames.h>
01117 #include <asm/rtai_emulate_tsc.h>
01118 
/* Default periodic tick (100000 -- presumably nanoseconds, confirm
   against the schedulers that consume it). */
#define RTAI_DEFAULT_TICK    100000
/* Default task stack size: larger when the tracer is compiled in. */
#ifdef CONFIG_RTAI_TRACE
#define RTAI_DEFAULT_STACKSZ 8192
#else /* !CONFIG_RTAI_TRACE */
#define RTAI_DEFAULT_STACKSZ 1024
#endif /* CONFIG_RTAI_TRACE */
01125 
01126 /*@}*/
01127 
01128 #endif /* !_RTAI_ASM_I386_HAL_H */
01129 
01130 
01131 #ifndef _RTAI_HAL_XN_H
01132 #define _RTAI_HAL_XN_H
01133 
// this is now a bit misplaced, to be moved where it should belong
/* RTAI_TRIOSS support: cooperation with a coexisting fusion domain
   (see DOMAIN_TO_STALL above).  When disabled, the macros below reduce
   to plain Linux scheduling. */
#if defined(__KERNEL__) && defined(RTAI_TRIOSS)

extern void xnpod_schedule(void);

/* Mirrors the fusion pod's "timer running" status bit in *nkpod. */
#define XNTIMED  0x00000004

extern unsigned long *nkpod;
extern int fusion_timer_running;

/* Cache whether the fusion pod currently has its timer running. */
#define SET_FUSION_TIMER_RUNNING() \
     do { fusion_timer_running = !!(*nkpod & XNTIMED); } while (0)

#define CLEAR_FUSION_TIMER_RUNNING() \
     do { fusion_timer_running = 0; } while (0)

#define IS_FUSION_TIMER_RUNNING()  (fusion_timer_running)

/* Schedule on behalf of whichever non-RTAI domain currently owns CPU
   @a cpuid: Linux when the root domain is current, the fusion pod
   otherwise. */
#define NON_RTAI_SCHEDULE(cpuid) \
do { \
    if (hal_current_domain(cpuid) == hal_root_domain) { \
        schedule(); \
    } else { \
        xnpod_schedule(); \
    } \
} while (0)

#else /* !RTAI_TRIOSS */

/* No fusion domain: the timer-status macros are no-ops and scheduling
   always goes to Linux. */
#define SET_FUSION_TIMER_RUNNING()

#define CLEAR_FUSION_TIMER_RUNNING()

#define IS_FUSION_TIMER_RUNNING()  (0)

#define NON_RTAI_SCHEDULE(cpuid)  do { schedule(); } while (0)

#endif /* END RTAI_TRIOSS */
01172 
01173 #endif /* !_RTAI_HAL_XN_H */
01174 
01175 

Generated on Tue Feb 2 17:46:04 2010 for RTAI API by  doxygen 1.4.7