base/include/asm-ppc/rtai_hal.h

Go to the documentation of this file.
00001 /**
00002  *   @ingroup hal
00003  *   @file
00004  *
00005  *   ARTI -- RTAI-compatible Adeos-based Real-Time Interface. Based on
00006  *   the original RTAI layer for PPC and the RTAI/x86 rewrite over ADEOS.
00007  *
00008  *   Original RTAI/PPC layer implementation: \n
00009  *   Copyright © 2000 Paolo Mantegazza, \n
00010  *   Copyright © 2001 David Schleef, \n
00011  *   Copyright © 2001 Lineo, Inc, \n
00012  *   Copyright © 2002 Wolfgang Grandegger. \n
00013  *
00014  *   RTAI/PPC rewrite over hal-linux patches: \n
00015  *   Copyright © 2006 Antonio Barbalace.
00016  *
00017  *   This program is free software; you can redistribute it and/or modify
00018  *   it under the terms of the GNU General Public License as published by
00019  *   the Free Software Foundation, Inc., 675 Mass Ave, Cambridge MA 02139,
00020  *   USA; either version 2 of the License, or (at your option) any later
00021  *   version.
00022  *
00023  *   This program is distributed in the hope that it will be useful,
00024  *   but WITHOUT ANY WARRANTY; without even the implied warranty of
00025  *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
00026  *   GNU General Public License for more details.
00027  *
00028  *   You should have received a copy of the GNU General Public License
00029  *   along with this program; if not, write to the Free Software
00030  *   Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
00031  */
00032 
00033 
00034 #ifndef _RTAI_ASM_PPC_HAL_H
00035 #define _RTAI_ASM_PPC_HAL_H
00036 
00037 #define RTAI_SYSCALL_MODE //__attribute__((regparm(0)))
00038 
00039 #define LOCKED_LINUX_IN_IRQ_HANDLER
00040 #define UNWRAPPED_CATCH_EVENT
00041 
00042 #include <rtai_hal_names.h>
00043 #include <asm/rtai_vectors.h>
00044 #include <rtai_types.h>
00045 
00046 #ifdef CONFIG_SMP
00047 #define RTAI_NR_CPUS  CONFIG_RTAI_CPUS
00048 #else /* !CONFIG_SMP */
00049 #define RTAI_NR_CPUS  1
00050 #endif /* CONFIG_SMP */
00051 
00052 #define NETRPC_ALIGN_RTIME(i) (1 - i%2)
00053 
00054 //---------------------------------------------------------------------------//
00055 //                         Mathematical primitives                           //
00056 //---------------------------------------------------------------------------//
00057 
/* Find the index of the first (least significant) non-zero bit of 'ul'.
 * 'ul & -ul' isolates the lowest set bit; cntlzw counts the leading
 * zeros of that one-bit word, so 31 - clz is the bit index.
 * NOTE(review): for ul == 0, cntlzw yields 32 and -1 is returned --
 * callers must pass a non-zero argument. */
static inline int ffnz(unsigned long ul)
{
    __asm__ __volatile__ ("cntlzw %0, %1" : "=r" (ul) : "r" (ul & (-ul)));
    return 31 - ul;
}
00063 
00064 /* One of the silly thing of 32 bits PPCs, no 64 bits result for 32 bits mul. */
/* One of the silly thing of 32 bits PPCs, no 64 bits result for 32 bits mul. */
/* 32x32 -> 64 unsigned multiply: mulhwu supplies the high 32 bits of
 * the product, the plain C 'm0*m1' (truncating) supplies the low 32.
 * The result words are stored through 'res' with word [0] most
 * significant, i.e. this assumes 32-bit big-endian longs (PPC32). */
static inline unsigned long long rtai_ullmul(unsigned long m0, unsigned long m1)
{
    unsigned long long res;
    /* high word of the product */
    __asm__ __volatile__ ("mulhwu %0, %1, %2"
                   : "=r" (((unsigned long *)(void *)&res)[0]) 
                   : "%r" (m0), "r" (m1));
    /* low word: C multiplication truncates to 32 bits as required */
    ((unsigned long *)(void *)&res)[1] = m0*m1;
    return res;
}
00074 
00075 /* One of the silly thing of 32 bits PPCs, no 64 by 32 bits divide. */
/* One of the silly thing of 32 bits PPCs, no 64 by 32 bits divide. */
/* Unsigned 64/32 divide: returns the 64-bit quotient of ull/uld and
 * stores the remainder through 'r', using only 32-bit divides.  Each
 * pass divides the high and low words separately; 'qf' precomputes
 * 0xFFFFFFFF/uld and 'rf' the matching wrap-around remainder so the
 * "high remainder * 2^32" term can be folded back with 32-bit math;
 * iteration continues until 'ull' drops below the divisor.  Word
 * accesses via [0]/[1] assume 32-bit big-endian longs (PPC32).
 * NOTE(review): uld == 0 divides by zero, as with plain '/'. */
static inline unsigned long long rtai_ulldiv(unsigned long long ull, unsigned long uld, unsigned long *r)
{
    unsigned long long q, rf;
    unsigned long qh, rh, ql, qf;
    
    q = 0;
    rf = (unsigned long long)(0xFFFFFFFF - (qf = 0xFFFFFFFF / uld) * uld) + 1ULL;
    while (ull >= uld) {
        /* quotient and remainder contributed by the high word */
        ((unsigned long *)(void *)&q)[0] += (qh = ((unsigned long *)(void *)&ull)[0] / uld);
        rh = ((unsigned long *)(void *)&ull)[0] - qh * uld;
        /* plus the contribution of the low word */
        q += rh * (unsigned long long)qf + (ql = ((unsigned long *)(void *)&ull)[1] / uld);
        /* fold the partial remainders back and iterate */
        ull = rh * rf + (((unsigned long *)(void *)&ull)[1] - ql * uld);
    }
    *r = ull;
    return q;
}
00092 
/* Returns (int)i = (int)i*(int)(mult)/(int)div, rounded to nearest. */
static inline int rtai_imuldiv(int i, int mult, int div)
{
    unsigned long q, r;
    /* 64-bit intermediate product avoids overflow of i*mult */
    q = rtai_ulldiv(rtai_ullmul(i, mult), div, &r);
    /* round to nearest: bump the quotient when 2*remainder > div */
    return (r + r) > div ? q + 1 : q;
}
00100 
static inline unsigned long long rtai_llimd(unsigned long long ull, unsigned long mult, unsigned long div)
{
    /* Returns ull*mult/div rounded to nearest, computed with 32-bit
     * multiply/divide primitives only.  The 64-bit operand is split
     * into high word (index [0]) and low word (index [1]) -- 32-bit
     * big-endian layout as on PPC32. */
    unsigned long long low;
    unsigned long q, r;
    
    /* partial product of the low word */
    low  = rtai_ullmul(((unsigned long *)(void *)&ull)[1], mult);   
    /* high word * mult plus the carry word of 'low', divided by div;
     * the remainder is left in low's high word for the next divide */
    q = rtai_ulldiv(rtai_ullmul(((unsigned long *)(void *)&ull)[0], mult) + ((unsigned long *)(void *)&low)[0], div, (unsigned long *)(void *)&low);
    low = rtai_ulldiv(low, div, &r);
    ((unsigned long *)(void *)&low)[0] += q;
    
    /* round to nearest on the final remainder */
    return (r + r) > div ? low + 1 : low;
}
00114 
00115 
00116 //---------------------------------------------------------------------------//
00117 //                     Synchronization primitives                            //
00118 //---------------------------------------------------------------------------//
00119 
00120 #if defined(__KERNEL__) && !defined(__cplusplus)
00121 #include <linux/sched.h>
00122 #include <linux/interrupt.h>
00123 
00124 #include <asm/system.h>
00125 #include <asm/io.h>
00126 #include <asm/time.h>
00127 
00128 #include <asm/rtai_atomic.h>
00129 #include <asm/rtai_fpu.h>
00130 #include <rtai_trace.h>
00131 
/* Per-IRQ state kept for real time interrupt dispatching. */
struct rtai_realtime_irq_s {
        int (*handler)(unsigned irq, void *cookie);  /* RT handler, installed by rt_request_irq() */
        void *cookie;                                /* opaque argument handed back to the handler */
        int retmode;                                 /* return-mode flag, set by rt_set_irq_retmode() */
        int cpumask;                                 /* presumably CPU affinity, cf. rt_assign_irq_to_cpu() -- confirm */
        int (*irq_ack)(unsigned int, void *);        /* PIC acknowledge hook, cf. rt_set_irq_ack() */
};
00139 
00140 #define RTAI_DOMAIN_ID  0x52544149
00141 #define RTAI_NR_TRAPS   HAL_NR_FAULTS
00142 #define RTAI_NR_SRQS    32
00143 
00144 #define RTAI_TIMER_DECR_IRQ       IPIPE_VIRQ_BASE
00145 #define RTAI_TIMER_8254_IRQ       RTAI_TIMER_DECR_IRQ
00146 #define RTAI_FREQ_DECR            (rtai_tunables.cpu_freq)
00147 #define RTAI_FREQ_8254            (rtai_tunables.cpu_freq)
00148 #define RTAI_LATENCY_8254         CONFIG_RTAI_SCHED_8254_LATENCY
00149 #define RTAI_SETUP_TIME_8254      500
00150 
00151 #define RTAI_TIME_LIMIT           0x7000000000000000LL
00152 
00153 #define RTAI_IFLAG  15
00154 
00155 #define rtai_cpuid()      hal_processor_id()
00156 #define rtai_tskext(idx)  hal_tskext[idx]
00157 
00158 /* Use these to grant atomic protection when accessing the hardware */
00159 #define rtai_hw_cli()                  hal_hw_cli()
00160 #define rtai_hw_sti()                  hal_hw_sti()
00161 #define rtai_hw_save_flags_and_cli(x)  hal_hw_local_irq_save(x)
00162 #define rtai_hw_restore_flags(x)       hal_hw_local_irq_restore(x)
00163 #define rtai_hw_save_flags(x)          hal_hw_local_irq_flags(x)
00164 
00165 /* Use these to grant atomic protection in hard real time code */
00166 #define rtai_cli()                  hal_hw_cli()
00167 #define rtai_sti()                  hal_hw_sti()
00168 #define rtai_save_flags_and_cli(x)  hal_hw_local_irq_save(x)
00169 #define rtai_restore_flags(x)       hal_hw_local_irq_restore(x)
00170 #define rtai_save_flags(x)          hal_hw_local_irq_flags(x)
00171 
/* Return the n-th domain (1-based) in the Adeos/I-pipe pipeline.
 * Walks hal_pipeline and returns the n-th entry.  If fewer than n
 * domains exist the loop falls through and the COUNT of domains is
 * returned cast to a pointer -- an original RTAI idiom: callers must
 * validate the result against known domain addresses, never
 * dereference it blindly. */
static inline struct hal_domain_struct *get_domain_pointer(int n)
{
    struct list_head *p = hal_pipeline.next;
    struct hal_domain_struct *d;
    unsigned long i = 0;
    while (p != &hal_pipeline) {
        d = list_entry(p, struct hal_domain_struct, p_link);
        if (++i == n) {
            return d;
        }
        p = d->p_link.next;
    }
    return (struct hal_domain_struct *)i;
}
00186 
00187 #define RTAI_LT_KERNEL_VERSION_FOR_NONPERCPU  KERNEL_VERSION(2,6,20)
00188 
00189 #if LINUX_VERSION_CODE < RTAI_LT_KERNEL_VERSION_FOR_NONPERCPU
00190 
/* Address/value of the root (Linux) domain status word for 'cpuid' --
 * pre-2.6.20 I-pipe layout (per-cpu array of pointers). */
#define ROOT_STATUS_ADR(cpuid)  (ipipe_root_status[cpuid])
#define ROOT_STATUS_VAL(cpuid)  (*ipipe_root_status[cpuid])

/* Mark 'irq' pending for 'domain' on 'cpuid': account the hit, then
 * set the two-level pending bitmask unless the irq is locked out of
 * the pipeline. */
#define hal_pend_domain_uncond(irq, domain, cpuid) \
do { \
    hal_irq_hits_pp(irq, domain, cpuid); \
    if (likely(!test_bit(IPIPE_LOCK_FLAG, &(domain)->irqs[irq].control))) { \
        __set_bit((irq) & IPIPE_IRQ_IMASK, &(domain)->cpudata[cpuid].irq_pending_lo[(irq) >> IPIPE_IRQ_ISHIFT]); \
        __set_bit((irq) >> IPIPE_IRQ_ISHIFT, &(domain)->cpudata[cpuid].irq_pending_hi); \
    } \
} while (0)

/* If any irq is pending for the root domain, hard-disable interrupts
 * and synchronize the pipeline stage now. */
#define hal_fast_flush_pipeline(cpuid) \
do { \
    if (hal_root_domain->cpudata[cpuid].irq_pending_hi != 0) { \
        rtai_cli(); \
        hal_sync_stage(IPIPE_IRQMASK_ANY); \
    } \
} while (0)
00210 
00211 #else
00212 
/* Address/value of the root (Linux) domain status word -- 2.6.20+
 * I-pipe layout (per-domain per-cpu variables); 'cpuid' is implicit. */
#define ROOT_STATUS_ADR(cpuid)  (&ipipe_cpudom_var(hal_root_domain, status))
#define ROOT_STATUS_VAL(cpuid)  (ipipe_cpudom_var(hal_root_domain, status))

/* Mark 'irq' pending for 'domain': set the two-level pending bitmask,
 * or park the irq in irqheld_mask when it is locked out of the
 * pipeline; the per-irq hit counter is bumped in either case. */
#define hal_pend_domain_uncond(irq, domain, cpuid) \
do { \
    if (likely(!test_bit(IPIPE_LOCK_FLAG, &(domain)->irqs[irq].control))) { \
        __set_bit((irq) & IPIPE_IRQ_IMASK, &ipipe_cpudom_var(domain, irqpend_lomask)[(irq) >> IPIPE_IRQ_ISHIFT]); \
        __set_bit((irq) >> IPIPE_IRQ_ISHIFT, &ipipe_cpudom_var(domain, irqpend_himask)); \
    } else { \
        __set_bit((irq) & IPIPE_IRQ_IMASK, &ipipe_cpudom_var(domain, irqheld_mask)[(irq) >> IPIPE_IRQ_ISHIFT]); \
    } \
    ipipe_cpudom_var(domain, irqall)[irq]++; \
} while (0)

/* If any irq is pending for the root domain, hard-disable interrupts
 * and synchronize the pipeline stage now. */
#define hal_fast_flush_pipeline(cpuid) \
do { \
    if (ipipe_cpudom_var(hal_root_domain, irqpend_himask) != 0) { \
        rtai_cli(); \
        hal_sync_stage(IPIPE_IRQMASK_ANY); \
    } \
} while (0)
00234 
00235 #endif
00236 
00237 #define hal_pend_uncond(irq, cpuid)  hal_pend_domain_uncond(irq, hal_root_domain, cpuid)
00238 
00239 extern volatile unsigned long *ipipe_root_status[];
00240 
/* Play back pending root-domain irqs and hard-enable interrupts, but
 * only if the root stage is not stalled (i.e. Linux does not have
 * interrupts soft-disabled). */
#define hal_test_and_fast_flush_pipeline(cpuid) \
do { \
    if (!test_bit(IPIPE_STALL_FLAG, ROOT_STATUS_ADR(cpuid))) { \
        hal_fast_flush_pipeline(cpuid); \
        rtai_sti(); \
    } \
} while (0)
00248 
/* Pin preempt_count() to 1 across a region, restoring the old value
 * afterwards.  NOTE: the two macros open and close a single,
 * deliberately unbalanced do { ... } while (0) pair -- they MUST be
 * used together, in that order, inside one scope. */
#ifdef CONFIG_PREEMPT
#define rtai_save_and_lock_preempt_count() \
        do { int *prcntp, prcnt; prcnt = xchg(prcntp = &preempt_count(), 1);
#define rtai_restore_preempt_count() \
             *prcntp = prcnt; } while (0)
#else
#define rtai_save_and_lock_preempt_count();
#define rtai_restore_preempt_count();
#endif
00258 
00259 typedef int (*rt_irq_handler_t)(unsigned irq, void *cookie);
00260 
00261 #define RTAI_CALIBRATED_CPU_FREQ   0
00262 #define RTAI_CPU_FREQ              (rtai_tunables.cpu_freq)
00263 
/* Timer and CPU calibration data (layout shared with other RTAI archs). */
struct calibration_data {
    unsigned long cpu_freq;              /* CPU/timebase frequency, see RTAI_CPU_FREQ / RTAI_FREQ_DECR */
    unsigned long apic_freq;             /* x86 layout parity; presumably unused on PPC -- confirm */
    int latency;                         /* scheduling latency -- units presumably CPU ticks, confirm */
    int setup_time_TIMER_CPUNIT;         /* timer programming overhead, CPU units */
    int setup_time_TIMER_UNIT;           /* timer programming overhead, timer units */
    int timers_tol[RTAI_NR_CPUS];        /* per-CPU timer tolerance */
};
00272 
00273 extern struct rt_times rt_times;
00274 extern struct rt_times rt_smp_times[RTAI_NR_CPUS];
00275 extern struct calibration_data rtai_tunables;
00276 extern volatile unsigned long rtai_cpu_realtime;
00277 extern volatile unsigned long rtai_cpu_lock[];
00278 
00279 #define SET_TASKPRI(cpuid)
00280 #define CLR_TASKPRI(cpuid)
00281 
00282 extern struct rtai_switch_data {
00283     volatile unsigned long sflags;
00284     volatile unsigned long lflags;
00285 } rtai_linux_context[RTAI_NR_CPUS];
00286 
/* Return only the hard interrupt-enable bit (bit RTAI_IFLAG) of the
 * current flags; all other bits are cleared and the interrupt state
 * itself is left unchanged. */
static inline unsigned long rtai_save_flags_irqbit(void)
{
    unsigned long flags;
    rtai_save_flags(flags);
    return flags & (1 << RTAI_IFLAG);
}
00293 
/* Like rtai_save_flags_irqbit(), but also hard-disables interrupts:
 * returns the previous interrupt-enable bit (bit RTAI_IFLAG), other
 * bits cleared. */
static inline unsigned long rtai_save_flags_irqbit_and_cli(void)
{
    unsigned long flags;
    rtai_save_flags_and_cli(flags);
    return flags & (1 << RTAI_IFLAG);
}
00300 
00301 #ifdef CONFIG_SMP
00302 
00303 #define SCHED_VECTOR  RTAI_SMP_NOTIFY_VECTOR
00304 #define SCHED_IPI     RTAI_SMP_NOTIFY_IPI
00305 
00306 #define _send_sched_ipi(dest)  do { mb(); mpic_send_ipi(0x2, dest); } while (0)
00307 
00308 #ifdef CONFIG_PREEMPT
00309 #define rt_spin_lock(lock)    do { barrier(); _raw_spin_lock(lock); barrier(); } while (0)
00310 #define rt_spin_unlock(lock)  do { barrier(); _raw_spin_unlock(lock); barrier(); } while (0)
00311 #else /* !CONFIG_PREEMPT */
00312 #define rt_spin_lock(lock)    spin_lock(lock)
00313 #define rt_spin_unlock(lock)  spin_unlock(lock)
00314 #endif /* CONFIG_PREEMPT */
00315 
/* Hard-disable interrupts on the local CPU, then take the lock.
 * Paired with rt_spin_unlock_hw_irq(). */
static inline void rt_spin_lock_hw_irq(spinlock_t *lock)
{
    rtai_hw_cli();
    rt_spin_lock(lock);
}
00321 
/* Release the lock, then unconditionally hard-enable interrupts. */
static inline void rt_spin_unlock_hw_irq(spinlock_t *lock)
{
    rt_spin_unlock(lock);
    rtai_hw_sti();
}
00327 
/* Save the hard interrupt flags and disable interrupts before taking
 * the lock; the returned flags are meant for
 * rt_spin_unlock_hw_irqrestore(). */
static inline unsigned long rt_spin_lock_hw_irqsave(spinlock_t *lock)
{
    unsigned long flags;
    rtai_hw_save_flags_and_cli(flags);
    rt_spin_lock(lock);
    return flags;
}
00335 
/* Release the lock, then restore the hard interrupt state previously
 * saved by rt_spin_lock_hw_irqsave(). */
static inline void rt_spin_unlock_hw_irqrestore(unsigned long flags, spinlock_t *lock)
{
    rt_spin_unlock(lock);
    rtai_hw_restore_flags(flags);
}
00341 
/* Hard-disable interrupts (rtai_cli) then take the lock. */
static inline void rt_spin_lock_irq(spinlock_t *lock)
{
    rtai_cli();
    rt_spin_lock(lock);
}
00347 
/* Release the lock, then unconditionally re-enable interrupts. */
static inline void rt_spin_unlock_irq(spinlock_t *lock)
{
    rt_spin_unlock(lock);
    rtai_sti();
}
00353 
/* Save the interrupt flags and hard-disable interrupts before taking
 * the lock; pass the returned flags to rt_spin_unlock_irqrestore(). */
static inline unsigned long rt_spin_lock_irqsave(spinlock_t *lock)
{
    unsigned long flags;
    rtai_save_flags_and_cli(flags);
    rt_spin_lock(lock);
    return flags;
}
00361 
00362 static inline void rt_spin_unlock_irqrestore(unsigned long flags, spinlock_t *lock)
00363 {
00364     rt_spin_unlock(lock);
00365     rtai_local_irq_restore(flags);
00366 }
00367 
/* Global lock helpers.  With more than one RTAI CPU a 15-bit ticket
 * lock stored in lock[1] (serving counter in bits 0-14, next-ticket
 * counter in bits 16-30) gives FIFO fairness; with a single RTAI CPU a
 * plain test-and-set on bit 31 of lock[0] suffices.  The original
 * condition was 'RTAI_NR_CPUS > 0', which is always true (RTAI_NR_CPUS
 * is at least 1) and left the uniprocessor variant as dead code; '> 1'
 * selects between the two as intended.  Both variants provide the same
 * mutual exclusion, so callers are unaffected. */
#if RTAI_NR_CPUS > 1

static inline void rtai_spin_glock(volatile unsigned long *lock)
{
    unsigned long val, owner;
#if 0
    do {
        val = lock[1];
    } while (cmpxchg(&lock[1], val, (val + 0x10000) & 0x7FFF7FFF) != val);
#else
    /* atomically take the next ticket; 'val' is the pre-increment word */
    val = atomic_add_return(0x10000, (atomic_t *)&lock[1]) - 0x10000;
#endif
    /* 'owner' is our ticket; spin until the serving counter reaches it */
    if ((owner = (val & 0x7FFF0000) >> 16) != (val & 0x7FFF)) {
        while ((lock[1] & 0x7FFF) != owner) {
             cpu_relax();
        }
    }
}

static inline void rtai_spin_gunlock(volatile unsigned long *lock)
{
    unsigned long val;
    /* advance the serving counter, keeping both halves within 15 bits */
    do {
        val = lock[1];
        cpu_relax();
    } while (cmpxchg(&lock[1], val, (val + 1) & 0x7FFF7FFF) != val);
}

#else

/* Single RT CPU: a simple test-and-set bit lock is enough. */
static inline void rtai_spin_glock(volatile unsigned long *lock)
{
    while (test_and_set_bit(31, lock)) {
        cpu_relax();
    }
    barrier();
}

static inline void rtai_spin_gunlock(volatile unsigned long *lock)
{
    test_and_clear_bit(31, lock);
    cpu_relax();
}

#endif
00413 
00414 static inline void rt_get_global_lock(void)
00415 {
00416         barrier();
00417         rtai_cli();
00418         if (!test_and_set_bit(hal_processor_id(), &rtai_cpu_lock[0])) {
00419         rtai_spin_glock(&rtai_cpu_lock[0]);
00420         }
00421         barrier();
00422 }
00423 
00424 static inline void rt_release_global_lock(void)
00425 {
00426         barrier();
00427         rtai_cli();
00428         if (test_and_clear_bit(hal_processor_id(), &rtai_cpu_lock[0])) {
00429         rtai_spin_gunlock(&rtai_cpu_lock[0]);
00430         }
00431         barrier();
00432 }
00433 
/**
 * Disable interrupts across all CPUs
 *
 * rt_global_cli hard disables interrupts (cli) on the requesting CPU and
 * acquires the global spinlock so that any other CPU synchronized by
 * this method is blocked.  Nested calls within the owner CPU will not
 * cause a deadlock on the global spinlock, as would happen for a normal
 * spinlock (the per-CPU ownership bit in rtai_cpu_lock makes the
 * acquisition reentrant).  Undone by rt_global_sti().
 */
static inline void rt_global_cli(void)
{
    rt_get_global_lock();
}
00450 
/**
 * Enable interrupts across all CPUs
 *
 * rt_global_sti releases the global lock (if this CPU holds it) and
 * then hard enables interrupts (sti) on the calling CPU.
 */
static inline void rt_global_sti(void)
{
    rt_release_global_lock();
    rtai_sti();
}
00462 
00463 /**
00464  * Save CPU flags
00465  *
00466  * rt_global_save_flags_and_cli combines rt_global_save_flags() and
00467  * rt_global_cli().
00468  */
00469 static inline int rt_global_save_flags_and_cli(void)
00470 {
00471         unsigned long flags;
00472 
00473         barrier();
00474         flags = rtai_save_flags_irqbit_and_cli();
00475         if (!test_and_set_bit(hal_processor_id(), &rtai_cpu_lock[0])) {
00476         rtai_spin_glock(&rtai_cpu_lock[0]);
00477                 barrier();
00478                 return flags | 1;
00479         }
00480         barrier();
00481         return flags;
00482 }
00483 
00484 /**
00485  * Save CPU flags
00486  *
00487  * rt_global_save_flags saves the CPU interrupt flag (IF) bit 9 of @a flags and
00488  * ORs the global lock flag in the first 8 bits of flags. From that you can
00489  * rightly infer that RTAI does not support more than 8 CPUs.
00490  */
00491 static inline void rt_global_save_flags(unsigned long *flags)
00492 {
00493         unsigned long hflags = rtai_save_flags_irqbit_and_cli();
00494 
00495         *flags = test_bit(hal_processor_id(), &rtai_cpu_lock[0]) ? hflags : hflags | 1;
00496         if (hflags) {
00497                 rtai_sti();
00498         }
00499 }
00500 
/**
 * Restore CPU flags
 *
 * rt_global_restore_flags restores the CPU hard interrupt state and the
 * state of the global inter-CPU lock according to 'flags' (as produced
 * by rt_global_save_flags_and_cli/rt_global_save_flags): bit 0 set
 * means the lock must be released, clear means it must be (re)acquired;
 * any remaining bit re-enables hard interrupts.
 */
static inline void rt_global_restore_flags(unsigned long flags)
{
    barrier();
    if (!test_and_clear_bit(0, &flags)) {
        rt_get_global_lock();
    } else {
        rt_release_global_lock();
    }
    if (flags) {
        rtai_sti();
    }
    barrier();
}
00521 
00522 #else /* !CONFIG_SMP */
00523 
00524 #define _send_sched_ipi(dest)
00525 
00526 #define rt_spin_lock(lock)
00527 #define rt_spin_unlock(lock)
00528 
00529 #define rt_spin_lock_irq(lock)    do { rtai_cli(); } while (0)
00530 #define rt_spin_unlock_irq(lock)  do { rtai_sti(); } while (0)
00531 
/* UP build: no lock needed -- just save the flags and hard-disable
 * interrupts; the 'lock' argument is ignored. */
static inline unsigned long rt_spin_lock_irqsave(spinlock_t *lock)
{
        unsigned long flags;
        rtai_save_flags_and_cli(flags);
        return flags;
}
00538 #define rt_spin_unlock_irqrestore(flags, lock)  do { rtai_restore_flags(flags); } while (0)
00539 
00540 #define rt_get_global_lock()      do { rtai_cli(); } while (0)
00541 #define rt_release_global_lock()
00542 
00543 #define rt_global_cli()  do { rtai_cli(); } while (0)
00544 #define rt_global_sti()  do { rtai_sti(); } while (0)
00545 
/* UP build: the global lock degenerates to a plain hard cli; the saved
 * flags word is returned for rt_global_restore_flags(). */
static inline unsigned long rt_global_save_flags_and_cli(void)
{
        unsigned long flags;
        rtai_save_flags_and_cli(flags);
        return flags;
}
00552 #define rt_global_restore_flags(flags)  do { rtai_restore_flags(flags); } while (0)
00553 
00554 #define rt_global_save_flags(flags)     do { rtai_save_flags(*flags); } while (0)
00555 
00556 #endif /* CONFIG_SMP */
00557 
00558 
00559 //---------------------------------------------------------------------------//
00560 //                     domain switching routines                             //
00561 //---------------------------------------------------------------------------//
00562 
extern struct hal_domain_struct rtai_domain;

/* Unconditionally switch 'cpuid' into the RTAI domain: save Linux's
 * root status word in lflags while leaving the root stage stalled,
 * mark the CPU as running hard real time (sflags = 1) and make RTAI
 * the current I-pipe domain. */
#define _rt_switch_to_real_time(cpuid) \
do { \
    rtai_linux_context[cpuid].lflags = xchg((unsigned long *)ROOT_STATUS_ADR(cpuid), (1 << IPIPE_STALL_FLAG)); \
    rtai_linux_context[cpuid].sflags = 1; \
    hal_current_domain(cpuid) = &rtai_domain; \
} while (0)

/* Switch back to the Linux domain, restoring the root status word
 * saved by _rt_switch_to_real_time(); no-op if this CPU is not in
 * real time mode. */
#define rt_switch_to_linux(cpuid) \
do { \
    if (rtai_linux_context[cpuid].sflags) { \
        hal_current_domain(cpuid) = hal_root_domain; \
        ROOT_STATUS_VAL(cpuid) = rtai_linux_context[cpuid].lflags; \
        rtai_linux_context[cpuid].sflags = 0; \
        CLR_TASKPRI(cpuid); \
    } \
} while (0)

/* Switch into the RTAI domain unless this CPU is already there. */
#define rt_switch_to_real_time(cpuid) \
do { \
    if (!rtai_linux_context[cpuid].sflags) { \
        _rt_switch_to_real_time(cpuid); \
    } \
} while (0)
00588 
/* Stubs kept for API parity with other archs -- presumably nothing to
 * do on PPC; confirm against the x86 HAL. */
#define rtai_get_intr_handler(v)
#define rtai_init_taskpri_irqs()

/* Enter hard real time and return the previous mode (0 = was running
 * Linux, 1 = already in the RTAI domain); pass the returned value to
 * rt_restore_switch_to_linux(). */
static inline int rt_save_switch_to_real_time(int cpuid)
{
    SET_TASKPRI(cpuid);
    if (!rtai_linux_context[cpuid].sflags) {
        _rt_switch_to_real_time(cpuid);
        return 0;
    } 
    return 1;
}
00601 
/* Undo rt_save_switch_to_real_time(): a zero 'sflags' (the CPU was in
 * Linux before) switches back to Linux; otherwise re-enter the RTAI
 * domain if the CPU has meanwhile dropped out of it. */
#define rt_restore_switch_to_linux(sflags, cpuid) \
do { \
    if (!sflags) { \
        rt_switch_to_linux(cpuid); \
    } else if (!rtai_linux_context[cpuid].sflags) { \
        SET_TASKPRI(cpuid); \
        _rt_switch_to_real_time(cpuid); \
    } \
} while (0)

/* Nonzero while 'cpuid' is running in hard real time (RTAI domain). */
#define in_hrt_mode(cpuid)  (rtai_linux_context[cpuid].sflags)
00613 
00614 //---------------------------------------------------------------------------//
00615 //                        Timer helper functions                             //
00616 //---------------------------------------------------------------------------//
00617 
/* Read the 64-bit PPC timebase (RTAI's TSC equivalent).  The upper
 * half is read before and after the lower half and the sequence is
 * retried on mismatch, so a carry between the two reads cannot yield a
 * torn value (standard PPC32 idiom; see the Motorola reference manual
 * for 32-bit PPCs).  Word layout via [0]/[1] assumes 32-bit big-endian
 * longs. */
static inline unsigned long long rtai_rdtsc (void)
{
    unsigned long long ts;
    unsigned long chk;
    __asm__ __volatile__ ("1: mftbu %0\n"
                  "   mftb %1\n"
                  "   mftbu %2\n"
                  "   cmpw %2,%0\n"
                  "   bne 1b\n"
                  : "=r" (((unsigned long *)&ts)[0]), 
                    "=r" (((unsigned long *)&ts)[1]), 
                    "=r" (chk));
    return ts;
}
00633 
/* Program the next timer interrupt 'delay' timebase ticks from now.
 * delay == 0 selects periodic mode: intr_time is advanced by whole
 * periods until it lies in the future and the remaining distance is
 * programmed into the decrementer; on 40x the PIT auto-reloads, so
 * nothing needs doing.  NOTE(review): the long long difference is
 * assigned to the int 'delay' -- assumes tick deltas fit in 31 bits. */
static inline void rt_set_timer_delay (int delay)
{
    /* NOTE: delay MUST be 0 if a periodic timer is being used. */
    if (delay == 0) {
#ifdef CONFIG_40x
        return;
#else  /* !CONFIG_40x */
        while ((delay = rt_times.intr_time - rtai_rdtsc()) <= 0) {
            rt_times.intr_time += rt_times.periodic_tick;
        }
#endif /* CONFIG_40x */
    }
#ifdef CONFIG_40x
    mtspr(SPRN_PIT, delay);
#else /* !CONFIG_40x */
    set_dec(delay);
#endif /* CONFIG_40x */
}
00652 
/* Store 'mode' in the kernel's per-cpu disarm_decr flag for 'cpuid'
 * (a per_cpu variable from 2.6.24 on, a plain array before).
 * Presumably a nonzero mode keeps Linux from reprogramming the
 * decrementer while RTAI owns it -- confirm against the patched
 * kernel's timer code. */
static inline void rtai_disarm_decr(int cpuid, int mode)
{
#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23)
    per_cpu(disarm_decr, cpuid) = mode;
#else
    disarm_decr[cpuid] = mode;
#endif
}
00661 
00662 //---------------------------------------------------------------------------//
00663 //                     Private interface -- internal use only                //
00664 //---------------------------------------------------------------------------//
00665 
00666 unsigned long rtai_critical_enter(void (*synch)(void));
00667 void rtai_critical_exit(unsigned long flags);
00668 
00669 int rtai_calibrate_8254(void);
00670 void rtai_set_linux_task_priority(struct task_struct *task, int policy, int prio);
00671 long rtai_catch_event (struct hal_domain_struct *ipd, unsigned long event, int (*handler)(unsigned long, void *));
00672 
00673 #endif /* __KERNEL__ && !__cplusplus */
00674 
00675 
00676 //---------------------------------------------------------------------------//
00677 //                           Public interface                                //
00678 //---------------------------------------------------------------------------//
00679 
/* Per-CPU APIC timer setup -- kept for API parity with the x86 HAL
 * (PPC has no local APIC). */
struct apic_timer_setup_data {
    int mode;   /* presumably oneshot/periodic selector -- confirm against callers */
    int count;  /* initial timer count, timer units */
};
00684 
00685 #ifdef __KERNEL__
00686 
00687 #include <linux/kernel.h>
00688  
00689 #define rtai_print_to_screen rt_printk
00690 
00691 void *ll2a(long long ll, char *s);
00692 
00693 #ifdef __cplusplus
00694  extern "C" {
00695 #endif /* __cplusplus */
00696 
00697 /*
00698  * rt irq request/free modify functions
00699  */
00700 int rt_request_irq(unsigned irq, int (*handler)(unsigned irq, void *cookie), void *cookie, int retmode);
00701 int rt_release_irq(unsigned irq);
00702 int rt_set_irq_ack(unsigned irq, int (*irq_ack)(unsigned int));
00703 void rt_set_irq_cookie(unsigned irq, void *cookie);
00704 void rt_set_irq_retmode(unsigned irq, int retmode);
00705 
/*
 * Install a real time interrupt handler and an irq acknowledge hook in
 * one call.  If installing the handler fails, its (negative) error
 * code is returned and no ack hook is set; otherwise the result of
 * rt_set_irq_ack() is returned.
 */
static inline int rt_request_irq_wack(unsigned irq, int (*handler)(unsigned irq, void *cookie), void *cookie, int retmode, int (*irq_ack)(unsigned int))
{
    int err = rt_request_irq(irq, handler, cookie, retmode);
    return err < 0 ? err : rt_set_irq_ack(irq, irq_ack);
}
00714 
00715 
00716 /*
00717  * irq/PIC management functions.
00718  */
00719 
00720 unsigned rt_startup_irq(unsigned irq);
00721 void rt_shutdown_irq(unsigned irq);
00722 
00723 void rt_enable_irq(unsigned irq);
00724 void rt_disable_irq(unsigned irq);
00725 void rt_mask_and_ack_irq(unsigned irq);
00726 void rt_unmask_irq(unsigned irq);
00727 void rt_ack_irq(unsigned irq);
00728 void rt_end_irq(unsigned irq);
00729 
00730 
00731 /*
00732  * Linux related irq function
00733  */
00734 
00735 int rt_request_linux_irq(unsigned irq, void *handler, char *name, void *dev_id);
00736 int rt_free_linux_irq(unsigned irq, void *dev_id);
00737 void rt_pend_linux_irq(unsigned irq);
00738 RTAI_SYSCALL_MODE void usr_rt_pend_linux_irq(unsigned irq);
00739 void rtai_set_linux_task_priority(struct task_struct *task, int policy, int prio);
00740 
00741 /*
00742  * srq related function
00743  */
00744 
00745 void rt_pend_linux_srq(unsigned srq);
00746 int rt_request_srq (unsigned label, void (*k_handler)(void), long long (*u_handler)(unsigned long));
00747 int rt_free_srq(unsigned srq);
00748 
00749 int rt_assign_irq_to_cpu(int irq, unsigned long cpus_mask);
00750 int rt_reset_irq_to_sym_mode(int irq);
00751 void rt_request_timer_cpuid(void (*handler)(void), unsigned tick, int cpuid);
00752 
00753 /*
00754  * timer func
00755  */
00756 
00757 int rt_request_timer(void (*handler)(void), unsigned tick, int use_apic);
00758 void rt_free_timer(void);
00759 
00760 void rt_request_rtc(long rtc_freq, void *handler);
00761 void rt_release_rtc(void);
00762 
00763 /*
00764  * setting trap/hook handler function
00765  */
00766 
00767 RT_TRAP_HANDLER rt_set_trap_handler(RT_TRAP_HANDLER handler);
00768 void (*rt_set_ihook(void (*hookfn)(int)))(int);
00769 
00770 RTIME rd_8254_ts(void);
00771 void rt_setup_8254_tsc(void);
00772 
00773 /*
00774  * real time printk
00775  */
00776 
00777 int rt_printk(const char *format, ...);
00778 int rt_sync_printk(const char *format, ...);
00779 
00780 #ifdef __cplusplus
00781  }
00782 #endif /* __cplusplus */
00783 
00784 #endif /* __KERNEL__ */
00785 
00786 #include <asm/rtai_oldnames.h>
00787 
00788 #define RTAI_DEFAULT_TICK    100000
00789 
00790 #ifdef CONFIG_RTAI_TRACE
00791 #define RTAI_DEFAULT_STACKSZ 8192
00792 #else /* !CONFIG_RTAI_TRACE */
00793 #define RTAI_DEFAULT_STACKSZ 4092
00794 #endif /* CONFIG_RTAI_TRACE */
00795 
00796 #endif /* _RTAI_ASM_PPC_HAL_H */
00797 
00798 #ifndef _RTAI_HAL_XN_H
00799 #define _RTAI_HAL_XN_H
00800 
00801 #define __range_ok(addr, size) (__range_not_ok(addr,size) == 0)
00802 
00803 #define NON_RTAI_SCHEDULE(cpuid)  do { schedule(); } while (0)
00804 
00805 #endif /* !_RTAI_HAL_XN_H */
00806 
00807 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
00808 
00809 #ifndef _ASM_GENERIC_DIV64_H
00810 #define _ASM_GENERIC_DIV64_H
00811 /*
00812  * Copyright (C) 2003 Bernardo Innocenti <bernie@develer.com>
00813  * Based on former asm-ppc/div64.h and asm-m68knommu/div64.h
00814  *
00815  * The semantics of do_div() are:
00816  *
00817  * uint32_t do_div(uint64_t *n, uint32_t base)
00818  * {
00819  *  uint32_t remainder = *n % base;
00820  *  *n = *n / base;
00821  *  return remainder;
00822  * }
00823  *
00824  * NOTE: macro parameter n is evaluated multiple times,
00825  *       beware of side effects!
00826  */
00827 
00828 //#include <linux/types.h>
00829 //#include <linux/compiler.h>
00830 
#if BITS_PER_LONG == 64

/* 64-bit longs: native 64-bit divide/modulo are available. */
# define do_div(n,base) ({                  \
    uint32_t __base = (base);               \
    uint32_t __rem;                     \
    __rem = ((uint64_t)(n)) % __base;           \
    (n) = ((uint64_t)(n)) / __base;             \
    __rem;                          \
 })

#elif BITS_PER_LONG == 32

extern uint32_t __div64_32(uint64_t *dividend, uint32_t divisor);

/* The unnecessary pointer compare is there
 * to check for type safety (n must be 64bit)
 */
# define do_div(n,base) ({              \
    uint32_t __base = (base);           \
    uint32_t __rem;                 \
    (void)(((typeof((n)) *)0) == ((uint64_t *)0));  \
    /* fast path when the value fits in 32 bits */  \
    if (likely(((n) >> 32) == 0)) {         \
        __rem = (uint32_t)(n) % __base;     \
        (n) = (uint32_t)(n) / __base;       \
    } else                      \
        __rem = __div64_32(&(n), __base);   \
    __rem;                      \
 })

#endif /* BITS_PER_LONG */
00861 
00862 #endif /* _ASM_GENERIC_DIV64_H */
00863 
00864 #endif

Generated on Tue Feb 2 17:46:04 2010 for RTAI API by  doxygen 1.4.7