base/include/rtai_schedcore.h

/*
 * Copyright (C) 1999-2008 Paolo Mantegazza <mantegazza@aero.polimi.it>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */


#ifndef _RTAI_SCHEDCORE_H
#define _RTAI_SCHEDCORE_H

#include <rtai_version.h>
#include <rtai_lxrt.h>
#include <rtai_sched.h>
#include <rtai_malloc.h>
#include <rtai_trace.h>
#include <rtai_leds.h>
#include <rtai_sem.h>
#include <rtai_rwl.h>
#include <rtai_spl.h>
#include <rtai_scb.h>
#include <rtai_mbx.h>
#include <rtai_msg.h>
#include <rtai_tbx.h>
#include <rtai_mq.h>
#include <rtai_bits.h>
#include <rtai_wd.h>
#include <rtai_tasklets.h>
#include <rtai_fifos.h>
#include <rtai_netrpc.h>
#include <rtai_shm.h>
#include <rtai_usi.h>

#ifdef __KERNEL__

#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/version.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/timex.h>
#include <linux/sched.h>
#include <asm/param.h>
#include <asm/system.h>
#include <asm/io.h>


#ifndef _RTAI_SCHED_XN_H
#define _RTAI_SCHED_XN_H

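/*
 * SKIP_IMMEDIATE_LINUX_SYSCALL() guards the immediate (in-place) servicing
 * of Linux syscalls issued from RTAI context. With the feature disabled it
 * unconditionally returns 0, forcing the deferred path; with it enabled,
 * kernels older than 2.6 still bail out for __NR_kill and __NR_rt_sigsuspend,
 * presumably because those two are not safe to serve immediately there.
 */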
#if defined(CONFIG_RTAI_IMMEDIATE_LINUX_SYSCALL) && CONFIG_RTAI_IMMEDIATE_LINUX_SYSCALL

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
#define SKIP_IMMEDIATE_LINUX_SYSCALL() \
    if (regs->LINUX_SYSCALL_NR == __NR_kill || regs->LINUX_SYSCALL_NR == __NR_rt_sigsuspend) { return 0; }
#else
#define SKIP_IMMEDIATE_LINUX_SYSCALL()
#endif

#else

#define SKIP_IMMEDIATE_LINUX_SYSCALL()  do { return 0; } while (0)

#endif

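/*
 * RTAI_TRIOSS: optional coexistence with the fusion (Xenomai) domain, so
 * that RTAI, fusion and Linux schedulers can be stacked together. The
 * declarations below mirror just enough of the fusion thread control block
 * to let the RTAI scheduler suspend and resume fusion threads.
 */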
#ifdef RTAI_TRIOSS

extern int nkgkptd;
#define FUSIONEXT  (nkgkptd)

// provisional, to be replaced by appropriate header declarations
#define XNSUSP   (0x00000001)
#define XNRELAX  (0x00000100)

typedef struct xnarchtcb {
    union i387_union fpuenv __attribute__ ((aligned (16)));
    unsigned stacksize;
    unsigned long *stackbase;
    unsigned long esp;
    unsigned long eip;
    struct task_struct *user_task;
    struct task_struct *active_task;
    unsigned long *espp;
    unsigned long *eipp;
    union i387_union *fpup;
} xnarchtcb_t;

typedef struct xnthread { xnarchtcb_t tcb; unsigned long status; } xnthread_t;

extern void xnpod_resume_thread(void *, unsigned long);
extern void xnpod_schedule(void);
// end of provisional

extern struct hal_domain_struct *fusion_domain;

extern struct klist_t fusion_wake_up_srq[];

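/*
 * Suspending/resuming a non-RTAI task: if the Linux task carries a fusion
 * thread extension and that thread is not relaxed (i.e. it runs in the
 * fusion primary domain), it is suspended/resumed through the fusion
 * nucleus; otherwise it is treated as an ordinary Linux task and woken up
 * through the wake-up service request (SRQ) rings below.
 */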
#define NON_RTAI_TASK_SUSPEND(task) \
do { \
    xnthread_t *thread; \
    if ((thread = (task->lnxtsk)->rtai_tskext(FUSIONEXT)) && !(thread->status & XNRELAX)) { \
        atomic_set_mask(XNSUSP, (atomic_t *)&thread->status); \
    } else { \
        (task->lnxtsk)->state = TASK_SOFTREALTIME; \
    } \
} while (0)

#define pend_fusion_wake_up_srq(lnxtsk, cpuid) \
do { \
    fusion_wake_up_srq[cpuid].task[fusion_wake_up_srq[cpuid].in++ & (MAX_WAKEUP_SRQ - 1)] = lnxtsk; \
    hal_pend_domain_uncond(fusion_wake_up_srq[cpuid].srq, fusion_domain, cpuid); \
} while (0)

#define NON_RTAI_TASK_RESUME(ready_task) \
do { \
    xnthread_t *thread; \
    if ((thread = (ready_task->lnxtsk)->rtai_tskext(FUSIONEXT)) && !(thread->status & XNRELAX)) { \
        pend_fusion_wake_up_srq(ready_task->lnxtsk, rtai_cpuid()); \
    } else { \
        pend_wake_up_srq(ready_task->lnxtsk, rtai_cpuid()); \
    } \
} while (0)

#define DECLARE_FUSION_WAKE_UP_STUFF \
struct klist_t fusion_wake_up_srq[MAX_WAKEUP_SRQ]; \
static void fusion_wake_up_srq_handler(unsigned srq) \
{ \
    int cpuid = srq - fusion_wake_up_srq[0].srq; \
    while (fusion_wake_up_srq[cpuid].out != fusion_wake_up_srq[cpuid].in) { \
        xnpod_resume_thread(((struct task_struct *)fusion_wake_up_srq[cpuid].task[fusion_wake_up_srq[cpuid].out++ & (MAX_WAKEUP_SRQ - 1)])->rtai_tskext(FUSIONEXT), XNSUSP); \
    } \
    xnpod_schedule(); \
} \
EXPORT_SYMBOL(fusion_wake_up_srq);

#define REQUEST_RESUME_SRQs_STUFF() \
do { \
    int cpuid; \
    for (cpuid = 0; cpuid < num_online_cpus(); cpuid++) { \
        hal_virtualize_irq(hal_root_domain, wake_up_srq[cpuid].srq = hal_alloc_irq(), wake_up_srq_handler, NULL, IPIPE_HANDLE_FLAG); \
        hal_virtualize_irq(fusion_domain, fusion_wake_up_srq[cpuid].srq = hal_alloc_irq(), fusion_wake_up_srq_handler, NULL, IPIPE_HANDLE_FLAG); \
    } \
} while (0)

#define RELEASE_RESUME_SRQs_STUFF() \
do { \
    int cpuid; \
    for (cpuid = 0; cpuid < num_online_cpus(); cpuid++) { \
        hal_virtualize_irq(hal_root_domain, wake_up_srq[cpuid].srq, NULL, NULL, 0); \
        hal_free_irq(wake_up_srq[cpuid].srq); \
        hal_virtualize_irq(fusion_domain, fusion_wake_up_srq[cpuid].srq, NULL, NULL, 0); \
        hal_free_irq(fusion_wake_up_srq[cpuid].srq); \
    } \
} while (0)

#else /* !RTAI_TRIOSS */

#define FUSIONEXT  (0)

#define DECLARE_FUSION_WAKE_UP_STUFF

#define NON_RTAI_TASK_SUSPEND(task) \
    do { (task->lnxtsk)->state = TASK_SOFTREALTIME; } while (0)

#define NON_RTAI_TASK_RESUME(ready_task) \
    do { pend_wake_up_srq(ready_task->lnxtsk, rtai_cpuid()); } while (0)

#define REQUEST_RESUME_SRQs_STUFF() \
do { \
    int cpuid; \
    for (cpuid = 0; cpuid < num_online_cpus(); cpuid++) { \
        hal_virtualize_irq(hal_root_domain, wake_up_srq[cpuid].srq = hal_alloc_irq(), wake_up_srq_handler, NULL, IPIPE_HANDLE_FLAG); \
        if (wake_up_srq[cpuid].srq != (wake_up_srq[0].srq + cpuid)) { \
            int i; \
            for (i = 0; i <= cpuid; i++) { \
                hal_virtualize_irq(hal_root_domain, wake_up_srq[i].srq, NULL, NULL, 0); \
                hal_free_irq(wake_up_srq[i].srq); \
            } \
            printk("*** NON CONSECUTIVE WAKE UP SRQs, ABORTING ***\n"); \
            return -1; \
        } \
    } \
} while (0)

#define RELEASE_RESUME_SRQs_STUFF() \
do { \
    int cpuid; \
    for (cpuid = 0; cpuid < num_online_cpus(); cpuid++) { \
        hal_virtualize_irq(hal_root_domain, wake_up_srq[cpuid].srq, NULL, NULL, 0); \
        hal_free_irq(wake_up_srq[cpuid].srq); \
    } \
} while (0)

#endif /* END RTAI_TRIOSS */

#endif /* !_RTAI_SCHED_XN_H */


extern RT_TASK rt_smp_linux_task[];

extern RT_TASK *rt_smp_current[];

extern RTIME rt_smp_time_h[];

extern int rt_smp_oneshot_timer[];

extern volatile int rt_sched_timed;

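/*
 * Scheduler memory hooks: with CONFIG_RTAI_MALLOC_BUILTIN the RTAI heap is
 * brought up here and torn down at unload; task exit handlers are available
 * whenever CONFIG_RTAI_MALLOC is on, since their descriptors are carved out
 * of that heap with rt_malloc()/rt_free(). Otherwise all four macros
 * compile away to nothing.
 */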
#ifdef CONFIG_RTAI_MALLOC
#ifdef CONFIG_RTAI_MALLOC_BUILTIN
#define sched_mem_init() \
    { if (__rtai_heap_init() != 0) { \
        return(-ENOMEM); \
    } }
#define sched_mem_end()  __rtai_heap_exit()
#else  /* CONFIG_RTAI_MALLOC_BUILTIN */
#define sched_mem_init()
#define sched_mem_end()
#endif /* !CONFIG_RTAI_MALLOC_BUILTIN */
#define call_exit_handlers(task)                __call_exit_handlers(task)
#define set_exit_handler(task, fun, arg1, arg2) __set_exit_handler(task, fun, arg1, arg2)
#else  /* !CONFIG_RTAI_MALLOC */
#define sched_mem_init()
#define sched_mem_end()
#define call_exit_handlers(task)
#define set_exit_handler(task, fun, arg1, arg2)
#endif /* CONFIG_RTAI_MALLOC */

#define SEMHLF 0x0000FFFF
#define RPCHLF 0xFFFF0000
#define RPCINC 0x00010000

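/*
 * Per-CPU "current task" accessors. DECLARE_RT_CURRENT/ASSIGN_RT_CURRENT
 * are used in pairs inside scheduler functions to cache both the CPU id
 * and the task running on it; RT_CURRENT is the one-shot variant. A
 * typical, purely illustrative, use:
 *
 *  DECLARE_RT_CURRENT;
 *  flags = rt_global_save_flags_and_cli();
 *  ASSIGN_RT_CURRENT;
 *  // ... use cpuid and rt_current under the global lock ...
 *  rt_global_restore_flags(flags);
 */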
#define DECLARE_RT_CURRENT int cpuid; RT_TASK *rt_current
#define ASSIGN_RT_CURRENT rt_current = rt_smp_current[cpuid = rtai_cpuid()]
#define RT_CURRENT rt_smp_current[rtai_cpuid()]

#define MAX_LINUX_RTPRIO  99
#define MIN_LINUX_RTPRIO   1

#ifdef CONFIG_RTAI_SCHED_ISR_LOCK
void rtai_handle_isched_lock(int nesting);
#endif /* CONFIG_RTAI_SCHED_ISR_LOCK */

#ifdef CONFIG_SMP
#define rt_time_h (rt_smp_time_h[cpuid])
#define oneshot_timer (rt_smp_oneshot_timer[cpuid])
#define rt_linux_task (rt_smp_linux_task[cpuid])
#else
#define rt_time_h (rt_smp_time_h[0])
#define oneshot_timer (rt_smp_oneshot_timer[0])
#define rt_linux_task (rt_smp_linux_task[0])
#endif

/*
 * WATCH OUT for the maximum expected number of arguments of RTAI
 * functions, and for the argument-unrolling macros below, which are
 * scattered across the different calling paths and must stay consistent
 * with it.
 */

#define RTAI_MAX_FUN_ARGS  9
struct fun_args { unsigned long a[RTAI_MAX_FUN_ARGS]; RTAI_SYSCALL_MODE long long (*fun)(unsigned long, ...); };
// used in sys.c
#define RTAI_FUN_ARGS  arg[0],arg[1],arg[2],arg[3],arg[4],arg[5],arg[6],arg[7],arg[RTAI_MAX_FUN_ARGS - 1]
// used in sched.c and netrpc.c (generalised calls from soft threads)
#define RTAI_FUNARGS   funarg->a[0],funarg->a[1],funarg->a[2],funarg->a[3],funarg->a[4],funarg->a[5],funarg->a[6],funarg->a[7],funarg->a[RTAI_MAX_FUN_ARGS - 1]
// used in netrpc.c
#define RTAI_FUN_A     a[0],a[1],a[2],a[3],a[4],a[5],a[6],a[7],a[RTAI_MAX_FUN_ARGS - 1]

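/*
 * Cross-CPU rescheduling: on SMP, the RT_SCHEDULE* macros either
 * reschedule locally or kick the CPUs named in a bitmask ("schedmap")
 * with an IPI, the _BOTH variants doing both; on UP they all collapse
 * into a plain rt_schedule().
 */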
#ifdef CONFIG_SMP

static inline void send_sched_ipi(unsigned long dest)
{
    _send_sched_ipi(dest);
}

#define RT_SCHEDULE_MAP(schedmap) \
    do { if (schedmap) send_sched_ipi(schedmap); } while (0)

#define RT_SCHEDULE_MAP_BOTH(schedmap) \
    do { if (schedmap) send_sched_ipi(schedmap); rt_schedule(); } while (0)

#define RT_SCHEDULE(task, cpuid) \
    do { \
        if ((task)->runnable_on_cpus != (cpuid)) { \
            send_sched_ipi(1 << (task)->runnable_on_cpus); \
        } else { \
            rt_schedule(); \
        } \
    } while (0)

#define RT_SCHEDULE_BOTH(task, cpuid) \
    { \
        if ((task)->runnable_on_cpus != (cpuid)) { \
            send_sched_ipi(1 << (task)->runnable_on_cpus); \
        } \
        rt_schedule(); \
    }

#else /* !CONFIG_SMP */

#define send_sched_ipi(dest)

#define RT_SCHEDULE_MAP_BOTH(schedmap)  rt_schedule()

#define RT_SCHEDULE_MAP(schedmap)       rt_schedule()

#define RT_SCHEDULE(task, cpuid)        rt_schedule()

#define RT_SCHEDULE_BOTH(task, cpuid)   rt_schedule()

#endif /* CONFIG_SMP */

#define BASE_SOFT_PRIORITY 1000000000

#ifndef TASK_NOWAKEUP
#define TASK_NOWAKEUP  TASK_UNINTERRUPTIBLE
#endif

#define TASK_HARDREALTIME  (TASK_INTERRUPTIBLE | TASK_NOWAKEUP)
#define TASK_RTAISRVSLEEP  (TASK_INTERRUPTIBLE | TASK_NOWAKEUP)
#define TASK_SOFTREALTIME  TASK_INTERRUPTIBLE

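/*
 * EDF ready queue insertion: EDF tasks have a negative policy and sit at
 * the front of the ready list, kept in non-decreasing order of their
 * period field; the scan stops at the first non-EDF task (policy >= 0) or
 * at an EDF task with a larger period, and the new task is linked in just
 * before it.
 */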
static inline void enq_ready_edf_task(RT_TASK *ready_task)
{
    RT_TASK *task;
#ifdef CONFIG_SMP
    task = rt_smp_linux_task[ready_task->runnable_on_cpus].rnext;
#else
    task = rt_smp_linux_task[0].rnext;
#endif
    while (task->policy < 0 && ready_task->period >= task->period) {
        task = task->rnext;
    }
    task->rprev = (ready_task->rprev = task->rprev)->rnext = ready_task;
    ready_task->rnext = task;
}

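/*
 * boot_epoch holds double-buffered (time[2][2], selected by "touse")
 * correspondences between the Linux wall clock and the RTAI time base;
 * REALTIME2COUNT() uses it to turn an absolute CLOCK_REALTIME value into
 * an internal count when CONFIG_RTAI_CLOCK_REALTIME is enabled.
 */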
struct epoch_struct { spinlock_t lock; volatile int touse; volatile RTIME time[2][2]; };

#ifdef CONFIG_RTAI_CLOCK_REALTIME
#define REALTIME2COUNT(rtime) \
    if (rtime > boot_epoch.time[boot_epoch.touse][0]) { \
        rtime -= boot_epoch.time[boot_epoch.touse][0]; \
    }
#else
#define REALTIME2COUNT(rtime)
#endif

#define MAX_WAKEUP_SRQ (2 << 7)

struct klist_t { int srq; volatile unsigned long in, out; void *task[MAX_WAKEUP_SRQ]; };
extern struct klist_t wake_up_srq[];

#define pend_wake_up_srq(lnxtsk, cpuid) \
do { \
    wake_up_srq[cpuid].task[wake_up_srq[cpuid].in++ & (MAX_WAKEUP_SRQ - 1)] = lnxtsk; \
    hal_pend_uncond(wake_up_srq[cpuid].srq, cpuid); \
} while (0)

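/*
 * Ready queue management. Hard real-time tasks are linked into the per-CPU
 * ready list in ascending priority value (lower value = higher priority),
 * after any peers of equal priority; the scan breaks at the element with a
 * negative priority, which acts as the sentinel. Soft tasks are instead
 * flagged RT_SCHED_SFTRDY and woken up on the Linux side via the wake-up
 * SRQ. renq_ready_task() requeues a ready task after a priority change and
 * reports whether the priority actually changed; the rem_ready_* helpers
 * unlink a task, putting its Linux mate to sleep if the task is soft.
 */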
static inline void enq_ready_task(RT_TASK *ready_task)
{
    RT_TASK *task;
    if (ready_task->is_hard) {
#ifdef CONFIG_SMP
        task = rt_smp_linux_task[ready_task->runnable_on_cpus].rnext;
#else
        task = rt_smp_linux_task[0].rnext;
#endif
        while (ready_task->priority >= task->priority) {
            if ((task = task->rnext)->priority < 0) break;
        }
        task->rprev = (ready_task->rprev = task->rprev)->rnext = ready_task;
        ready_task->rnext = task;
    } else {
        ready_task->state |= RT_SCHED_SFTRDY;
        NON_RTAI_TASK_RESUME(ready_task);
    }
}

static inline int renq_ready_task(RT_TASK *ready_task, int priority)
{
    int retval;
    if ((retval = ready_task->priority != priority)) {
        ready_task->priority = priority;
        if (ready_task->state == RT_SCHED_READY) {
            (ready_task->rprev)->rnext = ready_task->rnext;
            (ready_task->rnext)->rprev = ready_task->rprev;
            enq_ready_task(ready_task);
        }
    }
    return retval;
}

static inline void rem_ready_task(RT_TASK *task)
{
    if (task->state == RT_SCHED_READY) {
        if (!task->is_hard) {
            NON_RTAI_TASK_SUSPEND(task);
        }
//      task->unblocked = 0;
        (task->rprev)->rnext = task->rnext;
        (task->rnext)->rprev = task->rprev;
    }
}

static inline void rem_ready_current(RT_TASK *rt_current)
{
    if (!rt_current->is_hard) {
        NON_RTAI_TASK_SUSPEND(rt_current);
    }
//  rt_current->unblocked = 0;
    (rt_current->rprev)->rnext = rt_current->rnext;
    (rt_current->rnext)->rprev = rt_current->rprev;
}

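/*
 * Timed (delayed) task list, ordered by resume_time. Two implementations
 * are provided: with CONFIG_RTAI_LONG_TIMED_LIST the doubly linked list is
 * backed by a red-black tree, keeping insertion O(log n) for large task
 * populations; otherwise a plain linear scan is used and rb_erase_task()
 * compiles away to nothing.
 */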
#ifdef CONFIG_RTAI_LONG_TIMED_LIST

/* BINARY TREE */
static inline void enq_timed_task(RT_TASK *timed_task)
{
    RT_TASK *taskh, *tsknxt, *task;
    rb_node_t **rbtn, *rbtpn = NULL;
#ifdef CONFIG_SMP
    task = taskh = &rt_smp_linux_task[timed_task->runnable_on_cpus];
#else
    task = taskh = &rt_smp_linux_task[0];
#endif
    rbtn = &taskh->rbr.rb_node;

    while (*rbtn) {
        rbtpn = *rbtn;
        tsknxt = rb_entry(rbtpn, RT_TASK, rbn);
        if (timed_task->resume_time > tsknxt->resume_time) {
            rbtn = &(rbtpn)->rb_right;
        } else {
            rbtn = &(rbtpn)->rb_left;
            task = tsknxt;
        }
    }
    rb_link_node(&timed_task->rbn, rbtpn, rbtn);
    rb_insert_color(&timed_task->rbn, &taskh->rbr);
    task->tprev = (timed_task->tprev = task->tprev)->tnext = timed_task;
    timed_task->tnext = task;
}

#define rb_erase_task(task, cpuid) \
    rb_erase(&(task)->rbn, &rt_smp_linux_task[cpuid].rbr);

#else /* !CONFIG_RTAI_LONG_TIMED_LIST */

/* LINEAR */
static inline void enq_timed_task(RT_TASK *timed_task)
{
    RT_TASK *task;
#ifdef CONFIG_SMP
    task = rt_smp_linux_task[timed_task->runnable_on_cpus].tnext;
#else
    task = rt_smp_linux_task[0].tnext;
#endif
    while (timed_task->resume_time > task->resume_time) {
        task = task->tnext;
    }
    task->tprev = (timed_task->tprev = task->tprev)->tnext = timed_task;
    timed_task->tnext = task;
}

#define rb_erase_task(task, cpuid)

#endif /* !CONFIG_RTAI_LONG_TIMED_LIST */

static inline void rem_timed_task(RT_TASK *task)
{
    if ((task->state & RT_SCHED_DELAYED)) {
        (task->tprev)->tnext = task->tnext;
        (task->tnext)->tprev = task->tprev;
#ifdef CONFIG_SMP
        rb_erase_task(task, task->runnable_on_cpus);
#else
        rb_erase_task(task, 0);
#endif
    }
}

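/*
 * wake_up_timed_tasks() is called from the timer handler: it pops every
 * task whose resume_time has expired (<= rt_time_h), clears its blocking
 * state bits and, if nothing else keeps it blocked, moves it to the ready
 * list (EDF or priority ordered as appropriate), finally relinking the
 * list head past the woken tasks.
 */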
static inline void wake_up_timed_tasks(int cpuid)
{
    RT_TASK *taskh, *task;
#ifdef CONFIG_SMP
    task = (taskh = &rt_smp_linux_task[cpuid])->tnext;
#else
    task = (taskh = &rt_smp_linux_task[0])->tnext;
#endif
    if (task->resume_time <= rt_time_h) {
        do {
            if ((task->state &= ~(RT_SCHED_DELAYED | RT_SCHED_SUSPENDED | RT_SCHED_SEMAPHORE | RT_SCHED_RECEIVE | RT_SCHED_SEND | RT_SCHED_RPC | RT_SCHED_RETURN | RT_SCHED_MBXSUSP | RT_SCHED_POLL)) == RT_SCHED_READY) {
                if (task->policy < 0) {
                    enq_ready_edf_task(task);
                } else {
                    enq_ready_task(task);
                }
#if defined(CONFIG_RTAI_BUSY_TIME_ALIGN) && CONFIG_RTAI_BUSY_TIME_ALIGN
                task->busy_time_align = oneshot_timer;
#endif
            }
            rb_erase_task(task, cpuid);
            task = task->tnext;
        } while (task->resume_time <= rt_time_h);
#ifdef CONFIG_SMP
        rt_smp_linux_task[cpuid].tnext = task;
        task->tprev = &rt_smp_linux_task[cpuid];
#else
        rt_smp_linux_task[0].tnext = task;
        task->tprev = &rt_smp_linux_task[0];
#endif
    }
}

#define get_time() rt_get_time()
#if 0
static inline RTIME get_time(void)
{
#ifdef CONFIG_SMP
    int cpuid;
    return rt_smp_oneshot_timer[cpuid = rtai_cpuid()] ? rdtsc() : rt_smp_times[cpuid].tick_time;
#else
    return rt_smp_oneshot_timer[0] ? rdtsc() : rt_smp_times[0].tick_time;
#endif
}
#endif

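/*
 * Blocked task queues: a task waiting on a semaphore, message or RPC is
 * linked into the owning object's queue, FIFO when qtype != 0 and ordered
 * by priority otherwise; blocked_on records what it is waiting for, so
 * dequeue_blocked() can undo the link and clear any pending priority
 * inheritance bookkeeping (prio_passed_to).
 */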
static inline void enqueue_blocked(RT_TASK *task, QUEUE *queue, int qtype)
{
    QUEUE *q;
    task->blocked_on = (q = queue);
    if (!qtype) {
        while ((q = q->next) != queue && (q->task)->priority <= task->priority);
    }
    q->prev = (task->queue.prev = q->prev)->next = &(task->queue);
    task->queue.next = q;
}

static inline void dequeue_blocked(RT_TASK *task)
{
    task->prio_passed_to     = NULL;
    (task->queue.prev)->next = task->queue.next;
    (task->queue.next)->prev = task->queue.prev;
    task->blocked_on         = NULL;
}

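/*
 * pass_prio() implements priority inheritance: it walks the chain of
 * tasks "from" is (transitively) blocked on and, for every task "to"
 * with a worse (numerically larger) priority, bumps it to "from"'s.
 * A boosted task that is ready gets repositioned in its ready list; a
 * boosted task blocked on a priority-inheriting resource is repositioned
 * in that wait queue and the walk continues with the resource owner.
 * On SMP the returned bitmask names the CPUs whose ready list head
 * changed, so the caller can kick them, e.g. with RT_SCHEDULE_MAP().
 */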
static inline unsigned long pass_prio(RT_TASK *to, RT_TASK *from)
{
    QUEUE *q, *blocked_on;
#ifdef CONFIG_SMP
    RT_TASK *rhead;
    unsigned long schedmap;
    schedmap = 0;
#endif
//  from->prio_passed_to = to;
    while (to && to->priority > from->priority) {
        to->priority = from->priority;
        if (to->state == RT_SCHED_READY) {
            if ((to->rprev)->priority > to->priority || (to->rnext)->priority < to->priority) {
#ifdef CONFIG_SMP
                rhead = rt_smp_linux_task[to->runnable_on_cpus].rnext;
#endif
                (to->rprev)->rnext = to->rnext;
                (to->rnext)->rprev = to->rprev;
                enq_ready_task(to);
#ifdef CONFIG_SMP
                if (rhead != rt_smp_linux_task[to->runnable_on_cpus].rnext) {
                    __set_bit(to->runnable_on_cpus & 0x1F, &schedmap);
                }
#endif
            }
            break;
//      } else if ((void *)(q = to->blocked_on) > RTE_HIGERR && !((to->state & RT_SCHED_SEMAPHORE) && ((SEM *)q)->qtype)) {
        } else if ((unsigned long)(blocked_on = to->blocked_on) > RTE_HIGERR && (((to->state & RT_SCHED_SEMAPHORE) && ((SEM *)blocked_on)->type > 0) || (to->state & (RT_SCHED_SEND | RT_SCHED_RPC | RT_SCHED_RETURN)))) {
            if (to->queue.prev != blocked_on) {
                q = blocked_on;
                (to->queue.prev)->next = to->queue.next;
                (to->queue.next)->prev = to->queue.prev;
                while ((q = q->next) != blocked_on && (q->task)->priority <= to->priority);
                q->prev = (to->queue.prev = q->prev)->next = &(to->queue);
                to->queue.next = q;
                if (to->queue.prev != blocked_on) {
                    break;
                }
            }
            to = (to->state & RT_SCHED_SEMAPHORE) ? ((SEM *)blocked_on)->owndby : blocked_on->task;
        }
//      to = to->prio_passed_to;
    }
#ifdef CONFIG_SMP
    return schedmap;
#else
    return 0;
#endif
}

static inline RT_TASK *_rt_whoami(void)
{
#ifdef CONFIG_SMP
    RT_TASK *rt_current;
    unsigned long flags;
    flags = rt_global_save_flags_and_cli();
    rt_current = RT_CURRENT;
    rt_global_restore_flags(flags);
    return rt_current;
#else
    return rt_smp_current[0];
#endif
}

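/*
 * Task exit handlers: __set_exit_handler() pushes a handler onto the
 * task's ExitHook list (LIFO, so handlers run in reverse creation order,
 * like C++ destructors), and __call_exit_handlers() pops and runs them at
 * deletion time, freeing each node back to the RTAI heap. Illustrative
 * sketch, with hypothetical handler, task and buffer names:
 *
 *  static void free_buf(void *buf, int sz) { rt_free(buf); }
 *  ...
 *  set_exit_handler(mytask, free_buf, mybuf, BUFSZ);
 *  // rt_task_delete(mytask) will now call free_buf(mybuf, BUFSZ)
 */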
static inline void __call_exit_handlers(RT_TASK *task)
{
    XHDL *pt, *tmp;

    pt = task->ExitHook; // ExitHook is initialised in rt_task_init()
    while (pt) {
        (*pt->fun) (pt->arg1, pt->arg2);
        tmp = pt;
        pt  = pt->nxt;
        rt_free(tmp);
    }
    task->ExitHook = 0;
}

static inline XHDL *__set_exit_handler(RT_TASK *task, void (*fun) (void *, int), void *arg1, int arg2)
{
    XHDL *p;

    // exit handler functions are automatically executed at termination time by rt_task_delete()
    // in the reverse order they were created (like C++ destructors behave).
    if (task->magic != RT_TASK_MAGIC) return 0;
    if (!(p = (XHDL *) rt_malloc (sizeof(XHDL)))) return 0;
    p->fun  = fun;
    p->arg1 = arg1;
    p->arg2 = arg2;
    p->nxt  = task->ExitHook;
    return (task->ExitHook = p);
}

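/*
 * Built-in feature bring-up/teardown: each subsystem compiled into the
 * scheduler (LEDs, semaphores, messages, mailboxes, typed mailboxes,
 * pqueues, bits, tasklets, fifos, netrpc, shm, math) is initialised here,
 * and rtai_cleanup_features() undoes it in exactly the reverse order.
 */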
static inline int rtai_init_features (void)
{
#ifdef CONFIG_RTAI_LEDS_BUILTIN
    __rtai_leds_init();
#endif /* CONFIG_RTAI_LEDS_BUILTIN */
#ifdef CONFIG_RTAI_SEM_BUILTIN
    __rtai_sem_init();
#endif /* CONFIG_RTAI_SEM_BUILTIN */
#ifdef CONFIG_RTAI_MSG_BUILTIN
    __rtai_msg_init();
#endif /* CONFIG_RTAI_MSG_BUILTIN */
#ifdef CONFIG_RTAI_MBX_BUILTIN
    __rtai_mbx_init();
#endif /* CONFIG_RTAI_MBX_BUILTIN */
#ifdef CONFIG_RTAI_TBX_BUILTIN
    __rtai_msg_queue_init();
#endif /* CONFIG_RTAI_TBX_BUILTIN */
#ifdef CONFIG_RTAI_MQ_BUILTIN
    __rtai_mq_init();
#endif /* CONFIG_RTAI_MQ_BUILTIN */
#ifdef CONFIG_RTAI_BITS_BUILTIN
    __rtai_bits_init();
#endif /* CONFIG_RTAI_BITS_BUILTIN */
#ifdef CONFIG_RTAI_TASKLETS_BUILTIN
    __rtai_tasklets_init();
#endif /* CONFIG_RTAI_TASKLETS_BUILTIN */
#ifdef CONFIG_RTAI_FIFOS_BUILTIN
    __rtai_fifos_init();
#endif /* CONFIG_RTAI_FIFOS_BUILTIN */
#ifdef CONFIG_RTAI_NETRPC_BUILTIN
    __rtai_netrpc_init();
#endif /* CONFIG_RTAI_NETRPC_BUILTIN */
#ifdef CONFIG_RTAI_SHM_BUILTIN
    __rtai_shm_init();
#endif /* CONFIG_RTAI_SHM_BUILTIN */
#ifdef CONFIG_RTAI_MATH_BUILTIN
    __rtai_math_init();
#endif /* CONFIG_RTAI_MATH_BUILTIN */
#ifdef CONFIG_RTAI_USI
    printk(KERN_INFO "RTAI[usi]: enabled.\n");
#endif /* CONFIG_RTAI_USI */

    return 0;
}

static inline void rtai_cleanup_features (void)
{
#ifdef CONFIG_RTAI_MATH_BUILTIN
    __rtai_math_exit();
#endif /* CONFIG_RTAI_MATH_BUILTIN */
#ifdef CONFIG_RTAI_SHM_BUILTIN
    __rtai_shm_exit();
#endif /* CONFIG_RTAI_SHM_BUILTIN */
#ifdef CONFIG_RTAI_NETRPC_BUILTIN
    __rtai_netrpc_exit();
#endif /* CONFIG_RTAI_NETRPC_BUILTIN */
#ifdef CONFIG_RTAI_FIFOS_BUILTIN
    __rtai_fifos_exit();
#endif /* CONFIG_RTAI_FIFOS_BUILTIN */
#ifdef CONFIG_RTAI_TASKLETS_BUILTIN
    __rtai_tasklets_exit();
#endif /* CONFIG_RTAI_TASKLETS_BUILTIN */
#ifdef CONFIG_RTAI_BITS_BUILTIN
    __rtai_bits_exit();
#endif /* CONFIG_RTAI_BITS_BUILTIN */
#ifdef CONFIG_RTAI_MQ_BUILTIN
    __rtai_mq_exit();
#endif /* CONFIG_RTAI_MQ_BUILTIN */
#ifdef CONFIG_RTAI_TBX_BUILTIN
    __rtai_msg_queue_exit();
#endif /* CONFIG_RTAI_TBX_BUILTIN */
#ifdef CONFIG_RTAI_MBX_BUILTIN
    __rtai_mbx_exit();
#endif /* CONFIG_RTAI_MBX_BUILTIN */
#ifdef CONFIG_RTAI_MSG_BUILTIN
    __rtai_msg_exit();
#endif /* CONFIG_RTAI_MSG_BUILTIN */
#ifdef CONFIG_RTAI_SEM_BUILTIN
    __rtai_sem_exit();
#endif /* CONFIG_RTAI_SEM_BUILTIN */
#ifdef CONFIG_RTAI_LEDS_BUILTIN
    __rtai_leds_exit();
#endif /* CONFIG_RTAI_LEDS_BUILTIN */
}

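/*
 * Entry points implemented by the scheduler modules: stack usage checking
 * and kernel-thread based task initialisation; the _cpuid variant also
 * takes the CPU the new task must run on.
 */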
int rt_check_current_stack(void);

int rt_kthread_init(RT_TASK *task,
            void (*rt_thread)(long),
            long data,
            int stack_size,
            int priority,
            int uses_fpu,
            void (*signal)(void));

int rt_kthread_init_cpuid(RT_TASK *task,
              void (*rt_thread)(long),
              long data,
              int stack_size,
              int priority,
              int uses_fpu,
              void (*signal)(void),
              unsigned int cpuid);

#endif /* __KERNEL__ */

#endif /* !_RTAI_SCHEDCORE_H */
