00001
00002
00003
00004
00005
00006
00007
00008
00009
00010
00011
00012
00013
00014
00015
00016
00017
00018
00019
00020
00021
00022
00023
00024
00025
00026
00027
00028
00029 #include <linux/module.h>
00030 #include <linux/kernel.h>
00031 #include <linux/version.h>
00032 #include <linux/errno.h>
00033 #include <linux/slab.h>
00034 #include <linux/timex.h>
00035 #include <linux/sched.h>
00036 #include <linux/irq.h>
00037 #include <linux/reboot.h>
00038 #include <linux/sys.h>
00039 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,18)
00040 #include <linux/oom.h>
00041 #endif
00042
00043 #include <asm/param.h>
00044 #include <asm/system.h>
00045 #include <asm/io.h>
00046 #include <asm/uaccess.h>
00047 #include <asm/mmu_context.h>
00048
00049 #define __KERNEL_SYSCALLS__
00050 #include <linux/unistd.h>
00051
00052 #ifdef CONFIG_PROC_FS
00053 #include <linux/stat.h>
00054 #include <linux/proc_fs.h>
00055 #include <rtai_proc_fs.h>
00056 static int rtai_proc_sched_register(void);
00057 static void rtai_proc_sched_unregister(void);
00058 int rtai_proc_lxrt_register(void);
00059 void rtai_proc_lxrt_unregister(void);
00060 #endif
00061
00062 #include <rtai.h>
00063 #include <asm/rtai_sched.h>
00064 #include <rtai_lxrt.h>
00065 #include <rtai_registry.h>
00066 #include <rtai_nam2num.h>
00067 #include <rtai_schedcore.h>
00068 #include <rtai_prinher.h>
00069 #include <rtai_signal.h>
00070
00071 MODULE_LICENSE("GPL");
00072
00073
00074
00075 RT_TASK rt_smp_linux_task[NR_RT_CPUS];
00076
00077 RT_TASK *rt_smp_current[NR_RT_CPUS];
00078
00079 RTIME rt_smp_time_h[NR_RT_CPUS];
00080
00081 int rt_smp_oneshot_timer[NR_RT_CPUS];
00082
00083 volatile int rt_sched_timed;
00084
00085 static struct klist_t wake_up_sth[NR_RT_CPUS];
00086 struct klist_t wake_up_hts[NR_RT_CPUS];
00087 struct klist_t wake_up_srq[NR_RT_CPUS];
00088
00089
00090
00091 extern struct { volatile int locked, rqsted; } rt_scheduling[];
00092
00093 static unsigned long rt_smp_linux_cr0[NR_RT_CPUS];
00094
00095 static RT_TASK *rt_smp_fpu_task[NR_RT_CPUS];
00096
00097 static int rt_smp_half_tick[NR_RT_CPUS];
00098
00099 static int rt_smp_oneshot_running[NR_RT_CPUS];
00100
00101 static volatile int rt_smp_timer_shot_fired[NR_RT_CPUS];
00102
00103 static struct rt_times *linux_times;
00104
00105 static RT_TASK *lxrt_wdog_task[NR_RT_CPUS];
00106
00107 RT_TASK *lxrt_prev_task[NR_RT_CPUS];
00108
00109 static int lxrt_notify_reboot(struct notifier_block *nb,
00110 unsigned long event,
00111 void *ptr);
00112
00113 static struct notifier_block lxrt_reboot_notifier = {
00114 .notifier_call = &lxrt_notify_reboot,
00115 .next = NULL,
00116 .priority = 0
00117 };
00118
00119 static struct klist_t klistm[NR_RT_CPUS];
00120
00121 static struct task_struct *kthreadm[NR_RT_CPUS];
00122
00123 static struct semaphore resem[NR_RT_CPUS];
00124
00125 static int endkthread;
00126
00127 #define fpu_task (rt_smp_fpu_task[cpuid])
00128
00129 #define rt_half_tick (rt_smp_half_tick[cpuid])
00130
00131 #define oneshot_running (rt_smp_oneshot_running[cpuid])
00132
00133 #define oneshot_timer_cpuid (rt_smp_oneshot_timer[rtai_cpuid()])
00134
00135 #define timer_shot_fired (rt_smp_timer_shot_fired[cpuid])
00136
00137 #define rt_times (rt_smp_times[cpuid])
00138
00139 #define linux_cr0 (rt_smp_linux_cr0[cpuid])
00140
00141 #define MAX_FRESTK_SRQ (2 << 6)
00142 static struct { int srq; volatile unsigned long in, out; void *mp[MAX_FRESTK_SRQ]; } frstk_srq;
00143
00144 #define KTHREAD_M_PRIO MAX_LINUX_RTPRIO
00145 #define KTHREAD_F_PRIO MAX_LINUX_RTPRIO
00146
00147 #ifdef CONFIG_SMP
00148
00149 extern void rt_set_sched_ipi_gate(void);
00150 extern void rt_reset_sched_ipi_gate(void);
00151 static void rt_schedule_on_schedule_ipi(void);
00152
/*
 * Install the cross-CPU reschedule handler on the SCHED_IPI vector and
 * open the IPI gate so other CPUs may kick this one into rt_schedule().
 * Returns the rt_request_irq() result (0 on success, negative errno
 * style otherwise); the gate is opened unconditionally — NOTE(review):
 * presumably rt_set_sched_ipi_gate() is harmless when the request
 * failed, confirm against the arch layer.
 */
static inline int rt_request_sched_ipi(void)
{
	int retval;
	retval = rt_request_irq(SCHED_IPI, (void *)rt_schedule_on_schedule_ipi, NULL, 0);
	rt_set_sched_ipi_gate();
	return retval;
}
00160
00161 #define rt_free_sched_ipi() \
00162 do { \
00163 rt_release_irq(SCHED_IPI); \
00164 rt_reset_sched_ipi_gate(); \
00165 } while (0)
00166
/*
 * Acquire the RTAI global scheduler lock for this CPU.
 * Each CPU owns one bit in rtai_cpu_lock[0]; only the first setter of
 * its bit spins on the actual global lock, making the acquisition
 * recursive per CPU (nested calls are no-ops until the matching
 * release clears the bit). The barrier()s keep the compiler from
 * reordering accesses across the lock boundary.
 */
static inline void sched_get_global_lock(int cpuid)
{
	barrier();
	if (!test_and_set_bit(cpuid, &rtai_cpu_lock[0])) {
		rtai_spin_glock(&rtai_cpu_lock[0]);
	}
	barrier();
}
00175
/*
 * Release the RTAI global scheduler lock for this CPU.
 * Mirror of sched_get_global_lock(): the global spinlock is dropped
 * only if this CPU actually held its ownership bit, so an unbalanced
 * release is a safe no-op.
 */
static inline void sched_release_global_lock(int cpuid)
{
	barrier();
	if (test_and_clear_bit(cpuid, &rtai_cpu_lock[0])) {
		rtai_spin_gunlock(&rtai_cpu_lock[0]);
	}
	barrier();
}
00184
00185 #else
00186
00187 #define rt_request_sched_ipi() 0
00188
00189 #define rt_free_sched_ipi()
00190
00191 #define sched_get_global_lock(cpuid)
00192
00193 #define sched_release_global_lock(cpuid)
00194
00195 #endif
00196
00197
00198
00199 #ifdef CONFIG_RTAI_MALLOC
00200 int rtai_kstack_heap_size = (CONFIG_RTAI_KSTACK_HEAPSZ*1024);
00201 RTAI_MODULE_PARM(rtai_kstack_heap_size, int);
00202
00203 static rtheap_t rtai_kstack_heap;
00204
00205 #define rt_kstack_alloc(sz) rtheap_alloc(&rtai_kstack_heap, sz, 0)
00206 #define rt_kstack_free(p) rtheap_free(&rtai_kstack_heap, p)
00207 #else
00208 #define rt_kstack_alloc(sz) rt_malloc(sz)
00209 #define rt_kstack_free(p) rt_free(p)
00210 #endif
00211
00212 static int tasks_per_cpu[NR_RT_CPUS] = { 0, };
00213
00214 int get_min_tasks_cpuid(void)
00215 {
00216 int i, cpuid, min;
00217 min = tasks_per_cpu[cpuid = 0];
00218 for (i = 1; i < num_online_cpus(); i++) {
00219 if (tasks_per_cpu[i] < min) {
00220 min = tasks_per_cpu[cpuid = i];
00221 }
00222 }
00223 return cpuid;
00224 }
00225
/*
 * Pin the current Linux task to the given CPU (SMP only; no-op on UP).
 *
 * On pre-2.6 kernels the affinity mask is set directly and we sleep in
 * short timeouts until the migration has actually happened. On 2.6+ we
 * use set_cpus_allowed(); if that fails (e.g. the CPU is not in the
 * allowed set) we fall back to pinning the task on the CPU we are
 * executing on right now, and record that CPU in the RTAI task
 * descriptor's runnable_on_cpus so scheduler bookkeeping stays
 * consistent with the actual placement.
 */
void put_current_on_cpu(int cpuid)
{
#ifdef CONFIG_SMP
	struct task_struct *task = current;
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
	task->cpus_allowed = 1 << cpuid;
	while (cpuid != rtai_cpuid()) {
		task->state = TASK_INTERRUPTIBLE;
		schedule_timeout(2);
	}
#else
	if (set_cpus_allowed(task, cpumask_of_cpu(cpuid))) {
		/* Fallback: stay where we are and update the RTAI descriptor. */
		set_cpus_allowed(current, cpumask_of_cpu(((RT_TASK *)(task->rtai_tskext(TSKEXT0)))->runnable_on_cpus = rtai_cpuid()));
	}
#endif
#endif
}
00243
/*
 * Initialize an RT_TASK descriptor for an LXRT (Linux-backed) task and
 * link it into the per-CPU scheduler task list.
 *
 * @task:     descriptor to initialize; must not already be initialized
 *            (magic check below).
 * @priority: RTAI priority, >= 0; RT_SCHED_HIGHEST_PRIORITY is reserved
 *            for the watchdog when one is installed on the target CPU.
 * @uses_fpu: nonzero if the task uses the FPU.
 * @signal:   optional per-switch signal hook.
 * @cpuid:    target CPU (forced to 0 on UP).
 * @relink:   when non-NULL, attach to this existing Linux task as a
 *            hard task, created suspended; when NULL, attach to
 *            `current` as a soft task (priority offset by
 *            BASE_SOFT_PRIORITY) and migrate `current` to @cpuid.
 *
 * Returns 0, -EINVAL on bad arguments/double init, -EBUSY if the
 * watchdog priority is requested while a watchdog is active.
 */
int set_rtext(RT_TASK *task, int priority, int uses_fpu, void(*signal)(void), unsigned int cpuid, struct task_struct *relink)
{
	unsigned long flags;

	if (num_online_cpus() <= 1) {
		cpuid = 0;
	}
	if (task->magic == RT_TASK_MAGIC || cpuid >= NR_RT_CPUS || priority < 0) {
		return -EINVAL;
	}
	if (lxrt_wdog_task[cpuid] &&
	    lxrt_wdog_task[cpuid] != task &&
	    priority == RT_SCHED_HIGHEST_PRIORITY) {
		rt_printk("Highest priority reserved for RTAI watchdog\n");
		return -EBUSY;
	}
	task->uses_fpu = uses_fpu ? 1 : 0;
	task->runnable_on_cpus = cpuid;
	/* LXRT tasks have no RTAI-private stack: point stack_bottom at the
	 * FPU save area so generic stack bookkeeping has something valid. */
	(task->stack_bottom = (long *)&task->fpu_reg)[0] = 0;
	task->magic = RT_TASK_MAGIC;
	task->policy = 0;
	task->owndres = 0;
	task->prio_passed_to = 0;
	task->period = 0;
	task->resume_time = RT_TIME_END;
	task->periodic_resume_time = RT_TIME_END;
	/* Self-linked queue heads: empty circular lists. */
	task->queue.prev = task->queue.next = &(task->queue);
	task->queue.task = task;
	task->msg_queue.prev = task->msg_queue.next = &(task->msg_queue);
	task->msg_queue.task = task;
	task->msg = 0;
	task->ret_queue.prev = task->ret_queue.next = &(task->ret_queue);
	task->ret_queue.task = NULL;
	task->tprev = task->tnext = task->rprev = task->rnext = task;
	task->blocked_on = NULL;
	task->signal = signal;
	task->unblocked = 0;
	task->rt_signals = NULL;
	memset(task->task_trap_handler, 0, RTAI_NR_TRAPS*sizeof(void *));
	task->linux_syscall_server = NULL;
	task->busy_time_align = 0;
	task->resync_frame = 0;
	task->ExitHook = 0;
	task->usp_flags = task->usp_flags_mask = task->force_soft = 0;
	task->msg_buf[0] = 0;
	task->exectime[0] = task->exectime[1] = 0;
	task->system_data_ptr = 0;
	atomic_inc((atomic_t *)(tasks_per_cpu + cpuid));
	if (relink) {
		/* Hard task bound to an existing Linux thread, born suspended. */
		task->priority = task->base_priority = priority;
		task->suspdepth = task->is_hard = 1;
		task->state = RT_SCHED_READY | RT_SCHED_SUSPENDED;
		relink->rtai_tskext(TSKEXT0) = task;
		task->lnxtsk = relink;
	} else {
		/* Soft task bound to `current`; soft priorities live above
		 * BASE_SOFT_PRIORITY so hard tasks always win. */
		task->priority = task->base_priority = BASE_SOFT_PRIORITY + priority;
		task->suspdepth = task->is_hard = 0;
		task->state = RT_SCHED_READY;
		current->rtai_tskext(TSKEXT0) = task;
		current->rtai_tskext(TSKEXT1) = task->lnxtsk = current;
		put_current_on_cpu(cpuid);
	}
	/* Splice into the global task list tail under the global lock. */
	flags = rt_global_save_flags_and_cli();
	task->next = 0;
	rt_linux_task.prev->next = task;
	task->prev = rt_linux_task.prev;
	rt_linux_task.prev = task;
	rt_global_restore_flags(flags);

	task->resq.prev = task->resq.next = &task->resq;
	task->resq.task = NULL;

	return 0;
}
00318
00319
00320 static void start_stop_kthread(RT_TASK *, void (*)(long), long, int, int, void(*)(void), int);
00321
/*
 * Create an RTAI task backed by a Linux kernel thread on a given CPU.
 * Delegates to start_stop_kthread() and reports its outcome through
 * task->retval. NOTE(review): stack_size is accepted but not forwarded
 * — the kthread runs on its own Linux kernel stack; confirm this is
 * intentional for API symmetry with rt_task_init_cpuid().
 */
int rt_kthread_init_cpuid(RT_TASK *task, void (*rt_thread)(long), long data,
			  int stack_size, int priority, int uses_fpu,
			  void(*signal)(void), unsigned int cpuid)
{
	start_stop_kthread(task, rt_thread, data, priority, uses_fpu, signal, cpuid);
	return (int)task->retval;
}
00329
00330
/*
 * Create an RTAI kernel-thread task on the least-loaded CPU.
 * Convenience wrapper over rt_kthread_init_cpuid() using
 * get_min_tasks_cpuid() for placement.
 */
int rt_kthread_init(RT_TASK *task, void (*rt_thread)(long), long data,
		    int stack_size, int priority, int uses_fpu,
		    void(*signal)(void))
{
	return rt_kthread_init_cpuid(task, rt_thread, data, stack_size, priority,
				     uses_fpu, signal, get_min_tasks_cpuid());
}
00338
00339
00340 #if USE_RTAI_TASKS
00341
00342 asmlinkage static void rt_startup(void(*rt_thread)(long), long data)
00343 {
00344 extern int rt_task_delete(RT_TASK *);
00345 RT_TASK *rt_current = rt_smp_current[rtai_cpuid()];
00346 rt_global_sti();
00347 #if CONFIG_RTAI_MONITOR_EXECTIME
00348 rt_current->exectime[1] = rdtsc();
00349 #endif
00350 ((void (*)(long))rt_current->max_msg_size[0])(rt_current->max_msg_size[1]);
00351 rt_drg_on_adr(rt_current);
00352 rt_task_delete(rt_smp_current[rtai_cpuid()]);
00353 rt_printk("LXRT: task %p returned but could not be delated.\n", rt_current);
00354 }
00355
00356
00357 int rt_task_init_cpuid(RT_TASK *task, void (*rt_thread)(long), long data, int stack_size, int priority, int uses_fpu, void(*signal)(void), unsigned int cpuid)
00358 {
00359 long *st, i;
00360 unsigned long flags;
00361
00362 if (num_online_cpus() <= 1) {
00363 cpuid = 0;
00364 }
00365 if (task->magic == RT_TASK_MAGIC || cpuid >= NR_RT_CPUS || priority < 0) {
00366 return -EINVAL;
00367 }
00368 if (!(st = (long *)rt_kstack_alloc(stack_size))) {
00369 return -ENOMEM;
00370 }
00371 if (lxrt_wdog_task[cpuid] && lxrt_wdog_task[cpuid] != task
00372 && priority == RT_SCHED_HIGHEST_PRIORITY) {
00373 rt_printk("Highest priority reserved for RTAI watchdog\n");
00374 return -EBUSY;
00375 }
00376
00377 task->bstack = task->stack = (long *)(((unsigned long)st + stack_size - 0x10) & ~0xF);
00378 task->stack[0] = 0;
00379 task->uses_fpu = uses_fpu ? 1 : 0;
00380 task->runnable_on_cpus = cpuid;
00381 atomic_inc((atomic_t *)(tasks_per_cpu + cpuid));
00382 *(task->stack_bottom = st) = 0;
00383 task->magic = RT_TASK_MAGIC;
00384 task->policy = 0;
00385 task->suspdepth = 1;
00386 task->state = (RT_SCHED_SUSPENDED | RT_SCHED_READY);
00387 task->owndres = 0;
00388 task->is_hard = 1;
00389 task->lnxtsk = 0;
00390 task->priority = task->base_priority = priority;
00391 task->prio_passed_to = 0;
00392 task->period = 0;
00393 task->resume_time = RT_TIME_END;
00394 task->periodic_resume_time = RT_TIME_END;
00395 task->queue.prev = &(task->queue);
00396 task->queue.next = &(task->queue);
00397 task->queue.task = task;
00398 task->msg_queue.prev = &(task->msg_queue);
00399 task->msg_queue.next = &(task->msg_queue);
00400 task->msg_queue.task = task;
00401 task->msg = 0;
00402 task->ret_queue.prev = &(task->ret_queue);
00403 task->ret_queue.next = &(task->ret_queue);
00404 task->ret_queue.task = NULL;
00405 task->tprev = task->tnext =
00406 task->rprev = task->rnext = task;
00407 task->blocked_on = NULL;
00408 task->signal = signal;
00409 task->unblocked = 0;
00410 task->rt_signals = NULL;
00411 for (i = 0; i < RTAI_NR_TRAPS; i++) {
00412 task->task_trap_handler[i] = NULL;
00413 }
00414 task->linux_syscall_server = NULL;
00415 task->busy_time_align = 0;
00416 task->resync_frame = 0;
00417 task->ExitHook = 0;
00418 task->exectime[0] = task->exectime[1] = 0;
00419 task->system_data_ptr = 0;
00420
00421 task->max_msg_size[0] = (long)rt_thread;
00422 task->max_msg_size[1] = data;
00423 init_arch_stack();
00424
00425 flags = rt_global_save_flags_and_cli();
00426 task->next = 0;
00427 rt_linux_task.prev->next = task;
00428 task->prev = rt_linux_task.prev;
00429 rt_linux_task.prev = task;
00430 init_task_fpenv(task);
00431 rt_global_restore_flags(flags);
00432
00433 task->resq.prev = task->resq.next = &task->resq;
00434 task->resq.task = NULL;
00435
00436 return 0;
00437 }
00438
/*
 * Initialize a native RTAI task on the least-loaded CPU.
 * Convenience wrapper over rt_task_init_cpuid() using
 * get_min_tasks_cpuid() for placement.
 */
int rt_task_init(RT_TASK *task, void (*rt_thread)(long), long data,
		 int stack_size, int priority, int uses_fpu,
		 void(*signal)(void))
{
	return rt_task_init_cpuid(task, rt_thread, data, stack_size, priority,
				  uses_fpu, signal, get_min_tasks_cpuid());
}
00446
00447 #else
00448
/*
 * Without USE_RTAI_TASKS every "native" task is actually backed by a
 * Linux kernel thread: forward straight to rt_kthread_init_cpuid().
 */
int rt_task_init_cpuid(RT_TASK *task, void (*rt_thread)(long), long data, int stack_size, int priority, int uses_fpu, void(*signal)(void), unsigned int cpuid)
{
	return rt_kthread_init_cpuid(task, rt_thread, data, stack_size, priority, uses_fpu, signal, cpuid);
}
00453
/*
 * Kernel-thread-backed variant of rt_task_init() (see above): forwards
 * to rt_kthread_init(), which also picks the least-loaded CPU.
 */
int rt_task_init(RT_TASK *task, void (*rt_thread)(long), long data, int stack_size, int priority, int uses_fpu, void(*signal)(void))
{
	return rt_kthread_init(task, rt_thread, data, stack_size, priority, uses_fpu, signal);
}
00458
00459 #endif
00460
/*
 * Migrate a native RTAI task to another CPU's scheduler lists.
 * No-op for LXRT tasks (those follow their Linux thread's affinity).
 * An out-of-range cpuid falls back to the least-loaded CPU.
 *
 * Because each CPU's timer may run in a different mode, timing fields
 * are rescaled when exactly one of (source, destination) is oneshot:
 * case 1 (source oneshot, dest periodic): CPU-clock units -> timer ticks;
 * case 2 (source periodic, dest oneshot): timer ticks -> CPU-clock units.
 * The whole move runs under the global lock with interrupts off.
 */
RTAI_SYSCALL_MODE void rt_set_runnable_on_cpuid(RT_TASK *task, unsigned int cpuid)
{
	unsigned long flags;
	RT_TASK *linux_task;

	if (task->lnxtsk) {
		return;
	}

	if (cpuid >= NR_RT_CPUS) {
		cpuid = get_min_tasks_cpuid();
	}
	flags = rt_global_save_flags_and_cli();
	/* Bit 0: source timer oneshot; bit 1: destination timer oneshot. */
	switch (rt_smp_oneshot_timer[task->runnable_on_cpus] |
		(rt_smp_oneshot_timer[cpuid] << 1)) {
		case 1:
			task->period = llimd(task->period, TIMER_FREQ, tuned.cpu_freq);
			task->resume_time = llimd(task->resume_time, TIMER_FREQ, tuned.cpu_freq);
			task->periodic_resume_time = llimd(task->periodic_resume_time, TIMER_FREQ, tuned.cpu_freq);
			break;
		case 2:
			task->period = llimd(task->period, tuned.cpu_freq, TIMER_FREQ);
			task->resume_time = llimd(task->resume_time, tuned.cpu_freq, TIMER_FREQ);
			task->periodic_resume_time = llimd(task->periodic_resume_time, tuned.cpu_freq, TIMER_FREQ);
			break;
	}
	/* Unlink from the source CPU's task list. */
	if (!((task->prev)->next = task->next)) {
		rt_smp_linux_task[task->runnable_on_cpus].prev = task->prev;
	} else {
		(task->next)->prev = task->prev;
	}
	/* A timed task must be re-queued so it lands in the destination
	 * CPU's timed-task queue. */
	if ((task->state & RT_SCHED_DELAYED)) {
		rem_timed_task(task);
		task->runnable_on_cpus = cpuid;
		enq_timed_task(task);
	} else {
		task->runnable_on_cpus = cpuid;
	}
	/* Link at the tail of the destination CPU's task list. */
	task->next = 0;
	(linux_task = rt_smp_linux_task + cpuid)->prev->next = task;
	task->prev = linux_task->prev;
	linux_task->prev = task;
	rt_global_restore_flags(flags);
}
00505
00506
/*
 * Restrict a native RTAI task to a set of CPUs given as a bitmask.
 * No-op for LXRT tasks. The least-loaded CPU is preferred; if it is
 * not in the (online-masked) set, the lowest-numbered allowed CPU is
 * used instead. Delegates the actual move to rt_set_runnable_on_cpuid().
 */
RTAI_SYSCALL_MODE void rt_set_runnable_on_cpus(RT_TASK *task, unsigned long run_on_cpus)
{
	int cpuid;

	if (task->lnxtsk) {
		return;
	}

#ifdef CONFIG_SMP
	run_on_cpus &= CPUMASK(cpu_online_map);
#else
	run_on_cpus = 1;
#endif
	cpuid = get_min_tasks_cpuid();
	if (!test_bit(cpuid, &run_on_cpus)) {
		cpuid = ffnz(run_on_cpus);
	}
	rt_set_runnable_on_cpuid(task, cpuid);
}
00526
00527
/*
 * Return the number of bytes between the current stack pointer and the
 * bottom of the current RTAI task's private stack (headroom left).
 * When called from the Linux task context there is no RTAI-private
 * stack to measure and RT_RESEM_SUSPDEL is returned as a sentinel —
 * NOTE(review): reusing that constant here looks like a convention,
 * confirm callers test for it.
 */
int rt_check_current_stack(void)
{
	DECLARE_RT_CURRENT;
	char *sp;

	ASSIGN_RT_CURRENT;
	if (rt_current != &rt_linux_task) {
		sp = get_stack_pointer();
		return (sp - (char *)(rt_current->stack_bottom));
	} else {
		return RT_RESEM_SUSPDEL;
	}
}
00541
00542
/*
 * RR_YIELD: round-robin bookkeeping for the current task (policy > 0).
 * If its quantum expired (yield_time <= tick_time) the quantum is
 * refilled and, when the task is purely READY, it is rotated behind the
 * last same-priority peer in the ready list; otherwise rr_remaining is
 * updated to what is left of the quantum. Expects rt_current/cpuid in
 * scope and the scheduler lock held by the caller.
 */
#define RR_YIELD() \
if (CONFIG_RTAI_ALLOW_RR && rt_current->policy > 0) { \
	if (rt_current->yield_time <= rt_times.tick_time) { \
		rt_current->rr_remaining = rt_current->rr_quantum; \
		if (rt_current->state == RT_SCHED_READY) { \
			RT_TASK *task; \
			task = rt_current->rnext; \
			while (rt_current->priority == task->priority) { \
				task = task->rnext; \
			} \
			if (task != rt_current->rnext) { \
				(rt_current->rprev)->rnext = rt_current->rnext; \
				(rt_current->rnext)->rprev = rt_current->rprev; \
				task->rprev = (rt_current->rprev = task->rprev)->rnext = rt_current; \
				rt_current->rnext = task; \
			} \
		} \
	} else { \
		rt_current->rr_remaining = rt_current->yield_time - rt_times.tick_time; \
	} \
}
00564
/*
 * TASK_TO_SCHEDULE: pick the head of the ready list as new_task and,
 * for round-robin tasks, set the absolute time at which its fresh
 * quantum will expire. Expects new_task/cpuid in scope.
 */
#define TASK_TO_SCHEDULE() \
do { \
	new_task = rt_linux_task.rnext; \
	if (CONFIG_RTAI_ALLOW_RR && new_task->policy > 0) { \
		new_task->yield_time = rt_times.tick_time + new_task->rr_remaining; \
	} \
} while (0)
00572
00573 #define RR_INTR_TIME(fire_shot) \
00574 do { \
00575 fire_shot = 0; \
00576 prio = new_task->priority; \
00577 if (CONFIG_RTAI_ALLOW_RR && new_task->policy > 0) { \
00578 if (new_task->yield_time < rt_times.intr_time) { \
00579 rt_times.intr_time = new_task->yield_time; \
00580 fire_shot = 1; \
00581 } \
00582 } \
00583 } while (0)
00584
00585 #define LOCK_LINUX(cpuid) \
00586 do { rt_switch_to_real_time(cpuid); } while (0)
00587 #define UNLOCK_LINUX(cpuid) \
00588 do { rt_switch_to_linux(cpuid); } while (0)
00589
00590 #define SAVE_LOCK_LINUX(cpuid) \
00591 do { sflags = rt_save_switch_to_real_time(cpuid); } while (0)
00592 #define RESTORE_UNLOCK_LINUX(cpuid) \
00593 do { rt_restore_switch_to_linux(sflags, cpuid); } while (0)
00594
00595 #ifdef LOCKED_LINUX_IN_IRQ_HANDLER
00596 #define SAVE_LOCK_LINUX_IN_IRQ(cpuid)
00597 #define RESTORE_UNLOCK_LINUX_IN_IRQ(cpuid)
00598 #else
00599 #define SAVE_LOCK_LINUX_IN_IRQ(cpuid) LOCK_LINUX(cpuid)
00600 #define RESTORE_UNLOCK_LINUX_IN_IRQ(cpuid) UNLOCK_LINUX(cpuid)
00601 #endif
00602
00603 #if defined(CONFIG_RTAI_TASK_SWITCH_SIGNAL) && CONFIG_RTAI_TASK_SWITCH_SIGNAL
00604
00605 #define RTAI_TASK_SWITCH_SIGNAL() \
00606 do { \
00607 void (*signal)(void) = rt_current->signal; \
00608 if ((unsigned long)signal > MAXSIGNALS) { \
00609 (*signal)(); \
00610 } else if (signal) { \
00611 rt_current->signal = NULL; \
00612 rt_trigger_signal((long)signal, rt_current); \
00613 rt_current->signal = signal; \
00614 } \
00615 } while (0)
00616 #else
00617
00618 #define RTAI_TASK_SWITCH_SIGNAL()
00619
00620 #endif
00621
00622 #if CONFIG_RTAI_MONITOR_EXECTIME
00623
00624 RTIME switch_time[NR_RT_CPUS];
00625
00626 #define SET_EXEC_TIME() \
00627 do { \
00628 RTIME now; \
00629 now = rdtsc(); \
00630 rt_current->exectime[0] += (now - switch_time[cpuid]); \
00631 switch_time[cpuid] = now; \
00632 } while (0)
00633
00634 #define RST_EXEC_TIME() do { switch_time[cpuid] = rdtsc(); } while (0)
00635
00636 #else
00637
00638 #define SET_EXEC_TIME()
00639 #define RST_EXEC_TIME()
00640
00641 #endif
00642
00643 #ifdef CONFIG_RTAI_WD
00644 #define SAVE_PREV_TASK() \
00645 do { lxrt_prev_task[cpuid] = rt_current; } while (0)
00646 #else
00647 #define SAVE_PREV_TASK() do { } while (0)
00648 #endif
00649
/*
 * Kick a task so the scheduler can demote it from hard to soft mode.
 * If the task is not purely READY (it is blocked/suspended/delayed),
 * its READY bit is cleared and it is re-enqueued as ready so that
 * RT_SCHEDULE() runs it through rt_schedule(), where the force_soft
 * flag is honored. Runs under the global hard cli.
 */
void rt_do_force_soft(RT_TASK *rt_task)
{
	rt_global_cli();
	if (rt_task->state != RT_SCHED_READY) {
		rt_task->state &= ~RT_SCHED_READY;
		enq_ready_task(rt_task);
		RT_SCHEDULE(rt_task, rtai_cpuid());
	}
	rt_global_sti();
}
00660
/*
 * enq_soft_ready_task: insert ready_task in front of the current head
 * of this CPU's ready list (no-op if it already is the head). Used when
 * a soft task returns from Linux and must resume as the running RTAI
 * task. Expects cpuid in scope; caller holds the scheduler lock.
 */
#define enq_soft_ready_task(ready_task) \
do { \
	RT_TASK *task = rt_smp_linux_task[cpuid].rnext; \
	if (ready_task == task) break; \
	task->rprev = (ready_task->rprev = task->rprev)->rnext = ready_task; \
	ready_task->rnext = task; \
} while (0)
00668
00669
00670 #define pend_wake_up_hts(lnxtsk, cpuid) \
00671 do { \
00672 wake_up_hts[cpuid].task[wake_up_hts[cpuid].in++ & (MAX_WAKEUP_SRQ - 1)] = lnxtsk; \
00673 hal_pend_uncond(wake_up_srq[cpuid].srq, cpuid); \
00674 } while (0)
00675
00676
/*
 * Demote the current hard task to soft mode, in place.
 *
 * Sequence: queue a wake-up for the task's Linux side, pull the task
 * out of the ready list and let rt_schedule() pick someone else, then
 * shift its priorities into the soft band (>= BASE_SOFT_PRIORITY),
 * re-enable interrupts and hand control back to the Linux scheduler
 * via hal_schedule_back_root(). When Linux eventually resumes us we
 * re-enter hard context, restore READY and put ourselves back on the
 * ready list. Statement order here is load-bearing; do not reorder.
 * Called with the global lock held and interrupts off.
 */
static inline void force_current_soft(RT_TASK *rt_current, int cpuid)
{
	struct task_struct *lnxtsk;
	void rt_schedule(void);
	rt_current->force_soft = 0;
	rt_current->state &= ~RT_SCHED_READY;;
	pend_wake_up_hts(lnxtsk = rt_current->lnxtsk, cpuid);
	/* Unlink from the ready list before rescheduling. */
	(rt_current->rprev)->rnext = rt_current->rnext;
	(rt_current->rnext)->rprev = rt_current->rprev;
	rt_schedule();
	rt_current->is_hard = 0;
	/* Move effective and base priority into the soft band, without
	 * shifting an already-boosted (inherited) priority twice. */
	if (rt_current->priority < BASE_SOFT_PRIORITY) {
		if (rt_current->priority == rt_current->base_priority) {
			rt_current->priority += BASE_SOFT_PRIORITY;
		}
	}
	if (rt_current->base_priority < BASE_SOFT_PRIORITY) {
		rt_current->base_priority += BASE_SOFT_PRIORITY;
	}
	rt_global_sti();
	hal_schedule_back_root(lnxtsk);

	/* Back from Linux: re-enter the RTAI domain as a soft task. */
	rt_global_cli();
	LOCK_LINUX(cpuid);
	rt_current->state |= RT_SCHED_READY;
	rt_smp_current[cpuid] = rt_current;
	/* Other state bits (e.g. blocked/delayed) may still be set; if so
	 * we must sleep in Linux rather than run. */
	if (rt_current->state != RT_SCHED_READY) {
		lnxtsk->state = TASK_SOFTREALTIME;
		rt_schedule();
	} else {
		enq_soft_ready_task(rt_current);
	}
}
00710
/*
 * Context switch when at least one side is a native RTAI task (no
 * Linux thread). Two directions:
 *
 *  - rt_current is LXRT (has lnxtsk): we are leaving Linux-backed
 *    context for a native task. Save Linux FPU state, load the new
 *    task's FPU context if it uses the FPU, then rt_exchange_tasks().
 *    On return, if someone other than us was elected to go back to
 *    Linux, that task is returned so the caller finishes the switch.
 *
 *  - rt_current is native: either hand off toward an LXRT task (route
 *    through rt_linux_task.prevp, restoring the Linux FPU context) or
 *    switch native-to-native, swapping FPU ownership as needed.
 *
 * Returns NULL when the switch is complete, or the LXRT task the
 * caller must still switch to. FPU save/restore ordering here is
 * load-bearing; do not reorder.
 */
static RT_TASK *switch_rtai_tasks(RT_TASK *rt_current, RT_TASK *new_task, int cpuid)
{
	if (rt_current->lnxtsk) {
		unsigned long sflags;
#ifdef IPIPE_NOSTACK_FLAG
		ipipe_set_foreign_stack(&rtai_domain);
#endif
		SAVE_LOCK_LINUX(cpuid);
		rt_linux_task.prevp = rt_current;
		save_fpcr_and_enable_fpu(linux_cr0);
		if (new_task->uses_fpu) {
			save_fpenv(rt_linux_task.fpu_reg);
			fpu_task = new_task;
			restore_fpenv(fpu_task->fpu_reg);
		}
		RST_EXEC_TIME();
		SAVE_PREV_TASK();
		rt_exchange_tasks(rt_smp_current[cpuid], new_task);
		restore_fpcr(linux_cr0);
		RESTORE_UNLOCK_LINUX(cpuid);
#ifdef IPIPE_NOSTACK_FLAG
		ipipe_clear_foreign_stack(&rtai_domain);
#endif
		if (rt_linux_task.nextp != rt_current) {
			return rt_linux_task.nextp;
		}
	} else {
		if (new_task->lnxtsk) {
			/* Heading back toward Linux: record the target and switch
			 * to whoever entered native context, restoring Linux FPU. */
			rt_linux_task.nextp = new_task;
			new_task = rt_linux_task.prevp;
			if (fpu_task != &rt_linux_task) {
				save_fpenv(fpu_task->fpu_reg);
				fpu_task = &rt_linux_task;
				restore_fpenv(fpu_task->fpu_reg);
			}
		} else if (new_task->uses_fpu && fpu_task != new_task) {
			save_fpenv(fpu_task->fpu_reg);
			fpu_task = new_task;
			restore_fpenv(fpu_task->fpu_reg);
		}
		SET_EXEC_TIME();
		SAVE_PREV_TASK();
		rt_exchange_tasks(rt_smp_current[cpuid], new_task);
	}
	RTAI_TASK_SWITCH_SIGNAL();
	return NULL;
}
00758
00759 #define lxrt_context_switch(prev, next, cpuid) \
00760 do { \
00761 SAVE_PREV_TASK(); \
00762 _lxrt_context_switch(prev, next, cpuid); barrier(); \
00763 RTAI_TASK_SWITCH_SIGNAL(); \
00764 } while (0)
00765
00766
00767 #ifdef USE_LINUX_TIMER
00768
00769 #define CHECK_LINUX_TIME() \
00770 if (rt_times.linux_time < rt_times.intr_time) { \
00771 rt_times.intr_time = rt_times.linux_time; \
00772 fire_shot = 1; \
00773 break; \
00774 }
00775
00776 #define SET_PEND_LINUX_TIMER_SHOT() \
00777 do { \
00778 if (rt_times.tick_time >= rt_times.linux_time) { \
00779 if (rt_times.linux_tick > 0) { \
00780 rt_times.linux_time += rt_times.linux_tick; \
00781 } else { \
00782 rt_times.linux_time = RT_TIME_END; \
00783 } \
00784 update_linux_timer(cpuid); \
00785 } \
00786 } while (0)
00787
00788 #else
00789
00790 #define CHECK_LINUX_TIME()
00791
00792 #define SET_PEND_LINUX_TIMER_SHOT()
00793
00794 #endif
00795
00796
/*
 * SET_NEXT_TIMER_SHOT: decide whether the oneshot timer must be
 * reprogrammed earlier. fire_shot is set if either the incoming
 * round-robin task's quantum expires before the currently programmed
 * intr_time, or a timed task with priority at least as good as the
 * incoming task resumes before it. Scans the timed-task list in resume
 * order, so the first candidate past intr_time ends the scan. Expects
 * new_task/prio/task/cpuid in scope.
 */
#define SET_NEXT_TIMER_SHOT(fire_shot) \
do { \
	fire_shot = 0; \
	prio = new_task->priority; \
	if (CONFIG_RTAI_ALLOW_RR && new_task->policy > 0) { \
		if (new_task->yield_time < rt_times.intr_time) { \
			rt_times.intr_time = new_task->yield_time; \
			fire_shot = 1; \
		} \
	} \
	task = &rt_linux_task; \
	while ((task = task->tnext) != &rt_linux_task && task->resume_time < rt_times.intr_time) { \
		if (task->priority <= prio) { \
			rt_times.intr_time = task->resume_time; \
			fire_shot = 1; \
			break; \
		} \
	} \
} while (0)
00816
00817 #define IF_GOING_TO_LINUX_CHECK_TIMER_SHOT(fire_shot) \
00818 do { \
00819 if (prio == RT_SCHED_LINUX_PRIORITY) { \
00820 CHECK_LINUX_TIME(); \
00821 if (!timer_shot_fired) {\
00822 fire_shot = 1; \
00823 } \
00824 } \
00825 } while (0)
00826
00827 static int oneshot_span;
00828 static int satdlay;
00829
/*
 * ONESHOT_DELAY: compute `delay` (CPU-clock units) until the next shot.
 * When no shot is pending, the requested interval is clamped to
 * oneshot_span (with satdlay as the saturated programmed value) so the
 * hardware timer never overflows; otherwise the plain interval minus
 * the tuned latency is used. Expects delay/rt_time_h/cpuid in scope.
 */
#define ONESHOT_DELAY(SHOT_FIRED) \
do { \
	if (!(SHOT_FIRED)) { \
		RTIME span; \
		if (unlikely((span = rt_times.intr_time - rt_time_h) > oneshot_span)) { \
			rt_times.intr_time = rt_time_h + oneshot_span; \
			delay = satdlay; \
		} else { \
			delay = (int)span - tuned.latency; \
		} \
	} else { \
		delay = (int)(rt_times.intr_time - rt_time_h) - tuned.latency; \
	} \
} while (0)
00844
00845 #if 1
00846
00847 static void rt_timer_handler(void);
00848
00849 #define FIRE_NEXT_TIMER_SHOT(SHOT_FIRED) \
00850 do { \
00851 if (fire_shot) { \
00852 int delay; \
00853 ONESHOT_DELAY(SHOT_FIRED); \
00854 if (delay > tuned.setup_time_TIMER_CPUNIT) { \
00855 rt_set_timer_delay(imuldiv(delay, TIMER_FREQ, tuned.cpu_freq));\
00856 timer_shot_fired = 1; \
00857 } else { \
00858 rt_times.intr_time = rt_time_h + tuned.setup_time_TIMER_CPUNIT;\
00859 timer_shot_fired = -1;\
00860 } \
00861 } \
00862 } while (0)
00863
00864 #define CALL_TIMER_HANDLER() \
00865 do { if (timer_shot_fired < 0) rt_timer_handler(); } while (0)
00866
00867 #define REDO_TIMER_HANDLER() \
00868 do { if (timer_shot_fired < 0) goto redo_timer_handler; } while (0)
00869
00870 #define FIRE_IMMEDIATE_LINUX_TIMER_SHOT() \
00871 do { \
00872 LOCK_LINUX(cpuid); \
00873 rt_timer_handler(); \
00874 UNLOCK_LINUX(cpuid); \
00875 } while (0)
00876
00877 #else
00878
00879 #define FIRE_NEXT_TIMER_SHOT(CHECK_SPAN) \
00880 do { \
00881 if (fire_shot) { \
00882 int delay; \
00883 ONESHOT_DELAY(CHECK_SPAN); \
00884 if (delay > tuned.setup_time_TIMER_CPUNIT) { \
00885 rt_set_timer_delay(imuldiv(delay, TIMER_FREQ, tuned.cpu_freq));\
00886 } else { \
00887 rt_set_timer_delay(tuned.setup_time_TIMER_UNIT); \
00888 rt_times.intr_time = rt_time_h + tuned.setup_time_TIMER_CPUNIT;\
00889 } \
00890 timer_shot_fired = 1; \
00891 } \
00892 } while (0)
00893
00894 #define CALL_TIMER_HANDLER()
00895
00896 #define REDO_TIMER_HANDLER()
00897
00898 #endif
00899
00900 #ifdef CONFIG_SMP
/*
 * Reschedule handler run on a CPU that received SCHED_IPI from a peer.
 *
 * Mirrors rt_schedule() but is entered from IRQ context: it takes the
 * global lock itself, performs round-robin bookkeeping, wakes timed
 * tasks and reprograms the oneshot timer if needed, then elects
 * new_task. If a switch is required and scheduling is not locked, the
 * switch is done either through switch_rtai_tasks() (a native task is
 * involved) or via a Linux-thread context switch using the IRQ-safe
 * LOCK/UNLOCK variants. Finally a deferred timer handler run and the
 * optional busy-wait alignment of the resumed task are honored.
 */
static void rt_schedule_on_schedule_ipi(void)
{
	RT_TASK *rt_current, *task, *new_task;
	int cpuid;

	rt_current = rt_smp_current[cpuid = rtai_cpuid()];

	sched_get_global_lock(cpuid);
	RR_YIELD();
	if (oneshot_running) {
		int prio, fire_shot;

		rt_time_h = rdtsc() + rt_half_tick;
		wake_up_timed_tasks(cpuid);
		TASK_TO_SCHEDULE();

		SET_NEXT_TIMER_SHOT(fire_shot);
		sched_release_global_lock(cpuid);
		IF_GOING_TO_LINUX_CHECK_TIMER_SHOT(fire_shot);
		FIRE_NEXT_TIMER_SHOT(timer_shot_fired);
	} else {
		TASK_TO_SCHEDULE();
		sched_release_global_lock(cpuid);
	}

	if (new_task != rt_current) {
		/* Scheduling locked: just note that a reschedule is wanted. */
		if (rt_scheduling[cpuid].locked) {
			rt_scheduling[cpuid].rqsted = 1;
			goto sched_exit;
		}
		if (USE_RTAI_TASKS && (!new_task->lnxtsk || !rt_current->lnxtsk)) {
			if (!(new_task = switch_rtai_tasks(rt_current, new_task, cpuid))) {
				goto sched_exit;
			}
		}
		if (new_task->is_hard > 0 || rt_current->is_hard > 0) {
			struct task_struct *prev;
			unsigned long sflags;
			if (rt_current->is_hard <= 0) {
				/* Leaving Linux/soft context for a hard task. */
				SAVE_LOCK_LINUX_IN_IRQ(cpuid);
				rt_linux_task.lnxtsk = prev = current;
				RST_EXEC_TIME();
			} else {
				sflags = rtai_linux_context[cpuid].sflags;
				prev = rt_current->lnxtsk;
				SET_EXEC_TIME();
			}
			rt_smp_current[cpuid] = new_task;
			lxrt_context_switch(prev, new_task->lnxtsk, cpuid);
			if (rt_current->is_hard <= 0) {
				RESTORE_UNLOCK_LINUX_IN_IRQ(cpuid);
			} else if (lnxtsk_uses_fpu(prev)) {
				restore_fpu(prev);
			}
		}
	}
sched_exit:
	CALL_TIMER_HANDLER();
#if CONFIG_RTAI_BUSY_TIME_ALIGN
	/* Spin until the precise resume time if the task asked for it. */
	if (rt_current->busy_time_align) {
		rt_current->busy_time_align = 0;
		while(rdtsc() < rt_current->resume_time);
	}
#endif
}
00966 #endif
00967
/*
 * Core RTAI scheduler, called from RTAI context with the global lock
 * held by the caller (note: paths below release it and the final
 * sched_get_global_lock() re-acquires it before returning).
 *
 * Performs round-robin bookkeeping, wakes timed tasks and reprograms
 * the oneshot timer, elects new_task, and switches to it:
 *  - native tasks go through switch_rtai_tasks();
 *  - hard Linux-backed tasks via lxrt_context_switch();
 *  - when control is to go back to a soft/Linux context, the function
 *    drops to Linux (NON_RTAI_SCHEDULE), optionally realigning the
 *    Linux rt_priority with the RTAI priority, and on return re-arms
 *    the task as READY at the head of the ready list.
 * Statement and lock ordering is load-bearing throughout.
 */
void rt_schedule(void)
{
	RT_TASK *rt_current, *task, *new_task;
	int cpuid;

	rt_current = rt_smp_current[cpuid = rtai_cpuid()];

	RR_YIELD();
	if (oneshot_running) {
		int prio, fire_shot;

		rt_time_h = rdtsc() + rt_half_tick;
		wake_up_timed_tasks(cpuid);
		TASK_TO_SCHEDULE();

		SET_NEXT_TIMER_SHOT(fire_shot);
		sched_release_global_lock(cpuid);
		IF_GOING_TO_LINUX_CHECK_TIMER_SHOT(fire_shot);
		FIRE_NEXT_TIMER_SHOT(timer_shot_fired);
	} else {
		TASK_TO_SCHEDULE();
		sched_release_global_lock(cpuid);
	}

	if (new_task != rt_current) {
		/* Scheduling locked: just note that a reschedule is wanted. */
		if (rt_scheduling[cpuid].locked) {
			rt_scheduling[cpuid].rqsted = 1;
			goto sched_exit;
		}
		if (USE_RTAI_TASKS && (!new_task->lnxtsk || !rt_current->lnxtsk)) {
			if (!(new_task = switch_rtai_tasks(rt_current, new_task, cpuid))) {
				goto sched_exit;
			}
		}
		rt_smp_current[cpuid] = new_task;
		if (new_task->is_hard > 0 || rt_current->is_hard > 0) {
			struct task_struct *prev;
			unsigned long sflags;
			if (rt_current->is_hard <= 0) {
				/* Soft -> hard: enter the real-time domain. */
				SAVE_LOCK_LINUX(cpuid);
				rt_linux_task.lnxtsk = prev = current;
				RST_EXEC_TIME();
			} else {
				sflags = rtai_linux_context[cpuid].sflags;
				prev = rt_current->lnxtsk;
				SET_EXEC_TIME();
			}
			lxrt_context_switch(prev, new_task->lnxtsk, cpuid);
			if (rt_current->is_hard <= 0) {
				RESTORE_UNLOCK_LINUX(cpuid);
				/* Woken with other state bits set: must go back to Linux. */
				if (rt_current->state != RT_SCHED_READY) {
					goto sched_soft;
				}
			} else {
				if (lnxtsk_uses_fpu(prev)) {
					restore_fpu(prev);
				}
				if (rt_current->force_soft) {
					force_current_soft(rt_current, cpuid);
				}
			}
		} else {
sched_soft:
			/* Hand the CPU to Linux and run the Linux scheduler. */
			CALL_TIMER_HANDLER();
			UNLOCK_LINUX(cpuid);
			rtai_sti();

#ifdef CONFIG_RTAI_ALIGN_LINUX_PRIORITY
			/* Keep the Linux rt_priority of SCHED_FIFO/RR LXRT threads
			 * consistent with their RTAI priority (inverted scale). */
			if (current->rtai_tskext(TSKEXT0) && (current->policy == SCHED_FIFO || current->policy == SCHED_RR)) {
				int rt_priority;
				if ((rt_priority = ((RT_TASK *)current->rtai_tskext(TSKEXT0))->priority) >= BASE_SOFT_PRIORITY) {
					rt_priority -= BASE_SOFT_PRIORITY;
				}
				if ((rt_priority = (MAX_LINUX_RTPRIO - rt_priority)) < 1) {
					rt_priority = 1;
				}
				if (rt_priority != current->rt_priority) {
					rtai_set_linux_task_priority(current, current->policy, rt_priority);
				}
			}
#endif

			hal_test_and_fast_flush_pipeline(cpuid);
			NON_RTAI_SCHEDULE(cpuid);
			/* Resumed by Linux: become the running soft RTAI task again. */
			rt_global_cli();
			rt_current->state = (rt_current->state & ~RT_SCHED_SFTRDY) | RT_SCHED_READY;
			LOCK_LINUX(cpuid);
			enq_soft_ready_task(rt_current);
			rt_smp_current[cpuid] = rt_current;
			return;
		}
	}
sched_exit:
	CALL_TIMER_HANDLER();
#if CONFIG_RTAI_BUSY_TIME_ALIGN
	/* Spin until the precise resume time if the task asked for it. */
	if (rt_current->busy_time_align) {
		rt_current->busy_time_align = 0;
		while(rdtsc() < rt_current->resume_time);
	}
#endif
	sched_get_global_lock(cpuid);
}
01070
/*
 * rt_spv_RMS - rank task priorities Rate-Monotonically on a CPU:
 * the shorter a task's period, the better (numerically lower) the
 * priority it is assigned.  An out-of-range cpuid falls back to the
 * calling CPU.
 */
RTAI_SYSCALL_MODE void rt_spv_RMS(int cpuid)
{
	RT_TASK *task;
	int prio;
	if (cpuid < 0 || cpuid >= num_online_cpus()) {
		cpuid = rtai_cpuid();
	}
	prio = 0;
	task = &rt_linux_task;
	/* Outer loop: advances once per registered task, acting only as an
	 * iteration counter for the selection passes below. */
	while ((task = task->next)) {
		/* NOTE(review): the inner 'task' deliberately shadows the outer
		 * one; the outer pointer still advances through the loop
		 * condition above. */
		RT_TASK *task, *htask;
		RTIME period;
		htask = 0;
		task = &rt_linux_task;
		period = RT_TIME_END;
		/* Select the not-yet-ranked (priority >= 0) periodic task with
		 * the shortest period. */
		while ((task = task->next)) {
			if (task->priority >= 0 && task->policy >= 0 && task->period && task->period < period) {
				period = (htask = task)->period;
			}
		}
		if (htask) {
			/* Mark as ranked and stash its RMS rank in base_priority. */
			htask->priority = -1;
			htask->base_priority = prio++;
		} else {
			goto ret;
		}
	}
ret:	task = &rt_linux_task;
	/* Commit the computed ranks into the effective priorities. */
	while ((task = task->next)) {
		if (task->priority < 0) {
			task->priority = task->base_priority;
		}
	}
	return;
}
01106
01107
01108 void rt_sched_lock(void)
01109 {
01110 unsigned long flags;
01111 int cpuid;
01112
01113 rtai_save_flags_and_cli(flags);
01114 if (!rt_scheduling[cpuid = rtai_cpuid()].locked++) {
01115 rt_scheduling[cpuid].rqsted = 0;
01116 }
01117 rtai_restore_flags(flags);
01118 }
01119
/*
 * Run the scheduling pass that was deferred while the per-CPU scheduler
 * lock was held: clear the pending-request flag, then invoke
 * rt_schedule() under the global lock.
 */
#define SCHED_UNLOCK_SCHEDULE(cpuid) \
	do { \
		rt_scheduling[cpuid].rqsted = 0; \
		sched_get_global_lock(cpuid); \
		rt_schedule(); \
		sched_release_global_lock(cpuid); \
	} while (0)
01127
01128
01129 void rt_sched_unlock(void)
01130 {
01131 unsigned long flags;
01132 int cpuid;
01133
01134 rtai_save_flags_and_cli(flags);
01135 if (rt_scheduling[cpuid = rtai_cpuid()].locked && !(--rt_scheduling[cpuid].locked)) {
01136 if (rt_scheduling[cpuid].rqsted > 0) {
01137 SCHED_UNLOCK_SCHEDULE(cpuid);
01138 }
01139 } else {
01140
01141 }
01142 rtai_restore_flags(flags);
01143 }
01144
01145
/* Entry point used when a reschedule was requested while the scheduler
 * lock was held: run the deferred scheduling pass for this CPU. */
void rtai_handle_isched_lock (int cpuid)
{
	SCHED_UNLOCK_SCHEDULE(cpuid);
}
01150
01151
void *rt_get_lxrt_fun_entry(int index);

/* Signal a semaphore indirectly through the LXRT dispatch table, so the
 * scheduler module needs no direct link to the semaphore services. */
static inline void sched_sem_signal(SEM *sem)
{
	((RTAI_SYSCALL_MODE void (*)(SEM *, ...))rt_get_lxrt_fun_entry(SEM_SIGNAL))(sem);
}
01157
/*
 * clr_rtext - tear a real time task down and detach it from the scheduler.
 *
 * Rejects invalid tasks and the per-CPU Linux placeholder.  When the
 * task still owns semaphores and cannot be torn down right now, deletion
 * is only marked as pending by driving suspdepth very negative;
 * otherwise exit handlers run, the task is unlinked from whatever object
 * it is blocked on, its message partners are released, and its resources
 * are freed.  Returns 0 on success, -EINVAL for an invalid task.
 */
int clr_rtext(RT_TASK *task)
{
	DECLARE_RT_CURRENT;
	unsigned long flags;
	QUEUE *q;

	if (task->magic != RT_TASK_MAGIC || task->priority == RT_SCHED_LINUX_PRIORITY) {
		return -EINVAL;
	}

	flags = rt_global_save_flags_and_cli();
	ASSIGN_RT_CURRENT;
	if (!task_owns_sems(task) || task == rt_current || rt_current->priority == RT_SCHED_LINUX_PRIORITY) {
		call_exit_handlers(task);
		rem_timed_task(task);
		if (task->blocked_on) {
			/* Unlink from the object the task is blocked on. */
			if (task->state & (RT_SCHED_SEMAPHORE | RT_SCHED_SEND | RT_SCHED_RPC | RT_SCHED_RETURN)) {
				(task->queue.prev)->next = task->queue.next;
				(task->queue.next)->prev = task->queue.prev;
				if (task->state & RT_SCHED_SEMAPHORE) {
					SEM *sem = (SEM *)(task->blocked_on);
					/* Give back the count this waiter consumed;
					 * typed (binary) semaphores clamp at 1. */
					if (++sem->count > 1 && sem->type) {
						sem->count = 1;
					}
				}
			} else if (task->state & RT_SCHED_MBXSUSP) {
				MBX *mbx = (MBX *)task->blocked_on;
				mbx->waiting_task = NULL;
				sched_sem_signal(!mbx->frbs ? &mbx->sndsem : &mbx->rcvsem);
			}
		}
		/* Release every task blocked sending/rpc-ing to the dying task... */
		q = &(task->msg_queue);
		while ((q = q->next) != &(task->msg_queue)) {
			rem_timed_task(q->task);
			if ((q->task)->state != RT_SCHED_READY && ((q->task)->state &= ~(RT_SCHED_SEND | RT_SCHED_RPC | RT_SCHED_DELAYED)) == RT_SCHED_READY) {
				enq_ready_task(q->task);
			}
			/* Flag the wakeup cause as "object removed". */
			(q->task)->blocked_on = RTP_OBJREM;
		}
		/* ...and every task awaiting a reply from it. */
		q = &(task->ret_queue);
		while ((q = q->next) != &(task->ret_queue)) {
			rem_timed_task(q->task);
			if ((q->task)->state != RT_SCHED_READY && ((q->task)->state &= ~(RT_SCHED_RETURN | RT_SCHED_DELAYED)) == RT_SCHED_READY) {
				enq_ready_task(q->task);
			}
			(q->task)->blocked_on = RTP_OBJREM;
		}
		/* Unlink from the per-CPU task list, fixing the tail pointer
		 * when the removed task was last. */
		if (!((task->prev)->next = task->next)) {
			rt_smp_linux_task[task->runnable_on_cpus].prev = task->prev;
		} else {
			(task->next)->prev = task->prev;
		}
		if (rt_smp_fpu_task[task->runnable_on_cpus] == task) {
			rt_smp_fpu_task[task->runnable_on_cpus] = rt_smp_linux_task + task->runnable_on_cpus;;
		}
		if (!task->lnxtsk) {
			/* Kernel-only task: its stack must be freed from Linux
			 * context, so defer to the srq handler. */
			frstk_srq.mp[frstk_srq.in++ & (MAX_FRESTK_SRQ - 1)] = task->stack_bottom;
			rt_pend_linux_srq(frstk_srq.srq);
		}
		task->magic = 0;
		rem_ready_task(task);
		task->state = 0;
		atomic_dec((void *)(tasks_per_cpu + task->runnable_on_cpus));
		if (task == rt_current) {
			/* Self-deletion: give the CPU away immediately. */
			rt_schedule();
		}
	} else {
		/* Still owns semaphores: mark deletion as pending only. */
		task->suspdepth = -0x7FFFFFFF;
	}
	rt_global_restore_flags(flags);
	return 0;
}
01230
01231
01232 int rt_task_delete(RT_TASK *task)
01233 {
01234 if (!clr_rtext(task)) {
01235 if (task->lnxtsk) {
01236 start_stop_kthread(task, 0, 0, 0, 0, 0, 0);
01237 }
01238 }
01239 return 0;
01240 }
01241
01242
/* In this scheduler the timer fires on every CPU; simply report 1. */
int rt_get_timer_cpu(void)
{
	return 1;
}
01247
01248
/*
 * rt_timer_handler - the hard timer interrupt handler and core of the
 * scheduler: updates timing state, reprograms the timer (oneshot) or
 * steps to the next tick (periodic), wakes expired timed tasks, picks
 * the highest priority ready task and context-switches to it when it
 * differs from the running one.
 */
static void rt_timer_handler(void)
{
	RT_TASK *rt_current, *task, *new_task;
	int cpuid;

	DO_TIMER_PROPER_OP();
	rt_current = rt_smp_current[cpuid = rtai_cpuid()];

redo_timer_handler:

	rt_times.tick_time = oneshot_timer ? rdtsc() : rt_times.intr_time;
	rt_time_h = rt_times.tick_time + rt_half_tick;
	SET_PEND_LINUX_TIMER_SHOT();

	sched_get_global_lock(cpuid);
	RR_YIELD();
	wake_up_timed_tasks(cpuid);
	/* Sets new_task (and task/prio via the macro's expansion). */
	TASK_TO_SCHEDULE();

	if (oneshot_timer) {
		int prio, fire_shot;

		timer_shot_fired = 0;
		rt_times.intr_time = RT_TIME_END;

		SET_NEXT_TIMER_SHOT(fire_shot);
		sched_release_global_lock(cpuid);
		IF_GOING_TO_LINUX_CHECK_TIMER_SHOT(fire_shot);
		FIRE_NEXT_TIMER_SHOT(0);
	} else {
		/* Periodic mode: just advance to the next tick. */
		sched_release_global_lock(cpuid);
		rt_times.intr_time += rt_times.periodic_tick;
		rt_set_timer_delay(0);
	}

	if (new_task != rt_current) {
		if (rt_scheduling[cpuid].locked) {
			/* Scheduler locked: record the request and bail out. */
			rt_scheduling[cpuid].rqsted = 1;
			goto sched_exit;
		}
		if (USE_RTAI_TASKS && (!new_task->lnxtsk || !rt_current->lnxtsk)) {
			if (!(new_task = switch_rtai_tasks(rt_current, new_task, cpuid))) {
				goto sched_exit;
			}
		}
		if (new_task->is_hard > 0 || rt_current->is_hard > 0) {
			struct task_struct *prev;
			unsigned long sflags;
			if (rt_current->is_hard <= 0) {
				/* Leaving Linux/soft context for a hard task. */
				SAVE_LOCK_LINUX_IN_IRQ(cpuid);
				rt_linux_task.lnxtsk = prev = current;
				RST_EXEC_TIME();
			} else {
				sflags = rtai_linux_context[cpuid].sflags;
				prev = rt_current->lnxtsk;
				SET_EXEC_TIME();
			}
			rt_smp_current[cpuid] = new_task;
			lxrt_context_switch(prev, new_task->lnxtsk, cpuid);
			if (rt_current->is_hard <= 0) {
				RESTORE_UNLOCK_LINUX_IN_IRQ(cpuid);
			} else if (lnxtsk_uses_fpu(prev)) {
				restore_fpu(prev);
			}
		}
	}
sched_exit:
	REDO_TIMER_HANDLER();
	return;
	/* Unreachable: keeps the redo label referenced when
	 * REDO_TIMER_HANDLER() expands to nothing. */
	goto redo_timer_handler;
}
01320
01321
#if defined(USE_LINUX_TIMER) && !defined(CONFIG_GENERIC_CLOCKEVENTS)

/*
 * Linux timer irq handler installed while RTAI owns the hard timer:
 * forwards timer updates to Linux at its own tick pace so jiffies do
 * not drift while RTAI is in charge.
 */
static irqreturn_t recover_jiffies(int irq, void *dev_id, struct pt_regs *regs)
{
	rt_global_cli();
	if (linux_times->tick_time >= linux_times->linux_time) {
		linux_times->linux_time += linux_times->linux_tick;
		update_linux_timer(rtai_cpuid());
	}
	rt_global_sti();
	return RTAI_LINUX_IRQ_HANDLED;
}

#define REQUEST_RECOVER_JIFFIES() rt_request_linux_irq(TIMER_8254_IRQ, recover_jiffies, "rtai_jif_chk", recover_jiffies)

#define RELEASE_RECOVER_JIFFIES(timer) rt_free_linux_irq(TIMER_8254_IRQ, recover_jiffies)

#else

/* No jiffies recovery needed with generic clockevents. */
#define REQUEST_RECOVER_JIFFIES()

#define RELEASE_RECOVER_JIFFIES()

#endif
01346
01347
/* Nonzero while the RTAI hard timer is installed and running. */
int rt_is_hard_timer_running(void)
{
	return rt_sched_timed;
}
01352
01353
/*
 * Select periodic timer mode on all CPUs; any running timer is stopped
 * first and the new mode takes effect at the next start_rt_timer().
 * oneshot_timer/oneshot_running are per-CPU macros indexed by cpuid.
 */
void rt_set_periodic_mode(void)
{
	int cpuid;
	stop_rt_timer();
	for (cpuid = 0; cpuid < NR_RT_CPUS; cpuid++) {
		oneshot_timer = oneshot_running = 0;
	}
}
01362
01363
/*
 * Select oneshot timer mode on all CPUs; any running timer is stopped
 * first and the new mode takes effect at the next start_rt_timer().
 * oneshot_timer is a per-CPU macro indexed by cpuid.
 */
void rt_set_oneshot_mode(void)
{
	int cpuid;
	stop_rt_timer();
	for (cpuid = 0; cpuid < NR_RT_CPUS; cpuid++) {
		oneshot_timer = 1;
	}
}
01372
01373
01374 #if defined(CONFIG_RTAI_RTC_FREQ) && CONFIG_RTAI_RTC_FREQ >= 2
01375
01376 #ifdef CONFIG_SMP
01377
/*
 * RTC-driven timing, SMP build: the RTC interrupt is rebroadcast to all
 * CPUs through an APIC IPI.  'period' is ignored; one count equals one
 * RTC tick, hence the unit return value.
 */
RTAI_SYSCALL_MODE RTIME start_rt_timer(int period)
{
	int cpuid;
	for (cpuid = 0; cpuid < NR_RT_CPUS; cpuid++) {
		oneshot_timer = oneshot_running = 0;
		rt_smp_times[cpuid].linux_tick = 0;
		rt_smp_times[cpuid].tick_time = 0;
		rt_smp_times[cpuid].intr_time = 0;
		rt_smp_times[cpuid].linux_time = 0;
		rt_smp_times[cpuid].periodic_tick = 1;
		tuned.timers_tol[cpuid] = rt_half_tick = 0;
		rt_time_h = 0;
	}
	linux_times = rt_smp_times;
	rt_request_irq(RTAI_APIC_TIMER_IPI, (void *)rt_timer_handler, NULL, 0);
	rt_request_rtc(CONFIG_RTAI_RTC_FREQ, NULL);
	rt_sched_timed = 1;
	rt_gettimeorig(NULL);
	return 1LL;
}
01398
01399
/*
 * Stop RTC-driven timing (SMP build): release the RTC and the IPI, and
 * push every CPU's next-wake horizon to the end of time.
 */
void stop_rt_timer(void)
{
	if (rt_sched_timed) {
		int cpuid;
		rt_sched_timed = 0;
		rt_release_rtc();
		rt_release_irq(RTAI_APIC_TIMER_IPI);
		for (cpuid = 0; cpuid < NR_RT_CPUS; cpuid++) {
			rt_time_h = RT_TIME_END;
			oneshot_running = 0;
		}
	}
}
01413
01414 #else
01415
/*
 * RTC-driven timing, UP build: the RTC interrupt calls the scheduler
 * timer handler directly.  'period' is ignored; a count is an RTC tick.
 */
RTAI_SYSCALL_MODE RTIME start_rt_timer(int period)
{
	int const cpuid = 0;
	oneshot_timer = oneshot_running = 0;
	rt_smp_times[cpuid].linux_tick = 0;
	rt_smp_times[cpuid].tick_time = 0;
	rt_smp_times[cpuid].intr_time = 0;
	rt_smp_times[cpuid].linux_time = 0;
	rt_smp_times[cpuid].periodic_tick = 1;
	tuned.timers_tol[0] = rt_half_tick = 0;
	rt_time_h = 0;
	linux_times = rt_smp_times;
	rt_request_rtc(CONFIG_RTAI_RTC_FREQ, (void *)rt_timer_handler);
	rt_sched_timed = 1;
	rt_gettimeorig(NULL);
	return 1LL;
}
01433
/* Stop RTC-driven timing (UP build): release the RTC and park CPU 0. */
void stop_rt_timer(void)
{
	if (rt_sched_timed) {
		rt_sched_timed = 0;
		rt_release_rtc();
		rt_time_h = RT_TIME_END;
		rt_smp_oneshot_timer[0] = 0;
	}
}
01443
01444 #endif
01445
/* With RTC timing the per-CPU setup is fixed; just start the timer. */
RTAI_SYSCALL_MODE void start_rt_apic_timers(struct apic_timer_setup_data *setup_data, unsigned int rcvr_jiffies_cpuid)
{
	start_rt_timer(0);
}
01450
01451 #else
01452
01453 #ifdef CONFIG_GENERIC_CLOCKEVENTS
01454
01455 #include <linux/clockchips.h>
01456 #include <linux/ipipe_tickdev.h>
01457
01458 extern void *rt_linux_hrt_set_mode;
01459 extern void *rt_linux_hrt_next_shot;
01460
/*
 * ipipe tick device "set mode" callback: record whether Linux runs its
 * local timer in oneshot (tick length 0) or periodic mode on this CPU.
 */
static void _rt_linux_hrt_set_mode(enum clock_event_mode mode, struct ipipe_tick_device *hrt_dev)
{
	int cpuid = rtai_cpuid();

	if (mode == CLOCK_EVT_MODE_ONESHOT || mode == CLOCK_EVT_MODE_SHUTDOWN) {
		rt_times.linux_tick = 0;
	} else if (mode == CLOCK_EVT_MODE_PERIODIC) {
		/* One Linux tick, rounded, converted to counts. */
		rt_times.linux_tick = nano2count_cpuid((1000000000 + HZ/2)/HZ, cpuid);
	}
}
01471
/*
 * ipipe tick device "next shot" callback: Linux asks for its next timer
 * event 'deltat' nanoseconds from now.  Record the requested absolute
 * time and, when RTAI's oneshot timer is running and Linux wants to be
 * served before RTAI's own next shot, reprogram the hardware for Linux
 * (or deliver immediately if the request is already due).
 */
static int _rt_linux_hrt_next_shot(unsigned long deltat, struct ipipe_tick_device *hrt_dev)
{
	int cpuid = rtai_cpuid();
	RTIME linux_time;

	deltat = nano2count_cpuid(deltat, cpuid);
	linux_time = rt_get_time_cpuid(cpuid) + deltat;
	/* Compensate programming overhead; 0 means "fire right away". */
	deltat = deltat > (tuned.setup_time_TIMER_CPUNIT + tuned.latency) ? imuldiv(deltat - tuned.latency, TIMER_FREQ, tuned.cpu_freq) : 0;

	rtai_cli();
	rt_times.linux_time = linux_time;
	if (oneshot_running) {
		if (linux_time < rt_times.intr_time) {
			if (deltat > 0) {
				rt_times.intr_time = linux_time;
				rt_set_timer_delay(deltat);
				timer_shot_fired = 1;
			} else {
				/* Already due: hand the tick to Linux now. */
				rt_times.linux_time = RT_TIME_END;
				update_linux_timer(cpuid);
			}
		}
	}
	rtai_sti();
	return 0;
}
01498
01499 #endif
01500
01501 #ifdef CONFIG_SMP
01502
/*
 * Start the per-CPU APIC timers as described by 'setup_data' (mode > 0
 * means periodic, otherwise oneshot per CPU); 'rcvr_jiffies_cpuid'
 * selects which CPU feeds jiffies back to Linux.
 */
RTAI_SYSCALL_MODE void start_rt_apic_timers(struct apic_timer_setup_data *setup_data, unsigned int rcvr_jiffies_cpuid)
{
	unsigned long flags, cpuid;

	rt_request_apic_timers(rt_timer_handler, setup_data);
	flags = rt_global_save_flags_and_cli();
	for (cpuid = 0; cpuid < NR_RT_CPUS; cpuid++) {
		if (setup_data[cpuid].mode > 0) {
			oneshot_timer = oneshot_running = 0;
			tuned.timers_tol[cpuid] = rt_half_tick = (rt_times.periodic_tick + 1)>>1;
		} else {
			oneshot_timer = oneshot_running = 1;
			tuned.timers_tol[cpuid] = rt_half_tick = (tuned.latency + 1)>>1;
		}
		rt_time_h = rt_times.tick_time + rt_half_tick;
		timer_shot_fired = 1;
	}
	rt_sched_timed = 1;
	linux_times = rt_smp_times + (rcvr_jiffies_cpuid < NR_RT_CPUS ? rcvr_jiffies_cpuid : 0);
	rt_global_restore_flags(flags);
}
01524
01525
/*
 * Start the hard timer on all CPUs (SMP build): period <= 0 selects
 * oneshot mode, otherwise all APIC timers run periodically at 'period'
 * counts.  Returns the period actually in use, in counts.
 */
RTAI_SYSCALL_MODE RTIME start_rt_timer(int period)
{
	int cpuid;
	struct apic_timer_setup_data setup_data[NR_RT_CPUS];
	if (period <= 0) {
		period = 0;
		rt_set_oneshot_mode();
	}
	for (cpuid = 0; cpuid < NR_RT_CPUS; cpuid++) {
		setup_data[cpuid].mode = oneshot_timer ? 0 : 1;
		setup_data[cpuid].count = count2nano(period);
	}
	start_rt_apic_timers(setup_data, rtai_cpuid());
	rt_gettimeorig(NULL);
	return setup_data[0].mode ? setup_data[0].count : period;
}
01542
01543
/* Stop the hard timer on all CPUs (SMP build) and park each CPU. */
void stop_rt_timer(void)
{
	if (rt_sched_timed) {
		int cpuid;
		rt_sched_timed = 0;
		rt_free_apic_timers();
		for (cpuid = 0; cpuid < NR_RT_CPUS; cpuid++) {
			rt_time_h = RT_TIME_END;
			oneshot_running = 0;
		}
	}
}
01556
01557 #else
01558
01559 #ifndef TIMER_TYPE
01560 #define TIMER_TYPE 1
01561 #endif
01562
/*
 * Start the hard timer (UP build): period <= 0 selects oneshot mode,
 * otherwise the 8254/arch timer runs periodically.  Returns the period
 * in counts (0 in oneshot mode).
 */
RTAI_SYSCALL_MODE RTIME start_rt_timer(int period)
{
/* Inside this function 'cpuid' is forced to 0 and 'rt_times' refers to
 * the arch-level timing struct instead of the per-CPU array; both
 * redefinitions are undone at the end of the function. */
#define cpuid 0
#undef rt_times

	unsigned long flags;
	if (period <= 0) {
		period = 0;
		rt_set_oneshot_mode();
	}
	flags = rt_global_save_flags_and_cli();
	if (oneshot_timer) {
		rt_request_timer(rt_timer_handler, 0, TIMER_TYPE);
		tuned.timers_tol[0] = rt_half_tick = (tuned.latency + 1)>>1;
		oneshot_running = timer_shot_fired = 1;
	} else {
		/* An 8254-style timer cannot count beyond LATCH. */
		rt_request_timer(rt_timer_handler, !TIMER_TYPE && period > LATCH ? LATCH: period, TIMER_TYPE);
		tuned.timers_tol[0] = rt_half_tick = (rt_times.periodic_tick + 1)>>1;
	}
	rt_sched_timed = 1;
	/* Mirror the arch timing data into the per-CPU slot. */
	rt_smp_times[cpuid].linux_tick = rt_times.linux_tick;
	rt_smp_times[cpuid].tick_time = rt_times.tick_time;
	rt_smp_times[cpuid].intr_time = rt_times.intr_time;
	rt_smp_times[cpuid].linux_time = rt_times.linux_time;
	rt_smp_times[cpuid].periodic_tick = rt_times.periodic_tick;
	rt_time_h = rt_times.tick_time + rt_half_tick;
	linux_times = rt_smp_times;
	rt_global_restore_flags(flags);
	REQUEST_RECOVER_JIFFIES();
	rt_gettimeorig(NULL);
	return period;

#undef cpuid
#define rt_times (rt_smp_times[cpuid])
}
01598
01599
01600 RTAI_SYSCALL_MODE void start_rt_apic_timers(struct apic_timer_setup_data *setup_mode, unsigned int rcvr_jiffies_cpuid)
01601 {
01602 int cpuid, period;
01603
01604 period = 0;
01605 for (cpuid = 0; cpuid < NR_RT_CPUS; cpuid++) {
01606 period += setup_mode[cpuid].mode;
01607 }
01608 if (period == NR_RT_CPUS) {
01609 period = 2000000000;
01610 for (cpuid = 0; cpuid < NR_RT_CPUS; cpuid++) {
01611 if (setup_mode[cpuid].count < period) {
01612 period = setup_mode[cpuid].count;
01613 }
01614 }
01615 start_rt_timer(nano2count(period));
01616 } else {
01617 rt_set_oneshot_mode();
01618 start_rt_timer(0);
01619 }
01620 }
01621
01622
/* Stop the hard timer (UP build) and restore the Linux timer path. */
void stop_rt_timer(void)
{
	if (rt_sched_timed) {
		rt_sched_timed = 0;
		RELEASE_RECOVER_JIFFIES();
		rt_free_timer();
		rt_time_h = RT_TIME_END;
		rt_smp_oneshot_timer[0] = 0;
	}
}
01633
01634 #endif
01635
01636 #endif
01637
/* Identify this scheduler flavour: MUP (multi-uniprocessor). */
int rt_sched_type(void)
{
	return RT_SCHED_MUP;
}
01642
01643
01644 RTAI_SYSCALL_MODE int rt_hard_timer_tick_count(void)
01645 {
01646 int cpuid = rtai_cpuid();
01647 if (rt_sched_timed) {
01648 return oneshot_timer ? 0 : rt_smp_times[cpuid].periodic_tick;
01649 }
01650 return -1;
01651 }
01652
01653
01654 RTAI_SYSCALL_MODE int rt_hard_timer_tick_count_cpuid(int cpuid)
01655 {
01656 if (rt_sched_timed) {
01657 return oneshot_timer ? 0 : rt_smp_times[cpuid].periodic_tick;
01658 }
01659 return -1;
01660 }
01661
01662
01663 RT_TRAP_HANDLER rt_set_task_trap_handler( RT_TASK *task, unsigned int vec, RT_TRAP_HANDLER handler)
01664 {
01665 RT_TRAP_HANDLER old_handler;
01666
01667 if (!task || (vec >= RTAI_NR_TRAPS)) {
01668 return (RT_TRAP_HANDLER) -EINVAL;
01669 }
01670 old_handler = task->task_trap_handler[vec];
01671 task->task_trap_handler[vec] = handler;
01672 return old_handler;
01673 }
01674
/* Module parameter: nonzero selects oneshot timer mode at load time. */
static int OneShot = CONFIG_RTAI_ONE_SHOT;
RTAI_MODULE_PARM(OneShot, int);

/* Module parameter: timer interrupt latency compensation. */
static int Latency = TIMER_LATENCY;
RTAI_MODULE_PARM(Latency, int);

/* Module parameter: time needed to program the next timer shot. */
static int SetupTimeTIMER = TIMER_SETUP_TIME;
RTAI_MODULE_PARM(SetupTimeTIMER, int);
01683
01684 extern void krtai_objects_release(void);
01685
/* Service request handler: frees, from Linux context, the stacks of
 * kernel tasks that were deleted in hard real time context. */
static void frstk_srq_handler(void)
{
	while (frstk_srq.out != frstk_srq.in) {
		rt_kstack_free(frstk_srq.mp[frstk_srq.out++ & (MAX_FRESTK_SRQ - 1)]);
	}
}
01692
/* Placeholder handler for unassigned LXRT entries; does nothing. */
static void nihil(void) { };
/* Dispatch table of all services callable from user space via LXRT. */
struct rt_fun_entry rt_fun_lxrt[MAX_LXRT_FUN];
01695
01696 void reset_rt_fun_entries(struct rt_native_fun_entry *entry)
01697 {
01698 while (entry->fun.fun) {
01699 if (entry->index >= MAX_LXRT_FUN) {
01700 rt_printk("*** RESET ENTRY %d FOR USER SPACE CALLS EXCEEDS ALLOWD TABLE SIZE %d, NOT USED ***\n", entry->index, MAX_LXRT_FUN);
01701 } else {
01702 rt_fun_lxrt[entry->index] = (struct rt_fun_entry){ 1, nihil };
01703 }
01704 entry++;
01705 }
01706 }
01707
01708 int set_rt_fun_entries(struct rt_native_fun_entry *entry)
01709 {
01710 int error;
01711 error = 0;
01712 while (entry->fun.fun) {
01713 if (rt_fun_lxrt[entry->index].fun != nihil) {
01714 rt_printk("*** SUSPICIOUS ENTRY ASSIGNEMENT FOR USER SPACE CALL AT %d, DUPLICATED INDEX OR REPEATED INITIALIZATION ***\n", entry->index);
01715 error = -1;
01716 } else if (entry->index >= MAX_LXRT_FUN) {
01717 rt_printk("*** ASSIGNEMENT ENTRY %d FOR USER SPACE CALLS EXCEEDS ALLOWED TABLE SIZE %d, NOT USED ***\n", entry->index, MAX_LXRT_FUN);
01718 error = -1;
01719 } else {
01720 rt_fun_lxrt[entry->index] = entry->fun;
01721 }
01722 entry++;
01723 }
01724 if (error) {
01725 reset_rt_fun_entries(entry);
01726 }
01727 return 0;
01728 }
01729
/* Return the handler registered at the given LXRT table index. */
void *rt_get_lxrt_fun_entry(int index) {
	return rt_fun_lxrt[index].fun;
}
01733
/* Stop the timer and delete every registered real time task on every
 * CPU (rt_linux_task is a per-CPU macro indexed by cpuid). */
static void lxrt_killall (void)
{
	int cpuid;

	stop_rt_timer();
	for (cpuid = 0; cpuid < NR_RT_CPUS; cpuid++) {
		while (rt_linux_task.next) {
			rt_task_delete(rt_linux_task.next);
		}
	}
}
01745
01746 static int lxrt_notify_reboot (struct notifier_block *nb, unsigned long event, void *p)
01747 {
01748 switch (event) {
01749 case SYS_DOWN:
01750 case SYS_HALT:
01751 case SYS_POWER_OFF:
01752
01753 printk("LXRT: REBOOT NOTIFIED -- KILLING TASKS\n");
01754 lxrt_killall();
01755 }
01756 return NOTIFY_DONE;
01757 }
01758
01759
01760
01761 RTAI_SYSCALL_MODE RTIME count2nano(RTIME counts)
01762 {
01763 int sign;
01764
01765 if (counts >= 0) {
01766 sign = 1;
01767 } else {
01768 sign = 0;
01769 counts = - counts;
01770 }
01771 counts = oneshot_timer_cpuid ?
01772 llimd(counts, 1000000000, tuned.cpu_freq):
01773 llimd(counts, 1000000000, TIMER_FREQ);
01774 return sign ? counts : - counts;
01775 }
01776
01777
01778 RTAI_SYSCALL_MODE RTIME nano2count(RTIME ns)
01779 {
01780 int sign;
01781
01782 if (ns >= 0) {
01783 sign = 1;
01784 } else {
01785 sign = 0;
01786 ns = - ns;
01787 }
01788 ns = oneshot_timer_cpuid ?
01789 llimd(ns, tuned.cpu_freq, 1000000000) :
01790 llimd(ns, TIMER_FREQ, 1000000000);
01791 return sign ? ns : - ns;
01792 }
01793
01794 RTAI_SYSCALL_MODE RTIME count2nano_cpuid(RTIME counts, unsigned int cpuid)
01795 {
01796 int sign;
01797
01798 if (counts >= 0) {
01799 sign = 1;
01800 } else {
01801 sign = 0;
01802 counts = - counts;
01803 }
01804 counts = oneshot_timer ?
01805 llimd(counts, 1000000000, tuned.cpu_freq):
01806 llimd(counts, 1000000000, TIMER_FREQ);
01807 return sign ? counts : - counts;
01808 }
01809
01810
01811 RTAI_SYSCALL_MODE RTIME nano2count_cpuid(RTIME ns, unsigned int cpuid)
01812 {
01813 int sign;
01814
01815 if (ns >= 0) {
01816 sign = 1;
01817 } else {
01818 sign = 0;
01819 ns = - ns;
01820 }
01821 ns = oneshot_timer ?
01822 llimd(ns, tuned.cpu_freq, 1000000000) :
01823 llimd(ns, TIMER_FREQ, 1000000000);
01824 return sign ? ns : - ns;
01825 }
01826
01827
01828
01829 RTIME rt_get_time(void)
01830 {
01831 int cpuid;
01832 return rt_smp_oneshot_timer[cpuid = rtai_cpuid()] ? rdtsc() : rt_smp_times[cpuid].tick_time;
01833 }
01834
/* As rt_get_time() for a given CPU; oneshot_timer and rt_times are
 * per-CPU macros consuming the cpuid argument. */
RTAI_SYSCALL_MODE RTIME rt_get_time_cpuid(unsigned int cpuid)
{
	return oneshot_timer ? rdtsc(): rt_times.tick_time;
}
01839
/* Current time in nanoseconds on the calling CPU; the cpuid local is
 * consumed by the per-CPU oneshot_timer/rt_times macros. */
RTIME rt_get_time_ns(void)
{
	int cpuid = rtai_cpuid();
	return oneshot_timer ? llimd(rdtsc(), 1000000000, tuned.cpu_freq) :
			       llimd(rt_times.tick_time, 1000000000, TIMER_FREQ);
}
01846
/* As rt_get_time_ns() for a given CPU; oneshot_timer and rt_times are
 * per-CPU macros consuming the cpuid argument. */
RTAI_SYSCALL_MODE RTIME rt_get_time_ns_cpuid(unsigned int cpuid)
{
	return oneshot_timer ? llimd(rdtsc(), 1000000000, tuned.cpu_freq) :
			       llimd(rt_times.tick_time, 1000000000, TIMER_FREQ);
}
01852
/* Raw CPU (TSC-based) time converted to nanoseconds. */
RTIME rt_get_cpu_time_ns(void)
{
	return llimd(rdtsc(), 1000000000, tuned.cpu_freq);
}
01857
extern struct epoch_struct boot_epoch;

/* Wall-clock aligned time in counts: boot epoch offset plus the TSC. */
RTIME rt_get_real_time(void)
{
	return boot_epoch.time[boot_epoch.touse][0] + rtai_rdtsc();
}
01864
/* Wall-clock aligned time in nanoseconds. */
RTIME rt_get_real_time_ns(void)
{
	return boot_epoch.time[boot_epoch.touse][1] + llimd(rtai_rdtsc(), 1000000000, tuned.cpu_freq);
}
01869
01870
01871
01872 RT_TASK *rt_get_base_linux_task(RT_TASK **base_linux_tasks)
01873 {
01874 int cpuid;
01875 for (cpuid = 0; cpuid < num_online_cpus(); cpuid++) {
01876 base_linux_tasks[cpuid] = rt_smp_linux_task + cpuid;
01877 }
01878 return rt_smp_linux_task;
01879 }
01880
/* Allocate an RT_TASK from the RTAI heap, or NULL when the allocator
 * is not configured in. */
RT_TASK *rt_alloc_dynamic_task(void)
{
#ifdef CONFIG_RTAI_MALLOC
	return rt_malloc(sizeof(RT_TASK));
#else
	return NULL;
#endif
}
01889
01890
01891
01892 RT_TASK **rt_register_watchdog(RT_TASK *wd, int cpuid)
01893 {
01894 RT_TASK *task;
01895
01896 if (lxrt_wdog_task[cpuid]) return (RT_TASK**) -EBUSY;
01897 task = &rt_linux_task;
01898 while ((task = task->next)) {
01899 if (task != wd && task->priority == RT_SCHED_HIGHEST_PRIORITY) {
01900 return (RT_TASK**) -EBUSY;
01901 }
01902 }
01903 lxrt_wdog_task[cpuid] = wd;
01904 return (RT_TASK**) 0;
01905 }
01906
01907 void rt_deregister_watchdog(RT_TASK *wd, int cpuid)
01908 {
01909 if (lxrt_wdog_task[cpuid] != wd) return;
01910 lxrt_wdog_task[cpuid] = NULL;
01911 }
01912
01913
01914
01915
/* Optional diagnostic tracing of soft/hard mode switches. */
#ifdef ECHO_SYSW
#define SYSW_DIAG_MSG(x) x
#else
#define SYSW_DIAG_MSG(x)
#endif

/* Trap handler that was installed before LXRT took over. */
static RT_TRAP_HANDLER lxrt_old_trap_handler;
01923
/*
 * Epilogue of a soft real time execution: take the task out of the
 * soft-ready list, hand the CPU back to Linux, and (when configured)
 * realign the Linux rt_priority of the backing kernel task with the
 * task's RTAI priority.
 */
static inline void _rt_schedule_soft_tail(RT_TASK *rt_task, int cpuid)
{
	rt_global_cli();
	rt_task->state &= ~(RT_SCHED_READY | RT_SCHED_SFTRDY);
	(rt_task->rprev)->rnext = rt_task->rnext;
	(rt_task->rnext)->rprev = rt_task->rprev;
	rt_smp_current[cpuid] = &rt_linux_task;
	rt_schedule();
	UNLOCK_LINUX(cpuid);
	rt_global_sti();

#ifdef CONFIG_RTAI_ALIGN_LINUX_PRIORITY
	do {
		int rt_priority;
		struct task_struct *lnxtsk;

		if ((lnxtsk = rt_task->lnxtsk)->policy == SCHED_FIFO || lnxtsk->policy == SCHED_RR) {
			/* Map the RTAI priority into Linux's rt_priority range,
			 * clamping at 1. */
			if ((rt_priority = rt_task->priority) >= BASE_SOFT_PRIORITY) {
				rt_priority -= BASE_SOFT_PRIORITY;
			}
			if ((rt_priority = (MAX_LINUX_RTPRIO - rt_priority)) < 1) {
				rt_priority = 1;
			}
			if (rt_priority != lnxtsk->rt_priority) {
				rtai_set_linux_task_priority(lnxtsk, lnxtsk->policy, rt_priority);
			}
		}
	} while (0);
#endif
}
01954
/*
 * Execute an LXRT service in soft real time on behalf of 'rt_task':
 * wait until the task is purely READY, enqueue it in the soft ready
 * list, run the requested function, then return the CPU to Linux via
 * the common tail.
 */
void rt_schedule_soft(RT_TASK *rt_task)
{
	struct fun_args *funarg;
	int cpuid;

	rt_global_cli();
	rt_task->state |= RT_SCHED_READY;
	while (rt_task->state != RT_SCHED_READY) {
		/* Other state bits still set: sleep until they clear. */
		current->state = TASK_SOFTREALTIME;
		rt_global_sti();
		schedule();
		rt_global_cli();
	}
	cpuid = rt_task->runnable_on_cpus;
	LOCK_LINUX(cpuid);
	enq_soft_ready_task(rt_task);
	rt_smp_current[cpuid] = rt_task;
	rt_global_sti();
	funarg = (void *)rt_task->fun_args;
	rt_task->retval = funarg->fun(RTAI_FUNARGS);
	_rt_schedule_soft_tail(rt_task, cpuid);
}
01977
/* Exported wrapper around the inline soft-schedule epilogue. */
void rt_schedule_soft_tail(RT_TASK *rt_task, int cpuid)
{
	_rt_schedule_soft_tail(rt_task, cpuid);
}
01982
/*
 * Switch directly from the Linux task 'lnxtsk' to the soft real time
 * task 'new_task' without a full scheduling pass, then run any pending
 * timer work and release the Linux lock on the way back.
 */
static inline void fast_schedule(RT_TASK *new_task, struct task_struct *lnxtsk, int cpuid)
{
	RT_TASK *rt_current;
	rt_global_cli();
	new_task->state |= RT_SCHED_READY;
	enq_soft_ready_task(new_task);
	sched_release_global_lock(cpuid);
	LOCK_LINUX(cpuid);
	(rt_current = &rt_linux_task)->lnxtsk = lnxtsk;
	SET_EXEC_TIME();
	rt_smp_current[cpuid] = new_task;
	lxrt_context_switch(lnxtsk, new_task->lnxtsk, cpuid);
	CALL_TIMER_HANDLER();
	UNLOCK_LINUX(cpuid);
	rtai_sti();
}
01999
02000
/* Per-CPU placeholder RT_TASK used while spawning reservoir kthreads. */
static RT_TASK thread_task[NR_RT_CPUS];
/* Per-CPU counter used only to give spawned kthreads unique names. */
static int rsvr_cnt[NR_RT_CPUS];

#if USE_RTAI_TASKS
#define RESERVOIR 0
#else
#define RESERVOIR 11
#endif
/* Low watermark of spare hard kthreads kept per CPU (module parameter). */
static int Reservoir = RESERVOIR;
RTAI_MODULE_PARM(Reservoir, int);
/* Maximum number of cached spare kthreads per CPU (module parameter). */
static int SpareKthreads = 100;
RTAI_MODULE_PARM(SpareKthreads, int);

/* Per-CPU stack of available spare kthreads, with its fill index. */
static int taskidx[NR_RT_CPUS];
static struct task_struct **taskav[NR_RT_CPUS];
02016
02017 static struct task_struct *__get_kthread(int cpuid)
02018 {
02019 unsigned long flags;
02020 struct task_struct *p;
02021
02022 flags = rt_global_save_flags_and_cli();
02023 if (taskidx[cpuid] > 0) {
02024 p = taskav[cpuid][--taskidx[cpuid]];
02025 rt_global_restore_flags(flags);
02026 return p;
02027 }
02028 rt_global_restore_flags(flags);
02029 return 0;
02030 }
02031
02032
02033
02034
02035
02036 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24)
02037
02038 void rt_daemonize(void)
02039 {
02040 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
02041 current->session = 1;
02042 current->pgrp = 1;
02043 current->tty = NULL;
02044 spin_lock_irq(¤t->sigmask_lock);
02045 sigfillset(¤t->blocked);
02046 recalc_sigpending(current);
02047 spin_unlock_irq(¤t->sigmask_lock);
02048 #else
02049 #if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,19)
02050 (current->signal)->__session = 1;
02051 #else
02052 (current->signal)->session = 1;
02053 #endif
02054 (current->signal)->pgrp = 1;
02055 (current->signal)->tty = NULL;
02056 #endif
02057 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
02058 spin_lock_irq(¤t->sigmask_lock);
02059 sigfillset(¤t->blocked);
02060 recalc_sigpending(current);
02061 spin_unlock_irq(¤t->sigmask_lock);
02062 #else
02063 spin_lock_irq(&(current->sighand)->siglock);
02064 sigfillset(¤t->blocked);
02065 recalc_sigpending();
02066 spin_unlock_irq(&(current->sighand)->siglock);
02067 #endif
02068 }
02069
02070 EXPORT_SYMBOL(rt_daemonize);
02071
02072 #else
02073
02074 extern void rt_daemonize(void);
02075
02076 #endif
02077
/* Marker byte written into comm[0] while a reservoir kthread serves a
 * hard task. */
#define HARD_KTHREAD_IN_USE ((char)220)

/*
 * Body of a reservoir kernel thread: daemonize, bind to its CPU, go
 * hard, then loop serving one user function per resume until asked to
 * terminate (max_msg_size[0] == 0); when done, return to Linux and
 * clear the placeholder task.
 */
static void kthread_fun(int cpuid)
{
	void steal_from_linux(RT_TASK *);
	void give_back_to_linux(RT_TASK *, int);
	RT_TASK *task;

#ifdef OOM_DISABLE
	/* Shield the thread from the OOM killer. */
	current->oomkilladj = OOM_DISABLE;
#endif
	rt_daemonize();
	rtai_set_linux_task_priority(current, SCHED_FIFO, KTHREAD_F_PRIO);
	sprintf(current->comm, "F:HARD:%d:%d", cpuid, ++rsvr_cnt[cpuid]);
	current->rtai_tskext(TSKEXT0) = task = &thread_task[cpuid];
	current->rtai_tskext(TSKEXT1) = task->lnxtsk = current;
	put_current_on_cpu(cpuid);
	init_hard_fpu(current);
	task->msg_queue.next = &task->msg_queue;
	task->resq.next = &task->resq;
#ifdef PF_EVNOTIFY
	current->flags |= PF_EVNOTIFY;
#endif
	steal_from_linux(task);
	while(1) {
		rt_task_suspend(task);
		current->comm[0] = HARD_KTHREAD_IN_USE;
		/* max_msg_size[0] carries the function to run; 0 = terminate. */
		if (!(task = current->rtai_tskext(TSKEXT0))->max_msg_size[0]) {
			break;
		}
#if CONFIG_RTAI_MONITOR_EXECTIME
		task->exectime[1] = rdtsc();
#endif
		((void (*)(long))task->max_msg_size[0])(task->max_msg_size[1]);
		task->owndres = 0;
		current->comm[0] = 'F';
		current->rtai_tskext(TSKEXT1) = 0;
		rtai_cli();
		/* Put the now-idle kthread back in the reservoir if room. */
		if (taskidx[cpuid] < SpareKthreads) {
			taskav[cpuid][taskidx[cpuid]++] = task->lnxtsk;
		}
		rtai_sti();
	}
	give_back_to_linux(task, 0);
	clr_rtext(task);
}
02124
/* Drain a per-CPU wakeup list, waking every queued Linux task. */
#define WAKE_UP_TASKs(klist) \
	do { \
		struct klist_t *p = &klist[cpuid]; \
		struct task_struct *task; \
		while (p->out != p->in) { \
			task = p->task[p->out++ & (MAX_WAKEUP_SRQ - 1)]; \
			set_task_state(task, TASK_UNINTERRUPTIBLE); \
			wake_up_process(task); \
		} \
	} while (0)

/* Wake a single service kthread. */
#define WAKE_UP_THREADM(thread) \
	do { \
		set_task_state(thread, TASK_UNINTERRUPTIBLE); \
		wake_up_process(thread); \
	} while (0)
02141
/*
 * Per-CPU manager thread for the hard-kthread reservoir: refills the
 * reservoir with freshly spawned kthreads, recycles kthreads whose
 * served task long-jumped back, and resumes or releases waiters queued
 * on klistm.  Runs until 'endkthread' is set.
 */
static void kthread_m(int cpuid)
{
	struct task_struct *lnxtsk;
	struct klist_t *klistp;
	RT_TASK *task;

#ifdef OOM_DISABLE
	/* Shield the manager from the OOM killer. */
	current->oomkilladj = OOM_DISABLE;
#endif
	rt_daemonize();
	(task = &thread_task[cpuid])->magic = RT_TASK_MAGIC;
	task->runnable_on_cpus = cpuid;
	sprintf(current->comm, "RTAI_KTHRD_M:%d", cpuid);
	put_current_on_cpu(cpuid);
	kthreadm[cpuid] = current;
	klistp = &klistm[cpuid];
	rtai_set_linux_task_priority(current, SCHED_FIFO, KTHREAD_M_PRIO);
	up(&resem[cpuid]);
	while (!endkthread) {
		current->state = TASK_RTAISRVSLEEP;
		schedule();
#if defined(CONFIG_SMP) && LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
		WAKE_UP_TASKs(wake_up_hts);
#endif
		while (klistp->out != klistp->in) {
			unsigned long hard, flags;
			flags = rt_global_save_flags_and_cli();
			/* Queue entries: >1 = a kthread to recycle, 1 = resume a
			 * hard waiter, 0 = release a soft waiter. */
			hard = (unsigned long)(lnxtsk = klistp->task[klistp->out++ & (MAX_WAKEUP_SRQ - 1)]);
			if (hard > 1) {
				if (lnxtsk->rtai_tskext(TSKEXT2)) {
					if (lnxtsk->rtai_tskext(TSKEXT1) && taskidx[cpuid] < SpareKthreads) {;
						taskav[cpuid][taskidx[cpuid]++] = lnxtsk;
						lnxtsk->comm[0] = 'F';
					}
					kthread_fun_long_jump(lnxtsk);
				}
			} else {
				if (taskidx[cpuid] < Reservoir) {
					/* Refill: spawn a new reservoir kthread and wait
					 * until it has parked itself. */
					task->suspdepth = task->state = 0;
					rt_global_sti();
					kernel_thread((void *)kthread_fun, (void *)(long)cpuid, 0);
					while (task->state != (RT_SCHED_READY | RT_SCHED_SUSPENDED)) {
						current->state = TASK_INTERRUPTIBLE;
						schedule_timeout(2);
					}
					kthread_fun_set_jump(task->lnxtsk);
					rt_global_cli();
					taskav[cpuid][taskidx[cpuid]++] = (void *)task->lnxtsk;
				}
				if (hard) {
					rt_task_resume((void *)klistp->task[klistp->out++ & (MAX_WAKEUP_SRQ - 1)]);
				} else {
					rt_global_sti();
					up(&resem[cpuid]);
					rt_global_cli();
				}
			}
			rt_global_restore_flags(flags);
		}
	}
	kthreadm[cpuid] = 0;
}
02204
/*
 * Move a Linux-backed task into hard real time: queue it for the
 * per-CPU transition handler, drop any soft-priority offset, then put
 * the Linux side to sleep until the RTAI side reports it READY.
 * A pending signal aborts the transition (is_hard = -1).
 */
void steal_from_linux(RT_TASK *rt_task)
{
	struct klist_t *klistp;
	struct task_struct *lnxtsk;

	if (signal_pending(rt_task->lnxtsk)) {
		rt_task->is_hard = -1;
		return;
	}
	klistp = &wake_up_sth[rt_task->runnable_on_cpus];
	rtai_cli();
	klistp->task[klistp->in++ & (MAX_WAKEUP_SRQ - 1)] = rt_task;
	/* Strip the soft-mode priority offset on the way to hard mode. */
	if (rt_task->base_priority >= BASE_SOFT_PRIORITY) {
		rt_task->base_priority -= BASE_SOFT_PRIORITY;
	}
	if (rt_task->priority >= BASE_SOFT_PRIORITY) {
		rt_task->priority -= BASE_SOFT_PRIORITY;
	}
	rt_task->is_hard = 1;
#if defined(TASK_ATOMICSWITCH) && TASK_ATOMICSWITCH && defined(CONFIG_PREEMPT)
	preempt_disable();
	(lnxtsk = rt_task->lnxtsk)->state = (TASK_HARDREALTIME | TASK_ATOMICSWITCH);
	rtai_sti();
#else
	(lnxtsk = rt_task->lnxtsk)->state = TASK_HARDREALTIME;
#endif
	do {
		schedule();
	} while (rt_task->state != RT_SCHED_READY);
#if CONFIG_RTAI_MONITOR_EXECTIME
	if (!rt_task->exectime[1]) {
		rt_task->exectime[1] = rdtsc();
	}
#endif
	if (lnxtsk_uses_fpu(lnxtsk)) {
		rtai_cli();
		restore_fpu(lnxtsk);
		rtai_sti();
	}
}
02245
/*
 * give_back_to_linux - return a hard real time task to Linux scheduling.
 *
 * @keeprio: 0 re-applies the BASE_SOFT_PRIORITY offset so the task runs as
 *           an ordinary soft LXRT task; non-zero (e.g. -1 from the forced
 *           soft transition paths) is stored in is_hard unchanged, which
 *           also serves as a "wants to go back hard" marker.
 */
void give_back_to_linux(RT_TASK *rt_task, int keeprio)
{
	struct task_struct *lnxtsk;
	int rt_priority;

	rt_global_cli();
	/* Unlink from the RTAI ready list and queue the Linux wake up. */
	(rt_task->rprev)->rnext = rt_task->rnext;
	(rt_task->rnext)->rprev = rt_task->rprev;
	rt_task->state = 0;
	pend_wake_up_hts(lnxtsk = rt_task->lnxtsk, rt_task->runnable_on_cpus);
#ifdef TASK_NOWAKEUP
	/* Clear the no-wakeup guard set by rt_signal_wake_up. */
	set_task_state(lnxtsk, lnxtsk->state & ~TASK_NOWAKEUP);
#endif
	rt_schedule();
	if (!(rt_task->is_hard = keeprio)) {
		/* Going fully soft: remember the effective priority and push
		   both priorities into the soft band. */
		if (rt_task->priority < BASE_SOFT_PRIORITY) {
			rt_priority = rt_task->priority;
			/* Only shift priority when it is not boosted above
			   base_priority (priority inheritance in effect). */
			if (rt_task->priority == rt_task->base_priority) {
				rt_task->priority += BASE_SOFT_PRIORITY;
			}
		} else {
			rt_priority = rt_task->priority - BASE_SOFT_PRIORITY;
		}
		if (rt_task->base_priority < BASE_SOFT_PRIORITY) {
			rt_task->base_priority += BASE_SOFT_PRIORITY;
		}
	} else {
		/* Keeping hard priorities: just compute the effective value
		   for the optional Linux priority alignment below. */
		if (rt_task->priority < BASE_SOFT_PRIORITY) {
			rt_priority = rt_task->priority;
		} else {
			rt_priority = rt_task->priority - BASE_SOFT_PRIORITY;
		}
	}
	rt_global_sti();


	hal_schedule_back_root(lnxtsk);

#ifdef CONFIG_RTAI_ALIGN_LINUX_PRIORITY
	/* Mirror the RTAI priority onto the Linux RT priority scale
	   (RTAI: lower value = higher priority, Linux: the opposite). */
	if (lnxtsk->policy == SCHED_FIFO || lnxtsk->policy == SCHED_RR) {
		if ((rt_priority = (MAX_LINUX_RTPRIO - rt_priority)) < 1) {
			rt_priority = 1;
		}
		if (rt_priority != lnxtsk->rt_priority) {
			rtai_set_linux_task_priority(lnxtsk, lnxtsk->policy, rt_priority);
		}
	}
#endif

	return;
}
02297
/*
 * get_kthread - obtain or recycle a hard kernel thread for @cpuid.
 *
 * @get != 0: pop a spare kernel thread from the per-CPU reservoir; if it is
 * empty, post a (hard-flag, caller-task) request pair to the manager thread
 * kthread_m and block (rt_task_suspend for hard callers, down(&resem) for
 * soft ones) until the reservoir has been refilled.  @get == 0: hand
 * @lnxtsk back to kthread_m for recycling and return NULL.
 *
 * Entries in klistm are interpreted by kthread_m; note that a final (0, 0)
 * pair is posted after a successful get — presumably to have the manager
 * replenish the reservoir in the background (TODO confirm against
 * kthread_m).
 */
static struct task_struct *get_kthread(int get, int cpuid, void *lnxtsk)
{
	struct task_struct *kthread;
	struct klist_t *klistp;
	RT_TASK *this_task;
	int hard;

	klistp = &klistm[cpuid];
	if (get) {
		while (!(kthread = __get_kthread(cpuid))) {
			/* Reservoir empty: ask kthread_m to create threads and
			   wait for it, in the mode appropriate to the caller. */
			this_task = rt_smp_current[cpuid];
			rt_global_cli();
			klistp->task[klistp->in++ & (MAX_WAKEUP_SRQ - 1)] = (void *)(long)(hard = this_task->is_hard > 0 ? 1 : 0);
			klistp->task[klistp->in++ & (MAX_WAKEUP_SRQ - 1)] = (void *)this_task;
			pend_wake_up_srq(kthreadm[cpuid], cpuid);
			rt_global_sti();
			if (hard) {
				rt_task_suspend(this_task);
			} else {
				down(&resem[cpuid]);
			}
		}
		rt_global_cli();
		klistp->task[klistp->in++ & (MAX_WAKEUP_SRQ - 1)] = 0;
		klistp->task[klistp->in++ & (MAX_WAKEUP_SRQ - 1)] = 0;
	} else {
		/* Recycle: queue the Linux task pointer for the manager. */
		kthread = 0;
		rt_global_cli();
		klistp->task[klistp->in++ & (MAX_WAKEUP_SRQ - 1)] = lnxtsk;
	}
	pend_wake_up_srq(kthreadm[cpuid], cpuid);
	rt_global_sti();
	return kthread;
}
02332
02333 static void start_stop_kthread(RT_TASK *task, void (*rt_thread)(long), long data, int priority, int uses_fpu, void(*signal)(void), int runnable_on_cpus)
02334 {
02335 if (num_online_cpus() == 1) {
02336 runnable_on_cpus = 0;
02337 }
02338 if (rt_thread) {
02339 task->retval = set_rtext(task, priority, uses_fpu, signal, runnable_on_cpus, get_kthread(1, runnable_on_cpus, 0));
02340 task->max_msg_size[0] = (long)rt_thread;
02341 task->max_msg_size[1] = data;
02342 } else {
02343 get_kthread(0, task->runnable_on_cpus, task->lnxtsk);
02344 }
02345 }
02346
/*
 * wake_up_srq_handler - Linux-side service request handler that performs
 * the task wake ups deferred by the RTAI side.
 *
 * Wakes the tasks queued on the per-CPU wake_up_hts/wake_up_srq lists (and,
 * on old SMP kernels, the reservoir manager thread) and requests a Linux
 * reschedule.
 */
static void wake_up_srq_handler(unsigned srq)
{
#ifdef CONFIG_SMP
	/* Per-CPU SRQs are allocated contiguously; recover the CPU index
	   from the SRQ number. */
	int cpuid = srq - wake_up_srq[0].srq;
#else
	int cpuid = 0;
#endif
#if !defined(CONFIG_SMP) || LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
	WAKE_UP_TASKs(wake_up_hts);
#else
	/* Pre-2.6 SMP: wake_up_hts is drained by kthread_m itself. */
	WAKE_UP_THREADM(kthreadm[cpuid]);
#endif
	WAKE_UP_TASKs(wake_up_srq);
	set_need_resched();
}
02362
/* Counters of forced hard->soft transitions caused by traps and by Linux
   syscalls; reported in /proc and at module unload. */
static unsigned long traptrans, systrans;

/*
 * lxrt_handle_trap - RTAI trap handler for faults taken in hard real time.
 *
 * Kernel-space RT tasks (no lnxtsk, or a reservoir kthread in use) get
 * their per-vector handler, or are suspended by default.  A hard user-space
 * task is instead forced soft so Linux can deliver the corresponding
 * signal.  Returns 1 when the trap was fully handled here, 0 to let the
 * normal processing continue.
 */
static int lxrt_handle_trap(int vec, int signo, struct pt_regs *regs, void *dummy_data)
{
	RT_TASK *rt_task;

	rt_task = rt_smp_current[rtai_cpuid()];
	if ((USE_RTAI_TASKS && !rt_task->lnxtsk) || (rt_task->lnxtsk)->comm[0] == HARD_KTHREAD_IN_USE) {
		if (rt_task->task_trap_handler[vec]) {
			return rt_task->task_trap_handler[vec](vec, signo, regs, rt_task);
		}
		/* No handler installed: park the offending task. */
		rt_printk("Default Trap Handler: vector %d: Suspend RT task %p\n", vec, rt_task);
		rt_task_suspend(rt_task);
		return 1;
	}

	if (rt_task->is_hard > 0) {
		/* Warn once about the first forced mode change. */
		if (!traptrans++) {
			rt_printk("\nLXRT CHANGED MODE (TRAP), PID = %d, VEC = %d, SIGNO = %d.\n", (rt_task->lnxtsk)->pid, vec, signo);
		}
		SYSW_DIAG_MSG(rt_printk("\nFORCING IT SOFT (TRAP), PID = %d, VEC = %d, SIGNO = %d.\n", (rt_task->lnxtsk)->pid, vec, signo););
		give_back_to_linux(rt_task, -1);
		SYSW_DIAG_MSG(rt_printk("FORCED IT SOFT (TRAP), PID = %d, VEC = %d, SIGNO = %d.\n", (rt_task->lnxtsk)->pid, vec, signo););
	}

	return 0;
}
02390
/*
 * rt_signal_wake_up - unblock an RT task so a Linux signal can be handled.
 *
 * If the task is blocked in an RTAI primitive it is force-unblocked (with
 * task->unblocked = 1 so the primitive can report the interruption);
 * otherwise unblocked = -1 just records the pending wake up.
 */
static inline void rt_signal_wake_up(RT_TASK *task)
{
	if (task->state && task->state != RT_SCHED_READY) {
#ifdef TASK_NOWAKEUP
		/* Prevent Linux from waking the hard task underneath RTAI
		   while the forced unblock is in progress. */
		struct task_struct *lnxtsk;
		if ((lnxtsk = task->lnxtsk)->state & TASK_HARDREALTIME) {
			set_task_state(lnxtsk, lnxtsk->state | TASK_NOWAKEUP);
		}
#endif
		task->unblocked = 1;
		rt_task_masked_unblock(task, ~RT_SCHED_READY);
	} else {
		task->unblocked = -1;
	}
}
02406
/*
 * Schedule-head interception: only needed on kernels older than 2.4.32,
 * where dropping a lazy-TLB mm from the RTAI context is unsafe and must be
 * deferred.  mm references taken here are released later via DROP_MM2DROP
 * in the schedule-tail hook, or all at once at unload time.  On newer
 * kernels all four macros are no-ops.
 */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,4,32)

#define INTERCEPT_SCHEDULE_HEAD() \
	rtai_catch_event(hal_root_domain, HAL_SCHEDULE_HEAD, (void *)lxrt_intercept_schedule_head)

/* Ring size; must stay a power of two, the queue is indexed with
   "& (MAX_MM2DROP - 1)". */
#define MAX_MM2DROP 32
static struct mmreq {
	unsigned long in, out, count;
	struct mm_struct *mm[MAX_MM2DROP];
} mm2drop_tab[NR_CPUS];

struct prev_next_t { struct task_struct *prev, *next; };

/* Called at the head of Linux schedule(): pin the active_mm of a lazy-TLB
   (kernel) task so it cannot be freed while RTAI may still be using it. */
static int lxrt_intercept_schedule_head (unsigned long event, struct prev_next_t *evdata)
{
	struct task_struct *prev = evdata->prev;

	if (!prev->mm) {
		struct mmreq *p = mm2drop_tab + prev->processor;
		struct mm_struct *oldmm = prev->active_mm;
		BUG_ON(p->count >= MAX_MM2DROP);
		atomic_inc(&oldmm->mm_count);
		p->mm[p->in++ & (MAX_MM2DROP - 1)] = oldmm;
		p->count++;
	}

	return 0;
}

/* Release the mm references queued on this CPU. */
#define DROP_MM2DROP(cpuid) \
do { \
	struct mmreq *p = mm2drop_tab + cpuid; \
	while (p->out != p->in) { \
		struct mm_struct *oldmm = p->mm[p->out++ & (MAX_MM2DROP - 1)]; \
		mmdrop(oldmm); \
		p->count--; \
	} \
} while (0)

#define RELEASE_SCHEDULE_HEAD() \
	rtai_catch_event(hal_root_domain, HAL_SCHEDULE_HEAD, NULL)

/* Unload-time sweep of every CPU's pending mm references. */
#define DROP_ALL_PENDING_MM2DROP() \
do { \
	unsigned long flags, cpuid; \
	flags = rtai_critical_enter(NULL); \
	for (cpuid = 0; cpuid < num_online_cpus(); cpuid++) { \
		DROP_MM2DROP(cpuid); \
	} \
	rtai_critical_exit(flags); \
} while (0)

#else

#define INTERCEPT_SCHEDULE_HEAD() do { } while (0)

#define DROP_MM2DROP(cpuid) do { } while (0)

#define RELEASE_SCHEDULE_HEAD() do { } while (0)

#define DROP_ALL_PENDING_MM2DROP() do { } while (0)

#endif
02470
/*
 * lxrt_intercept_schedule_tail - hook run at the tail of Linux schedule().
 *
 * Completes pending hard transitions queued by steal_from_linux() via
 * fast_schedule(), then drops any deferred lazy-TLB mm references.  Returns
 * 1 (stop further processing) when invoked while the CPU is already in hard
 * mode on pre-2.6.11 kernels, 0 otherwise.
 */
static int lxrt_intercept_schedule_tail (unsigned event, void *nothing)

{
	int cpuid = rtai_cpuid();
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,11)
	if (in_hrt_mode(cpuid)) {
		return 1;
	} else
#endif
	{
		/* Drain the steal_from_linux() queue for this CPU. */
		struct klist_t *klistp = &wake_up_sth[cpuid];
		while (klistp->out != klistp->in) {
			fast_schedule(klistp->task[klistp->out++ & (MAX_WAKEUP_SRQ - 1)], current, cpuid);
		}
	}

	DROP_MM2DROP(cpuid);

	return 0;
}
02491
struct sig_wakeup_t { struct task_struct *task; };

/*
 * lxrt_intercept_sig_wakeup - HAL_KICK_PROCESS hook: a Linux signal is
 * about to wake a process.  If the process is an LXRT task, unblock it on
 * the RTAI side too and report the event as handled (return 1).
 */
static int lxrt_intercept_sig_wakeup (long event, void *data)
{
	RT_TASK *task;
	if ((task = INTERCEPT_WAKE_UP_TASK(data)->rtai_tskext(TSKEXT0))) {
		rt_signal_wake_up(task);
		return 1;
	}
	return 0;
}
02502
/*
 * lxrt_intercept_exit - HAL_EXIT_PROCESS hook: a Linux process is dying.
 *
 * If it is an LXRT task, force it soft first (a hard task cannot be torn
 * down by Linux) and then run the LXRT cleanup for the process.
 */
static int lxrt_intercept_exit (unsigned long event, struct task_struct *lnx_task)
{
	extern void linux_process_termination(void);
	RT_TASK *task;
	if ((task = lnx_task->rtai_tskext(TSKEXT0))) {
		if (task->is_hard > 0) {
			give_back_to_linux(task, 0);
		}
		linux_process_termination();
	}
	return 0;
}
02515
extern long long rtai_lxrt_invoke (unsigned long, void *, void *);
extern int (*sys_call_table[])(struct pt_regs);

/*
 * Disabled (#if 0) prototype of an in-kernel Linux syscall server: a soft
 * companion task that executes Linux syscalls on behalf of a hard master
 * task, so the master never has to leave hard mode.  Kept for reference;
 * none of this is compiled.
 */
#if 0
static RT_TASK *server_task_init(int prio, int cpus_allowed)
{
	RT_TASK *tsk;
	if ((tsk = rt_malloc(sizeof(RT_TASK) + 2*sizeof(struct fun_args)))) {
		tsk->magic = 0;
		if (!set_rtext(tsk, prio, 0, 0, cpus_allowed, 0)) {
			tsk->fun_args = (long *)((struct fun_args *)(tsk + 1));
			if (rt_register((unsigned long)tsk, tsk, IS_TASK, 0)) {
				return tsk;
			} else {
				clr_rtext(tsk);
			}
		}
		rt_free(tsk);
	}
	return 0;
}

static inline RT_TASK *soft_rt_linux_server_call(RT_TASK *task, void *fun, void *arg1, void *arg2)
{
	task->fun_args[0] = (long)arg1;
	task->fun_args[1] = (long)arg2;
	((struct fun_args *)task->fun_args)->fun = fun;
	rt_schedule_soft(task);
	return (RT_TASK *)(unsigned long)task->retval;
}

static void linux_syscall_server_fun(RT_TASK *master_task)
{
	RT_TASK *server_task;
	struct pt_regs regs;

	master_task->linux_syscall_server = server_task = server_task_init(master_task->base_priority >= BASE_SOFT_PRIORITY ? master_task->base_priority - BASE_SOFT_PRIORITY : master_task->base_priority, master_task->runnable_on_cpus);
	rt_task_resume(master_task);
	while (soft_rt_linux_server_call(server_task, rt_receive_linux_syscall, master_task, &regs) == master_task) {
		rt_return_linux_syscall(master_task, sys_call_table[regs.LINUX_SYSCALL_NR](regs));
	}
}

RT_TASK *lxrt_init_linux_server(RT_TASK *master_task)
{
	int is_hard;
	if (!master_task) {
		if (!current->rtai_tskext(TSKEXT0)) {
			return NULL;
		}
		master_task = current->rtai_tskext(TSKEXT0);
	}
	if (!master_task->lnxtsk) {
		return NULL;
	}
	if ((is_hard = master_task->is_hard) > 0) {
		give_back_to_linux(master_task, 0);
	}
	master_task->linux_syscall_server = NULL;
	kernel_thread((void *)linux_syscall_server_fun, master_task, CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND);
	soft_rt_linux_server_call(master_task, rt_task_suspend, master_task, NULL);
	if (is_hard > 0) {
		steal_from_linux(master_task);
	}
	return master_task->linux_syscall_server;
}

#endif
02584
/*
 * lxrt_intercept_syscall_prologue - HAL_SYSCALL_PROLOGUE hook.
 *
 * When a hard LXRT task issues a Linux syscall: either hand it to the
 * task's syscall server (staying hard), or force the task soft, run the
 * syscall immediately, and steal it back to hard mode afterwards.  Returns
 * 1 when the syscall has been fully handled here, 0 to let Linux proceed.
 */
static int lxrt_intercept_syscall_prologue(struct pt_regs *regs)
{
	RT_TASK *task;

	if (regs->LINUX_SYSCALL_NR < NR_syscalls && (task = current->rtai_tskext(TSKEXT0))) {
		if (task->is_hard > 0) {
			if (task->linux_syscall_server) {
				/* Stay hard; the server executes the call. */
				rt_exec_linux_syscall(task, task->linux_syscall_server, regs);
				return 1;
			}
			/* Warn once about the first forced mode change. */
			if (!systrans++) {
				rt_printk("\nLXRT CHANGED MODE (SYSCALL), PID = %d, SYSCALL = %lu.\n", (task->lnxtsk)->pid, regs->LINUX_SYSCALL_NR);
			}
			SYSW_DIAG_MSG(rt_printk("\nFORCING IT SOFT (SYSCALL), PID = %d, SYSCALL = %d.\n", (task->lnxtsk)->pid, regs->LINUX_SYSCALL_NR););
			give_back_to_linux(task, -1);
			SKIP_IMMEDIATE_LINUX_SYSCALL();
			/* A pending signal takes precedence; let Linux run the
			   syscall through its normal path. */
			if (signal_pending(task->lnxtsk)) return 0;
			SYSW_DIAG_MSG(rt_printk("FORCED IT SOFT, CALLING LINUX (SYSCALL), PID = %d, SYSCALL = %d.\n", (task->lnxtsk)->pid, regs->LINUX_SYSCALL_NR););
			LXRT_DO_IMMEDIATE_LINUX_SYSCALL(regs);
			SYSW_DIAG_MSG(rt_printk("LINUX RETURNED, GOING BACK TO HARD (SYSLXRT), PID = %d.\n", current->pid););
			steal_from_linux(task);
			SYSW_DIAG_MSG(rt_printk("GONE BACK TO HARD (SYSLXRT), PID = %d.\n", current->pid););
			return 1;
		}
	}
	return 0;
}
02612
/*
 * lxrt_intercept_syscall_epilogue - HAL_SYSCALL_EPILOGUE hook.
 *
 * Arranges syscall restart for tasks with saved regs in system_data_ptr,
 * and completes the deferred return to hard mode for tasks marked
 * is_hard < 0 (set when steal_from_linux found a pending signal).
 */
static int lxrt_intercept_syscall_epilogue(unsigned long event, void *nothing)
{
	RT_TASK *task;
	if ((task = (RT_TASK *)current->rtai_tskext(TSKEXT0))) {
		if (task->system_data_ptr) {
			/* Rewrite the saved regs so the syscall is restarted
			   through the RTAI syscall gate. */
			struct pt_regs *r = task->system_data_ptr;
			r->LINUX_SYSCALL_RETREG = -ERESTARTSYS;
			r->LINUX_SYSCALL_NR = RTAI_SYSCALL_NR;
			task->system_data_ptr = NULL;
		} else if (task->is_hard < 0) {
			SYSW_DIAG_MSG(rt_printk("GOING BACK TO HARD (SYSLXRT), PID = %d.\n", current->pid););
			steal_from_linux(task);
			SYSW_DIAG_MSG(rt_printk("GONE BACK TO HARD (SYSLXRT), PID = %d.\n", current->pid););
			return 1;
		}
	}
	return 0;
}
02631
02632
02633
02634 #ifdef CONFIG_PROC_FS
02635
02636
extern int rtai_global_heap_size;

/* Select the heap-usage accessor matching the configured allocator
   (TLSF or the BSD-style rtheap). */
#ifdef CONFIG_RTAI_USE_TLSF
#define RTAI_USES_TLSF 1
extern unsigned long tlsf_get_used_size(rtheap_t *);
#define rt_get_heap_mem_used(heap) tlsf_get_used_size(heap)
#else
#define RTAI_USES_TLSF 0
#define rt_get_heap_mem_used(heap) rtheap_used_mem(heap)
#endif
02647
02648 static int rtai_read_sched(char *page, char **start, off_t off, int count,
02649 int *eof, void *data)
02650 {
02651 PROC_PRINT_VARS;
02652 int cpuid, i = 1;
02653 unsigned long t;
02654 RT_TASK *task;
02655
02656 PROC_PRINT("\nRTAI LXRT Real Time Task Scheduler.\n\n");
02657 PROC_PRINT(" Calibrated Time Base Frequency: %lu Hz\n", tuned.cpu_freq);
02658 PROC_PRINT(" Calibrated interrupt to scheduler latency: %d ns\n", (int)imuldiv(tuned.latency - tuned.setup_time_TIMER_CPUNIT, 1000000000, tuned.cpu_freq));
02659 PROC_PRINT(" Calibrated oneshot timer setup_to_firing time: %d ns\n\n",
02660 (int)imuldiv(tuned.setup_time_TIMER_CPUNIT, 1000000000, tuned.cpu_freq));
02661 PROC_PRINT("Number of RT CPUs in system: %d (sized for %d)\n\n", num_online_cpus(), NR_RT_CPUS);
02662
02663 PROC_PRINT("Real time kthreads in resorvoir (cpu/#)");
02664 for (cpuid = 0; cpuid < num_online_cpus(); cpuid++) {
02665 PROC_PRINT(": (%d/%d)", cpuid, taskidx[cpuid]);
02666 }
02667 PROC_PRINT("\n\n");
02668
02669 PROC_PRINT("Global heap: size = %10d, used = %10lu; <%s>.\n", rtai_global_heap_size, rt_get_heap_mem_used(&rtai_global_heap), RTAI_USES_TLSF ? "TLSF" : "BSD");
02670
02671 PROC_PRINT("Kstack heap: size = %10d, used = %10lu; <%s>.\n\n", rtai_kstack_heap_size, rt_get_heap_mem_used(&rtai_kstack_heap), RTAI_USES_TLSF ? "TLSF" : "BSD");
02672
02673 PROC_PRINT("Number of forced hard/soft/hard transitions: traps %lu, syscalls %lu\n\n", traptrans, systrans);
02674
02675 PROC_PRINT("Priority Period(ns) FPU Sig State CPU Task HD/SF PID RT_TASK * TIME\n" );
02676 PROC_PRINT("------------------------------------------------------------------------------\n" );
02677 for (cpuid = 0; cpuid < num_online_cpus(); cpuid++) {
02678 task = &rt_linux_task;
02679
02680
02681
02682
02683
02684
02685 while ((task = task->next)) {
02686
02687
02688
02689
02690
02691 t = 0;
02692 if ((!task->lnxtsk || task->is_hard) && task->exectime[1]) {
02693 unsigned long den = (unsigned long)llimd(rdtsc() - task->exectime[1], 10, tuned.cpu_freq);
02694 if (den) {
02695 t = 1000UL*(unsigned long)llimd(task->exectime[0], 10, tuned.cpu_freq)/den;
02696 }
02697 }
02698 PROC_PRINT("%-10d %-11lu %-4s %-3s 0x%-3x %1lu:%1lu %-4d %-4d %-4d %p %-lu\n",
02699 task->priority,
02700 (unsigned long)count2nano_cpuid(task->period, task->runnable_on_cpus),
02701 task->uses_fpu || task->lnxtsk ? "Yes" : "No",
02702 task->signal ? "Yes" : "No",
02703 task->state,
02704 task->runnable_on_cpus,
02705 task->lnxtsk ? CPUMASK((task->lnxtsk)->cpus_allowed) : (1 << task->runnable_on_cpus),
02706 i,
02707 task->is_hard,
02708 task->lnxtsk ? task->lnxtsk->pid : 0,
02709 task, t);
02710 i++;
02711 }
02712
02713 PROC_PRINT("TIMED\n");
02714 task = &rt_linux_task;
02715 while ((task = task->tnext) != &rt_linux_task) {
02716 PROC_PRINT("> %p ", task);
02717 }
02718 PROC_PRINT("\nREADY\n");
02719 task = &rt_linux_task;
02720 while ((task = task->rnext) != &rt_linux_task) {
02721 PROC_PRINT("> %p ", task);
02722 }
02723
02724 }
02725
02726 PROC_PRINT_DONE;
02727
02728 }
02729
02730
02731 static int rtai_proc_sched_register(void)
02732 {
02733 struct proc_dir_entry *proc_sched_ent;
02734
02735
02736 proc_sched_ent = create_proc_entry("scheduler", S_IFREG|S_IRUGO|S_IWUSR, rtai_proc_root);
02737 if (!proc_sched_ent) {
02738 printk("Unable to initialize /proc/rtai/scheduler\n");
02739 return(-1);
02740 }
02741 proc_sched_ent->read_proc = rtai_read_sched;
02742 return(0);
02743 }
02744
02745
/* Remove /proc/rtai/scheduler created by rtai_proc_sched_register(). */
static void rtai_proc_sched_unregister(void)
{
	remove_proc_entry("scheduler", rtai_proc_root);
}
02750
02751
02752 #endif
02753
02754
02755
extern void usp_request_rtc(int, void *);
extern void rt_release_rtc(void);

/* Return the Linux PID of the calling process (exported to user space
   through the RT_GETTID LXRT entry). */
static int rt_gettid(void)
{
	return current->pid;
}
02763
/*
 * LXRT dispatch table for the scheduler services.  Each entry pairs a
 * kernel function with its LXRT request index; the leading 0/1 flag marks
 * entries whose first argument is an RT_TASK pointer to be resolved from
 * the caller (type flag as interpreted by set_rt_fun_entries).  The table
 * is terminated by a null entry.
 */
static struct rt_native_fun_entry rt_sched_entries[] = {
	{ { 0, rt_set_runnable_on_cpus }, SET_RUNNABLE_ON_CPUS },
	{ { 0, rt_set_runnable_on_cpuid }, SET_RUNNABLE_ON_CPUID },
	{ { 0, rt_set_sched_policy }, SET_SCHED_POLICY },
	{ { 0, rt_get_timer_cpu }, GET_TIMER_CPU },
	{ { 0, rt_is_hard_timer_running }, HARD_TIMER_RUNNING },
	{ { 0, rt_set_periodic_mode }, SET_PERIODIC_MODE },
	{ { 0, rt_set_oneshot_mode }, SET_ONESHOT_MODE },
	{ { 0, start_rt_timer }, START_TIMER },
	{ { 0, start_rt_apic_timers }, START_RT_APIC_TIMERS },
	{ { 0, stop_rt_timer }, STOP_TIMER },
	{ { 0, rt_task_signal_handler }, SIGNAL_HANDLER },
	{ { 0, rt_task_use_fpu }, TASK_USE_FPU },
	{ { 0, rt_hard_timer_tick_count }, HARD_TIMER_COUNT },
	{ { 0, rt_hard_timer_tick_count_cpuid }, HARD_TIMER_COUNT_CPUID },
	{ { 0, count2nano }, COUNT2NANO },
	{ { 0, nano2count }, NANO2COUNT },
	{ { 0, count2nano_cpuid }, COUNT2NANO_CPUID },
	{ { 0, nano2count_cpuid }, NANO2COUNT_CPUID },
	{ { 0, rt_get_time }, GET_TIME },
	{ { 0, rt_get_time_cpuid }, GET_TIME_CPUID },
	{ { 0, rt_get_time_ns }, GET_TIME_NS },
	{ { 0, rt_get_time_ns_cpuid }, GET_TIME_NS_CPUID },
	{ { 0, rt_get_cpu_time_ns }, GET_CPU_TIME_NS },
	{ { 0, rt_task_get_info }, GET_TASK_INFO },
	{ { 0, rt_spv_RMS }, SPV_RMS },
	{ { 1, rt_change_prio }, CHANGE_TASK_PRIO },
	{ { 0, rt_sched_lock }, SCHED_LOCK },
	{ { 0, rt_sched_unlock }, SCHED_UNLOCK },
	{ { 1, rt_task_yield }, YIELD },
	{ { 1, rt_task_suspend }, SUSPEND },
	{ { 1, rt_task_suspend_if }, SUSPEND_IF },
	{ { 1, rt_task_suspend_until }, SUSPEND_UNTIL },
	{ { 1, rt_task_suspend_timed }, SUSPEND_TIMED },
	{ { 1, rt_task_resume }, RESUME },
	{ { 1, rt_set_linux_syscall_mode }, SET_LINUX_SYSCALL_MODE },
/* USI: user-space interrupt services, only when configured in. */
#ifdef CONFIG_RTAI_USI
	{ { 1, rt_irq_wait }, IRQ_WAIT },
	{ { 1, rt_irq_wait_if }, IRQ_WAIT_IF },
	{ { 1, rt_irq_wait_until }, IRQ_WAIT_UNTIL },
	{ { 1, rt_irq_wait_timed }, IRQ_WAIT_TIMED },
	{ { 0, rt_irq_signal }, IRQ_SIGNAL },
	{ { 0, rt_request_irq_task }, REQUEST_IRQ_TASK },
	{ { 0, rt_release_irq_task }, RELEASE_IRQ_TASK },
	{ { 0, usp_request_rtc }, REQUEST_RTC },
#endif
	{ { 1, rt_task_make_periodic_relative_ns }, MAKE_PERIODIC_NS },
	{ { 1, rt_task_make_periodic }, MAKE_PERIODIC },
	{ { 1, rt_task_set_resume_end_times }, SET_RESUME_END },
	{ { 0, rt_set_resume_time }, SET_RESUME_TIME },
	{ { 0, rt_set_period }, SET_PERIOD },
	{ { 1, rt_task_wait_period }, WAIT_PERIOD },
	{ { 0, rt_busy_sleep }, BUSY_SLEEP },
	{ { 1, rt_sleep }, SLEEP },
	{ { 1, rt_sleep_until }, SLEEP_UNTIL },
	{ { 0, rt_task_masked_unblock }, WAKEUP_SLEEPING },
	{ { 0, rt_named_task_init }, NAMED_TASK_INIT },
	{ { 0, rt_named_task_init_cpuid }, NAMED_TASK_INIT_CPUID },
	{ { 0, rt_named_task_delete }, NAMED_TASK_DELETE },
	{ { 0, rt_get_name }, GET_NAME },
	{ { 0, rt_get_adr }, GET_ADR },
	{ { 0, usr_rt_pend_linux_irq }, PEND_LINUX_IRQ },
	{ { 0, rt_release_rtc }, RELEASE_RTC },
	{ { 0, rt_gettid }, RT_GETTID },
	{ { 0, rt_get_real_time }, GET_REAL_TIME },
	{ { 0, rt_get_real_time_ns }, GET_REAL_TIME_NS },
	{ { 1, rt_signal_helper }, RT_SIGNAL_HELPER },
	{ { 1, rt_wait_signal }, RT_SIGNAL_WAITSIG },
	{ { 1, rt_request_signal_ }, RT_SIGNAL_REQUEST },
	{ { 1, rt_release_signal }, RT_SIGNAL_RELEASE },
	{ { 1, rt_enable_signal }, RT_SIGNAL_ENABLE },
	{ { 1, rt_disable_signal }, RT_SIGNAL_DISABLE },
	{ { 1, rt_trigger_signal }, RT_SIGNAL_TRIGGER },
	/* Terminator. */
	{ { 0, 0 }, 000 }
};
02839
extern void *rtai_lxrt_dispatcher;

DECLARE_FUSION_WAKE_UP_STUFF;

/*
 * lxrt_init - bring up the LXRT layer.
 *
 * Installs the resume SRQs, fills each CPU's kernel-thread reservoir via a
 * per-CPU kthread_m manager thread, installs the scheduler service table,
 * the trap handler, the /proc entries, and hooks the HAL scheduling,
 * syscall, exit and signal-wakeup events.  Returns 0 (no failure paths are
 * checked here).
 */
static int lxrt_init(void)

{
	void init_fun_ext(void);
	int cpuid;

	init_fun_ext();

	REQUEST_RESUME_SRQs_STUFF();

	/* Reservoir is the configured total; split it per CPU (at least 1). */
	if (Reservoir <= 0) {
		Reservoir = 1;
	}

	Reservoir = (Reservoir + num_online_cpus() - 1)/num_online_cpus();

	for (cpuid = 0; cpuid < num_online_cpus(); cpuid++) {
		taskav[cpuid] = (void *)kmalloc(SpareKthreads*sizeof(void *), GFP_KERNEL);
		init_MUTEX_LOCKED(&resem[cpuid]);
		kernel_thread((void *)kthread_m, (void *)(long)cpuid, 0);
		/* Wait until kthread_m signals it is set up. */
		down(&resem[cpuid]);
		/* Pre-load the manager queue so it spawns 2*Reservoir entries
		   (pairs of (flag, task) slots) on its first run. */
		klistm[cpuid].in = (2*Reservoir) & (MAX_WAKEUP_SRQ - 1);
		WAKE_UP_THREADM(kthreadm[cpuid]);
	}

	/* Default every LXRT slot to the "nihil" stub before installing the
	   real entries. */
	for (cpuid = 0; cpuid < MAX_LXRT_FUN; cpuid++) {
		rt_fun_lxrt[cpuid].type = 1;
		rt_fun_lxrt[cpuid].fun = nihil;
	}

	set_rt_fun_entries(rt_sched_entries);

	lxrt_old_trap_handler = rt_set_rtai_trap_handler(lxrt_handle_trap);

#ifdef CONFIG_PROC_FS
	rtai_proc_lxrt_register();
#endif

	/* Hook the HAL events that drive the hard/soft machinery. */
	INTERCEPT_SCHEDULE_HEAD();
	rtai_catch_event(hal_root_domain, HAL_SCHEDULE_TAIL, (void *)lxrt_intercept_schedule_tail);
	rtai_catch_event(hal_root_domain, HAL_SYSCALL_PROLOGUE, (void *)lxrt_intercept_syscall_prologue);
	rtai_catch_event(hal_root_domain, HAL_SYSCALL_EPILOGUE, (void *)lxrt_intercept_syscall_epilogue);
	rtai_catch_event(hal_root_domain, HAL_EXIT_PROCESS, (void *)lxrt_intercept_exit);
	rtai_catch_event(hal_root_domain, HAL_KICK_PROCESS, (void *)lxrt_intercept_sig_wakeup);
	rtai_lxrt_dispatcher = rtai_lxrt_invoke;

	return 0;
}
02895
/*
 * lxrt_exit - tear down the LXRT layer (reverse of lxrt_init).
 *
 * Drains every CPU's kernel-thread reservoir by turning each spare thread
 * into a throw-away RT task and resuming it so it exits, stops the
 * kthread_m managers, restores the trap handler and unhooks all HAL
 * events.
 */
static void lxrt_exit(void)
{
	RT_TASK *rt_task;
	struct task_struct *kthread;
	int cpuid;

#ifdef CONFIG_PROC_FS
	rtai_proc_lxrt_unregister();
#endif

	/* Scratch RT_TASK reused to shut down each reservoir thread. */
	rt_task = kmalloc(sizeof(RT_TASK), GFP_KERNEL);
	for (cpuid = 0; cpuid < num_online_cpus(); cpuid++) {
		while ((kthread = __get_kthread(cpuid))) {
			if (kthread->rtai_tskext(TSKEXT2)) {
				kfree(kthread->rtai_tskext(TSKEXT2));
			}
			rt_task->magic = 0;
			set_rtext(rt_task, 0, 0, 0, cpuid, kthread);
			/* max_msg_size[0] == 0 tells the thread to exit. */
			rt_task->max_msg_size[0] = 0;
			rt_task_resume(rt_task);
			/* Poll until the thread has fully cleaned up. */
			while (rt_task->magic || rt_task->state) {
				current->state = TASK_INTERRUPTIBLE;
				schedule_timeout(2);
			}
		}
	}
	kfree(rt_task);

	/* Stop the per-CPU manager threads. */
	endkthread = 1;
	for (cpuid = 0; cpuid < num_online_cpus(); cpuid++) {
		WAKE_UP_THREADM(kthreadm[cpuid]);
		while (kthreadm[cpuid]) {
			current->state = TASK_INTERRUPTIBLE;
			schedule_timeout(2);
		}
		kfree(taskav[cpuid]);
	}

	rt_set_rtai_trap_handler(lxrt_old_trap_handler);

	RELEASE_RESUME_SRQs_STUFF();

	RELEASE_SCHEDULE_HEAD();
	rtai_catch_event(hal_root_domain, HAL_SCHEDULE_TAIL, NULL);
	rtai_catch_event(hal_root_domain, HAL_SYSCALL_PROLOGUE, NULL);
	rtai_catch_event(hal_root_domain, HAL_SYSCALL_EPILOGUE, NULL);
	rtai_catch_event(hal_root_domain, HAL_EXIT_PROCESS, NULL);
	rtai_catch_event(hal_root_domain, HAL_KICK_PROCESS, NULL);
	rtai_lxrt_dispatcher = NULL;

	DROP_ALL_PENDING_MM2DROP();

	reset_rt_fun_entries(rt_sched_entries);
}
02950
#ifdef DECLR_8254_TSC_EMULATION
DECLR_8254_TSC_EMULATION;

/* Periodic Linux timer callback guarding the 8254-based TSC emulation:
   ticks the emulation and re-arms itself at TSC_EMULATION_GUARD_FREQ
   (rounded up in jiffies). */
static void timer_fun(unsigned long none)
{
	TICK_8254_TSC_EMULATION();
	timer.expires = jiffies + (HZ + TSC_EMULATION_GUARD_FREQ/2 - 1)/TSC_EMULATION_GUARD_FREQ;
	add_timer(&timer);
}
#endif

extern int rt_registry_alloc(void);
extern void rt_registry_free(void);
02964
/*
 * __rtai_lxrt_init - module entry point.
 *
 * Sets up the kernel-stack heap, the registry, the per-CPU "Linux task"
 * placeholders, timer calibration, the /proc entry, the stack-freeing SRQ,
 * the scheduler IPI and the LXRT layer, then prints the configuration
 * banner.  Error paths unwind in reverse order via the labels at the
 * bottom.  Returns 0/positive per rtai_init_features on success, non-zero
 * on failure.
 */
static int __rtai_lxrt_init(void)
{
	int cpuid, retval;

	/* NOTE(review): the exit path clears the ipipe foreign-stack flag,
	   but nothing is done here — presumably intentional or set
	   elsewhere; confirm. */
#ifdef IPIPE_NOSTACK_FLAG

#endif

#ifdef CONFIG_RTAI_MALLOC
	/* Round the kstack heap up to a whole number of pages. */
	rtai_kstack_heap_size = (rtai_kstack_heap_size + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1);
	if (rtheap_init(&rtai_kstack_heap, NULL, rtai_kstack_heap_size, PAGE_SIZE, GFP_KERNEL)) {
		printk(KERN_INFO "RTAI[malloc]: failed to initialize the kernel stacks heap (size=%d bytes).\n", rtai_kstack_heap_size);
		return 1;
	}
#endif
	sched_mem_init();

	rt_registry_alloc();

	/* Initialise each CPU's placeholder task representing Linux itself
	   (rt_linux_task & friends are per-cpuid macros). */
	for (cpuid = 0; cpuid < NR_RT_CPUS; cpuid++) {
		rt_linux_task.uses_fpu = 1;
		rt_linux_task.magic = 0;
		rt_linux_task.policy = rt_linux_task.is_hard = 0;
		rt_linux_task.runnable_on_cpus = cpuid;
		rt_linux_task.state = RT_SCHED_READY;
		rt_linux_task.msg_queue.prev = &(rt_linux_task.msg_queue);
		rt_linux_task.msg_queue.next = &(rt_linux_task.msg_queue);
		rt_linux_task.msg_queue.task = &rt_linux_task;
		rt_linux_task.msg = 0;
		rt_linux_task.ret_queue.prev = &(rt_linux_task.ret_queue);
		rt_linux_task.ret_queue.next = &(rt_linux_task.ret_queue);
		rt_linux_task.ret_queue.task = NULL;
		rt_linux_task.priority = RT_SCHED_LINUX_PRIORITY;
		rt_linux_task.base_priority = RT_SCHED_LINUX_PRIORITY;
		rt_linux_task.signal = 0;
		rt_linux_task.prev = &rt_linux_task;
		rt_linux_task.resume_time = RT_TIME_END;
		rt_linux_task.periodic_resume_time = RT_TIME_END;
		rt_linux_task.tprev = rt_linux_task.tnext =
		rt_linux_task.rprev = rt_linux_task.rnext = &rt_linux_task;
#ifdef CONFIG_RTAI_LONG_TIMED_LIST
		rt_linux_task.rbr.rb_node = NULL;
#endif
		rt_linux_task.next = 0;
		rt_linux_task.lnxtsk = current;
		rt_smp_current[cpuid] = &rt_linux_task;
		rt_smp_fpu_task[cpuid] = &rt_linux_task;
		oneshot_timer = OneShot ? 1 : 0;
		oneshot_running = 0;
		linux_cr0 = 0;
		rt_linux_task.resq.prev = rt_linux_task.resq.next = &rt_linux_task.resq;
		rt_linux_task.resq.task = NULL;
	}
	/* Convert the module-parameter calibration values (ns) to CPU and
	   timer units. */
	tuned.latency = imuldiv(Latency, tuned.cpu_freq, 1000000000);
	tuned.setup_time_TIMER_CPUNIT = imuldiv( SetupTimeTIMER,
						 tuned.cpu_freq,
						 1000000000);
	tuned.setup_time_TIMER_UNIT   = imuldiv( SetupTimeTIMER,
						 TIMER_FREQ,
						 1000000000);
	tuned.timers_tol[0] = 0;
	oneshot_span = ONESHOT_SPAN;
	satdlay = oneshot_span - tuned.latency;
#ifdef CONFIG_PROC_FS
	if (rtai_proc_sched_register()) {
		retval = 1;
		goto mem_end;
	}
#endif

	/* SRQ used to free the stacks of dead RT kernel threads. */
	if ((frstk_srq.srq = rt_request_srq(0x7dd763ad, frstk_srq_handler, 0)) < 0) {
		printk("MEM SRQ: no sysrq available.\n");
		retval = frstk_srq.srq;
		goto proc_unregister;
	}

	frstk_srq.in = frstk_srq.out = 0;
	if ((retval = rt_request_sched_ipi()) != 0)
		goto free_srq;

	if ((retval = lxrt_init()) != 0)
		goto free_sched_ipi;

#ifdef CONFIG_RTAI_SCHED_ISR_LOCK
	rt_set_ihook(&rtai_handle_isched_lock);
#endif

	register_reboot_notifier(&lxrt_reboot_notifier);
#ifdef CONFIG_SMP
	printk(KERN_INFO "RTAI[sched]: IMMEDIATE, MP, USER/KERNEL SPACE: <%s RTAI OWN KTASKs>", USE_RTAI_TASKS ? "with" : "without");
#else
	printk(KERN_INFO "RTAI[sched]: loaded (IMMEDIATE, UP, USER/KERNEL SPACE: <%s RTAI OWN KTASKs>", USE_RTAI_TASKS ? "with" : "without");
#endif
#ifdef CONFIG_RTAI_LXRT_USE_LINUX_SYSCALL
	printk(", <uses LINUX SYSCALLs>");
#endif
#ifdef CONFIG_RTAI_MALLOC
	printk(", kstacks pool size = %d bytes", rtai_kstack_heap_size);
#endif
	printk(".\n");
	printk(KERN_INFO "RTAI[sched]: hard timer type/freq = %s/%d(Hz); default timing: %s; ", TIMER_NAME, (int)TIMER_FREQ, OneShot ? "oneshot" : "periodic");
#ifdef CONFIG_RTAI_LONG_TIMED_LIST
	printk("black/red timed lists.\n");
#else
	printk("linear timed lists.\n");
#endif
	printk(KERN_INFO "RTAI[sched]: Linux timer freq = %d (Hz), TimeBase freq = %lu hz.\n", HZ, (unsigned long)tuned.cpu_freq);
	printk(KERN_INFO "RTAI[sched]: timer setup = %d ns, resched latency = %d ns.\n", (int)imuldiv(tuned.setup_time_TIMER_CPUNIT, 1000000000, tuned.cpu_freq), (int)imuldiv(tuned.latency - tuned.setup_time_TIMER_CPUNIT, 1000000000, tuned.cpu_freq));

#ifdef DECLR_8254_TSC_EMULATION
	SETUP_8254_TSC_EMULATION;
#endif

	retval = rtai_init_features();

exit:
#if defined(CONFIG_GENERIC_CLOCKEVENTS) && CONFIG_RTAI_RTC_FREQ == 0
	rt_linux_hrt_set_mode = _rt_linux_hrt_set_mode;
	rt_linux_hrt_next_shot = _rt_linux_hrt_next_shot;
#endif
	return retval;
/* Error unwinding, innermost failure first. */
free_sched_ipi:
	rt_free_sched_ipi();
free_srq:
	rt_free_srq(frstk_srq.srq);
proc_unregister:
#ifdef CONFIG_PROC_FS
	rtai_proc_sched_unregister();
#endif
mem_end:
	sched_mem_end();
#ifdef CONFIG_RTAI_MALLOC
	rtheap_destroy(&rtai_kstack_heap, GFP_KERNEL);
#endif
	rt_registry_free();
	goto exit;
}
03103
/*
 * __rtai_lxrt_exit - module exit point; undoes __rtai_lxrt_init.
 *
 * Kills remaining LXRT tasks, releases registered objects, tears down the
 * LXRT layer, then frees SRQs, IPI, heaps and registry.  The busy-wait on
 * frstk_srq lets all pending kernel-thread stacks be freed first.
 */
static void __rtai_lxrt_exit(void)
{
	unregister_reboot_notifier(&lxrt_reboot_notifier);

#if defined(CONFIG_GENERIC_CLOCKEVENTS) && CONFIG_RTAI_RTC_FREQ == 0
	rt_linux_hrt_set_mode = NULL;
	rt_linux_hrt_next_shot = NULL;
#endif

	lxrt_killall();

	krtai_objects_release();

	lxrt_exit();

	rtai_cleanup_features();

#ifdef CONFIG_PROC_FS
	rtai_proc_sched_unregister();
#endif
	/* Wait for the stack-freeing SRQ queue to drain. */
	while (frstk_srq.out != frstk_srq.in);
	if (rt_free_srq(frstk_srq.srq) < 0) {
		printk("MEM SRQ: frstk_srq %d illegal or already free.\n", frstk_srq.srq);
	}
	rt_free_sched_ipi();
	sched_mem_end();
#ifdef CONFIG_RTAI_MALLOC
	rtheap_destroy(&rtai_kstack_heap, GFP_KERNEL);
#endif
	rt_registry_free();
	/* Give straggling threads a last chance to run before unload. */
	current->state = TASK_INTERRUPTIBLE;
	schedule_timeout(HZ/10);
#ifdef CONFIG_RTAI_SCHED_ISR_LOCK
	rt_set_ihook(NULL);
#endif

#ifdef DECLR_8254_TSC_EMULATION
	CLEAR_8254_TSC_EMULATION;
#endif

#ifdef IPIPE_NOSTACK_FLAG
	ipipe_clear_foreign_stack(&rtai_domain);
#endif

	printk(KERN_INFO "RTAI[sched]: unloaded (forced hard/soft/hard transitions: traps %lu, syscalls %lu).\n", traptrans, systrans);
}
03150
module_init(__rtai_lxrt_init);
module_exit(__rtai_lxrt_exit);

/* Force CONFIG_KBUILD on so the exports below are always compiled. */
#ifndef CONFIG_KBUILD
#define CONFIG_KBUILD
#endif

#ifdef CONFIG_KBUILD

/* Symbols used by the other RTAI scheduler/extension modules. */
EXPORT_SYMBOL(rt_fun_lxrt);
EXPORT_SYMBOL(clr_rtext);
EXPORT_SYMBOL(set_rtext);
EXPORT_SYMBOL(get_min_tasks_cpuid);
EXPORT_SYMBOL(put_current_on_cpu);
EXPORT_SYMBOL(rt_schedule_soft);
EXPORT_SYMBOL(rt_do_force_soft);
EXPORT_SYMBOL(rt_schedule_soft_tail);
EXPORT_SYMBOL(rt_sched_timed);
EXPORT_SYMBOL(rtai_handle_isched_lock);
#if CONFIG_RTAI_MONITOR_EXECTIME
EXPORT_SYMBOL(switch_time);
#endif
EXPORT_SYMBOL(lxrt_prev_task);

#endif
03176