00001
00002
00003
00004
00005
00006
00007
00008
00009
00010
00011
00012
00013
00014
00015
00016
00017
00018
00019
00020
00021
00022
00023
00024
00025
00026
00027
00028
00029
#if 0
00030
#define CONFIG_RTAI_MONITOR_EXECTIME 1
00031
#define CONFIG_RTAI_ALLOW_RR 1
00032
#define CONFIG_RTAI_ONE_SHOT 0
00033
#define CONFIG_RTAI_BUSY_TIME_ALIGN 0
00034
#define CONFIG_RTAI_CAL_FREQS_FACT 0
00035
#endif
00036
00037
00038
00039
#include <linux/module.h>
00040
#include <linux/kernel.h>
00041
#include <linux/version.h>
00042
#include <linux/errno.h>
00043
#include <linux/slab.h>
00044
#include <linux/timex.h>
00045
#include <linux/sched.h>
00046
#include <linux/irq.h>
00047
#include <linux/reboot.h>
00048
#include <linux/sys.h>
00049
00050
#include <asm/param.h>
00051
#include <asm/system.h>
00052
#include <asm/io.h>
00053
#include <asm/segment.h>
00054
#include <asm/uaccess.h>
00055
#include <asm/mmu_context.h>
00056
00057 #define __KERNEL_SYSCALLS__
00058
#include <linux/unistd.h>
00059
00060
#ifdef CONFIG_PROC_FS
00061
#include <linux/stat.h>
00062
#include <linux/proc_fs.h>
00063
#include <rtai_proc_fs.h>
00064
static int rtai_proc_sched_register(
void);
00065
static void rtai_proc_sched_unregister(
void);
00066
int rtai_proc_lxrt_register(
void);
00067
void rtai_proc_lxrt_unregister(
void);
00068
#endif
00069
00070
#include <rtai.h>
00071
#include <asm/rtai_sched.h>
00072
#include <rtai_lxrt.h>
00073
#include <rtai_registry.h>
00074
#include <rtai_nam2num.h>
00075
#include <rtai_schedcore.h>
00076
00077
MODULE_LICENSE(
"GPL");
00078
00079
00080
00081 RT_TASK rt_smp_linux_task[
NR_RT_CPUS];
00082
00083 RT_TASK *
rt_smp_current[
NR_RT_CPUS];
00084
00085 RTIME rt_smp_time_h[
NR_RT_CPUS];
00086
00087 int rt_smp_oneshot_timer[
NR_RT_CPUS];
00088
00089 volatile int rt_sched_timed;
00090
00091 struct klist_t
wake_up_hts[
NR_RT_CPUS];
00092 struct klist_t
wake_up_srq[
NR_RT_CPUS];
00093
00094
00095
00096 extern struct {
volatile int locked,
rqsted; }
rt_scheduling[];
00097
00098 static int rt_smp_linux_cr0[
NR_RT_CPUS];
00099
00100 static RT_TASK *
rt_smp_fpu_task[
NR_RT_CPUS];
00101
00102 static int rt_smp_half_tick[
NR_RT_CPUS];
00103
00104 static int rt_smp_oneshot_running[
NR_RT_CPUS];
00105
00106 static volatile int rt_smp_shot_fired[
NR_RT_CPUS];
00107
00108 static struct rt_times *
linux_times;
00109
00110 static RT_TASK *
lxrt_wdog_task[
NR_RT_CPUS];
00111
00112
static int lxrt_notify_reboot(
struct notifier_block *nb,
00113
unsigned long event,
00114
void *ptr);
00115
00116 static struct notifier_block
lxrt_notifier_reboot = {
00117 .notifier_call = &
lxrt_notify_reboot,
00118 .next = NULL,
00119 .priority = 0
00120 };
00121
00122 static struct klist_t
klistb[
NR_RT_CPUS];
00123
00124 static struct klist_t
klistm[
NR_RT_CPUS];
00125
00126 static struct task_struct *
kthreadm[
NR_RT_CPUS];
00127
00128 static struct semaphore
resem[
NR_RT_CPUS];
00129
00130 static int endkthread;
00131
00132 #define fpu_task (rt_smp_fpu_task[cpuid])
00133
00134 #define rt_half_tick (rt_smp_half_tick[cpuid])
00135
00136 #define oneshot_running (rt_smp_oneshot_running[cpuid])
00137
00138 #define oneshot_timer_cpuid (rt_smp_oneshot_timer[rtai_cpuid()])
00139
00140 #define shot_fired (rt_smp_shot_fired[cpuid])
00141
00142 #define rt_times (rt_smp_times[cpuid])
00143
00144 #define linux_cr0 (rt_smp_linux_cr0[cpuid])
00145
00146 #define MAX_FRESTK_SRQ (2 << 6)
00147 static struct {
int srq;
volatile unsigned long in,
out;
void *
mp[
MAX_FRESTK_SRQ]; }
frstk_srq;
00148
00149 #define KTHREAD_M_PRIO MAX_LINUX_RTPRIO
00150 #define KTHREAD_F_PRIO MAX_LINUX_RTPRIO
00151
00152
#ifdef CONFIG_SMP
00153
00154
extern void rt_set_sched_ipi_gate(
void);
00155
extern void rt_reset_sched_ipi_gate(
void);
00156
static void rt_schedule_on_schedule_ipi(
void);
00157
00158
static inline int rt_request_sched_ipi(
void)
00159 {
00160
int retval;
00161 retval =
rt_request_irq(SCHED_IPI, (
void *)rt_schedule_on_schedule_ipi, NULL, 0);
00162 rt_set_sched_ipi_gate();
00163
return retval;
00164 }
00165
00166
#define rt_free_sched_ipi() \
00167
do { \
00168
rt_release_irq(SCHED_IPI); \
00169
rt_reset_sched_ipi_gate(); \
00170
} while (0)
00171
00172
00173
00174
00175
00176
#define sched_get_global_lock(cpuid) \
00177
do { \
00178
barrier(); \
00179
if (!test_and_set_bit(cpuid, &rtai_cpu_lock)) { \
00180
while (test_and_set_bit(31, &rtai_cpu_lock)) { \
00181
cpu_relax(); \
00182
} \
00183
} \
00184
barrier(); \
00185
} while (0)
00186
00187
#if 0
00188
#include <asm/atomic.h>
00189
#define sched_release_global_lock(cpuid) \
00190
do { \
00191
barrier(); \
00192
atomic_clear_mask((0xFFFF0001 << cpuid), (atomic_t *)&rtai_cpu_lock); \
00193
cpu_relax(); \
00194
barrier(); \
00195
} while (0)
00196
#else
00197
#define sched_release_global_lock(cpuid) \
00198
do { \
00199
barrier(); \
00200
if (test_and_clear_bit(cpuid, &rtai_cpu_lock)) { \
00201
test_and_clear_bit(31, &rtai_cpu_lock); \
00202
cpu_relax(); \
00203
} \
00204
barrier(); \
00205
} while (0)
00206
#endif
00207
00208
#else
00209
00210 #define rt_request_sched_ipi() 0
00211
00212 #define rt_free_sched_ipi()
00213
00214 #define sched_get_global_lock(cpuid)
00215
00216 #define sched_release_global_lock(cpuid)
00217
00218
#endif
00219
00220
00221
00222 static int tasks_per_cpu[
NR_RT_CPUS] = { 0, };
00223
00224 int get_min_tasks_cpuid(
void)
00225 {
00226
int i,
cpuid, min;
00227 min =
tasks_per_cpu[
cpuid = 0];
00228
for (i = 1; i <
NR_RT_CPUS; i++) {
00229
if (
tasks_per_cpu[i] < min) {
00230 min =
tasks_per_cpu[
cpuid = i];
00231 }
00232 }
00233
return cpuid;
00234 }
00235
/*
 * Pin the current Linux task onto CPU 'cpuid'.  No-op on UP kernels.
 * On pre-2.6 kernels the affinity mask is set directly and the task
 * busy-sleeps (2-jiffy timeouts) until it is observed running on the
 * target CPU.  On 2.6+ kernels set_cpus_allowed() does the migration;
 * if it fails, the task's RTAI extension is retargeted to the CPU we
 * are already on and the affinity is set to that CPU instead.
 */
static void put_current_on_cpu(int cpuid)
{
#ifdef CONFIG_SMP
	struct task_struct *task = current;
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
	task->cpus_allowed = 1 << cpuid;
	/* Yield repeatedly until the scheduler has moved us to 'cpuid'. */
	while (cpuid != rtai_cpuid()) {
		task->state = TASK_INTERRUPTIBLE;
		schedule_timeout(2);
	}
#else
	if (set_cpus_allowed(task, cpumask_of_cpu(cpuid))) {
		/* Migration failed: fall back to the CPU we are on now,
		 * keeping the RT_TASK's runnable_on_cpus consistent. */
		((RT_TASK *)(task->rtai_tskext(TSKEXT0)))->runnable_on_cpus = smp_processor_id();
		set_cpus_allowed(current, cpumask_of_cpu(smp_processor_id()));
	}
#endif
#endif
}
00254
/*
 * Initialise an RT_TASK extension for a Linux task and link it into the
 * per-CPU task ring anchored at rt_linux_task.
 *
 * task     - RT_TASK to initialise (must not already be initialised).
 * priority - base priority, >= 0; RT_SCHED_HIGHEST_PRIORITY is reserved
 *            for the RTAI watchdog when one is registered on 'cpuid'.
 * uses_fpu - nonzero if the task uses the FPU.
 * signal   - optional handler invoked on task switches (may be NULL).
 * cpuid    - target CPU (forced to 0 on UP; must be < NR_RT_CPUS).
 * relink   - if non-NULL, attach to this Linux task as a hard task,
 *            suspended; if NULL, attach to 'current' as a soft task
 *            (priority offset by BASE_SOFT_PRIORITY) and migrate it.
 *
 * Returns 0, -EINVAL on bad arguments, -EBUSY on watchdog conflict.
 */
int set_rtext(RT_TASK *task, int priority, int uses_fpu, void(*signal)(void), unsigned int cpuid, struct task_struct *relink)
{
	unsigned long flags;

	if (num_online_cpus() <= 1) {
		cpuid = 0;
	}
	if (task->magic == RT_TASK_MAGIC || cpuid >= NR_RT_CPUS || priority < 0) {
		return -EINVAL;
	}
	if (lxrt_wdog_task[cpuid] && lxrt_wdog_task[cpuid] != task && priority == RT_SCHED_HIGHEST_PRIORITY) {
		rt_printk("Highest priority reserved for RTAI watchdog\n");
		return -EBUSY;
	}
	task->uses_fpu = uses_fpu ? 1 : 0;
	task->runnable_on_cpus = cpuid;
	/* No private stack for LXRT tasks: stack_bottom is aliased onto the
	 * fpu_reg area and its first word zeroed. */
	(task->stack_bottom = (long *)&task->fpu_reg)[0] = 0;
	task->magic = RT_TASK_MAGIC;
	task->policy = 0;
	task->owndres = 0;
	task->prio_passed_to = 0;
	task->period = 0;
	task->resume_time = RT_TIME_END;
	/* Self-linked empty queues. */
	task->queue.prev = task->queue.next = &(task->queue);
	task->queue.task = task;
	task->msg_queue.prev = task->msg_queue.next = &(task->msg_queue);
	task->msg_queue.task = task;
	task->msg = 0;
	task->ret_queue.prev = task->ret_queue.next = &(task->ret_queue);
	task->ret_queue.task = NOTHING;
	task->tprev = task->tnext = task->rprev = task->rnext = task;
	task->blocked_on = NOTHING;
	task->signal = signal;
	task->unblocked = 0;
	task->rt_signals = NULL;
	memset(task->task_trap_handler, 0, RTAI_NR_TRAPS*sizeof(void *));
	task->linux_syscall_server = NULL;
	task->trap_handler_data = NULL;
	task->resync_frame = 0;
	task->ExitHook = 0;
	task->usp_flags = task->usp_flags_mask = task->force_soft = 0;
	task->msg_buf[0] = 0;
	task->exectime[0] = task->exectime[1] = 0;
	task->system_data_ptr = 0;
	atomic_inc((atomic_t *)(tasks_per_cpu + cpuid));
	if (relink) {
		/* Hard real-time task: starts suspended, linked to 'relink'. */
		task->priority = task->base_priority = priority;
		task->suspdepth = task->is_hard = 1;
		task->state = RT_SCHED_READY | RT_SCHED_SUSPENDED;
		relink->rtai_tskext(TSKEXT0) = task;
		task->lnxtsk = relink;
	} else {
		/* Soft task for 'current': ready immediately, then migrated
		 * to the chosen CPU. */
		task->priority = task->base_priority = BASE_SOFT_PRIORITY + priority;
		task->suspdepth = task->is_hard = 0;
		task->state = RT_SCHED_READY;
		current->rtai_tskext(TSKEXT0) = task;
		current->rtai_tskext(TSKEXT1) = task->lnxtsk = current;
		put_current_on_cpu(cpuid);
	}
	/* Append to the global task list under the global hard lock. */
	flags = rt_global_save_flags_and_cli();
	task->next = 0;
	rt_linux_task.prev->next = task;
	task->prev = rt_linux_task.prev;
	rt_linux_task.prev = task;
	rt_global_restore_flags(flags);

	return 0;
}
00325
00326
00327
static void start_stop_kthread(
RT_TASK *,
void (*)(
long),
long,
int,
int,
void(*)(
void),
int);
00328
00329 int rt_kthread_init_cpuid(
RT_TASK *task,
void (*rt_thread)(
long),
long data,
00330
int stack_size,
int priority,
int uses_fpu,
00331
void(*signal)(
void),
unsigned int cpuid)
00332 {
00333
start_stop_kthread(
task, rt_thread, data, priority, uses_fpu, signal,
cpuid);
00334
return (
int)
task->retval;
00335 }
00336
00337
00338 int rt_kthread_init(
RT_TASK *task,
void (*rt_thread)(
long),
long data,
00339
int stack_size,
int priority,
int uses_fpu,
00340
void(*signal)(
void))
00341 {
00342
return rt_kthread_init_cpuid(
task, rt_thread, data, stack_size, priority,
00343 uses_fpu, signal,
get_min_tasks_cpuid());
00344 }
00345
00346
00347
#if USE_RTAI_TASKS
00348
00349 asmlinkage
static void rt_startup(
void(*rt_thread)(
long),
long data)
00350 {
00351
extern int rt_task_delete(
RT_TASK *);
00352
RT_TASK *rt_current =
rt_smp_current[rtai_cpuid()];
00353 rt_global_sti();
00354 rt_current->exectime[1] =
rdtsc();
00355
#if 1
00356
((void (*)(
long))rt_current->max_msg_size[0])(rt_current->max_msg_size[1]);
00357
#else
00358
rt_thread(data);
00359
#endif
00360
rt_task_delete(rt_current);
00361
rt_printk(
"LXRT: task %p returned but could not be delated.\n", rt_current);
00362 }
00363
00364
00365
/*
 * Initialise a native RTAI kernel task with its own stack on CPU 'cpuid'.
 *
 * task       - RT_TASK to initialise (must not already be initialised).
 * rt_thread  - task body; stashed with 'data' in max_msg_size[0/1] for
 *              the rt_startup trampoline.
 * stack_size - bytes allocated via sched_malloc for the private stack;
 *              the stack top is aligned down to 16 bytes.
 * priority   - base priority, >= 0; RT_SCHED_HIGHEST_PRIORITY is
 *              reserved for the RTAI watchdog when one is registered.
 * uses_fpu   - nonzero if the task uses the FPU.
 * signal     - optional handler invoked on task switches (may be NULL).
 * cpuid      - target CPU (forced to 0 on UP; must be < NR_RT_CPUS).
 *
 * Returns 0, -EINVAL on bad arguments, -ENOMEM if the stack cannot be
 * allocated, -EBUSY on watchdog priority conflict.
 *
 * Fix: the watchdog-priority check is now performed BEFORE the stack
 * allocation; previously a failed check returned -EBUSY and leaked the
 * sched_malloc()'d stack.
 */
int rt_task_init_cpuid(RT_TASK *task, void (*rt_thread)(long), long data, int stack_size, int priority, int uses_fpu, void(*signal)(void), unsigned int cpuid)
{
	long *st, i;
	unsigned long flags;

	if (num_online_cpus() <= 1) {
		cpuid = 0;
	}
	if (task->magic == RT_TASK_MAGIC || cpuid >= NR_RT_CPUS || priority < 0) {
		return -EINVAL;
	}
	if (lxrt_wdog_task[cpuid] && lxrt_wdog_task[cpuid] != task
	    && priority == RT_SCHED_HIGHEST_PRIORITY) {
		rt_printk("Highest priority reserved for RTAI watchdog\n");
		return -EBUSY;
	}
	if (!(st = (long *)sched_malloc(stack_size))) {
		return -ENOMEM;
	}

	/* Stack grows downward; top is 16-byte aligned, first word zeroed. */
	task->bstack = task->stack = (long *)(((unsigned long)st + stack_size - 0x10) & ~0xF);
	task->stack[0] = 0;
	task->uses_fpu = uses_fpu ? 1 : 0;
	task->runnable_on_cpus = cpuid;
	atomic_inc((atomic_t *)(tasks_per_cpu + cpuid));
	*(task->stack_bottom = st) = 0;
	task->magic = RT_TASK_MAGIC;
	task->policy = 0;
	/* Born suspended; a resume is needed to start it. */
	task->suspdepth = 1;
	task->state = (RT_SCHED_SUSPENDED | RT_SCHED_READY);
	task->owndres = 0;
	task->is_hard = 1;
	task->lnxtsk = 0;
	task->priority = task->base_priority = priority;
	task->prio_passed_to = 0;
	task->period = 0;
	task->resume_time = RT_TIME_END;
	/* Self-linked empty queues. */
	task->queue.prev = &(task->queue);
	task->queue.next = &(task->queue);
	task->queue.task = task;
	task->msg_queue.prev = &(task->msg_queue);
	task->msg_queue.next = &(task->msg_queue);
	task->msg_queue.task = task;
	task->msg = 0;
	task->ret_queue.prev = &(task->ret_queue);
	task->ret_queue.next = &(task->ret_queue);
	task->ret_queue.task = NOTHING;
	task->tprev = task->tnext =
	task->rprev = task->rnext = task;
	task->blocked_on = NOTHING;
	task->signal = signal;
	task->unblocked = 0;
	task->rt_signals = NULL;
	for (i = 0; i < RTAI_NR_TRAPS; i++) {
		task->task_trap_handler[i] = NULL;
	}
	task->linux_syscall_server = NULL;
	task->trap_handler_data = NULL;
	task->resync_frame = 0;
	task->ExitHook = 0;
	task->exectime[0] = task->exectime[1] = 0;
	task->system_data_ptr = 0;

	/* Entry point and argument for the rt_startup trampoline. */
	task->max_msg_size[0] = (long)rt_thread;
	task->max_msg_size[1] = data;
	init_arch_stack();

	/* Append to the global task list under the global hard lock. */
	flags = rt_global_save_flags_and_cli();
	task->next = 0;
	rt_linux_task.prev->next = task;
	task->prev = rt_linux_task.prev;
	rt_linux_task.prev = task;
	init_task_fpenv(task);
	rt_global_restore_flags(flags);
	return 0;
}
00441
00442
/*
 * Create a native RTAI kernel task on the least-loaded CPU.
 * Convenience wrapper around rt_task_init_cpuid().
 */
int rt_task_init(RT_TASK *task, void (*rt_thread)(long), long data,
		 int stack_size, int priority, int uses_fpu,
		 void(*signal)(void))
{
	unsigned int target_cpu = get_min_tasks_cpuid();

	return rt_task_init_cpuid(task, rt_thread, data, stack_size,
				  priority, uses_fpu, signal, target_cpu);
}
00449
00450
#else
00451
00452 int rt_task_init_cpuid(
RT_TASK *task,
void (*rt_thread)(
long),
long data,
int stack_size,
int priority,
int uses_fpu,
void(*signal)(
void),
unsigned int cpuid)
00453 {
00454
return rt_kthread_init_cpuid(
task, rt_thread, data, stack_size, priority, uses_fpu, signal,
cpuid);
00455 }
00456
00457 int rt_task_init(
RT_TASK *task,
void (*rt_thread)(
long),
long data,
int stack_size,
int priority,
int uses_fpu,
void(*signal)(
void))
00458 {
00459
return rt_kthread_init(
task, rt_thread, data, stack_size, priority, uses_fpu, signal);
00460 }
00461
00462
#endif
00463
/*
 * Migrate a native RTAI task (not a Linux-backed one) to CPU 'cpuid'.
 * Ignored for Linux-backed tasks (task->lnxtsk set).  An out-of-range
 * cpuid selects the least-loaded CPU instead.  Runs entirely under the
 * global hard lock.
 */
void rt_set_runnable_on_cpuid(RT_TASK *task, unsigned int cpuid)
{
	unsigned long flags;
	RT_TASK *linux_task;

	if (task->lnxtsk) {
		return;
	}

	if (cpuid >= NR_RT_CPUS) {
		cpuid = get_min_tasks_cpuid();
	}
	flags = rt_global_save_flags_and_cli();
	/* If exactly one of (old CPU, new CPU) runs a oneshot timer,
	 * convert period/resume_time between CPU-clock and timer units:
	 * case 1 = leaving a oneshot CPU, case 2 = entering one. */
	switch (rt_smp_oneshot_timer[task->runnable_on_cpus] | (rt_smp_oneshot_timer[cpuid] << 1)) {
		case 1:
			task->period = llimd(task->period, TIMER_FREQ, tuned.cpu_freq);
			task->resume_time = llimd(task->resume_time, TIMER_FREQ, tuned.cpu_freq);
			break;
		case 2:
			task->period = llimd(task->period, tuned.cpu_freq, TIMER_FREQ);
			task->resume_time = llimd(task->resume_time, tuned.cpu_freq, TIMER_FREQ);
			break;
	}
	/* Unlink from the old CPU's task list (fix tail pointer if last). */
	if (!((task->prev)->next = task->next)) {
		rt_smp_linux_task[task->runnable_on_cpus].prev = task->prev;
	} else {
		(task->next)->prev = task->prev;
	}
	/* A delayed task must be removed from / re-enqueued on the timed
	 * list around the CPU change so it lands on the new CPU's list. */
	if ((task->state & RT_SCHED_DELAYED)) {
		rem_timed_task(task);
		task->runnable_on_cpus = cpuid;
		enq_timed_task(task);
	} else {
		task->runnable_on_cpus = cpuid;
	}
	/* Append to the new CPU's task list. */
	task->next = 0;
	(linux_task = rt_smp_linux_task + cpuid)->prev->next = task;
	task->prev = linux_task->prev;
	linux_task->prev = task;
	rt_global_restore_flags(flags);
}
00506
00507
/*
 * Migrate a native RTAI task to some CPU within the mask 'run_on_cpus'.
 * Prefers the least-loaded CPU if it is in the mask, otherwise the first
 * set bit.  Ignored for Linux-backed tasks.  On UP the mask collapses
 * to CPU 0.
 */
void rt_set_runnable_on_cpus(RT_TASK *task, unsigned long run_on_cpus)
{
	int cpuid;

	if (task->lnxtsk) {
		return;
	}

#ifdef CONFIG_SMP
	/* Restrict the request to CPUs that are actually online. */
	run_on_cpus &= CPUMASK(cpu_online_map);
#else
	run_on_cpus = 1;
#endif
	cpuid = get_min_tasks_cpuid();
	if (!test_bit(cpuid, &run_on_cpus)) {
		cpuid = ffnz(run_on_cpus);
	}
	rt_set_runnable_on_cpuid(task, cpuid);
}
00527
00528
/*
 * Return the number of bytes between the current stack pointer and the
 * current RTAI task's stack bottom (i.e. remaining headroom), or
 * -0x7FFFFFFF when called from the Linux task context, which has no
 * private RTAI stack.
 */
int rt_check_current_stack(void)
{
	DECLARE_RT_CURRENT;	/* declares cpuid and rt_current */
	char *sp;

	ASSIGN_RT_CURRENT;
	if (rt_current != &rt_linux_task) {
		sp = get_stack_pointer();
		return (sp - (char *)(rt_current->stack_bottom));
	} else {
		return -0x7FFFFFFF;
	}
}
00542
00543
/*
 * Round-robin helpers used by the schedulers below.  All three expand in
 * a scope where rt_current, new_task, prio, preempt and rt_times are
 * visible; they are compiled out of effect when CONFIG_RTAI_ALLOW_RR
 * is 0 or the task's policy is not round-robin (policy > 0).
 *
 * RR_YIELD: if rt_current's quantum has expired, refill rr_remaining and
 * rotate it behind the other READY tasks of the same priority; otherwise
 * record how much of the quantum is left.
 */
#define RR_YIELD() \
if (CONFIG_RTAI_ALLOW_RR && rt_current->policy > 0) { \
	if (rt_current->yield_time <= rt_times.tick_time) { \
		rt_current->rr_remaining = rt_current->rr_quantum; \
		if (rt_current->state == RT_SCHED_READY) { \
			RT_TASK *task; \
			task = rt_current->rnext; \
			while (rt_current->priority == task->priority) { \
				task = task->rnext; \
			} \
			if (task != rt_current->rnext) { \
				(rt_current->rprev)->rnext = rt_current->rnext; \
				(rt_current->rnext)->rprev = rt_current->rprev; \
				task->rprev = (rt_current->rprev = task->rprev)->rnext = rt_current; \
				rt_current->rnext = task; \
			} \
		} \
	} else { \
		rt_current->rr_remaining = rt_current->yield_time - rt_times.tick_time; \
	} \
}

/*
 * TASK_TO_SCHEDULE: pick the head of the ready list as new_task, record
 * its priority in prio, and (for RR tasks) set the absolute time at
 * which its quantum will expire.
 */
#define TASK_TO_SCHEDULE() \
do { \
	prio = (new_task = rt_linux_task.rnext)->priority; \
	if (CONFIG_RTAI_ALLOW_RR && new_task->policy > 0) { \
		new_task->yield_time = rt_times.tick_time + new_task->rr_remaining; \
	} \
} while (0)

/*
 * RR_INTR_TIME: for an RR new_task, force a timer shot (preempt = 1) no
 * later than its quantum expiry; for non-RR tasks leave preempt = 0.
 * Note this is the only unconditional initialisation of 'preempt' in
 * the schedulers below.
 */
#define RR_INTR_TIME() \
do { \
	if (CONFIG_RTAI_ALLOW_RR && new_task->policy > 0) { \
		preempt = 1; \
		if (new_task->yield_time < rt_times.intr_time) { \
			rt_times.intr_time = new_task->yield_time; \
		} \
	} else { \
		preempt = 0; \
	} \
} while (0)
00585
00586
#ifdef RTAI_TASKPRI
00587
#define LOCK_LINUX_NOTSKPRI(cpuid) \
00588
do { rt_switch_to_real_time_notskpri(cpuid); } while (0)
00589
#define UNLOCK_LINUX_NOTSKPRI(cpuid) \
00590
do { rt_switch_to_linux_notskpri(cpuid); } while (0)
00591
#else
00592 #define LOCK_LINUX_NOTSKPRI(cpuid) \
00593
do { rt_switch_to_real_time(cpuid); } while (0)
00594 #define UNLOCK_LINUX_NOTSKPRI(cpuid) \
00595
do { rt_switch_to_linux(cpuid); } while (0)
00596
#endif
00597
00598 #define LOCK_LINUX(cpuid) do { rt_switch_to_real_time(cpuid); } while (0)
00599 #define UNLOCK_LINUX(cpuid) do { rt_switch_to_linux(cpuid); } while (0)
00600
00601
#ifdef LOCKED_LINUX_IN_IRQ_HANDLER
00602 #define LOCK_LINUX_IN_IRQ(cpuid)
00603 #define UNLOCK_LINUX_IN_IRQ(cpuid)
00604
#else
00605
#define LOCK_LINUX_IN_IRQ(cpuid) LOCK_LINUX(cpuid)
00606
#define UNLOCK_LINUX_IN_IRQ(cpuid) UNLOCK_LINUX(cpuid)
00607
#endif
00608
00609
#if CONFIG_RTAI_MONITOR_EXECTIME
00610 static RTIME switch_time[
NR_RT_CPUS];
00611 #define KEXECTIME() \
00612
do { \
00613
RTIME now; \
00614
now = rdtsc(); \
00615
if (!rt_current->lnxtsk) { \
00616
rt_current->exectime[0] += (now - switch_time[cpuid]); \
00617
} \
00618
switch_time[cpuid] = now; \
00619
} while (0)
00620
00621 #define UEXECTIME() \
00622
do { \
00623
RTIME now; \
00624
now = rdtsc(); \
00625
if (rt_current->is_hard) { \
00626
rt_current->exectime[0] += (now - switch_time[cpuid]); \
00627
} \
00628
switch_time[cpuid] = now; \
00629
} while (0)
00630
#else
00631
#define KEXECTIME()
00632
#define UEXECTIME()
00633
#endif
00634
00635
/*
 * Force 'rt_task' back onto the ready list and trigger a reschedule so
 * it can be demoted to soft mode.  If the task is already purely READY
 * nothing needs to be done.  Runs under the global hard lock.
 */
void rt_do_force_soft(RT_TASK *rt_task)
{
	rt_global_cli();
	if (rt_task->state != RT_SCHED_READY) {
		/* Clear READY so enq_ready_task() re-inserts it cleanly. */
		rt_task->state &= ~RT_SCHED_READY;
		enq_ready_task(rt_task);
		RT_SCHEDULE(rt_task, rtai_cpuid());
	}
	rt_global_sti();
}
00646
/*
 * enq_soft_ready_task: insert 'ready_task' into this CPU's ready list in
 * priority order, scanning from the head; the scan stops early when it
 * meets a negative priority (the rt_linux_task sentinel).  Expands in a
 * scope where 'cpuid' is visible.
 */
#define enq_soft_ready_task(ready_task) \
do { \
	RT_TASK *task = rt_smp_linux_task[cpuid].rnext; \
	while (ready_task->priority >= task->priority) { \
		if ((task = task->rnext)->priority < 0) break; \
	} \
	task->rprev = (ready_task->rprev = task->rprev)->rnext = ready_task; \
	ready_task->rnext = task; \
} while (0)

/*
 * pend_wake_up_hts: queue 'lnxtsk' on this CPU's wake-up ring buffer and
 * pend the service request that will wake it from the Linux side.  The
 * ring index wraps modulo MAX_WAKEUP_SRQ (assumed power of two).
 */
#define pend_wake_up_hts(lnxtsk, cpuid) \
do { \
	wake_up_hts[cpuid].task[wake_up_hts[cpuid].in++ & (MAX_WAKEUP_SRQ - 1)] = lnxtsk; \
	hal_pend_uncond(wake_up_srq[cpuid].srq, cpuid); \
} while (0)
00663
00664
00665 static inline void make_current_soft(
RT_TASK *rt_current,
int cpuid)
00666 {
00667
void rt_schedule(
void);
00668 rt_current->force_soft = 0;
00669 rt_current->state &= ~
RT_SCHED_READY;;
00670
pend_wake_up_hts(rt_current->lnxtsk,
cpuid);
00671 (rt_current->rprev)->rnext = rt_current->rnext;
00672 (rt_current->rnext)->rprev = rt_current->rprev;
00673
rt_schedule();
00674 rt_current->is_hard = 0;
00675 rt_global_sti();
00676
hal_schedule_back_root(rt_current->lnxtsk);
00677
00678 rt_global_cli();
00679
LOCK_LINUX_NOTSKPRI(
cpuid);
00680 rt_current->state |=
RT_SCHED_READY;
00681
rt_smp_current[
cpuid] = rt_current;
00682
if (rt_current->state !=
RT_SCHED_READY) {
00683 (rt_current->lnxtsk)->state = TASK_SOFTREALTIME;
00684
rt_schedule();
00685 }
else {
00686
enq_soft_ready_task(rt_current);
00687 }
00688 }
00689
/*
 * Context-switch between native RTAI tasks and/or the Linux task, with
 * lazy FPU ownership handled via the per-CPU 'fpu_task' pointer.
 * Returns NULL when the switch completed here, or a non-NULL task
 * pointer when, after switching away from a Linux-backed task, control
 * came back with a different task to schedule (the caller continues
 * with it).
 */
static RT_TASK *switch_rtai_tasks(RT_TASK *rt_current, RT_TASK *new_task, int cpuid)
{
	if (rt_current->lnxtsk) {
		/* Leaving Linux for a native RTAI task. */
		LOCK_LINUX(cpuid);
		rt_linux_task.prevp = rt_current;
		save_fpcr_and_enable_fpu(linux_cr0);
		if (new_task->uses_fpu) {
			/* Save Linux's FPU state; the new task owns the FPU. */
			save_fpenv(rt_linux_task.fpu_reg);
			fpu_task = new_task;
			restore_fpenv(fpu_task->fpu_reg);
		}
		KEXECTIME();
		rt_exchange_tasks(rt_smp_current[cpuid], new_task);
		restore_fpcr(linux_cr0);
		UNLOCK_LINUX(cpuid);
		/* If a different task was picked while we were away, let the
		 * caller handle it. */
		if (rt_linux_task.nextp != rt_current) {
			return rt_linux_task.nextp;
		}
	} else {
		if (new_task->lnxtsk) {
			/* Going back to Linux: give the FPU back to it. */
			rt_linux_task.nextp = new_task;
			new_task = rt_linux_task.prevp;
			if (fpu_task != &rt_linux_task) {
				save_fpenv(fpu_task->fpu_reg);
				fpu_task = &rt_linux_task;
				restore_fpenv(fpu_task->fpu_reg);
			}
		} else if (new_task->uses_fpu && fpu_task != new_task) {
			/* RTAI-to-RTAI switch with an FPU ownership change. */
			save_fpenv(fpu_task->fpu_reg);
			fpu_task = new_task;
			restore_fpenv(fpu_task->fpu_reg);
		}
		KEXECTIME();
		rt_exchange_tasks(rt_smp_current[cpuid], new_task);
	}
	/* Run the per-task switch signal handler, if any. */
	if (rt_current->signal) {
		(*rt_current->signal)();
	}
	return NULL;
}
00730
00731 #define lxrt_context_switch(prev, next, cpuid) \
00732
do { _lxrt_context_switch(prev, next, cpuid); barrier(); } while (0)
00733
00734
#ifdef CONFIG_SMP
00735
/*
 * Scheduler entry invoked on reception of the cross-CPU SCHED_IPI
 * (SMP only).  Mirrors rt_schedule(): picks the highest-priority ready
 * task, reprograms the oneshot timer if a preemption point is due, and
 * switches context when a different task must run.  Unlike rt_schedule()
 * it acquires the global lock itself on entry and does not re-acquire it
 * at sched_exit (interrupt epilogue handles that).
 */
static void rt_schedule_on_schedule_ipi(void)
{
	RT_TASK *rt_current, *task, *new_task;
	int cpuid, prio, preempt;

	rt_current = rt_smp_current[cpuid = rtai_cpuid()];

	sched_get_global_lock(cpuid);
	RR_YIELD();
	if (oneshot_running) {

		rt_time_h = rdtsc() + rt_half_tick;
		wake_up_timed_tasks(cpuid);
		TASK_TO_SCHEDULE();

		RR_INTR_TIME();	/* initialises 'preempt' */
		/* Pull the next timer shot forward if a timed task of equal
		 * or higher priority resumes before the programmed time. */
		task = &rt_linux_task;
		while ((task = task->tnext) != &rt_linux_task) {
			if (task->priority <= prio && task->resume_time < rt_times.intr_time) {
				rt_times.intr_time = task->resume_time;
				preempt = 1;
				break;
			}
		}
		if (preempt || (prio == RT_SCHED_LINUX_PRIORITY && !shot_fired)) {

			int delay;

			delay = (int)(rt_times.intr_time - rt_time_h) - tuned.latency;
			if (delay >= tuned.setup_time_TIMER_CPUNIT) {
				delay = imuldiv(delay, TIMER_FREQ, tuned.cpu_freq);
			} else {
				/* Too close: fire after the minimum setup time. */
				delay = tuned.setup_time_TIMER_UNIT;
				rt_times.intr_time = rt_time_h + (tuned.setup_time_TIMER_CPUNIT);
			}
			shot_fired = 1;
			rt_set_timer_delay(delay);
		}
	} else {
		TASK_TO_SCHEDULE();
	}
	sched_release_global_lock(cpuid);

	if (new_task != rt_current) {
		rt_scheduling[cpuid].rqsted = 1;
		/* Scheduling locked on this CPU: defer the switch. */
		if (rt_scheduling[cpuid].locked) {
			goto sched_exit;
		}
		if (USE_RTAI_TASKS && (!new_task->lnxtsk || !rt_current->lnxtsk)) {
			if (!(new_task = switch_rtai_tasks(rt_current, new_task, cpuid))) {
				goto sched_exit;
			}
		}
		if (new_task->is_hard || rt_current->is_hard) {
			struct task_struct *prev;
			if (!rt_current->is_hard) {
				LOCK_LINUX_IN_IRQ(cpuid);
				rt_linux_task.lnxtsk = prev = current;
			} else {
				prev = rt_current->lnxtsk;
			}
			rt_smp_current[cpuid] = new_task;
			UEXECTIME();
			lxrt_context_switch(prev, new_task->lnxtsk, cpuid);
			if (!rt_current->is_hard) {
				UNLOCK_LINUX_IN_IRQ(cpuid);
			} else if (lnxtsk_uses_fpu(prev)) {
				restore_fpu(prev);
			}
		}
	}
sched_exit:
	rtai_cli();
#if CONFIG_RTAI_BUSY_TIME_ALIGN
	/* Optional busy-wait alignment to the exact resume time. */
	if (rt_current->trap_handler_data) {
		rt_current->trap_handler_data = 0;
		while(rdtsc() < rt_current->resume_time);
	}
#endif
}
00816
#endif
00817
/*
 * Core scheduler.  Called with the global hard lock HELD; it releases
 * the lock across the context switch and re-acquires it at sched_exit.
 * Selects the highest-priority ready task, reprograms the oneshot timer
 * (and, with USE_LINUX_TIMER, honours the Linux periodic tick) when a
 * preemption point is due, then performs the hard/soft context switch.
 * The sched_soft path parks a demoted soft task in the Linux scheduler
 * and re-enters it on wake-up.
 */
void rt_schedule(void)
{
	RT_TASK *rt_current, *task, *new_task;
	int cpuid, prio, preempt;

	rt_current = rt_smp_current[cpuid = rtai_cpuid()];

	RR_YIELD();
	if (oneshot_running) {
		rt_time_h = rdtsc() + rt_half_tick;
		wake_up_timed_tasks(cpuid);
		TASK_TO_SCHEDULE();

		RR_INTR_TIME();	/* initialises 'preempt' */
		/* Pull the next timer shot forward if a timed task of equal
		 * or higher priority resumes before the programmed time. */
		task = &rt_linux_task;
		while ((task = task->tnext) != &rt_linux_task) {
			if (task->priority <= prio && task->resume_time < rt_times.intr_time) {
				rt_times.intr_time = task->resume_time;
				preempt = 1;
				break;
			}
		}
#ifdef USE_LINUX_TIMER
		/* If Linux is to run next, make sure its periodic tick is
		 * not starved by a far-away intr_time. */
		if (prio == RT_SCHED_LINUX_PRIORITY && !shot_fired) {
			RTIME linux_intr_time;
			linux_intr_time = rt_times.linux_time > rt_times.tick_time ? rt_times.linux_time : rt_times.tick_time + rt_times.linux_tick;
			if (linux_intr_time < rt_times.intr_time) {
				rt_times.intr_time = linux_intr_time;
				preempt = 1;
			}
		}
#endif
		if (preempt || (prio == RT_SCHED_LINUX_PRIORITY && !shot_fired)) {

			int delay;

			delay = (int)(rt_times.intr_time - rt_time_h) - tuned.latency;
			if (delay >= tuned.setup_time_TIMER_CPUNIT) {
				delay = imuldiv(delay, TIMER_FREQ, tuned.cpu_freq);
			} else {
				/* Too close: fire after the minimum setup time. */
				delay = tuned.setup_time_TIMER_UNIT;
				rt_times.intr_time = rt_time_h + (tuned.setup_time_TIMER_CPUNIT);
			}
			shot_fired = 1;
			rt_set_timer_delay(delay);
		}
	} else {
		TASK_TO_SCHEDULE();
	}
	sched_release_global_lock(cpuid);

	if (new_task != rt_current) {
		rt_scheduling[cpuid].rqsted = 1;
		/* Scheduling locked on this CPU: defer the switch. */
		if (rt_scheduling[cpuid].locked) {
			goto sched_exit;
		}
		if (USE_RTAI_TASKS && (!new_task->lnxtsk || !rt_current->lnxtsk)) {
			if (!(new_task = switch_rtai_tasks(rt_current, new_task, cpuid))) {
				goto sched_exit;
			}
		}
		rt_smp_current[cpuid] = new_task;
		if (new_task->is_hard || rt_current->is_hard) {
			struct task_struct *prev;
			if (!rt_current->is_hard) {
				LOCK_LINUX(cpuid);
				rt_linux_task.lnxtsk = prev = current;
			} else {
				prev = rt_current->lnxtsk;
			}
			UEXECTIME();
			lxrt_context_switch(prev, new_task->lnxtsk, cpuid);
			if (!rt_current->is_hard) {
				UNLOCK_LINUX(cpuid);
				if (rt_current->state != RT_SCHED_READY) {
					goto sched_soft;
				}
			} else {
				if (lnxtsk_uses_fpu(prev)) {
					restore_fpu(prev);
				}
				/* A hard task flagged for demotion is made
				 * soft on its own context. */
				if (rt_current->force_soft) {
					make_current_soft(rt_current, cpuid);
				}
			}
		} else if (rt_current->state != RT_SCHED_READY) {
sched_soft:
			/* Soft task blocked: let Linux run it out, then
			 * re-enter it as READY when it comes back. */
			UNLOCK_LINUX_NOTSKPRI(cpuid);
			rt_global_sti();
			hal_test_and_fast_flush_pipeline(cpuid);
			NON_RTAI_SCHEDULE(cpuid);
			rt_global_cli();
			rt_current->state = (rt_current->state & ~RT_SCHED_SFTRDY) | RT_SCHED_READY;
			LOCK_LINUX_NOTSKPRI(cpuid);
			enq_soft_ready_task(rt_current);
			rt_smp_current[cpuid] = rt_current;
		}
	}
sched_exit:
	rtai_cli();
	sched_get_global_lock(cpuid);	/* caller expects the lock held */
#if CONFIG_RTAI_BUSY_TIME_ALIGN
	/* Optional busy-wait alignment to the exact resume time. */
	if (rt_current->trap_handler_data) {
		rt_current->trap_handler_data = 0;
		while(rdtsc() < rt_current->resume_time);
	}
#endif
}
00927
00928
/*
 * Rate Monotonic Scheduling supervisor: reassign base priorities so that
 * tasks with shorter periods get higher (numerically lower) priority.
 * Each pass of the outer loop finds the not-yet-ranked periodic task
 * with the smallest period (marked by priority = -1) and gives it the
 * next rank; the final loop restores priority from base_priority.
 * NOTE: the inner 'task' deliberately shadows the outer loop variable,
 * which only advances in the outer while-condition.
 * NOTE(review): 'cpuid' is validated but not otherwise used in this
 * body — the scan walks the whole task list; confirm intent.
 */
void rt_spv_RMS(int cpuid)
{
	RT_TASK *task;
	int prio;
	if (cpuid < 0 || cpuid >= num_online_cpus()) {
		cpuid = rtai_cpuid();
	}
	prio = 0;
	task = &rt_linux_task;
	/* One outer iteration per task: rank at most that many tasks. */
	while ((task = task->next)) {
		RT_TASK *task, *htask;
		RTIME period;
		htask = 0;
		task = &rt_linux_task;
		period = RT_TIME_END;
		/* Find the unranked periodic task with the smallest period. */
		while ((task = task->next)) {
			if (task->priority >= 0 && task->policy >= 0 && task->period && task->period < period) {
				period = (htask = task)->period;
			}
		}
		if (htask) {
			/* Mark as ranked; the rank is kept in base_priority. */
			htask->priority = -1;
			htask->base_priority = prio++;
		} else {
			goto ret;	/* no candidates left */
		}
	}
ret:	task = &rt_linux_task;
	/* Promote the computed ranks into the effective priorities. */
	while ((task = task->next)) {
		if (task->priority < 0) {
			task->priority = task->base_priority;
		}
	}
	return;
}
00964
00965
/*
 * Disable RTAI rescheduling on the local CPU (nestable).  On the first
 * (outermost) lock the pending-reschedule flag is cleared so that
 * rt_sched_unlock() can detect requests raised while locked.
 */
void rt_sched_lock(void)
{
	unsigned long flags;
	int cpuid;

	rtai_save_flags_and_cli(flags);
	if (!rt_scheduling[cpuid = rtai_cpuid()].locked++) {
		rt_scheduling[cpuid].rqsted = 0;
	}
	rtai_restore_flags(flags);
}
00977
00978
/*
 * Re-enable RTAI rescheduling on the local CPU.  When the outermost
 * lock is dropped and a reschedule was requested while locked, run the
 * scheduler now (taking the global lock around it).  An unbalanced
 * unlock (lock count already zero) is silently ignored — note the
 * intentionally empty else branch.
 */
void rt_sched_unlock(void)
{
	unsigned long flags;
	int cpuid;

	rtai_save_flags_and_cli(flags);
	if (rt_scheduling[cpuid = rtai_cpuid()].locked && !(--rt_scheduling[cpuid].locked)) {
		if (rt_scheduling[cpuid].rqsted > 0) {
			sched_get_global_lock(cpuid);
			rt_schedule();
			sched_release_global_lock(cpuid);
		}
	} else {
		/* unbalanced unlock: nothing to do */
	}
	rtai_restore_flags(flags);
}
00996
00997
00998
/*
 * Run a deferred reschedule on behalf of the scheduling-lock machinery:
 * take the global lock, schedule, release.  Caller provides the CPU id.
 */
void rtai_handle_isched_lock (int cpuid)
{
	sched_get_global_lock(cpuid);
	rt_schedule();
	sched_release_global_lock(cpuid);
}
01005
01006
01007
01008
/* Lookup into the LXRT function table (defined elsewhere). */
void *rt_get_lxrt_fun_entry(int index);

/*
 * Signal a semaphore by calling the LXRT-registered SEM_SIGNAL entry,
 * avoiding a direct link-time dependency on the semaphore module.
 */
static inline void sched_sem_signal(SEM *sem)
{
	((void (*)(SEM *))rt_get_lxrt_fun_entry(SEM_SIGNAL))(sem);
}
01013
/*
 * Tear down an RT_TASK: run exit handlers, release any queue/semaphore/
 * mailbox linkage it is blocked on, wake tasks blocked on it, unlink it
 * from the per-CPU task list, and (for native tasks) queue its stack
 * for deferred freeing via the frstk service request.  If the task
 * still holds semaphore resources and is being deleted by another
 * non-Linux-priority task, deletion is deferred by poisoning suspdepth.
 * Returns 0, or -EINVAL for an uninitialised task or the Linux task.
 * Runs under the global hard lock; self-deletion ends in rt_schedule().
 */
int clr_rtext(RT_TASK *task)
{
	DECLARE_RT_CURRENT;	/* declares cpuid and rt_current */
	unsigned long flags;
	QUEUE *q;

	if (task->magic != RT_TASK_MAGIC || task->priority == RT_SCHED_LINUX_PRIORITY) {
		return -EINVAL;
	}

	flags = rt_global_save_flags_and_cli();
	ASSIGN_RT_CURRENT;
	if (!(task->owndres & SEMHLF) || task == rt_current || rt_current->priority == RT_SCHED_LINUX_PRIORITY) {
		call_exit_handlers(task);
		rem_timed_task(task);
		if (task->blocked_on) {
			if (task->state & (RT_SCHED_SEMAPHORE | RT_SCHED_SEND | RT_SCHED_RPC | RT_SCHED_RETURN)) {
				/* Unlink from whatever wait queue holds it. */
				(task->queue.prev)->next = task->queue.next;
				(task->queue.next)->prev = task->queue.prev;
				if (task->state & RT_SCHED_SEMAPHORE) {
					/* Give back the count taken by the wait;
					 * binary semaphores saturate at 1. */
					SEM *sem = (SEM *)(task->blocked_on);
					if (++sem->count > 1 && sem->type) {
						sem->count = 1;
					}
				}
			} else if (task->state & RT_SCHED_MBXSUSP) {
				/* Release the mailbox side it was waiting on. */
				MBX *mbx = (MBX *)task->blocked_on;
				mbx->waiting_task = NOTHING;
				sched_sem_signal(!mbx->frbs ? &mbx->sndsem : &mbx->rcvsem);
			}
		}
		/* Wake senders/RPC callers blocked on this task; flag their
		 * blocked_on as SOMETHING so they see the peer vanished. */
		q = &(task->msg_queue);
		while ((q = q->next) != &(task->msg_queue)) {
			rem_timed_task(q->task);
			if ((q->task)->state != RT_SCHED_READY && ((q->task)->state &= ~(RT_SCHED_SEND | RT_SCHED_RPC | RT_SCHED_DELAYED)) == RT_SCHED_READY) {
				enq_ready_task(q->task);
			}
			(q->task)->blocked_on = SOMETHING;
		}
		/* Same for tasks waiting on a return from this task. */
		q = &(task->ret_queue);
		while ((q = q->next) != &(task->ret_queue)) {
			rem_timed_task(q->task);
			if ((q->task)->state != RT_SCHED_READY && ((q->task)->state &= ~(RT_SCHED_RETURN | RT_SCHED_DELAYED)) == RT_SCHED_READY) {
				enq_ready_task(q->task);
			}
			(q->task)->blocked_on = SOMETHING;
		}
		/* Unlink from the per-CPU task list (fix tail if last). */
		if (!((task->prev)->next = task->next)) {
			rt_smp_linux_task[task->runnable_on_cpus].prev = task->prev;
		} else {
			(task->next)->prev = task->prev;
		}
		/* Drop FPU ownership back to the Linux task if held. */
		if (rt_smp_fpu_task[task->runnable_on_cpus] == task) {
			rt_smp_fpu_task[task->runnable_on_cpus] = rt_smp_linux_task + task->runnable_on_cpus;;
		}
		/* Native task: free its stack from Linux context via SRQ. */
		if (!task->lnxtsk) {
			frstk_srq.mp[frstk_srq.in++ & (MAX_FRESTK_SRQ - 1)] = task->stack_bottom;
			rt_pend_linux_srq(frstk_srq.srq);
		}
		task->magic = 0;
		rem_ready_task(task);
		task->state = 0;
		atomic_dec((void *)(tasks_per_cpu + task->runnable_on_cpus));
		/* Deleting ourselves: never returns to this context. */
		if (task == rt_current) {
			rt_schedule();
		}
	} else {
		/* Resources still owned: mark for deferred deletion. */
		task->suspdepth = -0x7FFFFFFF;
	}
	rt_global_restore_flags(flags);
	return 0;
}
01086
01087
01088 int rt_task_delete(
RT_TASK *task)
01089 {
01090
if (!
clr_rtext(
task)) {
01091
if (
task->lnxtsk) {
01092
start_stop_kthread(
task, 0, 0, 0, 0, 0, 0);
01093 }
01094 }
01095
return 0;
01096 }
01097
01098
/**
 * rt_get_timer_cpu - legacy query for the CPU owning the timer.
 *
 * In this scheduler build the answer is fixed: always returns 1.
 */
int rt_get_timer_cpu(void)
{
	const int answer = 1;
	return answer;
}
01103
01104
/*
 * rt_timer_handler - the hard timer interrupt handler and scheduling point.
 *
 * Updates the per-CPU time bookkeeping, wakes timed tasks, picks the next
 * task to run and, in oneshot mode, programs the next timer shot.  Finally
 * performs the context switch when the chosen task differs from the current
 * one.
 *
 * NOTE(review): rt_times, oneshot_timer, rt_time_h, rt_half_tick,
 * shot_fired and rt_linux_task are presumably per-CPU macros expanding
 * through the local 'cpuid'; likewise RR_YIELD()/TASK_TO_SCHEDULE()/
 * RR_INTR_TIME() appear to assign 'prio', 'preempt' and 'new_task'.
 * The locals must keep these exact names — confirm against rtai_schedcore.h.
 */
static void rt_timer_handler(void)
{
	RT_TASK *rt_current, *task, *new_task;
	int cpuid, prio, preempt;	/* set via the scheduling macros below */

	DO_TIMER_PROPER_OP();
	rt_current = rt_smp_current[cpuid = rtai_cpuid()];

	/* oneshot: timestamp now; periodic: the programmed interrupt time */
	rt_times.tick_time = oneshot_timer ? rdtsc() : rt_times.intr_time;
	rt_time_h = rt_times.tick_time + rt_half_tick;
#ifdef USE_LINUX_TIMER
	/* propagate the tick to Linux when its deadline has passed */
	if (rt_times.tick_time >= rt_times.linux_time) {
		rt_times.linux_time += rt_times.linux_tick;
		update_linux_timer(cpuid);
	}
#endif

	sched_get_global_lock(cpuid);
	wake_up_timed_tasks(cpuid);
	RR_YIELD();
	TASK_TO_SCHEDULE();

	if (oneshot_timer) {
#ifdef USE_LINUX_TIMER
		int islnx;
#endif
		shot_fired = 0;
		/* default next shot; pulled earlier by any imminent resume */
		rt_times.intr_time = rt_times.tick_time + ONESHOT_SPAN;
		RR_INTR_TIME();
		task = &rt_linux_task;
		while ((task = task->tnext) != &rt_linux_task) {
			if (task->priority <= prio && task->resume_time < rt_times.intr_time) {
				rt_times.intr_time = task->resume_time;
				preempt = 1;
				break;
			}
		}
#ifndef USE_LINUX_TIMER
		if (preempt || prio == RT_SCHED_LINUX_PRIORITY) {

			int delay;
#else
		if ((islnx = (prio == RT_SCHED_LINUX_PRIORITY)) || preempt) {

			int delay;
			if (islnx) {
				/* when Linux runs next, do not shoot later than its tick */
				RTIME linux_intr_time;
				linux_intr_time = rt_times.linux_time > rt_times.tick_time ? rt_times.linux_time : rt_times.tick_time + rt_times.linux_tick;
				if (linux_intr_time < rt_times.intr_time) {
					rt_times.intr_time = linux_intr_time;
				}
			}
#endif
			/* convert to timer units, enforcing the minimum setup time */
			delay = (int)(rt_times.intr_time - rt_time_h) - tuned.latency;
			if (delay >= tuned.setup_time_TIMER_CPUNIT) {
				delay = imuldiv(delay, TIMER_FREQ, tuned.cpu_freq);
			} else {
				delay = tuned.setup_time_TIMER_UNIT;
				rt_times.intr_time = rt_time_h + (tuned.setup_time_TIMER_CPUNIT);
			}
			shot_fired = 1;
			rt_set_timer_delay(delay);
		}
	} else {
		/* periodic mode: just advance by one tick */
		rt_times.intr_time += rt_times.periodic_tick;
		rt_set_timer_delay(0);
	}
	sched_release_global_lock(cpuid);

	if (new_task != rt_current) {
		rt_scheduling[cpuid].rqsted = 1;
		if (rt_scheduling[cpuid].locked) {
			goto sched_exit;	/* scheduling temporarily disabled */
		}
		if (USE_RTAI_TASKS && (!new_task->lnxtsk || !rt_current->lnxtsk)) {
			if (!(new_task = switch_rtai_tasks(rt_current, new_task, cpuid))) {
				goto sched_exit;
			}
		}
		if (new_task->is_hard || rt_current->is_hard) {
			struct task_struct *prev;
			if (!rt_current->is_hard) {
				LOCK_LINUX_IN_IRQ(cpuid);
				rt_linux_task.lnxtsk = prev = current;
			} else {
				prev = rt_current->lnxtsk;
			}
			rt_smp_current[cpuid] = new_task;
			UEXECTIME();
			lxrt_context_switch(prev, new_task->lnxtsk, cpuid);
			if (!rt_current->is_hard) {
				UNLOCK_LINUX_IN_IRQ(cpuid);
			} else if (lnxtsk_uses_fpu(prev)) {
				restore_fpu(prev);
			}
		}
	}
sched_exit:
	rtai_cli();
}
01207
01208
01209
#ifdef USE_LINUX_TIMER
01210 static irqreturn_t
recover_jiffies(
int irq,
void *dev_id,
struct pt_regs *regs)
01211 {
01212 rt_global_cli();
01213
if (
linux_times->
tick_time >=
linux_times->
linux_time) {
01214
linux_times->
linux_time +=
linux_times->
linux_tick;
01215
update_linux_timer(rtai_cpuid());
01216 }
01217 rt_global_sti();
01218
return RTAI_LINUX_IRQ_HANDLED;
01219 }
01220
#endif
01221
01222
01223 int rt_is_hard_timer_running(
void)
01224 {
01225
return rt_sched_timed;
01226 }
01227
01228
/*
 * rt_set_periodic_mode - stop any running timer and flag every CPU for
 * periodic timing mode.
 * NOTE(review): oneshot_timer/oneshot_running look like per-CPU macros
 * indexed by the local 'cpuid' — the variable name must not change.
 */
void rt_set_periodic_mode(void)
{
	int cpuid;
	stop_rt_timer();
	for (cpuid = 0; cpuid < NR_RT_CPUS; cpuid++) {
		oneshot_timer = oneshot_running = 0;
	}
}
01237
01238
/*
 * rt_set_oneshot_mode - stop any running timer and flag every CPU for
 * oneshot timing mode (oneshot_running is left for the timer start path).
 */
void rt_set_oneshot_mode(void)
{
	int cpuid;
	stop_rt_timer();
	for (cpuid = 0; cpuid < NR_RT_CPUS; cpuid++) {
		oneshot_timer = 1;
	}
}
01247
01248
01249
#if defined(CONFIG_RTAI_RTC_FREQ) && CONFIG_RTAI_RTC_FREQ >= 2

#ifdef CONFIG_SMP

/*
 * start_rt_timer (RTC build, SMP) - drive the scheduler from the RTC at
 * CONFIG_RTAI_RTC_FREQ; 'period' is ignored in this configuration.
 * Zeroes all per-CPU time bookkeeping, hooks the APIC timer IPI to
 * rt_timer_handler and returns 1.
 */
RTIME start_rt_timer(int period)
{
	int cpuid;
	for (cpuid = 0; cpuid < NR_RT_CPUS; cpuid++) {
		oneshot_timer = oneshot_running = 0;
		rt_smp_times[cpuid].linux_tick = 0;
		rt_smp_times[cpuid].tick_time = 0;
		rt_smp_times[cpuid].intr_time = 0;
		rt_smp_times[cpuid].linux_time = 0;
		rt_smp_times[cpuid].periodic_tick = 1;
		tuned.timers_tol[cpuid] = rt_half_tick = 0;
		rt_time_h = 0;
	}
	linux_times = rt_smp_times;
	rt_request_irq(RTAI_APIC_TIMER_IPI, (void *)rt_timer_handler, NULL, 0);
	rt_request_rtc(CONFIG_RTAI_RTC_FREQ, NULL);
	rt_sched_timed = 1;
	return 1LL;
}
01272
01273
01274
/*
 * stop_rt_timer (RTC build, SMP) - release the RTC and the APIC timer
 * IPI, park every CPU's time horizon at RT_TIME_END and trigger a final
 * reschedule on all CPUs.
 */
void stop_rt_timer(void)
{
	unsigned long flags, cpuid;

	if (!rt_sched_timed) {
		return;		/* timer was never started */
	}
	rt_release_rtc();
	rt_release_irq(RTAI_APIC_TIMER_IPI);
	rt_sched_timed = 0;
	for (cpuid = 0; cpuid < NR_RT_CPUS; cpuid++) {
		rt_time_h = RT_TIME_END;
		oneshot_running = 0;
	}
	flags = rt_global_save_flags_and_cli();
	RT_SCHEDULE_MAP_BOTH(0xFF & ~(1 << rtai_cpuid()));
	rt_global_restore_flags(flags);
}
01292
01293
#else

/*
 * start_rt_timer (RTC build, UP) - single-CPU variant: zero the CPU 0
 * time bookkeeping and let the RTC call rt_timer_handler directly;
 * 'period' is ignored.  Returns 1.
 */
RTIME start_rt_timer(int period)
{
	int const cpuid = 0;
	oneshot_timer = oneshot_running = 0;
	rt_smp_times[cpuid].linux_tick = 0;
	rt_smp_times[cpuid].tick_time = 0;
	rt_smp_times[cpuid].intr_time = 0;
	rt_smp_times[cpuid].linux_time = 0;
	rt_smp_times[cpuid].periodic_tick = 1;
	tuned.timers_tol[0] = rt_half_tick = 0;
	rt_time_h = 0;
	linux_times = rt_smp_times;
	rt_request_rtc(CONFIG_RTAI_RTC_FREQ, (void *)rt_timer_handler);
	rt_sched_timed = 1;
	return 1LL;
}
01311
01312
/*
 * stop_rt_timer (RTC build, UP) - release the RTC, park the time horizon
 * and run one final rt_schedule() under the global lock.
 */
void stop_rt_timer(void)
{
	unsigned long flags;

	if (!rt_sched_timed) {
		return;		/* timer was never started */
	}
	rt_release_rtc();
	rt_time_h = RT_TIME_END;
	rt_sched_timed = rt_smp_oneshot_timer[0] = 0;
	flags = rt_global_save_flags_and_cli();
	rt_schedule();
	rt_global_restore_flags(flags);
}
01326
01327
#endif

/*
 * start_rt_apic_timers (RTC build) - the APIC setup data is irrelevant
 * when the RTC drives timing; just start the timer.
 */
void start_rt_apic_timers(struct apic_timer_setup_data *setup_data, unsigned int rcvr_jiffies_cpuid)
{
	start_rt_timer(0);
}
01333
01334
#else

#ifdef CONFIG_SMP

/*
 * start_rt_apic_timers (SMP) - program each CPU's local APIC timer from
 * 'setup_data' (mode > 0 selects periodic, otherwise oneshot) and set the
 * matching per-CPU tolerance and half-tick values.  'rcvr_jiffies_cpuid'
 * names the CPU whose time struct feeds Linux jiffies recovery (falls
 * back to CPU 0 when out of range).
 */
void start_rt_apic_timers(struct apic_timer_setup_data *setup_data, unsigned int rcvr_jiffies_cpuid)
{
	unsigned long flags, cpuid;

	rt_request_apic_timers(rt_timer_handler, setup_data);
	flags = rt_global_save_flags_and_cli();
	for (cpuid = 0; cpuid < NR_RT_CPUS; cpuid++) {
		if (setup_data[cpuid].mode > 0) {
			oneshot_timer = oneshot_running = 0;
			tuned.timers_tol[cpuid] = rt_half_tick = (rt_times.periodic_tick + 1) >> 1;
		} else {
			oneshot_timer = oneshot_running = 1;
			tuned.timers_tol[cpuid] = rt_half_tick = (tuned.latency + 1) >> 1;
		}
		rt_time_h = rt_times.tick_time + rt_half_tick;
		shot_fired = 1;
	}
	rt_sched_timed = 1;
	linux_times = rt_smp_times + (rcvr_jiffies_cpuid < NR_RT_CPUS ? rcvr_jiffies_cpuid : 0);
	rt_global_restore_flags(flags);
}
01359
01360
01361
/*
 * start_rt_timer (SMP) - start all APIC timers with the same 'period'
 * (in internal count units); period <= 0 forces oneshot mode first.
 * Returns the count actually used (or 'period' in oneshot mode).
 */
RTIME start_rt_timer(int period)
{
	int cpuid;
	struct apic_timer_setup_data setup_data[NR_RT_CPUS];
	if (period <= 0) {
		rt_set_oneshot_mode();
	}
	for (cpuid = 0; cpuid < NR_RT_CPUS; cpuid++) {
		setup_data[cpuid].mode = oneshot_timer ? 0 : 1;
		setup_data[cpuid].count = count2nano(period);
	}
	start_rt_apic_timers(setup_data, rtai_cpuid());
	return setup_data[0].mode ? setup_data[0].count : period;
}
01375
01376
01377
/*
 * stop_rt_timer (SMP) - free all APIC timers, park every CPU's time
 * horizon and trigger a final reschedule on all CPUs.
 */
void stop_rt_timer(void)
{
	unsigned long flags, cpuid;

	if (!rt_sched_timed) {
		return;		/* timer was never started */
	}
	rt_free_apic_timers();
	rt_sched_timed = 0;
	for (cpuid = 0; cpuid < NR_RT_CPUS; cpuid++) {
		rt_time_h = RT_TIME_END;
		oneshot_running = 0;
	}
	flags = rt_global_save_flags_and_cli();
	RT_SCHEDULE_MAP_BOTH(0xFF & ~(1 << rtai_cpuid()));
	rt_global_restore_flags(flags);
}
01394
01395
#else

#ifdef USE_LINUX_TIMER
#define TIMER_TYPE 0
#else
#define TIMER_TYPE 1
#endif

/*
 * start_rt_timer (UP) - start the single hard timer (8254 or APIC per
 * TIMER_TYPE) in oneshot or periodic mode and copy the low-level rt_times
 * into the CPU 0 slot of rt_smp_times.  Returns 'period'.
 *
 * NOTE(review): 'cpuid' is temporarily #defined to 0 and the per-CPU
 * rt_times macro is #undef'd so the raw global rt_times from the arch
 * layer is used inside this function; both are restored at the end.
 */
RTIME start_rt_timer(int period)
{
#define cpuid 0
#undef rt_times

	unsigned long flags;
	if (period <= 0) {
		rt_set_oneshot_mode();
	}
	flags = rt_global_save_flags_and_cli();
	if (oneshot_timer) {
		rt_request_timer(rt_timer_handler, 0, TIMER_TYPE);
		tuned.timers_tol[0] = rt_half_tick = (tuned.latency + 1) >> 1;
		oneshot_running = shot_fired = 1;
	} else {
		/* 8254 cannot be programmed beyond LATCH counts per shot */
		rt_request_timer(rt_timer_handler, !TIMER_TYPE && period > LATCH ? LATCH : period, TIMER_TYPE);
		tuned.timers_tol[0] = rt_half_tick = (rt_times.periodic_tick + 1) >> 1;
	}
	rt_sched_timed = 1;
	rt_smp_times[cpuid].linux_tick = rt_times.linux_tick;
	rt_smp_times[cpuid].tick_time = rt_times.tick_time;
	rt_smp_times[cpuid].intr_time = rt_times.intr_time;
	rt_smp_times[cpuid].linux_time = rt_times.linux_time;
	rt_smp_times[cpuid].periodic_tick = rt_times.periodic_tick;
	rt_time_h = rt_times.tick_time + rt_half_tick;
	linux_times = rt_smp_times;
	rt_global_restore_flags(flags);
#ifdef USE_LINUX_TIMER
	/* watch the Linux timer IRQ so jiffies never stall */
	rt_request_linux_irq(TIMER_8254_IRQ, recover_jiffies, "rtai_jif_chk", recover_jiffies);
#endif
	return period;

#undef cpuid
#define rt_times (rt_smp_times[cpuid])
}
01438
01439
/*
 * start_rt_apic_timers (UP emulation) - when every entry asks for
 * periodic mode, start one periodic timer at the smallest requested
 * count; otherwise fall back to oneshot.
 */
void start_rt_apic_timers(struct apic_timer_setup_data *setup_mode, unsigned int rcvr_jiffies_cpuid)
{
	int cpuid, period;

	period = 0;
	for (cpuid = 0; cpuid < NR_RT_CPUS; cpuid++) {
		period += setup_mode[cpuid].mode;
	}
	if (period == NR_RT_CPUS) {
		/* all periodic: use the minimum requested count */
		period = 2000000000;
		for (cpuid = 0; cpuid < NR_RT_CPUS; cpuid++) {
			if (setup_mode[cpuid].count < period) {
				period = setup_mode[cpuid].count;
			}
		}
		start_rt_timer(nano2count(period));
	} else {
		rt_set_oneshot_mode();
		start_rt_timer(0);
	}
}
01461
01462
/*
 * stop_rt_timer (UP) - detach the jiffies-recovery IRQ (if used), free
 * the hard timer, park the time horizon and run a final rt_schedule().
 */
void stop_rt_timer(void)
{
	unsigned long flags;

	if (!rt_sched_timed) {
		return;		/* timer was never started */
	}
#ifdef USE_LINUX_TIMER
	rt_free_linux_irq(TIMER_8254_IRQ, recover_jiffies);
#endif
	rt_free_timer();
	rt_time_h = RT_TIME_END;
	rt_sched_timed = rt_smp_oneshot_timer[0] = 0;
	flags = rt_global_save_flags_and_cli();
	rt_schedule();
	rt_global_restore_flags(flags);
}

#endif

#endif
01484
/* Identify this scheduler build; always reports RT_SCHED_MUP. */
int rt_sched_type(void)
{
	return RT_SCHED_MUP;
}
01489
01490
/*
 * rt_hard_timer_tick_count - periodic tick count of the calling CPU's
 * timer; 0 in oneshot mode, -1 when no hard timer is running.
 */
int rt_hard_timer_tick_count(void)
{
	int cpuid = rtai_cpuid();
	if (rt_sched_timed) {
		return oneshot_timer ? 0 : rt_smp_times[cpuid].periodic_tick;
	}
	return -1;
}
01499
01500
/*
 * rt_hard_timer_tick_count_cpuid - as rt_hard_timer_tick_count, but for
 * an explicitly named CPU.
 */
int rt_hard_timer_tick_count_cpuid(int cpuid)
{
	if (rt_sched_timed) {
		return oneshot_timer ? 0 : rt_smp_times[cpuid].periodic_tick;
	}
	return -1;
}
01508
01509
01510 RT_TRAP_HANDLER rt_set_task_trap_handler(
RT_TASK *task,
unsigned int vec,
RT_TRAP_HANDLER handler)
01511 {
01512
RT_TRAP_HANDLER old_handler;
01513
01514
if (!
task || (vec >=
RTAI_NR_TRAPS)) {
01515
return (
RT_TRAP_HANDLER) -EINVAL;
01516 }
01517 old_handler =
task->task_trap_handler[vec];
01518
task->task_trap_handler[vec] =
handler;
01519
return old_handler;
01520 }
01521
/* Module parameters: timing mode and tuning knobs, all overridable at
 * insmod time ("i" = int). */
static int OneShot = CONFIG_RTAI_ONE_SHOT;	/* nonzero: start in oneshot mode */
MODULE_PARM(OneShot, "i");

static int Latency = TIMER_LATENCY;		/* measured interrupt latency, timer units */
MODULE_PARM(Latency, "i");

static int SetupTimeTIMER = TIMER_SETUP_TIME;	/* time needed to program a timer shot */
MODULE_PARM(SetupTimeTIMER, "i");

/* defined in the RTAI objects registry, called at cleanup */
extern void krtai_objects_release(void);
01532
/*
 * frstk_srq_handler - service-request handler draining the ring of task
 * stacks queued for freeing; runs in Linux context where sched_free() is
 * safe to call.
 */
static void frstk_srq_handler(void)
{
	while (frstk_srq.out != frstk_srq.in) {
		sched_free(frstk_srq.mp[frstk_srq.out++ & (MAX_FRESTK_SRQ - 1)]);
	}
}
01539
/* nihil: placeholder marking an unassigned LXRT user-space call slot. */
static void nihil(void) { };
/* dispatch table for user-space (LXRT) service calls, indexed by call id */
struct rt_fun_entry rt_fun_lxrt[MAX_LXRT_FUN];
01542
01543 void reset_rt_fun_entries(
struct rt_native_fun_entry *entry)
01544 {
01545
while (entry->fun.fun) {
01546
if (entry->index >=
MAX_LXRT_FUN) {
01547
rt_printk(
"*** RESET ENTRY %d FOR USER SPACE CALLS EXCEEDS ALLOWD TABLE SIZE %d, NOT USED ***\n", entry->index,
MAX_LXRT_FUN);
01548 }
else {
01549
rt_fun_lxrt[entry->index] = (
struct rt_fun_entry){ 1,
nihil };
01550 }
01551 entry++;
01552 }
01553 }
01554
01555 int set_rt_fun_entries(
struct rt_native_fun_entry *entry)
01556 {
01557
int error;
01558 error = 0;
01559
while (entry->fun.fun) {
01560
if (
rt_fun_lxrt[entry->index].fun !=
nihil) {
01561
rt_printk(
"*** SUSPICIOUS ENTRY ASSIGNEMENT FOR USER SPACE CALL AT %d, DUPLICATED INDEX OR REPEATED INITIALIZATION ***\n", entry->index);
01562 error = -1;
01563 }
else if (entry->index >=
MAX_LXRT_FUN) {
01564
rt_printk(
"*** ASSIGNEMENT ENTRY %d FOR USER SPACE CALLS EXCEEDS ALLOWED TABLE SIZE %d, NOT USED ***\n", entry->index,
MAX_LXRT_FUN);
01565 error = -1;
01566 }
else {
01567
rt_fun_lxrt[entry->index] = entry->fun;
01568 }
01569 entry++;
01570 }
01571
if (error) {
01572
reset_rt_fun_entries(entry);
01573 }
01574
return 0;
01575 }
01576
01577 void *
rt_get_lxrt_fun_entry(
int index) {
01578
return rt_fun_lxrt[index].fun;
01579 }
01580
/*
 * lxrt_killall - stop the timer and delete every remaining RT task on
 * every CPU (used on reboot notification).
 * NOTE(review): rt_linux_task appears to be a per-CPU macro indexed by
 * the local 'cpuid' — the variable name must not change.
 */
static void lxrt_killall (void)
{
	int cpuid;

	stop_rt_timer();

	for (cpuid = 0; cpuid < NR_RT_CPUS; cpuid++)
		while (rt_linux_task.next)
			rt_task_delete(rt_linux_task.next);
}
01591
01592 static int lxrt_notify_reboot (
struct notifier_block *nb,
unsigned long event,
void *p)
01593
01594 {
01595
switch (event)
01596 {
01597
case SYS_DOWN:
01598
case SYS_HALT:
01599
case SYS_POWER_OFF:
01600
01601
01602
printk(
"LXRT: REBOOT NOTIFIED -- KILLING TASKS\n");
01603
lxrt_killall();
01604 }
01605
01606
return NOTIFY_DONE;
01607 }
01608
01609
01610
01611 RTIME count2nano(
RTIME counts)
01612 {
01613
int sign;
01614
01615
if (counts >= 0) {
01616 sign = 1;
01617 }
else {
01618 sign = 0;
01619 counts = - counts;
01620 }
01621 counts =
oneshot_timer_cpuid ?
01622
llimd(counts, 1000000000,
tuned.cpu_freq):
01623
llimd(counts, 1000000000,
TIMER_FREQ);
01624
return sign ? counts : - counts;
01625 }
01626
01627
01628 RTIME nano2count(
RTIME ns)
01629 {
01630
int sign;
01631
01632
if (ns >= 0) {
01633 sign = 1;
01634 }
else {
01635 sign = 0;
01636 ns = - ns;
01637 }
01638 ns =
oneshot_timer_cpuid ?
01639
llimd(ns,
tuned.cpu_freq, 1000000000) :
01640
llimd(ns,
TIMER_FREQ, 1000000000);
01641
return sign ? ns : - ns;
01642 }
01643
01644 RTIME count2nano_cpuid(
RTIME counts,
unsigned int cpuid)
01645 {
01646
int sign;
01647
01648
if (counts >= 0) {
01649 sign = 1;
01650 }
else {
01651 sign = 0;
01652 counts = - counts;
01653 }
01654 counts = oneshot_timer ?
01655
llimd(counts, 1000000000,
tuned.cpu_freq):
01656
llimd(counts, 1000000000,
TIMER_FREQ);
01657
return sign ? counts : - counts;
01658 }
01659
01660
01661 RTIME nano2count_cpuid(
RTIME ns,
unsigned int cpuid)
01662 {
01663
int sign;
01664
01665
if (ns >= 0) {
01666 sign = 1;
01667 }
else {
01668 sign = 0;
01669 ns = - ns;
01670 }
01671 ns = oneshot_timer ?
01672
llimd(ns,
tuned.cpu_freq, 1000000000) :
01673
llimd(ns,
TIMER_FREQ, 1000000000);
01674
return sign ? ns : - ns;
01675 }
01676
01677
01678
/*
 * rt_get_time - current time in internal counts on the calling CPU:
 * the TSC in oneshot mode, the last tick time in periodic mode.
 */
RTIME rt_get_time(void)
{
	int cpuid;
	return rt_smp_oneshot_timer[cpuid = rtai_cpuid()] ? rdtsc() : rt_smp_times[cpuid].tick_time;
}
01684
/*
 * rt_get_time_cpuid - as rt_get_time for an explicit CPU; 'cpuid' is
 * consumed by the oneshot_timer/rt_times macros.
 */
RTIME rt_get_time_cpuid(unsigned int cpuid)
{
	return oneshot_timer ? rdtsc(): rt_times.tick_time;
}
01689
/* rt_get_time_ns - current time on the calling CPU, in nanoseconds. */
RTIME rt_get_time_ns(void)
{
	int cpuid = rtai_cpuid();	/* consumed by the macros below */
	return oneshot_timer ? llimd(rdtsc(), 1000000000, tuned.cpu_freq) :
			       llimd(rt_times.tick_time, 1000000000, TIMER_FREQ);
}
01696
/* rt_get_time_ns_cpuid - as rt_get_time_ns for an explicit CPU. */
RTIME rt_get_time_ns_cpuid(unsigned int cpuid)
{
	return oneshot_timer ? llimd(rdtsc(), 1000000000, tuned.cpu_freq) :
			       llimd(rt_times.tick_time, 1000000000, TIMER_FREQ);
}
01702
/* rt_get_cpu_time_ns - TSC-based wall time in nanoseconds, independent
 * of the timing mode. */
RTIME rt_get_cpu_time_ns(void)
{
	return llimd(rdtsc(), 1000000000, tuned.cpu_freq);
}
01707
01708
01709
01710 RT_TASK *
rt_get_base_linux_task(
RT_TASK **base_linux_tasks)
01711 {
01712
int cpuid;
01713
for (
cpuid = 0;
cpuid < num_online_cpus();
cpuid++) {
01714 base_linux_tasks[
cpuid] =
rt_smp_linux_task +
cpuid;
01715 }
01716
return rt_smp_linux_task;
01717 }
01718
/*
 * rt_alloc_dynamic_task - heap-allocate an RT_TASK via the RTAI
 * allocator; NULL when the build has no allocator (or on failure).
 */
RT_TASK *rt_alloc_dynamic_task(void)
{
#ifdef CONFIG_RTAI_MALLOC
	return rt_malloc(sizeof(RT_TASK));	/* caller owns and must rt_free() */
#else
	return NULL;
#endif
}
01727
01728
01729
/*
 * rt_register_watchdog - install 'wd' as the watchdog task for 'cpuid'.
 * Fails with -EBUSY (cast into the returned pointer) when a watchdog is
 * already registered or any other task holds the highest priority;
 * returns NULL on success.
 * NOTE(review): rt_linux_task appears to be a per-CPU macro using the
 * 'cpuid' parameter name.
 */
RT_TASK **rt_register_watchdog(RT_TASK *wd, int cpuid)
{
	RT_TASK *task;

	if (lxrt_wdog_task[cpuid]) return (RT_TASK**) -EBUSY;
	task = &rt_linux_task;
	while ((task = task->next)) {
		/* only the watchdog itself may hold the top priority */
		if (task != wd && task->priority == RT_SCHED_HIGHEST_PRIORITY) {
			return (RT_TASK**) -EBUSY;
		}
	}
	lxrt_wdog_task[cpuid] = wd;
	return (RT_TASK**) 0;
}
01744
01745 void rt_deregister_watchdog(
RT_TASK *wd,
int cpuid)
01746 {
01747
if (
lxrt_wdog_task[
cpuid] != wd)
return;
01748
lxrt_wdog_task[
cpuid] = NULL;
01749 }
01750
01751
01752
01753
01754
/* SYSW_DIAG_MSG(x): emit mode-switch diagnostics only when ECHO_SYSW is
 * defined; otherwise compiles to nothing. */
#ifdef ECHO_SYSW
#define SYSW_DIAG_MSG(x) x
#else
#define SYSW_DIAG_MSG(x)
#endif

/* previous global trap handler, restored at module cleanup */
static RT_TRAP_HANDLER lxrt_old_trap_handler;
01761
/*
 * _rt_schedule_soft_tail - common epilogue of a soft-mode run: take the
 * task off the soft ready chain, make the Linux placeholder current
 * again and reschedule.
 */
static inline void _rt_schedule_soft_tail(RT_TASK *rt_task, int cpuid)
{
	rt_global_cli();
	rt_task->state &= ~(RT_SCHED_READY | RT_SCHED_SFTRDY);
	/* unlink from the soft ready list */
	(rt_task->rprev)->rnext = rt_task->rnext;
	(rt_task->rnext)->rprev = rt_task->rprev;
	rt_smp_current[cpuid] = &rt_linux_task;
	rt_schedule();
	UNLOCK_LINUX_NOTSKPRI(cpuid);
	rt_global_sti();
}
01773
/*
 * rt_schedule_soft - run a soft real-time task body in Linux context:
 * wait (as a SOFTREALTIME Linux task) until the RT state settles to
 * READY, enqueue on the soft ready list, execute the stored function
 * with its arguments, then fall into the common soft tail.
 */
void rt_schedule_soft(RT_TASK *rt_task)
{
	struct fun_args *funarg;
	int cpuid;

	rt_global_cli();
	rt_task->state |= RT_SCHED_READY;
	while (rt_task->state != RT_SCHED_READY) {
		/* other state bits still set: yield to Linux until cleared */
		current->state = TASK_SOFTREALTIME;
		rt_global_sti();
		schedule();
		rt_global_cli();
	}
	LOCK_LINUX_NOTSKPRI(cpuid = rt_task->runnable_on_cpus);
	enq_soft_ready_task(rt_task);
	rt_smp_current[cpuid] = rt_task;
	rt_global_sti();
	funarg = (void *)rt_task->fun_args;
	rt_task->retval = funarg->fun(RTAI_FUNARGS);
	_rt_schedule_soft_tail(rt_task, cpuid);
}
01795
/* Public wrapper around the inline soft-run epilogue. */
void rt_schedule_soft_tail(RT_TASK *rt_task, int cpuid)
{
	_rt_schedule_soft_tail(rt_task, cpuid);
}
01800
/*
 * fast_schedule - switch straight from the Linux task 'lnxtsk' to
 * 'new_task' without a full rt_schedule() pass; called from the
 * schedule-tail intercept.  The hard/soft distinction only selects the
 * Linux-lock flavour used around the context switch.
 */
static inline void fast_schedule(RT_TASK *new_task, struct task_struct *lnxtsk, int cpuid)
{
	RT_TASK *rt_current;
	new_task->state |= RT_SCHED_READY;
	enq_soft_ready_task(new_task);
	sched_release_global_lock(cpuid);
	if (!new_task->is_hard) {
		LOCK_LINUX(cpuid);
		(rt_current = &rt_linux_task)->lnxtsk = lnxtsk;
		UEXECTIME();
		rt_smp_current[cpuid] = new_task;
		lxrt_context_switch(lnxtsk, new_task->lnxtsk, cpuid);
		UNLOCK_LINUX(cpuid);
	} else {
		LOCK_LINUX_NOTSKPRI(cpuid);
		(rt_current = &rt_linux_task)->lnxtsk = lnxtsk;
		UEXECTIME();
		rt_smp_current[cpuid] = new_task;
		lxrt_context_switch(lnxtsk, new_task->lnxtsk, cpuid);
		UNLOCK_LINUX_NOTSKPRI(cpuid);
	}
}
01823
01824
/* Per-CPU state for the reservoir of spare kernel threads that back
 * hard real-time user tasks. */
static RT_TASK thread_task[NR_RT_CPUS];	/* placeholder task per CPU manager */
static int rsvr_cnt[NR_RT_CPUS];	/* running count used to name kthreads */

#if USE_RTAI_TASKS
#define RESERVOIR 0
#else
#define RESERVOIR 4
#endif
static int Reservoir = RESERVOIR;	/* low-water mark of spare kthreads */
MODULE_PARM(Reservoir, "i");
static int SpareKthreads = 100;		/* capacity of the spare-kthread stack */
MODULE_PARM(SpareKthreads, "i");

static int taskidx[NR_RT_CPUS];			/* top-of-stack index per CPU */
static struct task_struct **taskav[NR_RT_CPUS];	/* per-CPU stack of spare kthreads */
01840
01841 static struct task_struct *
__get_kthread(
int cpuid)
01842 {
01843
unsigned long flags;
01844
struct task_struct *p;
01845
01846
flags = rt_global_save_flags_and_cli();
01847
if (
taskidx[
cpuid] > 0) {
01848 p =
taskav[
cpuid][--
taskidx[
cpuid]];
01849 rt_global_restore_flags(
flags);
01850
return p;
01851 }
01852 rt_global_restore_flags(
flags);
01853
return 0;
01854 }
01855
01856
01857
01858
01859
/*
 * detach_kthread - detach the current kernel thread from its session,
 * process group and controlling terminal (field layout differs between
 * 2.4 and 2.6 kernels).
 */
static inline void detach_kthread(void)
{
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
	current->session = 1;
	current->pgrp = 1;
	current->tty = NULL;
#else
	(current->signal)->session = 1;
	(current->signal)->pgrp = 1;
	(current->signal)->tty = NULL;
#endif
}
01872
/*
 * lxrt_sigfillset - block every signal for the current kernel thread so
 * signals cannot disturb it (locking API differs between 2.4 and 2.6).
 */
static inline void lxrt_sigfillset(void)
{
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
	spin_lock_irq(&current->sigmask_lock);
	sigfillset(&current->blocked);
	recalc_sigpending(current);
	spin_unlock_irq(&current->sigmask_lock);
#else
	spin_lock_irq(&(current->sighand)->siglock);
	sigfillset(&current->blocked);
	recalc_sigpending();
	spin_unlock_irq(&(current->sighand)->siglock);
#endif
}
01887
/*
 * kthread_fun - body of a reservoir kernel thread backing hard RT tasks.
 * After detaching and pinning itself on 'cpuid', it steals itself from
 * Linux and then loops: suspend until dispatched, run the user-supplied
 * function stored in max_msg_size[0]/[1], and return itself to the spare
 * pool.  A dispatch with a NULL function pointer terminates the thread.
 */
static void kthread_fun(int cpuid)
{
	void steal_from_linux(RT_TASK *);
	void give_back_to_linux(RT_TASK *, int);
	RT_TASK *task;

	detach_kthread();
	rtai_set_linux_task_priority(current, SCHED_FIFO, KTHREAD_F_PRIO);
	sprintf(current->comm, "F:HARD:%d:%d", cpuid, ++rsvr_cnt[cpuid]);
	current->rtai_tskext(TSKEXT0) = task = &thread_task[cpuid];
	current->rtai_tskext(TSKEXT1) = task->lnxtsk = current;
	lxrt_sigfillset();
	put_current_on_cpu(cpuid);
	init_hard_fpu(current);
	steal_from_linux(task);
	while(1) {
		rt_task_suspend(task);
		current->comm[0] = 'U';	/* mark as in Use */
		if (!(task = current->rtai_tskext(TSKEXT0))->max_msg_size[0]) {
			break;	/* no function to run: terminate */
		}
		task->exectime[1] = rdtsc();
		/* max_msg_size[0] holds the thread function, [1] its argument */
		((void (*)(long))task->max_msg_size[0])(task->max_msg_size[1]);
		current->comm[0] = 'F';	/* back to Free */
		current->rtai_tskext(TSKEXT1) = 0;
		rtai_cli();
		if (taskidx[cpuid] < SpareKthreads) {
			taskav[cpuid][taskidx[cpuid]++] = task->lnxtsk;
		}
		rtai_sti();
	}
	give_back_to_linux(task, 0);
	clr_rtext(task);
}
01922
/* Drain the given per-CPU klist ring, waking each queued Linux task;
 * expects a local 'cpuid' in the expansion context. */
#define WAKE_UP_TASKs(klist) \
do { \
	struct klist_t *p = &klist[cpuid]; \
	while (p->out != p->in) { \
		wake_up_process(p->task[p->out++ & (MAX_WAKEUP_SRQ - 1)]); \
	} \
} while (0)
01930
/*
 * kthread_m - per-CPU manager thread for the kernel-thread reservoir.
 * Sleeps until kicked, then drains its request ring (klistm):
 *  - an entry > 1 is a task_struct pointer asking for a long-jump based
 *    recycle of a finished hard thread;
 *  - entries 0/1 request a refill of the reservoir (spawning new
 *    kthread_fun threads) followed by resuming the hard requester (1) or
 *    releasing a soft requester via semaphore (0).
 * Terminates when 'endkthread' is raised.
 */
static void kthread_m(int cpuid)
{
	struct task_struct *lnxtsk;
	struct klist_t *klistp;
	RT_TASK *task;

	detach_kthread();
	(task = &thread_task[cpuid])->magic = RT_TASK_MAGIC;
	task->runnable_on_cpus = cpuid;
	sprintf(current->comm, "RTAI_KTHRD_M:%d", cpuid);
	put_current_on_cpu(cpuid);
	kthreadm[cpuid] = current;
	klistp = &klistm[cpuid];
	rtai_set_linux_task_priority(current, SCHED_FIFO, KTHREAD_M_PRIO);
	lxrt_sigfillset();
	up(&resem[cpuid]);	/* signal the creator we are ready */
	while (!endkthread) {
		current->state = TASK_UNINTERRUPTIBLE;
		schedule();
#if defined(CONFIG_SMP) && LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
		WAKE_UP_TASKs(wake_up_hts);
#endif
		while (klistp->out != klistp->in) {
			unsigned long hard, flags;
			flags = rt_global_save_flags_and_cli();
			hard = (unsigned long)(lnxtsk = klistp->task[klistp->out++ & (MAX_WAKEUP_SRQ - 1)]);
			if (hard > 1) {
				/* a real task pointer: recycle the finished kthread */
				if (lnxtsk->rtai_tskext(TSKEXT2)) {
					if (lnxtsk->rtai_tskext(TSKEXT1) && taskidx[cpuid] < SpareKthreads) {;
						taskav[cpuid][taskidx[cpuid]++] = lnxtsk;
						lnxtsk->comm[0] = 'F';
					}
					kthread_fun_long_jump(lnxtsk);
				}
			} else {
				/* refill request: top up the reservoir if low */
				if (taskidx[cpuid] < Reservoir) {
					task->suspdepth = task->state = 0;
					rt_global_sti();
					kernel_thread((void *)kthread_fun, (void *)(long)cpuid, 0);
					/* wait until the new thread has parked itself */
					while (task->state != (RT_SCHED_READY | RT_SCHED_SUSPENDED)) {
						current->state = TASK_INTERRUPTIBLE;
						schedule_timeout(2);
					}
					kthread_fun_set_jump(task->lnxtsk);
					rt_global_cli();
					taskav[cpuid][taskidx[cpuid]++] = (void *)task->lnxtsk;
				}
				if (hard) {
					/* next ring entry is the hard requester to resume */
					rt_task_resume((void *)klistp->task[klistp->out++ & (MAX_WAKEUP_SRQ - 1)]);
				} else {
					rt_global_sti();
					up(&resem[cpuid]);	/* release soft requester */
					rt_global_cli();
				}
			}
			rt_global_restore_flags(flags);
		}
	}
	kthreadm[cpuid] = 0;
}
01992
/*
 * steal_from_linux - move a Linux-backed task into hard real-time mode.
 * Queues it on the per-CPU buddy list, puts the Linux side to sleep in
 * TASK_HARDREALTIME and spins in schedule() until the RT side reports
 * READY; then restores its hard priority band and FPU state.  A pending
 * signal aborts the transition.
 */
void steal_from_linux(RT_TASK *rt_task)
{
	struct klist_t *klistp;
	struct task_struct *lnxtsk;

	if (signal_pending(rt_task->lnxtsk)) {
		return;		/* do not go hard with a signal pending */
	}
	klistp = &klistb[rt_task->runnable_on_cpus];
	rtai_cli();
	klistp->task[klistp->in++ & (MAX_WAKEUP_SRQ - 1)] = rt_task;
#if defined(TASK_ATOMICSWITCH) && TASK_ATOMICSWITCH && defined(CONFIG_PREEMPT)
	preempt_disable();
	(lnxtsk = rt_task->lnxtsk)->state = (TASK_HARDREALTIME | TASK_ATOMICSWITCH);
#else
	(lnxtsk = rt_task->lnxtsk)->state = TASK_HARDREALTIME;
#endif
	rtai_sti();
	do {
		schedule();
	} while (rt_task->state != RT_SCHED_READY);
	if (!rt_task->exectime[1]) {
		rt_task->exectime[1] = rdtsc();
	}
	rtai_cli();
	/* drop out of the soft priority band */
	if (rt_task->base_priority >= BASE_SOFT_PRIORITY) {
		rt_task->base_priority -= BASE_SOFT_PRIORITY;
		rt_task->priority -= BASE_SOFT_PRIORITY;
	}
	rt_task->is_hard = 1;
	if (lnxtsk_uses_fpu(lnxtsk)) {
		restore_fpu(lnxtsk);
	}
	rtai_sti();
}
02028
/*
 * give_back_to_linux - return a hard task to Linux scheduling.  Unlinks
 * it from the RT ready chain, optionally re-enters the soft priority
 * band (keeprio == 0), mirrors the priority into the Linux rt_priority,
 * pends its Linux-side wakeup and reschedules.
 */
void give_back_to_linux(RT_TASK *rt_task, int keeprio)
{
	struct task_struct *lnxtsk;

	rt_global_cli();
	(rt_task->rprev)->rnext = rt_task->rnext;
	(rt_task->rnext)->rprev = rt_task->rprev;
	rt_task->state = 0;
	if (!keeprio && rt_task->base_priority < BASE_SOFT_PRIORITY) {
		rt_task->base_priority += BASE_SOFT_PRIORITY;
		rt_task->priority += BASE_SOFT_PRIORITY;
	}
	/* map RT priority into Linux rt_priority, clamped at a minimum of 1 */
	(lnxtsk = rt_task->lnxtsk)->rt_priority = (MAX_LINUX_RTPRIO - rt_task->priority) < 1 ? 1 : MAX_LINUX_RTPRIO - rt_task->priority;
	pend_wake_up_hts(rt_task->lnxtsk, rt_task->runnable_on_cpus);
	rt_schedule();
	rt_task->is_hard = keeprio;	/* -1 keeps "was hard" marking, 0 clears */
	rt_global_sti();

	hal_schedule_back_root(lnxtsk);
}
02050
/*
 * get_kthread - obtain (get != 0) or release (get == 0) a reservoir
 * kernel thread for 'cpuid'.  When getting and the reservoir is empty,
 * asks the manager thread to refill it and blocks (hard: task suspend,
 * soft: semaphore) until a thread becomes available.  When releasing,
 * queues 'lnxtsk' back to the manager.  Returns the thread obtained, or
 * NULL when releasing.
 */
static struct task_struct *get_kthread(int get, int cpuid, void *lnxtsk)
{
	struct task_struct *kthread;
	struct klist_t *klistp;
	RT_TASK *this_task;
	int hard;

	klistp = &klistm[cpuid];
	if (get) {
		while (!(kthread = __get_kthread(cpuid))) {
			this_task = rt_smp_current[cpuid];
			rt_global_cli();
			/* ring protocol: first the hard flag, then the requester */
			klistp->task[klistp->in++ & (MAX_WAKEUP_SRQ - 1)] = (void *)(long)(hard = this_task->is_hard > 0 ? 1 : 0);
			klistp->task[klistp->in++ & (MAX_WAKEUP_SRQ - 1)] = (void *)this_task;
			pend_wake_up_srq(kthreadm[cpuid], cpuid);
			rt_global_sti();
			if (hard) {
				rt_task_suspend(this_task);
			} else {
				down(&resem[cpuid]);
			}
		}
		rt_global_cli();
		/* a 0,0 pair asks the manager for a plain reservoir top-up */
		klistp->task[klistp->in++ & (MAX_WAKEUP_SRQ - 1)] = 0;
		klistp->task[klistp->in++ & (MAX_WAKEUP_SRQ - 1)] = 0;
	} else {
		kthread = 0;
		rt_global_cli();
		klistp->task[klistp->in++ & (MAX_WAKEUP_SRQ - 1)] = lnxtsk;
	}
	pend_wake_up_srq(kthreadm[cpuid], cpuid);
	rt_global_sti();
	return kthread;
}
02085
/*
 * start_stop_kthread - start (rt_thread != NULL) or stop (NULL) a kernel
 * thread backed RT task.  Starting binds a reservoir thread via
 * set_rtext() and stashes the thread function and its argument in
 * max_msg_size[0]/[1] for kthread_fun to pick up; stopping returns the
 * backing kthread to the reservoir.
 */
static void start_stop_kthread(RT_TASK *task, void (*rt_thread)(long), long data, int priority, int uses_fpu, void(*signal)(void), int runnable_on_cpus)
{
	if (num_online_cpus() == 1) {
		runnable_on_cpus = 0;	/* only one CPU to run on */
	}
	if (rt_thread) {
		task->retval = set_rtext(task, priority, uses_fpu, signal, runnable_on_cpus, get_kthread(1, runnable_on_cpus, 0));
		task->max_msg_size[0] = (long)rt_thread;
		task->max_msg_size[1] = data;
	} else {
		get_kthread(0, task->runnable_on_cpus, task->lnxtsk);
	}
}
02099
/*
 * wake_up_srq_handler - service-request handler running in Linux context
 * that wakes every Linux task queued by the RT side (hard->soft
 * transitions and generic wakeups), then asks Linux to reschedule.
 * On x86-64 the CPU comes from rtai_cpuid(); elsewhere it is recovered
 * from the srq number offset.
 */
static void wake_up_srq_handler(unsigned srq)
{
#ifdef CONFIG_PREEMPT
#endif
#ifdef CONFIG_X86_64
	int cpuid = rtai_cpuid();
#else
	int cpuid = srq - wake_up_srq[0].srq;
#endif
#if !defined(CONFIG_SMP) || LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
	WAKE_UP_TASKs(wake_up_hts);
#else
	/* 2.4 SMP: delegate the hts wakeups to the manager kthread */
	wake_up_process(kthreadm[cpuid]);
#endif
	WAKE_UP_TASKs(wake_up_srq);
	set_need_resched();
#ifdef CONFIG_PREEMPT
#endif
}
02121
/* counters of trap/syscall-forced hard->soft transitions (first one logged) */
static unsigned long traptrans, systrans;

/*
 * lxrt_handle_trap - global trap hook.  For kernel-space RTAI tasks it
 * dispatches to the per-task trap handler (or suspends the task).  For a
 * hard Linux-backed task it forces the task back to soft mode so Linux
 * can deliver the fault, logging the first such transition.
 * Returns 1 when the trap was fully handled here, 0 to let Linux see it.
 */
static int lxrt_handle_trap(int vec, int signo, struct pt_regs *regs, void *dummy_data)
{
	RT_TASK *rt_task;

	rt_task = rt_smp_current[smp_processor_id()];
	if (USE_RTAI_TASKS && !rt_task->lnxtsk) {
		if (rt_task->task_trap_handler[vec]) {
			return rt_task->task_trap_handler[vec](vec, signo, regs, rt_task);
		}
		rt_printk("Default Trap Handler: vector %d: Suspend RT task %p\n", vec, rt_task);
		rt_task_suspend(rt_task);
		return 1;
	}

	if (rt_task->is_hard > 0) {
		if (!traptrans++) {
			rt_printk("\nLXRT CHANGED MODE (TRAP), PID = %d, VEC = %d, SIGNO = %d.\n", (rt_task->lnxtsk)->pid, vec, signo);
		}
		SYSW_DIAG_MSG(rt_printk("\nFORCING IT SOFT (TRAP), PID = %d, VEC = %d, SIGNO = %d.\n", (rt_task->lnxtsk)->pid, vec, signo););
		give_back_to_linux(rt_task, -1);
		SYSW_DIAG_MSG(rt_printk("FORCED IT SOFT (TRAP), PID = %d, VEC = %d, SIGNO = %d.\n", (rt_task->lnxtsk)->pid, vec, signo););
	}

	return 0;
}
02149
02150 static inline void rt_signal_wake_up(
RT_TASK *task)
02151 {
02152
if (
task->state &&
task->state !=
RT_SCHED_READY) {
02153
task->unblocked = 1;
02154
rt_task_masked_unblock(
task, ~
RT_SCHED_READY);
02155 }
else {
02156
task->unblocked = -1;
02157 }
02158 }
02159
02160
#ifdef UNWRAPPED_CATCH_EVENT

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,4,32)
/* Per-CPU ring of mm_struct references pinned while a kernel thread
 * borrows them (pre-2.4.32 kernels only). */
static struct mmreq {
	int in, out, count;
#define MAX_MM 32
#define bump_mmreq(x) do { x = (x + 1) & (MAX_MM - 1); } while(0)
	struct mm_struct *mm[MAX_MM];
} lxrt_mmrqtab[NR_CPUS];

struct prev_next_t { struct task_struct *prev, *next; };
/*
 * lxrt_intercept_schedule_head - schedule() entry hook: when the
 * outgoing task is a kernel thread (no mm), pin its active_mm in the
 * per-CPU ring so it survives until the tail hook can drop it safely.
 */
static int lxrt_intercept_schedule_head (unsigned long event, struct prev_next_t *evdata)
{
	IN_INTERCEPT_IRQ_ENABLE(); {

	struct task_struct *prev = evdata->prev;

	if (!prev->mm)
	{
		struct mmreq *p = lxrt_mmrqtab + task_cpu(prev);
		struct mm_struct *oldmm = prev->active_mm;
		BUG_ON(p->count >= MAX_MM);

		atomic_inc(&oldmm->mm_count);	/* pin until the tail hook */
		p->mm[p->in] = oldmm;
		bump_mmreq(p->in);
		p->count++;
	}

	return 0;
} }

#endif
02204
/*
 * lxrt_intercept_schedule_tail - schedule() exit hook.  In hard RT mode
 * it claims the event (returns 1).  Otherwise it fast-schedules every RT
 * task queued on this CPU's buddy list, then (pre-2.4.32) drops the
 * mm_struct references pinned by the head hook.
 */
static int lxrt_intercept_schedule_tail (unsigned event, void *nothing)
{
	IN_INTERCEPT_IRQ_ENABLE(); {

	int cpuid;
	if (in_hrt_mode(cpuid = smp_processor_id())) {
		return 1;	/* hard mode: nothing for Linux to do */
	} else {
		struct klist_t *klistp = &klistb[cpuid];
		struct task_struct *lnxtsk = current;
#ifdef CONFIG_PREEMPT
#endif
		while (klistp->out != klistp->in) {
			rt_global_cli();
			fast_schedule(klistp->task[klistp->out++ & (MAX_WAKEUP_SRQ - 1)], lnxtsk, cpuid);
			rt_global_sti();
		}
#ifdef CONFIG_PREEMPT
#endif
	}

#if defined(CONFIG_SMP) && defined(CONFIG_PREEMPT)
#endif

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,4,32)
	{
	struct mmreq *p;

#ifdef CONFIG_PREEMPT
	preempt_disable();
#endif

	p = lxrt_mmrqtab + smp_processor_id();

	/* drop the mm references pinned by the schedule-head hook */
	while (p->out != p->in)
	{
		struct mm_struct *oldmm = p->mm[p->out];
		mmdrop(oldmm);
		bump_mmreq(p->out);
		p->count--;
	}

#ifdef CONFIG_PREEMPT
	preempt_enable();
#endif
	}
#endif

	return 0;
} }
02259
/* Payload layout of the HAL kick/wakeup event (the task being signalled). */
struct sig_wakeup_t { struct task_struct *task; };
/*
 * HAL_KICK_PROCESS hook: when the kernel signals a process that owns an
 * LXRT extension (TSKEXT0), divert the wakeup to rt_signal_wake_up()
 * and return 1 to consume the event; return 0 for plain Linux tasks.
 */
static int lxrt_intercept_sig_wakeup (long event, void *data)
{
	IN_INTERCEPT_IRQ_ENABLE(); {
	RT_TASK *task;
	if ((task = INTERCEPT_WAKE_UP_TASK(data)->rtai_tskext(TSKEXT0))) {
		rt_signal_wake_up(task);
		return 1;
	}
	return 0;
} }
02271
/*
 * HAL_EXIT_PROCESS hook: if the dying Linux task owns an LXRT task,
 * move it back to soft mode first (when it is hard) and run the LXRT
 * process-termination cleanup.  Always returns 0 (event propagated).
 */
static int lxrt_intercept_exit (unsigned long event, struct task_struct *lnx_task)
{
	IN_INTERCEPT_IRQ_ENABLE(); {

	extern void linux_process_termination(void);
	RT_TASK *task;
	if ((task = lnx_task->rtai_tskext(TSKEXT0))) {
		if (task->is_hard > 0) {
			give_back_to_linux(task, 0);
		}
		linux_process_termination();
	}
	return 0;
} }
02286
02287
/* LXRT syscall dispatcher (defined in the LXRT core). */
extern long long rtai_lxrt_invoke (unsigned long, void *, void *);
/* Linux syscall table, called directly when a task is forced soft. */
extern int (*sys_call_table[])(struct pt_regs);

#if 0	/* Disabled draft of an in-kernel Linux-syscall server. */

/*
 * Allocate, initialise and register an auxiliary RTAI server task with
 * room for two fun_args behind the descriptor; returns the task or 0.
 */
static RT_TASK *server_task_init(int prio, int cpus_allowed)
{
	RT_TASK *tsk;
	if ((tsk = rt_malloc(sizeof(RT_TASK) + 2*sizeof(struct fun_args)))) {
		tsk->magic = 0;
		if (!set_rtext(tsk, prio, 0, 0, cpus_allowed, 0)) {
			tsk->fun_args = (long *)((struct fun_args *)(tsk + 1));
			if (rt_register((unsigned long)tsk, tsk, IS_TASK, 0)) {
				return tsk;
			} else {
				clr_rtext(tsk);
			}
		}
		rt_free(tsk);
	}
	return 0;
}

/* Queue (fun, arg1, arg2) on the server task, run it soft, return its retval. */
static inline RT_TASK *soft_rt_linux_server_call(RT_TASK *task, void *fun, void *arg1, void *arg2)
{
	task->fun_args[0] = (long)arg1;
	task->fun_args[1] = (long)arg2;
	((struct fun_args *)task->fun_args)->fun = fun;
	rt_schedule_soft(task);
	return (RT_TASK *)(unsigned long)task->retval;
}

/* Server loop: receive syscalls from master_task and execute them in Linux. */
static void linux_syscall_server_fun(RT_TASK *master_task)
{
	RT_TASK *server_task;
	struct pt_regs regs;

	master_task->linux_syscall_server = server_task = server_task_init(master_task->base_priority >= BASE_SOFT_PRIORITY ? master_task->base_priority - BASE_SOFT_PRIORITY : master_task->base_priority, master_task->runnable_on_cpus);
	rt_task_resume(master_task);
	/* NOTE(review): "&regs" restored from a mis-encoded character in the
	 * extracted source. */
	while (soft_rt_linux_server_call(server_task, rt_receive_linux_syscall, master_task, &regs) == master_task) {
		rt_return_linux_syscall(master_task, sys_call_table[regs.LINUX_SYSCALL_NR](regs));
	}
}

/* Spawn a Linux-syscall server kthread for master_task (or for current). */
RT_TASK *lxrt_init_linux_server(RT_TASK *master_task)
{
	int is_hard;
	if (!master_task) {
		if (!current->rtai_tskext(TSKEXT0)) {
			return NULL;
		}
		master_task = current->rtai_tskext(TSKEXT0);
	}
	if (!master_task->lnxtsk) {
		return NULL;
	}
	/* Drop to soft while forking the server, restore hard afterwards. */
	if ((is_hard = master_task->is_hard) > 0) {
		give_back_to_linux(master_task, 0);
	}
	master_task->linux_syscall_server = NULL;
	kernel_thread((void *)linux_syscall_server_fun, master_task, CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND);
	soft_rt_linux_server_call(master_task, rt_task_suspend, master_task, NULL);
	if (is_hard > 0) {
		steal_from_linux(master_task);
	}
	return master_task->linux_syscall_server;
}

#endif
02355
/*
 * HAL_SYSCALL_PROLOGUE hook (direct-call form).  Two jobs:
 *  1. Dispatch RTAI/LXRT syscalls (NR >= RTAI_SYSCALL_NR) through
 *     rtai_lxrt_invoke(), flushing the interrupt pipeline when the
 *     caller ended up in soft mode; returns 1 to swallow the syscall
 *     when the caller is hard.
 *  2. For a hard LXRT task issuing a plain Linux syscall: route it to
 *     its syscall server when one exists, otherwise force the task
 *     soft, run the syscall in Linux, then steal it back to hard.
 * Returns 0 to let Linux handle the syscall normally.
 */
static int lxrt_intercept_syscall_prologue(unsigned long event, struct pt_regs *regs)
{
	IN_INTERCEPT_IRQ_ENABLE(); {

	if (unlikely(regs->LINUX_SYSCALL_NR >= RTAI_SYSCALL_NR)) {
		long long retval;
		int cpuid;
		if (likely(regs->LINUX_SYSCALL_NR == RTAI_SYSCALL_NR)) {
			/* Native LXRT syscall: code and args come from dedicated regs. */
			retval = rtai_lxrt_invoke(regs->RTAI_SYSCALL_CODE, (void *)regs->RTAI_SYSCALL_ARGS, regs);
			SET_LXRT_RETVAL_IN_SYSCALL(regs, retval);
		} else {
			unsigned long args[2] = { (unsigned long)current, (unsigned long)regs };
			retval = regs->LINUX_SYSCALL_RETREG = rtai_lxrt_invoke(regs->LINUX_SYSCALL_NR, args, regs);
		}
		if (unlikely(!in_hrt_mode(cpuid = rtai_cpuid()))) {
#if 0
			if (unlikely((int)retval == -RT_EINTR)) {
				regs->LINUX_SYSCALL_NR = RTAI_FAKE_LINUX_SYSCALL;
			}
#endif
			hal_test_and_fast_flush_pipeline(cpuid);
			return 0;
		}
		return 1;
	}

	{
	int cpuid;
	if (in_hrt_mode(cpuid = rtai_cpuid()) && regs->LINUX_SYSCALL_NR < NR_syscalls) {
		RT_TASK *task = rt_smp_current[cpuid];
		if (task->is_hard > 0) {
			/* Preferred path: hand the syscall to the task's server. */
			if (task->linux_syscall_server) {
				task->linux_syscall_server = rt_exec_linux_syscall(task, (void *)task->linux_syscall_server, regs);
				return 1;
			}
			/* Fallback: forced hard->soft->hard round trip. */
			if (!systrans++) {
				rt_printk("\nLXRT CHANGED MODE (SYSCALL), PID = %d, SYSCALL = %lu.\n", (task->lnxtsk)->pid, regs->LINUX_SYSCALL_NR);
			}
			SYSW_DIAG_MSG(rt_printk("\nFORCING IT SOFT (SYSCALL), PID = %d, SYSCALL = %d.\n", (task->lnxtsk)->pid, regs->LINUX_SYSCALL_NR););
			give_back_to_linux(task, -1);
			SKIP_IMMEDIATE_LINUX_SYSCALL();
			SYSW_DIAG_MSG(rt_printk("FORCED IT SOFT, CALLING LINUX (SYSCALL), PID = %d, SYSCALL = %d.\n", (task->lnxtsk)->pid, regs->LINUX_SYSCALL_NR););
			regs->LINUX_SYSCALL_RETREG = sys_call_table[regs->LINUX_SYSCALL_NR](*regs);
			SYSW_DIAG_MSG(rt_printk("LINUX RETURNED, GOING BACK TO HARD (SYSLXRT), PID = %d.\n", current->pid););
			steal_from_linux(task);
			SYSW_DIAG_MSG(rt_printk("GONE BACK TO HARD (SYSLXRT), PID = %d.\n", current->pid););
			return 1;
		}
	} }
	return 0;
} }
02407
/*
 * HAL_SYSCALL_EPILOGUE hook (direct-call form): on return from a Linux
 * syscall by an LXRT task, either rewrite the saved registers to arm a
 * restart through the RTAI syscall gate (system_data_ptr set), or
 * migrate a task flagged as soft-transitioned (is_hard < 0) back to
 * hard real time mode.  Returns 1 when the task was stolen back, 0
 * otherwise.
 */
static int lxrt_intercept_syscall_epilogue(unsigned long event, void *nothing)
{
	IN_INTERCEPT_IRQ_ENABLE(); {

	RT_TASK *task;
	if ((task = (RT_TASK *)current->rtai_tskext(TSKEXT0))) {
		if (task->system_data_ptr) {
			/* Arm a syscall restart via the RTAI gate. */
			struct pt_regs *r = task->system_data_ptr;
			r->LINUX_SYSCALL_RETREG = -ERESTARTSYS;
			r->LINUX_SYSCALL_NR = RTAI_SYSCALL_NR;
			task->system_data_ptr = NULL;
		} else if (task->is_hard < 0) {
			SYSW_DIAG_MSG(rt_printk("GOING BACK TO HARD (SYSLXRT), PID = %d.\n", current->pid););
			steal_from_linux(task);
			SYSW_DIAG_MSG(rt_printk("GONE BACK TO HARD (SYSLXRT), PID = %d.\n", current->pid););
			return 1;
		}
	}
	return 0;
} }
02428
02429
#else /* !UNWRAPPED_CATCH_EVENT: legacy adeos event-info wrappers */

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,4,32)

/* Same per-CPU mm deferral ring as in the unwrapped variant above. */
static struct mmreq {
	int in, out, count;	/* ring indices and current depth */
#define MAX_MM 32
#define bump_mmreq(x) do { x = (x + 1) & (MAX_MM - 1); } while(0)
	struct mm_struct *mm[MAX_MM];
} lxrt_mmrqtab[NR_CPUS];
02438
02439
/*
 * HAL_SCHEDULE_HEAD hook (adeos event-info form, kernels < 2.4.32):
 * same lazy-TLB mm deferral as the unwrapped variant; finishes by
 * propagating the event down the adeos pipeline.
 */
static void lxrt_intercept_schedule_head (adevinfo_t *evinfo)
{
	IN_INTERCEPT_IRQ_ENABLE(); {

	struct { struct task_struct *prev, *next; } *evdata = (__typeof(evdata))evinfo->evdata;
	struct task_struct *prev = evdata->prev;

	if (!prev->mm) {
		struct mmreq *p = lxrt_mmrqtab + task_cpu(prev);
		struct mm_struct *oldmm = prev->active_mm;
		BUG_ON(p->count >= MAX_MM);	/* ring overflow would corrupt entries */
		atomic_inc(&oldmm->mm_count);	/* held until schedule_tail drains it */
		p->mm[p->in] = oldmm;
		bump_mmreq(p->in);
		p->count++;
	}

	hal_propagate_event(evinfo);
} }

#endif
02473
02474
/*
 * HAL_SCHEDULE_TAIL hook (adeos event-info form): same klist drain and
 * deferred mm cleanup as the unwrapped variant, but the klist drain is
 * bracketed by preempt_disable()/preempt_enable() and the event is
 * propagated instead of returning a verdict.  In hard real time mode
 * it returns immediately without propagating.
 */
static void lxrt_intercept_schedule_tail (adevinfo_t *evinfo)
{
	IN_INTERCEPT_IRQ_ENABLE(); {

	int cpuid;
	if (in_hrt_mode(cpuid = smp_processor_id())) {
		return;
	} else {
		struct klist_t *klistp = &klistb[cpuid];
		struct task_struct *lnxtsk = current;
#ifdef CONFIG_PREEMPT
		preempt_disable();
#endif
		/* Fast-schedule every RTAI task queued for wakeup on this CPU. */
		while (klistp->out != klistp->in) {
			rt_global_cli();
			fast_schedule(klistp->task[klistp->out++ & (MAX_WAKEUP_SRQ - 1)], lnxtsk, cpuid);
			rt_global_sti();
		}
#ifdef CONFIG_PREEMPT
		preempt_enable();
#endif
	}

#ifdef CONFIG_PREEMPT
#endif

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,4,32)
	{
		struct mmreq *p;

#ifdef CONFIG_PREEMPT
		preempt_disable();
#endif

		p = lxrt_mmrqtab + smp_processor_id();

		/* Drop every mm reference queued at schedule_head time. */
		while (p->out != p->in) {
			struct mm_struct *oldmm = p->mm[p->out];
			mmdrop(oldmm);
			bump_mmreq(p->out);
			p->count--;
		}

#ifdef CONFIG_PREEMPT
		preempt_enable();
#endif
	}
#endif

	hal_propagate_event(evinfo);
} }
02528
02529
/* Payload of the HAL kick/wakeup event (the task being signalled). */
struct sig_wakeup_t { struct task_struct *task; };
/* HAL_KICK_PROCESS hook (adeos form): divert signal wakeups of LXRT tasks. */
static void lxrt_intercept_sig_wakeup (long event, struct sig_wakeup_t *evdata)
{
	IN_INTERCEPT_IRQ_ENABLE(); {
	RT_TASK *task;
	if ((task = (evdata->task)->rtai_tskext(TSKEXT0))) {
		rt_signal_wake_up(task);
	}
} }
02538
02539
/*
 * HAL_EXIT_PROCESS hook (adeos form): if the dying process owns an
 * LXRT task, return it to soft mode first (when hard) and run the
 * LXRT process-termination cleanup, then propagate the event.
 */
static void lxrt_intercept_exit (adevinfo_t *evinfo)
{
	IN_INTERCEPT_IRQ_ENABLE(); {

	extern void linux_process_termination(void);
	RT_TASK *task = current->rtai_tskext(TSKEXT0);
	if (task) {
		if (task->is_hard > 0) {
			give_back_to_linux(task, 0);
		}
		linux_process_termination();
	}
	hal_propagate_event(evinfo);
} }
02553
02554
/* LXRT syscall dispatcher (defined in the LXRT core). */
extern long long rtai_lxrt_invoke (unsigned long, void *, void *);

/*
 * HAL_SYSCALL_PROLOGUE hook (adeos event-info form): dispatch LXRT
 * syscalls (USE_LINUX_SYSCALL builds), forward plain Linux syscalls of
 * hard tasks to their syscall server, or as a last resort force the
 * task soft; the event is propagated whenever Linux must still run
 * the syscall.
 */
static void lxrt_intercept_syscall_prologue(adevinfo_t *evinfo)
{
	IN_INTERCEPT_IRQ_ENABLE(); {

#ifdef USE_LINUX_SYSCALL
	struct pt_regs *r = (struct pt_regs *)evinfo->evdata;
	unsigned long syscall_nr;
	if ((syscall_nr = r->RTAI_SYSCALL_NR) >= GT_NR_SYSCALLS) {
		/* LXRT service call: dispatch and only propagate when soft. */
		long long retval = rtai_lxrt_invoke(syscall_nr, (void *)r->RTAI_SYSCALL_ARGS, r);
		SET_LXRT_RETVAL_IN_SYSCALL(r, retval);
		if (!in_hrt_mode(rtai_cpuid())) {
			hal_propagate_event(evinfo);
		}
		return;
	}
#endif
	{
	int cpuid;
	if (in_hrt_mode(cpuid = rtai_cpuid())) {
#ifdef ECHO_SYSW
		struct pt_regs *r = (struct pt_regs *)evinfo->evdata;
#endif
		RT_TASK *task = rt_smp_current[cpuid];
		if (task->is_hard > 0) {
			/* Preferred path: hand the syscall to the task's server. */
			if (task->linux_syscall_server) {
#if 1
				rt_exec_linux_syscall(task, (void *)task->linux_syscall_server, (struct pt_regs *)evinfo->evdata);
#else
				struct pt_regs *r = (struct pt_regs *)evinfo->evdata;
				((void (*)(RT_TASK *, void *, void *, int, int))rt_fun_lxrt[RPCX].fun)(task->linux_syscall_server, r, &r->LINUX_SYSCALL_RETREG, sizeof(struct pt_regs), sizeof(long));
#endif
				return;
			}
			/* Fallback: force the task soft so Linux can serve it. */
			if (!systrans++) {
				struct pt_regs *r = (struct pt_regs *)evinfo->evdata;
				rt_printk("\nLXRT CHANGED MODE (SYSCALL), PID = %d, SYSCALL = %lu.\n", (task->lnxtsk)->pid, r->RTAI_SYSCALL_NR);
			}
			SYSW_DIAG_MSG(rt_printk("\nFORCING IT SOFT (SYSCALL), PID = %d, SYSCALL = %ld.\n", (task->lnxtsk)->pid, r->RTAI_SYSCALL_NR););
			give_back_to_linux(task, -1);
			SYSW_DIAG_MSG(rt_printk("FORCED IT SOFT (SYSCALL), PID = %d, SYSCALL = %ld.\n", (task->lnxtsk)->pid, r->RTAI_SYSCALL_NR););
		}
	} }
	hal_propagate_event(evinfo);
} }
02601
02602
/*
 * HAL_SYSCALL_EPILOGUE hook (adeos form): after a Linux syscall, steal
 * back to hard real time any LXRT task flagged as soft-transitioned
 * (is_hard < 0); otherwise propagate the event.
 */
static void lxrt_intercept_syscall_epilogue(adevinfo_t *evinfo)
{
	IN_INTERCEPT_IRQ_ENABLE(); {

	RT_TASK *task;
	if (current->rtai_tskext(TSKEXT0) && (task = (RT_TASK *)current->rtai_tskext(TSKEXT0))->is_hard < 0) {
		SYSW_DIAG_MSG(rt_printk("GOING BACK TO HARD (SYSLXRT), PID = %d.\n", current->pid););
		steal_from_linux(task);
		SYSW_DIAG_MSG(rt_printk("GONE BACK TO HARD (SYSLXRT), PID = %d.\n", current->pid););
		return;
	}
	hal_propagate_event(evinfo);
} }

#endif /* UNWRAPPED_CATCH_EVENT */
02616
02617
02618
02619
#ifdef CONFIG_PROC_FS
02620
02621
02622
/*
 * /proc/rtai/scheduler read handler: dumps timer calibration data and,
 * for each RT CPU, one line per registered task plus the TIMED and
 * READY queues.  Uses the PROC_PRINT_* helpers from rtai_proc_fs.h.
 */
static int rtai_read_sched(char *page, char **start, off_t off, int count,
			   int *eof, void *data)
{
	PROC_PRINT_VARS;
	int cpuid, i = 1;
	unsigned long t;
	RT_TASK *task;

	PROC_PRINT("\nRTAI LXRT Real Time Task Scheduler.\n\n");
	PROC_PRINT(" Calibrated CPU Frequency: %lu Hz\n", tuned.cpu_freq);
	PROC_PRINT(" Calibrated interrupt to scheduler latency: %d ns\n", (int)imuldiv(tuned.latency - tuned.setup_time_TIMER_CPUNIT, 1000000000, tuned.cpu_freq));
	PROC_PRINT(" Calibrated oneshot timer setup_to_firing time: %d ns\n\n",
		   (int)imuldiv(tuned.setup_time_TIMER_CPUNIT, 1000000000, tuned.cpu_freq));
	PROC_PRINT("Number of RT CPUs in system: %d\n\n", NR_RT_CPUS);
	PROC_PRINT("Number of forced hard/soft/hard transitions: traps %lu, syscalls %lu\n\n", traptrans, systrans);

	PROC_PRINT("Priority Period(ns) FPU Sig State CPU Task HD/SF PID RT_TASK * TIME\n" );
	PROC_PRINT("------------------------------------------------------------------------------\n" );
	for (cpuid = 0; cpuid < NR_RT_CPUS; cpuid++) {
		task = &rt_linux_task;
		/* Walk every task registered behind this CPU's linux task. */
		while ((task = task->next)) {
			/* Per-mille CPU usage since exectime[1]; only computed for
			 * kernel tasks or hard user tasks with exec timing data. */
			t = 0;
			if ((!task->lnxtsk || task->is_hard) && task->exectime[1]) {
				unsigned long den = (unsigned long)llimd(rdtsc() - task->exectime[1], 10, tuned.cpu_freq);
				if (den) {
					t = 1000UL*(unsigned long)llimd(task->exectime[0], 10, tuned.cpu_freq)/den;
				}
			}
			PROC_PRINT("%-10d %-11lu %-4s %-3s 0x%-3x %1lu:%1lu %-4d %-4d %-4d %p %-lu\n",
				task->priority,
				(unsigned long)count2nano_cpuid(task->period, task->runnable_on_cpus),
				task->uses_fpu || task->lnxtsk ? "Yes" : "No",
				task->signal ? "Yes" : "No",
				task->state,
				task->runnable_on_cpus,
				task->lnxtsk ? CPUMASK((task->lnxtsk)->cpus_allowed) : (1 << task->runnable_on_cpus),
				i,
				task->is_hard,
				task->lnxtsk ? task->lnxtsk->pid : 0,
				task, t);
			i++;
		}

		/* Dump the timed-wait and ready queues as pointer chains. */
		PROC_PRINT("TIMED\n");
		task = &rt_linux_task;
		while ((task = task->tnext) != &rt_linux_task) {
			PROC_PRINT("> %p ", task);
		}
		PROC_PRINT("\nREADY\n");
		task = &rt_linux_task;
		while ((task = task->rnext) != &rt_linux_task) {
			PROC_PRINT("> %p ", task);
		}

	}

	PROC_PRINT_DONE;

}
02692
02693
02694
static int rtai_proc_sched_register(
void)
02695 {
02696
struct proc_dir_entry *proc_sched_ent;
02697
02698
02699 proc_sched_ent = create_proc_entry(
"scheduler", S_IFREG|S_IRUGO|S_IWUSR, rtai_proc_root);
02700
if (!proc_sched_ent) {
02701
printk(
"Unable to initialize /proc/rtai/scheduler\n");
02702
return(-1);
02703 }
02704 proc_sched_ent->read_proc = rtai_read_sched;
02705
return(0);
02706 }
02707
02708
02709
/* Remove the /proc/rtai/scheduler entry at module unload. */
static void rtai_proc_sched_unregister(void)
{
	remove_proc_entry("scheduler", rtai_proc_root);
}

#endif /* CONFIG_PROC_FS */
02716
02717
02718
02719
/* RTC pass-through services implemented elsewhere in the scheduler. */
extern void usp_request_rtc(int, void *);
extern void rt_release_rtc(void);

/*
 * Native LXRT service table: each entry binds a scheduler function to
 * its LXRT request index.  The table is zero-terminated and installed
 * into rt_fun_lxrt[] via set_rt_fun_entries() in lxrt_init().
 * NOTE(review): the leading 0/1 flag fills the entry's type field; its
 * exact semantics are defined in rtai_lxrt.h — confirm before changing.
 */
static struct rt_native_fun_entry rt_sched_entries[] = {
	{ { 0, rt_named_task_init },		    NAMED_TASK_INIT },
	{ { 0, rt_named_task_init_cpuid },	    NAMED_TASK_INIT_CPUID },
	{ { 0, rt_named_task_delete },		    NAMED_TASK_DELETE },
	{ { 1, rt_task_yield },			    YIELD },
	{ { 1, rt_task_suspend },		    SUSPEND },
	{ { 1, rt_task_resume },		    RESUME },
	{ { 1, rt_task_make_periodic },		    MAKE_PERIODIC },
	{ { 1, rt_task_wait_period },		    WAIT_PERIOD },
	{ { 1, rt_sleep },			    SLEEP },
	{ { 1, rt_sleep_until },		    SLEEP_UNTIL },
	{ { 0, start_rt_timer },		    START_TIMER },
	{ { 0, stop_rt_timer },			    STOP_TIMER },
	{ { 0, rt_get_time },			    GET_TIME },
	{ { 0, count2nano },			    COUNT2NANO },
	{ { 0, nano2count },			    NANO2COUNT },
	{ { 0, rt_busy_sleep },			    BUSY_SLEEP },
	{ { 0, rt_set_periodic_mode },		    SET_PERIODIC_MODE },
	{ { 0, rt_set_oneshot_mode },		    SET_ONESHOT_MODE },
	{ { 0, rt_task_signal_handler },	    SIGNAL_HANDLER },
	{ { 0, rt_task_use_fpu },		    TASK_USE_FPU },
	{ { 0, rt_linux_use_fpu },		    LINUX_USE_FPU },
	{ { 0, rt_hard_timer_tick_count },	    HARD_TIMER_COUNT },
	{ { 0, rt_get_time_ns },		    GET_TIME_NS },
	{ { 0, rt_get_cpu_time_ns },		    GET_CPU_TIME_NS },
	{ { 0, rt_set_runnable_on_cpus },	    SET_RUNNABLE_ON_CPUS },
	{ { 0, rt_set_runnable_on_cpuid },	    SET_RUNNABLE_ON_CPUID },
	{ { 0, rt_get_timer_cpu },		    GET_TIMER_CPU },
	{ { 0, start_rt_apic_timers },		    START_RT_APIC_TIMERS },
	{ { 0, rt_hard_timer_tick_count_cpuid },    HARD_TIMER_COUNT_CPUID },
	{ { 0, count2nano_cpuid },		    COUNT2NANO_CPUID },
	{ { 0, nano2count_cpuid },		    NANO2COUNT_CPUID },
	{ { 0, rt_get_time_cpuid },		    GET_TIME_CPUID },
	{ { 0, rt_get_time_ns_cpuid },		    GET_TIME_NS_CPUID },
	{ { 1, rt_task_make_periodic_relative_ns }, MAKE_PERIODIC_NS },
	{ { 0, rt_set_sched_policy },		    SET_SCHED_POLICY },
	{ { 1, rt_task_set_resume_end_times },	    SET_RESUME_END },
	{ { 0, rt_spv_RMS },			    SPV_RMS },
	{ { 0, rt_task_masked_unblock },	    WAKEUP_SLEEPING },
	{ { 1, rt_change_prio },		    CHANGE_TASK_PRIO },
	{ { 0, rt_set_resume_time },		    SET_RESUME_TIME },
	{ { 0, rt_set_period },			    SET_PERIOD },
	{ { 0, rt_is_hard_timer_running },	    HARD_TIMER_RUNNING },
	{ { 0, rt_get_adr },			    GET_ADR },
	{ { 0, rt_get_name },			    GET_NAME },
	{ { 1, rt_task_suspend_if },		    SUSPEND_IF },
	{ { 1, rt_task_suspend_until },		    SUSPEND_UNTIL },
	{ { 1, rt_task_suspend_timed },		    SUSPEND_TIMED },
	{ { 1, rt_irq_wait },			    IRQ_WAIT },
	{ { 1, rt_irq_wait_if },		    IRQ_WAIT_IF },
	{ { 1, rt_irq_wait_until },		    IRQ_WAIT_UNTIL },
	{ { 1, rt_irq_wait_timed },		    IRQ_WAIT_TIMED },
	{ { 0, rt_irq_signal },			    IRQ_SIGNAL },
	{ { 0, rt_request_irq_task },		    REQUEST_IRQ_TASK },
	{ { 0, rt_release_irq_task },		    RELEASE_IRQ_TASK },
	{ { 0, rt_sched_lock },			    SCHED_LOCK },
	{ { 0, rt_sched_unlock },		    SCHED_UNLOCK },
	{ { 0, rt_pend_linux_irq },		    PEND_LINUX_IRQ },
	{ { 1, rt_return_linux_syscall },	    RETURN_LINUX_SYSCALL },
	{ { 1, rt_receive_linux_syscall },	    RECEIVE_LINUX_SYSCALL },
	{ { 0, usp_request_rtc },		    REQUEST_RTC },
	{ { 0, rt_release_rtc },		    RELEASE_RTC },
	{ { 0, 0 },				    000 }	/* terminator */
};
02786
02787
/* Function pointer the RTAI HAL jumps through to dispatch LXRT syscalls. */
extern void *rtai_lxrt_dispatcher;

DECLARE_FUSION_WAKE_UP_STUFF;

/*
 * LXRT subsystem bring-up: spawn the per-CPU spare-kthread managers,
 * install the LXRT service table, hook the scheduler trap handler and
 * the HAL events, and finally publish the LXRT syscall dispatcher.
 * Returns 0 (no failure paths are reported).
 */
static int lxrt_init(void)
{
	void init_fun_ext(void);
	int cpuid;

	init_fun_ext();

	REQUEST_RESUME_SRQs_STUFF();

	/* At least one spare kthread, split evenly across the RT CPUs. */
	if (Reservoir <= 0)
		Reservoir = 1;

	Reservoir = (Reservoir + NR_RT_CPUS - 1)/NR_RT_CPUS;

	for (cpuid = 0; cpuid < num_online_cpus(); cpuid++)
	{
		/* NOTE(review): kmalloc result is not checked — taskav[cpuid]
		 * would be NULL on OOM; confirm acceptable at module load. */
		taskav[cpuid] = (void *)kmalloc(SpareKthreads*sizeof(void *), GFP_KERNEL);
		init_MUTEX_LOCKED(&resem[cpuid]);
		kernel_thread((void *)kthread_m, (void *)(long)cpuid, 0);
		down(&resem[cpuid]);	/* wait for the manager thread to come up */
		klistm[cpuid].in = (2*Reservoir) & (MAX_WAKEUP_SRQ - 1);
		wake_up_process(kthreadm[cpuid]);
	}

	/* Default every LXRT slot to the nihil stub before installing entries. */
	for (cpuid = 0; cpuid < MAX_LXRT_FUN; cpuid++)
	{
		rt_fun_lxrt[cpuid].type = 1;
		rt_fun_lxrt[cpuid].fun = nihil;
	}

	set_rt_fun_entries(rt_sched_entries);

	lxrt_old_trap_handler = rt_set_rtai_trap_handler(lxrt_handle_trap);

#ifdef CONFIG_PROC_FS
	rtai_proc_lxrt_register();
#endif

	/* Hook every HAL event LXRT needs to intercept. */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,4,32)
	rtai_catch_event(hal_root_domain, HAL_SCHEDULE_HEAD, (void *)lxrt_intercept_schedule_head);
#endif
	rtai_catch_event(hal_root_domain, HAL_SCHEDULE_TAIL, (void *)lxrt_intercept_schedule_tail);
	rtai_catch_event(hal_root_domain, HAL_SYSCALL_PROLOGUE, (void *)lxrt_intercept_syscall_prologue);
	rtai_catch_event(hal_root_domain, HAL_SYSCALL_EPILOGUE, (void *)lxrt_intercept_syscall_epilogue);
	rtai_catch_event(hal_root_domain, HAL_EXIT_PROCESS, (void *)lxrt_intercept_exit);
	rtai_catch_event(hal_root_domain, HAL_KICK_PROCESS, (void *)lxrt_intercept_sig_wakeup);
	rtai_lxrt_dispatcher = rtai_lxrt_invoke;

	return 0;
}
02846
02847 static void lxrt_exit(
void)
02848 {
02849
RT_TASK *rt_task;
02850
struct task_struct *kthread;
02851
unsigned long flags;
02852
int cpuid;
02853
02854
#ifdef CONFIG_PROC_FS
02855
rtai_proc_lxrt_unregister();
02856
#endif
02857
02858 rt_task = kmalloc(
sizeof(
RT_TASK), GFP_KERNEL);
02859
for (
cpuid = 0;
cpuid < num_online_cpus();
cpuid++) {
02860
while ((kthread =
__get_kthread(
cpuid))) {
02861
if (kthread->rtai_tskext(
TSKEXT2)) {
02862 kfree(kthread->rtai_tskext(
TSKEXT2));
02863 }
02864 rt_task->magic = 0;
02865
set_rtext(rt_task, 0, 0, 0,
cpuid, kthread);
02866 rt_task->max_msg_size[0] = 0;
02867
rt_task_resume(rt_task);
02868
while (rt_task->magic || rt_task->state) {
02869 current->state = TASK_INTERRUPTIBLE;
02870 schedule_timeout(2);
02871 }
02872 }
02873 }
02874 kfree(rt_task);
02875
02876
endkthread = 1;
02877
for (
cpuid = 0;
cpuid < num_online_cpus();
cpuid++) {
02878 wake_up_process(
kthreadm[
cpuid]);
02879
while (
kthreadm[
cpuid]) {
02880 current->state = TASK_INTERRUPTIBLE;
02881 schedule_timeout(2);
02882 }
02883 kfree(
taskav[
cpuid]);
02884 }
02885
02886 rt_set_rtai_trap_handler(
lxrt_old_trap_handler);
02887
02888 RELEASE_RESUME_SRQs_STUFF();
02889
02890 rtai_catch_event(
hal_root_domain,
HAL_SCHEDULE_HEAD, NULL);
02891 rtai_catch_event(
hal_root_domain,
HAL_SCHEDULE_TAIL, NULL);
02892 rtai_catch_event(
hal_root_domain,
HAL_SYSCALL_PROLOGUE, NULL);
02893 rtai_catch_event(
hal_root_domain,
HAL_SYSCALL_EPILOGUE, NULL);
02894 rtai_catch_event(
hal_root_domain,
HAL_EXIT_PROCESS, NULL);
02895 rtai_catch_event(
hal_root_domain,
HAL_KICK_PROCESS, NULL);
02896
rtai_lxrt_dispatcher = NULL;
02897
02898
flags =
rtai_critical_enter(NULL);
02899
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,4,32)
02900
do {
02901
struct mmreq *p;
02902
02903
for (p = lxrt_mmrqtab; p < lxrt_mmrqtab + NR_CPUS; p++) {
02904
while (p->out != p->in) {
02905
struct mm_struct *oldmm = p->mm[p->out];
02906 mmdrop(oldmm);
02907 bump_mmreq(p->out);
02908 p->count--;
02909 }
02910 }
02911 }
while (0);
02912
#endif
02913
rtai_critical_exit(
flags);
02914
02915
reset_rt_fun_entries(
rt_sched_entries);
02916 }
02917
02918
#ifdef DECLR_8254_TSC_EMULATION
02919 DECLR_8254_TSC_EMULATION;
02920
02921 static void timer_fun(
unsigned long none)
02922 {
02923
TICK_8254_TSC_EMULATION();
02924 timer.expires = jiffies + (HZ + TSC_EMULATION_GUARD_FREQ/2 - 1)/TSC_EMULATION_GUARD_FREQ;
02925 add_timer(&timer);
02926 }
02927
#endif
02928
02929
/* Registry bookkeeping (defined in the RTAI registry module). */
extern int rt_registry_alloc(void);
extern void rt_registry_free(void);

/*
 * Module entry point: allocate scheduler memory and the registry,
 * initialise each CPU's "Linux task" placeholder descriptor, compute
 * the tuned timing constants, register the /proc entry, the freeing
 * SRQ and the scheduler IPI, then start LXRT proper.  Unwinds via the
 * goto-cleanup chain on failure; returns 0 or a negative errno.
 */
static int __rtai_lxrt_init(void)
{
	int cpuid, retval;

#ifdef CONFIG_REGPARM
	if (!USE_RTAI_TASKS) {
		printk(KERN_INFO "RTAI[sched_lxrt]: Linux kernel REGPARM configuration enabled, RTAI will not work in user space, disable it.\n");
		return -EINVAL;
	}
#endif
	sched_mem_init();
	rt_registry_alloc();

	/* NOTE(review): rt_linux_task presumably resolves per-cpuid (see
	 * rt_smp_linux_task[] above) — confirm against rtai_schedcore.h. */
	for (cpuid = 0; cpuid < NR_RT_CPUS; cpuid++) {
		rt_linux_task.uses_fpu = 1;
		rt_linux_task.magic = 0;
		rt_linux_task.policy = rt_linux_task.is_hard = 0;
		rt_linux_task.runnable_on_cpus = cpuid;
		rt_linux_task.state = RT_SCHED_READY;
		rt_linux_task.msg_queue.prev = &(rt_linux_task.msg_queue);
		rt_linux_task.msg_queue.next = &(rt_linux_task.msg_queue);
		rt_linux_task.msg_queue.task = &rt_linux_task;
		rt_linux_task.msg = 0;
		rt_linux_task.ret_queue.prev = &(rt_linux_task.ret_queue);
		rt_linux_task.ret_queue.next = &(rt_linux_task.ret_queue);
		rt_linux_task.ret_queue.task = NOTHING;
		rt_linux_task.priority = RT_SCHED_LINUX_PRIORITY;
		rt_linux_task.base_priority = RT_SCHED_LINUX_PRIORITY;
		rt_linux_task.signal = 0;
		rt_linux_task.prev = &rt_linux_task;
		rt_linux_task.resume_time = RT_TIME_END;
		rt_linux_task.tprev = rt_linux_task.tnext =
		rt_linux_task.rprev = rt_linux_task.rnext = &rt_linux_task;
#ifdef CONFIG_RTAI_LONG_TIMED_LIST
		rt_linux_task.rbr.rb_node = NULL;
#endif
		rt_linux_task.next = 0;
		rt_linux_task.lnxtsk = current;
		rt_smp_current[cpuid] = &rt_linux_task;
		rt_smp_fpu_task[cpuid] = &rt_linux_task;
		oneshot_timer = OneShot ? 1 : 0;
		oneshot_running = 0;
		linux_cr0 = 0;
	}
	/* Convert module-parameter timings (ns) into CPU/timer units. */
	tuned.latency = imuldiv(Latency, tuned.cpu_freq, 1000000000);
	tuned.setup_time_TIMER_CPUNIT = imuldiv(SetupTimeTIMER,
						tuned.cpu_freq,
						1000000000);
	tuned.setup_time_TIMER_UNIT = imuldiv(SetupTimeTIMER,
					      TIMER_FREQ,
					      1000000000);
	tuned.timers_tol[0] = 0;
#ifdef CONFIG_PROC_FS
	if (rtai_proc_sched_register()) {
		retval = 1;
		goto mem_end;
	}
#endif

	/* SRQ used to lazily free task stacks/resources. */
	if ((frstk_srq.srq = rt_request_srq(0x7dd763ad, frstk_srq_handler, 0)) < 0) {
		printk("MEM SRQ: no sysrq available.\n");
		retval = frstk_srq.srq;
		goto proc_unregister;
	}

	frstk_srq.in = frstk_srq.out = 0;
	if ((retval = rt_request_sched_ipi()) != 0)
		goto free_srq;

	if ((retval = lxrt_init()) != 0)
		goto free_sched_ipi;

#ifdef CONFIG_RTAI_SCHED_ISR_LOCK
	rt_set_ihook(&rtai_handle_isched_lock);
#endif

	register_reboot_notifier(&lxrt_notifier_reboot);
#ifdef CONFIG_SMP
	printk(KERN_INFO "RTAI[sched_lxrt]: loaded (IMMEDIATE, MP, USER/KERNEL SPACE%s).\n", USE_RTAI_TASKS ? " <with RTAI TASKs>" : "");
#else
	printk(KERN_INFO "RTAI[sched_lxrt]: loaded (IMMEDIATE, UP, USER/KERNEL SPACE%s).\n", USE_RTAI_TASKS ? " <with RTAI TASKs>" : "");
#endif
	printk(KERN_INFO "RTAI[sched_lxrt]: hard timer type/freq = %s/%d(Hz); default timing mode is %s; ", TIMER_NAME, (int)TIMER_FREQ, OneShot ? "oneshot" : "periodic");
#ifdef CONFIG_RTAI_LONG_TIMED_LIST
	printk("binary tree ordering of timed lists.\n");
#else
	printk("linear ordering of timed lists.\n");
#endif
	printk(KERN_INFO "RTAI[sched_lxrt]: Linux timer freq = %d (Hz), CPU freq = %lu hz.\n", HZ, (unsigned long)tuned.cpu_freq);
	printk(KERN_INFO "RTAI[sched_lxrt]: timer setup = %d ns, resched latency = %d ns.\n", (int)imuldiv(tuned.setup_time_TIMER_CPUNIT, 1000000000, tuned.cpu_freq), (int)imuldiv(tuned.latency - tuned.setup_time_TIMER_CPUNIT, 1000000000, tuned.cpu_freq));

#ifdef DECLR_8254_TSC_EMULATION
	SETUP_8254_TSC_EMULATION;
#endif

	retval = rtai_init_features();
exit:
	return retval;
free_sched_ipi:
	rt_free_sched_ipi();
free_srq:
	rt_free_srq(frstk_srq.srq);
proc_unregister:
#ifdef CONFIG_PROC_FS
	rtai_proc_sched_unregister();
#endif
mem_end:
	sched_mem_end();
	rt_registry_free();
	goto exit;
}
03044
/*
 * Module exit: kill every remaining LXRT task, release registered
 * objects, tear LXRT down, drain and release the freeing SRQ and the
 * scheduler IPI, and give pending soft work a short grace period.
 */
static void __rtai_lxrt_exit(void)
{
	unregister_reboot_notifier(&lxrt_notifier_reboot);

	lxrt_killall();

	krtai_objects_release();

	lxrt_exit();

	rtai_cleanup_features();

#ifdef CONFIG_PROC_FS
	rtai_proc_sched_unregister();
#endif
	/* Busy-wait until the stack-freeing SRQ ring is fully drained. */
	while (frstk_srq.out != frstk_srq.in);
	if (rt_free_srq(frstk_srq.srq) < 0) {
		printk("MEM SRQ: frstk_srq %d illegal or already free.\n", frstk_srq.srq);
	}
	rt_free_sched_ipi();
	sched_mem_end();
	rt_registry_free();
	/* Short grace sleep so any in-flight deferred work can finish. */
	current->state = TASK_INTERRUPTIBLE;
	schedule_timeout(HZ/10);
#ifdef CONFIG_RTAI_SCHED_ISR_LOCK
	rt_set_ihook(NULL);
#endif

#ifdef DECLR_8254_TSC_EMULATION
	CLEAR_8254_TSC_EMULATION;
#endif

	printk(KERN_INFO "RTAI[sched_lxrt]: unloaded (forced hard/soft/hard transitions: traps %lu, syscalls %lu).\n", traptrans, systrans);
}
03079
03080
module_init(__rtai_lxrt_init);
module_exit(__rtai_lxrt_exit);

#ifdef CONFIG_KBUILD

/* Scheduler-core symbols exported for the other RTAI modules. */
EXPORT_SYMBOL(rt_fun_lxrt);
EXPORT_SYMBOL(clr_rtext);
EXPORT_SYMBOL(set_rtext);
EXPORT_SYMBOL(get_min_tasks_cpuid);
EXPORT_SYMBOL(rt_schedule_soft);
EXPORT_SYMBOL(rt_do_force_soft);
EXPORT_SYMBOL(rt_schedule_soft_tail);
EXPORT_SYMBOL(rt_sched_timed);
EXPORT_SYMBOL(rtai_handle_isched_lock);

#endif