00001
00002
00003
00004
00005
00006
00007
00008
00009
00010
00011
00012
00013
00014
00015
00016
00017
00018
00019
00020
#ifndef _RTAI_SCHEDCORE_H
00021
#define _RTAI_SCHEDCORE_H
00022
00023
#include <rtai_version.h>
00024
#include <rtai_lxrt.h>
00025
#include <rtai_sched.h>
00026
#include <rtai_malloc.h>
00027
#include <rtai_trace.h>
00028
#include <rtai_leds.h>
00029
#include <rtai_sem.h>
00030
#include <rtai_rwl.h>
00031
#include <rtai_spl.h>
00032
#include <rtai_scb.h>
00033
#include <rtai_mbx.h>
00034
#include <rtai_msg.h>
00035
#include <rtai_tbx.h>
00036
#include <rtai_mq.h>
00037
#include <rtai_bits.h>
00038
#include <rtai_wd.h>
00039
#include <rtai_tasklets.h>
00040
#include <rtai_fifos.h>
00041
#include <rtai_netrpc.h>
00042
#include <rtai_shm.h>
00043
#include <rtai_usi.h>
00044
00045
#ifdef __KERNEL__
00046
00047
#include <linux/module.h>
00048
#include <linux/init.h>
00049
#include <linux/kernel.h>
00050
#include <linux/version.h>
00051
#include <linux/errno.h>
00052
#include <linux/slab.h>
00053
#include <linux/timex.h>
00054
#include <linux/sched.h>
00055
#include <asm/param.h>
00056
#include <asm/system.h>
00057
#include <asm/io.h>
00058
00059
00060
#ifndef _RTAI_SCHED_XN_H
#define _RTAI_SCHED_XN_H

#ifdef RTAI_TRIOSS

/*
 * RTAI/fusion (Xenomai) coexistence support. When RTAI_TRIOSS is set the
 * scheduler must tell hard real-time fusion threads apart from plain Linux
 * tasks and suspend/resume them through the fusion nucleus instead of the
 * Linux scheduler.
 */

extern int nkgkptd;
#define FUSIONEXT (nkgkptd)

/* Fusion thread status bits (mirror of the nucleus' flags). */
#define XNSUSP   (0x00000001)
#define XNRELAX  (0x00000100)

/* Minimal mirror of the fusion arch TCB; only the layout matters here. */
typedef struct xnarchtcb {
	union i387_union fpuenv __attribute__ ((aligned (16)));
	unsigned stacksize;
	unsigned long *stackbase;
	unsigned long esp;
	unsigned long eip;
	struct task_struct *user_task;
	struct task_struct *active_task;
	unsigned long *espp;
	unsigned long *eipp;
	union i387_union *fpup;
} xnarchtcb_t;

/* Minimal mirror of a fusion thread: TCB followed by its status word. */
typedef struct xnthread {
	xnarchtcb_t tcb;
	unsigned long status;
} xnthread_t;

extern void xnpod_resume_thread(void *, unsigned long);
extern void xnpod_schedule(void);

extern struct hal_domain_struct *fusion_domain;

extern struct klist_t fusion_wake_up_srq[];

/*
 * Suspend a non-RTAI task: a non-relaxed fusion thread gets the XNSUSP
 * status bit set atomically; anything else is parked in the Linux
 * soft real-time (interruptible) state.
 */
#define NON_RTAI_TASK_SUSPEND(task) \
do { \
	xnthread_t *thread; \
	if ((thread = (task->lnxtsk)->rtai_tskext(FUSIONEXT)) && !(thread->status & XNRELAX)) { \
		atomic_set_mask(XNSUSP, (atomic_t *)&thread->status); \
	} else { \
		(task->lnxtsk)->state = TASK_SOFTREALTIME; \
	} \
} while (0)

/* Queue lnxtsk on this CPU's fusion wake-up list and pend its srq in the fusion domain. */
#define pend_fusion_wake_up_srq(lnxtsk, cpuid) \
do { \
	fusion_wake_up_srq[cpuid].task[fusion_wake_up_srq[cpuid].in++ & (MAX_WAKEUP_SRQ - 1)] = lnxtsk; \
	hal_pend_domain_uncond(fusion_wake_up_srq[cpuid].srq, fusion_domain, cpuid); \
} while (0)

/*
 * Resume a non-RTAI task: route non-relaxed fusion threads through the
 * fusion wake-up srq, everything else through the plain Linux one.
 */
#define NON_RTAI_TASK_RESUME(ready_task) \
do { \
	xnthread_t *thread; \
	if ((thread = (ready_task->lnxtsk)->rtai_tskext(FUSIONEXT)) && !(thread->status & XNRELAX)) { \
		pend_fusion_wake_up_srq(ready_task->lnxtsk, rtai_cpuid()); \
	} else { \
		pend_wake_up_srq(ready_task->lnxtsk, rtai_cpuid()); \
	} \
} while (0)

/*
 * Per-CPU fusion wake-up machinery: the srq handler drains this CPU's
 * ring of pended tasks, resumes each fusion thread and reschedules.
 */
#define DECLARE_FUSION_WAKE_UP_STUFF \
struct klist_t fusion_wake_up_srq[MAX_WAKEUP_SRQ]; \
static void fusion_wake_up_srq_handler(unsigned srq) \
{ \
	int cpuid = srq - fusion_wake_up_srq[0].srq; \
	while (fusion_wake_up_srq[cpuid].out != fusion_wake_up_srq[cpuid].in) { \
		xnpod_resume_thread(((struct task_struct *)fusion_wake_up_srq[cpuid].task[fusion_wake_up_srq[cpuid].out++ & (MAX_WAKEUP_SRQ - 1)])->rtai_tskext(FUSIONEXT), XNSUSP); \
	} \
	xnpod_schedule(); \
} \
EXPORT_SYMBOL(fusion_wake_up_srq);

/* Allocate and virtualize the per-CPU wake-up srqs in both domains. */
#define REQUEST_RESUME_SRQs_STUFF() \
do { \
	int cpuid; \
	for (cpuid = 0; cpuid < num_online_cpus(); cpuid++) { \
		hal_virtualize_irq(hal_root_domain, wake_up_srq[cpuid].srq = hal_alloc_irq(), wake_up_srq_handler, NULL, IPIPE_HANDLE_FLAG); \
		hal_virtualize_irq(fusion_domain, fusion_wake_up_srq[cpuid].srq = hal_alloc_irq(), fusion_wake_up_srq_handler, NULL, IPIPE_HANDLE_FLAG); \
	} \
} while (0)

/* Tear down what REQUEST_RESUME_SRQs_STUFF() set up, in both domains. */
#define RELEASE_RESUME_SRQs_STUFF() \
do { \
	int cpuid; \
	for (cpuid = 0; cpuid < num_online_cpus(); cpuid++) { \
		hal_virtualize_irq(hal_root_domain, wake_up_srq[cpuid].srq, NULL, NULL, 0); \
		hal_free_irq(wake_up_srq[cpuid].srq); \
		hal_virtualize_irq(fusion_domain, fusion_wake_up_srq[cpuid].srq, NULL, NULL, 0); \
		hal_free_irq(fusion_wake_up_srq[cpuid].srq); \
	} \
} while (0)

#else /* !RTAI_TRIOSS */

/* No fusion coexistence: everything degenerates to the Linux-only paths. */

#define FUSIONEXT (0)

#define DECLARE_FUSION_WAKE_UP_STUFF

#define NON_RTAI_TASK_SUSPEND(task) \
	do { (task->lnxtsk)->state = TASK_SOFTREALTIME; } while (0)

#define NON_RTAI_TASK_RESUME(ready_task) \
	do { pend_wake_up_srq(ready_task->lnxtsk, rtai_cpuid()); } while (0)

#define REQUEST_RESUME_SRQs_STUFF() \
do { \
	int cpuid; \
	for (cpuid = 0; cpuid < num_online_cpus(); cpuid++) { \
		hal_virtualize_irq(hal_root_domain, wake_up_srq[cpuid].srq = hal_alloc_irq(), wake_up_srq_handler, NULL, IPIPE_HANDLE_FLAG); \
	} \
} while (0)

#define RELEASE_RESUME_SRQs_STUFF() \
do { \
	int cpuid; \
	for (cpuid = 0; cpuid < num_online_cpus(); cpuid++) { \
		hal_virtualize_irq(hal_root_domain, wake_up_srq[cpuid].srq, NULL, NULL, 0); \
		hal_free_irq(wake_up_srq[cpuid].srq); \
	} \
} while (0)

#endif /* RTAI_TRIOSS */

#endif /* _RTAI_SCHED_XN_H */
00185
00186
00187
extern RT_TASK rt_smp_linux_task[];
00188
00189
extern RT_TASK *
rt_smp_current[];
00190
00191
extern RTIME rt_smp_time_h[];
00192
00193
extern int rt_smp_oneshot_timer[];
00194
00195
extern volatile int rt_sched_timed;
00196
00197
#ifdef CONFIG_RTAI_MALLOC
00198
#define sched_malloc(size) rt_malloc((size))
00199
#define sched_free(adr) rt_free((adr))
00200
#ifndef CONFIG_RTAI_MALLOC_BUILTIN
00201
#define sched_mem_init()
00202
#define sched_mem_end()
00203
#else
00204
#define sched_mem_init() \
00205
{ if(__rtai_heap_init() != 0) { \
00206
return(-ENOMEM); \
00207
} }
00208
#define sched_mem_end() __rtai_heap_exit()
00209
#endif
00210
#define call_exit_handlers(task) __call_exit_handlers(task)
00211
#define set_exit_handler(task, fun, arg1, arg2) __set_exit_handler(task, fun, arg1, arg2)
00212
#else
00213
#define sched_malloc(size) kmalloc((size), GFP_KERNEL)
00214
#define sched_free(adr) kfree((adr))
00215
#define sched_mem_init()
00216
#define sched_mem_end()
00217
#define call_exit_handlers(task)
00218
#define set_exit_handler(task, fun, arg1, arg2)
00219
#endif
00220
00221
#define RT_SEM_MAGIC 0x3f83ebb // nam2num("rtsem")
00222
00223
#define SEM_ERR (0xFfff)
00224
00225
#define MSG_ERR ((RT_TASK *)0xFfff)
00226
00227
#define NOTHING ((void *)0)
00228
#define SOMETHING ((void *)1)
00229
00230
#define SEMHLF 0x0000FFFF
00231
#define RPCHLF 0xFFFF0000
00232
#define RPCINC 0x00010000
00233
00234
#define DECLARE_RT_CURRENT int cpuid; RT_TASK *rt_current
00235
#define ASSIGN_RT_CURRENT rt_current = rt_smp_current[cpuid = rtai_cpuid()]
00236
#define RT_CURRENT rt_smp_current[rtai_cpuid()]
00237
00238
#define MAX_LINUX_RTPRIO 99
00239
#define MIN_LINUX_RTPRIO 1
00240
00241
#ifdef CONFIG_RTAI_SCHED_ISR_LOCK
00242
void rtai_handle_isched_lock(
int nesting);
00243
#endif
00244
00245
#ifdef CONFIG_SMP
00246
#define rt_time_h (rt_smp_time_h[cpuid])
00247
#define oneshot_timer (rt_smp_oneshot_timer[cpuid])
00248
#define rt_linux_task (rt_smp_linux_task[cpuid])
00249
#else
00250
#define rt_time_h (rt_smp_time_h[0])
00251
#define oneshot_timer (rt_smp_oneshot_timer[0])
00252
#define rt_linux_task (rt_smp_linux_task[0])
00253
#endif
00254
00255
00256
00257
00258
00259
00260
#define RTAI_MAX_FUN_ARGS 9
00261
struct fun_args {
unsigned long a[RTAI_MAX_FUN_ARGS];
long long (*fun)(
unsigned long, ...); };
00262
00263
#define RTAI_FUN_ARGS arg[0],arg[1],arg[2],arg[3],arg[4],arg[5],arg[6],arg[7],arg[RTAI_MAX_FUN_ARGS - 1]
00264
00265
#define RTAI_FUNARGS funarg->a[0],funarg->a[1],funarg->a[2],funarg->a[3],funarg->a[4],funarg->a[5],funarg->a[6],funarg->a[7],funarg->a[RTAI_MAX_FUN_ARGS - 1]
00266
00267
#define RTAI_FUN_A a[0],a[1],a[2],a[3],a[4],a[5],a[6],a[7],a[RTAI_MAX_FUN_ARGS - 1]
00268
00269
#ifdef CONFIG_SMP

/* Kick the scheduler on the CPUs selected by the bit mask 'dest'. */
static inline void send_sched_ipi(unsigned long dest)
{
	_send_sched_ipi(dest);
}

/* Reschedule remote CPUs named in schedmap (no-op when the map is empty). */
#define RT_SCHEDULE_MAP(schedmap) \
	do { if (schedmap) send_sched_ipi(schedmap); } while (0)

/* Reschedule remote CPUs named in schedmap, then the local one too. */
#define RT_SCHEDULE_MAP_BOTH(schedmap) \
	do { if (schedmap) send_sched_ipi(schedmap); rt_schedule(); } while (0)

/* Reschedule wherever 'task' runs: remotely via IPI, or locally. */
#define RT_SCHEDULE(task, cpuid) \
	do { \
		if ((task)->runnable_on_cpus != (cpuid)) { \
			send_sched_ipi(1 << (task)->runnable_on_cpus); \
		} else { \
			rt_schedule(); \
		} \
	} while (0)

/* Reschedule the task's CPU if remote, and always the local one. */
#define RT_SCHEDULE_BOTH(task, cpuid) \
	{ \
		if ((task)->runnable_on_cpus != (cpuid)) { \
			send_sched_ipi(1 << (task)->runnable_on_cpus); \
		} \
		rt_schedule(); \
	}

#else /* !CONFIG_SMP */

/* Uniprocessor: no IPIs, every variant is just a local reschedule. */
#define send_sched_ipi(dest)

#define RT_SCHEDULE_MAP_BOTH(schedmap)	rt_schedule()

#define RT_SCHEDULE_MAP(schedmap)	rt_schedule()

#define RT_SCHEDULE(task, cpuid)	rt_schedule()

#define RT_SCHEDULE_BOTH(task, cpuid)	rt_schedule()

#endif /* CONFIG_SMP */

/* Priority offset applied to tasks running in soft (Linux-synchronised) mode. */
#define BASE_SOFT_PRIORITY 1000000000

#define TASK_HARDREALTIME  TASK_UNINTERRUPTIBLE
#define TASK_SOFTREALTIME  TASK_INTERRUPTIBLE
00317
00318
/*
 * Insert an EDF task into its CPU's ready list, kept ordered by period
 * (earliest deadline first). Only entries with negative policy (EDF) are
 * scanned past; the walk stops at the first non-EDF entry or at an EDF
 * entry with a longer period, and the task is linked in just before it.
 */
static inline void enq_ready_edf_task(RT_TASK *ready_task)
{
	RT_TASK *task;
#ifdef CONFIG_SMP
	task = rt_smp_linux_task[ready_task->runnable_on_cpus].rnext;
#else
	task = rt_smp_linux_task[0].rnext;
#endif
	while (task->policy < 0 && ready_task->period >= task->period) {
		task = task->rnext;
	}
	/* Doubly-linked insertion of ready_task immediately before 'task'. */
	task->rprev = (ready_task->rprev = task->rprev)->rnext = ready_task;
	ready_task->rnext = task;
}
00332
00333
/* Ring size for the wake-up srq lists; must stay a power of two because
 * producers/consumers mask indices with (MAX_WAKEUP_SRQ - 1). */
#define MAX_WAKEUP_SRQ (2 << 6)

/* Per-CPU lock-free ring of Linux tasks pending a wake up, drained by
 * the srq handler; 'in'/'out' are free-running producer/consumer counts. */
struct klist_t {
	int srq;
	volatile unsigned long in, out;
	void *task[MAX_WAKEUP_SRQ];
};
extern struct klist_t wake_up_srq[];

/* Queue lnxtsk on this CPU's wake-up ring and pend the matching srq. */
#define pend_wake_up_srq(lnxtsk, cpuid) \
do { \
	wake_up_srq[cpuid].task[wake_up_srq[cpuid].in++ & (MAX_WAKEUP_SRQ - 1)] = lnxtsk; \
	hal_pend_uncond(wake_up_srq[cpuid].srq, cpuid); \
} while (0)
00343
00344
/*
 * Make a task ready to run. Hard real-time tasks are inserted into their
 * CPU's priority-ordered ready list (after all entries of equal priority,
 * stopping early at the list head whose priority is negative). Soft tasks
 * are instead flagged RT_SCHED_SFTRDY and woken up through Linux.
 */
static inline void enq_ready_task(RT_TASK *ready_task)
{
	RT_TASK *task;
	if (ready_task->is_hard) {
#ifdef CONFIG_SMP
		task = rt_smp_linux_task[ready_task->runnable_on_cpus].rnext;
#else
		task = rt_smp_linux_task[0].rnext;
#endif
		while (ready_task->priority >= task->priority) {
			if ((task = task->rnext)->priority < 0) break;
		}
		/* Doubly-linked insertion of ready_task just before 'task'. */
		task->rprev = (ready_task->rprev = task->rprev)->rnext = ready_task;
		ready_task->rnext = task;
	} else {
		ready_task->state |= RT_SCHED_SFTRDY;
		NON_RTAI_TASK_RESUME(ready_task);
	}
}
00363
00364
/*
 * Change a task's priority; if the task is currently READY it is unlinked
 * and re-enqueued so the ready list stays priority ordered.
 * Returns nonzero when the priority actually changed.
 */
static inline int renq_ready_task(RT_TASK *ready_task, int priority)
{
	int retval;
	if ((retval = ready_task->priority != priority)) {
		ready_task->priority = priority;
		if (ready_task->state == RT_SCHED_READY) {
			(ready_task->rprev)->rnext = ready_task->rnext;
			(ready_task->rnext)->rprev = ready_task->rprev;
			enq_ready_task(ready_task);
		}
	}
	return retval;
}
00377
00378
/*
 * Change the running task's priority and unconditionally re-enqueue it
 * (the caller knows rt_current sits on the ready list). Returns nonzero
 * when the priority actually changed.
 */
static inline int renq_current(RT_TASK *rt_current, int priority)
{
	int retval;
	if ((retval = rt_current->priority != priority)) {
		rt_current->priority = priority;
		(rt_current->rprev)->rnext = rt_current->rnext;
		(rt_current->rnext)->rprev = rt_current->rprev;
		enq_ready_task(rt_current);
	}
	return retval;
}
00389
00390
/*
 * Unlink a READY task from its ready list; a soft task is additionally
 * suspended on the Linux side. Clears the 'unblocked' flag. Tasks in any
 * other state are left untouched.
 */
static inline void rem_ready_task(RT_TASK *task)
{
	if (task->state == RT_SCHED_READY) {
		if (!task->is_hard) {
			NON_RTAI_TASK_SUSPEND(task);
		}
		task->unblocked = 0;
		(task->rprev)->rnext = task->rnext;
		(task->rnext)->rprev = task->rprev;
	}
}
00401
00402
/*
 * Unlink the running task from the ready list (no state check needed:
 * the current task is READY by construction); soft tasks are also
 * suspended on the Linux side.
 */
static inline void rem_ready_current(RT_TASK *rt_current)
{
	if (!rt_current->is_hard) {
		NON_RTAI_TASK_SUSPEND(rt_current);
	}
	rt_current->unblocked = 0;
	(rt_current->rprev)->rnext = rt_current->rnext;
	(rt_current->rnext)->rprev = rt_current->rprev;
}
00411
00412
#ifdef CONFIG_RTAI_LONG_TIMED_LIST
00413
00414
00415
/*
 * Long-timed-list variant: timed tasks live both in a red-black tree
 * (for O(log n) insertion by resume_time) and in the tprev/tnext list
 * (for O(1) in-order draining by wake_up_timed_tasks).
 */
static inline void enq_timed_task(RT_TASK *timed_task)
{
	RT_TASK *taskh, *tsknxt, *task;
	rb_node_t **rbtn, *rbtpn = NULL;
#ifdef CONFIG_SMP
	task = taskh = &rt_smp_linux_task[timed_task->runnable_on_cpus];
#else
	task = taskh = &rt_smp_linux_task[0];
#endif
	rbtn = &taskh->rbr.rb_node;

	/* Descend the tree; 'task' tracks the in-order successor. */
	while (*rbtn) {
		rbtpn = *rbtn;
		tsknxt = rb_entry(rbtpn, RT_TASK, rbn);
		if (timed_task->resume_time > tsknxt->resume_time) {
			rbtn = &(rbtpn)->rb_right;
		} else {
			rbtn = &(rbtpn)->rb_left;
			task = tsknxt;
		}
	}
	rb_link_node(&timed_task->rbn, rbtpn, rbtn);
	rb_insert_color(&timed_task->rbn, &taskh->rbr);
	/* Link into the time-ordered list just before the successor. */
	task->tprev = (timed_task->tprev = task->tprev)->tnext = timed_task;
	timed_task->tnext = task;
}
00441
00442
/*
 * Long-timed-list variant: remove a DELAYED task from both the timed
 * list and its CPU's red-black tree; no-op for non-delayed tasks.
 */
static inline void rem_timed_task(RT_TASK *task)
{
	if ((task->state & RT_SCHED_DELAYED)) {
		(task->tprev)->tnext = task->tnext;
		(task->tnext)->tprev = task->tprev;
#ifdef CONFIG_SMP
		rb_erase(&task->rbn, &rt_smp_linux_task[task->runnable_on_cpus].rbr);
#else
		rb_erase(&task->rbn, &rt_smp_linux_task[0].rbr);
#endif
	}
}
00454
00455
static inline void wake_up_timed_tasks(
int cpuid)
00456 {
00457
RT_TASK *taskh, *
task;
00458
#ifdef CONFIG_SMP
00459
task = (taskh = &
rt_smp_linux_task[
cpuid])->tnext;
00460
#else
00461
task = (taskh = &
rt_smp_linux_task[0])->tnext;
00462
#endif
00463
if (
task->resume_time <= rt_time_h) {
00464
do {
00465
if ((
task->state &= ~(
RT_SCHED_DELAYED |
RT_SCHED_SUSPENDED |
RT_SCHED_SEMAPHORE |
RT_SCHED_RECEIVE |
RT_SCHED_SEND |
RT_SCHED_RPC |
RT_SCHED_RETURN |
RT_SCHED_MBXSUSP)) ==
RT_SCHED_READY) {
00466
if (
task->policy < 0) {
00467 enq_ready_edf_task(task);
00468 }
else {
00469 enq_ready_task(task);
00470 }
00471
#if defined(CONFIG_RTAI_BUSY_TIME_ALIGN) && CONFIG_RTAI_BUSY_TIME_ALIGN
00472
task->trap_handler_data = (
void *)oneshot_timer;
00473
#endif
00474
}
00475 rb_erase(&
task->rbn, &taskh->rbr);
00476
task =
task->tnext;
00477 }
while (
task->resume_time <= rt_time_h);
00478
#ifdef CONFIG_SMP
00479
rt_smp_linux_task[
cpuid].tnext =
task;
00480
task->tprev = &
rt_smp_linux_task[
cpuid];
00481
#else
00482
rt_smp_linux_task[0].tnext =
task;
00483
task->tprev = &
rt_smp_linux_task[0];
00484
#endif
00485
}
00486 }
00487
00488
#else
00489
00490
00491
/*
 * Linear-list variant: insert a timed task into its CPU's timed list,
 * kept ordered by increasing resume_time, by a simple forward scan.
 */
static inline void enq_timed_task(RT_TASK *timed_task)
{
	RT_TASK *task;
#ifdef CONFIG_SMP
	task = rt_smp_linux_task[timed_task->runnable_on_cpus].tnext;
#else
	task = rt_smp_linux_task[0].tnext;
#endif
	while (timed_task->resume_time > task->resume_time) {
		task = task->tnext;
	}
	/* Doubly-linked insertion just before the first later entry. */
	task->tprev = (timed_task->tprev = task->tprev)->tnext = timed_task;
	timed_task->tnext = task;
}
00505
00506
static inline void wake_up_timed_tasks(
int cpuid)
00507 {
00508
RT_TASK *
task;
00509
#ifdef CONFIG_SMP
00510
task =
rt_smp_linux_task[
cpuid].tnext;
00511
#else
00512
task =
rt_smp_linux_task[0].tnext;
00513
#endif
00514
if (
task->resume_time <= rt_time_h) {
00515
do {
00516
if ((
task->state &= ~(
RT_SCHED_DELAYED |
RT_SCHED_SUSPENDED |
RT_SCHED_SEMAPHORE |
RT_SCHED_RECEIVE |
RT_SCHED_SEND |
RT_SCHED_RPC |
RT_SCHED_RETURN |
RT_SCHED_MBXSUSP)) ==
RT_SCHED_READY) {
00517
if (
task->policy < 0) {
00518 enq_ready_edf_task(task);
00519 }
else {
00520 enq_ready_task(task);
00521 }
00522
#if defined(CONFIG_RTAI_BUSY_TIME_ALIGN) && CONFIG_RTAI_BUSY_TIME_ALIGN
00523
task->trap_handler_data = (
void *)oneshot_timer;
00524
#endif
00525
}
00526
task =
task->tnext;
00527 }
while (
task->resume_time <= rt_time_h);
00528
#ifdef CONFIG_SMP
00529
rt_smp_linux_task[
cpuid].tnext =
task;
00530
task->tprev = &
rt_smp_linux_task[
cpuid];
00531
#else
00532
rt_smp_linux_task[0].tnext =
task;
00533
task->tprev = &
rt_smp_linux_task[0];
00534
#endif
00535
}
00536 }
00537
00538
/*
 * Linear-list variant: unlink a DELAYED task from the timed list;
 * no-op for non-delayed tasks.
 */
static inline void rem_timed_task(RT_TASK *task)
{
	if ((task->state & RT_SCHED_DELAYED)) {
		(task->tprev)->tnext = task->tnext;
		(task->tnext)->tprev = task->tprev;
	}
}
00545
00546
#endif
00547
00548
/* Scheduler-internal time source; currently just rt_get_time(). */
#define get_time() rt_get_time()
#if 0
/* Retired inline kept for reference: read the TSC in oneshot mode,
 * otherwise the last periodic tick time. */
static inline RTIME get_time(void)
{
#ifdef CONFIG_SMP
	int cpuid;
	return rt_smp_oneshot_timer[cpuid = rtai_cpuid()] ? rdtsc() : rt_smp_times[cpuid].tick_time;
#else
	return rt_smp_oneshot_timer[0] ? rdtsc() : rt_smp_times[0].tick_time;
#endif
}
#endif
00560
00561
/*
 * Link a task onto a wait queue and record what it blocks on. A FIFO
 * queue (qtype != 0) appends at the tail; a priority queue (qtype == 0)
 * scans past all entries of lower-or-equal priority first.
 */
static inline void enqueue_blocked(RT_TASK *task, QUEUE *queue, int qtype)
{
	QUEUE *q;
	task->blocked_on = (q = queue);
	if (!qtype) {
		while ((q = q->next) != queue && (q->task)->priority <= task->priority);
	}
	/* Doubly-linked insertion of task->queue just before 'q'. */
	q->prev = (task->queue.prev = q->prev)->next = &(task->queue);
	task->queue.next = q;
}
00571
00572
00573
/*
 * Unlink a task from whatever wait queue it sits on and clear both its
 * blocked_on pointer and any pending priority inheritance chain.
 */
static inline void dequeue_blocked(RT_TASK *task)
{
	task->prio_passed_to = NOTHING;
	(task->queue.prev)->next = task->queue.next;
	(task->queue.next)->prev = task->queue.prev;
	task->blocked_on = NOTHING;
}
00580
00581
/*
 * Priority inheritance: propagate 'from's priority down the chain of
 * tasks it is (transitively) blocked behind, boosting each one whose
 * priority is lower. A boosted READY task is re-enqueued on its ready
 * list; a boosted task blocked on a priority-ordered queue (not a FIFO
 * semaphore) is repositioned inside that queue. On SMP, returns the bit
 * mask of CPUs whose ready lists were touched so the caller can IPI
 * them; on UP it always returns 0.
 */
static __volatile__ inline unsigned long pass_prio(RT_TASK *to, RT_TASK *from)
{
	QUEUE *q;
#ifdef CONFIG_SMP
	unsigned long schedmap;
	schedmap = 0;
#endif
	from->prio_passed_to = to;
	while (to && to->priority > from->priority) {
		to->priority = from->priority;
		if (to->state == RT_SCHED_READY) {
			(to->rprev)->rnext = to->rnext;
			(to->rnext)->rprev = to->rprev;
			enq_ready_task(to);
#ifdef CONFIG_SMP
			set_bit(to->runnable_on_cpus & 0x1F, &schedmap);
#endif
		} else if ((q = to->blocked_on) && !((to->state & RT_SCHED_SEMAPHORE) &&
				((SEM *)q)->qtype)) {
			/* Re-sort 'to' inside the priority-ordered wait queue. */
			(to->queue.prev)->next = to->queue.next;
			(to->queue.next)->prev = to->queue.prev;
			while ((q = q->next) != to->blocked_on && (q->task)->priority <= to->priority);
			q->prev = (to->queue.prev = q->prev)->next = &(to->queue);
			to->queue.next = q;
		}
		to = to->prio_passed_to;
	}
#ifdef CONFIG_SMP
	return schedmap;
#else
	return 0;
#endif
}
00614
00615
static inline RT_TASK *_rt_whoami(
void)
00616 {
00617
#ifdef CONFIG_SMP
00618
RT_TASK *rt_current;
00619
unsigned long flags;
00620
flags = rt_global_save_flags_and_cli();
00621 rt_current = RT_CURRENT;
00622 rt_global_restore_flags(flags);
00623
return rt_current;
00624
#else
00625
return rt_smp_current[0];
00626
#endif
00627
}
00628
00629
/*
 * Run and release a dying task's exit-handler chain: each XHDL node's
 * callback is invoked with its two saved arguments, then the node is
 * freed; the list head is cleared when done.
 */
static inline void __call_exit_handlers(RT_TASK *task)
{
	XHDL *pt, *tmp;

	pt = task->ExitHook;
	while (pt) {
		(*pt->fun)(pt->arg1, pt->arg2);
		tmp = pt;
		pt = pt->nxt;
		rt_free(tmp);
	}
	task->ExitHook = 0;
}
00642
00643
/*
 * Push an exit handler onto a task's exit-hook chain (LIFO: handlers
 * run in reverse registration order). Returns the new chain head, or
 * 0 when the task magic is wrong or the XHDL allocation fails.
 * The node is freed later by __call_exit_handlers().
 */
static inline XHDL *__set_exit_handler(RT_TASK *task, void (*fun) (void *, int), void *arg1, int arg2)
{
	XHDL *p;

	if (task->magic != RT_TASK_MAGIC) return 0;
	if (!(p = (XHDL *)rt_malloc(sizeof(XHDL)))) return 0;
	p->fun  = fun;
	p->arg1 = arg1;
	p->arg2 = arg2;
	p->nxt  = task->ExitHook;
	return (task->ExitHook = p);
}
00657
00658
/*
 * Initialise every RTAI feature compiled directly into the scheduler
 * module (the *_BUILTIN options); out-of-tree features init themselves
 * at module load. Always returns 0.
 */
static inline int rtai_init_features (void)
{
#ifdef CONFIG_RTAI_LEDS_BUILTIN
	__rtai_leds_init();
#endif
#ifdef CONFIG_RTAI_SEM_BUILTIN
	__rtai_sem_init();
#endif
#ifdef CONFIG_RTAI_MSG_BUILTIN
	__rtai_msg_init();
#endif
#ifdef CONFIG_RTAI_MBX_BUILTIN
	__rtai_mbx_init();
#endif
#ifdef CONFIG_RTAI_TBX_BUILTIN
	__rtai_tbx_init();
#endif
#ifdef CONFIG_RTAI_MQ_BUILTIN
	__rtai_mq_init();
#endif
#ifdef CONFIG_RTAI_BITS_BUILTIN
	__rtai_bits_init();
#endif
#ifdef CONFIG_RTAI_TASKLETS_BUILTIN
	__rtai_tasklets_init();
#endif
#ifdef CONFIG_RTAI_FIFOS_BUILTIN
	__rtai_fifos_init();
#endif
#ifdef CONFIG_RTAI_NETRPC_BUILTIN
	__rtai_netrpc_init();
#endif
#ifdef CONFIG_RTAI_SHM_BUILTIN
	__rtai_shm_init();
#endif
#ifdef CONFIG_RTAI_USI_BUILTIN
	__rtai_usi_init();
#endif
#ifdef CONFIG_RTAI_MATH_BUILTIN
	__rtai_math_init();
#endif

	return 0;
}
00703
00704
/*
 * Tear down the built-in RTAI features in exactly the reverse order of
 * rtai_init_features(), so dependents go away before what they rely on.
 */
static inline void rtai_cleanup_features (void)
{
#ifdef CONFIG_RTAI_MATH_BUILTIN
	__rtai_math_exit();
#endif
#ifdef CONFIG_RTAI_USI_BUILTIN
	__rtai_usi_exit();
#endif
#ifdef CONFIG_RTAI_SHM_BUILTIN
	__rtai_shm_exit();
#endif
#ifdef CONFIG_RTAI_NETRPC_BUILTIN
	__rtai_netrpc_exit();
#endif
#ifdef CONFIG_RTAI_FIFOS_BUILTIN
	__rtai_fifos_exit();
#endif
#ifdef CONFIG_RTAI_TASKLETS_BUILTIN
	__rtai_tasklets_exit();
#endif
#ifdef CONFIG_RTAI_BITS_BUILTIN
	__rtai_bits_exit();
#endif
#ifdef CONFIG_RTAI_MQ_BUILTIN
	__rtai_mq_exit();
#endif
#ifdef CONFIG_RTAI_TBX_BUILTIN
	__rtai_tbx_exit();
#endif
#ifdef CONFIG_RTAI_MBX_BUILTIN
	__rtai_mbx_exit();
#endif
#ifdef CONFIG_RTAI_MSG_BUILTIN
	__rtai_msg_exit();
#endif
#ifdef CONFIG_RTAI_SEM_BUILTIN
	__rtai_sem_exit();
#endif
#ifdef CONFIG_RTAI_LEDS_BUILTIN
	__rtai_leds_exit();
#endif
}
00746
00747
int rt_check_current_stack(
void);
00748
00749
int rt_kthread_init(
RT_TASK *task,
00750
void (*rt_thread)(
long),
00751
long data,
00752
int stack_size,
00753
int priority,
00754
int uses_fpu,
00755
void(*signal)(
void));
00756
00757
int rt_kthread_init_cpuid(
RT_TASK *task,
00758
void (*rt_thread)(
long),
00759
long data,
00760
int stack_size,
00761
int priority,
00762
int uses_fpu,
00763
void(*signal)(
void),
00764
unsigned int cpuid);
00765
00766
#endif
00767
00768
#endif