00001
00002
00003
00004
00005
00006
00007
00008
00009
00010
00011
00012
00013
00014
00015
00016
00017
00018
00019
#ifndef _RTAI_SCHED_H
00020
#define _RTAI_SCHED_H
00021
00022
#include <rtai.h>
00023
#ifndef __KERNEL__
00024
#include <sys/time.h>
00025
#include <time.h>
00026
#include <errno.h>
00027
#include <rtai_types.h>
00028
#endif
00029
/* Scheduler flavours (UP, symmetric MP, multi-uniprocessor). */
#define RT_SCHED_UP  1
#define RT_SCHED_SMP 2
#define RT_SCHED_MUP 3

/* Priority range: 0 is highest; Linux tasks get the lowest slot. */
#define RT_SCHED_HIGHEST_PRIORITY 0
#define RT_SCHED_LOWEST_PRIORITY  0x3fffFfff
#define RT_SCHED_LINUX_PRIORITY   0x7fffFfff

/* Task state bits (OR-able; see rt_get_task_state/rt_task_masked_unblock). */
#define RT_SCHED_READY     1
#define RT_SCHED_SUSPENDED 2
#define RT_SCHED_DELAYED   4
#define RT_SCHED_SEMAPHORE 8
#define RT_SCHED_SEND      16
#define RT_SCHED_RECEIVE   32
#define RT_SCHED_RPC       64
#define RT_SCHED_RETURN    128
#define RT_SCHED_MBXSUSP   256
#define RT_SCHED_SFTRDY    512

/* Return value flagging an interrupted blocking call. */
#define RT_EINTR (0xFff0)

/* Handler types for rt_request_irq_task(); ERR marks an invalid setup. */
#define RT_IRQ_TASK     0
#define RT_IRQ_TASKLET  1
#define RT_IRQ_TASK_ERR 0x7FFFFFFF
00055
struct rt_task_struct;
00056
00057
#ifdef __KERNEL__
00058
00059
#include <linux/time.h>
00060
#include <linux/errno.h>
00061
00062
#if defined(CONFIG_RTAI_LONG_TIMED_LIST) && LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
00063
#include <linux/rbtree.h>
00064
typedef struct rb_node rb_node_t;
00065
typedef struct rb_root rb_root_t;
00066
#endif
00067
00068
#define RT_TASK_MAGIC 0x9ad25f6f // nam2num("rttask")
00069
00070
#ifndef __cplusplus
00071
00072
#include <linux/sched.h>
00073
00074
/*
 * Doubly-linked queue node used throughout the scheduler; each node
 * also points back at the task it queues.
 */
typedef struct rt_queue {
	struct rt_queue *prev;
	struct rt_queue *next;
	struct rt_task_struct *task;
} QUEUE;
/*
 * Message control block: send/receive buffer pointers and their byte
 * counts (presumably for the RPC/extended intertask messaging calls —
 * confirm against the scheduler sources).
 */
struct mcb_t {
	void *sbuf;
	int sbytes;
	void *rbuf;
	int rbytes;
};
/*
 * Exit-handler list node: a singly-linked chain of callbacks (each with
 * two prebound arguments) run when a task is deleted; see the ExitHook
 * member of RT_TASK.
 */
typedef struct rt_ExitHandler {
	struct rt_ExitHandler *nxt;
	void (*fun) (void *arg1, int arg2);
	void *arg1;
	int arg2;
} XHDL;
/* Per-task heap descriptor: heap handle plus kernel/user address views. */
struct rt_heap_t { void *heap, *kadr, *uadr; };
typedef struct rt_task_struct {
00099
00100
long *stack
__attribute__ ((__aligned__ (L1_CACHE_BYTES)));
00101
int uses_fpu;
00102
int magic;
00103
volatile int state, running;
00104
unsigned long runnable_on_cpus;
00105
long *stack_bottom;
00106
volatile int priority;
00107
int base_priority;
00108
int policy;
00109
int sched_lock_priority;
00110
struct rt_task_struct *prio_passed_to;
00111
RTIME period;
00112
RTIME resume_time;
00113
RTIME yield_time;
00114
int rr_quantum;
00115
int rr_remaining;
00116
int suspdepth;
00117
struct rt_queue queue;
00118
int owndres;
00119
struct rt_queue *blocked_on;
00120
struct rt_queue msg_queue;
00121
int tid;
00122
unsigned long msg;
00123
struct rt_queue ret_queue;
00124 void (*signal)(
void);
00125
FPU_ENV fpu_reg
__attribute__ ((__aligned__ (L1_CACHE_BYTES)));
00126
struct rt_task_struct *prev;
00127
struct rt_task_struct *next;
00128
struct rt_task_struct *tprev;
00129
struct rt_task_struct *tnext;
00130
struct rt_task_struct *rprev;
00131
struct rt_task_struct *rnext;
00132
00133
00134
long *fun_args;
00135
long *bstack;
00136
struct task_struct *lnxtsk;
00137
long long retval;
00138
char *msg_buf[2];
00139
long max_msg_size[2];
00140
char task_name[16];
00141
void *system_data_ptr;
00142
struct rt_task_struct *nextp;
00143
struct rt_task_struct *prevp;
00144
00145
00146
RT_TRAP_HANDLER task_trap_handler[
HAL_NR_FAULTS];
00147
00148
00149
long unblocked;
00150
void *rt_signals;
00151
volatile unsigned long pstate;
00152
unsigned long usp_flags;
00153
unsigned long usp_flags_mask;
00154
unsigned long force_soft;
00155
volatile int is_hard;
00156
00157
void *trap_handler_data;
00158
struct rt_task_struct *linux_syscall_server;
00159
00160
00161
int resync_frame;
00162
00163
00164 XHDL *ExitHook;
00165
00166
RTIME exectime[2];
00167
struct mcb_t mcb;
00168
00169
00170
struct rt_heap_t heap[2];
00171
00172
volatile int scheduler;
00173
00174
#ifdef CONFIG_RTAI_LONG_TIMED_LIST
00175
rb_root_t rbr;
00176 rb_node_t rbn;
00177
#endif
00178
}
RT_TASK __attribute__ ((__aligned__ (L1_CACHE_BYTES)));
00179
00180
#else
00181
extern "C" {
00182
#endif
00183
00184
/* Task creation and CPU/policy configuration. */

int rt_task_init(struct rt_task_struct *task,
		 void (*rt_thread)(long),
		 long data,
		 int stack_size,
		 int priority,
		 int uses_fpu,
		 void (*signal)(void));

int rt_task_init_cpuid(struct rt_task_struct *task,
		       void (*rt_thread)(long),
		       long data,
		       int stack_size,
		       int priority,
		       int uses_fpu,
		       void (*signal)(void),
		       unsigned run_on_cpu);

void rt_set_runnable_on_cpus(struct rt_task_struct *task,
			     unsigned long cpu_mask);

void rt_set_runnable_on_cpuid(struct rt_task_struct *task,
			      unsigned cpuid);

void rt_set_sched_policy(struct rt_task_struct *task,
			 int policy,
			 int rr_quantum_ns);
00211
int rt_task_delete(
struct rt_task_struct *task);
00212
00213
int rt_get_task_state(
struct rt_task_struct *task);
00214
00215
void rt_gettimeorig(
RTIME time_orig[]);
00216
00217
int rt_get_timer_cpu(
void);
00218
00219
int rt_is_hard_timer_running(
void);
00220
00221
void rt_set_periodic_mode(
void);
00222
00223
void rt_set_oneshot_mode(
void);
00224
00225
RTIME start_rt_timer(
int period);
00226
00227
#define start_rt_timer_ns(period) start_rt_timer(nano2count((period)))
00228
00229
void start_rt_apic_timers(
struct apic_timer_setup_data *setup_mode,
00230
unsigned rcvr_jiffies_cpuid);
00231
00232
void stop_rt_timer(
void);
00233
00234
struct rt_task_struct *
rt_whoami(
void);
00235
00236
int rt_sched_type(
void);
00237
00238
int rt_task_signal_handler(
struct rt_task_struct *task,
00239
void (*handler)(
void));
00240
00241
int rt_task_use_fpu(
struct rt_task_struct *task,
00242
int use_fpu_flag);
00243
00244
void rt_linux_use_fpu(
int use_fpu_flag);
00245
00246
int rt_hard_timer_tick_count(
void);
00247
00248
int rt_hard_timer_tick_count_cpuid(
int cpuid);
00249
00250
RTIME count2nano(
RTIME timercounts);
00251
00252
RTIME nano2count(
RTIME nanosecs);
00253
00254
RTIME count2nano_cpuid(
RTIME timercounts,
00255
unsigned cpuid);
00256
00257
RTIME nano2count_cpuid(
RTIME nanosecs,
00258
unsigned cpuid);
00259
00260
/* Clock reads, priority management, and task suspension/resumption. */

RTIME rt_get_time(void);

RTIME rt_get_time_cpuid(unsigned cpuid);

RTIME rt_get_time_ns(void);

RTIME rt_get_time_ns_cpuid(unsigned cpuid);

RTIME rt_get_cpu_time_ns(void);

int rt_get_prio(struct rt_task_struct *task);

int rt_get_inher_prio(struct rt_task_struct *task);

void rt_spv_RMS(int cpuid);

int rt_change_prio(struct rt_task_struct *task,
		   int priority);

void rt_sched_lock(void);

void rt_sched_unlock(void);

void rt_task_yield(void);

int rt_task_suspend(struct rt_task_struct *task);

int rt_task_suspend_if(struct rt_task_struct *task);

int rt_task_suspend_until(struct rt_task_struct *task, RTIME until);

int rt_task_suspend_timed(struct rt_task_struct *task, RTIME delay);

int rt_task_resume(struct rt_task_struct *task);
00295
RT_TASK *
rt_exec_linux_syscall(
RT_TASK *rt_current,
RT_TASK *task,
struct pt_regs *regs);
00296
00297
RT_TASK *
rt_receive_linux_syscall(
RT_TASK *task,
struct pt_regs *regs);
00298
00299
void rt_return_linux_syscall(
RT_TASK *task,
unsigned long retval);
00300
00301
int rt_irq_wait(
unsigned irq);
00302
00303
int rt_irq_wait_if(
unsigned irq);
00304
00305
int rt_irq_wait_until(
unsigned irq,
RTIME until);
00306
00307
int rt_irq_wait_timed(
unsigned irq,
RTIME delay);
00308
00309
void rt_irq_signal(
unsigned irq);
00310
00311
int rt_request_irq_task (
unsigned irq,
void *handler,
int type,
int affine2task);
00312
00313
int rt_release_irq_task (
unsigned irq);
00314
00315
int rt_task_make_periodic_relative_ns(
struct rt_task_struct *task,
00316
RTIME start_delay,
00317
RTIME period);
00318
00319
int rt_task_make_periodic(
struct rt_task_struct *task,
00320
RTIME start_time,
00321
RTIME period);
00322
00323
void rt_task_set_resume_end_times(
RTIME resume,
00324
RTIME end);
00325
00326
int rt_set_resume_time(
struct rt_task_struct *task,
00327
RTIME new_resume_time);
00328
00329
int rt_set_period(
struct rt_task_struct *task,
00330
RTIME new_period);
00331
00332
int rt_task_wait_period(
void);
00333
00334
void rt_schedule(
void);
00335
00336
RTIME next_period(
void);
00337
00338
void rt_busy_sleep(
int nanosecs);
00339
00340
int rt_sleep(
RTIME delay);
00341
00342
int rt_sleep_until(
RTIME time);
00343
00344
int rt_task_masked_unblock(
struct rt_task_struct *task,
unsigned long mask);
00345
00346
#define rt_task_wakeup_sleeping(t) rt_task_masked_unblock(t, RT_SCHED_DELAYED)
00347
00348
struct rt_task_struct *
rt_named_task_init(
const char *task_name,
00349
void (*thread)(
long),
00350
long data,
00351
int stack_size,
00352
int prio,
00353
int uses_fpu,
00354
void(*signal)(
void));
00355
00356
struct rt_task_struct *
rt_named_task_init_cpuid(
const char *task_name,
00357
void (*thread)(
long),
00358
long data,
00359
int stack_size,
00360
int prio,
00361
int uses_fpu,
00362
void(*signal)(
void),
00363
unsigned run_on_cpu);
00364
00365
int rt_named_task_delete(
struct rt_task_struct *task);
00366
00367
RT_TRAP_HANDLER rt_set_task_trap_handler(
struct rt_task_struct *task,
00368
unsigned vec,
00369
RT_TRAP_HANDLER handler);
00370
00371
/* Convert a struct timeval to timer counts via nano2count(). */
static inline RTIME timeval2count(struct timeval *t)
{
	return nano2count(t->tv_sec*1000000000LL + t->tv_usec*1000);
}
00376
/*
 * Convert timer counts to a struct timeval.  rtai_ulldiv() returns the
 * quotient and stores the nanosecond remainder, which is then scaled
 * down to microseconds.
 */
static inline void count2timeval(RTIME rt, struct timeval *t)
{
	t->tv_sec = rtai_ulldiv(count2nano(rt), 1000000000, (unsigned long *)&t->tv_usec);
	t->tv_usec /= 1000;
}
00382
/* Convert a struct timespec to timer counts via nano2count(). */
static inline RTIME timespec2count(const struct timespec *t)
{
	return nano2count(t->tv_sec*1000000000LL + t->tv_nsec);
}
00387
/*
 * Convert timer counts to a struct timespec; the nanosecond remainder
 * from rtai_ulldiv() lands directly in tv_nsec.
 */
static inline void count2timespec(RTIME rt, struct timespec *t)
{
	t->tv_sec = rtai_ulldiv(count2nano(rt), 1000000000, (unsigned long *)&t->tv_nsec);
}
00392
/* Flatten a struct timespec to a nanosecond count. */
static inline RTIME timespec2nanos(const struct timespec *t)
{
	return t->tv_sec*1000000000LL + t->tv_nsec;
}
00397
/* Split a nanosecond count into a struct timespec (sec + nsec remainder). */
static inline void nanos2timespec(RTIME rt, struct timespec *t)
{
	t->tv_sec = rtai_ulldiv(rt, 1000000000, (unsigned long *)&t->tv_nsec);
}
00402
#ifdef __cplusplus
00403
}
00404
#else
00405
00406
00407
00408
/* Scheduler-internal queue manipulation (not part of the public API). */

RT_TASK *rt_get_base_linux_task(RT_TASK **base_linux_task);

RT_TASK *rt_alloc_dynamic_task(void);

void rt_enq_ready_edf_task(RT_TASK *ready_task);

void rt_enq_ready_task(RT_TASK *ready_task);

int rt_renq_ready_task(RT_TASK *ready_task,
		       int priority);

void rt_rem_ready_task(RT_TASK *task);

void rt_rem_ready_current(RT_TASK *rt_current);

void rt_enq_timed_task(RT_TASK *timed_task);

void rt_rem_timed_task(RT_TASK *task);

void rt_dequeue_blocked(RT_TASK *task);

RT_TASK **rt_register_watchdog(RT_TASK *wdog,
			       int cpuid);

void rt_deregister_watchdog(RT_TASK *wdog,
			    int cpuid);
00435
#endif
00436
00437
#endif
00438
00439
#if !defined(__KERNEL__) || defined(__cplusplus)
00440
/*
 * Opaque placeholders for user space (and C++): the real layouts live
 * in the kernel; callers only ever hold pointers to these.
 */
typedef struct rt_task_struct {
	int opaque;
} RT_TASK;

typedef struct QueueBlock {
	int opaque;
} QBLK;

typedef struct QueueHook {
	int opaque;
} QHOOK;
00452
00453
#endif
00454
00455
#endif