00001
00002
00003
00004
00005
00006
00007
00008
00009
00010
00011
00012
00013
00014
00015
00016
00017
00018
00019
00020
00021
00022
00023
00024
00025
00026 #include <linux/module.h>
00027 #include <linux/kernel.h>
00028 #include <linux/version.h>
00029 #include <linux/errno.h>
00030 #include <linux/slab.h>
00031 #include <linux/unistd.h>
00032 #include <linux/mman.h>
00033 #include <asm/uaccess.h>
00034
00035 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,18)
00036 #include <linux/oom.h>
00037 #endif
00038
00039 #include <rtai_sched.h>
00040 #include <rtai_lxrt.h>
00041 #include <rtai_sem.h>
00042 #include <rtai_mbx.h>
00043 #include <rtai_rwl.h>
00044 #include <rtai_spl.h>
00045
00046 #include <asm/rtai_fpu.h>
00047 #include <rtai_registry.h>
00048 #include <rtai_proxies.h>
00049 #include <rtai_msg.h>
00050 #include <rtai_schedcore.h>
00051
00052 #define MAX_FUN_EXT 16
00053 static struct rt_fun_entry *rt_fun_ext[MAX_FUN_EXT];
00054
00055
00056
00057
00058 #define USRLAND_MAX_MSG_SIZE 128 // Default max message size, used here only.
00059
00060 int get_min_tasks_cpuid(void);
00061
00062 int set_rtext(RT_TASK *task,
00063 int priority,
00064 int uses_fpu,
00065 void(*signal)(void),
00066 unsigned int cpuid,
00067 struct task_struct *relink);
00068
00069 int clr_rtext(RT_TASK *task);
00070
00071 void steal_from_linux(RT_TASK *task);
00072
00073 void give_back_to_linux(RT_TASK *task, int);
00074
00075 void rt_schedule_soft(RT_TASK *task);
00076
00077 void *rt_get_lxrt_fun_entry(int index);
00078
00079 static inline void lxrt_typed_sem_init(SEM *sem, int count, int type)
00080 {
00081 ((RTAI_SYSCALL_MODE int (*)(SEM *, ...))rt_get_lxrt_fun_entry(TYPED_SEM_INIT))(sem, count, type);
00082 }
00083
00084 static inline int lxrt_typed_mbx_init(MBX *mbx, int bufsize, int type)
00085 {
00086 return ((RTAI_SYSCALL_MODE int (*)(MBX *, ...))rt_get_lxrt_fun_entry(TYPED_MBX_INIT))(mbx, bufsize, type);
00087 }
00088
00089 static inline int lxrt_typed_rwl_init(RWL *rwl, int type)
00090 {
00091 return ((RTAI_SYSCALL_MODE int (*)(RWL *, ...))rt_get_lxrt_fun_entry(RWL_INIT))(rwl, type);
00092 }
00093
00094 static inline int lxrt_spl_init(SPL *spl)
00095 {
00096 return ((RTAI_SYSCALL_MODE int (*)(SPL *, ...))rt_get_lxrt_fun_entry(SPL_INIT))(spl);
00097 }
00098
00099 static inline int lxrt_Proxy_detach(pid_t pid)
00100 {
00101 return ((RTAI_SYSCALL_MODE int (*)(int, ...))rt_get_lxrt_fun_entry(PROXY_DETACH))(pid);
00102 }
00103
00104 static inline int GENERIC_DELETE(int index, void *object)
00105 {
00106 return ((RTAI_SYSCALL_MODE int (*)(void *, ...))rt_get_lxrt_fun_entry(index))(object);
00107 }
00108
00109 #define lxrt_sem_delete(sem) GENERIC_DELETE(SEM_DELETE, sem)
00110 #define lxrt_named_sem_delete(sem) GENERIC_DELETE(NAMED_SEM_DELETE, sem)
00111 #define lxrt_rwl_delete(rwl) GENERIC_DELETE(RWL_DELETE, rwl)
00112 #define lxrt_named_rwl_delete(rwl) GENERIC_DELETE(NAMED_RWL_DELETE, rwl)
00113 #define lxrt_spl_delete(spl) GENERIC_DELETE(SPL_DELETE, spl)
00114 #define lxrt_named_spl_delete(spl) GENERIC_DELETE(NAMED_SPL_DELETE, spl)
00115 #define lxrt_mbx_delete(mbx) GENERIC_DELETE(MBX_DELETE, mbx)
00116 #define lxrt_named_mbx_delete(mbx) GENERIC_DELETE(NAMED_MBX_DELETE, mbx)
00117
00118 extern void rt_schedule_soft_tail(RT_TASK *, int);
/*
 * Invoke an LXRT service function on behalf of 'task'.
 *
 * Hard real time path: call the function directly and latch its result
 * into task->retval; if the service call itself softened the task
 * (is_hard cleared during the call), complete the transition through
 * rt_schedule_soft_tail().
 *
 * Soft path: stage the raw argument block into task->fun_args, record
 * the target function, and let rt_schedule_soft() perform the call from
 * the scheduler's context.
 *
 * narg is used directly as the memcpy length, so it is presumably a
 * size in bytes as produced by NARG() — TODO confirm against the NARG
 * macro definition.
 */
static inline void lxrt_fun_call(RT_TASK *task, void *fun, int narg, long *arg)
{
	if (likely(task->is_hard > 0)) {
		task->retval = ((RTAI_SYSCALL_MODE long long (*)(unsigned long, ...))fun)(RTAI_FUN_ARGS);
		if (unlikely(!task->is_hard)) {
			/* Call dropped the task to soft mode; finish the switch. */
			rt_schedule_soft_tail(task, task->runnable_on_cpus);
		}
	} else {
		struct fun_args *funarg;
		memcpy(funarg = (void *)task->fun_args, arg, narg);
		funarg->fun = fun;
		rt_schedule_soft(task);
	}
}
00133
/*
 * Wrapper around lxrt_fun_call() for services that read and/or write
 * user-space buffers.  The USP_* fields packed into 'type' give, as
 * 1-based positions within the marshalled argument block, up to two
 * read buffers and up to two write buffers together with the positions
 * of their sizes; a size position of 0 means the size is one long.
 *
 * Before the call: user read buffers are copied into the task's bounce
 * buffers msg_buf[0]/msg_buf[1] (regrown on demand with headroom:
 * (size << 7)/100, i.e. ~1.28x) and the user pointers inside the
 * argument block are redirected to the bounce buffers; original user
 * write addresses are saved.  After the call the bounce buffers are
 * copied back out to the saved user write addresses.
 *
 * NOTE(review): the rt_malloc() results when regrowing the bounce
 * buffers are not checked; a failed allocation would make the
 * subsequent copies go through a NULL msg_buf pointer — verify the
 * allocator's failure behavior.
 */
static inline void lxrt_fun_call_wbuf(RT_TASK *rt_task, void *fun, int narg, long *arg, unsigned long type)
{
	int rsize, r2size, wsize, w2size, msg_size;
	long *wmsg_adr, *w2msg_adr, *fun_args;

	rsize = r2size = wsize = w2size = 0 ;
	wmsg_adr = w2msg_adr = NULL;
	fun_args = arg - 1; /* USP_* positions are 1-based relative to arg */
	if (NEED_TO_R(type)) {
		rsize = USP_RSZ1(type);
		rsize = rsize ? fun_args[rsize] : sizeof(long);
		if (NEED_TO_R2ND(type)) {
			r2size = USP_RSZ2(type);
			r2size = r2size ? fun_args[r2size] : sizeof(long);
		}
	}
	if (NEED_TO_W(type)) {
		wsize = USP_WSZ1(type);
		wsize = wsize ? fun_args[wsize] : sizeof(long);
		if (NEED_TO_W2ND(type)) {
			w2size = USP_WSZ2(type);
			w2size = w2size ? fun_args[w2size] : sizeof(long);
		}
	}
	/* First read/write buffer pair shares bounce buffer msg_buf[0]. */
	if ((msg_size = rsize > wsize ? rsize : wsize) > 0) {
		if (msg_size > rt_task->max_msg_size[0]) {
			rt_free(rt_task->msg_buf[0]);
			rt_task->max_msg_size[0] = (msg_size << 7)/100;
			rt_task->msg_buf[0] = rt_malloc(rt_task->max_msg_size[0]);
		}
		if (rsize) {
			long *buf_arg = fun_args + USP_RBF1(type);
			rt_copy_from_user(rt_task->msg_buf[0], (long *)buf_arg[0], rsize);
			buf_arg[0] = (long)rt_task->msg_buf[0];
		}
		if (wsize) {
			long *buf_arg = fun_args + USP_WBF1(type);
			wmsg_adr = (long *)buf_arg[0];
			buf_arg[0] = (long)rt_task->msg_buf[0];
		}
	}
	/* Second read/write buffer pair shares bounce buffer msg_buf[1]. */
	if ((msg_size = r2size > w2size ? r2size : w2size) > 0) {
		if (msg_size > rt_task->max_msg_size[1]) {
			rt_free(rt_task->msg_buf[1]);
			rt_task->max_msg_size[1] = (msg_size << 7)/100;
			rt_task->msg_buf[1] = rt_malloc(rt_task->max_msg_size[1]);
		}
		if (r2size) {
			long *buf_arg = fun_args + USP_RBF2(type);
			rt_copy_from_user(rt_task->msg_buf[1], (long *)buf_arg[0], r2size);
			buf_arg[0] = (long)rt_task->msg_buf[1];
		}
		if (w2size) {
			long *buf_arg = fun_args + USP_WBF2(type);
			w2msg_adr = (long *)buf_arg[0];
			buf_arg[0] = (long)rt_task->msg_buf[1];
		}
	}
	lxrt_fun_call(rt_task, fun, narg, arg);
	/* Copy results back to the saved user-space write addresses. */
	if (wsize) {
		rt_copy_to_user(wmsg_adr, rt_task->msg_buf[0], wsize);
		if (w2size) {
			rt_copy_to_user(w2msg_adr, rt_task->msg_buf[1], w2size);
		}
	}
}
00200
00201 void put_current_on_cpu(int cpuid);
00202
/*
 * Create the RTAI task extension for the current Linux process, or
 * adopt the existing one.
 *
 * If current already carries an RT_TASK (TSKEXT0), only re-derive the
 * target cpu from cpus_allowed and pin the process there, returning the
 * existing task.  Otherwise allocate two message bounce buffers, the
 * RT_TASK itself (with trailing room for 3 fun_args frames), extend it
 * into the real time scheduler via set_rtext() and register it under
 * 'name'.  On any failure every acquired resource is released and NULL
 * is returned.  Also returns NULL when 'name' is already registered.
 *
 * cpus_allowed is a cpu bitmask: with more than one bit set the least
 * loaded cpu is chosen (get_min_tasks_cpuid()), with one bit set that
 * cpu is used, and with no bits set (or on UP) the current cpu is used.
 */
static inline RT_TASK* __task_init(unsigned long name, int prio, int stack_size, int max_msg_size, int cpus_allowed)
{
	void *msg_buf0, *msg_buf1;
	RT_TASK *rt_task;

	if ((rt_task = current->rtai_tskext(TSKEXT0))) {
		/* Already initialised: just (re)pin to a cpu. */
		if (num_online_cpus() > 1 && cpus_allowed) {
			cpus_allowed = hweight32(cpus_allowed) > 1 ? get_min_tasks_cpuid() : ffnz(cpus_allowed);
		} else {
			cpus_allowed = rtai_cpuid();
		}
		put_current_on_cpu(cpus_allowed);
		return rt_task;
	}
	if (rt_get_adr(name)) {
		/* Name collision in the registry. */
		return 0;
	}
	if (prio > RT_SCHED_LOWEST_PRIORITY) {
		prio = RT_SCHED_LOWEST_PRIORITY;
	}
	if (!max_msg_size) {
		max_msg_size = USRLAND_MAX_MSG_SIZE;
	}
	if (!(msg_buf0 = rt_malloc(max_msg_size))) {
		return 0;
	}
	if (!(msg_buf1 = rt_malloc(max_msg_size))) {
		rt_free(msg_buf0);
		return 0;
	}
	rt_task = rt_malloc(sizeof(RT_TASK) + 3*sizeof(struct fun_args));
	if (rt_task) {
		rt_task->magic = 0;
		if (num_online_cpus() > 1 && cpus_allowed) {
			cpus_allowed = hweight32(cpus_allowed) > 1 ? get_min_tasks_cpuid() : ffnz(cpus_allowed);
		} else {
			cpus_allowed = rtai_cpuid();
		}
		if (!set_rtext(rt_task, prio, 0, 0, cpus_allowed, 0)) {
			rt_task->fun_args = (long *)((struct fun_args *)(rt_task + 1));
			rt_task->msg_buf[0] = msg_buf0;
			rt_task->msg_buf[1] = msg_buf1;
			rt_task->max_msg_size[0] =
			rt_task->max_msg_size[1] = max_msg_size;
			if (rt_register(name, rt_task, IS_TASK, 0)) {
				rt_task->state = 0;

#ifdef PF_EVNOTIFY
				current->flags |= PF_EVNOTIFY;
#endif
#if (defined VM_PINNED) && (defined CONFIG_MMU)
				/* Fault in and pin current's mappings for hard RT use. */
				ipipe_disable_ondemand_mappings(current);
#endif
#ifdef OOM_DISABLE
				/* Shield the process from the OOM killer. */
				current->oomkilladj = OOM_DISABLE;
#endif

				return rt_task;
			} else {
				clr_rtext(rt_task);
			}
		}
		rt_free(rt_task);
	}
	rt_free(msg_buf0);
	rt_free(msg_buf1);
	return 0;
}
00271
/*
 * Destroy an LXRT task extension.  Only the owning Linux process may
 * delete its own RT_TASK (-EPERM otherwise).  Hard tasks are first
 * returned to Linux scheduling; an attached syscall server, if any, is
 * woken with a poisoned suspdepth and given ~HZ/10 to wind down before
 * the extension is cleared, its buffers freed and the registry entry
 * dropped.  Returns 0 on success, -EFAULT if clr_rtext() fails,
 * -ENODEV if the task was not registered.
 */
static int __task_delete(RT_TASK *rt_task)
{
	struct task_struct *process;

	if (current != rt_task->lnxtsk) {
		return -EPERM;
	}
	if ((process = rt_task->lnxtsk)) {
		process->rtai_tskext(TSKEXT0) = process->rtai_tskext(TSKEXT1) = 0;
	}
	if (rt_task->is_hard > 0) {
		give_back_to_linux(rt_task, 0);
	}
	if (rt_task->linux_syscall_server) {
		RT_TASK *serv = rt_task->linux_syscall_server->serv;
		/* Force the server out of any blocked state so it can exit. */
		serv->suspdepth = -RTE_HIGERR;
		rt_task_masked_unblock(serv, ~RT_SCHED_READY);
		process->state = TASK_INTERRUPTIBLE;
		schedule_timeout(HZ/10);

	}
	if (clr_rtext(rt_task)) {
		return -EFAULT;
	}
	rt_free(rt_task->msg_buf[0]);
	rt_free(rt_task->msg_buf[1]);
	rt_free(rt_task);
	/* NOTE(review): rt_task is used here only as a registry lookup key
	 * after rt_free(); technically a use of a freed address — confirm
	 * rt_drg_on_adr() never dereferences it. */
	return (!rt_drg_on_adr(rt_task)) ? -ENODEV : 0;
}
00301
00302
00303 #ifdef ECHO_SYSW
00304 #define SYSW_DIAG_MSG(x) x
00305 #else
00306 #define SYSW_DIAG_MSG(x)
00307 #endif
00308
#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,28)

#include <linux/cred.h>
/*
 * Raise capability 'perm' in the current process's effective set using
 * the post-2.6.29 credentials API (prepare_creds/commit_creds).
 */
static inline void set_lxrt_perm(int perm)
{
	struct cred *cred;
	if ((cred = prepare_creds())) {
		cap_raise(cred->cap_effective, perm);
		commit_creds(cred);
	}
}

#else

/*
 * Raise capability 'perm' in the current process's effective set on
 * older kernels, writing the capability mask directly.
 */
static inline void set_lxrt_perm(int perm)
{
#ifdef current_cap
	cap_raise(current_cap(), perm);
#else
	cap_raise(current->cap_effective, perm);
#endif
}

#endif
00333
00334 void rt_make_hard_real_time(RT_TASK *task)
00335 {
00336 if (task && task->magic == RT_TASK_MAGIC && !task->is_hard) {
00337 steal_from_linux(task);
00338 }
00339 }
00340
00341 void rt_make_soft_real_time(RT_TASK *task)
00342 {
00343 if (task && task->magic == RT_TASK_MAGIC && task->is_hard) {
00344 if (task->is_hard > 0) {
00345 give_back_to_linux(task, 0);
00346 } else {
00347 task->is_hard = 0;
00348 }
00349 }
00350 }
00351
00352 static inline long long handle_lxrt_request (unsigned int lxsrq, long *arg, RT_TASK *task)
00353 {
00354 #define larg ((struct arg *)arg)
00355
00356 union {unsigned long name; RT_TASK *rt_task; SEM *sem; MBX *mbx; RWL *rwl; SPL *spl; int i; void *p; long long ll; } arg0;
00357 int srq;
00358
00359 if (likely((srq = SRQ(lxsrq)) < MAX_LXRT_FUN)) {
00360 unsigned long type;
00361 struct rt_fun_entry *funcm;
00362
00363
00364
00365
00366
00367
00368 if (unlikely(!(funcm = rt_fun_ext[INDX(lxsrq)]))) {
00369 rt_printk("BAD: null rt_fun_ext, no module for extension %d?\n", INDX(lxsrq));
00370 return -ENOSYS;
00371 }
00372 if (!(type = funcm[srq].type)) {
00373 return ((RTAI_SYSCALL_MODE long long (*)(unsigned long, ...))funcm[srq].fun)(RTAI_FUN_ARGS);
00374 }
00375 if (unlikely(NEED_TO_RW(type))) {
00376 lxrt_fun_call_wbuf(task, funcm[srq].fun, NARG(lxsrq), arg, type);
00377 } else {
00378 lxrt_fun_call(task, funcm[srq].fun, NARG(lxsrq), arg);
00379 }
00380 return task->retval;
00381 }
00382
00383 arg0.name = arg[0];
00384 switch (srq) {
00385 case LXRT_GET_ADR: {
00386 arg0.p = rt_get_adr(arg0.name);
00387 return arg0.ll;
00388 }
00389
00390 case LXRT_GET_NAME: {
00391 arg0.name = rt_get_name(arg0.p);
00392 return arg0.ll;
00393 }
00394
00395 case LXRT_TASK_INIT: {
00396 struct arg { unsigned long name; long prio, stack_size, max_msg_size, cpus_allowed; };
00397 arg0.rt_task = __task_init(arg0.name, larg->prio, larg->stack_size, larg->max_msg_size, larg->cpus_allowed);
00398 return arg0.ll;
00399 }
00400
00401 case LXRT_TASK_DELETE: {
00402 arg0.i = __task_delete(arg0.rt_task ? arg0.rt_task : task);
00403 return arg0.ll;
00404 }
00405
00406 case LXRT_SEM_INIT: {
00407 if (rt_get_adr(arg0.name)) {
00408 return 0;
00409 }
00410 if ((arg0.sem = rt_malloc(sizeof(SEM)))) {
00411 struct arg { unsigned long name; long cnt; long typ; };
00412 lxrt_typed_sem_init(arg0.sem, larg->cnt, larg->typ);
00413 if (rt_register(larg->name, arg0.sem, IS_SEM, current)) {
00414 return arg0.ll;
00415 } else {
00416 rt_free(arg0.sem);
00417 }
00418 }
00419 return 0;
00420 }
00421
00422 case LXRT_SEM_DELETE: {
00423 if (lxrt_sem_delete(arg0.sem)) {
00424 arg0.i = -EFAULT;
00425 return arg0.ll;
00426 }
00427 rt_free(arg0.sem);
00428 arg0.i = rt_drg_on_adr(arg0.sem);
00429 return arg0.ll;
00430 }
00431
00432 case LXRT_MBX_INIT: {
00433 if (rt_get_adr(arg0.name)) {
00434 return 0;
00435 }
00436 if ((arg0.mbx = rt_malloc(sizeof(MBX)))) {
00437 struct arg { unsigned long name; long size; int qtype; };
00438 if (lxrt_typed_mbx_init(arg0.mbx, larg->size, larg->qtype) < 0) {
00439 rt_free(arg0.mbx);
00440 return 0;
00441 }
00442 if (rt_register(larg->name, arg0.mbx, IS_MBX, current)) {
00443 return arg0.ll;
00444 } else {
00445 rt_free(arg0.mbx);
00446 }
00447 }
00448 return 0;
00449 }
00450
00451 case LXRT_MBX_DELETE: {
00452 if (lxrt_mbx_delete(arg0.mbx)) {
00453 arg0.i = -EFAULT;
00454 return arg0.ll;
00455 }
00456 rt_free(arg0.mbx);
00457 arg0.i = rt_drg_on_adr(arg0.mbx);
00458 return arg0.ll;
00459 }
00460
00461 case LXRT_RWL_INIT: {
00462 if (rt_get_adr(arg0.name)) {
00463 return 0;
00464 }
00465 if ((arg0.rwl = rt_malloc(sizeof(RWL)))) {
00466 struct arg { unsigned long name; long type; };
00467 lxrt_typed_rwl_init(arg0.rwl, larg->type);
00468 if (rt_register(larg->name, arg0.rwl, IS_SEM, current)) {
00469 return arg0.ll;
00470 } else {
00471 rt_free(arg0.rwl);
00472 }
00473 }
00474 return 0;
00475 }
00476
00477 case LXRT_RWL_DELETE: {
00478 if (lxrt_rwl_delete(arg0.rwl)) {
00479 arg0.i = -EFAULT;
00480 return arg0.ll;
00481 }
00482 rt_free(arg0.rwl);
00483 arg0.i = rt_drg_on_adr(arg0.rwl);
00484 return arg0.ll;
00485 }
00486
00487 case LXRT_SPL_INIT: {
00488 if (rt_get_adr(arg0.name)) {
00489 return 0;
00490 }
00491 if ((arg0.spl = rt_malloc(sizeof(SPL)))) {
00492 struct arg { unsigned long name; };
00493 lxrt_spl_init(arg0.spl);
00494 if (rt_register(larg->name, arg0.spl, IS_SEM, current)) {
00495 return arg0.ll;
00496 } else {
00497 rt_free(arg0.spl);
00498 }
00499 }
00500 return 0;
00501 }
00502
00503 case LXRT_SPL_DELETE: {
00504 if (lxrt_spl_delete(arg0.spl)) {
00505 arg0.i = -EFAULT;
00506 return arg0.ll;
00507 }
00508 rt_free(arg0.spl);
00509 arg0.i = rt_drg_on_adr(arg0.spl);
00510 return arg0.ll;
00511 }
00512
00513 case MAKE_HARD_RT: {
00514 rt_make_hard_real_time(task);
00515 return 0;
00516 if (!task || task->is_hard) {
00517 return 0;
00518 }
00519 steal_from_linux(task);
00520 return 0;
00521 }
00522
00523 case MAKE_SOFT_RT: {
00524 rt_make_soft_real_time(task);
00525 return 0;
00526 if (!task || !task->is_hard) {
00527 return 0;
00528 }
00529 if (task->is_hard < 0) {
00530 task->is_hard = 0;
00531 } else {
00532 give_back_to_linux(task, 0);
00533 }
00534 return 0;
00535 }
00536 case PRINT_TO_SCREEN: {
00537 struct arg { char *display; long nch; };
00538 arg0.i = rtai_print_to_screen("%s", larg->display);
00539 return arg0.ll;
00540 }
00541
00542 case PRINTK: {
00543 struct arg { char *display; long nch; };
00544 arg0.i = rt_printk("%s", larg->display);
00545 return arg0.ll;
00546 }
00547
00548 case NONROOT_HRT: {
00549 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24)
00550 current->cap_effective |= ((1 << CAP_IPC_LOCK) |
00551 (1 << CAP_SYS_RAWIO) |
00552 (1 << CAP_SYS_NICE));
00553 #else
00554 set_lxrt_perm(CAP_IPC_LOCK);
00555 set_lxrt_perm(CAP_SYS_RAWIO);
00556 set_lxrt_perm(CAP_SYS_NICE);
00557 #endif
00558 return 0;
00559 }
00560
00561 case RT_BUDDY: {
00562 arg0.rt_task = task && current->rtai_tskext(TSKEXT1) == current ? task : NULL;
00563 return arg0.ll;
00564 }
00565
00566 case HRT_USE_FPU: {
00567 struct arg { RT_TASK *task; long use_fpu; };
00568 if(!larg->use_fpu) {
00569 clear_lnxtsk_uses_fpu((larg->task)->lnxtsk);
00570 } else {
00571 init_fpu((larg->task)->lnxtsk);
00572 }
00573 return 0;
00574 }
00575
00576 case GET_USP_FLAGS: {
00577 arg0.name = arg0.rt_task->usp_flags;
00578 return arg0.ll;
00579 }
00580 case SET_USP_FLAGS: {
00581 struct arg { RT_TASK *task; unsigned long flags; };
00582 arg0.rt_task->usp_flags = larg->flags;
00583 arg0.rt_task->force_soft = (arg0.rt_task->is_hard > 0) && (larg->flags & arg0.rt_task->usp_flags_mask & FORCE_SOFT);
00584 return 0;
00585 }
00586
00587 case GET_USP_FLG_MSK: {
00588 arg0.name = arg0.rt_task->usp_flags_mask;
00589 return arg0.ll;
00590 }
00591
00592 case SET_USP_FLG_MSK: {
00593 task->usp_flags_mask = arg0.name;
00594 task->force_soft = (task->is_hard > 0) && (task->usp_flags & arg0.name & FORCE_SOFT);
00595 return 0;
00596 }
00597
00598 case FORCE_TASK_SOFT: {
00599 extern void rt_do_force_soft(RT_TASK *rt_task);
00600 struct task_struct *ltsk;
00601 if ((ltsk = find_task_by_pid(arg0.name))) {
00602 if ((arg0.rt_task = ltsk->rtai_tskext(TSKEXT0))) {
00603 if ((arg0.rt_task->force_soft = (arg0.rt_task->is_hard != 0) && FORCE_SOFT)) {
00604 rt_do_force_soft(arg0.rt_task);
00605 }
00606 return arg0.ll;
00607 }
00608 }
00609 return 0;
00610 }
00611
00612 case IS_HARD: {
00613 arg0.i = arg0.rt_task || (arg0.rt_task = current->rtai_tskext(TSKEXT0)) ? arg0.rt_task->is_hard : 0;
00614 return arg0.ll;
00615 }
00616 case GET_EXECTIME: {
00617 struct arg { RT_TASK *task; RTIME *exectime; };
00618 if ((larg->task)->exectime[0] && (larg->task)->exectime[1]) {
00619 larg->exectime[0] = (larg->task)->exectime[0];
00620 larg->exectime[1] = (larg->task)->exectime[1];
00621 larg->exectime[2] = rdtsc();
00622 }
00623 return 0;
00624 }
00625 case GET_TIMEORIG: {
00626 struct arg { RTIME *time_orig; };
00627 if (larg->time_orig) {
00628 RTIME time_orig[2];
00629 rt_gettimeorig(time_orig);
00630 rt_copy_to_user(larg->time_orig, time_orig, sizeof(time_orig));
00631 } else {
00632 rt_gettimeorig(NULL);
00633 }
00634 return 0;
00635 }
00636
00637 case LINUX_SERVER_INIT: {
00638 struct arg { struct linux_syscalls_list syscalls; };
00639 larg->syscalls.task->linux_syscall_server = larg->syscalls.serv;
00640 rtai_set_linux_task_priority(current, (larg->syscalls.task)->lnxtsk->policy, (larg->syscalls.task)->lnxtsk->rt_priority);
00641 arg0.rt_task = __task_init((unsigned long)larg->syscalls.task, larg->syscalls.task->base_priority >= BASE_SOFT_PRIORITY ? larg->syscalls.task->base_priority - BASE_SOFT_PRIORITY : larg->syscalls.task->base_priority, 0, 0, 1 << larg->syscalls.task->runnable_on_cpus);
00642 return arg0.ll;
00643 }
00644
00645 default: {
00646 rt_printk("RTAI/LXRT: Unknown srq #%d\n", srq);
00647 arg0.i = -ENOSYS;
00648 return arg0.ll;
00649 }
00650 }
00651 return 0;
00652 }
00653
/*
 * Resolve pending mode-change conditions for 'task' around each LXRT
 * syscall.  In priority order:
 *  - force_soft set: drop the task to soft mode (handing it back to
 *    Linux if it is currently hard) and clear the force flags;
 *  - is_hard < 0: a deferred return to hard mode is pending, steal the
 *    task back from Linux now;
 *  - unblocked set: the task was unblocked (e.g. by a Linux signal)
 *    while hard, so return it to Linux with the -1 "unblocked" flavor.
 */
static inline void check_to_soften_harden(RT_TASK *task)
{
	if (unlikely(task->force_soft)) {
		if (task->is_hard > 0) {
			give_back_to_linux(task, 0);
		} else {
			task->is_hard = 0;
		}
		task->unblocked = task->force_soft = 0;
		task->usp_flags &= ~FORCE_SOFT;
	} else if (unlikely(task->is_hard < 0)) {
		SYSW_DIAG_MSG(rt_printk("GOING BACK TO HARD (SYSLXRT, DIRECT), PID = %d.\n", current->pid););
		steal_from_linux(task);
		SYSW_DIAG_MSG(rt_printk("GONE BACK TO HARD (SYSLXRT), PID = %d.\n", current->pid););
	} else if (unlikely(task->unblocked)) {
		if (task->is_hard > 0) {
			give_back_to_linux(task, -1);
		}
		task->unblocked = 0;
	}
}
00675
00676 long long rtai_lxrt_invoke (unsigned int lxsrq, void *arg)
00677 {
00678 RT_TASK *task;
00679
00680 if (likely((task = current->rtai_tskext(TSKEXT0)) != NULL)) {
00681 long long retval;
00682 check_to_soften_harden(task);
00683 retval = handle_lxrt_request(lxsrq, arg, task);
00684 check_to_soften_harden(task);
00685 return retval;
00686 }
00687
00688 return handle_lxrt_request(lxsrq, arg, NULL);
00689 }
00690
00691 int set_rt_fun_ext_index(struct rt_fun_entry *fun, int idx)
00692 {
00693 if (idx > 0 && idx < MAX_FUN_EXT && !rt_fun_ext[idx]) {
00694 rt_fun_ext[idx] = fun;
00695 return 0;
00696 }
00697 return -EACCES;
00698 }
00699
00700 void reset_rt_fun_ext_index( struct rt_fun_entry *fun, int idx)
00701 {
00702 if (idx > 0 && idx < MAX_FUN_EXT && rt_fun_ext[idx] == fun) {
00703 rt_fun_ext[idx] = 0;
00704 }
00705 }
00706
/*
 * Cleanup hook run when a Linux process that used LXRT terminates.
 * Sweeps the whole registry for objects owned by current — semaphores,
 * rw-locks, spinlocks, mailboxes, proxies and tasks — releasing each
 * one (after rt_drg_on_name_cnt() confirms the last reference is
 * gone), then tears down the process's own RT_TASK extension.
 */
void linux_process_termination(void)

{
	extern int max_slots;
	unsigned long numid;
	char name[8];
	RT_TASK *task2delete;
	struct rt_registry_entry entry;
	int slot;

	if (!(numid = is_process_registered(current))) {
		/* Process never registered anything with LXRT. */
		return;
	}
	for (slot = 1; slot <= max_slots; slot++) {
		/* Skip slots not owned by current or still referenced. */
		if (!rt_get_registry_slot(slot, &entry) || entry.tsk != current || rt_drg_on_name_cnt(entry.name) <= 0) {
			continue;
		}
		num2nam(entry.name, name);
		entry.tsk = 0;
		switch (entry.type) {
			case IS_SEM:
				rt_printk("LXRT releases SEM %s\n", name);
				lxrt_sem_delete(entry.adr);
				rt_free(entry.adr);
				break;
			case IS_RWL:
				rt_printk("LXRT releases RWL %s\n", name);
				lxrt_rwl_delete(entry.adr);
				rt_free(entry.adr);
				break;
			case IS_SPL:
				rt_printk("LXRT releases SPL %s\n", name);
				lxrt_spl_delete(entry.adr);
				rt_free(entry.adr);
				break;
			case IS_MBX:
				rt_printk("LXRT releases MBX %s\n", name);
				lxrt_mbx_delete(entry.adr);
				rt_free(entry.adr);
				break;
			case IS_PRX:
				numid = rttask2pid(entry.adr);
				rt_printk("LXRT releases PROXY PID %lu\n", numid);
				lxrt_Proxy_detach(numid);
				break;
			case IS_TASK:
				/* Tasks are only logged here; the extension itself is
				 * torn down below. */
				rt_printk("LXRT deregisters task %s %d\n", name, ((RT_TASK *)entry.adr)->lnxtsk->pid);
				break;
		}
	}
	if ((task2delete = current->rtai_tskext(TSKEXT0))) {
		if (!clr_rtext(task2delete)) {
			rt_drg_on_adr(task2delete);
			rt_printk("LXRT releases PID %d (ID: %s).\n", current->pid, current->comm);
			rt_free(task2delete->msg_buf[0]);
			rt_free(task2delete->msg_buf[1]);
			rt_free(task2delete);
			current->rtai_tskext(TSKEXT0) = current->rtai_tskext(TSKEXT1) = 0;
		}
	}
}
00771
/* Install the base LXRT function table in extension slot 0. */
void init_fun_ext (void)
{
	rt_fun_ext[0] = rt_fun_lxrt;
}
00776
00777 EXPORT_SYMBOL(rt_make_hard_real_time);
00778 EXPORT_SYMBOL(rt_make_soft_real_time);