base/sched/api.c

/**
 * @ingroup lxrt
 * @file
 * Common scheduling functions.
 * @author Paolo Mantegazza
 *
 * This file is part of the RTAI project.
 *
 * @note Copyright &copy; 1999-2003 Paolo Mantegazza <mantegazza@aero.polimi.it>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */


#include <linux/module.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>

#include <rtai_schedcore.h>
#include <rtai_prinher.h>
#include <rtai_registry.h>

/* ++++++++++++++++++++++++ COMMON FUNCTIONALITIES ++++++++++++++++++++++++++ */

/* +++++++++++++++++++++++++ PRIORITY MANAGEMENT ++++++++++++++++++++++++++++ */

/* Set the scheduling policy of a task: policy 0 selects FIFO (no time
   slicing), nonzero selects round robin with the time quantum given in
   nanoseconds; a null or out of range quantum falls back to the Linux
   tick period. */
RTAI_SYSCALL_MODE void rt_set_sched_policy(RT_TASK *task, int policy, int rr_quantum_ns)
{
	if (!task) {
		task = RT_CURRENT;
	}
	if ((task->policy = policy ? 1 : 0)) {
		task->rr_quantum = nano2count_cpuid(rr_quantum_ns, task->runnable_on_cpus);
		if ((task->rr_quantum & 0xF0000000) || !task->rr_quantum) {
#ifdef CONFIG_SMP
			task->rr_quantum = rt_smp_times[task->runnable_on_cpus].linux_tick;
#else
			task->rr_quantum = rt_times.linux_tick;
#endif
		}
		task->rr_remaining = task->rr_quantum;
		task->yield_time = 0;
	}
}
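
/*
 * Usage sketch (illustrative, not part of the original file): enable round
 * robin scheduling for the current task with a 1 ms quantum. Assumes it is
 * called from an RTAI real time task context.
 */
static inline void example_enable_round_robin(void)
{
	rt_set_sched_policy(NULL, 1 /* round robin */, 1000000 /* 1 ms in ns */);
}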


/**
 * @anchor rt_get_prio
 * @brief Check a task priority.
 *
 * rt_get_prio returns the base priority of task @e task.
 *
 * Recall that a task has a base native priority, assigned at its
 * birth or by @ref rt_change_prio(), and an actual, inherited,
 * priority. They can be different because of priority inheritance.
 *
 * @param task is the affected task.
 *
 * @return rt_get_prio returns the base priority of task @e task.
 *
 */
int rt_get_prio(RT_TASK *task)
{
	if (task->magic != RT_TASK_MAGIC) {
		return -EINVAL;
	}
	return task->base_priority;
}


/**
 * @anchor rt_get_inher_prio
 * @brief Check a task priority.
 *
 * rt_get_inher_prio returns the current, possibly inherited, priority
 * of task @e task, i.e. the priority it has acquired from other tasks
 * either blocked on resources it owns or waiting to pass a message to
 * it.
 *
 * Recall that a task has a base native priority, assigned at its
 * birth or by @ref rt_change_prio(), and an actual, inherited,
 * priority. They can be different because of priority inheritance.
 *
 * @param task is the affected task.
 *
 * @return rt_get_inher_prio returns the inherited priority of task @e task.
 *
 */
int rt_get_inher_prio(RT_TASK *task)
{
	if (task->magic != RT_TASK_MAGIC) {
		return -EINVAL;
	}
	return task->priority;
}


/**
 * @anchor rt_get_priorities
 * @brief Check inherited and base priority.
 *
 * rt_get_priorities returns the base and inherited priorities of a task.
 *
 * Recall that a task has a base native priority, assigned at its
 * birth or by @ref rt_change_prio(), and an actual, inherited,
 * priority. They can be different because of priority inheritance.
 *
 * @param task is the affected task.
 *
 * @param priority the actual, i.e. inherited, priority.
 *
 * @param base_priority the base priority.
 *
 * @return rt_get_priorities returns 0 if non NULL priority addresses
 * are given, -EINVAL if an address is NULL or task is not a valid object.
 *
 */

RTAI_SYSCALL_MODE int rt_get_priorities(RT_TASK *task, int *priority, int *base_priority)
{
	if (!task) {
		task = RT_CURRENT;
	}
	if (task->magic != RT_TASK_MAGIC || !priority || !base_priority) {
		return -EINVAL;
	}
	*priority      = task->priority;
	*base_priority = task->base_priority;
	return 0;
}
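
/*
 * Usage sketch (illustrative): read back both priorities of the current
 * task and detect an inheritance boost. The function name is hypothetical.
 */
static inline int example_is_priority_boosted(void)
{
	int prio, base_prio;

	if (rt_get_priorities(NULL, &prio, &base_prio)) {
		return -EINVAL;
	}
	/* a numerically lower value means a higher priority in RTAI */
	return prio < base_prio;
}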

/**
 * @anchor rt_task_get_info
 * @brief Get the task data listed in the RT_TASK_INFO type.
 *
 * rt_task_get_info returns the task data listed in the RT_TASK_INFO type.
 *
 * @param task is the task of interest, NULL can be used for the current task.
 * @param task_info a pointer to RT_TASK_INFO.
 *
 * @return -EINVAL if task is not valid or task_info is NULL, 0 if OK.
 *
 */

RTAI_SYSCALL_MODE int rt_task_get_info(RT_TASK *task, RT_TASK_INFO *task_info)
{
	if (!task) {
		task = RT_CURRENT;
	}
	if (task->magic != RT_TASK_MAGIC || task_info == NULL) {
		return -EINVAL;
	}
	task_info->period        = task->period;
	task_info->base_priority = task->base_priority;
	task_info->priority      = task->priority;
	return 0;
}

/**
 * @anchor rt_change_prio
 * @brief Change a task priority.
 *
 * rt_change_prio changes the base priority of task @e task to @e
 * priority.
 *
 * Recall that a task has a base native priority, assigned at its
 * birth or by @ref rt_change_prio(), and an actual, inherited,
 * priority. They can be different because of priority inheritance.
 *
 * @param task is the affected task.
 *
 * @param priority is the new priority, it can range within 0 < priority <
 * RT_SCHED_LOWEST_PRIORITY.
 *
 * @return rt_change_prio returns the base priority task @e task had
 * before the change, or -EINVAL if task is not valid or priority is
 * negative.
 *
 */
RTAI_SYSCALL_MODE int rt_change_prio(RT_TASK *task, int priority)
{
	unsigned long flags;
	int prio, base_priority;
	RT_TASK *rhead;

	if (task->magic != RT_TASK_MAGIC || priority < 0) {
		return -EINVAL;
	}

	prio = task->base_priority;
	flags = rt_global_save_flags_and_cli();
	if (!task->is_hard && priority < BASE_SOFT_PRIORITY) {
		priority += BASE_SOFT_PRIORITY;
	}
	base_priority = task->base_priority;
	task->base_priority = priority;
	if (base_priority == task->priority || priority < task->priority) {
		QUEUE *q, *blocked_on;
		unsigned long schedmap = 0;
		do {
			task->priority = priority;
			if (task->state == RT_SCHED_READY) {
				if ((task->rprev)->priority > task->priority || (task->rnext)->priority < task->priority) {
					rhead = rt_smp_linux_task[task->runnable_on_cpus].rnext;
					(task->rprev)->rnext = task->rnext;
					(task->rnext)->rprev = task->rprev;
					enq_ready_task(task);
					if (rhead != rt_smp_linux_task[task->runnable_on_cpus].rnext) {
#ifdef CONFIG_SMP
						__set_bit(task->runnable_on_cpus & 0x1F, &schedmap);
#else
						schedmap = 1;
#endif
					}
				}
				break;
//			} else if ((task->state & (RT_SCHED_SEND | RT_SCHED_RPC | RT_SCHED_RETURN | RT_SCHED_SEMAPHORE))) {
			} else if ((unsigned long)(blocked_on = task->blocked_on) > RTE_HIGERR && (((task->state & RT_SCHED_SEMAPHORE) && ((SEM *)blocked_on)->type > 0) || (task->state & (RT_SCHED_SEND | RT_SCHED_RPC | RT_SCHED_RETURN)))) {
				if (task->queue.prev != (blocked_on = task->blocked_on)) {
					q = blocked_on;
					(task->queue.prev)->next = task->queue.next;
					(task->queue.next)->prev = task->queue.prev;
					while ((q = q->next) != blocked_on && (q->task)->priority <= priority);
					q->prev = (task->queue.prev = q->prev)->next  = &(task->queue);
					task->queue.next = q;
					if (task->queue.prev != blocked_on) {
						break;
					}
				}
				task = (task->state & RT_SCHED_SEMAPHORE) ? ((SEM *)blocked_on)->owndby : blocked_on->task;
			}
		} while (task && task->priority > priority);
		if (schedmap) {
#ifdef CONFIG_SMP
			if (test_and_clear_bit(rtai_cpuid(), &schedmap)) {
				RT_SCHEDULE_MAP_BOTH(schedmap);
			} else {
				RT_SCHEDULE_MAP(schedmap);
			}
#else
			rt_schedule();
#endif
		}
	}
	rt_global_restore_flags(flags);
	return prio;
}
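
/*
 * Usage sketch (illustrative): temporarily boost a task around a time
 * critical section, then restore the previous base priority. "worker" is
 * a hypothetical task created elsewhere with rt_task_init().
 */
static inline void example_boost_priority(RT_TASK *worker)
{
	int old_prio = rt_change_prio(worker, 0);	/* 0 is the highest priority */
	/* ... time critical section ... */
	rt_change_prio(worker, old_prio);		/* restore what it had before */
}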

/* +++++++++++++++++++++ TASK RELATED SCHEDULER SERVICES ++++++++++++++++++++ */


/**
 * @anchor rt_whoami
 * @brief Get the task pointer of the current task.
 *
 * Calling rt_whoami from a task returns a pointer to its own task
 * structure.
 *
 * @return The pointer to the current task.
 */
RT_TASK *rt_whoami(void)
{
	return _rt_whoami();
}


/**
 * @anchor rt_task_yield
 * Yield the current task.
 *
 * @ref rt_task_yield() stops the current task and places it at the end
 * of the list of ready tasks having its same priority. The scheduler
 * makes the next ready task of the same priority active.
 * If the current task no longer holds the highest priority, the call
 * simply results in an immediate rescheduling.
 *
 * Recall that RTAI schedulers allow only higher priority tasks to
 * preempt the execution of lower priority ones. So equal priority
 * tasks cannot preempt each other and @ref rt_task_yield() should be
 * used if a user needs cooperative time slicing among equal
 * priority tasks. The implementation of the related policy is wholly
 * in the hands of the user. It is believed that time slicing is too
 * much of an overhead for the most demanding real time applications,
 * so it is left up to you, as in the sketch following this function.
 */
void rt_task_yield(void)
{
	RT_TASK *rt_current, *task;
	unsigned long flags;

	flags = rt_global_save_flags_and_cli();
	rt_current = RT_CURRENT;
	if (rt_smp_linux_task[rt_current->runnable_on_cpus].rnext == rt_current) {
		task = rt_current->rnext;
		while (rt_current->priority == task->priority) {
			task = task->rnext;
		}
		if (task != rt_current->rnext) {
			(rt_current->rprev)->rnext = rt_current->rnext;
			(rt_current->rnext)->rprev = rt_current->rprev;
			task->rprev = (rt_current->rprev = task->rprev)->rnext = rt_current;
			rt_current->rnext = task;
			rt_schedule();
		}
	} else {
		rt_schedule();
	}
	rt_global_restore_flags(flags);
}
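
/*
 * Cooperative slicing sketch (illustrative): equal priority workers each do
 * a bounded chunk of work and then explicitly hand over the CPU. The work
 * itself is a hypothetical placeholder.
 */
static void example_cooperative_worker(long arg)
{
	while (1) {
		/* ... do one bounded chunk of work for "arg" ... */
		rt_task_yield();	/* let the next same priority task run */
	}
}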


/**
 * @anchor rt_task_suspend
 * rt_task_suspend suspends execution of the task @e task.
 *
 * It will not be executed until a call to @ref rt_task_resume() or
 * @ref rt_task_make_periodic() is made. Multiple suspends require as
 * many @ref rt_task_resume() calls as the number of suspends placed
 * on a task.
 *
 * @param task pointer to a task structure.
 *
 * @return the task suspend depth. An abnormal termination returns as
 * described below:
 * - @b -EINVAL: task does not refer to a valid task;
 * - @b RTE_UNBLKD: the task was unblocked while suspended.
 *
 */
RTAI_SYSCALL_MODE int rt_task_suspend(RT_TASK *task)
{
	unsigned long flags;

	if (!task) {
		task = RT_CURRENT;
	} else if (task->magic != RT_TASK_MAGIC) {
		return -EINVAL;
	}

	flags = rt_global_save_flags_and_cli();
	if (!task_owns_sems(task)) {
		if (task->suspdepth >= 0) {
			if (!task->suspdepth) {
				task->suspdepth++;
			}
			if (task == RT_CURRENT) {
				rem_ready_current(task);
				task->state |= RT_SCHED_SUSPENDED;
				rt_schedule();
				if (unlikely(task->blocked_on != NULL)) {
					task->suspdepth = 0;
					rt_global_restore_flags(flags);
					return RTE_UNBLKD;
				}
			} else {
				rem_ready_task(task);
				rem_timed_task(task);
				task->state |= RT_SCHED_SUSPENDED;
				if (task->runnable_on_cpus != rtai_cpuid()) {
					send_sched_ipi(1 << task->runnable_on_cpus);
				}
			}
		} else {
			task->suspdepth++;
		}
	} else if (task->suspdepth < 0) {
		task->suspdepth++;
	}
	rt_global_restore_flags(flags);
	return task->suspdepth;
}


RTAI_SYSCALL_MODE int rt_task_suspend_if(RT_TASK *task)
{
	unsigned long flags;

	if (!task) {
		task = RT_CURRENT;
	} else if (task->magic != RT_TASK_MAGIC) {
		return -EINVAL;
	}

	flags = rt_global_save_flags_and_cli();
	if (!task_owns_sems(task) && task->suspdepth < 0) {
		task->suspdepth++;
	}
	rt_global_restore_flags(flags);
	return task->suspdepth;
}


RTAI_SYSCALL_MODE int rt_task_suspend_until(RT_TASK *task, RTIME time)
{
	unsigned long flags;

	if (!task) {
		task = RT_CURRENT;
	} else if (task->magic != RT_TASK_MAGIC) {
		return -EINVAL;
	}

	flags = rt_global_save_flags_and_cli();
	if (!task_owns_sems(task)) {
		if (task->suspdepth >= 0) {
#ifdef CONFIG_SMP
			int cpuid = rtai_cpuid();
#endif
			if ((task->resume_time = time) > rt_time_h) {
				if (!task->suspdepth) {
					task->suspdepth++;
				}
				if (task == RT_CURRENT) {
					rem_ready_current(task);
					enq_timed_task(task);
					task->state |= (RT_SCHED_SUSPENDED | RT_SCHED_DELAYED);
					while (1) {
						rt_schedule();
						if (unlikely(task->blocked_on != NULL)) {
							task->suspdepth = 0;
							rt_global_restore_flags(flags);
							return RTE_UNBLKD;
						}
						if (task->suspdepth) {
							continue;
						}
						rt_global_restore_flags(flags);
						return task->resume_time < rt_smp_time_h[rtai_cpuid()] ? RTE_TIMOUT : 0;
					}
				} else {
					rem_ready_task(task);
					enq_timed_task(task);
					task->state |= (RT_SCHED_SUSPENDED | RT_SCHED_DELAYED);
					if (task->runnable_on_cpus != rtai_cpuid()) {
						send_sched_ipi(1 << task->runnable_on_cpus);
					}
				}
			} else {
				rt_global_restore_flags(flags);
				return RTE_TIMOUT;
			}
		} else {
			task->suspdepth++;
		}
	} else if (task->suspdepth < 0) {
		task->suspdepth++;
	}
	rt_global_restore_flags(flags);
	return task->suspdepth;
}


RTAI_SYSCALL_MODE int rt_task_suspend_timed(RT_TASK *task, RTIME delay)
{
	return rt_task_suspend_until(task, get_time() + delay);
}


/**
 * @anchor rt_task_resume
 * Resume a task.
 *
 * rt_task_resume resumes execution of the task @e task previously
 * suspended by @ref rt_task_suspend(), or makes a newly created task
 * ready to run. The suspend depth is accounted for, so a task that
 * has been suspended several times becomes ready only after as many
 * calls to rt_task_resume.
 *
 * @param task pointer to a task structure.
 *
 * @return 0 on success. A negative value on failure as described below:
 * - @b -EINVAL: task does not refer to a valid task.
 *
 */
RTAI_SYSCALL_MODE int rt_task_resume(RT_TASK *task)
{
	unsigned long flags;

	if (task->magic != RT_TASK_MAGIC) {
		return -EINVAL;
	}

	flags = rt_global_save_flags_and_cli();
	if (!(--task->suspdepth)) {
		rem_timed_task(task);
		if ((task->state &= ~(RT_SCHED_SUSPENDED | RT_SCHED_DELAYED)) == RT_SCHED_READY) {
			task->blocked_on = NULL;
			enq_ready_task(task);
			RT_SCHEDULE(task, rtai_cpuid());
		}
	}
	rt_global_restore_flags(flags);
	return 0;
}
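
/*
 * Suspend/resume pairing sketch (illustrative): park a worker task while
 * reconfiguring shared data, then release it. "worker" is a hypothetical
 * task created elsewhere; rt_task_suspend() returns the new suspend depth,
 * i.e. how many rt_task_resume() calls are still needed.
 */
static inline void example_pause_and_release(RT_TASK *worker)
{
	int depth = rt_task_suspend(worker);

	if (depth > 0) {
		/* ... safely reconfigure data shared with the worker ... */
		while (depth--) {
			rt_task_resume(worker);	/* one resume per pending suspend */
		}
	}
}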


/**
 * @anchor rt_get_task_state
 * Query task state.
 *
 * rt_get_task_state returns the state of a real time task.
 *
 * @param task is a pointer to the task structure.
 *
 * Task state is formed by the bitwise OR of one or more of the
 * following flags:
 *
 * @retval READY Task @e task is ready to run (i.e. unblocked).
 * Note that on a uniprocessor machine the currently running task is
 * just in READY state, while on multiprocessors it can be (READY |
 * RUNNING), see below.
 * @retval SUSPENDED Task @e task is blocked waiting for a resume.
 * @retval DELAYED Task @e task is blocked waiting for its next running
 * period or the expiration of a timeout.
 * @retval SEMAPHORE Task @e task is blocked on a semaphore, waiting for
 * the semaphore to be signaled.
 * @retval SEND Task @e task is blocked on sending a message, the receiver
 * was not in RECEIVE state.
 * @retval RECEIVE Task @e task is blocked waiting for incoming messages,
 * sends or rpcs.
 * @retval RPC Task @e task is blocked on a Remote Procedure Call, the
 * receiver was not in RECEIVE state.
 * @retval RETURN Task @e task is blocked waiting for a return from a
 * Remote Procedure Call, the receiver got the RPC but has not replied
 * yet.
 * @retval RUNNING Task @e task is running, used only for SMP
 * schedulers.
 *
 * The returned task state is only approximate information. Timer
 * and other hardware interrupts may cause a change in the state of
 * the queried task before the caller can evaluate the returned
 * value. The caller should disable interrupts if it wants reliable info
 * about another task. rt_get_task_state does not perform any check
 * on the task pointer.
 */
int rt_get_task_state(RT_TASK *task)
{
	return task->state;
}


/**
 * @anchor rt_linux_use_fpu
 * @brief Set indication of FPU usage.
 *
 * rt_linux_use_fpu informs the scheduler that floating point
 * arithmetic operations will be used also by foreground Linux
 * processes, i.e. the Linux kernel itself (unlikely) and any of its
 * processes.
 *
 * @param use_fpu_flag If this parameter has a nonzero value, the
 * Floating Point Unit (FPU) context is also switched when @e task or
 * the kernel becomes active.
 * This makes task switching slower, negligibly, on all 32-bit CPUs
 * but the 386s and the oldest 486s.
 * This flag can be set also by rt_task_init when the real time task
 * is created. With UP and MUP schedulers care is taken to avoid
 * useless saves/restores of the FPU environment.
 * Under SMP tasks can be moved from CPU to CPU so saves/restores for
 * tasks using the FPU are always carried out.
 * Note that by default Linux has this flag cleared. Besides using
 * rt_linux_use_fpu you can change the Linux FPU flag when you insmod
 * any RTAI scheduler module by setting the LinuxFpu command line
 * parameter of the rtai_sched module itself.
 *
 * See also: @ref rt_task_use_fpu().
 */
void rt_linux_use_fpu(int use_fpu_flag)
{
	int cpuid;
	for (cpuid = 0; cpuid < num_online_cpus(); cpuid++) {
		/* rt_linux_task is a cpuid-indexed macro, one Linux task per CPU */
		rt_linux_task.uses_fpu = use_fpu_flag ? 1 : 0;
	}
}


/**
 * @anchor rt_task_use_fpu
 * @brief Set indication of FPU usage for a task.
 *
 * rt_task_use_fpu informs the scheduler that floating point
 * arithmetic operations will be used by the real time task @e task.
 *
 * @param task is a pointer to the real time task.
 *
 * @param use_fpu_flag If this parameter has a nonzero value, the
 * Floating Point Unit (FPU) context is also switched when @e task or
 * the kernel becomes active.
 * This makes task switching slower, negligibly, on all 32-bit CPUs
 * but the 386s and the oldest 486s.
 * This flag can be set also by @ref rt_task_init() when the real time
 * task is created. With UP and MUP schedulers care is taken to avoid
 * useless saves/restores of the FPU environment.
 * Under SMP tasks can be moved from CPU to CPU so saves/restores for
 * tasks using the FPU are always carried out.
 *
 * @return 0 on success. A negative value on failure as described below:
 * - @b -EINVAL: task does not refer to a valid task.
 *
 * See also: @ref rt_linux_use_fpu().
 */
RTAI_SYSCALL_MODE int rt_task_use_fpu(RT_TASK *task, int use_fpu_flag)
{
	if (task->magic != RT_TASK_MAGIC) {
		return -EINVAL;
	}
	task->uses_fpu = use_fpu_flag ? 1 : 0;
	return 0;
}
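
/*
 * Usage sketch (illustrative): create a hypothetical filtering task and mark
 * it as an FPU user right away, so its FPU context is saved/restored across
 * switches.
 */
static RT_TASK example_filter_task;	/* hypothetical task object */

static inline int example_start_fpu_task(void (*fun)(long))
{
	int ret = rt_task_init(&example_filter_task, fun, 0, 4096,
			       RT_SCHED_LOWEST_PRIORITY/2, 0, NULL);
	if (!ret) {
		ret = rt_task_use_fpu(&example_filter_task, 1);	/* it will do floating point */
	}
	return ret;
}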


/**
 * @anchor rt_task_signal_handler
 * @brief Set the signal handler of a task.
 *
 * rt_task_signal_handler installs, or changes, the signal function
 * of a real time task.
 *
 * @param task is a pointer to the real time task.
 *
 * @param handler is the entry point of the signal function.
 *
 * A signal handler function can be set also when the task is newly
 * created with @ref rt_task_init(). The signal handler is a function
 * called within the task environment and with interrupts disabled,
 * when the task becomes the current running task after a context
 * switch, except at its very first scheduling. It allows you to
 * implement whatever signal management policy you think useful, and
 * many other things as well (FIXME).
 *
 * @return 0 on success. A negative value on failure as described below:
 * - @b -EINVAL: task does not refer to a valid task.
 */
RTAI_SYSCALL_MODE int rt_task_signal_handler(RT_TASK *task, void (*handler)(void))
{
	if (task->magic != RT_TASK_MAGIC) {
		return -EINVAL;
	}
	task->signal = handler;
	return 0;
}
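
/*
 * Usage sketch (illustrative): a hypothetical handler that runs, with
 * interrupts disabled, every time the watched task is switched back in.
 */
static void example_on_switch_in(void)
{
	/* e.g. bump a context switch counter, toggle a debug pin, ... */
}

static inline int example_install_signal(RT_TASK *watched_task)
{
	return rt_task_signal_handler(watched_task, example_on_switch_in);
}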

/* ++++++++++++++++++++++++++++ MEASURING TIME ++++++++++++++++++++++++++++++ */

struct epoch_struct boot_epoch = { SPIN_LOCK_UNLOCKED, 0, };
EXPORT_SYMBOL(boot_epoch);

static inline void _rt_get_boot_epoch(volatile RTIME time_orig[])
{
	unsigned long flags;
	struct timeval tv;
	RTIME t;

	flags = rt_spin_lock_irqsave(&boot_epoch.lock);
	do_gettimeofday(&tv);
	t = rtai_rdtsc();
	rt_spin_unlock_irqrestore(flags, &boot_epoch.lock);

	/* origin of the wall clock, in TSC counts and in nanoseconds */
	time_orig[0] = tv.tv_sec*(RTIME)tuned.cpu_freq + imuldiv(tv.tv_usec, tuned.cpu_freq, 1000000) - t;
	time_orig[1] = tv.tv_sec*1000000000ULL + tv.tv_usec*1000ULL - llimd(t, 1000000000, tuned.cpu_freq);
}

void rt_get_boot_epoch(void)
{
	int use;
	_rt_get_boot_epoch(boot_epoch.time[use = 1 - boot_epoch.touse]);
	boot_epoch.touse = use;
}

void rt_gettimeorig(RTIME time_orig[])
{
	if (time_orig == NULL) {
		rt_get_boot_epoch();
	} else {
		_rt_get_boot_epoch(time_orig);
	}
}
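
/*
 * Worked use of the time origins above (illustrative): since time_orig[]
 * stores "wall time minus TSC", in counts and in nanoseconds respectively,
 * a current wall clock reading can be rebuilt from the TSC.
 */
static inline RTIME example_wallclock_ns(void)
{
	RTIME time_orig[2];

	rt_gettimeorig(time_orig);
	/* wall_ns = ns_origin + TSC converted to ns */
	return time_orig[1] + llimd(rtai_rdtsc(), 1000000000, tuned.cpu_freq);
}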

/* +++++++++++++++++++++++++++ CONTROLLING TIME ++++++++++++++++++++++++++++++ */

/**
 * @anchor rt_task_make_periodic_relative_ns
 * Make a task run periodically.
 *
 * rt_task_make_periodic_relative_ns marks the task @e task, previously
 * created with @ref rt_task_init(), as suitable for a periodic
 * execution, with period @e period, when @ref rt_task_wait_period()
 * is called.
 *
 * The time of first execution is defined through @e start_delay,
 * which is relative to the current time and measured in nanoseconds.
 *
 * @param task is a pointer to the task you want to make periodic.
 *
 * @param start_delay is the time to wait before the task starts
 *    running, in nanoseconds.
 *
 * @param period corresponds to the period of the task, in nanoseconds.
 *
 * @retval 0 on success. A negative value on failure as described below:
 * - @b -EINVAL: task does not refer to a valid task.
 *
 * Recall that the term clock ticks depends on the mode in which the hard
 * timer runs. So if the hard timer was set as periodic a clock tick will
 * last as the period set in start_rt_timer, while if oneshot mode is used
 * a clock tick will last as the inverse of the running frequency of the
 * hard timer in use and irrespective of any period used in the call to
 * start_rt_timer.
 */
RTAI_SYSCALL_MODE int rt_task_make_periodic_relative_ns(RT_TASK *task, RTIME start_delay, RTIME period)
{
	unsigned long flags;

	if (!task) {
		task = RT_CURRENT;
	} else if (task->magic != RT_TASK_MAGIC) {
		return -EINVAL;
	}
	start_delay = nano2count_cpuid(start_delay, task->runnable_on_cpus);
	period = nano2count_cpuid(period, task->runnable_on_cpus);
	flags = rt_global_save_flags_and_cli();
	task->periodic_resume_time = task->resume_time = rt_get_time_cpuid(task->runnable_on_cpus) + start_delay;
	task->period = period;
	task->suspdepth = 0;
	if (!(task->state & RT_SCHED_DELAYED)) {
		rem_ready_task(task);
		task->state = (task->state & ~RT_SCHED_SUSPENDED) | RT_SCHED_DELAYED;
		enq_timed_task(task);
	}
	RT_SCHEDULE(task, rtai_cpuid());
	rt_global_restore_flags(flags);
	return 0;
}


/**
 * @anchor rt_task_make_periodic
 * Make a task run periodically.
 *
 * rt_task_make_periodic marks the task @e task, previously created
 * with @ref rt_task_init(), as suitable for a periodic execution, with
 * period @e period, when @ref rt_task_wait_period() is called.
 *
 * The time of first execution is defined through @e start_time, an
 * absolute value measured in clock ticks.
 *
 * @param task is a pointer to the task you want to make periodic.
 *
 * @param start_time is the absolute time to wait before the task starts
 *    running, in clock ticks.
 *
 * @param period corresponds to the period of the task, in clock ticks.
 *
 * @retval 0 on success. A negative value on failure as described
 * below:
 * - @b -EINVAL: task does not refer to a valid task.
 *
 * See also: @ref rt_task_make_periodic_relative_ns().
 * Recall that the term clock ticks depends on the mode in which the hard
 * timer runs. So if the hard timer was set as periodic a clock tick will
 * last as the period set in start_rt_timer, while if oneshot mode is used
 * a clock tick will last as the inverse of the running frequency of the
 * hard timer in use and irrespective of any period used in the call to
 * start_rt_timer.
 *
 */
RTAI_SYSCALL_MODE int rt_task_make_periodic(RT_TASK *task, RTIME start_time, RTIME period)
{
	unsigned long flags;

	if (!task) {
		task = RT_CURRENT;
	} else if (task->magic != RT_TASK_MAGIC) {
		return -EINVAL;
	}
	REALTIME2COUNT(start_time);
	flags = rt_global_save_flags_and_cli();
	task->periodic_resume_time = task->resume_time = start_time;
	task->period = period;
	task->suspdepth = 0;
	if (!(task->state & RT_SCHED_DELAYED)) {
		rem_ready_task(task);
		task->state = (task->state & ~RT_SCHED_SUSPENDED) | RT_SCHED_DELAYED;
		enq_timed_task(task);
	}
	RT_SCHEDULE(task, rtai_cpuid());
	rt_global_restore_flags(flags);
	return 0;
}


/**
 * @anchor rt_task_wait_period
 * Wait till the next period.
 *
 * rt_task_wait_period suspends the execution of the currently running
 * real time task until the next period is reached.
 * The task must have been previously marked for a periodic execution
 * by calling @ref rt_task_make_periodic() or
 * @ref rt_task_make_periodic_relative_ns().
 *
 * @return 0 if the period expires as expected. An abnormal termination
 * returns as described below:
 * - @b RTE_UNBLKD:  the task was unblocked while sleeping;
 * - @b RTE_TMROVRN: an immediate return was taken because the next period
 *   has already expired.
 *
 * @note The task is suspended only temporarily, i.e. it simply gives
 * up control until the next time period.
 */
int rt_task_wait_period(void)
{
	DECLARE_RT_CURRENT;
	unsigned long flags;

	flags = rt_global_save_flags_and_cli();
	ASSIGN_RT_CURRENT;
	if (rt_current->resync_frame) { // Request from watchdog
		rt_current->resync_frame = 0;
		rt_current->periodic_resume_time = rt_current->resume_time = oneshot_timer ? rtai_rdtsc() :
#ifdef CONFIG_SMP
		rt_smp_times[cpuid].tick_time;
#else
		rt_times.tick_time;
#endif
	} else if ((rt_current->periodic_resume_time += rt_current->period) > rt_time_h) {
		void *blocked_on;
		rt_current->resume_time = rt_current->periodic_resume_time;
		rt_current->blocked_on = NULL;
		rt_current->state |= RT_SCHED_DELAYED;
		rem_ready_current(rt_current);
		enq_timed_task(rt_current);
		rt_schedule();
		blocked_on = rt_current->blocked_on;
		rt_global_restore_flags(flags);
#ifdef CONFIG_M68K
		// Workaround for a gcc bug
		if (blocked_on == RTP_OBJREM) {
			__asm__ __volatile__ ("nop");
		}
		return likely(!blocked_on) ? 0L : RTE_UNBLKD;
#else
		return likely(!blocked_on) ? 0 : RTE_UNBLKD;
#endif
	}
	rt_global_restore_flags(flags);
	return RTE_TMROVRN;
}
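
/*
 * Periodic loop sketch (illustrative): a task body built on the services
 * above. Assumes the hard timer was started elsewhere (e.g. with
 * start_rt_timer()) and that this function runs as a task created with
 * rt_task_init().
 */
static void example_periodic_thread(long arg)
{
	/* start half a period from now, with a 1 ms period, all in counts */
	RTIME period = nano2count(1000000);

	rt_task_make_periodic(NULL, rt_get_time() + period/2, period);
	while (1) {
		/* ... one cycle of periodic work ... */
		if (rt_task_wait_period() == RTE_TMROVRN) {
			/* period overrun: the next resume time had already passed */
		}
	}
}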

RTAI_SYSCALL_MODE void rt_task_set_resume_end_times(RTIME resume, RTIME end)
{
	RT_TASK *rt_current;
	unsigned long flags;

	flags = rt_global_save_flags_and_cli();
	rt_current = RT_CURRENT;
	rt_current->policy   = -1;
	rt_current->priority =  0;
	if (resume > 0) {
		rt_current->resume_time = resume;
	} else {
		rt_current->resume_time -= resume;
	}
	if (end > 0) {
		rt_current->period = end;
	} else {
		rt_current->period = rt_current->resume_time - end;
	}
	rt_current->state |= RT_SCHED_DELAYED;
	rem_ready_current(rt_current);
	enq_timed_task(rt_current);
	rt_schedule();
	rt_global_restore_flags(flags);
}

RTAI_SYSCALL_MODE int rt_set_resume_time(RT_TASK *task, RTIME new_resume_time)
{
	unsigned long flags;

	if (task->magic != RT_TASK_MAGIC) {
		return -EINVAL;
	}

	flags = rt_global_save_flags_and_cli();
	if (task->state & RT_SCHED_DELAYED) {
		if (((task->resume_time = new_resume_time) - (task->tnext)->resume_time) > 0) {
			rem_timed_task(task);
			enq_timed_task(task);
			rt_global_restore_flags(flags);
			return 0;
		}
	}
	rt_global_restore_flags(flags);
	return -ETIME;
}

RTAI_SYSCALL_MODE int rt_set_period(RT_TASK *task, RTIME new_period)
{
	unsigned long flags;

	if (task->magic != RT_TASK_MAGIC) {
		return -EINVAL;
	}
	flags = rt_global_save_flags_and_cli();
	task->period = new_period;
	rt_global_restore_flags(flags);
	return 0;
}

/**
 * @anchor next_period
 * @brief Get the time a periodic task will be resumed after calling
 *  rt_task_wait_period.
 *
 * This function returns the time when the caller task will run
 * next. Combined with the appropriate @ref rt_get_time() function it
 * can be used for checking the fraction of period used or any period
 * overrun.
 *
 * @return Next period time in internal count units.
 */
RTIME next_period(void)
{
	RT_TASK *rt_current;
	unsigned long flags;

	flags = rt_global_save_flags_and_cli();
	rt_current = RT_CURRENT;
	rt_global_restore_flags(flags);
	return rt_current->periodic_resume_time + rt_current->period;
}

/**
 * @anchor rt_busy_sleep
 * @brief Delay/suspend execution for a while.
 *
 * rt_busy_sleep delays the execution of the caller task without
 * giving back the control to the scheduler. This function burns away
 * CPU cycles in a busy wait loop so it should be used only for very
 * short synchronization delays. On machines without a TSC clock it
 * can lead to busy sleeps uncertain by many microseconds because of
 * the need to read the 8254 timer.
 *
 * @param ns is the number of nanoseconds to wait.
 *
 * See also: @ref rt_sleep(), @ref rt_sleep_until().
 *
 * @note A higher priority task or interrupt handler can run before
 *   the task goes to sleep, so the actual time spent in these
 *   functions may be longer than that specified.
 */
RTAI_SYSCALL_MODE void rt_busy_sleep(int ns)
{
	RTIME end_time;

	end_time = rtai_rdtsc() + llimd(ns, tuned.cpu_freq, 1000000000);
	while (rtai_rdtsc() < end_time);
}

/**
 * @anchor rt_sleep
 * @brief Delay/suspend execution for a while.
 *
 * rt_sleep suspends execution of the caller task for a time of delay
 * internal count units. During this time the CPU is used by other
 * tasks.
 *
 * @param delay Corresponds to the time the task is going to be suspended.
 *
 * See also: @ref rt_busy_sleep(), @ref rt_sleep_until().
 *
 * @return 0 if the delay expires as expected. An abnormal termination returns
 *  as described below:
 * - @b RTE_UNBLKD:  the task was unblocked while sleeping;
 * - @b RTE_TMROVRN: an immediate return was taken because the delay is too
 *   short to be honoured.
 *
 * @note A higher priority task or interrupt handler can run before
 *   the task goes to sleep, so the actual time spent in these
 *   functions may be longer than the one specified.
 */
RTAI_SYSCALL_MODE int rt_sleep(RTIME delay)
{
	DECLARE_RT_CURRENT;
	unsigned long flags;

	flags = rt_global_save_flags_and_cli();
	ASSIGN_RT_CURRENT;
	if ((rt_current->resume_time = get_time() + delay) > rt_time_h) {
		void *blocked_on;
		rt_current->blocked_on = NULL;
		rt_current->state |= RT_SCHED_DELAYED;
		rem_ready_current(rt_current);
		enq_timed_task(rt_current);
		rt_schedule();
		blocked_on = rt_current->blocked_on;
		rt_global_restore_flags(flags);
		return likely(!blocked_on) ? 0 : RTE_UNBLKD;
	}
	rt_global_restore_flags(flags);
	return RTE_TMROVRN;
}

/**
 * @anchor rt_sleep_until
 * @brief Delay/suspend execution for a while.
 *
 * rt_sleep_until is similar to @ref rt_sleep() but the parameter time
 * is the absolute time until which the task has to be suspended. If the
 * given time has already passed this call has no effect.
 *
 * @param time Absolute time until which the task has to be suspended.
 *
 * See also: @ref rt_busy_sleep(), @ref rt_sleep().
 *
 * @return 0 if the sleeping expires as expected. An abnormal termination
 * returns as described below:
 * - @b RTE_UNBLKD:  the task was unblocked while sleeping;
 * - @b RTE_TMROVRN: an immediate return was taken because the time deadline
 *   has already expired.
 *
 * @note A higher priority task or interrupt handler can run before
 *   the task goes to sleep, so the actual time spent in these
 *   functions may be longer than the one specified.
 */
RTAI_SYSCALL_MODE int rt_sleep_until(RTIME time)
{
	DECLARE_RT_CURRENT;
	unsigned long flags;

	REALTIME2COUNT(time);
	flags = rt_global_save_flags_and_cli();
	ASSIGN_RT_CURRENT;
	if ((rt_current->resume_time = time) > rt_time_h) {
		void *blocked_on;
		rt_current->blocked_on = NULL;
		rt_current->state |= RT_SCHED_DELAYED;
		rem_ready_current(rt_current);
		enq_timed_task(rt_current);
		rt_schedule();
		blocked_on = rt_current->blocked_on;
		rt_global_restore_flags(flags);
		return likely(!blocked_on) ? 0 : RTE_UNBLKD;
	}
	rt_global_restore_flags(flags);
	return RTE_TMROVRN;
}
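
/*
 * Drift-free stepping sketch (illustrative): advancing an absolute deadline
 * by a fixed increment and sleeping until it avoids the error accumulation
 * of repeated relative rt_sleep() calls.
 */
static void example_fixed_rate_loop(long arg)
{
	RTIME step = nano2count(500000);	/* 0.5 ms, converted to counts */
	RTIME deadline = rt_get_time() + step;

	while (1) {
		/* ... work for this slot ... */
		rt_sleep_until(deadline);
		deadline += step;	/* absolute, so timing errors do not accumulate */
	}
}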

RTAI_SYSCALL_MODE int rt_task_masked_unblock(RT_TASK *task, unsigned long mask)
{
	unsigned long flags;

	if (task->magic != RT_TASK_MAGIC) {
		return -EINVAL;
	}

	if (task->state && task->state != RT_SCHED_READY) {
		flags = rt_global_save_flags_and_cli();
		if (mask & RT_SCHED_DELAYED) {
			rem_timed_task(task);
		}
		if (task->state != RT_SCHED_READY && (task->state &= ~mask) == RT_SCHED_READY) {
			task->blocked_on = RTP_UNBLKD;
			enq_ready_task(task);
			RT_SCHEDULE(task, rtai_cpuid());
		}
		rt_global_restore_flags(flags);
		return RTE_UNBLKD;
	}
	return 0;
}

int rt_nanosleep(struct timespec *rqtp, struct timespec *rmtp)
{
	RTIME expire;

	if (rqtp->tv_nsec >= 1000000000L || rqtp->tv_nsec < 0 || rqtp->tv_sec < 0) {
		return -EINVAL;
	}
	rt_sleep_until(expire = rt_get_time() + timespec2count(rqtp));
	if ((expire -= rt_get_time()) > 0) {
		if (rmtp) {
			count2timespec(expire, rmtp);
		}
		return -EINTR;
	}
	return 0;
}
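
/*
 * Usage sketch (illustrative): POSIX style sleep with remaining time
 * reporting, mirroring the rt_nanosleep() semantics above.
 */
static inline int example_nanosleep_ms(int ms)
{
	struct timespec req = { .tv_sec = ms / 1000, .tv_nsec = (ms % 1000) * 1000000L }, rem;
	int ret = rt_nanosleep(&req, &rem);

	if (ret == -EINTR) {
		/* woken early: "rem" holds the sleep time that was left */
	}
	return ret;
}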

/* +++++++++++++++++++ READY AND TIMED QUEUE MANIPULATION +++++++++++++++++++ */

void rt_enq_ready_edf_task(RT_TASK *ready_task)
{
	enq_ready_edf_task(ready_task);
}

void rt_enq_ready_task(RT_TASK *ready_task)
{
	enq_ready_task(ready_task);
}

int rt_renq_ready_task(RT_TASK *ready_task, int priority)
{
	return renq_ready_task(ready_task, priority);
}

void rt_rem_ready_task(RT_TASK *task)
{
	rem_ready_task(task);
}

void rt_rem_ready_current(RT_TASK *rt_current)
{
	rem_ready_current(rt_current);
}

void rt_enq_timed_task(RT_TASK *timed_task)
{
	enq_timed_task(timed_task);
}

void rt_wake_up_timed_tasks(int cpuid)
{
#ifdef CONFIG_SMP
	wake_up_timed_tasks(cpuid);
#else
	wake_up_timed_tasks(0);
#endif
}

void rt_rem_timed_task(RT_TASK *task)
{
	rem_timed_task(task);
}

void rt_enqueue_blocked(RT_TASK *task, QUEUE *queue, int qtype)
{
	enqueue_blocked(task, queue, qtype);
}

void rt_dequeue_blocked(RT_TASK *task)
{
	dequeue_blocked(task);
}

int rt_renq_current(RT_TASK *rt_current, int priority)
{
	return renq_ready_task(rt_current, priority);
}

/* ++++++++++++++++++++++++ NAMED TASK INIT/DELETE ++++++++++++++++++++++++++ */

RTAI_SYSCALL_MODE RT_TASK *rt_named_task_init(const char *task_name, void (*thread)(long), long data, int stack_size, int prio, int uses_fpu, void(*signal)(void))
{
	RT_TASK *task;
	unsigned long name;

	if ((task = rt_get_adr(name = nam2num(task_name)))) {
		return task;
	}
	if ((task = rt_malloc(sizeof(RT_TASK))) && !rt_task_init(task, thread, data, stack_size, prio, uses_fpu, signal)) {
		if (rt_register(name, task, IS_TASK, 0)) {
			return task;
		}
		rt_task_delete(task);
	}
	rt_free(task);
	return (RT_TASK *)0;
}

RTAI_SYSCALL_MODE RT_TASK *rt_named_task_init_cpuid(const char *task_name, void (*thread)(long), long data, int stack_size, int prio, int uses_fpu, void(*signal)(void), unsigned int run_on_cpu)
{
	RT_TASK *task;
	unsigned long name;

	if ((task = rt_get_adr(name = nam2num(task_name)))) {
		return task;
	}
	if ((task = rt_malloc(sizeof(RT_TASK))) && !rt_task_init_cpuid(task, thread, data, stack_size, prio, uses_fpu, signal, run_on_cpu)) {
		if (rt_register(name, task, IS_TASK, 0)) {
			return task;
		}
		rt_task_delete(task);
	}
	rt_free(task);
	return (RT_TASK *)0;
}

RTAI_SYSCALL_MODE int rt_named_task_delete(RT_TASK *task)
{
	if (!rt_task_delete(task)) {
		rt_free(task);
	}
	return rt_drg_on_adr(task);
}
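
/*
 * Usage sketch (illustrative): create or look up a named task shared across
 * modules, then drop it when done. The 6 character name and the thread
 * function are hypothetical.
 */
static void example_named_thread(long arg)
{
	/* ... task body ... */
}

static inline RT_TASK *example_get_worker(void)
{
	/* returns the existing "WORKER" task, or creates and registers it */
	return rt_named_task_init("WORKER", example_named_thread, 0, 4096,
				  RT_SCHED_LOWEST_PRIORITY/2, 0, NULL);
}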

/* +++++++++++++++++++++++++++++++ REGISTRY +++++++++++++++++++++++++++++++++ */

#define HASHED_REGISTRY

#ifdef HASHED_REGISTRY

int max_slots;
static struct rt_registry_entry *lxrt_list;
static spinlock_t list_lock = SPIN_LOCK_UNLOCKED;

/* comment out the define below to drop the collision accounting */
#define COLLISION_COUNT() do { col++; } while(0)
static unsigned long long col;
#ifndef COLLISION_COUNT
#define COLLISION_COUNT()
#endif

#define NONAME  (1UL)
#define NOADR   ((void *)1)

#define PRIMES_TAB_GRANULARITY  100

static unsigned short primes[ ] = { 1, 103, 211, 307, 401, 503, 601, 701, 809, 907, 1009, 1103, 1201, 1301, 1409, 1511, 1601, 1709, 1801, 1901, 2003, 2111, 2203, 2309, 2411, 2503, 2609, 2707, 2801, 2903, 3001, 3109, 3203, 3301, 3407, 3511,
3607, 3701, 3803, 3907, 4001, 4111, 4201, 4327, 4409, 4507, 4603, 4703, 4801, 4903, 5003, 5101, 5209, 5303, 5407, 5501, 5623, 5701, 5801, 5903, 6007, 6101, 6203, 6301, 6421, 6521, 6607, 6703, 6803, 6907, 7001, 7103, 7207, 7307, 7411, 7507,
7603, 7703, 7817, 7901, 8009, 8101, 8209, 8311, 8419, 8501, 8609, 8707, 8803, 8923, 9001, 9103, 9203, 9311, 9403, 9511, 9601, 9719, 9803, 9901, 10007, 10103, 10211, 10301, 10427, 10501, 10601, 10709, 10831, 10903, 11003, 11113, 11213, 11311, 11411, 11503, 11597, 11617, 11701, 11801, 11903, 12007, 12101, 12203, 12301, 12401, 12503, 12601, 12703, 12809, 12907, 13001, 13103, 13217, 13309, 13411, 13513, 13613, 13709, 13807, 13901, 14009, 14107, 14207, 14303, 14401, 14503, 14621,
14713, 14813, 14923, 15013, 15101, 15217, 15307, 15401, 15511, 15601, 15727, 15803, 15901, 16001, 16103, 16217, 16301, 16411, 16519, 16603, 16703, 16811, 16901, 17011, 17107, 17203, 17317, 17401, 17509, 17609, 17707, 17807, 17903, 18013, 18119, 18211, 18301, 18401, 18503, 18617, 18701, 18803, 18911, 19001, 19121, 19207, 19301, 19403, 19501, 19603, 19709, 19801, 19913, 20011, 20101 };

/* open addressing hash; slots are in the range 1..n, 0 is the null slot */
#define hash_fun(m, n) ((m)%(n) + 1)

static int hash_ins_adr(void *adr, struct rt_registry_entry *list, int lstlen, int nlink)
{
	int i, k;
	unsigned long flags;

	i = hash_fun((unsigned long)adr, lstlen);
	while (1) {
		k = i;
		while (list[k].adr > NOADR && list[k].adr != adr) {
COLLISION_COUNT();
			if (++k > lstlen) {
				k = 1;
			}
			if (k == i) {
				return 0;
			}
		}
		flags = rt_spin_lock_irqsave(&list_lock);
		if (list[k].adr == adr) {
			rt_spin_unlock_irqrestore(flags, &list_lock);
			return -k;
		} else if (list[k].adr <= NOADR) {
			list[k].adr       = adr;
			list[k].nlink     = nlink;
			list[nlink].alink = k;
			rt_spin_unlock_irqrestore(flags, &list_lock);
			return k;
		}
	}
}

static int hash_ins_name(unsigned long name, void *adr, int type, struct task_struct *lnxtsk, struct rt_registry_entry *list, int lstlen, int inc)
{
	int i, k;
	unsigned long flags;

	i = hash_fun(name, lstlen);
	while (1) {
		k = i;
		while (list[k].name > NONAME && list[k].name != name) {
COLLISION_COUNT();
			if (++k > lstlen) {
				k = 1;
			}
			if (k == i) {
				return 0;
			}
		}
		flags = rt_spin_lock_irqsave(&list_lock);
		if (list[k].name == name) {
			if (inc) {
				list[k].count++;
			}
			rt_spin_unlock_irqrestore(flags, &list_lock);
			return -k;
		} else if (list[k].name <= NONAME) {
			list[k].name  = name;
			list[k].type  = type;
			list[k].tsk   = lnxtsk;
			list[k].count = 1;
			list[k].alink = 0;
			rt_spin_unlock_irqrestore(flags, &list_lock);
			if (hash_ins_adr(adr, list, lstlen, k) <= 0) {
				/* the list lock was already released above */
				return 0;
			}
			return k;
		}
	}
}

static void *hash_find_name(unsigned long name, struct rt_registry_entry *list, long lstlen, int inc, int *slot)
{
	int i, k;
	unsigned long flags;

	i = hash_fun(name, lstlen);
	while (1) {
		k = i;
		while (list[k].name && list[k].name != name) {
COLLISION_COUNT();
			if (++k > lstlen) {
				k = 1;
			}
			if (k == i) {
				return NULL;
			}
		}
		flags = rt_spin_lock_irqsave(&list_lock);
		if (list[k].name == name) {
			if (inc) {
				list[k].count++;
			}
			rt_spin_unlock_irqrestore(flags, &list_lock);
			if (slot) {
				*slot = k;
			}
			return list[list[k].alink].adr;
		} else if (list[k].name <= NONAME) {
			rt_spin_unlock_irqrestore(flags, &list_lock);
			return NULL;
		}
	}
}
static unsigned long hash_find_adr(void *adr, struct rt_registry_entry *list, long lstlen, int inc)
{
	int i, k;
	unsigned long flags;

	i = hash_fun((unsigned long)adr, lstlen);
	while (1) {
		k = i;
		while (list[k].adr && list[k].adr != adr) {
COLLISION_COUNT();
			if (++k > lstlen) {
				k = 1;
			}
			if (k == i) {
				return 0;
			}
		}
		flags = rt_spin_lock_irqsave(&list_lock);
		if (list[k].adr == adr) {
			if (inc) {
				list[list[k].nlink].count++;
			}
			rt_spin_unlock_irqrestore(flags, &list_lock);
			return list[list[k].nlink].name;
		} else if (list[k].adr <= NOADR) {
			rt_spin_unlock_irqrestore(flags, &list_lock);
			return 0;
		}
	}
}

static int hash_rem_name(unsigned long name, struct rt_registry_entry *list, long lstlen, int dec)
{
	int i, k;
	unsigned long flags;

	k = i = hash_fun(name, lstlen);
	while (list[k].name && list[k].name != name) {
COLLISION_COUNT();
		if (++k > lstlen) {
			k = 1;
		}
		if (k == i) {
			return 0;
		}
	}
	flags = rt_spin_lock_irqsave(&list_lock);
	if (list[k].name == name) {
		if (!dec || (list[k].count && !--list[k].count)) {
			int j;
			if ((i = k + 1) > lstlen) {
				i = 1;
			}
			list[k].name = !list[i].name ? 0UL : NONAME;
			if ((j = list[k].alink)) {
				if ((i = j + 1) > lstlen) {
					i = 1;
				}
				list[j].adr = !list[i].adr ? NULL : NOADR;
			}
		}
		if (dec) {
			k = list[k].count;
		}
		rt_spin_unlock_irqrestore(flags, &list_lock);
		return k;
	}
	rt_spin_unlock_irqrestore(flags, &list_lock);
	return dec;
}

static int hash_rem_adr(void *adr, struct rt_registry_entry *list, long lstlen, int dec)
{
	int i, k;
	unsigned long flags;

	k = i = hash_fun((unsigned long)adr, lstlen);
	while (list[k].adr && list[k].adr != adr) {
COLLISION_COUNT();
		if (++k > lstlen) {
			k = 1;
		}
		if (k == i) {
			return 0;
		}
	}
	flags = rt_spin_lock_irqsave(&list_lock);
	if (list[k].adr == adr) {
		if (!dec || (list[list[k].nlink].count && !--list[list[k].nlink].count)) {
			int j;
			if ((i = k + 1) > lstlen) {
				i = 1;
			}
			list[k].adr = !list[i].adr ? NULL : NOADR;
			j = list[k].nlink;
			if ((i = j + 1) > lstlen) {
				i = 1;
			}
			list[j].name = !list[i].name ? 0UL : NONAME;
		}
		if (dec) {
			k = list[list[k].nlink].count;
		}
		rt_spin_unlock_irqrestore(flags, &list_lock);
		return k;
	}
	rt_spin_unlock_irqrestore(flags, &list_lock);
	return dec;
}
01450 
01451 static inline int registr(unsigned long name, void *adr, int type, struct task_struct *lnxtsk)
01452 {
01453     return abs(hash_ins_name(name, adr, type, lnxtsk, lxrt_list, max_slots, 1));
01454 }
01455 
01456 static inline int drg_on_name(unsigned long name)
01457 {
01458     return hash_rem_name(name, lxrt_list, max_slots, 0);
01459 } 
01460 
01461 static inline int drg_on_name_cnt(unsigned long name)
01462 {
01463     return hash_rem_name(name, lxrt_list, max_slots, -EFAULT);
01464 } 
01465 
01466 static inline int drg_on_adr(void *adr)
01467 {
01468     return hash_rem_adr(adr, lxrt_list, max_slots, 0);
01469 } 
01470 
01471 static inline int drg_on_adr_cnt(void *adr)
01472 {
01473     return hash_rem_adr(adr, lxrt_list, max_slots, -EFAULT);
01474 } 
01475 
01476 static inline unsigned long get_name(void *adr)
01477 {
01478     static unsigned long nameseed = 3518743764UL;
01479     if (!adr) {
01480         unsigned long flags;
01481         unsigned long name;
01482         flags = rt_spin_lock_irqsave(&list_lock);
01483         if ((name = ++nameseed) == 0xFFFFFFFFUL) {
01484             nameseed = 3518743764UL;
01485         }
01486         rt_spin_unlock_irqrestore(flags, &list_lock);
01487         return name;
01488     } else {
01489         return hash_find_adr(adr, lxrt_list, max_slots, 0);
01490     }
01491     return 0;
01492 } 
01493 
01494 static inline void *get_adr(unsigned long name)
01495 {
01496     return hash_find_name(name, lxrt_list, max_slots, 0, NULL);
01497 } 
01498 
01499 static inline void *get_adr_cnt(unsigned long name)
01500 {
01501     return hash_find_name(name, lxrt_list, max_slots, 1, NULL);
01502 } 
01503 
01504 static inline int get_type(unsigned long name)
01505 {
01506     int slot;
01507 
01508     if (hash_find_name(name, lxrt_list, max_slots, 0, &slot)) {
01509         return lxrt_list[slot].type;
01510     }
01511         return -EINVAL;
01512 }
01513 
01514 unsigned long is_process_registered(struct task_struct *lnxtsk)
01515 {
01516     void *adr = lnxtsk->rtai_tskext(TSKEXT0);
01517     return adr ? hash_find_adr(adr, lxrt_list, max_slots, 0) : 0;
01518 }
01519 
01520 int rt_get_registry_slot(int slot, struct rt_registry_entry *entry)
01521 {
01522     unsigned long flags;
01523     flags = rt_spin_lock_irqsave(&list_lock);
01524     if (lxrt_list[slot].name > NONAME) {
01525         *entry = lxrt_list[slot];
01526         entry->adr = lxrt_list[entry->alink].adr;
01527         rt_spin_unlock_irqrestore(flags, &list_lock);
01528         return slot;
01529     }
01530     rt_spin_unlock_irqrestore(flags, &list_lock);
01531     return 0;
01532 }
01533 
01534 int rt_registry_alloc(void)
01535 {
01536     if ((max_slots = (MAX_SLOTS + PRIMES_TAB_GRANULARITY - 1)/(PRIMES_TAB_GRANULARITY)) >= sizeof(primes)/sizeof(primes[0])) {
01537         printk("REGISTRY TABLE TOO LARGE FOR AVAILABLE PRIMES\n");
01538         return -ENOMEM;
01539     }
01540     max_slots = primes[max_slots];
01541     if (!(lxrt_list = vmalloc((max_slots + 1)*sizeof(struct rt_registry_entry)))) {
01542         printk("NO MEMORY FOR REGISTRY TABLE\n");
01543         return -ENOMEM;
01544     }
01545     memset(lxrt_list, 0, (max_slots + 1)*sizeof(struct rt_registry_entry));
01546     return 0;
01547 }
01548 
01549 void rt_registry_free(void)
01550 {
01551     if (lxrt_list) {
01552         vfree(lxrt_list);
01553     }
01554 }
01555 #else
01556 volatile int max_slots;
01557 static struct rt_registry_entry *lxrt_list;
01558 static spinlock_t list_lock = SPIN_LOCK_UNLOCKED;
01559 
01560 int rt_registry_alloc(void)
01561 {
01562     if (!(lxrt_list = vmalloc((MAX_SLOTS + 1)*sizeof(struct rt_registry_entry)))) {
01563         printk("NO MEMORY FOR REGISTRY TABLE\n");
01564         return -ENOMEM;
01565     }
01566     memset(lxrt_list, 0, (MAX_SLOTS + 1)*sizeof(struct rt_registry_entry));
01567     return 0;
01568 }
01569 
01570 void rt_registry_free(void)
01571 {
01572     if (lxrt_list) {
01573         vfree(lxrt_list);
01574     }
01575 }
01576 
01577 static inline int registr(unsigned long name, void *adr, int type, struct task_struct *tsk)
01578 {
01579 /*
01580  * Register a resource. This allows other programs (RTAI and/or user space)
01581  * to use the same resource because they can find the address from the name.
01582  */
01583     unsigned long flags;
01584     int i, slot;
01585     /* Index 0 is reserved for the null slot. */
01586     while ((slot = max_slots) < MAX_SLOTS) {
01587         for (i = 1; i <= max_slots; i++) {
01588             if (lxrt_list[i].name == name) {
01589                 return 0;
01590             }
01591         }
01592         flags = rt_spin_lock_irqsave(&list_lock);
01593         if (slot == max_slots && max_slots < MAX_SLOTS) {
01594             slot = ++max_slots;
01595             lxrt_list[slot].name  = name;
01596             lxrt_list[slot].adr   = adr;
01597             lxrt_list[slot].tsk   = tsk;
01598             lxrt_list[slot].type  = type;
01599             lxrt_list[slot].count = 1;
01600             rt_spin_unlock_irqrestore(flags, &list_lock);
01601             return slot;
01602         }
01603         rt_spin_unlock_irqrestore(flags, &list_lock);
01604     }
01605     return 0;
01606 }
01607 
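/*
 * The fallback registr() above uses an optimistic idiom: it scans for a
 * duplicate name without holding the lock, then takes the lock and commits
 * only if max_slots has not moved in the meantime, retrying otherwise. The
 * same check/lock/recheck pattern in isolation, as a minimal sketch with a
 * hypothetical example_append():
 */
#if 0	/* Illustrative sketch only, not part of the build. */
static int example_append(unsigned long name, void *adr)
{
	unsigned long flags;
	int snapshot;

	while ((snapshot = max_slots) < MAX_SLOTS) {
		/* ... unlocked duplicate scan of slots 1..max_slots ... */
		flags = rt_spin_lock_irqsave(&list_lock);
		if (snapshot == max_slots && max_slots < MAX_SLOTS) {
			/* the table did not move under us: commit */
			lxrt_list[++max_slots].name = name;
			lxrt_list[max_slots].adr = adr;
			rt_spin_unlock_irqrestore(flags, &list_lock);
			return max_slots;
		}
		rt_spin_unlock_irqrestore(flags, &list_lock);	/* lost the race: rescan */
	}
	return 0;
}
#endif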
01608 static inline int drg_on_name(unsigned long name)
01609 {
01610     unsigned long flags;
01611     int slot;
01612     for (slot = 1; slot <= max_slots; slot++) {
01613         flags = rt_spin_lock_irqsave(&list_lock);
01614         if (lxrt_list[slot].name == name) {
01615             if (slot < max_slots) {
01616                 lxrt_list[slot] = lxrt_list[max_slots];
01617             }
01618             if (max_slots > 0) {
01619                 max_slots--;
01620             }
01621             rt_spin_unlock_irqrestore(flags, &list_lock);
01622             return slot;
01623         }
01624         rt_spin_unlock_irqrestore(flags, &list_lock);
01625     }
01626     return 0;
01627 } 
01628 
01629 static inline int drg_on_name_cnt(unsigned long name)
01630 {
01631     unsigned long flags;
01632     int slot, count;
01633     for (slot = 1; slot <= max_slots; slot++) {
01634         flags = rt_spin_lock_irqsave(&list_lock);
01635         if (lxrt_list[slot].name == name && lxrt_list[slot].count > 0 && !(count = --lxrt_list[slot].count)) {
01636             if (slot < max_slots) {
01637                 lxrt_list[slot] = lxrt_list[max_slots];
01638             }
01639             if (max_slots > 0) {
01640                 max_slots--;
01641             }
01642             rt_spin_unlock_irqrestore(flags, &list_lock);
01643             return count;
01644         }
01645         rt_spin_unlock_irqrestore(flags, &list_lock);
01646     }
01647     return -EFAULT;
01648 } 
01649 
01650 static inline int drg_on_adr(void *adr)
01651 {
01652     unsigned long flags;
01653     int slot;
01654     for (slot = 1; slot <= max_slots; slot++) {
01655         flags = rt_spin_lock_irqsave(&list_lock);
01656         if (lxrt_list[slot].adr == adr) {
01657             if (slot < max_slots) {
01658                 lxrt_list[slot] = lxrt_list[max_slots];
01659             }
01660             if (max_slots > 0) {
01661                 max_slots--;
01662             }
01663             rt_spin_unlock_irqrestore(flags, &list_lock);
01664             return slot;
01665         }
01666         rt_spin_unlock_irqrestore(flags, &list_lock);
01667     }
01668     return 0;
01669 } 
01670 
01671 static inline int drg_on_adr_cnt(void *adr)
01672 {
01673     unsigned long flags;
01674     int slot, count;
01675     for (slot = 1; slot <= max_slots; slot++) {
01676         flags = rt_spin_lock_irqsave(&list_lock);
01677         if (lxrt_list[slot].adr == adr && lxrt_list[slot].count > 0 && !(count = --lxrt_list[slot].count)) {
01678             if (slot < max_slots) {
01679                 lxrt_list[slot] = lxrt_list[max_slots];
01680             }
01681             if (max_slots > 0) {
01682                 max_slots--;
01683             }
01684             rt_spin_unlock_irqrestore(flags, &list_lock);
01685             return count;
01686         }
01687         rt_spin_unlock_irqrestore(flags, &list_lock);
01688     }
01689     return -EFAULT;
01690 } 
01691 
01692 static inline unsigned long get_name(void *adr)
01693 {
01694     static unsigned long nameseed = 3518743764UL;
01695     int slot;
01696     if (!adr) {
01697         unsigned long flags;
01698         unsigned long name;
01699         flags = rt_spin_lock_irqsave(&list_lock);
01700         if ((name = ++nameseed) == 0xFFFFFFFFUL) {
01701             nameseed = 3518743764UL;
01702         }
01703         rt_spin_unlock_irqrestore(flags, &list_lock);
01704         return name;
01705     }
01706     for (slot = 1; slot <= max_slots; slot++) {
01707         if (lxrt_list[slot].adr == adr) {
01708             return lxrt_list[slot].name;
01709         }
01710     }
01711     return 0;
01712 } 
01713 
01714 static inline void *get_adr(unsigned long name)
01715 {
01716     int slot;
01717     for (slot = 1; slot <= max_slots; slot++) {
01718         if (lxrt_list[slot].name == name) {
01719             return lxrt_list[slot].adr;
01720         }
01721     }
01722     return 0;
01723 } 
01724 
01725 static inline void *get_adr_cnt(unsigned long name)
01726 {
01727     unsigned long flags;
01728     int slot;
01729     for (slot = 1; slot <= max_slots; slot++) {
01730         flags = rt_spin_lock_irqsave(&list_lock);
01731         if (lxrt_list[slot].name == name) {
01732             ++lxrt_list[slot].count;
01733             rt_spin_unlock_irqrestore(flags, &list_lock);
01734             return lxrt_list[slot].adr;
01735         }
01736         rt_spin_unlock_irqrestore(flags, &list_lock);
01737     }
01738     return 0;
01739 } 
01740 
01741 static inline int get_type(unsigned long name)
01742 {
01743     int slot;
01744     for (slot = 1; slot <= max_slots; slot++) {
01745         if (lxrt_list[slot].name == name) {
01746             return lxrt_list[slot].type;
01747         }
01748     }
01749     return -EINVAL;
01750 }
01751 
01752 unsigned long is_process_registered(struct task_struct *tsk)
01753 {
01754     void *adr;
01755 
01756     if ((adr = tsk->rtai_tskext(TSKEXT0))) {
01757         int slot;
01758         for (slot = 1; slot <= max_slots; slot++) {
01759             if (lxrt_list[slot].adr == adr) {
01760                 return lxrt_list[slot].name;
01761             }
01762         }
01763     }
01764     return 0;
01765 }
01766 
01767 int rt_get_registry_slot(int slot, struct rt_registry_entry *entry)
01768 {
01769     unsigned long flags;
01770 
01771     if (!entry) {
01772         return 0;
01773     }
01774     flags = rt_spin_lock_irqsave(&list_lock);
01775     if (slot > 0 && slot <= max_slots) {
01776         if (lxrt_list[slot].name != 0) {
01777             *entry = lxrt_list[slot];
01778             rt_spin_unlock_irqrestore(flags, &list_lock);
01779             return slot;
01780         }
01781     }
01782     rt_spin_unlock_irqrestore(flags, &list_lock);
01783 
01784     return 0;
01785 }
01786 #endif
01787 
01788 /**
01789  * @ingroup lxrt
01790  * Register an object.
01791  *
01792  * rt_register registers the object pointed to by @a adr so that it can be
01793  * identified with @a name.
01794  *
01795  * @return a positive number on success, 0 on failure.
01796  */
01797 int rt_register(unsigned long name, void *adr, int type, struct task_struct *t)
01798 {
01799 /*
01800  * Register a resource. This function provides the service to all RTAI tasks.
01801  */
01802     return get_adr(name) ? 0 : registr(name, adr, type, t);
01803 }
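/*
 * For instance, a module that wants a semaphore visible to other RTAI and
 * LXRT programs would typically pack a six-character mnemonic with nam2num()
 * (from rtai_nam2num.h) and register the object's address. A hedged sketch;
 * my_sem is hypothetical and SEM is assumed from rtai_sem.h:
 */
#if 0	/* Illustrative sketch only, not part of the build. */
#include <rtai_sem.h>
#include <rtai_nam2num.h>

static SEM my_sem;

static int example_publish_sem(void)
{
	/* rt_register returns a positive slot number, or 0 if "MYSEM" is taken */
	return rt_register(nam2num("MYSEM"), &my_sem, IS_SEM, NULL) ? 0 : -EBUSY;
}
#endif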
01804 
01805 
01806 /**
01807  * @ingroup lxrt
01808  * Deregister an object by its name.
01809  *
01810  * rt_drg_on_name deregisters the object identified by its @a name.
01811  *
01812  * @return a positive number on success, 0 on failure.
01813  */
01814 int rt_drg_on_name(unsigned long name)
01815 {
01816     return drg_on_name(name);
01817 } 
01818 
01819 /**
01820  * @ingroup lxrt
01821  * Deregister an object by its address.
01822  *
01823  * rt_drg_on_adr deregisters the object identified by its @a adr.
01824  *
01825  * @return a positive number on success, 0 on failure.
01826  */
01827 int rt_drg_on_adr(void *adr)
01828 {
01829     return drg_on_adr(adr);
01830 } 
01831 
01832 RTAI_SYSCALL_MODE unsigned long rt_get_name(void *adr)
01833 {
01834     return get_name(adr);
01835 } 
01836 
01837 RTAI_SYSCALL_MODE void *rt_get_adr(unsigned long name)
01838 {
01839     return get_adr(name);
01840 }
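/*
 * The usual consumer-side round trip, continuing the hypothetical "MYSEM"
 * sketch above: look the address up by name, use the object, and drop the
 * registry entry when the object goes away.
 */
#if 0	/* Illustrative sketch only, not part of the build. */
static void example_consume(void)
{
	SEM *sem = rt_get_adr(nam2num("MYSEM"));	/* 0 if not registered */
	if (sem) {
		/* ... use the semaphore ... */
		rt_drg_on_name(nam2num("MYSEM"));	/* remove the registry entry */
	}
}
#endif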
01841 
01842 int rt_get_type(unsigned long name)
01843 {
01844     return get_type(name);
01845 }
01846 
01847 int rt_drg_on_name_cnt(unsigned long name)
01848 {
01849     return drg_on_name_cnt(name);
01850 }
01851 
01852 int rt_drg_on_adr_cnt(void *adr)
01853 {
01854     return drg_on_adr_cnt(adr);
01855 }
01856 
01857 void *rt_get_adr_cnt(unsigned long name)
01858 {
01859     return get_adr_cnt(name);
01860 }
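/*
 * The _cnt variants maintain a small per-slot reference count:
 * rt_get_adr_cnt() increments it on a successful lookup, while
 * rt_drg_on_name_cnt()/rt_drg_on_adr_cnt() decrement it and free the slot
 * only when the count reaches zero. A hedged usage sketch:
 */
#if 0	/* Illustrative sketch only, not part of the build. */
static void example_counted(unsigned long name)
{
	void *adr = rt_get_adr_cnt(name);	/* lookup and count++ */
	if (adr) {
		/* ... the slot is pinned against counted removals here ... */
		rt_drg_on_name_cnt(name);	/* count--, slot freed at zero */
	}
}
#endif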
01861 
01862 #include <rtai_lxrt.h>
01863 
01864 extern struct rt_fun_entry rt_fun_lxrt[];
01865 
01866 void krtai_objects_release(void)
01867 {
01868     int slot;
01869     struct rt_registry_entry entry;
01870     char name[8], *type;
01871 
01872     for (slot = 1; slot <= max_slots; slot++) {
01873         if (rt_get_registry_slot(slot, &entry)) {
01874             switch (entry.type) {
01875                 case IS_TASK:
01876                     type = "TASK";
01877                     rt_named_task_delete(entry.adr);
01878                     break;
01879                 case IS_SEM:
01880                     type = "SEM ";
01881                     ((void (*)(void *))rt_fun_lxrt[NAMED_SEM_DELETE].fun)(entry.adr);
01882                     break;
01883                 case IS_RWL:
01884                     type = "RWL ";
01885                     ((void (*)(void *))rt_fun_lxrt[NAMED_RWL_DELETE].fun)(entry.adr);
01886                     break;
01887                 case IS_SPL:
01888                     type = "SPL ";
01889                     ((void (*)(void *))rt_fun_lxrt[NAMED_SPL_DELETE].fun)(entry.adr);
01890                     break;
01891                 case IS_MBX:
01892                     type = "MBX ";
01893                     ((void (*)(void *))rt_fun_lxrt[NAMED_MBX_DELETE].fun)(entry.adr);
01894                     break;
01895                 case IS_PRX:
01896                     type = "PRX ";
01897                     ((void (*)(void *))rt_fun_lxrt[PROXY_DETACH].fun)(entry.adr);
01898                     rt_drg_on_adr(entry.adr);
01899                     break;
01900                 default:
01901                     type = "ALIEN";
01902                     break;
01903             }
01904             num2nam(entry.name, name);
01905             rt_printk("SCHED releases registered named %s %s\n", type, name);
01906         }
01907     }
01908 }
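/*
 * The printout above relies on num2nam() to unpack the unsigned long
 * identifier back into (at most six) ASCII characters; nam2num() is the
 * inverse. A small round-trip sketch:
 */
#if 0	/* Illustrative sketch only, not part of the build. */
#include <rtai_nam2num.h>

static void example_roundtrip(void)
{
	char buf[8];				/* room for 6 chars plus '\0' */
	unsigned long id = nam2num("MBX01");	/* pack ASCII -> unsigned long */

	num2nam(id, buf);			/* unpack: buf now holds "MBX01" */
	rt_printk("name %s <-> id 0x%lx\n", buf, id);
}
#endif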
01909 
01910 /* +++++++++++++++++++++++++ SUPPORT FOR IRQ TASKS ++++++++++++++++++++++++++ */
01911 
01912 #ifdef CONFIG_RTAI_USI
01913 
01914 #include <rtai_tasklets.h>
01915 
01916 extern struct rtai_realtime_irq_s rtai_realtime_irq[];
01917 
01918 RTAI_SYSCALL_MODE int rt_irq_wait(unsigned irq)
01919 {   
01920     int retval;
01921     retval = rt_task_suspend(0);
01922     return rtai_realtime_irq[irq].handler ? -retval : RT_IRQ_TASK_ERR;
01923 }
01924 
01925 RTAI_SYSCALL_MODE int rt_irq_wait_if(unsigned irq)
01926 {
01927     int retval;
01928     retval = rt_task_suspend_if(0);
01929     return rtai_realtime_irq[irq].handler ? -retval : RT_IRQ_TASK_ERR;
01930 }
01931 
01932 RTAI_SYSCALL_MODE int rt_irq_wait_until(unsigned irq, RTIME time)
01933 {
01934     int retval;
01935     retval = rt_task_suspend_until(0, time);
01936     return rtai_realtime_irq[irq].handler ? -retval : RT_IRQ_TASK_ERR;
01937 }
01938 
01939 RTAI_SYSCALL_MODE int rt_irq_wait_timed(unsigned irq, RTIME delay)
01940 {
01941     return rt_irq_wait_until(irq, get_time() + delay);
01942 }
01943 
01944 RTAI_SYSCALL_MODE void rt_irq_signal(unsigned irq)
01945 {
01946     if (rtai_realtime_irq[irq].handler) {
01947         rt_task_resume((void *)rtai_realtime_irq[irq].cookie);
01948     }
01949 }
01950 
01951 static int rt_irq_task_handler(unsigned irq, RT_TASK *irq_task)
01952 {
01953     rt_task_resume(irq_task);
01954     return 0;
01955 }
01956 
01957 RTAI_SYSCALL_MODE int rt_request_irq_task (unsigned irq, void *handler, int type, int affine2task)
01958 {
01959     RT_TASK *task;
01960     if (!handler) {
01961         task = _rt_whoami();
01962     } else {
01963         task = type == RT_IRQ_TASKLET ? ((struct rt_tasklet_struct *)handler)->task : handler;
01964     }
01965     if (affine2task) {
01966         rt_assign_irq_to_cpu(irq, (1 << task->runnable_on_cpus));
01967     }
01968     return rt_request_irq(irq, (void *)rt_irq_task_handler, task, 0);
01969 }
01970 
01971 RTAI_SYSCALL_MODE int rt_release_irq_task (unsigned irq)
01972 {
01973     int retval;
01974     RT_TASK *task;
01975     task = (void *)rtai_realtime_irq[irq].cookie;
01976     if (!(retval = rt_release_irq(irq))) {
01977         rt_task_resume(task);
01978         rt_reset_irq_to_sym_mode(irq);
01979     }
01980     return retval;
01981 }
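/*
 * Put together, these calls support the classic interrupt-server pattern: a
 * hard real-time task binds itself to a line, then blocks in rt_irq_wait()
 * until the interrupt (via rt_irq_task_handler() above) or rt_irq_signal()
 * resumes it. A hedged sketch of such a server; with a NULL handler the type
 * argument is ignored and the calling task is used, and rt_request_irq() is
 * assumed to return 0 on success, as rt_release_irq() does above:
 */
#if 0	/* Illustrative sketch only, not part of the build. */
static void example_irq_server(unsigned irq)
{
	/* bind the calling task to the line, with CPU affinity */
	if (rt_request_irq_task(irq, NULL, 0, 1)) {
		return;
	}
	while (rt_irq_wait(irq) != RT_IRQ_TASK_ERR) {
		/* ... acknowledge and service the device ... */
	}
	/* RT_IRQ_TASK_ERR means the line was released under us */
}
#endif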
01982 
01983 //extern void usp_request_rtc(int, void *);
01984 RTAI_SYSCALL_MODE void usp_request_rtc(int rtc_freq, void *handler)
01985 {
01986     rt_request_rtc(rtc_freq, !handler || handler == (void *)1 ? handler : rt_irq_signal);
01987 }
01989 
01990 #endif
01991 
01992 /* +++++++++++++++++ SUPPORT FOR THE LINUX SYSCALL SERVER +++++++++++++++++++ */
01993 
01994 RTAI_SYSCALL_MODE void rt_set_linux_syscall_mode(long mode, void (*callback_fun)(long, long))
01995 {
01996     rt_put_user(callback_fun, &(RT_CURRENT->linux_syscall_server)->callback_fun);
01997     rt_put_user(mode, &(RT_CURRENT->linux_syscall_server)->mode);
01998 }
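/*
 * A hedged usage sketch: a hard task switching itself to asynchronous Linux
 * syscall service. The meaning of the two long arguments handed to the
 * callback is assumed here (syscall number and its return value), and
 * my_syscall_cb is hypothetical:
 */
#if 0	/* Illustrative sketch only, not part of the build. */
static void my_syscall_cb(long a, long b)
{
	/* assumption: a = syscall number, b = its return value */
	rt_printk("linux syscall %ld completed, returned %ld\n", a, b);
}

static void example_go_async(void)
{
	rt_set_linux_syscall_mode(ASYNC_LINUX_SYSCALL, my_syscall_cb);
}
#endif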
01999 
02000 void rt_exec_linux_syscall(RT_TASK *rt_current, struct linux_syscalls_list *syscalls, struct pt_regs *regs)
02001 {
02002     int in, sz;
02003     struct mode_regs moderegs;
02004     struct { int in, out, nr, mode; RT_TASK *serv; } from;
02005 
02006     rt_copy_from_user(&from, syscalls, sizeof(from));
02007     in = from.in;
02008     if (++from.in >= from.nr) {
02009         from.in = 0;
02010     }
02011     if (from.mode == ASYNC_LINUX_SYSCALL && from.in == from.out) {
02012         regs->LINUX_SYSCALL_RETREG = -1;
02013         return;
02014     }
02015 
02016 #if defined(__NR_socketcall)
02017     if (regs->LINUX_SYSCALL_NR == __NR_socketcall) {
02018         memcpy(moderegs.pacargs, (void *)regs->LINUX_SYSCALL_REG2, sizeof(moderegs.pacargs));
02019         moderegs.regs[2] = (long)(&syscalls->moderegs[in].pacargs);
02020         sz = sizeof(moderegs);
02021     } else
02022 #endif
02023     {
02024         moderegs.regs[2] = regs->LINUX_SYSCALL_REG2;
02025         sz = offsetof(struct mode_regs, pacargs);
02026     }
02027 
02028     moderegs.regs[0] = regs->LINUX_SYSCALL_NR;
02029     moderegs.regs[1] = regs->LINUX_SYSCALL_REG1;
02030     moderegs.regs[3] = regs->LINUX_SYSCALL_REG3;
02031     moderegs.regs[4] = regs->LINUX_SYSCALL_REG4;
02032     moderegs.regs[5] = regs->LINUX_SYSCALL_REG5;
02033     moderegs.regs[6] = regs->LINUX_SYSCALL_REG6;
02034     moderegs.mode = from.mode;
02035     rt_copy_to_user(&syscalls->moderegs[in].regs, &moderegs, sz);
02036     rt_put_user(from.in, &syscalls->in);
02037     if (from.serv->suspdepth >= -from.nr) {
02038         from.serv->priority = rt_current->priority + BASE_SOFT_PRIORITY;
02039         rt_task_resume(from.serv);
02040     }
02041     if (from.mode == SYNC_LINUX_SYSCALL) {
02042         rt_task_suspend(rt_current);
02043         rt_get_user(regs->LINUX_SYSCALL_RETREG, &syscalls->retval);
02044     } else {
02045         regs->LINUX_SYSCALL_RETREG = -EINPROGRESS;
02046     }
02047 }
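/*
 * The in/out bookkeeping above is a plain circular buffer over nr slots: the
 * producer advances "in" with wraparound, and an asynchronous request is
 * refused when the advanced "in" would catch up with "out" (ring full). The
 * same arithmetic in isolation, as a minimal sketch:
 */
#if 0	/* Illustrative sketch only, not part of the build. */
static int example_ring_advance(int in, int out, int nr, int *next)
{
	if (++in >= nr) {
		in = 0;			/* wrap around */
	}
	if (in == out) {
		return -1;		/* full: the async path above bails out too */
	}
	*next = in;
	return 0;
}
#endif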
02048 
02049 /* ++++++++++++++++++++ END OF COMMON FUNCTIONALITIES +++++++++++++++++++++++ */
02050 
02051 #ifdef CONFIG_PROC_FS
02052 #include <linux/stat.h>
02053 #include <linux/proc_fs.h>
02054 #include <rtai_proc_fs.h>
02055 #include <rtai_nam2num.h>
02056 
02057 extern struct proc_dir_entry *rtai_proc_root;
02058 
02059 /* ----------------------< proc filesystem section >----------------------*/
02060 
02061 static int rtai_read_lxrt(char *page, char **start, off_t off, int count, int *eof, void *data)
02062 {
02063     PROC_PRINT_VARS;
02064     struct rt_registry_entry entry;
02065     char *type_name[] = { "TASK", "SEM", "RWL", "SPL", "MBX", "PRX", "BITS", "TBX", "HPCK" };
02066     unsigned int i = 1;
02067     char name[8];
02068 
02069     PROC_PRINT("\nRTAI LXRT Information.\n\n");
02070     PROC_PRINT("    MAX_SLOTS = %d\n\n", MAX_SLOTS);
02071 
02072 //                  1234 123456 0x12345678 ALIEN  0x12345678 0x12345678   1234567      1234567
02073 
02074     PROC_PRINT("                                         Linux_Owner         Parent PID\n");
02075     PROC_PRINT("Slot Name   ID         Type   RT_Handle    Pointer   Tsk_PID   MEM_Sz   USG Cnt\n");
02076     PROC_PRINT("-------------------------------------------------------------------------------\n");
02077     for (i = 1; i <= max_slots; i++) {
02078         if (rt_get_registry_slot(i, &entry)) {
02079             num2nam(entry.name, name);
02080             PROC_PRINT("%4d %-6.6s 0x%08lx %-6.6s 0x%p 0x%p  %7d   %8d %7d\n",
02081                 i,                              // the slot number
02082                 name,                           // the name in 6 char ascii
02083                 entry.name,                     // the name as unsigned long hex
02084                 entry.type >= PAGE_SIZE ? "SHMEM" :
02085                 entry.type >= sizeof(type_name)/sizeof(char *) ?
02086                 "ALIEN" :
02087                 type_name[entry.type],          // the type
02088                 entry.adr,                      // the RT handle
02089                 entry.tsk,                      // the owner task pointer
02090                 entry.tsk ? entry.tsk->pid : 0, // the owner PID
02091                 entry.type == IS_TASK && ((RT_TASK *)entry.adr)->lnxtsk ? (((RT_TASK *)entry.adr)->lnxtsk)->pid : entry.type >= PAGE_SIZE ? entry.type : 0, entry.count);
02092         }
02093     }
02094     PROC_PRINT_DONE;
02095 }  /* End function - rtai_read_lxrt */
02096 
02097 int rtai_proc_lxrt_register(void)
02098 {
02099     struct proc_dir_entry *proc_lxrt_ent;
02100 
02101     proc_lxrt_ent = create_proc_entry("names", S_IFREG|S_IRUGO|S_IWUSR, rtai_proc_root);
02102     if (!proc_lxrt_ent) {
02103         printk("Unable to initialize /proc/rtai/names\n");
02104         return -1;
02105     }
02106     proc_lxrt_ent->read_proc = rtai_read_lxrt;
02107     return 0;
02108 }  /* End function - rtai_proc_lxrt_register */
02110 
02111 
02112 void rtai_proc_lxrt_unregister(void)
02113 {
02114     remove_proc_entry("names", rtai_proc_root);
02115 }  /* End function - rtai_proc_lxrt_unregister */
02116 
02117 /* ------------------< end of proc filesystem section >------------------*/
02118 #endif /* CONFIG_PROC_FS */
02119 
02120 #ifndef CONFIG_KBUILD
02121 #define CONFIG_KBUILD
02122 #endif
02123 
02124 #ifdef CONFIG_KBUILD
02125 
02126 EXPORT_SYMBOL(rt_set_sched_policy);
02127 EXPORT_SYMBOL(rt_get_prio);
02128 EXPORT_SYMBOL(rt_get_inher_prio);
02129 EXPORT_SYMBOL(rt_get_priorities);
02130 EXPORT_SYMBOL(rt_change_prio);
02131 EXPORT_SYMBOL(rt_whoami);
02132 EXPORT_SYMBOL(rt_task_yield);
02133 EXPORT_SYMBOL(rt_task_suspend);
02134 EXPORT_SYMBOL(rt_task_suspend_if);
02135 EXPORT_SYMBOL(rt_task_suspend_until);
02136 EXPORT_SYMBOL(rt_task_suspend_timed);
02137 EXPORT_SYMBOL(rt_task_resume);
02138 EXPORT_SYMBOL(rt_get_task_state);
02139 EXPORT_SYMBOL(rt_linux_use_fpu);
02140 EXPORT_SYMBOL(rt_task_use_fpu);
02141 EXPORT_SYMBOL(rt_task_signal_handler);
02142 EXPORT_SYMBOL(rt_gettimeorig);
02143 EXPORT_SYMBOL(rt_task_make_periodic_relative_ns);
02144 EXPORT_SYMBOL(rt_task_make_periodic);
02145 EXPORT_SYMBOL(rt_task_wait_period);
02146 EXPORT_SYMBOL(rt_task_set_resume_end_times);
02147 EXPORT_SYMBOL(rt_set_resume_time);
02148 EXPORT_SYMBOL(rt_set_period);
02149 EXPORT_SYMBOL(next_period);
02150 EXPORT_SYMBOL(rt_busy_sleep);
02151 EXPORT_SYMBOL(rt_sleep);
02152 EXPORT_SYMBOL(rt_sleep_until);
02153 EXPORT_SYMBOL(rt_task_masked_unblock);
02154 EXPORT_SYMBOL(rt_nanosleep);
02155 EXPORT_SYMBOL(rt_enq_ready_edf_task);
02156 EXPORT_SYMBOL(rt_enq_ready_task);
02157 EXPORT_SYMBOL(rt_renq_ready_task);
02158 EXPORT_SYMBOL(rt_rem_ready_task);
02159 EXPORT_SYMBOL(rt_rem_ready_current);
02160 EXPORT_SYMBOL(rt_enq_timed_task);
02161 EXPORT_SYMBOL(rt_wake_up_timed_tasks);
02162 EXPORT_SYMBOL(rt_rem_timed_task);
02163 EXPORT_SYMBOL(rt_enqueue_blocked);
02164 EXPORT_SYMBOL(rt_dequeue_blocked);
02165 EXPORT_SYMBOL(rt_renq_current);
02166 EXPORT_SYMBOL(rt_named_task_init);
02167 EXPORT_SYMBOL(rt_named_task_init_cpuid);
02168 EXPORT_SYMBOL(rt_named_task_delete);
02169 EXPORT_SYMBOL(is_process_registered);
02170 EXPORT_SYMBOL(rt_register);
02171 EXPORT_SYMBOL(rt_drg_on_name);
02172 EXPORT_SYMBOL(rt_drg_on_adr);
02173 EXPORT_SYMBOL(rt_get_name);
02174 EXPORT_SYMBOL(rt_get_adr);
02175 EXPORT_SYMBOL(rt_get_type);
02176 EXPORT_SYMBOL(rt_drg_on_name_cnt);
02177 EXPORT_SYMBOL(rt_drg_on_adr_cnt);
02178 EXPORT_SYMBOL(rt_get_adr_cnt);
02179 EXPORT_SYMBOL(rt_get_registry_slot);
02180 
02181 EXPORT_SYMBOL(rt_task_init);
02182 EXPORT_SYMBOL(rt_task_init_cpuid);
02183 EXPORT_SYMBOL(rt_set_runnable_on_cpus);
02184 EXPORT_SYMBOL(rt_set_runnable_on_cpuid);
02185 EXPORT_SYMBOL(rt_check_current_stack);
02186 EXPORT_SYMBOL(rt_schedule);
02187 EXPORT_SYMBOL(rt_spv_RMS);
02188 EXPORT_SYMBOL(rt_sched_lock);
02189 EXPORT_SYMBOL(rt_sched_unlock);
02190 EXPORT_SYMBOL(rt_task_delete);
02191 EXPORT_SYMBOL(rt_is_hard_timer_running);
02192 EXPORT_SYMBOL(rt_set_periodic_mode);
02193 EXPORT_SYMBOL(rt_set_oneshot_mode);
02194 EXPORT_SYMBOL(rt_get_timer_cpu);
02195 EXPORT_SYMBOL(start_rt_timer);
02196 EXPORT_SYMBOL(stop_rt_timer);
02197 EXPORT_SYMBOL(start_rt_apic_timers);
02198 EXPORT_SYMBOL(rt_sched_type);
02199 EXPORT_SYMBOL(rt_hard_timer_tick_count);
02200 EXPORT_SYMBOL(rt_hard_timer_tick_count_cpuid);
02201 EXPORT_SYMBOL(rt_set_task_trap_handler);
02202 EXPORT_SYMBOL(rt_get_time);
02203 EXPORT_SYMBOL(rt_get_time_cpuid);
02204 EXPORT_SYMBOL(rt_get_time_ns);
02205 EXPORT_SYMBOL(rt_get_time_ns_cpuid);
02206 EXPORT_SYMBOL(rt_get_cpu_time_ns);
02207 EXPORT_SYMBOL(rt_get_real_time);
02208 EXPORT_SYMBOL(rt_get_real_time_ns);
02209 EXPORT_SYMBOL(rt_get_base_linux_task);
02210 EXPORT_SYMBOL(rt_alloc_dynamic_task);
02211 EXPORT_SYMBOL(rt_register_watchdog);
02212 EXPORT_SYMBOL(rt_deregister_watchdog);
02213 EXPORT_SYMBOL(count2nano);
02214 EXPORT_SYMBOL(nano2count);
02215 EXPORT_SYMBOL(count2nano_cpuid);
02216 EXPORT_SYMBOL(nano2count_cpuid);
02217 
02218 EXPORT_SYMBOL(rt_kthread_init);
02219 EXPORT_SYMBOL(rt_kthread_init_cpuid);
02220 EXPORT_SYMBOL(rt_smp_linux_task);
02221 EXPORT_SYMBOL(rt_smp_current);
02222 EXPORT_SYMBOL(rt_smp_time_h);
02223 EXPORT_SYMBOL(rt_smp_oneshot_timer);
02224 EXPORT_SYMBOL(wake_up_srq);
02225 EXPORT_SYMBOL(set_rt_fun_entries);
02226 EXPORT_SYMBOL(reset_rt_fun_entries);
02227 EXPORT_SYMBOL(set_rt_fun_ext_index);
02228 EXPORT_SYMBOL(reset_rt_fun_ext_index);
02229 EXPORT_SYMBOL(max_slots);
02230 
02231 #ifdef CONFIG_SMP
02232 #endif /* CONFIG_SMP */
02233 
02234 #endif /* CONFIG_KBUILD */
