base/ipc/sem/sem.c

/**
 * @file
 * Semaphore functions.
 * @author Paolo Mantegazza
 *
 * @note Copyright (C) 1999-2008 Paolo Mantegazza
 * <mantegazza@aero.polimi.it>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * @ingroup sem
 */

/**
 * @ingroup sched
 * @defgroup sem Semaphore functions
 *
 *@{*/

#include <linux/kernel.h>
#include <linux/module.h>

#include <asm/uaccess.h>

#include <rtai_schedcore.h>
#include <rtai_prinher.h>
#include <rtai_sem.h>
#include <rtai_rwl.h>
#include <rtai_spl.h>

MODULE_LICENSE("GPL");

extern struct epoch_struct boot_epoch;

#ifdef CONFIG_RTAI_RT_POLL

#define WAKEUP_WAIT_ONE_POLLER(wakeup) \
    if (wakeup) rt_wakeup_pollers(&sem->poll_wait_one, 0);

#define WAKEUP_WAIT_ALL_POLLERS(wakeup) \
    do { \
        WAKEUP_WAIT_ONE_POLLER(wakeup) \
        if (sem->count == 1) rt_wakeup_pollers(&sem->poll_wait_all, 0);\
    } while (0)

#else

#define WAKEUP_WAIT_ONE_POLLER(wakeup)

#define WAKEUP_WAIT_ALL_POLLERS(wakeup)

#endif

#define CHECK_SEM_MAGIC(sem) \
do { if (sem->magic != RT_SEM_MAGIC) return RTE_OBJINV; } while (0)

/* +++++++++++++++++++++ ALL SEMAPHORES TYPES SUPPORT +++++++++++++++++++++++ */

/**
 * @anchor rt_typed_sem_init
 * @brief Initialize a specifically typed (counting, binary, resource)
 *    semaphore.
 *
 * rt_typed_sem_init initializes a semaphore @e sem of type @e type. A
 * semaphore can be used for communication and synchronization among
 * real time tasks. A negative semaphore value shows how many tasks
 * are blocked on the semaphore queue, waiting to be awakened by calls
 * to rt_sem_signal.
 *
 * @param sem must point to an allocated SEM structure.
 *
 * @param value is the initial value of the semaphore, always set to 1
 *    for a resource semaphore.
 *
 * @param type is the semaphore type and queuing policy. It can be the OR
 * of a semaphore kind: CNT_SEM for counting semaphores, BIN_SEM for binary
 * semaphores, RES_SEM for resource semaphores; and a queuing policy:
 * FIFO_Q or PRIO_Q for FIFO and priority queueing respectively.
 * Resource semaphores will enforce a PRIO_Q policy anyhow.
 *
 * Counting semaphores can register up to 0xFFFE events. Binary
 * semaphores do not count signalled events; their count will never
 * exceed 1, whatever number of events is signaled to them. Resource
 * semaphores are special binary semaphores suitable for managing
 * resources. The task that acquires a resource semaphore becomes its
 * owner, also called resource owner, since it is the only one capable
 * of manipulating the resource the semaphore is protecting. The owner
 * has its priority increased to that of any task blocking on a wait
 * on the semaphore. Such a feature, called priority inheritance,
 * ensures that a high priority task is never slaved to a lower
 * priority one, thus avoiding deadlocks due to priority
 * inversion. Resource semaphores can be recursed, i.e. their task
 * owner is not blocked by nested waits placed on an owned
 * resource. The owner must ensure that it signals the semaphore,
 * in reversed order, as many times as it waited on it. Note that
 * full priority inheritance is supported both for resource semaphores
 * and inter task messages, for a singly owned resource. Instead it
 * becomes an adaptive priority ceiling when a task owns multiple
 * resources, including messages sent to it. In such a case, in fact,
 * its priority is returned to its base one only when all such
 * resources are released and no message is waiting to be
 * received. This is a compromise design choice aimed at avoiding
 * extensive searches for the new priority to be inherited across
 * multiply owned resources and blocked tasks sending messages to
 * the owner. Such a solution will be implemented only if it proves
 * necessary. Note also that, to avoid @e deadlocks, a task owning a
 * resource semaphore cannot be suspended. Any @ref rt_task_suspend()
 * posed on it is just registered. An owner task will go into suspend
 * state only when it releases all the owned resources.
 *
 * @note If the legacy error return values scheme is used, RTAI counting
 *       semaphores assume that their counter will never exceed 0xFFFF,
 *       such a number being used to signal returns in error. Thus also
 *       the initial count value cannot be greater than 0xFFFF. The new
 *       error return scheme allows counts in the order of billions instead.
 *
 */
RTAI_SYSCALL_MODE void rt_typed_sem_init(SEM *sem, int value, int type)
{
    sem->magic = RT_SEM_MAGIC;
    sem->count = value;
    sem->restype = 0;
    if ((type & RES_SEM) == RES_SEM) {
        sem->qtype = 0;
    } else {
        sem->qtype = (type & FIFO_Q) ? 1 : 0;
    }
    type = (type & 3) - 2;
    if ((sem->type = type) < 0 && value > 1) {
        sem->count = 1;
    } else if (type > 0) {
        sem->type = sem->count = 1;
        sem->restype = value;
    }
    sem->queue.prev = &(sem->queue);
    sem->queue.next = &(sem->queue);
    sem->queue.task = sem->owndby = NULL;

    sem->resq.prev = sem->resq.next = &sem->resq;
    sem->resq.task = (void *)&sem->queue;
#ifdef CONFIG_RTAI_RT_POLL
    sem->poll_wait_all.pollq.prev = sem->poll_wait_all.pollq.next = &(sem->poll_wait_all.pollq);
    sem->poll_wait_one.pollq.prev = sem->poll_wait_one.pollq.next = &(sem->poll_wait_one.pollq);
    sem->poll_wait_all.pollq.task = sem->poll_wait_one.pollq.task = NULL;
    spin_lock_init(&(sem->poll_wait_all.pollock));
    spin_lock_init(&(sem->poll_wait_one.pollock));
#endif
}
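
/*
 * Illustrative sketch, not part of the original sources: how the three
 * semaphore kinds described above are typically created and destroyed
 * from RTAI kernel space. The "demo_" names are hypothetical.
 */
static SEM demo_cnt, demo_bin, demo_res;

static void demo_sem_setup(void)
{
    rt_typed_sem_init(&demo_cnt, 0, CNT_SEM | PRIO_Q); /* counting, priority ordered queue */
    rt_typed_sem_init(&demo_bin, 1, BIN_SEM | FIFO_Q); /* binary, FIFO ordered queue */
    rt_typed_sem_init(&demo_res, 1, RES_SEM);          /* resource, priority inheritance */
}

static void demo_sem_cleanup(void)
{
    rt_sem_delete(&demo_cnt);
    rt_sem_delete(&demo_bin);
    rt_sem_delete(&demo_res);
}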


/**
 * @anchor rt_sem_init
 * @brief Initialize a counting semaphore.
 *
 * rt_sem_init initializes a counting FIFO queueing semaphore @e sem.
 *
 * A semaphore can be used for communication and synchronization among
 * real time tasks.
 *
 * @param sem must point to an allocated @e SEM structure.
 *
 * @param value is the initial value of the semaphore.
 *
 * Positive values of the semaphore variable show how many tasks can
 * do a @ref rt_sem_wait() call without blocking. A negative semaphore
 * value shows how many tasks are blocked on the semaphore queue,
 * waiting to be awakened by calls to @ref rt_sem_signal().
 *
 * @note RTAI counting semaphores assume that their counter will never
 *   exceed 0xFFFF, such a number being used to signal returns in
 *   error. Thus also the initial count value cannot be greater
 *   than 0xFFFF.
 *   This is an old legacy function; there is also
 *   @ref rt_typed_sem_init(), which allows choosing among counting,
 *   binary and resource semaphores. Resource semaphores have priority
 *   inheritance.
 */
void rt_sem_init(SEM *sem, int value)
{
    rt_typed_sem_init(sem, value, CNT_SEM);
}


/**
 * @anchor rt_sem_delete
 * @brief Delete a semaphore.
 *
 * rt_sem_delete deletes a semaphore previously created with
 * @ref rt_sem_init().
 *
 * @param sem points to the structure used in the corresponding
 * call to rt_sem_init.
 *
 * Any task blocked on this semaphore is returned in error and
 * allowed to run when the semaphore is destroyed.
 *
 * @return 0 is returned upon success. A negative value is returned on
 * failure as described below:
 * - @b 0xFFFF: @e sem does not refer to a valid semaphore.
 *
 * @note In principle 0xFFFF could theoretically be a usable
 *   semaphore events count, so it could be returned also under
 *   normal circumstances. It is unlikely you are going to count
 *   up to such a number of events; in any case avoid counting up
 *   to 0xFFFF.
 */
RTAI_SYSCALL_MODE int rt_sem_delete(SEM *sem)
{
    unsigned long flags;
    RT_TASK *task;
    unsigned long schedmap, sched;
    QUEUE *q;

    CHECK_SEM_MAGIC(sem);

    rt_wakeup_pollers(&sem->poll_wait_all, RTE_OBJREM);
    rt_wakeup_pollers(&sem->poll_wait_one, RTE_OBJREM);
    schedmap = 0;
    q = &(sem->queue);
    flags = rt_global_save_flags_and_cli();
    sem->magic = 0;
    while ((q = q->next) != &(sem->queue) && (task = q->task)) {
        rem_timed_task(task);
        if (task->state != RT_SCHED_READY && (task->state &= ~(RT_SCHED_SEMAPHORE | RT_SCHED_DELAYED)) == RT_SCHED_READY) {
            task->blocked_on = RTP_OBJREM;
            enq_ready_task(task);
            set_bit(task->runnable_on_cpus & 0x1F, &schedmap);
        }
    }
    sched = schedmap;
    clear_bit(rtai_cpuid(), &schedmap);
    if ((task = sem->owndby) && sem->type > 0) {
        sched |= dequeue_resqel_reset_task_priority(&sem->resq, task);
        if (task->suspdepth) {
            if (task->suspdepth > 0) {
                task->state |= RT_SCHED_SUSPENDED;
                rem_ready_task(task);
                sched = 1;
            } else if (task->suspdepth == RT_RESEM_SUSPDEL) {
                rt_task_delete(task);
            }
        }
    }
    if (sched) {
        if (schedmap) {
            RT_SCHEDULE_MAP_BOTH(schedmap);
        } else {
            rt_schedule();
        }
    }
    rt_global_restore_flags(flags);
    return 0;
}


RTAI_SYSCALL_MODE int rt_sem_count(SEM *sem)
{
    return sem->count;
}


/**
 * @anchor rt_sem_signal
 * @brief Signaling a semaphore.
 *
 * rt_sem_signal signals an event to a semaphore. It is typically
 * called when the task leaves a critical region. The semaphore value
 * is incremented and tested. If the value is not positive, the first
 * task in the semaphore's waiting queue is allowed to run. rt_sem_signal
 * never blocks the caller task.
 *
 * @param sem points to the structure used in the call to @ref
 * rt_sem_init().
 *
 * @return 0 is returned upon success. A negative value is returned on
 * failure as described below:
 * - @b 0xFFFF: @e sem does not refer to a valid semaphore.
 *
 * @note In principle 0xFFFF could theoretically be a usable
 *   semaphore events count, so it could be returned also under
 *   normal circumstances. It is unlikely you are going to count
 *   up to such a number of events; in any case avoid counting up to
 *   0xFFFF.
 *   See @ref rt_sem_wait() notes for some curiosities.
 */
RTAI_SYSCALL_MODE int rt_sem_signal(SEM *sem)
{
    unsigned long flags;
    RT_TASK *task;
    int tosched;

    CHECK_SEM_MAGIC(sem);

    flags = rt_global_save_flags_and_cli();
    if (sem->type) {
        if (sem->restype && (!sem->owndby || sem->owndby != RT_CURRENT)) {
            rt_global_restore_flags(flags);
            return RTE_PERM;
        }
        if (sem->type > 1) {
            sem->type--;
            rt_global_restore_flags(flags);
            return 0;
        }
        if (++sem->count > 1) {
            sem->count = 1;
        }
    } else {
        sem->count++;
    }
    if ((task = (sem->queue.next)->task)) {
        dequeue_blocked(task);
        rem_timed_task(task);
        if (task->state != RT_SCHED_READY && (task->state &= ~(RT_SCHED_SEMAPHORE | RT_SCHED_DELAYED)) == RT_SCHED_READY) {
            enq_ready_task(task);
            if (sem->type <= 0) {
                RT_SCHEDULE(task, rtai_cpuid());
                rt_global_restore_flags(flags);
                WAKEUP_WAIT_ALL_POLLERS(1);
                return 0;
            }
            tosched = 1;
            goto res;
        }
    }
    tosched = 0;
res:    if (sem->type > 0) {
        DECLARE_RT_CURRENT;
        int sched;
        ASSIGN_RT_CURRENT;
        sem->owndby = 0;
        sched = dequeue_resqel_reset_current_priority(&sem->resq, rt_current);
        if (rt_current->suspdepth) {
            if (rt_current->suspdepth > 0) {
                rt_current->state |= RT_SCHED_SUSPENDED;
                rem_ready_current(rt_current);
                sched = 1;
            } else if (rt_current->suspdepth == RT_RESEM_SUSPDEL) {
                rt_task_delete(rt_current);
            }
        }
        if (sched) {
            if (tosched) {
                RT_SCHEDULE_BOTH(task, cpuid);
            } else {
                rt_schedule();
            }
        } else if (tosched) {
            RT_SCHEDULE(task, cpuid);
        }
    }
    rt_global_restore_flags(flags);
    WAKEUP_WAIT_ALL_POLLERS(1);
    return 0;
}
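
/*
 * Illustrative sketch, not part of the original sources: a classic
 * producer/consumer pair built on a counting semaphore, meant to run as
 * the bodies of two RTAI kernel tasks. All "demo_" names are hypothetical
 * and demo_items is assumed initialized with rt_sem_init(&demo_items, 0).
 */
static SEM demo_items;
static int demo_fifo[16], demo_in, demo_out;

static void demo_producer_body(long iterations)
{
    long i;
    for (i = 0; i < iterations; i++) {
        demo_fifo[demo_in++ & 15] = i;    /* produce an item */
        rt_sem_signal(&demo_items);       /* publish one event, never blocks */
    }
}

static void demo_consumer_body(long iterations)
{
    long i;
    for (i = 0; i < iterations; i++) {
        if (rt_sem_wait(&demo_items) >= RTE_LOWERR) {
            break;                        /* invalid, deleted or forcibly unblocked */
        }
        (void)demo_fifo[demo_out++ & 15]; /* consume the matching item */
    }
}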


/**
 * @anchor rt_sem_broadcast
 * @brief Signaling a semaphore to all its waiters.
 *
 * rt_sem_broadcast signals an event to a semaphore that unblocks all tasks
 * waiting on it. It is used as a support for RTAI proper conditional
 * variables but can be of help in many other instances. After the broadcast
 * the semaphore count is set to zero, so any task subsequently waiting
 * on it will block.
 * rt_sem_broadcast should not be used for resource semaphores.
 *
 * @param sem points to the structure used in the call to @ref
 * rt_sem_init().
 *
 * @returns 0 always.
 */
RTAI_SYSCALL_MODE int rt_sem_broadcast(SEM *sem)
{
    unsigned long flags, schedmap;
    RT_TASK *task;
    QUEUE *q;

    CHECK_SEM_MAGIC(sem);

    schedmap = 0;
    q = &(sem->queue);
    flags = rt_global_save_flags_and_cli();
    while ((q = q->next) != &(sem->queue)) {
        if ((task = q->task)) {
            dequeue_blocked(task = q->task);
            rem_timed_task(task);
            if (task->state != RT_SCHED_READY && (task->state &= ~(RT_SCHED_SEMAPHORE | RT_SCHED_DELAYED)) == RT_SCHED_READY) {
                enq_ready_task(task);
                set_bit(task->runnable_on_cpus & 0x1F, &schedmap);
            }
        }
        rt_global_restore_flags(flags);
        flags = rt_global_save_flags_and_cli();
    }
    sem->count = 0;
    if (schedmap) {
        if (test_and_clear_bit(rtai_cpuid(), &schedmap)) {
            RT_SCHEDULE_MAP_BOTH(schedmap);
        } else {
            RT_SCHEDULE_MAP(schedmap);
        }
    }
    rt_global_restore_flags(flags);
    WAKEUP_WAIT_ONE_POLLER(schedmap);
    return 0;
}
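
/*
 * Illustrative sketch, not part of the original sources: rt_sem_broadcast
 * used as a start gate releasing every waiter at once. The "demo_" names
 * are hypothetical; demo_gate is assumed initialized with
 * rt_sem_init(&demo_gate, 0) and each waiter runs in its own RTAI task.
 */
static SEM demo_gate;

static void demo_gate_waiter_body(long arg)
{
    rt_sem_wait(&demo_gate);      /* count is 0, so every caller blocks here */
    /* ... all released waiters proceed together from this point ... */
}

static void demo_gate_open(void)
{
    rt_sem_broadcast(&demo_gate); /* wakes all waiters, count is left at 0 */
}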


/**
 * @anchor rt_sem_wait
 * @brief Take a semaphore.
 *
 * rt_sem_wait waits for an event to be signaled to a semaphore. It is
 * typically called when a task enters a critical region. The
 * semaphore value is decremented and tested. If it is still
 * non-negative rt_sem_wait returns immediately. Otherwise the caller
 * task is blocked and queued up. Queuing may happen in priority order
 * or on a FIFO basis. This is determined by the compile time option @e
 * SEM_PRIORD. In that case rt_sem_wait returns if:
 *         - The caller task is in the first place of the waiting
 *       queue and another task issues a @ref rt_sem_signal()
 *       call;
 *         - An error occurs (e.g. the semaphore is destroyed);
 *
 * @param sem points to the structure used in the call to @ref
 *    rt_sem_init().
 *
 * @return the number of events already signaled upon success.
 * A special value as described below in case of a failure:
 * - @b 0xFFFF: @e sem does not refer to a valid semaphore.
 *
 * @note In principle 0xFFFF could theoretically be a usable
 *   semaphore events count, so it could be returned also under
 *   normal circumstances. It is unlikely you are going to count
 *   up to such a number of events; in any case avoid counting up to
 *   0xFFFF.<br>
 *   Just for curiosity: the original Dijkstra notation for
 *   rt_sem_wait was a "P" operation, and rt_sem_signal was a "V"
 *   operation. The name for P comes from the Dutch "prolagen", a
 *   combination of "proberen" (to probe) and "verlagen" (to
 *   decrement). Also from the word "passeren" (to pass).<br>
 *   The name for V comes from the Dutch "verhogen" (to increase)
 *   or "vrygeven" (to release).  (Source: Daniel Tabak -
 *   Multiprocessors, Prentice Hall, 1990).<br>
 *   It should be also remarked that real time programming
 *   practitioners were using semaphores a long time before
 *   Dijkstra formalized P and V. In Italian, "semaforo" means a
 *   traffic light, so that semaphores have an intuitive appeal
 *   and their use and meaning is easily understood.
 */
RTAI_SYSCALL_MODE int rt_sem_wait(SEM *sem)
{
    RT_TASK *rt_current;
    unsigned long flags;
    int count;

    CHECK_SEM_MAGIC(sem);

    flags = rt_global_save_flags_and_cli();
    rt_current = RT_CURRENT;
    if ((count = sem->count) <= 0) {
        void *retp;
        unsigned long schedmap;
        if (sem->type > 0) {
            if (sem->restype && sem->owndby == rt_current) {
                if (sem->restype > 0) {
                    count = sem->type++;
                    rt_global_restore_flags(flags);
                    return count + 1;
                }
                rt_global_restore_flags(flags);
                return RTE_DEADLOK;
            }
            schedmap = pass_prio(sem->owndby, rt_current);
        } else {
            schedmap = 0;
        }
        sem->count--;
        rt_current->state |= RT_SCHED_SEMAPHORE;
        rem_ready_current(rt_current);
        enqueue_blocked(rt_current, &sem->queue, sem->qtype);
        RT_SCHEDULE_MAP_BOTH(schedmap);
        if (likely(!(retp = rt_current->blocked_on))) {
            count = sem->count;
        } else {
            if (likely(retp != RTP_OBJREM)) {
                dequeue_blocked(rt_current);
                if (++sem->count > 1 && sem->type) {
                    sem->count = 1;
                }
                if (sem->owndby && sem->type > 0) {
                    set_task_prio_from_resq(sem->owndby);
                }
                rt_global_restore_flags(flags);
                return RTE_UNBLKD;
            } else {
                rt_current->prio_passed_to = NULL;
                rt_global_restore_flags(flags);
                return RTE_OBJREM;
            }
        }
    } else {
        sem->count--;
    }
    if (sem->type > 0) {
        enqueue_resqel(&sem->resq, sem->owndby = rt_current);
    }
    rt_global_restore_flags(flags);
    return count;
}
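
/*
 * Illustrative sketch, not part of the original sources: a resource
 * semaphore used as a recursive mutex around a critical region, giving the
 * priority inheritance described above. The "demo_" names are hypothetical;
 * demo_mutex is assumed initialized with
 * rt_typed_sem_init(&demo_mutex, 1, RES_SEM).
 */
static SEM demo_mutex;

static void demo_update_leaf(void)
{
    rt_sem_wait(&demo_mutex);   /* nested wait: the owner is not blocked */
    /* ... touch one part of the protected resource ... */
    rt_sem_signal(&demo_mutex);
}

static void demo_update_all(void)
{
    if (rt_sem_wait(&demo_mutex) >= RTE_LOWERR) {
        return;                 /* invalid, deleted or forcibly unblocked */
    }
    demo_update_leaf();         /* recursion is legal for the owner */
    rt_sem_signal(&demo_mutex); /* one signal per wait, in reverse order */
}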


/**
 * @anchor rt_sem_wait_if
 * @brief Take a semaphore, only if the calling task is not blocked.
 *
 * rt_sem_wait_if is a version of the semaphore wait operation
 * similar to @ref rt_sem_wait(), but it never blocks the caller. If
 * the semaphore is not free, rt_sem_wait_if returns immediately and
 * the semaphore value remains unchanged.
 *
 * @param sem points to the structure used in the call to @ref
 * rt_sem_init().
 *
 * @return the number of events already signaled upon success.
 * A special value as described below in case of a failure:
 * - @b 0xFFFF: @e sem does not refer to a valid semaphore.
 *
 * @note In principle 0xFFFF could theoretically be a usable
 *   semaphore events count, so it could be returned also under
 *   normal circumstances. It is unlikely you are going to count
 *   up to such a number of events; in any case avoid counting up
 *   to 0xFFFF.
 */
RTAI_SYSCALL_MODE int rt_sem_wait_if(SEM *sem)
{
    int count;
    unsigned long flags;

    CHECK_SEM_MAGIC(sem);

    flags = rt_global_save_flags_and_cli();
    if ((count = sem->count) <= 0) {
        if (sem->restype && sem->owndby == RT_CURRENT) {
            if (sem->restype > 0) {
                count = sem->type++;
                rt_global_restore_flags(flags);
                return count + 1;
            }
            rt_global_restore_flags(flags);
            return RTE_DEADLOK;
        }
    } else {
        sem->count--;
        if (sem->type > 0) {
            enqueue_resqel(&sem->resq, sem->owndby = RT_CURRENT);
        }
    }
    rt_global_restore_flags(flags);
    return count;
}
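
/*
 * Illustrative sketch, not part of the original sources: polling a
 * semaphore from code that must never block. The "demo_" name is
 * hypothetical; the caller learns from the return value whether the
 * semaphore was actually taken.
 */
static int demo_try_take(SEM *sem)
{
    int ret = rt_sem_wait_if(sem);
    /* taken only for a positive count below the error range */
    return ret > 0 && ret < RTE_LOWERR;
}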


/**
 * @anchor rt_sem_wait_until
 * @brief Wait on a semaphore with an absolute timeout.
 *
 * rt_sem_wait_until, like @ref rt_sem_wait_timed(), is a timed version
 * of the standard semaphore wait call. The semaphore value is
 * decremented and tested. If it is still non-negative these functions
 * return immediately. Otherwise the caller task is blocked and queued
 * up. Queuing may happen in priority order or on a FIFO basis. This is
 * determined by the compile time option @e SEM_PRIORD. In this case
 * the function returns if:
 *  - The caller task is in the first place of the waiting queue
 *    and another task issues a @ref rt_sem_signal() call;
 *  - a timeout occurs;
 *  - an error occurs (e.g. the semaphore is destroyed);
 *
 * In case of a timeout, the semaphore value is incremented before
 * return.
 *
 * @param sem points to the structure used in the call to @ref
 *    rt_sem_init().
 *
 * @param time is an absolute time value, in timer count units.
 *
 * @return the number of events already signaled upon success.
 * A special value as described below in case of a failure:
 * - @b 0xFFFF: @e sem does not refer to a valid semaphore.
 *
 * @note In principle 0xFFFF could theoretically be a usable
 *   semaphore events count, so it could be returned also under
 *   normal circumstances. It is unlikely you are going to count
 *   up to such a number of events; in any case avoid counting up to
 *   0xFFFF.
 */
RTAI_SYSCALL_MODE int rt_sem_wait_until(SEM *sem, RTIME time)
{
    DECLARE_RT_CURRENT;
    int count;
    unsigned long flags;

    CHECK_SEM_MAGIC(sem);

    REALTIME2COUNT(time);

    flags = rt_global_save_flags_and_cli();
    ASSIGN_RT_CURRENT;
    if ((count = sem->count) <= 0) {
        void *retp;
        rt_current->blocked_on = &sem->queue;
        if ((rt_current->resume_time = time) > rt_time_h) {
            unsigned long schedmap;
            if (sem->type > 0) {
                if (sem->restype && sem->owndby == rt_current) {
                    if (sem->restype > 0) {
                        count = sem->type++;
                        rt_global_restore_flags(flags);
                        return count + 1;
                    }
                    rt_global_restore_flags(flags);
                    return RTE_DEADLOK;
                }
                schedmap = pass_prio(sem->owndby, rt_current);
            } else {
                schedmap = 0;
            }
            sem->count--;
            rt_current->state |= (RT_SCHED_SEMAPHORE | RT_SCHED_DELAYED);
            rem_ready_current(rt_current);
            enqueue_blocked(rt_current, &sem->queue, sem->qtype);
            enq_timed_task(rt_current);
            RT_SCHEDULE_MAP_BOTH(schedmap);
        } else {
            sem->count--;
            rt_current->queue.prev = rt_current->queue.next = &rt_current->queue;
        }
        if (likely(!(retp = rt_current->blocked_on))) {
            count = sem->count;
        } else if (likely(retp != RTP_OBJREM)) {
            dequeue_blocked(rt_current);
            if (++sem->count > 1 && sem->type) {
                sem->count = 1;
            }
            if (sem->owndby && sem->type > 0) {
                set_task_prio_from_resq(sem->owndby);
            }
            rt_global_restore_flags(flags);
            return likely(retp > RTP_HIGERR) ? RTE_TIMOUT : RTE_UNBLKD;
        } else {
            rt_current->prio_passed_to = NULL;
            rt_global_restore_flags(flags);
            return RTE_OBJREM;
        }
    } else {
        sem->count--;
    }
    if (sem->type > 0) {
        enqueue_resqel(&sem->resq, sem->owndby = rt_current);
    }
    rt_global_restore_flags(flags);
    return count;
}


/**
 * @anchor rt_sem_wait_timed
 * @brief Wait on a semaphore with a relative timeout.
 *
 * rt_sem_wait_timed, like @ref rt_sem_wait_until(), is a timed version
 * of the standard semaphore wait call. The semaphore value is
 * decremented and tested. If it is still non-negative these functions
 * return immediately. Otherwise the caller task is blocked and queued
 * up. Queuing may happen in priority order or on a FIFO basis. This is
 * determined by the compile time option @e SEM_PRIORD. In this case
 * the function returns if:
 *  - The caller task is in the first place of the waiting queue
 *    and another task issues a @ref rt_sem_signal() call;
 *  - a timeout occurs;
 *  - an error occurs (e.g. the semaphore is destroyed);
 *
 * In case of a timeout, the semaphore value is incremented before
 * return.
 *
 * @param sem points to the structure used in the call to @ref
 *    rt_sem_init().
 *
 * @param delay is a time interval relative to the current time, in
 *    timer count units.
 *
 * @return the number of events already signaled upon success.
 * A special value as described below in case of a failure:
 * - @b 0xFFFF: @e sem does not refer to a valid semaphore.
 *
 * @note In principle 0xFFFF could theoretically be a usable
 *   semaphore events count, so it could be returned also under
 *   normal circumstances. It is unlikely you are going to count
 *   up to such a number of events; in any case avoid counting up to
 *   0xFFFF.
 */
RTAI_SYSCALL_MODE int rt_sem_wait_timed(SEM *sem, RTIME delay)
{
    return rt_sem_wait_until(sem, get_time() + delay);
}
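
/*
 * Illustrative sketch, not part of the original sources: waiting for an
 * event with a 100 us relative timeout. It assumes nano2count() from the
 * scheduler API for the nanoseconds conversion; the "demo_" name is
 * hypothetical.
 */
static int demo_wait_100us(SEM *sem)
{
    int ret = rt_sem_wait_timed(sem, nano2count(100000));
    if (ret == RTE_TIMOUT) {
        return -1;              /* nothing was signaled within 100 us */
    }
    return ret >= RTE_LOWERR ? -1 : 0;
}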


/* ++++++++++++++++++++++++++ BARRIER SUPPORT +++++++++++++++++++++++++++++++ */

/**
 * @anchor rt_sem_wait_barrier
 * @brief Wait on a semaphore barrier.
 *
 * rt_sem_wait_barrier implements gang waiting: a task issuing such
 * a request is blocked until the number of tasks waiting on the barrier
 * reaches the semaphore count set at rt_sem_init.
 *
 * @returns -1 for the tasks that waited on the barrier, 0 for the task
 * that completed the barrier count.
 */
RTAI_SYSCALL_MODE int rt_sem_wait_barrier(SEM *sem)
{
    unsigned long flags;

    CHECK_SEM_MAGIC(sem);

    flags = rt_global_save_flags_and_cli();
    if (!sem->owndby) {
        sem->owndby = (void *)(long)(sem->count < 1 ? 1 : sem->count);
        sem->count = sem->type = 0;
    }
    if ((1 - sem->count) < (long)sem->owndby) {
        rt_sem_wait(sem);
        rt_global_restore_flags(flags);
        return -1;
    }
    rt_sem_broadcast(sem);
    rt_global_restore_flags(flags);
    return 0;
}
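
/*
 * Illustrative sketch, not part of the original sources: three tasks
 * meeting at a barrier. The count given at init fixes the gang size; the
 * "demo_" names are hypothetical and demo_barrier is assumed initialized
 * with rt_sem_init(&demo_barrier, 3).
 */
static SEM demo_barrier;

static void demo_phase_worker_body(long arg)
{
    /* ... phase 1 work, done independently by each task ... */
    rt_sem_wait_barrier(&demo_barrier); /* blocks until all 3 tasks arrive */
    /* ... phase 2 work, entered by the three tasks together ... */
}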

/* +++++++++++++++++++++++++ COND VARIABLES SUPPORT +++++++++++++++++++++++++ */

/**
 * @anchor rt_cond_signal
 * @brief Signal a condition variable.
 *
 * rt_cond_signal resumes one of the tasks that are waiting on the condition
 * semaphore cnd. Nothing happens if no task is waiting on @a cnd; otherwise
 * the first queued task blocked on cnd is resumed, according to the queueing
 * method set at rt_cond_init.
 *
 * @param cnd points to the structure used in the call to @ref
 *    rt_cond_init().
 *
 * @returns 0
 *
 */
RTAI_SYSCALL_MODE int rt_cond_signal(CND *cnd)
{
    unsigned long flags;
    RT_TASK *task;

    CHECK_SEM_MAGIC(cnd);

    flags = rt_global_save_flags_and_cli();
    if ((task = (cnd->queue.next)->task)) {
        dequeue_blocked(task);
        rem_timed_task(task);
        if (task->state != RT_SCHED_READY && (task->state &= ~(RT_SCHED_SEMAPHORE | RT_SCHED_DELAYED)) == RT_SCHED_READY) {
            enq_ready_task(task);
            RT_SCHEDULE(task, rtai_cpuid());
        }
    }
    rt_global_restore_flags(flags);
    return 0;
}

static inline int rt_cndmtx_signal(SEM *mtx, RT_TASK *rt_current)
{
    int type;
    RT_TASK *task;

    if ((type = mtx->type) > 1) {
        mtx->type = 1;
    }
    if (++mtx->count > 1) {
        mtx->count = 1;
    }
    if ((task = (mtx->queue.next)->task)) {
        dequeue_blocked(task);
        rem_timed_task(task);
        if (task->state != RT_SCHED_READY && (task->state &= ~(RT_SCHED_SEMAPHORE | RT_SCHED_DELAYED)) == RT_SCHED_READY) {
            enq_ready_task(task);
        }
    }
    mtx->owndby = 0;
    dequeue_resqel_reset_current_priority(&mtx->resq, rt_current);
    if (task) {
        RT_SCHEDULE_BOTH(task, rtai_cpuid());
    } else {
        rt_schedule();
    }
    return type;
}

/**
 * @anchor rt_cond_wait
 * @brief Wait for a signal to a condition variable.
 *
 * rt_cond_wait atomically unlocks mtx (as for using rt_sem_signal)
 * and waits for the condition semaphore cnd to be signaled. The task
 * execution is suspended until the condition semaphore is signalled.
 * mtx must be owned by the calling task before rt_cond_wait is
 * called. Before returning to the calling task rt_cond_wait reacquires
 * mtx by calling rt_sem_wait.
 *
 * @param cnd points to the structure used in the call to @ref
 *    rt_cond_init().
 *
 * @param mtx points to the structure used in the call to @ref
 *    rt_sem_init().
 *
 * @return 0 on success, SEM_ERR in case of error.
 *
 */
RTAI_SYSCALL_MODE int rt_cond_wait(CND *cnd, SEM *mtx)
{
    RT_TASK *rt_current;
    unsigned long flags;
    void *retp;
    int retval, type;

    CHECK_SEM_MAGIC(cnd);
    CHECK_SEM_MAGIC(mtx);

    flags = rt_global_save_flags_and_cli();
    rt_current = RT_CURRENT;
    if (mtx->owndby != rt_current) {
        rt_global_restore_flags(flags);
        return RTE_PERM;
    }
    rt_current->state |= RT_SCHED_SEMAPHORE;
    rem_ready_current(rt_current);
    enqueue_blocked(rt_current, &cnd->queue, cnd->qtype);
    type = rt_cndmtx_signal(mtx, rt_current);
    if (likely((retp = rt_current->blocked_on) != RTP_OBJREM)) {
        if (unlikely(retp != NULL)) {
            dequeue_blocked(rt_current);
            retval = RTE_UNBLKD;
        } else {
            retval = 0;
        }
    } else {
        retval = RTE_OBJREM;
    }
    rt_global_restore_flags(flags);
    if (rt_sem_wait(mtx) < RTE_LOWERR) {
        mtx->type = type;
    }
    return retval;
}
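
/*
 * Illustrative sketch, not part of the original sources: the canonical
 * condition variable pattern, waiting for a predicate under a resource
 * semaphore used as the mutex. The "demo_" names are hypothetical;
 * demo_cnd_mtx is assumed initialized with
 * rt_typed_sem_init(&demo_cnd_mtx, 1, RES_SEM) and demo_cnd with
 * rt_cond_init(&demo_cnd).
 */
static SEM demo_cnd_mtx;
static CND demo_cnd;
static int demo_ready;

static void demo_cond_consumer_body(long arg)
{
    rt_sem_wait(&demo_cnd_mtx);
    while (!demo_ready) {               /* always recheck the predicate */
        if (rt_cond_wait(&demo_cnd, &demo_cnd_mtx)) {
            break;                      /* unblocked in error, cnd likely deleted */
        }
    }
    demo_ready = 0;
    rt_sem_signal(&demo_cnd_mtx);
}

static void demo_cond_producer_body(long arg)
{
    rt_sem_wait(&demo_cnd_mtx);
    demo_ready = 1;
    rt_cond_signal(&demo_cnd);
    rt_sem_signal(&demo_cnd_mtx);
}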

/**
 * @anchor rt_cond_wait_until
 * @brief Wait for a signal to a condition variable, with an absolute timeout.
 *
 * rt_cond_wait_until atomically unlocks mtx (as for using rt_sem_signal)
 * and waits for the condition semaphore cnd to be signalled. The task
 * execution is suspended until the condition semaphore is either signaled
 * or a timeout expires. mtx must be owned by the calling task before
 * rt_cond_wait_until is called. Before returning to the calling task
 * rt_cond_wait_until reacquires mtx by calling rt_sem_wait and returns a
 * value to indicate if it has been signalled or timed out.
 *
 * @param cnd points to the structure used in the call to @ref
 *    rt_cond_init().
 *
 * @param mtx points to the structure used in the call to @ref
 *    rt_sem_init().
 *
 * @param time is an absolute time value, in timer count units.
 *
 * @returns 0 if it was signaled, SEM_TIMOUT if a timeout occurred, SEM_ERR
 * if the task has been resumed because of any other action (likely cnd
 * was deleted).
 */
RTAI_SYSCALL_MODE int rt_cond_wait_until(CND *cnd, SEM *mtx, RTIME time)
{
    DECLARE_RT_CURRENT;
    unsigned long flags;
    void *retp;
    int retval, type;

    CHECK_SEM_MAGIC(cnd);
    CHECK_SEM_MAGIC(mtx);

    REALTIME2COUNT(time);

    flags = rt_global_save_flags_and_cli();
    ASSIGN_RT_CURRENT;
    if (mtx->owndby != rt_current) {
        rt_global_restore_flags(flags);
        return RTE_PERM;
    }
    if ((rt_current->resume_time = time) > rt_time_h) {
        rt_current->state |= (RT_SCHED_SEMAPHORE | RT_SCHED_DELAYED);
        rem_ready_current(rt_current);
        enqueue_blocked(rt_current, &cnd->queue, cnd->qtype);
        enq_timed_task(rt_current);
        type = rt_cndmtx_signal(mtx, rt_current);
        if (unlikely((retp = rt_current->blocked_on) == RTP_OBJREM)) {
            retval = RTE_OBJREM;
        } else if (unlikely(retp != NULL)) {
            dequeue_blocked(rt_current);
            retval = likely(retp > RTP_HIGERR) ? RTE_TIMOUT : RTE_UNBLKD;
        } else {
            retval = 0;
        }
        rt_global_restore_flags(flags);
        if (rt_sem_wait(mtx) < RTE_LOWERR) {
            mtx->type = type;
        }
    } else {
        retval = RTE_TIMOUT;
        rt_global_restore_flags(flags);
    }
    return retval;
}

/**
 * @anchor rt_cond_wait_timed
 * @brief Wait for a signal to a condition variable, with a relative timeout.
 *
 * rt_cond_wait_timed atomically unlocks mtx (as for using rt_sem_signal)
 * and waits for the condition semaphore cnd to be signalled. The task
 * execution is suspended until the condition semaphore is either signaled
 * or a timeout expires. mtx must be owned by the calling task before
 * rt_cond_wait_timed is called. Before returning to the calling task
 * rt_cond_wait_timed reacquires mtx by calling rt_sem_wait and returns a
 * value to indicate if it has been signalled or timed out.
 *
 * @param cnd points to the structure used in the call to @ref
 *    rt_cond_init().
 *
 * @param mtx points to the structure used in the call to @ref
 *    rt_sem_init().
 *
 * @param delay is a relative time value with respect to the current time,
 * in timer count units.
 *
 * @returns 0 if it was signaled, SEM_TIMOUT if a timeout occurred, SEM_ERR
 * if the task has been resumed because of any other action (likely cnd
 * was deleted).
 */
RTAI_SYSCALL_MODE int rt_cond_wait_timed(CND *cnd, SEM *mtx, RTIME delay)
{
    return rt_cond_wait_until(cnd, mtx, get_time() + delay);
}

/* ++++++++++++++++++++ READERS-WRITER LOCKS SUPPORT ++++++++++++++++++++++++ */

/**
 * @anchor rt_rwl_init
 * @brief Initialize a multi readers single writer lock.
 *
 * rt_rwl_init initializes a multi readers single writer lock @a rwl.
 *
 * @param rwl must point to an allocated @e RWL structure.
 *
 * A multi readers single writer lock (RWL) is a synchronization mechanism
 * that allows simultaneous read only access to an object, while only
 * one task at a time can have write access. A data set which is searched
 * more frequently than it is changed can be usefully controlled by using
 * an RWL. The lock acquisition policy is determined solely by the priority
 * of the tasks applying to own the lock.
 *
 * @returns 0 always.
 *
 */

RTAI_SYSCALL_MODE int rt_typed_rwl_init(RWL *rwl, int type)
{
    rt_typed_sem_init(&rwl->wrmtx, type, RES_SEM);
    rt_typed_sem_init(&rwl->wrsem, 0, CNT_SEM | PRIO_Q);
    rt_typed_sem_init(&rwl->rdsem, 0, CNT_SEM | PRIO_Q);
    return 0;
}
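
/*
 * Illustrative sketch, not part of the original sources: readers and a
 * writer sharing a small table through an RWL. The "demo_" names are
 * hypothetical; demo_rwl is assumed initialized with the plain
 * rt_rwl_init(&demo_rwl) companion declared in rtai_rwl.h.
 */
static RWL demo_rwl;
static int demo_table[8];

static int demo_lookup(int i)
{
    int v;
    rt_rwl_rdlock(&demo_rwl);   /* many readers may hold this at once */
    v = demo_table[i & 7];
    rt_rwl_unlock(&demo_rwl);
    return v;
}

static void demo_store(int i, int v)
{
    rt_rwl_wrlock(&demo_rwl);   /* exclusive: excludes readers and writers */
    demo_table[i & 7] = v;
    rt_rwl_unlock(&demo_rwl);
}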

/**
 * @anchor rt_rwl_delete
 * @brief Destroy a multi readers single writer lock.
 *
 * rt_rwl_delete destroys a multi readers single writer lock @a rwl.
 *
 * @param rwl must point to an allocated @e RWL structure.
 *
 * @returns 0 if OK, SEM_ERR if anything went wrong.
 *
 */

RTAI_SYSCALL_MODE int rt_rwl_delete(RWL *rwl)
{
    int ret;

    ret  = rt_sem_delete(&rwl->rdsem);
    ret |= rt_sem_delete(&rwl->wrsem);
    ret |= rt_sem_delete(&rwl->wrmtx);
    return !ret ? 0 : RTE_OBJINV;
}

/**
 * @anchor rt_rwl_rdlock
 * @brief Acquire a multi readers single writer lock for reading.
 *
 * rt_rwl_rdlock acquires a multi readers single writer lock @a rwl for
 * reading. The calling task will block only if any writer owns the lock
 * already or there are writers with higher priority waiting to acquire
 * write access.
 *
 * @param rwl must point to an allocated @e RWL structure.
 *
 * @returns 0 if OK, SEM_ERR if anything went wrong after being blocked.
 *
 */

RTAI_SYSCALL_MODE int rt_rwl_rdlock(RWL *rwl)
{
    unsigned long flags;
    RT_TASK *wtask, *rt_current;

    flags = rt_global_save_flags_and_cli();
    rt_current = RT_CURRENT;
    while (rwl->wrmtx.owndby || ((wtask = (rwl->wrsem.queue.next)->task) && wtask->priority <= rt_current->priority)) {
        int ret;
        if (rwl->wrmtx.owndby == rt_current) {
            rt_global_restore_flags(flags);
            return RTE_RWLINV;
        }
        if ((ret = rt_sem_wait(&rwl->rdsem)) >= RTE_LOWERR) {
            rt_global_restore_flags(flags);
            return ret;
        }
    }
    ((volatile int *)&rwl->rdsem.owndby)[0]++;
    rt_global_restore_flags(flags);
    return 0;
}

/**
 * @anchor rt_rwl_rdlock_if
 * @brief Try to acquire a multi readers single writer lock just for reading.
 *
 * rt_rwl_rdlock_if tries to acquire a multi readers single writer lock @a rwl
 * for reading immediately, i.e. without blocking if a writer owns the lock
 * or there are writers with higher priority waiting to acquire write access.
 *
 * @param rwl must point to an allocated @e RWL structure.
 *
 * @returns 0 if the lock was acquired, -1 if the lock was already owned.
 *
 */

RTAI_SYSCALL_MODE int rt_rwl_rdlock_if(RWL *rwl)
{
    unsigned long flags;
    RT_TASK *wtask;

    flags = rt_global_save_flags_and_cli();
    if (!rwl->wrmtx.owndby && (!(wtask = (rwl->wrsem.queue.next)->task) || wtask->priority > RT_CURRENT->priority)) {
        ((volatile int *)&rwl->rdsem.owndby)[0]++;
        rt_global_restore_flags(flags);
        return 0;
    }
    rt_global_restore_flags(flags);
    return -1;
}

/**
 * @anchor rt_rwl_rdlock_until
 * @brief Try to acquire a multi readers single writer lock for reading within
 * an absolute deadline time.
 *
 * rt_rwl_rdlock_until tries to acquire a multi readers single writer lock
 * @a rwl for reading, as for rt_rwl_rdlock, but timing out if the lock has not
 * been acquired within an assigned deadline.
 *
 * @param rwl must point to an allocated @e RWL structure.
 *
 * @param time is the time deadline, in internal count units.
 *
 * @returns 0 if the lock was acquired, SEM_TIMOUT if the deadline expired
 * without acquiring the lock, SEM_ERR in case something went wrong.
 *
 */

RTAI_SYSCALL_MODE int rt_rwl_rdlock_until(RWL *rwl, RTIME time)
{
    unsigned long flags;
    RT_TASK *wtask, *rt_current;

    flags = rt_global_save_flags_and_cli();
    rt_current = RT_CURRENT;
    while (rwl->wrmtx.owndby || ((wtask = (rwl->wrsem.queue.next)->task) && wtask->priority <= rt_current->priority)) {
        int ret;
        if (rwl->wrmtx.owndby == rt_current) {
            rt_global_restore_flags(flags);
            return RTE_RWLINV;
        }
        if ((ret = rt_sem_wait_until(&rwl->rdsem, time)) >= RTE_LOWERR) {
            rt_global_restore_flags(flags);
            return ret;
        }
    }
    ((volatile int *)&rwl->rdsem.owndby)[0]++;
    rt_global_restore_flags(flags);
    return 0;
}

/**
 * @anchor rt_rwl_rdlock_timed
 * @brief Try to acquire a multi readers single writer lock for reading within
 * a relative deadline time.
 *
 * rt_rwl_rdlock_timed tries to acquire a multi readers single writer lock
 * @a rwl for reading, as for rt_rwl_rdlock, but timing out if the lock has not
 * been acquired within an assigned deadline.
 *
 * @param rwl must point to an allocated @e RWL structure.
 *
 * @param delay is the time delay within which the lock must be acquired, in
 * internal count units.
 *
 * @returns 0 if the lock was acquired, SEM_TIMOUT if the deadline expired
 * without acquiring the lock, SEM_ERR in case something went wrong.
 *
 */

RTAI_SYSCALL_MODE int rt_rwl_rdlock_timed(RWL *rwl, RTIME delay)
{
    return rt_rwl_rdlock_until(rwl, get_time() + delay);
}

/**
 * @anchor rt_rwl_wrlock
 * @brief Acquire a multi readers single writer lock for writing.
 *
 * rt_rwl_wrlock acquires a multi readers single writer lock @a rwl for
 * writing. The calling task will block if any other task, reader or writer,
 * owns the lock already.
 *
 * @param rwl must point to an allocated @e RWL structure.
 *
 * @returns 0 if OK, SEM_ERR if anything went wrong after being blocked.
 *
 */

RTAI_SYSCALL_MODE int rt_rwl_wrlock(RWL *rwl)
{
    unsigned long flags;
    int ret;

    flags = rt_global_save_flags_and_cli();
    while (rwl->rdsem.owndby) {
        if ((ret = rt_sem_wait(&rwl->wrsem)) >= RTE_LOWERR) {
            rt_global_restore_flags(flags);
            return ret;
        }
    }
    if ((ret = rt_sem_wait(&rwl->wrmtx)) >= RTE_LOWERR) {
        rt_global_restore_flags(flags);
        return ret;
    }
    rt_global_restore_flags(flags);
    return 0;
}

/**
 * @anchor rt_rwl_wrlock_if
 * @brief Try to acquire a multi readers single writer lock for writing.
 *
 * rt_rwl_wrlock_if tries to acquire a multi readers single writer lock @a rwl
 * for writing immediately, i.e. without blocking if the lock is owned already.
 *
 * @param rwl must point to an allocated @e RWL structure.
 *
 * @returns 0 if the lock was acquired, -1 if the lock was already owned.
 *
 */

RTAI_SYSCALL_MODE int rt_rwl_wrlock_if(RWL *rwl)
{
    unsigned long flags;
    int ret;

    flags = rt_global_save_flags_and_cli();
    if (!rwl->rdsem.owndby && (ret = rt_sem_wait_if(&rwl->wrmtx)) > 0 && ret < RTE_LOWERR) {
        rt_global_restore_flags(flags);
        return 0;
    }
    rt_global_restore_flags(flags);
    return -1;
}

/**
 * @anchor rt_rwl_wrlock_until
 * @brief Try to acquire a multi readers single writer lock for writing within
 * an absolute deadline time.
 *
 * rt_rwl_wrlock_until tries to acquire a multi readers single writer lock
 * @a rwl for writing, as for rt_rwl_wrlock, but timing out if the lock has not
 * been acquired within an assigned deadline.
 *
 * @param rwl must point to an allocated @e RWL structure.
 *
 * @param time is the time deadline, in internal count units.
 *
 * @returns 0 if the lock was acquired, SEM_TIMOUT if the deadline expired
 * without acquiring the lock, SEM_ERR in case something went wrong.
 *
 */

RTAI_SYSCALL_MODE int rt_rwl_wrlock_until(RWL *rwl, RTIME time)
{
    unsigned long flags;
    int ret;

    flags = rt_global_save_flags_and_cli();
    while (rwl->rdsem.owndby) {
        if ((ret = rt_sem_wait_until(&rwl->wrsem, time)) >= RTE_LOWERR) {
            rt_global_restore_flags(flags);
            return ret;
        }
    }
    if ((ret = rt_sem_wait_until(&rwl->wrmtx, time)) >= RTE_LOWERR) {
        rt_global_restore_flags(flags);
        return ret;
    }
    rt_global_restore_flags(flags);
    return 0;
}

/**
 * @anchor rt_rwl_wrlock_timed
 * @brief Try to acquire a multi readers single writer lock for writing within
 * a relative deadline time.
 *
 * rt_rwl_wrlock_timed tries to acquire a multi readers single writer lock
 * @a rwl for writing, as for rt_rwl_wrlock, timing out if the lock has not
 * been acquired within an assigned deadline.
 *
 * @param rwl must point to an allocated @e RWL structure.
 *
 * @param delay is the time delay within which the lock must be acquired, in
 * internal count units.
 *
 * @returns 0 if the lock was acquired, SEM_TIMOUT if the deadline expired
 * without acquiring the lock, SEM_ERR in case something went wrong.
 *
 */

RTAI_SYSCALL_MODE int rt_rwl_wrlock_timed(RWL *rwl, RTIME delay)
{
    return rt_rwl_wrlock_until(rwl, get_time() + delay);
}

/**
 * @anchor rt_rwl_unlock
 * @brief Unlock an acquired multi readers single writer lock.
 *
 * rt_rwl_unlock unlocks an acquired multi readers single writer lock @a rwl.
 * After the release, any task waiting to acquire the lock will own it
 * according to its priority, whether it is a reader or a writer; otherwise
 * the lock will be fully unlocked.
 *
 * @param rwl must point to an allocated @e RWL structure.
 *
 * @returns 0 on success, RTE_PERM if the caller owns the lock in no mode.
 *
 */

RTAI_SYSCALL_MODE int rt_rwl_unlock(RWL *rwl)
{
    unsigned long flags;

    flags = rt_global_save_flags_and_cli();
    if (rwl->wrmtx.owndby == RT_CURRENT) {
        rt_sem_signal(&rwl->wrmtx);
    } else if (rwl->rdsem.owndby) {
        ((volatile int *)&rwl->rdsem.owndby)[0]--;
    } else {
        rt_global_restore_flags(flags);
        return RTE_PERM;
    }
    rt_global_restore_flags(flags);
    flags = rt_global_save_flags_and_cli();
    if (!rwl->wrmtx.owndby && !rwl->rdsem.owndby) {
        RT_TASK *wtask, *rtask;
        wtask = (rwl->wrsem.queue.next)->task;
        rtask = (rwl->rdsem.queue.next)->task;
        if (wtask && rtask) {
            if (wtask->priority <= rtask->priority) {
                rt_sem_signal(&rwl->wrsem);
            } else {
                rt_sem_broadcast(&rwl->rdsem);
            }
        } else if (wtask) {
            rt_sem_signal(&rwl->wrsem);
        } else if (rtask) {
            rt_sem_broadcast(&rwl->rdsem);
        }
    }
    rt_global_restore_flags(flags);
    return 0;
}

/* +++++++++++++++++++++ RECURSIVE SPINLOCKS SUPPORT ++++++++++++++++++++++++ */

/**
 * @anchor rt_spl_init
 * @brief Initialize a spinlock.
 *
 * rt_spl_init initializes a spinlock @a spl.
 *
 * @param spl must point to an allocated @e SPL structure.
 *
 * A spinlock is an active wait synchronization mechanism useful for very
 * short multiprocessor synchronization, when it is more efficient to wait
 * at a meeting point instead of being suspended and then reactivated, as
 * with semaphores, in order to acquire ownership of any object.
 * Spinlocks can be recursed once acquired; a recursing owner must take
 * care to unlock a spinlock as many times as it was acquired.
 *
 * @returns 0 always.
 *
 */

RTAI_SYSCALL_MODE int rt_spl_init(SPL *spl)
{
    spl->owndby = 0;
    spl->count  = 0;
    return 0;
}

/**
 * @anchor rt_spl_delete
 * @brief Destroy a spinlock.
 *
 * rt_spl_delete destroys a spinlock @a spl.
 *
 * @param spl must point to an allocated @e SPL structure.
 *
 * @returns 0 always.
 *
 */

RTAI_SYSCALL_MODE int rt_spl_delete(SPL *spl)
{
    return 0;
}

/**
 * @anchor rt_spl_lock
 * @brief Acquire a spinlock.
 *
 * rt_spl_lock acquires a spinlock @a spl.
 *
 * @param spl must point to an allocated @e SPL structure.
 *
 * rt_spl_lock spins on the lock till it can be acquired. If a task asks
 * for a lock it owns already it will acquire it immediately, but it must
 * take care to unlock it as many times as it recursed the spinlock
 * ownership.
 *
 * @returns 0 always.
 *
 */

RTAI_SYSCALL_MODE int rt_spl_lock(SPL *spl)
{
    unsigned long flags;
    RT_TASK *rt_current;

    rtai_save_flags_and_cli(flags);
    if (spl->owndby == (rt_current = RT_CURRENT)) {
        spl->count++;
    } else {
        while (cmpxchg(&spl->owndby, 0L, rt_current));
        spl->flags = flags;
    }
    rtai_restore_flags(flags);
    return 0;
}

/**
 * @anchor rt_spl_lock_if
 * @brief Acquire a spinlock without waiting.
 *
 * rt_spl_lock_if acquires a spinlock @a spl without waiting.
 *
 * @param spl must point to an allocated @e SPL structure.
 *
 * rt_spl_lock_if tries to acquire a spinlock but will not spin on it if
 * it is owned already.
 *
 * @returns 0 if it succeeded, -1 if the lock was owned already.
 *
 */

RTAI_SYSCALL_MODE int rt_spl_lock_if(SPL *spl)
{
    unsigned long flags;
    RT_TASK *rt_current;

    rtai_save_flags_and_cli(flags);
    if (spl->owndby == (rt_current = RT_CURRENT)) {
        spl->count++;
    } else {
        if (cmpxchg(&spl->owndby, 0L, rt_current)) {
            rtai_restore_flags(flags);
            return -1;
        }
        spl->flags = flags;
    }
    rtai_restore_flags(flags);
    return 0;
}

/**
 * @anchor rt_spl_lock_timed
 * @brief Acquire a spinlock with timeout.
 *
 * rt_spl_lock_timed acquires a spinlock @a spl, spinning only for an
 * allowed wait time.
 *
 * @param spl must point to an allocated @e SPL structure.
 *
 * @param ns is the allowed wait time, in nanoseconds.
 *
 * rt_spl_lock_timed spins on the lock till it can be acquired, as for
 * rt_spl_lock, but only for the allowed time. If the spinlock cannot be
 * acquired in time the function returns in error.
 * This function can be useful either in itself or as a diagnostic tool
 * during code development.
 *
 * @returns 0 if the spinlock was acquired, -1 if a timeout occurred.
 *
 */

RTAI_SYSCALL_MODE int rt_spl_lock_timed(SPL *spl, unsigned long ns)
{
    unsigned long flags;
    RT_TASK *rt_current;

    rtai_save_flags_and_cli(flags);
    if (spl->owndby == (rt_current = RT_CURRENT)) {
        spl->count++;
    } else {
        RTIME end_time;
        long locked;
        end_time = rdtsc() + imuldiv(ns, tuned.cpu_freq, 1000000000);
        while ((locked = (long)cmpxchg(&spl->owndby, 0L, rt_current)) && rdtsc() < end_time);
        if (locked) {
            rtai_restore_flags(flags);
            return -1;
        }
        spl->flags = flags;
    }
    rtai_restore_flags(flags);
    return 0;
}
01484 
01485 /**
01486  * @anchor rt_spl_unlock
01487  * @brief Release an owned spinlock.
01488  *
01489  * rt_spl_lock releases an owned spinlock @a spl.
01490  *
01491  * @param spl must point to an allocated @e SPL structure.
01492  *
01493  * rt_spl_unlock releases an owned lock. The spinlock can remain locked and
01494  * its ownership can remain with the task if the spinlock acquisition was
01495  * recursed.
01496  *
01497  * @returns 0 if the function was used legally, -1 if a task tries to unlock
01498  * a spinlock it does not own.
01499  *
01500  */
01501 
01502 RTAI_SYSCALL_MODE int rt_spl_unlock(SPL *spl)
01503 {
01504     unsigned long flags;
01505     RT_TASK *rt_current;
01506 
01507     rtai_save_flags_and_cli(flags);
01508     if (spl->owndby == (rt_current = RT_CURRENT)) {
01509         if (spl->count) {
01510             --spl->count;
01511         } else {
01512             spl->owndby = 0;
01513             spl->count  = 0;
01514         }
01515         rtai_restore_flags(spl->flags);
01516         return 0;
01517     }
01518     rtai_restore_flags(flags);
01519     return -1;
01520 }
01521 
01522 /* ++++++ NAMED SEMAPHORES, BARRIER, COND VARIABLES, RWLOCKS, SPINLOCKS +++++ */
01523 
01524 #include <rtai_registry.h>
01525 
01526 /**
01527  * @anchor _rt_typed_named_sem_init
01528  * @brief Initialize a specifically typed (counting, binary, resource)
01529  *    semaphore identified by a name.
01530  *
01531  * _rt_typed_named_sem_init allocates and initializes a semaphore identified 
01532  * by @e name of type @e type. Once the semaphore structure is allocated the 
01533  * initialization is as for rt_typed_sem_init. The function returns the
01534  * handle pointing to the allocated semaphore structure, to be used as the
01535  * usual semaphore address in all semaphore based services. Named objects
01536  * are useful for use among different processes, kernel/user space and
01537  * in distributed applications, see netrpc.
01538  *
01539  * @param sem_name is the identifier associated with the returned object.
01540  *
01541  * @param value is the initial value of the semaphore, always set to 1
01542  *    for a resource semaphore.
01543  *
01544  * @param type is the semaphore type and queuing policy. It can be an OR
01545  * a semaphore kind: CNT_SEM for counting semaphores, BIN_SEM for binary 
01546  * semaphores, RES_SEM for resource semaphores; and queuing policy:
01547  * FIFO_Q, PRIO_Q for a fifo and priority queueing respectively.
01548  * Resource semaphores will enforce a PRIO_Q policy anyhow.
01549  * 
01550  * Since @a name can be a clumsy identifier, services are provided to
01551  * convert six-character identifiers to unsigned long, and vice versa.
01552  *
01553  * @see nam2num() and num2nam().
01554  *
01555  * See rt_typed_sem_init for further clues.
01556  *
01557  * As for all the named initialization functions it must be remarked that
01558  * only the very first call to initialize/create a named RTAI object does a
01559  * real allocation of the object; any following call with the same name
01560  * will just increase its usage count. In any case the function returns
01561  * a pointer to the named object, or zero if in error.
01562  *
01563  * @returns either a valid pointer or 0 if in error.
01564  *
01565  */
01566 
01567 RTAI_SYSCALL_MODE SEM *_rt_typed_named_sem_init(unsigned long sem_name, int value, int type, unsigned long *handle)
01568 {
01569     SEM *sem;
01570 
01571     if ((sem = rt_get_adr_cnt(sem_name))) {
01572         if (handle) {
01573             if ((unsigned long)handle > PAGE_OFFSET) {
01574                 *handle = 1;
01575             } else {
01576                 rt_copy_to_user(handle, sem, sizeof(SEM *));
01577             }
01578         }
01579         return sem;
01580     }
01581     if ((sem = rt_malloc(sizeof(SEM)))) {
01582         rt_typed_sem_init(sem, value, type);
01583         if (rt_register(sem_name, sem, IS_SEM, 0)) {
01584             return sem;
01585         }
01586         rt_sem_delete(sem);
01587     }
01588     rt_free(sem);
01589     return (SEM *)0;
01590 }
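
/*
 * Editor's usage sketch (not in the original source): two kernel modules or
 * processes can share a semaphore by agreeing on a name. "MYSEM" is a
 * hypothetical identifier; error handling is elided.
 */
static void named_sem_sketch(void)
{
    SEM *sem;

    /* the first caller allocates; later callers just bump the usage count */
    sem = _rt_typed_named_sem_init(nam2num("MYSEM"), 1, RES_SEM, NULL);
    if (!sem) {
        return;
    }
    rt_sem_wait(sem);
    /* ... work protected by the resource semaphore ... */
    rt_sem_signal(sem);
    /* decrements the usage count; the last delete really frees the object */
    rt_named_sem_delete(sem);
}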
01591 
01592 /**
01593  * @anchor rt_named_sem_delete
01594  * @brief Delete a semaphore initialized in named mode.
01595  *
01596  * rt_named_sem_delete deletes a semaphore previously created with 
01597  * @ref _rt_typed_named_sem_init(). 
01598  *
01599  * @param sem points to the structure pointer returned by a corresponding
01600  * call to _rt_typed_named_sem_init. 
01601  *
01602  * Any task blocked on this semaphore is returned in error and
01603  * allowed to run when the semaphore is destroyed.
01604  * As for all the named allocation functions, delete calls just decrement
01605  * a usage count till the last one, which is the call that really frees
01606  * the object.
01607  *
01608  * @return an int >=0 is returned upon success, SEM_ERR if it failed to 
01609  * delete the semaphore, -EFAULT if the semaphore does not exist anymore.
01610  *
01611  */
01612 
01613 RTAI_SYSCALL_MODE int rt_named_sem_delete(SEM *sem)
01614 {
01615     int ret;
01616     if (!(ret = rt_drg_on_adr_cnt(sem))) {
01617         if (!rt_sem_delete(sem)) {
01618             rt_free(sem);
01619             return 0;
01620         } else {
01621             return RTE_OBJINV;
01622         }
01623     }
01624     return ret;
01625 }
01626 
01627 /**
01628  * @anchor _rt_named_rwl_init
01629  * @brief Initialize a multi readers single writer lock identified by a name.
01630  *
01631  * _rt_named_rwl_init allocates and initializes a multi readers single writer 
01632  * lock (RWL) identified by @e name. Once the lock structure is allocated the 
01633  * initialization is as for rt_rwl_init. The function returns the
01634  * handle pointing to the allocated multi readers single writer lock
01635  * structure, to be used as the usual lock address in all rwl based services. 
01636  * Named objects are useful for use among different processes, kernel/user 
01637  * space and in distributed applications, see netrpc.
01638  *
01639  * @param rwl_name is the identifier associated with the returned object.
01640  *
01641  * Since @a name can be a clumsy identifier, services are provided to
01642  * convert six-character identifiers to unsigned long, and vice versa.
01643  *
01644  * @see nam2num() and num2nam().
01645  *
01646  * As for all the named initialization functions it must be remarked that
01647  * only the very first call to initialize/create a named RTAI object does a
01648  * real allocation of the object; any following call with the same name
01649  * will just increase its usage count. In any case the function returns
01650  * a pointer to the named object, or zero if in error.
01651  *
01652  * @returns either a valid pointer or 0 if in error.
01653  *
01654  */
01655 
01656 RTAI_SYSCALL_MODE RWL *_rt_named_rwl_init(unsigned long rwl_name)
01657 {
01658     RWL *rwl;
01659 
01660     if ((rwl = rt_get_adr_cnt(rwl_name))) {
01661         return rwl;
01662     }
01663     if ((rwl = rt_malloc(sizeof(RWL)))) {
01664         rt_rwl_init(rwl);
01665         if (rt_register(rwl_name, rwl, IS_RWL, 0)) {
01666             return rwl;
01667         }
01668         rt_rwl_delete(rwl);
01669     }
01670     rt_free(rwl);
01671     return (RWL *)0;
01672 }
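
/*
 * Editor's usage sketch (not in the original source): several readers may
 * hold a named rwl at once, while a writer gets it exclusively. "MYRWL" is
 * a hypothetical identifier; error handling is elided.
 */
static void named_rwl_sketch(void)
{
    RWL *rwl = _rt_named_rwl_init(nam2num("MYRWL"));

    if (!rwl) {
        return;
    }
    rt_rwl_rdlock(rwl);     /* shared access, other readers may enter too */
    /* ... read-side work ... */
    rt_rwl_unlock(rwl);

    rt_rwl_wrlock(rwl);     /* exclusive access */
    /* ... write-side work ... */
    rt_rwl_unlock(rwl);

    rt_named_rwl_delete(rwl);
}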
01673 
01674 /**
01675  * @anchor rt_named_rwl_delete
01676  * @brief Delete a multi readers single writer lock in named mode.
01677  *
01678  * rt_named_rwl_delete deletes a multi readers single writer lock
01679  * previously created with @ref _rt_named_rwl_init(). 
01680  *
01681  * @param rwl points to the structure pointer returned by a corresponding 
01682  * call to rt_named_rwl_init. 
01683  *
01684  * As for all the named allocation functions, delete calls just decrement
01685  * a usage count till the last one, which is the call that
01686  * really frees the object.
01687  *
01688  * @return an int >=0 is returned upon success, SEM_ERR if it failed to 
01689  * delete the multi readers single writer lock, -EFAULT if the lock does 
01690  * not exist anymore.
01691  *
01692  */
01693 
01694 RTAI_SYSCALL_MODE int rt_named_rwl_delete(RWL *rwl)
01695 {
01696     int ret;
01697     if (!(ret = rt_drg_on_adr_cnt(rwl))) {
01698         if (!rt_rwl_delete(rwl)) {
01699             rt_free(rwl);
01700             return 0;
01701         } else {
01702             return RTE_OBJINV;
01703         }
01704     }
01705     return ret;
01706 }
01707 
01708 /**
01709  * @anchor _rt_named_spl_init
01710  * @brief Initialize a spinlock identified by a name.
01711  *
01712  * _rt_named_spl_init allocates and initializes a spinlock (SPL) identified 
01713  * by @e name. Once the spinlock structure is allocated the initialization 
01714  * is as for rt_spl_init. The function returns the handle pointing to the 
01715  * allocated spinlock structure, to be used as the usual spinlock address 
01716  * in all spinlock based services. Named objects are useful for use among 
01717  * different processes and kernel/user space.
01718  *
01719  * @param spl_name is the identifier associated with the returned object.
01720  *
01721  * Since @a name can be a clumsy identifier, services are provided to
01722  * convert six-character identifiers to unsigned long, and vice versa.
01723  *
01724  * @see nam2num() and num2nam().
01725  *
01726  * As for all the named initialization functions it must be remarked that
01727  * only the very first call to initialize/create a named RTAI object does a
01728  * real allocation of the object; any following call with the same name
01729  * will just increase its usage count. In any case the function returns
01730  * a pointer to the named object, or zero if in error.
01731  *
01732  * @returns either a valid pointer or 0 if in error.
01733  *
01734  */
01735 
01736 RTAI_SYSCALL_MODE SPL *_rt_named_spl_init(unsigned long spl_name)
01737 {
01738     SPL *spl;
01739 
01740     if ((spl = rt_get_adr_cnt(spl_name))) {
01741         return spl;
01742     }
01743     if ((spl = rt_malloc(sizeof(SPL)))) {
01744         rt_spl_init(spl);
01745         if (rt_register(spl_name, spl, IS_SPL, 0)) {
01746             return spl;
01747         }
01748         rt_spl_delete(spl);
01749     }
01750     rt_free(spl);
01751     return (SPL *)0;
01752 }
01753 
01754 /**
01755  * @anchor rt_named_spl_delete
01756  * @brief Delete a spinlock in named mode.
01757  *
01758  * rt_named_spl_delete deletes a spinlock previously created with
01759  * @ref _rt_named_spl_init(). 
01760  *
01761  * @param spl points to the structure pointer returned by a corresponding 
01762  * call to rt_named_spl_init. 
01763  *
01764  * As for all the named allocation functions, delete calls just decrement
01765  * a usage count till the last one, which is the call that
01766  * really frees the object.
01767  *
01768  * @return an int >=0 is returned upon success, -EFAULT if the spinlock
01769  * does not exist anymore.
01770  *
01771  */
01772 
01773 RTAI_SYSCALL_MODE int rt_named_spl_delete(SPL *spl)
01774 {
01775     int ret;
01776     if (!(ret = rt_drg_on_adr_cnt(spl))) {
01777         rt_spl_delete(spl);
01778         rt_free(spl);
01779         return 0;
01780     }
01781     return ret;
01782 }
01783 
01784 /* ++++++++++++++++++++++++++++++ POLLING SERVICE +++++++++++++++++++++++++++ */
01785 
01786 struct rt_poll_enc rt_poll_ofstfun[] = {
01787     [RT_POLL_NOT_TO_USE]   = {            0           , NULL },
01788 #ifdef CONFIG_RTAI_RT_POLL
01789     [RT_POLL_MBX_RECV]     = { offsetof(MBX, poll_recv), NULL }, 
01790     [RT_POLL_MBX_SEND]     = { offsetof(MBX, poll_send), NULL },
01791     [RT_POLL_SEM_WAIT_ALL] = { offsetof(SEM, poll_wait_all), NULL }, 
01792     [RT_POLL_SEM_WAIT_ONE] = { offsetof(SEM, poll_wait_one), NULL }
01793 #else
01794     [RT_POLL_MBX_RECV]     = { 0, NULL }, 
01795     [RT_POLL_MBX_SEND]     = { 0, NULL },
01796     [RT_POLL_SEM_WAIT_ALL] = { 0, NULL }, 
01797     [RT_POLL_SEM_WAIT_ONE] = { 0, NULL }
01798 #endif
01799 };
01800 EXPORT_SYMBOL(rt_poll_ofstfun);
01801 
01802 #ifdef CONFIG_RTAI_RT_POLL
01803 
01804 typedef struct rt_poll_sem { QUEUE queue; RT_TASK *task; int wait; } POLL_SEM;
01805 
01806 static inline void rt_schedule_tosched(unsigned long tosched_mask)
01807 {
01808     unsigned long flags;
01809 #ifdef CONFIG_SMP
01810     unsigned long cpumask, rmask;
01811     rmask = tosched_mask & ~(cpumask = (1 << rtai_cpuid())); 
01812     if (rmask) {
01813         rtai_save_flags_and_cli(flags);
01814         send_sched_ipi(rmask);
01815         rtai_restore_flags(flags);
01816     }
01817     if (tosched_mask | cpumask)
01818 #endif
01819     {
01820         flags = rt_global_save_flags_and_cli();
01821         rt_schedule();
01822         rt_global_restore_flags(flags);
01823     }
01824 }
01825  
01826 static inline int rt_poll_wait(POLL_SEM *sem, RT_TASK *rt_current)
01827 {
01828     unsigned long flags;
01829     int retval = 0;
01830 
01831     flags = rt_global_save_flags_and_cli();
01832     if (sem->wait) {
01833         rt_current->state |= RT_SCHED_POLL;
01834         rem_ready_current(rt_current);
01835         enqueue_blocked(rt_current, &sem->queue, 1);
01836         rt_schedule();
01837         if (unlikely(rt_current->blocked_on != NULL)) { 
01838             dequeue_blocked(rt_current);
01839             retval = RTE_UNBLKD;
01840         }
01841     }
01842     rt_global_restore_flags(flags);
01843     return retval;
01844 }
01845 
01846 static inline int rt_poll_wait_until(POLL_SEM *sem, RTIME time, RT_TASK *rt_current, int cpuid)
01847 {
01848     unsigned long flags;
01849     int retval = 0;
01850 
01851     flags = rt_global_save_flags_and_cli();
01852     if (sem->wait) {
01853         rt_current->blocked_on = &sem->queue;
01854         if ((rt_current->resume_time = time) > rt_time_h) {
01855             rt_current->state |= (RT_SCHED_POLL | RT_SCHED_DELAYED);
01856             rem_ready_current(rt_current);
01857             enqueue_blocked(rt_current, &sem->queue, 1);
01858             enq_timed_task(rt_current);
01859             rt_schedule();
01860         }
01861         if (unlikely(rt_current->blocked_on != NULL)) { 
01862             retval = likely((void *)rt_current->blocked_on > RTP_HIGERR) ? RTE_TIMOUT : RTE_UNBLKD;
01863             dequeue_blocked(rt_current);
01864         }
01865     }
01866     rt_global_restore_flags(flags);
01867     return retval;
01868 }
01869 
01870 static inline int rt_poll_signal(POLL_SEM *sem)
01871 {
01872     unsigned long flags;
01873     RT_TASK *task;
01874     int retval = 0;
01875 
01876     flags = rt_global_save_flags_and_cli();
01877     sem->wait = 0;
01878     if ((task = (sem->queue.next)->task)) {
01879         dequeue_blocked(task);
01880         rem_timed_task(task);
01881         if (task->state != RT_SCHED_READY && (task->state &= ~(RT_SCHED_POLL | RT_SCHED_DELAYED)) == RT_SCHED_READY) {
01882             enq_ready_task(task);
01883             retval = (1 << task->runnable_on_cpus);
01884         }
01885     }
01886     rt_global_restore_flags(flags);
01887     return retval;
01888 }
01889 
01890 void rt_wakeup_pollers(struct rt_poll_ql *ql, int reason)
01891 {
01892     QUEUE *q, *queue = &ql->pollq;
01893     spinlock_t *qlock = &ql->pollock;
01894 
01895     rt_spin_lock_irq(qlock);
01896     if ((q = queue->next) != queue) {
01897         POLL_SEM *sem;
01898         unsigned long tosched_mask = 0UL;
01899         do {
01900             sem = (POLL_SEM *)q->task;
01901             q->task = (void *)((unsigned long)reason);
01902             (queue->next = q->next)->prev = queue;
01903             tosched_mask |= rt_poll_signal(sem);
01904             rt_spin_unlock_irq(qlock);
01905             rt_spin_lock_irq(qlock);
01906         } while ((q = queue->next) != queue);
01907         rt_spin_unlock_irq(qlock);
01908         rt_schedule_tosched(tosched_mask);
01909     } else {
01910         rt_spin_unlock_irq(qlock);
01911     }
01912 }
01913 
01914 EXPORT_SYMBOL(rt_wakeup_pollers);
01915 
01916 /**
01917  * @anchor _rt_poll
01918  * @brief Poll RTAI IPC mechanisms, waiting for the setting of desired states.
01919  *
01920  * RTAI _rt_poll roughly does what Linux "poll" does, i.e. waits for a desired
01921  * state to be set upon an RTAI IPC mechanism. At the moment it supports MBXes
01922  * and SEMs only; other IPC methods will be added as soon as they are needed.
01923  * It is usable for remote objects also, through RTAI netrpc support.
01924  *
01925  * @param pdsa is a pointer to an array of "struct rt_poll_s" containing
01926  * the list of objects to poll. Its content is not preserved through the
01927  * call, so it must always be initialised before each call; see also the
01928  * usage note below.
01929  *
01930  * @param nr is the number of elements of pdsa. If it is zero rt_poll will
01931  * simply suspend the polling task, but only if a non-null timeout is given.
01932  *
01933  * @param timeout sets a possible time boundary for the polling action; its 
01934  * value can be:
01935  *  -   0 for an infinitely long wait;
01936  *  - < 0 for a relative timeout;
01937  *  - > 0 for an absolute deadline. It has a subcase though. If it is
01938  *       set to 1, a meaningless absolute time value on any machine,
01939  *       rt_poll will not block waiting for the asked events but will
01940  *       return immediately, just reporting anything already available,
01941  *       thus becoming a multiple conditional polling.
01942  *       In such a way we have the usual 4 ways of RTAI IPC services
01943  *       within a single call.
01944  *
01945  * @return:
01946  *  + the number of structures for which the poll succeeded, the related
01947  *    IPC polling results can be inferred by looking at the "what"s, which 
01948  *    will be:
01949  *    - unchanged if nothing happened,
01950  *    - NULL if the related poll succeeded,
01951  *    - after a cast to int it will signal an interrupted polling,
01952  *      either because the related IPC operation was not completed for
01953  *      lack of something, e.g. buffer space, or because of an error,
01954  *      which can be inferred from the content of "what";
01955  *  + a sem error, the value of sem errors being the same as for sem_wait
01956  *    functions;
01957  *  + ENOMEM, if CONFIG_RTAI_RT_POLL_ON_STACK is not set, so that the
01958  *     RTAI heap is used and there is no space left in it (see also the
01959  *     WARNING below).
01960  * 
01961  * @usage note:
01962  *  the user sets the elements of her/his struct rt_poll_s array: 
01963  *  struct rt_poll_s { void *what; unsigned long forwhat; }, as needed.
01964  *  In particular "what" must be set to the pointer of the referenced
01965  *  IPC mechanism, i.e. a MBX or SEM pointer at the moment. Then
01966  *  "forwhat" can be set to:
01967  *  - RT_POLL_MBX_RECV, to wait for something sent to a MBX,
01968  *  - RT_POLL_MBX_SEND, to wait until a MBX can be sent to without blocking,
01969  *  - RT_POLL_SEM_WAIT_ONE/RT_POLL_SEM_WAIT_ALL, to wait for a SEM signal.
01970  *  When _rt_poll returns a user can infer the results of her/his polling
01971  *  by looking at each "what" in the array, as explained above.
01972  *  It is important to remark that if more tasks are using the same IPC
01973  *  mechanism simultaneously, it is not possible to assume that a NULL 
01974  *  "what" entails the possibility of applying the desired IPC mechanism
01975  *  without blocking.
01976  *  In fact the task at hand cannot be sure that another task has done
01977  *  it first, thus depleting/filling the polled object. Then, if it is known
01978  *  that more tasks might have polled/accessed the same mechanism, the 
01979  *  "_if" version of the needed action should be used if one wants to
01980  *  be sure of not blocking. If an "_if" call fails then it will mean
01981  *  that there was a competing polling/access on the same object.
01982  *  WARNING: rt_poll needs a couple of dynamically assigned arrays.
01983  *  In the default implementation they are allocated on the stack,
01984  *  keeping interrupts unblocked as far as possible. So there is the
01985  *  danger that a very large polling list might exceed the kernel stack
01986  *  in use. Even if that is not the case a large polling coupled to a
01987  *  simultaneous flooding of nested interrupts could result in a stack
01988  *  overflow as well. The solution to such problems is to use rt_malloc,
01989  *  in which case the limit would be only in the memory assigned to the
01990  *  RTAI dynamic heap. To be cautious rt_malloc has been set as default
01991  *  in the RTAI configuration. If one is sure that short enough lists,
01992  *  say 30/40 terms or so, will be used in her/his application, the more
01993  *  effective allocation on the stack can be used by setting 
01994  *  CONFIG_RTAI_RT_POLL_ON_STACK when configuring RTAI.
01995  */
01996 
01997 #define QL(i) ((struct rt_poll_ql *)(pds[i].what + rt_poll_ofstfun[pds[i].forwhat].offset))
01998 
01999 RTAI_SYSCALL_MODE int _rt_poll(struct rt_poll_s *pdsa, unsigned long nr, RTIME timeout, int space)
02000 {
02001     struct rt_poll_s *pds;
02002     int i, polled, semret, cpuid;
02003     POLL_SEM sem = { { &sem.queue, &sem.queue, NULL }, rt_smp_current[cpuid = rtai_cpuid()], 1 };
02004 #ifdef CONFIG_RTAI_RT_POLL_ON_STACK
02005     struct rt_poll_s pdsv[nr]; // BEWARE: consuming too much stack?
02006     QUEUE pollq[nr];           // BEWARE: consuming too much stack?
02007 #else
02008     struct rt_poll_s *pdsv;
02009     QUEUE *pollq;
02010     if (!(pdsv = rt_malloc(nr*sizeof(struct rt_poll_s))) && nr > 0) {
02011         return ENOMEM;
02012     }
02013     if (!(pollq = rt_malloc(nr*sizeof(QUEUE))) && nr > 0) {
02014         rt_free(pdsv);
02015         return ENOMEM;
02016     }
02017 #endif
02018     if (space) {
02019         pds = pdsa;
02020     } else {
02021         rt_copy_from_user(pdsv, pdsa, nr*sizeof(struct rt_poll_s));
02022         pds = pdsv;
02023     }
02024     for (polled = i = 0; i < nr; i++) {
02025         QUEUE *queue = NULL;
02026         spinlock_t *qlock = NULL;
02027         if (rt_poll_ofstfun[pds[i].forwhat].topoll(pds[i].what)) {
02028             struct rt_poll_ql *ql = QL(i);
02029             queue = &ql->pollq;
02030             qlock = &ql->pollock;
02031         } else {
02032             pollq[i].task = NULL;
02033             polled++;
02034         }
02035         if (queue) {
02036             QUEUE *q = queue;
02037             pollq[i].task = (RT_TASK *)&sem;
02038             rt_spin_lock_irq(qlock);
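            /* empty-body scan: find the priority-ordered insertion point */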
02039             while ((q = q->next) != queue && (((POLL_SEM *)q->task)->task)->priority <= sem.task->priority);
02040             pollq[i].next = q;
02041             q->prev = (pollq[i].prev = q->prev)->next = &pollq[i];
02042             rt_spin_unlock_irq(qlock);
02043         } else {
02044             pds[i].forwhat = 0;
02045         }
02046     }
02047     semret = 0;
02048     if (!polled) {
02049         if (timeout < 0) {
02050             semret = rt_poll_wait_until(&sem, get_time() - timeout, sem.task, cpuid);
02051         } else if (timeout > 1) {
02052             semret = rt_poll_wait_until(&sem, timeout, sem.task, cpuid);
02053         } else if (timeout < 1 && nr > 0) {
02054             semret = rt_poll_wait(&sem, sem.task);
02055         }
02056     }
02057     for (polled = i = 0; i < nr; i++) {
02058         if (pds[i].forwhat) {
02059             spinlock_t *qlock = &QL(i)->pollock;
02060             rt_spin_lock_irq(qlock);
02061             if (pollq[i].task == (void *)&sem) {
02062                 (pollq[i].prev)->next = pollq[i].next;
02063                 (pollq[i].next)->prev = pollq[i].prev;
02064             }
02065             rt_spin_unlock_irq(qlock);
02066         }
02067         if (pollq[i].task != (void *)&sem) {
02068             pds[i].what = pollq[i].task;
02069             polled++;
02070         }
02071     }
02072     if (!space) {
02073         rt_copy_to_user(pdsa, pds, nr*sizeof(struct rt_poll_s));
02074     }
02075 #ifndef CONFIG_RTAI_RT_POLL_ON_STACK
02076     rt_free(pdsv);
02077     rt_free(pollq);
02078 #endif
02079     return polled ? polled : semret;
02080 }
02081 
02082 EXPORT_SYMBOL(_rt_poll);
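
/*
 * Editor's kernel-space polling sketch (not in the original source): wait
 * for either of two hypothetical mailboxes to have something to receive.
 * The "space" argument is 1 because the array lives in kernel space; error
 * handling and MBX setup are elided.
 */
static void poll_sketch(MBX *mbx1, MBX *mbx2)
{
    struct rt_poll_s pds[2] = {
        { mbx1, RT_POLL_MBX_RECV },
        { mbx2, RT_POLL_MBX_RECV }
    };
    int i, nready = _rt_poll(pds, 2, 0, 1);    /* timeout 0: wait forever */

    if (nready > 0) {
        for (i = 0; i < 2; i++) {
            if (!pds[i].what) {
                /* poll succeeded on entry i; a competing task may have
                   consumed the event, so receive with rt_mbx_receive_if */
            }
        }
    }
}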
02083 
02084 #endif
02085 
02086 /* +++++++++++++++++++++++++++ END POLLING SERVICE ++++++++++++++++++++++++++ */
02087 
02088 /* +++++ SEMAPHORES, BARRIER, COND VARIABLES, RWLOCKS, SPINLOCKS ENTRIES ++++ */
02089 
02090 struct rt_native_fun_entry rt_sem_entries[] = {
02091     { { 0, rt_typed_sem_init },        TYPED_SEM_INIT },
02092     { { 0, rt_sem_delete },            SEM_DELETE },
02093     { { 0, _rt_typed_named_sem_init }, NAMED_SEM_INIT },
02094     { { 0, rt_named_sem_delete },      NAMED_SEM_DELETE },
02095     { { 1, rt_sem_signal },            SEM_SIGNAL },
02096     { { 1, rt_sem_broadcast },         SEM_BROADCAST },
02097     { { 1, rt_sem_wait },              SEM_WAIT },
02098     { { 1, rt_sem_wait_if },           SEM_WAIT_IF },
02099     { { 1, rt_sem_wait_until },        SEM_WAIT_UNTIL },
02100     { { 1, rt_sem_wait_timed },        SEM_WAIT_TIMED },
02101     { { 1, rt_sem_wait_barrier },      SEM_WAIT_BARRIER },
02102     { { 1, rt_sem_count },             SEM_COUNT },
02103     { { 1, rt_cond_signal },           COND_SIGNAL },
02104     { { 1, rt_cond_wait },             COND_WAIT },
02105     { { 1, rt_cond_wait_until },       COND_WAIT_UNTIL },
02106     { { 1, rt_cond_wait_timed },       COND_WAIT_TIMED },
02107     { { 0, rt_typed_rwl_init },        RWL_INIT },
02108     { { 0, rt_rwl_delete },            RWL_DELETE },
02109     { { 0, _rt_named_rwl_init },       NAMED_RWL_INIT },
02110     { { 0, rt_named_rwl_delete },      NAMED_RWL_DELETE },
02111     { { 1, rt_rwl_rdlock },            RWL_RDLOCK },
02112     { { 1, rt_rwl_rdlock_if },         RWL_RDLOCK_IF },
02113     { { 1, rt_rwl_rdlock_until },      RWL_RDLOCK_UNTIL },
02114     { { 1, rt_rwl_rdlock_timed },      RWL_RDLOCK_TIMED },
02115     { { 1, rt_rwl_wrlock },            RWL_WRLOCK },
02116     { { 1, rt_rwl_wrlock_if },         RWL_WRLOCK_IF },
02117     { { 1, rt_rwl_wrlock_until },      RWL_WRLOCK_UNTIL },
02118     { { 1, rt_rwl_wrlock_timed },      RWL_WRLOCK_TIMED },
02119     { { 1, rt_rwl_unlock },            RWL_UNLOCK },
02120     { { 0, rt_spl_init },              SPL_INIT },
02121     { { 0, rt_spl_delete },            SPL_DELETE },
02122     { { 0, _rt_named_spl_init },       NAMED_SPL_INIT },
02123     { { 0, rt_named_spl_delete },      NAMED_SPL_DELETE },
02124     { { 1, rt_spl_lock },              SPL_LOCK },
02125     { { 1, rt_spl_lock_if },           SPL_LOCK_IF },
02126     { { 1, rt_spl_lock_timed },        SPL_LOCK_TIMED },
02127     { { 1, rt_spl_unlock },            SPL_UNLOCK },
02128 #ifdef CONFIG_RTAI_RT_POLL
02129     { { 1, _rt_poll },             SEM_RT_POLL },
02130 #endif
02131     { { 0, 0 },                    000 }
02132 };
02133 
02134 extern int set_rt_fun_entries(struct rt_native_fun_entry *entry);
02135 extern void reset_rt_fun_entries(struct rt_native_fun_entry *entry);
02136 
02137 static int poll_wait(void *sem) { return ((SEM *)sem)->count <= 0; }
02138 
02139 int __rtai_sem_init (void)
02140 {
02141     rt_poll_ofstfun[RT_POLL_SEM_WAIT_ALL].topoll =
02142     rt_poll_ofstfun[RT_POLL_SEM_WAIT_ONE].topoll = poll_wait;
02143     return set_rt_fun_entries(rt_sem_entries);
02144 }
02145 
02146 void __rtai_sem_exit (void)
02147 {
02148     rt_poll_ofstfun[RT_POLL_SEM_WAIT_ALL].topoll =
02149     rt_poll_ofstfun[RT_POLL_SEM_WAIT_ONE].topoll = NULL;
02150     reset_rt_fun_entries(rt_sem_entries);
02151 }
02152 
02153 /* +++++++ END SEMAPHORES, BARRIER, COND VARIABLES, RWLOCKS, SPINLOCKS ++++++ */
02154 
02155 /*@}*/
02156 
02157 #ifndef CONFIG_RTAI_SEM_BUILTIN
02158 module_init(__rtai_sem_init);
02159 module_exit(__rtai_sem_exit);
02160 #endif /* !CONFIG_RTAI_SEM_BUILTIN */
02161 
02162 EXPORT_SYMBOL(rt_typed_sem_init);
02163 EXPORT_SYMBOL(rt_sem_init);
02164 EXPORT_SYMBOL(rt_sem_delete);
02165 EXPORT_SYMBOL(rt_sem_count);
02166 EXPORT_SYMBOL(rt_sem_signal);
02167 EXPORT_SYMBOL(rt_sem_broadcast);
02168 EXPORT_SYMBOL(rt_sem_wait);
02169 EXPORT_SYMBOL(rt_sem_wait_if);
02170 EXPORT_SYMBOL(rt_sem_wait_until);
02171 EXPORT_SYMBOL(rt_sem_wait_timed);
02172 EXPORT_SYMBOL(rt_sem_wait_barrier);
02173 EXPORT_SYMBOL(_rt_typed_named_sem_init);
02174 EXPORT_SYMBOL(rt_named_sem_delete);
02175 
02176 EXPORT_SYMBOL(rt_cond_signal);
02177 EXPORT_SYMBOL(rt_cond_wait);
02178 EXPORT_SYMBOL(rt_cond_wait_until);
02179 EXPORT_SYMBOL(rt_cond_wait_timed);
02180 
02181 EXPORT_SYMBOL(rt_typed_rwl_init);
02182 EXPORT_SYMBOL(rt_rwl_delete);
02183 EXPORT_SYMBOL(rt_rwl_rdlock);
02184 EXPORT_SYMBOL(rt_rwl_rdlock_if);
02185 EXPORT_SYMBOL(rt_rwl_rdlock_until);
02186 EXPORT_SYMBOL(rt_rwl_rdlock_timed);
02187 EXPORT_SYMBOL(rt_rwl_wrlock);
02188 EXPORT_SYMBOL(rt_rwl_wrlock_if);
02189 EXPORT_SYMBOL(rt_rwl_wrlock_until);
02190 EXPORT_SYMBOL(rt_rwl_wrlock_timed);
02191 EXPORT_SYMBOL(rt_rwl_unlock);
02192 EXPORT_SYMBOL(_rt_named_rwl_init);
02193 EXPORT_SYMBOL(rt_named_rwl_delete);
02194 
02195 EXPORT_SYMBOL(rt_spl_init);
02196 EXPORT_SYMBOL(rt_spl_delete);
02197 EXPORT_SYMBOL(rt_spl_lock);
02198 EXPORT_SYMBOL(rt_spl_lock_if);
02199 EXPORT_SYMBOL(rt_spl_lock_timed);
02200 EXPORT_SYMBOL(rt_spl_unlock);
02201 EXPORT_SYMBOL(_rt_named_spl_init);
02202 EXPORT_SYMBOL(rt_named_spl_delete);
