base/ipc/sem/sem.c

Go to the documentation of this file.
/**
 * @file
 * Semaphore functions.
 * @author Paolo Mantegazza
 *
 * @note Copyright (C) 1999-2003 Paolo Mantegazza
 * <mantegazza@aero.polimi.it>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * @ingroup sem
 */

/**
 * @ingroup sched
 * @defgroup sem Semaphore functions
 *
 *@{*/

#include <rtai_schedcore.h>
#include <rtai_sem.h>
#include <rtai_rwl.h>
#include <rtai_spl.h>

MODULE_LICENSE("GPL");

/* +++++++++++++++++++++ ALL SEMAPHORES TYPES SUPPORT +++++++++++++++++++++++ */

/**
 * @anchor rt_typed_sem_init
 * @brief Initialize a specifically typed (counting, binary, resource)
 * semaphore.
 *
 * rt_typed_sem_init initializes a semaphore @e sem of type @e type. A
 * semaphore can be used for communication and synchronization among
 * real time tasks. A negative semaphore value shows how many tasks are
 * blocked on the semaphore queue, waiting to be awakened by calls to
 * rt_sem_signal.
 *
 * @param sem must point to an allocated SEM structure.
 *
 * @param value is the initial value of the semaphore, always set to 1
 * for a resource semaphore.
 *
 * @param type is the semaphore type and queuing policy. It is the OR of
 * a semaphore kind: CNT_SEM (counting), BIN_SEM (binary), RES_SEM
 * (resource); and a queuing policy: FIFO_Q or PRIO_Q. Resource
 * semaphores enforce PRIO_Q regardless of the policy bit.
 *
 * Counting semaphores can register up to 0xFFFE events. Binary
 * semaphores never count above 1 whatever number of events is signaled
 * to them. Resource semaphores are special binary semaphores with
 * priority inheritance: the owner task has its priority raised to that
 * of any task blocking on the semaphore, avoiding deadlocks due to
 * priority inversion. Resource semaphores can be recursed (the owner is
 * not blocked by nested waits) and must be signaled, in reverse order,
 * as many times as they were waited on. With multiply owned resources
 * (including messages sent to the owner) inheritance degrades to an
 * adaptive priority ceiling: the base priority is restored only when
 * all owned resources are released. To avoid deadlocks, a task owning
 * a resource semaphore cannot be suspended; any @ref rt_task_suspend()
 * posed on it is just registered and takes effect once all owned
 * resources are released.
 *
 * @note RTAI counting semaphores assume their counter never exceeds
 * 0xFFFF, that value being used to signal returns in error; the
 * initial count therefore cannot exceed 0xFFFF either.
 */
void rt_typed_sem_init(SEM *sem, int value, int type)
{
	sem->magic = RT_SEM_MAGIC;
	sem->count = value;
	/* Resource semaphores always queue waiters by priority (qtype 0);
	 * otherwise honour the caller's FIFO_Q/PRIO_Q request. */
	if ((type & RES_SEM) == RES_SEM) {
		sem->qtype = 0;
	} else {
		sem->qtype = (type & FIFO_Q) ? 1 : 0;
	}
	/* Map the kind bits to the internal encoding via (type & 3) - 2.
	 * From the clamping below: negative -> binary (count capped at 1),
	 * positive -> resource (type doubles as recursion depth, count
	 * forced to 1), zero -> counting. Assumes BIN/CNT/RES_SEM occupy
	 * the low two bits as 1/2/3 — verify against rtai_sem.h. */
	type = (type & 3) - 2;
	if ((sem->type = type) < 0 && value > 1) {
		sem->count = 1;
	} else if (type > 0) {
		sem->type = sem->count = 1;
	}
	/* Empty circular wait queue: the head points to itself. */
	sem->queue.prev = &(sem->queue);
	sem->queue.next = &(sem->queue);
	sem->queue.task = sem->owndby = 0;
}
00131 * 00132 * Positive values of the semaphore variable show how many tasks can 00133 * do a @ref rt_sem_wait() call without blocking. Negative value of a 00134 * semaphore shows how many tasks are blocked on the semaphore queue, 00135 * waiting to be awaken by calls to @ref rt_sem_signal(). 00136 * 00137 * @note RTAI counting semaphores assume that their counter will never 00138 * exceed 0xFFFF, such a number being used to signal returns in 00139 * error. Thus also the initial count value cannot be greater 00140 * than 0xFFFF. 00141 * This is an old legacy function. RTAI 24.1.xx has also 00142 * @ref rt_typed_sem_init(), allowing to 00143 * choose among counting, binary and resource 00144 * semaphores. Resource semaphores have priority inherithance. 00145 */ 00146 void rt_sem_init(SEM *sem, int value) 00147 { 00148 rt_typed_sem_init(sem, value, CNT_SEM); 00149 } 00150 00151 00152 /** 00153 * @anchor rt_sem_delete 00154 * @brief Delete a semaphore 00155 * 00156 * rt_sem_delete deletes a semaphore previously created with 00157 * @ref rt_sem_init(). 00158 * 00159 * @param sem points to the structure used in the corresponding 00160 * call to rt_sem_init. 00161 * 00162 * Any tasks blocked on this semaphore is returned in error and 00163 * allowed to run when semaphore is destroyed. 00164 * 00165 * @return 0 is returned upon success. A negative value is returned on 00166 * failure as described below: 00167 * - @b 0xFFFF: @e sem does not refer to a valid semaphore. 00168 * 00169 * @note In principle 0xFFFF could theoretically be a usable 00170 * semaphores events count, so it could be returned also under 00171 * normal circumstances. It is unlikely you are going to count 00172 * up to such number of events, in any case avoid counting up 00173 * to 0xFFFF. 
/**
 * @anchor rt_sem_delete
 * @brief Delete a semaphore.
 *
 * rt_sem_delete deletes a semaphore previously created with
 * @ref rt_sem_init().
 *
 * @param sem points to the structure used in the corresponding
 * call to rt_sem_init.
 *
 * Any task blocked on this semaphore is returned in error and allowed
 * to run when the semaphore is destroyed.
 *
 * @return 0 is returned upon success. A negative value is returned on
 * failure as described below:
 * - @b 0xFFFF: @e sem does not refer to a valid semaphore.
 *
 * @note In principle 0xFFFF could theoretically be a usable semaphore
 * events count, so it could also be returned under normal
 * circumstances; avoid counting up to 0xFFFF.
 */
int rt_sem_delete(SEM *sem)
{
	unsigned long flags;
	RT_TASK *task;
	unsigned long schedmap, sched;
	QUEUE *q;

	if (sem->magic != RT_SEM_MAGIC) {
		return SEM_ERR;
	}

	schedmap = 0;
	q = &(sem->queue);
	flags = rt_global_save_flags_and_cli();
	/* Invalidate first so concurrent users fail the magic check. */
	sem->magic = 0;
	/* Wake every blocked task; SOMETHING in blocked_on marks "resumed
	 * because the semaphore was destroyed" so the waiter returns in
	 * error. schedmap collects the CPUs that need rescheduling. */
	while ((q = q->next) != &(sem->queue) && (task = q->task)) {
		rem_timed_task(task);
		if (task->state != RT_SCHED_READY && (task->state &= ~(RT_SCHED_SEMAPHORE | RT_SCHED_DELAYED)) == RT_SCHED_READY) {
			task->blocked_on = SOMETHING;
			enq_ready_task(task);
			set_bit(task->runnable_on_cpus & 0x1F, &schedmap);
		}
	}
	sched = schedmap;
	/* Local CPU is handled by rt_schedule(); keep only remote CPUs in
	 * schedmap. */
	clear_bit(rtai_cpuid(), &schedmap);
	/* If a resource semaphore is owned, release it from its owner and
	 * undo priority inheritance. */
	if ((task = sem->owndby) && sem->type > 0) {
		if (task->owndres & SEMHLF) {
			--task->owndres;
		}
		if (!task->owndres) {
			/* Last owned resource gone: back to base priority. */
			sched |= renq_ready_task(task, task->base_priority);
		} else if (!(task->owndres & SEMHLF)) {
			/* No semaphores left, but pending messages remain:
			 * inherit from the head of the message queue. */
			int priority;
			sched |= renq_ready_task(task, task->base_priority > (priority = ((task->msg_queue.next)->task)->priority) ? priority : task->base_priority);
		}
		/* A suspend registered while resources were owned takes
		 * effect now; a negative depth means a pending deletion. */
		if (task->suspdepth) {
			if (task->suspdepth > 0) {
				task->state |= RT_SCHED_SUSPENDED;
				rem_ready_task(task);
				sched = 1;
			} else {
				rt_task_delete(task);
			}
		}
	}
	if (sched) {
		if (schedmap) {
			RT_SCHEDULE_MAP_BOTH(schedmap);
		} else {
			rt_schedule();
		}
	}
	rt_global_restore_flags(flags);
	return 0;
}


/* Return the current semaphore count (no validity check). */
int rt_sem_count(SEM *sem)
{
	return sem->count;
}
/**
 * @anchor rt_sem_signal
 * @brief Signaling a semaphore.
 *
 * rt_sem_signal signals an event to a semaphore. It is typically
 * called when the task leaves a critical region. The semaphore value
 * is incremented and tested. If the value is not positive, the first
 * task in the semaphore's waiting queue is allowed to run.
 * rt_sem_signal never blocks the caller task.
 *
 * @param sem points to the structure used in the call to @ref
 * rt_sem_init().
 *
 * @return 0 is returned upon success. A negative value is returned on
 * failure as described below:
 * - @b 0xFFFF: @e sem does not refer to a valid semaphore.
 *
 * @note In principle 0xFFFF could theoretically be a usable semaphore
 * events count, so it could also be returned under normal
 * circumstances; avoid counting up to 0xFFFF.
 * See @ref rt_sem_wait() notes for some curiosities.
 */
int rt_sem_signal(SEM *sem)
{
	unsigned long flags;
	RT_TASK *task;
	int tosched;

	if (sem->magic != RT_SEM_MAGIC) {
		return SEM_ERR;
	}

	flags = rt_global_save_flags_and_cli();
	if (sem->type) {
		/* Resource semaphore recursed (type > 1): just unwind one
		 * nesting level, ownership is kept. */
		if (sem->type > 1) {
			sem->type--;
			rt_global_restore_flags(flags);
			return 0;
		}
		/* Binary/resource semantics: count saturates at 1. */
		if (++sem->count > 1) {
			sem->count = 1;
		}
	} else {
		sem->count++;
	}
	/* Wake the first queued waiter, if any. */
	if ((task = (sem->queue.next)->task)) {
		dequeue_blocked(task);
		rem_timed_task(task);
		if (task->state != RT_SCHED_READY && (task->state &= ~(RT_SCHED_SEMAPHORE | RT_SCHED_DELAYED)) == RT_SCHED_READY) {
			enq_ready_task(task);
			if (sem->type <= 0) {
				/* Non-resource: nothing more to do, schedule
				 * the woken task and return. */
				RT_SCHEDULE(task, rtai_cpuid());
				rt_global_restore_flags(flags);
				return 0;
			}
			tosched = 1;
			goto res;
		}
	}
	tosched = 0;
res:	if (sem->type > 0) {
		/* Resource semaphore released: drop ownership and undo
		 * priority inheritance on the (current) ex-owner. */
		DECLARE_RT_CURRENT;
		int sched;
		ASSIGN_RT_CURRENT;
		sem->owndby = 0;
		if (rt_current->owndres & SEMHLF) {
			--rt_current->owndres;
		}
		if (!rt_current->owndres) {
			/* No resources left at all: back to base priority. */
			sched = renq_current(rt_current, rt_current->base_priority);
		} else if (!(rt_current->owndres & SEMHLF)) {
			/* Only pending messages remain: inherit from the head
			 * of the message queue. */
			int priority;
			sched = renq_current(rt_current, rt_current->base_priority > (priority = ((rt_current->msg_queue.next)->task)->priority) ? priority : rt_current->base_priority);
		} else {
			sched = 0;
		}
		/* Honour any suspend/delete registered while resources were
		 * owned (negative depth means pending deletion). */
		if (rt_current->suspdepth) {
			if (rt_current->suspdepth > 0) {
				rt_current->state |= RT_SCHED_SUSPENDED;
				rem_ready_current(rt_current);
				sched = 1;
			} else {
				rt_task_delete(rt_current);
			}
		}
		if (sched) {
			if (tosched) {
				RT_SCHEDULE_BOTH(task, cpuid);
			} else {
				rt_schedule();
			}
		} else if (tosched) {
			RT_SCHEDULE(task, cpuid);
		}
	}
	rt_global_restore_flags(flags);
	return 0;
}
/**
 * @anchor rt_sem_broadcast
 * @brief Signal a semaphore, waking all waiters.
 *
 * rt_sem_broadcast signals an event to a semaphore that unblocks all
 * tasks waiting on it. It is used as a support for RTAI proper
 * conditional variables but can be of help in many other instances.
 * After the broadcast the semaphore count is set to zero, thus all
 * tasks subsequently waiting on it will block.
 *
 * @param sem points to the structure used in the call to @ref
 * rt_sem_init().
 *
 * @returns 0 always (SEM_ERR if @e sem is not a valid semaphore).
 */
int rt_sem_broadcast(SEM *sem)
{
	unsigned long flags, schedmap;
	RT_TASK *task;
	QUEUE *q;

	if (sem->magic != RT_SEM_MAGIC) {
		return SEM_ERR;
	}
	schedmap = 0;
	q = &(sem->queue);
	flags = rt_global_save_flags_and_cli();
	/* Wake every queued task, accumulating the CPUs that need a
	 * reschedule in schedmap. */
	while ((q = q->next) != &(sem->queue)) {
		dequeue_blocked(task = q->task);
		rem_timed_task(task);
		if (task->state != RT_SCHED_READY && (task->state &= ~(RT_SCHED_SEMAPHORE | RT_SCHED_DELAYED)) == RT_SCHED_READY) {
			enq_ready_task(task);
			set_bit(task->runnable_on_cpus & 0x1F, &schedmap);
		}
		/* Briefly reopen the global lock between waiters to bound
		 * interrupt latency on long queues. */
		rt_global_restore_flags(flags);
		flags = rt_global_save_flags_and_cli();
	}
	sem->count = 0;
	if (schedmap) {
		/* If the local CPU is involved, schedule both locally and
		 * remotely; otherwise only kick the remote CPUs. */
		if (test_and_clear_bit(rtai_cpuid(), &schedmap)) {
			RT_SCHEDULE_MAP_BOTH(schedmap);
		} else {
			RT_SCHEDULE_MAP(schedmap);
		}
	}
	rt_global_restore_flags(flags);
	return 0;
}
/**
 * @anchor rt_sem_wait
 * @brief Take a semaphore.
 *
 * rt_sem_wait waits for an event to be signaled to a semaphore. It is
 * typically called when a task enters a critical region. The semaphore
 * value is decremented and tested. If it is still non-negative
 * rt_sem_wait returns immediately. Otherwise the caller task is
 * blocked and queued up. Queuing may happen in priority order or on a
 * FIFO base, as determined at initialization. In that case rt_sem_wait
 * returns if:
 * - the caller task is in the first place of the waiting queue and
 *   another task issues a @ref rt_sem_signal() call;
 * - an error occurs (e.g. the semaphore is destroyed).
 *
 * @param sem points to the structure used in the call to @ref
 * rt_sem_init().
 *
 * @return the number of events already signaled upon success.
 * A special value, as described below, in case of a failure:
 * - @b 0xFFFF: @e sem does not refer to a valid semaphore.
 *
 * @note In principle 0xFFFF could theoretically be a usable semaphore
 * events count, so it could also be returned under normal
 * circumstances; avoid counting up to 0xFFFF.<br>
 * Just for curiosity: the original Dijkstra notation for rt_sem_wait
 * was a "P" operation, and rt_sem_signal was a "V" operation. The name
 * for P comes from the Dutch "prolagen", a combination of "proberen"
 * (to probe) and "verlagen" (to decrement), also from the word
 * "passeren" (to pass).<br>
 * The name for V comes from the Dutch "verhogen" (to increase) or
 * "vrygeven" (to release). (Source: Daniel Tabak - Multiprocessors,
 * Prentice Hall, 1990.)<br>
 * It should also be remarked that real time programming practitioners
 * were using semaphores long before Dijkstra formalized P and V. In
 * Italian "semaforo" means a traffic light, so that semaphores have an
 * intuitive appeal and their use and meaning is easily understood.
 */
int rt_sem_wait(SEM *sem)
{
	RT_TASK *rt_current;
	unsigned long flags;
	int count;

	if (sem->magic != RT_SEM_MAGIC) {
		return SEM_ERR;
	}

	flags = rt_global_save_flags_and_cli();
	rt_current = RT_CURRENT;
	if ((count = sem->count) <= 0) {
		unsigned long schedmap;
		if (sem->type > 0) {
			/* Resource semaphore already owned by the caller:
			 * recurse by bumping type, no blocking. */
			if (sem->owndby == rt_current) {
				count = sem->type++;
				rt_global_restore_flags(flags);
				return count + 1;
			}
			/* Priority inheritance: pass our priority on to the
			 * current owner (chain). */
			schedmap = pass_prio(sem->owndby, rt_current);
		} else {
			schedmap = 0;
		}
		/* Block: negative count records the number of waiters. */
		sem->count--;
		rt_current->state |= RT_SCHED_SEMAPHORE;
		rem_ready_current(rt_current);
		enqueue_blocked(rt_current, &sem->queue, sem->qtype);
		RT_SCHEDULE_MAP_BOTH(schedmap);
		/* Back from the scheduler: a still-set blocked_on means we
		 * were resumed abnormally (e.g. semaphore deleted). */
		if (rt_current->blocked_on) {
			rt_current->prio_passed_to = NOTHING;
			rt_global_restore_flags(flags);
			return SEM_ERR;
		} else {
			count = sem->count;
		}
	} else {
		sem->count--;
	}
	/* Resource semaphore acquired: record ownership. */
	if (sem->type > 0) {
		(sem->owndby = rt_current)->owndres++;
	}
	rt_global_restore_flags(flags);
	return count;
}
00494 * 00495 * @note In principle 0xFFFF could theoretically be a usable 00496 * semaphores events count so it could be returned also under 00497 * normal circumstances. It is unlikely you are going to count 00498 * up to such number of events, in any case avoid counting up 00499 * to 0xFFFF. 00500 */ 00501 int rt_sem_wait_if(SEM *sem) 00502 { 00503 int count; 00504 unsigned long flags; 00505 00506 if (sem->magic != RT_SEM_MAGIC) { 00507 return SEM_ERR; 00508 } 00509 00510 flags = rt_global_save_flags_and_cli(); 00511 if ((count = sem->count) <= 0) { 00512 if (sem->type > 0 && sem->owndby == RT_CURRENT) { 00513 count = sem->type++; 00514 rt_global_restore_flags(flags); 00515 return count + 1; 00516 } 00517 } else { 00518 sem->count--; 00519 if (sem->type > 0) { 00520 (sem->owndby = RT_CURRENT)->owndres++; 00521 } 00522 } 00523 rt_global_restore_flags(flags); 00524 return count; 00525 } 00526 00527 00528 /** 00529 * @anchor rt_sem_wait_until 00530 * @brief Wait a semaphore with timeout. 00531 * 00532 * rt_sem_wait_until, like @ref rt_sem_wait_timed() is a timed version 00533 * of the standard semaphore wait call. The semaphore value is 00534 * decremented and tested. If it is still non-negative these functions 00535 * return immediately. Otherwise the caller task is blocked and queued 00536 * up. Queuing may happen in priority order or on FIFO base. This is 00537 * determined by the compile time option @e SEM_PRIORD. In this case 00538 * the function returns if: 00539 * - The caller task is in the first place of the waiting queue 00540 * and an other task issues a @ref rt_sem_signal call(); 00541 * - a timeout occurs; 00542 * - an error occurs (e.g. the semaphore is destroyed); 00543 * 00544 * In case of a timeout, the semaphore value is incremented before 00545 * return. 00546 * 00547 * @param sem points to the structure used in the call to @ref 00548 * rt_sem_init(). 00549 * 00550 * @param time is an absolute value to the current time. 
/**
 * @anchor rt_sem_wait_until
 * @brief Wait on a semaphore with an absolute timeout.
 *
 * rt_sem_wait_until, like @ref rt_sem_wait_timed(), is a timed version
 * of the standard semaphore wait call. The semaphore value is
 * decremented and tested. If it is still non-negative the function
 * returns immediately. Otherwise the caller task is blocked and queued
 * up (in priority order or on a FIFO base, as set at initialization).
 * In that case the function returns if:
 * - the caller task is in the first place of the waiting queue and
 *   another task issues a @ref rt_sem_signal() call;
 * - a timeout occurs;
 * - an error occurs (e.g. the semaphore is destroyed).
 *
 * In case of a timeout, the semaphore value is incremented before
 * return.
 *
 * @param sem points to the structure used in the call to @ref
 * rt_sem_init().
 *
 * @param time is an absolute value, relative to the current time base.
 *
 * @return the number of events already signaled upon success.
 * A special value, as described below, in case of a failure:
 * - @b 0xFFFF: @e sem does not refer to a valid semaphore.
 *
 * @note In principle 0xFFFF could theoretically be a usable semaphore
 * events count, so it could also be returned under normal
 * circumstances; avoid counting up to 0xFFFF.
 */
int rt_sem_wait_until(SEM *sem, RTIME time)
{
	DECLARE_RT_CURRENT;
	int count;
	unsigned long flags;

	if (sem->magic != RT_SEM_MAGIC) {
		return SEM_ERR;
	}

	flags = rt_global_save_flags_and_cli();
	ASSIGN_RT_CURRENT;
	if ((count = sem->count) <= 0) {
		rt_current->blocked_on = &sem->queue;
		if ((rt_current->resume_time = time) > rt_time_h) {
			unsigned long schedmap;
			if (sem->type > 0) {
				/* Caller already owns this resource
				 * semaphore: recurse, no blocking. */
				if (sem->owndby == rt_current) {
					count = sem->type++;
					rt_global_restore_flags(flags);
					return count + 1;
				}
				/* Priority inheritance toward the owner. */
				schedmap = pass_prio(sem->owndby, rt_current);
			} else {
				schedmap = 0;
			}
			sem->count--;
			rt_current->state |= (RT_SCHED_SEMAPHORE | RT_SCHED_DELAYED);
			rem_ready_current(rt_current);
			enqueue_blocked(rt_current, &sem->queue, sem->qtype);
			enq_timed_task(rt_current);
			RT_SCHEDULE_MAP_BOTH(schedmap);
		} else {
			/* Deadline already in the past: account the failed
			 * wait and detach our queue node. */
			sem->count--;
			rt_current->queue.prev = rt_current->queue.next = &rt_current->queue;
		}
		/* Disambiguate how we resumed via blocked_on:
		 *   NULL                -> signaled normally;
		 *   above SOMETHING     -> still queued, i.e. timed out;
		 *   SOMETHING sentinel  -> abnormal resume (sem deleted). */
		if (!rt_current->blocked_on) {
			count = sem->count;
		} else if ((void *)rt_current->blocked_on > SOMETHING) {
			dequeue_blocked(rt_current);
			/* Give the event back; cap at 1 for binary/resource
			 * semaphores. */
			if(++sem->count > 1 && sem->type) {
				sem->count = 1;
			}
			rt_global_restore_flags(flags);
			return SEM_TIMOUT;
		} else {
			rt_current->prio_passed_to = NOTHING;
			rt_global_restore_flags(flags);
			return SEM_ERR;
		}
	} else {
		sem->count--;
	}
	/* Resource semaphore acquired: record ownership. */
	if (sem->type > 0) {
		(sem->owndby = rt_current)->owndres++;
	}
	rt_global_restore_flags(flags);
	return count;
}
/**
 * @anchor rt_sem_wait_timed
 * @brief Wait on a semaphore with a relative timeout.
 *
 * rt_sem_wait_timed, like @ref rt_sem_wait_until(), is a timed version
 * of the standard semaphore wait call; see that function for the full
 * semantics. In case of a timeout, the semaphore value is incremented
 * before return.
 *
 * @param sem points to the structure used in the call to @ref
 * rt_sem_init().
 *
 * @param delay is relative to the current time, in timer count units.
 *
 * @return the number of events already signaled upon success.
 * A special value as described below in case of a failure:
 * - @b 0xFFFF: @e sem does not refer to a valid semaphore.
 *
 * @note In principle 0xFFFF could theoretically be a usable semaphore
 * events count, so it could also be returned under normal
 * circumstances; avoid counting up to 0xFFFF.
 */
int rt_sem_wait_timed(SEM *sem, RTIME delay)
{
	return rt_sem_wait_until(sem, get_time() + delay);
}


/* ++++++++++++++++++++++++++ BARRIER SUPPORT +++++++++++++++++++++++++++++++ */

/**
 * @anchor rt_sem_wait_barrier
 * @brief Wait on a semaphore barrier.
 *
 * rt_sem_wait_barrier is a gang waiting in that a task issuing such
 * a request will be blocked till a number of tasks equal to the
 * semaphore count set at rt_sem_init is reached.
 *
 * @returns 0 always (SEM_ERR if @e sem is not a valid semaphore).
 */
int rt_sem_wait_barrier(SEM *sem)
{
	unsigned long flags;

	if (sem->magic != RT_SEM_MAGIC) {
		return SEM_ERR;
	}

	flags = rt_global_save_flags_and_cli();
	/* First arrival: stash the barrier threshold in the (otherwise
	 * unused) owndby pointer field and convert the SEM to a plain
	 * counting semaphore starting at 0. NOTE(review): this puns a
	 * pointer as an integer — relies on sizeof(long) fitting the
	 * count; verify against the SEM declaration. */
	if (!sem->owndby) {
		sem->owndby = (void *)(long)(sem->count < 1 ? 1 : sem->count);
		sem->count = sem->type = 0;
	}
	/* (1 - count) is the number of arrivals so far; block until the
	 * threshold is reached, then release everybody at once. */
	if ((1 - sem->count) < (long)sem->owndby) {
		int retval = rt_sem_wait(sem);
		rt_global_restore_flags(flags);
		return retval;
	}
	rt_sem_broadcast(sem);
	rt_global_restore_flags(flags);
	return 0;
}
/* +++++++++++++++++++++++++ COND VARIABLES SUPPORT +++++++++++++++++++++++++ */

/**
 * @anchor rt_cond_signal
 * @brief Signal a conditional variable.
 *
 * rt_cond_signal resumes one of the tasks that are waiting on the
 * condition semaphore @a cnd. Nothing happens if no task is waiting on
 * @a cnd, while the first queued task blocked on @a cnd is resumed,
 * according to the queueing method set at rt_cond_init.
 *
 * @param cnd points to the structure used in the call to @ref
 * rt_cond_init().
 *
 * @returns 0 (SEM_ERR if @e cnd is not a valid semaphore).
 */
int rt_cond_signal(CND *cnd)
{
	unsigned long flags;
	RT_TASK *task;

	if (cnd->magic != RT_SEM_MAGIC) {
		return SEM_ERR;
	}
	flags = rt_global_save_flags_and_cli();
	/* Wake only the first queued waiter, if any. */
	if ((task = (cnd->queue.next)->task)) {
		dequeue_blocked(task);
		rem_timed_task(task);
		if (task->state != RT_SCHED_READY && (task->state &= ~(RT_SCHED_SEMAPHORE | RT_SCHED_DELAYED)) == RT_SCHED_READY) {
			enq_ready_task(task);
			RT_SCHEDULE(task, rtai_cpuid());
		}
	}
	rt_global_restore_flags(flags);
	return 0;
}

/*
 * Release the condition mutex on behalf of rt_current while it blocks
 * on a condition variable: forces the mutex to non-recursed state,
 * signals it (waking the first waiter, if any), drops ownership and
 * unwinds priority inheritance, then reschedules. Returns the mutex's
 * previous type (recursion level) so the caller can restore it after
 * re-acquiring the mutex. Must be called under the global lock.
 */
static inline int rt_cndmtx_signal(SEM *mtx, RT_TASK *rt_current)
{
	int type;
	RT_TASK *task;

	/* Save the recursion level and flatten the mutex to a simply
	 * owned resource so a single signal releases it. */
	type = mtx->type;
	mtx->type = 1;
	if (++mtx->count > 1) {
		mtx->count = 1;
	}
	if ((task = (mtx->queue.next)->task)) {
		dequeue_blocked(task);
		rem_timed_task(task);
		if (task->state != RT_SCHED_READY && (task->state &= ~(RT_SCHED_SEMAPHORE | RT_SCHED_DELAYED)) == RT_SCHED_READY) {
			enq_ready_task(task);
		}
	} else {
		task = 0;
	}
	mtx->owndby = 0;
	if (rt_current->owndres & SEMHLF) {
		--rt_current->owndres;
	}
	/* No semaphores left owned: fall back to the base priority or
	 * inherit from the head of the message queue. */
	if (!(rt_current->owndres & SEMHLF)) {
		int priority;
		rt_current->priority = rt_current->base_priority > (priority = ((rt_current->msg_queue.next)->task)->priority) ? priority : rt_current->base_priority;
	}
	if (task) {
		RT_SCHEDULE_BOTH(task, rtai_cpuid());
	} else {
		rt_schedule();
	}
	return type;
}
/**
 * @anchor rt_cond_wait
 * @brief Wait for a signal to a conditional variable.
 *
 * rt_cond_wait atomically unlocks @a mtx (as for using rt_sem_signal)
 * and waits for the condition semaphore @a cnd to be signaled. The
 * task execution is suspended until the condition semaphore is
 * signalled. @a mtx must be owned by the calling task before
 * rt_cond_wait is called. Before returning to the calling task,
 * rt_cond_wait reacquires @a mtx by calling rt_sem_wait.
 *
 * @param cnd points to the structure used in the call to @ref
 * rt_cond_init().
 *
 * @param mtx points to the structure used in the call to @ref
 * rt_sem_init().
 *
 * @return 0 on success, SEM_ERR in case of error.
 */
int rt_cond_wait(CND *cnd, SEM *mtx)
{
	RT_TASK *rt_current;
	unsigned long flags;
	int retval, type;

	if (cnd->magic != RT_SEM_MAGIC || mtx->magic != RT_SEM_MAGIC) {
		return SEM_ERR;
	}
	retval = 0;
	flags = rt_global_save_flags_and_cli();
	rt_current = RT_CURRENT;
	/* Queue ourselves on the condition BEFORE releasing the mutex so
	 * no signal can be lost in between (atomicity of unlock+wait). */
	rt_current->state |= RT_SCHED_SEMAPHORE;
	rem_ready_current(rt_current);
	enqueue_blocked(rt_current, &cnd->queue, cnd->qtype);
	/* Release the mutex (and reschedule); remember its recursion
	 * level to restore after re-acquisition. */
	type = rt_cndmtx_signal(mtx, rt_current);
	/* Still marked blocked after resuming -> abnormal wake-up
	 * (likely the condition was deleted). */
	if (rt_current->blocked_on) {
		retval = SEM_ERR;
	}
	rt_global_restore_flags(flags);
	rt_sem_wait(mtx);
	mtx->type = type;
	return retval;
}
/**
 * @anchor rt_cond_wait_until
 * @brief Wait on a conditional variable with an absolute timeout.
 *
 * rt_cond_wait_until atomically unlocks @a mtx (as for using
 * rt_sem_signal) and waits for the condition semaphore @a cnd to be
 * signalled. The task execution is suspended until the condition
 * semaphore is either signaled or a timeout expires. @a mtx must be
 * owned by the calling task before rt_cond_wait_until is called.
 * Before returning to the calling task, rt_cond_wait_until reacquires
 * @a mtx by calling rt_sem_wait and returns a value to indicate if it
 * has been signalled or timed out.
 *
 * @param cnd points to the structure used in the call to @ref
 * rt_cond_init().
 *
 * @param mtx points to the structure used in the call to @ref
 * rt_sem_init().
 *
 * @param time is an absolute value, in timer count units.
 *
 * @returns 0 if it was signaled, SEM_TIMOUT if a timeout occurred,
 * SEM_ERR if the task has been resumed because of any other action
 * (likely @a cnd was deleted).
 */
int rt_cond_wait_until(CND *cnd, SEM *mtx, RTIME time)
{
	DECLARE_RT_CURRENT;
	unsigned long flags;
	int retval, type;

	/* NOTE(review): this uses '&&', so SEM_ERR is returned only when
	 * BOTH magics are invalid, while rt_cond_wait() uses '||' for the
	 * same check. Looks like a typo — verify against upstream RTAI
	 * before relying on this validation. */
	if (cnd->magic != RT_SEM_MAGIC && mtx->magic != RT_SEM_MAGIC) {
		return SEM_ERR;
	}
	retval = SEM_TIMOUT;
	flags = rt_global_save_flags_and_cli();
	ASSIGN_RT_CURRENT;
	if ((rt_current->resume_time = time) > rt_time_h) {
		/* Queue on the condition and the timed list BEFORE releasing
		 * the mutex, so no signal can be lost in between. */
		rt_current->state |= (RT_SCHED_SEMAPHORE | RT_SCHED_DELAYED);
		rem_ready_current(rt_current);
		enqueue_blocked(rt_current, &cnd->queue, cnd->qtype);
		enq_timed_task(rt_current);
		type = rt_cndmtx_signal(mtx, rt_current);


		/* Disambiguate the wake-up via blocked_on:
		 *   NULL            -> signaled;
		 *   above SOMETHING -> still queued, i.e. timed out;
		 *   SOMETHING       -> abnormal resume (cnd deleted). */
		if (!rt_current->blocked_on) {
			retval = 0;
		} else if ((void *)rt_current->blocked_on > SOMETHING) {
			dequeue_blocked(rt_current);
			retval = SEM_TIMOUT;
		} else {
			retval = SEM_ERR;
		}
	} else {
		/* Deadline already expired: do not release the mutex. */
		rt_global_restore_flags(flags);
		return retval;
	}
	rt_global_restore_flags(flags);
	rt_sem_wait(mtx);
	mtx->type = type;
	return retval;
}
Before returning to the calling task 00887 * rt_cond_wait_until reacquires mtx by calling rt_sem_wait and returns a 00888 * value to indicate if it has been signalled pr timedout. 00889 * 00890 * @param cnd points to the structure used in the call to @ref 00891 * rt_cond_init(). 00892 * 00893 * @param mtx points to the structure used in the call to @ref 00894 * rt_sem_init(). 00895 * 00896 * @param delay is a realtive time values with respect to the current time, 00897 * in timer count unit. 00898 * 00899 * @returns 0 if it was signaled, SEM_TIMOUT if a timeout occured, SEM_ERR 00900 * if the task has been resumed because of any other action (likely cnd 00901 * was deleted). 00902 */ 00903 int rt_cond_wait_timed(CND *cnd, SEM *mtx, RTIME delay) 00904 { 00905 return rt_cond_wait_until(cnd, mtx, get_time() + delay); 00906 } 00907 00908 /* ++++++++++++++++++++ READERS-WRITER LOCKS SUPPORT ++++++++++++++++++++++++ */ 00909 00910 /** 00911 * @anchor rt_rwl_init 00912 * @brief Initialize a multi readers single writer lock. 00913 * 00914 * rt_rwl_init initializes a multi readers single writer lock @a rwl. 00915 * 00916 * @param rwl must point to an allocated @e RWL structure. 00917 * 00918 * A multi readers single writer lock (RWL) is a synchronization mechanism 00919 * that allows to have simultaneous read only access to an object, while only 00920 * one task can have write access. A data set which is searched more 00921 * frequently than it is changed can be usefully controlled by using an rwl. 00922 * The lock acquisition policy is determined solely on the priority of tasks 00923 * applying to own a lock. 00924 * 00925 * @returns 0 if always. 
00926 * 00927 */ 00928 00929 int rt_rwl_init(RWL *rwl) 00930 { 00931 rt_typed_sem_init(&rwl->wrmtx, 1, RES_SEM); 00932 rt_typed_sem_init(&rwl->wrsem, 0, CNT_SEM); 00933 rt_typed_sem_init(&rwl->rdsem, 0, CNT_SEM); 00934 return 0; 00935 } 00936 00937 /** 00938 * @anchor rt_rwl_delete 00939 * @brief destroys a multi readers single writer lock. 00940 * 00941 * rt_rwl_init destroys a multi readers single writer lock @a rwl. 00942 * 00943 * @param rwl must point to an allocated @e RWL structure. 00944 * 00945 * @returns 0 if OK, SEM_ERR if anything went wrong. 00946 * 00947 */ 00948 00949 int rt_rwl_delete(RWL *rwl) 00950 { 00951 int ret; 00952 00953 ret = rt_sem_delete(&rwl->rdsem); 00954 ret |= !rt_sem_delete(&rwl->wrsem); 00955 ret |= !rt_sem_delete(&rwl->wrmtx); 00956 return ret ? 0 : SEM_ERR; 00957 } 00958 00959 /** 00960 * @anchor rt_rwl_rdlock 00961 * @brief acquires a multi readers single writer lock for reading. 00962 * 00963 * rt_rwl_rdlock acquires a multi readers single writer lock @a rwl for 00964 * reading. The calling task will block only if any writer owns the lock 00965 * already or there are writers with higher priority waiting to acquire 00966 * write access. 00967 * 00968 * @param rwl must point to an allocated @e RWL structure. 00969 * 00970 * @returns 0 if OK, SEM_ERR if anything went wrong after being blocked. 
00971 * 00972 */ 00973 00974 int rt_rwl_rdlock(RWL *rwl) 00975 { 00976 unsigned long flags; 00977 RT_TASK *wtask, *rt_current; 00978 00979 flags = rt_global_save_flags_and_cli(); 00980 rt_current = RT_CURRENT; 00981 while (rwl->wrmtx.owndby || ((wtask = (rwl->wrsem.queue.next)->task) && wtask->priority <= rt_current->priority)) { 00982 int ret; 00983 if (rwl->wrmtx.owndby == rt_current) { 00984 rt_global_restore_flags(flags); 00985 return SEM_ERR + 1; 00986 } 00987 if ((ret = rt_sem_wait(&rwl->rdsem)) >= SEM_TIMOUT) { 00988 rt_global_restore_flags(flags); 00989 return ret; 00990 } 00991 } 00992 ((int *)&rwl->rdsem.owndby)[0]++; 00993 rt_global_restore_flags(flags); 00994 return 0; 00995 } 00996 00997 /** 00998 * @anchor rt_rwl_rdlock_if 00999 * @brief try to acquire a multi readers single writer lock just for reading. 01000 * 01001 * rt_rwl_rdlock_if tries to acquire a multi readers single writer lock @a rwl 01002 * for reading immediately, i.e. without blocking if a writer owns the lock 01003 * or there are writers with higher priority waiting to acquire write access. 01004 * 01005 * @param rwl must point to an allocated @e RWL structure. 01006 * 01007 * @returns 0 if the lock was acquired, -1 if the lock was already owned. 01008 * 01009 */ 01010 01011 int rt_rwl_rdlock_if(RWL *rwl) 01012 { 01013 unsigned long flags; 01014 RT_TASK *wtask; 01015 01016 flags = rt_global_save_flags_and_cli(); 01017 if (!rwl->wrmtx.owndby && (!(wtask = (rwl->wrsem.queue.next)->task) || wtask->priority > RT_CURRENT->priority)) { 01018 ((int *)&rwl->rdsem.owndby)[0]++; 01019 rt_global_restore_flags(flags); 01020 return 0; 01021 } 01022 rt_global_restore_flags(flags); 01023 return -1; 01024 } 01025 01026 /** 01027 * @anchor rt_rwl_rdlock_until 01028 * @brief try to acquire a multi readers single writer lock for reading within 01029 * an absolute deadline time. 
01030 * 01031 * rt_rwl_rdlock_untill tries to acquire a multi readers single writer lock 01032 * @a rwl for reading, as for rt_rwl_rdlock, but timing out if the lock has not 01033 * been acquired within an assigned deadline. 01034 * 01035 * @param rwl must point to an allocated @e RWL structure. 01036 * 01037 * @param time is the time deadline, in internal count units. 01038 * 01039 * @returns 0 if the lock was acquired, SEM_TIMOUT if the deadline expired 01040 * without acquiring the lock, SEM_ERR in case something went wrong. 01041 * 01042 */ 01043 01044 int rt_rwl_rdlock_until(RWL *rwl, RTIME time) 01045 { 01046 unsigned long flags; 01047 RT_TASK *wtask, *rt_current; 01048 01049 flags = rt_global_save_flags_and_cli(); 01050 rt_current = RT_CURRENT; 01051 while (rwl->wrmtx.owndby || ((wtask = (rwl->wrsem.queue.next)->task) && wtask->priority <= rt_current->priority)) { 01052 int ret; 01053 if (rwl->wrmtx.owndby == rt_current) { 01054 rt_global_restore_flags(flags); 01055 return SEM_ERR + 1; 01056 } 01057 if ((ret = rt_sem_wait_until(&rwl->rdsem, time)) >= SEM_TIMOUT) { 01058 rt_global_restore_flags(flags); 01059 return ret; 01060 } 01061 } 01062 ((int *)&rwl->rdsem.owndby)[0]++; 01063 rt_global_restore_flags(flags); 01064 return 0; 01065 } 01066 01067 /** 01068 * @anchor rt_rwl_rdlock_timed 01069 * @brief try to acquire a multi readers single writer lock for reading within 01070 * a relative deadline time. 01071 * 01072 * rt_rwl_rdlock_timed tries to acquire a multi readers single writer lock 01073 * @a rwl for reading, as for rt_rwl_rdlock, but timing out if the lock has not 01074 * been acquired within an assigned deadline. 01075 * 01076 * @param rwl must point to an allocated @e RWL structure. 01077 * 01078 * @param delay is the time delay within which the lock must be acquired, in 01079 * internal count units. 
01080 * 01081 * @returns 0 if the lock was acquired, SEM_TIMOUT if the deadline expired 01082 * without acquiring the lock, SEM_ERR in case something went wrong. 01083 * 01084 */ 01085 01086 int rt_rwl_rdlock_timed(RWL *rwl, RTIME delay) 01087 { 01088 return rt_rwl_rdlock_until(rwl, get_time() + delay); 01089 } 01090 01091 /** 01092 * @anchor rt_rwl_wrlock 01093 * @brief acquires a multi readers single writer lock for wrtiting. 01094 * 01095 * rt_rwl_rwlock acquires a multi readers single writer lock @a rwl for 01096 * writing. The calling task will block if any other task, reader or writer, 01097 * owns the lock already. 01098 * 01099 * @param rwl must point to an allocated @e RWL structure. 01100 * 01101 * @returns 0 if OK, SEM_ERR if anything went wrong after being blocked. 01102 * 01103 */ 01104 01105 int rt_rwl_wrlock(RWL *rwl) 01106 { 01107 unsigned long flags; 01108 int ret; 01109 01110 flags = rt_global_save_flags_and_cli(); 01111 while (rwl->rdsem.owndby) { 01112 if ((ret = rt_sem_wait(&rwl->wrsem)) >= SEM_TIMOUT) { 01113 rt_global_restore_flags(flags); 01114 return ret; 01115 } 01116 } 01117 if ((ret = rt_sem_wait(&rwl->wrmtx)) >= SEM_TIMOUT) { 01118 rt_global_restore_flags(flags); 01119 return ret; 01120 } 01121 rt_global_restore_flags(flags); 01122 return 0; 01123 } 01124 01125 /** 01126 * @anchor rt_rwl_wrlock_if 01127 * @brief acquires a multi readers single writer lock for writing. 01128 * 01129 * rt_rwl_wrlock_if try to acquire a multi readers single writer lock @a rwl 01130 * for writing immediately, i.e without blocking if the lock is owned already. 01131 * 01132 * @param rwl must point to an allocated @e RWL structure. 01133 * 01134 * @returns 0 if the lock was acquired, -1 if the lock was already owned. 
01135 * 01136 */ 01137 01138 int rt_rwl_wrlock_if(RWL *rwl) 01139 { 01140 unsigned long flags; 01141 01142 flags = rt_global_save_flags_and_cli(); 01143 if (!rwl->rdsem.owndby && rt_sem_wait_if(&rwl->wrmtx) >= 0) { 01144 rt_global_restore_flags(flags); 01145 return 0; 01146 } 01147 rt_global_restore_flags(flags); 01148 return -1; 01149 } 01150 01151 /** 01152 * @anchor rt_rwl_wrlock_until 01153 * @brief try to acquire a multi readers single writer lock for writing within 01154 * an absolute deadline time. 01155 * 01156 * rt_rwl_rwlock_until tries to acquire a multi readers single writer lock 01157 * @a rwl for writing, as for rt_rwl_rwlock, but timing out if the lock has not 01158 * been acquired within an assigned deadline. 01159 * 01160 * @param rwl must point to an allocated @e RWL structure. 01161 * 01162 * @param time is the time deadline, in internal count units. 01163 * 01164 * @returns 0 if the lock was acquired, SEM_TIMOUT if the deadline expired 01165 * without acquiring the lock, SEM_ERR in case something went wrong. 01166 * 01167 */ 01168 01169 int rt_rwl_wrlock_until(RWL *rwl, RTIME time) 01170 { 01171 unsigned long flags; 01172 int ret; 01173 01174 flags = rt_global_save_flags_and_cli(); 01175 while (rwl->rdsem.owndby) { 01176 if ((ret = rt_sem_wait_until(&rwl->wrsem, time)) >= SEM_TIMOUT) { 01177 rt_global_restore_flags(flags); 01178 return ret; 01179 }; 01180 } 01181 if ((ret = rt_sem_wait_until(&rwl->wrmtx, time)) >= SEM_TIMOUT) { 01182 rt_global_restore_flags(flags); 01183 return ret; 01184 }; 01185 rt_global_restore_flags(flags); 01186 return 0; 01187 } 01188 01189 /** 01190 * @anchor rt_rwl_wrlock_timed 01191 * @brief try to acquire a multi readers single writer lock for writing within 01192 * a relative deadline time. 01193 * 01194 * rt_rwl_wrlock_timed tries to acquire a multi readers single writer lock 01195 * @a rwl for writing, as for rt_rwl_wrlock, timing out if the lock has not 01196 * been acquired within an assigned deadline. 
01197 * 01198 * @param rwl must point to an allocated @e RWL structure. 01199 * 01200 * @param delay is the time delay within which the lock must be acquired, in 01201 * internal count units. 01202 * 01203 * @returns 0 if the lock was acquired, SEM_TIMOUT if the deadline expired 01204 * without acquiring the lock, SEM_ERR in case something went wrong. 01205 * 01206 */ 01207 01208 int rt_rwl_wrlock_timed(RWL *rwl, RTIME delay) 01209 { 01210 return rt_rwl_wrlock_until(rwl, get_time() + delay); 01211 } 01212 01213 /** 01214 * @anchor rt_rwl_unlock 01215 * @brief unlock an acquired multi readers single writer lock. 01216 * 01217 * rt_rwl_unlock unlocks an acquired multi readers single writer lock @a rwl. 01218 * After releasing the lock any task waiting to acquire it will own the lock 01219 * according to its priority, whether it is a reader or a writer, otherwise 01220 * the lock will be fully unlocked. 01221 * 01222 * @param rwl must point to an allocated @e RWL structure. 01223 * 01224 * @returns 0 always. 
01225 * 01226 */ 01227 01228 int rt_rwl_unlock(RWL *rwl) 01229 { 01230 unsigned long flags; 01231 01232 flags = rt_global_save_flags_and_cli(); 01233 if (rwl->wrmtx.owndby) { 01234 rt_sem_signal(&rwl->wrmtx); 01235 } else if (rwl->rdsem.owndby) { 01236 rwl->rdsem.owndby = (struct rt_task_struct *)((char *)rwl->rdsem.owndby - 1); 01237 } 01238 rt_global_restore_flags(flags); 01239 flags = rt_global_save_flags_and_cli(); 01240 if (!rwl->wrmtx.owndby && !rwl->rdsem.owndby) { 01241 RT_TASK *wtask, *rtask; 01242 wtask = (rwl->wrsem.queue.next)->task; 01243 rtask = (rwl->rdsem.queue.next)->task; 01244 if (wtask && rtask) { 01245 if (wtask->priority < rtask->priority) { 01246 rt_sem_signal(&rwl->wrsem); 01247 } else { 01248 rt_sem_signal(&rwl->rdsem); 01249 } 01250 } else if (wtask) { 01251 rt_sem_signal(&rwl->wrsem); 01252 } else if (rtask) { 01253 rt_sem_signal(&rwl->rdsem); 01254 } 01255 } 01256 rt_global_restore_flags(flags); 01257 return 0; 01258 } 01259 01260 /* +++++++++++++++++++++ RECURSIVE SPINLOCKS SUPPORT ++++++++++++++++++++++++ */ 01261 01262 /** 01263 * @anchor rt_spl_init 01264 * @brief Initialize a spinlock. 01265 * 01266 * rt_spl_init initializes a spinlock @a spl. 01267 * 01268 * @param spl must point to an allocated @e SPL structure. 01269 * 01270 * A spinlock is an active wait synchronization mechanism useful for multi 01271 * processors very short synchronization, when it is more efficient to wait 01272 * at a meeting point instead of being suspended and the reactivated, as by 01273 * using semaphores, to acquire ownership of any object. 01274 * Spinlocks can be recursed once acquired, a recurring owner must care of 01275 * unlocking as many times as he took the spinlock. 01276 * 01277 * @returns 0 if always. 01278 * 01279 */ 01280 01281 int rt_spl_init(SPL *spl) 01282 { 01283 spl->owndby = 0; 01284 spl->count = 0; 01285 return 0; 01286 } 01287 01288 /** 01289 * @anchor rt_spl_delete 01290 * @brief Initialize a spinlock. 
01291 * 01292 * rt_spl_delete destroies a spinlock @a spl. 01293 * 01294 * @param spl must point to an allocated @e SPL structure. 01295 * 01296 * @returns 0 if always. 01297 * 01298 */ 01299 01300 int rt_spl_delete(SPL *spl) 01301 { 01302 return 0; 01303 } 01304 01305 /** 01306 * @anchor rt_spl_lock 01307 * @brief Acquire a spinlock. 01308 * 01309 * rt_spl_lock acquires a spinlock @a spl. 01310 * 01311 * @param spl must point to an allocated @e SPL structure. 01312 * 01313 * rt_spl_lock spins on lock till it can be acquired. If a tasks asks for 01314 * lock it owns already it will acquire it immediately but will have to care 01315 * to unlock it as many times as it recursed the spinlock ownership. 01316 * 01317 * @returns 0 if always. 01318 * 01319 */ 01320 01321 int rt_spl_lock(SPL *spl) 01322 { 01323 unsigned long flags; 01324 RT_TASK *rt_current; 01325 01326 rtai_save_flags_and_cli(flags); 01327 if (spl->owndby == (rt_current = RT_CURRENT)) { 01328 spl->count++; 01329 } else { 01330 while (atomic_cmpxchg(&spl->owndby, 0, rt_current)); 01331 spl->flags = flags; 01332 } 01333 return 0; 01334 } 01335 01336 /** 01337 * @anchor rt_spl_lock_if 01338 * @brief Acquire a spinlock without waiting. 01339 * 01340 * rt_spl_lock_if acquires a spinlock @a spl without waiting. 01341 * 01342 * @param spl must point to an allocated @e SPL structure. 01343 * 01344 * rt_spl_lock_if tries to acquire a spinlock but will not spin on it if 01345 * it is owned already. 01346 * 01347 * @returns 0 if it succeeded, -1 if the lock was owned already. 
01348 * 01349 */ 01350 01351 int rt_spl_lock_if(SPL *spl) 01352 { 01353 unsigned long flags; 01354 RT_TASK *rt_current; 01355 01356 rtai_save_flags_and_cli(flags); 01357 if (spl->owndby == (rt_current = RT_CURRENT)) { 01358 spl->count++; 01359 } else { 01360 if (atomic_cmpxchg(&spl->owndby, 0, rt_current)) { 01361 rtai_restore_flags(flags); 01362 return -1; 01363 } 01364 spl->flags = flags; 01365 } 01366 return 0; 01367 } 01368 01369 /** 01370 * @anchor rt_spl_lock_timed 01371 * @brief Acquire a spinlock with timeout. 01372 * 01373 * rt_spl_lock_timed acquires a spinlock @a spl, but waiting spinning only 01374 * for an allowed time. 01375 * 01376 * @param spl must point to an allocated @e SPL structure. 01377 * 01378 * @param ns timeout 01379 * 01380 * rt_spl_lock spins on lock till it can be acquired, as for rt_spl_lock, 01381 * but only for an allowed time. If the spinlock cannot be acquired in time 01382 * the functions returns in error. 01383 * This function can be usefull either in itself or as a diagnosis toll 01384 * during code development. 01385 * 01386 * @returns 0 if the spinlock was acquired, -1 if a timeout occured. 01387 * 01388 */ 01389 01390 int rt_spl_lock_timed(SPL *spl, unsigned long ns) 01391 { 01392 unsigned long flags; 01393 RT_TASK *rt_current; 01394 01395 rtai_save_flags_and_cli(flags); 01396 if (spl->owndby == (rt_current = RT_CURRENT)) { 01397 spl->count++; 01398 } else { 01399 RTIME end_time; 01400 void *locked; 01401 end_time = rdtsc() + imuldiv(ns, tuned.cpu_freq, 1000000000); 01402 while ((locked = atomic_cmpxchg(&spl->owndby, 0, rt_current)) && rdtsc() < end_time); 01403 if (locked) { 01404 rtai_restore_flags(flags); 01405 return -1; 01406 } 01407 spl->flags = flags; 01408 } 01409 return 0; 01410 } 01411 01412 /** 01413 * @anchor rt_spl_unlock 01414 * @brief Release an owned spinlock. 01415 * 01416 * rt_spl_lock releases an owned spinlock @a spl. 01417 * 01418 * @param spl must point to an allocated @e SPL structure. 
01419 * 01420 * rt_spl_unlock releases an owned lock. The spinlock can remain locked and 01421 * its ownership can remain with the task is the spinlock acquisition was 01422 * recursed. 01423 * 01424 * @returns 0 if the function was used legally, -1 if a tasks tries to unlock 01425 * a spinlock it does not own. 01426 * 01427 */ 01428 01429 int rt_spl_unlock(SPL *spl) 01430 { 01431 unsigned long flags; 01432 RT_TASK *rt_current; 01433 01434 rtai_save_flags_and_cli(flags); 01435 if (spl->owndby == (rt_current = RT_CURRENT)) { 01436 if (spl->count) { 01437 --spl->count; 01438 } else { 01439 spl->owndby = 0; 01440 spl->count = 0; 01441 rtai_restore_flags(spl->flags); 01442 } 01443 return 0; 01444 } 01445 rtai_restore_flags(flags); 01446 return -1; 01447 } 01448 01449 /* ++++++ NAMED SEMAPHORES, BARRIER, COND VARIABLES, RWLOCKS, SPINLOCKS +++++ */ 01450 01451 #include <rtai_registry.h> 01452 01453 /** 01454 * @anchor _rt_typed_named_sem_init 01455 * @brief Initialize a specifically typed (counting, binary, resource) 01456 * semaphore identified by a name. 01457 * 01458 * _rt_typed_named_sem_init allocate and initializes a semaphore identified 01459 * by @e name of type @e type. Once the semaphore structure is allocated the 01460 * initialization is as for rt_typed_sem_init. The function returns the 01461 * handle pointing to the allocated semaphore structure, to be used as the 01462 * usual semaphore address in all semaphore based services. Named objects 01463 * are useful for use among different processes, kernel/user space and 01464 * in distributed applications, see netrpc. 01465 * 01466 * @param sem_name is the identifier associated with the returned object. 01467 * 01468 * @param value is the initial value of the semaphore, always set to 1 01469 * for a resource semaphore. 01470 * 01471 * @param type is the semaphore type and queuing policy. 
It can be an OR 01472 * a semaphore kind: CNT_SEM for counting semaphores, BIN_SEM for binary 01473 * semaphores, RES_SEM for resource semaphores; and queuing policy: 01474 * FIFO_Q, PRIO_Q for a fifo and priority queueing respectively. 01475 * Resource semaphores will enforce a PRIO_Q policy anyhow. 01476 * 01477 * Since @a name can be a clumsy identifier, services are provided to 01478 * convert 6 characters identifiers to unsigned long, and vice versa. 01479 * 01480 * @see nam2num() and num2nam(). 01481 * 01482 * See rt_typed_sem_init for further clues. 01483 * 01484 * As for all the named initialization functions it must be remarked that 01485 * only the very first call to initilize/create a named RTAI object does a 01486 * real allocation of the object, any following call with the same name 01487 * will just increase its usage count. In any case the function returns 01488 * a pointer to the named object, or zero if in error. 01489 * 01490 * @returns either a valid pointer or 0 if in error. 01491 * 01492 */ 01493 01494 SEM *_rt_typed_named_sem_init(unsigned long sem_name, int value, int type) 01495 { 01496 SEM *sem; 01497 01498 if ((sem = rt_get_adr_cnt(sem_name))) { 01499 return sem; 01500 } 01501 if ((sem = rt_malloc(sizeof(SEM)))) { 01502 rt_typed_sem_init(sem, value, type); 01503 if (rt_register(sem_name, sem, IS_SEM, 0)) { 01504 return sem; 01505 } 01506 rt_sem_delete(sem); 01507 } 01508 rt_free(sem); 01509 return (SEM *)0; 01510 } 01511 01512 /** 01513 * @anchor rt_named_sem_delete 01514 * @brief Delete a semaphore initialized in named mode. 01515 * 01516 * rt_named_sem_delete deletes a semaphore previously created with 01517 * @ref _rt_typed_named_sem_init(). 01518 * 01519 * @param sem points to the structure pointer returned by a corresponding 01520 * call to _rt_typed_named_sem_init. 01521 * 01522 * Any tasks blocked on this semaphore is returned in error and 01523 * allowed to run when semaphore is destroyed. 
01524 * As it is done by all the named allocation functions delete calls have just 01525 * the effect of decrementing a usage count till the last is done, as that is 01526 * the one the really frees the object. 01527 * 01528 * @return an int >=0 is returned upon success, SEM_ERR if it failed to 01529 * delete the semafore, -EFAULT if the semaphore does not exist anymore. 01530 * 01531 */ 01532 01533 int rt_named_sem_delete(SEM *sem) 01534 { 01535 int ret; 01536 if (!(ret = rt_drg_on_adr_cnt(sem))) { 01537 if (!rt_sem_delete(sem)) { 01538 rt_free(sem); 01539 return 0; 01540 } else { 01541 return SEM_ERR; 01542 } 01543 } 01544 return ret; 01545 } 01546 01547 /** 01548 * @anchor _rt_named_rwl_init 01549 * @brief Initialize a multi readers single writer lock identified by a name. 01550 * 01551 * _rt_named_rwl_init allocate and initializes a multi readers single writer 01552 * lock (RWL) identified by @e name. Once the lock structure is allocated the 01553 * initialization is as for rt_rwl_init. The function returns the 01554 * handle pointing to the allocated multi readers single writer lock o 01555 * structure, to be used as the usual lock address in all rwl based services. 01556 * Named objects are useful for use among different processes, kernel/user 01557 * space and in distributed applications, see netrpc. 01558 * 01559 * @param rwl_name is the identifier associated with the returned object. 01560 * 01561 * Since @a name can be a clumsy identifier, services are provided to 01562 * convert 6 characters identifiers to unsigned long, and vice versa. 01563 * 01564 * @see nam2num() and num2nam(). 01565 * 01566 * As for all the named initialization functions it must be remarked that 01567 * only the very first call to initilize/create a named RTAI object does a 01568 * real allocation of the object, any following call with the same name 01569 * will just increase its usage count. 
In any case the function returns 01570 * a pointer to the named object, or zero if in error. 01571 * 01572 * @returns either a valid pointer or 0 if in error. 01573 * 01574 */ 01575 01576 RWL *_rt_named_rwl_init(unsigned long rwl_name) 01577 { 01578 RWL *rwl; 01579 01580 if ((rwl = rt_get_adr_cnt(rwl_name))) { 01581 return rwl; 01582 } 01583 if ((rwl = rt_malloc(sizeof(RWL)))) { 01584 rt_rwl_init(rwl); 01585 if (rt_register(rwl_name, rwl, IS_RWL, 0)) { 01586 return rwl; 01587 } 01588 rt_rwl_delete(rwl); 01589 } 01590 rt_free(rwl); 01591 return (RWL *)0; 01592 } 01593 01594 /** 01595 * @anchor rt_named_rwl_delete 01596 * @brief Delete a multi readers single writer lock in named mode. 01597 * 01598 * rt_named_rwl_delete deletes a multi readers single writer lock 01599 * previously created with @ref _rt_named_rwl_init(). 01600 * 01601 * @param rwl points to the structure pointer returned by a corresponding 01602 * call to rt_named_rwl_init. 01603 * 01604 * As it is done by all the named allocation functions delete calls have just 01605 * the effect of decrementing a usage count till the last is done, as that is 01606 * the one the really frees the object. 01607 * 01608 * @return an int >=0 is returned upon success, SEM_ERR if it failed to 01609 * delete the multi readers single writer lock, -EFAULT if the lock does 01610 * not exist anymore. 01611 * 01612 */ 01613 01614 int rt_named_rwl_delete(RWL *rwl) 01615 { 01616 int ret; 01617 if (!(ret = rt_drg_on_adr_cnt(rwl))) { 01618 if (!rt_rwl_delete(rwl)) { 01619 rt_free(rwl); 01620 return 0; 01621 } else { 01622 return SEM_ERR; 01623 } 01624 } 01625 return ret; 01626 } 01627 01628 /** 01629 * @anchor _rt_named_spl_init 01630 * @brief Initialize a spinlock identified by a name. 01631 * 01632 * _rt_named_spl_init allocate and initializes a spinlock (SPL) identified 01633 * by @e name. Once the spinlock structure is allocated the initialization 01634 * is as for rt_spl_init. 
The function returns the handle pointing to the 01635 * allocated spinlock structure, to be used as the usual spinlock address 01636 * in all spinlock based services. Named objects are useful for use among 01637 * different processes and kernel/user space. 01638 * 01639 * @param spl_name is the identifier associated with the returned object. 01640 * 01641 * Since @a name can be a clumsy identifier, services are provided to 01642 * convert 6 characters identifiers to unsigned long, and vice versa. 01643 * 01644 * @see nam2num() and num2nam(). 01645 * 01646 * As for all the named initialization functions it must be remarked that 01647 * only the very first call to initilize/create a named RTAI object does a 01648 * real allocation of the object, any following call with the same name 01649 * will just increase its usage count. In any case the function returns 01650 * a pointer to the named object, or zero if in error. 01651 * 01652 * @returns either a valid pointer or 0 if in error. 01653 * 01654 */ 01655 01656 SPL *_rt_named_spl_init(unsigned long spl_name) 01657 { 01658 SPL *spl; 01659 01660 if ((spl = rt_get_adr_cnt(spl_name))) { 01661 return spl; 01662 } 01663 if ((spl = rt_malloc(sizeof(SPL)))) { 01664 rt_spl_init(spl); 01665 if (rt_register(spl_name, spl, IS_SPL, 0)) { 01666 return spl; 01667 } 01668 rt_spl_delete(spl); 01669 } 01670 rt_free(spl); 01671 return (SPL *)0; 01672 } 01673 01674 /** 01675 * @anchor rt_named_spl_delete 01676 * @brief Delete a spinlock in named mode. 01677 * 01678 * rt_named_spl_delete deletes a spinlock previously created with 01679 * @ref _rt_named_spl_init(). 01680 * 01681 * @param spl points to the structure pointer returned by a corresponding 01682 * call to rt_named_spl_init. 01683 * 01684 * As it is done by all the named allocation functions delete calls have just 01685 * the effect of decrementing a usage count till the last is done, as that is 01686 * the one the really frees the object. 
01687 * 01688 * @return an int >=0 is returned upon success, -EFAULT if the spinlock 01689 * does not exist anymore. 01690 * 01691 */ 01692 01693 int rt_named_spl_delete(SPL *spl) 01694 { 01695 int ret; 01696 if (!(ret = rt_drg_on_adr_cnt(spl))) { 01697 rt_spl_delete(spl); 01698 rt_free(spl); 01699 return 0; 01700 } 01701 return ret; 01702 } 01703 01704 /* +++++ SEMAPHORES, BARRIER, COND VARIABLES, RWLOCKS, SPINLOCKS ENTRIES ++++ */ 01705 01706 struct rt_native_fun_entry rt_sem_entries[] = { 01707 { { 0, rt_typed_sem_init }, TYPED_SEM_INIT }, 01708 { { 0, rt_sem_delete }, SEM_DELETE }, 01709 { { 0, _rt_typed_named_sem_init }, NAMED_SEM_INIT }, 01710 { { 0, rt_named_sem_delete }, NAMED_SEM_DELETE }, 01711 { { 1, rt_sem_signal }, SEM_SIGNAL }, 01712 { { 1, rt_sem_wait }, SEM_WAIT }, 01713 { { 1, rt_sem_wait_if }, SEM_WAIT_IF }, 01714 { { 1, rt_sem_wait_until }, SEM_WAIT_UNTIL }, 01715 { { 1, rt_sem_wait_timed }, SEM_WAIT_TIMED }, 01716 { { 1, rt_sem_broadcast }, SEM_BROADCAST }, 01717 { { 1, rt_sem_wait_barrier }, SEM_WAIT_BARRIER }, 01718 { { 1, rt_sem_count }, SEM_COUNT }, 01719 { { 1, rt_cond_wait }, COND_WAIT }, 01720 { { 1, rt_cond_wait_until }, COND_WAIT_UNTIL }, 01721 { { 1, rt_cond_wait_timed }, COND_WAIT_TIMED }, 01722 { { 0, rt_rwl_init }, RWL_INIT }, 01723 { { 0, rt_rwl_delete }, RWL_DELETE }, 01724 { { 0, _rt_named_rwl_init }, NAMED_RWL_INIT }, 01725 { { 0, rt_named_rwl_delete }, NAMED_RWL_DELETE }, 01726 { { 1, rt_rwl_rdlock }, RWL_RDLOCK }, 01727 { { 1, rt_rwl_rdlock_if }, RWL_RDLOCK_IF }, 01728 { { 1, rt_rwl_rdlock_until }, RWL_RDLOCK_UNTIL }, 01729 { { 1, rt_rwl_rdlock_timed }, RWL_RDLOCK_TIMED }, 01730 { { 1, rt_rwl_wrlock }, RWL_WRLOCK }, 01731 { { 1, rt_rwl_wrlock_if }, RWL_WRLOCK_IF }, 01732 { { 1, rt_rwl_wrlock_until }, RWL_WRLOCK_UNTIL }, 01733 { { 1, rt_rwl_wrlock_timed }, RWL_WRLOCK_TIMED }, 01734 { { 1, rt_rwl_unlock }, RWL_UNLOCK }, 01735 { { 0, rt_spl_init }, SPL_INIT }, 01736 { { 0, rt_spl_delete }, SPL_DELETE }, 01737 { { 0, 
_rt_named_spl_init }, NAMED_SPL_INIT }, 01738 { { 0, rt_named_spl_delete }, NAMED_SPL_DELETE }, 01739 { { 1, rt_spl_lock }, SPL_LOCK }, 01740 { { 1, rt_spl_lock_if }, SPL_LOCK_IF }, 01741 { { 1, rt_spl_lock_timed }, SPL_LOCK_TIMED }, 01742 { { 1, rt_spl_unlock }, SPL_UNLOCK }, 01743 { { 1, rt_cond_signal}, COND_SIGNAL }, 01744 { { 0, 0 }, 000 } 01745 }; 01746 01747 extern int set_rt_fun_entries(struct rt_native_fun_entry *entry); 01748 extern void reset_rt_fun_entries(struct rt_native_fun_entry *entry); 01749 01750 int __rtai_sem_init (void) 01751 { 01752 return set_rt_fun_entries(rt_sem_entries); 01753 } 01754 01755 void __rtai_sem_exit (void) 01756 { 01757 reset_rt_fun_entries(rt_sem_entries); 01758 } 01759 01760 /* +++++++ END SEMAPHORES, BARRIER, COND VARIABLES, RWLOCKS, SPINLOCKS ++++++ */ 01761 01762 /*@}*/ 01763 01764 #ifndef CONFIG_RTAI_SEM_BUILTIN 01765 module_init(__rtai_sem_init); 01766 module_exit(__rtai_sem_exit); 01767 #endif /* !CONFIG_RTAI_SEM_BUILTIN */ 01768 01769 #ifdef CONFIG_KBUILD 01770 EXPORT_SYMBOL(rt_typed_sem_init); 01771 EXPORT_SYMBOL(rt_sem_init); 01772 EXPORT_SYMBOL(rt_sem_delete); 01773 EXPORT_SYMBOL(rt_sem_count); 01774 EXPORT_SYMBOL(rt_sem_signal); 01775 EXPORT_SYMBOL(rt_sem_broadcast); 01776 EXPORT_SYMBOL(rt_sem_wait); 01777 EXPORT_SYMBOL(rt_sem_wait_if); 01778 EXPORT_SYMBOL(rt_sem_wait_until); 01779 EXPORT_SYMBOL(rt_sem_wait_timed); 01780 EXPORT_SYMBOL(rt_sem_wait_barrier); 01781 EXPORT_SYMBOL(_rt_typed_named_sem_init); 01782 EXPORT_SYMBOL(rt_named_sem_delete); 01783 01784 EXPORT_SYMBOL(rt_cond_signal); 01785 EXPORT_SYMBOL(rt_cond_wait); 01786 EXPORT_SYMBOL(rt_cond_wait_until); 01787 EXPORT_SYMBOL(rt_cond_wait_timed); 01788 01789 EXPORT_SYMBOL(rt_rwl_init); 01790 EXPORT_SYMBOL(rt_rwl_delete); 01791 EXPORT_SYMBOL(rt_rwl_rdlock); 01792 EXPORT_SYMBOL(rt_rwl_rdlock_if); 01793 EXPORT_SYMBOL(rt_rwl_rdlock_until); 01794 EXPORT_SYMBOL(rt_rwl_rdlock_timed); 01795 EXPORT_SYMBOL(rt_rwl_wrlock); 01796 EXPORT_SYMBOL(rt_rwl_wrlock_if); 01797 
EXPORT_SYMBOL(rt_rwl_wrlock_until); 01798 EXPORT_SYMBOL(rt_rwl_wrlock_timed); 01799 EXPORT_SYMBOL(rt_rwl_unlock); 01800 EXPORT_SYMBOL(_rt_named_rwl_init); 01801 EXPORT_SYMBOL(rt_named_rwl_delete); 01802 01803 EXPORT_SYMBOL(rt_spl_init); 01804 EXPORT_SYMBOL(rt_spl_delete); 01805 EXPORT_SYMBOL(rt_spl_lock); 01806 EXPORT_SYMBOL(rt_spl_lock_if); 01807 EXPORT_SYMBOL(rt_spl_lock_timed); 01808 EXPORT_SYMBOL(rt_spl_unlock); 01809 EXPORT_SYMBOL(_rt_named_spl_init); 01810 EXPORT_SYMBOL(rt_named_spl_delete); 01811 #endif /* CONFIG_KBUILD */

Generated on Thu Nov 20 11:49:51 2008 for RTAI API by doxygen 1.3.8