00001
00002
00003
00004
00005
00006
00007
00008
00009
00010
00011
00012
00013
00014
00015
00016
00017
00018
00019
00020
00021
00022
00023
00024
00025
00026
00027
00028
00029
00030
00031
00032 #include <linux/kernel.h>
00033 #include <linux/module.h>
00034
00035 #include <asm/uaccess.h>
00036
00037 #include <rtai_schedcore.h>
00038 #include <rtai_prinher.h>
00039 #include <rtai_sem.h>
00040 #include <rtai_rwl.h>
00041 #include <rtai_spl.h>
00042
00043 MODULE_LICENSE("GPL");
00044
00045 extern struct epoch_struct boot_epoch;
00046
00047 #ifdef CONFIG_RTAI_RT_POLL
00048
/*
 * Poller notification hooks.  With CONFIG_RTAI_RT_POLL enabled they
 * wake tasks polling the semaphore named "sem" in the expanding
 * function; otherwise they compile away to nothing.
 */
#define WAKEUP_WAIT_ONE_POLLER(wakeup) \
	if (wakeup) rt_wakeup_pollers(&sem->poll_wait_one, 0);

/* As above, additionally waking the "wait all" pollers once the
 * semaphore is fully free again (count == 1). */
#define WAKEUP_WAIT_ALL_POLLERS(wakeup) \
	do { \
		WAKEUP_WAIT_ONE_POLLER(wakeup) \
		if (sem->count == 1) rt_wakeup_pollers(&sem->poll_wait_all, 0);\
	} while (0)

#else

#define WAKEUP_WAIT_ONE_POLLER(wakeup)

#define WAKEUP_WAIT_ALL_POLLERS(wakeup)

#endif

/* Sanity check used at every public entry point: return RTE_OBJINV
 * from the enclosing function when "sem" lacks the semaphore magic. */
#define CHECK_SEM_MAGIC(sem) \
	do { if (sem->magic != RT_SEM_MAGIC) return RTE_OBJINV; } while (0)
00068
00069
00070
00071
00072
00073
00074
00075
00076
00077
00078
00079
00080
00081
00082
00083
00084
00085
00086
00087
00088
00089
00090
00091
00092
00093
00094
00095
00096
00097
00098
00099
00100
00101
00102
00103
00104
00105
00106
00107
00108
00109
00110
00111
00112
00113
00114
00115
00116
00117
00118
00119
00120
00121
00122
00123
00124
00125
00126
00127
00128
00129
/*
 * rt_typed_sem_init - initialize a semaphore of a given type.
 * @sem:   semaphore to set up (storage supplied by the caller).
 * @value: initial count (stashed in ->restype for resource semaphores).
 * @type:  encodes both queuing policy and semaphore kind.
 *
 * Queuing: RES_SEM forces priority ordering (qtype 0), otherwise
 * FIFO_Q selects FIFO ordering (qtype 1).
 * Kind: (type & 3) - 2 becomes sem->type; elsewhere in this file
 * type > 0 is treated as a resource (ownership) semaphore and
 * type <= 0 as a counting/binary one.  A negative kind with value > 1
 * is clamped to a count of 1; a resource kind starts free
 * (type = count = 1) and remembers the caller's "value" in ->restype.
 */
RTAI_SYSCALL_MODE void rt_typed_sem_init(SEM *sem, int value, int type)
{
	sem->magic = RT_SEM_MAGIC;
	sem->count = value;
	sem->restype = 0;
	if ((type & RES_SEM) == RES_SEM) {
		sem->qtype = 0;
	} else {
		sem->qtype = (type & FIFO_Q) ? 1 : 0;
	}
	type = (type & 3) - 2;
	if ((sem->type = type) < 0 && value > 1) {
		sem->count = 1;	/* binary kinds never start above 1 */
	} else if (type > 0) {
		sem->type = sem->count = 1;	/* resource semaphore starts free */
		sem->restype = value;	/* remember requested resource flavour */
	}
	/* Empty, self-linked wait queue; no owner yet. */
	sem->queue.prev = &(sem->queue);
	sem->queue.next = &(sem->queue);
	sem->queue.task = sem->owndby = NULL;

	/* Self-linked resource-queue element, tagged with this semaphore. */
	sem->resq.prev = sem->resq.next = &sem->resq;
	sem->resq.task = (void *)&sem->queue;
#ifdef CONFIG_RTAI_RT_POLL
	/* Empty poll queues, each guarded by its own spinlock. */
	sem->poll_wait_all.pollq.prev = sem->poll_wait_all.pollq.next = &(sem->poll_wait_all.pollq);
	sem->poll_wait_one.pollq.prev = sem->poll_wait_one.pollq.next = &(sem->poll_wait_one.pollq);
	sem->poll_wait_all.pollq.task = sem->poll_wait_one.pollq.task = NULL;
	spin_lock_init(&(sem->poll_wait_all.pollock));
	spin_lock_init(&(sem->poll_wait_one.pollock));
#endif
}
00161
00162
00163
00164
00165
00166
00167
00168
00169
00170
00171
00172
00173
00174
00175
00176
00177
00178
00179
00180
00181
00182
00183
00184
00185
00186
00187
00188
00189
/* Initialize "sem" as a plain counting semaphore with initial count
 * "value" (convenience wrapper around rt_typed_sem_init()). */
void rt_sem_init(SEM *sem, int value)
{
	rt_typed_sem_init(sem, value, CNT_SEM);
}
00194
00195
00196
00197
00198
00199
00200
00201
00202
00203
00204
00205
00206
00207
00208
00209
00210
00211
00212
00213
00214
00215
00216
00217
00218
/*
 * rt_sem_delete - destroy a semaphore, releasing every waiter.
 *
 * Wakes pollers with reason RTE_OBJREM, invalidates the magic under
 * the global hard lock, then walks the wait queue making each blocked
 * task ready with ->blocked_on = RTP_OBJREM so its pending wait call
 * returns RTE_OBJREM.  For an owned resource semaphore the owner's
 * inherited priority is reset; a pending suspend on the owner is
 * applied, and an owner marked RT_RESEM_SUSPDEL is deleted outright.
 * Finally the CPUs owning newly ready tasks are rescheduled.
 *
 * NOTE(review): rt_wakeup_pollers() is called unconditionally here,
 * while other call sites use the CONFIG_RTAI_RT_POLL wrappers —
 * confirm the symbol exists when polling is configured out.
 *
 * Returns 0 (RTE_OBJINV when the magic check fails).
 */
RTAI_SYSCALL_MODE int rt_sem_delete(SEM *sem)
{
	unsigned long flags;
	RT_TASK *task;
	unsigned long schedmap, sched;
	QUEUE *q;

	CHECK_SEM_MAGIC(sem);

	rt_wakeup_pollers(&sem->poll_wait_all, RTE_OBJREM);
	rt_wakeup_pollers(&sem->poll_wait_one, RTE_OBJREM);
	schedmap = 0;
	q = &(sem->queue);
	flags = rt_global_save_flags_and_cli();
	sem->magic = 0;	/* invalidate first: later entries fail CHECK_SEM_MAGIC */
	while ((q = q->next) != &(sem->queue) && (task = q->task)) {
		rem_timed_task(task);
		if (task->state != RT_SCHED_READY && (task->state &= ~(RT_SCHED_SEMAPHORE | RT_SCHED_DELAYED)) == RT_SCHED_READY) {
			task->blocked_on = RTP_OBJREM;	/* waiter will report RTE_OBJREM */
			enq_ready_task(task);
			set_bit(task->runnable_on_cpus & 0x1F, &schedmap);
		}
	}
	sched = schedmap;
	clear_bit(rtai_cpuid(), &schedmap);	/* local CPU handled via rt_schedule() */
	if ((task = sem->owndby) && sem->type > 0) {
		/* Resource semaphore: undo priority inheritance on the owner. */
		sched |= dequeue_resqel_reset_task_priority(&sem->resq, task);
		if (task->suspdepth) {
			if (task->suspdepth > 0) {
				task->state |= RT_SCHED_SUSPENDED;
				rem_ready_task(task);
				sched = 1;
			} else if (task->suspdepth == RT_RESEM_SUSPDEL) {
				rt_task_delete(task);	/* deferred self-delete of owner */
			}
		}
	}
	if (sched) {
		if (schedmap) {
			RT_SCHEDULE_MAP_BOTH(schedmap);
		} else {
			rt_schedule();
		}
	}
	rt_global_restore_flags(flags);
	return 0;
}
00266
00267
/* Return the current semaphore count — a raw, unlocked snapshot with
 * no magic check. */
RTAI_SYSCALL_MODE int rt_sem_count(SEM *sem)
{
	return sem->count;
}
00272
00273
00274
00275
00276
00277
00278
00279
00280
00281
00282
00283
00284
00285
00286
00287
00288
00289
00290
00291
00292
00293
00294
00295
00296
00297
00298 RTAI_SYSCALL_MODE int rt_sem_signal(SEM *sem)
00299 {
00300 unsigned long flags;
00301 RT_TASK *task;
00302 int tosched;
00303
00304 CHECK_SEM_MAGIC(sem);
00305
00306 flags = rt_global_save_flags_and_cli();
00307 if (sem->type) {
00308 if (sem->restype && (!sem->owndby || sem->owndby != RT_CURRENT)) {
00309 rt_global_restore_flags(flags);
00310 return RTE_PERM;
00311 }
00312 if (sem->type > 1) {
00313 sem->type--;
00314 rt_global_restore_flags(flags);
00315 return 0;
00316 }
00317 if (++sem->count > 1) {
00318 sem->count = 1;
00319 }
00320 } else {
00321 sem->count++;
00322 }
00323 if ((task = (sem->queue.next)->task)) {
00324 dequeue_blocked(task);
00325 rem_timed_task(task);
00326 if (task->state != RT_SCHED_READY && (task->state &= ~(RT_SCHED_SEMAPHORE | RT_SCHED_DELAYED)) == RT_SCHED_READY) {
00327 enq_ready_task(task);
00328 if (sem->type <= 0) {
00329 RT_SCHEDULE(task, rtai_cpuid());
00330 rt_global_restore_flags(flags);
00331 WAKEUP_WAIT_ALL_POLLERS(1);
00332 return 0;
00333 }
00334 tosched = 1;
00335 goto res;
00336 }
00337 }
00338 tosched = 0;
00339 res: if (sem->type > 0) {
00340 DECLARE_RT_CURRENT;
00341 int sched;
00342 ASSIGN_RT_CURRENT;
00343 sem->owndby = 0;
00344 sched = dequeue_resqel_reset_current_priority(&sem->resq, rt_current);
00345 if (rt_current->suspdepth) {
00346 if (rt_current->suspdepth > 0) {
00347 rt_current->state |= RT_SCHED_SUSPENDED;
00348 rem_ready_current(rt_current);
00349 sched = 1;
00350 } else if (task->suspdepth == RT_RESEM_SUSPDEL) {
00351 rt_task_delete(rt_current);
00352 }
00353 }
00354 if (sched) {
00355 if (tosched) {
00356 RT_SCHEDULE_BOTH(task, cpuid);
00357 } else {
00358 rt_schedule();
00359 }
00360 } else if (tosched) {
00361 RT_SCHEDULE(task, cpuid);
00362 }
00363 }
00364 rt_global_restore_flags(flags);
00365 WAKEUP_WAIT_ALL_POLLERS(1);
00366 return 0;
00367 }
00368
00369
00370
00371
00372
00373
00374
00375
00376
00377
00378
00379
00380
00381
00382
00383
00384
00385
/*
 * rt_sem_broadcast - wake every task blocked on "sem".
 *
 * Each waiter is dequeued, removed from the timers list and, if that
 * leaves it READY, enqueued for execution; the count is then forced
 * to 0.  CPUs owning woken tasks are rescheduled (the local CPU via
 * RT_SCHEDULE_MAP_BOTH).  "Wait one" pollers are notified when
 * anything was made runnable.
 *
 * NOTE(review): the global lock is dropped and re-taken at the bottom
 * of every loop iteration, so the queue can change mid-walk; also
 * "task = q->task" is assigned twice in a row — confirm both are
 * intentional before touching this loop.
 */
RTAI_SYSCALL_MODE int rt_sem_broadcast(SEM *sem)
{
	unsigned long flags, schedmap;
	RT_TASK *task;
	QUEUE *q;

	CHECK_SEM_MAGIC(sem);

	schedmap = 0;
	q = &(sem->queue);
	flags = rt_global_save_flags_and_cli();
	while ((q = q->next) != &(sem->queue)) {
		if ((task = q->task)) {
			dequeue_blocked(task = q->task);
			rem_timed_task(task);
			if (task->state != RT_SCHED_READY && (task->state &= ~(RT_SCHED_SEMAPHORE | RT_SCHED_DELAYED)) == RT_SCHED_READY) {
				enq_ready_task(task);
				set_bit(task->runnable_on_cpus & 0x1F, &schedmap);
			}
		}
		/* Brief preemption window between iterations. */
		rt_global_restore_flags(flags);
		flags = rt_global_save_flags_and_cli();
	}
	sem->count = 0;
	if (schedmap) {
		if (test_and_clear_bit(rtai_cpuid(), &schedmap)) {
			RT_SCHEDULE_MAP_BOTH(schedmap);
		} else {
			RT_SCHEDULE_MAP(schedmap);
		}
	}
	rt_global_restore_flags(flags);
	WAKEUP_WAIT_ONE_POLLER(schedmap);
	return 0;
}
00421
00422
00423
00424
00425
00426
00427
00428
00429
00430
00431
00432
00433
00434
00435
00436
00437
00438
00439
00440
00441
00442
00443
00444
00445
00446
00447
00448
00449
00450
00451
00452
00453
00454
00455
00456
00457
00458
00459
00460
00461
00462
00463
00464
/*
 * rt_sem_wait - take a semaphore, blocking until it is available.
 *
 * Fast path: count > 0, decrement and return the pre-decrement count.
 * Otherwise the caller blocks.  For a resource semaphore a recursive
 * take by the owner either bumps the nesting level (restype > 0,
 * returning levels held) or fails with RTE_DEADLOK; priority is lent
 * to the current owner before sleeping.  On wakeup ->blocked_on tells
 * the outcome: NULL = acquired; RTP_OBJREM = the semaphore was deleted
 * (RTE_OBJREM); anything else = forcibly unblocked (RTE_UNBLKD) and
 * the wait is rolled back, restoring the count and the owner's
 * inherited priority.  On success the caller becomes the owner of a
 * resource semaphore.
 */
RTAI_SYSCALL_MODE int rt_sem_wait(SEM *sem)
{
	RT_TASK *rt_current;
	unsigned long flags;
	int count;

	CHECK_SEM_MAGIC(sem);

	flags = rt_global_save_flags_and_cli();
	rt_current = RT_CURRENT;
	if ((count = sem->count) <= 0) {
		void *retp;
		unsigned long schedmap;
		if (sem->type > 0) {
			/* Resource semaphore: recursive take by the owner? */
			if (sem->restype && sem->owndby == rt_current) {
				if (sem->restype > 0) {
					count = sem->type++;	/* type counts nesting levels */
					rt_global_restore_flags(flags);
					return count + 1;
				}
				rt_global_restore_flags(flags);
				return RTE_DEADLOK;
			}
			schedmap = pass_prio(sem->owndby, rt_current);	/* lend priority */
		} else {
			schedmap = 0;
		}
		sem->count--;
		rt_current->state |= RT_SCHED_SEMAPHORE;
		rem_ready_current(rt_current);
		enqueue_blocked(rt_current, &sem->queue, sem->qtype);
		RT_SCHEDULE_MAP_BOTH(schedmap);	/* block; kick boosted CPUs */
		if (likely(!(retp = rt_current->blocked_on))) {
			count = sem->count;	/* clean wakeup: acquired */
		} else {
			if (likely(retp != RTP_OBJREM)) {
				/* Forced unblock: undo the wait completely. */
				dequeue_blocked(rt_current);
				if (++sem->count > 1 && sem->type) {
					sem->count = 1;
				}
				if (sem->owndby && sem->type > 0) {
					set_task_prio_from_resq(sem->owndby);	/* revoke lent priority */
				}
				rt_global_restore_flags(flags);
				return RTE_UNBLKD;
			} else {
				/* Semaphore deleted while we slept. */
				rt_current->prio_passed_to = NULL;
				rt_global_restore_flags(flags);
				return RTE_OBJREM;
			}
		}
	} else {
		sem->count--;
	}
	if (sem->type > 0) {
		enqueue_resqel(&sem->resq, sem->owndby = rt_current);	/* take ownership */
	}
	rt_global_restore_flags(flags);
	return count;
}
00525
00526
00527
00528
00529
00530
00531
00532
00533
00534
00535
00536
00537
00538
00539
00540
00541
00542
00543
00544
00545
00546
00547
00548
/*
 * rt_sem_wait_if - take "sem" only if that cannot block.
 *
 * When the count is positive the semaphore is taken (count
 * decremented, ownership recorded for resource semaphores) and the
 * pre-decrement count is returned.  When unavailable the non-positive
 * count is returned untouched — except for a recursive take by the
 * current owner, which returns nesting + 1 (restype > 0) or
 * RTE_DEADLOK (restype < 0).
 */
RTAI_SYSCALL_MODE int rt_sem_wait_if(SEM *sem)
{
	int count;
	unsigned long flags;

	CHECK_SEM_MAGIC(sem);

	flags = rt_global_save_flags_and_cli();
	if ((count = sem->count) <= 0) {
		/* Unavailable; only a recursive take by the owner "succeeds". */
		if (sem->restype && sem->owndby == RT_CURRENT) {
			if (sem->restype > 0) {
				count = sem->type++;	/* bump nesting level */
				rt_global_restore_flags(flags);
				return count + 1;
			}
			rt_global_restore_flags(flags);
			return RTE_DEADLOK;
		}
	} else {
		sem->count--;
		if (sem->type > 0) {
			enqueue_resqel(&sem->resq, sem->owndby = RT_CURRENT);	/* take ownership */
		}
	}
	rt_global_restore_flags(flags);
	return count;
}
00576
00577
00578
00579
00580
00581
00582
00583
00584
00585
00586
00587
00588
00589
00590
00591
00592
00593
00594
00595
00596
00597
00598
00599
00600
00601
00602
00603
00604
00605
00606
00607
00608
00609
00610
00611
/*
 * rt_sem_wait_until - rt_sem_wait() bounded by an absolute deadline.
 * @time: absolute deadline (converted in place by REALTIME2COUNT).
 *
 * Behaves like rt_sem_wait() but sleeps at most until "time".  The
 * outcome is decoded from ->blocked_on after wakeup: NULL = acquired
 * (returns the count); RTP_OBJREM = semaphore deleted (RTE_OBJREM);
 * otherwise the wait is rolled back and RTE_TIMOUT or RTE_UNBLKD is
 * returned.  A deadline already in the past skips queuing and lands
 * on the same rollback path, yielding RTE_TIMOUT.
 */
RTAI_SYSCALL_MODE int rt_sem_wait_until(SEM *sem, RTIME time)
{
	DECLARE_RT_CURRENT;
	int count;
	unsigned long flags;

	CHECK_SEM_MAGIC(sem);

	REALTIME2COUNT(time);

	flags = rt_global_save_flags_and_cli();
	ASSIGN_RT_CURRENT;
	if ((count = sem->count) <= 0) {
		void *retp;
		rt_current->blocked_on = &sem->queue;
		if ((rt_current->resume_time = time) > rt_time_h) {
			unsigned long schedmap;
			if (sem->type > 0) {
				/* Resource semaphore: recursive take by the owner? */
				if (sem->restype && sem->owndby == rt_current) {
					if (sem->restype > 0) {
						count = sem->type++;	/* type counts nesting */
						rt_global_restore_flags(flags);
						return count + 1;
					}
					rt_global_restore_flags(flags);
					return RTE_DEADLOK;
				}
				schedmap = pass_prio(sem->owndby, rt_current);	/* lend priority */
			} else {
				schedmap = 0;
			}
			sem->count--;
			rt_current->state |= (RT_SCHED_SEMAPHORE | RT_SCHED_DELAYED);
			rem_ready_current(rt_current);
			enqueue_blocked(rt_current, &sem->queue, sem->qtype);
			enq_timed_task(rt_current);
			RT_SCHEDULE_MAP_BOTH(schedmap);	/* sleep until signal or timeout */
		} else {
			/* Deadline already passed: take the timeout path with a
			 * self-linked queue element so the rollback is harmless. */
			sem->count--;
			rt_current->queue.prev = rt_current->queue.next = &rt_current->queue;
		}
		if (likely(!(retp = rt_current->blocked_on))) {
			count = sem->count;	/* acquired */
		} else if (likely(retp != RTP_OBJREM)) {
			/* Timeout or forced unblock: undo the wait. */
			dequeue_blocked(rt_current);
			if (++sem->count > 1 && sem->type) {
				sem->count = 1;
			}
			if (sem->owndby && sem->type > 0) {
				set_task_prio_from_resq(sem->owndby);	/* revoke lent priority */
			}
			rt_global_restore_flags(flags);
			return likely(retp > RTP_HIGERR) ? RTE_TIMOUT : RTE_UNBLKD;
		} else {
			/* Semaphore deleted while waiting. */
			rt_current->prio_passed_to = NULL;
			rt_global_restore_flags(flags);
			return RTE_OBJREM;
		}
	} else {
		sem->count--;
	}
	if (sem->type > 0) {
		enqueue_resqel(&sem->resq, sem->owndby = rt_current);	/* take ownership */
	}
	rt_global_restore_flags(flags);
	return count;
}
00679
00680
00681
00682
00683
00684
00685
00686
00687
00688
00689
00690
00691
00692
00693
00694
00695
00696
00697
00698
00699
00700
00701
00702
00703
00704
00705
00706
00707
00708
00709
00710
00711
00712
00713
00714
/* rt_sem_wait_until() with a timeout relative to the current time. */
RTAI_SYSCALL_MODE int rt_sem_wait_timed(SEM *sem, RTIME delay)
{
	return rt_sem_wait_until(sem, get_time() + delay);
}
00719
00720
00721
00722
00723
00724
00725
00726
00727
00728
00729
00730
00731
00732
00733
/*
 * rt_sem_wait_barrier - use "sem" as a one-shot barrier.
 *
 * On first use ->owndby is hijacked to store the barrier threshold
 * (the initial count, at least 1) and count/type are zeroed.  Callers
 * short of the threshold block in rt_sem_wait() and return -1; the
 * arrival that completes the party broadcasts, releasing everyone,
 * and returns 0.
 */
RTAI_SYSCALL_MODE int rt_sem_wait_barrier(SEM *sem)
{
	unsigned long flags;

	CHECK_SEM_MAGIC(sem);

	flags = rt_global_save_flags_and_cli();
	if (!sem->owndby) {
		/* First arrival: record the party size in ->owndby. */
		sem->owndby = (void *)(long)(sem->count < 1 ? 1 : sem->count);
		sem->count = sem->type = 0;
	}
	if ((1 - sem->count) < (long)sem->owndby) {
		/* Party not complete yet: join the sleepers. */
		rt_sem_wait(sem);
		rt_global_restore_flags(flags);
		return -1;
	}
	rt_sem_broadcast(sem);	/* last arrival releases everybody */
	rt_global_restore_flags(flags);
	return 0;
}
00754
00755
00756
00757
00758
00759
00760
00761
00762
00763
00764
00765
00766
00767
00768
00769
00770
00771
/*
 * rt_cond_signal - wake the first task waiting on condition "cnd".
 * Dequeues it, cancels any pending timeout and reschedules its CPU
 * when the task became READY.  Returns 0 (RTE_OBJINV on bad magic).
 */
RTAI_SYSCALL_MODE int rt_cond_signal(CND *cnd)
{
	unsigned long flags;
	RT_TASK *task;

	CHECK_SEM_MAGIC(cnd);

	flags = rt_global_save_flags_and_cli();
	if ((task = (cnd->queue.next)->task)) {
		dequeue_blocked(task);
		rem_timed_task(task);
		if (task->state != RT_SCHED_READY && (task->state &= ~(RT_SCHED_SEMAPHORE | RT_SCHED_DELAYED)) == RT_SCHED_READY) {
			enq_ready_task(task);
			RT_SCHEDULE(task, rtai_cpuid());
		}
	}
	rt_global_restore_flags(flags);
	return 0;
}
00791
/*
 * rt_cndmtx_signal - release mutex "mtx" on behalf of rt_current just
 * before it blocks on a condition variable.
 *
 * Remembers and collapses any recursion level (the old type is
 * returned so the caller can restore it after re-acquiring), caps the
 * count at 1, readies the first task queued on the mutex, drops
 * ownership and the caller's inherited priority, then reschedules.
 * Runs under the global lock held by the caller.
 */
static inline int rt_cndmtx_signal(SEM *mtx, RT_TASK *rt_current)
{
	int type;
	RT_TASK *task;

	if ((type = mtx->type) > 1) {
		mtx->type = 1;	/* collapse recursion; caller restores "type" later */
	}
	if (++mtx->count > 1) {
		mtx->count = 1;	/* mutex count saturates at 1 */
	}
	/* Hand the mutex to the first queued waiter, if any. */
	if ((task = (mtx->queue.next)->task)) {
		dequeue_blocked(task);
		rem_timed_task(task);
		if (task->state != RT_SCHED_READY && (task->state &= ~(RT_SCHED_SEMAPHORE | RT_SCHED_DELAYED)) == RT_SCHED_READY) {
			enq_ready_task(task);
		}
	}
	mtx->owndby = 0;
	dequeue_resqel_reset_current_priority(&mtx->resq, rt_current);	/* shed inheritance */
	if (task) {
		RT_SCHEDULE_BOTH(task, rtai_cpuid());
	} else {
		rt_schedule();
	}
	return type;
}
00819
00820
00821
00822
00823
00824
00825
00826
00827
00828
00829
00830
00831
00832
00833
00834
00835
00836
00837
00838
00839
/*
 * rt_cond_wait - block on condition "cnd", releasing mutex "mtx".
 *
 * The caller must own "mtx" (RTE_PERM otherwise).  It queues on the
 * condition, hands the mutex over via rt_cndmtx_signal(), and on
 * wakeup decodes the outcome from ->blocked_on (0, RTE_UNBLKD or
 * RTE_OBJREM).  The mutex is then re-acquired — even when the wait
 * itself failed — and its saved recursion level restored; the wait
 * status is what gets returned.
 */
RTAI_SYSCALL_MODE int rt_cond_wait(CND *cnd, SEM *mtx)
{
	RT_TASK *rt_current;
	unsigned long flags;
	void *retp;
	int retval, type;

	CHECK_SEM_MAGIC(cnd);
	CHECK_SEM_MAGIC(mtx);

	flags = rt_global_save_flags_and_cli();
	rt_current = RT_CURRENT;
	if (mtx->owndby != rt_current) {
		rt_global_restore_flags(flags);
		return RTE_PERM;	/* must hold the mutex to wait */
	}
	/* Queue on the condition, then atomically release the mutex. */
	rt_current->state |= RT_SCHED_SEMAPHORE;
	rem_ready_current(rt_current);
	enqueue_blocked(rt_current, &cnd->queue, cnd->qtype);
	type = rt_cndmtx_signal(mtx, rt_current);
	/* Decode the wakeup reason left in ->blocked_on. */
	if (likely((retp = rt_current->blocked_on) != RTP_OBJREM)) {
		if (unlikely(retp != NULL)) {
			dequeue_blocked(rt_current);
			retval = RTE_UNBLKD;
		} else {
			retval = 0;
		}
	} else {
		retval = RTE_OBJREM;
	}
	rt_global_restore_flags(flags);
	/* Re-acquire the mutex and restore its recursion level. */
	if (rt_sem_wait(mtx) < RTE_LOWERR) {
		mtx->type = type;
	}
	return retval;
}
00876
00877
00878
00879
00880
00881
00882
00883
00884
00885
00886
00887
00888
00889
00890
00891
00892
00893
00894
00895
00896
00897
00898
00899
00900
/*
 * rt_cond_wait_until - rt_cond_wait() bounded by an absolute deadline.
 * @time: absolute deadline (converted by REALTIME2COUNT).
 *
 * The caller must own "mtx" (RTE_PERM otherwise).  With a future
 * deadline it queues on the condition with a timeout, releases the
 * mutex via rt_cndmtx_signal(), decodes the wakeup (0, RTE_TIMOUT,
 * RTE_UNBLKD or RTE_OBJREM) and re-acquires the mutex, restoring its
 * recursion level.  A deadline already in the past returns RTE_TIMOUT
 * immediately, keeping the mutex held.
 */
RTAI_SYSCALL_MODE int rt_cond_wait_until(CND *cnd, SEM *mtx, RTIME time)
{
	DECLARE_RT_CURRENT;
	unsigned long flags;
	void *retp;
	int retval, type;

	CHECK_SEM_MAGIC(cnd);
	CHECK_SEM_MAGIC(mtx);

	REALTIME2COUNT(time);

	flags = rt_global_save_flags_and_cli();
	ASSIGN_RT_CURRENT;
	if (mtx->owndby != rt_current) {
		rt_global_restore_flags(flags);
		return RTE_PERM;	/* must hold the mutex to wait */
	}
	if ((rt_current->resume_time = time) > rt_time_h) {
		/* Queue on the condition and atomically release the mutex. */
		rt_current->state |= (RT_SCHED_SEMAPHORE | RT_SCHED_DELAYED);
		rem_ready_current(rt_current);
		enqueue_blocked(rt_current, &cnd->queue, cnd->qtype);
		enq_timed_task(rt_current);
		type = rt_cndmtx_signal(mtx, rt_current);
		/* Decode the wakeup reason left in ->blocked_on. */
		if (unlikely((retp = rt_current->blocked_on) == RTP_OBJREM)) {
			retval = RTE_OBJREM;
		} else if (unlikely(retp != NULL)) {
			dequeue_blocked(rt_current);
			retval = likely(retp > RTP_HIGERR) ? RTE_TIMOUT : RTE_UNBLKD;
		} else {
			retval = 0;
		}
		rt_global_restore_flags(flags);
		/* Re-acquire the mutex and restore its recursion level. */
		if (rt_sem_wait(mtx) < RTE_LOWERR) {
			mtx->type = type;
		}
	} else {
		retval = RTE_TIMOUT;	/* deadline already passed: keep the mutex */
		rt_global_restore_flags(flags);
	}
	return retval;
}
00943
00944
00945
00946
00947
00948
00949
00950
00951
00952
00953
00954
00955
00956
00957
00958
00959
00960
00961
00962
00963
00964
00965
00966
00967
00968
/* rt_cond_wait_until() with a timeout relative to the current time. */
RTAI_SYSCALL_MODE int rt_cond_wait_timed(CND *cnd, SEM *mtx, RTIME delay)
{
	return rt_cond_wait_until(cnd, mtx, get_time() + delay);
}
00973
00974
00975
00976
00977
00978
00979
00980
00981
00982
00983
00984
00985
00986
00987
00988
00989
00990
00991
00992
00993
00994
/*
 * rt_typed_rwl_init - set up a readers-writers lock.
 * @type: resource-semaphore flavour used for the writer mutex.
 *
 * wrmtx serializes writers (resource semaphore), while wrsem/rdsem
 * are priority-queued counting semaphores parking blocked writers and
 * readers respectively.  Always returns 0.
 */
RTAI_SYSCALL_MODE int rt_typed_rwl_init(RWL *rwl, int type)
{
	rt_typed_sem_init(&rwl->wrmtx, type, RES_SEM);
	rt_typed_sem_init(&rwl->wrsem, 0, CNT_SEM | PRIO_Q);
	rt_typed_sem_init(&rwl->rdsem, 0, CNT_SEM | PRIO_Q);
	return 0;
}
01002
01003
01004
01005
01006
01007
01008
01009
01010
01011
01012
01013
01014
01015 RTAI_SYSCALL_MODE int rt_rwl_delete(RWL *rwl)
01016 {
01017 int ret;
01018
01019 ret = rt_sem_delete(&rwl->rdsem);
01020 ret |= rt_sem_delete(&rwl->wrsem);
01021 ret |= rt_sem_delete(&rwl->wrmtx);
01022 return !ret ? 0 : RTE_OBJINV;
01023 }
01024
01025
01026
01027
01028
01029
01030
01031
01032
01033
01034
01035
01036
01037
01038
01039
/*
 * rt_rwl_rdlock - acquire "rwl" for reading, blocking as needed.
 *
 * A reader waits while a writer holds wrmtx or a queued writer's
 * ->priority value does not exceed the caller's.  Holding the write
 * side yourself is an error (RTE_RWLINV).  Each successful reader
 * bumps rdsem.owndby, which doubles as the reader count.  Returns 0,
 * RTE_RWLINV, or the error from rt_sem_wait().
 */
RTAI_SYSCALL_MODE int rt_rwl_rdlock(RWL *rwl)
{
	unsigned long flags;
	RT_TASK *wtask, *rt_current;

	flags = rt_global_save_flags_and_cli();
	rt_current = RT_CURRENT;
	while (rwl->wrmtx.owndby || ((wtask = (rwl->wrsem.queue.next)->task) && wtask->priority <= rt_current->priority)) {
		int ret;
		if (rwl->wrmtx.owndby == rt_current) {
			rt_global_restore_flags(flags);
			return RTE_RWLINV;	/* caller already owns the write side */
		}
		if ((ret = rt_sem_wait(&rwl->rdsem)) >= RTE_LOWERR) {
			rt_global_restore_flags(flags);
			return ret;
		}
	}
	((volatile int *)&rwl->rdsem.owndby)[0]++;	/* owndby doubles as reader count */
	rt_global_restore_flags(flags);
	return 0;
}
01062
01063
01064
01065
01066
01067
01068
01069
01070
01071
01072
01073
01074
01075
01076
01077 RTAI_SYSCALL_MODE int rt_rwl_rdlock_if(RWL *rwl)
01078 {
01079 unsigned long flags;
01080 RT_TASK *wtask;
01081
01082 flags = rt_global_save_flags_and_cli();
01083 if (!rwl->wrmtx.owndby && (!(wtask = (rwl->wrsem.queue.next)->task) || wtask->priority > RT_CURRENT->priority)) {
01084 ((volatile int *)&rwl->rdsem.owndby)[0]++;
01085 rt_global_restore_flags(flags);
01086 return 0;
01087 }
01088 rt_global_restore_flags(flags);
01089 return -1;
01090 }
01091
01092
01093
01094
01095
01096
01097
01098
01099
01100
01101
01102
01103
01104
01105
01106
01107
01108
01109
/*
 * rt_rwl_rdlock_until - rt_rwl_rdlock() bounded by an absolute
 * deadline passed straight to rt_sem_wait_until().  Returns 0,
 * RTE_RWLINV when the caller owns the write side, or the wait error
 * (e.g. RTE_TIMOUT).
 */
RTAI_SYSCALL_MODE int rt_rwl_rdlock_until(RWL *rwl, RTIME time)
{
	unsigned long flags;
	RT_TASK *wtask, *rt_current;

	flags = rt_global_save_flags_and_cli();
	rt_current = RT_CURRENT;
	while (rwl->wrmtx.owndby || ((wtask = (rwl->wrsem.queue.next)->task) && wtask->priority <= rt_current->priority)) {
		int ret;
		if (rwl->wrmtx.owndby == rt_current) {
			rt_global_restore_flags(flags);
			return RTE_RWLINV;	/* caller already owns the write side */
		}
		if ((ret = rt_sem_wait_until(&rwl->rdsem, time)) >= RTE_LOWERR) {
			rt_global_restore_flags(flags);
			return ret;
		}
	}
	((volatile int *)&rwl->rdsem.owndby)[0]++;	/* owndby doubles as reader count */
	rt_global_restore_flags(flags);
	return 0;
}
01132
01133
01134
01135
01136
01137
01138
01139
01140
01141
01142
01143
01144
01145
01146
01147
01148
01149
01150
01151
/* rt_rwl_rdlock_until() with a timeout relative to the current time. */
RTAI_SYSCALL_MODE int rt_rwl_rdlock_timed(RWL *rwl, RTIME delay)
{
	return rt_rwl_rdlock_until(rwl, get_time() + delay);
}
01156
01157
01158
01159
01160
01161
01162
01163
01164
01165
01166
01167
01168
01169
01170
01171 RTAI_SYSCALL_MODE int rt_rwl_wrlock(RWL *rwl)
01172 {
01173 unsigned long flags;
01174 int ret;
01175
01176 flags = rt_global_save_flags_and_cli();
01177 while (rwl->rdsem.owndby) {
01178 if ((ret = rt_sem_wait(&rwl->wrsem)) >= RTE_LOWERR) {
01179 rt_global_restore_flags(flags);
01180 return ret;
01181 }
01182 }
01183 if ((ret = rt_sem_wait(&rwl->wrmtx)) >= RTE_LOWERR) {
01184 rt_global_restore_flags(flags);
01185 return ret;
01186 }
01187 rt_global_restore_flags(flags);
01188 return 0;
01189 }
01190
01191
01192
01193
01194
01195
01196
01197
01198
01199
01200
01201
01202
01203
/*
 * rt_rwl_wrlock_if - try to acquire "rwl" for writing without
 * blocking.  Fails (-1) when any reader is active or the writer
 * mutex cannot be taken immediately; returns 0 on success.  A
 * successful trylock on wrmtx yields a positive pre-wait count below
 * the error range.
 */
RTAI_SYSCALL_MODE int rt_rwl_wrlock_if(RWL *rwl)
{
	unsigned long flags;
	int ret;

	flags = rt_global_save_flags_and_cli();
	if (!rwl->rdsem.owndby && (ret = rt_sem_wait_if(&rwl->wrmtx)) > 0 && ret < RTE_LOWERR) {
		rt_global_restore_flags(flags);
		return 0;
	}
	rt_global_restore_flags(flags);
	return -1;
}
01217
01218
01219
01220
01221
01222
01223
01224
01225
01226
01227
01228
01229
01230
01231
01232
01233
01234
01235
01236 RTAI_SYSCALL_MODE int rt_rwl_wrlock_until(RWL *rwl, RTIME time)
01237 {
01238 unsigned long flags;
01239 int ret;
01240
01241 flags = rt_global_save_flags_and_cli();
01242 while (rwl->rdsem.owndby) {
01243 if ((ret = rt_sem_wait_until(&rwl->wrsem, time)) >= RTE_LOWERR) {
01244 rt_global_restore_flags(flags);
01245 return ret;
01246 };
01247 }
01248 if ((ret = rt_sem_wait_until(&rwl->wrmtx, time)) >= RTE_LOWERR) {
01249 rt_global_restore_flags(flags);
01250 return ret;
01251 };
01252 rt_global_restore_flags(flags);
01253 return 0;
01254 }
01255
01256
01257
01258
01259
01260
01261
01262
01263
01264
01265
01266
01267
01268
01269
01270
01271
01272
01273
01274
/* rt_rwl_wrlock_until() with a timeout relative to the current time. */
RTAI_SYSCALL_MODE int rt_rwl_wrlock_timed(RWL *rwl, RTIME delay)
{
	return rt_rwl_wrlock_until(rwl, get_time() + delay);
}
01279
01280
01281
01282
01283
01284
01285
01286
01287
01288
01289
01290
01291
01292
01293
01294
/*
 * rt_rwl_unlock - release one hold on "rwl" (reader or writer).
 *
 * Phase 1: if the caller owns the writer mutex, signal it; otherwise
 * drop one reader from the rdsem.owndby count; with neither held the
 * caller gets RTE_PERM.  Phase 2 (the lock is briefly released in
 * between): when the lock became completely free, hand it to the
 * first queued writer, or broadcast to all queued readers when the
 * first reader outranks (lower ->priority value than) the first
 * writer.
 */
RTAI_SYSCALL_MODE int rt_rwl_unlock(RWL *rwl)
{
	unsigned long flags;

	flags = rt_global_save_flags_and_cli();
	if (rwl->wrmtx.owndby == RT_CURRENT) {
		rt_sem_signal(&rwl->wrmtx);	/* writer release */
	} else if (rwl->rdsem.owndby) {
		((volatile int *)&rwl->rdsem.owndby)[0]--;	/* one reader less */
	} else {
		rt_global_restore_flags(flags);
		return RTE_PERM;	/* caller holds nothing */
	}
	rt_global_restore_flags(flags);
	flags = rt_global_save_flags_and_cli();
	if (!rwl->wrmtx.owndby && !rwl->rdsem.owndby) {
		/* Lock fully free: pick the next owner(s) by priority. */
		RT_TASK *wtask, *rtask;
		wtask = (rwl->wrsem.queue.next)->task;
		rtask = (rwl->rdsem.queue.next)->task;
		if (wtask && rtask) {
			if (wtask->priority <= rtask->priority) {
				rt_sem_signal(&rwl->wrsem);
			} else {
				rt_sem_broadcast(&rwl->rdsem);
			}
		} else if (wtask) {
			rt_sem_signal(&rwl->wrsem);
		} else if (rtask) {
			rt_sem_broadcast(&rwl->rdsem);
		}
	}
	rt_global_restore_flags(flags);
	return 0;
}
01329
01330
01331
01332
01333
01334
01335
01336
01337
01338
01339
01340
01341
01342
01343
01344
01345
01346
01347
01348
01349
01350
/* Initialize spinlock "spl": no owner, zero recursion count.
 * Returns 0. */
RTAI_SYSCALL_MODE int rt_spl_init(SPL *spl)
{
	spl->owndby = 0;
	spl->count = 0;
	return 0;
}
01357
01358
01359
01360
01361
01362
01363
01364
01365
01366
01367
01368
01369
/* Nothing to tear down for a spinlock; kept for API symmetry.
 * Returns 0. */
RTAI_SYSCALL_MODE int rt_spl_delete(SPL *spl)
{
	return 0;
}
01374
01375
01376
01377
01378
01379
01380
01381
01382
01383
01384
01385
01386
01387
01388
01389
01390
/*
 * rt_spl_lock - take spinlock "spl".
 *
 * A recursive take by the current owner just bumps ->count; otherwise
 * the caller spins on cmpxchg (with hard interrupts disabled) until
 * it installs itself as owner, and records its saved interrupt state
 * in spl->flags for rt_spl_unlock() to restore on the final release.
 * Returns 0.
 */
RTAI_SYSCALL_MODE int rt_spl_lock(SPL *spl)
{
	unsigned long flags;
	RT_TASK *rt_current;

	rtai_save_flags_and_cli(flags);
	if (spl->owndby == (rt_current = RT_CURRENT)) {
		spl->count++;	/* recursive take */
	} else {
		/* Spin until we install ourselves as owner. */
		while (cmpxchg(&spl->owndby, 0L, rt_current));
		spl->flags = flags;	/* restored by the matching unlock */
	}
	rtai_restore_flags(flags);
	return 0;
}
01406
01407
01408
01409
01410
01411
01412
01413
01414
01415
01416
01417
01418
01419
01420
01421
01422 RTAI_SYSCALL_MODE int rt_spl_lock_if(SPL *spl)
01423 {
01424 unsigned long flags;
01425 RT_TASK *rt_current;
01426
01427 rtai_save_flags_and_cli(flags);
01428 if (spl->owndby == (rt_current = RT_CURRENT)) {
01429 spl->count++;
01430 } else {
01431 if (cmpxchg(&spl->owndby, 0L, rt_current)) {
01432 rtai_restore_flags(flags);
01433 return -1;
01434 }
01435 spl->flags = flags;
01436 }
01437 rtai_restore_flags(flags);
01438 return 0;
01439 }
01440
01441
01442
01443
01444
01445
01446
01447
01448
01449
01450
01451
01452
01453
01454
01455
01456
01457
01458
01459
01460
01461
/*
 * rt_spl_lock_timed - rt_spl_lock() bounded by "ns" nanoseconds.
 *
 * Recursive takes by the owner succeed immediately.  Otherwise the
 * caller spins on cmpxchg until ownership is gained or the rdtsc
 * deadline ("ns" converted via the tuned CPU frequency) passes.
 * Returns 0 on success, -1 on timeout.
 */
RTAI_SYSCALL_MODE int rt_spl_lock_timed(SPL *spl, unsigned long ns)
{
	unsigned long flags;
	RT_TASK *rt_current;

	rtai_save_flags_and_cli(flags);
	if (spl->owndby == (rt_current = RT_CURRENT)) {
		spl->count++;	/* recursive take */
	} else {
		RTIME end_time;
		long locked;
		end_time = rdtsc() + imuldiv(ns, tuned.cpu_freq, 1000000000);
		/* Spin until acquired or the deadline passes. */
		while ((locked = (long)cmpxchg(&spl->owndby, 0L, rt_current)) && rdtsc() < end_time);
		if (locked) {
			rtai_restore_flags(flags);
			return -1;	/* timed out */
		}
		spl->flags = flags;	/* restored by the matching unlock */
	}
	rtai_restore_flags(flags);
	return 0;
}
01484
01485
01486
01487
01488
01489
01490
01491
01492
01493
01494
01495
01496
01497
01498
01499
01500
01501
/*
 * rt_spl_unlock - release one level of spinlock "spl".
 *
 * Only the owner may unlock (-1 otherwise).  A recursion level just
 * decrements ->count; the final release clears the owner and the
 * interrupt state saved at acquisition (spl->flags) is restored.
 * Returns 0 on success.
 */
RTAI_SYSCALL_MODE int rt_spl_unlock(SPL *spl)
{
	unsigned long flags;
	RT_TASK *rt_current;

	rtai_save_flags_and_cli(flags);
	if (spl->owndby == (rt_current = RT_CURRENT)) {
		if (spl->count) {
			--spl->count;	/* unwind one recursion level */
		} else {
			spl->owndby = 0;	/* final release */
			spl->count = 0;
		}
		rtai_restore_flags(spl->flags);
		return 0;
	}
	rtai_restore_flags(flags);
	return -1;	/* caller is not the owner */
}
01521
01522
01523
01524 #include <rtai_registry.h>
01525
01526
01527
01528
01529
01530
01531
01532
01533
01534
01535
01536
01537
01538
01539
01540
01541
01542
01543
01544
01545
01546
01547
01548
01549
01550
01551
01552
01553
01554
01555
01556
01557
01558
01559
01560
01561
01562
01563
01564
01565
01566
01567 RTAI_SYSCALL_MODE SEM *_rt_typed_named_sem_init(unsigned long sem_name, int value, int type, unsigned long *handle)
01568 {
01569 SEM *sem;
01570
01571 if ((sem = rt_get_adr_cnt(sem_name))) {
01572 if (handle) {
01573 if ((unsigned long)handle > PAGE_OFFSET) {
01574 *handle = 1;
01575 } else {
01576 rt_copy_to_user(handle, sem, sizeof(SEM *));
01577 }
01578 }
01579 return sem;
01580 }
01581 if ((sem = rt_malloc(sizeof(SEM)))) {
01582 rt_typed_sem_init(sem, value, type);
01583 if (rt_register(sem_name, sem, IS_SEM, 0)) {
01584 return sem;
01585 }
01586 rt_sem_delete(sem);
01587 }
01588 rt_free(sem);
01589 return (SEM *)0;
01590 }
01591
01592
01593
01594
01595
01596
01597
01598
01599
01600
01601
01602
01603
01604
01605
01606
01607
01608
01609
01610
01611
01612
01613 RTAI_SYSCALL_MODE int rt_named_sem_delete(SEM *sem)
01614 {
01615 int ret;
01616 if (!(ret = rt_drg_on_adr_cnt(sem))) {
01617 if (!rt_sem_delete(sem)) {
01618 rt_free(sem);
01619 return 0;
01620 } else {
01621 return RTE_OBJINV;
01622 }
01623 }
01624 return ret;
01625 }
01626
01627
01628
01629
01630
01631
01632
01633
01634
01635
01636
01637
01638
01639
01640
01641
01642
01643
01644
01645
01646
01647
01648
01649
01650
01651
01652
01653
01654
01655
/*
 * _rt_named_rwl_init - find-or-create a registry-named RWL.
 *
 * An existing registration gains a reference and is returned.
 * Otherwise a new RWL is allocated, initialized and registered; on
 * registration failure it is destroyed and freed.  Returns the RWL
 * address, or NULL on allocation/registration failure.
 */
RTAI_SYSCALL_MODE RWL *_rt_named_rwl_init(unsigned long rwl_name)
{
	RWL *rwl;

	if ((rwl = rt_get_adr_cnt(rwl_name))) {
		return rwl;	/* already registered: reuse */
	}
	if ((rwl = rt_malloc(sizeof(RWL)))) {
		rt_rwl_init(rwl);
		if (rt_register(rwl_name, rwl, IS_RWL, 0)) {
			return rwl;
		}
		rt_rwl_delete(rwl);	/* registration failed: undo init */
	}
	rt_free(rwl);
	return (RWL *)0;
}
01673
01674
01675
01676
01677
01678
01679
01680
01681
01682
01683
01684
01685
01686
01687
01688
01689
01690
01691
01692
01693
01694 RTAI_SYSCALL_MODE int rt_named_rwl_delete(RWL *rwl)
01695 {
01696 int ret;
01697 if (!(ret = rt_drg_on_adr_cnt(rwl))) {
01698 if (!rt_rwl_delete(rwl)) {
01699 rt_free(rwl);
01700 return 0;
01701 } else {
01702 return RTE_OBJINV;
01703 }
01704 }
01705 return ret;
01706 }
01707
01708
01709
01710
01711
01712
01713
01714
01715
01716
01717
01718
01719
01720
01721
01722
01723
01724
01725
01726
01727
01728
01729
01730
01731
01732
01733
01734
01735
/*
 * _rt_named_spl_init - find-or-create a registry-named spinlock.
 *
 * An existing registration gains a reference and is returned.
 * Otherwise a new SPL is allocated, initialized and registered; on
 * registration failure it is destroyed and freed.  Returns the SPL
 * address, or NULL on allocation/registration failure.
 */
RTAI_SYSCALL_MODE SPL *_rt_named_spl_init(unsigned long spl_name)
{
	SPL *spl;

	if ((spl = rt_get_adr_cnt(spl_name))) {
		return spl;	/* already registered: reuse */
	}
	if ((spl = rt_malloc(sizeof(SPL)))) {
		rt_spl_init(spl);
		if (rt_register(spl_name, spl, IS_SPL, 0)) {
			return spl;
		}
		rt_spl_delete(spl);	/* registration failed: undo init */
	}
	rt_free(spl);
	return (SPL *)0;
}
01753
01754
01755
01756
01757
01758
01759
01760
01761
01762
01763
01764
01765
01766
01767
01768
01769
01770
01771
01772
01773 RTAI_SYSCALL_MODE int rt_named_spl_delete(SPL *spl)
01774 {
01775 int ret;
01776 if (!(ret = rt_drg_on_adr_cnt(spl))) {
01777 rt_spl_delete(spl);
01778 rt_free(spl);
01779 return 0;
01780 }
01781 return ret;
01782 }
01783
01784
01785
/*
 * Dispatch table mapping RT_POLL_* codes to the byte offset of the
 * corresponding poll queue inside its object (MBX or SEM); the
 * per-entry function hook is left NULL here.  Without
 * CONFIG_RTAI_RT_POLL the offsets are 0 placeholders.
 */
struct rt_poll_enc rt_poll_ofstfun[] = {
	[RT_POLL_NOT_TO_USE]    = { 0 , NULL },
#ifdef CONFIG_RTAI_RT_POLL
	[RT_POLL_MBX_RECV]      = { offsetof(MBX, poll_recv), NULL },
	[RT_POLL_MBX_SEND]      = { offsetof(MBX, poll_send), NULL },
	[RT_POLL_SEM_WAIT_ALL]  = { offsetof(SEM, poll_wait_all), NULL },
	[RT_POLL_SEM_WAIT_ONE]  = { offsetof(SEM, poll_wait_one), NULL }
#else
	[RT_POLL_MBX_RECV]      = { 0, NULL },
	[RT_POLL_MBX_SEND]      = { 0, NULL },
	[RT_POLL_SEM_WAIT_ALL]  = { 0, NULL },
	[RT_POLL_SEM_WAIT_ONE]  = { 0, NULL }
#endif
};
EXPORT_SYMBOL(rt_poll_ofstfun);
01801
01802 #ifdef CONFIG_RTAI_RT_POLL
01803
/* Private semaphore used to park a polling task: its wait queue, the
 * poller itself and a flag saying whether it still has to wait. */
typedef struct rt_poll_sem { QUEUE queue; RT_TASK *task; int wait; } POLL_SEM;
01805
/*
 * Reschedule the CPUs collected in "tosched_mask": remote CPUs get a
 * scheduling IPI, then the local CPU runs rt_schedule() under the
 * global lock.
 *
 * NOTE(review): the SMP guard tests "tosched_mask | cpumask", which
 * is always non-zero (cpumask has the local bit set), making the
 * local reschedule unconditional — looks like "&" was intended;
 * confirm before changing.
 */
static inline void rt_schedule_tosched(unsigned long tosched_mask)
{
	unsigned long flags;
#ifdef CONFIG_SMP
	unsigned long cpumask, rmask;
	rmask = tosched_mask & ~(cpumask = (1 << rtai_cpuid()));	/* remote CPUs only */
	if (rmask) {
		rtai_save_flags_and_cli(flags);
		send_sched_ipi(rmask);
		rtai_restore_flags(flags);
	}
	if (tosched_mask | cpumask)
#endif
	{
		flags = rt_global_save_flags_and_cli();
		rt_schedule();
		rt_global_restore_flags(flags);
	}
}
01825
/*
 * Block rt_current on poll semaphore "sem" until rt_poll_signal()
 * clears ->wait, or it is forcibly unblocked.  Returns 0 when
 * genuinely signalled, RTE_UNBLKD otherwise; returns 0 immediately
 * when there is nothing to wait for.
 */
static inline int rt_poll_wait(POLL_SEM *sem, RT_TASK *rt_current)
{
	unsigned long flags;
	int retval = 0;

	flags = rt_global_save_flags_and_cli();
	if (sem->wait) {
		rt_current->state |= RT_SCHED_POLL;
		rem_ready_current(rt_current);
		enqueue_blocked(rt_current, &sem->queue, 1);
		rt_schedule();
		/* Still queued on wakeup => forcibly unblocked, not signalled. */
		if (unlikely(rt_current->blocked_on != NULL)) {
			dequeue_blocked(rt_current);
			retval = RTE_UNBLKD;
		}
	}
	rt_global_restore_flags(flags);
	return retval;
}
01845
/*
 * Timed variant of rt_poll_wait: block on @sem until woken or until the
 * absolute deadline @time.  If the deadline is already in the past
 * (<= rt_time_h) the task does not block at all.  After rescheduling, a
 * non-NULL blocked_on distinguishes the outcome: a pointer above
 * RTP_HIGERR means we are still on a real queue (timeout, RTE_TIMOUT),
 * anything else means we were forcibly unblocked (RTE_UNBLKD).
 * Runs under the global hard-irq lock.  @cpuid is currently unused here.
 */
static inline int rt_poll_wait_until(POLL_SEM *sem, RTIME time, RT_TASK *rt_current, int cpuid)
{
	unsigned long flags;
	int retval = 0;

	flags = rt_global_save_flags_and_cli();
	if (sem->wait) {
		rt_current->blocked_on = &sem->queue;
		if ((rt_current->resume_time = time) > rt_time_h) {
			rt_current->state |= (RT_SCHED_POLL | RT_SCHED_DELAYED);
			rem_ready_current(rt_current);
			enqueue_blocked(rt_current, &sem->queue, 1);
			enq_timed_task(rt_current);
			rt_schedule();
		}
		if (unlikely(rt_current->blocked_on != NULL)) {
			retval = likely((void *)rt_current->blocked_on > RTP_HIGERR) ? RTE_TIMOUT : RTE_UNBLKD;
			dequeue_blocked(rt_current);
		}
	}
	rt_global_restore_flags(flags);
	return retval;
}
01869
/*
 * Wake the task (if any) blocked on the poll wait object @sem.
 * Clears sem->wait so a poller that has not yet blocked will fall
 * through, dequeues the blocked task, cancels its timeout, and if
 * clearing the POLL/DELAYED bits makes it READY, re-enqueues it.
 *
 * Returns (1 << runnable_on_cpus) — the CPU bit the caller must
 * reschedule via rt_schedule_tosched() — or 0 if nothing became ready.
 * Runs under the global hard-irq lock.
 */
static inline int rt_poll_signal(POLL_SEM *sem)
{
	unsigned long flags;
	RT_TASK *task;
	int retval = 0;

	flags = rt_global_save_flags_and_cli();
	sem->wait = 0;
	if ((task = (sem->queue.next)->task)) {
		dequeue_blocked(task);
		rem_timed_task(task);
		if (task->state != RT_SCHED_READY && (task->state &= ~(RT_SCHED_POLL | RT_SCHED_DELAYED)) == RT_SCHED_READY) {
			enq_ready_task(task);
			retval = (1 << task->runnable_on_cpus);
		}
	}
	rt_global_restore_flags(flags);
	return retval;
}
01889
/*
 * Wake every poller queued on @ql (an object's poll queue/lock pair),
 * reporting @reason back to each of them.  Called by the polled objects
 * (semaphores, mailboxes) when their pollable condition changes.
 */
void rt_wakeup_pollers(struct rt_poll_ql *ql, int reason)
{
	QUEUE *q, *queue = &ql->pollq;
	spinlock_t *qlock = &ql->pollock;

	rt_spin_lock_irq(qlock);
	if ((q = queue->next) != queue) {
		POLL_SEM *sem;
		unsigned long tosched_mask = 0UL;
		do {
			/* The queue slot holds a POLL_SEM pointer, stored by
			   _rt_poll; reuse the same slot to return the wakeup
			   reason to the poller. */
			sem = (POLL_SEM *)q->task;
			q->task = (void *)((unsigned long)reason);
			/* Unlink the head entry from the poll queue. */
			(queue->next = q->next)->prev = queue;
			/* Accumulate the CPUs that now need rescheduling. */
			tosched_mask |= rt_poll_signal(sem);
			/* NOTE(review): lock is cycled between entries,
			   presumably to bound hold time / let pollers remove
			   themselves concurrently — confirm before changing. */
			rt_spin_unlock_irq(qlock);
			rt_spin_lock_irq(qlock);
		} while ((q = queue->next) != queue);
		rt_spin_unlock_irq(qlock);
		/* Kick all CPUs collected above in a single pass. */
		rt_schedule_tosched(tosched_mask);
	} else {
		rt_spin_unlock_irq(qlock);
	}
}

EXPORT_SYMBOL(rt_wakeup_pollers);
01915
01916
01917
01918
01919
01920
01921
01922
01923
01924
01925
01926
01927
01928
01929
01930
01931
01932
01933
01934
01935
01936
01937
01938
01939
01940
01941
01942
01943
01944
01945
01946
01947
01948
01949
01950
01951
01952
01953
01954
01955
01956
01957
01958
01959
01960
01961
01962
01963
01964
01965
01966
01967
01968
01969
01970
01971
01972
01973
01974
01975
01976
01977
01978
01979
01980
01981
01982
01983
01984
01985
01986
01987
01988
01989
01990
01991
01992
01993
01994
01995
01996
/* QL(i): resolve descriptor i to the poll queue/lock pair embedded in the
 * polled object, using the per-kind offset from rt_poll_ofstfun. */
#define QL(i) ((struct rt_poll_ql *)(pds[i].what + rt_poll_ofstfun[pds[i].forwhat].offset))

/*
 * _rt_poll - wait on a set of RTAI pollable objects (select/poll style).
 *
 * @pdsa:    array of @nr descriptors ({what: object ptr, forwhat: RT_POLL_*
 *           kind}); on return, ready entries have "what" overwritten with
 *           the wakeup reason (see rt_wakeup_pollers).
 * @nr:      number of descriptors.
 * @timeout: 0/1 -> wait indefinitely; >1 -> absolute deadline; <0 ->
 *           relative timeout (negated and added to the current time).
 * @space:   non-zero if @pdsa is in kernel space, else it is copied
 *           from/to user space.
 *
 * Returns the count of ready descriptors, or if none, the wait status
 * (0, RTE_TIMOUT, RTE_UNBLKD, or ENOMEM on allocation failure).
 */
RTAI_SYSCALL_MODE int _rt_poll(struct rt_poll_s *pdsa, unsigned long nr, RTIME timeout, int space)
{
	struct rt_poll_s *pds;
	int i, polled, semret, cpuid;
	/* Stack-local wait object shared by all queue entries of this call. */
	POLL_SEM sem = { { &sem.queue, &sem.queue, NULL }, rt_smp_current[cpuid = rtai_cpuid()], 1 };
#ifdef CONFIG_RTAI_RT_POLL_ON_STACK
	/* Scratch arrays on the kernel stack (sized by nr: beware large nr). */
	struct rt_poll_s pdsv[nr];
	QUEUE pollq[nr];
#else
	/* Scratch arrays from the RTAI heap instead, to spare kernel stack. */
	struct rt_poll_s *pdsv;
	QUEUE *pollq;
	if (!(pdsv = rt_malloc(nr*sizeof(struct rt_poll_s))) && nr > 0) {
		return ENOMEM;
	}
	if (!(pollq = rt_malloc(nr*sizeof(QUEUE))) && nr > 0) {
		rt_free(pdsv);
		return ENOMEM;
	}
#endif
	if (space) {
		pds = pdsa;
	} else {
		rt_copy_from_user(pdsv, pdsa, nr*sizeof(struct rt_poll_s));
		pds = pdsv;
	}
	/* Pass 1: for each descriptor whose object would block, link a queue
	 * entry (pointing at our POLL_SEM) into the object's poll queue in
	 * task-priority order; count objects that are ready right away. */
	for (polled = i = 0; i < nr; i++) {
		QUEUE *queue = NULL;
		spinlock_t *qlock = NULL;
		if (rt_poll_ofstfun[pds[i].forwhat].topoll(pds[i].what)) {
			struct rt_poll_ql *ql = QL(i);
			queue = &ql->pollq;
			qlock = &ql->pollock;
		} else {
			pollq[i].task = NULL;
			polled++;
		}
		if (queue) {
			QUEUE *q = queue;
			pollq[i].task = (RT_TASK *)&sem;
			rt_spin_lock_irq(qlock);
			/* Find the insertion point: after all pollers of higher
			 * or equal priority (lower number = higher priority). */
			while ((q = q->next) != queue && (((POLL_SEM *)q->task)->task)->priority <= sem.task->priority);
			pollq[i].next = q;
			q->prev = (pollq[i].prev = q->prev)->next = &pollq[i];
			rt_spin_unlock_irq(qlock);
		} else {
			/* Mark as "not enqueued" so pass 2 skips the unlink. */
			pds[i].forwhat = 0;
		}
	}
	semret = 0;
	/* Block only if nothing was immediately ready. */
	if (!polled) {
		if (timeout < 0) {
			/* Negative timeout: relative delay from now. */
			semret = rt_poll_wait_until(&sem, get_time() - timeout, sem.task, cpuid);
		} else if (timeout > 1) {
			/* Absolute deadline. */
			semret = rt_poll_wait_until(&sem, timeout, sem.task, cpuid);
		} else if (timeout < 1 && nr > 0) {
			/* timeout 0: wait with no time limit. */
			semret = rt_poll_wait(&sem, sem.task);
		}
	}
	/* Pass 2: unlink any entry still queued; entries whose task slot was
	 * overwritten by rt_wakeup_pollers are ready — report the reason. */
	for (polled = i = 0; i < nr; i++) {
		if (pds[i].forwhat) {
			spinlock_t *qlock = &QL(i)->pollock;
			rt_spin_lock_irq(qlock);
			if (pollq[i].task == (void *)&sem) {
				(pollq[i].prev)->next = pollq[i].next;
				(pollq[i].next)->prev = pollq[i].prev;
			}
			rt_spin_unlock_irq(qlock);
		}
		if (pollq[i].task != (void *)&sem) {
			pds[i].what = pollq[i].task;
			polled++;
		}
	}
	if (!space) {
		rt_copy_to_user(pdsa, pds, nr*sizeof(struct rt_poll_s));
	}
#ifndef CONFIG_RTAI_RT_POLL_ON_STACK
	rt_free(pdsv);
	rt_free(pollq);
#endif
	return polled ? polled : semret;
}

EXPORT_SYMBOL(_rt_poll);
02083
02084 #endif
02085
02086
02087
02088
02089
/*
 * Native-function dispatch table registered with the RTAI syscall layer
 * (set_rt_fun_entries).  The first field of each pair marks whether the
 * service can block (1) or not (0); the second is the handler; the third
 * is the syscall index.  Terminated by a zero entry.
 */
struct rt_native_fun_entry rt_sem_entries[] = {
	{ { 0, rt_typed_sem_init }, TYPED_SEM_INIT },
	{ { 0, rt_sem_delete }, SEM_DELETE },
	{ { 0, _rt_typed_named_sem_init }, NAMED_SEM_INIT },
	{ { 0, rt_named_sem_delete }, NAMED_SEM_DELETE },
	{ { 1, rt_sem_signal }, SEM_SIGNAL },
	{ { 1, rt_sem_broadcast }, SEM_BROADCAST },
	{ { 1, rt_sem_wait }, SEM_WAIT },
	{ { 1, rt_sem_wait_if }, SEM_WAIT_IF },
	{ { 1, rt_sem_wait_until }, SEM_WAIT_UNTIL },
	{ { 1, rt_sem_wait_timed }, SEM_WAIT_TIMED },
	{ { 1, rt_sem_wait_barrier }, SEM_WAIT_BARRIER },
	{ { 1, rt_sem_count }, SEM_COUNT },
	{ { 1, rt_cond_signal}, COND_SIGNAL },
	{ { 1, rt_cond_wait }, COND_WAIT },
	{ { 1, rt_cond_wait_until }, COND_WAIT_UNTIL },
	{ { 1, rt_cond_wait_timed }, COND_WAIT_TIMED },
	{ { 0, rt_typed_rwl_init }, RWL_INIT },
	{ { 0, rt_rwl_delete }, RWL_DELETE },
	{ { 0, _rt_named_rwl_init }, NAMED_RWL_INIT },
	{ { 0, rt_named_rwl_delete }, NAMED_RWL_DELETE },
	{ { 1, rt_rwl_rdlock }, RWL_RDLOCK },
	{ { 1, rt_rwl_rdlock_if }, RWL_RDLOCK_IF },
	{ { 1, rt_rwl_rdlock_until }, RWL_RDLOCK_UNTIL },
	{ { 1, rt_rwl_rdlock_timed }, RWL_RDLOCK_TIMED },
	{ { 1, rt_rwl_wrlock }, RWL_WRLOCK },
	{ { 1, rt_rwl_wrlock_if }, RWL_WRLOCK_IF },
	{ { 1, rt_rwl_wrlock_until }, RWL_WRLOCK_UNTIL },
	{ { 1, rt_rwl_wrlock_timed }, RWL_WRLOCK_TIMED },
	{ { 1, rt_rwl_unlock }, RWL_UNLOCK },
	{ { 0, rt_spl_init }, SPL_INIT },
	{ { 0, rt_spl_delete }, SPL_DELETE },
	{ { 0, _rt_named_spl_init }, NAMED_SPL_INIT },
	{ { 0, rt_named_spl_delete }, NAMED_SPL_DELETE },
	{ { 1, rt_spl_lock }, SPL_LOCK },
	{ { 1, rt_spl_lock_if }, SPL_LOCK_IF },
	{ { 1, rt_spl_lock_timed }, SPL_LOCK_TIMED },
	{ { 1, rt_spl_unlock }, SPL_UNLOCK },
#ifdef CONFIG_RTAI_RT_POLL
	{ { 1, _rt_poll }, SEM_RT_POLL },
#endif
	{ { 0, 0 }, 000 }
};
02133
02134 extern int set_rt_fun_entries(struct rt_native_fun_entry *entry);
02135 extern void reset_rt_fun_entries(struct rt_native_fun_entry *entry);
02136
02137 static int poll_wait(void *sem) { return ((SEM *)sem)->count <= 0; }
02138
02139 int __rtai_sem_init (void)
02140 {
02141 rt_poll_ofstfun[RT_POLL_SEM_WAIT_ALL].topoll =
02142 rt_poll_ofstfun[RT_POLL_SEM_WAIT_ONE].topoll = poll_wait;
02143 return set_rt_fun_entries(rt_sem_entries);
02144 }
02145
02146 void __rtai_sem_exit (void)
02147 {
02148 rt_poll_ofstfun[RT_POLL_SEM_WAIT_ALL].topoll =
02149 rt_poll_ofstfun[RT_POLL_SEM_WAIT_ONE].topoll = NULL;
02150 reset_rt_fun_entries(rt_sem_entries);
02151 }
02152
02153
02154
02155
02156
02157 #ifndef CONFIG_RTAI_SEM_BUILTIN
02158 module_init(__rtai_sem_init);
02159 module_exit(__rtai_sem_exit);
02160 #endif
02161
02162 EXPORT_SYMBOL(rt_typed_sem_init);
02163 EXPORT_SYMBOL(rt_sem_init);
02164 EXPORT_SYMBOL(rt_sem_delete);
02165 EXPORT_SYMBOL(rt_sem_count);
02166 EXPORT_SYMBOL(rt_sem_signal);
02167 EXPORT_SYMBOL(rt_sem_broadcast);
02168 EXPORT_SYMBOL(rt_sem_wait);
02169 EXPORT_SYMBOL(rt_sem_wait_if);
02170 EXPORT_SYMBOL(rt_sem_wait_until);
02171 EXPORT_SYMBOL(rt_sem_wait_timed);
02172 EXPORT_SYMBOL(rt_sem_wait_barrier);
02173 EXPORT_SYMBOL(_rt_typed_named_sem_init);
02174 EXPORT_SYMBOL(rt_named_sem_delete);
02175
02176 EXPORT_SYMBOL(rt_cond_signal);
02177 EXPORT_SYMBOL(rt_cond_wait);
02178 EXPORT_SYMBOL(rt_cond_wait_until);
02179 EXPORT_SYMBOL(rt_cond_wait_timed);
02180
02181 EXPORT_SYMBOL(rt_typed_rwl_init);
02182 EXPORT_SYMBOL(rt_rwl_delete);
02183 EXPORT_SYMBOL(rt_rwl_rdlock);
02184 EXPORT_SYMBOL(rt_rwl_rdlock_if);
02185 EXPORT_SYMBOL(rt_rwl_rdlock_until);
02186 EXPORT_SYMBOL(rt_rwl_rdlock_timed);
02187 EXPORT_SYMBOL(rt_rwl_wrlock);
02188 EXPORT_SYMBOL(rt_rwl_wrlock_if);
02189 EXPORT_SYMBOL(rt_rwl_wrlock_until);
02190 EXPORT_SYMBOL(rt_rwl_wrlock_timed);
02191 EXPORT_SYMBOL(rt_rwl_unlock);
02192 EXPORT_SYMBOL(_rt_named_rwl_init);
02193 EXPORT_SYMBOL(rt_named_rwl_delete);
02194
02195 EXPORT_SYMBOL(rt_spl_init);
02196 EXPORT_SYMBOL(rt_spl_delete);
02197 EXPORT_SYMBOL(rt_spl_lock);
02198 EXPORT_SYMBOL(rt_spl_lock_if);
02199 EXPORT_SYMBOL(rt_spl_lock_timed);
02200 EXPORT_SYMBOL(rt_spl_unlock);
02201 EXPORT_SYMBOL(_rt_named_spl_init);
02202 EXPORT_SYMBOL(rt_named_spl_delete);