00001
00002
00003
00004
00005
00006
00007
00008
00009
00010
00011
00012
00013
00014
00015
00016
00017
00018
00019
00020
00021
00022
00023
00024
00025
00026
#include <rtai_schedcore.h>
00027
#include <rtai_registry.h>
00028
#include <linux/module.h>
00029
00030
00031
00032
00033
/*
 * rt_set_sched_policy - select FIFO or round-robin scheduling for a task.
 * @task:          target task (not validated here; caller must pass a live RT_TASK)
 * @policy:        0 = FIFO, non-zero = round-robin (stored normalized to 0/1)
 * @rr_quantum_ns: round-robin time slice in nanoseconds
 *
 * When round robin is selected, the quantum is converted to timer counts
 * for the task's CPU.  A converted quantum that is zero or implausibly
 * large (high nibble set, i.e. & 0xF0000000) is replaced by the Linux
 * tick period as a sane default.  FIFO (policy == 0) leaves the RR
 * fields untouched.
 */
void rt_set_sched_policy(RT_TASK *task, int policy, int rr_quantum_ns)
{
	if ((task->policy = policy ? 1 : 0)) {
		task->rr_quantum = nano2count_cpuid(rr_quantum_ns, task->runnable_on_cpus);
		/* Reject zero or out-of-range quanta: fall back to one Linux tick. */
		if ((task->rr_quantum & 0xF0000000) || !task->rr_quantum) {
#ifdef CONFIG_SMP
			task->rr_quantum = rt_smp_times[task->runnable_on_cpus].linux_tick;
#else
			task->rr_quantum = rt_times.linux_tick;
#endif
		}
		task->rr_remaining = task->rr_quantum;
		task->yield_time = 0;
	}
}
00049
00050
00051
00052
00053
00054
00055
00056
00057
00058
00059
00060
00061
00062
00063
00064
00065
00066
00067 int rt_get_prio(
RT_TASK *task)
00068 {
00069
if (
task->magic != RT_TASK_MAGIC) {
00070
return -EINVAL;
00071 }
00072
return task->base_priority;
00073 }
00074
00075
00076
00077
00078
00079
00080
00081
00082
00083
00084
00085
00086
00087
00088
00089
00090
00091
00092
00093
00094 int rt_get_inher_prio(
RT_TASK *task)
00095 {
00096
if (
task->magic != RT_TASK_MAGIC) {
00097
return -EINVAL;
00098 }
00099
return task->base_priority;
00100 }
00101
00102
00103
00104
00105
00106
00107
00108
00109
00110
00111
00112
00113
00114
00115
00116
00117
00118
00119
00120
00121
00122
00123
/*
 * rt_change_prio - change a task's base priority, propagating inheritance.
 * @task:     task whose base priority changes.
 * @priority: new base priority (>= 0).
 *
 * Runs entirely under the global hard-irq-off lock.  If the new base
 * priority is better (numerically lower) than the task's effective
 * priority, the boost is pushed down the priority-inheritance chain
 * (task->prio_passed_to) for as long as each link's effective priority
 * is worse than @priority.  Ready tasks are re-enqueued in the ready
 * list; tasks blocked on priority-ordered wait queues are re-inserted
 * at their new position.  Finally a reschedule is triggered on every
 * CPU that had a task moved.
 *
 * Returns the previous base priority, or -EINVAL on bad magic or
 * negative @priority.
 */
int rt_change_prio(RT_TASK *task, int priority)
{
	unsigned long flags;
	int prio;

	if (task->magic != RT_TASK_MAGIC || priority < 0) {
		return -EINVAL;
	}

	prio = task->base_priority;
	flags = rt_global_save_flags_and_cli();
	/* Note: assignment inside the condition — base_priority is updated
	 * unconditionally; the body runs only if this is a boost. */
	if ((task->base_priority = priority) < task->priority) {
		unsigned long schedmap;
		QUEUE *q;
		schedmap = 0;
		do {
			task->priority = priority;
			if (task->state == RT_SCHED_READY) {
				/* Unlink from the ready list and re-enqueue at new prio. */
				(task->rprev)->rnext = task->rnext;
				(task->rnext)->rprev = task->rprev;
				enq_ready_task(task);
#ifdef CONFIG_SMP
				set_bit(task->runnable_on_cpus & 0x1F, &schedmap);
#else
				schedmap = 1;
#endif
			} else if ((q = task->blocked_on) && !((task->state & RT_SCHED_SEMAPHORE) && ((SEM *)q)->qtype)) {
				/* Blocked on a priority-ordered queue (not a FIFO
				 * semaphore): re-insert at the new priority position. */
				(task->queue.prev)->next = task->queue.next;
				(task->queue.next)->prev = task->queue.prev;
				while ((q = q->next) != task->blocked_on && (q->task)->priority <= priority);
				q->prev = (task->queue.prev = q->prev)->next = &(task->queue);
				task->queue.next = q;
#ifdef CONFIG_SMP
				set_bit(task->runnable_on_cpus & 0x1F, &schedmap);
#else
				schedmap = 1;
#endif
			}
		/* Follow the inheritance chain while the boost still applies. */
		} while ((task = task->prio_passed_to) && task->priority > priority);
		if (schedmap) {
#ifdef CONFIG_SMP
			/* If the local CPU is affected, schedule here and remotely;
			 * otherwise just kick the remote CPUs in the map. */
			if (test_and_clear_bit(rtai_cpuid(), &schedmap)) {
				RT_SCHEDULE_MAP_BOTH(schedmap);
			} else {
				RT_SCHEDULE_MAP(schedmap);
			}
#else
			rt_schedule();
#endif
		}
	}
	rt_global_restore_flags(flags);
	return prio;
}
00178
00179
00180
00181
00182
00183
00184
00185
00186
00187
00188
00189
00190
00191 RT_TASK *
rt_whoami(
void)
00192 {
00193
return _rt_whoami();
00194 }
00195
00196
00197
00198
00199
00200
00201
00202
00203
00204
00205
00206
00207
00208
00209
00210
00211
00212
00213
00214
/*
 * rt_task_yield - yield the CPU to the next ready task of equal priority.
 *
 * Under the global hard-irq-off lock, scan forward in the ready list for
 * the run of tasks sharing the caller's priority.  If the caller is not
 * already the last of that run, it is unlinked and re-inserted at the
 * end of the run, then rt_schedule() is invoked so the next same-priority
 * task gets the CPU.  If no same-priority peer exists, this is a no-op.
 */
void rt_task_yield(void)
{
	RT_TASK *rt_current, *task;
	unsigned long flags;

	flags = rt_global_save_flags_and_cli();
	task = (rt_current = RT_CURRENT)->rnext;
	/* Walk past all tasks at the same priority as the caller. */
	while (rt_current->priority == task->priority) {
		task = task->rnext;
	}
	if (task != rt_current->rnext) {
		/* Unlink the caller ... */
		(rt_current->rprev)->rnext = rt_current->rnext;
		(rt_current->rnext)->rprev = rt_current->rprev;
		/* ... and splice it back in just before the first
		 * lower-priority task, i.e. at the tail of its run. */
		task->rprev = (rt_current->rprev = task->rprev)->rnext = rt_current;
		rt_current->rnext = task;
		rt_schedule();
	}
	rt_global_restore_flags(flags);
}
00234
00235
00236
00237
00238
00239
00240
00241
00242
00243
00244
00245
00246
00247
00248
00249
00250
00251
00252
00253
00254
00255
/*
 * rt_task_suspend - suspend a task (nestable).
 * @task: task to suspend, or NULL for the calling task.
 *
 * Suspends only if the task owns no resources (owndres == 0); otherwise
 * suspension is deferred, except that a negative suspdepth (pre-armed
 * resumes) is consumed by incrementing it.  The first real suspension
 * (suspdepth transitioning 0 -> 1) removes the task from the ready and
 * timed lists and, if it is the caller, reschedules immediately.
 *
 * Returns the resulting suspend depth, or -EINVAL on bad magic.
 */
int rt_task_suspend(RT_TASK *task)
{
	unsigned long flags;

	if (!task) {
		task = RT_CURRENT;
	} else if (task->magic != RT_TASK_MAGIC) {
		return -EINVAL;
	}

	flags = rt_global_save_flags_and_cli();
	if (!task->owndres) {
		/* Only the 0 -> 1 transition actually blocks the task. */
		if (!task->suspdepth++) {
			rem_ready_task(task);
			rem_timed_task(task);
			task->state |= RT_SCHED_SUSPENDED;
			if (task == RT_CURRENT) {
				rt_schedule();
			}
		}
	} else if (task->suspdepth < 0) {
		/* Resource owner: just consume a pending resume credit. */
		task->suspdepth++;
	}
	rt_global_restore_flags(flags);
	return task->suspdepth;
}
00282
00283
/*
 * rt_task_suspend_if - conditionally absorb a pending resume.
 * @task: task to act on, or NULL for the calling task.
 *
 * Unlike rt_task_suspend(), this never actually blocks: it only
 * increments a negative suspdepth (a resume issued before any suspend),
 * and only when the task owns no resources.
 *
 * Returns the resulting suspend depth, or -EINVAL on bad magic.
 */
int rt_task_suspend_if(RT_TASK *task)
{
	unsigned long flags;

	if (!task) {
		task = RT_CURRENT;
	} else if (task->magic != RT_TASK_MAGIC) {
		return -EINVAL;
	}

	flags = rt_global_save_flags_and_cli();
	if (!task->owndres && task->suspdepth < 0) {
		task->suspdepth++;
	}
	rt_global_restore_flags(flags);
	return task->suspdepth;
}
00301
00302
/*
 * rt_task_suspend_until - suspend a task until an absolute time.
 * @task: task to suspend, or NULL for the calling task.
 * @time: absolute wakeup time (timer counts).
 *
 * Suspends only when the task owns no resources and is not already
 * suspended (suspdepth == 0); a task with pending resumes (negative
 * suspdepth) just consumes one credit, a resource owner defers.  The
 * task is parked on the timed list with blocked_on pointing at itself
 * as a timeout marker: if the caller wakes with blocked_on still set,
 * the wait timed out and SEM_TIMOUT is returned (with suspdepth
 * restored).  A @time already in the past suspends nothing.
 *
 * Returns the resulting suspend depth, SEM_TIMOUT on timeout, or
 * -EINVAL on bad magic.
 */
int rt_task_suspend_until(RT_TASK *task, RTIME time)
{
	unsigned long flags;

	if (!task) {
		task = RT_CURRENT;
	} else if (task->magic != RT_TASK_MAGIC) {
		return -EINVAL;
	}

	flags = rt_global_save_flags_and_cli();
	if (!task->owndres) {
		if (!task->suspdepth) {
#ifdef CONFIG_SMP
			/* cpuid is consumed by per-CPU macros below (e.g. rt_time_h)
			 * — presumably; confirm against rtai_schedcore.h. */
			int cpuid = rtai_cpuid();
#endif
			if ((task->resume_time = time) > rt_time_h) {
				task->suspdepth++;
				rem_ready_task(task);
				enq_timed_task(task);
				task->state |= (RT_SCHED_SUSPENDED | RT_SCHED_DELAYED);
				/* Self-pointer marks "still waiting"; cleared on resume. */
				task->blocked_on = (void *)task;
				if (task == RT_CURRENT) {
					rt_schedule();
					if (task->blocked_on) {
						/* Woken by the timer, not a resume: timeout. */
						task->suspdepth--;
						rt_global_restore_flags(flags);
						return SEM_TIMOUT;
					}
				}
			}
		} else {
			task->suspdepth++;
		}
	} else if (task->suspdepth < 0) {
		task->suspdepth++;
	}
	rt_global_restore_flags(flags);
	return task->suspdepth;
}
00343
00344
00345 int rt_task_suspend_timed(
RT_TASK *task,
RTIME delay)
00346 {
00347
return rt_task_suspend_until(
task, get_time() + delay);
00348 }
00349
00350
00351
00352
00353
00354
00355
00356
00357
00358
00359
00360
00361
00362
00363
00364
00365
00366
00367
00368
00369
/*
 * rt_task_resume - resume a suspended task (nestable).
 * @task: task to resume.
 *
 * Decrements the suspend depth unconditionally (so resumes issued before
 * any suspend drive suspdepth negative, pre-arming rt_task_suspend*).
 * When the depth reaches zero the task is taken off the timed list, its
 * SUSPENDED/DELAYED bits cleared, and — if that leaves it READY — it is
 * re-enqueued and a reschedule is triggered.
 *
 * Returns 0, or -EINVAL on bad magic.
 */
int rt_task_resume(RT_TASK *task)
{
	unsigned long flags;

	if (task->magic != RT_TASK_MAGIC) {
		return -EINVAL;
	}

	flags = rt_global_save_flags_and_cli();
	if (!(--task->suspdepth)) {
		rem_timed_task(task);
		/* Clear suspension bits; only requeue if no other blocking
		 * state (semaphore, send, ...) remains. */
		if ((task->state &= ~(RT_SCHED_SUSPENDED | RT_SCHED_DELAYED)) == RT_SCHED_READY) {
			task->blocked_on = NOTHING;
			enq_ready_task(task);
			RT_SCHEDULE(task, rtai_cpuid());
		}
	}
	rt_global_restore_flags(flags);
	return 0;
}
00390
00391
00392
00393
00394
00395
00396
00397
00398
00399
00400
00401
00402
00403
00404
00405
00406
00407
00408
00409
00410
00411
00412
00413
00414
00415
00416
00417
00418
00419
00420
00421
00422
00423
00424
00425
00426
00427
00428
00429
00430
00431 int rt_get_task_state(
RT_TASK *task)
00432 {
00433
return task->state;
00434 }
00435
00436
00437
00438
00439
00440
00441
00442
00443
00444
00445
00446
00447
00448
00449
00450
00451
00452
00453
00454
00455
00456
00457
00458
00459
00460
00461
00462
00463
00464
00465
/*
 * rt_linux_use_fpu - declare whether Linux (non-RT) context uses the FPU.
 * @use_fpu_flag: non-zero to mark the Linux task as an FPU user.
 *
 * Sets uses_fpu (normalized to 0/1) for the Linux placeholder task on
 * every online CPU.
 *
 * NOTE(review): the loop body names rt_linux_task without indexing, yet
 * iterates cpuid — rt_linux_task is presumably a macro expanding to a
 * per-CPU entry via the local `cpuid` (e.g. rt_smp_linux_task[cpuid]);
 * confirm against rtai_schedcore.h.  Do not rename `cpuid`.
 */
void rt_linux_use_fpu(int use_fpu_flag)
{
	int cpuid;
	for (cpuid = 0; cpuid < num_online_cpus(); cpuid++) {
		rt_linux_task.uses_fpu = use_fpu_flag ? 1 : 0;
	}
}
00473
00474
00475
00476
00477
00478
00479
00480
00481
00482
00483
00484
00485
00486
00487
00488
00489
00490
00491
00492
00493
00494
00495
00496
00497
00498
00499
00500 int rt_task_use_fpu(
RT_TASK *task,
int use_fpu_flag)
00501 {
00502
if (
task->magic != RT_TASK_MAGIC) {
00503
return -EINVAL;
00504 }
00505
task->uses_fpu = use_fpu_flag ? 1 : 0;
00506
return 0;
00507 }
00508
00509
00510
00511
00512
00513
00514
00515
00516
00517
00518
00519
00520
00521
00522
00523
00524
00525
00526
00527
00528
00529
00530
00531
00532 int rt_task_signal_handler(
RT_TASK *task,
void (*handler)(
void))
00533 {
00534
if (
task->magic != RT_TASK_MAGIC) {
00535
return -EINVAL;
00536 }
00537
task->signal =
handler;
00538
return 0;
00539 }
00540
00541
00542
/*
 * rt_gettimeorig - compute the offset between wall-clock and TSC time.
 * @time_orig: out array; [0] receives the offset in CPU (TSC) counts,
 *             [1] the same offset converted to nanoseconds.
 *
 * Samples do_gettimeofday() and the TSC back-to-back with local
 * interrupts disabled so the two readings correspond to the same
 * instant, then forms (wall time in counts) - (TSC counts).
 */
void rt_gettimeorig(RTIME time_orig[])
{
	unsigned long flags;
	struct timeval tv;
	/* Irqs off: keep the timeofday and TSC samples coherent. */
	rtai_save_flags_and_cli(flags);
	do_gettimeofday(&tv);
	time_orig[0] = rtai_rdtsc();
	rtai_restore_flags(flags);
	/* Wall time converted to CPU counts, minus the TSC sample. */
	time_orig[0] = tv.tv_sec*(long long)tuned.cpu_freq + llimd(tv.tv_usec, tuned.cpu_freq, 1000000) - time_orig[0];
	time_orig[1] = llimd(time_orig[0], 1000000000, tuned.cpu_freq);
}
00554
00555
00556
00557
00558
00559
00560
00561
00562
00563
00564
00565
00566
00567
00568
00569
00570
00571
00572
00573
00574
00575
00576
00577
00578
00579
00580
00581
00582
00583
00584
00585
00586
00587
/*
 * rt_task_make_periodic_relative_ns - make a task periodic (relative, ns).
 * @task:        task to make periodic, or NULL for the calling task.
 * @start_delay: delay to the first release, in nanoseconds, relative to now.
 * @period:      period in nanoseconds.
 *
 * Converts both times to timer counts for the task's CPU, then arms the
 * task: resume_time = now + start_delay, suspdepth cleared.  If the task
 * is not already on the timed list it is moved there (clearing SUSPENDED,
 * setting DELAYED) and a reschedule is triggered.
 *
 * Returns 0, or -EINVAL on bad magic.
 */
int rt_task_make_periodic_relative_ns(RT_TASK *task, RTIME start_delay, RTIME period)
{
	long flags;

	if (!task) {
		task = RT_CURRENT;
	} else if (task->magic != RT_TASK_MAGIC) {
		return -EINVAL;
	}
	/* Conversions done before taking the global lock. */
	start_delay = nano2count_cpuid(start_delay, task->runnable_on_cpus);
	period = nano2count_cpuid(period, task->runnable_on_cpus);
	flags = rt_global_save_flags_and_cli();
	task->resume_time = rt_get_time_cpuid(task->runnable_on_cpus) + start_delay;
	task->period = period;
	task->suspdepth = 0;
	if (!(task->state & RT_SCHED_DELAYED)) {
		rem_ready_task(task);
		task->state = (task->state & ~RT_SCHED_SUSPENDED) | RT_SCHED_DELAYED;
		enq_timed_task(task);
	}
	RT_SCHEDULE(task, rtai_cpuid());
	rt_global_restore_flags(flags);
	return 0;
}
00612
00613
00614
00615
00616
00617
00618
00619
00620
00621
00622
00623
00624
00625
00626
00627
00628
00629
00630
00631
00632
00633
00634
00635
00636
00637
00638
00639
00640
00641
00642
00643
00644
00645
00646
/*
 * rt_task_make_periodic - make a task periodic (absolute start, counts).
 * @task:       task to make periodic, or NULL for the calling task.
 * @start_time: absolute time of the first release, in timer counts.
 * @period:     period in timer counts.
 *
 * Same as rt_task_make_periodic_relative_ns() but takes values already
 * expressed in counts and an absolute start time.
 *
 * Returns 0, or -EINVAL on bad magic.
 */
int rt_task_make_periodic(RT_TASK *task, RTIME start_time, RTIME period)
{
	long flags;

	if (!task) {
		task = RT_CURRENT;
	} else if (task->magic != RT_TASK_MAGIC) {
		return -EINVAL;
	}
	flags = rt_global_save_flags_and_cli();
	task->resume_time = start_time;
	task->period = period;
	task->suspdepth = 0;
	if (!(task->state & RT_SCHED_DELAYED)) {
		rem_ready_task(task);
		task->state = (task->state & ~RT_SCHED_SUSPENDED) | RT_SCHED_DELAYED;
		enq_timed_task(task);
	}
	RT_SCHEDULE(task, rtai_cpuid());
	rt_global_restore_flags(flags);
	return 0;
}
00669
00670
00671
00672
00673
00674
00675
00676
00677
00678
00679
00680
00681
00682
00683
/*
 * rt_task_wait_period - block the caller until its next period.
 *
 * Advances resume_time by one period and sleeps on the timed list if
 * that time is still in the future.  If a resync was requested
 * (resync_frame set), the period base is re-anchored to "now" (TSC in
 * oneshot mode, current tick time otherwise) instead of sleeping.
 *
 * Returns 0 when the task actually slept until the next period, 1 when
 * the deadline had already passed (overrun) or a resync consumed the
 * call.
 */
int rt_task_wait_period(void)
{
	DECLARE_RT_CURRENT;
	long flags;

	flags = rt_global_save_flags_and_cli();
	ASSIGN_RT_CURRENT;
	if (rt_current->resync_frame) {
		/* Re-anchor the periodic frame; do not sleep this time. */
		rt_current->resync_frame = 0;
#ifdef CONFIG_SMP
		rt_current->resume_time = oneshot_timer ? rtai_rdtsc() : rt_smp_times[cpuid].tick_time;
#else
		rt_current->resume_time = oneshot_timer ? rtai_rdtsc() : rt_times.tick_time;
#endif
	} else if ((rt_current->resume_time += rt_current->period) > rt_time_h) {
		rt_current->state |= RT_SCHED_DELAYED;
		rem_ready_current(rt_current);
		enq_timed_task(rt_current);
		rt_schedule();
		rt_global_restore_flags(flags);
		return 0;
	}
	rt_global_restore_flags(flags);
	return 1;
}
00709
/*
 * rt_task_set_resume_end_times - one-shot deadline scheduling for the caller.
 * @resume: absolute resume time if > 0, otherwise a relative offset
 *          (its negation is added to the current resume_time).
 * @end:    absolute deadline if > 0, otherwise relative to the resume
 *          time (stored in ->period, which doubles as the deadline here).
 *
 * Marks the caller with policy -1 (deadline mode, per the scheduler's
 * policy encoding — confirm in rtai_schedcore) and priority 0, then
 * parks it on the timed list and reschedules.
 */
void rt_task_set_resume_end_times(RTIME resume, RTIME end)
{
	RT_TASK *rt_current;
	long flags;

	flags = rt_global_save_flags_and_cli();
	rt_current = RT_CURRENT;
	rt_current->policy = -1;
	rt_current->priority = 0;
	if (resume > 0) {
		rt_current->resume_time = resume;
	} else {
		/* resume <= 0: treat as relative; subtracting a negative adds. */
		rt_current->resume_time -= resume;
	}
	if (end > 0) {
		rt_current->period = end;
	} else {
		rt_current->period = rt_current->resume_time - end;
	}
	rt_current->state |= RT_SCHED_DELAYED;
	rem_ready_current(rt_current);
	enq_timed_task(rt_current);
	rt_schedule();
	rt_global_restore_flags(flags);
}
00735
/*
 * rt_set_resume_time - change the wakeup time of an already-delayed task.
 * @task:            task whose resume time is modified.
 * @new_resume_time: new absolute wakeup time in timer counts.
 *
 * Only acts on a task currently in RT_SCHED_DELAYED state, and only when
 * the new time is later than the next entry's, which requires re-sorting
 * the timed list (rem + enq).
 *
 * Returns 0 on success, -EINVAL on bad magic, -ETIME when the task was
 * not delayed or the new time did not require (or allow) a move.
 */
int rt_set_resume_time(RT_TASK *task, RTIME new_resume_time)
{
	long flags;

	if (task->magic != RT_TASK_MAGIC) {
		return -EINVAL;
	}

	flags = rt_global_save_flags_and_cli();
	if (task->state & RT_SCHED_DELAYED) {
		/* Assignment inside the condition: resume_time is updated,
		 * then compared against the successor's wakeup time. */
		if (((task->resume_time = new_resume_time) - (task->tnext)->resume_time) > 0) {
			rem_timed_task(task);
			enq_timed_task(task);
			rt_global_restore_flags(flags);
			return 0;
		}
	}
	rt_global_restore_flags(flags);
	return -ETIME;
}
00756
/*
 * rt_set_period - change the period of a (periodic) task.
 * @task:       task to modify.
 * @new_period: new period in timer counts.
 *
 * Takes effect at the next rt_task_wait_period().  Uses only the local
 * CPU irq-off guard since a single field is written.
 *
 * Returns 0, or -EINVAL on bad magic.
 */
int rt_set_period(RT_TASK *task, RTIME new_period)
{
	long flags;

	if (task->magic != RT_TASK_MAGIC) {
		return -EINVAL;
	}
	rtai_save_flags_and_cli(flags);
	task->period = new_period;
	rtai_restore_flags(flags);
	return 0;
}
00769
00770
00771
00772
00773
00774
00775
00776
00777
00778
00779
00780
00781
/*
 * next_period - absolute time of the calling task's next period release.
 *
 * Snapshots RT_CURRENT under the global lock, then returns
 * resume_time + period.  Note the sum itself is computed after the lock
 * is released, from the snapshotted task pointer.
 */
RTIME next_period(void)
{
	RT_TASK *rt_current;
	unsigned long flags;
	flags = rt_global_save_flags_and_cli();
	rt_current = RT_CURRENT;
	rt_global_restore_flags(flags);
	return rt_current->resume_time + rt_current->period;
}
00791
00792
00793
00794
00795
00796
00797
00798
00799
00800
00801
00802
00803
00804
00805
00806
00807
00808
00809
00810
00811 void rt_busy_sleep(
int ns)
00812 {
00813
RTIME end_time;
00814 end_time =
rtai_rdtsc() +
llimd(ns,
tuned.cpu_freq, 1000000000);
00815
while (
rtai_rdtsc() < end_time);
00816 }
00817
00818
00819
00820
00821
00822
00823
00824
00825
00826
00827
00828
00829
00830
00831
00832
00833
/*
 * rt_sleep - suspend the calling task for a relative delay.
 * @delay: sleep duration in timer counts.
 *
 * Computes the absolute wakeup time; if it is still in the future the
 * caller is moved to the timed list and rescheduled.
 *
 * Returns 0 after actually sleeping, 1 when the deadline had already
 * passed (no sleep performed).
 */
int rt_sleep(RTIME delay)
{
	DECLARE_RT_CURRENT;
	unsigned long flags;
	flags = rt_global_save_flags_and_cli();
	ASSIGN_RT_CURRENT;
	if ((rt_current->resume_time = get_time() + delay) > rt_time_h) {
		rt_current->state |= RT_SCHED_DELAYED;
		rem_ready_current(rt_current);
		enq_timed_task(rt_current);
		rt_schedule();
		rt_global_restore_flags(flags);
		return 0;
	}
	rt_global_restore_flags(flags);
	return 1;
}
00851
00852
00853
00854
00855
00856
00857
00858
00859
00860
00861
00862
00863
00864
00865
00866
00867
/*
 * rt_sleep_until - suspend the calling task until an absolute time.
 * @time: absolute wakeup time in timer counts.
 *
 * Same as rt_sleep() but with an absolute deadline.
 *
 * Returns 0 after actually sleeping, 1 when @time was already past.
 */
int rt_sleep_until(RTIME time)
{
	DECLARE_RT_CURRENT;
	unsigned long flags;
	flags = rt_global_save_flags_and_cli();
	ASSIGN_RT_CURRENT;
	if ((rt_current->resume_time = time) > rt_time_h) {
		rt_current->state |= RT_SCHED_DELAYED;
		rem_ready_current(rt_current);
		enq_timed_task(rt_current);
		rt_schedule();
		rt_global_restore_flags(flags);
		return 0;
	}
	rt_global_restore_flags(flags);
	return 1;
}
00885
/*
 * rt_task_masked_unblock - forcibly clear selected blocking states.
 * @task: task to unblock.
 * @mask: RT_SCHED_* state bits to clear (e.g. DELAYED, SEMAPHORE, ...).
 *
 * If the task is blocked: a DELAYED bit in @mask removes it from the
 * timed list; blocking-queue bits unlink it from its wait queue and,
 * for semaphores, undo the count it was charged (capping binary-type
 * semaphores at 1).  If clearing @mask leaves the task READY it is
 * re-enqueued and a reschedule triggered.  Also sets task->unblocked so
 * the interrupted primitive can detect the forced wakeup.
 *
 * Returns 1 if the task was in an unblockable state, 0 otherwise,
 * -EINVAL on bad magic.
 */
int rt_task_masked_unblock(RT_TASK *task, unsigned long mask)
{
	unsigned long flags;

	if (task->magic != RT_TASK_MAGIC) {
		return -EINVAL;
	}

	if (task->state && task->state != RT_SCHED_READY) {
		flags = rt_global_save_flags_and_cli();
		if (mask & RT_SCHED_DELAYED) {
			rem_timed_task(task);
		}
		if (task->blocked_on && (mask & (RT_SCHED_SEMAPHORE | RT_SCHED_SEND | RT_SCHED_RPC | RT_SCHED_RETURN))) {
			/* Unlink from whatever wait queue the task sits on. */
			(task->queue.prev)->next = task->queue.next;
			(task->queue.next)->prev = task->queue.prev;
			if (task->state & RT_SCHED_SEMAPHORE) {
				SEM *sem = (SEM *)task->blocked_on;
				/* Give back the count; binary semaphores cap at 1. */
				if (++sem->count > 1 && sem->type) {
					sem->count = 1;
				}
			}
		}
		/* Requeue only if clearing the mask leaves the task READY. */
		if (task->state != RT_SCHED_READY && (task->state &= ~mask) == RT_SCHED_READY) {
			enq_ready_task(task);
			RT_SCHEDULE(task, rtai_cpuid());
		}
		rt_global_restore_flags(flags);
		return task->unblocked = 1;
	}
	return 0;
}
00918
00919 int rt_nanosleep(
struct timespec *rqtp,
struct timespec *rmtp)
00920 {
00921
RTIME expire;
00922
00923
if (rqtp->tv_nsec >= 1000000000L || rqtp->tv_nsec < 0 || rqtp->tv_sec < 0) {
00924
return -EINVAL;
00925 }
00926
rt_sleep_until(expire =
rt_get_time() +
timespec2count(rqtp));
00927
if ((expire -=
rt_get_time()) > 0) {
00928
if (rmtp) {
00929
count2timespec(expire, rmtp);
00930 }
00931
return -EINTR;
00932 }
00933
return 0;
00934 }
00935
00936
00937
/* Exported wrapper: enqueue a task in the EDF-ordered ready list. */
void rt_enq_ready_edf_task(RT_TASK *ready_task)
{
	enq_ready_edf_task(ready_task);
}
00942
/* Exported wrapper: enqueue a task in the priority-ordered ready list. */
void rt_enq_ready_task(RT_TASK *ready_task)
{
	enq_ready_task(ready_task);
}
00947
/* Exported wrapper: re-enqueue a ready task at a new priority;
 * returns whatever the internal renq_ready_task() reports. */
int rt_renq_ready_task(RT_TASK *ready_task, int priority)
{
	return renq_ready_task(ready_task, priority);
}
00952
/* Exported wrapper: remove a task from the ready list. */
void rt_rem_ready_task(RT_TASK *task)
{
	rem_ready_task(task);
}
00957
/* Exported wrapper: remove the current task from the ready list
 * (fast path that assumes @rt_current is the running task). */
void rt_rem_ready_current(RT_TASK *rt_current)
{
	rem_ready_current(rt_current);
}
00962
/* Exported wrapper: insert a task in the resume-time-ordered timed list. */
void rt_enq_timed_task(RT_TASK *timed_task)
{
	enq_timed_task(timed_task);
}
00967
/*
 * Exported wrapper: wake all timed tasks whose resume time has elapsed.
 * On UP builds the cpuid argument is irrelevant and 0 is passed.
 */
void rt_wake_up_timed_tasks(int cpuid)
{
#ifdef CONFIG_SMP
	wake_up_timed_tasks(cpuid);
#else
	wake_up_timed_tasks(0);
#endif
}
00976
/* Exported wrapper: remove a task from the timed list. */
void rt_rem_timed_task(RT_TASK *task)
{
	rem_timed_task(task);
}
00981
/* Exported wrapper: enqueue a task on a blocking queue; @qtype selects
 * FIFO vs priority ordering (semantics of the internal helper). */
void rt_enqueue_blocked(RT_TASK *task, QUEUE *queue, int qtype)
{
	enqueue_blocked(task, queue, qtype);
}
00986
/* Exported wrapper: remove a task from the blocking queue it sits on. */
void rt_dequeue_blocked(RT_TASK *task)
{
	dequeue_blocked(task);
}
00991
/* Exported wrapper: re-enqueue the current task at a new priority;
 * returns whatever the internal renq_current() reports. */
int rt_renq_current(RT_TASK *rt_current, int priority)
{
	return renq_current(rt_current, priority);
}
00996
00997
00998
/*
 * rt_named_task_init - create (or look up) a task registered under a name.
 * @task_name:  up-to-6-character name, hashed via nam2num().
 * @thread/...: forwarded to rt_task_init().
 *
 * If a task is already registered under the name, it is returned
 * directly (no new allocation).  Otherwise a new RT_TASK is allocated,
 * initialized, and registered; on registration failure the task is
 * deleted and freed.
 *
 * Ownership: the returned task was rt_malloc'ed here; free it through
 * rt_named_task_delete().  NOTE(review): if rt_malloc() itself fails,
 * rt_free(task) is still reached with task == NULL — presumably
 * rt_free(NULL) is a safe no-op; confirm against the RTAI allocator.
 *
 * Returns the task pointer, or NULL on failure.
 */
RT_TASK *rt_named_task_init(const char *task_name, void (*thread)(long), long data, int stack_size, int prio, int uses_fpu, void(*signal)(void))
{
	RT_TASK *task;
	unsigned long name;

	if ((task = rt_get_adr(name = nam2num(task_name)))) {
		return task;
	}
	if ((task = rt_malloc(sizeof(RT_TASK))) && !rt_task_init(task, thread, data, stack_size, prio, uses_fpu, signal)) {
		if (rt_register(name, task, IS_TASK, 0)) {
			return task;
		}
		/* Registration failed: roll back the task creation. */
		rt_task_delete(task);
	}
	rt_free(task);
	return (RT_TASK *)0;
}
01016
/*
 * rt_named_task_init_cpuid - like rt_named_task_init(), pinned to a CPU.
 * @run_on_cpu: CPU the new task is bound to (forwarded to
 *              rt_task_init_cpuid()).
 *
 * Identical lookup/create/register/rollback logic as
 * rt_named_task_init(); see its documentation for ownership notes.
 *
 * Returns the task pointer, or NULL on failure.
 */
RT_TASK *rt_named_task_init_cpuid(const char *task_name, void (*thread)(long), long data, int stack_size, int prio, int uses_fpu, void(*signal)(void), unsigned int run_on_cpu)
{
	RT_TASK *task;
	unsigned long name;

	if ((task = rt_get_adr(name = nam2num(task_name)))) {
		return task;
	}
	if ((task = rt_malloc(sizeof(RT_TASK))) && !rt_task_init_cpuid(task, thread, data, stack_size, prio, uses_fpu, signal, run_on_cpu)) {
		if (rt_register(name, task, IS_TASK, 0)) {
			return task;
		}
		/* Registration failed: roll back the task creation. */
		rt_task_delete(task);
	}
	rt_free(task);
	return (RT_TASK *)0;
}
01034
/*
 * rt_named_task_delete - delete a task created by rt_named_task_init*().
 * @task: task to destroy.
 *
 * Frees the task memory only if rt_task_delete() succeeds (returns 0);
 * in either case the registry entry for its address is dropped.
 *
 * Returns the result of rt_drg_on_adr() (registry deregistration).
 */
int rt_named_task_delete(RT_TASK *task)
{
	if (!rt_task_delete(task)) {
		rt_free(task);
	}
	return rt_drg_on_adr(task);
}
01042
01043
01044
01045 #define HASHED_REGISTRY
01046
01047
#ifdef HASHED_REGISTRY
01048
/* ---- Hashed registry state ---------------------------------------- */

/* Number of usable slots (a prime, chosen in rt_registry_alloc()). */
int max_slots;
/* The registry table proper; slot 0 is unused (hash_fun maps to 1..n). */
static struct rt_registry_entry *lxrt_list;
static spinlock_t list_lock = SPIN_LOCK_UNLOCKED;

/* Collision instrumentation.  NOTE(review): because COLLISION_COUNT is
 * defined just above, the #ifndef fallback below is dead code — it only
 * takes effect if the counting definition is removed/commented out. */
#define COLLISION_COUNT() do { col++; } while(0)
static unsigned long long col;
#ifndef COLLISION_COUNT
#define COLLISION_COUNT()
#endif

/* Sentinels marking a slot as "deleted but part of a probe chain":
 * probes must continue past them, inserts may reuse them. */
#define NONAME (1UL)
#define NOADR ((void *)1)

#define PRIMES_TAB_GRANULARITY 100

/* Table of primes; the table size request is rounded to one of these so
 * the hash (modulo) distributes well. */
static unsigned short primes[ ] = { 1, 103, 211, 307, 401, 503, 601, 701, 809, 907, 1009, 1103, 1201, 1301, 1409, 1511, 1601, 1709, 1801, 1901, 2003, 2111, 2203, 2309, 2411, 2503, 2609, 2707, 2801, 2903, 3001, 3109, 3203, 3301, 3407, 3511,
3607, 3701, 3803, 3907, 4001, 4111, 4201, 4327, 4409, 4507, 4603, 4703, 4801, 4903, 5003, 5101, 5209, 5303, 5407, 5501, 5623, 5701, 5801, 5903, 6007, 6101, 6203, 6301, 6421, 6521, 6607, 6703, 6803, 6907, 7001, 7103, 7207, 7307, 7411, 7507,
7603, 7703, 7817, 7901, 8009, 8101, 8209, 8311, 8419, 8501, 8609, 8707, 8803, 8923, 9001, 9103, 9203, 9311, 9403, 9511, 9601, 9719, 9803, 9901, 10007, 10103, 10211, 10301, 10427, 10501, 10601, 10709, 10831, 10903, 11003, 11113, 11213, 11311, 11411, 11503, 11597, 11617, 11701, 11801, 11903, 12007, 12101, 12203, 12301, 12401, 12503, 12601, 12703, 12809, 12907, 13001, 13103, 13217, 13309, 13411, 13513, 13613, 13709, 13807, 13901, 14009, 14107, 14207, 14303, 14401, 14503, 14621,
14713, 14813, 14923, 15013, 15101, 15217, 15307, 15401, 15511, 15601, 15727, 15803, 15901, 16001, 16103, 16217, 16301, 16411, 16519, 16603, 16703, 16811, 16901, 17011, 17107, 17203, 17317, 17401, 17509, 17609, 17707, 17807, 17903, 18013, 18119, 18211, 18301, 18401, 18503, 18617, 18701, 18803, 18911, 19001, 19121, 19207, 19301, 19403, 19501, 19603, 19709, 19801, 19913, 20011, 20101 };

/* Open-addressing hash: maps into 1..n (slot 0 reserved). */
#define hash_fun(m, n) ((m)%(n) + 1)
01070
/*
 * hash_ins_adr - insert an address key, cross-linked to a name slot.
 * @adr:    address to insert (> NOADR for real entries).
 * @list:   registry table.
 * @lstlen: number of usable slots.
 * @nlink:  slot index of the owning name entry (back-link target).
 *
 * Linear-probes lock-free first, then revalidates the candidate slot
 * under list_lock before committing (adr, nlink) and the reverse alink.
 *
 * Returns the slot on insert, -slot if the address was already present,
 * 0 if the table is full.  NOTE(review): if another CPU claims the slot
 * between probe and lock, the outer while(1) re-probes without
 * releasing list_lock first — presumably tolerated by this spinlock
 * flavor; confirm against rt_spin_lock_irqsave semantics.
 */
static int hash_ins_adr(void *adr, struct rt_registry_entry *list, int lstlen, int nlink)
{
	int i, k;
	unsigned long flags;

	i = hash_fun((unsigned long)adr, lstlen);
	while (1) {
		k = i;
		/* Lock-free probe: skip live entries that don't match. */
		while (list[k].adr > NOADR && list[k].adr != adr) {
			COLLISION_COUNT();
			if (++k > lstlen) {
				k = 1;
			}
			if (k == i) {
				return 0;
			}
		}
		flags = rt_spin_lock_irqsave(&list_lock);
		if (list[k].adr == adr) {
			rt_spin_unlock_irqrestore(flags, &list_lock);
			return -k;
		} else if (list[k].adr <= NOADR) {
			/* Slot free (NULL) or tombstoned (NOADR): claim it. */
			list[k].adr = adr;
			list[k].nlink = nlink;
			list[nlink].alink = k;
			rt_spin_unlock_irqrestore(flags, &list_lock);
			return k;
		}
	}
}
01101
/*
 * hash_ins_name - insert a (name -> adr) registration.
 * @name:   numeric name key (> NONAME for real entries).
 * @adr:    object address, inserted in the address index too.
 * @type:   registration type tag (IS_TASK, ...).
 * @lnxtsk: owning Linux task.
 * @list:   registry table.
 * @lstlen: number of usable slots.
 * @inc:    non-zero to bump the refcount when the name already exists.
 *
 * Same probe-then-lock pattern as hash_ins_adr().  On a fresh insert
 * the companion address entry is created via hash_ins_adr().
 *
 * Returns the slot on insert, -slot if the name already existed
 * (refcount bumped when @inc), 0 on failure.
 *
 * NOTE(review): on the hash_ins_adr() failure path the code calls
 * rt_spin_unlock_irqrestore() a second time although the lock was
 * already dropped just above — looks like a latent double-unlock, and
 * the stale name entry is not rolled back; flagging rather than fixing
 * since the locking discipline here is deliberate upstream code.
 */
static int hash_ins_name(unsigned long name, void *adr, int type, struct task_struct *lnxtsk, struct rt_registry_entry *list, int lstlen, int inc)
{
	int i, k;
	unsigned long flags;

	i = hash_fun(name, lstlen);
	while (1) {
		k = i;
		while (list[k].name > NONAME && list[k].name != name) {
			COLLISION_COUNT();
			if (++k > lstlen) {
				k = 1;
			}
			if (k == i) {
				return 0;
			}
		}
		flags = rt_spin_lock_irqsave(&list_lock);
		if (list[k].name == name) {
			if (inc) {
				list[k].count++;
			}
			rt_spin_unlock_irqrestore(flags, &list_lock);
			return -k;
		} else if (list[k].name <= NONAME) {
			/* Free or tombstoned slot: claim it for this name. */
			list[k].name = name;
			list[k].type = type;
			list[k].tsk = lnxtsk;
			list[k].count = 1;
			list[k].alink = 0;
			rt_spin_unlock_irqrestore(flags, &list_lock);
			if (hash_ins_adr(adr, list, lstlen, k) <= 0) {
				rt_spin_unlock_irqrestore(flags, &list_lock);
				return 0;
			}
			return k;
		}
	}
}
01141
/*
 * hash_find_name - look up the address registered under a name.
 * @name:   numeric name key.
 * @list:   registry table.
 * @lstlen: number of usable slots.
 * @inc:    non-zero to bump the entry's refcount on a hit.
 * @slot:   optional out parameter receiving the name slot index.
 *
 * Lock-free linear probe, then a locked recheck of the candidate slot.
 * The address is fetched through the name entry's alink cross-link.
 *
 * Returns the registered address, or NULL if absent / table exhausted.
 */
static void *hash_find_name(unsigned long name, struct rt_registry_entry *list, long lstlen, int inc, int *slot)
{
	int i, k;
	unsigned long flags;

	i = hash_fun(name, lstlen);
	while (1) {
		k = i;
		while (list[k].name > NONAME && list[k].name != name) {
			COLLISION_COUNT();
			if (++k > lstlen) {
				k = 1;
			}
			if (k == i) {
				return NULL;
			}
		}
		flags = rt_spin_lock_irqsave(&list_lock);
		if (list[k].name == name) {
			if (inc) {
				list[k].count++;
			}
			rt_spin_unlock_irqrestore(flags, &list_lock);
			if (slot) {
				*slot = k;
			}
			/* Follow the cross-link into the address index. */
			return list[list[k].alink].adr;
		} else if (list[k].name <= NONAME) {
			/* Probe ended on a free/tombstone slot: not registered. */
			rt_spin_unlock_irqrestore(flags, &list_lock);
			return NULL;
		}
	}
}
01175
/*
 * hash_find_adr - reverse lookup: find the name registered for an address.
 * @adr:    address key.
 * @list:   registry table.
 * @lstlen: number of usable slots.
 * @inc:    non-zero to bump the owning name entry's refcount on a hit.
 *
 * Mirror image of hash_find_name(), probing the address index and
 * following the nlink cross-link back to the name entry.
 *
 * Returns the numeric name, or 0 if the address is not registered.
 */
static unsigned long hash_find_adr(void *adr, struct rt_registry_entry *list, long lstlen, int inc)
{
	int i, k;
	unsigned long flags;

	i = hash_fun((unsigned long)adr, lstlen);
	while (1) {
		k = i;
		while (list[k].adr > NOADR && list[k].adr != adr) {
			COLLISION_COUNT();
			if (++k > lstlen) {
				k = 1;
			}
			if (k == i) {
				return 0;
			}
		}
		flags = rt_spin_lock_irqsave(&list_lock);
		if (list[k].adr == adr) {
			if (inc) {
				list[list[k].nlink].count++;
			}
			rt_spin_unlock_irqrestore(flags, &list_lock);
			return list[list[k].nlink].name;
		} else if (list[k].adr <= NOADR) {
			rt_spin_unlock_irqrestore(flags, &list_lock);
			return 0;
		}
	}
}
01206
/*
 * hash_rem_name - remove (or unreference) a registration by name.
 * @name:   numeric name key.
 * @list:   registry table.
 * @lstlen: number of usable slots.
 * @dec:    0 = unconditional removal; non-zero = drop one reference and
 *          remove only when the count hits zero (@dec is also the value
 *          returned when the name is not found).
 *
 * On removal, the slot is turned into a tombstone (NONAME/NOADR) when
 * the next slot is occupied — keeping probe chains intact — or fully
 * freed (0/NULL) when the chain ends there.  The companion address slot
 * (alink) gets the same treatment.
 *
 * Returns: with @dec, the remaining refcount (0 once removed); without,
 * the slot index; @dec itself (e.g. -EFAULT) when the name is absent.
 */
static int hash_rem_name(unsigned long name, struct rt_registry_entry *list, long lstlen, int dec)
{
	int i, k;
	unsigned long flags;

	k = i = hash_fun(name, lstlen);
	/* Probe stops at a truly empty slot (0), passing tombstones. */
	while (list[k].name && list[k].name != name) {
		COLLISION_COUNT();
		if (++k > lstlen) {
			k = 1;
		}
		if (k == i) {
			return 0;
		}
	}
	flags = rt_spin_lock_irqsave(&list_lock);
	if (list[k].name == name) {
		if (!dec || (list[k].count && !--list[k].count)) {
			int j;
			if ((i = k + 1) > lstlen) {
				i = 1;
			}
			/* Tombstone if the probe chain continues, else free. */
			list[k].name = !list[i].name ? 0UL : NONAME;
			if ((j = list[k].alink)) {
				if ((i = j + 1) > lstlen) {
					i = 1;
				}
				list[j].adr = !list[i].adr ? NULL : NOADR;
			}
		}
		if (dec) {
			k = list[k].count;
		}
		rt_spin_unlock_irqrestore(flags, &list_lock);
		return k;
	}
	rt_spin_unlock_irqrestore(flags, &list_lock);
	return dec;
}
01246
/*
 * hash_rem_adr - remove (or unreference) a registration by address.
 * @adr:    address key.
 * @list:   registry table.
 * @lstlen: number of usable slots.
 * @dec:    same semantics as in hash_rem_name(), except the refcount
 *          lives on the linked name entry (nlink).
 *
 * Mirror of hash_rem_name(): tombstones or frees the address slot and
 * its companion name slot, preserving probe chains.
 *
 * Returns: with @dec, the remaining refcount; without, the slot index;
 * @dec itself when the address is absent.
 */
static int hash_rem_adr(void *adr, struct rt_registry_entry *list, long lstlen, int dec)
{
	int i, k;
	unsigned long flags;

	k = i = hash_fun((unsigned long)adr, lstlen);
	while (list[k].adr && list[k].adr != adr) {
		COLLISION_COUNT();
		if (++k > lstlen) {
			k = 1;
		}
		if (k == i) {
			return 0;
		}
	}
	flags = rt_spin_lock_irqsave(&list_lock);
	if (list[k].adr == adr) {
		/* Refcount is held by the owning name entry (nlink). */
		if (!dec || (list[list[k].nlink].count && !--list[list[k].nlink].count)) {
			int j;
			if ((i = k + 1) > lstlen) {
				i = 1;
			}
			list[k].adr = !list[i].adr ? NULL : NOADR;
			j = list[k].nlink;
			if ((i = j + 1) > lstlen) {
				i = 1;
			}
			list[j].name = !list[i].name ? 0UL : NONAME;
		}
		if (dec) {
			k = list[list[k].nlink].count;
		}
		rt_spin_unlock_irqrestore(flags, &list_lock);
		return k;
	}
	rt_spin_unlock_irqrestore(flags, &list_lock);
	return dec;
}
01285
01286 static inline int registr(
unsigned long name,
void *adr,
int type,
struct task_struct *lnxtsk)
01287 {
01288
return abs(
hash_ins_name(name, adr, type, lnxtsk,
lxrt_list,
max_slots, 1));
01289 }
01290
01291 static inline int drg_on_name(
unsigned long name)
01292 {
01293
return hash_rem_name(name,
lxrt_list,
max_slots, 0);
01294 }
01295
01296 static inline int drg_on_name_cnt(
unsigned long name)
01297 {
01298
return hash_rem_name(name,
lxrt_list,
max_slots, -EFAULT);
01299 }
01300
01301 static inline int drg_on_adr(
void *adr)
01302 {
01303
return hash_rem_adr(adr,
lxrt_list,
max_slots, 0);
01304 }
01305
01306 static inline int drg_on_adr_cnt(
void *adr)
01307 {
01308
return hash_rem_adr(adr,
lxrt_list,
max_slots, -EFAULT);
01309 }
01310
/*
 * get_name - look up the name of a registered address, or mint a new one.
 * @adr: address to look up; NULL requests a fresh anonymous name.
 *
 * With a NULL @adr, a monotonically increasing seed is returned (under
 * list_lock), wrapping back to the base value before reaching
 * 0xFFFFFFFF.  Otherwise delegates to hash_find_adr().
 *
 * Returns the name, or 0 when @adr is not registered.  Note: the final
 * `return 0;` is unreachable — both branches above return.
 */
static inline unsigned long get_name(void *adr)
{
	static unsigned long nameseed = 3518743764UL;
	if (!adr) {
		unsigned long flags;
		unsigned long name;
		flags = rt_spin_lock_irqsave(&list_lock);
		if ((name = ++nameseed) == 0xFFFFFFFFUL) {
			nameseed = 3518743764UL;
		}
		rt_spin_unlock_irqrestore(flags, &list_lock);
		return name;
	} else {
		return hash_find_adr(adr, lxrt_list, max_slots, 0);
	}
	return 0;
}
01328
01329 static inline void *
get_adr(
unsigned long name)
01330 {
01331
return hash_find_name(name,
lxrt_list,
max_slots, 0, NULL);
01332 }
01333
01334 static inline void *
get_adr_cnt(
unsigned long name)
01335 {
01336
return hash_find_name(name,
lxrt_list,
max_slots, 1, NULL);
01337 }
01338
01339 static inline int get_type(
unsigned long name)
01340 {
01341
int slot;
01342
01343
if (
hash_find_name(name,
lxrt_list,
max_slots, 0, &slot)) {
01344
return lxrt_list[slot].
type;
01345 }
01346
return -EINVAL;
01347 }
01348
01349 unsigned long is_process_registered(
struct task_struct *lnxtsk)
01350 {
01351
void *adr = lnxtsk->rtai_tskext(
TSKEXT0);
01352
return adr ?
hash_find_adr(adr,
lxrt_list,
max_slots, 0) : 0;
01353 }
01354
/*
 * rt_get_registry_slot - copy a registry entry out by slot number.
 * @slot:  slot index to read (caller-supplied; not range-checked here).
 * @entry: destination for the copied entry; its adr field is resolved
 *         through the name entry's alink cross-link.
 *
 * Returns @slot when the slot holds a live name (> NONAME), 0 otherwise.
 */
int rt_get_registry_slot(int slot, struct rt_registry_entry *entry)
{
	unsigned long flags;
	flags = rt_spin_lock_irqsave(&list_lock);
	if (lxrt_list[slot].name > NONAME) {
		*entry = lxrt_list[slot];
		/* The address lives in the companion slot. */
		entry->adr = lxrt_list[entry->alink].adr;
		rt_spin_unlock_irqrestore(flags, &list_lock);
		return slot;
	}
	rt_spin_unlock_irqrestore(flags, &list_lock);
	return 0;
}
01368
/*
 * rt_registry_alloc - size and allocate the hashed registry table.
 *
 * Rounds MAX_SLOTS up in units of PRIMES_TAB_GRANULARITY to pick an
 * index into the primes table, so the actual slot count is prime (good
 * modulo-hash distribution).  Slot 0 is reserved, hence the +1 on the
 * allocation, which is zeroed.
 *
 * Returns 0 on success, -ENOMEM when MAX_SLOTS exceeds the primes table
 * or vmalloc fails.
 */
int rt_registry_alloc(void)
{
	if ((max_slots = (MAX_SLOTS + PRIMES_TAB_GRANULARITY - 1)/(PRIMES_TAB_GRANULARITY)) >= sizeof(primes)/sizeof(primes[0])) {
		printk("REGISTRY TABLE TOO LARGE FOR AVAILABLE PRIMES\n");
		return -ENOMEM;
	}
	max_slots = primes[max_slots];
	if (!(lxrt_list = vmalloc((max_slots + 1)*sizeof(struct rt_registry_entry)))) {
		printk("NO MEMORY FOR REGISTRY TABLE\n");
		return -ENOMEM;
	}
	memset(lxrt_list, 0, (max_slots + 1)*sizeof(struct rt_registry_entry));
	return 0;
}
01383
01384 void rt_registry_free(
void)
01385 {
01386
if (
lxrt_list) {
01387 vfree(
lxrt_list);
01388 }
01389 }
01390
#else
01391
/* ---- Flat (non-hashed) registry state ----------------------------- */

/* Number of occupied slots; volatile because it is read lock-free in
 * the scan loops below and updated under list_lock. */
volatile int max_slots;

/* Flat registry array; slot 0 unused, entries packed in 1..max_slots. */
static struct rt_registry_entry *lxrt_list;

static spinlock_t list_lock = SPIN_LOCK_UNLOCKED;
01394
01395
/*
 * rt_registry_alloc - allocate the flat registry array (MAX_SLOTS + 1
 * entries; slot 0 reserved), zeroed.
 *
 * Returns 0 on success, -ENOMEM when vmalloc fails.
 */
int rt_registry_alloc(void)
{
	if (!(lxrt_list = vmalloc((MAX_SLOTS + 1)*sizeof(struct rt_registry_entry)))) {
		printk("NO MEMORY FOR REGISTRY TABLE\n");
		return -ENOMEM;
	}
	memset(lxrt_list, 0, (MAX_SLOTS + 1)*sizeof(struct rt_registry_entry));
	return 0;
}
01404
01405
/* Release the flat registry array allocated by rt_registry_alloc(). */
void rt_registry_free(void)
{
	if (lxrt_list) {
		vfree(lxrt_list);
	}
}
01411
01412
/*
 * registr - flat-table registration of a (name, adr) pair.
 * @name: numeric name; must not already be present.
 * @adr:  object address.
 * @type: registration type tag.
 * @tsk:  owning Linux task.
 *
 * Scans for duplicates lock-free, then appends under list_lock only if
 * max_slots is unchanged since the scan (the snapshot in `slot`);
 * otherwise the outer loop rescans.  Entries are created with count 1.
 *
 * Returns the new slot index, or 0 on duplicate name / full table.
 */
static inline int registr(unsigned long name, void *adr, int type, struct task_struct *tsk)
{
	unsigned long flags;
	int i, slot;

	while ((slot = max_slots) < MAX_SLOTS) {
		/* Lock-free duplicate scan over the current snapshot. */
		for (i = 1; i <= max_slots; i++) {
			if (lxrt_list[i].name == name) {
				return 0;
			}
		}
		flags = rt_spin_lock_irqsave(&list_lock);
		/* Commit only if nobody grew the table since the scan. */
		if (slot == max_slots && max_slots < MAX_SLOTS) {
			slot = ++max_slots;
			lxrt_list[slot].name = name;
			lxrt_list[slot].adr = adr;
			lxrt_list[slot].tsk = tsk;
			lxrt_list[slot].type = type;
			lxrt_list[slot].count = 1;
			rt_spin_unlock_irqrestore(flags, &list_lock);
			return slot;
		}
		rt_spin_unlock_irqrestore(flags, &list_lock);
	}
	return 0;
}
01442
01443
/*
 * drg_on_name - flat-table deregistration by name (unconditional).
 *
 * Scans slot by slot, taking list_lock per iteration; on a match the
 * last entry is moved into the vacated slot (order is not preserved)
 * and max_slots shrinks.
 *
 * Returns the slot that was removed, or 0 when the name is absent.
 */
static inline int drg_on_name(unsigned long name)
{
	unsigned long flags;
	int slot;
	for (slot = 1; slot <= max_slots; slot++) {
		flags = rt_spin_lock_irqsave(&list_lock);
		if (lxrt_list[slot].name == name) {
			/* Compact: move the tail entry into the hole. */
			if (slot < max_slots) {
				lxrt_list[slot] = lxrt_list[max_slots];
			}
			if (max_slots > 0) {
				max_slots--;
			}
			rt_spin_unlock_irqrestore(flags, &list_lock);
			return slot;
		}
		rt_spin_unlock_irqrestore(flags, &list_lock);
	}
	return 0;
}
01463
01464
/*
 * drg_on_name_cnt - drop one reference from the entry named @name and
 * deregister it when the count reaches zero.
 *
 * Returns 0 (the residual count) when the final reference was dropped
 * and the slot removed; -EFAULT when no matching entry was found.
 *
 * NOTE(review): when the entry is found but the decrement leaves a
 * non-zero count, the condition below is false, the loop moves on and
 * the function ends up returning -EFAULT even though the count WAS
 * decremented.  Verify callers treat -EFAULT as "not yet released",
 * not as "name absent" -- TODO confirm this is intended.
 */
static inline int drg_on_name_cnt(unsigned long name)
{
	unsigned long flags;
	int slot, count;
	for (slot = 1; slot <= max_slots; slot++) {
		flags = rt_spin_lock_irqsave(&list_lock);
		if (lxrt_list[slot].name == name && lxrt_list[slot].count > 0 && !(count = --lxrt_list[slot].count)) {
			/* Last reference gone: swap-with-last removal. */
			if (slot < max_slots) {
				lxrt_list[slot] = lxrt_list[max_slots];
			}
			if (max_slots > 0) {
				max_slots--;
			}
			rt_spin_unlock_irqrestore(flags, &list_lock);
			return count;
		}
		rt_spin_unlock_irqrestore(flags, &list_lock);
	}
	return -EFAULT;
}
01484
01485
/*
 * drg_on_adr - deregister the entry whose object address matches @adr.
 *
 * Returns the 1-based slot the entry occupied, or 0 if not found.
 * Same swap-with-last removal and per-slot locking as drg_on_name().
 */
static inline int drg_on_adr(void *adr)
{
	unsigned long flags;
	int slot;
	for (slot = 1; slot <= max_slots; slot++) {
		flags = rt_spin_lock_irqsave(&list_lock);
		if (lxrt_list[slot].adr == adr) {
			/* Back-fill the freed slot with the last used one. */
			if (slot < max_slots) {
				lxrt_list[slot] = lxrt_list[max_slots];
			}
			if (max_slots > 0) {
				max_slots--;
			}
			rt_spin_unlock_irqrestore(flags, &list_lock);
			return slot;
		}
		rt_spin_unlock_irqrestore(flags, &list_lock);
	}
	return 0;
}
01505
01506
/*
 * drg_on_adr_cnt - drop one reference from the entry at address @adr
 * and deregister it when the count reaches zero.
 *
 * Returns 0 when the final reference was dropped and the slot removed;
 * -EFAULT otherwise.  Same caveat as drg_on_name_cnt(): a decrement
 * that leaves a non-zero count falls through and the function still
 * returns -EFAULT -- TODO confirm callers expect that.
 */
static inline int drg_on_adr_cnt(void *adr)
{
	unsigned long flags;
	int slot, count;
	for (slot = 1; slot <= max_slots; slot++) {
		flags = rt_spin_lock_irqsave(&list_lock);
		if (lxrt_list[slot].adr == adr && lxrt_list[slot].count > 0 && !(count = --lxrt_list[slot].count)) {
			/* Last reference gone: swap-with-last removal. */
			if (slot < max_slots) {
				lxrt_list[slot] = lxrt_list[max_slots];
			}
			if (max_slots > 0) {
				max_slots--;
			}
			rt_spin_unlock_irqrestore(flags, &list_lock);
			return count;
		}
		rt_spin_unlock_irqrestore(flags, &list_lock);
	}
	return -EFAULT;
}
01526
01527
/*
 * get_name - map an object address to its registry name, or mint a
 * fresh anonymous name.
 *
 * With @adr == NULL a new unique name is generated from a monotonically
 * increasing seed (protected by list_lock; the seed wraps back to its
 * start value before reaching 0xFFFFFFFF).  Otherwise the table is
 * scanned (unlocked) and the matching entry's name, or 0 if absent, is
 * returned.
 */
static inline unsigned long get_name(void *adr)
{
	static unsigned long nameseed = 3518743764UL;
	int slot;
	if (!adr) {
		unsigned long flags;
		unsigned long name;
		flags = rt_spin_lock_irqsave(&list_lock);
		if ((name = ++nameseed) == 0xFFFFFFFFUL) {
			nameseed = 3518743764UL;	/* wrap before the reserved top value */
		}
		rt_spin_unlock_irqrestore(flags, &list_lock);
		return name;
	}
	for (slot = 1; slot <= max_slots; slot++) {
		if (lxrt_list[slot].adr == adr) {
			return lxrt_list[slot].name;
		}
	}
	return 0;
}
01548
01549
static inline void *
get_adr(
unsigned long name)
01550 {
01551
int slot;
01552
for (slot = 1; slot <=
max_slots; slot++) {
01553
if (
lxrt_list[slot].
name == name) {
01554
return lxrt_list[slot].
adr;
01555 }
01556 }
01557
return 0;
01558 }
01559
01560
/*
 * get_adr_cnt - like get_adr(), but also takes a reference: the
 * matching entry's count is incremented under list_lock before its
 * address is returned.  Returns 0 (NULL) when the name is absent.
 * Balance each successful call with drg_on_name_cnt()/drg_on_adr_cnt().
 */
static inline void *get_adr_cnt(unsigned long name)
{
	unsigned long flags;
	int slot;
	for (slot = 1; slot <= max_slots; slot++) {
		flags = rt_spin_lock_irqsave(&list_lock);
		if (lxrt_list[slot].name == name) {
			++lxrt_list[slot].count;
			rt_spin_unlock_irqrestore(flags, &list_lock);
			return lxrt_list[slot].adr;
		}
		rt_spin_unlock_irqrestore(flags, &list_lock);
	}
	return 0;
}
01575
01576
static inline int get_type(
unsigned long name)
01577 {
01578
int slot;
01579
for (slot = 1; slot <=
max_slots; slot++) {
01580
if (
lxrt_list[slot].
name == name) {
01581
return lxrt_list[slot].
type;
01582 }
01583 }
01584
return -EINVAL;
01585 }
01586
01587
unsigned long is_process_registered(
struct task_struct *tsk)
01588 {
01589
void *adr;
01590
01591
if ((adr = tsk->rtai_tskext(TSKEXT0))) {
01592
int slot;
01593
for (slot = 1; slot <=
max_slots; slot++) {
01594
if (
lxrt_list[slot].
adr == adr) {
01595
return lxrt_list[slot].
name;
01596 }
01597 }
01598 }
01599
return 0;
01600 }
01601
01602
/*
 * rt_get_registry_slot - copy registry slot @slot into @entry.
 *
 * Returns @slot on success; 0 when @entry is NULL, @slot is out of
 * range, or the slot holds no named entry.  The copy is made under
 * list_lock so the caller gets a consistent snapshot, but the entry may
 * be deregistered immediately afterwards.
 */
int rt_get_registry_slot(int slot, struct rt_registry_entry *entry)
{
	unsigned long flags;

	if (entry == 0) {
		return 0;
	}
	flags = rt_spin_lock_irqsave(&list_lock);
	if (slot > 0 && slot <= max_slots) {
		if (lxrt_list[slot].name != 0) {
			*entry = lxrt_list[slot];
			rt_spin_unlock_irqrestore(flags, &list_lock);
			return slot;
		}
	}
	rt_spin_unlock_irqrestore(flags, &list_lock);

	return 0;
}
01621
#endif
01622
01623
01624
01625
01626
01627
01628
01629
01630
01631
/**
 * rt_register - register object @adr of kind @type under @name on
 * behalf of Linux task @t.
 *
 * Returns the slot index assigned, or 0 when the name is already in
 * use (registr() re-checks the name under its own protocol) or the
 * table is full.
 */
int rt_register(unsigned long name, void *adr, int type, struct task_struct *t)
{
	if (get_adr(name)) {
		return 0;
	}
	return registr(name, adr, type, t);
}
01639
01640
01641
01642
01643
01644
01645
01646
01647
01648
/**
 * rt_drg_on_name - public wrapper: deregister the object named @name.
 * Returns the freed slot index, or 0 when not found.
 */
int rt_drg_on_name(unsigned long name)
{
	int slot = drg_on_name(name);
	return slot;
}
01653
01654
01655
01656
01657
01658
01659
01660
01661
/**
 * rt_drg_on_adr - public wrapper: deregister the object at @adr.
 * Returns the freed slot index, or 0 when not found.
 */
int rt_drg_on_adr(void *adr)
{
	int slot = drg_on_adr(adr);
	return slot;
}
01666
/**
 * rt_get_name - public wrapper: name registered for @adr, or a fresh
 * anonymous name when @adr is NULL (see get_name()).
 */
unsigned long rt_get_name(void *adr)
{
	unsigned long name = get_name(adr);
	return name;
}
01671
/**
 * rt_get_adr - public wrapper: object address registered under @name,
 * or NULL when not found.
 */
void *rt_get_adr(unsigned long name)
{
	void *adr = get_adr(name);
	return adr;
}
01676
/**
 * rt_get_type - public wrapper: type code registered under @name,
 * or -EINVAL when not found.
 */
int rt_get_type(unsigned long name)
{
	int type = get_type(name);
	return type;
}
01681
/**
 * rt_drg_on_name_cnt - public wrapper: drop one reference from the
 * entry named @name, removing it on the last release (see
 * drg_on_name_cnt() for the return-value convention).
 */
int rt_drg_on_name_cnt(unsigned long name)
{
	int count = drg_on_name_cnt(name);
	return count;
}
01686
/**
 * rt_drg_on_adr_cnt - public wrapper: drop one reference from the
 * entry at @adr, removing it on the last release (see
 * drg_on_adr_cnt() for the return-value convention).
 */
int rt_drg_on_adr_cnt(void *adr)
{
	int count = drg_on_adr_cnt(adr);
	return count;
}
01691
/**
 * rt_get_adr_cnt - public wrapper: reference-taking lookup; bumps the
 * entry's count and returns its address, or NULL when @name is absent.
 */
void *rt_get_adr_cnt(unsigned long name)
{
	void *adr = get_adr_cnt(name);
	return adr;
}
01696
01697
#include <rtai_lxrt.h>
01698
01699
extern struct rt_fun_entry
rt_fun_lxrt[];
01700
/*
 * krtai_objects_release - tear down every object still in the registry.
 *
 * Walks all used slots and destroys each named object via the
 * destructor appropriate to its type, dispatching named SEM/RWL/SPL/MBX
 * deletions and proxy detach through the rt_fun_lxrt[] function table.
 * Unknown types are only reported ("ALIEN").  Each release is logged
 * with the object's textual name.  Intended for module unload cleanup
 * -- TODO confirm no RT task can still be registering concurrently.
 */
void krtai_objects_release(void)
{
	int slot;
	struct rt_registry_entry entry;
	char name[8], *type;

	for (slot = 1; slot <= max_slots; slot++) {
		if (rt_get_registry_slot(slot, &entry)) {
			switch (entry.type) {
				case IS_TASK:
					type = "TASK";
					rt_named_task_delete(entry.adr);
					break;
				case IS_SEM:
					type = "SEM ";
					((void (*)(void *))rt_fun_lxrt[NAMED_SEM_DELETE].fun)(entry.adr);
					break;
				case IS_RWL:
					type = "RWL ";
					((void (*)(void *))rt_fun_lxrt[NAMED_RWL_DELETE].fun)(entry.adr);
					break;
				case IS_SPL:
					type = "SPL ";
					((void (*)(void *))rt_fun_lxrt[NAMED_SPL_DELETE].fun)(entry.adr);
					break;
				case IS_MBX:
					type = "MBX ";
					((void (*)(void *))rt_fun_lxrt[NAMED_MBX_DELETE].fun)(entry.adr);
					break;
				case IS_PRX:
					type = "PRX ";
					((void (*)(void *))rt_fun_lxrt[PROXY_DETACH].fun)(entry.adr);
					/* Proxies are not self-deregistering: drop the slot here. */
					rt_drg_on_adr(entry.adr);
					break;
				default:
					type = "ALIEN";
					break;
			}
			num2nam(entry.name, name);
			rt_printk("SCHED releases registered named %s %s\n", type, name);
		}
	}
}
01744
01745
01746
01747
#include <rtai_tasklets.h>
01748
01749
/*
 * Per-IRQ dispatch table maintained by the RTAI core (defined
 * elsewhere): installed handler, its cookie argument, return-mode flag
 * and CPU mask.  The IRQ-task helpers below store the waiting RT_TASK
 * pointer in .cookie and use .handler as the "still requested" flag.
 */
extern struct {
	int (*handler)(unsigned irq, void *cookie);
	void *cookie;
	int retmode;
	int cpumask;
} rtai_realtime_irq[];
01755
01756 int rt_irq_wait(
unsigned irq)
01757 {
01758
int retval;
01759 retval =
rt_task_suspend(0);
01760
return rtai_realtime_irq[irq].handler ? -retval :
RT_IRQ_TASK_ERR;
01761 }
01762
01763 int rt_irq_wait_if(
unsigned irq)
01764 {
01765
int retval;
01766 retval =
rt_task_suspend_if(0);
01767
return rtai_realtime_irq[irq].handler ? -retval :
RT_IRQ_TASK_ERR;
01768 }
01769
01770 int rt_irq_wait_until(
unsigned irq,
RTIME time)
01771 {
01772
int retval;
01773 retval =
rt_task_suspend_until(0, time);
01774
return rtai_realtime_irq[irq].handler ? -retval :
RT_IRQ_TASK_ERR;
01775 }
01776
01777 int rt_irq_wait_timed(
unsigned irq,
RTIME delay)
01778 {
01779
return rt_irq_wait_until(irq, get_time() + delay);
01780 }
01781
01782 void rt_irq_signal(
unsigned irq)
01783 {
01784
if (
rtai_realtime_irq[irq].handler) {
01785
rt_task_resume((
void *)
rtai_realtime_irq[irq].
cookie);
01786 }
01787 }
01788
/*
 * rt_irq_task_handler - the IRQ handler installed by
 * rt_request_irq_task(): simply resumes the task registered as the
 * IRQ's cookie.  Returns 0 (IRQ handled; no further action requested).
 */
static int rt_irq_task_handler(unsigned irq, RT_TASK *irq_task)
{
	rt_task_resume(irq_task);
	return 0;
}
01794
01795 int rt_request_irq_task (
unsigned irq,
void *handler,
int type,
int affine2task)
01796 {
01797
RT_TASK *
task;
01798
if (!
handler) {
01799
task = _rt_whoami();
01800 }
else {
01801
task = type ==
RT_IRQ_TASKLET ? ((
struct rt_tasklet_struct *)
handler)->task :
handler;
01802 }
01803
if (affine2task) {
01804
rt_assign_irq_to_cpu(irq, (1 <<
task->runnable_on_cpus));
01805 }
01806
return rt_request_irq(irq, (
void *)
rt_irq_task_handler,
task, 0);
01807 }
01808
01809 int rt_release_irq_task (
unsigned irq)
01810 {
01811
int retval;
01812
RT_TASK *
task;
01813
task = (
void *)
rtai_realtime_irq[irq].cookie;
01814
if (!(retval =
rt_release_irq(irq))) {
01815
rt_task_resume(
task);
01816
rt_reset_irq_to_sym_mode(irq);
01817 }
01818
return retval;
01819 }
01820
01821
extern void usp_request_rtc(
int,
void *);
01822 void usp_request_rtc(
int rtc_freq,
void *handler)
01823 {
01824
rt_request_rtc(rtc_freq, !
handler || (
handler &&
handler == (
void *)1) ?
handler :
rt_irq_signal);
01825
01826 }
01827
01828
01829
/*
 * rt_exec_linux_syscall - hand a Linux syscall request (its pt_regs)
 * from @rt_current to the server task @task, RPC style.
 *
 * If the server is already blocked in RT_SCHED_RECEIVE, the regs
 * pointer is delivered directly, the server is made ready, and the
 * caller blocks in RT_SCHED_RETURN waiting for the reply.  Otherwise
 * the caller enqueues itself on the server's msg_queue and blocks in
 * RT_SCHED_RPC.  Either way the server inherits the caller's priority
 * before rescheduling.
 *
 * Returns @task when the exchange completed (msg_queue.task was reset
 * to rt_current), NULL when the wait was broken some other way.
 * Runs entirely under the global cli lock.
 */
RT_TASK *rt_exec_linux_syscall(RT_TASK *rt_current, RT_TASK *task, struct pt_regs *regs)
{
	unsigned long flags;

	flags = rt_global_save_flags_and_cli();
	if (task->state & RT_SCHED_RECEIVE) {
		/* Server is waiting: deliver regs and wake it. */
		rt_current->msg = task->msg = (unsigned long)regs;
		task->msg_queue.task = rt_current;
		task->ret_queue.task = NOTHING;
		task->state = RT_SCHED_READY;
		enq_ready_task(task);
		enqueue_blocked(rt_current, &task->ret_queue, 1);
		rt_current->state |= RT_SCHED_RETURN;
	} else {
		/* Server busy: queue ourselves as a pending RPC. */
		rt_current->msg = (unsigned long)regs;
		enqueue_blocked(rt_current, &task->msg_queue, 1);
		rt_current->state |= RT_SCHED_RPC;
	}
	task->priority = rt_current->priority;	/* priority inheritance for the server */
	rem_ready_current(rt_current);
	rt_current->msg_queue.task = task;
	rt_schedule();
	rt_global_restore_flags(flags);
	return rt_current->msg_queue.task != rt_current ? NULL : task;
}
01855
01856
#include <asm/uaccess.h>
/*
 * rt_receive_linux_syscall - server side of rt_exec_linux_syscall():
 * receive a pending syscall's pt_regs from @task into *@regs.
 *
 * If @task is already blocked in RT_SCHED_RPC on us, its regs are
 * copied out immediately and it is moved to RT_SCHED_RETURN on our
 * ret_queue.  Otherwise the current task blocks in RT_SCHED_RECEIVE
 * (on @task specifically, or on anyone when @task == rt_current) until
 * a caller delivers regs via rt_current->msg.
 *
 * Returns @task on successful receipt (ret_queue.task was cleared by
 * the sender), NULL otherwise.  Runs under the global cli lock.
 */
RT_TASK *rt_receive_linux_syscall(RT_TASK *task, struct pt_regs *regs)
{
	unsigned long flags;
	RT_TASK *rt_current;

	flags = rt_global_save_flags_and_cli();
	rt_current = rt_smp_current[rtai_cpuid()];
	if ((task->state & RT_SCHED_RPC) && task->msg_queue.task == rt_current) {
		/* A caller is already waiting: take its regs now. */
		dequeue_blocked(task);
		*regs = *((struct pt_regs *)task->msg);
		rt_current->msg_queue.task = task;
		enqueue_blocked(task, &rt_current->ret_queue, 1);
		task->state = (task->state & ~RT_SCHED_RPC) | RT_SCHED_RETURN;
	} else {
		/* Nothing pending: block until a caller shows up. */
		rt_current->ret_queue.task = SOMETHING;
		rt_current->state |= RT_SCHED_RECEIVE;
		rem_ready_current(rt_current);
		rt_current->msg_queue.task = task != rt_current ? task : NULL;
		rt_schedule();
		*regs = *((struct pt_regs *)rt_current->msg);
	}
	rt_current->msg_queue.task = rt_current;
	rt_global_restore_flags(flags);
	return rt_current->ret_queue.task ? NULL : task;
}
01882
/*
 * rt_return_linux_syscall - complete a served syscall: store @retval
 * into the caller's saved registers and release the caller @task from
 * RT_SCHED_RETURN, re-enqueueing it if that leaves it READY.
 *
 * Note the return register is written through task->msg BEFORE the
 * global lock is taken -- presumably safe because the caller is still
 * blocked in RT_SCHED_RETURN at this point; TODO confirm.
 */
void rt_return_linux_syscall(RT_TASK *task, unsigned long retval)
{
	unsigned long flags;

	((struct pt_regs *)task->msg)->LINUX_SYSCALL_RETREG = retval;
	flags = rt_global_save_flags_and_cli();
	dequeue_blocked(task);
	task->msg = 0;
	task->msg_queue.task = task;
	if ((task->state &= ~RT_SCHED_RETURN) == RT_SCHED_READY) {
		enq_ready_task(task);
	}
	rt_global_restore_flags(flags);
}
01897
01898
01899
01900
#ifdef CONFIG_PROC_FS
01901
#include <linux/stat.h>
01902
#include <linux/proc_fs.h>
01903
#include <rtai_proc_fs.h>
01904
#include <rtai_nam2num.h>
01905
01906
extern struct proc_dir_entry *
rtai_proc_root;
01907
01908
01909
01910
/*
 * rtai_read_lxrt - /proc read handler: dump the LXRT registry table.
 *
 * One line per used slot: index, textual and numeric name, type label,
 * object and owner-task pointers, owner pid, parent pid (or shared
 * memory size, encoded in .type for SHMEM entries) and use count.
 *
 * Fix: the type-label selection used
 *     entry.type > sizeof(type_name)/sizeof(char *)
 * which admits entry.type == 9 and indexes one past the end of the
 * 9-element type_name[] array.  Changed to >= (with an explicit
 * negative guard and int cast to avoid the signed/unsigned compare).
 */
static int rtai_read_lxrt(char *page, char **start, off_t off, int count, int *eof, void *data)
{
	PROC_PRINT_VARS;
	struct rt_registry_entry entry;
	char *type_name[] = { "TASK", "SEM", "RWL", "SPL", "MBX", "PRX", "BITS", "TBX", "HPCK" };
	unsigned int i = 1;
	char name[8];

	PROC_PRINT("\nRTAI LXRT Information.\n\n");
	PROC_PRINT(" MAX_SLOTS = %d\n\n", MAX_SLOTS);

	PROC_PRINT(" Linux_Owner Parent PID\n");
	PROC_PRINT("Slot Name ID Type RT_Handle Pointer Tsk_PID MEM_Sz USG Cnt\n");
	PROC_PRINT("-------------------------------------------------------------------------------\n");
	for (i = 1; i <= max_slots; i++) {
		if (rt_get_registry_slot(i, &entry)) {
			num2nam(entry.name, name);
			PROC_PRINT("%4d %-6.6s 0x%08lx %-6.6s 0x%p 0x%p %7d %8d %7d\n",
				i,
				name,
				entry.name,
				/* .type values >= PAGE_SIZE encode a SHMEM size. */
				entry.type >= PAGE_SIZE ? "SHMEM" :
					(entry.type < 0 ||
					 entry.type >= (int)(sizeof(type_name)/sizeof(char *))) ? "ALIEN" :
					type_name[entry.type],
				entry.adr,
				entry.tsk,
				entry.tsk ? entry.tsk->pid : 0,
				entry.type == IS_TASK && ((RT_TASK *)entry.adr)->lnxtsk ? (((RT_TASK *)entry.adr)->lnxtsk)->pid : entry.type >= PAGE_SIZE ? entry.type : 0,
				entry.count);
		}
	}
	PROC_PRINT_DONE;
}
01945
01946
int rtai_proc_lxrt_register(
void)
01947 {
01948
struct proc_dir_entry *proc_lxrt_ent;
01949
01950
01951 proc_lxrt_ent = create_proc_entry(
"names", S_IFREG|S_IRUGO|S_IWUSR, rtai_proc_root);
01952
if (!proc_lxrt_ent) {
01953
printk(
"Unable to initialize /proc/rtai/lxrt\n");
01954
return(-1);
01955 }
01956 proc_lxrt_ent->read_proc = rtai_read_lxrt;
01957
return(0);
01958 }
01959
01960
01961
/*
 * rtai_proc_lxrt_unregister - remove the /proc/rtai/names entry created
 * by rtai_proc_lxrt_register().
 */
void rtai_proc_lxrt_unregister(void)
{
	remove_proc_entry("names", rtai_proc_root);
}
01965
01966
01967
#endif
01968
01969
#ifdef CONFIG_KBUILD
01970
01971
EXPORT_SYMBOL(rt_set_sched_policy);
01972
EXPORT_SYMBOL(rt_get_prio);
01973
EXPORT_SYMBOL(rt_get_inher_prio);
01974
EXPORT_SYMBOL(rt_change_prio);
01975
EXPORT_SYMBOL(rt_whoami);
01976
EXPORT_SYMBOL(rt_task_yield);
01977
EXPORT_SYMBOL(rt_task_suspend);
01978
EXPORT_SYMBOL(rt_task_suspend_if);
01979
EXPORT_SYMBOL(rt_task_suspend_until);
01980
EXPORT_SYMBOL(rt_task_suspend_timed);
01981
EXPORT_SYMBOL(rt_task_resume);
01982
EXPORT_SYMBOL(rt_get_task_state);
01983
EXPORT_SYMBOL(rt_linux_use_fpu);
01984
EXPORT_SYMBOL(rt_task_use_fpu);
01985
EXPORT_SYMBOL(rt_task_signal_handler);
01986
EXPORT_SYMBOL(rt_gettimeorig);
01987
EXPORT_SYMBOL(rt_task_make_periodic_relative_ns);
01988
EXPORT_SYMBOL(rt_task_make_periodic);
01989
EXPORT_SYMBOL(rt_task_wait_period);
01990
EXPORT_SYMBOL(rt_task_set_resume_end_times);
01991
EXPORT_SYMBOL(rt_set_resume_time);
01992
EXPORT_SYMBOL(rt_set_period);
01993
EXPORT_SYMBOL(next_period);
01994
EXPORT_SYMBOL(rt_busy_sleep);
01995
EXPORT_SYMBOL(rt_sleep);
01996
EXPORT_SYMBOL(rt_sleep_until);
01997
EXPORT_SYMBOL(rt_task_masked_unblock);
01998
EXPORT_SYMBOL(rt_nanosleep);
01999
EXPORT_SYMBOL(rt_enq_ready_edf_task);
02000
EXPORT_SYMBOL(rt_enq_ready_task);
02001
EXPORT_SYMBOL(rt_renq_ready_task);
02002
EXPORT_SYMBOL(rt_rem_ready_task);
02003
EXPORT_SYMBOL(rt_rem_ready_current);
02004
EXPORT_SYMBOL(rt_enq_timed_task);
02005
EXPORT_SYMBOL(rt_wake_up_timed_tasks);
02006
EXPORT_SYMBOL(rt_rem_timed_task);
02007
EXPORT_SYMBOL(rt_enqueue_blocked);
02008
EXPORT_SYMBOL(rt_dequeue_blocked);
02009
EXPORT_SYMBOL(rt_renq_current);
02010
EXPORT_SYMBOL(rt_named_task_init);
02011
EXPORT_SYMBOL(rt_named_task_init_cpuid);
02012
EXPORT_SYMBOL(rt_named_task_delete);
02013
EXPORT_SYMBOL(is_process_registered);
02014
EXPORT_SYMBOL(rt_register);
02015
EXPORT_SYMBOL(rt_drg_on_name);
02016
EXPORT_SYMBOL(rt_drg_on_adr);
02017
EXPORT_SYMBOL(rt_get_name);
02018
EXPORT_SYMBOL(rt_get_adr);
02019
EXPORT_SYMBOL(rt_get_type);
02020
EXPORT_SYMBOL(rt_drg_on_name_cnt);
02021
EXPORT_SYMBOL(rt_drg_on_adr_cnt);
02022
EXPORT_SYMBOL(rt_get_adr_cnt);
02023
EXPORT_SYMBOL(rt_get_registry_slot);
02024
02025
EXPORT_SYMBOL(rt_task_init);
02026
EXPORT_SYMBOL(rt_task_init_cpuid);
02027
EXPORT_SYMBOL(rt_set_runnable_on_cpus);
02028
EXPORT_SYMBOL(rt_set_runnable_on_cpuid);
02029
EXPORT_SYMBOL(rt_check_current_stack);
02030
EXPORT_SYMBOL(rt_schedule);
02031
EXPORT_SYMBOL(rt_spv_RMS);
02032
EXPORT_SYMBOL(rt_sched_lock);
02033
EXPORT_SYMBOL(rt_sched_unlock);
02034
EXPORT_SYMBOL(rt_task_delete);
02035
EXPORT_SYMBOL(rt_is_hard_timer_running);
02036
EXPORT_SYMBOL(rt_set_periodic_mode);
02037
EXPORT_SYMBOL(rt_set_oneshot_mode);
02038
EXPORT_SYMBOL(rt_get_timer_cpu);
02039
EXPORT_SYMBOL(start_rt_timer);
02040
EXPORT_SYMBOL(stop_rt_timer);
02041
EXPORT_SYMBOL(start_rt_apic_timers);
02042
EXPORT_SYMBOL(rt_sched_type);
02043
EXPORT_SYMBOL(rt_hard_timer_tick_count);
02044
EXPORT_SYMBOL(rt_hard_timer_tick_count_cpuid);
02045
EXPORT_SYMBOL(rt_set_task_trap_handler);
02046
EXPORT_SYMBOL(rt_get_time);
02047
EXPORT_SYMBOL(rt_get_time_cpuid);
02048
EXPORT_SYMBOL(rt_get_time_ns);
02049
EXPORT_SYMBOL(rt_get_time_ns_cpuid);
02050
EXPORT_SYMBOL(rt_get_cpu_time_ns);
02051
EXPORT_SYMBOL(rt_get_base_linux_task);
02052
EXPORT_SYMBOL(rt_alloc_dynamic_task);
02053
EXPORT_SYMBOL(rt_register_watchdog);
02054
EXPORT_SYMBOL(rt_deregister_watchdog);
02055
EXPORT_SYMBOL(count2nano);
02056
EXPORT_SYMBOL(nano2count);
02057
EXPORT_SYMBOL(count2nano_cpuid);
02058
EXPORT_SYMBOL(nano2count_cpuid);
02059
02060
EXPORT_SYMBOL(rt_kthread_init);
02061
EXPORT_SYMBOL(rt_smp_linux_task);
02062
EXPORT_SYMBOL(rt_smp_current);
02063
EXPORT_SYMBOL(rt_smp_time_h);
02064
EXPORT_SYMBOL(rt_smp_oneshot_timer);
02065
EXPORT_SYMBOL(wake_up_srq);
02066
EXPORT_SYMBOL(set_rt_fun_entries);
02067
EXPORT_SYMBOL(reset_rt_fun_entries);
02068
EXPORT_SYMBOL(set_rt_fun_ext_index);
02069
EXPORT_SYMBOL(reset_rt_fun_ext_index);
02070
EXPORT_SYMBOL(max_slots);
02071
02072
#ifdef CONFIG_SMP
02073
#endif
02074
02075
#endif