00001
00002
00003
00004
00005
00006
00007
00008
00009
00010
00011
00012
00013
00014
00015
00016
00017
00018
00019
00020
00021
00022
00023
00024
00025
00026
00027
00028
00029
00030
00031
00032
00033
00034
00035
00036
00037
00038
00039
00040
00041
00042
00043
00044
#include <linux/version.h>
00045
#include <linux/slab.h>
00046
#include <linux/errno.h>
00047
00048
#include <linux/module.h>
00049
#include <linux/interrupt.h>
00050
#include <linux/irq.h>
00051
#include <linux/console.h>
00052
#include <asm/system.h>
00053
#include <asm/hw_irq.h>
00054
#include <asm/irq.h>
00055
#include <asm/io.h>
00056
#include <asm/mmu_context.h>
00057
#include <asm/uaccess.h>
00058
#include <asm/time.h>
00059
#include <asm/types.h>
00060 #define __RTAI_HAL__
00061
#include <asm/rtai_lxrt.h>
00062
#include <asm/rtai_hal.h>
00063
#ifdef CONFIG_PROC_FS
00064
#include <linux/stat.h>
00065
#include <linux/proc_fs.h>
00066
#include <rtai_proc_fs.h>
00067
#endif
00068
#include <stdarg.h>
00069
00070
MODULE_LICENSE("GPL");

/* CPU frequency (decrementer base clock). 0 means "ask Adeos at install
 * time"; may be overridden as a module parameter. */
static unsigned long rtai_cpufreq_arg = RTAI_CALIBRATED_CPU_FREQ;
MODULE_PARM(rtai_cpufreq_arg, "i");

#ifdef CONFIG_SMP
#error "SMP is not supported"
#endif

/* Per-CPU scheduler-lock state used by the ISR lock/unlock macros below:
 * 'locked' is a nesting counter, 'rqsted' flags a deferred reschedule. */
struct { volatile int locked, rqsted; } rt_scheduling[RTAI_NR_CPUS];

#ifdef CONFIG_RTAI_SCHED_ISR_LOCK
/* Hook invoked when the ISR scheduler lock is fully released and a
 * reschedule was requested meanwhile (see RTAI_SCHED_ISR_UNLOCK). */
static void (*rtai_isr_hook)(int cpuid);
#endif

extern struct desc_struct idt_table[];

/* The RTAI Adeos domain descriptor, registered in __rtai_hal_init(). */
adomain_t rtai_domain;

/* On PPC the decrementer IRQ is the highest IRQ number RTAI handles. */
#define RTAI_NR_IRQS (RTAI_TIMER_DECR_IRQ + 1)

/* Real-time IRQ dispatch table: handler + opaque cookie per IRQ.
 * 'retmode' selects the calling convention in rtai_irq_trampoline():
 * non-zero means the handler's return value decides Linux propagation. */
struct {
    int (*handler)(unsigned irq, void *cookie);
    void *cookie;
    int retmode;
} rtai_realtime_irq[RTAI_NR_IRQS];

/* Bookkeeping for Linux-side shared IRQ requests made through
 * rt_request_linux_irq(): saved action flags and a use count. */
static struct {
    unsigned long flags;
    int count;
} rtai_linux_irq[NR_IRQS];

/* System-request (SRQ) table: kernel-side handler, user-side handler,
 * and a user-visible label used for lookup by rtai_usrq_trampoline(). */
static struct {
    void (*k_handler)(void);
    long long (*u_handler)(unsigned);
    unsigned label;
} rtai_sysreq_table[RTAI_NR_SRQS];

/* Virtual IRQ used to deliver pended SRQs to Linux. */
static unsigned rtai_sysreq_virq;
/* Bitmap of allocated SRQ slots; slots 0 and 1 are reserved (hence 3). */
static unsigned long rtai_sysreq_map = 3;
static unsigned long rtai_sysreq_pending;
static unsigned long rtai_sysreq_running;
static spinlock_t rtai_ssrq_lock = SPIN_LOCK_UNLOCKED;

/* Synchronization state for rtai_critical_enter()/exit() nesting. */
static volatile int rtai_sync_level;
static atomic_t rtai_sync_count = ATOMIC_INIT(1);

#ifdef FIXME
static int rtai_last_8254_counter2;
static RTIME rtai_ts_8254;
static struct desc_struct rtai_sysvec;
static RT_TRAP_HANDLER rtai_trap_handler;
#endif

struct rt_times rt_times = { 0 };
struct rt_times rt_smp_times[RTAI_NR_CPUS] = { { 0 } };
struct rtai_switch_data rtai_linux_context[RTAI_NR_CPUS] = { { 0 } };
struct calibration_data rtai_tunables = { 0 };
volatile unsigned long rtai_cpu_realtime = 0;
volatile unsigned long rtai_cpu_lock = 0;
/* Base per-thread-data key allocated from Adeos; -1 until installed. */
int rtai_adeos_ptdbase = -1;
00145
00146 unsigned long rtai_critical_enter (
void (*synch)(
void))
00147 {
00148
unsigned long flags = adeos_critical_enter(synch);
00149
00150
if (
atomic_dec_and_test(&
rtai_sync_count)) {
00151
rtai_sync_level = 0;
00152 }
else if (synch != NULL) {
00153
printk(KERN_INFO
"RTAI[hal]: warning: nested sync will fail.\n");
00154 }
00155
return flags;
00156 }
00157
00158 void rtai_critical_exit (
unsigned long flags)
00159 {
00160
atomic_inc(&
rtai_sync_count);
00161 adeos_critical_exit(
flags);
00162 }
00163
00164 int rt_request_irq (
unsigned irq,
int (*handler)(
unsigned irq,
void *cookie),
void *cookie,
int retmode)
00165 {
00166
unsigned long flags;
00167
00168
if (
handler == NULL || irq >=
RTAI_NR_IRQS) {
00169
return -EINVAL;
00170 }
00171
if (
rtai_realtime_irq[irq].handler != NULL) {
00172
return -EBUSY;
00173 }
00174
flags =
rtai_critical_enter(NULL);
00175
00176
if (irq == RTAI_TIMER_DECR_IRQ) {
00177 disarm_decr[rtai_cpuid()] = 1;
00178 }
00179
rtai_realtime_irq[irq].handler = (
void *)
handler;
00180
rtai_realtime_irq[irq].cookie =
cookie;
00181
rtai_realtime_irq[irq].retmode =
retmode ? 1 : 0;
00182
rtai_critical_exit(
flags);
00183
return 0;
00184 }
00185
00186 int rt_release_irq (
unsigned irq)
00187 {
00188
unsigned long flags;
00189
if (irq >=
RTAI_NR_IRQS || !
rtai_realtime_irq[irq].handler) {
00190
return -EINVAL;
00191 }
00192
flags =
rtai_critical_enter(NULL);
00193
rtai_realtime_irq[irq].handler = NULL;
00194
00195
if (irq == RTAI_TIMER_DECR_IRQ) {
00196 disarm_decr[rtai_cpuid()] = 0;
00197 }
00198
rtai_critical_exit(
flags);
00199
return 0;
00200 }
00201
00202 void rt_set_irq_cookie (
unsigned irq,
void *cookie)
00203 {
00204
if (irq <
RTAI_NR_IRQS) {
00205
rtai_realtime_irq[irq].cookie =
cookie;
00206 }
00207 }
00208
00209 void rt_set_irq_retmode (
unsigned irq,
int retmode)
00210 {
00211
if (irq <
RTAI_NR_IRQS) {
00212
rtai_realtime_irq[irq].retmode =
retmode ? 1 : 0;
00213 }
00214 }
00215
00216
extern struct hw_interrupt_type __adeos_std_irq_dtype[];

/*
 * BEGIN_PIC/END_PIC bracket direct manipulation of the interrupt
 * controller: they disable hard interrupts, stall the Adeos root domain
 * stage on the current CPU, and freeze the preempt count, restoring
 * everything in reverse order.  Both macros expect 'flags', 'lflags'
 * and 'cpuid' to be declared by the enclosing function.  Note the
 * deliberately unbalanced braces: BEGIN_PIC opens a do{ that END_PIC
 * closes with }while(0).
 */
#define BEGIN_PIC() \
do { \
    rtai_save_flags_and_cli(flags); \
    cpuid = rtai_cpuid(); \
    lflags = xchg(&adp_root->cpudata[cpuid].status, 1 << IPIPE_STALL_FLAG); \
    rtai_save_and_lock_preempt_count()

#define END_PIC() \
    rtai_restore_preempt_count(); \
    adp_root->cpudata[cpuid].status = lflags; \
    rtai_restore_flags(flags); \
} while (0)
00231
00232
00233
00234
00235
00236
00237
00238
00239
00240
00241
00242
00243
00244
00245
00246
00247
00248
00249
00250
00251
00252
00253
00254
00255
00256
00257
00258
00259
/*
 * Start up 'irq' at the PIC level: unlock it in the Adeos root domain
 * and invoke the standard Linux startup routine for that IRQ.  Returns
 * the startup routine's result.  flags/lflags/cpuid are required by the
 * BEGIN_PIC()/END_PIC() macros.
 */
unsigned rt_startup_irq (unsigned irq)
{
    unsigned long flags, lflags;
    int retval, cpuid;

    BEGIN_PIC();
    __adeos_unlock_irq(adp_root, irq);
    retval = __adeos_std_irq_dtype[irq].startup(irq);
    END_PIC();
    return retval;
}
00270
00271
00272
00273
00274
00275
00276
00277
00278
00279
00280
00281
00282
00283
00284
00285
00286
00287
00288
00289
00290
00291
00292
00293
00294
00295
00296
00297
00298
00299
/*
 * Shut down 'irq' at the PIC level: call the standard Linux shutdown
 * routine, then clear any pending state for the IRQ in the Adeos root
 * domain.  flags/lflags/cpuid are required by BEGIN_PIC()/END_PIC().
 */
void rt_shutdown_irq (unsigned irq)
{
    unsigned long flags, lflags;
    int cpuid;

    BEGIN_PIC();
    __adeos_std_irq_dtype[irq].shutdown(irq);
    __adeos_clear_irq(adp_root, irq);
    END_PIC();
}
00310
00311
00312
00313
00314
00315
00316
00317
00318
00319
00320
00321
00322
00323
00324
00325
00326
00327
00328
00329
00330
00331
00332
00333
00334
00335
00336
00337
/*
 * Common helper: unlock 'irq' in the Adeos root domain and enable it at
 * the PIC.  flags/lflags/cpuid are required by BEGIN_PIC()/END_PIC().
 */
static inline void _rt_enable_irq (unsigned irq)
{
    unsigned long flags, lflags;
    int cpuid;

    BEGIN_PIC();
    __adeos_unlock_irq(adp_root, irq);
    __adeos_std_irq_dtype[irq].enable(irq);
    END_PIC();
}

/* Public wrapper around _rt_enable_irq(). */
void rt_enable_irq (unsigned irq)
{
    _rt_enable_irq(irq);
}
00353
00354
00355
00356
00357
00358
00359
00360
00361
00362
00363
00364
00365
00366
00367
00368
00369
00370
00371
00372
00373
00374
00375
00376
00377
00378
00379
00380
/*
 * Disable 'irq' at the PIC and lock it in the Adeos root domain so
 * Linux does not see it while disabled.  flags/lflags/cpuid are
 * required by BEGIN_PIC()/END_PIC().
 */
void rt_disable_irq (unsigned irq)
{
    unsigned long flags, lflags;
    int cpuid;

    BEGIN_PIC();
    __adeos_std_irq_dtype[irq].disable(irq);
    __adeos_lock_irq(adp_root, cpuid, irq);
    END_PIC();
}
00391
00392
00393
00394
00395
00396
00397
00398
00399
00400
00401
00402
00403
00404
00405
00406
00407
00408
00409
00410
00411
00412
00413
00414
00415
00416
00417
00418
00419
00420
00421
00422
/* Acknowledge 'irq' through its Linux irq_desc handler (the PIC's ack
 * routine; on this platform that also masks the line). */
void rt_mask_and_ack_irq (unsigned irq) {
    irq_desc[irq].handler->ack(irq);
}
00427
00428
00429
00430
00431
00432
00433
00434
00435
00436
00437
00438
00439
00440
00441
00442
00443
00444
00445
00446
00447
00448
00449
00450
00451
00452
00453
00454
00455
00456
00457
/*
 * Common helper: finish handling 'irq' via the PIC's end routine.  The
 * IRQ is only unlocked in the Adeos root domain when Linux considers it
 * neither disabled nor in progress.  flags/lflags/cpuid are required by
 * BEGIN_PIC()/END_PIC().
 */
static inline void _rt_end_irq (unsigned irq)
{
    unsigned long flags, lflags;
    int cpuid;

    BEGIN_PIC();
    if (!(irq_desc[irq].status & (IRQ_DISABLED | IRQ_INPROGRESS))) {
        __adeos_unlock_irq(adp_root, irq);
    }
    __adeos_std_irq_dtype[irq].end(irq);
    END_PIC();
}

/*
 * Unmask 'irq': prefer the PIC's end routine when one exists, otherwise
 * fall back to a plain enable.
 */
void rt_unmask_irq (unsigned irq) {
    if (irq_desc[irq].handler->end != NULL) {
        _rt_end_irq(irq);
    }
    else {
        _rt_enable_irq(irq);
    }
}
00479
00480
00481
00482
00483
00484
00485
00486
00487
00488
00489
00490
00491
00492
00493
00494
00495
00496
00497
00498
00499
00500
00501
00502
00503
00504
00505
00506
00507
00508
00509
/* Acknowledge 'irq' from a real-time handler; on this platform this is
 * implemented as a re-enable of the line. */
void rt_ack_irq (unsigned irq)
{
    _rt_enable_irq(irq);
}
00514
00515
00516
00517
00518
00519
00520
00521
00522
00523
00524
00525
00526
00527
00528
00529
00530
00531
00532
00533
00534
00535
00536
00537
00538
00539 int rt_request_linux_irq (
unsigned irq,
00540 irqreturn_t (*handler)(
int irq,
00541
void *dev_id,
00542
struct pt_regs *regs),
00543
char *name,
00544
void *dev_id)
00545 {
00546
unsigned long flags;
00547
00548
if (irq >= NR_IRQS || !
handler) {
00549
return -EINVAL;
00550 }
00551
00552
rtai_save_flags_and_cli(
flags);
00553
00554 spin_lock(&irq_desc[irq].lock);
00555
00556
if (
rtai_linux_irq[irq].count++ == 0 && irq_desc[irq].action) {
00557
rtai_linux_irq[irq].flags = irq_desc[irq].action->flags;
00558 irq_desc[irq].action->flags |= SA_SHIRQ;
00559 }
00560
00561 spin_unlock(&irq_desc[irq].lock);
00562
00563
rtai_restore_flags(
flags);
00564
00565 request_irq(irq,
handler, SA_SHIRQ, name, dev_id);
00566
00567
return 0;
00568 }
00569
00570
00571
00572
00573
00574
00575
00576
00577
00578
00579
00580
/*
 * Release a Linux-side handler previously installed with
 * rt_request_linux_irq().  When the last RTAI user goes away, the
 * saved action flags (pre-SA_SHIRQ) are restored.  Returns 0 on
 * success, -EINVAL when the IRQ is out of range or unused.
 */
int rt_free_linux_irq (unsigned irq, void *dev_id)
{
    unsigned long flags;

    if (irq >= NR_IRQS || rtai_linux_irq[irq].count == 0)
        return -EINVAL;

    rtai_save_flags_and_cli(flags);
    free_irq(irq,dev_id);

    spin_lock(&irq_desc[irq].lock);
    /* Last user: restore the flags saved at first request. */
    if (--rtai_linux_irq[irq].count == 0 && irq_desc[irq].action)
        irq_desc[irq].action->flags = rtai_linux_irq[irq].flags;
    spin_unlock(&irq_desc[irq].lock);

    rtai_restore_flags(flags);
    return 0;
}
00604
/* Bitmask of CPUs with IRQs pended to Linux; tested-and-cleared in
 * rtai_irq_trampoline().  NOTE(review): nothing in this file sets bits
 * in it (adeos_pend_irq below does not) — confirm whether the macro is
 * expected to set_bit(cpuid, &adeos_pended) as in other RTAI ports. */
static unsigned long adeos_pended;

/* Pend 'irq' to the Adeos root (Linux) domain on the current CPU, with
 * hard interrupts disabled around the operation. */
#define adeos_pend_irq(irq) \
do { \
    unsigned long flags; \
    rtai_save_flags_and_cli(flags); \
    adeos_pend_uncond(irq, rtai_cpuid()); \
    rtai_restore_flags(flags); \
} while (0)

/* Public entry point: defer 'irq' to Linux for normal processing. */
void rt_pend_linux_irq (unsigned irq)
{
    adeos_pend_irq(irq);
}
00627
00628
00629
00630
00631
00632
00633
00634
00635
00636
00637
00638
00639
00640
00641
00642
00643
00644
00645
00646
00647 int rt_request_srq (
unsigned label,
00648
void (*k_handler)(
void),
00649
long long (*u_handler)(
unsigned))
00650 {
00651
unsigned long flags;
00652
int srq;
00653
00654
if (
k_handler == NULL)
00655
return -EINVAL;
00656
00657
rtai_save_flags_and_cli(
flags);
00658
00659
if (
rtai_sysreq_map != ~0)
00660 {
00661
srq = ffz(
rtai_sysreq_map);
00662 set_bit(
srq, &
rtai_sysreq_map);
00663
rtai_sysreq_table[
srq].k_handler =
k_handler;
00664
rtai_sysreq_table[
srq].u_handler =
u_handler;
00665
rtai_sysreq_table[
srq].label =
label;
00666 }
00667
else
00668
srq = -EBUSY;
00669
00670
rtai_restore_flags(
flags);
00671
00672
return srq;
00673 }
00674
00675
00676
00677
00678
00679
00680
00681
00682
00683 int rt_free_srq (
unsigned srq)
00684 {
00685
return (srq < 2 || srq >= RTAI_NR_SRQS || !test_and_clear_bit(
srq, &
rtai_sysreq_map)) ? -EINVAL : 0;
00686 }
00687
00688
00689
00690
00691
00692
00693
00694
00695
00696
00697
00698 void rt_pend_linux_srq (
unsigned srq)
00699 {
00700
if (
srq > 0 &&
srq < RTAI_NR_SRQS) {
00701 set_bit(
srq, &
rtai_sysreq_pending);
00702
adeos_pend_irq(
rtai_sysreq_virq);
00703 }
00704 }
00705
00706
#ifdef CONFIG_SMP

#error "SMP is not supported"

#else

/* On UP there is nothing to synchronize when entering critical sections. */
#define rtai_critical_sync NULL

/* UP stub: with a single CPU there is no one to broadcast to. */
void rtai_broadcast_to_timers (int irq, void *dev_id, struct pt_regs *regs) {
}

#endif

/* APIC timers do not exist on this platform; provided for API parity. */
void rt_request_apic_timers (void (*handler)(void), struct apic_timer_setup_data *tmdata) {
}

#define rt_free_apic_timers() rt_free_timer()

#ifdef CONFIG_SMP

#error "SMP is not supported"

#else

/* UP stub: IRQ/CPU affinity is meaningless with one CPU; reports success. */
int rt_assign_irq_to_cpu (int irq, unsigned long cpus_mask) {
    return 0;
}

/* UP stub: nothing to reset; reports success. */
int rt_reset_irq_to_sym_mode (int irq) {
    return 0;
}

/* UP stub: per-CPU timer requests are not supported. */
void rt_request_timer_cpuid (void (*handler)(void), unsigned tick, int cpuid) {
}

#endif
00751
00752
00753
00754
00755
00756
00757
00758
00759
00760
00761
00762
/*
 * Take over the decrementer for RTAI timing.
 *
 * @handler:  RTAI timer handler, installed on RTAI_TIMER_DECR_IRQ.
 * @tick:     period in timebase ticks; 0 selects one-shot-per-jiffy
 *            operation at the Linux tick rate.  Periods longer than one
 *            jiffy are clamped to tb_ticks_per_jiffy.
 * @use_apic: unused on this platform (kept for API parity).
 *
 * Returns 0 on success, -EINVAL when the decrementer IRQ cannot be
 * requested.  The whole setup runs inside an RTAI critical section.
 */
int rt_request_timer (void (*handler)(void), unsigned tick, int use_apic)
{
    unsigned long flags;

    TRACE_RTAI_TIMER(TRACE_RTAI_EV_TIMER_REQUEST,handler,tick);

    flags = rtai_critical_enter(rtai_critical_sync);

    rt_times.tick_time = rtai_rdtsc();
    rt_times.linux_tick = tb_ticks_per_jiffy;

    if (tick > 0)
    {
        if (tick > tb_ticks_per_jiffy)
            tick = tb_ticks_per_jiffy;
        rt_times.intr_time = rt_times.tick_time + tick;
        rt_times.linux_time = rt_times.tick_time + rt_times.linux_tick;
        rt_times.periodic_tick = tick;
#ifdef CONFIG_40x
        /* 40x: use the auto-reload PIT for the periodic mode. */
        mtspr(SPRN_TCR, mfspr(SPRN_TCR) | TCR_ARE);
        mtspr(SPRN_PIT, tick);
#endif
    }
    else
    {
        /* One-shot mode: run at the Linux tick rate. */
        rt_times.intr_time = rt_times.tick_time + rt_times.linux_tick;
        rt_times.linux_time = rt_times.tick_time + rt_times.linux_tick;
        rt_times.periodic_tick = rt_times.linux_tick;
#ifdef CONFIG_40x
        /* 40x: disable PIT auto-reload for one-shot operation. */
        mtspr(SPRN_TCR, mfspr(SPRN_TCR) & ~TCR_ARE);
#endif
    }

    rtai_sync_level = 2;
    /* Drop any stale handler before claiming the decrementer IRQ. */
    rt_release_irq(RTAI_TIMER_DECR_IRQ);
    if (rt_request_irq(RTAI_TIMER_DECR_IRQ,(rt_irq_handler_t)handler,NULL, 0) < 0)
    {
        rtai_critical_exit(flags);
        return -EINVAL;
    }

    rt_set_timer_delay(rt_times.periodic_tick);

    rtai_critical_exit(flags);

    return 0;
}
00813
00814
00815
00816
00817
00818
/*
 * Return the decrementer to Linux: release the RTAI handler for the
 * decrementer IRQ and (on 40x) restore the auto-reload PIT to the
 * Linux jiffy period.  Runs inside an RTAI critical section.
 */
void rt_free_timer (void)
{
    unsigned long flags;

    TRACE_RTAI_TIMER(TRACE_RTAI_EV_TIMER_FREE,0,0);

    flags = rtai_critical_enter(rtai_critical_sync);

    rt_release_irq(RTAI_TIMER_DECR_IRQ);

#ifdef CONFIG_40x
    /* Re-arm the auto-reload PIT at the Linux tick period. */
    mtspr(SPRN_TCR, mfspr(SPRN_TCR) | TCR_ARE);
    mtspr(SPRN_PIT, tb_ticks_per_jiffy);
#endif

    rtai_critical_exit(flags);
}
00839
00840
#ifdef FIXME
/* Atomically swap in a new global trap handler; returns the old one.
 * Currently compiled out (FIXME) together with rtai_trap_handler. */
RT_TRAP_HANDLER rt_set_trap_handler (RT_TRAP_HANDLER handler) {
    return (RT_TRAP_HANDLER)xchg(&rtai_trap_handler,handler);
}
#endif

#ifdef CONFIG_RTAI_SCHED_ISR_LOCK
/*
 * Scheduler ISR lock: while an IRQ handler runs, reschedule requests
 * are deferred; on the final unlock the registered rtai_isr_hook is
 * invoked if a reschedule was requested.  Both macros expect 'cpuid'
 * to be in scope in the enclosing function.
 */
#define RTAI_SCHED_ISR_LOCK() \
do { \
    if (!rt_scheduling[cpuid].locked++) { \
        rt_scheduling[cpuid].rqsted = 0; \
    } \
} while (0)
#define RTAI_SCHED_ISR_UNLOCK() \
do { \
    rtai_cli(); \
    if (rt_scheduling[cpuid].locked && !(--rt_scheduling[cpuid].locked)) { \
        if (rt_scheduling[cpuid].rqsted > 0 && rtai_isr_hook) { \
            rtai_isr_hook(cpuid); \
        } \
    } \
} while (0)
#else
/* Without ISR locking, only interrupts are disabled on unlock. */
#define RTAI_SCHED_ISR_LOCK() \
do { } while (0)
#define RTAI_SCHED_ISR_UNLOCK() \
do { rtai_cli(); } while (0)
#endif
00870
00871
static int rtai_irq_trampoline (
unsigned irq)
00872 {
00873
unsigned long lflags;
00874
int cpuid = rtai_cpuid();
00875
TRACE_RTAI_GLOBAL_IRQ_ENTRY(irq,0);
00876
00877 lflags = xchg(&adp_root->cpudata[
cpuid].status, (1 << IPIPE_STALL_FLAG));
00878 adp_root->irqs[irq].acknowledge(irq);
00879
if (
rtai_realtime_irq[irq].handler) {
00880
RTAI_SCHED_ISR_LOCK();
00881
if (
rtai_realtime_irq[irq].retmode && ((int (*)(
int,
void *))
rtai_realtime_irq[irq].handler)(irq,
rtai_realtime_irq[irq].cookie)) {
00882
RTAI_SCHED_ISR_UNLOCK();
00883 adp_root->cpudata[
cpuid].status = lflags;
00884
return 0;
00885 }
else {
00886
rtai_realtime_irq[irq].handler(irq,
rtai_realtime_irq[irq].
cookie);
00887
RTAI_SCHED_ISR_UNLOCK();
00888 }
00889 }
else {
00890 adeos_pend_uncond(irq, cpuid);
00891 }
00892 adp_root->cpudata[
cpuid].status = lflags;
00893
00894
if (test_and_clear_bit(cpuid, &adeos_pended) && !test_bit(IPIPE_STALL_FLAG, &lflags)) {
00895
if (adp_root->cpudata[
cpuid].irq_pending_hi != 0) {
00896
rtai_sti();
00897
rtai_cli();
00898 __adeos_sync_stage(IPIPE_IRQMASK_ANY);
00899 }
00900
return 1;
00901 }
00902
return 0;
00903
00904
TRACE_RTAI_GLOBAL_IRQ_EXIT();
00905 }
00906
00907
#ifdef FIXME
00908
static void rtai_trap_fault (adevinfo_t *evinfo)
00909
00910 {
00911 adeos_declare_cpuid;
00912
00913
static const int trap2sig[] = {
00914 SIGFPE,
00915 SIGTRAP,
00916 SIGSEGV,
00917 SIGTRAP,
00918 SIGSEGV,
00919 SIGSEGV,
00920 SIGILL,
00921 SIGSEGV,
00922 SIGSEGV,
00923 SIGFPE,
00924 SIGSEGV,
00925 SIGBUS,
00926 SIGBUS,
00927 SIGSEGV,
00928 SIGSEGV,
00929 0,
00930 SIGFPE,
00931 SIGBUS,
00932 SIGSEGV,
00933 SIGFPE,
00934 0,0,0,0,0,0,0,0,0,0,0,0
00935 };
00936
00937
TRACE_RTAI_TRAP_ENTRY(evinfo->event,0);
00938
00939
00940
00941
00942
00943
00944
00945
00946
00947
00948
00949
00950
00951
00952
00953
00954
00955
00956
00957
00958
00959
00960
00961
00962
#ifdef adeos_load_cpuid
00963
adeos_load_cpuid();
00964
#endif
00965
00966
if (evinfo->domid == RTAI_DOMAIN_ID)
00967 {
00968
if (evinfo->event == 7)
00969 {
00970
00971
00972
00973
00974
00975
00976
00977
struct task_struct *linux_task = rtai_get_current(cpuid);
00978
00979
#if CONFIG_PREEMPT
00980
00981
00982
00983 linux_task->preempt_count++;
00984
#endif
00985
00986
if (linux_task->used_math)
00987
restore_task_fpenv(linux_task);
00988
else
00989 {
00990
init_xfpu();
00991 linux_task->used_math = 1;
00992 }
00993
00994 linux_task->flags |= PF_USEDFPU;
00995
00996
#if CONFIG_PREEMPT
00997
linux_task->preempt_count--;
00998
#endif
00999
01000
goto endtrap;
01001 }
01002
01003
if (rtai_trap_handler != NULL &&
01004 (test_bit(cpuid,&rtai_cpu_realtime) || test_bit(cpuid,&rtai_cpu_lxrt)) &&
01005
rtai_trap_handler(evinfo->event,
01006 trap2sig[evinfo->event],
01007 (
struct pt_regs *)evinfo->evdata,
01008 NULL) != 0)
01009
goto endtrap;
01010 }
01011
01012 adeos_propagate_event(evinfo);
01013
01014 endtrap:
01015
01016
TRACE_RTAI_TRAP_EXIT();
01017 }
01018
01019
#endif
01020
/*
 * Linux-side dispatcher for the SRQ virtual interrupt.  Drains every
 * pending-but-not-running SRQ, invoking its kernel-side handler.  The
 * lock is dropped around each handler call, so the pending set is
 * re-read on every loop iteration.
 */
static void rtai_ssrq_trampoline (unsigned virq)
{
    unsigned long pending;

    spin_lock(&rtai_ssrq_lock);
    while ((pending = rtai_sysreq_pending & ~rtai_sysreq_running) != 0) {
        unsigned srq = ffnz(pending);
        set_bit(srq,&rtai_sysreq_running);
        clear_bit(srq,&rtai_sysreq_pending);
        /* Run the handler without holding the lock. */
        spin_unlock(&rtai_ssrq_lock);

        if (test_bit(srq,&rtai_sysreq_map)) {
            rtai_sysreq_table[srq].k_handler();
        }

        clear_bit(srq,&rtai_sysreq_running);
        spin_lock(&rtai_ssrq_lock);
    }
    spin_unlock(&rtai_ssrq_lock);
}
01041
/*
 * User-space SRQ entry point.  For a valid allocated SRQ with a
 * user-side handler, invokes it with 'label' and returns its result.
 * Otherwise treats 'label' as a lookup key and returns the slot index
 * of the matching SRQ (0 when nothing matches).  Note that 'srq' is
 * reused as the loop index in the lookup branch.
 */
static inline long long rtai_usrq_trampoline (unsigned srq, unsigned label)
{
    long long r = 0;

    TRACE_RTAI_SRQ_ENTRY(srq, 0);

    if (srq > 1 && srq < RTAI_NR_SRQS &&
        test_bit(srq,&rtai_sysreq_map) &&
        rtai_sysreq_table[srq].u_handler != NULL)
        r = rtai_sysreq_table[srq].u_handler(label);
    else
        /* Lookup mode: find the SRQ registered under 'label'. */
        for (srq = 2; srq < RTAI_NR_SRQS; srq++)
            if (test_bit(srq,&rtai_sysreq_map) &&
                rtai_sysreq_table[srq].label == label)
                r = (long long)srq;

    TRACE_RTAI_SRQ_EXIT();

    return r;
}
01062
01063
#include <asm/rtai_usi.h>

/* LXRT system-call dispatcher, installed by the LXRT module. */
long long (*rtai_lxrt_invoke_entry)(unsigned long, unsigned long);

/*
 * Syscall interception hook.  RTAI calls are recognized by the
 * convention gpr0 == gpr3 + gpr4 (with gpr0 non-zero); gpr3 carries the
 * SRQ/LXRT request word and gpr4 its argument.  The 64-bit result is
 * returned split across gpr3/gpr4 and the PC is advanced past the 'sc'
 * instruction.  Returns 1 when the caller is in hard-real-time mode
 * (no Linux syscall processing must follow), 0 otherwise.
 */
asmlinkage int rtai_syscall_entry(struct pt_regs *regs)
{
    unsigned long vec, srq, args;

    if (regs->gpr[0] && regs->gpr[0] == ((srq = regs->gpr[3]) + (args = regs->gpr[4]))) {
        unsigned long long retval;
        /* High byte zero selects the SRQ path, otherwise LXRT. */
        retval = !(vec = srq >> 24) ? rtai_usrq_trampoline(srq, args) : rtai_lxrt_invoke_entry(srq, args);
        regs->gpr[0] = 0;
        regs->gpr[3] = ((unsigned long *)&retval)[0];
        regs->gpr[4] = ((unsigned long *)&retval)[1];
        regs->nip += 4;
        if (in_hrt_mode(rtai_cpuid())) {
            return 1;
        }
        local_irq_enable();
        return 0;
    }
    return 0;
}
01085
01086 static void rtai_install_archdep (
void)
01087 {
01088 adsysinfo_t sysinfo;
01089
01090 adeos_get_sysinfo(&sysinfo);
01091
if (sysinfo.archdep.tmirq != RTAI_TIMER_DECR_IRQ)
01092 {
01093
printk(
"RTAI/Adeos: the timer interrupt %d is not supported\n",
01094 sysinfo.archdep.tmirq);
01095 }
01096
01097
if (
rtai_cpufreq_arg == 0)
01098 {
01099
rtai_cpufreq_arg = (
unsigned long)sysinfo.cpufreq;
01100 }
01101
rtai_tunables.cpu_freq =
rtai_cpufreq_arg;
01102 }
01103
/*
 * Architecture-dependent teardown.  No state needs undoing on this
 * port; the empty critical section only synchronizes with any
 * concurrent critical-section users before unload proceeds.
 */
static void rtai_uninstall_archdep (void) {
    unsigned long flags;

    flags = rtai_critical_enter(NULL);
    rtai_critical_exit(flags);
}
01112
01113
/*
 * Atomically install 'hookfn' as the ISR scheduler hook and return the
 * previous hook.  Without CONFIG_RTAI_SCHED_ISR_LOCK the hook is not
 * supported and NULL is always returned.
 */
void (*rt_set_ihook (void (*hookfn)(int)))(int) {
#ifdef CONFIG_RTAI_SCHED_ISR_LOCK
    return (void (*)(int))xchg(&rtai_isr_hook,hookfn);
#else
    return NULL;
#endif
}
01122
01123
#ifdef CONFIG_PROC_FS

/* Root of the /proc/rtai tree, created by rtai_proc_register(). */
struct proc_dir_entry *rtai_proc_root = NULL;

/*
 * read_proc callback for /proc/rtai/rtai: dumps decrementer timing
 * constants, the real-time IRQs currently claimed, and the SRQ slots
 * in use.  Uses the PROC_PRINT* macros from rtai_proc_fs.h for
 * buffer/offset handling (PROC_PRINT_DONE produces the return value).
 */
static int rtai_read_proc (char *page,
                           char **start,
                           off_t off,
                           int count,
                           int *eof,
                           void *data)
{
    PROC_PRINT_VARS;
    int i, none;

    PROC_PRINT("\n** RTAI/ppc over Adeos:\n\n");
    PROC_PRINT(" Decr. Frequency: %lu\n", rtai_tunables.cpu_freq);
    PROC_PRINT(" Decr. Latency: %d ns\n",RTAI_LATENCY_8254);
    PROC_PRINT(" Decr. Setup Time: %d ns\n",RTAI_SETUP_TIME_8254);

    none = 1;

    PROC_PRINT("\n** Real-time IRQs used by RTAI: ");

    for (i = 0; i < RTAI_NR_IRQS; i++)
    {
        if (rtai_realtime_irq[i].handler)
        {
            if (none)
            {
                PROC_PRINT("\n");
                none = 0;
            }

            PROC_PRINT("\n #%d at %p", i, rtai_realtime_irq[i].handler);
        }
    }

    if (none)
        PROC_PRINT("none");

    PROC_PRINT("\n\n");

#ifdef FIXME
    PROC_PRINT("** RTAI extension traps: \n\n");
    PROC_PRINT(" SYSREQ=0x%x\n",RTAI_SYS_VECTOR);
    PROC_PRINT(" LXRT=0x%x\n",RTAI_LXRT_VECTOR);
    PROC_PRINT(" SHM=0x%x\n\n",RTAI_SHM_VECTOR);
#endif

    none = 1;
    PROC_PRINT("** RTAI SYSREQs in use: ");

    for (i = 0; i < RTAI_NR_SRQS; i++)
    {
        if (rtai_sysreq_table[i].k_handler ||
            rtai_sysreq_table[i].u_handler)
        {
            PROC_PRINT("#%d ", i);
            none = 0;
        }
    }

    if (none)
        PROC_PRINT("none");

    PROC_PRINT("\n\n");

    PROC_PRINT_DONE;
}
01192
01193
static int rtai_proc_register (
void)
01194
01195 {
01196
struct proc_dir_entry *ent;
01197
01198
rtai_proc_root = create_proc_entry(
"rtai",S_IFDIR, 0);
01199
01200
if (!
rtai_proc_root)
01201 {
01202
printk(
"Unable to initialize /proc/rtai.\n");
01203
return -1;
01204 }
01205
01206
rtai_proc_root->owner = THIS_MODULE;
01207
01208 ent = create_proc_entry(
"rtai",S_IFREG|S_IRUGO|S_IWUSR,rtai_proc_root);
01209
01210
if (!ent)
01211 {
01212
printk(
"Unable to initialize /proc/rtai/rtai.\n");
01213
return -1;
01214 }
01215
01216 ent->read_proc = rtai_read_proc;
01217
01218
return 0;
01219 }
01220
01221
/* Tear down the /proc/rtai tree: remove the file entry, then the
 * directory itself. */
static void rtai_proc_unregister (void)
{
    remove_proc_entry("rtai",rtai_proc_root);
    remove_proc_entry("rtai",0);
}
01227
#endif
01228
/*
 * Entry point of the RTAI Adeos domain.  On the initial invocation
 * (iflag non-zero) it announces the mount; when Adeos runs domains as
 * threads it then parks forever in adeos_suspend_domain().
 */
static void rtai_domain_entry (int iflag)
{
    if (iflag) {
        rt_printk(KERN_INFO "RTAI[hal]: %s mounted over Adeos %s.\n", PACKAGE_VERSION,ADEOS_VERSION_STRING);
        rt_printk(KERN_INFO "RTAI[hal]: compiled with %s.\n", CONFIG_RTAI_COMPILER);
    }
#ifndef CONFIG_ADEOS_NOTHREADS
    for (;;) adeos_suspend_domain();
#endif
}
01239
01240
/* Adeos hook through which rtai_irq_trampoline() is installed. */
extern void *adeos_extern_irq_handler;

static void rt_printk_srq_handler(void);
/* SRQ slot 1 is pre-reserved for rt_printk output draining. */
#define RT_PRINTK_SRQ 1

/*
 * Module init: allocate the SRQ virtual IRQ and two per-thread-data
 * keys, install the IRQ trampoline, hook the SRQ dispatcher, perform
 * arch-dependent setup, register /proc entries and the rt_printk SRQ,
 * and finally register the RTAI domain with Adeos.  Returns 0 on
 * success, 1 on failure (note: not a negative errno).
 */
int __rtai_hal_init (void)
{
    unsigned long flags;
    int trapnr;
    adattr_t attr;

    rtai_sysreq_virq = adeos_alloc_irq();
    printk("RTAI/Adeos: rtai_sysreq_virq=%d\n", rtai_sysreq_virq);

    if (!rtai_sysreq_virq) {
        printk("RTAI/Adeos: no virtual interrupt available.\n");
        return 1;
    }

    flags = rtai_critical_enter(NULL);
    rtai_adeos_ptdbase = adeos_alloc_ptdkey();
    /* The two ptd keys must come out consecutive; 'trapnr' flags failure. */
    trapnr = adeos_alloc_ptdkey() != rtai_adeos_ptdbase + 1;
    adeos_extern_irq_handler = rtai_irq_trampoline;
    rtai_critical_exit(flags);

    if (trapnr) {
        printk(KERN_ERR "RTAI[hal]: per-thread keys not available.\n");
        return 1;
    }

    adeos_virtualize_irq(rtai_sysreq_virq, &rtai_ssrq_trampoline, NULL, IPIPE_HANDLE_MASK);

    rtai_install_archdep();

#ifdef CONFIG_PROC_FS
    rtai_proc_register();
#endif

    /* Wire up the rt_printk drain on its reserved SRQ slot. */
    rtai_sysreq_table[RT_PRINTK_SRQ].k_handler = rt_printk_srq_handler;
    set_bit(RT_PRINTK_SRQ, &rtai_sysreq_map);

    adeos_init_attr(&attr);
    attr.name = "RTAI";
    attr.domid = RTAI_DOMAIN_ID;
    attr.entry = rtai_domain_entry;
    attr.estacksz = 256;
    attr.priority = ADEOS_ROOT_PRI + 100;
    adeos_register_domain(&rtai_domain, &attr);
    printk(KERN_INFO "RTAI[hal]: mounted (IMMEDIATE).\n");

    return 0;
}
01297
/*
 * Module exit: undo everything __rtai_hal_init() set up, in reverse
 * order, then sleep briefly (HZ/20) to let in-flight SRQ/IRQ activity
 * drain before the module text goes away.
 */
void __rtai_hal_exit (void)
{
#ifdef CONFIG_PROC_FS
    rtai_proc_unregister();
#endif
    adeos_unregister_domain(&rtai_domain);
    adeos_extern_irq_handler = NULL;
    clear_bit(RT_PRINTK_SRQ, &rtai_sysreq_map);
    adeos_virtualize_irq(rtai_sysreq_virq,NULL,NULL,0);
    adeos_free_irq(rtai_sysreq_virq);
    rtai_uninstall_archdep();
    adeos_free_ptdkey(rtai_adeos_ptdbase);
    adeos_free_ptdkey(rtai_adeos_ptdbase + 1);
    /* Give pending activity a chance to complete before unload. */
    current->state = TASK_INTERRUPTIBLE;
    schedule_timeout(HZ/20);
    printk(KERN_INFO "RTAI[hal]: unmounted.\n");
}

module_init(__rtai_hal_init);
module_exit(__rtai_hal_exit);
01319
01320
01321
01322
01323
01324
01325
01326
01327
01328
/* Ring buffer feeding deferred printk output to Linux. */
#define PRINTK_BUF_SIZE (10000) // Test programs may generate much output. PC
#define TEMP_BUF_SIZE (500)

static char rt_printk_buf[PRINTK_BUF_SIZE];

/* Ring indices: producers advance buf_front (rt_printk), the SRQ
 * handler drains from buf_back. */
static int buf_front, buf_back;
/* Scratch buffer for one formatted message. */
static char buf[TEMP_BUF_SIZE];
01336
01337 int rt_printk (
const char *fmt, ...)
01338 {
01339
unsigned long flags;
01340
static spinlock_t display_lock = SPIN_LOCK_UNLOCKED;
01341 va_list args;
01342
int len, i;
01343
01344
flags =
rt_spin_lock_irqsave(&display_lock);
01345 va_start(args, fmt);
01346 len = vsprintf(
buf, fmt, args);
01347 va_end(args);
01348
if ((
buf_front + len) >=
PRINTK_BUF_SIZE) {
01349 i =
PRINTK_BUF_SIZE -
buf_front;
01350 memcpy(
rt_printk_buf +
buf_front,
buf, i);
01351 memcpy(
rt_printk_buf,
buf + i, len - i);
01352
buf_front = len - i;
01353 }
else {
01354 memcpy(
rt_printk_buf +
buf_front,
buf, len);
01355
buf_front += len;
01356 }
01357
rt_spin_unlock_irqrestore(
flags, &display_lock);
01358
rt_pend_linux_srq(
RT_PRINTK_SRQ);
01359
01360
return len;
01361 }
01362
/*
 * Kernel-side handler for RT_PRINTK_SRQ: drains the rt_printk ring
 * buffer to the Linux console.  buf_front is sampled once per pass;
 * a wrapped region (buf_back > sampled front) is emitted in two
 * pieces.  Loops until the buffer is empty at sample time.
 */
static void rt_printk_srq_handler (void)
{
    int tmp;

    while(1) {
        tmp = buf_front;
        if (buf_back > tmp) {
            /* Wrapped: flush the tail segment up to the buffer end. */
            printk("%.*s", PRINTK_BUF_SIZE - buf_back, rt_printk_buf + buf_back);
            buf_back = 0;
        }
        if (buf_back == tmp) {
            break;
        }
        printk("%.*s", tmp - buf_back, rt_printk_buf + buf_back);
        buf_back = tmp;
    }
}
01380
01381
01382
01383
01384
/*
 * Convert a signed 64-bit value to its decimal ASCII representation in
 * 's' (caller-provided, must be large enough: up to 20 digits plus
 * sign and NUL).  Returns 's'.  s[0] is used as a temporary negative
 * flag before being overwritten with '-' or the first digit.  Digits
 * are produced least-significant first into a[], then copied out in
 * reverse.  64-bit division is done via rtai_ulldiv() until the value
 * fits 32 bits, then with native 32-bit arithmetic on the low word
 * (the (unsigned long *)&ll [LOW] access picks the low half by
 * endianness).  NOTE(review): ll = -ll overflows for LLONG_MIN.
 */
void *ll2a (long long ll, char *s)
{
    unsigned long i, k, ul;
    char a[20];

    if (ll < 0) {
        s[0] = 1;   /* remember the sign in-band for now */
        ll = -ll;
    } else {
        s[0] = 0;
    }
    i = 0;
    /* Peel digits with 64-bit division while the value exceeds 32 bits. */
    while (ll > 0xFFFFFFFF) {
        ll = rtai_ulldiv(ll, 10, &k);
        a[++i] = k + '0';
    }
    /* Remaining value fits in the low 32-bit word. */
    ul = ((unsigned long *)&ll)[LOW];
    do {
        ul = (k = ul)/10;
        a[++i] = k - ul*10 + '0';
    } while (ul);
    if (s[0]) {
        k = 1;
        s[0] = '-';
    } else {
        k = 0;
    }
    a[0] = 0;   /* terminator, copied last by the reverse loop below */
    while ((s[k++] = a[i--]));
    return s;
}
01416
01417
/* Public symbols exported to the RTAI scheduler and other modules. */
EXPORT_SYMBOL(rtai_realtime_irq);
EXPORT_SYMBOL(rt_request_irq);
EXPORT_SYMBOL(rt_release_irq);
EXPORT_SYMBOL(rt_set_irq_cookie);
EXPORT_SYMBOL(rt_set_irq_retmode);
EXPORT_SYMBOL(rt_startup_irq);
EXPORT_SYMBOL(rt_shutdown_irq);
EXPORT_SYMBOL(rt_enable_irq);
EXPORT_SYMBOL(rt_disable_irq);
EXPORT_SYMBOL(rt_mask_and_ack_irq);
EXPORT_SYMBOL(rt_unmask_irq);
EXPORT_SYMBOL(rt_ack_irq);
EXPORT_SYMBOL(rt_request_linux_irq);
EXPORT_SYMBOL(rt_free_linux_irq);
EXPORT_SYMBOL(rt_pend_linux_irq);
EXPORT_SYMBOL(rt_request_srq);
EXPORT_SYMBOL(rt_free_srq);
EXPORT_SYMBOL(rt_pend_linux_srq);
EXPORT_SYMBOL(rt_assign_irq_to_cpu);
EXPORT_SYMBOL(rt_reset_irq_to_sym_mode);
EXPORT_SYMBOL(rt_request_apic_timers);
EXPORT_SYMBOL(rt_request_timer);
EXPORT_SYMBOL(rt_free_timer);
EXPORT_SYMBOL(rt_set_ihook);

EXPORT_SYMBOL(rtai_critical_enter);
EXPORT_SYMBOL(rtai_critical_exit);
EXPORT_SYMBOL(rtai_set_linux_task_priority);

EXPORT_SYMBOL(rtai_linux_context);
EXPORT_SYMBOL(rtai_domain);
EXPORT_SYMBOL(rtai_proc_root);
EXPORT_SYMBOL(rtai_tunables);
EXPORT_SYMBOL(rtai_cpu_lock);
EXPORT_SYMBOL(rtai_cpu_realtime);
EXPORT_SYMBOL(rt_times);
EXPORT_SYMBOL(rt_smp_times);

EXPORT_SYMBOL(rt_printk);
EXPORT_SYMBOL(ll2a);

EXPORT_SYMBOL(rtai_lxrt_invoke_entry);
EXPORT_SYMBOL(rt_scheduling);
01460