00001
00002
00003
00004
00005
00006
00007
00008
00009
00010
00011
00012
00013
00014
00015
00016
00017
00018
00019
00020
00021
00022
00023
00024
00025
00026
00027
00028
00029
00030
00031
00032
00033
00034
00035
00036
00037
00038
00039
00040
00041
00042
00043
00044 #ifndef _RTAI_ASM_ARM_ATOMIC_H
00045 #define _RTAI_ASM_ARM_ATOMIC_H
00046
00047 #include <asm/atomic.h>
00048
00049 #ifdef __KERNEL__
00050
00051 #include <linux/bitops.h>
00052 #include <asm/system.h>
00053
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,11)

/*
 * Kernels older than 2.6.11 do not provide atomic_xchg(); map it onto
 * the architecture's plain xchg() primitive.
 */
#define atomic_xchg(ptr,v) xchg(ptr,v)


/*
 * Compare-and-swap emulation for pre-2.6.11 kernels: *(p) is replaced
 * by n only if it currently equals o; the previous value is returned
 * either way.  Atomicity is obtained by disabling interrupts/preemption
 * with rtai_hw_lock()/rtai_hw_unlock() rather than with an atomic CPU
 * instruction, since ldrex/strex support cannot be assumed here.
 */
#define atomic_cmpxchg(p, o, n) ({ \
typeof(*(p)) __o = (o); \
typeof(*(p)) __n = (n); \
typeof(*(p)) __prev; \
unsigned long flags; \
rtai_hw_lock(flags); \
__prev = *(p); \
if (__prev == __o) \
*(p) = __n; \
rtai_hw_unlock(flags); \
__prev; })

#endif
00072
00073 #else
00074
00075 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
00076 #include <asm/proc/system.h>
00077 #else
00078 #include <asm/system.h>
00079 #endif
00080
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
/* User-space build: mirror the kernel's atomic_t layout (a single
 * volatile counter) so the ldrex/strex helpers below can be shared. */
typedef struct { volatile int counter; } atomic_t;
#endif
00084
00085 static inline unsigned long
00086 atomic_xchg(volatile void *ptr, unsigned long x)
00087 {
00088 asm volatile(
00089 "swp %0, %1, [%2]"
00090 : "=&r" (x)
00091 : "r" (x), "r" (ptr)
00092 : "memory"
00093 );
00094 return x;
00095 }
00096
/*
 * Atomically compare-and-swap the word at @ptr: if it equals @old,
 * store @new; in all cases return the value read from @ptr.  Uses an
 * ldrex/strex retry loop (ARMv6+).
 *
 * Fixes vs. the previous revision (see the upstream Linux
 * arch/arm atomic_cmpxchg implementation this mirrors):
 *  - "mov %0, #0" initializes the strex status register before the
 *    comparison.  Without it, when the teq fails strexeq is skipped and
 *    res is left holding garbage, so "while (res)" could spin forever
 *    (or exit before the store was actually attempted).
 *  - The address operand must be the pointer itself, "r" (ptr), not the
 *    dereferenced value "r" (*(unsigned long*)ptr) — ldrex/strexeq use
 *    %2 as an *address*, so passing the value made them access a bogus
 *    location.
 */
static inline unsigned long atomic_cmpxchg(volatile void *ptr, unsigned long old, unsigned long new)
{
	unsigned long oldval, res;

	do {
		__asm__ __volatile__("@ atomic_cmpxchg\n"
		"ldrex %1, [%2]\n"
		"mov %0, #0\n"
		"teq %1, %3\n"
		"strexeq %0, %4, [%2]\n"
		: "=&r" (res), "=&r" (oldval)
		: "r" (ptr), "r" (old), "r" (new)
		: "cc");
	} while (res);

	return oldval;
}
00113
00114
00115
00116
00117
00118
00119
00120
00121
00122
00123
00124
00125
00126
00127
00128
00129 static inline int atomic_add_return(int i, atomic_t *v)
00130 {
00131 unsigned long tmp;
00132 int result;
00133
00134 __asm__ __volatile__("@ atomic_add_return\n"
00135 "1: ldrex %0, [%2]\n"
00136 " add %0, %0, %3\n"
00137 " strex %1, %0, [%2]\n"
00138 " teq %1, #0\n"
00139 " bne 1b"
00140 : "=&r" (result), "=&r" (tmp)
00141 : "r" (&v->counter), "Ir" (i)
00142 : "cc");
00143
00144 return result;
00145 }
00146
00147 static inline int atomic_sub_return(int i, atomic_t *v)
00148 {
00149 unsigned long tmp;
00150 int result;
00151
00152 __asm__ __volatile__("@ atomic_sub_return\n"
00153 "1: ldrex %0, [%2]\n"
00154 " sub %0, %0, %3\n"
00155 " strex %1, %0, [%2]\n"
00156 " teq %1, #0\n"
00157 " bne 1b"
00158 : "=&r" (result), "=&r" (tmp)
00159 : "r" (&v->counter), "Ir" (i)
00160 : "cc");
00161
00162 return result;
00163 }
00164
/* Increment v; the (void) cast discards the returned new value. */
#define atomic_inc(v) (void) atomic_add_return(1, v)
/* Decrement v and return true iff the counter reached zero. */
#define atomic_dec_and_test(v) (atomic_sub_return(1, v) == 0)
00167
00168 #endif
00169 #endif