00001
00002
00003
00004
00005
00006
00007
00008
00009
00010
00011
00012
00013
00014
00015
00016
00017
00018
00019 #ifndef _RTAI_ASM_PPC_ATOMIC_H
00020 #define _RTAI_ASM_PPC_ATOMIC_H
00021
00022 #ifdef __KERNEL__
00023
00024 #include <linux/bitops.h>
00025 #include <asm/system.h>
00026 #include <asm/atomic.h>
00027
/*
 * Compatibility shims for kernels older than 2.6.15, which do not yet
 * provide atomic_xchg()/atomic_cmpxchg(): fall back to the generic
 * xchg()/cmpxchg() primitives pulled in via <asm/system.h>.
 */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15)
#define atomic_xchg(ptr, v) xchg(ptr,v)
/*
 * cmpxchg() on a word addressed through a void pointer.
 * Returns the value previously stored at ptr (equal to o iff the
 * swap took effect).
 */
static __inline__ unsigned long atomic_cmpxchg(void *ptr, unsigned long o, unsigned long n)
{
	unsigned long *p = ptr;
	return cmpxchg(p, o, n);
}
#endif
00036
00037 #else
00038
/*
 * Userland view of the kernel's atomic type: a plain int accessed only
 * through the lwarx/stwcx.-based helpers defined below.
 */
typedef struct { volatile int counter; } atomic_t;

/*
 * Barrier fragments spliced into the inline-asm sequences below.
 * On SMP, SMP_ISYNC appends an "isync" after a successful atomic
 * sequence (see atomic_dec_return); on UP both expand to nothing.
 * NOTE(review): SMP_SYNC is defined here but not referenced in the
 * visible portion of this file.
 */
#ifdef CONFIG_SMP
#define SMP_SYNC "sync"
#define SMP_ISYNC "\n\tisync"
#else
#define SMP_SYNC ""
#define SMP_ISYNC
#endif

/*
 * Workaround for IBM PPC405 erratum #77: inserts a "dcbt" immediately
 * before each stwcx. in the sequences below (see IBM's 405 errata sheet
 * for the failure mode).  No-op on unaffected CPUs.
 */
#ifdef CONFIG_IBM405_ERR77
#define PPC405_ERR77(ra,rb) "dcbt " #ra "," #rb ";"
#else
#define PPC405_ERR77(ra,rb)
#endif
00059
/*
 * Atomically increment v->counter by one.
 *
 * Classic PowerPC load-reserve/store-conditional loop: lwarx reserves
 * the word, addic adds 1, stwcx. stores only if the reservation still
 * holds, and bne- retries the whole sequence otherwise.  No value is
 * returned and no memory barrier is implied.
 */
static __inline__ void atomic_inc(atomic_t *v)
{
	int t;

	__asm__ __volatile__(
"1:	lwarx	%0,0,%2		# atomic_inc\n\
	addic	%0,%0,1\n"
	PPC405_ERR77(0,%2)
"	stwcx.	%0,0,%2 \n\
	bne-	1b"
	: "=&r" (t), "=m" (v->counter)
	: "r" (&v->counter), "m" (v->counter)
	: "cc");
}
00074
/*
 * Atomically decrement v->counter by one and return the new value.
 *
 * Same lwarx/stwcx. retry loop as atomic_inc, with an "isync"
 * (SMP_ISYNC) appended on SMP builds after the store-conditional
 * succeeds; the "memory" clobber keeps the compiler from caching
 * memory operands across the call.
 */
static __inline__ int atomic_dec_return(atomic_t *v)
{
	int t;

	__asm__ __volatile__(
"1:	lwarx	%0,0,%1		# atomic_dec_return\n\
	addic	%0,%0,-1\n"
	PPC405_ERR77(0,%1)
"	stwcx.	%0,0,%1\n\
	bne-	1b"
	SMP_ISYNC
	: "=&r" (t)
	: "r" (&v->counter)
	: "cc", "memory");

	return t;
}
00092
/* True iff the decrement brought the counter exactly to zero. */
#define atomic_dec_and_test(v) (atomic_dec_return((v)) == 0)

/* Advertise that this header provides a native cmpxchg implementation. */
#define __HAVE_ARCH_CMPXCHG 1
00096
/*
 * Compare-and-swap on a 32-bit word.
 *
 * Returns the value found at *p before the operation; the swap
 * succeeded iff that value equals 'old'.  The lwarx/stwcx. loop
 * retries only when the reservation was lost (bne- 1b after stwcx.);
 * a compare mismatch branches straight to label 2 without storing.
 * On SMP a "sync" on the success path orders the store before return.
 */
static __inline__ unsigned long
__cmpxchg_u32(volatile int *p, int old, int new)
{
	int prev;

	__asm__ __volatile__ ("\n\
1:	lwarx	%0,0,%2 \n\
	cmpw	0,%0,%3 \n\
	bne	2f \n"
	PPC405_ERR77(0,%2)
"	stwcx.	%4,0,%2 \n\
	bne-	1b\n"
#ifdef CONFIG_SMP
"	sync\n"
#endif
"2:"
	: "=&r" (prev), "=m" (*p)
	: "r" (p), "r" (old), "r" (new), "m" (*p)
	: "cc", "memory");

	return prev;
}
00119
/*
 * Size-dispatching front end for the cmpxchg() macro.  Only 4-byte
 * objects are supported on 32-bit PowerPC (the 64-bit variant was
 * compiled out in the original source).  For any other size the value
 * 'old' is handed back untouched, which the caller sees as a
 * successful no-op.
 */
static __inline__ unsigned long
__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size)
{
	if (size == 4)
		return __cmpxchg_u32(ptr, old, new);
	/* Unsupported width: no __cmpxchg_u64 on this architecture. */
	return old;
}
00133
/*
 * Type-generic cmpxchg(ptr, o, n): captures both operands with the
 * pointee's type, dispatches through __cmpxchg() keyed on
 * sizeof(*ptr), and casts the previous value back to that type.
 * Statement-expression form, so it can be used as an rvalue.
 */
#define cmpxchg(ptr,o,n)						 \
  ({									 \
     __typeof__(*(ptr)) _o_ = (o);					 \
     __typeof__(*(ptr)) _n_ = (n);					 \
     (__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_,		 \
				    (unsigned long)_n_, sizeof(*(ptr))); \
  })
00141
/*
 * cmpxchg on a word addressed through a void pointer.
 * Returns the value previously stored at ptr (equal to o iff the
 * swap took effect).
 */
static __inline__ unsigned long atomic_cmpxchg(void *ptr, unsigned long o, unsigned long n)
{
	return cmpxchg((unsigned long *)ptr, o, n);
}
00147
00148 #endif
00149
00150 #endif