base/include/asm-ppc/rtai_atomic.h

/*
 * Copyright (C) 2003 Philippe Gerum <rpm@xenomai.org>.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */

#ifndef _RTAI_ASM_PPC_ATOMIC_H
#define _RTAI_ASM_PPC_ATOMIC_H

#ifdef __KERNEL__

#include <linux/version.h> /* LINUX_VERSION_CODE, KERNEL_VERSION used below */
#include <linux/bitops.h>
#include <asm/system.h>
#include <asm/atomic.h>

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15)
/* Kernels older than 2.6.15 do not provide atomic_xchg()/atomic_cmpxchg();
 * supply them on top of the generic xchg()/cmpxchg() helpers. */
#define atomic_xchg(ptr, v)  xchg(ptr,v)
static __inline__ unsigned long atomic_cmpxchg(void *ptr, unsigned long o, unsigned long n)
{
    unsigned long *p = ptr;
    return cmpxchg(p, o, n);
}
#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15) */

#else /* !__KERNEL__ */

typedef struct { volatile int counter; } atomic_t;

/* The user-space implementations below are taken verbatim from the
 * Linux kernel sources for 32-bit PowerPC. */

#ifdef CONFIG_SMP
#define SMP_SYNC    "sync"
#define SMP_ISYNC   "\n\tisync"
#else
#define SMP_SYNC    ""
#define SMP_ISYNC
#endif

/* Erratum #77 on the 405 means we need a sync or dcbt before every stwcx.
 * The old ATOMIC_SYNC_FIX covered some but not all of this.
 */
#ifdef CONFIG_IBM405_ERR77
#define PPC405_ERR77(ra,rb) "dcbt " #ra "," #rb ";"
#else
#define PPC405_ERR77(ra,rb)
#endif

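/*
 * The primitives below all use the PowerPC reservation pattern:
 * lwarx loads a word and places a reservation on its address,
 * stwcx. stores back only while the reservation still holds and
 * records success in CR0, and bne- retries from label 1 when the
 * store lost the race.  SMP_ISYNC and the conditional sync provide
 * the ordering needed on SMP, and PPC405_ERR77 inserts the dcbt
 * required by the 405 erratum before each stwcx.
 */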
static __inline__ void atomic_inc(atomic_t *v)
{
    int t;

    __asm__ __volatile__(
"1: lwarx   %0,0,%2     # atomic_inc\n\
    addic   %0,%0,1\n"
    PPC405_ERR77(0,%2)
"   stwcx.  %0,0,%2 \n\
    bne-    1b"
    : "=&r" (t), "=m" (v->counter)
    : "r" (&v->counter), "m" (v->counter)
    : "cc");
}

static __inline__ int atomic_dec_return(atomic_t *v)
{
    int t;

    __asm__ __volatile__(
"1: lwarx   %0,0,%1     # atomic_dec_return\n\
    addic   %0,%0,-1\n"
    PPC405_ERR77(0,%1)
"   stwcx.  %0,0,%1\n\
    bne-    1b"
    SMP_ISYNC
    : "=&r" (t)
    : "r" (&v->counter)
    : "cc", "memory");

    return t;
}

#define atomic_dec_and_test(v)      (atomic_dec_return((v)) == 0)

#define __HAVE_ARCH_CMPXCHG 1

static __inline__ unsigned long
__cmpxchg_u32(volatile int *p, int old, int new)
{
    int prev;

    __asm__ __volatile__ ("\n\
1:  lwarx   %0,0,%2 \n\
    cmpw    0,%0,%3 \n\
    bne 2f \n"
    PPC405_ERR77(0,%2)
"   stwcx.  %4,0,%2 \n\
    bne-    1b\n"
#ifdef CONFIG_SMP
"   sync\n"
#endif /* CONFIG_SMP */
"2:"
    : "=&r" (prev), "=m" (*p)
    : "r" (p), "r" (old), "r" (new), "m" (*p)
    : "cc", "memory");

    return prev;
}

static __inline__ unsigned long
__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size)
{
    switch (size) {
    case 4:
        return __cmpxchg_u32(ptr, old, new);
#if 0   /* we don't have __cmpxchg_u64 on 32-bit PPC */
    case 8:
        return __cmpxchg_u64(ptr, old, new);
#endif /* 0 */
    }
    return old;
}

#define cmpxchg(ptr,o,n)                                                  \
  ({                                                                      \
     __typeof__(*(ptr)) _o_ = (o);                                        \
     __typeof__(*(ptr)) _n_ = (n);                                        \
     (__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_,            \
                                    (unsigned long)_n_, sizeof(*(ptr)));  \
  })

static __inline__ unsigned long atomic_cmpxchg(void *ptr, unsigned long o, unsigned long n)
{
    unsigned long *p = ptr;
    return cmpxchg(p, o, n);
}

#endif /* __KERNEL__ */

#endif /* !_RTAI_ASM_PPC_ATOMIC_H */
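
For orientation, a minimal user-space sketch of how these primitives could be combined follows. It is an illustration only, not part of the header: the include path, the hits/owner variables, and the helper names are invented for the example, and it assumes a 32-bit PowerPC build (the inline assembly above is PPC-only, and unsigned long must be 4 bytes so cmpxchg() takes the 4-byte path).

/* Hypothetical usage sketch: a shared event counter and a single-owner
 * flag built only on the user-space primitives defined above. */
#include "rtai_atomic.h"        /* assumed include path for this header */

static atomic_t hits = { 0 };   /* shared event counter */
static unsigned long owner = 0; /* 0 means "no owner" */

static void on_event(void)
{
    atomic_inc(&hits);          /* lwarx/stwcx. retry loop inside */
}

static int try_claim(unsigned long me)
{
    /* Compare-and-swap 0 -> me; atomic_cmpxchg() returns the previous
     * value, so the claim succeeded only if that value was 0. */
    return atomic_cmpxchg(&owner, 0, me) == 0;
}

static int drop_event_and_check_idle(void)
{
    /* Nonzero when this decrement brings the counter back to zero. */
    return atomic_dec_and_test(&hits);
}

In kernel space the same call sites resolve to the regular Linux atomic API (plus the pre-2.6.15 atomic_cmpxchg/atomic_xchg shim), so code using these names stays identical on both sides of the #ifdef __KERNEL__ split.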
