Atomic_Op.cpp

// $Id: Atomic_Op.cpp 80826 2008-03-04 14:51:23Z wotte $

#include "ace/Atomic_Op.h"
#include "ace/OS_NS_unistd.h"

ACE_RCSID (ace,
           Atomic_Op,
           "$Id: Atomic_Op.cpp 80826 2008-03-04 14:51:23Z wotte $")

#if !defined (__ACE_INLINE__)
#include "ace/Atomic_Op.inl"
#endif /* __ACE_INLINE__ */

#if defined (ACE_HAS_BUILTIN_ATOMIC_OP)

#if defined (ACE_INCLUDE_ATOMIC_OP_SPARC)
# include "ace/Atomic_Op_Sparc.h"
#endif /* ACE_INCLUDE_ATOMIC_OP_SPARC */

namespace {

#if defined (_MSC_VER)
// Disable "no return value" warning, as we will be putting
// the return values directly into the EAX register.
#pragma warning (push)
#pragma warning (disable: 4035)
#endif /* _MSC_VER */

long
single_cpu_increment (volatile long *value)
{
#if defined (ACE_HAS_INTEL_ASSEMBLY)
  long tmp = 1;
  unsigned long addr = reinterpret_cast<unsigned long> (value);
  asm( "xadd %0, (%1)" : "+r"(tmp) : "r"(addr) );
  return tmp + 1;
#elif defined (sun) || \
     (defined (__SUNPRO_CC) && (defined (__i386) || defined (__x86_64)))
  return ace_atomic_add_long (
           reinterpret_cast<volatile unsigned long*> (value), 1);
#elif defined (__GNUC__) && defined (PPC)
  long tmp;
  asm("lwz %0,%1" : "=r" (tmp) : "m" (*value) );
  asm("addi %0,%0,1" : "+r" (tmp) );
  asm("stw %0,%1" : "+r" (tmp), "=m" (*value) );
  return tmp;
#else /* ACE_HAS_INTEL_ASSEMBLY */
  ACE_UNUSED_ARG (value);
  ACE_NOTSUP_RETURN (-1);
#endif /* ACE_HAS_INTEL_ASSEMBLY */
}
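
// Note (ours, not in the original source): XADD stores the sum back to
// memory and leaves the *prior* value of *value in its register operand,
// so `tmp + 1' is the freshly incremented value. A plain-C sketch of the
// computation, ignoring concurrency:
//
//   long old = *value;   // what xadd leaves in tmp
//   *value = old + 1;    // what xadd stores back
//   return old + 1;      // hence `return tmp + 1' above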

long
single_cpu_decrement (volatile long *value)
{
#if defined (ACE_HAS_INTEL_ASSEMBLY)
  long tmp = -1;
  unsigned long addr = reinterpret_cast<unsigned long> (value);
  asm( "xadd %0, (%1)" : "+r"(tmp) : "r"(addr) );
  return tmp - 1;
#elif defined (sun) || \
     (defined (__SUNPRO_CC) && (defined (__i386) || defined (__x86_64)))
  return ace_atomic_add_long (
            reinterpret_cast<volatile unsigned long*> (value), -1);
#elif defined (__GNUC__) && defined (PPC)
  long tmp;
  asm("lwz %0,%1" : "=r" (tmp) : "m" (*value) );
  asm("addi %0,%0,-1" : "+r" (tmp) );
  asm("stw %0,%1" : "+r" (tmp), "=m" (*value) );
  return tmp;
#else /* ACE_HAS_INTEL_ASSEMBLY */
  ACE_UNUSED_ARG (value);
  ACE_NOTSUP_RETURN (-1);
#endif /* ACE_HAS_INTEL_ASSEMBLY */
}
long
single_cpu_exchange (volatile long *value, long rhs)
{
#if defined (ACE_HAS_INTEL_ASSEMBLY)
  unsigned long addr = reinterpret_cast<unsigned long> (value);
  asm( "xchg %0, (%1)" : "+r"(rhs) : "r"(addr) );
  return rhs;
#elif defined (sun) || \
     (defined (__SUNPRO_CC) && (defined (__i386) || defined (__x86_64)))
  return ace_atomic_swap_long (
           reinterpret_cast<volatile unsigned long*> (value), rhs);
#elif defined (__GNUC__) && defined (PPC)
  // Load the prior value, then store the replacement; an exchange
  // must return the value that was swapped out, not the new one.
  long tmp;
  asm("lwz %0,%1" : "=r" (tmp) : "m" (*value) );
  asm("stw %0,%1" : "+r" (rhs), "=m" (*value) );
  return tmp;
#else /* ACE_HAS_INTEL_ASSEMBLY */
  ACE_UNUSED_ARG (value);
  ACE_UNUSED_ARG (rhs);
  ACE_NOTSUP_RETURN (-1);
#endif /* ACE_HAS_INTEL_ASSEMBLY */
}

long
single_cpu_exchange_add (volatile long *value, long rhs)
{
#if defined (ACE_HAS_INTEL_ASSEMBLY)
  unsigned long addr = reinterpret_cast<unsigned long> (value);
  asm( "xadd %0, (%1)" : "+r"(rhs) : "r"(addr) );
  return rhs;
#elif defined (sun) || \
     (defined (__SUNPRO_CC) && (defined (__i386) || defined (__x86_64)))
  return ace_atomic_swap_add_long (
           reinterpret_cast<volatile unsigned long*> (value), rhs);
#elif defined (__GNUC__) && defined (PPC)
  long tmp;
  asm("add %0,%1,%2" : "=r" (tmp) : "r" (*value), "r" (rhs) );
  asm("stw %0,%1" : "+r" (tmp), "=m" (*value) );
  // Like XADD, return the value held before the addition.
  return tmp - rhs;
#elif defined (WIN32) && !defined (ACE_HAS_INTERLOCKED_EXCHANGEADD)
# if defined (_MSC_VER)
  __asm
    {
      mov eax, rhs
      mov edx, value
      xadd [edx], eax
    }
  // Return value is already in EAX register.
# elif defined (__BORLANDC__)
  _EAX = rhs;
  _EDX = reinterpret_cast<unsigned long> (value);
  __emit__(0x0F, 0xC1, 0x02); // xadd [edx], eax
  // Return value is already in EAX register.
# else /* _MSC_VER */
  ACE_UNUSED_ARG (value);
  ACE_UNUSED_ARG (rhs);
  ACE_NOTSUP_RETURN (-1);
# endif /* _MSC_VER */
#else /* ACE_HAS_INTEL_ASSEMBLY */
  ACE_UNUSED_ARG (value);
  ACE_UNUSED_ARG (rhs);
  ACE_NOTSUP_RETURN (-1);
#endif /* ACE_HAS_INTEL_ASSEMBLY */
}
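
// Note (ours): the _MSC_VER and __BORLANDC__ branches above exploit the
// x86 calling convention, which returns integral values in EAX. After
// `xadd [edx], eax' executes, the prior value of *value is already in
// EAX, so these branches simply fall off the end of the function; this
// is why warning 4035 ("no return value") is disabled at the top of
// this namespace.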

long
multi_cpu_increment (volatile long *value)
{
#if defined (ACE_HAS_INTEL_ASSEMBLY)
  long tmp = 1;
  unsigned long addr = reinterpret_cast<unsigned long> (value);
  asm( "lock ; xadd %0, (%1)" : "+r"(tmp) : "r"(addr) );
  return tmp + 1;
#elif defined (sun) || \
     (defined (__SUNPRO_CC) && (defined (__i386) || defined (__x86_64)))
  return ace_atomic_add_long (
           reinterpret_cast<volatile unsigned long*> (value), 1);
#else /* ACE_HAS_INTEL_ASSEMBLY */
  ACE_UNUSED_ARG (value);
  ACE_NOTSUP_RETURN (-1);
#endif /* ACE_HAS_INTEL_ASSEMBLY */
}

long
multi_cpu_decrement (volatile long *value)
{
#if defined (ACE_HAS_INTEL_ASSEMBLY)
  long tmp = -1;
  unsigned long addr = reinterpret_cast<unsigned long> (value);
  asm( "lock ; xadd %0, (%1)" : "+r"(tmp) : "r"(addr) );
  return tmp - 1;
#elif defined (sun) || \
     (defined (__SUNPRO_CC) && (defined (__i386) || defined (__x86_64)))
  return ace_atomic_add_long (
           reinterpret_cast<volatile unsigned long*> (value), -1);
#else /* ACE_HAS_INTEL_ASSEMBLY */
  ACE_UNUSED_ARG (value);
  ACE_NOTSUP_RETURN (-1);
#endif /* ACE_HAS_INTEL_ASSEMBLY */
}
00177 
00178 long
00179 multi_cpu_exchange (volatile long *value, long rhs)
00180 {
00181 #if defined (ACE_HAS_INTEL_ASSEMBLY)
00182   unsigned long addr = reinterpret_cast<unsigned long> (value);
00183   // The XCHG instruction automatically follows LOCK semantics
00184   asm( "xchg %0, (%1)" : "+r"(rhs) : "r"(addr) );
00185   return rhs;
00186 #elif defined (sun) || \
00187      (defined (__SUNPRO_CC) && (defined (__i386) || defined (__x86_64)))
00188   return ace_atomic_swap_long (
00189            reinterpret_cast<volatile unsigned long*> (value), rhs);
00190 #else /* ACE_HAS_INTEL_ASSEMBLY*/
00191   ACE_UNUSED_ARG (value);
00192   ACE_UNUSED_ARG (rhs);
00193   ACE_NOTSUP_RETURN (-1);
00194 #endif /* ACE_HAS_INTEL_ASSEMBLY*/
00195 }

long
multi_cpu_exchange_add (volatile long *value, long rhs)
{
#if defined (ACE_HAS_INTEL_ASSEMBLY)
  unsigned long addr = reinterpret_cast<unsigned long> (value);
  asm( "lock ; xadd %0, (%1)" : "+r"(rhs) : "r"(addr) );
  return rhs;
#elif defined (sun) || \
     (defined (__SUNPRO_CC) && (defined (__i386) || defined (__x86_64)))
  return ace_atomic_swap_add_long (
           reinterpret_cast<volatile unsigned long*> (value), rhs);
#elif defined (WIN32) && !defined (ACE_HAS_INTERLOCKED_EXCHANGEADD)
# if defined (_MSC_VER)
  __asm
    {
      mov eax, rhs
      mov edx, value
      lock xadd [edx], eax
    }
  // Return value is already in EAX register.
# elif defined (__BORLANDC__)
  _EAX = rhs;
  _EDX = reinterpret_cast<unsigned long> (value);
  __emit__(0xF0, 0x0F, 0xC1, 0x02); // lock xadd [edx], eax
  // Return value is already in EAX register.
# else /* _MSC_VER */
  ACE_UNUSED_ARG (value);
  ACE_UNUSED_ARG (rhs);
  ACE_NOTSUP_RETURN (-1);
# endif /* _MSC_VER */
#else /* ACE_HAS_INTEL_ASSEMBLY */
  ACE_UNUSED_ARG (value);
  ACE_UNUSED_ARG (rhs);
  ACE_NOTSUP_RETURN (-1);
#endif /* ACE_HAS_INTEL_ASSEMBLY */
}

#if defined (_MSC_VER)
#pragma warning (pop)
#endif /* _MSC_VER */

} // end namespace
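
// For comparison (our addition, not part of ACE): on a compiler with
// C++11 support, the helpers above map directly onto std::atomic.
// A minimal sketch:
//
//   #include <atomic>
//
//   long modern_exchange_add (std::atomic<long> &value, long rhs)
//   {
//     return value.fetch_add (rhs);  // returns the prior value, like `lock xadd'
//   }
//
//   long modern_exchange (std::atomic<long> &value, long rhs)
//   {
//     return value.exchange (rhs);   // returns the replaced value, like XCHG
//   }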

ACE_BEGIN_VERSIONED_NAMESPACE_DECL

long (*ACE_Atomic_Op<ACE_Thread_Mutex, long>::increment_fn_) (volatile long *) = multi_cpu_increment;
long (*ACE_Atomic_Op<ACE_Thread_Mutex, long>::decrement_fn_) (volatile long *) = multi_cpu_decrement;
long (*ACE_Atomic_Op<ACE_Thread_Mutex, long>::exchange_fn_) (volatile long *, long) = multi_cpu_exchange;
long (*ACE_Atomic_Op<ACE_Thread_Mutex, long>::exchange_add_fn_) (volatile long *, long) = multi_cpu_exchange_add;

void
ACE_Atomic_Op<ACE_Thread_Mutex, long>::init_functions (void)
{
  if (ACE_OS::num_processors () == 1)
    {
      increment_fn_ = single_cpu_increment;
      decrement_fn_ = single_cpu_decrement;
      exchange_fn_ = single_cpu_exchange;
      exchange_add_fn_ = single_cpu_exchange_add;
    }
  else
    {
      increment_fn_ = multi_cpu_increment;
      decrement_fn_ = multi_cpu_decrement;
      exchange_fn_ = multi_cpu_exchange;
      exchange_add_fn_ = multi_cpu_exchange_add;
    }
}
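
// Usage sketch (ours; illustrative only): init_functions() is normally
// invoked during ACE initialization, after which every
// ACE_Atomic_Op<ACE_Thread_Mutex, long> dispatches through the selected
// function pointers:
//
//   ACE_Atomic_Op<ACE_Thread_Mutex, long> counter (0);
//   ++counter;       // calls increment_fn_ on the stored value
//   counter += 5;    // calls exchange_add_fn_
//   long const current = counter.value ();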

void
ACE_Atomic_Op<ACE_Thread_Mutex, long>::dump (void) const
{
#if defined (ACE_HAS_DUMP)
  ACE_DEBUG ((LM_DEBUG, ACE_BEGIN_DUMP, this));
  ACE_DEBUG ((LM_DEBUG, ACE_END_DUMP));
#endif /* ACE_HAS_DUMP */
}

long (*ACE_Atomic_Op<ACE_Thread_Mutex, unsigned long>::increment_fn_) (volatile long *) = multi_cpu_increment;
long (*ACE_Atomic_Op<ACE_Thread_Mutex, unsigned long>::decrement_fn_) (volatile long *) = multi_cpu_decrement;
long (*ACE_Atomic_Op<ACE_Thread_Mutex, unsigned long>::exchange_fn_) (volatile long *, long) = multi_cpu_exchange;
long (*ACE_Atomic_Op<ACE_Thread_Mutex, unsigned long>::exchange_add_fn_) (volatile long *, long) = multi_cpu_exchange_add;

void
ACE_Atomic_Op<ACE_Thread_Mutex, unsigned long>::init_functions (void)
{
  if (ACE_OS::num_processors () == 1)
    {
      increment_fn_ = single_cpu_increment;
      decrement_fn_ = single_cpu_decrement;
      exchange_fn_ = single_cpu_exchange;
      exchange_add_fn_ = single_cpu_exchange_add;
    }
  else
    {
      increment_fn_ = multi_cpu_increment;
      decrement_fn_ = multi_cpu_decrement;
      exchange_fn_ = multi_cpu_exchange;
      exchange_add_fn_ = multi_cpu_exchange_add;
    }
}

void
ACE_Atomic_Op<ACE_Thread_Mutex, unsigned long>::dump (void) const
{
#if defined (ACE_HAS_DUMP)
  ACE_DEBUG ((LM_DEBUG, ACE_BEGIN_DUMP, this));
  ACE_DEBUG ((LM_DEBUG, ACE_END_DUMP));
#endif /* ACE_HAS_DUMP */
}

ACE_END_VERSIONED_NAMESPACE_DECL

#endif /* ACE_HAS_BUILTIN_ATOMIC_OP */
