You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@stdcxx.apache.org by fa...@apache.org on 2007/09/06 21:05:30 UTC
svn commit: r573337 - in /incubator/stdcxx/trunk:
etc/config/windows/projects.js include/rw/_mutex.h src/i86/atomic.asm
src/i86_64/atomic.asm src/once.cpp
Author: faridz
Date: Thu Sep 6 12:05:30 2007
New Revision: 573337
URL: http://svn.apache.org/viewvc?rev=573337&view=rev
Log:
2007-09-06 Farid Zaripov <Fa...@epam.com>
* projects.js: Added definitions of the platform dependent files.
* i86/atomic.asm: New file with definitions of the __rw_atomic_xxx()
functions for Win32 platform.
* i86_64/atomic.asm: New file with definitions of the
__rw_atomic_xxx() functions for Windows/x64 platform.
* _mutex.h: Use new __rw_atomic_xxx() functions if corresponding
InterlockedXXX() functions are not present.
[_MSC_VER >= 1400]: Use intrinsic InterlockedXXX() functions
on MSVC 8 and higher.
* once.cpp [_WIN32 && _DLL]: Tell linker to export __rw_atomic_xxx()
functions, defined in .asm files.
Added:
incubator/stdcxx/trunk/src/i86/atomic.asm (with props)
incubator/stdcxx/trunk/src/i86_64/atomic.asm (with props)
Modified:
incubator/stdcxx/trunk/etc/config/windows/projects.js
incubator/stdcxx/trunk/include/rw/_mutex.h
incubator/stdcxx/trunk/src/once.cpp
Modified: incubator/stdcxx/trunk/etc/config/windows/projects.js
URL: http://svn.apache.org/viewvc/incubator/stdcxx/trunk/etc/config/windows/projects.js?rev=573337&r1=573336&r2=573337&view=diff
==============================================================================
--- incubator/stdcxx/trunk/etc/config/windows/projects.js (original)
+++ incubator/stdcxx/trunk/etc/config/windows/projects.js Thu Sep 6 12:05:30 2007
@@ -85,6 +85,10 @@
projectDefs.push(new Array(configureDef));
///////////////////////////////////////////////////////////////////////////////
+ // add platform dependent files
+ customFileDefs.push(new CustomFileDef("i86\\atomic.asm", "Win32", InitAsmTool));
+ customFileDefs.push(new CustomFileDef("i86_64\\atomic.asm", "x64", InitAsmTool));
+
var stdcxxDef = new ProjectDef(".stdcxx", typeLibrary);
stdcxxDef.VCProjDir = ProjectsDir;
stdcxxDef.FilterDefs.push(
Modified: incubator/stdcxx/trunk/include/rw/_mutex.h
URL: http://svn.apache.org/viewvc/incubator/stdcxx/trunk/include/rw/_mutex.h?rev=573337&r1=573336&r2=573337&view=diff
==============================================================================
--- incubator/stdcxx/trunk/include/rw/_mutex.h (original)
+++ incubator/stdcxx/trunk/include/rw/_mutex.h Thu Sep 6 12:05:30 2007
@@ -140,6 +140,9 @@
__declspec (dllimport) void __stdcall
DeleteCriticalSection (_RTL_CRITICAL_SECTION*);
+
+#if defined _RWSTD_INTERLOCKED_T && (!defined (_MSC_VER) || _MSC_VER < 1400)
+
__declspec (dllimport) long __stdcall
InterlockedIncrement (_RWSTD_INTERLOCKED_T*);
@@ -149,6 +152,12 @@
__declspec (dllimport) long __stdcall
InterlockedExchange (_RWSTD_INTERLOCKED_T*, long);
+# define _InterlockedIncrement InterlockedIncrement
+# define _InterlockedDecrement InterlockedDecrement
+# define _InterlockedExchange InterlockedExchange
+
+#endif // _RWSTD_INTERLOCKED_T && (!_MSC_VER || _MSC_VER < 1400)
+
} // extern "C"
_RWSTD_NAMESPACE (__rw) {
@@ -166,6 +175,21 @@
# endif // _RWSTD_NO_FWD_DECLARATIONS
+# if _MSC_VER >= 1400
+# include <intrin.h>
+
+# pragma intrinsic (_InterlockedIncrement)
+# pragma intrinsic (_InterlockedIncrement16)
+# pragma intrinsic (_InterlockedDecrement)
+# pragma intrinsic (_InterlockedDecrement16)
+# pragma intrinsic (_InterlockedExchange)
+
+# ifdef _M_X64
+# pragma intrinsic (_InterlockedIncrement64)
+# pragma intrinsic (_InterlockedDecrement64)
+# pragma intrinsic (_InterlockedExchange64)
+# endif
+# endif // _MSC_VER >= 1400
_RWSTD_NAMESPACE (__rw) {
@@ -480,9 +504,9 @@
// up with multiple copies)
static volatile long __cntr /* = 0 */; // initialization counter
-#if defined (_WIN32) || defined (_WIN64)
+#if defined (_WIN32)
// MT safe
- if (0 == __cntr && 1 == InterlockedIncrement ((long*)&__cntr))
+ if (0 == __cntr && 1 == _InterlockedIncrement ((long*)&__cntr))
#else
// not so safe (volatile should help)
if (0 == __cntr && 1 == ++__cntr)
@@ -1161,19 +1185,20 @@
false);
}
-/********************** i386/gcc **************************************/
+/********************** i386/gcc || _M_IX86 *********************************/
-#elif defined (__i386__) && (defined (__GNUG__) || defined (__INTEL_COMPILER))
+#elif defined (__i386__) && (defined (__GNUG__) \
+ || defined (__INTEL_COMPILER)) || defined (_M_IX86)
extern "C" {
-char __rw_atomic_add8 (char*, int);
-short __rw_atomic_add16 (short*, short);
-int __rw_atomic_add32 (int*, int);
-
-char __rw_atomic_xchg8 (char*, char);
-short __rw_atomic_xchg16 (short*, short);
-int __rw_atomic_xchg32 (int*, int);
+_RWSTD_EXPORT char __rw_atomic_add8 (char*, int);
+_RWSTD_EXPORT short __rw_atomic_add16 (short*, short);
+_RWSTD_EXPORT int __rw_atomic_add32 (int*, int);
+
+_RWSTD_EXPORT char __rw_atomic_xchg8 (char*, char);
+_RWSTD_EXPORT short __rw_atomic_xchg16 (short*, short);
+_RWSTD_EXPORT int __rw_atomic_xchg32 (int*, int);
} // extern "C"
@@ -1206,7 +1231,12 @@
__rw_atomic_preincrement (short &__x, bool)
{
_RWSTD_COMPILE_ASSERT (2 == sizeof (short));
+
+#if defined (_MSC_VER) && _MSC_VER >= 1400
+ return _InterlockedIncrement16 (&__x);
+#else
return __rw_atomic_add16 (&__x, +1);
+#endif
}
@@ -1214,7 +1244,12 @@
__rw_atomic_preincrement (unsigned short &__x, bool)
{
_RWSTD_COMPILE_ASSERT (2 == sizeof (unsigned short));
+
+#if defined (_MSC_VER) && _MSC_VER >= 1400
+ return _InterlockedIncrement16 (_RWSTD_REINTERPRET_CAST (short*, &__x));
+#else
return __rw_atomic_add16 (_RWSTD_REINTERPRET_CAST (short*, &__x), +1);
+#endif
}
@@ -1222,7 +1257,12 @@
__rw_atomic_preincrement (int &__x, bool)
{
_RWSTD_COMPILE_ASSERT (4 == sizeof (int));
+
+#ifdef _MSC_VER
+ return _InterlockedIncrement (_RWSTD_REINTERPRET_CAST (long*, &__x));
+#else
return __rw_atomic_add32 (&__x, 1);
+#endif
}
@@ -1230,7 +1270,12 @@
__rw_atomic_preincrement (unsigned int &__x, bool)
{
_RWSTD_COMPILE_ASSERT (4 == sizeof (unsigned int));
+
+#ifdef _MSC_VER
+ return _InterlockedIncrement (_RWSTD_REINTERPRET_CAST (long*, &__x));
+#else
return __rw_atomic_add32 (_RWSTD_REINTERPRET_CAST (int*, &__x), 1);
+#endif
}
@@ -1262,7 +1307,12 @@
__rw_atomic_predecrement (short &__x, bool)
{
_RWSTD_COMPILE_ASSERT (2 == sizeof (short));
+
+#if defined (_MSC_VER) && _MSC_VER >= 1400
+ return _InterlockedDecrement16 (&__x);
+#else
return __rw_atomic_add16 (&__x, -1);
+#endif
}
@@ -1270,7 +1320,12 @@
__rw_atomic_predecrement (unsigned short &__x, bool)
{
_RWSTD_COMPILE_ASSERT (2 == sizeof (unsigned short));
+
+#if defined (_MSC_VER) && _MSC_VER >= 1400
+ return _InterlockedDecrement16 (_RWSTD_REINTERPRET_CAST (short*, &__x));
+#else
return __rw_atomic_add16 (_RWSTD_REINTERPRET_CAST (short*, &__x), -1);
+#endif
}
@@ -1278,7 +1333,12 @@
__rw_atomic_predecrement (int &__x, bool)
{
_RWSTD_COMPILE_ASSERT (4 == sizeof (int));
+
+#ifdef _MSC_VER
+ return _InterlockedDecrement (_RWSTD_REINTERPRET_CAST (long*, &__x));
+#else
return __rw_atomic_add32 (&__x, -1);
+#endif
}
@@ -1286,7 +1346,12 @@
__rw_atomic_predecrement (unsigned int &__x, bool)
{
_RWSTD_COMPILE_ASSERT (4 == sizeof (unsigned int));
+
+#ifdef _MSC_VER
+ return _InterlockedDecrement (_RWSTD_REINTERPRET_CAST (long*, &__x));
+#else
return __rw_atomic_add32 (_RWSTD_REINTERPRET_CAST (int*, &__x), -1);
+#endif
}
@@ -1337,7 +1402,13 @@
__rw_atomic_exchange (int &__x, int __y, bool)
{
_RWSTD_COMPILE_ASSERT (4 == sizeof (int));
+
+#ifdef _MSC_VER
+ return _InterlockedExchange (_RWSTD_REINTERPRET_CAST (long*, &__x),
+ _RWSTD_STATIC_CAST (long, __y));
+#else
return __rw_atomic_xchg32 (&__x, __y);
+#endif
}
@@ -1345,88 +1416,48 @@
__rw_atomic_exchange (unsigned int &__x, unsigned int __y, bool)
{
_RWSTD_COMPILE_ASSERT (4 == sizeof (unsigned int));
+
+#ifdef _MSC_VER
+ return _InterlockedExchange (_RWSTD_REINTERPRET_CAST (long*, &__x),
+ _RWSTD_STATIC_CAST (long, __y));
+#else
return __rw_atomic_xchg32 (_RWSTD_REINTERPRET_CAST (int*, &__x),
_RWSTD_STATIC_CAST (int, __y));
+#endif
}
+/********************** IA64/x86_64/_M_X64 *****************************/
-/********************** WIN 32/64 ************************************/
-
-#elif defined (_WIN32)
-
-// Interlocked[In|De]crement functions atomically modify their argument
-// and return the new value
-
-// InterlockedExchange atomically sets the value pointed to by the first
-// argument to that of the second argument and returns the original value
-
-inline int
-__rw_atomic_preincrement (int &__x, bool)
-{
- _RWSTD_COMPILE_ASSERT (sizeof __x == sizeof (long));
- return InterlockedIncrement (_RWSTD_REINTERPRET_CAST (long*, &__x));
-}
-
-
-inline unsigned int
-__rw_atomic_preincrement (unsigned int &__x, bool)
-{
- return __rw_atomic_preincrement (_RWSTD_REINTERPRET_CAST (int&, __x),
- false);
-}
-
-
-inline int
-__rw_atomic_predecrement (int &__x, bool)
-{
- _RWSTD_COMPILE_ASSERT (sizeof __x == sizeof (long));
- return InterlockedDecrement (_RWSTD_REINTERPRET_CAST (long*, &__x));
-}
-
-
-inline unsigned int
-__rw_atomic_predecrement (unsigned int &__x, bool)
-{
- return __rw_atomic_predecrement (_RWSTD_REINTERPRET_CAST (int&, __x),
- false);
-}
-
-
-inline int
-__rw_atomic_exchange (int &__x, int __y, bool)
-{
- _RWSTD_COMPILE_ASSERT (sizeof __x == sizeof (long));
- return InterlockedExchange (_RWSTD_REINTERPRET_CAST (long*, &__x),
- _RWSTD_STATIC_CAST (long, __y));
-}
+#elif defined (__ia64) || defined (__x86_64) || defined (_M_X64)
+extern "C" {
-inline unsigned int
-__rw_atomic_exchange (unsigned int &__x, unsigned int __y, bool)
-{
- return __rw_atomic_exchange (_RWSTD_REINTERPRET_CAST (int&, __x),
- _RWSTD_STATIC_CAST (int, __y), false);
-}
+_RWSTD_EXPORT _RWSTD_INT8_T
+__rw_atomic_xchg8 (_RWSTD_INT8_T*, _RWSTD_INT8_T);
-/********************** IA64/x86_64 ***********************************/
+_RWSTD_EXPORT _RWSTD_INT16_T
+__rw_atomic_xchg16 (_RWSTD_INT16_T*, _RWSTD_INT16_T);
-#elif defined (__ia64) || defined (__x86_64)
+_RWSTD_EXPORT _RWSTD_INT32_T
+__rw_atomic_xchg32 (_RWSTD_INT32_T*, _RWSTD_INT32_T);
-extern "C" {
-_RWSTD_INT8_T __rw_atomic_xchg8 (_RWSTD_INT8_T*, _RWSTD_INT8_T);
-_RWSTD_INT16_T __rw_atomic_xchg16 (_RWSTD_INT16_T*, _RWSTD_INT16_T);
-_RWSTD_INT32_T __rw_atomic_xchg32 (_RWSTD_INT32_T*, _RWSTD_INT32_T);
+_RWSTD_EXPORT _RWSTD_INT8_T
+__rw_atomic_add8 (_RWSTD_INT8_T*, _RWSTD_INT8_T);
+_RWSTD_EXPORT _RWSTD_INT16_T
+__rw_atomic_add16 (_RWSTD_INT16_T*, _RWSTD_INT16_T);
-_RWSTD_INT8_T __rw_atomic_add8 (_RWSTD_INT8_T*, _RWSTD_INT8_T);
-_RWSTD_INT16_T __rw_atomic_add16 (_RWSTD_INT16_T*, _RWSTD_INT16_T);
-_RWSTD_INT32_T __rw_atomic_add32 (_RWSTD_INT32_T*, _RWSTD_INT32_T);
+_RWSTD_EXPORT _RWSTD_INT32_T
+__rw_atomic_add32 (_RWSTD_INT32_T*, _RWSTD_INT32_T);
#ifdef _RWSTD_INT64_T
-_RWSTD_INT64_T __rw_atomic_xchg64 (_RWSTD_INT64_T*, _RWSTD_INT64_T);
-_RWSTD_INT64_T __rw_atomic_add64 (_RWSTD_INT64_T*, _RWSTD_INT64_T);
+_RWSTD_EXPORT _RWSTD_INT64_T
+__rw_atomic_xchg64 (_RWSTD_INT64_T*, _RWSTD_INT64_T);
+
+_RWSTD_EXPORT _RWSTD_INT64_T
+__rw_atomic_add64 (_RWSTD_INT64_T*, _RWSTD_INT64_T);
#endif // _RWSTD_INT64_T
@@ -1468,8 +1499,12 @@
{
_RWSTD_COMPILE_ASSERT (2 == sizeof (short));
+#ifdef _MSC_VER
+ return _InterlockedIncrement16 (&__x);
+#else
return __rw_atomic_add16 (_RWSTD_REINTERPRET_CAST (_RWSTD_INT16_T*, &__x),
+1);
+#endif
}
@@ -1478,8 +1513,12 @@
{
_RWSTD_COMPILE_ASSERT (2 == sizeof (unsigned short));
+#ifdef _MSC_VER
+ return _InterlockedIncrement16 (_RWSTD_REINTERPRET_CAST (short*, &__x));
+#else
return __rw_atomic_add16 (_RWSTD_REINTERPRET_CAST (_RWSTD_INT16_T*, &__x),
+1);
+#endif
}
@@ -1488,8 +1527,12 @@
{
_RWSTD_COMPILE_ASSERT (4 == sizeof (int));
+#ifdef _MSC_VER
+ return _InterlockedIncrement (_RWSTD_REINTERPRET_CAST (long*, &__x));
+#else
return __rw_atomic_add32 (_RWSTD_REINTERPRET_CAST (_RWSTD_INT32_T*, &__x),
+1);
+#endif
}
@@ -1498,8 +1541,12 @@
{
_RWSTD_COMPILE_ASSERT (4 == sizeof (unsigned int));
+#ifdef _MSC_VER
+ return _InterlockedIncrement (_RWSTD_REINTERPRET_CAST (long*, &__x));
+#else
return __rw_atomic_add32 (_RWSTD_REINTERPRET_CAST (_RWSTD_INT32_T*, &__x),
+1);
+#endif
}
@@ -1535,8 +1582,12 @@
{
_RWSTD_COMPILE_ASSERT (8 == sizeof (_RWSTD_LONG_LONG));
+#ifdef _MSC_VER
+ return _InterlockedIncrement64 (_RWSTD_REINTERPRET_CAST (__int64*, &__x));
+#else
return __rw_atomic_add64 (_RWSTD_REINTERPRET_CAST (_RWSTD_INT64_T*, &__x),
+1);
+#endif
}
@@ -1545,8 +1596,12 @@
{
_RWSTD_COMPILE_ASSERT (8 == sizeof (unsigned _RWSTD_LONG_LONG));
+#ifdef _MSC_VER
+ return _InterlockedIncrement64 (_RWSTD_REINTERPRET_CAST (__int64*, &__x));
+#else
return __rw_atomic_add64 (_RWSTD_REINTERPRET_CAST (_RWSTD_INT64_T*, &__x),
+1);
+#endif
}
# endif // _RWSTD_LLONG_SIZE > _RWSTD_LONG_SIZE
@@ -1588,8 +1643,12 @@
{
_RWSTD_COMPILE_ASSERT (2 == sizeof (short));
+#ifdef _MSC_VER
+ return _InterlockedDecrement16 (&__x);
+#else
return __rw_atomic_add16 (_RWSTD_REINTERPRET_CAST (_RWSTD_INT16_T*, &__x),
-1);
+#endif
}
@@ -1598,8 +1657,12 @@
{
_RWSTD_COMPILE_ASSERT (2 == sizeof (unsigned short));
+#ifdef _MSC_VER
+ return _InterlockedDecrement16 (_RWSTD_REINTERPRET_CAST (short*, &__x));
+#else
return __rw_atomic_add16 (_RWSTD_REINTERPRET_CAST (_RWSTD_INT16_T*, &__x),
-1);
+#endif
}
@@ -1608,8 +1671,12 @@
{
_RWSTD_COMPILE_ASSERT (4 == sizeof (int));
+#ifdef _MSC_VER
+ return _InterlockedDecrement (_RWSTD_REINTERPRET_CAST (long*, &__x));
+#else
return __rw_atomic_add32 (_RWSTD_REINTERPRET_CAST (_RWSTD_INT32_T*, &__x),
-1);
+#endif
}
@@ -1618,8 +1685,12 @@
{
_RWSTD_COMPILE_ASSERT (4 == sizeof (unsigned int));
+#ifdef _MSC_VER
+ return _InterlockedDecrement (_RWSTD_REINTERPRET_CAST (long*, &__x));
+#else
return __rw_atomic_add32 (_RWSTD_REINTERPRET_CAST (_RWSTD_INT32_T*, &__x),
-1);
+#endif
}
@@ -1655,8 +1726,12 @@
{
_RWSTD_COMPILE_ASSERT (8 == sizeof (_RWSTD_LONG_LONG));
+#ifdef _MSC_VER
+ return _InterlockedDecrement64 (_RWSTD_REINTERPRET_CAST (__int64*, &__x));
+#else
return __rw_atomic_add64 (_RWSTD_REINTERPRET_CAST (_RWSTD_INT64_T*, &__x),
-1);
+#endif
}
@@ -1665,8 +1740,12 @@
{
_RWSTD_COMPILE_ASSERT (8 == sizeof (unsigned _RWSTD_LONG_LONG));
+#ifdef _MSC_VER
+ return _InterlockedDecrement64 (_RWSTD_REINTERPRET_CAST (__int64*, &__x));
+#else
return __rw_atomic_add64 (_RWSTD_REINTERPRET_CAST (_RWSTD_INT64_T*, &__x),
-1);
+#endif
}
# endif // _RWSTD_LLONG_SIZE > _RWSTD_LONG_SIZE
@@ -1728,8 +1807,13 @@
{
_RWSTD_COMPILE_ASSERT (4 == sizeof (int));
+#ifdef _MSC_VER
+ return _InterlockedExchange (_RWSTD_REINTERPRET_CAST (long*, &__x),
+ _RWSTD_STATIC_CAST (long, __y));
+#else
return __rw_atomic_xchg32 (_RWSTD_REINTERPRET_CAST (_RWSTD_INT32_T*, &__x),
_RWSTD_STATIC_CAST (_RWSTD_INT32_T, __y));
+#endif
}
@@ -1738,8 +1822,13 @@
{
_RWSTD_COMPILE_ASSERT (4 == sizeof (unsigned int));
+#ifdef _MSC_VER
+ return _InterlockedExchange (_RWSTD_REINTERPRET_CAST (long*, &__x),
+ _RWSTD_STATIC_CAST (long, __y));
+#else
return __rw_atomic_xchg32 (_RWSTD_REINTERPRET_CAST (_RWSTD_INT32_T*, &__x),
_RWSTD_STATIC_CAST (_RWSTD_INT32_T, __y));
+#endif
}
@@ -1775,8 +1864,13 @@
{
_RWSTD_COMPILE_ASSERT (8 == sizeof (_RWSTD_LONG_LONG));
+#ifdef _MSC_VER
+ return _InterlockedExchange64 (_RWSTD_REINTERPRET_CAST (__int64*, &__x),
+ _RWSTD_STATIC_CAST (__int64, __y));
+#else
return __rw_atomic_xchg64 (_RWSTD_REINTERPRET_CAST (_RWSTD_INT64_T*, &__x),
_RWSTD_STATIC_CAST (_RWSTD_INT64_T, __y));
+#endif
}
@@ -1786,8 +1880,13 @@
{
_RWSTD_COMPILE_ASSERT (8 == sizeof (unsigned _RWSTD_LONG_LONG));
+#ifdef _MSC_VER
+ return _InterlockedExchange64 (_RWSTD_REINTERPRET_CAST (__int64*, &__x),
+ _RWSTD_STATIC_CAST (__int64, __y));
+#else
return __rw_atomic_xchg64 (_RWSTD_REINTERPRET_CAST (_RWSTD_INT64_T*, &__x),
_RWSTD_STATIC_CAST (_RWSTD_INT64_T, __y));
+#endif
}
# endif // _RWSTD_LLONG_SIZE > _RWSTD_LONG_SIZE
Added: incubator/stdcxx/trunk/src/i86/atomic.asm
URL: http://svn.apache.org/viewvc/incubator/stdcxx/trunk/src/i86/atomic.asm?rev=573337&view=auto
==============================================================================
--- incubator/stdcxx/trunk/src/i86/atomic.asm (added)
+++ incubator/stdcxx/trunk/src/i86/atomic.asm Thu Sep 6 12:05:30 2007
@@ -0,0 +1,178 @@
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;
+; i86/atomic.asm
+;
+; $Id$
+;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;
+; Licensed to the Apache Software Foundation (ASF) under one or more
+; contributor license agreements. See the NOTICE file distributed
+; with this work for additional information regarding copyright
+; ownership. The ASF licenses this file to you under the Apache
+; License, Version 2.0 (the "License"); you may not use this file
+; except in compliance with the License. You may obtain a copy of
+; the License at
+;
+; http://www.apache.org/licenses/LICENSE-2.0
+;
+; Unless required by applicable law or agreed to in writing, software
+; distributed under the License is distributed on an "AS IS" BASIS,
+; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+; implied. See the License for the specific language governing
+; permissions and limitations under the License.
+;
+; Copyright 2003-2006 Rogue Wave Software.
+;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+
+ .486
+ .model flat
+ .code
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+; extern "C" char __rw_atomic_xchg8 (char *x, char y);
+;
+; Atomically assigns the 8-bit value y to *x and returns
+; the original (before assignment) 8-bit value of *x.
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+ align 4
+ public ___rw_atomic_xchg8
+___rw_atomic_xchg8 proc ; char (char *x, char y)
+
+arg_x = dword ptr 4
+arg_y = byte ptr 8
+
+ mov ecx, [esp+arg_x] ; %ecx = x
+ mov al, [esp+arg_y] ; %al = y
+ xchg al, [ecx] ; %al <-> (%ecx)
+ ret
+___rw_atomic_xchg8 endp
+
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+; extern "C" short __rw_atomic_xchg16 (short *x, short y);
+;
+; Atomically assigns the 16-bit value y to *x and returns
+; the original (before assignment) 16-bit value of *x.
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+ align 4
+ public ___rw_atomic_xchg16
+___rw_atomic_xchg16 proc ; short (short *x, short y)
+
+arg_x = dword ptr 4
+arg_y = word ptr 8
+
+ mov ecx, [esp+arg_x] ; %ecx = x
+ mov ax, [esp+arg_y] ; %eax = y
+ xchg ax, [ecx] ; %ax <-> (%ecx)
+ ret
+___rw_atomic_xchg16 endp
+
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+; extern "C" int __rw_atomic_xchg32 (int *x, int y);
+;
+; Atomically assigns the 32-bit value y to *x and returns
+; the original (before assignment) 32-bit value of *x.
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+ align 4
+ public ___rw_atomic_xchg32
+___rw_atomic_xchg32 proc ; int (int *x, int y)
+
+arg_x = dword ptr 4
+arg_y = dword ptr 8
+
+ mov ecx, [esp+arg_x] ; %ecx = x
+ mov eax, [esp+arg_y] ; %eax = y
+ xchg eax, [ecx] ; %eax <-> (%ecx)
+ ret
+___rw_atomic_xchg32 endp
+
+
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+; extern "C" char __rw_atomic_add8 (char *x, int y);
+;
+; Atomically increments the 8-bit value *x by y and returns
+; the new (after increment) 8-bit value of *x.
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+ align 4
+ public ___rw_atomic_add8
+___rw_atomic_add8 proc ; char (char *dst, int inc)
+
+arg_dst = dword ptr 4
+arg_inc = dword ptr 8
+
+ mov ecx, [esp+arg_dst] ; %ecx = dst
+ mov eax, [esp+arg_inc] ; %eax = inc
+ mov edx, eax
+
+ lock xadd [ecx], al ; tmp = *dst;
+ ; dst += inc;
+ ; %al = tmp
+
+ add eax, edx ; return %eax + inc
+ ret
+___rw_atomic_add8 endp
+
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+; extern "C" short __rw_atomic_add16 (short *x, short y);
+;
+; Atomically increments the 16-bit value *x by y and returns
+; the new (after increment) 16-bit value of *x.
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+ align 4
+ public ___rw_atomic_add16
+___rw_atomic_add16 proc ; short (short *dst, short inc)
+
+arg_dst = dword ptr 4
+arg_inc = dword ptr 8
+
+ mov ecx, [esp+arg_dst] ; %ecx = dst
+ mov eax, [esp+arg_inc] ; %eax = inc
+ mov edx, eax
+
+ lock xadd [ecx], ax ; tmp = *dst;
+ ; dst += inc;
+ ; %ax = tmp
+
+ add eax, edx ; return %eax + inc
+ ret
+___rw_atomic_add16 endp
+
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+; extern "C" int __rw_atomic_add32 (int *x, int y);
+;
+; Atomically increments the 32-bit value *x by y and returns
+; the new (after increment) 32-bit value of *x.
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+ align 4
+ public ___rw_atomic_add32
+___rw_atomic_add32 proc ; int (int *dst, int inc)
+
+arg_dst = dword ptr 4
+arg_inc = dword ptr 8
+
+ mov ecx, [esp+arg_dst] ; %ecx = dst
+ mov edx, [esp+arg_inc] ; %edx = inc
+ mov eax, edx
+
+ lock xadd [ecx], eax ; tmp = *dst;
+ ; dst += inc;
+ ; %eax = tmp
+
+ add eax, edx ; return %eax + inc
+ ret
+___rw_atomic_add32 endp
+
+ end
Propchange: incubator/stdcxx/trunk/src/i86/atomic.asm
------------------------------------------------------------------------------
svn:eol-style = native
Propchange: incubator/stdcxx/trunk/src/i86/atomic.asm
------------------------------------------------------------------------------
svn:keywords = Id
Added: incubator/stdcxx/trunk/src/i86_64/atomic.asm
URL: http://svn.apache.org/viewvc/incubator/stdcxx/trunk/src/i86_64/atomic.asm?rev=573337&view=auto
==============================================================================
--- incubator/stdcxx/trunk/src/i86_64/atomic.asm (added)
+++ incubator/stdcxx/trunk/src/i86_64/atomic.asm Thu Sep 6 12:05:30 2007
@@ -0,0 +1,186 @@
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;
+; i86_64/atomic.asm
+;
+; $Id$
+;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;
+; Licensed to the Apache Software Foundation (ASF) under one or more
+; contributor license agreements. See the NOTICE file distributed
+; with this work for additional information regarding copyright
+; ownership. The ASF licenses this file to you under the Apache
+; License, Version 2.0 (the "License"); you may not use this file
+; except in compliance with the License. You may obtain a copy of
+; the License at
+;
+; http://www.apache.org/licenses/LICENSE-2.0
+;
+; Unless required by applicable law or agreed to in writing, software
+; distributed under the License is distributed on an "AS IS" BASIS,
+; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+; implied. See the License for the specific language governing
+; permissions and limitations under the License.
+;
+; Copyright 2003-2006 Rogue Wave Software.
+;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+
+ .code
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+; extern "C" int8_t __rw_atomic_xchg8 (int8_t *x, int8_t y);
+;
+; Atomically assigns the 8-bit value y to *x and returns
+; the original (before assignment) 8-bit value of *x.
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+ align 16
+ public __rw_atomic_xchg8
+__rw_atomic_xchg8 proc ; int8_t (int8_t *x, int8_t y)
+ ; %rcx = x
+ mov al, dl ; %al = y
+ xchg al, [rcx] ; %al <-> (%rcx)
+ ret
+__rw_atomic_xchg8 endp
+
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+; extern "C" int16_t __rw_atomic_xchg16 (int16_t *x, int16_t y);
+;
+; Atomically assigns the 16-bit value y to *x and returns
+; the original (before assignment) 16-bit value of *x.
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+ align 16
+ public __rw_atomic_xchg16
+__rw_atomic_xchg16 proc ; int16_t (int16_t *x, int16_t y)
+ ; %rcx = x
+ mov ax, dx ; %ax = y
+ xchg ax, [rcx] ; %ax <-> (%rcx)
+ ret
+__rw_atomic_xchg16 endp
+
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+; extern "C" int32_t __rw_atomic_xchg32 (int32_t *x, int32_t y);
+;
+; Atomically assigns the 32-bit value y to *x and returns
+; the original (before assignment) 32-bit value of *x.
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+ align 16
+ public __rw_atomic_xchg32
+__rw_atomic_xchg32 proc ; int32_t (int32_t *x, int32_t y)
+ ; %rcx = x
+ mov eax, edx ; %eax = y
+ xchg eax, [rcx] ; %eax <-> (%rcx)
+ ret
+__rw_atomic_xchg32 endp
+
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+; extern "C" int64_t __rw_atomic_xchg64 (int64_t *x, int64_t y);
+;
+; Atomically assigns the 64-bit value y to *x and returns
+; the original (before assignment) 64-bit value of *x.
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+ align 16
+ public __rw_atomic_xchg64
+__rw_atomic_xchg64 proc ; int64_t (int64_t *x, int64_t y)
+ ; %rcx = x
+ mov rax, rdx ; %rax = y
+ xchg rax, [rcx] ; %rax <-> (%rcx)
+ ret
+__rw_atomic_xchg64 endp
+
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+; extern "C" int8_t __rw_atomic_add8 (int8_t *x, int8_t y);
+;
+; Atomically increments the 8-bit value *x by y and returns
+; the new (after increment) 8-bit value of *x.
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+ align 16
+ public __rw_atomic_add8
+__rw_atomic_add8 proc ; int8_t (int8_t *dst, int8_t inc)
+ ; %rcx = dst
+ mov eax, edx ; %eax = inc
+
+ lock xadd [rcx], al ; tmp = *dst
+ ; dst += inc
+ ; %al = tmp
+ add eax, edx ; return %al + inc
+ ret
+__rw_atomic_add8 endp
+
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+; extern "C" int16_t __rw_atomic_add16 (int16_t *x, int16_t y);
+;
+; Atomically increments the 16-bit value *x by y and returns
+; the new (after increment) 16-bit value of *x.
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+ align 16
+ public __rw_atomic_add16
+__rw_atomic_add16 proc ; int16_t (int16_t *dst, int16_t inc)
+ ; %rcx = dst
+ mov ax, dx ; %ax = inc
+
+ lock xadd [rcx], ax ; tmp = *dst
+ ; dst += inc
+ ; %ax = tmp
+
+ add ax, dx ; return %ax + inc
+ ret
+__rw_atomic_add16 endp
+
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+; extern "C" int32_t __rw_atomic_add32 (int32_t *x, int32_t y);
+;
+; Atomically increments the 32-bit value *x by y and returns
+; the new (after increment) 32-bit value of *x.
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+ align 16
+ public __rw_atomic_add32
+__rw_atomic_add32 proc ; int32_t (int32_t *dst, int32_t inc)
+ ; %rcx = dst
+ mov eax, edx ; %eax = inc
+
+ lock xadd [rcx], eax ; tmp = *dst
+ ; dst += inc
+ ; %eax = tmp
+
+ add eax, edx ; return %eax + inc
+ ret
+__rw_atomic_add32 endp
+
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+; extern "C" int64_t __rw_atomic_add64 (int64_t *x, int64_t y);
+;
+; Atomically increments the 64-bit value *x by y and returns
+; the new (after increment) 64-bit value of *x.
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+ align 16
+ public __rw_atomic_add64
+__rw_atomic_add64 proc ; int64_t (int64_t *dst, int64_t inc)
+ ; %rcx = dst
+ mov rax, rdx ; %rax = inc
+
+ lock xadd [rcx], rax ; tmp = *dst
+ ; dst += inc
+ ; %rax = tmp
+
+ add rax, rdx ; return %rax + inc
+ ret
+__rw_atomic_add64 endp
+
+ end
Propchange: incubator/stdcxx/trunk/src/i86_64/atomic.asm
------------------------------------------------------------------------------
svn:eol-style = native
Propchange: incubator/stdcxx/trunk/src/i86_64/atomic.asm
------------------------------------------------------------------------------
svn:keywords = Id
Modified: incubator/stdcxx/trunk/src/once.cpp
URL: http://svn.apache.org/viewvc/incubator/stdcxx/trunk/src/once.cpp?rev=573337&r1=573336&r2=573337&view=diff
==============================================================================
--- incubator/stdcxx/trunk/src/once.cpp (original)
+++ incubator/stdcxx/trunk/src/once.cpp Thu Sep 6 12:05:30 2007
@@ -188,3 +188,32 @@
} // extern "C"
} // namespace __rw
+
+// export __rw_atomic_xxx() functions, defined in atomic.asm
+#if defined (_WIN32) && defined (_DLL)
+
+# if defined (_M_IX86)
+
+# pragma comment(linker, "/EXPORT:___rw_atomic_add8")
+# pragma comment(linker, "/EXPORT:___rw_atomic_add16")
+# pragma comment(linker, "/EXPORT:___rw_atomic_add32")
+# pragma comment(linker, "/EXPORT:___rw_atomic_xchg8")
+# pragma comment(linker, "/EXPORT:___rw_atomic_xchg16")
+# pragma comment(linker, "/EXPORT:___rw_atomic_xchg32")
+
+# elif defined (_M_X64)
+
+# pragma comment(linker, "/EXPORT:__rw_atomic_add8")
+# pragma comment(linker, "/EXPORT:__rw_atomic_add16")
+# pragma comment(linker, "/EXPORT:__rw_atomic_add32")
+# pragma comment(linker, "/EXPORT:__rw_atomic_xchg8")
+# pragma comment(linker, "/EXPORT:__rw_atomic_xchg16")
+# pragma comment(linker, "/EXPORT:__rw_atomic_xchg32")
+
+# ifdef _RWSTD_INT64_T
+# pragma comment(linker, "/EXPORT:__rw_atomic_add64")
+# pragma comment(linker, "/EXPORT:__rw_atomic_xchg64")
+# endif // _RWSTD_INT64_T
+# endif // _M_IX86
+
+#endif // _WIN32 && _DLL
RE: svn commit: r573337 - in /incubator/stdcxx/trunk: etc/config/windows/projects.js include/rw/_mutex.h src/i86/atomic.asm src/i86_64/atomic.asm src/once.cpp
Posted by Farid Zaripov <Fa...@epam.com>.
> -----Original Message-----
> From: Martin Sebor [mailto:sebor@roguewave.com]
> Sent: Friday, September 07, 2007 1:11 AM
> To: stdcxx-dev@incubator.apache.org
> Subject: Re: svn commit: r573337 - in
> /incubator/stdcxx/trunk: etc/config/windows/projects.js
> include/rw/_mutex.h src/i86/atomic.asm src/i86_64/atomic.asm
> src/once.cpp
>
> Is there any particular reason why the export directives
> should be in once.cpp?
No reason. The export directives can be in any .cpp file.
> We have an export.cpp (currently used
> only by the EDG eccp C++ export feature) which seems like it
> might be a better choice. Alternatively, we could add a new
> file, say atomic.cpp.
> I don't suppose we could put the pragmas in the .asm files themselves?
No, we can't.
> Is there a way to get the Windows infrastructure to pick up
> these files automatically just like it does .cpp files so
> that we avoid this kind of coupling?
Done: http://svn.apache.org/viewvc?rev=573571&view=rev
Farid.
Re: svn commit: r573337 - in /incubator/stdcxx/trunk: etc/config/windows/projects.js
include/rw/_mutex.h src/i86/atomic.asm src/i86_64/atomic.asm src/once.cpp
Posted by Martin Sebor <se...@roguewave.com>.
faridz@apache.org wrote:
> Author: faridz
> Date: Thu Sep 6 12:05:30 2007
> New Revision: 573337
>
> URL: http://svn.apache.org/viewvc?rev=573337&view=rev
> Log:
> 2007-09-06 Farid Zaripov <Fa...@epam.com>
>
> * projects.js: Added definitions of the platform dependent files.
> * i86/atomic.asm: New file with definitions of the __rw_atomic_xxx()
> functions for Win32 platform.
> * i86_64/atomic.asm: New file with definitions of the
> __rw_atomic_xxx() functions for Windows/x64 platform.
> * _mutex.h: Use new __rw_atomic_xxx() functions if corresponding
> InterlockedXXX() functions are not present.
> [_MSC_VER >= 1400]: Use intrinsic InterlockedXXX() functions
> on MSVC 8 and higher.
> * once.cpp [_WIN32 && _DLL]: Tell linker to export __rw_atomic_xxx()
> functions, defined in .asm files.
Is there any particular reason why the export directives should
be in once.cpp? We have an export.cpp (currently used only by
the EDG eccp C++ export feature) which seems like it might be
a better choice. Alternatively, we could add a new file, say
atomic.cpp. I don't suppose we could put the pragmas in the
.asm files themselves?
>
[...]
> --- incubator/stdcxx/trunk/etc/config/windows/projects.js (original)
> +++ incubator/stdcxx/trunk/etc/config/windows/projects.js Thu Sep 6 12:05:30 2007
> @@ -85,6 +85,10 @@
> projectDefs.push(new Array(configureDef));
>
> ///////////////////////////////////////////////////////////////////////////////
> + // add platform dependent files
> + customFileDefs.push(new CustomFileDef("i86\\atomic.asm", "Win32", InitAsmTool));
> + customFileDefs.push(new CustomFileDef("i86_64\\atomic.asm", "x64", InitAsmTool));
Is there a way to get the Windows infrastructure to pick up these
files automatically just like it does .cpp files so that we avoid
this kind of coupling?
Martin