/* /////////////////////////////////////////////////////////////////////////
 * File:    winstl/synch/atomic_functions.h (originally MLAtomic.cpp, ::SynesisStd)
 *
 * Purpose: WinSTL atomic functions.
 *
 * Created: 23rd October 1997
 * Updated: 29th April 2010
 *
 * Home:    http://stlsoft.org/
 *
 * Copyright (c) 1997-2010, Matthew Wilson and Synesis Software
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * - Redistributions of source code must retain the above copyright notice, this
 *   list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright notice,
 *   this list of conditions and the following disclaimer in the documentation
 *   and/or other materials provided with the distribution.
 * - Neither the name(s) of Matthew Wilson and Synesis Software nor the names of
 *   any contributors may be used to endorse or promote products derived from
 *   this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 * ////////////////////////////////////////////////////////////////////// */

/** \file winstl/synch/atomic_functions.h
 *
 * \brief [C++ only] Definition of the atomic functions
 *   (\ref group__library__synch "Synchronisation" Library).
 */

#ifndef WINSTL_INCL_WINSTL_SYNCH_H_ATOMIC_FUNCTIONS
#define WINSTL_INCL_WINSTL_SYNCH_H_ATOMIC_FUNCTIONS

#ifndef STLSOFT_DOCUMENTATION_SKIP_SECTION
# define WINSTL_VER_WINSTL_SYNCH_H_ATOMIC_FUNCTIONS_MAJOR     4
# define WINSTL_VER_WINSTL_SYNCH_H_ATOMIC_FUNCTIONS_MINOR     4
# define WINSTL_VER_WINSTL_SYNCH_H_ATOMIC_FUNCTIONS_REVISION  1
# define WINSTL_VER_WINSTL_SYNCH_H_ATOMIC_FUNCTIONS_EDIT      203
#endif /* !STLSOFT_DOCUMENTATION_SKIP_SECTION */

/* /////////////////////////////////////////////////////////////////////////
 * Compatibility
 */

/*
[Incompatibilies-start]
STLSOFT_COMPILER_IS_MWERKS: __MWERKS__<0x3000
[Incompatibilies-end]
*/

/* /////////////////////////////////////////////////////////////////////////
 * Includes
 */

#ifndef WINSTL_INCL_WINSTL_H_WINSTL
# include <winstl/winstl.h>
#endif /* !WINSTL_INCL_WINSTL_H_WINSTL */
#ifndef WINSTL_INCL_WINSTL_SYNCH_H_ATOMIC_TYPES
# include <winstl/synch/atomic_types.h>
#endif /* !WINSTL_INCL_WINSTL_SYNCH_H_ATOMIC_TYPES */
#ifdef __cplusplus
# ifndef WINSTL_INCL_WINSTL_SYNCH_HPP_SPIN_MUTEX
#  include <winstl/synch/spin_mutex.hpp>
# endif /* !WINSTL_INCL_WINSTL_SYNCH_HPP_SPIN_MUTEX */
#endif /* __cplusplus */

/* /////////////////////////////////////////////////////////////////////////
 * Compatibility
 */

#if !defined(WINSTL_ARCH_IS_X86) && \
    !defined(WINSTL_ARCH_IS_IA64) && \
    !defined(WINSTL_ARCH_IS_X64)
# error Not valid for processors other than Intel
#endif /* arch */

#ifdef STLSOFT_ATOMIC_CALLCONV
# undef STLSOFT_ATOMIC_CALLCONV
#endif /* STLSOFT_ATOMIC_CALLCONV */
#ifdef WINSTL_ATOMIC_FNS_CALLCONV_IS_FASTCALL
# undef WINSTL_ATOMIC_FNS_CALLCONV_IS_FASTCALL
#endif /* WINSTL_ATOMIC_FNS_CALLCONV_IS_FASTCALL */
#ifdef WINSTL_ATOMIC_FNS_CALLCONV_IS_STDCALL
# undef WINSTL_ATOMIC_FNS_CALLCONV_IS_STDCALL
#endif /* WINSTL_ATOMIC_FNS_CALLCONV_IS_STDCALL */

#ifndef STLSOFT_NO_FASTCALL
# if defined(STLSOFT_COMPILER_IS_BORLAND) || \
     defined(STLSOFT_COMPILER_IS_DMC) || \
     defined(STLSOFT_COMPILER_IS_WATCOM)
#  define STLSOFT_NO_FASTCALL
# endif /* compiler */
#endif /* STLSOFT_NO_FASTCALL */

#if defined(WINSTL_ARCH_IS_X86)
# if defined(STLSOFT_CF_FASTCALL_SUPPORTED) && \
     !defined(STLSOFT_NO_FASTCALL)
#  define WINSTL_ATOMIC_FNS_CALLCONV_IS_FASTCALL
#  define WINSTL_ATOMIC_FNS_CALLCONV  __fastcall
# elif defined(STLSOFT_CF_STDCALL_SUPPORTED)
#  define WINSTL_ATOMIC_FNS_CALLCONV_IS_STDCALL
#  define WINSTL_ATOMIC_FNS_CALLCONV  __stdcall
# else
#  error Need to define calling convention
# endif /* call-conv */
#elif defined(WINSTL_ARCH_IS_IA64) || \
      defined(WINSTL_ARCH_IS_X64)
# define WINSTL_ATOMIC_FNS_CALLCONV_IS_CDECL
# define WINSTL_ATOMIC_FNS_CALLCONV   __cdecl
#else /* ? arch */
# error Only defined for the Intel x86, x64 and IA64 architectures
#endif /* arch */
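
/* By way of illustration (a sketch only, not normative): with the above in
 * effect, a declaration written on x86 as
 *
 *   atomic_int_t WINSTL_ATOMIC_FNS_CALLCONV atomic_preincrement(atomic_int_t volatile* pl);
 *
 * resolves to __fastcall where the compiler supports it, otherwise to
 * __stdcall; on x64 and IA64 it is always __cdecl.
 */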
/* /////////////////////////////////////////////////////////////////////////
 * Namespace
 */

#ifndef _WINSTL_NO_NAMESPACE
# if defined(_STLSOFT_NO_NAMESPACE) || \
     defined(STLSOFT_DOCUMENTATION_SKIP_SECTION)
/* There is no stlsoft namespace, so must define ::winstl */
namespace winstl
{
# else
/* Define stlsoft::winstl_project */
namespace stlsoft
{
namespace winstl_project
{
# endif /* _STLSOFT_NO_NAMESPACE */
#endif /* !_WINSTL_NO_NAMESPACE */

/* /////////////////////////////////////////////////////////////////////////
 * Implementation options
 *
 * Because some compilers can actually make the code faster when the naked
 * functions are not inlined, we provide for that here. If you want to
 * out-of-line the functions, you just need to define
 * WINSTL_ATOMIC_FNS_DECLARATION_ONLY in the code that uses them, and define
 * WINSTL_ATOMIC_FNS_DEFINITION in one implementation file.
 */
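
/* For example, a project that out-of-lines the functions might arrange its
 * files along these lines (file layout illustrative only):
 *
 *   // in every translation unit that calls the functions
 *   #define WINSTL_ATOMIC_FNS_DECLARATION_ONLY
 *   #include <winstl/synch/atomic_functions.h>
 *
 *   // in exactly one implementation file
 *   #define WINSTL_ATOMIC_FNS_DEFINITION
 *   #include <winstl/synch/atomic_functions.h>
 */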
#ifdef WINSTL_ATOMIC_FNS_DECL_
# undef WINSTL_ATOMIC_FNS_DECL_
#endif /* WINSTL_ATOMIC_FNS_DECL_ */
#ifdef WINSTL_ATOMIC_FNS_IMPL_
# undef WINSTL_ATOMIC_FNS_IMPL_
#endif /* WINSTL_ATOMIC_FNS_IMPL_ */

#if defined(WINSTL_ATOMIC_FNS_DECLARATION_ONLY)
/* Only the function declarations are included */
# define WINSTL_ATOMIC_FNS_DECL_(type)  type WINSTL_ATOMIC_FNS_CALLCONV
#elif defined(WINSTL_ATOMIC_FNS_DEFINITION)
/* Only the function definitions are included */
# ifdef STSLSOFT_INLINE_ASM_SUPPORTED
#  define WINSTL_ATOMIC_FNS_IMPL_(type) __declspec(naked) type WINSTL_ATOMIC_FNS_CALLCONV
# else /* ? STSLSOFT_INLINE_ASM_SUPPORTED */
#  define WINSTL_ATOMIC_FNS_IMPL_(type) type WINSTL_ATOMIC_FNS_CALLCONV
# endif /* STSLSOFT_INLINE_ASM_SUPPORTED */
#else /* ? declaration / definition */
# if defined(STLSOFT_COMPILER_IS_MWERKS) && \
     (__MWERKS__ & 0xFF00) < 0x3000
#  error CodeWarrior 7 and earlier do not generate correct code when inline naked functions are used
# endif /* compiler */

# if !defined(__cplusplus) && \
     defined(STSLSOFT_INLINE_ASM_SUPPORTED)
  /* Not currently supporting inline assembler for C compilation. It's
   * perfectly possible, but needs more work to sort out.
   */
#  undef STSLSOFT_INLINE_ASM_SUPPORTED
# endif /* !__cplusplus && STSLSOFT_INLINE_ASM_SUPPORTED */

# ifdef STSLSOFT_INLINE_ASM_SUPPORTED
  /* The default is to define them inline */
#  ifdef STSLSOFT_ASM_IN_INLINE_SUPPORTED
#   define WINSTL_ATOMIC_FNS_DECL_(type)  inline type WINSTL_ATOMIC_FNS_CALLCONV
#   define WINSTL_ATOMIC_FNS_IMPL_(type)  inline __declspec(naked) type WINSTL_ATOMIC_FNS_CALLCONV
#  else /* ? STSLSOFT_ASM_IN_INLINE_SUPPORTED */
#   define WINSTL_ATOMIC_FNS_DECL_(type)  type WINSTL_ATOMIC_FNS_CALLCONV
#   define WINSTL_ATOMIC_FNS_IMPL_(type)  static __declspec(naked) type WINSTL_ATOMIC_FNS_CALLCONV
#  endif /* STSLSOFT_ASM_IN_INLINE_SUPPORTED */
# else /* ? STSLSOFT_INLINE_ASM_SUPPORTED */
  /* ASM not supported, so we're using the Win32 functions */
#  if defined(__cplusplus)
#   define WINSTL_ATOMIC_FNS_DECL_(type)  inline type WINSTL_ATOMIC_FNS_CALLCONV
#   define WINSTL_ATOMIC_FNS_IMPL_(type)  inline type WINSTL_ATOMIC_FNS_CALLCONV
#  else /* ? __cplusplus */
#   define WINSTL_ATOMIC_FNS_DECL_(type)  STLSOFT_INLINE type WINSTL_ATOMIC_FNS_CALLCONV
#   define WINSTL_ATOMIC_FNS_IMPL_(type)  STLSOFT_INLINE type WINSTL_ATOMIC_FNS_CALLCONV
#  endif /* __cplusplus */
# endif /* STSLSOFT_INLINE_ASM_SUPPORTED */
#endif /* declaration / definition */
/* /////////////////////////////////////////////////////////////////////////
 * Atomic function declarations
 */

#ifndef WINSTL_ATOMIC_FNS_DEFINITION

/* Uni-processor variants */
WINSTL_ATOMIC_FNS_DECL_(atomic_int_t) atomic_preincrement_up(atomic_int_t volatile* pl);
WINSTL_ATOMIC_FNS_DECL_(atomic_int_t) atomic_predecrement_up(atomic_int_t volatile* pl);
WINSTL_ATOMIC_FNS_DECL_(atomic_int_t) atomic_postincrement_up(atomic_int_t volatile* pl);
WINSTL_ATOMIC_FNS_DECL_(atomic_int_t) atomic_postdecrement_up(atomic_int_t volatile* pl);
WINSTL_ATOMIC_FNS_DECL_(void)         atomic_increment_up(atomic_int_t volatile* pl);
WINSTL_ATOMIC_FNS_DECL_(void)         atomic_decrement_up(atomic_int_t volatile* pl);
WINSTL_ATOMIC_FNS_DECL_(atomic_int_t) atomic_write_up(atomic_int_t volatile* pl, atomic_int_t n);
WINSTL_ATOMIC_FNS_DECL_(atomic_int_t) atomic_read_up(atomic_int_t volatile const* pl);
WINSTL_ATOMIC_FNS_DECL_(atomic_int_t) atomic_postadd_up(atomic_int_t volatile* pl, atomic_int_t n);
STLSOFT_INLINE atomic_int_t           atomic_preadd_up(atomic_int_t volatile* pl, atomic_int_t n);

/* SMP variants */
WINSTL_ATOMIC_FNS_DECL_(atomic_int_t) atomic_preincrement_smp(atomic_int_t volatile* pl);
WINSTL_ATOMIC_FNS_DECL_(atomic_int_t) atomic_predecrement_smp(atomic_int_t volatile* pl);
WINSTL_ATOMIC_FNS_DECL_(atomic_int_t) atomic_postincrement_smp(atomic_int_t volatile* pl);
WINSTL_ATOMIC_FNS_DECL_(atomic_int_t) atomic_postdecrement_smp(atomic_int_t volatile* pl);
STLSOFT_INLINE void                   atomic_increment_smp(atomic_int_t volatile* pl);
STLSOFT_INLINE void                   atomic_decrement_smp(atomic_int_t volatile* pl);
WINSTL_ATOMIC_FNS_DECL_(atomic_int_t) atomic_write_smp(atomic_int_t volatile* pl, atomic_int_t n);
WINSTL_ATOMIC_FNS_DECL_(atomic_int_t) atomic_read_smp(atomic_int_t volatile const* pl);
WINSTL_ATOMIC_FNS_DECL_(atomic_int_t) atomic_postadd_smp(atomic_int_t volatile* pl, atomic_int_t n);
STLSOFT_INLINE atomic_int_t           atomic_preadd_smp(atomic_int_t volatile* pl, atomic_int_t n);

/* Multi-processor detection variants */
WINSTL_ATOMIC_FNS_DECL_(atomic_int_t) atomic_preincrement(atomic_int_t volatile* pl);
WINSTL_ATOMIC_FNS_DECL_(atomic_int_t) atomic_predecrement(atomic_int_t volatile* pl);
WINSTL_ATOMIC_FNS_DECL_(atomic_int_t) atomic_postincrement(atomic_int_t volatile* pl);
WINSTL_ATOMIC_FNS_DECL_(atomic_int_t) atomic_postdecrement(atomic_int_t volatile* pl);
WINSTL_ATOMIC_FNS_DECL_(void)         atomic_increment(atomic_int_t volatile* pl);
WINSTL_ATOMIC_FNS_DECL_(void)         atomic_decrement(atomic_int_t volatile* pl);
WINSTL_ATOMIC_FNS_DECL_(atomic_int_t) atomic_write(atomic_int_t volatile* pl, atomic_int_t n);
WINSTL_ATOMIC_FNS_DECL_(atomic_int_t) atomic_read(atomic_int_t volatile const* pl);
WINSTL_ATOMIC_FNS_DECL_(atomic_int_t) atomic_postadd(atomic_int_t volatile* pl, atomic_int_t n);
STLSOFT_INLINE atomic_int_t           atomic_preadd(atomic_int_t volatile* pl, atomic_int_t n);

#endif /* !WINSTL_ATOMIC_FNS_DEFINITION */
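
/* Usage sketch (illustrative only, not part of this header): the detection
 * variants can implement a minimal thread-safe reference count. The type
 * ref_counted and the function destroy() are hypothetical.
 *
 *   struct ref_counted
 *   {
 *       atomic_int_t refs;
 *   };
 *
 *   void add_ref(struct ref_counted* p)
 *   {
 *       atomic_increment(&p->refs);
 *   }
 *
 *   void release(struct ref_counted* p)
 *   {
 *       if(0 == atomic_predecrement(&p->refs))
 *       {
 *           destroy(p);
 *       }
 *   }
 */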
/* /////////////////////////////////////////////////////////////////////////
 * Atomic function definitions
 */

#ifndef STLSOFT_DOCUMENTATION_SKIP_SECTION
# if !defined(WINSTL_ATOMIC_FNS_DECLARATION_ONLY)

#  ifdef STSLSOFT_INLINE_ASM_SUPPORTED
/* Inline assembler versions */

#ifdef STLSOFT_COMPILER_IS_BORLAND
# pragma warn -8002 /* Suppresses: "Restarting compile using assembly" */
# pragma warn -8070 /* Suppresses: "Function should return a value" */
#endif /* compiler */

/* Uni-processor */

/** \brief
 *
 * \ingroup group__library__synch
 */
WINSTL_ATOMIC_FNS_IMPL_(atomic_int_t) atomic_preincrement_up(atomic_int_t volatile* /* pl */)
{
    _asm
    {
        /* Put 1 into eax, which can then be atomically added into *pl (held
         * in ecx). Since it's an xadd it exchanges the previous value into eax
         */
        mov eax, 1

#if defined(WINSTL_ATOMIC_FNS_CALLCONV_IS_FASTCALL)
        /* __fastcall: ecx is pl */
#elif defined(WINSTL_ATOMIC_FNS_CALLCONV_IS_STDCALL)
        /* __stdcall: arguments are on the stack */
        mov ecx, dword ptr [esp + 4]
#else
# error Need to define calling convention
#endif /* call-conv */

        xadd dword ptr [ecx], eax

        /* Since this is pre-increment, we need to inc eax to catch up with the
         * real value
         */
        inc eax

#if defined(WINSTL_ATOMIC_FNS_CALLCONV_IS_FASTCALL)
        ret
#elif defined(WINSTL_ATOMIC_FNS_CALLCONV_IS_STDCALL)
        ret 4
#endif /* call-conv */
    }
}
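
/* Worked example (illustrative): if *pl holds 5 on entry, the xadd stores 6
 * to *pl and leaves the previous value, 5, in eax; the trailing inc brings
 * eax to 6, the correct pre-increment result.
 */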
/** \brief
 *
 * \ingroup group__library__synch
 */
WINSTL_ATOMIC_FNS_IMPL_(atomic_int_t) atomic_predecrement_up(atomic_int_t volatile* /* pl */)
{
    _asm
    {
        /* Put -1 into eax, which can then be atomically added into *pl (held
         * in ecx). Since it's an xadd it exchanges the previous value into eax
         */
        mov eax, -1

#if defined(WINSTL_ATOMIC_FNS_CALLCONV_IS_FASTCALL)
        /* __fastcall: ecx is pl */
#elif defined(WINSTL_ATOMIC_FNS_CALLCONV_IS_STDCALL)
        /* __stdcall: arguments are on the stack */
        mov ecx, dword ptr [esp + 4]
#else
# error Need to define calling convention
#endif /* call-conv */

        xadd dword ptr [ecx], eax

        /* Since this is pre-decrement, we need to dec eax to catch up with the
         * real value
         */
        dec eax

#if defined(WINSTL_ATOMIC_FNS_CALLCONV_IS_FASTCALL)
        ret
#elif defined(WINSTL_ATOMIC_FNS_CALLCONV_IS_STDCALL)
        ret 4
#endif /* call-conv */
    }
}
/** \brief
 *
 * \ingroup group__library__synch
 */
WINSTL_ATOMIC_FNS_IMPL_(atomic_int_t) atomic_postincrement_up(atomic_int_t volatile* /* pl */)
{
    _asm
    {
        /* Put 1 into eax, which can then be atomically added into *pl (held
         * in ecx). Since it's an xadd it exchanges the previous value into eax
         */
        mov eax, 1

#if defined(WINSTL_ATOMIC_FNS_CALLCONV_IS_FASTCALL)
        /* __fastcall: ecx is pl */
#elif defined(WINSTL_ATOMIC_FNS_CALLCONV_IS_STDCALL)
        /* __stdcall: arguments are on the stack */
        mov ecx, dword ptr [esp + 4]
#else
# error Need to define calling convention
#endif /* call-conv */

        xadd dword ptr [ecx], eax

        /* Since this is post-increment, we need do nothing, since the previous
         * value is in eax
         */

#if defined(WINSTL_ATOMIC_FNS_CALLCONV_IS_FASTCALL)
        ret
#elif defined(WINSTL_ATOMIC_FNS_CALLCONV_IS_STDCALL)
        ret 4
#endif /* call-conv */
    }
}
/** \brief
 *
 * \ingroup group__library__synch
 */
WINSTL_ATOMIC_FNS_IMPL_(atomic_int_t) atomic_postdecrement_up(atomic_int_t volatile* /* pl */)
{
    _asm
    {
        /* Put -1 into eax, which can then be atomically added into *pl (held
         * in ecx). Since it's an xadd it exchanges the previous value into eax
         */
        mov eax, -1

#if defined(WINSTL_ATOMIC_FNS_CALLCONV_IS_FASTCALL)
        /* __fastcall: ecx is pl */
#elif defined(WINSTL_ATOMIC_FNS_CALLCONV_IS_STDCALL)
        /* __stdcall: arguments are on the stack */
        mov ecx, dword ptr [esp + 4]
#else
# error Need to define calling convention
#endif /* call-conv */

        xadd dword ptr [ecx], eax

        /* Since this is post-decrement, we need do nothing, since the previous
         * value is in eax
         */

#if defined(WINSTL_ATOMIC_FNS_CALLCONV_IS_FASTCALL)
        ret
#elif defined(WINSTL_ATOMIC_FNS_CALLCONV_IS_STDCALL)
        ret 4
#endif /* call-conv */
    }
}
/** \brief
 *
 * \ingroup group__library__synch
 */
WINSTL_ATOMIC_FNS_IMPL_(void) atomic_increment_up(atomic_int_t volatile* /* pl */)
{
    _asm
    {
#if defined(WINSTL_ATOMIC_FNS_CALLCONV_IS_FASTCALL)
        /* __fastcall: ecx is pl */
#elif defined(WINSTL_ATOMIC_FNS_CALLCONV_IS_STDCALL)
        /* __stdcall: arguments are on the stack */
        mov ecx, dword ptr [esp + 4]
#else
# error Need to define calling convention
#endif /* call-conv */

        add dword ptr [ecx], 1

#if defined(WINSTL_ATOMIC_FNS_CALLCONV_IS_FASTCALL)
        ret
#elif defined(WINSTL_ATOMIC_FNS_CALLCONV_IS_STDCALL)
        ret 4
#endif /* call-conv */
    }
}

/** \brief
 *
 * \ingroup group__library__synch
 */
WINSTL_ATOMIC_FNS_IMPL_(void) atomic_decrement_up(atomic_int_t volatile* /* pl */)
{
    _asm
    {
#if defined(WINSTL_ATOMIC_FNS_CALLCONV_IS_FASTCALL)
        /* __fastcall: ecx is pl */
#elif defined(WINSTL_ATOMIC_FNS_CALLCONV_IS_STDCALL)
        /* __stdcall: arguments are on the stack */
        mov ecx, dword ptr [esp + 4]
#else
# error Need to define calling convention
#endif /* call-conv */

        sub dword ptr [ecx], 1

#if defined(WINSTL_ATOMIC_FNS_CALLCONV_IS_FASTCALL)
        ret
#elif defined(WINSTL_ATOMIC_FNS_CALLCONV_IS_STDCALL)
        ret 4
#endif /* call-conv */
    }
}
/** \brief
 *
 * \ingroup group__library__synch
 */
WINSTL_ATOMIC_FNS_IMPL_(atomic_int_t) atomic_read_up(atomic_int_t volatile const* /* pl */)
{
    _asm
    {
        mov eax, 0

#if defined(WINSTL_ATOMIC_FNS_CALLCONV_IS_FASTCALL)
        /* __fastcall: ecx is pl */
#elif defined(WINSTL_ATOMIC_FNS_CALLCONV_IS_STDCALL)
        /* __stdcall: arguments are on the stack */
        mov ecx, dword ptr [esp + 4]
#else
# error Need to define calling convention
#endif /* call-conv */

        /* Atomically add the 0 in eax into *pl (held in ecx), leaving the
         * value unchanged.
         */
        xadd dword ptr [ecx], eax

        /* Since it's an xadd it exchanges the previous value into eax, which
         * is exactly what's required
         */

#if defined(WINSTL_ATOMIC_FNS_CALLCONV_IS_FASTCALL)
        ret
#elif defined(WINSTL_ATOMIC_FNS_CALLCONV_IS_STDCALL)
        ret 4
#endif /* call-conv */
    }
}
/** \brief
 *
 * \ingroup group__library__synch
 */
WINSTL_ATOMIC_FNS_IMPL_(atomic_int_t) atomic_write_up(atomic_int_t volatile* /* pl */, atomic_int_t /* n */)
{
    _asm
    {
#if defined(WINSTL_ATOMIC_FNS_CALLCONV_IS_FASTCALL)
        /* __fastcall: ecx is pl, edx is n */

        /* Just exchange *pl and n */
        xchg dword ptr [ecx], edx

        /* The previous value goes into edx, so we move it into eax for return */
        mov eax, edx

        ret
#elif defined(WINSTL_ATOMIC_FNS_CALLCONV_IS_STDCALL)
        /* __stdcall: arguments are on the stack: pl at esp+4, n at esp+8 */
        mov ecx, dword ptr [esp + 4]    /* Load the address of pl into ecx */
        mov eax, dword ptr [esp + 8]    /* Load the value into eax, so the return value will be there waiting */

        xchg dword ptr [ecx], eax

        ret 8
#else
# error Need to define calling convention
#endif /* call-conv */
    }
}
/** \brief
 *
 * \ingroup group__library__synch
 */
WINSTL_ATOMIC_FNS_IMPL_(atomic_int_t) atomic_postadd_up(atomic_int_t volatile* /* pl */, atomic_int_t /* n */)
{
    /* Thanks to Eugene Gershnik for the fast-call implementation */
    __asm
    {
#if defined(WINSTL_ATOMIC_FNS_CALLCONV_IS_FASTCALL)
        /* __fastcall: ecx is pl, edx is n */

        /* Simply atomically add them, which will leave the previous value
         * in edx
         */
        xadd dword ptr [ecx], edx

        /* Just need to move edx into eax to return it */
        mov eax, edx

        ret
#elif defined(WINSTL_ATOMIC_FNS_CALLCONV_IS_STDCALL)
        /* __stdcall: arguments are on the stack: pl at esp+4, n at esp+8 */

        /* Simply atomically add them, which will leave the previous value
         * in eax
         */
        mov ecx, dword ptr [esp + 4]    /* Load the address of pl into ecx */
        mov eax, dword ptr [esp + 8]    /* Load the value into eax, so the return value will be there waiting */

        xadd dword ptr [ecx], eax

        /* The previous value is already in eax, so there is nothing more to do */
        ret 8
#else
# error Need to define calling convention
#endif /* call-conv */
    }
}
/* Symmetric multi-processor */

/** \brief
 *
 * \ingroup group__library__synch
 */
WINSTL_ATOMIC_FNS_IMPL_(atomic_int_t) atomic_preincrement_smp(atomic_int_t volatile* /* pl */)
{
    _asm
    {
        /* Put 1 into eax, which can then be atomically added into *pl (held
         * in ecx). Since it's an xadd it exchanges the previous value into eax
         */
        mov eax, 1

#if defined(WINSTL_ATOMIC_FNS_CALLCONV_IS_FASTCALL)
        /* __fastcall: ecx is pl */
#elif defined(WINSTL_ATOMIC_FNS_CALLCONV_IS_STDCALL)
        /* __stdcall: arguments are on the stack */
        mov ecx, dword ptr [esp + 4]
#else
# error Need to define calling convention
#endif /* call-conv */

        lock xadd dword ptr [ecx], eax

        /* Since this is pre-increment, we need to inc eax to catch up with the
         * real value
         */
        inc eax

#if defined(WINSTL_ATOMIC_FNS_CALLCONV_IS_FASTCALL)
        ret
#elif defined(WINSTL_ATOMIC_FNS_CALLCONV_IS_STDCALL)
        ret 4
#endif /* call-conv */
    }
}

/** \brief
 *
 * \ingroup group__library__synch
 */
WINSTL_ATOMIC_FNS_IMPL_(atomic_int_t) atomic_predecrement_smp(atomic_int_t volatile* /* pl */)
{
    _asm
    {
        /* Put -1 into eax, which can then be atomically added into *pl (held
         * in ecx). Since it's an xadd it exchanges the previous value into eax
         */
        mov eax, -1

#if defined(WINSTL_ATOMIC_FNS_CALLCONV_IS_FASTCALL)
        /* __fastcall: ecx is pl */
#elif defined(WINSTL_ATOMIC_FNS_CALLCONV_IS_STDCALL)
        /* __stdcall: arguments are on the stack */
        mov ecx, dword ptr [esp + 4]
#else
# error Need to define calling convention
#endif /* call-conv */

        lock xadd dword ptr [ecx], eax

        /* Since this is pre-decrement, we need to dec eax to catch up with the
         * real value
         */
        dec eax

#if defined(WINSTL_ATOMIC_FNS_CALLCONV_IS_FASTCALL)
        ret
#elif defined(WINSTL_ATOMIC_FNS_CALLCONV_IS_STDCALL)
        ret 4
#endif /* call-conv */
    }
}

/** \brief
 *
 * \ingroup group__library__synch
 */
WINSTL_ATOMIC_FNS_IMPL_(atomic_int_t) atomic_postincrement_smp(atomic_int_t volatile* /* pl */)
{
    _asm
    {
        /* Put 1 into eax, which can then be atomically added into *pl (held
         * in ecx). Since it's an xadd it exchanges the previous value into eax
         */
        mov eax, 1

#if defined(WINSTL_ATOMIC_FNS_CALLCONV_IS_FASTCALL)
        /* __fastcall: ecx is pl */
#elif defined(WINSTL_ATOMIC_FNS_CALLCONV_IS_STDCALL)
        /* __stdcall: arguments are on the stack */
        mov ecx, dword ptr [esp + 4]
#else
# error Need to define calling convention
#endif /* call-conv */

        lock xadd dword ptr [ecx], eax

        /* Since this is post-increment, we need do nothing, since the previous
         * value is in eax
         */

#if defined(WINSTL_ATOMIC_FNS_CALLCONV_IS_FASTCALL)
        ret
#elif defined(WINSTL_ATOMIC_FNS_CALLCONV_IS_STDCALL)
        ret 4
#endif /* call-conv */
    }
}

/** \brief
 *
 * \ingroup group__library__synch
 */
WINSTL_ATOMIC_FNS_IMPL_(atomic_int_t) atomic_postdecrement_smp(atomic_int_t volatile* /* pl */)
{
    _asm
    {
        /* Put -1 into eax, which can then be atomically added into *pl (held
         * in ecx). Since it's an xadd it exchanges the previous value into eax
         */
        mov eax, -1

#if defined(WINSTL_ATOMIC_FNS_CALLCONV_IS_FASTCALL)
        /* __fastcall: ecx is pl */
#elif defined(WINSTL_ATOMIC_FNS_CALLCONV_IS_STDCALL)
        /* __stdcall: arguments are on the stack */
        mov ecx, dword ptr [esp + 4]
#else
# error Need to define calling convention
#endif /* call-conv */

        lock xadd dword ptr [ecx], eax

        /* Since this is post-decrement, we need do nothing, since the previous
         * value is in eax
         */

#if defined(WINSTL_ATOMIC_FNS_CALLCONV_IS_FASTCALL)
        ret
#elif defined(WINSTL_ATOMIC_FNS_CALLCONV_IS_STDCALL)
        ret 4
#endif /* call-conv */
    }
}
/** \brief
 *
 * \ingroup group__library__synch
 */
WINSTL_ATOMIC_FNS_IMPL_(atomic_int_t) atomic_read_smp(atomic_int_t volatile const* /* pl */)
{
    _asm
    {
        mov eax, 0

#if defined(WINSTL_ATOMIC_FNS_CALLCONV_IS_FASTCALL)
        /* __fastcall: ecx is pl */
#elif defined(WINSTL_ATOMIC_FNS_CALLCONV_IS_STDCALL)
        /* __stdcall: arguments are on the stack */
        mov ecx, dword ptr [esp + 4]
#else
# error Need to define calling convention
#endif /* call-conv */

        /* Atomically add the 0 in eax into *pl (held in ecx), leaving the
         * value unchanged.
         */
        lock xadd dword ptr [ecx], eax

        /* Since it's an xadd it exchanges the previous value into eax, which
         * is exactly what's required
         */

#if defined(WINSTL_ATOMIC_FNS_CALLCONV_IS_FASTCALL)
        ret
#elif defined(WINSTL_ATOMIC_FNS_CALLCONV_IS_STDCALL)
        ret 4
#endif /* call-conv */
    }
}

/** \brief
 *
 * \ingroup group__library__synch
 */
WINSTL_ATOMIC_FNS_IMPL_(atomic_int_t) atomic_write_smp(atomic_int_t volatile* /* pl */, atomic_int_t /* n */)
{
    _asm
    {
#if defined(WINSTL_ATOMIC_FNS_CALLCONV_IS_FASTCALL)
        /* __fastcall: ecx is pl, edx is n */

        /* Just exchange *pl and n; no lock prefix is needed, since xchg with
         * a memory operand asserts the bus lock implicitly
         */
        xchg dword ptr [ecx], edx

        /* The previous value goes into edx, so we move it into eax for return */
        mov eax, edx

        ret
#elif defined(WINSTL_ATOMIC_FNS_CALLCONV_IS_STDCALL)
        /* __stdcall: arguments are on the stack: pl at esp+4, n at esp+8 */
        mov ecx, dword ptr [esp + 4]    /* Load the address of pl into ecx */
        mov eax, dword ptr [esp + 8]    /* Load the value into eax, so the return value will be there waiting */

        xchg dword ptr [ecx], eax

        ret 8
#else
# error Need to define calling convention
#endif /* call-conv */
    }
}

/** \brief
 *
 * \ingroup group__library__synch
 */
WINSTL_ATOMIC_FNS_IMPL_(atomic_int_t) atomic_postadd_smp(atomic_int_t volatile* /* pl */, atomic_int_t /* n */)
{
    /* Thanks to Eugene Gershnik for the fast-call implementation */
    __asm
    {
#if defined(WINSTL_ATOMIC_FNS_CALLCONV_IS_FASTCALL)
        /* __fastcall: ecx is pl, edx is n */

        /* Simply atomically add them, which will leave the previous value
         * in edx
         */
        lock xadd dword ptr [ecx], edx

        /* Just need to move edx into eax to return it */
        mov eax, edx

        ret
#elif defined(WINSTL_ATOMIC_FNS_CALLCONV_IS_STDCALL)
        /* __stdcall: arguments are on the stack: pl at esp+4, n at esp+8 */

        /* Simply atomically add them, which will leave the previous value
         * in eax
         */
        mov ecx, dword ptr [esp + 4]    /* Load the address of pl into ecx */
        mov eax, dword ptr [esp + 8]    /* Load the value into eax, so the return value will be there waiting */

        lock xadd dword ptr [ecx], eax

        /* The previous value is already in eax, so there is nothing more to do */
        ret 8
#else
# error Need to define calling convention
#endif /* call-conv */
    }
}
/* Processor detection */

namespace
{
    inline ws_bool_t is_host_up()
    {
        /* All these statics are guaranteed to be zero as a result of the module/process loading */
        static atomic_int_t s_spin; /* The spin variable */
        static ws_bool_t    s_init; /* This is guaranteed to be zero */
        static ws_bool_t    s_up;   /* This is the flag variable, also guaranteed to be zero */

        /* Simple spin lock */
        if(!s_init) /* Low cost pre-test. In the unlikely event that another thread does come in and */
        {           /* also see this as false, the dual initialisation of all three statics is benign */
            spin_mutex smx(&s_spin);

            smx.lock();
            if(!s_init)
            {
                SYSTEM_INFO sys_info;

                ::GetSystemInfo(&sys_info);

                s_init = true;

                s_up = 1 == sys_info.dwNumberOfProcessors;
            }
            smx.unlock();
        }

        return s_up;
    }

    /* s_up is guaranteed to be zero at load time.
     *
     * There is a race condition with all static variables, since multiple
     * threads can come in, and one can have set the hidden flag variable
     * prior to setting the static variable itself, just at the time that an
     * arbitrary number of other threads pick up the pre-initialised value.
     *
     * However, because the test here is whether to skip the lock, the
     * pathological case is benign. The only cost in the very rare case where
     * it happens is that the thread(s) will use bus locking until such time
     * as the static is fully initialised.
     */
    static ws_bool_t s_up = is_host_up();
}
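
/* The multi-processor detection variants below all share the same shape,
 * which in C-like pseudo-code (illustrative only) is:
 *
 *   if(s_up)
 *   {
 *       // uniprocessor: a plain add/xadd suffices, no bus lock needed
 *   }
 *   else
 *   {
 *       // SMP: the same instruction, with the lock prefix
 *   }
 */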
/** \brief
 *
 * \ingroup group__library__synch
 */
WINSTL_ATOMIC_FNS_IMPL_(atomic_int_t) atomic_preincrement(atomic_int_t volatile* /* pl */)
{
    if(s_up)
    {
        _asm
        {
            /* Put 1 into eax, which can then be atomically added into *pl (held
             * in ecx). Since it's an xadd it exchanges the previous value into eax
             */
            mov eax, 1

#if defined(WINSTL_ATOMIC_FNS_CALLCONV_IS_FASTCALL)
            /* __fastcall: ecx is pl */
#elif defined(WINSTL_ATOMIC_FNS_CALLCONV_IS_STDCALL)
            /* __stdcall: arguments are on the stack */
            mov ecx, dword ptr [esp + 4]
#else
# error Need to define calling convention
#endif /* call-conv */

            xadd dword ptr [ecx], eax

            /* Since this is pre-increment, we need to inc eax to catch up with the
             * real value
             */
            inc eax

#if defined(WINSTL_ATOMIC_FNS_CALLCONV_IS_FASTCALL)
            ret
#elif defined(WINSTL_ATOMIC_FNS_CALLCONV_IS_STDCALL)
            ret 4
#endif /* call-conv */
        }
    }
    else
    {
        _asm
        {
            /* Put 1 into eax, which can then be atomically added into *pl (held
             * in ecx). Since it's an xadd it exchanges the previous value into eax
             */
            mov eax, 1

#if defined(WINSTL_ATOMIC_FNS_CALLCONV_IS_FASTCALL)
            /* __fastcall: ecx is pl */
#elif defined(WINSTL_ATOMIC_FNS_CALLCONV_IS_STDCALL)
            /* __stdcall: arguments are on the stack */
            mov ecx, dword ptr [esp + 4]
#else
# error Need to define calling convention
#endif /* call-conv */

            lock xadd dword ptr [ecx], eax

            /* Since this is pre-increment, we need to inc eax to catch up with the
             * real value
             */
            inc eax

#if defined(WINSTL_ATOMIC_FNS_CALLCONV_IS_FASTCALL)
            ret
#elif defined(WINSTL_ATOMIC_FNS_CALLCONV_IS_STDCALL)
            ret 4
#endif /* call-conv */
        }
    }
}
/** \brief
 *
 * \ingroup group__library__synch
 */
WINSTL_ATOMIC_FNS_IMPL_(atomic_int_t) atomic_predecrement(atomic_int_t volatile* /* pl */)
{
    if(s_up)
    {
        _asm
        {
            /* Put -1 into eax, which can then be atomically added into *pl (held
             * in ecx). Since it's an xadd it exchanges the previous value into eax
             */
            mov eax, -1

#if defined(WINSTL_ATOMIC_FNS_CALLCONV_IS_FASTCALL)
            /* __fastcall: ecx is pl */
#elif defined(WINSTL_ATOMIC_FNS_CALLCONV_IS_STDCALL)
            /* __stdcall: arguments are on the stack */
            mov ecx, dword ptr [esp + 4]
#else
# error Need to define calling convention
#endif /* call-conv */

            xadd dword ptr [ecx], eax

            /* Since this is pre-decrement, we need to dec eax to catch up with the
             * real value
             */
            dec eax

#if defined(WINSTL_ATOMIC_FNS_CALLCONV_IS_FASTCALL)
            ret
#elif defined(WINSTL_ATOMIC_FNS_CALLCONV_IS_STDCALL)
            ret 4
#endif /* call-conv */
        }
    }
    else
    {
        _asm
        {
            /* Put -1 into eax, which can then be atomically added into *pl (held
             * in ecx). Since it's an xadd it exchanges the previous value into eax
             */
            mov eax, -1

#if defined(WINSTL_ATOMIC_FNS_CALLCONV_IS_FASTCALL)
            /* __fastcall: ecx is pl */
#elif defined(WINSTL_ATOMIC_FNS_CALLCONV_IS_STDCALL)
            /* __stdcall: arguments are on the stack */
            mov ecx, dword ptr [esp + 4]
#else
# error Need to define calling convention
#endif /* call-conv */

            lock xadd dword ptr [ecx], eax

            /* Since this is pre-decrement, we need to dec eax to catch up with the
             * real value
             */
            dec eax

#if defined(WINSTL_ATOMIC_FNS_CALLCONV_IS_FASTCALL)
            ret
#elif defined(WINSTL_ATOMIC_FNS_CALLCONV_IS_STDCALL)
            ret 4
#endif /* call-conv */
        }
    }
}
/** \brief
 *
 * \ingroup group__library__synch
 */
WINSTL_ATOMIC_FNS_IMPL_(atomic_int_t) atomic_postincrement(atomic_int_t volatile* /* pl */)
{
    if(s_up)
    {
        _asm
        {
            /* Put 1 into eax, which can then be atomically added into *pl (held
             * in ecx). Since it's an xadd it exchanges the previous value into eax
             */
            mov eax, 1

#if defined(WINSTL_ATOMIC_FNS_CALLCONV_IS_FASTCALL)
            /* __fastcall: ecx is pl */
#elif defined(WINSTL_ATOMIC_FNS_CALLCONV_IS_STDCALL)
            /* __stdcall: arguments are on the stack */
            mov ecx, dword ptr [esp + 4]
#else
# error Need to define calling convention
#endif /* call-conv */

            xadd dword ptr [ecx], eax

            /* Since this is post-increment, we need do nothing, since the previous
             * value is in eax
             */

#if defined(WINSTL_ATOMIC_FNS_CALLCONV_IS_FASTCALL)
            ret
#elif defined(WINSTL_ATOMIC_FNS_CALLCONV_IS_STDCALL)
            ret 4
#endif /* call-conv */
        }
    }
    else
    {
        _asm
        {
            /* Put 1 into eax, which can then be atomically added into *pl (held
             * in ecx). Since it's an xadd it exchanges the previous value into eax
             */
            mov eax, 1

#if defined(WINSTL_ATOMIC_FNS_CALLCONV_IS_FASTCALL)
            /* __fastcall: ecx is pl */
#elif defined(WINSTL_ATOMIC_FNS_CALLCONV_IS_STDCALL)
            /* __stdcall: arguments are on the stack */
            mov ecx, dword ptr [esp + 4]
#else
# error Need to define calling convention
#endif /* call-conv */

            lock xadd dword ptr [ecx], eax

            /* Since this is post-increment, we need do nothing, since the previous
             * value is in eax
             */

#if defined(WINSTL_ATOMIC_FNS_CALLCONV_IS_FASTCALL)
            ret
#elif defined(WINSTL_ATOMIC_FNS_CALLCONV_IS_STDCALL)
            ret 4
#endif /* call-conv */
        }
    }
}
/** \brief
 *
 * \ingroup group__library__synch
 */
WINSTL_ATOMIC_FNS_IMPL_(atomic_int_t) atomic_postdecrement(atomic_int_t volatile* /* pl */)
{
    if(s_up)
    {
        _asm
        {
            /* Put -1 into eax, which can then be atomically added into *pl (held
             * in ecx). Since it's an xadd it exchanges the previous value into eax
             */
            mov eax, -1

#if defined(WINSTL_ATOMIC_FNS_CALLCONV_IS_FASTCALL)
            /* __fastcall: ecx is pl */
#elif defined(WINSTL_ATOMIC_FNS_CALLCONV_IS_STDCALL)
            /* __stdcall: arguments are on the stack */
            mov ecx, dword ptr [esp + 4]
#else
# error Need to define calling convention
#endif /* call-conv */

            xadd dword ptr [ecx], eax

            /* Since this is post-decrement, we need do nothing, since the previous
             * value is in eax
             */

#if defined(WINSTL_ATOMIC_FNS_CALLCONV_IS_FASTCALL)
            ret
#elif defined(WINSTL_ATOMIC_FNS_CALLCONV_IS_STDCALL)
            ret 4
#endif /* call-conv */
        }
    }
    else
    {
        _asm
        {
            /* Put -1 into eax, which can then be atomically added into *pl (held
             * in ecx). Since it's an xadd it exchanges the previous value into eax
             */
            mov eax, -1

#if defined(WINSTL_ATOMIC_FNS_CALLCONV_IS_FASTCALL)
            /* __fastcall: ecx is pl */
#elif defined(WINSTL_ATOMIC_FNS_CALLCONV_IS_STDCALL)
            /* __stdcall: arguments are on the stack */
            mov ecx, dword ptr [esp + 4]
#else
# error Need to define calling convention
#endif /* call-conv */

            lock xadd dword ptr [ecx], eax

            /* Since this is post-decrement, we need do nothing, since the previous
             * value is in eax
             */

#if defined(WINSTL_ATOMIC_FNS_CALLCONV_IS_FASTCALL)
            ret
#elif defined(WINSTL_ATOMIC_FNS_CALLCONV_IS_STDCALL)
            ret 4
#endif /* call-conv */
        }
    }
}
/** \brief
 *
 * \ingroup group__library__synch
 */
WINSTL_ATOMIC_FNS_IMPL_(void) atomic_increment(atomic_int_t volatile* /* pl */)
{
    if(s_up)
    {
        _asm
        {
#if defined(WINSTL_ATOMIC_FNS_CALLCONV_IS_FASTCALL)
            /* __fastcall: ecx is pl */
#elif defined(WINSTL_ATOMIC_FNS_CALLCONV_IS_STDCALL)
            /* __stdcall: arguments are on the stack */
            mov ecx, dword ptr [esp + 4]
#else
# error Need to define calling convention
#endif /* call-conv */

            add dword ptr [ecx], 1

#if defined(WINSTL_ATOMIC_FNS_CALLCONV_IS_FASTCALL)
            ret
#elif defined(WINSTL_ATOMIC_FNS_CALLCONV_IS_STDCALL)
            ret 4
#endif /* call-conv */
        }
    }
    else
    {
        _asm
        {
#if defined(WINSTL_ATOMIC_FNS_CALLCONV_IS_FASTCALL)
            /* __fastcall: ecx is pl */
#elif defined(WINSTL_ATOMIC_FNS_CALLCONV_IS_STDCALL)
            /* __stdcall: arguments are on the stack */
            mov ecx, dword ptr [esp + 4]
#else
# error Need to define calling convention
#endif /* call-conv */

            /* The IA-32 Intel Architecture Software Developer's Manual, volume 2
             * states that a LOCK can be prefixed to ADD, but CodePlay VectorC
             * has a problem with it.
             */
#if defined(STLSOFT_COMPILER_IS_VECTORC)
            mov eax, 1
            lock xadd dword ptr [ecx], eax
#else /* ? compiler */
            lock add dword ptr [ecx], 1
#endif /* compiler */

#if defined(WINSTL_ATOMIC_FNS_CALLCONV_IS_FASTCALL)
            ret
#elif defined(WINSTL_ATOMIC_FNS_CALLCONV_IS_STDCALL)
            ret 4
#endif /* call-conv */
        }
    }
}
/** \brief
 *
 * \ingroup group__library__synch
 */
WINSTL_ATOMIC_FNS_IMPL_(void) atomic_decrement(atomic_int_t volatile* /* pl */)
{
    if(s_up)
    {
        _asm
        {
#if defined(WINSTL_ATOMIC_FNS_CALLCONV_IS_FASTCALL)
            /* __fastcall: ecx is pl */
#elif defined(WINSTL_ATOMIC_FNS_CALLCONV_IS_STDCALL)
            /* __stdcall: arguments are on the stack */
            mov ecx, dword ptr [esp + 4]
#else
# error Need to define calling convention
#endif /* call-conv */

            add dword ptr [ecx], -1

#if defined(WINSTL_ATOMIC_FNS_CALLCONV_IS_FASTCALL)
            ret
#elif defined(WINSTL_ATOMIC_FNS_CALLCONV_IS_STDCALL)
            ret 4
#endif /* call-conv */
        }
    }
    else
    {
        _asm
        {
#if defined(WINSTL_ATOMIC_FNS_CALLCONV_IS_FASTCALL)
            /* __fastcall: ecx is pl */
#elif defined(WINSTL_ATOMIC_FNS_CALLCONV_IS_STDCALL)
            /* __stdcall: arguments are on the stack */
            mov ecx, dword ptr [esp + 4]
#else
# error Need to define calling convention
#endif /* call-conv */

#if defined(STLSOFT_COMPILER_IS_VECTORC)
            mov eax, -1
            lock xadd dword ptr [ecx], eax
#else /* ? compiler */
            lock sub dword ptr [ecx], 1
#endif /* compiler */

#if defined(WINSTL_ATOMIC_FNS_CALLCONV_IS_FASTCALL)
            ret
#elif defined(WINSTL_ATOMIC_FNS_CALLCONV_IS_STDCALL)
            ret 4
#endif /* call-conv */
        }
    }
}
/** \brief
 *
 * \ingroup group__library__synch
 */
WINSTL_ATOMIC_FNS_IMPL_(atomic_int_t) atomic_read(atomic_int_t volatile const* /* pl */)
{
    if(s_up)
    {
        _asm
        {
            mov eax, 0

#if defined(WINSTL_ATOMIC_FNS_CALLCONV_IS_FASTCALL)
            /* __fastcall: ecx is pl */
#elif defined(WINSTL_ATOMIC_FNS_CALLCONV_IS_STDCALL)
            /* __stdcall: arguments are on the stack */
            mov ecx, dword ptr [esp + 4]
#else
# error Need to define calling convention
#endif /* call-conv */

            /* Atomically add the 0 in eax into *pl (held in ecx), leaving the
             * value unchanged.
             */
            xadd dword ptr [ecx], eax

            /* Since it's an xadd it exchanges the previous value into eax, which
             * is exactly what's required
             */

#if defined(WINSTL_ATOMIC_FNS_CALLCONV_IS_FASTCALL)
            ret
#elif defined(WINSTL_ATOMIC_FNS_CALLCONV_IS_STDCALL)
            ret 4
#endif /* call-conv */
        }
    }
    else
    {
        _asm
        {
            mov eax, 0

#if defined(WINSTL_ATOMIC_FNS_CALLCONV_IS_FASTCALL)
            /* __fastcall: ecx is pl */
#elif defined(WINSTL_ATOMIC_FNS_CALLCONV_IS_STDCALL)
            /* __stdcall: arguments are on the stack */
            mov ecx, dword ptr [esp + 4]
#else
# error Need to define calling convention
#endif /* call-conv */

            /* Atomically add the 0 in eax into *pl (held in ecx), leaving the
             * value unchanged.
             */
            lock xadd dword ptr [ecx], eax

            /* Since it's an xadd it exchanges the previous value into eax, which
             * is exactly what's required
             */

#if defined(WINSTL_ATOMIC_FNS_CALLCONV_IS_FASTCALL)
            ret
#elif defined(WINSTL_ATOMIC_FNS_CALLCONV_IS_STDCALL)
            ret 4
#endif /* call-conv */
        }
    }
}
/** \brief
 *
 * \ingroup group__library__synch
 */
WINSTL_ATOMIC_FNS_IMPL_(atomic_int_t) atomic_write(atomic_int_t volatile* /* pl */, atomic_int_t /* n */)
{
    _asm
    {
#if defined(WINSTL_ATOMIC_FNS_CALLCONV_IS_FASTCALL)
        /* __fastcall: ecx is pl, edx is n */

        /* Just exchange *pl and n */
        lock xchg dword ptr [ecx], edx

        /* The previous value goes into edx, so we move it into eax for return */
        mov eax, edx

        ret
#elif defined(WINSTL_ATOMIC_FNS_CALLCONV_IS_STDCALL)
        /* __stdcall: arguments are on the stack: pl at esp+4, n at esp+8 */
        mov ecx, dword ptr [esp + 4]    /* Load the address of pl into ecx */
        mov eax, dword ptr [esp + 8]    /* Load the value into eax, so the return value will be there waiting */

        xchg dword ptr [ecx], eax

        ret 8
#else
# error Need to define calling convention
#endif /* call-conv */
    }
}
/** \brief
 *
 * \ingroup group__library__synch
 */
WINSTL_ATOMIC_FNS_IMPL_(atomic_int_t) atomic_postadd(atomic_int_t volatile* /* pl */, atomic_int_t /* n */)
{
    /* Thanks to Eugene Gershnik for the fast-call implementation */
    if(s_up)
    {
        __asm
        {
#if defined(WINSTL_ATOMIC_FNS_CALLCONV_IS_FASTCALL)
            /* __fastcall: ecx is pl, edx is n */

            /* Simply atomically add them, which will leave the previous value
             * in edx
             */
            xadd dword ptr [ecx], edx

            /* Just need to move edx into eax to return it */
            mov eax, edx

            ret
#elif defined(WINSTL_ATOMIC_FNS_CALLCONV_IS_STDCALL)
            /* __stdcall: arguments are on the stack: pl at esp+4, n at esp+8 */

            /* Simply atomically add them, which will leave the previous value
             * in eax
             */
            mov ecx, dword ptr [esp + 4]    /* Load the address of pl into ecx */
            mov eax, dword ptr [esp + 8]    /* Load the value into eax, so the return value will be there waiting */

            xadd dword ptr [ecx], eax

            /* The previous value is already in eax, so there is nothing more to do */
            ret 8
#else
# error Need to define calling convention
#endif /* call-conv */
        }
    }
    else
    {
        __asm
        {
#if defined(WINSTL_ATOMIC_FNS_CALLCONV_IS_FASTCALL)
            /* __fastcall: ecx is pl, edx is n */

            /* Simply atomically add them, which will leave the previous value
             * in edx
             */
            lock xadd dword ptr [ecx], edx

            /* Just need to move edx into eax to return it */
            mov eax, edx

            ret
#elif defined(WINSTL_ATOMIC_FNS_CALLCONV_IS_STDCALL)
            /* __stdcall: arguments are on the stack: pl at esp+4, n at esp+8 */

            /* Simply atomically add them, which will leave the previous value
             * in eax
             */
            mov ecx, dword ptr [esp + 4]    /* Load the address of pl into ecx */
            mov eax, dword ptr [esp + 8]    /* Load the value into eax, so the return value will be there waiting */

            lock xadd dword ptr [ecx], eax

            /* The previous value is already in eax, so there is nothing more to do */
            ret 8
#else
# error Need to define calling convention
#endif /* call-conv */
        }
    }
}
#ifdef STLSOFT_COMPILER_IS_BORLAND
# pragma warn .8070 /* Restores: "Function should return a value" */
# pragma warn .8002 /* Restores: "Restarting compile using assembly" */
#endif /* compiler */

#  else /* STSLSOFT_INLINE_ASM_SUPPORTED */

/* Non-assembler versions
 *
 * These use the Win32 Interlocked functions. These are not guaranteed to
 * give precise answers on Windows 95.
 */

/* Multi-processor detection variants */
/** \brief
 *
 * \ingroup group__library__synch
 */
WINSTL_ATOMIC_FNS_IMPL_(atomic_int_t) atomic_preincrement(atomic_int_t volatile* pl)
{
#if defined(WINSTL_OS_IS_WIN32)
    return STLSOFT_NS_GLOBAL(InterlockedIncrement)((LPLONG)pl);
#elif defined(WINSTL_OS_IS_WIN64)
    return STLSOFT_NS_GLOBAL(InterlockedIncrement64)((LONGLONG*)pl);
#else /* ? arch */
# error Not valid for processors other than Intel
#endif /* Win32 || Win64 */
}
/** \brief
 *
 * \ingroup group__library__synch
 */
WINSTL_ATOMIC_FNS_IMPL_(atomic_int_t) atomic_predecrement(atomic_int_t volatile* pl)
{
#if defined(WINSTL_OS_IS_WIN32)
    return STLSOFT_NS_GLOBAL(InterlockedDecrement)((LPLONG)pl);
#elif defined(WINSTL_OS_IS_WIN64)
    return STLSOFT_NS_GLOBAL(InterlockedDecrement64)((LONGLONG*)pl);
#else /* ? arch */
# error Not valid for processors other than Intel
#endif /* Win32 || Win64 */
}
/** \brief
 *
 * \ingroup group__library__synch
 */
WINSTL_ATOMIC_FNS_IMPL_(atomic_int_t) atomic_postincrement(atomic_int_t volatile* pl)
{
    /* The exchange-add returns the previous value, which is precisely the
     * post-increment result; reading *pl separately and then incrementing
     * would race with concurrent writers.
     */
#if defined(WINSTL_OS_IS_WIN32)
    return (atomic_int_t)STLSOFT_NS_GLOBAL(InterlockedExchangeAdd)((LPLONG)pl, 1);
#elif defined(WINSTL_OS_IS_WIN64)
    return (atomic_int_t)STLSOFT_NS_GLOBAL(InterlockedExchangeAdd64)((LONGLONG*)pl, 1);
#else /* ? arch */
# error Not valid for processors other than Intel
#endif /* Win32 || Win64 */
}

/** \brief
 *
 * \ingroup group__library__synch
 */
WINSTL_ATOMIC_FNS_IMPL_(atomic_int_t) atomic_postdecrement(atomic_int_t volatile* pl)
{
#if defined(WINSTL_OS_IS_WIN32)
    return (atomic_int_t)STLSOFT_NS_GLOBAL(InterlockedExchangeAdd)((LPLONG)pl, -1);
#elif defined(WINSTL_OS_IS_WIN64)
    return (atomic_int_t)STLSOFT_NS_GLOBAL(InterlockedExchangeAdd64)((LONGLONG*)pl, -1);
#else /* ? arch */
# error Not valid for processors other than Intel
#endif /* Win32 || Win64 */
}
/** \brief
 *
 * \ingroup group__library__synch
 */
WINSTL_ATOMIC_FNS_IMPL_(void) atomic_increment(atomic_int_t volatile* pl)
{
#if defined(WINSTL_OS_IS_WIN32)
    STLSOFT_NS_GLOBAL(InterlockedIncrement)((LPLONG)pl);
#elif defined(WINSTL_OS_IS_WIN64)
    STLSOFT_NS_GLOBAL(InterlockedIncrement64)((LONGLONG*)pl);
#else /* ? arch */
# error Not valid for processors other than Intel
#endif /* Win32 || Win64 */
}

/** \brief
 *
 * \ingroup group__library__synch
 */
WINSTL_ATOMIC_FNS_IMPL_(void) atomic_decrement(atomic_int_t volatile* pl)
{
#if defined(WINSTL_OS_IS_WIN32)
    STLSOFT_NS_GLOBAL(InterlockedDecrement)((LPLONG)pl);
#elif defined(WINSTL_OS_IS_WIN64)
    STLSOFT_NS_GLOBAL(InterlockedDecrement64)((LONGLONG*)pl);
#else /* ? arch */
# error Not valid for processors other than Intel
#endif /* Win32 || Win64 */
}

/** \brief
 *
 * \ingroup group__library__synch
 */
WINSTL_ATOMIC_FNS_IMPL_(atomic_int_t) atomic_write(atomic_int_t volatile* pl, atomic_int_t n)
{
#if defined(WINSTL_OS_IS_WIN32)
    return STLSOFT_NS_GLOBAL(InterlockedExchange)((LPLONG)pl, n);
#elif defined(WINSTL_OS_IS_WIN64)
    return STLSOFT_NS_GLOBAL(InterlockedExchange64)((LONGLONG*)pl, n);
#else /* ? arch */
# error Not valid for processors other than Intel
#endif /* Win32 || Win64 */
}

/** \brief
 *
 * \ingroup group__library__synch
 */
WINSTL_ATOMIC_FNS_IMPL_(atomic_int_t) atomic_read(atomic_int_t volatile const* pl)
{
    return *pl;
}

/** \brief
 *
 * \ingroup group__library__synch
 */
WINSTL_ATOMIC_FNS_IMPL_(atomic_int_t) atomic_postadd(atomic_int_t volatile* pl, atomic_int_t n)
{
#if defined(WINSTL_OS_IS_WIN32)
    return (atomic_int_t)STLSOFT_NS_GLOBAL(InterlockedExchangeAdd)((LPLONG)pl, n);
#elif defined(WINSTL_OS_IS_WIN64)
    return (atomic_int_t)STLSOFT_NS_GLOBAL(InterlockedExchangeAdd64)((LONGLONG*)pl, n);
#else /* ? arch */
# error Not valid for processors other than Intel
#endif /* Win32 || Win64 */
}
/* Uni-processor variants */

/** \brief
 *
 * \ingroup group__library__synch
 */
WINSTL_ATOMIC_FNS_IMPL_(atomic_int_t) atomic_preincrement_up(atomic_int_t volatile* pl)
{
    return atomic_preincrement(pl);
}

/** \brief
 *
 * \ingroup group__library__synch
 */
WINSTL_ATOMIC_FNS_IMPL_(atomic_int_t) atomic_predecrement_up(atomic_int_t volatile* pl)
{
    return atomic_predecrement(pl);
}

/** \brief
 *
 * \ingroup group__library__synch
 */
WINSTL_ATOMIC_FNS_IMPL_(atomic_int_t) atomic_postincrement_up(atomic_int_t volatile* pl)
{
    return atomic_postincrement(pl);
}

/** \brief
 *
 * \ingroup group__library__synch
 */
WINSTL_ATOMIC_FNS_IMPL_(atomic_int_t) atomic_postdecrement_up(atomic_int_t volatile* pl)
{
    return atomic_postdecrement(pl);
}

/** \brief
 *
 * \ingroup group__library__synch
 */
WINSTL_ATOMIC_FNS_IMPL_(void) atomic_increment_up(atomic_int_t volatile* pl)
{
    atomic_increment(pl);
}

/** \brief
 *
 * \ingroup group__library__synch
 */
WINSTL_ATOMIC_FNS_IMPL_(void) atomic_decrement_up(atomic_int_t volatile* pl)
{
    atomic_decrement(pl);
}

/** \brief
 *
 * \ingroup group__library__synch
 */
WINSTL_ATOMIC_FNS_IMPL_(atomic_int_t) atomic_write_up(atomic_int_t volatile* pl, atomic_int_t n)
{
    return atomic_write(pl, n);
}

/** \brief
 *
 * \ingroup group__library__synch
 */
WINSTL_ATOMIC_FNS_IMPL_(atomic_int_t) atomic_read_up(atomic_int_t volatile const* pl)
{
    return *pl;
}

/** \brief
 *
 * \ingroup group__library__synch
 */
WINSTL_ATOMIC_FNS_IMPL_(atomic_int_t) atomic_postadd_up(atomic_int_t volatile* pl, atomic_int_t n)
{
#if defined(WINSTL_OS_IS_WIN32)
    return (atomic_int_t)STLSOFT_NS_GLOBAL(InterlockedExchangeAdd)((LPLONG)pl, n);
#elif defined(WINSTL_OS_IS_WIN64)
    return (atomic_int_t)STLSOFT_NS_GLOBAL(InterlockedExchangeAdd64)((LONGLONG*)pl, n);
#else /* ? arch */
# error Not valid for processors other than Intel
#endif /* Win32 || Win64 */
}
/* SMP variants */

/** \brief
 *
 * \ingroup group__library__synch
 */
WINSTL_ATOMIC_FNS_IMPL_(atomic_int_t) atomic_preincrement_smp(atomic_int_t volatile* pl)
{
    return atomic_preincrement(pl);
}

/** \brief
 *
 * \ingroup group__library__synch
 */
WINSTL_ATOMIC_FNS_IMPL_(atomic_int_t) atomic_predecrement_smp(atomic_int_t volatile* pl)
{
    return atomic_predecrement(pl);
}

/** \brief
 *
 * \ingroup group__library__synch
 */
WINSTL_ATOMIC_FNS_IMPL_(atomic_int_t) atomic_postincrement_smp(atomic_int_t volatile* pl)
{
    return atomic_postincrement(pl);
}

/** \brief
 *
 * \ingroup group__library__synch
 */
WINSTL_ATOMIC_FNS_IMPL_(atomic_int_t) atomic_postdecrement_smp(atomic_int_t volatile* pl)
{
    return atomic_postdecrement(pl);
}

/** \brief
 *
 * \ingroup group__library__synch
 */
WINSTL_ATOMIC_FNS_IMPL_(atomic_int_t) atomic_write_smp(atomic_int_t volatile* pl, atomic_int_t n)
{
    return atomic_write(pl, n);
}

/** \brief
 *
 * \ingroup group__library__synch
 */
WINSTL_ATOMIC_FNS_IMPL_(atomic_int_t) atomic_read_smp(atomic_int_t volatile const* pl)
{
    return *pl;
}

/** \brief
 *
 * \ingroup group__library__synch
 */
WINSTL_ATOMIC_FNS_IMPL_(atomic_int_t) atomic_postadd_smp(atomic_int_t volatile* pl, atomic_int_t n)
{
#if defined(WINSTL_OS_IS_WIN32)
    return (atomic_int_t)STLSOFT_NS_GLOBAL(InterlockedExchangeAdd)((LPLONG)pl, n);
#elif defined(WINSTL_OS_IS_WIN64)
    return (atomic_int_t)STLSOFT_NS_GLOBAL(InterlockedExchangeAdd64)((LONGLONG*)pl, n);
#else /* ? arch */
# error Not valid for processors other than Intel
#endif /* Win32 || Win64 */
}

#  endif /* STSLSOFT_INLINE_ASM_SUPPORTED */
# endif /* !WINSTL_ATOMIC_FNS_DECLARATION_ONLY */
/* /////////////////////////////////////////////////////////////////////////
 * Other inline atomic functions
 */

/** \brief
 *
 * \ingroup group__library__synch
 */
STLSOFT_INLINE atomic_int_t atomic_preadd_up(atomic_int_t volatile* pl, atomic_int_t n)
{
    return n + atomic_postadd_up(pl, n);
}
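
/* Worked example (illustrative): with *pl == 10, atomic_preadd_up(pl, 5)
 * performs the post-add, which returns 10 and leaves *pl == 15, and then
 * adds n, yielding 15, the value *pl holds after the addition.
 */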
/** \brief
 *
 * \ingroup group__library__synch
 */
STLSOFT_INLINE void atomic_increment_smp(atomic_int_t volatile* pl)
{
    atomic_postincrement_smp(pl);
}

/** \brief
 *
 * \ingroup group__library__synch
 */
STLSOFT_INLINE void atomic_decrement_smp(atomic_int_t volatile* pl)
{
    atomic_postdecrement_smp(pl);
}

/** \brief
 *
 * \ingroup group__library__synch
 */
STLSOFT_INLINE atomic_int_t atomic_preadd_smp(atomic_int_t volatile* pl, atomic_int_t n)
{
    return n + atomic_postadd_smp(pl, n);
}

/** \brief
 *
 * \ingroup group__library__synch
 */
STLSOFT_INLINE atomic_int_t atomic_preadd(atomic_int_t volatile* pl, atomic_int_t n)
{
    return n + atomic_postadd(pl, n);
}

#endif /* !STLSOFT_DOCUMENTATION_SKIP_SECTION */

/* /////////////////////////////////////////////////////////////////////////
 * Unit-testing
 */

#ifdef STLSOFT_UNITTEST
# include "./unittest/atomic_functions_unittest_.h"
#endif /* STLSOFT_UNITTEST */

/* ////////////////////////////////////////////////////////////////////// */

#ifndef _WINSTL_NO_NAMESPACE
# if defined(_STLSOFT_NO_NAMESPACE) || \
     defined(STLSOFT_DOCUMENTATION_SKIP_SECTION)
} /* namespace winstl */
# else
} /* namespace winstl_project */
} /* namespace stlsoft */
# endif /* _STLSOFT_NO_NAMESPACE */
#endif /* !_WINSTL_NO_NAMESPACE */

/* ////////////////////////////////////////////////////////////////////// */

#endif /* !WINSTL_INCL_WINSTL_SYNCH_H_ATOMIC_FUNCTIONS */

/* ///////////////////////////// end of file //////////////////////////// */