| 1 | /* |
| 2 | * ========== Copyright Header Begin ========================================== |
| 3 | * |
| 4 | * OpenSPARC T2 Processor File: atomic.h |
| 5 | * Copyright (c) 2006 Sun Microsystems, Inc. All Rights Reserved. |
| 6 | * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES. |
| 7 | * |
| 8 | * The above named program is free software; you can redistribute it and/or |
| 9 | * modify it under the terms of the GNU General Public |
| 10 | * License version 2 as published by the Free Software Foundation. |
| 11 | * |
| 12 | * The above named program is distributed in the hope that it will be |
| 13 | * useful, but WITHOUT ANY WARRANTY; without even the implied warranty of |
| 14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU |
| 15 | * General Public License for more details. |
| 16 | * |
| 17 | * You should have received a copy of the GNU General Public |
| 18 | * License along with this work; if not, write to the Free Software |
| 19 | * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA. |
| 20 | * |
| 21 | * ========== Copyright Header End ============================================ |
| 22 | */ |
| 23 | /* |
| 24 | * "atomic.h" |
| 25 | * |
| 26 | * atomic arithmetic and logical ops |
| 27 | * all atomic ops return the PREVIOUS value. |
| 28 | * |
| 29 | * barrier sync using atomics and spinloops, |
| 30 | * for when #workerthreads <= #hostcpus only, otherwise |
 * performance will be _severely_ degraded...
| 32 | * and requires TSO memory as currently coded w/o any membars. |
| 33 | * |
| 34 | */ |
| 35 | #ifndef _ATOMIC_H |
| 36 | #define _ATOMIC_H |
| 37 | |
| 38 | #ifdef __cplusplus |
| 39 | extern "C" { |
| 40 | #endif |
| 41 | |
| 42 | extern int32_t atomic_add_32 (volatile int32_t * variable, const int32_t value); |
| 43 | extern int32_t atomic_sub_32 (volatile int32_t * variable, const int32_t value); |
| 44 | extern int32_t atomic_and_32 (volatile int32_t * variable, const int32_t value); |
| 45 | extern int32_t atomic_or_32 (volatile int32_t * variable, const int32_t value); |
| 46 | extern int32_t atomic_xor_32 (volatile int32_t * variable, const int32_t value); |
| 47 | |
| 48 | |
| 49 | |
/*
 * atomic_barrier
 *
 * Sense-reversing spin barrier built on atomic_add_32 (which, per the
 * contract above, returns the PREVIOUS value of *doneCount).
 *
 *   numThreads - number of participating threads (read once on entry)
 *   doneCount  - arrival counter, reset to 0 by the last arrival
 *   doneLock   - the "sense" word; flipping it releases the spinners
 *   returnVal  - value supplied by the caller of the last arrival
 *   tempVal    - latched copy of *returnVal that every thread returns
 *
 * Returns *tempVal: identical for all threads in one barrier episode.
 *
 * NOTE: declared `static inline` (not bare `inline`) because this is
 * defined in a header: plain C99 `inline` with no external definition
 * gives undefined references when the compiler does not inline the
 * call, and gnu89/c99 `inline` semantics differ.  `static inline` is
 * portable and gives each translation unit its own copy.
 *
 * Only safe when #workerthreads <= #hostcpus (spinners burn a CPU),
 * and relies on TSO memory ordering -- no membars are issued.
 */
static inline int32_t atomic_barrier (volatile int32_t * numThreads,
			volatile int32_t * doneCount,
			volatile int32_t * doneLock,
			volatile int32_t * returnVal,
			volatile int32_t * tempVal)
{
	int32_t sense = *doneLock;	/* barrier sense observed on entry */
	int32_t last  = *numThreads - 1;	/* previous-count value seen by last arrival */

	if (atomic_add_32 (doneCount, 1) < last) {	/* early arrivals */

		while (*doneLock == sense)	/* spin until sense flips */
			;

	} else {			/* last arrival */

		*doneCount = 0;		/* reset counter for next episode */
		*tempVal = *returnVal;	/* latch shared return value */
		/* Flip the sense LAST: under TSO the stores above are
		 * visible before this release store -- no membar needed. */
		*doneLock = !sense;
	}

	return *tempVal;	/* everybody gets identical return value */
}
| 72 | |
| 73 | |
| 74 | |
| 75 | extern int64_t atomic_add_64 (volatile int64_t * variable, const int64_t value); |
| 76 | extern int64_t atomic_sub_64 (volatile int64_t * variable, const int64_t value); |
| 77 | extern int64_t atomic_and_64 (volatile int64_t * variable, const int64_t value); |
| 78 | extern int64_t atomic_or_64 (volatile int64_t * variable, const int64_t value); |
| 79 | extern int64_t atomic_xor_64 (volatile int64_t * variable, const int64_t value); |
| 80 | |
| 81 | #ifdef __cplusplus |
| 82 | } |
| 83 | #endif |
| 84 | |
| 85 | #endif/*_ATOMIC_H*/ |
| 86 | |
| 87 | |
| 88 | |