Commit | Line | Data |
---|---|---|
920dae64 AT |
1 | /* |
2 | * ========== Copyright Header Begin ========================================== | |
3 | * | |
4 | * OpenSPARC T2 Processor File: atomic.s | |
5 | * Copyright (c) 2006 Sun Microsystems, Inc. All Rights Reserved. | |
6 | * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES. | |
7 | * | |
8 | * The above named program is free software; you can redistribute it and/or | |
9 | * modify it under the terms of the GNU General Public | |
10 | * License version 2 as published by the Free Software Foundation. | |
11 | * | |
12 | * The above named program is distributed in the hope that it will be | |
13 | * useful, but WITHOUT ANY WARRANTY; without even the implied warranty of | |
14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | |
15 | * General Public License for more details. | |
16 | * | |
17 | * You should have received a copy of the GNU General Public | |
18 | * License along with this work; if not, write to the Free Software | |
19 | * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA. | |
20 | * | |
21 | * ========== Copyright Header End ============================================ | |
22 | */ | |
23 | #if defined(ARCH_X64) | |
24 | ||
	.text
	.align	16
	.globl	atomic_add_32
	.type	atomic_add_32, @function
/*
 * uint32_t atomic_add_32(uint32_t *target, uint32_t delta)
 * SysV AMD64: %rdi = target address, %esi = delta.
 * Atomically performs *target += delta; returns the PREVIOUS value in %eax.
 */
atomic_add_32:
	movl	%esi,%eax	/* %eax = delta */
	lock			/* lock prefix makes the xadd atomic */
	xaddl	%eax,(%rdi)	/* *target += delta; %eax = old *target */
	ret
	/*
	 * NOTE(review): the bracketed size expression below is Sun-as style;
	 * GNU as expects ".size atomic_add_32, .-atomic_add_32" -- confirm
	 * which assembler builds this section.
	 */
	.size	atomic_add_32, [.-atomic_add_32]
35 | ||
	.align	16
	.globl	atomic_add_64
	.type	atomic_add_64, @function
/*
 * uint64_t atomic_add_64(uint64_t *target, uint64_t delta)
 * SysV AMD64: %rdi = target address, %rsi = delta.
 * Atomically performs *target += delta; returns the PREVIOUS value in %rax.
 */
atomic_add_64:
	movq	%rsi,%rax	/* %rax = delta */
	lock			/* lock prefix makes the xadd atomic */
	xaddq	%rax,(%rdi)	/* *target += delta; %rax = old *target */
	ret
	/*
	 * NOTE(review): bracketed size expression is Sun-as style; GNU as
	 * expects ".size atomic_add_64, .-atomic_add_64" -- confirm assembler.
	 */
	.size	atomic_add_64, [.-atomic_add_64]
45 | ||
46 | #else | |
47 | ||
48 | #include <sys/asm_linkage.h> | |
49 | ||
50 | ! | |
51 | ! atomic ADD, SUB, AND, OR, XOR operations, both 32 and 64 bit versions. | |
52 | ! | |
53 | ! currently these return the "previous" value, but would the "new" value | |
54 | ! be more useful ??? | |
55 | ! | |
56 | ! compile with "ASFLAGS= -P -D_ASM" using /usr/ccs/bin/as | |
57 | ! | |
58 | ||
59 | ||
60 | ||
	ENTRY(atomic_add_32)
	! uint32_t atomic_add_32(uint32_t *counter, uint32_t v)
	! %o0 = address of counter, %o1 = value to add to it
	! Classic compare-and-swap retry loop; returns the previous
	! (pre-add) value of the counter in %o0.
1:	ld	[%o0], %o2		! %o2 = current counter value
	add	%o1, %o2, %o3		! %o3 = proposed new value
	cas	[%o0], %o2, %o3		! swap [%o0] and %o3 _IFF_ [%o0] == %o2
	cmp	%o2, %o3		! after a successful cas, %o3 == %o2 (the old value)
	bne	1b			! lost a race with another updater -- retry
	nop				! there's no "annul if taken" flavor of branch
	jmpl	%o7+8, %g0		! leaf return (retl)
	mov	%o3, %o0		! return previous value (delay slot)
	SET_SIZE(atomic_add_32)
72 | ||
73 | ! which is more useful ??? | |
74 | ! add %o1, %o2, %o3 ! return updated value, | |
75 | ||
76 | ||
77 | ENTRY(atomic_sub_32) | |
78 | ! %o0 = address of counter, %o1 = value to add to it | |
79 | 1: ld [%o0], %o2 | |
80 | sub %o1, %o2, %o3 | |
81 | cas [%o0], %o2, %o3 ! swap [%o0] and %o3 _IFF_ [%o0] == %o2 | |
82 | cmp %o2, %o3 | |
83 | bne 1b | |
84 | nop ! there's no "annul if taken" flavor of branch | |
85 | jmpl %o7+8, %g0 | |
86 | mov %o3, %o0 ! return previous value | |
87 | SET_SIZE(atomic_sub_32) | |
88 | ||
89 | ||
	ENTRY(atomic_and_32)
	! uint32_t atomic_and_32(uint32_t *counter, uint32_t v)
	! %o0 = address of counter, %o1 = value to AND with it
	! (AND is commutative, so the operand order below is safe.)
	! Returns the previous value of the counter in %o0.
1:	ld	[%o0], %o2		! %o2 = current counter value
	and	%o1, %o2, %o3		! %o3 = proposed new value (current & v)
	cas	[%o0], %o2, %o3		! swap [%o0] and %o3 _IFF_ [%o0] == %o2
	cmp	%o2, %o3		! after a successful cas, %o3 == %o2 (the old value)
	bne	1b			! lost a race with another updater -- retry
	nop				! there's no "annul if taken" flavor of branch
	jmpl	%o7+8, %g0		! leaf return (retl)
	mov	%o3, %o0		! return previous value (delay slot)
	SET_SIZE(atomic_and_32)
101 | ||
102 | ||
	ENTRY(atomic_or_32)
	! uint32_t atomic_or_32(uint32_t *counter, uint32_t v)
	! %o0 = address of counter, %o1 = value to OR into it
	! (OR is commutative, so the operand order below is safe.)
	! Returns the previous value of the counter in %o0.
1:	ld	[%o0], %o2		! %o2 = current counter value
	or	%o1, %o2, %o3		! %o3 = proposed new value (current | v)
	cas	[%o0], %o2, %o3		! swap [%o0] and %o3 _IFF_ [%o0] == %o2
	cmp	%o2, %o3		! after a successful cas, %o3 == %o2 (the old value)
	bne	1b			! lost a race with another updater -- retry
	nop				! there's no "annul if taken" flavor of branch
	jmpl	%o7+8, %g0		! leaf return (retl)
	mov	%o3, %o0		! return previous value (delay slot)
	SET_SIZE(atomic_or_32)
114 | ||
115 | ||
	ENTRY(atomic_xor_32)
	! uint32_t atomic_xor_32(uint32_t *counter, uint32_t v)
	! %o0 = address of counter, %o1 = value to XOR into it
	! (XOR is commutative, so the operand order below is safe.)
	! Returns the previous value of the counter in %o0.
1:	ld	[%o0], %o2		! %o2 = current counter value
	xor	%o1, %o2, %o3		! %o3 = proposed new value (current ^ v)
	cas	[%o0], %o2, %o3		! swap [%o0] and %o3 _IFF_ [%o0] == %o2
	cmp	%o2, %o3		! after a successful cas, %o3 == %o2 (the old value)
	bne	1b			! lost a race with another updater -- retry
	nop				! there's no "annul if taken" flavor of branch
	jmpl	%o7+8, %g0		! leaf return (retl)
	mov	%o3, %o0		! return previous value (delay slot)
	SET_SIZE(atomic_xor_32)
127 | ||
128 | ||
129 | ||
130 | #if 1 /* change to ifdef arch-v9 when we figure out how... */ | |
131 | ||
132 | ||
	ENTRY(atomic_add_64)
	! uint64_t atomic_add_64(uint64_t *counter, uint64_t v)
	! %o0 = address of counter, %o1 = value to add to it
	! 64-bit variant of atomic_add_32 (ldx/casx instead of ld/cas).
	! Returns the previous (pre-add) value of the counter in %o0.
1:	ldx	[%o0], %o2		! %o2 = current counter value
	add	%o1, %o2, %o3		! %o3 = proposed new value
	casx	[%o0], %o2, %o3		! swap [%o0] and %o3 _IFF_ [%o0] == %o2
	cmp	%o2, %o3		! after a successful casx, %o3 == %o2 (the old value)
	bne	1b			! lost a race with another updater -- retry
	nop				! there's no "annul if taken" flavor of branch
	jmpl	%o7+8, %g0		! leaf return (retl)
	mov	%o3, %o0		! return previous value (delay slot)
	SET_SIZE(atomic_add_64)
144 | ||
145 | ||
146 | ENTRY(atomic_sub_64) | |
147 | ! %o0 = address of counter, %o1 = value to add to it | |
148 | 1: ldx [%o0], %o2 | |
149 | sub %o1, %o2, %o3 | |
150 | casx [%o0], %o2, %o3 ! swap [%o0] and %o3 _IFF_ [%o0] == %o2 | |
151 | cmp %o2, %o3 | |
152 | bne 1b | |
153 | nop ! there's no "annul if taken" flavor of branch | |
154 | jmpl %o7+8, %g0 | |
155 | mov %o3, %o0 ! return previous value | |
156 | SET_SIZE(atomic_sub_64) | |
157 | ||
158 | ||
	ENTRY(atomic_and_64)
	! uint64_t atomic_and_64(uint64_t *counter, uint64_t v)
	! %o0 = address of counter, %o1 = value to AND with it
	! (AND is commutative, so the operand order below is safe.)
	! Returns the previous value of the counter in %o0.
1:	ldx	[%o0], %o2		! %o2 = current counter value
	and	%o1, %o2, %o3		! %o3 = proposed new value (current & v)
	casx	[%o0], %o2, %o3		! swap [%o0] and %o3 _IFF_ [%o0] == %o2
	cmp	%o2, %o3		! after a successful casx, %o3 == %o2 (the old value)
	bne	1b			! lost a race with another updater -- retry
	nop				! there's no "annul if taken" flavor of branch
	jmpl	%o7+8, %g0		! leaf return (retl)
	mov	%o3, %o0		! return previous value (delay slot)
	SET_SIZE(atomic_and_64)
170 | ||
171 | ||
	ENTRY(atomic_or_64)
	! uint64_t atomic_or_64(uint64_t *counter, uint64_t v)
	! %o0 = address of counter, %o1 = value to OR into it
	! (OR is commutative, so the operand order below is safe.)
	! Returns the previous value of the counter in %o0.
1:	ldx	[%o0], %o2		! %o2 = current counter value
	or	%o1, %o2, %o3		! %o3 = proposed new value (current | v)
	casx	[%o0], %o2, %o3		! swap [%o0] and %o3 _IFF_ [%o0] == %o2
	cmp	%o2, %o3		! after a successful casx, %o3 == %o2 (the old value)
	bne	1b			! lost a race with another updater -- retry
	nop				! there's no "annul if taken" flavor of branch
	jmpl	%o7+8, %g0		! leaf return (retl)
	mov	%o3, %o0		! return previous value (delay slot)
	SET_SIZE(atomic_or_64)
183 | ||
184 | ||
	ENTRY(atomic_xor_64)
	! uint64_t atomic_xor_64(uint64_t *counter, uint64_t v)
	! %o0 = address of counter, %o1 = value to XOR into it
	! (XOR is commutative, so the operand order below is safe.)
	! Returns the previous value of the counter in %o0.
1:	ldx	[%o0], %o2		! %o2 = current counter value
	xor	%o1, %o2, %o3		! %o3 = proposed new value (current ^ v)
	casx	[%o0], %o2, %o3		! swap [%o0] and %o3 _IFF_ [%o0] == %o2
	cmp	%o2, %o3		! after a successful casx, %o3 == %o2 (the old value)
	bne	1b			! lost a race with another updater -- retry
	nop				! there's no "annul if taken" flavor of branch
	jmpl	%o7+8, %g0		! leaf return (retl)
	mov	%o3, %o0		! return previous value (delay slot)
	SET_SIZE(atomic_xor_64)
196 | ||
197 | ||
198 | #endif | |
199 | ||
200 | ||
201 | #endif |