Print this page
5043 remove deprecated atomic functions' prototypes
Split |
Close |
Expand all |
Collapse all |
--- old/usr/src/common/atomic/sparc/atomic.s
+++ new/usr/src/common/atomic/sparc/atomic.s
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21
22 22 /*
↓ open down ↓ |
22 lines elided |
↑ open up ↑ |
23 23 * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
24 24 * Use is subject to license terms.
25 25 */
26 26
27 27 .file "atomic.s"
28 28
29 29 #include <sys/asm_linkage.h>
30 30
31 31 #if defined(_KERNEL)
32 32 /*
33 - * Legacy kernel interfaces; they will go away (eventually).
33 + * Legacy kernel interfaces; they will go away the moment our closed
34 + * bins no longer require them.
34 35 */
! Each ANSI_PRAGMA_WEAK2(oldname, newname, function) emits a weak alias so
! that the legacy oldname resolves to the atomic_* implementation below.
35 36 ANSI_PRAGMA_WEAK2(cas8,atomic_cas_8,function)
36 37 ANSI_PRAGMA_WEAK2(cas32,atomic_cas_32,function)
37 38 ANSI_PRAGMA_WEAK2(cas64,atomic_cas_64,function)
38 39 ANSI_PRAGMA_WEAK2(caslong,atomic_cas_ulong,function)
39 40 ANSI_PRAGMA_WEAK2(casptr,atomic_cas_ptr,function)
40 41 ANSI_PRAGMA_WEAK2(atomic_and_long,atomic_and_ulong,function)
41 42 ANSI_PRAGMA_WEAK2(atomic_or_long,atomic_or_ulong,function)
42 43 ANSI_PRAGMA_WEAK2(swapl,atomic_swap_32,function)
43 44 #endif
44 45
45 46 /*
46 47 * NOTE: If atomic_inc_8 and atomic_inc_8_nv are ever
47 48 * separated, you need to also edit the libc sparc platform
48 49 * specific mapfile and remove the NODYNSORT attribute
49 50 * from atomic_inc_8_nv.
50 51 */
51 52 ENTRY(atomic_inc_8)
52 53 ALTENTRY(atomic_inc_8_nv)
53 54 ALTENTRY(atomic_inc_uchar)
54 55 ALTENTRY(atomic_inc_uchar_nv)
55 56 ba add_8 ! tail-branch to the shared 8-bit add loop
56 57 add %g0, 1, %o1 ! delay slot: %o1 = delta (+1)
57 58 SET_SIZE(atomic_inc_uchar_nv)
58 59 SET_SIZE(atomic_inc_uchar)
59 60 SET_SIZE(atomic_inc_8_nv)
60 61 SET_SIZE(atomic_inc_8)
61 62
62 63 /*
63 64 * NOTE: If atomic_dec_8 and atomic_dec_8_nv are ever
64 65 * separated, you need to also edit the libc sparc platform
65 66 * specific mapfile and remove the NODYNSORT attribute
66 67 * from atomic_dec_8_nv.
67 68 */
68 69 ENTRY(atomic_dec_8)
69 70 ALTENTRY(atomic_dec_8_nv)
70 71 ALTENTRY(atomic_dec_uchar)
71 72 ALTENTRY(atomic_dec_uchar_nv)
72 73 ba add_8 ! tail-branch to the shared 8-bit add loop
73 74 sub %g0, 1, %o1 ! delay slot: %o1 = delta (-1)
74 75 SET_SIZE(atomic_dec_uchar_nv)
75 76 SET_SIZE(atomic_dec_uchar)
76 77 SET_SIZE(atomic_dec_8_nv)
77 78 SET_SIZE(atomic_dec_8)
78 79
79 80 /*
80 81 * NOTE: If atomic_add_8 and atomic_add_8_nv are ever
81 82 * separated, you need to also edit the libc sparc platform
82 83 * specific mapfile and remove the NODYNSORT attribute
83 84 * from atomic_add_8_nv.
84 85 */
85 86 ENTRY(atomic_add_8)
86 87 ALTENTRY(atomic_add_8_nv)
87 88 ALTENTRY(atomic_add_char)
88 89 ALTENTRY(atomic_add_char_nv)
! Shared loop: atomically add the delta in %o1 to the byte at %o0 and
! return the new byte value.  There is no byte-wide cas, so we operate
! on the containing aligned 32-bit word and mask out our byte.
89 90 add_8:
90 91 and %o0, 0x3, %o4 ! %o4 = byte offset, left-to-right
91 92 xor %o4, 0x3, %g1 ! %g1 = byte offset, right-to-left
92 93 sll %g1, 3, %g1 ! %g1 = bit offset, right-to-left
93 94 set 0xff, %o3 ! %o3 = mask
94 95 sll %o3, %g1, %o3 ! %o3 = shifted to bit offset
95 96 sll %o1, %g1, %o1 ! %o1 = shifted to bit offset
96 97 and %o1, %o3, %o1 ! %o1 = single byte value
97 98 andn %o0, 0x3, %o0 ! %o0 = word address
98 99 ld [%o0], %o2 ! read old value
99 100 1:
100 101 add %o2, %o1, %o5 ! add value to the old value
101 102 and %o5, %o3, %o5 ! clear other bits
102 103 andn %o2, %o3, %o4 ! clear target bits
103 104 or %o4, %o5, %o5 ! insert the new value
104 105 cas [%o0], %o2, %o5 ! commit iff word still equals %o2
105 106 cmp %o2, %o5 ! did the cas succeed?
106 107 bne,a,pn %icc, 1b ! no - word changed underneath us; retry
107 108 mov %o5, %o2 ! %o2 = old value
108 109 add %o2, %o1, %o5 ! recompute the new byte ...
109 110 and %o5, %o3, %o5 ! ... and isolate it for the _nv return
110 111 retl
111 112 srl %o5, %g1, %o0 ! %o0 = new value
112 113 SET_SIZE(atomic_add_char_nv)
113 114 SET_SIZE(atomic_add_char)
114 115 SET_SIZE(atomic_add_8_nv)
115 116 SET_SIZE(atomic_add_8)
116 117
117 118 /*
118 119 * NOTE: If atomic_inc_16 and atomic_inc_16_nv are ever
119 120 * separated, you need to also edit the libc sparc platform
120 121 * specific mapfile and remove the NODYNSORT attribute
121 122 * from atomic_inc_16_nv.
122 123 */
123 124 ENTRY(atomic_inc_16)
124 125 ALTENTRY(atomic_inc_16_nv)
125 126 ALTENTRY(atomic_inc_ushort)
126 127 ALTENTRY(atomic_inc_ushort_nv)
127 128 ba add_16 ! tail-branch to the shared 16-bit add loop
128 129 add %g0, 1, %o1 ! delay slot: %o1 = delta (+1)
129 130 SET_SIZE(atomic_inc_ushort_nv)
130 131 SET_SIZE(atomic_inc_ushort)
131 132 SET_SIZE(atomic_inc_16_nv)
132 133 SET_SIZE(atomic_inc_16)
133 134
134 135 /*
135 136 * NOTE: If atomic_dec_16 and atomic_dec_16_nv are ever
136 137 * separated, you need to also edit the libc sparc platform
137 138 * specific mapfile and remove the NODYNSORT attribute
138 139 * from atomic_dec_16_nv.
139 140 */
140 141 ENTRY(atomic_dec_16)
141 142 ALTENTRY(atomic_dec_16_nv)
142 143 ALTENTRY(atomic_dec_ushort)
143 144 ALTENTRY(atomic_dec_ushort_nv)
144 145 ba add_16 ! tail-branch to the shared 16-bit add loop
145 146 sub %g0, 1, %o1 ! delay slot: %o1 = delta (-1)
146 147 SET_SIZE(atomic_dec_ushort_nv)
147 148 SET_SIZE(atomic_dec_ushort)
148 149 SET_SIZE(atomic_dec_16_nv)
149 150 SET_SIZE(atomic_dec_16)
150 151
151 152 /*
152 153 * NOTE: If atomic_add_16 and atomic_add_16_nv are ever
153 154 * separated, you need to also edit the libc sparc platform
154 155 * specific mapfile and remove the NODYNSORT attribute
155 156 * from atomic_add_16_nv.
156 157 */
157 158 ENTRY(atomic_add_16)
158 159 ALTENTRY(atomic_add_16_nv)
159 160 ALTENTRY(atomic_add_short)
160 161 ALTENTRY(atomic_add_short_nv)
! Shared loop: atomically add the delta in %o1 to the halfword at %o0 and
! return the new 16-bit value, using cas on the containing 32-bit word.
161 162 add_16:
162 163 and %o0, 0x2, %o4 ! %o4 = byte offset, left-to-right
163 164 xor %o4, 0x2, %g1 ! %g1 = byte offset, right-to-left
164 165 sll %o4, 3, %o4 ! %o4 = bit offset, left-to-right
165 166 sll %g1, 3, %g1 ! %g1 = bit offset, right-to-left
166 167 sethi %hi(0xffff0000), %o3 ! %o3 = mask
167 168 srl %o3, %o4, %o3 ! %o3 = shifted to bit offset
168 169 sll %o1, %g1, %o1 ! %o1 = shifted to bit offset
169 170 and %o1, %o3, %o1 ! %o1 = single short value
170 171 andn %o0, 0x2, %o0 ! %o0 = word address
171 172 ! if low-order bit is 1, we will properly get an alignment fault here
172 173 ld [%o0], %o2 ! read old value
173 174 1:
174 175 add %o1, %o2, %o5 ! add value to the old value
175 176 and %o5, %o3, %o5 ! clear other bits
176 177 andn %o2, %o3, %o4 ! clear target bits
177 178 or %o4, %o5, %o5 ! insert the new value
178 179 cas [%o0], %o2, %o5 ! commit iff word still equals %o2
179 180 cmp %o2, %o5 ! did the cas succeed?
180 181 bne,a,pn %icc, 1b ! no - word changed underneath us; retry
181 182 mov %o5, %o2 ! %o2 = old value
182 183 add %o1, %o2, %o5 ! recompute the new halfword ...
183 184 and %o5, %o3, %o5 ! ... and isolate it for the _nv return
184 185 retl
185 186 srl %o5, %g1, %o0 ! %o0 = new value
186 187 SET_SIZE(atomic_add_short_nv)
187 188 SET_SIZE(atomic_add_short)
188 189 SET_SIZE(atomic_add_16_nv)
189 190 SET_SIZE(atomic_add_16)
190 191
191 192 /*
192 193 * NOTE: If atomic_inc_32 and atomic_inc_32_nv are ever
193 194 * separated, you need to also edit the libc sparc platform
194 195 * specific mapfile and remove the NODYNSORT attribute
195 196 * from atomic_inc_32_nv.
196 197 */
197 198 ENTRY(atomic_inc_32)
198 199 ALTENTRY(atomic_inc_32_nv)
199 200 ALTENTRY(atomic_inc_uint)
200 201 ALTENTRY(atomic_inc_uint_nv)
201 202 ALTENTRY(atomic_inc_ulong) ! ulong shares the 32-bit code here
202 203 ALTENTRY(atomic_inc_ulong_nv)
203 204 ba add_32 ! tail-branch to the shared 32-bit add loop
204 205 add %g0, 1, %o1 ! delay slot: %o1 = delta (+1)
205 206 SET_SIZE(atomic_inc_ulong_nv)
206 207 SET_SIZE(atomic_inc_ulong)
207 208 SET_SIZE(atomic_inc_uint_nv)
208 209 SET_SIZE(atomic_inc_uint)
209 210 SET_SIZE(atomic_inc_32_nv)
210 211 SET_SIZE(atomic_inc_32)
211 212
212 213 /*
213 214 * NOTE: If atomic_dec_32 and atomic_dec_32_nv are ever
214 215 * separated, you need to also edit the libc sparc platform
215 216 * specific mapfile and remove the NODYNSORT attribute
216 217 * from atomic_dec_32_nv.
217 218 */
218 219 ENTRY(atomic_dec_32)
219 220 ALTENTRY(atomic_dec_32_nv)
220 221 ALTENTRY(atomic_dec_uint)
221 222 ALTENTRY(atomic_dec_uint_nv)
222 223 ALTENTRY(atomic_dec_ulong) ! ulong shares the 32-bit code here
223 224 ALTENTRY(atomic_dec_ulong_nv)
224 225 ba add_32 ! tail-branch to the shared 32-bit add loop
225 226 sub %g0, 1, %o1 ! delay slot: %o1 = delta (-1)
226 227 SET_SIZE(atomic_dec_ulong_nv)
227 228 SET_SIZE(atomic_dec_ulong)
228 229 SET_SIZE(atomic_dec_uint_nv)
229 230 SET_SIZE(atomic_dec_uint)
230 231 SET_SIZE(atomic_dec_32_nv)
231 232 SET_SIZE(atomic_dec_32)
232 233
233 234 /*
234 235 * NOTE: If atomic_add_32 and atomic_add_32_nv are ever
235 236 * separated, you need to also edit the libc sparc platform
236 237 * specific mapfile and remove the NODYNSORT attribute
237 238 * from atomic_add_32_nv.
238 239 */
239 240 ENTRY(atomic_add_32)
240 241 ALTENTRY(atomic_add_32_nv)
241 242 ALTENTRY(atomic_add_int)
242 243 ALTENTRY(atomic_add_int_nv)
243 244 ALTENTRY(atomic_add_ptr) ! ptr/long variants share the 32-bit code here
244 245 ALTENTRY(atomic_add_ptr_nv)
245 246 ALTENTRY(atomic_add_long)
246 247 ALTENTRY(atomic_add_long_nv)
! Shared loop: atomically add %o1 to the 32-bit word at %o0; return new value.
247 248 add_32:
248 249 ld [%o0], %o2 ! %o2 = current value
249 250 1:
250 251 add %o2, %o1, %o3 ! %o3 = candidate new value
251 252 cas [%o0], %o2, %o3 ! commit iff word still equals %o2
252 253 cmp %o2, %o3 ! did the cas succeed?
253 254 bne,a,pn %icc, 1b ! no: retry with the value cas saw
254 255 mov %o3, %o2 ! (annulled delay slot) %o2 = old value
255 256 retl
256 257 add %o2, %o1, %o0 ! return new value
257 258 SET_SIZE(atomic_add_long_nv)
258 259 SET_SIZE(atomic_add_long)
259 260 SET_SIZE(atomic_add_ptr_nv)
260 261 SET_SIZE(atomic_add_ptr)
261 262 SET_SIZE(atomic_add_int_nv)
262 263 SET_SIZE(atomic_add_int)
263 264 SET_SIZE(atomic_add_32_nv)
264 265 SET_SIZE(atomic_add_32)
265 266
266 267 /*
267 268 * NOTE: If atomic_inc_64 and atomic_inc_64_nv are ever
268 269 * separated, you need to also edit the libc sparc platform
269 270 * specific mapfile and remove the NODYNSORT attribute
270 271 * from atomic_inc_64_nv.
271 272 */
272 273 ENTRY(atomic_inc_64)
273 274 ALTENTRY(atomic_inc_64_nv)
274 275 ba add_64 ! enter shared loop past the 32-bit arg merge
275 276 add %g0, 1, %o1 ! delay slot: %o1 = 64-bit delta (+1)
276 277 SET_SIZE(atomic_inc_64_nv)
277 278 SET_SIZE(atomic_inc_64)
278 279
279 280 /*
280 281 * NOTE: If atomic_dec_64 and atomic_dec_64_nv are ever
281 282 * separated, you need to also edit the libc sparc platform
282 283 * specific mapfile and remove the NODYNSORT attribute
283 284 * from atomic_dec_64_nv.
284 285 */
285 286 ENTRY(atomic_dec_64)
286 287 ALTENTRY(atomic_dec_64_nv)
287 288 ba add_64 ! enter shared loop past the 32-bit arg merge
288 289 sub %g0, 1, %o1 ! delay slot: %o1 = 64-bit delta (-1)
289 290 SET_SIZE(atomic_dec_64_nv)
290 291 SET_SIZE(atomic_dec_64)
291 292
292 293 /*
293 294 * NOTE: If atomic_add_64 and atomic_add_64_nv are ever
294 295 * separated, you need to also edit the libc sparc platform
295 296 * specific mapfile and remove the NODYNSORT attribute
296 297 * from atomic_add_64_nv.
297 298 */
298 299 ENTRY(atomic_add_64)
299 300 ALTENTRY(atomic_add_64_nv)
300 301 sllx %o1, 32, %o1 ! upper 32 in %o1, lower in %o2
301 302 srl %o2, 0, %o2 ! zero-extend the low half
302 303 add %o1, %o2, %o1 ! convert 2 32-bit args into 1 64-bit
303 304 add_64:
304 305 ldx [%o0], %o2 ! %o2 = current 64-bit value
305 306 1:
306 307 add %o2, %o1, %o3 ! %o3 = candidate new value
307 308 casx [%o0], %o2, %o3 ! commit iff memory still equals %o2
308 309 cmp %o2, %o3 ! did the casx succeed?
309 310 bne,a,pn %xcc, 1b ! no: retry with the value casx saw
310 311 mov %o3, %o2 ! (annulled delay slot) %o2 = old value
311 312 add %o2, %o1, %o1 ! return lower 32-bits in %o1
312 313 retl
313 314 srlx %o1, 32, %o0 ! return upper 32-bits in %o0
314 315 SET_SIZE(atomic_add_64_nv)
315 316 SET_SIZE(atomic_add_64)
316 317
317 318 /*
318 319 * NOTE: If atomic_or_8 and atomic_or_8_nv are ever
319 320 * separated, you need to also edit the libc sparc platform
320 321 * specific mapfile and remove the NODYNSORT attribute
321 322 * from atomic_or_8_nv.
322 323 */
! Atomically OR %o1 into the byte at %o0 via word-wide cas; the OR operand
! is masked to the target byte, so the other bytes are never changed.
323 324 ENTRY(atomic_or_8)
324 325 ALTENTRY(atomic_or_8_nv)
325 326 ALTENTRY(atomic_or_uchar)
326 327 ALTENTRY(atomic_or_uchar_nv)
327 328 and %o0, 0x3, %o4 ! %o4 = byte offset, left-to-right
328 329 xor %o4, 0x3, %g1 ! %g1 = byte offset, right-to-left
329 330 sll %g1, 3, %g1 ! %g1 = bit offset, right-to-left
330 331 set 0xff, %o3 ! %o3 = mask
331 332 sll %o3, %g1, %o3 ! %o3 = shifted to bit offset
332 333 sll %o1, %g1, %o1 ! %o1 = shifted to bit offset
333 334 and %o1, %o3, %o1 ! %o1 = single byte value
334 335 andn %o0, 0x3, %o0 ! %o0 = word address
335 336 ld [%o0], %o2 ! read old value
336 337 1:
337 338 or %o2, %o1, %o5 ! or in the new value
338 339 cas [%o0], %o2, %o5 ! commit iff word still equals %o2
339 340 cmp %o2, %o5 ! did the cas succeed?
340 341 bne,a,pn %icc, 1b ! no: retry
341 342 mov %o5, %o2 ! %o2 = old value
342 343 or %o2, %o1, %o5 ! recompute the new byte ...
343 344 and %o5, %o3, %o5 ! ... and isolate it for the _nv return
344 345 retl
345 346 srl %o5, %g1, %o0 ! %o0 = new value
346 347 SET_SIZE(atomic_or_uchar_nv)
347 348 SET_SIZE(atomic_or_uchar)
348 349 SET_SIZE(atomic_or_8_nv)
349 350 SET_SIZE(atomic_or_8)
350 351
351 352 /*
352 353 * NOTE: If atomic_or_16 and atomic_or_16_nv are ever
353 354 * separated, you need to also edit the libc sparc platform
354 355 * specific mapfile and remove the NODYNSORT attribute
355 356 * from atomic_or_16_nv.
356 357 */
! Atomically OR %o1 into the halfword at %o0 via word-wide cas; the OR
! operand is masked to the target halfword, so the other half is unchanged.
357 358 ENTRY(atomic_or_16)
358 359 ALTENTRY(atomic_or_16_nv)
359 360 ALTENTRY(atomic_or_ushort)
360 361 ALTENTRY(atomic_or_ushort_nv)
361 362 and %o0, 0x2, %o4 ! %o4 = byte offset, left-to-right
362 363 xor %o4, 0x2, %g1 ! %g1 = byte offset, right-to-left
363 364 sll %o4, 3, %o4 ! %o4 = bit offset, left-to-right
364 365 sll %g1, 3, %g1 ! %g1 = bit offset, right-to-left
365 366 sethi %hi(0xffff0000), %o3 ! %o3 = mask
366 367 srl %o3, %o4, %o3 ! %o3 = shifted to bit offset
367 368 sll %o1, %g1, %o1 ! %o1 = shifted to bit offset
368 369 and %o1, %o3, %o1 ! %o1 = single short value
369 370 andn %o0, 0x2, %o0 ! %o0 = word address
370 371 ! if low-order bit is 1, we will properly get an alignment fault here
371 372 ld [%o0], %o2 ! read old value
372 373 1:
373 374 or %o2, %o1, %o5 ! or in the new value
374 375 cas [%o0], %o2, %o5 ! commit iff word still equals %o2
375 376 cmp %o2, %o5 ! did the cas succeed?
376 377 bne,a,pn %icc, 1b ! no: retry
377 378 mov %o5, %o2 ! %o2 = old value
378 379 or %o2, %o1, %o5 ! or in the new value
379 380 and %o5, %o3, %o5 ! isolate the halfword for the _nv return
380 381 retl
381 382 srl %o5, %g1, %o0 ! %o0 = new value
382 383 SET_SIZE(atomic_or_ushort_nv)
383 384 SET_SIZE(atomic_or_ushort)
384 385 SET_SIZE(atomic_or_16_nv)
385 386 SET_SIZE(atomic_or_16)
386 387
387 388 /*
388 389 * NOTE: If atomic_or_32 and atomic_or_32_nv are ever
389 390 * separated, you need to also edit the libc sparc platform
390 391 * specific mapfile and remove the NODYNSORT attribute
391 392 * from atomic_or_32_nv.
392 393 */
393 394 ENTRY(atomic_or_32)
394 395 ALTENTRY(atomic_or_32_nv)
395 396 ALTENTRY(atomic_or_uint)
396 397 ALTENTRY(atomic_or_uint_nv)
397 398 ALTENTRY(atomic_or_ulong) ! ulong shares the 32-bit code here
398 399 ALTENTRY(atomic_or_ulong_nv)
399 400 ld [%o0], %o2 ! %o2 = current value
400 401 1:
401 402 or %o2, %o1, %o3 ! %o3 = candidate new value
402 403 cas [%o0], %o2, %o3 ! commit iff word still equals %o2
403 404 cmp %o2, %o3 ! did the cas succeed?
404 405 bne,a,pn %icc, 1b ! no: retry with the value cas saw
405 406 mov %o3, %o2 ! (annulled delay slot) %o2 = old value
406 407 retl
407 408 or %o2, %o1, %o0 ! return new value
408 409 SET_SIZE(atomic_or_ulong_nv)
409 410 SET_SIZE(atomic_or_ulong)
410 411 SET_SIZE(atomic_or_uint_nv)
411 412 SET_SIZE(atomic_or_uint)
412 413 SET_SIZE(atomic_or_32_nv)
413 414 SET_SIZE(atomic_or_32)
414 415
415 416 /*
416 417 * NOTE: If atomic_or_64 and atomic_or_64_nv are ever
417 418 * separated, you need to also edit the libc sparc platform
418 419 * specific mapfile and remove the NODYNSORT attribute
419 420 * from atomic_or_64_nv.
420 421 */
421 422 ENTRY(atomic_or_64)
422 423 ALTENTRY(atomic_or_64_nv)
423 424 sllx %o1, 32, %o1 ! upper 32 in %o1, lower in %o2
424 425 srl %o2, 0, %o2 ! zero-extend the low half
425 426 add %o1, %o2, %o1 ! convert 2 32-bit args into 1 64-bit
426 427 ldx [%o0], %o2 ! %o2 = current 64-bit value
427 428 1:
428 429 or %o2, %o1, %o3 ! %o3 = candidate new value
429 430 casx [%o0], %o2, %o3 ! commit iff memory still equals %o2
430 431 cmp %o2, %o3 ! did the casx succeed?
431 432 bne,a,pn %xcc, 1b ! no: retry with the value casx saw
432 433 mov %o3, %o2 ! (annulled delay slot) %o2 = old value
433 434 or %o2, %o1, %o1 ! return lower 32-bits in %o1
434 435 retl
435 436 srlx %o1, 32, %o0 ! return upper 32-bits in %o0
436 437 SET_SIZE(atomic_or_64_nv)
437 438 SET_SIZE(atomic_or_64)
438 439
439 440 /*
440 441 * NOTE: If atomic_and_8 and atomic_and_8_nv are ever
441 442 * separated, you need to also edit the libc sparc platform
442 443 * specific mapfile and remove the NODYNSORT attribute
443 444 * from atomic_and_8_nv.
444 445 */
! Atomically AND %o1 into the byte at %o0 via word-wide cas; orn fills the
! non-target bytes of the operand with ones so they pass through unchanged.
445 446 ENTRY(atomic_and_8)
446 447 ALTENTRY(atomic_and_8_nv)
447 448 ALTENTRY(atomic_and_uchar)
448 449 ALTENTRY(atomic_and_uchar_nv)
449 450 and %o0, 0x3, %o4 ! %o4 = byte offset, left-to-right
450 451 xor %o4, 0x3, %g1 ! %g1 = byte offset, right-to-left
451 452 sll %g1, 3, %g1 ! %g1 = bit offset, right-to-left
452 453 set 0xff, %o3 ! %o3 = mask
453 454 sll %o3, %g1, %o3 ! %o3 = shifted to bit offset
454 455 sll %o1, %g1, %o1 ! %o1 = shifted to bit offset
455 456 orn %o1, %o3, %o1 ! all ones in other bytes
456 457 andn %o0, 0x3, %o0 ! %o0 = word address
457 458 ld [%o0], %o2 ! read old value
458 459 1:
459 460 and %o2, %o1, %o5 ! and in the new value
460 461 cas [%o0], %o2, %o5 ! commit iff word still equals %o2
461 462 cmp %o2, %o5 ! did the cas succeed?
462 463 bne,a,pn %icc, 1b ! no: retry
463 464 mov %o5, %o2 ! %o2 = old value
464 465 and %o2, %o1, %o5 ! recompute the new byte ...
465 466 and %o5, %o3, %o5 ! ... and isolate it for the _nv return
466 467 retl
467 468 srl %o5, %g1, %o0 ! %o0 = new value
468 469 SET_SIZE(atomic_and_uchar_nv)
469 470 SET_SIZE(atomic_and_uchar)
470 471 SET_SIZE(atomic_and_8_nv)
471 472 SET_SIZE(atomic_and_8)
472 473
473 474 /*
474 475 * NOTE: If atomic_and_16 and atomic_and_16_nv are ever
475 476 * separated, you need to also edit the libc sparc platform
476 477 * specific mapfile and remove the NODYNSORT attribute
477 478 * from atomic_and_16_nv.
478 479 */
! Atomically AND %o1 into the halfword at %o0 via word-wide cas; orn fills
! the other half of the operand with ones so it passes through unchanged.
479 480 ENTRY(atomic_and_16)
480 481 ALTENTRY(atomic_and_16_nv)
481 482 ALTENTRY(atomic_and_ushort)
482 483 ALTENTRY(atomic_and_ushort_nv)
483 484 and %o0, 0x2, %o4 ! %o4 = byte offset, left-to-right
484 485 xor %o4, 0x2, %g1 ! %g1 = byte offset, right-to-left
485 486 sll %o4, 3, %o4 ! %o4 = bit offset, left-to-right
486 487 sll %g1, 3, %g1 ! %g1 = bit offset, right-to-left
487 488 sethi %hi(0xffff0000), %o3 ! %o3 = mask
488 489 srl %o3, %o4, %o3 ! %o3 = shifted to bit offset
489 490 sll %o1, %g1, %o1 ! %o1 = shifted to bit offset
490 491 orn %o1, %o3, %o1 ! all ones in the other half
491 492 andn %o0, 0x2, %o0 ! %o0 = word address
492 493 ! if low-order bit is 1, we will properly get an alignment fault here
493 494 ld [%o0], %o2 ! read old value
494 495 1:
495 496 and %o2, %o1, %o5 ! and in the new value
496 497 cas [%o0], %o2, %o5 ! commit iff word still equals %o2
497 498 cmp %o2, %o5 ! did the cas succeed?
498 499 bne,a,pn %icc, 1b ! no: retry
499 500 mov %o5, %o2 ! %o2 = old value
500 501 and %o2, %o1, %o5 ! recompute the new halfword ...
501 502 and %o5, %o3, %o5 ! ... and isolate it for the _nv return
502 503 retl
503 504 srl %o5, %g1, %o0 ! %o0 = new value
504 505 SET_SIZE(atomic_and_ushort_nv)
505 506 SET_SIZE(atomic_and_ushort)
506 507 SET_SIZE(atomic_and_16_nv)
507 508 SET_SIZE(atomic_and_16)
508 509
509 510 /*
510 511 * NOTE: If atomic_and_32 and atomic_and_32_nv are ever
511 512 * separated, you need to also edit the libc sparc platform
512 513 * specific mapfile and remove the NODYNSORT attribute
513 514 * from atomic_and_32_nv.
514 515 */
515 516 ENTRY(atomic_and_32)
516 517 ALTENTRY(atomic_and_32_nv)
517 518 ALTENTRY(atomic_and_uint)
518 519 ALTENTRY(atomic_and_uint_nv)
519 520 ALTENTRY(atomic_and_ulong) ! ulong shares the 32-bit code here
520 521 ALTENTRY(atomic_and_ulong_nv)
521 522 ld [%o0], %o2 ! %o2 = current value
522 523 1:
523 524 and %o2, %o1, %o3 ! %o3 = candidate new value
524 525 cas [%o0], %o2, %o3 ! commit iff word still equals %o2
525 526 cmp %o2, %o3 ! did the cas succeed?
526 527 bne,a,pn %icc, 1b ! no: retry with the value cas saw
527 528 mov %o3, %o2 ! (annulled delay slot) %o2 = old value
528 529 retl
529 530 and %o2, %o1, %o0 ! return new value
530 531 SET_SIZE(atomic_and_ulong_nv)
531 532 SET_SIZE(atomic_and_ulong)
532 533 SET_SIZE(atomic_and_uint_nv)
533 534 SET_SIZE(atomic_and_uint)
534 535 SET_SIZE(atomic_and_32_nv)
535 536 SET_SIZE(atomic_and_32)
536 537
537 538 /*
538 539 * NOTE: If atomic_and_64 and atomic_and_64_nv are ever
539 540 * separated, you need to also edit the libc sparc platform
540 541 * specific mapfile and remove the NODYNSORT attribute
541 542 * from atomic_and_64_nv.
542 543 */
543 544 ENTRY(atomic_and_64)
544 545 ALTENTRY(atomic_and_64_nv)
545 546 sllx %o1, 32, %o1 ! upper 32 in %o1, lower in %o2
546 547 srl %o2, 0, %o2 ! zero-extend the low half
547 548 add %o1, %o2, %o1 ! convert 2 32-bit args into 1 64-bit
548 549 ldx [%o0], %o2 ! %o2 = current 64-bit value
549 550 1:
550 551 and %o2, %o1, %o3 ! %o3 = candidate new value
551 552 casx [%o0], %o2, %o3 ! commit iff memory still equals %o2
552 553 cmp %o2, %o3 ! did the casx succeed?
553 554 bne,a,pn %xcc, 1b ! no: retry with the value casx saw
554 555 mov %o3, %o2 ! (annulled delay slot) %o2 = old value
555 556 and %o2, %o1, %o1 ! return lower 32-bits in %o1
556 557 retl
557 558 srlx %o1, 32, %o0 ! return upper 32-bits in %o0
558 559 SET_SIZE(atomic_and_64_nv)
559 560 SET_SIZE(atomic_and_64)
560 561
!
! atomic_cas_8(target, cmp, new): compare-and-swap one byte using the
! word-wide cas instruction; returns the old byte value.  The word cas
! can fail because some OTHER byte in the word changed; in that case
! (our byte still equal to cmp) we must retry rather than report failure.
!
561 562 ENTRY(atomic_cas_8)
562 563 ALTENTRY(atomic_cas_uchar)
563 564 and %o0, 0x3, %o4 ! %o4 = byte offset, left-to-right
564 565 xor %o4, 0x3, %g1 ! %g1 = byte offset, right-to-left
565 566 sll %g1, 3, %g1 ! %g1 = bit offset, right-to-left
566 567 set 0xff, %o3 ! %o3 = mask
567 568 sll %o3, %g1, %o3 ! %o3 = shifted to bit offset
568 569 sll %o1, %g1, %o1 ! %o1 = shifted to bit offset
569 570 and %o1, %o3, %o1 ! %o1 = single byte value
570 571 sll %o2, %g1, %o2 ! %o2 = shifted to bit offset
571 572 and %o2, %o3, %o2 ! %o2 = single byte value
572 573 andn %o0, 0x3, %o0 ! %o0 = word address
573 574 ld [%o0], %o4 ! read old value
574 575 1:
575 576 andn %o4, %o3, %o4 ! clear target bits
576 577 or %o4, %o2, %o5 ! insert the new value
577 578 or %o4, %o1, %o4 ! insert the comparison value
578 579 cas [%o0], %o4, %o5
579 580 cmp %o4, %o5 ! did we succeed?
580 581 be,pt %icc, 2f
581 582 and %o5, %o3, %o4 ! isolate the old value
582 583 cmp %o1, %o4 ! should we have succeeded?
583 584 be,a,pt %icc, 1b ! yes, try again
584 585 mov %o5, %o4 ! %o4 = old value
585 586 2:
586 587 retl
587 588 srl %o4, %g1, %o0 ! %o0 = old value
588 589 SET_SIZE(atomic_cas_uchar)
589 590 SET_SIZE(atomic_cas_8)
590 591
!
! atomic_cas_16(target, cmp, new): compare-and-swap one halfword using the
! word-wide cas instruction; returns the old 16-bit value.  The word cas
! can fail because the OTHER halfword changed; in that case (our halfword
! still equal to cmp) we must retry rather than report failure.
!
591 592 ENTRY(atomic_cas_16)
592 593 ALTENTRY(atomic_cas_ushort)
593 594 and %o0, 0x2, %o4 ! %o4 = byte offset, left-to-right
594 595 xor %o4, 0x2, %g1 ! %g1 = byte offset, right-to-left
595 596 sll %o4, 3, %o4 ! %o4 = bit offset, left-to-right
596 597 sll %g1, 3, %g1 ! %g1 = bit offset, right-to-left
597 598 sethi %hi(0xffff0000), %o3 ! %o3 = mask
598 599 srl %o3, %o4, %o3 ! %o3 = shifted to bit offset
599 600 sll %o1, %g1, %o1 ! %o1 = shifted to bit offset
600 601 and %o1, %o3, %o1 ! %o1 = single short value
601 602 sll %o2, %g1, %o2 ! %o2 = shifted to bit offset
602 603 and %o2, %o3, %o2 ! %o2 = single short value
603 604 andn %o0, 0x2, %o0 ! %o0 = word address
604 605 ! if low-order bit is 1, we will properly get an alignment fault here
605 606 ld [%o0], %o4 ! read old value
606 607 1:
607 608 andn %o4, %o3, %o4 ! clear target bits
608 609 or %o4, %o2, %o5 ! insert the new value
609 610 or %o4, %o1, %o4 ! insert the comparison value
610 611 cas [%o0], %o4, %o5
611 612 cmp %o4, %o5 ! did we succeed?
612 613 be,pt %icc, 2f
613 614 and %o5, %o3, %o4 ! isolate the old value
614 615 cmp %o1, %o4 ! should we have succeeded?
615 616 be,a,pt %icc, 1b ! yes, try again
616 617 mov %o5, %o4 ! %o4 = old value
617 618 2:
618 619 retl
619 620 srl %o4, %g1, %o0 ! %o0 = old value
620 621 SET_SIZE(atomic_cas_ushort)
621 622 SET_SIZE(atomic_cas_16)
622 623
! atomic_cas_32(target, cmp, new): single cas does all the work.
623 624 ENTRY(atomic_cas_32)
624 625 ALTENTRY(atomic_cas_uint)
625 626 ALTENTRY(atomic_cas_ptr) ! ptr/ulong share the 32-bit code here
626 627 ALTENTRY(atomic_cas_ulong)
627 628 cas [%o0], %o1, %o2 ! if [%o0] == %o1 store %o2; %o2 = old value
628 629 retl
629 630 mov %o2, %o0 ! delay slot: return the old value
630 631 SET_SIZE(atomic_cas_ulong)
631 632 SET_SIZE(atomic_cas_ptr)
632 633 SET_SIZE(atomic_cas_uint)
633 634 SET_SIZE(atomic_cas_32)
634 635
! atomic_cas_64(target, cmp, new): 64-bit args arrive as 32-bit register
! pairs; reassemble them, then a single casx does the work.
635 636 ENTRY(atomic_cas_64)
636 637 sllx %o1, 32, %o1 ! cmp's upper 32 in %o1, lower in %o2
637 638 srl %o2, 0, %o2 ! convert 2 32-bit args into 1 64-bit
638 639 add %o1, %o2, %o1
639 640 sllx %o3, 32, %o2 ! newval upper 32 in %o3, lower in %o4
640 641 srl %o4, 0, %o4 ! setup %o2 to have newval
641 642 add %o2, %o4, %o2
642 643 casx [%o0], %o1, %o2 ! if [%o0] == %o1 store %o2; %o2 = old value
643 644 srl %o2, 0, %o1 ! return lower 32-bits in %o1
644 645 retl
645 646 srlx %o2, 32, %o0 ! return upper 32-bits in %o0
646 647 SET_SIZE(atomic_cas_64)
647 648
! atomic_swap_8(target, new): atomically store the byte in %o1 at %o0
! via word-wide cas; returns the old byte value.
648 649 ENTRY(atomic_swap_8)
649 650 ALTENTRY(atomic_swap_uchar)
650 651 and %o0, 0x3, %o4 ! %o4 = byte offset, left-to-right
651 652 xor %o4, 0x3, %g1 ! %g1 = byte offset, right-to-left
652 653 sll %g1, 3, %g1 ! %g1 = bit offset, right-to-left
653 654 set 0xff, %o3 ! %o3 = mask
654 655 sll %o3, %g1, %o3 ! %o3 = shifted to bit offset
655 656 sll %o1, %g1, %o1 ! %o1 = shifted to bit offset
656 657 and %o1, %o3, %o1 ! %o1 = single byte value
657 658 andn %o0, 0x3, %o0 ! %o0 = word address
658 659 ld [%o0], %o2 ! read old value
659 660 1:
660 661 andn %o2, %o3, %o5 ! clear target bits
661 662 or %o5, %o1, %o5 ! insert the new value
662 663 cas [%o0], %o2, %o5 ! commit iff word still equals %o2
663 664 cmp %o2, %o5 ! did the cas succeed?
664 665 bne,a,pn %icc, 1b ! no: retry
665 666 mov %o5, %o2 ! %o2 = old value
666 667 and %o5, %o3, %o5 ! isolate the old byte
667 668 retl
668 669 srl %o5, %g1, %o0 ! %o0 = old value
669 670 SET_SIZE(atomic_swap_uchar)
670 671 SET_SIZE(atomic_swap_8)
671 672
! atomic_swap_16(target, new): atomically store the halfword in %o1 at %o0
! via word-wide cas; returns the old 16-bit value.
672 673 ENTRY(atomic_swap_16)
673 674 ALTENTRY(atomic_swap_ushort)
674 675 and %o0, 0x2, %o4 ! %o4 = byte offset, left-to-right
675 676 xor %o4, 0x2, %g1 ! %g1 = byte offset, right-to-left
676 677 sll %o4, 3, %o4 ! %o4 = bit offset, left-to-right
677 678 sll %g1, 3, %g1 ! %g1 = bit offset, right-to-left
678 679 sethi %hi(0xffff0000), %o3 ! %o3 = mask
679 680 srl %o3, %o4, %o3 ! %o3 = shifted to bit offset
680 681 sll %o1, %g1, %o1 ! %o1 = shifted to bit offset
681 682 and %o1, %o3, %o1 ! %o1 = single short value
682 683 andn %o0, 0x2, %o0 ! %o0 = word address
683 684 ! if low-order bit is 1, we will properly get an alignment fault here
684 685 ld [%o0], %o2 ! read old value
685 686 1:
686 687 andn %o2, %o3, %o5 ! clear target bits
687 688 or %o5, %o1, %o5 ! insert the new value
688 689 cas [%o0], %o2, %o5 ! commit iff word still equals %o2
689 690 cmp %o2, %o5 ! did the cas succeed?
690 691 bne,a,pn %icc, 1b ! no: retry
691 692 mov %o5, %o2 ! %o2 = old value
692 693 and %o5, %o3, %o5 ! isolate the old halfword
693 694 retl
694 695 srl %o5, %g1, %o0 ! %o0 = old value
695 696 SET_SIZE(atomic_swap_ushort)
696 697 SET_SIZE(atomic_swap_16)
697 698
! atomic_swap_32(target, new): atomically store %o1 at %o0; return old value.
698 699 ENTRY(atomic_swap_32)
699 700 ALTENTRY(atomic_swap_uint)
700 701 ALTENTRY(atomic_swap_ptr) ! ptr/ulong share the 32-bit code here
701 702 ALTENTRY(atomic_swap_ulong)
702 703 ld [%o0], %o2 ! %o2 = expected current value
703 704 1:
704 705 mov %o1, %o3 ! %o3 = replacement value
705 706 cas [%o0], %o2, %o3 ! commit iff word still equals %o2
706 707 cmp %o2, %o3 ! did the cas succeed?
707 708 bne,a,pn %icc, 1b ! no: retry with the value cas saw
708 709 mov %o3, %o2 ! (annulled delay slot) %o2 = old value
709 710 retl
710 711 mov %o3, %o0 ! delay slot: return the old value
711 712 SET_SIZE(atomic_swap_ulong)
712 713 SET_SIZE(atomic_swap_ptr)
713 714 SET_SIZE(atomic_swap_uint)
714 715 SET_SIZE(atomic_swap_32)
715 716
! atomic_swap_64(target, new): atomically store the 64-bit value (passed
! as a 32-bit register pair) at %o0; return the old value as a pair.
716 717 ENTRY(atomic_swap_64)
717 718 sllx %o1, 32, %o1 ! upper 32 in %o1, lower in %o2
718 719 srl %o2, 0, %o2 ! zero-extend the low half
719 720 add %o1, %o2, %o1 ! convert 2 32-bit args into 1 64-bit
720 721 ldx [%o0], %o2 ! %o2 = expected current value
721 722 1:
722 723 mov %o1, %o3 ! %o3 = replacement value
723 724 casx [%o0], %o2, %o3 ! commit iff memory still equals %o2
724 725 cmp %o2, %o3 ! did the casx succeed?
725 726 bne,a,pn %xcc, 1b ! no: retry with the value casx saw
726 727 mov %o3, %o2 ! (annulled delay slot) %o2 = old value
727 728 srl %o3, 0, %o1 ! return lower 32-bits in %o1
728 729 retl
729 730 srlx %o3, 32, %o0 ! return upper 32-bits in %o0
730 731 SET_SIZE(atomic_swap_64)
731 732
! atomic_set_long_excl(target, bit): atomically set bit <bit> of *target.
! Returns 0 on success, -1 if the bit was already set.  The n-suffixed
! ops (slln/ldn/casn) are word-size-generic macro forms -- presumably
! expanded by the asm_linkage headers; confirm there.
732 733 ENTRY(atomic_set_long_excl)
733 734 mov 1, %o3
734 735 slln %o3, %o1, %o3 ! %o3 = 1 << bit
735 736 ldn [%o0], %o2 ! %o2 = current value
736 737 1:
737 738 andcc %o2, %o3, %g0 ! test if the bit is set
738 739 bnz,a,pn %ncc, 2f ! if so, then fail out
739 740 mov -1, %o0
740 741 or %o2, %o3, %o4 ! set the bit, and try to commit it
741 742 casn [%o0], %o2, %o4
742 743 cmp %o2, %o4
743 744 bne,a,pn %ncc, 1b ! failed to commit, try again
744 745 mov %o4, %o2
745 746 mov %g0, %o0 ! success: return 0
746 747 2:
747 748 retl
748 749 nop
749 750 SET_SIZE(atomic_set_long_excl)
750 751
! atomic_clear_long_excl(target, bit): atomically clear bit <bit> of
! *target.  Returns 0 on success, -1 if the bit was already clear.
! Counterpart of atomic_set_long_excl above; same casn retry structure.
751 752 ENTRY(atomic_clear_long_excl)
752 753 mov 1, %o3
753 754 slln %o3, %o1, %o3 ! %o3 = 1 << bit
754 755 ldn [%o0], %o2 ! %o2 = current value
755 756 1:
756 757 andncc %o3, %o2, %g0 ! test if the bit is clear
757 758 bnz,a,pn %ncc, 2f ! if so, then fail out
758 759 mov -1, %o0
759 760 andn %o2, %o3, %o4 ! clear the bit, and try to commit it
760 761 casn [%o0], %o2, %o4
761 762 cmp %o2, %o4
762 763 bne,a,pn %ncc, 1b ! failed to commit, try again
763 764 mov %o4, %o2
764 765 mov %g0, %o0 ! success: return 0
765 766 2:
766 767 retl
767 768 nop
768 769 SET_SIZE(atomic_clear_long_excl)
769 770
770 771 #if !defined(_KERNEL) ! user-level (library) builds only
771 772
772 773 /*
773 774 * Spitfires and Blackbirds have a problem with membars in the
774 775 * delay slot (SF_ERRATA_51). For safety's sake, we assume
775 776 * that the whole world needs the workaround.
776 777 */
777 778 ENTRY(membar_enter)
778 779 membar #StoreLoad|#StoreStore ! order prior stores before later loads/stores
779 780 retl
780 781 nop ! delay slot kept empty (SF_ERRATA_51)
781 782 SET_SIZE(membar_enter)
782 783
783 784 ENTRY(membar_exit)
784 785 membar #LoadStore|#StoreStore ! order prior loads/stores before later stores
785 786 retl
786 787 nop ! delay slot kept empty (SF_ERRATA_51)
787 788 SET_SIZE(membar_exit)
788 789
789 790 ENTRY(membar_producer)
790 791 membar #StoreStore ! order prior stores before later stores
791 792 retl
792 793 nop ! delay slot kept empty (SF_ERRATA_51)
793 794 SET_SIZE(membar_producer)
794 795
795 796 ENTRY(membar_consumer)
796 797 membar #LoadLoad ! order prior loads before later loads
797 798 retl
798 799 nop ! delay slot kept empty (SF_ERRATA_51)
799 800 SET_SIZE(membar_consumer)
800 801
801 802 #endif /* !_KERNEL */
↓ open down ↓ |
758 lines elided |
↑ open up ↑ |
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX