Print this page
atomic: remove old weak symbols
These symbols have been deprecated for a really long time on x86 and SPARC.
They exist as weak symbols to allow old binaries to still work. On ARM,
these old binaries never existed to begin with and so we don't have to
provide these legacy symbols.
Split |
Close |
Expand all |
Collapse all |
--- old/usr/src/common/atomic/arm/atomic.s
+++ new/usr/src/common/atomic/arm/atomic.s
1 1 /*
2 2 * This file and its contents are supplied under the terms of the
3 3 * Common Development and Distribution License ("CDDL"), version 1.0.
4 4 * You may only use this file in accordance with the terms of version
5 5 * 1.0 of the CDDL.
6 6 *
7 7 * A full copy of the text of the CDDL should have accompanied this
8 8 * source. A copy of the CDDL is also available via the Internet at
9 9 * http://www.illumos.org/license/CDDL.
10 10 */
11 11
12 12 /*
13 13 * Copyright (c) 2013 Joyent, Inc. All rights reserved.
14 14 */
15 15
16 16 .file "atomic.s"
17 17
18 18 /*
19 19 * Atomic Operations for 32-bit ARM. Note that these require at least ARMv6K so
20 20 * as to have access to the non-word size LDREX and STREX.
↓ open down ↓ |
20 lines elided |
↑ open up ↑ |
21 21 */
22 22
23 23 #include <sys/asm_linkage.h>
24 24 #include <sys/atomic_impl.h>
25 25
26 26 /*
27 27 * XXX We probably want some kind of backoff built in to these routines at some
28 28 * point.
29 29 */
30 30
31 -#if defined(_KERNEL)
32 - /*
33 - * Legacy kernel interfaces; they will go away (eventually).
34 - */
35 - ANSI_PRAGMA_WEAK2(cas8,atomic_cas_8,function)
36 - ANSI_PRAGMA_WEAK2(cas32,atomic_cas_32,function)
37 - ANSI_PRAGMA_WEAK2(cas64,atomic_cas_64,function)
38 - ANSI_PRAGMA_WEAK2(caslong,atomic_cas_ulong,function)
39 - ANSI_PRAGMA_WEAK2(casptr,atomic_cas_ptr,function)
40 - ANSI_PRAGMA_WEAK2(atomic_and_long,atomic_and_ulong,function)
41 - ANSI_PRAGMA_WEAK2(atomic_or_long,atomic_or_ulong,function)
42 - ANSI_PRAGMA_WEAK2(swapl,atomic_swap_32,function)
43 -#endif
44 -
45 31 /*
46 32 * NOTE: If atomic_inc_8 and atomic_inc_8_nv are ever
47 33 * separated, you need to also edit the libc arm platform
48 34 * specific mapfile and remove the NODYNSORT attribute
49 35 * from atomic_inc_8_nv.
50 36 */
@ uint8_t atomic_inc_8{,_nv}(volatile uint8_t *target): add 1, return new value.
51 37 ENTRY(atomic_inc_8)
52 38 ALTENTRY(atomic_inc_8_nv)
53 39 ALTENTRY(atomic_inc_uchar)
54 40 ALTENTRY(atomic_inc_uchar_nv)
55 41 mov r1, #1 @ r1 = delta (+1)
56 42 b atomic_add_8 @ tail-call shared LDREXB/STREXB loop
57 43 SET_SIZE(atomic_inc_uchar_nv)
58 44 SET_SIZE(atomic_inc_uchar)
59 45 SET_SIZE(atomic_inc_8_nv)
60 46 SET_SIZE(atomic_inc_8)
61 47
62 48 /*
63 49 * NOTE: If atomic_dec_8 and atomic_dec_8_nv are ever
64 50 * separated, you need to also edit the libc arm platform
65 51 * specific mapfile and remove the NODYNSORT attribute
66 52 * from atomic_dec_8_nv.
67 53 */
@ uint8_t atomic_dec_8{,_nv}(volatile uint8_t *target): subtract 1, return new value.
68 54 ENTRY(atomic_dec_8)
69 55 ALTENTRY(atomic_dec_8_nv)
70 56 ALTENTRY(atomic_dec_uchar)
71 57 ALTENTRY(atomic_dec_uchar_nv)
72 58 mov r1, #-1 @ r1 = delta (-1; assembler encodes via mvn)
73 59 b atomic_add_8 @ tail-call shared LDREXB/STREXB loop
74 60 SET_SIZE(atomic_dec_uchar_nv)
75 61 SET_SIZE(atomic_dec_uchar)
76 62 SET_SIZE(atomic_dec_8_nv)
77 63 SET_SIZE(atomic_dec_8)
78 64
79 65 /*
80 66 * NOTE: If atomic_add_8 and atomic_add_8_nv are ever
81 67 * separated, you need to also edit the libc arm platform
82 68 * specific mapfile and remove the NODYNSORT attribute
83 69 * from atomic_add_8_nv.
84 70 */
@ uint8_t atomic_add_8{,_nv}(volatile uint8_t *target, int8_t delta):
@ classic load-exclusive/store-exclusive retry loop; returns new value in r0.
85 71 ENTRY(atomic_add_8)
86 72 ALTENTRY(atomic_add_8_nv)
87 73 ALTENTRY(atomic_add_char)
88 74 ALTENTRY(atomic_add_char_nv)
89 75 1:
90 76 ldrexb r2, [r0] @ r2 = old value (exclusive load)
91 77 add r2, r1, r2 @ r2 = old + delta
92 78 strexb r3, r2, [r0] @ r3 = 0 iff store succeeded
93 79 cmp r3, #0
94 80 bne 1b @ reservation lost; retry
95 81 mov r0, r2 @ return the new value (_nv contract)
96 82 bx lr
97 83 SET_SIZE(atomic_add_char_nv)
98 84 SET_SIZE(atomic_add_char)
99 85 SET_SIZE(atomic_add_8_nv)
100 86 SET_SIZE(atomic_add_8)
101 87
102 88 /*
103 89 * NOTE: If atomic_inc_16 and atomic_inc_16_nv are ever
104 90 * separated, you need to also edit the libc arm platform
105 91 * specific mapfile and remove the NODYNSORT attribute
106 92 * from atomic_inc_16_nv.
107 93 */
@ uint16_t atomic_inc_16{,_nv}(volatile uint16_t *target): add 1, return new value.
108 94 ENTRY(atomic_inc_16)
109 95 ALTENTRY(atomic_inc_16_nv)
110 96 ALTENTRY(atomic_inc_ushort)
111 97 ALTENTRY(atomic_inc_ushort_nv)
112 98 mov r1, #1 @ r1 = delta (+1)
113 99 b atomic_add_16 @ tail-call shared LDREXH/STREXH loop
114 100 SET_SIZE(atomic_inc_ushort_nv)
115 101 SET_SIZE(atomic_inc_ushort)
116 102 SET_SIZE(atomic_inc_16_nv)
117 103 SET_SIZE(atomic_inc_16)
118 104
119 105 /*
120 106 * NOTE: If atomic_dec_16 and atomic_dec_16_nv are ever
121 107 * separated, you need to also edit the libc arm platform
122 108 * specific mapfile and remove the NODYNSORT attribute
123 109 * from atomic_dec_16_nv.
124 110 */
@ uint16_t atomic_dec_16{,_nv}(volatile uint16_t *target): subtract 1, return new value.
125 111 ENTRY(atomic_dec_16)
126 112 ALTENTRY(atomic_dec_16_nv)
127 113 ALTENTRY(atomic_dec_ushort)
128 114 ALTENTRY(atomic_dec_ushort_nv)
129 115 mov r1, #-1 @ r1 = delta (-1; assembler encodes via mvn)
130 116 b atomic_add_16 @ tail-call shared LDREXH/STREXH loop
131 117 SET_SIZE(atomic_dec_ushort_nv)
132 118 SET_SIZE(atomic_dec_ushort)
133 119 SET_SIZE(atomic_dec_16_nv)
134 120 SET_SIZE(atomic_dec_16)
135 121
136 122 /*
137 123 * NOTE: If atomic_add_16 and atomic_add_16_nv are ever
138 124 * separated, you need to also edit the libc arm platform
139 125 * specific mapfile and remove the NODYNSORT attribute
140 126 * from atomic_add_16_nv.
141 127 */
@ uint16_t atomic_add_16{,_nv}(volatile uint16_t *target, int16_t delta):
@ LL/SC retry loop; returns new value in r0.
142 128 ENTRY(atomic_add_16)
143 129 ALTENTRY(atomic_add_16_nv)
144 130 ALTENTRY(atomic_add_short)
145 131 ALTENTRY(atomic_add_short_nv)
146 132 1:
147 133 ldrexh r2, [r0] @ r2 = old value (exclusive load)
148 134 add r2, r1, r2 @ r2 = old + delta
149 135 strexh r3, r2, [r0] @ r3 = 0 iff store succeeded
150 136 cmp r3, #0
151 137 bne 1b @ reservation lost; retry
152 138 mov r0, r2 @ return the new value (_nv contract)
153 139 bx lr
154 140 SET_SIZE(atomic_add_short_nv)
155 141 SET_SIZE(atomic_add_short)
156 142 SET_SIZE(atomic_add_16_nv)
157 143 SET_SIZE(atomic_add_16)
158 144
159 145 /*
160 146 * NOTE: If atomic_inc_32 and atomic_inc_32_nv are ever
161 147 * separated, you need to also edit the libc arm platform
162 148 * specific mapfile and remove the NODYNSORT attribute
163 149 * from atomic_inc_32_nv.
164 150 */
@ uint32_t atomic_inc_32{,_nv}(volatile uint32_t *target): add 1, return new value.
@ ulong_t is 32 bits on 32-bit ARM, so the ulong variants alias the 32-bit ones.
165 151 ENTRY(atomic_inc_32)
166 152 ALTENTRY(atomic_inc_32_nv)
167 153 ALTENTRY(atomic_inc_uint)
168 154 ALTENTRY(atomic_inc_uint_nv)
169 155 ALTENTRY(atomic_inc_ulong)
170 156 ALTENTRY(atomic_inc_ulong_nv)
171 157 mov r1, #1 @ r1 = delta (+1)
172 158 b atomic_add_32 @ tail-call shared LDREX/STREX loop
173 159 SET_SIZE(atomic_inc_ulong_nv)
174 160 SET_SIZE(atomic_inc_ulong)
175 161 SET_SIZE(atomic_inc_uint_nv)
176 162 SET_SIZE(atomic_inc_uint)
177 163 SET_SIZE(atomic_inc_32_nv)
178 164 SET_SIZE(atomic_inc_32)
179 165
180 166 /*
181 167 * NOTE: If atomic_dec_32 and atomic_dec_32_nv are ever
182 168 * separated, you need to also edit the libc arm platform
183 169 * specific mapfile and remove the NODYNSORT attribute
184 170 * from atomic_dec_32_nv.
185 171 */
@ uint32_t atomic_dec_32{,_nv}(volatile uint32_t *target): subtract 1, return new value.
186 172 ENTRY(atomic_dec_32)
187 173 ALTENTRY(atomic_dec_32_nv)
188 174 ALTENTRY(atomic_dec_uint)
189 175 ALTENTRY(atomic_dec_uint_nv)
190 176 ALTENTRY(atomic_dec_ulong)
191 177 ALTENTRY(atomic_dec_ulong_nv)
192 178 mov r1, #-1 @ r1 = delta (-1; assembler encodes via mvn)
193 179 b atomic_add_32 @ tail-call shared LDREX/STREX loop
194 180 SET_SIZE(atomic_dec_ulong_nv)
195 181 SET_SIZE(atomic_dec_ulong)
196 182 SET_SIZE(atomic_dec_uint_nv)
197 183 SET_SIZE(atomic_dec_uint)
198 184 SET_SIZE(atomic_dec_32_nv)
199 185 SET_SIZE(atomic_dec_32)
200 186
201 187 /*
202 188 * NOTE: If atomic_add_32 and atomic_add_32_nv are ever
203 189 * separated, you need to also edit the libc arm platform
204 190 * specific mapfile and remove the NODYNSORT attribute
205 191 * from atomic_add_32_nv.
206 192 */
@ uint32_t atomic_add_32{,_nv}(volatile uint32_t *target, int32_t delta):
@ LL/SC retry loop; returns new value in r0. Pointers are 32-bit here,
@ so the _ptr and _long variants alias this routine as well.
207 193 ENTRY(atomic_add_32)
208 194 ALTENTRY(atomic_add_32_nv)
209 195 ALTENTRY(atomic_add_int)
210 196 ALTENTRY(atomic_add_int_nv)
211 197 ALTENTRY(atomic_add_ptr)
212 198 ALTENTRY(atomic_add_ptr_nv)
213 199 ALTENTRY(atomic_add_long)
214 200 ALTENTRY(atomic_add_long_nv)
215 201 1:
216 202 ldrex r2, [r0] @ r2 = old value (exclusive load)
217 203 add r2, r1, r2 @ r2 = old + delta
218 204 strex r3, r2, [r0] @ r3 = 0 iff store succeeded
219 205 cmp r3, #0
220 206 bne 1b @ reservation lost; retry
221 207 mov r0, r2 @ return the new value (_nv contract)
222 208 bx lr
223 209 SET_SIZE(atomic_add_long_nv)
224 210 SET_SIZE(atomic_add_long)
225 211 SET_SIZE(atomic_add_ptr_nv)
226 212 SET_SIZE(atomic_add_ptr)
227 213 SET_SIZE(atomic_add_int_nv)
228 214 SET_SIZE(atomic_add_int)
229 215 SET_SIZE(atomic_add_32_nv)
230 216 SET_SIZE(atomic_add_32)
231 217
232 218 /*
233 219 * NOTE: If atomic_inc_64 and atomic_inc_64_nv are ever
234 220 * separated, you need to also edit the libc arm platform
235 221 * specific mapfile and remove the NODYNSORT attribute
236 222 * from atomic_inc_64_nv.
237 223 */
@ uint64_t atomic_inc_64{,_nv}(volatile uint64_t *target): add 1, return new value.
@ 64-bit delta is passed in r2 (low) / r3 (high), little-endian AAPCS pairing.
238 224 ENTRY(atomic_inc_64)
239 225 ALTENTRY(atomic_inc_64_nv)
240 226 mov r2, #1 @ delta low = 1
241 227 mov r3, #0 @ delta high = 0
242 228 b atomic_add_64 @ tail-call shared LDREXD/STREXD loop
243 229 SET_SIZE(atomic_inc_64_nv)
244 230 SET_SIZE(atomic_inc_64)
245 231
246 232 /*
247 233 * NOTE: If atomic_dec_64 and atomic_dec_64_nv are ever
248 234 * separated, you need to also edit the libc arm platform
249 235 * specific mapfile and remove the NODYNSORT attribute
250 236 * from atomic_dec_64_nv.
251 237 */
@ uint64_t atomic_dec_64{,_nv}(volatile uint64_t *target): subtract 1, return new value.
252 238 ENTRY(atomic_dec_64)
253 239 ALTENTRY(atomic_dec_64_nv)
254 240 mov r2, #-1 @ delta low = 0xffffffff (assembler encodes via mvn)
255 241 mvn r3, #0 @ delta high = 0xffffffff; r3:r2 = -1
256 242 b atomic_add_64 @ tail-call shared LDREXD/STREXD loop
257 243 SET_SIZE(atomic_dec_64_nv)
258 244 SET_SIZE(atomic_dec_64)
259 245
260 246 /*
261 247 * NOTE: If atomic_add_64 and atomic_add_64_nv are ever
262 248 * separated, you need to also edit the libc arm platform
263 249 * specific mapfile and remove the NODYNSORT attribute
264 250 * from atomic_add_64_nv.
265 251 */
@ uint64_t atomic_add_64{,_nv}(volatile uint64_t *target, int64_t delta):
@ LDREXD/STREXD retry loop (needs an even/odd register pair, hence r4/r5).
@ Returns the new value in r0 (low) / r1 (high).
266 252 ENTRY(atomic_add_64)
267 253 ALTENTRY(atomic_add_64_nv)
268 254 push { r4, r5 } @ r4/r5 are callee-saved
269 255 1:
270 256 ldrexd r4, r5, [r0] @ r4:r5 = old value (exclusive load)
271 257 adds r4, r4, r2 @ low word add, sets carry
272 258 adc r5, r5, r3 @ high word add with carry
273 259 strexd r1, r4, r5, [r0] @ r1 = 0 iff store succeeded
274 260 cmp r1, #0
275 261 bne 1b @ reservation lost; retry
276 262 mov r0, r4 @ return new value low word
277 263 mov r1, r5 @ return new value high word
278 264 pop { r4, r5 }
279 265 bx lr
280 266 SET_SIZE(atomic_add_64_nv)
281 267 SET_SIZE(atomic_add_64)
282 268
283 269 /*
284 270 * NOTE: If atomic_or_8 and atomic_or_8_nv are ever
285 271 * separated, you need to also edit the libc arm platform
286 272 * specific mapfile and remove the NODYNSORT attribute
287 273 * from atomic_or_8_nv.
288 274 */
@ uint8_t atomic_or_8{,_nv}(volatile uint8_t *target, uint8_t bits):
@ *target |= bits atomically; returns the new value in r0.
289 275 ENTRY(atomic_or_8)
290 276 ALTENTRY(atomic_or_8_nv)
291 277 ALTENTRY(atomic_or_uchar)
292 278 ALTENTRY(atomic_or_uchar_nv)
293 279 1:
294 280 ldrexb r2, [r0] @ r2 = old value (exclusive load)
295 281 orr r2, r1, r2 @ r2 = old | bits
296 282 strexb r3, r2, [r0] @ r3 = 0 iff store succeeded
297 283 cmp r3, #0
298 284 bne 1b @ reservation lost; retry
299 285 mov r0, r2 @ return the new value
300 286 bx lr
301 287 SET_SIZE(atomic_or_uchar_nv)
302 288 SET_SIZE(atomic_or_uchar)
303 289 SET_SIZE(atomic_or_8_nv)
304 290 SET_SIZE(atomic_or_8)
305 291
306 292 /*
307 293 * NOTE: If atomic_or_16 and atomic_or_16_nv are ever
308 294 * separated, you need to also edit the libc arm platform
309 295 * specific mapfile and remove the NODYNSORT attribute
310 296 * from atomic_or_16_nv.
311 297 */
@ uint16_t atomic_or_16{,_nv}(volatile uint16_t *target, uint16_t bits):
@ *target |= bits atomically; returns the new value in r0.
312 298 ENTRY(atomic_or_16)
313 299 ALTENTRY(atomic_or_16_nv)
314 300 ALTENTRY(atomic_or_ushort)
315 301 ALTENTRY(atomic_or_ushort_nv)
316 302 1:
317 303 ldrexh r2, [r0] @ r2 = old value (exclusive load)
318 304 orr r2, r1, r2 @ r2 = old | bits
319 305 strexh r3, r2, [r0] @ r3 = 0 iff store succeeded
320 306 cmp r3, #0
321 307 bne 1b @ reservation lost; retry
322 308 mov r0, r2 @ return the new value
323 309 bx lr
324 310 SET_SIZE(atomic_or_ushort_nv)
325 311 SET_SIZE(atomic_or_ushort)
326 312 SET_SIZE(atomic_or_16_nv)
327 313 SET_SIZE(atomic_or_16)
328 314
329 315 /*
330 316 * NOTE: If atomic_or_32 and atomic_or_32_nv are ever
331 317 * separated, you need to also edit the libc arm platform
332 318 * specific mapfile and remove the NODYNSORT attribute
333 319 * from atomic_or_32_nv.
334 320 */
@ uint32_t atomic_or_32{,_nv}(volatile uint32_t *target, uint32_t bits):
@ *target |= bits atomically; returns the new value in r0.
335 321 ENTRY(atomic_or_32)
336 322 ALTENTRY(atomic_or_32_nv)
337 323 ALTENTRY(atomic_or_uint)
338 324 ALTENTRY(atomic_or_uint_nv)
339 325 ALTENTRY(atomic_or_ulong)
340 326 ALTENTRY(atomic_or_ulong_nv)
341 327 1:
342 328 ldrex r2, [r0] @ r2 = old value (exclusive load)
343 329 orr r2, r1, r2 @ FIX: was "add" -- this is an OR primitive
344 330 strex r3, r2, [r0] @ r3 = 0 iff store succeeded
345 331 cmp r3, #0
346 332 bne 1b @ reservation lost; retry
347 333 mov r0, r2 @ return the new value
348 334 bx lr
349 335 SET_SIZE(atomic_or_ulong_nv)
350 336 SET_SIZE(atomic_or_ulong)
351 337 SET_SIZE(atomic_or_uint_nv)
352 338 SET_SIZE(atomic_or_uint)
353 339 SET_SIZE(atomic_or_32_nv)
354 340 SET_SIZE(atomic_or_32)
355 341
356 342 /*
357 343 * NOTE: If atomic_or_64 and atomic_or_64_nv are ever
358 344 * separated, you need to also edit the libc arm platform
359 345 * specific mapfile and remove the NODYNSORT attribute
360 346 * from atomic_or_64_nv.
361 347 */
@ uint64_t atomic_or_64{,_nv}(volatile uint64_t *target, uint64_t bits):
@ bits arrive in r2 (low) / r3 (high); new value returned in r0/r1.
362 348 ENTRY(atomic_or_64)
363 349 ALTENTRY(atomic_or_64_nv)
364 350 push { r4, r5 } @ r4/r5 are callee-saved
365 351 1:
366 352 ldrexd r4, r5, [r0] @ r4:r5 = old value (exclusive load)
367 353 orr r4, r4, r2 @ low word
368 354 orr r5, r5, r3 @ high word
369 355 strexd r1, r4, r5, [r0] @ r1 = 0 iff store succeeded
370 356 cmp r1, #0
371 357 bne 1b @ reservation lost; retry
372 358 mov r0, r4 @ return new value low word
373 359 mov r1, r5 @ return new value high word
374 360 pop { r4, r5 }
375 361 bx lr
376 362 SET_SIZE(atomic_or_64_nv)
377 363 SET_SIZE(atomic_or_64)
378 364
379 365 /*
380 366 * NOTE: If atomic_and_8 and atomic_and_8_nv are ever
381 367 * separated, you need to also edit the libc arm platform
382 368 * specific mapfile and remove the NODYNSORT attribute
383 369 * from atomic_and_8_nv.
384 370 */
@ uint8_t atomic_and_8{,_nv}(volatile uint8_t *target, uint8_t bits):
@ *target &= bits atomically; returns the new value in r0.
385 371 ENTRY(atomic_and_8)
386 372 ALTENTRY(atomic_and_8_nv)
387 373 ALTENTRY(atomic_and_uchar)
388 374 ALTENTRY(atomic_and_uchar_nv)
389 375 1:
390 376 ldrexb r2, [r0] @ r2 = old value (exclusive load)
391 377 and r2, r1, r2 @ r2 = old & bits
392 378 strexb r3, r2, [r0] @ r3 = 0 iff store succeeded
393 379 cmp r3, #0
394 380 bne 1b @ reservation lost; retry
395 381 mov r0, r2 @ return the new value
396 382 bx lr
SET_SIZE(atomic_and_uchar_nv) @ FIX: was missing, unlike every sibling routine
397 383 SET_SIZE(atomic_and_uchar)
398 384 SET_SIZE(atomic_and_8_nv)
399 385 SET_SIZE(atomic_and_8)
400 386
401 387 /*
402 388 * NOTE: If atomic_and_16 and atomic_and_16_nv are ever
403 389 * separated, you need to also edit the libc arm platform
404 390 * specific mapfile and remove the NODYNSORT attribute
405 391 * from atomic_and_16_nv.
406 392 */
@ uint16_t atomic_and_16{,_nv}(volatile uint16_t *target, uint16_t bits):
@ *target &= bits atomically; returns the new value in r0.
407 393 ENTRY(atomic_and_16)
408 394 ALTENTRY(atomic_and_16_nv)
409 395 ALTENTRY(atomic_and_ushort)
410 396 ALTENTRY(atomic_and_ushort_nv)
411 397 1:
412 398 ldrexh r2, [r0] @ r2 = old value (exclusive load)
413 399 and r2, r1, r2 @ r2 = old & bits
414 400 strexh r3, r2, [r0] @ r3 = 0 iff store succeeded
415 401 cmp r3, #0
416 402 bne 1b @ reservation lost; retry
417 403 mov r0, r2 @ return the new value
418 404 bx lr
419 405 SET_SIZE(atomic_and_ushort_nv)
420 406 SET_SIZE(atomic_and_ushort)
421 407 SET_SIZE(atomic_and_16_nv)
422 408 SET_SIZE(atomic_and_16)
423 409
424 410 /*
425 411 * NOTE: If atomic_and_32 and atomic_and_32_nv are ever
426 412 * separated, you need to also edit the libc arm platform
427 413 * specific mapfile and remove the NODYNSORT attribute
428 414 * from atomic_and_32_nv.
429 415 */
@ uint32_t atomic_and_32{,_nv}(volatile uint32_t *target, uint32_t bits):
@ *target &= bits atomically; returns the new value in r0.
430 416 ENTRY(atomic_and_32)
431 417 ALTENTRY(atomic_and_32_nv)
432 418 ALTENTRY(atomic_and_uint)
433 419 ALTENTRY(atomic_and_uint_nv)
434 420 ALTENTRY(atomic_and_ulong)
435 421 ALTENTRY(atomic_and_ulong_nv)
436 422 1:
437 423 ldrex r2, [r0] @ r2 = old value (exclusive load)
438 424 and r2, r1, r2 @ r2 = old & bits
439 425 strex r3, r2, [r0] @ r3 = 0 iff store succeeded
440 426 cmp r3, #0
441 427 bne 1b @ reservation lost; retry
442 428 mov r0, r2 @ return the new value
443 429 bx lr
444 430 SET_SIZE(atomic_and_ulong_nv)
445 431 SET_SIZE(atomic_and_ulong)
446 432 SET_SIZE(atomic_and_uint_nv)
447 433 SET_SIZE(atomic_and_uint)
448 434 SET_SIZE(atomic_and_32_nv)
449 435 SET_SIZE(atomic_and_32)
450 436
451 437 /*
452 438 * NOTE: If atomic_and_64 and atomic_and_64_nv are ever
453 439 * separated, you need to also edit the libc arm platform
454 440 * specific mapfile and remove the NODYNSORT attribute
455 441 * from atomic_and_64_nv.
456 442 */
@ uint64_t atomic_and_64{,_nv}(volatile uint64_t *target, uint64_t bits):
@ bits arrive in r2 (low) / r3 (high); new value returned in r0/r1.
457 443 ENTRY(atomic_and_64)
458 444 ALTENTRY(atomic_and_64_nv)
459 445 push { r4, r5 } @ r4/r5 are callee-saved
460 446 1:
461 447 ldrexd r4, r5, [r0] @ r4:r5 = old value (exclusive load)
462 448 and r4, r4, r2 @ low word
463 449 and r5, r5, r3 @ high word
464 450 strexd r1, r4, r5, [r0] @ r1 = 0 iff store succeeded
465 451 cmp r1, #0
466 452 bne 1b @ reservation lost; retry
467 453 mov r0, r4 @ return new value low word
468 454 mov r1, r5 @ return new value high word
469 455 pop { r4, r5 }
470 456 bx lr
471 457 SET_SIZE(atomic_and_64_nv)
472 458 SET_SIZE(atomic_and_64)
473 459
@ uint8_t atomic_cas_8(volatile uint8_t *target, uint8_t cmp, uint8_t newval):
@ if (*target == cmp) *target = newval; always returns the old *target.
474 460 ENTRY(atomic_cas_8)
475 461 ALTENTRY(atomic_cas_uchar)
476 462 push { r4 } @ r4 is callee-saved
477 463 1:
478 464 ldrexb r3, [r0] @ r3 = old value (exclusive load)
479 465 cmp r1, r3
480 466 bne 2f @ Compare failed, bail
481 467 strexb r4, r2, [r0]
482 468 cmp r4, #0 @ strexb failed, take another lap
483 469 bne 1b
484 470 2:
485 471 mov r0, r3 @ return the old value
486 472 pop { r4 }
487 473 bx lr
488 474 SET_SIZE(atomic_cas_uchar)
489 475 SET_SIZE(atomic_cas_8)
490 476
@ uint16_t atomic_cas_16(volatile uint16_t *target, uint16_t cmp, uint16_t newval):
@ same shape as atomic_cas_8, halfword-sized.
491 477 ENTRY(atomic_cas_16)
492 478 ALTENTRY(atomic_cas_ushort)
493 479 push { r4 } @ r4 is callee-saved
494 480 1:
495 481 ldrexh r3, [r0] @ r3 = old value (exclusive load)
496 482 cmp r1, r3
497 483 bne 2f @ Compare failed, bail
498 484 strexh r4, r2, [r0]
499 485 cmp r4, #0 @ strexh failed, take another lap
500 486 bne 1b
501 487 2:
502 488 mov r0, r3 @ return the old value
503 489 pop { r4 }
504 490 bx lr
505 491 SET_SIZE(atomic_cas_ushort)
506 492 SET_SIZE(atomic_cas_16)
507 493
@ uint32_t atomic_cas_32(volatile uint32_t *target, uint32_t cmp, uint32_t newval):
@ if (*target == cmp) *target = newval; always returns the old *target.
@ Pointers and ulong_t are 32 bits here, so _ptr/_ulong alias this routine.
508 494 ENTRY(atomic_cas_32)
509 495 ALTENTRY(atomic_cas_uint)
510 496 ALTENTRY(atomic_cas_ptr)
511 497 ALTENTRY(atomic_cas_ulong)
512 498 push { r4 } @ r4 is callee-saved
513 499 1:
514 500 ldrex r3, [r0] @ r3 = old value (exclusive load)
515 501 cmp r1, r3
516 502 bne 2f @ Compare failed, bail
517 503 strex r4, r2, [r0]
518 504 cmp r4, #0 @ strex failed, take another lap
519 505 bne 1b
520 506 2:
521 507 mov r0, r3 @ return the old value
522 508 pop { r4 }
523 509 bx lr
524 510 SET_SIZE(atomic_cas_ulong)
525 511 SET_SIZE(atomic_cas_ptr)
526 512 SET_SIZE(atomic_cas_uint)
527 513 SET_SIZE(atomic_cas_32)
528 514
529 515 /*
530 516 * atomic_cas_64(uint64_t *target, uint64_t cmp, uint64_t newval);
531 517 *
532 518 * target is in r0
533 519 * cmp is in r2,r3 (r2 = low word, r3 = high word, little-endian AAPCS)
534 520 * newval is on the stack
535 521 *
536 522 * Our register allocation:
537 523 * r0 - Always contains target
538 524 * r1 - Always used for the result of strexd
539 525 * r2, r3 - Always used for cmp
540 526 * r4, r5 - Always used for newval
541 527 * r6, r7 - Always used as the ldrexd target (r6 = low, r7 = high)
542 528 *
543 529 * Note that sp points to newval when we enter. We push four values, so
544 530 * we need to add 16 when we load newval.
545 531 */
546 532 ENTRY(atomic_cas_64)
547 533 push { r4, r5, r6, r7 }
548 534 ldrd r4, [sp, #16] @ load newval into r4 (low) / r5 (high)
549 535 1:
550 536 ldrexd r6, r7, [r0] @ load *target
551 537 cmp r6, r2
552 538 bne 2f @ bail if low word not equal
553 539 cmp r7, r3 @ FIX: was "cmp r5, r3", which compared newval, not *target
554 540 bne 2f @ bail if high word not equal
555 541 strexd r1, r4, r5, [r0] @ try to store newval into *target
556 542 cmp r1, #0
557 543 bne 1b @ try again if store aborted
558 544 2:
559 545 mov r0, r6 @ ret low word of *target
560 546 mov r1, r7 @ ret high word of *target
561 547 pop { r4, r5, r6, r7 }
562 548 bx lr
563 549 SET_SIZE(atomic_cas_64)
564 550
@ uint8_t atomic_swap_8(volatile uint8_t *target, uint8_t newval):
@ unconditionally store newval; return the previous value.
565 551 ENTRY(atomic_swap_8)
566 552 ALTENTRY(atomic_swap_uchar)
567 553 1:
568 554 ldrexb r2, [r0] @ r2 = old value (exclusive load)
569 555 strexb r3, r1, [r0] @ r3 = 0 iff store succeeded
570 556 cmp r3, #0
571 557 bne 1b @ reservation lost; retry
572 558 mov r0, r2 @ return the old value
573 559 bx lr
574 560 SET_SIZE(atomic_swap_uchar)
575 561 SET_SIZE(atomic_swap_8)
576 562
@ uint16_t atomic_swap_16(volatile uint16_t *target, uint16_t newval).
577 563 ENTRY(atomic_swap_16)
578 564 ALTENTRY(atomic_swap_ushort)
579 565 1:
580 566 ldrexh r2, [r0] @ r2 = old value (exclusive load)
581 567 strexh r3, r1, [r0] @ r3 = 0 iff store succeeded
582 568 cmp r3, #0
583 569 bne 1b @ reservation lost; retry
584 570 mov r0, r2 @ return the old value
585 571 bx lr
586 572 SET_SIZE(atomic_swap_ushort)
587 573 SET_SIZE(atomic_swap_16)
588 574
@ uint32_t atomic_swap_32(volatile uint32_t *target, uint32_t newval).
@ Pointers and ulong_t are 32 bits here, so _ptr/_ulong alias this routine.
589 575 ENTRY(atomic_swap_32)
590 576 ALTENTRY(atomic_swap_uint)
591 577 ALTENTRY(atomic_swap_ptr)
592 578 ALTENTRY(atomic_swap_ulong)
593 579 1:
594 580 ldrex r2, [r0] @ r2 = old value (exclusive load)
595 581 strex r3, r1, [r0] @ r3 = 0 iff store succeeded
596 582 cmp r3, #0
597 583 bne 1b @ reservation lost; retry
598 584 mov r0, r2 @ return the old value
599 585 bx lr
600 586 SET_SIZE(atomic_swap_ulong)
601 587 SET_SIZE(atomic_swap_ptr)
602 588 SET_SIZE(atomic_swap_uint)
603 589 SET_SIZE(atomic_swap_32)
604 590
@ uint64_t atomic_swap_64(volatile uint64_t *target, uint64_t newval):
@ newval arrives in r2 (low) / r3 (high); old value returned in r0/r1.
605 591 ENTRY(atomic_swap_64)
606 592 push { r4, r5 } @ r4/r5 are callee-saved
607 593 1:
608 594 ldrexd r4, r5, [r0] @ r4:r5 = old value (exclusive load)
609 595 strexd r1, r2, r3, [r0] @ r1 = 0 iff store succeeded
610 596 cmp r1, #0
611 597 bne 1b @ reservation lost; retry
612 598 mov r0, r4 @ return old value low word
613 599 mov r1, r5 @ return old value high word
614 600 pop { r4, r5 }
615 601 bx lr
616 602 SET_SIZE(atomic_swap_64)
617 603
@ int atomic_set_long_excl(volatile ulong_t *target, uint_t bit):
@ atomically set bit number "bit"; returns 0 on success, -1 if the bit
@ was already set.
618 604 ENTRY(atomic_set_long_excl)
619 605 mov r3, #1
620 606 lsl r1, r3, r1 @ r1 = mask = 1 << bit
621 607 1:
622 608 ldrex r2, [r0] @ r2 = old value (exclusive load)
623 609 and r3, r1, r2
624 610 cmp r3, r1 @ Check if the bit is set
625 611 beq 2f
626 612 orr r2, r1, r2 @ Set the bit
627 613 strex r3, r2, [r0] @ FIX: store the merged word (was r1, which wiped all the other bits)
628 614 cmp r3, #0
629 615 bne 1b
630 616 mov r0, #0
631 617 bx lr
632 618 2:
633 619 mov r0, #-1 @ bit already set
634 620 bx lr
635 621 SET_SIZE(atomic_set_long_excl)
636 622
@ int atomic_clear_long_excl(volatile ulong_t *target, uint_t bit):
@ atomically clear bit number "bit"; returns 0 on success, -1 if the bit
@ was already clear.
637 623 ENTRY(atomic_clear_long_excl)
638 624 mov r3, #1
639 625 lsl r1, r3, r1 @ r1 = mask = 1 << bit
640 626 1:
641 627 ldrex r2, [r0] @ r2 = old value (exclusive load)
642 628 and r3, r1, r2
643 629 cmp r3, r1 @ bail if the bit is already clear
644 630 bne 2f
645 631 bic r2, r2, r1 @ r2 = r2 & ~r1
646 632 strex r3, r2, [r0] @ FIX: store the cleared word (was r1, which stored just the mask)
647 633 cmp r3, #0
648 634 bne 1b
649 635 mov r0, #0
650 636 bx lr
651 637 2:
652 638 mov r0, #-1 @ bit already clear
653 639 bx lr
654 640 SET_SIZE(atomic_clear_long_excl)
655 641
656 642 #if !defined(_KERNEL)
657 643
658 644 /*
659 645 * NOTE: membar_enter, membar_exit, membar_producer, and
660 646 * membar_consumer are identical routines. We define them
661 647 * separately, instead of using ALTENTRY definitions to alias
662 648 * them together, so that DTrace and debuggers will see a unique
663 649 * address for them, allowing more accurate tracing.
664 650 */
@ Each routine issues a full data memory barrier. ARM_DMB_INSTR takes a
@ scratch register (r0) -- presumably for pre-ARMv7 CP15 barrier
@ encodings; see sys/asm_linkage.h to confirm.
665 651 ENTRY(membar_enter)
666 652 ARM_DMB_INSTR(r0)
667 653 bx lr
668 654 SET_SIZE(membar_enter)
669 655
670 656 ENTRY(membar_exit)
671 657 ARM_DMB_INSTR(r0)
672 658 bx lr
673 659 SET_SIZE(membar_exit)
674 660
675 661 ENTRY(membar_producer)
676 662 ARM_DMB_INSTR(r0)
677 663 bx lr
678 664 SET_SIZE(membar_producer)
679 665
680 666 ENTRY(membar_consumer)
681 667 ARM_DMB_INSTR(r0)
682 668 bx lr
683 669 SET_SIZE(membar_consumer)
684 670
685 671 #endif /* !_KERNEL */
↓ open down ↓ |
631 lines elided |
↑ open up ↑ |
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX