/*
 * This file and its contents are supplied under the terms of the
 * Common Development and Distribution License ("CDDL"), version 1.0.
 * You may only use this file in accordance with the terms of version
 * 1.0 of the CDDL.
 *
 * A full copy of the text of the CDDL should have accompanied this
 * source.  A copy of the CDDL is also available via the Internet at
 * http://www.illumos.org/license/CDDL.
 */

/*
 * Copyright (c) 2013 Joyent, Inc.  All rights reserved.
 */

        .file   "atomic.s"

/*
 * Atomic operations for 32-bit ARM. Note that these require at least ARMv6K
 * in order to have access to the non-word-size LDREX and STREX variants.
 */
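
/*
 * Every routine here is built around the same load-exclusive/store-exclusive
 * retry loop.  As a rough C sketch of the pattern (illustrative only;
 * load_exclusive() and store_exclusive() stand in for the LDREX and STREX
 * instructions and are not real functions):
 *
 *      uint32_t
 *      atomic_add_32_nv(volatile uint32_t *target, int32_t delta)
 *      {
 *              uint32_t new;
 *              do {
 *                      new = load_exclusive(target) + delta;  // LDREX
 *              } while (store_exclusive(target, new) != 0);   // STREX
 *              return (new);
 *      }
 *
 * STREX writes 0 to its status register on success and 1 if the exclusive
 * monitor was lost, which is why each loop below retries until the status
 * register compares equal to zero.
 */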

#include <sys/asm_linkage.h>
#include <sys/atomic_impl.h>

/*
 * XXX We probably want some kind of backoff built into these routines at
 * some point.
 */
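
/*
 * Such a backoff would presumably have each failed STREX spin for a growing
 * number of iterations before retrying.  Purely a hypothetical C sketch
 * (nothing like this exists here today; BACKOFF_MAX and the spin loop are
 * invented for illustration):
 *
 *      uint_t backoff = 1;
 *      for (;;) {
 *              new = load_exclusive(target) + delta;
 *              if (store_exclusive(target, new) == 0)
 *                      break;
 *              for (volatile uint_t i = 0; i < backoff; i++)
 *                      continue;               // spin under contention
 *              if (backoff < BACKOFF_MAX)      // hypothetical cap
 *                      backoff <<= 1;
 *      }
 */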

#if defined(_KERNEL)
        /*
         * Legacy kernel interfaces; they will go away (eventually).
         */
        ANSI_PRAGMA_WEAK2(cas8,atomic_cas_8,function)
        ANSI_PRAGMA_WEAK2(cas32,atomic_cas_32,function)
        ANSI_PRAGMA_WEAK2(cas64,atomic_cas_64,function)
        ANSI_PRAGMA_WEAK2(caslong,atomic_cas_ulong,function)
        ANSI_PRAGMA_WEAK2(casptr,atomic_cas_ptr,function)
        ANSI_PRAGMA_WEAK2(atomic_and_long,atomic_and_ulong,function)
        ANSI_PRAGMA_WEAK2(atomic_or_long,atomic_or_ulong,function)
        ANSI_PRAGMA_WEAK2(swapl,atomic_swap_32,function)
#endif

        /*
         * NOTE: If atomic_inc_8 and atomic_inc_8_nv are ever
         * separated, you need to also edit the libc arm platform
         * specific mapfile and remove the NODYNSORT attribute
         * from atomic_inc_8_nv.
         */
        ENTRY(atomic_inc_8)
        ALTENTRY(atomic_inc_8_nv)
        ALTENTRY(atomic_inc_uchar)
        ALTENTRY(atomic_inc_uchar_nv)
        mov     r1, #1
        b       atomic_add_8
        SET_SIZE(atomic_inc_uchar_nv)
        SET_SIZE(atomic_inc_uchar)
        SET_SIZE(atomic_inc_8_nv)
        SET_SIZE(atomic_inc_8)

        /*
         * NOTE: If atomic_dec_8 and atomic_dec_8_nv are ever
         * separated, you need to also edit the libc arm platform
         * specific mapfile and remove the NODYNSORT attribute
         * from atomic_dec_8_nv.
         */
        ENTRY(atomic_dec_8)
        ALTENTRY(atomic_dec_8_nv)
        ALTENTRY(atomic_dec_uchar)
        ALTENTRY(atomic_dec_uchar_nv)
        mov     r1, #-1
        b       atomic_add_8
        SET_SIZE(atomic_dec_uchar_nv)
        SET_SIZE(atomic_dec_uchar)
        SET_SIZE(atomic_dec_8_nv)
        SET_SIZE(atomic_dec_8)

        /*
         * NOTE: If atomic_add_8 and atomic_add_8_nv are ever
         * separated, you need to also edit the libc arm platform
         * specific mapfile and remove the NODYNSORT attribute
         * from atomic_add_8_nv.
         */
        ENTRY(atomic_add_8)
        ALTENTRY(atomic_add_8_nv)
        ALTENTRY(atomic_add_char)
        ALTENTRY(atomic_add_char_nv)
1:
        ldrexb  r2, [r0]
        add     r2, r1, r2
        strexb  r3, r2, [r0]
        cmp     r3, #0
        bne     1b
        mov     r0, r2
        bx      lr
        SET_SIZE(atomic_add_char_nv)
        SET_SIZE(atomic_add_char)
        SET_SIZE(atomic_add_8_nv)
        SET_SIZE(atomic_add_8)

        /*
         * NOTE: If atomic_inc_16 and atomic_inc_16_nv are ever
         * separated, you need to also edit the libc arm platform
         * specific mapfile and remove the NODYNSORT attribute
         * from atomic_inc_16_nv.
         */
        ENTRY(atomic_inc_16)
        ALTENTRY(atomic_inc_16_nv)
        ALTENTRY(atomic_inc_ushort)
        ALTENTRY(atomic_inc_ushort_nv)
        mov     r1, #1
        b       atomic_add_16
        SET_SIZE(atomic_inc_ushort_nv)
        SET_SIZE(atomic_inc_ushort)
        SET_SIZE(atomic_inc_16_nv)
        SET_SIZE(atomic_inc_16)

        /*
         * NOTE: If atomic_dec_16 and atomic_dec_16_nv are ever
         * separated, you need to also edit the libc arm platform
         * specific mapfile and remove the NODYNSORT attribute
         * from atomic_dec_16_nv.
         */
        ENTRY(atomic_dec_16)
        ALTENTRY(atomic_dec_16_nv)
        ALTENTRY(atomic_dec_ushort)
        ALTENTRY(atomic_dec_ushort_nv)
        mov     r1, #-1
        b       atomic_add_16
        SET_SIZE(atomic_dec_ushort_nv)
        SET_SIZE(atomic_dec_ushort)
        SET_SIZE(atomic_dec_16_nv)
        SET_SIZE(atomic_dec_16)

        /*
         * NOTE: If atomic_add_16 and atomic_add_16_nv are ever
         * separated, you need to also edit the libc arm platform
         * specific mapfile and remove the NODYNSORT attribute
         * from atomic_add_16_nv.
         */
        ENTRY(atomic_add_16)
        ALTENTRY(atomic_add_16_nv)
        ALTENTRY(atomic_add_short)
        ALTENTRY(atomic_add_short_nv)
1:
        ldrexh  r2, [r0]
        add     r2, r1, r2
        strexh  r3, r2, [r0]
        cmp     r3, #0
        bne     1b
        mov     r0, r2
        bx      lr
        SET_SIZE(atomic_add_short_nv)
        SET_SIZE(atomic_add_short)
        SET_SIZE(atomic_add_16_nv)
        SET_SIZE(atomic_add_16)

        /*
         * NOTE: If atomic_inc_32 and atomic_inc_32_nv are ever
         * separated, you need to also edit the libc arm platform
         * specific mapfile and remove the NODYNSORT attribute
         * from atomic_inc_32_nv.
         */
        ENTRY(atomic_inc_32)
        ALTENTRY(atomic_inc_32_nv)
        ALTENTRY(atomic_inc_uint)
        ALTENTRY(atomic_inc_uint_nv)
        ALTENTRY(atomic_inc_ulong)
        ALTENTRY(atomic_inc_ulong_nv)
        mov     r1, #1
        b       atomic_add_32
        SET_SIZE(atomic_inc_ulong_nv)
        SET_SIZE(atomic_inc_ulong)
        SET_SIZE(atomic_inc_uint_nv)
        SET_SIZE(atomic_inc_uint)
        SET_SIZE(atomic_inc_32_nv)
        SET_SIZE(atomic_inc_32)

        /*
         * NOTE: If atomic_dec_32 and atomic_dec_32_nv are ever
         * separated, you need to also edit the libc arm platform
         * specific mapfile and remove the NODYNSORT attribute
         * from atomic_dec_32_nv.
         */
        ENTRY(atomic_dec_32)
        ALTENTRY(atomic_dec_32_nv)
        ALTENTRY(atomic_dec_uint)
        ALTENTRY(atomic_dec_uint_nv)
        ALTENTRY(atomic_dec_ulong)
        ALTENTRY(atomic_dec_ulong_nv)
        mov     r1, #-1
        b       atomic_add_32
        SET_SIZE(atomic_dec_ulong_nv)
        SET_SIZE(atomic_dec_ulong)
        SET_SIZE(atomic_dec_uint_nv)
        SET_SIZE(atomic_dec_uint)
        SET_SIZE(atomic_dec_32_nv)
        SET_SIZE(atomic_dec_32)

        /*
         * NOTE: If atomic_add_32 and atomic_add_32_nv are ever
         * separated, you need to also edit the libc arm platform
         * specific mapfile and remove the NODYNSORT attribute
         * from atomic_add_32_nv.
         */
        ENTRY(atomic_add_32)
        ALTENTRY(atomic_add_32_nv)
        ALTENTRY(atomic_add_int)
        ALTENTRY(atomic_add_int_nv)
        ALTENTRY(atomic_add_ptr)
        ALTENTRY(atomic_add_ptr_nv)
        ALTENTRY(atomic_add_long)
        ALTENTRY(atomic_add_long_nv)
1:
        ldrex   r2, [r0]
        add     r2, r1, r2
        strex   r3, r2, [r0]
        cmp     r3, #0
        bne     1b
        mov     r0, r2
        bx      lr
        SET_SIZE(atomic_add_long_nv)
        SET_SIZE(atomic_add_long)
        SET_SIZE(atomic_add_ptr_nv)
        SET_SIZE(atomic_add_ptr)
        SET_SIZE(atomic_add_int_nv)
        SET_SIZE(atomic_add_int)
        SET_SIZE(atomic_add_32_nv)
        SET_SIZE(atomic_add_32)

        /*
         * NOTE: If atomic_inc_64 and atomic_inc_64_nv are ever
         * separated, you need to also edit the libc arm platform
         * specific mapfile and remove the NODYNSORT attribute
         * from atomic_inc_64_nv.
         */
        ENTRY(atomic_inc_64)
        ALTENTRY(atomic_inc_64_nv)
        mov     r2, #1
        mov     r3, #0
        b       atomic_add_64
        SET_SIZE(atomic_inc_64_nv)
        SET_SIZE(atomic_inc_64)

        /*
         * NOTE: If atomic_dec_64 and atomic_dec_64_nv are ever
         * separated, you need to also edit the libc arm platform
         * specific mapfile and remove the NODYNSORT attribute
         * from atomic_dec_64_nv.
         */
        ENTRY(atomic_dec_64)
        ALTENTRY(atomic_dec_64_nv)
        mov     r2, #-1
        mvn     r3, #0
        b       atomic_add_64
        SET_SIZE(atomic_dec_64_nv)
        SET_SIZE(atomic_dec_64)

        /*
         * NOTE: If atomic_add_64 and atomic_add_64_nv are ever
         * separated, you need to also edit the libc arm platform
         * specific mapfile and remove the NODYNSORT attribute
         * from atomic_add_64_nv.
         */
        ENTRY(atomic_add_64)
        ALTENTRY(atomic_add_64_nv)
        push    { r4, r5 }
1:
        ldrexd  r4, r5, [r0]
        adds    r4, r4, r2
        adc     r5, r5, r3
        strexd  r1, r4, r5, [r0]
        cmp     r1, #0
        bne     1b
        mov     r0, r4
        mov     r1, r5
        pop     { r4, r5 }
        bx      lr
        SET_SIZE(atomic_add_64_nv)
        SET_SIZE(atomic_add_64)

        /*
         * NOTE: If atomic_or_8 and atomic_or_8_nv are ever
         * separated, you need to also edit the libc arm platform
         * specific mapfile and remove the NODYNSORT attribute
         * from atomic_or_8_nv.
         */
        ENTRY(atomic_or_8)
        ALTENTRY(atomic_or_8_nv)
        ALTENTRY(atomic_or_uchar)
        ALTENTRY(atomic_or_uchar_nv)
1:
        ldrexb  r2, [r0]
        orr     r2, r1, r2
        strexb  r3, r2, [r0]
        cmp     r3, #0
        bne     1b
        mov     r0, r2
        bx      lr
        SET_SIZE(atomic_or_uchar_nv)
        SET_SIZE(atomic_or_uchar)
        SET_SIZE(atomic_or_8_nv)
        SET_SIZE(atomic_or_8)

        /*
         * NOTE: If atomic_or_16 and atomic_or_16_nv are ever
         * separated, you need to also edit the libc arm platform
         * specific mapfile and remove the NODYNSORT attribute
         * from atomic_or_16_nv.
         */
        ENTRY(atomic_or_16)
        ALTENTRY(atomic_or_16_nv)
        ALTENTRY(atomic_or_ushort)
        ALTENTRY(atomic_or_ushort_nv)
1:
        ldrexh  r2, [r0]
        orr     r2, r1, r2
        strexh  r3, r2, [r0]
        cmp     r3, #0
        bne     1b
        mov     r0, r2
        bx      lr
        SET_SIZE(atomic_or_ushort_nv)
        SET_SIZE(atomic_or_ushort)
        SET_SIZE(atomic_or_16_nv)
        SET_SIZE(atomic_or_16)

        /*
         * NOTE: If atomic_or_32 and atomic_or_32_nv are ever
         * separated, you need to also edit the libc arm platform
         * specific mapfile and remove the NODYNSORT attribute
         * from atomic_or_32_nv.
         */
        ENTRY(atomic_or_32)
        ALTENTRY(atomic_or_32_nv)
        ALTENTRY(atomic_or_uint)
        ALTENTRY(atomic_or_uint_nv)
        ALTENTRY(atomic_or_ulong)
        ALTENTRY(atomic_or_ulong_nv)
1:
        ldrex   r2, [r0]
        orr     r2, r1, r2              @ was mistakenly "add"; this is an OR
        strex   r3, r2, [r0]
        cmp     r3, #0
        bne     1b
        mov     r0, r2
        bx      lr
        SET_SIZE(atomic_or_ulong_nv)
        SET_SIZE(atomic_or_ulong)
        SET_SIZE(atomic_or_uint_nv)
        SET_SIZE(atomic_or_uint)
        SET_SIZE(atomic_or_32_nv)
        SET_SIZE(atomic_or_32)

        /*
         * NOTE: If atomic_or_64 and atomic_or_64_nv are ever
         * separated, you need to also edit the libc arm platform
         * specific mapfile and remove the NODYNSORT attribute
         * from atomic_or_64_nv.
         */
        ENTRY(atomic_or_64)
        ALTENTRY(atomic_or_64_nv)
        push    { r4, r5 }
1:
        ldrexd  r4, r5, [r0]
        orr     r4, r4, r2
        orr     r5, r5, r3
        strexd  r1, r4, r5, [r0]
        cmp     r1, #0
        bne     1b
        mov     r0, r4
        mov     r1, r5
        pop     { r4, r5 }
        bx      lr
        SET_SIZE(atomic_or_64_nv)
        SET_SIZE(atomic_or_64)

        /*
         * NOTE: If atomic_and_8 and atomic_and_8_nv are ever
         * separated, you need to also edit the libc arm platform
         * specific mapfile and remove the NODYNSORT attribute
         * from atomic_and_8_nv.
         */
        ENTRY(atomic_and_8)
        ALTENTRY(atomic_and_8_nv)
        ALTENTRY(atomic_and_uchar)
        ALTENTRY(atomic_and_uchar_nv)
1:
        ldrexb  r2, [r0]
        and     r2, r1, r2
        strexb  r3, r2, [r0]
        cmp     r3, #0
        bne     1b
        mov     r0, r2
        bx      lr
        SET_SIZE(atomic_and_uchar_nv)
        SET_SIZE(atomic_and_uchar)
        SET_SIZE(atomic_and_8_nv)
        SET_SIZE(atomic_and_8)

        /*
         * NOTE: If atomic_and_16 and atomic_and_16_nv are ever
         * separated, you need to also edit the libc arm platform
         * specific mapfile and remove the NODYNSORT attribute
         * from atomic_and_16_nv.
         */
        ENTRY(atomic_and_16)
        ALTENTRY(atomic_and_16_nv)
        ALTENTRY(atomic_and_ushort)
        ALTENTRY(atomic_and_ushort_nv)
1:
        ldrexh  r2, [r0]
        and     r2, r1, r2
        strexh  r3, r2, [r0]
        cmp     r3, #0
        bne     1b
        mov     r0, r2
        bx      lr
        SET_SIZE(atomic_and_ushort_nv)
        SET_SIZE(atomic_and_ushort)
        SET_SIZE(atomic_and_16_nv)
        SET_SIZE(atomic_and_16)

        /*
         * NOTE: If atomic_and_32 and atomic_and_32_nv are ever
         * separated, you need to also edit the libc arm platform
         * specific mapfile and remove the NODYNSORT attribute
         * from atomic_and_32_nv.
         */
        ENTRY(atomic_and_32)
        ALTENTRY(atomic_and_32_nv)
        ALTENTRY(atomic_and_uint)
        ALTENTRY(atomic_and_uint_nv)
        ALTENTRY(atomic_and_ulong)
        ALTENTRY(atomic_and_ulong_nv)
1:
        ldrex   r2, [r0]
        and     r2, r1, r2
        strex   r3, r2, [r0]
        cmp     r3, #0
        bne     1b
        mov     r0, r2
        bx      lr
        SET_SIZE(atomic_and_ulong_nv)
        SET_SIZE(atomic_and_ulong)
        SET_SIZE(atomic_and_uint_nv)
        SET_SIZE(atomic_and_uint)
        SET_SIZE(atomic_and_32_nv)
        SET_SIZE(atomic_and_32)

        /*
         * NOTE: If atomic_and_64 and atomic_and_64_nv are ever
         * separated, you need to also edit the libc arm platform
         * specific mapfile and remove the NODYNSORT attribute
         * from atomic_and_64_nv.
         */
        ENTRY(atomic_and_64)
        ALTENTRY(atomic_and_64_nv)
        push    { r4, r5 }
1:
        ldrexd  r4, r5, [r0]
        and     r4, r4, r2
        and     r5, r5, r3
        strexd  r1, r4, r5, [r0]
        cmp     r1, #0
        bne     1b
        mov     r0, r4
        mov     r1, r5
        pop     { r4, r5 }
        bx      lr
        SET_SIZE(atomic_and_64_nv)
        SET_SIZE(atomic_and_64)

        ENTRY(atomic_cas_8)
        ALTENTRY(atomic_cas_uchar)
        push    { r4 }
1:
        ldrexb  r3, [r0]
        cmp     r1, r3
        bne     2f                      @ Compare failed, bail
        strexb  r4, r2, [r0]
        cmp     r4, #0                  @ strexb failed, take another lap
        bne     1b
2:
        mov     r0, r3
        pop     { r4 }
        bx      lr
        SET_SIZE(atomic_cas_uchar)
        SET_SIZE(atomic_cas_8)

        ENTRY(atomic_cas_16)
        ALTENTRY(atomic_cas_ushort)
        push    { r4 }
1:
        ldrexh  r3, [r0]
        cmp     r1, r3
        bne     2f                      @ Compare failed, bail
        strexh  r4, r2, [r0]
        cmp     r4, #0                  @ strexh failed, take another lap
        bne     1b
2:
        mov     r0, r3
        pop     { r4 }
        bx      lr
        SET_SIZE(atomic_cas_ushort)
        SET_SIZE(atomic_cas_16)

        ENTRY(atomic_cas_32)
        ALTENTRY(atomic_cas_uint)
        ALTENTRY(atomic_cas_ptr)
        ALTENTRY(atomic_cas_ulong)
        push    { r4 }
1:
        ldrex   r3, [r0]
        cmp     r1, r3
        bne     2f                      @ Compare failed, bail
        strex   r4, r2, [r0]
        cmp     r4, #0                  @ strex failed, take another lap
        bne     1b
2:
        mov     r0, r3
        pop     { r4 }
        bx      lr
        SET_SIZE(atomic_cas_ulong)
        SET_SIZE(atomic_cas_ptr)
        SET_SIZE(atomic_cas_uint)
        SET_SIZE(atomic_cas_32)

        /*
         * atomic_cas_64(uint64_t *target, uint64_t cmp, uint64_t newval);
         *
         * target is in r0
         * cmp is in r2,r3 (r2 holds the low word, r3 the high word)
         * newval is on the stack
         *
         * Our register allocation:
         * r0 - Always contains target
         * r1 - Always used for the result of strexd
         * r2, r3 - Always used for cmp
         * r4, r5 - Always used for newval
         * r6, r7 - Always used as the ldrexd target
         *
         * Note that sp points to newval when we enter. We push four values, so
         * we need to add 16 when we load newval.
         */
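        /*
         * In rough C terms the routine behaves like the sketch below (an
         * illustrative sketch only; the real implementation is the
         * LDREXD/STREXD loop that follows):
         *
         *      uint64_t
         *      atomic_cas_64(volatile uint64_t *target, uint64_t cmp,
         *          uint64_t newval)
         *      {
         *              uint64_t old = *target;         // LDREXD
         *              if (old == cmp)
         *                      *target = newval;       // STREXD, retried if
         *                                              // the monitor is lost
         *              return (old);
         *      }
         */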
        ENTRY(atomic_cas_64)
        push    { r4, r5, r6, r7 }
        ldrd    r4, r5, [sp, #16]       @ load newval from the stack
1:
        ldrexd  r6, r7, [r0]            @ load *target
        cmp     r6, r2
        bne     2f                      @ bail if low word not equal
        cmp     r7, r3
        bne     2f                      @ bail if high word not equal
        strexd  r1, r4, r5, [r0]        @ try to store newval to *target
        cmp     r1, #0
        bne     1b                      @ try again if store aborted
2:
        mov     r0, r6                  @ ret low word of *target
        mov     r1, r7                  @ ret high word of *target
        pop     { r4, r5, r6, r7 }
        bx      lr
        SET_SIZE(atomic_cas_64)

        ENTRY(atomic_swap_8)
        ALTENTRY(atomic_swap_uchar)
1:
        ldrexb  r2, [r0]
        strexb  r3, r1, [r0]
        cmp     r3, #0
        bne     1b
        mov     r0, r2
        bx      lr
        SET_SIZE(atomic_swap_uchar)
        SET_SIZE(atomic_swap_8)

        ENTRY(atomic_swap_16)
        ALTENTRY(atomic_swap_ushort)
1:
        ldrexh  r2, [r0]
        strexh  r3, r1, [r0]
        cmp     r3, #0
        bne     1b
        mov     r0, r2
        bx      lr
        SET_SIZE(atomic_swap_ushort)
        SET_SIZE(atomic_swap_16)

        ENTRY(atomic_swap_32)
        ALTENTRY(atomic_swap_uint)
        ALTENTRY(atomic_swap_ptr)
        ALTENTRY(atomic_swap_ulong)
1:
        ldrex   r2, [r0]
        strex   r3, r1, [r0]
        cmp     r3, #0
        bne     1b
        mov     r0, r2
        bx      lr
        SET_SIZE(atomic_swap_ulong)
        SET_SIZE(atomic_swap_ptr)
        SET_SIZE(atomic_swap_uint)
        SET_SIZE(atomic_swap_32)

        ENTRY(atomic_swap_64)
        push    { r4, r5 }
1:
        ldrexd  r4, r5, [r0]
        strexd  r1, r2, r3, [r0]
        cmp     r1, #0
        bne     1b
        mov     r0, r4
        mov     r1, r5
        pop     { r4, r5 }
        bx      lr
        SET_SIZE(atomic_swap_64)

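        /*
         * atomic_set_long_excl() atomically sets a single bit in *target and
         * fails if that bit was already set; atomic_clear_long_excl() is its
         * complement.  A rough C sketch of the intended semantics
         * (illustrative only; the update really happens in an LDREX/STREX
         * retry loop):
         *
         *      int
         *      atomic_set_long_excl(volatile ulong_t *target, uint_t bit)
         *      {
         *              ulong_t mask = 1UL << bit;
         *              if (*target & mask)
         *                      return (-1);    // bit already set
         *              *target |= mask;
         *              return (0);
         *      }
         */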
        ENTRY(atomic_set_long_excl)
        mov     r3, #1
        lsl     r1, r3, r1              @ bit to set
1:
        ldrex   r2, [r0]
        and     r3, r1, r2
        cmp     r3, r1                  @ Check if the bit is set
        beq     2f
        orr     r2, r1, r2              @ Set the bit
        strex   r3, r2, [r0]            @ store the updated word, not the mask
        cmp     r3, #0
        bne     1b
        mov     r0, #0
        bx      lr
2:
        mov     r0, #-1                 @ bit already set
        bx      lr
        SET_SIZE(atomic_set_long_excl)

        ENTRY(atomic_clear_long_excl)
        mov     r3, #1
        lsl     r1, r3, r1              @ bit to clear
1:
        ldrex   r2, [r0]
        and     r3, r1, r2
        cmp     r3, r1                  @ Check if the bit is set
        bne     2f
        bic     r2, r2, r1              @ r2 = r2 & ~r1
        strex   r3, r2, [r0]            @ store the updated word, not the mask
        cmp     r3, #0
        bne     1b
        mov     r0, #0
        bx      lr
2:
        mov     r0, #-1                 @ bit already clear
        bx      lr
        SET_SIZE(atomic_clear_long_excl)

#if !defined(_KERNEL)

        /*
         * NOTE: membar_enter, membar_exit, membar_producer, and
         * membar_consumer are identical routines.  We define them
         * separately, instead of using ALTENTRY definitions to alias
         * them together, so that DTrace and debuggers will see a unique
         * address for them, allowing more accurate tracing.
         */
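        /*
         * Each of these simply executes a full data memory barrier via the
         * ARM_DMB_INSTR() macro.  A typical producer-side usage sketch
         * (illustrative only):
         *
         *      data = value;
         *      membar_producer();      @ order the data store before the
         *      flag = 1;               @ flag store that publishes it
         */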
        ENTRY(membar_enter)
        ARM_DMB_INSTR(r0)
        bx      lr
        SET_SIZE(membar_enter)

        ENTRY(membar_exit)
        ARM_DMB_INSTR(r0)
        bx      lr
        SET_SIZE(membar_exit)

        ENTRY(membar_producer)
        ARM_DMB_INSTR(r0)
        bx      lr
        SET_SIZE(membar_producer)

        ENTRY(membar_consumer)
        ARM_DMB_INSTR(r0)
        bx      lr
        SET_SIZE(membar_consumer)

#endif  /* !_KERNEL */