Print this page
5043 remove deprecated atomic functions' prototypes
Split |
Close |
Expand all |
Collapse all |
--- old/usr/src/common/atomic/i386/atomic.s
+++ new/usr/src/common/atomic/i386/atomic.s
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21
22 22 /*
↓ open down ↓ |
22 lines elided |
↑ open up ↑ |
23 23 * Copyright 2010 Sun Microsystems, Inc. All rights reserved.
24 24 * Use is subject to license terms.
25 25 */
26 26
27 27 .file "atomic.s"
28 28
29 29 #include <sys/asm_linkage.h>
30 30
31 31 #if defined(_KERNEL)
32 32 /*
33 - * Legacy kernel interfaces; they will go away (eventually).
33 + * Legacy kernel interfaces; they will go away the moment our closed
34 + * bins no longer require them.
34 35 */
35 36 ANSI_PRAGMA_WEAK2(cas8,atomic_cas_8,function)
36 37 ANSI_PRAGMA_WEAK2(cas32,atomic_cas_32,function)
37 38 ANSI_PRAGMA_WEAK2(cas64,atomic_cas_64,function)
38 39 ANSI_PRAGMA_WEAK2(caslong,atomic_cas_ulong,function)
39 40 ANSI_PRAGMA_WEAK2(casptr,atomic_cas_ptr,function)
40 41 ANSI_PRAGMA_WEAK2(atomic_and_long,atomic_and_ulong,function)
41 42 ANSI_PRAGMA_WEAK2(atomic_or_long,atomic_or_ulong,function)
42 43 #endif
43 44
44 45 ENTRY(atomic_inc_8)
45 46 ALTENTRY(atomic_inc_uchar)
46 47 movl 4(%esp), %eax
47 48 lock
48 49 incb (%eax)
49 50 ret
50 51 SET_SIZE(atomic_inc_uchar)
51 52 SET_SIZE(atomic_inc_8)
52 53
53 54 ENTRY(atomic_inc_16)
54 55 ALTENTRY(atomic_inc_ushort)
55 56 movl 4(%esp), %eax
56 57 lock
57 58 incw (%eax)
58 59 ret
59 60 SET_SIZE(atomic_inc_ushort)
60 61 SET_SIZE(atomic_inc_16)
61 62
62 63 ENTRY(atomic_inc_32)
63 64 ALTENTRY(atomic_inc_uint)
64 65 ALTENTRY(atomic_inc_ulong)
65 66 movl 4(%esp), %eax
66 67 lock
67 68 incl (%eax)
68 69 ret
69 70 SET_SIZE(atomic_inc_ulong)
70 71 SET_SIZE(atomic_inc_uint)
71 72 SET_SIZE(atomic_inc_32)
72 73
73 74 ENTRY(atomic_inc_8_nv)
74 75 ALTENTRY(atomic_inc_uchar_nv)
75 76 movl 4(%esp), %edx / %edx = target address
76 77 xorl %eax, %eax / clear upper bits of %eax
77 78 incb %al / %al = 1
78 79 lock
79 80 xaddb %al, (%edx) / %al = old value, inc (%edx)
80 81 incb %al / return new value
81 82 ret
82 83 SET_SIZE(atomic_inc_uchar_nv)
83 84 SET_SIZE(atomic_inc_8_nv)
84 85
85 86 ENTRY(atomic_inc_16_nv)
86 87 ALTENTRY(atomic_inc_ushort_nv)
87 88 movl 4(%esp), %edx / %edx = target address
88 89 xorl %eax, %eax / clear upper bits of %eax
89 90 incw %ax / %ax = 1
90 91 lock
91 92 xaddw %ax, (%edx) / %ax = old value, inc (%edx)
92 93 incw %ax / return new value
93 94 ret
94 95 SET_SIZE(atomic_inc_ushort_nv)
95 96 SET_SIZE(atomic_inc_16_nv)
96 97
97 98 ENTRY(atomic_inc_32_nv)
98 99 ALTENTRY(atomic_inc_uint_nv)
99 100 ALTENTRY(atomic_inc_ulong_nv)
100 101 movl 4(%esp), %edx / %edx = target address
101 102 xorl %eax, %eax / %eax = 0
102 103 incl %eax / %eax = 1
103 104 lock
104 105 xaddl %eax, (%edx) / %eax = old value, inc (%edx)
105 106 incl %eax / return new value
106 107 ret
107 108 SET_SIZE(atomic_inc_ulong_nv)
108 109 SET_SIZE(atomic_inc_uint_nv)
109 110 SET_SIZE(atomic_inc_32_nv)
110 111
111 112 /*
112 113 * NOTE: If atomic_inc_64 and atomic_inc_64_nv are ever
113 114 * separated, you need to also edit the libc i386 platform
114 115 * specific mapfile and remove the NODYNSORT attribute
115 116 * from atomic_inc_64_nv.
116 117 */
117 118 ENTRY(atomic_inc_64)
118 119 ALTENTRY(atomic_inc_64_nv)
119 120 pushl %edi
120 121 pushl %ebx
121 122 movl 12(%esp), %edi / %edi = target address
122 123 movl (%edi), %eax
123 124 movl 4(%edi), %edx / %edx:%eax = old value
124 125 1:
125 126 xorl %ebx, %ebx
126 127 xorl %ecx, %ecx
127 128 incl %ebx / %ecx:%ebx = 1
128 129 addl %eax, %ebx
129 130 adcl %edx, %ecx / add in the carry from inc
130 131 lock
131 132 cmpxchg8b (%edi) / try to stick it in
132 133 jne 1b
133 134 movl %ebx, %eax
134 135 movl %ecx, %edx / return new value
135 136 popl %ebx
136 137 popl %edi
137 138 ret
138 139 SET_SIZE(atomic_inc_64_nv)
139 140 SET_SIZE(atomic_inc_64)
140 141
141 142 ENTRY(atomic_dec_8)
142 143 ALTENTRY(atomic_dec_uchar)
143 144 movl 4(%esp), %eax
144 145 lock
145 146 decb (%eax)
146 147 ret
147 148 SET_SIZE(atomic_dec_uchar)
148 149 SET_SIZE(atomic_dec_8)
149 150
150 151 ENTRY(atomic_dec_16)
151 152 ALTENTRY(atomic_dec_ushort)
152 153 movl 4(%esp), %eax
153 154 lock
154 155 decw (%eax)
155 156 ret
156 157 SET_SIZE(atomic_dec_ushort)
157 158 SET_SIZE(atomic_dec_16)
158 159
159 160 ENTRY(atomic_dec_32)
160 161 ALTENTRY(atomic_dec_uint)
161 162 ALTENTRY(atomic_dec_ulong)
162 163 movl 4(%esp), %eax
163 164 lock
164 165 decl (%eax)
165 166 ret
166 167 SET_SIZE(atomic_dec_ulong)
167 168 SET_SIZE(atomic_dec_uint)
168 169 SET_SIZE(atomic_dec_32)
169 170
170 171 ENTRY(atomic_dec_8_nv)
171 172 ALTENTRY(atomic_dec_uchar_nv)
172 173 movl 4(%esp), %edx / %edx = target address
173 174 xorl %eax, %eax / zero upper bits of %eax
174 175 decb %al / %al = -1
175 176 lock
176 177 xaddb %al, (%edx) / %al = old value, dec (%edx)
177 178 decb %al / return new value
178 179 ret
179 180 SET_SIZE(atomic_dec_uchar_nv)
180 181 SET_SIZE(atomic_dec_8_nv)
181 182
182 183 ENTRY(atomic_dec_16_nv)
183 184 ALTENTRY(atomic_dec_ushort_nv)
184 185 movl 4(%esp), %edx / %edx = target address
185 186 xorl %eax, %eax / zero upper bits of %eax
186 187 decw %ax / %ax = -1
187 188 lock
188 189 xaddw %ax, (%edx) / %ax = old value, dec (%edx)
189 190 decw %ax / return new value
190 191 ret
191 192 SET_SIZE(atomic_dec_ushort_nv)
192 193 SET_SIZE(atomic_dec_16_nv)
193 194
194 195 ENTRY(atomic_dec_32_nv)
195 196 ALTENTRY(atomic_dec_uint_nv)
196 197 ALTENTRY(atomic_dec_ulong_nv)
197 198 movl 4(%esp), %edx / %edx = target address
198 199 xorl %eax, %eax / %eax = 0
199 200 decl %eax / %eax = -1
200 201 lock
201 202 xaddl %eax, (%edx) / %eax = old value, dec (%edx)
202 203 decl %eax / return new value
203 204 ret
204 205 SET_SIZE(atomic_dec_ulong_nv)
205 206 SET_SIZE(atomic_dec_uint_nv)
206 207 SET_SIZE(atomic_dec_32_nv)
207 208
208 209 /*
209 210 * NOTE: If atomic_dec_64 and atomic_dec_64_nv are ever
210 211 * separated, it is important to edit the libc i386 platform
211 212 * specific mapfile and remove the NODYNSORT attribute
212 213 * from atomic_dec_64_nv.
213 214 */
214 215 ENTRY(atomic_dec_64)
215 216 ALTENTRY(atomic_dec_64_nv)
216 217 pushl %edi
217 218 pushl %ebx
218 219 movl 12(%esp), %edi / %edi = target address
219 220 movl (%edi), %eax
220 221 movl 4(%edi), %edx / %edx:%eax = old value
221 222 1:
222 223 xorl %ebx, %ebx
223 224 xorl %ecx, %ecx
224 225 not %ecx
225 226 not %ebx / %ecx:%ebx = -1
226 227 addl %eax, %ebx
 227 228 	adcl	%edx, %ecx	/ add in the carry from dec
228 229 lock
229 230 cmpxchg8b (%edi) / try to stick it in
230 231 jne 1b
231 232 movl %ebx, %eax
232 233 movl %ecx, %edx / return new value
233 234 popl %ebx
234 235 popl %edi
235 236 ret
236 237 SET_SIZE(atomic_dec_64_nv)
237 238 SET_SIZE(atomic_dec_64)
238 239
239 240 ENTRY(atomic_add_8)
240 241 ALTENTRY(atomic_add_char)
241 242 movl 4(%esp), %eax
242 243 movl 8(%esp), %ecx
243 244 lock
244 245 addb %cl, (%eax)
245 246 ret
246 247 SET_SIZE(atomic_add_char)
247 248 SET_SIZE(atomic_add_8)
248 249
249 250 ENTRY(atomic_add_16)
250 251 ALTENTRY(atomic_add_short)
251 252 movl 4(%esp), %eax
252 253 movl 8(%esp), %ecx
253 254 lock
254 255 addw %cx, (%eax)
255 256 ret
256 257 SET_SIZE(atomic_add_short)
257 258 SET_SIZE(atomic_add_16)
258 259
259 260 ENTRY(atomic_add_32)
260 261 ALTENTRY(atomic_add_int)
261 262 ALTENTRY(atomic_add_ptr)
262 263 ALTENTRY(atomic_add_long)
263 264 movl 4(%esp), %eax
264 265 movl 8(%esp), %ecx
265 266 lock
266 267 addl %ecx, (%eax)
267 268 ret
268 269 SET_SIZE(atomic_add_long)
269 270 SET_SIZE(atomic_add_ptr)
270 271 SET_SIZE(atomic_add_int)
271 272 SET_SIZE(atomic_add_32)
272 273
273 274 ENTRY(atomic_or_8)
274 275 ALTENTRY(atomic_or_uchar)
275 276 movl 4(%esp), %eax
276 277 movb 8(%esp), %cl
277 278 lock
278 279 orb %cl, (%eax)
279 280 ret
280 281 SET_SIZE(atomic_or_uchar)
281 282 SET_SIZE(atomic_or_8)
282 283
283 284 ENTRY(atomic_or_16)
284 285 ALTENTRY(atomic_or_ushort)
285 286 movl 4(%esp), %eax
286 287 movw 8(%esp), %cx
287 288 lock
288 289 orw %cx, (%eax)
289 290 ret
290 291 SET_SIZE(atomic_or_ushort)
291 292 SET_SIZE(atomic_or_16)
292 293
293 294 ENTRY(atomic_or_32)
294 295 ALTENTRY(atomic_or_uint)
295 296 ALTENTRY(atomic_or_ulong)
296 297 movl 4(%esp), %eax
297 298 movl 8(%esp), %ecx
298 299 lock
299 300 orl %ecx, (%eax)
300 301 ret
301 302 SET_SIZE(atomic_or_ulong)
302 303 SET_SIZE(atomic_or_uint)
303 304 SET_SIZE(atomic_or_32)
304 305
305 306 ENTRY(atomic_and_8)
306 307 ALTENTRY(atomic_and_uchar)
307 308 movl 4(%esp), %eax
308 309 movb 8(%esp), %cl
309 310 lock
310 311 andb %cl, (%eax)
311 312 ret
312 313 SET_SIZE(atomic_and_uchar)
313 314 SET_SIZE(atomic_and_8)
314 315
315 316 ENTRY(atomic_and_16)
316 317 ALTENTRY(atomic_and_ushort)
317 318 movl 4(%esp), %eax
318 319 movw 8(%esp), %cx
319 320 lock
320 321 andw %cx, (%eax)
321 322 ret
322 323 SET_SIZE(atomic_and_ushort)
323 324 SET_SIZE(atomic_and_16)
324 325
325 326 ENTRY(atomic_and_32)
326 327 ALTENTRY(atomic_and_uint)
327 328 ALTENTRY(atomic_and_ulong)
328 329 movl 4(%esp), %eax
329 330 movl 8(%esp), %ecx
330 331 lock
331 332 andl %ecx, (%eax)
332 333 ret
333 334 SET_SIZE(atomic_and_ulong)
334 335 SET_SIZE(atomic_and_uint)
335 336 SET_SIZE(atomic_and_32)
336 337
337 338 ENTRY(atomic_add_8_nv)
338 339 ALTENTRY(atomic_add_char_nv)
339 340 movl 4(%esp), %edx / %edx = target address
340 341 movb 8(%esp), %cl / %cl = delta
341 342 movzbl %cl, %eax / %al = delta, zero extended
342 343 lock
343 344 xaddb %cl, (%edx) / %cl = old value, (%edx) = sum
344 345 addb %cl, %al / return old value plus delta
345 346 ret
346 347 SET_SIZE(atomic_add_char_nv)
347 348 SET_SIZE(atomic_add_8_nv)
348 349
349 350 ENTRY(atomic_add_16_nv)
350 351 ALTENTRY(atomic_add_short_nv)
351 352 movl 4(%esp), %edx / %edx = target address
352 353 movw 8(%esp), %cx / %cx = delta
353 354 movzwl %cx, %eax / %ax = delta, zero extended
354 355 lock
355 356 xaddw %cx, (%edx) / %cx = old value, (%edx) = sum
356 357 addw %cx, %ax / return old value plus delta
357 358 ret
358 359 SET_SIZE(atomic_add_short_nv)
359 360 SET_SIZE(atomic_add_16_nv)
360 361
361 362 ENTRY(atomic_add_32_nv)
362 363 ALTENTRY(atomic_add_int_nv)
363 364 ALTENTRY(atomic_add_ptr_nv)
364 365 ALTENTRY(atomic_add_long_nv)
365 366 movl 4(%esp), %edx / %edx = target address
366 367 movl 8(%esp), %eax / %eax = delta
367 368 movl %eax, %ecx / %ecx = delta
368 369 lock
369 370 xaddl %eax, (%edx) / %eax = old value, (%edx) = sum
370 371 addl %ecx, %eax / return old value plus delta
371 372 ret
372 373 SET_SIZE(atomic_add_long_nv)
373 374 SET_SIZE(atomic_add_ptr_nv)
374 375 SET_SIZE(atomic_add_int_nv)
375 376 SET_SIZE(atomic_add_32_nv)
376 377
377 378 /*
378 379 * NOTE: If atomic_add_64 and atomic_add_64_nv are ever
379 380 * separated, it is important to edit the libc i386 platform
380 381 * specific mapfile and remove the NODYNSORT attribute
381 382 * from atomic_add_64_nv.
382 383 */
383 384 ENTRY(atomic_add_64)
384 385 ALTENTRY(atomic_add_64_nv)
385 386 pushl %edi
386 387 pushl %ebx
387 388 movl 12(%esp), %edi / %edi = target address
388 389 movl (%edi), %eax
389 390 movl 4(%edi), %edx / %edx:%eax = old value
390 391 1:
391 392 movl 16(%esp), %ebx
392 393 movl 20(%esp), %ecx / %ecx:%ebx = delta
393 394 addl %eax, %ebx
394 395 adcl %edx, %ecx / %ecx:%ebx = new value
395 396 lock
396 397 cmpxchg8b (%edi) / try to stick it in
397 398 jne 1b
398 399 movl %ebx, %eax
399 400 movl %ecx, %edx / return new value
400 401 popl %ebx
401 402 popl %edi
402 403 ret
403 404 SET_SIZE(atomic_add_64_nv)
404 405 SET_SIZE(atomic_add_64)
405 406
406 407 ENTRY(atomic_or_8_nv)
407 408 ALTENTRY(atomic_or_uchar_nv)
408 409 movl 4(%esp), %edx / %edx = target address
409 410 movb (%edx), %al / %al = old value
410 411 1:
 411 412 	movl	8(%esp), %ecx	/ %ecx = bits to set
412 413 orb %al, %cl / %cl = new value
413 414 lock
414 415 cmpxchgb %cl, (%edx) / try to stick it in
415 416 jne 1b
416 417 movzbl %cl, %eax / return new value
417 418 ret
418 419 SET_SIZE(atomic_or_uchar_nv)
419 420 SET_SIZE(atomic_or_8_nv)
420 421
421 422 ENTRY(atomic_or_16_nv)
422 423 ALTENTRY(atomic_or_ushort_nv)
423 424 movl 4(%esp), %edx / %edx = target address
424 425 movw (%edx), %ax / %ax = old value
425 426 1:
 426 427 	movl	8(%esp), %ecx	/ %ecx = bits to set
427 428 orw %ax, %cx / %cx = new value
428 429 lock
429 430 cmpxchgw %cx, (%edx) / try to stick it in
430 431 jne 1b
431 432 movzwl %cx, %eax / return new value
432 433 ret
433 434 SET_SIZE(atomic_or_ushort_nv)
434 435 SET_SIZE(atomic_or_16_nv)
435 436
436 437 ENTRY(atomic_or_32_nv)
437 438 ALTENTRY(atomic_or_uint_nv)
438 439 ALTENTRY(atomic_or_ulong_nv)
439 440 movl 4(%esp), %edx / %edx = target address
440 441 movl (%edx), %eax / %eax = old value
441 442 1:
 442 443 	movl	8(%esp), %ecx	/ %ecx = bits to set
443 444 orl %eax, %ecx / %ecx = new value
444 445 lock
445 446 cmpxchgl %ecx, (%edx) / try to stick it in
446 447 jne 1b
447 448 movl %ecx, %eax / return new value
448 449 ret
449 450 SET_SIZE(atomic_or_ulong_nv)
450 451 SET_SIZE(atomic_or_uint_nv)
451 452 SET_SIZE(atomic_or_32_nv)
452 453
453 454 /*
454 455 * NOTE: If atomic_or_64 and atomic_or_64_nv are ever
455 456 * separated, it is important to edit the libc i386 platform
456 457 * specific mapfile and remove the NODYNSORT attribute
457 458 * from atomic_or_64_nv.
458 459 */
459 460 ENTRY(atomic_or_64)
460 461 ALTENTRY(atomic_or_64_nv)
461 462 pushl %edi
462 463 pushl %ebx
463 464 movl 12(%esp), %edi / %edi = target address
464 465 movl (%edi), %eax
465 466 movl 4(%edi), %edx / %edx:%eax = old value
466 467 1:
467 468 movl 16(%esp), %ebx
 468 469 	movl	20(%esp), %ecx	/ %ecx:%ebx = bits to set
469 470 orl %eax, %ebx
470 471 orl %edx, %ecx / %ecx:%ebx = new value
471 472 lock
472 473 cmpxchg8b (%edi) / try to stick it in
473 474 jne 1b
474 475 movl %ebx, %eax
475 476 movl %ecx, %edx / return new value
476 477 popl %ebx
477 478 popl %edi
478 479 ret
479 480 SET_SIZE(atomic_or_64_nv)
480 481 SET_SIZE(atomic_or_64)
481 482
482 483 ENTRY(atomic_and_8_nv)
483 484 ALTENTRY(atomic_and_uchar_nv)
484 485 movl 4(%esp), %edx / %edx = target address
485 486 movb (%edx), %al / %al = old value
486 487 1:
 487 488 	movl	8(%esp), %ecx	/ %ecx = mask (bits to keep)
488 489 andb %al, %cl / %cl = new value
489 490 lock
490 491 cmpxchgb %cl, (%edx) / try to stick it in
491 492 jne 1b
492 493 movzbl %cl, %eax / return new value
493 494 ret
494 495 SET_SIZE(atomic_and_uchar_nv)
495 496 SET_SIZE(atomic_and_8_nv)
496 497
497 498 ENTRY(atomic_and_16_nv)
498 499 ALTENTRY(atomic_and_ushort_nv)
499 500 movl 4(%esp), %edx / %edx = target address
500 501 movw (%edx), %ax / %ax = old value
501 502 1:
 502 503 	movl	8(%esp), %ecx	/ %ecx = mask (bits to keep)
503 504 andw %ax, %cx / %cx = new value
504 505 lock
505 506 cmpxchgw %cx, (%edx) / try to stick it in
506 507 jne 1b
507 508 movzwl %cx, %eax / return new value
508 509 ret
509 510 SET_SIZE(atomic_and_ushort_nv)
510 511 SET_SIZE(atomic_and_16_nv)
511 512
512 513 ENTRY(atomic_and_32_nv)
513 514 ALTENTRY(atomic_and_uint_nv)
514 515 ALTENTRY(atomic_and_ulong_nv)
515 516 movl 4(%esp), %edx / %edx = target address
516 517 movl (%edx), %eax / %eax = old value
517 518 1:
 518 519 	movl	8(%esp), %ecx	/ %ecx = mask (bits to keep)
519 520 andl %eax, %ecx / %ecx = new value
520 521 lock
521 522 cmpxchgl %ecx, (%edx) / try to stick it in
522 523 jne 1b
523 524 movl %ecx, %eax / return new value
524 525 ret
525 526 SET_SIZE(atomic_and_ulong_nv)
526 527 SET_SIZE(atomic_and_uint_nv)
527 528 SET_SIZE(atomic_and_32_nv)
528 529
529 530 /*
530 531 * NOTE: If atomic_and_64 and atomic_and_64_nv are ever
531 532 * separated, it is important to edit the libc i386 platform
532 533 * specific mapfile and remove the NODYNSORT attribute
533 534 * from atomic_and_64_nv.
534 535 */
535 536 ENTRY(atomic_and_64)
536 537 ALTENTRY(atomic_and_64_nv)
537 538 pushl %edi
538 539 pushl %ebx
539 540 movl 12(%esp), %edi / %edi = target address
540 541 movl (%edi), %eax
541 542 movl 4(%edi), %edx / %edx:%eax = old value
542 543 1:
543 544 movl 16(%esp), %ebx
 544 545 	movl	20(%esp), %ecx	/ %ecx:%ebx = mask (bits to keep)
545 546 andl %eax, %ebx
546 547 andl %edx, %ecx / %ecx:%ebx = new value
547 548 lock
548 549 cmpxchg8b (%edi) / try to stick it in
549 550 jne 1b
550 551 movl %ebx, %eax
551 552 movl %ecx, %edx / return new value
552 553 popl %ebx
553 554 popl %edi
554 555 ret
555 556 SET_SIZE(atomic_and_64_nv)
556 557 SET_SIZE(atomic_and_64)
557 558
558 559 ENTRY(atomic_cas_8)
559 560 ALTENTRY(atomic_cas_uchar)
560 561 movl 4(%esp), %edx
561 562 movzbl 8(%esp), %eax
562 563 movb 12(%esp), %cl
563 564 lock
564 565 cmpxchgb %cl, (%edx)
565 566 ret
566 567 SET_SIZE(atomic_cas_uchar)
567 568 SET_SIZE(atomic_cas_8)
568 569
569 570 ENTRY(atomic_cas_16)
570 571 ALTENTRY(atomic_cas_ushort)
571 572 movl 4(%esp), %edx
572 573 movzwl 8(%esp), %eax
573 574 movw 12(%esp), %cx
574 575 lock
575 576 cmpxchgw %cx, (%edx)
576 577 ret
577 578 SET_SIZE(atomic_cas_ushort)
578 579 SET_SIZE(atomic_cas_16)
579 580
580 581 ENTRY(atomic_cas_32)
581 582 ALTENTRY(atomic_cas_uint)
582 583 ALTENTRY(atomic_cas_ulong)
583 584 ALTENTRY(atomic_cas_ptr)
584 585 movl 4(%esp), %edx
585 586 movl 8(%esp), %eax
586 587 movl 12(%esp), %ecx
587 588 lock
588 589 cmpxchgl %ecx, (%edx)
589 590 ret
590 591 SET_SIZE(atomic_cas_ptr)
591 592 SET_SIZE(atomic_cas_ulong)
592 593 SET_SIZE(atomic_cas_uint)
593 594 SET_SIZE(atomic_cas_32)
594 595
595 596 ENTRY(atomic_cas_64)
596 597 pushl %ebx
597 598 pushl %esi
598 599 movl 12(%esp), %esi
599 600 movl 16(%esp), %eax
600 601 movl 20(%esp), %edx
601 602 movl 24(%esp), %ebx
602 603 movl 28(%esp), %ecx
603 604 lock
604 605 cmpxchg8b (%esi)
605 606 popl %esi
606 607 popl %ebx
607 608 ret
608 609 SET_SIZE(atomic_cas_64)
609 610
610 611 ENTRY(atomic_swap_8)
611 612 ALTENTRY(atomic_swap_uchar)
612 613 movl 4(%esp), %edx
613 614 movzbl 8(%esp), %eax
614 615 lock
615 616 xchgb %al, (%edx)
616 617 ret
617 618 SET_SIZE(atomic_swap_uchar)
618 619 SET_SIZE(atomic_swap_8)
619 620
620 621 ENTRY(atomic_swap_16)
621 622 ALTENTRY(atomic_swap_ushort)
622 623 movl 4(%esp), %edx
623 624 movzwl 8(%esp), %eax
624 625 lock
625 626 xchgw %ax, (%edx)
626 627 ret
627 628 SET_SIZE(atomic_swap_ushort)
628 629 SET_SIZE(atomic_swap_16)
629 630
630 631 ENTRY(atomic_swap_32)
631 632 ALTENTRY(atomic_swap_uint)
632 633 ALTENTRY(atomic_swap_ptr)
633 634 ALTENTRY(atomic_swap_ulong)
634 635 movl 4(%esp), %edx
635 636 movl 8(%esp), %eax
636 637 lock
637 638 xchgl %eax, (%edx)
638 639 ret
639 640 SET_SIZE(atomic_swap_ulong)
640 641 SET_SIZE(atomic_swap_ptr)
641 642 SET_SIZE(atomic_swap_uint)
642 643 SET_SIZE(atomic_swap_32)
643 644
644 645 ENTRY(atomic_swap_64)
645 646 pushl %esi
646 647 pushl %ebx
647 648 movl 12(%esp), %esi
648 649 movl 16(%esp), %ebx
649 650 movl 20(%esp), %ecx
650 651 movl (%esi), %eax
651 652 movl 4(%esi), %edx / %edx:%eax = old value
652 653 1:
653 654 lock
654 655 cmpxchg8b (%esi)
655 656 jne 1b
656 657 popl %ebx
657 658 popl %esi
658 659 ret
659 660 SET_SIZE(atomic_swap_64)
660 661
661 662 ENTRY(atomic_set_long_excl)
662 663 movl 4(%esp), %edx / %edx = target address
663 664 movl 8(%esp), %ecx / %ecx = bit id
664 665 xorl %eax, %eax
665 666 lock
666 667 btsl %ecx, (%edx)
667 668 jnc 1f
668 669 decl %eax / return -1
669 670 1:
670 671 ret
671 672 SET_SIZE(atomic_set_long_excl)
672 673
673 674 ENTRY(atomic_clear_long_excl)
674 675 movl 4(%esp), %edx / %edx = target address
675 676 movl 8(%esp), %ecx / %ecx = bit id
676 677 xorl %eax, %eax
677 678 lock
678 679 btrl %ecx, (%edx)
679 680 jc 1f
680 681 decl %eax / return -1
681 682 1:
682 683 ret
683 684 SET_SIZE(atomic_clear_long_excl)
684 685
685 686 #if !defined(_KERNEL)
686 687
687 688 /*
688 689 * NOTE: membar_enter, membar_exit, membar_producer, and
689 690 * membar_consumer are all identical routines. We define them
690 691 * separately, instead of using ALTENTRY definitions to alias them
691 692 * together, so that DTrace and debuggers will see a unique address
692 693 * for them, allowing more accurate tracing.
693 694 */
694 695
695 696
696 697 ENTRY(membar_enter)
697 698 lock
698 699 xorl $0, (%esp)
699 700 ret
700 701 SET_SIZE(membar_enter)
701 702
702 703 ENTRY(membar_exit)
703 704 lock
704 705 xorl $0, (%esp)
705 706 ret
706 707 SET_SIZE(membar_exit)
707 708
708 709 ENTRY(membar_producer)
709 710 lock
710 711 xorl $0, (%esp)
711 712 ret
712 713 SET_SIZE(membar_producer)
713 714
714 715 ENTRY(membar_consumer)
715 716 lock
716 717 xorl $0, (%esp)
717 718 ret
718 719 SET_SIZE(membar_consumer)
719 720
720 721 #endif /* !_KERNEL */
↓ open down ↓ |
677 lines elided |
↑ open up ↑ |
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX