/* atomic-v6.h revision 315371 */
/* $NetBSD: atomic.h,v 1.1 2002/10/19 12:22:34 bsh Exp $ */

/*-
 * Copyright (C) 2003-2004 Olivier Houchard
 * Copyright (C) 1994-1997 Mark Brinicombe
 * Copyright (C) 1994 Brini
 * All rights reserved.
 *
 * This code is derived from software written for Brini by Mark Brinicombe
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *    This product includes software developed by Brini.
 * 4. The name of Brini may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY BRINI ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL BRINI BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD: stable/11/sys/arm/include/atomic-v6.h 315371 2017-03-16 06:00:27Z mjg $
 */

#ifndef _MACHINE_ATOMIC_V6_H_
#define _MACHINE_ATOMIC_V6_H_

#ifndef _MACHINE_ATOMIC_H_
#error Do not include this file directly, use <machine/atomic.h>
#endif

/*
 * Memory barriers.  ARMv7 has dedicated isb/dsb/dmb instructions; on ARMv6
 * the equivalent CP15 c7 operations are used instead.
 */
#if __ARM_ARCH >= 7
#define isb() __asm __volatile("isb" : : : "memory")
#define dsb() __asm __volatile("dsb" : : : "memory")
#define dmb() __asm __volatile("dmb" : : : "memory")
#elif __ARM_ARCH >= 6
#define isb() __asm __volatile("mcr p15, 0, %0, c7, c5, 4" : : "r" (0) : "memory")
#define dsb() __asm __volatile("mcr p15, 0, %0, c7, c10, 4" : : "r" (0) : "memory")
#define dmb() __asm __volatile("mcr p15, 0, %0, c7, c10, 5" : : "r" (0) : "memory")
#else
#error Only use this file with ARMv6 and later
#endif

/* The generic read/write/full barriers all map to a full dmb. */
#define mb() dmb()
#define wmb() dmb()
#define rmb() dmb()

/* This file provides native 64-bit atomics (via ldrexd/strexd). */
#define ARM_HAVE_ATOMIC64

/*
 * Generate acquire/release wrappers of atomic_<NAME>_long:
 * acquire = operation followed by dmb; release = dmb followed by operation.
 */
#define ATOMIC_ACQ_REL_LONG(NAME)					\
static __inline void							\
atomic_##NAME##_acq_long(__volatile u_long *p, u_long v)		\
{									\
	atomic_##NAME##_long(p, v);					\
	dmb();								\
}									\
									\
static __inline void							\
atomic_##NAME##_rel_long(__volatile u_long *p, u_long v)		\
{									\
	dmb();								\
	atomic_##NAME##_long(p, v);					\
}

/* Same acquire/release generator for the fixed-width (32/64) variants. */
#define ATOMIC_ACQ_REL(NAME, WIDTH)					\
static __inline void							\
atomic_##NAME##_acq_##WIDTH(__volatile uint##WIDTH##_t *p, uint##WIDTH##_t v)\
{									\
	atomic_##NAME##_##WIDTH(p, v);					\
	dmb();								\
}									\
									\
static __inline void							\
atomic_##NAME##_rel_##WIDTH(__volatile uint##WIDTH##_t *p, uint##WIDTH##_t v)\
{									\
	dmb();								\
	atomic_##NAME##_##WIDTH(p, v);					\
}


/* Atomically add val to *p using an ldrex/strex retry loop. */
static __inline void
atomic_add_32(volatile uint32_t *p, uint32_t val)
{
	uint32_t tmp = 0, tmp2 = 0;

	__asm __volatile(
	    "1: ldrex %0, [%2] \n"
	    " add %0, %0, %3 \n"
	    " strex %1, %0, [%2] \n"
	    " cmp %1, #0 \n"
	    " it ne \n"
	    " bne 1b \n"
	    : "=&r" (tmp), "+r" (tmp2)
	    ,"+r" (p), "+r" (val) : : "cc", "memory");
}

/* Atomically add val to the 64-bit *p using an ldrexd/strexd retry loop. */
static __inline void
atomic_add_64(volatile uint64_t *p, uint64_t val)
{
	uint64_t tmp;
	uint32_t exflag;

	__asm __volatile(
	    "1: \n"
	    " ldrexd %Q[tmp], %R[tmp], [%[ptr]] \n"
	    " adds %Q[tmp], %Q[val] \n"
	    " adc %R[tmp], %R[tmp], %R[val] \n"
	    " strexd %[exf], %Q[tmp], %R[tmp], [%[ptr]] \n"
	    " teq %[exf], #0 \n"
	    " it ne \n"
	    " bne 1b \n"
	    : [exf] "=&r" (exflag),
	      [tmp] "=&r" (tmp)
	    : [ptr] "r" (p),
	      [val] "r" (val)
	    : "cc", "memory");
}

/* u_long is 32 bits on this platform, so delegate to the 32-bit add. */
static __inline void
atomic_add_long(volatile u_long *p, u_long val)
{

	atomic_add_32((volatile uint32_t *)p, val);
}

ATOMIC_ACQ_REL(add, 32)
ATOMIC_ACQ_REL(add, 64)
ATOMIC_ACQ_REL_LONG(add)

/* Atomically clear the bits in setmask in *address. */
static __inline void
atomic_clear_32(volatile uint32_t *address, uint32_t setmask)
{
	uint32_t tmp = 0, tmp2 = 0;

	__asm __volatile(
	    "1: ldrex %0, [%2] \n"
	    " bic %0, %0, %3 \n"
	    " strex %1, %0, [%2] \n"
	    " cmp %1, #0 \n"
	    " it ne \n"
	    " bne 1b \n"
	    : "=&r" (tmp), "+r" (tmp2), "+r" (address), "+r" (setmask)
	    : : "cc", "memory");
}

/* Atomically clear the bits in val in the 64-bit *p. */
static __inline void
atomic_clear_64(volatile uint64_t *p, uint64_t val)
{
	uint64_t tmp;
	uint32_t exflag;

	__asm __volatile(
	    "1: \n"
	    " ldrexd %Q[tmp], %R[tmp], [%[ptr]] \n"
	    " bic %Q[tmp], %Q[val] \n"
	    " bic %R[tmp], %R[val] \n"
	    " strexd %[exf], %Q[tmp], %R[tmp], [%[ptr]] \n"
	    " teq %[exf], #0 \n"
	    " it ne \n"
	    " bne 1b \n"
	    : [exf] "=&r" (exflag),
	      [tmp] "=&r" (tmp)
	    : [ptr] "r" (p),
	      [val] "r" (val)
	    : "cc", "memory");
}

static __inline void
atomic_clear_long(volatile u_long *address, u_long setmask)
{

	atomic_clear_32((volatile uint32_t *)address, setmask);
}

ATOMIC_ACQ_REL(clear, 32)
ATOMIC_ACQ_REL(clear, 64)
ATOMIC_ACQ_REL_LONG(clear)

/*
 * Compare-and-swap: if *p == *cmpval, store newval and return nonzero;
 * otherwise write the observed value back into *cmpval and return 0.
 * May fail spuriously if the strex loses the reservation (no retry loop;
 * fcmpset semantics permit this).
 */
static __inline int
atomic_fcmpset_32(volatile uint32_t *p, uint32_t *cmpval, uint32_t newval)
{
	uint32_t tmp;
	uint32_t _cmpval = *cmpval;
	int ret;

	__asm __volatile(
	    "1: mov %0, #1 \n"
	    " ldrex %1, [%2] \n"
	    " cmp %1, %3 \n"
	    " it ne \n"
	    " bne 2f \n"
	    " strex %0, %4, [%2] \n"
	    "2:"
	    : "=&r" (ret), "=&r" (tmp), "+r" (p), "+r" (_cmpval), "+r" (newval)
	    : : "cc", "memory");
	*cmpval = tmp;
	return (!ret);
}

/* 64-bit variant of atomic_fcmpset_32; compares both halves with teq/teqeq. */
static __inline uint64_t
atomic_fcmpset_64(volatile uint64_t *p, uint64_t *cmpval, uint64_t newval)
{
	uint64_t tmp;
	uint64_t _cmpval = *cmpval;
	int ret;

	__asm __volatile(
	    "1: mov %[ret], #1 \n"
	    " ldrexd %Q[tmp], %R[tmp], [%[ptr]] \n"
	    " teq %Q[tmp], %Q[_cmpval] \n"
	    " itee eq \n"
	    " teqeq %R[tmp], %R[_cmpval] \n"
	    " bne 2f \n"
	    " strexd %[ret], %Q[newval], %R[newval], [%[ptr]]\n"
	    "2: \n"
	    : [ret] "=&r" (ret),
	      [tmp] "=&r" (tmp)
	    : [ptr] "r" (p),
	      [_cmpval] "r" (_cmpval),
	      [newval] "r" (newval)
	    : "cc", "memory");
	*cmpval = tmp;
	return (!ret);
}

static __inline u_long
atomic_fcmpset_long(volatile u_long *p, u_long *cmpval, u_long newval)
{

	return (atomic_fcmpset_32((volatile uint32_t *)p,
	    (uint32_t *)cmpval, newval));
}

/* Acquire fcmpset variants: operation followed by a full barrier. */
static __inline uint64_t
atomic_fcmpset_acq_64(volatile uint64_t *p, uint64_t *cmpval, uint64_t newval)
{
	uint64_t ret;

	ret = atomic_fcmpset_64(p, cmpval, newval);
	dmb();
	return (ret);
}

static __inline u_long
atomic_fcmpset_acq_long(volatile u_long *p, u_long *cmpval, u_long newval)
{
	u_long ret;

	ret = atomic_fcmpset_long(p, cmpval, newval);
	dmb();
	return (ret);
}

static __inline uint32_t
atomic_fcmpset_acq_32(volatile uint32_t *p, uint32_t *cmpval, uint32_t newval)
{

	uint32_t ret;

	ret = atomic_fcmpset_32(p, cmpval, newval);
	dmb();
	return (ret);
}

/* Release fcmpset variants: full barrier before the operation. */
static __inline uint32_t
atomic_fcmpset_rel_32(volatile uint32_t *p, uint32_t *cmpval, uint32_t newval)
{

	dmb();
	return (atomic_fcmpset_32(p, cmpval, newval));
}

static __inline uint64_t
atomic_fcmpset_rel_64(volatile uint64_t *p, uint64_t *cmpval, uint64_t newval)
{

	dmb();
	return (atomic_fcmpset_64(p, cmpval, newval));
}

static __inline u_long
atomic_fcmpset_rel_long(volatile u_long *p, u_long *cmpval, u_long newval)
{

	dmb();
	return (atomic_fcmpset_long(p, cmpval, newval));
}

/*
 * Classic cmpset: retries internally on strex failure, so it only returns 0
 * when the compare genuinely failed.  Returns 1 on a successful swap.
 */
static __inline uint32_t
atomic_cmpset_32(volatile uint32_t *p, uint32_t cmpval, uint32_t newval)
{
	uint32_t ret;

	__asm __volatile(
	    "1: ldrex %0, [%1] \n"
	    " cmp %0, %2 \n"
	    " itt ne \n"
	    " movne %0, #0 \n"
	    " bne 2f \n"
	    " strex %0, %3, [%1] \n"
	    " cmp %0, #0 \n"
	    " ite eq \n"
	    " moveq %0, #1 \n"
	    " bne 1b \n"
	    "2:"
	    : "=&r" (ret), "+r" (p), "+r" (cmpval), "+r" (newval)
	    : : "cc", "memory");
	return (ret);
}

/* 64-bit cmpset with an internal retry loop on strexd failure. */
static __inline int
atomic_cmpset_64(volatile uint64_t *p, uint64_t cmpval, uint64_t newval)
{
	uint64_t tmp;
	uint32_t ret;

	__asm __volatile(
	    "1: \n"
	    " ldrexd %Q[tmp], %R[tmp], [%[ptr]] \n"
	    " teq %Q[tmp], %Q[cmpval] \n"
	    " itee eq \n"
	    " teqeq %R[tmp], %R[cmpval] \n"
	    " movne %[ret], #0 \n"
	    " bne 2f \n"
	    " strexd %[ret], %Q[newval], %R[newval], [%[ptr]]\n"
	    " teq %[ret], #0 \n"
	    " it ne \n"
	    " bne 1b \n"
	    " mov %[ret], #1 \n"
	    "2: \n"
	    : [ret] "=&r" (ret),
	      [tmp] "=&r" (tmp)
	    : [ptr] "r" (p),
	      [cmpval] "r" (cmpval),
	      [newval] "r" (newval)
	    : "cc", "memory");
	return (ret);
}

static __inline u_long
atomic_cmpset_long(volatile u_long *p, u_long cmpval, u_long newval)
{

	return (atomic_cmpset_32((volatile uint32_t *)p, cmpval, newval));
}

/* Acquire cmpset: operation followed by a full barrier. */
static __inline uint32_t
atomic_cmpset_acq_32(volatile uint32_t *p, uint32_t cmpval, uint32_t newval)
{
	uint32_t ret;

	ret = atomic_cmpset_32(p, cmpval, newval);
	dmb();
	return (ret);
}

371static __inline uint64_t 372atomic_cmpset_acq_64(volatile uint64_t *p, uint64_t cmpval, uint64_t newval) 373{ 374 uint64_t ret; 375 376 ret = atomic_cmpset_64(p, cmpval, newval); 377 dmb(); 378 return (ret); 379} 380 381static __inline u_long 382atomic_cmpset_acq_long(volatile u_long *p, u_long cmpval, u_long newval) 383{ 384 u_long ret; 385 386 ret = atomic_cmpset_long(p, cmpval, newval); 387 dmb(); 388 return (ret); 389} 390 391static __inline uint32_t 392atomic_cmpset_rel_32(volatile uint32_t *p, uint32_t cmpval, uint32_t newval) 393{ 394 395 dmb(); 396 return (atomic_cmpset_32(p, cmpval, newval)); 397} 398 399static __inline uint64_t 400atomic_cmpset_rel_64(volatile uint64_t *p, uint64_t cmpval, uint64_t newval) 401{ 402 403 dmb(); 404 return (atomic_cmpset_64(p, cmpval, newval)); 405} 406 407static __inline u_long 408atomic_cmpset_rel_long(volatile u_long *p, u_long cmpval, u_long newval) 409{ 410 411 dmb(); 412 return (atomic_cmpset_long(p, cmpval, newval)); 413} 414 415static __inline uint32_t 416atomic_fetchadd_32(volatile uint32_t *p, uint32_t val) 417{ 418 uint32_t tmp = 0, tmp2 = 0, ret = 0; 419 420 __asm __volatile( 421 "1: ldrex %0, [%3] \n" 422 " add %1, %0, %4 \n" 423 " strex %2, %1, [%3] \n" 424 " cmp %2, #0 \n" 425 " it ne \n" 426 " bne 1b \n" 427 : "+r" (ret), "=&r" (tmp), "+r" (tmp2), "+r" (p), "+r" (val) 428 : : "cc", "memory"); 429 return (ret); 430} 431 432static __inline uint64_t 433atomic_fetchadd_64(volatile uint64_t *p, uint64_t val) 434{ 435 uint64_t ret, tmp; 436 uint32_t exflag; 437 438 __asm __volatile( 439 "1: \n" 440 " ldrexd %Q[tmp], %R[tmp], [%[ptr]] \n" 441 " adds %Q[tmp], %Q[ret], %Q[val] \n" 442 " adc %R[tmp], %R[ret], %R[val] \n" 443 " strexd %[exf], %Q[tmp], %R[tmp], [%[ptr]] \n" 444 " teq %[exf], #0 \n" 445 " it ne \n" 446 " bne 1b \n" 447 : [ret] "=&r" (ret), 448 [exf] "=&r" (exflag), 449 [tmp] "=&r" (tmp) 450 : [ptr] "r" (p), 451 [val] "r" (val) 452 : "cc", "memory"); 453 return (ret); 454} 455 456static __inline u_long 
457atomic_fetchadd_long(volatile u_long *p, u_long val) 458{ 459 460 return (atomic_fetchadd_32((volatile uint32_t *)p, val)); 461} 462 463static __inline uint32_t 464atomic_load_acq_32(volatile uint32_t *p) 465{ 466 uint32_t v; 467 468 v = *p; 469 dmb(); 470 return (v); 471} 472 473static __inline uint64_t 474atomic_load_64(volatile uint64_t *p) 475{ 476 uint64_t ret; 477 478 /* 479 * The only way to atomically load 64 bits is with LDREXD which puts the 480 * exclusive monitor into the exclusive state, so reset it to open state 481 * with CLREX because we don't actually need to store anything. 482 */ 483 __asm __volatile( 484 "ldrexd %Q[ret], %R[ret], [%[ptr]] \n" 485 "clrex \n" 486 : [ret] "=&r" (ret) 487 : [ptr] "r" (p) 488 : "cc", "memory"); 489 return (ret); 490} 491 492static __inline uint64_t 493atomic_load_acq_64(volatile uint64_t *p) 494{ 495 uint64_t ret; 496 497 ret = atomic_load_64(p); 498 dmb(); 499 return (ret); 500} 501 502static __inline u_long 503atomic_load_acq_long(volatile u_long *p) 504{ 505 u_long v; 506 507 v = *p; 508 dmb(); 509 return (v); 510} 511 512static __inline uint32_t 513atomic_readandclear_32(volatile uint32_t *p) 514{ 515 uint32_t ret, tmp = 0, tmp2 = 0; 516 517 __asm __volatile( 518 "1: ldrex %0, [%3] \n" 519 " mov %1, #0 \n" 520 " strex %2, %1, [%3] \n" 521 " cmp %2, #0 \n" 522 " it ne \n" 523 " bne 1b \n" 524 : "=r" (ret), "=&r" (tmp), "+r" (tmp2), "+r" (p) 525 : : "cc", "memory"); 526 return (ret); 527} 528 529static __inline uint64_t 530atomic_readandclear_64(volatile uint64_t *p) 531{ 532 uint64_t ret, tmp; 533 uint32_t exflag; 534 535 __asm __volatile( 536 "1: \n" 537 " ldrexd %Q[ret], %R[ret], [%[ptr]] \n" 538 " mov %Q[tmp], #0 \n" 539 " mov %R[tmp], #0 \n" 540 " strexd %[exf], %Q[tmp], %R[tmp], [%[ptr]] \n" 541 " teq %[exf], #0 \n" 542 " it ne \n" 543 " bne 1b \n" 544 : [ret] "=&r" (ret), 545 [exf] "=&r" (exflag), 546 [tmp] "=&r" (tmp) 547 : [ptr] "r" (p) 548 : "cc", "memory"); 549 return (ret); 550} 551 552static 
__inline u_long 553atomic_readandclear_long(volatile u_long *p) 554{ 555 556 return (atomic_readandclear_32((volatile uint32_t *)p)); 557} 558 559static __inline void 560atomic_set_32(volatile uint32_t *address, uint32_t setmask) 561{ 562 uint32_t tmp = 0, tmp2 = 0; 563 564 __asm __volatile( 565 "1: ldrex %0, [%2] \n" 566 " orr %0, %0, %3 \n" 567 " strex %1, %0, [%2] \n" 568 " cmp %1, #0 \n" 569 " it ne \n" 570 " bne 1b \n" 571 : "=&r" (tmp), "+r" (tmp2), "+r" (address), "+r" (setmask) 572 : : "cc", "memory"); 573} 574 575static __inline void 576atomic_set_64(volatile uint64_t *p, uint64_t val) 577{ 578 uint64_t tmp; 579 uint32_t exflag; 580 581 __asm __volatile( 582 "1: \n" 583 " ldrexd %Q[tmp], %R[tmp], [%[ptr]] \n" 584 " orr %Q[tmp], %Q[val] \n" 585 " orr %R[tmp], %R[val] \n" 586 " strexd %[exf], %Q[tmp], %R[tmp], [%[ptr]] \n" 587 " teq %[exf], #0 \n" 588 " it ne \n" 589 " bne 1b \n" 590 : [exf] "=&r" (exflag), 591 [tmp] "=&r" (tmp) 592 : [ptr] "r" (p), 593 [val] "r" (val) 594 : "cc", "memory"); 595} 596 597static __inline void 598atomic_set_long(volatile u_long *address, u_long setmask) 599{ 600 601 atomic_set_32((volatile uint32_t *)address, setmask); 602} 603 604ATOMIC_ACQ_REL(set, 32) 605ATOMIC_ACQ_REL(set, 64) 606ATOMIC_ACQ_REL_LONG(set) 607 608static __inline void 609atomic_subtract_32(volatile uint32_t *p, uint32_t val) 610{ 611 uint32_t tmp = 0, tmp2 = 0; 612 613 __asm __volatile( 614 "1: ldrex %0, [%2] \n" 615 " sub %0, %0, %3 \n" 616 " strex %1, %0, [%2] \n" 617 " cmp %1, #0 \n" 618 " it ne \n" 619 " bne 1b \n" 620 : "=&r" (tmp), "+r" (tmp2), "+r" (p), "+r" (val) 621 : : "cc", "memory"); 622} 623 624static __inline void 625atomic_subtract_64(volatile uint64_t *p, uint64_t val) 626{ 627 uint64_t tmp; 628 uint32_t exflag; 629 630 __asm __volatile( 631 "1: \n" 632 " ldrexd %Q[tmp], %R[tmp], [%[ptr]] \n" 633 " subs %Q[tmp], %Q[val] \n" 634 " sbc %R[tmp], %R[tmp], %R[val] \n" 635 " strexd %[exf], %Q[tmp], %R[tmp], [%[ptr]] \n" 636 " teq %[exf], #0 \n" 637 " 
it ne \n" 638 " bne 1b \n" 639 : [exf] "=&r" (exflag), 640 [tmp] "=&r" (tmp) 641 : [ptr] "r" (p), 642 [val] "r" (val) 643 : "cc", "memory"); 644} 645 646static __inline void 647atomic_subtract_long(volatile u_long *p, u_long val) 648{ 649 650 atomic_subtract_32((volatile uint32_t *)p, val); 651} 652 653ATOMIC_ACQ_REL(subtract, 32) 654ATOMIC_ACQ_REL(subtract, 64) 655ATOMIC_ACQ_REL_LONG(subtract) 656 657static __inline void 658atomic_store_64(volatile uint64_t *p, uint64_t val) 659{ 660 uint64_t tmp; 661 uint32_t exflag; 662 663 /* 664 * The only way to atomically store 64 bits is with STREXD, which will 665 * succeed only if paired up with a preceeding LDREXD using the same 666 * address, so we read and discard the existing value before storing. 667 */ 668 __asm __volatile( 669 "1: \n" 670 " ldrexd %Q[tmp], %R[tmp], [%[ptr]] \n" 671 " strexd %[exf], %Q[val], %R[val], [%[ptr]] \n" 672 " teq %[exf], #0 \n" 673 " it ne \n" 674 " bne 1b \n" 675 : [tmp] "=&r" (tmp), 676 [exf] "=&r" (exflag) 677 : [ptr] "r" (p), 678 [val] "r" (val) 679 : "cc", "memory"); 680} 681 682static __inline void 683atomic_store_rel_32(volatile uint32_t *p, uint32_t v) 684{ 685 686 dmb(); 687 *p = v; 688} 689 690static __inline void 691atomic_store_rel_64(volatile uint64_t *p, uint64_t val) 692{ 693 694 dmb(); 695 atomic_store_64(p, val); 696} 697 698static __inline void 699atomic_store_rel_long(volatile u_long *p, u_long v) 700{ 701 702 dmb(); 703 *p = v; 704} 705 706static __inline int 707atomic_testandset_32(volatile uint32_t *p, u_int v) 708{ 709 uint32_t tmp, tmp2, res, mask; 710 711 mask = 1u << (v & 0x1f); 712 tmp = tmp2 = 0; 713 __asm __volatile( 714 "1: ldrex %0, [%4] \n" 715 " orr %1, %0, %3 \n" 716 " strex %2, %1, [%4] \n" 717 " cmp %2, #0 \n" 718 " it ne \n" 719 " bne 1b \n" 720 : "=&r" (res), "=&r" (tmp), "=&r" (tmp2) 721 : "r" (mask), "r" (p) 722 : "cc", "memory"); 723 return ((res & mask) != 0); 724} 725 726static __inline int 727atomic_testandset_int(volatile u_int *p, u_int v) 728{ 
729 730 return (atomic_testandset_32((volatile uint32_t *)p, v)); 731} 732 733static __inline int 734atomic_testandset_long(volatile u_long *p, u_int v) 735{ 736 737 return (atomic_testandset_32((volatile uint32_t *)p, v)); 738} 739 740static __inline int 741atomic_testandset_64(volatile uint64_t *p, u_int v) 742{ 743 volatile uint32_t *p32; 744 745 p32 = (volatile uint32_t *)p; 746 /* Assume little-endian */ 747 if (v >= 32) { 748 v &= 0x1f; 749 p32++; 750 } 751 return (atomic_testandset_32(p32, v)); 752} 753 754static __inline uint32_t 755atomic_swap_32(volatile uint32_t *p, uint32_t v) 756{ 757 uint32_t ret, exflag; 758 759 __asm __volatile( 760 "1: ldrex %[ret], [%[ptr]] \n" 761 " strex %[exf], %[val], [%[ptr]] \n" 762 " teq %[exf], #0 \n" 763 " it ne \n" 764 " bne 1b \n" 765 : [ret] "=&r" (ret), 766 [exf] "=&r" (exflag) 767 : [val] "r" (v), 768 [ptr] "r" (p) 769 : "cc", "memory"); 770 return (ret); 771} 772 773static __inline uint64_t 774atomic_swap_64(volatile uint64_t *p, uint64_t v) 775{ 776 uint64_t ret; 777 uint32_t exflag; 778 779 __asm __volatile( 780 "1: ldrexd %Q[ret], %R[ret], [%[ptr]] \n" 781 " strexd %[exf], %Q[val], %R[val], [%[ptr]] \n" 782 " teq %[exf], #0 \n" 783 " it ne \n" 784 " bne 1b \n" 785 : [ret] "=&r" (ret), 786 [exf] "=&r" (exflag) 787 : [val] "r" (v), 788 [ptr] "r" (p) 789 : "cc", "memory"); 790 return (ret); 791} 792 793#undef ATOMIC_ACQ_REL 794#undef ATOMIC_ACQ_REL_LONG 795 796static __inline void 797atomic_thread_fence_acq(void) 798{ 799 800 dmb(); 801} 802 803static __inline void 804atomic_thread_fence_rel(void) 805{ 806 807 dmb(); 808} 809 810static __inline void 811atomic_thread_fence_acq_rel(void) 812{ 813 814 dmb(); 815} 816 817static __inline void 818atomic_thread_fence_seq_cst(void) 819{ 820 821 dmb(); 822} 823 824#endif /* _MACHINE_ATOMIC_V6_H_ */ 825