1/* 2 * Copyright (c) 2007-2012 Apple Inc. All rights reserved. 3 * Copyright (c) 2000-2006 Apple Computer, Inc. All rights reserved. 4 * 5 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ 6 * 7 * This file contains Original Code and/or Modifications of Original Code 8 * as defined in and that are subject to the Apple Public Source License 9 * Version 2.0 (the 'License'). You may not use this file except in 10 * compliance with the License. The rights granted to you under the License 11 * may not be used to create, or enable the creation or redistribution of, 12 * unlawful or unlicensed copies of an Apple operating system, or to 13 * circumvent, violate, or enable the circumvention or violation of, any 14 * terms of an Apple operating system software license agreement. 15 * 16 * Please obtain a copy of the License at 17 * http://www.opensource.apple.com/apsl/ and read it before using this file. 18 * 19 * The Original Code and all software distributed under the License are 20 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER 21 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, 22 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, 23 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. 24 * Please see the License for the specific language governing rights and 25 * limitations under the License. 26 * 27 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ 28 */ 29/* 30 * Copyright (c) 1999 Apple Computer, Inc. All rights reserved. 31 * 32 * HISTORY 33 * 34 */ 35 36#ifndef _OS_OSATOMIC_H 37#define _OS_OSATOMIC_H 38 39#include <libkern/OSBase.h> 40 41#if defined(__cplusplus) 42extern "C" { 43#endif 44 45#ifdef XNU_KERNEL_PRIVATE 46/* 47 * The macro SAFE_CAST_PTR() casts one type of pointer to another type, making sure 48 * the data the pointer is referencing is the same size. If it is not, it will cause 49 * a division by zero compiler warning. 
 * This is to work around "SInt32" being defined
 * as "long" on ILP32 and as "int" on LP64, which would require an explicit cast to
 * "SInt32*" when for instance passing an "int*" to OSAddAtomic() - which masks size
 * mismatches.
 * -- var is used, but sizeof does not evaluate the
 * argument, i.e. we're safe against "++" etc. in var --
 */
#define __SAFE_CAST_PTR(type, var) (((type)(var))+(0/(sizeof(*var) == sizeof(*(type)0) ? 1 : 0)))
#else
/* Outside of xnu proper there is no size check: plain cast only. */
#define __SAFE_CAST_PTR(type, var) ((type)(var))
#endif

/*!
 * @header
 *
 * @abstract
 * This header declares the OSAtomic group of functions for atomic
 * reading and updating of values.
 */

/*!
 * @function OSCompareAndSwap64
 *
 * @abstract
 * 64-bit compare and swap operation.
 *
 * @discussion
 * See OSCompareAndSwap.
 */
extern Boolean OSCompareAndSwap64(
	UInt64            oldValue,
	UInt64            newValue,
	volatile UInt64 * address);
/* Wrapper macro: rejects, at compile time, any pointer whose pointee is not 64 bits wide. */
#define OSCompareAndSwap64(a, b, c) \
	(OSCompareAndSwap64(a, b, __SAFE_CAST_PTR(volatile UInt64*,c)))

/*!
 * @function OSAddAtomic64
 *
 * @abstract
 * 64-bit atomic add operation.
 *
 * @discussion
 * See OSAddAtomic.
 */
extern SInt64 OSAddAtomic64(
	SInt64            theAmount,
	volatile SInt64 * address);
#define OSAddAtomic64(a, b) \
	(OSAddAtomic64(a, __SAFE_CAST_PTR(volatile SInt64*,b)))

/*!
 * @function OSIncrementAtomic64
 *
 * @abstract
 * 64-bit increment.
 *
 * @discussion
 * See OSIncrementAtomic.
 */
inline static SInt64 OSIncrementAtomic64(volatile SInt64 * address)
{
	/* Returns the value before the increment. */
	return OSAddAtomic64(1LL, address);
}

/*!
 * @function OSDecrementAtomic64
 *
 * @abstract
 * 64-bit decrement.
 *
 * @discussion
 * See OSDecrementAtomic.
 */
inline static SInt64 OSDecrementAtomic64(volatile SInt64 * address)
{
	/* Returns the value before the decrement. */
	return OSAddAtomic64(-1LL, address);
}

#if XNU_KERNEL_PRIVATE
/* Not to be included in headerdoc.
130 * 131 * @function OSAddAtomicLong 132 * 133 * @abstract 134 * 32/64-bit atomic add operation, depending on sizeof(long). 135 * 136 * @discussion 137 * See OSAddAtomic. 138 */ 139extern long OSAddAtomicLong( 140 long theAmount, 141 volatile long * address); 142#define OSAddAtomicLong(a, b) \ 143 (OSAddAtomicLong(a, __SAFE_CAST_PTR(volatile long*,b))) 144 145/* Not to be included in headerdoc. 146 * 147 * @function OSIncrementAtomicLong 148 * 149 * @abstract 150 * 32/64-bit increment, depending on sizeof(long) 151 * 152 * @discussion 153 * See OSIncrementAtomic. 154*/ 155inline static long OSIncrementAtomicLong(volatile long * address) 156{ 157 return OSAddAtomicLong(1L, address); 158} 159 160/* Not to be included in headerdoc. 161 * 162 * @function OSDecrementAtomicLong 163 * 164 * @abstract 165 * 32/64-bit decrement, depending on sizeof(long) 166 *@discussion See OSDecrementAtomic. 167 */ 168inline static long OSDecrementAtomicLong(volatile long * address) 169{ 170 return OSAddAtomicLong(-1L, address); 171} 172#endif /* XNU_KERNEL_PRIVATE */ 173 174#if XNU_KERNEL_PRIVATE 175#endif /* XNU_KERNEL_PRIVATE */ 176 177/*! 178 * @function OSCompareAndSwap 179 * 180 * @abstract 181 * Compare and swap operation, performed atomically with respect to all devices that participate in the coherency architecture of the platform. 182 * 183 * @discussion 184 * The OSCompareAndSwap function compares the value at the specified address with oldVal. The value of newValue is written to the address only if oldValue and the value at the address are equal. OSCompareAndSwap returns true if newValue is written to the address; otherwise, it returns false. 185 * 186 * This function guarantees atomicity only with main system memory. It is specifically unsuitable for use on noncacheable memory such as that in devices; this function cannot guarantee atomicity, for example, on memory mapped from a PCI device. 
 * Additionally, this function incorporates a memory barrier on systems with weakly-ordered memory architectures.
 *
 * @param oldValue The value to compare at address.
 * @param newValue The value to write to address if oldValue compares true.
 * @param address The 4-byte aligned address of the data to update atomically.
 * @result true if newValue was written to the address.
 */
extern Boolean OSCompareAndSwap(
	UInt32            oldValue,
	UInt32            newValue,
	volatile UInt32 * address);
#define OSCompareAndSwap(a, b, c) \
	(OSCompareAndSwap(a, b, __SAFE_CAST_PTR(volatile UInt32*,c)))

/*!
 * @function OSCompareAndSwapPtr
 *
 * @abstract
 * Compare and swap operation, performed atomically with respect to all devices that participate in the coherency architecture of the platform.
 *
 * @discussion
 * The OSCompareAndSwapPtr function compares the pointer-sized value at the specified address with oldValue. The value of newValue is written to the address only if oldValue and the value at the address are equal. OSCompareAndSwapPtr returns true if newValue is written to the address; otherwise, it returns false.
 *
 * This function guarantees atomicity only with main system memory. It is specifically unsuitable for use on noncacheable memory such as that in devices; this function cannot guarantee atomicity, for example, on memory mapped from a PCI device. Additionally, this function incorporates a memory barrier on systems with weakly-ordered memory architectures.
 *
 * @param oldValue The pointer value to compare at address.
 * @param newValue The pointer value to write to address if oldValue compares true.
 * @param address The pointer-size aligned address of the data to update atomically.
 * @result true if newValue was written to the address.
 */
extern Boolean OSCompareAndSwapPtr(
	void            * oldValue,
	void            * newValue,
	void * volatile * address);
#define OSCompareAndSwapPtr(a, b, c) \
	(OSCompareAndSwapPtr(a, b, __SAFE_CAST_PTR(void * volatile *,c)))

/*!
 * @function OSAddAtomic
 *
 * @abstract
 * 32-bit add operation, performed atomically with respect to all devices that participate in the coherency architecture of the platform.
 *
 * @discussion
 * The OSAddAtomic function adds the specified amount to the value at the specified address and returns the original value.
 *
 * This function guarantees atomicity only with main system memory. It is specifically unsuitable for use on noncacheable memory such as that in devices; this function cannot guarantee atomicity, for example, on memory mapped from a PCI device. Previous incarnations of this function incorporated a memory barrier on systems with weakly-ordered memory architectures, but current versions contain no barriers.
 *
 * @param amount The amount to add.
 * @param address The 4-byte aligned address of the value to update atomically.
 * @result The value before the addition.
 */
extern SInt32 OSAddAtomic(
	SInt32            amount,
	volatile SInt32 * address);
#define OSAddAtomic(a, b) \
	(OSAddAtomic(a, __SAFE_CAST_PTR(volatile SInt32*,b)))

/*!
 * @function OSAddAtomic16
 *
 * @abstract
 * 16-bit add operation, performed atomically with respect to all devices that participate in the coherency architecture of the platform.
 *
 * @discussion
 * The OSAddAtomic16 function adds the specified amount to the value at the specified address and returns the original value.
 *
 * This function guarantees atomicity only with main system memory. It is specifically unsuitable for use on noncacheable memory such as that in devices; this function cannot guarantee atomicity, for example, on memory mapped from a PCI device.
 * Previous incarnations of this function incorporated a memory barrier on systems with weakly-ordered memory architectures, but current versions contain no barriers.
 *
 * @param amount The amount to add.
 * @param address The 2-byte aligned address of the value to update atomically.
 * @result The value before the addition.
 */
extern SInt16 OSAddAtomic16(
	SInt32            amount,
	volatile SInt16 * address);

/*!
 * @function OSAddAtomic8
 *
 * @abstract
 * 8-bit add operation, performed atomically with respect to all devices that participate in the coherency architecture of the platform.
 *
 * @discussion
 * The OSAddAtomic8 function adds the specified amount to the value at the specified address and returns the original value.
 *
 * This function guarantees atomicity only with main system memory. It is specifically unsuitable for use on noncacheable memory such as that in devices; this function cannot guarantee atomicity, for example, on memory mapped from a PCI device. Previous incarnations of this function incorporated a memory barrier on systems with weakly-ordered memory architectures, but current versions contain no barriers.
 *
 * @param amount The amount to add.
 * @param address The address of the value to update atomically.
 * @result The value before the addition.
 */
extern SInt8 OSAddAtomic8(
	SInt32           amount,
	volatile SInt8 * address);

/*!
 * @function OSIncrementAtomic
 *
 * @abstract
 * 32-bit increment operation, performed atomically with respect to all devices that participate in the coherency architecture of the platform.
 *
 * @discussion
 * The OSIncrementAtomic function increments the value at the specified address by one and returns the original value.
 *
 * This function guarantees atomicity only with main system memory.
 * It is specifically unsuitable for use on noncacheable memory such as that in devices; this function cannot guarantee atomicity, for example, on memory mapped from a PCI device.
 *
 * @param address The 4-byte aligned address of the value to update atomically.
 * @result The value before the increment.
 */
extern SInt32 OSIncrementAtomic(volatile SInt32 * address);
#define OSIncrementAtomic(a) \
	(OSIncrementAtomic(__SAFE_CAST_PTR(volatile SInt32*,a)))

/*!
 * @function OSIncrementAtomic16
 *
 * @abstract
 * 16-bit increment operation, performed atomically with respect to all devices that participate in the coherency architecture of the platform.
 *
 * @discussion
 * The OSIncrementAtomic16 function increments the value at the specified address by one and returns the original value.
 *
 * This function guarantees atomicity only with main system memory. It is specifically unsuitable for use on noncacheable memory such as that in devices; this function cannot guarantee atomicity, for example, on memory mapped from a PCI device. Previous incarnations of this function incorporated a memory barrier on systems with weakly-ordered memory architectures, but current versions contain no barriers.
 *
 * @param address The 2-byte aligned address of the value to update atomically.
 * @result The value before the increment.
 */
extern SInt16 OSIncrementAtomic16(volatile SInt16 * address);

/*!
 * @function OSIncrementAtomic8
 *
 * @abstract
 * 8-bit increment operation, performed atomically with respect to all devices that participate in the coherency architecture of the platform.
 *
 * @discussion
 * The OSIncrementAtomic8 function increments the value at the specified address by one and returns the original value.
 *
 * This function guarantees atomicity only with main system memory.
 * It is specifically unsuitable for use on noncacheable memory such as that in devices; this function cannot guarantee atomicity, for example, on memory mapped from a PCI device. Previous incarnations of this function incorporated a memory barrier on systems with weakly-ordered memory architectures, but current versions contain no barriers.
 *
 * @param address The address of the value to update atomically.
 * @result The value before the increment.
 */
extern SInt8 OSIncrementAtomic8(volatile SInt8 * address);

/*!
 * @function OSDecrementAtomic
 *
 * @abstract
 * 32-bit decrement operation, performed atomically with respect to all devices that participate in the coherency architecture of the platform.
 *
 * @discussion
 * The OSDecrementAtomic function decrements the value at the specified address by one and returns the original value.
 *
 * This function guarantees atomicity only with main system memory. It is specifically unsuitable for use on noncacheable memory such as that in devices; this function cannot guarantee atomicity, for example, on memory mapped from a PCI device. Previous incarnations of this function incorporated a memory barrier on systems with weakly-ordered memory architectures, but current versions contain no barriers.
 *
 * @param address The 4-byte aligned address of the value to update atomically.
 * @result The value before the decrement.
 */
extern SInt32 OSDecrementAtomic(volatile SInt32 * address);
#define OSDecrementAtomic(a) \
	(OSDecrementAtomic(__SAFE_CAST_PTR(volatile SInt32*,a)))

/*!
 * @function OSDecrementAtomic16
 *
 * @abstract
 * 16-bit decrement operation, performed atomically with respect to all devices that participate in the coherency architecture of the platform.
 *
 * @discussion
 * The OSDecrementAtomic16 function decrements the value at the specified address by one and returns the original value.
 *
 * This function guarantees atomicity only with main system memory. It is specifically unsuitable for use on noncacheable memory such as that in devices; this function cannot guarantee atomicity, for example, on memory mapped from a PCI device. Previous incarnations of this function incorporated a memory barrier on systems with weakly-ordered memory architectures, but current versions contain no barriers.
 *
 * @param address The 2-byte aligned address of the value to update atomically.
 * @result The value before the decrement.
 */
extern SInt16 OSDecrementAtomic16(volatile SInt16 * address);

/*!
 * @function OSDecrementAtomic8
 *
 * @abstract
 * 8-bit decrement operation, performed atomically with respect to all devices that participate in the coherency architecture of the platform.
 *
 * @discussion
 * The OSDecrementAtomic8 function decrements the value at the specified address by one and returns the original value.
 *
 * This function guarantees atomicity only with main system memory. It is specifically unsuitable for use on noncacheable memory such as that in devices; this function cannot guarantee atomicity, for example, on memory mapped from a PCI device. Previous incarnations of this function incorporated a memory barrier on systems with weakly-ordered memory architectures, but current versions contain no barriers.
 *
 * @param address The address of the value to update atomically.
 * @result The value before the decrement.
 */
extern SInt8 OSDecrementAtomic8(volatile SInt8 * address);

/*!
 * @function OSBitAndAtomic
 *
 * @abstract
 * 32-bit logical and operation, performed atomically with respect to all devices that participate in the coherency architecture of the platform.
 *
 * @discussion
 * The OSBitAndAtomic function logically ands the bits of the specified mask into the value at the specified address and returns the original value.
 *
 * This function guarantees atomicity only with main system memory. It is specifically unsuitable for use on noncacheable memory such as that in devices; this function cannot guarantee atomicity, for example, on memory mapped from a PCI device. Previous incarnations of this function incorporated a memory barrier on systems with weakly-ordered memory architectures, but current versions contain no barriers.
 *
 * @param mask The mask to logically and with the value.
 * @param address The 4-byte aligned address of the value to update atomically.
 * @result The value before the bitwise operation.
 */
extern UInt32 OSBitAndAtomic(
	UInt32            mask,
	volatile UInt32 * address);
#define OSBitAndAtomic(a, b) \
	(OSBitAndAtomic(a, __SAFE_CAST_PTR(volatile UInt32*,b)))

/*!
 * @function OSBitAndAtomic16
 *
 * @abstract
 * 16-bit logical and operation, performed atomically with respect to all devices that participate in the coherency architecture of the platform.
 *
 * @discussion
 * The OSBitAndAtomic16 function logically ands the bits of the specified mask into the value at the specified address and returns the original value.
 *
 * This function guarantees atomicity only with main system memory. It is specifically unsuitable for use on noncacheable memory such as that in devices; this function cannot guarantee atomicity, for example, on memory mapped from a PCI device. Additionally, this function incorporates a memory barrier on systems with weakly-ordered memory architectures.
 *
 * @param mask The mask to logically and with the value.
 * @param address The 2-byte aligned address of the value to update atomically.
 * @result The value before the bitwise operation.
 */
extern UInt16 OSBitAndAtomic16(
	UInt32            mask,
	volatile UInt16 * address);

/*!
 * @function OSBitAndAtomic8
 *
 * @abstract
 * 8-bit logical and operation, performed atomically with respect to all devices that participate in the coherency architecture of the platform.
 *
 * @discussion
 * The OSBitAndAtomic8 function logically ands the bits of the specified mask into the value at the specified address and returns the original value.
 *
 * This function guarantees atomicity only with main system memory. It is specifically unsuitable for use on noncacheable memory such as that in devices; this function cannot guarantee atomicity, for example, on memory mapped from a PCI device. Additionally, this function incorporates a memory barrier on systems with weakly-ordered memory architectures.
 *
 * @param mask The mask to logically and with the value.
 * @param address The address of the value to update atomically.
 * @result The value before the bitwise operation.
 */
extern UInt8 OSBitAndAtomic8(
	UInt32           mask,
	volatile UInt8 * address);

/*!
 * @function OSBitOrAtomic
 *
 * @abstract
 * 32-bit logical or operation, performed atomically with respect to all devices that participate in the coherency architecture of the platform.
 *
 * @discussion
 * The OSBitOrAtomic function logically ors the bits of the specified mask into the value at the specified address and returns the original value.
 *
 * This function guarantees atomicity only with main system memory. It is specifically unsuitable for use on noncacheable memory such as that in devices; this function cannot guarantee atomicity, for example, on memory mapped from a PCI device. Additionally, this function incorporates a memory barrier on systems with weakly-ordered memory architectures.
 *
 * @param mask The mask to logically or with the value.
 * @param address The 4-byte aligned address of the value to update atomically.
 * @result The value before the bitwise operation.
 */
extern UInt32 OSBitOrAtomic(
	UInt32            mask,
	volatile UInt32 * address);
#define OSBitOrAtomic(a, b) \
	(OSBitOrAtomic(a, __SAFE_CAST_PTR(volatile UInt32*,b)))

/*!
 * @function OSBitOrAtomic16
 *
 * @abstract
 * 16-bit logical or operation, performed atomically with respect to all devices that participate in the coherency architecture of the platform.
 *
 * @discussion
 * The OSBitOrAtomic16 function logically ors the bits of the specified mask into the value at the specified address and returns the original value.
 *
 * This function guarantees atomicity only with main system memory. It is specifically unsuitable for use on noncacheable memory such as that in devices; this function cannot guarantee atomicity, for example, on memory mapped from a PCI device. Additionally, this function incorporates a memory barrier on systems with weakly-ordered memory architectures.
 *
 * @param mask The mask to logically or with the value.
 * @param address The 2-byte aligned address of the value to update atomically.
 * @result The value before the bitwise operation.
 */
extern UInt16 OSBitOrAtomic16(
	UInt32            mask,
	volatile UInt16 * address);

/*!
 * @function OSBitOrAtomic8
 *
 * @abstract
 * 8-bit logical or operation, performed atomically with respect to all devices that participate in the coherency architecture of the platform.
 *
 * @discussion
 * This function guarantees atomicity only with main system memory. It is specifically unsuitable for use on noncacheable memory such as that in devices; this function cannot guarantee atomicity, for example, on memory mapped from a PCI device. Additionally, this function incorporates a memory barrier on systems with weakly-ordered memory architectures.
 *
 * The OSBitOrAtomic8 function logically ors the bits of the specified mask into the value at the specified address and returns the original value.
 * @param mask The mask to logically or with the value.
 * @param address The address of the value to update atomically.
 * @result The value before the bitwise operation.
 */
extern UInt8 OSBitOrAtomic8(
	UInt32           mask,
	volatile UInt8 * address);

/*!
 * @function OSBitXorAtomic
 *
 * @abstract
 * 32-bit logical xor operation, performed atomically with respect to all devices that participate in the coherency architecture of the platform.
 *
 * This function guarantees atomicity only with main system memory. It is specifically unsuitable for use on noncacheable memory such as that in devices; this function cannot guarantee atomicity, for example, on memory mapped from a PCI device. Additionally, this function incorporates a memory barrier on systems with weakly-ordered memory architectures.
 *
 * @discussion
 * The OSBitXorAtomic function logically xors the bits of the specified mask into the value at the specified address and returns the original value.
 *
 * @param mask The mask to logically xor with the value.
 * @param address The 4-byte aligned address of the value to update atomically.
 * @result The value before the bitwise operation.
 */
extern UInt32 OSBitXorAtomic(
	UInt32            mask,
	volatile UInt32 * address);
#define OSBitXorAtomic(a, b) \
	(OSBitXorAtomic(a, __SAFE_CAST_PTR(volatile UInt32*,b)))

/*!
 * @function OSBitXorAtomic16
 *
 * @abstract
 * 16-bit logical xor operation, performed atomically with respect to all devices that participate in the coherency architecture of the platform.
 *
 * @discussion
 * The OSBitXorAtomic16 function logically xors the bits of the specified mask into the value at the specified address and returns the original value.
 *
 * This function guarantees atomicity only with main system memory.
 * It is specifically unsuitable for use on noncacheable memory such as that in devices; this function cannot guarantee atomicity, for example, on memory mapped from a PCI device. Additionally, this function incorporates a memory barrier on systems with weakly-ordered memory architectures.
 *
 * @param mask The mask to logically xor with the value.
 * @param address The 2-byte aligned address of the value to update atomically.
 * @result The value before the bitwise operation.
 */
extern UInt16 OSBitXorAtomic16(
	UInt32            mask,
	volatile UInt16 * address);

/*!
 * @function OSBitXorAtomic8
 *
 * @abstract
 * 8-bit logical xor operation, performed atomically with respect to all devices that participate in the coherency architecture of the platform.
 *
 * This function guarantees atomicity only with main system memory. It is specifically unsuitable for use on noncacheable memory such as that in devices; this function cannot guarantee atomicity, for example, on memory mapped from a PCI device. Additionally, this function incorporates a memory barrier on systems with weakly-ordered memory architectures.
 *
 * @discussion
 * The OSBitXorAtomic8 function logically xors the bits of the specified mask into the value at the specified address and returns the original value.
 *
 * @param mask The mask to logically xor with the value.
 * @param address The address of the value to update atomically.
 * @result The value before the bitwise operation.
 */
extern UInt8 OSBitXorAtomic8(
	UInt32           mask,
	volatile UInt8 * address);

/*!
 * @function OSTestAndSet
 *
 * @abstract
 * Bit test and set operation, performed atomically with respect to all devices that participate in the coherency architecture of the platform.
 *
 * This function guarantees atomicity only with main system memory.
 * It is specifically unsuitable for use on noncacheable memory such as that in devices; this function cannot guarantee atomicity, for example, on memory mapped from a PCI device. Additionally, this function incorporates a memory barrier on systems with weakly-ordered memory architectures.
 *
 * @discussion
 * The OSTestAndSet function sets a single bit in a byte at a specified address. It returns true if the bit was already set, false otherwise.
 *
 * @param bit The bit number in the range 0 through 7.
 * @param startAddress The address of the byte to update atomically.
 * @result true if the bit was already set, false otherwise.
 */
extern Boolean OSTestAndSet(
	UInt32            bit,
	volatile UInt8  * startAddress);

/*!
 * @function OSTestAndClear
 *
 * @abstract
 * Bit test and clear operation, performed atomically with respect to all devices that participate in the coherency architecture of the platform.
 *
 * @discussion
 * The OSTestAndClear function clears a single bit in a byte at a specified address. It returns true if the bit was already clear, false otherwise.
 *
 * This function guarantees atomicity only with main system memory. It is specifically unsuitable for use on noncacheable memory such as that in devices; this function cannot guarantee atomicity, for example, on memory mapped from a PCI device. Additionally, this function incorporates a memory barrier on systems with weakly-ordered memory architectures.
 *
 * @param bit The bit number in the range 0 through 7.
 * @param startAddress The address of the byte to update atomically.
 * @result true if the bit was already clear, false otherwise.
 */
extern Boolean OSTestAndClear(
	UInt32            bit,
	volatile UInt8  * startAddress);

/*!
 * @defined OS_SPINLOCK_INIT
 *
 * @abstract
 * The default value for an OSSpinLock.
 *
 * @discussion
 * The convention is that unlocked is zero, locked is nonzero.
 */
#define OS_SPINLOCK_INIT 0

/*!
 * @typedef OSSpinLock
 *
 * @abstract
 * Data type for a spinlock.
 *
 * @discussion
 * You should always initialize a spinlock to OS_SPINLOCK_INIT before using it.
 */
typedef SInt32 OSSpinLock;

#ifdef PRIVATE
/*!
 * @function OSSpinLockTry
 *
 * @abstract
 * Locks a spinlock if it would not block.
 *
 * @discussion
 * Multiprocessor locks used within the shared memory area between the kernel and event system. These must work in both user and kernel mode.
 *
 * @result
 * Returns false if the lock was already held by another thread, true if it took the lock successfully.
 */
extern Boolean OSSpinLockTry(volatile OSSpinLock * lock);

/*!
 * @function OSSpinLockUnlock
 *
 * @abstract
 * Unlocks a spinlock.
 *
 * @discussion
 * Unlocks a spinlock.
 */
extern void OSSpinLockUnlock(volatile OSSpinLock * lock);
#endif /* PRIVATE */

/*!
 * @function OSSynchronizeIO
 *
 * @abstract
 * The OSSynchronizeIO routine ensures orderly load and store operations to noncached memory mapped I/O devices.
 *
 * @discussion
 * The OSSynchronizeIO routine ensures orderly load and store operations to noncached memory mapped I/O devices. Historically it executed the eieio instruction on PowerPC processors; on the architectures this header currently supports it compiles to nothing (note the empty body below).
 */
static __inline__ void OSSynchronizeIO(void)
{
}

#if defined(KERNEL_PRIVATE)

#if defined(__i386__) || defined(__x86_64__)
#if defined(XNU_KERNEL_PRIVATE)
/*
 * Full load/store fence for x86: mfence orders all earlier loads and stores
 * before all later ones; the "memory" clobber also prevents the compiler
 * from reordering memory accesses across the call.
 */
static inline void OSMemoryBarrier(void) {
	__asm__ volatile("mfence" ::: "memory");
}
#endif /* XNU_KERNEL_PRIVATE */
#endif

#endif /* KERNEL_PRIVATE */

#if defined(__cplusplus)
}
#endif

#endif /* ! _OS_OSATOMIC_H */