/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License").  You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 *
 * Portions Copyright 2008 John Birrell <jb@freebsd.org>
 *
 * $FreeBSD$
 *
 * This is a simplified version of the cyclic timer subsystem from
 * OpenSolaris.  In the FreeBSD version, we don't use interrupt levels.
 */

/*
 * Copyright 2004 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 *  The Cyclic Subsystem
 *  --------------------
 *
 *  Prehistory
 *
 * Historically, most computer architectures have specified interval-based
 * timer parts (e.g. SPARCstation's counter/timer; Intel's i8254).  While
 * these parts deal in relative (i.e. not absolute) time values, they are
 * typically used by the operating system to implement the abstraction of
 * absolute time.  As a result, these parts cannot typically be reprogrammed
 * without introducing error in the system's notion of time.
 *
 * Starting in about 1994, chip architectures began specifying high resolution
 * timestamp registers.  As of this writing (1999), all major chip families
 * (UltraSPARC, PentiumPro, MIPS, PowerPC, Alpha) have high resolution
 * timestamp registers, and two (UltraSPARC and MIPS) have added the capacity
 * to interrupt based on timestamp values.  These timestamp-compare registers
 * present a time-based interrupt source which can be reprogrammed arbitrarily
 * often without introducing error.  Given the low cost of implementing such a
 * timestamp-compare register (and the tangible benefit of eliminating
 * discrete timer parts), it is reasonable to expect that future chip
 * architectures will adopt this feature.
 *
 * The cyclic subsystem has been designed to take advantage of chip
 * architectures with the capacity to interrupt based on absolute, high
 * resolution values of time.
 *
 *  Subsystem Overview
 *
 * The cyclic subsystem is a low-level kernel subsystem designed to provide
 * arbitrarily high resolution, per-CPU interval timers (to avoid colliding
 * with existing terms, we dub such an interval timer a "cyclic").
 * Alternatively, a cyclic may be specified to be "omnipresent", denoting
 * firing on all online CPUs.
 *
 *  Cyclic Subsystem Interface Overview
 *  -----------------------------------
 *
 * The cyclic subsystem has interfaces with the kernel at-large, with other
 * kernel subsystems (e.g. the processor management subsystem, the checkpoint
 * resume subsystem) and with the platform (the cyclic backend).  Each
 * of these interfaces is given a brief synopsis here, and is described
 * in full above the interface's implementation.
 *
 * The following diagram displays the cyclic subsystem's interfaces to
 * other kernel components.  The arrows denote a "calls" relationship, with
 * the large arrow indicating the cyclic subsystem's consumer interface.
 * Each arrow is labeled with the section in which the corresponding
 * interface is described.
 *
 *            Kernel at-large consumers
 *            -----------++------------
 *                       ||
 *                       ||
 *                      _||_
 *                      \  /
 *                       \/
 *             +---------------------+
 *             |                     |
 *             |  Cyclic subsystem   |<-----------  Other kernel subsystems
 *             |                     |
 *             +---------------------+
 *                    ^       |
 *                    |       |
 *                    |       |
 *                    |       v
 *             +---------------------+
 *             |                     |
 *             |   Cyclic backend    |
 *             | (platform specific) |
 *             |                     |
 *             +---------------------+
 *
 *
 *  Kernel At-Large Interfaces
 *
 *    cyclic_add()         <-- Creates a cyclic
 *    cyclic_add_omni()    <-- Creates an omnipresent cyclic
 *    cyclic_remove()      <-- Removes a cyclic
 *
 *  Backend Interfaces
 *
 *    cyclic_init()        <-- Initializes the cyclic subsystem
 *    cyclic_fire()        <-- Interrupt entry point
 *
 * The backend-supplied interfaces (through the cyc_backend structure) are
 * documented in detail in <sys/cyclic_impl.h>
 *
 *
 *  Cyclic Subsystem Implementation Overview
 *  ----------------------------------------
 *
 * The cyclic subsystem is designed to minimize interference between cyclics
 * on different CPUs.  Thus, all of the cyclic subsystem's data structures
 * hang off of a per-CPU structure, cyc_cpu.
 *
 * Each cyc_cpu has a power-of-two sized array of cyclic structures (the
 * cyp_cyclics member of the cyc_cpu structure).  If cyclic_add() is called
 * and there does not exist a free slot in the cyp_cyclics array, the size of
 * the array will be doubled.  The array will never shrink.  Cyclics are
 * referred to by their index in the cyp_cyclics array, which is of type
 * cyc_index_t.
 *
 * The cyclics are kept sorted by expiration time in the cyc_cpu's heap.  The
 * heap is keyed by cyclic expiration time, with parents expiring earlier
 * than their children.
 *
 *  Heap Management
 *
 * The heap is managed primarily by cyclic_fire().  Upon entry, cyclic_fire()
 * compares the root cyclic's expiration time to the current time.  If the
 * expiration time is in the past, cyclic_expire() is called on the root
 * cyclic.  Upon return from cyclic_expire(), the cyclic's new expiration time
 * is derived by adding its interval to its old expiration time, and a
 * downheap operation is performed.  After the downheap, cyclic_fire()
 * examines the (potentially changed) root cyclic, repeating the
 * cyclic_expire()/add interval/cyclic_downheap() sequence until the root
 * cyclic has an expiration time in the future.  This expiration time
 * (guaranteed to be the earliest in the heap) is then communicated to the
 * backend via cyb_reprogram.  Optimal backends will next call cyclic_fire()
 * shortly after the root cyclic's expiration time.
 *
 * To allow efficient, deterministic downheap operations, we implement the
 * heap as an array (the cyp_heap member of the cyc_cpu structure), with each
 * element containing an index into the CPU's cyp_cyclics array.
 *
 * The heap is laid out in the array according to the following:
 *
 *   1.  The root of the heap is always in the 0th element of the heap array
 *   2.  The left and right children of the nth element are element
 *       (((n + 1) << 1) - 1) and element ((n + 1) << 1), respectively.
 *
 * This layout is standard (see, e.g., Cormen's "Algorithms"); the proof
 * that these constraints correctly lay out a heap (or indeed, any binary
 * tree) is trivial and left to the reader.
 *
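 * Expressed as C macros (this is an illustration only; the definitions this
 * file actually uses -- CYC_HEAP_PARENT(), CYC_HEAP_LEFT() and
 * CYC_HEAP_RIGHT() -- live in <sys/cyclic_impl.h>), the layout above
 * corresponds to:
 *
 *      #define CYC_HEAP_PARENT(ndx)    (((ndx) - 1) >> 1)
 *      #define CYC_HEAP_LEFT(ndx)      ((((ndx) + 1) << 1) - 1)
 *      #define CYC_HEAP_RIGHT(ndx)     (((ndx) + 1) << 1)
 *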
 * To see the heap by example, assume our cyclics array has the following
 * members (at time t):
 *
 *      cy_handler                  cy_expire
 *      ---------------------------------------------
 *      [ 0]  clock()               t+10000000
 *      [ 1]  deadman()             t+1000000000
 *      [ 2]  clock_highres_fire()  t+100
 *      [ 3]  clock_highres_fire()  t+1000
 *      [ 4]  clock_highres_fire()  t+500
 *      [ 5]  (free)                --
 *      [ 6]  (free)                --
 *      [ 7]  (free)                --
 *
 * The heap array could be:
 *
 *                [0]   [1]   [2]   [3]   [4]   [5]   [6]   [7]
 *              +-----+-----+-----+-----+-----+-----+-----+-----+
 *              |     |     |     |     |     |     |     |     |
 *              |  2  |  3  |  4  |  0  |  1  |  x  |  x  |  x  |
 *              |     |     |     |     |     |     |     |     |
 *              +-----+-----+-----+-----+-----+-----+-----+-----+
 *
 * Graphically, this array corresponds to the following (excuse the ASCII art):
 *
 *                                       2
 *                                       |
 *                    +------------------+------------------+
 *                    3                                      4
 *                    |
 *          +---------+--------+
 *          0                  1
 *
 * Note that the heap is laid out by layer:  all nodes at a given depth are
 * stored in consecutive elements of the array.  Moreover, layers of
 * consecutive depths are in adjacent element ranges.  This property
 * guarantees high locality of reference during downheap operations.
 * Specifically, we are guaranteed that we can downheap to a depth of
 *
 *      lg (cache_line_size / sizeof (cyc_index_t))
 *
 * nodes with at most one cache miss.  On UltraSPARC (64 byte e-cache line
 * size), this corresponds to a depth of four nodes.  Thus, if there are
 * fewer than sixteen cyclics in the heap, downheaps on UltraSPARC miss at
 * most once in the e-cache.
 *
 * Downheaps are required to compare siblings as they proceed down the
 * heap.  For downheaps proceeding beyond the one-cache-miss depth, every
 * access to a left child could potentially miss in the cache.  However,
 * if we assume
 *
 *      (cache_line_size / sizeof (cyc_index_t)) > 2,
 *
 * then all siblings are guaranteed to be on the same cache line.  Thus, the
 * miss on the left child will guarantee a hit on the right child; downheaps
 * will incur at most one cache miss per layer beyond the one-cache-miss
 * depth.  The total number of cache misses for heap management during a
 * downheap operation is thus bounded by
 *
 *      lg (n) - lg (cache_line_size / sizeof (cyc_index_t))
 *
 * Traditional pointer-based heaps are implemented without regard to
 * locality.  Downheaps can thus incur two cache misses per layer (one for
 * each child), but at most one cache miss at the root.  This yields a bound
 * of
 *
 *      2 * lg (n) - 1
 *
 * on the total cache misses.
 *
 * This difference may seem theoretically trivial (the difference is, after
 * all, constant), but can become substantial in practice -- especially for
 * caches with very large cache lines and high miss penalties (e.g. TLBs).
 *
 * Heaps must always be full, balanced trees.  Heap management must therefore
 * track the next point-of-insertion into the heap.  In pointer-based heaps,
 * recomputing this point takes O(lg (n)).  Given the layout of the
 * array-based implementation, however, the next point-of-insertion is
 * always:
 *
 *      heap[number_of_elements]
 *
 * We exploit this property by implementing the free-list in the unused
 * heap elements.  Heap insertion, therefore, consists only of filling in
 * the cyclic at cyp_cyclics[cyp_heap[number_of_elements]], incrementing
 * the number of elements, and performing an upheap.  Heap deletion consists
 * of decrementing the number of elements, swapping the to-be-deleted element
 * with the element at cyp_heap[number_of_elements], and downheaping.
 *
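 * As a sketch only (the operations actually used are cyclic_add_xcall()
 * and cyclic_remove_xcall() below, which add locking and backend
 * reprogramming; "heap", "cyclics" and "nelems" here stand in for
 * cyp_heap, cyp_cyclics and cyp_nelems), insertion and deletion reduce to:
 *
 *      ndx = heap[nelems];             insertion: free slot named by free list
 *      cyclics[ndx] = new_cyclic;      fill in the cyclic
 *      nelems++;                       one more live element ...
 *      upheap(nelems - 1);             ... then restore the heap invariant
 *
 *      swap(heap[i], heap[--nelems]);  deletion: i is the victim's heap slot
 *      downheap(i);                    freed slot is now on the free list
 *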
 * Filling in more details in our earlier example:
 *
 *                                               +--- free list head
 *                                               |
 *                                               V
 *
 *                [0]   [1]   [2]   [3]   [4]   [5]   [6]   [7]
 *              +-----+-----+-----+-----+-----+-----+-----+-----+
 *              |     |     |     |     |     |     |     |     |
 *              |  2  |  3  |  4  |  0  |  1  |  5  |  6  |  7  |
 *              |     |     |     |     |     |     |     |     |
 *              +-----+-----+-----+-----+-----+-----+-----+-----+
 *
 * To insert into this heap, we would just need to fill in the cyclic at
 * cyp_cyclics[5], bump the number of elements (from 5 to 6) and perform
 * an upheap.
 *
 * If we wanted to remove, say, cyp_cyclics[3], we would first scan for it
 * in the cyp_heap, and discover it at cyp_heap[1].  We would then decrement
 * the number of elements (from 5 to 4), swap cyp_heap[1] with cyp_heap[4],
 * and perform a downheap from cyp_heap[1].  The linear scan is required
 * because the cyclic does not keep a backpointer into the heap.  This makes
 * heap manipulation (e.g. downheaps) faster at the expense of removal
 * operations.
 *
 *  Expiry processing
 *
 * As alluded to above, cyclic_expire() is called by cyclic_fire() to expire
 * a cyclic.  Cyclic subsystem consumers are guaranteed that for an arbitrary
 * time t in the future, their cyclic handler will have been called
 * (t - cyt_when) / cyt_interval times.  cyclic_expire() simply needs to call
 * the handler.
 *
 *  Resizing
 *
 * All of the discussion thus far has assumed a static number of cyclics.
 * Obviously, static limitations are not practical; we need the capacity
 * to resize our data structures dynamically.
 *
 * We resize our data structures lazily, and only on a per-CPU basis.
 * The size of the data structures always doubles and never shrinks.  We
 * serialize adds (and thus resizes) on cpu_lock; we never need to deal
 * with concurrent resizes.  Resizes should be rare; they may induce jitter
 * on the CPU being resized, but should not affect cyclic operation on other
 * CPUs.
 *
 * Two key cyc_cpu data structures need to be resized:  the cyclics array
 * and the heap array.  Resizing is relatively straightforward:
 *
 *     1.  The new, larger arrays are allocated in cyclic_expand() (called
 *         from cyclic_add()).
 *     2.  The contents of the old arrays are copied into the new arrays.
 *     3.  The old cyclics array is bzero()'d
 *     4.  The pointers are updated.
 *
 *  Removals
 *
 * Cyclic removals should be rare.  To simplify the implementation (and to
 * allow optimization for the cyclic_fire()/cyclic_expire() path), we force
 * removals and adds to serialize on cpu_lock.
 *
 */
#include <sys/cdefs.h>
#include <sys/param.h>
#include <sys/conf.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/sx.h>
#include <sys/cyclic_impl.h>
#include <sys/module.h>
#include <sys/systm.h>
#include <sys/atomic.h>
#include <sys/kmem.h>
#include <sys/cmn_err.h>
#include <sys/dtrace_bsd.h>
#include <machine/cpu.h>

static kmem_cache_t *cyclic_id_cache;
static cyc_id_t *cyclic_id_head;
static cyc_backend_t cyclic_backend;

static MALLOC_DEFINE(M_CYCLIC, "cyclic", "Cyclic timer subsystem");

static __inline hrtime_t
cyc_gethrtime(void)
{
	struct bintime bt;

	binuptime(&bt);

	/*
	 * Convert the bintime to nanoseconds since boot:  whole seconds,
	 * plus the upper 32 bits of the 64-bit binary fraction scaled to
	 * nanoseconds.
	 */
	return ((hrtime_t)bt.sec * NANOSEC +
	    (((uint64_t)NANOSEC * (uint32_t)(bt.frac >> 32)) >> 32));
}

/*
 * Returns 1 if the upheap propagated to the root, 0 if it did not.  This
 * allows the caller to reprogram the backend only when the root has been
 * modified.
 */
static int
cyclic_upheap(cyc_cpu_t *cpu, cyc_index_t ndx)
{
	cyclic_t *cyclics;
	cyc_index_t *heap;
	cyc_index_t heap_parent, heap_current = ndx;
	cyc_index_t parent, current;

	if (heap_current == 0)
		return (1);

	heap = cpu->cyp_heap;
	cyclics = cpu->cyp_cyclics;
	heap_parent = CYC_HEAP_PARENT(heap_current);

	for (;;) {
		current = heap[heap_current];
		parent = heap[heap_parent];

		/*
		 * We have an expiration time later than our parent; we're
		 * done.
		 */
		if (cyclics[current].cy_expire >= cyclics[parent].cy_expire)
			return (0);

		/*
		 * We need to swap with our parent, and continue up the heap.
		 */
		heap[heap_parent] = current;
		heap[heap_current] = parent;

		/*
		 * If we just reached the root, we're done.
		 */
		if (heap_parent == 0)
			return (1);

		heap_current = heap_parent;
		heap_parent = CYC_HEAP_PARENT(heap_current);
	}
}

static void
cyclic_downheap(cyc_cpu_t *cpu, cyc_index_t ndx)
{
	cyclic_t *cyclics = cpu->cyp_cyclics;
	cyc_index_t *heap = cpu->cyp_heap;

	cyc_index_t heap_left, heap_right, heap_me = ndx;
	cyc_index_t left, right, me;
	cyc_index_t nelems = cpu->cyp_nelems;

	for (;;) {
		/*
		 * If we don't have a left child (i.e., we're a leaf), we're
		 * done.
		 */
		if ((heap_left = CYC_HEAP_LEFT(heap_me)) >= nelems)
			return;

		left = heap[heap_left];
		me = heap[heap_me];

		heap_right = CYC_HEAP_RIGHT(heap_me);

		/*
		 * Even if we don't have a right child, we still need to
		 * compare our expiration time against that of our left child.
		 */
		if (heap_right >= nelems)
			goto comp_left;

		right = heap[heap_right];

		/*
		 * We have both a left and a right child.  We need to compare
		 * the expiration times of the children to determine which
		 * expires earlier.
		 */
		if (cyclics[right].cy_expire < cyclics[left].cy_expire) {
			/*
			 * Our right child is the earlier of our children.
			 * We'll now compare our expiration time to its; if
			 * ours is the earlier, we're done.
			 */
			if (cyclics[me].cy_expire <= cyclics[right].cy_expire)
				return;

			/*
			 * Our right child expires earlier than we do; swap
			 * with our right child, and descend right.
			 */
			heap[heap_right] = me;
			heap[heap_me] = right;
			heap_me = heap_right;
			continue;
		}

comp_left:
		/*
		 * Our left child is the earlier of our children (or we have
		 * no right child).  We'll now compare our expiration time
		 * to its; if ours is the earlier, we're done.
		 */
		if (cyclics[me].cy_expire <= cyclics[left].cy_expire)
			return;

		/*
		 * Our left child expires earlier than we do; swap with our
		 * left child, and descend left.
		 */
		heap[heap_left] = me;
		heap[heap_me] = left;
		heap_me = heap_left;
	}
}

static void
cyclic_expire(cyc_cpu_t *cpu, cyc_index_t ndx, cyclic_t *cyclic)
{
	cyc_func_t handler = cyclic->cy_handler;
	void *arg = cyclic->cy_arg;

	(*handler)(arg);
}

/*
 *  cyclic_fire(cpu_t *)
 *
 *  Overview
 *
 *    cyclic_fire() is the cyclic subsystem's interrupt handler.
 *    Called by the cyclic backend.
 *
 *  Arguments and notes
 *
 *    The only argument is the CPU on which the interrupt is executing;
 *    backends must call into cyclic_fire() on the specified CPU.
 *
 *    cyclic_fire() may be called spuriously without ill effect.  Optimal
 *    backends will call into cyclic_fire() at or shortly after the time
 *    requested via cyb_reprogram().  However, calling cyclic_fire()
 *    arbitrarily late will only manifest latency bubbles; the correctness
 *    of the cyclic subsystem does not rely on the timeliness of the backend.
 *
 *    cyclic_fire() is wait-free; it will not block or spin.
 *
 *  Return values
 *
 *    None.
 *
 */
static void
cyclic_fire(cpu_t *c)
{
	cyc_cpu_t *cpu = c->cpu_cyclic;
	cyc_backend_t *be = cpu->cyp_backend;
	cyc_index_t *heap = cpu->cyp_heap;
	cyclic_t *cyclic, *cyclics = cpu->cyp_cyclics;
	void *arg = be->cyb_arg;
	hrtime_t now = cyc_gethrtime();
	hrtime_t exp;

	if (cpu->cyp_nelems == 0) {
		/* This is a spurious fire. */
		return;
	}

	for (;;) {
		cyc_index_t ndx = heap[0];

		cyclic = &cyclics[ndx];

		ASSERT(!(cyclic->cy_flags & CYF_FREE));

		if ((exp = cyclic->cy_expire) > now)
			break;

		cyclic_expire(cpu, ndx, cyclic);

		/*
		 * If this cyclic will be set to next expire in the distant
		 * past, we have one of two situations:
		 *
		 *   a)	This is the first firing of a cyclic which had
		 *	cy_expire set to 0.
		 *
		 *   b)	We are tragically late for a cyclic -- most likely
		 *	due to being in the debugger.
		 *
		 * In either case, we set the new expiration time to be the
		 * next interval boundary.  This assures that the expiration
		 * time modulo the interval is invariant.
		 *
		 * We arbitrarily define "distant" to be one second (one
		 * second is chosen because it's shorter than any foray to
		 * the debugger while still being longer than any legitimate
		 * stretch).
		 */
		exp += cyclic->cy_interval;

		if (now - exp > NANOSEC) {
			hrtime_t interval = cyclic->cy_interval;

			exp += ((now - exp) / interval + 1) * interval;
		}

		cyclic->cy_expire = exp;
		cyclic_downheap(cpu, 0);
	}

	/*
	 * Now we have a cyclic in the root slot which isn't in the past;
	 * reprogram the interrupt source.
	 */
	be->cyb_reprogram(arg, exp);
}
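
/*
 * To make the catch-up arithmetic above concrete (the numbers here are
 * purely hypothetical):  suppose a cyclic with a one second cy_interval
 * last expired at time E, and cyclic_fire() next runs at now == E + 2.5
 * seconds (say, after a stop in the debugger).  The handler fires once,
 * exp is bumped to E + 1s, and since now - exp (1.5s) exceeds NANOSEC the
 * adjustment adds ((now - exp) / interval + 1) * interval == 2s more,
 * leaving exp == E + 3s:  half a second in the future, and still on an
 * interval boundary.
 */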

static void
cyclic_expand_xcall(cyc_xcallarg_t *arg)
{
	cyc_cpu_t *cpu = arg->cyx_cpu;
	cyc_index_t new_size = arg->cyx_size, size = cpu->cyp_size, i;
	cyc_index_t *new_heap = arg->cyx_heap;
	cyclic_t *cyclics = cpu->cyp_cyclics, *new_cyclics = arg->cyx_cyclics;

	/* Disable preemption and interrupts. */
	mtx_lock_spin(&cpu->cyp_mtx);

	/*
	 * Assert that the new size is a power of 2.
	 */
	ASSERT((new_size & (new_size - 1)) == 0);
	ASSERT(new_size == (size << 1));
	ASSERT(cpu->cyp_heap != NULL && cpu->cyp_cyclics != NULL);

	bcopy(cpu->cyp_heap, new_heap, sizeof (cyc_index_t) * size);
	bcopy(cyclics, new_cyclics, sizeof (cyclic_t) * size);

	/*
	 * Set up the free list, and set all of the new cyclics to be
	 * CYF_FREE.
	 */
	for (i = size; i < new_size; i++) {
		new_heap[i] = i;
		new_cyclics[i].cy_flags = CYF_FREE;
	}

	/*
	 * We can go ahead and plow the value of cyp_heap and cyp_cyclics;
	 * cyclic_expand() has kept a copy.
	 */
	cpu->cyp_heap = new_heap;
	cpu->cyp_cyclics = new_cyclics;
	cpu->cyp_size = new_size;
	mtx_unlock_spin(&cpu->cyp_mtx);
}

/*
 * cyclic_expand() will cross call onto the CPU to perform the actual
 * expand operation.
 */
static void
cyclic_expand(cyc_cpu_t *cpu)
{
	cyc_index_t new_size, old_size;
	cyc_index_t *new_heap, *old_heap;
	cyclic_t *new_cyclics, *old_cyclics;
	cyc_xcallarg_t arg;
	cyc_backend_t *be = cpu->cyp_backend;

	ASSERT(MUTEX_HELD(&cpu_lock));

	old_heap = cpu->cyp_heap;
	old_cyclics = cpu->cyp_cyclics;

	if ((new_size = ((old_size = cpu->cyp_size) << 1)) == 0) {
		new_size = CY_DEFAULT_PERCPU;
		ASSERT(old_heap == NULL && old_cyclics == NULL);
	}

	/*
	 * Check that the new_size is a power of 2.
	 */
	ASSERT(((new_size - 1) & new_size) == 0);

	new_heap = malloc(sizeof(cyc_index_t) * new_size, M_CYCLIC, M_WAITOK);
	new_cyclics = malloc(sizeof(cyclic_t) * new_size, M_CYCLIC,
	    M_ZERO | M_WAITOK);

	arg.cyx_cpu = cpu;
	arg.cyx_heap = new_heap;
	arg.cyx_cyclics = new_cyclics;
	arg.cyx_size = new_size;

	be->cyb_xcall(be->cyb_arg, cpu->cyp_cpu,
	    (cyc_func_t)cyclic_expand_xcall, &arg);

	if (old_cyclics != NULL) {
		ASSERT(old_heap != NULL);
		ASSERT(old_size != 0);
		free(old_cyclics, M_CYCLIC);
		free(old_heap, M_CYCLIC);
	}
}

static void
cyclic_add_xcall(cyc_xcallarg_t *arg)
{
	cyc_cpu_t *cpu = arg->cyx_cpu;
	cyc_handler_t *hdlr = arg->cyx_hdlr;
	cyc_time_t *when = arg->cyx_when;
	cyc_backend_t *be = cpu->cyp_backend;
	cyc_index_t ndx, nelems;
	cyb_arg_t bar = be->cyb_arg;
	cyclic_t *cyclic;

	ASSERT(cpu->cyp_nelems < cpu->cyp_size);

	/* Disable preemption and interrupts. */
	mtx_lock_spin(&cpu->cyp_mtx);
	nelems = cpu->cyp_nelems++;

	if (nelems == 0) {
		/*
		 * If this is the first element, we need to enable the
		 * backend on this CPU.
		 */
		be->cyb_enable(bar);
	}

	ndx = cpu->cyp_heap[nelems];
	cyclic = &cpu->cyp_cyclics[ndx];

	ASSERT(cyclic->cy_flags == CYF_FREE);
	cyclic->cy_interval = when->cyt_interval;

	if (when->cyt_when == 0) {
		/*
		 * If a start time hasn't been explicitly specified, we'll
		 * start on the next interval boundary.
		 */
		cyclic->cy_expire = (cyc_gethrtime() / cyclic->cy_interval + 1) *
		    cyclic->cy_interval;
	} else {
		cyclic->cy_expire = when->cyt_when;
	}

	cyclic->cy_handler = hdlr->cyh_func;
	cyclic->cy_arg = hdlr->cyh_arg;
	cyclic->cy_flags = arg->cyx_flags;

	if (cyclic_upheap(cpu, nelems)) {
		hrtime_t exp = cyclic->cy_expire;

		/*
		 * If our upheap propagated to the root, we need to
		 * reprogram the interrupt source.
		 */
		be->cyb_reprogram(bar, exp);
	}
	mtx_unlock_spin(&cpu->cyp_mtx);

	arg->cyx_ndx = ndx;
}

static cyc_index_t
cyclic_add_here(cyc_cpu_t *cpu, cyc_handler_t *hdlr,
    cyc_time_t *when, uint16_t flags)
{
	cyc_backend_t *be = cpu->cyp_backend;
	cyb_arg_t bar = be->cyb_arg;
	cyc_xcallarg_t arg;

	ASSERT(MUTEX_HELD(&cpu_lock));
	ASSERT(!(cpu->cyp_cpu->cpu_flags & CPU_OFFLINE));
	ASSERT(when->cyt_when >= 0 && when->cyt_interval > 0);

	if (cpu->cyp_nelems == cpu->cyp_size) {
		/*
		 * This is expensive; it will cross call onto the other
		 * CPU to perform the expansion.
		 */
		cyclic_expand(cpu);
		ASSERT(cpu->cyp_nelems < cpu->cyp_size);
	}

	/*
	 * By now, we know that we're going to be able to successfully
	 * perform the add.  Now cross call over to the CPU of interest to
	 * actually add our cyclic.
	 */
	arg.cyx_cpu = cpu;
	arg.cyx_hdlr = hdlr;
	arg.cyx_when = when;
	arg.cyx_flags = flags;

	be->cyb_xcall(bar, cpu->cyp_cpu, (cyc_func_t)cyclic_add_xcall, &arg);

	return (arg.cyx_ndx);
}

static void
cyclic_remove_xcall(cyc_xcallarg_t *arg)
{
	cyc_cpu_t *cpu = arg->cyx_cpu;
	cyc_backend_t *be = cpu->cyp_backend;
	cyb_arg_t bar = be->cyb_arg;
	cyc_index_t ndx = arg->cyx_ndx, nelems = cpu->cyp_nelems, i;
	cyc_index_t *heap = cpu->cyp_heap, last;
	cyclic_t *cyclic;

	ASSERT(nelems > 0);

	/* Disable preemption and interrupts. */
	mtx_lock_spin(&cpu->cyp_mtx);
	cyclic = &cpu->cyp_cyclics[ndx];

	/*
	 * Grab the current expiration time.  If this cyclic is being
	 * removed as part of a juggling operation, the expiration time
	 * will be used when the cyclic is added to the new CPU.
	 */
	if (arg->cyx_when != NULL) {
		arg->cyx_when->cyt_when = cyclic->cy_expire;
		arg->cyx_when->cyt_interval = cyclic->cy_interval;
	}

	/*
	 * Now set the flags to CYF_FREE.  We don't need a membar_enter()
	 * between zeroing pend and setting the flags because we're at
	 * CY_HIGH_LEVEL (that is, the zeroing of pend and the setting
	 * of cy_flags appear atomic to softints).
	 */
	cyclic->cy_flags = CYF_FREE;

	for (i = 0; i < nelems; i++) {
		if (heap[i] == ndx)
			break;
	}

	if (i == nelems)
		panic("attempt to remove non-existent cyclic");

	cpu->cyp_nelems = --nelems;

	if (nelems == 0) {
		/*
		 * If we just removed the last element, then we need to
		 * disable the backend on this CPU.
		 */
		be->cyb_disable(bar);
	}

	if (i == nelems) {
		/*
		 * If we just removed the last element of the heap, then
		 * we don't have to downheap.
		 */
		goto out;
	}

	/*
	 * Swap the last element of the heap with the one we want to
	 * remove, and downheap (this has the implicit effect of putting
	 * the newly freed element on the free list).
	 */
	heap[i] = (last = heap[nelems]);
	heap[nelems] = ndx;

	if (i == 0) {
		cyclic_downheap(cpu, 0);
	} else {
		if (cyclic_upheap(cpu, i) == 0) {
			/*
			 * The upheap didn't propagate to the root; if it
			 * didn't propagate at all, we need to downheap.
			 */
			if (heap[i] == last) {
				cyclic_downheap(cpu, i);
			}
			goto out;
		}
	}

	/*
	 * We're here because we changed the root; we need to reprogram
	 * the clock source.
	 */
	cyclic = &cpu->cyp_cyclics[heap[0]];

	ASSERT(nelems != 0);
	be->cyb_reprogram(bar, cyclic->cy_expire);
out:
	mtx_unlock_spin(&cpu->cyp_mtx);
}

static int
cyclic_remove_here(cyc_cpu_t *cpu, cyc_index_t ndx, cyc_time_t *when, int wait)
{
	cyc_backend_t *be = cpu->cyp_backend;
	cyc_xcallarg_t arg;

	ASSERT(MUTEX_HELD(&cpu_lock));
	ASSERT(wait == CY_WAIT || wait == CY_NOWAIT);

	arg.cyx_ndx = ndx;
	arg.cyx_cpu = cpu;
	arg.cyx_when = when;
	arg.cyx_wait = wait;

	be->cyb_xcall(be->cyb_arg, cpu->cyp_cpu,
	    (cyc_func_t)cyclic_remove_xcall, &arg);

	return (1);
}

static void
cyclic_configure(cpu_t *c)
{
	cyc_cpu_t *cpu = malloc(sizeof(cyc_cpu_t), M_CYCLIC, M_ZERO | M_WAITOK);
	cyc_backend_t *nbe = malloc(sizeof(cyc_backend_t), M_CYCLIC,
	    M_ZERO | M_WAITOK);

	ASSERT(MUTEX_HELD(&cpu_lock));

	if (cyclic_id_cache == NULL)
		cyclic_id_cache = kmem_cache_create("cyclic_id_cache",
		    sizeof (cyc_id_t), 0, NULL, NULL, NULL, NULL, NULL, 0);

	cpu->cyp_cpu = c;

	cpu->cyp_size = 1;
	cpu->cyp_heap = malloc(sizeof(cyc_index_t), M_CYCLIC,
	    M_ZERO | M_WAITOK);
	cpu->cyp_cyclics = malloc(sizeof(cyclic_t), M_CYCLIC,
	    M_ZERO | M_WAITOK);
	cpu->cyp_cyclics->cy_flags = CYF_FREE;

	mtx_init(&cpu->cyp_mtx, "cyclic cpu", NULL, MTX_SPIN);

	/*
	 * Set up the backend for this CPU.
	 */
	bcopy(&cyclic_backend, nbe, sizeof (cyc_backend_t));
	if (nbe->cyb_configure != NULL)
		nbe->cyb_arg = nbe->cyb_configure(c);
	cpu->cyp_backend = nbe;

	/*
	 * On platforms where stray interrupts may be taken during startup,
	 * the CPU's cpu_cyclic pointer serves as an indicator that the
	 * cyclic subsystem for this CPU is prepared to field interrupts.
	 */
	membar_producer();

	c->cpu_cyclic = cpu;
}

static void
cyclic_unconfigure(cpu_t *c)
{
	cyc_cpu_t *cpu = c->cpu_cyclic;
	cyc_backend_t *be = cpu->cyp_backend;
	cyb_arg_t bar = be->cyb_arg;

	ASSERT(MUTEX_HELD(&cpu_lock));

	c->cpu_cyclic = NULL;

	/*
	 * Let the backend know that the CPU is being yanked, and free up
	 * the backend structure.
	 */
	if (be->cyb_unconfigure != NULL)
		be->cyb_unconfigure(bar);
	free(be, M_CYCLIC);
	cpu->cyp_backend = NULL;

	mtx_destroy(&cpu->cyp_mtx);

	/* Finally, clean up our remaining dynamic structures. */
	free(cpu->cyp_cyclics, M_CYCLIC);
	free(cpu->cyp_heap, M_CYCLIC);
	free(cpu, M_CYCLIC);
}

static void
cyclic_omni_start(cyc_id_t *idp, cyc_cpu_t *cpu)
{
	cyc_omni_handler_t *omni = &idp->cyi_omni_hdlr;
	cyc_omni_cpu_t *ocpu = malloc(sizeof(cyc_omni_cpu_t), M_CYCLIC,
	    M_WAITOK);
	cyc_handler_t hdlr;
	cyc_time_t when;

	ASSERT(MUTEX_HELD(&cpu_lock));
	ASSERT(idp->cyi_cpu == NULL);

	hdlr.cyh_func = NULL;
	hdlr.cyh_arg = NULL;

	when.cyt_when = 0;
	when.cyt_interval = 0;

	omni->cyo_online(omni->cyo_arg, cpu->cyp_cpu, &hdlr, &when);

	ASSERT(hdlr.cyh_func != NULL);
	ASSERT(when.cyt_when >= 0 && when.cyt_interval > 0);

	ocpu->cyo_cpu = cpu;
	ocpu->cyo_arg = hdlr.cyh_arg;
	ocpu->cyo_ndx = cyclic_add_here(cpu, &hdlr, &when, 0);
	ocpu->cyo_next = idp->cyi_omni_list;
	idp->cyi_omni_list = ocpu;
}

static void
cyclic_omni_stop(cyc_id_t *idp, cyc_cpu_t *cpu)
{
	cyc_omni_handler_t *omni = &idp->cyi_omni_hdlr;
	cyc_omni_cpu_t *ocpu = idp->cyi_omni_list, *prev = NULL;

	ASSERT(MUTEX_HELD(&cpu_lock));
	ASSERT(idp->cyi_cpu == NULL);
	ASSERT(ocpu != NULL);

	while (ocpu != NULL && ocpu->cyo_cpu != cpu) {
		prev = ocpu;
		ocpu = ocpu->cyo_next;
	}

	/*
	 * We _must_ have found a cyc_omni_cpu which corresponds to this
	 * CPU -- the definition of an omnipresent cyclic is that it runs
	 * on all online CPUs.
	 */
	ASSERT(ocpu != NULL);

	if (prev == NULL) {
		idp->cyi_omni_list = ocpu->cyo_next;
	} else {
		prev->cyo_next = ocpu->cyo_next;
	}

	(void) cyclic_remove_here(ocpu->cyo_cpu, ocpu->cyo_ndx, NULL, CY_WAIT);

	/*
	 * The cyclic has been removed from this CPU; time to call the
	 * omnipresent offline handler.
	 */
	if (omni->cyo_offline != NULL)
		omni->cyo_offline(omni->cyo_arg, cpu->cyp_cpu, ocpu->cyo_arg);

	free(ocpu, M_CYCLIC);
}

static cyc_id_t *
cyclic_new_id(void)
{
	cyc_id_t *idp;

	ASSERT(MUTEX_HELD(&cpu_lock));

	idp = kmem_cache_alloc(cyclic_id_cache, KM_SLEEP);

	/*
	 * The cyi_cpu field of the cyc_id_t structure tracks the CPU
	 * associated with the cyclic.  If and only if this field is NULL, the
	 * cyc_id_t is an omnipresent cyclic.  Note that cyi_omni_list may be
	 * NULL for an omnipresent cyclic while the cyclic is being created
	 * or destroyed.
	 */
	idp->cyi_cpu = NULL;
	idp->cyi_ndx = 0;

	idp->cyi_next = cyclic_id_head;
	idp->cyi_prev = NULL;
	idp->cyi_omni_list = NULL;

	if (cyclic_id_head != NULL) {
		ASSERT(cyclic_id_head->cyi_prev == NULL);
		cyclic_id_head->cyi_prev = idp;
	}

	cyclic_id_head = idp;

	return (idp);
}

/*
 *  cyclic_id_t cyclic_add(cyc_handler_t *, cyc_time_t *)
 *
 *  Overview
 *
 *    cyclic_add() will create an unbound cyclic with the specified handler
 *    and interval.  The cyclic will run on a CPU which both has interrupts
 *    enabled and is in the system CPU partition.
 *
 *  Arguments and notes
 *
 *    As its first argument, cyclic_add() takes a cyc_handler, which has the
 *    following members:
 *
 *      cyc_func_t cyh_func    <-- Cyclic handler
 *      void *cyh_arg          <-- Argument to cyclic handler
 *
 *    In addition to a cyc_handler, cyclic_add() takes a cyc_time, which
 *    has the following members:
 *
 *      hrtime_t cyt_when      <-- Absolute time, in nanoseconds since boot,
 *                                 at which to start firing
 *      hrtime_t cyt_interval  <-- Length of interval, in nanoseconds
 *
 *    gethrtime() is the time source for nanoseconds since boot.  If cyt_when
 *    is set to 0, the cyclic will start to fire when cyt_interval next
 *    divides the number of nanoseconds since boot.
 *
 *    The cyt_interval field _must_ be filled in by the caller; one-shots are
 *    _not_ explicitly supported by the cyclic subsystem (cyclic_add() will
 *    assert that cyt_interval is non-zero).  The maximum value for either
 *    field is INT64_MAX; the caller is responsible for assuring that
 *    cyt_when + cyt_interval <= INT64_MAX.  Neither field may be negative.
 *
 *    For an arbitrary time t in the future, the cyclic handler is guaranteed
 *    to have been called (t - cyt_when) / cyt_interval times.  This will
 *    be true even if interrupts have been disabled for periods greater than
 *    cyt_interval nanoseconds.  In order to compensate for such periods,
 *    the cyclic handler may be called a finite number of times with an
 *    arbitrarily small interval.
 *
 *    The cyclic subsystem will not enforce any lower bound on the interval;
 *    if the interval is less than the time required to process an interrupt,
 *    the CPU will wedge.  It's the responsibility of the caller to assure
 *    that either the value of the interval is sane, or that its caller has
 *    sufficient privilege to deny service (i.e. its caller is root).
 *
 *  Return value
 *
 *    cyclic_add() returns a cyclic_id_t, which is guaranteed to be a value
 *    other than CYCLIC_NONE.  cyclic_add() cannot fail.
 *
 *  Caller's context
 *
 *    cpu_lock must be held by the caller, and the caller must not be in
 *    interrupt context.  cyclic_add() will perform a KM_SLEEP kernel
 *    memory allocation, so the usual rules (e.g. p_lock cannot be held)
 *    apply.  A cyclic may be added even in the presence of CPUs that have
 *    not been configured with respect to the cyclic subsystem, but only
 *    configured CPUs will be eligible to run the new cyclic.
 *
 *  Cyclic handler's context
 *
 *    Cyclic handlers will be executed in the interrupt context corresponding
 *    to the specified level (i.e. either high, lock or low level).  The
 *    usual context rules apply.
 *
 *    A cyclic handler may not grab ANY locks held by the caller of any of
 *    cyclic_add() or cyclic_remove(); the implementation of these functions
 *    may require blocking on cyclic handler completion.
 *    Moreover, cyclic handlers may not make any call back into the cyclic
 *    subsystem.
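 *
 *  Example
 *
 *    Purely for illustration (foo_tick() and foo_softc are hypothetical
 *    consumer names), a cyclic firing every ten milliseconds, starting at
 *    the next ten millisecond boundary, could be registered like this:
 *
 *      cyc_handler_t hdlr;
 *      cyc_time_t when;
 *      cyclic_id_t id;
 *
 *      hdlr.cyh_func = foo_tick;
 *      hdlr.cyh_arg = foo_softc;
 *      when.cyt_when = 0;
 *      when.cyt_interval = 10 * 1000 * 1000;
 *
 *      mutex_enter(&cpu_lock);
 *      id = cyclic_add(&hdlr, &when);
 *      mutex_exit(&cpu_lock);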
 */
cyclic_id_t
cyclic_add(cyc_handler_t *hdlr, cyc_time_t *when)
{
	cyc_id_t *idp = cyclic_new_id();
	solaris_cpu_t *c = &solaris_cpu[curcpu];

	ASSERT(MUTEX_HELD(&cpu_lock));
	ASSERT(when->cyt_when >= 0 && when->cyt_interval > 0);

	idp->cyi_cpu = c->cpu_cyclic;
	idp->cyi_ndx = cyclic_add_here(idp->cyi_cpu, hdlr, when, 0);

	return ((uintptr_t)idp);
}

/*
 *  cyclic_id_t cyclic_add_omni(cyc_omni_handler_t *)
 *
 *  Overview
 *
 *    cyclic_add_omni() will create an omnipresent cyclic with the specified
 *    online and offline handlers.  Omnipresent cyclics run on all online
 *    CPUs, including CPUs which have unbound interrupts disabled.
 *
 *  Arguments
 *
 *    As its only argument, cyclic_add_omni() takes a cyc_omni_handler, which
 *    has the following members:
 *
 *      void (*cyo_online)()   <-- Online handler
 *      void (*cyo_offline)()  <-- Offline handler
 *      void *cyo_arg          <-- Argument to be passed to on/offline handlers
 *
 *  Online handler
 *
 *    The cyo_online member is a pointer to a function which has the following
 *    four arguments:
 *
 *      void *           <-- Argument (cyo_arg)
 *      cpu_t *          <-- Pointer to CPU about to be onlined
 *      cyc_handler_t *  <-- Pointer to cyc_handler_t; must be filled in
 *                           by omni online handler
 *      cyc_time_t *     <-- Pointer to cyc_time_t; must be filled in by
 *                           omni online handler
 *
 *    The omni cyclic online handler is always called _before_ the omni
 *    cyclic begins to fire on the specified CPU.  As the above argument
 *    description implies, the online handler must fill in the two structures
 *    passed to it:  the cyc_handler_t and the cyc_time_t.  These are the
 *    same two structures passed to cyclic_add(), outlined above.  This
 *    allows the omni cyclic to have maximum flexibility; different CPUs may
 *    optionally
 *
 *      (a)  have different intervals
 *      (b)  be explicitly in or out of phase with one another
 *      (c)  have different handlers
 *      (d)  have different handler arguments
 *      (e)  fire at different levels
 *
 *    Of these, (e) seems somewhat dubious, but is nonetheless allowed.
 *
 *    The omni online handler is called in the same context as cyclic_add(),
 *    and has the same liberties:  omni online handlers may perform KM_SLEEP
 *    kernel memory allocations, and may grab locks which are also acquired
 *    by cyclic handlers.  However, omni cyclic online handlers may _not_
 *    call back into the cyclic subsystem, and should be generally careful
 *    about calling into arbitrary kernel subsystems.
 *
 *  Offline handler
 *
 *    The cyo_offline member is a pointer to a function which has the
 *    following three arguments:
 *
 *      void *           <-- Argument (cyo_arg)
 *      cpu_t *          <-- Pointer to CPU about to be offlined
 *      void *           <-- CPU's cyclic argument (that is, value
 *                           to which cyh_arg member of the cyc_handler_t
 *                           was set in the omni online handler)
 *
 *    The omni cyclic offline handler is always called _after_ the omni
 *    cyclic has ceased firing on the specified CPU.  Its purpose is to
 *    allow cleanup of any resources dynamically allocated in the omni cyclic
 *    online handler.  The context of the offline handler is identical to
 *    that of the online handler; the same constraints and liberties apply.
 *
 *    The offline handler is optional; it may be NULL.
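 *
 *  Example
 *
 *    For illustration only (foo_tick(), foo_alloc_pcpu() and foo_free_pcpu()
 *    are hypothetical consumer routines), a pair of omni handlers arranging
 *    a once-per-second cyclic with per-CPU state might look like:
 *
 *      static void
 *      foo_online(void *arg, cpu_t *c, cyc_handler_t *hdlr, cyc_time_t *t)
 *      {
 *              hdlr->cyh_func = foo_tick;
 *              hdlr->cyh_arg = foo_alloc_pcpu(arg, c);
 *              t->cyt_when = 0;
 *              t->cyt_interval = NANOSEC;
 *      }
 *
 *      static void
 *      foo_offline(void *arg, cpu_t *c, void *pcpu)
 *      {
 *              foo_free_pcpu(pcpu);
 *      }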
 *
 *  Return value
 *
 *    cyclic_add_omni() returns a cyclic_id_t, which is guaranteed to be a
 *    value other than CYCLIC_NONE.  cyclic_add_omni() cannot fail.
 *
 *  Caller's context
 *
 *    The caller's context is identical to that of cyclic_add(), specified
 *    above.
 */
cyclic_id_t
cyclic_add_omni(cyc_omni_handler_t *omni)
{
	cyc_id_t *idp = cyclic_new_id();
	cyc_cpu_t *cpu;
	cpu_t *c;
	int i;

	ASSERT(MUTEX_HELD(&cpu_lock));
	ASSERT(omni != NULL && omni->cyo_online != NULL);

	idp->cyi_omni_hdlr = *omni;

	CPU_FOREACH(i) {
		c = &solaris_cpu[i];
		if ((cpu = c->cpu_cyclic) == NULL)
			continue;
		cyclic_omni_start(idp, cpu);
	}

	/*
	 * We must have found at least one online CPU on which to run
	 * this cyclic.
	 */
	ASSERT(idp->cyi_omni_list != NULL);
	ASSERT(idp->cyi_cpu == NULL);

	return ((uintptr_t)idp);
}

/*
 *  void cyclic_remove(cyclic_id_t)
 *
 *  Overview
 *
 *    cyclic_remove() will remove the specified cyclic from the system.
 *
 *  Arguments and notes
 *
 *    The only argument is a cyclic_id returned from either cyclic_add() or
 *    cyclic_add_omni().
 *
 *    By the time cyclic_remove() returns, the caller is guaranteed that the
 *    removed cyclic handler has completed execution (this is the same
 *    semantic that untimeout() provides).  As a result, cyclic_remove() may
 *    need to block, waiting for the removed cyclic to complete execution.
 *    This leads to an important constraint on the caller:  no lock may be
 *    held across cyclic_remove() that also may be acquired by a cyclic
 *    handler.
 *
 *  Return value
 *
 *    None; cyclic_remove() always succeeds.
 *
 *  Caller's context
 *
 *    cpu_lock must be held by the caller, and the caller must not be in
 *    interrupt context.  The caller may not hold any locks which are also
 *    grabbed by any cyclic handler.  See "Arguments and notes", above.
 */
void
cyclic_remove(cyclic_id_t id)
{
	cyc_id_t *idp = (cyc_id_t *)id;
	cyc_id_t *prev = idp->cyi_prev, *next = idp->cyi_next;
	cyc_cpu_t *cpu = idp->cyi_cpu;

	ASSERT(MUTEX_HELD(&cpu_lock));

	if (cpu != NULL) {
		(void) cyclic_remove_here(cpu, idp->cyi_ndx, NULL, CY_WAIT);
	} else {
		ASSERT(idp->cyi_omni_list != NULL);
		while (idp->cyi_omni_list != NULL)
			cyclic_omni_stop(idp, idp->cyi_omni_list->cyo_cpu);
	}

	if (prev != NULL) {
		ASSERT(cyclic_id_head != idp);
		prev->cyi_next = next;
	} else {
		ASSERT(cyclic_id_head == idp);
		cyclic_id_head = next;
	}

	if (next != NULL)
		next->cyi_prev = prev;

	kmem_cache_free(cyclic_id_cache, idp);
}
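
/*
 * Purely illustrative (foo_lock, foo_id and foo_handler() are hypothetical
 * consumer names):  the constraint documented above rules out patterns such
 * as
 *
 *	mtx_lock(&foo_lock);		<-- also acquired by foo_handler()
 *	cyclic_remove(foo_id);		<-- may wait on foo_handler()
 *	mtx_unlock(&foo_lock);
 *
 * which could deadlock:  cyclic_remove() may block until foo_handler()
 * completes, while foo_handler() blocks on foo_lock.
 */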

static void
cyclic_init(cyc_backend_t *be)
{
	ASSERT(MUTEX_HELD(&cpu_lock));

	/*
	 * Copy the passed cyc_backend into the backend template.  This must
	 * be done before the CPU can be configured.
	 */
	bcopy(be, &cyclic_backend, sizeof (cyc_backend_t));

	cyclic_configure(&solaris_cpu[curcpu]);
}

/*
 * It is assumed that cyclic_mp_init() is called some time after cyclic
 * init (and therefore, after cpu0 has been initialized).  We grab cpu_lock,
 * find the already initialized CPU, and initialize every other CPU with the
 * same backend.
 */
static void
cyclic_mp_init(void)
{
	cpu_t *c;
	int i;

	mutex_enter(&cpu_lock);

	CPU_FOREACH(i) {
		c = &solaris_cpu[i];
		if (c->cpu_cyclic == NULL)
			cyclic_configure(c);
	}

	mutex_exit(&cpu_lock);
}

static void
cyclic_uninit(void)
{
	cpu_t *c;
	int id;

	CPU_FOREACH(id) {
		c = &solaris_cpu[id];
		if (c->cpu_cyclic == NULL)
			continue;
		cyclic_unconfigure(c);
	}

	if (cyclic_id_cache != NULL)
		kmem_cache_destroy(cyclic_id_cache);
}

#include "cyclic_machdep.c"

/*
 * Cyclic subsystem initialisation.
 */
static void
cyclic_load(void *dummy)
{
	mutex_enter(&cpu_lock);

	/* Initialise the machine-dependent backend. */
	cyclic_machdep_init();

	mutex_exit(&cpu_lock);
}

SYSINIT(cyclic_register, SI_SUB_CYCLIC, SI_ORDER_SECOND, cyclic_load, NULL);

static void
cyclic_unload(void)
{
	mutex_enter(&cpu_lock);

	/* Uninitialise the machine-dependent backend. */
	cyclic_machdep_uninit();

	mutex_exit(&cpu_lock);
}

SYSUNINIT(cyclic_unregister, SI_SUB_CYCLIC, SI_ORDER_SECOND, cyclic_unload, NULL);

/* ARGSUSED */
static int
cyclic_modevent(module_t mod __unused, int type, void *data __unused)
{
	int error = 0;

	switch (type) {
	case MOD_LOAD:
		break;

	case MOD_UNLOAD:
		break;

	case MOD_SHUTDOWN:
		break;

	default:
		error = EOPNOTSUPP;
		break;

	}
	return (error);
}

DEV_MODULE(cyclic, cyclic_modevent, NULL);
MODULE_VERSION(cyclic, 1);
MODULE_DEPEND(cyclic, opensolaris, 1, 1, 1);