/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */

/*
 * Copyright 2019 Joyent, Inc.
 */

/*
 * The Cyclic Subsystem
 * --------------------
 *
 * Prehistory
 *
 * Historically, most computer architectures have specified interval-based
 * timer parts (e.g. SPARCstation's counter/timer; Intel's i8254). While
 * these parts deal in relative (i.e. not absolute) time values, they are
 * typically used by the operating system to implement the abstraction of
 * absolute time. As a result, these parts cannot typically be reprogrammed
 * without introducing error in the system's notion of time.
 *
 * Starting in about 1994, chip architectures began specifying high resolution
 * timestamp registers. As of this writing (1999), all major chip families
 * (UltraSPARC, PentiumPro, MIPS, PowerPC, Alpha) have high resolution
 * timestamp registers, and two (UltraSPARC and MIPS) have added the capacity
 * to interrupt based on timestamp values. These timestamp-compare registers
 * present a time-based interrupt source which can be reprogrammed arbitrarily
 * often without introducing error. Given the low cost of implementing such a
 * timestamp-compare register (and the tangible benefit of eliminating
 * discrete timer parts), it is reasonable to expect that future chip
 * architectures will adopt this feature.
 *
 * The cyclic subsystem has been designed to take advantage of chip
 * architectures with the capacity to interrupt based on absolute, high
 * resolution values of time.
 *
 * Subsystem Overview
 *
 * The cyclic subsystem is a low-level kernel subsystem designed to provide
 * arbitrarily high resolution, per-CPU interval timers (to avoid colliding
 * with existing terms, we dub such an interval timer a "cyclic"). Cyclics
 * can be specified to fire at high, lock or low interrupt level, and may be
 * optionally bound to a CPU or a CPU partition. A cyclic's CPU or CPU
 * partition binding may be changed dynamically; the cyclic will be "juggled"
 * to a CPU which satisfies the new binding. Alternatively, a cyclic may
 * be specified to be "omnipresent", denoting firing on all online CPUs.
 *
 * Cyclic Subsystem Interface Overview
 * -----------------------------------
 *
 * The cyclic subsystem has interfaces with the kernel at-large, with other
 * kernel subsystems (e.g. the processor management subsystem, the checkpoint
 * resume subsystem) and with the platform (the cyclic backend). Each
 * of these interfaces is given a brief synopsis here, and is described
 * in full above the interface's implementation.
 *
 * The following diagram displays the cyclic subsystem's interfaces to
 * other kernel components. The arrows denote a "calls" relationship, with
 * the large arrow indicating the cyclic subsystem's consumer interface.
 * Each arrow is labeled with the section in which the corresponding
 * interface is described.
 *
 *           Kernel at-large consumers
 *           -----------++------------
 *                      ||
 *                      ||
 *                     _||_
 *                     \  /
 *                      \/
 *            +---------------------+
 *            |                     |
 *            |  Cyclic subsystem   |<-----------  Other kernel subsystems
 *            |                     |
 *            +---------------------+
 *                   ^       |
 *                   |       |
 *                   |       |
 *                   |       v
 *            +---------------------+
 *            |                     |
 *            |   Cyclic backend    |
 *            | (platform specific) |
 *            |                     |
 *            +---------------------+
 *
 *
 * Kernel At-Large Interfaces
 *
 *    cyclic_add()         <-- Creates a cyclic
 *    cyclic_add_omni()    <-- Creates an omnipresent cyclic
 *    cyclic_remove()      <-- Removes a cyclic
 *    cyclic_bind()        <-- Change a cyclic's CPU or partition binding
 *    cyclic_reprogram()   <-- Reprogram a cyclic's expiration
 *    cyclic_move_here()   <-- Shuffle cyclic to current CPU
 *
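 * As a brief usage sketch (hypothetical handler and interval; see the full
 * interface descriptions below), a kernel at-large consumer creates and
 * later destroys a cyclic while holding cpu_lock:
 *
 *      cyc_handler_t hdlr;
 *      cyc_time_t when;
 *      cyclic_id_t id;
 *
 *      hdlr.cyh_func = my_tick;          <-- hypothetical handler
 *      hdlr.cyh_arg = NULL;
 *      hdlr.cyh_level = CY_LOW_LEVEL;
 *
 *      when.cyt_when = 0;                <-- first firing time; see
 *      when.cyt_interval = NANOSEC;          cyclic_add() for semantics
 *
 *      mutex_enter(&cpu_lock);
 *      id = cyclic_add(&hdlr, &when);
 *      mutex_exit(&cpu_lock);
 *      ...
 *      mutex_enter(&cpu_lock);
 *      cyclic_remove(id);
 *      mutex_exit(&cpu_lock);
 *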
 * Inter-subsystem Interfaces
 *
 *    cyclic_juggle()      <-- Juggles cyclics away from a CPU
 *    cyclic_offline()     <-- Offlines cyclic operation on a CPU
 *    cyclic_online()      <-- Reenables operation on an offlined CPU
 *    cyclic_move_in()     <-- Notifies subsystem of change in CPU partition
 *    cyclic_move_out()    <-- Notifies subsystem of change in CPU partition
 *    cyclic_suspend()     <-- Suspends the cyclic subsystem on all CPUs
 *    cyclic_resume()      <-- Resumes the cyclic subsystem on all CPUs
 *
 * Backend Interfaces
 *
 *    cyclic_init()        <-- Initializes the cyclic subsystem
 *    cyclic_fire()        <-- CY_HIGH_LEVEL interrupt entry point
 *    cyclic_softint()     <-- CY_LOCK/LOW_LEVEL soft interrupt entry point
 *
 * The backend-supplied interfaces (through the cyc_backend structure) are
 * documented in detail in <sys/cyclic_impl.h>.
 *
 *
 * Cyclic Subsystem Implementation Overview
 * ----------------------------------------
 *
 * The cyclic subsystem is designed to minimize interference between cyclics
 * on different CPUs. Thus, all of the cyclic subsystem's data structures
 * hang off of a per-CPU structure, cyc_cpu.
 *
 * Each cyc_cpu has a power-of-two sized array of cyclic structures (the
 * cyp_cyclics member of the cyc_cpu structure). If cyclic_add() is called
 * and there does not exist a free slot in the cyp_cyclics array, the size of
 * the array will be doubled. The array will never shrink. Cyclics are
 * referred to by their index in the cyp_cyclics array, which is of type
 * cyc_index_t.
 *
 * The cyclics are kept sorted by expiration time in the cyc_cpu's heap. The
 * heap is keyed by cyclic expiration time, with parents expiring earlier
 * than their children.
 *
 * Heap Management
 *
 * The heap is managed primarily by cyclic_fire(). Upon entry, cyclic_fire()
 * compares the root cyclic's expiration time to the current time. If the
 * expiration time is in the past, cyclic_expire() is called on the root
 * cyclic. Upon return from cyclic_expire(), the cyclic's new expiration time
 * is derived by adding its interval to its old expiration time, and a
 * downheap operation is performed. After the downheap, cyclic_fire()
 * examines the (potentially changed) root cyclic, repeating the
 * cyclic_expire()/add interval/cyclic_downheap() sequence until the root
 * cyclic has an expiration time in the future. This expiration time
 * (guaranteed to be the earliest in the heap) is then communicated to the
 * backend via cyb_reprogram. Optimal backends will next call cyclic_fire()
 * shortly after the root cyclic's expiration time.
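 *
 * In rough outline (a sketch of the control flow only; the real
 * cyclic_fire() below must also manage pend counts and reprogrammed
 * cyclics):
 *
 *      now = gethrtime();
 *      while (nelems > 0 && cyclics[heap[0]].cy_expire <= now) {
 *              cyclic_expire(cpu, heap[0], &cyclics[heap[0]]);
 *              cyclics[heap[0]].cy_expire += cyclics[heap[0]].cy_interval;
 *              cyclic_downheap(cpu, 0);
 *      }
 *      be->cyb_reprogram(be->cyb_arg, cyclics[heap[0]].cy_expire);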
 *
 * To allow efficient, deterministic downheap operations, we implement the
 * heap as an array (the cyp_heap member of the cyc_cpu structure), with each
 * element containing an index into the CPU's cyp_cyclics array.
 *
 * The heap is laid out in the array according to the following:
 *
 *   1. The root of the heap is always in the 0th element of the heap array.
 *   2. The left and right children of the nth element are element
 *      (((n + 1) << 1) - 1) and element ((n + 1) << 1), respectively.
 *
 * This layout is standard (see, e.g., Cormen's "Algorithms"); the proof
 * that these constraints correctly lay out a heap (or indeed, any binary
 * tree) is trivial and left to the reader.
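 *
 * In code, this index arithmetic reduces to the shift-based macros used
 * throughout this file; they are defined in <sys/cyclic_impl.h> and are,
 * modulo formatting, equivalent to (the parent computation follows by
 * inverting the child computation):
 *
 *      #define CYC_HEAP_PARENT(ndx)    (((ndx) - 1) >> 1)
 *      #define CYC_HEAP_LEFT(ndx)      ((((ndx) + 1) << 1) - 1)
 *      #define CYC_HEAP_RIGHT(ndx)     (((ndx) + 1) << 1)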
 *
 * To see the heap by example, assume our cyclics array has the following
 * members (at time t):
 *
 *           cy_handler             cy_level      cy_expire
 *           ---------------------------------------------------
 *    [ 0]   clock()                 LOCK         t+10000000
 *    [ 1]   deadman()               HIGH         t+1000000000
 *    [ 2]   clock_highres_fire()    LOW          t+100
 *    [ 3]   clock_highres_fire()    LOW          t+1000
 *    [ 4]   clock_highres_fire()    LOW          t+500
 *    [ 5]   (free)                  --           --
 *    [ 6]   (free)                  --           --
 *    [ 7]   (free)                  --           --
 *
 * The heap array could be:
 *
 *         [0]   [1]   [2]   [3]   [4]   [5]   [6]   [7]
 *       +-----+-----+-----+-----+-----+-----+-----+-----+
 *       |     |     |     |     |     |     |     |     |
 *       |  2  |  3  |  4  |  0  |  1  |  x  |  x  |  x  |
 *       |     |     |     |     |     |     |     |     |
 *       +-----+-----+-----+-----+-----+-----+-----+-----+
 *
 * Graphically, this array corresponds to the following (excuse the ASCII art):
 *
 *                                      2
 *                                      |
 *                   +------------------+------------------+
 *                   3                                     4
 *                   |
 *          +--------+---------+
 *          0                  1
 *
 * Note that the heap is laid out by layer: all nodes at a given depth are
 * stored in consecutive elements of the array. Moreover, layers of
 * consecutive depths are in adjacent element ranges. This property
 * guarantees high locality of reference during downheap operations.
 * Specifically, we are guaranteed that we can downheap to a depth of
 *
 *      lg (cache_line_size / sizeof (cyc_index_t))
 *
 * nodes with at most one cache miss. On UltraSPARC (64 byte e-cache line
 * size), this corresponds to a depth of four nodes. Thus, if there are
 * fewer than sixteen cyclics in the heap, downheaps on UltraSPARC miss at
 * most once in the e-cache.
 *
 * Downheaps are required to compare siblings as they proceed down the
 * heap. For downheaps proceeding beyond the one-cache-miss depth, every
 * access to a left child could potentially miss in the cache. However,
 * if we assume
 *
 *      (cache_line_size / sizeof (cyc_index_t)) > 2,
 *
 * then all siblings are guaranteed to be on the same cache line. Thus, the
 * miss on the left child will guarantee a hit on the right child; downheaps
 * will incur at most one cache miss per layer beyond the one-cache-miss
 * depth. The total number of cache misses for heap management during a
 * downheap operation is thus bounded by
 *
 *      lg (n) - lg (cache_line_size / sizeof (cyc_index_t))
 *
 * Traditional pointer-based heaps are implemented without regard to
 * locality. Downheaps can thus incur two cache misses per layer (one for
 * each child), but at most one cache miss at the root. This yields a bound
 * of
 *
 *      2 * lg (n) - 1
 *
 * on the total cache misses.
 *
 * This difference may seem theoretically trivial (the difference is, after
 * all, constant), but can become substantial in practice -- especially for
 * caches with very large cache lines and high miss penalties (e.g. TLBs).
 *
 * Heaps must always be full, balanced trees. Heap management must therefore
 * track the next point-of-insertion into the heap. In pointer-based heaps,
 * recomputing this point takes O(lg (n)). Given the layout of the
 * array-based implementation, however, the next point-of-insertion is
 * always:
 *
 *      heap[number_of_elements]
 *
 * We exploit this property by implementing the free-list in the unused
 * heap elements. Heap insertion, therefore, consists only of filling in
 * the cyclic at cyp_cyclics[cyp_heap[number_of_elements]], incrementing
 * the number of elements, and performing an upheap. Heap deletion consists
 * of decrementing the number of elements, swapping the to-be-deleted element
 * with the element at cyp_heap[number_of_elements], and downheaping.
 *
 * Filling in more details in our earlier example:
 *
 *                                         +--- free list head
 *                                         |
 *                                         V
 *
 *         [0]   [1]   [2]   [3]   [4]   [5]   [6]   [7]
 *       +-----+-----+-----+-----+-----+-----+-----+-----+
 *       |     |     |     |     |     |     |     |     |
 *       |  2  |  3  |  4  |  0  |  1  |  5  |  6  |  7  |
 *       |     |     |     |     |     |     |     |     |
 *       +-----+-----+-----+-----+-----+-----+-----+-----+
 *
 * To insert into this heap, we would just need to fill in the cyclic at
 * cyp_cyclics[5], bump the number of elements (from 5 to 6) and perform
 * an upheap.
 *
 * If we wanted to remove, say, cyp_cyclics[3], we would first scan for it
 * in the cyp_heap, and discover it at cyp_heap[1]. We would then decrement
 * the number of elements (from 5 to 4), swap cyp_heap[1] with cyp_heap[4],
 * and perform a downheap from cyp_heap[1]. The linear scan is required
 * because the cyclic does not keep a backpointer into the heap. This makes
 * heap manipulation (e.g. downheaps) faster at the expense of removal
 * operations.
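 *
 * In code, the insert half of this scheme reduces to something like the
 * following sketch (simplified from cyclic_add_here(), below; expansion
 * and backend reprogramming are elided):
 *
 *      nelems = cpu->cyp_nelems++;
 *      ndx = cpu->cyp_heap[nelems];      <-- free list head is insert point
 *      ... fill in cpu->cyp_cyclics[ndx] ...
 *      if (cyclic_upheap(cpu, nelems))
 *              ... root changed; reprogram the backend ...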
 *
 * Expiry processing
 *
 * As alluded to above, cyclic_expire() is called by cyclic_fire() at
 * CY_HIGH_LEVEL to expire a cyclic. Cyclic subsystem consumers are
 * guaranteed that for an arbitrary time t in the future, their cyclic
 * handler will have been called (t - cyt_when) / cyt_interval times. Thus,
 * there must be a one-to-one mapping between a cyclic's expiration at
 * CY_HIGH_LEVEL and its execution at the desired level (either CY_HIGH_LEVEL,
 * CY_LOCK_LEVEL or CY_LOW_LEVEL).
 *
 * For CY_HIGH_LEVEL cyclics, this is trivial; cyclic_expire() simply needs
 * to call the handler.
 *
 * For CY_LOCK_LEVEL and CY_LOW_LEVEL cyclics, however, there exists a
 * potential disconnect: if the CPU is at an interrupt level less than
 * CY_HIGH_LEVEL but greater than the level of a cyclic for a period of
 * time longer than twice the cyclic's interval, the cyclic will be expired
 * twice before it can be handled.
 *
 * To maintain the one-to-one mapping, we track the difference between the
 * number of times a cyclic has been expired and the number of times it's
 * been handled in a "pending count" (the cy_pend field of the cyclic
 * structure). cyclic_expire() thus increments the cy_pend count for the
 * expired cyclic and posts a soft interrupt at the desired level. In the
 * cyclic subsystem's soft interrupt handler, cyclic_softint(), we repeatedly
 * call the cyclic handler and decrement cy_pend until we have decremented
 * cy_pend to zero.
 *
 * The Producer/Consumer Buffer
 *
 * If we wish to avoid a linear scan of the cyclics array at soft interrupt
 * level, cyclic_softint() must be able to quickly determine which cyclics
 * have a non-zero cy_pend count. We thus introduce a per-soft interrupt
 * level producer/consumer buffer shared with CY_HIGH_LEVEL. These buffers
 * are encapsulated in the cyc_pcbuffer structure, and, like cyp_heap, are
 * implemented as cyc_index_t arrays (the cypc_buf member of the cyc_pcbuffer
 * structure).
 *
 * The producer (cyclic_expire() running at CY_HIGH_LEVEL) enqueues a cyclic
 * by storing the cyclic's index to cypc_buf[cypc_prodndx] and incrementing
 * cypc_prodndx. The consumer (cyclic_softint() running at either
 * CY_LOCK_LEVEL or CY_LOW_LEVEL) dequeues a cyclic by loading from
 * cypc_buf[cypc_consndx] and bumping cypc_consndx. The buffer is empty when
 * cypc_prodndx == cypc_consndx.
 *
 * To bound the size of the producer/consumer buffer, cyclic_expire() only
 * enqueues a cyclic if its cy_pend was zero (if the cyclic's cy_pend is
 * non-zero, cyclic_expire() only bumps cy_pend). Symmetrically,
 * cyclic_softint() only consumes a cyclic after it has decremented the
 * cy_pend count to zero.
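 *
 * A sketch of the producer side (simplified from cyclic_expire(), below;
 * the buffer indices are shown unmasked, and the hard buffer lookup is
 * elided):
 *
 *      if (cyclic->cy_pend++ == 0) {
 *              cyc_pcbuffer_t *pc = ... the level's hard buffer ...;
 *
 *              pc->cypc_buf[pc->cypc_prodndx++] = ndx;  <-- enqueue once
 *              ... post a soft interrupt at the cyclic's level ...
 *      }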
 *
 * Returning to our example, here is what the CY_LOW_LEVEL producer/consumer
 * buffer might look like:
 *
 *      cypc_consndx ---+                 +--- cypc_prodndx
 *                      |                 |
 *                      V                 V
 *
 *         [0]   [1]   [2]   [3]   [4]   [5]   [6]   [7]
 *       +-----+-----+-----+-----+-----+-----+-----+-----+
 *       |     |     |     |     |     |     |     |     |
 *       |  x  |  x  |  3  |  2  |  4  |  x  |  x  |  x  |   <== cypc_buf
 *       |     |     |  .  |  .  |  .  |     |     |     |
 *       +-----+-----+- | -+- | -+- | -+-----+-----+-----+
 *                      |     |     |
 *                      |     |     |     cy_pend   cy_handler
 *                      |     |     |    -------------------------
 *                      |     |     |    [ 0]   1   clock()
 *                      |     |     |    [ 1]   0   deadman()
 *                      |     +---- | -> [ 2]   3   clock_highres_fire()
 *                      +---------- | -> [ 3]   1   clock_highres_fire()
 *                                  +--> [ 4]   1   clock_highres_fire()
 *                                       [ 5]   -   (free)
 *                                       [ 6]   -   (free)
 *                                       [ 7]   -   (free)
 *
 * In particular, note that clock()'s cy_pend is 1 but that it is _not_ in
 * this producer/consumer buffer; it would be enqueued in the CY_LOCK_LEVEL
 * producer/consumer buffer.
 *
 * Locking
 *
 * Traditionally, access to per-CPU data structures shared between
 * interrupt levels is serialized by manipulating programmable interrupt
 * level: readers and writers are required to raise their interrupt level
 * to that of the highest level writer.
 *
 * For the producer/consumer buffers (shared between cyclic_fire()/
 * cyclic_expire() executing at CY_HIGH_LEVEL and cyclic_softint() executing
 * at one of CY_LOCK_LEVEL or CY_LOW_LEVEL), forcing cyclic_softint() to raise
 * programmable interrupt level is undesirable: aside from the additional
 * latency incurred by manipulating interrupt level in the hot cy_pend
 * processing path, this would create the potential for soft level cy_pend
 * processing to delay CY_HIGH_LEVEL firing and expiry processing.
 * CY_LOCK/LOW_LEVEL cyclics could thereby induce jitter in CY_HIGH_LEVEL
 * cyclics.
 *
 * To minimize jitter, then, we would like the cyclic_fire()/cyclic_expire()
 * and cyclic_softint() code paths to be lock-free.
 *
 * For cyclic_fire()/cyclic_expire(), lock-free execution is straightforward:
 * because these routines execute at a higher interrupt level than
 * cyclic_softint(), their actions on the producer/consumer buffer appear
 * atomic. In particular, the increment of cy_pend appears to occur
 * atomically with the increment of cypc_prodndx.
 *
 * For cyclic_softint(), however, lock-free execution requires more delicacy.
 * When cyclic_softint() discovers a cyclic in the producer/consumer buffer,
 * it calls the cyclic's handler and attempts to atomically decrement the
 * cy_pend count with a compare&swap operation.
 *
 * If the compare&swap operation succeeds, cyclic_softint() behaves
 * conditionally based on the value it atomically wrote to cy_pend:
 *
 *   - If the cy_pend was decremented to 0, the cyclic has been consumed;
 *     cyclic_softint() increments the cypc_consndx and checks for more
 *     enqueued work.
 *
 *   - If the count was decremented to a non-zero value, there is more work
 *     to be done on the cyclic; cyclic_softint() calls the cyclic handler
 *     and repeats the atomic decrement process.
 *
 * If the compare&swap operation fails, cyclic_softint() knows that
 * cyclic_expire() has intervened and bumped the cy_pend count (resizes
 * and removals complicate this, however -- see the sections on their
 * operation, below). cyclic_softint() thus reloads cy_pend, and re-attempts
 * the atomic decrement.
 *
 * Recall that we bound the size of the producer/consumer buffer by
 * having cyclic_expire() only enqueue the specified cyclic if its
 * cy_pend count is zero; this assures that each cyclic is enqueued at
 * most once. This leads to a critical constraint on cyclic_softint(),
 * however: after the compare&swap operation which successfully decrements
 * cy_pend to zero, cyclic_softint() must _not_ re-examine the consumed
 * cyclic. In part to obey this constraint, cyclic_softint() calls the
 * cyclic handler before decrementing cy_pend.
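 *
 * A sketch of this consumer loop (simplified from cyclic_softint(), below,
 * which must additionally notice resizes and removals when the
 * compare&swap fails):
 *
 *      pend = cyclic->cy_pend;
 *      while (pend != 0) {
 *              (cyclic->cy_handler)(cyclic->cy_arg);    <-- handler first
 *              while (atomic_cas_32(&cyclic->cy_pend, pend,
 *                  pend - 1) != pend) {
 *                      pend = cyclic->cy_pend;          <-- expire intervened
 *                      if (pend == 0)
 *                              ... resize or removal; see below ...
 *              }
 *              pend--;
 *      }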
 *
 * Resizing
 *
 * All of the discussion thus far has assumed a static number of cyclics.
 * Obviously, static limitations are not practical; we need the capacity
 * to resize our data structures dynamically.
 *
 * We resize our data structures lazily, and only on a per-CPU basis.
 * The size of the data structures always doubles and never shrinks. We
 * serialize adds (and thus resizes) on cpu_lock; we never need to deal
 * with concurrent resizes. Resizes should be rare; they may induce jitter
 * on the CPU being resized, but should not affect cyclic operation on other
 * CPUs. Pending cyclics may not be dropped during a resize operation.
 *
 * Three key cyc_cpu data structures need to be resized: the cyclics array,
 * the heap array and the producer/consumer buffers. Resizing the first two
 * is relatively straightforward:
 *
 *   1. The new, larger arrays are allocated in cyclic_expand() (called
 *      from cyclic_add()).
 *   2. cyclic_expand() cross calls cyclic_expand_xcall() on the CPU
 *      undergoing the resize.
 *   3. cyclic_expand_xcall() raises interrupt level to CY_HIGH_LEVEL.
 *   4. The contents of the old arrays are copied into the new arrays.
 *   5. The old cyclics array is bzero()'d.
 *   6. The pointers are updated.
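 *
 * A sketch of steps (3) through (6) as they might appear in
 * cyclic_expand_xcall() (hedged: the backend level-setting operations are
 * named per <sys/cyclic_impl.h>, and the buffer handling described below
 * is omitted):
 *
 *      cookie = be->cyb_set_level(be->cyb_arg, CY_HIGH_LEVEL);
 *      bcopy(cpu->cyp_cyclics, new_cyclics, osize * sizeof (cyclic_t));
 *      bcopy(cpu->cyp_heap, new_heap, osize * sizeof (cyc_index_t));
 *      bzero(cpu->cyp_cyclics, osize * sizeof (cyclic_t));
 *      cpu->cyp_cyclics = new_cyclics;
 *      cpu->cyp_heap = new_heap;
 *      be->cyb_restore_level(be->cyb_arg, cookie);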
 *
 * The producer/consumer buffer is dicier: cyclic_expand_xcall() may have
 * interrupted cyclic_softint() in the middle of consumption. To resize the
 * producer/consumer buffer, we implement up to two buffers per soft interrupt
 * level: a hard buffer (the buffer being produced into by cyclic_expire())
 * and a soft buffer (the buffer from which cyclic_softint() is consuming).
 * During normal operation, the hard buffer and soft buffer point to the
 * same underlying producer/consumer buffer.
 *
 * During a resize, however, cyclic_expand_xcall() changes the hard buffer
 * to point to the new, larger producer/consumer buffer; all future
 * cyclic_expire()'s will produce into the new buffer. cyclic_expand_xcall()
 * then posts a CY_LOCK_LEVEL soft interrupt, landing in cyclic_softint().
 *
 * As under normal operation, cyclic_softint() will consume cyclics from
 * its soft buffer. After the soft buffer is drained, however,
 * cyclic_softint() will see that the hard buffer has changed. At that time,
 * cyclic_softint() will change its soft buffer to point to the hard buffer,
 * and repeat the producer/consumer buffer draining procedure.
 *
 * After the new buffer is drained, cyclic_softint() will determine if both
 * soft levels have seen their new producer/consumer buffer. If both have,
 * cyclic_softint() will post on the semaphore cyp_modify_wait. If not, a
 * soft interrupt will be generated for the remaining level.
 *
 * cyclic_expand() blocks on the cyp_modify_wait semaphore (a semaphore is
 * used instead of a condition variable because of the race between the
 * sema_p() in cyclic_expand() and the sema_v() in cyclic_softint()). This
 * allows cyclic_expand() to know when the resize operation is complete;
 * all of the old buffers (the heap, the cyclics array and the producer/
 * consumer buffers) can be freed.
 *
 * A final caveat on resizing: we described step (5) in the
 * cyclic_expand_xcall() procedure without providing any motivation. This
 * step addresses the problem of a cyclic_softint() attempting to decrement
 * a cy_pend count while interrupted by a cyclic_expand_xcall(). Because
 * cyclic_softint() has already called the handler by the time cy_pend is
 * decremented, we want to assure that it doesn't decrement a cy_pend
 * count in the old cyclics array. By zeroing the old cyclics array in
 * cyclic_expand_xcall(), we are zeroing out every cy_pend count; when
 * cyclic_softint() attempts to compare&swap on the cy_pend count, it will
 * fail and recognize that the count has been zeroed. cyclic_softint() will
 * update its stale copy of the cyp_cyclics pointer, re-read the cy_pend
 * count from the new cyclics array, and re-attempt the compare&swap.
 *
 * Removals
 *
 * Cyclic removals should be rare. To simplify the implementation (and to
 * allow optimization for the cyclic_fire()/cyclic_expire()/cyclic_softint()
 * path), we force removals and adds to serialize on cpu_lock.
 *
 * Cyclic removal is complicated by a guarantee made to the consumer of
 * the cyclic subsystem: after cyclic_remove() returns, the cyclic handler
 * has returned and will never again be called.
 *
 * Here is the procedure for cyclic removal:
 *
 *   1. cyclic_remove() calls cyclic_remove_xcall() on the CPU undergoing
 *      the removal.
 *   2. cyclic_remove_xcall() raises interrupt level to CY_HIGH_LEVEL.
 *   3. The current expiration time for the removed cyclic is recorded.
 *   4. If the cy_pend count on the removed cyclic is non-zero, it
 *      is copied into cyp_rpend and subsequently zeroed.
 *   5. The cyclic is removed from the heap.
 *   6. If the root of the heap has changed, the backend is reprogrammed.
 *   7. If the cy_pend count was non-zero, cyclic_remove() blocks on the
 *      cyp_modify_wait semaphore.
 *
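 * A sketch of steps (3) and (4) as they might appear in
 * cyclic_remove_xcall() (simplified; heap removal and backend
 * reprogramming are elided, and "when" stands in for wherever the caller
 * records the expiration):
 *
 *      when->cyt_when = cyclic->cy_expire;  <-- step (3)
 *      if ((cpu->cyp_rpend = cyclic->cy_pend) != 0)
 *              cyclic->cy_pend = 0;         <-- step (4): cas now fails
 *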
 * The motivation for step (3) is explained in "Juggling", below.
 *
 * The cy_pend count is decremented in cyclic_softint() after the cyclic
 * handler returns. Thus, if we find a cy_pend count of zero in step
 * (4), we know that cyclic_remove() doesn't need to block.
 *
 * If the cy_pend count is non-zero, however, we must block in cyclic_remove()
 * until cyclic_softint() has finished calling the cyclic handler. To let
 * cyclic_softint() know that this cyclic has been removed, we zero the
 * cy_pend count. This will cause cyclic_softint()'s compare&swap to fail.
 * When cyclic_softint() sees the zero cy_pend count, it knows that it's been
 * caught during a resize (see "Resizing", above) or that the cyclic has been
 * removed. In the latter case, it calls cyclic_remove_pend() to call the
 * cyclic handler cyp_rpend - 1 times, and posts on cyp_modify_wait.
 *
 * Juggling
 *
 * At first glance, cyclic juggling seems to be a difficult problem. The
 * subsystem must guarantee that a cyclic doesn't execute simultaneously on
 * different CPUs, while also assuring that a cyclic fires exactly once
 * per interval. We solve this problem by leveraging a property of the
 * platform: gethrtime() is required to increase in lock-step across
 * multiple CPUs. Therefore, to juggle a cyclic, we remove it from its
 * CPU, recording its expiration time in the remove cross call (step (3)
 * in "Removals", above). We then add the cyclic to the new CPU, explicitly
 * setting its expiration time to the time recorded in the removal. This
 * leverages the existing cyclic expiry processing, which will compensate
 * for any time lost while juggling.
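 *
 * A sketch of the juggling sequence (hypothetical src/dest cyc_cpu_t
 * pointers; the internal helpers named here are defined below):
 *
 *      cyclic_remove_here(src, ndx, &when, CY_WAIT);  <-- records expiration
 *      ...
 *      cyclic_add_here(dest, &hdlr, &when, flags);    <-- reuses it verbatim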
 *
 * Reprogramming
 *
 * Normally, after a cyclic fires, its next expiration is computed from
 * the current time and the cyclic interval. But there are situations when
 * the next expiration needs to be reprogrammed by the kernel subsystem that
 * is using the cyclic. cyclic_reprogram() allows this to be done. This,
 * unlike the other kernel at-large cyclic API functions, is permitted to
 * be called from the cyclic handler. This is because it does not use the
 * cpu_lock to serialize access.
 *
 * When cyclic_reprogram() is called for an omni-cyclic, the operation is
 * applied to the omni-cyclic's component on the current CPU.
 *
 * If a high-level cyclic handler reprograms its own cyclic, then
 * cyclic_fire() detects that and does not recompute the cyclic's next
 * expiration. However, for a lock-level or a low-level cyclic, the
 * actual cyclic handler will execute at the lower PIL only after
 * cyclic_fire() is done with all expired cyclics. To deal with this, such
 * cyclics can be specified with a special interval of CY_INFINITY (INT64_MAX).
 * cyclic_fire() recognizes this special value and recomputes the next
 * expiration to CY_INFINITY. This effectively moves the cyclic to the
 * bottom of the heap and prevents it from going off until its handler has
 * had a chance to reprogram it. In fact, this is the way to create and reuse
 * "one-shot" timers in the context of the cyclic subsystem without using
 * cyclic_remove().
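 *
 * A sketch of that one-shot pattern (hypothetical handler and rearm delta;
 * a minimal sketch, not the only way to structure it):
 *
 *      when.cyt_when = <first firing time>;
 *      when.cyt_interval = CY_INFINITY;    <-- one-shot: never auto-rearms
 *      id = cyclic_add(&hdlr, &when);      <-- cpu_lock held, as above
 *
 * and then, from the handler (or from elsewhere), rearm each shot:
 *
 *      cyclic_reprogram(id, gethrtime() + delta);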
 *
 * Here is the procedure for cyclic reprogramming:
 *
 *   1. cyclic_reprogram() calls cyclic_reprogram_xcall() on the CPU
 *      that houses the cyclic.
 *   2. cyclic_reprogram_xcall() raises interrupt level to CY_HIGH_LEVEL.
 *   3. The cyclic is located in the cyclic heap. The search for this is
 *      done from the bottom of the heap to the top as reprogrammable cyclics
 *      would be located closer to the bottom than the top.
 *   4. The cyclic expiration is set and the cyclic is moved to its
 *      correct position in the heap (up or down depending on whether the
 *      new expiration is less than or greater than the old one).
 *   5. If the cyclic move modified the root of the heap, the backend is
 *      reprogrammed.
 *
 * Reprogramming can be a frequent event (see the callout subsystem). So,
 * the serialization used has to be efficient. As with all other cyclic
 * operations, the interrupt level is raised during reprogramming. Plus,
 * during reprogramming, the cyclic must not be juggled (regular cyclic)
 * or stopped (omni-cyclic). The implementation defines a per-cyclic
 * reader-writer lock to accomplish this. This lock is acquired in the
 * reader mode by cyclic_reprogram() and writer mode by cyclic_juggle() and
 * cyclic_omni_stop(). The reader-writer lock makes it efficient if
 * an omni-cyclic is reprogrammed on different CPUs frequently.
 *
 * Note that since the cpu_lock is not used during reprogramming, it is
 * the responsibility of the user of the reprogrammable cyclic to make sure
 * that the cyclic is not removed via cyclic_remove() during reprogramming.
 * This is not an unreasonable requirement as the user will typically have
 * some sort of synchronization for its cyclic-related activities. This
 * little caveat exists because the cyclic ID is not really an ID. It is
 * implemented as a pointer to a structure.
 *
 * For cyclics which reprogram themselves during their own handler function,
 * avoiding the potential race with cyclic_remove() can be a challenge. If a
 * handler is running and a remote thread issues a cyclic_remove() on its
 * cyclic (interrupting the handler with the removal xcall), subsequent
 * attempts to reprogram the cyclic from within the handler will result in a
 * failure return code from cyclic_reprogram().
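 *
 * A sketch of defensive handler logic under that race (hedged: this
 * assumes the zero-on-failure return convention implied above):
 *
 *      if (cyclic_reprogram(id, gethrtime() + delta) == 0) {
 *              ... the cyclic was removed out from under us; "id" is
 *              dead and must not be used again ...
 *      }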
 */
#include <sys/cyclic_impl.h>
#include <sys/sysmacros.h>
#include <sys/systm.h>
#include <sys/atomic.h>
#include <sys/kmem.h>
#include <sys/cmn_err.h>
#include <sys/ddi.h>
#include <sys/sdt.h>

#ifdef CYCLIC_TRACE

/*
 * cyc_trace_enabled is for the benefit of kernel debuggers.
 */
int cyc_trace_enabled = 1;
static cyc_tracebuf_t cyc_ptrace;
static cyc_coverage_t cyc_coverage[CY_NCOVERAGE];

/*
 * Seen this anywhere? (It's the classic P. J. Weinberger string hash.)
 */
static uint_t
cyclic_coverage_hash(char *p)
{
	unsigned int g;
	uint_t hval;

	hval = 0;
	while (*p) {
		hval = (hval << 4) + *p++;
		if ((g = (hval & 0xf0000000)) != 0)
			hval ^= g >> 24;
		hval &= ~g;
	}
	return (hval);
}

static void
cyclic_coverage(char *why, int level, uint64_t arg0, uint64_t arg1)
{
	uint_t ndx, orig;

	for (ndx = orig = cyclic_coverage_hash(why) % CY_NCOVERAGE; ; ) {
		if (cyc_coverage[ndx].cyv_why == why)
			break;

		if (cyc_coverage[ndx].cyv_why != NULL ||
		    atomic_cas_ptr(&cyc_coverage[ndx].cyv_why, NULL, why) !=
		    NULL) {

			if (++ndx == CY_NCOVERAGE)
				ndx = 0;

			if (ndx == orig)
				panic("too many cyclic coverage points");
			continue;
		}

		/*
		 * If we're here, we have successfully swung our guy into
		 * the position at "ndx".
		 */
		break;
	}

	if (level == CY_PASSIVE_LEVEL)
		cyc_coverage[ndx].cyv_passive_count++;
	else
		cyc_coverage[ndx].cyv_count[level]++;

	cyc_coverage[ndx].cyv_arg0 = arg0;
	cyc_coverage[ndx].cyv_arg1 = arg1;
}

#define	CYC_TRACE(cpu, level, why, arg0, arg1) \
	CYC_TRACE_IMPL(&cpu->cyp_trace[level], level, why, arg0, arg1)

#define	CYC_PTRACE(why, arg0, arg1) \
	CYC_TRACE_IMPL(&cyc_ptrace, CY_PASSIVE_LEVEL, why, arg0, arg1)

#define	CYC_TRACE_IMPL(buf, level, why, a0, a1) { \
	if (panicstr == NULL) { \
		int _ndx = (buf)->cyt_ndx; \
		cyc_tracerec_t *_rec = &(buf)->cyt_buf[_ndx]; \
		(buf)->cyt_ndx = (++_ndx == CY_NTRACEREC) ? 0 : _ndx; \
		_rec->cyt_tstamp = gethrtime_unscaled(); \
		_rec->cyt_why = (why); \
		_rec->cyt_arg0 = (uint64_t)(uintptr_t)(a0); \
		_rec->cyt_arg1 = (uint64_t)(uintptr_t)(a1); \
		cyclic_coverage(why, level, \
		    (uint64_t)(uintptr_t)(a0), (uint64_t)(uintptr_t)(a1)); \
	} \
}

#else

static int cyc_trace_enabled = 0;

#define	CYC_TRACE(cpu, level, why, arg0, arg1)
#define	CYC_PTRACE(why, arg0, arg1)

#endif

#define	CYC_TRACE0(cpu, level, why) CYC_TRACE(cpu, level, why, 0, 0)
#define	CYC_TRACE1(cpu, level, why, arg0) CYC_TRACE(cpu, level, why, arg0, 0)

#define	CYC_PTRACE0(why) CYC_PTRACE(why, 0, 0)
#define	CYC_PTRACE1(why, arg0) CYC_PTRACE(why, arg0, 0)

static kmem_cache_t *cyclic_id_cache;
static cyc_id_t *cyclic_id_head;
static hrtime_t cyclic_resolution;
static cyc_backend_t cyclic_backend;

/*
 * Returns 1 if the upheap propagated to the root, 0 if it did not. This
 * allows the caller to reprogram the backend only when the root has been
 * modified.
 */
static int
cyclic_upheap(cyc_cpu_t *cpu, cyc_index_t ndx)
{
	cyclic_t *cyclics;
	cyc_index_t *heap;
	cyc_index_t heap_parent, heap_current = ndx;
	cyc_index_t parent, current;

	if (heap_current == 0)
		return (1);

	heap = cpu->cyp_heap;
	cyclics = cpu->cyp_cyclics;
	heap_parent = CYC_HEAP_PARENT(heap_current);

	for (;;) {
		current = heap[heap_current];
		parent = heap[heap_parent];

		/*
		 * We have an expiration time later than our parent; we're
		 * done.
		 */
		if (cyclics[current].cy_expire >= cyclics[parent].cy_expire)
			return (0);

		/*
		 * We need to swap with our parent, and continue up the heap.
		 */
		heap[heap_parent] = current;
		heap[heap_current] = parent;

		/*
		 * If we just reached the root, we're done.
		 */
		if (heap_parent == 0)
			return (1);

		heap_current = heap_parent;
		heap_parent = CYC_HEAP_PARENT(heap_current);
	}
}
7837c478bd9Sstevel@tonic-gate
7847c478bd9Sstevel@tonic-gate static void
cyclic_downheap(cyc_cpu_t * cpu,cyc_index_t ndx)7857c478bd9Sstevel@tonic-gate cyclic_downheap(cyc_cpu_t *cpu, cyc_index_t ndx)
7867c478bd9Sstevel@tonic-gate {
7877c478bd9Sstevel@tonic-gate cyclic_t *cyclics = cpu->cyp_cyclics;
7887c478bd9Sstevel@tonic-gate cyc_index_t *heap = cpu->cyp_heap;
7897c478bd9Sstevel@tonic-gate
7907c478bd9Sstevel@tonic-gate cyc_index_t heap_left, heap_right, heap_me = ndx;
7917c478bd9Sstevel@tonic-gate cyc_index_t left, right, me;
7927c478bd9Sstevel@tonic-gate cyc_index_t nelems = cpu->cyp_nelems;
7937c478bd9Sstevel@tonic-gate
7947c478bd9Sstevel@tonic-gate for (;;) {
7957c478bd9Sstevel@tonic-gate /*
7967c478bd9Sstevel@tonic-gate * If we don't have a left child (i.e., we're a leaf), we're
7977c478bd9Sstevel@tonic-gate * done.
7987c478bd9Sstevel@tonic-gate */
7997c478bd9Sstevel@tonic-gate if ((heap_left = CYC_HEAP_LEFT(heap_me)) >= nelems)
8007c478bd9Sstevel@tonic-gate return;
8017c478bd9Sstevel@tonic-gate
8027c478bd9Sstevel@tonic-gate left = heap[heap_left];
8037c478bd9Sstevel@tonic-gate me = heap[heap_me];
8047c478bd9Sstevel@tonic-gate
8057c478bd9Sstevel@tonic-gate heap_right = CYC_HEAP_RIGHT(heap_me);
8067c478bd9Sstevel@tonic-gate
8077c478bd9Sstevel@tonic-gate /*
8087c478bd9Sstevel@tonic-gate * Even if we don't have a right child, we still need to compare
8097c478bd9Sstevel@tonic-gate * our expiration time against that of our left child.
8107c478bd9Sstevel@tonic-gate */
8117c478bd9Sstevel@tonic-gate if (heap_right >= nelems)
8127c478bd9Sstevel@tonic-gate goto comp_left;
8137c478bd9Sstevel@tonic-gate
8147c478bd9Sstevel@tonic-gate right = heap[heap_right];
8157c478bd9Sstevel@tonic-gate
8167c478bd9Sstevel@tonic-gate /*
8177c478bd9Sstevel@tonic-gate * We have both a left and a right child. We need to compare
8187c478bd9Sstevel@tonic-gate * the expiration times of the children to determine which
8197c478bd9Sstevel@tonic-gate * expires earlier.
8207c478bd9Sstevel@tonic-gate */
8217c478bd9Sstevel@tonic-gate if (cyclics[right].cy_expire < cyclics[left].cy_expire) {
8227c478bd9Sstevel@tonic-gate /*
8237c478bd9Sstevel@tonic-gate * Our right child is the earlier of our children.
8247c478bd9Sstevel@tonic-gate * We'll now compare our expiration time to its; if
8257c478bd9Sstevel@tonic-gate * ours is the earlier, we're done.
8267c478bd9Sstevel@tonic-gate */
8277c478bd9Sstevel@tonic-gate if (cyclics[me].cy_expire <= cyclics[right].cy_expire)
8287c478bd9Sstevel@tonic-gate return;
8297c478bd9Sstevel@tonic-gate
8307c478bd9Sstevel@tonic-gate /*
8317c478bd9Sstevel@tonic-gate * Our right child expires earlier than we do; swap
8327c478bd9Sstevel@tonic-gate * with our right child, and descend right.
8337c478bd9Sstevel@tonic-gate */
8347c478bd9Sstevel@tonic-gate heap[heap_right] = me;
8357c478bd9Sstevel@tonic-gate heap[heap_me] = right;
8367c478bd9Sstevel@tonic-gate heap_me = heap_right;
8377c478bd9Sstevel@tonic-gate continue;
8387c478bd9Sstevel@tonic-gate }
8397c478bd9Sstevel@tonic-gate
8407c478bd9Sstevel@tonic-gate comp_left:
8417c478bd9Sstevel@tonic-gate /*
8427c478bd9Sstevel@tonic-gate * Our left child is the earlier of our children (or we have
8437c478bd9Sstevel@tonic-gate * no right child). We'll now compare our expiration time
8447c478bd9Sstevel@tonic-gate * to its; if ours is the earlier, we're done.
8457c478bd9Sstevel@tonic-gate */
8467c478bd9Sstevel@tonic-gate if (cyclics[me].cy_expire <= cyclics[left].cy_expire)
8477c478bd9Sstevel@tonic-gate return;
8487c478bd9Sstevel@tonic-gate
8497c478bd9Sstevel@tonic-gate /*
8507c478bd9Sstevel@tonic-gate * Our left child expires earlier than we do; swap with our
8517c478bd9Sstevel@tonic-gate * left child, and descend left.
8527c478bd9Sstevel@tonic-gate */
8537c478bd9Sstevel@tonic-gate heap[heap_left] = me;
8547c478bd9Sstevel@tonic-gate heap[heap_me] = left;
8557c478bd9Sstevel@tonic-gate heap_me = heap_left;
8567c478bd9Sstevel@tonic-gate }
8577c478bd9Sstevel@tonic-gate }
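
/*
 * For reference, cyclic_upheap() and cyclic_downheap() assume the
 * conventional 0-indexed binary heap layout.  A minimal sketch of the
 * index arithmetic (the CYC_HEAP_* macros themselves are defined earlier
 * in this file; the expressions below are shown purely for illustration):
 *
 *	parent(ndx) = (ndx - 1) / 2
 *	left(ndx)   = 2 * ndx + 1
 *	right(ndx)  = 2 * ndx + 2
 *
 * For example, the element at heap index 5 has its parent at index 2,
 * and its children (when present) at indices 11 and 12.
 */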

static void
cyclic_expire(cyc_cpu_t *cpu, cyc_index_t ndx, cyclic_t *cyclic)
{
	cyc_backend_t *be = cpu->cyp_backend;
	cyc_level_t level = cyclic->cy_level;

	/*
	 * If this is a CY_HIGH_LEVEL cyclic, just call the handler; we don't
	 * need to worry about the pend count for CY_HIGH_LEVEL cyclics.
	 */
	if (level == CY_HIGH_LEVEL) {
		cyc_func_t handler = cyclic->cy_handler;
		void *arg = cyclic->cy_arg;

		CYC_TRACE(cpu, CY_HIGH_LEVEL, "handler-in", handler, arg);
		DTRACE_PROBE1(cyclic__start, cyclic_t *, cyclic);

		(*handler)(arg);

		DTRACE_PROBE1(cyclic__end, cyclic_t *, cyclic);
		CYC_TRACE(cpu, CY_HIGH_LEVEL, "handler-out", handler, arg);

		return;
	}

	/*
	 * We're at CY_HIGH_LEVEL; this modification to cy_pend need not
	 * be atomic (the high interrupt level assures that it will appear
	 * atomic to any softint currently running).
	 */
	if (cyclic->cy_pend++ == 0) {
		cyc_softbuf_t *softbuf = &cpu->cyp_softbuf[level];
		cyc_pcbuffer_t *pc = &softbuf->cys_buf[softbuf->cys_hard];

		/*
		 * We need to enqueue this cyclic in the soft buffer.
		 */
		CYC_TRACE(cpu, CY_HIGH_LEVEL, "expire-enq", cyclic,
		    pc->cypc_prodndx);
		pc->cypc_buf[pc->cypc_prodndx++ & pc->cypc_sizemask] = ndx;

		ASSERT(pc->cypc_prodndx != pc->cypc_consndx);
	} else {
		/*
		 * If the pend count is zero after we incremented it, then
		 * we've wrapped (i.e. we had a cy_pend count of over four
		 * billion).  In this case, we clamp the pend count at
		 * UINT32_MAX.  Yes, cyclics can be lost in this case.
		 */
		if (cyclic->cy_pend == 0) {
			CYC_TRACE1(cpu, CY_HIGH_LEVEL, "expire-wrap", cyclic);
			cyclic->cy_pend = UINT32_MAX;
		}

		CYC_TRACE(cpu, CY_HIGH_LEVEL, "expire-bump", cyclic, 0);
	}

	be->cyb_softint(be->cyb_arg, cyclic->cy_level);
}
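
/*
 * A note on the producer/consumer manipulation above:  because
 * cypc_sizemask is one less than a power-of-two buffer size, the
 * producer index may be incremented without bound and masked on every
 * access.  A minimal sketch of the idiom, purely for illustration:
 *
 *	buf[prodndx++ & sizemask] = ndx;	(producer, at CY_HIGH_LEVEL)
 *	ndx = buf[consndx++ & sizemask];	(consumer, in the softint)
 *
 * With a buffer of size 8 (sizemask 0x7), producer indices 8, 9 and 10
 * store into slots 0, 1 and 2.  Equal indices denote an empty buffer,
 * which is why the ASSERT after the store above demands that the
 * producer and consumer indices differ.
 */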

/*
 *  cyclic_fire(cpu_t *)
 *
 *  Overview
 *
 *    cyclic_fire() is the cyclic subsystem's CY_HIGH_LEVEL interrupt handler.
 *    Called by the cyclic backend.
 *
 *  Arguments and notes
 *
 *    The only argument is the CPU on which the interrupt is executing;
 *    backends must call into cyclic_fire() on the specified CPU.
 *
 *    cyclic_fire() may be called spuriously without ill effect.  Optimal
 *    backends will call into cyclic_fire() at or shortly after the time
 *    requested via cyb_reprogram().  However, calling cyclic_fire()
 *    arbitrarily late will only manifest latency bubbles; the correctness
 *    of the cyclic subsystem does not rely on the timeliness of the backend.
 *
 *    cyclic_fire() is wait-free; it will not block or spin.
 *
 *  Return values
 *
 *    None.
 *
 *  Caller's context
 *
 *    cyclic_fire() must be called from CY_HIGH_LEVEL interrupt context.
 */
void
cyclic_fire(cpu_t *c)
{
	cyc_cpu_t *cpu = c->cpu_cyclic;
	cyc_backend_t *be = cpu->cyp_backend;
	cyc_index_t *heap = cpu->cyp_heap;
	cyclic_t *cyclic, *cyclics = cpu->cyp_cyclics;
	void *arg = be->cyb_arg;
	hrtime_t now = gethrtime();
	hrtime_t exp;

	CYC_TRACE(cpu, CY_HIGH_LEVEL, "fire", now, 0);

	if (cpu->cyp_nelems == 0) {
		/*
		 * This is a spurious fire.  Count it as such, and blow
		 * out of here.
		 */
		CYC_TRACE0(cpu, CY_HIGH_LEVEL, "fire-spurious");
		return;
	}

	for (;;) {
		cyc_index_t ndx = heap[0];

		cyclic = &cyclics[ndx];

		ASSERT(!(cyclic->cy_flags & CYF_FREE));

		CYC_TRACE(cpu, CY_HIGH_LEVEL, "fire-check", cyclic,
		    cyclic->cy_expire);

		if ((exp = cyclic->cy_expire) > now)
			break;

		cyclic_expire(cpu, ndx, cyclic);

		/*
		 * If the handler reprogrammed the cyclic, then don't
		 * recompute the expiration.  Otherwise, if the interval is
		 * infinity, set the expiration to infinity.  This can
		 * be used to create one-shot timers.
		 */
		if (exp != cyclic->cy_expire) {
			/*
			 * If a hi level cyclic reprograms itself,
			 * the heap adjustment and reprogramming of the
			 * clock source have already been done at this
			 * point.  So, we can continue.
			 */
			continue;
		}

		if (cyclic->cy_interval == CY_INFINITY)
			exp = CY_INFINITY;
		else
			exp += cyclic->cy_interval;

		/*
		 * If this cyclic will be set to next expire in the distant
		 * past, we have one of two situations:
		 *
		 *   (a)	This is the first firing of a cyclic which had
		 *		cy_expire set to 0.
		 *
		 *   (b)	We are tragically late for a cyclic -- most
		 *		likely due to being in the debugger.
		 *
		 * In either case, we set the new expiration time to be the
		 * next interval boundary.  This assures that the expiration
		 * time modulo the interval is invariant.
		 *
		 * We arbitrarily define "distant" to be one second (one second
		 * is chosen because it's shorter than any foray to the
		 * debugger while still being longer than any legitimate
		 * stretch at CY_HIGH_LEVEL).
		 */
		if (now - exp > NANOSEC) {
			hrtime_t interval = cyclic->cy_interval;

			CYC_TRACE(cpu, CY_HIGH_LEVEL, exp == interval ?
			    "fire-first" : "fire-swing", now, exp);

			exp += ((now - exp) / interval + 1) * interval;
		}

		cyclic->cy_expire = exp;
		cyclic_downheap(cpu, 0);
	}

	/*
	 * Now we have a cyclic in the root slot which isn't in the past;
	 * reprogram the interrupt source.
	 */
	be->cyb_reprogram(arg, exp);
}
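
/*
 * A worked example of the "fire-swing" arithmetic above, purely for
 * illustration:  suppose that at the realignment check, a cyclic with a
 * 10ms interval has exp = 100ms while now = 3407ms (say, after a long
 * stop in the debugger).  Then (now - exp) / interval + 1 == 331, and
 * the new expiration becomes 100ms + 331 * 10ms = 3410ms:  the first
 * interval boundary after "now" that preserves the original phase (the
 * expiration time modulo the interval is unchanged).
 */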

static void
cyclic_remove_pend(cyc_cpu_t *cpu, cyc_level_t level, cyclic_t *cyclic)
{
	cyc_func_t handler = cyclic->cy_handler;
	void *arg = cyclic->cy_arg;
	uint32_t i, rpend = cpu->cyp_rpend - 1;

	ASSERT(cyclic->cy_flags & CYF_FREE);
	ASSERT(cyclic->cy_pend == 0);
	ASSERT(cpu->cyp_state == CYS_REMOVING);
	ASSERT(cpu->cyp_rpend > 0);

	CYC_TRACE(cpu, level, "remove-rpend", cyclic, cpu->cyp_rpend);

	/*
	 * Note that we only call the handler cyp_rpend - 1 times; this is
	 * to account for the handler call in cyclic_softint().
	 */
	for (i = 0; i < rpend; i++) {
		CYC_TRACE(cpu, level, "rpend-in", handler, arg);
		DTRACE_PROBE1(cyclic__start, cyclic_t *, cyclic);

		(*handler)(arg);

		DTRACE_PROBE1(cyclic__end, cyclic_t *, cyclic);
		CYC_TRACE(cpu, level, "rpend-out", handler, arg);
	}

	/*
	 * We can now let the remove operation complete.
	 */
	sema_v(&cpu->cyp_modify_wait);
}
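
/*
 * A worked example of the rpend accounting, purely for illustration:
 * if a cyclic is removed while its cy_pend is 3, the remove-xcall
 * stashes cyp_rpend = 3 and zeroes cy_pend.  cyclic_softint() calls
 * the handler once, reads a pend of 0 in the CYS_REMOVING state, and
 * calls into cyclic_remove_pend() -- which calls the handler
 * rpend - 1 == 2 more times, for the required total of 3 calls.
 */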

/*
 *  cyclic_softint(cpu_t *cpu, cyc_level_t level)
 *
 *  Overview
 *
 *    cyclic_softint() is the cyclic subsystem's CY_LOCK_LEVEL and CY_LOW_LEVEL
 *    soft interrupt handler.  Called by the cyclic backend.
 *
 *  Arguments and notes
 *
 *    The first argument to cyclic_softint() is the CPU on which the interrupt
 *    is executing; backends must call into cyclic_softint() on the specified
 *    CPU.  The second argument is the level of the soft interrupt; it must
 *    be one of CY_LOCK_LEVEL or CY_LOW_LEVEL.
 *
 *    cyclic_softint() will call the handlers for cyclics pending at the
 *    specified level.  cyclic_softint() will not return until all pending
 *    cyclics at the specified level have been dealt with; intervening
 *    CY_HIGH_LEVEL interrupts which enqueue cyclics at the specified level
 *    may therefore prolong cyclic_softint().
 *
 *    cyclic_softint() never disables interrupts, and, if neither a
 *    cyclic_add() nor a cyclic_remove() is pending on the specified CPU, is
 *    lock-free.  This assures that in the common case, cyclic_softint()
 *    completes without blocking, and never starves cyclic_fire().  If either
 *    cyclic_add() or cyclic_remove() is pending, cyclic_softint() may grab
 *    a dispatcher lock.
 *
 *    While cyclic_softint() is designed for bounded latency, it is obviously
 *    at the mercy of its cyclic handlers.  Because cyclic handlers may block
 *    arbitrarily, callers of cyclic_softint() should not rely upon
 *    deterministic completion.
 *
 *    cyclic_softint() may be called spuriously without ill effect.
 *
 *  Return value
 *
 *    None.
 *
 *  Caller's context
 *
 *    The caller must be executing in soft interrupt context at either
 *    CY_LOCK_LEVEL or CY_LOW_LEVEL.  The level passed to cyclic_softint()
 *    must match the level at which it is executing.  On optimal backends,
 *    the caller will hold no locks.  In any case, the caller may not hold
 *    cpu_lock or any lock acquired by any cyclic handler or held across
 *    any of cyclic_add(), cyclic_remove(), cyclic_bind() or cyclic_juggle().
 */
void
cyclic_softint(cpu_t *c, cyc_level_t level)
{
	cyc_cpu_t *cpu = c->cpu_cyclic;
	cyc_softbuf_t *softbuf;
	int soft, *buf, consndx, resized = 0, intr_resized = 0;
	cyc_pcbuffer_t *pc;
	cyclic_t *cyclics = cpu->cyp_cyclics;
	int sizemask;

	CYC_TRACE(cpu, level, "softint", cyclics, 0);

	ASSERT(level < CY_LOW_LEVEL + CY_SOFT_LEVELS);

	softbuf = &cpu->cyp_softbuf[level];
top:
	soft = softbuf->cys_soft;
	ASSERT(soft == 0 || soft == 1);

	pc = &softbuf->cys_buf[soft];
	buf = pc->cypc_buf;
	consndx = pc->cypc_consndx;
	sizemask = pc->cypc_sizemask;

	CYC_TRACE(cpu, level, "softint-top", cyclics, pc);

	while (consndx != pc->cypc_prodndx) {
		uint32_t pend, npend, opend;
		int consmasked = consndx & sizemask;
		cyclic_t *cyclic = &cyclics[buf[consmasked]];
		cyc_func_t handler = cyclic->cy_handler;
		void *arg = cyclic->cy_arg;

		ASSERT(buf[consmasked] < cpu->cyp_size);
		CYC_TRACE(cpu, level, "consuming", consndx, cyclic);

		/*
		 * We have found this cyclic in the pcbuffer.  We know that
		 * one of the following is true:
		 *
		 *  (a)	The pend is non-zero.  We need to execute the handler
		 *	at least once.
		 *
		 *  (b)	The pend _was_ non-zero, but it's now zero due to a
		 *	resize.  We will call the handler once, see that we
		 *	are in this case, and read the new cyclics buffer
		 *	(and hence the old non-zero pend).
		 *
		 *  (c)	The pend _was_ non-zero, but it's now zero due to a
		 *	removal.  We will call the handler once, see that we
		 *	are in this case, and call into cyclic_remove_pend()
		 *	to call the cyclic rpend times.  We will take into
		 *	account that we have already called the handler once.
		 *
		 * Point is:  it's safe to call the handler without first
		 * checking the pend.
		 */
		do {
			CYC_TRACE(cpu, level, "handler-in", handler, arg);
			DTRACE_PROBE1(cyclic__start, cyclic_t *, cyclic);

			(*handler)(arg);

			DTRACE_PROBE1(cyclic__end, cyclic_t *, cyclic);
			CYC_TRACE(cpu, level, "handler-out", handler, arg);
reread:
			pend = cyclic->cy_pend;
			npend = pend - 1;

			if (pend == 0) {
				if (cpu->cyp_state == CYS_REMOVING) {
					/*
					 * This cyclic has been removed while
					 * it had a non-zero pend count (we
					 * know it was non-zero because we
					 * found this cyclic in the pcbuffer).
					 * There must be a non-zero rpend for
					 * this CPU, and there must be a remove
					 * operation blocking; we'll call into
					 * cyclic_remove_pend() to clean this
					 * up, and break out of the pend loop.
					 */
					cyclic_remove_pend(cpu, level, cyclic);
					break;
				}

				/*
				 * We must have had a resize interrupt us.
				 */
				CYC_TRACE(cpu, level, "resize-int", cyclics, 0);
				ASSERT(cpu->cyp_state == CYS_EXPANDING);
				ASSERT(cyclics != cpu->cyp_cyclics);
				ASSERT(resized == 0);
				ASSERT(intr_resized == 0);
				intr_resized = 1;
				cyclics = cpu->cyp_cyclics;
				cyclic = &cyclics[buf[consmasked]];
				ASSERT(cyclic->cy_handler == handler);
				ASSERT(cyclic->cy_arg == arg);
				goto reread;
			}

			if ((opend =
			    atomic_cas_32(&cyclic->cy_pend, pend, npend)) !=
			    pend) {
				/*
				 * Our atomic_cas_32 can fail for one of
				 * several reasons:
				 *
				 *  (a)	An intervening high level bumped up the
				 *	pend count on this cyclic.  In this
				 *	case, we will see a higher pend.
				 *
				 *  (b)	The cyclics array has been yanked out
				 *	from underneath us by a resize
				 *	operation.  In this case, pend is 0 and
				 *	cyp_state is CYS_EXPANDING.
				 *
				 *  (c)	The cyclic has been removed by an
				 *	intervening remove-xcall.  In this
				 *	case, pend will be 0, the cyp_state
				 *	will be CYS_REMOVING, and the cyclic
				 *	will be marked CYF_FREE.
				 *
				 * The assertion below checks that we are
				 * in one of the above situations.  The
				 * action under all three is to return to
				 * the top of the loop.
				 */
				CYC_TRACE(cpu, level, "cas-fail", opend, pend);
				ASSERT(opend > pend || (opend == 0 &&
				    ((cyclics != cpu->cyp_cyclics &&
				    cpu->cyp_state == CYS_EXPANDING) ||
				    (cpu->cyp_state == CYS_REMOVING &&
				    (cyclic->cy_flags & CYF_FREE)))));
				goto reread;
			}

			/*
			 * Okay, so we've managed to successfully decrement
			 * pend.  If we just decremented the pend to 0, we're
			 * done.
			 */
		} while (npend > 0);

		pc->cypc_consndx = ++consndx;
	}

	/*
	 * If the high level handler is no longer writing to the same
	 * buffer, then we've had a resize.  We need to switch our soft
	 * index, and goto top.
	 */
	if (soft != softbuf->cys_hard) {
		/*
		 * We can assert that the other buffer has grown by exactly
		 * one factor of two.
		 */
		CYC_TRACE(cpu, level, "buffer-grow", 0, 0);
		ASSERT(cpu->cyp_state == CYS_EXPANDING);
		ASSERT(softbuf->cys_buf[softbuf->cys_hard].cypc_sizemask ==
		    (softbuf->cys_buf[soft].cypc_sizemask << 1) + 1 ||
		    softbuf->cys_buf[soft].cypc_sizemask == 0);
		ASSERT(softbuf->cys_hard == (softbuf->cys_soft ^ 1));

		/*
		 * If our cached cyclics pointer doesn't match cyp_cyclics,
		 * then we took a resize between our last iteration of the
		 * pend loop and the check against softbuf->cys_hard.
		 */
		if (cpu->cyp_cyclics != cyclics) {
			CYC_TRACE1(cpu, level, "resize-int-int", consndx);
			cyclics = cpu->cyp_cyclics;
		}

		softbuf->cys_soft = softbuf->cys_hard;

		ASSERT(resized == 0);
		resized = 1;
		goto top;
	}

	/*
	 * If we were interrupted by a resize operation, then we must have
	 * seen the hard index change.
	 */
	ASSERT(!(intr_resized == 1 && resized == 0));

	if (resized) {
		uint32_t lev, nlev;

		ASSERT(cpu->cyp_state == CYS_EXPANDING);

		do {
			lev = cpu->cyp_modify_levels;
			nlev = lev + 1;
		} while (atomic_cas_32(&cpu->cyp_modify_levels, lev, nlev) !=
		    lev);

		/*
		 * If we are the last soft level to see the modification,
		 * post on cyp_modify_wait.  Otherwise, (if we're not
		 * already at low level), post down to the next soft level.
		 */
		if (nlev == CY_SOFT_LEVELS) {
			CYC_TRACE0(cpu, level, "resize-kick");
			sema_v(&cpu->cyp_modify_wait);
		} else {
			ASSERT(nlev < CY_SOFT_LEVELS);
			if (level != CY_LOW_LEVEL) {
				cyc_backend_t *be = cpu->cyp_backend;

				CYC_TRACE0(cpu, level, "resize-post");
				be->cyb_softint(be->cyb_arg, level - 1);
			}
		}
	}
}
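
/*
 * The pend-consumption loop above is an instance of the familiar
 * lock-free compare-and-swap decrement idiom.  A minimal sketch of that
 * idiom in isolation (illustrative only -- the loop above must
 * additionally handle the resize and removal cases it describes):
 *
 *	do {
 *		pend = cyclic->cy_pend;
 *		npend = pend - 1;
 *	} while (atomic_cas_32(&cyclic->cy_pend, pend, npend) != pend);
 *
 * atomic_cas_32() returns the value it found; a return value other than
 * the expected "pend" means that an intervening update won the race,
 * and the read and decrement must be retried.
 */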

static void
cyclic_expand_xcall(cyc_xcallarg_t *arg)
{
	cyc_cpu_t *cpu = arg->cyx_cpu;
	cyc_backend_t *be = cpu->cyp_backend;
	cyb_arg_t bar = be->cyb_arg;
	cyc_cookie_t cookie;
	cyc_index_t new_size = arg->cyx_size, size = cpu->cyp_size, i;
	cyc_index_t *new_heap = arg->cyx_heap;
	cyclic_t *cyclics = cpu->cyp_cyclics, *new_cyclics = arg->cyx_cyclics;

	ASSERT(cpu->cyp_state == CYS_EXPANDING);

	/*
	 * This is a little dicey.  First, we'll raise our interrupt level
	 * to CY_HIGH_LEVEL.  This CPU already has a new heap, cyclic array,
	 * etc.; we just need to bcopy them across.  As for the softint
	 * buffers, we'll switch the active buffers.  The actual softints will
	 * take care of consuming any pending cyclics in the old buffer.
	 */
	cookie = be->cyb_set_level(bar, CY_HIGH_LEVEL);

	CYC_TRACE(cpu, CY_HIGH_LEVEL, "expand", new_size, 0);

	/*
	 * Assert that the new size is a power of 2.
	 */
	ASSERT((new_size & (new_size - 1)) == 0);
	ASSERT(new_size == (size << 1));
	ASSERT(cpu->cyp_heap != NULL && cpu->cyp_cyclics != NULL);

	bcopy(cpu->cyp_heap, new_heap, sizeof (cyc_index_t) * size);
	bcopy(cyclics, new_cyclics, sizeof (cyclic_t) * size);

	/*
	 * Now run through the old cyclics array, setting pend to 0.  To
	 * softints (which are executing at a lower priority level), the
	 * pends dropping to 0 will appear atomic with the cyp_cyclics
	 * pointer changing.
	 */
	for (i = 0; i < size; i++)
		cyclics[i].cy_pend = 0;

	/*
	 * Set up the free list, and set all of the new cyclics to be CYF_FREE.
	 */
	for (i = size; i < new_size; i++) {
		new_heap[i] = i;
		new_cyclics[i].cy_flags = CYF_FREE;
	}

	/*
	 * We can go ahead and plow the value of cyp_heap and cyp_cyclics;
	 * cyclic_expand() has kept a copy.
	 */
	cpu->cyp_heap = new_heap;
	cpu->cyp_cyclics = new_cyclics;
	cpu->cyp_size = new_size;

	/*
	 * We've switched over the heap and the cyclics array.  Now we need
	 * to switch over our active softint buffer pointers.
	 */
	for (i = CY_LOW_LEVEL; i < CY_LOW_LEVEL + CY_SOFT_LEVELS; i++) {
		cyc_softbuf_t *softbuf = &cpu->cyp_softbuf[i];
		uchar_t hard = softbuf->cys_hard;

		/*
		 * Assert that we're not in the middle of a resize operation.
		 */
		ASSERT(hard == softbuf->cys_soft);
		ASSERT(hard == 0 || hard == 1);
		ASSERT(softbuf->cys_buf[hard].cypc_buf != NULL);

		softbuf->cys_hard = hard ^ 1;

		/*
		 * The caller (cyclic_expand()) is responsible for setting
		 * up the new producer-consumer buffer; assert that it's
		 * been done correctly.
		 */
		ASSERT(softbuf->cys_buf[hard ^ 1].cypc_buf != NULL);
		ASSERT(softbuf->cys_buf[hard ^ 1].cypc_prodndx == 0);
		ASSERT(softbuf->cys_buf[hard ^ 1].cypc_consndx == 0);
	}

	/*
	 * That's all there is to it; now we just need to postdown to
	 * get the softint chain going.
	 */
	be->cyb_softint(bar, CY_HIGH_LEVEL - 1);
	be->cyb_restore_level(bar, cookie);
}

/*
 * cyclic_expand() will cross call onto the CPU to perform the actual
 * expand operation.
 */
static void
cyclic_expand(cyc_cpu_t *cpu)
{
	cyc_index_t new_size, old_size;
	cyc_index_t *new_heap, *old_heap;
	cyclic_t *new_cyclics, *old_cyclics;
	cyc_xcallarg_t arg;
	cyc_backend_t *be = cpu->cyp_backend;
	char old_hard;
	int i;

	ASSERT(MUTEX_HELD(&cpu_lock));
	ASSERT(cpu->cyp_state == CYS_ONLINE);

	cpu->cyp_state = CYS_EXPANDING;

	old_heap = cpu->cyp_heap;
	old_cyclics = cpu->cyp_cyclics;

	if ((new_size = ((old_size = cpu->cyp_size) << 1)) == 0) {
		new_size = CY_DEFAULT_PERCPU;
		ASSERT(old_heap == NULL && old_cyclics == NULL);
	}

	/*
	 * Check that the new_size is a power of 2.
	 */
	ASSERT((new_size & (new_size - 1)) == 0);

	new_heap = kmem_alloc(sizeof (cyc_index_t) * new_size, KM_SLEEP);
	new_cyclics = kmem_zalloc(sizeof (cyclic_t) * new_size, KM_SLEEP);

	/*
	 * We know that no other expansions are in progress (they serialize
	 * on cpu_lock), so we can safely read the softbuf metadata.
	 */
	old_hard = cpu->cyp_softbuf[0].cys_hard;

	for (i = CY_LOW_LEVEL; i < CY_LOW_LEVEL + CY_SOFT_LEVELS; i++) {
		cyc_softbuf_t *softbuf = &cpu->cyp_softbuf[i];
		char hard = softbuf->cys_hard;
		cyc_pcbuffer_t *pc = &softbuf->cys_buf[hard ^ 1];

		ASSERT(hard == old_hard);
		ASSERT(hard == softbuf->cys_soft);
		ASSERT(pc->cypc_buf == NULL);

		pc->cypc_buf =
		    kmem_alloc(sizeof (cyc_index_t) * new_size, KM_SLEEP);
		pc->cypc_prodndx = pc->cypc_consndx = 0;
		pc->cypc_sizemask = new_size - 1;
	}

	arg.cyx_cpu = cpu;
	arg.cyx_heap = new_heap;
	arg.cyx_cyclics = new_cyclics;
	arg.cyx_size = new_size;

	cpu->cyp_modify_levels = 0;

	be->cyb_xcall(be->cyb_arg, cpu->cyp_cpu,
	    (cyc_func_t)cyclic_expand_xcall, &arg);

	/*
	 * Now block, waiting for the resize operation to complete.
	 */
	sema_p(&cpu->cyp_modify_wait);
	ASSERT(cpu->cyp_modify_levels == CY_SOFT_LEVELS);

	/*
	 * The operation is complete; we can now free the old buffers.
	 */
	for (i = CY_LOW_LEVEL; i < CY_LOW_LEVEL + CY_SOFT_LEVELS; i++) {
		cyc_softbuf_t *softbuf = &cpu->cyp_softbuf[i];
		char hard = softbuf->cys_hard;
		cyc_pcbuffer_t *pc = &softbuf->cys_buf[hard ^ 1];

		ASSERT(hard == (old_hard ^ 1));
		ASSERT(hard == softbuf->cys_soft);

		if (pc->cypc_buf == NULL)
			continue;

		ASSERT(pc->cypc_sizemask == ((new_size - 1) >> 1));

		kmem_free(pc->cypc_buf,
		    sizeof (cyc_index_t) * (pc->cypc_sizemask + 1));
		pc->cypc_buf = NULL;
	}

	if (old_cyclics != NULL) {
		ASSERT(old_heap != NULL);
		ASSERT(old_size != 0);
		kmem_free(old_cyclics, sizeof (cyclic_t) * old_size);
		kmem_free(old_heap, sizeof (cyc_index_t) * old_size);
	}

	ASSERT(cpu->cyp_state == CYS_EXPANDING);
	cpu->cyp_state = CYS_ONLINE;
}
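
/*
 * A note on the doubling invariants above, purely for illustration:
 * per-CPU resources always grow by a factor of two, so if the old
 * producer-consumer buffers had cypc_sizemask == 0x7 (size 8), the new
 * ones have cypc_sizemask == 0xf (size 16).  This is why
 * cyclic_softint() can assert that the hard buffer's sizemask is
 * (soft buffer's sizemask << 1) + 1, and why the free loop above can
 * recover the old buffer size as its sizemask + 1, where that sizemask
 * must equal (new_size - 1) >> 1.
 */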

/*
 * cyclic_pick_cpu will attempt to pick a CPU according to the constraints
 * specified by the partition, bound CPU, and flags.  Additionally,
 * cyclic_pick_cpu() will not pick the avoid CPU; it will return NULL if
 * the avoid CPU is the only CPU which satisfies the constraints.
 *
 * If CYF_CPU_BOUND is set in flags, the specified CPU must be non-NULL.
 * If CYF_PART_BOUND is set in flags, the specified partition must be non-NULL.
 * If both CYF_CPU_BOUND and CYF_PART_BOUND are set, the specified CPU must
 * be in the specified partition.
 */
static cyc_cpu_t *
cyclic_pick_cpu(cpupart_t *part, cpu_t *bound, cpu_t *avoid, uint16_t flags)
{
	cpu_t *c, *start = (part != NULL) ? part->cp_cpulist : CPU;
	cpu_t *online = NULL;
	uintptr_t offset;

	CYC_PTRACE("pick-cpu", part, bound);

	ASSERT(!(flags & CYF_CPU_BOUND) || bound != NULL);
	ASSERT(!(flags & CYF_PART_BOUND) || part != NULL);

	/*
	 * If we're bound to our CPU, there isn't much choice involved.  We
	 * need to check that the CPU passed as bound is in the cpupart, and
	 * that the CPU that we're binding to has been configured.
	 */
	if (flags & CYF_CPU_BOUND) {
		CYC_PTRACE("pick-cpu-bound", bound, avoid);

		if ((flags & CYF_PART_BOUND) && bound->cpu_part != part)
			panic("cyclic_pick_cpu: "
			    "CPU binding contradicts partition binding");

		if (bound == avoid)
			return (NULL);

		if (bound->cpu_cyclic == NULL)
			panic("cyclic_pick_cpu: "
			    "attempt to bind to non-configured CPU");

		return (bound->cpu_cyclic);
	}

	if (flags & CYF_PART_BOUND) {
		CYC_PTRACE("pick-part-bound", bound, avoid);
		offset = offsetof(cpu_t, cpu_next_part);
	} else {
		offset = offsetof(cpu_t, cpu_next_onln);
	}

	c = start;
	do {
		if (c->cpu_cyclic == NULL)
			continue;

		if (c->cpu_cyclic->cyp_state == CYS_OFFLINE)
			continue;

		if (c == avoid)
			continue;

		if (c->cpu_flags & CPU_ENABLE)
			goto found;

		if (online == NULL)
			online = c;
	} while ((c = *(cpu_t **)((uintptr_t)c + offset)) != start);

	/*
	 * If we're here, we're in one of two situations:
	 *
	 *  (a)	We have a partition-bound cyclic, and there is no CPU in
	 *	our partition which is CPU_ENABLE'd.  If we saw another
	 *	non-CYS_OFFLINE CPU in our partition, we'll go with it.
	 *	If not, the avoid CPU must be the only non-CYS_OFFLINE
	 *	CPU in the partition; we're forced to return NULL.
	 *
	 *  (b)	We have a partition-unbound cyclic, in which case there
	 *	must only be one CPU CPU_ENABLE'd, and it must be the one
	 *	we're trying to avoid.  If cyclic_juggle()/cyclic_offline()
	 *	are called appropriately, this generally shouldn't happen
	 *	(the offline should fail before getting to this code).
	 *	At any rate:  we can't avoid the avoid CPU, so we return
	 *	NULL.
	 */
	if (!(flags & CYF_PART_BOUND)) {
		ASSERT(avoid->cpu_flags & CPU_ENABLE);
		return (NULL);
	}

	CYC_PTRACE("pick-no-intr", part, avoid);

	if ((c = online) != NULL)
		goto found;

	CYC_PTRACE("pick-fail", part, avoid);
	ASSERT(avoid->cpu_part == start->cpu_part);
	return (NULL);

found:
	CYC_PTRACE("pick-cpu-found", c, avoid);
	ASSERT(c != avoid);
	ASSERT(c->cpu_cyclic != NULL);

	return (c->cpu_cyclic);
}
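
/*
 * A note on the list traversal above, purely for illustration:  the
 * offsetof() idiom lets a single loop walk either the per-partition
 * CPU list (via cpu_next_part) or the online list (via cpu_next_onln).
 * The link to follow is selected once, before the loop:
 *
 *	offset = offsetof(cpu_t, cpu_next_onln);
 *	next = *(cpu_t **)((uintptr_t)c + offset);
 *
 * That is, the address of the chosen link field within the cpu_t is
 * computed at runtime and dereferenced to yield the next CPU; both
 * lists are circular, so the walk terminates when it returns to the
 * starting CPU.
 */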

static void
cyclic_add_xcall(cyc_xcallarg_t *arg)
{
	cyc_cpu_t *cpu = arg->cyx_cpu;
	cyc_handler_t *hdlr = arg->cyx_hdlr;
	cyc_time_t *when = arg->cyx_when;
	cyc_backend_t *be = cpu->cyp_backend;
	cyc_index_t ndx, nelems;
	cyc_cookie_t cookie;
	cyb_arg_t bar = be->cyb_arg;
	cyclic_t *cyclic;

	ASSERT(cpu->cyp_nelems < cpu->cyp_size);

	cookie = be->cyb_set_level(bar, CY_HIGH_LEVEL);

	CYC_TRACE(cpu, CY_HIGH_LEVEL,
	    "add-xcall", when->cyt_when, when->cyt_interval);

	nelems = cpu->cyp_nelems++;

	if (nelems == 0) {
		/*
		 * If this is the first element, we need to enable the
		 * backend on this CPU.
		 */
		CYC_TRACE0(cpu, CY_HIGH_LEVEL, "enabled");
		be->cyb_enable(bar);
	}

	ndx = cpu->cyp_heap[nelems];
	cyclic = &cpu->cyp_cyclics[ndx];

	ASSERT(cyclic->cy_flags == CYF_FREE);
	cyclic->cy_interval = when->cyt_interval;

	if (when->cyt_when == 0) {
		/*
		 * If a start time hasn't been explicitly specified, we'll
		 * start on the next interval boundary.
		 */
		cyclic->cy_expire = (gethrtime() / cyclic->cy_interval + 1) *
		    cyclic->cy_interval;
	} else {
		cyclic->cy_expire = when->cyt_when;
	}

	cyclic->cy_handler = hdlr->cyh_func;
	cyclic->cy_arg = hdlr->cyh_arg;
	cyclic->cy_level = hdlr->cyh_level;
	cyclic->cy_flags = arg->cyx_flags;

	if (cyclic_upheap(cpu, nelems)) {
		hrtime_t exp = cyclic->cy_expire;

		CYC_TRACE(cpu, CY_HIGH_LEVEL, "add-reprog", cyclic, exp);

		/*
		 * If our upheap propagated to the root, we need to
		 * reprogram the interrupt source.
		 */
		be->cyb_reprogram(bar, exp);
	}
	be->cyb_restore_level(bar, cookie);

	arg->cyx_ndx = ndx;
}
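
/*
 * A worked example of the default start-time computation above, purely
 * for illustration:  if gethrtime() returns 1000000123 (nanoseconds)
 * and cyt_interval is 1000000 (1ms), then
 *
 *	cy_expire = (1000000123 / 1000000 + 1) * 1000000 = 1001000000
 *
 * that is, the next 1ms interval boundary after the current time.
 */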

static cyc_index_t
cyclic_add_here(cyc_cpu_t *cpu, cyc_handler_t *hdlr,
    cyc_time_t *when, uint16_t flags)
{
	cyc_backend_t *be = cpu->cyp_backend;
	cyb_arg_t bar = be->cyb_arg;
	cyc_xcallarg_t arg;

	CYC_PTRACE("add-cpu", cpu, hdlr->cyh_func);
	ASSERT(MUTEX_HELD(&cpu_lock));
	ASSERT(cpu->cyp_state == CYS_ONLINE);
	ASSERT(!(cpu->cyp_cpu->cpu_flags & CPU_OFFLINE));
	ASSERT(when->cyt_when >= 0 && when->cyt_interval > 0);

	if (cpu->cyp_nelems == cpu->cyp_size) {
		/*
		 * This is expensive; it will cross call onto the other
		 * CPU to perform the expansion.
		 */
		cyclic_expand(cpu);
		ASSERT(cpu->cyp_nelems < cpu->cyp_size);
	}

	/*
	 * By now, we know that we're going to be able to successfully
	 * perform the add.  Now cross call over to the CPU of interest to
	 * actually add our cyclic.
	 */
	arg.cyx_cpu = cpu;
	arg.cyx_hdlr = hdlr;
	arg.cyx_when = when;
	arg.cyx_flags = flags;

	be->cyb_xcall(bar, cpu->cyp_cpu, (cyc_func_t)cyclic_add_xcall, &arg);

	CYC_PTRACE("add-cpu-done", cpu, arg.cyx_ndx);

	return (arg.cyx_ndx);
}
17627c478bd9Sstevel@tonic-gate
17637c478bd9Sstevel@tonic-gate static void
cyclic_remove_xcall(cyc_xcallarg_t * arg)17647c478bd9Sstevel@tonic-gate cyclic_remove_xcall(cyc_xcallarg_t *arg)
17657c478bd9Sstevel@tonic-gate {
17667c478bd9Sstevel@tonic-gate cyc_cpu_t *cpu = arg->cyx_cpu;
17677c478bd9Sstevel@tonic-gate cyc_backend_t *be = cpu->cyp_backend;
17687c478bd9Sstevel@tonic-gate cyb_arg_t bar = be->cyb_arg;
17697c478bd9Sstevel@tonic-gate cyc_cookie_t cookie;
177087a18d3fSMadhavan Venkataraman cyc_index_t ndx = arg->cyx_ndx, nelems, i;
177187a18d3fSMadhavan Venkataraman cyc_index_t *heap, last;
17727c478bd9Sstevel@tonic-gate cyclic_t *cyclic;
17737c478bd9Sstevel@tonic-gate #ifdef DEBUG
17747c478bd9Sstevel@tonic-gate cyc_index_t root;
17757c478bd9Sstevel@tonic-gate #endif
17767c478bd9Sstevel@tonic-gate
17777c478bd9Sstevel@tonic-gate ASSERT(cpu->cyp_state == CYS_REMOVING);
17787c478bd9Sstevel@tonic-gate
17797c478bd9Sstevel@tonic-gate cookie = be->cyb_set_level(bar, CY_HIGH_LEVEL);
17807c478bd9Sstevel@tonic-gate
17817c478bd9Sstevel@tonic-gate CYC_TRACE1(cpu, CY_HIGH_LEVEL, "remove-xcall", ndx);
17827c478bd9Sstevel@tonic-gate
178387a18d3fSMadhavan Venkataraman heap = cpu->cyp_heap;
178487a18d3fSMadhavan Venkataraman nelems = cpu->cyp_nelems;
178587a18d3fSMadhavan Venkataraman ASSERT(nelems > 0);
17867c478bd9Sstevel@tonic-gate cyclic = &cpu->cyp_cyclics[ndx];
17877c478bd9Sstevel@tonic-gate
17887c478bd9Sstevel@tonic-gate /*
17897c478bd9Sstevel@tonic-gate * Grab the current expiration time. If this cyclic is being
17907c478bd9Sstevel@tonic-gate * removed as part of a juggling operation, the expiration time
17917c478bd9Sstevel@tonic-gate * will be used when the cyclic is added to the new CPU.
17927c478bd9Sstevel@tonic-gate */
17937c478bd9Sstevel@tonic-gate if (arg->cyx_when != NULL) {
17947c478bd9Sstevel@tonic-gate arg->cyx_when->cyt_when = cyclic->cy_expire;
17957c478bd9Sstevel@tonic-gate arg->cyx_when->cyt_interval = cyclic->cy_interval;
17967c478bd9Sstevel@tonic-gate }
17977c478bd9Sstevel@tonic-gate
17987c478bd9Sstevel@tonic-gate if (cyclic->cy_pend != 0) {
17997c478bd9Sstevel@tonic-gate /*
18007c478bd9Sstevel@tonic-gate * The pend is non-zero; this cyclic is currently being
18017c478bd9Sstevel@tonic-gate * executed (or will be executed shortly). If the caller
18027c478bd9Sstevel@tonic-gate * refuses to wait, we must return (doing nothing). Otherwise,
18037c478bd9Sstevel@tonic-gate  * we will stash the pend value in this CPU's rpend, and
18047c478bd9Sstevel@tonic-gate * then zero it out. The softint in the pend loop will see
18057c478bd9Sstevel@tonic-gate * that we have zeroed out pend, and will call the cyclic
18067c478bd9Sstevel@tonic-gate * handler rpend times. The caller will wait until the
18077c478bd9Sstevel@tonic-gate * softint has completed calling the cyclic handler.
18087c478bd9Sstevel@tonic-gate */
18097c478bd9Sstevel@tonic-gate if (arg->cyx_wait == CY_NOWAIT) {
18107c478bd9Sstevel@tonic-gate arg->cyx_wait = CY_WAIT;
18117c478bd9Sstevel@tonic-gate goto out;
18127c478bd9Sstevel@tonic-gate }
18137c478bd9Sstevel@tonic-gate
18147c478bd9Sstevel@tonic-gate ASSERT(cyclic->cy_level != CY_HIGH_LEVEL);
18157c478bd9Sstevel@tonic-gate CYC_TRACE1(cpu, CY_HIGH_LEVEL, "remove-pend", cyclic->cy_pend);
18167c478bd9Sstevel@tonic-gate cpu->cyp_rpend = cyclic->cy_pend;
18177c478bd9Sstevel@tonic-gate cyclic->cy_pend = 0;
18187c478bd9Sstevel@tonic-gate }
18197c478bd9Sstevel@tonic-gate
18207c478bd9Sstevel@tonic-gate /*
18217c478bd9Sstevel@tonic-gate * Now set the flags to CYF_FREE. We don't need a membar_enter()
18227c478bd9Sstevel@tonic-gate * between zeroing pend and setting the flags because we're at
18237c478bd9Sstevel@tonic-gate * CY_HIGH_LEVEL (that is, the zeroing of pend and the setting
18247c478bd9Sstevel@tonic-gate * of cy_flags appear atomic to softints).
18257c478bd9Sstevel@tonic-gate */
18267c478bd9Sstevel@tonic-gate cyclic->cy_flags = CYF_FREE;
18277c478bd9Sstevel@tonic-gate
18287c478bd9Sstevel@tonic-gate for (i = 0; i < nelems; i++) {
18297c478bd9Sstevel@tonic-gate if (heap[i] == ndx)
18307c478bd9Sstevel@tonic-gate break;
18317c478bd9Sstevel@tonic-gate }
18327c478bd9Sstevel@tonic-gate
18337c478bd9Sstevel@tonic-gate if (i == nelems)
18347c478bd9Sstevel@tonic-gate panic("attempt to remove non-existent cyclic");
18357c478bd9Sstevel@tonic-gate
18367c478bd9Sstevel@tonic-gate cpu->cyp_nelems = --nelems;
18377c478bd9Sstevel@tonic-gate
18387c478bd9Sstevel@tonic-gate if (nelems == 0) {
18397c478bd9Sstevel@tonic-gate /*
18407c478bd9Sstevel@tonic-gate * If we just removed the last element, then we need to
18417c478bd9Sstevel@tonic-gate * disable the backend on this CPU.
18427c478bd9Sstevel@tonic-gate */
18437c478bd9Sstevel@tonic-gate CYC_TRACE0(cpu, CY_HIGH_LEVEL, "disabled");
18447c478bd9Sstevel@tonic-gate be->cyb_disable(bar);
18457c478bd9Sstevel@tonic-gate }
18467c478bd9Sstevel@tonic-gate
18477c478bd9Sstevel@tonic-gate if (i == nelems) {
18487c478bd9Sstevel@tonic-gate /*
18497c478bd9Sstevel@tonic-gate * If we just removed the last element of the heap, then
18507c478bd9Sstevel@tonic-gate * we don't have to downheap.
18517c478bd9Sstevel@tonic-gate */
18527c478bd9Sstevel@tonic-gate CYC_TRACE0(cpu, CY_HIGH_LEVEL, "remove-bottom");
18537c478bd9Sstevel@tonic-gate goto out;
18547c478bd9Sstevel@tonic-gate }
18557c478bd9Sstevel@tonic-gate
18567c478bd9Sstevel@tonic-gate #ifdef DEBUG
18577c478bd9Sstevel@tonic-gate root = heap[0];
18587c478bd9Sstevel@tonic-gate #endif
18597c478bd9Sstevel@tonic-gate
18607c478bd9Sstevel@tonic-gate /*
18617c478bd9Sstevel@tonic-gate * Swap the last element of the heap with the one we want to
18627c478bd9Sstevel@tonic-gate * remove, and downheap (this has the implicit effect of putting
18637c478bd9Sstevel@tonic-gate * the newly freed element on the free list).
18647c478bd9Sstevel@tonic-gate */
18657c478bd9Sstevel@tonic-gate heap[i] = (last = heap[nelems]);
18667c478bd9Sstevel@tonic-gate heap[nelems] = ndx;
18677c478bd9Sstevel@tonic-gate
18687c478bd9Sstevel@tonic-gate if (i == 0) {
18697c478bd9Sstevel@tonic-gate CYC_TRACE0(cpu, CY_HIGH_LEVEL, "remove-root");
18707c478bd9Sstevel@tonic-gate cyclic_downheap(cpu, 0);
18717c478bd9Sstevel@tonic-gate } else {
18727c478bd9Sstevel@tonic-gate if (cyclic_upheap(cpu, i) == 0) {
18737c478bd9Sstevel@tonic-gate /*
18747c478bd9Sstevel@tonic-gate * The upheap didn't propagate to the root; if it
18757c478bd9Sstevel@tonic-gate * didn't propagate at all, we need to downheap.
18767c478bd9Sstevel@tonic-gate */
18777c478bd9Sstevel@tonic-gate CYC_TRACE0(cpu, CY_HIGH_LEVEL, "remove-no-root");
18787c478bd9Sstevel@tonic-gate if (heap[i] == last) {
18797c478bd9Sstevel@tonic-gate CYC_TRACE0(cpu, CY_HIGH_LEVEL, "remove-no-up");
18807c478bd9Sstevel@tonic-gate cyclic_downheap(cpu, i);
18817c478bd9Sstevel@tonic-gate }
18827c478bd9Sstevel@tonic-gate ASSERT(heap[0] == root);
18837c478bd9Sstevel@tonic-gate goto out;
18847c478bd9Sstevel@tonic-gate }
18857c478bd9Sstevel@tonic-gate }
18867c478bd9Sstevel@tonic-gate
18877c478bd9Sstevel@tonic-gate /*
18887c478bd9Sstevel@tonic-gate * We're here because we changed the root; we need to reprogram
18897c478bd9Sstevel@tonic-gate * the clock source.
18907c478bd9Sstevel@tonic-gate */
18917c478bd9Sstevel@tonic-gate cyclic = &cpu->cyp_cyclics[heap[0]];
18927c478bd9Sstevel@tonic-gate
18937c478bd9Sstevel@tonic-gate CYC_TRACE0(cpu, CY_HIGH_LEVEL, "remove-reprog");
18947c478bd9Sstevel@tonic-gate
18957c478bd9Sstevel@tonic-gate ASSERT(nelems != 0);
18967c478bd9Sstevel@tonic-gate be->cyb_reprogram(bar, cyclic->cy_expire);
18977c478bd9Sstevel@tonic-gate out:
18987c478bd9Sstevel@tonic-gate be->cyb_restore_level(bar, cookie);
18997c478bd9Sstevel@tonic-gate }
19007c478bd9Sstevel@tonic-gate
19017c478bd9Sstevel@tonic-gate static int
19027c478bd9Sstevel@tonic-gate cyclic_remove_here(cyc_cpu_t *cpu, cyc_index_t ndx, cyc_time_t *when, int wait)
19037c478bd9Sstevel@tonic-gate {
19047c478bd9Sstevel@tonic-gate cyc_backend_t *be = cpu->cyp_backend;
19057c478bd9Sstevel@tonic-gate cyc_xcallarg_t arg;
19067c478bd9Sstevel@tonic-gate cyclic_t *cyclic = &cpu->cyp_cyclics[ndx];
19077c478bd9Sstevel@tonic-gate cyc_level_t level = cyclic->cy_level;
19087c478bd9Sstevel@tonic-gate
19097c478bd9Sstevel@tonic-gate ASSERT(MUTEX_HELD(&cpu_lock));
19107c478bd9Sstevel@tonic-gate ASSERT(cpu->cyp_rpend == 0);
19117c478bd9Sstevel@tonic-gate ASSERT(wait == CY_WAIT || wait == CY_NOWAIT);
19127c478bd9Sstevel@tonic-gate
19137c478bd9Sstevel@tonic-gate arg.cyx_ndx = ndx;
19147c478bd9Sstevel@tonic-gate arg.cyx_cpu = cpu;
19157c478bd9Sstevel@tonic-gate arg.cyx_when = when;
19167c478bd9Sstevel@tonic-gate arg.cyx_wait = wait;
19177c478bd9Sstevel@tonic-gate
19187c478bd9Sstevel@tonic-gate ASSERT(cpu->cyp_state == CYS_ONLINE);
19197c478bd9Sstevel@tonic-gate cpu->cyp_state = CYS_REMOVING;
19207c478bd9Sstevel@tonic-gate
19217c478bd9Sstevel@tonic-gate be->cyb_xcall(be->cyb_arg, cpu->cyp_cpu,
19227c478bd9Sstevel@tonic-gate (cyc_func_t)cyclic_remove_xcall, &arg);
19237c478bd9Sstevel@tonic-gate
19247c478bd9Sstevel@tonic-gate /*
19257c478bd9Sstevel@tonic-gate * If the cyclic we removed wasn't at CY_HIGH_LEVEL, then we need to
19267c478bd9Sstevel@tonic-gate * check the cyp_rpend. If it's non-zero, then we need to wait here
19277c478bd9Sstevel@tonic-gate * for all pending cyclic handlers to run.
19287c478bd9Sstevel@tonic-gate */
19297c478bd9Sstevel@tonic-gate ASSERT(!(level == CY_HIGH_LEVEL && cpu->cyp_rpend != 0));
19307c478bd9Sstevel@tonic-gate ASSERT(!(wait == CY_NOWAIT && cpu->cyp_rpend != 0));
19317c478bd9Sstevel@tonic-gate ASSERT(!(arg.cyx_wait == CY_NOWAIT && cpu->cyp_rpend != 0));
19327c478bd9Sstevel@tonic-gate
19337c478bd9Sstevel@tonic-gate if (wait != arg.cyx_wait) {
19347c478bd9Sstevel@tonic-gate /*
19357c478bd9Sstevel@tonic-gate * We are being told that we must wait if we want to
19367c478bd9Sstevel@tonic-gate * remove this cyclic; put the CPU back in the CYS_ONLINE
19377c478bd9Sstevel@tonic-gate * state and return failure.
19387c478bd9Sstevel@tonic-gate */
19397c478bd9Sstevel@tonic-gate ASSERT(wait == CY_NOWAIT && arg.cyx_wait == CY_WAIT);
19407c478bd9Sstevel@tonic-gate ASSERT(cpu->cyp_state == CYS_REMOVING);
19417c478bd9Sstevel@tonic-gate cpu->cyp_state = CYS_ONLINE;
19427c478bd9Sstevel@tonic-gate
19437c478bd9Sstevel@tonic-gate return (0);
19447c478bd9Sstevel@tonic-gate }
19457c478bd9Sstevel@tonic-gate
19467c478bd9Sstevel@tonic-gate if (cpu->cyp_rpend != 0)
19477c478bd9Sstevel@tonic-gate sema_p(&cpu->cyp_modify_wait);
19487c478bd9Sstevel@tonic-gate
19497c478bd9Sstevel@tonic-gate ASSERT(cpu->cyp_state == CYS_REMOVING);
19507c478bd9Sstevel@tonic-gate
19517c478bd9Sstevel@tonic-gate cpu->cyp_rpend = 0;
19527c478bd9Sstevel@tonic-gate cpu->cyp_state = CYS_ONLINE;
19537c478bd9Sstevel@tonic-gate
19547c478bd9Sstevel@tonic-gate return (1);
19557c478bd9Sstevel@tonic-gate }
19567c478bd9Sstevel@tonic-gate
195787a18d3fSMadhavan Venkataraman /*
195887a18d3fSMadhavan Venkataraman * If cyclic_reprogram() is called on the same CPU as the cyclic's CPU, then
195987a18d3fSMadhavan Venkataraman * it calls this function directly. Else, it invokes this function through
196087a18d3fSMadhavan Venkataraman * an X-call to the cyclic's CPU.
196187a18d3fSMadhavan Venkataraman */
1962*cb2d1b02SPatrick Mooney static boolean_t
1963*cb2d1b02SPatrick Mooney cyclic_reprogram_cyclic(cyc_cpu_t *cpu, cyc_index_t ndx, hrtime_t expire,
1964*cb2d1b02SPatrick Mooney boolean_t is_local)
196587a18d3fSMadhavan Venkataraman {
196687a18d3fSMadhavan Venkataraman cyc_backend_t *be = cpu->cyp_backend;
196787a18d3fSMadhavan Venkataraman cyb_arg_t bar = be->cyb_arg;
196887a18d3fSMadhavan Venkataraman cyc_cookie_t cookie;
196987a18d3fSMadhavan Venkataraman cyc_index_t nelems, i;
197087a18d3fSMadhavan Venkataraman cyc_index_t *heap;
197187a18d3fSMadhavan Venkataraman cyclic_t *cyclic;
197287a18d3fSMadhavan Venkataraman hrtime_t oexpire;
197387a18d3fSMadhavan Venkataraman int reprog;
197487a18d3fSMadhavan Venkataraman
197587a18d3fSMadhavan Venkataraman cookie = be->cyb_set_level(bar, CY_HIGH_LEVEL);
197687a18d3fSMadhavan Venkataraman
197787a18d3fSMadhavan Venkataraman CYC_TRACE1(cpu, CY_HIGH_LEVEL, "reprog-xcall", ndx);
197887a18d3fSMadhavan Venkataraman
197987a18d3fSMadhavan Venkataraman nelems = cpu->cyp_nelems;
198087a18d3fSMadhavan Venkataraman ASSERT(nelems > 0);
198187a18d3fSMadhavan Venkataraman heap = cpu->cyp_heap;
198287a18d3fSMadhavan Venkataraman
198387a18d3fSMadhavan Venkataraman /*
198487a18d3fSMadhavan Venkataraman * Reprogrammed cyclics are typically one-shot ones that get
198587a18d3fSMadhavan Venkataraman * set to infinity on every expiration. We shorten the search by
198687a18d3fSMadhavan Venkataraman * searching from the bottom of the heap to the top instead of the
198787a18d3fSMadhavan Venkataraman * other way around.
198887a18d3fSMadhavan Venkataraman */
198987a18d3fSMadhavan Venkataraman for (i = nelems - 1; i >= 0; i--) {
199087a18d3fSMadhavan Venkataraman if (heap[i] == ndx)
199187a18d3fSMadhavan Venkataraman break;
199287a18d3fSMadhavan Venkataraman }
1993*cb2d1b02SPatrick Mooney if (i < 0) {
1994*cb2d1b02SPatrick Mooney /*
1995*cb2d1b02SPatrick Mooney * Report failure (rather than panicking) if and only if the
1996*cb2d1b02SPatrick Mooney * cyclic_reprogram() is occurring on the CPU which the cyclic
1997*cb2d1b02SPatrick Mooney * resides upon, and there is evidence that a pending cyclic
1998*cb2d1b02SPatrick Mooney * was removed from that CPU.
1999*cb2d1b02SPatrick Mooney *
2000*cb2d1b02SPatrick Mooney * This covers the race where a cyclic is removed out from
2001*cb2d1b02SPatrick Mooney * under its running handler, which then attempts a reprogram.
2002*cb2d1b02SPatrick Mooney */
2003*cb2d1b02SPatrick Mooney if (is_local &&
2004*cb2d1b02SPatrick Mooney cpu->cyp_state == CYS_REMOVING && cpu->cyp_rpend > 0) {
2005*cb2d1b02SPatrick Mooney return (B_FALSE);
2006*cb2d1b02SPatrick Mooney }
200787a18d3fSMadhavan Venkataraman panic("attempt to reprogram non-existent cyclic");
2008*cb2d1b02SPatrick Mooney }
200987a18d3fSMadhavan Venkataraman
201087a18d3fSMadhavan Venkataraman cyclic = &cpu->cyp_cyclics[ndx];
201187a18d3fSMadhavan Venkataraman oexpire = cyclic->cy_expire;
201287a18d3fSMadhavan Venkataraman cyclic->cy_expire = expire;
201387a18d3fSMadhavan Venkataraman
201487a18d3fSMadhavan Venkataraman reprog = (i == 0);
201587a18d3fSMadhavan Venkataraman if (expire > oexpire) {
201687a18d3fSMadhavan Venkataraman CYC_TRACE1(cpu, CY_HIGH_LEVEL, "reprog-down", i);
201787a18d3fSMadhavan Venkataraman cyclic_downheap(cpu, i);
201887a18d3fSMadhavan Venkataraman } else if (i > 0) {
201987a18d3fSMadhavan Venkataraman CYC_TRACE1(cpu, CY_HIGH_LEVEL, "reprog-up", i);
202087a18d3fSMadhavan Venkataraman reprog = cyclic_upheap(cpu, i);
202187a18d3fSMadhavan Venkataraman }
202287a18d3fSMadhavan Venkataraman
202387a18d3fSMadhavan Venkataraman if (reprog && (cpu->cyp_state != CYS_SUSPENDED)) {
202487a18d3fSMadhavan Venkataraman /*
202587a18d3fSMadhavan Venkataraman * The root changed. Reprogram the clock source.
202687a18d3fSMadhavan Venkataraman */
202787a18d3fSMadhavan Venkataraman CYC_TRACE0(cpu, CY_HIGH_LEVEL, "reprog-root");
202887a18d3fSMadhavan Venkataraman cyclic = &cpu->cyp_cyclics[heap[0]];
202987a18d3fSMadhavan Venkataraman be->cyb_reprogram(bar, cyclic->cy_expire);
203087a18d3fSMadhavan Venkataraman }
203187a18d3fSMadhavan Venkataraman
203287a18d3fSMadhavan Venkataraman be->cyb_restore_level(bar, cookie);
2033*cb2d1b02SPatrick Mooney return (B_TRUE);
203487a18d3fSMadhavan Venkataraman }
203587a18d3fSMadhavan Venkataraman
203687a18d3fSMadhavan Venkataraman static void
203787a18d3fSMadhavan Venkataraman cyclic_reprogram_xcall(cyc_xcallarg_t *arg)
203887a18d3fSMadhavan Venkataraman {
2039*cb2d1b02SPatrick Mooney /*
2040*cb2d1b02SPatrick Mooney * Cross-call reprogram operations should never fail due to racing
2041*cb2d1b02SPatrick Mooney * cyclic removal, as they cannot occur from the handler itself.
2042*cb2d1b02SPatrick Mooney */
2043*cb2d1b02SPatrick Mooney VERIFY(cyclic_reprogram_cyclic(arg->cyx_cpu, arg->cyx_ndx,
2044*cb2d1b02SPatrick Mooney arg->cyx_when->cyt_when, B_FALSE));
204587a18d3fSMadhavan Venkataraman }
204687a18d3fSMadhavan Venkataraman
204787a18d3fSMadhavan Venkataraman static void
204887a18d3fSMadhavan Venkataraman cyclic_reprogram_here(cyc_cpu_t *cpu, cyc_index_t ndx, hrtime_t expiration)
204987a18d3fSMadhavan Venkataraman {
205087a18d3fSMadhavan Venkataraman cyc_backend_t *be = cpu->cyp_backend;
205187a18d3fSMadhavan Venkataraman cyc_xcallarg_t arg;
205287a18d3fSMadhavan Venkataraman cyc_time_t when;
205387a18d3fSMadhavan Venkataraman
205487a18d3fSMadhavan Venkataraman ASSERT(expiration > 0);
205587a18d3fSMadhavan Venkataraman
205687a18d3fSMadhavan Venkataraman arg.cyx_ndx = ndx;
205787a18d3fSMadhavan Venkataraman arg.cyx_cpu = cpu;
205887a18d3fSMadhavan Venkataraman arg.cyx_when = &when;
205987a18d3fSMadhavan Venkataraman when.cyt_when = expiration;
206087a18d3fSMadhavan Venkataraman
206187a18d3fSMadhavan Venkataraman be->cyb_xcall(be->cyb_arg, cpu->cyp_cpu,
206287a18d3fSMadhavan Venkataraman (cyc_func_t)cyclic_reprogram_xcall, &arg);
206387a18d3fSMadhavan Venkataraman }
206487a18d3fSMadhavan Venkataraman
20657c478bd9Sstevel@tonic-gate /*
20667c478bd9Sstevel@tonic-gate * cyclic_juggle_one_to() should only be called when the source cyclic
20677c478bd9Sstevel@tonic-gate * can be juggled and the destination CPU is known to be able to accept
20687c478bd9Sstevel@tonic-gate * it.
20697c478bd9Sstevel@tonic-gate */
20707c478bd9Sstevel@tonic-gate static void
20717c478bd9Sstevel@tonic-gate cyclic_juggle_one_to(cyc_id_t *idp, cyc_cpu_t *dest)
20727c478bd9Sstevel@tonic-gate {
20737c478bd9Sstevel@tonic-gate cyc_cpu_t *src = idp->cyi_cpu;
20747c478bd9Sstevel@tonic-gate cyc_index_t ndx = idp->cyi_ndx;
20757c478bd9Sstevel@tonic-gate cyc_time_t when;
20767c478bd9Sstevel@tonic-gate cyc_handler_t hdlr;
20777c478bd9Sstevel@tonic-gate cyclic_t *cyclic;
20787c478bd9Sstevel@tonic-gate uint16_t flags;
20797c478bd9Sstevel@tonic-gate hrtime_t delay;
20807c478bd9Sstevel@tonic-gate
20817c478bd9Sstevel@tonic-gate ASSERT(MUTEX_HELD(&cpu_lock));
20827c478bd9Sstevel@tonic-gate ASSERT(src != NULL && idp->cyi_omni_list == NULL);
20837c478bd9Sstevel@tonic-gate ASSERT(!(dest->cyp_cpu->cpu_flags & (CPU_QUIESCED | CPU_OFFLINE)));
20847c478bd9Sstevel@tonic-gate CYC_PTRACE("juggle-one-to", idp, dest);
20857c478bd9Sstevel@tonic-gate
20867c478bd9Sstevel@tonic-gate cyclic = &src->cyp_cyclics[ndx];
20877c478bd9Sstevel@tonic-gate
20887c478bd9Sstevel@tonic-gate flags = cyclic->cy_flags;
20897c478bd9Sstevel@tonic-gate ASSERT(!(flags & CYF_CPU_BOUND) && !(flags & CYF_FREE));
20907c478bd9Sstevel@tonic-gate
20917c478bd9Sstevel@tonic-gate hdlr.cyh_func = cyclic->cy_handler;
20927c478bd9Sstevel@tonic-gate hdlr.cyh_level = cyclic->cy_level;
20937c478bd9Sstevel@tonic-gate hdlr.cyh_arg = cyclic->cy_arg;
20947c478bd9Sstevel@tonic-gate
20957c478bd9Sstevel@tonic-gate /*
20967c478bd9Sstevel@tonic-gate * Before we begin the juggling process, see if the destination
20977c478bd9Sstevel@tonic-gate * CPU requires an expansion. If it does, we'll perform the
20987c478bd9Sstevel@tonic-gate * expansion before removing the cyclic. This is to prevent us
20997c478bd9Sstevel@tonic-gate * from blocking while a system-critical cyclic (notably, the clock
21007c478bd9Sstevel@tonic-gate * cyclic) isn't on a CPU.
21017c478bd9Sstevel@tonic-gate */
21027c478bd9Sstevel@tonic-gate if (dest->cyp_nelems == dest->cyp_size) {
21037c478bd9Sstevel@tonic-gate CYC_PTRACE("remove-expand", idp, dest);
21047c478bd9Sstevel@tonic-gate cyclic_expand(dest);
21057c478bd9Sstevel@tonic-gate ASSERT(dest->cyp_nelems < dest->cyp_size);
21067c478bd9Sstevel@tonic-gate }
21077c478bd9Sstevel@tonic-gate
210887a18d3fSMadhavan Venkataraman /*
210987a18d3fSMadhavan Venkataraman * Prevent a reprogram of this cyclic while we are relocating it.
211087a18d3fSMadhavan Venkataraman * Otherwise, cyclic_reprogram_here() will end up sending an X-call
211187a18d3fSMadhavan Venkataraman * to the wrong CPU.
211287a18d3fSMadhavan Venkataraman */
211387a18d3fSMadhavan Venkataraman rw_enter(&idp->cyi_lock, RW_WRITER);
211487a18d3fSMadhavan Venkataraman
21157c478bd9Sstevel@tonic-gate /*
21167c478bd9Sstevel@tonic-gate * Remove the cyclic from the source. As mentioned above, we cannot
21177c478bd9Sstevel@tonic-gate * block during this operation; if we cannot remove the cyclic
21187c478bd9Sstevel@tonic-gate * without waiting, we spin for a time shorter than the interval, and
21197c478bd9Sstevel@tonic-gate * reattempt the (non-blocking) removal. If we continue to fail,
21207c478bd9Sstevel@tonic-gate * we will exponentially back off (up to half of the interval).
21217c478bd9Sstevel@tonic-gate * Note that the removal will ultimately succeed -- even if the
21227c478bd9Sstevel@tonic-gate * cyclic handler is blocked on a resource held by a thread which we
21237c478bd9Sstevel@tonic-gate * have preempted, priority inheritance assures that the preempted
21247c478bd9Sstevel@tonic-gate * thread will preempt us and continue to progress.
21257c478bd9Sstevel@tonic-gate */
21267c478bd9Sstevel@tonic-gate for (delay = NANOSEC / MICROSEC; ; delay <<= 1) {
21277c478bd9Sstevel@tonic-gate /*
21287c478bd9Sstevel@tonic-gate * Before we begin this operation, disable kernel preemption.
21297c478bd9Sstevel@tonic-gate */
21307c478bd9Sstevel@tonic-gate kpreempt_disable();
21317c478bd9Sstevel@tonic-gate if (cyclic_remove_here(src, ndx, &when, CY_NOWAIT))
21327c478bd9Sstevel@tonic-gate break;
21337c478bd9Sstevel@tonic-gate
21347c478bd9Sstevel@tonic-gate /*
21357c478bd9Sstevel@tonic-gate * The operation failed; enable kernel preemption while
21367c478bd9Sstevel@tonic-gate * spinning.
21377c478bd9Sstevel@tonic-gate */
21387c478bd9Sstevel@tonic-gate kpreempt_enable();
21397c478bd9Sstevel@tonic-gate
21407c478bd9Sstevel@tonic-gate CYC_PTRACE("remove-retry", idp, src);
21417c478bd9Sstevel@tonic-gate
21427c478bd9Sstevel@tonic-gate if (delay > (cyclic->cy_interval >> 1))
21437c478bd9Sstevel@tonic-gate delay = cyclic->cy_interval >> 1;
21447c478bd9Sstevel@tonic-gate
214587a18d3fSMadhavan Venkataraman /*
214687a18d3fSMadhavan Venkataraman * Drop the RW lock to avoid a deadlock with the cyclic
214787a18d3fSMadhavan Venkataraman 	 * handler (because it can potentially call cyclic_reprogram()).
214887a18d3fSMadhavan Venkataraman */
214987a18d3fSMadhavan Venkataraman rw_exit(&idp->cyi_lock);
21507c478bd9Sstevel@tonic-gate drv_usecwait((clock_t)(delay / (NANOSEC / MICROSEC)));
215187a18d3fSMadhavan Venkataraman rw_enter(&idp->cyi_lock, RW_WRITER);
21527c478bd9Sstevel@tonic-gate }
21537c478bd9Sstevel@tonic-gate
21547c478bd9Sstevel@tonic-gate /*
21557c478bd9Sstevel@tonic-gate * Now add the cyclic to the destination. This won't block; we
21567c478bd9Sstevel@tonic-gate * performed any necessary (blocking) expansion of the destination
21577c478bd9Sstevel@tonic-gate * CPU before removing the cyclic from the source CPU.
21587c478bd9Sstevel@tonic-gate */
21597c478bd9Sstevel@tonic-gate idp->cyi_ndx = cyclic_add_here(dest, &hdlr, &when, flags);
21607c478bd9Sstevel@tonic-gate idp->cyi_cpu = dest;
21617c478bd9Sstevel@tonic-gate kpreempt_enable();
216287a18d3fSMadhavan Venkataraman
216387a18d3fSMadhavan Venkataraman /*
216487a18d3fSMadhavan Venkataraman * Now that we have successfully relocated the cyclic, allow
216587a18d3fSMadhavan Venkataraman * it to be reprogrammed.
216687a18d3fSMadhavan Venkataraman */
216787a18d3fSMadhavan Venkataraman rw_exit(&idp->cyi_lock);
21687c478bd9Sstevel@tonic-gate }
21697c478bd9Sstevel@tonic-gate
21707c478bd9Sstevel@tonic-gate static int
21717c478bd9Sstevel@tonic-gate cyclic_juggle_one(cyc_id_t *idp)
21727c478bd9Sstevel@tonic-gate {
21737c478bd9Sstevel@tonic-gate cyc_index_t ndx = idp->cyi_ndx;
21747c478bd9Sstevel@tonic-gate cyc_cpu_t *cpu = idp->cyi_cpu, *dest;
21757c478bd9Sstevel@tonic-gate cyclic_t *cyclic = &cpu->cyp_cyclics[ndx];
21767c478bd9Sstevel@tonic-gate cpu_t *c = cpu->cyp_cpu;
21777c478bd9Sstevel@tonic-gate cpupart_t *part = c->cpu_part;
21787c478bd9Sstevel@tonic-gate
21797c478bd9Sstevel@tonic-gate CYC_PTRACE("juggle-one", idp, cpu);
21807c478bd9Sstevel@tonic-gate ASSERT(MUTEX_HELD(&cpu_lock));
21817c478bd9Sstevel@tonic-gate ASSERT(!(c->cpu_flags & CPU_OFFLINE));
21827c478bd9Sstevel@tonic-gate ASSERT(cpu->cyp_state == CYS_ONLINE);
21837c478bd9Sstevel@tonic-gate ASSERT(!(cyclic->cy_flags & CYF_FREE));
21847c478bd9Sstevel@tonic-gate
21857c478bd9Sstevel@tonic-gate if ((dest = cyclic_pick_cpu(part, c, c, cyclic->cy_flags)) == NULL) {
21867c478bd9Sstevel@tonic-gate /*
21877c478bd9Sstevel@tonic-gate * Bad news: this cyclic can't be juggled.
21887c478bd9Sstevel@tonic-gate */
21897c478bd9Sstevel@tonic-gate 		CYC_PTRACE("juggle-fail", idp, cpu);
21907c478bd9Sstevel@tonic-gate return (0);
21917c478bd9Sstevel@tonic-gate }
21927c478bd9Sstevel@tonic-gate
21937c478bd9Sstevel@tonic-gate cyclic_juggle_one_to(idp, dest);
21947c478bd9Sstevel@tonic-gate
21957c478bd9Sstevel@tonic-gate return (1);
21967c478bd9Sstevel@tonic-gate }
21977c478bd9Sstevel@tonic-gate
21987c478bd9Sstevel@tonic-gate static void
21997c478bd9Sstevel@tonic-gate cyclic_unbind_cpu(cyclic_id_t id)
22007c478bd9Sstevel@tonic-gate {
22017c478bd9Sstevel@tonic-gate cyc_id_t *idp = (cyc_id_t *)id;
22027c478bd9Sstevel@tonic-gate cyc_cpu_t *cpu = idp->cyi_cpu;
22037c478bd9Sstevel@tonic-gate cpu_t *c = cpu->cyp_cpu;
22047c478bd9Sstevel@tonic-gate cyclic_t *cyclic = &cpu->cyp_cyclics[idp->cyi_ndx];
22057c478bd9Sstevel@tonic-gate
22067c478bd9Sstevel@tonic-gate CYC_PTRACE("unbind-cpu", id, cpu);
22077c478bd9Sstevel@tonic-gate ASSERT(MUTEX_HELD(&cpu_lock));
22087c478bd9Sstevel@tonic-gate ASSERT(cpu->cyp_state == CYS_ONLINE);
22097c478bd9Sstevel@tonic-gate ASSERT(!(cyclic->cy_flags & CYF_FREE));
22107c478bd9Sstevel@tonic-gate ASSERT(cyclic->cy_flags & CYF_CPU_BOUND);
22117c478bd9Sstevel@tonic-gate
22127c478bd9Sstevel@tonic-gate cyclic->cy_flags &= ~CYF_CPU_BOUND;
22137c478bd9Sstevel@tonic-gate
22147c478bd9Sstevel@tonic-gate /*
22157c478bd9Sstevel@tonic-gate 	 * If we were bound to a CPU which has interrupts disabled, we need
22167c478bd9Sstevel@tonic-gate * to juggle away. This can only fail if we are bound to a
22177c478bd9Sstevel@tonic-gate * processor set, and if every CPU in the processor set has
22187c478bd9Sstevel@tonic-gate * interrupts disabled.
22197c478bd9Sstevel@tonic-gate */
22207c478bd9Sstevel@tonic-gate if (!(c->cpu_flags & CPU_ENABLE)) {
22217c478bd9Sstevel@tonic-gate int res = cyclic_juggle_one(idp);
22227c478bd9Sstevel@tonic-gate
22237c478bd9Sstevel@tonic-gate ASSERT((res && idp->cyi_cpu != cpu) ||
22247c478bd9Sstevel@tonic-gate (!res && (cyclic->cy_flags & CYF_PART_BOUND)));
22257c478bd9Sstevel@tonic-gate }
22267c478bd9Sstevel@tonic-gate }
22277c478bd9Sstevel@tonic-gate
22287c478bd9Sstevel@tonic-gate static void
22297c478bd9Sstevel@tonic-gate cyclic_bind_cpu(cyclic_id_t id, cpu_t *d)
22307c478bd9Sstevel@tonic-gate {
22317c478bd9Sstevel@tonic-gate cyc_id_t *idp = (cyc_id_t *)id;
22327c478bd9Sstevel@tonic-gate cyc_cpu_t *dest = d->cpu_cyclic, *cpu = idp->cyi_cpu;
22337c478bd9Sstevel@tonic-gate cpu_t *c = cpu->cyp_cpu;
22347c478bd9Sstevel@tonic-gate cyclic_t *cyclic = &cpu->cyp_cyclics[idp->cyi_ndx];
22357c478bd9Sstevel@tonic-gate cpupart_t *part = c->cpu_part;
22367c478bd9Sstevel@tonic-gate
22377c478bd9Sstevel@tonic-gate CYC_PTRACE("bind-cpu", id, dest);
22387c478bd9Sstevel@tonic-gate ASSERT(MUTEX_HELD(&cpu_lock));
22397c478bd9Sstevel@tonic-gate ASSERT(!(d->cpu_flags & CPU_OFFLINE));
22407c478bd9Sstevel@tonic-gate ASSERT(!(c->cpu_flags & CPU_OFFLINE));
22417c478bd9Sstevel@tonic-gate ASSERT(cpu->cyp_state == CYS_ONLINE);
22427c478bd9Sstevel@tonic-gate ASSERT(dest != NULL);
22437c478bd9Sstevel@tonic-gate ASSERT(dest->cyp_state == CYS_ONLINE);
22447c478bd9Sstevel@tonic-gate ASSERT(!(cyclic->cy_flags & CYF_FREE));
22457c478bd9Sstevel@tonic-gate ASSERT(!(cyclic->cy_flags & CYF_CPU_BOUND));
22467c478bd9Sstevel@tonic-gate
22477c478bd9Sstevel@tonic-gate dest = cyclic_pick_cpu(part, d, NULL, cyclic->cy_flags | CYF_CPU_BOUND);
22487c478bd9Sstevel@tonic-gate
22497c478bd9Sstevel@tonic-gate if (dest != cpu) {
22507c478bd9Sstevel@tonic-gate cyclic_juggle_one_to(idp, dest);
22517c478bd9Sstevel@tonic-gate cyclic = &dest->cyp_cyclics[idp->cyi_ndx];
22527c478bd9Sstevel@tonic-gate }
22537c478bd9Sstevel@tonic-gate
22547c478bd9Sstevel@tonic-gate cyclic->cy_flags |= CYF_CPU_BOUND;
22557c478bd9Sstevel@tonic-gate }
22567c478bd9Sstevel@tonic-gate
22577c478bd9Sstevel@tonic-gate static void
22587c478bd9Sstevel@tonic-gate cyclic_unbind_cpupart(cyclic_id_t id)
22597c478bd9Sstevel@tonic-gate {
22607c478bd9Sstevel@tonic-gate cyc_id_t *idp = (cyc_id_t *)id;
22617c478bd9Sstevel@tonic-gate cyc_cpu_t *cpu = idp->cyi_cpu;
22627c478bd9Sstevel@tonic-gate cpu_t *c = cpu->cyp_cpu;
22637c478bd9Sstevel@tonic-gate cyclic_t *cyc = &cpu->cyp_cyclics[idp->cyi_ndx];
22647c478bd9Sstevel@tonic-gate
22657c478bd9Sstevel@tonic-gate CYC_PTRACE("unbind-part", idp, c->cpu_part);
22667c478bd9Sstevel@tonic-gate ASSERT(MUTEX_HELD(&cpu_lock));
22677c478bd9Sstevel@tonic-gate ASSERT(cpu->cyp_state == CYS_ONLINE);
22687c478bd9Sstevel@tonic-gate ASSERT(!(cyc->cy_flags & CYF_FREE));
22697c478bd9Sstevel@tonic-gate ASSERT(cyc->cy_flags & CYF_PART_BOUND);
22707c478bd9Sstevel@tonic-gate
22717c478bd9Sstevel@tonic-gate cyc->cy_flags &= ~CYF_PART_BOUND;
22727c478bd9Sstevel@tonic-gate
22737c478bd9Sstevel@tonic-gate /*
22747c478bd9Sstevel@tonic-gate * If we're on a CPU which has interrupts disabled (and if this cyclic
22757c478bd9Sstevel@tonic-gate * isn't bound to the CPU), we need to juggle away.
22767c478bd9Sstevel@tonic-gate */
22777c478bd9Sstevel@tonic-gate if (!(c->cpu_flags & CPU_ENABLE) && !(cyc->cy_flags & CYF_CPU_BOUND)) {
22787c478bd9Sstevel@tonic-gate int res = cyclic_juggle_one(idp);
22797c478bd9Sstevel@tonic-gate
22807c478bd9Sstevel@tonic-gate ASSERT(res && idp->cyi_cpu != cpu);
22817c478bd9Sstevel@tonic-gate }
22827c478bd9Sstevel@tonic-gate }
22837c478bd9Sstevel@tonic-gate
22847c478bd9Sstevel@tonic-gate static void
22857c478bd9Sstevel@tonic-gate cyclic_bind_cpupart(cyclic_id_t id, cpupart_t *part)
22867c478bd9Sstevel@tonic-gate {
22877c478bd9Sstevel@tonic-gate cyc_id_t *idp = (cyc_id_t *)id;
22887c478bd9Sstevel@tonic-gate cyc_cpu_t *cpu = idp->cyi_cpu, *dest;
22897c478bd9Sstevel@tonic-gate cpu_t *c = cpu->cyp_cpu;
22907c478bd9Sstevel@tonic-gate cyclic_t *cyc = &cpu->cyp_cyclics[idp->cyi_ndx];
22917c478bd9Sstevel@tonic-gate
22927c478bd9Sstevel@tonic-gate CYC_PTRACE("bind-part", idp, part);
22937c478bd9Sstevel@tonic-gate ASSERT(MUTEX_HELD(&cpu_lock));
22947c478bd9Sstevel@tonic-gate ASSERT(!(c->cpu_flags & CPU_OFFLINE));
22957c478bd9Sstevel@tonic-gate ASSERT(cpu->cyp_state == CYS_ONLINE);
22967c478bd9Sstevel@tonic-gate ASSERT(!(cyc->cy_flags & CYF_FREE));
22977c478bd9Sstevel@tonic-gate ASSERT(!(cyc->cy_flags & CYF_PART_BOUND));
22987c478bd9Sstevel@tonic-gate ASSERT(part->cp_ncpus > 0);
22997c478bd9Sstevel@tonic-gate
23007c478bd9Sstevel@tonic-gate dest = cyclic_pick_cpu(part, c, NULL, cyc->cy_flags | CYF_PART_BOUND);
23017c478bd9Sstevel@tonic-gate
23027c478bd9Sstevel@tonic-gate if (dest != cpu) {
23037c478bd9Sstevel@tonic-gate cyclic_juggle_one_to(idp, dest);
23047c478bd9Sstevel@tonic-gate cyc = &dest->cyp_cyclics[idp->cyi_ndx];
23057c478bd9Sstevel@tonic-gate }
23067c478bd9Sstevel@tonic-gate
23077c478bd9Sstevel@tonic-gate cyc->cy_flags |= CYF_PART_BOUND;
23087c478bd9Sstevel@tonic-gate }
23097c478bd9Sstevel@tonic-gate
23107c478bd9Sstevel@tonic-gate static void
23117c478bd9Sstevel@tonic-gate cyclic_configure(cpu_t *c)
23127c478bd9Sstevel@tonic-gate {
23137c478bd9Sstevel@tonic-gate cyc_cpu_t *cpu = kmem_zalloc(sizeof (cyc_cpu_t), KM_SLEEP);
23147c478bd9Sstevel@tonic-gate cyc_backend_t *nbe = kmem_zalloc(sizeof (cyc_backend_t), KM_SLEEP);
23157c478bd9Sstevel@tonic-gate int i;
23167c478bd9Sstevel@tonic-gate
23177c478bd9Sstevel@tonic-gate CYC_PTRACE1("configure", cpu);
23187c478bd9Sstevel@tonic-gate ASSERT(MUTEX_HELD(&cpu_lock));
23197c478bd9Sstevel@tonic-gate
23207c478bd9Sstevel@tonic-gate if (cyclic_id_cache == NULL)
23217c478bd9Sstevel@tonic-gate cyclic_id_cache = kmem_cache_create("cyclic_id_cache",
23227c478bd9Sstevel@tonic-gate sizeof (cyc_id_t), 0, NULL, NULL, NULL, NULL, NULL, 0);
23237c478bd9Sstevel@tonic-gate
23247c478bd9Sstevel@tonic-gate cpu->cyp_cpu = c;
23257c478bd9Sstevel@tonic-gate
23267c478bd9Sstevel@tonic-gate sema_init(&cpu->cyp_modify_wait, 0, NULL, SEMA_DEFAULT, NULL);
23277c478bd9Sstevel@tonic-gate
23287c478bd9Sstevel@tonic-gate cpu->cyp_size = 1;
23297c478bd9Sstevel@tonic-gate cpu->cyp_heap = kmem_zalloc(sizeof (cyc_index_t), KM_SLEEP);
23307c478bd9Sstevel@tonic-gate cpu->cyp_cyclics = kmem_zalloc(sizeof (cyclic_t), KM_SLEEP);
23317c478bd9Sstevel@tonic-gate cpu->cyp_cyclics->cy_flags = CYF_FREE;
23327c478bd9Sstevel@tonic-gate
23337c478bd9Sstevel@tonic-gate for (i = CY_LOW_LEVEL; i < CY_LOW_LEVEL + CY_SOFT_LEVELS; i++) {
23347c478bd9Sstevel@tonic-gate /*
23357c478bd9Sstevel@tonic-gate * We don't need to set the sizemask; it's already zero
23367c478bd9Sstevel@tonic-gate * (which is the appropriate sizemask for a size of 1).
23377c478bd9Sstevel@tonic-gate */
23387c478bd9Sstevel@tonic-gate cpu->cyp_softbuf[i].cys_buf[0].cypc_buf =
23397c478bd9Sstevel@tonic-gate kmem_alloc(sizeof (cyc_index_t), KM_SLEEP);
23407c478bd9Sstevel@tonic-gate }
23417c478bd9Sstevel@tonic-gate
23427c478bd9Sstevel@tonic-gate cpu->cyp_state = CYS_OFFLINE;
23437c478bd9Sstevel@tonic-gate
23447c478bd9Sstevel@tonic-gate /*
23457c478bd9Sstevel@tonic-gate 	 * Set up the backend for this CPU.
23467c478bd9Sstevel@tonic-gate */
23477c478bd9Sstevel@tonic-gate bcopy(&cyclic_backend, nbe, sizeof (cyc_backend_t));
23487c478bd9Sstevel@tonic-gate nbe->cyb_arg = nbe->cyb_configure(c);
23497c478bd9Sstevel@tonic-gate cpu->cyp_backend = nbe;
23507c478bd9Sstevel@tonic-gate
23517c478bd9Sstevel@tonic-gate /*
23527c478bd9Sstevel@tonic-gate * On platforms where stray interrupts may be taken during startup,
23537c478bd9Sstevel@tonic-gate * the CPU's cpu_cyclic pointer serves as an indicator that the
23547c478bd9Sstevel@tonic-gate * cyclic subsystem for this CPU is prepared to field interrupts.
23557c478bd9Sstevel@tonic-gate */
23567c478bd9Sstevel@tonic-gate membar_producer();
23577c478bd9Sstevel@tonic-gate
23587c478bd9Sstevel@tonic-gate c->cpu_cyclic = cpu;
23597c478bd9Sstevel@tonic-gate }
23607c478bd9Sstevel@tonic-gate
23617c478bd9Sstevel@tonic-gate static void
23627c478bd9Sstevel@tonic-gate cyclic_unconfigure(cpu_t *c)
23637c478bd9Sstevel@tonic-gate {
23647c478bd9Sstevel@tonic-gate cyc_cpu_t *cpu = c->cpu_cyclic;
23657c478bd9Sstevel@tonic-gate cyc_backend_t *be = cpu->cyp_backend;
23667c478bd9Sstevel@tonic-gate cyb_arg_t bar = be->cyb_arg;
23677c478bd9Sstevel@tonic-gate int i;
23687c478bd9Sstevel@tonic-gate
23697c478bd9Sstevel@tonic-gate CYC_PTRACE1("unconfigure", cpu);
23707c478bd9Sstevel@tonic-gate ASSERT(MUTEX_HELD(&cpu_lock));
23717c478bd9Sstevel@tonic-gate ASSERT(cpu->cyp_state == CYS_OFFLINE);
23727c478bd9Sstevel@tonic-gate ASSERT(cpu->cyp_nelems == 0);
23737c478bd9Sstevel@tonic-gate
23747c478bd9Sstevel@tonic-gate /*
23757c478bd9Sstevel@tonic-gate * Let the backend know that the CPU is being yanked, and free up
23767c478bd9Sstevel@tonic-gate * the backend structure.
23777c478bd9Sstevel@tonic-gate */
23787c478bd9Sstevel@tonic-gate be->cyb_unconfigure(bar);
23797c478bd9Sstevel@tonic-gate kmem_free(be, sizeof (cyc_backend_t));
23807c478bd9Sstevel@tonic-gate cpu->cyp_backend = NULL;
23817c478bd9Sstevel@tonic-gate
23827c478bd9Sstevel@tonic-gate /*
23837c478bd9Sstevel@tonic-gate * Free up the producer/consumer buffers at each of the soft levels.
23847c478bd9Sstevel@tonic-gate */
23857c478bd9Sstevel@tonic-gate for (i = CY_LOW_LEVEL; i < CY_LOW_LEVEL + CY_SOFT_LEVELS; i++) {
23867c478bd9Sstevel@tonic-gate cyc_softbuf_t *softbuf = &cpu->cyp_softbuf[i];
23877c478bd9Sstevel@tonic-gate uchar_t hard = softbuf->cys_hard;
23887c478bd9Sstevel@tonic-gate cyc_pcbuffer_t *pc = &softbuf->cys_buf[hard];
23897c478bd9Sstevel@tonic-gate size_t bufsize = sizeof (cyc_index_t) * (pc->cypc_sizemask + 1);
23907c478bd9Sstevel@tonic-gate
23917c478bd9Sstevel@tonic-gate /*
23927c478bd9Sstevel@tonic-gate * Assert that we're not in the middle of a resize operation.
23937c478bd9Sstevel@tonic-gate */
23947c478bd9Sstevel@tonic-gate ASSERT(hard == softbuf->cys_soft);
23957c478bd9Sstevel@tonic-gate ASSERT(hard == 0 || hard == 1);
23967c478bd9Sstevel@tonic-gate ASSERT(pc->cypc_buf != NULL);
23977c478bd9Sstevel@tonic-gate ASSERT(softbuf->cys_buf[hard ^ 1].cypc_buf == NULL);
23987c478bd9Sstevel@tonic-gate
23997c478bd9Sstevel@tonic-gate kmem_free(pc->cypc_buf, bufsize);
24007c478bd9Sstevel@tonic-gate pc->cypc_buf = NULL;
24017c478bd9Sstevel@tonic-gate }
24027c478bd9Sstevel@tonic-gate
24037c478bd9Sstevel@tonic-gate /*
24047c478bd9Sstevel@tonic-gate * Finally, clean up our remaining dynamic structures and NULL out
24057c478bd9Sstevel@tonic-gate * the cpu_cyclic pointer.
24067c478bd9Sstevel@tonic-gate */
24077c478bd9Sstevel@tonic-gate kmem_free(cpu->cyp_cyclics, cpu->cyp_size * sizeof (cyclic_t));
24087c478bd9Sstevel@tonic-gate kmem_free(cpu->cyp_heap, cpu->cyp_size * sizeof (cyc_index_t));
24097c478bd9Sstevel@tonic-gate kmem_free(cpu, sizeof (cyc_cpu_t));
24107c478bd9Sstevel@tonic-gate
24117c478bd9Sstevel@tonic-gate c->cpu_cyclic = NULL;
24127c478bd9Sstevel@tonic-gate }
24137c478bd9Sstevel@tonic-gate
24147c478bd9Sstevel@tonic-gate static int
24153ebafc60SToomas Soome cyclic_cpu_setup(cpu_setup_t what, int id, void *arg __unused)
24167c478bd9Sstevel@tonic-gate {
24177c478bd9Sstevel@tonic-gate /*
24187c478bd9Sstevel@tonic-gate * We are guaranteed that there is still/already an entry in the
24197c478bd9Sstevel@tonic-gate * cpu array for this CPU.
24207c478bd9Sstevel@tonic-gate */
24217c478bd9Sstevel@tonic-gate cpu_t *c = cpu[id];
24227c478bd9Sstevel@tonic-gate cyc_cpu_t *cyp = c->cpu_cyclic;
24237c478bd9Sstevel@tonic-gate
24247c478bd9Sstevel@tonic-gate ASSERT(MUTEX_HELD(&cpu_lock));
24257c478bd9Sstevel@tonic-gate
24267c478bd9Sstevel@tonic-gate switch (what) {
24277c478bd9Sstevel@tonic-gate case CPU_CONFIG:
24287c478bd9Sstevel@tonic-gate ASSERT(cyp == NULL);
24297c478bd9Sstevel@tonic-gate cyclic_configure(c);
24307c478bd9Sstevel@tonic-gate break;
24317c478bd9Sstevel@tonic-gate
24327c478bd9Sstevel@tonic-gate case CPU_UNCONFIG:
24337c478bd9Sstevel@tonic-gate ASSERT(cyp != NULL && cyp->cyp_state == CYS_OFFLINE);
24347c478bd9Sstevel@tonic-gate cyclic_unconfigure(c);
24357c478bd9Sstevel@tonic-gate break;
24367c478bd9Sstevel@tonic-gate
24377c478bd9Sstevel@tonic-gate default:
24387c478bd9Sstevel@tonic-gate break;
24397c478bd9Sstevel@tonic-gate }
24407c478bd9Sstevel@tonic-gate
24417c478bd9Sstevel@tonic-gate return (0);
24427c478bd9Sstevel@tonic-gate }
24437c478bd9Sstevel@tonic-gate
24447c478bd9Sstevel@tonic-gate static void
24457c478bd9Sstevel@tonic-gate cyclic_suspend_xcall(cyc_xcallarg_t *arg)
24467c478bd9Sstevel@tonic-gate {
24477c478bd9Sstevel@tonic-gate cyc_cpu_t *cpu = arg->cyx_cpu;
24487c478bd9Sstevel@tonic-gate cyc_backend_t *be = cpu->cyp_backend;
24497c478bd9Sstevel@tonic-gate cyc_cookie_t cookie;
24507c478bd9Sstevel@tonic-gate cyb_arg_t bar = be->cyb_arg;
24517c478bd9Sstevel@tonic-gate
24527c478bd9Sstevel@tonic-gate cookie = be->cyb_set_level(bar, CY_HIGH_LEVEL);
24537c478bd9Sstevel@tonic-gate
24547c478bd9Sstevel@tonic-gate CYC_TRACE1(cpu, CY_HIGH_LEVEL, "suspend-xcall", cpu->cyp_nelems);
24557c478bd9Sstevel@tonic-gate ASSERT(cpu->cyp_state == CYS_ONLINE || cpu->cyp_state == CYS_OFFLINE);
24567c478bd9Sstevel@tonic-gate
24577c478bd9Sstevel@tonic-gate /*
24587c478bd9Sstevel@tonic-gate * We won't disable this CPU unless it has a non-zero number of
24597c478bd9Sstevel@tonic-gate * elements (cpu_lock assures that no one else may be attempting
24607c478bd9Sstevel@tonic-gate * to disable this CPU).
24617c478bd9Sstevel@tonic-gate */
24627c478bd9Sstevel@tonic-gate if (cpu->cyp_nelems > 0) {
24637c478bd9Sstevel@tonic-gate ASSERT(cpu->cyp_state == CYS_ONLINE);
24647c478bd9Sstevel@tonic-gate be->cyb_disable(bar);
24657c478bd9Sstevel@tonic-gate }
24667c478bd9Sstevel@tonic-gate
24677c478bd9Sstevel@tonic-gate if (cpu->cyp_state == CYS_ONLINE)
24687c478bd9Sstevel@tonic-gate cpu->cyp_state = CYS_SUSPENDED;
24697c478bd9Sstevel@tonic-gate
24707c478bd9Sstevel@tonic-gate be->cyb_suspend(bar);
24717c478bd9Sstevel@tonic-gate be->cyb_restore_level(bar, cookie);
24727c478bd9Sstevel@tonic-gate }
24737c478bd9Sstevel@tonic-gate
24747c478bd9Sstevel@tonic-gate static void
24757c478bd9Sstevel@tonic-gate cyclic_resume_xcall(cyc_xcallarg_t *arg)
24767c478bd9Sstevel@tonic-gate {
24777c478bd9Sstevel@tonic-gate cyc_cpu_t *cpu = arg->cyx_cpu;
24787c478bd9Sstevel@tonic-gate cyc_backend_t *be = cpu->cyp_backend;
24797c478bd9Sstevel@tonic-gate cyc_cookie_t cookie;
24807c478bd9Sstevel@tonic-gate cyb_arg_t bar = be->cyb_arg;
24817c478bd9Sstevel@tonic-gate cyc_state_t state = cpu->cyp_state;
24827c478bd9Sstevel@tonic-gate
24837c478bd9Sstevel@tonic-gate cookie = be->cyb_set_level(bar, CY_HIGH_LEVEL);
24847c478bd9Sstevel@tonic-gate
24857c478bd9Sstevel@tonic-gate CYC_TRACE1(cpu, CY_HIGH_LEVEL, "resume-xcall", cpu->cyp_nelems);
24867c478bd9Sstevel@tonic-gate ASSERT(state == CYS_SUSPENDED || state == CYS_OFFLINE);
24877c478bd9Sstevel@tonic-gate
24887c478bd9Sstevel@tonic-gate be->cyb_resume(bar);
24897c478bd9Sstevel@tonic-gate
24907c478bd9Sstevel@tonic-gate /*
24917c478bd9Sstevel@tonic-gate * We won't enable this CPU unless it has a non-zero number of
24927c478bd9Sstevel@tonic-gate * elements.
24937c478bd9Sstevel@tonic-gate */
24947c478bd9Sstevel@tonic-gate if (cpu->cyp_nelems > 0) {
24957c478bd9Sstevel@tonic-gate cyclic_t *cyclic = &cpu->cyp_cyclics[cpu->cyp_heap[0]];
24967c478bd9Sstevel@tonic-gate hrtime_t exp = cyclic->cy_expire;
24977c478bd9Sstevel@tonic-gate
24987c478bd9Sstevel@tonic-gate CYC_TRACE(cpu, CY_HIGH_LEVEL, "resume-reprog", cyclic, exp);
24997c478bd9Sstevel@tonic-gate ASSERT(state == CYS_SUSPENDED);
25007c478bd9Sstevel@tonic-gate be->cyb_enable(bar);
25017c478bd9Sstevel@tonic-gate be->cyb_reprogram(bar, exp);
25027c478bd9Sstevel@tonic-gate }
25037c478bd9Sstevel@tonic-gate
25047c478bd9Sstevel@tonic-gate if (state == CYS_SUSPENDED)
25057c478bd9Sstevel@tonic-gate cpu->cyp_state = CYS_ONLINE;
25067c478bd9Sstevel@tonic-gate
25077c478bd9Sstevel@tonic-gate CYC_TRACE1(cpu, CY_HIGH_LEVEL, "resume-done", cpu->cyp_nelems);
25087c478bd9Sstevel@tonic-gate be->cyb_restore_level(bar, cookie);
25097c478bd9Sstevel@tonic-gate }
25107c478bd9Sstevel@tonic-gate
25117c478bd9Sstevel@tonic-gate static void
25127c478bd9Sstevel@tonic-gate cyclic_omni_start(cyc_id_t *idp, cyc_cpu_t *cpu)
25137c478bd9Sstevel@tonic-gate {
25147c478bd9Sstevel@tonic-gate cyc_omni_handler_t *omni = &idp->cyi_omni_hdlr;
25157c478bd9Sstevel@tonic-gate cyc_omni_cpu_t *ocpu = kmem_alloc(sizeof (cyc_omni_cpu_t), KM_SLEEP);
25167c478bd9Sstevel@tonic-gate cyc_handler_t hdlr;
25177c478bd9Sstevel@tonic-gate cyc_time_t when;
25187c478bd9Sstevel@tonic-gate
25197c478bd9Sstevel@tonic-gate CYC_PTRACE("omni-start", cpu, idp);
25207c478bd9Sstevel@tonic-gate ASSERT(MUTEX_HELD(&cpu_lock));
25217c478bd9Sstevel@tonic-gate ASSERT(cpu->cyp_state == CYS_ONLINE);
25227c478bd9Sstevel@tonic-gate ASSERT(idp->cyi_cpu == NULL);
25237c478bd9Sstevel@tonic-gate
25247c478bd9Sstevel@tonic-gate hdlr.cyh_func = NULL;
25257c478bd9Sstevel@tonic-gate hdlr.cyh_arg = NULL;
25267c478bd9Sstevel@tonic-gate hdlr.cyh_level = CY_LEVELS;
25277c478bd9Sstevel@tonic-gate
25287c478bd9Sstevel@tonic-gate when.cyt_when = 0;
25297c478bd9Sstevel@tonic-gate when.cyt_interval = 0;
25307c478bd9Sstevel@tonic-gate
25317c478bd9Sstevel@tonic-gate omni->cyo_online(omni->cyo_arg, cpu->cyp_cpu, &hdlr, &when);
25327c478bd9Sstevel@tonic-gate
25337c478bd9Sstevel@tonic-gate ASSERT(hdlr.cyh_func != NULL);
25347c478bd9Sstevel@tonic-gate ASSERT(hdlr.cyh_level < CY_LEVELS);
25357c478bd9Sstevel@tonic-gate ASSERT(when.cyt_when >= 0 && when.cyt_interval > 0);
25367c478bd9Sstevel@tonic-gate
25377c478bd9Sstevel@tonic-gate ocpu->cyo_cpu = cpu;
25387c478bd9Sstevel@tonic-gate ocpu->cyo_arg = hdlr.cyh_arg;
25397c478bd9Sstevel@tonic-gate ocpu->cyo_ndx = cyclic_add_here(cpu, &hdlr, &when, 0);
25407c478bd9Sstevel@tonic-gate ocpu->cyo_next = idp->cyi_omni_list;
25417c478bd9Sstevel@tonic-gate idp->cyi_omni_list = ocpu;
25427c478bd9Sstevel@tonic-gate }
25437c478bd9Sstevel@tonic-gate
25447c478bd9Sstevel@tonic-gate static void
25457c478bd9Sstevel@tonic-gate cyclic_omni_stop(cyc_id_t *idp, cyc_cpu_t *cpu)
25467c478bd9Sstevel@tonic-gate {
25477c478bd9Sstevel@tonic-gate cyc_omni_handler_t *omni = &idp->cyi_omni_hdlr;
25487c478bd9Sstevel@tonic-gate cyc_omni_cpu_t *ocpu = idp->cyi_omni_list, *prev = NULL;
254987a18d3fSMadhavan Venkataraman clock_t delay;
255087a18d3fSMadhavan Venkataraman int ret;
25517c478bd9Sstevel@tonic-gate
25527c478bd9Sstevel@tonic-gate CYC_PTRACE("omni-stop", cpu, idp);
25537c478bd9Sstevel@tonic-gate ASSERT(MUTEX_HELD(&cpu_lock));
25547c478bd9Sstevel@tonic-gate ASSERT(cpu->cyp_state == CYS_ONLINE);
25557c478bd9Sstevel@tonic-gate ASSERT(idp->cyi_cpu == NULL);
25567c478bd9Sstevel@tonic-gate ASSERT(ocpu != NULL);
25577c478bd9Sstevel@tonic-gate
255887a18d3fSMadhavan Venkataraman /*
255987a18d3fSMadhavan Venkataraman * Prevent a reprogram of this cyclic while we are removing it.
256087a18d3fSMadhavan Venkataraman * Otherwise, cyclic_reprogram_here() will end up sending an X-call
256187a18d3fSMadhavan Venkataraman * to the offlined CPU.
256287a18d3fSMadhavan Venkataraman */
256387a18d3fSMadhavan Venkataraman rw_enter(&idp->cyi_lock, RW_WRITER);
256487a18d3fSMadhavan Venkataraman
25657c478bd9Sstevel@tonic-gate while (ocpu != NULL && ocpu->cyo_cpu != cpu) {
25667c478bd9Sstevel@tonic-gate prev = ocpu;
25677c478bd9Sstevel@tonic-gate ocpu = ocpu->cyo_next;
25687c478bd9Sstevel@tonic-gate }
25697c478bd9Sstevel@tonic-gate
25707c478bd9Sstevel@tonic-gate /*
25717c478bd9Sstevel@tonic-gate 	 * We _must_ have found a cyc_omni_cpu which corresponds to this
25727c478bd9Sstevel@tonic-gate * CPU -- the definition of an omnipresent cyclic is that it runs
25737c478bd9Sstevel@tonic-gate * on all online CPUs.
25747c478bd9Sstevel@tonic-gate */
25757c478bd9Sstevel@tonic-gate ASSERT(ocpu != NULL);
25767c478bd9Sstevel@tonic-gate
25777c478bd9Sstevel@tonic-gate if (prev == NULL) {
25787c478bd9Sstevel@tonic-gate idp->cyi_omni_list = ocpu->cyo_next;
25797c478bd9Sstevel@tonic-gate } else {
25807c478bd9Sstevel@tonic-gate prev->cyo_next = ocpu->cyo_next;
25817c478bd9Sstevel@tonic-gate }
25827c478bd9Sstevel@tonic-gate
258387a18d3fSMadhavan Venkataraman /*
258487a18d3fSMadhavan Venkataraman * Remove the cyclic from the source. We cannot block during this
258587a18d3fSMadhavan Venkataraman * operation because we are holding the cyi_lock which can be held
258687a18d3fSMadhavan Venkataraman * by the cyclic handler via cyclic_reprogram().
258787a18d3fSMadhavan Venkataraman *
258887a18d3fSMadhavan Venkataraman * If we cannot remove the cyclic without waiting, we spin for a time,
258987a18d3fSMadhavan Venkataraman * and reattempt the (non-blocking) removal. If the handler is blocked
259087a18d3fSMadhavan Venkataraman * on the cyi_lock, then we let go of it in the spin loop to give
259187a18d3fSMadhavan Venkataraman * the handler a chance to run. Note that the removal will ultimately
259287a18d3fSMadhavan Venkataraman * succeed -- even if the cyclic handler is blocked on a resource
259387a18d3fSMadhavan Venkataraman * held by a thread which we have preempted, priority inheritance
259487a18d3fSMadhavan Venkataraman * assures that the preempted thread will preempt us and continue
259587a18d3fSMadhavan Venkataraman * to progress.
259687a18d3fSMadhavan Venkataraman */
259787a18d3fSMadhavan Venkataraman for (delay = 1; ; delay <<= 1) {
259887a18d3fSMadhavan Venkataraman /*
259987a18d3fSMadhavan Venkataraman * Before we begin this operation, disable kernel preemption.
260087a18d3fSMadhavan Venkataraman */
260187a18d3fSMadhavan Venkataraman kpreempt_disable();
260287a18d3fSMadhavan Venkataraman ret = cyclic_remove_here(ocpu->cyo_cpu, ocpu->cyo_ndx, NULL,
260387a18d3fSMadhavan Venkataraman CY_NOWAIT);
260487a18d3fSMadhavan Venkataraman /*
260587a18d3fSMadhavan Venkataraman * Enable kernel preemption while spinning.
260687a18d3fSMadhavan Venkataraman */
260787a18d3fSMadhavan Venkataraman kpreempt_enable();
260887a18d3fSMadhavan Venkataraman
260987a18d3fSMadhavan Venkataraman if (ret)
261087a18d3fSMadhavan Venkataraman break;
261187a18d3fSMadhavan Venkataraman
261287a18d3fSMadhavan Venkataraman CYC_PTRACE("remove-omni-retry", idp, ocpu->cyo_cpu);
261387a18d3fSMadhavan Venkataraman
261487a18d3fSMadhavan Venkataraman /*
261587a18d3fSMadhavan Venkataraman * Drop the RW lock to avoid a deadlock with the cyclic
261687a18d3fSMadhavan Venkataraman 	 * handler (because it can potentially call cyclic_reprogram()).
261787a18d3fSMadhavan Venkataraman */
261887a18d3fSMadhavan Venkataraman rw_exit(&idp->cyi_lock);
261987a18d3fSMadhavan Venkataraman drv_usecwait(delay);
262087a18d3fSMadhavan Venkataraman rw_enter(&idp->cyi_lock, RW_WRITER);
262187a18d3fSMadhavan Venkataraman }
262287a18d3fSMadhavan Venkataraman
262387a18d3fSMadhavan Venkataraman /*
262487a18d3fSMadhavan Venkataraman * Now that we have successfully removed the cyclic, allow the omni
262587a18d3fSMadhavan Venkataraman * cyclic to be reprogrammed on other CPUs.
262687a18d3fSMadhavan Venkataraman */
262787a18d3fSMadhavan Venkataraman rw_exit(&idp->cyi_lock);
26287c478bd9Sstevel@tonic-gate
26297c478bd9Sstevel@tonic-gate /*
26307c478bd9Sstevel@tonic-gate * The cyclic has been removed from this CPU; time to call the
26317c478bd9Sstevel@tonic-gate * omnipresent offline handler.
26327c478bd9Sstevel@tonic-gate */
26337c478bd9Sstevel@tonic-gate if (omni->cyo_offline != NULL)
26347c478bd9Sstevel@tonic-gate omni->cyo_offline(omni->cyo_arg, cpu->cyp_cpu, ocpu->cyo_arg);
26357c478bd9Sstevel@tonic-gate
26367c478bd9Sstevel@tonic-gate kmem_free(ocpu, sizeof (cyc_omni_cpu_t));
26377c478bd9Sstevel@tonic-gate }
26387c478bd9Sstevel@tonic-gate
26397c478bd9Sstevel@tonic-gate static cyc_id_t *
26407c478bd9Sstevel@tonic-gate cyclic_new_id()
26417c478bd9Sstevel@tonic-gate {
26427c478bd9Sstevel@tonic-gate cyc_id_t *idp;
26437c478bd9Sstevel@tonic-gate
26447c478bd9Sstevel@tonic-gate ASSERT(MUTEX_HELD(&cpu_lock));
26457c478bd9Sstevel@tonic-gate
26467c478bd9Sstevel@tonic-gate idp = kmem_cache_alloc(cyclic_id_cache, KM_SLEEP);
26477c478bd9Sstevel@tonic-gate
26487c478bd9Sstevel@tonic-gate /*
26497c478bd9Sstevel@tonic-gate * The cyi_cpu field of the cyc_id_t structure tracks the CPU
26507c478bd9Sstevel@tonic-gate * associated with the cyclic. If and only if this field is NULL, the
26517c478bd9Sstevel@tonic-gate * cyc_id_t is an omnipresent cyclic. Note that cyi_omni_list may be
26527c478bd9Sstevel@tonic-gate * NULL for an omnipresent cyclic while the cyclic is being created
26537c478bd9Sstevel@tonic-gate * or destroyed.
26547c478bd9Sstevel@tonic-gate */
26557c478bd9Sstevel@tonic-gate idp->cyi_cpu = NULL;
26567c478bd9Sstevel@tonic-gate idp->cyi_ndx = 0;
265787a18d3fSMadhavan Venkataraman rw_init(&idp->cyi_lock, NULL, RW_DEFAULT, NULL);
26587c478bd9Sstevel@tonic-gate
26597c478bd9Sstevel@tonic-gate idp->cyi_next = cyclic_id_head;
26607c478bd9Sstevel@tonic-gate idp->cyi_prev = NULL;
26617c478bd9Sstevel@tonic-gate idp->cyi_omni_list = NULL;
26627c478bd9Sstevel@tonic-gate
26637c478bd9Sstevel@tonic-gate if (cyclic_id_head != NULL) {
26647c478bd9Sstevel@tonic-gate ASSERT(cyclic_id_head->cyi_prev == NULL);
26657c478bd9Sstevel@tonic-gate cyclic_id_head->cyi_prev = idp;
26667c478bd9Sstevel@tonic-gate }
26677c478bd9Sstevel@tonic-gate
26687c478bd9Sstevel@tonic-gate cyclic_id_head = idp;
26697c478bd9Sstevel@tonic-gate
26707c478bd9Sstevel@tonic-gate return (idp);
26717c478bd9Sstevel@tonic-gate }
26727c478bd9Sstevel@tonic-gate
26737c478bd9Sstevel@tonic-gate /*
26747c478bd9Sstevel@tonic-gate * cyclic_id_t cyclic_add(cyc_handler_t *, cyc_time_t *)
26757c478bd9Sstevel@tonic-gate *
26767c478bd9Sstevel@tonic-gate * Overview
26777c478bd9Sstevel@tonic-gate *
26787c478bd9Sstevel@tonic-gate * cyclic_add() will create an unbound cyclic with the specified handler and
26797c478bd9Sstevel@tonic-gate * interval. The cyclic will run on a CPU which both has interrupts enabled
26807c478bd9Sstevel@tonic-gate * and is in the system CPU partition.
26817c478bd9Sstevel@tonic-gate *
26827c478bd9Sstevel@tonic-gate * Arguments and notes
26837c478bd9Sstevel@tonic-gate *
26847c478bd9Sstevel@tonic-gate * As its first argument, cyclic_add() takes a cyc_handler, which has the
26857c478bd9Sstevel@tonic-gate * following members:
26867c478bd9Sstevel@tonic-gate *
26877c478bd9Sstevel@tonic-gate * cyc_func_t cyh_func <-- Cyclic handler
26887c478bd9Sstevel@tonic-gate * void *cyh_arg <-- Argument to cyclic handler
26897c478bd9Sstevel@tonic-gate * cyc_level_t cyh_level <-- Level at which to fire; must be one of
26907c478bd9Sstevel@tonic-gate * CY_LOW_LEVEL, CY_LOCK_LEVEL or CY_HIGH_LEVEL
26917c478bd9Sstevel@tonic-gate *
26927c478bd9Sstevel@tonic-gate  * Note that cyh_level is _not_ an ipl or spl; it must be one of the
26937c478bd9Sstevel@tonic-gate * CY_*_LEVELs. This layer of abstraction allows the platform to define
26947c478bd9Sstevel@tonic-gate * the precise interrupt priority levels, within the following constraints:
26957c478bd9Sstevel@tonic-gate *
26967c478bd9Sstevel@tonic-gate * CY_LOCK_LEVEL must map to LOCK_LEVEL
26977c478bd9Sstevel@tonic-gate * CY_HIGH_LEVEL must map to an ipl greater than LOCK_LEVEL
26987c478bd9Sstevel@tonic-gate * CY_LOW_LEVEL must map to an ipl below LOCK_LEVEL
26997c478bd9Sstevel@tonic-gate *
27007c478bd9Sstevel@tonic-gate * In addition to a cyc_handler, cyclic_add() takes a cyc_time, which
27017c478bd9Sstevel@tonic-gate * has the following members:
27027c478bd9Sstevel@tonic-gate *
27037c478bd9Sstevel@tonic-gate * hrtime_t cyt_when <-- Absolute time, in nanoseconds since boot, at
27047c478bd9Sstevel@tonic-gate * which to start firing
27057c478bd9Sstevel@tonic-gate * hrtime_t cyt_interval <-- Length of interval, in nanoseconds
27067c478bd9Sstevel@tonic-gate *
27077c478bd9Sstevel@tonic-gate * gethrtime() is the time source for nanoseconds since boot. If cyt_when
27087c478bd9Sstevel@tonic-gate * is set to 0, the cyclic will start to fire when cyt_interval next
27097c478bd9Sstevel@tonic-gate * divides the number of nanoseconds since boot.
27107c478bd9Sstevel@tonic-gate *
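 * For example (an illustrative calculation, not from the original
 * text): with cyt_when set to 0 and cyt_interval set to NANOSEC, a
 * cyclic added when gethrtime() returns 2500000000 will first fire
 * at 3000000000 -- the next multiple of the one-second interval.
 *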
27117c478bd9Sstevel@tonic-gate * The cyt_interval field _must_ be filled in by the caller; one-shots are
27127c478bd9Sstevel@tonic-gate * _not_ explicitly supported by the cyclic subsystem (cyclic_add() will
27137c478bd9Sstevel@tonic-gate * assert that cyt_interval is non-zero). The maximum value for either
27147c478bd9Sstevel@tonic-gate * field is INT64_MAX; the caller is responsible for assuring that
27157c478bd9Sstevel@tonic-gate * cyt_when + cyt_interval <= INT64_MAX. Neither field may be negative.
27167c478bd9Sstevel@tonic-gate *
27177c478bd9Sstevel@tonic-gate * For an arbitrary time t in the future, the cyclic handler is guaranteed
27187c478bd9Sstevel@tonic-gate * to have been called (t - cyt_when) / cyt_interval times. This will
27197c478bd9Sstevel@tonic-gate * be true even if interrupts have been disabled for periods greater than
27207c478bd9Sstevel@tonic-gate * cyt_interval nanoseconds. In order to compensate for such periods,
27217c478bd9Sstevel@tonic-gate * the cyclic handler may be called a finite number of times with an
27227c478bd9Sstevel@tonic-gate * arbitrarily small interval.
27237c478bd9Sstevel@tonic-gate *
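 * To illustrate the guarantee (numbers invented for the example):
 * with cyt_when of 1000000000 and cyt_interval of 500000000, by
 * t = 4000000000 the handler must have fired (4000000000 -
 * 1000000000) / 500000000 = 6 times, even if some of those firings
 * were compressed together after a period of disabled interrupts.
 *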
27247c478bd9Sstevel@tonic-gate * The cyclic subsystem will not enforce any lower bound on the interval;
27257c478bd9Sstevel@tonic-gate * if the interval is less than the time required to process an interrupt,
27267c478bd9Sstevel@tonic-gate * the CPU will wedge. It's the responsibility of the caller to assure that
27277c478bd9Sstevel@tonic-gate * either the value of the interval is sane, or that its caller has
27287c478bd9Sstevel@tonic-gate * sufficient privilege to deny service (i.e. its caller is root).
27297c478bd9Sstevel@tonic-gate *
27307c478bd9Sstevel@tonic-gate * The cyclic handler is guaranteed to be single threaded, even while the
27317c478bd9Sstevel@tonic-gate * cyclic is being juggled between CPUs (see cyclic_juggle(), below).
27327c478bd9Sstevel@tonic-gate * That is, a given cyclic handler will never be executed simultaneously
27337c478bd9Sstevel@tonic-gate * on different CPUs.
27347c478bd9Sstevel@tonic-gate *
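 * As a usage sketch (my_handler and my_arg are hypothetical names,
 * not from this file), a caller might add a once-per-second,
 * low-level cyclic as follows:
 *
 *	cyc_handler_t hdlr;
 *	cyc_time_t when;
 *	cyclic_id_t id;
 *
 *	hdlr.cyh_func = my_handler;
 *	hdlr.cyh_arg = my_arg;
 *	hdlr.cyh_level = CY_LOW_LEVEL;
 *
 *	when.cyt_when = 0;            <-- fire at next interval multiple
 *	when.cyt_interval = NANOSEC;  <-- one-second interval
 *
 *	mutex_enter(&cpu_lock);       <-- see "Caller's context", below
 *	id = cyclic_add(&hdlr, &when);
 *	mutex_exit(&cpu_lock);
 *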
27357c478bd9Sstevel@tonic-gate * Return value
27367c478bd9Sstevel@tonic-gate *
27377c478bd9Sstevel@tonic-gate * cyclic_add() returns a cyclic_id_t, which is guaranteed to be a value
27387c478bd9Sstevel@tonic-gate * other than CYCLIC_NONE. cyclic_add() cannot fail.
27397c478bd9Sstevel@tonic-gate *
27407c478bd9Sstevel@tonic-gate * Caller's context
27417c478bd9Sstevel@tonic-gate *
27427c478bd9Sstevel@tonic-gate * cpu_lock must be held by the caller, and the caller must not be in
27437c478bd9Sstevel@tonic-gate * interrupt context. cyclic_add() will perform a KM_SLEEP kernel
27447c478bd9Sstevel@tonic-gate * memory allocation, so the usual rules (e.g. p_lock cannot be held)
27457c478bd9Sstevel@tonic-gate * apply. A cyclic may be added even in the presence of CPUs that have
27467c478bd9Sstevel@tonic-gate * not been configured with respect to the cyclic subsystem, but only
27477c478bd9Sstevel@tonic-gate * configured CPUs will be eligible to run the new cyclic.
27487c478bd9Sstevel@tonic-gate *
27497c478bd9Sstevel@tonic-gate * Cyclic handler's context
27507c478bd9Sstevel@tonic-gate *
27517c478bd9Sstevel@tonic-gate * Cyclic handlers will be executed in the interrupt context corresponding
27527c478bd9Sstevel@tonic-gate * to the specified level (i.e. either high, lock or low level). The
27537c478bd9Sstevel@tonic-gate * usual context rules apply.
27547c478bd9Sstevel@tonic-gate *
27557c478bd9Sstevel@tonic-gate * A cyclic handler may not grab ANY locks held by the caller of any of
27567c478bd9Sstevel@tonic-gate * cyclic_add(), cyclic_remove() or cyclic_bind(); the implementation of
27577c478bd9Sstevel@tonic-gate * these functions may require blocking on cyclic handler completion.
27587c478bd9Sstevel@tonic-gate * Moreover, cyclic handlers may not make any call back into the cyclic
27597c478bd9Sstevel@tonic-gate * subsystem.
27607c478bd9Sstevel@tonic-gate */
27617c478bd9Sstevel@tonic-gate cyclic_id_t
27627c478bd9Sstevel@tonic-gate cyclic_add(cyc_handler_t *hdlr, cyc_time_t *when)
27637c478bd9Sstevel@tonic-gate {
27647c478bd9Sstevel@tonic-gate cyc_id_t *idp = cyclic_new_id();
27657c478bd9Sstevel@tonic-gate
27667c478bd9Sstevel@tonic-gate ASSERT(MUTEX_HELD(&cpu_lock));
27677c478bd9Sstevel@tonic-gate ASSERT(when->cyt_when >= 0 && when->cyt_interval > 0);
27687c478bd9Sstevel@tonic-gate
27697c478bd9Sstevel@tonic-gate idp->cyi_cpu = cyclic_pick_cpu(NULL, NULL, NULL, 0);
27707c478bd9Sstevel@tonic-gate idp->cyi_ndx = cyclic_add_here(idp->cyi_cpu, hdlr, when, 0);
27717c478bd9Sstevel@tonic-gate
27727c478bd9Sstevel@tonic-gate return ((uintptr_t)idp);
27737c478bd9Sstevel@tonic-gate }
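
/*
 * A minimal usage sketch (illustrative only; "my_handler" and "my_arg"
 * are hypothetical caller-supplied names, and the one-second interval is
 * arbitrary):
 *
 *	cyc_handler_t hdlr;
 *	cyc_time_t when;
 *	cyclic_id_t id;
 *
 *	hdlr.cyh_func = my_handler;	<-- void my_handler(void *)
 *	hdlr.cyh_arg = my_arg;
 *	hdlr.cyh_level = CY_LOW_LEVEL;
 *
 *	when.cyt_when = 0;		<-- 0 is in the past; fire at once
 *	when.cyt_interval = NANOSEC;	<-- thereafter, once per second
 *
 *	mutex_enter(&cpu_lock);
 *	id = cyclic_add(&hdlr, &when);
 *	mutex_exit(&cpu_lock);
 */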
27747c478bd9Sstevel@tonic-gate
27757c478bd9Sstevel@tonic-gate /*
27767c478bd9Sstevel@tonic-gate * cyclic_id_t cyclic_add_omni(cyc_omni_handler_t *)
27777c478bd9Sstevel@tonic-gate *
27787c478bd9Sstevel@tonic-gate * Overview
27797c478bd9Sstevel@tonic-gate *
27807c478bd9Sstevel@tonic-gate * cyclic_add_omni() will create an omnipresent cyclic with the specified
27817c478bd9Sstevel@tonic-gate * online and offline handlers. Omnipresent cyclics run on all online
27827c478bd9Sstevel@tonic-gate * CPUs, including CPUs which have unbound interrupts disabled.
27837c478bd9Sstevel@tonic-gate *
27847c478bd9Sstevel@tonic-gate * Arguments
27857c478bd9Sstevel@tonic-gate *
27867c478bd9Sstevel@tonic-gate * As its only argument, cyclic_add_omni() takes a cyc_omni_handler, which
27877c478bd9Sstevel@tonic-gate * has the following members:
27887c478bd9Sstevel@tonic-gate *
27897c478bd9Sstevel@tonic-gate * void (*cyo_online)() <-- Online handler
27907c478bd9Sstevel@tonic-gate * void (*cyo_offline)() <-- Offline handler
27917c478bd9Sstevel@tonic-gate * void *cyo_arg <-- Argument to be passed to on/offline handlers
27927c478bd9Sstevel@tonic-gate *
27937c478bd9Sstevel@tonic-gate * Online handler
27947c478bd9Sstevel@tonic-gate *
27957c478bd9Sstevel@tonic-gate * The cyo_online member is a pointer to a function which has the following
27967c478bd9Sstevel@tonic-gate * four arguments:
27977c478bd9Sstevel@tonic-gate *
27987c478bd9Sstevel@tonic-gate * void * <-- Argument (cyo_arg)
27997c478bd9Sstevel@tonic-gate * cpu_t * <-- Pointer to CPU about to be onlined
28007c478bd9Sstevel@tonic-gate * cyc_handler_t * <-- Pointer to cyc_handler_t; must be filled in
28017c478bd9Sstevel@tonic-gate * by omni online handler
28027c478bd9Sstevel@tonic-gate * cyc_time_t * <-- Pointer to cyc_time_t; must be filled in by
28037c478bd9Sstevel@tonic-gate * omni online handler
28047c478bd9Sstevel@tonic-gate *
28057c478bd9Sstevel@tonic-gate * The omni cyclic online handler is always called _before_ the omni
28067c478bd9Sstevel@tonic-gate * cyclic begins to fire on the specified CPU. As the above argument
28077c478bd9Sstevel@tonic-gate * description implies, the online handler must fill in the two structures
28087c478bd9Sstevel@tonic-gate * passed to it: the cyc_handler_t and the cyc_time_t. These are the
28097c478bd9Sstevel@tonic-gate * same two structures passed to cyclic_add(), outlined above. This
28107c478bd9Sstevel@tonic-gate * allows the omni cyclic to have maximum flexibility; different CPUs may
28117c478bd9Sstevel@tonic-gate * optionally
28127c478bd9Sstevel@tonic-gate *
28137c478bd9Sstevel@tonic-gate * (a) have different intervals
28147c478bd9Sstevel@tonic-gate * (b) be explicitly in or out of phase with one another
28157c478bd9Sstevel@tonic-gate * (c) have different handlers
28167c478bd9Sstevel@tonic-gate * (d) have different handler arguments
28177c478bd9Sstevel@tonic-gate * (e) fire at different levels
28187c478bd9Sstevel@tonic-gate *
28197c478bd9Sstevel@tonic-gate * Of these, (e) seems somewhat dubious, but is nonetheless allowed.
28207c478bd9Sstevel@tonic-gate *
28217c478bd9Sstevel@tonic-gate * The omni online handler is called in the same context as cyclic_add(),
28227c478bd9Sstevel@tonic-gate * and has the same liberties: omni online handlers may perform KM_SLEEP
28237c478bd9Sstevel@tonic-gate * kernel memory allocations, and may grab locks which are also acquired
28247c478bd9Sstevel@tonic-gate * by cyclic handlers. However, omni cyclic online handlers may _not_
28257c478bd9Sstevel@tonic-gate * call back into the cyclic subsystem, and should be generally careful
28267c478bd9Sstevel@tonic-gate * about calling into arbitrary kernel subsystems.
28277c478bd9Sstevel@tonic-gate *
28287c478bd9Sstevel@tonic-gate * Offline handler
28297c478bd9Sstevel@tonic-gate *
28307c478bd9Sstevel@tonic-gate * The cyo_offline member is a pointer to a function which has the following
28317c478bd9Sstevel@tonic-gate * three arguments:
28327c478bd9Sstevel@tonic-gate *
28337c478bd9Sstevel@tonic-gate * void * <-- Argument (cyo_arg)
28347c478bd9Sstevel@tonic-gate * cpu_t * <-- Pointer to CPU about to be offlined
28357c478bd9Sstevel@tonic-gate * void * <-- CPU's cyclic argument (that is, value
28367c478bd9Sstevel@tonic-gate * to which cyh_arg member of the cyc_handler_t
28377c478bd9Sstevel@tonic-gate * was set in the omni online handler)
28387c478bd9Sstevel@tonic-gate *
28397c478bd9Sstevel@tonic-gate * The omni cyclic offline handler is always called _after_ the omni
28407c478bd9Sstevel@tonic-gate * cyclic has ceased firing on the specified CPU. Its purpose is to
28417c478bd9Sstevel@tonic-gate * allow cleanup of any resources dynamically allocated in the omni cyclic
28427c478bd9Sstevel@tonic-gate * online handler. The context of the offline handler is identical to
28437c478bd9Sstevel@tonic-gate * that of the online handler; the same constraints and liberties apply.
28447c478bd9Sstevel@tonic-gate *
28457c478bd9Sstevel@tonic-gate * The offline handler is optional; it may be NULL.
28467c478bd9Sstevel@tonic-gate *
28477c478bd9Sstevel@tonic-gate * Return value
28487c478bd9Sstevel@tonic-gate *
28497c478bd9Sstevel@tonic-gate * cyclic_add_omni() returns a cyclic_id_t, which is guaranteed to be a
28507c478bd9Sstevel@tonic-gate * value other than CYCLIC_NONE. cyclic_add_omni() cannot fail.
28517c478bd9Sstevel@tonic-gate *
28527c478bd9Sstevel@tonic-gate * Caller's context
28537c478bd9Sstevel@tonic-gate *
28547c478bd9Sstevel@tonic-gate * The caller's context is identical to that of cyclic_add(), specified
28557c478bd9Sstevel@tonic-gate * above.
28567c478bd9Sstevel@tonic-gate */
28577c478bd9Sstevel@tonic-gate cyclic_id_t
28587c478bd9Sstevel@tonic-gate cyclic_add_omni(cyc_omni_handler_t *omni)
28597c478bd9Sstevel@tonic-gate {
28607c478bd9Sstevel@tonic-gate cyc_id_t *idp = cyclic_new_id();
28617c478bd9Sstevel@tonic-gate cyc_cpu_t *cpu;
28627c478bd9Sstevel@tonic-gate cpu_t *c;
28637c478bd9Sstevel@tonic-gate
28647c478bd9Sstevel@tonic-gate ASSERT(MUTEX_HELD(&cpu_lock));
28657c478bd9Sstevel@tonic-gate ASSERT(omni != NULL && omni->cyo_online != NULL);
28667c478bd9Sstevel@tonic-gate
28677c478bd9Sstevel@tonic-gate idp->cyi_omni_hdlr = *omni;
28687c478bd9Sstevel@tonic-gate
28697c478bd9Sstevel@tonic-gate c = cpu_list;
28707c478bd9Sstevel@tonic-gate do {
28717c478bd9Sstevel@tonic-gate if ((cpu = c->cpu_cyclic) == NULL)
28727c478bd9Sstevel@tonic-gate continue;
28737c478bd9Sstevel@tonic-gate
28747c478bd9Sstevel@tonic-gate if (cpu->cyp_state != CYS_ONLINE) {
28757c478bd9Sstevel@tonic-gate ASSERT(cpu->cyp_state == CYS_OFFLINE);
28767c478bd9Sstevel@tonic-gate continue;
28777c478bd9Sstevel@tonic-gate }
28787c478bd9Sstevel@tonic-gate
28797c478bd9Sstevel@tonic-gate cyclic_omni_start(idp, cpu);
28807c478bd9Sstevel@tonic-gate } while ((c = c->cpu_next) != cpu_list);
28817c478bd9Sstevel@tonic-gate
28827c478bd9Sstevel@tonic-gate /*
28837c478bd9Sstevel@tonic-gate * We must have found at least one online CPU on which to run
28847c478bd9Sstevel@tonic-gate * this cyclic.
28857c478bd9Sstevel@tonic-gate */
28867c478bd9Sstevel@tonic-gate ASSERT(idp->cyi_omni_list != NULL);
28877c478bd9Sstevel@tonic-gate ASSERT(idp->cyi_cpu == NULL);
28887c478bd9Sstevel@tonic-gate
28897c478bd9Sstevel@tonic-gate return ((uintptr_t)idp);
28907c478bd9Sstevel@tonic-gate }
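
/*
 * A minimal sketch of an omnipresent cyclic (hypothetical names
 * throughout; the per-CPU allocation exists only to motivate the
 * offline handler):
 *
 *	static void
 *	my_online(void *arg, cpu_t *c, cyc_handler_t *hdlr, cyc_time_t *when)
 *	{
 *		my_state_t *sp = kmem_zalloc(sizeof (my_state_t), KM_SLEEP);
 *
 *		hdlr->cyh_func = my_percpu_handler;
 *		hdlr->cyh_arg = sp;
 *		hdlr->cyh_level = CY_LOW_LEVEL;
 *		when->cyt_when = 0;
 *		when->cyt_interval = NANOSEC;
 *	}
 *
 *	static void
 *	my_offline(void *arg, cpu_t *c, void *sp)
 *	{
 *		kmem_free(sp, sizeof (my_state_t));
 *	}
 *
 *	cyc_omni_handler_t omni;
 *
 *	omni.cyo_online = my_online;
 *	omni.cyo_offline = my_offline;
 *	omni.cyo_arg = NULL;
 *
 *	mutex_enter(&cpu_lock);
 *	id = cyclic_add_omni(&omni);
 *	mutex_exit(&cpu_lock);
 */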
28917c478bd9Sstevel@tonic-gate
28927c478bd9Sstevel@tonic-gate /*
28937c478bd9Sstevel@tonic-gate * void cyclic_remove(cyclic_id_t)
28947c478bd9Sstevel@tonic-gate *
28957c478bd9Sstevel@tonic-gate * Overview
28967c478bd9Sstevel@tonic-gate *
28977c478bd9Sstevel@tonic-gate * cyclic_remove() will remove the specified cyclic from the system.
28987c478bd9Sstevel@tonic-gate *
28997c478bd9Sstevel@tonic-gate * Arguments and notes
29007c478bd9Sstevel@tonic-gate *
29017c478bd9Sstevel@tonic-gate * The only argument is a cyclic_id returned from either cyclic_add() or
29027c478bd9Sstevel@tonic-gate * cyclic_add_omni().
29037c478bd9Sstevel@tonic-gate *
29047c478bd9Sstevel@tonic-gate * By the time cyclic_remove() returns, the caller is guaranteed that the
29057c478bd9Sstevel@tonic-gate * removed cyclic handler has completed execution (this is the same
29067c478bd9Sstevel@tonic-gate * semantic that untimeout() provides). As a result, cyclic_remove() may
29077c478bd9Sstevel@tonic-gate * need to block, waiting for the removed cyclic to complete execution.
29087c478bd9Sstevel@tonic-gate * This leads to an important constraint on the caller: no lock may be
29097c478bd9Sstevel@tonic-gate * held across cyclic_remove() that also may be acquired by a cyclic
29107c478bd9Sstevel@tonic-gate * handler.
29117c478bd9Sstevel@tonic-gate *
29127c478bd9Sstevel@tonic-gate * Return value
29137c478bd9Sstevel@tonic-gate *
29147c478bd9Sstevel@tonic-gate * None; cyclic_remove() always succeeds.
29157c478bd9Sstevel@tonic-gate *
29167c478bd9Sstevel@tonic-gate * Caller's context
29177c478bd9Sstevel@tonic-gate *
29187c478bd9Sstevel@tonic-gate * cpu_lock must be held by the caller, and the caller must not be in
29197c478bd9Sstevel@tonic-gate * interrupt context. The caller may not hold any locks which are also
29207c478bd9Sstevel@tonic-gate * grabbed by any cyclic handler. See "Arguments and notes", above.
29217c478bd9Sstevel@tonic-gate */
29227c478bd9Sstevel@tonic-gate void
29237c478bd9Sstevel@tonic-gate cyclic_remove(cyclic_id_t id)
29247c478bd9Sstevel@tonic-gate {
29257c478bd9Sstevel@tonic-gate cyc_id_t *idp = (cyc_id_t *)id;
29267c478bd9Sstevel@tonic-gate cyc_id_t *prev = idp->cyi_prev, *next = idp->cyi_next;
29277c478bd9Sstevel@tonic-gate cyc_cpu_t *cpu = idp->cyi_cpu;
29287c478bd9Sstevel@tonic-gate
29297c478bd9Sstevel@tonic-gate CYC_PTRACE("remove", idp, idp->cyi_cpu);
29307c478bd9Sstevel@tonic-gate ASSERT(MUTEX_HELD(&cpu_lock));
29317c478bd9Sstevel@tonic-gate
29327c478bd9Sstevel@tonic-gate if (cpu != NULL) {
29337c478bd9Sstevel@tonic-gate (void) cyclic_remove_here(cpu, idp->cyi_ndx, NULL, CY_WAIT);
29347c478bd9Sstevel@tonic-gate } else {
29357c478bd9Sstevel@tonic-gate ASSERT(idp->cyi_omni_list != NULL);
29367c478bd9Sstevel@tonic-gate while (idp->cyi_omni_list != NULL)
29377c478bd9Sstevel@tonic-gate cyclic_omni_stop(idp, idp->cyi_omni_list->cyo_cpu);
29387c478bd9Sstevel@tonic-gate }
29397c478bd9Sstevel@tonic-gate
29407c478bd9Sstevel@tonic-gate if (prev != NULL) {
29417c478bd9Sstevel@tonic-gate ASSERT(cyclic_id_head != idp);
29427c478bd9Sstevel@tonic-gate prev->cyi_next = next;
29437c478bd9Sstevel@tonic-gate } else {
29447c478bd9Sstevel@tonic-gate ASSERT(cyclic_id_head == idp);
29457c478bd9Sstevel@tonic-gate cyclic_id_head = next;
29467c478bd9Sstevel@tonic-gate }
29477c478bd9Sstevel@tonic-gate
29487c478bd9Sstevel@tonic-gate if (next != NULL)
29497c478bd9Sstevel@tonic-gate next->cyi_prev = prev;
29507c478bd9Sstevel@tonic-gate
29517c478bd9Sstevel@tonic-gate kmem_cache_free(cyclic_id_cache, idp);
29527c478bd9Sstevel@tonic-gate }
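
/*
 * A sketch of the locking constraint above (hypothetical lock): if the
 * cyclic handler grabs my_lock, my_lock must not be held across the
 * removal:
 *
 *	mutex_exit(&my_lock);		<-- drop any handler-shared locks
 *	mutex_enter(&cpu_lock);
 *	cyclic_remove(id);		<-- may block until the handler
 *	mutex_exit(&cpu_lock);		    completes
 */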
29537c478bd9Sstevel@tonic-gate
29547c478bd9Sstevel@tonic-gate /*
29557c478bd9Sstevel@tonic-gate * void cyclic_bind(cyclic_id_t, cpu_t *, cpupart_t *)
29567c478bd9Sstevel@tonic-gate *
29577c478bd9Sstevel@tonic-gate * Overview
29587c478bd9Sstevel@tonic-gate *
29597c478bd9Sstevel@tonic-gate * cyclic_bind() atomically changes the CPU and CPU partition bindings
29607c478bd9Sstevel@tonic-gate * of a cyclic.
29617c478bd9Sstevel@tonic-gate *
29627c478bd9Sstevel@tonic-gate * Arguments and notes
29637c478bd9Sstevel@tonic-gate *
29647c478bd9Sstevel@tonic-gate * The first argument is a cyclic_id returned from cyclic_add().
29657c478bd9Sstevel@tonic-gate * cyclic_bind() may _not_ be called on a cyclic_id returned from
29667c478bd9Sstevel@tonic-gate * cyclic_add_omni().
29677c478bd9Sstevel@tonic-gate *
29687c478bd9Sstevel@tonic-gate * The second argument specifies the CPU to which to bind the specified
29697c478bd9Sstevel@tonic-gate * cyclic. If the specified cyclic is bound to a CPU other than the one
29707c478bd9Sstevel@tonic-gate * specified, it will be unbound from its bound CPU. Unbinding the cyclic
29717c478bd9Sstevel@tonic-gate * from its CPU may cause it to be juggled to another CPU. If the specified
29727c478bd9Sstevel@tonic-gate * CPU is non-NULL, the cyclic will be subsequently rebound to the specified
29737c478bd9Sstevel@tonic-gate * CPU.
29747c478bd9Sstevel@tonic-gate *
29757c478bd9Sstevel@tonic-gate * If a CPU with bound cyclics is transitioned into the P_NOINTR state,
29767c478bd9Sstevel@tonic-gate * only cyclics not bound to the CPU can be juggled away; CPU-bound cyclics
29777c478bd9Sstevel@tonic-gate * will continue to fire on the P_NOINTR CPU. A CPU with bound cyclics
29787c478bd9Sstevel@tonic-gate * cannot be offlined (attempts to offline the CPU will return EBUSY).
29797c478bd9Sstevel@tonic-gate * Likewise, cyclics may not be bound to an offline CPU; if the caller
29807c478bd9Sstevel@tonic-gate * attempts to bind a cyclic to an offline CPU, the cyclic subsystem will
29817c478bd9Sstevel@tonic-gate * panic.
29827c478bd9Sstevel@tonic-gate *
29837c478bd9Sstevel@tonic-gate * The third argument specifies the CPU partition to which to bind the
29847c478bd9Sstevel@tonic-gate * specified cyclic. If the specified cyclic is bound to a CPU partition
29857c478bd9Sstevel@tonic-gate * other than the one specified, it will be unbound from its bound
29867c478bd9Sstevel@tonic-gate * partition. Unbinding the cyclic from its CPU partition may cause it
29877c478bd9Sstevel@tonic-gate * to be juggled to another CPU. If the specified CPU partition is
29887c478bd9Sstevel@tonic-gate * non-NULL, the cyclic will be subsequently rebound to the specified CPU
29897c478bd9Sstevel@tonic-gate * partition.
29907c478bd9Sstevel@tonic-gate *
29917c478bd9Sstevel@tonic-gate * It is the caller's responsibility to assure that the specified CPU
29927c478bd9Sstevel@tonic-gate * partition contains a CPU. If it does not, the cyclic subsystem will
29937c478bd9Sstevel@tonic-gate * panic. A CPU partition with bound cyclics cannot be destroyed (attempts
29947c478bd9Sstevel@tonic-gate * to destroy the partition will return EBUSY). If a CPU with
29957c478bd9Sstevel@tonic-gate * partition-bound cyclics is transitioned into the P_NOINTR state, cyclics
29967c478bd9Sstevel@tonic-gate * bound to the CPU's partition (but not bound to the CPU) will be juggled
29977c478bd9Sstevel@tonic-gate * away only if there exists another CPU in the partition in the P_ONLINE
29987c478bd9Sstevel@tonic-gate * state.
29997c478bd9Sstevel@tonic-gate *
30007c478bd9Sstevel@tonic-gate * It is the caller's responsibility to assure that the specified CPU and
30017c478bd9Sstevel@tonic-gate * CPU partition are self-consistent. If both parameters are non-NULL,
30027c478bd9Sstevel@tonic-gate * and the specified CPU partition does not contain the specified CPU, the
30037c478bd9Sstevel@tonic-gate * cyclic subsystem will panic.
30047c478bd9Sstevel@tonic-gate *
30057c478bd9Sstevel@tonic-gate * It is the caller's responsibility to assure that the specified CPU has
30067c478bd9Sstevel@tonic-gate * been configured with respect to the cyclic subsystem. Generally, this
30077c478bd9Sstevel@tonic-gate * is always true for valid, on-line CPUs. The only periods of time during
30087c478bd9Sstevel@tonic-gate * which this may not be true are during MP boot (i.e. after cyclic_init()
30097c478bd9Sstevel@tonic-gate * is called but before cyclic_mp_init() is called) or during dynamic
30107c478bd9Sstevel@tonic-gate * reconfiguration; cyclic_bind() should only be called with great care
30117c478bd9Sstevel@tonic-gate * from these contexts.
30127c478bd9Sstevel@tonic-gate *
30137c478bd9Sstevel@tonic-gate * Return value
30147c478bd9Sstevel@tonic-gate *
30157c478bd9Sstevel@tonic-gate * None; cyclic_bind() always succeeds.
30167c478bd9Sstevel@tonic-gate *
30177c478bd9Sstevel@tonic-gate * Caller's context
30187c478bd9Sstevel@tonic-gate *
30197c478bd9Sstevel@tonic-gate * cpu_lock must be held by the caller, and the caller must not be in
30207c478bd9Sstevel@tonic-gate * interrupt context. The caller may not hold any locks which are also
30217c478bd9Sstevel@tonic-gate * grabbed by any cyclic handler.
30227c478bd9Sstevel@tonic-gate */
30237c478bd9Sstevel@tonic-gate void
30247c478bd9Sstevel@tonic-gate cyclic_bind(cyclic_id_t id, cpu_t *d, cpupart_t *part)
30257c478bd9Sstevel@tonic-gate {
30267c478bd9Sstevel@tonic-gate cyc_id_t *idp = (cyc_id_t *)id;
30277c478bd9Sstevel@tonic-gate cyc_cpu_t *cpu = idp->cyi_cpu;
30287c478bd9Sstevel@tonic-gate cpu_t *c;
30297c478bd9Sstevel@tonic-gate uint16_t flags;
30307c478bd9Sstevel@tonic-gate
30317c478bd9Sstevel@tonic-gate CYC_PTRACE("bind", d, part);
30327c478bd9Sstevel@tonic-gate ASSERT(MUTEX_HELD(&cpu_lock));
30337c478bd9Sstevel@tonic-gate ASSERT(part == NULL || d == NULL || d->cpu_part == part);
30347c478bd9Sstevel@tonic-gate
30357c478bd9Sstevel@tonic-gate if (cpu == NULL) {
30367c478bd9Sstevel@tonic-gate ASSERT(idp->cyi_omni_list != NULL);
30377c478bd9Sstevel@tonic-gate panic("attempt to change binding of omnipresent cyclic");
30387c478bd9Sstevel@tonic-gate }
30397c478bd9Sstevel@tonic-gate
30407c478bd9Sstevel@tonic-gate c = cpu->cyp_cpu;
30417c478bd9Sstevel@tonic-gate flags = cpu->cyp_cyclics[idp->cyi_ndx].cy_flags;
30427c478bd9Sstevel@tonic-gate
30437c478bd9Sstevel@tonic-gate if (c != d && (flags & CYF_CPU_BOUND))
30447c478bd9Sstevel@tonic-gate cyclic_unbind_cpu(id);
30457c478bd9Sstevel@tonic-gate
30467c478bd9Sstevel@tonic-gate /*
30477c478bd9Sstevel@tonic-gate * Reload our cpu (we may have migrated). We don't have to reload
30487c478bd9Sstevel@tonic-gate * the flags field here; if we were CYF_PART_BOUND on entry, we are
30497c478bd9Sstevel@tonic-gate * CYF_PART_BOUND now.
30507c478bd9Sstevel@tonic-gate */
30517c478bd9Sstevel@tonic-gate cpu = idp->cyi_cpu;
30527c478bd9Sstevel@tonic-gate c = cpu->cyp_cpu;
30537c478bd9Sstevel@tonic-gate
30547c478bd9Sstevel@tonic-gate if (part != c->cpu_part && (flags & CYF_PART_BOUND))
30557c478bd9Sstevel@tonic-gate cyclic_unbind_cpupart(id);
30567c478bd9Sstevel@tonic-gate
30577c478bd9Sstevel@tonic-gate /*
30587c478bd9Sstevel@tonic-gate * Now reload the flags field, asserting that if we are CPU bound,
30597c478bd9Sstevel@tonic-gate * the CPU was specified (and likewise, if we are partition bound,
30607c478bd9Sstevel@tonic-gate * the partition was specified).
30617c478bd9Sstevel@tonic-gate */
30627c478bd9Sstevel@tonic-gate cpu = idp->cyi_cpu;
30637c478bd9Sstevel@tonic-gate c = cpu->cyp_cpu;
30647c478bd9Sstevel@tonic-gate flags = cpu->cyp_cyclics[idp->cyi_ndx].cy_flags;
30657c478bd9Sstevel@tonic-gate ASSERT(!(flags & CYF_CPU_BOUND) || c == d);
30667c478bd9Sstevel@tonic-gate ASSERT(!(flags & CYF_PART_BOUND) || c->cpu_part == part);
30677c478bd9Sstevel@tonic-gate
30687c478bd9Sstevel@tonic-gate if (!(flags & CYF_CPU_BOUND) && d != NULL)
30697c478bd9Sstevel@tonic-gate cyclic_bind_cpu(id, d);
30707c478bd9Sstevel@tonic-gate
30717c478bd9Sstevel@tonic-gate if (!(flags & CYF_PART_BOUND) && part != NULL)
30727c478bd9Sstevel@tonic-gate cyclic_bind_cpupart(id, part);
30737c478bd9Sstevel@tonic-gate }
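
/*
 * An illustrative sketch (hypothetical cpu_t pointer "cp"; id must be a
 * non-omni cyclic added with cyclic_add()): bind a cyclic to a specific
 * CPU, and later dissolve all bindings:
 *
 *	mutex_enter(&cpu_lock);
 *	cyclic_bind(id, cp, NULL);	<-- bind to cp; no partition binding
 *	...
 *	cyclic_bind(id, NULL, NULL);	<-- remove both bindings
 *	mutex_exit(&cpu_lock);
 */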
30747c478bd9Sstevel@tonic-gate
307587a18d3fSMadhavan Venkataraman int
307687a18d3fSMadhavan Venkataraman cyclic_reprogram(cyclic_id_t id, hrtime_t expiration)
307787a18d3fSMadhavan Venkataraman {
307887a18d3fSMadhavan Venkataraman cyc_id_t *idp = (cyc_id_t *)id;
307987a18d3fSMadhavan Venkataraman cyc_cpu_t *cpu;
308087a18d3fSMadhavan Venkataraman cyc_omni_cpu_t *ocpu;
308187a18d3fSMadhavan Venkataraman cyc_index_t ndx;
3082*cb2d1b02SPatrick Mooney int res = 1;
308387a18d3fSMadhavan Venkataraman
308487a18d3fSMadhavan Venkataraman ASSERT(expiration > 0);
308587a18d3fSMadhavan Venkataraman
308687a18d3fSMadhavan Venkataraman CYC_PTRACE("reprog", idp, idp->cyi_cpu);
308787a18d3fSMadhavan Venkataraman
308887a18d3fSMadhavan Venkataraman kpreempt_disable();
308987a18d3fSMadhavan Venkataraman
309087a18d3fSMadhavan Venkataraman /*
309187a18d3fSMadhavan Venkataraman * Prevent the cyclic from moving or disappearing while we reprogram.
309287a18d3fSMadhavan Venkataraman */
309387a18d3fSMadhavan Venkataraman rw_enter(&idp->cyi_lock, RW_READER);
309487a18d3fSMadhavan Venkataraman
309587a18d3fSMadhavan Venkataraman if (idp->cyi_cpu == NULL) {
309687a18d3fSMadhavan Venkataraman ASSERT(curthread->t_preempt > 0);
309787a18d3fSMadhavan Venkataraman cpu = CPU->cpu_cyclic;
309887a18d3fSMadhavan Venkataraman
309987a18d3fSMadhavan Venkataraman /*
310087a18d3fSMadhavan Venkataraman * For an omni cyclic, we reprogram the cyclic corresponding
310187a18d3fSMadhavan Venkataraman * to the current CPU. Look for it in the list.
310287a18d3fSMadhavan Venkataraman */
310387a18d3fSMadhavan Venkataraman ocpu = idp->cyi_omni_list;
310487a18d3fSMadhavan Venkataraman while (ocpu != NULL) {
310587a18d3fSMadhavan Venkataraman if (ocpu->cyo_cpu == cpu)
310687a18d3fSMadhavan Venkataraman break;
310787a18d3fSMadhavan Venkataraman ocpu = ocpu->cyo_next;
310887a18d3fSMadhavan Venkataraman }
310987a18d3fSMadhavan Venkataraman
311087a18d3fSMadhavan Venkataraman if (ocpu == NULL) {
311187a18d3fSMadhavan Venkataraman /*
311287a18d3fSMadhavan Venkataraman * Didn't find it. This means that CPU offline
311387a18d3fSMadhavan Venkataraman * must have removed it racing with us. So,
311487a18d3fSMadhavan Venkataraman * nothing to do.
311587a18d3fSMadhavan Venkataraman */
311687a18d3fSMadhavan Venkataraman rw_exit(&idp->cyi_lock);
311787a18d3fSMadhavan Venkataraman
311887a18d3fSMadhavan Venkataraman kpreempt_enable();
311987a18d3fSMadhavan Venkataraman
312087a18d3fSMadhavan Venkataraman return (0);
312187a18d3fSMadhavan Venkataraman }
312287a18d3fSMadhavan Venkataraman ndx = ocpu->cyo_ndx;
312387a18d3fSMadhavan Venkataraman } else {
312487a18d3fSMadhavan Venkataraman cpu = idp->cyi_cpu;
312587a18d3fSMadhavan Venkataraman ndx = idp->cyi_ndx;
312687a18d3fSMadhavan Venkataraman }
312787a18d3fSMadhavan Venkataraman
3128*cb2d1b02SPatrick Mooney if (cpu->cyp_cpu == CPU) {
3129*cb2d1b02SPatrick Mooney /*
3130*cb2d1b02SPatrick Mooney * If this reprogram is being done as part of a running cyclic
3131*cb2d1b02SPatrick Mooney * handler, it is possible that a racing cyclic_remove() on a
3132*cb2d1b02SPatrick Mooney * remote CPU will cause it to fail.
3133*cb2d1b02SPatrick Mooney */
3134*cb2d1b02SPatrick Mooney if (!cyclic_reprogram_cyclic(cpu, ndx, expiration, B_TRUE)) {
3135*cb2d1b02SPatrick Mooney res = 0;
3136*cb2d1b02SPatrick Mooney }
3137*cb2d1b02SPatrick Mooney } else {
313887a18d3fSMadhavan Venkataraman cyclic_reprogram_here(cpu, ndx, expiration);
3139*cb2d1b02SPatrick Mooney }
314087a18d3fSMadhavan Venkataraman
314187a18d3fSMadhavan Venkataraman /*
314287a18d3fSMadhavan Venkataraman * Allow the cyclic to be moved or removed.
314387a18d3fSMadhavan Venkataraman */
314487a18d3fSMadhavan Venkataraman rw_exit(&idp->cyi_lock);
314587a18d3fSMadhavan Venkataraman
314687a18d3fSMadhavan Venkataraman kpreempt_enable();
314787a18d3fSMadhavan Venkataraman
3148*cb2d1b02SPatrick Mooney return (res);
314987a18d3fSMadhavan Venkataraman }
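
/*
 * A sketch of the common one-shot pattern (an assumption, not the only
 * use: the cyclic is added with cyt_when and cyt_interval both set to
 * CY_INFINITY so that it never fires until explicitly reprogrammed):
 *
 *	if (cyclic_reprogram(id, gethrtime() + delta) == 0) {
 *		<-- a racing cyclic_remove() or CPU offline defeated
 *		    the reprogram; the cyclic is gone (or going)
 *	}
 */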
315087a18d3fSMadhavan Venkataraman
3151d0a94a58SPatrick Mooney /*
3152d0a94a58SPatrick Mooney * void cyclic_move_here(cyclic_id_t)
3153d0a94a58SPatrick Mooney *
3154d0a94a58SPatrick Mooney * Overview
3155d0a94a58SPatrick Mooney *
3156d0a94a58SPatrick Mooney * cyclic_move_here() attempts to shuffle a cyclic onto the current CPU.
3157d0a94a58SPatrick Mooney *
3158d0a94a58SPatrick Mooney * Arguments and notes
3159d0a94a58SPatrick Mooney *
3160d0a94a58SPatrick Mooney * The first argument is a cyclic_id returned from cyclic_add().
3161d0a94a58SPatrick Mooney * cyclic_move_here() may _not_ be called on a cyclic_id returned from
3162d0a94a58SPatrick Mooney * cyclic_add_omni() or one bound to a CPU or partition via cyclic_bind().
3163d0a94a58SPatrick Mooney *
3164d0a94a58SPatrick Mooney * This cyclic shuffling is performed on a best-effort basis. If for some
3165d0a94a58SPatrick Mooney * reason the current CPU is unsuitable or the thread migrates between CPUs
3166d0a94a58SPatrick Mooney * during the call, the function may return with the cyclic residing on some
3167d0a94a58SPatrick Mooney * other CPU.
3168d0a94a58SPatrick Mooney *
3169d0a94a58SPatrick Mooney * Return value
3170d0a94a58SPatrick Mooney *
3171d0a94a58SPatrick Mooney * None; cyclic_move_here() always reports success.
3172d0a94a58SPatrick Mooney *
3173d0a94a58SPatrick Mooney * Caller's context
3174d0a94a58SPatrick Mooney *
3175d0a94a58SPatrick Mooney * cpu_lock must be held by the caller, and the caller must not be in
3176d0a94a58SPatrick Mooney * interrupt context. The caller may not hold any locks which are also
3177d0a94a58SPatrick Mooney * grabbed by any cyclic handler.
3178d0a94a58SPatrick Mooney */
3179d0a94a58SPatrick Mooney void
3180d0a94a58SPatrick Mooney cyclic_move_here(cyclic_id_t id)
3181d0a94a58SPatrick Mooney {
3182d0a94a58SPatrick Mooney cyc_id_t *idp = (cyc_id_t *)id;
3183d0a94a58SPatrick Mooney cyc_cpu_t *cc = idp->cyi_cpu;
3184d0a94a58SPatrick Mooney cpu_t *dest = CPU;
3185d0a94a58SPatrick Mooney
3186d0a94a58SPatrick Mooney ASSERT(MUTEX_HELD(&cpu_lock));
3187d0a94a58SPatrick Mooney CYC_PTRACE("move_here", idp, dest);
3188d0a94a58SPatrick Mooney VERIFY3P(cc, !=, NULL);
3189d0a94a58SPatrick Mooney VERIFY3U(cc->cyp_cyclics[idp->cyi_ndx].cy_flags &
3190d0a94a58SPatrick Mooney (CYF_CPU_BOUND|CYF_PART_BOUND), ==, 0);
3191d0a94a58SPatrick Mooney
3192d0a94a58SPatrick Mooney if (cc->cyp_cpu == dest) {
3193d0a94a58SPatrick Mooney return;
3194d0a94a58SPatrick Mooney }
3195d0a94a58SPatrick Mooney
3196d0a94a58SPatrick Mooney /* Is the destination CPU suitable for a migration target? */
3197d0a94a58SPatrick Mooney if (dest->cpu_cyclic == NULL ||
3198d0a94a58SPatrick Mooney dest->cpu_cyclic->cyp_state == CYS_OFFLINE ||
3199d0a94a58SPatrick Mooney (dest->cpu_flags & CPU_ENABLE) == 0) {
3200d0a94a58SPatrick Mooney return;
3201d0a94a58SPatrick Mooney }
3202d0a94a58SPatrick Mooney
3203d0a94a58SPatrick Mooney cyclic_juggle_one_to(idp, dest->cpu_cyclic);
3204d0a94a58SPatrick Mooney }
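
/*
 * A minimal sketch (assuming id refers to an unbound, non-omni cyclic):
 *
 *	mutex_enter(&cpu_lock);
 *	cyclic_move_here(id);	<-- best effort; the cyclic may remain
 *	mutex_exit(&cpu_lock);	    on another CPU
 */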
3205d0a94a58SPatrick Mooney
32067c478bd9Sstevel@tonic-gate hrtime_t
32077c478bd9Sstevel@tonic-gate cyclic_getres()
32087c478bd9Sstevel@tonic-gate {
32097c478bd9Sstevel@tonic-gate return (cyclic_resolution);
32107c478bd9Sstevel@tonic-gate }
32117c478bd9Sstevel@tonic-gate
32127c478bd9Sstevel@tonic-gate void
32137c478bd9Sstevel@tonic-gate cyclic_init(cyc_backend_t *be, hrtime_t resolution)
32147c478bd9Sstevel@tonic-gate {
32157c478bd9Sstevel@tonic-gate ASSERT(MUTEX_HELD(&cpu_lock));
32167c478bd9Sstevel@tonic-gate
32177c478bd9Sstevel@tonic-gate CYC_PTRACE("init", be, resolution);
32187c478bd9Sstevel@tonic-gate cyclic_resolution = resolution;
32197c478bd9Sstevel@tonic-gate
32207c478bd9Sstevel@tonic-gate /*
32217c478bd9Sstevel@tonic-gate * Copy the passed cyc_backend into the backend template. This must
32227c478bd9Sstevel@tonic-gate * be done before the CPU can be configured.
32237c478bd9Sstevel@tonic-gate */
32247c478bd9Sstevel@tonic-gate bcopy(be, &cyclic_backend, sizeof (cyc_backend_t));
32257c478bd9Sstevel@tonic-gate
32267c478bd9Sstevel@tonic-gate /*
32277c478bd9Sstevel@tonic-gate * It's safe to look at the "CPU" pointer without disabling kernel
32287c478bd9Sstevel@tonic-gate * preemption; cyclic_init() is called only during startup by the
32297c478bd9Sstevel@tonic-gate * cyclic backend.
32307c478bd9Sstevel@tonic-gate */
32317c478bd9Sstevel@tonic-gate cyclic_configure(CPU);
32327c478bd9Sstevel@tonic-gate cyclic_online(CPU);
32337c478bd9Sstevel@tonic-gate }
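
/*
 * A sketch of the boot-time ordering (the backend structure and
 * resolution value are backend-specific; "my_backend" and "res" are
 * stand-ins):
 *
 *	mutex_enter(&cpu_lock);
 *	cyclic_init(&my_backend, res);	<-- boot CPU, called by the backend
 *	mutex_exit(&cpu_lock);
 *	...
 *	cyclic_mp_init();		<-- once the other CPUs are started
 */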
32347c478bd9Sstevel@tonic-gate
32357c478bd9Sstevel@tonic-gate /*
32367c478bd9Sstevel@tonic-gate * It is assumed that cyclic_mp_init() is called some time after cyclic
32377c478bd9Sstevel@tonic-gate * init (and therefore, after cpu0 has been initialized). We grab cpu_lock,
32387c478bd9Sstevel@tonic-gate * find the already initialized CPU, and initialize every other CPU with the
32397c478bd9Sstevel@tonic-gate * same backend. Finally, we register a cpu_setup function.
32407c478bd9Sstevel@tonic-gate */
32417c478bd9Sstevel@tonic-gate void
32427c478bd9Sstevel@tonic-gate cyclic_mp_init()
32437c478bd9Sstevel@tonic-gate {
32447c478bd9Sstevel@tonic-gate cpu_t *c;
32457c478bd9Sstevel@tonic-gate
32467c478bd9Sstevel@tonic-gate mutex_enter(&cpu_lock);
32477c478bd9Sstevel@tonic-gate
32487c478bd9Sstevel@tonic-gate c = cpu_list;
32497c478bd9Sstevel@tonic-gate do {
32507c478bd9Sstevel@tonic-gate if (c->cpu_cyclic == NULL) {
32517c478bd9Sstevel@tonic-gate cyclic_configure(c);
32527c478bd9Sstevel@tonic-gate cyclic_online(c);
32537c478bd9Sstevel@tonic-gate }
32547c478bd9Sstevel@tonic-gate } while ((c = c->cpu_next) != cpu_list);
32557c478bd9Sstevel@tonic-gate
32563ebafc60SToomas Soome register_cpu_setup_func(cyclic_cpu_setup, NULL);
32577c478bd9Sstevel@tonic-gate mutex_exit(&cpu_lock);
32587c478bd9Sstevel@tonic-gate }
32597c478bd9Sstevel@tonic-gate
32607c478bd9Sstevel@tonic-gate /*
32617c478bd9Sstevel@tonic-gate * int cyclic_juggle(cpu_t *)
32627c478bd9Sstevel@tonic-gate *
32637c478bd9Sstevel@tonic-gate * Overview
32647c478bd9Sstevel@tonic-gate *
32657c478bd9Sstevel@tonic-gate * cyclic_juggle() juggles as many cyclics as possible away from the
32667c478bd9Sstevel@tonic-gate * specified CPU; all remaining cyclics on the CPU will either be CPU-
32677c478bd9Sstevel@tonic-gate * or partition-bound.
32687c478bd9Sstevel@tonic-gate *
32697c478bd9Sstevel@tonic-gate * Arguments and notes
32707c478bd9Sstevel@tonic-gate *
32717c478bd9Sstevel@tonic-gate * The only argument to cyclic_juggle() is the CPU from which cyclics
32727c478bd9Sstevel@tonic-gate * should be juggled. CPU-bound cyclics are never juggled; partition-bound
32737c478bd9Sstevel@tonic-gate * cyclics are only juggled if the specified CPU is in the P_NOINTR state
32747c478bd9Sstevel@tonic-gate * and there exists a P_ONLINE CPU in the partition. The cyclic subsystem
32757c478bd9Sstevel@tonic-gate * assures that a cyclic will never fire late or spuriously, even while
32767c478bd9Sstevel@tonic-gate * being juggled.
32777c478bd9Sstevel@tonic-gate *
32787c478bd9Sstevel@tonic-gate * Return value
32797c478bd9Sstevel@tonic-gate *
32807c478bd9Sstevel@tonic-gate * cyclic_juggle() returns a non-zero value if all cyclics were able to
32817c478bd9Sstevel@tonic-gate * be juggled away from the CPU, and zero if one or more cyclics could
32827c478bd9Sstevel@tonic-gate * not be juggled away.
32837c478bd9Sstevel@tonic-gate *
32847c478bd9Sstevel@tonic-gate * Caller's context
32857c478bd9Sstevel@tonic-gate *
32867c478bd9Sstevel@tonic-gate * cpu_lock must be held by the caller, and the caller must not be in
32877c478bd9Sstevel@tonic-gate * interrupt context. The caller may not hold any locks which are also
32887c478bd9Sstevel@tonic-gate * grabbed by any cyclic handler. While cyclic_juggle() _may_ be called
32897c478bd9Sstevel@tonic-gate * in any context satisfying these constraints, it _must_ be called
32907c478bd9Sstevel@tonic-gate * immediately after clearing CPU_ENABLE (i.e. before dropping cpu_lock).
32917c478bd9Sstevel@tonic-gate * Failure to do so could result in an assertion failure in the cyclic
32927c478bd9Sstevel@tonic-gate * subsystem.
32937c478bd9Sstevel@tonic-gate */
32947c478bd9Sstevel@tonic-gate int
32957c478bd9Sstevel@tonic-gate cyclic_juggle(cpu_t *c)
32967c478bd9Sstevel@tonic-gate {
32977c478bd9Sstevel@tonic-gate cyc_cpu_t *cpu = c->cpu_cyclic;
32987c478bd9Sstevel@tonic-gate cyc_id_t *idp;
32997c478bd9Sstevel@tonic-gate int all_juggled = 1;
33007c478bd9Sstevel@tonic-gate
33017c478bd9Sstevel@tonic-gate CYC_PTRACE1("juggle", c);
33027c478bd9Sstevel@tonic-gate ASSERT(MUTEX_HELD(&cpu_lock));
33037c478bd9Sstevel@tonic-gate
33047c478bd9Sstevel@tonic-gate /*
33057c478bd9Sstevel@tonic-gate * We'll go through each cyclic on the CPU, attempting to juggle
33067c478bd9Sstevel@tonic-gate * each one elsewhere.
33077c478bd9Sstevel@tonic-gate */
33087c478bd9Sstevel@tonic-gate for (idp = cyclic_id_head; idp != NULL; idp = idp->cyi_next) {
33097c478bd9Sstevel@tonic-gate if (idp->cyi_cpu != cpu)
33107c478bd9Sstevel@tonic-gate continue;
33117c478bd9Sstevel@tonic-gate
33127c478bd9Sstevel@tonic-gate if (cyclic_juggle_one(idp) == 0) {
33137c478bd9Sstevel@tonic-gate all_juggled = 0;
33147c478bd9Sstevel@tonic-gate continue;
33157c478bd9Sstevel@tonic-gate }
33167c478bd9Sstevel@tonic-gate
33177c478bd9Sstevel@tonic-gate ASSERT(idp->cyi_cpu != cpu);
33187c478bd9Sstevel@tonic-gate }
33197c478bd9Sstevel@tonic-gate
33207c478bd9Sstevel@tonic-gate return (all_juggled);
33217c478bd9Sstevel@tonic-gate }
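
/*
 * A sketch of the required ordering when disabling interrupts on a CPU
 * (as performed by the processor management subsystem):
 *
 *	mutex_enter(&cpu_lock);
 *	cp->cpu_flags &= ~CPU_ENABLE;	<-- clear CPU_ENABLE, then ...
 *	(void) cyclic_juggle(cp);	<-- ... juggle before dropping
 *	mutex_exit(&cpu_lock);		    cpu_lock
 */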
33227c478bd9Sstevel@tonic-gate
33237c478bd9Sstevel@tonic-gate /*
33247c478bd9Sstevel@tonic-gate * int cyclic_offline(cpu_t *)
33257c478bd9Sstevel@tonic-gate *
33267c478bd9Sstevel@tonic-gate * Overview
33277c478bd9Sstevel@tonic-gate *
33287c478bd9Sstevel@tonic-gate * cyclic_offline() offlines the cyclic subsystem on the specified CPU.
33297c478bd9Sstevel@tonic-gate *
33307c478bd9Sstevel@tonic-gate * Arguments and notes
33317c478bd9Sstevel@tonic-gate *
33327c478bd9Sstevel@tonic-gate * The only argument to cyclic_offline() is a CPU to offline.
33337c478bd9Sstevel@tonic-gate * cyclic_offline() will attempt to juggle cyclics away from the specified
33347c478bd9Sstevel@tonic-gate * CPU.
33357c478bd9Sstevel@tonic-gate *
33367c478bd9Sstevel@tonic-gate * Return value
33377c478bd9Sstevel@tonic-gate *
33387c478bd9Sstevel@tonic-gate * cyclic_offline() returns 1 if all cyclics on the CPU were juggled away
33397c478bd9Sstevel@tonic-gate * and the cyclic subsystem on the CPU was successfully offlined.
33407c478bd9Sstevel@tonic-gate * cyclic_offline() returns 0 if some cyclics remain, blocking the cyclic
33417c478bd9Sstevel@tonic-gate * offline operation. All remaining cyclics on the CPU will either be
33427c478bd9Sstevel@tonic-gate * CPU- or partition-bound.
33437c478bd9Sstevel@tonic-gate *
33447c478bd9Sstevel@tonic-gate * See the "Arguments and notes" of cyclic_juggle(), above, for more detail
33457c478bd9Sstevel@tonic-gate * on cyclic juggling.
33467c478bd9Sstevel@tonic-gate *
33477c478bd9Sstevel@tonic-gate * Caller's context
33487c478bd9Sstevel@tonic-gate *
33497c478bd9Sstevel@tonic-gate * The only caller of cyclic_offline() should be the processor management
33507c478bd9Sstevel@tonic-gate * subsystem. It is expected that the caller of cyclic_offline() will
33517c478bd9Sstevel@tonic-gate * offline the CPU immediately after cyclic_offline() returns success (i.e.
33527c478bd9Sstevel@tonic-gate * before dropping cpu_lock). Moreover, it is expected that the caller will
33537c478bd9Sstevel@tonic-gate * fail the CPU offline operation if cyclic_offline() returns failure.
33547c478bd9Sstevel@tonic-gate */
33557c478bd9Sstevel@tonic-gate int
33567c478bd9Sstevel@tonic-gate cyclic_offline(cpu_t *c)
33577c478bd9Sstevel@tonic-gate {
33587c478bd9Sstevel@tonic-gate cyc_cpu_t *cpu = c->cpu_cyclic;
33597c478bd9Sstevel@tonic-gate cyc_id_t *idp;
33607c478bd9Sstevel@tonic-gate
33617c478bd9Sstevel@tonic-gate CYC_PTRACE1("offline", cpu);
33627c478bd9Sstevel@tonic-gate ASSERT(MUTEX_HELD(&cpu_lock));
33637c478bd9Sstevel@tonic-gate
33647c478bd9Sstevel@tonic-gate if (!cyclic_juggle(c))
33657c478bd9Sstevel@tonic-gate return (0);
33667c478bd9Sstevel@tonic-gate
33677c478bd9Sstevel@tonic-gate /*
33687c478bd9Sstevel@tonic-gate * This CPU is headed offline; we need to now stop omnipresent
33697c478bd9Sstevel@tonic-gate * cyclic firing on this CPU.
33707c478bd9Sstevel@tonic-gate */
33717c478bd9Sstevel@tonic-gate for (idp = cyclic_id_head; idp != NULL; idp = idp->cyi_next) {
33727c478bd9Sstevel@tonic-gate if (idp->cyi_cpu != NULL)
33737c478bd9Sstevel@tonic-gate continue;
33747c478bd9Sstevel@tonic-gate
33757c478bd9Sstevel@tonic-gate /*
33767c478bd9Sstevel@tonic-gate * We cannot possibly be offlining the last CPU; cyi_omni_list
33777c478bd9Sstevel@tonic-gate * must be non-NULL.
33787c478bd9Sstevel@tonic-gate */
33797c478bd9Sstevel@tonic-gate ASSERT(idp->cyi_omni_list != NULL);
33807c478bd9Sstevel@tonic-gate cyclic_omni_stop(idp, cpu);
33817c478bd9Sstevel@tonic-gate }
33827c478bd9Sstevel@tonic-gate
33837c478bd9Sstevel@tonic-gate ASSERT(cpu->cyp_state == CYS_ONLINE);
33847c478bd9Sstevel@tonic-gate cpu->cyp_state = CYS_OFFLINE;
33857c478bd9Sstevel@tonic-gate
33867c478bd9Sstevel@tonic-gate return (1);
33877c478bd9Sstevel@tonic-gate }
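
/*
 * A sketch of the expected caller behavior (processor management code;
 * "cp" is the CPU being offlined):
 *
 *	ASSERT(MUTEX_HELD(&cpu_lock));
 *	if (!cyclic_offline(cp))
 *		return (EBUSY);	<-- fail the CPU offline operation
 *	<-- offline cp before dropping cpu_lock
 */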
33887c478bd9Sstevel@tonic-gate
33897c478bd9Sstevel@tonic-gate /*
33907c478bd9Sstevel@tonic-gate * void cyclic_online(cpu_t *)
33917c478bd9Sstevel@tonic-gate *
33927c478bd9Sstevel@tonic-gate * Overview
33937c478bd9Sstevel@tonic-gate *
33947c478bd9Sstevel@tonic-gate * cyclic_online() onlines a CPU previously offlined with cyclic_offline().
33957c478bd9Sstevel@tonic-gate *
33967c478bd9Sstevel@tonic-gate * Arguments and notes
33977c478bd9Sstevel@tonic-gate *
33987c478bd9Sstevel@tonic-gate * cyclic_online()'s only argument is a CPU to online. The specified
33997c478bd9Sstevel@tonic-gate * CPU must have been previously offlined with cyclic_offline(). After
34007c478bd9Sstevel@tonic-gate * cyclic_online() returns, the specified CPU will be eligible to execute
34017c478bd9Sstevel@tonic-gate * cyclics.
34027c478bd9Sstevel@tonic-gate *
34037c478bd9Sstevel@tonic-gate * Return value
34047c478bd9Sstevel@tonic-gate *
34057c478bd9Sstevel@tonic-gate * None; cyclic_online() always succeeds.
34067c478bd9Sstevel@tonic-gate *
34077c478bd9Sstevel@tonic-gate * Caller's context
34087c478bd9Sstevel@tonic-gate *
34097c478bd9Sstevel@tonic-gate * cyclic_online() should only be called by the processor management
34107c478bd9Sstevel@tonic-gate * subsystem; cpu_lock must be held.
34117c478bd9Sstevel@tonic-gate */
34127c478bd9Sstevel@tonic-gate void
34137c478bd9Sstevel@tonic-gate cyclic_online(cpu_t *c)
34147c478bd9Sstevel@tonic-gate {
34157c478bd9Sstevel@tonic-gate cyc_cpu_t *cpu = c->cpu_cyclic;
34167c478bd9Sstevel@tonic-gate cyc_id_t *idp;
34177c478bd9Sstevel@tonic-gate
34187c478bd9Sstevel@tonic-gate CYC_PTRACE1("online", cpu);
34197c478bd9Sstevel@tonic-gate ASSERT(c->cpu_flags & CPU_ENABLE);
34207c478bd9Sstevel@tonic-gate ASSERT(MUTEX_HELD(&cpu_lock));
34217c478bd9Sstevel@tonic-gate ASSERT(cpu->cyp_state == CYS_OFFLINE);
34227c478bd9Sstevel@tonic-gate
34237c478bd9Sstevel@tonic-gate cpu->cyp_state = CYS_ONLINE;
34247c478bd9Sstevel@tonic-gate
34257c478bd9Sstevel@tonic-gate /*
34267c478bd9Sstevel@tonic-gate * Now that this CPU is open for business, we need to start firing
34277c478bd9Sstevel@tonic-gate * all omnipresent cyclics on it.
34287c478bd9Sstevel@tonic-gate */
34297c478bd9Sstevel@tonic-gate for (idp = cyclic_id_head; idp != NULL; idp = idp->cyi_next) {
34307c478bd9Sstevel@tonic-gate if (idp->cyi_cpu != NULL)
34317c478bd9Sstevel@tonic-gate continue;
34327c478bd9Sstevel@tonic-gate
34337c478bd9Sstevel@tonic-gate cyclic_omni_start(idp, cpu);
34347c478bd9Sstevel@tonic-gate }
34357c478bd9Sstevel@tonic-gate }
34367c478bd9Sstevel@tonic-gate
34377c478bd9Sstevel@tonic-gate /*
34387c478bd9Sstevel@tonic-gate * void cyclic_move_in(cpu_t *)
34397c478bd9Sstevel@tonic-gate *
34407c478bd9Sstevel@tonic-gate * Overview
34417c478bd9Sstevel@tonic-gate *
34427c478bd9Sstevel@tonic-gate * cyclic_move_in() is called by the CPU partition code immediately after
34437c478bd9Sstevel@tonic-gate * the specified CPU has moved into a new partition.
34447c478bd9Sstevel@tonic-gate *
34457c478bd9Sstevel@tonic-gate * Arguments and notes
34467c478bd9Sstevel@tonic-gate *
34477c478bd9Sstevel@tonic-gate * The only argument to cyclic_move_in() is a CPU which has moved into a
34487c478bd9Sstevel@tonic-gate * new partition. If the specified CPU is P_ONLINE, and every other
34497c478bd9Sstevel@tonic-gate * CPU in the specified CPU's new partition is P_NOINTR, cyclic_move_in()
34507c478bd9Sstevel@tonic-gate * will juggle all partition-bound, CPU-unbound cyclics to the specified
34517c478bd9Sstevel@tonic-gate * CPU.
34527c478bd9Sstevel@tonic-gate *
34537c478bd9Sstevel@tonic-gate * Return value
34547c478bd9Sstevel@tonic-gate *
34557c478bd9Sstevel@tonic-gate * None; cyclic_move_in() always succeeds.
34567c478bd9Sstevel@tonic-gate *
34577c478bd9Sstevel@tonic-gate * Caller's context
34587c478bd9Sstevel@tonic-gate *
34597c478bd9Sstevel@tonic-gate * cyclic_move_in() should _only_ be called immediately after a CPU has
34607c478bd9Sstevel@tonic-gate * moved into a new partition, with cpu_lock held. As with other calls
34617c478bd9Sstevel@tonic-gate * into the cyclic subsystem, no lock may be held which is also grabbed
34627c478bd9Sstevel@tonic-gate * by any cyclic handler.
34637c478bd9Sstevel@tonic-gate */
34647c478bd9Sstevel@tonic-gate void
34657c478bd9Sstevel@tonic-gate cyclic_move_in(cpu_t *d)
34667c478bd9Sstevel@tonic-gate {
34677c478bd9Sstevel@tonic-gate cyc_id_t *idp;
34687c478bd9Sstevel@tonic-gate cyc_cpu_t *dest = d->cpu_cyclic;
34697c478bd9Sstevel@tonic-gate cyclic_t *cyclic;
34707c478bd9Sstevel@tonic-gate cpupart_t *part = d->cpu_part;
34717c478bd9Sstevel@tonic-gate
34727c478bd9Sstevel@tonic-gate CYC_PTRACE("move-in", dest, part);
34737c478bd9Sstevel@tonic-gate ASSERT(MUTEX_HELD(&cpu_lock));
34747c478bd9Sstevel@tonic-gate
34757c478bd9Sstevel@tonic-gate /*
34767c478bd9Sstevel@tonic-gate * Look for CYF_PART_BOUND cyclics in the new partition. If
34777c478bd9Sstevel@tonic-gate * we find one, check to see if it is currently on a CPU which has
34787c478bd9Sstevel@tonic-gate * interrupts disabled. If it is (and if this CPU currently has
34797c478bd9Sstevel@tonic-gate * interrupts enabled), we'll juggle those cyclics over here.
34807c478bd9Sstevel@tonic-gate */
34817c478bd9Sstevel@tonic-gate if (!(d->cpu_flags & CPU_ENABLE)) {
34827c478bd9Sstevel@tonic-gate CYC_PTRACE1("move-in-none", dest);
34837c478bd9Sstevel@tonic-gate return;
34847c478bd9Sstevel@tonic-gate }
34857c478bd9Sstevel@tonic-gate
34867c478bd9Sstevel@tonic-gate for (idp = cyclic_id_head; idp != NULL; idp = idp->cyi_next) {
34877c478bd9Sstevel@tonic-gate cyc_cpu_t *cpu = idp->cyi_cpu;
34887c478bd9Sstevel@tonic-gate cpu_t *c;
34897c478bd9Sstevel@tonic-gate
34907c478bd9Sstevel@tonic-gate /*
34917c478bd9Sstevel@tonic-gate * Omnipresent cyclics are exempt from juggling.
34927c478bd9Sstevel@tonic-gate */
34937c478bd9Sstevel@tonic-gate if (cpu == NULL)
34947c478bd9Sstevel@tonic-gate continue;
34957c478bd9Sstevel@tonic-gate
34967c478bd9Sstevel@tonic-gate c = cpu->cyp_cpu;
34977c478bd9Sstevel@tonic-gate
34987c478bd9Sstevel@tonic-gate if (c->cpu_part != part || (c->cpu_flags & CPU_ENABLE))
34997c478bd9Sstevel@tonic-gate continue;
35007c478bd9Sstevel@tonic-gate
35017c478bd9Sstevel@tonic-gate cyclic = &cpu->cyp_cyclics[idp->cyi_ndx];
35027c478bd9Sstevel@tonic-gate
35037c478bd9Sstevel@tonic-gate if (cyclic->cy_flags & CYF_CPU_BOUND)
35047c478bd9Sstevel@tonic-gate continue;
35057c478bd9Sstevel@tonic-gate
35067c478bd9Sstevel@tonic-gate /*
35077c478bd9Sstevel@tonic-gate * We know that this cyclic is bound to its processor set
35087c478bd9Sstevel@tonic-gate * (otherwise, it would not be on a CPU with interrupts
35097c478bd9Sstevel@tonic-gate * disabled); juggle it to our CPU.
35107c478bd9Sstevel@tonic-gate */
35117c478bd9Sstevel@tonic-gate ASSERT(cyclic->cy_flags & CYF_PART_BOUND);
35127c478bd9Sstevel@tonic-gate cyclic_juggle_one_to(idp, dest);
35137c478bd9Sstevel@tonic-gate }
35147c478bd9Sstevel@tonic-gate
35157c478bd9Sstevel@tonic-gate CYC_PTRACE1("move-in-done", dest);
35167c478bd9Sstevel@tonic-gate }
35177c478bd9Sstevel@tonic-gate
35187c478bd9Sstevel@tonic-gate /*
35197c478bd9Sstevel@tonic-gate * int cyclic_move_out(cpu_t *)
35207c478bd9Sstevel@tonic-gate *
35217c478bd9Sstevel@tonic-gate * Overview
35227c478bd9Sstevel@tonic-gate *
35237c478bd9Sstevel@tonic-gate * cyclic_move_out() is called by the CPU partition code immediately before
35247c478bd9Sstevel@tonic-gate * the specified CPU is to move out of its partition.
35257c478bd9Sstevel@tonic-gate *
35267c478bd9Sstevel@tonic-gate * Arguments and notes
35277c478bd9Sstevel@tonic-gate *
35287c478bd9Sstevel@tonic-gate * The only argument to cyclic_move_out() is a CPU which is to move out of
35297c478bd9Sstevel@tonic-gate * its partition.
35307c478bd9Sstevel@tonic-gate *
35317c478bd9Sstevel@tonic-gate * cyclic_move_out() will attempt to juggle away all partition-bound
35327c478bd9Sstevel@tonic-gate * cyclics. If the specified CPU is the last CPU in a partition with
35337c478bd9Sstevel@tonic-gate * partition-bound cyclics, cyclic_move_out() will fail. If there exists
35347c478bd9Sstevel@tonic-gate * a partition-bound cyclic which is CPU-bound to the specified CPU,
35357c478bd9Sstevel@tonic-gate * cyclic_move_out() will fail.
35367c478bd9Sstevel@tonic-gate *
35377c478bd9Sstevel@tonic-gate * Note that cyclic_move_out() will _only_ attempt to juggle away
35387c478bd9Sstevel@tonic-gate * partition-bound cyclics; CPU-bound cyclics which are not partition-bound
35397c478bd9Sstevel@tonic-gate * and unbound cyclics are not affected by changing the partition
35407c478bd9Sstevel@tonic-gate * affiliation of the CPU.
35417c478bd9Sstevel@tonic-gate *
35427c478bd9Sstevel@tonic-gate * Return value
35437c478bd9Sstevel@tonic-gate *
35447c478bd9Sstevel@tonic-gate * cyclic_move_out() returns 1 if all partition-bound cyclics on the CPU
35457c478bd9Sstevel@tonic-gate * were juggled away; 0 if some cyclics remain.
35467c478bd9Sstevel@tonic-gate *
35477c478bd9Sstevel@tonic-gate * Caller's context
35487c478bd9Sstevel@tonic-gate *
35497c478bd9Sstevel@tonic-gate * cyclic_move_out() should _only_ be called immediately before a CPU has
35507c478bd9Sstevel@tonic-gate * moved out of its partition, with cpu_lock held. It is expected that
35517c478bd9Sstevel@tonic-gate * the caller of cyclic_move_out() will change the processor set affiliation
35527c478bd9Sstevel@tonic-gate * of the specified CPU immediately after cyclic_move_out() returns
35537c478bd9Sstevel@tonic-gate * success (i.e. before dropping cpu_lock). Moreover, it is expected that
35547c478bd9Sstevel@tonic-gate * the caller will fail the CPU repartitioning operation if cyclic_move_out()
35557c478bd9Sstevel@tonic-gate * returns failure. As with other calls into the cyclic subsystem, no lock
35567c478bd9Sstevel@tonic-gate * may be held which is also grabbed by any cyclic handler.
35577c478bd9Sstevel@tonic-gate */
35587c478bd9Sstevel@tonic-gate int
35597c478bd9Sstevel@tonic-gate cyclic_move_out(cpu_t *c)
35607c478bd9Sstevel@tonic-gate {
35617c478bd9Sstevel@tonic-gate cyc_id_t *idp;
35627c478bd9Sstevel@tonic-gate cyc_cpu_t *cpu = c->cpu_cyclic, *dest;
35637c478bd9Sstevel@tonic-gate cyclic_t *cyclic, *cyclics = cpu->cyp_cyclics;
35647c478bd9Sstevel@tonic-gate cpupart_t *part = c->cpu_part;
35657c478bd9Sstevel@tonic-gate
35667c478bd9Sstevel@tonic-gate CYC_PTRACE1("move-out", cpu);
35677c478bd9Sstevel@tonic-gate ASSERT(MUTEX_HELD(&cpu_lock));
35687c478bd9Sstevel@tonic-gate
35697c478bd9Sstevel@tonic-gate /*
35707c478bd9Sstevel@tonic-gate * If there are any CYF_PART_BOUND cyclics on this CPU, we need
35717c478bd9Sstevel@tonic-gate * to try to juggle them away.
35727c478bd9Sstevel@tonic-gate */
35737c478bd9Sstevel@tonic-gate for (idp = cyclic_id_head; idp != NULL; idp = idp->cyi_next) {
35747c478bd9Sstevel@tonic-gate
35757c478bd9Sstevel@tonic-gate if (idp->cyi_cpu != cpu)
35767c478bd9Sstevel@tonic-gate continue;
35777c478bd9Sstevel@tonic-gate
35787c478bd9Sstevel@tonic-gate cyclic = &cyclics[idp->cyi_ndx];
35797c478bd9Sstevel@tonic-gate
35807c478bd9Sstevel@tonic-gate if (!(cyclic->cy_flags & CYF_PART_BOUND))
35817c478bd9Sstevel@tonic-gate continue;
35827c478bd9Sstevel@tonic-gate
35837c478bd9Sstevel@tonic-gate dest = cyclic_pick_cpu(part, c, c, cyclic->cy_flags);
35847c478bd9Sstevel@tonic-gate
35857c478bd9Sstevel@tonic-gate if (dest == NULL) {
35867c478bd9Sstevel@tonic-gate /*
35877c478bd9Sstevel@tonic-gate * We can't juggle this cyclic; we need to return
35887c478bd9Sstevel@tonic-gate * failure (we won't bother trying to juggle away
35897c478bd9Sstevel@tonic-gate * other cyclics).
35907c478bd9Sstevel@tonic-gate */
35917c478bd9Sstevel@tonic-gate CYC_PTRACE("move-out-fail", cpu, idp);
35927c478bd9Sstevel@tonic-gate return (0);
35937c478bd9Sstevel@tonic-gate }
35947c478bd9Sstevel@tonic-gate cyclic_juggle_one_to(idp, dest);
35957c478bd9Sstevel@tonic-gate }
35967c478bd9Sstevel@tonic-gate
35977c478bd9Sstevel@tonic-gate CYC_PTRACE1("move-out-done", cpu);
35987c478bd9Sstevel@tonic-gate return (1);
35997c478bd9Sstevel@tonic-gate }
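
/*
 * A sketch of the expected caller behavior (CPU partition code; "cp" is
 * the CPU changing partitions):
 *
 *	ASSERT(MUTEX_HELD(&cpu_lock));
 *	if (!cyclic_move_out(cp))
 *		return (EBUSY);	<-- fail the repartitioning operation
 *	<-- change cp's partition before dropping cpu_lock, then:
 *	cyclic_move_in(cp);
 */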
36007c478bd9Sstevel@tonic-gate
36017c478bd9Sstevel@tonic-gate /*
36027c478bd9Sstevel@tonic-gate * void cyclic_suspend()
36037c478bd9Sstevel@tonic-gate *
36047c478bd9Sstevel@tonic-gate * Overview
36057c478bd9Sstevel@tonic-gate *
36067c478bd9Sstevel@tonic-gate * cyclic_suspend() suspends all cyclic activity throughout the cyclic
36077c478bd9Sstevel@tonic-gate * subsystem. It should be called only by subsystems which are attempting
36087c478bd9Sstevel@tonic-gate * to suspend the entire system (e.g. checkpoint/resume, dynamic
36097c478bd9Sstevel@tonic-gate * reconfiguration).
36107c478bd9Sstevel@tonic-gate *
36117c478bd9Sstevel@tonic-gate * Arguments and notes
36127c478bd9Sstevel@tonic-gate *
36137c478bd9Sstevel@tonic-gate * cyclic_suspend() takes no arguments. Each CPU with an active cyclic
36147c478bd9Sstevel@tonic-gate * disables its backend (offline CPUs disable their backends as part of
36157c478bd9Sstevel@tonic-gate * the cyclic_offline() operation), thereby disabling future CY_HIGH_LEVEL
36167c478bd9Sstevel@tonic-gate * interrupts.
36177c478bd9Sstevel@tonic-gate *
36187c478bd9Sstevel@tonic-gate * Note that disabling CY_HIGH_LEVEL interrupts does not completely preclude
36197c478bd9Sstevel@tonic-gate * cyclic handlers from being called after cyclic_suspend() returns: if a
36207c478bd9Sstevel@tonic-gate * CY_LOCK_LEVEL or CY_LOW_LEVEL interrupt thread was blocked at the time
36217c478bd9Sstevel@tonic-gate * of cyclic_suspend(), cyclic handlers at its level may continue to be
36227c478bd9Sstevel@tonic-gate * called after the interrupt thread becomes unblocked. The
36237c478bd9Sstevel@tonic-gate * post-cyclic_suspend() activity is bounded by the pend count on all
36247c478bd9Sstevel@tonic-gate * cyclics at the time of cyclic_suspend(). Callers concerned with more
36257c478bd9Sstevel@tonic-gate * than simply disabling future CY_HIGH_LEVEL interrupts must check for
36267c478bd9Sstevel@tonic-gate * this condition.
36277c478bd9Sstevel@tonic-gate *
36287c478bd9Sstevel@tonic-gate * On most platforms, timestamps from gethrtime() and gethrestime() are not
36297c478bd9Sstevel@tonic-gate * guaranteed to monotonically increase between cyclic_suspend() and
36307c478bd9Sstevel@tonic-gate * cyclic_resume(). However, timestamps are guaranteed to monotonically
36317c478bd9Sstevel@tonic-gate * increase across the entire cyclic_suspend()/cyclic_resume() operation.
36327c478bd9Sstevel@tonic-gate * That is, every timestamp obtained before cyclic_suspend() will be less
36337c478bd9Sstevel@tonic-gate * than every timestamp obtained after cyclic_resume().
36347c478bd9Sstevel@tonic-gate *
36357c478bd9Sstevel@tonic-gate * Return value
36367c478bd9Sstevel@tonic-gate *
36377c478bd9Sstevel@tonic-gate * None; cyclic_suspend() always succeeds.
36387c478bd9Sstevel@tonic-gate *
36397c478bd9Sstevel@tonic-gate * Caller's context
36407c478bd9Sstevel@tonic-gate *
36417c478bd9Sstevel@tonic-gate * The cyclic subsystem must be configured on every valid CPU;
36427c478bd9Sstevel@tonic-gate * cyclic_suspend() may not be called during boot or during dynamic
36437c478bd9Sstevel@tonic-gate * reconfiguration. Additionally, cpu_lock must be held, and the caller
36447c478bd9Sstevel@tonic-gate * cannot be in high-level interrupt context. However, unlike most other
36457c478bd9Sstevel@tonic-gate * cyclic entry points, cyclic_suspend() may be called with locks held
36467c478bd9Sstevel@tonic-gate * which are also acquired by CY_LOCK_LEVEL or CY_LOW_LEVEL cyclic
36477c478bd9Sstevel@tonic-gate * handlers.
36487c478bd9Sstevel@tonic-gate */
36497c478bd9Sstevel@tonic-gate void
36507c478bd9Sstevel@tonic-gate cyclic_suspend()
36517c478bd9Sstevel@tonic-gate {
36527c478bd9Sstevel@tonic-gate cpu_t *c;
36537c478bd9Sstevel@tonic-gate cyc_cpu_t *cpu;
36547c478bd9Sstevel@tonic-gate cyc_xcallarg_t arg;
36557c478bd9Sstevel@tonic-gate cyc_backend_t *be;
36567c478bd9Sstevel@tonic-gate
36577c478bd9Sstevel@tonic-gate CYC_PTRACE0("suspend");
36587c478bd9Sstevel@tonic-gate ASSERT(MUTEX_HELD(&cpu_lock));
36597c478bd9Sstevel@tonic-gate c = cpu_list;
36607c478bd9Sstevel@tonic-gate
36617c478bd9Sstevel@tonic-gate do {
36627c478bd9Sstevel@tonic-gate cpu = c->cpu_cyclic;
36637c478bd9Sstevel@tonic-gate be = cpu->cyp_backend;
36647c478bd9Sstevel@tonic-gate arg.cyx_cpu = cpu;
36657c478bd9Sstevel@tonic-gate
36667c478bd9Sstevel@tonic-gate be->cyb_xcall(be->cyb_arg, c,
36677c478bd9Sstevel@tonic-gate (cyc_func_t)cyclic_suspend_xcall, &arg);
36687c478bd9Sstevel@tonic-gate } while ((c = c->cpu_next) != cpu_list);
36697c478bd9Sstevel@tonic-gate }
36707c478bd9Sstevel@tonic-gate
36717c478bd9Sstevel@tonic-gate /*
36727c478bd9Sstevel@tonic-gate * void cyclic_resume()
36737c478bd9Sstevel@tonic-gate *
36747c478bd9Sstevel@tonic-gate * cyclic_resume() resumes all cyclic activity throughout the cyclic
36757c478bd9Sstevel@tonic-gate * subsystem. It should be called only by system-suspending subsystems.
36767c478bd9Sstevel@tonic-gate *
36777c478bd9Sstevel@tonic-gate * Arguments and notes
36787c478bd9Sstevel@tonic-gate *
36797c478bd9Sstevel@tonic-gate * cyclic_resume() takes no arguments. Each CPU with an active cyclic
36807c478bd9Sstevel@tonic-gate * reenables and reprograms its backend (offline CPUs are not reenabled).
36817c478bd9Sstevel@tonic-gate * On most platforms, timestamps from gethrtime() and gethrestime() are not
36827c478bd9Sstevel@tonic-gate * guaranteed to monotonically increase between cyclic_suspend() and
36837c478bd9Sstevel@tonic-gate * cyclic_resume(). However, timestamps are guaranteed to monotonically
36847c478bd9Sstevel@tonic-gate * increase across the entire cyclic_suspend()/cyclic_resume() operation.
36857c478bd9Sstevel@tonic-gate * That is, every timestamp obtained before cyclic_suspend() will be less
36867c478bd9Sstevel@tonic-gate * than every timestamp obtained after cyclic_resume().
36877c478bd9Sstevel@tonic-gate *
36887c478bd9Sstevel@tonic-gate * Return value
36897c478bd9Sstevel@tonic-gate *
36907c478bd9Sstevel@tonic-gate * None; cyclic_resume() always succeeds.
36917c478bd9Sstevel@tonic-gate *
36927c478bd9Sstevel@tonic-gate * Caller's context
36937c478bd9Sstevel@tonic-gate *
36947c478bd9Sstevel@tonic-gate * The cyclic subsystem must be configured on every valid CPU;
36957c478bd9Sstevel@tonic-gate * cyclic_resume() may not be called during boot or during dynamic
36967c478bd9Sstevel@tonic-gate * reconfiguration. Additionally, cpu_lock must be held, and the caller
36977c478bd9Sstevel@tonic-gate * cannot be in high-level interrupt context. However, unlike most other
36987c478bd9Sstevel@tonic-gate * cyclic entry points, cyclic_resume() may be called with locks held which
36997c478bd9Sstevel@tonic-gate * are also acquired by CY_LOCK_LEVEL or CY_LOW_LEVEL cyclic handlers.
37007c478bd9Sstevel@tonic-gate */
37017c478bd9Sstevel@tonic-gate void
37027c478bd9Sstevel@tonic-gate cyclic_resume()
37037c478bd9Sstevel@tonic-gate {
37047c478bd9Sstevel@tonic-gate cpu_t *c;
37057c478bd9Sstevel@tonic-gate cyc_cpu_t *cpu;
37067c478bd9Sstevel@tonic-gate cyc_xcallarg_t arg;
37077c478bd9Sstevel@tonic-gate cyc_backend_t *be;
37087c478bd9Sstevel@tonic-gate
37097c478bd9Sstevel@tonic-gate CYC_PTRACE0("resume");
37107c478bd9Sstevel@tonic-gate ASSERT(MUTEX_HELD(&cpu_lock));
37117c478bd9Sstevel@tonic-gate
37127c478bd9Sstevel@tonic-gate c = cpu_list;
37137c478bd9Sstevel@tonic-gate
37147c478bd9Sstevel@tonic-gate do {
37157c478bd9Sstevel@tonic-gate cpu = c->cpu_cyclic;
37167c478bd9Sstevel@tonic-gate be = cpu->cyp_backend;
37177c478bd9Sstevel@tonic-gate arg.cyx_cpu = cpu;
37187c478bd9Sstevel@tonic-gate
37197c478bd9Sstevel@tonic-gate be->cyb_xcall(be->cyb_arg, c,
37207c478bd9Sstevel@tonic-gate (cyc_func_t)cyclic_resume_xcall, &arg);
37217c478bd9Sstevel@tonic-gate } while ((c = c->cpu_next) != cpu_list);
37227c478bd9Sstevel@tonic-gate }
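
/*
 * A sketch of the suspend/resume bracket used by system-suspending
 * subsystems (e.g. checkpoint/resume):
 *
 *	mutex_enter(&cpu_lock);
 *	cyclic_suspend();
 *	<-- suspend the rest of the system; note that timestamps are
 *	    only guaranteed monotonic across the entire bracket
 *	cyclic_resume();
 *	mutex_exit(&cpu_lock);
 */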
3723