xref: /illumos-gate/usr/src/uts/i86pc/os/x_call.c (revision 7c478bd9)
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License").  You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * Facilities for cross-processor subroutine calls using "mailbox" interrupts.
 */

#include <sys/types.h>

#include <sys/param.h>
#include <sys/t_lock.h>
#include <sys/thread.h>
#include <sys/cpuvar.h>
#include <sys/x_call.h>
#include <sys/cpu.h>
#include <sys/psw.h>
#include <sys/sunddi.h>
#include <sys/mmu.h>
#include <sys/debug.h>
#include <sys/systm.h>
#include <sys/machsystm.h>
#include <sys/mutex_impl.h>

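/*
 * One mailbox and one lock exist per x-call priority level, and
 * xc_xlat_xcptoipl maps each level to the PIL used when posting the
 * cross-call interrupt with send_dirint().
 */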
static struct	xc_mbox xc_mboxes[X_CALL_LEVELS];
static kmutex_t xc_mbox_lock[X_CALL_LEVELS];
static uint_t	xc_xlat_xcptoipl[X_CALL_LEVELS] = {
	XC_LO_PIL,
	XC_MED_PIL,
	XC_HI_PIL
};

static void xc_common(xc_func_t, xc_arg_t, xc_arg_t, xc_arg_t,
    int, cpuset_t, int);

static int	xc_initialized = 0;
extern ulong_t	cpu_ready_set;

void
xc_init()
{
	/*
	 * By making these mutexes type MUTEX_DRIVER, the ones below
	 * LOCK_LEVEL will be implemented as adaptive mutexes, and the
	 * ones above LOCK_LEVEL will be spin mutexes.
	 */
	mutex_init(&xc_mbox_lock[0], NULL, MUTEX_DRIVER,
	    (void *)ipltospl(XC_LO_PIL));
	mutex_init(&xc_mbox_lock[1], NULL, MUTEX_DRIVER,
	    (void *)ipltospl(XC_MED_PIL));
	mutex_init(&xc_mbox_lock[2], NULL, MUTEX_DRIVER,
	    (void *)ipltospl(XC_HI_PIL));

	xc_initialized = 1;
}

/*
 * Used by the debugger to determine whether or not cross calls have been
 * initialized and are safe to use.
 */
int
kdi_xc_initialized(void)
{
	return (xc_initialized);
}

#define	CAPTURE_CPU_ARG	0xffffffff

/*
 * X-call interrupt service routine.
 *
 * arg1 == X_CALL_MEDPRI - capture cpus.
 *
 * We're protected against changing CPUs by being a high-priority interrupt.
 */
/*ARGSUSED*/
uint_t
xc_serv(caddr_t arg1, caddr_t arg2)
{
	int	op;
	int	pri = (int)(uintptr_t)arg1;
	struct cpu *cpup = CPU;
	xc_arg_t *argp;
	xc_arg_t arg2val;
	uint_t	tlbflush;

	if (pri == X_CALL_MEDPRI) {

		argp = &xc_mboxes[X_CALL_MEDPRI].arg2;
		arg2val = *argp;
		if (arg2val != CAPTURE_CPU_ARG &&
		    !(arg2val & (1 << cpup->cpu_id)))
			return (DDI_INTR_UNCLAIMED);
		ASSERT(arg2val == CAPTURE_CPU_ARG);
		if (cpup->cpu_m.xc_pend[pri] == 0)
			return (DDI_INTR_UNCLAIMED);

		cpup->cpu_m.xc_pend[X_CALL_MEDPRI] = 0;
		cpup->cpu_m.xc_ack[X_CALL_MEDPRI] = 1;

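		/*
		 * Holding pattern for a captured CPU: spin at this PIL until
		 * the initiator posts XC_DONE from xc_release_cpus(), or a
		 * new request becomes pending.
		 */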
		for (;;) {
			if ((cpup->cpu_m.xc_state[X_CALL_MEDPRI] == XC_DONE) ||
			    (cpup->cpu_m.xc_pend[X_CALL_MEDPRI]))
				break;
			ht_pause();
			return_instr();
		}
		return (DDI_INTR_CLAIMED);
	}
	if (cpup->cpu_m.xc_pend[pri] == 0)
		return (DDI_INTR_UNCLAIMED);

	cpup->cpu_m.xc_pend[pri] = 0;
	op = cpup->cpu_m.xc_state[pri];

	/*
	 * When invalidating TLB entries, wait until the initiator changes the
	 * memory PTE before doing any INVLPG. Otherwise, if the PTE in memory
	 * hasn't been changed, the processor's TLB Flush filter may ignore
	 * the INVLPG instruction.
	 */
	tlbflush = (cpup->cpu_m.xc_wait[pri] == 2);

	/*
	 * Don't invoke a null function.
	 */
	if (xc_mboxes[pri].func != NULL) {
		if (!tlbflush)
			cpup->cpu_m.xc_retval[pri] = (*xc_mboxes[pri].func)
			    (xc_mboxes[pri].arg1, xc_mboxes[pri].arg2,
			    xc_mboxes[pri].arg3);
	} else
		cpup->cpu_m.xc_retval[pri] = 0;

	/*
	 * Acknowledge that we have completed the x-call operation.
	 */
	cpup->cpu_m.xc_ack[pri] = 1;

	if (op == XC_CALL_OP)
		return (DDI_INTR_CLAIMED);

	/*
	 * For (op == XC_SYNC_OP): wait for the initiator of the x-call to
	 * indicate that all CPUs involved can proceed.
	 */
	while (cpup->cpu_m.xc_wait[pri]) {
		ht_pause();
		return_instr();
	}

	while (cpup->cpu_m.xc_state[pri] != XC_DONE) {
		ht_pause();
		return_instr();
	}

	/*
	 * Flush the TLB, if that's what is requested.
	 */
	if (xc_mboxes[pri].func != NULL && tlbflush) {
		cpup->cpu_m.xc_retval[pri] = (*xc_mboxes[pri].func)
		    (xc_mboxes[pri].arg1, xc_mboxes[pri].arg2,
		    xc_mboxes[pri].arg3);
	}

	/*
	 * Acknowledge that we have received the directive to continue.
	 */
	ASSERT(cpup->cpu_m.xc_ack[pri] == 0);
	cpup->cpu_m.xc_ack[pri] = 1;

	return (DDI_INTR_CLAIMED);
}


/*
 * xc_do_call: common worker for xc_call(), xc_sync() and xc_wait_sync().
 */
static void
xc_do_call(
	xc_arg_t arg1,
	xc_arg_t arg2,
	xc_arg_t arg3,
	int pri,
	cpuset_t set,
	xc_func_t func,
	int sync)
{
	/*
	 * If pri indicates a low priority level (below LOCK_LEVEL),
	 * we must disable preemption to avoid migrating to another CPU
	 * during the call.
	 */
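	/*
	 * Any other priority is promoted to X_CALL_HIPRI below, so the
	 * request uses the high-priority mailbox and is delivered at
	 * XC_HI_PIL.
	 */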
	if (pri == X_CALL_LOPRI) {
		kpreempt_disable();
	} else {
		pri = X_CALL_HIPRI;
	}

	/* always grab highest mutex to avoid deadlock */
	mutex_enter(&xc_mbox_lock[X_CALL_HIPRI]);
	xc_common(func, arg1, arg2, arg3, pri, set, sync);
	mutex_exit(&xc_mbox_lock[X_CALL_HIPRI]);
	if (pri == X_CALL_LOPRI)
		kpreempt_enable();
}


/*
 * xc_call: call specified function on all processors;
 * remotes may continue after service.
 * We wait here until everybody has completed.
 */
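/*
 * Illustrative (hypothetical) usage, assuming a handler of type xc_func_t:
 *
 *	xc_call((xc_arg_t)data, 0, 0, X_CALL_HIPRI, cpu_ready_set, handler);
 *
 * This runs handler(data, 0, 0) on every ready CPU and returns once all
 * of them have acknowledged completion.
 */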
void
xc_call(
	xc_arg_t arg1,
	xc_arg_t arg2,
	xc_arg_t arg3,
	int pri,
	cpuset_t set,
	xc_func_t func)
{
	xc_do_call(arg1, arg2, arg3, pri, set, func, 0);
}

/*
 * xc_sync: call specified function on all processors.
 * After doing the work, each remote waits until we let
 * it continue; we send the continue signal after everyone has
 * informed us that they are done.
 */
void
xc_sync(
	xc_arg_t arg1,
	xc_arg_t arg2,
	xc_arg_t arg3,
	int pri,
	cpuset_t set,
	xc_func_t func)
{
	xc_do_call(arg1, arg2, arg3, pri, set, func, 1);
}

/*
 * xc_wait_sync: similar to xc_sync(), except that the starting
 * cpu waits for all other cpus to check in before running its
 * service locally.
 */
void
xc_wait_sync(
	xc_arg_t arg1,
	xc_arg_t arg2,
	xc_arg_t arg3,
	int pri,
	cpuset_t set,
	xc_func_t func)
{
	xc_do_call(arg1, arg2, arg3, pri, set, func, 2);
}


/*
 * The routines xc_capture_cpus and xc_release_cpus
 * can be used in place of xc_sync in order to implement a critical
 * code section where all CPUs in the system can be controlled.
 * xc_capture_cpus is used to start the critical code section, and
 * xc_release_cpus is used to end the critical code section.
 */

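/*
 * Illustrative (hypothetical) usage of a capture/release critical section:
 *
 *	xc_capture_cpus(cpu_ready_set);
 *	... code that must run while all other CPUs are held ...
 *	xc_release_cpus();
 *
 * The other CPUs spin in xc_serv() at XC_MED_PIL until the release.
 */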
/*
 * Capture the CPUs specified in order to start an x-call session,
 * and/or to begin a critical section.
 */
void
xc_capture_cpus(cpuset_t set)
{
	int cix;
	int lcx;
	struct cpu *cpup;
	int	i;
	cpuset_t *cpus;
	cpuset_t c;

	CPU_STATS_ADDQ(CPU, sys, xcalls, 1);

	/*
	 * Prevent deadlocks where we take an interrupt and are waiting
	 * for a mutex owned by one of the CPUs that is captured for
	 * the x-call, while that CPU is waiting for some x-call signal
	 * to be set by us.
	 *
	 * This mutex also prevents preemption, since it raises SPL above
	 * LOCK_LEVEL (it is a spin-type driver mutex).
	 */
	/* always grab highest mutex to avoid deadlock */
	mutex_enter(&xc_mbox_lock[X_CALL_HIPRI]);
	lcx = CPU->cpu_id;	/* now we're safe */

	ASSERT(CPU->cpu_flags & CPU_READY);

	/*
	 * Wait for all cpus
	 */
	cpus = (cpuset_t *)&xc_mboxes[X_CALL_MEDPRI].arg2;
	if (CPU_IN_SET(*cpus, CPU->cpu_id))
		CPUSET_ATOMIC_DEL(*cpus, CPU->cpu_id);
	for (;;) {
		c = *(volatile cpuset_t *)cpus;
		CPUSET_AND(c, cpu_ready_set);
		if (CPUSET_ISNULL(c))
			break;
		ht_pause();
	}

	/*
	 * Store the set of CPUs involved in the x-call session, so that
	 * xc_release_cpus will know what CPUs to act upon.
	 */
	xc_mboxes[X_CALL_MEDPRI].set = set;
	xc_mboxes[X_CALL_MEDPRI].arg2 = CAPTURE_CPU_ARG;

	/*
	 * Now capture each CPU in the set and cause it to go into a
	 * holding pattern.
	 */
	i = 0;
	for (cix = 0; cix < NCPU; cix++) {
		if ((cpup = cpu[cix]) == NULL ||
		    (cpup->cpu_flags & CPU_READY) == 0) {
			/*
			 * In case CPU wasn't ready, but becomes ready later,
			 * take the CPU out of the set now.
			 */
			CPUSET_DEL(set, cix);
			continue;
		}
		if (cix != lcx && CPU_IN_SET(set, cix)) {
			cpup->cpu_m.xc_ack[X_CALL_MEDPRI] = 0;
			cpup->cpu_m.xc_state[X_CALL_MEDPRI] = XC_HOLD;
			cpup->cpu_m.xc_pend[X_CALL_MEDPRI] = 1;
			send_dirint(cix, XC_MED_PIL);
		}
		i++;
		if (i >= ncpus)
			break;
	}

	/*
	 * Wait here for all remote calls to complete.
	 */
	i = 0;
	for (cix = 0; cix < NCPU; cix++) {
		if (lcx != cix && CPU_IN_SET(set, cix)) {
			cpup = cpu[cix];
			while (cpup->cpu_m.xc_ack[X_CALL_MEDPRI] == 0) {
				ht_pause();
				return_instr();
			}
			cpup->cpu_m.xc_ack[X_CALL_MEDPRI] = 0;
		}
		i++;
		if (i >= ncpus)
			break;
	}
}

/*
 * Release the CPUs captured by xc_capture_cpus, thus terminating the
 * x-call session and exiting the critical section.
 */
void
xc_release_cpus(void)
{
	int cix;
	int lcx = (int)(CPU->cpu_id);
	cpuset_t set = xc_mboxes[X_CALL_MEDPRI].set;
	struct cpu *cpup;
	int	i;

	ASSERT(MUTEX_HELD(&xc_mbox_lock[X_CALL_HIPRI]));

	/*
	 * Allow each CPU to exit its holding pattern.
	 */
	i = 0;
	for (cix = 0; cix < NCPU; cix++) {
		if ((cpup = cpu[cix]) == NULL)
			continue;
		if ((cpup->cpu_flags & CPU_READY) &&
		    (cix != lcx) && CPU_IN_SET(set, cix)) {
			/*
			 * xc_ack was already cleared by xc_capture_cpus;
			 * setting XC_DONE lets the CPU leave its holding
			 * pattern in xc_serv().
			 */
			cpup->cpu_m.xc_state[X_CALL_MEDPRI] = XC_DONE;
		}
		i++;
		if (i >= ncpus)
			break;
	}

	xc_mboxes[X_CALL_MEDPRI].arg2 = 0;
	mutex_exit(&xc_mbox_lock[X_CALL_HIPRI]);
}

/*
 * Common code to call a specified function on a set of processors.
 * sync specifies what kind of waiting is done.
 *	-1 - no waiting, don't release remotes
 *	0 - no waiting, release remotes immediately
 *	1 - run service locally w/o waiting for remotes
 *	2 - wait for remotes before running locally
 */
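/*
 * For reference: xc_call() passes 0, xc_sync() passes 1, xc_wait_sync()
 * passes 2, and xc_trycall() (below) passes -1.
 */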
static void
xc_common(
	xc_func_t func,
	xc_arg_t arg1,
	xc_arg_t arg2,
	xc_arg_t arg3,
	int pri,
	cpuset_t set,
	int sync)
{
	int cix;
	int lcx = (int)(CPU->cpu_id);
	struct cpu *cpup;

	ASSERT(panicstr == NULL);

	ASSERT(MUTEX_HELD(&xc_mbox_lock[X_CALL_HIPRI]));
	ASSERT(CPU->cpu_flags & CPU_READY);

	/*
	 * Set up the service definition mailbox.
	 */
	xc_mboxes[pri].func = func;
	xc_mboxes[pri].arg1 = arg1;
	xc_mboxes[pri].arg2 = arg2;
	xc_mboxes[pri].arg3 = arg3;

	/*
	 * Request service on all remote processors.
	 */
	for (cix = 0; cix < NCPU; cix++) {
		if ((cpup = cpu[cix]) == NULL ||
		    (cpup->cpu_flags & CPU_READY) == 0) {
			/*
			 * In case CPU wasn't ready, but becomes ready later,
			 * take the CPU out of the set now.
			 */
			CPUSET_DEL(set, cix);
		} else if (cix != lcx && CPU_IN_SET(set, cix)) {
			CPU_STATS_ADDQ(CPU, sys, xcalls, 1);
			cpup->cpu_m.xc_ack[pri] = 0;
			cpup->cpu_m.xc_wait[pri] = sync;
			if (sync > 0)
				cpup->cpu_m.xc_state[pri] = XC_SYNC_OP;
			else
				cpup->cpu_m.xc_state[pri] = XC_CALL_OP;
			cpup->cpu_m.xc_pend[pri] = 1;
			send_dirint(cix, xc_xlat_xcptoipl[pri]);
		}
	}

	/*
	 * Run service locally if not waiting for remotes.
	 */
	if (sync != 2 && CPU_IN_SET(set, lcx) && func != NULL)
		CPU->cpu_m.xc_retval[pri] = (*func)(arg1, arg2, arg3);

	if (sync == -1)
		return;

	/*
	 * Wait here until all remote calls complete.
	 */
	for (cix = 0; cix < NCPU; cix++) {
		if (lcx != cix && CPU_IN_SET(set, cix)) {
			cpup = cpu[cix];
			while (cpup->cpu_m.xc_ack[pri] == 0) {
				ht_pause();
				return_instr();
			}
			cpup->cpu_m.xc_ack[pri] = 0;
		}
	}

	/*
	 * Run service locally if waiting for remotes.
	 */
	if (sync == 2 && CPU_IN_SET(set, lcx) && func != NULL)
		CPU->cpu_m.xc_retval[pri] = (*func)(arg1, arg2, arg3);

	if (sync == 0)
		return;

	/*
	 * Release any waiting CPUs.
	 */
	for (cix = 0; cix < NCPU; cix++) {
		if (lcx != cix && CPU_IN_SET(set, cix)) {
			cpup = cpu[cix];
			if (cpup != NULL && (cpup->cpu_flags & CPU_READY)) {
				cpup->cpu_m.xc_wait[pri] = 0;
				cpup->cpu_m.xc_state[pri] = XC_DONE;
			}
		}
	}

	/*
	 * Wait for all CPUs to acknowledge completion before we continue.
	 * Without this check it's possible (on a VM, on hyper-threaded CPUs,
	 * or in the presence of System Management Interrupts, which can all
	 * cause delays) for the remote processor to still be waiting by
	 * the time xc_common() is next invoked with the sync flag set,
	 * resulting in a deadlock.
	 */
	for (cix = 0; cix < NCPU; cix++) {
		if (lcx != cix && CPU_IN_SET(set, cix)) {
			cpup = cpu[cix];
			if (cpup != NULL && (cpup->cpu_flags & CPU_READY)) {
				while (cpup->cpu_m.xc_ack[pri] == 0) {
					ht_pause();
					return_instr();
				}
				cpup->cpu_m.xc_ack[pri] = 0;
			}
		}
	}
}

/*
 * xc_trycall: attempt to call specified function on all processors;
 * remotes may wait for a long time.
 * We continue immediately.
 */
void
xc_trycall(
	xc_arg_t arg1,
	xc_arg_t arg2,
	xc_arg_t arg3,
	cpuset_t set,
	xc_func_t func)
{
	int		save_kernel_preemption;
	extern int	IGNORE_KERNEL_PREEMPTION;

	/*
	 * If we can grab the mutex, we'll do the cross-call.  If not -- if
	 * someone else is already doing a cross-call -- we won't.
	 */

	save_kernel_preemption = IGNORE_KERNEL_PREEMPTION;
	IGNORE_KERNEL_PREEMPTION = 1;
	if (mutex_tryenter(&xc_mbox_lock[X_CALL_HIPRI])) {
		xc_common(func, arg1, arg2, arg3, X_CALL_HIPRI, set, -1);
		mutex_exit(&xc_mbox_lock[X_CALL_HIPRI]);
	}
	IGNORE_KERNEL_PREEMPTION = save_kernel_preemption;
}

/*
 * Used by the debugger to cross-call the other CPUs, thus causing them to
 * enter the debugger.  We can't hold locks, so we spin on the cross-call
 * lock until we get it.  When we get it, we send the cross-call, and assume
 * that we successfully stopped the other CPUs.
 */
void
kdi_xc_others(int this_cpu, void (*func)(void))
{
	extern int	IGNORE_KERNEL_PREEMPTION;
	int save_kernel_preemption;
	mutex_impl_t *lp;
	cpuset_t set;
	int x;

	CPUSET_ALL_BUT(set, this_cpu);

	save_kernel_preemption = IGNORE_KERNEL_PREEMPTION;
	IGNORE_KERNEL_PREEMPTION = 1;

	lp = (mutex_impl_t *)&xc_mbox_lock[X_CALL_HIPRI];
	for (x = 0; x < 0x400000; x++) {
		if (lock_spin_try(&lp->m_spin.m_spinlock)) {
			xc_common((xc_func_t)func, 0, 0, 0, X_CALL_HIPRI,
			    set, -1);
			lp->m_spin.m_spinlock = 0; /* XXX */
			break;
		}
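		/*
		 * We didn't get the lock.  The holder may be waiting for us
		 * to respond to its own cross-call, so service any pending
		 * medium-priority (capture) request while we spin.
		 */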
		(void) xc_serv((caddr_t)X_CALL_MEDPRI, NULL);
	}
	IGNORE_KERNEL_PREEMPTION = save_kernel_preemption;
}