xref: /illumos-gate/usr/src/uts/sun4/os/x_call.c (revision 12551037)
17c478bd9Sstevel@tonic-gate /*
27c478bd9Sstevel@tonic-gate  * CDDL HEADER START
37c478bd9Sstevel@tonic-gate  *
47c478bd9Sstevel@tonic-gate  * The contents of this file are subject to the terms of the
5b0fc0e77Sgovinda  * Common Development and Distribution License (the "License").
6b0fc0e77Sgovinda  * You may not use this file except in compliance with the License.
77c478bd9Sstevel@tonic-gate  *
87c478bd9Sstevel@tonic-gate  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
97c478bd9Sstevel@tonic-gate  * or http://www.opensolaris.org/os/licensing.
107c478bd9Sstevel@tonic-gate  * See the License for the specific language governing permissions
117c478bd9Sstevel@tonic-gate  * and limitations under the License.
127c478bd9Sstevel@tonic-gate  *
137c478bd9Sstevel@tonic-gate  * When distributing Covered Code, include this CDDL HEADER in each
147c478bd9Sstevel@tonic-gate  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
157c478bd9Sstevel@tonic-gate  * If applicable, add the following below this CDDL HEADER, with the
167c478bd9Sstevel@tonic-gate  * fields enclosed by brackets "[]" replaced with your own identifying
177c478bd9Sstevel@tonic-gate  * information: Portions Copyright [yyyy] [name of copyright owner]
187c478bd9Sstevel@tonic-gate  *
197c478bd9Sstevel@tonic-gate  * CDDL HEADER END
207c478bd9Sstevel@tonic-gate  */
217c478bd9Sstevel@tonic-gate /*
22b885580bSAlexander Kolbasov  * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
237c478bd9Sstevel@tonic-gate  * Use is subject to license terms.
247c478bd9Sstevel@tonic-gate  */
257c478bd9Sstevel@tonic-gate 
267c478bd9Sstevel@tonic-gate #include <sys/systm.h>
277c478bd9Sstevel@tonic-gate #include <sys/archsystm.h>
287c478bd9Sstevel@tonic-gate #include <sys/machsystm.h>
297c478bd9Sstevel@tonic-gate #include <sys/cpuvar.h>
307c478bd9Sstevel@tonic-gate #include <sys/intreg.h>
317c478bd9Sstevel@tonic-gate #include <sys/x_call.h>
327c478bd9Sstevel@tonic-gate #include <sys/cmn_err.h>
337c478bd9Sstevel@tonic-gate #include <sys/membar.h>
347c478bd9Sstevel@tonic-gate #include <sys/disp.h>
357c478bd9Sstevel@tonic-gate #include <sys/debug.h>
367c478bd9Sstevel@tonic-gate #include <sys/privregs.h>
377c478bd9Sstevel@tonic-gate #include <sys/xc_impl.h>
387c478bd9Sstevel@tonic-gate #include <sys/ivintr.h>
397c478bd9Sstevel@tonic-gate #include <sys/dmv.h>
40370b8e80Sjkennedy #include <sys/sysmacros.h>
417c478bd9Sstevel@tonic-gate 
428b9d661eSrjnoe #ifdef TRAPTRACE
437c478bd9Sstevel@tonic-gate uint_t x_dstat[NCPU][XC_LOOP_EXIT+1];
447c478bd9Sstevel@tonic-gate uint_t x_rstat[NCPU][4];
458b9d661eSrjnoe #endif /* TRAPTRACE */
467c478bd9Sstevel@tonic-gate 
47b0fc0e77Sgovinda static uint64_t xc_serv_inum;	/* software interrupt number for xc_serv() */
48b0fc0e77Sgovinda static uint64_t xc_loop_inum;	/* software interrupt number for xc_loop() */
497c478bd9Sstevel@tonic-gate kmutex_t xc_sys_mutex;		/* protect xcall session and xc_mbox */
507c478bd9Sstevel@tonic-gate int xc_spl_enter[NCPU];		/* protect sending x-call */
517c478bd9Sstevel@tonic-gate static int xc_holder = -1; /* the cpu that initiates xc_attention; 0 is valid */
527c478bd9Sstevel@tonic-gate 
537c478bd9Sstevel@tonic-gate /*
547c478bd9Sstevel@tonic-gate  * Mailbox for handshaking and xcall requests; protected by xc_sys_mutex
557c478bd9Sstevel@tonic-gate  */
567c478bd9Sstevel@tonic-gate static struct xc_mbox {
577c478bd9Sstevel@tonic-gate 	xcfunc_t *xc_func;
587c478bd9Sstevel@tonic-gate 	uint64_t xc_arg1;
597c478bd9Sstevel@tonic-gate 	uint64_t xc_arg2;
607c478bd9Sstevel@tonic-gate 	cpuset_t xc_cpuset;
617c478bd9Sstevel@tonic-gate 	volatile uint_t	xc_state;
627c478bd9Sstevel@tonic-gate } xc_mbox[NCPU];
637c478bd9Sstevel@tonic-gate 
647c478bd9Sstevel@tonic-gate uint64_t xc_tick_limit;		/* send_mondo() tick limit value */
657c478bd9Sstevel@tonic-gate uint64_t xc_tick_limit_scale = 1;	/* scale used to increase the limit */
667c478bd9Sstevel@tonic-gate uint64_t xc_tick_jump_limit;	/* send_mondo() irregular tick jump limit */
67374ae87fSsvemuri uint64_t xc_sync_tick_limit;	/* timeout limit for xt_sync() calls */
687c478bd9Sstevel@tonic-gate 
697c478bd9Sstevel@tonic-gate /* timeout value for xcalls to be received by the target CPU */
707c478bd9Sstevel@tonic-gate uint64_t xc_mondo_time_limit;
717c478bd9Sstevel@tonic-gate 
727c478bd9Sstevel@tonic-gate /* timeout value for xcall functions to be executed on the target CPU */
737c478bd9Sstevel@tonic-gate uint64_t xc_func_time_limit;
747c478bd9Sstevel@tonic-gate 
757c478bd9Sstevel@tonic-gate uint64_t xc_scale = 1;	/* scale used to calculate timeout limits */
76370b8e80Sjkennedy uint64_t xc_mondo_multiplier = 10;
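
/*
 * Illustrative note (not from the original source): the scale and
 * multiplier variables above are ordinary kernel globals, so a developer
 * chasing spurious x-call timeouts could, for example, raise them from
 * /etc/system and reboot:
 *
 *	set xc_tick_limit_scale = 2
 *	set xc_mondo_multiplier = 20
 *
 * xc_init() and xc_func_timeout_adj() then fold the new values into the
 * computed limits.  The exact settings here are hypothetical examples.
 */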
777c478bd9Sstevel@tonic-gate 
787c478bd9Sstevel@tonic-gate uint_t sendmondo_in_recover;	/* nonzero while send_mondo() recovery is in progress */
797c478bd9Sstevel@tonic-gate 
807c478bd9Sstevel@tonic-gate /*
817c478bd9Sstevel@tonic-gate  * sending x-calls
827c478bd9Sstevel@tonic-gate  */
837c478bd9Sstevel@tonic-gate void	init_mondo(xcfunc_t *func, uint64_t arg1, uint64_t arg2);
847c478bd9Sstevel@tonic-gate void	send_one_mondo(int cpuid);
857c478bd9Sstevel@tonic-gate void	send_mondo_set(cpuset_t set);
867c478bd9Sstevel@tonic-gate 
87370b8e80Sjkennedy /*
88370b8e80Sjkennedy  * Adjust xc_attention timeout if a faster cpu is dynamically added.
89370b8e80Sjkennedy  * Ignore the dynamic removal of a cpu that would lower these timeout
90370b8e80Sjkennedy  * values.
91370b8e80Sjkennedy  */
92370b8e80Sjkennedy static int
93*12551037SToomas Soome xc_func_timeout_adj(cpu_setup_t what, int cpuid)
94*12551037SToomas Soome {
95370b8e80Sjkennedy 	uint64_t freq = cpunodes[cpuid].clock_freq;
96370b8e80Sjkennedy 
97370b8e80Sjkennedy 	switch (what) {
98370b8e80Sjkennedy 	case CPU_ON:
99370b8e80Sjkennedy 	case CPU_INIT:
100370b8e80Sjkennedy 	case CPU_CONFIG:
101370b8e80Sjkennedy 	case CPU_CPUPART_IN:
102370b8e80Sjkennedy 		if (freq * xc_scale > xc_mondo_time_limit) {
103370b8e80Sjkennedy 			xc_mondo_time_limit = freq * xc_scale;
104370b8e80Sjkennedy 			xc_func_time_limit = xc_mondo_time_limit *
105370b8e80Sjkennedy 			    xc_mondo_multiplier;
106370b8e80Sjkennedy 		}
107370b8e80Sjkennedy 		break;
108370b8e80Sjkennedy 	case CPU_OFF:
109370b8e80Sjkennedy 	case CPU_UNCONFIG:
110370b8e80Sjkennedy 	case CPU_CPUPART_OUT:
111370b8e80Sjkennedy 	default:
112370b8e80Sjkennedy 		break;
113370b8e80Sjkennedy 	}
114370b8e80Sjkennedy 
115370b8e80Sjkennedy 	return (0);
116370b8e80Sjkennedy }
117370b8e80Sjkennedy 
1187c478bd9Sstevel@tonic-gate /*
1197c478bd9Sstevel@tonic-gate  * xc_init - initialize x-call related locks
1207c478bd9Sstevel@tonic-gate  */
1217c478bd9Sstevel@tonic-gate void
1227c478bd9Sstevel@tonic-gate xc_init(void)
1237c478bd9Sstevel@tonic-gate {
1247c478bd9Sstevel@tonic-gate 	int pix;
125370b8e80Sjkennedy 	uint64_t maxfreq = 0;
1267c478bd9Sstevel@tonic-gate 
1277c478bd9Sstevel@tonic-gate 	mutex_init(&xc_sys_mutex, NULL, MUTEX_SPIN,
1287c478bd9Sstevel@tonic-gate 	    (void *)ipltospl(XCALL_PIL));
1297c478bd9Sstevel@tonic-gate 
1308b9d661eSrjnoe #ifdef TRAPTRACE
1317c478bd9Sstevel@tonic-gate 	/* Initialize for all possible CPUs. */
1327c478bd9Sstevel@tonic-gate 	for (pix = 0; pix < NCPU; pix++) {
1337c478bd9Sstevel@tonic-gate 		XC_STAT_INIT(pix);
1347c478bd9Sstevel@tonic-gate 	}
1358b9d661eSrjnoe #endif /* TRAPTRACE */
1367c478bd9Sstevel@tonic-gate 
137b0fc0e77Sgovinda 	xc_serv_inum = add_softintr(XCALL_PIL, (softintrfunc)xc_serv, 0,
138b0fc0e77Sgovinda 	    SOFTINT_MT);
139b0fc0e77Sgovinda 	xc_loop_inum = add_softintr(XCALL_PIL, (softintrfunc)xc_loop, 0,
140b0fc0e77Sgovinda 	    SOFTINT_MT);
1417c478bd9Sstevel@tonic-gate 
1427c478bd9Sstevel@tonic-gate 	/*
1437c478bd9Sstevel@tonic-gate 	 * Initialize the calibrated tick limit for send_mondo.
1447c478bd9Sstevel@tonic-gate 	 * The value represents the maximum tick count to wait.
1457c478bd9Sstevel@tonic-gate 	 */
1467c478bd9Sstevel@tonic-gate 	xc_tick_limit =
1477c478bd9Sstevel@tonic-gate 	    ((uint64_t)sys_tick_freq * XC_SEND_MONDO_MSEC) / 1000;
1487c478bd9Sstevel@tonic-gate 	xc_tick_jump_limit = xc_tick_limit / 32;
1497c478bd9Sstevel@tonic-gate 	xc_tick_limit *= xc_tick_limit_scale;
150374ae87fSsvemuri 	xc_sync_tick_limit = xc_tick_limit;
1517c478bd9Sstevel@tonic-gate 
1527c478bd9Sstevel@tonic-gate 	/*
1537c478bd9Sstevel@tonic-gate 	 * Maximum number of loops to wait before timing out in xc_attention.
1547c478bd9Sstevel@tonic-gate 	 */
155370b8e80Sjkennedy 	for (pix = 0; pix < NCPU; pix++) {
156370b8e80Sjkennedy 		maxfreq = MAX(cpunodes[pix].clock_freq, maxfreq);
157370b8e80Sjkennedy 	}
158370b8e80Sjkennedy 	xc_mondo_time_limit = maxfreq * xc_scale;
159370b8e80Sjkennedy 	register_cpu_setup_func((cpu_setup_func_t *)xc_func_timeout_adj, NULL);
1607c478bd9Sstevel@tonic-gate 
1617c478bd9Sstevel@tonic-gate 	/*
1627c478bd9Sstevel@tonic-gate 	 * Maximum number of loops to wait for an xcall function to be
163370b8e80Sjkennedy 	 * executed on the target CPU.
1647c478bd9Sstevel@tonic-gate 	 */
165370b8e80Sjkennedy 	xc_func_time_limit = xc_mondo_time_limit * xc_mondo_multiplier;
1667c478bd9Sstevel@tonic-gate }
1677c478bd9Sstevel@tonic-gate 
1687c478bd9Sstevel@tonic-gate /*
1697c478bd9Sstevel@tonic-gate  * The following routines basically provide callers with two kinds of
1707c478bd9Sstevel@tonic-gate  * inter-processor interrupt services:
1717c478bd9Sstevel@tonic-gate  *	1. cross calls (x-calls) - requests are handled at target cpu's TL=0
1727c478bd9Sstevel@tonic-gate  *	2. cross traps (x-traps) - requests are handled at target cpu's TL>0
1737c478bd9Sstevel@tonic-gate  *
1747c478bd9Sstevel@tonic-gate  * Although these routines protect the services from migrating to other cpus
1757c478bd9Sstevel@tonic-gate  * "after" they are called, it is the caller's choice or responsibility to
1767c478bd9Sstevel@tonic-gate  * prevent the cpu migration "before" calling them.
1777c478bd9Sstevel@tonic-gate  *
1787c478bd9Sstevel@tonic-gate  * X-call routines:
1797c478bd9Sstevel@tonic-gate  *
1807c478bd9Sstevel@tonic-gate  *	xc_one()  - send a request to one processor
1817c478bd9Sstevel@tonic-gate  *	xc_some() - send a request to some processors
1827c478bd9Sstevel@tonic-gate  *	xc_all()  - send a request to all processors
1837c478bd9Sstevel@tonic-gate  *
1847c478bd9Sstevel@tonic-gate  *	Their common parameters:
1857c478bd9Sstevel@tonic-gate  *		func - a TL=0 handler address
1867c478bd9Sstevel@tonic-gate  *		arg1 and arg2  - optional
1877c478bd9Sstevel@tonic-gate  *
1887c478bd9Sstevel@tonic-gate  *	The services provided by x-call routines allow callers
1897c478bd9Sstevel@tonic-gate  *	to send a request to target cpus to execute a TL=0
1907c478bd9Sstevel@tonic-gate  *	handler.
1917c478bd9Sstevel@tonic-gate  *	The interface of the registers of the TL=0 handler:
1927c478bd9Sstevel@tonic-gate  *		%o0: arg1
1937c478bd9Sstevel@tonic-gate  *		%o1: arg2
1947c478bd9Sstevel@tonic-gate  *
1957c478bd9Sstevel@tonic-gate  * X-trap routines:
1967c478bd9Sstevel@tonic-gate  *
1977c478bd9Sstevel@tonic-gate  *	xt_one()  - send a request to one processor
1987c478bd9Sstevel@tonic-gate  *	xt_some() - send a request to some processors
1997c478bd9Sstevel@tonic-gate  *	xt_all()  - send a request to all processors
2007c478bd9Sstevel@tonic-gate  *
2017c478bd9Sstevel@tonic-gate  *	Their common parameters:
2027c478bd9Sstevel@tonic-gate  *		func - a TL>0 handler address or an interrupt number
2037c478bd9Sstevel@tonic-gate  *		arg1, arg2
2047c478bd9Sstevel@tonic-gate  *		       optional when "func" is an address;
2057c478bd9Sstevel@tonic-gate  *		       0        when "func" is an interrupt number
2067c478bd9Sstevel@tonic-gate  *
2077c478bd9Sstevel@tonic-gate  *	If the request of "func" is a kernel address, then
2087c478bd9Sstevel@tonic-gate  *	the target cpu will execute the request of "func" with
2097c478bd9Sstevel@tonic-gate  *	args at "TL>0" level.
2107c478bd9Sstevel@tonic-gate  *	The interface of the registers of the TL>0 handler:
2117c478bd9Sstevel@tonic-gate  *		%g1: arg1
2127c478bd9Sstevel@tonic-gate  *		%g2: arg2
2137c478bd9Sstevel@tonic-gate  *
2147c478bd9Sstevel@tonic-gate  *	If the request of "func" is not a kernel address, then it has
2157c478bd9Sstevel@tonic-gate  *	to be an interrupt number assigned through add_softintr().
2167c478bd9Sstevel@tonic-gate  *	An interrupt number is an index into the interrupt vector table,
2177c478bd9Sstevel@tonic-gate  *	whose entry contains an interrupt handler address with its
2187c478bd9Sstevel@tonic-gate  *	corresponding interrupt level and argument.
2197c478bd9Sstevel@tonic-gate  *	The target cpu will arrange the request to be serviced according
2207c478bd9Sstevel@tonic-gate  *	to its pre-registered information.
2217c478bd9Sstevel@tonic-gate  *	args are assumed to be zeros in this case.
2227c478bd9Sstevel@tonic-gate  *
2237c478bd9Sstevel@tonic-gate  * In addition, callers are allowed to capture and release cpus by
2247c478bd9Sstevel@tonic-gate  * calling the routines: xc_attention() and xc_dismissed().
2257c478bd9Sstevel@tonic-gate  */
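
/*
 * Illustrative sketch (not part of the implementation): a caller with a
 * hypothetical TL=0 handler my_tl0_handler() might use the interfaces
 * described above roughly as follows:
 *
 *	static void
 *	my_tl0_handler(uint64_t arg1, uint64_t arg2)
 *	{
 *		... work done at TL=0, XCALL_PIL, on the target cpu ...
 *	}
 *
 *	xc_one(cpuid, (xcfunc_t *)my_tl0_handler, arg1, arg2);
 *
 * and a caller that needs a set of cpus held quiescent for a critical
 * session would bracket the work with:
 *
 *	xc_attention(cpuset);
 *	... critical section; captured cpus spin in xc_loop() ...
 *	xc_dismissed(cpuset);
 */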
2267c478bd9Sstevel@tonic-gate 
227b885580bSAlexander Kolbasov /*
228b885580bSAlexander Kolbasov  * spl_xcall - set PIL to xcall level
229b885580bSAlexander Kolbasov  */
230b885580bSAlexander Kolbasov int
231b885580bSAlexander Kolbasov spl_xcall(void)
232b885580bSAlexander Kolbasov {
233b885580bSAlexander Kolbasov 	return (splr(XCALL_PIL));
234b885580bSAlexander Kolbasov }
235b885580bSAlexander Kolbasov 
2367c478bd9Sstevel@tonic-gate /*
2377c478bd9Sstevel@tonic-gate  * xt_one - send an "x-trap" to a cpu
2387c478bd9Sstevel@tonic-gate  */
2397c478bd9Sstevel@tonic-gate void
2407c478bd9Sstevel@tonic-gate xt_one(int cix, xcfunc_t *func, uint64_t arg1, uint64_t arg2)
2417c478bd9Sstevel@tonic-gate {
2427c478bd9Sstevel@tonic-gate 	if (!CPU_IN_SET(cpu_ready_set, cix)) {
2437c478bd9Sstevel@tonic-gate 		return;
2447c478bd9Sstevel@tonic-gate 	}
2457c478bd9Sstevel@tonic-gate 	xt_one_unchecked(cix, func, arg1, arg2);
2467c478bd9Sstevel@tonic-gate }
2477c478bd9Sstevel@tonic-gate 
2487c478bd9Sstevel@tonic-gate /*
2497c478bd9Sstevel@tonic-gate  * xt_one_unchecked - send an "x-trap" to a cpu without checking for its
2507c478bd9Sstevel@tonic-gate  * existence in cpu_ready_set
2517c478bd9Sstevel@tonic-gate  */
2527c478bd9Sstevel@tonic-gate void
2537c478bd9Sstevel@tonic-gate xt_one_unchecked(int cix, xcfunc_t *func, uint64_t arg1, uint64_t arg2)
2547c478bd9Sstevel@tonic-gate {
2557c478bd9Sstevel@tonic-gate 	int lcx;
2567c478bd9Sstevel@tonic-gate 	int opl;
2577c478bd9Sstevel@tonic-gate 	cpuset_t tset;
2587c478bd9Sstevel@tonic-gate 
2597c478bd9Sstevel@tonic-gate 	/*
2607c478bd9Sstevel@tonic-gate 	 * Make sure the function address will not be interpreted as a
2617c478bd9Sstevel@tonic-gate 	 * dmv interrupt
2627c478bd9Sstevel@tonic-gate 	 */
2637c478bd9Sstevel@tonic-gate 	ASSERT(!DMV_IS_DMV(func));
2647c478bd9Sstevel@tonic-gate 
2657c478bd9Sstevel@tonic-gate 	/*
2667c478bd9Sstevel@tonic-gate 	 * It's illegal to send software inums through the cross-trap
2677c478bd9Sstevel@tonic-gate 	 * interface.
2687c478bd9Sstevel@tonic-gate 	 */
2697c478bd9Sstevel@tonic-gate 	ASSERT((uintptr_t)func >= KERNELBASE);
2707c478bd9Sstevel@tonic-gate 
2717c478bd9Sstevel@tonic-gate 	CPUSET_ZERO(tset);
2727c478bd9Sstevel@tonic-gate 
2737c478bd9Sstevel@tonic-gate 	XC_SPL_ENTER(lcx, opl);			/* lcx set by the macro */
2747c478bd9Sstevel@tonic-gate 
2757c478bd9Sstevel@tonic-gate 	CPUSET_ADD(tset, cix);
2767c478bd9Sstevel@tonic-gate 
2777c478bd9Sstevel@tonic-gate 	if (cix == lcx) {
2787c478bd9Sstevel@tonic-gate 		/*
2797c478bd9Sstevel@tonic-gate 		 * same cpu - use software fast trap
2807c478bd9Sstevel@tonic-gate 		 */
2817c478bd9Sstevel@tonic-gate 		send_self_xcall(CPU, arg1, arg2, func);
2827c478bd9Sstevel@tonic-gate 		XC_STAT_INC(x_dstat[lcx][XT_ONE_SELF]);
2837c478bd9Sstevel@tonic-gate 		XC_TRACE(XT_ONE_SELF, &tset, func, arg1, arg2);
2847c478bd9Sstevel@tonic-gate 	} else {
2857c478bd9Sstevel@tonic-gate 		/*
2867c478bd9Sstevel@tonic-gate 		 * other cpu - send a mondo to the target cpu
2877c478bd9Sstevel@tonic-gate 		 */
2887c478bd9Sstevel@tonic-gate 		XC_TRACE(XT_ONE_OTHER, &tset, func, arg1, arg2);
2897c478bd9Sstevel@tonic-gate 		init_mondo(func, arg1, arg2);
2907c478bd9Sstevel@tonic-gate 		send_one_mondo(cix);
2917c478bd9Sstevel@tonic-gate 		XC_STAT_INC(x_dstat[lcx][XT_ONE_OTHER]);
2927c478bd9Sstevel@tonic-gate 	}
2937c478bd9Sstevel@tonic-gate 	XC_SPL_EXIT(lcx, opl);
2947c478bd9Sstevel@tonic-gate }
2957c478bd9Sstevel@tonic-gate 
2967c478bd9Sstevel@tonic-gate /*
2977c478bd9Sstevel@tonic-gate  * xt_some - send an "x-trap" to some cpus
2987c478bd9Sstevel@tonic-gate  */
2997c478bd9Sstevel@tonic-gate void
3007c478bd9Sstevel@tonic-gate xt_some(cpuset_t cpuset, xcfunc_t *func, uint64_t arg1, uint64_t arg2)
3017c478bd9Sstevel@tonic-gate {
3027c478bd9Sstevel@tonic-gate 	int lcx;
3037c478bd9Sstevel@tonic-gate 	int opl;
3047c478bd9Sstevel@tonic-gate 	cpuset_t xc_cpuset, tset;
3057c478bd9Sstevel@tonic-gate 
3067c478bd9Sstevel@tonic-gate 	/*
3077c478bd9Sstevel@tonic-gate 	 * Make sure the function address will not be interpreted as a
3087c478bd9Sstevel@tonic-gate 	 * dmv interrupt
3097c478bd9Sstevel@tonic-gate 	 */
3107c478bd9Sstevel@tonic-gate 	ASSERT(!DMV_IS_DMV(func));
3117c478bd9Sstevel@tonic-gate 
3127c478bd9Sstevel@tonic-gate 	/*
3137c478bd9Sstevel@tonic-gate 	 * It's illegal to send software inums through the cross-trap
3147c478bd9Sstevel@tonic-gate 	 * interface.
3157c478bd9Sstevel@tonic-gate 	 */
3167c478bd9Sstevel@tonic-gate 	ASSERT((uintptr_t)func >= KERNELBASE);
3177c478bd9Sstevel@tonic-gate 
3187c478bd9Sstevel@tonic-gate 	CPUSET_ZERO(tset);
3197c478bd9Sstevel@tonic-gate 
3207c478bd9Sstevel@tonic-gate 	XC_SPL_ENTER(lcx, opl);		/* lcx set by the macro */
3217c478bd9Sstevel@tonic-gate 
3227c478bd9Sstevel@tonic-gate 	CPUSET_ADD(tset, lcx);
3237c478bd9Sstevel@tonic-gate 
3247c478bd9Sstevel@tonic-gate 	/*
3257c478bd9Sstevel@tonic-gate 	 * only send to the CPU_READY ones
3267c478bd9Sstevel@tonic-gate 	 */
3277c478bd9Sstevel@tonic-gate 	xc_cpuset = cpu_ready_set;
3287c478bd9Sstevel@tonic-gate 	CPUSET_AND(xc_cpuset, cpuset);
3297c478bd9Sstevel@tonic-gate 
3307c478bd9Sstevel@tonic-gate 	/*
3317c478bd9Sstevel@tonic-gate 	 * send to nobody; just return
3327c478bd9Sstevel@tonic-gate 	 */
3337c478bd9Sstevel@tonic-gate 	if (CPUSET_ISNULL(xc_cpuset)) {
3347c478bd9Sstevel@tonic-gate 		XC_SPL_EXIT(lcx, opl);
3357c478bd9Sstevel@tonic-gate 		return;
3367c478bd9Sstevel@tonic-gate 	}
3377c478bd9Sstevel@tonic-gate 
3387c478bd9Sstevel@tonic-gate 	/*
3397c478bd9Sstevel@tonic-gate 	 * don't send mondo to self
3407c478bd9Sstevel@tonic-gate 	 */
3417c478bd9Sstevel@tonic-gate 	if (CPU_IN_SET(xc_cpuset, lcx)) {
3427c478bd9Sstevel@tonic-gate 		/*
3437c478bd9Sstevel@tonic-gate 		 * same cpu - use software fast trap
3447c478bd9Sstevel@tonic-gate 		 */
3457c478bd9Sstevel@tonic-gate 		send_self_xcall(CPU, arg1, arg2, func);
3467c478bd9Sstevel@tonic-gate 		XC_STAT_INC(x_dstat[lcx][XT_SOME_SELF]);
3477c478bd9Sstevel@tonic-gate 		XC_TRACE(XT_SOME_SELF, &tset, func, arg1, arg2);
3487c478bd9Sstevel@tonic-gate 		CPUSET_DEL(xc_cpuset, lcx);
3497c478bd9Sstevel@tonic-gate 		if (CPUSET_ISNULL(xc_cpuset)) {
3507c478bd9Sstevel@tonic-gate 			XC_SPL_EXIT(lcx, opl);
3517c478bd9Sstevel@tonic-gate 			return;
3527c478bd9Sstevel@tonic-gate 		}
3537c478bd9Sstevel@tonic-gate 	}
3547c478bd9Sstevel@tonic-gate 	XC_TRACE(XT_SOME_OTHER, &xc_cpuset, func, arg1, arg2);
3557c478bd9Sstevel@tonic-gate 	init_mondo(func, arg1, arg2);
3567c478bd9Sstevel@tonic-gate 	send_mondo_set(xc_cpuset);
3577c478bd9Sstevel@tonic-gate 	XC_STAT_INC(x_dstat[lcx][XT_SOME_OTHER]);
3587c478bd9Sstevel@tonic-gate 
3597c478bd9Sstevel@tonic-gate 	XC_SPL_EXIT(lcx, opl);
3607c478bd9Sstevel@tonic-gate }
3617c478bd9Sstevel@tonic-gate 
3627c478bd9Sstevel@tonic-gate /*
3637c478bd9Sstevel@tonic-gate  * xt_all - send an "x-trap" to all cpus
3647c478bd9Sstevel@tonic-gate  */
3657c478bd9Sstevel@tonic-gate void
3667c478bd9Sstevel@tonic-gate xt_all(xcfunc_t *func, uint64_t arg1, uint64_t arg2)
3677c478bd9Sstevel@tonic-gate {
3687c478bd9Sstevel@tonic-gate 	int lcx;
3697c478bd9Sstevel@tonic-gate 	int opl;
3707c478bd9Sstevel@tonic-gate 	cpuset_t xc_cpuset, tset;
3717c478bd9Sstevel@tonic-gate 
3727c478bd9Sstevel@tonic-gate 	/*
3737c478bd9Sstevel@tonic-gate 	 * Make sure the function address will not be interpreted as a
3747c478bd9Sstevel@tonic-gate 	 * dmv interrupt
3757c478bd9Sstevel@tonic-gate 	 */
3767c478bd9Sstevel@tonic-gate 	ASSERT(!DMV_IS_DMV(func));
3777c478bd9Sstevel@tonic-gate 
3787c478bd9Sstevel@tonic-gate 	/*
3797c478bd9Sstevel@tonic-gate 	 * It's illegal to send software inums through the cross-trap
3807c478bd9Sstevel@tonic-gate 	 * interface.
3817c478bd9Sstevel@tonic-gate 	 */
3827c478bd9Sstevel@tonic-gate 	ASSERT((uintptr_t)func >= KERNELBASE);
3837c478bd9Sstevel@tonic-gate 
3847c478bd9Sstevel@tonic-gate 	CPUSET_ZERO(tset);
3857c478bd9Sstevel@tonic-gate 
3867c478bd9Sstevel@tonic-gate 	XC_SPL_ENTER(lcx, opl);		/* lcx set by the macro */
3877c478bd9Sstevel@tonic-gate 
3887c478bd9Sstevel@tonic-gate 	CPUSET_ADD(tset, lcx);
3897c478bd9Sstevel@tonic-gate 
3907c478bd9Sstevel@tonic-gate 	/*
3917c478bd9Sstevel@tonic-gate 	 * same cpu - use software fast trap
3927c478bd9Sstevel@tonic-gate 	 */
3937c478bd9Sstevel@tonic-gate 	if (CPU_IN_SET(cpu_ready_set, lcx))
3947c478bd9Sstevel@tonic-gate 		send_self_xcall(CPU, arg1, arg2, func);
3957c478bd9Sstevel@tonic-gate 
3967c478bd9Sstevel@tonic-gate 	XC_TRACE(XT_ALL_OTHER, &cpu_ready_set, func, arg1, arg2);
3977c478bd9Sstevel@tonic-gate 
3987c478bd9Sstevel@tonic-gate 	/*
3997c478bd9Sstevel@tonic-gate 	 * don't send mondo to self
4007c478bd9Sstevel@tonic-gate 	 */
4017c478bd9Sstevel@tonic-gate 	xc_cpuset = cpu_ready_set;
4027c478bd9Sstevel@tonic-gate 	CPUSET_DEL(xc_cpuset, lcx);
4037c478bd9Sstevel@tonic-gate 
4047c478bd9Sstevel@tonic-gate 	if (CPUSET_ISNULL(xc_cpuset)) {
4057c478bd9Sstevel@tonic-gate 		XC_STAT_INC(x_dstat[lcx][XT_ALL_SELF]);
4067c478bd9Sstevel@tonic-gate 		XC_TRACE(XT_ALL_SELF, &tset, func, arg1, arg2);
4077c478bd9Sstevel@tonic-gate 		XC_SPL_EXIT(lcx, opl);
4087c478bd9Sstevel@tonic-gate 		return;
4097c478bd9Sstevel@tonic-gate 	}
4107c478bd9Sstevel@tonic-gate 
4117c478bd9Sstevel@tonic-gate 	init_mondo(func, arg1, arg2);
4127c478bd9Sstevel@tonic-gate 	send_mondo_set(xc_cpuset);
4137c478bd9Sstevel@tonic-gate 
4147c478bd9Sstevel@tonic-gate 	XC_STAT_INC(x_dstat[lcx][XT_ALL_OTHER]);
4157c478bd9Sstevel@tonic-gate 	XC_SPL_EXIT(lcx, opl);
4167c478bd9Sstevel@tonic-gate }
4177c478bd9Sstevel@tonic-gate 
4187c478bd9Sstevel@tonic-gate /*
4197c478bd9Sstevel@tonic-gate  * xc_one - send an "x-call" to a cpu
4207c478bd9Sstevel@tonic-gate  */
4217c478bd9Sstevel@tonic-gate void
4227c478bd9Sstevel@tonic-gate xc_one(int cix, xcfunc_t *func, uint64_t arg1, uint64_t arg2)
4237c478bd9Sstevel@tonic-gate {
4247c478bd9Sstevel@tonic-gate 	int lcx;
4257c478bd9Sstevel@tonic-gate 	int opl;
4267c478bd9Sstevel@tonic-gate 	uint64_t loop_cnt = 0;
4277c478bd9Sstevel@tonic-gate 	cpuset_t tset;
4287c478bd9Sstevel@tonic-gate 	int first_time = 1;
4297c478bd9Sstevel@tonic-gate 
4307c478bd9Sstevel@tonic-gate 	/*
4317c478bd9Sstevel@tonic-gate 	 * send to nobody; just return
4327c478bd9Sstevel@tonic-gate 	 */
4337c478bd9Sstevel@tonic-gate 	if (!CPU_IN_SET(cpu_ready_set, cix))
4347c478bd9Sstevel@tonic-gate 		return;
4357c478bd9Sstevel@tonic-gate 
4367c478bd9Sstevel@tonic-gate 	ASSERT((uintptr_t)func > KERNELBASE);
4377c478bd9Sstevel@tonic-gate 	ASSERT(((uintptr_t)func % PC_ALIGN) == 0);
4387c478bd9Sstevel@tonic-gate 
4397c478bd9Sstevel@tonic-gate 	CPUSET_ZERO(tset);
4407c478bd9Sstevel@tonic-gate 
4417c478bd9Sstevel@tonic-gate 	kpreempt_disable();
4427c478bd9Sstevel@tonic-gate 
4437c478bd9Sstevel@tonic-gate 	XC_SPL_ENTER(lcx, opl);		/* lcx set by the macro */
4447c478bd9Sstevel@tonic-gate 
4457c478bd9Sstevel@tonic-gate 	CPUSET_ADD(tset, cix);
4467c478bd9Sstevel@tonic-gate 
4477c478bd9Sstevel@tonic-gate 	if (cix == lcx) {	/* same cpu just do it */
4487c478bd9Sstevel@tonic-gate 		XC_TRACE(XC_ONE_SELF, &tset, func, arg1, arg2);
4497c478bd9Sstevel@tonic-gate 		(*func)(arg1, arg2);
4507c478bd9Sstevel@tonic-gate 		XC_STAT_INC(x_dstat[lcx][XC_ONE_SELF]);
4517c478bd9Sstevel@tonic-gate 		XC_SPL_EXIT(lcx, opl);
4527c478bd9Sstevel@tonic-gate 		kpreempt_enable();
4537c478bd9Sstevel@tonic-gate 		return;
4547c478bd9Sstevel@tonic-gate 	}
4557c478bd9Sstevel@tonic-gate 
4567c478bd9Sstevel@tonic-gate 	if (xc_holder == lcx) {		/* got the xc_sys_mutex already */
4577c478bd9Sstevel@tonic-gate 		ASSERT(MUTEX_HELD(&xc_sys_mutex));
4587c478bd9Sstevel@tonic-gate 		ASSERT(CPU_IN_SET(xc_mbox[lcx].xc_cpuset, lcx));
4597c478bd9Sstevel@tonic-gate 		ASSERT(CPU_IN_SET(xc_mbox[cix].xc_cpuset, cix));
4607c478bd9Sstevel@tonic-gate 		ASSERT(xc_mbox[cix].xc_state == XC_WAIT);
4617c478bd9Sstevel@tonic-gate 		XC_TRACE(XC_ONE_OTHER_H, &tset, func, arg1, arg2);
4627c478bd9Sstevel@tonic-gate 
4637c478bd9Sstevel@tonic-gate 		/*
4647c478bd9Sstevel@tonic-gate 		 * target processor's xc_loop should be waiting
4657c478bd9Sstevel@tonic-gate 		 * for the work to do; just set up the xc_mbox
4667c478bd9Sstevel@tonic-gate 		 */
4677c478bd9Sstevel@tonic-gate 		XC_SETUP(cix, func, arg1, arg2);
4687c478bd9Sstevel@tonic-gate 		membar_stld();
4697c478bd9Sstevel@tonic-gate 
4707c478bd9Sstevel@tonic-gate 		while (xc_mbox[cix].xc_state != XC_WAIT) {
4717c478bd9Sstevel@tonic-gate 			if (loop_cnt++ > xc_func_time_limit) {
4727c478bd9Sstevel@tonic-gate 				if (sendmondo_in_recover) {
4737c478bd9Sstevel@tonic-gate 					drv_usecwait(1);
4747c478bd9Sstevel@tonic-gate 					loop_cnt = 0;
4757c478bd9Sstevel@tonic-gate 					continue;
4767c478bd9Sstevel@tonic-gate 				}
4777c478bd9Sstevel@tonic-gate 				cmn_err(CE_PANIC, "xc_one() timeout, "
4787c478bd9Sstevel@tonic-gate 				    "xc_state[%d] != XC_WAIT", cix);
4797c478bd9Sstevel@tonic-gate 			}
4807c478bd9Sstevel@tonic-gate 		}
4817c478bd9Sstevel@tonic-gate 		XC_STAT_INC(x_dstat[lcx][XC_ONE_OTHER_H]);
4827c478bd9Sstevel@tonic-gate 		XC_SPL_EXIT(lcx, opl);
4837c478bd9Sstevel@tonic-gate 		kpreempt_enable();
4847c478bd9Sstevel@tonic-gate 		return;
4857c478bd9Sstevel@tonic-gate 	}
4867c478bd9Sstevel@tonic-gate 
4877c478bd9Sstevel@tonic-gate 	/*
4887c478bd9Sstevel@tonic-gate 	 * Avoid deadlock if someone has sent us an xc_loop request while
4897c478bd9Sstevel@tonic-gate 	 * we are trying to grab xc_sys_mutex.
4907c478bd9Sstevel@tonic-gate 	 */
4917c478bd9Sstevel@tonic-gate 	XC_SPL_EXIT(lcx, opl);
4927c478bd9Sstevel@tonic-gate 
4937c478bd9Sstevel@tonic-gate 	/*
4947c478bd9Sstevel@tonic-gate 	 * At this point, since we don't own xc_sys_mutex,
4957c478bd9Sstevel@tonic-gate 	 * our PIL should be below XCALL_PIL.
4967c478bd9Sstevel@tonic-gate 	 */
4977c478bd9Sstevel@tonic-gate 	ASSERT(getpil() < XCALL_PIL);
4987c478bd9Sstevel@tonic-gate 
4997c478bd9Sstevel@tonic-gate 	/*
5007c478bd9Sstevel@tonic-gate 	 * Since xc_holder is not owned by us, it could be that
5017c478bd9Sstevel@tonic-gate 	 * no one owns it, or we are not informed to enter into
5027c478bd9Sstevel@tonic-gate 	 * xc_loop(). In either case, we need to grab the
5037c478bd9Sstevel@tonic-gate 	 * xc_sys_mutex before we write to the xc_mbox, and
5047c478bd9Sstevel@tonic-gate 	 * we shouldn't release it until the request is finished.
5057c478bd9Sstevel@tonic-gate 	 */
5067c478bd9Sstevel@tonic-gate 
5077c478bd9Sstevel@tonic-gate 	mutex_enter(&xc_sys_mutex);
5087c478bd9Sstevel@tonic-gate 	xc_spl_enter[lcx] = 1;
5097c478bd9Sstevel@tonic-gate 
5107c478bd9Sstevel@tonic-gate 	/*
5117c478bd9Sstevel@tonic-gate 	 * Since we own xc_sys_mutex now, we are safe to
512254020a7Scb 	 * write to the xc_mbox.
5137c478bd9Sstevel@tonic-gate 	 */
5147c478bd9Sstevel@tonic-gate 	ASSERT(xc_mbox[cix].xc_state == XC_IDLE);
5157c478bd9Sstevel@tonic-gate 	XC_TRACE(XC_ONE_OTHER, &tset, func, arg1, arg2);
5167c478bd9Sstevel@tonic-gate 	XC_SETUP(cix, func, arg1, arg2);
5177c478bd9Sstevel@tonic-gate 	init_mondo(setsoftint_tl1, xc_serv_inum, 0);
5187c478bd9Sstevel@tonic-gate 	send_one_mondo(cix);
519254020a7Scb 	xc_spl_enter[lcx] = 0;
5207c478bd9Sstevel@tonic-gate 
5217c478bd9Sstevel@tonic-gate 	/* xc_serv does membar_stld */
5227c478bd9Sstevel@tonic-gate 	while (xc_mbox[cix].xc_state != XC_IDLE) {
5237c478bd9Sstevel@tonic-gate 		if (loop_cnt++ > xc_func_time_limit) {
5247c478bd9Sstevel@tonic-gate 			if (sendmondo_in_recover) {
5257c478bd9Sstevel@tonic-gate 				drv_usecwait(1);
5267c478bd9Sstevel@tonic-gate 				loop_cnt = 0;
5277c478bd9Sstevel@tonic-gate 				continue;
5287c478bd9Sstevel@tonic-gate 			}
5297c478bd9Sstevel@tonic-gate 			if (first_time) {
5307c478bd9Sstevel@tonic-gate 				XT_SYNC_ONE(cix);
5317c478bd9Sstevel@tonic-gate 				first_time = 0;
5327c478bd9Sstevel@tonic-gate 				loop_cnt = 0;
5337c478bd9Sstevel@tonic-gate 				continue;
5347c478bd9Sstevel@tonic-gate 			}
5357c478bd9Sstevel@tonic-gate 			cmn_err(CE_PANIC, "xc_one() timeout, "
5367c478bd9Sstevel@tonic-gate 			    "xc_state[%d] != XC_IDLE", cix);
5377c478bd9Sstevel@tonic-gate 		}
5387c478bd9Sstevel@tonic-gate 	}
5397c478bd9Sstevel@tonic-gate 	XC_STAT_INC(x_dstat[lcx][XC_ONE_OTHER]);
5407c478bd9Sstevel@tonic-gate 	mutex_exit(&xc_sys_mutex);
5417c478bd9Sstevel@tonic-gate 
5427c478bd9Sstevel@tonic-gate 	kpreempt_enable();
5437c478bd9Sstevel@tonic-gate }
5447c478bd9Sstevel@tonic-gate 
5457c478bd9Sstevel@tonic-gate /*
5467c478bd9Sstevel@tonic-gate  * xc_some - send an "x-call" to some cpus; sending to self is excluded
5477c478bd9Sstevel@tonic-gate  */
5487c478bd9Sstevel@tonic-gate void
5497c478bd9Sstevel@tonic-gate xc_some(cpuset_t cpuset, xcfunc_t *func, uint64_t arg1, uint64_t arg2)
5507c478bd9Sstevel@tonic-gate {
5517c478bd9Sstevel@tonic-gate 	int lcx;
5527c478bd9Sstevel@tonic-gate 	int opl;
5537c478bd9Sstevel@tonic-gate 	cpuset_t xc_cpuset, tset;
5547c478bd9Sstevel@tonic-gate 
5557c478bd9Sstevel@tonic-gate 	ASSERT((uintptr_t)func > KERNELBASE);
5567c478bd9Sstevel@tonic-gate 	ASSERT(((uintptr_t)func % PC_ALIGN) == 0);
5577c478bd9Sstevel@tonic-gate 
5587c478bd9Sstevel@tonic-gate 	CPUSET_ZERO(tset);
5597c478bd9Sstevel@tonic-gate 
5607c478bd9Sstevel@tonic-gate 	kpreempt_disable();
5617c478bd9Sstevel@tonic-gate 	XC_SPL_ENTER(lcx, opl);			/* lcx set by the macro */
5627c478bd9Sstevel@tonic-gate 
5637c478bd9Sstevel@tonic-gate 	CPUSET_ADD(tset, lcx);
5647c478bd9Sstevel@tonic-gate 
5657c478bd9Sstevel@tonic-gate 	/*
5667c478bd9Sstevel@tonic-gate 	 * only send to the CPU_READY ones
5677c478bd9Sstevel@tonic-gate 	 */
5687c478bd9Sstevel@tonic-gate 	xc_cpuset = cpu_ready_set;
5697c478bd9Sstevel@tonic-gate 	CPUSET_AND(xc_cpuset, cpuset);
5707c478bd9Sstevel@tonic-gate 
5717c478bd9Sstevel@tonic-gate 	/*
5727c478bd9Sstevel@tonic-gate 	 * send to nobody; just return
5737c478bd9Sstevel@tonic-gate 	 */
5747c478bd9Sstevel@tonic-gate 	if (CPUSET_ISNULL(xc_cpuset)) {
5757c478bd9Sstevel@tonic-gate 		XC_SPL_EXIT(lcx, opl);
5767c478bd9Sstevel@tonic-gate 		kpreempt_enable();
5777c478bd9Sstevel@tonic-gate 		return;
5787c478bd9Sstevel@tonic-gate 	}
5797c478bd9Sstevel@tonic-gate 
5807c478bd9Sstevel@tonic-gate 	if (CPU_IN_SET(xc_cpuset, lcx)) {
5817c478bd9Sstevel@tonic-gate 		/*
5827c478bd9Sstevel@tonic-gate 		 * same cpu just do it
5837c478bd9Sstevel@tonic-gate 		 */
5847c478bd9Sstevel@tonic-gate 		(*func)(arg1, arg2);
5857c478bd9Sstevel@tonic-gate 		CPUSET_DEL(xc_cpuset, lcx);
5867c478bd9Sstevel@tonic-gate 		if (CPUSET_ISNULL(xc_cpuset)) {
5877c478bd9Sstevel@tonic-gate 			XC_STAT_INC(x_dstat[lcx][XC_SOME_SELF]);
5887c478bd9Sstevel@tonic-gate 			XC_TRACE(XC_SOME_SELF, &tset, func, arg1, arg2);
5897c478bd9Sstevel@tonic-gate 			XC_SPL_EXIT(lcx, opl);
5907c478bd9Sstevel@tonic-gate 			kpreempt_enable();
5917c478bd9Sstevel@tonic-gate 			return;
5927c478bd9Sstevel@tonic-gate 		}
5937c478bd9Sstevel@tonic-gate 	}
5947c478bd9Sstevel@tonic-gate 
5957c478bd9Sstevel@tonic-gate 	if (xc_holder == lcx) {		/* got the xc_sys_mutex already */
5967c478bd9Sstevel@tonic-gate 		cpuset_t mset = xc_mbox[lcx].xc_cpuset;
5977c478bd9Sstevel@tonic-gate 
5987c478bd9Sstevel@tonic-gate 		CPUSET_AND(mset, cpuset);
5997c478bd9Sstevel@tonic-gate 		ASSERT(MUTEX_HELD(&xc_sys_mutex));
6007c478bd9Sstevel@tonic-gate 		ASSERT(CPUSET_ISEQUAL(mset, cpuset));
6017c478bd9Sstevel@tonic-gate 		SEND_MBOX_ONLY(xc_cpuset, func, arg1, arg2, lcx, XC_WAIT);
6027c478bd9Sstevel@tonic-gate 		WAIT_MBOX_DONE(xc_cpuset, lcx, XC_WAIT, 0);
6037c478bd9Sstevel@tonic-gate 		XC_STAT_INC(x_dstat[lcx][XC_SOME_OTHER_H]);
6047c478bd9Sstevel@tonic-gate 		XC_TRACE(XC_SOME_OTHER_H, &xc_cpuset, func, arg1, arg2);
6057c478bd9Sstevel@tonic-gate 		XC_SPL_EXIT(lcx, opl);
6067c478bd9Sstevel@tonic-gate 		kpreempt_enable();
6077c478bd9Sstevel@tonic-gate 		return;
6087c478bd9Sstevel@tonic-gate 	}
6097c478bd9Sstevel@tonic-gate 
6107c478bd9Sstevel@tonic-gate 	/*
6117c478bd9Sstevel@tonic-gate 	 * Avoid deadlock if someone has sent us an xc_loop request while
6127c478bd9Sstevel@tonic-gate 	 * we are trying to grab xc_sys_mutex.
6137c478bd9Sstevel@tonic-gate 	 */
6147c478bd9Sstevel@tonic-gate 	XC_SPL_EXIT(lcx, opl);
6157c478bd9Sstevel@tonic-gate 
6167c478bd9Sstevel@tonic-gate 	/*
6177c478bd9Sstevel@tonic-gate 	 * At this point, since we don't own xc_sys_mutex,
6187c478bd9Sstevel@tonic-gate 	 * our PIL should be below XCALL_PIL.
6197c478bd9Sstevel@tonic-gate 	 */
6207c478bd9Sstevel@tonic-gate 	ASSERT(getpil() < XCALL_PIL);
6217c478bd9Sstevel@tonic-gate 
6227c478bd9Sstevel@tonic-gate 	/*
6237c478bd9Sstevel@tonic-gate 	 * grab xc_sys_mutex before writing to the xc_mbox
6247c478bd9Sstevel@tonic-gate 	 */
6257c478bd9Sstevel@tonic-gate 	mutex_enter(&xc_sys_mutex);
6267c478bd9Sstevel@tonic-gate 	xc_spl_enter[lcx] = 1;
6277c478bd9Sstevel@tonic-gate 
6287c478bd9Sstevel@tonic-gate 	XC_TRACE(XC_SOME_OTHER, &xc_cpuset, func, arg1, arg2);
6297c478bd9Sstevel@tonic-gate 	init_mondo(setsoftint_tl1, xc_serv_inum, 0);
6307c478bd9Sstevel@tonic-gate 	SEND_MBOX_MONDO(xc_cpuset, func, arg1, arg2, XC_IDLE);
6317c478bd9Sstevel@tonic-gate 	WAIT_MBOX_DONE(xc_cpuset, lcx, XC_IDLE, 1);
6327c478bd9Sstevel@tonic-gate 
6337c478bd9Sstevel@tonic-gate 	xc_spl_enter[lcx] = 0;
6347c478bd9Sstevel@tonic-gate 	XC_STAT_INC(x_dstat[lcx][XC_SOME_OTHER]);
6357c478bd9Sstevel@tonic-gate 	mutex_exit(&xc_sys_mutex);
6367c478bd9Sstevel@tonic-gate 	kpreempt_enable();
6377c478bd9Sstevel@tonic-gate }
6387c478bd9Sstevel@tonic-gate 
6397c478bd9Sstevel@tonic-gate /*
6407c478bd9Sstevel@tonic-gate  * xc_all - send an "x-call" to all cpus
6417c478bd9Sstevel@tonic-gate  */
6427c478bd9Sstevel@tonic-gate void
6437c478bd9Sstevel@tonic-gate xc_all(xcfunc_t *func, uint64_t arg1, uint64_t arg2)
6447c478bd9Sstevel@tonic-gate {
6457c478bd9Sstevel@tonic-gate 	int lcx;
6467c478bd9Sstevel@tonic-gate 	int opl;
6477c478bd9Sstevel@tonic-gate 	cpuset_t xc_cpuset, tset;
6487c478bd9Sstevel@tonic-gate 
6497c478bd9Sstevel@tonic-gate 	ASSERT((uintptr_t)func > KERNELBASE);
6507c478bd9Sstevel@tonic-gate 	ASSERT(((uintptr_t)func % PC_ALIGN) == 0);
6517c478bd9Sstevel@tonic-gate 
6527c478bd9Sstevel@tonic-gate 	CPUSET_ZERO(tset);
6537c478bd9Sstevel@tonic-gate 
6547c478bd9Sstevel@tonic-gate 	kpreempt_disable();
6557c478bd9Sstevel@tonic-gate 	XC_SPL_ENTER(lcx, opl);			/* lcx set by the macro */
6567c478bd9Sstevel@tonic-gate 
6577c478bd9Sstevel@tonic-gate 	CPUSET_ADD(tset, lcx);
6587c478bd9Sstevel@tonic-gate 
6597c478bd9Sstevel@tonic-gate 	/*
6607c478bd9Sstevel@tonic-gate 	 * same cpu just do it
6617c478bd9Sstevel@tonic-gate 	 */
6627c478bd9Sstevel@tonic-gate 	(*func)(arg1, arg2);
6637c478bd9Sstevel@tonic-gate 	xc_cpuset = cpu_ready_set;
6647c478bd9Sstevel@tonic-gate 	CPUSET_DEL(xc_cpuset, lcx);
6657c478bd9Sstevel@tonic-gate 
6667c478bd9Sstevel@tonic-gate 	if (CPUSET_ISNULL(xc_cpuset)) {
6677c478bd9Sstevel@tonic-gate 		XC_STAT_INC(x_dstat[lcx][XC_ALL_SELF]);
6687c478bd9Sstevel@tonic-gate 		XC_TRACE(XC_ALL_SELF, &tset, func, arg1, arg2);
6697c478bd9Sstevel@tonic-gate 		XC_SPL_EXIT(lcx, opl);
6707c478bd9Sstevel@tonic-gate 		kpreempt_enable();
6717c478bd9Sstevel@tonic-gate 		return;
6727c478bd9Sstevel@tonic-gate 	}
6737c478bd9Sstevel@tonic-gate 
6747c478bd9Sstevel@tonic-gate 	if (xc_holder == lcx) {		/* got the xc_sys_mutex already */
6757c478bd9Sstevel@tonic-gate 		cpuset_t mset = xc_mbox[lcx].xc_cpuset;
6767c478bd9Sstevel@tonic-gate 
6777c478bd9Sstevel@tonic-gate 		CPUSET_AND(mset, xc_cpuset);
6787c478bd9Sstevel@tonic-gate 		ASSERT(MUTEX_HELD(&xc_sys_mutex));
6797c478bd9Sstevel@tonic-gate 		ASSERT(CPUSET_ISEQUAL(mset, xc_cpuset));
6807c478bd9Sstevel@tonic-gate 		XC_TRACE(XC_ALL_OTHER_H, &xc_cpuset, func, arg1, arg2);
6817c478bd9Sstevel@tonic-gate 		SEND_MBOX_ONLY(xc_cpuset, func, arg1, arg2, lcx, XC_WAIT);
6827c478bd9Sstevel@tonic-gate 		WAIT_MBOX_DONE(xc_cpuset, lcx, XC_WAIT, 0);
6837c478bd9Sstevel@tonic-gate 		XC_STAT_INC(x_dstat[lcx][XC_ALL_OTHER_H]);
6847c478bd9Sstevel@tonic-gate 		XC_SPL_EXIT(lcx, opl);
6857c478bd9Sstevel@tonic-gate 		kpreempt_enable();
6867c478bd9Sstevel@tonic-gate 		return;
6877c478bd9Sstevel@tonic-gate 	}
6887c478bd9Sstevel@tonic-gate 
6897c478bd9Sstevel@tonic-gate 	/*
6907c478bd9Sstevel@tonic-gate 	 * Avoid deadlock if someone has sent us an xc_loop request while
6917c478bd9Sstevel@tonic-gate 	 * we are trying to grab xc_sys_mutex.
6927c478bd9Sstevel@tonic-gate 	 */
6937c478bd9Sstevel@tonic-gate 	XC_SPL_EXIT(lcx, opl);
6947c478bd9Sstevel@tonic-gate 
6957c478bd9Sstevel@tonic-gate 	/*
6967c478bd9Sstevel@tonic-gate 	 * At this point, since we don't own xc_sys_mutex,
6977c478bd9Sstevel@tonic-gate 	 * our PIL should be below XCALL_PIL.
6987c478bd9Sstevel@tonic-gate 	 */
6997c478bd9Sstevel@tonic-gate 	ASSERT(getpil() < XCALL_PIL);
7007c478bd9Sstevel@tonic-gate 
7017c478bd9Sstevel@tonic-gate 	/*
7027c478bd9Sstevel@tonic-gate 	 * grab xc_sys_mutex before writing to the xc_mbox
7037c478bd9Sstevel@tonic-gate 	 */
7047c478bd9Sstevel@tonic-gate 	mutex_enter(&xc_sys_mutex);
7057c478bd9Sstevel@tonic-gate 	xc_spl_enter[lcx] = 1;
7067c478bd9Sstevel@tonic-gate 
7077c478bd9Sstevel@tonic-gate 	XC_TRACE(XC_ALL_OTHER, &xc_cpuset, func, arg1, arg2);
7087c478bd9Sstevel@tonic-gate 	init_mondo(setsoftint_tl1, xc_serv_inum, 0);
7097c478bd9Sstevel@tonic-gate 	SEND_MBOX_MONDO(xc_cpuset, func, arg1, arg2, XC_IDLE);
7107c478bd9Sstevel@tonic-gate 	WAIT_MBOX_DONE(xc_cpuset, lcx, XC_IDLE, 1);
7117c478bd9Sstevel@tonic-gate 
7127c478bd9Sstevel@tonic-gate 	xc_spl_enter[lcx] = 0;
7137c478bd9Sstevel@tonic-gate 	XC_STAT_INC(x_dstat[lcx][XC_ALL_OTHER]);
7147c478bd9Sstevel@tonic-gate 	mutex_exit(&xc_sys_mutex);
7157c478bd9Sstevel@tonic-gate 	kpreempt_enable();
7167c478bd9Sstevel@tonic-gate }
7177c478bd9Sstevel@tonic-gate 
7187c478bd9Sstevel@tonic-gate /*
7197c478bd9Sstevel@tonic-gate  * xc_attention - paired with xc_dismissed()
7207c478bd9Sstevel@tonic-gate  *
7217c478bd9Sstevel@tonic-gate  * xc_attention() acquires the xc_sys_mutex and xc_dismissed() releases it.
7227c478bd9Sstevel@tonic-gate  * Called when an initiator wants to capture some/all cpus for a critical
7237c478bd9Sstevel@tonic-gate  * session.
7247c478bd9Sstevel@tonic-gate  */
7257c478bd9Sstevel@tonic-gate void
7267c478bd9Sstevel@tonic-gate xc_attention(cpuset_t cpuset)
7277c478bd9Sstevel@tonic-gate {
7287c478bd9Sstevel@tonic-gate 	int pix, lcx;
7297c478bd9Sstevel@tonic-gate 	cpuset_t xc_cpuset, tmpset;
7307c478bd9Sstevel@tonic-gate 	cpuset_t recv_cpuset;
7317c478bd9Sstevel@tonic-gate 	uint64_t loop_cnt = 0;
7327c478bd9Sstevel@tonic-gate 	int first_time = 1;
7337c478bd9Sstevel@tonic-gate 
7347c478bd9Sstevel@tonic-gate 	CPUSET_ZERO(recv_cpuset);
7357c478bd9Sstevel@tonic-gate 
7367c478bd9Sstevel@tonic-gate 	/*
7377c478bd9Sstevel@tonic-gate 	 * don't migrate the cpu until xc_dismissed() is finished
7387c478bd9Sstevel@tonic-gate 	 */
7397c478bd9Sstevel@tonic-gate 	ASSERT(getpil() < XCALL_PIL);
7407c478bd9Sstevel@tonic-gate 	mutex_enter(&xc_sys_mutex);
7417c478bd9Sstevel@tonic-gate 	lcx = (int)(CPU->cpu_id);
7427c478bd9Sstevel@tonic-gate 	ASSERT(x_dstat[lcx][XC_ATTENTION] ==
7437c478bd9Sstevel@tonic-gate 	    x_dstat[lcx][XC_DISMISSED]);
7447c478bd9Sstevel@tonic-gate 	ASSERT(xc_holder == -1);
7457c478bd9Sstevel@tonic-gate 	xc_mbox[lcx].xc_cpuset = cpuset;
7467c478bd9Sstevel@tonic-gate 	xc_holder = lcx; /* no membar; only current cpu needs the right lcx */
7477c478bd9Sstevel@tonic-gate 
7487c478bd9Sstevel@tonic-gate 	/*
7497c478bd9Sstevel@tonic-gate 	 * only send to the CPU_READY ones
7507c478bd9Sstevel@tonic-gate 	 */
7517c478bd9Sstevel@tonic-gate 	xc_cpuset = cpu_ready_set;
7527c478bd9Sstevel@tonic-gate 	CPUSET_AND(xc_cpuset, cpuset);
7537c478bd9Sstevel@tonic-gate 
7547c478bd9Sstevel@tonic-gate 	/*
7557c478bd9Sstevel@tonic-gate 	 * don't send mondo to self
7567c478bd9Sstevel@tonic-gate 	 */
7577c478bd9Sstevel@tonic-gate 	CPUSET_DEL(xc_cpuset, lcx);
7587c478bd9Sstevel@tonic-gate 
7597c478bd9Sstevel@tonic-gate 	XC_STAT_INC(x_dstat[lcx][XC_ATTENTION]);
760*12551037SToomas Soome 	XC_TRACE(XC_ATTENTION, &xc_cpuset, NULL, 0, 0);
7617c478bd9Sstevel@tonic-gate 
7627c478bd9Sstevel@tonic-gate 	if (CPUSET_ISNULL(xc_cpuset))
7637c478bd9Sstevel@tonic-gate 		return;
7647c478bd9Sstevel@tonic-gate 
7657c478bd9Sstevel@tonic-gate 	xc_spl_enter[lcx] = 1;
7667c478bd9Sstevel@tonic-gate 	/*
7677c478bd9Sstevel@tonic-gate 	 * inform the target processors to enter into xc_loop()
7687c478bd9Sstevel@tonic-gate 	 */
7697c478bd9Sstevel@tonic-gate 	init_mondo(setsoftint_tl1, xc_loop_inum, 0);
770e5900f74Sha 	SEND_MBOX_MONDO_XC_ENTER(xc_cpuset);
7717c478bd9Sstevel@tonic-gate 	xc_spl_enter[lcx] = 0;
7727c478bd9Sstevel@tonic-gate 
7737c478bd9Sstevel@tonic-gate 	/*
7747c478bd9Sstevel@tonic-gate 	 * make sure target processors have entered into xc_loop()
7757c478bd9Sstevel@tonic-gate 	 */
7767c478bd9Sstevel@tonic-gate 	while (!CPUSET_ISEQUAL(recv_cpuset, xc_cpuset)) {
7777c478bd9Sstevel@tonic-gate 		tmpset = xc_cpuset;
7787c478bd9Sstevel@tonic-gate 		for (pix = 0; pix < NCPU; pix++) {
7797c478bd9Sstevel@tonic-gate 			if (CPU_IN_SET(tmpset, pix)) {
7807c478bd9Sstevel@tonic-gate 				/*
7817c478bd9Sstevel@tonic-gate 				 * membar_stld() is done in xc_loop
7827c478bd9Sstevel@tonic-gate 				 */
7837c478bd9Sstevel@tonic-gate 				if (xc_mbox[pix].xc_state == XC_WAIT) {
7847c478bd9Sstevel@tonic-gate 					CPUSET_ADD(recv_cpuset, pix);
7857c478bd9Sstevel@tonic-gate 				}
7867c478bd9Sstevel@tonic-gate 				CPUSET_DEL(tmpset, pix);
7877c478bd9Sstevel@tonic-gate 				if (CPUSET_ISNULL(tmpset)) {
7887c478bd9Sstevel@tonic-gate 					break;
7897c478bd9Sstevel@tonic-gate 				}
7907c478bd9Sstevel@tonic-gate 			}
7917c478bd9Sstevel@tonic-gate 		}
7927c478bd9Sstevel@tonic-gate 		if (loop_cnt++ > xc_mondo_time_limit) {
7937c478bd9Sstevel@tonic-gate 			if (sendmondo_in_recover) {
7947c478bd9Sstevel@tonic-gate 				drv_usecwait(1);
7957c478bd9Sstevel@tonic-gate 				loop_cnt = 0;
7967c478bd9Sstevel@tonic-gate 				continue;
7977c478bd9Sstevel@tonic-gate 			}
7987c478bd9Sstevel@tonic-gate 			if (first_time) {
7997c478bd9Sstevel@tonic-gate 				XT_SYNC_SOME(xc_cpuset);
8007c478bd9Sstevel@tonic-gate 				first_time = 0;
8017c478bd9Sstevel@tonic-gate 				loop_cnt = 0;
8027c478bd9Sstevel@tonic-gate 				continue;
8037c478bd9Sstevel@tonic-gate 			}
8047c478bd9Sstevel@tonic-gate 			cmn_err(CE_PANIC, "xc_attention() timeout");
8057c478bd9Sstevel@tonic-gate 		}
8067c478bd9Sstevel@tonic-gate 	}
8077c478bd9Sstevel@tonic-gate 
8087c478bd9Sstevel@tonic-gate 	/*
8097c478bd9Sstevel@tonic-gate 	 * xc_sys_mutex remains held until xc_dismissed() is finished
8107c478bd9Sstevel@tonic-gate 	 */
8117c478bd9Sstevel@tonic-gate }
8127c478bd9Sstevel@tonic-gate 
8137c478bd9Sstevel@tonic-gate /*
8147c478bd9Sstevel@tonic-gate  * xc_dismissed - paired with xc_attention()
8157c478bd9Sstevel@tonic-gate  *
8167c478bd9Sstevel@tonic-gate  * Called after the critical session is finished.
8177c478bd9Sstevel@tonic-gate  */
8187c478bd9Sstevel@tonic-gate void
8197c478bd9Sstevel@tonic-gate xc_dismissed(cpuset_t cpuset)
8207c478bd9Sstevel@tonic-gate {
8217c478bd9Sstevel@tonic-gate 	int pix;
8227c478bd9Sstevel@tonic-gate 	int lcx = (int)(CPU->cpu_id);
8237c478bd9Sstevel@tonic-gate 	cpuset_t xc_cpuset, tmpset;
8247c478bd9Sstevel@tonic-gate 	cpuset_t recv_cpuset;
8257c478bd9Sstevel@tonic-gate 	uint64_t loop_cnt = 0;
8267c478bd9Sstevel@tonic-gate 
8277c478bd9Sstevel@tonic-gate 	ASSERT(lcx == xc_holder);
8287c478bd9Sstevel@tonic-gate 	ASSERT(CPUSET_ISEQUAL(xc_mbox[lcx].xc_cpuset, cpuset));
8297c478bd9Sstevel@tonic-gate 	ASSERT(getpil() >= XCALL_PIL);
8307c478bd9Sstevel@tonic-gate 	CPUSET_ZERO(xc_mbox[lcx].xc_cpuset);
8317c478bd9Sstevel@tonic-gate 	CPUSET_ZERO(recv_cpuset);
8327c478bd9Sstevel@tonic-gate 	membar_stld();
8337c478bd9Sstevel@tonic-gate 
8347c478bd9Sstevel@tonic-gate 	XC_STAT_INC(x_dstat[lcx][XC_DISMISSED]);
8357c478bd9Sstevel@tonic-gate 	ASSERT(x_dstat[lcx][XC_DISMISSED] == x_dstat[lcx][XC_ATTENTION]);
8367c478bd9Sstevel@tonic-gate 
8377c478bd9Sstevel@tonic-gate 	/*
8387c478bd9Sstevel@tonic-gate 	 * only send to the CPU_READY ones
8397c478bd9Sstevel@tonic-gate 	 */
8407c478bd9Sstevel@tonic-gate 	xc_cpuset = cpu_ready_set;
8417c478bd9Sstevel@tonic-gate 	CPUSET_AND(xc_cpuset, cpuset);
8427c478bd9Sstevel@tonic-gate 
8437c478bd9Sstevel@tonic-gate 	/*
8447c478bd9Sstevel@tonic-gate 	 * exclude itself
8457c478bd9Sstevel@tonic-gate 	 */
8467c478bd9Sstevel@tonic-gate 	CPUSET_DEL(xc_cpuset, lcx);
847*12551037SToomas Soome 	XC_TRACE(XC_DISMISSED, &xc_cpuset, NULL, 0, 0);
8487c478bd9Sstevel@tonic-gate 	if (CPUSET_ISNULL(xc_cpuset)) {
8497c478bd9Sstevel@tonic-gate 		xc_holder = -1;
8507c478bd9Sstevel@tonic-gate 		mutex_exit(&xc_sys_mutex);
8517c478bd9Sstevel@tonic-gate 		return;
8527c478bd9Sstevel@tonic-gate 	}
8537c478bd9Sstevel@tonic-gate 
8547c478bd9Sstevel@tonic-gate 	/*
8557c478bd9Sstevel@tonic-gate 	 * inform other processors to get out of xc_loop()
8567c478bd9Sstevel@tonic-gate 	 */
8577c478bd9Sstevel@tonic-gate 	tmpset = xc_cpuset;
8587c478bd9Sstevel@tonic-gate 	for (pix = 0; pix < NCPU; pix++) {
8597c478bd9Sstevel@tonic-gate 		if (CPU_IN_SET(tmpset, pix)) {
8607c478bd9Sstevel@tonic-gate 			xc_mbox[pix].xc_state = XC_EXIT;
8617c478bd9Sstevel@tonic-gate 			membar_stld();
8627c478bd9Sstevel@tonic-gate 			CPUSET_DEL(tmpset, pix);
8637c478bd9Sstevel@tonic-gate 			if (CPUSET_ISNULL(tmpset)) {
8647c478bd9Sstevel@tonic-gate 				break;
8657c478bd9Sstevel@tonic-gate 			}
8667c478bd9Sstevel@tonic-gate 		}
8677c478bd9Sstevel@tonic-gate 	}
8687c478bd9Sstevel@tonic-gate 
8697c478bd9Sstevel@tonic-gate 	/*
8707c478bd9Sstevel@tonic-gate 	 * make sure target processors have exited from xc_loop()
8717c478bd9Sstevel@tonic-gate 	 */
8727c478bd9Sstevel@tonic-gate 	while (!CPUSET_ISEQUAL(recv_cpuset, xc_cpuset)) {
8737c478bd9Sstevel@tonic-gate 		tmpset = xc_cpuset;
8747c478bd9Sstevel@tonic-gate 		for (pix = 0; pix < NCPU; pix++) {
8757c478bd9Sstevel@tonic-gate 			if (CPU_IN_SET(tmpset, pix)) {
8767c478bd9Sstevel@tonic-gate 				/*
8777c478bd9Sstevel@tonic-gate 				 * membar_stld() is done in xc_loop
8787c478bd9Sstevel@tonic-gate 				 */
8797c478bd9Sstevel@tonic-gate 				if (xc_mbox[pix].xc_state == XC_IDLE) {
8807c478bd9Sstevel@tonic-gate 					CPUSET_ADD(recv_cpuset, pix);
8817c478bd9Sstevel@tonic-gate 				}
8827c478bd9Sstevel@tonic-gate 				CPUSET_DEL(tmpset, pix);
8837c478bd9Sstevel@tonic-gate 				if (CPUSET_ISNULL(tmpset)) {
8847c478bd9Sstevel@tonic-gate 					break;
8857c478bd9Sstevel@tonic-gate 				}
8867c478bd9Sstevel@tonic-gate 			}
8877c478bd9Sstevel@tonic-gate 		}
8887c478bd9Sstevel@tonic-gate 		if (loop_cnt++ > xc_func_time_limit) {
8897c478bd9Sstevel@tonic-gate 			if (sendmondo_in_recover) {
8907c478bd9Sstevel@tonic-gate 				drv_usecwait(1);
8917c478bd9Sstevel@tonic-gate 				loop_cnt = 0;
8927c478bd9Sstevel@tonic-gate 				continue;
8937c478bd9Sstevel@tonic-gate 			}
8947c478bd9Sstevel@tonic-gate 			cmn_err(CE_PANIC, "xc_dismissed() timeout");
8957c478bd9Sstevel@tonic-gate 		}
8967c478bd9Sstevel@tonic-gate 	}
8977c478bd9Sstevel@tonic-gate 	xc_holder = -1;
8987c478bd9Sstevel@tonic-gate 	mutex_exit(&xc_sys_mutex);
8997c478bd9Sstevel@tonic-gate }
9007c478bd9Sstevel@tonic-gate 
9017c478bd9Sstevel@tonic-gate /*
9027c478bd9Sstevel@tonic-gate  * xc_serv - "x-call" handler at TL=0; serves only one x-call request.
9037c478bd9Sstevel@tonic-gate  * Runs at XCALL_PIL level.
9047c478bd9Sstevel@tonic-gate  */
9057c478bd9Sstevel@tonic-gate uint_t
9067c478bd9Sstevel@tonic-gate xc_serv(void)
9077c478bd9Sstevel@tonic-gate {
9087c478bd9Sstevel@tonic-gate 	int lcx = (int)(CPU->cpu_id);
9097c478bd9Sstevel@tonic-gate 	struct xc_mbox *xmp;
9107c478bd9Sstevel@tonic-gate 	xcfunc_t *func;
9117c478bd9Sstevel@tonic-gate 	uint64_t arg1, arg2;
9127c478bd9Sstevel@tonic-gate 	cpuset_t tset;
9137c478bd9Sstevel@tonic-gate 
9147c478bd9Sstevel@tonic-gate 	ASSERT(getpil() == XCALL_PIL);
9157c478bd9Sstevel@tonic-gate 	CPUSET_ZERO(tset);
9167c478bd9Sstevel@tonic-gate 	CPUSET_ADD(tset, lcx);
9177c478bd9Sstevel@tonic-gate 	flush_windows();
9187c478bd9Sstevel@tonic-gate 	xmp = &xc_mbox[lcx];
9197c478bd9Sstevel@tonic-gate 	ASSERT(lcx != xc_holder);
9207c478bd9Sstevel@tonic-gate 	ASSERT(xmp->xc_state == XC_DOIT);
9217c478bd9Sstevel@tonic-gate 	func = xmp->xc_func;
9227c478bd9Sstevel@tonic-gate 	XC_TRACE(XC_SERV, &tset, func, xmp->xc_arg1, xmp->xc_arg2);
9237c478bd9Sstevel@tonic-gate 	if (func != NULL) {
9247c478bd9Sstevel@tonic-gate 		arg1 = xmp->xc_arg1;
9257c478bd9Sstevel@tonic-gate 		arg2 = xmp->xc_arg2;
9267c478bd9Sstevel@tonic-gate 		(*func)(arg1, arg2);
9277c478bd9Sstevel@tonic-gate 	}
9287c478bd9Sstevel@tonic-gate 	XC_STAT_INC(x_rstat[lcx][XC_SERV]);
9297c478bd9Sstevel@tonic-gate 	XC_TRACE(XC_SERV, &tset, func, arg1, arg2);
9307c478bd9Sstevel@tonic-gate 	xmp->xc_state = XC_IDLE;
9317c478bd9Sstevel@tonic-gate 	membar_stld();
9327c478bd9Sstevel@tonic-gate 	return (1);
9337c478bd9Sstevel@tonic-gate }
9347c478bd9Sstevel@tonic-gate 
9357c478bd9Sstevel@tonic-gate /*
9367c478bd9Sstevel@tonic-gate  * if == 1, an xc_loop timeout will cause a panic;
9377c478bd9Sstevel@tonic-gate  * otherwise it prints a warning
9387c478bd9Sstevel@tonic-gate  */
9397c478bd9Sstevel@tonic-gate uint_t xc_loop_panic = 0;
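
/*
 * Illustrative example (an assumption, not from the original source): on a
 * live system being debugged, the panic behaviour could be enabled with
 * mdb, e.g.:
 *
 *	echo 'xc_loop_panic/W 1' | mdb -kw
 *
 * or set at boot via /etc/system with "set xc_loop_panic = 1".
 */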
9407c478bd9Sstevel@tonic-gate 
9417c478bd9Sstevel@tonic-gate /*
9427c478bd9Sstevel@tonic-gate  * xc_loop - "x-call" handler at TL=0; captures the cpu for a critical
9437c478bd9Sstevel@tonic-gate  * session, or serves multiple x-call requests. Runs at XCALL_PIL level.
9447c478bd9Sstevel@tonic-gate  */
9457c478bd9Sstevel@tonic-gate uint_t
9467c478bd9Sstevel@tonic-gate xc_loop(void)
9477c478bd9Sstevel@tonic-gate {
9487c478bd9Sstevel@tonic-gate 	int lcx = (int)(CPU->cpu_id);
9497c478bd9Sstevel@tonic-gate 	struct xc_mbox *xmp;
9507c478bd9Sstevel@tonic-gate 	xcfunc_t *func;
9517c478bd9Sstevel@tonic-gate 	uint64_t arg1, arg2;
9527c478bd9Sstevel@tonic-gate 	uint64_t loop_cnt = 0;
9537c478bd9Sstevel@tonic-gate 	cpuset_t tset;
9547c478bd9Sstevel@tonic-gate 
9557c478bd9Sstevel@tonic-gate 	ASSERT(getpil() == XCALL_PIL);
9567c478bd9Sstevel@tonic-gate 
9577c478bd9Sstevel@tonic-gate 	CPUSET_ZERO(tset);
9587c478bd9Sstevel@tonic-gate 	flush_windows();
9597c478bd9Sstevel@tonic-gate 
9607c478bd9Sstevel@tonic-gate 	/*
9617c478bd9Sstevel@tonic-gate 	 * Someone must be holding the xc_sys_mutex;
9627c478bd9Sstevel@tonic-gate 	 * no further interrupt (at XCALL_PIL or below) can
9637c478bd9Sstevel@tonic-gate 	 * be taken by this processor until xc_loop exits.
9647c478bd9Sstevel@tonic-gate 	 *
9657c478bd9Sstevel@tonic-gate 	 * The owner of xc_sys_mutex (or xc_holder) can expect
9667c478bd9Sstevel@tonic-gate 	 * its xc/xt requests to be handled as follows:
967*12551037SToomas Soome 	 *	xc requests use xc_mbox's handshaking for their services
968*12551037SToomas Soome 	 *	xt requests at TL>0 will be handled immediately
969*12551037SToomas Soome 	 *	xt requests at TL=0:
9707c478bd9Sstevel@tonic-gate 	 *		if their handlers' pils are <= XCALL_PIL, then
9717c478bd9Sstevel@tonic-gate 	 *			they will be handled after xc_loop exits
9727c478bd9Sstevel@tonic-gate 	 *			(so, they probably should not be used)
9737c478bd9Sstevel@tonic-gate 	 *		else they will be handled immediately
9747c478bd9Sstevel@tonic-gate 	 *
9757c478bd9Sstevel@tonic-gate 	 * For those who are not informed to enter xc_loop, if they
9767c478bd9Sstevel@tonic-gate 	 * send xc/xt requests to this processor at this moment,
9777c478bd9Sstevel@tonic-gate 	 * the requests will be handled as follows:
9787c478bd9Sstevel@tonic-gate 	 *	xc requests will be handled after they grab xc_sys_mutex
9797c478bd9Sstevel@tonic-gate 	 *	xt requests at TL>0 will be handled immediately
980*12551037SToomas Soome 	 *	xt requests at TL=0:
9817c478bd9Sstevel@tonic-gate 	 *		if their handlers' pils are <= XCALL_PIL, then
9827c478bd9Sstevel@tonic-gate 	 *			they will be handled after xc_loop exits
9837c478bd9Sstevel@tonic-gate 	 *		else they will be handled immediately
9847c478bd9Sstevel@tonic-gate 	 */
9857c478bd9Sstevel@tonic-gate 	xmp = &xc_mbox[lcx];
9867c478bd9Sstevel@tonic-gate 	ASSERT(lcx != xc_holder);
9877c478bd9Sstevel@tonic-gate 	ASSERT(xmp->xc_state == XC_ENTER);
9887c478bd9Sstevel@tonic-gate 	xmp->xc_state = XC_WAIT;
9897c478bd9Sstevel@tonic-gate 	CPUSET_ADD(tset, lcx);
9907c478bd9Sstevel@tonic-gate 	membar_stld();
9917c478bd9Sstevel@tonic-gate 	XC_STAT_INC(x_rstat[lcx][XC_LOOP]);
992*12551037SToomas Soome 	XC_TRACE(XC_LOOP_ENTER, &tset, NULL, 0, 0);
9937c478bd9Sstevel@tonic-gate 	while (xmp->xc_state != XC_EXIT) {
9947c478bd9Sstevel@tonic-gate 		if (xmp->xc_state == XC_DOIT) {
9957c478bd9Sstevel@tonic-gate 			func = xmp->xc_func;
9967c478bd9Sstevel@tonic-gate 			arg1 = xmp->xc_arg1;
9977c478bd9Sstevel@tonic-gate 			arg2 = xmp->xc_arg2;
9987c478bd9Sstevel@tonic-gate 			XC_TRACE(XC_LOOP_DOIT, &tset, func, arg1, arg2);
9997c478bd9Sstevel@tonic-gate 			if (func != NULL)
10007c478bd9Sstevel@tonic-gate 				(*func)(arg1, arg2);
10017c478bd9Sstevel@tonic-gate 			xmp->xc_state = XC_WAIT;
10027c478bd9Sstevel@tonic-gate 			membar_stld();
10037c478bd9Sstevel@tonic-gate 			/*
10047c478bd9Sstevel@tonic-gate 			 * reset the timeout counter
10057c478bd9Sstevel@tonic-gate 			 * since some work was done
10067c478bd9Sstevel@tonic-gate 			 */
10077c478bd9Sstevel@tonic-gate 			loop_cnt = 0;
10087c478bd9Sstevel@tonic-gate 		} else {
10097c478bd9Sstevel@tonic-gate 			/* patience is a virtue... */
10107c478bd9Sstevel@tonic-gate 			loop_cnt++;
10117c478bd9Sstevel@tonic-gate 		}
10127c478bd9Sstevel@tonic-gate 
10137c478bd9Sstevel@tonic-gate 		if (loop_cnt > xc_func_time_limit) {
10147c478bd9Sstevel@tonic-gate 			if (sendmondo_in_recover) {
10157c478bd9Sstevel@tonic-gate 				drv_usecwait(1);
10167c478bd9Sstevel@tonic-gate 				loop_cnt = 0;
10177c478bd9Sstevel@tonic-gate 				continue;
10187c478bd9Sstevel@tonic-gate 			}
10197c478bd9Sstevel@tonic-gate 			cmn_err(xc_loop_panic ? CE_PANIC : CE_WARN,
10207c478bd9Sstevel@tonic-gate 			    "xc_loop() timeout");
10217c478bd9Sstevel@tonic-gate 			/*
10227c478bd9Sstevel@tonic-gate 			 * if the above displayed a warning,
10237c478bd9Sstevel@tonic-gate 			 * reset the timeout counter and be patient
10247c478bd9Sstevel@tonic-gate 			 */
10257c478bd9Sstevel@tonic-gate 			loop_cnt = 0;
10267c478bd9Sstevel@tonic-gate 		}
10277c478bd9Sstevel@tonic-gate 	}
10287c478bd9Sstevel@tonic-gate 	ASSERT(xmp->xc_state == XC_EXIT);
10297c478bd9Sstevel@tonic-gate 	ASSERT(xc_holder != -1);
1030*12551037SToomas Soome 	XC_TRACE(XC_LOOP_EXIT, &tset, NULL, 0, 0);
10317c478bd9Sstevel@tonic-gate 	xmp->xc_state = XC_IDLE;
10327c478bd9Sstevel@tonic-gate 	membar_stld();
10337c478bd9Sstevel@tonic-gate 	return (1);
10347c478bd9Sstevel@tonic-gate }