1*03831d35Sstevel /*
2*03831d35Sstevel  * CDDL HEADER START
3*03831d35Sstevel  *
4*03831d35Sstevel  * The contents of this file are subject to the terms of the
5*03831d35Sstevel  * Common Development and Distribution License (the "License").
6*03831d35Sstevel  * You may not use this file except in compliance with the License.
7*03831d35Sstevel  *
8*03831d35Sstevel  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9*03831d35Sstevel  * or http://www.opensolaris.org/os/licensing.
10*03831d35Sstevel  * See the License for the specific language governing permissions
11*03831d35Sstevel  * and limitations under the License.
12*03831d35Sstevel  *
13*03831d35Sstevel  * When distributing Covered Code, include this CDDL HEADER in each
14*03831d35Sstevel  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15*03831d35Sstevel  * If applicable, add the following below this CDDL HEADER, with the
16*03831d35Sstevel  * fields enclosed by brackets "[]" replaced with your own identifying
17*03831d35Sstevel  * information: Portions Copyright [yyyy] [name of copyright owner]
18*03831d35Sstevel  *
19*03831d35Sstevel  * CDDL HEADER END
20*03831d35Sstevel  */
21*03831d35Sstevel 
22*03831d35Sstevel /*
23*03831d35Sstevel  * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
24*03831d35Sstevel  * Use is subject to license terms.
25*03831d35Sstevel  */
26*03831d35Sstevel 
27*03831d35Sstevel #pragma ident	"%Z%%M%	%I%	%E% SMI"
28*03831d35Sstevel 
29*03831d35Sstevel /*
30*03831d35Sstevel  * CPU management for serengeti DR
31*03831d35Sstevel  *
32*03831d35Sstevel  * There are three states a CPU can be in:
33*03831d35Sstevel  *
34*03831d35Sstevel  *	disconnected:		In reset
35*03831d35Sstevel  *	connect,unconfigured:	Idling in OBP's idle loop
36*03831d35Sstevel  *	configured:		Running Solaris
37*03831d35Sstevel  *
38*03831d35Sstevel  * State transitions:
39*03831d35Sstevel  *
40*03831d35Sstevel  *                connect              configure
41*03831d35Sstevel  *              ------------>         ------------>
42*03831d35Sstevel  * disconnected              connected             configured
43*03831d35Sstevel  *                          unconfigured
44*03831d35Sstevel  *              <-----------         <-------------
45*03831d35Sstevel  *                disconnect           unconfigure
46*03831d35Sstevel  *
47*03831d35Sstevel  * Firmware involvements
48*03831d35Sstevel  *
49*03831d35Sstevel  *              start_cpu(SC)
50*03831d35Sstevel  *      prom_serengeti_wakeupcpu(OBP)
51*03831d35Sstevel  *              ------------>         ------------------------->
52*03831d35Sstevel  * disconnected              connected                         configured
53*03831d35Sstevel  *                          unconfigured
54*03831d35Sstevel  *              <-----------          <-------------------------
55*03831d35Sstevel  *      prom_serengeti_cpu_off(OBP)  prom_serengeti_cpu_off(OBP)
56*03831d35Sstevel  *               stop_cpu(SC)        prom_serengeti_wakeupcpu(OBP)
57*03831d35Sstevel  *
58*03831d35Sstevel  * SIR (Software Initiated Reset) is used to unconfigure a CPU.
59*03831d35Sstevel  * After the CPU has completed flushing the caches, it issues an
60*03831d35Sstevel  * sir instruction to put itself through POST.  POST detects that
61*03831d35Sstevel  * it is an SIR, and re-enters OBP as a slave.  When the operation
62*03831d35Sstevel  * completes successfully, the CPU will be idling in OBP.
63*03831d35Sstevel  */
64*03831d35Sstevel 
65*03831d35Sstevel #include <sys/obpdefs.h>
66*03831d35Sstevel #include <sys/types.h>
67*03831d35Sstevel #include <sys/cmn_err.h>
68*03831d35Sstevel #include <sys/cpuvar.h>
69*03831d35Sstevel #include <sys/membar.h>
70*03831d35Sstevel #include <sys/x_call.h>
71*03831d35Sstevel #include <sys/machsystm.h>
72*03831d35Sstevel #include <sys/cpu_sgnblk_defs.h>
73*03831d35Sstevel #include <sys/pte.h>
74*03831d35Sstevel #include <vm/hat_sfmmu.h>
75*03831d35Sstevel #include <sys/promif.h>
76*03831d35Sstevel #include <sys/note.h>
77*03831d35Sstevel #include <sys/vmsystm.h>
78*03831d35Sstevel #include <vm/seg_kmem.h>
79*03831d35Sstevel 
80*03831d35Sstevel #include <sys/sbd_ioctl.h>
81*03831d35Sstevel #include <sys/sbd.h>
82*03831d35Sstevel #include <sys/sbdp_priv.h>
83*03831d35Sstevel #include <sys/sbdp_mem.h>
84*03831d35Sstevel #include <sys/sbdp_error.h>
85*03831d35Sstevel #include <sys/sgsbbc_iosram.h>
86*03831d35Sstevel #include <sys/prom_plat.h>
87*03831d35Sstevel #include <sys/cheetahregs.h>
88*03831d35Sstevel 
/*
 * Handshake word shared with the CPU shutdown sequence; written 0 and
 * later 3 in sbdp_cpu_poweroff().
 * NOTE(review): exact consumer of these values is outside this file -
 * presumably the sbdp_shutdown_asm trampoline; confirm before relying.
 */
uint64_t	*sbdp_valp;
extern uint64_t	va_to_pa(void *);
/* Poll budget and per-poll delay (usec) while waiting for a CPU's idle thread */
static int	sbdp_cpu_ntries = 50000;
static int	sbdp_cpu_delay = 100;
/* Cross-call target that locates the COPYREN region in CPU SRAM */
void		sbdp_get_cpu_sram_addr(uint64_t, uint64_t);
/* Map/unmap the CPU SRAM window into the kernel address space */
static int	cpusram_map(caddr_t *, pgcnt_t *);
static void	cpusram_unmap(caddr_t *, pgcnt_t);
/* OBP interfaces used to park/resume a CPU (see file header diagram) */
extern int	prom_serengeti_wakeupcpu(pnode_t);
extern int	prom_serengeti_cpu_off(pnode_t);
extern sbdp_wnode_t *sbdp_get_wnodep(int);
extern caddr_t	sbdp_shutdown_va;
/* prom_tree_access() callback used by sbdp_find_nearby_cpu_by_portid() */
static int	sbdp_prom_get_cpu(void *arg, int changed);
102*03831d35Sstevel 
/*
 * Take the CPU described by dip/cpuid from the connected/unconfigured
 * state (idling in OBP) to the disconnected state (held in reset):
 * mark it in reset, tell OBP it is going into POST, then ask the SC
 * to stop it.
 *
 * Returns 0 on success; on failure returns -1 with hp->h_err set to
 * ESGT_STOPCPU.
 */
int
sbdp_disconnect_cpu(sbdp_handle_t *hp, dev_info_t *dip, processorid_t cpuid)
{
	pnode_t		nodeid;
	int		bd, wnode;
	sbdp_wnode_t	*wnodep;
	sbdp_bd_t	*bdp = NULL;	/* non-NULL only once bd_mutex is held */
	int		rv = 0;
	processorid_t	cpu = cpuid;	/* id handed to the SC stop interface */
	processorid_t	portid;
	static fn_t	f = "sbdp_disconnect_cpu";

	SBDP_DBG_FUNC("%s\n", f);

	nodeid = ddi_get_nodeid(dip);

	/*
	 * Get board number and node number
	 * The check for determining if nodeid is valid is done inside
	 * sbdp_get_bd_and_wnode_num.
	 */
	if (SBDP_INJECT_ERROR(f, 0) ||
	    sbdp_get_bd_and_wnode_num(nodeid, &bd, &wnode) != 0) {

		rv = -1;
		goto out;
	}

	/*
	 * Grab the lock to prevent status threads from accessing
	 * registers on the CPU when it is being put into reset.
	 */
	wnodep = sbdp_get_wnodep(wnode);
	bdp = &wnodep->bds[bd];
	ASSERT(bdp);
	mutex_enter(&bdp->bd_mutex);

	/*
	 * Mark the CPU in reset.  This should be done before calling
	 * the SC because we won't know at which stage it failed if
	 * the SC call returns failure.
	 */
	sbdp_cpu_in_reset(wnode, bd, SG_CPUID_TO_CPU_UNIT(cpuid), 1);

	/*
	 * Ask OBP to mark the CPU as in POST
	 */
	if (SBDP_INJECT_ERROR(f, 1) || prom_serengeti_cpu_off(nodeid) != 0) {

		rv = -1;
		goto out;
	}

	/*
	 * Ask the SC to put the CPU into reset. If the first
	 * core is not present, the stop CPU interface needs
	 * to be called with the portid rather than the cpuid.
	 */
	portid = SG_CPUID_TO_PORTID(cpuid);
	if (!SBDP_IS_CPU_PRESENT(bdp, SG_CPUID_TO_CPU_UNIT(portid))) {
		cpu = portid;
	}

	if (SBDP_INJECT_ERROR(f, 2) || sbdp_stop_cpu(cpu) != 0) {

		rv = -1;
		goto out;
	}

out:
	/* bd_mutex is held iff bdp was set above */
	if (bdp != NULL) {
		mutex_exit(&bdp->bd_mutex);
	}

	if (rv != 0) {
		sbdp_set_err(hp->h_err, ESGT_STOPCPU, NULL);
	}

	return (rv);
}
183*03831d35Sstevel 
/*
 * Take the CPU described by dip/cpuid from the disconnected state
 * (in reset) to the connected/unconfigured state (idling in OBP):
 * ask the SC to start it, have OBP wake it, clear its in-reset mark,
 * and refresh the board info once every CPU on the board is out of
 * reset.
 *
 * Returns 0 on success; on failure returns -1 with hp->h_err set to
 * ESGT_WAKEUPCPU.
 */
int
sbdp_connect_cpu(sbdp_handle_t *hp, dev_info_t *dip, processorid_t cpuid)
{
	pnode_t		nodeid;
	sbd_error_t	*sep;
	int		i;
	int		bd, wnode;
	int		rv = 0;
	static fn_t	f = "sbdp_connect_cpu";

	SBDP_DBG_FUNC("%s\n", f);

	sep = hp->h_err;

	nodeid = ddi_get_nodeid(dip);

	/*
	 * The check for determining if nodeid is valid is done inside
	 * sbdp_get_bd_and_wnode_num.
	 */
	if (SBDP_INJECT_ERROR(f, 0) ||
	    sbdp_get_bd_and_wnode_num(nodeid, &bd, &wnode) != 0) {

		rv = -1;
		goto out;
	}

	/*
	 * Ask the SC to bring the CPU out of reset.
	 * At this point, the sb_dev_present bit is not set for the CPU.
	 * From sbd point of view the CPU is not present yet.  No
	 * status threads will try to read registers off the CPU.
	 * Since we are already holding sb_mutex, it is not necessary
	 * to grab the board mutex when checking and setting the
	 * cpus_in_reset bit.
	 */
	if (sbdp_is_cpu_in_reset(wnode, bd, SG_CPUID_TO_CPU_UNIT(cpuid))) {

		sbdp_wnode_t	*wnodep;
		sbdp_bd_t	*bdp = NULL;
		processorid_t	cpu = cpuid;	/* id for SC start interface */
		processorid_t	portid;

		wnodep = sbdp_get_wnodep(wnode);
		bdp = &wnodep->bds[bd];
		ASSERT(bdp);

		/*
		 * If the first core is not present, the start CPU
		 * interface needs to be called with the portid rather
		 * than the cpuid.
		 */
		portid = SG_CPUID_TO_PORTID(cpuid);
		if (!SBDP_IS_CPU_PRESENT(bdp, SG_CPUID_TO_CPU_UNIT(portid))) {
			cpu = portid;
		}

		if (SBDP_INJECT_ERROR(f, 1) || sbdp_start_cpu(cpu) != 0) {

			rv = -1;
			goto out;
		}

		/* Have OBP take the now-running CPU into its idle loop */
		if (SBDP_INJECT_ERROR(f, 2) ||
		    prom_serengeti_wakeupcpu(nodeid) != 0) {

			rv = -1;
			goto out;
		}
	}

	/*
	 * Mark the CPU out of reset.
	 */
	sbdp_cpu_in_reset(wnode, bd, SG_CPUID_TO_CPU_UNIT(cpuid), 0);

	/*
	 * Refresh the bd info
	 * we need to wait until all cpus are out of reset
	 */
	for (i = 0; i < SG_MAX_CPUS_PER_BD; i++)
		if (sbdp_is_cpu_present(wnode, bd, i) &&
		    sbdp_is_cpu_in_reset(wnode, bd, i) == 1) {
			break;
		}

	if (i == SG_MAX_CPUS_PER_BD) {
		/*
		 * All cpus are out of reset so it is safe to
		 * update the bd info
		 */
		sbdp_add_new_bd_info(wnode, bd);
	}

out:
	if (rv != 0)
		sbdp_set_err(sep, ESGT_WAKEUPCPU, NULL);

	return (rv);
}
284*03831d35Sstevel 
285*03831d35Sstevel int
286*03831d35Sstevel sbdp_cpu_poweron(struct cpu *cp)
287*03831d35Sstevel {
288*03831d35Sstevel 	int		cpuid;
289*03831d35Sstevel 	int		ntries;
290*03831d35Sstevel 	pnode_t		nodeid;
291*03831d35Sstevel 	extern void	restart_other_cpu(int);
292*03831d35Sstevel 	static fn_t	f = "sbdp_cpu_poweron";
293*03831d35Sstevel 
294*03831d35Sstevel 	SBDP_DBG_FUNC("%s\n", f);
295*03831d35Sstevel 
296*03831d35Sstevel 	ASSERT(MUTEX_HELD(&cpu_lock));
297*03831d35Sstevel 
298*03831d35Sstevel 	ntries = sbdp_cpu_ntries;
299*03831d35Sstevel 	cpuid = cp->cpu_id;
300*03831d35Sstevel 
301*03831d35Sstevel 	nodeid = cpunodes[cpuid].nodeid;
302*03831d35Sstevel 	ASSERT(nodeid != (pnode_t)0);
303*03831d35Sstevel 
304*03831d35Sstevel 	/*
305*03831d35Sstevel 	 * This is a safe guard in case the CPU has taken a trap
306*03831d35Sstevel 	 * and idling in POST.
307*03831d35Sstevel 	 */
308*03831d35Sstevel 	if (SBDP_INJECT_ERROR(f, 0) ||
309*03831d35Sstevel 	    prom_serengeti_wakeupcpu(nodeid) != 0) {
310*03831d35Sstevel 
311*03831d35Sstevel 		return (EBUSY);
312*03831d35Sstevel 	}
313*03831d35Sstevel 
314*03831d35Sstevel 	cp->cpu_flags &= ~CPU_POWEROFF;
315*03831d35Sstevel 
316*03831d35Sstevel 	/*
317*03831d35Sstevel 	 * NOTE: restart_other_cpu pauses cpus during the
318*03831d35Sstevel 	 *	slave cpu start.  This helps to quiesce the
319*03831d35Sstevel 	 *	bus traffic a bit which makes the tick sync
320*03831d35Sstevel 	 *	routine in the prom more robust.
321*03831d35Sstevel 	 */
322*03831d35Sstevel 	SBDP_DBG_CPU("%s: COLD START for cpu (%d)\n", f, cpuid);
323*03831d35Sstevel 
324*03831d35Sstevel 	restart_other_cpu(cpuid);
325*03831d35Sstevel 
326*03831d35Sstevel 	SBDP_DBG_CPU("after restarting other cpus\n");
327*03831d35Sstevel 
328*03831d35Sstevel 	/*
329*03831d35Sstevel 	 * Wait for the cpu to reach its idle thread before
330*03831d35Sstevel 	 * we zap him with a request to blow away the mappings
331*03831d35Sstevel 	 * he (might) have for the sbdp_shutdown_asm code
332*03831d35Sstevel 	 * he may have executed on unconfigure.
333*03831d35Sstevel 	 */
334*03831d35Sstevel 	while ((cp->cpu_thread != cp->cpu_idle_thread) && (ntries > 0)) {
335*03831d35Sstevel 		DELAY(sbdp_cpu_delay);
336*03831d35Sstevel 		ntries--;
337*03831d35Sstevel 	}
338*03831d35Sstevel 
339*03831d35Sstevel 	SBDP_DBG_CPU("%s: waited %d out of %d loops for cpu %d\n",
340*03831d35Sstevel 	    f, sbdp_cpu_ntries - ntries, sbdp_cpu_ntries, cpuid);
341*03831d35Sstevel 
342*03831d35Sstevel 	return (0);
343*03831d35Sstevel }
344*03831d35Sstevel 
345*03831d35Sstevel 
/* Base physical address and size of the Serengeti CPU SRAM window */
#define	SBDP_CPU_SRAM_ADDR	0x7fff0900000ull
#define	SBDP_CPU_SRAM_SIZE	0x20000ull

/* IOSRAM TOC key naming the SRAM region used by the shutdown trampoline */
static const char cpyren_key[] = "COPYREN";

/* Location/size of the COPYREN region, filled in by the poweroff x-call */
static uint64_t bbsram_pa;
static uint_t bbsram_size;

/*
 * Argument block for the sbdp_get_cpu_sram_addr() cross call: where the
 * SRAM TOC is mapped, and where to deposit the discovered region.
 */
typedef struct {
	caddr_t		vaddr;		/* kernel mapping of CPU SRAM */
	pgcnt_t		npages;		/* pages backing that mapping */
	uint64_t	*pa;		/* out: PA of the COPYREN region */
	uint_t		*size;		/* out: size of the COPYREN region */
} sbdp_cpu_sram_map_t;
360*03831d35Sstevel 
/*
 * Power off (unconfigure) a CPU: locate the SRAM region the shutdown
 * trampoline will run from, quiesce the CPU, tell the firmware it is
 * leaving, and x-trap it into sbdp_cpu_shutdown_self, which ends in an
 * SIR that sends the CPU through POST back into OBP.
 *
 * Must be called with cpu_lock held, after the CPU has been offlined.
 * Returns 0 on success, EBUSY if the SRAM mapping/lookup or the
 * firmware handshake fails.
 */
int
sbdp_cpu_poweroff(struct cpu *cp)
{
	processorid_t	cpuid;
	static void	sbdp_cpu_shutdown_self(void);
	pnode_t		nodeid;
	sbdp_cpu_sram_map_t	map;
	static fn_t	f = "sbdp_cpu_poweroff";

	SBDP_DBG_FUNC("%s\n", f);

	ASSERT(MUTEX_HELD(&cpu_lock));

	/*
	 * Capture all CPUs (except for detaching proc) to prevent
	 * crosscalls to the detaching proc until it has cleared its
	 * bit in cpu_ready_set.
	 */
	cpuid = cp->cpu_id;

	nodeid = cpunodes[cpuid].nodeid;
	ASSERT(nodeid != (pnode_t)0);

	/*
	 * NOTE(review): clears the shutdown handshake word; it is set
	 * to 3 further down - consumer is outside this file, confirm.
	 */
	*sbdp_valp = 0ull;
	/*
	 * Do the cpu sram mapping now.  This avoids problems with
	 * mutexes and high PILS
	 */
	if (SBDP_INJECT_ERROR(f, 0) ||
	    cpusram_map(&map.vaddr, &map.npages) != DDI_SUCCESS) {
		return (EBUSY);
	}

	map.pa = &bbsram_pa;
	map.size = &bbsram_size;

	/*
	 * Do a cross call to the cpu so it obtains the base address
	 */
	xc_one(cpuid, sbdp_get_cpu_sram_addr, (uint64_t)&map,
	    (uint64_t)NULL);

	cpusram_unmap(&map.vaddr, map.npages);

	/* The x-call reports a missing COPYREN key as size 0 */
	if (SBDP_INJECT_ERROR(f, 1) || bbsram_size == 0) {
		cmn_err(CE_WARN, "cpu%d: Key \"%s\" missing from CPU SRAM TOC",
		    cpuid, cpyren_key);
		return (EBUSY);
	}

	/* The trampoline page is mapped with a single locked 8K TTE */
	if ((bbsram_pa & MMU_PAGEOFFSET) != 0) {
		cmn_err(CE_WARN, "cpu%d: CPU SRAM key \"%s\" not page aligned, "
		    "offset = 0x%lx", cpuid, cpyren_key,
		    bbsram_pa - SBDP_CPU_SRAM_ADDR);
		return (EBUSY);
	}

	if (bbsram_size < MMU_PAGESIZE) {
		cmn_err(CE_WARN, "cpu%d: CPU SRAM key \"%s\" too small, "
		    "size = 0x%x", cpuid, cpyren_key, bbsram_size);
		return (EBUSY);
	}

	/*
	 * Capture all CPUs (except for detaching proc) to prevent
	 * crosscalls to the detaching proc until it has cleared its
	 * bit in cpu_ready_set.
	 *
	 * The CPU's remain paused and the prom_mutex is known to be free.
	 * This prevents the x-trap victim from blocking when doing prom
	 * IEEE-1275 calls at a high PIL level.
	 */

	promsafe_pause_cpus();

	/*
	 * Quiesce interrupts on the target CPU. We do this by setting
	 * the CPU 'not ready'- (i.e. removing the CPU from cpu_ready_set) to
	 * prevent it from receiving cross calls and cross traps.
	 * This prevents the processor from receiving any new soft interrupts.
	 */

	mp_cpu_quiesce(cp);

	/* tell the prom the cpu is going away */
	if (SBDP_INJECT_ERROR(f, 2) || prom_serengeti_cpu_off(nodeid) != 0)
		return (EBUSY);

	/*
	 * An sir instruction is issued at the end of the shutdown
	 * routine to make the CPU go through POST and re-enter OBP.
	 */
	xt_one_unchecked(cp->cpu_id, (xcfunc_t *)idle_stop_xcall,
	    (uint64_t)sbdp_cpu_shutdown_self, 0);

	*sbdp_valp = 3ull;

	start_cpus();

	/*
	 * Wait until we reach the OBP idle loop or time out.
	 * prom_serengeti_wakeupcpu waits for up to 60 seconds for the
	 * CPU to reach OBP idle loop.
	 */
	if (SBDP_INJECT_ERROR(f, 3) ||
	    prom_serengeti_wakeupcpu(nodeid) != 0) {

		/*
		 * If it fails here, we still consider the unconfigure
		 * operation as successful.
		 */
		cmn_err(CE_WARN, "cpu%d: CPU failed to enter OBP idle loop.\n",
		    cpuid);
	}

	ASSERT(!(CPU_IN_SET(cpu_ready_set, cpuid)));

	/* Stale after this operation; rediscovered on the next poweroff */
	bbsram_pa = 0;
	bbsram_size = 0;

	return (0);
}
483*03831d35Sstevel 
484*03831d35Sstevel processorid_t
485*03831d35Sstevel sbdp_get_cpuid(sbdp_handle_t *hp, dev_info_t *dip)
486*03831d35Sstevel {
487*03831d35Sstevel 	int		cpuid;
488*03831d35Sstevel 	char		type[OBP_MAXPROPNAME];
489*03831d35Sstevel 	pnode_t		nodeid;
490*03831d35Sstevel 	sbd_error_t	*sep;
491*03831d35Sstevel 	static fn_t	f = "sbdp_get_cpuid";
492*03831d35Sstevel 
493*03831d35Sstevel 	SBDP_DBG_FUNC("%s\n", f);
494*03831d35Sstevel 
495*03831d35Sstevel 	nodeid = ddi_get_nodeid(dip);
496*03831d35Sstevel 	if (sbdp_is_node_bad(nodeid))
497*03831d35Sstevel 		return (-1);
498*03831d35Sstevel 
499*03831d35Sstevel 	sep = hp->h_err;
500*03831d35Sstevel 
501*03831d35Sstevel 	if (prom_getproplen(nodeid, "device_type") < OBP_MAXPROPNAME)
502*03831d35Sstevel 		(void) prom_getprop(nodeid, "device_type", (caddr_t)type);
503*03831d35Sstevel 	else {
504*03831d35Sstevel 		sbdp_set_err(sep, ESGT_NO_DEV_TYPE, NULL);
505*03831d35Sstevel 		return (-1);
506*03831d35Sstevel 	}
507*03831d35Sstevel 
508*03831d35Sstevel 	if (strcmp(type, "cpu") != 0) {
509*03831d35Sstevel 		sbdp_set_err(sep, ESGT_NOT_CPUTYPE, NULL);
510*03831d35Sstevel 		return (-1);
511*03831d35Sstevel 	}
512*03831d35Sstevel 
513*03831d35Sstevel 	/*
514*03831d35Sstevel 	 * Check to see if property "cpuid" exists first.
515*03831d35Sstevel 	 * If not, check for "portid".
516*03831d35Sstevel 	 */
517*03831d35Sstevel 	if (prom_getprop(nodeid, "cpuid", (caddr_t)&cpuid) == -1)
518*03831d35Sstevel 		if (prom_getprop(nodeid, "portid", (caddr_t)&cpuid) == -1) {
519*03831d35Sstevel 
520*03831d35Sstevel 			return (-1);
521*03831d35Sstevel 	}
522*03831d35Sstevel 
523*03831d35Sstevel 	return ((processorid_t)cpuid & SG_CPU_ID_MASK);
524*03831d35Sstevel }
525*03831d35Sstevel 
526*03831d35Sstevel int
527*03831d35Sstevel sbdp_cpu_get_impl(sbdp_handle_t *hp, dev_info_t *dip)
528*03831d35Sstevel {
529*03831d35Sstevel 	int		impl;
530*03831d35Sstevel 	char		type[OBP_MAXPROPNAME];
531*03831d35Sstevel 	pnode_t		nodeid;
532*03831d35Sstevel 	sbd_error_t	*sep;
533*03831d35Sstevel 	static fn_t	f = "sbdp_cpu_get_impl";
534*03831d35Sstevel 
535*03831d35Sstevel 	SBDP_DBG_FUNC("%s\n", f);
536*03831d35Sstevel 
537*03831d35Sstevel 	nodeid = ddi_get_nodeid(dip);
538*03831d35Sstevel 	if (sbdp_is_node_bad(nodeid))
539*03831d35Sstevel 		return (-1);
540*03831d35Sstevel 
541*03831d35Sstevel 	sep = hp->h_err;
542*03831d35Sstevel 
543*03831d35Sstevel 	if (prom_getproplen(nodeid, "device_type") < OBP_MAXPROPNAME)
544*03831d35Sstevel 		(void) prom_getprop(nodeid, "device_type", (caddr_t)type);
545*03831d35Sstevel 	else {
546*03831d35Sstevel 		sbdp_set_err(sep, ESGT_NO_DEV_TYPE, NULL);
547*03831d35Sstevel 		return (-1);
548*03831d35Sstevel 	}
549*03831d35Sstevel 
550*03831d35Sstevel 	if (strcmp(type, "cpu") != 0) {
551*03831d35Sstevel 		sbdp_set_err(sep, ESGT_NOT_CPUTYPE, NULL);
552*03831d35Sstevel 		return (-1);
553*03831d35Sstevel 	}
554*03831d35Sstevel 
555*03831d35Sstevel 	/*
556*03831d35Sstevel 	 * Get the implementation# property.
557*03831d35Sstevel 	 */
558*03831d35Sstevel 	if (prom_getprop(nodeid, "implementation#", (caddr_t)&impl) == -1)
559*03831d35Sstevel 		return (-1);
560*03831d35Sstevel 
561*03831d35Sstevel 	return (impl);
562*03831d35Sstevel }
563*03831d35Sstevel 
/*
 * Argument block threaded through prom_tree_access() to
 * sbdp_prom_get_cpu() while searching for a sibling CPU node.
 */
struct sbdp_prom_get_node_args {
	pnode_t node;		/* current node */
	processorid_t portid;	/* portid we are looking for */
	pnode_t result_node;	/* node found with the above portid */
};
569*03831d35Sstevel 
570*03831d35Sstevel pnode_t
571*03831d35Sstevel sbdp_find_nearby_cpu_by_portid(pnode_t nodeid, processorid_t portid)
572*03831d35Sstevel {
573*03831d35Sstevel 	struct sbdp_prom_get_node_args arg;
574*03831d35Sstevel 	static fn_t	f = "sbdp_find_nearby_cpu_by_portid";
575*03831d35Sstevel 
576*03831d35Sstevel 	SBDP_DBG_FUNC("%s\n", f);
577*03831d35Sstevel 
578*03831d35Sstevel 	arg.node = nodeid;
579*03831d35Sstevel 	arg.portid = portid;
580*03831d35Sstevel 	(void) prom_tree_access(sbdp_prom_get_cpu, &arg, NULL);
581*03831d35Sstevel 
582*03831d35Sstevel 	return (arg.result_node);
583*03831d35Sstevel }
584*03831d35Sstevel 
585*03831d35Sstevel /*ARGSUSED*/
586*03831d35Sstevel static int
587*03831d35Sstevel sbdp_prom_get_cpu(void *arg, int changed)
588*03831d35Sstevel {
589*03831d35Sstevel 	int	portid;
590*03831d35Sstevel 	pnode_t	parent, cur_node;
591*03831d35Sstevel 	struct sbdp_prom_get_node_args *argp = arg;
592*03831d35Sstevel 	static fn_t	f = "sbdp_prom_get_cpu";
593*03831d35Sstevel 
594*03831d35Sstevel 	SBDP_DBG_FUNC("%s\n", f);
595*03831d35Sstevel 
596*03831d35Sstevel 	parent = prom_parentnode(argp->node);
597*03831d35Sstevel 
598*03831d35Sstevel 	for (cur_node = prom_childnode(parent); cur_node != OBP_NONODE;
599*03831d35Sstevel 	    cur_node = prom_nextnode(cur_node)) {
600*03831d35Sstevel 
601*03831d35Sstevel 		if (prom_getprop(cur_node, OBP_PORTID, (caddr_t)&portid) < 0)
602*03831d35Sstevel 			continue;
603*03831d35Sstevel 
604*03831d35Sstevel 		if ((portid == argp->portid) && (cur_node != argp->node))
605*03831d35Sstevel 			break;
606*03831d35Sstevel 	}
607*03831d35Sstevel 
608*03831d35Sstevel 	argp->result_node = cur_node;
609*03831d35Sstevel 
610*03831d35Sstevel 	return (0);
611*03831d35Sstevel }
612*03831d35Sstevel 
613*03831d35Sstevel 
614*03831d35Sstevel /*
615*03831d35Sstevel  * A detaching CPU is xcalled with an xtrap to sbdp_cpu_stop_self() after
616*03831d35Sstevel  * it has been offlined. The function of this routine is to get the cpu
617*03831d35Sstevel  * spinning in a safe place. The requirement is that the system will not
618*03831d35Sstevel  * reference anything on the detaching board (memory and i/o is detached
619*03831d35Sstevel  * elsewhere) and that the CPU not reference anything on any other board
620*03831d35Sstevel  * in the system.  This isolation is required during and after the writes
621*03831d35Sstevel  * to the domain masks to remove the board from the domain.
622*03831d35Sstevel  *
623*03831d35Sstevel  * To accomplish this isolation the following is done:
624*03831d35Sstevel  *	0) Map the CPUSRAM to obtain the correct address in SRAM
625*03831d35Sstevel  *      1) Create a locked mapping to a location in CPU SRAM where
626*03831d35Sstevel  *      the cpu will execute.
627*03831d35Sstevel  *      2) Copy the target function (sbdp_shutdown_asm) in which
628*03831d35Sstevel  *      the cpu will execute into CPU SRAM.
629*03831d35Sstevel  *      3) Jump into function with CPU SRAM.
630*03831d35Sstevel  *      Function will:
631*03831d35Sstevel  *      3.1) Flush its Ecache (displacement).
632*03831d35Sstevel  *      3.2) Flush its Dcache with HW mechanism.
633*03831d35Sstevel  *      3.3) Flush its Icache with HW mechanism.
634*03831d35Sstevel  *      3.4) Flush all valid and _unlocked_ D-TLB entries.
635*03831d35Sstevel  *      3.5) Flush all valid and _unlocked_ I-TLB entries.
636*03831d35Sstevel  *      4) Jump into a tight loop.
637*03831d35Sstevel  */
638*03831d35Sstevel 
/*
 * Final isolation step, executed on the detaching CPU itself (see the
 * block comment above): lock a TLB mapping to the CPU SRAM page, copy
 * sbdp_shutdown_asm there, and jump to it.  Never returns.
 */
static void
sbdp_cpu_stop_self(uint64_t pa)
{
	cpu_t		*cp = CPU;
	int		cpuid = cp->cpu_id;
	tte_t		tte;
	volatile uint_t	*src, *dst;
	uint_t		funclen;
	sbdp_shutdown_t	sht;
	uint_t		bbsram_pfn;
	uint64_t	bbsram_addr;
	void		(*bbsram_func)(sbdp_shutdown_t *);
	extern void	sbdp_shutdown_asm(sbdp_shutdown_t *);
	extern void	sbdp_shutdown_asm_end(void);

	/* The asm trampoline must fit in the single locked 8K page */
	funclen = (uint_t)sbdp_shutdown_asm_end - (uint_t)sbdp_shutdown_asm;
	ASSERT(funclen <= MMU_PAGESIZE);
	/* Filled in/validated by the x-call in sbdp_cpu_poweroff() */
	ASSERT(bbsram_pa != 0);
	ASSERT((bbsram_pa & MMU_PAGEOFFSET) == 0);
	ASSERT(bbsram_size >= MMU_PAGESIZE);

	/* NOTE(review): progress marker written to phys addr pa - confirm */
	stdphys(pa, 3);
	bbsram_pfn = (uint_t)(bbsram_pa >> MMU_PAGESHIFT);

	bbsram_addr = (uint64_t)sbdp_shutdown_va;
	/* Stack for the trampoline grows down from the end of the page */
	sht.estack = bbsram_addr + MMU_PAGESIZE;
	sht.flushaddr = ecache_flushaddr;

	/*
	 * Build a valid, privileged, writable, *locked* 8K TTE for the
	 * SRAM page and load it into both TLBs, so the copied code and
	 * its stack stay reachable after the TLB flushes it performs.
	 */
	tte.tte_inthi = TTE_VALID_INT | TTE_SZ_INT(TTE8K) |
	    TTE_PFN_INTHI(bbsram_pfn);
	tte.tte_intlo = TTE_PFN_INTLO(bbsram_pfn) |
	    TTE_HWWR_INT | TTE_PRIV_INT | TTE_LCK_INT;
	sfmmu_dtlb_ld(sbdp_shutdown_va, KCONTEXT, &tte); /* load dtlb */
	sfmmu_itlb_ld(sbdp_shutdown_va, KCONTEXT, &tte); /* load itlb */

	/* Copy the trampoline text into CPU SRAM, word by word */
	for (src = (uint_t *)sbdp_shutdown_asm, dst = (uint_t *)bbsram_addr;
	    src < (uint_t *)sbdp_shutdown_asm_end; src++, dst++)
	*dst = *src;

	bbsram_func = (void (*)())bbsram_addr;
	/* Displacement flush needs twice the E$ size */
	sht.size = (uint32_t)cpunodes[cpuid].ecache_size << 1;
	sht.linesize = (uint32_t)cpunodes[cpuid].ecache_linesize;
	sht.physaddr = pa;

	/*
	 * Signal to sbdp_cpu_poweroff() that we're just
	 * about done.
	 */
	cp->cpu_m.in_prom = 1;

	stdphys(pa, 4);
	(*bbsram_func)(&sht);
}
692*03831d35Sstevel 
693*03831d35Sstevel /* ARGSUSED */
694*03831d35Sstevel void
695*03831d35Sstevel sbdp_get_cpu_sram_addr(uint64_t arg1, uint64_t arg2)
696*03831d35Sstevel {
697*03831d35Sstevel 	uint64_t	*pap;
698*03831d35Sstevel 	uint_t		*sizep;
699*03831d35Sstevel 	struct iosram_toc *tocp;
700*03831d35Sstevel 	uint_t		offset;
701*03831d35Sstevel 	uint_t		size;
702*03831d35Sstevel 	sbdp_cpu_sram_map_t *map;
703*03831d35Sstevel 	int		i;
704*03831d35Sstevel 	fn_t		f = "sbdp_get_cpu_sram_addr";
705*03831d35Sstevel 
706*03831d35Sstevel 	SBDP_DBG_FUNC("%s\n", f);
707*03831d35Sstevel 
708*03831d35Sstevel 	map = (sbdp_cpu_sram_map_t *)arg1;
709*03831d35Sstevel 	tocp = (struct iosram_toc *)map->vaddr;
710*03831d35Sstevel 	pap = map->pa;
711*03831d35Sstevel 	sizep = map->size;
712*03831d35Sstevel 
713*03831d35Sstevel 	for (i = 0; i < tocp->iosram_tagno; i++) {
714*03831d35Sstevel 		if (strcmp(tocp->iosram_keys[i].key, cpyren_key) == 0)
715*03831d35Sstevel 			break;
716*03831d35Sstevel 	}
717*03831d35Sstevel 	if (i == tocp->iosram_tagno) {
718*03831d35Sstevel 		*pap = 0;
719*03831d35Sstevel 		*sizep = 0;
720*03831d35Sstevel 		return;
721*03831d35Sstevel 	}
722*03831d35Sstevel 	offset = tocp->iosram_keys[i].offset;
723*03831d35Sstevel 	size = tocp->iosram_keys[i].size;
724*03831d35Sstevel 
725*03831d35Sstevel 	/*
726*03831d35Sstevel 	 * The address we want is the begining of cpusram + offset
727*03831d35Sstevel 	 */
728*03831d35Sstevel 	*pap = SBDP_CPU_SRAM_ADDR + offset;
729*03831d35Sstevel 
730*03831d35Sstevel 	*sizep = size;
731*03831d35Sstevel }
732*03831d35Sstevel 
733*03831d35Sstevel static int
734*03831d35Sstevel cpusram_map(caddr_t *vaddrp, pgcnt_t *npp)
735*03831d35Sstevel {
736*03831d35Sstevel 	uint_t		pgoffset;
737*03831d35Sstevel 	pgcnt_t		npages;
738*03831d35Sstevel 	pfn_t		pfn;
739*03831d35Sstevel 	uint64_t	base;
740*03831d35Sstevel 	caddr_t		kaddr;
741*03831d35Sstevel 	uint_t		mapping_attr;
742*03831d35Sstevel 
743*03831d35Sstevel 	base = (uint64_t)SBDP_CPU_SRAM_ADDR & (~MMU_PAGEOFFSET);
744*03831d35Sstevel 	pfn = mmu_btop(base);
745*03831d35Sstevel 
746*03831d35Sstevel 	/*
747*03831d35Sstevel 	 * Do a quick sanity check to make sure we are in I/O space.
748*03831d35Sstevel 	 */
749*03831d35Sstevel 	if (pf_is_memory(pfn))
750*03831d35Sstevel 		return (DDI_FAILURE);
751*03831d35Sstevel 
752*03831d35Sstevel 	pgoffset = (ulong_t)SBDP_CPU_SRAM_ADDR & MMU_PAGEOFFSET;
753*03831d35Sstevel 	npages = mmu_btopr(SBDP_CPU_SRAM_SIZE + pgoffset);
754*03831d35Sstevel 
755*03831d35Sstevel 	kaddr = vmem_alloc(heap_arena, ptob(npages), VM_NOSLEEP);
756*03831d35Sstevel 	if (kaddr == NULL)
757*03831d35Sstevel 		return (DDI_ME_NORESOURCES);
758*03831d35Sstevel 
759*03831d35Sstevel 	mapping_attr = PROT_READ;
760*03831d35Sstevel 	/*
761*03831d35Sstevel 	 * Now map in the pages we've allocated...
762*03831d35Sstevel 	 */
763*03831d35Sstevel 	hat_devload(kas.a_hat, kaddr, ptob(npages), pfn, mapping_attr,
764*03831d35Sstevel 	    HAT_LOAD_LOCK);
765*03831d35Sstevel 
766*03831d35Sstevel 	*vaddrp = kaddr + pgoffset;
767*03831d35Sstevel 	*npp = npages;
768*03831d35Sstevel 
769*03831d35Sstevel 	return (DDI_SUCCESS);
770*03831d35Sstevel }
771*03831d35Sstevel 
772*03831d35Sstevel static void
773*03831d35Sstevel cpusram_unmap(caddr_t *vaddrp, pgcnt_t npages)
774*03831d35Sstevel {
775*03831d35Sstevel 	uint_t  pgoffset;
776*03831d35Sstevel 	caddr_t base;
777*03831d35Sstevel 	caddr_t addr = *vaddrp;
778*03831d35Sstevel 
779*03831d35Sstevel 
780*03831d35Sstevel 	pgoffset = (ulong_t)SBDP_CPU_SRAM_ADDR & MMU_PAGEOFFSET;
781*03831d35Sstevel 	base = addr - pgoffset;
782*03831d35Sstevel 	hat_unload(kas.a_hat, base, ptob(npages), HAT_UNLOAD_UNLOCK);
783*03831d35Sstevel 	vmem_free(heap_arena, base, ptob(npages));
784*03831d35Sstevel 
785*03831d35Sstevel 	*vaddrp = 0;
786*03831d35Sstevel }
787*03831d35Sstevel 
788*03831d35Sstevel 
/*
 * Final shutdown sequence run on the CPU that is being powered off.
 * Quiesces the CPU, marks it offline/powered-off, and jumps to the
 * BBSRAM-resident stop routine; on success this never returns.
 */
static void
sbdp_cpu_shutdown_self(void)
{
	cpu_t		*cp = CPU;
	int		cpuid = cp->cpu_id;
	extern void	flush_windows(void);
	/* Physical address of sbdp_valp, used as a progress word below. */
	uint64_t	pa = va_to_pa((void *)sbdp_valp);

	/*
	 * NOTE(review): the values 8, 6 and 7 stored to *sbdp_valp below
	 * (and 4 in the stop path) look like progress markers polled by
	 * the CPU driving the power-off -- confirm against the
	 * sbdp_valp consumers before relying on their meaning.
	 */
	stdphys(pa, 8);
	flush_windows();

	/* Raise PIL to block normal interrupt processing. */
	(void) spl8();

	stdphys(pa, 6);

	/* We must be quiesced: no active interrupts, only idle/startup. */
	ASSERT(cp->cpu_intr_actv == 0);
	ASSERT(cp->cpu_thread == cp->cpu_idle_thread ||
	    cp->cpu_thread == cp->cpu_startup_thread);

	cp->cpu_flags = CPU_OFFLINE | CPU_QUIESCED | CPU_POWEROFF;

	/* Post the detached signature for this CPU. */
	CPU_SIGNATURE(OS_SIG, SIGST_DETACHED, SIGSUBST_NULL, cpuid);

	stdphys(pa, 7);
	sbdp_cpu_stop_self(pa);	/* does not return on success */

	/* Reaching here means the stop routine failed to park the CPU. */
	cmn_err(CE_PANIC, "sbdp_cpu_shutdown_self: CPU %d FAILED TO SHUTDOWN",
	    cpuid);
}
818*03831d35Sstevel 
/*
 * Per-walk state for sbdp_find_non_panther_cpus(): identifies the
 * node/board being scanned and accumulates the count of non-Panther
 * CPUs found there.
 */
typedef struct {
	int	node;			/* node id the walk is limited to */
	int	board;			/* board number the walk is limited to */
	int 	non_panther_cpus;	/* running count of non-Panther CPUs */
} sbdp_node_walk_t;
824*03831d35Sstevel 
825*03831d35Sstevel static int
826*03831d35Sstevel sbdp_find_non_panther_cpus(dev_info_t *dip, void *node_args)
827*03831d35Sstevel {
828*03831d35Sstevel 	int	impl, cpuid, portid;
829*03831d35Sstevel 	int	buflen;
830*03831d35Sstevel 	char	buf[OBP_MAXPROPNAME];
831*03831d35Sstevel 	sbdp_node_walk_t *args = (sbdp_node_walk_t *)node_args;
832*03831d35Sstevel 
833*03831d35Sstevel 	if (ddi_getlongprop_buf(DDI_DEV_T_ANY, dip,
834*03831d35Sstevel 	    DDI_PROP_DONTPASS, OBP_DEVICETYPE, (caddr_t)buf,
835*03831d35Sstevel 	    &buflen) != DDI_PROP_SUCCESS) {
836*03831d35Sstevel 		return (DDI_WALK_CONTINUE);
837*03831d35Sstevel 	}
838*03831d35Sstevel 
839*03831d35Sstevel 	if (strcmp(buf, "cpu") != 0) {
840*03831d35Sstevel 		return (DDI_WALK_CONTINUE);
841*03831d35Sstevel 	}
842*03831d35Sstevel 
843*03831d35Sstevel 	if ((impl = ddi_getprop(DDI_DEV_T_ANY, dip,
844*03831d35Sstevel 	    DDI_PROP_DONTPASS, "implementation#", -1)) == -1) {
845*03831d35Sstevel 		return (DDI_WALK_CONTINUE);
846*03831d35Sstevel 	}
847*03831d35Sstevel 
848*03831d35Sstevel 	if ((cpuid = ddi_getprop(DDI_DEV_T_ANY, dip,
849*03831d35Sstevel 	    DDI_PROP_DONTPASS, "cpuid", -1)) == -1) {
850*03831d35Sstevel 		return (DDI_WALK_CONTINUE);
851*03831d35Sstevel 	}
852*03831d35Sstevel 
853*03831d35Sstevel 	portid = SG_CPUID_TO_PORTID(cpuid);
854*03831d35Sstevel 
855*03831d35Sstevel 	/* filter out nodes not on this board */
856*03831d35Sstevel 	if (SG_PORTID_TO_BOARD_NUM(portid) != args->board ||
857*03831d35Sstevel 	    SG_PORTID_TO_NODEID(portid) != args->node) {
858*03831d35Sstevel 		return (DDI_WALK_PRUNECHILD);
859*03831d35Sstevel 	}
860*03831d35Sstevel 
861*03831d35Sstevel 	switch (impl) {
862*03831d35Sstevel 	case CHEETAH_IMPL:
863*03831d35Sstevel 	case CHEETAH_PLUS_IMPL:
864*03831d35Sstevel 	case JAGUAR_IMPL:
865*03831d35Sstevel 		args->non_panther_cpus++;
866*03831d35Sstevel 		break;
867*03831d35Sstevel 	case PANTHER_IMPL:
868*03831d35Sstevel 		break;
869*03831d35Sstevel 	default:
870*03831d35Sstevel 		ASSERT(0);
871*03831d35Sstevel 		args->non_panther_cpus++;
872*03831d35Sstevel 		break;
873*03831d35Sstevel 	}
874*03831d35Sstevel 
875*03831d35Sstevel 	SBDP_DBG_CPU("cpuid=0x%x, portid=0x%x, impl=0x%x, device_type=%s",
876*03831d35Sstevel 	    cpuid, portid, impl, buf);
877*03831d35Sstevel 
878*03831d35Sstevel 	return (DDI_WALK_CONTINUE);
879*03831d35Sstevel }
880*03831d35Sstevel 
881*03831d35Sstevel int
882*03831d35Sstevel sbdp_board_non_panther_cpus(int node, int board)
883*03831d35Sstevel {
884*03831d35Sstevel 	sbdp_node_walk_t arg = {0};
885*03831d35Sstevel 
886*03831d35Sstevel 	arg.node = node;
887*03831d35Sstevel 	arg.board = board;
888*03831d35Sstevel 
889*03831d35Sstevel 	/*
890*03831d35Sstevel 	 * Root node doesn't have to be held.
891*03831d35Sstevel 	 */
892*03831d35Sstevel 	ddi_walk_devs(ddi_root_node(), sbdp_find_non_panther_cpus,
893*03831d35Sstevel 	    (void *)&arg);
894*03831d35Sstevel 
895*03831d35Sstevel 	return (arg.non_panther_cpus);
896*03831d35Sstevel }
897