xref: /illumos-gate/usr/src/uts/i86pc/io/dr/dr_cpu.c (revision 3fe80ca4)
1a3114836SGerry Liu /*
2a3114836SGerry Liu  * CDDL HEADER START
3a3114836SGerry Liu  *
4a3114836SGerry Liu  * The contents of this file are subject to the terms of the
5a3114836SGerry Liu  * Common Development and Distribution License (the "License").
6a3114836SGerry Liu  * You may not use this file except in compliance with the License.
7a3114836SGerry Liu  *
8a3114836SGerry Liu  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9a3114836SGerry Liu  * or http://www.opensolaris.org/os/licensing.
10a3114836SGerry Liu  * See the License for the specific language governing permissions
11a3114836SGerry Liu  * and limitations under the License.
12a3114836SGerry Liu  *
13a3114836SGerry Liu  * When distributing Covered Code, include this CDDL HEADER in each
14a3114836SGerry Liu  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15a3114836SGerry Liu  * If applicable, add the following below this CDDL HEADER, with the
16a3114836SGerry Liu  * fields enclosed by brackets "[]" replaced with your own identifying
17a3114836SGerry Liu  * information: Portions Copyright [yyyy] [name of copyright owner]
18a3114836SGerry Liu  *
19a3114836SGerry Liu  * CDDL HEADER END
20a3114836SGerry Liu  */
21a3114836SGerry Liu 
22a3114836SGerry Liu /*
23a3114836SGerry Liu  * Copyright 2010 Sun Microsystems, Inc.  All rights reserved.
24a3114836SGerry Liu  * Use is subject to license terms.
25a3114836SGerry Liu  */
26a3114836SGerry Liu /*
27a3114836SGerry Liu  * Copyright (c) 2010, Intel Corporation.
28a3114836SGerry Liu  * All rights reserved.
29a3114836SGerry Liu  */
30a3114836SGerry Liu 
31c3377ee9SJohn Levon /*
32c3377ee9SJohn Levon  * Copyright 2019 Joyent, Inc.
33*3fe80ca4SDan Cross  * Copyright 2023 Oxide Computer Company
34c3377ee9SJohn Levon  */
35c3377ee9SJohn Levon 
36a3114836SGerry Liu /*
37a3114836SGerry Liu  * CPU support routines for DR
38a3114836SGerry Liu  */
39a3114836SGerry Liu 
40a3114836SGerry Liu #include <sys/note.h>
41a3114836SGerry Liu #include <sys/debug.h>
42a3114836SGerry Liu #include <sys/types.h>
43a3114836SGerry Liu #include <sys/errno.h>
44a3114836SGerry Liu #include <sys/dditypes.h>
45a3114836SGerry Liu #include <sys/ddi.h>
46a3114836SGerry Liu #include <sys/sunddi.h>
47a3114836SGerry Liu #include <sys/sunndi.h>
48a3114836SGerry Liu #include <sys/ndi_impldefs.h>
49a3114836SGerry Liu #include <sys/kmem.h>
50a3114836SGerry Liu #include <sys/processor.h>
51a3114836SGerry Liu #include <sys/cpuvar.h>
52a3114836SGerry Liu #include <sys/promif.h>
53a3114836SGerry Liu #include <sys/sysmacros.h>
54a3114836SGerry Liu #include <sys/archsystm.h>
55a3114836SGerry Liu #include <sys/machsystm.h>
56a3114836SGerry Liu #include <sys/cpu_module.h>
57a3114836SGerry Liu #include <sys/cmn_err.h>
58a3114836SGerry Liu 
59a3114836SGerry Liu #include <sys/dr.h>
60a3114836SGerry Liu #include <sys/dr_util.h>
61a3114836SGerry Liu 
62a3114836SGerry Liu /* for the DR*INTERNAL_ERROR macros.  see sys/dr.h. */
63a3114836SGerry Liu static char *dr_ie_fmt = "dr_cpu.c %d";
64a3114836SGerry Liu 
65a3114836SGerry Liu int
dr_cpu_unit_is_sane(dr_board_t * bp,dr_cpu_unit_t * cp)66a3114836SGerry Liu dr_cpu_unit_is_sane(dr_board_t *bp, dr_cpu_unit_t *cp)
67a3114836SGerry Liu {
68a3114836SGerry Liu #ifdef DEBUG
69a3114836SGerry Liu 	ASSERT(cp->sbc_cm.sbdev_bp == bp);
70a3114836SGerry Liu 	ASSERT(cp->sbc_cm.sbdev_type == SBD_COMP_CPU);
71a3114836SGerry Liu #else
72a3114836SGerry Liu 	_NOTE(ARGUNUSED(bp))
73a3114836SGerry Liu 	_NOTE(ARGUNUSED(cp))
74a3114836SGerry Liu #endif
75a3114836SGerry Liu 
76a3114836SGerry Liu 	return (1);
77a3114836SGerry Liu }
78a3114836SGerry Liu 
79a3114836SGerry Liu static int
dr_errno2ecode(int error)80a3114836SGerry Liu dr_errno2ecode(int error)
81a3114836SGerry Liu {
82a3114836SGerry Liu 	int	rv;
83a3114836SGerry Liu 
84a3114836SGerry Liu 	switch (error) {
85a3114836SGerry Liu 	case EBUSY:
86a3114836SGerry Liu 		rv = ESBD_BUSY;
87a3114836SGerry Liu 		break;
88a3114836SGerry Liu 	case EINVAL:
89a3114836SGerry Liu 		rv = ESBD_INVAL;
90a3114836SGerry Liu 		break;
91a3114836SGerry Liu 	case EALREADY:
92a3114836SGerry Liu 		rv = ESBD_ALREADY;
93a3114836SGerry Liu 		break;
94a3114836SGerry Liu 	case ENODEV:
95a3114836SGerry Liu 		rv = ESBD_NODEV;
96a3114836SGerry Liu 		break;
97a3114836SGerry Liu 	case ENOMEM:
98a3114836SGerry Liu 		rv = ESBD_NOMEM;
99a3114836SGerry Liu 		break;
100a3114836SGerry Liu 	default:
101a3114836SGerry Liu 		rv = ESBD_INVAL;
102a3114836SGerry Liu 	}
103a3114836SGerry Liu 
104a3114836SGerry Liu 	return (rv);
105a3114836SGerry Liu }
106a3114836SGerry Liu 
107a3114836SGerry Liu /*
108a3114836SGerry Liu  * On x86, the "clock-frequency" and cache size device properties may be
109a3114836SGerry Liu  * unavailable before CPU starts. If they are unavailabe, just set them to zero.
110a3114836SGerry Liu  */
111a3114836SGerry Liu static void
dr_cpu_set_prop(dr_cpu_unit_t * cp)112a3114836SGerry Liu dr_cpu_set_prop(dr_cpu_unit_t *cp)
113a3114836SGerry Liu {
114a3114836SGerry Liu 	sbd_error_t	*err;
115a3114836SGerry Liu 	dev_info_t	*dip;
116a3114836SGerry Liu 	uint64_t	clock_freq;
117a3114836SGerry Liu 	int		ecache_size = 0;
118a3114836SGerry Liu 	char		*cache_str = NULL;
119a3114836SGerry Liu 
120a3114836SGerry Liu 	err = drmach_get_dip(cp->sbc_cm.sbdev_id, &dip);
121a3114836SGerry Liu 	if (err) {
122a3114836SGerry Liu 		DRERR_SET_C(&cp->sbc_cm.sbdev_error, &err);
123a3114836SGerry Liu 		return;
124a3114836SGerry Liu 	}
125a3114836SGerry Liu 
126a3114836SGerry Liu 	if (dip == NULL) {
127a3114836SGerry Liu 		DR_DEV_INTERNAL_ERROR(&cp->sbc_cm);
128a3114836SGerry Liu 		return;
129a3114836SGerry Liu 	}
130a3114836SGerry Liu 
131a3114836SGerry Liu 	/* read in the CPU speed */
132a3114836SGerry Liu 	clock_freq = (unsigned int)ddi_prop_get_int(DDI_DEV_T_ANY, dip,
133a3114836SGerry Liu 	    DDI_PROP_DONTPASS, "clock-frequency", 0);
134a3114836SGerry Liu 
135a3114836SGerry Liu 	/*
136a3114836SGerry Liu 	 * The ecache property string is not the same
137a3114836SGerry Liu 	 * for all CPU implementations.
138a3114836SGerry Liu 	 */
139a3114836SGerry Liu 	switch (cp->sbc_cpu_impl) {
140a3114836SGerry Liu 	case X86_CPU_IMPL_NEHALEM_EX:
141a3114836SGerry Liu 		cache_str = "l3-cache-size";
142a3114836SGerry Liu 		break;
143a3114836SGerry Liu 	default:
144a3114836SGerry Liu 		cmn_err(CE_WARN, "Unknown cpu implementation=0x%x",
145a3114836SGerry Liu 		    cp->sbc_cpu_impl);
146a3114836SGerry Liu 		break;
147a3114836SGerry Liu 	}
148a3114836SGerry Liu 
149a3114836SGerry Liu 	if (cache_str != NULL) {
150a3114836SGerry Liu 		/* read in the ecache size */
151a3114836SGerry Liu 		/*
152a3114836SGerry Liu 		 * If the property is not found in the CPU node,
153a3114836SGerry Liu 		 * it has to be kept in the core or cmp node so
154a3114836SGerry Liu 		 * we just keep looking.
155a3114836SGerry Liu 		 */
156a3114836SGerry Liu 
157a3114836SGerry Liu 		ecache_size = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0,
158a3114836SGerry Liu 		    cache_str, 0);
159a3114836SGerry Liu 	}
160a3114836SGerry Liu 
161a3114836SGerry Liu 	/* convert to the proper units */
162a3114836SGerry Liu 	cp->sbc_speed = (clock_freq + 500000) / 1000000;
163a3114836SGerry Liu 	cp->sbc_ecache = ecache_size / (1024 * 1024);
164a3114836SGerry Liu }
165a3114836SGerry Liu 
/*
 * Initialize the software state of a CPU unit.
 *
 * Derives the unit's DR state from its device flags (attached ->
 * CONFIGURED, present -> CONNECTED, otherwise EMPTY), caches the
 * cpuid, implementation id and cpu_flags, and reads the speed/ecache
 * properties.  The dr_device_transition() call is deliberately made
 * last so the unit is fully initialized before its state changes.
 */
void
dr_init_cpu_unit(dr_cpu_unit_t *cp)
{
	sbd_error_t	*err;
	dr_state_t	new_state;
	int		cpuid;
	int		impl;

	/* Map the device's current flags onto a DR state. */
	if (DR_DEV_IS_ATTACHED(&cp->sbc_cm)) {
		new_state = DR_STATE_CONFIGURED;
		cp->sbc_cm.sbdev_cond = SBD_COND_OK;
	} else if (DR_DEV_IS_PRESENT(&cp->sbc_cm)) {
		new_state = DR_STATE_CONNECTED;
		cp->sbc_cm.sbdev_cond = SBD_COND_OK;
	} else {
		new_state = DR_STATE_EMPTY;
		cp->sbc_cm.sbdev_cond = SBD_COND_UNKNOWN;
	}

	if (DR_DEV_IS_PRESENT(&cp->sbc_cm)) {
		err = drmach_cpu_get_id(cp->sbc_cm.sbdev_id, &cpuid);
		if (err) {
			DRERR_SET_C(&cp->sbc_cm.sbdev_error, &err);
			new_state = DR_STATE_FATAL;
			goto done;
		}

		err = drmach_cpu_get_impl(cp->sbc_cm.sbdev_id, &impl);
		if (err) {
			DRERR_SET_C(&cp->sbc_cm.sbdev_error, &err);
			new_state = DR_STATE_FATAL;
			goto done;
		}
	} else {
		/* Not present: there are no hardware ids to cache. */
		cp->sbc_cpu_id = -1;
		cp->sbc_cpu_impl = -1;
		goto done;
	}

	cp->sbc_cpu_id = cpuid;
	cp->sbc_cpu_impl = impl;

	/* if true at init time, it must always be true */
	ASSERT(dr_cpu_unit_is_sane(cp->sbc_cm.sbdev_bp, cp));

	/* Snapshot cpu_flags; an absent CPU reads as offline/powered-off. */
	mutex_enter(&cpu_lock);
	if ((cpuid >= 0) && cpu[cpuid])
		cp->sbc_cpu_flags = cpu[cpuid]->cpu_flags;
	else
		cp->sbc_cpu_flags = P_OFFLINE | P_POWEROFF;
	mutex_exit(&cpu_lock);

	dr_cpu_set_prop(cp);

done:
	/* delay transition until fully initialized */
	dr_device_transition(&cp->sbc_cm, new_state);
}
224a3114836SGerry Liu 
225a3114836SGerry Liu int
dr_pre_attach_cpu(dr_handle_t * hp,dr_common_unit_t ** devlist,int devnum)226a3114836SGerry Liu dr_pre_attach_cpu(dr_handle_t *hp, dr_common_unit_t **devlist, int devnum)
227a3114836SGerry Liu {
228a3114836SGerry Liu 	int		i;
229a3114836SGerry Liu 	static fn_t	f = "dr_pre_attach_cpu";
230a3114836SGerry Liu 
231a3114836SGerry Liu 	PR_CPU("%s...\n", f);
232a3114836SGerry Liu 
233a3114836SGerry Liu 	for (i = 0; i < devnum; i++) {
234a3114836SGerry Liu 		dr_cpu_unit_t *up = (dr_cpu_unit_t *)devlist[i];
235a3114836SGerry Liu 
236a3114836SGerry Liu 		ASSERT(dr_cpu_unit_is_sane(hp->h_bd, up));
237a3114836SGerry Liu 
238a3114836SGerry Liu 		/*
239a3114836SGerry Liu 		 * Print a console message for each attachment
240a3114836SGerry Liu 		 * point. For CMP devices, this means that only
241a3114836SGerry Liu 		 * one message should be printed, no matter how
242a3114836SGerry Liu 		 * many cores are actually present.
243a3114836SGerry Liu 		 */
244a3114836SGerry Liu 		if ((up->sbc_cm.sbdev_unum % MAX_CORES_PER_CMP) == 0) {
245a3114836SGerry Liu 			cmn_err(CE_CONT, "OS configure %s",
246a3114836SGerry Liu 			    up->sbc_cm.sbdev_path);
247a3114836SGerry Liu 		}
248a3114836SGerry Liu 	}
249a3114836SGerry Liu 
250a3114836SGerry Liu 	/*
251a3114836SGerry Liu 	 * Block out status threads while creating
252a3114836SGerry Liu 	 * devinfo tree branches
253a3114836SGerry Liu 	 */
254a3114836SGerry Liu 	dr_lock_status(hp->h_bd);
255*3fe80ca4SDan Cross 	ndi_devi_enter(ddi_root_node());
256a3114836SGerry Liu 	mutex_enter(&cpu_lock);
257a3114836SGerry Liu 
258a3114836SGerry Liu 	return (0);
259a3114836SGerry Liu }
260a3114836SGerry Liu 
261a3114836SGerry Liu /*ARGSUSED*/
262a3114836SGerry Liu void
dr_attach_cpu(dr_handle_t * hp,dr_common_unit_t * cp)263a3114836SGerry Liu dr_attach_cpu(dr_handle_t *hp, dr_common_unit_t *cp)
264a3114836SGerry Liu {
265a3114836SGerry Liu 	sbd_error_t	*err;
266a3114836SGerry Liu 	processorid_t	 cpuid;
267a3114836SGerry Liu 	int		 rv;
268a3114836SGerry Liu 
269a3114836SGerry Liu 	ASSERT(MUTEX_HELD(&cpu_lock));
270a3114836SGerry Liu 
271a3114836SGerry Liu 	err = drmach_configure(cp->sbdev_id, 0);
272a3114836SGerry Liu 	if (err) {
273a3114836SGerry Liu 		DRERR_SET_C(&cp->sbdev_error, &err);
274a3114836SGerry Liu 		return;
275a3114836SGerry Liu 	}
276a3114836SGerry Liu 
277a3114836SGerry Liu 	err = drmach_cpu_get_id(cp->sbdev_id, &cpuid);
278a3114836SGerry Liu 	if (err) {
279a3114836SGerry Liu 		DRERR_SET_C(&cp->sbdev_error, &err);
280a3114836SGerry Liu 
281a3114836SGerry Liu 		err = drmach_unconfigure(cp->sbdev_id, DEVI_BRANCH_DESTROY);
282a3114836SGerry Liu 		if (err)
283a3114836SGerry Liu 			sbd_err_clear(&err);
284a3114836SGerry Liu 	} else if ((rv = cpu_configure(cpuid)) != 0) {
285a3114836SGerry Liu 		dr_dev_err(CE_WARN, cp, dr_errno2ecode(rv));
286a3114836SGerry Liu 		err = drmach_unconfigure(cp->sbdev_id, DEVI_BRANCH_DESTROY);
287a3114836SGerry Liu 		if (err)
288a3114836SGerry Liu 			sbd_err_clear(&err);
289a3114836SGerry Liu 	} else {
290a3114836SGerry Liu 		dr_cpu_unit_t *up = (dr_cpu_unit_t *)cp;
291a3114836SGerry Liu 		up->sbc_cpu_id = cpuid;
292a3114836SGerry Liu 	}
293a3114836SGerry Liu }
294a3114836SGerry Liu 
295a3114836SGerry Liu /*
296a3114836SGerry Liu  * dr_post_attach_cpu
297a3114836SGerry Liu  *
298a3114836SGerry Liu  * sbd error policy: Does not stop on error.  Processes all units in list.
299a3114836SGerry Liu  */
300a3114836SGerry Liu int
dr_post_attach_cpu(dr_handle_t * hp,dr_common_unit_t ** devlist,int devnum)301a3114836SGerry Liu dr_post_attach_cpu(dr_handle_t *hp, dr_common_unit_t **devlist, int devnum)
302a3114836SGerry Liu {
303a3114836SGerry Liu 	int		i;
304a3114836SGerry Liu 	int		errflag = 0;
305a3114836SGerry Liu 	static fn_t	f = "dr_post_attach_cpu";
306a3114836SGerry Liu 
307a3114836SGerry Liu 	PR_CPU("%s...\n", f);
308a3114836SGerry Liu 
309a3114836SGerry Liu 	/* Startup and online newly-attached CPUs */
310a3114836SGerry Liu 	for (i = 0; i < devnum; i++) {
311a3114836SGerry Liu 		dr_cpu_unit_t *up = (dr_cpu_unit_t *)devlist[i];
312a3114836SGerry Liu 		struct cpu	*cp;
313a3114836SGerry Liu 
314a3114836SGerry Liu 		ASSERT(dr_cpu_unit_is_sane(hp->h_bd, up));
315a3114836SGerry Liu 
316a3114836SGerry Liu 		cp = cpu_get(up->sbc_cpu_id);
317a3114836SGerry Liu 		if (cp == NULL) {
318a3114836SGerry Liu 			cmn_err(CE_WARN, "%s: cpu_get failed for cpu %d",
319a3114836SGerry Liu 			    f, up->sbc_cpu_id);
320a3114836SGerry Liu 			continue;
321a3114836SGerry Liu 		}
322a3114836SGerry Liu 
323a3114836SGerry Liu 		if (cpu_is_poweredoff(cp)) {
324a3114836SGerry Liu 			if (cpu_poweron(cp) != 0) {
325a3114836SGerry Liu 				dr_dev_err(CE_WARN, &up->sbc_cm, ESBD_CPUSTART);
326a3114836SGerry Liu 				errflag = 1;
327a3114836SGerry Liu 			}
328a3114836SGerry Liu 			PR_CPU("%s: cpu %d powered ON\n", f, up->sbc_cpu_id);
329a3114836SGerry Liu 		}
330a3114836SGerry Liu 
331a3114836SGerry Liu 		if (cpu_is_offline(cp)) {
332a3114836SGerry Liu 			PR_CPU("%s: onlining cpu %d...\n", f, up->sbc_cpu_id);
333a3114836SGerry Liu 
334c3377ee9SJohn Levon 			if (cpu_online(cp, 0) != 0) {
335a3114836SGerry Liu 				dr_dev_err(CE_WARN, &up->sbc_cm, ESBD_ONLINE);
336a3114836SGerry Liu 				errflag = 1;
337a3114836SGerry Liu 			}
338a3114836SGerry Liu 		}
339a3114836SGerry Liu 
340a3114836SGerry Liu 	}
341a3114836SGerry Liu 
342a3114836SGerry Liu 	mutex_exit(&cpu_lock);
343*3fe80ca4SDan Cross 	ndi_devi_exit(ddi_root_node());
344a3114836SGerry Liu 	dr_unlock_status(hp->h_bd);
345a3114836SGerry Liu 
346a3114836SGerry Liu 	if (errflag)
347a3114836SGerry Liu 		return (-1);
348a3114836SGerry Liu 	else
349a3114836SGerry Liu 		return (0);
350a3114836SGerry Liu }
351a3114836SGerry Liu 
/*
 * dr_pre_release_cpu
 *
 * sbd error policy: Stops on first error.
 *
 * Offlines and releases each attached CPU in devlist.  A CPU that the
 * status layer reports busy (e.g. CPU0, which other components depend
 * on) fails the whole operation with ESBD_BUSY.  On any failure, CPUs
 * already offlined by this call are restored via dr_cancel_cpu(),
 * because at pre-release time the device states have not transitioned
 * yet and the "post" path that would do so will not run.
 */
int
dr_pre_release_cpu(dr_handle_t *hp, dr_common_unit_t **devlist, int devnum)
{
	int		c, cix, i, lastoffline = -1, rv = 0;
	processorid_t	cpuid;
	struct cpu	*cp;
	dr_cpu_unit_t	*up;
	dr_devset_t	devset;
	sbd_dev_stat_t	*ds;
	static fn_t	f = "dr_pre_release_cpu";
	int		cpu_flags = 0;

	devset = DR_DEVS_PRESENT(hp->h_bd);

	/* allocate status struct storage. */
	ds = (sbd_dev_stat_t *) kmem_zalloc(sizeof (sbd_dev_stat_t) *
	    MAX_CPU_UNITS_PER_BOARD, KM_SLEEP);

	cix = dr_cpu_status(hp, devset, ds);

	mutex_enter(&cpu_lock);

	for (i = 0; i < devnum; i++) {
		up = (dr_cpu_unit_t *)devlist[i];
		if (!DR_DEV_IS_ATTACHED(&up->sbc_cm)) {
			continue;
		}
		ASSERT(dr_cpu_unit_is_sane(hp->h_bd, up));

		/*
		 * On x86 systems, some CPUs can't be unconfigured.
		 * For example, CPU0 can't be unconfigured because many other
		 * components have a dependency on it.
		 * This check determines if a CPU is currently in use and
		 * returns a "Device busy" error if so.
		 */
		for (c = 0; c < cix; c++) {
			if (ds[c].d_cpu.cs_unit == up->sbc_cm.sbdev_unum) {
				if (ds[c].d_cpu.cs_busy) {
					dr_dev_err(CE_WARN, &up->sbc_cm,
					    ESBD_BUSY);
					rv = -1;
					break;
				}
			}
		}
		/* c < cix means the inner loop broke on the busy error. */
		if (c < cix)
			break;

		cpuid = up->sbc_cpu_id;
		if ((cp = cpu_get(cpuid)) == NULL) {
			dr_dev_err(CE_WARN, &up->sbc_cm, ESBD_OFFLINE);
			rv = -1;
			break;
		}

		/* used by dr_cancel_cpu during error flow */
		up->sbc_cpu_flags = cp->cpu_flags;

		if (CPU_ACTIVE(cp)) {
			if (dr_cmd_flags(hp) & SBD_FLAG_FORCE)
				cpu_flags = CPU_FORCED;

			PR_CPU("%s: offlining cpu %d\n", f, cpuid);
			if (cpu_offline(cp, cpu_flags)) {
				PR_CPU("%s: failed to offline cpu %d\n", f,
				    cpuid);
				dr_dev_err(CE_WARN, &up->sbc_cm, ESBD_OFFLINE);
				if (disp_bound_threads(cp, 0)) {
					cmn_err(CE_WARN, "%s: thread(s) bound "
					    "to cpu %d", f, cp->cpu_id);
				}
				rv = -1;
				break;
			} else
				/* remember how far to unwind on failure */
				lastoffline = i;
		}

		if (!rv) {
			sbd_error_t *err;

			err = drmach_release(up->sbc_cm.sbdev_id);
			if (err) {
				DRERR_SET_C(&up->sbc_cm.sbdev_error, &err);
				rv = -1;
				break;
			}
		}
	}

	mutex_exit(&cpu_lock);

	if (rv) {
		/*
		 * Need to unwind others since at this level (pre-release)
		 * the device state has not yet transitioned and failures
		 * will prevent us from reaching the "post" release
		 * function where states are normally transitioned.
		 */
		for (i = lastoffline; i >= 0; i--) {
			up = (dr_cpu_unit_t *)devlist[i];
			(void) dr_cancel_cpu(up);
		}
	}

	kmem_free(ds, sizeof (sbd_dev_stat_t) * MAX_CPU_UNITS_PER_BOARD);
	return (rv);
}
465a3114836SGerry Liu 
/*
 * dr_pre_detach_cpu
 *
 * sbd error policy: Stops on first error.
 *
 * Powers off each attached CPU in devlist, re-offlining any CPU that
 * was onlined again after release.  On success this returns 0 with
 * the status lock and cpu_lock still HELD; they are dropped later in
 * dr_post_detach_cpu().  On error both locks are dropped here and -1
 * is returned.
 */
int
dr_pre_detach_cpu(dr_handle_t *hp, dr_common_unit_t **devlist, int devnum)
{
	_NOTE(ARGUNUSED(hp))

	int		i;
	int		cpu_flags = 0;
	static fn_t	f = "dr_pre_detach_cpu";

	PR_CPU("%s...\n", f);

	/*
	 * Block out status threads while destroying devinfo tree
	 * branches
	 */
	dr_lock_status(hp->h_bd);
	mutex_enter(&cpu_lock);

	for (i = 0; i < devnum; i++) {
		dr_cpu_unit_t *up = (dr_cpu_unit_t *)devlist[i];
		struct cpu	*cp;

		if (!DR_DEV_IS_ATTACHED(&up->sbc_cm)) {
			continue;
		}

		ASSERT(dr_cpu_unit_is_sane(hp->h_bd, up));

		cp = cpu_get(up->sbc_cpu_id);
		if (cp == NULL)
			continue;

		/*
		 * Print a console message for each attachment
		 * point. For CMP devices, this means that only
		 * one message should be printed, no matter how
		 * many cores are actually present.
		 */
		if ((up->sbc_cm.sbdev_unum % MAX_CORES_PER_CMP) == 0) {
			cmn_err(CE_CONT, "OS unconfigure %s\n",
			    up->sbc_cm.sbdev_path);
		}

		/*
		 * CPUs were offlined during Release.
		 */
		if (cpu_is_poweredoff(cp)) {
			PR_CPU("%s: cpu %d already powered OFF\n",
			    f, up->sbc_cpu_id);
			continue;
		}

		if (!cpu_is_offline(cp)) {
			if (dr_cmd_flags(hp) & SBD_FLAG_FORCE)
				cpu_flags = CPU_FORCED;
			/* cpu was onlined after release.  Offline it again */
			PR_CPU("%s: offlining cpu %d\n", f, up->sbc_cpu_id);
			if (cpu_offline(cp, cpu_flags)) {
				PR_CPU("%s: failed to offline cpu %d\n",
				    f, up->sbc_cpu_id);
				dr_dev_err(CE_WARN, &up->sbc_cm, ESBD_OFFLINE);
				if (disp_bound_threads(cp, 0)) {
					cmn_err(CE_WARN, "%s: thread(s) bound "
					    "to cpu %d", f, cp->cpu_id);
				}
				goto err;
			}
		}
		if (cpu_poweroff(cp) != 0) {
			dr_dev_err(CE_WARN, &up->sbc_cm, ESBD_CPUSTOP);
			goto err;
		} else {
			PR_CPU("%s: cpu %d powered OFF\n", f, up->sbc_cpu_id);
		}
	}

	/* Success: locks stay held for dr_detach_cpu/dr_post_detach_cpu. */
	return (0);

err:
	mutex_exit(&cpu_lock);
	dr_unlock_status(hp->h_bd);
	return (-1);
}
554a3114836SGerry Liu 
555a3114836SGerry Liu /*ARGSUSED*/
556a3114836SGerry Liu void
dr_detach_cpu(dr_handle_t * hp,dr_common_unit_t * cp)557a3114836SGerry Liu dr_detach_cpu(dr_handle_t *hp, dr_common_unit_t *cp)
558a3114836SGerry Liu {
559a3114836SGerry Liu 	sbd_error_t	*err;
560a3114836SGerry Liu 	processorid_t	 cpuid;
561a3114836SGerry Liu 	int		 rv;
562a3114836SGerry Liu 	dr_cpu_unit_t	*up = (dr_cpu_unit_t *)cp;
563a3114836SGerry Liu 
564a3114836SGerry Liu 	ASSERT(MUTEX_HELD(&cpu_lock));
565a3114836SGerry Liu 
566a3114836SGerry Liu 	if (!DR_DEV_IS_ATTACHED(&up->sbc_cm)) {
567a3114836SGerry Liu 		return;
568a3114836SGerry Liu 	}
569a3114836SGerry Liu 
570a3114836SGerry Liu 	err = drmach_cpu_get_id(cp->sbdev_id, &cpuid);
571a3114836SGerry Liu 	if (err) {
572a3114836SGerry Liu 		DRERR_SET_C(&cp->sbdev_error, &err);
573a3114836SGerry Liu 	} else if ((rv = cpu_unconfigure(cpuid)) != 0) {
574a3114836SGerry Liu 		dr_dev_err(CE_IGNORE, cp, dr_errno2ecode(rv));
575a3114836SGerry Liu 	} else {
576a3114836SGerry Liu 		err = drmach_unconfigure(cp->sbdev_id, DEVI_BRANCH_DESTROY);
577a3114836SGerry Liu 		if (err) {
578a3114836SGerry Liu 			DRERR_SET_C(&cp->sbdev_error, &err);
579a3114836SGerry Liu 		} else {
580a3114836SGerry Liu 			up->sbc_cpu_id = -1;
581a3114836SGerry Liu 		}
582a3114836SGerry Liu 	}
583a3114836SGerry Liu }
584a3114836SGerry Liu 
585a3114836SGerry Liu /*ARGSUSED1*/
586a3114836SGerry Liu int
dr_post_detach_cpu(dr_handle_t * hp,dr_common_unit_t ** devlist,int devnum)587a3114836SGerry Liu dr_post_detach_cpu(dr_handle_t *hp, dr_common_unit_t **devlist, int devnum)
588a3114836SGerry Liu {
589a3114836SGerry Liu 	static fn_t	f = "dr_post_detach_cpu";
590a3114836SGerry Liu 
591a3114836SGerry Liu 	PR_CPU("%s...\n", f);
592a3114836SGerry Liu 
593a3114836SGerry Liu 	mutex_exit(&cpu_lock);
594a3114836SGerry Liu 	dr_unlock_status(hp->h_bd);
595a3114836SGerry Liu 
596a3114836SGerry Liu 	return (0);
597a3114836SGerry Liu }
598a3114836SGerry Liu 
599a3114836SGerry Liu static void
dr_fill_cpu_stat(dr_cpu_unit_t * cp,drmach_status_t * pstat,sbd_cpu_stat_t * csp)600a3114836SGerry Liu dr_fill_cpu_stat(dr_cpu_unit_t *cp, drmach_status_t *pstat, sbd_cpu_stat_t *csp)
601a3114836SGerry Liu {
602a3114836SGerry Liu 	ASSERT(cp && pstat && csp);
603a3114836SGerry Liu 
604a3114836SGerry Liu 	/* Fill in the common status information */
605a3114836SGerry Liu 	bzero((caddr_t)csp, sizeof (*csp));
606a3114836SGerry Liu 	csp->cs_type = cp->sbc_cm.sbdev_type;
607a3114836SGerry Liu 	csp->cs_unit = cp->sbc_cm.sbdev_unum;
608a3114836SGerry Liu 	(void) strlcpy(csp->cs_name, pstat->type, sizeof (csp->cs_name));
609a3114836SGerry Liu 	csp->cs_cond = cp->sbc_cm.sbdev_cond;
610a3114836SGerry Liu 	csp->cs_busy = cp->sbc_cm.sbdev_busy | pstat->busy;
611a3114836SGerry Liu 	csp->cs_time = cp->sbc_cm.sbdev_time;
612a3114836SGerry Liu 	csp->cs_ostate = cp->sbc_cm.sbdev_ostate;
613a3114836SGerry Liu 	csp->cs_suspend = 0;
614a3114836SGerry Liu 
615a3114836SGerry Liu 	/* CPU specific status data */
616a3114836SGerry Liu 	csp->cs_cpuid = cp->sbc_cpu_id;
617a3114836SGerry Liu 
618a3114836SGerry Liu 	/*
619a3114836SGerry Liu 	 * If the speed and ecache properties have not been
620a3114836SGerry Liu 	 * cached yet, read them in from the device tree.
621a3114836SGerry Liu 	 */
622a3114836SGerry Liu 	if ((cp->sbc_speed == 0) || (cp->sbc_ecache == 0))
623a3114836SGerry Liu 		dr_cpu_set_prop(cp);
624a3114836SGerry Liu 
625a3114836SGerry Liu 	/* use the cached speed and ecache values */
626a3114836SGerry Liu 	csp->cs_speed = cp->sbc_speed;
627a3114836SGerry Liu 	csp->cs_ecache = cp->sbc_ecache;
628a3114836SGerry Liu 
629a3114836SGerry Liu 	mutex_enter(&cpu_lock);
630a3114836SGerry Liu 	if (!cpu_get(csp->cs_cpuid)) {
631a3114836SGerry Liu 		/* ostate must be UNCONFIGURED */
632a3114836SGerry Liu 		csp->cs_cm.c_ostate = SBD_STAT_UNCONFIGURED;
633a3114836SGerry Liu 	}
634a3114836SGerry Liu 	mutex_exit(&cpu_lock);
635a3114836SGerry Liu }
636a3114836SGerry Liu 
637a3114836SGerry Liu /*ARGSUSED2*/
638a3114836SGerry Liu static void
dr_fill_cmp_stat(sbd_cpu_stat_t * csp,int ncores,int impl,sbd_cmp_stat_t * psp)639a3114836SGerry Liu dr_fill_cmp_stat(sbd_cpu_stat_t *csp, int ncores, int impl, sbd_cmp_stat_t *psp)
640a3114836SGerry Liu {
641a3114836SGerry Liu 	int	core;
642a3114836SGerry Liu 
643a3114836SGerry Liu 	ASSERT(csp && psp && (ncores >= 1));
644a3114836SGerry Liu 
645a3114836SGerry Liu 	bzero((caddr_t)psp, sizeof (*psp));
646a3114836SGerry Liu 
647a3114836SGerry Liu 	/*
648a3114836SGerry Liu 	 * Fill in the common status information based
649a3114836SGerry Liu 	 * on the data for the first core.
650a3114836SGerry Liu 	 */
651a3114836SGerry Liu 	psp->ps_type = SBD_COMP_CMP;
652a3114836SGerry Liu 	psp->ps_unit = DR_UNUM2SBD_UNUM(csp->cs_unit, SBD_COMP_CMP);
653a3114836SGerry Liu 	(void) strlcpy(psp->ps_name, csp->cs_name, sizeof (psp->ps_name));
654a3114836SGerry Liu 	psp->ps_cond = csp->cs_cond;
655a3114836SGerry Liu 	psp->ps_busy = csp->cs_busy;
656a3114836SGerry Liu 	psp->ps_time = csp->cs_time;
657a3114836SGerry Liu 	psp->ps_ostate = csp->cs_ostate;
658a3114836SGerry Liu 	psp->ps_suspend = csp->cs_suspend;
659a3114836SGerry Liu 
660a3114836SGerry Liu 	/* CMP specific status data */
661a3114836SGerry Liu 	*psp->ps_cpuid = csp->cs_cpuid;
662a3114836SGerry Liu 	psp->ps_ncores = 1;
663a3114836SGerry Liu 	psp->ps_speed = csp->cs_speed;
664a3114836SGerry Liu 	psp->ps_ecache = csp->cs_ecache;
665a3114836SGerry Liu 
666a3114836SGerry Liu 	/*
667a3114836SGerry Liu 	 * Walk through the data for the remaining cores.
668a3114836SGerry Liu 	 * Make any adjustments to the common status data,
669a3114836SGerry Liu 	 * or the shared CMP specific data if necessary.
670a3114836SGerry Liu 	 */
671a3114836SGerry Liu 	for (core = 1; core < ncores; core++) {
672a3114836SGerry Liu 		/*
673a3114836SGerry Liu 		 * The following properties should be the same
674a3114836SGerry Liu 		 * for all the cores of the CMP.
675a3114836SGerry Liu 		 */
676a3114836SGerry Liu 		ASSERT(psp->ps_unit == DR_UNUM2SBD_UNUM(csp[core].cs_unit,
677a3114836SGerry Liu 		    SBD_COMP_CMP));
678a3114836SGerry Liu 
679a3114836SGerry Liu 		if (csp[core].cs_speed > psp->ps_speed)
680a3114836SGerry Liu 			psp->ps_speed = csp[core].cs_speed;
681a3114836SGerry Liu 		if (csp[core].cs_ecache > psp->ps_ecache)
682a3114836SGerry Liu 			psp->ps_ecache = csp[core].cs_ecache;
683a3114836SGerry Liu 
684a3114836SGerry Liu 		psp->ps_cpuid[core] = csp[core].cs_cpuid;
685a3114836SGerry Liu 		psp->ps_ncores++;
686a3114836SGerry Liu 
687a3114836SGerry Liu 		/* adjust time if necessary */
688a3114836SGerry Liu 		if (csp[core].cs_time > psp->ps_time) {
689a3114836SGerry Liu 			psp->ps_time = csp[core].cs_time;
690a3114836SGerry Liu 		}
691a3114836SGerry Liu 
692a3114836SGerry Liu 		psp->ps_busy |= csp[core].cs_busy;
693a3114836SGerry Liu 
694a3114836SGerry Liu 		/*
695a3114836SGerry Liu 		 * If any of the cores are configured, the
696a3114836SGerry Liu 		 * entire CMP is marked as configured.
697a3114836SGerry Liu 		 */
698a3114836SGerry Liu 		if (csp[core].cs_ostate == SBD_STAT_CONFIGURED) {
699a3114836SGerry Liu 			psp->ps_ostate = csp[core].cs_ostate;
700a3114836SGerry Liu 		}
701a3114836SGerry Liu 	}
702a3114836SGerry Liu }
703a3114836SGerry Liu 
704a3114836SGerry Liu int
dr_cpu_status(dr_handle_t * hp,dr_devset_t devset,sbd_dev_stat_t * dsp)705a3114836SGerry Liu dr_cpu_status(dr_handle_t *hp, dr_devset_t devset, sbd_dev_stat_t *dsp)
706a3114836SGerry Liu {
707a3114836SGerry Liu 	int		cmp;
708a3114836SGerry Liu 	int		core;
709a3114836SGerry Liu 	int		ncpu;
710a3114836SGerry Liu 	dr_board_t	*bp;
711a3114836SGerry Liu 	sbd_cpu_stat_t	*cstat;
712a3114836SGerry Liu 	int		impl;
713a3114836SGerry Liu 
714a3114836SGerry Liu 	bp = hp->h_bd;
715a3114836SGerry Liu 	ncpu = 0;
716a3114836SGerry Liu 
717a3114836SGerry Liu 	devset &= DR_DEVS_PRESENT(bp);
718a3114836SGerry Liu 	cstat = kmem_zalloc(sizeof (sbd_cpu_stat_t) * MAX_CORES_PER_CMP,
719a3114836SGerry Liu 	    KM_SLEEP);
720a3114836SGerry Liu 
721a3114836SGerry Liu 	/*
722a3114836SGerry Liu 	 * Treat every CPU as a CMP. In the case where the
723a3114836SGerry Liu 	 * device is not a CMP, treat it as a CMP with only
724a3114836SGerry Liu 	 * one core.
725a3114836SGerry Liu 	 */
726a3114836SGerry Liu 	for (cmp = 0; cmp < MAX_CMP_UNITS_PER_BOARD; cmp++) {
727a3114836SGerry Liu 		int		ncores;
728a3114836SGerry Liu 		dr_cpu_unit_t	*cp;
729a3114836SGerry Liu 		drmach_status_t	pstat;
730a3114836SGerry Liu 		sbd_error_t	*err;
731a3114836SGerry Liu 		sbd_cmp_stat_t	*psp;
732a3114836SGerry Liu 
733a3114836SGerry Liu 		if ((devset & DEVSET(SBD_COMP_CMP, cmp)) == 0) {
734a3114836SGerry Liu 			continue;
735a3114836SGerry Liu 		}
736a3114836SGerry Liu 
737a3114836SGerry Liu 		ncores = 0;
738a3114836SGerry Liu 
739a3114836SGerry Liu 		for (core = 0; core < MAX_CORES_PER_CMP; core++) {
740a3114836SGerry Liu 
741a3114836SGerry Liu 			cp = dr_get_cpu_unit(bp, DR_CMP_CORE_UNUM(cmp, core));
742a3114836SGerry Liu 
743a3114836SGerry Liu 			if (cp->sbc_cm.sbdev_state == DR_STATE_EMPTY) {
744a3114836SGerry Liu 				/* present, but not fully initialized */
745a3114836SGerry Liu 				continue;
746a3114836SGerry Liu 			}
747a3114836SGerry Liu 
748a3114836SGerry Liu 			ASSERT(dr_cpu_unit_is_sane(hp->h_bd, cp));
749a3114836SGerry Liu 
750a3114836SGerry Liu 			/* skip if not present */
751a3114836SGerry Liu 			if (cp->sbc_cm.sbdev_id == (drmachid_t)0) {
752a3114836SGerry Liu 				continue;
753a3114836SGerry Liu 			}
754a3114836SGerry Liu 
755a3114836SGerry Liu 			/* fetch platform status */
756a3114836SGerry Liu 			err = drmach_status(cp->sbc_cm.sbdev_id, &pstat);
757a3114836SGerry Liu 			if (err) {
758a3114836SGerry Liu 				DRERR_SET_C(&cp->sbc_cm.sbdev_error, &err);
759a3114836SGerry Liu 				continue;
760a3114836SGerry Liu 			}
761a3114836SGerry Liu 
762a3114836SGerry Liu 			dr_fill_cpu_stat(cp, &pstat, &cstat[ncores++]);
763a3114836SGerry Liu 			/*
764a3114836SGerry Liu 			 * We should set impl here because the last core
765a3114836SGerry Liu 			 * found might be EMPTY or not present.
766a3114836SGerry Liu 			 */
767a3114836SGerry Liu 			impl = cp->sbc_cpu_impl;
768a3114836SGerry Liu 		}
769a3114836SGerry Liu 
770a3114836SGerry Liu 		if (ncores == 0) {
771a3114836SGerry Liu 			continue;
772a3114836SGerry Liu 		}
773a3114836SGerry Liu 
774a3114836SGerry Liu 		/*
775a3114836SGerry Liu 		 * Store the data to the outgoing array. If the
776a3114836SGerry Liu 		 * device is a CMP, combine all the data for the
777a3114836SGerry Liu 		 * cores into a single stat structure.
778a3114836SGerry Liu 		 *
779a3114836SGerry Liu 		 * The check for a CMP device uses the last core
780a3114836SGerry Liu 		 * found, assuming that all cores will have the
781a3114836SGerry Liu 		 * same implementation.
782a3114836SGerry Liu 		 */
783a3114836SGerry Liu 		if (CPU_IMPL_IS_CMP(impl)) {
784a3114836SGerry Liu 			psp = (sbd_cmp_stat_t *)dsp;
785a3114836SGerry Liu 			dr_fill_cmp_stat(cstat, ncores, impl, psp);
786a3114836SGerry Liu 		} else {
787a3114836SGerry Liu 			ASSERT(ncores == 1);
788a3114836SGerry Liu 			bcopy(cstat, dsp, sizeof (sbd_cpu_stat_t));
789a3114836SGerry Liu 		}
790a3114836SGerry Liu 
791a3114836SGerry Liu 		dsp++;
792a3114836SGerry Liu 		ncpu++;
793a3114836SGerry Liu 	}
794a3114836SGerry Liu 
795a3114836SGerry Liu 	kmem_free(cstat, sizeof (sbd_cpu_stat_t) * MAX_CORES_PER_CMP);
796a3114836SGerry Liu 
797a3114836SGerry Liu 	return (ncpu);
798a3114836SGerry Liu }
799a3114836SGerry Liu 
/*
 * Cancel previous release operation for cpu.
 * For cpus this means simply bringing cpus that
 * were offline back online.  Note that they had
 * to have been online at the time they were
 * released.
 */
807a3114836SGerry Liu int
dr_cancel_cpu(dr_cpu_unit_t * up)808a3114836SGerry Liu dr_cancel_cpu(dr_cpu_unit_t *up)
809a3114836SGerry Liu {
810a3114836SGerry Liu 	int		rv = 0;
811a3114836SGerry Liu 	static fn_t	f = "dr_cancel_cpu";
812a3114836SGerry Liu 
813a3114836SGerry Liu 	ASSERT(dr_cpu_unit_is_sane(up->sbc_cm.sbdev_bp, up));
814a3114836SGerry Liu 
815a3114836SGerry Liu 	if (cpu_flagged_active(up->sbc_cpu_flags)) {
816a3114836SGerry Liu 		struct cpu	*cp;
817a3114836SGerry Liu 
818a3114836SGerry Liu 		/*
819a3114836SGerry Liu 		 * CPU had been online, go ahead
820a3114836SGerry Liu 		 * bring it back online.
821a3114836SGerry Liu 		 */
822a3114836SGerry Liu 		PR_CPU("%s: bringing cpu %d back ONLINE\n", f, up->sbc_cpu_id);
823a3114836SGerry Liu 
824a3114836SGerry Liu 		mutex_enter(&cpu_lock);
825a3114836SGerry Liu 		cp = cpu[up->sbc_cpu_id];
826a3114836SGerry Liu 
827a3114836SGerry Liu 		if (cpu_is_poweredoff(cp)) {
828a3114836SGerry Liu 			if (cpu_poweron(cp)) {
829a3114836SGerry Liu 				cmn_err(CE_WARN, "%s: failed to power-on "
830a3114836SGerry Liu 				    "cpu %d", f, up->sbc_cpu_id);
831a3114836SGerry Liu 				rv = -1;
832a3114836SGerry Liu 			}
833a3114836SGerry Liu 		}
834a3114836SGerry Liu 
835a3114836SGerry Liu 		if (rv == 0 && cpu_is_offline(cp)) {
836c3377ee9SJohn Levon 			if (cpu_online(cp, 0)) {
837a3114836SGerry Liu 				cmn_err(CE_WARN, "%s: failed to online cpu %d",
838a3114836SGerry Liu 				    f, up->sbc_cpu_id);
839a3114836SGerry Liu 				rv = -1;
840a3114836SGerry Liu 			}
841a3114836SGerry Liu 		}
842a3114836SGerry Liu 
843a3114836SGerry Liu 		if (rv == 0 && cpu_is_online(cp)) {
844a3114836SGerry Liu 			if (cpu_flagged_nointr(up->sbc_cpu_flags)) {
845a3114836SGerry Liu 				if (cpu_intr_disable(cp) != 0) {
846a3114836SGerry Liu 					cmn_err(CE_WARN, "%s: failed to "
847a3114836SGerry Liu 					    "disable interrupts on cpu %d", f,
848a3114836SGerry Liu 					    up->sbc_cpu_id);
849a3114836SGerry Liu 				}
850a3114836SGerry Liu 			}
851a3114836SGerry Liu 		}
852a3114836SGerry Liu 
853a3114836SGerry Liu 		mutex_exit(&cpu_lock);
854a3114836SGerry Liu 	}
855a3114836SGerry Liu 
856a3114836SGerry Liu 	return (rv);
857a3114836SGerry Liu }
858a3114836SGerry Liu 
859a3114836SGerry Liu int
dr_disconnect_cpu(dr_cpu_unit_t * up)860a3114836SGerry Liu dr_disconnect_cpu(dr_cpu_unit_t *up)
861a3114836SGerry Liu {
862a3114836SGerry Liu 	sbd_error_t	*err;
863a3114836SGerry Liu 	static fn_t	f = "dr_disconnect_cpu";
864a3114836SGerry Liu 
865a3114836SGerry Liu 	PR_CPU("%s...\n", f);
866a3114836SGerry Liu 
867a3114836SGerry Liu 	ASSERT((up->sbc_cm.sbdev_state == DR_STATE_CONNECTED) ||
868a3114836SGerry Liu 	    (up->sbc_cm.sbdev_state == DR_STATE_UNCONFIGURED));
869a3114836SGerry Liu 
870a3114836SGerry Liu 	ASSERT(dr_cpu_unit_is_sane(up->sbc_cm.sbdev_bp, up));
871a3114836SGerry Liu 
872a3114836SGerry Liu 	if (up->sbc_cm.sbdev_state == DR_STATE_CONNECTED) {
873a3114836SGerry Liu 		/*
874a3114836SGerry Liu 		 * Cpus were never brought in and so are still
875a3114836SGerry Liu 		 * effectively disconnected, so nothing to do here.
876a3114836SGerry Liu 		 */
877a3114836SGerry Liu 		PR_CPU("%s: cpu %d never brought in\n", f, up->sbc_cpu_id);
878a3114836SGerry Liu 		return (0);
879a3114836SGerry Liu 	}
880a3114836SGerry Liu 
881a3114836SGerry Liu 	err = drmach_cpu_disconnect(up->sbc_cm.sbdev_id);
882a3114836SGerry Liu 	if (err == NULL)
883a3114836SGerry Liu 		return (0);
884a3114836SGerry Liu 	else {
885a3114836SGerry Liu 		DRERR_SET_C(&up->sbc_cm.sbdev_error, &err);
886a3114836SGerry Liu 		return (-1);
887a3114836SGerry Liu 	}
888a3114836SGerry Liu 	/*NOTREACHED*/
889a3114836SGerry Liu }
890