xref: /illumos-gate/usr/src/uts/sun4u/ngdr/io/dr.c (revision c3937c08)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2010 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 /*
28  * PIM-DR layer of DR driver.  Provides interface between user
29  * level applications and the PSM-DR layer.
30  */
31 
32 #include <sys/note.h>
33 #include <sys/debug.h>
34 #include <sys/types.h>
35 #include <sys/errno.h>
36 #include <sys/cred.h>
37 #include <sys/dditypes.h>
38 #include <sys/devops.h>
39 #include <sys/modctl.h>
40 #include <sys/poll.h>
41 #include <sys/conf.h>
42 #include <sys/ddi.h>
43 #include <sys/sunddi.h>
44 #include <sys/sunndi.h>
45 #include <sys/stat.h>
46 #include <sys/kmem.h>
47 #include <sys/processor.h>
48 #include <sys/cpuvar.h>
49 #include <sys/mem_config.h>
50 
51 #include <sys/autoconf.h>
52 #include <sys/cmn_err.h>
53 
54 #include <sys/ddi_impldefs.h>
55 #include <sys/promif.h>
56 #include <sys/machsystm.h>
57 
58 #include <sys/dr.h>
59 #include <sys/drmach.h>
60 #include <sys/dr_util.h>
61 
62 extern int		 nulldev();
63 extern int		 nodev();
64 extern struct memlist	*phys_install;
65 
66 #ifdef DEBUG
67 uint_t	dr_debug = 0;			/* dr.h for bit values */
68 #endif /* DEBUG */
69 
70 /*
71  * NOTE: state_str, nt_str and SBD_CMD_STR are only used in a debug
72  * kernel.  They are, however, referenced during both debug and non-debug
73  * compiles.
74  */
75 
76 static char *state_str[] = {
77 	"EMPTY", "OCCUPIED", "CONNECTED", "UNCONFIGURED",
78 	"PARTIAL", "CONFIGURED", "RELEASE", "UNREFERENCED",
79 	"FATAL"
80 };
81 
82 #define	SBD_CMD_STR(c) \
83 	(((c) == SBD_CMD_ASSIGN)	? "ASSIGN"	: \
84 	((c) == SBD_CMD_UNASSIGN)	? "UNASSIGN"	: \
85 	((c) == SBD_CMD_POWERON)	? "POWERON"	: \
86 	((c) == SBD_CMD_POWEROFF)	? "POWEROFF"	: \
87 	((c) == SBD_CMD_TEST)		? "TEST"	: \
88 	((c) == SBD_CMD_CONNECT)	? "CONNECT"	: \
89 	((c) == SBD_CMD_DISCONNECT)	? "DISCONNECT"	: \
90 	((c) == SBD_CMD_CONFIGURE)	? "CONFIGURE"	: \
91 	((c) == SBD_CMD_UNCONFIGURE)	? "UNCONFIGURE"	: \
92 	((c) == SBD_CMD_GETNCM)		? "GETNCM"	: \
93 	((c) == SBD_CMD_PASSTHRU)	? "PASSTHRU"	: \
94 	((c) == SBD_CMD_STATUS)		? "STATUS"	: "unknown")
95 
96 #define	DR_GET_BOARD_DEVUNIT(sb, ut, un) (&((sb)->b_dev[NIX(ut)][un]))
97 
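/*
 * Minor number encoding: the driver instance occupies the bits above
 * bit 15 and the board number occupies the low 16 bits.
 */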
98 #define	DR_MAKE_MINOR(i, b)	(((i) << 16) | (b))
99 #define	DR_MINOR2INST(m)	(((m) >> 16) & 0xffff)
100 #define	DR_MINOR2BNUM(m)	((m) & 0xffff)
101 
102 /* for the DR*INTERNAL_ERROR macros.  see sys/dr.h. */
103 static char *dr_ie_fmt = "dr.c %d";
104 
105 /* struct for drmach device name to sbd_comp_type_t mapping */
106 typedef	struct {
107 	char		*s_devtype;
108 	sbd_comp_type_t	s_nodetype;
109 } dr_devname_t;
110 
111 /* struct to map starfire device attributes - name:sbd_comp_type_t */
112 static	dr_devname_t	dr_devattr[] = {
113 	{ DRMACH_DEVTYPE_MEM,	SBD_COMP_MEM },
114 	{ DRMACH_DEVTYPE_CPU,	SBD_COMP_CPU },
115 	{ DRMACH_DEVTYPE_PCI,	SBD_COMP_IO },
116 #if defined(DRMACH_DEVTYPE_SBUS)
117 	{ DRMACH_DEVTYPE_SBUS,	SBD_COMP_IO },
118 #endif
119 #if defined(DRMACH_DEVTYPE_WCI)
120 	{ DRMACH_DEVTYPE_WCI,	SBD_COMP_IO },
121 #endif
122 	/* last s_devtype must be NULL, s_nodetype must be SBD_COMP_UNKNOWN */
123 	{ NULL,			SBD_COMP_UNKNOWN }
124 };
125 
126 /*
127  * Per instance soft-state structure.
128  */
129 typedef struct dr_softstate {
130 	dev_info_t	*dip;
131 	dr_board_t	*boards;
132 	kmutex_t	i_lock;
133 	int		 dr_initialized;
134 } dr_softstate_t;
135 
136 /*
137  * dr Global data elements
138  */
139 struct dr_global {
140 	dr_softstate_t	*softsp;	/* anchor for per-instance soft state */
141 	kmutex_t	lock;
142 } dr_g;
143 
144 dr_unsafe_devs_t	dr_unsafe_devs;
145 
146 /*
147  * Table of known passthru commands.
148  */
149 
150 struct {
151 	char	*pt_name;
152 	int	(*pt_func)(dr_handle_t *);
153 } pt_arr[] = {
154 	"quiesce",		dr_pt_test_suspend,
155 };
156 
157 int dr_modunload_okay = 0;		/* set to non-zero to allow unload */
158 
159 static int	dr_dev_type_to_nt(char *);
160 
161 /*
162  * State transition table.  States the valid transitions for the "board" state.
163  * Recall that a non-zero return value terminates the operation; however,
164  * the h_err value is what really indicates an error, if any.
165  */
166 static int
167 _cmd2index(int c)
168 {
169 	/*
170 	 * Translate DR CMD to index into dr_state_transition.
171 	 */
172 	switch (c) {
173 	case SBD_CMD_CONNECT:		return (0);
174 	case SBD_CMD_DISCONNECT:	return (1);
175 	case SBD_CMD_CONFIGURE:		return (2);
176 	case SBD_CMD_UNCONFIGURE:	return (3);
177 	case SBD_CMD_ASSIGN:		return (4);
178 	case SBD_CMD_UNASSIGN:		return (5);
179 	case SBD_CMD_POWERON:		return (6);
180 	case SBD_CMD_POWEROFF:		return (7);
181 	case SBD_CMD_TEST:		return (8);
182 	default:			return (-1);
183 	}
184 }
185 
186 #define	CMD2INDEX(c)	_cmd2index(c)
187 
188 static struct dr_state_trans {
189 	int	x_cmd;
190 	struct {
191 		int	x_rv;		/* return value of pre_op */
192 		int	x_err;		/* error, if any */
193 	} x_op[DR_STATE_MAX];
194 } dr_state_transition[] = {
195 	{ SBD_CMD_CONNECT,
196 		{
197 			{ 0, 0 },			/* empty */
198 			{ 0, 0 },			/* occupied */
199 			{ -1, ESBD_STATE },		/* connected */
200 			{ -1, ESBD_STATE },		/* unconfigured */
201 			{ -1, ESBD_STATE },		/* partial */
202 			{ -1, ESBD_STATE },		/* configured */
203 			{ -1, ESBD_STATE },		/* release */
204 			{ -1, ESBD_STATE },		/* unreferenced */
205 			{ -1, ESBD_FATAL_STATE },	/* fatal */
206 		}
207 	},
208 	{ SBD_CMD_DISCONNECT,
209 		{
210 			{ -1, ESBD_STATE },		/* empty */
211 			{ 0, 0 },			/* occupied */
212 			{ 0, 0 },			/* connected */
213 			{ 0, 0 },			/* unconfigured */
214 			{ -1, ESBD_STATE },		/* partial */
215 			{ -1, ESBD_STATE },		/* configured */
216 			{ -1, ESBD_STATE },		/* release */
217 			{ -1, ESBD_STATE },		/* unreferenced */
218 			{ -1, ESBD_FATAL_STATE },	/* fatal */
219 		}
220 	},
221 	{ SBD_CMD_CONFIGURE,
222 		{
223 			{ -1, ESBD_STATE },		/* empty */
224 			{ -1, ESBD_STATE },		/* occupied */
225 			{ 0, 0 },			/* connected */
226 			{ 0, 0 },			/* unconfigured */
227 			{ 0, 0 },			/* partial */
228 			{ 0, 0 },			/* configured */
229 			{ -1, ESBD_STATE },		/* release */
230 			{ -1, ESBD_STATE },		/* unreferenced */
231 			{ -1, ESBD_FATAL_STATE },	/* fatal */
232 		}
233 	},
234 	{ SBD_CMD_UNCONFIGURE,
235 		{
236 			{ -1, ESBD_STATE },		/* empty */
237 			{ -1, ESBD_STATE },		/* occupied */
238 			{ -1, ESBD_STATE },		/* connected */
239 			{ -1, ESBD_STATE },		/* unconfigured */
240 			{ 0, 0 },			/* partial */
241 			{ 0, 0 },			/* configured */
242 			{ 0, 0 },			/* release */
243 			{ 0, 0 },			/* unreferenced */
244 			{ -1, ESBD_FATAL_STATE },	/* fatal */
245 		}
246 	},
247 	{ SBD_CMD_ASSIGN,
248 		{
249 			{ 0, 0 },			/* empty */
250 			{ 0, 0 },			/* occupied */
251 			{ -1, ESBD_STATE },		/* connected */
252 			{ -1, ESBD_STATE },		/* unconfigured */
253 			{ -1, ESBD_STATE },		/* partial */
254 			{ -1, ESBD_STATE },		/* configured */
255 			{ -1, ESBD_STATE },		/* release */
256 			{ -1, ESBD_STATE },		/* unreferenced */
257 			{ -1, ESBD_FATAL_STATE },	/* fatal */
258 		}
259 	},
260 	{ SBD_CMD_UNASSIGN,
261 		{
262 			{ 0, 0 },			/* empty */
263 			{ 0, 0 },			/* occupied */
264 			{ -1, ESBD_STATE },		/* connected */
265 			{ -1, ESBD_STATE },		/* unconfigured */
266 			{ -1, ESBD_STATE },		/* partial */
267 			{ -1, ESBD_STATE },		/* configured */
268 			{ -1, ESBD_STATE },		/* release */
269 			{ -1, ESBD_STATE },		/* unreferenced */
270 			{ -1, ESBD_FATAL_STATE },	/* fatal */
271 		}
272 	},
273 	{ SBD_CMD_POWERON,
274 		{
275 			{ 0, 0 },			/* empty */
276 			{ 0, 0 },			/* occupied */
277 			{ -1, ESBD_STATE },		/* connected */
278 			{ -1, ESBD_STATE },		/* unconfigured */
279 			{ -1, ESBD_STATE },		/* partial */
280 			{ -1, ESBD_STATE },		/* configured */
281 			{ -1, ESBD_STATE },		/* release */
282 			{ -1, ESBD_STATE },		/* unreferenced */
283 			{ -1, ESBD_FATAL_STATE },	/* fatal */
284 		}
285 	},
286 	{ SBD_CMD_POWEROFF,
287 		{
288 			{ 0, 0 },			/* empty */
289 			{ 0, 0 },			/* occupied */
290 			{ -1, ESBD_STATE },		/* connected */
291 			{ -1, ESBD_STATE },		/* unconfigured */
292 			{ -1, ESBD_STATE },		/* partial */
293 			{ -1, ESBD_STATE },		/* configured */
294 			{ -1, ESBD_STATE },		/* release */
295 			{ -1, ESBD_STATE },		/* unreferenced */
296 			{ -1, ESBD_FATAL_STATE },	/* fatal */
297 		}
298 	},
299 	{ SBD_CMD_TEST,
300 		{
301 			{ 0, 0 },			/* empty */
302 			{ 0, 0 },			/* occupied */
303 			{ -1, ESBD_STATE },		/* connected */
304 			{ -1, ESBD_STATE },		/* unconfigured */
305 			{ -1, ESBD_STATE },		/* partial */
306 			{ -1, ESBD_STATE },		/* configured */
307 			{ -1, ESBD_STATE },		/* release */
308 			{ -1, ESBD_STATE },		/* unreferenced */
309 			{ -1, ESBD_FATAL_STATE },	/* fatal */
310 		}
311 	},
312 };
313 
314 /*
315  * Global R/W lock to synchronize access across
316  * multiple boards.  Users wanting multi-board access
317  * must grab WRITE lock, others must grab READ lock.
318  */
319 krwlock_t	dr_grwlock;
320 
321 /*
322  * Head of the boardlist used as a reference point for
323  * locating board structs.
324  * TODO: eliminate dr_boardlist
325  */
326 dr_board_t	*dr_boardlist;
327 
328 /*
329  * DR support functions.
330  */
331 static dr_devset_t	dr_dev2devset(sbd_comp_id_t *cid);
332 static int		dr_check_transition(dr_board_t *bp,
333 					dr_devset_t *devsetp,
334 					struct dr_state_trans *transp,
335 					int cmd);
336 static int		dr_check_unit_attached(dr_common_unit_t *dp);
337 static sbd_error_t	*dr_init_devlists(dr_board_t *bp);
338 static void		dr_board_discovery(dr_board_t *bp);
339 static int		dr_board_init(dr_board_t *bp, dev_info_t *dip,
340 					int bd);
341 static void		dr_board_destroy(dr_board_t *bp);
342 static void		dr_board_transition(dr_board_t *bp, dr_state_t st);
343 
344 /*
345  * DR driver (DDI) entry points.
346  */
347 static int	dr_getinfo(dev_info_t *dip, ddi_info_cmd_t cmd,
348 				void *arg, void **result);
349 static int	dr_detach(dev_info_t *dip, ddi_detach_cmd_t cmd);
350 static int	dr_attach(dev_info_t *dip, ddi_attach_cmd_t cmd);
351 static int	dr_probe(dev_info_t *dip);
352 static int	dr_ioctl(dev_t dev, int cmd, intptr_t arg, int mode,
353 				cred_t *cred_p, int *rval_p);
354 static int	dr_close(dev_t dev, int flag, int otyp, cred_t *cred_p);
355 static int	dr_open(dev_t *dev, int flag, int otyp, cred_t *cred_p);
356 
357 /*
358  * DR command processing operations.
359  */
360 
361 static int	dr_copyin_iocmd(dr_handle_t *hp);
362 static int	dr_copyout_iocmd(dr_handle_t *hp);
363 static int	dr_copyout_errs(dr_handle_t *hp);
364 static int	dr_pre_op(dr_handle_t *hp);
365 static int	dr_post_op(dr_handle_t *hp);
366 static int	dr_exec_op(dr_handle_t *hp);
367 static void	dr_assign_board(dr_handle_t *hp);
368 static void	dr_unassign_board(dr_handle_t *hp);
369 static void	dr_connect(dr_handle_t *hp);
370 static int	dr_disconnect(dr_handle_t *hp);
371 static void	dr_dev_configure(dr_handle_t *hp);
372 static void	dr_dev_release(dr_handle_t *hp);
373 static int	dr_dev_unconfigure(dr_handle_t *hp);
374 static void	dr_dev_cancel(dr_handle_t *hp);
375 static int	dr_dev_status(dr_handle_t *hp);
376 static int	dr_get_ncm(dr_handle_t *hp);
377 static int	dr_pt_ioctl(dr_handle_t *hp);
378 static void	dr_poweron_board(dr_handle_t *hp);
379 static void	dr_poweroff_board(dr_handle_t *hp);
380 static void	dr_test_board(dr_handle_t *hp);
381 
382 
383 
384 /*
385  * Autoconfiguration data structures
386  */
387 
388 struct cb_ops dr_cb_ops = {
389 	dr_open,	/* open */
390 	dr_close,	/* close */
391 	nodev,		/* strategy */
392 	nodev,		/* print */
393 	nodev,		/* dump */
394 	nodev,		/* read */
395 	nodev,		/* write */
396 	dr_ioctl,	/* ioctl */
397 	nodev,		/* devmap */
398 	nodev,		/* mmap */
399 	nodev,		/* segmap */
400 	nochpoll,	/* chpoll */
401 	ddi_prop_op,	/* cb_prop_op */
402 	NULL,		/* struct streamtab */
403 	D_NEW | D_MP | D_MTSAFE,	/* compatibility flags */
404 	CB_REV,		/* Rev */
405 	nodev,		/* cb_aread */
406 	nodev		/* cb_awrite */
407 };
408 
409 struct dev_ops dr_dev_ops = {
410 	DEVO_REV,	/* build version */
411 	0,		/* dev ref count */
412 	dr_getinfo,	/* getinfo */
413 	nulldev,	/* identify */
414 	dr_probe,	/* probe */
415 	dr_attach,	/* attach */
416 	dr_detach,	/* detach */
417 	nodev,		/* reset */
418 	&dr_cb_ops,	/* cb_ops */
419 	(struct bus_ops *)NULL, /* bus ops */
420 	NULL,		/* power */
421 	ddi_quiesce_not_needed,	/* quiesce */
422 };
423 
424 extern struct mod_ops mod_driverops;
425 
426 static struct modldrv modldrv = {
427 	&mod_driverops,
428 	"Dynamic Reconfiguration",
429 	&dr_dev_ops
430 };
431 
432 static struct modlinkage modlinkage = {
433 	MODREV_1,
434 	(void *)&modldrv,
435 	NULL
436 };
437 
438 /*
439  * Driver entry points.
440  */
441 int
442 _init(void)
443 {
444 	int	err;
445 
446 	/*
447 	 * If you need to support multiple nodes (instances), then
448 	 * whatever the maximum number of supported nodes is would
449 	 * need to be passed as the third parameter to ddi_soft_state_init().
450 	 * An alternative would be to dynamically fini and re-init the
451 	 * soft state structure each time a node is attached.
452 	 */
453 	err = ddi_soft_state_init((void **)&dr_g.softsp,
454 	    sizeof (dr_softstate_t), 1);
455 	if (err)
456 		return (err);
457 
458 	mutex_init(&dr_g.lock, NULL, MUTEX_DRIVER, NULL);
459 	rw_init(&dr_grwlock, NULL, RW_DEFAULT, NULL);
460 
461 	return (mod_install(&modlinkage));
462 }
463 
464 int
465 _fini(void)
466 {
467 	int	err;
468 
469 	if ((err = mod_remove(&modlinkage)) != 0)
470 		return (err);
471 
472 	mutex_destroy(&dr_g.lock);
473 	rw_destroy(&dr_grwlock);
474 
475 	ddi_soft_state_fini((void **)&dr_g.softsp);
476 
477 	return (0);
478 }
479 
480 int
481 _info(struct modinfo *modinfop)
482 {
483 	return (mod_info(&modlinkage, modinfop));
484 }
485 
486 /*ARGSUSED1*/
487 static int
488 dr_open(dev_t *dev, int flag, int otyp, cred_t *cred_p)
489 {
490 	int		 instance;
491 	dr_softstate_t	*softsp;
492 	dr_board_t	*bp;
493 	/*
494 	 * Don't open unless we've attached.
495 	 */
496 	instance = DR_MINOR2INST(getminor(*dev));
497 	softsp = ddi_get_soft_state(dr_g.softsp, instance);
498 	if (softsp == NULL)
499 		return (ENXIO);
500 
501 	mutex_enter(&softsp->i_lock);
502 	if (!softsp->dr_initialized) {
503 		int		 bd;
504 		int		 rv = 0;
505 
506 		bp = softsp->boards;
507 
508 		/* initialize each array element */
509 		for (bd = 0; bd < MAX_BOARDS; bd++, bp++) {
510 			rv = dr_board_init(bp, softsp->dip, bd);
511 			if (rv)
512 				break;
513 		}
514 
515 		if (rv == 0) {
516 			softsp->dr_initialized = 1;
517 		} else {
518 			/* destroy elements initialized thus far */
519 			while (--bp >= softsp->boards)
520 				dr_board_destroy(bp);
521 
522 
523 			/* TODO: should this be another errno val ? */
524 			mutex_exit(&softsp->i_lock);
525 			return (ENXIO);
526 		}
527 	}
528 	mutex_exit(&softsp->i_lock);
529 
530 	bp = &softsp->boards[DR_MINOR2BNUM(getminor(*dev))];
531 
532 	/*
533 	 * prevent opening of a dyn-ap for a board
534 	 * that does not exist
535 	 */
536 	if (!bp->b_assigned) {
537 		if (drmach_board_lookup(bp->b_num, &bp->b_id) != 0)
538 			return (ENODEV);
539 	}
540 
541 	return (0);
542 }
543 
544 /*ARGSUSED*/
545 static int
546 dr_close(dev_t dev, int flag, int otyp, cred_t *cred_p)
547 {
548 	return (0);
549 }
550 
551 /*
552  * Enable/disable DR features.
553  */
554 int dr_enable = 1;
555 
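/*
 * Main ioctl entry point.  Copies in the sbd command, resolves the
 * component name to a node type, takes the board lock (and keeps the
 * global lock exclusive when memory is involved), then runs the
 * pre-op, exec-op and post-op phases, copies any error back out,
 * and logs a sysevent for commands that change board state.
 */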
556 /*ARGSUSED3*/
557 static int
558 dr_ioctl(dev_t dev, int cmd, intptr_t arg, int mode,
559     cred_t *cred_p, int *rval_p)
560 {
561 	int		rv = 0;
562 	int		instance;
563 	int		bd;
564 	dr_handle_t	*hp;
565 	dr_softstate_t	*softsp;
566 	static fn_t	f = "dr_ioctl";
567 
568 	PR_ALL("%s...\n", f);
569 
570 	instance = DR_MINOR2INST(getminor(dev));
571 	softsp = ddi_get_soft_state(dr_g.softsp, instance);
572 	if (softsp == NULL) {
573 		cmn_err(CE_WARN, "dr%d: module not yet attached", instance);
574 		return (ENXIO);
575 	}
576 
577 	if (!dr_enable) {
578 		switch (cmd) {
579 			case SBD_CMD_STATUS:
580 			case SBD_CMD_GETNCM:
581 			case SBD_CMD_PASSTHRU:
582 				break;
583 			default:
584 				return (ENOTSUP);
585 		}
586 	}
587 
588 	bd = DR_MINOR2BNUM(getminor(dev));
589 	if (bd >= MAX_BOARDS)
590 		return (ENXIO);
591 
592 	/* get and initialize storage for new handle */
593 	hp = GETSTRUCT(dr_handle_t, 1);
594 	hp->h_bd = &softsp->boards[bd];
595 	hp->h_err = NULL;
596 	hp->h_dev = getminor(dev);
597 	hp->h_cmd = cmd;
598 	hp->h_mode = mode;
599 	hp->h_iap = (sbd_ioctl_arg_t *)arg;
600 
601 	/* copy sbd command into handle */
602 	rv = dr_copyin_iocmd(hp);
603 	if (rv) {
604 		FREESTRUCT(hp, dr_handle_t, 1);
605 		return (EINVAL);
606 	}
607 
608 	/* translate canonical name to component type */
609 	if (hp->h_sbdcmd.cmd_cm.c_id.c_name[0] != '\0') {
610 		hp->h_sbdcmd.cmd_cm.c_id.c_type =
611 		    dr_dev_type_to_nt(hp->h_sbdcmd.cmd_cm.c_id.c_name);
612 
613 		PR_ALL("%s: c_name = %s, c_type = %d\n",
614 		    f,
615 		    hp->h_sbdcmd.cmd_cm.c_id.c_name,
616 		    hp->h_sbdcmd.cmd_cm.c_id.c_type);
617 	} else {
618 		/*EMPTY*/
619 		PR_ALL("%s: c_name is NULL\n", f);
620 	}
621 
622 	/* determine scope of operation */
623 	hp->h_devset = dr_dev2devset(&hp->h_sbdcmd.cmd_cm.c_id);
624 
625 	switch (hp->h_cmd) {
626 	case SBD_CMD_STATUS:
627 	case SBD_CMD_GETNCM:
628 		/* no locks needed for these commands */
629 		break;
630 
631 	default:
632 		rw_enter(&dr_grwlock, RW_WRITER);
633 		mutex_enter(&hp->h_bd->b_lock);
634 
635 		/*
636 		 * If we're dealing with memory at all, then we have
637 		 * to keep the "exclusive" global lock held.  This is
638 		 * necessary since we will probably need to look at
639 		 * multiple board structs.  Otherwise, we only have
640 		 * to deal with the board in question and so can drop
641 		 * the global lock to "shared".
642 		 */
643 		rv = DEVSET_IN_SET(hp->h_devset, SBD_COMP_MEM, DEVSET_ANYUNIT);
644 		if (rv == 0)
645 			rw_downgrade(&dr_grwlock);
646 		break;
647 	}
648 	rv = 0;
649 
650 	if (rv == 0)
651 		rv = dr_pre_op(hp);
652 	if (rv == 0)
653 		rv = dr_exec_op(hp);
654 	if (rv == 0)
655 		rv = dr_post_op(hp);
656 
657 	if (rv == -1)
658 		rv = EIO;
659 
660 	if (hp->h_err != NULL)
661 		if (!(rv = dr_copyout_errs(hp)))
662 			rv = EIO;
663 
664 	/* undo locking, if any, done before dr_pre_op */
665 	switch (hp->h_cmd) {
666 	case SBD_CMD_STATUS:
667 	case SBD_CMD_GETNCM:
668 		break;
669 
670 	case SBD_CMD_ASSIGN:
671 	case SBD_CMD_UNASSIGN:
672 	case SBD_CMD_POWERON:
673 	case SBD_CMD_POWEROFF:
674 	case SBD_CMD_CONNECT:
675 	case SBD_CMD_CONFIGURE:
676 	case SBD_CMD_UNCONFIGURE:
677 	case SBD_CMD_DISCONNECT:
678 		/* Board changed state. Log a sysevent. */
679 		if (rv == 0)
680 			(void) drmach_log_sysevent(hp->h_bd->b_num, "",
681 			    SE_SLEEP, 1);
682 		/* Fall through */
683 
684 	default:
685 		mutex_exit(&hp->h_bd->b_lock);
686 		rw_exit(&dr_grwlock);
687 	}
688 
689 	if (hp->h_opts.size != 0)
690 		FREESTRUCT(hp->h_opts.copts, char, hp->h_opts.size);
691 
692 	FREESTRUCT(hp, dr_handle_t, 1);
693 
694 	return (rv);
695 }
696 
697 /*ARGSUSED*/
698 static int
699 dr_probe(dev_info_t *dip)
700 {
701 	return (DDI_PROBE_SUCCESS);
702 }
703 
704 static int
705 dr_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
706 {
707 	int		rv, rv2;
708 	int		bd;
709 	int		instance;
710 	sbd_error_t	*err;
711 	dr_softstate_t	*softsp;
712 
713 	instance = ddi_get_instance(dip);
714 
715 	switch (cmd) {
716 
717 	case DDI_ATTACH:
718 
719 		rw_enter(&dr_grwlock, RW_WRITER);
720 
721 		rv = ddi_soft_state_zalloc(dr_g.softsp, instance);
722 		if (rv != DDI_SUCCESS) {
723 			cmn_err(CE_WARN, "dr%d: failed to alloc soft-state",
724 			    instance);
725 			return (DDI_FAILURE);
726 		}
727 
728 		/* initialize softstate structure */
729 		softsp = ddi_get_soft_state(dr_g.softsp, instance);
730 		softsp->dip = dip;
731 
732 		mutex_init(&softsp->i_lock, NULL, MUTEX_DRIVER, NULL);
733 
734 		/* allocate board array (aka boardlist) */
735 		softsp->boards = GETSTRUCT(dr_board_t, MAX_BOARDS);
736 
737 		/* TODO: eliminate dr_boardlist */
738 		dr_boardlist = softsp->boards;
739 
740 		/* initialize each array element */
741 		rv = DDI_SUCCESS;
742 		for (bd = 0; bd < MAX_BOARDS; bd++) {
743 			dr_board_t	*bp = &softsp->boards[bd];
744 			char		*p, *name;
745 			int		 l, minor_num;
746 
747 			/*
748 			 * initialize the board attachment point path
749 			 * (relative to pseudo) in a form immediately
750 			 * reusable as a cfgadm command argument.
751 			 * TODO: clean this up
752 			 */
753 			p = bp->b_path;
754 			l = sizeof (bp->b_path);
755 			(void) snprintf(p, l, "dr@%d:", instance);
756 			while (*p != '\0') {
757 				l--;
758 				p++;
759 			}
760 
761 			name = p;
762 			err = drmach_board_name(bd, p, l);
763 			if (err) {
764 				sbd_err_clear(&err);
765 				rv = DDI_FAILURE;
766 				break;
767 			}
768 
769 			minor_num = DR_MAKE_MINOR(instance, bd);
770 			rv = ddi_create_minor_node(dip, name, S_IFCHR,
771 			    minor_num, DDI_NT_SBD_ATTACHMENT_POINT, 0);
772 			if (rv != DDI_SUCCESS)
773 				rv = DDI_FAILURE;
774 		}
775 
776 		if (rv == DDI_SUCCESS) {
777 			/*
778 			 * Announce the node's presence.
779 			 */
780 			ddi_report_dev(dip);
781 		} else {
782 			ddi_remove_minor_node(dip, NULL);
783 		}
784 		/*
785 		 * Init registered unsafe devs.
786 		 */
787 		dr_unsafe_devs.devnames = NULL;
788 		rv2 = ddi_prop_lookup_string_array(DDI_DEV_T_ANY, dip,
789 		    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM,
790 		    "unsupported-io-drivers", &dr_unsafe_devs.devnames,
791 		    &dr_unsafe_devs.ndevs);
792 
793 		if (rv2 != DDI_PROP_SUCCESS)
794 			dr_unsafe_devs.ndevs = 0;
795 
796 		rw_exit(&dr_grwlock);
797 		return (rv);
798 
799 	default:
800 		return (DDI_FAILURE);
801 	}
802 
803 	/*NOTREACHED*/
804 }
805 
806 static int
807 dr_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
808 {
809 	int		instance;
810 	dr_softstate_t	*softsp;
811 
812 	switch (cmd) {
813 	case DDI_DETACH:
814 		if (!dr_modunload_okay)
815 			return (DDI_FAILURE);
816 
817 		rw_enter(&dr_grwlock, RW_WRITER);
818 
819 		instance = ddi_get_instance(dip);
820 		softsp = ddi_get_soft_state(dr_g.softsp, instance);
821 
822 		/* TODO: eliminate dr_boardlist */
823 		ASSERT(softsp->boards == dr_boardlist);
824 
825 		/* remove all minor nodes */
826 		ddi_remove_minor_node(dip, NULL);
827 
828 		if (softsp->dr_initialized) {
829 			int bd;
830 
831 			for (bd = 0; bd < MAX_BOARDS; bd++)
832 				dr_board_destroy(&softsp->boards[bd]);
833 		}
834 
835 		FREESTRUCT(softsp->boards, dr_board_t, MAX_BOARDS);
836 		mutex_destroy(&softsp->i_lock);
837 		ddi_soft_state_free(dr_g.softsp, instance);
838 
839 		rw_exit(&dr_grwlock);
840 		return (DDI_SUCCESS);
841 
842 	default:
843 		return (DDI_FAILURE);
844 	}
845 	/*NOTREACHED*/
846 }
847 
848 static int
849 dr_getinfo(dev_info_t *dip, ddi_info_cmd_t cmd, void *arg, void **result)
850 {
851 	_NOTE(ARGUNUSED(dip))
852 
853 	dev_t		dev = (dev_t)arg;
854 	int		instance, error;
855 	dr_softstate_t	*softsp;
856 
857 	*result = NULL;
858 	error = DDI_SUCCESS;
859 	instance = DR_MINOR2INST(getminor(dev));
860 
861 	switch (cmd) {
862 	case DDI_INFO_DEVT2DEVINFO:
863 		softsp = ddi_get_soft_state(dr_g.softsp, instance);
864 		if (softsp == NULL)
865 			return (DDI_FAILURE);
866 		*result = (void *)softsp->dip;
867 		break;
868 
869 	case DDI_INFO_DEVT2INSTANCE:
870 		*result = (void *)(uintptr_t)instance;
871 		break;
872 
873 	default:
874 		error = DDI_FAILURE;
875 		break;
876 	}
877 
878 	return (error);
879 }
880 
881 /*
882  * DR operations.
883  */
884 
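/*
 * Copy the sbd_cmd_t ioctl argument from user space into the handle,
 * converting from the ILP32 layout when necessary, and copy in any
 * option string supplied by the caller.
 */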
885 static int
886 dr_copyin_iocmd(dr_handle_t *hp)
887 {
888 	static fn_t	f = "dr_copyin_iocmd";
889 	sbd_cmd_t	*scp = &hp->h_sbdcmd;
890 
891 	if (hp->h_iap == NULL)
892 		return (EINVAL);
893 
894 	bzero((caddr_t)scp, sizeof (sbd_cmd_t));
895 
896 #ifdef _MULTI_DATAMODEL
897 	if (ddi_model_convert_from(hp->h_mode & FMODELS) == DDI_MODEL_ILP32) {
898 		sbd_cmd32_t	scmd32;
899 
900 		bzero((caddr_t)&scmd32, sizeof (sbd_cmd32_t));
901 
902 		if (ddi_copyin((void *)hp->h_iap, (void *)&scmd32,
903 		    sizeof (sbd_cmd32_t), hp->h_mode)) {
904 			cmn_err(CE_WARN,
905 			    "%s: (32bit) failed to copyin "
906 			    "sbdcmd-struct", f);
907 			return (EFAULT);
908 		}
909 		scp->cmd_cm.c_id.c_type = scmd32.cmd_cm.c_id.c_type;
910 		scp->cmd_cm.c_id.c_unit = scmd32.cmd_cm.c_id.c_unit;
911 		bcopy(&scmd32.cmd_cm.c_id.c_name[0],
912 		    &scp->cmd_cm.c_id.c_name[0], OBP_MAXPROPNAME);
913 		scp->cmd_cm.c_flags = scmd32.cmd_cm.c_flags;
914 		scp->cmd_cm.c_len = scmd32.cmd_cm.c_len;
915 		scp->cmd_cm.c_opts = (caddr_t)(uintptr_t)scmd32.cmd_cm.c_opts;
916 
917 		switch (hp->h_cmd) {
918 		case SBD_CMD_STATUS:
919 			scp->cmd_stat.s_nbytes = scmd32.cmd_stat.s_nbytes;
920 			scp->cmd_stat.s_statp =
921 			    (caddr_t)(uintptr_t)scmd32.cmd_stat.s_statp;
922 			break;
923 		default:
924 			break;
925 
926 		}
927 	} else
928 #endif /* _MULTI_DATAMODEL */
929 	if (ddi_copyin((void *)hp->h_iap, (void *)scp,
930 	    sizeof (sbd_cmd_t), hp->h_mode) != 0) {
931 		cmn_err(CE_WARN,
932 		    "%s: failed to copyin sbdcmd-struct", f);
933 		return (EFAULT);
934 	}
935 
936 	if ((hp->h_opts.size = scp->cmd_cm.c_len) != 0) {
937 		hp->h_opts.copts = GETSTRUCT(char, scp->cmd_cm.c_len + 1);
938 		++hp->h_opts.size;
939 		if (ddi_copyin((void *)scp->cmd_cm.c_opts,
940 		    (void *)hp->h_opts.copts,
941 		    scp->cmd_cm.c_len, hp->h_mode) != 0) {
942 			cmn_err(CE_WARN, "%s: failed to copyin options", f);
943 			return (EFAULT);
944 		}
945 	}
946 	return (0);
947 }
948 
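/*
 * Copy the (possibly updated) sbd_cmd_t in the handle back out to
 * user space, converting to the ILP32 layout when necessary.
 */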
949 static int
950 dr_copyout_iocmd(dr_handle_t *hp)
951 {
952 	static fn_t	f = "dr_copyout_iocmd";
953 	sbd_cmd_t	*scp = &hp->h_sbdcmd;
954 
955 	if (hp->h_iap == NULL)
956 		return (EINVAL);
957 
958 #ifdef _MULTI_DATAMODEL
959 	if (ddi_model_convert_from(hp->h_mode & FMODELS) == DDI_MODEL_ILP32) {
960 		sbd_cmd32_t	scmd32;
961 
962 		scmd32.cmd_cm.c_id.c_type = scp->cmd_cm.c_id.c_type;
963 		scmd32.cmd_cm.c_id.c_unit = scp->cmd_cm.c_id.c_unit;
964 		bcopy(&scp->cmd_cm.c_id.c_name[0],
965 		    &scmd32.cmd_cm.c_id.c_name[0], OBP_MAXPROPNAME);
966 
967 		scmd32.cmd_cm.c_flags = scp->cmd_cm.c_flags;
968 		scmd32.cmd_cm.c_len = scp->cmd_cm.c_len;
969 		scmd32.cmd_cm.c_opts = (caddr32_t)(uintptr_t)scp->cmd_cm.c_opts;
970 
971 		switch (hp->h_cmd) {
972 		case SBD_CMD_GETNCM:
973 			scmd32.cmd_getncm.g_ncm = scp->cmd_getncm.g_ncm;
974 			break;
975 		default:
976 			break;
977 		}
978 
979 		if (ddi_copyout((void *)&scmd32, (void *)hp->h_iap,
980 		    sizeof (sbd_cmd32_t), hp->h_mode)) {
981 			cmn_err(CE_WARN,
982 			    "%s: (32bit) failed to copyout "
983 			    "sbdcmd-struct", f);
984 			return (EFAULT);
985 		}
986 	} else
987 #endif /* _MULTI_DATAMODEL */
988 	if (ddi_copyout((void *)scp, (void *)hp->h_iap,
989 	    sizeof (sbd_cmd_t), hp->h_mode) != 0) {
990 		cmn_err(CE_WARN,
991 		    "%s: failed to copyout sbdcmd-struct", f);
992 		return (EFAULT);
993 	}
994 
995 	return (0);
996 }
997 
998 static int
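/*
 * Copy any error recorded in the handle out to the caller's
 * sbd_ioctl_arg_t, then clear it from the handle.
 */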
999 dr_copyout_errs(dr_handle_t *hp)
1000 {
1001 	static fn_t	f = "dr_copyout_errs";
1002 
1003 	if (hp->h_err == NULL)
1004 		return (0);
1005 
1006 	if (hp->h_err->e_code) {
1007 		PR_ALL("%s: error %d %s",
1008 		    f, hp->h_err->e_code, hp->h_err->e_rsc);
1009 	}
1010 
1011 #ifdef _MULTI_DATAMODEL
1012 	if (ddi_model_convert_from(hp->h_mode & FMODELS) == DDI_MODEL_ILP32) {
1013 		sbd_error32_t	*serr32p;
1014 
1015 		serr32p = GETSTRUCT(sbd_error32_t, 1);
1016 
1017 		serr32p->e_code = hp->h_err->e_code;
1018 		bcopy(&hp->h_err->e_rsc[0], &serr32p->e_rsc[0],
1019 		    MAXPATHLEN);
1020 		if (ddi_copyout((void *)serr32p,
1021 		    (void *)&((sbd_ioctl_arg32_t *)hp->h_iap)->i_err,
1022 		    sizeof (sbd_error32_t), hp->h_mode)) {
1023 			cmn_err(CE_WARN,
1024 			    "%s: (32bit) failed to copyout", f);
1025 			return (EFAULT);
1026 		}
1027 		FREESTRUCT(serr32p, sbd_error32_t, 1);
1028 	} else
1029 #endif /* _MULTI_DATAMODEL */
1030 	if (ddi_copyout((void *)hp->h_err,
1031 	    (void *)&hp->h_iap->i_err,
1032 	    sizeof (sbd_error_t), hp->h_mode)) {
1033 		cmn_err(CE_WARN,
1034 		    "%s: failed to copyout", f);
1035 		return (EFAULT);
1036 	}
1037 
1038 	sbd_err_clear(&hp->h_err);
1039 
1040 	return (0);
1041 
1042 }
1043 
1044 /*
1045  * The pre-op entry point must call sbd_err_set_c(), if needed.
1046  * A non-zero return value indicates failure.
1047  */
1048 static int
1049 dr_pre_op(dr_handle_t *hp)
1050 {
1051 	int		rv = 0, t;
1052 	int		cmd, serr = 0;
1053 	dr_devset_t	devset;
1054 	dr_board_t	*bp = hp->h_bd;
1055 	dr_handle_t	*shp = hp;
1056 	static fn_t	f = "dr_pre_op";
1057 
1058 	cmd = hp->h_cmd;
1059 	devset = shp->h_devset;
1060 
1061 	PR_ALL("%s (cmd = %s)...\n", f, SBD_CMD_STR(cmd));
1062 
1063 	hp->h_err = drmach_pre_op(cmd, bp->b_id, &hp->h_opts);
1064 	if (hp->h_err != NULL) {
1065 		PR_ALL("drmach_pre_op failed for cmd %s(%d)\n",
1066 		    SBD_CMD_STR(cmd), cmd);
1067 		return (-1);
1068 	}
1069 
1070 	/*
1071 	 * Check for valid state transitions.
1072 	 */
1073 	if ((t = CMD2INDEX(cmd)) != -1) {
1074 		struct dr_state_trans	*transp;
1075 		int			state_err;
1076 
1077 		transp = &dr_state_transition[t];
1078 		ASSERT(transp->x_cmd == cmd);
1079 
1080 		state_err = dr_check_transition(bp, &devset, transp, cmd);
1081 
1082 		if (state_err < 0) {
1083 			/*
1084 			 * Invalidate device.
1085 			 */
1086 			dr_op_err(CE_IGNORE, hp, ESBD_INVAL, NULL);
1087 			serr = -1;
1088 			PR_ALL("%s: invalid devset (0x%x)\n",
1089 			    f, (uint_t)devset);
1090 		} else if (state_err != 0) {
1091 			/*
1092 			 * State transition is not a valid one.
1093 			 */
1094 			dr_op_err(CE_IGNORE, hp,
1095 			    transp->x_op[state_err].x_err, NULL);
1096 
1097 			serr = transp->x_op[state_err].x_rv;
1098 
1099 			PR_ALL("%s: invalid state %s(%d) for cmd %s(%d)\n",
1100 			    f, state_str[state_err], state_err,
1101 			    SBD_CMD_STR(cmd), cmd);
1102 		} else {
1103 			shp->h_devset = devset;
1104 		}
1105 	}
1106 
1107 	if (serr) {
1108 		rv = -1;
1109 	}
1110 
1111 	return (rv);
1112 }
1113 
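/*
 * Post-op entry point; gives the platform-specific (drmach) layer a
 * chance to complete the command after it has executed.
 */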
1114 static int
1115 dr_post_op(dr_handle_t *hp)
1116 {
1117 	int		rv = 0;
1118 	int		cmd;
1119 	dr_board_t	*bp = hp->h_bd;
1120 	static fn_t	f = "dr_post_op";
1121 
1122 	cmd = hp->h_cmd;
1123 
1124 	PR_ALL("%s (cmd = %s)...\n", f, SBD_CMD_STR(cmd));
1125 
1126 	/* errors should have been caught by now */
1127 	ASSERT(hp->h_err == NULL);
1128 
1129 	hp->h_err = drmach_post_op(cmd, bp->b_id, &hp->h_opts);
1130 	if (hp->h_err != NULL) {
1131 		PR_ALL("drmach_post_op failed for cmd %s(%d)\n",
1132 		    SBD_CMD_STR(cmd), cmd);
1133 		return (-1);
1134 	}
1135 
1136 	switch (cmd) {
1137 	case SBD_CMD_CONFIGURE:
1138 	case SBD_CMD_UNCONFIGURE:
1139 	case SBD_CMD_CONNECT:
1140 	case SBD_CMD_DISCONNECT:
1141 	case SBD_CMD_GETNCM:
1142 	case SBD_CMD_STATUS:
1143 		break;
1144 
1145 	default:
1146 		break;
1147 	}
1148 
1149 	return (rv);
1150 }
1151 
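/*
 * Dispatch the validated command to the routine that implements it.
 */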
1152 static int
1153 dr_exec_op(dr_handle_t *hp)
1154 {
1155 	int		rv = 0;
1156 	static fn_t	f = "dr_exec_op";
1157 
1158 	/* errors should have been caught by now */
1159 	ASSERT(hp->h_err == NULL);
1160 
1161 	switch (hp->h_cmd) {
1162 	case SBD_CMD_ASSIGN:
1163 		dr_assign_board(hp);
1164 		break;
1165 
1166 	case SBD_CMD_UNASSIGN:
1167 		dr_unassign_board(hp);
1168 		break;
1169 
1170 	case SBD_CMD_POWEROFF:
1171 		dr_poweroff_board(hp);
1172 		break;
1173 
1174 	case SBD_CMD_POWERON:
1175 		dr_poweron_board(hp);
1176 		break;
1177 
1178 	case SBD_CMD_TEST:
1179 		dr_test_board(hp);
1180 		break;
1181 
1182 	case SBD_CMD_CONNECT:
1183 		dr_connect(hp);
1184 		break;
1185 
1186 	case SBD_CMD_CONFIGURE:
1187 		dr_dev_configure(hp);
1188 		break;
1189 
1190 	case SBD_CMD_UNCONFIGURE:
1191 		dr_dev_release(hp);
1192 		if (hp->h_err == NULL)
1193 			rv = dr_dev_unconfigure(hp);
1194 		else
1195 			dr_dev_cancel(hp);
1196 		break;
1197 
1198 	case SBD_CMD_DISCONNECT:
1199 		rv = dr_disconnect(hp);
1200 		break;
1201 
1202 	case SBD_CMD_STATUS:
1203 		rv = dr_dev_status(hp);
1204 		break;
1205 
1206 	case SBD_CMD_GETNCM:
1207 		hp->h_sbdcmd.cmd_getncm.g_ncm = dr_get_ncm(hp);
1208 		rv = dr_copyout_iocmd(hp);
1209 		break;
1210 
1211 	case SBD_CMD_PASSTHRU:
1212 		rv = dr_pt_ioctl(hp);
1213 		break;
1214 
1215 	default:
1216 		cmn_err(CE_WARN,
1217 		    "%s: unknown command (%d)",
1218 		    f, hp->h_cmd);
1219 		break;
1220 	}
1221 
1222 	if (hp->h_err != NULL) {
1223 		rv = -1;
1224 	}
1225 
1226 	return (rv);
1227 }
1228 
1229 static void
1230 dr_assign_board(dr_handle_t *hp)
1231 {
1232 	dr_board_t *bp = hp->h_bd;
1233 
1234 	hp->h_err = drmach_board_assign(bp->b_num, &bp->b_id);
1235 	if (hp->h_err == NULL) {
1236 		bp->b_assigned = 1;
1237 	}
1238 }
1239 
1240 static void
1241 dr_unassign_board(dr_handle_t *hp)
1242 {
1243 	dr_board_t *bp = hp->h_bd;
1244 
1245 	/*
1246 	 * Block out status during unassign.
1247 	 * Not doing cv_wait_sig here as starfire SSP software
1248 	 * ignores unassign failure and removes board from
1249 	 * domain mask causing system panic.
1250 	 * TODO: Change cv_wait to cv_wait_sig when SSP software
1251 	 * handles unassign failure.
1252 	 */
1253 	dr_lock_status(bp);
1254 
1255 	hp->h_err = drmach_board_unassign(bp->b_id);
1256 	if (hp->h_err == NULL) {
1257 		/*
1258 		 * clear drmachid_t handle; not valid after board unassign
1259 		 */
1260 		bp->b_id = 0;
1261 		bp->b_assigned = 0;
1262 	}
1263 
1264 	dr_unlock_status(bp);
1265 }
1266 
1267 static void
1268 dr_poweron_board(dr_handle_t *hp)
1269 {
1270 	dr_board_t *bp = hp->h_bd;
1271 
1272 	hp->h_err = drmach_board_poweron(bp->b_id);
1273 }
1274 
1275 static void
1276 dr_poweroff_board(dr_handle_t *hp)
1277 {
1278 	dr_board_t *bp = hp->h_bd;
1279 
1280 	hp->h_err = drmach_board_poweroff(bp->b_id);
1281 }
1282 
1283 static void
1284 dr_test_board(dr_handle_t *hp)
1285 {
1286 	dr_board_t *bp = hp->h_bd;
1287 	hp->h_err = drmach_board_test(bp->b_id, &hp->h_opts,
1288 	    dr_cmd_flags(hp) & SBD_FLAG_FORCE);
1289 }
1290 
1291 /*
1292  * Create and populate the component nodes for a board.  Assumes that the
1293  * devlists for the board have been initialized.
1294  */
1295 static void
1296 dr_make_comp_nodes(dr_board_t *bp)
1297 {
1298 
1299 	int	i;
1300 
1301 	/*
1302 	 * Make nodes for the individual components on the board.
1303 	 * First we need to initialize the memory unit data structures
1304 	 * in the board structure.
1305 	 */
1306 	for (i = 0; i < MAX_MEM_UNITS_PER_BOARD; i++) {
1307 		dr_mem_unit_t *mp;
1308 
1309 		mp = dr_get_mem_unit(bp, i);
1310 		dr_init_mem_unit(mp);
1311 	}
1312 
1313 	/*
1314 	 * Initialize cpu unit data structures.
1315 	 */
1316 	for (i = 0; i < MAX_CPU_UNITS_PER_BOARD; i++) {
1317 		dr_cpu_unit_t *cp;
1318 
1319 		cp = dr_get_cpu_unit(bp, i);
1320 		dr_init_cpu_unit(cp);
1321 	}
1322 
1323 	/*
1324 	 * Initialize io unit data structures.
1325 	 */
1326 	for (i = 0; i < MAX_IO_UNITS_PER_BOARD; i++) {
1327 		dr_io_unit_t *ip;
1328 
1329 		ip = dr_get_io_unit(bp, i);
1330 		dr_init_io_unit(ip);
1331 	}
1332 
1333 	dr_board_transition(bp, DR_STATE_CONNECTED);
1334 
1335 	bp->b_rstate = SBD_STAT_CONNECTED;
1336 	bp->b_ostate = SBD_STAT_UNCONFIGURED;
1337 	bp->b_cond = SBD_COND_OK;
1338 	(void) drv_getparm(TIME, (void *)&bp->b_time);
1339 
1340 }
1341 
1342 /*
1343  * Only do work if called to operate on an entire board
1344  * which doesn't already have components present.
1345  */
1346 static void
1347 dr_connect(dr_handle_t *hp)
1348 {
1349 	dr_board_t	*bp = hp->h_bd;
1350 	static fn_t	f = "dr_connect";
1351 
1352 	PR_ALL("%s...\n", f);
1353 
1354 	if (DR_DEVS_PRESENT(bp)) {
1355 		/*
1356 		 * Board already has devices present.
1357 		 */
1358 		PR_ALL("%s: devices already present (0x%lx)\n",
1359 		    f, DR_DEVS_PRESENT(bp));
1360 		return;
1361 	}
1362 
1363 	hp->h_err = drmach_board_connect(bp->b_id, &hp->h_opts);
1364 	if (hp->h_err)
1365 		return;
1366 
1367 	hp->h_err = dr_init_devlists(bp);
1368 	if (hp->h_err)
1369 		return;
1370 	else if (bp->b_ndev == 0) {
1371 		dr_op_err(CE_WARN, hp, ESBD_EMPTY_BD, bp->b_path);
1372 		return;
1373 	} else {
1374 		dr_make_comp_nodes(bp);
1375 		return;
1376 	}
1377 	/*NOTREACHED*/
1378 }
1379 
1380 static int
1381 dr_disconnect(dr_handle_t *hp)
1382 {
1383 	int		i;
1384 	dr_devset_t	devset;
1385 	dr_board_t	*bp = hp->h_bd;
1386 	static fn_t	f = "dr_disconnect";
1387 
1388 	PR_ALL("%s...\n", f);
1389 
1390 	/*
1391 	 * Only devices which are present, but
1392 	 * unattached can be disconnected.
1393 	 */
1394 	devset = hp->h_devset & DR_DEVS_PRESENT(bp) &
1395 	    DR_DEVS_UNATTACHED(bp);
1396 
1397 	if ((devset == 0) && DR_DEVS_PRESENT(bp)) {
1398 		dr_op_err(CE_IGNORE, hp, ESBD_EMPTY_BD, bp->b_path);
1399 		return (0);
1400 	}
1401 
1402 	/*
1403 	 * Block out status during disconnect.
1404 	 */
1405 	mutex_enter(&bp->b_slock);
1406 	while (bp->b_sflags & DR_BSLOCK) {
1407 		if (cv_wait_sig(&bp->b_scv, &bp->b_slock) == 0) {
1408 			mutex_exit(&bp->b_slock);
1409 			return (EINTR);
1410 		}
1411 	}
1412 	bp->b_sflags |= DR_BSLOCK;
1413 	mutex_exit(&bp->b_slock);
1414 
1415 	hp->h_err = drmach_board_disconnect(bp->b_id, &hp->h_opts);
1416 
1417 	DR_DEVS_DISCONNECT(bp, devset);
1418 
1419 	ASSERT((DR_DEVS_ATTACHED(bp) & devset) == 0);
1420 
1421 	/*
1422 	 * Update per-device state transitions.
1423 	 */
1424 	for (i = 0; i < MAX_CPU_UNITS_PER_BOARD; i++) {
1425 		dr_cpu_unit_t *cp;
1426 
1427 		if (!DEVSET_IN_SET(devset, SBD_COMP_CPU, i))
1428 			continue;
1429 
1430 		cp = dr_get_cpu_unit(bp, i);
1431 		if (dr_disconnect_cpu(cp) == 0)
1432 			dr_device_transition(&cp->sbc_cm, DR_STATE_EMPTY);
1433 		else if (cp->sbc_cm.sbdev_error != NULL)
1434 			DRERR_SET_C(&hp->h_err, &cp->sbc_cm.sbdev_error);
1435 
1436 		ASSERT(cp->sbc_cm.sbdev_error == NULL);
1437 	}
1438 
1439 	for (i = 0; i < MAX_MEM_UNITS_PER_BOARD; i++) {
1440 		dr_mem_unit_t *mp;
1441 
1442 		if (!DEVSET_IN_SET(devset, SBD_COMP_MEM, i))
1443 			continue;
1444 
1445 		mp = dr_get_mem_unit(bp, i);
1446 		if (dr_disconnect_mem(mp) == 0)
1447 			dr_device_transition(&mp->sbm_cm, DR_STATE_EMPTY);
1448 		else if (mp->sbm_cm.sbdev_error != NULL)
1449 			DRERR_SET_C(&hp->h_err, &mp->sbm_cm.sbdev_error);
1450 
1451 		ASSERT(mp->sbm_cm.sbdev_error == NULL);
1452 	}
1453 
1454 	for (i = 0; i < MAX_IO_UNITS_PER_BOARD; i++) {
1455 		dr_io_unit_t *ip;
1456 
1457 		if (!DEVSET_IN_SET(devset, SBD_COMP_IO, i))
1458 			continue;
1459 
1460 		ip = dr_get_io_unit(bp, i);
1461 		if (dr_disconnect_io(ip) == 0)
1462 			dr_device_transition(&ip->sbi_cm, DR_STATE_EMPTY);
1463 		else if (ip->sbi_cm.sbdev_error != NULL)
1464 			DRERR_SET_C(&hp->h_err, &ip->sbi_cm.sbdev_error);
1465 
1466 		ASSERT(ip->sbi_cm.sbdev_error == NULL);
1467 	}
1468 	if (hp->h_err) {
1469 		/*
1470 		 * For certain errors, drmach_board_disconnect will mark
1471 		 * the board as unusable; in these cases the devtree must
1472 		 * be purged so that status calls will succeed.
1473 		 * XXX
1474 		 * This implementation checks for discrete error codes -
1475 		 * someday, the i/f to drmach_board_disconnect should be
1476 		 * changed to avoid the e_code testing.
1477 		 */
1478 		if ((hp->h_err->e_code == ESTC_MBXRPLY) ||
1479 		    (hp->h_err->e_code == ESTC_MBXRQST) ||
1480 		    (hp->h_err->e_code == ESTC_SMS_ERR_UNRECOVERABLE) ||
1481 		    (hp->h_err->e_code == ESTC_SMS_ERR_RECOVERABLE) ||
1482 		    (hp->h_err->e_code == ESTC_DEPROBE) ||
1483 		    (hp->h_err->e_code == EOPL_DEPROBE)) {
1484 			bp->b_ostate = SBD_STAT_UNCONFIGURED;
1485 			bp->b_busy = 0;
1486 			(void) drv_getparm(TIME, (void *)&bp->b_time);
1487 
1488 			if (drmach_board_deprobe(bp->b_id))
1489 				goto disconnect_done;
1490 			else
1491 				bp->b_ndev = 0;
1492 		}
1493 
1494 		/*
1495 		 * If the disconnect failed in a recoverable way,
1496 		 * more work is required.
1497 		 * XXX
1498 		 * This implementation checks for discrete error codes -
1499 		 * someday, the i/f to drmach_board_disconnect should be
1500 		 * changed to avoid the e_code testing.
1501 		 */
1502 		if ((hp->h_err->e_code == ESTC_MBXRQST) ||
1503 		    (hp->h_err->e_code == ESTC_SMS_ERR_RECOVERABLE) ||
1504 		    (hp->h_err->e_code == ESTC_DEPROBE) ||
1505 		    (hp->h_err->e_code == EOPL_DEPROBE)) {
1506 			/*
1507 			 * With this failure, the board has been deprobed
1508 			 * by IKP, and reprobed.  We've already gotten rid
1509 			 * of the old devtree; now we need to reconstruct it
1510 			 * based on the new IKP probe.
1511 			 */
1512 			if (dr_init_devlists(bp) || (bp->b_ndev == 0))
1513 				goto disconnect_done;
1514 
1515 			dr_make_comp_nodes(bp);
1516 		}
1517 	}
1518 	/*
1519 	 * Once all the components on a board have been disconnected,
1520 	 * the board's state can transition to disconnected and
1521 	 * we can allow the deprobe to take place.
1522 	 */
1523 	if (hp->h_err == NULL && DR_DEVS_PRESENT(bp) == 0) {
1524 		dr_board_transition(bp, DR_STATE_OCCUPIED);
1525 		bp->b_rstate = SBD_STAT_DISCONNECTED;
1526 		bp->b_ostate = SBD_STAT_UNCONFIGURED;
1527 		bp->b_busy = 0;
1528 		(void) drv_getparm(TIME, (void *)&bp->b_time);
1529 
1530 		hp->h_err = drmach_board_deprobe(bp->b_id);
1531 
1532 		if (hp->h_err == NULL) {
1533 			bp->b_ndev = 0;
1534 			dr_board_transition(bp, DR_STATE_EMPTY);
1535 			bp->b_rstate = SBD_STAT_EMPTY;
1536 			(void) drv_getparm(TIME, (void *)&bp->b_time);
1537 		}
1538 	}
1539 
1540 disconnect_done:
1541 	dr_unlock_status(bp);
1542 
1543 	return (0);
1544 }
1545 
1546 /*
1547  * Check if a particular device is a valid target of the current
1548  * operation. Return 1 if it is a valid target, and 0 otherwise.
1549  */
1550 static int
1551 dr_dev_is_target(dr_dev_unit_t *dp, int present_only, uint_t uset)
1552 {
1553 	dr_common_unit_t *cp;
1554 	int		 is_present;
1555 	int		 is_attached;
1556 
1557 	cp = &dp->du_common;
1558 
1559 	/* check if the user requested this device */
1560 	if ((uset & (1 << cp->sbdev_unum)) == 0) {
1561 		return (0);
1562 	}
1563 
1564 	is_present = DR_DEV_IS_PRESENT(cp) ? 1 : 0;
1565 	is_attached = DR_DEV_IS_ATTACHED(cp) ? 1 : 0;
1566 
1567 	/*
1568 	 * If the present_only flag is set, a valid target
1569 	 * must be present but not attached. Otherwise, it
1570 	 * must be both present and attached.
1571 	 */
1572 	if (is_present && (present_only ^ is_attached)) {
1573 		/* sanity check */
1574 		ASSERT(cp->sbdev_id != (drmachid_t)0);
1575 
1576 		return (1);
1577 	}
1578 
1579 	return (0);
1580 }
1581 
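/*
 * Build the list of units of the given type on the board that are
 * targets of the current operation.  The allocated length is stashed
 * in the element just before the returned list pointer so that
 * dr_dev_clean_up() can free it.
 */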
1582 static void
1583 dr_dev_make_list(dr_handle_t *hp, sbd_comp_type_t type, int present_only,
1584     dr_common_unit_t ***devlist, int *devnum)
1585 {
1586 	dr_board_t	*bp = hp->h_bd;
1587 	int		 unum;
1588 	int		 nunits;
1589 	uint_t		 uset;
1590 	int		 len;
1591 	dr_common_unit_t **list, **wp;
1592 
1593 	switch (type) {
1594 	case SBD_COMP_CPU:
1595 		nunits = MAX_CPU_UNITS_PER_BOARD;
1596 		break;
1597 	case SBD_COMP_MEM:
1598 		nunits = MAX_MEM_UNITS_PER_BOARD;
1599 		break;
1600 	case SBD_COMP_IO:
1601 		nunits = MAX_IO_UNITS_PER_BOARD;
1602 		break;
1603 	default:
1604 		/* catch this in debug kernels */
1605 		ASSERT(0);
1606 		break;
1607 	}
1608 
1609 	/* allocate list storage. */
1610 	len = sizeof (dr_common_unit_t *) * (nunits + 1);
1611 	list = kmem_zalloc(len, KM_SLEEP);
1612 
1613 	/* record length of storage in first element */
1614 	*list++ = (dr_common_unit_t *)(uintptr_t)len;
1615 
1616 	/* get bit array signifying which units are to be involved */
1617 	uset = DEVSET_GET_UNITSET(hp->h_devset, type);
1618 
1619 	/*
1620 	 * Adjust the loop count for CPU devices since all cores
1621 	 * in a CMP will be examined in a single iteration.
1622 	 */
1623 	if (type == SBD_COMP_CPU) {
1624 		nunits = MAX_CMP_UNITS_PER_BOARD;
1625 	}
1626 
1627 	/* populate list */
1628 	for (wp = list, unum = 0; unum < nunits; unum++) {
1629 
1630 		dr_dev_unit_t	*dp;
1631 		int		core;
1632 		int		cunum;
1633 
1634 		dp = DR_GET_BOARD_DEVUNIT(bp, type, unum);
1635 		if (dr_dev_is_target(dp, present_only, uset)) {
1636 			*wp++ = &dp->du_common;
1637 		}
1638 
1639 		/* further processing is only required for CPUs */
1640 		if (type != SBD_COMP_CPU) {
1641 			continue;
1642 		}
1643 
1644 		/*
1645 		 * Add any additional cores from the current CPU
1646 		 * device. This is to ensure that all the cores
1647 		 * are grouped together in the device list, and
1648 		 * consequently sequenced together during the actual
1649 		 * operation.
1650 		 */
1651 		for (core = 1; core < MAX_CORES_PER_CMP; core++) {
1652 
1653 			cunum = DR_CMP_CORE_UNUM(unum, core);
1654 			dp = DR_GET_BOARD_DEVUNIT(bp, type, cunum);
1655 
1656 			if (dr_dev_is_target(dp, present_only, uset)) {
1657 				*wp++ = &dp->du_common;
1658 			}
1659 		}
1660 	}
1661 
1662 	/* calculate number of units in list, return result and list pointer */
1663 	*devnum = wp - list;
1664 	*devlist = list;
1665 }
1666 
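/*
 * Move the first unit error found in the list into the handle (if the
 * handle has none), discard any remaining unit errors, and free the
 * list allocated by dr_dev_make_list().
 */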
1667 static void
1668 dr_dev_clean_up(dr_handle_t *hp, dr_common_unit_t **list, int devnum)
1669 {
1670 	int len;
1671 	int n = 0;
1672 	dr_common_unit_t *cp, **rp = list;
1673 
1674 	/*
1675 	 * move first encountered unit error to handle if handle
1676 	 * does not yet have a recorded error.
1677 	 */
1678 	if (hp->h_err == NULL) {
1679 		while (n++ < devnum) {
1680 			cp = *rp++;
1681 			if (cp->sbdev_error != NULL) {
1682 				hp->h_err = cp->sbdev_error;
1683 				cp->sbdev_error = NULL;
1684 				break;
1685 			}
1686 		}
1687 	}
1688 
1689 	/* free remaining unit errors */
1690 	while (n++ < devnum) {
1691 		cp = *rp++;
1692 		if (cp->sbdev_error != NULL) {
1693 			sbd_err_clear(&cp->sbdev_error);
1694 			cp->sbdev_error = NULL;
1695 		}
1696 	}
1697 
1698 	/* free list */
1699 	list -= 1;
1700 	len = (int)(uintptr_t)list[0];
1701 	kmem_free(list, len);
1702 }
1703 
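/*
 * Generic walker used by the configure, release and unconfigure paths:
 * build the target device list, run the pre-op, per-unit op and
 * post-op callbacks, then the board-level state update callback, and
 * finally clean up the list.
 */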
1704 static int
1705 dr_dev_walk(dr_handle_t *hp, sbd_comp_type_t type, int present_only,
1706     int (*pre_op)(dr_handle_t *, dr_common_unit_t **, int),
1707     void (*op)(dr_handle_t *, dr_common_unit_t *),
1708     int (*post_op)(dr_handle_t *, dr_common_unit_t **, int),
1709     void (*board_op)(dr_handle_t *, dr_common_unit_t **, int))
1710 {
1711 	int			  devnum, rv;
1712 	dr_common_unit_t	**devlist;
1713 
1714 	dr_dev_make_list(hp, type, present_only, &devlist, &devnum);
1715 
1716 	rv = 0;
1717 	if (devnum > 0) {
1718 		rv = (*pre_op)(hp, devlist, devnum);
1719 		if (rv == 0) {
1720 			int n;
1721 
1722 			for (n = 0; n < devnum; n++)
1723 				(*op)(hp, devlist[n]);
1724 
1725 			rv = (*post_op)(hp, devlist, devnum);
1726 
1727 			(*board_op)(hp, devlist, devnum);
1728 		}
1729 	}
1730 
1731 	dr_dev_clean_up(hp, devlist, devnum);
1732 	return (rv);
1733 }
1734 
1735 /*ARGSUSED*/
1736 static int
1737 dr_dev_noop(dr_handle_t *hp, dr_common_unit_t **devlist, int devnum)
1738 {
1739 	return (0);
1740 }
1741 
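/*
 * Board-level state update run after an attach pass: mark each newly
 * attached unit CONFIGURED and move the board to the CONFIGURED or
 * PARTIAL state as appropriate.
 */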
1742 static void
1743 dr_attach_update_state(dr_handle_t *hp,
1744     dr_common_unit_t **devlist, int devnum)
1745 {
1746 	dr_board_t	*bp = hp->h_bd;
1747 	int		i;
1748 	dr_devset_t	devs_unattached, devs_present;
1749 	static fn_t	f = "dr_attach_update_state";
1750 
1751 	for (i = 0; i < devnum; i++) {
1752 		dr_common_unit_t *cp = devlist[i];
1753 
1754 		if (dr_check_unit_attached(cp) == -1) {
1755 			PR_ALL("%s: ERROR %s not attached\n",
1756 			    f, cp->sbdev_path);
1757 			continue;
1758 		}
1759 
1760 		DR_DEV_SET_ATTACHED(cp);
1761 
1762 		dr_device_transition(cp, DR_STATE_CONFIGURED);
1763 		cp->sbdev_cond = SBD_COND_OK;
1764 	}
1765 
1766 	devs_present = DR_DEVS_PRESENT(bp);
1767 	devs_unattached = DR_DEVS_UNATTACHED(bp);
1768 
1769 	switch (bp->b_state) {
1770 	case DR_STATE_CONNECTED:
1771 	case DR_STATE_UNCONFIGURED:
1772 		ASSERT(devs_present);
1773 
1774 		if (devs_unattached == 0) {
1775 			/*
1776 			 * All devices finally attached.
1777 			 */
1778 			dr_board_transition(bp, DR_STATE_CONFIGURED);
1779 			hp->h_bd->b_ostate = SBD_STAT_CONFIGURED;
1780 			hp->h_bd->b_rstate = SBD_STAT_CONNECTED;
1781 			hp->h_bd->b_cond = SBD_COND_OK;
1782 			hp->h_bd->b_busy = 0;
1783 			(void) drv_getparm(TIME, (void *)&hp->h_bd->b_time);
1784 		} else if (devs_present != devs_unattached) {
1785 			/*
1786 			 * Only some devices are fully attached.
1787 			 */
1788 			dr_board_transition(bp, DR_STATE_PARTIAL);
1789 			hp->h_bd->b_rstate = SBD_STAT_CONNECTED;
1790 			hp->h_bd->b_ostate = SBD_STAT_CONFIGURED;
1791 			(void) drv_getparm(TIME, (void *)&hp->h_bd->b_time);
1792 		}
1793 		break;
1794 
1795 	case DR_STATE_PARTIAL:
1796 		ASSERT(devs_present);
1797 		/*
1798 		 * All devices finally attached.
1799 		 */
1800 		if (devs_unattached == 0) {
1801 			dr_board_transition(bp, DR_STATE_CONFIGURED);
1802 			hp->h_bd->b_rstate = SBD_STAT_CONNECTED;
1803 			hp->h_bd->b_ostate = SBD_STAT_CONFIGURED;
1804 			hp->h_bd->b_cond = SBD_COND_OK;
1805 			hp->h_bd->b_busy = 0;
1806 			(void) drv_getparm(TIME, (void *)&hp->h_bd->b_time);
1807 		}
1808 		break;
1809 
1810 	default:
1811 		break;
1812 	}
1813 }
1814 
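/*
 * Attach (configure) the targeted CPU, memory and I/O units, in that
 * order.
 */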
1815 static void
1816 dr_dev_configure(dr_handle_t *hp)
1817 {
1818 	int rv;
1819 
1820 	rv = dr_dev_walk(hp, SBD_COMP_CPU, 1,
1821 	    dr_pre_attach_cpu,
1822 	    dr_attach_cpu,
1823 	    dr_post_attach_cpu,
1824 	    dr_attach_update_state);
1825 
1826 	if (rv >= 0) {
1827 		rv = dr_dev_walk(hp, SBD_COMP_MEM, 1,
1828 		    dr_pre_attach_mem,
1829 		    dr_attach_mem,
1830 		    dr_post_attach_mem,
1831 		    dr_attach_update_state);
1832 	}
1833 
1834 	if (rv >= 0) {
1835 		(void) dr_dev_walk(hp, SBD_COMP_IO, 1,
1836 		    dr_pre_attach_io,
1837 		    dr_attach_io,
1838 		    dr_post_attach_io,
1839 		    dr_attach_update_state);
1840 	}
1841 }
1842 
1843 static void
1844 dr_release_update_state(dr_handle_t *hp,
1845     dr_common_unit_t **devlist, int devnum)
1846 {
1847 	_NOTE(ARGUNUSED(devlist))
1848 	_NOTE(ARGUNUSED(devnum))
1849 
1850 	dr_board_t *bp = hp->h_bd;
1851 
1852 	/*
1853 	 * If all of the board's attached devices have now been
1854 	 * released, transition the board to the RELEASE state.
1855 	 */
1856 	if ((bp->b_state != DR_STATE_RELEASE) &&
1857 	    (DR_DEVS_RELEASED(bp) == DR_DEVS_ATTACHED(bp))) {
1858 		dr_board_transition(bp, DR_STATE_RELEASE);
1859 		hp->h_bd->b_busy = 1;
1860 	}
1861 }
1862 
1863 /* called by dr_release_done [below] and dr_release_mem_done [dr_mem.c] */
1864 int
1865 dr_release_dev_done(dr_common_unit_t *cp)
1866 {
1867 	if (cp->sbdev_state == DR_STATE_RELEASE) {
1868 		ASSERT(DR_DEV_IS_RELEASED(cp));
1869 
1870 		DR_DEV_SET_UNREFERENCED(cp);
1871 
1872 		dr_device_transition(cp, DR_STATE_UNREFERENCED);
1873 
1874 		return (0);
1875 	} else {
1876 		return (-1);
1877 	}
1878 }
1879 
1880 static void
1881 dr_release_done(dr_handle_t *hp, dr_common_unit_t *cp)
1882 {
1883 	_NOTE(ARGUNUSED(hp))
1884 
1885 	dr_board_t		*bp;
1886 	static fn_t		f = "dr_release_done";
1887 
1888 	PR_ALL("%s...\n", f);
1889 
1890 	/* get board pointer & sanity check */
1891 	bp = cp->sbdev_bp;
1892 	ASSERT(bp == hp->h_bd);
1893 
1894 	/*
1895 	 * Transfer the device which just completed its release
1896 	 * to the UNREFERENCED state.
1897 	 */
1898 	switch (cp->sbdev_type) {
1899 	case SBD_COMP_MEM:
1900 		dr_release_mem_done(cp);
1901 		break;
1902 
1903 	default:
1904 		DR_DEV_SET_RELEASED(cp);
1905 
1906 		dr_device_transition(cp, DR_STATE_RELEASE);
1907 
1908 		(void) dr_release_dev_done(cp);
1909 		break;
1910 	}
1911 
1912 	/*
1913 	 * If the board is in the RELEASE state and every released
1914 	 * device is now unreferenced, then transfer the board to
1915 	 * the UNREFERENCED state.
1916 	 */
1917 	if ((bp->b_state == DR_STATE_RELEASE) &&
1918 	    (DR_DEVS_RELEASED(bp) == DR_DEVS_UNREFERENCED(bp))) {
1919 		dr_board_transition(bp, DR_STATE_UNREFERENCED);
1920 		bp->b_busy = 1;
1921 		(void) drv_getparm(TIME, (void *)&bp->b_time);
1922 	}
1923 }
1924 
1925 static void
1926 dr_dev_release_mem(dr_handle_t *hp, dr_common_unit_t *dv)
1927 {
1928 	dr_release_mem(dv);
1929 	dr_release_done(hp, dv);
1930 }
1931 
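/*
 * Release the targeted CPU, memory and I/O units in preparation for
 * unconfiguring them.
 */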
1932 static void
1933 dr_dev_release(dr_handle_t *hp)
1934 {
1935 	int rv;
1936 
1937 	hp->h_bd->b_busy = 1;
1938 
1939 	rv = dr_dev_walk(hp, SBD_COMP_CPU, 0,
1940 	    dr_pre_release_cpu,
1941 	    dr_release_done,
1942 	    dr_dev_noop,
1943 	    dr_release_update_state);
1944 
1945 	if (rv >= 0) {
1946 		rv = dr_dev_walk(hp, SBD_COMP_MEM, 0,
1947 		    dr_pre_release_mem,
1948 		    dr_dev_release_mem,
1949 		    dr_dev_noop,
1950 		    dr_release_update_state);
1951 	}
1952 
1953 	if (rv >= 0) {
1954 		rv = dr_dev_walk(hp, SBD_COMP_IO, 0,
1955 		    dr_pre_release_io,
1956 		    dr_release_done,
1957 		    dr_dev_noop,
1958 		    dr_release_update_state);
1959 
1960 	}
1961 
1962 	if (rv < 0)
1963 		hp->h_bd->b_busy = 0;
1964 	/* else, b_busy will be cleared in dr_detach_update_state() */
1965 }
1966 
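/*
 * Board-level state update run after a detach pass: clear the state
 * flags of each detached unit and move the board to the UNCONFIGURED
 * or PARTIAL state as appropriate.
 */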
1967 static void
1968 dr_detach_update_state(dr_handle_t *hp,
1969     dr_common_unit_t **devlist, int devnum)
1970 {
1971 	dr_board_t	*bp = hp->h_bd;
1972 	int		i;
1973 	dr_state_t	bstate;
1974 	static fn_t	f = "dr_detach_update_state";
1975 
1976 	for (i = 0; i < devnum; i++) {
1977 		dr_common_unit_t *cp = devlist[i];
1978 
1979 		if (dr_check_unit_attached(cp) >= 0) {
1980 			/*
1981 			 * Device is still attached probably due
1982 			 * to an error.  Need to keep track of it.
1983 			 */
1984 			PR_ALL("%s: ERROR %s not detached\n",
1985 			    f, cp->sbdev_path);
1986 
1987 			continue;
1988 		}
1989 
1990 		DR_DEV_CLR_ATTACHED(cp);
1991 		DR_DEV_CLR_RELEASED(cp);
1992 		DR_DEV_CLR_UNREFERENCED(cp);
1993 		dr_device_transition(cp, DR_STATE_UNCONFIGURED);
1994 	}
1995 
1996 	bstate = bp->b_state;
1997 	if (bstate != DR_STATE_UNCONFIGURED) {
1998 		if (DR_DEVS_PRESENT(bp) == DR_DEVS_UNATTACHED(bp)) {
1999 			/*
2000 			 * All devices are finally detached.
2001 			 */
2002 			dr_board_transition(bp, DR_STATE_UNCONFIGURED);
2003 			hp->h_bd->b_ostate = SBD_STAT_UNCONFIGURED;
2004 			(void) drv_getparm(TIME, (void *)&hp->h_bd->b_time);
2005 		} else if ((bp->b_state != DR_STATE_PARTIAL) &&
2006 		    (DR_DEVS_ATTACHED(bp) !=
2007 		    DR_DEVS_PRESENT(bp))) {
2008 			/*
2009 			 * Some devices remain attached.
2010 			 */
2011 			dr_board_transition(bp, DR_STATE_PARTIAL);
2012 			(void) drv_getparm(TIME, (void *)&hp->h_bd->b_time);
2013 		}
2014 
2015 		if ((hp->h_devset & DR_DEVS_UNATTACHED(bp)) == hp->h_devset)
2016 			hp->h_bd->b_busy = 0;
2017 	}
2018 }
2019 
2020 static int
2021 dr_dev_unconfigure(dr_handle_t *hp)
2022 {
2023 	dr_board_t	*bp = hp->h_bd;
2024 
2025 	/*
2026 	 * Block out status during IO unconfig.
2027 	 */
2028 	mutex_enter(&bp->b_slock);
2029 	while (bp->b_sflags & DR_BSLOCK) {
2030 		if (cv_wait_sig(&bp->b_scv, &bp->b_slock) == 0) {
2031 			mutex_exit(&bp->b_slock);
2032 			return (EINTR);
2033 		}
2034 	}
2035 	bp->b_sflags |= DR_BSLOCK;
2036 	mutex_exit(&bp->b_slock);
2037 
2038 	(void) dr_dev_walk(hp, SBD_COMP_IO, 0,
2039 	    dr_pre_detach_io,
2040 	    dr_detach_io,
2041 	    dr_post_detach_io,
2042 	    dr_detach_update_state);
2043 
2044 	dr_unlock_status(bp);
2045 
2046 	(void) dr_dev_walk(hp, SBD_COMP_CPU, 0,
2047 	    dr_pre_detach_cpu,
2048 	    dr_detach_cpu,
2049 	    dr_post_detach_cpu,
2050 	    dr_detach_update_state);
2051 
2052 	(void) dr_dev_walk(hp, SBD_COMP_MEM, 0,
2053 	    dr_pre_detach_mem,
2054 	    dr_detach_mem,
2055 	    dr_post_detach_mem,
2056 	    dr_detach_update_state);
2057 
2058 	return (0);
2059 }
2060 
2061 static void
2062 dr_dev_cancel(dr_handle_t *hp)
2063 {
2064 	int		i;
2065 	dr_devset_t	devset;
2066 	dr_board_t	*bp = hp->h_bd;
2067 	static fn_t	f = "dr_dev_cancel";
2068 
2069 	PR_ALL("%s...\n", f);
2070 
2071 	/*
2072 	 * Only devices which have been "released" are
2073 	 * subject to cancellation.
2074 	 */
2075 	devset = hp->h_devset & DR_DEVS_RELEASED(bp);
2076 
2077 	/*
2078 	 * Nothing to do for CPUs or IO other than change back
2079 	 * their state.
2080 	 */
2081 	for (i = 0; i < MAX_CPU_UNITS_PER_BOARD; i++) {
2082 		dr_cpu_unit_t	*cp;
2083 		dr_state_t	nstate;
2084 
2085 		if (!DEVSET_IN_SET(devset, SBD_COMP_CPU, i))
2086 			continue;
2087 
2088 		cp = dr_get_cpu_unit(bp, i);
2089 		if (dr_cancel_cpu(cp) == 0)
2090 			nstate = DR_STATE_CONFIGURED;
2091 		else
2092 			nstate = DR_STATE_FATAL;
2093 
2094 		dr_device_transition(&cp->sbc_cm, nstate);
2095 	}
2096 
2097 	for (i = 0; i < MAX_IO_UNITS_PER_BOARD; i++) {
2098 		dr_io_unit_t *ip;
2099 
2100 		if (!DEVSET_IN_SET(devset, SBD_COMP_IO, i))
2101 			continue;
2102 		ip = dr_get_io_unit(bp, i);
2103 		dr_device_transition(&ip->sbi_cm, DR_STATE_CONFIGURED);
2104 	}
2105 	for (i = 0; i < MAX_MEM_UNITS_PER_BOARD; i++) {
2106 		dr_mem_unit_t	*mp;
2107 		dr_state_t	nstate;
2108 
2109 		if (!DEVSET_IN_SET(devset, SBD_COMP_MEM, i))
2110 			continue;
2111 
2112 		mp = dr_get_mem_unit(bp, i);
2113 		if (dr_cancel_mem(mp) == 0)
2114 			nstate = DR_STATE_CONFIGURED;
2115 		else
2116 			nstate = DR_STATE_FATAL;
2117 
2118 		dr_device_transition(&mp->sbm_cm, nstate);
2119 	}
2120 
2121 	PR_ALL("%s: unreleasing devset (0x%x)\n", f, (uint_t)devset);
2122 
2123 	DR_DEVS_CANCEL(bp, devset);
2124 
2125 	if (DR_DEVS_RELEASED(bp) == 0) {
2126 		dr_state_t	new_state;
2127 		/*
2128 		 * If the board no longer has any released devices
2129 		 * then transfer it back to the CONFIGURED/PARTIAL state.
2130 		 */
2131 		if (DR_DEVS_ATTACHED(bp) == DR_DEVS_PRESENT(bp))
2132 			new_state = DR_STATE_CONFIGURED;
2133 		else
2134 			new_state = DR_STATE_PARTIAL;
2135 		if (bp->b_state != new_state) {
2136 			dr_board_transition(bp, new_state);
2137 		}
2138 		hp->h_bd->b_ostate = SBD_STAT_CONFIGURED;
2139 		hp->h_bd->b_busy = 0;
2140 		(void) drv_getparm(TIME, (void *)&hp->h_bd->b_time);
2141 	}
2142 }
2143 
2144 static int
2145 dr_dev_status(dr_handle_t *hp)
2146 {
2147 	int		nstat, mode, ncm, sz, pbsz, pnstat;
2148 	dr_handle_t	*shp;
2149 	dr_devset_t	devset = 0;
2150 	sbd_stat_t	*dstatp = NULL;
2151 	sbd_dev_stat_t	*devstatp;
2152 	dr_board_t	*bp;
2153 	drmach_status_t	 pstat;
2154 	int		rv = 0;
2155 
2156 #ifdef _MULTI_DATAMODEL
2157 	int sz32 = 0;
2158 #endif /* _MULTI_DATAMODEL */
2159 
2160 	static fn_t	f = "dr_dev_status";
2161 
2162 	PR_ALL("%s...\n", f);
2163 
2164 	mode = hp->h_mode;
2165 	shp = hp;
2166 	devset = shp->h_devset;
2167 	bp = hp->h_bd;
2168 
2169 	/*
2170 	 * Block out disconnect, unassign, IO unconfigure and
2171 	 * devinfo branch creation during status.
2172 	 */
2173 	mutex_enter(&bp->b_slock);
2174 	while (bp->b_sflags & DR_BSLOCK) {
2175 		if (cv_wait_sig(&bp->b_scv, &bp->b_slock) == 0) {
2176 			mutex_exit(&bp->b_slock);
2177 			return (EINTR);
2178 		}
2179 	}
2180 	bp->b_sflags |= DR_BSLOCK;
2181 	mutex_exit(&bp->b_slock);
2182 
2183 	ncm = 1;
2184 	if (hp->h_sbdcmd.cmd_cm.c_id.c_type == SBD_COMP_NONE) {
2185 		if (dr_cmd_flags(hp) & SBD_FLAG_ALLCMP) {
2186 		/*
2187 		 * Calculate the maximum number of components possible
2188 		 * for a board.  This number will be used to size the
2189 		 * status scratch buffer used by board and component
2190 		 * status functions.
2191 		 * This buffer may differ in size from what is provided
2192 		 * by the plugin, since the known component set on the
2193 		 * board may change between the plugin's GETNCM call and
2194 		 * the status call.  Sizing will be adjusted to the plugin's
2195 		 * receptacle buffer at copyout time.
2196 		 */
2197 			ncm = MAX_CPU_UNITS_PER_BOARD +
2198 			    MAX_MEM_UNITS_PER_BOARD +
2199 			    MAX_IO_UNITS_PER_BOARD;
2200 
2201 		} else {
2202 			/*
2203 			 * In the case of c_type == SBD_COMP_NONE, and
2204 			 * SBD_FLAG_ALLCMP not specified, only the board
2205 			 * info is to be returned, no components.
2206 			 */
2207 			ncm = 0;
2208 			devset = 0;
2209 		}
2210 	}
2211 
2212 	sz = sizeof (sbd_stat_t);
2213 	if (ncm > 1)
2214 		sz += sizeof (sbd_dev_stat_t) * (ncm - 1);
2215 
2216 
2217 	pbsz = (int)hp->h_sbdcmd.cmd_stat.s_nbytes;
2218 	pnstat = (pbsz - sizeof (sbd_stat_t))/sizeof (sbd_dev_stat_t);
2219 
2220 	/*
2221 	 * s_nbytes describes the size of the preallocated user
2222 	 * buffer into which the application is expecting to
2223 	 * receive the sbd_stat_t and sbd_dev_stat_t structures.
2224 	 */
2225 
2226 #ifdef _MULTI_DATAMODEL
2227 
2228 	/*
2229 	 * More buffer space is required for the 64bit to 32bit
2230 	 * More buffer space is required for the 64-bit to 32-bit
2231 	 */
2232 	if (ddi_model_convert_from(mode & FMODELS) == DDI_MODEL_ILP32) {
2233 		sz32 = sizeof (sbd_stat32_t);
2234 		if (ncm > 1)
2235 			sz32  += sizeof (sbd_dev_stat32_t) * (ncm - 1);
2236 		pnstat = (pbsz - sizeof (sbd_stat32_t))/
2237 		    sizeof (sbd_dev_stat32_t);
2238 	}
2239 
2240 	sz += sz32;
2241 #endif
2242 	/*
2243 	 * Since one sbd_dev_stat_t is included in the sbd_stat_t,
2244 	 * increment the plugin's nstat count.
2245 	 */
2246 	++pnstat;
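	/*
	 * For example, if the plugin handed in a buffer of
	 * sizeof (sbd_stat_t) + 3 * sizeof (sbd_dev_stat_t) bytes
	 * (sizes chosen for illustration only), the division above
	 * yields pnstat = 3 and the increment brings it to 4, since
	 * the sbd_dev_stat_t embedded in sbd_stat_t (s_stat[0]) also
	 * holds one component status.
	 */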
2247 
2248 	if (bp->b_id == 0) {
2249 		bzero(&pstat, sizeof (pstat));
2250 	} else {
2251 		sbd_error_t *err;
2252 
2253 		err = drmach_status(bp->b_id, &pstat);
2254 		if (err) {
2255 			DRERR_SET_C(&hp->h_err, &err);
2256 			rv = EIO;
2257 			goto status_done;
2258 		}
2259 	}
2260 
2261 	dstatp = (sbd_stat_t *)GETSTRUCT(char, sz);
2262 
2263 	devstatp = &dstatp->s_stat[0];
2264 
2265 	dstatp->s_board = bp->b_num;
2266 
2267 	/*
2268 	 * Detect transitions between empty and disconnected.
2269 	 */
2270 	if (!pstat.empty && (bp->b_rstate == SBD_STAT_EMPTY))
2271 		bp->b_rstate = SBD_STAT_DISCONNECTED;
2272 	else if (pstat.empty && (bp->b_rstate == SBD_STAT_DISCONNECTED))
2273 		bp->b_rstate = SBD_STAT_EMPTY;
2274 
2275 	dstatp->s_rstate = bp->b_rstate;
2276 	dstatp->s_ostate = bp->b_ostate;
2277 	dstatp->s_cond = bp->b_cond = pstat.cond;
2278 	dstatp->s_busy = bp->b_busy | pstat.busy;
2279 	dstatp->s_time = bp->b_time;
2280 	dstatp->s_power = pstat.powered;
2281 	dstatp->s_assigned = bp->b_assigned = pstat.assigned;
2282 	dstatp->s_nstat = nstat = 0;
2283 	bcopy(&pstat.type[0], &dstatp->s_type[0], SBD_TYPE_LEN);
2284 	bcopy(&pstat.info[0], &dstatp->s_info[0], SBD_MAX_INFO);
2285 
2286 	devset &= DR_DEVS_PRESENT(bp);
2287 	if (devset == 0) {
2288 		/*
2289 		 * No device chosen.
2290 		 */
2291 		PR_ALL("%s: no device present\n", f);
2292 	}
2293 
2294 	if (DEVSET_IN_SET(devset, SBD_COMP_CPU, DEVSET_ANYUNIT))
2295 		if ((nstat = dr_cpu_status(hp, devset, devstatp)) > 0) {
2296 			dstatp->s_nstat += nstat;
2297 			devstatp += nstat;
2298 		}
2299 
2300 	if (DEVSET_IN_SET(devset, SBD_COMP_MEM, DEVSET_ANYUNIT))
2301 		if ((nstat = dr_mem_status(hp, devset, devstatp)) > 0) {
2302 			dstatp->s_nstat += nstat;
2303 			devstatp += nstat;
2304 		}
2305 
2306 	if (DEVSET_IN_SET(devset, SBD_COMP_IO, DEVSET_ANYUNIT))
2307 		if ((nstat = dr_io_status(hp, devset, devstatp)) > 0) {
2308 			dstatp->s_nstat += nstat;
2309 			devstatp += nstat;
2310 		}
2311 
2312 	/*
2313 	 * Due to a possible change in number of components between
2314 	 * the time of the plugin's GETNCM call and now, there may be
2315 	 * more or fewer components than the plugin's buffer can
2316 	 * hold.  Adjust s_nstat accordingly.
2317 	 */
2318 
2319 	dstatp->s_nstat = dstatp->s_nstat > pnstat ? pnstat : dstatp->s_nstat;
2320 
2321 
2322 #ifdef _MULTI_DATAMODEL
2323 	if (ddi_model_convert_from(mode & FMODELS) == DDI_MODEL_ILP32) {
2324 		int		i, j;
2325 		sbd_stat32_t	*dstat32p;
2326 
2327 		dstat32p = (sbd_stat32_t *)devstatp;
2328 
2329 		/* Alignment Paranoia */
2330 		if ((ulong_t)dstat32p & 0x1) {
2331 			PR_ALL("%s: alignment: sz=0x%lx dstat32p=0x%p\n",
2332 			    f, sizeof (sbd_stat32_t), (void *)dstat32p);
2333 			DR_OP_INTERNAL_ERROR(hp);
2334 			rv = EINVAL;
2335 			goto status_done;
2336 		}
2337 
2338 		/* paranoia: detect buffer overrun */
2339 		if ((caddr_t)&dstat32p->s_stat[dstatp->s_nstat] >
2340 		    ((caddr_t)dstatp) + sz) {
2341 			DR_OP_INTERNAL_ERROR(hp);
2342 			rv = EINVAL;
2343 			goto status_done;
2344 		}
2345 
2346 		/* copy sbd_stat_t structure members */
2347 #define	_SBD_STAT(t, m) dstat32p->m = (t)dstatp->m
2348 		_SBD_STAT(int32_t, s_board);
2349 		_SBD_STAT(int32_t, s_rstate);
2350 		_SBD_STAT(int32_t, s_ostate);
2351 		_SBD_STAT(int32_t, s_cond);
2352 		_SBD_STAT(int32_t, s_busy);
2353 		_SBD_STAT(time32_t, s_time);
2354 		_SBD_STAT(uint32_t, s_power);
2355 		_SBD_STAT(uint32_t, s_assigned);
2356 		_SBD_STAT(int32_t, s_nstat);
2357 		bcopy(&dstatp->s_type[0], &dstat32p->s_type[0],
2358 		    SBD_TYPE_LEN);
2359 		bcopy(&dstatp->s_info[0], &dstat32p->s_info[0],
2360 		    SBD_MAX_INFO);
2361 #undef _SBD_STAT
2362 
2363 		for (i = 0; i < dstatp->s_nstat; i++) {
2364 			sbd_dev_stat_t		*dsp = &dstatp->s_stat[i];
2365 			sbd_dev_stat32_t	*ds32p = &dstat32p->s_stat[i];
2366 #define	_SBD_DEV_STAT(t, m) ds32p->m = (t)dsp->m
2367 
2368 			/* copy sbd_cm_stat_t structure members */
2369 			_SBD_DEV_STAT(int32_t, ds_type);
2370 			_SBD_DEV_STAT(int32_t, ds_unit);
2371 			_SBD_DEV_STAT(int32_t, ds_ostate);
2372 			_SBD_DEV_STAT(int32_t, ds_cond);
2373 			_SBD_DEV_STAT(int32_t, ds_busy);
2374 			_SBD_DEV_STAT(int32_t, ds_suspend);
2375 			_SBD_DEV_STAT(time32_t, ds_time);
2376 			bcopy(&dsp->ds_name[0], &ds32p->ds_name[0],
2377 			    OBP_MAXPROPNAME);
2378 
2379 			switch (dsp->ds_type) {
2380 			case SBD_COMP_CPU:
2381 				/* copy sbd_cpu_stat_t structure members */
2382 				_SBD_DEV_STAT(int32_t, d_cpu.cs_isbootproc);
2383 				_SBD_DEV_STAT(int32_t, d_cpu.cs_cpuid);
2384 				_SBD_DEV_STAT(int32_t, d_cpu.cs_speed);
2385 				_SBD_DEV_STAT(int32_t, d_cpu.cs_ecache);
2386 				break;
2387 
2388 			case SBD_COMP_MEM:
2389 				/* copy sbd_mem_stat_t structure members */
2390 				_SBD_DEV_STAT(int32_t, d_mem.ms_interleave);
2391 				_SBD_DEV_STAT(uint32_t, d_mem.ms_basepfn);
2392 				_SBD_DEV_STAT(uint32_t, d_mem.ms_totpages);
2393 				_SBD_DEV_STAT(uint32_t, d_mem.ms_detpages);
2394 				_SBD_DEV_STAT(int32_t, d_mem.ms_pageslost);
2395 				_SBD_DEV_STAT(uint32_t, d_mem.ms_managed_pages);
2396 				_SBD_DEV_STAT(uint32_t, d_mem.ms_noreloc_pages);
2397 				_SBD_DEV_STAT(uint32_t, d_mem.ms_noreloc_first);
2398 				_SBD_DEV_STAT(uint32_t, d_mem.ms_noreloc_last);
2399 				_SBD_DEV_STAT(int32_t, d_mem.ms_cage_enabled);
2400 				_SBD_DEV_STAT(int32_t, d_mem.ms_peer_is_target);
2401 				bcopy(&dsp->d_mem.ms_peer_ap_id[0],
2402 				    &ds32p->d_mem.ms_peer_ap_id[0],
2403 				    sizeof (ds32p->d_mem.ms_peer_ap_id));
2404 				break;
2405 
2406 			case SBD_COMP_IO:
2407 				/* copy sbd_io_stat_t structure members */
2408 				_SBD_DEV_STAT(int32_t, d_io.is_referenced);
2409 				_SBD_DEV_STAT(int32_t, d_io.is_unsafe_count);
2410 
2411 				for (j = 0; j < SBD_MAX_UNSAFE; j++)
2412 					_SBD_DEV_STAT(int32_t,
2413 					    d_io.is_unsafe_list[j]);
2414 
2415 				bcopy(&dsp->d_io.is_pathname[0],
2416 				    &ds32p->d_io.is_pathname[0], MAXPATHLEN);
2417 				break;
2418 
2419 			case SBD_COMP_CMP:
2420 				/* copy sbd_cmp_stat_t structure members */
2421 				bcopy(&dsp->d_cmp.ps_cpuid[0],
2422 				    &ds32p->d_cmp.ps_cpuid[0],
2423 				    sizeof (ds32p->d_cmp.ps_cpuid));
2424 				_SBD_DEV_STAT(int32_t, d_cmp.ps_ncores);
2425 				_SBD_DEV_STAT(int32_t, d_cmp.ps_speed);
2426 				_SBD_DEV_STAT(int32_t, d_cmp.ps_ecache);
2427 				break;
2428 
2429 			default:
2430 				cmn_err(CE_WARN, "%s: unknown dev type (%d)",
2431 				    f, (int)dsp->ds_type);
2432 				rv = EFAULT;
2433 				goto status_done;
2434 			}
2435 #undef _SBD_DEV_STAT
2436 		}
2437 
2438 
2439 		if (ddi_copyout((void *)dstat32p,
2440 		    hp->h_sbdcmd.cmd_stat.s_statp, pbsz, mode) != 0) {
2441 			cmn_err(CE_WARN,
2442 			    "%s: failed to copyout status "
2443 			    "for board %d", f, bp->b_num);
2444 			rv = EFAULT;
2445 			goto status_done;
2446 		}
2447 	} else
2448 #endif /* _MULTI_DATAMODEL */
2449 
2450 	if (ddi_copyout((void *)dstatp, hp->h_sbdcmd.cmd_stat.s_statp,
2451 	    pbsz, mode) != 0) {
2452 		cmn_err(CE_WARN,
2453 		    "%s: failed to copyout status for board %d",
2454 		    f, bp->b_num);
2455 		rv = EFAULT;
2456 		goto status_done;
2457 	}
2458 
2459 status_done:
2460 	if (dstatp != NULL)
2461 		FREESTRUCT(dstatp, char, sz);
2462 
2463 	dr_unlock_status(bp);
2464 
2465 	return (rv);
2466 }
2467 
2468 static int
2469 dr_get_ncm(dr_handle_t *hp)
2470 {
2471 	int		i;
2472 	int		ncm = 0;
2473 	dr_devset_t	devset;
2474 
2475 	devset = DR_DEVS_PRESENT(hp->h_bd);
2476 	if (hp->h_sbdcmd.cmd_cm.c_id.c_type != SBD_COMP_NONE)
2477 		devset &= DEVSET(hp->h_sbdcmd.cmd_cm.c_id.c_type,
2478 		    DEVSET_ANYUNIT);
2479 
2480 	/*
2481 	 * Handle CPUs first to deal with possible CMP
2482 	 * devices. If the CPU is a CMP, we need to only
2483 	 * increment ncm once even if there are multiple
2484 	 * cores for that CMP present in the devset.
2485 	 */
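	/*
	 * For example (assuming the devset layout described in
	 * dr_dev2devset(), where a CMP's devset bits cover its
	 * individual cores): a board with one CMP that has two cores
	 * present, one memory unit and two I/O units would yield
	 * ncm = 1 + 1 + 2 = 4.
	 */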
2486 	for (i = 0; i < MAX_CMP_UNITS_PER_BOARD; i++) {
2487 		if (devset & DEVSET(SBD_COMP_CMP, i)) {
2488 			ncm++;
2489 		}
2490 	}
2491 
2492 	/* eliminate the CPU information from the devset */
2493 	devset &= ~(DEVSET(SBD_COMP_CMP, DEVSET_ANYUNIT));
2494 
2495 	for (i = 0; i < (sizeof (dr_devset_t) * 8); i++) {
2496 		ncm += devset & 0x1;
2497 		devset >>= 1;
2498 	}
2499 
2500 	return (ncm);
2501 }
2502 
2503 /* used by dr_mem.c */
2504 /* TODO: eliminate dr_boardlist */
2505 dr_board_t *
2506 dr_lookup_board(int board_num)
2507 {
2508 	dr_board_t *bp;
2509 
2510 	ASSERT(board_num >= 0 && board_num < MAX_BOARDS);
2511 
2512 	bp = &dr_boardlist[board_num];
2513 	ASSERT(bp->b_num == board_num);
2514 
2515 	return (bp);
2516 }
2517 
2518 static dr_dev_unit_t *
2519 dr_get_dev_unit(dr_board_t *bp, sbd_comp_type_t nt, int unit_num)
2520 {
2521 	dr_dev_unit_t	*dp;
2522 
2523 	dp = DR_GET_BOARD_DEVUNIT(bp, nt, unit_num);
2524 	ASSERT(dp->du_common.sbdev_bp == bp);
2525 	ASSERT(dp->du_common.sbdev_unum == unit_num);
2526 	ASSERT(dp->du_common.sbdev_type == nt);
2527 
2528 	return (dp);
2529 }
2530 
2531 dr_cpu_unit_t *
2532 dr_get_cpu_unit(dr_board_t *bp, int unit_num)
2533 {
2534 	dr_dev_unit_t	*dp;
2535 
2536 	ASSERT(unit_num >= 0 && unit_num < MAX_CPU_UNITS_PER_BOARD);
2537 
2538 	dp = dr_get_dev_unit(bp, SBD_COMP_CPU, unit_num);
2539 	return (&dp->du_cpu);
2540 }
2541 
2542 dr_mem_unit_t *
2543 dr_get_mem_unit(dr_board_t *bp, int unit_num)
2544 {
2545 	dr_dev_unit_t	*dp;
2546 
2547 	ASSERT(unit_num >= 0 && unit_num < MAX_MEM_UNITS_PER_BOARD);
2548 
2549 	dp = dr_get_dev_unit(bp, SBD_COMP_MEM, unit_num);
2550 	return (&dp->du_mem);
2551 }
2552 
2553 dr_io_unit_t *
2554 dr_get_io_unit(dr_board_t *bp, int unit_num)
2555 {
2556 	dr_dev_unit_t	*dp;
2557 
2558 	ASSERT(unit_num >= 0 && unit_num < MAX_IO_UNITS_PER_BOARD);
2559 
2560 	dp = dr_get_dev_unit(bp, SBD_COMP_IO, unit_num);
2561 	return (&dp->du_io);
2562 }
2563 
2564 dr_common_unit_t *
2565 dr_get_common_unit(dr_board_t *bp, sbd_comp_type_t nt, int unum)
2566 {
2567 	dr_dev_unit_t	*dp;
2568 
2569 	dp = dr_get_dev_unit(bp, nt, unum);
2570 	return (&dp->du_common);
2571 }
2572 
2573 static dr_devset_t
2574 dr_dev2devset(sbd_comp_id_t *cid)
2575 {
2576 	static fn_t	f = "dr_dev2devset";
2577 
2578 	dr_devset_t	devset;
2579 	int		unit = cid->c_unit;
2580 
2581 	switch (cid->c_type) {
2582 		case SBD_COMP_NONE:
2583 			devset =  DEVSET(SBD_COMP_CPU, DEVSET_ANYUNIT);
2584 			devset |= DEVSET(SBD_COMP_MEM, DEVSET_ANYUNIT);
2585 			devset |= DEVSET(SBD_COMP_IO,  DEVSET_ANYUNIT);
2586 			PR_ALL("%s: COMP_NONE devset = 0x%lx\n", f, devset);
2587 			break;
2588 
2589 		case SBD_COMP_CPU:
2590 			if ((unit >= MAX_CPU_UNITS_PER_BOARD) || (unit < 0)) {
2591 				cmn_err(CE_WARN,
2592 				    "%s: invalid cpu unit# = %d",
2593 				    f, unit);
2594 				devset = 0;
2595 			} else {
2596 				/*
2597 				 * Generate a devset that includes all the
2598 				 * cores of a CMP device. If this is not a
2599 				 * CMP, the extra cores will be eliminated
2600 				 * later since they are not present. This is
2601 				 * also true for CMP devices that do not have
2602 				 * all cores active.
2603 				 */
2604 				devset = DEVSET(SBD_COMP_CMP, unit);
2605 			}
2606 
2607 			PR_ALL("%s: CPU devset = 0x%lx\n", f, devset);
2608 			break;
2609 
2610 		case SBD_COMP_MEM:
2611 			if (unit == SBD_NULL_UNIT) {
2612 				unit = 0;
2613 				cid->c_unit = 0;
2614 			}
2615 
2616 			if ((unit >= MAX_MEM_UNITS_PER_BOARD) || (unit < 0)) {
2617 				cmn_err(CE_WARN,
2618 				    "%s: invalid mem unit# = %d",
2619 				    f, unit);
2620 				devset = 0;
2621 			} else
2622 				devset = DEVSET(cid->c_type, unit);
2623 
2624 			PR_ALL("%s: MEM devset = 0x%lx\n", f, devset);
2625 			break;
2626 
2627 		case SBD_COMP_IO:
2628 			if ((unit >= MAX_IO_UNITS_PER_BOARD) || (unit < 0)) {
2629 				cmn_err(CE_WARN,
2630 				    "%s: invalid io unit# = %d",
2631 				    f, unit);
2632 				devset = 0;
2633 			} else
2634 				devset = DEVSET(cid->c_type, unit);
2635 
2636 			PR_ALL("%s: IO devset = 0x%lx\n", f, devset);
2637 			break;
2638 
2639 		default:
2640 		case SBD_COMP_UNKNOWN:
2641 			devset = 0;
2642 			break;
2643 	}
2644 
2645 	return (devset);
2646 }
2647 
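/*
 * A worked example of dr_dev2devset() (unit number chosen only for
 * illustration): a request naming CPU unit 1 produces
 * DEVSET(SBD_COMP_CMP, 1), i.e. bits for every possible core of
 * CMP 1; cores that are not actually present are dropped later when
 * the devset is intersected with DR_DEVS_PRESENT().
 */
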
2648 /*
2649  * Converts a dynamic attachment point name to an SBD_COMP_* type.
2650  * Returns SBD_COMP_UNKNOWN if the name is not recognized.
2651  */
2652 static int
2653 dr_dev_type_to_nt(char *type)
2654 {
2655 	int i;
2656 
2657 	for (i = 0; dr_devattr[i].s_nodetype != SBD_COMP_UNKNOWN; i++)
2658 		if (strcmp(dr_devattr[i].s_devtype, type) == 0)
2659 			break;
2660 
2661 	return (dr_devattr[i].s_nodetype);
2662 }
2663 
2664 /*
2665  * Converts an SBD_COMP_* type to a dynamic attachment point name.
2666  * Returns NULL if the SBD_COMP_* type is not recognized.
2667  */
2668 char *
2669 dr_nt_to_dev_type(int nt)
2670 {
2671 	int i;
2672 
2673 	for (i = 0; dr_devattr[i].s_nodetype != SBD_COMP_UNKNOWN; i++)
2674 		if (dr_devattr[i].s_nodetype == nt)
2675 			break;
2676 
2677 	return (dr_devattr[i].s_devtype);
2678 }
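
/*
 * A minimal usage sketch of the two mappings above.  The literal
 * device-type strings live in the dr_devattr[] table; "cpu" below is
 * shown only as a plausible example of such a name:
 *
 *	int nt = dr_dev_type_to_nt("cpu");
 *	if (nt != SBD_COMP_UNKNOWN)
 *		PR_ALL("maps back to %s\n", dr_nt_to_dev_type(nt));
 */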
2679 
2680 
2681 /*
2682  * State transition policy is that if there is some component for which
2683  * the state transition is valid, then let it through. The exception is
2684  * SBD_CMD_DISCONNECT. On disconnect, the state transition must be valid
2685  * for ALL components.
2686  * Returns the state that is in error, if any.
2687  */
2688 static int
2689 dr_check_transition(dr_board_t *bp, dr_devset_t *devsetp,
2690     struct dr_state_trans *transp, int cmd)
2691 {
2692 	int			s, ut;
2693 	int			state_err = 0;
2694 	dr_devset_t		devset;
2695 	dr_common_unit_t	*cp;
2696 	static fn_t		f = "dr_check_transition";
2697 
2698 	devset = *devsetp;
2699 
2700 	if (DEVSET_IN_SET(devset, SBD_COMP_CPU, DEVSET_ANYUNIT)) {
2701 		for (ut = 0; ut < MAX_CPU_UNITS_PER_BOARD; ut++) {
2702 			if (DEVSET_IN_SET(devset, SBD_COMP_CPU, ut) == 0)
2703 				continue;
2704 
2705 			cp = dr_get_common_unit(bp, SBD_COMP_CPU, ut);
2706 			s = (int)cp->sbdev_state;
2707 			if (!DR_DEV_IS_PRESENT(cp)) {
2708 				DEVSET_DEL(devset, SBD_COMP_CPU, ut);
2709 			} else {
2710 				if (transp->x_op[s].x_rv) {
2711 					if (!state_err)
2712 						state_err = s;
2713 					DEVSET_DEL(devset, SBD_COMP_CPU, ut);
2714 				}
2715 			}
2716 		}
2717 	}
2718 	if (DEVSET_IN_SET(devset, SBD_COMP_MEM, DEVSET_ANYUNIT)) {
2719 		for (ut = 0; ut < MAX_MEM_UNITS_PER_BOARD; ut++) {
2720 			if (DEVSET_IN_SET(devset, SBD_COMP_MEM, ut) == 0)
2721 				continue;
2722 
2723 			cp = dr_get_common_unit(bp, SBD_COMP_MEM, ut);
2724 			s = (int)cp->sbdev_state;
2725 			if (!DR_DEV_IS_PRESENT(cp)) {
2726 				DEVSET_DEL(devset, SBD_COMP_MEM, ut);
2727 			} else {
2728 				if (transp->x_op[s].x_rv) {
2729 					if (!state_err)
2730 						state_err = s;
2731 					DEVSET_DEL(devset, SBD_COMP_MEM, ut);
2732 				}
2733 			}
2734 		}
2735 	}
2736 	if (DEVSET_IN_SET(devset, SBD_COMP_IO, DEVSET_ANYUNIT)) {
2737 		for (ut = 0; ut < MAX_IO_UNITS_PER_BOARD; ut++) {
2738 			if (DEVSET_IN_SET(devset, SBD_COMP_IO, ut) == 0)
2739 				continue;
2740 
2741 			cp = dr_get_common_unit(bp, SBD_COMP_IO, ut);
2742 			s = (int)cp->sbdev_state;
2743 			if (!DR_DEV_IS_PRESENT(cp)) {
2744 				DEVSET_DEL(devset, SBD_COMP_IO, ut);
2745 			} else {
2746 				if (transp->x_op[s].x_rv) {
2747 					if (!state_err)
2748 						state_err = s;
2749 					DEVSET_DEL(devset, SBD_COMP_IO, ut);
2750 				}
2751 			}
2752 		}
2753 	}
2754 
2755 	PR_ALL("%s: requested devset = 0x%x, final devset = 0x%x\n",
2756 	    f, (uint_t)*devsetp, (uint_t)devset);
2757 
2758 	*devsetp = devset;
2759 	/*
2760 	 * If there are some remaining components for which
2761 	 * this state transition is valid, then allow them
2762 	 * through; otherwise return the state error.  The exception
2763 	 * is SBD_CMD_DISCONNECT.
2764 	 * On disconnect, the state transition must be valid for ALL
2765 	 * components.
2766 	 */
2767 	if (cmd == SBD_CMD_DISCONNECT)
2768 		return (state_err);
2769 	return (devset ? 0 : state_err);
2770 }
2771 
2772 void
2773 dr_device_transition(dr_common_unit_t *cp, dr_state_t st)
2774 {
2775 	PR_STATE("%s STATE %s(%d) -> %s(%d)\n",
2776 	    cp->sbdev_path,
2777 	    state_str[cp->sbdev_state], cp->sbdev_state,
2778 	    state_str[st], st);
2779 
2780 	cp->sbdev_state = st;
2781 	if (st == DR_STATE_CONFIGURED) {
2782 		cp->sbdev_ostate = SBD_STAT_CONFIGURED;
2783 		if (cp->sbdev_bp->b_ostate != SBD_STAT_CONFIGURED) {
2784 			cp->sbdev_bp->b_ostate = SBD_STAT_CONFIGURED;
2785 			(void) drv_getparm(TIME,
2786 			    (void *) &cp->sbdev_bp->b_time);
2787 		}
2788 	} else
2789 		cp->sbdev_ostate = SBD_STAT_UNCONFIGURED;
2790 
2791 	(void) drv_getparm(TIME, (void *) &cp->sbdev_time);
2792 }
2793 
2794 static void
2795 dr_board_transition(dr_board_t *bp, dr_state_t st)
2796 {
2797 	PR_STATE("BOARD %d STATE: %s(%d) -> %s(%d)\n",
2798 	    bp->b_num,
2799 	    state_str[bp->b_state], bp->b_state,
2800 	    state_str[st], st);
2801 
2802 	bp->b_state = st;
2803 }
2804 
2805 void
2806 dr_op_err(int ce, dr_handle_t *hp, int code, char *fmt, ...)
2807 {
2808 	sbd_error_t	*err;
2809 	va_list		args;
2810 
2811 	va_start(args, fmt);
2812 	err = drerr_new_v(code, fmt, args);
2813 	va_end(args);
2814 
2815 	if (ce != CE_IGNORE)
2816 		sbd_err_log(err, ce);
2817 
2818 	DRERR_SET_C(&hp->h_err, &err);
2819 }
2820 
2821 void
2822 dr_dev_err(int ce, dr_common_unit_t *cp, int code)
2823 {
2824 	sbd_error_t	*err;
2825 
2826 	err = drerr_new(0, code, cp->sbdev_path, NULL);
2827 
2828 	if (ce != CE_IGNORE)
2829 		sbd_err_log(err, ce);
2830 
2831 	DRERR_SET_C(&cp->sbdev_error, &err);
2832 }
2833 
2834 /*
2835  * A callback routine.  Called from the drmach layer as a result of
2836  * a call to drmach_board_find_devices() from dr_init_devlists().
2837  */
2838 static sbd_error_t *
2839 dr_dev_found(void *data, const char *name, int unum, drmachid_t id)
2840 {
2841 	dr_board_t	*bp = data;
2842 	dr_dev_unit_t	*dp;
2843 	int		 nt;
2844 	static fn_t	f = "dr_dev_found";
2845 
2846 	PR_ALL("%s (board = %d, name = %s, unum = %d, id = %p)...\n",
2847 	    f, bp->b_num, name, unum, id);
2848 
2849 	nt = dr_dev_type_to_nt((char *)name);
2850 	if (nt == SBD_COMP_UNKNOWN) {
2851 		/*
2852 		 * This should not happen.  If it does, it indicates a
2853 		 * mismatch between the devices supported by the drmach
2854 		 * layer and those supported by this layer.
2855 		 */
2856 		return (DR_INTERNAL_ERROR());
2857 	}
2858 
2859 	dp = DR_GET_BOARD_DEVUNIT(bp, nt, unum);
2860 
2861 	/* sanity check */
2862 	ASSERT(dp->du_common.sbdev_bp == bp);
2863 	ASSERT(dp->du_common.sbdev_unum == unum);
2864 	ASSERT(dp->du_common.sbdev_type == nt);
2865 
2866 	/* render dynamic attachment point path of this unit */
2867 	(void) snprintf(dp->du_common.sbdev_path,
2868 	    sizeof (dp->du_common.sbdev_path),
2869 	    (nt == SBD_COMP_MEM ? "%s::%s" : "%s::%s%d"),
2870 	    bp->b_path, name, DR_UNUM2SBD_UNUM(unum, nt));
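	/*
	 * The resulting paths have the form "<board path>::<name><unit>"
	 * for CPU and I/O units (e.g. "...::cpu0"; the literal names come
	 * from the drmach layer), while memory gets no unit suffix.
	 */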
2871 
2872 	dp->du_common.sbdev_id = id;
2873 	DR_DEV_SET_PRESENT(&dp->du_common);
2874 
2875 	bp->b_ndev++;
2876 
2877 	return (NULL);
2878 }
2879 
2880 static sbd_error_t *
2881 dr_init_devlists(dr_board_t *bp)
2882 {
2883 	int		i;
2884 	sbd_error_t	*err;
2885 	dr_dev_unit_t	*dp;
2886 	static fn_t	f = "dr_init_devlists";
2887 
2888 	PR_ALL("%s (%s)...\n", f, bp->b_path);
2889 
2890 	/* sanity check */
2891 	ASSERT(bp->b_ndev == 0);
2892 
2893 	DR_DEVS_DISCONNECT(bp, (uint_t)-1);
2894 
2895 	/*
2896 	 * This routine builds the board's devlist and initializes
2897 	 * the common portion of the unit data structures.
2898 	 * Note: because the common portion is considered
2899 	 * uninitialized, the dr_get_*_unit() routines cannot
2900 	 * be used here.
2901 	 */
2902 
2903 	/*
2904 	 * Clear out old entries, if any.
2905 	 */
2906 	for (i = 0; i < MAX_CPU_UNITS_PER_BOARD; i++) {
2907 		dp = DR_GET_BOARD_DEVUNIT(bp, SBD_COMP_CPU, i);
2908 
2909 		bzero(dp, sizeof (*dp));
2910 		dp->du_common.sbdev_bp = bp;
2911 		dp->du_common.sbdev_unum = i;
2912 		dp->du_common.sbdev_type = SBD_COMP_CPU;
2913 	}
2914 
2915 	for (i = 0; i < MAX_MEM_UNITS_PER_BOARD; i++) {
2916 		dp = DR_GET_BOARD_DEVUNIT(bp, SBD_COMP_MEM, i);
2917 
2918 		bzero(dp, sizeof (*dp));
2919 		dp->du_common.sbdev_bp = bp;
2920 		dp->du_common.sbdev_unum = i;
2921 		dp->du_common.sbdev_type = SBD_COMP_MEM;
2922 	}
2923 
2924 	for (i = 0; i < MAX_IO_UNITS_PER_BOARD; i++) {
2925 		dp = DR_GET_BOARD_DEVUNIT(bp, SBD_COMP_IO, i);
2926 
2927 		bzero(dp, sizeof (*dp));
2928 		dp->du_common.sbdev_bp = bp;
2929 		dp->du_common.sbdev_unum = i;
2930 		dp->du_common.sbdev_type = SBD_COMP_IO;
2931 	}
2932 
2933 	err = NULL;
2934 	if (bp->b_id) {
2935 		/* find devices on this board */
2936 		err = drmach_board_find_devices(
2937 		    bp->b_id, bp, dr_dev_found);
2938 	}
2939 
2940 	return (err);
2941 }
2942 
2943 /*
2944  * Return 0 if the device identified by the given drmachid is
2945  * found to be attached, -1 otherwise.
2946  */
2947 static int
2948 dr_check_unit_attached(dr_common_unit_t *cp)
2949 {
2950 	int		rv = 0;
2951 	processorid_t	cpuid;
2952 	uint64_t	basepa, endpa;
2953 	struct memlist	*ml;
2954 	extern struct memlist	*phys_install;
2955 	sbd_error_t	*err;
2956 	int		yes;
2957 	static fn_t	f = "dr_check_unit_attached";
2958 
2959 	switch (cp->sbdev_type) {
2960 	case SBD_COMP_CPU:
2961 		err = drmach_cpu_get_id(cp->sbdev_id, &cpuid);
2962 		if (err) {
2963 			DRERR_SET_C(&cp->sbdev_error, &err);
2964 			rv = -1;
2965 			break;
2966 		}
2967 		mutex_enter(&cpu_lock);
2968 		if (cpu_get(cpuid) == NULL)
2969 			rv = -1;
2970 		mutex_exit(&cpu_lock);
2971 		break;
2972 
2973 	case SBD_COMP_MEM:
2974 		err = drmach_mem_get_base_physaddr(cp->sbdev_id, &basepa);
2975 		if (err) {
2976 			DRERR_SET_C(&cp->sbdev_error, &err);
2977 			rv = -1;
2978 			break;
2979 		}
2980 
2981 		/*
2982 		 * basepa may not be on a slice alignment boundary; make it so.
2983 		 */
2984 		err = drmach_mem_get_slice_size(cp->sbdev_id, &endpa);
2985 		if (err) {
2986 			DRERR_SET_C(&cp->sbdev_error, &err);
2987 			rv = -1;
2988 			break;
2989 		}
2990 
2991 		basepa &= ~(endpa - 1);
2992 		endpa += basepa;
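		/*
		 * For instance, with a (hypothetical) 4 GB slice size,
		 * a basepa of 0x123450000 rounds down to 0x100000000
		 * and endpa becomes 0x200000000.
		 */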
2993 
2994 		/*
2995 		 * Check if base address is in phys_install.
2996 		 */
2997 		memlist_read_lock();
2998 		for (ml = phys_install; ml; ml = ml->ml_next)
2999 			if ((endpa <= ml->ml_address) ||
3000 			    (basepa >= (ml->ml_address + ml->ml_size)))
3001 				continue;
3002 			else
3003 				break;
3004 		memlist_read_unlock();
3005 		if (ml == NULL)
3006 			rv = -1;
3007 		break;
3008 
3009 	case SBD_COMP_IO:
3010 		err = drmach_io_is_attached(cp->sbdev_id, &yes);
3011 		if (err) {
3012 			DRERR_SET_C(&cp->sbdev_error, &err);
3013 			rv = -1;
3014 			break;
3015 		} else if (!yes)
3016 			rv = -1;
3017 		break;
3018 
3019 	default:
3020 		PR_ALL("%s: unexpected nodetype(%d) for id 0x%p\n",
3021 		    f, cp->sbdev_type, cp->sbdev_id);
3022 		rv = -1;
3023 		break;
3024 	}
3025 
3026 	return (rv);
3027 }
3028 
3029 /*
3030  * See if drmach recognizes the passthru command.  DRMACH expects the
3031  * id to identify the thing to which the command is being applied.  Using
3032  * nonsense SBD terms, that information has been perversely encoded in the
3033  * c_id member of the sbd_cmd_t structure.  This logic reads those tea
3034  * leaves, finds the associated drmach id, then calls drmach to process
3035  * the passthru command.
3036  */
3037 static int
3038 dr_pt_try_drmach(dr_handle_t *hp)
3039 {
3040 	dr_board_t	*bp = hp->h_bd;
3041 	sbd_comp_id_t	*comp_id = &hp->h_sbdcmd.cmd_cm.c_id;
3042 	drmachid_t	 id;
3043 
3044 	if (comp_id->c_type == SBD_COMP_NONE) {
3045 		id = bp->b_id;
3046 	} else {
3047 		sbd_comp_type_t	 nt;
3048 
3049 		nt = dr_dev_type_to_nt(comp_id->c_name);
3050 		if (nt == SBD_COMP_UNKNOWN) {
3051 			dr_op_err(CE_IGNORE, hp, ESBD_INVAL, comp_id->c_name);
3052 			id = 0;
3053 		} else {
3054 			/* pt command applied to dynamic attachment point */
3055 			dr_common_unit_t *cp;
3056 			cp = dr_get_common_unit(bp, nt, comp_id->c_unit);
3057 			id = cp->sbdev_id;
3058 		}
3059 	}
3060 
3061 	if (hp->h_err == NULL)
3062 		hp->h_err = drmach_passthru(id, &hp->h_opts);
3063 
3064 	return (hp->h_err == NULL ? 0 : -1);
3065 }
3066 
3067 static int
3068 dr_pt_ioctl(dr_handle_t *hp)
3069 {
3070 	int		cmd, rv, len;
3071 	int32_t		sz;
3072 	int		found;
3073 	char		*copts;
3074 	static fn_t	f = "dr_pt_ioctl";
3075 
3076 	PR_ALL("%s...\n", f);
3077 
3078 	sz = hp->h_opts.size;
3079 	copts = hp->h_opts.copts;
3080 
3081 	if (sz == 0 || copts == (char *)NULL) {
3082 		cmn_err(CE_WARN, "%s: invalid passthru args", f);
3083 		return (EINVAL);
3084 	}
3085 
3086 	found = 0;
3087 	for (cmd = 0; cmd < (sizeof (pt_arr) / sizeof (pt_arr[0])); cmd++) {
3088 		len = strlen(pt_arr[cmd].pt_name);
3089 		found = (strncmp(pt_arr[cmd].pt_name, copts, len) == 0);
3090 		if (found)
3091 			break;
3092 	}
3093 
3094 	if (found)
3095 		rv = (*pt_arr[cmd].pt_func)(hp);
3096 	else
3097 		rv = dr_pt_try_drmach(hp);
3098 
3099 	return (rv);
3100 }
3101 
3102 /*
3103  * Called at driver load time to determine the state and condition
3104  * of an existing board in the system.
3105  */
3106 static void
3107 dr_board_discovery(dr_board_t *bp)
3108 {
3109 	int			i;
3110 	dr_devset_t		devs_lost, devs_attached = 0;
3111 	dr_cpu_unit_t		*cp;
3112 	dr_mem_unit_t		*mp;
3113 	dr_io_unit_t		*ip;
3114 	static fn_t		f = "dr_board_discovery";
3115 
3116 	if (DR_DEVS_PRESENT(bp) == 0) {
3117 		PR_ALL("%s: board %d has no devices present\n",
3118 		    f, bp->b_num);
3119 		return;
3120 	}
3121 
3122 	/*
3123 	 * Check for existence of cpus.
3124 	 */
3125 	for (i = 0; i < MAX_CPU_UNITS_PER_BOARD; i++) {
3126 		cp = dr_get_cpu_unit(bp, i);
3127 
3128 		if (!DR_DEV_IS_PRESENT(&cp->sbc_cm))
3129 			continue;
3130 
3131 		if (dr_check_unit_attached(&cp->sbc_cm) >= 0) {
3132 			DR_DEV_SET_ATTACHED(&cp->sbc_cm);
3133 			DEVSET_ADD(devs_attached, SBD_COMP_CPU, i);
3134 			PR_ALL("%s: board %d, cpu-unit %d - attached\n",
3135 			    f, bp->b_num, i);
3136 		}
3137 		dr_init_cpu_unit(cp);
3138 	}
3139 
3140 	/*
3141 	 * Check for existence of memory.
3142 	 */
3143 	for (i = 0; i < MAX_MEM_UNITS_PER_BOARD; i++) {
3144 		mp = dr_get_mem_unit(bp, i);
3145 
3146 		if (!DR_DEV_IS_PRESENT(&mp->sbm_cm))
3147 			continue;
3148 
3149 		if (dr_check_unit_attached(&mp->sbm_cm) >= 0) {
3150 			DR_DEV_SET_ATTACHED(&mp->sbm_cm);
3151 			DEVSET_ADD(devs_attached, SBD_COMP_MEM, i);
3152 			PR_ALL("%s: board %d, mem-unit %d - attached\n",
3153 			    f, bp->b_num, i);
3154 		}
3155 		dr_init_mem_unit(mp);
3156 	}
3157 
3158 	/*
3159 	 * Check for i/o state.
3160 	 */
3161 	for (i = 0; i < MAX_IO_UNITS_PER_BOARD; i++) {
3162 		ip = dr_get_io_unit(bp, i);
3163 
3164 		if (!DR_DEV_IS_PRESENT(&ip->sbi_cm))
3165 			continue;
3166 
3167 		if (dr_check_unit_attached(&ip->sbi_cm) >= 0) {
3168 			/*
3169 			 * Found it!
3170 			 */
3171 			DR_DEV_SET_ATTACHED(&ip->sbi_cm);
3172 			DEVSET_ADD(devs_attached, SBD_COMP_IO, i);
3173 			PR_ALL("%s: board %d, io-unit %d - attached\n",
3174 			    f, bp->b_num, i);
3175 		}
3176 		dr_init_io_unit(ip);
3177 	}
3178 
3179 	DR_DEVS_CONFIGURE(bp, devs_attached);
3180 	if (devs_attached && ((devs_lost = DR_DEVS_UNATTACHED(bp)) != 0)) {
3181 		int		ut;
3182 		/*
3183 		 * It is not legal on board discovery to have a
3184 		 * board that is only partially attached.  A board
3185 		 * is either all attached or all connected.  If a
3186 		 * board has at least one attached device, then
3187 		 * the remaining devices, if any, must have
3188 		 * been lost or disconnected.  These devices can
3189 		 * only be recovered by a full attach from scratch.
3190 		 * Note that devices previously in the unreferenced
3191 		 * state are subsequently lost until the next full
3192 		 * attach.  This is necessary since the driver unload
3193 		 * that must have occurred would have wiped out the
3194 		 * information necessary to re-configure the device
3195 		 * back online, e.g. memlist.
3196 		 */
3197 		PR_ALL("%s: some devices LOST (0x%lx)...\n", f, devs_lost);
3198 
3199 		for (ut = 0; ut < MAX_CPU_UNITS_PER_BOARD; ut++) {
3200 			if (!DEVSET_IN_SET(devs_lost, SBD_COMP_CPU, ut))
3201 				continue;
3202 
3203 			cp = dr_get_cpu_unit(bp, ut);
3204 			dr_device_transition(&cp->sbc_cm, DR_STATE_EMPTY);
3205 		}
3206 
3207 		for (ut = 0; ut < MAX_MEM_UNITS_PER_BOARD; ut++) {
3208 			if (!DEVSET_IN_SET(devs_lost, SBD_COMP_MEM, ut))
3209 				continue;
3210 
3211 			mp = dr_get_mem_unit(bp, ut);
3212 			dr_device_transition(&mp->sbm_cm, DR_STATE_EMPTY);
3213 		}
3214 
3215 		for (ut = 0; ut < MAX_IO_UNITS_PER_BOARD; ut++) {
3216 			if (!DEVSET_IN_SET(devs_lost, SBD_COMP_IO, ut))
3217 				continue;
3218 
3219 			ip = dr_get_io_unit(bp, ut);
3220 			dr_device_transition(&ip->sbi_cm, DR_STATE_EMPTY);
3221 		}
3222 
3223 		DR_DEVS_DISCONNECT(bp, devs_lost);
3224 	}
3225 }
3226 
3227 static int
3228 dr_board_init(dr_board_t *bp, dev_info_t *dip, int bd)
3229 {
3230 	sbd_error_t	*err;
3231 
3232 	mutex_init(&bp->b_lock, NULL, MUTEX_DRIVER, NULL);
3233 	mutex_init(&bp->b_slock, NULL, MUTEX_DRIVER, NULL);
3234 	cv_init(&bp->b_scv, NULL, CV_DRIVER, NULL);
3235 	bp->b_rstate = SBD_STAT_EMPTY;
3236 	bp->b_ostate = SBD_STAT_UNCONFIGURED;
3237 	bp->b_cond = SBD_COND_UNKNOWN;
3238 	(void) drv_getparm(TIME, (void *)&bp->b_time);
3239 
3240 	(void) drmach_board_lookup(bd, &bp->b_id);
3241 	bp->b_num = bd;
3242 	bp->b_dip = dip;
3243 
3244 	bp->b_dev[NIX(SBD_COMP_CPU)] = GETSTRUCT(dr_dev_unit_t,
3245 	    MAX_CPU_UNITS_PER_BOARD);
3246 
3247 	bp->b_dev[NIX(SBD_COMP_MEM)] = GETSTRUCT(dr_dev_unit_t,
3248 	    MAX_MEM_UNITS_PER_BOARD);
3249 
3250 	bp->b_dev[NIX(SBD_COMP_IO)] = GETSTRUCT(dr_dev_unit_t,
3251 	    MAX_IO_UNITS_PER_BOARD);
3252 
3253 	/*
3254 	 * Initialize the devlists
3255 	 */
3256 	err = dr_init_devlists(bp);
3257 	if (err) {
3258 		sbd_err_clear(&err);
3259 		dr_board_destroy(bp);
3260 		return (-1);
3261 	} else if (bp->b_ndev == 0) {
3262 		dr_board_transition(bp, DR_STATE_EMPTY);
3263 	} else {
3264 		/*
3265 		 * Couldn't have made it down here without
3266 		 * having found at least one device.
3267 		 */
3268 		ASSERT(DR_DEVS_PRESENT(bp) != 0);
3269 		/*
3270 		 * Check the state of any possible devices on the
3271 		 * board.
3272 		 */
3273 		dr_board_discovery(bp);
3274 
3275 		bp->b_assigned = 1;
3276 
3277 		if (DR_DEVS_UNATTACHED(bp) == 0) {
3278 			/*
3279 			 * The board has no unattached devices, therefore
3280 			 * by reason of insanity it must be configured!
3281 			 */
3282 			dr_board_transition(bp, DR_STATE_CONFIGURED);
3283 			bp->b_ostate = SBD_STAT_CONFIGURED;
3284 			bp->b_rstate = SBD_STAT_CONNECTED;
3285 			bp->b_cond = SBD_COND_OK;
3286 			(void) drv_getparm(TIME, (void *)&bp->b_time);
3287 		} else if (DR_DEVS_ATTACHED(bp)) {
3288 			dr_board_transition(bp, DR_STATE_PARTIAL);
3289 			bp->b_ostate = SBD_STAT_CONFIGURED;
3290 			bp->b_rstate = SBD_STAT_CONNECTED;
3291 			bp->b_cond = SBD_COND_OK;
3292 			(void) drv_getparm(TIME, (void *)&bp->b_time);
3293 		} else {
3294 			dr_board_transition(bp, DR_STATE_CONNECTED);
3295 			bp->b_rstate = SBD_STAT_CONNECTED;
3296 			(void) drv_getparm(TIME, (void *)&bp->b_time);
3297 		}
3298 	}
3299 
3300 	return (0);
3301 }
3302 
3303 static void
3304 dr_board_destroy(dr_board_t *bp)
3305 {
3306 	PR_ALL("dr_board_destroy: num %d, path %s\n",
3307 	    bp->b_num, bp->b_path);
3308 
3309 	dr_board_transition(bp, DR_STATE_EMPTY);
3310 	bp->b_rstate = SBD_STAT_EMPTY;
3311 	(void) drv_getparm(TIME, (void *)&bp->b_time);
3312 
3313 	/*
3314 	 * Free up MEM unit structs.
3315 	 */
3316 	FREESTRUCT(bp->b_dev[NIX(SBD_COMP_MEM)],
3317 	    dr_dev_unit_t, MAX_MEM_UNITS_PER_BOARD);
3318 	bp->b_dev[NIX(SBD_COMP_MEM)] = NULL;
3319 	/*
3320 	 * Free up CPU unit structs.
3321 	 */
3322 	FREESTRUCT(bp->b_dev[NIX(SBD_COMP_CPU)],
3323 	    dr_dev_unit_t, MAX_CPU_UNITS_PER_BOARD);
3324 	bp->b_dev[NIX(SBD_COMP_CPU)] = NULL;
3325 	/*
3326 	 * Free up IO unit structs.
3327 	 */
3328 	FREESTRUCT(bp->b_dev[NIX(SBD_COMP_IO)],
3329 	    dr_dev_unit_t, MAX_IO_UNITS_PER_BOARD);
3330 	bp->b_dev[NIX(SBD_COMP_IO)] = NULL;
3331 
3332 	mutex_destroy(&bp->b_lock);
3333 	mutex_destroy(&bp->b_slock);
3334 	cv_destroy(&bp->b_scv);
3335 }
3336 
3337 void
3338 dr_lock_status(dr_board_t *bp)
3339 {
3340 	mutex_enter(&bp->b_slock);
3341 	while (bp->b_sflags & DR_BSLOCK)
3342 		cv_wait(&bp->b_scv, &bp->b_slock);
3343 	bp->b_sflags |= DR_BSLOCK;
3344 	mutex_exit(&bp->b_slock);
3345 }
3346 
3347 void
3348 dr_unlock_status(dr_board_t *bp)
3349 {
3350 	mutex_enter(&bp->b_slock);
3351 	bp->b_sflags &= ~DR_BSLOCK;
3352 	cv_signal(&bp->b_scv);
3353 	mutex_exit(&bp->b_slock);
3354 }
3355 
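/*
 * The interruptible form of this lock, open-coded in
 * dr_dev_unconfigure() and dr_dev_status(), could be captured in a
 * signal-aware counterpart of dr_lock_status().  A minimal sketch only
 * (this helper is not part of the driver); cv_wait_sig() returns zero
 * when the wait is interrupted by a signal, so the caller sees EINTR
 * exactly as at the two open-coded sites:
 *
 *	static int
 *	dr_lock_status_sig(dr_board_t *bp)
 *	{
 *		mutex_enter(&bp->b_slock);
 *		while (bp->b_sflags & DR_BSLOCK) {
 *			if (cv_wait_sig(&bp->b_scv, &bp->b_slock) == 0) {
 *				mutex_exit(&bp->b_slock);
 *				return (EINTR);
 *			}
 *		}
 *		bp->b_sflags |= DR_BSLOCK;
 *		mutex_exit(&bp->b_slock);
 *		return (0);
 *	}
 */
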
3356 /*
3357  * Extract flags passed via ioctl.
3358  */
3359 int
3360 dr_cmd_flags(dr_handle_t *hp)
3361 {
3362 	return (hp->h_sbdcmd.cmd_cm.c_flags);
3363 }
3364