1/*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21/*
22 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
23 * Use is subject to license terms.
24 */
25
/*
 * pm	This driver now only handles the ioctl interface.  The scanning
 *	and policy code now lives in common/os/sunpm.c.
 *	Not DDI compliant.
 */
31
32#include <sys/types.h>
33#include <sys/errno.h>
34#include <sys/modctl.h>
35#include <sys/callb.h>		/* callback registration for cpu_deep_idle */
36#include <sys/conf.h>		/* driver flags and functions */
37#include <sys/open.h>		/* OTYP_CHR definition */
38#include <sys/stat.h>		/* S_IFCHR definition */
39#include <sys/pathname.h>	/* name -> dev_info xlation */
40#include <sys/kmem.h>		/* memory alloc stuff */
41#include <sys/debug.h>
42#include <sys/pm.h>
43#include <sys/ddi.h>
44#include <sys/sunddi.h>
45#include <sys/epm.h>
46#include <sys/vfs.h>
47#include <sys/mode.h>
48#include <sys/mkdev.h>
49#include <sys/promif.h>
50#include <sys/consdev.h>
51#include <sys/ddi_impldefs.h>
52#include <sys/poll.h>
53#include <sys/note.h>
54#include <sys/taskq.h>
55#include <sys/policy.h>
56#include <sys/cpu_pm.h>
57
/*
 * Minor number is (instance << 8) + clone, where the clone minor is in the
 * range 1-254 (0 is reserved for the "original" minor).
 */
#define	PM_MINOR_TO_CLONE(minor) ((minor) & (PM_MAX_CLONE -1))
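/*
 * Illustrative example (assuming PM_MAX_CLONE is a power of two such as
 * 256): instance 0 with clone 5 yields minor number (0 << 8) + 5 == 5,
 * and PM_MINOR_TO_CLONE(5) masks off the instance bits to recover 5.
 */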
63
64#define	PM_NUMCMPTS(dip) (DEVI(dip)->devi_pm_num_components)
65#define	PM_IS_CFB(dip) (DEVI(dip)->devi_pm_flags & PMC_CONSOLE_FB)
66#define	PM_MAJOR(dip) ddi_driver_major(dip)
67#define	PM_RELE(dip) ddi_release_devi(dip)
68
69#define	PM_IDLEDOWN_TIME	10
70#define	MAXSMBIOSSTRLEN 64	/* from SMBIOS spec */
71#define	MAXCOPYBUF	(MAXSMBIOSSTRLEN + 1)
72
73extern kmutex_t	pm_scan_lock;	/* protects autopm_enable, pm_scans_disabled */
74extern kmutex_t	pm_clone_lock;	/* protects pm_clones array */
75extern int	autopm_enabled;
76extern pm_cpupm_t cpupm;
77extern pm_cpupm_t cpupm_default_mode;
78extern int	pm_default_idle_threshold;
79extern int	pm_system_idle_threshold;
80extern int	pm_cpu_idle_threshold;
81extern kcondvar_t pm_clones_cv[PM_MAX_CLONE];
82extern uint_t	pm_poll_cnt[PM_MAX_CLONE];
83extern int	autoS3_enabled;
84extern void	pm_record_thresh(pm_thresh_rec_t *);
85extern void	pm_register_watcher(int, dev_info_t *);
86extern int	pm_get_current_power(dev_info_t *, int, int *);
87extern int	pm_interest_registered(int);
88extern void	pm_all_to_default_thresholds(void);
89extern int	pm_current_threshold(dev_info_t *, int, int *);
90extern void	pm_deregister_watcher(int, dev_info_t *);
91extern void	pm_unrecord_threshold(char *);
92extern int	pm_S3_enabled;
93extern int	pm_ppm_searchlist(pm_searchargs_t *);
94extern psce_t	*pm_psc_clone_to_direct(int);
95extern psce_t	*pm_psc_clone_to_interest(int);
96
/*
 * The soft state of the power manager.  Since there will only be one of
 * these, just reference it through a static pointer.
 */
101static struct pmstate {
102	dev_info_t	*pm_dip;		/* ptr to our dev_info node */
103	int		pm_instance;		/* for ddi_get_instance() */
104	timeout_id_t	pm_idledown_id;		/* pm idledown timeout id */
105	uchar_t		pm_clones[PM_MAX_CLONE]; /* uniqueify multiple opens */
106	struct cred	*pm_cred[PM_MAX_CLONE];	/* cred for each unique open */
107} pm_state = { NULL, -1, (timeout_id_t)0 };
108typedef struct pmstate *pm_state_t;
109static pm_state_t pmstp = &pm_state;
110
111static int	pm_open(dev_t *, int, int, cred_t *);
112static int	pm_close(dev_t, int, int, cred_t *);
113static int	pm_ioctl(dev_t, int, intptr_t, int, cred_t *, int *);
114static int	pm_chpoll(dev_t, short, int, short *, struct pollhead **);
115
116static struct cb_ops pm_cb_ops = {
117	pm_open,	/* open */
118	pm_close,	/* close */
119	nodev,		/* strategy */
120	nodev,		/* print */
121	nodev,		/* dump */
122	nodev,		/* read */
123	nodev,		/* write */
124	pm_ioctl,	/* ioctl */
125	nodev,		/* devmap */
126	nodev,		/* mmap */
127	nodev,		/* segmap */
128	pm_chpoll,	/* poll */
129	ddi_prop_op,	/* prop_op */
130	NULL,		/* streamtab */
131	D_NEW | D_MP	/* driver compatibility flag */
132};
133
134static int pm_getinfo(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg,
135    void **result);
136static int pm_attach(dev_info_t *dip, ddi_attach_cmd_t cmd);
137static int pm_detach(dev_info_t *dip, ddi_detach_cmd_t cmd);
138
139static struct dev_ops pm_ops = {
140	DEVO_REV,		/* devo_rev */
141	0,			/* refcnt */
142	pm_getinfo,		/* info */
143	nulldev,		/* identify */
144	nulldev,		/* probe */
145	pm_attach,		/* attach */
146	pm_detach,		/* detach */
147	nodev,			/* reset */
148	&pm_cb_ops,		/* driver operations */
149	NULL,			/* bus operations */
150	NULL,			/* power */
151	ddi_quiesce_not_needed,		/* quiesce */
152};
153
154static struct modldrv modldrv = {
155	&mod_driverops,
156	"power management driver",
157	&pm_ops
158};
159
160static struct modlinkage modlinkage = {
161	MODREV_1, &modldrv, 0
162};
163
164/* Local functions */
165#ifdef DEBUG
166static int	print_info(dev_info_t *, void *);
167
168#endif
169
170int
171_init(void)
172{
173	return (mod_install(&modlinkage));
174}
175
176int
177_fini(void)
178{
179	return (mod_remove(&modlinkage));
180}
181
182int
183_info(struct modinfo *modinfop)
184{
185	return (mod_info(&modlinkage, modinfop));
186}
187
188static int
189pm_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
190{
191	int		i;
192
193	switch (cmd) {
194
195	case DDI_ATTACH:
196		if (pmstp->pm_instance != -1)	/* Only allow one instance */
197			return (DDI_FAILURE);
198		pmstp->pm_instance = ddi_get_instance(dip);
199		if (ddi_create_minor_node(dip, "pm", S_IFCHR,
200		    (pmstp->pm_instance << 8) + 0,
201		    DDI_PSEUDO, 0) != DDI_SUCCESS) {
202			return (DDI_FAILURE);
203		}
204		pmstp->pm_dip = dip;	/* pm_init and getinfo depend on it */
205
206		for (i = 0; i < PM_MAX_CLONE; i++)
207			cv_init(&pm_clones_cv[i], NULL, CV_DEFAULT, NULL);
208
209		ddi_report_dev(dip);
210		return (DDI_SUCCESS);
211
212	default:
213		return (DDI_FAILURE);
214	}
215}
216
217/* ARGSUSED */
218static int
219pm_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
220{
221	int i;
222
223	switch (cmd) {
224	case DDI_DETACH:
		/*
		 * Don't detach while an idledown timeout is pending.  Note
		 * that we already know we're not in pm_ioctl() due to
		 * framework synchronization, so this is a sufficient test.
		 */
230		if (pmstp->pm_idledown_id)
231			return (DDI_FAILURE);
232
233		for (i = 0; i < PM_MAX_CLONE; i++)
234			cv_destroy(&pm_clones_cv[i]);
235
236		ddi_remove_minor_node(dip, NULL);
237		pmstp->pm_instance = -1;
238		return (DDI_SUCCESS);
239
240	default:
241		return (DDI_FAILURE);
242	}
243}
244
245static int
246pm_close_direct_pm_device(dev_info_t *dip, void *arg)
247{
248	int clone;
249	char *pathbuf;
250	pm_info_t *info = PM_GET_PM_INFO(dip);
251
252	clone = *((int *)arg);
253
254	if (!info)
255		return (DDI_WALK_CONTINUE);
256
257	pathbuf = kmem_alloc(MAXPATHLEN, KM_SLEEP);
258	PM_LOCK_DIP(dip);
259	if (clone == info->pmi_clone) {
260		PMD(PMD_CLOSE, ("pm_close: found %s@%s(%s#%d)\n",
261		    PM_DEVICE(dip)))
262		ASSERT(PM_ISDIRECT(dip));
263		info->pmi_dev_pm_state &= ~PM_DIRECT;
264		PM_UNLOCK_DIP(dip);
265		pm_proceed(dip, PMP_RELEASE, -1, -1);
266		/* Bring ourselves up if there is a keeper that is up */
267		(void) ddi_pathname(dip, pathbuf);
268		pm_dispatch_to_dep_thread(PM_DEP_WK_BRINGUP_SELF, NULL,
269		    pathbuf, PM_DEP_NOWAIT, NULL, 0);
270		PM_LOCK_DIP(dip);
271		info->pmi_clone = 0;
272		PM_UNLOCK_DIP(dip);
273	} else {
274		PM_UNLOCK_DIP(dip);
275	}
276	kmem_free(pathbuf, MAXPATHLEN);
277
278	/* restart autopm on device released from direct pm */
279	pm_rescan(dip);
280
281	return (DDI_WALK_CONTINUE);
282}
283
284#define	PM_REQ		1
285#define	NOSTRUCT	2
286#define	DIP		3
287#define	NODIP		4
288#define	NODEP		5
289#define	DEP		6
290#define	PM_PSC		7
291#define	PM_SRCH		8
292
293#define	CHECKPERMS	0x001
294#define	SU		0x002
295#define	SG		0x004
296#define	OWNER		0x008
297
298#define	INWHO		0x001
299#define	INDATAINT	0x002
300#define	INDATASTRING	0x004
301#define	INDEP		0x008
302#define	INDATAOUT	0x010
303#define	INDATA	(INDATAOUT | INDATAINT | INDATASTRING | INDEP)
304
305struct pm_cmd_info {
306	int cmd;		/* command code */
307	char *name;		/* printable string */
308	int supported;		/* true if still supported */
309	int str_type;		/* PM_REQ or NOSTRUCT */
310	int inargs;		/* INWHO, INDATAINT, INDATASTRING, INDEP, */
311				/* INDATAOUT */
312	int diptype;		/* DIP or NODIP */
313	int deptype;		/* DEP or NODEP */
	int permission;		/* SU, SG, or CHECKPERMS */
};
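/*
 * Each pmci[] entry below drives the generic handling in pm_ioctl():
 * unsupported commands (supported == 0) fail with ENOTTY, str_type selects
 * how arguments are copied in (PM_REQ vs. NOSTRUCT, etc.), the inargs bits
 * select which pm_req fields are fetched, diptype controls whether the
 * physpath is translated (and held) via pm_name_to_dip(), and permission
 * is checked against the opener's credentials by pm_perms().
 */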
316
317#ifdef DEBUG
318char *pm_cmd_string;
319int pm_cmd;
320#endif
321
322/*
323 * Returns true if permission granted by credentials
324 */
325static int
326pm_perms(int perm, cred_t *cr)
327{
328	if (perm == 0)			/* no restrictions */
329		return (1);
330	if (perm == CHECKPERMS)		/* ok for now (is checked later) */
331		return (1);
332	if ((perm & SU) && secpolicy_power_mgmt(cr) == 0) /* privileged? */
333		return (1);
334	if ((perm & SG) && (crgetgid(cr) == 0))	/* group 0 is ok */
335		return (1);
336	return (0);
337}
338
339#ifdef DEBUG
340static int
341print_info(dev_info_t *dip, void *arg)
342{
343	_NOTE(ARGUNUSED(arg))
344	pm_info_t	*info;
345	int		i, j;
346	struct pm_component *cp;
347	extern int pm_cur_power(pm_component_t *cp);
348
349	info = PM_GET_PM_INFO(dip);
350	if (!info)
351		return (DDI_WALK_CONTINUE);
352	cmn_err(CE_CONT, "pm_info for %s\n", ddi_node_name(dip));
353	for (i = 0; i < PM_NUMCMPTS(dip); i++) {
354		cp = PM_CP(dip, i);
355		cmn_err(CE_CONT, "\tThresholds[%d] =",  i);
356		for (j = 0; j < cp->pmc_comp.pmc_numlevels; j++)
			cmn_err(CE_CONT, " %d", cp->pmc_comp.pmc_thresh[j]);
358		cmn_err(CE_CONT, "\n");
359		cmn_err(CE_CONT, "\tCurrent power[%d] = %d\n", i,
360		    pm_cur_power(cp));
361	}
362	if (PM_ISDIRECT(dip))
363		cmn_err(CE_CONT, "\tDirect power management\n");
364	return (DDI_WALK_CONTINUE);
365}
366#endif
367
368/*
369 * command, name, supported, str_type, inargs, diptype, deptype, permission
370 */
371static struct pm_cmd_info pmci[] = {
372	{PM_SCHEDULE, "PM_SCHEDULE", 0},
373	{PM_GET_IDLE_TIME, "PM_GET_IDLE_TIME", 0},
374	{PM_GET_NUM_CMPTS, "PM_GET_NUM_CMPTS", 0},
375	{PM_GET_THRESHOLD, "PM_GET_THRESHOLD", 0},
376	{PM_SET_THRESHOLD, "PM_SET_THRESHOLD", 0},
377	{PM_GET_NORM_PWR, "PM_GET_NORM_PWR", 0},
378	{PM_SET_CUR_PWR, "PM_SET_CUR_PWR", 0},
379	{PM_GET_CUR_PWR, "PM_GET_CUR_PWR", 0},
380	{PM_GET_NUM_DEPS, "PM_GET_NUM_DEPS", 0},
381	{PM_GET_DEP, "PM_GET_DEP", 0},
382	{PM_ADD_DEP, "PM_ADD_DEP", 0},
383	{PM_REM_DEP, "PM_REM_DEP", 0},
384	{PM_REM_DEVICE, "PM_REM_DEVICE", 0},
385	{PM_REM_DEVICES, "PM_REM_DEVICES", 0},
386	{PM_REPARSE_PM_PROPS, "PM_REPARSE_PM_PROPS", 1, PM_REQ, INWHO, DIP,
387	    NODEP},
388	{PM_DISABLE_AUTOPM, "PM_DISABLE_AUTOPM", 0},
389	{PM_REENABLE_AUTOPM, "PM_REENABLE_AUTOPM", 0},
390	{PM_SET_NORM_PWR, "PM_SET_NORM_PWR", 0 },
391	{PM_SET_DEVICE_THRESHOLD, "PM_SET_DEVICE_THRESHOLD", 1, PM_REQ,
392	    INWHO, NODIP, NODEP, SU},
393	{PM_GET_SYSTEM_THRESHOLD, "PM_GET_SYSTEM_THRESHOLD", 1, NOSTRUCT},
394	{PM_GET_DEFAULT_SYSTEM_THRESHOLD, "PM_GET_DEFAULT_SYSTEM_THRESHOLD",
395	    1, NOSTRUCT},
396	{PM_SET_SYSTEM_THRESHOLD, "PM_SET_SYSTEM_THRESHOLD", 1, NOSTRUCT,
397	    0, 0, 0, SU},
398	{PM_START_PM, "PM_START_PM", 1, NOSTRUCT, 0, 0, 0, SU},
399	{PM_STOP_PM, "PM_STOP_PM", 1, NOSTRUCT, 0, 0, 0, SU},
400	{PM_RESET_PM, "PM_RESET_PM", 1, NOSTRUCT, 0, 0, 0, SU},
401	{PM_GET_STATS, "PM_GET_STATS", 1, PM_REQ, INWHO | INDATAOUT,
402	    DIP, NODEP},
403	{PM_GET_DEVICE_THRESHOLD, "PM_GET_DEVICE_THRESHOLD", 1, PM_REQ, INWHO,
404	    DIP, NODEP},
405	{PM_GET_POWER_NAME, "PM_GET_POWER_NAME", 1, PM_REQ, INWHO | INDATAOUT,
406	    DIP, NODEP},
407	{PM_GET_POWER_LEVELS, "PM_GET_POWER_LEVELS", 1, PM_REQ,
408	    INWHO | INDATAOUT, DIP, NODEP},
409	{PM_GET_NUM_COMPONENTS, "PM_GET_NUM_COMPONENTS", 1, PM_REQ, INWHO,
410	    DIP, NODEP},
411	{PM_GET_COMPONENT_NAME, "PM_GET_COMPONENT_NAME", 1, PM_REQ,
412	    INWHO | INDATAOUT, DIP, NODEP},
413	{PM_GET_NUM_POWER_LEVELS, "PM_GET_NUM_POWER_LEVELS", 1, PM_REQ, INWHO,
414	    DIP, NODEP},
415	{PM_GET_STATE_CHANGE, "PM_GET_STATE_CHANGE", 1, PM_PSC},
416	{PM_GET_STATE_CHANGE_WAIT, "PM_GET_STATE_CHANGE_WAIT", 1, PM_PSC},
417	{PM_DIRECT_PM, "PM_DIRECT_PM", 1, PM_REQ, INWHO, DIP, NODEP,
418	    (SU | SG)},
419	{PM_RELEASE_DIRECT_PM, "PM_RELEASE_DIRECT_PM", 1, PM_REQ, INWHO,
420	    DIP, NODEP},
421	{PM_DIRECT_NOTIFY, "PM_DIRECT_NOTIFY", 1, PM_PSC},
422	{PM_DIRECT_NOTIFY_WAIT, "PM_DIRECT_NOTIFY_WAIT", 1, PM_PSC},
423	{PM_RESET_DEVICE_THRESHOLD, "PM_RESET_DEVICE_THRESHOLD", 1, PM_REQ,
424	    INWHO, DIP, NODEP, SU},
425	{PM_GET_PM_STATE, "PM_GET_PM_STATE", 1, NOSTRUCT},
426	{PM_GET_AUTOS3_STATE, "PM_GET_AUTOS3_STATE", 1, NOSTRUCT},
427	{PM_GET_S3_SUPPORT_STATE, "PM_GET_S3_SUPPORT_STATE", 1, NOSTRUCT},
428	{PM_GET_DEVICE_TYPE, "PM_GET_DEVICE_TYPE", 1, PM_REQ, INWHO,
429	    DIP, NODEP},
430	{PM_SET_COMPONENT_THRESHOLDS, "PM_SET_COMPONENT_THRESHOLDS", 1, PM_REQ,
431	    INWHO | INDATAINT, NODIP, NODEP, SU},
432	{PM_GET_COMPONENT_THRESHOLDS, "PM_GET_COMPONENT_THRESHOLDS", 1, PM_REQ,
433	    INWHO | INDATAOUT, DIP, NODEP},
434	{PM_IDLE_DOWN, "PM_IDLE_DOWN", 1, NOSTRUCT, 0, 0, 0, SU},
435	{PM_GET_DEVICE_THRESHOLD_BASIS, "PM_GET_DEVICE_THRESHOLD_BASIS", 1,
436	    PM_REQ, INWHO, DIP, NODEP},
437	{PM_SET_CURRENT_POWER, "PM_SET_CURRENT_POWER", 1, PM_REQ, INWHO, DIP,
438	    NODEP},
439	{PM_GET_CURRENT_POWER, "PM_GET_CURRENT_POWER", 1, PM_REQ, INWHO, DIP,
440	    NODEP},
441	{PM_GET_FULL_POWER, "PM_GET_FULL_POWER", 1, PM_REQ, INWHO, DIP,
442	    NODEP},
443	{PM_ADD_DEPENDENT, "PM_ADD_DEPENDENT", 1, PM_REQ, INWHO | INDATASTRING,
444	    DIP, DEP, SU},
445	{PM_GET_TIME_IDLE, "PM_GET_TIME_IDLE", 1, PM_REQ, INWHO, DIP, NODEP},
446	{PM_ADD_DEPENDENT_PROPERTY, "PM_ADD_DEPENDENT_PROPERTY", 1, PM_REQ,
447	    INWHO | INDATASTRING, NODIP, DEP, SU},
448	{PM_START_CPUPM, "PM_START_CPUPM", 1, NOSTRUCT, 0, 0, 0, SU},
449	{PM_START_CPUPM_EV, "PM_START_CPUPM_EV", 1, NOSTRUCT, 0,
450	    0, 0, SU},
451	{PM_START_CPUPM_POLL, "PM_START_CPUPM_POLL", 1, NOSTRUCT, 0,
452	    0, 0, SU},
453	{PM_STOP_CPUPM, "PM_STOP_CPUPM", 1, NOSTRUCT, 0, 0, 0, SU},
454	{PM_GET_CPU_THRESHOLD, "PM_GET_CPU_THRESHOLD", 1, NOSTRUCT},
455	{PM_SET_CPU_THRESHOLD, "PM_SET_CPU_THRESHOLD", 1, NOSTRUCT,
456	    0, 0, 0, SU},
457	{PM_GET_CPUPM_STATE, "PM_GET_CPUPM_STATE", 1, NOSTRUCT},
458	{PM_START_AUTOS3, "PM_START_AUTOS3", 1, NOSTRUCT, 0, 0, 0, SU},
459	{PM_STOP_AUTOS3, "PM_STOP_AUTOS3", 1, NOSTRUCT, 0, 0, 0, SU},
460	{PM_ENABLE_S3, "PM_ENABLE_S3", 1, NOSTRUCT, 0, 0, 0, SU},
461	{PM_DISABLE_S3, "PM_DISABLE_S3", 1, NOSTRUCT, 0, 0, 0, SU},
462	{PM_ENTER_S3, "PM_ENTER_S3", 1, NOSTRUCT, 0, 0, 0, SU},
463	{PM_SEARCH_LIST, "PM_SEARCH_LIST", 1, PM_SRCH, 0, 0, 0, SU},
464	{PM_GET_CMD_NAME, "PM_GET_CMD_NAME", 1, PM_REQ, INDATAOUT, NODIP,
465	    NODEP, 0},
466	{PM_DISABLE_CPU_DEEP_IDLE, "PM_DISABLE_CPU_DEEP_IDLE", 1, NOSTRUCT, 0,
467	    0, 0, SU},
468	{PM_ENABLE_CPU_DEEP_IDLE, "PM_START_CPU_DEEP_IDLE", 1, NOSTRUCT, 0,
469	    0, 0, SU},
470	{PM_DEFAULT_CPU_DEEP_IDLE, "PM_DFLT_CPU_DEEP_IDLE", 1, NOSTRUCT, 0,
471	    0, 0, SU},
472	{0, NULL}
473};
474
475struct pm_cmd_info *
476pc_info(int cmd)
477{
478	struct pm_cmd_info *pcip;
479
480	for (pcip = pmci; pcip->name; pcip++) {
481		if (cmd == pcip->cmd)
482			return (pcip);
483	}
484	return (NULL);
485}
486
487static char *
488pm_decode_cmd(int cmd)
489{
490	static char invbuf[64];
491	struct pm_cmd_info *pcip = pc_info(cmd);
492	if (pcip != NULL)
493		return (pcip->name);
494	(void) sprintf(invbuf, "ioctl: invalid command %d\n", cmd);
495	return (invbuf);
496}
497
/*
 * Allocate scan resources, create the taskq, then dispatch the scan.
 * Called only if autopm is enabled.
 */
502int
503pm_start_pm_walk(dev_info_t *dip, void *arg)
504{
505	int cmd = *((int *)arg);
506#ifdef PMDDEBUG
507	char *cmdstr = pm_decode_cmd(cmd);
508#endif
509
510	if (!PM_GET_PM_INFO(dip) || PM_ISBC(dip))
511		return (DDI_WALK_CONTINUE);
512
513	switch (cmd) {
514	case PM_START_CPUPM:
515	case PM_START_CPUPM_POLL:
516		if (!PM_ISCPU(dip))
517			return (DDI_WALK_CONTINUE);
518		mutex_enter(&pm_scan_lock);
519		if (!PM_CPUPM_DISABLED && !PM_EVENT_CPUPM)
520			pm_scan_init(dip);
521		mutex_exit(&pm_scan_lock);
522		break;
523	case PM_START_PM:
524		mutex_enter(&pm_scan_lock);
525		if (PM_ISCPU(dip) && (PM_CPUPM_DISABLED || PM_EVENT_CPUPM)) {
526			mutex_exit(&pm_scan_lock);
527			return (DDI_WALK_CONTINUE);
528		}
529		if (autopm_enabled)
530			pm_scan_init(dip);
531		mutex_exit(&pm_scan_lock);
532		break;
533	}
534
	/*
	 * Start doing pm on the device: ensure the pm_scan data structure is
	 * initialized; there is no need to guarantee a successful scan run.
	 */
539	PMD(PMD_SCAN | PMD_IOCTL, ("ioctl: %s: scan %s@%s(%s#%d)\n", cmdstr,
540	    PM_DEVICE(dip)))
541	pm_rescan(dip);
542
543	return (DDI_WALK_CONTINUE);
544}
545
546/*
547 * Bring devices to full power level, then stop scan
548 */
549int
550pm_stop_pm_walk(dev_info_t *dip, void *arg)
551{
552	pm_info_t *info = PM_GET_PM_INFO(dip);
553	int cmd = *((int *)arg);
554#ifdef PMDDEBUG
555	char *cmdstr = pm_decode_cmd(cmd);
556#endif
557
558	if (!info)
559		return (DDI_WALK_CONTINUE);
560
561	switch (cmd) {
562	case PM_STOP_PM:
563		/*
564		 * If CPU devices are being managed independently, then don't
565		 * stop them as part of PM_STOP_PM. Only stop them as part of
566		 * PM_STOP_CPUPM and PM_RESET_PM.
567		 */
568		if (PM_ISCPU(dip) && PM_POLLING_CPUPM)
569			return (DDI_WALK_CONTINUE);
570		break;
571	case PM_STOP_CPUPM:
572		/*
573		 * If stopping CPU devices and this device is not marked
574		 * as a CPU device, then skip.
575		 */
576		if (!PM_ISCPU(dip))
577			return (DDI_WALK_CONTINUE);
578		break;
579	}
580
	/*
	 * Stop the current scan, then bring the device back to normal power.
	 */
584	if (!PM_ISBC(dip)) {
585		PMD(PMD_SCAN | PMD_IOCTL, ("ioctl: %s: stop scan for "
586		    "%s@%s(%s#%d)\n", cmdstr, PM_DEVICE(dip)))
587		pm_scan_stop(dip);
588	}
589
590	if (!PM_ISBC(dip) && !PM_ISDIRECT(dip) &&
591	    !pm_all_at_normal(dip)) {
592		PM_LOCK_DIP(dip);
593		if (info->pmi_dev_pm_state & PM_DETACHING) {
594			PMD(PMD_ALLNORM, ("ioctl: %s: deferring "
595			    "all_to_normal because %s@%s(%s#%d) is detaching\n",
596			    cmdstr, PM_DEVICE(dip)))
597			info->pmi_dev_pm_state |= PM_ALLNORM_DEFERRED;
598			PM_UNLOCK_DIP(dip);
599			return (DDI_WALK_CONTINUE);
600		}
601		PM_UNLOCK_DIP(dip);
602		if (pm_all_to_normal(dip, PM_CANBLOCK_FAIL) != DDI_SUCCESS) {
603			PMD(PMD_ERROR, ("ioctl: %s: could not bring %s@%s"
604			    "(%s#%d) to normal\n", cmdstr, PM_DEVICE(dip)))
605		}
606	}
607
608	return (DDI_WALK_CONTINUE);
609}
610
611static int
612pm_start_idledown(dev_info_t *dip, void *arg)
613{
614	int		flag = (int)(intptr_t)arg;
615	pm_scan_t	*scanp = PM_GET_PM_SCAN(dip);
616
617	if (!scanp)
618		return (DDI_WALK_CONTINUE);
619
620	PM_LOCK_DIP(dip);
621	scanp->ps_idle_down |= flag;
622	PM_UNLOCK_DIP(dip);
623	pm_rescan(dip);
624
625	return (DDI_WALK_CONTINUE);
626}
627
628/*ARGSUSED*/
629static int
630pm_end_idledown(dev_info_t *dip, void *ignore)
631{
632	pm_scan_t	*scanp = PM_GET_PM_SCAN(dip);
633
634	if (!scanp)
635		return (DDI_WALK_CONTINUE);
636
637	PM_LOCK_DIP(dip);
	/*
	 * The PMID_TIMERS bits are placeholders until idledown expires.
	 * They are also the base for regenerating the PMID_SCANS bits.
	 * While it is up to the scan thread to clear the PMID_SCANS bits
	 * after each scan run, PMID_TIMERS ensures aggressive scan-down
	 * behavior throughout the idledown period.
	 */
645	scanp->ps_idle_down &= ~PMID_TIMERS;
646	PM_UNLOCK_DIP(dip);
647
648	return (DDI_WALK_CONTINUE);
649}
650
651/*ARGSUSED*/
652static void
653pm_end_idledown_walk(void *ignore)
654{
655	PMD(PMD_IDLEDOWN, ("ioctl: end_idledown: idledown_id(%lx) timer is "
656	    "off\n", (ulong_t)pmstp->pm_idledown_id));
657
658	mutex_enter(&pm_scan_lock);
659	pmstp->pm_idledown_id = 0;
660	mutex_exit(&pm_scan_lock);
661
662	ddi_walk_devs(ddi_root_node(), pm_end_idledown, NULL);
663}
664
665/*
666 * pm_timeout_idledown - keep idledown effect for 10 seconds.
667 *
668 * Return 0 if another competing caller scheduled idledown timeout,
669 * otherwise, return idledown timeout_id.
670 */
671static timeout_id_t
672pm_timeout_idledown(void)
673{
674	timeout_id_t	to_id;
675
	/*
	 * Keep idle-down in effect for either 10 seconds or the length of a
	 * scan interval, whichever is greater.
	 */
680	mutex_enter(&pm_scan_lock);
681	if (pmstp->pm_idledown_id != 0) {
682		to_id = pmstp->pm_idledown_id;
683		pmstp->pm_idledown_id = 0;
684		mutex_exit(&pm_scan_lock);
685		(void) untimeout(to_id);
686		mutex_enter(&pm_scan_lock);
687		if (pmstp->pm_idledown_id != 0) {
688			PMD(PMD_IDLEDOWN, ("ioctl: timeout_idledown: "
689			    "another caller got it, idledown_id(%lx)!\n",
690			    (ulong_t)pmstp->pm_idledown_id))
691			mutex_exit(&pm_scan_lock);
692			return (0);
693		}
694	}
695	pmstp->pm_idledown_id = timeout(pm_end_idledown_walk, NULL,
696	    PM_IDLEDOWN_TIME * hz);
697	PMD(PMD_IDLEDOWN, ("ioctl: timeout_idledown: idledown_id(%lx)\n",
698	    (ulong_t)pmstp->pm_idledown_id))
699	mutex_exit(&pm_scan_lock);
700
701	return (pmstp->pm_idledown_id);
702}
703
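/*
 * Poll support for a clone: POLLIN/POLLRDNORM is asserted whenever state
 * change entries are queued for this clone (pm_poll_cnt[clone] != 0);
 * otherwise the common pm_pollhead from common/os/sunpm.c is handed back
 * so the caller can wait for the next state-change notification.
 */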
704static int
705pm_chpoll(dev_t dev, short events, int anyyet, short *reventsp,
706    struct pollhead **phpp)
707{
708	extern struct pollhead pm_pollhead;	/* common/os/sunpm.c */
709	int	clone;
710
711	clone = PM_MINOR_TO_CLONE(getminor(dev));
712	PMD(PMD_IOCTL, ("ioctl: pm_chpoll: clone %d\n", clone))
713	if ((events & (POLLIN | POLLRDNORM)) && pm_poll_cnt[clone]) {
714		*reventsp |= (POLLIN | POLLRDNORM);
715		PMD(PMD_IOCTL, ("ioctl: pm_chpoll: reventsp set\n"))
716	} else {
717		*reventsp = 0;
718		if (!anyyet) {
719			PMD(PMD_IOCTL, ("ioctl: pm_chpoll: not anyyet\n"))
720			*phpp = &pm_pollhead;
721		}
722#ifdef DEBUG
723		else {
724			PMD(PMD_IOCTL, ("ioctl: pm_chpoll: anyyet\n"))
725		}
726#endif
727	}
728	return (0);
729}
730
/*
 * Called by pm_discard_entries to free up the memory.  It also decrements
 * pm_poll_cnt if direct is non-zero.  Note that pscep->psce_lock must be
 * held on entry; it is released here before returning.
 */
735static void
736pm_free_entries(psce_t *pscep, int clone, int direct)
737{
738	pm_state_change_t	*p;
739
740	if (pscep) {
741		p = pscep->psce_out;
742		while (p->size) {
743			if (direct) {
744				PMD(PMD_IOCTL, ("ioctl: discard: "
745				    "pm_poll_cnt[%d] is %d before "
746				    "ASSERT\n", clone,
747				    pm_poll_cnt[clone]))
748				ASSERT(pm_poll_cnt[clone]);
749				pm_poll_cnt[clone]--;
750			}
751			kmem_free(p->physpath, p->size);
752			p->size = 0;
753			if (p == pscep->psce_last)
754				p = pscep->psce_first;
755			else
756				p++;
757		}
758		pscep->psce_out = pscep->psce_first;
759		pscep->psce_in = pscep->psce_first;
760		mutex_exit(&pscep->psce_lock);
761	}
762}
763
764/*
765 * Discard entries for this clone. Calls pm_free_entries to free up memory.
766 */
767static void
768pm_discard_entries(int clone)
769{
770	psce_t	*pscep;
771	int			direct = 0;
772
773	mutex_enter(&pm_clone_lock);
774	if ((pscep = pm_psc_clone_to_direct(clone)) != NULL)
775		direct = 1;
776	pm_free_entries(pscep, clone, direct);
777	pscep = pm_psc_clone_to_interest(clone);
778	pm_free_entries(pscep, clone, 0);
779	mutex_exit(&pm_clone_lock);
780}
781
782
783static void
784pm_set_idle_threshold(dev_info_t *dip, int thresh, int flag)
785{
786	if (!PM_ISBC(dip) && !PM_ISDIRECT(dip)) {
787		switch (DEVI(dip)->devi_pm_flags & PMC_THRESH_ALL) {
788		case PMC_DEF_THRESH:
789		case PMC_CPU_THRESH:
790			PMD(PMD_IOCTL, ("ioctl: set_idle_threshold: set "
791			    "%s@%s(%s#%d) default thresh to 0t%d\n",
792			    PM_DEVICE(dip), thresh))
793			pm_set_device_threshold(dip, thresh, flag);
794			break;
795		default:
796			break;
797		}
798	}
799}
800
801static int
802pm_set_idle_thresh_walk(dev_info_t *dip, void *arg)
803{
804	int cmd = *((int *)arg);
805
806	if (!PM_GET_PM_INFO(dip))
807		return (DDI_WALK_CONTINUE);
808
809	switch (cmd) {
810	case PM_SET_SYSTEM_THRESHOLD:
811		if (DEVI(dip)->devi_pm_flags & PMC_CPU_THRESH)
812			break;
813		pm_set_idle_threshold(dip, pm_system_idle_threshold,
814		    PMC_DEF_THRESH);
815		pm_rescan(dip);
816		break;
817	case PM_SET_CPU_THRESHOLD:
818		if (!PM_ISCPU(dip))
819			break;
820		pm_set_idle_threshold(dip, pm_cpu_idle_threshold,
821		    PMC_CPU_THRESH);
822		pm_rescan(dip);
823		break;
824	}
825
826	return (DDI_WALK_CONTINUE);
827}
828
829/*ARGSUSED*/
830static int
831pm_getinfo(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result)
832{
833	dev_t	dev;
834	int	instance;
835
836	switch (infocmd) {
837	case DDI_INFO_DEVT2DEVINFO:
838		if (pmstp->pm_instance == -1)
839			return (DDI_FAILURE);
840		*result = pmstp->pm_dip;
841		return (DDI_SUCCESS);
842
843	case DDI_INFO_DEVT2INSTANCE:
844		dev = (dev_t)arg;
845		instance = getminor(dev) >> 8;
846		*result = (void *)(uintptr_t)instance;
847		return (DDI_SUCCESS);
848
849	default:
850		return (DDI_FAILURE);
851	}
852}
853
854
855/*ARGSUSED1*/
856static int
857pm_open(dev_t *devp, int flag, int otyp, cred_t *cr)
858{
859	int		clone;
860
861	if (otyp != OTYP_CHR)
862		return (EINVAL);
863
864	mutex_enter(&pm_clone_lock);
865	for (clone = 1; clone < PM_MAX_CLONE; clone++)
866		if (!pmstp->pm_clones[clone])
867			break;
868
869	if (clone == PM_MAX_CLONE) {
870		mutex_exit(&pm_clone_lock);
871		return (ENXIO);
872	}
873	pmstp->pm_cred[clone] = cr;
874	crhold(cr);
875
876	*devp = makedevice(getmajor(*devp), (pmstp->pm_instance << 8) + clone);
877	pmstp->pm_clones[clone] = 1;
878	mutex_exit(&pm_clone_lock);
879
880	return (0);
881}
882
883/*ARGSUSED1*/
884static int
885pm_close(dev_t dev, int flag, int otyp, cred_t *cr)
886{
887	int clone;
888
889	if (otyp != OTYP_CHR)
890		return (EINVAL);
891
892	clone = PM_MINOR_TO_CLONE(getminor(dev));
893	PMD(PMD_CLOSE, ("pm_close: minor %x, clone %x\n", getminor(dev),
894	    clone))
895
896	/*
897	 * Walk the entire device tree to find the corresponding
898	 * device and operate on it.
899	 */
900	ddi_walk_devs(ddi_root_node(), pm_close_direct_pm_device,
901	    (void *) &clone);
902
903	crfree(pmstp->pm_cred[clone]);
904	pmstp->pm_cred[clone] = 0;
905	pmstp->pm_clones[clone] = 0;
906	pm_discard_entries(clone);
907	ASSERT(pm_poll_cnt[clone] == 0);
908	pm_deregister_watcher(clone, NULL);
909	return (0);
910}
911
912/*ARGSUSED*/
913static int
914pm_ioctl(dev_t dev, int cmd, intptr_t arg, int mode, cred_t *cr, int *rval_p)
915{
916	struct pm_cmd_info *pc_info(int);
917	struct pm_cmd_info *pcip = pc_info(cmd);
918	pm_req_t	req;
919	dev_info_t	*dip = NULL;
920	pm_info_t	*info = NULL;
921	int		clone;
922	char		*cmdstr = pm_decode_cmd(cmd);
923	/*
924	 * To keep devinfo nodes from going away while we're holding a
925	 * pointer to their dip, pm_name_to_dip() optionally holds
926	 * the devinfo node.  If we've done that, we set dipheld
927	 * so we know at the end of the ioctl processing to release the
928	 * node again.
929	 */
930	int		dipheld = 0;
931	int		icount = 0;
932	int		i;
933	int		comps;
934	size_t		lencopied;
935	int		ret = ENOTTY;
936	int		curpower;
937	char		who[MAXNAMELEN];
938	size_t		wholen;			/* copyinstr length */
939	size_t		deplen = MAXNAMELEN;
940	char		*dep, i_dep_buf[MAXNAMELEN];
941	char		pathbuf[MAXNAMELEN];
942	struct pm_component *cp;
943#ifdef	_MULTI_DATAMODEL
944	pm_state_change32_t		*pscp32;
945	pm_state_change32_t		psc32;
946	pm_searchargs32_t		psa32;
947	size_t				copysize32;
948#endif
949	pm_state_change_t		*pscp;
950	pm_state_change_t		psc;
951	pm_searchargs_t		psa;
952	char		listname[MAXCOPYBUF];
953	char		manufacturer[MAXCOPYBUF];
954	char		product[MAXCOPYBUF];
955	size_t		copysize;
956
957	PMD(PMD_IOCTL, ("ioctl: %s: begin\n", cmdstr))
958
959#ifdef DEBUG
960	if (cmd == 666) {
961		ddi_walk_devs(ddi_root_node(), print_info, NULL);
962		return (0);
963	}
964	ret = 0x0badcafe;			/* sanity checking */
965	pm_cmd = cmd;				/* for ASSERT debugging */
966	pm_cmd_string = cmdstr;	/* for ASSERT debugging */
967#endif

	if (pcip == NULL) {
		PMD(PMD_ERROR, ("ioctl: unknown command %d\n", cmd))
		return (ENOTTY);
	}
	if (pcip->supported == 0) {
		PMD(PMD_ERROR, ("ioctl: command %s no longer supported\n",
		    pcip->name))
		return (ENOTTY);
	}
979
980	wholen = 0;
981	dep = i_dep_buf;
982	i_dep_buf[0] = 0;
983	clone = PM_MINOR_TO_CLONE(getminor(dev));
984	if (!pm_perms(pcip->permission, pmstp->pm_cred[clone])) {
985		ret = EPERM;
986		return (ret);
987	}
988	switch (pcip->str_type) {
989	case PM_REQ:
990	{
991#ifdef	_MULTI_DATAMODEL
992		if ((mode & DATAMODEL_MASK) == DATAMODEL_ILP32) {
993			pm_req32_t	req32;
994
995			if (ddi_copyin((caddr_t)arg, &req32,
996			    sizeof (req32), mode) != 0) {
997				PMD(PMD_ERROR, ("ioctl: %s: ddi_copyin "
998				    "EFAULT\n\n", cmdstr))
999				ret = EFAULT;
1000				break;
1001			}
1002			req.component = req32.component;
1003			req.value = req32.value;
1004			req.datasize = req32.datasize;
1005			if (pcip->inargs & INWHO) {
1006				ret = copyinstr((char *)(uintptr_t)
1007				    req32.physpath, who, MAXNAMELEN, &wholen);
1008				if (ret) {
1009					PMD(PMD_ERROR, ("ioctl: %s: "
1010					    "copyinstr fails returning %d\n",
1011					    cmdstr, ret))
1012					break;
1013				}
1014				req.physpath = who;
1015				PMD(PMD_IOCTL, ("ioctl: %s: physpath=%s\n",
1016				    cmdstr, req.physpath))
1017			}
1018			if (pcip->inargs & INDATA) {
1019				req.data = (void *)(uintptr_t)req32.data;
1020				req.datasize = req32.datasize;
1021			} else {
1022				req.data = NULL;
1023				req.datasize = 0;
1024			}
1025			switch (pcip->diptype) {
1026			case DIP:
1027				if (!(dip =
1028				    pm_name_to_dip(req.physpath, 1))) {
1029					PMD(PMD_ERROR, ("ioctl: %s: "
1030					    "pm_name_to_dip for %s failed\n",
1031					    cmdstr, req.physpath))
1032					return (ENODEV);
1033				}
1034				ASSERT(!dipheld);
1035				dipheld++;
1036				break;
1037			case NODIP:
1038				break;
1039			default:
				/*
				 * Internal error: invalid ioctl description.
				 * Force a debug entry even if pm_debug is not
				 * set.
				 */
1044#ifdef	DEBUG
1045				pm_log("invalid diptype %d for cmd %d (%s)\n",
1046				    pcip->diptype, cmd, pcip->name);
1047#endif
1048				ASSERT(0);
1049				return (EIO);
1050			}
1051			if (pcip->inargs & INDATAINT) {
1052				int32_t int32buf;
1053				int32_t *i32p;
1054				int *ip;
1055				icount = req32.datasize / sizeof (int32_t);
1056				if (icount <= 0) {
1057					PMD(PMD_ERROR, ("ioctl: %s: datasize"
1058					    " 0 or neg EFAULT\n\n", cmdstr))
1059					ret = EFAULT;
1060					break;
1061				}
1062				ASSERT(!(pcip->inargs & INDATASTRING));
1063				req.datasize = icount * sizeof (int);
1064				req.data = kmem_alloc(req.datasize, KM_SLEEP);
1065				ip = req.data;
1066				ret = 0;
1067				for (i = 0,
1068				    i32p = (int32_t *)(uintptr_t)req32.data;
1069				    i < icount; i++, i32p++) {
1070					if (ddi_copyin((void *)i32p, &int32buf,
1071					    sizeof (int32_t), mode)) {
1072						kmem_free(req.data,
1073						    req.datasize);
1074						PMD(PMD_ERROR, ("ioctl: %s: "
1075						    "entry %d EFAULT\n",
1076						    cmdstr, i))
1077						ret = EFAULT;
1078						break;
1079					}
1080					*ip++ = (int)int32buf;
1081				}
1082				if (ret)
1083					break;
1084			}
1085			if (pcip->inargs & INDATASTRING) {
1086				ASSERT(!(pcip->inargs & INDATAINT));
1087				ASSERT(pcip->deptype == DEP);
1088				if (req32.data != 0) {
1089					if (copyinstr((void *)(uintptr_t)
1090					    req32.data, dep, deplen, NULL)) {
1091						PMD(PMD_ERROR, ("ioctl: %s: "
1092						    "0x%p dep size %lx, EFAULT"
1093						    "\n", cmdstr,
1094						    (void *)req.data, deplen))
1095						ret = EFAULT;
1096						break;
1097					}
1098#ifdef DEBUG
1099					else {
1100						PMD(PMD_DEP, ("ioctl: %s: "
1101						    "dep %s\n", cmdstr, dep))
1102					}
1103#endif
1104				} else {
1105					PMD(PMD_ERROR, ("ioctl: %s: no "
1106					    "dependent\n", cmdstr))
1107					ret = EINVAL;
1108					break;
1109				}
1110			}
1111		} else
1112#endif /* _MULTI_DATAMODEL */
1113		{
1114			if (ddi_copyin((caddr_t)arg,
1115			    &req, sizeof (req), mode) != 0) {
1116				PMD(PMD_ERROR, ("ioctl: %s: ddi_copyin "
1117				    "EFAULT\n\n", cmdstr))
1118				ret = EFAULT;
1119				break;
1120			}
1121			if (pcip->inargs & INWHO) {
1122				ret = copyinstr((char *)req.physpath, who,
1123				    MAXNAMELEN, &wholen);
1124				if (ret) {
1125					PMD(PMD_ERROR, ("ioctl: %s copyinstr"
1126					    " fails returning %d\n", cmdstr,
1127					    ret))
1128					break;
1129				}
1130				req.physpath = who;
1131				PMD(PMD_IOCTL, ("ioctl: %s: physpath=%s\n",
1132				    cmdstr, req.physpath))
1133			}
1134			if (!(pcip->inargs & INDATA)) {
1135				req.data = NULL;
1136				req.datasize = 0;
1137			}
1138			switch (pcip->diptype) {
1139			case DIP:
1140				if (!(dip =
1141				    pm_name_to_dip(req.physpath, 1))) {
1142					PMD(PMD_ERROR, ("ioctl: %s: "
1143					    "pm_name_to_dip for %s failed\n",
1144					    cmdstr, req.physpath))
1145					return (ENODEV);
1146				}
1147				ASSERT(!dipheld);
1148				dipheld++;
1149				break;
1150			case NODIP:
1151				break;
1152			default:
				/*
				 * Internal error: invalid ioctl description.
				 * Force a debug entry even if pm_debug is not
				 * set.
				 */
1157#ifdef	DEBUG
1158				pm_log("invalid diptype %d for cmd %d (%s)\n",
1159				    pcip->diptype, cmd, pcip->name);
1160#endif
1161				ASSERT(0);
1162				return (EIO);
1163			}
1164			if (pcip->inargs & INDATAINT) {
1165				int *ip;
1166
1167				ASSERT(!(pcip->inargs & INDATASTRING));
1168				ip = req.data;
1169				icount = req.datasize / sizeof (int);
1170				if (icount <= 0) {
1171					PMD(PMD_ERROR, ("ioctl: %s: datasize"
1172					    " 0 or neg EFAULT\n\n", cmdstr))
1173					ret = EFAULT;
1174					break;
1175				}
1176				req.data = kmem_alloc(req.datasize, KM_SLEEP);
1177				if (ddi_copyin((caddr_t)ip, req.data,
1178				    req.datasize, mode) != 0) {
1179					PMD(PMD_ERROR, ("ioctl: %s: ddi_copyin "
1180					    "EFAULT\n\n", cmdstr))
1181					ret = EFAULT;
1182					break;
1183				}
1184			}
1185			if (pcip->inargs & INDATASTRING) {
1186				ASSERT(!(pcip->inargs & INDATAINT));
1187				ASSERT(pcip->deptype == DEP);
1188				if (req.data != NULL) {
1189					if (copyinstr((caddr_t)req.data,
1190					    dep, deplen, NULL)) {
1191						PMD(PMD_ERROR, ("ioctl: %s: "
1192						    "0x%p dep size %lu, "
1193						    "EFAULT\n", cmdstr,
1194						    (void *)req.data, deplen))
1195						ret = EFAULT;
1196						break;
1197					}
1198#ifdef DEBUG
1199					else {
1200						PMD(PMD_DEP, ("ioctl: %s: "
1201						    "dep %s\n", cmdstr, dep))
1202					}
1203#endif
1204				} else {
1205					PMD(PMD_ERROR, ("ioctl: %s: no "
1206					    "dependent\n", cmdstr))
1207					ret = EINVAL;
1208					break;
1209				}
1210			}
1211		}
1212		/*
1213		 * Now we've got all the args in for the commands that
1214		 * use the new pm_req struct.
1215		 */
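		/*
		 * A minimal, illustrative sketch (not part of this driver) of
		 * how a userland consumer might issue one of these requests,
		 * e.g. PM_GET_CURRENT_POWER; the device path shown is just an
		 * example:
		 *
		 *	pm_req_t r;
		 *	int fd = open("/dev/pm", O_RDWR);
		 *	bzero(&r, sizeof (r));
		 *	r.physpath = "/pci@0,0/display@1";
		 *	r.component = 0;
		 *	int level = ioctl(fd, PM_GET_CURRENT_POWER, &r);
		 *
		 * On success, the current level comes back as the ioctl
		 * return value (*rval_p below).
		 */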
1216		switch (cmd) {
1217		case PM_REPARSE_PM_PROPS:
1218		{
1219			struct dev_ops	*drv;
1220			struct cb_ops	*cb;
1221			void		*propval;
1222			int length;
1223			/*
1224			 * This ioctl is provided only for the ddivs pm test.
1225			 * We only do it to a driver which explicitly allows
1226			 * us to do so by exporting a pm-reparse-ok property.
1227			 * We only care whether the property exists or not.
1228			 */
1229			if ((drv = ddi_get_driver(dip)) == NULL) {
1230				ret = EINVAL;
1231				break;
1232			}
1233			if ((cb = drv->devo_cb_ops) != NULL) {
1234				if ((*cb->cb_prop_op)(DDI_DEV_T_ANY, dip,
1235				    PROP_LEN_AND_VAL_ALLOC, (DDI_PROP_CANSLEEP |
1236				    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM),
1237				    "pm-reparse-ok", (caddr_t)&propval,
1238				    &length) != DDI_SUCCESS) {
1239					ret = EINVAL;
1240					break;
1241				}
1242			} else if (ddi_prop_op(DDI_DEV_T_ANY, dip,
1243			    PROP_LEN_AND_VAL_ALLOC, (DDI_PROP_CANSLEEP |
1244			    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM),
1245			    "pm-reparse-ok", (caddr_t)&propval,
1246			    &length) != DDI_SUCCESS) {
1247				ret = EINVAL;
1248				break;
1249			}
1250			kmem_free(propval, length);
1251			ret =  e_new_pm_props(dip);
1252			break;
1253		}
1254
1255		case PM_GET_DEVICE_THRESHOLD:
1256		{
1257			PM_LOCK_DIP(dip);
1258			if (!PM_GET_PM_INFO(dip) || PM_ISBC(dip)) {
1259				PM_UNLOCK_DIP(dip);
1260				PMD(PMD_ERROR, ("ioctl: %s: ENODEV\n",
1261				    cmdstr))
1262				ret = ENODEV;
1263				break;
1264			}
1265			*rval_p = DEVI(dip)->devi_pm_dev_thresh;
1266			PM_UNLOCK_DIP(dip);
1267			ret = 0;
1268			break;
1269		}
1270
1271		case PM_DIRECT_PM:
1272		{
1273			int has_dep;
1274			if ((info = PM_GET_PM_INFO(dip)) == NULL) {
1275				PMD(PMD_ERROR | PMD_DPM, ("ioctl: %s: "
1276				    "ENODEV\n", cmdstr))
1277				ret = ENODEV;
1278				break;
1279			}
			/*
			 * Check to see if there is a dependency on this kept
			 * device; if so, return EBUSY.
			 */
1284			(void) ddi_pathname(dip, pathbuf);
1285			pm_dispatch_to_dep_thread(PM_DEP_WK_CHECK_KEPT,
1286			    NULL, pathbuf, PM_DEP_WAIT, &has_dep, 0);
1287			if (has_dep) {
1288				PMD(PMD_ERROR | PMD_DPM, ("%s EBUSY\n",
1289				    cmdstr))
1290				ret = EBUSY;
1291				break;
1292			}
1293			PM_LOCK_DIP(dip);
1294			if (PM_ISDIRECT(dip) || (info->pmi_clone != 0)) {
1295				PMD(PMD_ERROR | PMD_DPM, ("ioctl: %s: "
1296				    "%s@%s(%s#%d): EBUSY\n", cmdstr,
1297				    PM_DEVICE(dip)))
1298				PM_UNLOCK_DIP(dip);
1299				ret = EBUSY;
1300				break;
1301			}
1302			info->pmi_dev_pm_state |= PM_DIRECT;
1303			info->pmi_clone = clone;
1304			PM_UNLOCK_DIP(dip);
1305			PMD(PMD_DPM, ("ioctl: %s: info %p, pmi_clone %d\n",
1306			    cmdstr, (void *)info, clone))
1307			mutex_enter(&pm_clone_lock);
1308			pm_register_watcher(clone, dip);
1309			mutex_exit(&pm_clone_lock);
1310			ret = 0;
1311			break;
1312		}
1313
1314		case PM_RELEASE_DIRECT_PM:
1315		{
1316			if ((info = PM_GET_PM_INFO(dip)) == NULL) {
1317				PMD(PMD_ERROR | PMD_DPM, ("ioctl: %s: "
1318				    "ENODEV\n", cmdstr))
1319				ret = ENODEV;
1320				break;
1321			}
1322			PM_LOCK_DIP(dip);
1323			if (info->pmi_clone != clone) {
1324				PMD(PMD_ERROR | PMD_DPM, ("ioctl: %s: "
1325				    "%s@%s(%s#%d) EINVAL\n", cmdstr,
1326				    PM_DEVICE(dip)))
1327				ret = EINVAL;
1328				PM_UNLOCK_DIP(dip);
1329				break;
1330			}
1331			ASSERT(PM_ISDIRECT(dip));
1332			info->pmi_dev_pm_state &= ~PM_DIRECT;
1333			PM_UNLOCK_DIP(dip);
1334			/* Bring ourselves up if there is a keeper. */
1335			(void) ddi_pathname(dip, pathbuf);
1336			pm_dispatch_to_dep_thread(PM_DEP_WK_BRINGUP_SELF,
1337			    NULL, pathbuf, PM_DEP_WAIT, NULL, 0);
1338			pm_discard_entries(clone);
1339			pm_deregister_watcher(clone, dip);
			/*
			 * Now we can let the other threads that are trying to
			 * do a DIRECT_PM through.
			 */
1344			PM_LOCK_DIP(dip);
1345			info->pmi_clone = 0;
1346			PM_UNLOCK_DIP(dip);
1347			pm_proceed(dip, PMP_RELEASE, -1, -1);
1348			PMD(PMD_RESCAN | PMD_DPM, ("ioctl: %s: rescan\n",
1349			    cmdstr))
1350			pm_rescan(dip);
1351			ret = 0;
1352			break;
1353		}
1354
1355		case PM_SET_CURRENT_POWER:
1356		{
1357			int comp = req.component;
1358			int  value = req.value;
1359			PMD(PMD_DPM, ("ioctl: %s: %s component %d to value "
1360			    "%d\n", cmdstr, req.physpath, comp, value))
1361			if (!e_pm_valid_comp(dip, comp, NULL) ||
1362			    !e_pm_valid_power(dip, comp, value)) {
1363				PMD(PMD_ERROR | PMD_DPM, ("ioctl: %s: "
1364				    "physpath=%s, comp=%d, level=%d, fails\n",
1365				    cmdstr, req.physpath, comp, value))
1366				ret = EINVAL;
1367				break;
1368			}
1369
1370			if ((info = PM_GET_PM_INFO(dip)) == NULL) {
1371				PMD(PMD_ERROR | PMD_DPM, ("ioctl: %s: "
1372				    "ENODEV\n", cmdstr))
1373				ret = ENODEV;
1374				break;
1375			}
1376			if (info->pmi_clone != clone) {
1377				PMD(PMD_ERROR | PMD_DPM, ("ioctl: %s: "
1378				    "(not owner) %s fails; clone %d, owner %d"
1379				    "\n", cmdstr, req.physpath, clone,
1380				    info->pmi_clone))
1381				ret = EINVAL;
1382				break;
1383			}
1384			ASSERT(PM_ISDIRECT(dip));
1385
1386			if (pm_set_power(dip, comp, value, PM_LEVEL_EXACT,
1387			    PM_CANBLOCK_BLOCK, 0, &ret) != DDI_SUCCESS) {
1388				PMD(PMD_ERROR | PMD_DPM, ("ioctl: %s: "
1389				    "pm_set_power for %s fails, errno=%d\n",
1390				    cmdstr, req.physpath, ret))
1391				break;
1392			}
1393
1394			pm_proceed(dip, PMP_SETPOWER, comp, value);
1395
1396			/*
1397			 * Power down all idle components if console framebuffer
1398			 * is powered off.
1399			 */
1400			if (PM_IS_CFB(dip) && (pm_system_idle_threshold ==
1401			    pm_default_idle_threshold)) {
1402				dev_info_t	*root = ddi_root_node();
1403				if (PM_ISBC(dip)) {
1404					if (comp == 0 && value == 0 &&
1405					    (pm_timeout_idledown() != 0)) {
1406						ddi_walk_devs(root,
1407						    pm_start_idledown,
1408						    (void *)PMID_CFB);
1409					}
1410				} else {
1411					int count = 0;
1412					for (i = 0; i < PM_NUMCMPTS(dip); i++) {
1413						ret = pm_get_current_power(dip,
1414						    i, &curpower);
1415						if (ret == DDI_SUCCESS &&
1416						    curpower == 0)
1417							count++;
1418					}
1419					if ((count == PM_NUMCMPTS(dip)) &&
1420					    (pm_timeout_idledown() != 0)) {
1421						ddi_walk_devs(root,
1422						    pm_start_idledown,
1423						    (void *)PMID_CFB);
1424					}
1425				}
1426			}
1427
1428			PMD(PMD_RESCAN | PMD_DPM, ("ioctl: %s: rescan\n",
1429			    cmdstr))
1430			pm_rescan(dip);
1431			*rval_p = 0;
1432			ret = 0;
1433			break;
1434		}
1435
1436		case PM_GET_FULL_POWER:
1437		{
1438			int normal;
1439			ASSERT(dip);
1440			PMD(PMD_NORM, ("ioctl: %s: %s component %d\n",
1441			    cmdstr, req.physpath, req.component))
1442			normal =  pm_get_normal_power(dip, req.component);
1443
1444			if (normal == DDI_FAILURE) {
1445				PMD(PMD_ERROR | PMD_NORM, ("ioctl: %s: "
1446				    "returns EINVAL\n", cmdstr))
1447				ret = EINVAL;
1448				break;
1449			}
1450			*rval_p = normal;
1451			PMD(PMD_NORM, ("ioctl: %s: returns %d\n",
1452			    cmdstr, normal))
1453			ret = 0;
1454			break;
1455		}
1456
1457		case PM_GET_CURRENT_POWER:
1458		{
1459			if (pm_get_current_power(dip, req.component,
1460			    rval_p) != DDI_SUCCESS) {
1461				PMD(PMD_ERROR | PMD_DPM, ("ioctl: %s "
1462				    "EINVAL\n", cmdstr))
1463				ret = EINVAL;
1464				break;
1465			}
1466			PMD(PMD_DPM, ("ioctl: %s: %s comp %d returns %d\n",
1467			    cmdstr, req.physpath, req.component, *rval_p))
1468			if (*rval_p == PM_LEVEL_UNKNOWN)
1469				ret = EAGAIN;
1470			else
1471				ret = 0;
1472			break;
1473		}
1474
1475		case PM_GET_TIME_IDLE:
1476		{
1477			time_t timestamp;
1478			int comp = req.component;
1479			pm_component_t *cp;
1480			if (!e_pm_valid_comp(dip, comp, &cp)) {
1481				PMD(PMD_ERROR, ("ioctl: %s: %s@%s(%s#%d) "
1482				    "component %d > numcmpts - 1 %d--EINVAL\n",
1483				    cmdstr, PM_DEVICE(dip), comp,
1484				    PM_NUMCMPTS(dip) - 1))
1485				ret = EINVAL;
1486				break;
1487			}
1488			timestamp = cp->pmc_timestamp;
1489			if (timestamp) {
1490				time_t now;
1491				(void) drv_getparm(TIME, &now);
1492				*rval_p = (now - timestamp);
1493			} else {
1494				*rval_p = 0;
1495			}
1496			ret = 0;
1497			break;
1498		}
1499
1500		case PM_ADD_DEPENDENT:
1501		{
1502			dev_info_t	*kept_dip;
1503
1504			PMD(PMD_KEEPS, ("%s, kept %s, keeper %s\n", cmdstr,
1505			    dep, req.physpath))
1506
			/*
			 * Hold and install the kept device while processing
			 * the dependency; the keeper (in req.physpath) has
			 * already been held.
			 */
1511			if (dep[0] == '\0') {
1512				PMD(PMD_ERROR, ("kept NULL or null\n"))
1513				ret = EINVAL;
1514				break;
1515			} else if ((kept_dip =
1516			    pm_name_to_dip(dep, 1)) == NULL) {
1517				PMD(PMD_ERROR, ("no dip for kept %s\n", dep))
1518				ret = ENODEV;
1519				break;
1520			} else if (kept_dip == dip) {
1521				PMD(PMD_ERROR, ("keeper(%s, %p) - kept(%s, %p) "
1522				    "self-dependency not allowed.\n",
1523				    dep, (void *)kept_dip, req.physpath,
1524				    (void *) dip))
1525				PM_RELE(dip);	/* release "double" hold */
1526				ret = EINVAL;
1527				break;
1528			}
1529			ASSERT(!(strcmp(req.physpath, (char *)dep) == 0));
1530
			/*
			 * Record the dependency, then walk the device tree
			 * independently on behalf of the kept and keeper
			 * devices to establish the newly created dependency.
			 */
1536			pm_dispatch_to_dep_thread(PM_DEP_WK_RECORD_KEEPER,
1537			    req.physpath, dep, PM_DEP_WAIT, NULL, 0);
1538
			/*
			 * Release the kept device after establishing the
			 * dependency; the keeper is released as part of ioctl
			 * exit processing.
			 */
1543			PM_RELE(kept_dip);
1544			*rval_p = 0;
1545			ret = 0;
1546			break;
1547		}
1548
1549		case PM_ADD_DEPENDENT_PROPERTY:
1550		{
1551			char *keeper, *kept;
1552
1553			if (dep[0] == '\0') {
1554				PMD(PMD_ERROR, ("ioctl: %s: dep NULL or "
1555				    "null\n", cmdstr))
1556				ret = EINVAL;
1557				break;
1558			}
1559			kept = dep;
1560			keeper = req.physpath;
			/*
			 * Record the keeper - kept dependency, then walk the
			 * device tree to find all attached keepers, and walk
			 * it again to apply the dependency to all potential
			 * kept devices.
			 */
1567			pm_dispatch_to_dep_thread(
1568			    PM_DEP_WK_RECORD_KEEPER_PROP, keeper, kept,
1569			    PM_DEP_WAIT, NULL, 0);
1570
1571			*rval_p = 0;
1572			ret = 0;
1573			break;
1574		}
1575
1576		case PM_SET_DEVICE_THRESHOLD:
1577		{
1578			pm_thresh_rec_t *rp;
1579			pm_pte_t *ep;	/* threshold header storage */
1580			int *tp;	/* threshold storage */
1581			size_t size;
1582			extern int pm_thresh_specd(dev_info_t *);
1583
1584			/*
1585			 * The header struct plus one entry struct plus one
1586			 * threshold plus the length of the string
1587			 */
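			/*
			 * Layout of the single allocation being built here:
			 *
			 *	[pm_thresh_rec_t][pm_pte_t][int][physpath '\0']
			 */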
1588			size = sizeof (pm_thresh_rec_t) +
1589			    (sizeof (pm_pte_t) * 1) +
1590			    (1 * sizeof (int)) +
1591			    strlen(req.physpath) + 1;
1592
1593			rp = kmem_zalloc(size, KM_SLEEP);
1594			rp->ptr_size = size;
1595			rp->ptr_numcomps = 0;	/* means device threshold */
1596			ep = (pm_pte_t *)((intptr_t)rp + sizeof (*rp));
1597			rp->ptr_entries = ep;
1598			tp = (int *)((intptr_t)ep +
1599			    (1 * sizeof (pm_pte_t)));
1600			ep->pte_numthresh = 1;
1601			ep->pte_thresh = tp;
1602			*tp++ = req.value;
1603			(void) strcat((char *)tp, req.physpath);
1604			rp->ptr_physpath = (char *)tp;
1605			ASSERT((intptr_t)tp + strlen(req.physpath) + 1 ==
1606			    (intptr_t)rp + rp->ptr_size);
1607			PMD(PMD_THRESH, ("ioctl: %s: record thresh %d for "
1608			    "%s\n", cmdstr, req.value, req.physpath))
1609			pm_record_thresh(rp);
			/*
			 * Don't free rp; pm_record_thresh() keeps it.
			 * We don't try to apply it ourselves because we'd need
			 * to know too much about locking.  Since we don't
			 * hold a lock, the entry could be removed before
			 * we get here.
			 */
1617			ASSERT(dip == NULL);
1618			ret = 0;		/* can't fail now */
1619			if (!(dip = pm_name_to_dip(req.physpath, 1))) {
1620				break;
1621			}
1622			(void) pm_thresh_specd(dip);
1623			PMD(PMD_DHR, ("ioctl: %s: releasing %s@%s(%s#%d)\n",
1624			    cmdstr, PM_DEVICE(dip)))
1625			PM_RELE(dip);
1626			break;
1627		}
1628
1629		case PM_RESET_DEVICE_THRESHOLD:
1630		{
			/*
			 * This only applies to a currently attached and power
			 * managed node.  We don't do this to old-style
			 * drivers.
			 */
1638			info = PM_GET_PM_INFO(dip);
1639			if (info == NULL) {
1640				PMD(PMD_ERROR, ("ioctl: %s: %s not power "
1641				    "managed\n", cmdstr, req.physpath))
1642				ret = EINVAL;
1643				break;
1644			}
1645			if (PM_ISBC(dip)) {
1646				PMD(PMD_ERROR, ("ioctl: %s: %s is BC\n",
1647				    cmdstr, req.physpath))
1648				ret = EINVAL;
1649				break;
1650			}
1651			pm_unrecord_threshold(req.physpath);
1652			if (DEVI(dip)->devi_pm_flags & PMC_CPU_THRESH)
1653				pm_set_device_threshold(dip,
1654				    pm_cpu_idle_threshold, PMC_CPU_THRESH);
1655			else
1656				pm_set_device_threshold(dip,
1657				    pm_system_idle_threshold, PMC_DEF_THRESH);
1658			ret = 0;
1659			break;
1660		}
1661
1662		case PM_GET_NUM_COMPONENTS:
1663		{
1664			ret = 0;
1665			*rval_p = PM_NUMCMPTS(dip);
1666			break;
1667		}
1668
1669		case PM_GET_DEVICE_TYPE:
1670		{
1671			ret = 0;
1672			if ((info = PM_GET_PM_INFO(dip)) == NULL) {
1673				PMD(PMD_ERROR, ("ioctl: %s: "
1674				    "PM_NO_PM_COMPONENTS\n", cmdstr))
1675				*rval_p = PM_NO_PM_COMPONENTS;
1676				break;
1677			}
1678			if (PM_ISBC(dip)) {
1679				*rval_p = PM_CREATE_COMPONENTS;
1680			} else {
1681				*rval_p = PM_AUTOPM;
1682			}
1683			break;
1684		}
1685
1686		case PM_SET_COMPONENT_THRESHOLDS:
1687		{
1688			int comps = 0;
1689			int *end = (int *)req.data + icount;
1690			pm_thresh_rec_t *rp;
1691			pm_pte_t *ep;	/* threshold header storage */
1692			int *tp;	/* threshold storage */
1693			int *ip;
1694			int j;
1695			size_t size;
1696			extern int pm_thresh_specd(dev_info_t *);
1697			extern int pm_valid_thresh(dev_info_t *,
1698			    pm_thresh_rec_t *);
1699
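			/*
			 * req.data is a flat int array: for each component, a
			 * count of thresholds followed by that many threshold
			 * values, with a 0 count terminating the list.  For
			 * example (illustrative values), {1, 10, 2, 5, 30, 0}
			 * describes two components: one with a single
			 * threshold of 10, and one with thresholds 5 and 30.
			 */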
1700			for (ip = req.data; *ip; ip++) {
1701				if (ip >= end) {
1702					ret = EFAULT;
1703					break;
1704				}
1705				comps++;
1706				/* skip over indicated number of entries */
1707				for (j = *ip; j; j--) {
1708					if (++ip >= end) {
1709						ret = EFAULT;
1710						break;
1711					}
1712				}
1713				if (ret)
1714					break;
1715			}
1716			if (ret)
1717				break;
1718			if ((intptr_t)ip != (intptr_t)end - sizeof (int)) {
1719				/* did not exactly fill buffer */
1720				ret = EINVAL;
1721				break;
1722			}
1723			if (comps == 0) {
1724				PMD(PMD_ERROR, ("ioctl: %s: %s 0 components"
1725				    "--EINVAL\n", cmdstr, req.physpath))
1726				ret = EINVAL;
1727				break;
1728			}
1729			/*
1730			 * The header struct plus one entry struct per component
1731			 * plus the size of the lists minus the counts
1732			 * plus the length of the string
1733			 */
1734			size = sizeof (pm_thresh_rec_t) +
1735			    (sizeof (pm_pte_t) * comps) + req.datasize -
1736			    ((comps + 1) * sizeof (int)) +
1737			    strlen(req.physpath) + 1;
1738
1739			rp = kmem_zalloc(size, KM_SLEEP);
1740			rp->ptr_size = size;
1741			rp->ptr_numcomps = comps;
1742			ep = (pm_pte_t *)((intptr_t)rp + sizeof (*rp));
1743			rp->ptr_entries = ep;
1744			tp = (int *)((intptr_t)ep +
1745			    (comps * sizeof (pm_pte_t)));
1746			for (ip = req.data; *ip; ep++) {
1747				ep->pte_numthresh = *ip;
1748				ep->pte_thresh = tp;
1749				for (j = *ip++; j; j--) {
1750					*tp++ = *ip++;
1751				}
1752			}
1753			(void) strcat((char *)tp, req.physpath);
1754			rp->ptr_physpath = (char *)tp;
1755			ASSERT((intptr_t)end == (intptr_t)ip + sizeof (int));
1756			ASSERT((intptr_t)tp + strlen(req.physpath) + 1 ==
1757			    (intptr_t)rp + rp->ptr_size);
1758
1759			ASSERT(dip == NULL);
1760			/*
1761			 * If this is not a currently power managed node,
1762			 * then we can't check for validity of the thresholds
1763			 */
1764			if (!(dip = pm_name_to_dip(req.physpath, 1))) {
1765				/* don't free rp, pm_record_thresh uses it */
1766				pm_record_thresh(rp);
1767				PMD(PMD_ERROR, ("ioctl: %s: pm_name_to_dip "
1768				    "for %s failed\n", cmdstr, req.physpath))
1769				ret = 0;
1770				break;
1771			}
1772			ASSERT(!dipheld);
1773			dipheld++;
1774
1775			if (!pm_valid_thresh(dip, rp)) {
1776				PMD(PMD_ERROR, ("ioctl: %s: invalid thresh "
1777				    "for %s@%s(%s#%d)\n", cmdstr,
1778				    PM_DEVICE(dip)))
1779				kmem_free(rp, size);
1780				ret = EINVAL;
1781				break;
1782			}
1783			/*
1784			 * We don't just apply it ourselves because we'd need
1785			 * to know too much about locking.  Since we don't
1786			 * hold a lock the entry could be removed before
1787			 * we get here
1788			 */
1789			pm_record_thresh(rp);
1790			(void) pm_thresh_specd(dip);
1791			ret = 0;
1792			break;
1793		}
1794
1795		case PM_GET_COMPONENT_THRESHOLDS:
1796		{
1797			int musthave;
1798			int numthresholds = 0;
1799			int wordsize;
1800			int numcomps;
1801			caddr_t uaddr = req.data;	/* user address */
1802			int val;	/* int value to be copied out */
1803			int32_t val32;	/* int32 value to be copied out */
1804			caddr_t vaddr;	/* address to copyout from */
1805			int j;
1806
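			/*
			 * The data copied out mirrors the SET format: for each
			 * component, a count followed by that component's
			 * threshold values, terminated by a 0 count.
			 */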
1807#ifdef	_MULTI_DATAMODEL
1808			if ((mode & DATAMODEL_MASK) == DATAMODEL_ILP32) {
1809				wordsize = sizeof (int32_t);
1810			} else
1811#endif /* _MULTI_DATAMODEL */
1812			{
1813				wordsize = sizeof (int);
1814			}
1815
1816			ASSERT(dip);
1817
1818			numcomps = PM_NUMCMPTS(dip);
1819			for (i = 0; i < numcomps; i++) {
1820				cp = PM_CP(dip, i);
1821				numthresholds += cp->pmc_comp.pmc_numlevels - 1;
1822			}
1823			musthave = (numthresholds + numcomps + 1) *  wordsize;
1824			if (req.datasize < musthave) {
1825				PMD(PMD_ERROR, ("ioctl: %s: size %ld, need "
1826				    "%d--EINVAL\n", cmdstr, req.datasize,
1827				    musthave))
1828				ret = EINVAL;
1829				break;
1830			}
1831			PM_LOCK_DIP(dip);
1832			for (i = 0; i < numcomps; i++) {
1833				int *thp;
1834				cp = PM_CP(dip, i);
1835				thp = cp->pmc_comp.pmc_thresh;
1836				/* first copyout the count */
1837				if (wordsize == sizeof (int32_t)) {
1838					val32 = cp->pmc_comp.pmc_numlevels - 1;
1839					vaddr = (caddr_t)&val32;
1840				} else {
1841					val = cp->pmc_comp.pmc_numlevels - 1;
1842					vaddr = (caddr_t)&val;
1843				}
1844				if (ddi_copyout(vaddr, (void *)uaddr,
1845				    wordsize, mode) != 0) {
1846					PM_UNLOCK_DIP(dip);
1847					PMD(PMD_ERROR, ("ioctl: %s: %s@%s"
1848					    "(%s#%d) vaddr %p EFAULT\n",
1849					    cmdstr, PM_DEVICE(dip),
1850					    (void*)vaddr))
1851					ret = EFAULT;
1852					break;
1853				}
1854				vaddr = uaddr;
1855				vaddr += wordsize;
1856				uaddr = (caddr_t)vaddr;
1857				/* then copyout each threshold value */
1858				for (j = 0; j < cp->pmc_comp.pmc_numlevels - 1;
1859				    j++) {
1860					if (wordsize == sizeof (int32_t)) {
1861						val32 = thp[j + 1];
1862						vaddr = (caddr_t)&val32;
1863					} else {
						val = thp[j + 1];
1865						vaddr = (caddr_t)&val;
1866					}
1867					if (ddi_copyout(vaddr, (void *) uaddr,
1868					    wordsize, mode) != 0) {
1869						PM_UNLOCK_DIP(dip);
1870						PMD(PMD_ERROR, ("ioctl: %s: "
1871						    "%s@%s(%s#%d) uaddr %p "
1872						    "EFAULT\n", cmdstr,
1873						    PM_DEVICE(dip),
1874						    (void *)uaddr))
1875						ret = EFAULT;
1876						break;
1877					}
1878					vaddr = uaddr;
1879					vaddr += wordsize;
1880					uaddr = (caddr_t)vaddr;
1881				}
1882			}
1883			if (ret)
1884				break;
1885			/* last copyout a terminating 0 count */
1886			if (wordsize == sizeof (int32_t)) {
1887				val32 = 0;
1888				vaddr = (caddr_t)&val32;
1889			} else {
1890				ASSERT(wordsize == sizeof (int));
1891				val = 0;
1892				vaddr = (caddr_t)&val;
1893			}
1894			if (ddi_copyout(vaddr, uaddr, wordsize, mode) != 0) {
1895				PM_UNLOCK_DIP(dip);
1896				PMD(PMD_ERROR, ("ioctl: %s: %s@%s(%s#%d) "
1897				    "vaddr %p (0 count) EFAULT\n", cmdstr,
1898				    PM_DEVICE(dip), (void *)vaddr))
1899				ret = EFAULT;
1900				break;
1901			}
1902			/* finished, so don't need to increment addresses */
1903			PM_UNLOCK_DIP(dip);
1904			ret = 0;
1905			break;
1906		}
1907
1908		case PM_GET_STATS:
1909		{
1910			time_t now;
1911			time_t *timestamp;
1912			extern int pm_cur_power(pm_component_t *cp);
1913			int musthave;
1914			int wordsize;
1915
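			/*
			 * The data returned is comps words of current power
			 * level followed by comps words of time remaining (in
			 * seconds) before each component's threshold expires;
			 * INT_MAX means the component is off or has no
			 * recorded timestamp.
			 */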
1916#ifdef	_MULTI_DATAMODEL
1917			if ((mode & DATAMODEL_MASK) == DATAMODEL_ILP32) {
1918				wordsize = sizeof (int32_t);
1919			} else
1920#endif /* _MULTI_DATAMODEL */
1921			{
1922				wordsize = sizeof (int);
1923			}
1924
1925			comps = PM_NUMCMPTS(dip);
1926			if (comps == 0 || PM_GET_PM_INFO(dip) == NULL) {
1927				PMD(PMD_ERROR, ("ioctl: %s: %s no components"
1928				    " or not power managed--EINVAL\n", cmdstr,
1929				    req.physpath))
1930				ret = EINVAL;
1931				break;
1932			}
1933			musthave = comps * 2 * wordsize;
1934			if (req.datasize < musthave) {
1935				PMD(PMD_ERROR, ("ioctl: %s: size %lu, need "
1936				    "%d--EINVAL\n", cmdstr, req.datasize,
1937				    musthave))
1938				ret = EINVAL;
1939				break;
1940			}
1941
1942			PM_LOCK_DIP(dip);
1943			(void) drv_getparm(TIME, &now);
1944			timestamp = kmem_zalloc(comps * sizeof (time_t),
1945			    KM_SLEEP);
1946			pm_get_timestamps(dip, timestamp);
1947			/*
1948			 * First the current power levels
1949			 */
1950			for (i = 0; i < comps; i++) {
1951				int curpwr;
1952				int32_t curpwr32;
1953				caddr_t cpaddr;
1954
1955				cp = PM_CP(dip, i);
1956				if (wordsize == sizeof (int)) {
1957					curpwr = pm_cur_power(cp);
1958					cpaddr = (caddr_t)&curpwr;
1959				} else {
1960					ASSERT(wordsize == sizeof (int32_t));
1961					curpwr32 = pm_cur_power(cp);
1962					cpaddr = (caddr_t)&curpwr32;
1963				}
1964				if (ddi_copyout(cpaddr, (void *) req.data,
1965				    wordsize, mode) != 0) {
1966					PM_UNLOCK_DIP(dip);
1967					PMD(PMD_ERROR, ("ioctl: %s: %s@%s"
1968					    "(%s#%d) req.data %p EFAULT\n",
1969					    cmdstr, PM_DEVICE(dip),
1970					    (void *)req.data))
1971					ASSERT(!dipheld);
					kmem_free(timestamp,
					    comps * sizeof (time_t));
1972					return (EFAULT);
1973				}
1974				cpaddr = (caddr_t)req.data;
1975				cpaddr += wordsize;
1976				req.data = cpaddr;
1977			}
1978			/*
1979			 * Then the times remaining
1980			 */
1981			for (i = 0; i < comps; i++) {
1982				int retval;
1983				int32_t retval32;
1984				caddr_t rvaddr;
1985				int curpwr;
1986
1987				cp = PM_CP(dip, i);
1988				curpwr = cp->pmc_cur_pwr;
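				/*
				 * A component whose current power is 0 or
				 * which has no timestamp reports INT_MAX;
				 * otherwise the time remaining is the current
				 * threshold minus the time already spent idle.
				 */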
1989				if (curpwr == 0 || timestamp[i] == 0) {
1990					PMD(PMD_STATS, ("ioctl: %s: "
1991				    "cur_pwr %x, timestamp %lx\n",
1992					    cmdstr, curpwr, timestamp[i]))
1993					retval = INT_MAX;
1994				} else {
1995					int thresh;
1996					(void) pm_current_threshold(dip, i,
1997					    &thresh);
1998					retval = thresh - (now - timestamp[i]);
1999					PMD(PMD_STATS, ("ioctl: %s: current "
2000					    "thresh %x, now %lx, timestamp %lx,"
2001					    " retval %x\n", cmdstr, thresh, now,
2002					    timestamp[i], retval))
2003				}
2004				if (wordsize == sizeof (int)) {
2005					rvaddr = (caddr_t)&retval;
2006				} else {
2007					ASSERT(wordsize == sizeof (int32_t));
2008					retval32 = retval;
2009					rvaddr = (caddr_t)&retval32;
2010				}
2011				if (ddi_copyout(rvaddr, (void *) req.data,
2012				    wordsize, mode) != 0) {
2013					PM_UNLOCK_DIP(dip);
2014					PMD(PMD_ERROR, ("ioctl: %s: %s@%s"
2015					    "(%s#%d) req.data %p EFAULT\n",
2016					    cmdstr, PM_DEVICE(dip),
2017					    (void *)req.data))
2018					ASSERT(!dipheld);
2019					kmem_free(timestamp,
2020					    comps * sizeof (time_t));
2021					return (EFAULT);
2022				}
2023				rvaddr = (caddr_t)req.data;
2024				rvaddr += wordsize;
2025				req.data = (int *)rvaddr;
2026			}
2027			PM_UNLOCK_DIP(dip);
2028			*rval_p = comps;
2029			ret = 0;
2030			kmem_free(timestamp, comps * sizeof (time_t));
2031			break;
2032		}
2033
2034		case PM_GET_CMD_NAME:
2035		{
2036			PMD(PMD_IOCTL, ("%s: %s\n", cmdstr,
2037			    pm_decode_cmd(req.value)))
2038			if (ret = copyoutstr(pm_decode_cmd(req.value),
2039			    (char *)req.data, req.datasize, &lencopied)) {
2040				PMD(PMD_ERROR, ("ioctl: %s: %s@%s(%s#%d) "
2041				    "copyoutstr %p failed--EFAULT\n", cmdstr,
2042				    PM_DEVICE(dip), (void *)req.data))
2043				break;
2044			}
2045			*rval_p = lencopied;
2046			ret = 0;
2047			break;
2048		}
2049
2050		case PM_GET_COMPONENT_NAME:
2051		{
2052			ASSERT(dip);
2053			if (!e_pm_valid_comp(dip, req.component, &cp)) {
2054				PMD(PMD_ERROR, ("ioctl: %s: %s@%s(%s#%d) "
2055				    "component %d > numcmpts - 1 %d--EINVAL\n",
2056				    cmdstr, PM_DEVICE(dip), req.component,
2057				    PM_NUMCMPTS(dip) - 1))
2058				ret = EINVAL;
2059				break;
2060			}
2061			if (ret = copyoutstr(cp->pmc_comp.pmc_name,
2062			    (char *)req.data, req.datasize, &lencopied)) {
2063				PMD(PMD_ERROR, ("ioctl: %s: %s@%s(%s#%d) "
2064				    "copyoutstr %p failed--EFAULT\n", cmdstr,
2065				    PM_DEVICE(dip), (void *)req.data))
2066				break;
2067			}
2068			*rval_p = lencopied;
2069			ret = 0;
2070			break;
2071		}
2072
2073		case PM_GET_POWER_NAME:
2074		{
2075			int i;
2076
2077			ASSERT(dip);
2078			if (!e_pm_valid_comp(dip, req.component, &cp)) {
2079				PMD(PMD_ERROR, ("ioctl: %s: %s@%s(%s#%d) "
2080				    "component %d > numcmpts - 1 %d--EINVAL\n",
2081				    cmdstr, PM_DEVICE(dip), req.component,
2082				    PM_NUMCMPTS(dip) - 1))
2083				ret = EINVAL;
2084				break;
2085			}
2086			if ((i = req.value) < 0 ||
2087			    i > cp->pmc_comp.pmc_numlevels - 1) {
2088				PMD(PMD_ERROR, ("ioctl: %s: %s@%s(%s#%d) "
2089				    "value %d > num_levels - 1 %d--EINVAL\n",
2090				    cmdstr, PM_DEVICE(dip), req.value,
2091				    cp->pmc_comp.pmc_numlevels - 1))
2092				ret = EINVAL;
2093				break;
2094			}
2095			dep = cp->pmc_comp.pmc_lnames[req.value];
2096			if (ret = copyoutstr(dep,
2097			    req.data, req.datasize, &lencopied)) {
2098				PMD(PMD_ERROR, ("ioctl: %s: %s@%s(%s#%d) "
2099				    "copyoutstr %p failed--EFAULT\n", cmdstr,
2100				    PM_DEVICE(dip), (void *)req.data))
2101				break;
2102			}
2103			*rval_p = lencopied;
2104			ret = 0;
2105			break;
2106		}
2107
2108		case PM_GET_POWER_LEVELS:
2109		{
2110			int musthave;
2111			int numlevels;
2112			int wordsize;
2113
2114#ifdef	_MULTI_DATAMODEL
2115			if ((mode & DATAMODEL_MASK) == DATAMODEL_ILP32) {
2116				wordsize = sizeof (int32_t);
2117			} else
2118#endif /* _MULTI_DATAMODEL */
2119			{
2120				wordsize = sizeof (int);
2121			}
2122			ASSERT(dip);
2123
2124			if (!e_pm_valid_comp(dip, req.component, &cp)) {
2125				PMD(PMD_ERROR, ("ioctl: %s: %s@%s(%s#%d) "
2126				    "has %d components, component %d requested"
2127				    "--EINVAL\n", cmdstr, PM_DEVICE(dip),
2128				    PM_NUMCMPTS(dip), req.component))
2129				ret = EINVAL;
2130				break;
2131			}
2132			numlevels = cp->pmc_comp.pmc_numlevels;
2133			musthave = numlevels * wordsize;
2134			if (req.datasize < musthave) {
2135				PMD(PMD_ERROR, ("ioctl: %s: size %lu, need "
2136				    "%d--EINVAL\n", cmdstr, req.datasize,
2137				    musthave))
2138				ret = EINVAL;
2139				break;
2140			}
2141			PM_LOCK_DIP(dip);
2142			for (i = 0; i < numlevels; i++) {
2143				int level;
2144				int32_t level32;
2145				caddr_t laddr;
2146
2147				if (wordsize == sizeof (int)) {
2148					level = cp->pmc_comp.pmc_lvals[i];
2149					laddr = (caddr_t)&level;
2150				} else {
2151					level32 = cp->pmc_comp.pmc_lvals[i];
2152					laddr = (caddr_t)&level32;
2153				}
2154				if (ddi_copyout(laddr, (void *) req.data,
2155				    wordsize, mode) != 0) {
2156					PM_UNLOCK_DIP(dip);
2157					PMD(PMD_ERROR, ("ioctl: %s: %s@%s"
2158					    "(%s#%d) laddr %p EFAULT\n",
2159					    cmdstr, PM_DEVICE(dip),
2160					    (void *)laddr))
2161					ASSERT(!dipheld);
2162					return (EFAULT);
2163				}
2164				laddr = (caddr_t)req.data;
2165				laddr += wordsize;
2166				req.data = (int *)laddr;
2167			}
2168			PM_UNLOCK_DIP(dip);
2169			*rval_p = numlevels;
2170			ret = 0;
2171			break;
2172		}
2173
2174
2175		case PM_GET_NUM_POWER_LEVELS:
2176		{
2177			if (!e_pm_valid_comp(dip, req.component, &cp)) {
2178				PMD(PMD_ERROR, ("ioctl: %s: %s@%s(%s#%d) "
2179				    "component %d > numcmpts - 1 %d--EINVAL\n",
2180				    cmdstr, PM_DEVICE(dip), req.component,
2181				    PM_NUMCMPTS(dip) - 1))
2182				ret = EINVAL;
2183				break;
2184			}
2185			*rval_p = cp->pmc_comp.pmc_numlevels;
2186			ret = 0;
2187			break;
2188		}
2189
2190		case PM_GET_DEVICE_THRESHOLD_BASIS:
2191		{
2192			ret = 0;
2193			PM_LOCK_DIP(dip);
2194			if ((info = PM_GET_PM_INFO(dip)) == NULL) {
2195				PM_UNLOCK_DIP(dip);
2196				PMD(PMD_ERROR, ("ioctl: %s: "
2197				    "PM_NO_PM_COMPONENTS\n", cmdstr))
2198				*rval_p = PM_NO_PM_COMPONENTS;
2199				break;
2200			}
2201			if (PM_ISDIRECT(dip)) {
2202				PM_UNLOCK_DIP(dip);
2203				*rval_p = PM_DIRECTLY_MANAGED;
2204				break;
2205			}
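			/*
			 * Translate the device's threshold flag bits into
			 * the corresponding ioctl return value; old-style
			 * (BC) devices report PM_OLD_THRESHOLD.
			 */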
2206			switch (DEVI(dip)->devi_pm_flags & PMC_THRESH_ALL) {
2207			case PMC_DEF_THRESH:
2208			case PMC_NEXDEF_THRESH:
2209				*rval_p = PM_DEFAULT_THRESHOLD;
2210				break;
2211			case PMC_DEV_THRESH:
2212				*rval_p = PM_DEVICE_THRESHOLD;
2213				break;
2214			case PMC_COMP_THRESH:
2215				*rval_p = PM_COMPONENT_THRESHOLD;
2216				break;
2217			case PMC_CPU_THRESH:
2218				*rval_p = PM_CPU_THRESHOLD;
2219				break;
2220			default:
2221				if (PM_ISBC(dip)) {
2222					*rval_p = PM_OLD_THRESHOLD;
2223					break;
2224				}
2225				PMD(PMD_ERROR, ("ioctl: %s: default, not "
2226				    "BC--EINVAL", cmdstr))
2227				ret = EINVAL;
2228				break;
2229			}
2230			PM_UNLOCK_DIP(dip);
2231			break;
2232		}
2233		default:
2234			/*
2235			 * Internal error, invalid ioctl description;
2236			 * force debug entry even if pm_debug not set.
2237			 */
2238#ifdef	DEBUG
2239			pm_log("invalid diptype %d for cmd %d (%s)\n",
2240			    pcip->diptype, cmd, pcip->name);
2241#endif
2242			ASSERT(0);
2243			return (EIO);
2244		}
2245		break;
2246	}
2247
2248	case PM_PSC:
2249	{
2250		/*
2251		 * Commands that require pm_state_change_t as arg
2252		 */
2253#ifdef	_MULTI_DATAMODEL
2254		if ((mode & DATAMODEL_MASK) == DATAMODEL_ILP32) {
2255			pscp32 = (pm_state_change32_t *)arg;
2256			if (ddi_copyin((caddr_t)arg, &psc32,
2257			    sizeof (psc32), mode) != 0) {
2258				PMD(PMD_ERROR, ("ioctl: %s: ddi_copyin "
2259				    "EFAULT\n\n", cmdstr))
2260				ASSERT(!dipheld);
2261				return (EFAULT);
2262			}
2263			psc.physpath = (caddr_t)(uintptr_t)psc32.physpath;
2264			psc.size = psc32.size;
2265		} else
2266#endif /* _MULTI_DATAMODEL */
2267		{
2268			pscp = (pm_state_change_t *)arg;
2269			if (ddi_copyin((caddr_t)arg, &psc,
2270			    sizeof (psc), mode) != 0) {
2271				PMD(PMD_ERROR, ("ioctl: %s: ddi_copyin "
2272				    "EFAULT\n\n", cmdstr))
2273				ASSERT(!dipheld);
2274				return (EFAULT);
2275			}
2276		}
2277		switch (cmd) {
2278
2279		case PM_GET_STATE_CHANGE:
2280		case PM_GET_STATE_CHANGE_WAIT:
2281		{
2282			psce_t			*pscep;
2283			pm_state_change_t	*p;
2284			caddr_t			physpath;
2285			size_t			physlen;
2286
2287			/*
2288			 * We want to know if any device has changed state.
2289			 * We look up by clone.  In case we have another thread
2290			 * from the same process, we loop.
2291			 * pm_psc_clone_to_interest() returns a locked entry.
2292			 * We create an internal copy of the event entry prior
2293			 * to copyout to user space because we don't want to
2294			 * hold the psce_lock while doing the copyout, as we
2295			 * might hit a page fault which eventually brings us
2296			 * back here requesting the same lock.
2297			 */
2298			mutex_enter(&pm_clone_lock);
2299			if (!pm_interest_registered(clone))
2300				pm_register_watcher(clone, NULL);
2301			while ((pscep =
2302			    pm_psc_clone_to_interest(clone)) == NULL) {
2303				if (cmd == PM_GET_STATE_CHANGE) {
2304					PMD(PMD_IOCTL, ("ioctl: %s: "
2305					    "EWOULDBLOCK\n", cmdstr))
2306					mutex_exit(&pm_clone_lock);
2307					ASSERT(!dipheld);
2308					return (EWOULDBLOCK);
2309				} else {
2310					if (cv_wait_sig(&pm_clones_cv[clone],
2311					    &pm_clone_lock) == 0) {
2312						mutex_exit(&pm_clone_lock);
2313						PMD(PMD_ERROR, ("ioctl: %s "
2314						    "EINTR\n", cmdstr))
2315						ASSERT(!dipheld);
2316						return (EINTR);
2317					}
2318				}
2319			}
2320			mutex_exit(&pm_clone_lock);
2321
2322			physlen = pscep->psce_out->size;
2323			physpath = NULL;
2324			/*
2325			 * If we were unable to store the path while bringing
2326			 * up the console fb upon entering the prom, we return
2327			 * a "" name with the overrun event set.
2328			 */
2329			if (physlen == (size_t)-1) {	/* kmem_alloc failed */
2330				physpath = kmem_zalloc(1, KM_SLEEP);
2331				physlen = 1;
2332			}
2333			if ((psc.physpath == NULL) || (psc.size < physlen)) {
2334				PMD(PMD_ERROR, ("ioctl: %s: EFAULT\n", cmdstr))
2335				mutex_exit(&pscep->psce_lock);
2336				ret = EFAULT;
2337				break;
2338			}
2339			if (physpath == NULL) {
2340				physpath = kmem_zalloc(physlen, KM_SLEEP);
2341				bcopy((const void *) pscep->psce_out->physpath,
2342				    (void *) physpath, physlen);
2343			}
2344
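			/*
			 * psce_out is the oldest unconsumed entry in this
			 * clone's event buffer.  Copy its fields into a local
			 * struct, free its physpath, and advance psce_out,
			 * wrapping from psce_last back to psce_first.
			 */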
2345			p = pscep->psce_out;
2346#ifdef	_MULTI_DATAMODEL
2347			if ((mode & DATAMODEL_MASK) == DATAMODEL_ILP32) {
2348#ifdef DEBUG
2349				size_t usrcopysize;
2350#endif
2351				psc32.flags = (ushort_t)p->flags;
2352				psc32.event = (ushort_t)p->event;
2353				psc32.timestamp = (int32_t)p->timestamp;
2354				psc32.component = (int32_t)p->component;
2355				psc32.old_level = (int32_t)p->old_level;
2356				psc32.new_level = (int32_t)p->new_level;
2357				copysize32 = ((intptr_t)&psc32.size -
2358				    (intptr_t)&psc32.component);
2359#ifdef DEBUG
2360				usrcopysize = ((intptr_t)&pscp32->size -
2361				    (intptr_t)&pscp32->component);
2362				ASSERT(usrcopysize == copysize32);
2363#endif
2364			} else
2365#endif /* _MULTI_DATAMODEL */
2366			{
2367				psc.flags = p->flags;
2368				psc.event = p->event;
2369				psc.timestamp = p->timestamp;
2370				psc.component = p->component;
2371				psc.old_level = p->old_level;
2372				psc.new_level = p->new_level;
2373				copysize = ((long)&p->size -
2374				    (long)&p->component);
2375			}
2376			if (p->size != (size_t)-1)
2377				kmem_free(p->physpath, p->size);
2378			p->size = 0;
2379			p->physpath = NULL;
2380			if (pscep->psce_out == pscep->psce_last)
2381				p = pscep->psce_first;
2382			else
2383				p++;
2384			pscep->psce_out = p;
2385			mutex_exit(&pscep->psce_lock);
2386
2387			ret = copyoutstr(physpath, psc.physpath,
2388			    physlen, &lencopied);
2389			kmem_free(physpath, physlen);
2390			if (ret) {
2391				PMD(PMD_ERROR, ("ioctl: %s: copyoutstr %p "
2392				    "failed--EFAULT\n", cmdstr,
2393				    (void *)psc.physpath))
2394				break;
2395			}
2396
2397#ifdef	_MULTI_DATAMODEL
2398			if ((mode & DATAMODEL_MASK) == DATAMODEL_ILP32) {
2399				if (ddi_copyout(&psc32.component,
2400				    &pscp32->component, copysize32, mode)
2401				    != 0) {
2402					PMD(PMD_ERROR, ("ioctl: %s: copyout "
2403					    "failed--EFAULT\n", cmdstr))
2404					ret = EFAULT;
2405					break;
2406				}
2407			} else
2408#endif	/* _MULTI_DATAMODEL */
2409			{
2410				if (ddi_copyout(&psc.component,
2411				    &pscp->component, copysize, mode) != 0) {
2412					PMD(PMD_ERROR, ("ioctl: %s: copyout "
2413					    "failed--EFAULT\n", cmdstr))
2414					ret = EFAULT;
2415					break;
2416				}
2417			}
2418			ret = 0;
2419			break;
2420		}
2421
2422		case PM_DIRECT_NOTIFY:
2423		case PM_DIRECT_NOTIFY_WAIT:
2424		{
2425			psce_t			*pscep;
2426			pm_state_change_t	*p;
2427			caddr_t			physpath;
2428			size_t			physlen;
2429			/*
2430			 * We want to know if any direct device of ours has
2431			 * something we should know about.  We look up by clone.
2432			 * In case we have another thread from the same process,
2433			 * we loop.
2434			 * pm_psc_clone_to_direct() returns a locked entry.
2435			 */
2436			mutex_enter(&pm_clone_lock);
2437			while (pm_poll_cnt[clone] == 0 ||
2438			    (pscep = pm_psc_clone_to_direct(clone)) == NULL) {
2439				if (cmd == PM_DIRECT_NOTIFY) {
2440					PMD(PMD_IOCTL, ("ioctl: %s: "
2441					    "EWOULDBLOCK\n", cmdstr))
2442					mutex_exit(&pm_clone_lock);
2443					ASSERT(!dipheld);
2444					return (EWOULDBLOCK);
2445				} else {
2446					if (cv_wait_sig(&pm_clones_cv[clone],
2447					    &pm_clone_lock) == 0) {
2448						mutex_exit(&pm_clone_lock);
2449						PMD(PMD_ERROR, ("ioctl: %s: "
2450						    "EINTR\n", cmdstr))
2451						ASSERT(!dipheld);
2452						return (EINTR);
2453					}
2454				}
2455			}
2456			mutex_exit(&pm_clone_lock);
2457			physlen = pscep->psce_out->size;
2458			if ((psc.physpath == NULL) || (psc.size < physlen)) {
2459				mutex_exit(&pscep->psce_lock);
2460				PMD(PMD_ERROR, ("ioctl: %s: EFAULT\n",
2461				    cmdstr))
2462				ret = EFAULT;
2463				break;
2464			}
2465			physpath = kmem_zalloc(physlen, KM_SLEEP);
2466			bcopy((const void *) pscep->psce_out->physpath,
2467			    (void *) physpath, physlen);
2468
2469			p = pscep->psce_out;
2470#ifdef	_MULTI_DATAMODEL
2471			if ((mode & DATAMODEL_MASK) == DATAMODEL_ILP32) {
2472#ifdef DEBUG
2473				size_t usrcopysize;
2474#endif
2475				psc32.component = (int32_t)p->component;
2476				psc32.flags = (ushort_t)p->flags;
2477				psc32.event = (ushort_t)p->event;
2478				psc32.timestamp = (int32_t)p->timestamp;
2479				psc32.old_level = (int32_t)p->old_level;
2480				psc32.new_level = (int32_t)p->new_level;
2481				copysize32 = (intptr_t)&psc32.size -
2482				    (intptr_t)&psc32.component;
2483				PMD(PMD_DPM, ("ioctl: %s: PDN32 %s, comp %d "
2484				    "%d -> %d\n", cmdstr, physpath,
2485				    p->component, p->old_level, p->new_level))
2486#ifdef DEBUG
2487				usrcopysize = (intptr_t)&pscp32->size -
2488				    (intptr_t)&pscp32->component;
2489				ASSERT(usrcopysize == copysize32);
2490#endif
2491			} else
2492#endif
2493			{
2494				psc.component = p->component;
2495				psc.flags = p->flags;
2496				psc.event = p->event;
2497				psc.timestamp = p->timestamp;
2498				psc.old_level = p->old_level;
2499				psc.new_level = p->new_level;
2500				copysize = (intptr_t)&p->size -
2501				    (intptr_t)&p->component;
2502				PMD(PMD_DPM, ("ioctl: %s: PDN %s, comp %d "
2503				    "%d -> %d\n", cmdstr, physpath,
2504				    p->component, p->old_level, p->new_level))
2505			}
2506			mutex_enter(&pm_clone_lock);
2507			PMD(PMD_IOCTL, ("ioctl: %s: pm_poll_cnt[%d] is %d "
2508			    "before decrement\n", cmdstr, clone,
2509			    pm_poll_cnt[clone]))
2510			pm_poll_cnt[clone]--;
2511			mutex_exit(&pm_clone_lock);
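			/*
			 * Consume the entry: free the physpath saved in it
			 * and advance psce_out, wrapping from psce_last back
			 * to psce_first.
			 */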
2512			kmem_free(p->physpath, p->size);
2513			p->size = 0;
2514			p->physpath = NULL;
2515			if (pscep->psce_out == pscep->psce_last)
2516				p = pscep->psce_first;
2517			else
2518				p++;
2519			pscep->psce_out = p;
2520			mutex_exit(&pscep->psce_lock);
2521
2522			ret = copyoutstr(physpath, psc.physpath,
2523			    physlen, &lencopied);
2524			kmem_free(physpath, physlen);
2525			if (ret) {
2526				PMD(PMD_ERROR, ("ioctl: %s: copyoutstr %p "
2527				    "failed--EFAULT\n", cmdstr,
2528				    (void *)psc.physpath))
2529				break;
2530			}
2531
2532#ifdef	_MULTI_DATAMODEL
2533			if ((mode & DATAMODEL_MASK) == DATAMODEL_ILP32) {
2534				if (ddi_copyout(&psc32.component,
2535				    &pscp32->component, copysize32, mode)
2536				    != 0) {
2537					PMD(PMD_ERROR, ("ioctl: %s: copyout "
2538					    "failed--EFAULT\n", cmdstr))
2539					ret = EFAULT;
2540					break;
2541				}
2542			} else
2543#endif	/* _MULTI_DATAMODEL */
2544			{
2545				if (ddi_copyout(&psc.component,
2546				    &pscp->component, copysize, mode) != 0) {
2547					PMD(PMD_ERROR, ("ioctl: %s: copyout "
2548					    "failed--EFAULT\n", cmdstr))
2549					ret = EFAULT;
2550					break;
2551				}
2552			}
2553			ret = 0;
2554			break;
2555		}
2556		default:
2557			/*
2558			 * Internal error, invalid ioctl description;
2559			 * force debug entry even if pm_debug not set.
2560			 */
2561#ifdef	DEBUG
2562			pm_log("invalid diptype %d for cmd %d (%s)\n",
2563			    pcip->diptype, cmd, pcip->name);
2564#endif
2565			ASSERT(0);
2566			return (EIO);
2567		}
2568		break;
2569	}
2570
2571	case PM_SRCH:		/* command that takes a pm_searchargs_t arg */
2572	{
2573		/*
2574		 * If no ppm, then there is nothing to search.
2575		 */
2576		if (DEVI(ddi_root_node())->devi_pm_ppm == NULL) {
2577			ret = ENODEV;
2578			break;
2579		}
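		/*
		 * Copy in the search arguments, then copy each of the three
		 * strings (list name, manufacturer, product) into bounded
		 * MAXCOPYBUF buffers before pointing psa at the kernel
		 * copies.
		 */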
2580
2581#ifdef	_MULTI_DATAMODEL
2582		if ((mode & DATAMODEL_MASK) == DATAMODEL_ILP32) {
2583			if (ddi_copyin((caddr_t)arg, &psa32,
2584			    sizeof (psa32), mode) != 0) {
2585				PMD(PMD_ERROR, ("ioctl: %s: ddi_copyin "
2586				    "EFAULT\n\n", cmdstr))
2587				return (EFAULT);
2588			}
2589			if (copyinstr((void *)(uintptr_t)psa32.pms_listname,
2590			    listname, MAXCOPYBUF, NULL)) {
2591				PMD(PMD_ERROR, ("ioctl: %s: 0x%p MAXCOPYBUF "
2592				    "%d, " "EFAULT\n", cmdstr,
2593				    (void *)(uintptr_t)psa32.pms_listname,
2594				    MAXCOPYBUF))
2595				ret = EFAULT;
2596				break;
2597			}
2598			if (copyinstr((void *)(uintptr_t)psa32.pms_manufacturer,
2599			    manufacturer, MAXCOPYBUF, NULL)) {
2600				PMD(PMD_ERROR, ("ioctl: %s: 0x%p MAXCOPYBUF "
2601				    "%d, " "EFAULT\n", cmdstr,
2602				    (void *)(uintptr_t)psa32.pms_manufacturer,
2603				    MAXCOPYBUF))
2604				ret = EFAULT;
2605				break;
2606			}
2607			if (copyinstr((void *)(uintptr_t)psa32.pms_product,
2608			    product, MAXCOPYBUF, NULL)) {
2609				PMD(PMD_ERROR, ("ioctl: %s: 0x%p MAXCOPYBUF "
2610				    "%d, " "EFAULT\n", cmdstr,
2611				    (void *)(uintptr_t)psa32.pms_product,
2612				    MAXCOPYBUF))
2613				ret = EFAULT;
2614				break;
2615			}
2616		} else
2617#endif /* _MULTI_DATAMODEL */
2618		{
2619			if (ddi_copyin((caddr_t)arg, &psa,
2620			    sizeof (psa), mode) != 0) {
2621				PMD(PMD_ERROR, ("ioctl: %s: ddi_copyin "
2622				    "EFAULT\n\n", cmdstr))
2623				return (EFAULT);
2624			}
2625			if (copyinstr(psa.pms_listname,
2626			    listname, MAXCOPYBUF, NULL)) {
2627				PMD(PMD_ERROR, ("ioctl: %s: 0x%p MAXCOPYBUF "
2628				    "%d, " "EFAULT\n", cmdstr,
2629				    (void *)psa.pms_listname, MAXCOPYBUF))
2630				ret = EFAULT;
2631				break;
2632			}
2633			if (copyinstr(psa.pms_manufacturer,
2634			    manufacturer, MAXCOPYBUF, NULL)) {
2635				PMD(PMD_ERROR, ("ioctl: %s: 0x%p MAXCOPYBUF "
2636				    "%d, " "EFAULT\n", cmdstr,
2637				    (void *)psa.pms_manufacturer, MAXCOPYBUF))
2638				ret = EFAULT;
2639				break;
2640			}
2641			if (copyinstr(psa.pms_product,
2642			    product, MAXCOPYBUF, NULL)) {
2643				PMD(PMD_ERROR, ("ioctl: %s: 0x%p MAXCOPYBUF "
2644				    "%d, " "EFAULT\n", cmdstr,
2645				    (void *)psa.pms_product, MAXCOPYBUF))
2646				ret = EFAULT;
2647				break;
2648			}
2649		}
2650		psa.pms_listname = listname;
2651		psa.pms_manufacturer = manufacturer;
2652		psa.pms_product = product;
2653		switch (cmd) {
2654		case PM_SEARCH_LIST:
2655			ret = pm_ppm_searchlist(&psa);
2656			break;
2657
2658		default:
2659			/*
2660			 * Internal error, invalid ioctl description;
2661			 * force debug entry even if pm_debug not set.
2662			 */
2663#ifdef	DEBUG
2664			pm_log("invalid diptype %d for cmd %d (%s)\n",
2665			    pcip->diptype, cmd, pcip->name);
2666#endif
2667			ASSERT(0);
2668			return (EIO);
2669		}
2670		break;
2671	}
2672
2673	case NOSTRUCT:
2674	{
2675		switch (cmd) {
2676		case PM_START_PM:
2677		case PM_START_CPUPM:
2678		case PM_START_CPUPM_EV:
2679		case PM_START_CPUPM_POLL:
2680		{
2681			pm_cpupm_t	new_mode = PM_CPUPM_NOTSET;
2682			pm_cpupm_t	old_mode = PM_CPUPM_NOTSET;
2683			int		r;
2684
2685			mutex_enter(&pm_scan_lock);
2686			if ((cmd == PM_START_PM && autopm_enabled) ||
2687			    (cmd == PM_START_CPUPM && PM_DEFAULT_CPUPM) ||
2688			    (cmd == PM_START_CPUPM_EV && PM_EVENT_CPUPM) ||
2689			    (cmd == PM_START_CPUPM_POLL && PM_POLLING_CPUPM)) {
2690				mutex_exit(&pm_scan_lock);
2691				PMD(PMD_ERROR, ("ioctl: %s: EBUSY\n", cmdstr))
2692				ret = EBUSY;
2693				break;
2694			}
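			/*
			 * Record the previous CPUPM mode and select the new
			 * one while pm_scan_lock is held; the transition
			 * itself (stopping the old mode, starting the new
			 * one) is done below after the lock is dropped.
			 */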
2695
2696			if (cmd == PM_START_PM) {
2697				autopm_enabled = 1;
2698			} else if (cmd == PM_START_CPUPM) {
2699				old_mode = cpupm;
2700				new_mode = cpupm = cpupm_default_mode;
2701			} else if (cmd == PM_START_CPUPM_EV) {
2702				old_mode = cpupm;
2703				new_mode = cpupm = PM_CPUPM_EVENT;
2704			} else if (cmd == PM_START_CPUPM_POLL) {
2705				old_mode = cpupm;
2706				new_mode = cpupm = PM_CPUPM_POLLING;
2707			}
2708
2709			mutex_exit(&pm_scan_lock);
2710
2711			/*
2712			 * If we are changing CPUPM modes and CPUPM was
2713			 * active, stop it from operating in the old mode.
2714			 */
2715			if (old_mode == PM_CPUPM_POLLING) {
2716				int c = PM_STOP_CPUPM;
2717				ddi_walk_devs(ddi_root_node(), pm_stop_pm_walk,
2718				    &c);
2719			} else if (old_mode == PM_CPUPM_EVENT) {
2720				r = cpupm_set_policy(CPUPM_POLICY_DISABLED);
2721
2722				/*
2723				 * Disabling the CPUPM policy should always
2724				 * succeed.
2725				 */
2726				ASSERT(r == 0);
2727			}
2728
2729			/*
2730			 * If we are changing to event based CPUPM, enable it.
2731			 * In the event it's not supported, fall back to
2732			 * polling based CPUPM.
2733			 */
2734			if (new_mode == PM_CPUPM_EVENT &&
2735			    cpupm_set_policy(CPUPM_POLICY_ELASTIC) < 0) {
2736				mutex_enter(&pm_scan_lock);
2737				new_mode = cpupm = PM_CPUPM_POLLING;
2738				cmd = PM_START_CPUPM_POLL;
2739				mutex_exit(&pm_scan_lock);
2740			}
2741			if (new_mode == PM_CPUPM_POLLING ||
2742			    cmd == PM_START_PM) {
2743				ddi_walk_devs(ddi_root_node(), pm_start_pm_walk,
2744				    &cmd);
2745			}
2746			ret = 0;
2747			break;
2748		}
2749
2750		case PM_RESET_PM:
2751		case PM_STOP_PM:
2752		case PM_STOP_CPUPM:
2753		{
2754			extern void pm_discard_thresholds(void);
2755			pm_cpupm_t old_mode = PM_CPUPM_NOTSET;
2756
2757			mutex_enter(&pm_scan_lock);
2758			if ((cmd == PM_STOP_PM && !autopm_enabled) ||
2759			    (cmd == PM_STOP_CPUPM && PM_CPUPM_DISABLED)) {
2760				mutex_exit(&pm_scan_lock);
2761				PMD(PMD_ERROR, ("ioctl: %s: EINVAL\n",
2762				    cmdstr))
2763				ret = EINVAL;
2764				break;
2765			}
2766
2767			if (cmd == PM_STOP_PM) {
2768				autopm_enabled = 0;
2769				pm_S3_enabled = 0;
2770				autoS3_enabled = 0;
2771			} else if (cmd == PM_STOP_CPUPM) {
2772				old_mode = cpupm;
2773				cpupm = PM_CPUPM_DISABLE;
2774			} else {
2775				autopm_enabled = 0;
2776				autoS3_enabled = 0;
2777				old_mode = cpupm;
2778				cpupm = PM_CPUPM_NOTSET;
2779			}
2780			mutex_exit(&pm_scan_lock);
2781
2782			/*
2783			 * Bring devices back to full power and stop scanning.
2784			 * If CPUPM was operating in event-driven mode, disable
2785			 * it.
2786			 */
2787			if (old_mode == PM_CPUPM_EVENT) {
2788				(void) cpupm_set_policy(CPUPM_POLICY_DISABLED);
2789			}
2790			ddi_walk_devs(ddi_root_node(), pm_stop_pm_walk, &cmd);
2791			ret = 0;
2792			if (cmd == PM_STOP_PM || cmd == PM_STOP_CPUPM)
2793				break;
2794			/*
2795			 * Now do only PM_RESET_PM stuff.
2796			 */
2797			pm_system_idle_threshold = pm_default_idle_threshold;
2798			pm_cpu_idle_threshold = 0;
2799			pm_discard_thresholds();
2800			pm_all_to_default_thresholds();
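			/*
			 * Hand PM_DEP_WK_REMOVE_DEP to the dependency worker
			 * and wait (PM_DEP_WAIT) for it to drop all recorded
			 * dependencies.
			 */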
2801			pm_dispatch_to_dep_thread(PM_DEP_WK_REMOVE_DEP,
2802			    NULL, NULL, PM_DEP_WAIT, NULL, 0);
2803			break;
2804		}
2805
2806		case PM_GET_SYSTEM_THRESHOLD:
2807		{
2808			*rval_p = pm_system_idle_threshold;
2809			ret = 0;
2810			break;
2811		}
2812
2813		case PM_GET_DEFAULT_SYSTEM_THRESHOLD:
2814		{
2815			*rval_p = pm_default_idle_threshold;
2816			ret = 0;
2817			break;
2818		}
2819
2820		case PM_GET_CPU_THRESHOLD:
2821		{
2822			*rval_p = pm_cpu_idle_threshold;
2823			ret = 0;
2824			break;
2825		}
2826
2827		case PM_SET_SYSTEM_THRESHOLD:
2828		case PM_SET_CPU_THRESHOLD:
2829		{
2830			if ((int)arg < 0) {
2831				PMD(PMD_ERROR, ("ioctl: %s: arg 0x%x < 0"
2832				    "--EINVAL\n", cmdstr, (int)arg))
2833				ret = EINVAL;
2834				break;
2835			}
2836			PMD(PMD_IOCTL, ("ioctl: %s: 0x%x 0t%d\n", cmdstr,
2837			    (int)arg, (int)arg))
2838			if (cmd == PM_SET_SYSTEM_THRESHOLD)
2839				pm_system_idle_threshold = (int)arg;
2840			else {
2841				pm_cpu_idle_threshold = (int)arg;
2842			}
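			/*
			 * Walk the device tree so pm_set_idle_thresh_walk()
			 * can apply the new threshold; cmd tells the walker
			 * whether the system or the CPU threshold changed.
			 */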
2843			ddi_walk_devs(ddi_root_node(), pm_set_idle_thresh_walk,
2844			    (void *) &cmd);
2845
2846			ret = 0;
2847			break;
2848		}
2849
2850		case PM_IDLE_DOWN:
2851		{
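			/*
			 * If pm_timeout_idledown() indicates idledown should
			 * proceed, walk the device tree with
			 * pm_start_idledown(), tagging the request as
			 * ioctl-initiated (PMID_IOC).
			 */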
2852			if (pm_timeout_idledown() != 0) {
2853				ddi_walk_devs(ddi_root_node(),
2854				    pm_start_idledown, (void *)PMID_IOC);
2855			}
2856			ret = 0;
2857			break;
2858		}
2859
2860		case PM_GET_PM_STATE:
2861		{
2862			if (autopm_enabled) {
2863				*rval_p = PM_SYSTEM_PM_ENABLED;
2864			} else {
2865				*rval_p = PM_SYSTEM_PM_DISABLED;
2866			}
2867			ret = 0;
2868			break;
2869		}
2870
2871		case PM_GET_CPUPM_STATE:
2872		{
2873			if (PM_POLLING_CPUPM || PM_EVENT_CPUPM)
2874				*rval_p = PM_CPU_PM_ENABLED;
2875			else if (PM_CPUPM_DISABLED)
2876				*rval_p = PM_CPU_PM_DISABLED;
2877			else
2878				*rval_p = PM_CPU_PM_NOTSET;
2879			ret = 0;
2880			break;
2881		}
2882
2883		case PM_GET_AUTOS3_STATE:
2884		{
2885			if (autoS3_enabled) {
2886				*rval_p = PM_AUTOS3_ENABLED;
2887			} else {
2888				*rval_p = PM_AUTOS3_DISABLED;
2889			}
2890			ret = 0;
2891			break;
2892		}
2893
2894		case PM_GET_S3_SUPPORT_STATE:
2895		{
2896			if (pm_S3_enabled) {
2897				*rval_p = PM_S3_SUPPORT_ENABLED;
2898			} else {
2899				*rval_p = PM_S3_SUPPORT_DISABLED;
2900			}
2901			ret = 0;
2902			break;
2903		}
2904
2905		/*
2906		 * pmconfig tells us if the platform supports S3
2907		 */
2908		case PM_ENABLE_S3:
2909		{
2910			mutex_enter(&pm_scan_lock);
2911			if (pm_S3_enabled) {
2912				mutex_exit(&pm_scan_lock);
2913				PMD(PMD_ERROR, ("ioctl: %s: EBUSY\n",
2914				    cmdstr))
2915				ret = EBUSY;
2916				break;
2917			}
2918			pm_S3_enabled = 1;
2919			mutex_exit(&pm_scan_lock);
2920			ret = 0;
2921			break;
2922		}
2923
2924		case PM_DISABLE_S3:
2925		{
2926			mutex_enter(&pm_scan_lock);
2927			pm_S3_enabled = 0;
2928			mutex_exit(&pm_scan_lock);
2929			ret = 0;
2930			break;
2931		}
2932
2933		case PM_START_AUTOS3:
2934		{
2935			mutex_enter(&pm_scan_lock);
2936			if (autoS3_enabled) {
2937				mutex_exit(&pm_scan_lock);
2938				PMD(PMD_ERROR, ("ioctl: %s: EBUSY\n",
2939				    cmdstr))
2940				ret = EBUSY;
2941				break;
2942			}
2943			autoS3_enabled = 1;
2944			mutex_exit(&pm_scan_lock);
2945			ret = 0;
2946			break;
2947		}
2948
2949		case PM_STOP_AUTOS3:
2950		{
2951			mutex_enter(&pm_scan_lock);
2952			autoS3_enabled = 0;
2953			mutex_exit(&pm_scan_lock);
2954			ret = 0;
2955			break;
2956		}
2957
2958		case PM_ENABLE_CPU_DEEP_IDLE:
2959		{
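			/*
			 * Dispatch the request to the callbacks registered
			 * in the CB_CL_CPU_DEEP_IDLE class; a NULL return is
			 * treated as success.
			 */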
2960			if (callb_execute_class(CB_CL_CPU_DEEP_IDLE,
2961			    PM_ENABLE_CPU_DEEP_IDLE) == NULL)
2962				ret = 0;
2963			else
2964				ret = EBUSY;
2965			break;
2966		}
2967		case PM_DISABLE_CPU_DEEP_IDLE:
2968		{
2969			if (callb_execute_class(CB_CL_CPU_DEEP_IDLE,
2970			    PM_DISABLE_CPU_DEEP_IDLE) == NULL)
2971				ret = 0;
2972			else
2973				ret = EINVAL;
2974			break;
2975		}
2976		case PM_DEFAULT_CPU_DEEP_IDLE:
2977		{
2978			if (callb_execute_class(CB_CL_CPU_DEEP_IDLE,
2979			    PM_DEFAULT_CPU_DEEP_IDLE) == NULL)
2980				ret = 0;
2981			else
2982				ret = EBUSY;
2983			break;
2984		}
2985
2986		default:
2987			/*
2988			 * Internal error, invalid ioctl description;
2989			 * force debug entry even if pm_debug not set.
2990			 */
2991#ifdef	DEBUG
2992			pm_log("invalid diptype %d for cmd %d (%s)\n",
2993			    pcip->diptype, cmd, pcip->name);
2994#endif
2995			ASSERT(0);
2996			return (EIO);
2997		}
2998		break;
2999	}
3000
3001	default:
3002		/*
3003		 * Internal error, invalid ioctl description;
3004		 * force debug entry even if pm_debug not set.
3005		 */
3006#ifdef	DEBUG
3007		pm_log("ioctl: invalid str_type %d for cmd %d (%s)\n",
3008		    pcip->str_type, cmd, pcip->name);
3009#endif
3010		ASSERT(0);
3011		return (EIO);
3012	}
3013	ASSERT(ret != 0x0badcafe);	/* some cmd in wrong case! */
3014	if (dipheld) {
3015		ASSERT(dip);
3016		PMD(PMD_DHR, ("ioctl: %s: releasing %s@%s(%s#%d) for "
3017		    "exiting pm_ioctl\n", cmdstr, PM_DEVICE(dip)))
3018		PM_RELE(dip);
3019	}
3020	PMD(PMD_IOCTL, ("ioctl: %s: end, ret=%d\n", cmdstr, ret))
3021	return (ret);
3022}
3023