xref: /illumos-gate/usr/src/uts/common/os/devid_cache.c (revision 83c4dfe9)
1*83c4dfe9Sjg /*
2*83c4dfe9Sjg  * CDDL HEADER START
3*83c4dfe9Sjg  *
4*83c4dfe9Sjg  * The contents of this file are subject to the terms of the
5*83c4dfe9Sjg  * Common Development and Distribution License (the "License").
6*83c4dfe9Sjg  * You may not use this file except in compliance with the License.
7*83c4dfe9Sjg  *
8*83c4dfe9Sjg  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9*83c4dfe9Sjg  * or http://www.opensolaris.org/os/licensing.
10*83c4dfe9Sjg  * See the License for the specific language governing permissions
11*83c4dfe9Sjg  * and limitations under the License.
12*83c4dfe9Sjg  *
13*83c4dfe9Sjg  * When distributing Covered Code, include this CDDL HEADER in each
14*83c4dfe9Sjg  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15*83c4dfe9Sjg  * If applicable, add the following below this CDDL HEADER, with the
16*83c4dfe9Sjg  * fields enclosed by brackets "[]" replaced with your own identifying
17*83c4dfe9Sjg  * information: Portions Copyright [yyyy] [name of copyright owner]
18*83c4dfe9Sjg  *
19*83c4dfe9Sjg  * CDDL HEADER END
20*83c4dfe9Sjg  */
21*83c4dfe9Sjg /*
22*83c4dfe9Sjg  * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
23*83c4dfe9Sjg  * Use is subject to license terms.
24*83c4dfe9Sjg  */
25*83c4dfe9Sjg 
26*83c4dfe9Sjg #pragma ident	"%Z%%M%	%I%	%E% SMI"
27*83c4dfe9Sjg 
28*83c4dfe9Sjg #include <sys/note.h>
29*83c4dfe9Sjg #include <sys/t_lock.h>
30*83c4dfe9Sjg #include <sys/cmn_err.h>
31*83c4dfe9Sjg #include <sys/instance.h>
32*83c4dfe9Sjg #include <sys/conf.h>
33*83c4dfe9Sjg #include <sys/stat.h>
34*83c4dfe9Sjg #include <sys/ddi.h>
35*83c4dfe9Sjg #include <sys/hwconf.h>
36*83c4dfe9Sjg #include <sys/sunddi.h>
37*83c4dfe9Sjg #include <sys/sunndi.h>
38*83c4dfe9Sjg #include <sys/ddi_impldefs.h>
39*83c4dfe9Sjg #include <sys/ndi_impldefs.h>
40*83c4dfe9Sjg #include <sys/kobj.h>
41*83c4dfe9Sjg #include <sys/devcache.h>
42*83c4dfe9Sjg #include <sys/devid_cache.h>
43*83c4dfe9Sjg #include <sys/sysmacros.h>
44*83c4dfe9Sjg 
45*83c4dfe9Sjg /*
46*83c4dfe9Sjg  * Discovery refers to the heroic effort made to discover a device which
47*83c4dfe9Sjg  * cannot be accessed at the physical path where it once resided.  Discovery
48*83c4dfe9Sjg  * involves walking the entire device tree attaching all possible disk
49*83c4dfe9Sjg  * instances, to search for the device referenced by a devid.  Obviously,
50*83c4dfe9Sjg  * full device discovery is something to be avoided where possible.
51*83c4dfe9Sjg  * Note that simply invoking devfsadm(1M) is equivalent to running full
52*83c4dfe9Sjg  * discovery at the devid cache level.
53*83c4dfe9Sjg  *
54*83c4dfe9Sjg  * Reasons why a disk may not be accessible:
55*83c4dfe9Sjg  *	disk powered off
56*83c4dfe9Sjg  *	disk removed or cable disconnected
57*83c4dfe9Sjg  *	disk or adapter broken
58*83c4dfe9Sjg  *
59*83c4dfe9Sjg  * Note that discovery is not needed and cannot succeed in any of these
60*83c4dfe9Sjg  * cases.
61*83c4dfe9Sjg  *
62*83c4dfe9Sjg  * When discovery may succeed:
63*83c4dfe9Sjg  *	Discovery will result in success when a device has been moved
64*83c4dfe9Sjg  *	to a different address.  Note that it's recommended that
65*83c4dfe9Sjg  *	devfsadm(1M) be invoked (no arguments required) whenever a system's
66*83c4dfe9Sjg  *	h/w configuration has been updated.  Alternatively, a
67*83c4dfe9Sjg  *	reconfiguration boot can be used to accomplish the same result.
68*83c4dfe9Sjg  *
69*83c4dfe9Sjg  * Note that discovery is not necessary to be able to correct an access
70*83c4dfe9Sjg  * failure for a device which was powered off.  Assuming the cache has an
71*83c4dfe9Sjg  * entry for such a device, simply powering it on should permit the system
72*83c4dfe9Sjg  * to access it.  If problems persist after powering it on, invoke
73*83c4dfe9Sjg  * devfsadm(1M).
74*83c4dfe9Sjg  *
75*83c4dfe9Sjg  * Discovery prior to mounting root is only of interest when booting
 * from a filesystem which accesses devices by device id, which
 * not all do.
78*83c4dfe9Sjg  *
79*83c4dfe9Sjg  * Tunables
80*83c4dfe9Sjg  *
81*83c4dfe9Sjg  * devid_discovery_boot (default 1)
82*83c4dfe9Sjg  *	Number of times discovery will be attempted prior to mounting root.
83*83c4dfe9Sjg  *	Must be done at least once to recover from corrupted or missing
84*83c4dfe9Sjg  *	devid cache backing store.  Probably there's no reason to ever
85*83c4dfe9Sjg  * 	set this to greater than one as a missing device will remain
86*83c4dfe9Sjg  *	unavailable no matter how often the system searches for it.
87*83c4dfe9Sjg  *
88*83c4dfe9Sjg  * devid_discovery_postboot (default 1)
89*83c4dfe9Sjg  *	Number of times discovery will be attempted after mounting root.
90*83c4dfe9Sjg  *	This must be performed at least once to discover any devices
91*83c4dfe9Sjg  *	needed after root is mounted which may have been powered
92*83c4dfe9Sjg  *	off and moved before booting.
93*83c4dfe9Sjg  *	Setting this to a larger positive number will introduce
94*83c4dfe9Sjg  *	some inconsistency in system operation.  Searching for a device
95*83c4dfe9Sjg  *	will take an indeterminate amount of time, sometimes slower,
96*83c4dfe9Sjg  *	sometimes faster.  In addition, the system will sometimes
97*83c4dfe9Sjg  *	discover a newly powered on device, sometimes it won't.
98*83c4dfe9Sjg  *	Use of this option is not therefore recommended.
99*83c4dfe9Sjg  *
100*83c4dfe9Sjg  * devid_discovery_postboot_always (default 0)
101*83c4dfe9Sjg  *	Set to 1, the system will always attempt full discovery.
102*83c4dfe9Sjg  *
103*83c4dfe9Sjg  * devid_discovery_secs (default 0)
104*83c4dfe9Sjg  *	Set to a positive value, the system will attempt full discovery
105*83c4dfe9Sjg  *	but with a minimum delay between attempts.  A device search
106*83c4dfe9Sjg  *	within the period of time specified will result in failure.
107*83c4dfe9Sjg  *
108*83c4dfe9Sjg  * devid_cache_read_disable (default 0)
109*83c4dfe9Sjg  *	Set to 1 to disable reading /etc/devices/devid_cache.
110*83c4dfe9Sjg  *	Devid cache will continue to operate normally but
111*83c4dfe9Sjg  *	at least one discovery attempt will be required.
112*83c4dfe9Sjg  *
113*83c4dfe9Sjg  * devid_cache_write_disable (default 0)
114*83c4dfe9Sjg  *	Set to 1 to disable updates to /etc/devices/devid_cache.
115*83c4dfe9Sjg  *	Any updates to the devid cache will not be preserved across a reboot.
116*83c4dfe9Sjg  *
117*83c4dfe9Sjg  * devid_report_error (default 0)
118*83c4dfe9Sjg  *	Set to 1 to enable some error messages related to devid
119*83c4dfe9Sjg  *	cache failures.
120*83c4dfe9Sjg  *
121*83c4dfe9Sjg  * The devid is packed in the cache file as a byte array.  For
122*83c4dfe9Sjg  * portability, this could be done in the encoded string format.
123*83c4dfe9Sjg  */
124*83c4dfe9Sjg 
125*83c4dfe9Sjg 
126*83c4dfe9Sjg int devid_discovery_boot = 1;
127*83c4dfe9Sjg int devid_discovery_postboot = 1;
128*83c4dfe9Sjg int devid_discovery_postboot_always = 0;
129*83c4dfe9Sjg int devid_discovery_secs = 0;
130*83c4dfe9Sjg 
131*83c4dfe9Sjg int devid_cache_read_disable = 0;
132*83c4dfe9Sjg int devid_cache_write_disable = 0;
133*83c4dfe9Sjg 
134*83c4dfe9Sjg int devid_report_error = 0;
135*83c4dfe9Sjg 
136*83c4dfe9Sjg 
137*83c4dfe9Sjg /*
138*83c4dfe9Sjg  * State to manage discovery of devices providing a devid
139*83c4dfe9Sjg  */
140*83c4dfe9Sjg static int		devid_discovery_busy = 0;
141*83c4dfe9Sjg static kmutex_t		devid_discovery_mutex;
142*83c4dfe9Sjg static kcondvar_t	devid_discovery_cv;
143*83c4dfe9Sjg static clock_t		devid_last_discovery = 0;
144*83c4dfe9Sjg 
145*83c4dfe9Sjg 
146*83c4dfe9Sjg #ifdef	DEBUG
147*83c4dfe9Sjg int nvp_devid_debug = 0;
148*83c4dfe9Sjg int devid_debug = 0;
149*83c4dfe9Sjg int devid_log_registers = 0;
150*83c4dfe9Sjg int devid_log_finds = 0;
151*83c4dfe9Sjg int devid_log_lookups = 0;
152*83c4dfe9Sjg int devid_log_discovery = 0;
153*83c4dfe9Sjg int devid_log_matches = 0;
154*83c4dfe9Sjg int devid_log_paths = 0;
155*83c4dfe9Sjg int devid_log_failures = 0;
156*83c4dfe9Sjg int devid_log_hold = 0;
157*83c4dfe9Sjg int devid_log_unregisters = 0;
158*83c4dfe9Sjg int devid_log_removes = 0;
159*83c4dfe9Sjg int devid_register_debug = 0;
160*83c4dfe9Sjg int devid_log_stale = 0;
161*83c4dfe9Sjg int devid_log_detaches = 0;
162*83c4dfe9Sjg #endif	/* DEBUG */
163*83c4dfe9Sjg 
164*83c4dfe9Sjg /*
165*83c4dfe9Sjg  * devid cache file registration for cache reads and updates
166*83c4dfe9Sjg  */
/*
 * NOTE(review): the nvf framework invokes these callbacks with the
 * handle's rwlock held as writer -- each handler below ASSERTs
 * RW_WRITE_HELD on nvf_lock(dcfd_handle).
 */
static nvf_ops_t devid_cache_ops = {
	"/etc/devices/devid_cache",		/* path to cache */
	devid_cache_unpack_nvlist,		/* read: nvlist to nvp */
	devid_cache_pack_list,			/* write: nvp to nvlist */
	devid_list_free,			/* free data list */
	NULL					/* write complete callback */
};
174*83c4dfe9Sjg 
175*83c4dfe9Sjg /*
176*83c4dfe9Sjg  * handle to registered devid cache handlers
177*83c4dfe9Sjg  */
178*83c4dfe9Sjg nvf_handle_t	dcfd_handle;
179*83c4dfe9Sjg 
180*83c4dfe9Sjg 
181*83c4dfe9Sjg /*
182*83c4dfe9Sjg  * Initialize devid cache file management
183*83c4dfe9Sjg  */
184*83c4dfe9Sjg void
185*83c4dfe9Sjg devid_cache_init(void)
186*83c4dfe9Sjg {
187*83c4dfe9Sjg 	dcfd_handle = nvf_register_file(&devid_cache_ops);
188*83c4dfe9Sjg 	ASSERT(dcfd_handle);
189*83c4dfe9Sjg 
190*83c4dfe9Sjg 	list_create(nvf_list(dcfd_handle), sizeof (nvp_devid_t),
191*83c4dfe9Sjg 	    offsetof(nvp_devid_t, nvp_link));
192*83c4dfe9Sjg 
193*83c4dfe9Sjg 	mutex_init(&devid_discovery_mutex, NULL, MUTEX_DEFAULT, NULL);
194*83c4dfe9Sjg 	cv_init(&devid_discovery_cv, NULL, CV_DRIVER, NULL);
195*83c4dfe9Sjg }
196*83c4dfe9Sjg 
197*83c4dfe9Sjg /*
198*83c4dfe9Sjg  * Read and initialize the devid cache from the persistent store
199*83c4dfe9Sjg  */
200*83c4dfe9Sjg void
201*83c4dfe9Sjg devid_cache_read(void)
202*83c4dfe9Sjg {
203*83c4dfe9Sjg 	if (!devid_cache_read_disable) {
204*83c4dfe9Sjg 		rw_enter(nvf_lock(dcfd_handle), RW_WRITER);
205*83c4dfe9Sjg 		ASSERT(list_head(nvf_list(dcfd_handle)) == NULL);
206*83c4dfe9Sjg 		(void) nvf_read_file(dcfd_handle);
207*83c4dfe9Sjg 		rw_exit(nvf_lock(dcfd_handle));
208*83c4dfe9Sjg 	}
209*83c4dfe9Sjg }
210*83c4dfe9Sjg 
211*83c4dfe9Sjg static void
212*83c4dfe9Sjg devid_nvp_free(nvp_devid_t *dp)
213*83c4dfe9Sjg {
214*83c4dfe9Sjg 	if (dp->nvp_devpath)
215*83c4dfe9Sjg 		kmem_free(dp->nvp_devpath, strlen(dp->nvp_devpath)+1);
216*83c4dfe9Sjg 	if (dp->nvp_devid)
217*83c4dfe9Sjg 		kmem_free(dp->nvp_devid, ddi_devid_sizeof(dp->nvp_devid));
218*83c4dfe9Sjg 
219*83c4dfe9Sjg 	kmem_free(dp, sizeof (nvp_devid_t));
220*83c4dfe9Sjg }
221*83c4dfe9Sjg 
222*83c4dfe9Sjg static void
223*83c4dfe9Sjg devid_list_free(nvf_handle_t fd)
224*83c4dfe9Sjg {
225*83c4dfe9Sjg 	list_t		*listp;
226*83c4dfe9Sjg 	nvp_devid_t	*np;
227*83c4dfe9Sjg 
228*83c4dfe9Sjg 	ASSERT(RW_WRITE_HELD(nvf_lock(dcfd_handle)));
229*83c4dfe9Sjg 
230*83c4dfe9Sjg 	listp = nvf_list(fd);
231*83c4dfe9Sjg 	while (np = list_head(listp)) {
232*83c4dfe9Sjg 		list_remove(listp, np);
233*83c4dfe9Sjg 		devid_nvp_free(np);
234*83c4dfe9Sjg 	}
235*83c4dfe9Sjg }
236*83c4dfe9Sjg 
237*83c4dfe9Sjg /*
238*83c4dfe9Sjg  * Free an nvp element in a list
239*83c4dfe9Sjg  */
240*83c4dfe9Sjg static void
241*83c4dfe9Sjg devid_nvp_unlink_and_free(nvf_handle_t fd, nvp_devid_t *np)
242*83c4dfe9Sjg {
243*83c4dfe9Sjg 	list_remove(nvf_list(fd), np);
244*83c4dfe9Sjg 	devid_nvp_free(np);
245*83c4dfe9Sjg }
246*83c4dfe9Sjg 
247*83c4dfe9Sjg /*
248*83c4dfe9Sjg  * Unpack a device path/nvlist pair to the list of devid cache elements.
249*83c4dfe9Sjg  * Used to parse the nvlist format when reading
250*83c4dfe9Sjg  * /etc/devices/devid_cache
251*83c4dfe9Sjg  */
252*83c4dfe9Sjg static int
253*83c4dfe9Sjg devid_cache_unpack_nvlist(nvf_handle_t fd, nvlist_t *nvl, char *name)
254*83c4dfe9Sjg {
255*83c4dfe9Sjg 	nvp_devid_t *np;
256*83c4dfe9Sjg 	ddi_devid_t devidp;
257*83c4dfe9Sjg 	int rval;
258*83c4dfe9Sjg 	uint_t n;
259*83c4dfe9Sjg 
260*83c4dfe9Sjg 	NVP_DEVID_DEBUG_PATH((name));
261*83c4dfe9Sjg 	ASSERT(RW_WRITE_HELD(nvf_lock(dcfd_handle)));
262*83c4dfe9Sjg 
263*83c4dfe9Sjg 	/*
264*83c4dfe9Sjg 	 * check path for a devid
265*83c4dfe9Sjg 	 */
266*83c4dfe9Sjg 	rval = nvlist_lookup_byte_array(nvl,
267*83c4dfe9Sjg 		DP_DEVID_ID, (uchar_t **)&devidp, &n);
268*83c4dfe9Sjg 	if (rval == 0) {
269*83c4dfe9Sjg 		if (ddi_devid_valid(devidp) == DDI_SUCCESS) {
270*83c4dfe9Sjg 			ASSERT(n == ddi_devid_sizeof(devidp));
271*83c4dfe9Sjg 			np = kmem_zalloc(sizeof (nvp_devid_t), KM_SLEEP);
272*83c4dfe9Sjg 			np->nvp_devpath = i_ddi_strdup(name, KM_SLEEP);
273*83c4dfe9Sjg 			np->nvp_devid = kmem_alloc(n, KM_SLEEP);
274*83c4dfe9Sjg 			(void) bcopy(devidp, np->nvp_devid, n);
275*83c4dfe9Sjg 			list_insert_tail(nvf_list(fd), np);
276*83c4dfe9Sjg 			NVP_DEVID_DEBUG_DEVID((np->nvp_devid));
277*83c4dfe9Sjg 		} else {
278*83c4dfe9Sjg 			DEVIDERR((CE_CONT,
279*83c4dfe9Sjg 			    "%s: invalid devid\n", name));
280*83c4dfe9Sjg 		}
281*83c4dfe9Sjg 	} else {
282*83c4dfe9Sjg 		DEVIDERR((CE_CONT,
283*83c4dfe9Sjg 		    "%s: devid not available\n", name));
284*83c4dfe9Sjg 	}
285*83c4dfe9Sjg 
286*83c4dfe9Sjg 	return (0);
287*83c4dfe9Sjg }
288*83c4dfe9Sjg 
289*83c4dfe9Sjg /*
290*83c4dfe9Sjg  * Pack the list of devid cache elements into a single nvlist
291*83c4dfe9Sjg  * Used when writing the nvlist file.
292*83c4dfe9Sjg  */
static int
devid_cache_pack_list(nvf_handle_t fd, nvlist_t **ret_nvl)
{
	nvlist_t	*nvl, *sub_nvl;
	nvp_devid_t	*np;
	int		rval;
	list_t		*listp;

	ASSERT(RW_WRITE_HELD(nvf_lock(dcfd_handle)));

	/* Top-level nvlist: one sub-nvlist per cached device path */
	rval = nvlist_alloc(&nvl, NV_UNIQUE_NAME, KM_SLEEP);
	if (rval != 0) {
		nvf_error("%s: nvlist alloc error %d\n",
			nvf_cache_name(fd), rval);
		return (DDI_FAILURE);
	}

	listp = nvf_list(fd);
	for (np = list_head(listp); np; np = list_next(listp, np)) {
		/* entries with no devid are not persisted */
		if (np->nvp_devid == NULL)
		    continue;
		NVP_DEVID_DEBUG_PATH(np->nvp_devpath);
		rval = nvlist_alloc(&sub_nvl, NV_UNIQUE_NAME, KM_SLEEP);
		if (rval != 0) {
			nvf_error("%s: nvlist alloc error %d\n",
				nvf_cache_name(fd), rval);
			/* mark sub_nvl so the err path won't double-free */
			sub_nvl = NULL;
			goto err;
		}

		/* devid is stored as a raw byte array under DP_DEVID_ID */
		rval = nvlist_add_byte_array(sub_nvl, DP_DEVID_ID,
			(uchar_t *)np->nvp_devid,
			ddi_devid_sizeof(np->nvp_devid));
		if (rval == 0) {
			NVP_DEVID_DEBUG_DEVID(np->nvp_devid);
		} else {
			nvf_error(
			    "%s: nvlist add error %d (devid)\n",
			    nvf_cache_name(fd), rval);
			goto err;
		}

		/* keyed by device path; nvlist_add_nvlist copies sub_nvl */
		rval = nvlist_add_nvlist(nvl, np->nvp_devpath, sub_nvl);
		if (rval != 0) {
			nvf_error("%s: nvlist add error %d (sublist)\n",
			    nvf_cache_name(fd), rval);
			goto err;
		}
		/* our copy is no longer needed after the add */
		nvlist_free(sub_nvl);
	}

	*ret_nvl = nvl;
	return (DDI_SUCCESS);

err:
	/* Unified error path: release partial sub-list and the top nvlist */
	if (sub_nvl)
		nvlist_free(sub_nvl);
	nvlist_free(nvl);
	*ret_nvl = NULL;
	return (DDI_FAILURE);
}
354*83c4dfe9Sjg 
355*83c4dfe9Sjg static int
356*83c4dfe9Sjg e_devid_do_discovery(void)
357*83c4dfe9Sjg {
358*83c4dfe9Sjg 	ASSERT(mutex_owned(&devid_discovery_mutex));
359*83c4dfe9Sjg 
360*83c4dfe9Sjg 	if (i_ddi_io_initialized() == 0) {
361*83c4dfe9Sjg 		if (devid_discovery_boot > 0) {
362*83c4dfe9Sjg 			devid_discovery_boot--;
363*83c4dfe9Sjg 			return (1);
364*83c4dfe9Sjg 		}
365*83c4dfe9Sjg 	} else {
366*83c4dfe9Sjg 		if (devid_discovery_postboot_always > 0)
367*83c4dfe9Sjg 			return (1);
368*83c4dfe9Sjg 		if (devid_discovery_postboot > 0) {
369*83c4dfe9Sjg 			devid_discovery_postboot--;
370*83c4dfe9Sjg 			return (1);
371*83c4dfe9Sjg 		}
372*83c4dfe9Sjg 		if (devid_discovery_secs > 0) {
373*83c4dfe9Sjg 			if ((ddi_get_lbolt() - devid_last_discovery) >
374*83c4dfe9Sjg 			    drv_usectohz(devid_discovery_secs * MICROSEC)) {
375*83c4dfe9Sjg 				return (1);
376*83c4dfe9Sjg 			}
377*83c4dfe9Sjg 		}
378*83c4dfe9Sjg 	}
379*83c4dfe9Sjg 
380*83c4dfe9Sjg 	DEVID_LOG_DISC((CE_CONT, "devid_discovery: no discovery\n"));
381*83c4dfe9Sjg 	return (0);
382*83c4dfe9Sjg }
383*83c4dfe9Sjg 
384*83c4dfe9Sjg static void
385*83c4dfe9Sjg e_ddi_devid_hold_by_major(major_t major)
386*83c4dfe9Sjg {
387*83c4dfe9Sjg 	DEVID_LOG_DISC((CE_CONT,
388*83c4dfe9Sjg 	    "devid_discovery: ddi_hold_installed_driver %d\n", major));
389*83c4dfe9Sjg 
390*83c4dfe9Sjg 	if (ddi_hold_installed_driver(major) == NULL)
391*83c4dfe9Sjg 		return;
392*83c4dfe9Sjg 
393*83c4dfe9Sjg 	ddi_rele_driver(major);
394*83c4dfe9Sjg }
395*83c4dfe9Sjg 
/*
 * Drivers held (and thereby attached) during pre-root discovery,
 * in addition to the devid's own driver hint -- presumably the
 * common disk drivers; see e_ddi_devid_hold_installed_driver().
 */
static char *e_ddi_devid_hold_driver_list[] = { "sd", "ssd", "dad" };

/* Element count of e_ddi_devid_hold_driver_list */
#define	N_DRIVERS_TO_HOLD	\
	(sizeof (e_ddi_devid_hold_driver_list) / sizeof (char *))
400*83c4dfe9Sjg 
401*83c4dfe9Sjg 
402*83c4dfe9Sjg static void
403*83c4dfe9Sjg e_ddi_devid_hold_installed_driver(ddi_devid_t devid)
404*83c4dfe9Sjg {
405*83c4dfe9Sjg 	impl_devid_t	*id = (impl_devid_t *)devid;
406*83c4dfe9Sjg 	major_t		major, hint_major;
407*83c4dfe9Sjg 	char		hint[DEVID_HINT_SIZE + 1];
408*83c4dfe9Sjg 	char		**drvp;
409*83c4dfe9Sjg 	int		i;
410*83c4dfe9Sjg 
411*83c4dfe9Sjg 	/* Count non-null bytes */
412*83c4dfe9Sjg 	for (i = 0; i < DEVID_HINT_SIZE; i++)
413*83c4dfe9Sjg 		if (id->did_driver[i] == '\0')
414*83c4dfe9Sjg 			break;
415*83c4dfe9Sjg 
416*83c4dfe9Sjg 	/* Make a copy of the driver hint */
417*83c4dfe9Sjg 	bcopy(id->did_driver, hint, i);
418*83c4dfe9Sjg 	hint[i] = '\0';
419*83c4dfe9Sjg 
420*83c4dfe9Sjg 	/* search for the devid using the hint driver */
421*83c4dfe9Sjg 	hint_major = ddi_name_to_major(hint);
422*83c4dfe9Sjg 	if (hint_major != (major_t)-1) {
423*83c4dfe9Sjg 		e_ddi_devid_hold_by_major(hint_major);
424*83c4dfe9Sjg 	}
425*83c4dfe9Sjg 
426*83c4dfe9Sjg 	drvp = e_ddi_devid_hold_driver_list;
427*83c4dfe9Sjg 	for (i = 0; i < N_DRIVERS_TO_HOLD; i++, drvp++) {
428*83c4dfe9Sjg 		major = ddi_name_to_major(*drvp);
429*83c4dfe9Sjg 		if (major != (major_t)-1 && major != hint_major) {
430*83c4dfe9Sjg 			e_ddi_devid_hold_by_major(major);
431*83c4dfe9Sjg 		}
432*83c4dfe9Sjg 	}
433*83c4dfe9Sjg }
434*83c4dfe9Sjg 
435*83c4dfe9Sjg 
436*83c4dfe9Sjg /*
437*83c4dfe9Sjg  * Return success if discovery was attempted, to indicate
438*83c4dfe9Sjg  * that the desired device may now be available.
439*83c4dfe9Sjg  */
int
e_ddi_devid_discovery(ddi_devid_t devid)
{
	int flags;
	int rval = DDI_SUCCESS;

	mutex_enter(&devid_discovery_mutex);

	if (devid_discovery_busy) {
		/*
		 * Another thread is already running discovery; wait for it
		 * to finish and return success on the strength of its pass.
		 */
		DEVID_LOG_DISC((CE_CONT, "devid_discovery: busy\n"));
		while (devid_discovery_busy) {
			cv_wait(&devid_discovery_cv, &devid_discovery_mutex);
		}
	} else if (e_devid_do_discovery()) {
		/*
		 * We won the right to discover.  Mark busy and drop the
		 * mutex across the long-running device-tree work.
		 */
		devid_discovery_busy = 1;
		mutex_exit(&devid_discovery_mutex);

		if (i_ddi_io_initialized() == 0) {
			/* Pre-root: hold/attach candidate drivers only */
			e_ddi_devid_hold_installed_driver(devid);
		} else {
			/* Post-root: configure the whole tree from the top */
			DEVID_LOG_DISC((CE_CONT,
			    "devid_discovery: ndi_devi_config\n"));
			flags = NDI_DEVI_PERSIST | NDI_CONFIG | NDI_NO_EVENT;
			/* re-tested here since the mutex was dropped above */
			if (i_ddi_io_initialized())
				flags |= NDI_DRV_CONF_REPROBE;
			(void) ndi_devi_config(ddi_root_node(), flags);
		}

		mutex_enter(&devid_discovery_mutex);
		devid_discovery_busy = 0;
		/* release any threads parked in the busy-wait above */
		cv_broadcast(&devid_discovery_cv);
		if (devid_discovery_secs > 0)
			devid_last_discovery = ddi_get_lbolt();
		DEVID_LOG_DISC((CE_CONT, "devid_discovery: done\n"));
	} else {
		/* Attempt budget exhausted (see tunables at top of file) */
		rval = DDI_FAILURE;
		DEVID_LOG_DISC((CE_CONT, "no devid discovery\n"));
	}

	mutex_exit(&devid_discovery_mutex);

	return (rval);
}
483*83c4dfe9Sjg 
484*83c4dfe9Sjg /*
485*83c4dfe9Sjg  * As part of registering a devid for a device,
486*83c4dfe9Sjg  * update the devid cache with this device/devid pair
487*83c4dfe9Sjg  * or note that this combination has registered.
488*83c4dfe9Sjg  */
int
e_devid_cache_register(dev_info_t *dip, ddi_devid_t devid)
{
	nvp_devid_t *np;
	nvp_devid_t *new_nvp;
	ddi_devid_t new_devid;
	int new_devid_size;
	char *path, *fullpath;
	ddi_devid_t free_devid = NULL;	/* stale devid, freed after unlock */
	int pathlen;
	list_t *listp;
	int is_dirty = 0;

	ASSERT(ddi_devid_valid(devid) == DDI_SUCCESS);

	/* Snapshot the device path into a right-sized buffer */
	fullpath = kmem_alloc(MAXPATHLEN, KM_SLEEP);
	(void) ddi_pathname(dip, fullpath);
	pathlen = strlen(fullpath) + 1;
	path = kmem_alloc(pathlen, KM_SLEEP);
	bcopy(fullpath, path, pathlen);
	kmem_free(fullpath, MAXPATHLEN);

	DEVID_LOG_REG(("register", devid, path));

	/*
	 * Preallocate the entry and a private copy of the devid before
	 * taking the lock, so no allocation happens while holding it.
	 */
	new_nvp = kmem_zalloc(sizeof (nvp_devid_t), KM_SLEEP);
	new_devid_size = ddi_devid_sizeof(devid);
	new_devid = kmem_alloc(new_devid_size, KM_SLEEP);
	(void) bcopy(devid, new_devid, new_devid_size);

	rw_enter(nvf_lock(dcfd_handle), RW_WRITER);

	listp = nvf_list(dcfd_handle);
	for (np = list_head(listp); np; np = list_next(listp, np)) {
		if (strcmp(path, np->nvp_devpath) == 0) {
			DEVID_DEBUG2((CE_CONT,
			    "register: %s path match\n", path));
			if (np->nvp_devid == NULL) {
				/*
				 * Existing path entry with no devid (or, via
				 * the goto below, an invalid one): install
				 * new_devid in place -- ownership transfers
				 * to the cache entry.
				 */
			    replace:
				np->nvp_devid = new_devid;
				np->nvp_flags |=
					NVP_DEVID_DIP | NVP_DEVID_REGISTERED;
				np->nvp_dip = dip;
				if (!devid_cache_write_disable) {
					nvf_mark_dirty(dcfd_handle);
					is_dirty = 1;
				}
				rw_exit(nvf_lock(dcfd_handle));
				/* unused preallocations */
				kmem_free(new_nvp, sizeof (nvp_devid_t));
				kmem_free(path, pathlen);
				goto exit;
			}
			if (ddi_devid_valid(np->nvp_devid) != DDI_SUCCESS) {
				/* replace invalid devid */
				free_devid = np->nvp_devid;
				goto replace;
			}
			/*
			 * We're registering an already-cached path
			 * Does the device's devid match the cache?
			 */
			if (ddi_devid_compare(devid, np->nvp_devid) != 0) {
				DEVID_DEBUG((CE_CONT, "devid register: "
				    "devid %s does not match\n", path));
				/*
				 * Replace cached devid for this path
				 * with newly registered devid.  A devid
				 * may map to multiple paths but one path
				 * should only map to one devid.
				 */
				devid_nvp_unlink_and_free(dcfd_handle, np);
				np = NULL;
				break;
			} else {
				DEVID_DEBUG2((CE_CONT,
				    "devid register: %s devid match\n", path));
				np->nvp_flags |=
					NVP_DEVID_DIP | NVP_DEVID_REGISTERED;
				np->nvp_dip = dip;
				rw_exit(nvf_lock(dcfd_handle));
				/* cache already correct: discard all copies */
				kmem_free(new_nvp, sizeof (nvp_devid_t));
				kmem_free(path, pathlen);
				kmem_free(new_devid, new_devid_size);
				return (DDI_SUCCESS);
			}
		}
	}

	/*
	 * Add newly registered devid to the cache
	 */
	ASSERT(np == NULL);

	/* path, devid and entry ownership all transfer to the list */
	new_nvp->nvp_devpath = path;
	new_nvp->nvp_flags = NVP_DEVID_DIP | NVP_DEVID_REGISTERED;
	new_nvp->nvp_dip = dip;
	new_nvp->nvp_devid = new_devid;

	if (!devid_cache_write_disable) {
		is_dirty = 1;
		nvf_mark_dirty(dcfd_handle);
	}
	list_insert_tail(nvf_list(dcfd_handle), new_nvp);

	rw_exit(nvf_lock(dcfd_handle));

exit:
	/* freed outside the lock; NULL unless the replace path ran */
	if (free_devid)
		kmem_free(free_devid, ddi_devid_sizeof(free_devid));

	if (is_dirty)
		nvf_wake_daemon();

	return (DDI_SUCCESS);
}
603*83c4dfe9Sjg 
604*83c4dfe9Sjg /*
605*83c4dfe9Sjg  * Unregister a device's devid
 * Called as an instance detaches
607*83c4dfe9Sjg  * Invalidate the devid's devinfo reference
608*83c4dfe9Sjg  * Devid-path remains in the cache
609*83c4dfe9Sjg  */
610*83c4dfe9Sjg void
611*83c4dfe9Sjg e_devid_cache_unregister(dev_info_t *dip)
612*83c4dfe9Sjg {
613*83c4dfe9Sjg 	nvp_devid_t *np;
614*83c4dfe9Sjg 	list_t *listp;
615*83c4dfe9Sjg 
616*83c4dfe9Sjg 	rw_enter(nvf_lock(dcfd_handle), RW_WRITER);
617*83c4dfe9Sjg 
618*83c4dfe9Sjg 	listp = nvf_list(dcfd_handle);
619*83c4dfe9Sjg 	for (np = list_head(listp); np; np = list_next(listp, np)) {
620*83c4dfe9Sjg 		if (np->nvp_devid == NULL)
621*83c4dfe9Sjg 			continue;
622*83c4dfe9Sjg 		if ((np->nvp_flags & NVP_DEVID_DIP) && np->nvp_dip == dip) {
623*83c4dfe9Sjg 			DEVID_LOG_UNREG((CE_CONT,
624*83c4dfe9Sjg 				"unregister: %s\n", np->nvp_devpath));
625*83c4dfe9Sjg 			np->nvp_flags &= ~NVP_DEVID_DIP;
626*83c4dfe9Sjg 			np->nvp_dip = NULL;
627*83c4dfe9Sjg 			break;
628*83c4dfe9Sjg 		}
629*83c4dfe9Sjg 	}
630*83c4dfe9Sjg 
631*83c4dfe9Sjg 	rw_exit(nvf_lock(dcfd_handle));
632*83c4dfe9Sjg }
633*83c4dfe9Sjg 
634*83c4dfe9Sjg /*
635*83c4dfe9Sjg  * Purge devid cache of stale devids
636*83c4dfe9Sjg  */
637*83c4dfe9Sjg void
638*83c4dfe9Sjg devid_cache_cleanup(void)
639*83c4dfe9Sjg {
640*83c4dfe9Sjg 	nvp_devid_t *np, *next;
641*83c4dfe9Sjg 	list_t *listp;
642*83c4dfe9Sjg 	int is_dirty = 0;
643*83c4dfe9Sjg 
644*83c4dfe9Sjg 	rw_enter(nvf_lock(dcfd_handle), RW_WRITER);
645*83c4dfe9Sjg 
646*83c4dfe9Sjg 	listp = nvf_list(dcfd_handle);
647*83c4dfe9Sjg 	for (np = list_head(listp); np; np = next) {
648*83c4dfe9Sjg 		next = list_next(listp, np);
649*83c4dfe9Sjg 		if (np->nvp_devid == NULL)
650*83c4dfe9Sjg 			continue;
651*83c4dfe9Sjg 		if ((np->nvp_flags & NVP_DEVID_REGISTERED) == 0) {
652*83c4dfe9Sjg 			DEVID_LOG_REMOVE((CE_CONT,
653*83c4dfe9Sjg 				    "cleanup: %s\n", np->nvp_devpath));
654*83c4dfe9Sjg 			if (!devid_cache_write_disable) {
655*83c4dfe9Sjg 				nvf_mark_dirty(dcfd_handle);
656*83c4dfe9Sjg 				is_dirty = 0;
657*83c4dfe9Sjg 			}
658*83c4dfe9Sjg 			devid_nvp_unlink_and_free(dcfd_handle, np);
659*83c4dfe9Sjg 		}
660*83c4dfe9Sjg 	}
661*83c4dfe9Sjg 
662*83c4dfe9Sjg 	rw_exit(nvf_lock(dcfd_handle));
663*83c4dfe9Sjg 
664*83c4dfe9Sjg 	if (is_dirty)
665*83c4dfe9Sjg 		nvf_wake_daemon();
666*83c4dfe9Sjg }
667*83c4dfe9Sjg 
668*83c4dfe9Sjg 
669*83c4dfe9Sjg /*
670*83c4dfe9Sjg  * Build a list of dev_t's for a device/devid
671*83c4dfe9Sjg  *
672*83c4dfe9Sjg  * The effect of this function is cumulative, adding dev_t's
673*83c4dfe9Sjg  * for the device to the list of all dev_t's for a given
674*83c4dfe9Sjg  * devid.
675*83c4dfe9Sjg  */
static void
e_devid_minor_to_devlist(
	dev_info_t	*dip,
	char		*minor_name,
	int		ndevts_alloced,
	int		*devtcntp,	/* in/out: running dev_t count */
	dev_t		*devtsp)
{
	struct ddi_minor_data	*dmdp;
	int			minor_all = 0;
	int			ndevts = *devtcntp;

	ASSERT(i_ddi_devi_attached(dip));

	/*
	 * are we looking for a set of minor nodes?
	 * NOTE: these are sentinel pointer comparisons, not strcmp() --
	 * callers pass the DEVID_MINOR_NAME_ALL* constants by address.
	 */
	if ((minor_name == DEVID_MINOR_NAME_ALL) ||
	    (minor_name == DEVID_MINOR_NAME_ALL_CHR) ||
	    (minor_name == DEVID_MINOR_NAME_ALL_BLK))
		minor_all = 1;

	mutex_enter(&(DEVI(dip)->devi_lock));

	/* Find matching minor names */
	for (dmdp = DEVI(dip)->devi_minor; dmdp; dmdp = dmdp->next) {

		/* Skip non-minors, and non matching minor names */
		if ((dmdp->type != DDM_MINOR) || ((minor_all == 0) &&
		    strcmp(dmdp->ddm_name, minor_name)))
			continue;

		/* filter out minor_all mismatches (char vs block nodes) */
		if (minor_all &&
		    (((minor_name == DEVID_MINOR_NAME_ALL_CHR) &&
		    (dmdp->ddm_spec_type != S_IFCHR)) ||
		    ((minor_name == DEVID_MINOR_NAME_ALL_BLK) &&
		    (dmdp->ddm_spec_type != S_IFBLK))))
			continue;

		/*
		 * Store only while there is room, but keep counting past
		 * ndevts_alloced so the caller can learn the required
		 * array size and retry.
		 */
		if (ndevts < ndevts_alloced)
			devtsp[ndevts] = dmdp->ddm_dev;
		ndevts++;
	}

	mutex_exit(&(DEVI(dip)->devi_lock));

	*devtcntp = ndevts;
}
723*83c4dfe9Sjg 
724*83c4dfe9Sjg /*
725*83c4dfe9Sjg  * Search for cached entries matching a devid
726*83c4dfe9Sjg  * Return two lists:
727*83c4dfe9Sjg  *	a list of dev_info nodes, for those devices in the attached state
728*83c4dfe9Sjg  *	a list of pathnames whose instances registered the given devid
729*83c4dfe9Sjg  * If the lists passed in are not sufficient to return the matching
730*83c4dfe9Sjg  * references, return the size of lists required.
731*83c4dfe9Sjg  * The dev_info nodes are returned with a hold that the caller must release.
732*83c4dfe9Sjg  */
733*83c4dfe9Sjg static int
734*83c4dfe9Sjg e_devid_cache_devi_path_lists(ddi_devid_t devid, int retmax,
735*83c4dfe9Sjg 	int *retndevis, dev_info_t **retdevis, int *retnpaths, char **retpaths)
736*83c4dfe9Sjg {
737*83c4dfe9Sjg 	nvp_devid_t *np;
738*83c4dfe9Sjg 	int ndevis, npaths;
739*83c4dfe9Sjg 	dev_info_t *dip, *pdip;
740*83c4dfe9Sjg 	int circ;
741*83c4dfe9Sjg 	int maxdevis = 0;
742*83c4dfe9Sjg 	int maxpaths = 0;
743*83c4dfe9Sjg 	list_t *listp;
744*83c4dfe9Sjg 
745*83c4dfe9Sjg 	ndevis = 0;
746*83c4dfe9Sjg 	npaths = 0;
747*83c4dfe9Sjg 	listp = nvf_list(dcfd_handle);
748*83c4dfe9Sjg 	for (np = list_head(listp); np; np = list_next(listp, np)) {
749*83c4dfe9Sjg 		if (np->nvp_devid == NULL)
750*83c4dfe9Sjg 			continue;
751*83c4dfe9Sjg 		if (ddi_devid_valid(np->nvp_devid) != DDI_SUCCESS) {
752*83c4dfe9Sjg 			DEVIDERR((CE_CONT,
753*83c4dfe9Sjg 			    "find: invalid devid %s\n",
754*83c4dfe9Sjg 			    np->nvp_devpath));
755*83c4dfe9Sjg 			continue;
756*83c4dfe9Sjg 		}
757*83c4dfe9Sjg 		if (ddi_devid_compare(devid, np->nvp_devid) == 0) {
758*83c4dfe9Sjg 			DEVID_DEBUG2((CE_CONT,
759*83c4dfe9Sjg 			    "find: devid match: %s 0x%x\n",
760*83c4dfe9Sjg 			    np->nvp_devpath, np->nvp_flags));
761*83c4dfe9Sjg 			DEVID_LOG_MATCH(("find", devid, np->nvp_devpath));
762*83c4dfe9Sjg 			DEVID_LOG_PATHS((CE_CONT, "%s\n", np->nvp_devpath));
763*83c4dfe9Sjg 
764*83c4dfe9Sjg 			/*
765*83c4dfe9Sjg 			 * Check if we have a cached devinfo reference for this
766*83c4dfe9Sjg 			 * devid.  Place a hold on it to prevent detach
767*83c4dfe9Sjg 			 * Otherwise, use the path instead.
768*83c4dfe9Sjg 			 * Note: returns with a hold on each dev_info
769*83c4dfe9Sjg 			 * node in the list.
770*83c4dfe9Sjg 			 */
771*83c4dfe9Sjg 			dip = NULL;
772*83c4dfe9Sjg 			if (np->nvp_flags & NVP_DEVID_DIP) {
773*83c4dfe9Sjg 				pdip = ddi_get_parent(np->nvp_dip);
774*83c4dfe9Sjg 				if (ndi_devi_tryenter(pdip, &circ)) {
775*83c4dfe9Sjg 					dip = np->nvp_dip;
776*83c4dfe9Sjg 					ndi_hold_devi(dip);
777*83c4dfe9Sjg 					ndi_devi_exit(pdip, circ);
778*83c4dfe9Sjg 					ASSERT(!DEVI_IS_ATTACHING(dip));
779*83c4dfe9Sjg 					ASSERT(!DEVI_IS_DETACHING(dip));
780*83c4dfe9Sjg 				} else {
781*83c4dfe9Sjg 					DEVID_LOG_DETACH((CE_CONT,
782*83c4dfe9Sjg 					    "may be detaching: %s\n",
783*83c4dfe9Sjg 					    np->nvp_devpath));
784*83c4dfe9Sjg 				}
785*83c4dfe9Sjg 			}
786*83c4dfe9Sjg 
787*83c4dfe9Sjg 			if (dip) {
788*83c4dfe9Sjg 				if (ndevis < retmax) {
789*83c4dfe9Sjg 					retdevis[ndevis++] = dip;
790*83c4dfe9Sjg 				} else {
791*83c4dfe9Sjg 					ndi_rele_devi(dip);
792*83c4dfe9Sjg 				}
793*83c4dfe9Sjg 				maxdevis++;
794*83c4dfe9Sjg 			} else {
795*83c4dfe9Sjg 				if (npaths < retmax)
796*83c4dfe9Sjg 					retpaths[npaths++] = np->nvp_devpath;
797*83c4dfe9Sjg 				maxpaths++;
798*83c4dfe9Sjg 			}
799*83c4dfe9Sjg 		}
800*83c4dfe9Sjg 	}
801*83c4dfe9Sjg 
802*83c4dfe9Sjg 	*retndevis = ndevis;
803*83c4dfe9Sjg 	*retnpaths = npaths;
804*83c4dfe9Sjg 	return (maxdevis > maxpaths ? maxdevis : maxpaths);
805*83c4dfe9Sjg }
806*83c4dfe9Sjg 
807*83c4dfe9Sjg 
808*83c4dfe9Sjg /*
809*83c4dfe9Sjg  * Search the devid cache, returning dev_t list for all
810*83c4dfe9Sjg  * device paths mapping to the device identified by the
811*83c4dfe9Sjg  * given devid.
812*83c4dfe9Sjg  *
813*83c4dfe9Sjg  * Primary interface used by ddi_lyr_devid_to_devlist()
814*83c4dfe9Sjg  */
815*83c4dfe9Sjg int
816*83c4dfe9Sjg e_devid_cache_to_devt_list(ddi_devid_t devid, char *minor_name,
817*83c4dfe9Sjg 	int *retndevts, dev_t **retdevts)
818*83c4dfe9Sjg {
819*83c4dfe9Sjg 	char		*path, **paths;
820*83c4dfe9Sjg 	int		i, j, n;
821*83c4dfe9Sjg 	dev_t		*devts, *udevts;
822*83c4dfe9Sjg 	dev_t		tdevt;
823*83c4dfe9Sjg 	int		ndevts, undevts, ndevts_alloced;
824*83c4dfe9Sjg 	dev_info_t	*devi, **devis;
825*83c4dfe9Sjg 	int		ndevis, npaths, nalloced;
826*83c4dfe9Sjg 	ddi_devid_t	match_devid;
827*83c4dfe9Sjg 
828*83c4dfe9Sjg 	DEVID_LOG_FIND(("find", devid, NULL));
829*83c4dfe9Sjg 
830*83c4dfe9Sjg 	ASSERT(ddi_devid_valid(devid) == DDI_SUCCESS);
831*83c4dfe9Sjg 	if (ddi_devid_valid(devid) != DDI_SUCCESS) {
832*83c4dfe9Sjg 		DEVID_LOG_ERR(("invalid devid", devid, NULL));
833*83c4dfe9Sjg 		return (DDI_FAILURE);
834*83c4dfe9Sjg 	}
835*83c4dfe9Sjg 
836*83c4dfe9Sjg 	nalloced = 128;
837*83c4dfe9Sjg 
838*83c4dfe9Sjg 	for (;;) {
839*83c4dfe9Sjg 		paths = kmem_zalloc(nalloced * sizeof (char *), KM_SLEEP);
840*83c4dfe9Sjg 		devis = kmem_zalloc(nalloced * sizeof (dev_info_t *), KM_SLEEP);
841*83c4dfe9Sjg 
842*83c4dfe9Sjg 		rw_enter(nvf_lock(dcfd_handle), RW_READER);
843*83c4dfe9Sjg 		n = e_devid_cache_devi_path_lists(devid, nalloced,
844*83c4dfe9Sjg 			&ndevis, devis, &npaths, paths);
845*83c4dfe9Sjg 		if (n <= nalloced)
846*83c4dfe9Sjg 			break;
847*83c4dfe9Sjg 		rw_exit(nvf_lock(dcfd_handle));
848*83c4dfe9Sjg 		for (i = 0; i < ndevis; i++)
849*83c4dfe9Sjg 			ndi_rele_devi(devis[i]);
850*83c4dfe9Sjg 		kmem_free(paths, nalloced * sizeof (char *));
851*83c4dfe9Sjg 		kmem_free(devis, nalloced * sizeof (dev_info_t *));
852*83c4dfe9Sjg 		nalloced = n + 128;
853*83c4dfe9Sjg 	}
854*83c4dfe9Sjg 
855*83c4dfe9Sjg 	for (i = 0; i < npaths; i++) {
856*83c4dfe9Sjg 		path = i_ddi_strdup(paths[i], KM_SLEEP);
857*83c4dfe9Sjg 		paths[i] = path;
858*83c4dfe9Sjg 	}
859*83c4dfe9Sjg 	rw_exit(nvf_lock(dcfd_handle));
860*83c4dfe9Sjg 
861*83c4dfe9Sjg 	if (ndevis == 0 && npaths == 0) {
862*83c4dfe9Sjg 		DEVID_LOG_ERR(("no devid found", devid, NULL));
863*83c4dfe9Sjg 		kmem_free(paths, nalloced * sizeof (char *));
864*83c4dfe9Sjg 		kmem_free(devis, nalloced * sizeof (dev_info_t *));
865*83c4dfe9Sjg 		return (DDI_FAILURE);
866*83c4dfe9Sjg 	}
867*83c4dfe9Sjg 
868*83c4dfe9Sjg 	ndevts_alloced = 128;
869*83c4dfe9Sjg restart:
870*83c4dfe9Sjg 	ndevts = 0;
871*83c4dfe9Sjg 	devts = kmem_alloc(ndevts_alloced * sizeof (dev_t), KM_SLEEP);
872*83c4dfe9Sjg 	for (i = 0; i < ndevis; i++) {
873*83c4dfe9Sjg 		ASSERT(!DEVI_IS_ATTACHING(devis[i]));
874*83c4dfe9Sjg 		ASSERT(!DEVI_IS_DETACHING(devis[i]));
875*83c4dfe9Sjg 		e_devid_minor_to_devlist(devis[i], minor_name,
876*83c4dfe9Sjg 			ndevts_alloced, &ndevts, devts);
877*83c4dfe9Sjg 		if (ndevts > ndevts_alloced) {
878*83c4dfe9Sjg 			kmem_free(devts, ndevts_alloced * sizeof (dev_t));
879*83c4dfe9Sjg 			ndevts_alloced += 128;
880*83c4dfe9Sjg 			goto restart;
881*83c4dfe9Sjg 		}
882*83c4dfe9Sjg 	}
883*83c4dfe9Sjg 	for (i = 0; i < npaths; i++) {
884*83c4dfe9Sjg 		DEVID_LOG_LOOKUP((CE_CONT, "lookup %s\n", paths[i]));
885*83c4dfe9Sjg 		devi = e_ddi_hold_devi_by_path(paths[i], 0);
886*83c4dfe9Sjg 		if (devi == NULL) {
887*83c4dfe9Sjg 			DEVID_LOG_STALE(("stale device reference",
888*83c4dfe9Sjg 			    devid, paths[i]));
889*83c4dfe9Sjg 			continue;
890*83c4dfe9Sjg 		}
891*83c4dfe9Sjg 		/*
892*83c4dfe9Sjg 		 * Verify the newly attached device registered a matching devid
893*83c4dfe9Sjg 		 */
894*83c4dfe9Sjg 		if (i_ddi_devi_get_devid(DDI_DEV_T_ANY, devi,
895*83c4dfe9Sjg 		    &match_devid) != DDI_SUCCESS) {
896*83c4dfe9Sjg 			DEVIDERR((CE_CONT,
897*83c4dfe9Sjg 			    "%s: no devid registered on attach\n",
898*83c4dfe9Sjg 			    paths[i]));
899*83c4dfe9Sjg 			ddi_release_devi(devi);
900*83c4dfe9Sjg 			continue;
901*83c4dfe9Sjg 		}
902*83c4dfe9Sjg 
903*83c4dfe9Sjg 		if (ddi_devid_compare(devid, match_devid) != 0) {
904*83c4dfe9Sjg 			DEVID_LOG_STALE(("new devid registered",
905*83c4dfe9Sjg 			    devid, paths[i]));
906*83c4dfe9Sjg 			ddi_release_devi(devi);
907*83c4dfe9Sjg 			ddi_devid_free(match_devid);
908*83c4dfe9Sjg 			continue;
909*83c4dfe9Sjg 		}
910*83c4dfe9Sjg 		ddi_devid_free(match_devid);
911*83c4dfe9Sjg 
912*83c4dfe9Sjg 		e_devid_minor_to_devlist(devi, minor_name,
913*83c4dfe9Sjg 			ndevts_alloced, &ndevts, devts);
914*83c4dfe9Sjg 		ddi_release_devi(devi);
915*83c4dfe9Sjg 		if (ndevts > ndevts_alloced) {
916*83c4dfe9Sjg 			kmem_free(devts,
917*83c4dfe9Sjg 			    ndevts_alloced * sizeof (dev_t));
918*83c4dfe9Sjg 			ndevts_alloced += 128;
919*83c4dfe9Sjg 			goto restart;
920*83c4dfe9Sjg 		}
921*83c4dfe9Sjg 	}
922*83c4dfe9Sjg 
923*83c4dfe9Sjg 	/* drop hold from e_devid_cache_devi_path_lists */
924*83c4dfe9Sjg 	for (i = 0; i < ndevis; i++) {
925*83c4dfe9Sjg 		ndi_rele_devi(devis[i]);
926*83c4dfe9Sjg 	}
927*83c4dfe9Sjg 	for (i = 0; i < npaths; i++) {
928*83c4dfe9Sjg 		kmem_free(paths[i], strlen(paths[i]) + 1);
929*83c4dfe9Sjg 	}
930*83c4dfe9Sjg 	kmem_free(paths, nalloced * sizeof (char *));
931*83c4dfe9Sjg 	kmem_free(devis, nalloced * sizeof (dev_info_t *));
932*83c4dfe9Sjg 
933*83c4dfe9Sjg 	if (ndevts == 0) {
934*83c4dfe9Sjg 		DEVID_LOG_ERR(("no devid found", devid, NULL));
935*83c4dfe9Sjg 		kmem_free(devts, ndevts_alloced * sizeof (dev_t));
936*83c4dfe9Sjg 		return (DDI_FAILURE);
937*83c4dfe9Sjg 	}
938*83c4dfe9Sjg 
939*83c4dfe9Sjg 	/*
940*83c4dfe9Sjg 	 * Build the final list of sorted dev_t's with duplicates collapsed so
941*83c4dfe9Sjg 	 * returned results are consistent. This prevents implementation
942*83c4dfe9Sjg 	 * artifacts from causing unnecessary changes in SVM namespace.
943*83c4dfe9Sjg 	 */
944*83c4dfe9Sjg 	/* bubble sort */
945*83c4dfe9Sjg 	for (i = 0; i < (ndevts - 1); i++) {
946*83c4dfe9Sjg 		for (j = 0; j < ((ndevts - 1) - i); j++) {
947*83c4dfe9Sjg 			if (devts[j + 1] < devts[j]) {
948*83c4dfe9Sjg 				tdevt = devts[j];
949*83c4dfe9Sjg 				devts[j] = devts[j + 1];
950*83c4dfe9Sjg 				devts[j + 1] = tdevt;
951*83c4dfe9Sjg 			}
952*83c4dfe9Sjg 		}
953*83c4dfe9Sjg 	}
954*83c4dfe9Sjg 
955*83c4dfe9Sjg 	/* determine number of unique values */
956*83c4dfe9Sjg 	for (undevts = ndevts, i = 1; i < ndevts; i++) {
957*83c4dfe9Sjg 		if (devts[i - 1] == devts[i])
958*83c4dfe9Sjg 			undevts--;
959*83c4dfe9Sjg 	}
960*83c4dfe9Sjg 
961*83c4dfe9Sjg 	/* allocate unique */
962*83c4dfe9Sjg 	udevts = kmem_alloc(undevts * sizeof (dev_t), KM_SLEEP);
963*83c4dfe9Sjg 
964*83c4dfe9Sjg 	/* copy unique */
965*83c4dfe9Sjg 	udevts[0] = devts[0];
966*83c4dfe9Sjg 	for (i = 1, j = 1; i < ndevts; i++) {
967*83c4dfe9Sjg 		if (devts[i - 1] != devts[i])
968*83c4dfe9Sjg 			udevts[j++] = devts[i];
969*83c4dfe9Sjg 	}
970*83c4dfe9Sjg 	ASSERT(j == undevts);
971*83c4dfe9Sjg 
972*83c4dfe9Sjg 	kmem_free(devts, ndevts_alloced * sizeof (dev_t));
973*83c4dfe9Sjg 
974*83c4dfe9Sjg 	*retndevts = undevts;
975*83c4dfe9Sjg 	*retdevts = udevts;
976*83c4dfe9Sjg 
977*83c4dfe9Sjg 	return (DDI_SUCCESS);
978*83c4dfe9Sjg }
979*83c4dfe9Sjg 
980*83c4dfe9Sjg void
981*83c4dfe9Sjg e_devid_cache_free_devt_list(int ndevts, dev_t *devt_list)
982*83c4dfe9Sjg {
983*83c4dfe9Sjg 	kmem_free(devt_list, ndevts * sizeof (dev_t *));
984*83c4dfe9Sjg }
985*83c4dfe9Sjg 
986*83c4dfe9Sjg #ifdef	DEBUG
987*83c4dfe9Sjg static void
988*83c4dfe9Sjg devid_log(char *fmt, ddi_devid_t devid, char *path)
989*83c4dfe9Sjg {
990*83c4dfe9Sjg 	char *devidstr = ddi_devid_str_encode(devid, NULL);
991*83c4dfe9Sjg 	if (path) {
992*83c4dfe9Sjg 		cmn_err(CE_CONT, "%s: %s %s\n", fmt, path, devidstr);
993*83c4dfe9Sjg 	} else {
994*83c4dfe9Sjg 		cmn_err(CE_CONT, "%s: %s\n", fmt, devidstr);
995*83c4dfe9Sjg 	}
996*83c4dfe9Sjg 	ddi_devid_str_free(devidstr);
997*83c4dfe9Sjg }
998*83c4dfe9Sjg #endif	/* DEBUG */
999