183c4dfejg/*
283c4dfejg * CDDL HEADER START
383c4dfejg *
483c4dfejg * The contents of this file are subject to the terms of the
583c4dfejg * Common Development and Distribution License (the "License").
683c4dfejg * You may not use this file except in compliance with the License.
783c4dfejg *
883c4dfejg * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
983c4dfejg * or http://www.opensolaris.org/os/licensing.
1083c4dfejg * See the License for the specific language governing permissions
1183c4dfejg * and limitations under the License.
1283c4dfejg *
1383c4dfejg * When distributing Covered Code, include this CDDL HEADER in each
1483c4dfejg * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
1583c4dfejg * If applicable, add the following below this CDDL HEADER, with the
1683c4dfejg * fields enclosed by brackets "[]" replaced with your own identifying
1783c4dfejg * information: Portions Copyright [yyyy] [name of copyright owner]
1883c4dfejg *
1983c4dfejg * CDDL HEADER END
2083c4dfejg */
2183c4dfejg/*
224f1e984Reed * Copyright (c) 2006, 2010, Oracle and/or its affiliates. All rights reserved.
236fe4f30Pavel Zakharov * Copyright (c) 2018 by Delphix. All rights reserved.
2483c4dfejg */
2583c4dfejg
2683c4dfejg#include <sys/note.h>
2783c4dfejg#include <sys/t_lock.h>
2883c4dfejg#include <sys/cmn_err.h>
2983c4dfejg#include <sys/instance.h>
3083c4dfejg#include <sys/conf.h>
3183c4dfejg#include <sys/stat.h>
3283c4dfejg#include <sys/ddi.h>
3383c4dfejg#include <sys/hwconf.h>
3483c4dfejg#include <sys/sunddi.h>
3583c4dfejg#include <sys/sunndi.h>
36392e836Gavin Maltby#include <sys/sunmdi.h>
3783c4dfejg#include <sys/ddi_impldefs.h>
3883c4dfejg#include <sys/ndi_impldefs.h>
3983c4dfejg#include <sys/kobj.h>
4083c4dfejg#include <sys/devcache.h>
4183c4dfejg#include <sys/devid_cache.h>
4283c4dfejg#include <sys/sysmacros.h>
4383c4dfejg
4483c4dfejg/*
4583c4dfejg * Discovery refers to the heroic effort made to discover a device which
4683c4dfejg * cannot be accessed at the physical path where it once resided.  Discovery
4783c4dfejg * involves walking the entire device tree attaching all possible disk
4883c4dfejg * instances, to search for the device referenced by a devid.  Obviously,
4983c4dfejg * full device discovery is something to be avoided where possible.
5083c4dfejg * Note that simply invoking devfsadm(1M) is equivalent to running full
5183c4dfejg * discovery at the devid cache level.
5283c4dfejg *
5383c4dfejg * Reasons why a disk may not be accessible:
5483c4dfejg *	disk powered off
5583c4dfejg *	disk removed or cable disconnected
5683c4dfejg *	disk or adapter broken
5783c4dfejg *
5883c4dfejg * Note that discovery is not needed and cannot succeed in any of these
5983c4dfejg * cases.
6083c4dfejg *
6183c4dfejg * When discovery may succeed:
6283c4dfejg *	Discovery will result in success when a device has been moved
6383c4dfejg *	to a different address.  Note that it's recommended that
6483c4dfejg *	devfsadm(1M) be invoked (no arguments required) whenever a system's
6583c4dfejg *	h/w configuration has been updated.  Alternatively, a
6683c4dfejg *	reconfiguration boot can be used to accomplish the same result.
6783c4dfejg *
6883c4dfejg * Note that discovery is not necessary to be able to correct an access
6983c4dfejg * failure for a device which was powered off.  Assuming the cache has an
7083c4dfejg * entry for such a device, simply powering it on should permit the system
7183c4dfejg * to access it.  If problems persist after powering it on, invoke
7283c4dfejg * devfsadm(1M).
7383c4dfejg *
 * Discovery prior to mounting root is only of interest when booting
 * from a filesystem which accesses devices by device id, which not
 * all filesystems do.
7783c4dfejg *
7883c4dfejg * Tunables
7983c4dfejg *
8083c4dfejg * devid_discovery_boot (default 1)
8183c4dfejg *	Number of times discovery will be attempted prior to mounting root.
8283c4dfejg *	Must be done at least once to recover from corrupted or missing
8383c4dfejg *	devid cache backing store.  Probably there's no reason to ever
84392e836Gavin Maltby *	set this to greater than one as a missing device will remain
8583c4dfejg *	unavailable no matter how often the system searches for it.
8683c4dfejg *
8783c4dfejg * devid_discovery_postboot (default 1)
8883c4dfejg *	Number of times discovery will be attempted after mounting root.
8983c4dfejg *	This must be performed at least once to discover any devices
9083c4dfejg *	needed after root is mounted which may have been powered
9183c4dfejg *	off and moved before booting.
9283c4dfejg *	Setting this to a larger positive number will introduce
9383c4dfejg *	some inconsistency in system operation.  Searching for a device
9483c4dfejg *	will take an indeterminate amount of time, sometimes slower,
9583c4dfejg *	sometimes faster.  In addition, the system will sometimes
9683c4dfejg *	discover a newly powered on device, sometimes it won't.
9783c4dfejg *	Use of this option is not therefore recommended.
9883c4dfejg *
9983c4dfejg * devid_discovery_postboot_always (default 0)
10083c4dfejg *	Set to 1, the system will always attempt full discovery.
10183c4dfejg *
10283c4dfejg * devid_discovery_secs (default 0)
10383c4dfejg *	Set to a positive value, the system will attempt full discovery
10483c4dfejg *	but with a minimum delay between attempts.  A device search
10583c4dfejg *	within the period of time specified will result in failure.
10683c4dfejg *
10783c4dfejg * devid_cache_read_disable (default 0)
10883c4dfejg *	Set to 1 to disable reading /etc/devices/devid_cache.
10983c4dfejg *	Devid cache will continue to operate normally but
11083c4dfejg *	at least one discovery attempt will be required.
11183c4dfejg *
11283c4dfejg * devid_cache_write_disable (default 0)
11383c4dfejg *	Set to 1 to disable updates to /etc/devices/devid_cache.
11483c4dfejg *	Any updates to the devid cache will not be preserved across a reboot.
11583c4dfejg *
11683c4dfejg * devid_report_error (default 0)
11783c4dfejg *	Set to 1 to enable some error messages related to devid
11883c4dfejg *	cache failures.
11983c4dfejg *
12083c4dfejg * The devid is packed in the cache file as a byte array.  For
12183c4dfejg * portability, this could be done in the encoded string format.
12283c4dfejg */
12383c4dfejg
12483c4dfejg
/* Tunables -- see the theory statement at the top of this file. */
int devid_discovery_boot = 1;		/* discovery budget before root mount */
int devid_discovery_postboot = 1;	/* discovery budget after root mount */
int devid_discovery_postboot_always = 0; /* 1: always attempt full discovery */
int devid_discovery_secs = 0;		/* >0: min seconds between attempts */

int devid_cache_read_disable = 0;	/* 1: never read the backing store */
int devid_cache_write_disable = 0;	/* 1: never update the backing store */

int devid_report_error = 0;		/* 1: report devid cache failures */
13483c4dfejg
13583c4dfejg
/*
 * State to manage discovery of devices providing a devid.
 * All four variables are protected by devid_discovery_mutex;
 * devid_discovery_cv is broadcast when a discovery pass completes.
 */
static int		devid_discovery_busy = 0;	/* discovery running */
static kmutex_t		devid_discovery_mutex;
static kcondvar_t	devid_discovery_cv;
static clock_t		devid_last_discovery = 0;	/* lbolt of last pass */
14383c4dfejg
14483c4dfejg
#ifdef	DEBUG
/*
 * Debug knobs: setting one nonzero enables the corresponding
 * DEVID_* / NVP_DEVID_* tracing macros in DEBUG kernels.
 */
int nvp_devid_debug = 0;
int devid_debug = 0;
int devid_log_registers = 0;
int devid_log_finds = 0;
int devid_log_lookups = 0;
int devid_log_discovery = 0;
int devid_log_matches = 0;
int devid_log_paths = 0;
int devid_log_failures = 0;
int devid_log_hold = 0;
int devid_log_unregisters = 0;
int devid_log_removes = 0;
int devid_register_debug = 0;
int devid_log_stale = 0;
int devid_log_detaches = 0;
#endif	/* DEBUG */
16283c4dfejg
/*
 * devid cache file registration for cache reads and updates
 */
static nvf_ops_t devid_cache_ops = {
	"/etc/devices/devid_cache",		/* path to cache */
	devid_cache_unpack_nvlist,		/* read: nvlist to nvp */
	devid_cache_pack_list,			/* write: nvp to nvlist */
	devid_list_free,			/* free data list */
	NULL					/* write complete callback */
};

/*
 * Handle to the registered devid cache handlers, obtained from
 * nvf_register_file() in devid_cache_init() and used for all
 * subsequent nvf_*() operations on the cache.
 */
nvf_handle_t	dcfd_handle;
17883c4dfejg
17983c4dfejg
18083c4dfejg/*
18183c4dfejg * Initialize devid cache file management
18283c4dfejg */
18383c4dfejgvoid
18483c4dfejgdevid_cache_init(void)
18583c4dfejg{
18683c4dfejg	dcfd_handle = nvf_register_file(&devid_cache_ops);
18783c4dfejg	ASSERT(dcfd_handle);
18883c4dfejg
18983c4dfejg	list_create(nvf_list(dcfd_handle), sizeof (nvp_devid_t),
19083c4dfejg	    offsetof(nvp_devid_t, nvp_link));
19183c4dfejg
19283c4dfejg	mutex_init(&devid_discovery_mutex, NULL, MUTEX_DEFAULT, NULL);
19383c4dfejg	cv_init(&devid_discovery_cv, NULL, CV_DRIVER, NULL);
19483c4dfejg}
19583c4dfejg
19683c4dfejg/*
19783c4dfejg * Read and initialize the devid cache from the persistent store
19883c4dfejg */
19983c4dfejgvoid
20083c4dfejgdevid_cache_read(void)
20183c4dfejg{
20283c4dfejg	if (!devid_cache_read_disable) {
20383c4dfejg		rw_enter(nvf_lock(dcfd_handle), RW_WRITER);
20483c4dfejg		ASSERT(list_head(nvf_list(dcfd_handle)) == NULL);
20583c4dfejg		(void) nvf_read_file(dcfd_handle);
20683c4dfejg		rw_exit(nvf_lock(dcfd_handle));
20783c4dfejg	}
20883c4dfejg}
20983c4dfejg
21083c4dfejgstatic void
21183c4dfejgdevid_nvp_free(nvp_devid_t *dp)
21283c4dfejg{
21383c4dfejg	if (dp->nvp_devpath)
21483c4dfejg		kmem_free(dp->nvp_devpath, strlen(dp->nvp_devpath)+1);
21583c4dfejg	if (dp->nvp_devid)
21683c4dfejg		kmem_free(dp->nvp_devid, ddi_devid_sizeof(dp->nvp_devid));
21783c4dfejg
21883c4dfejg	kmem_free(dp, sizeof (nvp_devid_t));
21983c4dfejg}
22083c4dfejg
22183c4dfejgstatic void
22283c4dfejgdevid_list_free(nvf_handle_t fd)
22383c4dfejg{
22483c4dfejg	list_t		*listp;
22583c4dfejg	nvp_devid_t	*np;
22683c4dfejg
22783c4dfejg	ASSERT(RW_WRITE_HELD(nvf_lock(dcfd_handle)));
22883c4dfejg
22983c4dfejg	listp = nvf_list(fd);
23083c4dfejg	while (np = list_head(listp)) {
23183c4dfejg		list_remove(listp, np);
23283c4dfejg		devid_nvp_free(np);
23383c4dfejg	}
23483c4dfejg}
23583c4dfejg
/*
 * Unlink a single nvp element from the cache list identified by
 * handle fd and free it.  Caller must hold the nvf lock as writer.
 */
static void
devid_nvp_unlink_and_free(nvf_handle_t fd, nvp_devid_t *np)
{
	list_remove(nvf_list(fd), np);
	devid_nvp_free(np);
}
24583c4dfejg
24683c4dfejg/*
24783c4dfejg * Unpack a device path/nvlist pair to the list of devid cache elements.
24883c4dfejg * Used to parse the nvlist format when reading
24983c4dfejg * /etc/devices/devid_cache
25083c4dfejg */
25183c4dfejgstatic int
25283c4dfejgdevid_cache_unpack_nvlist(nvf_handle_t fd, nvlist_t *nvl, char *name)
25383c4dfejg{
25483c4dfejg	nvp_devid_t *np;
25583c4dfejg	ddi_devid_t devidp;
25683c4dfejg	int rval;
25783c4dfejg	uint_t n;
25883c4dfejg
25983c4dfejg	NVP_DEVID_DEBUG_PATH((name));
26083c4dfejg	ASSERT(RW_WRITE_HELD(nvf_lock(dcfd_handle)));
26183c4dfejg
26283c4dfejg	/*
26383c4dfejg	 * check path for a devid
26483c4dfejg	 */
26583c4dfejg	rval = nvlist_lookup_byte_array(nvl,
266a204de7cth	    DP_DEVID_ID, (uchar_t **)&devidp, &n);
26783c4dfejg	if (rval == 0) {
26883c4dfejg		if (ddi_devid_valid(devidp) == DDI_SUCCESS) {
26983c4dfejg			ASSERT(n == ddi_devid_sizeof(devidp));
27083c4dfejg			np = kmem_zalloc(sizeof (nvp_devid_t), KM_SLEEP);
27183c4dfejg			np->nvp_devpath = i_ddi_strdup(name, KM_SLEEP);
27283c4dfejg			np->nvp_devid = kmem_alloc(n, KM_SLEEP);
27383c4dfejg			(void) bcopy(devidp, np->nvp_devid, n);
27483c4dfejg			list_insert_tail(nvf_list(fd), np);
27583c4dfejg			NVP_DEVID_DEBUG_DEVID((np->nvp_devid));
27683c4dfejg		} else {
27783c4dfejg			DEVIDERR((CE_CONT,
27883c4dfejg			    "%s: invalid devid\n", name));
27983c4dfejg		}
28083c4dfejg	} else {
28183c4dfejg		DEVIDERR((CE_CONT,
28283c4dfejg		    "%s: devid not available\n", name));
28383c4dfejg	}
28483c4dfejg
28583c4dfejg	return (0);
28683c4dfejg}
28783c4dfejg
/*
 * Pack the list of devid cache elements into a single nvlist
 * Used when writing the nvlist file.
 *
 * Each element carrying a devid becomes a sub-nvlist keyed by its
 * devpath, with the devid stored as a byte array under DP_DEVID_ID.
 * Returns DDI_SUCCESS with *ret_nvl set, or DDI_FAILURE with
 * *ret_nvl set to NULL on any allocation/packing error.
 */
static int
devid_cache_pack_list(nvf_handle_t fd, nvlist_t **ret_nvl)
{
	nvlist_t	*nvl, *sub_nvl;
	nvp_devid_t	*np;
	int		rval;
	list_t		*listp;

	ASSERT(RW_WRITE_HELD(nvf_lock(dcfd_handle)));

	rval = nvlist_alloc(&nvl, NV_UNIQUE_NAME, KM_SLEEP);
	if (rval != 0) {
		nvf_error("%s: nvlist alloc error %d\n",
		    nvf_cache_name(fd), rval);
		return (DDI_FAILURE);
	}

	listp = nvf_list(fd);
	for (np = list_head(listp); np; np = list_next(listp, np)) {
		/* skip entries with no registered devid */
		if (np->nvp_devid == NULL)
			continue;
		NVP_DEVID_DEBUG_PATH(np->nvp_devpath);
		rval = nvlist_alloc(&sub_nvl, NV_UNIQUE_NAME, KM_SLEEP);
		if (rval != 0) {
			nvf_error("%s: nvlist alloc error %d\n",
			    nvf_cache_name(fd), rval);
			/* nothing was allocated; avoid stale free in err: */
			sub_nvl = NULL;
			goto err;
		}

		rval = nvlist_add_byte_array(sub_nvl, DP_DEVID_ID,
		    (uchar_t *)np->nvp_devid,
		    ddi_devid_sizeof(np->nvp_devid));
		if (rval == 0) {
			NVP_DEVID_DEBUG_DEVID(np->nvp_devid);
		} else {
			nvf_error(
			    "%s: nvlist add error %d (devid)\n",
			    nvf_cache_name(fd), rval);
			goto err;
		}

		rval = nvlist_add_nvlist(nvl, np->nvp_devpath, sub_nvl);
		if (rval != 0) {
			nvf_error("%s: nvlist add error %d (sublist)\n",
			    nvf_cache_name(fd), rval);
			goto err;
		}
		/* the add copied sub_nvl into nvl; release our copy */
		nvlist_free(sub_nvl);
	}

	*ret_nvl = nvl;
	return (DDI_SUCCESS);

err:
	/* nvlist_free(NULL) is a no-op */
	nvlist_free(sub_nvl);
	nvlist_free(nvl);
	*ret_nvl = NULL;
	return (DDI_FAILURE);
}
35283c4dfejg
35383c4dfejgstatic int
35483c4dfejge_devid_do_discovery(void)
35583c4dfejg{
35683c4dfejg	ASSERT(mutex_owned(&devid_discovery_mutex));
35783c4dfejg
35883c4dfejg	if (i_ddi_io_initialized() == 0) {
35983c4dfejg		if (devid_discovery_boot > 0) {
36083c4dfejg			devid_discovery_boot--;
36183c4dfejg			return (1);
36283c4dfejg		}
36383c4dfejg	} else {
36483c4dfejg		if (devid_discovery_postboot_always > 0)
36583c4dfejg			return (1);
36683c4dfejg		if (devid_discovery_postboot > 0) {
36783c4dfejg			devid_discovery_postboot--;
36883c4dfejg			return (1);
36983c4dfejg		}
37083c4dfejg		if (devid_discovery_secs > 0) {
37183c4dfejg			if ((ddi_get_lbolt() - devid_last_discovery) >
37283c4dfejg			    drv_usectohz(devid_discovery_secs * MICROSEC)) {
37383c4dfejg				return (1);
37483c4dfejg			}
37583c4dfejg		}
37683c4dfejg	}
37783c4dfejg
37883c4dfejg	DEVID_LOG_DISC((CE_CONT, "devid_discovery: no discovery\n"));
37983c4dfejg	return (0);
38083c4dfejg}
38183c4dfejg
38283c4dfejgstatic void
38383c4dfejge_ddi_devid_hold_by_major(major_t major)
38483c4dfejg{
38583c4dfejg	DEVID_LOG_DISC((CE_CONT,
38683c4dfejg	    "devid_discovery: ddi_hold_installed_driver %d\n", major));
38783c4dfejg
38883c4dfejg	if (ddi_hold_installed_driver(major) == NULL)
38983c4dfejg		return;
39083c4dfejg
39183c4dfejg	ddi_rele_driver(major);
39283c4dfejg}
39383c4dfejg
/*
 * Legacy support - see e_ddi_devid_hold_installed_driver() below.
 * Hard-coded list of drivers to hold during discovery; retained until
 * an upgrade mechanism for driver conf files is available.
 */
static char *e_ddi_devid_hold_driver_list[] = { "sd", "ssd" };

#define	N_DRIVERS_TO_HOLD	\
	(sizeof (e_ddi_devid_hold_driver_list) / sizeof (char *))
39983c4dfejg
/*
 * Pre-I/O-subsystem discovery: hold (and release) each candidate
 * driver via e_ddi_devid_hold_by_major() so that its instances are
 * probed.  Candidates are: the driver named in the devid's hint
 * field, every driver flagged DN_DEVID_REGISTRANT, and finally the
 * hard-coded legacy list above.
 */
static void
e_ddi_devid_hold_installed_driver(ddi_devid_t devid)
{
	impl_devid_t	*id = (impl_devid_t *)devid;
	major_t		major, hint_major;
	char		hint[DEVID_HINT_SIZE + 1];
	struct devnames	*dnp;
	char		**drvp;
	int		i;

	/* Count non-null bytes */
	for (i = 0; i < DEVID_HINT_SIZE; i++)
		if (id->did_driver[i] == '\0')
			break;

	/*
	 * Make a copy of the driver hint; did_driver is not
	 * guaranteed NUL-terminated, hence the bounded copy.
	 */
	bcopy(id->did_driver, hint, i);
	hint[i] = '\0';

	/* search for the devid using the hint driver */
	hint_major = ddi_name_to_major(hint);
	if (hint_major != DDI_MAJOR_T_NONE) {
		e_ddi_devid_hold_by_major(hint_major);
	}

	/*
	 * search for the devid with each driver declaring
	 * itself as a devid registrant.
	 */
	for (major = 0; major < devcnt; major++) {
		if (major == hint_major)
			continue;	/* already held above */
		dnp = &devnamesp[major];
		if (dnp->dn_flags & DN_DEVID_REGISTRANT) {
			e_ddi_devid_hold_by_major(major);
		}
	}

	/*
	 * Legacy support: may be removed once an upgrade mechanism
	 * for driver conf files is available.
	 */
	drvp = e_ddi_devid_hold_driver_list;
	for (i = 0; i < N_DRIVERS_TO_HOLD; i++, drvp++) {
		major = ddi_name_to_major(*drvp);
		if (major != DDI_MAJOR_T_NONE && major != hint_major) {
			e_ddi_devid_hold_by_major(major);
		}
	}
}
45083c4dfejg
/*
 * Return success if discovery was attempted, to indicate
 * that the desired device may now be available.
 *
 * Only one thread runs discovery at a time.  A thread arriving while
 * discovery is in progress waits on devid_discovery_cv and reports
 * success once the active pass completes (that pass may have found
 * its device too).  Whether a fresh pass is permitted at all is
 * governed by the tunables, via e_devid_do_discovery().
 */
int
e_ddi_devid_discovery(ddi_devid_t devid)
{
	int flags;
	int rval = DDI_SUCCESS;

	mutex_enter(&devid_discovery_mutex);

	if (devid_discovery_busy) {
		DEVID_LOG_DISC((CE_CONT, "devid_discovery: busy\n"));
		while (devid_discovery_busy) {
			cv_wait(&devid_discovery_cv, &devid_discovery_mutex);
		}
	} else if (e_devid_do_discovery()) {
		/*
		 * Become the active discoverer; drop the mutex so
		 * other threads can queue on devid_discovery_cv
		 * while the (potentially long) walk runs.
		 */
		devid_discovery_busy = 1;
		mutex_exit(&devid_discovery_mutex);

		if (i_ddi_io_initialized() == 0) {
			/* pre-root: attach candidate drivers directly */
			e_ddi_devid_hold_installed_driver(devid);
		} else {
			/* post-root: configure the whole device tree */
			DEVID_LOG_DISC((CE_CONT,
			    "devid_discovery: ndi_devi_config\n"));
			flags = NDI_DEVI_PERSIST | NDI_CONFIG | NDI_NO_EVENT;
			if (i_ddi_io_initialized())
				flags |= NDI_DRV_CONF_REPROBE;
			(void) ndi_devi_config(ddi_root_node(), flags);
		}

		mutex_enter(&devid_discovery_mutex);
		devid_discovery_busy = 0;
		cv_broadcast(&devid_discovery_cv);
		/* record pass time for the devid_discovery_secs rate limit */
		if (devid_discovery_secs > 0)
			devid_last_discovery = ddi_get_lbolt();
		DEVID_LOG_DISC((CE_CONT, "devid_discovery: done\n"));
	} else {
		rval = DDI_FAILURE;
		DEVID_LOG_DISC((CE_CONT, "no devid discovery\n"));
	}

	mutex_exit(&devid_discovery_mutex);

	return (rval);
}
49883c4dfejg
/*
 * As part of registering a devid for a device,
 * update the devid cache with this device/devid pair
 * or note that this combination has registered.
 *
 * If a devpath is provided it will be used as the path to register the
 * devid against, otherwise we use ddi_pathname(dip).  In both cases
 * we duplicate the path string so that it can be cached/freed independently
 * of the original owner.
 *
 * Returns DDI_SUCCESS, or DDI_FAILURE when no path could be formed.
 */
static int
e_devid_cache_register_cmn(dev_info_t *dip, ddi_devid_t devid, char *devpath)
{
	nvp_devid_t *np;
	nvp_devid_t *new_nvp;
	ddi_devid_t new_devid;
	int new_devid_size;
	char *path, *fullpath;
	ddi_devid_t free_devid = NULL;	/* stale devid to free after unlock */
	int pathlen;
	list_t *listp;
	int is_dirty = 0;


	ASSERT(ddi_devid_valid(devid) == DDI_SUCCESS);

	/* Duplicate the path so the cache owns its own copy. */
	if (devpath) {
		pathlen = strlen(devpath) + 1;
		path = kmem_alloc(pathlen, KM_SLEEP);
		bcopy(devpath, path, pathlen);
	} else {
		/*
		 * We are willing to accept DS_BOUND nodes if we can form a full
		 * ddi_pathname (i.e. the node is part way to becoming
		 * DS_INITIALIZED and devi_addr/ddi_get_name_addr are non-NULL).
		 */
		if (ddi_get_name_addr(dip) == NULL)
			return (DDI_FAILURE);

		fullpath = kmem_alloc(MAXPATHLEN, KM_SLEEP);
		(void) ddi_pathname(dip, fullpath);
		pathlen = strlen(fullpath) + 1;
		path = kmem_alloc(pathlen, KM_SLEEP);
		bcopy(fullpath, path, pathlen);
		kmem_free(fullpath, MAXPATHLEN);
	}

	DEVID_LOG_REG(("register", devid, path));

	/* Pre-allocate a new element and devid copy before taking the lock. */
	new_nvp = kmem_zalloc(sizeof (nvp_devid_t), KM_SLEEP);
	new_devid_size = ddi_devid_sizeof(devid);
	new_devid = kmem_alloc(new_devid_size, KM_SLEEP);
	(void) bcopy(devid, new_devid, new_devid_size);

	rw_enter(nvf_lock(dcfd_handle), RW_WRITER);

	listp = nvf_list(dcfd_handle);
	for (np = list_head(listp); np; np = list_next(listp, np)) {
		if (strcmp(path, np->nvp_devpath) == 0) {
			DEVID_DEBUG2((CE_CONT,
			    "register: %s path match\n", path));
			if (np->nvp_devid == NULL) {
				/*
				 * Existing entry with no devid; install ours.
				 * "replace:" is also the jump target for the
				 * invalid-devid case just below.
				 */
replace:			np->nvp_devid = new_devid;
				np->nvp_flags |=
				    NVP_DEVID_DIP | NVP_DEVID_REGISTERED;
				np->nvp_dip = dip;
				if (!devid_cache_write_disable) {
					nvf_mark_dirty(dcfd_handle);
					is_dirty = 1;
				}
				rw_exit(nvf_lock(dcfd_handle));
				/* pre-allocated element/path not needed */
				kmem_free(new_nvp, sizeof (nvp_devid_t));
				kmem_free(path, pathlen);
				goto exit;
			}
			if (ddi_devid_valid(np->nvp_devid) != DDI_SUCCESS) {
				/* replace invalid devid */
				free_devid = np->nvp_devid;
				goto replace;
			}
			/*
			 * We're registering an already-cached path
			 * Does the device's devid match the cache?
			 */
			if (ddi_devid_compare(devid, np->nvp_devid) != 0) {
				DEVID_DEBUG((CE_CONT, "devid register: "
				    "devid %s does not match\n", path));
				/*
				 * We do not expect devids to change, log it.
				 */
				char *devid_stored =
				    ddi_devid_str_encode(np->nvp_devid, NULL);
				char *devid_new =
				    ddi_devid_str_encode(devid, NULL);

				cmn_err(CE_CONT, "devid register: devid for "
				    "%s does not match. stored: %s, new: %s.",
				    path, devid_stored, devid_new);

				ddi_devid_str_free(devid_stored);
				ddi_devid_str_free(devid_new);

				/*
				 * Replace cached devid for this path
				 * with newly registered devid.  A devid
				 * may map to multiple paths but one path
				 * should only map to one devid.
				 * Drop the stale entry and fall out of the
				 * loop to append a fresh one below.
				 */
				devid_nvp_unlink_and_free(dcfd_handle, np);
				np = NULL;
				break;
			} else {
				/* exact match: just refresh the dip binding */
				DEVID_DEBUG2((CE_CONT,
				    "devid register: %s devid match\n", path));
				np->nvp_flags |=
				    NVP_DEVID_DIP | NVP_DEVID_REGISTERED;
				np->nvp_dip = dip;
				rw_exit(nvf_lock(dcfd_handle));
				kmem_free(new_nvp, sizeof (nvp_devid_t));
				kmem_free(path, pathlen);
				kmem_free(new_devid, new_devid_size);
				return (DDI_SUCCESS);
			}
		}
	}

	/*
	 * Add newly registered devid to the cache
	 */
	ASSERT(np == NULL);

	new_nvp->nvp_devpath = path;
	new_nvp->nvp_flags = NVP_DEVID_DIP | NVP_DEVID_REGISTERED;
	new_nvp->nvp_dip = dip;
	new_nvp->nvp_devid = new_devid;

	if (!devid_cache_write_disable) {
		is_dirty = 1;
		nvf_mark_dirty(dcfd_handle);
	}
	list_insert_tail(nvf_list(dcfd_handle), new_nvp);

	rw_exit(nvf_lock(dcfd_handle));

exit:
	/* free any stale devid displaced via the replace: path */
	if (free_devid)
		kmem_free(free_devid, ddi_devid_sizeof(free_devid));

	if (is_dirty)
		nvf_wake_daemon();

	return (DDI_SUCCESS);
}
65283c4dfejg
/*
 * Register a devid for dip, cached under its ddi_pathname().
 */
int
e_devid_cache_register(dev_info_t *dip, ddi_devid_t devid)
{
	return (e_devid_cache_register_cmn(dip, devid, NULL));
}
658392e836Gavin Maltby
65983c4dfejg/*
66088acca0Gavin Maltby * Unregister a device's devid; the devinfo may hit on multiple entries
66188acca0Gavin Maltby * arising from both pHCI and vHCI paths.
66288acca0Gavin Maltby * Called as an instance detachs.
66388acca0Gavin Maltby * Invalidate the devid's devinfo reference.
66488acca0Gavin Maltby * Devid-path remains in the cache.
66583c4dfejg */
66688acca0Gavin Maltby
66783c4dfejgvoid
66883c4dfejge_devid_cache_unregister(dev_info_t *dip)
66983c4dfejg{
67083c4dfejg	nvp_devid_t *np;
67183c4dfejg	list_t *listp;
67283c4dfejg
67383c4dfejg	rw_enter(nvf_lock(dcfd_handle), RW_WRITER);
67483c4dfejg
67583c4dfejg	listp = nvf_list(dcfd_handle);
67683c4dfejg	for (np = list_head(listp); np; np = list_next(listp, np)) {
67783c4dfejg		if (np->nvp_devid == NULL)
67883c4dfejg			continue;
67983c4dfejg		if ((np->nvp_flags & NVP_DEVID_DIP) && np->nvp_dip == dip) {
68083c4dfejg			DEVID_LOG_UNREG((CE_CONT,
681a204de7cth			    "unregister: %s\n", np->nvp_devpath));
68283c4dfejg			np->nvp_flags &= ~NVP_DEVID_DIP;
68383c4dfejg			np->nvp_dip = NULL;
68483c4dfejg		}
68583c4dfejg	}
68683c4dfejg
68783c4dfejg	rw_exit(nvf_lock(dcfd_handle));
68883c4dfejg}
68983c4dfejg
690392e836Gavin Maltbyint
691392e836Gavin Maltbye_devid_cache_pathinfo(mdi_pathinfo_t *pip, ddi_devid_t devid)
692392e836Gavin Maltby{
693392e836Gavin Maltby	char *path = mdi_pi_pathname(pip);
694392e836Gavin Maltby
695392e836Gavin Maltby	return (e_devid_cache_register_cmn(mdi_pi_get_client(pip), devid,
696392e836Gavin Maltby	    path));
697392e836Gavin Maltby}
698392e836Gavin Maltby
69983c4dfejg/*
70083c4dfejg * Purge devid cache of stale devids
70183c4dfejg */
70283c4dfejgvoid
70383c4dfejgdevid_cache_cleanup(void)
70483c4dfejg{
70583c4dfejg	nvp_devid_t *np, *next;
70683c4dfejg	list_t *listp;
70783c4dfejg	int is_dirty = 0;
70883c4dfejg
70983c4dfejg	rw_enter(nvf_lock(dcfd_handle), RW_WRITER);
71083c4dfejg
71183c4dfejg	listp = nvf_list(dcfd_handle);
71283c4dfejg	for (np = list_head(listp); np; np = next) {
71383c4dfejg		next = list_next(listp, np);
71483c4dfejg		if (np->nvp_devid == NULL)
71583c4dfejg			continue;
71683c4dfejg		if ((np->nvp_flags & NVP_DEVID_REGISTERED) == 0) {
71783c4dfejg			DEVID_LOG_REMOVE((CE_CONT,
718a204de7cth			    "cleanup: %s\n", np->nvp_devpath));
71983c4dfejg			if (!devid_cache_write_disable) {
72083c4dfejg				nvf_mark_dirty(dcfd_handle);
72183c4dfejg				is_dirty = 0;
72283c4dfejg			}
72383c4dfejg			devid_nvp_unlink_and_free(dcfd_handle, np);
72483c4dfejg		}
72583c4dfejg	}
72683c4dfejg
72783c4dfejg	rw_exit(nvf_lock(dcfd_handle));
72883c4dfejg
72983c4dfejg	if (is_dirty)
73083c4dfejg		nvf_wake_daemon();
73183c4dfejg}
73283c4dfejg
73383c4dfejg
/*
 * Build a list of dev_t's for a device/devid
 *
 * The effect of this function is cumulative, adding dev_t's
 * for the device to the list of all dev_t's for a given
 * devid.
 *
 * minor_name is either a specific minor name or one of the
 * DEVID_MINOR_NAME_ALL* sentinels; the sentinels are matched by
 * pointer identity, not string contents.
 *
 * On return, *devtcntp is the total number of matching minors found,
 * which may exceed ndevts_alloced; only the first ndevts_alloced
 * dev_t's are stored in devtsp, letting the caller detect overflow
 * and retry with a larger array.
 */
static void
e_devid_minor_to_devlist(
	dev_info_t	*dip,
	char		*minor_name,
	int		ndevts_alloced,
	int		*devtcntp,
	dev_t		*devtsp)
{
	int			circ;
	struct ddi_minor_data	*dmdp;
	int			minor_all = 0;
	int			ndevts = *devtcntp;

	ASSERT(i_ddi_devi_attached(dip));

	/* are we looking for a set of minor nodes? */
	if ((minor_name == DEVID_MINOR_NAME_ALL) ||
	    (minor_name == DEVID_MINOR_NAME_ALL_CHR) ||
	    (minor_name == DEVID_MINOR_NAME_ALL_BLK))
		minor_all = 1;

	/* Find matching minor names */
	ndi_devi_enter(dip, &circ);
	for (dmdp = DEVI(dip)->devi_minor; dmdp; dmdp = dmdp->next) {

		/* Skip non-minors, and non matching minor names */
		if ((dmdp->type != DDM_MINOR) || ((minor_all == 0) &&
		    strcmp(dmdp->ddm_name, minor_name)))
			continue;

		/* filter out minor_all mismatches by spec type (chr/blk) */
		if (minor_all &&
		    (((minor_name == DEVID_MINOR_NAME_ALL_CHR) &&
		    (dmdp->ddm_spec_type != S_IFCHR)) ||
		    ((minor_name == DEVID_MINOR_NAME_ALL_BLK) &&
		    (dmdp->ddm_spec_type != S_IFBLK))))
			continue;

		/* count every match; store only while space remains */
		if (ndevts < ndevts_alloced)
			devtsp[ndevts] = dmdp->ddm_dev;
		ndevts++;
	}
	ndi_devi_exit(dip, circ);

	*devtcntp = ndevts;
}
78783c4dfejg
78883c4dfejg/*
78983c4dfejg * Search for cached entries matching a devid
79083c4dfejg * Return two lists:
79183c4dfejg *	a list of dev_info nodes, for those devices in the attached state
79283c4dfejg *	a list of pathnames whose instances registered the given devid
79383c4dfejg * If the lists passed in are not sufficient to return the matching
79483c4dfejg * references, return the size of lists required.
79583c4dfejg * The dev_info nodes are returned with a hold that the caller must release.
79683c4dfejg */
79783c4dfejgstatic int
79883c4dfejge_devid_cache_devi_path_lists(ddi_devid_t devid, int retmax,
7996fe4f30Pavel Zakharov    int *retndevis, dev_info_t **retdevis, int *retnpaths, char **retpaths)
80083c4dfejg{
80183c4dfejg	nvp_devid_t *np;
80283c4dfejg	int ndevis, npaths;
80383c4dfejg	dev_info_t *dip, *pdip;
80483c4dfejg	int circ;
80583c4dfejg	int maxdevis = 0;
80683c4dfejg	int maxpaths = 0;
80783c4dfejg	list_t *listp;
80883c4dfejg
80983c4dfejg	ndevis = 0;
81083c4dfejg	npaths = 0;
81183c4dfejg	listp = nvf_list(dcfd_handle);
81283c4dfejg	for (np = list_head(listp); np; np = list_next(listp, np)) {
81383c4dfejg		if (np->nvp_devid == NULL)
81483c4dfejg			continue;
81583c4dfejg		if (ddi_devid_valid(np->nvp_devid) != DDI_SUCCESS) {
81683c4dfejg			DEVIDERR((CE_CONT,
81783c4dfejg			    "find: invalid devid %s\n",
81883c4dfejg			    np->nvp_devpath));
81983c4dfejg			continue;
82083c4dfejg		}
82183c4dfejg		if (ddi_devid_compare(devid, np->nvp_devid) == 0) {
82283c4dfejg			DEVID_DEBUG2((CE_CONT,
82383c4dfejg			    "find: devid match: %s 0x%x\n",
82483c4dfejg			    np->nvp_devpath, np->nvp_flags));
82583c4dfejg			DEVID_LOG_MATCH(("find", devid, np->nvp_devpath));
82683c4dfejg			DEVID_LOG_PATHS((CE_CONT, "%s\n", np->nvp_devpath));
82783c4dfejg
82883c4dfejg			/*
82983c4dfejg			 * Check if we have a cached devinfo reference for this
83083c4dfejg			 * devid.  Place a hold on it to prevent detach
83183c4dfejg			 * Otherwise, use the path instead.
83283c4dfejg			 * Note: returns with a hold on each dev_info
83383c4dfejg			 * node in the list.
83483c4dfejg			 */
83583c4dfejg			dip = NULL;
83683c4dfejg			if (np->nvp_flags & NVP_DEVID_DIP) {
83783c4dfejg				pdip = ddi_get_parent(np->nvp_dip);
83883c4dfejg				if (ndi_devi_tryenter(pdip, &circ)) {
83983c4dfejg					dip = np->nvp_dip;
84083c4dfejg					ndi_hold_devi(dip);
84183c4dfejg					ndi_devi_exit(pdip, circ);
84283c4dfejg					ASSERT(!DEVI_IS_ATTACHING(dip));
84383c4dfejg					ASSERT(!DEVI_IS_DETACHING(dip));
84483c4dfejg				} else {
84583c4dfejg					DEVID_LOG_DETACH((CE_CONT,
84683c4dfejg					    "may be detaching: %s\n",
84783c4dfejg					    np->nvp_devpath));
84883c4dfejg				}
84983c4dfejg			}
85083c4dfejg
85183c4dfejg			if (dip) {
85283c4dfejg				if (ndevis < retmax) {
85383c4dfejg					retdevis[ndevis++] = dip;
85483c4dfejg				} else {
85583c4dfejg					ndi_rele_devi(dip);
85683c4dfejg				}
85783c4dfejg				maxdevis++;
85883c4dfejg			} else {
85983c4dfejg				if (npaths < retmax)
86083c4dfejg					retpaths[npaths++] = np->nvp_devpath;
86183c4dfejg				maxpaths++;
86283c4dfejg			}
86383c4dfejg		}
86483c4dfejg	}
86583c4dfejg
86683c4dfejg	*retndevis = ndevis;
86783c4dfejg	*retnpaths = npaths;
86883c4dfejg	return (maxdevis > maxpaths ? maxdevis : maxpaths);
86983c4dfejg}
87083c4dfejg
87183c4dfejg
87283c4dfejg/*
87383c4dfejg * Search the devid cache, returning dev_t list for all
87483c4dfejg * device paths mapping to the device identified by the
87583c4dfejg * given devid.
87683c4dfejg *
87783c4dfejg * Primary interface used by ddi_lyr_devid_to_devlist()
87883c4dfejg */
87983c4dfejgint
88083c4dfejge_devid_cache_to_devt_list(ddi_devid_t devid, char *minor_name,
8816fe4f30Pavel Zakharov    int *retndevts, dev_t **retdevts)
88283c4dfejg{
88383c4dfejg	char		*path, **paths;
88483c4dfejg	int		i, j, n;
88583c4dfejg	dev_t		*devts, *udevts;
88683c4dfejg	dev_t		tdevt;
88783c4dfejg	int		ndevts, undevts, ndevts_alloced;
88883c4dfejg	dev_info_t	*devi, **devis;
88983c4dfejg	int		ndevis, npaths, nalloced;
89083c4dfejg	ddi_devid_t	match_devid;
89183c4dfejg
89283c4dfejg	DEVID_LOG_FIND(("find", devid, NULL));
89383c4dfejg
89483c4dfejg	ASSERT(ddi_devid_valid(devid) == DDI_SUCCESS);
89583c4dfejg	if (ddi_devid_valid(devid) != DDI_SUCCESS) {
89683c4dfejg		DEVID_LOG_ERR(("invalid devid", devid, NULL));
89783c4dfejg		return (DDI_FAILURE);
89883c4dfejg	}
89983c4dfejg
90083c4dfejg	nalloced = 128;
90183c4dfejg
90283c4dfejg	for (;;) {
90383c4dfejg		paths = kmem_zalloc(nalloced * sizeof (char *), KM_SLEEP);
90483c4dfejg		devis = kmem_zalloc(nalloced * sizeof (dev_info_t *), KM_SLEEP);
90583c4dfejg
90683c4dfejg		rw_enter(nvf_lock(dcfd_handle), RW_READER);
90783c4dfejg		n = e_devid_cache_devi_path_lists(devid, nalloced,
908a204de7cth		    &ndevis, devis, &npaths, paths);
90983c4dfejg		if (n <= nalloced)
91083c4dfejg			break;
91183c4dfejg		rw_exit(nvf_lock(dcfd_handle));
91283c4dfejg		for (i = 0; i < ndevis; i++)
91383c4dfejg			ndi_rele_devi(devis[i]);
91483c4dfejg		kmem_free(paths, nalloced * sizeof (char *));
91583c4dfejg		kmem_free(devis, nalloced * sizeof (dev_info_t *));
91683c4dfejg		nalloced = n + 128;
91783c4dfejg	}
91883c4dfejg
91983c4dfejg	for (i = 0; i < npaths; i++) {
92083c4dfejg		path = i_ddi_strdup(paths[i], KM_SLEEP);
92183c4dfejg		paths[i] = path;
92283c4dfejg	}
92383c4dfejg	rw_exit(nvf_lock(dcfd_handle));
92483c4dfejg
92583c4dfejg	if (ndevis == 0 && npaths == 0) {
92683c4dfejg		DEVID_LOG_ERR(("no devid found", devid, NULL));
92783c4dfejg		kmem_free(paths, nalloced * sizeof (char *));
92883c4dfejg		kmem_free(devis, nalloced * sizeof (dev_info_t *));
92983c4dfejg		return (DDI_FAILURE);
93083c4dfejg	}
93183c4dfejg
93283c4dfejg	ndevts_alloced = 128;
93383c4dfejgrestart:
93483c4dfejg	ndevts = 0;
93583c4dfejg	devts = kmem_alloc(ndevts_alloced * sizeof (dev_t), KM_SLEEP);
93683c4dfejg	for (i = 0; i < ndevis; i++) {
93783c4dfejg		ASSERT(!DEVI_IS_ATTACHING(devis[i]));
93883c4dfejg		ASSERT(!DEVI_IS_DETACHING(devis[i]));
93983c4dfejg		e_devid_minor_to_devlist(devis[i], minor_name,
940a204de7cth		    ndevts_alloced, &ndevts, devts);
94183c4dfejg		if (ndevts > ndevts_alloced) {
94283c4dfejg			kmem_free(devts, ndevts_alloced * sizeof (dev_t));
94383c4dfejg			ndevts_alloced += 128;
94483c4dfejg			goto restart;
94583c4dfejg		}
94683c4dfejg	}
94783c4dfejg	for (i = 0; i < npaths; i++) {
94883c4dfejg		DEVID_LOG_LOOKUP((CE_CONT, "lookup %s\n", paths[i]));
94983c4dfejg		devi = e_ddi_hold_devi_by_path(paths[i], 0);
95083c4dfejg		if (devi == NULL) {
95183c4dfejg			DEVID_LOG_STALE(("stale device reference",
95283c4dfejg			    devid, paths[i]));
95383c4dfejg			continue;
95483c4dfejg		}
95583c4dfejg		/*
95683c4dfejg		 * Verify the newly attached device registered a matching devid
95783c4dfejg		 */
95883c4dfejg		if (i_ddi_devi_get_devid(DDI_DEV_T_ANY, devi,
95983c4dfejg		    &match_devid) != DDI_SUCCESS) {
96083c4dfejg			DEVIDERR((CE_CONT,
96183c4dfejg			    "%s: no devid registered on attach\n",
96283c4dfejg			    paths[i]));
96383c4dfejg			ddi_release_devi(devi);
96483c4dfejg			continue;
96583c4dfejg		}
96683c4dfejg
96783c4dfejg		if (ddi_devid_compare(devid, match_devid) != 0) {
96883c4dfejg			DEVID_LOG_STALE(("new devid registered",
96983c4dfejg			    devid, paths[i]));
97083c4dfejg			ddi_release_devi(devi);
97183c4dfejg			ddi_devid_free(match_devid);
97283c4dfejg			continue;
97383c4dfejg		}
97483c4dfejg		ddi_devid_free(match_devid);
97583c4dfejg
97683c4dfejg		e_devid_minor_to_devlist(devi, minor_name,
977a204de7cth		    ndevts_alloced, &ndevts, devts);
97883c4dfejg		ddi_release_devi(devi);
97983c4dfejg		if (ndevts > ndevts_alloced) {
98083c4dfejg			kmem_free(devts,
98183c4dfejg			    ndevts_alloced * sizeof (dev_t));
98283c4dfejg			ndevts_alloced += 128;
98383c4dfejg			goto restart;
98483c4dfejg		}
98583c4dfejg	}
98683c4dfejg
98783c4dfejg	/* drop hold from e_devid_cache_devi_path_lists */
98883c4dfejg	for (i = 0; i < ndevis; i++) {
98983c4dfejg		ndi_rele_devi(devis[i]);
99083c4dfejg	}
99183c4dfejg	for (i = 0; i < npaths; i++) {
99283c4dfejg		kmem_free(paths[i], strlen(paths[i]) + 1);
99383c4dfejg	}
99483c4dfejg	kmem_free(paths, nalloced * sizeof (char *));
99583c4dfejg	kmem_free(devis, nalloced * sizeof (dev_info_t *));
99683c4dfejg
99783c4dfejg	if (ndevts == 0) {
99883c4dfejg		DEVID_LOG_ERR(("no devid found", devid, NULL));
99983c4dfejg		kmem_free(devts, ndevts_alloced * sizeof (dev_t));
100083c4dfejg		return (DDI_FAILURE);
100183c4dfejg	}
100283c4dfejg
100383c4dfejg	/*
100483c4dfejg	 * Build the final list of sorted dev_t's with duplicates collapsed so
100583c4dfejg	 * returned results are consistent. This prevents implementation
100683c4dfejg	 * artifacts from causing unnecessary changes in SVM namespace.
100783c4dfejg	 */
100883c4dfejg	/* bubble sort */
100983c4dfejg	for (i = 0; i < (ndevts - 1); i++) {
101083c4dfejg		for (j = 0; j < ((ndevts - 1) - i); j++) {
101183c4dfejg			if (devts[j + 1] < devts[j]) {
101283c4dfejg				tdevt = devts[j];
101383c4dfejg				devts[j] = devts[j + 1];
101483c4dfejg				devts[j + 1] = tdevt;
101583c4dfejg			}
101683c4dfejg		}
101783c4dfejg	}
101883c4dfejg
101983c4dfejg	/* determine number of unique values */
102083c4dfejg	for (undevts = ndevts, i = 1; i < ndevts; i++) {
102183c4dfejg		if (devts[i - 1] == devts[i])
102283c4dfejg			undevts--;
102383c4dfejg	}
102483c4dfejg
102583c4dfejg	/* allocate unique */
102683c4dfejg	udevts = kmem_alloc(undevts * sizeof (dev_t), KM_SLEEP);
102783c4dfejg
102883c4dfejg	/* copy unique */
102983c4dfejg	udevts[0] = devts[0];
103083c4dfejg	for (i = 1, j = 1; i < ndevts; i++) {
103183c4dfejg		if (devts[i - 1] != devts[i])
103283c4dfejg			udevts[j++] = devts[i];
103383c4dfejg	}
103483c4dfejg	ASSERT(j == undevts);
1035