183c4dfe9Sjg /* 283c4dfe9Sjg * CDDL HEADER START 383c4dfe9Sjg * 483c4dfe9Sjg * The contents of this file are subject to the terms of the 583c4dfe9Sjg * Common Development and Distribution License (the "License"). 683c4dfe9Sjg * You may not use this file except in compliance with the License. 783c4dfe9Sjg * 883c4dfe9Sjg * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 983c4dfe9Sjg * or http://www.opensolaris.org/os/licensing. 1083c4dfe9Sjg * See the License for the specific language governing permissions 1183c4dfe9Sjg * and limitations under the License. 1283c4dfe9Sjg * 1383c4dfe9Sjg * When distributing Covered Code, include this CDDL HEADER in each 1483c4dfe9Sjg * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 1583c4dfe9Sjg * If applicable, add the following below this CDDL HEADER, with the 1683c4dfe9Sjg * fields enclosed by brackets "[]" replaced with your own identifying 1783c4dfe9Sjg * information: Portions Copyright [yyyy] [name of copyright owner] 1883c4dfe9Sjg * 1983c4dfe9Sjg * CDDL HEADER END 2083c4dfe9Sjg */ 2183c4dfe9Sjg /* 224f1e984dSReed * Copyright (c) 2006, 2010, Oracle and/or its affiliates. All rights reserved. 
2383c4dfe9Sjg */ 2483c4dfe9Sjg 2583c4dfe9Sjg #include <sys/note.h> 2683c4dfe9Sjg #include <sys/t_lock.h> 2783c4dfe9Sjg #include <sys/cmn_err.h> 2883c4dfe9Sjg #include <sys/instance.h> 2983c4dfe9Sjg #include <sys/conf.h> 3083c4dfe9Sjg #include <sys/stat.h> 3183c4dfe9Sjg #include <sys/ddi.h> 3283c4dfe9Sjg #include <sys/hwconf.h> 3383c4dfe9Sjg #include <sys/sunddi.h> 3483c4dfe9Sjg #include <sys/sunndi.h> 35392e836bSGavin Maltby #include <sys/sunmdi.h> 3683c4dfe9Sjg #include <sys/ddi_impldefs.h> 3783c4dfe9Sjg #include <sys/ndi_impldefs.h> 3883c4dfe9Sjg #include <sys/kobj.h> 3983c4dfe9Sjg #include <sys/devcache.h> 4083c4dfe9Sjg #include <sys/devid_cache.h> 4183c4dfe9Sjg #include <sys/sysmacros.h> 4283c4dfe9Sjg 4383c4dfe9Sjg /* 4483c4dfe9Sjg * Discovery refers to the heroic effort made to discover a device which 4583c4dfe9Sjg * cannot be accessed at the physical path where it once resided. Discovery 4683c4dfe9Sjg * involves walking the entire device tree attaching all possible disk 4783c4dfe9Sjg * instances, to search for the device referenced by a devid. Obviously, 4883c4dfe9Sjg * full device discovery is something to be avoided where possible. 4983c4dfe9Sjg * Note that simply invoking devfsadm(1M) is equivalent to running full 5083c4dfe9Sjg * discovery at the devid cache level. 5183c4dfe9Sjg * 5283c4dfe9Sjg * Reasons why a disk may not be accessible: 5383c4dfe9Sjg * disk powered off 5483c4dfe9Sjg * disk removed or cable disconnected 5583c4dfe9Sjg * disk or adapter broken 5683c4dfe9Sjg * 5783c4dfe9Sjg * Note that discovery is not needed and cannot succeed in any of these 5883c4dfe9Sjg * cases. 5983c4dfe9Sjg * 6083c4dfe9Sjg * When discovery may succeed: 6183c4dfe9Sjg * Discovery will result in success when a device has been moved 6283c4dfe9Sjg * to a different address. Note that it's recommended that 6383c4dfe9Sjg * devfsadm(1M) be invoked (no arguments required) whenever a system's 6483c4dfe9Sjg * h/w configuration has been updated. 
Alternatively, a 6583c4dfe9Sjg * reconfiguration boot can be used to accomplish the same result. 6683c4dfe9Sjg * 6783c4dfe9Sjg * Note that discovery is not necessary to be able to correct an access 6883c4dfe9Sjg * failure for a device which was powered off. Assuming the cache has an 6983c4dfe9Sjg * entry for such a device, simply powering it on should permit the system 7083c4dfe9Sjg * to access it. If problems persist after powering it on, invoke 7183c4dfe9Sjg * devfsadm(1M). 7283c4dfe9Sjg * 7383c4dfe9Sjg * Discovery prior to mounting root is only of interest when booting 7483c4dfe9Sjg * from a filesystem which accesses devices by device id, which of 7583c4dfe9Sjg * not all do. 7683c4dfe9Sjg * 7783c4dfe9Sjg * Tunables 7883c4dfe9Sjg * 7983c4dfe9Sjg * devid_discovery_boot (default 1) 8083c4dfe9Sjg * Number of times discovery will be attempted prior to mounting root. 8183c4dfe9Sjg * Must be done at least once to recover from corrupted or missing 8283c4dfe9Sjg * devid cache backing store. Probably there's no reason to ever 83392e836bSGavin Maltby * set this to greater than one as a missing device will remain 8483c4dfe9Sjg * unavailable no matter how often the system searches for it. 8583c4dfe9Sjg * 8683c4dfe9Sjg * devid_discovery_postboot (default 1) 8783c4dfe9Sjg * Number of times discovery will be attempted after mounting root. 8883c4dfe9Sjg * This must be performed at least once to discover any devices 8983c4dfe9Sjg * needed after root is mounted which may have been powered 9083c4dfe9Sjg * off and moved before booting. 9183c4dfe9Sjg * Setting this to a larger positive number will introduce 9283c4dfe9Sjg * some inconsistency in system operation. Searching for a device 9383c4dfe9Sjg * will take an indeterminate amount of time, sometimes slower, 9483c4dfe9Sjg * sometimes faster. In addition, the system will sometimes 9583c4dfe9Sjg * discover a newly powered on device, sometimes it won't. 9683c4dfe9Sjg * Use of this option is not therefore recommended. 
9783c4dfe9Sjg * 9883c4dfe9Sjg * devid_discovery_postboot_always (default 0) 9983c4dfe9Sjg * Set to 1, the system will always attempt full discovery. 10083c4dfe9Sjg * 10183c4dfe9Sjg * devid_discovery_secs (default 0) 10283c4dfe9Sjg * Set to a positive value, the system will attempt full discovery 10383c4dfe9Sjg * but with a minimum delay between attempts. A device search 10483c4dfe9Sjg * within the period of time specified will result in failure. 10583c4dfe9Sjg * 10683c4dfe9Sjg * devid_cache_read_disable (default 0) 10783c4dfe9Sjg * Set to 1 to disable reading /etc/devices/devid_cache. 10883c4dfe9Sjg * Devid cache will continue to operate normally but 10983c4dfe9Sjg * at least one discovery attempt will be required. 11083c4dfe9Sjg * 11183c4dfe9Sjg * devid_cache_write_disable (default 0) 11283c4dfe9Sjg * Set to 1 to disable updates to /etc/devices/devid_cache. 11383c4dfe9Sjg * Any updates to the devid cache will not be preserved across a reboot. 11483c4dfe9Sjg * 11583c4dfe9Sjg * devid_report_error (default 0) 11683c4dfe9Sjg * Set to 1 to enable some error messages related to devid 11783c4dfe9Sjg * cache failures. 11883c4dfe9Sjg * 11983c4dfe9Sjg * The devid is packed in the cache file as a byte array. For 12083c4dfe9Sjg * portability, this could be done in the encoded string format. 
 */


/* Tunables - see the theory statement above for detailed descriptions */
int devid_discovery_boot = 1;		/* discovery attempts before root mount */
int devid_discovery_postboot = 1;	/* discovery attempts after root mount */
int devid_discovery_postboot_always = 0; /* 1 = always attempt full discovery */
int devid_discovery_secs = 0;		/* >0 = minimum delay between attempts */

int devid_cache_read_disable = 0;	/* 1 = skip reading the cache file */
int devid_cache_write_disable = 0;	/* 1 = never update the cache file */

int devid_report_error = 0;		/* 1 = report devid cache failures */


/*
 * State to manage discovery of devices providing a devid
 */
static int devid_discovery_busy = 0;	/* discovery in progress */
static kmutex_t devid_discovery_mutex;	/* protects busy flag and cv */
static kcondvar_t devid_discovery_cv;	/* broadcast when discovery completes */
static clock_t devid_last_discovery = 0; /* lbolt of most recent discovery */


#ifdef	DEBUG
/* Debug trace switches, consumed by the DEVID_* / NVP_* debug macros */
int nvp_devid_debug = 0;
int devid_debug = 0;
int devid_log_registers = 0;
int devid_log_finds = 0;
int devid_log_lookups = 0;
int devid_log_discovery = 0;
int devid_log_matches = 0;
int devid_log_paths = 0;
int devid_log_failures = 0;
int devid_log_hold = 0;
int devid_log_unregisters = 0;
int devid_log_removes = 0;
int devid_register_debug = 0;
int devid_log_stale = 0;
int devid_log_detaches = 0;
#endif	/* DEBUG */

/*
 * devid cache file registration for cache reads and updates
 */
static nvf_ops_t devid_cache_ops = {
	"/etc/devices/devid_cache",	/* path to cache */
	devid_cache_unpack_nvlist,	/* read: nvlist to nvp */
	devid_cache_pack_list,		/* write: nvp to nvlist */
	devid_list_free,		/* free data list */
	NULL				/* write complete callback */
};

/*
 * handle to registered devid cache handlers
 */
nvf_handle_t	dcfd_handle;


/*
 * Initialize devid cache file management.
 * Registers the cache file with the nvf framework, creates the empty
 * in-core element list, and initializes the discovery synchronization
 * primitives.  Called once at boot before any cache operation.
 */
void
devid_cache_init(void)
{
	dcfd_handle = nvf_register_file(&devid_cache_ops);
	ASSERT(dcfd_handle);

	list_create(nvf_list(dcfd_handle), sizeof (nvp_devid_t),
	    offsetof(nvp_devid_t, nvp_link));

	mutex_init(&devid_discovery_mutex, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&devid_discovery_cv, NULL, CV_DRIVER, NULL);
}

/*
 * Read and initialize the devid cache from the persistent store.
 * No-op when devid_cache_read_disable is set; in that case at least
 * one discovery will be needed to populate the cache.
 */
void
devid_cache_read(void)
{
	if (!devid_cache_read_disable) {
		rw_enter(nvf_lock(dcfd_handle), RW_WRITER);
		/* must be populating an empty list */
		ASSERT(list_head(nvf_list(dcfd_handle)) == NULL);
		(void) nvf_read_file(dcfd_handle);
		rw_exit(nvf_lock(dcfd_handle));
	}
}

/*
 * Free a single cache element along with the path string and devid
 * it owns.  The element must already be unlinked from the list.
 */
static void
devid_nvp_free(nvp_devid_t *dp)
{
	if (dp->nvp_devpath)
		kmem_free(dp->nvp_devpath, strlen(dp->nvp_devpath)+1);
	if (dp->nvp_devid)
		kmem_free(dp->nvp_devid, ddi_devid_sizeof(dp->nvp_devid));

	kmem_free(dp, sizeof (nvp_devid_t));
}

/*
 * Free the entire element list of a cache handle.
 * Installed as the "free data list" operation in devid_cache_ops.
 */
static void
devid_list_free(nvf_handle_t fd)
{
	list_t		*listp;
	nvp_devid_t	*np;

	/*
	 * NOTE(review): asserts on the global dcfd_handle rather than the
	 * fd argument - harmless while this file registers only one cache,
	 * but confirm if additional nvf files ever share this callback.
	 */
	ASSERT(RW_WRITE_HELD(nvf_lock(dcfd_handle)));

	listp = nvf_list(fd);
	while (np = list_head(listp)) {
		list_remove(listp, np);
		devid_nvp_free(np);
	}
}

/*
 * Free an nvp element in a list
 */
static void
devid_nvp_unlink_and_free(nvf_handle_t fd, nvp_devid_t *np)
{
	list_remove(nvf_list(fd), np);
	devid_nvp_free(np);
}

/*
 * Unpack a device path/nvlist pair to the list of devid cache elements.
 * Used to parse the nvlist format when reading
 * /etc/devices/devid_cache
 *
 * Always returns 0 so that one bad entry does not abort the file read;
 * invalid or missing devids are only reported via DEVIDERR.
 */
static int
devid_cache_unpack_nvlist(nvf_handle_t fd, nvlist_t *nvl, char *name)
{
	nvp_devid_t *np;
	ddi_devid_t devidp;
	int rval;
	uint_t n;

	NVP_DEVID_DEBUG_PATH((name));
	ASSERT(RW_WRITE_HELD(nvf_lock(dcfd_handle)));

	/*
	 * check path for a devid
	 */
	rval = nvlist_lookup_byte_array(nvl,
	    DP_DEVID_ID, (uchar_t **)&devidp, &n);
	if (rval == 0) {
		if (ddi_devid_valid(devidp) == DDI_SUCCESS) {
			ASSERT(n == ddi_devid_sizeof(devidp));
			/* copy both path and devid so the element owns them */
			np = kmem_zalloc(sizeof (nvp_devid_t), KM_SLEEP);
			np->nvp_devpath = i_ddi_strdup(name, KM_SLEEP);
			np->nvp_devid = kmem_alloc(n, KM_SLEEP);
			(void) bcopy(devidp, np->nvp_devid, n);
			list_insert_tail(nvf_list(fd), np);
			NVP_DEVID_DEBUG_DEVID((np->nvp_devid));
		} else {
			DEVIDERR((CE_CONT,
			    "%s: invalid devid\n", name));
		}
	} else {
		DEVIDERR((CE_CONT,
		    "%s: devid not available\n", name));
	}

	return (0);
}

/*
 * Pack the list of devid cache elements into a single nvlist
 * Used when writing the nvlist file.
 *
 * On success *ret_nvl holds the newly allocated nvlist (caller frees);
 * on any allocation/add failure everything is freed, *ret_nvl is set
 * to NULL and DDI_FAILURE is returned.
 */
static int
devid_cache_pack_list(nvf_handle_t fd, nvlist_t **ret_nvl)
{
	nvlist_t	*nvl, *sub_nvl;
	nvp_devid_t	*np;
	int		rval;
	list_t		*listp;

	ASSERT(RW_WRITE_HELD(nvf_lock(dcfd_handle)));

	rval = nvlist_alloc(&nvl, NV_UNIQUE_NAME, KM_SLEEP);
	if (rval != 0) {
		nvf_error("%s: nvlist alloc error %d\n",
		    nvf_cache_name(fd), rval);
		return (DDI_FAILURE);
	}

	listp = nvf_list(fd);
	for (np = list_head(listp); np; np = list_next(listp, np)) {
		/* skip path-only entries with no devid to persist */
		if (np->nvp_devid == NULL)
			continue;
		NVP_DEVID_DEBUG_PATH(np->nvp_devpath);
		rval = nvlist_alloc(&sub_nvl, NV_UNIQUE_NAME, KM_SLEEP);
		if (rval != 0) {
			nvf_error("%s: nvlist alloc error %d\n",
			    nvf_cache_name(fd), rval);
			sub_nvl = NULL;
			goto err;
		}

		/* devid is stored as a raw byte array keyed by DP_DEVID_ID */
		rval = nvlist_add_byte_array(sub_nvl, DP_DEVID_ID,
		    (uchar_t *)np->nvp_devid,
		    ddi_devid_sizeof(np->nvp_devid));
		if (rval == 0) {
			NVP_DEVID_DEBUG_DEVID(np->nvp_devid);
		} else {
			nvf_error(
			    "%s: nvlist add error %d (devid)\n",
			    nvf_cache_name(fd), rval);
			goto err;
		}

		/* each device path becomes a sub-nvlist keyed by the path */
		rval = nvlist_add_nvlist(nvl, np->nvp_devpath, sub_nvl);
		if (rval != 0) {
			nvf_error("%s: nvlist add error %d (sublist)\n",
			    nvf_cache_name(fd), rval);
			goto err;
		}
		nvlist_free(sub_nvl);
	}

	*ret_nvl = nvl;
	return (DDI_SUCCESS);

err:
	if (sub_nvl)
		nvlist_free(sub_nvl);
	nvlist_free(nvl);
	*ret_nvl = NULL;
	return (DDI_FAILURE);
}

/*
 * Decide whether a discovery attempt should proceed, consuming the
 * pre/post-boot attempt budgets (devid_discovery_boot,
 * devid_discovery_postboot) and honoring devid_discovery_postboot_always
 * and the devid_discovery_secs rate limit.  Returns 1 to proceed, 0 to
 * decline.  Caller must hold devid_discovery_mutex.
 */
static int
e_devid_do_discovery(void)
{
	ASSERT(mutex_owned(&devid_discovery_mutex));

	if (i_ddi_io_initialized() == 0) {
		if (devid_discovery_boot > 0) {
			devid_discovery_boot--;
			return (1);
		}
	} else {
		if (devid_discovery_postboot_always > 0)
			return (1);
		if (devid_discovery_postboot > 0) {
			devid_discovery_postboot--;
			return (1);
		}
		if (devid_discovery_secs > 0) {
			if ((ddi_get_lbolt() - devid_last_discovery) >
			    drv_usectohz(devid_discovery_secs * MICROSEC)) {
				return (1);
			}
		}
	}

	DEVID_LOG_DISC((CE_CONT, "devid_discovery: no discovery\n"));
	return (0);
}

/*
 * Attach all instances of a driver by holding it, then immediately
 * release the hold; the attach side effect is what performs discovery.
 */
static void
e_ddi_devid_hold_by_major(major_t major)
{
	DEVID_LOG_DISC((CE_CONT,
	    "devid_discovery: ddi_hold_installed_driver %d\n", major));

	if (ddi_hold_installed_driver(major) == NULL)
		return;

	ddi_rele_driver(major);
}

/* legacy support - see below */
static char *e_ddi_devid_hold_driver_list[] = { "sd", "ssd" };

39783c4dfe9Sjg #define N_DRIVERS_TO_HOLD \ 39883c4dfe9Sjg (sizeof (e_ddi_devid_hold_driver_list) / sizeof (char *)) 39983c4dfe9Sjg 40083c4dfe9Sjg static void 40183c4dfe9Sjg e_ddi_devid_hold_installed_driver(ddi_devid_t devid) 40283c4dfe9Sjg { 40383c4dfe9Sjg impl_devid_t *id = (impl_devid_t *)devid; 40483c4dfe9Sjg major_t major, hint_major; 40583c4dfe9Sjg char hint[DEVID_HINT_SIZE + 1]; 406*6f25ad7fSJerry Gilliam struct devnames *dnp; 40783c4dfe9Sjg char **drvp; 40883c4dfe9Sjg int i; 40983c4dfe9Sjg 41083c4dfe9Sjg /* Count non-null bytes */ 41183c4dfe9Sjg for (i = 0; i < DEVID_HINT_SIZE; i++) 41283c4dfe9Sjg if (id->did_driver[i] == '\0') 41383c4dfe9Sjg break; 41483c4dfe9Sjg 41583c4dfe9Sjg /* Make a copy of the driver hint */ 41683c4dfe9Sjg bcopy(id->did_driver, hint, i); 41783c4dfe9Sjg hint[i] = '\0'; 41883c4dfe9Sjg 41983c4dfe9Sjg /* search for the devid using the hint driver */ 42083c4dfe9Sjg hint_major = ddi_name_to_major(hint); 421a204de77Scth if (hint_major != DDI_MAJOR_T_NONE) { 42283c4dfe9Sjg e_ddi_devid_hold_by_major(hint_major); 42383c4dfe9Sjg } 42483c4dfe9Sjg 425*6f25ad7fSJerry Gilliam /* 426*6f25ad7fSJerry Gilliam * search for the devid with each driver declaring 427*6f25ad7fSJerry Gilliam * itself as a devid registrant. 428*6f25ad7fSJerry Gilliam */ 429*6f25ad7fSJerry Gilliam for (major = 0; major < devcnt; major++) { 430*6f25ad7fSJerry Gilliam if (major == hint_major) 431*6f25ad7fSJerry Gilliam continue; 432*6f25ad7fSJerry Gilliam dnp = &devnamesp[major]; 433*6f25ad7fSJerry Gilliam if (dnp->dn_flags & DN_DEVID_REGISTRANT) { 434*6f25ad7fSJerry Gilliam e_ddi_devid_hold_by_major(major); 435*6f25ad7fSJerry Gilliam } 436*6f25ad7fSJerry Gilliam } 437*6f25ad7fSJerry Gilliam 438*6f25ad7fSJerry Gilliam /* 439*6f25ad7fSJerry Gilliam * Legacy support: may be removed once an upgrade mechanism 440*6f25ad7fSJerry Gilliam * for driver conf files is available. 
441*6f25ad7fSJerry Gilliam */ 44283c4dfe9Sjg drvp = e_ddi_devid_hold_driver_list; 44383c4dfe9Sjg for (i = 0; i < N_DRIVERS_TO_HOLD; i++, drvp++) { 44483c4dfe9Sjg major = ddi_name_to_major(*drvp); 445a204de77Scth if (major != DDI_MAJOR_T_NONE && major != hint_major) { 44683c4dfe9Sjg e_ddi_devid_hold_by_major(major); 44783c4dfe9Sjg } 44883c4dfe9Sjg } 44983c4dfe9Sjg } 45083c4dfe9Sjg 45183c4dfe9Sjg /* 45283c4dfe9Sjg * Return success if discovery was attempted, to indicate 45383c4dfe9Sjg * that the desired device may now be available. 45483c4dfe9Sjg */ 45583c4dfe9Sjg int 45683c4dfe9Sjg e_ddi_devid_discovery(ddi_devid_t devid) 45783c4dfe9Sjg { 45883c4dfe9Sjg int flags; 45983c4dfe9Sjg int rval = DDI_SUCCESS; 46083c4dfe9Sjg 46183c4dfe9Sjg mutex_enter(&devid_discovery_mutex); 46283c4dfe9Sjg 46383c4dfe9Sjg if (devid_discovery_busy) { 46483c4dfe9Sjg DEVID_LOG_DISC((CE_CONT, "devid_discovery: busy\n")); 46583c4dfe9Sjg while (devid_discovery_busy) { 46683c4dfe9Sjg cv_wait(&devid_discovery_cv, &devid_discovery_mutex); 46783c4dfe9Sjg } 46883c4dfe9Sjg } else if (e_devid_do_discovery()) { 46983c4dfe9Sjg devid_discovery_busy = 1; 47083c4dfe9Sjg mutex_exit(&devid_discovery_mutex); 47183c4dfe9Sjg 47283c4dfe9Sjg if (i_ddi_io_initialized() == 0) { 47383c4dfe9Sjg e_ddi_devid_hold_installed_driver(devid); 47483c4dfe9Sjg } else { 47583c4dfe9Sjg DEVID_LOG_DISC((CE_CONT, 47683c4dfe9Sjg "devid_discovery: ndi_devi_config\n")); 47783c4dfe9Sjg flags = NDI_DEVI_PERSIST | NDI_CONFIG | NDI_NO_EVENT; 47883c4dfe9Sjg if (i_ddi_io_initialized()) 47983c4dfe9Sjg flags |= NDI_DRV_CONF_REPROBE; 48083c4dfe9Sjg (void) ndi_devi_config(ddi_root_node(), flags); 48183c4dfe9Sjg } 48283c4dfe9Sjg 48383c4dfe9Sjg mutex_enter(&devid_discovery_mutex); 48483c4dfe9Sjg devid_discovery_busy = 0; 48583c4dfe9Sjg cv_broadcast(&devid_discovery_cv); 48683c4dfe9Sjg if (devid_discovery_secs > 0) 48783c4dfe9Sjg devid_last_discovery = ddi_get_lbolt(); 48883c4dfe9Sjg DEVID_LOG_DISC((CE_CONT, "devid_discovery: done\n")); 
48983c4dfe9Sjg } else { 49083c4dfe9Sjg rval = DDI_FAILURE; 49183c4dfe9Sjg DEVID_LOG_DISC((CE_CONT, "no devid discovery\n")); 49283c4dfe9Sjg } 49383c4dfe9Sjg 49483c4dfe9Sjg mutex_exit(&devid_discovery_mutex); 49583c4dfe9Sjg 49683c4dfe9Sjg return (rval); 49783c4dfe9Sjg } 49883c4dfe9Sjg 49983c4dfe9Sjg /* 50083c4dfe9Sjg * As part of registering a devid for a device, 50183c4dfe9Sjg * update the devid cache with this device/devid pair 50283c4dfe9Sjg * or note that this combination has registered. 503392e836bSGavin Maltby * 504392e836bSGavin Maltby * If a devpath is provided it will be used as the path to register the 505392e836bSGavin Maltby * devid against, otherwise we use ddi_pathname(dip). In both cases 506392e836bSGavin Maltby * we duplicate the path string so that it can be cached/freed indepdently 507392e836bSGavin Maltby * of the original owner. 50883c4dfe9Sjg */ 509392e836bSGavin Maltby static int 510392e836bSGavin Maltby e_devid_cache_register_cmn(dev_info_t *dip, ddi_devid_t devid, char *devpath) 51183c4dfe9Sjg { 51283c4dfe9Sjg nvp_devid_t *np; 51383c4dfe9Sjg nvp_devid_t *new_nvp; 51483c4dfe9Sjg ddi_devid_t new_devid; 51583c4dfe9Sjg int new_devid_size; 51683c4dfe9Sjg char *path, *fullpath; 51783c4dfe9Sjg ddi_devid_t free_devid = NULL; 51883c4dfe9Sjg int pathlen; 51983c4dfe9Sjg list_t *listp; 52083c4dfe9Sjg int is_dirty = 0; 52183c4dfe9Sjg 5224f1e984dSReed 52383c4dfe9Sjg ASSERT(ddi_devid_valid(devid) == DDI_SUCCESS); 52483c4dfe9Sjg 525392e836bSGavin Maltby if (devpath) { 526392e836bSGavin Maltby pathlen = strlen(devpath) + 1; 527392e836bSGavin Maltby path = kmem_alloc(pathlen, KM_SLEEP); 528392e836bSGavin Maltby bcopy(devpath, path, pathlen); 529392e836bSGavin Maltby } else { 530392e836bSGavin Maltby /* 531392e836bSGavin Maltby * We are willing to accept DS_BOUND nodes if we can form a full 532392e836bSGavin Maltby * ddi_pathname (i.e. the node is part way to becomming 533392e836bSGavin Maltby * DS_INITIALIZED and devi_addr/ddi_get_name_addr are non-NULL). 
534392e836bSGavin Maltby */ 535392e836bSGavin Maltby if (ddi_get_name_addr(dip) == NULL) 536392e836bSGavin Maltby return (DDI_FAILURE); 537392e836bSGavin Maltby 538392e836bSGavin Maltby fullpath = kmem_alloc(MAXPATHLEN, KM_SLEEP); 539392e836bSGavin Maltby (void) ddi_pathname(dip, fullpath); 540392e836bSGavin Maltby pathlen = strlen(fullpath) + 1; 541392e836bSGavin Maltby path = kmem_alloc(pathlen, KM_SLEEP); 542392e836bSGavin Maltby bcopy(fullpath, path, pathlen); 543392e836bSGavin Maltby kmem_free(fullpath, MAXPATHLEN); 544392e836bSGavin Maltby } 54583c4dfe9Sjg 54683c4dfe9Sjg DEVID_LOG_REG(("register", devid, path)); 54783c4dfe9Sjg 54883c4dfe9Sjg new_nvp = kmem_zalloc(sizeof (nvp_devid_t), KM_SLEEP); 54983c4dfe9Sjg new_devid_size = ddi_devid_sizeof(devid); 55083c4dfe9Sjg new_devid = kmem_alloc(new_devid_size, KM_SLEEP); 55183c4dfe9Sjg (void) bcopy(devid, new_devid, new_devid_size); 55283c4dfe9Sjg 55383c4dfe9Sjg rw_enter(nvf_lock(dcfd_handle), RW_WRITER); 55483c4dfe9Sjg 55583c4dfe9Sjg listp = nvf_list(dcfd_handle); 55683c4dfe9Sjg for (np = list_head(listp); np; np = list_next(listp, np)) { 55783c4dfe9Sjg if (strcmp(path, np->nvp_devpath) == 0) { 55883c4dfe9Sjg DEVID_DEBUG2((CE_CONT, 55983c4dfe9Sjg "register: %s path match\n", path)); 56083c4dfe9Sjg if (np->nvp_devid == NULL) { 561a204de77Scth replace: np->nvp_devid = new_devid; 56283c4dfe9Sjg np->nvp_flags |= 563a204de77Scth NVP_DEVID_DIP | NVP_DEVID_REGISTERED; 56483c4dfe9Sjg np->nvp_dip = dip; 56583c4dfe9Sjg if (!devid_cache_write_disable) { 56683c4dfe9Sjg nvf_mark_dirty(dcfd_handle); 56783c4dfe9Sjg is_dirty = 1; 56883c4dfe9Sjg } 56983c4dfe9Sjg rw_exit(nvf_lock(dcfd_handle)); 57083c4dfe9Sjg kmem_free(new_nvp, sizeof (nvp_devid_t)); 57183c4dfe9Sjg kmem_free(path, pathlen); 57283c4dfe9Sjg goto exit; 57383c4dfe9Sjg } 57483c4dfe9Sjg if (ddi_devid_valid(np->nvp_devid) != DDI_SUCCESS) { 57583c4dfe9Sjg /* replace invalid devid */ 57683c4dfe9Sjg free_devid = np->nvp_devid; 57783c4dfe9Sjg goto replace; 57883c4dfe9Sjg } 
57983c4dfe9Sjg /* 58083c4dfe9Sjg * We're registering an already-cached path 58183c4dfe9Sjg * Does the device's devid match the cache? 58283c4dfe9Sjg */ 58383c4dfe9Sjg if (ddi_devid_compare(devid, np->nvp_devid) != 0) { 58483c4dfe9Sjg DEVID_DEBUG((CE_CONT, "devid register: " 58583c4dfe9Sjg "devid %s does not match\n", path)); 58683c4dfe9Sjg /* 58783c4dfe9Sjg * Replace cached devid for this path 58883c4dfe9Sjg * with newly registered devid. A devid 58983c4dfe9Sjg * may map to multiple paths but one path 59083c4dfe9Sjg * should only map to one devid. 59183c4dfe9Sjg */ 59283c4dfe9Sjg devid_nvp_unlink_and_free(dcfd_handle, np); 59383c4dfe9Sjg np = NULL; 59483c4dfe9Sjg break; 59583c4dfe9Sjg } else { 59683c4dfe9Sjg DEVID_DEBUG2((CE_CONT, 59783c4dfe9Sjg "devid register: %s devid match\n", path)); 59883c4dfe9Sjg np->nvp_flags |= 599a204de77Scth NVP_DEVID_DIP | NVP_DEVID_REGISTERED; 60083c4dfe9Sjg np->nvp_dip = dip; 60183c4dfe9Sjg rw_exit(nvf_lock(dcfd_handle)); 60283c4dfe9Sjg kmem_free(new_nvp, sizeof (nvp_devid_t)); 60383c4dfe9Sjg kmem_free(path, pathlen); 60483c4dfe9Sjg kmem_free(new_devid, new_devid_size); 60583c4dfe9Sjg return (DDI_SUCCESS); 60683c4dfe9Sjg } 60783c4dfe9Sjg } 60883c4dfe9Sjg } 60983c4dfe9Sjg 61083c4dfe9Sjg /* 61183c4dfe9Sjg * Add newly registered devid to the cache 61283c4dfe9Sjg */ 61383c4dfe9Sjg ASSERT(np == NULL); 61483c4dfe9Sjg 61583c4dfe9Sjg new_nvp->nvp_devpath = path; 61683c4dfe9Sjg new_nvp->nvp_flags = NVP_DEVID_DIP | NVP_DEVID_REGISTERED; 61783c4dfe9Sjg new_nvp->nvp_dip = dip; 61883c4dfe9Sjg new_nvp->nvp_devid = new_devid; 61983c4dfe9Sjg 62083c4dfe9Sjg if (!devid_cache_write_disable) { 62183c4dfe9Sjg is_dirty = 1; 62283c4dfe9Sjg nvf_mark_dirty(dcfd_handle); 62383c4dfe9Sjg } 62483c4dfe9Sjg list_insert_tail(nvf_list(dcfd_handle), new_nvp); 62583c4dfe9Sjg 62683c4dfe9Sjg rw_exit(nvf_lock(dcfd_handle)); 62783c4dfe9Sjg 62883c4dfe9Sjg exit: 62983c4dfe9Sjg if (free_devid) 63083c4dfe9Sjg kmem_free(free_devid, ddi_devid_sizeof(free_devid)); 63183c4dfe9Sjg 
63283c4dfe9Sjg if (is_dirty) 63383c4dfe9Sjg nvf_wake_daemon(); 63483c4dfe9Sjg 63583c4dfe9Sjg return (DDI_SUCCESS); 63683c4dfe9Sjg } 63783c4dfe9Sjg 638392e836bSGavin Maltby int 639392e836bSGavin Maltby e_devid_cache_register(dev_info_t *dip, ddi_devid_t devid) 640392e836bSGavin Maltby { 641392e836bSGavin Maltby return (e_devid_cache_register_cmn(dip, devid, NULL)); 642392e836bSGavin Maltby } 643392e836bSGavin Maltby 64483c4dfe9Sjg /* 64588acca04SGavin Maltby * Unregister a device's devid; the devinfo may hit on multiple entries 64688acca04SGavin Maltby * arising from both pHCI and vHCI paths. 64788acca04SGavin Maltby * Called as an instance detachs. 64888acca04SGavin Maltby * Invalidate the devid's devinfo reference. 64988acca04SGavin Maltby * Devid-path remains in the cache. 65083c4dfe9Sjg */ 65188acca04SGavin Maltby 65283c4dfe9Sjg void 65383c4dfe9Sjg e_devid_cache_unregister(dev_info_t *dip) 65483c4dfe9Sjg { 65583c4dfe9Sjg nvp_devid_t *np; 65683c4dfe9Sjg list_t *listp; 65783c4dfe9Sjg 65883c4dfe9Sjg rw_enter(nvf_lock(dcfd_handle), RW_WRITER); 65983c4dfe9Sjg 66083c4dfe9Sjg listp = nvf_list(dcfd_handle); 66183c4dfe9Sjg for (np = list_head(listp); np; np = list_next(listp, np)) { 66283c4dfe9Sjg if (np->nvp_devid == NULL) 66383c4dfe9Sjg continue; 66483c4dfe9Sjg if ((np->nvp_flags & NVP_DEVID_DIP) && np->nvp_dip == dip) { 66583c4dfe9Sjg DEVID_LOG_UNREG((CE_CONT, 666a204de77Scth "unregister: %s\n", np->nvp_devpath)); 66783c4dfe9Sjg np->nvp_flags &= ~NVP_DEVID_DIP; 66883c4dfe9Sjg np->nvp_dip = NULL; 66983c4dfe9Sjg } 67083c4dfe9Sjg } 67183c4dfe9Sjg 67283c4dfe9Sjg rw_exit(nvf_lock(dcfd_handle)); 67383c4dfe9Sjg } 67483c4dfe9Sjg 675392e836bSGavin Maltby int 676392e836bSGavin Maltby e_devid_cache_pathinfo(mdi_pathinfo_t *pip, ddi_devid_t devid) 677392e836bSGavin Maltby { 678392e836bSGavin Maltby char *path = mdi_pi_pathname(pip); 679392e836bSGavin Maltby 680392e836bSGavin Maltby return (e_devid_cache_register_cmn(mdi_pi_get_client(pip), devid, 681392e836bSGavin Maltby path)); 
682392e836bSGavin Maltby } 683392e836bSGavin Maltby 68483c4dfe9Sjg /* 68583c4dfe9Sjg * Purge devid cache of stale devids 68683c4dfe9Sjg */ 68783c4dfe9Sjg void 68883c4dfe9Sjg devid_cache_cleanup(void) 68983c4dfe9Sjg { 69083c4dfe9Sjg nvp_devid_t *np, *next; 69183c4dfe9Sjg list_t *listp; 69283c4dfe9Sjg int is_dirty = 0; 69383c4dfe9Sjg 69483c4dfe9Sjg rw_enter(nvf_lock(dcfd_handle), RW_WRITER); 69583c4dfe9Sjg 69683c4dfe9Sjg listp = nvf_list(dcfd_handle); 69783c4dfe9Sjg for (np = list_head(listp); np; np = next) { 69883c4dfe9Sjg next = list_next(listp, np); 69983c4dfe9Sjg if (np->nvp_devid == NULL) 70083c4dfe9Sjg continue; 70183c4dfe9Sjg if ((np->nvp_flags & NVP_DEVID_REGISTERED) == 0) { 70283c4dfe9Sjg DEVID_LOG_REMOVE((CE_CONT, 703a204de77Scth "cleanup: %s\n", np->nvp_devpath)); 70483c4dfe9Sjg if (!devid_cache_write_disable) { 70583c4dfe9Sjg nvf_mark_dirty(dcfd_handle); 70683c4dfe9Sjg is_dirty = 0; 70783c4dfe9Sjg } 70883c4dfe9Sjg devid_nvp_unlink_and_free(dcfd_handle, np); 70983c4dfe9Sjg } 71083c4dfe9Sjg } 71183c4dfe9Sjg 71283c4dfe9Sjg rw_exit(nvf_lock(dcfd_handle)); 71383c4dfe9Sjg 71483c4dfe9Sjg if (is_dirty) 71583c4dfe9Sjg nvf_wake_daemon(); 71683c4dfe9Sjg } 71783c4dfe9Sjg 71883c4dfe9Sjg 71983c4dfe9Sjg /* 72083c4dfe9Sjg * Build a list of dev_t's for a device/devid 72183c4dfe9Sjg * 72283c4dfe9Sjg * The effect of this function is cumulative, adding dev_t's 72383c4dfe9Sjg * for the device to the list of all dev_t's for a given 72483c4dfe9Sjg * devid. 
 */
static void
e_devid_minor_to_devlist(
	dev_info_t	*dip,
	char		*minor_name,
	int		ndevts_alloced,
	int		*devtcntp,
	dev_t		*devtsp)
{
	int			circ;	/* ndi_devi_enter/exit cookie */
	struct ddi_minor_data	*dmdp;
	int			minor_all = 0;
	/* cumulative count: start from the caller's running total */
	int			ndevts = *devtcntp;

	ASSERT(i_ddi_devi_attached(dip));

	/*
	 * are we looking for a set of minor nodes?
	 * (sentinel pointers compared by identity, not string contents)
	 */
	if ((minor_name == DEVID_MINOR_NAME_ALL) ||
	    (minor_name == DEVID_MINOR_NAME_ALL_CHR) ||
	    (minor_name == DEVID_MINOR_NAME_ALL_BLK))
		minor_all = 1;

	/* Find matching minor names */
	ndi_devi_enter(dip, &circ);
	for (dmdp = DEVI(dip)->devi_minor; dmdp; dmdp = dmdp->next) {

		/* Skip non-minors, and non matching minor names */
		if ((dmdp->type != DDM_MINOR) || ((minor_all == 0) &&
		    strcmp(dmdp->ddm_name, minor_name)))
			continue;

		/* filter out minor_all mismatches */
		if (minor_all &&
		    (((minor_name == DEVID_MINOR_NAME_ALL_CHR) &&
		    (dmdp->ddm_spec_type != S_IFCHR)) ||
		    ((minor_name == DEVID_MINOR_NAME_ALL_BLK) &&
		    (dmdp->ddm_spec_type != S_IFBLK))))
			continue;

		/*
		 * Store only while space remains, but keep counting so the
		 * caller learns the total required size via *devtcntp.
		 */
		if (ndevts < ndevts_alloced)
			devtsp[ndevts] = dmdp->ddm_dev;
		ndevts++;
	}
	ndi_devi_exit(dip, circ);

	*devtcntp = ndevts;
}

/*
 * Search for cached entries matching a devid
 * Return two lists:
 *	a list of dev_info nodes, for those devices in the attached state
 *	a
 * list of pathnames whose instances registered the given devid
 * If the lists passed in are not sufficient to return the matching
 * references, return the size of lists required.
 * The dev_info nodes are returned with a hold that the caller must release.
 *
 * Caller holds nvf_lock(dcfd_handle) (as reader) across this call.
 */
static int
e_devid_cache_devi_path_lists(ddi_devid_t devid, int retmax,
	int *retndevis, dev_info_t **retdevis, int *retnpaths, char **retpaths)
{
	nvp_devid_t *np;
	int ndevis, npaths;
	dev_info_t *dip, *pdip;
	int circ;	/* ndi_devi_enter/exit cookie for the parent */
	int maxdevis = 0;
	int maxpaths = 0;
	list_t *listp;

	ndevis = 0;
	npaths = 0;
	listp = nvf_list(dcfd_handle);
	for (np = list_head(listp); np; np = list_next(listp, np)) {
		if (np->nvp_devid == NULL)
			continue;
		if (ddi_devid_valid(np->nvp_devid) != DDI_SUCCESS) {
			DEVIDERR((CE_CONT,
			    "find: invalid devid %s\n",
			    np->nvp_devpath));
			continue;
		}
		if (ddi_devid_compare(devid, np->nvp_devid) == 0) {
			DEVID_DEBUG2((CE_CONT,
			    "find: devid match: %s 0x%x\n",
			    np->nvp_devpath, np->nvp_flags));
			DEVID_LOG_MATCH(("find", devid, np->nvp_devpath));
			DEVID_LOG_PATHS((CE_CONT, "%s\n", np->nvp_devpath));

			/*
			 * Check if we have a cached devinfo reference for this
			 * devid.  Place a hold on it to prevent detach
			 * Otherwise, use the path instead.
			 * Note: returns with a hold on each dev_info
			 * node in the list.
			 *
			 * tryenter on the parent avoids deadlock with an
			 * in-progress attach/detach of this node; on failure
			 * we fall back to returning the path.
			 */
			dip = NULL;
			if (np->nvp_flags & NVP_DEVID_DIP) {
				pdip = ddi_get_parent(np->nvp_dip);
				if (ndi_devi_tryenter(pdip, &circ)) {
					dip = np->nvp_dip;
					ndi_hold_devi(dip);
					ndi_devi_exit(pdip, circ);
					ASSERT(!DEVI_IS_ATTACHING(dip));
					ASSERT(!DEVI_IS_DETACHING(dip));
				} else {
					DEVID_LOG_DETACH((CE_CONT,
					    "may be detaching: %s\n",
					    np->nvp_devpath));
				}
			}

			/*
			 * Store only up to retmax entries, but keep counting
			 * so the caller can learn the required list size.
			 */
			if (dip) {
				if (ndevis < retmax) {
					retdevis[ndevis++] = dip;
				} else {
					ndi_rele_devi(dip);
				}
				maxdevis++;
			} else {
				if (npaths < retmax)
					retpaths[npaths++] = np->nvp_devpath;
				maxpaths++;
			}
		}
	}

	*retndevis = ndevis;
	*retnpaths = npaths;
	return (maxdevis > maxpaths ? maxdevis : maxpaths);
}


/*
 * Search the devid cache, returning dev_t list for all
 * device paths mapping to the device identified by the
 * given devid.
 *
 * Primary interface used by ddi_lyr_devid_to_devlist()
 */
int
e_devid_cache_to_devt_list(ddi_devid_t devid, char *minor_name,
	int *retndevts, dev_t **retdevts)
{
	char	*path, **paths;
	int	i, j, n;
	dev_t	*devts, *udevts;
	dev_t	tdevt;
	int	ndevts, undevts, ndevts_alloced;
	dev_info_t	*devi, **devis;
	int	ndevis, npaths, nalloced;
	ddi_devid_t match_devid;

	DEVID_LOG_FIND(("find", devid, NULL));

	ASSERT(ddi_devid_valid(devid) == DDI_SUCCESS);
	if (ddi_devid_valid(devid) != DDI_SUCCESS) {
		DEVID_LOG_ERR(("invalid devid", devid, NULL));
		return (DDI_FAILURE);
	}

	nalloced = 128;

	/*
	 * Gather matching cache entries.  If the preallocated lists were
	 * too small the search returns the size it needed: release
	 * everything (including the dev_info holds taken) and retry with
	 * larger lists.  On loop exit we still hold nvf_lock as reader.
	 */
	for (;;) {
		paths = kmem_zalloc(nalloced * sizeof (char *), KM_SLEEP);
		devis = kmem_zalloc(nalloced * sizeof (dev_info_t *), KM_SLEEP);

		rw_enter(nvf_lock(dcfd_handle), RW_READER);
		n = e_devid_cache_devi_path_lists(devid, nalloced,
		    &ndevis, devis, &npaths, paths);
		if (n <= nalloced)
			break;
		rw_exit(nvf_lock(dcfd_handle));
		for (i = 0; i < ndevis; i++)
			ndi_rele_devi(devis[i]);
		kmem_free(paths, nalloced * sizeof (char *));
		kmem_free(devis, nalloced * sizeof (dev_info_t *));
		nalloced = n + 128;
	}

	/*
	 * Duplicate the returned path strings before dropping the cache
	 * lock: they point into cache entries that may go away once the
	 * lock is released.
	 */
	for (i = 0; i < npaths; i++) {
		path = i_ddi_strdup(paths[i], KM_SLEEP);
		paths[i] = path;
	}
	rw_exit(nvf_lock(dcfd_handle));

	if (ndevis == 0 && npaths == 0) {
		DEVID_LOG_ERR(("no devid found", devid, NULL));
		kmem_free(paths, nalloced * sizeof (char *));
		kmem_free(devis, nalloced * sizeof (dev_info_t *));
		return (DDI_FAILURE);
	}

	ndevts_alloced = 128;
restart:
	ndevts = 0;
	devts = kmem_alloc(ndevts_alloced * sizeof (dev_t), KM_SLEEP);
	/* First pass: minors of instances that were already attached */
	for (i = 0; i < ndevis; i++) {
		ASSERT(!DEVI_IS_ATTACHING(devis[i]));
		ASSERT(!DEVI_IS_DETACHING(devis[i]));
		e_devid_minor_to_devlist(devis[i], minor_name,
		    ndevts_alloced, &ndevts, devts);
		if (ndevts > ndevts_alloced) {
			/* devts overflowed: grow it and redo both passes */
			kmem_free(devts, ndevts_alloced * sizeof (dev_t));
			ndevts_alloced += 128;
			goto restart;
		}
	}
	/* Second pass: attach-by-path for entries with no cached dip */
	for (i = 0; i < npaths; i++) {
		DEVID_LOG_LOOKUP((CE_CONT, "lookup %s\n", paths[i]));
		devi = e_ddi_hold_devi_by_path(paths[i], 0);
		if (devi == NULL) {
			DEVID_LOG_STALE(("stale device reference",
			    devid, paths[i]));
			continue;
		}
		/*
		 * Verify the newly attached device registered a matching devid
		 */
		if (i_ddi_devi_get_devid(DDI_DEV_T_ANY, devi,
		    &match_devid) != DDI_SUCCESS) {
			DEVIDERR((CE_CONT,
			    "%s: no devid registered on attach\n",
			    paths[i]));
			ddi_release_devi(devi);
			continue;
		}

		if (ddi_devid_compare(devid, match_devid) != 0) {
			DEVID_LOG_STALE(("new devid registered",
			    devid, paths[i]));
			ddi_release_devi(devi);
			ddi_devid_free(match_devid);
			continue;
		}
		ddi_devid_free(match_devid);

		e_devid_minor_to_devlist(devi, minor_name,
		    ndevts_alloced, &ndevts, devts);
		ddi_release_devi(devi);
		if (ndevts > ndevts_alloced) {
			kmem_free(devts,
			    ndevts_alloced * sizeof (dev_t));
			ndevts_alloced += 128;
			goto restart;
		}
	}

	/* drop hold from e_devid_cache_devi_path_lists */
	for (i = 0; i < ndevis; i++) {
		ndi_rele_devi(devis[i]);
	}
	for (i = 0; i < npaths; i++) {
		kmem_free(paths[i], strlen(paths[i]) + 1);
	}
	kmem_free(paths, nalloced * sizeof (char *));
	kmem_free(devis, nalloced * sizeof (dev_info_t *));

	if (ndevts == 0) {
		DEVID_LOG_ERR(("no devid found", devid, NULL));
		kmem_free(devts, ndevts_alloced * sizeof (dev_t));
		return (DDI_FAILURE);
	}

	/*
	 * Build the final list of sorted dev_t's with duplicates collapsed so
	 * returned results are consistent. This prevents implementation
	 * artifacts from causing unnecessary changes in SVM namespace.
99283c4dfe9Sjg */ 99383c4dfe9Sjg /* bubble sort */ 99483c4dfe9Sjg for (i = 0; i < (ndevts - 1); i++) { 99583c4dfe9Sjg for (j = 0; j < ((ndevts - 1) - i); j++) { 99683c4dfe9Sjg if (devts[j + 1] < devts[j]) { 99783c4dfe9Sjg tdevt = devts[j]; 99883c4dfe9Sjg devts[j] = devts[j + 1]; 99983c4dfe9Sjg devts[j + 1] = tdevt; 100083c4dfe9Sjg } 100183c4dfe9Sjg } 100283c4dfe9Sjg } 100383c4dfe9Sjg 100483c4dfe9Sjg /* determine number of unique values */ 100583c4dfe9Sjg for (undevts = ndevts, i = 1; i < ndevts; i++) { 100683c4dfe9Sjg if (devts[i - 1] == devts[i]) 100783c4dfe9Sjg undevts--; 100883c4dfe9Sjg } 100983c4dfe9Sjg 101083c4dfe9Sjg /* allocate unique */ 101183c4dfe9Sjg udevts = kmem_alloc(undevts * sizeof (dev_t), KM_SLEEP); 101283c4dfe9Sjg 101383c4dfe9Sjg /* copy unique */ 101483c4dfe9Sjg udevts[0] = devts[0]; 101583c4dfe9Sjg for (i = 1, j = 1; i < ndevts; i++) { 101683c4dfe9Sjg if (devts[i - 1] != devts[i]) 101783c4dfe9Sjg udevts[j++] = devts[i]; 101883c4dfe9Sjg } 101983c4dfe9Sjg ASSERT(j == undevts); 102083c4dfe9Sjg 102183c4dfe9Sjg kmem_free(devts, ndevts_alloced * sizeof (dev_t)); 102283c4dfe9Sjg 102383c4dfe9Sjg *retndevts = undevts; 102483c4dfe9Sjg *retdevts = udevts; 102583c4dfe9Sjg 102683c4dfe9Sjg return (DDI_SUCCESS); 102783c4dfe9Sjg } 102883c4dfe9Sjg 102983c4dfe9Sjg void 103083c4dfe9Sjg e_devid_cache_free_devt_list(int ndevts, dev_t *devt_list) 103183c4dfe9Sjg { 103283c4dfe9Sjg kmem_free(devt_list, ndevts * sizeof (dev_t *)); 103383c4dfe9Sjg } 103483c4dfe9Sjg 1035392e836bSGavin Maltby /* 1036392e836bSGavin Maltby * If given a full path and NULL ua, search for a cache entry 1037392e836bSGavin Maltby * whose path matches the full path. On a cache hit duplicate the 1038392e836bSGavin Maltby * devid of the matched entry into the given devid (caller 1039392e836bSGavin Maltby * must free); nodenamebuf is not touched for this usage. 
 *
 * Given a path and a non-NULL unit address, search the cache for any entry
 * matching "<path>/%@<unit-address>" where '%' is a wildcard meaning
 * any node name. The path should not end in a '/'. On a cache hit
 * duplicate the devid as before (caller must free) and copy into
 * the caller-provided nodenamebuf (if not NULL) the nodename of the
 * matched entry.
 *
 * We must not make use of nvp_dip since that may be NULL for cached
 * entries that are not present in the current tree.
 */
int
e_devid_cache_path_to_devid(char *path, char *ua,
	char *nodenamebuf, ddi_devid_t *devidp)
{
	size_t pathlen, ualen;
	int rv = DDI_FAILURE;
	nvp_devid_t *np;
	list_t *listp;
	char *cand;

	/* Reject NULL/empty path, empty (but non-NULL) ua, missing devidp */
	if (path == NULL || *path == '\0' || (ua && *ua == '\0') ||
	    devidp == NULL)
		return (DDI_FAILURE);

	*devidp = NULL;

	if (ua) {
		pathlen = strlen(path);
		ualen = strlen(ua);
	}

	rw_enter(nvf_lock(dcfd_handle), RW_READER);

	listp = nvf_list(dcfd_handle);
	for (np = list_head(listp); np; np = list_next(listp, np)) {
		size_t nodelen, candlen, n;
		ddi_devid_t devid_dup;
		char *uasep, *node;

		if (np->nvp_devid == NULL)
			continue;

		if (ddi_devid_valid(np->nvp_devid) != DDI_SUCCESS) {
			DEVIDERR((CE_CONT,
			    "pathsearch: invalid devid %s\n",
			    np->nvp_devpath));
			continue;
		}

		cand = np->nvp_devpath;		/* candidate path */

		/* If a full pathname was provided the compare is easy */
		if (ua == NULL) {
			if (strcmp(cand, path) == 0)
				goto match;
			else
				continue;
		}

		/*
		 * The compare for initial path plus ua and unknown nodename
		 * is trickier.
		 *
		 * Does the initial path component match 'path'?
		 */
		if (strncmp(path, cand, pathlen) != 0)
			continue;

		candlen = strlen(cand);

		/*
		 * The next character must be a '/' and there must be no
		 * further '/' thereafter. Begin by checking that the
		 * candidate is long enough to include at minimum a
		 * "/<nodename>@<ua>" after the initial portion already
		 * matched assuming a nodename length of 1.
		 */
		if (candlen < pathlen + 1 + 1 + 1 + ualen ||
		    cand[pathlen] != '/' ||
		    strchr(cand + pathlen + 1, '/') != NULL)
			continue;

		node = cand + pathlen + 1;	/* <node>@<ua> string */

		/*
		 * Find the '@' before the unit address. Check for
		 * unit address match.
		 */
		if ((uasep = strchr(node, '@')) == NULL)
			continue;

		/*
		 * Check we still have enough length and that ua matches.
		 * NOTE(review): this is a prefix compare on ua, so a
		 * candidate may carry trailing characters after the ua —
		 * presumably intentional since the ua is the last path
		 * component; confirm against callers.
		 */
		nodelen = (uintptr_t)uasep - (uintptr_t)node;
		if (candlen < pathlen + 1 + nodelen + 1 + ualen ||
		    strncmp(ua, uasep + 1, ualen) != 0)
			continue;
match:
		/* Duplicate the cached devid for the caller */
		n = ddi_devid_sizeof(np->nvp_devid);
		devid_dup = kmem_alloc(n, KM_SLEEP);	/* caller must free */
		(void) bcopy(np->nvp_devid, devid_dup, n);
		*devidp = devid_dup;

		/*
		 * nodelen/node are only meaningful on the ua-match path;
		 * the ua guard here keeps the full-path goto safe.
		 */
		if (ua && nodenamebuf) {
			(void) strncpy(nodenamebuf, node, nodelen);
			nodenamebuf[nodelen] = '\0';
		}

		rv = DDI_SUCCESS;
		break;
	}

	rw_exit(nvf_lock(dcfd_handle));

	return (rv);
}

#ifdef DEBUG
static void
116183c4dfe9Sjg devid_log(char *fmt, ddi_devid_t devid, char *path) 116283c4dfe9Sjg { 116383c4dfe9Sjg char *devidstr = ddi_devid_str_encode(devid, NULL); 116483c4dfe9Sjg if (path) { 116583c4dfe9Sjg cmn_err(CE_CONT, "%s: %s %s\n", fmt, path, devidstr); 116683c4dfe9Sjg } else { 116783c4dfe9Sjg cmn_err(CE_CONT, "%s: %s\n", fmt, devidstr); 116883c4dfe9Sjg } 116983c4dfe9Sjg ddi_devid_str_free(devidstr); 117083c4dfe9Sjg } 117183c4dfe9Sjg #endif /* DEBUG */ 1172