183c4dfe9Sjg /* 283c4dfe9Sjg * CDDL HEADER START 383c4dfe9Sjg * 483c4dfe9Sjg * The contents of this file are subject to the terms of the 583c4dfe9Sjg * Common Development and Distribution License (the "License"). 683c4dfe9Sjg * You may not use this file except in compliance with the License. 783c4dfe9Sjg * 883c4dfe9Sjg * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 983c4dfe9Sjg * or http://www.opensolaris.org/os/licensing. 1083c4dfe9Sjg * See the License for the specific language governing permissions 1183c4dfe9Sjg * and limitations under the License. 1283c4dfe9Sjg * 1383c4dfe9Sjg * When distributing Covered Code, include this CDDL HEADER in each 1483c4dfe9Sjg * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 1583c4dfe9Sjg * If applicable, add the following below this CDDL HEADER, with the 1683c4dfe9Sjg * fields enclosed by brackets "[]" replaced with your own identifying 1783c4dfe9Sjg * information: Portions Copyright [yyyy] [name of copyright owner] 1883c4dfe9Sjg * 1983c4dfe9Sjg * CDDL HEADER END 2083c4dfe9Sjg */ 2183c4dfe9Sjg /* 224f1e984dSReed * Copyright (c) 2006, 2010, Oracle and/or its affiliates. All rights reserved. 
2383c4dfe9Sjg */ 2483c4dfe9Sjg 2583c4dfe9Sjg #include <sys/note.h> 2683c4dfe9Sjg #include <sys/t_lock.h> 2783c4dfe9Sjg #include <sys/cmn_err.h> 2883c4dfe9Sjg #include <sys/instance.h> 2983c4dfe9Sjg #include <sys/conf.h> 3083c4dfe9Sjg #include <sys/stat.h> 3183c4dfe9Sjg #include <sys/ddi.h> 3283c4dfe9Sjg #include <sys/hwconf.h> 3383c4dfe9Sjg #include <sys/sunddi.h> 3483c4dfe9Sjg #include <sys/sunndi.h> 35*392e836bSGavin Maltby #include <sys/sunmdi.h> 3683c4dfe9Sjg #include <sys/ddi_impldefs.h> 3783c4dfe9Sjg #include <sys/ndi_impldefs.h> 3883c4dfe9Sjg #include <sys/kobj.h> 3983c4dfe9Sjg #include <sys/devcache.h> 4083c4dfe9Sjg #include <sys/devid_cache.h> 4183c4dfe9Sjg #include <sys/sysmacros.h> 4283c4dfe9Sjg 4383c4dfe9Sjg /* 4483c4dfe9Sjg * Discovery refers to the heroic effort made to discover a device which 4583c4dfe9Sjg * cannot be accessed at the physical path where it once resided. Discovery 4683c4dfe9Sjg * involves walking the entire device tree attaching all possible disk 4783c4dfe9Sjg * instances, to search for the device referenced by a devid. Obviously, 4883c4dfe9Sjg * full device discovery is something to be avoided where possible. 4983c4dfe9Sjg * Note that simply invoking devfsadm(1M) is equivalent to running full 5083c4dfe9Sjg * discovery at the devid cache level. 5183c4dfe9Sjg * 5283c4dfe9Sjg * Reasons why a disk may not be accessible: 5383c4dfe9Sjg * disk powered off 5483c4dfe9Sjg * disk removed or cable disconnected 5583c4dfe9Sjg * disk or adapter broken 5683c4dfe9Sjg * 5783c4dfe9Sjg * Note that discovery is not needed and cannot succeed in any of these 5883c4dfe9Sjg * cases. 5983c4dfe9Sjg * 6083c4dfe9Sjg * When discovery may succeed: 6183c4dfe9Sjg * Discovery will result in success when a device has been moved 6283c4dfe9Sjg * to a different address. Note that it's recommended that 6383c4dfe9Sjg * devfsadm(1M) be invoked (no arguments required) whenever a system's 6483c4dfe9Sjg * h/w configuration has been updated. 
Alternatively, a 6583c4dfe9Sjg * reconfiguration boot can be used to accomplish the same result. 6683c4dfe9Sjg * 6783c4dfe9Sjg * Note that discovery is not necessary to be able to correct an access 6883c4dfe9Sjg * failure for a device which was powered off. Assuming the cache has an 6983c4dfe9Sjg * entry for such a device, simply powering it on should permit the system 7083c4dfe9Sjg * to access it. If problems persist after powering it on, invoke 7183c4dfe9Sjg * devfsadm(1M). 7283c4dfe9Sjg * 7383c4dfe9Sjg * Discovery prior to mounting root is only of interest when booting 7483c4dfe9Sjg * from a filesystem which accesses devices by device id, which of 7583c4dfe9Sjg * not all do. 7683c4dfe9Sjg * 7783c4dfe9Sjg * Tunables 7883c4dfe9Sjg * 7983c4dfe9Sjg * devid_discovery_boot (default 1) 8083c4dfe9Sjg * Number of times discovery will be attempted prior to mounting root. 8183c4dfe9Sjg * Must be done at least once to recover from corrupted or missing 8283c4dfe9Sjg * devid cache backing store. Probably there's no reason to ever 83*392e836bSGavin Maltby * set this to greater than one as a missing device will remain 8483c4dfe9Sjg * unavailable no matter how often the system searches for it. 8583c4dfe9Sjg * 8683c4dfe9Sjg * devid_discovery_postboot (default 1) 8783c4dfe9Sjg * Number of times discovery will be attempted after mounting root. 8883c4dfe9Sjg * This must be performed at least once to discover any devices 8983c4dfe9Sjg * needed after root is mounted which may have been powered 9083c4dfe9Sjg * off and moved before booting. 9183c4dfe9Sjg * Setting this to a larger positive number will introduce 9283c4dfe9Sjg * some inconsistency in system operation. Searching for a device 9383c4dfe9Sjg * will take an indeterminate amount of time, sometimes slower, 9483c4dfe9Sjg * sometimes faster. In addition, the system will sometimes 9583c4dfe9Sjg * discover a newly powered on device, sometimes it won't. 9683c4dfe9Sjg * Use of this option is not therefore recommended. 
9783c4dfe9Sjg * 9883c4dfe9Sjg * devid_discovery_postboot_always (default 0) 9983c4dfe9Sjg * Set to 1, the system will always attempt full discovery. 10083c4dfe9Sjg * 10183c4dfe9Sjg * devid_discovery_secs (default 0) 10283c4dfe9Sjg * Set to a positive value, the system will attempt full discovery 10383c4dfe9Sjg * but with a minimum delay between attempts. A device search 10483c4dfe9Sjg * within the period of time specified will result in failure. 10583c4dfe9Sjg * 10683c4dfe9Sjg * devid_cache_read_disable (default 0) 10783c4dfe9Sjg * Set to 1 to disable reading /etc/devices/devid_cache. 10883c4dfe9Sjg * Devid cache will continue to operate normally but 10983c4dfe9Sjg * at least one discovery attempt will be required. 11083c4dfe9Sjg * 11183c4dfe9Sjg * devid_cache_write_disable (default 0) 11283c4dfe9Sjg * Set to 1 to disable updates to /etc/devices/devid_cache. 11383c4dfe9Sjg * Any updates to the devid cache will not be preserved across a reboot. 11483c4dfe9Sjg * 11583c4dfe9Sjg * devid_report_error (default 0) 11683c4dfe9Sjg * Set to 1 to enable some error messages related to devid 11783c4dfe9Sjg * cache failures. 11883c4dfe9Sjg * 11983c4dfe9Sjg * The devid is packed in the cache file as a byte array. For 12083c4dfe9Sjg * portability, this could be done in the encoded string format. 
 */

/*
 * Tunables -- see the theory-of-operation comment above for details.
 */
int devid_discovery_boot = 1;		/* discovery budget before root mount */
int devid_discovery_postboot = 1;	/* discovery budget after root mount */
int devid_discovery_postboot_always = 0; /* 1: always attempt full discovery */
int devid_discovery_secs = 0;		/* >0: min seconds between attempts */

int devid_cache_read_disable = 0;	/* 1: skip reading the cache file */
int devid_cache_write_disable = 0;	/* 1: never update the cache file */

int devid_report_error = 0;		/* 1: report devid cache failures */


/*
 * State to manage discovery of devices providing a devid
 */
static int devid_discovery_busy = 0;	/* a discovery is in progress */
static kmutex_t devid_discovery_mutex;	/* protects busy flag and timestamp */
static kcondvar_t devid_discovery_cv;	/* broadcast when discovery completes */
static clock_t devid_last_discovery = 0; /* lbolt of last completed discovery */


#ifdef DEBUG
int nvp_devid_debug = 0;
int devid_debug = 0;
int devid_log_registers = 0;
int devid_log_finds = 0;
int devid_log_lookups = 0;
int devid_log_discovery = 0;
int devid_log_matches = 0;
int devid_log_paths = 0;
int devid_log_failures = 0;
int devid_log_hold = 0;
int devid_log_unregisters = 0;
int devid_log_removes = 0;
int devid_register_debug = 0;
int devid_log_stale = 0;
int devid_log_detaches = 0;
#endif	/* DEBUG */

/*
 * devid cache file registration for cache reads and updates
 */
static nvf_ops_t devid_cache_ops = {
	"/etc/devices/devid_cache",	/* path to cache */
	devid_cache_unpack_nvlist,	/* read: nvlist to nvp */
	devid_cache_pack_list,		/* write: nvp to nvlist */
	devid_list_free,		/* free data list */
	NULL				/* write complete callback */
};
/*
 * handle to registered devid cache handlers
 */
nvf_handle_t dcfd_handle;


/*
 * Initialize devid cache file management.
 * Registers the cache file ops with the nvf framework, creates the
 * in-core list of cached entries, and initializes discovery state.
 */
void
devid_cache_init(void)
{
	dcfd_handle = nvf_register_file(&devid_cache_ops);
	ASSERT(dcfd_handle);

	list_create(nvf_list(dcfd_handle), sizeof (nvp_devid_t),
	    offsetof(nvp_devid_t, nvp_link));

	mutex_init(&devid_discovery_mutex, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&devid_discovery_cv, NULL, CV_DRIVER, NULL);
}

/*
 * Read and initialize the devid cache from the persistent store.
 * No-op when devid_cache_read_disable is set; in that case at least
 * one discovery attempt will be needed to repopulate the cache.
 */
void
devid_cache_read(void)
{
	if (!devid_cache_read_disable) {
		rw_enter(nvf_lock(dcfd_handle), RW_WRITER);
		/* list must be empty -- read only happens once, at boot */
		ASSERT(list_head(nvf_list(dcfd_handle)) == NULL);
		(void) nvf_read_file(dcfd_handle);
		rw_exit(nvf_lock(dcfd_handle));
	}
}

/*
 * Free a single cache element and its owned path/devid storage.
 */
static void
devid_nvp_free(nvp_devid_t *dp)
{
	if (dp->nvp_devpath)
		kmem_free(dp->nvp_devpath, strlen(dp->nvp_devpath)+1);
	if (dp->nvp_devid)
		kmem_free(dp->nvp_devid, ddi_devid_sizeof(dp->nvp_devid));

	kmem_free(dp, sizeof (nvp_devid_t));
}

/*
 * Free the entire list of cache elements (nvf "free data list" callback).
 * Caller must hold the nvf lock as writer.
 */
static void
devid_list_free(nvf_handle_t fd)
{
	list_t		*listp;
	nvp_devid_t	*np;

	ASSERT(RW_WRITE_HELD(nvf_lock(dcfd_handle)));

	listp = nvf_list(fd);
	while (np = list_head(listp)) {
		list_remove(listp, np);
		devid_nvp_free(np);
	}
}

/*
 * Free an nvp element in a list
 */
static void
devid_nvp_unlink_and_free(nvf_handle_t fd, nvp_devid_t *np)
{
	list_remove(nvf_list(fd), np);
	devid_nvp_free(np);
}

/*
 * Unpack a device path/nvlist pair to the list of devid cache elements.
 * Used to parse the nvlist format when reading
 * /etc/devices/devid_cache
 *
 * Always returns 0 so that one bad entry does not abort the whole
 * file read; invalid or missing devids are only reported.
 */
static int
devid_cache_unpack_nvlist(nvf_handle_t fd, nvlist_t *nvl, char *name)
{
	nvp_devid_t *np;
	ddi_devid_t devidp;
	int rval;
	uint_t n;

	NVP_DEVID_DEBUG_PATH((name));
	ASSERT(RW_WRITE_HELD(nvf_lock(dcfd_handle)));

	/*
	 * check path for a devid
	 */
	rval = nvlist_lookup_byte_array(nvl,
	    DP_DEVID_ID, (uchar_t **)&devidp, &n);
	if (rval == 0) {
		if (ddi_devid_valid(devidp) == DDI_SUCCESS) {
			ASSERT(n == ddi_devid_sizeof(devidp));
			np = kmem_zalloc(sizeof (nvp_devid_t), KM_SLEEP);
			np->nvp_devpath = i_ddi_strdup(name, KM_SLEEP);
			/* copy the devid out of the nvlist-owned buffer */
			np->nvp_devid = kmem_alloc(n, KM_SLEEP);
			(void) bcopy(devidp, np->nvp_devid, n);
			list_insert_tail(nvf_list(fd), np);
			NVP_DEVID_DEBUG_DEVID((np->nvp_devid));
		} else {
			DEVIDERR((CE_CONT,
			    "%s: invalid devid\n", name));
		}
	} else {
		DEVIDERR((CE_CONT,
		    "%s: devid not available\n", name));
	}

	return (0);
}

/*
 * Pack the list of devid cache elements into a single nvlist
 * Used when writing the nvlist file.
 *
 * Each cached path becomes a sub-nvlist (keyed by the path) holding
 * the devid as a byte array.  Entries with no devid are skipped.
 * Returns DDI_SUCCESS with *ret_nvl set, or DDI_FAILURE with
 * *ret_nvl NULL and all partial allocations freed.
 */
static int
devid_cache_pack_list(nvf_handle_t fd, nvlist_t **ret_nvl)
{
	nvlist_t	*nvl, *sub_nvl;
	nvp_devid_t	*np;
	int		rval;
	list_t		*listp;

	ASSERT(RW_WRITE_HELD(nvf_lock(dcfd_handle)));

	rval = nvlist_alloc(&nvl, NV_UNIQUE_NAME, KM_SLEEP);
	if (rval != 0) {
		nvf_error("%s: nvlist alloc error %d\n",
		    nvf_cache_name(fd), rval);
		return (DDI_FAILURE);
	}

	listp = nvf_list(fd);
	for (np = list_head(listp); np; np = list_next(listp, np)) {
		if (np->nvp_devid == NULL)
			continue;
		NVP_DEVID_DEBUG_PATH(np->nvp_devpath);
		rval = nvlist_alloc(&sub_nvl, NV_UNIQUE_NAME, KM_SLEEP);
		if (rval != 0) {
			nvf_error("%s: nvlist alloc error %d\n",
			    nvf_cache_name(fd), rval);
			sub_nvl = NULL;
			goto err;
		}

		rval = nvlist_add_byte_array(sub_nvl, DP_DEVID_ID,
		    (uchar_t *)np->nvp_devid,
		    ddi_devid_sizeof(np->nvp_devid));
		if (rval == 0) {
			NVP_DEVID_DEBUG_DEVID(np->nvp_devid);
		} else {
			nvf_error(
			    "%s: nvlist add error %d (devid)\n",
			    nvf_cache_name(fd), rval);
			goto err;
		}

		rval = nvlist_add_nvlist(nvl, np->nvp_devpath, sub_nvl);
		if (rval != 0) {
			nvf_error("%s: nvlist add error %d (sublist)\n",
			    nvf_cache_name(fd), rval);
			goto err;
		}
		/* nvlist_add_nvlist copies; free our working sub-list */
		nvlist_free(sub_nvl);
	}

	*ret_nvl = nvl;
	return (DDI_SUCCESS);

err:
	if (sub_nvl)
		nvlist_free(sub_nvl);
	nvlist_free(nvl);
	*ret_nvl = NULL;
	return (DDI_FAILURE);
}

/*
 * Decide whether a full discovery should be performed now, consuming
 * the appropriate tunable budget (pre- or post-root, as determined by
 * i_ddi_io_initialized()).  Returns 1 to proceed, 0 to skip.
 * Caller must hold devid_discovery_mutex.
 */
static int
e_devid_do_discovery(void)
{
	ASSERT(mutex_owned(&devid_discovery_mutex));

	if (i_ddi_io_initialized() == 0) {
		if (devid_discovery_boot > 0) {
			devid_discovery_boot--;
			return (1);
		}
	} else {
		if (devid_discovery_postboot_always > 0)
			return (1);
		if (devid_discovery_postboot > 0) {
			devid_discovery_postboot--;
			return (1);
		}
		if (devid_discovery_secs > 0) {
			/* rate-limited: only if the cool-down has expired */
			if ((ddi_get_lbolt() - devid_last_discovery) >
			    drv_usectohz(devid_discovery_secs * MICROSEC)) {
				return (1);
			}
		}
	}

	DEVID_LOG_DISC((CE_CONT, "devid_discovery: no discovery\n"));
	return (0);
}

/*
 * Hold the named driver (which loads/attaches it as a side effect),
 * then immediately release the hold.  Silently returns if the hold
 * cannot be obtained.
 */
static void
e_ddi_devid_hold_by_major(major_t major)
{
	DEVID_LOG_DISC((CE_CONT,
	    "devid_discovery: ddi_hold_installed_driver %d\n", major));

	if (ddi_hold_installed_driver(major) == NULL)
		return;

	ddi_rele_driver(major);
}

/* disk drivers held during pre-root discovery, in addition to the hint */
static char *e_ddi_devid_hold_driver_list[] = { "sd", "ssd", "dad" };

#define	N_DRIVERS_TO_HOLD	\
	(sizeof (e_ddi_devid_hold_driver_list) / sizeof (char *))

/*
 * Pre-root discovery: extract the driver-name hint embedded in the
 * devid and hold that driver first, then hold each of the well-known
 * disk drivers (skipping the hint driver if it already matched).
 */
static void
e_ddi_devid_hold_installed_driver(ddi_devid_t devid)
{
	impl_devid_t *id = (impl_devid_t *)devid;
	major_t major, hint_major;
	char hint[DEVID_HINT_SIZE + 1];
	char **drvp;
	int i;

	/* Count non-null bytes */
	for (i = 0; i < DEVID_HINT_SIZE; i++)
		if (id->did_driver[i] == '\0')
			break;

	/* Make a copy of the driver hint */
	bcopy(id->did_driver, hint, i);
	hint[i] = '\0';

	/* search for the devid using the hint driver */
	hint_major = ddi_name_to_major(hint);
	if (hint_major != DDI_MAJOR_T_NONE) {
		e_ddi_devid_hold_by_major(hint_major);
	}

	drvp = e_ddi_devid_hold_driver_list;
	for (i = 0; i < N_DRIVERS_TO_HOLD; i++, drvp++) {
		major = ddi_name_to_major(*drvp);
		if (major != DDI_MAJOR_T_NONE && major != hint_major) {
			e_ddi_devid_hold_by_major(major);
		}
	}
}


/*
 * Return success if discovery was attempted, to indicate
 * that the desired device may now be available.
 *
 * Only one discovery runs at a time; late arrivals block on
 * devid_discovery_cv and return success once the in-flight
 * discovery completes.
 */
int
e_ddi_devid_discovery(ddi_devid_t devid)
{
	int flags;
	int rval = DDI_SUCCESS;

	mutex_enter(&devid_discovery_mutex);

	if (devid_discovery_busy) {
		DEVID_LOG_DISC((CE_CONT, "devid_discovery: busy\n"));
		while (devid_discovery_busy) {
			cv_wait(&devid_discovery_cv, &devid_discovery_mutex);
		}
	} else if (e_devid_do_discovery()) {
		/*
		 * Drop the mutex during the (potentially long) tree walk;
		 * the busy flag keeps other callers out.
		 */
		devid_discovery_busy = 1;
		mutex_exit(&devid_discovery_mutex);

		if (i_ddi_io_initialized() == 0) {
			e_ddi_devid_hold_installed_driver(devid);
		} else {
			DEVID_LOG_DISC((CE_CONT,
			    "devid_discovery: ndi_devi_config\n"));
			flags = NDI_DEVI_PERSIST | NDI_CONFIG | NDI_NO_EVENT;
			if (i_ddi_io_initialized())
				flags |= NDI_DRV_CONF_REPROBE;
			(void) ndi_devi_config(ddi_root_node(), flags);
		}

		mutex_enter(&devid_discovery_mutex);
		devid_discovery_busy = 0;
		cv_broadcast(&devid_discovery_cv);
		if (devid_discovery_secs > 0)
			devid_last_discovery = ddi_get_lbolt();
		DEVID_LOG_DISC((CE_CONT, "devid_discovery: done\n"));
	} else {
		rval = DDI_FAILURE;
		DEVID_LOG_DISC((CE_CONT, "no devid discovery\n"));
	}

	mutex_exit(&devid_discovery_mutex);

	return (rval);
}

/*
 * As part of registering a devid for a device,
 * update the devid cache with this device/devid pair
 * or note that this combination has registered.
486*392e836bSGavin Maltby * 487*392e836bSGavin Maltby * If a devpath is provided it will be used as the path to register the 488*392e836bSGavin Maltby * devid against, otherwise we use ddi_pathname(dip). In both cases 489*392e836bSGavin Maltby * we duplicate the path string so that it can be cached/freed indepdently 490*392e836bSGavin Maltby * of the original owner. 49183c4dfe9Sjg */ 492*392e836bSGavin Maltby static int 493*392e836bSGavin Maltby e_devid_cache_register_cmn(dev_info_t *dip, ddi_devid_t devid, char *devpath) 49483c4dfe9Sjg { 49583c4dfe9Sjg nvp_devid_t *np; 49683c4dfe9Sjg nvp_devid_t *new_nvp; 49783c4dfe9Sjg ddi_devid_t new_devid; 49883c4dfe9Sjg int new_devid_size; 49983c4dfe9Sjg char *path, *fullpath; 50083c4dfe9Sjg ddi_devid_t free_devid = NULL; 50183c4dfe9Sjg int pathlen; 50283c4dfe9Sjg list_t *listp; 50383c4dfe9Sjg int is_dirty = 0; 50483c4dfe9Sjg 5054f1e984dSReed 50683c4dfe9Sjg ASSERT(ddi_devid_valid(devid) == DDI_SUCCESS); 50783c4dfe9Sjg 508*392e836bSGavin Maltby if (devpath) { 509*392e836bSGavin Maltby pathlen = strlen(devpath) + 1; 510*392e836bSGavin Maltby path = kmem_alloc(pathlen, KM_SLEEP); 511*392e836bSGavin Maltby bcopy(devpath, path, pathlen); 512*392e836bSGavin Maltby } else { 513*392e836bSGavin Maltby /* 514*392e836bSGavin Maltby * We are willing to accept DS_BOUND nodes if we can form a full 515*392e836bSGavin Maltby * ddi_pathname (i.e. the node is part way to becomming 516*392e836bSGavin Maltby * DS_INITIALIZED and devi_addr/ddi_get_name_addr are non-NULL). 
517*392e836bSGavin Maltby */ 518*392e836bSGavin Maltby if (ddi_get_name_addr(dip) == NULL) 519*392e836bSGavin Maltby return (DDI_FAILURE); 520*392e836bSGavin Maltby 521*392e836bSGavin Maltby fullpath = kmem_alloc(MAXPATHLEN, KM_SLEEP); 522*392e836bSGavin Maltby (void) ddi_pathname(dip, fullpath); 523*392e836bSGavin Maltby pathlen = strlen(fullpath) + 1; 524*392e836bSGavin Maltby path = kmem_alloc(pathlen, KM_SLEEP); 525*392e836bSGavin Maltby bcopy(fullpath, path, pathlen); 526*392e836bSGavin Maltby kmem_free(fullpath, MAXPATHLEN); 527*392e836bSGavin Maltby } 52883c4dfe9Sjg 52983c4dfe9Sjg DEVID_LOG_REG(("register", devid, path)); 53083c4dfe9Sjg 53183c4dfe9Sjg new_nvp = kmem_zalloc(sizeof (nvp_devid_t), KM_SLEEP); 53283c4dfe9Sjg new_devid_size = ddi_devid_sizeof(devid); 53383c4dfe9Sjg new_devid = kmem_alloc(new_devid_size, KM_SLEEP); 53483c4dfe9Sjg (void) bcopy(devid, new_devid, new_devid_size); 53583c4dfe9Sjg 53683c4dfe9Sjg rw_enter(nvf_lock(dcfd_handle), RW_WRITER); 53783c4dfe9Sjg 53883c4dfe9Sjg listp = nvf_list(dcfd_handle); 53983c4dfe9Sjg for (np = list_head(listp); np; np = list_next(listp, np)) { 54083c4dfe9Sjg if (strcmp(path, np->nvp_devpath) == 0) { 54183c4dfe9Sjg DEVID_DEBUG2((CE_CONT, 54283c4dfe9Sjg "register: %s path match\n", path)); 54383c4dfe9Sjg if (np->nvp_devid == NULL) { 544a204de77Scth replace: np->nvp_devid = new_devid; 54583c4dfe9Sjg np->nvp_flags |= 546a204de77Scth NVP_DEVID_DIP | NVP_DEVID_REGISTERED; 54783c4dfe9Sjg np->nvp_dip = dip; 54883c4dfe9Sjg if (!devid_cache_write_disable) { 54983c4dfe9Sjg nvf_mark_dirty(dcfd_handle); 55083c4dfe9Sjg is_dirty = 1; 55183c4dfe9Sjg } 55283c4dfe9Sjg rw_exit(nvf_lock(dcfd_handle)); 55383c4dfe9Sjg kmem_free(new_nvp, sizeof (nvp_devid_t)); 55483c4dfe9Sjg kmem_free(path, pathlen); 55583c4dfe9Sjg goto exit; 55683c4dfe9Sjg } 55783c4dfe9Sjg if (ddi_devid_valid(np->nvp_devid) != DDI_SUCCESS) { 55883c4dfe9Sjg /* replace invalid devid */ 55983c4dfe9Sjg free_devid = np->nvp_devid; 56083c4dfe9Sjg goto replace; 
56183c4dfe9Sjg } 56283c4dfe9Sjg /* 56383c4dfe9Sjg * We're registering an already-cached path 56483c4dfe9Sjg * Does the device's devid match the cache? 56583c4dfe9Sjg */ 56683c4dfe9Sjg if (ddi_devid_compare(devid, np->nvp_devid) != 0) { 56783c4dfe9Sjg DEVID_DEBUG((CE_CONT, "devid register: " 56883c4dfe9Sjg "devid %s does not match\n", path)); 56983c4dfe9Sjg /* 57083c4dfe9Sjg * Replace cached devid for this path 57183c4dfe9Sjg * with newly registered devid. A devid 57283c4dfe9Sjg * may map to multiple paths but one path 57383c4dfe9Sjg * should only map to one devid. 57483c4dfe9Sjg */ 57583c4dfe9Sjg devid_nvp_unlink_and_free(dcfd_handle, np); 57683c4dfe9Sjg np = NULL; 57783c4dfe9Sjg break; 57883c4dfe9Sjg } else { 57983c4dfe9Sjg DEVID_DEBUG2((CE_CONT, 58083c4dfe9Sjg "devid register: %s devid match\n", path)); 58183c4dfe9Sjg np->nvp_flags |= 582a204de77Scth NVP_DEVID_DIP | NVP_DEVID_REGISTERED; 58383c4dfe9Sjg np->nvp_dip = dip; 58483c4dfe9Sjg rw_exit(nvf_lock(dcfd_handle)); 58583c4dfe9Sjg kmem_free(new_nvp, sizeof (nvp_devid_t)); 58683c4dfe9Sjg kmem_free(path, pathlen); 58783c4dfe9Sjg kmem_free(new_devid, new_devid_size); 58883c4dfe9Sjg return (DDI_SUCCESS); 58983c4dfe9Sjg } 59083c4dfe9Sjg } 59183c4dfe9Sjg } 59283c4dfe9Sjg 59383c4dfe9Sjg /* 59483c4dfe9Sjg * Add newly registered devid to the cache 59583c4dfe9Sjg */ 59683c4dfe9Sjg ASSERT(np == NULL); 59783c4dfe9Sjg 59883c4dfe9Sjg new_nvp->nvp_devpath = path; 59983c4dfe9Sjg new_nvp->nvp_flags = NVP_DEVID_DIP | NVP_DEVID_REGISTERED; 60083c4dfe9Sjg new_nvp->nvp_dip = dip; 60183c4dfe9Sjg new_nvp->nvp_devid = new_devid; 60283c4dfe9Sjg 60383c4dfe9Sjg if (!devid_cache_write_disable) { 60483c4dfe9Sjg is_dirty = 1; 60583c4dfe9Sjg nvf_mark_dirty(dcfd_handle); 60683c4dfe9Sjg } 60783c4dfe9Sjg list_insert_tail(nvf_list(dcfd_handle), new_nvp); 60883c4dfe9Sjg 60983c4dfe9Sjg rw_exit(nvf_lock(dcfd_handle)); 61083c4dfe9Sjg 61183c4dfe9Sjg exit: 61283c4dfe9Sjg if (free_devid) 61383c4dfe9Sjg kmem_free(free_devid, 
ddi_devid_sizeof(free_devid)); 61483c4dfe9Sjg 61583c4dfe9Sjg if (is_dirty) 61683c4dfe9Sjg nvf_wake_daemon(); 61783c4dfe9Sjg 61883c4dfe9Sjg return (DDI_SUCCESS); 61983c4dfe9Sjg } 62083c4dfe9Sjg 621*392e836bSGavin Maltby int 622*392e836bSGavin Maltby e_devid_cache_register(dev_info_t *dip, ddi_devid_t devid) 623*392e836bSGavin Maltby { 624*392e836bSGavin Maltby return (e_devid_cache_register_cmn(dip, devid, NULL)); 625*392e836bSGavin Maltby } 626*392e836bSGavin Maltby 62783c4dfe9Sjg /* 62883c4dfe9Sjg * Unregister a device's devid 62983c4dfe9Sjg * Called as an instance detachs 63083c4dfe9Sjg * Invalidate the devid's devinfo reference 63183c4dfe9Sjg * Devid-path remains in the cache 63283c4dfe9Sjg */ 63383c4dfe9Sjg void 63483c4dfe9Sjg e_devid_cache_unregister(dev_info_t *dip) 63583c4dfe9Sjg { 63683c4dfe9Sjg nvp_devid_t *np; 63783c4dfe9Sjg list_t *listp; 63883c4dfe9Sjg 63983c4dfe9Sjg rw_enter(nvf_lock(dcfd_handle), RW_WRITER); 64083c4dfe9Sjg 64183c4dfe9Sjg listp = nvf_list(dcfd_handle); 64283c4dfe9Sjg for (np = list_head(listp); np; np = list_next(listp, np)) { 64383c4dfe9Sjg if (np->nvp_devid == NULL) 64483c4dfe9Sjg continue; 64583c4dfe9Sjg if ((np->nvp_flags & NVP_DEVID_DIP) && np->nvp_dip == dip) { 64683c4dfe9Sjg DEVID_LOG_UNREG((CE_CONT, 647a204de77Scth "unregister: %s\n", np->nvp_devpath)); 64883c4dfe9Sjg np->nvp_flags &= ~NVP_DEVID_DIP; 64983c4dfe9Sjg np->nvp_dip = NULL; 65083c4dfe9Sjg break; 65183c4dfe9Sjg } 65283c4dfe9Sjg } 65383c4dfe9Sjg 65483c4dfe9Sjg rw_exit(nvf_lock(dcfd_handle)); 65583c4dfe9Sjg } 65683c4dfe9Sjg 657*392e836bSGavin Maltby int 658*392e836bSGavin Maltby e_devid_cache_pathinfo(mdi_pathinfo_t *pip, ddi_devid_t devid) 659*392e836bSGavin Maltby { 660*392e836bSGavin Maltby char *path = mdi_pi_pathname(pip); 661*392e836bSGavin Maltby 662*392e836bSGavin Maltby return (e_devid_cache_register_cmn(mdi_pi_get_client(pip), devid, 663*392e836bSGavin Maltby path)); 664*392e836bSGavin Maltby } 665*392e836bSGavin Maltby 66683c4dfe9Sjg /* 66783c4dfe9Sjg * Purge 
devid cache of stale devids 66883c4dfe9Sjg */ 66983c4dfe9Sjg void 67083c4dfe9Sjg devid_cache_cleanup(void) 67183c4dfe9Sjg { 67283c4dfe9Sjg nvp_devid_t *np, *next; 67383c4dfe9Sjg list_t *listp; 67483c4dfe9Sjg int is_dirty = 0; 67583c4dfe9Sjg 67683c4dfe9Sjg rw_enter(nvf_lock(dcfd_handle), RW_WRITER); 67783c4dfe9Sjg 67883c4dfe9Sjg listp = nvf_list(dcfd_handle); 67983c4dfe9Sjg for (np = list_head(listp); np; np = next) { 68083c4dfe9Sjg next = list_next(listp, np); 68183c4dfe9Sjg if (np->nvp_devid == NULL) 68283c4dfe9Sjg continue; 68383c4dfe9Sjg if ((np->nvp_flags & NVP_DEVID_REGISTERED) == 0) { 68483c4dfe9Sjg DEVID_LOG_REMOVE((CE_CONT, 685a204de77Scth "cleanup: %s\n", np->nvp_devpath)); 68683c4dfe9Sjg if (!devid_cache_write_disable) { 68783c4dfe9Sjg nvf_mark_dirty(dcfd_handle); 68883c4dfe9Sjg is_dirty = 0; 68983c4dfe9Sjg } 69083c4dfe9Sjg devid_nvp_unlink_and_free(dcfd_handle, np); 69183c4dfe9Sjg } 69283c4dfe9Sjg } 69383c4dfe9Sjg 69483c4dfe9Sjg rw_exit(nvf_lock(dcfd_handle)); 69583c4dfe9Sjg 69683c4dfe9Sjg if (is_dirty) 69783c4dfe9Sjg nvf_wake_daemon(); 69883c4dfe9Sjg } 69983c4dfe9Sjg 70083c4dfe9Sjg 70183c4dfe9Sjg /* 70283c4dfe9Sjg * Build a list of dev_t's for a device/devid 70383c4dfe9Sjg * 70483c4dfe9Sjg * The effect of this function is cumulative, adding dev_t's 70583c4dfe9Sjg * for the device to the list of all dev_t's for a given 70683c4dfe9Sjg * devid. 70783c4dfe9Sjg */ 70883c4dfe9Sjg static void 70983c4dfe9Sjg e_devid_minor_to_devlist( 71083c4dfe9Sjg dev_info_t *dip, 71183c4dfe9Sjg char *minor_name, 71283c4dfe9Sjg int ndevts_alloced, 71383c4dfe9Sjg int *devtcntp, 71483c4dfe9Sjg dev_t *devtsp) 71583c4dfe9Sjg { 716b9ccdc5aScth int circ; 71783c4dfe9Sjg struct ddi_minor_data *dmdp; 71883c4dfe9Sjg int minor_all = 0; 71983c4dfe9Sjg int ndevts = *devtcntp; 72083c4dfe9Sjg 72183c4dfe9Sjg ASSERT(i_ddi_devi_attached(dip)); 72283c4dfe9Sjg 72383c4dfe9Sjg /* are we looking for a set of minor nodes? 
*/ 72483c4dfe9Sjg if ((minor_name == DEVID_MINOR_NAME_ALL) || 72583c4dfe9Sjg (minor_name == DEVID_MINOR_NAME_ALL_CHR) || 72683c4dfe9Sjg (minor_name == DEVID_MINOR_NAME_ALL_BLK)) 72783c4dfe9Sjg minor_all = 1; 72883c4dfe9Sjg 72983c4dfe9Sjg /* Find matching minor names */ 730b9ccdc5aScth ndi_devi_enter(dip, &circ); 73183c4dfe9Sjg for (dmdp = DEVI(dip)->devi_minor; dmdp; dmdp = dmdp->next) { 73283c4dfe9Sjg 73383c4dfe9Sjg /* Skip non-minors, and non matching minor names */ 73483c4dfe9Sjg if ((dmdp->type != DDM_MINOR) || ((minor_all == 0) && 73583c4dfe9Sjg strcmp(dmdp->ddm_name, minor_name))) 73683c4dfe9Sjg continue; 73783c4dfe9Sjg 73883c4dfe9Sjg /* filter out minor_all mismatches */ 73983c4dfe9Sjg if (minor_all && 74083c4dfe9Sjg (((minor_name == DEVID_MINOR_NAME_ALL_CHR) && 74183c4dfe9Sjg (dmdp->ddm_spec_type != S_IFCHR)) || 74283c4dfe9Sjg ((minor_name == DEVID_MINOR_NAME_ALL_BLK) && 74383c4dfe9Sjg (dmdp->ddm_spec_type != S_IFBLK)))) 74483c4dfe9Sjg continue; 74583c4dfe9Sjg 74683c4dfe9Sjg if (ndevts < ndevts_alloced) 74783c4dfe9Sjg devtsp[ndevts] = dmdp->ddm_dev; 74883c4dfe9Sjg ndevts++; 74983c4dfe9Sjg } 750b9ccdc5aScth ndi_devi_exit(dip, circ); 75183c4dfe9Sjg 75283c4dfe9Sjg *devtcntp = ndevts; 75383c4dfe9Sjg } 75483c4dfe9Sjg 75583c4dfe9Sjg /* 75683c4dfe9Sjg * Search for cached entries matching a devid 75783c4dfe9Sjg * Return two lists: 75883c4dfe9Sjg * a list of dev_info nodes, for those devices in the attached state 75983c4dfe9Sjg * a list of pathnames whose instances registered the given devid 76083c4dfe9Sjg * If the lists passed in are not sufficient to return the matching 76183c4dfe9Sjg * references, return the size of lists required. 76283c4dfe9Sjg * The dev_info nodes are returned with a hold that the caller must release. 
 */
static int
e_devid_cache_devi_path_lists(ddi_devid_t devid, int retmax,
	int *retndevis, dev_info_t **retdevis, int *retnpaths, char **retpaths)
{
	nvp_devid_t *np;
	int ndevis, npaths;
	dev_info_t *dip, *pdip;
	int circ;
	int maxdevis = 0;	/* total dip matches, stored or not */
	int maxpaths = 0;	/* total path matches, stored or not */
	list_t *listp;

	ndevis = 0;
	npaths = 0;
	listp = nvf_list(dcfd_handle);
	for (np = list_head(listp); np; np = list_next(listp, np)) {
		if (np->nvp_devid == NULL)
			continue;
		if (ddi_devid_valid(np->nvp_devid) != DDI_SUCCESS) {
			DEVIDERR((CE_CONT,
			    "find: invalid devid %s\n",
			    np->nvp_devpath));
			continue;
		}
		if (ddi_devid_compare(devid, np->nvp_devid) == 0) {
			DEVID_DEBUG2((CE_CONT,
			    "find: devid match: %s 0x%x\n",
			    np->nvp_devpath, np->nvp_flags));
			DEVID_LOG_MATCH(("find", devid, np->nvp_devpath));
			DEVID_LOG_PATHS((CE_CONT, "%s\n", np->nvp_devpath));

			/*
			 * Check if we have a cached devinfo reference for this
			 * devid.  Place a hold on it to prevent detach
			 * Otherwise, use the path instead.
			 * Note: returns with a hold on each dev_info
			 * node in the list.
			 */
			dip = NULL;
			if (np->nvp_flags & NVP_DEVID_DIP) {
				pdip = ddi_get_parent(np->nvp_dip);
				/*
				 * tryenter on the parent: if it fails, the
				 * node may be detaching, so fall back to the
				 * path rather than risk blocking/deadlock.
				 */
				if (ndi_devi_tryenter(pdip, &circ)) {
					dip = np->nvp_dip;
					ndi_hold_devi(dip);
					ndi_devi_exit(pdip, circ);
					ASSERT(!DEVI_IS_ATTACHING(dip));
					ASSERT(!DEVI_IS_DETACHING(dip));
				} else {
					DEVID_LOG_DETACH((CE_CONT,
					    "may be detaching: %s\n",
					    np->nvp_devpath));
				}
			}

			if (dip) {
				if (ndevis < retmax) {
					retdevis[ndevis++] = dip;
				} else {
					/* no room: drop the hold we took */
					ndi_rele_devi(dip);
				}
				maxdevis++;
			} else {
				if (npaths < retmax)
					retpaths[npaths++] = np->nvp_devpath;
				maxpaths++;
			}
		}
	}

	*retndevis = ndevis;
	*retnpaths = npaths;
	return (maxdevis > maxpaths ? maxdevis : maxpaths);
}


/*
 * Search the devid cache, returning dev_t list for all
 * device paths mapping to the device identified by the
 * given devid.
84383c4dfe9Sjg * 84483c4dfe9Sjg * Primary interface used by ddi_lyr_devid_to_devlist() 84583c4dfe9Sjg */ 84683c4dfe9Sjg int 84783c4dfe9Sjg e_devid_cache_to_devt_list(ddi_devid_t devid, char *minor_name, 84883c4dfe9Sjg int *retndevts, dev_t **retdevts) 84983c4dfe9Sjg { 85083c4dfe9Sjg char *path, **paths; 85183c4dfe9Sjg int i, j, n; 85283c4dfe9Sjg dev_t *devts, *udevts; 85383c4dfe9Sjg dev_t tdevt; 85483c4dfe9Sjg int ndevts, undevts, ndevts_alloced; 85583c4dfe9Sjg dev_info_t *devi, **devis; 85683c4dfe9Sjg int ndevis, npaths, nalloced; 85783c4dfe9Sjg ddi_devid_t match_devid; 85883c4dfe9Sjg 85983c4dfe9Sjg DEVID_LOG_FIND(("find", devid, NULL)); 86083c4dfe9Sjg 86183c4dfe9Sjg ASSERT(ddi_devid_valid(devid) == DDI_SUCCESS); 86283c4dfe9Sjg if (ddi_devid_valid(devid) != DDI_SUCCESS) { 86383c4dfe9Sjg DEVID_LOG_ERR(("invalid devid", devid, NULL)); 86483c4dfe9Sjg return (DDI_FAILURE); 86583c4dfe9Sjg } 86683c4dfe9Sjg 86783c4dfe9Sjg nalloced = 128; 86883c4dfe9Sjg 86983c4dfe9Sjg for (;;) { 87083c4dfe9Sjg paths = kmem_zalloc(nalloced * sizeof (char *), KM_SLEEP); 87183c4dfe9Sjg devis = kmem_zalloc(nalloced * sizeof (dev_info_t *), KM_SLEEP); 87283c4dfe9Sjg 87383c4dfe9Sjg rw_enter(nvf_lock(dcfd_handle), RW_READER); 87483c4dfe9Sjg n = e_devid_cache_devi_path_lists(devid, nalloced, 875a204de77Scth &ndevis, devis, &npaths, paths); 87683c4dfe9Sjg if (n <= nalloced) 87783c4dfe9Sjg break; 87883c4dfe9Sjg rw_exit(nvf_lock(dcfd_handle)); 87983c4dfe9Sjg for (i = 0; i < ndevis; i++) 88083c4dfe9Sjg ndi_rele_devi(devis[i]); 88183c4dfe9Sjg kmem_free(paths, nalloced * sizeof (char *)); 88283c4dfe9Sjg kmem_free(devis, nalloced * sizeof (dev_info_t *)); 88383c4dfe9Sjg nalloced = n + 128; 88483c4dfe9Sjg } 88583c4dfe9Sjg 88683c4dfe9Sjg for (i = 0; i < npaths; i++) { 88783c4dfe9Sjg path = i_ddi_strdup(paths[i], KM_SLEEP); 88883c4dfe9Sjg paths[i] = path; 88983c4dfe9Sjg } 89083c4dfe9Sjg rw_exit(nvf_lock(dcfd_handle)); 89183c4dfe9Sjg 89283c4dfe9Sjg if (ndevis == 0 && npaths == 0) { 89383c4dfe9Sjg 
DEVID_LOG_ERR(("no devid found", devid, NULL)); 89483c4dfe9Sjg kmem_free(paths, nalloced * sizeof (char *)); 89583c4dfe9Sjg kmem_free(devis, nalloced * sizeof (dev_info_t *)); 89683c4dfe9Sjg return (DDI_FAILURE); 89783c4dfe9Sjg } 89883c4dfe9Sjg 89983c4dfe9Sjg ndevts_alloced = 128; 90083c4dfe9Sjg restart: 90183c4dfe9Sjg ndevts = 0; 90283c4dfe9Sjg devts = kmem_alloc(ndevts_alloced * sizeof (dev_t), KM_SLEEP); 90383c4dfe9Sjg for (i = 0; i < ndevis; i++) { 90483c4dfe9Sjg ASSERT(!DEVI_IS_ATTACHING(devis[i])); 90583c4dfe9Sjg ASSERT(!DEVI_IS_DETACHING(devis[i])); 90683c4dfe9Sjg e_devid_minor_to_devlist(devis[i], minor_name, 907a204de77Scth ndevts_alloced, &ndevts, devts); 90883c4dfe9Sjg if (ndevts > ndevts_alloced) { 90983c4dfe9Sjg kmem_free(devts, ndevts_alloced * sizeof (dev_t)); 91083c4dfe9Sjg ndevts_alloced += 128; 91183c4dfe9Sjg goto restart; 91283c4dfe9Sjg } 91383c4dfe9Sjg } 91483c4dfe9Sjg for (i = 0; i < npaths; i++) { 91583c4dfe9Sjg DEVID_LOG_LOOKUP((CE_CONT, "lookup %s\n", paths[i])); 91683c4dfe9Sjg devi = e_ddi_hold_devi_by_path(paths[i], 0); 91783c4dfe9Sjg if (devi == NULL) { 91883c4dfe9Sjg DEVID_LOG_STALE(("stale device reference", 91983c4dfe9Sjg devid, paths[i])); 92083c4dfe9Sjg continue; 92183c4dfe9Sjg } 92283c4dfe9Sjg /* 92383c4dfe9Sjg * Verify the newly attached device registered a matching devid 92483c4dfe9Sjg */ 92583c4dfe9Sjg if (i_ddi_devi_get_devid(DDI_DEV_T_ANY, devi, 92683c4dfe9Sjg &match_devid) != DDI_SUCCESS) { 92783c4dfe9Sjg DEVIDERR((CE_CONT, 92883c4dfe9Sjg "%s: no devid registered on attach\n", 92983c4dfe9Sjg paths[i])); 93083c4dfe9Sjg ddi_release_devi(devi); 93183c4dfe9Sjg continue; 93283c4dfe9Sjg } 93383c4dfe9Sjg 93483c4dfe9Sjg if (ddi_devid_compare(devid, match_devid) != 0) { 93583c4dfe9Sjg DEVID_LOG_STALE(("new devid registered", 93683c4dfe9Sjg devid, paths[i])); 93783c4dfe9Sjg ddi_release_devi(devi); 93883c4dfe9Sjg ddi_devid_free(match_devid); 93983c4dfe9Sjg continue; 94083c4dfe9Sjg } 94183c4dfe9Sjg ddi_devid_free(match_devid); 
94283c4dfe9Sjg 94383c4dfe9Sjg e_devid_minor_to_devlist(devi, minor_name, 944a204de77Scth ndevts_alloced, &ndevts, devts); 94583c4dfe9Sjg ddi_release_devi(devi); 94683c4dfe9Sjg if (ndevts > ndevts_alloced) { 94783c4dfe9Sjg kmem_free(devts, 94883c4dfe9Sjg ndevts_alloced * sizeof (dev_t)); 94983c4dfe9Sjg ndevts_alloced += 128; 95083c4dfe9Sjg goto restart; 95183c4dfe9Sjg } 95283c4dfe9Sjg } 95383c4dfe9Sjg 95483c4dfe9Sjg /* drop hold from e_devid_cache_devi_path_lists */ 95583c4dfe9Sjg for (i = 0; i < ndevis; i++) { 95683c4dfe9Sjg ndi_rele_devi(devis[i]); 95783c4dfe9Sjg } 95883c4dfe9Sjg for (i = 0; i < npaths; i++) { 95983c4dfe9Sjg kmem_free(paths[i], strlen(paths[i]) + 1); 96083c4dfe9Sjg } 96183c4dfe9Sjg kmem_free(paths, nalloced * sizeof (char *)); 96283c4dfe9Sjg kmem_free(devis, nalloced * sizeof (dev_info_t *)); 96383c4dfe9Sjg 96483c4dfe9Sjg if (ndevts == 0) { 96583c4dfe9Sjg DEVID_LOG_ERR(("no devid found", devid, NULL)); 96683c4dfe9Sjg kmem_free(devts, ndevts_alloced * sizeof (dev_t)); 96783c4dfe9Sjg return (DDI_FAILURE); 96883c4dfe9Sjg } 96983c4dfe9Sjg 97083c4dfe9Sjg /* 97183c4dfe9Sjg * Build the final list of sorted dev_t's with duplicates collapsed so 97283c4dfe9Sjg * returned results are consistent. This prevents implementation 97383c4dfe9Sjg * artifacts from causing unnecessary changes in SVM namespace. 
97483c4dfe9Sjg */ 97583c4dfe9Sjg /* bubble sort */ 97683c4dfe9Sjg for (i = 0; i < (ndevts - 1); i++) { 97783c4dfe9Sjg for (j = 0; j < ((ndevts - 1) - i); j++) { 97883c4dfe9Sjg if (devts[j + 1] < devts[j]) { 97983c4dfe9Sjg tdevt = devts[j]; 98083c4dfe9Sjg devts[j] = devts[j + 1]; 98183c4dfe9Sjg devts[j + 1] = tdevt; 98283c4dfe9Sjg } 98383c4dfe9Sjg } 98483c4dfe9Sjg } 98583c4dfe9Sjg 98683c4dfe9Sjg /* determine number of unique values */ 98783c4dfe9Sjg for (undevts = ndevts, i = 1; i < ndevts; i++) { 98883c4dfe9Sjg if (devts[i - 1] == devts[i]) 98983c4dfe9Sjg undevts--; 99083c4dfe9Sjg } 99183c4dfe9Sjg 99283c4dfe9Sjg /* allocate unique */ 99383c4dfe9Sjg udevts = kmem_alloc(undevts * sizeof (dev_t), KM_SLEEP); 99483c4dfe9Sjg 99583c4dfe9Sjg /* copy unique */ 99683c4dfe9Sjg udevts[0] = devts[0]; 99783c4dfe9Sjg for (i = 1, j = 1; i < ndevts; i++) { 99883c4dfe9Sjg if (devts[i - 1] != devts[i]) 99983c4dfe9Sjg udevts[j++] = devts[i]; 100083c4dfe9Sjg } 100183c4dfe9Sjg ASSERT(j == undevts); 100283c4dfe9Sjg 100383c4dfe9Sjg kmem_free(devts, ndevts_alloced * sizeof (dev_t)); 100483c4dfe9Sjg 100583c4dfe9Sjg *retndevts = undevts; 100683c4dfe9Sjg *retdevts = udevts; 100783c4dfe9Sjg 100883c4dfe9Sjg return (DDI_SUCCESS); 100983c4dfe9Sjg } 101083c4dfe9Sjg 101183c4dfe9Sjg void 101283c4dfe9Sjg e_devid_cache_free_devt_list(int ndevts, dev_t *devt_list) 101383c4dfe9Sjg { 101483c4dfe9Sjg kmem_free(devt_list, ndevts * sizeof (dev_t *)); 101583c4dfe9Sjg } 101683c4dfe9Sjg 1017*392e836bSGavin Maltby /* 1018*392e836bSGavin Maltby * If given a full path and NULL ua, search for a cache entry 1019*392e836bSGavin Maltby * whose path matches the full path. On a cache hit duplicate the 1020*392e836bSGavin Maltby * devid of the matched entry into the given devid (caller 1021*392e836bSGavin Maltby * must free); nodenamebuf is not touched for this usage. 
1022*392e836bSGavin Maltby * 1023*392e836bSGavin Maltby * Given a path and a non-NULL unit address, search the cache for any entry 1024*392e836bSGavin Maltby * matching "<path>/%@<unit-address>" where '%' is a wildcard meaning 1025*392e836bSGavin Maltby * any node name. The path should not end a '/'. On a cache hit 1026*392e836bSGavin Maltby * duplicate the devid as before (caller must free) and copy into 1027*392e836bSGavin Maltby * the caller-provided nodenamebuf (if not NULL) the nodename of the 1028*392e836bSGavin Maltby * matched entry. 1029*392e836bSGavin Maltby * 1030*392e836bSGavin Maltby * We must not make use of nvp_dip since that may be NULL for cached 1031*392e836bSGavin Maltby * entries that are not present in the current tree. 1032*392e836bSGavin Maltby */ 1033*392e836bSGavin Maltby int 1034*392e836bSGavin Maltby e_devid_cache_path_to_devid(char *path, char *ua, 1035*392e836bSGavin Maltby char *nodenamebuf, ddi_devid_t *devidp) 1036*392e836bSGavin Maltby { 1037*392e836bSGavin Maltby size_t pathlen, ualen; 1038*392e836bSGavin Maltby int rv = DDI_FAILURE; 1039*392e836bSGavin Maltby nvp_devid_t *np; 1040*392e836bSGavin Maltby list_t *listp; 1041*392e836bSGavin Maltby char *cand; 1042*392e836bSGavin Maltby 1043*392e836bSGavin Maltby if (path == NULL || *path == '\0' || (ua && *ua == '\0') || 1044*392e836bSGavin Maltby devidp == NULL) 1045*392e836bSGavin Maltby return (DDI_FAILURE); 1046*392e836bSGavin Maltby 1047*392e836bSGavin Maltby *devidp = NULL; 1048*392e836bSGavin Maltby 1049*392e836bSGavin Maltby if (ua) { 1050*392e836bSGavin Maltby pathlen = strlen(path); 1051*392e836bSGavin Maltby ualen = strlen(ua); 1052*392e836bSGavin Maltby } 1053*392e836bSGavin Maltby 1054*392e836bSGavin Maltby rw_enter(nvf_lock(dcfd_handle), RW_READER); 1055*392e836bSGavin Maltby 1056*392e836bSGavin Maltby listp = nvf_list(dcfd_handle); 1057*392e836bSGavin Maltby for (np = list_head(listp); np; np = list_next(listp, np)) { 1058*392e836bSGavin Maltby size_t nodelen, candlen, 
n; 1059*392e836bSGavin Maltby ddi_devid_t devid_dup; 1060*392e836bSGavin Maltby char *uasep, *node; 1061*392e836bSGavin Maltby 1062*392e836bSGavin Maltby if (np->nvp_devid == NULL) 1063*392e836bSGavin Maltby continue; 1064*392e836bSGavin Maltby 1065*392e836bSGavin Maltby if (ddi_devid_valid(np->nvp_devid) != DDI_SUCCESS) { 1066*392e836bSGavin Maltby DEVIDERR((CE_CONT, 1067*392e836bSGavin Maltby "pathsearch: invalid devid %s\n", 1068*392e836bSGavin Maltby np->nvp_devpath)); 1069*392e836bSGavin Maltby continue; 1070*392e836bSGavin Maltby } 1071*392e836bSGavin Maltby 1072*392e836bSGavin Maltby cand = np->nvp_devpath; /* candidate path */ 1073*392e836bSGavin Maltby 1074*392e836bSGavin Maltby /* If a full pathname was provided the compare is easy */ 1075*392e836bSGavin Maltby if (ua == NULL) { 1076*392e836bSGavin Maltby if (strcmp(cand, path) == 0) 1077*392e836bSGavin Maltby goto match; 1078*392e836bSGavin Maltby else 1079*392e836bSGavin Maltby continue; 1080*392e836bSGavin Maltby } 1081*392e836bSGavin Maltby 1082*392e836bSGavin Maltby /* 1083*392e836bSGavin Maltby * The compare for initial path plus ua and unknown nodename 1084*392e836bSGavin Maltby * is trickier. 1085*392e836bSGavin Maltby * 1086*392e836bSGavin Maltby * Does the initial path component match 'path'? 1087*392e836bSGavin Maltby */ 1088*392e836bSGavin Maltby if (strncmp(path, cand, pathlen) != 0) 1089*392e836bSGavin Maltby continue; 1090*392e836bSGavin Maltby 1091*392e836bSGavin Maltby candlen = strlen(cand); 1092*392e836bSGavin Maltby 1093*392e836bSGavin Maltby /* 1094*392e836bSGavin Maltby * The next character must be a '/' and there must be no 1095*392e836bSGavin Maltby * further '/' thereafter. Begin by checking that the 1096*392e836bSGavin Maltby * candidate is long enough to include at mininum a 1097*392e836bSGavin Maltby * "/<nodename>@<ua>" after the initial portion already 1098*392e836bSGavin Maltby * matched assuming a nodename length of 1. 
1099*392e836bSGavin Maltby */ 1100*392e836bSGavin Maltby if (candlen < pathlen + 1 + 1 + 1 + ualen || 1101*392e836bSGavin Maltby cand[pathlen] != '/' || 1102*392e836bSGavin Maltby strchr(cand + pathlen + 1, '/') != NULL) 1103*392e836bSGavin Maltby continue; 1104*392e836bSGavin Maltby 1105*392e836bSGavin Maltby node = cand + pathlen + 1; /* <node>@<ua> string */ 1106*392e836bSGavin Maltby 1107*392e836bSGavin Maltby /* 1108*392e836bSGavin Maltby * Find the '@' before the unit address. Check for 1109*392e836bSGavin Maltby * unit address match. 1110*392e836bSGavin Maltby */ 1111*392e836bSGavin Maltby if ((uasep = strchr(node, '@')) == NULL) 1112*392e836bSGavin Maltby continue; 1113*392e836bSGavin Maltby 1114*392e836bSGavin Maltby /* 1115*392e836bSGavin Maltby * Check we still have enough length and that ua matches 1116*392e836bSGavin Maltby */ 1117*392e836bSGavin Maltby nodelen = (uintptr_t)uasep - (uintptr_t)node; 1118*392e836bSGavin Maltby if (candlen < pathlen + 1 + nodelen + 1 + ualen || 1119*392e836bSGavin Maltby strncmp(ua, uasep + 1, ualen) != 0) 1120*392e836bSGavin Maltby continue; 1121*392e836bSGavin Maltby match: 1122*392e836bSGavin Maltby n = ddi_devid_sizeof(np->nvp_devid); 1123*392e836bSGavin Maltby devid_dup = kmem_alloc(n, KM_SLEEP); /* caller must free */ 1124*392e836bSGavin Maltby (void) bcopy(np->nvp_devid, devid_dup, n); 1125*392e836bSGavin Maltby *devidp = devid_dup; 1126*392e836bSGavin Maltby 1127*392e836bSGavin Maltby if (ua && nodenamebuf) { 1128*392e836bSGavin Maltby (void) strncpy(nodenamebuf, node, nodelen); 1129*392e836bSGavin Maltby nodenamebuf[nodelen] = '\0'; 1130*392e836bSGavin Maltby } 1131*392e836bSGavin Maltby 1132*392e836bSGavin Maltby rv = DDI_SUCCESS; 1133*392e836bSGavin Maltby break; 1134*392e836bSGavin Maltby } 1135*392e836bSGavin Maltby 1136*392e836bSGavin Maltby rw_exit(nvf_lock(dcfd_handle)); 1137*392e836bSGavin Maltby 1138*392e836bSGavin Maltby return (rv); 1139*392e836bSGavin Maltby } 1140*392e836bSGavin Maltby 
#ifdef	DEBUG
/*
 * Debug helper: print a tagged devid (and optionally the device path)
 * on the console.  The devid is rendered with the string encoder and
 * the encoded form is released before returning.
 */
static void
devid_log(char *fmt, ddi_devid_t devid, char *path)
{
	char *encoded;

	encoded = ddi_devid_str_encode(devid, NULL);
	if (path == NULL)
		cmn_err(CE_CONT, "%s: %s\n", fmt, encoded);
	else
		cmn_err(CE_CONT, "%s: %s %s\n", fmt, path, encoded);
	ddi_devid_str_free(encoded);
}
#endif	/* DEBUG */