/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2013 by Delphix. All rights reserved.
 * Copyright 2013 Nexenta Systems, Inc.  All rights reserved.
 * Copyright (c) 2013 Joyent, Inc.  All rights reserved.
26fa9e4066Sahrens */ 27fa9e4066Sahrens 28fa9e4066Sahrens #include <sys/zfs_context.h> 29dcba9f3fSGeorge Wilson #include <sys/spa_impl.h> 30e7cbe64fSgw #include <sys/refcount.h> 31fa9e4066Sahrens #include <sys/vdev_disk.h> 32fa9e4066Sahrens #include <sys/vdev_impl.h> 33fa9e4066Sahrens #include <sys/fs/zfs.h> 34fa9e4066Sahrens #include <sys/zio.h> 35afefbcddSeschrock #include <sys/sunldi.h> 364263d13fSGeorge Wilson #include <sys/efi_partition.h> 3751ece835Seschrock #include <sys/fm/fs/zfs.h> 38fa9e4066Sahrens 39fa9e4066Sahrens /* 40fa9e4066Sahrens * Virtual device vector for disks. 41fa9e4066Sahrens */ 42fa9e4066Sahrens 43fa9e4066Sahrens extern ldi_ident_t zfs_li; 44fa9e4066Sahrens 45*39cddb10SJoshua M. Clulow static void vdev_disk_close(vdev_t *); 46*39cddb10SJoshua M. Clulow 47*39cddb10SJoshua M. Clulow typedef struct vdev_disk_ldi_cb { 48*39cddb10SJoshua M. Clulow list_node_t lcb_next; 49*39cddb10SJoshua M. Clulow ldi_callback_id_t lcb_id; 50*39cddb10SJoshua M. Clulow } vdev_disk_ldi_cb_t; 51*39cddb10SJoshua M. Clulow 52*39cddb10SJoshua M. Clulow static void 53*39cddb10SJoshua M. Clulow vdev_disk_alloc(vdev_t *vd) 54*39cddb10SJoshua M. Clulow { 55*39cddb10SJoshua M. Clulow vdev_disk_t *dvd; 56*39cddb10SJoshua M. Clulow 57*39cddb10SJoshua M. Clulow dvd = vd->vdev_tsd = kmem_zalloc(sizeof (vdev_disk_t), KM_SLEEP); 58*39cddb10SJoshua M. Clulow /* 59*39cddb10SJoshua M. Clulow * Create the LDI event callback list. 60*39cddb10SJoshua M. Clulow */ 61*39cddb10SJoshua M. Clulow list_create(&dvd->vd_ldi_cbs, sizeof (vdev_disk_ldi_cb_t), 62*39cddb10SJoshua M. Clulow offsetof(vdev_disk_ldi_cb_t, lcb_next)); 63*39cddb10SJoshua M. Clulow } 64*39cddb10SJoshua M. Clulow 65*39cddb10SJoshua M. Clulow static void 66*39cddb10SJoshua M. Clulow vdev_disk_free(vdev_t *vd) 67*39cddb10SJoshua M. Clulow { 68*39cddb10SJoshua M. Clulow vdev_disk_t *dvd = vd->vdev_tsd; 69*39cddb10SJoshua M. Clulow vdev_disk_ldi_cb_t *lcb; 70*39cddb10SJoshua M. Clulow 71*39cddb10SJoshua M. 
Clulow if (dvd == NULL) 72*39cddb10SJoshua M. Clulow return; 73*39cddb10SJoshua M. Clulow 74*39cddb10SJoshua M. Clulow /* 75*39cddb10SJoshua M. Clulow * We have already closed the LDI handle. Clean up the LDI event 76*39cddb10SJoshua M. Clulow * callbacks and free vd->vdev_tsd. 77*39cddb10SJoshua M. Clulow */ 78*39cddb10SJoshua M. Clulow while ((lcb = list_head(&dvd->vd_ldi_cbs)) != NULL) { 79*39cddb10SJoshua M. Clulow list_remove(&dvd->vd_ldi_cbs, lcb); 80*39cddb10SJoshua M. Clulow (void) ldi_ev_remove_callbacks(lcb->lcb_id); 81*39cddb10SJoshua M. Clulow kmem_free(lcb, sizeof (vdev_disk_ldi_cb_t)); 82*39cddb10SJoshua M. Clulow } 83*39cddb10SJoshua M. Clulow list_destroy(&dvd->vd_ldi_cbs); 84*39cddb10SJoshua M. Clulow kmem_free(dvd, sizeof (vdev_disk_t)); 85*39cddb10SJoshua M. Clulow vd->vdev_tsd = NULL; 86*39cddb10SJoshua M. Clulow } 87*39cddb10SJoshua M. Clulow 88*39cddb10SJoshua M. Clulow /* ARGSUSED */ 89*39cddb10SJoshua M. Clulow static int 90*39cddb10SJoshua M. Clulow vdev_disk_off_notify(ldi_handle_t lh, ldi_ev_cookie_t ecookie, void *arg, 91*39cddb10SJoshua M. Clulow void *ev_data) 92*39cddb10SJoshua M. Clulow { 93*39cddb10SJoshua M. Clulow vdev_t *vd = (vdev_t *)arg; 94*39cddb10SJoshua M. Clulow vdev_disk_t *dvd = vd->vdev_tsd; 95*39cddb10SJoshua M. Clulow 96*39cddb10SJoshua M. Clulow /* 97*39cddb10SJoshua M. Clulow * Ignore events other than offline. 98*39cddb10SJoshua M. Clulow */ 99*39cddb10SJoshua M. Clulow if (strcmp(ldi_ev_get_type(ecookie), LDI_EV_OFFLINE) != 0) 100*39cddb10SJoshua M. Clulow return (LDI_EV_SUCCESS); 101*39cddb10SJoshua M. Clulow 102*39cddb10SJoshua M. Clulow /* 103*39cddb10SJoshua M. Clulow * All LDI handles must be closed for the state change to succeed, so 104*39cddb10SJoshua M. Clulow * call on vdev_disk_close() to do this. 105*39cddb10SJoshua M. Clulow * 106*39cddb10SJoshua M. Clulow * We inform vdev_disk_close that it is being called from offline 107*39cddb10SJoshua M. 
Clulow * notify context so it will defer cleanup of LDI event callbacks and 108*39cddb10SJoshua M. Clulow * freeing of vd->vdev_tsd to the offline finalize or a reopen. 109*39cddb10SJoshua M. Clulow */ 110*39cddb10SJoshua M. Clulow dvd->vd_ldi_offline = B_TRUE; 111*39cddb10SJoshua M. Clulow vdev_disk_close(vd); 112*39cddb10SJoshua M. Clulow 113*39cddb10SJoshua M. Clulow /* 114*39cddb10SJoshua M. Clulow * Now that the device is closed, request that the spa_async_thread 115*39cddb10SJoshua M. Clulow * mark the device as REMOVED and notify FMA of the removal. 116*39cddb10SJoshua M. Clulow */ 117*39cddb10SJoshua M. Clulow zfs_post_remove(vd->vdev_spa, vd); 118*39cddb10SJoshua M. Clulow vd->vdev_remove_wanted = B_TRUE; 119*39cddb10SJoshua M. Clulow spa_async_request(vd->vdev_spa, SPA_ASYNC_REMOVE); 120*39cddb10SJoshua M. Clulow 121*39cddb10SJoshua M. Clulow return (LDI_EV_SUCCESS); 122*39cddb10SJoshua M. Clulow } 123*39cddb10SJoshua M. Clulow 124*39cddb10SJoshua M. Clulow /* ARGSUSED */ 125*39cddb10SJoshua M. Clulow static void 126*39cddb10SJoshua M. Clulow vdev_disk_off_finalize(ldi_handle_t lh, ldi_ev_cookie_t ecookie, 127*39cddb10SJoshua M. Clulow int ldi_result, void *arg, void *ev_data) 128*39cddb10SJoshua M. Clulow { 129*39cddb10SJoshua M. Clulow vdev_t *vd = (vdev_t *)arg; 130*39cddb10SJoshua M. Clulow 131*39cddb10SJoshua M. Clulow /* 132*39cddb10SJoshua M. Clulow * Ignore events other than offline. 133*39cddb10SJoshua M. Clulow */ 134*39cddb10SJoshua M. Clulow if (strcmp(ldi_ev_get_type(ecookie), LDI_EV_OFFLINE) != 0) 135*39cddb10SJoshua M. Clulow return; 136*39cddb10SJoshua M. Clulow 137*39cddb10SJoshua M. Clulow /* 138*39cddb10SJoshua M. Clulow * We have already closed the LDI handle in notify. 139*39cddb10SJoshua M. Clulow * Clean up the LDI event callbacks and free vd->vdev_tsd. 140*39cddb10SJoshua M. Clulow */ 141*39cddb10SJoshua M. Clulow vdev_disk_free(vd); 142*39cddb10SJoshua M. Clulow 143*39cddb10SJoshua M. Clulow /* 144*39cddb10SJoshua M. 
Clulow * Request that the vdev be reopened if the offline state change was 145*39cddb10SJoshua M. Clulow * unsuccessful. 146*39cddb10SJoshua M. Clulow */ 147*39cddb10SJoshua M. Clulow if (ldi_result != LDI_EV_SUCCESS) { 148*39cddb10SJoshua M. Clulow vd->vdev_probe_wanted = B_TRUE; 149*39cddb10SJoshua M. Clulow spa_async_request(vd->vdev_spa, SPA_ASYNC_PROBE); 150*39cddb10SJoshua M. Clulow } 151*39cddb10SJoshua M. Clulow } 152*39cddb10SJoshua M. Clulow 153*39cddb10SJoshua M. Clulow static ldi_ev_callback_t vdev_disk_off_callb = { 154*39cddb10SJoshua M. Clulow .cb_vers = LDI_EV_CB_VERS, 155*39cddb10SJoshua M. Clulow .cb_notify = vdev_disk_off_notify, 156*39cddb10SJoshua M. Clulow .cb_finalize = vdev_disk_off_finalize 157*39cddb10SJoshua M. Clulow }; 158*39cddb10SJoshua M. Clulow 159*39cddb10SJoshua M. Clulow /* ARGSUSED */ 160*39cddb10SJoshua M. Clulow static void 161*39cddb10SJoshua M. Clulow vdev_disk_dgrd_finalize(ldi_handle_t lh, ldi_ev_cookie_t ecookie, 162*39cddb10SJoshua M. Clulow int ldi_result, void *arg, void *ev_data) 163*39cddb10SJoshua M. Clulow { 164*39cddb10SJoshua M. Clulow vdev_t *vd = (vdev_t *)arg; 165*39cddb10SJoshua M. Clulow 166*39cddb10SJoshua M. Clulow /* 167*39cddb10SJoshua M. Clulow * Ignore events other than degrade. 168*39cddb10SJoshua M. Clulow */ 169*39cddb10SJoshua M. Clulow if (strcmp(ldi_ev_get_type(ecookie), LDI_EV_DEGRADE) != 0) 170*39cddb10SJoshua M. Clulow return; 171*39cddb10SJoshua M. Clulow 172*39cddb10SJoshua M. Clulow /* 173*39cddb10SJoshua M. Clulow * Degrade events always succeed. Mark the vdev as degraded. 174*39cddb10SJoshua M. Clulow * This status is purely informative for the user. 175*39cddb10SJoshua M. Clulow */ 176*39cddb10SJoshua M. Clulow (void) vdev_degrade(vd->vdev_spa, vd->vdev_guid, 0); 177*39cddb10SJoshua M. Clulow } 178*39cddb10SJoshua M. Clulow 179*39cddb10SJoshua M. Clulow static ldi_ev_callback_t vdev_disk_dgrd_callb = { 180*39cddb10SJoshua M. Clulow .cb_vers = LDI_EV_CB_VERS, 181*39cddb10SJoshua M. 
Clulow .cb_notify = NULL, 182*39cddb10SJoshua M. Clulow .cb_finalize = vdev_disk_dgrd_finalize 183*39cddb10SJoshua M. Clulow }; 184*39cddb10SJoshua M. Clulow 185dcba9f3fSGeorge Wilson static void 186dcba9f3fSGeorge Wilson vdev_disk_hold(vdev_t *vd) 187dcba9f3fSGeorge Wilson { 188dcba9f3fSGeorge Wilson ddi_devid_t devid; 189dcba9f3fSGeorge Wilson char *minor; 190dcba9f3fSGeorge Wilson 191dcba9f3fSGeorge Wilson ASSERT(spa_config_held(vd->vdev_spa, SCL_STATE, RW_WRITER)); 192dcba9f3fSGeorge Wilson 193dcba9f3fSGeorge Wilson /* 194dcba9f3fSGeorge Wilson * We must have a pathname, and it must be absolute. 195dcba9f3fSGeorge Wilson */ 196dcba9f3fSGeorge Wilson if (vd->vdev_path == NULL || vd->vdev_path[0] != '/') 197dcba9f3fSGeorge Wilson return; 198dcba9f3fSGeorge Wilson 199dcba9f3fSGeorge Wilson /* 200dcba9f3fSGeorge Wilson * Only prefetch path and devid info if the device has 201dcba9f3fSGeorge Wilson * never been opened. 202dcba9f3fSGeorge Wilson */ 203dcba9f3fSGeorge Wilson if (vd->vdev_tsd != NULL) 204dcba9f3fSGeorge Wilson return; 205dcba9f3fSGeorge Wilson 206dcba9f3fSGeorge Wilson if (vd->vdev_wholedisk == -1ULL) { 207dcba9f3fSGeorge Wilson size_t len = strlen(vd->vdev_path) + 3; 208dcba9f3fSGeorge Wilson char *buf = kmem_alloc(len, KM_SLEEP); 209dcba9f3fSGeorge Wilson 210dcba9f3fSGeorge Wilson (void) snprintf(buf, len, "%ss0", vd->vdev_path); 211dcba9f3fSGeorge Wilson 212dcba9f3fSGeorge Wilson (void) ldi_vp_from_name(buf, &vd->vdev_name_vp); 213dcba9f3fSGeorge Wilson kmem_free(buf, len); 214dcba9f3fSGeorge Wilson } 215dcba9f3fSGeorge Wilson 216dcba9f3fSGeorge Wilson if (vd->vdev_name_vp == NULL) 217dcba9f3fSGeorge Wilson (void) ldi_vp_from_name(vd->vdev_path, &vd->vdev_name_vp); 218dcba9f3fSGeorge Wilson 219dcba9f3fSGeorge Wilson if (vd->vdev_devid != NULL && 220dcba9f3fSGeorge Wilson ddi_devid_str_decode(vd->vdev_devid, &devid, &minor) == 0) { 221dcba9f3fSGeorge Wilson (void) ldi_vp_from_devid(devid, minor, &vd->vdev_devid_vp); 222dcba9f3fSGeorge Wilson 
ddi_devid_str_free(minor); 223dcba9f3fSGeorge Wilson ddi_devid_free(devid); 224dcba9f3fSGeorge Wilson } 225dcba9f3fSGeorge Wilson } 226dcba9f3fSGeorge Wilson 227dcba9f3fSGeorge Wilson static void 228dcba9f3fSGeorge Wilson vdev_disk_rele(vdev_t *vd) 229dcba9f3fSGeorge Wilson { 230dcba9f3fSGeorge Wilson ASSERT(spa_config_held(vd->vdev_spa, SCL_STATE, RW_WRITER)); 231dcba9f3fSGeorge Wilson 232dcba9f3fSGeorge Wilson if (vd->vdev_name_vp) { 233dcba9f3fSGeorge Wilson VN_RELE_ASYNC(vd->vdev_name_vp, 234dcba9f3fSGeorge Wilson dsl_pool_vnrele_taskq(vd->vdev_spa->spa_dsl_pool)); 235dcba9f3fSGeorge Wilson vd->vdev_name_vp = NULL; 236dcba9f3fSGeorge Wilson } 237dcba9f3fSGeorge Wilson if (vd->vdev_devid_vp) { 238dcba9f3fSGeorge Wilson VN_RELE_ASYNC(vd->vdev_devid_vp, 239dcba9f3fSGeorge Wilson dsl_pool_vnrele_taskq(vd->vdev_spa->spa_dsl_pool)); 240dcba9f3fSGeorge Wilson vd->vdev_devid_vp = NULL; 241dcba9f3fSGeorge Wilson } 242dcba9f3fSGeorge Wilson } 243dcba9f3fSGeorge Wilson 2444263d13fSGeorge Wilson static uint64_t 2454263d13fSGeorge Wilson vdev_disk_get_space(vdev_t *vd, uint64_t capacity, uint_t blksz) 2464263d13fSGeorge Wilson { 2474263d13fSGeorge Wilson ASSERT(vd->vdev_wholedisk); 2484263d13fSGeorge Wilson 2494263d13fSGeorge Wilson vdev_disk_t *dvd = vd->vdev_tsd; 2504263d13fSGeorge Wilson dk_efi_t dk_ioc; 2514263d13fSGeorge Wilson efi_gpt_t *efi; 2524263d13fSGeorge Wilson uint64_t avail_space = 0; 2534263d13fSGeorge Wilson int efisize = EFI_LABEL_SIZE * 2; 2544263d13fSGeorge Wilson 2554263d13fSGeorge Wilson dk_ioc.dki_data = kmem_alloc(efisize, KM_SLEEP); 2564263d13fSGeorge Wilson dk_ioc.dki_lba = 1; 2574263d13fSGeorge Wilson dk_ioc.dki_length = efisize; 2584263d13fSGeorge Wilson dk_ioc.dki_data_64 = (uint64_t)(uintptr_t)dk_ioc.dki_data; 2594263d13fSGeorge Wilson efi = dk_ioc.dki_data; 2604263d13fSGeorge Wilson 2614263d13fSGeorge Wilson if (ldi_ioctl(dvd->vd_lh, DKIOCGETEFI, (intptr_t)&dk_ioc, 2624263d13fSGeorge Wilson FKIOCTL, kcred, NULL) == 0) { 2634263d13fSGeorge 
Wilson uint64_t efi_altern_lba = LE_64(efi->efi_gpt_AlternateLBA); 2644263d13fSGeorge Wilson 2654263d13fSGeorge Wilson zfs_dbgmsg("vdev %s, capacity %llu, altern lba %llu", 2664263d13fSGeorge Wilson vd->vdev_path, capacity, efi_altern_lba); 2674263d13fSGeorge Wilson if (capacity > efi_altern_lba) 2684263d13fSGeorge Wilson avail_space = (capacity - efi_altern_lba) * blksz; 2694263d13fSGeorge Wilson } 2704263d13fSGeorge Wilson kmem_free(dk_ioc.dki_data, efisize); 2714263d13fSGeorge Wilson return (avail_space); 2724263d13fSGeorge Wilson } 2734263d13fSGeorge Wilson 274a5b57771SDan McDonald /* 275a5b57771SDan McDonald * We want to be loud in DEBUG kernels when DKIOCGMEDIAINFOEXT fails, or when 276a5b57771SDan McDonald * even a fallback to DKIOCGMEDIAINFO fails. 277a5b57771SDan McDonald */ 278a5b57771SDan McDonald #ifdef DEBUG 279a5b57771SDan McDonald #define VDEV_DEBUG(...) cmn_err(CE_NOTE, __VA_ARGS__) 280a5b57771SDan McDonald #else 281a5b57771SDan McDonald #define VDEV_DEBUG(...) /* Nothing... */ 282a5b57771SDan McDonald #endif 283a5b57771SDan McDonald 284fa9e4066Sahrens static int 2854263d13fSGeorge Wilson vdev_disk_open(vdev_t *vd, uint64_t *psize, uint64_t *max_psize, 2864263d13fSGeorge Wilson uint64_t *ashift) 287fa9e4066Sahrens { 2888ad4d6ddSJeff Bonwick spa_t *spa = vd->vdev_spa; 289*39cddb10SJoshua M. Clulow vdev_disk_t *dvd = vd->vdev_tsd; 290*39cddb10SJoshua M. Clulow ldi_ev_cookie_t ecookie; 291*39cddb10SJoshua M. 
Clulow vdev_disk_ldi_cb_t *lcb; 292a5b57771SDan McDonald union { 293a5b57771SDan McDonald struct dk_minfo_ext ude; 294a5b57771SDan McDonald struct dk_minfo ud; 295a5b57771SDan McDonald } dks; 296a5b57771SDan McDonald struct dk_minfo_ext *dkmext = &dks.ude; 297a5b57771SDan McDonald struct dk_minfo *dkm = &dks.ud; 2980a4e9518Sgw int error; 299e14bb325SJeff Bonwick dev_t dev; 300e14bb325SJeff Bonwick int otyp; 301fb02ae02SGeorge Wilson boolean_t validate_devid = B_FALSE; 302fb02ae02SGeorge Wilson ddi_devid_t devid; 303a5b57771SDan McDonald uint64_t capacity = 0, blksz = 0, pbsize; 304fa9e4066Sahrens 305fa9e4066Sahrens /* 306fa9e4066Sahrens * We must have a pathname, and it must be absolute. 307fa9e4066Sahrens */ 308fa9e4066Sahrens if (vd->vdev_path == NULL || vd->vdev_path[0] != '/') { 309fa9e4066Sahrens vd->vdev_stat.vs_aux = VDEV_AUX_BAD_LABEL; 310be6fd75aSMatthew Ahrens return (SET_ERROR(EINVAL)); 311fa9e4066Sahrens } 312fa9e4066Sahrens 313095bcd66SGeorge Wilson /* 314095bcd66SGeorge Wilson * Reopen the device if it's not currently open. Otherwise, 315095bcd66SGeorge Wilson * just update the physical size of the device. 316095bcd66SGeorge Wilson */ 317*39cddb10SJoshua M. Clulow if (dvd != NULL) { 318*39cddb10SJoshua M. Clulow if (dvd->vd_ldi_offline && dvd->vd_lh == NULL) { 319*39cddb10SJoshua M. Clulow /* 320*39cddb10SJoshua M. Clulow * If we are opening a device in its offline notify 321*39cddb10SJoshua M. Clulow * context, the LDI handle was just closed. Clean 322*39cddb10SJoshua M. Clulow * up the LDI event callbacks and free vd->vdev_tsd. 323*39cddb10SJoshua M. Clulow */ 324*39cddb10SJoshua M. Clulow vdev_disk_free(vd); 325*39cddb10SJoshua M. Clulow } else { 326*39cddb10SJoshua M. Clulow ASSERT(vd->vdev_reopening); 327*39cddb10SJoshua M. Clulow goto skip_open; 328*39cddb10SJoshua M. Clulow } 329095bcd66SGeorge Wilson } 330095bcd66SGeorge Wilson 331*39cddb10SJoshua M. Clulow /* 332*39cddb10SJoshua M. Clulow * Create vd->vdev_tsd. 333*39cddb10SJoshua M. 
Clulow */ 334*39cddb10SJoshua M. Clulow vdev_disk_alloc(vd); 335*39cddb10SJoshua M. Clulow dvd = vd->vdev_tsd; 336fa9e4066Sahrens 337fa9e4066Sahrens /* 338fa9e4066Sahrens * When opening a disk device, we want to preserve the user's original 339fa9e4066Sahrens * intent. We always want to open the device by the path the user gave 3401724dc7bSJoshua M. Clulow * us, even if it is one of multiple paths to the same device. But we 341fa9e4066Sahrens * also want to be able to survive disks being removed/recabled. 342fa9e4066Sahrens * Therefore the sequence of opening devices is: 343fa9e4066Sahrens * 344afefbcddSeschrock * 1. Try opening the device by path. For legacy pools without the 345afefbcddSeschrock * 'whole_disk' property, attempt to fix the path by appending 's0'. 346fa9e4066Sahrens * 347fa9e4066Sahrens * 2. If the devid of the device matches the stored value, return 348fa9e4066Sahrens * success. 349fa9e4066Sahrens * 350fa9e4066Sahrens * 3. Otherwise, the device may have moved. Try opening the device 351fa9e4066Sahrens * by the devid instead. 352fa9e4066Sahrens */ 353fa9e4066Sahrens if (vd->vdev_devid != NULL) { 354fa9e4066Sahrens if (ddi_devid_str_decode(vd->vdev_devid, &dvd->vd_devid, 355fa9e4066Sahrens &dvd->vd_minor) != 0) { 356fa9e4066Sahrens vd->vdev_stat.vs_aux = VDEV_AUX_BAD_LABEL; 357be6fd75aSMatthew Ahrens return (SET_ERROR(EINVAL)); 358fa9e4066Sahrens } 359fa9e4066Sahrens } 360fa9e4066Sahrens 361fa9e4066Sahrens error = EINVAL; /* presume failure */ 362fa9e4066Sahrens 363095bcd66SGeorge Wilson if (vd->vdev_path != NULL) { 364fa9e4066Sahrens 365afefbcddSeschrock if (vd->vdev_wholedisk == -1ULL) { 366afefbcddSeschrock size_t len = strlen(vd->vdev_path) + 3; 367afefbcddSeschrock char *buf = kmem_alloc(len, KM_SLEEP); 368afefbcddSeschrock 369afefbcddSeschrock (void) snprintf(buf, len, "%ss0", vd->vdev_path); 370afefbcddSeschrock 371*39cddb10SJoshua M. Clulow error = ldi_open_by_name(buf, spa_mode(spa), kcred, 372*39cddb10SJoshua M. 
Clulow &dvd->vd_lh, zfs_li); 373*39cddb10SJoshua M. Clulow if (error == 0) { 374afefbcddSeschrock spa_strfree(vd->vdev_path); 375afefbcddSeschrock vd->vdev_path = buf; 376afefbcddSeschrock vd->vdev_wholedisk = 1ULL; 377afefbcddSeschrock } else { 378afefbcddSeschrock kmem_free(buf, len); 379afefbcddSeschrock } 380afefbcddSeschrock } 381fa9e4066Sahrens 382*39cddb10SJoshua M. Clulow /* 383*39cddb10SJoshua M. Clulow * If we have not yet opened the device, try to open it by the 384*39cddb10SJoshua M. Clulow * specified path. 385*39cddb10SJoshua M. Clulow */ 386*39cddb10SJoshua M. Clulow if (error != 0) { 387*39cddb10SJoshua M. Clulow error = ldi_open_by_name(vd->vdev_path, spa_mode(spa), 388*39cddb10SJoshua M. Clulow kcred, &dvd->vd_lh, zfs_li); 389*39cddb10SJoshua M. Clulow } 390fa9e4066Sahrens 391fa9e4066Sahrens /* 392fa9e4066Sahrens * Compare the devid to the stored value. 393fa9e4066Sahrens */ 394fa9e4066Sahrens if (error == 0 && vd->vdev_devid != NULL && 395fa9e4066Sahrens ldi_get_devid(dvd->vd_lh, &devid) == 0) { 396fa9e4066Sahrens if (ddi_devid_compare(devid, dvd->vd_devid) != 0) { 397be6fd75aSMatthew Ahrens error = SET_ERROR(EINVAL); 3988ad4d6ddSJeff Bonwick (void) ldi_close(dvd->vd_lh, spa_mode(spa), 3998ad4d6ddSJeff Bonwick kcred); 400fa9e4066Sahrens dvd->vd_lh = NULL; 401fa9e4066Sahrens } 402fa9e4066Sahrens ddi_devid_free(devid); 403fa9e4066Sahrens } 404afefbcddSeschrock 405afefbcddSeschrock /* 406afefbcddSeschrock * If we succeeded in opening the device, but 'vdev_wholedisk' 407afefbcddSeschrock * is not yet set, then this must be a slice. 408afefbcddSeschrock */ 409afefbcddSeschrock if (error == 0 && vd->vdev_wholedisk == -1ULL) 410afefbcddSeschrock vd->vdev_wholedisk = 0; 411fa9e4066Sahrens } 412fa9e4066Sahrens 413fa9e4066Sahrens /* 414fa9e4066Sahrens * If we were unable to open by path, or the devid check fails, open by 415fa9e4066Sahrens * devid instead. 
416fa9e4066Sahrens */ 417fb02ae02SGeorge Wilson if (error != 0 && vd->vdev_devid != NULL) { 418fa9e4066Sahrens error = ldi_open_by_devid(dvd->vd_devid, dvd->vd_minor, 4198ad4d6ddSJeff Bonwick spa_mode(spa), kcred, &dvd->vd_lh, zfs_li); 420fb02ae02SGeorge Wilson } 421fa9e4066Sahrens 4223d7072f8Seschrock /* 4233d7072f8Seschrock * If all else fails, then try opening by physical path (if available) 4243d7072f8Seschrock * or the logical path (if we failed due to the devid check). While not 4253d7072f8Seschrock * as reliable as the devid, this will give us something, and the higher 4263d7072f8Seschrock * level vdev validation will prevent us from opening the wrong device. 4273d7072f8Seschrock */ 4283d7072f8Seschrock if (error) { 429fb02ae02SGeorge Wilson if (vd->vdev_devid != NULL) 430fb02ae02SGeorge Wilson validate_devid = B_TRUE; 431fb02ae02SGeorge Wilson 4323d7072f8Seschrock if (vd->vdev_physpath != NULL && 433deb8317bSMark J Musante (dev = ddi_pathname_to_dev_t(vd->vdev_physpath)) != NODEV) 4348ad4d6ddSJeff Bonwick error = ldi_open_by_dev(&dev, OTYP_BLK, spa_mode(spa), 4353d7072f8Seschrock kcred, &dvd->vd_lh, zfs_li); 4363d7072f8Seschrock 4373d7072f8Seschrock /* 4383d7072f8Seschrock * Note that we don't support the legacy auto-wholedisk support 4393d7072f8Seschrock * as above. This hasn't been used in a very long time and we 4403d7072f8Seschrock * don't need to propagate its oddities to this edge condition. 
4413d7072f8Seschrock */ 442095bcd66SGeorge Wilson if (error && vd->vdev_path != NULL) 4438ad4d6ddSJeff Bonwick error = ldi_open_by_name(vd->vdev_path, spa_mode(spa), 4448ad4d6ddSJeff Bonwick kcred, &dvd->vd_lh, zfs_li); 4453d7072f8Seschrock } 4463d7072f8Seschrock 447e14bb325SJeff Bonwick if (error) { 448fa9e4066Sahrens vd->vdev_stat.vs_aux = VDEV_AUX_OPEN_FAILED; 449fa9e4066Sahrens return (error); 450e14bb325SJeff Bonwick } 451fa9e4066Sahrens 452fb02ae02SGeorge Wilson /* 453fb02ae02SGeorge Wilson * Now that the device has been successfully opened, update the devid 454fb02ae02SGeorge Wilson * if necessary. 455fb02ae02SGeorge Wilson */ 456fb02ae02SGeorge Wilson if (validate_devid && spa_writeable(spa) && 457fb02ae02SGeorge Wilson ldi_get_devid(dvd->vd_lh, &devid) == 0) { 458fb02ae02SGeorge Wilson if (ddi_devid_compare(devid, dvd->vd_devid) != 0) { 459fb02ae02SGeorge Wilson char *vd_devid; 460fb02ae02SGeorge Wilson 461fb02ae02SGeorge Wilson vd_devid = ddi_devid_str_encode(devid, dvd->vd_minor); 462fb02ae02SGeorge Wilson zfs_dbgmsg("vdev %s: update devid from %s, " 463fb02ae02SGeorge Wilson "to %s", vd->vdev_path, vd->vdev_devid, vd_devid); 464fb02ae02SGeorge Wilson spa_strfree(vd->vdev_devid); 465fb02ae02SGeorge Wilson vd->vdev_devid = spa_strdup(vd_devid); 466fb02ae02SGeorge Wilson ddi_devid_str_free(vd_devid); 467fb02ae02SGeorge Wilson } 468fb02ae02SGeorge Wilson ddi_devid_free(devid); 469fb02ae02SGeorge Wilson } 470fb02ae02SGeorge Wilson 4713d7072f8Seschrock /* 4723d7072f8Seschrock * Once a device is opened, verify that the physical device path (if 4733d7072f8Seschrock * available) is up to date. 
4743d7072f8Seschrock */ 4753d7072f8Seschrock if (ldi_get_dev(dvd->vd_lh, &dev) == 0 && 4763d7072f8Seschrock ldi_get_otyp(dvd->vd_lh, &otyp) == 0) { 4770a4e9518Sgw char *physpath, *minorname; 4780a4e9518Sgw 4793d7072f8Seschrock physpath = kmem_alloc(MAXPATHLEN, KM_SLEEP); 4803d7072f8Seschrock minorname = NULL; 4813d7072f8Seschrock if (ddi_dev_pathname(dev, otyp, physpath) == 0 && 4823d7072f8Seschrock ldi_get_minor_name(dvd->vd_lh, &minorname) == 0 && 4833d7072f8Seschrock (vd->vdev_physpath == NULL || 4843d7072f8Seschrock strcmp(vd->vdev_physpath, physpath) != 0)) { 4853d7072f8Seschrock if (vd->vdev_physpath) 4863d7072f8Seschrock spa_strfree(vd->vdev_physpath); 4873d7072f8Seschrock (void) strlcat(physpath, ":", MAXPATHLEN); 4883d7072f8Seschrock (void) strlcat(physpath, minorname, MAXPATHLEN); 4893d7072f8Seschrock vd->vdev_physpath = spa_strdup(physpath); 4903d7072f8Seschrock } 4913d7072f8Seschrock if (minorname) 4923d7072f8Seschrock kmem_free(minorname, strlen(minorname) + 1); 4933d7072f8Seschrock kmem_free(physpath, MAXPATHLEN); 4943d7072f8Seschrock } 4953d7072f8Seschrock 496*39cddb10SJoshua M. Clulow /* 497*39cddb10SJoshua M. Clulow * Register callbacks for the LDI offline event. 498*39cddb10SJoshua M. Clulow */ 499*39cddb10SJoshua M. Clulow if (ldi_ev_get_cookie(dvd->vd_lh, LDI_EV_OFFLINE, &ecookie) == 500*39cddb10SJoshua M. Clulow LDI_EV_SUCCESS) { 501*39cddb10SJoshua M. Clulow lcb = kmem_zalloc(sizeof (vdev_disk_ldi_cb_t), KM_SLEEP); 502*39cddb10SJoshua M. Clulow list_insert_tail(&dvd->vd_ldi_cbs, lcb); 503*39cddb10SJoshua M. Clulow (void) ldi_ev_register_callbacks(dvd->vd_lh, ecookie, 504*39cddb10SJoshua M. Clulow &vdev_disk_off_callb, (void *) vd, &lcb->lcb_id); 505*39cddb10SJoshua M. Clulow } 506*39cddb10SJoshua M. Clulow 507*39cddb10SJoshua M. Clulow /* 508*39cddb10SJoshua M. Clulow * Register callbacks for the LDI degrade event. 509*39cddb10SJoshua M. Clulow */ 510*39cddb10SJoshua M. 
Clulow if (ldi_ev_get_cookie(dvd->vd_lh, LDI_EV_DEGRADE, &ecookie) == 511*39cddb10SJoshua M. Clulow LDI_EV_SUCCESS) { 512*39cddb10SJoshua M. Clulow lcb = kmem_zalloc(sizeof (vdev_disk_ldi_cb_t), KM_SLEEP); 513*39cddb10SJoshua M. Clulow list_insert_tail(&dvd->vd_ldi_cbs, lcb); 514*39cddb10SJoshua M. Clulow (void) ldi_ev_register_callbacks(dvd->vd_lh, ecookie, 515*39cddb10SJoshua M. Clulow &vdev_disk_dgrd_callb, (void *) vd, &lcb->lcb_id); 516*39cddb10SJoshua M. Clulow } 517095bcd66SGeorge Wilson skip_open: 518fa9e4066Sahrens /* 519fa9e4066Sahrens * Determine the actual size of the device. 520fa9e4066Sahrens */ 521fa9e4066Sahrens if (ldi_get_size(dvd->vd_lh, psize) != 0) { 522fa9e4066Sahrens vd->vdev_stat.vs_aux = VDEV_AUX_OPEN_FAILED; 523be6fd75aSMatthew Ahrens return (SET_ERROR(EINVAL)); 524fa9e4066Sahrens } 525fa9e4066Sahrens 526a5b57771SDan McDonald *max_psize = *psize; 527a5b57771SDan McDonald 528ecc2d604Sbonwick /* 529ecc2d604Sbonwick * Determine the device's minimum transfer size. 530ecc2d604Sbonwick * If the ioctl isn't supported, assume DEV_BSIZE. 
531ecc2d604Sbonwick */ 532a5b57771SDan McDonald if ((error = ldi_ioctl(dvd->vd_lh, DKIOCGMEDIAINFOEXT, 533a5b57771SDan McDonald (intptr_t)dkmext, FKIOCTL, kcred, NULL)) == 0) { 534a5b57771SDan McDonald capacity = dkmext->dki_capacity - 1; 535a5b57771SDan McDonald blksz = dkmext->dki_lbsize; 536a5b57771SDan McDonald pbsize = dkmext->dki_pbsize; 537a5b57771SDan McDonald } else if ((error = ldi_ioctl(dvd->vd_lh, DKIOCGMEDIAINFO, 538a5b57771SDan McDonald (intptr_t)dkm, FKIOCTL, kcred, NULL)) == 0) { 539a5b57771SDan McDonald VDEV_DEBUG( 540a5b57771SDan McDonald "vdev_disk_open(\"%s\"): fallback to DKIOCGMEDIAINFO\n", 541a5b57771SDan McDonald vd->vdev_path); 542a5b57771SDan McDonald capacity = dkm->dki_capacity - 1; 543a5b57771SDan McDonald blksz = dkm->dki_lbsize; 544a5b57771SDan McDonald pbsize = blksz; 545a5b57771SDan McDonald } else { 546a5b57771SDan McDonald VDEV_DEBUG("vdev_disk_open(\"%s\"): " 547a5b57771SDan McDonald "both DKIOCGMEDIAINFO{,EXT} calls failed, %d\n", 548a5b57771SDan McDonald vd->vdev_path, error); 549a5b57771SDan McDonald pbsize = DEV_BSIZE; 550a5b57771SDan McDonald } 551bef6b7d2Swebaker 552a5b57771SDan McDonald *ashift = highbit(MAX(pbsize, SPA_MINBLOCKSIZE)) - 1; 553bef6b7d2Swebaker 5544263d13fSGeorge Wilson if (vd->vdev_wholedisk == 1) { 5554263d13fSGeorge Wilson int wce = 1; 5564263d13fSGeorge Wilson 557a5b57771SDan McDonald if (error == 0) { 558a5b57771SDan McDonald /* 559a5b57771SDan McDonald * If we have the capability to expand, we'd have 560a5b57771SDan McDonald * found out via success from DKIOCGMEDIAINFO{,EXT}. 561a5b57771SDan McDonald * Adjust max_psize upward accordingly since we know 562a5b57771SDan McDonald * we own the whole disk now. 
563a5b57771SDan McDonald */ 564a5b57771SDan McDonald *max_psize += vdev_disk_get_space(vd, capacity, blksz); 565a5b57771SDan McDonald zfs_dbgmsg("capacity change: vdev %s, psize %llu, " 566a5b57771SDan McDonald "max_psize %llu", vd->vdev_path, *psize, 567a5b57771SDan McDonald *max_psize); 568a5b57771SDan McDonald } 569a5b57771SDan McDonald 5704263d13fSGeorge Wilson /* 571a5b57771SDan McDonald * Since we own the whole disk, try to enable disk write 572a5b57771SDan McDonald * caching. We ignore errors because it's OK if we can't do it. 5734263d13fSGeorge Wilson */ 5744263d13fSGeorge Wilson (void) ldi_ioctl(dvd->vd_lh, DKIOCSETWCE, (intptr_t)&wce, 5754263d13fSGeorge Wilson FKIOCTL, kcred, NULL); 5764263d13fSGeorge Wilson } 5774263d13fSGeorge Wilson 578b468a217Seschrock /* 579b468a217Seschrock * Clear the nowritecache bit, so that on a vdev_reopen() we will 580b468a217Seschrock * try again. 581b468a217Seschrock */ 582b468a217Seschrock vd->vdev_nowritecache = B_FALSE; 583b468a217Seschrock 584fa9e4066Sahrens return (0); 585fa9e4066Sahrens } 586fa9e4066Sahrens 587fa9e4066Sahrens static void 588fa9e4066Sahrens vdev_disk_close(vdev_t *vd) 589fa9e4066Sahrens { 590fa9e4066Sahrens vdev_disk_t *dvd = vd->vdev_tsd; 591fa9e4066Sahrens 592095bcd66SGeorge Wilson if (vd->vdev_reopening || dvd == NULL) 593fa9e4066Sahrens return; 594fa9e4066Sahrens 595*39cddb10SJoshua M. Clulow if (dvd->vd_minor != NULL) { 596fa9e4066Sahrens ddi_devid_str_free(dvd->vd_minor); 597*39cddb10SJoshua M. Clulow dvd->vd_minor = NULL; 598*39cddb10SJoshua M. Clulow } 599fa9e4066Sahrens 600*39cddb10SJoshua M. Clulow if (dvd->vd_devid != NULL) { 601fa9e4066Sahrens ddi_devid_free(dvd->vd_devid); 602*39cddb10SJoshua M. Clulow dvd->vd_devid = NULL; 603*39cddb10SJoshua M. Clulow } 604fa9e4066Sahrens 605*39cddb10SJoshua M. Clulow if (dvd->vd_lh != NULL) { 6068ad4d6ddSJeff Bonwick (void) ldi_close(dvd->vd_lh, spa_mode(vd->vdev_spa), kcred); 607*39cddb10SJoshua M. Clulow dvd->vd_lh = NULL; 608*39cddb10SJoshua M. 
Clulow } 609fa9e4066Sahrens 61098d1cbfeSGeorge Wilson vd->vdev_delayed_close = B_FALSE; 611*39cddb10SJoshua M. Clulow /* 612*39cddb10SJoshua M. Clulow * If we closed the LDI handle due to an offline notify from LDI, 613*39cddb10SJoshua M. Clulow * don't free vd->vdev_tsd or unregister the callbacks here; 614*39cddb10SJoshua M. Clulow * the offline finalize callback or a reopen will take care of it. 615*39cddb10SJoshua M. Clulow */ 616*39cddb10SJoshua M. Clulow if (dvd->vd_ldi_offline) 617*39cddb10SJoshua M. Clulow return; 618*39cddb10SJoshua M. Clulow 619*39cddb10SJoshua M. Clulow vdev_disk_free(vd); 620fa9e4066Sahrens } 621fa9e4066Sahrens 622e7cbe64fSgw int 623810e43b2SBill Pijewski vdev_disk_physio(vdev_t *vd, caddr_t data, 624810e43b2SBill Pijewski size_t size, uint64_t offset, int flags, boolean_t isdump) 625810e43b2SBill Pijewski { 626810e43b2SBill Pijewski vdev_disk_t *dvd = vd->vdev_tsd; 627810e43b2SBill Pijewski 628*39cddb10SJoshua M. Clulow /* 629*39cddb10SJoshua M. Clulow * If the vdev is closed, it's likely in the REMOVED or FAULTED state. 630*39cddb10SJoshua M. Clulow * Nothing to be done here but return failure. 631*39cddb10SJoshua M. Clulow */ 632*39cddb10SJoshua M. Clulow if (dvd == NULL || (dvd->vd_ldi_offline && dvd->vd_lh == NULL)) 633*39cddb10SJoshua M. Clulow return (EIO); 634*39cddb10SJoshua M. Clulow 635810e43b2SBill Pijewski ASSERT(vd->vdev_ops == &vdev_disk_ops); 636810e43b2SBill Pijewski 637810e43b2SBill Pijewski /* 638810e43b2SBill Pijewski * If in the context of an active crash dump, use the ldi_dump(9F) 639810e43b2SBill Pijewski * call instead of ldi_strategy(9F) as usual. 
640810e43b2SBill Pijewski */ 641810e43b2SBill Pijewski if (isdump) { 642810e43b2SBill Pijewski ASSERT3P(dvd, !=, NULL); 643810e43b2SBill Pijewski return (ldi_dump(dvd->vd_lh, data, lbtodb(offset), 644810e43b2SBill Pijewski lbtodb(size))); 645810e43b2SBill Pijewski } 646810e43b2SBill Pijewski 647810e43b2SBill Pijewski return (vdev_disk_ldi_physio(dvd->vd_lh, data, size, offset, flags)); 648810e43b2SBill Pijewski } 649810e43b2SBill Pijewski 650810e43b2SBill Pijewski int 651810e43b2SBill Pijewski vdev_disk_ldi_physio(ldi_handle_t vd_lh, caddr_t data, 652810e43b2SBill Pijewski size_t size, uint64_t offset, int flags) 653e7cbe64fSgw { 654e7cbe64fSgw buf_t *bp; 655e7cbe64fSgw int error = 0; 656e7cbe64fSgw 657e7cbe64fSgw if (vd_lh == NULL) 658be6fd75aSMatthew Ahrens return (SET_ERROR(EINVAL)); 659e7cbe64fSgw 660e7cbe64fSgw ASSERT(flags & B_READ || flags & B_WRITE); 661e7cbe64fSgw 662e7cbe64fSgw bp = getrbuf(KM_SLEEP); 663e7cbe64fSgw bp->b_flags = flags | B_BUSY | B_NOCACHE | B_FAILFAST; 664e7cbe64fSgw bp->b_bcount = size; 665e7cbe64fSgw bp->b_un.b_addr = (void *)data; 666e7cbe64fSgw bp->b_lblkno = lbtodb(offset); 667e7cbe64fSgw bp->b_bufsize = size; 668e7cbe64fSgw 669e7cbe64fSgw error = ldi_strategy(vd_lh, bp); 670e7cbe64fSgw ASSERT(error == 0); 671e7cbe64fSgw if ((error = biowait(bp)) == 0 && bp->b_resid != 0) 672be6fd75aSMatthew Ahrens error = SET_ERROR(EIO); 673e7cbe64fSgw freerbuf(bp); 674e7cbe64fSgw 675e7cbe64fSgw return (error); 676e7cbe64fSgw } 677e7cbe64fSgw 678fa9e4066Sahrens static void 679fa9e4066Sahrens vdev_disk_io_intr(buf_t *bp) 680fa9e4066Sahrens { 68131d7e8faSGeorge Wilson vdev_buf_t *vb = (vdev_buf_t *)bp; 68231d7e8faSGeorge Wilson zio_t *zio = vb->vb_io; 683fa9e4066Sahrens 68451ece835Seschrock /* 68551ece835Seschrock * The rest of the zio stack only deals with EIO, ECKSUM, and ENXIO. 68651ece835Seschrock * Rather than teach the rest of the stack about other error 68751ece835Seschrock * possibilities (EFAULT, etc), we normalize the error value here. 
68851ece835Seschrock */ 68951ece835Seschrock zio->io_error = (geterror(bp) != 0 ? EIO : 0); 69051ece835Seschrock 69151ece835Seschrock if (zio->io_error == 0 && bp->b_resid != 0) 692be6fd75aSMatthew Ahrens zio->io_error = SET_ERROR(EIO); 693fa9e4066Sahrens 69431d7e8faSGeorge Wilson kmem_free(vb, sizeof (vdev_buf_t)); 695fa9e4066Sahrens 696e05725b1Sbonwick zio_interrupt(zio); 697fa9e4066Sahrens } 698fa9e4066Sahrens 699f4a72450SJeff Bonwick static void 700f4a72450SJeff Bonwick vdev_disk_ioctl_free(zio_t *zio) 701f4a72450SJeff Bonwick { 702f4a72450SJeff Bonwick kmem_free(zio->io_vsd, sizeof (struct dk_callback)); 703f4a72450SJeff Bonwick } 704f4a72450SJeff Bonwick 70522fe2c88SJonathan Adams static const zio_vsd_ops_t vdev_disk_vsd_ops = { 70622fe2c88SJonathan Adams vdev_disk_ioctl_free, 70722fe2c88SJonathan Adams zio_vsd_default_cksum_report 70822fe2c88SJonathan Adams }; 70922fe2c88SJonathan Adams 710fa9e4066Sahrens static void 711fa9e4066Sahrens vdev_disk_ioctl_done(void *zio_arg, int error) 712fa9e4066Sahrens { 713fa9e4066Sahrens zio_t *zio = zio_arg; 714fa9e4066Sahrens 715fa9e4066Sahrens zio->io_error = error; 716fa9e4066Sahrens 717e05725b1Sbonwick zio_interrupt(zio); 718fa9e4066Sahrens } 719fa9e4066Sahrens 720e05725b1Sbonwick static int 721fa9e4066Sahrens vdev_disk_io_start(zio_t *zio) 722fa9e4066Sahrens { 723fa9e4066Sahrens vdev_t *vd = zio->io_vd; 724fa9e4066Sahrens vdev_disk_t *dvd = vd->vdev_tsd; 72531d7e8faSGeorge Wilson vdev_buf_t *vb; 726e14bb325SJeff Bonwick struct dk_callback *dkc; 727fa9e4066Sahrens buf_t *bp; 728e14bb325SJeff Bonwick int error; 729fa9e4066Sahrens 730*39cddb10SJoshua M. Clulow /* 731*39cddb10SJoshua M. Clulow * If the vdev is closed, it's likely in the REMOVED or FAULTED state. 732*39cddb10SJoshua M. Clulow * Nothing to be done here but return failure. 733*39cddb10SJoshua M. Clulow */ 734*39cddb10SJoshua M. Clulow if (dvd == NULL || (dvd->vd_ldi_offline && dvd->vd_lh == NULL)) { 735*39cddb10SJoshua M. 
Clulow zio->io_error = ENXIO; 736*39cddb10SJoshua M. Clulow return (ZIO_PIPELINE_CONTINUE); 737*39cddb10SJoshua M. Clulow } 738*39cddb10SJoshua M. Clulow 739fa9e4066Sahrens if (zio->io_type == ZIO_TYPE_IOCTL) { 740fa9e4066Sahrens /* XXPOLICY */ 7410a4e9518Sgw if (!vdev_readable(vd)) { 742be6fd75aSMatthew Ahrens zio->io_error = SET_ERROR(ENXIO); 743e05725b1Sbonwick return (ZIO_PIPELINE_CONTINUE); 744fa9e4066Sahrens } 745fa9e4066Sahrens 746fa9e4066Sahrens switch (zio->io_cmd) { 747fa9e4066Sahrens 748fa9e4066Sahrens case DKIOCFLUSHWRITECACHE: 749fa9e4066Sahrens 750a2eea2e1Sahrens if (zfs_nocacheflush) 751a2eea2e1Sahrens break; 752a2eea2e1Sahrens 753b468a217Seschrock if (vd->vdev_nowritecache) { 754be6fd75aSMatthew Ahrens zio->io_error = SET_ERROR(ENOTSUP); 755b468a217Seschrock break; 756b468a217Seschrock } 757b468a217Seschrock 758e14bb325SJeff Bonwick zio->io_vsd = dkc = kmem_alloc(sizeof (*dkc), KM_SLEEP); 75922fe2c88SJonathan Adams zio->io_vsd_ops = &vdev_disk_vsd_ops; 760e14bb325SJeff Bonwick 761e14bb325SJeff Bonwick dkc->dkc_callback = vdev_disk_ioctl_done; 762e14bb325SJeff Bonwick dkc->dkc_flag = FLUSH_VOLATILE; 763e14bb325SJeff Bonwick dkc->dkc_cookie = zio; 764fa9e4066Sahrens 765fa9e4066Sahrens error = ldi_ioctl(dvd->vd_lh, zio->io_cmd, 766e14bb325SJeff Bonwick (uintptr_t)dkc, FKIOCTL, kcred, NULL); 767fa9e4066Sahrens 768fa9e4066Sahrens if (error == 0) { 769fa9e4066Sahrens /* 770fa9e4066Sahrens * The ioctl will be done asychronously, 771fa9e4066Sahrens * and will call vdev_disk_ioctl_done() 772fa9e4066Sahrens * upon completion. 773fa9e4066Sahrens */ 774e05725b1Sbonwick return (ZIO_PIPELINE_STOP); 775e05725b1Sbonwick } 776e05725b1Sbonwick 777e05725b1Sbonwick if (error == ENOTSUP || error == ENOTTY) { 778b468a217Seschrock /* 779d5782879Smishra * If we get ENOTSUP or ENOTTY, we know that 780d5782879Smishra * no future attempts will ever succeed. 
781d5782879Smishra * In this case we set a persistent bit so 782d5782879Smishra * that we don't bother with the ioctl in the 783d5782879Smishra * future. 784b468a217Seschrock */ 785b468a217Seschrock vd->vdev_nowritecache = B_TRUE; 786fa9e4066Sahrens } 787fa9e4066Sahrens zio->io_error = error; 788b468a217Seschrock 789fa9e4066Sahrens break; 790fa9e4066Sahrens 791fa9e4066Sahrens default: 792be6fd75aSMatthew Ahrens zio->io_error = SET_ERROR(ENOTSUP); 793fa9e4066Sahrens } 794fa9e4066Sahrens 795e05725b1Sbonwick return (ZIO_PIPELINE_CONTINUE); 796fa9e4066Sahrens } 797fa9e4066Sahrens 79831d7e8faSGeorge Wilson vb = kmem_alloc(sizeof (vdev_buf_t), KM_SLEEP); 799fa9e4066Sahrens 80031d7e8faSGeorge Wilson vb->vb_io = zio; 80131d7e8faSGeorge Wilson bp = &vb->vb_buf; 802fa9e4066Sahrens 803fa9e4066Sahrens bioinit(bp); 804e14bb325SJeff Bonwick bp->b_flags = B_BUSY | B_NOCACHE | 8058956713aSEric Schrock (zio->io_type == ZIO_TYPE_READ ? B_READ : B_WRITE); 8068956713aSEric Schrock if (!(zio->io_flags & (ZIO_FLAG_IO_RETRY | ZIO_FLAG_TRYHARD))) 8078956713aSEric Schrock bp->b_flags |= B_FAILFAST; 808fa9e4066Sahrens bp->b_bcount = zio->io_size; 809fa9e4066Sahrens bp->b_un.b_addr = zio->io_data; 810fa9e4066Sahrens bp->b_lblkno = lbtodb(zio->io_offset); 811fa9e4066Sahrens bp->b_bufsize = zio->io_size; 812fa9e4066Sahrens bp->b_iodone = (int (*)())vdev_disk_io_intr; 813fa9e4066Sahrens 814fa9e4066Sahrens /* ldi_strategy() will return non-zero only on programming errors */ 815e14bb325SJeff Bonwick VERIFY(ldi_strategy(dvd->vd_lh, bp) == 0); 816e05725b1Sbonwick 817e05725b1Sbonwick return (ZIO_PIPELINE_STOP); 818fa9e4066Sahrens } 819fa9e4066Sahrens 820e14bb325SJeff Bonwick static void 821fa9e4066Sahrens vdev_disk_io_done(zio_t *zio) 822fa9e4066Sahrens { 823e14bb325SJeff Bonwick vdev_t *vd = zio->io_vd; 824ea8dc4b6Seschrock 8253d7072f8Seschrock /* 8263d7072f8Seschrock * If the device returned EIO, then attempt a DKIOCSTATE ioctl to see if 8273d7072f8Seschrock * the device has been removed. 
If this is the case, then we trigger an 8280a4e9518Sgw * asynchronous removal of the device. Otherwise, probe the device and 8291f7ad2e1Sgw * make sure it's still accessible. 8303d7072f8Seschrock */ 8311d713200SEric Schrock if (zio->io_error == EIO && !vd->vdev_remove_wanted) { 8320a4e9518Sgw vdev_disk_t *dvd = vd->vdev_tsd; 833e14bb325SJeff Bonwick int state = DKIO_NONE; 8340a4e9518Sgw 835e14bb325SJeff Bonwick if (ldi_ioctl(dvd->vd_lh, DKIOCSTATE, (intptr_t)&state, 836e14bb325SJeff Bonwick FKIOCTL, kcred, NULL) == 0 && state != DKIO_INSERTED) { 8371d713200SEric Schrock /* 8381d713200SEric Schrock * We post the resource as soon as possible, instead of 8391d713200SEric Schrock * when the async removal actually happens, because the 8401d713200SEric Schrock * DE is using this information to discard previous I/O 8411d713200SEric Schrock * errors. 8421d713200SEric Schrock */ 8431d713200SEric Schrock zfs_post_remove(zio->io_spa, vd); 8443d7072f8Seschrock vd->vdev_remove_wanted = B_TRUE; 8453d7072f8Seschrock spa_async_request(zio->io_spa, SPA_ASYNC_REMOVE); 84698d1cbfeSGeorge Wilson } else if (!vd->vdev_delayed_close) { 84798d1cbfeSGeorge Wilson vd->vdev_delayed_close = B_TRUE; 8483d7072f8Seschrock } 8493d7072f8Seschrock } 850fa9e4066Sahrens } 851fa9e4066Sahrens 852fa9e4066Sahrens vdev_ops_t vdev_disk_ops = { 853fa9e4066Sahrens vdev_disk_open, 854fa9e4066Sahrens vdev_disk_close, 855fa9e4066Sahrens vdev_default_asize, 856fa9e4066Sahrens vdev_disk_io_start, 857fa9e4066Sahrens vdev_disk_io_done, 858fa9e4066Sahrens NULL, 859dcba9f3fSGeorge Wilson vdev_disk_hold, 860dcba9f3fSGeorge Wilson vdev_disk_rele, 861fa9e4066Sahrens VDEV_TYPE_DISK, /* name of this vdev type */ 862fa9e4066Sahrens B_TRUE /* leaf vdev */ 863fa9e4066Sahrens }; 864e7cbe64fSgw 865e7cbe64fSgw /* 866051aabe6Staylor * Given the root disk device devid or pathname, read the label from 867051aabe6Staylor * the device, and construct a configuration nvlist. 
868e7cbe64fSgw */ 869f940fbb1SLin Ling int 870f940fbb1SLin Ling vdev_disk_read_rootlabel(char *devpath, char *devid, nvlist_t **config) 871e7cbe64fSgw { 872e7cbe64fSgw ldi_handle_t vd_lh; 873e7cbe64fSgw vdev_label_t *label; 874e7cbe64fSgw uint64_t s, size; 875e7cbe64fSgw int l; 876051aabe6Staylor ddi_devid_t tmpdevid; 877f4565e39SLin Ling int error = -1; 878051aabe6Staylor char *minor_name; 879e7cbe64fSgw 880e7cbe64fSgw /* 881e7cbe64fSgw * Read the device label and build the nvlist. 882e7cbe64fSgw */ 883f4565e39SLin Ling if (devid != NULL && ddi_devid_str_decode(devid, &tmpdevid, 884051aabe6Staylor &minor_name) == 0) { 885051aabe6Staylor error = ldi_open_by_devid(tmpdevid, minor_name, 8868ad4d6ddSJeff Bonwick FREAD, kcred, &vd_lh, zfs_li); 887051aabe6Staylor ddi_devid_free(tmpdevid); 888051aabe6Staylor ddi_devid_str_free(minor_name); 889051aabe6Staylor } 890051aabe6Staylor 891f4565e39SLin Ling if (error && (error = ldi_open_by_name(devpath, FREAD, kcred, &vd_lh, 892f4565e39SLin Ling zfs_li))) 893f940fbb1SLin Ling return (error); 894e7cbe64fSgw 895bf82a41bSeschrock if (ldi_get_size(vd_lh, &s)) { 896bf82a41bSeschrock (void) ldi_close(vd_lh, FREAD, kcred); 897be6fd75aSMatthew Ahrens return (SET_ERROR(EIO)); 898bf82a41bSeschrock } 899e7cbe64fSgw 900e7cbe64fSgw size = P2ALIGN_TYPED(s, sizeof (vdev_label_t), uint64_t); 901e7cbe64fSgw label = kmem_alloc(sizeof (vdev_label_t), KM_SLEEP); 902e7cbe64fSgw 90317f1e64aSEric Taylor *config = NULL; 904e7cbe64fSgw for (l = 0; l < VDEV_LABELS; l++) { 905e7cbe64fSgw uint64_t offset, state, txg = 0; 906e7cbe64fSgw 907e7cbe64fSgw /* read vdev label */ 908e7cbe64fSgw offset = vdev_label_offset(size, l, 0); 909810e43b2SBill Pijewski if (vdev_disk_ldi_physio(vd_lh, (caddr_t)label, 9102264ca7fSLin Ling VDEV_SKIP_SIZE + VDEV_PHYS_SIZE, offset, B_READ) != 0) 911e7cbe64fSgw continue; 912e7cbe64fSgw 913e7cbe64fSgw if (nvlist_unpack(label->vl_vdev_phys.vp_nvlist, 914f940fbb1SLin Ling sizeof (label->vl_vdev_phys.vp_nvlist), config, 0) != 0) { 
915f940fbb1SLin Ling *config = NULL; 916e7cbe64fSgw continue; 917e7cbe64fSgw } 918e7cbe64fSgw 919f940fbb1SLin Ling if (nvlist_lookup_uint64(*config, ZPOOL_CONFIG_POOL_STATE, 920e7cbe64fSgw &state) != 0 || state >= POOL_STATE_DESTROYED) { 921f940fbb1SLin Ling nvlist_free(*config); 922f940fbb1SLin Ling *config = NULL; 923e7cbe64fSgw continue; 924e7cbe64fSgw } 925e7cbe64fSgw 926f940fbb1SLin Ling if (nvlist_lookup_uint64(*config, ZPOOL_CONFIG_POOL_TXG, 927e7cbe64fSgw &txg) != 0 || txg == 0) { 928f940fbb1SLin Ling nvlist_free(*config); 929f940fbb1SLin Ling *config = NULL; 930e7cbe64fSgw continue; 931e7cbe64fSgw } 932e7cbe64fSgw 933e7cbe64fSgw break; 934e7cbe64fSgw } 935e7cbe64fSgw 936e7cbe64fSgw kmem_free(label, sizeof (vdev_label_t)); 937bf82a41bSeschrock (void) ldi_close(vd_lh, FREAD, kcred); 93817f1e64aSEric Taylor if (*config == NULL) 939be6fd75aSMatthew Ahrens error = SET_ERROR(EIDRM); 940bf82a41bSeschrock 941f940fbb1SLin Ling return (error); 942e7cbe64fSgw } 943