xref: /illumos-gate/usr/src/uts/common/fs/zfs/vdev_disk.c (revision 0a4e9518)
1fa9e4066Sahrens /*
2fa9e4066Sahrens  * CDDL HEADER START
3fa9e4066Sahrens  *
4fa9e4066Sahrens  * The contents of this file are subject to the terms of the
5bef6b7d2Swebaker  * Common Development and Distribution License (the "License").
6bef6b7d2Swebaker  * You may not use this file except in compliance with the License.
7fa9e4066Sahrens  *
8fa9e4066Sahrens  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9fa9e4066Sahrens  * or http://www.opensolaris.org/os/licensing.
10fa9e4066Sahrens  * See the License for the specific language governing permissions
11fa9e4066Sahrens  * and limitations under the License.
12fa9e4066Sahrens  *
13fa9e4066Sahrens  * When distributing Covered Code, include this CDDL HEADER in each
14fa9e4066Sahrens  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15fa9e4066Sahrens  * If applicable, add the following below this CDDL HEADER, with the
16fa9e4066Sahrens  * fields enclosed by brackets "[]" replaced with your own identifying
17fa9e4066Sahrens  * information: Portions Copyright [yyyy] [name of copyright owner]
18fa9e4066Sahrens  *
19fa9e4066Sahrens  * CDDL HEADER END
20fa9e4066Sahrens  */
21fa9e4066Sahrens /*
223d7072f8Seschrock  * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
23fa9e4066Sahrens  * Use is subject to license terms.
24fa9e4066Sahrens  */
25fa9e4066Sahrens 
26fa9e4066Sahrens #pragma ident	"%Z%%M%	%I%	%E% SMI"
27fa9e4066Sahrens 
28fa9e4066Sahrens #include <sys/zfs_context.h>
29fa9e4066Sahrens #include <sys/spa.h>
30fa9e4066Sahrens #include <sys/vdev_disk.h>
31fa9e4066Sahrens #include <sys/vdev_impl.h>
32fa9e4066Sahrens #include <sys/fs/zfs.h>
33fa9e4066Sahrens #include <sys/zio.h>
34afefbcddSeschrock #include <sys/sunldi.h>
35fa9e4066Sahrens 
36fa9e4066Sahrens /*
37fa9e4066Sahrens  * Virtual device vector for disks.
38fa9e4066Sahrens  */
39fa9e4066Sahrens 
40fa9e4066Sahrens extern ldi_ident_t zfs_li;
41fa9e4066Sahrens 
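/*
 * A zio's disk I/O travels through the LDI layer wrapped in a vdev_disk_buf_t.
 * The buf_t must be the first member so that vdev_disk_io_intr() can cast the
 * buf_t pointer delivered by biodone() back to the wrapper and recover the
 * originating zio.
 */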
42fa9e4066Sahrens typedef struct vdev_disk_buf {
43fa9e4066Sahrens 	buf_t	vdb_buf;
44fa9e4066Sahrens 	zio_t	*vdb_io;
45fa9e4066Sahrens } vdev_disk_buf_t;
46fa9e4066Sahrens 
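/*
 * Open the device backing 'vd' and stash the LDI handle, devid, and minor
 * name in vd->vdev_tsd.  Shared by vdev_disk_open() and vdev_disk_probe().
 * On failure, vdev_stat.vs_aux is set and an errno is returned.
 */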
47fa9e4066Sahrens static int
48*0a4e9518Sgw vdev_disk_open_common(vdev_t *vd)
49fa9e4066Sahrens {
50fa9e4066Sahrens 	vdev_disk_t *dvd;
513d7072f8Seschrock 	dev_t dev;
52*0a4e9518Sgw 	int error;
53fa9e4066Sahrens 
54fa9e4066Sahrens 	/*
55fa9e4066Sahrens 	 * We must have a pathname, and it must be absolute.
56fa9e4066Sahrens 	 */
57fa9e4066Sahrens 	if (vd->vdev_path == NULL || vd->vdev_path[0] != '/') {
58fa9e4066Sahrens 		vd->vdev_stat.vs_aux = VDEV_AUX_BAD_LABEL;
59fa9e4066Sahrens 		return (EINVAL);
60fa9e4066Sahrens 	}
61fa9e4066Sahrens 
62fa9e4066Sahrens 	dvd = vd->vdev_tsd = kmem_zalloc(sizeof (vdev_disk_t), KM_SLEEP);
63fa9e4066Sahrens 
64fa9e4066Sahrens 	/*
65fa9e4066Sahrens 	 * When opening a disk device, we want to preserve the user's original
66fa9e4066Sahrens 	 * intent.  We always want to open the device by the path the user gave
67fa9e4066Sahrens 	 * us, even if it is one of multiple paths to the same device.  But we
68fa9e4066Sahrens 	 * also want to be able to survive disks being removed/recabled.
69fa9e4066Sahrens 	 * Therefore the sequence of opening devices is:
70fa9e4066Sahrens 	 *
71afefbcddSeschrock 	 * 1. Try opening the device by path.  For legacy pools without the
72afefbcddSeschrock 	 *    'whole_disk' property, attempt to fix the path by appending 's0'.
73fa9e4066Sahrens 	 *
74fa9e4066Sahrens 	 * 2. If the devid of the device matches the stored value, return
75fa9e4066Sahrens 	 *    success.
76fa9e4066Sahrens 	 *
77fa9e4066Sahrens 	 * 3. Otherwise, the device may have moved.  Try opening the device
78fa9e4066Sahrens 	 *    by the devid instead.
79fa9e4066Sahrens 	 *
80fa9e4066Sahrens 	 */
81fa9e4066Sahrens 	if (vd->vdev_devid != NULL) {
82fa9e4066Sahrens 		if (ddi_devid_str_decode(vd->vdev_devid, &dvd->vd_devid,
83fa9e4066Sahrens 		    &dvd->vd_minor) != 0) {
84fa9e4066Sahrens 			vd->vdev_stat.vs_aux = VDEV_AUX_BAD_LABEL;
85fa9e4066Sahrens 			return (EINVAL);
86fa9e4066Sahrens 		}
87fa9e4066Sahrens 	}
88fa9e4066Sahrens 
89fa9e4066Sahrens 	error = EINVAL;		/* presume failure */
90fa9e4066Sahrens 
91fa9e4066Sahrens 	if (vd->vdev_path != NULL) {
92fa9e4066Sahrens 		ddi_devid_t devid;
93fa9e4066Sahrens 
94afefbcddSeschrock 		if (vd->vdev_wholedisk == -1ULL) {
95afefbcddSeschrock 			size_t len = strlen(vd->vdev_path) + 3;
96afefbcddSeschrock 			char *buf = kmem_alloc(len, KM_SLEEP);
97afefbcddSeschrock 			ldi_handle_t lh;
98afefbcddSeschrock 
99afefbcddSeschrock 			(void) snprintf(buf, len, "%ss0", vd->vdev_path);
100afefbcddSeschrock 
101afefbcddSeschrock 			if (ldi_open_by_name(buf, spa_mode, kcred,
102afefbcddSeschrock 			    &lh, zfs_li) == 0) {
103afefbcddSeschrock 				spa_strfree(vd->vdev_path);
104afefbcddSeschrock 				vd->vdev_path = buf;
105afefbcddSeschrock 				vd->vdev_wholedisk = 1ULL;
106afefbcddSeschrock 				(void) ldi_close(lh, spa_mode, kcred);
107afefbcddSeschrock 			} else {
108afefbcddSeschrock 				kmem_free(buf, len);
109afefbcddSeschrock 			}
110afefbcddSeschrock 		}
111fa9e4066Sahrens 
112afefbcddSeschrock 		error = ldi_open_by_name(vd->vdev_path, spa_mode, kcred,
113afefbcddSeschrock 		    &dvd->vd_lh, zfs_li);
114fa9e4066Sahrens 
115fa9e4066Sahrens 		/*
116fa9e4066Sahrens 		 * Compare the devid to the stored value.
117fa9e4066Sahrens 		 */
118fa9e4066Sahrens 		if (error == 0 && vd->vdev_devid != NULL &&
119fa9e4066Sahrens 		    ldi_get_devid(dvd->vd_lh, &devid) == 0) {
120fa9e4066Sahrens 			if (ddi_devid_compare(devid, dvd->vd_devid) != 0) {
121fa9e4066Sahrens 				error = EINVAL;
122fa9e4066Sahrens 				(void) ldi_close(dvd->vd_lh, spa_mode, kcred);
123fa9e4066Sahrens 				dvd->vd_lh = NULL;
124fa9e4066Sahrens 			}
125fa9e4066Sahrens 			ddi_devid_free(devid);
126fa9e4066Sahrens 		}
127afefbcddSeschrock 
128afefbcddSeschrock 		/*
129afefbcddSeschrock 		 * If we succeeded in opening the device, but 'vdev_wholedisk'
130afefbcddSeschrock 		 * is not yet set, then this must be a slice.
131afefbcddSeschrock 		 */
132afefbcddSeschrock 		if (error == 0 && vd->vdev_wholedisk == -1ULL)
133afefbcddSeschrock 			vd->vdev_wholedisk = 0;
134fa9e4066Sahrens 	}
135fa9e4066Sahrens 
136fa9e4066Sahrens 	/*
137fa9e4066Sahrens 	 * If we were unable to open by path, or the devid check fails, open by
138fa9e4066Sahrens 	 * devid instead.
139fa9e4066Sahrens 	 */
140fa9e4066Sahrens 	if (error != 0 && vd->vdev_devid != NULL)
141fa9e4066Sahrens 		error = ldi_open_by_devid(dvd->vd_devid, dvd->vd_minor,
142fa9e4066Sahrens 		    spa_mode, kcred, &dvd->vd_lh, zfs_li);
143fa9e4066Sahrens 
1443d7072f8Seschrock 	/*
1453d7072f8Seschrock 	 * If all else fails, then try opening by physical path (if available)
1463d7072f8Seschrock 	 * or the logical path (if we failed due to the devid check).  While not
1473d7072f8Seschrock 	 * as reliable as the devid, this will give us something, and the higher
1483d7072f8Seschrock 	 * level vdev validation will prevent us from opening the wrong device.
1493d7072f8Seschrock 	 */
1503d7072f8Seschrock 	if (error) {
1513d7072f8Seschrock 		if (vd->vdev_physpath != NULL &&
1523d7072f8Seschrock 		    (dev = ddi_pathname_to_dev_t(vd->vdev_physpath)) != ENODEV)
1533d7072f8Seschrock 			error = ldi_open_by_dev(&dev, OTYP_BLK, spa_mode,
1543d7072f8Seschrock 			    kcred, &dvd->vd_lh, zfs_li);
1553d7072f8Seschrock 
1563d7072f8Seschrock 		/*
1573d7072f8Seschrock 		 * Note that we don't apply the legacy auto-wholedisk handling
1583d7072f8Seschrock 		 * as above.  This hasn't been used in a very long time and we
1593d7072f8Seschrock 		 * don't need to propagate its oddities to this edge condition.
1603d7072f8Seschrock 		 */
1613d7072f8Seschrock 		if (error && vd->vdev_path != NULL)
1623d7072f8Seschrock 			error = ldi_open_by_name(vd->vdev_path, spa_mode, kcred,
1633d7072f8Seschrock 			    &dvd->vd_lh, zfs_li);
1643d7072f8Seschrock 	}
1653d7072f8Seschrock 
166*0a4e9518Sgw 	if (error)
167fa9e4066Sahrens 		vd->vdev_stat.vs_aux = VDEV_AUX_OPEN_FAILED;
168*0a4e9518Sgw 
169*0a4e9518Sgw 	return (error);
170*0a4e9518Sgw }
171*0a4e9518Sgw 
172*0a4e9518Sgw static int
173*0a4e9518Sgw vdev_disk_open(vdev_t *vd, uint64_t *psize, uint64_t *ashift)
174*0a4e9518Sgw {
175*0a4e9518Sgw 	vdev_disk_t *dvd;
176*0a4e9518Sgw 	struct dk_minfo dkm;
177*0a4e9518Sgw 	int error;
178*0a4e9518Sgw 	dev_t dev;
179*0a4e9518Sgw 	int otyp;
180*0a4e9518Sgw 
181*0a4e9518Sgw 	error = vdev_disk_open_common(vd);
182*0a4e9518Sgw 	if (error)
183fa9e4066Sahrens 		return (error);
184fa9e4066Sahrens 
185*0a4e9518Sgw 	dvd = vd->vdev_tsd;
1863d7072f8Seschrock 	/*
1873d7072f8Seschrock 	 * Once a device is opened, verify that the physical device path (if
1883d7072f8Seschrock 	 * available) is up to date.
1893d7072f8Seschrock 	 */
1903d7072f8Seschrock 	if (ldi_get_dev(dvd->vd_lh, &dev) == 0 &&
1913d7072f8Seschrock 	    ldi_get_otyp(dvd->vd_lh, &otyp) == 0) {
192*0a4e9518Sgw 		char *physpath, *minorname;
193*0a4e9518Sgw 
1943d7072f8Seschrock 		physpath = kmem_alloc(MAXPATHLEN, KM_SLEEP);
1953d7072f8Seschrock 		minorname = NULL;
1963d7072f8Seschrock 		if (ddi_dev_pathname(dev, otyp, physpath) == 0 &&
1973d7072f8Seschrock 		    ldi_get_minor_name(dvd->vd_lh, &minorname) == 0 &&
1983d7072f8Seschrock 		    (vd->vdev_physpath == NULL ||
1993d7072f8Seschrock 		    strcmp(vd->vdev_physpath, physpath) != 0)) {
2003d7072f8Seschrock 			if (vd->vdev_physpath)
2013d7072f8Seschrock 				spa_strfree(vd->vdev_physpath);
2023d7072f8Seschrock 			(void) strlcat(physpath, ":", MAXPATHLEN);
2033d7072f8Seschrock 			(void) strlcat(physpath, minorname, MAXPATHLEN);
2043d7072f8Seschrock 			vd->vdev_physpath = spa_strdup(physpath);
2053d7072f8Seschrock 		}
2063d7072f8Seschrock 		if (minorname)
2073d7072f8Seschrock 			kmem_free(minorname, strlen(minorname) + 1);
2083d7072f8Seschrock 		kmem_free(physpath, MAXPATHLEN);
2093d7072f8Seschrock 	}
2103d7072f8Seschrock 
211fa9e4066Sahrens 	/*
212fa9e4066Sahrens 	 * Determine the actual size of the device.
213fa9e4066Sahrens 	 */
214fa9e4066Sahrens 	if (ldi_get_size(dvd->vd_lh, psize) != 0) {
215fa9e4066Sahrens 		vd->vdev_stat.vs_aux = VDEV_AUX_OPEN_FAILED;
216fa9e4066Sahrens 		return (EINVAL);
217fa9e4066Sahrens 	}
218fa9e4066Sahrens 
219ecc2d604Sbonwick 	/*
220ecc2d604Sbonwick 	 * If we own the whole disk, try to enable disk write caching.
221ecc2d604Sbonwick 	 * We ignore errors because it's OK if we can't do it.
222ecc2d604Sbonwick 	 */
223bef6b7d2Swebaker 	if (vd->vdev_wholedisk == 1) {
224ecc2d604Sbonwick 		int wce = 1;
225ecc2d604Sbonwick 		(void) ldi_ioctl(dvd->vd_lh, DKIOCSETWCE, (intptr_t)&wce,
226ecc2d604Sbonwick 		    FKIOCTL, kcred, NULL);
227ecc2d604Sbonwick 	}
228bef6b7d2Swebaker 
229ecc2d604Sbonwick 	/*
230ecc2d604Sbonwick 	 * Determine the device's minimum transfer size.
231ecc2d604Sbonwick 	 * If the ioctl isn't supported, assume DEV_BSIZE.
232ecc2d604Sbonwick 	 */
233ecc2d604Sbonwick 	if (ldi_ioctl(dvd->vd_lh, DKIOCGMEDIAINFO, (intptr_t)&dkm,
234ecc2d604Sbonwick 	    FKIOCTL, kcred, NULL) != 0)
235ecc2d604Sbonwick 		dkm.dki_lbsize = DEV_BSIZE;
236bef6b7d2Swebaker 
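	/*
	 * highbit(x) - 1 is log2(x) when x is a power of two, so e.g. a
	 * 512-byte logical block size yields an ashift of 9.
	 */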
237ecc2d604Sbonwick 	*ashift = highbit(MAX(dkm.dki_lbsize, SPA_MINBLOCKSIZE)) - 1;
238bef6b7d2Swebaker 
239b468a217Seschrock 	/*
240b468a217Seschrock 	 * Clear the nowritecache bit, so that on a vdev_reopen() we will
241b468a217Seschrock 	 * try again.
242b468a217Seschrock 	 */
243b468a217Seschrock 	vd->vdev_nowritecache = B_FALSE;
244b468a217Seschrock 
245fa9e4066Sahrens 	return (0);
246fa9e4066Sahrens }
247fa9e4066Sahrens 
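/*
 * Release the devid, minor name, and LDI handle acquired in
 * vdev_disk_open_common() and free the vdev_disk_t itself.
 */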
248fa9e4066Sahrens static void
249fa9e4066Sahrens vdev_disk_close(vdev_t *vd)
250fa9e4066Sahrens {
251fa9e4066Sahrens 	vdev_disk_t *dvd = vd->vdev_tsd;
252fa9e4066Sahrens 
253fa9e4066Sahrens 	if (dvd == NULL)
254fa9e4066Sahrens 		return;
255fa9e4066Sahrens 
256fa9e4066Sahrens 	if (dvd->vd_minor != NULL)
257fa9e4066Sahrens 		ddi_devid_str_free(dvd->vd_minor);
258fa9e4066Sahrens 
259fa9e4066Sahrens 	if (dvd->vd_devid != NULL)
260fa9e4066Sahrens 		ddi_devid_free(dvd->vd_devid);
261fa9e4066Sahrens 
262fa9e4066Sahrens 	if (dvd->vd_lh != NULL)
263fa9e4066Sahrens 		(void) ldi_close(dvd->vd_lh, spa_mode, kcred);
264fa9e4066Sahrens 
265fa9e4066Sahrens 	kmem_free(dvd, sizeof (vdev_disk_t));
266fa9e4066Sahrens 	vd->vdev_tsd = NULL;
267fa9e4066Sahrens }
268fa9e4066Sahrens 
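/*
 * Issue a single synchronous read or write of 'size' bytes at 'offset'
 * through the vdev's LDI handle, using a stack-allocated buf_t and
 * biowait() rather than the normal zio pipeline.
 */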
269*0a4e9518Sgw static int
270*0a4e9518Sgw vdev_disk_probe_io(vdev_t *vd, caddr_t data, size_t size, uint64_t offset,
271*0a4e9518Sgw     int flags)
272*0a4e9518Sgw {
273*0a4e9518Sgw 	buf_t buf;
274*0a4e9518Sgw 	int error = 0;
275*0a4e9518Sgw 	vdev_disk_t *dvd;
276*0a4e9518Sgw 
277*0a4e9518Sgw 	if (vd == NULL || (dvd = vd->vdev_tsd) == NULL || dvd->vd_lh == NULL)
278*0a4e9518Sgw 		return (EINVAL);
279*0a4e9518Sgw 
280*0a4e9518Sgw 	ASSERT(flags & B_READ || flags & B_WRITE);
281*0a4e9518Sgw 
282*0a4e9518Sgw 	bioinit(&buf);
283*0a4e9518Sgw 	buf.b_flags = flags | B_BUSY | B_NOCACHE | B_FAILFAST;
284*0a4e9518Sgw 	buf.b_bcount = size;
285*0a4e9518Sgw 	buf.b_un.b_addr = (void *)data;
286*0a4e9518Sgw 	buf.b_lblkno = lbtodb(offset);
287*0a4e9518Sgw 	buf.b_bufsize = size;
288*0a4e9518Sgw 
289*0a4e9518Sgw 	error = ldi_strategy(dvd->vd_lh, &buf);
290*0a4e9518Sgw 	ASSERT(error == 0);
291*0a4e9518Sgw 	error = biowait(&buf);
292*0a4e9518Sgw 
293*0a4e9518Sgw 	if (zio_injection_enabled && error == 0)
294*0a4e9518Sgw 		error = zio_handle_device_injection(vd, EIO);
295*0a4e9518Sgw 
296*0a4e9518Sgw 	return (error);
297*0a4e9518Sgw }
298*0a4e9518Sgw 
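/*
 * Probe the device by reading and then rewriting the vl_pad region of a
 * randomly chosen label.  If the first read fails, retry once with a
 * temporary vdev opened fresh via vdev_disk_open_common().  On overall
 * success, the vdev_is_failing flag is cleared.
 */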
299*0a4e9518Sgw static int
300*0a4e9518Sgw vdev_disk_probe(vdev_t *vd)
301*0a4e9518Sgw {
302*0a4e9518Sgw 	uint64_t offset;
303*0a4e9518Sgw 	vdev_t *nvd;
304*0a4e9518Sgw 	int l, error = 0, retries = 0;
305*0a4e9518Sgw 	char *vl_pad;
306*0a4e9518Sgw 
307*0a4e9518Sgw 	if (vd == NULL)
308*0a4e9518Sgw 		return (EINVAL);
309*0a4e9518Sgw 
310*0a4e9518Sgw 	/* Hijack the current vdev */
311*0a4e9518Sgw 	nvd = vd;
312*0a4e9518Sgw 
313*0a4e9518Sgw 	/*
314*0a4e9518Sgw 	 * Pick a random label to rewrite.
315*0a4e9518Sgw 	 */
316*0a4e9518Sgw 	l = spa_get_random(VDEV_LABELS);
317*0a4e9518Sgw 	ASSERT(l < VDEV_LABELS);
318*0a4e9518Sgw 
319*0a4e9518Sgw 	offset = vdev_label_offset(vd->vdev_psize, l,
320*0a4e9518Sgw 	    offsetof(vdev_label_t, vl_pad));
321*0a4e9518Sgw 
322*0a4e9518Sgw 	vl_pad = kmem_alloc(VDEV_SKIP_SIZE, KM_SLEEP);
323*0a4e9518Sgw 
324*0a4e9518Sgw 	/*
325*0a4e9518Sgw 	 * Try to read and write to a special location on the
326*0a4e9518Sgw 	 * label. We use the existing vdev initially and only
327*0a4e9518Sgw 	 * try to create and reopen it if we encounter a failure.
328*0a4e9518Sgw 	 */
329*0a4e9518Sgw 	while ((error = vdev_disk_probe_io(nvd, vl_pad, VDEV_SKIP_SIZE,
330*0a4e9518Sgw 	    offset, B_READ)) != 0 && retries == 0) {
331*0a4e9518Sgw 
332*0a4e9518Sgw 		nvd = kmem_zalloc(sizeof (vdev_t), KM_SLEEP);
333*0a4e9518Sgw 		if (vd->vdev_path)
334*0a4e9518Sgw 			nvd->vdev_path = spa_strdup(vd->vdev_path);
335*0a4e9518Sgw 		if (vd->vdev_physpath)
336*0a4e9518Sgw 			nvd->vdev_physpath = spa_strdup(vd->vdev_physpath);
337*0a4e9518Sgw 		if (vd->vdev_devid)
338*0a4e9518Sgw 			nvd->vdev_devid = spa_strdup(vd->vdev_devid);
339*0a4e9518Sgw 		nvd->vdev_wholedisk = vd->vdev_wholedisk;
340*0a4e9518Sgw 		nvd->vdev_guid = vd->vdev_guid;
341*0a4e9518Sgw 		retries++;
342*0a4e9518Sgw 
343*0a4e9518Sgw 		error = vdev_disk_open_common(nvd);
344*0a4e9518Sgw 		if (error) {
345*0a4e9518Sgw 			vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
346*0a4e9518Sgw 			    nvd->vdev_stat.vs_aux);
347*0a4e9518Sgw 			break;
348*0a4e9518Sgw 		}
349*0a4e9518Sgw 	}
350*0a4e9518Sgw 
351*0a4e9518Sgw 	if (!error) {
352*0a4e9518Sgw 		error = vdev_disk_probe_io(nvd, vl_pad, VDEV_SKIP_SIZE,
353*0a4e9518Sgw 		    offset, B_WRITE);
354*0a4e9518Sgw 	}
355*0a4e9518Sgw 
356*0a4e9518Sgw 	/* Clean up if we allocated a new vdev */
357*0a4e9518Sgw 	if (retries) {
358*0a4e9518Sgw 		vdev_disk_close(nvd);
359*0a4e9518Sgw 		if (nvd->vdev_path)
360*0a4e9518Sgw 			spa_strfree(nvd->vdev_path);
361*0a4e9518Sgw 		if (nvd->vdev_physpath)
362*0a4e9518Sgw 			spa_strfree(nvd->vdev_physpath);
363*0a4e9518Sgw 		if (nvd->vdev_devid)
364*0a4e9518Sgw 			spa_strfree(nvd->vdev_devid);
365*0a4e9518Sgw 		kmem_free(nvd, sizeof (vdev_t));
366*0a4e9518Sgw 	}
367*0a4e9518Sgw 	kmem_free(vl_pad, VDEV_SKIP_SIZE);
368*0a4e9518Sgw 
369*0a4e9518Sgw 	/* Reset the failing flag */
370*0a4e9518Sgw 	if (!error)
371*0a4e9518Sgw 		vd->vdev_is_failing = B_FALSE;
372*0a4e9518Sgw 
373*0a4e9518Sgw 	return (error);
374*0a4e9518Sgw }
375*0a4e9518Sgw 
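/*
 * biodone() callback for I/O issued by vdev_disk_io_start(): propagate the
 * buf_t error (or EIO on a short transfer) into the zio, free the wrapper,
 * and advance the zio pipeline.
 */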
376fa9e4066Sahrens static void
377fa9e4066Sahrens vdev_disk_io_intr(buf_t *bp)
378fa9e4066Sahrens {
379fa9e4066Sahrens 	vdev_disk_buf_t *vdb = (vdev_disk_buf_t *)bp;
380fa9e4066Sahrens 	zio_t *zio = vdb->vdb_io;
381fa9e4066Sahrens 
382fa9e4066Sahrens 	if ((zio->io_error = geterror(bp)) == 0 && bp->b_resid != 0)
383fa9e4066Sahrens 		zio->io_error = EIO;
384fa9e4066Sahrens 
385fa9e4066Sahrens 	kmem_free(vdb, sizeof (vdev_disk_buf_t));
386fa9e4066Sahrens 
387fa9e4066Sahrens 	zio_next_stage_async(zio);
388fa9e4066Sahrens }
389fa9e4066Sahrens 
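/*
 * Completion callback for the asynchronous DKIOCFLUSHWRITECACHE ioctl
 * issued from vdev_disk_io_start().
 */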
390fa9e4066Sahrens static void
391fa9e4066Sahrens vdev_disk_ioctl_done(void *zio_arg, int error)
392fa9e4066Sahrens {
393fa9e4066Sahrens 	zio_t *zio = zio_arg;
394fa9e4066Sahrens 
395fa9e4066Sahrens 	zio->io_error = error;
396fa9e4066Sahrens 
397fa9e4066Sahrens 	zio_next_stage_async(zio);
398fa9e4066Sahrens }
399fa9e4066Sahrens 
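/*
 * Start an I/O: ioctls (currently only DKIOCFLUSHWRITECACHE) are handled
 * inline; reads are first offered to the vdev cache; everything else is
 * run through the vdev queue and then issued asynchronously with
 * ldi_strategy(), completing in vdev_disk_io_intr().
 */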
400fa9e4066Sahrens static void
401fa9e4066Sahrens vdev_disk_io_start(zio_t *zio)
402fa9e4066Sahrens {
403fa9e4066Sahrens 	vdev_t *vd = zio->io_vd;
404fa9e4066Sahrens 	vdev_disk_t *dvd = vd->vdev_tsd;
405fa9e4066Sahrens 	vdev_disk_buf_t *vdb;
406fa9e4066Sahrens 	buf_t *bp;
407fa9e4066Sahrens 	int flags, error;
408fa9e4066Sahrens 
409fa9e4066Sahrens 	if (zio->io_type == ZIO_TYPE_IOCTL) {
410fa9e4066Sahrens 		zio_vdev_io_bypass(zio);
411fa9e4066Sahrens 
412fa9e4066Sahrens 		/* XXPOLICY */
413*0a4e9518Sgw 		if (!vdev_readable(vd)) {
414fa9e4066Sahrens 			zio->io_error = ENXIO;
415fa9e4066Sahrens 			zio_next_stage_async(zio);
416fa9e4066Sahrens 			return;
417fa9e4066Sahrens 		}
418fa9e4066Sahrens 
419fa9e4066Sahrens 		switch (zio->io_cmd) {
420fa9e4066Sahrens 
421fa9e4066Sahrens 		case DKIOCFLUSHWRITECACHE:
422fa9e4066Sahrens 
423a2eea2e1Sahrens 			if (zfs_nocacheflush)
424a2eea2e1Sahrens 				break;
425a2eea2e1Sahrens 
426b468a217Seschrock 			if (vd->vdev_nowritecache) {
427b468a217Seschrock 				zio->io_error = ENOTSUP;
428b468a217Seschrock 				break;
429b468a217Seschrock 			}
430b468a217Seschrock 
431fa9e4066Sahrens 			zio->io_dk_callback.dkc_callback = vdev_disk_ioctl_done;
432a84224b3Sgz 			zio->io_dk_callback.dkc_flag = FLUSH_VOLATILE;
433fa9e4066Sahrens 			zio->io_dk_callback.dkc_cookie = zio;
434fa9e4066Sahrens 
435fa9e4066Sahrens 			error = ldi_ioctl(dvd->vd_lh, zio->io_cmd,
436fa9e4066Sahrens 			    (uintptr_t)&zio->io_dk_callback,
437fa9e4066Sahrens 			    FKIOCTL, kcred, NULL);
438fa9e4066Sahrens 
439fa9e4066Sahrens 			if (error == 0) {
440fa9e4066Sahrens 				/*
441fa9e4066Sahrens 				 * The ioctl will be done asynchronously,
442fa9e4066Sahrens 				 * and will call vdev_disk_ioctl_done()
443fa9e4066Sahrens 				 * upon completion.
444fa9e4066Sahrens 				 */
445fa9e4066Sahrens 				return;
446d5782879Smishra 			} else if (error == ENOTSUP || error == ENOTTY) {
447b468a217Seschrock 				/*
448d5782879Smishra 				 * If we get ENOTSUP or ENOTTY, we know that
449d5782879Smishra 				 * no future attempts will ever succeed.
450d5782879Smishra 				 * In this case we set a persistent bit so
451d5782879Smishra 				 * that we don't bother with the ioctl in the
452d5782879Smishra 				 * future.
453b468a217Seschrock 				 */
454b468a217Seschrock 				vd->vdev_nowritecache = B_TRUE;
455fa9e4066Sahrens 			}
456fa9e4066Sahrens 			zio->io_error = error;
457b468a217Seschrock 
458fa9e4066Sahrens 			break;
459fa9e4066Sahrens 
460fa9e4066Sahrens 		default:
461fa9e4066Sahrens 			zio->io_error = ENOTSUP;
462fa9e4066Sahrens 		}
463fa9e4066Sahrens 
464fa9e4066Sahrens 		zio_next_stage_async(zio);
465fa9e4066Sahrens 		return;
466fa9e4066Sahrens 	}
467fa9e4066Sahrens 
468fa9e4066Sahrens 	if (zio->io_type == ZIO_TYPE_READ && vdev_cache_read(zio) == 0)
469fa9e4066Sahrens 		return;
470fa9e4066Sahrens 
471fa9e4066Sahrens 	if ((zio = vdev_queue_io(zio)) == NULL)
472fa9e4066Sahrens 		return;
473fa9e4066Sahrens 
474fa9e4066Sahrens 	flags = (zio->io_type == ZIO_TYPE_READ ? B_READ : B_WRITE);
475fa9e4066Sahrens 	flags |= B_BUSY | B_NOCACHE;
476fa9e4066Sahrens 	if (zio->io_flags & ZIO_FLAG_FAILFAST)
477fa9e4066Sahrens 		flags |= B_FAILFAST;
478fa9e4066Sahrens 
479fa9e4066Sahrens 	vdb = kmem_alloc(sizeof (vdev_disk_buf_t), KM_SLEEP);
480fa9e4066Sahrens 
481fa9e4066Sahrens 	vdb->vdb_io = zio;
482fa9e4066Sahrens 	bp = &vdb->vdb_buf;
483fa9e4066Sahrens 
484fa9e4066Sahrens 	bioinit(bp);
485fa9e4066Sahrens 	bp->b_flags = flags;
486fa9e4066Sahrens 	bp->b_bcount = zio->io_size;
487fa9e4066Sahrens 	bp->b_un.b_addr = zio->io_data;
488fa9e4066Sahrens 	bp->b_lblkno = lbtodb(zio->io_offset);
489fa9e4066Sahrens 	bp->b_bufsize = zio->io_size;
490fa9e4066Sahrens 	bp->b_iodone = (int (*)())vdev_disk_io_intr;
491fa9e4066Sahrens 
492fa9e4066Sahrens 	/* XXPOLICY */
493*0a4e9518Sgw 	if (zio->io_type == ZIO_TYPE_WRITE)
494*0a4e9518Sgw 		error = vdev_writeable(vd) ? vdev_error_inject(vd, zio) : ENXIO;
495*0a4e9518Sgw 	else
496*0a4e9518Sgw 		error = vdev_readable(vd) ? vdev_error_inject(vd, zio) : ENXIO;
497*0a4e9518Sgw 	error = (vd->vdev_remove_wanted || vd->vdev_is_failing) ? ENXIO : error;
498fa9e4066Sahrens 	if (error) {
499fa9e4066Sahrens 		zio->io_error = error;
500fa9e4066Sahrens 		bioerror(bp, error);
501fa9e4066Sahrens 		bp->b_resid = bp->b_bcount;
502fa9e4066Sahrens 		bp->b_iodone(bp);
503fa9e4066Sahrens 		return;
504fa9e4066Sahrens 	}
505fa9e4066Sahrens 
506fa9e4066Sahrens 	error = ldi_strategy(dvd->vd_lh, bp);
507fa9e4066Sahrens 	/* ldi_strategy() will return non-zero only on programming errors */
508fa9e4066Sahrens 	ASSERT(error == 0);
509fa9e4066Sahrens }
510fa9e4066Sahrens 
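/*
 * I/O completion: update the vdev queue and write cache, apply any fault
 * injection, and on EIO decide whether the device was removed (DKIOCSTATE)
 * or should be re-probed.
 */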
511fa9e4066Sahrens static void
512fa9e4066Sahrens vdev_disk_io_done(zio_t *zio)
513fa9e4066Sahrens {
514fa9e4066Sahrens 	vdev_queue_io_done(zio);
515fa9e4066Sahrens 
516fa9e4066Sahrens 	if (zio->io_type == ZIO_TYPE_WRITE)
517fa9e4066Sahrens 		vdev_cache_write(zio);
518fa9e4066Sahrens 
519ea8dc4b6Seschrock 	if (zio_injection_enabled && zio->io_error == 0)
520ea8dc4b6Seschrock 		zio->io_error = zio_handle_device_injection(zio->io_vd, EIO);
521ea8dc4b6Seschrock 
5223d7072f8Seschrock 	/*
5233d7072f8Seschrock 	 * If the device returned EIO, then attempt a DKIOCSTATE ioctl to see if
5243d7072f8Seschrock 	 * the device has been removed.  If this is the case, then we trigger an
525*0a4e9518Sgw 	 * asynchronous removal of the device. Otherwise, probe the device and
526*0a4e9518Sgw 	 * make sure it's still functional.
5273d7072f8Seschrock 	 */
5283d7072f8Seschrock 	if (zio->io_error == EIO) {
529*0a4e9518Sgw 		vdev_t *vd = zio->io_vd;
530*0a4e9518Sgw 		vdev_disk_t *dvd = vd->vdev_tsd;
531*0a4e9518Sgw 		int state;
532*0a4e9518Sgw 
5333d7072f8Seschrock 		state = DKIO_NONE;
534*0a4e9518Sgw 		if (dvd && ldi_ioctl(dvd->vd_lh, DKIOCSTATE, (intptr_t)&state,
5353d7072f8Seschrock 		    FKIOCTL, kcred, NULL) == 0 &&
5363d7072f8Seschrock 		    state != DKIO_INSERTED) {
5373d7072f8Seschrock 			vd->vdev_remove_wanted = B_TRUE;
5383d7072f8Seschrock 			spa_async_request(zio->io_spa, SPA_ASYNC_REMOVE);
539*0a4e9518Sgw 		} else if (vdev_probe(vd) != 0) {
540*0a4e9518Sgw 			ASSERT(vd->vdev_ops->vdev_op_leaf);
541*0a4e9518Sgw 			vd->vdev_is_failing = B_TRUE;
5423d7072f8Seschrock 		}
5433d7072f8Seschrock 	}
5443d7072f8Seschrock 
545fa9e4066Sahrens 	zio_next_stage(zio);
546fa9e4066Sahrens }
547fa9e4066Sahrens 
548fa9e4066Sahrens vdev_ops_t vdev_disk_ops = {
549fa9e4066Sahrens 	vdev_disk_open,
550fa9e4066Sahrens 	vdev_disk_close,
551*0a4e9518Sgw 	vdev_disk_probe,
552fa9e4066Sahrens 	vdev_default_asize,
553fa9e4066Sahrens 	vdev_disk_io_start,
554fa9e4066Sahrens 	vdev_disk_io_done,
555fa9e4066Sahrens 	NULL,
556fa9e4066Sahrens 	VDEV_TYPE_DISK,		/* name of this vdev type */
557fa9e4066Sahrens 	B_TRUE			/* leaf vdev */
558fa9e4066Sahrens };
559