/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#include <sys/spa.h>
#include <sys/spa_impl.h>
#include <sys/vdev.h>
#include <sys/vdev_impl.h>
#include <sys/zio.h>
#include <sys/zio_checksum.h>

#include <sys/fm/fs/zfs.h>
#include <sys/fm/protocol.h>
#include <sys/fm/util.h>
#include <sys/sysevent.h>

/*
 * This general routine is responsible for generating all the different ZFS
 * ereports.  The payload is dependent on the class, and which arguments are
 * supplied to the function:
 *
 *	EREPORT			POOL	VDEV	IO
 *	block			X	X	X
 *	data			X		X
 *	device			X	X
 *	pool			X
 *
 * If we are in a loading state, all errors are chained together by the same
 * SPA-wide ENA (Error Numeric Association).
 *
 * For isolated I/O requests, we get the ENA from the zio_t. The propagation
 * gets very complicated due to RAID-Z, gang blocks, and vdev caching.  We want
 * to chain together all ereports associated with a logical piece of data.  For
 * read I/Os, there are basically three 'types' of I/O, which form a roughly
 * layered diagram:
 *
 *	+---------------+
 *	| Aggregate I/O |	No associated logical data or device
 *	+---------------+
 *		|
 *		V
 *	+---------------+	Reads associated with a piece of logical data.
 *	|   Read I/O    |	This includes reads on behalf of RAID-Z,
 *	+---------------+	mirrors, gang blocks, retries, etc.
 *		|
 *		V
 *	+---------------+	Reads associated with a particular device, but
 *	| Physical I/O  |	no logical data.  Issued as part of vdev caching
 *	+---------------+	and I/O aggregation.
 *
 * Note that 'physical I/O' here is not the same terminology as used in the rest
 * of ZIO.  Typically, 'physical I/O' simply means that there is no attached
 * blockpointer.  But I/O with no associated block pointer can still be related
 * to a logical piece of data (i.e. RAID-Z requests).
 *
 * Purely physical I/O always have unique ENAs.  They are not related to a
 * particular piece of logical data, and therefore cannot be chained together.
 * We still generate an ereport, but the DE doesn't correlate it with any
 * logical piece of data.  When such an I/O fails, the delegated I/O requests
 * will issue a retry, which will trigger the 'real' ereport with the correct
 * ENA.
 *
 * We keep track of the ENA for a ZIO chain through the 'io_logical' member.
 * When a new logical I/O is issued, we set this to point to itself.  Child I/Os
 * then inherit this pointer, so that when it is first set subsequent failures
 * will use the same ENA.
 * For vdev cache fill and queue aggregation I/O,
 * this pointer is set to NULL, and no ereport will be generated (since it
 * doesn't actually correspond to any particular device or piece of data,
 * and the caller will always retry without caching or queueing anyway).
 *
 * For checksum errors, we want to include more information about the actual
 * error which occurs.  Accordingly, we build an ereport when the error is
 * noticed, but instead of sending it in immediately, we hang it off of the
 * io_cksum_report field of the logical IO.  When the logical IO completes
 * (successfully or not), zfs_ereport_finish_checksum() is called with the
 * good and bad versions of the buffer (if available), and we annotate the
 * ereport with information about the differences.
 */
#ifdef _KERNEL
/*
 * Build (but do not post) an ereport and its detector FMRI for the given
 * error class.  On success, *ereport_out and *detector_out are set to
 * freshly-allocated nvlists the caller must eventually destroy; on any of
 * the early-return filter paths they are left untouched (callers initialize
 * them to NULL and test for that).
 *
 * 'stateoroffset' is overloaded: with a zio and an explicit 'size' it is a
 * physical offset; with a vdev but no zio it is the vdev's previous state.
 */
static void
zfs_ereport_start(nvlist_t **ereport_out, nvlist_t **detector_out,
    const char *subclass, spa_t *spa, vdev_t *vd, zio_t *zio,
    uint64_t stateoroffset, uint64_t size)
{
	nvlist_t *ereport, *detector;

	uint64_t ena;
	char class[64];

	/*
	 * If we are doing a spa_tryimport(), ignore errors.
	 */
	if (spa->spa_load_state == SPA_LOAD_TRYIMPORT)
		return;

	/*
	 * If we are in the middle of opening a pool, and the previous attempt
	 * failed, don't bother logging any new ereports - we're just going to
	 * get the same diagnosis anyway.
	 */
	if (spa->spa_load_state != SPA_LOAD_NONE &&
	    spa->spa_last_open_failed)
		return;

	if (zio != NULL) {
		/*
		 * If this is not a read or write zio, ignore the error.  This
		 * can occur if the DKIOCFLUSHWRITECACHE ioctl fails.
		 */
		if (zio->io_type != ZIO_TYPE_READ &&
		    zio->io_type != ZIO_TYPE_WRITE)
			return;

		/*
		 * Ignore any errors from speculative I/Os, as failure is an
		 * expected result.
		 */
		if (zio->io_flags & ZIO_FLAG_SPECULATIVE)
			return;

		/*
		 * If this I/O is not a retry I/O, don't post an ereport.
		 * Otherwise, we risk making bad diagnoses based on B_FAILFAST
		 * I/Os.
		 */
		if (zio->io_error == EIO &&
		    !(zio->io_flags & ZIO_FLAG_IO_RETRY))
			return;

		if (vd != NULL) {
			/*
			 * If the vdev has already been marked as failing due
			 * to a failed probe, then ignore any subsequent I/O
			 * errors, as the DE will automatically fault the vdev
			 * on the first such failure.  This also catches cases
			 * where vdev_remove_wanted is set and the device has
			 * not yet been asynchronously placed into the REMOVED
			 * state.
			 */
			if (zio->io_vd == vd && !vdev_accessible(vd, zio))
				return;

			/*
			 * Ignore checksum errors for reads from DTL regions of
			 * leaf vdevs.
			 */
			if (zio->io_type == ZIO_TYPE_READ &&
			    zio->io_error == ECKSUM &&
			    vd->vdev_ops->vdev_op_leaf &&
			    vdev_dtl_contains(vd, DTL_MISSING, zio->io_txg, 1))
				return;
		}
	}

	/*
	 * For probe failure, we want to avoid posting ereports if we've
	 * already removed the device in the meantime.
	 */
	if (vd != NULL &&
	    strcmp(subclass, FM_EREPORT_ZFS_PROBE_FAILURE) == 0 &&
	    (vd->vdev_remove_wanted || vd->vdev_state == VDEV_STATE_REMOVED))
		return;

	if ((ereport = fm_nvlist_create(NULL)) == NULL)
		return;

	if ((detector = fm_nvlist_create(NULL)) == NULL) {
		fm_nvlist_destroy(ereport, FM_NVA_FREE);
		return;
	}

	/*
	 * Serialize ereport generation
	 */
	mutex_enter(&spa->spa_errlist_lock);

	/*
	 * Determine the ENA to use for this event.  If we are in a loading
	 * state, use a SPA-wide ENA.  Otherwise, if we are in an I/O state, use
	 * a root zio-wide ENA.  Otherwise, simply use a unique ENA.
	 */
	if (spa->spa_load_state != SPA_LOAD_NONE) {
		if (spa->spa_ena == 0)
			spa->spa_ena = fm_ena_generate(0, FM_ENA_FMT1);
		ena = spa->spa_ena;
	} else if (zio != NULL && zio->io_logical != NULL) {
		if (zio->io_logical->io_ena == 0)
			zio->io_logical->io_ena =
			    fm_ena_generate(0, FM_ENA_FMT1);
		ena = zio->io_logical->io_ena;
	} else {
		ena = fm_ena_generate(0, FM_ENA_FMT1);
	}

	/*
	 * Construct the full class, detector, and other standard FMA fields.
	 */
	(void) snprintf(class, sizeof (class), "%s.%s",
	    ZFS_ERROR_CLASS, subclass);

	fm_fmri_zfs_set(detector, FM_ZFS_SCHEME_VERSION, spa_guid(spa),
	    vd != NULL ? vd->vdev_guid : 0);

	fm_ereport_set(ereport, FM_EREPORT_VERSION, class, ena, detector, NULL);

	/*
	 * Construct the per-ereport payload, depending on which parameters are
	 * passed in.
	 */

	/*
	 * Generic payload members common to all ereports.
	 */
	fm_payload_set(ereport, FM_EREPORT_PAYLOAD_ZFS_POOL,
	    DATA_TYPE_STRING, spa_name(spa), FM_EREPORT_PAYLOAD_ZFS_POOL_GUID,
	    DATA_TYPE_UINT64, spa_guid(spa),
	    FM_EREPORT_PAYLOAD_ZFS_POOL_CONTEXT, DATA_TYPE_INT32,
	    spa->spa_load_state, NULL);

	/*
	 * NOTE(review): 'spa' has already been dereferenced unconditionally
	 * above, so this NULL check is dead code -- confirm whether it can
	 * be removed or whether the earlier dereferences need guarding.
	 */
	if (spa != NULL) {
		fm_payload_set(ereport, FM_EREPORT_PAYLOAD_ZFS_POOL_FAILMODE,
		    DATA_TYPE_STRING,
		    spa_get_failmode(spa) == ZIO_FAILURE_MODE_WAIT ?
		    FM_EREPORT_FAILMODE_WAIT :
		    spa_get_failmode(spa) == ZIO_FAILURE_MODE_CONTINUE ?
		    FM_EREPORT_FAILMODE_CONTINUE : FM_EREPORT_FAILMODE_PANIC,
		    NULL);
	}

	if (vd != NULL) {
		vdev_t *pvd = vd->vdev_parent;

		fm_payload_set(ereport, FM_EREPORT_PAYLOAD_ZFS_VDEV_GUID,
		    DATA_TYPE_UINT64, vd->vdev_guid,
		    FM_EREPORT_PAYLOAD_ZFS_VDEV_TYPE,
		    DATA_TYPE_STRING, vd->vdev_ops->vdev_op_type, NULL);
		if (vd->vdev_path != NULL)
			fm_payload_set(ereport,
			    FM_EREPORT_PAYLOAD_ZFS_VDEV_PATH,
			    DATA_TYPE_STRING, vd->vdev_path, NULL);
		if (vd->vdev_devid != NULL)
			fm_payload_set(ereport,
			    FM_EREPORT_PAYLOAD_ZFS_VDEV_DEVID,
			    DATA_TYPE_STRING, vd->vdev_devid, NULL);
		if (vd->vdev_fru != NULL)
			fm_payload_set(ereport,
			    FM_EREPORT_PAYLOAD_ZFS_VDEV_FRU,
			    DATA_TYPE_STRING, vd->vdev_fru, NULL);

		if (pvd != NULL) {
			fm_payload_set(ereport,
			    FM_EREPORT_PAYLOAD_ZFS_PARENT_GUID,
			    DATA_TYPE_UINT64, pvd->vdev_guid,
			    FM_EREPORT_PAYLOAD_ZFS_PARENT_TYPE,
			    DATA_TYPE_STRING, pvd->vdev_ops->vdev_op_type,
			    NULL);
			if (pvd->vdev_path)
				fm_payload_set(ereport,
				    FM_EREPORT_PAYLOAD_ZFS_PARENT_PATH,
				    DATA_TYPE_STRING, pvd->vdev_path, NULL);
			if (pvd->vdev_devid)
				fm_payload_set(ereport,
				    FM_EREPORT_PAYLOAD_ZFS_PARENT_DEVID,
				    DATA_TYPE_STRING, pvd->vdev_devid, NULL);
		}
	}

	if (zio != NULL) {
		/*
		 * Payload common to all I/Os.
		 */
		fm_payload_set(ereport, FM_EREPORT_PAYLOAD_ZFS_ZIO_ERR,
		    DATA_TYPE_INT32, zio->io_error, NULL);

		/*
		 * If the 'size' parameter is non-zero, it indicates this is a
		 * RAID-Z or other I/O where the physical offset and length are
		 * provided for us, instead of within the zio_t.
		 */
		if (vd != NULL) {
			if (size)
				fm_payload_set(ereport,
				    FM_EREPORT_PAYLOAD_ZFS_ZIO_OFFSET,
				    DATA_TYPE_UINT64, stateoroffset,
				    FM_EREPORT_PAYLOAD_ZFS_ZIO_SIZE,
				    DATA_TYPE_UINT64, size, NULL);
			else
				fm_payload_set(ereport,
				    FM_EREPORT_PAYLOAD_ZFS_ZIO_OFFSET,
				    DATA_TYPE_UINT64, zio->io_offset,
				    FM_EREPORT_PAYLOAD_ZFS_ZIO_SIZE,
				    DATA_TYPE_UINT64, zio->io_size, NULL);
		}

		/*
		 * Payload for I/Os with corresponding logical information.
		 */
		if (zio->io_logical != NULL)
			fm_payload_set(ereport,
			    FM_EREPORT_PAYLOAD_ZFS_ZIO_OBJSET,
			    DATA_TYPE_UINT64,
			    zio->io_logical->io_bookmark.zb_objset,
			    FM_EREPORT_PAYLOAD_ZFS_ZIO_OBJECT,
			    DATA_TYPE_UINT64,
			    zio->io_logical->io_bookmark.zb_object,
			    FM_EREPORT_PAYLOAD_ZFS_ZIO_LEVEL,
			    DATA_TYPE_INT64,
			    zio->io_logical->io_bookmark.zb_level,
			    FM_EREPORT_PAYLOAD_ZFS_ZIO_BLKID,
			    DATA_TYPE_UINT64,
			    zio->io_logical->io_bookmark.zb_blkid, NULL);
	} else if (vd != NULL) {
		/*
		 * If we have a vdev but no zio, this is a device fault, and the
		 * 'stateoroffset' parameter indicates the previous state of the
		 * vdev.
		 */
		fm_payload_set(ereport,
		    FM_EREPORT_PAYLOAD_ZFS_PREV_STATE,
		    DATA_TYPE_UINT64, stateoroffset, NULL);
	}
	mutex_exit(&spa->spa_errlist_lock);

	*ereport_out = ereport;
	*detector_out = detector;
}

/* if it's <= 128 bytes, save the corruption directly */
#define	ZFM_MAX_INLINE		(128 / sizeof (uint64_t))

#define	MAX_RANGES		16

/*
 * Scratch state accumulated while diffing the good and bad buffers of a
 * checksum error; its fields become the ereport's annotation payload.
 */
typedef struct zfs_ecksum_info {
	/* histograms of set and cleared bits by bit number in a 64-bit word */
	uint16_t zei_histogram_set[sizeof (uint64_t) * NBBY];
	uint16_t zei_histogram_cleared[sizeof (uint64_t) * NBBY];

	/* inline arrays of bits set and cleared. */
	uint64_t zei_bits_set[ZFM_MAX_INLINE];
	uint64_t zei_bits_cleared[ZFM_MAX_INLINE];

	/*
	 * for each range, the number of bits set and cleared.  The Hamming
	 * distance between the good and bad buffers is the sum of them all.
	 */
	uint32_t zei_range_sets[MAX_RANGES];
	uint32_t zei_range_clears[MAX_RANGES];

	/* half-open [zr_start, zr_end) ranges of differing uint64_t words */
	struct zei_ranges {
		uint32_t zr_start;
		uint32_t zr_end;
	} zei_ranges[MAX_RANGES];

	size_t	zei_range_count;
	uint32_t zei_mingap;
	uint32_t zei_allowed_mingap;

} zfs_ecksum_info_t;

/*
 * Fold one 64-bit difference word into a per-bit-position histogram and
 * bump *count by the number of bits set in the word.  Bit positions are
 * recorded big-endian (most-significant bit first).
 */
static void
update_histogram(uint64_t value_arg, uint16_t *hist, uint32_t *count)
{
	size_t i;
	size_t bits = 0;
	uint64_t value = BE_64(value_arg);

	/* We store the bits in big-endian (largest-first) order */
	for (i = 0; i < 64; i++) {
		if (value & (1ull << i)) {
			hist[63 - i]++;
			++bits;
		}
	}
	/* update the count of bits changed */
	*count += bits;
}

/*
 * We've now filled up the range array, and need to increase "mingap" and
 * shrink the range list accordingly.  zei_mingap is always the smallest
 * distance between array entries, so we set the new_allowed_gap to be
 * one greater than that.  We then go through the list, joining together
 * any ranges which are closer than the new_allowed_gap.
 *
 * By construction, there will be at least one.  We also update zei_mingap
 * to the new smallest gap, to prepare for our next invocation.
 */
static void
shrink_ranges(zfs_ecksum_info_t *eip)
{
	uint32_t mingap = UINT32_MAX;
	uint32_t new_allowed_gap = eip->zei_mingap + 1;

	size_t idx, output;
	size_t max = eip->zei_range_count;

	struct zei_ranges *r = eip->zei_ranges;

	ASSERT3U(eip->zei_range_count, >, 0);
	ASSERT3U(eip->zei_range_count, <=, MAX_RANGES);

	output = idx = 0;
	while (idx < max - 1) {
		uint32_t start = r[idx].zr_start;
		uint32_t end = r[idx].zr_end;

		while (idx < max - 1) {
			idx++;

			uint32_t nstart = r[idx].zr_start;
			uint32_t nend = r[idx].zr_end;

			uint32_t gap = nstart - end;
			if (gap < new_allowed_gap) {
				/* close enough: merge into current range */
				end = nend;
				continue;
			}
			if (gap < mingap)
				mingap = gap;
			break;
		}
		r[output].zr_start = start;
		r[output].zr_end = end;
		output++;
	}
	ASSERT3U(output, <, eip->zei_range_count);
	eip->zei_range_count = output;
	eip->zei_mingap = mingap;
	eip->zei_allowed_mingap = new_allowed_gap;
}

/*
 * Record a new differing range [start, end), merging it into the previous
 * range if the gap is below zei_allowed_mingap, and compacting the range
 * list via shrink_ranges() when it is full.
 */
static void
add_range(zfs_ecksum_info_t *eip, int start, int end)
{
	struct zei_ranges *r = eip->zei_ranges;
	size_t count = eip->zei_range_count;

	if (count >= MAX_RANGES) {
		shrink_ranges(eip);
		count = eip->zei_range_count;
	}
	if (count == 0) {
		eip->zei_mingap = UINT32_MAX;
		eip->zei_allowed_mingap = 1;
	} else {
		int gap = start - r[count - 1].zr_end;

		if (gap < eip->zei_allowed_mingap) {
			/* adjacent enough: extend the previous range */
			r[count - 1].zr_end = end;
			return;
		}
		if (gap < eip->zei_mingap)
			eip->zei_mingap = gap;
	}
	r[count].zr_start = start;
	r[count].zr_end = end;
	eip->zei_range_count++;
}

/*
 * Total number of differing uint64_t words across all recorded ranges
 * (i.e. how many entries the inline bits arrays would need).
 */
static size_t
range_total_size(zfs_ecksum_info_t *eip)
{
	struct zei_ranges *r = eip->zei_ranges;
	size_t count = eip->zei_range_count;
	size_t result = 0;
	size_t idx;

	for (idx = 0; idx < count; idx++)
		result += (r[idx].zr_end - r[idx].zr_start);

	return (result);
}

/*
 * Annotate a checksum ereport with the expected/actual checksums and a
 * description of how 'badbuf' differs from 'goodbuf': the differing byte
 * ranges, plus either the raw set/cleared bits (small corruptions) or
 * per-bit-position histograms (large ones).
 *
 * Returns a kmem-allocated zfs_ecksum_info_t the caller must free, or
 * NULL when the buffers are identical and 'drop_if_identical' is set.
 * Injected errors and missing buffers get no diff annotation.
 */
static zfs_ecksum_info_t *
annotate_ecksum(nvlist_t *ereport, zio_bad_cksum_t *info,
    const uint8_t *goodbuf, const uint8_t *badbuf, size_t size,
    boolean_t drop_if_identical)
{
	const uint64_t *good = (const uint64_t *)goodbuf;
	const uint64_t *bad = (const uint64_t *)badbuf;

	uint64_t allset = 0;
	uint64_t allcleared = 0;

	size_t nui64s = size / sizeof (uint64_t);

	size_t inline_size;
	int no_inline = 0;
	size_t idx;
	size_t range;

	size_t offset = 0;
	ssize_t start = -1;

	zfs_ecksum_info_t *eip = kmem_zalloc(sizeof (*eip), KM_SLEEP);

	/* don't do any annotation for injected checksum errors */
	if (info != NULL && info->zbc_injected)
		return (eip);

	if (info != NULL && info->zbc_has_cksum) {
		fm_payload_set(ereport,
		    FM_EREPORT_PAYLOAD_ZFS_CKSUM_EXPECTED,
		    DATA_TYPE_UINT64_ARRAY,
		    sizeof (info->zbc_expected) / sizeof (uint64_t),
		    (uint64_t *)&info->zbc_expected,
		    FM_EREPORT_PAYLOAD_ZFS_CKSUM_ACTUAL,
		    DATA_TYPE_UINT64_ARRAY,
		    sizeof (info->zbc_actual) / sizeof (uint64_t),
		    (uint64_t *)&info->zbc_actual,
		    FM_EREPORT_PAYLOAD_ZFS_CKSUM_ALGO,
		    DATA_TYPE_STRING,
		    info->zbc_checksum_name,
		    NULL);

		if (info->zbc_byteswapped) {
			fm_payload_set(ereport,
			    FM_EREPORT_PAYLOAD_ZFS_CKSUM_BYTESWAP,
			    DATA_TYPE_BOOLEAN, 1,
			    NULL);
		}
	}

	if (badbuf == NULL || goodbuf == NULL)
		return (eip);

	ASSERT3U(nui64s, <=, UINT16_MAX);
	ASSERT3U(size, ==, nui64s * sizeof (uint64_t));
	ASSERT3U(size, <=, SPA_MAXBLOCKSIZE);
	ASSERT3U(size, <=, UINT32_MAX);

	/* build up the range list by comparing the two buffers. */
	for (idx = 0; idx < nui64s; idx++) {
		if (good[idx] == bad[idx]) {
			if (start == -1)
				continue;

			add_range(eip, start, idx);
			start = -1;
		} else {
			if (start != -1)
				continue;

			start = idx;
		}
	}
	if (start != -1)
		add_range(eip, start, idx);

	/* See if it will fit in our inline buffers */
	inline_size = range_total_size(eip);
	if (inline_size > ZFM_MAX_INLINE)
		no_inline = 1;

	/*
	 * If there is no change and we want to drop if the buffers are
	 * identical, do so.
	 */
	if (inline_size == 0 && drop_if_identical) {
		kmem_free(eip, sizeof (*eip));
		return (NULL);
	}

	/*
	 * Now walk through the ranges, filling in the details of the
	 * differences.  Also convert our uint64_t-array offsets to byte
	 * offsets.
	 */
	for (range = 0; range < eip->zei_range_count; range++) {
		size_t start = eip->zei_ranges[range].zr_start;
		size_t end = eip->zei_ranges[range].zr_end;

		for (idx = start; idx < end; idx++) {
			uint64_t set, cleared;

			/* bits set in bad, but not in good */
			set = ((~good[idx]) & bad[idx]);
			/* bits set in good, but not in bad */
			cleared = (good[idx] & (~bad[idx]));

			allset |= set;
			allcleared |= cleared;

			if (!no_inline) {
				ASSERT3U(offset, <, inline_size);
				eip->zei_bits_set[offset] = set;
				eip->zei_bits_cleared[offset] = cleared;
				offset++;
			}

			update_histogram(set, eip->zei_histogram_set,
			    &eip->zei_range_sets[range]);
			update_histogram(cleared, eip->zei_histogram_cleared,
			    &eip->zei_range_clears[range]);
		}

		/* convert to byte offsets */
		eip->zei_ranges[range].zr_start	*= sizeof (uint64_t);
		eip->zei_ranges[range].zr_end	*= sizeof (uint64_t);
	}
	eip->zei_allowed_mingap	*= sizeof (uint64_t);
	inline_size		*= sizeof (uint64_t);

	/* fill in ereport */
	fm_payload_set(ereport,
	    FM_EREPORT_PAYLOAD_ZFS_BAD_OFFSET_RANGES,
	    DATA_TYPE_UINT32_ARRAY, 2 * eip->zei_range_count,
	    (uint32_t *)eip->zei_ranges,
	    FM_EREPORT_PAYLOAD_ZFS_BAD_RANGE_MIN_GAP,
	    DATA_TYPE_UINT32, eip->zei_allowed_mingap,
	    FM_EREPORT_PAYLOAD_ZFS_BAD_RANGE_SETS,
	    DATA_TYPE_UINT32_ARRAY, eip->zei_range_count, eip->zei_range_sets,
	    FM_EREPORT_PAYLOAD_ZFS_BAD_RANGE_CLEARS,
	    DATA_TYPE_UINT32_ARRAY, eip->zei_range_count, eip->zei_range_clears,
	    NULL);

	if (!no_inline) {
		fm_payload_set(ereport,
		    FM_EREPORT_PAYLOAD_ZFS_BAD_SET_BITS,
		    DATA_TYPE_UINT8_ARRAY,
		    inline_size, (uint8_t *)eip->zei_bits_set,
		    FM_EREPORT_PAYLOAD_ZFS_BAD_CLEARED_BITS,
		    DATA_TYPE_UINT8_ARRAY,
		    inline_size, (uint8_t *)eip->zei_bits_cleared,
		    NULL);
	} else {
		fm_payload_set(ereport,
		    FM_EREPORT_PAYLOAD_ZFS_BAD_SET_HISTOGRAM,
		    DATA_TYPE_UINT16_ARRAY,
		    NBBY * sizeof (uint64_t), eip->zei_histogram_set,
		    FM_EREPORT_PAYLOAD_ZFS_BAD_CLEARED_HISTOGRAM,
		    DATA_TYPE_UINT16_ARRAY,
		    NBBY * sizeof (uint64_t), eip->zei_histogram_cleared,
		    NULL);
	}
	return (eip);
}
#endif

Adams void 662*22fe2c88SJonathan Adams zfs_ereport_post(const char *subclass, spa_t *spa, vdev_t *vd, zio_t *zio, 663*22fe2c88SJonathan Adams uint64_t stateoroffset, uint64_t size) 664*22fe2c88SJonathan Adams { 665*22fe2c88SJonathan Adams #ifdef _KERNEL 666*22fe2c88SJonathan Adams nvlist_t *ereport = NULL; 667*22fe2c88SJonathan Adams nvlist_t *detector = NULL; 668*22fe2c88SJonathan Adams 669*22fe2c88SJonathan Adams zfs_ereport_start(&ereport, &detector, 670*22fe2c88SJonathan Adams subclass, spa, vd, zio, stateoroffset, size); 671*22fe2c88SJonathan Adams 672*22fe2c88SJonathan Adams if (ereport == NULL) 673*22fe2c88SJonathan Adams return; 674*22fe2c88SJonathan Adams 675ea8dc4b6Seschrock fm_ereport_post(ereport, EVCH_SLEEP); 676ea8dc4b6Seschrock 677ea8dc4b6Seschrock fm_nvlist_destroy(ereport, FM_NVA_FREE); 678ea8dc4b6Seschrock fm_nvlist_destroy(detector, FM_NVA_FREE); 679ea8dc4b6Seschrock #endif 680ea8dc4b6Seschrock } 681ea8dc4b6Seschrock 682*22fe2c88SJonathan Adams void 683*22fe2c88SJonathan Adams zfs_ereport_start_checksum(spa_t *spa, vdev_t *vd, 684*22fe2c88SJonathan Adams struct zio *zio, uint64_t offset, uint64_t length, void *arg, 685*22fe2c88SJonathan Adams zio_bad_cksum_t *info) 686*22fe2c88SJonathan Adams { 687*22fe2c88SJonathan Adams zio_cksum_report_t *report = kmem_zalloc(sizeof (*report), KM_SLEEP); 688*22fe2c88SJonathan Adams 689*22fe2c88SJonathan Adams if (zio->io_vsd != NULL) 690*22fe2c88SJonathan Adams zio->io_vsd_ops->vsd_cksum_report(zio, report, arg); 691*22fe2c88SJonathan Adams else 692*22fe2c88SJonathan Adams zio_vsd_default_cksum_report(zio, report, arg); 693*22fe2c88SJonathan Adams 694*22fe2c88SJonathan Adams /* copy the checksum failure information if it was provided */ 695*22fe2c88SJonathan Adams if (info != NULL) { 696*22fe2c88SJonathan Adams report->zcr_ckinfo = kmem_zalloc(sizeof (*info), KM_SLEEP); 697*22fe2c88SJonathan Adams bcopy(info, report->zcr_ckinfo, sizeof (*info)); 698*22fe2c88SJonathan Adams } 699*22fe2c88SJonathan Adams 
700*22fe2c88SJonathan Adams report->zcr_length = length; 701*22fe2c88SJonathan Adams 702*22fe2c88SJonathan Adams #ifdef _KERNEL 703*22fe2c88SJonathan Adams zfs_ereport_start(&report->zcr_ereport, &report->zcr_detector, 704*22fe2c88SJonathan Adams FM_EREPORT_ZFS_CHECKSUM, spa, vd, zio, offset, length); 705*22fe2c88SJonathan Adams 706*22fe2c88SJonathan Adams if (report->zcr_ereport == NULL) { 707*22fe2c88SJonathan Adams report->zcr_free(report->zcr_cbdata, report->zcr_cbinfo); 708*22fe2c88SJonathan Adams kmem_free(report, sizeof (*report)); 709*22fe2c88SJonathan Adams return; 710*22fe2c88SJonathan Adams } 711*22fe2c88SJonathan Adams #endif 712*22fe2c88SJonathan Adams 713*22fe2c88SJonathan Adams mutex_enter(&spa->spa_errlist_lock); 714*22fe2c88SJonathan Adams report->zcr_next = zio->io_logical->io_cksum_report; 715*22fe2c88SJonathan Adams zio->io_logical->io_cksum_report = report; 716*22fe2c88SJonathan Adams mutex_exit(&spa->spa_errlist_lock); 717*22fe2c88SJonathan Adams } 718*22fe2c88SJonathan Adams 719*22fe2c88SJonathan Adams void 720*22fe2c88SJonathan Adams zfs_ereport_finish_checksum(zio_cksum_report_t *report, 721*22fe2c88SJonathan Adams const void *good_data, const void *bad_data, boolean_t drop_if_identical) 722*22fe2c88SJonathan Adams { 723*22fe2c88SJonathan Adams #ifdef _KERNEL 724*22fe2c88SJonathan Adams zfs_ecksum_info_t *info = NULL; 725*22fe2c88SJonathan Adams info = annotate_ecksum(report->zcr_ereport, report->zcr_ckinfo, 726*22fe2c88SJonathan Adams good_data, bad_data, report->zcr_length, drop_if_identical); 727*22fe2c88SJonathan Adams 728*22fe2c88SJonathan Adams if (info != NULL) 729*22fe2c88SJonathan Adams fm_ereport_post(report->zcr_ereport, EVCH_SLEEP); 730*22fe2c88SJonathan Adams 731*22fe2c88SJonathan Adams fm_nvlist_destroy(report->zcr_ereport, FM_NVA_FREE); 732*22fe2c88SJonathan Adams fm_nvlist_destroy(report->zcr_detector, FM_NVA_FREE); 733*22fe2c88SJonathan Adams report->zcr_ereport = report->zcr_detector = NULL; 734*22fe2c88SJonathan Adams 
735*22fe2c88SJonathan Adams if (info != NULL) 736*22fe2c88SJonathan Adams kmem_free(info, sizeof (*info)); 737*22fe2c88SJonathan Adams #endif 738*22fe2c88SJonathan Adams } 739*22fe2c88SJonathan Adams 740*22fe2c88SJonathan Adams void 741*22fe2c88SJonathan Adams zfs_ereport_free_checksum(zio_cksum_report_t *rpt) 742*22fe2c88SJonathan Adams { 743*22fe2c88SJonathan Adams #ifdef _KERNEL 744*22fe2c88SJonathan Adams if (rpt->zcr_ereport != NULL) { 745*22fe2c88SJonathan Adams fm_nvlist_destroy(rpt->zcr_ereport, 746*22fe2c88SJonathan Adams FM_NVA_FREE); 747*22fe2c88SJonathan Adams fm_nvlist_destroy(rpt->zcr_detector, 748*22fe2c88SJonathan Adams FM_NVA_FREE); 749*22fe2c88SJonathan Adams } 750*22fe2c88SJonathan Adams #endif 751*22fe2c88SJonathan Adams rpt->zcr_free(rpt->zcr_cbdata, rpt->zcr_cbinfo); 752*22fe2c88SJonathan Adams 753*22fe2c88SJonathan Adams if (rpt->zcr_ckinfo != NULL) 754*22fe2c88SJonathan Adams kmem_free(rpt->zcr_ckinfo, sizeof (*rpt->zcr_ckinfo)); 755*22fe2c88SJonathan Adams 756*22fe2c88SJonathan Adams kmem_free(rpt, sizeof (*rpt)); 757*22fe2c88SJonathan Adams } 758*22fe2c88SJonathan Adams 759*22fe2c88SJonathan Adams void 760*22fe2c88SJonathan Adams zfs_ereport_send_interim_checksum(zio_cksum_report_t *report) 761*22fe2c88SJonathan Adams { 762*22fe2c88SJonathan Adams #ifdef _KERNEL 763*22fe2c88SJonathan Adams fm_ereport_post(report->zcr_ereport, EVCH_SLEEP); 764*22fe2c88SJonathan Adams #endif 765*22fe2c88SJonathan Adams } 766*22fe2c88SJonathan Adams 767*22fe2c88SJonathan Adams void 768*22fe2c88SJonathan Adams zfs_ereport_post_checksum(spa_t *spa, vdev_t *vd, 769*22fe2c88SJonathan Adams struct zio *zio, uint64_t offset, uint64_t length, 770*22fe2c88SJonathan Adams const void *good_data, const void *bad_data, zio_bad_cksum_t *zbc) 771*22fe2c88SJonathan Adams { 772*22fe2c88SJonathan Adams #ifdef _KERNEL 773*22fe2c88SJonathan Adams nvlist_t *ereport = NULL; 774*22fe2c88SJonathan Adams nvlist_t *detector = NULL; 775*22fe2c88SJonathan Adams zfs_ecksum_info_t *info; 
776*22fe2c88SJonathan Adams 777*22fe2c88SJonathan Adams zfs_ereport_start(&ereport, &detector, 778*22fe2c88SJonathan Adams FM_EREPORT_ZFS_CHECKSUM, spa, vd, zio, offset, length); 779*22fe2c88SJonathan Adams 780*22fe2c88SJonathan Adams if (ereport == NULL) 781*22fe2c88SJonathan Adams return; 782*22fe2c88SJonathan Adams 783*22fe2c88SJonathan Adams info = annotate_ecksum(ereport, zbc, good_data, bad_data, length, 784*22fe2c88SJonathan Adams B_FALSE); 785*22fe2c88SJonathan Adams 786*22fe2c88SJonathan Adams if (info != NULL) 787*22fe2c88SJonathan Adams fm_ereport_post(ereport, EVCH_SLEEP); 788*22fe2c88SJonathan Adams 789*22fe2c88SJonathan Adams fm_nvlist_destroy(ereport, FM_NVA_FREE); 790*22fe2c88SJonathan Adams fm_nvlist_destroy(detector, FM_NVA_FREE); 791*22fe2c88SJonathan Adams 792*22fe2c88SJonathan Adams if (info != NULL) 793*22fe2c88SJonathan Adams kmem_free(info, sizeof (*info)); 794*22fe2c88SJonathan Adams #endif 795*22fe2c88SJonathan Adams } 796*22fe2c88SJonathan Adams 7973d7072f8Seschrock static void 7983d7072f8Seschrock zfs_post_common(spa_t *spa, vdev_t *vd, const char *name) 799ea8dc4b6Seschrock { 800ea8dc4b6Seschrock #ifdef _KERNEL 801ea8dc4b6Seschrock nvlist_t *resource; 802ea8dc4b6Seschrock char class[64]; 803ea8dc4b6Seschrock 8041d713200SEric Schrock if (spa->spa_load_state == SPA_LOAD_TRYIMPORT) 8051d713200SEric Schrock return; 8061d713200SEric Schrock 807ea8dc4b6Seschrock if ((resource = fm_nvlist_create(NULL)) == NULL) 808ea8dc4b6Seschrock return; 809ea8dc4b6Seschrock 810ea8dc4b6Seschrock (void) snprintf(class, sizeof (class), "%s.%s.%s", FM_RSRC_RESOURCE, 8113d7072f8Seschrock ZFS_ERROR_CLASS, name); 812ea8dc4b6Seschrock VERIFY(nvlist_add_uint8(resource, FM_VERSION, FM_RSRC_VERSION) == 0); 813ea8dc4b6Seschrock VERIFY(nvlist_add_string(resource, FM_CLASS, class) == 0); 814ea8dc4b6Seschrock VERIFY(nvlist_add_uint64(resource, 815ea8dc4b6Seschrock FM_EREPORT_PAYLOAD_ZFS_POOL_GUID, spa_guid(spa)) == 0); 816ea8dc4b6Seschrock if (vd) 817ea8dc4b6Seschrock 
VERIFY(nvlist_add_uint64(resource, 818ea8dc4b6Seschrock FM_EREPORT_PAYLOAD_ZFS_VDEV_GUID, vd->vdev_guid) == 0); 819ea8dc4b6Seschrock 820ea8dc4b6Seschrock fm_ereport_post(resource, EVCH_SLEEP); 821ea8dc4b6Seschrock 822ea8dc4b6Seschrock fm_nvlist_destroy(resource, FM_NVA_FREE); 823ea8dc4b6Seschrock #endif 824ea8dc4b6Seschrock } 8253d7072f8Seschrock 8263d7072f8Seschrock /* 8273d7072f8Seschrock * The 'resource.fs.zfs.removed' event is an internal signal that the given vdev 8283d7072f8Seschrock * has been removed from the system. This will cause the DE to ignore any 8293d7072f8Seschrock * recent I/O errors, inferring that they are due to the asynchronous device 8303d7072f8Seschrock * removal. 8313d7072f8Seschrock */ 8323d7072f8Seschrock void 8333d7072f8Seschrock zfs_post_remove(spa_t *spa, vdev_t *vd) 8343d7072f8Seschrock { 8353d7072f8Seschrock zfs_post_common(spa, vd, FM_RESOURCE_REMOVED); 8363d7072f8Seschrock } 8373d7072f8Seschrock 8383d7072f8Seschrock /* 8393d7072f8Seschrock * The 'resource.fs.zfs.autoreplace' event is an internal signal that the pool 8403d7072f8Seschrock * has the 'autoreplace' property set, and therefore any broken vdevs will be 8413d7072f8Seschrock * handled by higher level logic, and no vdev fault should be generated. 8423d7072f8Seschrock */ 8433d7072f8Seschrock void 8443d7072f8Seschrock zfs_post_autoreplace(spa_t *spa, vdev_t *vd) 8453d7072f8Seschrock { 8463d7072f8Seschrock zfs_post_common(spa, vd, FM_RESOURCE_AUTOREPLACE); 8473d7072f8Seschrock } 848