/*
 * This file and its contents are supplied under the terms of the
 * Common Development and Distribution License ("CDDL"), version 1.0.
 * You may only use this file in accordance with the terms of version
 * 1.0 of the CDDL.
 *
 * A full copy of the text of the CDDL should have accompanied this
 * source.  A copy of the CDDL is also available via the Internet at
 * http://www.illumos.org/license/CDDL.
 */

/*
 * Copyright 2016 Nexenta Systems, Inc. All rights reserved.
 * Copyright 2016 Tegile Systems, Inc. All rights reserved.
 */

/*
 * blkdev driver for NVMe compliant storage devices
 *
 * This driver was written to conform to version 1.0e of the NVMe
 * specification.  It may work with newer versions, but that is completely
 * untested and disabled by default.
 *
 * The driver has only been tested on x86 systems and will not work on
 * big-endian systems without changes to the code accessing registers and
 * data structures used by the hardware.
 *
 *
 * Interrupt Usage:
 *
 * The driver will use a FIXED interrupt while configuring the device as the
 * specification requires.  Later in the attach process it will switch to
 * MSI-X or MSI if supported.  The driver wants to have one interrupt vector
 * per CPU, but it will work correctly if fewer are available.  Interrupts
 * can be shared by queues; the interrupt handler will iterate through the
 * I/O queue array by steps of n_intr_cnt.  Usually only the admin queue will
 * share an interrupt with one I/O queue.  The interrupt handler will retrieve
 * completed commands from all queues sharing an interrupt vector and will
 * post them to a taskq for completion processing.
 *
 *
 * Command Processing:
 *
 * NVMe devices can have up to 65536 I/O queue pairs, with each queue holding
 * up to 65536 I/O commands.  The driver will configure one I/O queue pair per
 * available interrupt vector, with the queue length usually much smaller than
 * the maximum of 65536.  If the hardware doesn't provide enough queues, fewer
 * interrupt vectors will be used.
 *
 * Additionally the hardware provides a single special admin queue pair that
 * can hold up to 4096 admin commands.
 *
 * From the hardware perspective both queues of a queue pair are independent,
 * but they share some driver state: the command array (holding pointers to
 * commands currently being processed by the hardware) and the active command
 * counter.  Access to the submission side of a queue pair and the shared
 * state is protected by nq_mutex.  The completion side of a queue pair does
 * not need that protection apart from its access to the shared state; it is
 * called only in the interrupt handler, which does not run concurrently for
 * the same interrupt vector.
 *
 * When a command is submitted to a queue pair the active command counter is
 * incremented and a pointer to the command is stored in the command array.
 * The array index is used as command identifier (CID) in the submission
 * queue entry.  Some commands may take a very long time to complete, and if
 * the queue wraps around in that time a submission may find the next array
 * slot to still be used by a long-running command.  In this case the array
 * is sequentially searched for the next free slot.  The length of the command
 * array is the same as the configured queue length.
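 *
 * As an illustration, the CID allocation described above boils down to the
 * following (a simplified sketch of the logic in nvme_submit_cmd() below,
 * performed under nq_mutex; it adds no behavior beyond what the driver
 * already implements):
 *
 *	while (qp->nq_cmd[qp->nq_next_cmd] != NULL)
 *		qp->nq_next_cmd = (qp->nq_next_cmd + 1) % qp->nq_nentry;
 *	qp->nq_cmd[qp->nq_next_cmd] = cmd;
 *	cmd->nc_sqe.sqe_cid = qp->nq_next_cmd;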
 *
 *
 * Namespace Support:
 *
 * NVMe devices can have multiple namespaces, each being an independent data
 * store.  The driver supports multiple namespaces and creates a blkdev
 * interface for each namespace found.  Namespaces can have various attributes
 * to support thin provisioning, extended LBAs, and protection information.
 * This driver does not support any of this and ignores namespaces that have
 * these attributes.
 *
 *
 * Blkdev Interface:
 *
 * This driver uses blkdev to do all the heavy lifting involved with
 * presenting a disk device to the system.  As a result, the processing of
 * I/O requests is relatively simple as blkdev takes care of partitioning,
 * boundary checks, DMA setup, and splitting of transfers into manageable
 * chunks.
 *
 * I/O requests coming in from blkdev are turned into NVM commands and posted
 * to an I/O queue.  The queue is selected by taking the CPU id modulo the
 * number of queues.  There is currently no timeout handling of I/O commands.
 *
 * Blkdev also supports querying device/media information and generating a
 * devid.  The driver reports the best block size as determined by the
 * namespace format back to blkdev as physical block size to support partition
 * and block alignment.  The devid is composed using the device vendor ID,
 * model number, serial number, and the namespace ID.
 *
 *
 * Error Handling:
 *
 * Error handling is currently limited to detecting fatal hardware errors,
 * either by asynchronous events, or synchronously through command status or
 * admin command timeouts.  In case of severe errors the device is fenced off;
 * all further requests will return EIO.  FMA is then called to fault the
 * device.
 *
 * The hardware has a limit for outstanding asynchronous event requests.
 * Before this limit is known the driver assumes it is at least 1 and posts a
 * single asynchronous request.  Later, when the limit is known, more
 * asynchronous event requests are posted to allow quicker reception of error
 * information.  When an asynchronous event is posted by the hardware the
 * driver will parse the error status fields and log information or fault the
 * device, depending on the severity of the asynchronous event.  The
 * asynchronous event request is then reused and posted to the admin queue
 * again.
 *
 * On command completion the command status is checked for errors.  In case of
 * errors indicating a driver bug the driver panics.  Almost all other error
 * status values just cause EIO to be returned.
 *
 * Command timeouts are currently detected for all admin commands except
 * asynchronous event requests.  If a command times out and the hardware
 * appears to be healthy the driver attempts to abort the command.  If this
 * fails the driver assumes the device to be dead, fences it off, and calls
 * FMA to retire it.  In general admin commands are issued at attach time
 * only.  No timeout handling of normal I/O commands is presently done.
 *
 * In some cases it may be possible that the ABORT command times out, too.  In
 * that case the device is also declared dead and fenced off.
 *
 *
 * Quiesce / Fast Reboot:
 *
 * The driver currently does not support fast reboot.  A quiesce(9E) entry
 * point is still provided which is used to send a shutdown notification to
 * the device.
 *
 *
 * Driver Configuration:
 *
 * The following driver properties can be changed to control some aspects of
 * the driver's operation:
 * - strict-version: can be set to 0 to allow devices conforming to newer
 *   versions to be used
 * - ignore-unknown-vendor-status: can be set to 1 to not handle any vendor
 *   specific command status as a fatal error leading to device faulting
 * - admin-queue-len: the maximum length of the admin queue (16-4096)
 * - io-queue-len: the maximum length of the I/O queues (16-65536)
 * - async-event-limit: the maximum number of asynchronous event requests to
 *   be posted by the driver
 *
 *
 * TODO:
 * - figure out sane default for I/O queue depth reported to blkdev
 * - polled I/O support to support kernel core dumping
 * - FMA handling of media errors
 * - support for the Volatile Write Cache
 * - support for devices supporting very large I/O requests using chained PRPs
 * - support for querying log pages from user space
 * - support for configuring hardware parameters like interrupt coalescing
 * - support for media formatting and hard partitioning into namespaces
 * - support for big-endian systems
 * - support for fast reboot
 */

#include <sys/byteorder.h>
#ifdef _BIG_ENDIAN
#error nvme driver needs porting for big-endian platforms
#endif

#include <sys/modctl.h>
#include <sys/conf.h>
#include <sys/devops.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/bitmap.h>
#include <sys/sysmacros.h>
#include <sys/param.h>
#include <sys/varargs.h>
#include <sys/cpuvar.h>
#include <sys/disp.h>
#include <sys/blkdev.h>
#include <sys/atomic.h>
#include <sys/archsystm.h>
#include <sys/sata/sata_hba.h>

#include "nvme_reg.h"
#include "nvme_var.h"


/* NVMe spec version supported */
static const int nvme_version_major = 1;
static const int nvme_version_minor = 0;

static int nvme_attach(dev_info_t *, ddi_attach_cmd_t);
static int nvme_detach(dev_info_t *, ddi_detach_cmd_t);
static int nvme_quiesce(dev_info_t *);
static int nvme_fm_errcb(dev_info_t *, ddi_fm_error_t *, const void *);
static int nvme_setup_interrupts(nvme_t *, int, int);
static void nvme_release_interrupts(nvme_t *);
static uint_t nvme_intr(caddr_t, caddr_t);

static void nvme_shutdown(nvme_t *, int, boolean_t);
static boolean_t nvme_reset(nvme_t *, boolean_t);
static int nvme_init(nvme_t *);
static nvme_cmd_t *nvme_alloc_cmd(nvme_t *, int);
static void nvme_free_cmd(nvme_cmd_t *);
static nvme_cmd_t *nvme_create_nvm_cmd(nvme_namespace_t *, uint8_t,
    bd_xfer_t *);
static int nvme_admin_cmd(nvme_cmd_t *, int);
static int nvme_submit_cmd(nvme_qpair_t *, nvme_cmd_t *);
static nvme_cmd_t *nvme_retrieve_cmd(nvme_t *, nvme_qpair_t *);
static boolean_t nvme_wait_cmd(nvme_cmd_t *, uint_t);
static void nvme_wakeup_cmd(void *);
static void nvme_async_event_task(void *);

static int nvme_check_unknown_cmd_status(nvme_cmd_t *);
static int nvme_check_vendor_cmd_status(nvme_cmd_t *);
static int nvme_check_integrity_cmd_status(nvme_cmd_t *);
static int nvme_check_specific_cmd_status(nvme_cmd_t *);
static int nvme_check_generic_cmd_status(nvme_cmd_t *);
static inline int nvme_check_cmd_status(nvme_cmd_t *);

static void nvme_abort_cmd(nvme_cmd_t *);
static int nvme_async_event(nvme_t *);
static void *nvme_get_logpage(nvme_t *, uint8_t, ...);
static void *nvme_identify(nvme_t *, uint32_t);
static int nvme_set_nqueues(nvme_t *, uint16_t);

static void nvme_free_dma(nvme_dma_t *);
static int nvme_zalloc_dma(nvme_t *, size_t, uint_t, ddi_dma_attr_t *,
    nvme_dma_t **);
static int nvme_zalloc_queue_dma(nvme_t *, uint32_t, uint16_t, uint_t,
    nvme_dma_t **);
static void nvme_free_qpair(nvme_qpair_t *);
static int nvme_alloc_qpair(nvme_t *, uint32_t, nvme_qpair_t **, int);
static int nvme_create_io_qpair(nvme_t *, nvme_qpair_t *, uint16_t);

static inline void nvme_put64(nvme_t *, uintptr_t, uint64_t);
static inline void nvme_put32(nvme_t *, uintptr_t, uint32_t);
static inline uint64_t nvme_get64(nvme_t *, uintptr_t);
static inline uint32_t nvme_get32(nvme_t *, uintptr_t);

static boolean_t nvme_check_regs_hdl(nvme_t *);
static boolean_t nvme_check_dma_hdl(nvme_dma_t *);

static int nvme_fill_prp(nvme_cmd_t *, bd_xfer_t *);

static void nvme_bd_xfer_done(void *);
static void nvme_bd_driveinfo(void *, bd_drive_t *);
static int nvme_bd_mediainfo(void *, bd_media_t *);
static int nvme_bd_cmd(nvme_namespace_t *, bd_xfer_t *, uint8_t);
static int nvme_bd_read(void *, bd_xfer_t *);
static int nvme_bd_write(void *, bd_xfer_t *);
static int nvme_bd_sync(void *, bd_xfer_t *);
static int nvme_bd_devid(void *, dev_info_t *, ddi_devid_t *);

static void nvme_prepare_devid(nvme_t *, uint32_t);

static void *nvme_state;
static kmem_cache_t *nvme_cmd_cache;

/*
 * DMA attributes for queue DMA memory
 *
 * Queue DMA memory must be page aligned.  The maximum length of a queue is
 * 65536 entries, and an entry can be 64 bytes long.
 */
static ddi_dma_attr_t nvme_queue_dma_attr = {
	.dma_attr_version = DMA_ATTR_V0,
	.dma_attr_addr_lo = 0,
	.dma_attr_addr_hi = 0xffffffffffffffffULL,
	.dma_attr_count_max = (UINT16_MAX + 1) * sizeof (nvme_sqe_t),
	.dma_attr_align = 0x1000,
	.dma_attr_burstsizes = 0x7ff,
	.dma_attr_minxfer = 0x1000,
	.dma_attr_maxxfer = (UINT16_MAX + 1) * sizeof (nvme_sqe_t),
	.dma_attr_seg = 0xffffffffffffffffULL,
	.dma_attr_sgllen = 1,
	.dma_attr_granular = 1,
	.dma_attr_flags = 0,
};

/*
 * DMA attributes for transfers using Physical Region Page (PRP) entries
 *
 * A PRP entry describes one page of DMA memory using the page size specified
 * in the controller configuration's memory page size register (CC.MPS).  It
 * uses a 64bit base address aligned to this page size.  There is no
 * limitation on chaining PRPs together for arbitrarily large DMA transfers.
 */
static ddi_dma_attr_t nvme_prp_dma_attr = {
	.dma_attr_version = DMA_ATTR_V0,
	.dma_attr_addr_lo = 0,
	.dma_attr_addr_hi = 0xffffffffffffffffULL,
	.dma_attr_count_max = 0xfff,
	.dma_attr_align = 0x1000,
	.dma_attr_burstsizes = 0x7ff,
	.dma_attr_minxfer = 0x1000,
	.dma_attr_maxxfer = 0x1000,
	.dma_attr_seg = 0xffffffffffffffffULL,
	.dma_attr_sgllen = -1,
	.dma_attr_granular = 1,
	.dma_attr_flags = 0,
};

/*
 * DMA attributes for transfers using scatter/gather lists
 *
 * A SGL entry describes a chunk of DMA memory using a 64bit base address and
 * a 32bit length field.  SGL Segment and SGL Last Segment entries require the
 * length to be a multiple of 16 bytes.
 */
static ddi_dma_attr_t nvme_sgl_dma_attr = {
	.dma_attr_version = DMA_ATTR_V0,
	.dma_attr_addr_lo = 0,
	.dma_attr_addr_hi = 0xffffffffffffffffULL,
	.dma_attr_count_max = 0xffffffffUL,
	.dma_attr_align = 1,
	.dma_attr_burstsizes = 0x7ff,
	.dma_attr_minxfer = 0x10,
	.dma_attr_maxxfer = 0xfffffffffULL,
	.dma_attr_seg = 0xffffffffffffffffULL,
	.dma_attr_sgllen = -1,
	.dma_attr_granular = 0x10,
	.dma_attr_flags = 0
};

static ddi_device_acc_attr_t nvme_reg_acc_attr = {
	.devacc_attr_version = DDI_DEVICE_ATTR_V0,
	.devacc_attr_endian_flags = DDI_STRUCTURE_LE_ACC,
	.devacc_attr_dataorder = DDI_STRICTORDER_ACC
};

static struct dev_ops nvme_dev_ops = {
	.devo_rev = DEVO_REV,
	.devo_refcnt = 0,
	.devo_getinfo = ddi_no_info,
	.devo_identify = nulldev,
	.devo_probe = nulldev,
	.devo_attach = nvme_attach,
	.devo_detach = nvme_detach,
	.devo_reset = nodev,
	.devo_cb_ops = NULL,
	.devo_bus_ops = NULL,
	.devo_power = NULL,
	.devo_quiesce = nvme_quiesce,
};

static struct modldrv nvme_modldrv = {
	.drv_modops = &mod_driverops,
	.drv_linkinfo = "NVMe v1.0e",
	.drv_dev_ops = &nvme_dev_ops
};

static struct modlinkage nvme_modlinkage = {
	.ml_rev = MODREV_1,
	.ml_linkage = { &nvme_modldrv, NULL }
};

static bd_ops_t nvme_bd_ops = {
	.o_version = BD_OPS_VERSION_0,
	.o_drive_info = nvme_bd_driveinfo,
	.o_media_info = nvme_bd_mediainfo,
	.o_devid_init = nvme_bd_devid,
	.o_sync_cache = nvme_bd_sync,
	.o_read = nvme_bd_read,
	.o_write = nvme_bd_write,
};

int
_init(void)
{
	int error;

	error = ddi_soft_state_init(&nvme_state, sizeof (nvme_t), 1);
	if (error != DDI_SUCCESS)
		return (error);

	nvme_cmd_cache = kmem_cache_create("nvme_cmd_cache",
	    sizeof (nvme_cmd_t), 64, NULL, NULL, NULL, NULL, NULL, 0);

	bd_mod_init(&nvme_dev_ops);

	error = mod_install(&nvme_modlinkage);
	if (error != DDI_SUCCESS) {
		ddi_soft_state_fini(&nvme_state);
		bd_mod_fini(&nvme_dev_ops);
	}

	return (error);
}

int
_fini(void)
{
	int error;

	error = mod_remove(&nvme_modlinkage);
	if (error == DDI_SUCCESS) {
		ddi_soft_state_fini(&nvme_state);
		kmem_cache_destroy(nvme_cmd_cache);
		bd_mod_fini(&nvme_dev_ops);
	}

	return (error);
}

int
_info(struct modinfo *modinfop)
{
	return (mod_info(&nvme_modlinkage, modinfop));
}

static inline void
nvme_put64(nvme_t *nvme, uintptr_t reg, uint64_t val)
{
	ASSERT(((uintptr_t)(nvme->n_regs + reg) & 0x7) == 0);

	/*LINTED: E_BAD_PTR_CAST_ALIGN*/
	ddi_put64(nvme->n_regh, (uint64_t *)(nvme->n_regs + reg), val);
}

static inline void
nvme_put32(nvme_t *nvme, uintptr_t reg, uint32_t val)
{
	ASSERT(((uintptr_t)(nvme->n_regs + reg) & 0x3) == 0);

	/*LINTED: E_BAD_PTR_CAST_ALIGN*/
	ddi_put32(nvme->n_regh, (uint32_t *)(nvme->n_regs + reg), val);
}

static inline uint64_t
nvme_get64(nvme_t *nvme, uintptr_t reg)
{
	uint64_t val;

	ASSERT(((uintptr_t)(nvme->n_regs + reg) & 0x7) == 0);

	/*LINTED: E_BAD_PTR_CAST_ALIGN*/
	val = ddi_get64(nvme->n_regh, (uint64_t *)(nvme->n_regs + reg));

	return (val);
}

static inline uint32_t
nvme_get32(nvme_t *nvme, uintptr_t reg)
{
	uint32_t val;

	ASSERT(((uintptr_t)(nvme->n_regs + reg) & 0x3) == 0);

	/*LINTED: E_BAD_PTR_CAST_ALIGN*/
	val = ddi_get32(nvme->n_regh, (uint32_t *)(nvme->n_regs + reg));

	return (val);
}

static boolean_t
nvme_check_regs_hdl(nvme_t *nvme)
{
	ddi_fm_error_t error;

	ddi_fm_acc_err_get(nvme->n_regh, &error, DDI_FME_VERSION);

	if (error.fme_status != DDI_FM_OK)
		return (B_TRUE);

	return (B_FALSE);
}

static boolean_t
nvme_check_dma_hdl(nvme_dma_t *dma)
{
	ddi_fm_error_t error;

	if (dma == NULL)
		return (B_FALSE);

	ddi_fm_dma_err_get(dma->nd_dmah, &error, DDI_FME_VERSION);

	if (error.fme_status != DDI_FM_OK)
		return (B_TRUE);

	return (B_FALSE);
}

static void
nvme_free_dma(nvme_dma_t *dma)
{
	if (dma->nd_dmah != NULL)
		(void) ddi_dma_unbind_handle(dma->nd_dmah);
	if (dma->nd_acch != NULL)
		ddi_dma_mem_free(&dma->nd_acch);
	if (dma->nd_dmah != NULL)
		ddi_dma_free_handle(&dma->nd_dmah);
	kmem_free(dma, sizeof (nvme_dma_t));
}

static int
nvme_zalloc_dma(nvme_t *nvme, size_t len, uint_t flags,
    ddi_dma_attr_t *dma_attr, nvme_dma_t **ret)
{
	nvme_dma_t *dma = kmem_zalloc(sizeof (nvme_dma_t), KM_SLEEP);

	if (ddi_dma_alloc_handle(nvme->n_dip, dma_attr, DDI_DMA_SLEEP, NULL,
	    &dma->nd_dmah) != DDI_SUCCESS) {
		/*
		 * Due to DDI_DMA_SLEEP this can't be DDI_DMA_NORESOURCES, and
		 * the only other possible error is DDI_DMA_BADATTR which
		 * indicates a driver bug which should cause a panic.
		 */
		dev_err(nvme->n_dip, CE_PANIC,
		    "!failed to get DMA handle, check DMA attributes");
		return (DDI_FAILURE);
	}

	/*
	 * ddi_dma_mem_alloc() can only fail when DDI_DMA_NOSLEEP is specified
	 * or the flags are conflicting, which isn't the case here.
	 */
	(void) ddi_dma_mem_alloc(dma->nd_dmah, len, &nvme->n_reg_acc_attr,
	    DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL, &dma->nd_memp,
	    &dma->nd_len, &dma->nd_acch);

	if (ddi_dma_addr_bind_handle(dma->nd_dmah, NULL, dma->nd_memp,
	    dma->nd_len, flags | DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL,
	    &dma->nd_cookie, &dma->nd_ncookie) != DDI_DMA_MAPPED) {
		dev_err(nvme->n_dip, CE_WARN,
		    "!failed to bind DMA memory");
		atomic_inc_32(&nvme->n_dma_bind_err);
		*ret = NULL;
		nvme_free_dma(dma);
		return (DDI_FAILURE);
	}

	bzero(dma->nd_memp, dma->nd_len);

	*ret = dma;
	return (DDI_SUCCESS);
}

static int
nvme_zalloc_queue_dma(nvme_t *nvme, uint32_t nentry, uint16_t qe_len,
    uint_t flags, nvme_dma_t **dma)
{
	uint32_t len = nentry * qe_len;
	ddi_dma_attr_t q_dma_attr = nvme->n_queue_dma_attr;

	len = roundup(len, nvme->n_pagesize);

	q_dma_attr.dma_attr_minxfer = len;

	if (nvme_zalloc_dma(nvme, len, flags, &q_dma_attr, dma)
	    != DDI_SUCCESS) {
		dev_err(nvme->n_dip, CE_WARN,
		    "!failed to get DMA memory for queue");
		goto fail;
	}

	if ((*dma)->nd_ncookie != 1) {
		dev_err(nvme->n_dip, CE_WARN,
		    "!got too many cookies for queue DMA");
		goto fail;
	}

	return (DDI_SUCCESS);

fail:
	if (*dma) {
		nvme_free_dma(*dma);
		*dma = NULL;
	}

	return (DDI_FAILURE);
}

static void
nvme_free_qpair(nvme_qpair_t *qp)
{
	int i;

	mutex_destroy(&qp->nq_mutex);

	if (qp->nq_sqdma != NULL)
		nvme_free_dma(qp->nq_sqdma);
	if (qp->nq_cqdma != NULL)
		nvme_free_dma(qp->nq_cqdma);

	if (qp->nq_active_cmds > 0)
		for (i = 0; i != qp->nq_nentry; i++)
			if (qp->nq_cmd[i] != NULL)
				nvme_free_cmd(qp->nq_cmd[i]);

	if (qp->nq_cmd != NULL)
		kmem_free(qp->nq_cmd, sizeof (nvme_cmd_t *) * qp->nq_nentry);

	kmem_free(qp, sizeof (nvme_qpair_t));
}

static int
nvme_alloc_qpair(nvme_t *nvme, uint32_t nentry, nvme_qpair_t **nqp,
    int idx)
{
	nvme_qpair_t *qp = kmem_zalloc(sizeof (*qp), KM_SLEEP);

	mutex_init(&qp->nq_mutex, NULL, MUTEX_DRIVER,
	    DDI_INTR_PRI(nvme->n_intr_pri));

	if (nvme_zalloc_queue_dma(nvme, nentry, sizeof (nvme_sqe_t),
	    DDI_DMA_WRITE, &qp->nq_sqdma) != DDI_SUCCESS)
		goto fail;

	if (nvme_zalloc_queue_dma(nvme, nentry, sizeof (nvme_cqe_t),
	    DDI_DMA_READ, &qp->nq_cqdma) != DDI_SUCCESS)
		goto fail;

	qp->nq_sq = (nvme_sqe_t *)qp->nq_sqdma->nd_memp;
	qp->nq_cq = (nvme_cqe_t *)qp->nq_cqdma->nd_memp;
	qp->nq_nentry = nentry;

	qp->nq_sqtdbl = NVME_REG_SQTDBL(nvme, idx);
	qp->nq_cqhdbl = NVME_REG_CQHDBL(nvme, idx);

	qp->nq_cmd = kmem_zalloc(sizeof (nvme_cmd_t *) * nentry, KM_SLEEP);
	qp->nq_next_cmd = 0;

	*nqp = qp;
	return (DDI_SUCCESS);

fail:
	nvme_free_qpair(qp);
	*nqp = NULL;

	return (DDI_FAILURE);
}

static nvme_cmd_t *
nvme_alloc_cmd(nvme_t *nvme, int kmflag)
{
	nvme_cmd_t *cmd = kmem_cache_alloc(nvme_cmd_cache, kmflag);

	if (cmd == NULL)
		return (cmd);

	bzero(cmd, sizeof (nvme_cmd_t));

	cmd->nc_nvme = nvme;

	mutex_init(&cmd->nc_mutex, NULL, MUTEX_DRIVER,
	    DDI_INTR_PRI(nvme->n_intr_pri));
	cv_init(&cmd->nc_cv, NULL, CV_DRIVER, NULL);

	return (cmd);
}

static void
nvme_free_cmd(nvme_cmd_t *cmd)
{
	if (cmd->nc_dma) {
		nvme_free_dma(cmd->nc_dma);
		cmd->nc_dma = NULL;
	}

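	/*
	 * Destroy the per-command synchronization objects before returning
	 * the command to the kmem cache.
	 */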
	cv_destroy(&cmd->nc_cv);
	mutex_destroy(&cmd->nc_mutex);

	kmem_cache_free(nvme_cmd_cache, cmd);
}

static int
nvme_submit_cmd(nvme_qpair_t *qp, nvme_cmd_t *cmd)
{
	nvme_reg_sqtdbl_t tail = { 0 };

	mutex_enter(&qp->nq_mutex);

	if (qp->nq_active_cmds == qp->nq_nentry) {
		mutex_exit(&qp->nq_mutex);
		return (DDI_FAILURE);
	}

	cmd->nc_completed = B_FALSE;

	/*
	 * Try to insert the cmd into the active cmd array at the nq_next_cmd
	 * slot.  If the slot is already occupied advance to the next slot and
	 * try again.  This can happen for long-running commands like async
	 * event requests.
	 */
	while (qp->nq_cmd[qp->nq_next_cmd] != NULL)
		qp->nq_next_cmd = (qp->nq_next_cmd + 1) % qp->nq_nentry;
	qp->nq_cmd[qp->nq_next_cmd] = cmd;

	qp->nq_active_cmds++;

	cmd->nc_sqe.sqe_cid = qp->nq_next_cmd;
	bcopy(&cmd->nc_sqe, &qp->nq_sq[qp->nq_sqtail], sizeof (nvme_sqe_t));
	(void) ddi_dma_sync(qp->nq_sqdma->nd_dmah,
	    sizeof (nvme_sqe_t) * qp->nq_sqtail,
	    sizeof (nvme_sqe_t), DDI_DMA_SYNC_FORDEV);
	qp->nq_next_cmd = (qp->nq_next_cmd + 1) % qp->nq_nentry;

	tail.b.sqtdbl_sqt = qp->nq_sqtail = (qp->nq_sqtail + 1) % qp->nq_nentry;
	nvme_put32(cmd->nc_nvme, qp->nq_sqtdbl, tail.r);

	mutex_exit(&qp->nq_mutex);
	return (DDI_SUCCESS);
}

static nvme_cmd_t *
nvme_retrieve_cmd(nvme_t *nvme, nvme_qpair_t *qp)
{
	nvme_reg_cqhdbl_t head = { 0 };

	nvme_cqe_t *cqe;
	nvme_cmd_t *cmd;

	(void) ddi_dma_sync(qp->nq_cqdma->nd_dmah, 0,
	    sizeof (nvme_cqe_t) * qp->nq_nentry, DDI_DMA_SYNC_FORKERNEL);

	cqe = &qp->nq_cq[qp->nq_cqhead];

	/* Check phase tag of CQE.  Hardware inverts it for new entries. */
	if (cqe->cqe_sf.sf_p == qp->nq_phase)
		return (NULL);

	ASSERT(nvme->n_ioq[cqe->cqe_sqid] == qp);
	ASSERT(cqe->cqe_cid < qp->nq_nentry);

	mutex_enter(&qp->nq_mutex);
	cmd = qp->nq_cmd[cqe->cqe_cid];
	qp->nq_cmd[cqe->cqe_cid] = NULL;
	qp->nq_active_cmds--;
	mutex_exit(&qp->nq_mutex);

	ASSERT(cmd != NULL);
	ASSERT(cmd->nc_nvme == nvme);
	ASSERT(cmd->nc_sqid == cqe->cqe_sqid);
	ASSERT(cmd->nc_sqe.sqe_cid == cqe->cqe_cid);
	bcopy(cqe, &cmd->nc_cqe, sizeof (nvme_cqe_t));

	qp->nq_sqhead = cqe->cqe_sqhd;

	head.b.cqhdbl_cqh = qp->nq_cqhead = (qp->nq_cqhead + 1) % qp->nq_nentry;

	/* Toggle phase on wrap-around. */
	if (qp->nq_cqhead == 0)
		qp->nq_phase = qp->nq_phase ? 0 : 1;

	nvme_put32(cmd->nc_nvme, qp->nq_cqhdbl, head.r);

	return (cmd);
}

static int
nvme_check_unknown_cmd_status(nvme_cmd_t *cmd)
{
	nvme_cqe_t *cqe = &cmd->nc_cqe;

	dev_err(cmd->nc_nvme->n_dip, CE_WARN,
	    "!unknown command status received: opc = %x, sqid = %d, cid = %d, "
	    "sc = %x, sct = %x, dnr = %d, m = %d", cmd->nc_sqe.sqe_opc,
	    cqe->cqe_sqid, cqe->cqe_cid, cqe->cqe_sf.sf_sc, cqe->cqe_sf.sf_sct,
	    cqe->cqe_sf.sf_dnr, cqe->cqe_sf.sf_m);

	bd_error(cmd->nc_xfer, BD_ERR_ILLRQ);

	if (cmd->nc_nvme->n_strict_version) {
		cmd->nc_nvme->n_dead = B_TRUE;
		ddi_fm_service_impact(cmd->nc_nvme->n_dip, DDI_SERVICE_LOST);
	}

	return (EIO);
}

static int
nvme_check_vendor_cmd_status(nvme_cmd_t *cmd)
{
	nvme_cqe_t *cqe = &cmd->nc_cqe;

	dev_err(cmd->nc_nvme->n_dip, CE_WARN,
	    "!unknown command status received: opc = %x, sqid = %d, cid = %d, "
	    "sc = %x, sct = %x, dnr = %d, m = %d", cmd->nc_sqe.sqe_opc,
	    cqe->cqe_sqid, cqe->cqe_cid, cqe->cqe_sf.sf_sc, cqe->cqe_sf.sf_sct,
	    cqe->cqe_sf.sf_dnr, cqe->cqe_sf.sf_m);
	if (!cmd->nc_nvme->n_ignore_unknown_vendor_status) {
		cmd->nc_nvme->n_dead = B_TRUE;
		ddi_fm_service_impact(cmd->nc_nvme->n_dip, DDI_SERVICE_LOST);
	}

	return (EIO);
}

static int
nvme_check_integrity_cmd_status(nvme_cmd_t *cmd)
{
	nvme_cqe_t *cqe = &cmd->nc_cqe;

	switch (cqe->cqe_sf.sf_sc) {
	case NVME_CQE_SC_INT_NVM_WRITE:
		/* write fail */
		/* TODO: post ereport */
		bd_error(cmd->nc_xfer, BD_ERR_MEDIA);
		return (EIO);

	case NVME_CQE_SC_INT_NVM_READ:
		/* read fail */
		/* TODO: post ereport */
		bd_error(cmd->nc_xfer, BD_ERR_MEDIA);
		return (EIO);

	default:
		return (nvme_check_unknown_cmd_status(cmd));
	}
}

static int
nvme_check_generic_cmd_status(nvme_cmd_t *cmd)
{
	nvme_cqe_t *cqe = &cmd->nc_cqe;

	switch (cqe->cqe_sf.sf_sc) {
	case NVME_CQE_SC_GEN_SUCCESS:
		return (0);

	/*
	 * Errors indicating a bug in the driver should cause a panic.
	 */
	case NVME_CQE_SC_GEN_INV_OPC:
		/* Invalid Command Opcode */
		dev_err(cmd->nc_nvme->n_dip, CE_PANIC, "programming error: "
		    "invalid opcode in cmd %p", (void *)cmd);
		return (0);

	case NVME_CQE_SC_GEN_INV_FLD:
		/* Invalid Field in Command */
		dev_err(cmd->nc_nvme->n_dip, CE_PANIC, "programming error: "
		    "invalid field in cmd %p", (void *)cmd);
		return (0);

	case NVME_CQE_SC_GEN_ID_CNFL:
		/* Command ID Conflict */
		dev_err(cmd->nc_nvme->n_dip, CE_PANIC, "programming error: "
		    "cmd ID conflict in cmd %p", (void *)cmd);
		return (0);

	case NVME_CQE_SC_GEN_INV_NS:
		/* Invalid Namespace or Format */
		dev_err(cmd->nc_nvme->n_dip, CE_PANIC, "programming error: "
		    "invalid NS/format in cmd %p", (void *)cmd);
		return (0);

	case NVME_CQE_SC_GEN_NVM_LBA_RANGE:
		/* LBA Out Of Range */
		dev_err(cmd->nc_nvme->n_dip, CE_PANIC, "programming error: "
		    "LBA out of range in cmd %p", (void *)cmd);
		return (0);

	/*
	 * Non-fatal errors, handle gracefully.
	 */
	case NVME_CQE_SC_GEN_DATA_XFR_ERR:
		/* Data Transfer Error (DMA) */
		/* TODO: post ereport */
		atomic_inc_32(&cmd->nc_nvme->n_data_xfr_err);
		bd_error(cmd->nc_xfer, BD_ERR_NTRDY);
		return (EIO);

	case NVME_CQE_SC_GEN_INTERNAL_ERR:
		/*
		 * Internal Error.
		 * The spec (v1.0, section 4.5.1.2) says detailed error
		 * information is returned as an async event, so we pretty
		 * much ignore the error here and handle it in the async
		 * event handler.
		 */
		atomic_inc_32(&cmd->nc_nvme->n_internal_err);
		bd_error(cmd->nc_xfer, BD_ERR_NTRDY);
		return (EIO);

	case NVME_CQE_SC_GEN_ABORT_REQUEST:
		/*
		 * Command Abort Requested.  This normally happens only when a
		 * command times out.
		 */
		/* TODO: post ereport or change blkdev to handle this? */
		atomic_inc_32(&cmd->nc_nvme->n_abort_rq_err);
		return (ECANCELED);

	case NVME_CQE_SC_GEN_ABORT_PWRLOSS:
		/* Command Aborted due to Power Loss Notification */
		ddi_fm_service_impact(cmd->nc_nvme->n_dip, DDI_SERVICE_LOST);
		cmd->nc_nvme->n_dead = B_TRUE;
		return (EIO);

	case NVME_CQE_SC_GEN_ABORT_SQ_DEL:
		/* Command Aborted due to SQ Deletion */
		atomic_inc_32(&cmd->nc_nvme->n_abort_sq_del);
		return (EIO);

	case NVME_CQE_SC_GEN_NVM_CAP_EXC:
		/* Capacity Exceeded */
		atomic_inc_32(&cmd->nc_nvme->n_nvm_cap_exc);
		bd_error(cmd->nc_xfer, BD_ERR_MEDIA);
		return (EIO);

	case NVME_CQE_SC_GEN_NVM_NS_NOTRDY:
		/* Namespace Not Ready */
		atomic_inc_32(&cmd->nc_nvme->n_nvm_ns_notrdy);
		bd_error(cmd->nc_xfer, BD_ERR_NTRDY);
		return (EIO);

	default:
		return (nvme_check_unknown_cmd_status(cmd));
	}
}

static int
nvme_check_specific_cmd_status(nvme_cmd_t *cmd)
{
	nvme_cqe_t *cqe = &cmd->nc_cqe;

	switch (cqe->cqe_sf.sf_sc) {
	case NVME_CQE_SC_SPC_INV_CQ:
		/* Completion Queue Invalid */
		ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_CREATE_SQUEUE);
		atomic_inc_32(&cmd->nc_nvme->n_inv_cq_err);
		return (EINVAL);

	case NVME_CQE_SC_SPC_INV_QID:
		/* Invalid Queue Identifier */
		ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_CREATE_SQUEUE ||
		    cmd->nc_sqe.sqe_opc == NVME_OPC_DELETE_SQUEUE ||
		    cmd->nc_sqe.sqe_opc == NVME_OPC_CREATE_CQUEUE ||
		    cmd->nc_sqe.sqe_opc == NVME_OPC_DELETE_CQUEUE);
		atomic_inc_32(&cmd->nc_nvme->n_inv_qid_err);
		return (EINVAL);

	case NVME_CQE_SC_SPC_MAX_QSZ_EXC:
		/* Max Queue Size Exceeded */
		ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_CREATE_SQUEUE ||
		    cmd->nc_sqe.sqe_opc == NVME_OPC_CREATE_CQUEUE);
		atomic_inc_32(&cmd->nc_nvme->n_max_qsz_exc);
		return (EINVAL);

	case NVME_CQE_SC_SPC_ABRT_CMD_EXC:
		/* Abort Command Limit Exceeded */
		ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_ABORT);
		dev_err(cmd->nc_nvme->n_dip, CE_PANIC, "programming error: "
		    "abort command limit exceeded in cmd %p", (void *)cmd);
		return (0);

	case NVME_CQE_SC_SPC_ASYNC_EVREQ_EXC:
		/* Async Event Request Limit Exceeded */
		ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_ASYNC_EVENT);
		dev_err(cmd->nc_nvme->n_dip, CE_PANIC, "programming error: "
		    "async event request limit exceeded in cmd %p",
		    (void *)cmd);
		return (0);

	case NVME_CQE_SC_SPC_INV_INT_VECT:
		/* Invalid Interrupt Vector */
		ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_CREATE_CQUEUE);
		atomic_inc_32(&cmd->nc_nvme->n_inv_int_vect);
		return (EINVAL);

	case NVME_CQE_SC_SPC_INV_LOG_PAGE:
		/* Invalid Log Page */
		ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_GET_LOG_PAGE);
		atomic_inc_32(&cmd->nc_nvme->n_inv_log_page);
		bd_error(cmd->nc_xfer, BD_ERR_ILLRQ);
		return (EINVAL);

	case NVME_CQE_SC_SPC_INV_FORMAT:
		/* Invalid Format */
		ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_NVM_FORMAT);
		atomic_inc_32(&cmd->nc_nvme->n_inv_format);
		bd_error(cmd->nc_xfer, BD_ERR_ILLRQ);
		return (EINVAL);

	case NVME_CQE_SC_SPC_INV_Q_DEL:
		/* Invalid Queue Deletion */
		ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_DELETE_CQUEUE);
		atomic_inc_32(&cmd->nc_nvme->n_inv_q_del);
		return (EINVAL);

	case NVME_CQE_SC_SPC_NVM_CNFL_ATTR:
		/* Conflicting Attributes */
		ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_NVM_DSET_MGMT ||
		    cmd->nc_sqe.sqe_opc == NVME_OPC_NVM_READ ||
		    cmd->nc_sqe.sqe_opc == NVME_OPC_NVM_WRITE);
		atomic_inc_32(&cmd->nc_nvme->n_cnfl_attr);
		bd_error(cmd->nc_xfer, BD_ERR_ILLRQ);
		return (EINVAL);

	case NVME_CQE_SC_SPC_NVM_INV_PROT:
		/* Invalid Protection Information */
		ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_NVM_COMPARE ||
		    cmd->nc_sqe.sqe_opc == NVME_OPC_NVM_READ ||
		    cmd->nc_sqe.sqe_opc == NVME_OPC_NVM_WRITE);
		atomic_inc_32(&cmd->nc_nvme->n_inv_prot);
		bd_error(cmd->nc_xfer, BD_ERR_ILLRQ);
		return (EINVAL);

	case NVME_CQE_SC_SPC_NVM_READONLY:
		/* Write to Read Only Range */
		ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_NVM_WRITE);
		atomic_inc_32(&cmd->nc_nvme->n_readonly);
		bd_error(cmd->nc_xfer, BD_ERR_ILLRQ);
		return (EROFS);

	default:
		return (nvme_check_unknown_cmd_status(cmd));
	}
}

static inline int
nvme_check_cmd_status(nvme_cmd_t *cmd)
{
	nvme_cqe_t *cqe = &cmd->nc_cqe;

	/* take a shortcut if everything is alright */
	if (cqe->cqe_sf.sf_sct == NVME_CQE_SCT_GENERIC &&
	    cqe->cqe_sf.sf_sc == NVME_CQE_SC_GEN_SUCCESS)
		return (0);

	if (cqe->cqe_sf.sf_sct == NVME_CQE_SCT_GENERIC)
		return (nvme_check_generic_cmd_status(cmd));
	else if (cqe->cqe_sf.sf_sct == NVME_CQE_SCT_SPECIFIC)
		return (nvme_check_specific_cmd_status(cmd));
	else if (cqe->cqe_sf.sf_sct == NVME_CQE_SCT_INTEGRITY)
		return (nvme_check_integrity_cmd_status(cmd));
	else if (cqe->cqe_sf.sf_sct == NVME_CQE_SCT_VENDOR)
		return (nvme_check_vendor_cmd_status(cmd));

	return (nvme_check_unknown_cmd_status(cmd));
}

/*
 * nvme_abort_cmd_cb -- replaces nc_callback of aborted commands
 *
 * This function takes care of cleaning up aborted commands.  The command
 * status is checked to catch any fatal errors.
 */
static void
nvme_abort_cmd_cb(void *arg)
{
	nvme_cmd_t *cmd = arg;

	/*
	 * Grab the command mutex.  Once we have it we hold the last reference
	 * to the command and can safely free it.
	 */
	mutex_enter(&cmd->nc_mutex);
	(void) nvme_check_cmd_status(cmd);
	mutex_exit(&cmd->nc_mutex);

	nvme_free_cmd(cmd);
}

static void
nvme_abort_cmd(nvme_cmd_t *abort_cmd)
{
	nvme_t *nvme = abort_cmd->nc_nvme;
	nvme_cmd_t *cmd = nvme_alloc_cmd(nvme, KM_SLEEP);
	nvme_abort_cmd_t ac = { 0 };

	sema_p(&nvme->n_abort_sema);

	ac.b.ac_cid = abort_cmd->nc_sqe.sqe_cid;
	ac.b.ac_sqid = abort_cmd->nc_sqid;

	/*
	 * Drop the mutex of the aborted command.  From this point on
	 * we must assume that the abort callback has freed the command.
	 */
	mutex_exit(&abort_cmd->nc_mutex);

	cmd->nc_sqid = 0;
	cmd->nc_sqe.sqe_opc = NVME_OPC_ABORT;
	cmd->nc_callback = nvme_wakeup_cmd;
	cmd->nc_sqe.sqe_cdw10 = ac.r;

	/*
	 * Send the ABORT to the hardware.  The ABORT command will return
	 * _after_ the aborted command has completed (aborted or otherwise).
	 */
	if (nvme_admin_cmd(cmd, NVME_ADMIN_CMD_TIMEOUT) != DDI_SUCCESS) {
		sema_v(&nvme->n_abort_sema);
		dev_err(nvme->n_dip, CE_WARN,
		    "!nvme_admin_cmd failed for ABORT");
		atomic_inc_32(&nvme->n_abort_failed);
		return;
	}
	sema_v(&nvme->n_abort_sema);

	if (nvme_check_cmd_status(cmd)) {
		dev_err(nvme->n_dip, CE_WARN,
		    "!ABORT failed with sct = %x, sc = %x",
		    cmd->nc_cqe.cqe_sf.sf_sct, cmd->nc_cqe.cqe_sf.sf_sc);
		atomic_inc_32(&nvme->n_abort_failed);
	} else {
		atomic_inc_32(&nvme->n_cmd_aborted);
	}

	nvme_free_cmd(cmd);
}

/*
 * nvme_wait_cmd -- wait for command completion or timeout
 *
 * Returns B_TRUE if the command completed normally.
 *
 * Returns B_FALSE if the command timed out and an abort was attempted.  The
 * command mutex will be dropped and the command must be considered freed.
 * The freeing of the command is normally done by the abort command callback.
 *
 * In case of a serious error or a timeout of the abort command the hardware
 * will be declared dead and FMA will be notified.
 */
static boolean_t
nvme_wait_cmd(nvme_cmd_t *cmd, uint_t usec)
{
	clock_t timeout = ddi_get_lbolt() + drv_usectohz(usec);
	nvme_t *nvme = cmd->nc_nvme;
	nvme_reg_csts_t csts;

	ASSERT(mutex_owned(&cmd->nc_mutex));

	while (!cmd->nc_completed) {
		if (cv_timedwait(&cmd->nc_cv, &cmd->nc_mutex, timeout) == -1)
			break;
	}

	if (cmd->nc_completed)
		return (B_TRUE);

	/*
	 * The command timed out.  Change the callback to the cleanup function.
	 */
	cmd->nc_callback = nvme_abort_cmd_cb;

	/*
	 * Check controller for fatal status, any errors associated with the
	 * register or DMA handle, or for a double timeout (abort command timed
	 * out).  If necessary log a warning and call FMA.
	 */
	csts.r = nvme_get32(nvme, NVME_REG_CSTS);
	dev_err(nvme->n_dip, CE_WARN, "!command timeout, "
	    "OPC = %x, CFS = %d", cmd->nc_sqe.sqe_opc, csts.b.csts_cfs);
	atomic_inc_32(&nvme->n_cmd_timeout);

	if (csts.b.csts_cfs ||
	    nvme_check_regs_hdl(nvme) ||
	    nvme_check_dma_hdl(cmd->nc_dma) ||
	    cmd->nc_sqe.sqe_opc == NVME_OPC_ABORT) {
		ddi_fm_service_impact(nvme->n_dip, DDI_SERVICE_LOST);
		nvme->n_dead = B_TRUE;
		mutex_exit(&cmd->nc_mutex);
	} else {
		/*
		 * Try to abort the command.  The command mutex is released by
		 * nvme_abort_cmd().
		 * If the abort succeeds it will have freed the aborted command.
		 * If the abort fails for other reasons we must assume that the
		 * command may complete at any time, and the callback will free
		 * it for us.
		 */
		nvme_abort_cmd(cmd);
	}

	return (B_FALSE);
}

static void
nvme_wakeup_cmd(void *arg)
{
	nvme_cmd_t *cmd = arg;

	mutex_enter(&cmd->nc_mutex);
	/*
	 * There is a slight chance that this command completed shortly after
	 * the timeout was hit in nvme_wait_cmd() but before the callback was
	 * changed.  Catch that case here and clean up accordingly.
	 */
	if (cmd->nc_callback == nvme_abort_cmd_cb) {
		mutex_exit(&cmd->nc_mutex);
		nvme_abort_cmd_cb(cmd);
		return;
	}

	cmd->nc_completed = B_TRUE;
	cv_signal(&cmd->nc_cv);
	mutex_exit(&cmd->nc_mutex);
}

static void
nvme_async_event_task(void *arg)
{
	nvme_cmd_t *cmd = arg;
	nvme_t *nvme = cmd->nc_nvme;
	nvme_error_log_entry_t *error_log = NULL;
	nvme_health_log_t *health_log = NULL;
	nvme_async_event_t event;
	int ret;

	/*
	 * Check for errors associated with the async request itself.  The only
	 * command-specific error is "async event limit exceeded", which
	 * indicates a programming error in the driver and causes a panic in
	 * nvme_check_cmd_status().
	 *
	 * Other possible errors are various scenarios where the async request
	 * was aborted, or internal errors in the device.  Internal errors are
	 * reported to FMA, the command aborts need no special handling here.
	 */
	if (nvme_check_cmd_status(cmd)) {
		dev_err(cmd->nc_nvme->n_dip, CE_WARN,
		    "!async event request returned failure, sct = %x, "
		    "sc = %x, dnr = %d, m = %d", cmd->nc_cqe.cqe_sf.sf_sct,
		    cmd->nc_cqe.cqe_sf.sf_sc, cmd->nc_cqe.cqe_sf.sf_dnr,
		    cmd->nc_cqe.cqe_sf.sf_m);

		if (cmd->nc_cqe.cqe_sf.sf_sct == NVME_CQE_SCT_GENERIC &&
		    cmd->nc_cqe.cqe_sf.sf_sc == NVME_CQE_SC_GEN_INTERNAL_ERR) {
			cmd->nc_nvme->n_dead = B_TRUE;
			ddi_fm_service_impact(cmd->nc_nvme->n_dip,
			    DDI_SERVICE_LOST);
		}
		nvme_free_cmd(cmd);
		return;
	}


	event.r = cmd->nc_cqe.cqe_dw0;

	/* Clear CQE and re-submit the async request. */
	bzero(&cmd->nc_cqe, sizeof (nvme_cqe_t));
	ret = nvme_submit_cmd(nvme->n_adminq, cmd);

	if (ret != DDI_SUCCESS) {
		dev_err(nvme->n_dip, CE_WARN,
		    "!failed to resubmit async event request");
		atomic_inc_32(&nvme->n_async_resubmit_failed);
		nvme_free_cmd(cmd);
	}

	switch (event.b.ae_type) {
	case NVME_ASYNC_TYPE_ERROR:
		if (event.b.ae_logpage == NVME_LOGPAGE_ERROR) {
			error_log = (nvme_error_log_entry_t *)
			    nvme_get_logpage(nvme, event.b.ae_logpage);
		} else {
			dev_err(nvme->n_dip, CE_WARN, "!wrong logpage in "
			    "async event reply: %d", event.b.ae_logpage);
			atomic_inc_32(&nvme->n_wrong_logpage);
		}

		switch (event.b.ae_info) {
		case NVME_ASYNC_ERROR_INV_SQ:
			dev_err(nvme->n_dip, CE_PANIC, "programming error: "
			    "invalid submission queue");
			return;

		case NVME_ASYNC_ERROR_INV_DBL:
			dev_err(nvme->n_dip, CE_PANIC, "programming error: "
			    "invalid doorbell write value");
			return;

		case NVME_ASYNC_ERROR_DIAGFAIL:
			dev_err(nvme->n_dip, CE_WARN, "!diagnostic failure");
			ddi_fm_service_impact(nvme->n_dip, DDI_SERVICE_LOST);
			nvme->n_dead = B_TRUE;
			atomic_inc_32(&nvme->n_diagfail_event);
			break;

		case NVME_ASYNC_ERROR_PERSISTENT:
			dev_err(nvme->n_dip, CE_WARN, "!persistent internal "
			    "device error");
			ddi_fm_service_impact(nvme->n_dip, DDI_SERVICE_LOST);
			nvme->n_dead = B_TRUE;
			atomic_inc_32(&nvme->n_persistent_event);
			break;

		case NVME_ASYNC_ERROR_TRANSIENT:
			dev_err(nvme->n_dip, CE_WARN, "!transient internal "
			    "device error");
			/* TODO: send ereport */
			atomic_inc_32(&nvme->n_transient_event);
			break;

		case NVME_ASYNC_ERROR_FW_LOAD:
			dev_err(nvme->n_dip, CE_WARN,
			    "!firmware image load error");
			atomic_inc_32(&nvme->n_fw_load_event);
			break;
		}
		break;

	case NVME_ASYNC_TYPE_HEALTH:
		if (event.b.ae_logpage == NVME_LOGPAGE_HEALTH) {
			health_log = (nvme_health_log_t *)
			    nvme_get_logpage(nvme, event.b.ae_logpage, -1);
		} else {
			dev_err(nvme->n_dip, CE_WARN, "!wrong logpage in "
			    "async event reply: %d", event.b.ae_logpage);
			atomic_inc_32(&nvme->n_wrong_logpage);
		}

		switch (event.b.ae_info) {
		case NVME_ASYNC_HEALTH_RELIABILITY:
			dev_err(nvme->n_dip, CE_WARN,
			    "!device reliability compromised");
			/* TODO: send ereport */
			atomic_inc_32(&nvme->n_reliability_event);
			break;

		case NVME_ASYNC_HEALTH_TEMPERATURE:
			dev_err(nvme->n_dip, CE_WARN,
			    "!temperature above threshold");
			/* TODO: send ereport */
			atomic_inc_32(&nvme->n_temperature_event);
			break;

		case NVME_ASYNC_HEALTH_SPARE:
			dev_err(nvme->n_dip, CE_WARN,
			    "!spare space below threshold");
			/* TODO: send ereport */
			atomic_inc_32(&nvme->n_spare_event);
			break;
		}
		break;

	case NVME_ASYNC_TYPE_VENDOR:
		dev_err(nvme->n_dip, CE_WARN, "!vendor specific async event "
		    "received, info = %x, logpage = %x", event.b.ae_info,
		    event.b.ae_logpage);
		atomic_inc_32(&nvme->n_vendor_event);
		break;

	default:
		dev_err(nvme->n_dip, CE_WARN, "!unknown async event received, "
		    "type = %x, info = %x, logpage = %x", event.b.ae_type,
		    event.b.ae_info, event.b.ae_logpage);
		atomic_inc_32(&nvme->n_unknown_event);
		break;
	}

	if (error_log)
		kmem_free(error_log, sizeof (nvme_error_log_entry_t) *
		    nvme->n_error_log_len);

	if (health_log)
		kmem_free(health_log, sizeof (nvme_health_log_t));
}

static int
nvme_admin_cmd(nvme_cmd_t *cmd, int usec)
{
	int ret;

	mutex_enter(&cmd->nc_mutex);
	ret = nvme_submit_cmd(cmd->nc_nvme->n_adminq, cmd);

	if (ret != DDI_SUCCESS) {
		mutex_exit(&cmd->nc_mutex);
		dev_err(cmd->nc_nvme->n_dip, CE_WARN,
		    "!nvme_submit_cmd failed");
		atomic_inc_32(&cmd->nc_nvme->n_admin_queue_full);
		nvme_free_cmd(cmd);
		return (DDI_FAILURE);
	}

	if (nvme_wait_cmd(cmd, usec) == B_FALSE) {
		/*
		 * The command timed out.  An abort command was posted that
		 * will take care of the cleanup.
		 */
		return (DDI_FAILURE);
	}
	mutex_exit(&cmd->nc_mutex);

	return (DDI_SUCCESS);
}

static int
nvme_async_event(nvme_t *nvme)
{
	nvme_cmd_t *cmd = nvme_alloc_cmd(nvme, KM_SLEEP);
	int ret;

	cmd->nc_sqid = 0;
	cmd->nc_sqe.sqe_opc = NVME_OPC_ASYNC_EVENT;
	cmd->nc_callback = nvme_async_event_task;

	ret = nvme_submit_cmd(nvme->n_adminq, cmd);

	if (ret != DDI_SUCCESS) {
		dev_err(nvme->n_dip, CE_WARN,
		    "!nvme_submit_cmd failed for ASYNCHRONOUS EVENT");
		nvme_free_cmd(cmd);
		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}

static void *
nvme_get_logpage(nvme_t *nvme, uint8_t logpage, ...)
{
	nvme_cmd_t *cmd = nvme_alloc_cmd(nvme, KM_SLEEP);
	void *buf = NULL;
	nvme_getlogpage_t getlogpage = { 0 };
	size_t bufsize;
	va_list ap;

	va_start(ap, logpage);

	cmd->nc_sqid = 0;
	cmd->nc_callback = nvme_wakeup_cmd;
	cmd->nc_sqe.sqe_opc = NVME_OPC_GET_LOG_PAGE;

	getlogpage.b.lp_lid = logpage;

	switch (logpage) {
	case NVME_LOGPAGE_ERROR:
		cmd->nc_sqe.sqe_nsid = (uint32_t)-1;
		bufsize = nvme->n_error_log_len *
		    sizeof (nvme_error_log_entry_t);
		break;

	case NVME_LOGPAGE_HEALTH:
		cmd->nc_sqe.sqe_nsid = va_arg(ap, uint32_t);
		bufsize = sizeof (nvme_health_log_t);
		break;

	case NVME_LOGPAGE_FWSLOT:
		cmd->nc_sqe.sqe_nsid = (uint32_t)-1;
		bufsize = sizeof (nvme_fwslot_log_t);
		break;

	default:
		dev_err(nvme->n_dip, CE_WARN, "!unknown log page requested: %d",
		    logpage);
		atomic_inc_32(&nvme->n_unknown_logpage);
		goto fail;
	}

	va_end(ap);

	getlogpage.b.lp_numd = bufsize / sizeof (uint32_t) - 1;

	cmd->nc_sqe.sqe_cdw10 = getlogpage.r;

	if (nvme_zalloc_dma(nvme, getlogpage.b.lp_numd * sizeof (uint32_t),
	    DDI_DMA_READ, &nvme->n_prp_dma_attr, &cmd->nc_dma) != DDI_SUCCESS) {
		dev_err(nvme->n_dip, CE_WARN,
		    "!nvme_zalloc_dma failed for GET LOG PAGE");
		goto fail;
	}

	if (cmd->nc_dma->nd_ncookie > 2) {
		dev_err(nvme->n_dip, CE_WARN,
		    "!too many DMA cookies for GET LOG PAGE");
		atomic_inc_32(&nvme->n_too_many_cookies);
		goto fail;
	}

	cmd->nc_sqe.sqe_dptr.d_prp[0] = cmd->nc_dma->nd_cookie.dmac_laddress;
	if (cmd->nc_dma->nd_ncookie > 1) {
		ddi_dma_nextcookie(cmd->nc_dma->nd_dmah,
		    &cmd->nc_dma->nd_cookie);
		cmd->nc_sqe.sqe_dptr.d_prp[1] =
		    cmd->nc_dma->nd_cookie.dmac_laddress;
	}

	if (nvme_admin_cmd(cmd, NVME_ADMIN_CMD_TIMEOUT) != DDI_SUCCESS) {
		dev_err(nvme->n_dip, CE_WARN,
		    "!nvme_admin_cmd failed for GET LOG PAGE");
		return (NULL);
	}

	if (nvme_check_cmd_status(cmd)) {
		dev_err(nvme->n_dip, CE_WARN,
		    "!GET LOG PAGE failed with sct = %x, sc = %x",
		    cmd->nc_cqe.cqe_sf.sf_sct, cmd->nc_cqe.cqe_sf.sf_sc);
		goto fail;
	}

	buf = kmem_alloc(bufsize, KM_SLEEP);
	bcopy(cmd->nc_dma->nd_memp, buf, bufsize);

fail:
	nvme_free_cmd(cmd);

	return (buf);
}

static void *
nvme_identify(nvme_t *nvme, uint32_t nsid)
{
	nvme_cmd_t *cmd = nvme_alloc_cmd(nvme, KM_SLEEP);
	void *buf = NULL;

	cmd->nc_sqid = 0;
	cmd->nc_callback = nvme_wakeup_cmd;
	cmd->nc_sqe.sqe_opc = NVME_OPC_IDENTIFY;
	cmd->nc_sqe.sqe_nsid = nsid;
	cmd->nc_sqe.sqe_cdw10 = nsid ? NVME_IDENTIFY_NSID : NVME_IDENTIFY_CTRL;

	if (nvme_zalloc_dma(nvme, NVME_IDENTIFY_BUFSIZE, DDI_DMA_READ,
	    &nvme->n_prp_dma_attr, &cmd->nc_dma) != DDI_SUCCESS) {
		dev_err(nvme->n_dip, CE_WARN,
		    "!nvme_zalloc_dma failed for IDENTIFY");
		goto fail;
	}

	if (cmd->nc_dma->nd_ncookie > 2) {
		dev_err(nvme->n_dip, CE_WARN,
		    "!too many DMA cookies for IDENTIFY");
		atomic_inc_32(&nvme->n_too_many_cookies);
		goto fail;
	}

	cmd->nc_sqe.sqe_dptr.d_prp[0] = cmd->nc_dma->nd_cookie.dmac_laddress;
	if (cmd->nc_dma->nd_ncookie > 1) {
		ddi_dma_nextcookie(cmd->nc_dma->nd_dmah,
		    &cmd->nc_dma->nd_cookie);
		cmd->nc_sqe.sqe_dptr.d_prp[1] =
		    cmd->nc_dma->nd_cookie.dmac_laddress;
	}

	if (nvme_admin_cmd(cmd, NVME_ADMIN_CMD_TIMEOUT) != DDI_SUCCESS) {
		dev_err(nvme->n_dip, CE_WARN,
		    "!nvme_admin_cmd failed for IDENTIFY");
		return (NULL);
	}

	if (nvme_check_cmd_status(cmd)) {
		dev_err(nvme->n_dip, CE_WARN,
		    "!IDENTIFY failed with sct = %x, sc = %x",
		    cmd->nc_cqe.cqe_sf.sf_sct, cmd->nc_cqe.cqe_sf.sf_sc);
		goto fail;
	}

	buf = kmem_alloc(NVME_IDENTIFY_BUFSIZE, KM_SLEEP);
	bcopy(cmd->nc_dma->nd_memp, buf, NVME_IDENTIFY_BUFSIZE);

fail:
	nvme_free_cmd(cmd);

	return (buf);
}

static int
nvme_set_nqueues(nvme_t *nvme, uint16_t nqueues)
{
	nvme_cmd_t *cmd = nvme_alloc_cmd(nvme, KM_SLEEP);
	nvme_nqueue_t nq = { 0 };

	nq.b.nq_nsq = nq.b.nq_ncq = nqueues - 1;

	cmd->nc_sqid = 0;
	cmd->nc_callback = nvme_wakeup_cmd;
	cmd->nc_sqe.sqe_opc = NVME_OPC_SET_FEATURES;
	cmd->nc_sqe.sqe_cdw10 = NVME_FEAT_NQUEUES;
	cmd->nc_sqe.sqe_cdw11 = nq.r;

	if (nvme_admin_cmd(cmd, NVME_ADMIN_CMD_TIMEOUT) != DDI_SUCCESS) {
		dev_err(nvme->n_dip, CE_WARN,
		    "!nvme_admin_cmd failed for SET FEATURES (NQUEUES)");
		return (0);
	}

	if (nvme_check_cmd_status(cmd)) {
		dev_err(nvme->n_dip, CE_WARN,
		    "!SET FEATURES (NQUEUES) failed with sct = %x, sc = %x",
		    cmd->nc_cqe.cqe_sf.sf_sct, cmd->nc_cqe.cqe_sf.sf_sc);
		nvme_free_cmd(cmd);
		return (0);
	}

	nq.r = cmd->nc_cqe.cqe_dw0;
	nvme_free_cmd(cmd);

	/*
	 * Always use the same number of submission and completion queues, and
	 * never use more than the requested number of queues.
	 */
	return (MIN(nqueues, MIN(nq.b.nq_nsq, nq.b.nq_ncq) + 1));
}

static int
nvme_create_io_qpair(nvme_t *nvme, nvme_qpair_t *qp, uint16_t idx)
{
	nvme_cmd_t *cmd = nvme_alloc_cmd(nvme, KM_SLEEP);
	nvme_create_queue_dw10_t dw10 = { 0 };
	nvme_create_cq_dw11_t c_dw11 = { 0 };
	nvme_create_sq_dw11_t s_dw11 = { 0 };

	dw10.b.q_qid = idx;
	dw10.b.q_qsize = qp->nq_nentry - 1;

	c_dw11.b.cq_pc = 1;
	c_dw11.b.cq_ien = 1;
	c_dw11.b.cq_iv = idx % nvme->n_intr_cnt;

	cmd->nc_sqid = 0;
	cmd->nc_callback = nvme_wakeup_cmd;
	cmd->nc_sqe.sqe_opc = NVME_OPC_CREATE_CQUEUE;
	cmd->nc_sqe.sqe_cdw10 = dw10.r;
	cmd->nc_sqe.sqe_cdw11 = c_dw11.r;
	cmd->nc_sqe.sqe_dptr.d_prp[0] = qp->nq_cqdma->nd_cookie.dmac_laddress;

	if (nvme_admin_cmd(cmd, NVME_ADMIN_CMD_TIMEOUT) != DDI_SUCCESS) {
		dev_err(nvme->n_dip, CE_WARN,
		    "!nvme_admin_cmd failed for CREATE CQUEUE");
		return (DDI_FAILURE);
	}

	if (nvme_check_cmd_status(cmd)) {
		dev_err(nvme->n_dip, CE_WARN,
		    "!CREATE CQUEUE failed with sct = %x, sc = %x",
		    cmd->nc_cqe.cqe_sf.sf_sct, cmd->nc_cqe.cqe_sf.sf_sc);
		nvme_free_cmd(cmd);
		return (DDI_FAILURE);
	}

	nvme_free_cmd(cmd);

	s_dw11.b.sq_pc = 1;
	s_dw11.b.sq_cqid = idx;

	cmd = nvme_alloc_cmd(nvme, KM_SLEEP);
	cmd->nc_sqid = 0;
	cmd->nc_callback = nvme_wakeup_cmd;
	cmd->nc_sqe.sqe_opc = NVME_OPC_CREATE_SQUEUE;
	cmd->nc_sqe.sqe_cdw10 = dw10.r;
	cmd->nc_sqe.sqe_cdw11 = s_dw11.r;
	cmd->nc_sqe.sqe_dptr.d_prp[0] = qp->nq_sqdma->nd_cookie.dmac_laddress;

	if (nvme_admin_cmd(cmd, NVME_ADMIN_CMD_TIMEOUT) != DDI_SUCCESS) {
		dev_err(nvme->n_dip, CE_WARN,
		    "!nvme_admin_cmd failed for CREATE SQUEUE");
		return (DDI_FAILURE);
	}

	if (nvme_check_cmd_status(cmd)) {
		dev_err(nvme->n_dip, CE_WARN,
		    "!CREATE SQUEUE failed with sct = %x, sc = %x",
		    cmd->nc_cqe.cqe_sf.sf_sct, cmd->nc_cqe.cqe_sf.sf_sc);
		nvme_free_cmd(cmd);
		return (DDI_FAILURE);
	}

	nvme_free_cmd(cmd);

	return (DDI_SUCCESS);
}

static boolean_t
nvme_reset(nvme_t *nvme, boolean_t quiesce)
{
	nvme_reg_csts_t csts;
	int i;

	nvme_put32(nvme, NVME_REG_CC, 0);

	csts.r = nvme_get32(nvme, NVME_REG_CSTS);
	if (csts.b.csts_rdy == 1) {
		nvme_put32(nvme, NVME_REG_CC, 0);
		for (i = 0; i != nvme->n_timeout * 10; i++) {
			csts.r = nvme_get32(nvme, NVME_REG_CSTS);
			if (csts.b.csts_rdy == 0)
				break;

			if (quiesce)
				drv_usecwait(50000);
			else
				delay(drv_usectohz(50000));
		}
	}

	nvme_put32(nvme, NVME_REG_AQA, 0);
	nvme_put32(nvme, NVME_REG_ASQ, 0);
	nvme_put32(nvme, NVME_REG_ACQ, 0);

	csts.r = nvme_get32(nvme, NVME_REG_CSTS);
	return (csts.b.csts_rdy == 0 ? B_TRUE : B_FALSE);
}

static void
nvme_shutdown(nvme_t *nvme, int mode, boolean_t quiesce)
{
	nvme_reg_cc_t cc;
	nvme_reg_csts_t csts;
	int i;

	ASSERT(mode == NVME_CC_SHN_NORMAL || mode == NVME_CC_SHN_ABRUPT);

	cc.r = nvme_get32(nvme, NVME_REG_CC);
	cc.b.cc_shn = mode & 0x3;
	nvme_put32(nvme, NVME_REG_CC, cc.r);

	for (i = 0; i != 10; i++) {
		csts.r = nvme_get32(nvme, NVME_REG_CSTS);
		if (csts.b.csts_shst == NVME_CSTS_SHN_COMPLETE)
			break;

		if (quiesce)
			drv_usecwait(100000);
		else
			delay(drv_usectohz(100000));
	}
}


static void
nvme_prepare_devid(nvme_t *nvme, uint32_t nsid)
{
	char model[sizeof (nvme->n_idctl->id_model) + 1];
	char serial[sizeof (nvme->n_idctl->id_serial) + 1];

	bcopy(nvme->n_idctl->id_model, model,
	    sizeof (nvme->n_idctl->id_model));
	bcopy(nvme->n_idctl->id_serial, serial,
	    sizeof (nvme->n_idctl->id_serial));

	model[sizeof (nvme->n_idctl->id_model)] = '\0';
	serial[sizeof (nvme->n_idctl->id_serial)] = '\0';

	(void) snprintf(nvme->n_ns[nsid - 1].ns_devid,
	    sizeof (nvme->n_ns[0].ns_devid), "%4X-%s-%s-%X",
	    nvme->n_idctl->id_vid, model, serial, nsid);
}

static int
nvme_init(nvme_t *nvme)
{
	nvme_reg_cc_t cc = { 0 };
	nvme_reg_aqa_t aqa = { 0 };
	nvme_reg_asq_t asq = { 0 };
	nvme_reg_acq_t acq = { 0 };
	nvme_reg_cap_t cap;
	nvme_reg_vs_t vs;
	nvme_reg_csts_t csts;
	int i = 0;
	int nqueues;
	char model[sizeof (nvme->n_idctl->id_model) + 1];
	char *vendor, *product;

	/* Setup fixed interrupt for admin queue. */
	if (nvme_setup_interrupts(nvme, DDI_INTR_TYPE_FIXED, 1)
	    != DDI_SUCCESS) {
		dev_err(nvme->n_dip, CE_WARN,
		    "!failed to setup fixed interrupt");
		goto fail;
	}

	/* Check controller version */
	vs.r = nvme_get32(nvme, NVME_REG_VS);
	dev_err(nvme->n_dip, CE_CONT, "?NVMe spec version %d.%d",
	    vs.b.vs_mjr, vs.b.vs_mnr);

	if (nvme_version_major < vs.b.vs_mjr ||
	    (nvme_version_major == vs.b.vs_mjr &&
	    nvme_version_minor < vs.b.vs_mnr)) {
		dev_err(nvme->n_dip, CE_WARN, "!no support for version > %d.%d",
		    nvme_version_major, nvme_version_minor);
		if (nvme->n_strict_version)
			goto fail;
	}

	/* retrieve controller configuration */
	cap.r = nvme_get64(nvme, NVME_REG_CAP);

	if ((cap.b.cap_css & NVME_CAP_CSS_NVM) == 0) {
		dev_err(nvme->n_dip, CE_WARN,
		    "!NVM command set not supported by hardware");
		goto fail;
	}

	nvme->n_nssr_supported = cap.b.cap_nssrs;
	nvme->n_doorbell_stride = 4 << cap.b.cap_dstrd;
	nvme->n_timeout = cap.b.cap_to;
	nvme->n_arbitration_mechanisms = cap.b.cap_ams;
	nvme->n_cont_queues_reqd = cap.b.cap_cqr;
	nvme->n_max_queue_entries = cap.b.cap_mqes + 1;

	/*
	 * The MPSMIN and MPSMAX fields in the CAP register use 0 to specify
	 * the base page size of 4k (1<<12), so add 12 here to get the real
	 * page size value.
	 */
	nvme->n_pageshift = MIN(MAX(cap.b.cap_mpsmin + 12, PAGESHIFT),
	    cap.b.cap_mpsmax + 12);
	nvme->n_pagesize = 1UL << (nvme->n_pageshift);

	/*
	 * Set up Queue DMA to transfer at least 1 page-aligned page at a time.
	 */
1799 */ 1800 nvme->n_queue_dma_attr.dma_attr_align = nvme->n_pagesize; 1801 nvme->n_queue_dma_attr.dma_attr_minxfer = nvme->n_pagesize; 1802 1803 /* 1804 * Set up PRP DMA to transfer 1 page-aligned page at a time. 1805 * Maxxfer may be increased after we identified the controller limits. 1806 */ 1807 nvme->n_prp_dma_attr.dma_attr_maxxfer = nvme->n_pagesize; 1808 nvme->n_prp_dma_attr.dma_attr_minxfer = nvme->n_pagesize; 1809 nvme->n_prp_dma_attr.dma_attr_align = nvme->n_pagesize; 1810 1811 /* 1812 * Reset controller if it's still in ready state. 1813 */ 1814 if (nvme_reset(nvme, B_FALSE) == B_FALSE) { 1815 dev_err(nvme->n_dip, CE_WARN, "!unable to reset controller"); 1816 ddi_fm_service_impact(nvme->n_dip, DDI_SERVICE_LOST); 1817 nvme->n_dead = B_TRUE; 1818 goto fail; 1819 } 1820 1821 /* 1822 * Create the admin queue pair. 1823 */ 1824 if (nvme_alloc_qpair(nvme, nvme->n_admin_queue_len, &nvme->n_adminq, 0) 1825 != DDI_SUCCESS) { 1826 dev_err(nvme->n_dip, CE_WARN, 1827 "!unable to allocate admin qpair"); 1828 goto fail; 1829 } 1830 nvme->n_ioq = kmem_alloc(sizeof (nvme_qpair_t *), KM_SLEEP); 1831 nvme->n_ioq[0] = nvme->n_adminq; 1832 1833 nvme->n_progress |= NVME_ADMIN_QUEUE; 1834 1835 (void) ddi_prop_update_int(DDI_DEV_T_NONE, nvme->n_dip, 1836 "admin-queue-len", nvme->n_admin_queue_len); 1837 1838 aqa.b.aqa_asqs = aqa.b.aqa_acqs = nvme->n_admin_queue_len - 1; 1839 asq = nvme->n_adminq->nq_sqdma->nd_cookie.dmac_laddress; 1840 acq = nvme->n_adminq->nq_cqdma->nd_cookie.dmac_laddress; 1841 1842 ASSERT((asq & (nvme->n_pagesize - 1)) == 0); 1843 ASSERT((acq & (nvme->n_pagesize - 1)) == 0); 1844 1845 nvme_put32(nvme, NVME_REG_AQA, aqa.r); 1846 nvme_put64(nvme, NVME_REG_ASQ, asq); 1847 nvme_put64(nvme, NVME_REG_ACQ, acq); 1848 1849 cc.b.cc_ams = 0; /* use Round-Robin arbitration */ 1850 cc.b.cc_css = 0; /* use NVM command set */ 1851 cc.b.cc_mps = nvme->n_pageshift - 12; 1852 cc.b.cc_shn = 0; /* no shutdown in progress */ 1853 cc.b.cc_en = 1; /* enable controller */ 1854 cc.b.cc_iosqes = 6; /* submission queue entry is 2^6 bytes long */ 1855 cc.b.cc_iocqes = 4; /* completion queue entry is 2^4 bytes long */ 1856 1857 nvme_put32(nvme, NVME_REG_CC, cc.r); 1858 1859 /* 1860 * Wait for the controller to become ready. 1861 */ 1862 csts.r = nvme_get32(nvme, NVME_REG_CSTS); 1863 if (csts.b.csts_rdy == 0) { 1864 for (i = 0; i != nvme->n_timeout * 10; i++) { 1865 delay(drv_usectohz(50000)); 1866 csts.r = nvme_get32(nvme, NVME_REG_CSTS); 1867 1868 if (csts.b.csts_cfs == 1) { 1869 dev_err(nvme->n_dip, CE_WARN, 1870 "!controller fatal status at init"); 1871 ddi_fm_service_impact(nvme->n_dip, 1872 DDI_SERVICE_LOST); 1873 nvme->n_dead = B_TRUE; 1874 goto fail; 1875 } 1876 1877 if (csts.b.csts_rdy == 1) 1878 break; 1879 } 1880 } 1881 1882 if (csts.b.csts_rdy == 0) { 1883 dev_err(nvme->n_dip, CE_WARN, "!controller not ready"); 1884 ddi_fm_service_impact(nvme->n_dip, DDI_SERVICE_LOST); 1885 nvme->n_dead = B_TRUE; 1886 goto fail; 1887 } 1888 1889 /* 1890 * Assume an abort command limit of 1. We'll destroy and re-init 1891 * that later when we know the true abort command limit. 1892 */ 1893 sema_init(&nvme->n_abort_sema, 1, NULL, SEMA_DRIVER, NULL); 1894 1895 /* 1896 * Post an asynchronous event command to catch errors. 
1897 */ 1898 if (nvme_async_event(nvme) != DDI_SUCCESS) { 1899 dev_err(nvme->n_dip, CE_WARN, 1900 "!failed to post async event"); 1901 goto fail; 1902 } 1903 1904 /* 1905 * Identify Controller 1906 */ 1907 nvme->n_idctl = nvme_identify(nvme, 0); 1908 if (nvme->n_idctl == NULL) { 1909 dev_err(nvme->n_dip, CE_WARN, 1910 "!failed to identify controller"); 1911 goto fail; 1912 } 1913 1914 /* 1915 * Get Vendor & Product ID 1916 */ 1917 bcopy(nvme->n_idctl->id_model, model, sizeof (nvme->n_idctl->id_model)); 1918 model[sizeof (nvme->n_idctl->id_model)] = '\0'; 1919 sata_split_model(model, &vendor, &product); 1920 1921 if (vendor == NULL) 1922 nvme->n_vendor = strdup("NVMe"); 1923 else 1924 nvme->n_vendor = strdup(vendor); 1925 1926 nvme->n_product = strdup(product); 1927 1928 /* 1929 * Get controller limits. 1930 */ 1931 nvme->n_async_event_limit = MAX(NVME_MIN_ASYNC_EVENT_LIMIT, 1932 MIN(nvme->n_admin_queue_len / 10, 1933 MIN(nvme->n_idctl->id_aerl + 1, nvme->n_async_event_limit))); 1934 1935 (void) ddi_prop_update_int(DDI_DEV_T_NONE, nvme->n_dip, 1936 "async-event-limit", nvme->n_async_event_limit); 1937 1938 nvme->n_abort_command_limit = nvme->n_idctl->id_acl + 1; 1939 1940 /* 1941 * Reinitialize the semaphore with the true abort command limit 1942 * supported by the hardware. It's not necessary to disable interrupts 1943 * as only command aborts use the semaphore, and no commands are 1944 * executed or aborted while we're here. 1945 */ 1946 sema_destroy(&nvme->n_abort_sema); 1947 sema_init(&nvme->n_abort_sema, nvme->n_abort_command_limit - 1, NULL, 1948 SEMA_DRIVER, NULL); 1949 1950 nvme->n_progress |= NVME_CTRL_LIMITS; 1951 1952 if (nvme->n_idctl->id_mdts == 0) 1953 nvme->n_max_data_transfer_size = nvme->n_pagesize * 65536; 1954 else 1955 nvme->n_max_data_transfer_size = 1956 1ull << (nvme->n_pageshift + nvme->n_idctl->id_mdts); 1957 1958 nvme->n_error_log_len = nvme->n_idctl->id_elpe + 1; 1959 1960 /* 1961 * Limit n_max_data_transfer_size to what we can handle in one PRP. 1962 * Chained PRPs are currently unsupported. 1963 * 1964 * This is a no-op on hardware which doesn't support a transfer size 1965 * big enough to require chained PRPs. 1966 */ 1967 nvme->n_max_data_transfer_size = MIN(nvme->n_max_data_transfer_size, 1968 (nvme->n_pagesize / sizeof (uint64_t) * nvme->n_pagesize)); 1969 1970 nvme->n_prp_dma_attr.dma_attr_maxxfer = nvme->n_max_data_transfer_size; 1971 1972 /* 1973 * Make sure the minimum/maximum queue entry sizes are not 1974 * larger/smaller than the default. 1975 */ 1976 1977 if (((1 << nvme->n_idctl->id_sqes.qes_min) > sizeof (nvme_sqe_t)) || 1978 ((1 << nvme->n_idctl->id_sqes.qes_max) < sizeof (nvme_sqe_t)) || 1979 ((1 << nvme->n_idctl->id_cqes.qes_min) > sizeof (nvme_cqe_t)) || 1980 ((1 << nvme->n_idctl->id_cqes.qes_max) < sizeof (nvme_cqe_t))) 1981 goto fail; 1982 1983 /* 1984 * Check for the presence of a Volatile Write Cache. If present, 1985 * enable it by default. 1986 */ 1987 if (nvme->n_idctl->id_vwc.vwc_present == 0) { 1988 nvme->n_volatile_write_cache_enabled = B_FALSE; 1989 nvme_bd_ops.o_sync_cache = NULL; 1990 } else { 1991 /* 1992 * TODO: send SET FEATURES to enable VWC 1993 * (have no hardware to test this) 1994 */ 1995 nvme->n_volatile_write_cache_enabled = B_FALSE; 1996 nvme_bd_ops.o_sync_cache = NULL; 1997 } 1998 1999 /* 2000 * Grab a copy of all mandatory log pages. 
2001 *
2002 * TODO: should go away once user space tool exists to print logs
2003 */
2004 nvme->n_error_log = (nvme_error_log_entry_t *)
2005 nvme_get_logpage(nvme, NVME_LOGPAGE_ERROR);
2006 nvme->n_health_log = (nvme_health_log_t *)
2007 nvme_get_logpage(nvme, NVME_LOGPAGE_HEALTH, -1);
2008 nvme->n_fwslot_log = (nvme_fwslot_log_t *)
2009 nvme_get_logpage(nvme, NVME_LOGPAGE_FWSLOT);
2010
2011 /*
2012 * Identify Namespaces
2013 */
2014 nvme->n_namespace_count = nvme->n_idctl->id_nn;
2015 nvme->n_ns = kmem_zalloc(sizeof (nvme_namespace_t) *
2016 nvme->n_namespace_count, KM_SLEEP);
2017
2018 for (i = 0; i != nvme->n_namespace_count; i++) {
2019 nvme_identify_nsid_t *idns;
2020 int last_rp;
2021
2022 nvme->n_ns[i].ns_nvme = nvme;
2023 nvme->n_ns[i].ns_idns = idns = nvme_identify(nvme, i + 1);
2024
2025 if (idns == NULL) {
2026 dev_err(nvme->n_dip, CE_WARN,
2027 "!failed to identify namespace %d", i + 1);
2028 goto fail;
2029 }
2030
2031 nvme->n_ns[i].ns_id = i + 1;
2032 nvme->n_ns[i].ns_block_count = idns->id_nsize;
2033 nvme->n_ns[i].ns_block_size =
2034 1 << idns->id_lbaf[idns->id_flbas.lba_format].lbaf_lbads;
2035 nvme->n_ns[i].ns_best_block_size = nvme->n_ns[i].ns_block_size;
2036
2037 nvme_prepare_devid(nvme, nvme->n_ns[i].ns_id);
2038
2039 /*
2040 * Find the LBA format with no metadata and the best relative
2041 * performance. A value of 3 means "degraded", 0 is best.
2042 */
2043 last_rp = 3;
2044 for (int j = 0; j <= idns->id_nlbaf; j++) {
2045 if (idns->id_lbaf[j].lbaf_lbads == 0)
2046 break;
2047 if (idns->id_lbaf[j].lbaf_ms != 0)
2048 continue;
2049 if (idns->id_lbaf[j].lbaf_rp >= last_rp)
2050 continue;
2051 last_rp = idns->id_lbaf[j].lbaf_rp;
2052 nvme->n_ns[i].ns_best_block_size =
2053 1 << idns->id_lbaf[j].lbaf_lbads;
2054 }
2055
2056 /*
2057 * We currently don't support namespaces that use either:
2058 * - thin provisioning
2059 * - extended LBAs
2060 * - protection information
2061 */
2062 if (idns->id_nsfeat.f_thin ||
2063 idns->id_flbas.lba_extlba ||
2064 idns->id_dps.dp_pinfo) {
2065 dev_err(nvme->n_dip, CE_WARN,
2066 "!ignoring namespace %d, unsupported features: "
2067 "thin = %d, extlba = %d, pinfo = %d", i + 1,
2068 idns->id_nsfeat.f_thin, idns->id_flbas.lba_extlba,
2069 idns->id_dps.dp_pinfo);
2070 nvme->n_ns[i].ns_ignore = B_TRUE;
2071 }
2072 }
2073
2074 /*
2075 * Try to set up MSI/MSI-X interrupts.
2076 */
2077 if ((nvme->n_intr_types & (DDI_INTR_TYPE_MSI | DDI_INTR_TYPE_MSIX))
2078 != 0) {
2079 nvme_release_interrupts(nvme);
2080
2081 nqueues = MIN(UINT16_MAX, ncpus);
2082
2083 if ((nvme_setup_interrupts(nvme, DDI_INTR_TYPE_MSIX,
2084 nqueues) != DDI_SUCCESS) &&
2085 (nvme_setup_interrupts(nvme, DDI_INTR_TYPE_MSI,
2086 nqueues) != DDI_SUCCESS)) {
2087 dev_err(nvme->n_dip, CE_WARN,
2088 "!failed to setup MSI/MSI-X interrupts");
2089 goto fail;
2090 }
2091 }
2092
2093 nqueues = nvme->n_intr_cnt;
2094
2095 /*
2096 * Create I/O queue pairs.
2097 */
2098 nvme->n_ioq_count = nvme_set_nqueues(nvme, nqueues);
2099 if (nvme->n_ioq_count == 0) {
2100 dev_err(nvme->n_dip, CE_WARN,
2101 "!failed to set number of I/O queues to %d", nqueues);
2102 goto fail;
2103 }
2104
2105 /*
2106 * Reallocate I/O queue array
2107 */
2108 kmem_free(nvme->n_ioq, sizeof (nvme_qpair_t *));
2109 nvme->n_ioq = kmem_zalloc(sizeof (nvme_qpair_t *) *
2110 (nvme->n_ioq_count + 1), KM_SLEEP);
2111 nvme->n_ioq[0] = nvme->n_adminq;
2112
2113 /*
2114 * If we got fewer queues than we asked for, we might as well give
2115 * some of the interrupt vectors back to the system.
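 * This is done by releasing all vectors and setting up the interrupts
 * again with the actual I/O queue count.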
2116 */
2117 if (nvme->n_ioq_count < nqueues) {
2118 nvme_release_interrupts(nvme);
2119
2120 if (nvme_setup_interrupts(nvme, nvme->n_intr_type,
2121 nvme->n_ioq_count) != DDI_SUCCESS) {
2122 dev_err(nvme->n_dip, CE_WARN,
2123 "!failed to reduce number of interrupts");
2124 goto fail;
2125 }
2126 }
2127
2128 /*
2129 * Alloc & register I/O queue pairs
2130 */
2131 nvme->n_io_queue_len =
2132 MIN(nvme->n_io_queue_len, nvme->n_max_queue_entries);
2133 (void) ddi_prop_update_int(DDI_DEV_T_NONE, nvme->n_dip, "io-queue-len",
2134 nvme->n_io_queue_len);
2135
2136 for (i = 1; i != nvme->n_ioq_count + 1; i++) {
2137 if (nvme_alloc_qpair(nvme, nvme->n_io_queue_len,
2138 &nvme->n_ioq[i], i) != DDI_SUCCESS) {
2139 dev_err(nvme->n_dip, CE_WARN,
2140 "!unable to allocate I/O qpair %d", i);
2141 goto fail;
2142 }
2143
2144 if (nvme_create_io_qpair(nvme, nvme->n_ioq[i], i)
2145 != DDI_SUCCESS) {
2146 dev_err(nvme->n_dip, CE_WARN,
2147 "!unable to create I/O qpair %d", i);
2148 goto fail;
2149 }
2150 }
2151
2152 /*
2153 * Post more asynchronous event commands to reduce event reporting
2154 * latency as suggested by the spec.
2155 */
2156 for (i = 1; i != nvme->n_async_event_limit; i++) {
2157 if (nvme_async_event(nvme) != DDI_SUCCESS) {
2158 dev_err(nvme->n_dip, CE_WARN,
2159 "!failed to post async event %d", i);
2160 goto fail;
2161 }
2162 }
2163
2164 return (DDI_SUCCESS);
2165
2166 fail:
2167 (void) nvme_reset(nvme, B_FALSE);
2168 return (DDI_FAILURE);
2169 }
2170
2171 static uint_t
2172 nvme_intr(caddr_t arg1, caddr_t arg2)
2173 {
2174 /*LINTED: E_PTR_BAD_CAST_ALIGN*/
2175 nvme_t *nvme = (nvme_t *)arg1;
2176 int inum = (int)(uintptr_t)arg2;
2177 int qnum;
2178 nvme_cmd_t *cmd;
2179
2180 if (inum >= nvme->n_intr_cnt)
2181 return (DDI_INTR_UNCLAIMED);
2182
2183 /*
2184 * The interrupt vector a queue uses is calculated as queue_idx %
2185 * intr_cnt in nvme_create_io_qpair(). Iterate through the queue array
2186 * in steps of n_intr_cnt to process all queues using this vector.
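 * E.g. with 4 vectors and 4 I/O queues, vector 0 serves the admin queue
 * (index 0) and I/O queue 4, while vectors 1 through 3 each serve one of
 * the I/O queues 1 through 3.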
2187 */ 2188 for (qnum = inum; 2189 qnum < nvme->n_ioq_count + 1 && nvme->n_ioq[qnum] != NULL; 2190 qnum += nvme->n_intr_cnt) { 2191 while ((cmd = nvme_retrieve_cmd(nvme, nvme->n_ioq[qnum]))) { 2192 taskq_dispatch_ent((taskq_t *)cmd->nc_nvme->n_cmd_taskq, 2193 cmd->nc_callback, cmd, TQ_NOSLEEP, &cmd->nc_tqent); 2194 } 2195 } 2196 2197 return (DDI_INTR_CLAIMED); 2198 } 2199 2200 static void 2201 nvme_release_interrupts(nvme_t *nvme) 2202 { 2203 int i; 2204 2205 for (i = 0; i < nvme->n_intr_cnt; i++) { 2206 if (nvme->n_inth[i] == NULL) 2207 break; 2208 2209 if (nvme->n_intr_cap & DDI_INTR_FLAG_BLOCK) 2210 (void) ddi_intr_block_disable(&nvme->n_inth[i], 1); 2211 else 2212 (void) ddi_intr_disable(nvme->n_inth[i]); 2213 2214 (void) ddi_intr_remove_handler(nvme->n_inth[i]); 2215 (void) ddi_intr_free(nvme->n_inth[i]); 2216 } 2217 2218 kmem_free(nvme->n_inth, nvme->n_inth_sz); 2219 nvme->n_inth = NULL; 2220 nvme->n_inth_sz = 0; 2221 2222 nvme->n_progress &= ~NVME_INTERRUPTS; 2223 } 2224 2225 static int 2226 nvme_setup_interrupts(nvme_t *nvme, int intr_type, int nqpairs) 2227 { 2228 int failed = 0; 2229 int nintrs, navail, count; 2230 int ret; 2231 int i; 2232 2233 if (nvme->n_intr_types == 0) { 2234 ret = ddi_intr_get_supported_types(nvme->n_dip, 2235 &nvme->n_intr_types); 2236 if (ret != DDI_SUCCESS) { 2237 dev_err(nvme->n_dip, CE_WARN, 2238 "!%s: ddi_intr_get_supported types failed", 2239 __func__); 2240 return (ret); 2241 } 2242 } 2243 2244 if ((nvme->n_intr_types & intr_type) == 0) 2245 return (DDI_FAILURE); 2246 2247 ret = ddi_intr_get_nintrs(nvme->n_dip, intr_type, &nintrs); 2248 if (ret != DDI_SUCCESS) { 2249 dev_err(nvme->n_dip, CE_WARN, "!%s: ddi_intr_get_nintrs failed", 2250 __func__); 2251 return (ret); 2252 } 2253 2254 ret = ddi_intr_get_navail(nvme->n_dip, intr_type, &navail); 2255 if (ret != DDI_SUCCESS) { 2256 dev_err(nvme->n_dip, CE_WARN, "!%s: ddi_intr_get_navail failed", 2257 __func__); 2258 return (ret); 2259 } 2260 2261 /* We want at most one interrupt per queue pair. 
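 * Any further vectors would go unused, as each completion queue is tied
 * to exactly one vector in nvme_create_io_qpair().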
*/ 2262 if (navail > nqpairs) 2263 navail = nqpairs; 2264 2265 nvme->n_inth_sz = sizeof (ddi_intr_handle_t) * navail; 2266 nvme->n_inth = kmem_zalloc(nvme->n_inth_sz, KM_SLEEP); 2267 2268 ret = ddi_intr_alloc(nvme->n_dip, nvme->n_inth, intr_type, 0, navail, 2269 &count, 0); 2270 if (ret != DDI_SUCCESS) { 2271 dev_err(nvme->n_dip, CE_WARN, "!%s: ddi_intr_alloc failed", 2272 __func__); 2273 goto fail; 2274 } 2275 2276 nvme->n_intr_cnt = count; 2277 2278 ret = ddi_intr_get_pri(nvme->n_inth[0], &nvme->n_intr_pri); 2279 if (ret != DDI_SUCCESS) { 2280 dev_err(nvme->n_dip, CE_WARN, "!%s: ddi_intr_get_pri failed", 2281 __func__); 2282 goto fail; 2283 } 2284 2285 for (i = 0; i < count; i++) { 2286 ret = ddi_intr_add_handler(nvme->n_inth[i], nvme_intr, 2287 (void *)nvme, (void *)(uintptr_t)i); 2288 if (ret != DDI_SUCCESS) { 2289 dev_err(nvme->n_dip, CE_WARN, 2290 "!%s: ddi_intr_add_handler failed", __func__); 2291 goto fail; 2292 } 2293 } 2294 2295 (void) ddi_intr_get_cap(nvme->n_inth[0], &nvme->n_intr_cap); 2296 2297 for (i = 0; i < count; i++) { 2298 if (nvme->n_inth[i] == NULL) 2299 break; 2300 2301 if (nvme->n_intr_cap & DDI_INTR_FLAG_BLOCK) { 2302 if (ddi_intr_block_enable(&nvme->n_inth[i], 1) != 2303 DDI_SUCCESS) 2304 failed++; 2305 } else { 2306 if (ddi_intr_enable(nvme->n_inth[i]) != DDI_SUCCESS) 2307 failed++; 2308 } 2309 } 2310 2311 if (failed != 0) { 2312 dev_err(nvme->n_dip, CE_WARN, 2313 "!%s: enabling interrupts failed", __func__); 2314 goto fail; 2315 } 2316 2317 nvme->n_intr_type = intr_type; 2318 2319 nvme->n_progress |= NVME_INTERRUPTS; 2320 2321 return (DDI_SUCCESS); 2322 2323 fail: 2324 nvme_release_interrupts(nvme); 2325 2326 return (ret); 2327 } 2328 2329 static int 2330 nvme_fm_errcb(dev_info_t *dip, ddi_fm_error_t *fm_error, const void *arg) 2331 { 2332 _NOTE(ARGUNUSED(arg)); 2333 2334 pci_ereport_post(dip, fm_error, NULL); 2335 return (fm_error->fme_status); 2336 } 2337 2338 static int 2339 nvme_attach(dev_info_t *dip, ddi_attach_cmd_t cmd) 2340 { 2341 nvme_t *nvme; 2342 int instance; 2343 int nregs; 2344 off_t regsize; 2345 int i; 2346 char name[32]; 2347 2348 if (cmd != DDI_ATTACH) 2349 return (DDI_FAILURE); 2350 2351 instance = ddi_get_instance(dip); 2352 2353 if (ddi_soft_state_zalloc(nvme_state, instance) != DDI_SUCCESS) 2354 return (DDI_FAILURE); 2355 2356 nvme = ddi_get_soft_state(nvme_state, instance); 2357 ddi_set_driver_private(dip, nvme); 2358 nvme->n_dip = dip; 2359 2360 nvme->n_strict_version = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 2361 DDI_PROP_DONTPASS, "strict-version", 1) == 1 ? B_TRUE : B_FALSE; 2362 nvme->n_ignore_unknown_vendor_status = ddi_prop_get_int(DDI_DEV_T_ANY, 2363 dip, DDI_PROP_DONTPASS, "ignore-unknown-vendor-status", 0) == 1 ? 
2364 B_TRUE : B_FALSE; 2365 nvme->n_admin_queue_len = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 2366 DDI_PROP_DONTPASS, "admin-queue-len", NVME_DEFAULT_ADMIN_QUEUE_LEN); 2367 nvme->n_io_queue_len = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 2368 DDI_PROP_DONTPASS, "io-queue-len", NVME_DEFAULT_IO_QUEUE_LEN); 2369 nvme->n_async_event_limit = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 2370 DDI_PROP_DONTPASS, "async-event-limit", 2371 NVME_DEFAULT_ASYNC_EVENT_LIMIT); 2372 2373 if (nvme->n_admin_queue_len < NVME_MIN_ADMIN_QUEUE_LEN) 2374 nvme->n_admin_queue_len = NVME_MIN_ADMIN_QUEUE_LEN; 2375 else if (nvme->n_admin_queue_len > NVME_MAX_ADMIN_QUEUE_LEN) 2376 nvme->n_admin_queue_len = NVME_MAX_ADMIN_QUEUE_LEN; 2377 2378 if (nvme->n_io_queue_len < NVME_MIN_IO_QUEUE_LEN) 2379 nvme->n_io_queue_len = NVME_MIN_IO_QUEUE_LEN; 2380 2381 if (nvme->n_async_event_limit < 1) 2382 nvme->n_async_event_limit = NVME_DEFAULT_ASYNC_EVENT_LIMIT; 2383 2384 nvme->n_reg_acc_attr = nvme_reg_acc_attr; 2385 nvme->n_queue_dma_attr = nvme_queue_dma_attr; 2386 nvme->n_prp_dma_attr = nvme_prp_dma_attr; 2387 nvme->n_sgl_dma_attr = nvme_sgl_dma_attr; 2388 2389 /* 2390 * Setup FMA support. 2391 */ 2392 nvme->n_fm_cap = ddi_getprop(DDI_DEV_T_ANY, dip, 2393 DDI_PROP_CANSLEEP | DDI_PROP_DONTPASS, "fm-capable", 2394 DDI_FM_EREPORT_CAPABLE | DDI_FM_ACCCHK_CAPABLE | 2395 DDI_FM_DMACHK_CAPABLE | DDI_FM_ERRCB_CAPABLE); 2396 2397 ddi_fm_init(dip, &nvme->n_fm_cap, &nvme->n_fm_ibc); 2398 2399 if (nvme->n_fm_cap) { 2400 if (nvme->n_fm_cap & DDI_FM_ACCCHK_CAPABLE) 2401 nvme->n_reg_acc_attr.devacc_attr_access = 2402 DDI_FLAGERR_ACC; 2403 2404 if (nvme->n_fm_cap & DDI_FM_DMACHK_CAPABLE) { 2405 nvme->n_prp_dma_attr.dma_attr_flags |= DDI_DMA_FLAGERR; 2406 nvme->n_sgl_dma_attr.dma_attr_flags |= DDI_DMA_FLAGERR; 2407 } 2408 2409 if (DDI_FM_EREPORT_CAP(nvme->n_fm_cap) || 2410 DDI_FM_ERRCB_CAP(nvme->n_fm_cap)) 2411 pci_ereport_setup(dip); 2412 2413 if (DDI_FM_ERRCB_CAP(nvme->n_fm_cap)) 2414 ddi_fm_handler_register(dip, nvme_fm_errcb, 2415 (void *)nvme); 2416 } 2417 2418 nvme->n_progress |= NVME_FMA_INIT; 2419 2420 /* 2421 * The spec defines several register sets. Only the controller 2422 * registers (set 1) are currently used. 2423 */ 2424 if (ddi_dev_nregs(dip, &nregs) == DDI_FAILURE || 2425 nregs < 2 || 2426 ddi_dev_regsize(dip, 1, ®size) == DDI_FAILURE) 2427 goto fail; 2428 2429 if (ddi_regs_map_setup(dip, 1, &nvme->n_regs, 0, regsize, 2430 &nvme->n_reg_acc_attr, &nvme->n_regh) != DDI_SUCCESS) { 2431 dev_err(dip, CE_WARN, "!failed to map regset 1"); 2432 goto fail; 2433 } 2434 2435 nvme->n_progress |= NVME_REGS_MAPPED; 2436 2437 /* 2438 * Create taskq for command completion. 2439 */ 2440 (void) snprintf(name, sizeof (name), "%s%d_cmd_taskq", 2441 ddi_driver_name(dip), ddi_get_instance(dip)); 2442 nvme->n_cmd_taskq = ddi_taskq_create(dip, name, MIN(UINT16_MAX, ncpus), 2443 TASKQ_DEFAULTPRI, 0); 2444 if (nvme->n_cmd_taskq == NULL) { 2445 dev_err(dip, CE_WARN, "!failed to create cmd taskq"); 2446 goto fail; 2447 } 2448 2449 2450 if (nvme_init(nvme) != DDI_SUCCESS) 2451 goto fail; 2452 2453 /* 2454 * Attach the blkdev driver for each namespace. 
2455 */ 2456 for (i = 0; i != nvme->n_namespace_count; i++) { 2457 if (nvme->n_ns[i].ns_ignore) 2458 continue; 2459 2460 nvme->n_ns[i].ns_bd_hdl = bd_alloc_handle(&nvme->n_ns[i], 2461 &nvme_bd_ops, &nvme->n_prp_dma_attr, KM_SLEEP); 2462 2463 if (nvme->n_ns[i].ns_bd_hdl == NULL) { 2464 dev_err(dip, CE_WARN, 2465 "!failed to get blkdev handle for namespace %d", i); 2466 goto fail; 2467 } 2468 2469 if (bd_attach_handle(dip, nvme->n_ns[i].ns_bd_hdl) 2470 != DDI_SUCCESS) { 2471 dev_err(dip, CE_WARN, 2472 "!failed to attach blkdev handle for namespace %d", 2473 i); 2474 goto fail; 2475 } 2476 } 2477 2478 return (DDI_SUCCESS); 2479 2480 fail: 2481 /* attach successful anyway so that FMA can retire the device */ 2482 if (nvme->n_dead) 2483 return (DDI_SUCCESS); 2484 2485 (void) nvme_detach(dip, DDI_DETACH); 2486 2487 return (DDI_FAILURE); 2488 } 2489 2490 static int 2491 nvme_detach(dev_info_t *dip, ddi_detach_cmd_t cmd) 2492 { 2493 int instance, i; 2494 nvme_t *nvme; 2495 2496 if (cmd != DDI_DETACH) 2497 return (DDI_FAILURE); 2498 2499 instance = ddi_get_instance(dip); 2500 2501 nvme = ddi_get_soft_state(nvme_state, instance); 2502 2503 if (nvme == NULL) 2504 return (DDI_FAILURE); 2505 2506 if (nvme->n_ns) { 2507 for (i = 0; i != nvme->n_namespace_count; i++) { 2508 if (nvme->n_ns[i].ns_bd_hdl) { 2509 (void) bd_detach_handle( 2510 nvme->n_ns[i].ns_bd_hdl); 2511 bd_free_handle(nvme->n_ns[i].ns_bd_hdl); 2512 } 2513 2514 if (nvme->n_ns[i].ns_idns) 2515 kmem_free(nvme->n_ns[i].ns_idns, 2516 sizeof (nvme_identify_nsid_t)); 2517 } 2518 2519 kmem_free(nvme->n_ns, sizeof (nvme_namespace_t) * 2520 nvme->n_namespace_count); 2521 } 2522 2523 if (nvme->n_progress & NVME_INTERRUPTS) 2524 nvme_release_interrupts(nvme); 2525 2526 if (nvme->n_cmd_taskq) 2527 ddi_taskq_wait(nvme->n_cmd_taskq); 2528 2529 if (nvme->n_ioq_count > 0) { 2530 for (i = 1; i != nvme->n_ioq_count + 1; i++) { 2531 if (nvme->n_ioq[i] != NULL) { 2532 /* TODO: send destroy queue commands */ 2533 nvme_free_qpair(nvme->n_ioq[i]); 2534 } 2535 } 2536 2537 kmem_free(nvme->n_ioq, sizeof (nvme_qpair_t *) * 2538 (nvme->n_ioq_count + 1)); 2539 } 2540 2541 if (nvme->n_progress & NVME_REGS_MAPPED) { 2542 nvme_shutdown(nvme, NVME_CC_SHN_NORMAL, B_FALSE); 2543 (void) nvme_reset(nvme, B_FALSE); 2544 } 2545 2546 if (nvme->n_cmd_taskq) 2547 ddi_taskq_destroy(nvme->n_cmd_taskq); 2548 2549 if (nvme->n_progress & NVME_CTRL_LIMITS) 2550 sema_destroy(&nvme->n_abort_sema); 2551 2552 if (nvme->n_progress & NVME_ADMIN_QUEUE) 2553 nvme_free_qpair(nvme->n_adminq); 2554 2555 if (nvme->n_idctl) 2556 kmem_free(nvme->n_idctl, sizeof (nvme_identify_ctrl_t)); 2557 2558 if (nvme->n_progress & NVME_REGS_MAPPED) 2559 ddi_regs_map_free(&nvme->n_regh); 2560 2561 if (nvme->n_progress & NVME_FMA_INIT) { 2562 if (DDI_FM_ERRCB_CAP(nvme->n_fm_cap)) 2563 ddi_fm_handler_unregister(nvme->n_dip); 2564 2565 if (DDI_FM_EREPORT_CAP(nvme->n_fm_cap) || 2566 DDI_FM_ERRCB_CAP(nvme->n_fm_cap)) 2567 pci_ereport_teardown(nvme->n_dip); 2568 2569 ddi_fm_fini(nvme->n_dip); 2570 } 2571 2572 if (nvme->n_vendor != NULL) 2573 strfree(nvme->n_vendor); 2574 2575 if (nvme->n_product != NULL) 2576 strfree(nvme->n_product); 2577 2578 ddi_soft_state_free(nvme_state, instance); 2579 2580 return (DDI_SUCCESS); 2581 } 2582 2583 static int 2584 nvme_quiesce(dev_info_t *dip) 2585 { 2586 int instance; 2587 nvme_t *nvme; 2588 2589 instance = ddi_get_instance(dip); 2590 2591 nvme = ddi_get_soft_state(nvme_state, instance); 2592 2593 if (nvme == NULL) 2594 return (DDI_FAILURE); 2595 2596 nvme_shutdown(nvme, 
NVME_CC_SHN_ABRUPT, B_TRUE); 2597 2598 (void) nvme_reset(nvme, B_TRUE); 2599 2600 return (DDI_FAILURE); 2601 } 2602 2603 static int 2604 nvme_fill_prp(nvme_cmd_t *cmd, bd_xfer_t *xfer) 2605 { 2606 nvme_t *nvme = cmd->nc_nvme; 2607 int nprp_page, nprp; 2608 uint64_t *prp; 2609 2610 if (xfer->x_ndmac == 0) 2611 return (DDI_FAILURE); 2612 2613 cmd->nc_sqe.sqe_dptr.d_prp[0] = xfer->x_dmac.dmac_laddress; 2614 ddi_dma_nextcookie(xfer->x_dmah, &xfer->x_dmac); 2615 2616 if (xfer->x_ndmac == 1) { 2617 cmd->nc_sqe.sqe_dptr.d_prp[1] = 0; 2618 return (DDI_SUCCESS); 2619 } else if (xfer->x_ndmac == 2) { 2620 cmd->nc_sqe.sqe_dptr.d_prp[1] = xfer->x_dmac.dmac_laddress; 2621 return (DDI_SUCCESS); 2622 } 2623 2624 xfer->x_ndmac--; 2625 2626 nprp_page = nvme->n_pagesize / sizeof (uint64_t) - 1; 2627 ASSERT(nprp_page > 0); 2628 nprp = (xfer->x_ndmac + nprp_page - 1) / nprp_page; 2629 2630 /* 2631 * We currently don't support chained PRPs and set up our DMA 2632 * attributes to reflect that. If we still get an I/O request 2633 * that needs a chained PRP something is very wrong. 2634 */ 2635 VERIFY(nprp == 1); 2636 2637 if (nvme_zalloc_dma(nvme, nvme->n_pagesize * nprp, DDI_DMA_READ, 2638 &nvme->n_prp_dma_attr, &cmd->nc_dma) != DDI_SUCCESS) { 2639 dev_err(nvme->n_dip, CE_WARN, "!%s: nvme_zalloc_dma failed", 2640 __func__); 2641 return (DDI_FAILURE); 2642 } 2643 2644 cmd->nc_sqe.sqe_dptr.d_prp[1] = cmd->nc_dma->nd_cookie.dmac_laddress; 2645 ddi_dma_nextcookie(cmd->nc_dma->nd_dmah, &cmd->nc_dma->nd_cookie); 2646 2647 /*LINTED: E_PTR_BAD_CAST_ALIGN*/ 2648 for (prp = (uint64_t *)cmd->nc_dma->nd_memp; 2649 xfer->x_ndmac > 0; 2650 prp++, xfer->x_ndmac--) { 2651 *prp = xfer->x_dmac.dmac_laddress; 2652 ddi_dma_nextcookie(xfer->x_dmah, &xfer->x_dmac); 2653 } 2654 2655 (void) ddi_dma_sync(cmd->nc_dma->nd_dmah, 0, cmd->nc_dma->nd_len, 2656 DDI_DMA_SYNC_FORDEV); 2657 return (DDI_SUCCESS); 2658 } 2659 2660 static nvme_cmd_t * 2661 nvme_create_nvm_cmd(nvme_namespace_t *ns, uint8_t opc, bd_xfer_t *xfer) 2662 { 2663 nvme_t *nvme = ns->ns_nvme; 2664 nvme_cmd_t *cmd; 2665 2666 /* 2667 * Blkdev only sets BD_XFER_POLL when dumping, so don't sleep. 2668 */ 2669 cmd = nvme_alloc_cmd(nvme, (xfer->x_flags & BD_XFER_POLL) ? 
2670 KM_NOSLEEP : KM_SLEEP);
2671
2672 if (cmd == NULL)
2673 return (NULL);
2674
2675 cmd->nc_sqe.sqe_opc = opc;
2676 cmd->nc_callback = nvme_bd_xfer_done;
2677 cmd->nc_xfer = xfer;
2678
2679 switch (opc) {
2680 case NVME_OPC_NVM_WRITE:
2681 case NVME_OPC_NVM_READ:
2682 VERIFY(xfer->x_nblks <= 0x10000);
2683
2684 cmd->nc_sqe.sqe_nsid = ns->ns_id;
2685
2686 cmd->nc_sqe.sqe_cdw10 = xfer->x_blkno & 0xffffffffu;
2687 cmd->nc_sqe.sqe_cdw11 = (xfer->x_blkno >> 32);
2688 cmd->nc_sqe.sqe_cdw12 = (uint16_t)(xfer->x_nblks - 1);
2689
2690 if (nvme_fill_prp(cmd, xfer) != DDI_SUCCESS)
2691 goto fail;
2692 break;
2693
2694 case NVME_OPC_NVM_FLUSH:
2695 cmd->nc_sqe.sqe_nsid = ns->ns_id;
2696 break;
2697
2698 default:
2699 goto fail;
2700 }
2701
2702 return (cmd);
2703
2704 fail:
2705 nvme_free_cmd(cmd);
2706 return (NULL);
2707 }
2708
2709 static void
2710 nvme_bd_xfer_done(void *arg)
2711 {
2712 nvme_cmd_t *cmd = arg;
2713 bd_xfer_t *xfer = cmd->nc_xfer;
2714 int error = 0;
2715
2716 error = nvme_check_cmd_status(cmd);
2717 nvme_free_cmd(cmd);
2718
2719 bd_xfer_done(xfer, error);
2720 }
2721
2722 static void
2723 nvme_bd_driveinfo(void *arg, bd_drive_t *drive)
2724 {
2725 nvme_namespace_t *ns = arg;
2726 nvme_t *nvme = ns->ns_nvme;
2727
2728 /*
2729 * blkdev maintains one queue size per instance (namespace),
2730 * but all namespaces share the I/O queues.
2731 * TODO: need to figure out a sane default, or use per-NS I/O queues,
2732 * or change blkdev to handle EAGAIN
2733 */
2734 drive->d_qsize = nvme->n_ioq_count * nvme->n_io_queue_len
2735 / nvme->n_namespace_count;
2736
2737 /*
2738 * d_maxxfer is not set, which means the value is taken from the DMA
2739 * attributes specified to bd_alloc_handle.
2740 */
2741
2742 drive->d_removable = B_FALSE;
2743 drive->d_hotpluggable = B_FALSE;
2744
2745 drive->d_target = ns->ns_id;
2746 drive->d_lun = 0;
2747
2748 drive->d_model = nvme->n_idctl->id_model;
2749 drive->d_model_len = sizeof (nvme->n_idctl->id_model);
2750 drive->d_vendor = nvme->n_vendor;
2751 drive->d_vendor_len = strlen(nvme->n_vendor);
2752 drive->d_product = nvme->n_product;
2753 drive->d_product_len = strlen(nvme->n_product);
2754 drive->d_serial = nvme->n_idctl->id_serial;
2755 drive->d_serial_len = sizeof (nvme->n_idctl->id_serial);
2756 drive->d_revision = nvme->n_idctl->id_fwrev;
2757 drive->d_revision_len = sizeof (nvme->n_idctl->id_fwrev);
2758 }
2759
2760 static int
2761 nvme_bd_mediainfo(void *arg, bd_media_t *media)
2762 {
2763 nvme_namespace_t *ns = arg;
2764
2765 media->m_nblks = ns->ns_block_count;
2766 media->m_blksize = ns->ns_block_size;
2767 media->m_readonly = B_FALSE;
2768 media->m_solidstate = B_TRUE;
2769
2770 media->m_pblksize = ns->ns_best_block_size;
2771
2772 return (0);
2773 }
2774
2775 static int
2776 nvme_bd_cmd(nvme_namespace_t *ns, bd_xfer_t *xfer, uint8_t opc)
2777 {
2778 nvme_t *nvme = ns->ns_nvme;
2779 nvme_cmd_t *cmd;
2780
2781 if (nvme->n_dead)
2782 return (EIO);
2783
2784 /* No polling for now */
2785 if (xfer->x_flags & BD_XFER_POLL)
2786 return (EIO);
2787
2788 cmd = nvme_create_nvm_cmd(ns, opc, xfer);
2789 if (cmd == NULL)
2790 return (ENOMEM);
2791
2792 cmd->nc_sqid = (CPU->cpu_id % nvme->n_ioq_count) + 1;
2793 ASSERT(cmd->nc_sqid <= nvme->n_ioq_count);
2794
2795 if (nvme_submit_cmd(nvme->n_ioq[cmd->nc_sqid], cmd)
2796 != DDI_SUCCESS)
2797 return (EAGAIN);
2798
2799 return (0);
2800 }
2801
2802 static int
2803 nvme_bd_read(void *arg, bd_xfer_t *xfer)
2804 {
2805 nvme_namespace_t *ns = arg;
2806
2807 return (nvme_bd_cmd(ns, xfer, NVME_OPC_NVM_READ));
2808 }
2809 2810 static int 2811 nvme_bd_write(void *arg, bd_xfer_t *xfer) 2812 { 2813 nvme_namespace_t *ns = arg; 2814 2815 return (nvme_bd_cmd(ns, xfer, NVME_OPC_NVM_WRITE)); 2816 } 2817 2818 static int 2819 nvme_bd_sync(void *arg, bd_xfer_t *xfer) 2820 { 2821 nvme_namespace_t *ns = arg; 2822 2823 if (ns->ns_nvme->n_dead) 2824 return (EIO); 2825 2826 /* 2827 * If the volatile write cache isn't enabled the FLUSH command is a 2828 * no-op, so we can take a shortcut here. 2829 */ 2830 if (ns->ns_nvme->n_volatile_write_cache_enabled == B_FALSE) { 2831 bd_xfer_done(xfer, ENOTSUP); 2832 return (0); 2833 } 2834 2835 return (nvme_bd_cmd(ns, xfer, NVME_OPC_NVM_FLUSH)); 2836 } 2837 2838 static int 2839 nvme_bd_devid(void *arg, dev_info_t *devinfo, ddi_devid_t *devid) 2840 { 2841 nvme_namespace_t *ns = arg; 2842 2843 return (ddi_devid_init(devinfo, DEVID_ENCAP, strlen(ns->ns_devid), 2844 ns->ns_devid, devid)); 2845 } 2846