/*
 * This file and its contents are supplied under the terms of the
 * Common Development and Distribution License ("CDDL"), version 1.0.
 * You may only use this file in accordance with the terms of version
 * 1.0 of the CDDL.
 *
 * A full copy of the text of the CDDL should have accompanied this
 * source. A copy of the CDDL is also available via the Internet at
 * http://www.illumos.org/license/CDDL.
 */

/*
 * Copyright 2016 Nexenta Systems, Inc. All rights reserved.
 * Copyright 2016 Tegile Systems, Inc. All rights reserved.
 * Copyright (c) 2016 The MathWorks, Inc. All rights reserved.
 */

/*
 * blkdev driver for NVMe compliant storage devices
 *
 * This driver was written to conform to version 1.0e of the NVMe
 * specification. It may work with newer versions, but that is completely
 * untested and disabled by default.
 *
 * The driver has only been tested on x86 systems and will not work on
 * big-endian systems without changes to the code accessing registers and data
 * structures used by the hardware.
 *
 *
 * Interrupt Usage:
 *
 * The driver will use a FIXED interrupt while configuring the device, as the
 * specification requires. Later in the attach process it will switch to MSI-X
 * or MSI if supported. The driver wants to have one interrupt vector per CPU,
 * but it will work correctly if fewer are available. Interrupts can be shared
 * by queues; in that case the interrupt handler iterates through the I/O
 * queue array in steps of n_intr_cnt. Usually only the admin queue will share
 * an interrupt with one I/O queue. The interrupt handler retrieves completed
 * commands from all queues sharing an interrupt vector and posts them to a
 * taskq for completion processing.
 *
 *
 * Command Processing:
 *
 * NVMe devices can have up to 65536 I/O queue pairs, with each queue holding
 * up to 65536 I/O commands. The driver will configure one I/O queue pair per
 * available interrupt vector, with the queue length usually much smaller than
 * the maximum of 65536. If the hardware doesn't provide enough queues, fewer
 * interrupt vectors will be used.
 *
 * Additionally the hardware provides a single special admin queue pair that
 * can hold up to 4096 admin commands.
 *
 * From the hardware perspective both queues of a queue pair are independent,
 * but they share some driver state: the command array (holding pointers to
 * commands currently being processed by the hardware) and the active command
 * counter. Access to the submission side of a queue pair and the shared state
 * is protected by nq_mutex. The completion side of a queue pair does not need
 * that protection apart from its access to the shared state; it is called
 * only in the interrupt handler, which does not run concurrently for the same
 * interrupt vector.
 *
 * When a command is submitted to a queue pair the active command counter is
 * incremented and a pointer to the command is stored in the command array.
 * The array index is used as command identifier (CID) in the submission queue
 * entry. Some commands may take a very long time to complete, and if the
 * queue wraps around in that time a submission may find the next array slot
 * still in use by a long-running command. In this case the array is searched
 * sequentially for the next free slot. The length of the command array is the
 * same as the configured queue length.
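 *
 * In rough outline (the names are those used by nvme_submit_cmd() further
 * below; locking and the queue-full check are omitted):
 *
 *	while (qp->nq_cmd[qp->nq_next_cmd] != NULL)
 *		qp->nq_next_cmd = (qp->nq_next_cmd + 1) % qp->nq_nentry;
 *	qp->nq_cmd[qp->nq_next_cmd] = cmd;
 *	qp->nq_active_cmds++;
 *	cmd->nc_sqe.sqe_cid = qp->nq_next_cmd;
 *	qp->nq_next_cmd = (qp->nq_next_cmd + 1) % qp->nq_nentry;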
 *
 *
 * Namespace Support:
 *
 * NVMe devices can have multiple namespaces, each being an independent data
 * store. The driver supports multiple namespaces and creates a blkdev
 * interface for each namespace found. Namespaces can have various attributes
 * to support thin provisioning and protection information. This driver does
 * not support any of these and ignores namespaces that have such attributes.
 *
 *
 * Blkdev Interface:
 *
 * This driver uses blkdev to do all the heavy lifting involved with
 * presenting a disk device to the system. As a result, the processing of I/O
 * requests is relatively simple as blkdev takes care of partitioning,
 * boundary checks, DMA setup, and splitting of transfers into manageable
 * chunks.
 *
 * I/O requests coming in from blkdev are turned into NVM commands and posted
 * to an I/O queue. The queue is selected by taking the CPU id modulo the
 * number of queues. There is currently no timeout handling of I/O commands.
 *
 * Blkdev also supports querying device/media information and generating a
 * devid. The driver reports the best block size as determined by the
 * namespace format back to blkdev as physical block size to support partition
 * and block alignment. The devid is composed using the device vendor ID,
 * model number, serial number, and the namespace ID.
 *
 *
 * Error Handling:
 *
 * Error handling is currently limited to detecting fatal hardware errors,
 * either by asynchronous events, or synchronously through command status or
 * admin command timeouts. In case of severe errors the device is fenced off;
 * all further requests will return EIO. FMA is then called to fault the
 * device.
 *
 * The hardware has a limit for outstanding asynchronous event requests.
 * Before this limit is known the driver assumes it is at least 1 and posts a
 * single asynchronous request. Later, when the limit is known, more
 * asynchronous event requests are posted to allow quicker reception of error
 * information. When an asynchronous event is posted by the hardware the
 * driver will parse the error status fields and log information or fault the
 * device, depending on the severity of the asynchronous event. The
 * asynchronous event request is then reused and posted to the admin queue
 * again.
 *
 * On command completion the command status is checked for errors. Errors
 * indicating a driver bug cause the driver to panic. Almost all other error
 * status values just cause EIO to be returned.
 *
 * Command timeouts are currently detected for all admin commands except
 * asynchronous event requests. If a command times out and the hardware
 * appears to be healthy the driver attempts to abort the command. If this
 * fails the driver assumes the device is dead, fences it off, and calls FMA
 * to retire it. In general admin commands are issued at attach time only. No
 * timeout handling of normal I/O commands is presently done.
 *
 * The ABORT command may itself time out; in that case the device is likewise
 * declared dead and fenced off.
 *
 *
 * Quiesce / Fast Reboot:
 *
 * The driver currently does not support fast reboot. A quiesce(9E) entry
 * point is still provided which is used to send a shutdown notification to
 * the device.
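 *
 * In rough outline the notification amounts to the following (a simplified
 * sketch of nvme_shutdown() further below; the real code bounds the wait and
 * is told which notification mode, NVME_CC_SHN_NORMAL or NVME_CC_SHN_ABRUPT,
 * to use):
 *
 *	cc.r = nvme_get32(nvme, NVME_REG_CC);
 *	cc.b.cc_shn = mode & 0x3;
 *	nvme_put32(nvme, NVME_REG_CC, cc.r);
 *	do {
 *		csts.r = nvme_get32(nvme, NVME_REG_CSTS);
 *	} while (csts.b.csts_shst != NVME_CSTS_SHN_COMPLETE);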
 *
 *
 * Driver Configuration:
 *
 * The following driver properties can be changed to control some aspects of
 * the driver's operation:
 * - strict-version: can be set to 0 to allow devices conforming to newer
 *   versions to be used
 * - ignore-unknown-vendor-status: can be set to 1 so that unknown vendor
 *   specific command status is not treated as a fatal error that leads to
 *   the device being faulted
 * - admin-queue-len: the maximum length of the admin queue (16-4096)
 * - io-queue-len: the maximum length of the I/O queues (16-65536)
 * - async-event-limit: the maximum number of asynchronous event requests to
 *   be posted by the driver
 * - volatile-write-cache-enable: can be set to 0 to disable the volatile
 *   write cache
 *
 *
 * TODO:
 * - figure out sane default for I/O queue depth reported to blkdev
 * - polled I/O support to support kernel core dumping
 * - FMA handling of media errors
 * - support for devices supporting very large I/O requests using chained PRPs
 * - support for querying log pages from user space
 * - support for configuring hardware parameters like interrupt coalescing
 * - support for media formatting and hard partitioning into namespaces
 * - support for big-endian systems
 * - support for fast reboot
 */

#include <sys/byteorder.h>
#ifdef _BIG_ENDIAN
#error nvme driver needs porting for big-endian platforms
#endif

#include <sys/modctl.h>
#include <sys/conf.h>
#include <sys/devops.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/bitmap.h>
#include <sys/sysmacros.h>
#include <sys/param.h>
#include <sys/varargs.h>
#include <sys/cpuvar.h>
#include <sys/disp.h>
#include <sys/blkdev.h>
#include <sys/atomic.h>
#include <sys/archsystm.h>
#include <sys/sata/sata_hba.h>

#include "nvme_reg.h"
#include "nvme_var.h"


/* NVMe spec version supported */
static const int nvme_version_major = 1;
static const int nvme_version_minor = 0;

/* tunable for admin command timeout in seconds, default is 1s */
static volatile int nvme_admin_cmd_timeout = 1;

static int nvme_attach(dev_info_t *, ddi_attach_cmd_t);
static int nvme_detach(dev_info_t *, ddi_detach_cmd_t);
static int nvme_quiesce(dev_info_t *);
static int nvme_fm_errcb(dev_info_t *, ddi_fm_error_t *, const void *);
static int nvme_setup_interrupts(nvme_t *, int, int);
static void nvme_release_interrupts(nvme_t *);
static uint_t nvme_intr(caddr_t, caddr_t);

static void nvme_shutdown(nvme_t *, int, boolean_t);
static boolean_t nvme_reset(nvme_t *, boolean_t);
static int nvme_init(nvme_t *);
static nvme_cmd_t *nvme_alloc_cmd(nvme_t *, int);
static void nvme_free_cmd(nvme_cmd_t *);
static nvme_cmd_t *nvme_create_nvm_cmd(nvme_namespace_t *, uint8_t,
    bd_xfer_t *);
static int nvme_admin_cmd(nvme_cmd_t *, int);
static int nvme_submit_cmd(nvme_qpair_t *, nvme_cmd_t *);
static nvme_cmd_t *nvme_retrieve_cmd(nvme_t *, nvme_qpair_t *);
static boolean_t nvme_wait_cmd(nvme_cmd_t *, uint_t);
static void nvme_wakeup_cmd(void *);
static void nvme_async_event_task(void *);

static int nvme_check_unknown_cmd_status(nvme_cmd_t *);
static int nvme_check_vendor_cmd_status(nvme_cmd_t *);
static int nvme_check_integrity_cmd_status(nvme_cmd_t *);
static int nvme_check_specific_cmd_status(nvme_cmd_t *);
static int
nvme_check_generic_cmd_status(nvme_cmd_t *);
static inline int nvme_check_cmd_status(nvme_cmd_t *);

static void nvme_abort_cmd(nvme_cmd_t *);
static int nvme_async_event(nvme_t *);
static void *nvme_get_logpage(nvme_t *, uint8_t, ...);
static void *nvme_identify(nvme_t *, uint32_t);
static boolean_t nvme_set_features(nvme_t *, uint32_t, uint8_t, uint32_t,
    uint32_t *);
static boolean_t nvme_write_cache_set(nvme_t *, boolean_t);
static int nvme_set_nqueues(nvme_t *, uint16_t);

static void nvme_free_dma(nvme_dma_t *);
static int nvme_zalloc_dma(nvme_t *, size_t, uint_t, ddi_dma_attr_t *,
    nvme_dma_t **);
static int nvme_zalloc_queue_dma(nvme_t *, uint32_t, uint16_t, uint_t,
    nvme_dma_t **);
static void nvme_free_qpair(nvme_qpair_t *);
static int nvme_alloc_qpair(nvme_t *, uint32_t, nvme_qpair_t **, int);
static int nvme_create_io_qpair(nvme_t *, nvme_qpair_t *, uint16_t);

static inline void nvme_put64(nvme_t *, uintptr_t, uint64_t);
static inline void nvme_put32(nvme_t *, uintptr_t, uint32_t);
static inline uint64_t nvme_get64(nvme_t *, uintptr_t);
static inline uint32_t nvme_get32(nvme_t *, uintptr_t);

static boolean_t nvme_check_regs_hdl(nvme_t *);
static boolean_t nvme_check_dma_hdl(nvme_dma_t *);

static int nvme_fill_prp(nvme_cmd_t *, bd_xfer_t *);

static void nvme_bd_xfer_done(void *);
static void nvme_bd_driveinfo(void *, bd_drive_t *);
static int nvme_bd_mediainfo(void *, bd_media_t *);
static int nvme_bd_cmd(nvme_namespace_t *, bd_xfer_t *, uint8_t);
static int nvme_bd_read(void *, bd_xfer_t *);
static int nvme_bd_write(void *, bd_xfer_t *);
static int nvme_bd_sync(void *, bd_xfer_t *);
static int nvme_bd_devid(void *, dev_info_t *, ddi_devid_t *);

static int nvme_prp_dma_constructor(void *, void *, int);
static void nvme_prp_dma_destructor(void *, void *);

static void nvme_prepare_devid(nvme_t *, uint32_t);

static void *nvme_state;
static kmem_cache_t *nvme_cmd_cache;

/*
 * DMA attributes for queue DMA memory
 *
 * Queue DMA memory must be page aligned. The maximum length of a queue is
 * 65536 entries, and an entry can be 64 bytes long.
 */
static ddi_dma_attr_t nvme_queue_dma_attr = {
        .dma_attr_version = DMA_ATTR_V0,
        .dma_attr_addr_lo = 0,
        .dma_attr_addr_hi = 0xffffffffffffffffULL,
        .dma_attr_count_max = (UINT16_MAX + 1) * sizeof (nvme_sqe_t) - 1,
        .dma_attr_align = 0x1000,
        .dma_attr_burstsizes = 0x7ff,
        .dma_attr_minxfer = 0x1000,
        .dma_attr_maxxfer = (UINT16_MAX + 1) * sizeof (nvme_sqe_t),
        .dma_attr_seg = 0xffffffffffffffffULL,
        .dma_attr_sgllen = 1,
        .dma_attr_granular = 1,
        .dma_attr_flags = 0,
};

/*
 * DMA attributes for transfers using Physical Region Page (PRP) entries
 *
 * A PRP entry describes one page of DMA memory using the page size specified
 * in the controller configuration's memory page size register (CC.MPS). It
 * uses a 64bit base address aligned to this page size. There is no limitation
 * on chaining PRPs together for arbitrarily large DMA transfers.
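 *
 * As a rough bound (a sketch; see the clamping of n_max_data_transfer_size in
 * nvme_init() below): with the minimum page size of 4096 bytes a single PRP
 * list page holds 4096 / 8 = 512 entries, so this driver, which does not yet
 * chain PRP lists, limits a single transfer to 512 * 4096 bytes = 2 MB.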
300 */ 301 static ddi_dma_attr_t nvme_prp_dma_attr = { 302 .dma_attr_version = DMA_ATTR_V0, 303 .dma_attr_addr_lo = 0, 304 .dma_attr_addr_hi = 0xffffffffffffffffULL, 305 .dma_attr_count_max = 0xfff, 306 .dma_attr_align = 0x1000, 307 .dma_attr_burstsizes = 0x7ff, 308 .dma_attr_minxfer = 0x1000, 309 .dma_attr_maxxfer = 0x1000, 310 .dma_attr_seg = 0xfff, 311 .dma_attr_sgllen = -1, 312 .dma_attr_granular = 1, 313 .dma_attr_flags = 0, 314 }; 315 316 /* 317 * DMA attributes for transfers using scatter/gather lists 318 * 319 * A SGL entry describes a chunk of DMA memory using a 64bit base address and a 320 * 32bit length field. SGL Segment and SGL Last Segment entries require the 321 * length to be a multiple of 16 bytes. 322 */ 323 static ddi_dma_attr_t nvme_sgl_dma_attr = { 324 .dma_attr_version = DMA_ATTR_V0, 325 .dma_attr_addr_lo = 0, 326 .dma_attr_addr_hi = 0xffffffffffffffffULL, 327 .dma_attr_count_max = 0xffffffffUL, 328 .dma_attr_align = 1, 329 .dma_attr_burstsizes = 0x7ff, 330 .dma_attr_minxfer = 0x10, 331 .dma_attr_maxxfer = 0xfffffffffULL, 332 .dma_attr_seg = 0xffffffffffffffffULL, 333 .dma_attr_sgllen = -1, 334 .dma_attr_granular = 0x10, 335 .dma_attr_flags = 0 336 }; 337 338 static ddi_device_acc_attr_t nvme_reg_acc_attr = { 339 .devacc_attr_version = DDI_DEVICE_ATTR_V0, 340 .devacc_attr_endian_flags = DDI_STRUCTURE_LE_ACC, 341 .devacc_attr_dataorder = DDI_STRICTORDER_ACC 342 }; 343 344 static struct dev_ops nvme_dev_ops = { 345 .devo_rev = DEVO_REV, 346 .devo_refcnt = 0, 347 .devo_getinfo = ddi_no_info, 348 .devo_identify = nulldev, 349 .devo_probe = nulldev, 350 .devo_attach = nvme_attach, 351 .devo_detach = nvme_detach, 352 .devo_reset = nodev, 353 .devo_cb_ops = NULL, 354 .devo_bus_ops = NULL, 355 .devo_power = NULL, 356 .devo_quiesce = nvme_quiesce, 357 }; 358 359 static struct modldrv nvme_modldrv = { 360 .drv_modops = &mod_driverops, 361 .drv_linkinfo = "NVMe v1.0e", 362 .drv_dev_ops = &nvme_dev_ops 363 }; 364 365 static struct modlinkage nvme_modlinkage = { 366 .ml_rev = MODREV_1, 367 .ml_linkage = { &nvme_modldrv, NULL } 368 }; 369 370 static bd_ops_t nvme_bd_ops = { 371 .o_version = BD_OPS_VERSION_0, 372 .o_drive_info = nvme_bd_driveinfo, 373 .o_media_info = nvme_bd_mediainfo, 374 .o_devid_init = nvme_bd_devid, 375 .o_sync_cache = nvme_bd_sync, 376 .o_read = nvme_bd_read, 377 .o_write = nvme_bd_write, 378 }; 379 380 int 381 _init(void) 382 { 383 int error; 384 385 error = ddi_soft_state_init(&nvme_state, sizeof (nvme_t), 1); 386 if (error != DDI_SUCCESS) 387 return (error); 388 389 nvme_cmd_cache = kmem_cache_create("nvme_cmd_cache", 390 sizeof (nvme_cmd_t), 64, NULL, NULL, NULL, NULL, NULL, 0); 391 392 bd_mod_init(&nvme_dev_ops); 393 394 error = mod_install(&nvme_modlinkage); 395 if (error != DDI_SUCCESS) { 396 ddi_soft_state_fini(&nvme_state); 397 bd_mod_fini(&nvme_dev_ops); 398 } 399 400 return (error); 401 } 402 403 int 404 _fini(void) 405 { 406 int error; 407 408 error = mod_remove(&nvme_modlinkage); 409 if (error == DDI_SUCCESS) { 410 ddi_soft_state_fini(&nvme_state); 411 kmem_cache_destroy(nvme_cmd_cache); 412 bd_mod_fini(&nvme_dev_ops); 413 } 414 415 return (error); 416 } 417 418 int 419 _info(struct modinfo *modinfop) 420 { 421 return (mod_info(&nvme_modlinkage, modinfop)); 422 } 423 424 static inline void 425 nvme_put64(nvme_t *nvme, uintptr_t reg, uint64_t val) 426 { 427 ASSERT(((uintptr_t)(nvme->n_regs + reg) & 0x7) == 0); 428 429 /*LINTED: E_BAD_PTR_CAST_ALIGN*/ 430 ddi_put64(nvme->n_regh, (uint64_t *)(nvme->n_regs + reg), val); 431 } 432 433 static inline void 
434 nvme_put32(nvme_t *nvme, uintptr_t reg, uint32_t val) 435 { 436 ASSERT(((uintptr_t)(nvme->n_regs + reg) & 0x3) == 0); 437 438 /*LINTED: E_BAD_PTR_CAST_ALIGN*/ 439 ddi_put32(nvme->n_regh, (uint32_t *)(nvme->n_regs + reg), val); 440 } 441 442 static inline uint64_t 443 nvme_get64(nvme_t *nvme, uintptr_t reg) 444 { 445 uint64_t val; 446 447 ASSERT(((uintptr_t)(nvme->n_regs + reg) & 0x7) == 0); 448 449 /*LINTED: E_BAD_PTR_CAST_ALIGN*/ 450 val = ddi_get64(nvme->n_regh, (uint64_t *)(nvme->n_regs + reg)); 451 452 return (val); 453 } 454 455 static inline uint32_t 456 nvme_get32(nvme_t *nvme, uintptr_t reg) 457 { 458 uint32_t val; 459 460 ASSERT(((uintptr_t)(nvme->n_regs + reg) & 0x3) == 0); 461 462 /*LINTED: E_BAD_PTR_CAST_ALIGN*/ 463 val = ddi_get32(nvme->n_regh, (uint32_t *)(nvme->n_regs + reg)); 464 465 return (val); 466 } 467 468 static boolean_t 469 nvme_check_regs_hdl(nvme_t *nvme) 470 { 471 ddi_fm_error_t error; 472 473 ddi_fm_acc_err_get(nvme->n_regh, &error, DDI_FME_VERSION); 474 475 if (error.fme_status != DDI_FM_OK) 476 return (B_TRUE); 477 478 return (B_FALSE); 479 } 480 481 static boolean_t 482 nvme_check_dma_hdl(nvme_dma_t *dma) 483 { 484 ddi_fm_error_t error; 485 486 if (dma == NULL) 487 return (B_FALSE); 488 489 ddi_fm_dma_err_get(dma->nd_dmah, &error, DDI_FME_VERSION); 490 491 if (error.fme_status != DDI_FM_OK) 492 return (B_TRUE); 493 494 return (B_FALSE); 495 } 496 497 static void 498 nvme_free_dma_common(nvme_dma_t *dma) 499 { 500 if (dma->nd_dmah != NULL) 501 (void) ddi_dma_unbind_handle(dma->nd_dmah); 502 if (dma->nd_acch != NULL) 503 ddi_dma_mem_free(&dma->nd_acch); 504 if (dma->nd_dmah != NULL) 505 ddi_dma_free_handle(&dma->nd_dmah); 506 } 507 508 static void 509 nvme_free_dma(nvme_dma_t *dma) 510 { 511 nvme_free_dma_common(dma); 512 kmem_free(dma, sizeof (*dma)); 513 } 514 515 /* ARGSUSED */ 516 static void 517 nvme_prp_dma_destructor(void *buf, void *private) 518 { 519 nvme_dma_t *dma = (nvme_dma_t *)buf; 520 521 nvme_free_dma_common(dma); 522 } 523 524 static int 525 nvme_alloc_dma_common(nvme_t *nvme, nvme_dma_t *dma, 526 size_t len, uint_t flags, ddi_dma_attr_t *dma_attr) 527 { 528 if (ddi_dma_alloc_handle(nvme->n_dip, dma_attr, DDI_DMA_SLEEP, NULL, 529 &dma->nd_dmah) != DDI_SUCCESS) { 530 /* 531 * Due to DDI_DMA_SLEEP this can't be DDI_DMA_NORESOURCES, and 532 * the only other possible error is DDI_DMA_BADATTR which 533 * indicates a driver bug which should cause a panic. 534 */ 535 dev_err(nvme->n_dip, CE_PANIC, 536 "!failed to get DMA handle, check DMA attributes"); 537 return (DDI_FAILURE); 538 } 539 540 /* 541 * ddi_dma_mem_alloc() can only fail when DDI_DMA_NOSLEEP is specified 542 * or the flags are conflicting, which isn't the case here. 
543 */ 544 (void) ddi_dma_mem_alloc(dma->nd_dmah, len, &nvme->n_reg_acc_attr, 545 DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL, &dma->nd_memp, 546 &dma->nd_len, &dma->nd_acch); 547 548 if (ddi_dma_addr_bind_handle(dma->nd_dmah, NULL, dma->nd_memp, 549 dma->nd_len, flags | DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL, 550 &dma->nd_cookie, &dma->nd_ncookie) != DDI_DMA_MAPPED) { 551 dev_err(nvme->n_dip, CE_WARN, 552 "!failed to bind DMA memory"); 553 atomic_inc_32(&nvme->n_dma_bind_err); 554 nvme_free_dma_common(dma); 555 return (DDI_FAILURE); 556 } 557 558 return (DDI_SUCCESS); 559 } 560 561 static int 562 nvme_zalloc_dma(nvme_t *nvme, size_t len, uint_t flags, 563 ddi_dma_attr_t *dma_attr, nvme_dma_t **ret) 564 { 565 nvme_dma_t *dma = kmem_zalloc(sizeof (nvme_dma_t), KM_SLEEP); 566 567 if (nvme_alloc_dma_common(nvme, dma, len, flags, dma_attr) != 568 DDI_SUCCESS) { 569 *ret = NULL; 570 kmem_free(dma, sizeof (nvme_dma_t)); 571 return (DDI_FAILURE); 572 } 573 574 bzero(dma->nd_memp, dma->nd_len); 575 576 *ret = dma; 577 return (DDI_SUCCESS); 578 } 579 580 /* ARGSUSED */ 581 static int 582 nvme_prp_dma_constructor(void *buf, void *private, int flags) 583 { 584 nvme_dma_t *dma = (nvme_dma_t *)buf; 585 nvme_t *nvme = (nvme_t *)private; 586 587 dma->nd_dmah = NULL; 588 dma->nd_acch = NULL; 589 590 if (nvme_alloc_dma_common(nvme, dma, nvme->n_pagesize, 591 DDI_DMA_READ, &nvme->n_prp_dma_attr) != DDI_SUCCESS) { 592 return (-1); 593 } 594 595 ASSERT(dma->nd_ncookie == 1); 596 597 dma->nd_cached = B_TRUE; 598 599 return (0); 600 } 601 602 static int 603 nvme_zalloc_queue_dma(nvme_t *nvme, uint32_t nentry, uint16_t qe_len, 604 uint_t flags, nvme_dma_t **dma) 605 { 606 uint32_t len = nentry * qe_len; 607 ddi_dma_attr_t q_dma_attr = nvme->n_queue_dma_attr; 608 609 len = roundup(len, nvme->n_pagesize); 610 611 q_dma_attr.dma_attr_minxfer = len; 612 613 if (nvme_zalloc_dma(nvme, len, flags, &q_dma_attr, dma) 614 != DDI_SUCCESS) { 615 dev_err(nvme->n_dip, CE_WARN, 616 "!failed to get DMA memory for queue"); 617 goto fail; 618 } 619 620 if ((*dma)->nd_ncookie != 1) { 621 dev_err(nvme->n_dip, CE_WARN, 622 "!got too many cookies for queue DMA"); 623 goto fail; 624 } 625 626 return (DDI_SUCCESS); 627 628 fail: 629 if (*dma) { 630 nvme_free_dma(*dma); 631 *dma = NULL; 632 } 633 634 return (DDI_FAILURE); 635 } 636 637 static void 638 nvme_free_qpair(nvme_qpair_t *qp) 639 { 640 int i; 641 642 mutex_destroy(&qp->nq_mutex); 643 644 if (qp->nq_sqdma != NULL) 645 nvme_free_dma(qp->nq_sqdma); 646 if (qp->nq_cqdma != NULL) 647 nvme_free_dma(qp->nq_cqdma); 648 649 if (qp->nq_active_cmds > 0) 650 for (i = 0; i != qp->nq_nentry; i++) 651 if (qp->nq_cmd[i] != NULL) 652 nvme_free_cmd(qp->nq_cmd[i]); 653 654 if (qp->nq_cmd != NULL) 655 kmem_free(qp->nq_cmd, sizeof (nvme_cmd_t *) * qp->nq_nentry); 656 657 kmem_free(qp, sizeof (nvme_qpair_t)); 658 } 659 660 static int 661 nvme_alloc_qpair(nvme_t *nvme, uint32_t nentry, nvme_qpair_t **nqp, 662 int idx) 663 { 664 nvme_qpair_t *qp = kmem_zalloc(sizeof (*qp), KM_SLEEP); 665 666 mutex_init(&qp->nq_mutex, NULL, MUTEX_DRIVER, 667 DDI_INTR_PRI(nvme->n_intr_pri)); 668 669 if (nvme_zalloc_queue_dma(nvme, nentry, sizeof (nvme_sqe_t), 670 DDI_DMA_WRITE, &qp->nq_sqdma) != DDI_SUCCESS) 671 goto fail; 672 673 if (nvme_zalloc_queue_dma(nvme, nentry, sizeof (nvme_cqe_t), 674 DDI_DMA_READ, &qp->nq_cqdma) != DDI_SUCCESS) 675 goto fail; 676 677 qp->nq_sq = (nvme_sqe_t *)qp->nq_sqdma->nd_memp; 678 qp->nq_cq = (nvme_cqe_t *)qp->nq_cqdma->nd_memp; 679 qp->nq_nentry = nentry; 680 681 qp->nq_sqtdbl = 
NVME_REG_SQTDBL(nvme, idx); 682 qp->nq_cqhdbl = NVME_REG_CQHDBL(nvme, idx); 683 684 qp->nq_cmd = kmem_zalloc(sizeof (nvme_cmd_t *) * nentry, KM_SLEEP); 685 qp->nq_next_cmd = 0; 686 687 *nqp = qp; 688 return (DDI_SUCCESS); 689 690 fail: 691 nvme_free_qpair(qp); 692 *nqp = NULL; 693 694 return (DDI_FAILURE); 695 } 696 697 static nvme_cmd_t * 698 nvme_alloc_cmd(nvme_t *nvme, int kmflag) 699 { 700 nvme_cmd_t *cmd = kmem_cache_alloc(nvme_cmd_cache, kmflag); 701 702 if (cmd == NULL) 703 return (cmd); 704 705 bzero(cmd, sizeof (nvme_cmd_t)); 706 707 cmd->nc_nvme = nvme; 708 709 mutex_init(&cmd->nc_mutex, NULL, MUTEX_DRIVER, 710 DDI_INTR_PRI(nvme->n_intr_pri)); 711 cv_init(&cmd->nc_cv, NULL, CV_DRIVER, NULL); 712 713 return (cmd); 714 } 715 716 static void 717 nvme_free_cmd(nvme_cmd_t *cmd) 718 { 719 if (cmd->nc_dma) { 720 if (cmd->nc_dma->nd_cached) 721 kmem_cache_free(cmd->nc_nvme->n_prp_cache, 722 cmd->nc_dma); 723 else 724 nvme_free_dma(cmd->nc_dma); 725 cmd->nc_dma = NULL; 726 } 727 728 cv_destroy(&cmd->nc_cv); 729 mutex_destroy(&cmd->nc_mutex); 730 731 kmem_cache_free(nvme_cmd_cache, cmd); 732 } 733 734 static int 735 nvme_submit_cmd(nvme_qpair_t *qp, nvme_cmd_t *cmd) 736 { 737 nvme_reg_sqtdbl_t tail = { 0 }; 738 739 mutex_enter(&qp->nq_mutex); 740 741 if (qp->nq_active_cmds == qp->nq_nentry) { 742 mutex_exit(&qp->nq_mutex); 743 return (DDI_FAILURE); 744 } 745 746 cmd->nc_completed = B_FALSE; 747 748 /* 749 * Try to insert the cmd into the active cmd array at the nq_next_cmd 750 * slot. If the slot is already occupied advance to the next slot and 751 * try again. This can happen for long running commands like async event 752 * requests. 753 */ 754 while (qp->nq_cmd[qp->nq_next_cmd] != NULL) 755 qp->nq_next_cmd = (qp->nq_next_cmd + 1) % qp->nq_nentry; 756 qp->nq_cmd[qp->nq_next_cmd] = cmd; 757 758 qp->nq_active_cmds++; 759 760 cmd->nc_sqe.sqe_cid = qp->nq_next_cmd; 761 bcopy(&cmd->nc_sqe, &qp->nq_sq[qp->nq_sqtail], sizeof (nvme_sqe_t)); 762 (void) ddi_dma_sync(qp->nq_sqdma->nd_dmah, 763 sizeof (nvme_sqe_t) * qp->nq_sqtail, 764 sizeof (nvme_sqe_t), DDI_DMA_SYNC_FORDEV); 765 qp->nq_next_cmd = (qp->nq_next_cmd + 1) % qp->nq_nentry; 766 767 tail.b.sqtdbl_sqt = qp->nq_sqtail = (qp->nq_sqtail + 1) % qp->nq_nentry; 768 nvme_put32(cmd->nc_nvme, qp->nq_sqtdbl, tail.r); 769 770 mutex_exit(&qp->nq_mutex); 771 return (DDI_SUCCESS); 772 } 773 774 static nvme_cmd_t * 775 nvme_retrieve_cmd(nvme_t *nvme, nvme_qpair_t *qp) 776 { 777 nvme_reg_cqhdbl_t head = { 0 }; 778 779 nvme_cqe_t *cqe; 780 nvme_cmd_t *cmd; 781 782 (void) ddi_dma_sync(qp->nq_cqdma->nd_dmah, 0, 783 sizeof (nvme_cqe_t) * qp->nq_nentry, DDI_DMA_SYNC_FORKERNEL); 784 785 cqe = &qp->nq_cq[qp->nq_cqhead]; 786 787 /* Check phase tag of CQE. Hardware inverts it for new entries. */ 788 if (cqe->cqe_sf.sf_p == qp->nq_phase) 789 return (NULL); 790 791 ASSERT(nvme->n_ioq[cqe->cqe_sqid] == qp); 792 ASSERT(cqe->cqe_cid < qp->nq_nentry); 793 794 mutex_enter(&qp->nq_mutex); 795 cmd = qp->nq_cmd[cqe->cqe_cid]; 796 qp->nq_cmd[cqe->cqe_cid] = NULL; 797 qp->nq_active_cmds--; 798 mutex_exit(&qp->nq_mutex); 799 800 ASSERT(cmd != NULL); 801 ASSERT(cmd->nc_nvme == nvme); 802 ASSERT(cmd->nc_sqid == cqe->cqe_sqid); 803 ASSERT(cmd->nc_sqe.sqe_cid == cqe->cqe_cid); 804 bcopy(cqe, &cmd->nc_cqe, sizeof (nvme_cqe_t)); 805 806 qp->nq_sqhead = cqe->cqe_sqhd; 807 808 head.b.cqhdbl_cqh = qp->nq_cqhead = (qp->nq_cqhead + 1) % qp->nq_nentry; 809 810 /* Toggle phase on wrap-around. */ 811 if (qp->nq_cqhead == 0) 812 qp->nq_phase = qp->nq_phase ? 
0 : 1; 813 814 nvme_put32(cmd->nc_nvme, qp->nq_cqhdbl, head.r); 815 816 return (cmd); 817 } 818 819 static int 820 nvme_check_unknown_cmd_status(nvme_cmd_t *cmd) 821 { 822 nvme_cqe_t *cqe = &cmd->nc_cqe; 823 824 dev_err(cmd->nc_nvme->n_dip, CE_WARN, 825 "!unknown command status received: opc = %x, sqid = %d, cid = %d, " 826 "sc = %x, sct = %x, dnr = %d, m = %d", cmd->nc_sqe.sqe_opc, 827 cqe->cqe_sqid, cqe->cqe_cid, cqe->cqe_sf.sf_sc, cqe->cqe_sf.sf_sct, 828 cqe->cqe_sf.sf_dnr, cqe->cqe_sf.sf_m); 829 830 bd_error(cmd->nc_xfer, BD_ERR_ILLRQ); 831 832 if (cmd->nc_nvme->n_strict_version) { 833 cmd->nc_nvme->n_dead = B_TRUE; 834 ddi_fm_service_impact(cmd->nc_nvme->n_dip, DDI_SERVICE_LOST); 835 } 836 837 return (EIO); 838 } 839 840 static int 841 nvme_check_vendor_cmd_status(nvme_cmd_t *cmd) 842 { 843 nvme_cqe_t *cqe = &cmd->nc_cqe; 844 845 dev_err(cmd->nc_nvme->n_dip, CE_WARN, 846 "!unknown command status received: opc = %x, sqid = %d, cid = %d, " 847 "sc = %x, sct = %x, dnr = %d, m = %d", cmd->nc_sqe.sqe_opc, 848 cqe->cqe_sqid, cqe->cqe_cid, cqe->cqe_sf.sf_sc, cqe->cqe_sf.sf_sct, 849 cqe->cqe_sf.sf_dnr, cqe->cqe_sf.sf_m); 850 if (!cmd->nc_nvme->n_ignore_unknown_vendor_status) { 851 cmd->nc_nvme->n_dead = B_TRUE; 852 ddi_fm_service_impact(cmd->nc_nvme->n_dip, DDI_SERVICE_LOST); 853 } 854 855 return (EIO); 856 } 857 858 static int 859 nvme_check_integrity_cmd_status(nvme_cmd_t *cmd) 860 { 861 nvme_cqe_t *cqe = &cmd->nc_cqe; 862 863 switch (cqe->cqe_sf.sf_sc) { 864 case NVME_CQE_SC_INT_NVM_WRITE: 865 /* write fail */ 866 /* TODO: post ereport */ 867 bd_error(cmd->nc_xfer, BD_ERR_MEDIA); 868 return (EIO); 869 870 case NVME_CQE_SC_INT_NVM_READ: 871 /* read fail */ 872 /* TODO: post ereport */ 873 bd_error(cmd->nc_xfer, BD_ERR_MEDIA); 874 return (EIO); 875 876 default: 877 return (nvme_check_unknown_cmd_status(cmd)); 878 } 879 } 880 881 static int 882 nvme_check_generic_cmd_status(nvme_cmd_t *cmd) 883 { 884 nvme_cqe_t *cqe = &cmd->nc_cqe; 885 886 switch (cqe->cqe_sf.sf_sc) { 887 case NVME_CQE_SC_GEN_SUCCESS: 888 return (0); 889 890 /* 891 * Errors indicating a bug in the driver should cause a panic. 892 */ 893 case NVME_CQE_SC_GEN_INV_OPC: 894 /* Invalid Command Opcode */ 895 dev_err(cmd->nc_nvme->n_dip, CE_PANIC, "programming error: " 896 "invalid opcode in cmd %p", (void *)cmd); 897 return (0); 898 899 case NVME_CQE_SC_GEN_INV_FLD: 900 /* Invalid Field in Command */ 901 dev_err(cmd->nc_nvme->n_dip, CE_PANIC, "programming error: " 902 "invalid field in cmd %p", (void *)cmd); 903 return (0); 904 905 case NVME_CQE_SC_GEN_ID_CNFL: 906 /* Command ID Conflict */ 907 dev_err(cmd->nc_nvme->n_dip, CE_PANIC, "programming error: " 908 "cmd ID conflict in cmd %p", (void *)cmd); 909 return (0); 910 911 case NVME_CQE_SC_GEN_INV_NS: 912 /* Invalid Namespace or Format */ 913 dev_err(cmd->nc_nvme->n_dip, CE_PANIC, "programming error: " 914 "invalid NS/format in cmd %p", (void *)cmd); 915 return (0); 916 917 case NVME_CQE_SC_GEN_NVM_LBA_RANGE: 918 /* LBA Out Of Range */ 919 dev_err(cmd->nc_nvme->n_dip, CE_PANIC, "programming error: " 920 "LBA out of range in cmd %p", (void *)cmd); 921 return (0); 922 923 /* 924 * Non-fatal errors, handle gracefully. 925 */ 926 case NVME_CQE_SC_GEN_DATA_XFR_ERR: 927 /* Data Transfer Error (DMA) */ 928 /* TODO: post ereport */ 929 atomic_inc_32(&cmd->nc_nvme->n_data_xfr_err); 930 bd_error(cmd->nc_xfer, BD_ERR_NTRDY); 931 return (EIO); 932 933 case NVME_CQE_SC_GEN_INTERNAL_ERR: 934 /* 935 * Internal Error. 
The spec (v1.0, section 4.5.1.2) says 936 * detailed error information is returned as async event, 937 * so we pretty much ignore the error here and handle it 938 * in the async event handler. 939 */ 940 atomic_inc_32(&cmd->nc_nvme->n_internal_err); 941 bd_error(cmd->nc_xfer, BD_ERR_NTRDY); 942 return (EIO); 943 944 case NVME_CQE_SC_GEN_ABORT_REQUEST: 945 /* 946 * Command Abort Requested. This normally happens only when a 947 * command times out. 948 */ 949 /* TODO: post ereport or change blkdev to handle this? */ 950 atomic_inc_32(&cmd->nc_nvme->n_abort_rq_err); 951 return (ECANCELED); 952 953 case NVME_CQE_SC_GEN_ABORT_PWRLOSS: 954 /* Command Aborted due to Power Loss Notification */ 955 ddi_fm_service_impact(cmd->nc_nvme->n_dip, DDI_SERVICE_LOST); 956 cmd->nc_nvme->n_dead = B_TRUE; 957 return (EIO); 958 959 case NVME_CQE_SC_GEN_ABORT_SQ_DEL: 960 /* Command Aborted due to SQ Deletion */ 961 atomic_inc_32(&cmd->nc_nvme->n_abort_sq_del); 962 return (EIO); 963 964 case NVME_CQE_SC_GEN_NVM_CAP_EXC: 965 /* Capacity Exceeded */ 966 atomic_inc_32(&cmd->nc_nvme->n_nvm_cap_exc); 967 bd_error(cmd->nc_xfer, BD_ERR_MEDIA); 968 return (EIO); 969 970 case NVME_CQE_SC_GEN_NVM_NS_NOTRDY: 971 /* Namespace Not Ready */ 972 atomic_inc_32(&cmd->nc_nvme->n_nvm_ns_notrdy); 973 bd_error(cmd->nc_xfer, BD_ERR_NTRDY); 974 return (EIO); 975 976 default: 977 return (nvme_check_unknown_cmd_status(cmd)); 978 } 979 } 980 981 static int 982 nvme_check_specific_cmd_status(nvme_cmd_t *cmd) 983 { 984 nvme_cqe_t *cqe = &cmd->nc_cqe; 985 986 switch (cqe->cqe_sf.sf_sc) { 987 case NVME_CQE_SC_SPC_INV_CQ: 988 /* Completion Queue Invalid */ 989 ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_CREATE_SQUEUE); 990 atomic_inc_32(&cmd->nc_nvme->n_inv_cq_err); 991 return (EINVAL); 992 993 case NVME_CQE_SC_SPC_INV_QID: 994 /* Invalid Queue Identifier */ 995 ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_CREATE_SQUEUE || 996 cmd->nc_sqe.sqe_opc == NVME_OPC_DELETE_SQUEUE || 997 cmd->nc_sqe.sqe_opc == NVME_OPC_CREATE_CQUEUE || 998 cmd->nc_sqe.sqe_opc == NVME_OPC_DELETE_CQUEUE); 999 atomic_inc_32(&cmd->nc_nvme->n_inv_qid_err); 1000 return (EINVAL); 1001 1002 case NVME_CQE_SC_SPC_MAX_QSZ_EXC: 1003 /* Max Queue Size Exceeded */ 1004 ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_CREATE_SQUEUE || 1005 cmd->nc_sqe.sqe_opc == NVME_OPC_CREATE_CQUEUE); 1006 atomic_inc_32(&cmd->nc_nvme->n_max_qsz_exc); 1007 return (EINVAL); 1008 1009 case NVME_CQE_SC_SPC_ABRT_CMD_EXC: 1010 /* Abort Command Limit Exceeded */ 1011 ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_ABORT); 1012 dev_err(cmd->nc_nvme->n_dip, CE_PANIC, "programming error: " 1013 "abort command limit exceeded in cmd %p", (void *)cmd); 1014 return (0); 1015 1016 case NVME_CQE_SC_SPC_ASYNC_EVREQ_EXC: 1017 /* Async Event Request Limit Exceeded */ 1018 ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_ASYNC_EVENT); 1019 dev_err(cmd->nc_nvme->n_dip, CE_PANIC, "programming error: " 1020 "async event request limit exceeded in cmd %p", 1021 (void *)cmd); 1022 return (0); 1023 1024 case NVME_CQE_SC_SPC_INV_INT_VECT: 1025 /* Invalid Interrupt Vector */ 1026 ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_CREATE_CQUEUE); 1027 atomic_inc_32(&cmd->nc_nvme->n_inv_int_vect); 1028 return (EINVAL); 1029 1030 case NVME_CQE_SC_SPC_INV_LOG_PAGE: 1031 /* Invalid Log Page */ 1032 ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_GET_LOG_PAGE); 1033 atomic_inc_32(&cmd->nc_nvme->n_inv_log_page); 1034 bd_error(cmd->nc_xfer, BD_ERR_ILLRQ); 1035 return (EINVAL); 1036 1037 case NVME_CQE_SC_SPC_INV_FORMAT: 1038 /* Invalid Format */ 1039 ASSERT(cmd->nc_sqe.sqe_opc == 
NVME_OPC_NVM_FORMAT); 1040 atomic_inc_32(&cmd->nc_nvme->n_inv_format); 1041 bd_error(cmd->nc_xfer, BD_ERR_ILLRQ); 1042 return (EINVAL); 1043 1044 case NVME_CQE_SC_SPC_INV_Q_DEL: 1045 /* Invalid Queue Deletion */ 1046 ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_DELETE_CQUEUE); 1047 atomic_inc_32(&cmd->nc_nvme->n_inv_q_del); 1048 return (EINVAL); 1049 1050 case NVME_CQE_SC_SPC_NVM_CNFL_ATTR: 1051 /* Conflicting Attributes */ 1052 ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_NVM_DSET_MGMT || 1053 cmd->nc_sqe.sqe_opc == NVME_OPC_NVM_READ || 1054 cmd->nc_sqe.sqe_opc == NVME_OPC_NVM_WRITE); 1055 atomic_inc_32(&cmd->nc_nvme->n_cnfl_attr); 1056 bd_error(cmd->nc_xfer, BD_ERR_ILLRQ); 1057 return (EINVAL); 1058 1059 case NVME_CQE_SC_SPC_NVM_INV_PROT: 1060 /* Invalid Protection Information */ 1061 ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_NVM_COMPARE || 1062 cmd->nc_sqe.sqe_opc == NVME_OPC_NVM_READ || 1063 cmd->nc_sqe.sqe_opc == NVME_OPC_NVM_WRITE); 1064 atomic_inc_32(&cmd->nc_nvme->n_inv_prot); 1065 bd_error(cmd->nc_xfer, BD_ERR_ILLRQ); 1066 return (EINVAL); 1067 1068 case NVME_CQE_SC_SPC_NVM_READONLY: 1069 /* Write to Read Only Range */ 1070 ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_NVM_WRITE); 1071 atomic_inc_32(&cmd->nc_nvme->n_readonly); 1072 bd_error(cmd->nc_xfer, BD_ERR_ILLRQ); 1073 return (EROFS); 1074 1075 default: 1076 return (nvme_check_unknown_cmd_status(cmd)); 1077 } 1078 } 1079 1080 static inline int 1081 nvme_check_cmd_status(nvme_cmd_t *cmd) 1082 { 1083 nvme_cqe_t *cqe = &cmd->nc_cqe; 1084 1085 /* take a shortcut if everything is alright */ 1086 if (cqe->cqe_sf.sf_sct == NVME_CQE_SCT_GENERIC && 1087 cqe->cqe_sf.sf_sc == NVME_CQE_SC_GEN_SUCCESS) 1088 return (0); 1089 1090 if (cqe->cqe_sf.sf_sct == NVME_CQE_SCT_GENERIC) 1091 return (nvme_check_generic_cmd_status(cmd)); 1092 else if (cqe->cqe_sf.sf_sct == NVME_CQE_SCT_SPECIFIC) 1093 return (nvme_check_specific_cmd_status(cmd)); 1094 else if (cqe->cqe_sf.sf_sct == NVME_CQE_SCT_INTEGRITY) 1095 return (nvme_check_integrity_cmd_status(cmd)); 1096 else if (cqe->cqe_sf.sf_sct == NVME_CQE_SCT_VENDOR) 1097 return (nvme_check_vendor_cmd_status(cmd)); 1098 1099 return (nvme_check_unknown_cmd_status(cmd)); 1100 } 1101 1102 /* 1103 * nvme_abort_cmd_cb -- replaces nc_callback of aborted commands 1104 * 1105 * This functions takes care of cleaning up aborted commands. The command 1106 * status is checked to catch any fatal errors. 1107 */ 1108 static void 1109 nvme_abort_cmd_cb(void *arg) 1110 { 1111 nvme_cmd_t *cmd = arg; 1112 1113 /* 1114 * Grab the command mutex. Once we have it we hold the last reference 1115 * to the command and can safely free it. 1116 */ 1117 mutex_enter(&cmd->nc_mutex); 1118 (void) nvme_check_cmd_status(cmd); 1119 mutex_exit(&cmd->nc_mutex); 1120 1121 nvme_free_cmd(cmd); 1122 } 1123 1124 static void 1125 nvme_abort_cmd(nvme_cmd_t *abort_cmd) 1126 { 1127 nvme_t *nvme = abort_cmd->nc_nvme; 1128 nvme_cmd_t *cmd = nvme_alloc_cmd(nvme, KM_SLEEP); 1129 nvme_abort_cmd_t ac = { 0 }; 1130 1131 sema_p(&nvme->n_abort_sema); 1132 1133 ac.b.ac_cid = abort_cmd->nc_sqe.sqe_cid; 1134 ac.b.ac_sqid = abort_cmd->nc_sqid; 1135 1136 /* 1137 * Drop the mutex of the aborted command. From this point on 1138 * we must assume that the abort callback has freed the command. 1139 */ 1140 mutex_exit(&abort_cmd->nc_mutex); 1141 1142 cmd->nc_sqid = 0; 1143 cmd->nc_sqe.sqe_opc = NVME_OPC_ABORT; 1144 cmd->nc_callback = nvme_wakeup_cmd; 1145 cmd->nc_sqe.sqe_cdw10 = ac.r; 1146 1147 /* 1148 * Send the ABORT to the hardware. 
The ABORT command will return _after_ 1149 * the aborted command has completed (aborted or otherwise). 1150 */ 1151 if (nvme_admin_cmd(cmd, nvme_admin_cmd_timeout) != DDI_SUCCESS) { 1152 sema_v(&nvme->n_abort_sema); 1153 dev_err(nvme->n_dip, CE_WARN, 1154 "!nvme_admin_cmd failed for ABORT"); 1155 atomic_inc_32(&nvme->n_abort_failed); 1156 return; 1157 } 1158 sema_v(&nvme->n_abort_sema); 1159 1160 if (nvme_check_cmd_status(cmd)) { 1161 dev_err(nvme->n_dip, CE_WARN, 1162 "!ABORT failed with sct = %x, sc = %x", 1163 cmd->nc_cqe.cqe_sf.sf_sct, cmd->nc_cqe.cqe_sf.sf_sc); 1164 atomic_inc_32(&nvme->n_abort_failed); 1165 } else { 1166 atomic_inc_32(&nvme->n_cmd_aborted); 1167 } 1168 1169 nvme_free_cmd(cmd); 1170 } 1171 1172 /* 1173 * nvme_wait_cmd -- wait for command completion or timeout 1174 * 1175 * Returns B_TRUE if the command completed normally. 1176 * 1177 * Returns B_FALSE if the command timed out and an abort was attempted. The 1178 * command mutex will be dropped and the command must be considered freed. The 1179 * freeing of the command is normally done by the abort command callback. 1180 * 1181 * In case of a serious error or a timeout of the abort command the hardware 1182 * will be declared dead and FMA will be notified. 1183 */ 1184 static boolean_t 1185 nvme_wait_cmd(nvme_cmd_t *cmd, uint_t sec) 1186 { 1187 clock_t timeout = ddi_get_lbolt() + drv_usectohz(sec * MICROSEC); 1188 nvme_t *nvme = cmd->nc_nvme; 1189 nvme_reg_csts_t csts; 1190 1191 ASSERT(mutex_owned(&cmd->nc_mutex)); 1192 1193 while (!cmd->nc_completed) { 1194 if (cv_timedwait(&cmd->nc_cv, &cmd->nc_mutex, timeout) == -1) 1195 break; 1196 } 1197 1198 if (cmd->nc_completed) 1199 return (B_TRUE); 1200 1201 /* 1202 * The command timed out. Change the callback to the cleanup function. 1203 */ 1204 cmd->nc_callback = nvme_abort_cmd_cb; 1205 1206 /* 1207 * Check controller for fatal status, any errors associated with the 1208 * register or DMA handle, or for a double timeout (abort command timed 1209 * out). If necessary log a warning and call FMA. 1210 */ 1211 csts.r = nvme_get32(nvme, NVME_REG_CSTS); 1212 dev_err(nvme->n_dip, CE_WARN, "!command timeout, " 1213 "OPC = %x, CFS = %d", cmd->nc_sqe.sqe_opc, csts.b.csts_cfs); 1214 atomic_inc_32(&nvme->n_cmd_timeout); 1215 1216 if (csts.b.csts_cfs || 1217 nvme_check_regs_hdl(nvme) || 1218 nvme_check_dma_hdl(cmd->nc_dma) || 1219 cmd->nc_sqe.sqe_opc == NVME_OPC_ABORT) { 1220 ddi_fm_service_impact(nvme->n_dip, DDI_SERVICE_LOST); 1221 nvme->n_dead = B_TRUE; 1222 mutex_exit(&cmd->nc_mutex); 1223 } else { 1224 /* 1225 * Try to abort the command. The command mutex is released by 1226 * nvme_abort_cmd(). 1227 * If the abort succeeds it will have freed the aborted command. 1228 * If the abort fails for other reasons we must assume that the 1229 * command may complete at any time, and the callback will free 1230 * it for us. 1231 */ 1232 nvme_abort_cmd(cmd); 1233 } 1234 1235 return (B_FALSE); 1236 } 1237 1238 static void 1239 nvme_wakeup_cmd(void *arg) 1240 { 1241 nvme_cmd_t *cmd = arg; 1242 1243 mutex_enter(&cmd->nc_mutex); 1244 /* 1245 * There is a slight chance that this command completed shortly after 1246 * the timeout was hit in nvme_wait_cmd() but before the callback was 1247 * changed. Catch that case here and clean up accordingly. 
1248 */ 1249 if (cmd->nc_callback == nvme_abort_cmd_cb) { 1250 mutex_exit(&cmd->nc_mutex); 1251 nvme_abort_cmd_cb(cmd); 1252 return; 1253 } 1254 1255 cmd->nc_completed = B_TRUE; 1256 cv_signal(&cmd->nc_cv); 1257 mutex_exit(&cmd->nc_mutex); 1258 } 1259 1260 static void 1261 nvme_async_event_task(void *arg) 1262 { 1263 nvme_cmd_t *cmd = arg; 1264 nvme_t *nvme = cmd->nc_nvme; 1265 nvme_error_log_entry_t *error_log = NULL; 1266 nvme_health_log_t *health_log = NULL; 1267 nvme_async_event_t event; 1268 int ret; 1269 1270 /* 1271 * Check for errors associated with the async request itself. The only 1272 * command-specific error is "async event limit exceeded", which 1273 * indicates a programming error in the driver and causes a panic in 1274 * nvme_check_cmd_status(). 1275 * 1276 * Other possible errors are various scenarios where the async request 1277 * was aborted, or internal errors in the device. Internal errors are 1278 * reported to FMA, the command aborts need no special handling here. 1279 */ 1280 if (nvme_check_cmd_status(cmd)) { 1281 dev_err(cmd->nc_nvme->n_dip, CE_WARN, 1282 "!async event request returned failure, sct = %x, " 1283 "sc = %x, dnr = %d, m = %d", cmd->nc_cqe.cqe_sf.sf_sct, 1284 cmd->nc_cqe.cqe_sf.sf_sc, cmd->nc_cqe.cqe_sf.sf_dnr, 1285 cmd->nc_cqe.cqe_sf.sf_m); 1286 1287 if (cmd->nc_cqe.cqe_sf.sf_sct == NVME_CQE_SCT_GENERIC && 1288 cmd->nc_cqe.cqe_sf.sf_sc == NVME_CQE_SC_GEN_INTERNAL_ERR) { 1289 cmd->nc_nvme->n_dead = B_TRUE; 1290 ddi_fm_service_impact(cmd->nc_nvme->n_dip, 1291 DDI_SERVICE_LOST); 1292 } 1293 nvme_free_cmd(cmd); 1294 return; 1295 } 1296 1297 1298 event.r = cmd->nc_cqe.cqe_dw0; 1299 1300 /* Clear CQE and re-submit the async request. */ 1301 bzero(&cmd->nc_cqe, sizeof (nvme_cqe_t)); 1302 ret = nvme_submit_cmd(nvme->n_adminq, cmd); 1303 1304 if (ret != DDI_SUCCESS) { 1305 dev_err(nvme->n_dip, CE_WARN, 1306 "!failed to resubmit async event request"); 1307 atomic_inc_32(&nvme->n_async_resubmit_failed); 1308 nvme_free_cmd(cmd); 1309 } 1310 1311 switch (event.b.ae_type) { 1312 case NVME_ASYNC_TYPE_ERROR: 1313 if (event.b.ae_logpage == NVME_LOGPAGE_ERROR) { 1314 error_log = (nvme_error_log_entry_t *) 1315 nvme_get_logpage(nvme, event.b.ae_logpage); 1316 } else { 1317 dev_err(nvme->n_dip, CE_WARN, "!wrong logpage in " 1318 "async event reply: %d", event.b.ae_logpage); 1319 atomic_inc_32(&nvme->n_wrong_logpage); 1320 } 1321 1322 switch (event.b.ae_info) { 1323 case NVME_ASYNC_ERROR_INV_SQ: 1324 dev_err(nvme->n_dip, CE_PANIC, "programming error: " 1325 "invalid submission queue"); 1326 return; 1327 1328 case NVME_ASYNC_ERROR_INV_DBL: 1329 dev_err(nvme->n_dip, CE_PANIC, "programming error: " 1330 "invalid doorbell write value"); 1331 return; 1332 1333 case NVME_ASYNC_ERROR_DIAGFAIL: 1334 dev_err(nvme->n_dip, CE_WARN, "!diagnostic failure"); 1335 ddi_fm_service_impact(nvme->n_dip, DDI_SERVICE_LOST); 1336 nvme->n_dead = B_TRUE; 1337 atomic_inc_32(&nvme->n_diagfail_event); 1338 break; 1339 1340 case NVME_ASYNC_ERROR_PERSISTENT: 1341 dev_err(nvme->n_dip, CE_WARN, "!persistent internal " 1342 "device error"); 1343 ddi_fm_service_impact(nvme->n_dip, DDI_SERVICE_LOST); 1344 nvme->n_dead = B_TRUE; 1345 atomic_inc_32(&nvme->n_persistent_event); 1346 break; 1347 1348 case NVME_ASYNC_ERROR_TRANSIENT: 1349 dev_err(nvme->n_dip, CE_WARN, "!transient internal " 1350 "device error"); 1351 /* TODO: send ereport */ 1352 atomic_inc_32(&nvme->n_transient_event); 1353 break; 1354 1355 case NVME_ASYNC_ERROR_FW_LOAD: 1356 dev_err(nvme->n_dip, CE_WARN, 1357 "!firmware image load error"); 
1358 atomic_inc_32(&nvme->n_fw_load_event); 1359 break; 1360 } 1361 break; 1362 1363 case NVME_ASYNC_TYPE_HEALTH: 1364 if (event.b.ae_logpage == NVME_LOGPAGE_HEALTH) { 1365 health_log = (nvme_health_log_t *) 1366 nvme_get_logpage(nvme, event.b.ae_logpage, -1); 1367 } else { 1368 dev_err(nvme->n_dip, CE_WARN, "!wrong logpage in " 1369 "async event reply: %d", event.b.ae_logpage); 1370 atomic_inc_32(&nvme->n_wrong_logpage); 1371 } 1372 1373 switch (event.b.ae_info) { 1374 case NVME_ASYNC_HEALTH_RELIABILITY: 1375 dev_err(nvme->n_dip, CE_WARN, 1376 "!device reliability compromised"); 1377 /* TODO: send ereport */ 1378 atomic_inc_32(&nvme->n_reliability_event); 1379 break; 1380 1381 case NVME_ASYNC_HEALTH_TEMPERATURE: 1382 dev_err(nvme->n_dip, CE_WARN, 1383 "!temperature above threshold"); 1384 /* TODO: send ereport */ 1385 atomic_inc_32(&nvme->n_temperature_event); 1386 break; 1387 1388 case NVME_ASYNC_HEALTH_SPARE: 1389 dev_err(nvme->n_dip, CE_WARN, 1390 "!spare space below threshold"); 1391 /* TODO: send ereport */ 1392 atomic_inc_32(&nvme->n_spare_event); 1393 break; 1394 } 1395 break; 1396 1397 case NVME_ASYNC_TYPE_VENDOR: 1398 dev_err(nvme->n_dip, CE_WARN, "!vendor specific async event " 1399 "received, info = %x, logpage = %x", event.b.ae_info, 1400 event.b.ae_logpage); 1401 atomic_inc_32(&nvme->n_vendor_event); 1402 break; 1403 1404 default: 1405 dev_err(nvme->n_dip, CE_WARN, "!unknown async event received, " 1406 "type = %x, info = %x, logpage = %x", event.b.ae_type, 1407 event.b.ae_info, event.b.ae_logpage); 1408 atomic_inc_32(&nvme->n_unknown_event); 1409 break; 1410 } 1411 1412 if (error_log) 1413 kmem_free(error_log, sizeof (nvme_error_log_entry_t) * 1414 nvme->n_error_log_len); 1415 1416 if (health_log) 1417 kmem_free(health_log, sizeof (nvme_health_log_t)); 1418 } 1419 1420 static int 1421 nvme_admin_cmd(nvme_cmd_t *cmd, int sec) 1422 { 1423 int ret; 1424 1425 mutex_enter(&cmd->nc_mutex); 1426 ret = nvme_submit_cmd(cmd->nc_nvme->n_adminq, cmd); 1427 1428 if (ret != DDI_SUCCESS) { 1429 mutex_exit(&cmd->nc_mutex); 1430 dev_err(cmd->nc_nvme->n_dip, CE_WARN, 1431 "!nvme_submit_cmd failed"); 1432 atomic_inc_32(&cmd->nc_nvme->n_admin_queue_full); 1433 nvme_free_cmd(cmd); 1434 return (DDI_FAILURE); 1435 } 1436 1437 if (nvme_wait_cmd(cmd, sec) == B_FALSE) { 1438 /* 1439 * The command timed out. An abort command was posted that 1440 * will take care of the cleanup. 1441 */ 1442 return (DDI_FAILURE); 1443 } 1444 mutex_exit(&cmd->nc_mutex); 1445 1446 return (DDI_SUCCESS); 1447 } 1448 1449 static int 1450 nvme_async_event(nvme_t *nvme) 1451 { 1452 nvme_cmd_t *cmd = nvme_alloc_cmd(nvme, KM_SLEEP); 1453 int ret; 1454 1455 cmd->nc_sqid = 0; 1456 cmd->nc_sqe.sqe_opc = NVME_OPC_ASYNC_EVENT; 1457 cmd->nc_callback = nvme_async_event_task; 1458 1459 ret = nvme_submit_cmd(nvme->n_adminq, cmd); 1460 1461 if (ret != DDI_SUCCESS) { 1462 dev_err(nvme->n_dip, CE_WARN, 1463 "!nvme_submit_cmd failed for ASYNCHRONOUS EVENT"); 1464 nvme_free_cmd(cmd); 1465 return (DDI_FAILURE); 1466 } 1467 1468 return (DDI_SUCCESS); 1469 } 1470 1471 static void * 1472 nvme_get_logpage(nvme_t *nvme, uint8_t logpage, ...) 
1473 { 1474 nvme_cmd_t *cmd = nvme_alloc_cmd(nvme, KM_SLEEP); 1475 void *buf = NULL; 1476 nvme_getlogpage_t getlogpage = { 0 }; 1477 size_t bufsize; 1478 va_list ap; 1479 1480 va_start(ap, logpage); 1481 1482 cmd->nc_sqid = 0; 1483 cmd->nc_callback = nvme_wakeup_cmd; 1484 cmd->nc_sqe.sqe_opc = NVME_OPC_GET_LOG_PAGE; 1485 1486 getlogpage.b.lp_lid = logpage; 1487 1488 switch (logpage) { 1489 case NVME_LOGPAGE_ERROR: 1490 cmd->nc_sqe.sqe_nsid = (uint32_t)-1; 1491 bufsize = nvme->n_error_log_len * 1492 sizeof (nvme_error_log_entry_t); 1493 break; 1494 1495 case NVME_LOGPAGE_HEALTH: 1496 cmd->nc_sqe.sqe_nsid = va_arg(ap, uint32_t); 1497 bufsize = sizeof (nvme_health_log_t); 1498 break; 1499 1500 case NVME_LOGPAGE_FWSLOT: 1501 cmd->nc_sqe.sqe_nsid = (uint32_t)-1; 1502 bufsize = sizeof (nvme_fwslot_log_t); 1503 break; 1504 1505 default: 1506 dev_err(nvme->n_dip, CE_WARN, "!unknown log page requested: %d", 1507 logpage); 1508 atomic_inc_32(&nvme->n_unknown_logpage); 1509 goto fail; 1510 } 1511 1512 va_end(ap); 1513 1514 getlogpage.b.lp_numd = bufsize / sizeof (uint32_t) - 1; 1515 1516 cmd->nc_sqe.sqe_cdw10 = getlogpage.r; 1517 1518 if (nvme_zalloc_dma(nvme, getlogpage.b.lp_numd * sizeof (uint32_t), 1519 DDI_DMA_READ, &nvme->n_prp_dma_attr, &cmd->nc_dma) != DDI_SUCCESS) { 1520 dev_err(nvme->n_dip, CE_WARN, 1521 "!nvme_zalloc_dma failed for GET LOG PAGE"); 1522 goto fail; 1523 } 1524 1525 if (cmd->nc_dma->nd_ncookie > 2) { 1526 dev_err(nvme->n_dip, CE_WARN, 1527 "!too many DMA cookies for GET LOG PAGE"); 1528 atomic_inc_32(&nvme->n_too_many_cookies); 1529 goto fail; 1530 } 1531 1532 cmd->nc_sqe.sqe_dptr.d_prp[0] = cmd->nc_dma->nd_cookie.dmac_laddress; 1533 if (cmd->nc_dma->nd_ncookie > 1) { 1534 ddi_dma_nextcookie(cmd->nc_dma->nd_dmah, 1535 &cmd->nc_dma->nd_cookie); 1536 cmd->nc_sqe.sqe_dptr.d_prp[1] = 1537 cmd->nc_dma->nd_cookie.dmac_laddress; 1538 } 1539 1540 if (nvme_admin_cmd(cmd, nvme_admin_cmd_timeout) != DDI_SUCCESS) { 1541 dev_err(nvme->n_dip, CE_WARN, 1542 "!nvme_admin_cmd failed for GET LOG PAGE"); 1543 return (NULL); 1544 } 1545 1546 if (nvme_check_cmd_status(cmd)) { 1547 dev_err(nvme->n_dip, CE_WARN, 1548 "!GET LOG PAGE failed with sct = %x, sc = %x", 1549 cmd->nc_cqe.cqe_sf.sf_sct, cmd->nc_cqe.cqe_sf.sf_sc); 1550 goto fail; 1551 } 1552 1553 buf = kmem_alloc(bufsize, KM_SLEEP); 1554 bcopy(cmd->nc_dma->nd_memp, buf, bufsize); 1555 1556 fail: 1557 nvme_free_cmd(cmd); 1558 1559 return (buf); 1560 } 1561 1562 static void * 1563 nvme_identify(nvme_t *nvme, uint32_t nsid) 1564 { 1565 nvme_cmd_t *cmd = nvme_alloc_cmd(nvme, KM_SLEEP); 1566 void *buf = NULL; 1567 1568 cmd->nc_sqid = 0; 1569 cmd->nc_callback = nvme_wakeup_cmd; 1570 cmd->nc_sqe.sqe_opc = NVME_OPC_IDENTIFY; 1571 cmd->nc_sqe.sqe_nsid = nsid; 1572 cmd->nc_sqe.sqe_cdw10 = nsid ? 
NVME_IDENTIFY_NSID : NVME_IDENTIFY_CTRL; 1573 1574 if (nvme_zalloc_dma(nvme, NVME_IDENTIFY_BUFSIZE, DDI_DMA_READ, 1575 &nvme->n_prp_dma_attr, &cmd->nc_dma) != DDI_SUCCESS) { 1576 dev_err(nvme->n_dip, CE_WARN, 1577 "!nvme_zalloc_dma failed for IDENTIFY"); 1578 goto fail; 1579 } 1580 1581 if (cmd->nc_dma->nd_ncookie > 2) { 1582 dev_err(nvme->n_dip, CE_WARN, 1583 "!too many DMA cookies for IDENTIFY"); 1584 atomic_inc_32(&nvme->n_too_many_cookies); 1585 goto fail; 1586 } 1587 1588 cmd->nc_sqe.sqe_dptr.d_prp[0] = cmd->nc_dma->nd_cookie.dmac_laddress; 1589 if (cmd->nc_dma->nd_ncookie > 1) { 1590 ddi_dma_nextcookie(cmd->nc_dma->nd_dmah, 1591 &cmd->nc_dma->nd_cookie); 1592 cmd->nc_sqe.sqe_dptr.d_prp[1] = 1593 cmd->nc_dma->nd_cookie.dmac_laddress; 1594 } 1595 1596 if (nvme_admin_cmd(cmd, nvme_admin_cmd_timeout) != DDI_SUCCESS) { 1597 dev_err(nvme->n_dip, CE_WARN, 1598 "!nvme_admin_cmd failed for IDENTIFY"); 1599 return (NULL); 1600 } 1601 1602 if (nvme_check_cmd_status(cmd)) { 1603 dev_err(nvme->n_dip, CE_WARN, 1604 "!IDENTIFY failed with sct = %x, sc = %x", 1605 cmd->nc_cqe.cqe_sf.sf_sct, cmd->nc_cqe.cqe_sf.sf_sc); 1606 goto fail; 1607 } 1608 1609 buf = kmem_alloc(NVME_IDENTIFY_BUFSIZE, KM_SLEEP); 1610 bcopy(cmd->nc_dma->nd_memp, buf, NVME_IDENTIFY_BUFSIZE); 1611 1612 fail: 1613 nvme_free_cmd(cmd); 1614 1615 return (buf); 1616 } 1617 1618 static boolean_t 1619 nvme_set_features(nvme_t *nvme, uint32_t nsid, uint8_t feature, uint32_t val, 1620 uint32_t *res) 1621 { 1622 _NOTE(ARGUNUSED(nsid)); 1623 nvme_cmd_t *cmd = nvme_alloc_cmd(nvme, KM_SLEEP); 1624 boolean_t ret = B_FALSE; 1625 1626 ASSERT(res != NULL); 1627 1628 cmd->nc_sqid = 0; 1629 cmd->nc_callback = nvme_wakeup_cmd; 1630 cmd->nc_sqe.sqe_opc = NVME_OPC_SET_FEATURES; 1631 cmd->nc_sqe.sqe_cdw10 = feature; 1632 cmd->nc_sqe.sqe_cdw11 = val; 1633 1634 switch (feature) { 1635 case NVME_FEAT_WRITE_CACHE: 1636 if (!nvme->n_write_cache_present) 1637 goto fail; 1638 break; 1639 1640 case NVME_FEAT_NQUEUES: 1641 break; 1642 1643 default: 1644 goto fail; 1645 } 1646 1647 if (nvme_admin_cmd(cmd, nvme_admin_cmd_timeout) != DDI_SUCCESS) { 1648 dev_err(nvme->n_dip, CE_WARN, 1649 "!nvme_admin_cmd failed for SET FEATURES"); 1650 return (ret); 1651 } 1652 1653 if (nvme_check_cmd_status(cmd)) { 1654 dev_err(nvme->n_dip, CE_WARN, 1655 "!SET FEATURES %d failed with sct = %x, sc = %x", 1656 feature, cmd->nc_cqe.cqe_sf.sf_sct, 1657 cmd->nc_cqe.cqe_sf.sf_sc); 1658 goto fail; 1659 } 1660 1661 *res = cmd->nc_cqe.cqe_dw0; 1662 ret = B_TRUE; 1663 1664 fail: 1665 nvme_free_cmd(cmd); 1666 return (ret); 1667 } 1668 1669 static boolean_t 1670 nvme_write_cache_set(nvme_t *nvme, boolean_t enable) 1671 { 1672 nvme_write_cache_t nwc = { 0 }; 1673 1674 if (enable) 1675 nwc.b.wc_wce = 1; 1676 1677 if (!nvme_set_features(nvme, 0, NVME_FEAT_WRITE_CACHE, nwc.r, &nwc.r)) 1678 return (B_FALSE); 1679 1680 return (B_TRUE); 1681 } 1682 1683 static int 1684 nvme_set_nqueues(nvme_t *nvme, uint16_t nqueues) 1685 { 1686 nvme_nqueue_t nq = { 0 }; 1687 1688 nq.b.nq_nsq = nq.b.nq_ncq = nqueues - 1; 1689 1690 if (!nvme_set_features(nvme, 0, NVME_FEAT_NQUEUES, nq.r, &nq.r)) { 1691 return (0); 1692 } 1693 1694 /* 1695 * Always use the same number of submission and completion queues, and 1696 * never use more than the requested number of queues. 
1697 */ 1698 return (MIN(nqueues, MIN(nq.b.nq_nsq, nq.b.nq_ncq) + 1)); 1699 } 1700 1701 static int 1702 nvme_create_io_qpair(nvme_t *nvme, nvme_qpair_t *qp, uint16_t idx) 1703 { 1704 nvme_cmd_t *cmd = nvme_alloc_cmd(nvme, KM_SLEEP); 1705 nvme_create_queue_dw10_t dw10 = { 0 }; 1706 nvme_create_cq_dw11_t c_dw11 = { 0 }; 1707 nvme_create_sq_dw11_t s_dw11 = { 0 }; 1708 1709 dw10.b.q_qid = idx; 1710 dw10.b.q_qsize = qp->nq_nentry - 1; 1711 1712 c_dw11.b.cq_pc = 1; 1713 c_dw11.b.cq_ien = 1; 1714 c_dw11.b.cq_iv = idx % nvme->n_intr_cnt; 1715 1716 cmd->nc_sqid = 0; 1717 cmd->nc_callback = nvme_wakeup_cmd; 1718 cmd->nc_sqe.sqe_opc = NVME_OPC_CREATE_CQUEUE; 1719 cmd->nc_sqe.sqe_cdw10 = dw10.r; 1720 cmd->nc_sqe.sqe_cdw11 = c_dw11.r; 1721 cmd->nc_sqe.sqe_dptr.d_prp[0] = qp->nq_cqdma->nd_cookie.dmac_laddress; 1722 1723 if (nvme_admin_cmd(cmd, nvme_admin_cmd_timeout) != DDI_SUCCESS) { 1724 dev_err(nvme->n_dip, CE_WARN, 1725 "!nvme_admin_cmd failed for CREATE CQUEUE"); 1726 return (DDI_FAILURE); 1727 } 1728 1729 if (nvme_check_cmd_status(cmd)) { 1730 dev_err(nvme->n_dip, CE_WARN, 1731 "!CREATE CQUEUE failed with sct = %x, sc = %x", 1732 cmd->nc_cqe.cqe_sf.sf_sct, cmd->nc_cqe.cqe_sf.sf_sc); 1733 nvme_free_cmd(cmd); 1734 return (DDI_FAILURE); 1735 } 1736 1737 nvme_free_cmd(cmd); 1738 1739 s_dw11.b.sq_pc = 1; 1740 s_dw11.b.sq_cqid = idx; 1741 1742 cmd = nvme_alloc_cmd(nvme, KM_SLEEP); 1743 cmd->nc_sqid = 0; 1744 cmd->nc_callback = nvme_wakeup_cmd; 1745 cmd->nc_sqe.sqe_opc = NVME_OPC_CREATE_SQUEUE; 1746 cmd->nc_sqe.sqe_cdw10 = dw10.r; 1747 cmd->nc_sqe.sqe_cdw11 = s_dw11.r; 1748 cmd->nc_sqe.sqe_dptr.d_prp[0] = qp->nq_sqdma->nd_cookie.dmac_laddress; 1749 1750 if (nvme_admin_cmd(cmd, nvme_admin_cmd_timeout) != DDI_SUCCESS) { 1751 dev_err(nvme->n_dip, CE_WARN, 1752 "!nvme_admin_cmd failed for CREATE SQUEUE"); 1753 return (DDI_FAILURE); 1754 } 1755 1756 if (nvme_check_cmd_status(cmd)) { 1757 dev_err(nvme->n_dip, CE_WARN, 1758 "!CREATE SQUEUE failed with sct = %x, sc = %x", 1759 cmd->nc_cqe.cqe_sf.sf_sct, cmd->nc_cqe.cqe_sf.sf_sc); 1760 nvme_free_cmd(cmd); 1761 return (DDI_FAILURE); 1762 } 1763 1764 nvme_free_cmd(cmd); 1765 1766 return (DDI_SUCCESS); 1767 } 1768 1769 static boolean_t 1770 nvme_reset(nvme_t *nvme, boolean_t quiesce) 1771 { 1772 nvme_reg_csts_t csts; 1773 int i; 1774 1775 nvme_put32(nvme, NVME_REG_CC, 0); 1776 1777 csts.r = nvme_get32(nvme, NVME_REG_CSTS); 1778 if (csts.b.csts_rdy == 1) { 1779 nvme_put32(nvme, NVME_REG_CC, 0); 1780 for (i = 0; i != nvme->n_timeout * 10; i++) { 1781 csts.r = nvme_get32(nvme, NVME_REG_CSTS); 1782 if (csts.b.csts_rdy == 0) 1783 break; 1784 1785 if (quiesce) 1786 drv_usecwait(50000); 1787 else 1788 delay(drv_usectohz(50000)); 1789 } 1790 } 1791 1792 nvme_put32(nvme, NVME_REG_AQA, 0); 1793 nvme_put32(nvme, NVME_REG_ASQ, 0); 1794 nvme_put32(nvme, NVME_REG_ACQ, 0); 1795 1796 csts.r = nvme_get32(nvme, NVME_REG_CSTS); 1797 return (csts.b.csts_rdy == 0 ? 
B_TRUE : B_FALSE); 1798 } 1799 1800 static void 1801 nvme_shutdown(nvme_t *nvme, int mode, boolean_t quiesce) 1802 { 1803 nvme_reg_cc_t cc; 1804 nvme_reg_csts_t csts; 1805 int i; 1806 1807 ASSERT(mode == NVME_CC_SHN_NORMAL || mode == NVME_CC_SHN_ABRUPT); 1808 1809 cc.r = nvme_get32(nvme, NVME_REG_CC); 1810 cc.b.cc_shn = mode & 0x3; 1811 nvme_put32(nvme, NVME_REG_CC, cc.r); 1812 1813 for (i = 0; i != 10; i++) { 1814 csts.r = nvme_get32(nvme, NVME_REG_CSTS); 1815 if (csts.b.csts_shst == NVME_CSTS_SHN_COMPLETE) 1816 break; 1817 1818 if (quiesce) 1819 drv_usecwait(100000); 1820 else 1821 delay(drv_usectohz(100000)); 1822 } 1823 } 1824 1825 1826 static void 1827 nvme_prepare_devid(nvme_t *nvme, uint32_t nsid) 1828 { 1829 char model[sizeof (nvme->n_idctl->id_model) + 1]; 1830 char serial[sizeof (nvme->n_idctl->id_serial) + 1]; 1831 1832 bcopy(nvme->n_idctl->id_model, model, sizeof (nvme->n_idctl->id_model)); 1833 bcopy(nvme->n_idctl->id_serial, serial, 1834 sizeof (nvme->n_idctl->id_serial)); 1835 1836 model[sizeof (nvme->n_idctl->id_model)] = '\0'; 1837 serial[sizeof (nvme->n_idctl->id_serial)] = '\0'; 1838 1839 (void) snprintf(nvme->n_ns[nsid - 1].ns_devid, 1840 sizeof (nvme->n_ns[0].ns_devid), "%4X-%s-%s-%X", 1841 nvme->n_idctl->id_vid, model, serial, nsid); 1842 } 1843 1844 static int 1845 nvme_init(nvme_t *nvme) 1846 { 1847 nvme_reg_cc_t cc = { 0 }; 1848 nvme_reg_aqa_t aqa = { 0 }; 1849 nvme_reg_asq_t asq = { 0 }; 1850 nvme_reg_acq_t acq = { 0 }; 1851 nvme_reg_cap_t cap; 1852 nvme_reg_vs_t vs; 1853 nvme_reg_csts_t csts; 1854 int i = 0; 1855 int nqueues; 1856 char model[sizeof (nvme->n_idctl->id_model) + 1]; 1857 char *vendor, *product; 1858 1859 /* Check controller version */ 1860 vs.r = nvme_get32(nvme, NVME_REG_VS); 1861 dev_err(nvme->n_dip, CE_CONT, "?NVMe spec version %d.%d", 1862 vs.b.vs_mjr, vs.b.vs_mnr); 1863 1864 if (nvme_version_major < vs.b.vs_mjr || 1865 (nvme_version_major == vs.b.vs_mjr && 1866 nvme_version_minor < vs.b.vs_mnr)) { 1867 dev_err(nvme->n_dip, CE_WARN, "!no support for version > %d.%d", 1868 nvme_version_major, nvme_version_minor); 1869 if (nvme->n_strict_version) 1870 goto fail; 1871 } 1872 1873 /* retrieve controller configuration */ 1874 cap.r = nvme_get64(nvme, NVME_REG_CAP); 1875 1876 if ((cap.b.cap_css & NVME_CAP_CSS_NVM) == 0) { 1877 dev_err(nvme->n_dip, CE_WARN, 1878 "!NVM command set not supported by hardware"); 1879 goto fail; 1880 } 1881 1882 nvme->n_nssr_supported = cap.b.cap_nssrs; 1883 nvme->n_doorbell_stride = 4 << cap.b.cap_dstrd; 1884 nvme->n_timeout = cap.b.cap_to; 1885 nvme->n_arbitration_mechanisms = cap.b.cap_ams; 1886 nvme->n_cont_queues_reqd = cap.b.cap_cqr; 1887 nvme->n_max_queue_entries = cap.b.cap_mqes + 1; 1888 1889 /* 1890 * The MPSMIN and MPSMAX fields in the CAP register use 0 to specify 1891 * the base page size of 4k (1<<12), so add 12 here to get the real 1892 * page size value. 1893 */ 1894 nvme->n_pageshift = MIN(MAX(cap.b.cap_mpsmin + 12, PAGESHIFT), 1895 cap.b.cap_mpsmax + 12); 1896 nvme->n_pagesize = 1UL << (nvme->n_pageshift); 1897 1898 /* 1899 * Set up Queue DMA to transfer at least 1 page-aligned page at a time. 1900 */ 1901 nvme->n_queue_dma_attr.dma_attr_align = nvme->n_pagesize; 1902 nvme->n_queue_dma_attr.dma_attr_minxfer = nvme->n_pagesize; 1903 1904 /* 1905 * Set up PRP DMA to transfer 1 page-aligned page at a time. 1906 * Maxxfer may be increased after we identified the controller limits. 
	 */
	nvme->n_prp_dma_attr.dma_attr_maxxfer = nvme->n_pagesize;
	nvme->n_prp_dma_attr.dma_attr_minxfer = nvme->n_pagesize;
	nvme->n_prp_dma_attr.dma_attr_align = nvme->n_pagesize;
	nvme->n_prp_dma_attr.dma_attr_seg = nvme->n_pagesize - 1;

	/*
	 * Reset controller if it's still in ready state.
	 */
	if (nvme_reset(nvme, B_FALSE) == B_FALSE) {
		dev_err(nvme->n_dip, CE_WARN, "!unable to reset controller");
		ddi_fm_service_impact(nvme->n_dip, DDI_SERVICE_LOST);
		nvme->n_dead = B_TRUE;
		goto fail;
	}

	/*
	 * Create the admin queue pair.
	 */
	if (nvme_alloc_qpair(nvme, nvme->n_admin_queue_len, &nvme->n_adminq, 0)
	    != DDI_SUCCESS) {
		dev_err(nvme->n_dip, CE_WARN,
		    "!unable to allocate admin qpair");
		goto fail;
	}
	nvme->n_ioq = kmem_alloc(sizeof (nvme_qpair_t *), KM_SLEEP);
	nvme->n_ioq[0] = nvme->n_adminq;

	nvme->n_progress |= NVME_ADMIN_QUEUE;

	(void) ddi_prop_update_int(DDI_DEV_T_NONE, nvme->n_dip,
	    "admin-queue-len", nvme->n_admin_queue_len);

	aqa.b.aqa_asqs = aqa.b.aqa_acqs = nvme->n_admin_queue_len - 1;
	asq = nvme->n_adminq->nq_sqdma->nd_cookie.dmac_laddress;
	acq = nvme->n_adminq->nq_cqdma->nd_cookie.dmac_laddress;

	ASSERT((asq & (nvme->n_pagesize - 1)) == 0);
	ASSERT((acq & (nvme->n_pagesize - 1)) == 0);

	nvme_put32(nvme, NVME_REG_AQA, aqa.r);
	nvme_put64(nvme, NVME_REG_ASQ, asq);
	nvme_put64(nvme, NVME_REG_ACQ, acq);

	cc.b.cc_ams = 0;	/* use Round-Robin arbitration */
	cc.b.cc_css = 0;	/* use NVM command set */
	cc.b.cc_mps = nvme->n_pageshift - 12;
	cc.b.cc_shn = 0;	/* no shutdown in progress */
	cc.b.cc_en = 1;		/* enable controller */
	cc.b.cc_iosqes = 6;	/* submission queue entry is 2^6 bytes long */
	cc.b.cc_iocqes = 4;	/* completion queue entry is 2^4 bytes long */

	nvme_put32(nvme, NVME_REG_CC, cc.r);

	/*
	 * Wait for the controller to become ready.
	 */
	csts.r = nvme_get32(nvme, NVME_REG_CSTS);
	if (csts.b.csts_rdy == 0) {
		for (i = 0; i != nvme->n_timeout * 10; i++) {
			delay(drv_usectohz(50000));
			csts.r = nvme_get32(nvme, NVME_REG_CSTS);

			if (csts.b.csts_cfs == 1) {
				dev_err(nvme->n_dip, CE_WARN,
				    "!controller fatal status at init");
				ddi_fm_service_impact(nvme->n_dip,
				    DDI_SERVICE_LOST);
				nvme->n_dead = B_TRUE;
				goto fail;
			}

			if (csts.b.csts_rdy == 1)
				break;
		}
	}

	if (csts.b.csts_rdy == 0) {
		dev_err(nvme->n_dip, CE_WARN, "!controller not ready");
		ddi_fm_service_impact(nvme->n_dip, DDI_SERVICE_LOST);
		nvme->n_dead = B_TRUE;
		goto fail;
	}

	/*
	 * Assume an abort command limit of 1. We'll destroy and re-init
	 * that later when we know the true abort command limit.
	 */
	sema_init(&nvme->n_abort_sema, 1, NULL, SEMA_DRIVER, NULL);

	/*
	 * Setup initial interrupt for admin queue.
	 */
	if ((nvme_setup_interrupts(nvme, DDI_INTR_TYPE_MSIX, 1)
	    != DDI_SUCCESS) &&
	    (nvme_setup_interrupts(nvme, DDI_INTR_TYPE_MSI, 1)
	    != DDI_SUCCESS) &&
	    (nvme_setup_interrupts(nvme, DDI_INTR_TYPE_FIXED, 1)
	    != DDI_SUCCESS)) {
		dev_err(nvme->n_dip, CE_WARN,
		    "!failed to setup initial interrupt");
		goto fail;
	}

	/*
	 * Post an asynchronous event command to catch errors.
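	 * This is done after interrupt setup on purpose: the command only
	 * completes once the controller actually has an event to report,
	 * so it is not waited for here.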
	 */
	if (nvme_async_event(nvme) != DDI_SUCCESS) {
		dev_err(nvme->n_dip, CE_WARN,
		    "!failed to post async event");
		goto fail;
	}

	/*
	 * Identify Controller
	 */
	nvme->n_idctl = nvme_identify(nvme, 0);
	if (nvme->n_idctl == NULL) {
		dev_err(nvme->n_dip, CE_WARN,
		    "!failed to identify controller");
		goto fail;
	}

	/*
	 * Get Vendor & Product ID
	 */
	bcopy(nvme->n_idctl->id_model, model, sizeof (nvme->n_idctl->id_model));
	model[sizeof (nvme->n_idctl->id_model)] = '\0';
	sata_split_model(model, &vendor, &product);

	if (vendor == NULL)
		nvme->n_vendor = strdup("NVMe");
	else
		nvme->n_vendor = strdup(vendor);

	nvme->n_product = strdup(product);

	/*
	 * Get controller limits.
	 */
	nvme->n_async_event_limit = MAX(NVME_MIN_ASYNC_EVENT_LIMIT,
	    MIN(nvme->n_admin_queue_len / 10,
	    MIN(nvme->n_idctl->id_aerl + 1, nvme->n_async_event_limit)));

	(void) ddi_prop_update_int(DDI_DEV_T_NONE, nvme->n_dip,
	    "async-event-limit", nvme->n_async_event_limit);

	nvme->n_abort_command_limit = nvme->n_idctl->id_acl + 1;

	/*
	 * Reinitialize the semaphore with the true abort command limit
	 * supported by the hardware. It's not necessary to disable interrupts
	 * as only command aborts use the semaphore, and no commands are
	 * executed or aborted while we're here.
	 */
	sema_destroy(&nvme->n_abort_sema);
	sema_init(&nvme->n_abort_sema, nvme->n_abort_command_limit - 1, NULL,
	    SEMA_DRIVER, NULL);

	nvme->n_progress |= NVME_CTRL_LIMITS;

	if (nvme->n_idctl->id_mdts == 0)
		nvme->n_max_data_transfer_size = nvme->n_pagesize * 65536;
	else
		nvme->n_max_data_transfer_size =
		    1ull << (nvme->n_pageshift + nvme->n_idctl->id_mdts);

	nvme->n_error_log_len = nvme->n_idctl->id_elpe + 1;

	/*
	 * Limit n_max_data_transfer_size to what we can handle in one PRP.
	 * Chained PRPs are currently unsupported.
	 *
	 * This is a no-op on hardware which doesn't support a transfer size
	 * big enough to require chained PRPs.
	 */
	nvme->n_max_data_transfer_size = MIN(nvme->n_max_data_transfer_size,
	    (nvme->n_pagesize / sizeof (uint64_t) * nvme->n_pagesize));

	nvme->n_prp_dma_attr.dma_attr_maxxfer = nvme->n_max_data_transfer_size;

	/*
	 * Make sure the minimum/maximum queue entry sizes are not
	 * larger/smaller than the default.
	 */

	if (((1 << nvme->n_idctl->id_sqes.qes_min) > sizeof (nvme_sqe_t)) ||
	    ((1 << nvme->n_idctl->id_sqes.qes_max) < sizeof (nvme_sqe_t)) ||
	    ((1 << nvme->n_idctl->id_cqes.qes_min) > sizeof (nvme_cqe_t)) ||
	    ((1 << nvme->n_idctl->id_cqes.qes_max) < sizeof (nvme_cqe_t)))
		goto fail;

	/*
	 * Check for the presence of a Volatile Write Cache. If present,
	 * enable or disable based on the value of the property
	 * volatile-write-cache-enable (default is enabled).
	 */
	nvme->n_write_cache_present =
	    nvme->n_idctl->id_vwc.vwc_present == 0 ? B_FALSE : B_TRUE;

	(void) ddi_prop_update_int(DDI_DEV_T_NONE, nvme->n_dip,
	    "volatile-write-cache-present",
	    nvme->n_write_cache_present ?
	    1 : 0);

	if (!nvme->n_write_cache_present) {
		nvme->n_write_cache_enabled = B_FALSE;
	} else if (!nvme_write_cache_set(nvme, nvme->n_write_cache_enabled)) {
		dev_err(nvme->n_dip, CE_WARN,
		    "!failed to %sable volatile write cache",
		    nvme->n_write_cache_enabled ? "en" : "dis");
		/*
		 * Assume the cache is (still) enabled.
		 */
		nvme->n_write_cache_enabled = B_TRUE;
	}

	(void) ddi_prop_update_int(DDI_DEV_T_NONE, nvme->n_dip,
	    "volatile-write-cache-enable",
	    nvme->n_write_cache_enabled ? 1 : 0);

	/*
	 * Grab a copy of all mandatory log pages.
	 *
	 * TODO: should go away once user space tool exists to print logs
	 */
	nvme->n_error_log = (nvme_error_log_entry_t *)
	    nvme_get_logpage(nvme, NVME_LOGPAGE_ERROR);
	nvme->n_health_log = (nvme_health_log_t *)
	    nvme_get_logpage(nvme, NVME_LOGPAGE_HEALTH, -1);
	nvme->n_fwslot_log = (nvme_fwslot_log_t *)
	    nvme_get_logpage(nvme, NVME_LOGPAGE_FWSLOT);

	/*
	 * Identify Namespaces
	 */
	nvme->n_namespace_count = nvme->n_idctl->id_nn;
	nvme->n_ns = kmem_zalloc(sizeof (nvme_namespace_t) *
	    nvme->n_namespace_count, KM_SLEEP);

	for (i = 0; i != nvme->n_namespace_count; i++) {
		nvme_identify_nsid_t *idns;
		int last_rp;

		nvme->n_ns[i].ns_nvme = nvme;
		nvme->n_ns[i].ns_idns = idns = nvme_identify(nvme, i + 1);

		if (idns == NULL) {
			dev_err(nvme->n_dip, CE_WARN,
			    "!failed to identify namespace %d", i + 1);
			goto fail;
		}

		nvme->n_ns[i].ns_id = i + 1;
		nvme->n_ns[i].ns_block_count = idns->id_nsize;
		nvme->n_ns[i].ns_block_size =
		    1 << idns->id_lbaf[idns->id_flbas.lba_format].lbaf_lbads;
		nvme->n_ns[i].ns_best_block_size = nvme->n_ns[i].ns_block_size;

		nvme_prepare_devid(nvme, nvme->n_ns[i].ns_id);

		/*
		 * Find the LBA format with no metadata and the best relative
		 * performance. A value of 3 means "degraded", 0 is best.
		 */
		last_rp = 3;
		for (int j = 0; j <= idns->id_nlbaf; j++) {
			if (idns->id_lbaf[j].lbaf_lbads == 0)
				break;
			if (idns->id_lbaf[j].lbaf_ms != 0)
				continue;
			if (idns->id_lbaf[j].lbaf_rp >= last_rp)
				continue;
			last_rp = idns->id_lbaf[j].lbaf_rp;
			nvme->n_ns[i].ns_best_block_size =
			    1 << idns->id_lbaf[j].lbaf_lbads;
		}

		/*
		 * We currently don't support namespaces that use either:
		 * - thin provisioning
		 * - protection information
		 */
		if (idns->id_nsfeat.f_thin ||
		    idns->id_dps.dp_pinfo) {
			dev_err(nvme->n_dip, CE_WARN,
			    "!ignoring namespace %d, unsupported features: "
			    "thin = %d, pinfo = %d", i + 1,
			    idns->id_nsfeat.f_thin, idns->id_dps.dp_pinfo);
			nvme->n_ns[i].ns_ignore = B_TRUE;
		}
	}

	/*
	 * Try to set up MSI/MSI-X interrupts.
	 */
	if ((nvme->n_intr_types & (DDI_INTR_TYPE_MSI | DDI_INTR_TYPE_MSIX))
	    != 0) {
		nvme_release_interrupts(nvme);

		nqueues = MIN(UINT16_MAX, ncpus);

		if ((nvme_setup_interrupts(nvme, DDI_INTR_TYPE_MSIX,
		    nqueues) != DDI_SUCCESS) &&
		    (nvme_setup_interrupts(nvme, DDI_INTR_TYPE_MSI,
		    nqueues) != DDI_SUCCESS)) {
			dev_err(nvme->n_dip, CE_WARN,
			    "!failed to setup MSI/MSI-X interrupts");
			goto fail;
		}
	}

	nqueues = nvme->n_intr_cnt;

	/*
	 * Create I/O queue pairs.
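	 * nvme_set_nqueues() negotiates the actual count with the controller
	 * via the Number of Queues feature; it may grant fewer queues than
	 * the number of interrupt vectors we obtained above.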
	 */
	nvme->n_ioq_count = nvme_set_nqueues(nvme, nqueues);
	if (nvme->n_ioq_count == 0) {
		dev_err(nvme->n_dip, CE_WARN,
		    "!failed to set number of I/O queues to %d", nqueues);
		goto fail;
	}

	/*
	 * Reallocate I/O queue array
	 */
	kmem_free(nvme->n_ioq, sizeof (nvme_qpair_t *));
	nvme->n_ioq = kmem_zalloc(sizeof (nvme_qpair_t *) *
	    (nvme->n_ioq_count + 1), KM_SLEEP);
	nvme->n_ioq[0] = nvme->n_adminq;

	/*
	 * If we got fewer queues than we asked for we might as well give
	 * some of the interrupt vectors back to the system.
	 */
	if (nvme->n_ioq_count < nqueues) {
		nvme_release_interrupts(nvme);

		if (nvme_setup_interrupts(nvme, nvme->n_intr_type,
		    nvme->n_ioq_count) != DDI_SUCCESS) {
			dev_err(nvme->n_dip, CE_WARN,
			    "!failed to reduce number of interrupts");
			goto fail;
		}
	}

	/*
	 * Alloc & register I/O queue pairs
	 */
	nvme->n_io_queue_len =
	    MIN(nvme->n_io_queue_len, nvme->n_max_queue_entries);
	(void) ddi_prop_update_int(DDI_DEV_T_NONE, nvme->n_dip, "io-queue-len",
	    nvme->n_io_queue_len);

	for (i = 1; i != nvme->n_ioq_count + 1; i++) {
		if (nvme_alloc_qpair(nvme, nvme->n_io_queue_len,
		    &nvme->n_ioq[i], i) != DDI_SUCCESS) {
			dev_err(nvme->n_dip, CE_WARN,
			    "!unable to allocate I/O qpair %d", i);
			goto fail;
		}

		if (nvme_create_io_qpair(nvme, nvme->n_ioq[i], i)
		    != DDI_SUCCESS) {
			dev_err(nvme->n_dip, CE_WARN,
			    "!unable to create I/O qpair %d", i);
			goto fail;
		}
	}

	/*
	 * Post more asynchronous event commands to reduce event reporting
	 * latency as suggested by the spec.
	 */
	for (i = 1; i != nvme->n_async_event_limit; i++) {
		if (nvme_async_event(nvme) != DDI_SUCCESS) {
			dev_err(nvme->n_dip, CE_WARN,
			    "!failed to post async event %d", i);
			goto fail;
		}
	}

	return (DDI_SUCCESS);

fail:
	(void) nvme_reset(nvme, B_FALSE);
	return (DDI_FAILURE);
}

static uint_t
nvme_intr(caddr_t arg1, caddr_t arg2)
{
	/*LINTED: E_PTR_BAD_CAST_ALIGN*/
	nvme_t *nvme = (nvme_t *)arg1;
	int inum = (int)(uintptr_t)arg2;
	int ccnt = 0;
	int qnum;
	nvme_cmd_t *cmd;

	if (inum >= nvme->n_intr_cnt)
		return (DDI_INTR_UNCLAIMED);

	/*
	 * The interrupt vector a queue uses is calculated as queue_idx %
	 * intr_cnt in nvme_create_io_qpair(). Iterate through the queue array
	 * in steps of n_intr_cnt to process all queues using this vector.
	 */
	for (qnum = inum;
	    qnum < nvme->n_ioq_count + 1 && nvme->n_ioq[qnum] != NULL;
	    qnum += nvme->n_intr_cnt) {
		while ((cmd = nvme_retrieve_cmd(nvme, nvme->n_ioq[qnum]))) {
			taskq_dispatch_ent((taskq_t *)cmd->nc_nvme->n_cmd_taskq,
			    cmd->nc_callback, cmd, TQ_NOSLEEP, &cmd->nc_tqent);
			ccnt++;
		}
	}

	return (ccnt > 0 ?
	    DDI_INTR_CLAIMED : DDI_INTR_UNCLAIMED);
}

static void
nvme_release_interrupts(nvme_t *nvme)
{
	int i;

	for (i = 0; i < nvme->n_intr_cnt; i++) {
		if (nvme->n_inth[i] == NULL)
			break;

		if (nvme->n_intr_cap & DDI_INTR_FLAG_BLOCK)
			(void) ddi_intr_block_disable(&nvme->n_inth[i], 1);
		else
			(void) ddi_intr_disable(nvme->n_inth[i]);

		(void) ddi_intr_remove_handler(nvme->n_inth[i]);
		(void) ddi_intr_free(nvme->n_inth[i]);
	}

	kmem_free(nvme->n_inth, nvme->n_inth_sz);
	nvme->n_inth = NULL;
	nvme->n_inth_sz = 0;

	nvme->n_progress &= ~NVME_INTERRUPTS;
}

static int
nvme_setup_interrupts(nvme_t *nvme, int intr_type, int nqpairs)
{
	int nintrs, navail, count;
	int ret;
	int i;

	if (nvme->n_intr_types == 0) {
		ret = ddi_intr_get_supported_types(nvme->n_dip,
		    &nvme->n_intr_types);
		if (ret != DDI_SUCCESS) {
			dev_err(nvme->n_dip, CE_WARN,
			    "!%s: ddi_intr_get_supported_types failed",
			    __func__);
			return (ret);
		}
	}

	if ((nvme->n_intr_types & intr_type) == 0)
		return (DDI_FAILURE);

	ret = ddi_intr_get_nintrs(nvme->n_dip, intr_type, &nintrs);
	if (ret != DDI_SUCCESS) {
		dev_err(nvme->n_dip, CE_WARN, "!%s: ddi_intr_get_nintrs failed",
		    __func__);
		return (ret);
	}

	ret = ddi_intr_get_navail(nvme->n_dip, intr_type, &navail);
	if (ret != DDI_SUCCESS) {
		dev_err(nvme->n_dip, CE_WARN, "!%s: ddi_intr_get_navail failed",
		    __func__);
		return (ret);
	}

	/* We want at most one interrupt per queue pair. */
	if (navail > nqpairs)
		navail = nqpairs;

	nvme->n_inth_sz = sizeof (ddi_intr_handle_t) * navail;
	nvme->n_inth = kmem_zalloc(nvme->n_inth_sz, KM_SLEEP);

	ret = ddi_intr_alloc(nvme->n_dip, nvme->n_inth, intr_type, 0, navail,
	    &count, 0);
	if (ret != DDI_SUCCESS) {
		dev_err(nvme->n_dip, CE_WARN, "!%s: ddi_intr_alloc failed",
		    __func__);
		goto fail;
	}

	nvme->n_intr_cnt = count;

	ret = ddi_intr_get_pri(nvme->n_inth[0], &nvme->n_intr_pri);
	if (ret != DDI_SUCCESS) {
		dev_err(nvme->n_dip, CE_WARN, "!%s: ddi_intr_get_pri failed",
		    __func__);
		goto fail;
	}

	for (i = 0; i < count; i++) {
		ret = ddi_intr_add_handler(nvme->n_inth[i], nvme_intr,
		    (void *)nvme, (void *)(uintptr_t)i);
		if (ret != DDI_SUCCESS) {
			dev_err(nvme->n_dip, CE_WARN,
			    "!%s: ddi_intr_add_handler failed", __func__);
			goto fail;
		}
	}

	(void) ddi_intr_get_cap(nvme->n_inth[0], &nvme->n_intr_cap);

	for (i = 0; i < count; i++) {
		if (nvme->n_intr_cap & DDI_INTR_FLAG_BLOCK)
			ret = ddi_intr_block_enable(&nvme->n_inth[i], 1);
		else
			ret = ddi_intr_enable(nvme->n_inth[i]);

		if (ret != DDI_SUCCESS) {
			dev_err(nvme->n_dip, CE_WARN,
			    "!%s: enabling interrupt %d failed", __func__, i);
			goto fail;
		}
	}

	nvme->n_intr_type = intr_type;

	nvme->n_progress |= NVME_INTERRUPTS;

	return (DDI_SUCCESS);

fail:
	nvme_release_interrupts(nvme);

	return (ret);
}

static int
nvme_fm_errcb(dev_info_t *dip, ddi_fm_error_t *fm_error, const void *arg)
{
	_NOTE(ARGUNUSED(arg));

	pci_ereport_post(dip, fm_error, NULL);
	return (fm_error->fme_status);
}

static int
nvme_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
	nvme_t *nvme;
	int instance;
	int nregs;
	off_t regsize;
	int i;
	char name[32];

	if (cmd != DDI_ATTACH)
		return (DDI_FAILURE);

	instance = ddi_get_instance(dip);

	if (ddi_soft_state_zalloc(nvme_state, instance) != DDI_SUCCESS)
		return (DDI_FAILURE);

	nvme = ddi_get_soft_state(nvme_state, instance);
	ddi_set_driver_private(dip, nvme);
	nvme->n_dip = dip;

	nvme->n_strict_version = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
	    DDI_PROP_DONTPASS, "strict-version", 1) == 1 ? B_TRUE : B_FALSE;
	nvme->n_ignore_unknown_vendor_status = ddi_prop_get_int(DDI_DEV_T_ANY,
	    dip, DDI_PROP_DONTPASS, "ignore-unknown-vendor-status", 0) == 1 ?
	    B_TRUE : B_FALSE;
	nvme->n_admin_queue_len = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
	    DDI_PROP_DONTPASS, "admin-queue-len", NVME_DEFAULT_ADMIN_QUEUE_LEN);
	nvme->n_io_queue_len = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
	    DDI_PROP_DONTPASS, "io-queue-len", NVME_DEFAULT_IO_QUEUE_LEN);
	nvme->n_async_event_limit = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
	    DDI_PROP_DONTPASS, "async-event-limit",
	    NVME_DEFAULT_ASYNC_EVENT_LIMIT);
	nvme->n_write_cache_enabled = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
	    DDI_PROP_DONTPASS, "volatile-write-cache-enable", 1) != 0 ?
	    B_TRUE : B_FALSE;

	if (nvme->n_admin_queue_len < NVME_MIN_ADMIN_QUEUE_LEN)
		nvme->n_admin_queue_len = NVME_MIN_ADMIN_QUEUE_LEN;
	else if (nvme->n_admin_queue_len > NVME_MAX_ADMIN_QUEUE_LEN)
		nvme->n_admin_queue_len = NVME_MAX_ADMIN_QUEUE_LEN;

	if (nvme->n_io_queue_len < NVME_MIN_IO_QUEUE_LEN)
		nvme->n_io_queue_len = NVME_MIN_IO_QUEUE_LEN;

	if (nvme->n_async_event_limit < 1)
		nvme->n_async_event_limit = NVME_DEFAULT_ASYNC_EVENT_LIMIT;

	nvme->n_reg_acc_attr = nvme_reg_acc_attr;
	nvme->n_queue_dma_attr = nvme_queue_dma_attr;
	nvme->n_prp_dma_attr = nvme_prp_dma_attr;
	nvme->n_sgl_dma_attr = nvme_sgl_dma_attr;

	/*
	 * Setup FMA support.
	 */
	nvme->n_fm_cap = ddi_getprop(DDI_DEV_T_ANY, dip,
	    DDI_PROP_CANSLEEP | DDI_PROP_DONTPASS, "fm-capable",
	    DDI_FM_EREPORT_CAPABLE | DDI_FM_ACCCHK_CAPABLE |
	    DDI_FM_DMACHK_CAPABLE | DDI_FM_ERRCB_CAPABLE);

	ddi_fm_init(dip, &nvme->n_fm_cap, &nvme->n_fm_ibc);

	if (nvme->n_fm_cap) {
		if (nvme->n_fm_cap & DDI_FM_ACCCHK_CAPABLE)
			nvme->n_reg_acc_attr.devacc_attr_access =
			    DDI_FLAGERR_ACC;

		if (nvme->n_fm_cap & DDI_FM_DMACHK_CAPABLE) {
			nvme->n_prp_dma_attr.dma_attr_flags |= DDI_DMA_FLAGERR;
			nvme->n_sgl_dma_attr.dma_attr_flags |= DDI_DMA_FLAGERR;
		}

		if (DDI_FM_EREPORT_CAP(nvme->n_fm_cap) ||
		    DDI_FM_ERRCB_CAP(nvme->n_fm_cap))
			pci_ereport_setup(dip);

		if (DDI_FM_ERRCB_CAP(nvme->n_fm_cap))
			ddi_fm_handler_register(dip, nvme_fm_errcb,
			    (void *)nvme);
	}

	nvme->n_progress |= NVME_FMA_INIT;

	/*
	 * The spec defines several register sets. Only the controller
	 * registers (set 1) are currently used.
	 */
	if (ddi_dev_nregs(dip, &nregs) == DDI_FAILURE ||
	    nregs < 2 ||
	    ddi_dev_regsize(dip, 1, &regsize) == DDI_FAILURE)
		goto fail;

	if (ddi_regs_map_setup(dip, 1, &nvme->n_regs, 0, regsize,
	    &nvme->n_reg_acc_attr, &nvme->n_regh) != DDI_SUCCESS) {
		dev_err(dip, CE_WARN, "!failed to map regset 1");
		goto fail;
	}

	nvme->n_progress |= NVME_REGS_MAPPED;

	/*
	 * Create taskq for command completion.
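	 * The taskq is sized to one thread per CPU (capped at UINT16_MAX) so
	 * that completions dispatched from nvme_intr() can be processed in
	 * parallel when there is one I/O queue per CPU.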
	 */
	(void) snprintf(name, sizeof (name), "%s%d_cmd_taskq",
	    ddi_driver_name(dip), ddi_get_instance(dip));
	nvme->n_cmd_taskq = ddi_taskq_create(dip, name, MIN(UINT16_MAX, ncpus),
	    TASKQ_DEFAULTPRI, 0);
	if (nvme->n_cmd_taskq == NULL) {
		dev_err(dip, CE_WARN, "!failed to create cmd taskq");
		goto fail;
	}

	/*
	 * Create PRP DMA cache
	 */
	(void) snprintf(name, sizeof (name), "%s%d_prp_cache",
	    ddi_driver_name(dip), ddi_get_instance(dip));
	nvme->n_prp_cache = kmem_cache_create(name, sizeof (nvme_dma_t),
	    0, nvme_prp_dma_constructor, nvme_prp_dma_destructor,
	    NULL, (void *)nvme, NULL, 0);

	if (nvme_init(nvme) != DDI_SUCCESS)
		goto fail;

	/*
	 * Attach the blkdev driver for each namespace.
	 */
	for (i = 0; i != nvme->n_namespace_count; i++) {
		if (nvme->n_ns[i].ns_ignore)
			continue;

		nvme->n_ns[i].ns_bd_hdl = bd_alloc_handle(&nvme->n_ns[i],
		    &nvme_bd_ops, &nvme->n_prp_dma_attr, KM_SLEEP);

		if (nvme->n_ns[i].ns_bd_hdl == NULL) {
			dev_err(dip, CE_WARN,
			    "!failed to get blkdev handle for namespace %d", i);
			goto fail;
		}

		if (bd_attach_handle(dip, nvme->n_ns[i].ns_bd_hdl)
		    != DDI_SUCCESS) {
			dev_err(dip, CE_WARN,
			    "!failed to attach blkdev handle for namespace %d",
			    i);
			goto fail;
		}
	}

	return (DDI_SUCCESS);

fail:
	/* attach successful anyway so that FMA can retire the device */
	if (nvme->n_dead)
		return (DDI_SUCCESS);

	(void) nvme_detach(dip, DDI_DETACH);

	return (DDI_FAILURE);
}

static int
nvme_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
	int instance, i;
	nvme_t *nvme;

	if (cmd != DDI_DETACH)
		return (DDI_FAILURE);

	instance = ddi_get_instance(dip);

	nvme = ddi_get_soft_state(nvme_state, instance);

	if (nvme == NULL)
		return (DDI_FAILURE);

	if (nvme->n_ns) {
		for (i = 0; i != nvme->n_namespace_count; i++) {
			if (nvme->n_ns[i].ns_bd_hdl) {
				(void) bd_detach_handle(
				    nvme->n_ns[i].ns_bd_hdl);
				bd_free_handle(nvme->n_ns[i].ns_bd_hdl);
			}

			if (nvme->n_ns[i].ns_idns)
				kmem_free(nvme->n_ns[i].ns_idns,
				    sizeof (nvme_identify_nsid_t));
		}

		kmem_free(nvme->n_ns, sizeof (nvme_namespace_t) *
		    nvme->n_namespace_count);
	}

	if (nvme->n_progress & NVME_INTERRUPTS)
		nvme_release_interrupts(nvme);

	if (nvme->n_cmd_taskq)
		ddi_taskq_wait(nvme->n_cmd_taskq);

	if (nvme->n_ioq_count > 0) {
		for (i = 1; i != nvme->n_ioq_count + 1; i++) {
			if (nvme->n_ioq[i] != NULL) {
				/* TODO: send destroy queue commands */
				nvme_free_qpair(nvme->n_ioq[i]);
			}
		}

		kmem_free(nvme->n_ioq, sizeof (nvme_qpair_t *) *
		    (nvme->n_ioq_count + 1));
	}

	if (nvme->n_prp_cache != NULL) {
		kmem_cache_destroy(nvme->n_prp_cache);
	}

	if (nvme->n_progress & NVME_REGS_MAPPED) {
		nvme_shutdown(nvme, NVME_CC_SHN_NORMAL, B_FALSE);
		(void) nvme_reset(nvme, B_FALSE);
	}

	if (nvme->n_cmd_taskq)
		ddi_taskq_destroy(nvme->n_cmd_taskq);

	if (nvme->n_progress & NVME_CTRL_LIMITS)
		sema_destroy(&nvme->n_abort_sema);

	if (nvme->n_progress & NVME_ADMIN_QUEUE)
		nvme_free_qpair(nvme->n_adminq);

	if (nvme->n_idctl)
		kmem_free(nvme->n_idctl, sizeof (nvme_identify_ctrl_t));

	if (nvme->n_progress &
	    NVME_REGS_MAPPED)
		ddi_regs_map_free(&nvme->n_regh);

	if (nvme->n_progress & NVME_FMA_INIT) {
		if (DDI_FM_ERRCB_CAP(nvme->n_fm_cap))
			ddi_fm_handler_unregister(nvme->n_dip);

		if (DDI_FM_EREPORT_CAP(nvme->n_fm_cap) ||
		    DDI_FM_ERRCB_CAP(nvme->n_fm_cap))
			pci_ereport_teardown(nvme->n_dip);

		ddi_fm_fini(nvme->n_dip);
	}

	if (nvme->n_vendor != NULL)
		strfree(nvme->n_vendor);

	if (nvme->n_product != NULL)
		strfree(nvme->n_product);

	ddi_soft_state_free(nvme_state, instance);

	return (DDI_SUCCESS);
}

static int
nvme_quiesce(dev_info_t *dip)
{
	int instance;
	nvme_t *nvme;

	instance = ddi_get_instance(dip);

	nvme = ddi_get_soft_state(nvme_state, instance);

	if (nvme == NULL)
		return (DDI_FAILURE);

	nvme_shutdown(nvme, NVME_CC_SHN_ABRUPT, B_TRUE);

	(void) nvme_reset(nvme, B_TRUE);

	return (DDI_FAILURE);
}

static int
nvme_fill_prp(nvme_cmd_t *cmd, bd_xfer_t *xfer)
{
	nvme_t *nvme = cmd->nc_nvme;
	int nprp_page, nprp;
	uint64_t *prp;

	if (xfer->x_ndmac == 0)
		return (DDI_FAILURE);

	cmd->nc_sqe.sqe_dptr.d_prp[0] = xfer->x_dmac.dmac_laddress;
	ddi_dma_nextcookie(xfer->x_dmah, &xfer->x_dmac);

	if (xfer->x_ndmac == 1) {
		cmd->nc_sqe.sqe_dptr.d_prp[1] = 0;
		return (DDI_SUCCESS);
	} else if (xfer->x_ndmac == 2) {
		cmd->nc_sqe.sqe_dptr.d_prp[1] = xfer->x_dmac.dmac_laddress;
		return (DDI_SUCCESS);
	}

	xfer->x_ndmac--;

	nprp_page = nvme->n_pagesize / sizeof (uint64_t) - 1;
	ASSERT(nprp_page > 0);
	nprp = (xfer->x_ndmac + nprp_page - 1) / nprp_page;

	/*
	 * We currently don't support chained PRPs and set up our DMA
	 * attributes to reflect that. If we still get an I/O request
	 * that needs a chained PRP something is very wrong.
	 */
	VERIFY(nprp == 1);

	cmd->nc_dma = kmem_cache_alloc(nvme->n_prp_cache, KM_SLEEP);
	bzero(cmd->nc_dma->nd_memp, cmd->nc_dma->nd_len);

	cmd->nc_sqe.sqe_dptr.d_prp[1] = cmd->nc_dma->nd_cookie.dmac_laddress;

	/*LINTED: E_PTR_BAD_CAST_ALIGN*/
	for (prp = (uint64_t *)cmd->nc_dma->nd_memp;
	    xfer->x_ndmac > 0;
	    prp++, xfer->x_ndmac--) {
		*prp = xfer->x_dmac.dmac_laddress;
		ddi_dma_nextcookie(xfer->x_dmah, &xfer->x_dmac);
	}

	(void) ddi_dma_sync(cmd->nc_dma->nd_dmah, 0, cmd->nc_dma->nd_len,
	    DDI_DMA_SYNC_FORDEV);
	return (DDI_SUCCESS);
}

static nvme_cmd_t *
nvme_create_nvm_cmd(nvme_namespace_t *ns, uint8_t opc, bd_xfer_t *xfer)
{
	nvme_t *nvme = ns->ns_nvme;
	nvme_cmd_t *cmd;

	/*
	 * Blkdev only sets BD_XFER_POLL when dumping, so don't sleep.
	 */
	cmd = nvme_alloc_cmd(nvme, (xfer->x_flags & BD_XFER_POLL) ?
	    KM_NOSLEEP : KM_SLEEP);

	if (cmd == NULL)
		return (NULL);

	cmd->nc_sqe.sqe_opc = opc;
	cmd->nc_callback = nvme_bd_xfer_done;
	cmd->nc_xfer = xfer;

	switch (opc) {
	case NVME_OPC_NVM_WRITE:
	case NVME_OPC_NVM_READ:
		VERIFY(xfer->x_nblks <= 0x10000);

		cmd->nc_sqe.sqe_nsid = ns->ns_id;

		cmd->nc_sqe.sqe_cdw10 = xfer->x_blkno & 0xffffffffu;
		cmd->nc_sqe.sqe_cdw11 = (xfer->x_blkno >> 32);
		cmd->nc_sqe.sqe_cdw12 = (uint16_t)(xfer->x_nblks - 1);

		if (nvme_fill_prp(cmd, xfer) != DDI_SUCCESS)
			goto fail;
		break;

	case NVME_OPC_NVM_FLUSH:
		cmd->nc_sqe.sqe_nsid = ns->ns_id;
		break;

	default:
		goto fail;
	}

	return (cmd);

fail:
	nvme_free_cmd(cmd);
	return (NULL);
}

static void
nvme_bd_xfer_done(void *arg)
{
	nvme_cmd_t *cmd = arg;
	bd_xfer_t *xfer = cmd->nc_xfer;
	int error = 0;

	error = nvme_check_cmd_status(cmd);
	nvme_free_cmd(cmd);

	bd_xfer_done(xfer, error);
}

static void
nvme_bd_driveinfo(void *arg, bd_drive_t *drive)
{
	nvme_namespace_t *ns = arg;
	nvme_t *nvme = ns->ns_nvme;

	/*
	 * blkdev maintains one queue size per instance (namespace),
	 * but all namespaces share the I/O queues.
	 * TODO: need to figure out a sane default, or use per-NS I/O queues,
	 * or change blkdev to handle EAGAIN
	 */
	drive->d_qsize = nvme->n_ioq_count * nvme->n_io_queue_len
	    / nvme->n_namespace_count;

	/*
	 * d_maxxfer is not set, which means the value is taken from the DMA
	 * attributes specified to bd_alloc_handle.
	 */

	drive->d_removable = B_FALSE;
	drive->d_hotpluggable = B_FALSE;

	drive->d_target = ns->ns_id;
	drive->d_lun = 0;

	drive->d_model = nvme->n_idctl->id_model;
	drive->d_model_len = sizeof (nvme->n_idctl->id_model);
	drive->d_vendor = nvme->n_vendor;
	drive->d_vendor_len = strlen(nvme->n_vendor);
	drive->d_product = nvme->n_product;
	drive->d_product_len = strlen(nvme->n_product);
	drive->d_serial = nvme->n_idctl->id_serial;
	drive->d_serial_len = sizeof (nvme->n_idctl->id_serial);
	drive->d_revision = nvme->n_idctl->id_fwrev;
	drive->d_revision_len = sizeof (nvme->n_idctl->id_fwrev);
}

static int
nvme_bd_mediainfo(void *arg, bd_media_t *media)
{
	nvme_namespace_t *ns = arg;

	media->m_nblks = ns->ns_block_count;
	media->m_blksize = ns->ns_block_size;
	media->m_readonly = B_FALSE;
	media->m_solidstate = B_TRUE;

	media->m_pblksize = ns->ns_best_block_size;

	return (0);
}

static int
nvme_bd_cmd(nvme_namespace_t *ns, bd_xfer_t *xfer, uint8_t opc)
{
	nvme_t *nvme = ns->ns_nvme;
	nvme_cmd_t *cmd;

	if (nvme->n_dead)
		return (EIO);

	/* No polling for now */
	if (xfer->x_flags & BD_XFER_POLL)
		return (EIO);

	cmd = nvme_create_nvm_cmd(ns, opc, xfer);
	if (cmd == NULL)
		return (ENOMEM);

	cmd->nc_sqid = (CPU->cpu_id % nvme->n_ioq_count) + 1;
	ASSERT(cmd->nc_sqid <= nvme->n_ioq_count);

	if (nvme_submit_cmd(nvme->n_ioq[cmd->nc_sqid], cmd)
	    != DDI_SUCCESS)
		return (EAGAIN);

	return (0);
}

static int
nvme_bd_read(void *arg, bd_xfer_t *xfer)
{
	nvme_namespace_t *ns = arg;

	return (nvme_bd_cmd(ns, xfer, NVME_OPC_NVM_READ));
}

static int
nvme_bd_write(void *arg, bd_xfer_t *xfer)
{
	nvme_namespace_t *ns = arg;

	return (nvme_bd_cmd(ns, xfer, NVME_OPC_NVM_WRITE));
}

static int
nvme_bd_sync(void *arg, bd_xfer_t *xfer)
{
	nvme_namespace_t *ns = arg;

	if (ns->ns_nvme->n_dead)
		return (EIO);

	/*
	 * If the volatile write cache is not present or not enabled the FLUSH
	 * command is a no-op, so we can take a shortcut here.
	 */
	if (!ns->ns_nvme->n_write_cache_present) {
		bd_xfer_done(xfer, ENOTSUP);
		return (0);
	}

	if (!ns->ns_nvme->n_write_cache_enabled) {
		bd_xfer_done(xfer, 0);
		return (0);
	}

	return (nvme_bd_cmd(ns, xfer, NVME_OPC_NVM_FLUSH));
}

static int
nvme_bd_devid(void *arg, dev_info_t *devinfo, ddi_devid_t *devid)
{
	nvme_namespace_t *ns = arg;

	return (ddi_devid_init(devinfo, DEVID_ENCAP, strlen(ns->ns_devid),
	    ns->ns_devid, devid));
}