/*
 * This file and its contents are supplied under the terms of the
 * Common Development and Distribution License ("CDDL"), version 1.0.
 * You may only use this file in accordance with the terms of version
 * 1.0 of the CDDL.
 *
 * A full copy of the text of the CDDL should have accompanied this
 * source. A copy of the CDDL is also available via the Internet at
 * http://www.illumos.org/license/CDDL.
 */

/*
 * Copyright 2016 Nexenta Systems, Inc. All rights reserved.
 * Copyright 2016 Tegile Systems, Inc. All rights reserved.
 * Copyright (c) 2016 The MathWorks, Inc. All rights reserved.
 */

/*
 * blkdev driver for NVMe compliant storage devices
 *
 * This driver was written to conform to version 1.2.1 of the NVMe
 * specification. It may work with newer versions, but that is completely
 * untested and disabled by default.
 *
 * The driver has only been tested on x86 systems and will not work on big-
 * endian systems without changes to the code accessing registers and data
 * structures used by the hardware.
 *
 *
 * Interrupt Usage:
 *
 * The driver will use a single interrupt while configuring the device as the
 * specification requires, but contrary to the specification it will try to
 * use a single-message MSI(-X) or FIXED interrupt. Later in the attach
 * process it will switch to multiple-message MSI(-X) if supported. The driver
 * wants to have one interrupt vector per CPU, but it will work correctly if
 * fewer are available. Interrupts can be shared by queues; the interrupt
 * handler will iterate through the I/O queue array by steps of n_intr_cnt.
 * Usually only the admin queue will share an interrupt with one I/O queue.
 * The interrupt handler will retrieve completed commands from all queues
 * sharing an interrupt vector and will post them to a taskq for completion
 * processing.
 *
 *
 * Command Processing:
 *
 * NVMe devices can have up to 65536 I/O queue pairs, with each queue holding
 * up to 65536 I/O commands. The driver will configure one I/O queue pair per
 * available interrupt vector, with the queue length usually much smaller than
 * the maximum of 65536. If the hardware doesn't provide enough queues, fewer
 * interrupt vectors will be used.
 *
 * Additionally the hardware provides a single special admin queue pair that
 * can hold up to 4096 admin commands.
 *
 * From the hardware perspective both queues of a queue pair are independent,
 * but they share some driver state: the command array (holding pointers to
 * commands currently being processed by the hardware) and the active command
 * counter. Access to the submission side of a queue pair and the shared state
 * is protected by nq_mutex. The completion side of a queue pair does not need
 * that protection apart from its access to the shared state; it is called
 * only in the interrupt handler which does not run concurrently for the same
 * interrupt vector.
 *
 * When a command is submitted to a queue pair the active command counter is
 * incremented and a pointer to the command is stored in the command array.
 * The array index is used as command identifier (CID) in the submission queue
 * entry. Some commands may take a very long time to complete, and if the
 * queue wraps around in that time a submission may find the next array slot
 * to still be used by a long-running command. In this case the array is
 * sequentially searched for the next free slot. The length of the command
 * array is the same as the configured queue length.
 *
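 * As an illustrative sketch (simplified from nvme_submit_cmd() below), CID
 * allocation amounts to:
 *
 *	while (qp->nq_cmd[qp->nq_next_cmd] != NULL)
 *		qp->nq_next_cmd = (qp->nq_next_cmd + 1) % qp->nq_nentry;
 *	qp->nq_cmd[qp->nq_next_cmd] = cmd;
 *	cmd->nc_sqe.sqe_cid = qp->nq_next_cmd;
 *
 * The search always terminates because a submission is refused outright when
 * the active command counter already equals the queue length.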
 *
 *
 * Namespace Support:
 *
 * NVMe devices can have multiple namespaces, each being an independent data
 * store. The driver supports multiple namespaces and creates a blkdev
 * interface for each namespace found. Namespaces can have various attributes
 * to support thin provisioning and protection information. This driver does
 * not support any of these attributes and ignores namespaces that have them.
 *
 * As of NVMe 1.1 namespaces can have a 64bit Extended Unique Identifier
 * (EUI64). This driver uses the EUI64 if present to generate the devid and
 * passes it to blkdev to use it in the device node names. As this is
 * currently untested, namespaces with an EUI64 are ignored by default.
 *
 * We currently support only (2 << NVME_MINOR_INST_SHIFT) - 2 namespaces in a
 * single controller. This is an artificial limit imposed by the driver to be
 * able to address a reasonable number of controllers and namespaces using a
 * 32bit minor node number.
 *
 *
 * Minor nodes:
 *
 * For each NVMe device the driver exposes one minor node for the controller
 * and one minor node for each namespace. The only operations supported by
 * those minor nodes are open(9E), close(9E), and ioctl(9E). This serves as
 * the interface for the nvmeadm(1M) utility.
 *
 *
 * Blkdev Interface:
 *
 * This driver uses blkdev to do all the heavy lifting involved with
 * presenting a disk device to the system. As a result, the processing of I/O
 * requests is relatively simple as blkdev takes care of partitioning,
 * boundary checks, DMA setup, and splitting of transfers into manageable
 * chunks.
 *
 * I/O requests coming in from blkdev are turned into NVM commands and posted
 * to an I/O queue. The queue is selected by taking the CPU id modulo the
 * number of queues. There is currently no timeout handling of I/O commands.
 *
 * Blkdev also supports querying device/media information and generating a
 * devid. The driver reports the best block size as determined by the
 * namespace format back to blkdev as physical block size to support partition
 * and block alignment. The devid is either based on the namespace EUI64, if
 * present, or composed using the device vendor ID, model number, serial
 * number, and the namespace ID.
 *
 *
 * Error Handling:
 *
 * Error handling is currently limited to detecting fatal hardware errors,
 * either by asynchronous events, or synchronously through command status or
 * admin command timeouts. In case of severe errors the device is fenced off,
 * and all further requests will return EIO. FMA is then called to fault the
 * device.
 *
 * The hardware has a limit for outstanding asynchronous event requests.
 * Before this limit is known the driver assumes it is at least 1 and posts a
 * single asynchronous request. Later, when the limit is known, more
 * asynchronous event requests are posted to allow quicker reception of error
 * information. When an asynchronous event is posted by the hardware the
 * driver will parse the error status fields and log information or fault the
 * device, depending on the severity of the asynchronous event. The
 * asynchronous event request is then reused and posted to the admin queue
 * again.
 *
 * On command completion the command status is checked for errors. In case of
 * errors indicating a driver bug the driver panics. Almost all other error
 * status values just cause EIO to be returned.
 *
 * Command timeouts are currently detected for all admin commands except
 * asynchronous event requests. If a command times out and the hardware
 * appears to be healthy the driver attempts to abort the command. If this
 * fails the driver assumes the device to be dead, fences it off, and calls
 * FMA to retire it. In general admin commands are issued at attach time only.
 * No timeout handling of normal I/O commands is presently done.
 *
 * In some cases it may be possible that the ABORT command times out, too. In
 * that case the device is also declared dead and fenced off.
 *
 *
 * Quiesce / Fast Reboot:
 *
 * The driver currently does not support fast reboot. A quiesce(9E) entry
 * point is still provided which is used to send a shutdown notification to
 * the device.
 *
 *
 * Driver Configuration:
 *
 * The following driver properties can be changed to control some aspects of
 * the driver's operation:
 * - strict-version: can be set to 0 to allow devices conforming to newer
 *   versions or namespaces with EUI64 to be used
 * - ignore-unknown-vendor-status: can be set to 1 to not handle any vendor
 *   specific command status as a fatal error leading to device faulting
 * - admin-queue-len: the maximum length of the admin queue (16-4096)
 * - io-queue-len: the maximum length of the I/O queues (16-65536)
 * - async-event-limit: the maximum number of asynchronous event requests to
 *   be posted by the driver
 * - volatile-write-cache-enable: can be set to 0 to disable the volatile
 *   write cache
 * - min-phys-block-size: the minimum physical block size to report to blkdev,
 *   which is among other things the basis for ZFS vdev ashift
 *
 *
 * TODO:
 * - figure out sane default for I/O queue depth reported to blkdev
 * - polled I/O support to support kernel core dumping
 * - FMA handling of media errors
 * - support for devices supporting very large I/O requests using chained PRPs
 * - support for configuring hardware parameters like interrupt coalescing
 * - support for media formatting and hard partitioning into namespaces
 * - support for big-endian systems
 * - support for fast reboot
 * - support for firmware updates
 * - support for NVMe Subsystem Reset (1.1)
 * - support for Scatter/Gather lists (1.1)
 * - support for Reservations (1.1)
 * - support for power management
 */

#include <sys/byteorder.h>
#ifdef _BIG_ENDIAN
#error nvme driver needs porting for big-endian platforms
#endif

#include <sys/modctl.h>
#include <sys/conf.h>
#include <sys/devops.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/sunndi.h>
#include <sys/bitmap.h>
#include <sys/sysmacros.h>
#include <sys/param.h>
#include <sys/varargs.h>
#include <sys/cpuvar.h>
#include <sys/disp.h>
#include <sys/blkdev.h>
#include <sys/atomic.h>
#include <sys/archsystm.h>
#include <sys/sata/sata_hba.h>
#include <sys/stat.h>
#include <sys/policy.h>

#include <sys/nvme.h>

#ifdef __x86
#include <sys/x86_archext.h>
#endif

#include "nvme_reg.h"
#include "nvme_var.h"


/* NVMe spec version supported */
static const int nvme_version_major = 1;
static const int nvme_version_minor = 2;
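
/*
 * Illustrative configuration sketch: the properties listed in the theory
 * statement above are expected to be set in nvme.conf, while the tunables
 * below can be overridden in /etc/system. Assuming the standard
 * driver.conf(4) and system(4) syntax, this might look like:
 *
 *	# nvme.conf: accept devices conforming to NVMe versions newer than 1.2
 *	strict-version=0;
 *
 *	# /etc/system: allow admin commands ten seconds to complete
 *	set nvme:nvme_admin_cmd_timeout = 10;
 */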

/* tunable for admin command timeout in seconds, default is 1s */
int nvme_admin_cmd_timeout = 1;

/* tunable for FORMAT NVM command timeout in seconds, default is 600s */
int nvme_format_cmd_timeout = 600;

static int nvme_attach(dev_info_t *, ddi_attach_cmd_t);
static int nvme_detach(dev_info_t *, ddi_detach_cmd_t);
static int nvme_quiesce(dev_info_t *);
static int nvme_fm_errcb(dev_info_t *, ddi_fm_error_t *, const void *);
static int nvme_setup_interrupts(nvme_t *, int, int);
static void nvme_release_interrupts(nvme_t *);
static uint_t nvme_intr(caddr_t, caddr_t);

static void nvme_shutdown(nvme_t *, int, boolean_t);
static boolean_t nvme_reset(nvme_t *, boolean_t);
static int nvme_init(nvme_t *);
static nvme_cmd_t *nvme_alloc_cmd(nvme_t *, int);
static void nvme_free_cmd(nvme_cmd_t *);
static nvme_cmd_t *nvme_create_nvm_cmd(nvme_namespace_t *, uint8_t,
    bd_xfer_t *);
static int nvme_admin_cmd(nvme_cmd_t *, int);
static int nvme_submit_cmd(nvme_qpair_t *, nvme_cmd_t *);
static nvme_cmd_t *nvme_retrieve_cmd(nvme_t *, nvme_qpair_t *);
static boolean_t nvme_wait_cmd(nvme_cmd_t *, uint_t);
static void nvme_wakeup_cmd(void *);
static void nvme_async_event_task(void *);

static int nvme_check_unknown_cmd_status(nvme_cmd_t *);
static int nvme_check_vendor_cmd_status(nvme_cmd_t *);
static int nvme_check_integrity_cmd_status(nvme_cmd_t *);
static int nvme_check_specific_cmd_status(nvme_cmd_t *);
static int nvme_check_generic_cmd_status(nvme_cmd_t *);
static inline int nvme_check_cmd_status(nvme_cmd_t *);

static void nvme_abort_cmd(nvme_cmd_t *);
static int nvme_async_event(nvme_t *);
static int nvme_format_nvm(nvme_t *, uint32_t, uint8_t, boolean_t, uint8_t,
    boolean_t, uint8_t);
static int nvme_get_logpage(nvme_t *, void **, size_t *, uint8_t, ...);
static void *nvme_identify(nvme_t *, uint32_t);
static boolean_t nvme_set_features(nvme_t *, uint32_t, uint8_t, uint32_t,
    uint32_t *);
static boolean_t nvme_get_features(nvme_t *, uint32_t, uint8_t, uint32_t *,
    void **, size_t *);
static boolean_t nvme_write_cache_set(nvme_t *, boolean_t);
static int nvme_set_nqueues(nvme_t *, uint16_t);

static void nvme_free_dma(nvme_dma_t *);
static int nvme_zalloc_dma(nvme_t *, size_t, uint_t, ddi_dma_attr_t *,
    nvme_dma_t **);
static int nvme_zalloc_queue_dma(nvme_t *, uint32_t, uint16_t, uint_t,
    nvme_dma_t **);
static void nvme_free_qpair(nvme_qpair_t *);
static int nvme_alloc_qpair(nvme_t *, uint32_t, nvme_qpair_t **, int);
static int nvme_create_io_qpair(nvme_t *, nvme_qpair_t *, uint16_t);

static inline void nvme_put64(nvme_t *, uintptr_t, uint64_t);
static inline void nvme_put32(nvme_t *, uintptr_t, uint32_t);
static inline uint64_t nvme_get64(nvme_t *, uintptr_t);
static inline uint32_t nvme_get32(nvme_t *, uintptr_t);

static boolean_t nvme_check_regs_hdl(nvme_t *);
static boolean_t nvme_check_dma_hdl(nvme_dma_t *);

static int nvme_fill_prp(nvme_cmd_t *, bd_xfer_t *);

static void nvme_bd_xfer_done(void *);
static void nvme_bd_driveinfo(void *, bd_drive_t *);
static int nvme_bd_mediainfo(void *, bd_media_t *);
static int nvme_bd_cmd(nvme_namespace_t *, bd_xfer_t *, uint8_t);
static int nvme_bd_read(void *, bd_xfer_t *);
static int nvme_bd_write(void *, bd_xfer_t *);
static int nvme_bd_sync(void *, bd_xfer_t *);
static int nvme_bd_devid(void *, dev_info_t *, ddi_devid_t *);

static int nvme_prp_dma_constructor(void *, void *, int);
static void nvme_prp_dma_destructor(void *, void *);

static void nvme_prepare_devid(nvme_t *, uint32_t);

static int nvme_open(dev_t *, int, int, cred_t *);
static int nvme_close(dev_t, int, int, cred_t *);
static int nvme_ioctl(dev_t, int, intptr_t, int, cred_t *, int *);

#define	NVME_MINOR_INST_SHIFT	14
#define	NVME_MINOR(inst, nsid)	(((inst) << NVME_MINOR_INST_SHIFT) | (nsid))
#define	NVME_MINOR_INST(minor)	((minor) >> NVME_MINOR_INST_SHIFT)
#define	NVME_MINOR_NSID(minor)	((minor) & ((1 << NVME_MINOR_INST_SHIFT) - 1))
#define	NVME_MINOR_MAX		(NVME_MINOR(1, 0) - 2)

static void *nvme_state;
static kmem_cache_t *nvme_cmd_cache;

/*
 * DMA attributes for queue DMA memory
 *
 * Queue DMA memory must be page aligned. The maximum length of a queue is
 * 65536 entries, and an entry can be 64 bytes long.
 */
static ddi_dma_attr_t nvme_queue_dma_attr = {
	.dma_attr_version	= DMA_ATTR_V0,
	.dma_attr_addr_lo	= 0,
	.dma_attr_addr_hi	= 0xffffffffffffffffULL,
	.dma_attr_count_max	= (UINT16_MAX + 1) * sizeof (nvme_sqe_t) - 1,
	.dma_attr_align		= 0x1000,
	.dma_attr_burstsizes	= 0x7ff,
	.dma_attr_minxfer	= 0x1000,
	.dma_attr_maxxfer	= (UINT16_MAX + 1) * sizeof (nvme_sqe_t),
	.dma_attr_seg		= 0xffffffffffffffffULL,
	.dma_attr_sgllen	= 1,
	.dma_attr_granular	= 1,
	.dma_attr_flags		= 0,
};

/*
 * DMA attributes for transfers using Physical Region Page (PRP) entries
 *
 * A PRP entry describes one page of DMA memory using the page size specified
 * in the controller configuration's memory page size register (CC.MPS). It
 * uses a 64bit base address aligned to this page size. There is no limitation
 * on chaining PRPs together for arbitrarily large DMA transfers.
 */
static ddi_dma_attr_t nvme_prp_dma_attr = {
	.dma_attr_version	= DMA_ATTR_V0,
	.dma_attr_addr_lo	= 0,
	.dma_attr_addr_hi	= 0xffffffffffffffffULL,
	.dma_attr_count_max	= 0xfff,
	.dma_attr_align		= 0x1000,
	.dma_attr_burstsizes	= 0x7ff,
	.dma_attr_minxfer	= 0x1000,
	.dma_attr_maxxfer	= 0x1000,
	.dma_attr_seg		= 0xfff,
	.dma_attr_sgllen	= -1,
	.dma_attr_granular	= 1,
	.dma_attr_flags		= 0,
};
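
/*
 * A worked example of the PRP scheme (assuming the 4096 byte page size this
 * driver configures in CC.MPS): a page-aligned 8k transfer fits in the two
 * PRP entries of the submission queue entry itself. Larger transfers make
 * the second PRP entry point to a PRP list; one list page holds
 * 4096 / 8 = 512 entries and thus covers transfers on the order of
 * 512 * 4096 bytes = 2MB. Chaining multiple list pages is not implemented
 * yet (see the TODO above).
 */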

/*
 * DMA attributes for transfers using scatter/gather lists
 *
 * A SGL entry describes a chunk of DMA memory using a 64bit base address and
 * a 32bit length field. SGL Segment and SGL Last Segment entries require the
 * length to be a multiple of 16 bytes.
 */
static ddi_dma_attr_t nvme_sgl_dma_attr = {
	.dma_attr_version	= DMA_ATTR_V0,
	.dma_attr_addr_lo	= 0,
	.dma_attr_addr_hi	= 0xffffffffffffffffULL,
	.dma_attr_count_max	= 0xffffffffUL,
	.dma_attr_align		= 1,
	.dma_attr_burstsizes	= 0x7ff,
	.dma_attr_minxfer	= 0x10,
	.dma_attr_maxxfer	= 0xfffffffffULL,
	.dma_attr_seg		= 0xffffffffffffffffULL,
	.dma_attr_sgllen	= -1,
	.dma_attr_granular	= 0x10,
	.dma_attr_flags		= 0
};

static ddi_device_acc_attr_t nvme_reg_acc_attr = {
	.devacc_attr_version	= DDI_DEVICE_ATTR_V0,
	.devacc_attr_endian_flags = DDI_STRUCTURE_LE_ACC,
	.devacc_attr_dataorder	= DDI_STRICTORDER_ACC
};

static struct cb_ops nvme_cb_ops = {
	.cb_open	= nvme_open,
	.cb_close	= nvme_close,
	.cb_strategy	= nodev,
	.cb_print	= nodev,
	.cb_dump	= nodev,
	.cb_read	= nodev,
	.cb_write	= nodev,
	.cb_ioctl	= nvme_ioctl,
	.cb_devmap	= nodev,
	.cb_mmap	= nodev,
	.cb_segmap	= nodev,
	.cb_chpoll	= nochpoll,
	.cb_prop_op	= ddi_prop_op,
	.cb_str		= 0,
	.cb_flag	= D_NEW | D_MP,
	.cb_rev		= CB_REV,
	.cb_aread	= nodev,
	.cb_awrite	= nodev
};

static struct dev_ops nvme_dev_ops = {
	.devo_rev	= DEVO_REV,
	.devo_refcnt	= 0,
	.devo_getinfo	= ddi_no_info,
	.devo_identify	= nulldev,
	.devo_probe	= nulldev,
	.devo_attach	= nvme_attach,
	.devo_detach	= nvme_detach,
	.devo_reset	= nodev,
	.devo_cb_ops	= &nvme_cb_ops,
	.devo_bus_ops	= NULL,
	.devo_power	= NULL,
	.devo_quiesce	= nvme_quiesce,
};

static struct modldrv nvme_modldrv = {
	.drv_modops	= &mod_driverops,
	.drv_linkinfo	= "NVMe v1.1b",
	.drv_dev_ops	= &nvme_dev_ops
};

static struct modlinkage nvme_modlinkage = {
	.ml_rev		= MODREV_1,
	.ml_linkage	= { &nvme_modldrv, NULL }
};

static bd_ops_t nvme_bd_ops = {
	.o_version	= BD_OPS_VERSION_0,
	.o_drive_info	= nvme_bd_driveinfo,
	.o_media_info	= nvme_bd_mediainfo,
	.o_devid_init	= nvme_bd_devid,
	.o_sync_cache	= nvme_bd_sync,
	.o_read		= nvme_bd_read,
	.o_write	= nvme_bd_write,
};

int
_init(void)
{
	int error;

	error = ddi_soft_state_init(&nvme_state, sizeof (nvme_t), 1);
	if (error != DDI_SUCCESS)
		return (error);

	nvme_cmd_cache = kmem_cache_create("nvme_cmd_cache",
	    sizeof (nvme_cmd_t), 64, NULL, NULL, NULL, NULL, NULL, 0);

	bd_mod_init(&nvme_dev_ops);

	error = mod_install(&nvme_modlinkage);
	if (error != DDI_SUCCESS) {
		ddi_soft_state_fini(&nvme_state);
		kmem_cache_destroy(nvme_cmd_cache);
		bd_mod_fini(&nvme_dev_ops);
	}

	return (error);
}

int
_fini(void)
{
	int error;

	error = mod_remove(&nvme_modlinkage);
	if (error == DDI_SUCCESS) {
		ddi_soft_state_fini(&nvme_state);
		kmem_cache_destroy(nvme_cmd_cache);
		bd_mod_fini(&nvme_dev_ops);
	}

	return (error);
}

int
_info(struct modinfo *modinfop)
{
	return (mod_info(&nvme_modlinkage, modinfop));
}

static inline void
nvme_put64(nvme_t *nvme, uintptr_t reg, uint64_t val)
{
	ASSERT(((uintptr_t)(nvme->n_regs + reg) & 0x7) == 0);

	/*LINTED: E_BAD_PTR_CAST_ALIGN*/
	ddi_put64(nvme->n_regh, (uint64_t *)(nvme->n_regs + reg), val);
}

static inline void
nvme_put32(nvme_t *nvme, uintptr_t reg, uint32_t val)
{
	ASSERT(((uintptr_t)(nvme->n_regs + reg) & 0x3) == 0);

	/*LINTED: E_BAD_PTR_CAST_ALIGN*/
	ddi_put32(nvme->n_regh, (uint32_t *)(nvme->n_regs + reg), val);
}

static inline uint64_t
nvme_get64(nvme_t *nvme, uintptr_t reg)
{
	uint64_t val;

	ASSERT(((uintptr_t)(nvme->n_regs + reg) & 0x7) == 0);

	/*LINTED: E_BAD_PTR_CAST_ALIGN*/
	val = ddi_get64(nvme->n_regh, (uint64_t *)(nvme->n_regs + reg));

	return (val);
}

static inline uint32_t
nvme_get32(nvme_t *nvme, uintptr_t reg)
{
	uint32_t val;

	ASSERT(((uintptr_t)(nvme->n_regs + reg) & 0x3) == 0);

	/*LINTED: E_BAD_PTR_CAST_ALIGN*/
	val = ddi_get32(nvme->n_regh, (uint32_t *)(nvme->n_regs + reg));

	return (val);
}

static boolean_t
nvme_check_regs_hdl(nvme_t *nvme)
{
	ddi_fm_error_t error;

	ddi_fm_acc_err_get(nvme->n_regh, &error, DDI_FME_VERSION);

	if (error.fme_status != DDI_FM_OK)
		return (B_TRUE);

	return (B_FALSE);
}

static boolean_t
nvme_check_dma_hdl(nvme_dma_t *dma)
{
	ddi_fm_error_t error;

	if (dma == NULL)
		return (B_FALSE);

	ddi_fm_dma_err_get(dma->nd_dmah, &error, DDI_FME_VERSION);

	if (error.fme_status != DDI_FM_OK)
		return (B_TRUE);

	return (B_FALSE);
}

static void
nvme_free_dma_common(nvme_dma_t *dma)
{
	if (dma->nd_dmah != NULL)
		(void) ddi_dma_unbind_handle(dma->nd_dmah);
	if (dma->nd_acch != NULL)
		ddi_dma_mem_free(&dma->nd_acch);
	if (dma->nd_dmah != NULL)
		ddi_dma_free_handle(&dma->nd_dmah);
}

static void
nvme_free_dma(nvme_dma_t *dma)
{
	nvme_free_dma_common(dma);
	kmem_free(dma, sizeof (*dma));
}

/* ARGSUSED */
static void
nvme_prp_dma_destructor(void *buf, void *private)
{
	nvme_dma_t *dma = (nvme_dma_t *)buf;

	nvme_free_dma_common(dma);
}

static int
nvme_alloc_dma_common(nvme_t *nvme, nvme_dma_t *dma,
    size_t len, uint_t flags, ddi_dma_attr_t *dma_attr)
{
	if (ddi_dma_alloc_handle(nvme->n_dip, dma_attr, DDI_DMA_SLEEP, NULL,
	    &dma->nd_dmah) != DDI_SUCCESS) {
		/*
		 * Due to DDI_DMA_SLEEP this can't be DDI_DMA_NORESOURCES, and
		 * the only other possible error is DDI_DMA_BADATTR which
		 * indicates a driver bug which should cause a panic.
		 */
		dev_err(nvme->n_dip, CE_PANIC,
		    "!failed to get DMA handle, check DMA attributes");
		return (DDI_FAILURE);
	}

	/*
	 * ddi_dma_mem_alloc() can only fail when DDI_DMA_NOSLEEP is specified
	 * or the flags are conflicting, which isn't the case here.
	 */
	(void) ddi_dma_mem_alloc(dma->nd_dmah, len, &nvme->n_reg_acc_attr,
	    DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL, &dma->nd_memp,
	    &dma->nd_len, &dma->nd_acch);

	if (ddi_dma_addr_bind_handle(dma->nd_dmah, NULL, dma->nd_memp,
	    dma->nd_len, flags | DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL,
	    &dma->nd_cookie, &dma->nd_ncookie) != DDI_DMA_MAPPED) {
		dev_err(nvme->n_dip, CE_WARN,
		    "!failed to bind DMA memory");
		atomic_inc_32(&nvme->n_dma_bind_err);
		nvme_free_dma_common(dma);
		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}

static int
nvme_zalloc_dma(nvme_t *nvme, size_t len, uint_t flags,
    ddi_dma_attr_t *dma_attr, nvme_dma_t **ret)
{
	nvme_dma_t *dma = kmem_zalloc(sizeof (nvme_dma_t), KM_SLEEP);

	if (nvme_alloc_dma_common(nvme, dma, len, flags, dma_attr) !=
	    DDI_SUCCESS) {
		*ret = NULL;
		kmem_free(dma, sizeof (nvme_dma_t));
		return (DDI_FAILURE);
	}

	bzero(dma->nd_memp, dma->nd_len);

	*ret = dma;
	return (DDI_SUCCESS);
}

/* ARGSUSED */
static int
nvme_prp_dma_constructor(void *buf, void *private, int flags)
{
	nvme_dma_t *dma = (nvme_dma_t *)buf;
	nvme_t *nvme = (nvme_t *)private;

	dma->nd_dmah = NULL;
	dma->nd_acch = NULL;

	if (nvme_alloc_dma_common(nvme, dma, nvme->n_pagesize,
	    DDI_DMA_READ, &nvme->n_prp_dma_attr) != DDI_SUCCESS) {
		return (-1);
	}

	ASSERT(dma->nd_ncookie == 1);

	dma->nd_cached = B_TRUE;

	return (0);
}
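
/*
 * A minimal sketch of how the constructor/destructor pair above is wired up
 * (assumption: the PRP buffer cache is created roughly like this during
 * attach, which is outside this excerpt; the cache name is made up):
 *
 *	nvme->n_prp_cache = kmem_cache_create("nvme_prp_cache",
 *	    sizeof (nvme_dma_t), 0, nvme_prp_dma_constructor,
 *	    nvme_prp_dma_destructor, NULL, (void *)nvme, NULL, 0);
 *
 * nvme_free_cmd() below then returns buffers marked nd_cached to this cache
 * instead of tearing the DMA resources down on every command.
 */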

static int
nvme_zalloc_queue_dma(nvme_t *nvme, uint32_t nentry, uint16_t qe_len,
    uint_t flags, nvme_dma_t **dma)
{
	uint32_t len = nentry * qe_len;
	ddi_dma_attr_t q_dma_attr = nvme->n_queue_dma_attr;

	len = roundup(len, nvme->n_pagesize);

	q_dma_attr.dma_attr_minxfer = len;

	if (nvme_zalloc_dma(nvme, len, flags, &q_dma_attr, dma)
	    != DDI_SUCCESS) {
		dev_err(nvme->n_dip, CE_WARN,
		    "!failed to get DMA memory for queue");
		goto fail;
	}

	if ((*dma)->nd_ncookie != 1) {
		dev_err(nvme->n_dip, CE_WARN,
		    "!got too many cookies for queue DMA");
		goto fail;
	}

	return (DDI_SUCCESS);

fail:
	if (*dma) {
		nvme_free_dma(*dma);
		*dma = NULL;
	}

	return (DDI_FAILURE);
}

static void
nvme_free_qpair(nvme_qpair_t *qp)
{
	int i;

	mutex_destroy(&qp->nq_mutex);

	if (qp->nq_sqdma != NULL)
		nvme_free_dma(qp->nq_sqdma);
	if (qp->nq_cqdma != NULL)
		nvme_free_dma(qp->nq_cqdma);

	if (qp->nq_active_cmds > 0)
		for (i = 0; i != qp->nq_nentry; i++)
			if (qp->nq_cmd[i] != NULL)
				nvme_free_cmd(qp->nq_cmd[i]);

	if (qp->nq_cmd != NULL)
		kmem_free(qp->nq_cmd, sizeof (nvme_cmd_t *) * qp->nq_nentry);

	kmem_free(qp, sizeof (nvme_qpair_t));
}

static int
nvme_alloc_qpair(nvme_t *nvme, uint32_t nentry, nvme_qpair_t **nqp,
    int idx)
{
	nvme_qpair_t *qp = kmem_zalloc(sizeof (*qp), KM_SLEEP);

	mutex_init(&qp->nq_mutex, NULL, MUTEX_DRIVER,
	    DDI_INTR_PRI(nvme->n_intr_pri));

	if (nvme_zalloc_queue_dma(nvme, nentry, sizeof (nvme_sqe_t),
	    DDI_DMA_WRITE, &qp->nq_sqdma) != DDI_SUCCESS)
		goto fail;

	if (nvme_zalloc_queue_dma(nvme, nentry, sizeof (nvme_cqe_t),
	    DDI_DMA_READ, &qp->nq_cqdma) != DDI_SUCCESS)
		goto fail;

	qp->nq_sq = (nvme_sqe_t *)qp->nq_sqdma->nd_memp;
	qp->nq_cq = (nvme_cqe_t *)qp->nq_cqdma->nd_memp;
	qp->nq_nentry = nentry;

	qp->nq_sqtdbl = NVME_REG_SQTDBL(nvme, idx);
	qp->nq_cqhdbl = NVME_REG_CQHDBL(nvme, idx);

	qp->nq_cmd = kmem_zalloc(sizeof (nvme_cmd_t *) * nentry, KM_SLEEP);
	qp->nq_next_cmd = 0;

	*nqp = qp;
	return (DDI_SUCCESS);

fail:
	nvme_free_qpair(qp);
	*nqp = NULL;

	return (DDI_FAILURE);
}

static nvme_cmd_t *
nvme_alloc_cmd(nvme_t *nvme, int kmflag)
{
	nvme_cmd_t *cmd = kmem_cache_alloc(nvme_cmd_cache, kmflag);

	if (cmd == NULL)
		return (cmd);

	bzero(cmd, sizeof (nvme_cmd_t));

	cmd->nc_nvme = nvme;

	mutex_init(&cmd->nc_mutex, NULL, MUTEX_DRIVER,
	    DDI_INTR_PRI(nvme->n_intr_pri));
	cv_init(&cmd->nc_cv, NULL, CV_DRIVER, NULL);

	return (cmd);
}

static void
nvme_free_cmd(nvme_cmd_t *cmd)
{
	if (cmd->nc_dma) {
		if (cmd->nc_dma->nd_cached)
			kmem_cache_free(cmd->nc_nvme->n_prp_cache,
			    cmd->nc_dma);
		else
			nvme_free_dma(cmd->nc_dma);
		cmd->nc_dma = NULL;
	}

	cv_destroy(&cmd->nc_cv);
	mutex_destroy(&cmd->nc_mutex);

	kmem_cache_free(nvme_cmd_cache, cmd);
}

static int
nvme_submit_cmd(nvme_qpair_t *qp, nvme_cmd_t *cmd)
{
	nvme_reg_sqtdbl_t tail = { 0 };

	mutex_enter(&qp->nq_mutex);

	if (qp->nq_active_cmds == qp->nq_nentry) {
		mutex_exit(&qp->nq_mutex);
		return (DDI_FAILURE);
	}

	cmd->nc_completed = B_FALSE;

	/*
	 * Try to insert the cmd into the active cmd array at the nq_next_cmd
	 * slot. If the slot is already occupied advance to the next slot and
	 * try again. This can happen for long running commands like async
	 * event requests.
	 */
	while (qp->nq_cmd[qp->nq_next_cmd] != NULL)
		qp->nq_next_cmd = (qp->nq_next_cmd + 1) % qp->nq_nentry;
	qp->nq_cmd[qp->nq_next_cmd] = cmd;

	qp->nq_active_cmds++;

	cmd->nc_sqe.sqe_cid = qp->nq_next_cmd;
	bcopy(&cmd->nc_sqe, &qp->nq_sq[qp->nq_sqtail], sizeof (nvme_sqe_t));
	(void) ddi_dma_sync(qp->nq_sqdma->nd_dmah,
	    sizeof (nvme_sqe_t) * qp->nq_sqtail,
	    sizeof (nvme_sqe_t), DDI_DMA_SYNC_FORDEV);
	qp->nq_next_cmd = (qp->nq_next_cmd + 1) % qp->nq_nentry;

	tail.b.sqtdbl_sqt = qp->nq_sqtail = (qp->nq_sqtail + 1) % qp->nq_nentry;
	nvme_put32(cmd->nc_nvme, qp->nq_sqtdbl, tail.r);

	mutex_exit(&qp->nq_mutex);
	return (DDI_SUCCESS);
}
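
/*
 * A worked phase tag example: the completion queue memory and nq_phase both
 * start out as zero, and the hardware writes its first pass of completion
 * entries with the phase bit set to 1. An entry whose phase bit still equals
 * nq_phase therefore hasn't been written by the hardware yet. Once the head
 * index wraps back to slot 0, nq_phase is toggled to 1 and the second pass
 * of entries, now written with the phase bit cleared, again becomes
 * recognizable as new.
 */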

static nvme_cmd_t *
nvme_retrieve_cmd(nvme_t *nvme, nvme_qpair_t *qp)
{
	nvme_reg_cqhdbl_t head = { 0 };

	nvme_cqe_t *cqe;
	nvme_cmd_t *cmd;

	(void) ddi_dma_sync(qp->nq_cqdma->nd_dmah, 0,
	    sizeof (nvme_cqe_t) * qp->nq_nentry, DDI_DMA_SYNC_FORKERNEL);

	cqe = &qp->nq_cq[qp->nq_cqhead];

	/* Check phase tag of CQE. Hardware inverts it for new entries. */
	if (cqe->cqe_sf.sf_p == qp->nq_phase)
		return (NULL);

	ASSERT(nvme->n_ioq[cqe->cqe_sqid] == qp);
	ASSERT(cqe->cqe_cid < qp->nq_nentry);

	mutex_enter(&qp->nq_mutex);
	cmd = qp->nq_cmd[cqe->cqe_cid];
	qp->nq_cmd[cqe->cqe_cid] = NULL;
	qp->nq_active_cmds--;
	mutex_exit(&qp->nq_mutex);

	ASSERT(cmd != NULL);
	ASSERT(cmd->nc_nvme == nvme);
	ASSERT(cmd->nc_sqid == cqe->cqe_sqid);
	ASSERT(cmd->nc_sqe.sqe_cid == cqe->cqe_cid);
	bcopy(cqe, &cmd->nc_cqe, sizeof (nvme_cqe_t));

	qp->nq_sqhead = cqe->cqe_sqhd;

	head.b.cqhdbl_cqh = qp->nq_cqhead = (qp->nq_cqhead + 1) % qp->nq_nentry;

	/* Toggle phase on wrap-around. */
	if (qp->nq_cqhead == 0)
		qp->nq_phase = qp->nq_phase ? 0 : 1;

	nvme_put32(cmd->nc_nvme, qp->nq_cqhdbl, head.r);

	return (cmd);
}

static int
nvme_check_unknown_cmd_status(nvme_cmd_t *cmd)
{
	nvme_cqe_t *cqe = &cmd->nc_cqe;

	dev_err(cmd->nc_nvme->n_dip, CE_WARN,
	    "!unknown command status received: opc = %x, sqid = %d, cid = %d, "
	    "sc = %x, sct = %x, dnr = %d, m = %d", cmd->nc_sqe.sqe_opc,
	    cqe->cqe_sqid, cqe->cqe_cid, cqe->cqe_sf.sf_sc, cqe->cqe_sf.sf_sct,
	    cqe->cqe_sf.sf_dnr, cqe->cqe_sf.sf_m);

	if (cmd->nc_xfer != NULL)
		bd_error(cmd->nc_xfer, BD_ERR_ILLRQ);

	if (cmd->nc_nvme->n_strict_version) {
		cmd->nc_nvme->n_dead = B_TRUE;
		ddi_fm_service_impact(cmd->nc_nvme->n_dip, DDI_SERVICE_LOST);
	}

	return (EIO);
}

static int
nvme_check_vendor_cmd_status(nvme_cmd_t *cmd)
{
	nvme_cqe_t *cqe = &cmd->nc_cqe;

	dev_err(cmd->nc_nvme->n_dip, CE_WARN,
	    "!vendor specific command status received: opc = %x, sqid = %d, "
	    "cid = %d, sc = %x, sct = %x, dnr = %d, m = %d",
	    cmd->nc_sqe.sqe_opc, cqe->cqe_sqid, cqe->cqe_cid,
	    cqe->cqe_sf.sf_sc, cqe->cqe_sf.sf_sct, cqe->cqe_sf.sf_dnr,
	    cqe->cqe_sf.sf_m);

	if (!cmd->nc_nvme->n_ignore_unknown_vendor_status) {
		cmd->nc_nvme->n_dead = B_TRUE;
		ddi_fm_service_impact(cmd->nc_nvme->n_dip, DDI_SERVICE_LOST);
	}

	return (EIO);
}

static int
nvme_check_integrity_cmd_status(nvme_cmd_t *cmd)
{
	nvme_cqe_t *cqe = &cmd->nc_cqe;

	switch (cqe->cqe_sf.sf_sc) {
	case NVME_CQE_SC_INT_NVM_WRITE:
		/* write fail */
		/* TODO: post ereport */
		if (cmd->nc_xfer != NULL)
			bd_error(cmd->nc_xfer, BD_ERR_MEDIA);
		return (EIO);

	case NVME_CQE_SC_INT_NVM_READ:
		/* read fail */
		/* TODO: post ereport */
		if (cmd->nc_xfer != NULL)
			bd_error(cmd->nc_xfer, BD_ERR_MEDIA);
		return (EIO);

	default:
		return (nvme_check_unknown_cmd_status(cmd));
	}
}

static int
nvme_check_generic_cmd_status(nvme_cmd_t *cmd)
{
	nvme_cqe_t *cqe = &cmd->nc_cqe;

	switch (cqe->cqe_sf.sf_sc) {
	case NVME_CQE_SC_GEN_SUCCESS:
		return (0);

	/*
	 * Errors indicating a bug in the driver should cause a panic.
	 */
	case NVME_CQE_SC_GEN_INV_OPC:
		/* Invalid Command Opcode */
		dev_err(cmd->nc_nvme->n_dip, CE_PANIC, "programming error: "
		    "invalid opcode in cmd %p", (void *)cmd);
		return (0);

	case NVME_CQE_SC_GEN_INV_FLD:
		/* Invalid Field in Command */
		if (!cmd->nc_dontpanic)
			dev_err(cmd->nc_nvme->n_dip, CE_PANIC,
			    "programming error: invalid field in cmd %p",
			    (void *)cmd);
		return (EIO);

	case NVME_CQE_SC_GEN_ID_CNFL:
		/* Command ID Conflict */
		dev_err(cmd->nc_nvme->n_dip, CE_PANIC, "programming error: "
		    "cmd ID conflict in cmd %p", (void *)cmd);
		return (0);

	case NVME_CQE_SC_GEN_INV_NS:
		/* Invalid Namespace or Format */
		if (!cmd->nc_dontpanic)
			dev_err(cmd->nc_nvme->n_dip, CE_PANIC,
			    "programming error: invalid NS/format in cmd %p",
			    (void *)cmd);
		return (EINVAL);

	case NVME_CQE_SC_GEN_NVM_LBA_RANGE:
		/* LBA Out Of Range */
		dev_err(cmd->nc_nvme->n_dip, CE_PANIC, "programming error: "
		    "LBA out of range in cmd %p", (void *)cmd);
		return (0);

	/*
	 * Non-fatal errors, handle gracefully.
	 */
	case NVME_CQE_SC_GEN_DATA_XFR_ERR:
		/* Data Transfer Error (DMA) */
		/* TODO: post ereport */
		atomic_inc_32(&cmd->nc_nvme->n_data_xfr_err);
		if (cmd->nc_xfer != NULL)
			bd_error(cmd->nc_xfer, BD_ERR_NTRDY);
		return (EIO);

	case NVME_CQE_SC_GEN_INTERNAL_ERR:
		/*
		 * Internal Error. The spec (v1.0, section 4.5.1.2) says
		 * detailed error information is returned as async event,
		 * so we pretty much ignore the error here and handle it
		 * in the async event handler.
		 */
		atomic_inc_32(&cmd->nc_nvme->n_internal_err);
		if (cmd->nc_xfer != NULL)
			bd_error(cmd->nc_xfer, BD_ERR_NTRDY);
		return (EIO);

	case NVME_CQE_SC_GEN_ABORT_REQUEST:
		/*
		 * Command Abort Requested. This normally happens only when a
		 * command times out.
		 */
		/* TODO: post ereport or change blkdev to handle this? */
		atomic_inc_32(&cmd->nc_nvme->n_abort_rq_err);
		return (ECANCELED);

	case NVME_CQE_SC_GEN_ABORT_PWRLOSS:
		/* Command Aborted due to Power Loss Notification */
		ddi_fm_service_impact(cmd->nc_nvme->n_dip, DDI_SERVICE_LOST);
		cmd->nc_nvme->n_dead = B_TRUE;
		return (EIO);

	case NVME_CQE_SC_GEN_ABORT_SQ_DEL:
		/* Command Aborted due to SQ Deletion */
		atomic_inc_32(&cmd->nc_nvme->n_abort_sq_del);
		return (EIO);

	case NVME_CQE_SC_GEN_NVM_CAP_EXC:
		/* Capacity Exceeded */
		atomic_inc_32(&cmd->nc_nvme->n_nvm_cap_exc);
		if (cmd->nc_xfer != NULL)
			bd_error(cmd->nc_xfer, BD_ERR_MEDIA);
		return (EIO);

	case NVME_CQE_SC_GEN_NVM_NS_NOTRDY:
		/* Namespace Not Ready */
		atomic_inc_32(&cmd->nc_nvme->n_nvm_ns_notrdy);
		if (cmd->nc_xfer != NULL)
			bd_error(cmd->nc_xfer, BD_ERR_NTRDY);
		return (EIO);

	default:
		return (nvme_check_unknown_cmd_status(cmd));
	}
}

static int
nvme_check_specific_cmd_status(nvme_cmd_t *cmd)
{
	nvme_cqe_t *cqe = &cmd->nc_cqe;

	switch (cqe->cqe_sf.sf_sc) {
	case NVME_CQE_SC_SPC_INV_CQ:
		/* Completion Queue Invalid */
		ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_CREATE_SQUEUE);
		atomic_inc_32(&cmd->nc_nvme->n_inv_cq_err);
		return (EINVAL);

	case NVME_CQE_SC_SPC_INV_QID:
		/* Invalid Queue Identifier */
		ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_CREATE_SQUEUE ||
		    cmd->nc_sqe.sqe_opc == NVME_OPC_DELETE_SQUEUE ||
		    cmd->nc_sqe.sqe_opc == NVME_OPC_CREATE_CQUEUE ||
		    cmd->nc_sqe.sqe_opc == NVME_OPC_DELETE_CQUEUE);
		atomic_inc_32(&cmd->nc_nvme->n_inv_qid_err);
		return (EINVAL);

	case NVME_CQE_SC_SPC_MAX_QSZ_EXC:
		/* Max Queue Size Exceeded */
		ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_CREATE_SQUEUE ||
		    cmd->nc_sqe.sqe_opc == NVME_OPC_CREATE_CQUEUE);
		atomic_inc_32(&cmd->nc_nvme->n_max_qsz_exc);
		return (EINVAL);

	case NVME_CQE_SC_SPC_ABRT_CMD_EXC:
		/* Abort Command Limit Exceeded */
		ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_ABORT);
		dev_err(cmd->nc_nvme->n_dip, CE_PANIC, "programming error: "
		    "abort command limit exceeded in cmd %p", (void *)cmd);
		return (0);

	case NVME_CQE_SC_SPC_ASYNC_EVREQ_EXC:
		/* Async Event Request Limit Exceeded */
		ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_ASYNC_EVENT);
		dev_err(cmd->nc_nvme->n_dip, CE_PANIC, "programming error: "
		    "async event request limit exceeded in cmd %p",
		    (void *)cmd);
		return (0);

	case NVME_CQE_SC_SPC_INV_INT_VECT:
		/* Invalid Interrupt Vector */
		ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_CREATE_CQUEUE);
		atomic_inc_32(&cmd->nc_nvme->n_inv_int_vect);
		return (EINVAL);

	case NVME_CQE_SC_SPC_INV_LOG_PAGE:
		/* Invalid Log Page */
		ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_GET_LOG_PAGE);
		atomic_inc_32(&cmd->nc_nvme->n_inv_log_page);
		return (EINVAL);

	case NVME_CQE_SC_SPC_INV_FORMAT:
		/* Invalid Format */
		ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_NVM_FORMAT);
		atomic_inc_32(&cmd->nc_nvme->n_inv_format);
		if (cmd->nc_xfer != NULL)
			bd_error(cmd->nc_xfer, BD_ERR_ILLRQ);
		return (EINVAL);

	case NVME_CQE_SC_SPC_INV_Q_DEL:
		/* Invalid Queue Deletion */
		ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_DELETE_CQUEUE);
		atomic_inc_32(&cmd->nc_nvme->n_inv_q_del);
		return (EINVAL);

	case NVME_CQE_SC_SPC_NVM_CNFL_ATTR:
		/* Conflicting Attributes */
		ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_NVM_DSET_MGMT ||
		    cmd->nc_sqe.sqe_opc == NVME_OPC_NVM_READ ||
		    cmd->nc_sqe.sqe_opc == NVME_OPC_NVM_WRITE);
		atomic_inc_32(&cmd->nc_nvme->n_cnfl_attr);
		if (cmd->nc_xfer != NULL)
			bd_error(cmd->nc_xfer, BD_ERR_ILLRQ);
		return (EINVAL);

	case NVME_CQE_SC_SPC_NVM_INV_PROT:
		/* Invalid Protection Information */
		ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_NVM_COMPARE ||
		    cmd->nc_sqe.sqe_opc == NVME_OPC_NVM_READ ||
		    cmd->nc_sqe.sqe_opc == NVME_OPC_NVM_WRITE);
		atomic_inc_32(&cmd->nc_nvme->n_inv_prot);
		if (cmd->nc_xfer != NULL)
			bd_error(cmd->nc_xfer, BD_ERR_ILLRQ);
		return (EINVAL);

	case NVME_CQE_SC_SPC_NVM_READONLY:
		/* Write to Read Only Range */
		ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_NVM_WRITE);
		atomic_inc_32(&cmd->nc_nvme->n_readonly);
		if (cmd->nc_xfer != NULL)
			bd_error(cmd->nc_xfer, BD_ERR_ILLRQ);
		return (EROFS);

	default:
		return (nvme_check_unknown_cmd_status(cmd));
	}
}

static inline int
nvme_check_cmd_status(nvme_cmd_t *cmd)
{
	nvme_cqe_t *cqe = &cmd->nc_cqe;

	/* take a shortcut if everything is alright */
	if (cqe->cqe_sf.sf_sct == NVME_CQE_SCT_GENERIC &&
	    cqe->cqe_sf.sf_sc == NVME_CQE_SC_GEN_SUCCESS)
		return (0);

	if (cqe->cqe_sf.sf_sct == NVME_CQE_SCT_GENERIC)
		return (nvme_check_generic_cmd_status(cmd));
	else if (cqe->cqe_sf.sf_sct == NVME_CQE_SCT_SPECIFIC)
		return (nvme_check_specific_cmd_status(cmd));
	else if (cqe->cqe_sf.sf_sct == NVME_CQE_SCT_INTEGRITY)
		return (nvme_check_integrity_cmd_status(cmd));
	else if (cqe->cqe_sf.sf_sct == NVME_CQE_SCT_VENDOR)
		return (nvme_check_vendor_cmd_status(cmd));

	return (nvme_check_unknown_cmd_status(cmd));
}

/*
 * nvme_abort_cmd_cb -- replaces nc_callback of aborted commands
 *
 * This function takes care of cleaning up aborted commands. The command
 * status is checked to catch any fatal errors.
 */
static void
nvme_abort_cmd_cb(void *arg)
{
	nvme_cmd_t *cmd = arg;

	/*
	 * Grab the command mutex. Once we have it we hold the last reference
	 * to the command and can safely free it.
	 */
	mutex_enter(&cmd->nc_mutex);
	(void) nvme_check_cmd_status(cmd);
	mutex_exit(&cmd->nc_mutex);

	nvme_free_cmd(cmd);
}

static void
nvme_abort_cmd(nvme_cmd_t *abort_cmd)
{
	nvme_t *nvme = abort_cmd->nc_nvme;
	nvme_cmd_t *cmd = nvme_alloc_cmd(nvme, KM_SLEEP);
	nvme_abort_cmd_t ac = { 0 };

	sema_p(&nvme->n_abort_sema);

	ac.b.ac_cid = abort_cmd->nc_sqe.sqe_cid;
	ac.b.ac_sqid = abort_cmd->nc_sqid;

	/*
	 * Drop the mutex of the aborted command. From this point on
	 * we must assume that the abort callback has freed the command.
	 */
	mutex_exit(&abort_cmd->nc_mutex);

	cmd->nc_sqid = 0;
	cmd->nc_sqe.sqe_opc = NVME_OPC_ABORT;
	cmd->nc_callback = nvme_wakeup_cmd;
	cmd->nc_sqe.sqe_cdw10 = ac.r;

	/*
	 * Send the ABORT to the hardware. The ABORT command will return
	 * _after_ the aborted command has completed (aborted or otherwise).
	 */
	if (nvme_admin_cmd(cmd, nvme_admin_cmd_timeout) != DDI_SUCCESS) {
		sema_v(&nvme->n_abort_sema);
		dev_err(nvme->n_dip, CE_WARN,
		    "!nvme_admin_cmd failed for ABORT");
		atomic_inc_32(&nvme->n_abort_failed);
		return;
	}
	sema_v(&nvme->n_abort_sema);

	if (nvme_check_cmd_status(cmd)) {
		dev_err(nvme->n_dip, CE_WARN,
		    "!ABORT failed with sct = %x, sc = %x",
		    cmd->nc_cqe.cqe_sf.sf_sct, cmd->nc_cqe.cqe_sf.sf_sc);
		atomic_inc_32(&nvme->n_abort_failed);
	} else {
		atomic_inc_32(&nvme->n_cmd_aborted);
	}

	nvme_free_cmd(cmd);
}

/*
 * nvme_wait_cmd -- wait for command completion or timeout
 *
 * Returns B_TRUE if the command completed normally.
 *
 * Returns B_FALSE if the command timed out and an abort was attempted. The
 * command mutex will be dropped and the command must be considered freed. The
 * freeing of the command is normally done by the abort command callback.
 *
 * In case of a serious error or a timeout of the abort command the hardware
 * will be declared dead and FMA will be notified.
 */
static boolean_t
nvme_wait_cmd(nvme_cmd_t *cmd, uint_t sec)
{
	clock_t timeout = ddi_get_lbolt() + drv_usectohz(sec * MICROSEC);
	nvme_t *nvme = cmd->nc_nvme;
	nvme_reg_csts_t csts;

	ASSERT(mutex_owned(&cmd->nc_mutex));

	while (!cmd->nc_completed) {
		if (cv_timedwait(&cmd->nc_cv, &cmd->nc_mutex, timeout) == -1)
			break;
	}

	if (cmd->nc_completed)
		return (B_TRUE);

	/*
	 * The command timed out. Change the callback to the cleanup function.
	 */
	cmd->nc_callback = nvme_abort_cmd_cb;

	/*
	 * Check controller for fatal status, any errors associated with the
	 * register or DMA handle, or for a double timeout (abort command
	 * timed out). If necessary log a warning and call FMA.
	 */
	csts.r = nvme_get32(nvme, NVME_REG_CSTS);
	dev_err(nvme->n_dip, CE_WARN, "!command timeout, "
	    "OPC = %x, CFS = %d", cmd->nc_sqe.sqe_opc, csts.b.csts_cfs);
	atomic_inc_32(&nvme->n_cmd_timeout);

	if (csts.b.csts_cfs ||
	    nvme_check_regs_hdl(nvme) ||
	    nvme_check_dma_hdl(cmd->nc_dma) ||
	    cmd->nc_sqe.sqe_opc == NVME_OPC_ABORT) {
		ddi_fm_service_impact(nvme->n_dip, DDI_SERVICE_LOST);
		nvme->n_dead = B_TRUE;
		mutex_exit(&cmd->nc_mutex);
	} else {
		/*
		 * Try to abort the command. The command mutex is released by
		 * nvme_abort_cmd().
		 * If the abort succeeds it will have freed the aborted
		 * command. If the abort fails for other reasons we must
		 * assume that the command may complete at any time, and the
		 * callback will free it for us.
		 */
		nvme_abort_cmd(cmd);
	}

	return (B_FALSE);
}

static void
nvme_wakeup_cmd(void *arg)
{
	nvme_cmd_t *cmd = arg;

	mutex_enter(&cmd->nc_mutex);
	/*
	 * There is a slight chance that this command completed shortly after
	 * the timeout was hit in nvme_wait_cmd() but before the callback was
	 * changed. Catch that case here and clean up accordingly.
	 */
	if (cmd->nc_callback == nvme_abort_cmd_cb) {
		mutex_exit(&cmd->nc_mutex);
		nvme_abort_cmd_cb(cmd);
		return;
	}

	cmd->nc_completed = B_TRUE;
	cv_signal(&cmd->nc_cv);
	mutex_exit(&cmd->nc_mutex);
}

static void
nvme_async_event_task(void *arg)
{
	nvme_cmd_t *cmd = arg;
	nvme_t *nvme = cmd->nc_nvme;
	nvme_error_log_entry_t *error_log = NULL;
	nvme_health_log_t *health_log = NULL;
	size_t logsize = 0;
	nvme_async_event_t event;
	int ret;

	/*
	 * Check for errors associated with the async request itself. The only
	 * command-specific error is "async event limit exceeded", which
	 * indicates a programming error in the driver and causes a panic in
	 * nvme_check_cmd_status().
	 *
	 * Other possible errors are various scenarios where the async request
	 * was aborted, or internal errors in the device. Internal errors are
	 * reported to FMA, the command aborts need no special handling here.
	 */
	if (nvme_check_cmd_status(cmd)) {
		dev_err(cmd->nc_nvme->n_dip, CE_WARN,
		    "!async event request returned failure, sct = %x, "
		    "sc = %x, dnr = %d, m = %d", cmd->nc_cqe.cqe_sf.sf_sct,
		    cmd->nc_cqe.cqe_sf.sf_sc, cmd->nc_cqe.cqe_sf.sf_dnr,
		    cmd->nc_cqe.cqe_sf.sf_m);

		if (cmd->nc_cqe.cqe_sf.sf_sct == NVME_CQE_SCT_GENERIC &&
		    cmd->nc_cqe.cqe_sf.sf_sc == NVME_CQE_SC_GEN_INTERNAL_ERR) {
			cmd->nc_nvme->n_dead = B_TRUE;
			ddi_fm_service_impact(cmd->nc_nvme->n_dip,
			    DDI_SERVICE_LOST);
		}
		nvme_free_cmd(cmd);
		return;
	}

	event.r = cmd->nc_cqe.cqe_dw0;

	/* Clear CQE and re-submit the async request. */
	bzero(&cmd->nc_cqe, sizeof (nvme_cqe_t));
	ret = nvme_submit_cmd(nvme->n_adminq, cmd);

	if (ret != DDI_SUCCESS) {
		dev_err(nvme->n_dip, CE_WARN,
		    "!failed to resubmit async event request");
		atomic_inc_32(&nvme->n_async_resubmit_failed);
		nvme_free_cmd(cmd);
	}

	switch (event.b.ae_type) {
	case NVME_ASYNC_TYPE_ERROR:
		if (event.b.ae_logpage == NVME_LOGPAGE_ERROR) {
			(void) nvme_get_logpage(nvme, (void **)&error_log,
			    &logsize, event.b.ae_logpage);
		} else {
			dev_err(nvme->n_dip, CE_WARN, "!wrong logpage in "
			    "async event reply: %d", event.b.ae_logpage);
			atomic_inc_32(&nvme->n_wrong_logpage);
		}

		switch (event.b.ae_info) {
		case NVME_ASYNC_ERROR_INV_SQ:
			dev_err(nvme->n_dip, CE_PANIC, "programming error: "
			    "invalid submission queue");
			return;

		case NVME_ASYNC_ERROR_INV_DBL:
			dev_err(nvme->n_dip, CE_PANIC, "programming error: "
			    "invalid doorbell write value");
			return;

		case NVME_ASYNC_ERROR_DIAGFAIL:
			dev_err(nvme->n_dip, CE_WARN, "!diagnostic failure");
			ddi_fm_service_impact(nvme->n_dip, DDI_SERVICE_LOST);
			nvme->n_dead = B_TRUE;
			atomic_inc_32(&nvme->n_diagfail_event);
			break;

		case NVME_ASYNC_ERROR_PERSISTENT:
			dev_err(nvme->n_dip, CE_WARN, "!persistent internal "
			    "device error");
			ddi_fm_service_impact(nvme->n_dip, DDI_SERVICE_LOST);
			nvme->n_dead = B_TRUE;
			atomic_inc_32(&nvme->n_persistent_event);
			break;

		case NVME_ASYNC_ERROR_TRANSIENT:
			dev_err(nvme->n_dip, CE_WARN, "!transient internal "
			    "device error");
			/* TODO: send ereport */
			atomic_inc_32(&nvme->n_transient_event);
			break;

		case NVME_ASYNC_ERROR_FW_LOAD:
			dev_err(nvme->n_dip, CE_WARN,
			    "!firmware image load error");
			atomic_inc_32(&nvme->n_fw_load_event);
			break;
		}
		break;

	case NVME_ASYNC_TYPE_HEALTH:
		if (event.b.ae_logpage == NVME_LOGPAGE_HEALTH) {
			(void) nvme_get_logpage(nvme, (void **)&health_log,
			    &logsize, event.b.ae_logpage, -1);
		} else {
			dev_err(nvme->n_dip, CE_WARN, "!wrong logpage in "
			    "async event reply: %d", event.b.ae_logpage);
			atomic_inc_32(&nvme->n_wrong_logpage);
		}

		switch (event.b.ae_info) {
		case NVME_ASYNC_HEALTH_RELIABILITY:
			dev_err(nvme->n_dip, CE_WARN,
			    "!device reliability compromised");
			/* TODO: send ereport */
			atomic_inc_32(&nvme->n_reliability_event);
			break;

		case NVME_ASYNC_HEALTH_TEMPERATURE:
			dev_err(nvme->n_dip, CE_WARN,
			    "!temperature above threshold");
			/* TODO: send ereport */
			atomic_inc_32(&nvme->n_temperature_event);
			break;

		case NVME_ASYNC_HEALTH_SPARE:
			dev_err(nvme->n_dip, CE_WARN,
			    "!spare space below threshold");
			/* TODO: send ereport */
			atomic_inc_32(&nvme->n_spare_event);
			break;
		}
		break;

	case NVME_ASYNC_TYPE_VENDOR:
		dev_err(nvme->n_dip, CE_WARN, "!vendor specific async event "
		    "received, info = %x, logpage = %x", event.b.ae_info,
		    event.b.ae_logpage);
		atomic_inc_32(&nvme->n_vendor_event);
		break;

	default:
		dev_err(nvme->n_dip, CE_WARN, "!unknown async event received, "
		    "type = %x, info = %x, logpage = %x", event.b.ae_type,
		    event.b.ae_info, event.b.ae_logpage);
		atomic_inc_32(&nvme->n_unknown_event);
		break;
	}

	if (error_log)
		kmem_free(error_log, logsize);

	if (health_log)
		kmem_free(health_log, logsize);
}
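
/*
 * All synchronous admin commands below follow the same pattern; as an
 * illustrative sketch, with error handling elided:
 *
 *	cmd = nvme_alloc_cmd(nvme, KM_SLEEP);
 *	cmd->nc_sqid = 0;
 *	cmd->nc_callback = nvme_wakeup_cmd;
 *	cmd->nc_sqe.sqe_opc = <some admin opcode>;
 *	if (nvme_admin_cmd(cmd, nvme_admin_cmd_timeout) != DDI_SUCCESS)
 *		return (...);	<- cmd is already gone, don't touch it
 *	(void) nvme_check_cmd_status(cmd);
 *	nvme_free_cmd(cmd);
 *
 * Note that nvme_admin_cmd() consumes the command on failure: either
 * submission failed and it was freed immediately, or the command timed out
 * and the abort callback will eventually free it.
 */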

static int
nvme_admin_cmd(nvme_cmd_t *cmd, int sec)
{
	int ret;

	mutex_enter(&cmd->nc_mutex);
	ret = nvme_submit_cmd(cmd->nc_nvme->n_adminq, cmd);

	if (ret != DDI_SUCCESS) {
		mutex_exit(&cmd->nc_mutex);
		dev_err(cmd->nc_nvme->n_dip, CE_WARN,
		    "!nvme_submit_cmd failed");
		atomic_inc_32(&cmd->nc_nvme->n_admin_queue_full);
		nvme_free_cmd(cmd);
		return (DDI_FAILURE);
	}

	if (nvme_wait_cmd(cmd, sec) == B_FALSE) {
		/*
		 * The command timed out. An abort command was posted that
		 * will take care of the cleanup.
		 */
		return (DDI_FAILURE);
	}
	mutex_exit(&cmd->nc_mutex);

	return (DDI_SUCCESS);
}

static int
nvme_async_event(nvme_t *nvme)
{
	nvme_cmd_t *cmd = nvme_alloc_cmd(nvme, KM_SLEEP);
	int ret;

	cmd->nc_sqid = 0;
	cmd->nc_sqe.sqe_opc = NVME_OPC_ASYNC_EVENT;
	cmd->nc_callback = nvme_async_event_task;

	ret = nvme_submit_cmd(nvme->n_adminq, cmd);

	if (ret != DDI_SUCCESS) {
		dev_err(nvme->n_dip, CE_WARN,
		    "!nvme_submit_cmd failed for ASYNCHRONOUS EVENT");
		nvme_free_cmd(cmd);
		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}

static int
nvme_format_nvm(nvme_t *nvme, uint32_t nsid, uint8_t lbaf, boolean_t ms,
    uint8_t pi, boolean_t pil, uint8_t ses)
{
	nvme_cmd_t *cmd = nvme_alloc_cmd(nvme, KM_SLEEP);
	nvme_format_nvm_t format_nvm = { 0 };
	int ret;

	format_nvm.b.fm_lbaf = lbaf & 0xf;
	format_nvm.b.fm_ms = ms ? 1 : 0;
	format_nvm.b.fm_pi = pi & 0x7;
	format_nvm.b.fm_pil = pil ? 1 : 0;
	format_nvm.b.fm_ses = ses & 0x7;

	cmd->nc_sqid = 0;
	cmd->nc_callback = nvme_wakeup_cmd;
	cmd->nc_sqe.sqe_nsid = nsid;
	cmd->nc_sqe.sqe_opc = NVME_OPC_NVM_FORMAT;
	cmd->nc_sqe.sqe_cdw10 = format_nvm.r;

	/*
	 * Some devices like Samsung SM951 don't allow formatting of all
	 * namespaces in one command. Handle that gracefully.
	 */
	if (nsid == (uint32_t)-1)
		cmd->nc_dontpanic = B_TRUE;

	if ((ret = nvme_admin_cmd(cmd, nvme_format_cmd_timeout))
	    != DDI_SUCCESS) {
		dev_err(nvme->n_dip, CE_WARN,
		    "!nvme_admin_cmd failed for FORMAT NVM");
		return (EIO);
	}

	if ((ret = nvme_check_cmd_status(cmd)) != 0) {
		dev_err(nvme->n_dip, CE_WARN,
		    "!FORMAT failed with sct = %x, sc = %x",
		    cmd->nc_cqe.cqe_sf.sf_sct, cmd->nc_cqe.cqe_sf.sf_sc);
	}

	nvme_free_cmd(cmd);
	return (ret);
}
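
/*
 * The NUMD field of GET LOG PAGE is a zero-based dword count, hence the
 * "*bufsize / sizeof (uint32_t) - 1" below. As a worked example, the 512
 * byte health log yields a NUMD of 512 / 4 - 1 = 127, which the controller
 * interprets as 128 dwords; the DMA buffer must therefore cover NUMD + 1
 * dwords.
 */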

static int
nvme_get_logpage(nvme_t *nvme, void **buf, size_t *bufsize, uint8_t logpage,
    ...)
{
	nvme_cmd_t *cmd = nvme_alloc_cmd(nvme, KM_SLEEP);
	nvme_getlogpage_t getlogpage = { 0 };
	va_list ap;
	int ret = DDI_FAILURE;

	va_start(ap, logpage);

	cmd->nc_sqid = 0;
	cmd->nc_callback = nvme_wakeup_cmd;
	cmd->nc_sqe.sqe_opc = NVME_OPC_GET_LOG_PAGE;

	getlogpage.b.lp_lid = logpage;

	switch (logpage) {
	case NVME_LOGPAGE_ERROR:
		cmd->nc_sqe.sqe_nsid = (uint32_t)-1;
		/*
		 * The GET LOG PAGE command can use at most 2 pages to return
		 * data, PRP lists are not supported.
		 */
		*bufsize = MIN(2 * nvme->n_pagesize,
		    nvme->n_error_log_len * sizeof (nvme_error_log_entry_t));
		break;

	case NVME_LOGPAGE_HEALTH:
		cmd->nc_sqe.sqe_nsid = va_arg(ap, uint32_t);
		*bufsize = sizeof (nvme_health_log_t);
		break;

	case NVME_LOGPAGE_FWSLOT:
		cmd->nc_sqe.sqe_nsid = (uint32_t)-1;
		*bufsize = sizeof (nvme_fwslot_log_t);
		break;

	default:
		dev_err(nvme->n_dip, CE_WARN, "!unknown log page requested: %d",
		    logpage);
		atomic_inc_32(&nvme->n_unknown_logpage);
		goto fail;
	}

	va_end(ap);

	getlogpage.b.lp_numd = *bufsize / sizeof (uint32_t) - 1;

	cmd->nc_sqe.sqe_cdw10 = getlogpage.r;

	if (nvme_zalloc_dma(nvme,
	    (getlogpage.b.lp_numd + 1) * sizeof (uint32_t),
	    DDI_DMA_READ, &nvme->n_prp_dma_attr, &cmd->nc_dma) != DDI_SUCCESS) {
		dev_err(nvme->n_dip, CE_WARN,
		    "!nvme_zalloc_dma failed for GET LOG PAGE");
		goto fail;
	}

	if (cmd->nc_dma->nd_ncookie > 2) {
		dev_err(nvme->n_dip, CE_WARN,
		    "!too many DMA cookies for GET LOG PAGE");
		atomic_inc_32(&nvme->n_too_many_cookies);
		goto fail;
	}

	cmd->nc_sqe.sqe_dptr.d_prp[0] = cmd->nc_dma->nd_cookie.dmac_laddress;
	if (cmd->nc_dma->nd_ncookie > 1) {
		ddi_dma_nextcookie(cmd->nc_dma->nd_dmah,
		    &cmd->nc_dma->nd_cookie);
		cmd->nc_sqe.sqe_dptr.d_prp[1] =
		    cmd->nc_dma->nd_cookie.dmac_laddress;
	}

	if (nvme_admin_cmd(cmd, nvme_admin_cmd_timeout) != DDI_SUCCESS) {
		dev_err(nvme->n_dip, CE_WARN,
		    "!nvme_admin_cmd failed for GET LOG PAGE");
		return (ret);
	}

	if (nvme_check_cmd_status(cmd)) {
		dev_err(nvme->n_dip, CE_WARN,
		    "!GET LOG PAGE failed with sct = %x, sc = %x",
		    cmd->nc_cqe.cqe_sf.sf_sct, cmd->nc_cqe.cqe_sf.sf_sc);
		goto fail;
	}

	*buf = kmem_alloc(*bufsize, KM_SLEEP);
	bcopy(cmd->nc_dma->nd_memp, *buf, *bufsize);

	ret = DDI_SUCCESS;

fail:
	nvme_free_cmd(cmd);

	return (ret);
}
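
/*
 * IDENTIFY returns a single 4k data structure; CDW10 selects between the
 * controller data (CNS = 1) and the data of the namespace named in the NSID
 * field (CNS = 0). A typical usage sketch (assumption: field names as used
 * by the attach code, which is outside this excerpt):
 *
 *	nvme->n_idctl = nvme_identify(nvme, 0);
 *	ns->ns_idns = nvme_identify(nvme, ns->ns_id);
 */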

static void *
nvme_identify(nvme_t *nvme, uint32_t nsid)
{
	nvme_cmd_t *cmd = nvme_alloc_cmd(nvme, KM_SLEEP);
	void *buf = NULL;

	cmd->nc_sqid = 0;
	cmd->nc_callback = nvme_wakeup_cmd;
	cmd->nc_sqe.sqe_opc = NVME_OPC_IDENTIFY;
	cmd->nc_sqe.sqe_nsid = nsid;
	cmd->nc_sqe.sqe_cdw10 = nsid ? NVME_IDENTIFY_NSID : NVME_IDENTIFY_CTRL;

	if (nvme_zalloc_dma(nvme, NVME_IDENTIFY_BUFSIZE, DDI_DMA_READ,
	    &nvme->n_prp_dma_attr, &cmd->nc_dma) != DDI_SUCCESS) {
		dev_err(nvme->n_dip, CE_WARN,
		    "!nvme_zalloc_dma failed for IDENTIFY");
		goto fail;
	}

	if (cmd->nc_dma->nd_ncookie > 2) {
		dev_err(nvme->n_dip, CE_WARN,
		    "!too many DMA cookies for IDENTIFY");
		atomic_inc_32(&nvme->n_too_many_cookies);
		goto fail;
	}

	cmd->nc_sqe.sqe_dptr.d_prp[0] = cmd->nc_dma->nd_cookie.dmac_laddress;
	if (cmd->nc_dma->nd_ncookie > 1) {
		ddi_dma_nextcookie(cmd->nc_dma->nd_dmah,
		    &cmd->nc_dma->nd_cookie);
		cmd->nc_sqe.sqe_dptr.d_prp[1] =
		    cmd->nc_dma->nd_cookie.dmac_laddress;
	}

	if (nvme_admin_cmd(cmd, nvme_admin_cmd_timeout) != DDI_SUCCESS) {
		dev_err(nvme->n_dip, CE_WARN,
		    "!nvme_admin_cmd failed for IDENTIFY");
		return (NULL);
	}

	if (nvme_check_cmd_status(cmd)) {
		dev_err(nvme->n_dip, CE_WARN,
		    "!IDENTIFY failed with sct = %x, sc = %x",
		    cmd->nc_cqe.cqe_sf.sf_sct, cmd->nc_cqe.cqe_sf.sf_sc);
		goto fail;
	}

	buf = kmem_alloc(NVME_IDENTIFY_BUFSIZE, KM_SLEEP);
	bcopy(cmd->nc_dma->nd_memp, buf, NVME_IDENTIFY_BUFSIZE);

fail:
	nvme_free_cmd(cmd);

	return (buf);
}

static boolean_t
nvme_set_features(nvme_t *nvme, uint32_t nsid, uint8_t feature, uint32_t val,
    uint32_t *res)
{
	_NOTE(ARGUNUSED(nsid));
	nvme_cmd_t *cmd = nvme_alloc_cmd(nvme, KM_SLEEP);
	boolean_t ret = B_FALSE;

	ASSERT(res != NULL);

	cmd->nc_sqid = 0;
	cmd->nc_callback = nvme_wakeup_cmd;
	cmd->nc_sqe.sqe_opc = NVME_OPC_SET_FEATURES;
	cmd->nc_sqe.sqe_cdw10 = feature;
	cmd->nc_sqe.sqe_cdw11 = val;

	switch (feature) {
	case NVME_FEAT_WRITE_CACHE:
		if (!nvme->n_write_cache_present)
			goto fail;
		break;

	case NVME_FEAT_NQUEUES:
		break;

	default:
		goto fail;
	}

	if (nvme_admin_cmd(cmd, nvme_admin_cmd_timeout) != DDI_SUCCESS) {
		dev_err(nvme->n_dip, CE_WARN,
		    "!nvme_admin_cmd failed for SET FEATURES");
		return (ret);
	}

	if (nvme_check_cmd_status(cmd)) {
		dev_err(nvme->n_dip, CE_WARN,
		    "!SET FEATURES %d failed with sct = %x, sc = %x",
		    feature, cmd->nc_cqe.cqe_sf.sf_sct,
		    cmd->nc_cqe.cqe_sf.sf_sc);
		goto fail;
	}

	*res = cmd->nc_cqe.cqe_dw0;
	ret = B_TRUE;

fail:
	nvme_free_cmd(cmd);
	return (ret);
}
NVME_FEAT_LBA_RANGE: 1843 if (!nvme->n_lba_range_supported) 1844 goto fail; 1845 1846 /* 1847 * The LBA Range Type feature is optional. There doesn't seem 1848 * to be a method of detecting whether it is supported other than 1849 * using it. This will cause an "invalid field in command" error, 1850 * which is normally considered a programming error and causes a 1851 * panic in nvme_check_generic_cmd_status(). 1852 */ 1853 cmd->nc_dontpanic = B_TRUE; 1854 cmd->nc_sqe.sqe_nsid = nsid; 1855 ASSERT(bufsize != NULL); 1856 *bufsize = NVME_LBA_RANGE_BUFSIZE; 1857 1858 break; 1859 1860 case NVME_FEAT_AUTO_PST: 1861 if (!nvme->n_auto_pst_supported) 1862 goto fail; 1863 1864 ASSERT(bufsize != NULL); 1865 *bufsize = NVME_AUTO_PST_BUFSIZE; 1866 break; 1867 1868 default: 1869 goto fail; 1870 } 1871 1872 if (bufsize != NULL && *bufsize != 0) { 1873 if (nvme_zalloc_dma(nvme, *bufsize, DDI_DMA_READ, 1874 &nvme->n_prp_dma_attr, &cmd->nc_dma) != DDI_SUCCESS) { 1875 dev_err(nvme->n_dip, CE_WARN, 1876 "!nvme_zalloc_dma failed for GET FEATURES"); 1877 goto fail; 1878 } 1879 1880 if (cmd->nc_dma->nd_ncookie > 2) { 1881 dev_err(nvme->n_dip, CE_WARN, 1882 "!too many DMA cookies for GET FEATURES"); 1883 atomic_inc_32(&nvme->n_too_many_cookies); 1884 goto fail; 1885 } 1886 1887 cmd->nc_sqe.sqe_dptr.d_prp[0] = 1888 cmd->nc_dma->nd_cookie.dmac_laddress; 1889 if (cmd->nc_dma->nd_ncookie > 1) { 1890 ddi_dma_nextcookie(cmd->nc_dma->nd_dmah, 1891 &cmd->nc_dma->nd_cookie); 1892 cmd->nc_sqe.sqe_dptr.d_prp[1] = 1893 cmd->nc_dma->nd_cookie.dmac_laddress; 1894 } 1895 } 1896 1897 if (nvme_admin_cmd(cmd, nvme_admin_cmd_timeout) != DDI_SUCCESS) { 1898 dev_err(nvme->n_dip, CE_WARN, 1899 "!nvme_admin_cmd failed for GET FEATURES"); 1900 return (ret); 1901 } 1902 1903 if (nvme_check_cmd_status(cmd)) { 1904 if (feature == NVME_FEAT_LBA_RANGE && 1905 cmd->nc_cqe.cqe_sf.sf_sct == NVME_CQE_SCT_GENERIC && 1906 cmd->nc_cqe.cqe_sf.sf_sc == NVME_CQE_SC_GEN_INV_FLD) 1907 nvme->n_lba_range_supported = B_FALSE; 1908 else 1909 dev_err(nvme->n_dip, CE_WARN, 1910 "!GET FEATURES %d failed with sct = %x, sc = %x", 1911 feature, cmd->nc_cqe.cqe_sf.sf_sct, 1912 cmd->nc_cqe.cqe_sf.sf_sc); 1913 goto fail; 1914 } 1915 1916 if (bufsize != NULL && *bufsize != 0) { 1917 ASSERT(buf != NULL); 1918 *buf = kmem_alloc(*bufsize, KM_SLEEP); 1919 bcopy(cmd->nc_dma->nd_memp, *buf, *bufsize); 1920 } 1921 1922 *res = cmd->nc_cqe.cqe_dw0; 1923 ret = B_TRUE; 1924 1925 fail: 1926 nvme_free_cmd(cmd); 1927 return (ret); 1928 } 1929 1930 static boolean_t 1931 nvme_write_cache_set(nvme_t *nvme, boolean_t enable) 1932 { 1933 nvme_write_cache_t nwc = { 0 }; 1934 1935 if (enable) 1936 nwc.b.wc_wce = 1; 1937 1938 if (!nvme_set_features(nvme, 0, NVME_FEAT_WRITE_CACHE, nwc.r, &nwc.r)) 1939 return (B_FALSE); 1940 1941 return (B_TRUE); 1942 } 1943 1944 static int 1945 nvme_set_nqueues(nvme_t *nvme, uint16_t nqueues) 1946 { 1947 nvme_nqueues_t nq = { 0 }; 1948 1949 nq.b.nq_nsq = nq.b.nq_ncq = nqueues - 1; 1950 1951 if (!nvme_set_features(nvme, 0, NVME_FEAT_NQUEUES, nq.r, &nq.r)) { 1952 return (0); 1953 } 1954 1955 /* 1956 * Always use the same number of submission and completion queues, and 1957 * never use more than the requested number of queues.
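 *
 * Worked example (illustration only): the Number of Queues feature uses
 * 0's based values, so requesting 8 queue pairs sets nq_nsq = nq_ncq = 7
 * above. If the controller then grants nsq = 3 and ncq = 7 in dw0 (i.e.
 * 4 submission and 8 completion queues), the calculation below yields
 * MIN(8, MIN(3, 7) + 1) = 4 usable I/O queue pairs.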
1958 */ 1959 return (MIN(nqueues, MIN(nq.b.nq_nsq, nq.b.nq_ncq) + 1)); 1960 } 1961 1962 static int 1963 nvme_create_io_qpair(nvme_t *nvme, nvme_qpair_t *qp, uint16_t idx) 1964 { 1965 nvme_cmd_t *cmd = nvme_alloc_cmd(nvme, KM_SLEEP); 1966 nvme_create_queue_dw10_t dw10 = { 0 }; 1967 nvme_create_cq_dw11_t c_dw11 = { 0 }; 1968 nvme_create_sq_dw11_t s_dw11 = { 0 }; 1969 1970 dw10.b.q_qid = idx; 1971 dw10.b.q_qsize = qp->nq_nentry - 1; 1972 1973 c_dw11.b.cq_pc = 1; 1974 c_dw11.b.cq_ien = 1; 1975 c_dw11.b.cq_iv = idx % nvme->n_intr_cnt; 1976 1977 cmd->nc_sqid = 0; 1978 cmd->nc_callback = nvme_wakeup_cmd; 1979 cmd->nc_sqe.sqe_opc = NVME_OPC_CREATE_CQUEUE; 1980 cmd->nc_sqe.sqe_cdw10 = dw10.r; 1981 cmd->nc_sqe.sqe_cdw11 = c_dw11.r; 1982 cmd->nc_sqe.sqe_dptr.d_prp[0] = qp->nq_cqdma->nd_cookie.dmac_laddress; 1983 1984 if (nvme_admin_cmd(cmd, nvme_admin_cmd_timeout) != DDI_SUCCESS) { 1985 dev_err(nvme->n_dip, CE_WARN, 1986 "!nvme_admin_cmd failed for CREATE CQUEUE"); 1987 return (DDI_FAILURE); 1988 } 1989 1990 if (nvme_check_cmd_status(cmd)) { 1991 dev_err(nvme->n_dip, CE_WARN, 1992 "!CREATE CQUEUE failed with sct = %x, sc = %x", 1993 cmd->nc_cqe.cqe_sf.sf_sct, cmd->nc_cqe.cqe_sf.sf_sc); 1994 nvme_free_cmd(cmd); 1995 return (DDI_FAILURE); 1996 } 1997 1998 nvme_free_cmd(cmd); 1999 2000 s_dw11.b.sq_pc = 1; 2001 s_dw11.b.sq_cqid = idx; 2002 2003 cmd = nvme_alloc_cmd(nvme, KM_SLEEP); 2004 cmd->nc_sqid = 0; 2005 cmd->nc_callback = nvme_wakeup_cmd; 2006 cmd->nc_sqe.sqe_opc = NVME_OPC_CREATE_SQUEUE; 2007 cmd->nc_sqe.sqe_cdw10 = dw10.r; 2008 cmd->nc_sqe.sqe_cdw11 = s_dw11.r; 2009 cmd->nc_sqe.sqe_dptr.d_prp[0] = qp->nq_sqdma->nd_cookie.dmac_laddress; 2010 2011 if (nvme_admin_cmd(cmd, nvme_admin_cmd_timeout) != DDI_SUCCESS) { 2012 dev_err(nvme->n_dip, CE_WARN, 2013 "!nvme_admin_cmd failed for CREATE SQUEUE"); 2014 return (DDI_FAILURE); 2015 } 2016 2017 if (nvme_check_cmd_status(cmd)) { 2018 dev_err(nvme->n_dip, CE_WARN, 2019 "!CREATE SQUEUE failed with sct = %x, sc = %x", 2020 cmd->nc_cqe.cqe_sf.sf_sct, cmd->nc_cqe.cqe_sf.sf_sc); 2021 nvme_free_cmd(cmd); 2022 return (DDI_FAILURE); 2023 } 2024 2025 nvme_free_cmd(cmd); 2026 2027 return (DDI_SUCCESS); 2028 } 2029 2030 static boolean_t 2031 nvme_reset(nvme_t *nvme, boolean_t quiesce) 2032 { 2033 nvme_reg_csts_t csts; 2034 int i; 2035 2036 nvme_put32(nvme, NVME_REG_CC, 0); 2037 2038 csts.r = nvme_get32(nvme, NVME_REG_CSTS); 2039 if (csts.b.csts_rdy == 1) { 2040 nvme_put32(nvme, NVME_REG_CC, 0); 2041 for (i = 0; i != nvme->n_timeout * 10; i++) { 2042 csts.r = nvme_get32(nvme, NVME_REG_CSTS); 2043 if (csts.b.csts_rdy == 0) 2044 break; 2045 2046 if (quiesce) 2047 drv_usecwait(50000); 2048 else 2049 delay(drv_usectohz(50000)); 2050 } 2051 } 2052 2053 nvme_put32(nvme, NVME_REG_AQA, 0); 2054 nvme_put32(nvme, NVME_REG_ASQ, 0); 2055 nvme_put32(nvme, NVME_REG_ACQ, 0); 2056 2057 csts.r = nvme_get32(nvme, NVME_REG_CSTS); 2058 return (csts.b.csts_rdy == 0 ? 
B_TRUE : B_FALSE); 2059 } 2060 2061 static void 2062 nvme_shutdown(nvme_t *nvme, int mode, boolean_t quiesce) 2063 { 2064 nvme_reg_cc_t cc; 2065 nvme_reg_csts_t csts; 2066 int i; 2067 2068 ASSERT(mode == NVME_CC_SHN_NORMAL || mode == NVME_CC_SHN_ABRUPT); 2069 2070 cc.r = nvme_get32(nvme, NVME_REG_CC); 2071 cc.b.cc_shn = mode & 0x3; 2072 nvme_put32(nvme, NVME_REG_CC, cc.r); 2073 2074 for (i = 0; i != 10; i++) { 2075 csts.r = nvme_get32(nvme, NVME_REG_CSTS); 2076 if (csts.b.csts_shst == NVME_CSTS_SHN_COMPLETE) 2077 break; 2078 2079 if (quiesce) 2080 drv_usecwait(100000); 2081 else 2082 delay(drv_usectohz(100000)); 2083 } 2084 } 2085 2086 2087 static void 2088 nvme_prepare_devid(nvme_t *nvme, uint32_t nsid) 2089 { 2090 /* 2091 * Section 7.7 of the spec describes how to get a unique ID for 2092 * the controller: the vendor ID, the model name and the serial 2093 * number shall be unique when combined. 2094 * 2095 * If a namespace has no EUI64 we use the above and add the hex 2096 * namespace ID to get a unique ID for the namespace. 2097 */ 2098 char model[sizeof (nvme->n_idctl->id_model) + 1]; 2099 char serial[sizeof (nvme->n_idctl->id_serial) + 1]; 2100 2101 bcopy(nvme->n_idctl->id_model, model, sizeof (nvme->n_idctl->id_model)); 2102 bcopy(nvme->n_idctl->id_serial, serial, 2103 sizeof (nvme->n_idctl->id_serial)); 2104 2105 model[sizeof (nvme->n_idctl->id_model)] = '\0'; 2106 serial[sizeof (nvme->n_idctl->id_serial)] = '\0'; 2107 2108 nvme->n_ns[nsid - 1].ns_devid = kmem_asprintf("%4X-%s-%s-%X", 2109 nvme->n_idctl->id_vid, model, serial, nsid); 2110 } 2111 2112 static int 2113 nvme_init_ns(nvme_t *nvme, int nsid) 2114 { 2115 nvme_namespace_t *ns = &nvme->n_ns[nsid - 1]; 2116 nvme_identify_nsid_t *idns; 2117 int last_rp; 2118 2119 ns->ns_nvme = nvme; 2120 idns = nvme_identify(nvme, nsid); 2121 2122 if (idns == NULL) { 2123 dev_err(nvme->n_dip, CE_WARN, 2124 "!failed to identify namespace %d", nsid); 2125 return (DDI_FAILURE); 2126 } 2127 2128 ns->ns_idns = idns; 2129 ns->ns_id = nsid; 2130 ns->ns_block_count = idns->id_nsize; 2131 ns->ns_block_size = 2132 1 << idns->id_lbaf[idns->id_flbas.lba_format].lbaf_lbads; 2133 ns->ns_best_block_size = ns->ns_block_size; 2134 2135 /* 2136 * Get the EUI64 if present. Use it for devid and device node names. 2137 */ 2138 if (NVME_VERSION_ATLEAST(&nvme->n_version, 1, 1)) 2139 bcopy(idns->id_eui64, ns->ns_eui64, sizeof (ns->ns_eui64)); 2140 2141 /*LINTED: E_BAD_PTR_CAST_ALIGN*/ 2142 if (*(uint64_t *)ns->ns_eui64 != 0) { 2143 uint8_t *eui64 = ns->ns_eui64; 2144 2145 (void) snprintf(ns->ns_name, sizeof (ns->ns_name), 2146 "%02x%02x%02x%02x%02x%02x%02x%02x", 2147 eui64[0], eui64[1], eui64[2], eui64[3], 2148 eui64[4], eui64[5], eui64[6], eui64[7]); 2149 } else { 2150 (void) snprintf(ns->ns_name, sizeof (ns->ns_name), "%d", 2151 ns->ns_id); 2152 2153 nvme_prepare_devid(nvme, ns->ns_id); 2154 } 2155 2156 /* 2157 * Find the LBA format with no metadata and the best relative 2158 * performance. A value of 3 means "degraded", 0 is best. 
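 *
 * For illustration: the block size of a format is 1 << lbaf_lbads, so
 * lbads = 9 means 512 bytes and lbads = 12 means 4096 bytes. Given the
 * hypothetical formats
 *
 *	lbaf[0] = { lbads =  9, ms = 0, rp = 2 }
 *	lbaf[1] = { lbads = 12, ms = 0, rp = 0 }
 *	lbaf[2] = { lbads =  9, ms = 8, rp = 0 }
 *
 * the loop below skips lbaf[2] because it uses metadata and prefers
 * lbaf[1] over lbaf[0] for its better relative performance, yielding a
 * best block size of 4096 bytes.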
2159 */ 2160 last_rp = 3; 2161 for (int j = 0; j <= idns->id_nlbaf; j++) { 2162 if (idns->id_lbaf[j].lbaf_lbads == 0) 2163 break; 2164 if (idns->id_lbaf[j].lbaf_ms != 0) 2165 continue; 2166 if (idns->id_lbaf[j].lbaf_rp >= last_rp) 2167 continue; 2168 last_rp = idns->id_lbaf[j].lbaf_rp; 2169 ns->ns_best_block_size = 2170 1 << idns->id_lbaf[j].lbaf_lbads; 2171 } 2172 2173 if (ns->ns_best_block_size < nvme->n_min_block_size) 2174 ns->ns_best_block_size = nvme->n_min_block_size; 2175 2176 /* 2177 * We currently don't support namespaces that use either: 2178 * - thin provisioning 2179 * - protection information 2180 */ 2181 if (idns->id_nsfeat.f_thin || 2182 idns->id_dps.dp_pinfo) { 2183 dev_err(nvme->n_dip, CE_WARN, 2184 "!ignoring namespace %d, unsupported features: " 2185 "thin = %d, pinfo = %d", nsid, 2186 idns->id_nsfeat.f_thin, idns->id_dps.dp_pinfo); 2187 ns->ns_ignore = B_TRUE; 2188 } else { 2189 ns->ns_ignore = B_FALSE; 2190 } 2191 2192 return (DDI_SUCCESS); 2193 } 2194 2195 static int 2196 nvme_init(nvme_t *nvme) 2197 { 2198 nvme_reg_cc_t cc = { 0 }; 2199 nvme_reg_aqa_t aqa = { 0 }; 2200 nvme_reg_asq_t asq = { 0 }; 2201 nvme_reg_acq_t acq = { 0 }; 2202 nvme_reg_cap_t cap; 2203 nvme_reg_vs_t vs; 2204 nvme_reg_csts_t csts; 2205 int i = 0; 2206 int nqueues; 2207 char model[sizeof (nvme->n_idctl->id_model) + 1]; 2208 char *vendor, *product; 2209 2210 /* Check controller version */ 2211 vs.r = nvme_get32(nvme, NVME_REG_VS); 2212 nvme->n_version.v_major = vs.b.vs_mjr; 2213 nvme->n_version.v_minor = vs.b.vs_mnr; 2214 dev_err(nvme->n_dip, CE_CONT, "?NVMe spec version %d.%d", 2215 nvme->n_version.v_major, nvme->n_version.v_minor); 2216 2217 if (NVME_VERSION_HIGHER(&nvme->n_version, 2218 nvme_version_major, nvme_version_minor)) { 2219 dev_err(nvme->n_dip, CE_WARN, "!no support for version > %d.%d", 2220 nvme_version_major, nvme_version_minor); 2221 if (nvme->n_strict_version) 2222 goto fail; 2223 } 2224 2225 /* retrieve controller configuration */ 2226 cap.r = nvme_get64(nvme, NVME_REG_CAP); 2227 2228 if ((cap.b.cap_css & NVME_CAP_CSS_NVM) == 0) { 2229 dev_err(nvme->n_dip, CE_WARN, 2230 "!NVM command set not supported by hardware"); 2231 goto fail; 2232 } 2233 2234 nvme->n_nssr_supported = cap.b.cap_nssrs; 2235 nvme->n_doorbell_stride = 4 << cap.b.cap_dstrd; 2236 nvme->n_timeout = cap.b.cap_to; 2237 nvme->n_arbitration_mechanisms = cap.b.cap_ams; 2238 nvme->n_cont_queues_reqd = cap.b.cap_cqr; 2239 nvme->n_max_queue_entries = cap.b.cap_mqes + 1; 2240 2241 /* 2242 * The MPSMIN and MPSMAX fields in the CAP register use 0 to specify 2243 * the base page size of 4k (1<<12), so add 12 here to get the real 2244 * page size value. 2245 */ 2246 nvme->n_pageshift = MIN(MAX(cap.b.cap_mpsmin + 12, PAGESHIFT), 2247 cap.b.cap_mpsmax + 12); 2248 nvme->n_pagesize = 1UL << (nvme->n_pageshift); 2249 2250 /* 2251 * Set up Queue DMA to transfer at least 1 page-aligned page at a time. 2252 */ 2253 nvme->n_queue_dma_attr.dma_attr_align = nvme->n_pagesize; 2254 nvme->n_queue_dma_attr.dma_attr_minxfer = nvme->n_pagesize; 2255 2256 /* 2257 * Set up PRP DMA to transfer 1 page-aligned page at a time. 2258 * Maxxfer may be increased after we have identified the controller limits. 2259 */ 2260 nvme->n_prp_dma_attr.dma_attr_maxxfer = nvme->n_pagesize; 2261 nvme->n_prp_dma_attr.dma_attr_minxfer = nvme->n_pagesize; 2262 nvme->n_prp_dma_attr.dma_attr_align = nvme->n_pagesize; 2263 nvme->n_prp_dma_attr.dma_attr_seg = nvme->n_pagesize - 1; 2264 2265 /* 2266 * Reset controller if it's still in ready state.
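 *
 * A note on the timeout arithmetic used by nvme_reset(): CAP.TO
 * (n_timeout) is specified in 500ms units, and the reset loop polls
 * CSTS.RDY every 50ms, so n_timeout * 10 polls cover exactly the
 * worst case the controller advertised. E.g. CAP.TO = 20 allows
 * 20 * 500ms = 10s, i.e. up to 200 polls.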
2267 */ 2268 if (nvme_reset(nvme, B_FALSE) == B_FALSE) { 2269 dev_err(nvme->n_dip, CE_WARN, "!unable to reset controller"); 2270 ddi_fm_service_impact(nvme->n_dip, DDI_SERVICE_LOST); 2271 nvme->n_dead = B_TRUE; 2272 goto fail; 2273 } 2274 2275 /* 2276 * Create the admin queue pair. 2277 */ 2278 if (nvme_alloc_qpair(nvme, nvme->n_admin_queue_len, &nvme->n_adminq, 0) 2279 != DDI_SUCCESS) { 2280 dev_err(nvme->n_dip, CE_WARN, 2281 "!unable to allocate admin qpair"); 2282 goto fail; 2283 } 2284 nvme->n_ioq = kmem_alloc(sizeof (nvme_qpair_t *), KM_SLEEP); 2285 nvme->n_ioq[0] = nvme->n_adminq; 2286 2287 nvme->n_progress |= NVME_ADMIN_QUEUE; 2288 2289 (void) ddi_prop_update_int(DDI_DEV_T_NONE, nvme->n_dip, 2290 "admin-queue-len", nvme->n_admin_queue_len); 2291 2292 aqa.b.aqa_asqs = aqa.b.aqa_acqs = nvme->n_admin_queue_len - 1; 2293 asq = nvme->n_adminq->nq_sqdma->nd_cookie.dmac_laddress; 2294 acq = nvme->n_adminq->nq_cqdma->nd_cookie.dmac_laddress; 2295 2296 ASSERT((asq & (nvme->n_pagesize - 1)) == 0); 2297 ASSERT((acq & (nvme->n_pagesize - 1)) == 0); 2298 2299 nvme_put32(nvme, NVME_REG_AQA, aqa.r); 2300 nvme_put64(nvme, NVME_REG_ASQ, asq); 2301 nvme_put64(nvme, NVME_REG_ACQ, acq); 2302 2303 cc.b.cc_ams = 0; /* use Round-Robin arbitration */ 2304 cc.b.cc_css = 0; /* use NVM command set */ 2305 cc.b.cc_mps = nvme->n_pageshift - 12; 2306 cc.b.cc_shn = 0; /* no shutdown in progress */ 2307 cc.b.cc_en = 1; /* enable controller */ 2308 cc.b.cc_iosqes = 6; /* submission queue entry is 2^6 bytes long */ 2309 cc.b.cc_iocqes = 4; /* completion queue entry is 2^4 bytes long */ 2310 2311 nvme_put32(nvme, NVME_REG_CC, cc.r); 2312 2313 /* 2314 * Wait for the controller to become ready. 2315 */ 2316 csts.r = nvme_get32(nvme, NVME_REG_CSTS); 2317 if (csts.b.csts_rdy == 0) { 2318 for (i = 0; i != nvme->n_timeout * 10; i++) { 2319 delay(drv_usectohz(50000)); 2320 csts.r = nvme_get32(nvme, NVME_REG_CSTS); 2321 2322 if (csts.b.csts_cfs == 1) { 2323 dev_err(nvme->n_dip, CE_WARN, 2324 "!controller fatal status at init"); 2325 ddi_fm_service_impact(nvme->n_dip, 2326 DDI_SERVICE_LOST); 2327 nvme->n_dead = B_TRUE; 2328 goto fail; 2329 } 2330 2331 if (csts.b.csts_rdy == 1) 2332 break; 2333 } 2334 } 2335 2336 if (csts.b.csts_rdy == 0) { 2337 dev_err(nvme->n_dip, CE_WARN, "!controller not ready"); 2338 ddi_fm_service_impact(nvme->n_dip, DDI_SERVICE_LOST); 2339 nvme->n_dead = B_TRUE; 2340 goto fail; 2341 } 2342 2343 /* 2344 * Assume an abort command limit of 1. We'll destroy and re-init 2345 * that later when we know the true abort command limit. 2346 */ 2347 sema_init(&nvme->n_abort_sema, 1, NULL, SEMA_DRIVER, NULL); 2348 2349 /* 2350 * Setup initial interrupt for admin queue. 2351 */ 2352 if ((nvme_setup_interrupts(nvme, DDI_INTR_TYPE_MSIX, 1) 2353 != DDI_SUCCESS) && 2354 (nvme_setup_interrupts(nvme, DDI_INTR_TYPE_MSI, 1) 2355 != DDI_SUCCESS) && 2356 (nvme_setup_interrupts(nvme, DDI_INTR_TYPE_FIXED, 1) 2357 != DDI_SUCCESS)) { 2358 dev_err(nvme->n_dip, CE_WARN, 2359 "!failed to setup initial interrupt"); 2360 goto fail; 2361 } 2362 2363 /* 2364 * Post an asynchronous event command to catch errors. 
2365 */ 2366 if (nvme_async_event(nvme) != DDI_SUCCESS) { 2367 dev_err(nvme->n_dip, CE_WARN, 2368 "!failed to post async event"); 2369 goto fail; 2370 } 2371 2372 /* 2373 * Identify Controller 2374 */ 2375 nvme->n_idctl = nvme_identify(nvme, 0); 2376 if (nvme->n_idctl == NULL) { 2377 dev_err(nvme->n_dip, CE_WARN, 2378 "!failed to identify controller"); 2379 goto fail; 2380 } 2381 2382 /* 2383 * Get Vendor & Product ID 2384 */ 2385 bcopy(nvme->n_idctl->id_model, model, sizeof (nvme->n_idctl->id_model)); 2386 model[sizeof (nvme->n_idctl->id_model)] = '\0'; 2387 sata_split_model(model, &vendor, &product); 2388 2389 if (vendor == NULL) 2390 nvme->n_vendor = strdup("NVMe"); 2391 else 2392 nvme->n_vendor = strdup(vendor); 2393 2394 nvme->n_product = strdup(product); 2395 2396 /* 2397 * Get controller limits. 2398 */ 2399 nvme->n_async_event_limit = MAX(NVME_MIN_ASYNC_EVENT_LIMIT, 2400 MIN(nvme->n_admin_queue_len / 10, 2401 MIN(nvme->n_idctl->id_aerl + 1, nvme->n_async_event_limit))); 2402 2403 (void) ddi_prop_update_int(DDI_DEV_T_NONE, nvme->n_dip, 2404 "async-event-limit", nvme->n_async_event_limit); 2405 2406 nvme->n_abort_command_limit = nvme->n_idctl->id_acl + 1; 2407 2408 /* 2409 * Reinitialize the semaphore with the true abort command limit 2410 * supported by the hardware. It's not necessary to disable interrupts 2411 * as only command aborts use the semaphore, and no commands are 2412 * executed or aborted while we're here. 2413 */ 2414 sema_destroy(&nvme->n_abort_sema); 2415 sema_init(&nvme->n_abort_sema, nvme->n_abort_command_limit - 1, NULL, 2416 SEMA_DRIVER, NULL); 2417 2418 nvme->n_progress |= NVME_CTRL_LIMITS; 2419 2420 if (nvme->n_idctl->id_mdts == 0) 2421 nvme->n_max_data_transfer_size = nvme->n_pagesize * 65536; 2422 else 2423 nvme->n_max_data_transfer_size = 2424 1ull << (nvme->n_pageshift + nvme->n_idctl->id_mdts); 2425 2426 nvme->n_error_log_len = nvme->n_idctl->id_elpe + 1; 2427 2428 /* 2429 * Limit n_max_data_transfer_size to what we can handle in one PRP. 2430 * Chained PRPs are currently unsupported. 2431 * 2432 * This is a no-op on hardware which doesn't support a transfer size 2433 * big enough to require chained PRPs. 2434 */ 2435 nvme->n_max_data_transfer_size = MIN(nvme->n_max_data_transfer_size, 2436 (nvme->n_pagesize / sizeof (uint64_t) * nvme->n_pagesize)); 2437 2438 nvme->n_prp_dma_attr.dma_attr_maxxfer = nvme->n_max_data_transfer_size; 2439 2440 /* 2441 * Make sure the minimum/maximum queue entry sizes are not 2442 * larger/smaller than the default. 2443 */ 2444 2445 if (((1 << nvme->n_idctl->id_sqes.qes_min) > sizeof (nvme_sqe_t)) || 2446 ((1 << nvme->n_idctl->id_sqes.qes_max) < sizeof (nvme_sqe_t)) || 2447 ((1 << nvme->n_idctl->id_cqes.qes_min) > sizeof (nvme_cqe_t)) || 2448 ((1 << nvme->n_idctl->id_cqes.qes_max) < sizeof (nvme_cqe_t))) 2449 goto fail; 2450 2451 /* 2452 * Check for the presence of a Volatile Write Cache. If present, 2453 * enable or disable based on the value of the property 2454 * volatile-write-cache-enable (default is enabled). 2455 */ 2456 nvme->n_write_cache_present = 2457 nvme->n_idctl->id_vwc.vwc_present == 0 ? B_FALSE : B_TRUE; 2458 2459 (void) ddi_prop_update_int(DDI_DEV_T_NONE, nvme->n_dip, 2460 "volatile-write-cache-present", 2461 nvme->n_write_cache_present ? 
1 : 0); 2462 2463 if (!nvme->n_write_cache_present) { 2464 nvme->n_write_cache_enabled = B_FALSE; 2465 } else if (!nvme_write_cache_set(nvme, nvme->n_write_cache_enabled)) { 2466 dev_err(nvme->n_dip, CE_WARN, 2467 "!failed to %sable volatile write cache", 2468 nvme->n_write_cache_enabled ? "en" : "dis"); 2469 /* 2470 * Assume the cache is (still) enabled. 2471 */ 2472 nvme->n_write_cache_enabled = B_TRUE; 2473 } 2474 2475 (void) ddi_prop_update_int(DDI_DEV_T_NONE, nvme->n_dip, 2476 "volatile-write-cache-enable", 2477 nvme->n_write_cache_enabled ? 1 : 0); 2478 2479 /* 2480 * Assume LBA Range Type feature is supported. If it isn't this 2481 * will be set to B_FALSE by nvme_get_features(). 2482 */ 2483 nvme->n_lba_range_supported = B_TRUE; 2484 2485 /* 2486 * Check support for Autonomous Power State Transition. 2487 */ 2488 if (NVME_VERSION_ATLEAST(&nvme->n_version, 1, 1)) 2489 nvme->n_auto_pst_supported = 2490 nvme->n_idctl->id_apsta.ap_sup == 0 ? B_FALSE : B_TRUE; 2491 2492 /* 2493 * Identify Namespaces 2494 */ 2495 nvme->n_namespace_count = nvme->n_idctl->id_nn; 2496 if (nvme->n_namespace_count > NVME_MINOR_MAX) { 2497 dev_err(nvme->n_dip, CE_WARN, 2498 "!too many namespaces: %d, limiting to %d\n", 2499 nvme->n_namespace_count, NVME_MINOR_MAX); 2500 nvme->n_namespace_count = NVME_MINOR_MAX; 2501 } 2502 2503 nvme->n_ns = kmem_zalloc(sizeof (nvme_namespace_t) * 2504 nvme->n_namespace_count, KM_SLEEP); 2505 2506 for (i = 0; i != nvme->n_namespace_count; i++) { 2507 mutex_init(&nvme->n_ns[i].ns_minor.nm_mutex, NULL, MUTEX_DRIVER, 2508 NULL); 2509 if (nvme_init_ns(nvme, i + 1) != DDI_SUCCESS) 2510 goto fail; 2511 } 2512 2513 /* 2514 * Try to set up MSI/MSI-X interrupts. 2515 */ 2516 if ((nvme->n_intr_types & (DDI_INTR_TYPE_MSI | DDI_INTR_TYPE_MSIX)) 2517 != 0) { 2518 nvme_release_interrupts(nvme); 2519 2520 nqueues = MIN(UINT16_MAX, ncpus); 2521 2522 if ((nvme_setup_interrupts(nvme, DDI_INTR_TYPE_MSIX, 2523 nqueues) != DDI_SUCCESS) && 2524 (nvme_setup_interrupts(nvme, DDI_INTR_TYPE_MSI, 2525 nqueues) != DDI_SUCCESS)) { 2526 dev_err(nvme->n_dip, CE_WARN, 2527 "!failed to setup MSI/MSI-X interrupts"); 2528 goto fail; 2529 } 2530 } 2531 2532 nqueues = nvme->n_intr_cnt; 2533 2534 /* 2535 * Create I/O queue pairs. 2536 */ 2537 nvme->n_ioq_count = nvme_set_nqueues(nvme, nqueues); 2538 if (nvme->n_ioq_count == 0) { 2539 dev_err(nvme->n_dip, CE_WARN, 2540 "!failed to set number of I/O queues to %d", nqueues); 2541 goto fail; 2542 } 2543 2544 /* 2545 * Reallocate I/O queue array 2546 */ 2547 kmem_free(nvme->n_ioq, sizeof (nvme_qpair_t *)); 2548 nvme->n_ioq = kmem_zalloc(sizeof (nvme_qpair_t *) * 2549 (nvme->n_ioq_count + 1), KM_SLEEP); 2550 nvme->n_ioq[0] = nvme->n_adminq; 2551 2552 /* 2553 * If we got fewer queues than we asked for, we might as well give 2554 * some of the interrupt vectors back to the system.
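 *
 * Worked example (illustration only): with 8 vectors set up but only
 * 4 I/O queue pairs granted by the controller, all 8 vectors are
 * released and 4 are set up again. Queues map to vectors as
 * queue_idx % n_intr_cnt, so vector 0 then serves the admin queue
 * and I/O queue 4, while vectors 1-3 serve one I/O queue each.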
2555 */ 2556 if (nvme->n_ioq_count < nqueues) { 2557 nvme_release_interrupts(nvme); 2558 2559 if (nvme_setup_interrupts(nvme, nvme->n_intr_type, 2560 nvme->n_ioq_count) != DDI_SUCCESS) { 2561 dev_err(nvme->n_dip, CE_WARN, 2562 "!failed to reduce number of interrupts"); 2563 goto fail; 2564 } 2565 } 2566 2567 /* 2568 * Alloc & register I/O queue pairs 2569 */ 2570 nvme->n_io_queue_len = 2571 MIN(nvme->n_io_queue_len, nvme->n_max_queue_entries); 2572 (void) ddi_prop_update_int(DDI_DEV_T_NONE, nvme->n_dip, "io-queue-len", 2573 nvme->n_io_queue_len); 2574 2575 for (i = 1; i != nvme->n_ioq_count + 1; i++) { 2576 if (nvme_alloc_qpair(nvme, nvme->n_io_queue_len, 2577 &nvme->n_ioq[i], i) != DDI_SUCCESS) { 2578 dev_err(nvme->n_dip, CE_WARN, 2579 "!unable to allocate I/O qpair %d", i); 2580 goto fail; 2581 } 2582 2583 if (nvme_create_io_qpair(nvme, nvme->n_ioq[i], i) 2584 != DDI_SUCCESS) { 2585 dev_err(nvme->n_dip, CE_WARN, 2586 "!unable to create I/O qpair %d", i); 2587 goto fail; 2588 } 2589 } 2590 2591 /* 2592 * Post more asynchronous event commands to reduce event reporting 2593 * latency as suggested by the spec. 2594 */ 2595 for (i = 1; i != nvme->n_async_event_limit; i++) { 2596 if (nvme_async_event(nvme) != DDI_SUCCESS) { 2597 dev_err(nvme->n_dip, CE_WARN, 2598 "!failed to post async event %d", i); 2599 goto fail; 2600 } 2601 } 2602 2603 return (DDI_SUCCESS); 2604 2605 fail: 2606 (void) nvme_reset(nvme, B_FALSE); 2607 return (DDI_FAILURE); 2608 } 2609 2610 static uint_t 2611 nvme_intr(caddr_t arg1, caddr_t arg2) 2612 { 2613 /*LINTED: E_PTR_BAD_CAST_ALIGN*/ 2614 nvme_t *nvme = (nvme_t *)arg1; 2615 int inum = (int)(uintptr_t)arg2; 2616 int ccnt = 0; 2617 int qnum; 2618 nvme_cmd_t *cmd; 2619 2620 if (inum >= nvme->n_intr_cnt) 2621 return (DDI_INTR_UNCLAIMED); 2622 2623 /* 2624 * The interrupt vector a queue uses is calculated as queue_idx % 2625 * intr_cnt in nvme_create_io_qpair(). Iterate through the queue array 2626 * in steps of n_intr_cnt to process all queues using this vector. 2627 */ 2628 for (qnum = inum; 2629 qnum < nvme->n_ioq_count + 1 && nvme->n_ioq[qnum] != NULL; 2630 qnum += nvme->n_intr_cnt) { 2631 while ((cmd = nvme_retrieve_cmd(nvme, nvme->n_ioq[qnum]))) { 2632 taskq_dispatch_ent((taskq_t *)cmd->nc_nvme->n_cmd_taskq, 2633 cmd->nc_callback, cmd, TQ_NOSLEEP, &cmd->nc_tqent); 2634 ccnt++; 2635 } 2636 } 2637 2638 return (ccnt > 0 ?
DDI_INTR_CLAIMED : DDI_INTR_UNCLAIMED); 2639 } 2640 2641 static void 2642 nvme_release_interrupts(nvme_t *nvme) 2643 { 2644 int i; 2645 2646 for (i = 0; i < nvme->n_intr_cnt; i++) { 2647 if (nvme->n_inth[i] == NULL) 2648 break; 2649 2650 if (nvme->n_intr_cap & DDI_INTR_FLAG_BLOCK) 2651 (void) ddi_intr_block_disable(&nvme->n_inth[i], 1); 2652 else 2653 (void) ddi_intr_disable(nvme->n_inth[i]); 2654 2655 (void) ddi_intr_remove_handler(nvme->n_inth[i]); 2656 (void) ddi_intr_free(nvme->n_inth[i]); 2657 } 2658 2659 kmem_free(nvme->n_inth, nvme->n_inth_sz); 2660 nvme->n_inth = NULL; 2661 nvme->n_inth_sz = 0; 2662 2663 nvme->n_progress &= ~NVME_INTERRUPTS; 2664 } 2665 2666 static int 2667 nvme_setup_interrupts(nvme_t *nvme, int intr_type, int nqpairs) 2668 { 2669 int nintrs, navail, count; 2670 int ret; 2671 int i; 2672 2673 if (nvme->n_intr_types == 0) { 2674 ret = ddi_intr_get_supported_types(nvme->n_dip, 2675 &nvme->n_intr_types); 2676 if (ret != DDI_SUCCESS) { 2677 dev_err(nvme->n_dip, CE_WARN, 2678 "!%s: ddi_intr_get_supported_types failed", 2679 __func__); 2680 return (ret); 2681 } 2682 #ifdef __x86 2683 if (get_hwenv() == HW_VMWARE) 2684 nvme->n_intr_types &= ~DDI_INTR_TYPE_MSIX; 2685 #endif 2686 } 2687 2688 if ((nvme->n_intr_types & intr_type) == 0) 2689 return (DDI_FAILURE); 2690 2691 ret = ddi_intr_get_nintrs(nvme->n_dip, intr_type, &nintrs); 2692 if (ret != DDI_SUCCESS) { 2693 dev_err(nvme->n_dip, CE_WARN, "!%s: ddi_intr_get_nintrs failed", 2694 __func__); 2695 return (ret); 2696 } 2697 2698 ret = ddi_intr_get_navail(nvme->n_dip, intr_type, &navail); 2699 if (ret != DDI_SUCCESS) { 2700 dev_err(nvme->n_dip, CE_WARN, "!%s: ddi_intr_get_navail failed", 2701 __func__); 2702 return (ret); 2703 } 2704 2705 /* We want at most one interrupt per queue pair.
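 * Example: if ddi_intr_get_navail() reports 32 available MSI-X
 * vectors but nqpairs is 8 (one per CPU), only 8 are allocated
 * below. If ddi_intr_alloc() in turn grants fewer than requested,
 * n_intr_cnt simply ends up smaller and several queues share a
 * vector, as handled in nvme_intr().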
*/ 2706 if (navail > nqpairs) 2707 navail = nqpairs; 2708 2709 nvme->n_inth_sz = sizeof (ddi_intr_handle_t) * navail; 2710 nvme->n_inth = kmem_zalloc(nvme->n_inth_sz, KM_SLEEP); 2711 2712 ret = ddi_intr_alloc(nvme->n_dip, nvme->n_inth, intr_type, 0, navail, 2713 &count, 0); 2714 if (ret != DDI_SUCCESS) { 2715 dev_err(nvme->n_dip, CE_WARN, "!%s: ddi_intr_alloc failed", 2716 __func__); 2717 goto fail; 2718 } 2719 2720 nvme->n_intr_cnt = count; 2721 2722 ret = ddi_intr_get_pri(nvme->n_inth[0], &nvme->n_intr_pri); 2723 if (ret != DDI_SUCCESS) { 2724 dev_err(nvme->n_dip, CE_WARN, "!%s: ddi_intr_get_pri failed", 2725 __func__); 2726 goto fail; 2727 } 2728 2729 for (i = 0; i < count; i++) { 2730 ret = ddi_intr_add_handler(nvme->n_inth[i], nvme_intr, 2731 (void *)nvme, (void *)(uintptr_t)i); 2732 if (ret != DDI_SUCCESS) { 2733 dev_err(nvme->n_dip, CE_WARN, 2734 "!%s: ddi_intr_add_handler failed", __func__); 2735 goto fail; 2736 } 2737 } 2738 2739 (void) ddi_intr_get_cap(nvme->n_inth[0], &nvme->n_intr_cap); 2740 2741 for (i = 0; i < count; i++) { 2742 if (nvme->n_intr_cap & DDI_INTR_FLAG_BLOCK) 2743 ret = ddi_intr_block_enable(&nvme->n_inth[i], 1); 2744 else 2745 ret = ddi_intr_enable(nvme->n_inth[i]); 2746 2747 if (ret != DDI_SUCCESS) { 2748 dev_err(nvme->n_dip, CE_WARN, 2749 "!%s: enabling interrupt %d failed", __func__, i); 2750 goto fail; 2751 } 2752 } 2753 2754 nvme->n_intr_type = intr_type; 2755 2756 nvme->n_progress |= NVME_INTERRUPTS; 2757 2758 return (DDI_SUCCESS); 2759 2760 fail: 2761 nvme_release_interrupts(nvme); 2762 2763 return (ret); 2764 } 2765 2766 static int 2767 nvme_fm_errcb(dev_info_t *dip, ddi_fm_error_t *fm_error, const void *arg) 2768 { 2769 _NOTE(ARGUNUSED(arg)); 2770 2771 pci_ereport_post(dip, fm_error, NULL); 2772 return (fm_error->fme_status); 2773 } 2774 2775 static int 2776 nvme_attach(dev_info_t *dip, ddi_attach_cmd_t cmd) 2777 { 2778 nvme_t *nvme; 2779 int instance; 2780 int nregs; 2781 off_t regsize; 2782 int i; 2783 char name[32]; 2784 2785 if (cmd != DDI_ATTACH) 2786 return (DDI_FAILURE); 2787 2788 instance = ddi_get_instance(dip); 2789 2790 if (ddi_soft_state_zalloc(nvme_state, instance) != DDI_SUCCESS) 2791 return (DDI_FAILURE); 2792 2793 nvme = ddi_get_soft_state(nvme_state, instance); 2794 ddi_set_driver_private(dip, nvme); 2795 nvme->n_dip = dip; 2796 2797 mutex_init(&nvme->n_minor.nm_mutex, NULL, MUTEX_DRIVER, NULL); 2798 2799 nvme->n_strict_version = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 2800 DDI_PROP_DONTPASS, "strict-version", 1) == 1 ? B_TRUE : B_FALSE; 2801 nvme->n_ignore_unknown_vendor_status = ddi_prop_get_int(DDI_DEV_T_ANY, 2802 dip, DDI_PROP_DONTPASS, "ignore-unknown-vendor-status", 0) == 1 ? 2803 B_TRUE : B_FALSE; 2804 nvme->n_admin_queue_len = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 2805 DDI_PROP_DONTPASS, "admin-queue-len", NVME_DEFAULT_ADMIN_QUEUE_LEN); 2806 nvme->n_io_queue_len = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 2807 DDI_PROP_DONTPASS, "io-queue-len", NVME_DEFAULT_IO_QUEUE_LEN); 2808 nvme->n_async_event_limit = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 2809 DDI_PROP_DONTPASS, "async-event-limit", 2810 NVME_DEFAULT_ASYNC_EVENT_LIMIT); 2811 nvme->n_write_cache_enabled = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 2812 DDI_PROP_DONTPASS, "volatile-write-cache-enable", 1) != 0 ? 
2813 B_TRUE : B_FALSE; 2814 nvme->n_min_block_size = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 2815 DDI_PROP_DONTPASS, "min-phys-block-size", 2816 NVME_DEFAULT_MIN_BLOCK_SIZE); 2817 2818 if (!ISP2(nvme->n_min_block_size) || 2819 (nvme->n_min_block_size < NVME_DEFAULT_MIN_BLOCK_SIZE)) { 2820 dev_err(dip, CE_WARN, "!min-phys-block-size %s, " 2821 "using default %d", ISP2(nvme->n_min_block_size) ? 2822 "too low" : "not a power of 2", 2823 NVME_DEFAULT_MIN_BLOCK_SIZE); 2824 nvme->n_min_block_size = NVME_DEFAULT_MIN_BLOCK_SIZE; 2825 } 2826 2827 if (nvme->n_admin_queue_len < NVME_MIN_ADMIN_QUEUE_LEN) 2828 nvme->n_admin_queue_len = NVME_MIN_ADMIN_QUEUE_LEN; 2829 else if (nvme->n_admin_queue_len > NVME_MAX_ADMIN_QUEUE_LEN) 2830 nvme->n_admin_queue_len = NVME_MAX_ADMIN_QUEUE_LEN; 2831 2832 if (nvme->n_io_queue_len < NVME_MIN_IO_QUEUE_LEN) 2833 nvme->n_io_queue_len = NVME_MIN_IO_QUEUE_LEN; 2834 2835 if (nvme->n_async_event_limit < 1) 2836 nvme->n_async_event_limit = NVME_DEFAULT_ASYNC_EVENT_LIMIT; 2837 2838 nvme->n_reg_acc_attr = nvme_reg_acc_attr; 2839 nvme->n_queue_dma_attr = nvme_queue_dma_attr; 2840 nvme->n_prp_dma_attr = nvme_prp_dma_attr; 2841 nvme->n_sgl_dma_attr = nvme_sgl_dma_attr; 2842 2843 /* 2844 * Setup FMA support. 2845 */ 2846 nvme->n_fm_cap = ddi_getprop(DDI_DEV_T_ANY, dip, 2847 DDI_PROP_CANSLEEP | DDI_PROP_DONTPASS, "fm-capable", 2848 DDI_FM_EREPORT_CAPABLE | DDI_FM_ACCCHK_CAPABLE | 2849 DDI_FM_DMACHK_CAPABLE | DDI_FM_ERRCB_CAPABLE); 2850 2851 ddi_fm_init(dip, &nvme->n_fm_cap, &nvme->n_fm_ibc); 2852 2853 if (nvme->n_fm_cap) { 2854 if (nvme->n_fm_cap & DDI_FM_ACCCHK_CAPABLE) 2855 nvme->n_reg_acc_attr.devacc_attr_access = 2856 DDI_FLAGERR_ACC; 2857 2858 if (nvme->n_fm_cap & DDI_FM_DMACHK_CAPABLE) { 2859 nvme->n_prp_dma_attr.dma_attr_flags |= DDI_DMA_FLAGERR; 2860 nvme->n_sgl_dma_attr.dma_attr_flags |= DDI_DMA_FLAGERR; 2861 } 2862 2863 if (DDI_FM_EREPORT_CAP(nvme->n_fm_cap) || 2864 DDI_FM_ERRCB_CAP(nvme->n_fm_cap)) 2865 pci_ereport_setup(dip); 2866 2867 if (DDI_FM_ERRCB_CAP(nvme->n_fm_cap)) 2868 ddi_fm_handler_register(dip, nvme_fm_errcb, 2869 (void *)nvme); 2870 } 2871 2872 nvme->n_progress |= NVME_FMA_INIT; 2873 2874 /* 2875 * The spec defines several register sets. Only the controller 2876 * registers (set 1) are currently used. 2877 */ 2878 if (ddi_dev_nregs(dip, &nregs) == DDI_FAILURE || 2879 nregs < 2 || 2880 ddi_dev_regsize(dip, 1, ®size) == DDI_FAILURE) 2881 goto fail; 2882 2883 if (ddi_regs_map_setup(dip, 1, &nvme->n_regs, 0, regsize, 2884 &nvme->n_reg_acc_attr, &nvme->n_regh) != DDI_SUCCESS) { 2885 dev_err(dip, CE_WARN, "!failed to map regset 1"); 2886 goto fail; 2887 } 2888 2889 nvme->n_progress |= NVME_REGS_MAPPED; 2890 2891 /* 2892 * Create taskq for command completion. 2893 */ 2894 (void) snprintf(name, sizeof (name), "%s%d_cmd_taskq", 2895 ddi_driver_name(dip), ddi_get_instance(dip)); 2896 nvme->n_cmd_taskq = ddi_taskq_create(dip, name, MIN(UINT16_MAX, ncpus), 2897 TASKQ_DEFAULTPRI, 0); 2898 if (nvme->n_cmd_taskq == NULL) { 2899 dev_err(dip, CE_WARN, "!failed to create cmd taskq"); 2900 goto fail; 2901 } 2902 2903 /* 2904 * Create PRP DMA cache 2905 */ 2906 (void) snprintf(name, sizeof (name), "%s%d_prp_cache", 2907 ddi_driver_name(dip), ddi_get_instance(dip)); 2908 nvme->n_prp_cache = kmem_cache_create(name, sizeof (nvme_dma_t), 2909 0, nvme_prp_dma_constructor, nvme_prp_dma_destructor, 2910 NULL, (void *)nvme, NULL, 0); 2911 2912 if (nvme_init(nvme) != DDI_SUCCESS) 2913 goto fail; 2914 2915 /* 2916 * Attach the blkdev driver for each namespace. 
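 *
 * Sketch of the minor numbering used below (illustrative; the actual
 * NVME_MINOR* macros are defined in the driver headers): the 32bit
 * minor is split at NVME_MINOR_INST_SHIFT into an instance part and
 * a namespace part, roughly
 *
 *	NVME_MINOR(inst, nsid) == (inst << NVME_MINOR_INST_SHIFT) | nsid
 *
 * with NVME_MINOR_INST() and NVME_MINOR_NSID() extracting the two
 * fields again. nsid 0 names the controller ("devctl") node created
 * after this loop; namespaces use nsid 1 .. n_namespace_count.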
2917 */ 2918 for (i = 0; i != nvme->n_namespace_count; i++) { 2919 if (ddi_create_minor_node(nvme->n_dip, nvme->n_ns[i].ns_name, 2920 S_IFCHR, NVME_MINOR(ddi_get_instance(nvme->n_dip), i + 1), 2921 DDI_NT_NVME_ATTACHMENT_POINT, 0) != DDI_SUCCESS) { 2922 dev_err(dip, CE_WARN, 2923 "!failed to create minor node for namespace %d", i); 2924 goto fail; 2925 } 2926 2927 if (nvme->n_ns[i].ns_ignore) 2928 continue; 2929 2930 nvme->n_ns[i].ns_bd_hdl = bd_alloc_handle(&nvme->n_ns[i], 2931 &nvme_bd_ops, &nvme->n_prp_dma_attr, KM_SLEEP); 2932 2933 if (nvme->n_ns[i].ns_bd_hdl == NULL) { 2934 dev_err(dip, CE_WARN, 2935 "!failed to get blkdev handle for namespace %d", i); 2936 goto fail; 2937 } 2938 2939 if (bd_attach_handle(dip, nvme->n_ns[i].ns_bd_hdl) 2940 != DDI_SUCCESS) { 2941 dev_err(dip, CE_WARN, 2942 "!failed to attach blkdev handle for namespace %d", 2943 i); 2944 goto fail; 2945 } 2946 } 2947 2948 if (ddi_create_minor_node(dip, "devctl", S_IFCHR, 2949 NVME_MINOR(ddi_get_instance(dip), 0), DDI_NT_NVME_NEXUS, 0) 2950 != DDI_SUCCESS) { 2951 dev_err(dip, CE_WARN, "nvme_attach: " 2952 "cannot create devctl minor node"); 2953 goto fail; 2954 } 2955 2956 return (DDI_SUCCESS); 2957 2958 fail: 2959 /* attach successful anyway so that FMA can retire the device */ 2960 if (nvme->n_dead) 2961 return (DDI_SUCCESS); 2962 2963 (void) nvme_detach(dip, DDI_DETACH); 2964 2965 return (DDI_FAILURE); 2966 } 2967 2968 static int 2969 nvme_detach(dev_info_t *dip, ddi_detach_cmd_t cmd) 2970 { 2971 int instance, i; 2972 nvme_t *nvme; 2973 2974 if (cmd != DDI_DETACH) 2975 return (DDI_FAILURE); 2976 2977 instance = ddi_get_instance(dip); 2978 2979 nvme = ddi_get_soft_state(nvme_state, instance); 2980 2981 if (nvme == NULL) 2982 return (DDI_FAILURE); 2983 2984 ddi_remove_minor_node(dip, "devctl"); 2985 mutex_destroy(&nvme->n_minor.nm_mutex); 2986 2987 if (nvme->n_ns) { 2988 for (i = 0; i != nvme->n_namespace_count; i++) { 2989 ddi_remove_minor_node(dip, nvme->n_ns[i].ns_name); 2990 mutex_destroy(&nvme->n_ns[i].ns_minor.nm_mutex); 2991 2992 if (nvme->n_ns[i].ns_bd_hdl) { 2993 (void) bd_detach_handle( 2994 nvme->n_ns[i].ns_bd_hdl); 2995 bd_free_handle(nvme->n_ns[i].ns_bd_hdl); 2996 } 2997 2998 if (nvme->n_ns[i].ns_idns) 2999 kmem_free(nvme->n_ns[i].ns_idns, 3000 sizeof (nvme_identify_nsid_t)); 3001 if (nvme->n_ns[i].ns_devid) 3002 strfree(nvme->n_ns[i].ns_devid); 3003 } 3004 3005 kmem_free(nvme->n_ns, sizeof (nvme_namespace_t) * 3006 nvme->n_namespace_count); 3007 } 3008 3009 if (nvme->n_progress & NVME_INTERRUPTS) 3010 nvme_release_interrupts(nvme); 3011 3012 if (nvme->n_cmd_taskq) 3013 ddi_taskq_wait(nvme->n_cmd_taskq); 3014 3015 if (nvme->n_ioq_count > 0) { 3016 for (i = 1; i != nvme->n_ioq_count + 1; i++) { 3017 if (nvme->n_ioq[i] != NULL) { 3018 /* TODO: send destroy queue commands */ 3019 nvme_free_qpair(nvme->n_ioq[i]); 3020 } 3021 } 3022 3023 kmem_free(nvme->n_ioq, sizeof (nvme_qpair_t *) * 3024 (nvme->n_ioq_count + 1)); 3025 } 3026 3027 if (nvme->n_prp_cache != NULL) { 3028 kmem_cache_destroy(nvme->n_prp_cache); 3029 } 3030 3031 if (nvme->n_progress & NVME_REGS_MAPPED) { 3032 nvme_shutdown(nvme, NVME_CC_SHN_NORMAL, B_FALSE); 3033 (void) nvme_reset(nvme, B_FALSE); 3034 } 3035 3036 if (nvme->n_cmd_taskq) 3037 ddi_taskq_destroy(nvme->n_cmd_taskq); 3038 3039 if (nvme->n_progress & NVME_CTRL_LIMITS) 3040 sema_destroy(&nvme->n_abort_sema); 3041 3042 if (nvme->n_progress & NVME_ADMIN_QUEUE) 3043 nvme_free_qpair(nvme->n_adminq); 3044 3045 if (nvme->n_idctl) 3046 kmem_free(nvme->n_idctl, NVME_IDENTIFY_BUFSIZE); 3047 3048 if 
(nvme->n_progress & NVME_REGS_MAPPED) 3049 ddi_regs_map_free(&nvme->n_regh); 3050 3051 if (nvme->n_progress & NVME_FMA_INIT) { 3052 if (DDI_FM_ERRCB_CAP(nvme->n_fm_cap)) 3053 ddi_fm_handler_unregister(nvme->n_dip); 3054 3055 if (DDI_FM_EREPORT_CAP(nvme->n_fm_cap) || 3056 DDI_FM_ERRCB_CAP(nvme->n_fm_cap)) 3057 pci_ereport_teardown(nvme->n_dip); 3058 3059 ddi_fm_fini(nvme->n_dip); 3060 } 3061 3062 if (nvme->n_vendor != NULL) 3063 strfree(nvme->n_vendor); 3064 3065 if (nvme->n_product != NULL) 3066 strfree(nvme->n_product); 3067 3068 ddi_soft_state_free(nvme_state, instance); 3069 3070 return (DDI_SUCCESS); 3071 } 3072 3073 static int 3074 nvme_quiesce(dev_info_t *dip) 3075 { 3076 int instance; 3077 nvme_t *nvme; 3078 3079 instance = ddi_get_instance(dip); 3080 3081 nvme = ddi_get_soft_state(nvme_state, instance); 3082 3083 if (nvme == NULL) 3084 return (DDI_FAILURE); 3085 3086 nvme_shutdown(nvme, NVME_CC_SHN_ABRUPT, B_TRUE); 3087 3088 (void) nvme_reset(nvme, B_TRUE); 3089 3090 return (DDI_SUCCESS); 3091 } 3092 3093 static int 3094 nvme_fill_prp(nvme_cmd_t *cmd, bd_xfer_t *xfer) 3095 { 3096 nvme_t *nvme = cmd->nc_nvme; 3097 int nprp_page, nprp; 3098 uint64_t *prp; 3099 3100 if (xfer->x_ndmac == 0) 3101 return (DDI_FAILURE); 3102 3103 cmd->nc_sqe.sqe_dptr.d_prp[0] = xfer->x_dmac.dmac_laddress; 3104 ddi_dma_nextcookie(xfer->x_dmah, &xfer->x_dmac); 3105 3106 if (xfer->x_ndmac == 1) { 3107 cmd->nc_sqe.sqe_dptr.d_prp[1] = 0; 3108 return (DDI_SUCCESS); 3109 } else if (xfer->x_ndmac == 2) { 3110 cmd->nc_sqe.sqe_dptr.d_prp[1] = xfer->x_dmac.dmac_laddress; 3111 return (DDI_SUCCESS); 3112 } 3113 3114 xfer->x_ndmac--; 3115 3116 nprp_page = nvme->n_pagesize / sizeof (uint64_t) - 1; 3117 ASSERT(nprp_page > 0); 3118 nprp = (xfer->x_ndmac + nprp_page - 1) / nprp_page; 3119 3120 /* 3121 * We currently don't support chained PRPs and set up our DMA 3122 * attributes to reflect that. If we still get an I/O request 3123 * that needs a chained PRP, something is very wrong. 3124 */ 3125 VERIFY(nprp == 1); 3126 3127 cmd->nc_dma = kmem_cache_alloc(nvme->n_prp_cache, KM_SLEEP); 3128 bzero(cmd->nc_dma->nd_memp, cmd->nc_dma->nd_len); 3129 3130 cmd->nc_sqe.sqe_dptr.d_prp[1] = cmd->nc_dma->nd_cookie.dmac_laddress; 3131 3132 /*LINTED: E_PTR_BAD_CAST_ALIGN*/ 3133 for (prp = (uint64_t *)cmd->nc_dma->nd_memp; 3134 xfer->x_ndmac > 0; 3135 prp++, xfer->x_ndmac--) { 3136 *prp = xfer->x_dmac.dmac_laddress; 3137 ddi_dma_nextcookie(xfer->x_dmah, &xfer->x_dmac); 3138 } 3139 3140 (void) ddi_dma_sync(cmd->nc_dma->nd_dmah, 0, cmd->nc_dma->nd_len, 3141 DDI_DMA_SYNC_FORDEV); 3142 return (DDI_SUCCESS); 3143 } 3144 3145 static nvme_cmd_t * 3146 nvme_create_nvm_cmd(nvme_namespace_t *ns, uint8_t opc, bd_xfer_t *xfer) 3147 { 3148 nvme_t *nvme = ns->ns_nvme; 3149 nvme_cmd_t *cmd; 3150 3151 /* 3152 * Blkdev only sets BD_XFER_POLL when dumping, so don't sleep. 3153 */ 3154 cmd = nvme_alloc_cmd(nvme, (xfer->x_flags & BD_XFER_POLL) ?
3155 KM_NOSLEEP : KM_SLEEP); 3156 3157 if (cmd == NULL) 3158 return (NULL); 3159 3160 cmd->nc_sqe.sqe_opc = opc; 3161 cmd->nc_callback = nvme_bd_xfer_done; 3162 cmd->nc_xfer = xfer; 3163 3164 switch (opc) { 3165 case NVME_OPC_NVM_WRITE: 3166 case NVME_OPC_NVM_READ: 3167 VERIFY(xfer->x_nblks <= 0x10000); 3168 3169 cmd->nc_sqe.sqe_nsid = ns->ns_id; 3170 3171 cmd->nc_sqe.sqe_cdw10 = xfer->x_blkno & 0xffffffffu; 3172 cmd->nc_sqe.sqe_cdw11 = (xfer->x_blkno >> 32); 3173 cmd->nc_sqe.sqe_cdw12 = (uint16_t)(xfer->x_nblks - 1); 3174 3175 if (nvme_fill_prp(cmd, xfer) != DDI_SUCCESS) 3176 goto fail; 3177 break; 3178 3179 case NVME_OPC_NVM_FLUSH: 3180 cmd->nc_sqe.sqe_nsid = ns->ns_id; 3181 break; 3182 3183 default: 3184 goto fail; 3185 } 3186 3187 return (cmd); 3188 3189 fail: 3190 nvme_free_cmd(cmd); 3191 return (NULL); 3192 } 3193 3194 static void 3195 nvme_bd_xfer_done(void *arg) 3196 { 3197 nvme_cmd_t *cmd = arg; 3198 bd_xfer_t *xfer = cmd->nc_xfer; 3199 int error = 0; 3200 3201 error = nvme_check_cmd_status(cmd); 3202 nvme_free_cmd(cmd); 3203 3204 bd_xfer_done(xfer, error); 3205 } 3206 3207 static void 3208 nvme_bd_driveinfo(void *arg, bd_drive_t *drive) 3209 { 3210 nvme_namespace_t *ns = arg; 3211 nvme_t *nvme = ns->ns_nvme; 3212 3213 /* 3214 * blkdev maintains one queue size per instance (namespace), 3215 * but all namespaces share the I/O queues. 3216 * TODO: need to figure out a sane default, or use per-NS I/O queues, 3217 * or change blkdev to handle EAGAIN 3218 */ 3219 drive->d_qsize = nvme->n_ioq_count * nvme->n_io_queue_len 3220 / nvme->n_namespace_count; 3221 3222 /* 3223 * d_maxxfer is not set, which means the value is taken from the DMA 3224 * attributes specified to bd_alloc_handle. 3225 */ 3226 3227 drive->d_removable = B_FALSE; 3228 drive->d_hotpluggable = B_FALSE; 3229 3230 bcopy(ns->ns_eui64, drive->d_eui64, sizeof (drive->d_eui64)); 3231 drive->d_target = ns->ns_id; 3232 drive->d_lun = 0; 3233 3234 drive->d_model = nvme->n_idctl->id_model; 3235 drive->d_model_len = sizeof (nvme->n_idctl->id_model); 3236 drive->d_vendor = nvme->n_vendor; 3237 drive->d_vendor_len = strlen(nvme->n_vendor); 3238 drive->d_product = nvme->n_product; 3239 drive->d_product_len = strlen(nvme->n_product); 3240 drive->d_serial = nvme->n_idctl->id_serial; 3241 drive->d_serial_len = sizeof (nvme->n_idctl->id_serial); 3242 drive->d_revision = nvme->n_idctl->id_fwrev; 3243 drive->d_revision_len = sizeof (nvme->n_idctl->id_fwrev); 3244 } 3245 3246 static int 3247 nvme_bd_mediainfo(void *arg, bd_media_t *media) 3248 { 3249 nvme_namespace_t *ns = arg; 3250 3251 media->m_nblks = ns->ns_block_count; 3252 media->m_blksize = ns->ns_block_size; 3253 media->m_readonly = B_FALSE; 3254 media->m_solidstate = B_TRUE; 3255 3256 media->m_pblksize = ns->ns_best_block_size; 3257 3258 return (0); 3259 } 3260 3261 static int 3262 nvme_bd_cmd(nvme_namespace_t *ns, bd_xfer_t *xfer, uint8_t opc) 3263 { 3264 nvme_t *nvme = ns->ns_nvme; 3265 nvme_cmd_t *cmd; 3266 3267 if (nvme->n_dead) 3268 return (EIO); 3269 3270 /* No polling for now */ 3271 if (xfer->x_flags & BD_XFER_POLL) 3272 return (EIO); 3273 3274 cmd = nvme_create_nvm_cmd(ns, opc, xfer); 3275 if (cmd == NULL) 3276 return (ENOMEM); 3277 3278 cmd->nc_sqid = (CPU->cpu_id % nvme->n_ioq_count) + 1; 3279 ASSERT(cmd->nc_sqid <= nvme->n_ioq_count); 3280 3281 if (nvme_submit_cmd(nvme->n_ioq[cmd->nc_sqid], cmd) 3282 != DDI_SUCCESS) 3283 return (EAGAIN); 3284 3285 return (0); 3286 } 3287 3288 static int 3289 nvme_bd_read(void *arg, bd_xfer_t *xfer) 3290 { 3291 nvme_namespace_t *ns = arg;
3292 3293 return (nvme_bd_cmd(ns, xfer, NVME_OPC_NVM_READ)); 3294 } 3295 3296 static int 3297 nvme_bd_write(void *arg, bd_xfer_t *xfer) 3298 { 3299 nvme_namespace_t *ns = arg; 3300 3301 return (nvme_bd_cmd(ns, xfer, NVME_OPC_NVM_WRITE)); 3302 } 3303 3304 static int 3305 nvme_bd_sync(void *arg, bd_xfer_t *xfer) 3306 { 3307 nvme_namespace_t *ns = arg; 3308 3309 if (ns->ns_nvme->n_dead) 3310 return (EIO); 3311 3312 /* 3313 * If the volatile write cache is not present or not enabled the FLUSH 3314 * command is a no-op, so we can take a shortcut here. 3315 */ 3316 if (!ns->ns_nvme->n_write_cache_present) { 3317 bd_xfer_done(xfer, ENOTSUP); 3318 return (0); 3319 } 3320 3321 if (!ns->ns_nvme->n_write_cache_enabled) { 3322 bd_xfer_done(xfer, 0); 3323 return (0); 3324 } 3325 3326 return (nvme_bd_cmd(ns, xfer, NVME_OPC_NVM_FLUSH)); 3327 } 3328 3329 static int 3330 nvme_bd_devid(void *arg, dev_info_t *devinfo, ddi_devid_t *devid) 3331 { 3332 nvme_namespace_t *ns = arg; 3333 3334 /*LINTED: E_BAD_PTR_CAST_ALIGN*/ 3335 if (*(uint64_t *)ns->ns_eui64 != 0) { 3336 return (ddi_devid_init(devinfo, DEVID_SCSI3_WWN, 3337 sizeof (ns->ns_eui64), ns->ns_eui64, devid)); 3338 } else { 3339 return (ddi_devid_init(devinfo, DEVID_ENCAP, 3340 strlen(ns->ns_devid), ns->ns_devid, devid)); 3341 } 3342 } 3343 3344 static int 3345 nvme_open(dev_t *devp, int flag, int otyp, cred_t *cred_p) 3346 { 3347 #ifndef __lock_lint 3348 _NOTE(ARGUNUSED(cred_p)); 3349 #endif 3350 minor_t minor = getminor(*devp); 3351 nvme_t *nvme = ddi_get_soft_state(nvme_state, NVME_MINOR_INST(minor)); 3352 int nsid = NVME_MINOR_NSID(minor); 3353 nvme_minor_state_t *nm; 3354 int rv = 0; 3355 3356 if (otyp != OTYP_CHR) 3357 return (EINVAL); 3358 3359 if (nvme == NULL) 3360 return (ENXIO); 3361 3362 if (nsid > nvme->n_namespace_count) 3363 return (ENXIO); 3364 3365 nm = nsid == 0 ? &nvme->n_minor : &nvme->n_ns[nsid - 1].ns_minor; 3366 3367 mutex_enter(&nm->nm_mutex); 3368 if (nm->nm_oexcl) { 3369 rv = EBUSY; 3370 goto out; 3371 } 3372 3373 if (flag & FEXCL) { 3374 if (nm->nm_ocnt != 0) { 3375 rv = EBUSY; 3376 goto out; 3377 } 3378 nm->nm_oexcl = B_TRUE; 3379 } 3380 3381 nm->nm_ocnt++; 3382 3383 out: 3384 mutex_exit(&nm->nm_mutex); 3385 return (rv); 3386 3387 } 3388 3389 static int 3390 nvme_close(dev_t dev, int flag, int otyp, cred_t *cred_p) 3391 { 3392 #ifndef __lock_lint 3393 _NOTE(ARGUNUSED(cred_p)); 3394 _NOTE(ARGUNUSED(flag)); 3395 #endif 3396 minor_t minor = getminor(dev); 3397 nvme_t *nvme = ddi_get_soft_state(nvme_state, NVME_MINOR_INST(minor)); 3398 int nsid = NVME_MINOR_NSID(minor); 3399 nvme_minor_state_t *nm; 3400 3401 if (otyp != OTYP_CHR) 3402 return (ENXIO); 3403 3404 if (nvme == NULL) 3405 return (ENXIO); 3406 3407 if (nsid > nvme->n_namespace_count) 3408 return (ENXIO); 3409 3410 nm = nsid == 0 ? 
&nvme->n_minor : &nvme->n_ns[nsid - 1].ns_minor; 3411 3412 mutex_enter(&nm->nm_mutex); 3413 if (nm->nm_oexcl) 3414 nm->nm_oexcl = B_FALSE; 3415 3416 ASSERT(nm->nm_ocnt > 0); 3417 nm->nm_ocnt--; 3418 mutex_exit(&nm->nm_mutex); 3419 3420 return (0); 3421 } 3422 3423 static int 3424 nvme_ioctl_identify(nvme_t *nvme, int nsid, nvme_ioctl_t *nioc, int mode, 3425 cred_t *cred_p) 3426 { 3427 _NOTE(ARGUNUSED(cred_p)); 3428 int rv = 0; 3429 void *idctl; 3430 3431 if ((mode & FREAD) == 0) 3432 return (EPERM); 3433 3434 if (nioc->n_len < NVME_IDENTIFY_BUFSIZE) 3435 return (EINVAL); 3436 3437 idctl = nvme_identify(nvme, nsid); 3438 if (idctl == NULL) 3439 return (EIO); 3440 3441 if (ddi_copyout(idctl, (void *)nioc->n_buf, NVME_IDENTIFY_BUFSIZE, mode) 3442 != 0) 3443 rv = EFAULT; 3444 3445 kmem_free(idctl, NVME_IDENTIFY_BUFSIZE); 3446 3447 return (rv); 3448 } 3449 3450 static int 3451 nvme_ioctl_capabilities(nvme_t *nvme, int nsid, nvme_ioctl_t *nioc, 3452 int mode, cred_t *cred_p) 3453 { 3454 _NOTE(ARGUNUSED(nsid, cred_p)); 3455 int rv = 0; 3456 nvme_reg_cap_t cap = { 0 }; 3457 nvme_capabilities_t nc; 3458 3459 if ((mode & FREAD) == 0) 3460 return (EPERM); 3461 3462 if (nioc->n_len < sizeof (nc)) 3463 return (EINVAL); 3464 3465 cap.r = nvme_get64(nvme, NVME_REG_CAP); 3466 3467 /* 3468 * The MPSMIN and MPSMAX fields in the CAP register use 0 to 3469 * specify the base page size of 4k (1<<12), so add 12 here to 3470 * get the real page size value. 3471 */ 3472 nc.mpsmax = 1 << (12 + cap.b.cap_mpsmax); 3473 nc.mpsmin = 1 << (12 + cap.b.cap_mpsmin); 3474 3475 if (ddi_copyout(&nc, (void *)nioc->n_buf, sizeof (nc), mode) != 0) 3476 rv = EFAULT; 3477 3478 return (rv); 3479 } 3480 3481 static int 3482 nvme_ioctl_get_logpage(nvme_t *nvme, int nsid, nvme_ioctl_t *nioc, 3483 int mode, cred_t *cred_p) 3484 { 3485 _NOTE(ARGUNUSED(cred_p)); 3486 void *log = NULL; 3487 size_t bufsize = 0; 3488 int rv = 0; 3489 3490 if ((mode & FREAD) == 0) 3491 return (EPERM); 3492 3493 switch (nioc->n_arg) { 3494 case NVME_LOGPAGE_ERROR: 3495 if (nsid != 0) 3496 return (EINVAL); 3497 break; 3498 case NVME_LOGPAGE_HEALTH: 3499 if (nsid != 0 && nvme->n_idctl->id_lpa.lp_smart == 0) 3500 return (EINVAL); 3501 3502 if (nsid == 0) 3503 nsid = (uint32_t)-1; 3504 3505 break; 3506 case NVME_LOGPAGE_FWSLOT: 3507 if (nsid != 0) 3508 return (EINVAL); 3509 break; 3510 default: 3511 return (EINVAL); 3512 } 3513 3514 if (nvme_get_logpage(nvme, &log, &bufsize, nioc->n_arg, nsid) 3515 != DDI_SUCCESS) 3516 return (EIO); 3517 3518 if (nioc->n_len < bufsize) { 3519 kmem_free(log, bufsize); 3520 return (EINVAL); 3521 } 3522 3523 if (ddi_copyout(log, (void *)nioc->n_buf, bufsize, mode) != 0) 3524 rv = EFAULT; 3525 3526 nioc->n_len = bufsize; 3527 kmem_free(log, bufsize); 3528 3529 return (rv); 3530 } 3531 3532 static int 3533 nvme_ioctl_get_features(nvme_t *nvme, int nsid, nvme_ioctl_t *nioc, 3534 int mode, cred_t *cred_p) 3535 { 3536 _NOTE(ARGUNUSED(cred_p)); 3537 void *buf = NULL; 3538 size_t bufsize = 0; 3539 uint32_t res = 0; 3540 uint8_t feature; 3541 int rv = 0; 3542 3543 if ((mode & FREAD) == 0) 3544 return (EPERM); 3545 3546 if ((nioc->n_arg >> 32) > 0xff) 3547 return (EINVAL); 3548 3549 feature = (uint8_t)(nioc->n_arg >> 32); 3550 3551 switch (feature) { 3552 case NVME_FEAT_ARBITRATION: 3553 case NVME_FEAT_POWER_MGMT: 3554 case NVME_FEAT_TEMPERATURE: 3555 case NVME_FEAT_ERROR: 3556 case NVME_FEAT_NQUEUES: 3557 case NVME_FEAT_INTR_COAL: 3558 case NVME_FEAT_WRITE_ATOM: 3559 case NVME_FEAT_ASYNC_EVENT: 3560 case NVME_FEAT_PROGRESS: 3561 if (nsid 
!= 0) 3562 return (EINVAL); 3563 break; 3564 3565 case NVME_FEAT_INTR_VECT: 3566 if (nsid != 0) 3567 return (EINVAL); 3568 3569 res = nioc->n_arg & 0xffffffffUL; 3570 if (res >= nvme->n_intr_cnt) 3571 return (EINVAL); 3572 break; 3573 3574 case NVME_FEAT_LBA_RANGE: 3575 if (nvme->n_lba_range_supported == B_FALSE) 3576 return (EINVAL); 3577 3578 if (nsid == 0 || 3579 nsid > nvme->n_namespace_count) 3580 return (EINVAL); 3581 3582 break; 3583 3584 case NVME_FEAT_WRITE_CACHE: 3585 if (nsid != 0) 3586 return (EINVAL); 3587 3588 if (!nvme->n_write_cache_present) 3589 return (EINVAL); 3590 3591 break; 3592 3593 case NVME_FEAT_AUTO_PST: 3594 if (nsid != 0) 3595 return (EINVAL); 3596 3597 if (!nvme->n_auto_pst_supported) 3598 return (EINVAL); 3599 3600 break; 3601 3602 default: 3603 return (EINVAL); 3604 } 3605 3606 if (nvme_get_features(nvme, nsid, feature, &res, &buf, &bufsize) == 3607 B_FALSE) 3608 return (EIO); 3609 3610 if (nioc->n_len < bufsize) { 3611 kmem_free(buf, bufsize); 3612 return (EINVAL); 3613 } 3614 3615 if (buf && ddi_copyout(buf, (void*)nioc->n_buf, bufsize, mode) != 0) 3616 rv = EFAULT; 3617 3618 kmem_free(buf, bufsize); 3619 nioc->n_arg = res; 3620 nioc->n_len = bufsize; 3621 3622 return (rv); 3623 } 3624 3625 static int 3626 nvme_ioctl_intr_cnt(nvme_t *nvme, int nsid, nvme_ioctl_t *nioc, int mode, 3627 cred_t *cred_p) 3628 { 3629 _NOTE(ARGUNUSED(nsid, mode, cred_p)); 3630 3631 if ((mode & FREAD) == 0) 3632 return (EPERM); 3633 3634 nioc->n_arg = nvme->n_intr_cnt; 3635 return (0); 3636 } 3637 3638 static int 3639 nvme_ioctl_version(nvme_t *nvme, int nsid, nvme_ioctl_t *nioc, int mode, 3640 cred_t *cred_p) 3641 { 3642 _NOTE(ARGUNUSED(nsid, cred_p)); 3643 int rv = 0; 3644 3645 if ((mode & FREAD) == 0) 3646 return (EPERM); 3647 3648 if (nioc->n_len < sizeof (nvme->n_version)) 3649 return (ENOMEM); 3650 3651 if (ddi_copyout(&nvme->n_version, (void *)nioc->n_buf, 3652 sizeof (nvme->n_version), mode) != 0) 3653 rv = EFAULT; 3654 3655 return (rv); 3656 } 3657 3658 static int 3659 nvme_ioctl_format(nvme_t *nvme, int nsid, nvme_ioctl_t *nioc, int mode, 3660 cred_t *cred_p) 3661 { 3662 _NOTE(ARGUNUSED(mode)); 3663 nvme_format_nvm_t frmt = { 0 }; 3664 int c_nsid = nsid != 0 ? nsid - 1 : 0; 3665 3666 if ((mode & FWRITE) == 0 || secpolicy_sys_config(cred_p, B_FALSE) != 0) 3667 return (EPERM); 3668 3669 frmt.r = nioc->n_arg & 0xffffffff; 3670 3671 /* 3672 * Check whether the FORMAT NVM command is supported. 3673 */ 3674 if (nvme->n_idctl->id_oacs.oa_format == 0) 3675 return (EINVAL); 3676 3677 /* 3678 * Don't allow format or secure erase of individual namespace if that 3679 * would cause a format or secure erase of all namespaces. 3680 */ 3681 if (nsid != 0 && nvme->n_idctl->id_fna.fn_format != 0) 3682 return (EINVAL); 3683 3684 if (nsid != 0 && frmt.b.fm_ses != NVME_FRMT_SES_NONE && 3685 nvme->n_idctl->id_fna.fn_sec_erase != 0) 3686 return (EINVAL); 3687 3688 /* 3689 * Don't allow formatting with Protection Information. 3690 */ 3691 if (frmt.b.fm_pi != 0 || frmt.b.fm_pil != 0 || frmt.b.fm_ms != 0) 3692 return (EINVAL); 3693 3694 /* 3695 * Don't allow formatting using an illegal LBA format, or any LBA format 3696 * that uses metadata. 3697 */ 3698 if (frmt.b.fm_lbaf > nvme->n_ns[c_nsid].ns_idns->id_nlbaf || 3699 nvme->n_ns[c_nsid].ns_idns->id_lbaf[frmt.b.fm_lbaf].lbaf_ms != 0) 3700 return (EINVAL); 3701 3702 /* 3703 * Don't allow formatting using an illegal Secure Erase setting. 
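 *
 * For reference, the FORMAT NVM command encodes its arguments in
 * cdw10 (decoded via nvme_format_nvm_t above): the LBA Format in
 * bits 3:0, Metadata Settings in bit 4, Protection Information in
 * bits 7:5, PI Location in bit 8, and Secure Erase Settings in bits
 * 11:9, where 0 means no secure erase, 1 a user data erase, and 2 a
 * cryptographic erase.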
3704 */ 3705 if (frmt.b.fm_ses > NVME_FRMT_MAX_SES || 3706 (frmt.b.fm_ses == NVME_FRMT_SES_CRYPTO && 3707 nvme->n_idctl->id_fna.fn_crypt_erase == 0)) 3708 return (EINVAL); 3709 3710 if (nsid == 0) 3711 nsid = (uint32_t)-1; 3712 3713 return (nvme_format_nvm(nvme, nsid, frmt.b.fm_lbaf, B_FALSE, 0, B_FALSE, 3714 frmt.b.fm_ses)); 3715 } 3716 3717 static int 3718 nvme_ioctl_detach(nvme_t *nvme, int nsid, nvme_ioctl_t *nioc, int mode, 3719 cred_t *cred_p) 3720 { 3721 _NOTE(ARGUNUSED(nioc, mode)); 3722 int rv = 0; 3723 3724 if ((mode & FWRITE) == 0 || secpolicy_sys_config(cred_p, B_FALSE) != 0) 3725 return (EPERM); 3726 3727 if (nsid == 0) 3728 return (EINVAL); 3729 3730 rv = bd_detach_handle(nvme->n_ns[nsid - 1].ns_bd_hdl); 3731 if (rv != DDI_SUCCESS) 3732 rv = EBUSY; 3733 3734 return (rv); 3735 } 3736 3737 static int 3738 nvme_ioctl_attach(nvme_t *nvme, int nsid, nvme_ioctl_t *nioc, int mode, 3739 cred_t *cred_p) 3740 { 3741 _NOTE(ARGUNUSED(nioc, mode)); 3742 nvme_identify_nsid_t *idns; 3743 int rv = 0; 3744 3745 if ((mode & FWRITE) == 0 || secpolicy_sys_config(cred_p, B_FALSE) != 0) 3746 return (EPERM); 3747 3748 if (nsid == 0) 3749 return (EINVAL); 3750 3751 /* 3752 * Identify namespace again, free old identify data. 3753 */ 3754 idns = nvme->n_ns[nsid - 1].ns_idns; 3755 if (nvme_init_ns(nvme, nsid) != DDI_SUCCESS) 3756 return (EIO); 3757 3758 kmem_free(idns, sizeof (nvme_identify_nsid_t)); 3759 3760 rv = bd_attach_handle(nvme->n_dip, nvme->n_ns[nsid - 1].ns_bd_hdl); 3761 if (rv != DDI_SUCCESS) 3762 rv = EBUSY; 3763 3764 return (rv); 3765 } 3766 3767 static int 3768 nvme_ioctl(dev_t dev, int cmd, intptr_t arg, int mode, cred_t *cred_p, 3769 int *rval_p) 3770 { 3771 #ifndef __lock_lint 3772 _NOTE(ARGUNUSED(rval_p)); 3773 #endif 3774 minor_t minor = getminor(dev); 3775 nvme_t *nvme = ddi_get_soft_state(nvme_state, NVME_MINOR_INST(minor)); 3776 int nsid = NVME_MINOR_NSID(minor); 3777 int rv = 0; 3778 nvme_ioctl_t nioc; 3779 3780 int (*nvme_ioctl[])(nvme_t *, int, nvme_ioctl_t *, int, cred_t *) = { 3781 NULL, 3782 nvme_ioctl_identify, 3783 nvme_ioctl_identify, 3784 nvme_ioctl_capabilities, 3785 nvme_ioctl_get_logpage, 3786 nvme_ioctl_get_features, 3787 nvme_ioctl_intr_cnt, 3788 nvme_ioctl_version, 3789 nvme_ioctl_format, 3790 nvme_ioctl_detach, 3791 nvme_ioctl_attach 3792 }; 3793 3794 if (nvme == NULL) 3795 return (ENXIO); 3796 3797 if (nsid > nvme->n_namespace_count) 3798 return (ENXIO); 3799 3800 if (IS_DEVCTL(cmd)) 3801 return (ndi_devctl_ioctl(nvme->n_dip, cmd, arg, mode, 0)); 3802 3803 #ifdef _MULTI_DATAMODEL 3804 switch (ddi_model_convert_from(mode & FMODELS)) { 3805 case DDI_MODEL_ILP32: { 3806 nvme_ioctl32_t nioc32; 3807 if (ddi_copyin((void*)arg, &nioc32, sizeof (nvme_ioctl32_t), 3808 mode) != 0) 3809 return (EFAULT); 3810 nioc.n_len = nioc32.n_len; 3811 nioc.n_buf = nioc32.n_buf; 3812 nioc.n_arg = nioc32.n_arg; 3813 break; 3814 } 3815 case DDI_MODEL_NONE: 3816 #endif 3817 if (ddi_copyin((void*)arg, &nioc, sizeof (nvme_ioctl_t), mode) 3818 != 0) 3819 return (EFAULT); 3820 #ifdef _MULTI_DATAMODEL 3821 break; 3822 } 3823 #endif 3824 3825 if (cmd == NVME_IOC_IDENTIFY_CTRL) { 3826 /* 3827 * This makes NVME_IOC_IDENTIFY_CTRL work the same on devctl and 3828 * attachment point nodes. 3829 */ 3830 nsid = 0; 3831 } else if (cmd == NVME_IOC_IDENTIFY_NSID && nsid == 0) { 3832 /* 3833 * This makes NVME_IOC_IDENTIFY_NSID work on a devctl node, it 3834 * will always return identify data for namespace 1. 
3835 */ 3836 nsid = 1; 3837 } 3838 3839 if (IS_NVME_IOC(cmd) && nvme_ioctl[NVME_IOC_CMD(cmd)] != NULL) 3840 rv = nvme_ioctl[NVME_IOC_CMD(cmd)](nvme, nsid, &nioc, mode, 3841 cred_p); 3842 else 3843 rv = EINVAL; 3844 3845 #ifdef _MULTI_DATAMODEL 3846 switch (ddi_model_convert_from(mode & FMODELS)) { 3847 case DDI_MODEL_ILP32: { 3848 nvme_ioctl32_t nioc32; 3849 3850 nioc32.n_len = (size32_t)nioc.n_len; 3851 nioc32.n_buf = (uintptr32_t)nioc.n_buf; 3852 nioc32.n_arg = nioc.n_arg; 3853 3854 if (ddi_copyout(&nioc32, (void *)arg, sizeof (nvme_ioctl32_t), 3855 mode) != 0) 3856 return (EFAULT); 3857 break; 3858 } 3859 case DDI_MODEL_NONE: 3860 #endif 3861 if (ddi_copyout(&nioc, (void *)arg, sizeof (nvme_ioctl_t), mode) 3862 != 0) 3863 return (EFAULT); 3864 #ifdef _MULTI_DATAMODEL 3865 break; 3866 } 3867 #endif 3868 3869 return (rv); 3870 } 3871