/*
 * This file and its contents are supplied under the terms of the
 * Common Development and Distribution License ("CDDL"), version 1.0.
 * You may only use this file in accordance with the terms of version
 * 1.0 of the CDDL.
 *
 * A full copy of the text of the CDDL should have accompanied this
 * source. A copy of the CDDL is also available via the Internet at
 * http://www.illumos.org/license/CDDL.
 */

/*
 * Copyright 2018 Nexenta Systems, Inc.
 * Copyright 2016 Tegile Systems, Inc. All rights reserved.
 * Copyright (c) 2016 The MathWorks, Inc. All rights reserved.
 * Copyright 2018 Joyent, Inc.
 * Copyright 2019 Western Digital Corporation.
 */

/*
 * blkdev driver for NVMe compliant storage devices
 *
 * This driver was written to conform to version 1.2.1 of the NVMe
 * specification. It may work with newer versions, but that is completely
 * untested and disabled by default.
 *
 * The driver has only been tested on x86 systems and will not work on big-
 * endian systems without changes to the code accessing registers and data
 * structures used by the hardware.
 *
 *
 * Interrupt Usage:
 *
 * The driver will use a single interrupt while configuring the device as the
 * specification requires, but contrary to the specification it will try to
 * use a single-message MSI(-X) or FIXED interrupt. Later in the attach
 * process it will switch to multiple-message MSI(-X) if supported. The driver
 * wants to have one interrupt vector per CPU, but it will work correctly if
 * fewer are available. Interrupts can be shared by queues; the interrupt
 * handler will iterate through the I/O queue array in steps of n_intr_cnt.
 * Usually only the admin queue will share an interrupt with one I/O queue.
 * The interrupt handler will retrieve completed commands from all queues
 * sharing an interrupt vector and will post them to a taskq for completion
 * processing.
 *
 *
 * Command Processing:
 *
 * NVMe devices can have up to 65535 I/O queue pairs, with each queue holding
 * up to 65536 I/O commands. The driver will configure one I/O queue pair per
 * available interrupt vector, with the queue length usually much smaller than
 * the maximum of 65536. If the hardware doesn't provide enough queues, fewer
 * interrupt vectors will be used.
 *
 * Additionally the hardware provides a single special admin queue pair that
 * can hold up to 4096 admin commands.
 *
 * From the hardware perspective both queues of a queue pair are independent,
 * but they share some driver state: the command array (holding pointers to
 * commands currently being processed by the hardware) and the active command
 * counter. Access to a submission queue and the shared state is protected by
 * nq_mutex; the completion queue is protected by ncq_mutex.
 *
 * When a command is submitted to a queue pair the active command counter is
 * incremented and a pointer to the command is stored in the command array.
 * The array index is used as command identifier (CID) in the submission queue
 * entry. Some commands may take a very long time to complete, and if the
 * queue wraps around in that time a submission may find the next array slot
 * still in use by a long-running command. In this case the array is
 * sequentially searched for the next free slot. The length of the command
 * array is the same as the configured queue length. Queue overrun is
 * prevented by the semaphore, so a command submission may block if the queue
 * is full.
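 *
 * As an illustration of the CID allocation (this is not driver code, just
 * the scheme described above): on a queue of length 4 where slot 1 is still
 * held by a long-running command, successive submissions are assigned CIDs
 * 2, 3, and then 0, skipping slot 1 until its command completes and frees
 * the slot.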
 *
 *
 * Polled I/O Support:
 *
 * For kernel core dump support the driver can do polled I/O. As interrupts
 * are turned off while dumping, the driver will just submit a command in the
 * regular way, and then repeatedly attempt a command retrieval until it gets
 * the command back.
 *
 *
 * Namespace Support:
 *
 * NVMe devices can have multiple namespaces, each being an independent data
 * store. The driver supports multiple namespaces and creates a blkdev
 * interface for each namespace found. Namespaces can have various attributes
 * to support protection information. This driver does not support any of
 * these attributes and ignores namespaces that have them.
 *
 * As of NVMe 1.1 namespaces can have a 64-bit Extended Unique Identifier
 * (EUI64). This driver uses the EUI64, if present, to generate the devid and
 * passes it to blkdev for use in the device node names. As this is currently
 * untested, namespaces with an EUI64 are ignored by default.
 *
 * We currently support only (2 << NVME_MINOR_INST_SHIFT) - 2 namespaces in a
 * single controller. This is an artificial limit imposed by the driver to be
 * able to address a reasonable number of controllers and namespaces using a
 * 32bit minor node number.
 *
 *
 * Minor nodes:
 *
 * For each NVMe device the driver exposes one minor node for the controller
 * and one minor node for each namespace. The only operations supported by
 * those minor nodes are open(9E), close(9E), and ioctl(9E). This serves as
 * the interface for the nvmeadm(1M) utility.
 *
 *
 * Blkdev Interface:
 *
 * This driver uses blkdev to do all the heavy lifting involved with
 * presenting a disk device to the system. As a result, the processing of I/O
 * requests is relatively simple as blkdev takes care of partitioning,
 * boundary checks, DMA setup, and splitting of transfers into manageable
 * chunks.
 *
 * I/O requests coming in from blkdev are turned into NVM commands and posted
 * to an I/O queue. The queue is selected by taking the CPU id modulo the
 * number of queues. There is currently no timeout handling of I/O commands.
 *
 * Blkdev also supports querying device/media information and generating a
 * devid. The driver reports the best block size as determined by the
 * namespace format back to blkdev as physical block size to support partition
 * and block alignment. The devid is either based on the namespace EUI64, if
 * present, or composed using the device vendor ID, model number, serial
 * number, and the namespace ID.
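 *
 * For example, with 8 I/O queues an I/O request submitted from CPU 11 gets
 * the queue index 11 % 8 = 3; requests from a given CPU therefore always
 * map to the same I/O queue.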
 *
 *
 * Error Handling:
 *
 * Error handling is currently limited to detecting fatal hardware errors,
 * either by asynchronous events, or synchronously through command status or
 * admin command timeouts. In case of severe errors the device is fenced off;
 * all further requests will return EIO. FMA is then called to fault the
 * device.
 *
 * The hardware has a limit for outstanding asynchronous event requests.
 * Before this limit is known the driver assumes it is at least 1 and posts a
 * single asynchronous request. Later, when the limit is known, more
 * asynchronous event requests are posted to allow quicker reception of error
 * information. When an asynchronous event is posted by the hardware the
 * driver will parse the error status fields and log information or fault the
 * device, depending on the severity of the asynchronous event. The
 * asynchronous event request is then reused and posted to the admin queue
 * again.
 *
 * On command completion the command status is checked for errors. In case of
 * errors indicating a driver bug the driver panics. Almost all other error
 * status values just cause EIO to be returned.
 *
 * Command timeouts are currently detected for all admin commands except
 * asynchronous event requests. If a command times out and the hardware
 * appears to be healthy the driver attempts to abort the command. The
 * original command timeout is also applied to the abort command. If the
 * abort times out too, the driver assumes the device to be dead, fences it
 * off, and calls FMA to retire it. In all other cases the aborted command
 * should return immediately with a status indicating it was aborted, and the
 * driver will wait indefinitely for that to happen. No timeout handling of
 * normal I/O commands is presently done.
 *
 * Any command that times out due to the controller dropping dead will be put
 * on the nvme_lost_cmds list if it references DMA memory. This will prevent
 * the DMA memory from being reused by the system and later being written to
 * by a "dead" NVMe controller.
 *
 *
 * Locking:
 *
 * Each queue pair has a nq_mutex and ncq_mutex. The nq_mutex must be held
 * when accessing shared state and submission queue registers; ncq_mutex
 * is held when accessing completion queue state and registers.
 * Callers of nvme_unqueue_cmd() must make sure that nq_mutex is held, while
 * nvme_submit_{admin,io}_cmd() and nvme_retrieve_cmd() take care of both
 * mutexes themselves.
 *
 * Each command also has its own nc_mutex, which is associated with the
 * condition variable nc_cv. It is only used on admin commands which are run
 * synchronously. In that case it must be held across calls to
 * nvme_submit_{admin,io}_cmd() and nvme_wait_cmd(), which is taken care of by
 * nvme_admin_cmd(). It must also be held whenever the completion state of the
 * command is changed or while an admin command timeout is handled.
 *
 * If both nc_mutex and nq_mutex must be held, nc_mutex must be acquired
 * first. More than one nc_mutex may only be held when aborting commands. In
 * this case, the nc_mutex of the command to be aborted must be held across
 * the call to nvme_abort_cmd() to prevent the command from completing while
 * the abort is in progress.
 *
 * If both nq_mutex and ncq_mutex need to be held, ncq_mutex must be
 * acquired first. More than one nq_mutex is never held by a single thread.
 * The ncq_mutex is only held by nvme_retrieve_cmd() and
 * nvme_process_iocq(). nvme_process_iocq() is only called from the
 * interrupt thread and nvme_retrieve_cmd() during polled I/O, so the
 * mutex is non-contentious but is required for implementation completeness
 * and safety.
 *
 * Each minor node has its own nm_mutex, which protects the open count
 * nm_ocnt and exclusive-open flag nm_oexcl.
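 *
 * Taken together, the rules above give the following partial lock ordering;
 * no ordering is defined between nc_mutex and ncq_mutex:
 *
 *	nc_mutex  -> nq_mutex
 *	ncq_mutex -> nq_mutex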
 *
 *
 * Quiesce / Fast Reboot:
 *
 * The driver currently does not support fast reboot. A quiesce(9E) entry
 * point is still provided which is used to send a shutdown notification to
 * the device.
 *
 *
 * DDI UFM Support
 *
 * The driver supports the DDI UFM framework for reporting information about
 * the device's firmware image and slot configuration. This data can be
 * queried by userland software via ioctls to the ufm driver. For more
 * information, see ddi_ufm(9E).
 *
 *
 * Driver Configuration:
 *
 * The following driver properties can be changed to control some aspects of
 * the driver's operation (see the example following this list):
 * - strict-version: can be set to 0 to allow devices conforming to newer
 *   major versions to be used
 * - ignore-unknown-vendor-status: can be set to 1 to not handle any vendor
 *   specific command status as a fatal error leading to device faulting
 * - admin-queue-len: the maximum length of the admin queue (16-4096)
 * - io-squeue-len: the maximum length of the I/O submission queues (16-65536)
 * - io-cqueue-len: the maximum length of the I/O completion queues (16-65536)
 * - async-event-limit: the maximum number of asynchronous event requests to
 *   be posted by the driver
 * - volatile-write-cache-enable: can be set to 0 to disable the volatile
 *   write cache
 * - min-phys-block-size: the minimum physical block size to report to blkdev,
 *   which is among other things the basis for ZFS vdev ashift
 * - max-submission-queues: the maximum number of I/O submission queues.
 * - max-completion-queues: the maximum number of I/O completion queues,
 *   can be less than max-submission-queues, in which case the completion
 *   queues are shared.
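 *
 * As an example, a hypothetical nvme.conf fragment limiting the queue
 * lengths and disabling the volatile write cache could look like this:
 *
 *	io-squeue-len=1024;
 *	io-cqueue-len=2048;
 *	volatile-write-cache-enable=0;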
 *
 *
 * TODO:
 * - figure out sane default for I/O queue depth reported to blkdev
 * - FMA handling of media errors
 * - support for devices supporting very large I/O requests using chained PRPs
 * - support for configuring hardware parameters like interrupt coalescing
 * - support for media formatting and hard partitioning into namespaces
 * - support for big-endian systems
 * - support for fast reboot
 * - support for NVMe Subsystem Reset (1.1)
 * - support for Scatter/Gather lists (1.1)
 * - support for Reservations (1.1)
 * - support for power management
 */

#include <sys/byteorder.h>
#ifdef _BIG_ENDIAN
#error nvme driver needs porting for big-endian platforms
#endif

#include <sys/modctl.h>
#include <sys/conf.h>
#include <sys/devops.h>
#include <sys/ddi.h>
#include <sys/ddi_ufm.h>
#include <sys/sunddi.h>
#include <sys/sunndi.h>
#include <sys/bitmap.h>
#include <sys/sysmacros.h>
#include <sys/param.h>
#include <sys/varargs.h>
#include <sys/cpuvar.h>
#include <sys/disp.h>
#include <sys/blkdev.h>
#include <sys/atomic.h>
#include <sys/archsystm.h>
#include <sys/sata/sata_hba.h>
#include <sys/stat.h>
#include <sys/policy.h>
#include <sys/list.h>

#include <sys/nvme.h>

#ifdef __x86
#include <sys/x86_archext.h>
#endif

#include "nvme_reg.h"
#include "nvme_var.h"

/*
 * Assertions to make sure that we've properly captured various aspects of the
 * packed structures and haven't broken them during updates.
 */
CTASSERT(sizeof (nvme_identify_ctrl_t) == 0x1000);
CTASSERT(offsetof(nvme_identify_ctrl_t, id_oacs) == 256);
CTASSERT(offsetof(nvme_identify_ctrl_t, id_sqes) == 512);
CTASSERT(offsetof(nvme_identify_ctrl_t, id_subnqn) == 768);
CTASSERT(offsetof(nvme_identify_ctrl_t, id_nvmof) == 1792);
CTASSERT(offsetof(nvme_identify_ctrl_t, id_psd) == 2048);
CTASSERT(offsetof(nvme_identify_ctrl_t, id_vs) == 3072);

CTASSERT(sizeof (nvme_identify_nsid_t) == 0x1000);
CTASSERT(offsetof(nvme_identify_nsid_t, id_fpi) == 32);
CTASSERT(offsetof(nvme_identify_nsid_t, id_nguid) == 104);
CTASSERT(offsetof(nvme_identify_nsid_t, id_lbaf) == 128);
CTASSERT(offsetof(nvme_identify_nsid_t, id_vs) == 384);

CTASSERT(sizeof (nvme_identify_primary_caps_t) == 0x1000);
CTASSERT(offsetof(nvme_identify_primary_caps_t, nipc_vqfrt) == 32);
CTASSERT(offsetof(nvme_identify_primary_caps_t, nipc_vifrt) == 64);


/* NVMe spec version supported */
static const int nvme_version_major = 1;

/* tunable for admin command timeout in seconds, default is 1s */
int nvme_admin_cmd_timeout = 1;

/* tunable for FORMAT NVM command timeout in seconds, default is 600s */
int nvme_format_cmd_timeout = 600;

/* tunable for firmware commit with NVME_FWC_SAVE, default is 15s */
int nvme_commit_save_cmd_timeout = 15;
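
/*
 * The timeouts above can also be tuned without rebuilding the driver; for
 * example, a hypothetical /etc/system entry raising the admin command
 * timeout to 10 seconds would be:
 *
 *	set nvme:nvme_admin_cmd_timeout = 10
 */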

static int nvme_attach(dev_info_t *, ddi_attach_cmd_t);
static int nvme_detach(dev_info_t *, ddi_detach_cmd_t);
static int nvme_quiesce(dev_info_t *);
static int nvme_fm_errcb(dev_info_t *, ddi_fm_error_t *, const void *);
static int nvme_setup_interrupts(nvme_t *, int, int);
static void nvme_release_interrupts(nvme_t *);
static uint_t nvme_intr(caddr_t, caddr_t);

static void nvme_shutdown(nvme_t *, int, boolean_t);
static boolean_t nvme_reset(nvme_t *, boolean_t);
static int nvme_init(nvme_t *);
static nvme_cmd_t *nvme_alloc_cmd(nvme_t *, int);
static void nvme_free_cmd(nvme_cmd_t *);
static nvme_cmd_t *nvme_create_nvm_cmd(nvme_namespace_t *, uint8_t,
    bd_xfer_t *);
static void nvme_admin_cmd(nvme_cmd_t *, int);
static void nvme_submit_admin_cmd(nvme_qpair_t *, nvme_cmd_t *);
static int nvme_submit_io_cmd(nvme_qpair_t *, nvme_cmd_t *);
static void nvme_submit_cmd_common(nvme_qpair_t *, nvme_cmd_t *);
static nvme_cmd_t *nvme_unqueue_cmd(nvme_t *, nvme_qpair_t *, int);
static nvme_cmd_t *nvme_retrieve_cmd(nvme_t *, nvme_qpair_t *);
static void nvme_wait_cmd(nvme_cmd_t *, uint_t);
static void nvme_wakeup_cmd(void *);
static void nvme_async_event_task(void *);

static int nvme_check_unknown_cmd_status(nvme_cmd_t *);
static int nvme_check_vendor_cmd_status(nvme_cmd_t *);
static int nvme_check_integrity_cmd_status(nvme_cmd_t *);
static int nvme_check_specific_cmd_status(nvme_cmd_t *);
static int nvme_check_generic_cmd_status(nvme_cmd_t *);
static inline int nvme_check_cmd_status(nvme_cmd_t *);

static int nvme_abort_cmd(nvme_cmd_t *, uint_t);
static void nvme_async_event(nvme_t *);
static int nvme_format_nvm(nvme_t *, boolean_t, uint32_t, uint8_t, boolean_t,
    uint8_t, boolean_t, uint8_t);
static int nvme_get_logpage(nvme_t *, boolean_t, void **, size_t *, uint8_t,
    ...);
static int nvme_identify(nvme_t *, boolean_t, uint32_t, void **);
static int nvme_set_features(nvme_t *, boolean_t, uint32_t, uint8_t, uint32_t,
    uint32_t *);
static int nvme_get_features(nvme_t *, boolean_t, uint32_t, uint8_t,
    uint32_t *, void **, size_t *);
static int nvme_write_cache_set(nvme_t *, boolean_t);
static int nvme_set_nqueues(nvme_t *);

static void nvme_free_dma(nvme_dma_t *);
static int nvme_zalloc_dma(nvme_t *, size_t, uint_t, ddi_dma_attr_t *,
    nvme_dma_t **);
static int nvme_zalloc_queue_dma(nvme_t *, uint32_t, uint16_t, uint_t,
    nvme_dma_t **);
static void nvme_free_qpair(nvme_qpair_t *);
static int nvme_alloc_qpair(nvme_t *, uint32_t, nvme_qpair_t **, uint_t);
static int nvme_create_io_qpair(nvme_t *, nvme_qpair_t *, uint16_t);

static inline void nvme_put64(nvme_t *, uintptr_t, uint64_t);
static inline void nvme_put32(nvme_t *, uintptr_t, uint32_t);
static inline uint64_t nvme_get64(nvme_t *, uintptr_t);
static inline uint32_t nvme_get32(nvme_t *, uintptr_t);

static boolean_t nvme_check_regs_hdl(nvme_t *);
static boolean_t nvme_check_dma_hdl(nvme_dma_t *);

static int nvme_fill_prp(nvme_cmd_t *, bd_xfer_t *);

static void nvme_bd_xfer_done(void *);
static void nvme_bd_driveinfo(void *, bd_drive_t *);
static int nvme_bd_mediainfo(void *, bd_media_t *);
static int nvme_bd_cmd(nvme_namespace_t *, bd_xfer_t *, uint8_t);
static int nvme_bd_read(void *, bd_xfer_t *);
static int nvme_bd_write(void *, bd_xfer_t *);
static int nvme_bd_sync(void *, bd_xfer_t *);
static int nvme_bd_devid(void *, dev_info_t *, ddi_devid_t *);

static int nvme_prp_dma_constructor(void *, void *, int);
static void nvme_prp_dma_destructor(void *, void *);

static void nvme_prepare_devid(nvme_t *, uint32_t);

/* DDI UFM callbacks */
static int nvme_ufm_fill_image(ddi_ufm_handle_t *, void *, uint_t,
    ddi_ufm_image_t *);
static int nvme_ufm_fill_slot(ddi_ufm_handle_t *, void *, uint_t, uint_t,
    ddi_ufm_slot_t *);
static int nvme_ufm_getcaps(ddi_ufm_handle_t *, void *, ddi_ufm_cap_t *);

static int nvme_open(dev_t *, int, int, cred_t *);
static int nvme_close(dev_t, int, int, cred_t *);
static int nvme_ioctl(dev_t, int, intptr_t, int, cred_t *, int *);

static ddi_ufm_ops_t nvme_ufm_ops = {
	NULL,
	nvme_ufm_fill_image,
	nvme_ufm_fill_slot,
	nvme_ufm_getcaps
};

#define	NVME_MINOR_INST_SHIFT	9
#define	NVME_MINOR(inst, nsid)	(((inst) << NVME_MINOR_INST_SHIFT) | (nsid))
#define	NVME_MINOR_INST(minor)	((minor) >> NVME_MINOR_INST_SHIFT)
#define	NVME_MINOR_NSID(minor)	((minor) & ((1 << NVME_MINOR_INST_SHIFT) - 1))
#define	NVME_MINOR_MAX		(NVME_MINOR(1, 0) - 2)
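
/*
 * Worked example of the minor number encoding above: NVME_MINOR(3, 2) is
 * (3 << 9) | 2 == 1538, from which NVME_MINOR_INST(1538) recovers the
 * instance number 3 and NVME_MINOR_NSID(1538) the namespace ID 2.
 */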

static void *nvme_state;
static kmem_cache_t *nvme_cmd_cache;

/*
 * DMA attributes for queue DMA memory
 *
 * Queue DMA memory must be page aligned. The maximum length of a queue is
 * 65536 entries, and an entry can be 64 bytes long.
 */
static ddi_dma_attr_t nvme_queue_dma_attr = {
	.dma_attr_version	= DMA_ATTR_V0,
	.dma_attr_addr_lo	= 0,
	.dma_attr_addr_hi	= 0xffffffffffffffffULL,
	.dma_attr_count_max	= (UINT16_MAX + 1) * sizeof (nvme_sqe_t) - 1,
	.dma_attr_align		= 0x1000,
	.dma_attr_burstsizes	= 0x7ff,
	.dma_attr_minxfer	= 0x1000,
	.dma_attr_maxxfer	= (UINT16_MAX + 1) * sizeof (nvme_sqe_t),
	.dma_attr_seg		= 0xffffffffffffffffULL,
	.dma_attr_sgllen	= 1,
	.dma_attr_granular	= 1,
	.dma_attr_flags		= 0,
};

/*
 * DMA attributes for transfers using Physical Region Page (PRP) entries
 *
 * A PRP entry describes one page of DMA memory using the page size specified
 * in the controller configuration's memory page size register (CC.MPS). It
 * uses a 64bit base address aligned to this page size. There is no limitation
 * on chaining PRPs together for arbitrarily large DMA transfers.
 */
static ddi_dma_attr_t nvme_prp_dma_attr = {
	.dma_attr_version	= DMA_ATTR_V0,
	.dma_attr_addr_lo	= 0,
	.dma_attr_addr_hi	= 0xffffffffffffffffULL,
	.dma_attr_count_max	= 0xfff,
	.dma_attr_align		= 0x1000,
	.dma_attr_burstsizes	= 0x7ff,
	.dma_attr_minxfer	= 0x1000,
	.dma_attr_maxxfer	= 0x1000,
	.dma_attr_seg		= 0xfff,
	.dma_attr_sgllen	= -1,
	.dma_attr_granular	= 1,
	.dma_attr_flags		= 0,
};

/*
 * DMA attributes for transfers using scatter/gather lists
 *
 * A SGL entry describes a chunk of DMA memory using a 64bit base address and
 * a 32bit length field. SGL Segment and SGL Last Segment entries require the
 * length to be a multiple of 16 bytes.
 */
static ddi_dma_attr_t nvme_sgl_dma_attr = {
	.dma_attr_version	= DMA_ATTR_V0,
	.dma_attr_addr_lo	= 0,
	.dma_attr_addr_hi	= 0xffffffffffffffffULL,
	.dma_attr_count_max	= 0xffffffffUL,
	.dma_attr_align		= 1,
	.dma_attr_burstsizes	= 0x7ff,
	.dma_attr_minxfer	= 0x10,
	.dma_attr_maxxfer	= 0xfffffffffULL,
	.dma_attr_seg		= 0xffffffffffffffffULL,
	.dma_attr_sgllen	= -1,
	.dma_attr_granular	= 0x10,
	.dma_attr_flags		= 0
};

static ddi_device_acc_attr_t nvme_reg_acc_attr = {
	.devacc_attr_version	= DDI_DEVICE_ATTR_V0,
	.devacc_attr_endian_flags = DDI_STRUCTURE_LE_ACC,
	.devacc_attr_dataorder	= DDI_STRICTORDER_ACC
};

static struct cb_ops nvme_cb_ops = {
	.cb_open	= nvme_open,
	.cb_close	= nvme_close,
	.cb_strategy	= nodev,
	.cb_print	= nodev,
	.cb_dump	= nodev,
	.cb_read	= nodev,
	.cb_write	= nodev,
	.cb_ioctl	= nvme_ioctl,
	.cb_devmap	= nodev,
	.cb_mmap	= nodev,
	.cb_segmap	= nodev,
	.cb_chpoll	= nochpoll,
	.cb_prop_op	= ddi_prop_op,
	.cb_str		= 0,
	.cb_flag	= D_NEW | D_MP,
	.cb_rev		= CB_REV,
	.cb_aread	= nodev,
	.cb_awrite	= nodev
};

static struct dev_ops nvme_dev_ops = {
	.devo_rev	= DEVO_REV,
	.devo_refcnt	= 0,
	.devo_getinfo	= ddi_no_info,
	.devo_identify	= nulldev,
	.devo_probe	= nulldev,
	.devo_attach	= nvme_attach,
	.devo_detach	= nvme_detach,
	.devo_reset	= nodev,
	.devo_cb_ops	= &nvme_cb_ops,
	.devo_bus_ops	= NULL,
	.devo_power	= NULL,
	.devo_quiesce	= nvme_quiesce,
};

static struct modldrv nvme_modldrv = {
	.drv_modops	= &mod_driverops,
	.drv_linkinfo	= "NVMe v1.1b",
	.drv_dev_ops	= &nvme_dev_ops
};

static struct modlinkage nvme_modlinkage = {
	.ml_rev		= MODREV_1,
	.ml_linkage	= { &nvme_modldrv, NULL }
};
static bd_ops_t nvme_bd_ops = {
	.o_version	= BD_OPS_VERSION_0,
	.o_drive_info	= nvme_bd_driveinfo,
	.o_media_info	= nvme_bd_mediainfo,
	.o_devid_init	= nvme_bd_devid,
	.o_sync_cache	= nvme_bd_sync,
	.o_read		= nvme_bd_read,
	.o_write	= nvme_bd_write,
};

/*
 * This list will hold commands that have timed out and couldn't be aborted.
 * As we don't know what the hardware may still do with the DMA memory we
 * can't free them, so we'll keep them forever on this list where we can
 * easily look at them with mdb.
 */
static struct list nvme_lost_cmds;
static kmutex_t nvme_lc_mutex;

int
_init(void)
{
	int error;

	error = ddi_soft_state_init(&nvme_state, sizeof (nvme_t), 1);
	if (error != DDI_SUCCESS)
		return (error);

	nvme_cmd_cache = kmem_cache_create("nvme_cmd_cache",
	    sizeof (nvme_cmd_t), 64, NULL, NULL, NULL, NULL, NULL, 0);

	mutex_init(&nvme_lc_mutex, NULL, MUTEX_DRIVER, NULL);
	list_create(&nvme_lost_cmds, sizeof (nvme_cmd_t),
	    offsetof(nvme_cmd_t, nc_list));

	bd_mod_init(&nvme_dev_ops);

	error = mod_install(&nvme_modlinkage);
	if (error != DDI_SUCCESS) {
		ddi_soft_state_fini(&nvme_state);
		/* undo the allocations above, mirroring _fini() */
		kmem_cache_destroy(nvme_cmd_cache);
		mutex_destroy(&nvme_lc_mutex);
		list_destroy(&nvme_lost_cmds);
		bd_mod_fini(&nvme_dev_ops);
	}

	return (error);
}

int
_fini(void)
{
	int error;

	if (!list_is_empty(&nvme_lost_cmds))
		return (DDI_FAILURE);

	error = mod_remove(&nvme_modlinkage);
	if (error == DDI_SUCCESS) {
		ddi_soft_state_fini(&nvme_state);
		kmem_cache_destroy(nvme_cmd_cache);
		mutex_destroy(&nvme_lc_mutex);
		list_destroy(&nvme_lost_cmds);
		bd_mod_fini(&nvme_dev_ops);
	}

	return (error);
}

int
_info(struct modinfo *modinfop)
{
	return (mod_info(&nvme_modlinkage, modinfop));
}
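
/*
 * Register access helpers. All accesses go through the DDI access handle
 * so that FMA can detect failed register reads and writes; the ASSERTs
 * check natural alignment of the target register.
 */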
static inline void
nvme_put64(nvme_t *nvme, uintptr_t reg, uint64_t val)
{
	ASSERT(((uintptr_t)(nvme->n_regs + reg) & 0x7) == 0);

	/*LINTED: E_BAD_PTR_CAST_ALIGN*/
	ddi_put64(nvme->n_regh, (uint64_t *)(nvme->n_regs + reg), val);
}

static inline void
nvme_put32(nvme_t *nvme, uintptr_t reg, uint32_t val)
{
	ASSERT(((uintptr_t)(nvme->n_regs + reg) & 0x3) == 0);

	/*LINTED: E_BAD_PTR_CAST_ALIGN*/
	ddi_put32(nvme->n_regh, (uint32_t *)(nvme->n_regs + reg), val);
}

static inline uint64_t
nvme_get64(nvme_t *nvme, uintptr_t reg)
{
	uint64_t val;

	ASSERT(((uintptr_t)(nvme->n_regs + reg) & 0x7) == 0);

	/*LINTED: E_BAD_PTR_CAST_ALIGN*/
	val = ddi_get64(nvme->n_regh, (uint64_t *)(nvme->n_regs + reg));

	return (val);
}

static inline uint32_t
nvme_get32(nvme_t *nvme, uintptr_t reg)
{
	uint32_t val;

	ASSERT(((uintptr_t)(nvme->n_regs + reg) & 0x3) == 0);

	/*LINTED: E_BAD_PTR_CAST_ALIGN*/
	val = ddi_get32(nvme->n_regh, (uint32_t *)(nvme->n_regs + reg));

	return (val);
}

static boolean_t
nvme_check_regs_hdl(nvme_t *nvme)
{
	ddi_fm_error_t error;

	ddi_fm_acc_err_get(nvme->n_regh, &error, DDI_FME_VERSION);

	if (error.fme_status != DDI_FM_OK)
		return (B_TRUE);

	return (B_FALSE);
}

static boolean_t
nvme_check_dma_hdl(nvme_dma_t *dma)
{
	ddi_fm_error_t error;

	if (dma == NULL)
		return (B_FALSE);

	ddi_fm_dma_err_get(dma->nd_dmah, &error, DDI_FME_VERSION);

	if (error.fme_status != DDI_FM_OK)
		return (B_TRUE);

	return (B_FALSE);
}

static void
nvme_free_dma_common(nvme_dma_t *dma)
{
	if (dma->nd_dmah != NULL)
		(void) ddi_dma_unbind_handle(dma->nd_dmah);
	if (dma->nd_acch != NULL)
		ddi_dma_mem_free(&dma->nd_acch);
	if (dma->nd_dmah != NULL)
		ddi_dma_free_handle(&dma->nd_dmah);
}

static void
nvme_free_dma(nvme_dma_t *dma)
{
	nvme_free_dma_common(dma);
	kmem_free(dma, sizeof (*dma));
}

/* ARGSUSED */
static void
nvme_prp_dma_destructor(void *buf, void *private)
{
	nvme_dma_t *dma = (nvme_dma_t *)buf;

	nvme_free_dma_common(dma);
}

static int
nvme_alloc_dma_common(nvme_t *nvme, nvme_dma_t *dma,
    size_t len, uint_t flags, ddi_dma_attr_t *dma_attr)
{
	if (ddi_dma_alloc_handle(nvme->n_dip, dma_attr, DDI_DMA_SLEEP, NULL,
	    &dma->nd_dmah) != DDI_SUCCESS) {
		/*
		 * Due to DDI_DMA_SLEEP this can't be DDI_DMA_NORESOURCES, and
		 * the only other possible error is DDI_DMA_BADATTR, which
		 * indicates a driver bug and should cause a panic.
		 */
		dev_err(nvme->n_dip, CE_PANIC,
		    "!failed to get DMA handle, check DMA attributes");
		return (DDI_FAILURE);
	}

	/*
	 * ddi_dma_mem_alloc() can only fail when DDI_DMA_NOSLEEP is specified
	 * or the flags are conflicting, which isn't the case here.
	 */
	(void) ddi_dma_mem_alloc(dma->nd_dmah, len, &nvme->n_reg_acc_attr,
	    DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL, &dma->nd_memp,
	    &dma->nd_len, &dma->nd_acch);

	if (ddi_dma_addr_bind_handle(dma->nd_dmah, NULL, dma->nd_memp,
	    dma->nd_len, flags | DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL,
	    &dma->nd_cookie, &dma->nd_ncookie) != DDI_DMA_MAPPED) {
		dev_err(nvme->n_dip, CE_WARN,
		    "!failed to bind DMA memory");
		atomic_inc_32(&nvme->n_dma_bind_err);
		nvme_free_dma_common(dma);
		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}

static int
nvme_zalloc_dma(nvme_t *nvme, size_t len, uint_t flags,
    ddi_dma_attr_t *dma_attr, nvme_dma_t **ret)
{
	nvme_dma_t *dma = kmem_zalloc(sizeof (nvme_dma_t), KM_SLEEP);

	if (nvme_alloc_dma_common(nvme, dma, len, flags, dma_attr) !=
	    DDI_SUCCESS) {
		*ret = NULL;
		kmem_free(dma, sizeof (nvme_dma_t));
		return (DDI_FAILURE);
	}

	bzero(dma->nd_memp, dma->nd_len);

	*ret = dma;
	return (DDI_SUCCESS);
}
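
/*
 * kmem cache constructor for PRP DMA memory: it pre-allocates one page of
 * DMA-able memory per cache object so PRP lists can be taken from the cache
 * on the I/O path instead of being allocated on the fly; the matching
 * destructor is nvme_prp_dma_destructor() above.
 */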
/* ARGSUSED */
static int
nvme_prp_dma_constructor(void *buf, void *private, int flags)
{
	nvme_dma_t *dma = (nvme_dma_t *)buf;
	nvme_t *nvme = (nvme_t *)private;

	dma->nd_dmah = NULL;
	dma->nd_acch = NULL;

	if (nvme_alloc_dma_common(nvme, dma, nvme->n_pagesize,
	    DDI_DMA_READ, &nvme->n_prp_dma_attr) != DDI_SUCCESS) {
		return (-1);
	}

	ASSERT(dma->nd_ncookie == 1);

	dma->nd_cached = B_TRUE;

	return (0);
}

static int
nvme_zalloc_queue_dma(nvme_t *nvme, uint32_t nentry, uint16_t qe_len,
    uint_t flags, nvme_dma_t **dma)
{
	uint32_t len = nentry * qe_len;
	ddi_dma_attr_t q_dma_attr = nvme->n_queue_dma_attr;

	len = roundup(len, nvme->n_pagesize);

	if (nvme_zalloc_dma(nvme, len, flags, &q_dma_attr, dma)
	    != DDI_SUCCESS) {
		dev_err(nvme->n_dip, CE_WARN,
		    "!failed to get DMA memory for queue");
		goto fail;
	}

	if ((*dma)->nd_ncookie != 1) {
		dev_err(nvme->n_dip, CE_WARN,
		    "!got too many cookies for queue DMA");
		goto fail;
	}

	return (DDI_SUCCESS);

fail:
	if (*dma) {
		nvme_free_dma(*dma);
		*dma = NULL;
	}

	return (DDI_FAILURE);
}

static void
nvme_free_cq(nvme_cq_t *cq)
{
	mutex_destroy(&cq->ncq_mutex);

	if (cq->ncq_dma != NULL)
		nvme_free_dma(cq->ncq_dma);

	kmem_free(cq, sizeof (*cq));
}

static void
nvme_free_qpair(nvme_qpair_t *qp)
{
	int i;

	mutex_destroy(&qp->nq_mutex);
	sema_destroy(&qp->nq_sema);

	if (qp->nq_sqdma != NULL)
		nvme_free_dma(qp->nq_sqdma);

	if (qp->nq_active_cmds > 0)
		for (i = 0; i != qp->nq_nentry; i++)
			if (qp->nq_cmd[i] != NULL)
				nvme_free_cmd(qp->nq_cmd[i]);

	if (qp->nq_cmd != NULL)
		kmem_free(qp->nq_cmd, sizeof (nvme_cmd_t *) * qp->nq_nentry);

	kmem_free(qp, sizeof (nvme_qpair_t));
}

/*
 * Destroy the pre-allocated cq array, but only free individual completion
 * queues from the given starting index.
 */
static void
nvme_destroy_cq_array(nvme_t *nvme, uint_t start)
{
	uint_t i;

	for (i = start; i < nvme->n_cq_count; i++)
		if (nvme->n_cq[i] != NULL)
			nvme_free_cq(nvme->n_cq[i]);

	kmem_free(nvme->n_cq, sizeof (*nvme->n_cq) * nvme->n_cq_count);
}

static int
nvme_alloc_cq(nvme_t *nvme, uint32_t nentry, nvme_cq_t **cqp, uint16_t idx)
{
	nvme_cq_t *cq = kmem_zalloc(sizeof (*cq), KM_SLEEP);

	mutex_init(&cq->ncq_mutex, NULL, MUTEX_DRIVER,
	    DDI_INTR_PRI(nvme->n_intr_pri));

	if (nvme_zalloc_queue_dma(nvme, nentry, sizeof (nvme_cqe_t),
	    DDI_DMA_READ, &cq->ncq_dma) != DDI_SUCCESS)
		goto fail;

	cq->ncq_cq = (nvme_cqe_t *)cq->ncq_dma->nd_memp;
	cq->ncq_nentry = nentry;
	cq->ncq_id = idx;
	cq->ncq_hdbl = NVME_REG_CQHDBL(nvme, idx);

	*cqp = cq;
	return (DDI_SUCCESS);

fail:
	nvme_free_cq(cq);
	*cqp = NULL;

	return (DDI_FAILURE);
}

/*
 * Create the n_cq array big enough to hold "ncq" completion queues.
 * If the array already exists it will be re-sized (but only larger).
 * The admin queue is included in this array, which boosts the
 * max number of entries to UINT16_MAX + 1.
 */
static int
nvme_create_cq_array(nvme_t *nvme, uint_t ncq, uint32_t nentry)
{
	nvme_cq_t **cq;
	uint_t i, cq_count;

	ASSERT3U(ncq, >, nvme->n_cq_count);

	cq = nvme->n_cq;
	cq_count = nvme->n_cq_count;

	nvme->n_cq = kmem_zalloc(sizeof (*nvme->n_cq) * ncq, KM_SLEEP);
	nvme->n_cq_count = ncq;

	for (i = 0; i < cq_count; i++)
		nvme->n_cq[i] = cq[i];

	for (; i < nvme->n_cq_count; i++)
		if (nvme_alloc_cq(nvme, nentry, &nvme->n_cq[i], i) !=
		    DDI_SUCCESS)
			goto fail;

	if (cq != NULL)
		kmem_free(cq, sizeof (*cq) * cq_count);

	return (DDI_SUCCESS);

fail:
	nvme_destroy_cq_array(nvme, cq_count);
	/*
	 * Restore the original array
	 */
	nvme->n_cq_count = cq_count;
	nvme->n_cq = cq;

	return (DDI_FAILURE);
}
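
/*
 * Worked example of the completion queue sharing implemented below: with
 * n_cq_count == 4 (the admin completion queue plus three I/O completion
 * queues), I/O submission queues 1 through 6 map to completion queues
 * 1, 2, 3, 1, 2, 3.
 */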
static int
nvme_alloc_qpair(nvme_t *nvme, uint32_t nentry, nvme_qpair_t **nqp,
    uint_t idx)
{
	nvme_qpair_t *qp = kmem_zalloc(sizeof (*qp), KM_SLEEP);
	uint_t cq_idx;

	mutex_init(&qp->nq_mutex, NULL, MUTEX_DRIVER,
	    DDI_INTR_PRI(nvme->n_intr_pri));

	/*
	 * The NVMe spec defines that a full queue has one empty (unused)
	 * slot; initialize the semaphore accordingly.
	 */
	sema_init(&qp->nq_sema, nentry - 1, NULL, SEMA_DRIVER, NULL);

	if (nvme_zalloc_queue_dma(nvme, nentry, sizeof (nvme_sqe_t),
	    DDI_DMA_WRITE, &qp->nq_sqdma) != DDI_SUCCESS)
		goto fail;

	/*
	 * idx == 0 is adminq, those above 0 are shared io completion queues.
	 */
	cq_idx = idx == 0 ? 0 : 1 + (idx - 1) % (nvme->n_cq_count - 1);
	qp->nq_cq = nvme->n_cq[cq_idx];
	qp->nq_sq = (nvme_sqe_t *)qp->nq_sqdma->nd_memp;
	qp->nq_nentry = nentry;

	qp->nq_sqtdbl = NVME_REG_SQTDBL(nvme, idx);

	qp->nq_cmd = kmem_zalloc(sizeof (nvme_cmd_t *) * nentry, KM_SLEEP);
	qp->nq_next_cmd = 0;

	*nqp = qp;
	return (DDI_SUCCESS);

fail:
	nvme_free_qpair(qp);
	*nqp = NULL;

	return (DDI_FAILURE);
}

static nvme_cmd_t *
nvme_alloc_cmd(nvme_t *nvme, int kmflag)
{
	nvme_cmd_t *cmd = kmem_cache_alloc(nvme_cmd_cache, kmflag);

	if (cmd == NULL)
		return (cmd);

	bzero(cmd, sizeof (nvme_cmd_t));

	cmd->nc_nvme = nvme;

	mutex_init(&cmd->nc_mutex, NULL, MUTEX_DRIVER,
	    DDI_INTR_PRI(nvme->n_intr_pri));
	cv_init(&cmd->nc_cv, NULL, CV_DRIVER, NULL);

	return (cmd);
}

static void
nvme_free_cmd(nvme_cmd_t *cmd)
{
	/* Don't free commands on the lost commands list. */
	if (list_link_active(&cmd->nc_list))
		return;

	if (cmd->nc_dma) {
		if (cmd->nc_dma->nd_cached)
			kmem_cache_free(cmd->nc_nvme->n_prp_cache,
			    cmd->nc_dma);
		else
			nvme_free_dma(cmd->nc_dma);
		cmd->nc_dma = NULL;
	}

	cv_destroy(&cmd->nc_cv);
	mutex_destroy(&cmd->nc_mutex);

	kmem_cache_free(nvme_cmd_cache, cmd);
}
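
/*
 * Command submission. The queue semaphore (nq_sema) implements the flow
 * control described in the "Command Processing" section above: admin
 * command submission blocks until a queue slot is free, while I/O command
 * submission fails with EAGAIN on a full queue so the caller can retry.
 */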
static void
nvme_submit_admin_cmd(nvme_qpair_t *qp, nvme_cmd_t *cmd)
{
	sema_p(&qp->nq_sema);
	nvme_submit_cmd_common(qp, cmd);
}

static int
nvme_submit_io_cmd(nvme_qpair_t *qp, nvme_cmd_t *cmd)
{
	if (sema_tryp(&qp->nq_sema) == 0)
		return (EAGAIN);

	nvme_submit_cmd_common(qp, cmd);
	return (0);
}

static void
nvme_submit_cmd_common(nvme_qpair_t *qp, nvme_cmd_t *cmd)
{
	nvme_reg_sqtdbl_t tail = { 0 };

	mutex_enter(&qp->nq_mutex);
	cmd->nc_completed = B_FALSE;

	/*
	 * Try to insert the cmd into the active cmd array at the nq_next_cmd
	 * slot. If the slot is already occupied advance to the next slot and
	 * try again. This can happen for long-running commands like async
	 * event requests.
	 */
	while (qp->nq_cmd[qp->nq_next_cmd] != NULL)
		qp->nq_next_cmd = (qp->nq_next_cmd + 1) % qp->nq_nentry;
	qp->nq_cmd[qp->nq_next_cmd] = cmd;

	qp->nq_active_cmds++;

	cmd->nc_sqe.sqe_cid = qp->nq_next_cmd;
	bcopy(&cmd->nc_sqe, &qp->nq_sq[qp->nq_sqtail], sizeof (nvme_sqe_t));
	(void) ddi_dma_sync(qp->nq_sqdma->nd_dmah,
	    sizeof (nvme_sqe_t) * qp->nq_sqtail,
	    sizeof (nvme_sqe_t), DDI_DMA_SYNC_FORDEV);
	qp->nq_next_cmd = (qp->nq_next_cmd + 1) % qp->nq_nentry;

	tail.b.sqtdbl_sqt = qp->nq_sqtail = (qp->nq_sqtail + 1) % qp->nq_nentry;
	nvme_put32(cmd->nc_nvme, qp->nq_sqtdbl, tail.r);

	mutex_exit(&qp->nq_mutex);
}

static nvme_cmd_t *
nvme_unqueue_cmd(nvme_t *nvme, nvme_qpair_t *qp, int cid)
{
	nvme_cmd_t *cmd;

	ASSERT(mutex_owned(&qp->nq_mutex));
	ASSERT3S(cid, <, qp->nq_nentry);

	cmd = qp->nq_cmd[cid];
	qp->nq_cmd[cid] = NULL;
	ASSERT3U(qp->nq_active_cmds, >, 0);
	qp->nq_active_cmds--;
	sema_v(&qp->nq_sema);

	ASSERT3P(cmd, !=, NULL);
	ASSERT3P(cmd->nc_nvme, ==, nvme);
	ASSERT3S(cmd->nc_sqe.sqe_cid, ==, cid);

	return (cmd);
}
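
/*
 * A note on the phase tag check in nvme_get_completed() below: the
 * controller alternates the phase bit it writes on each pass through the
 * completion queue. ncq_phase tracks the stale value, so an entry whose
 * phase bit still equals ncq_phase has not been posted since the last
 * wrap-around and marks the end of the completed entries; toggling
 * ncq_phase on every wrap maintains this invariant.
 */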
/*
 * Get the command tied to the next completed cqe and bump along completion
 * queue head counter.
 */
static nvme_cmd_t *
nvme_get_completed(nvme_t *nvme, nvme_cq_t *cq)
{
	nvme_qpair_t *qp;
	nvme_cqe_t *cqe;
	nvme_cmd_t *cmd;

	ASSERT(mutex_owned(&cq->ncq_mutex));

	cqe = &cq->ncq_cq[cq->ncq_head];

	/* Check phase tag of CQE. Hardware inverts it for new entries. */
	if (cqe->cqe_sf.sf_p == cq->ncq_phase)
		return (NULL);

	qp = nvme->n_ioq[cqe->cqe_sqid];

	mutex_enter(&qp->nq_mutex);
	cmd = nvme_unqueue_cmd(nvme, qp, cqe->cqe_cid);
	mutex_exit(&qp->nq_mutex);

	ASSERT(cmd->nc_sqid == cqe->cqe_sqid);
	bcopy(cqe, &cmd->nc_cqe, sizeof (nvme_cqe_t));

	qp->nq_sqhead = cqe->cqe_sqhd;

	cq->ncq_head = (cq->ncq_head + 1) % cq->ncq_nentry;

	/* Toggle phase on wrap-around. */
	if (cq->ncq_head == 0)
		cq->ncq_phase = cq->ncq_phase ? 0 : 1;

	return (cmd);
}

/*
 * Process all completed commands on the io completion queue.
 */
static uint_t
nvme_process_iocq(nvme_t *nvme, nvme_cq_t *cq)
{
	nvme_reg_cqhdbl_t head = { 0 };
	nvme_cmd_t *cmd;
	uint_t completed = 0;

	if (ddi_dma_sync(cq->ncq_dma->nd_dmah, 0, 0, DDI_DMA_SYNC_FORKERNEL) !=
	    DDI_SUCCESS)
		dev_err(nvme->n_dip, CE_WARN, "!ddi_dma_sync() failed in %s",
		    __func__);

	mutex_enter(&cq->ncq_mutex);

	while ((cmd = nvme_get_completed(nvme, cq)) != NULL) {
		taskq_dispatch_ent((taskq_t *)cmd->nc_nvme->n_cmd_taskq,
		    cmd->nc_callback, cmd, TQ_NOSLEEP, &cmd->nc_tqent);

		completed++;
	}

	if (completed > 0) {
		/*
		 * Update the completion queue head doorbell.
		 */
		head.b.cqhdbl_cqh = cq->ncq_head;
		nvme_put32(nvme, cq->ncq_hdbl, head.r);
	}

	mutex_exit(&cq->ncq_mutex);

	return (completed);
}

static nvme_cmd_t *
nvme_retrieve_cmd(nvme_t *nvme, nvme_qpair_t *qp)
{
	nvme_cq_t *cq = qp->nq_cq;
	nvme_reg_cqhdbl_t head = { 0 };
	nvme_cmd_t *cmd;

	if (ddi_dma_sync(cq->ncq_dma->nd_dmah, 0, 0, DDI_DMA_SYNC_FORKERNEL) !=
	    DDI_SUCCESS)
		dev_err(nvme->n_dip, CE_WARN, "!ddi_dma_sync() failed in %s",
		    __func__);

	mutex_enter(&cq->ncq_mutex);

	if ((cmd = nvme_get_completed(nvme, cq)) != NULL) {
		head.b.cqhdbl_cqh = cq->ncq_head;
		nvme_put32(nvme, cq->ncq_hdbl, head.r);
	}

	mutex_exit(&cq->ncq_mutex);

	return (cmd);
}
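
/*
 * Command status checking. The handlers below map classes of NVMe status
 * codes to errnos; status values that indicate a driver bug panic the
 * system (unless nc_dontpanic is set), and status values that indicate a
 * fatal controller condition mark the device dead and notify FMA.
 */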
static int
nvme_check_unknown_cmd_status(nvme_cmd_t *cmd)
{
	nvme_cqe_t *cqe = &cmd->nc_cqe;

	dev_err(cmd->nc_nvme->n_dip, CE_WARN,
	    "!unknown command status received: opc = %x, sqid = %d, cid = %d, "
	    "sc = %x, sct = %x, dnr = %d, m = %d", cmd->nc_sqe.sqe_opc,
	    cqe->cqe_sqid, cqe->cqe_cid, cqe->cqe_sf.sf_sc, cqe->cqe_sf.sf_sct,
	    cqe->cqe_sf.sf_dnr, cqe->cqe_sf.sf_m);

	if (cmd->nc_xfer != NULL)
		bd_error(cmd->nc_xfer, BD_ERR_ILLRQ);

	if (cmd->nc_nvme->n_strict_version) {
		cmd->nc_nvme->n_dead = B_TRUE;
		ddi_fm_service_impact(cmd->nc_nvme->n_dip, DDI_SERVICE_LOST);
	}

	return (EIO);
}

static int
nvme_check_vendor_cmd_status(nvme_cmd_t *cmd)
{
	nvme_cqe_t *cqe = &cmd->nc_cqe;

	dev_err(cmd->nc_nvme->n_dip, CE_WARN,
	    "!unknown vendor specific command status received: opc = %x, "
	    "sqid = %d, cid = %d, sc = %x, sct = %x, dnr = %d, m = %d",
	    cmd->nc_sqe.sqe_opc, cqe->cqe_sqid, cqe->cqe_cid,
	    cqe->cqe_sf.sf_sc, cqe->cqe_sf.sf_sct, cqe->cqe_sf.sf_dnr,
	    cqe->cqe_sf.sf_m);

	if (!cmd->nc_nvme->n_ignore_unknown_vendor_status) {
		cmd->nc_nvme->n_dead = B_TRUE;
		ddi_fm_service_impact(cmd->nc_nvme->n_dip, DDI_SERVICE_LOST);
	}

	return (EIO);
}

static int
nvme_check_integrity_cmd_status(nvme_cmd_t *cmd)
{
	nvme_cqe_t *cqe = &cmd->nc_cqe;

	switch (cqe->cqe_sf.sf_sc) {
	case NVME_CQE_SC_INT_NVM_WRITE:
		/* write fail */
		/* TODO: post ereport */
		if (cmd->nc_xfer != NULL)
			bd_error(cmd->nc_xfer, BD_ERR_MEDIA);
		return (EIO);

	case NVME_CQE_SC_INT_NVM_READ:
		/* read fail */
		/* TODO: post ereport */
		if (cmd->nc_xfer != NULL)
			bd_error(cmd->nc_xfer, BD_ERR_MEDIA);
		return (EIO);

	default:
		return (nvme_check_unknown_cmd_status(cmd));
	}
}

static int
nvme_check_generic_cmd_status(nvme_cmd_t *cmd)
{
	nvme_cqe_t *cqe = &cmd->nc_cqe;

	switch (cqe->cqe_sf.sf_sc) {
	case NVME_CQE_SC_GEN_SUCCESS:
		return (0);

	/*
	 * Errors indicating a bug in the driver should cause a panic.
	 */
	case NVME_CQE_SC_GEN_INV_OPC:
		/* Invalid Command Opcode */
		if (!cmd->nc_dontpanic)
			dev_err(cmd->nc_nvme->n_dip, CE_PANIC,
			    "programming error: invalid opcode in cmd %p",
			    (void *)cmd);
		return (EINVAL);

	case NVME_CQE_SC_GEN_INV_FLD:
		/* Invalid Field in Command */
		if (!cmd->nc_dontpanic)
			dev_err(cmd->nc_nvme->n_dip, CE_PANIC,
			    "programming error: invalid field in cmd %p",
			    (void *)cmd);
		return (EIO);

	case NVME_CQE_SC_GEN_ID_CNFL:
		/* Command ID Conflict */
		dev_err(cmd->nc_nvme->n_dip, CE_PANIC, "programming error: "
		    "cmd ID conflict in cmd %p", (void *)cmd);
		return (0);

	case NVME_CQE_SC_GEN_INV_NS:
		/* Invalid Namespace or Format */
		if (!cmd->nc_dontpanic)
			dev_err(cmd->nc_nvme->n_dip, CE_PANIC,
			    "programming error: invalid NS/format in cmd %p",
			    (void *)cmd);
		return (EINVAL);

	case NVME_CQE_SC_GEN_NVM_LBA_RANGE:
		/* LBA Out Of Range */
		dev_err(cmd->nc_nvme->n_dip, CE_PANIC, "programming error: "
		    "LBA out of range in cmd %p", (void *)cmd);
		return (0);

	/*
	 * Non-fatal errors, handle gracefully.
	 */
	case NVME_CQE_SC_GEN_DATA_XFR_ERR:
		/* Data Transfer Error (DMA) */
		/* TODO: post ereport */
		atomic_inc_32(&cmd->nc_nvme->n_data_xfr_err);
		if (cmd->nc_xfer != NULL)
			bd_error(cmd->nc_xfer, BD_ERR_NTRDY);
		return (EIO);

	case NVME_CQE_SC_GEN_INTERNAL_ERR:
		/*
		 * Internal Error. The spec (v1.0, section 4.5.1.2) says
		 * detailed error information is returned as async event,
		 * so we pretty much ignore the error here and handle it
		 * in the async event handler.
		 */
		atomic_inc_32(&cmd->nc_nvme->n_internal_err);
		if (cmd->nc_xfer != NULL)
			bd_error(cmd->nc_xfer, BD_ERR_NTRDY);
		return (EIO);

	case NVME_CQE_SC_GEN_ABORT_REQUEST:
		/*
		 * Command Abort Requested. This normally happens only when a
		 * command times out.
		 */
		/* TODO: post ereport or change blkdev to handle this? */
		atomic_inc_32(&cmd->nc_nvme->n_abort_rq_err);
		return (ECANCELED);

	case NVME_CQE_SC_GEN_ABORT_PWRLOSS:
		/* Command Aborted due to Power Loss Notification */
		ddi_fm_service_impact(cmd->nc_nvme->n_dip, DDI_SERVICE_LOST);
		cmd->nc_nvme->n_dead = B_TRUE;
		return (EIO);

	case NVME_CQE_SC_GEN_ABORT_SQ_DEL:
		/* Command Aborted due to SQ Deletion */
		atomic_inc_32(&cmd->nc_nvme->n_abort_sq_del);
		return (EIO);

	case NVME_CQE_SC_GEN_NVM_CAP_EXC:
		/* Capacity Exceeded */
		atomic_inc_32(&cmd->nc_nvme->n_nvm_cap_exc);
		if (cmd->nc_xfer != NULL)
			bd_error(cmd->nc_xfer, BD_ERR_MEDIA);
		return (EIO);

	case NVME_CQE_SC_GEN_NVM_NS_NOTRDY:
		/* Namespace Not Ready */
		atomic_inc_32(&cmd->nc_nvme->n_nvm_ns_notrdy);
		if (cmd->nc_xfer != NULL)
			bd_error(cmd->nc_xfer, BD_ERR_NTRDY);
		return (EIO);

	default:
		return (nvme_check_unknown_cmd_status(cmd));
	}
}

static int
nvme_check_specific_cmd_status(nvme_cmd_t *cmd)
{
	nvme_cqe_t *cqe = &cmd->nc_cqe;

	switch (cqe->cqe_sf.sf_sc) {
	case NVME_CQE_SC_SPC_INV_CQ:
		/* Completion Queue Invalid */
		ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_CREATE_SQUEUE);
		atomic_inc_32(&cmd->nc_nvme->n_inv_cq_err);
		return (EINVAL);

	case NVME_CQE_SC_SPC_INV_QID:
		/* Invalid Queue Identifier */
		ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_CREATE_SQUEUE ||
		    cmd->nc_sqe.sqe_opc == NVME_OPC_DELETE_SQUEUE ||
		    cmd->nc_sqe.sqe_opc == NVME_OPC_CREATE_CQUEUE ||
		    cmd->nc_sqe.sqe_opc == NVME_OPC_DELETE_CQUEUE);
		atomic_inc_32(&cmd->nc_nvme->n_inv_qid_err);
		return (EINVAL);

	case NVME_CQE_SC_SPC_MAX_QSZ_EXC:
		/* Max Queue Size Exceeded */
		ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_CREATE_SQUEUE ||
		    cmd->nc_sqe.sqe_opc == NVME_OPC_CREATE_CQUEUE);
		atomic_inc_32(&cmd->nc_nvme->n_max_qsz_exc);
		return (EINVAL);

	case NVME_CQE_SC_SPC_ABRT_CMD_EXC:
		/* Abort Command Limit Exceeded */
		ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_ABORT);
		dev_err(cmd->nc_nvme->n_dip, CE_PANIC, "programming error: "
		    "abort command limit exceeded in cmd %p", (void *)cmd);
		return (0);

	case NVME_CQE_SC_SPC_ASYNC_EVREQ_EXC:
		/* Async Event Request Limit Exceeded */
		ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_ASYNC_EVENT);
		dev_err(cmd->nc_nvme->n_dip, CE_PANIC, "programming error: "
		    "async event request limit exceeded in cmd %p",
		    (void *)cmd);
		return (0);

	case NVME_CQE_SC_SPC_INV_INT_VECT:
		/* Invalid Interrupt Vector */
		ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_CREATE_CQUEUE);
		atomic_inc_32(&cmd->nc_nvme->n_inv_int_vect);
		return (EINVAL);

	case NVME_CQE_SC_SPC_INV_LOG_PAGE:
		/* Invalid Log Page */
		ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_GET_LOG_PAGE);
		atomic_inc_32(&cmd->nc_nvme->n_inv_log_page);
		return (EINVAL);

	case NVME_CQE_SC_SPC_INV_FORMAT:
		/* Invalid Format */
		ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_NVM_FORMAT);
		atomic_inc_32(&cmd->nc_nvme->n_inv_format);
		if (cmd->nc_xfer != NULL)
			bd_error(cmd->nc_xfer, BD_ERR_ILLRQ);
		return (EINVAL);

	case NVME_CQE_SC_SPC_INV_Q_DEL:
		/* Invalid Queue Deletion */
		ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_DELETE_CQUEUE);
		atomic_inc_32(&cmd->nc_nvme->n_inv_q_del);
		return (EINVAL);

	case NVME_CQE_SC_SPC_NVM_CNFL_ATTR:
		/* Conflicting Attributes */
		ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_NVM_DSET_MGMT ||
		    cmd->nc_sqe.sqe_opc == NVME_OPC_NVM_READ ||
		    cmd->nc_sqe.sqe_opc == NVME_OPC_NVM_WRITE);
		atomic_inc_32(&cmd->nc_nvme->n_cnfl_attr);
		if (cmd->nc_xfer != NULL)
			bd_error(cmd->nc_xfer, BD_ERR_ILLRQ);
		return (EINVAL);

	case NVME_CQE_SC_SPC_NVM_INV_PROT:
		/* Invalid Protection Information */
		ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_NVM_COMPARE ||
		    cmd->nc_sqe.sqe_opc == NVME_OPC_NVM_READ ||
		    cmd->nc_sqe.sqe_opc == NVME_OPC_NVM_WRITE);
		atomic_inc_32(&cmd->nc_nvme->n_inv_prot);
		if (cmd->nc_xfer != NULL)
			bd_error(cmd->nc_xfer, BD_ERR_ILLRQ);
		return (EINVAL);

	case NVME_CQE_SC_SPC_NVM_READONLY:
		/* Write to Read Only Range */
		ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_NVM_WRITE);
		atomic_inc_32(&cmd->nc_nvme->n_readonly);
		if (cmd->nc_xfer != NULL)
			bd_error(cmd->nc_xfer, BD_ERR_ILLRQ);
		return (EROFS);

	case NVME_CQE_SC_SPC_INV_FW_SLOT:
		/* Invalid Firmware Slot */
		ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_FW_ACTIVATE);
		return (EINVAL);

	case NVME_CQE_SC_SPC_INV_FW_IMG:
		/* Invalid Firmware Image */
		ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_FW_ACTIVATE);
		return (EINVAL);

	case NVME_CQE_SC_SPC_FW_RESET:
		/* Conventional Reset Required */
		ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_FW_ACTIVATE);
		return (0);

	case NVME_CQE_SC_SPC_FW_NSSR:
		/* NVMe Subsystem Reset Required */
		ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_FW_ACTIVATE);
		return (0);

	case NVME_CQE_SC_SPC_FW_NEXT_RESET:
		/* Activation Requires Reset */
		ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_FW_ACTIVATE);
		return (0);

	case NVME_CQE_SC_SPC_FW_MTFA:
		/* Activation Requires Maximum Time Violation */
		ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_FW_ACTIVATE);
		return (EAGAIN);

	case NVME_CQE_SC_SPC_FW_PROHIBITED:
		/* Activation Prohibited */
		ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_FW_ACTIVATE);
		return (EINVAL);

	case NVME_CQE_SC_SPC_FW_OVERLAP:
		/* Overlapping Firmware Ranges */
		ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_FW_IMAGE_LOAD);
		return (EINVAL);

	default:
		return (nvme_check_unknown_cmd_status(cmd));
	}
}
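
/*
 * Dispatch on the Status Code Type (SCT) of the completion queue entry:
 *	GENERIC   -> nvme_check_generic_cmd_status()
 *	SPECIFIC  -> nvme_check_specific_cmd_status()
 *	INTEGRITY -> nvme_check_integrity_cmd_status()
 *	VENDOR    -> nvme_check_vendor_cmd_status()
 * Anything else is treated as an unknown command status.
 */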
static inline int
nvme_check_cmd_status(nvme_cmd_t *cmd)
{
	nvme_cqe_t *cqe = &cmd->nc_cqe;

	/*
	 * Take a shortcut if the controller is dead, or if
	 * command status indicates no error.
	 */
	if (cmd->nc_nvme->n_dead)
		return (EIO);

	if (cqe->cqe_sf.sf_sct == NVME_CQE_SCT_GENERIC &&
	    cqe->cqe_sf.sf_sc == NVME_CQE_SC_GEN_SUCCESS)
		return (0);

	if (cqe->cqe_sf.sf_sct == NVME_CQE_SCT_GENERIC)
		return (nvme_check_generic_cmd_status(cmd));
	else if (cqe->cqe_sf.sf_sct == NVME_CQE_SCT_SPECIFIC)
		return (nvme_check_specific_cmd_status(cmd));
	else if (cqe->cqe_sf.sf_sct == NVME_CQE_SCT_INTEGRITY)
		return (nvme_check_integrity_cmd_status(cmd));
	else if (cqe->cqe_sf.sf_sct == NVME_CQE_SCT_VENDOR)
		return (nvme_check_vendor_cmd_status(cmd));

	return (nvme_check_unknown_cmd_status(cmd));
}

static int
nvme_abort_cmd(nvme_cmd_t *abort_cmd, uint_t sec)
{
	nvme_t *nvme = abort_cmd->nc_nvme;
	nvme_cmd_t *cmd = nvme_alloc_cmd(nvme, KM_SLEEP);
	nvme_abort_cmd_t ac = { 0 };
	int ret = 0;

	sema_p(&nvme->n_abort_sema);

	ac.b.ac_cid = abort_cmd->nc_sqe.sqe_cid;
	ac.b.ac_sqid = abort_cmd->nc_sqid;

	cmd->nc_sqid = 0;
	cmd->nc_sqe.sqe_opc = NVME_OPC_ABORT;
	cmd->nc_callback = nvme_wakeup_cmd;
	cmd->nc_sqe.sqe_cdw10 = ac.r;

	/*
	 * Send the ABORT to the hardware. The ABORT command will return
	 * _after_ the aborted command has completed (aborted or otherwise),
	 * but since we still hold the aborted command's mutex its callback
	 * hasn't been processed yet.
	 */
	nvme_admin_cmd(cmd, sec);
	sema_v(&nvme->n_abort_sema);

	if ((ret = nvme_check_cmd_status(cmd)) != 0) {
		dev_err(nvme->n_dip, CE_WARN,
		    "!ABORT failed with sct = %x, sc = %x",
		    cmd->nc_cqe.cqe_sf.sf_sct, cmd->nc_cqe.cqe_sf.sf_sc);
		atomic_inc_32(&nvme->n_abort_failed);
	} else {
		dev_err(nvme->n_dip, CE_WARN,
		    "!ABORT of command %d/%d %ssuccessful",
		    abort_cmd->nc_sqe.sqe_cid, abort_cmd->nc_sqid,
		    cmd->nc_cqe.cqe_dw0 & 1 ? "un" : "");
		if ((cmd->nc_cqe.cqe_dw0 & 1) == 0)
			atomic_inc_32(&nvme->n_cmd_aborted);
	}

	nvme_free_cmd(cmd);
	return (ret);
}
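
/*
 * Summary of the timeout handling implemented in nvme_wait_cmd() below:
 *	1. The command completes in time: return.
 *	2. Timeout with fatal controller status, a failed register or DMA
 *	   handle, or a timed-out abort: declare the device dead and notify
 *	   FMA.
 *	3. Timeout with an apparently healthy controller: issue an ABORT
 *	   and, if it succeeds, wait for the aborted command to complete.
 *	4. Otherwise unqueue the command; if it references DMA memory, park
 *	   it on the global nvme_lost_cmds list.
 */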
/*
 * nvme_wait_cmd -- wait for command completion or timeout
 *
 * In case of a serious error or a timeout of the abort command the hardware
 * will be declared dead and FMA will be notified.
 */
static void
nvme_wait_cmd(nvme_cmd_t *cmd, uint_t sec)
{
	clock_t timeout = ddi_get_lbolt() + drv_usectohz(sec * MICROSEC);
	nvme_t *nvme = cmd->nc_nvme;
	nvme_reg_csts_t csts;
	nvme_qpair_t *qp;

	ASSERT(mutex_owned(&cmd->nc_mutex));

	while (!cmd->nc_completed) {
		if (cv_timedwait(&cmd->nc_cv, &cmd->nc_mutex, timeout) == -1)
			break;
	}

	if (cmd->nc_completed)
		return;

	/*
	 * The command timed out.
	 *
	 * Check controller for fatal status, any errors associated with the
	 * register or DMA handle, or for a double timeout (abort command
	 * timed out). If necessary log a warning and call FMA.
	 */
	csts.r = nvme_get32(nvme, NVME_REG_CSTS);
	dev_err(nvme->n_dip, CE_WARN, "!command %d/%d timeout, "
	    "OPC = %x, CFS = %d", cmd->nc_sqe.sqe_cid, cmd->nc_sqid,
	    cmd->nc_sqe.sqe_opc, csts.b.csts_cfs);
	atomic_inc_32(&nvme->n_cmd_timeout);

	if (csts.b.csts_cfs ||
	    nvme_check_regs_hdl(nvme) ||
	    nvme_check_dma_hdl(cmd->nc_dma) ||
	    cmd->nc_sqe.sqe_opc == NVME_OPC_ABORT) {
		ddi_fm_service_impact(nvme->n_dip, DDI_SERVICE_LOST);
		nvme->n_dead = B_TRUE;
	} else if (nvme_abort_cmd(cmd, sec) == 0) {
		/*
		 * If the abort succeeded the command should complete
		 * immediately with an appropriate status.
		 */
		while (!cmd->nc_completed)
			cv_wait(&cmd->nc_cv, &cmd->nc_mutex);

		return;
	}

	qp = nvme->n_ioq[cmd->nc_sqid];

	mutex_enter(&qp->nq_mutex);
	(void) nvme_unqueue_cmd(nvme, qp, cmd->nc_sqe.sqe_cid);
	mutex_exit(&qp->nq_mutex);

	/*
	 * As we don't know what the presumed dead hardware might still do
	 * with the DMA memory, we'll put the command on the lost commands
	 * list if it has any DMA memory.
	 */
	if (cmd->nc_dma != NULL) {
		mutex_enter(&nvme_lc_mutex);
		list_insert_head(&nvme_lost_cmds, cmd);
		mutex_exit(&nvme_lc_mutex);
	}
}

static void
nvme_wakeup_cmd(void *arg)
{
	nvme_cmd_t *cmd = arg;

	mutex_enter(&cmd->nc_mutex);
	cmd->nc_completed = B_TRUE;
	cv_signal(&cmd->nc_cv);
	mutex_exit(&cmd->nc_mutex);
}
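
/*
 * Taskq callback for completed asynchronous event requests; see the "Error
 * Handling" notes at the top of this file. This runs in taskq context
 * because it may issue synchronous GET LOG PAGE admin commands of its own.
 */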
*/ 1730 bzero(&cmd->nc_cqe, sizeof (nvme_cqe_t)); 1731 nvme_submit_admin_cmd(nvme->n_adminq, cmd); 1732 1733 switch (event.b.ae_type) { 1734 case NVME_ASYNC_TYPE_ERROR: 1735 if (event.b.ae_logpage == NVME_LOGPAGE_ERROR) { 1736 (void) nvme_get_logpage(nvme, B_FALSE, 1737 (void **)&error_log, &logsize, event.b.ae_logpage); 1738 } else { 1739 dev_err(nvme->n_dip, CE_WARN, "!wrong logpage in " 1740 "async event reply: %d", event.b.ae_logpage); 1741 atomic_inc_32(&nvme->n_wrong_logpage); 1742 } 1743 1744 switch (event.b.ae_info) { 1745 case NVME_ASYNC_ERROR_INV_SQ: 1746 dev_err(nvme->n_dip, CE_PANIC, "programming error: " 1747 "invalid submission queue"); 1748 return; 1749 1750 case NVME_ASYNC_ERROR_INV_DBL: 1751 dev_err(nvme->n_dip, CE_PANIC, "programming error: " 1752 "invalid doorbell write value"); 1753 return; 1754 1755 case NVME_ASYNC_ERROR_DIAGFAIL: 1756 dev_err(nvme->n_dip, CE_WARN, "!diagnostic failure"); 1757 ddi_fm_service_impact(nvme->n_dip, DDI_SERVICE_LOST); 1758 nvme->n_dead = B_TRUE; 1759 atomic_inc_32(&nvme->n_diagfail_event); 1760 break; 1761 1762 case NVME_ASYNC_ERROR_PERSISTENT: 1763 dev_err(nvme->n_dip, CE_WARN, "!persistent internal " 1764 "device error"); 1765 ddi_fm_service_impact(nvme->n_dip, DDI_SERVICE_LOST); 1766 nvme->n_dead = B_TRUE; 1767 atomic_inc_32(&nvme->n_persistent_event); 1768 break; 1769 1770 case NVME_ASYNC_ERROR_TRANSIENT: 1771 dev_err(nvme->n_dip, CE_WARN, "!transient internal " 1772 "device error"); 1773 /* TODO: send ereport */ 1774 atomic_inc_32(&nvme->n_transient_event); 1775 break; 1776 1777 case NVME_ASYNC_ERROR_FW_LOAD: 1778 dev_err(nvme->n_dip, CE_WARN, 1779 "!firmware image load error"); 1780 atomic_inc_32(&nvme->n_fw_load_event); 1781 break; 1782 } 1783 break; 1784 1785 case NVME_ASYNC_TYPE_HEALTH: 1786 if (event.b.ae_logpage == NVME_LOGPAGE_HEALTH) { 1787 (void) nvme_get_logpage(nvme, B_FALSE, 1788 (void **)&health_log, &logsize, event.b.ae_logpage, 1789 -1); 1790 } else { 1791 dev_err(nvme->n_dip, CE_WARN, "!wrong logpage in " 1792 "async event reply: %d", event.b.ae_logpage); 1793 atomic_inc_32(&nvme->n_wrong_logpage); 1794 } 1795 1796 switch (event.b.ae_info) { 1797 case NVME_ASYNC_HEALTH_RELIABILITY: 1798 dev_err(nvme->n_dip, CE_WARN, 1799 "!device reliability compromised"); 1800 /* TODO: send ereport */ 1801 atomic_inc_32(&nvme->n_reliability_event); 1802 break; 1803 1804 case NVME_ASYNC_HEALTH_TEMPERATURE: 1805 dev_err(nvme->n_dip, CE_WARN, 1806 "!temperature above threshold"); 1807 /* TODO: send ereport */ 1808 atomic_inc_32(&nvme->n_temperature_event); 1809 break; 1810 1811 case NVME_ASYNC_HEALTH_SPARE: 1812 dev_err(nvme->n_dip, CE_WARN, 1813 "!spare space below threshold"); 1814 /* TODO: send ereport */ 1815 atomic_inc_32(&nvme->n_spare_event); 1816 break; 1817 } 1818 break; 1819 1820 case NVME_ASYNC_TYPE_VENDOR: 1821 dev_err(nvme->n_dip, CE_WARN, "!vendor specific async event " 1822 "received, info = %x, logpage = %x", event.b.ae_info, 1823 event.b.ae_logpage); 1824 atomic_inc_32(&nvme->n_vendor_event); 1825 break; 1826 1827 default: 1828 dev_err(nvme->n_dip, CE_WARN, "!unknown async event received, " 1829 "type = %x, info = %x, logpage = %x", event.b.ae_type, 1830 event.b.ae_info, event.b.ae_logpage); 1831 atomic_inc_32(&nvme->n_unknown_event); 1832 break; 1833 } 1834 1835 if (error_log) 1836 kmem_free(error_log, logsize); 1837 1838 if (health_log) 1839 kmem_free(health_log, logsize); 1840 } 1841 1842 static void 1843 nvme_admin_cmd(nvme_cmd_t *cmd, int sec) 1844 { 1845 mutex_enter(&cmd->nc_mutex); 1846 
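/*
 * nc_mutex is held across submission and wait so that the completion
 * callback (nvme_wakeup_cmd) cannot signal nc_cv before nvme_wait_cmd()
 * is ready to block on it.
 */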
nvme_submit_admin_cmd(cmd->nc_nvme->n_adminq, cmd); 1847 nvme_wait_cmd(cmd, sec); 1848 mutex_exit(&cmd->nc_mutex); 1849 } 1850 1851 static void 1852 nvme_async_event(nvme_t *nvme) 1853 { 1854 nvme_cmd_t *cmd; 1855 1856 cmd = nvme_alloc_cmd(nvme, KM_SLEEP); 1857 cmd->nc_sqid = 0; 1858 cmd->nc_sqe.sqe_opc = NVME_OPC_ASYNC_EVENT; 1859 cmd->nc_callback = nvme_async_event_task; 1860 cmd->nc_dontpanic = B_TRUE; 1861 1862 nvme_submit_admin_cmd(nvme->n_adminq, cmd); 1863 } 1864 1865 static int 1866 nvme_format_nvm(nvme_t *nvme, boolean_t user, uint32_t nsid, uint8_t lbaf, 1867 boolean_t ms, uint8_t pi, boolean_t pil, uint8_t ses) 1868 { 1869 nvme_cmd_t *cmd = nvme_alloc_cmd(nvme, KM_SLEEP); 1870 nvme_format_nvm_t format_nvm = { 0 }; 1871 int ret; 1872 1873 format_nvm.b.fm_lbaf = lbaf & 0xf; 1874 format_nvm.b.fm_ms = ms ? 1 : 0; 1875 format_nvm.b.fm_pi = pi & 0x7; 1876 format_nvm.b.fm_pil = pil ? 1 : 0; 1877 format_nvm.b.fm_ses = ses & 0x7; 1878 1879 cmd->nc_sqid = 0; 1880 cmd->nc_callback = nvme_wakeup_cmd; 1881 cmd->nc_sqe.sqe_nsid = nsid; 1882 cmd->nc_sqe.sqe_opc = NVME_OPC_NVM_FORMAT; 1883 cmd->nc_sqe.sqe_cdw10 = format_nvm.r; 1884 1885 /* 1886 * Some devices like Samsung SM951 don't allow formatting of all 1887 * namespaces in one command. Handle that gracefully. 1888 */ 1889 if (nsid == (uint32_t)-1) 1890 cmd->nc_dontpanic = B_TRUE; 1891 /* 1892 * If this format request was initiated by the user, then don't allow a 1893 * programmer error to panic the system. 1894 */ 1895 if (user) 1896 cmd->nc_dontpanic = B_TRUE; 1897 1898 nvme_admin_cmd(cmd, nvme_format_cmd_timeout); 1899 1900 if ((ret = nvme_check_cmd_status(cmd)) != 0) { 1901 dev_err(nvme->n_dip, CE_WARN, 1902 "!FORMAT failed with sct = %x, sc = %x", 1903 cmd->nc_cqe.cqe_sf.sf_sct, cmd->nc_cqe.cqe_sf.sf_sc); 1904 } 1905 1906 nvme_free_cmd(cmd); 1907 return (ret); 1908 } 1909 1910 static int 1911 nvme_get_logpage(nvme_t *nvme, boolean_t user, void **buf, size_t *bufsize, 1912 uint8_t logpage, ...) 1913 { 1914 nvme_cmd_t *cmd = nvme_alloc_cmd(nvme, KM_SLEEP); 1915 nvme_getlogpage_t getlogpage = { 0 }; 1916 va_list ap; 1917 int ret; 1918 1919 va_start(ap, logpage); 1920 1921 cmd->nc_sqid = 0; 1922 cmd->nc_callback = nvme_wakeup_cmd; 1923 cmd->nc_sqe.sqe_opc = NVME_OPC_GET_LOG_PAGE; 1924 1925 if (user) 1926 cmd->nc_dontpanic = B_TRUE; 1927 1928 getlogpage.b.lp_lid = logpage; 1929 1930 switch (logpage) { 1931 case NVME_LOGPAGE_ERROR: 1932 cmd->nc_sqe.sqe_nsid = (uint32_t)-1; 1933 /* 1934 * The GET LOG PAGE command can use at most 2 pages to return 1935 * data, PRP lists are not supported. 
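 * With only PRP1 and PRP2 available, the transfer is capped at two
 * pages; with 4 KiB pages this limits an error log fetch to 8 KiB,
 * i.e. 128 of the 64 byte error log entries per command. lp_numd, set
 * below, is the transfer size in dwords, minus one.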
1936 */ 1937 *bufsize = MIN(2 * nvme->n_pagesize, 1938 nvme->n_error_log_len * sizeof (nvme_error_log_entry_t)); 1939 break; 1940 1941 case NVME_LOGPAGE_HEALTH: 1942 cmd->nc_sqe.sqe_nsid = va_arg(ap, uint32_t); 1943 *bufsize = sizeof (nvme_health_log_t); 1944 break; 1945 1946 case NVME_LOGPAGE_FWSLOT: 1947 cmd->nc_sqe.sqe_nsid = (uint32_t)-1; 1948 *bufsize = sizeof (nvme_fwslot_log_t); 1949 break; 1950 1951 default: 1952 dev_err(nvme->n_dip, CE_WARN, "!unknown log page requested: %d", 1953 logpage); 1954 atomic_inc_32(&nvme->n_unknown_logpage); 1955 ret = EINVAL; 1956 goto fail; 1957 } 1958 1959 va_end(ap); 1960 1961 getlogpage.b.lp_numd = *bufsize / sizeof (uint32_t) - 1; 1962 1963 cmd->nc_sqe.sqe_cdw10 = getlogpage.r; 1964 1965 if (nvme_zalloc_dma(nvme, *bufsize, 1966 DDI_DMA_READ, &nvme->n_prp_dma_attr, &cmd->nc_dma) != DDI_SUCCESS) { 1967 dev_err(nvme->n_dip, CE_WARN, 1968 "!nvme_zalloc_dma failed for GET LOG PAGE"); 1969 ret = ENOMEM; 1970 goto fail; 1971 } 1972 1973 if (cmd->nc_dma->nd_ncookie > 2) { 1974 dev_err(nvme->n_dip, CE_WARN, 1975 "!too many DMA cookies for GET LOG PAGE"); 1976 atomic_inc_32(&nvme->n_too_many_cookies); 1977 ret = ENOMEM; 1978 goto fail; 1979 } 1980 1981 cmd->nc_sqe.sqe_dptr.d_prp[0] = cmd->nc_dma->nd_cookie.dmac_laddress; 1982 if (cmd->nc_dma->nd_ncookie > 1) { 1983 ddi_dma_nextcookie(cmd->nc_dma->nd_dmah, 1984 &cmd->nc_dma->nd_cookie); 1985 cmd->nc_sqe.sqe_dptr.d_prp[1] = 1986 cmd->nc_dma->nd_cookie.dmac_laddress; 1987 } 1988 1989 nvme_admin_cmd(cmd, nvme_admin_cmd_timeout); 1990 1991 if ((ret = nvme_check_cmd_status(cmd)) != 0) { 1992 dev_err(nvme->n_dip, CE_WARN, 1993 "!GET LOG PAGE failed with sct = %x, sc = %x", 1994 cmd->nc_cqe.cqe_sf.sf_sct, cmd->nc_cqe.cqe_sf.sf_sc); 1995 goto fail; 1996 } 1997 1998 *buf = kmem_alloc(*bufsize, KM_SLEEP); 1999 bcopy(cmd->nc_dma->nd_memp, *buf, *bufsize); 2000 2001 fail: 2002 nvme_free_cmd(cmd); 2003 2004 return (ret); 2005 } 2006 2007 static int 2008 nvme_identify(nvme_t *nvme, boolean_t user, uint32_t nsid, void **buf) 2009 { 2010 nvme_cmd_t *cmd = nvme_alloc_cmd(nvme, KM_SLEEP); 2011 int ret; 2012 2013 if (buf == NULL) 2014 return (EINVAL); 2015 2016 cmd->nc_sqid = 0; 2017 cmd->nc_callback = nvme_wakeup_cmd; 2018 cmd->nc_sqe.sqe_opc = NVME_OPC_IDENTIFY; 2019 cmd->nc_sqe.sqe_nsid = nsid; 2020 cmd->nc_sqe.sqe_cdw10 = nsid ? 
NVME_IDENTIFY_NSID : NVME_IDENTIFY_CTRL; 2021 2022 if (nvme_zalloc_dma(nvme, NVME_IDENTIFY_BUFSIZE, DDI_DMA_READ, 2023 &nvme->n_prp_dma_attr, &cmd->nc_dma) != DDI_SUCCESS) { 2024 dev_err(nvme->n_dip, CE_WARN, 2025 "!nvme_zalloc_dma failed for IDENTIFY"); 2026 ret = ENOMEM; 2027 goto fail; 2028 } 2029 2030 if (cmd->nc_dma->nd_ncookie > 2) { 2031 dev_err(nvme->n_dip, CE_WARN, 2032 "!too many DMA cookies for IDENTIFY"); 2033 atomic_inc_32(&nvme->n_too_many_cookies); 2034 ret = ENOMEM; 2035 goto fail; 2036 } 2037 2038 cmd->nc_sqe.sqe_dptr.d_prp[0] = cmd->nc_dma->nd_cookie.dmac_laddress; 2039 if (cmd->nc_dma->nd_ncookie > 1) { 2040 ddi_dma_nextcookie(cmd->nc_dma->nd_dmah, 2041 &cmd->nc_dma->nd_cookie); 2042 cmd->nc_sqe.sqe_dptr.d_prp[1] = 2043 cmd->nc_dma->nd_cookie.dmac_laddress; 2044 } 2045 2046 if (user) 2047 cmd->nc_dontpanic = B_TRUE; 2048 2049 nvme_admin_cmd(cmd, nvme_admin_cmd_timeout); 2050 2051 if ((ret = nvme_check_cmd_status(cmd)) != 0) { 2052 dev_err(nvme->n_dip, CE_WARN, 2053 "!IDENTIFY failed with sct = %x, sc = %x", 2054 cmd->nc_cqe.cqe_sf.sf_sct, cmd->nc_cqe.cqe_sf.sf_sc); 2055 goto fail; 2056 } 2057 2058 *buf = kmem_alloc(NVME_IDENTIFY_BUFSIZE, KM_SLEEP); 2059 bcopy(cmd->nc_dma->nd_memp, *buf, NVME_IDENTIFY_BUFSIZE); 2060 2061 fail: 2062 nvme_free_cmd(cmd); 2063 2064 return (ret); 2065 } 2066 2067 static int 2068 nvme_set_features(nvme_t *nvme, boolean_t user, uint32_t nsid, uint8_t feature, 2069 uint32_t val, uint32_t *res) 2070 { 2071 _NOTE(ARGUNUSED(nsid)); 2072 nvme_cmd_t *cmd = nvme_alloc_cmd(nvme, KM_SLEEP); 2073 int ret = EINVAL; 2074 2075 ASSERT(res != NULL); 2076 2077 cmd->nc_sqid = 0; 2078 cmd->nc_callback = nvme_wakeup_cmd; 2079 cmd->nc_sqe.sqe_opc = NVME_OPC_SET_FEATURES; 2080 cmd->nc_sqe.sqe_cdw10 = feature; 2081 cmd->nc_sqe.sqe_cdw11 = val; 2082 2083 if (user) 2084 cmd->nc_dontpanic = B_TRUE; 2085 2086 switch (feature) { 2087 case NVME_FEAT_WRITE_CACHE: 2088 if (!nvme->n_write_cache_present) 2089 goto fail; 2090 break; 2091 2092 case NVME_FEAT_NQUEUES: 2093 break; 2094 2095 default: 2096 goto fail; 2097 } 2098 2099 nvme_admin_cmd(cmd, nvme_admin_cmd_timeout); 2100 2101 if ((ret = nvme_check_cmd_status(cmd)) != 0) { 2102 dev_err(nvme->n_dip, CE_WARN, 2103 "!SET FEATURES %d failed with sct = %x, sc = %x", 2104 feature, cmd->nc_cqe.cqe_sf.sf_sct, 2105 cmd->nc_cqe.cqe_sf.sf_sc); 2106 goto fail; 2107 } 2108 2109 *res = cmd->nc_cqe.cqe_dw0; 2110 2111 fail: 2112 nvme_free_cmd(cmd); 2113 return (ret); 2114 } 2115 2116 static int 2117 nvme_get_features(nvme_t *nvme, boolean_t user, uint32_t nsid, uint8_t feature, 2118 uint32_t *res, void **buf, size_t *bufsize) 2119 { 2120 nvme_cmd_t *cmd = nvme_alloc_cmd(nvme, KM_SLEEP); 2121 int ret = EINVAL; 2122 2123 ASSERT(res != NULL); 2124 2125 if (bufsize != NULL) 2126 *bufsize = 0; 2127 2128 cmd->nc_sqid = 0; 2129 cmd->nc_callback = nvme_wakeup_cmd; 2130 cmd->nc_sqe.sqe_opc = NVME_OPC_GET_FEATURES; 2131 cmd->nc_sqe.sqe_cdw10 = feature; 2132 cmd->nc_sqe.sqe_cdw11 = *res; 2133 2134 /* 2135 * For some of the optional features there doesn't seem to be a method 2136 * of detecting whether it is supported other than using it. This will 2137 * cause "Invalid Field in Command" error, which is normally considered 2138 * a programming error. Set the nc_dontpanic flag to override the panic 2139 * in nvme_check_generic_cmd_status(). 
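 *
 * In sketch form, the probe pattern used here is:
 *
 *	cmd->nc_dontpanic = B_TRUE;
 *	nvme_admin_cmd(cmd, ...);
 *	if (sf_sct == NVME_CQE_SCT_GENERIC &&
 *	    sf_sc == NVME_CQE_SC_GEN_INV_FLD)
 *		mark the feature as unsupported;
 *
 * (see the status handling further down).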
2140 */ 2141 switch (feature) { 2142 case NVME_FEAT_ARBITRATION: 2143 case NVME_FEAT_POWER_MGMT: 2144 case NVME_FEAT_TEMPERATURE: 2145 case NVME_FEAT_ERROR: 2146 case NVME_FEAT_NQUEUES: 2147 case NVME_FEAT_INTR_COAL: 2148 case NVME_FEAT_INTR_VECT: 2149 case NVME_FEAT_WRITE_ATOM: 2150 case NVME_FEAT_ASYNC_EVENT: 2151 break; 2152 2153 case NVME_FEAT_WRITE_CACHE: 2154 if (!nvme->n_write_cache_present) 2155 goto fail; 2156 break; 2157 2158 case NVME_FEAT_LBA_RANGE: 2159 if (!nvme->n_lba_range_supported) 2160 goto fail; 2161 2162 cmd->nc_dontpanic = B_TRUE; 2163 cmd->nc_sqe.sqe_nsid = nsid; 2164 ASSERT(bufsize != NULL); 2165 *bufsize = NVME_LBA_RANGE_BUFSIZE; 2166 break; 2167 2168 case NVME_FEAT_AUTO_PST: 2169 if (!nvme->n_auto_pst_supported) 2170 goto fail; 2171 2172 ASSERT(bufsize != NULL); 2173 *bufsize = NVME_AUTO_PST_BUFSIZE; 2174 break; 2175 2176 case NVME_FEAT_PROGRESS: 2177 if (!nvme->n_progress_supported) 2178 goto fail; 2179 2180 cmd->nc_dontpanic = B_TRUE; 2181 break; 2182 2183 default: 2184 goto fail; 2185 } 2186 2187 if (user) 2188 cmd->nc_dontpanic = B_TRUE; 2189 2190 if (bufsize != NULL && *bufsize != 0) { 2191 if (nvme_zalloc_dma(nvme, *bufsize, DDI_DMA_READ, 2192 &nvme->n_prp_dma_attr, &cmd->nc_dma) != DDI_SUCCESS) { 2193 dev_err(nvme->n_dip, CE_WARN, 2194 "!nvme_zalloc_dma failed for GET FEATURES"); 2195 ret = ENOMEM; 2196 goto fail; 2197 } 2198 2199 if (cmd->nc_dma->nd_ncookie > 2) { 2200 dev_err(nvme->n_dip, CE_WARN, 2201 "!too many DMA cookies for GET FEATURES"); 2202 atomic_inc_32(&nvme->n_too_many_cookies); 2203 ret = ENOMEM; 2204 goto fail; 2205 } 2206 2207 cmd->nc_sqe.sqe_dptr.d_prp[0] = 2208 cmd->nc_dma->nd_cookie.dmac_laddress; 2209 if (cmd->nc_dma->nd_ncookie > 1) { 2210 ddi_dma_nextcookie(cmd->nc_dma->nd_dmah, 2211 &cmd->nc_dma->nd_cookie); 2212 cmd->nc_sqe.sqe_dptr.d_prp[1] = 2213 cmd->nc_dma->nd_cookie.dmac_laddress; 2214 } 2215 } 2216 2217 nvme_admin_cmd(cmd, nvme_admin_cmd_timeout); 2218 2219 if ((ret = nvme_check_cmd_status(cmd)) != 0) { 2220 boolean_t known = B_TRUE; 2221 2222 /* Check if this is unsupported optional feature */ 2223 if (cmd->nc_cqe.cqe_sf.sf_sct == NVME_CQE_SCT_GENERIC && 2224 cmd->nc_cqe.cqe_sf.sf_sc == NVME_CQE_SC_GEN_INV_FLD) { 2225 switch (feature) { 2226 case NVME_FEAT_LBA_RANGE: 2227 nvme->n_lba_range_supported = B_FALSE; 2228 break; 2229 case NVME_FEAT_PROGRESS: 2230 nvme->n_progress_supported = B_FALSE; 2231 break; 2232 default: 2233 known = B_FALSE; 2234 break; 2235 } 2236 } else { 2237 known = B_FALSE; 2238 } 2239 2240 /* Report the error otherwise */ 2241 if (!known) { 2242 dev_err(nvme->n_dip, CE_WARN, 2243 "!GET FEATURES %d failed with sct = %x, sc = %x", 2244 feature, cmd->nc_cqe.cqe_sf.sf_sct, 2245 cmd->nc_cqe.cqe_sf.sf_sc); 2246 } 2247 2248 goto fail; 2249 } 2250 2251 if (bufsize != NULL && *bufsize != 0) { 2252 ASSERT(buf != NULL); 2253 *buf = kmem_alloc(*bufsize, KM_SLEEP); 2254 bcopy(cmd->nc_dma->nd_memp, *buf, *bufsize); 2255 } 2256 2257 *res = cmd->nc_cqe.cqe_dw0; 2258 2259 fail: 2260 nvme_free_cmd(cmd); 2261 return (ret); 2262 } 2263 2264 static int 2265 nvme_write_cache_set(nvme_t *nvme, boolean_t enable) 2266 { 2267 nvme_write_cache_t nwc = { 0 }; 2268 2269 if (enable) 2270 nwc.b.wc_wce = 1; 2271 2272 return (nvme_set_features(nvme, B_FALSE, 0, NVME_FEAT_WRITE_CACHE, 2273 nwc.r, &nwc.r)); 2274 } 2275 2276 static int 2277 nvme_set_nqueues(nvme_t *nvme) 2278 { 2279 nvme_nqueues_t nq = { 0 }; 2280 int ret; 2281 2282 /* 2283 * The default is to allocate one completion queue per vector. 
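 * For example, with hypothetical tunable values: given 8 interrupt
 * vectors and neither tunable set, 8 completion and 8 submission
 * queues are requested; with "max-completion-queues"=2 and
 * "max-submission-queues"=8 the 8 submission queues end up sharing 2
 * completion queues.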
2284 */ 2285 if (nvme->n_completion_queues == -1) 2286 nvme->n_completion_queues = nvme->n_intr_cnt; 2287 2288 /* 2289 * There is no point in having more completion queues than 2290 * interrupt vectors. 2291 */ 2292 nvme->n_completion_queues = MIN(nvme->n_completion_queues, 2293 nvme->n_intr_cnt); 2294 2295 /* 2296 * The default is to use one submission queue per completion queue. 2297 */ 2298 if (nvme->n_submission_queues == -1) 2299 nvme->n_submission_queues = nvme->n_completion_queues; 2300 2301 /* 2302 * There is no point in having more completion queues than 2303 * submission queues. 2304 */ 2305 nvme->n_completion_queues = MIN(nvme->n_completion_queues, 2306 nvme->n_submission_queues); 2307 2308 ASSERT(nvme->n_submission_queues > 0); 2309 ASSERT(nvme->n_completion_queues > 0); 2310 2311 nq.b.nq_nsq = nvme->n_submission_queues - 1; 2312 nq.b.nq_ncq = nvme->n_completion_queues - 1; 2313 2314 ret = nvme_set_features(nvme, B_FALSE, 0, NVME_FEAT_NQUEUES, nq.r, 2315 &nq.r); 2316 2317 if (ret == 0) { 2318 /* 2319 * Never use more than the requested number of queues. 2320 */ 2321 nvme->n_submission_queues = MIN(nvme->n_submission_queues, 2322 nq.b.nq_nsq + 1); 2323 nvme->n_completion_queues = MIN(nvme->n_completion_queues, 2324 nq.b.nq_ncq + 1); 2325 } 2326 2327 return (ret); 2328 } 2329 2330 static int 2331 nvme_create_completion_queue(nvme_t *nvme, nvme_cq_t *cq) 2332 { 2333 nvme_cmd_t *cmd = nvme_alloc_cmd(nvme, KM_SLEEP); 2334 nvme_create_queue_dw10_t dw10 = { 0 }; 2335 nvme_create_cq_dw11_t c_dw11 = { 0 }; 2336 int ret; 2337 2338 dw10.b.q_qid = cq->ncq_id; 2339 dw10.b.q_qsize = cq->ncq_nentry - 1; 2340 2341 c_dw11.b.cq_pc = 1; 2342 c_dw11.b.cq_ien = 1; 2343 c_dw11.b.cq_iv = cq->ncq_id % nvme->n_intr_cnt; 2344 2345 cmd->nc_sqid = 0; 2346 cmd->nc_callback = nvme_wakeup_cmd; 2347 cmd->nc_sqe.sqe_opc = NVME_OPC_CREATE_CQUEUE; 2348 cmd->nc_sqe.sqe_cdw10 = dw10.r; 2349 cmd->nc_sqe.sqe_cdw11 = c_dw11.r; 2350 cmd->nc_sqe.sqe_dptr.d_prp[0] = cq->ncq_dma->nd_cookie.dmac_laddress; 2351 2352 nvme_admin_cmd(cmd, nvme_admin_cmd_timeout); 2353 2354 if ((ret = nvme_check_cmd_status(cmd)) != 0) { 2355 dev_err(nvme->n_dip, CE_WARN, 2356 "!CREATE CQUEUE failed with sct = %x, sc = %x", 2357 cmd->nc_cqe.cqe_sf.sf_sct, cmd->nc_cqe.cqe_sf.sf_sc); 2358 } 2359 2360 nvme_free_cmd(cmd); 2361 2362 return (ret); 2363 } 2364 2365 static int 2366 nvme_create_io_qpair(nvme_t *nvme, nvme_qpair_t *qp, uint16_t idx) 2367 { 2368 nvme_cq_t *cq = qp->nq_cq; 2369 nvme_cmd_t *cmd; 2370 nvme_create_queue_dw10_t dw10 = { 0 }; 2371 nvme_create_sq_dw11_t s_dw11 = { 0 }; 2372 int ret; 2373 2374 /* 2375 * It is possible to have more qpairs than completion queues, 2376 * and when the idx > ncq_id, that completion queue is shared 2377 * and has already been created.
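 * With 4 qpairs sharing 2 I/O completion queues, for instance, the
 * first two CREATE calls also create the two completion queues, while
 * qpairs 3 and 4 find idx > ncq_id and only create their submission
 * queues.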
2378 */ 2379 if (idx <= cq->ncq_id && 2380 nvme_create_completion_queue(nvme, cq) != DDI_SUCCESS) 2381 return (DDI_FAILURE); 2382 2383 dw10.b.q_qid = idx; 2384 dw10.b.q_qsize = qp->nq_nentry - 1; 2385 2386 s_dw11.b.sq_pc = 1; 2387 s_dw11.b.sq_cqid = cq->ncq_id; 2388 2389 cmd = nvme_alloc_cmd(nvme, KM_SLEEP); 2390 cmd->nc_sqid = 0; 2391 cmd->nc_callback = nvme_wakeup_cmd; 2392 cmd->nc_sqe.sqe_opc = NVME_OPC_CREATE_SQUEUE; 2393 cmd->nc_sqe.sqe_cdw10 = dw10.r; 2394 cmd->nc_sqe.sqe_cdw11 = s_dw11.r; 2395 cmd->nc_sqe.sqe_dptr.d_prp[0] = qp->nq_sqdma->nd_cookie.dmac_laddress; 2396 2397 nvme_admin_cmd(cmd, nvme_admin_cmd_timeout); 2398 2399 if ((ret = nvme_check_cmd_status(cmd)) != 0) { 2400 dev_err(nvme->n_dip, CE_WARN, 2401 "!CREATE SQUEUE failed with sct = %x, sc = %x", 2402 cmd->nc_cqe.cqe_sf.sf_sct, cmd->nc_cqe.cqe_sf.sf_sc); 2403 } 2404 2405 nvme_free_cmd(cmd); 2406 2407 return (ret); 2408 } 2409 2410 static boolean_t 2411 nvme_reset(nvme_t *nvme, boolean_t quiesce) 2412 { 2413 nvme_reg_csts_t csts; 2414 int i; 2415 2416 nvme_put32(nvme, NVME_REG_CC, 0); 2417 2418 csts.r = nvme_get32(nvme, NVME_REG_CSTS); 2419 if (csts.b.csts_rdy == 1) { 2420 nvme_put32(nvme, NVME_REG_CC, 0); 2421 for (i = 0; i != nvme->n_timeout * 10; i++) { 2422 csts.r = nvme_get32(nvme, NVME_REG_CSTS); 2423 if (csts.b.csts_rdy == 0) 2424 break; 2425 2426 if (quiesce) 2427 drv_usecwait(50000); 2428 else 2429 delay(drv_usectohz(50000)); 2430 } 2431 } 2432 2433 nvme_put32(nvme, NVME_REG_AQA, 0); 2434 nvme_put32(nvme, NVME_REG_ASQ, 0); 2435 nvme_put32(nvme, NVME_REG_ACQ, 0); 2436 2437 csts.r = nvme_get32(nvme, NVME_REG_CSTS); 2438 return (csts.b.csts_rdy == 0 ? B_TRUE : B_FALSE); 2439 } 2440 2441 static void 2442 nvme_shutdown(nvme_t *nvme, int mode, boolean_t quiesce) 2443 { 2444 nvme_reg_cc_t cc; 2445 nvme_reg_csts_t csts; 2446 int i; 2447 2448 ASSERT(mode == NVME_CC_SHN_NORMAL || mode == NVME_CC_SHN_ABRUPT); 2449 2450 cc.r = nvme_get32(nvme, NVME_REG_CC); 2451 cc.b.cc_shn = mode & 0x3; 2452 nvme_put32(nvme, NVME_REG_CC, cc.r); 2453 2454 for (i = 0; i != 10; i++) { 2455 csts.r = nvme_get32(nvme, NVME_REG_CSTS); 2456 if (csts.b.csts_shst == NVME_CSTS_SHN_COMPLETE) 2457 break; 2458 2459 if (quiesce) 2460 drv_usecwait(100000); 2461 else 2462 delay(drv_usectohz(100000)); 2463 } 2464 } 2465 2466 2467 static void 2468 nvme_prepare_devid(nvme_t *nvme, uint32_t nsid) 2469 { 2470 /* 2471 * Section 7.7 of the spec describes how to get a unique ID for 2472 * the controller: the vendor ID, the model name and the serial 2473 * number shall be unique when combined. 2474 * 2475 * If a namespace has no EUI64 we use the above and add the hex 2476 * namespace ID to get a unique ID for the namespace. 
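 * With made-up identify data, e.g. vid 0x8086, model "Gadget", serial
 * "S1234" and nsid 1, the format string below yields the devid
 * "8086-Gadget-S1234-1".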
2477 */ 2478 char model[sizeof (nvme->n_idctl->id_model) + 1]; 2479 char serial[sizeof (nvme->n_idctl->id_serial) + 1]; 2480 2481 bcopy(nvme->n_idctl->id_model, model, sizeof (nvme->n_idctl->id_model)); 2482 bcopy(nvme->n_idctl->id_serial, serial, 2483 sizeof (nvme->n_idctl->id_serial)); 2484 2485 model[sizeof (nvme->n_idctl->id_model)] = '\0'; 2486 serial[sizeof (nvme->n_idctl->id_serial)] = '\0'; 2487 2488 nvme->n_ns[nsid - 1].ns_devid = kmem_asprintf("%4X-%s-%s-%X", 2489 nvme->n_idctl->id_vid, model, serial, nsid); 2490 } 2491 2492 static int 2493 nvme_init_ns(nvme_t *nvme, int nsid) 2494 { 2495 nvme_namespace_t *ns = &nvme->n_ns[nsid - 1]; 2496 nvme_identify_nsid_t *idns; 2497 int last_rp; 2498 2499 ns->ns_nvme = nvme; 2500 2501 if (nvme_identify(nvme, B_FALSE, nsid, (void **)&idns) != 0) { 2502 dev_err(nvme->n_dip, CE_WARN, 2503 "!failed to identify namespace %d", nsid); 2504 return (DDI_FAILURE); 2505 } 2506 2507 ns->ns_idns = idns; 2508 ns->ns_id = nsid; 2509 ns->ns_block_count = idns->id_nsize; 2510 ns->ns_block_size = 2511 1 << idns->id_lbaf[idns->id_flbas.lba_format].lbaf_lbads; 2512 ns->ns_best_block_size = ns->ns_block_size; 2513 2514 /* 2515 * Get the EUI64 if present. Use it for devid and device node names. 2516 */ 2517 if (NVME_VERSION_ATLEAST(&nvme->n_version, 1, 1)) 2518 bcopy(idns->id_eui64, ns->ns_eui64, sizeof (ns->ns_eui64)); 2519 2520 /*LINTED: E_BAD_PTR_CAST_ALIGN*/ 2521 if (*(uint64_t *)ns->ns_eui64 != 0) { 2522 uint8_t *eui64 = ns->ns_eui64; 2523 2524 (void) snprintf(ns->ns_name, sizeof (ns->ns_name), 2525 "%02x%02x%02x%02x%02x%02x%02x%02x", 2526 eui64[0], eui64[1], eui64[2], eui64[3], 2527 eui64[4], eui64[5], eui64[6], eui64[7]); 2528 } else { 2529 (void) snprintf(ns->ns_name, sizeof (ns->ns_name), "%d", 2530 ns->ns_id); 2531 2532 nvme_prepare_devid(nvme, ns->ns_id); 2533 } 2534 2535 /* 2536 * Find the LBA format with no metadata and the best relative 2537 * performance. A value of 3 means "degraded", 0 is best. 
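 * E.g. given three hypothetical formats (LBAF 0: 512b, no metadata,
 * rp 2; LBAF 1: 4096b, no metadata, rp 0; LBAF 2: 512b + 8b metadata,
 * rp 0), the loop below skips LBAF 2 because of its metadata and
 * settles on LBAF 1, making ns_best_block_size 4096.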
2538 */ 2539 last_rp = 3; 2540 for (int j = 0; j <= idns->id_nlbaf; j++) { 2541 if (idns->id_lbaf[j].lbaf_lbads == 0) 2542 break; 2543 if (idns->id_lbaf[j].lbaf_ms != 0) 2544 continue; 2545 if (idns->id_lbaf[j].lbaf_rp >= last_rp) 2546 continue; 2547 last_rp = idns->id_lbaf[j].lbaf_rp; 2548 ns->ns_best_block_size = 2549 1 << idns->id_lbaf[j].lbaf_lbads; 2550 } 2551 2552 if (ns->ns_best_block_size < nvme->n_min_block_size) 2553 ns->ns_best_block_size = nvme->n_min_block_size; 2554 2555 /* 2556 * We currently don't support namespaces that use either: 2557 * - protection information 2558 * - illegal block size (< 512) 2559 */ 2560 if (idns->id_dps.dp_pinfo) { 2561 dev_err(nvme->n_dip, CE_WARN, 2562 "!ignoring namespace %d, unsupported feature: " 2563 "pinfo = %d", nsid, idns->id_dps.dp_pinfo); 2564 ns->ns_ignore = B_TRUE; 2565 } else if (ns->ns_block_size < 512) { 2566 dev_err(nvme->n_dip, CE_WARN, 2567 "!ignoring namespace %d, unsupported block size %"PRIu64, 2568 nsid, (uint64_t)ns->ns_block_size); 2569 ns->ns_ignore = B_TRUE; 2570 } else { 2571 ns->ns_ignore = B_FALSE; 2572 } 2573 2574 return (DDI_SUCCESS); 2575 } 2576 2577 static int 2578 nvme_init(nvme_t *nvme) 2579 { 2580 nvme_reg_cc_t cc = { 0 }; 2581 nvme_reg_aqa_t aqa = { 0 }; 2582 nvme_reg_asq_t asq = { 0 }; 2583 nvme_reg_acq_t acq = { 0 }; 2584 nvme_reg_cap_t cap; 2585 nvme_reg_vs_t vs; 2586 nvme_reg_csts_t csts; 2587 int i = 0; 2588 uint16_t nqueues; 2589 char model[sizeof (nvme->n_idctl->id_model) + 1]; 2590 char *vendor, *product; 2591 2592 /* Check controller version */ 2593 vs.r = nvme_get32(nvme, NVME_REG_VS); 2594 nvme->n_version.v_major = vs.b.vs_mjr; 2595 nvme->n_version.v_minor = vs.b.vs_mnr; 2596 dev_err(nvme->n_dip, CE_CONT, "?NVMe spec version %d.%d", 2597 nvme->n_version.v_major, nvme->n_version.v_minor); 2598 2599 if (nvme->n_version.v_major > nvme_version_major) { 2600 dev_err(nvme->n_dip, CE_WARN, "!no support for version > %d.x", 2601 nvme_version_major); 2602 if (nvme->n_strict_version) 2603 goto fail; 2604 } 2605 2606 /* retrieve controller configuration */ 2607 cap.r = nvme_get64(nvme, NVME_REG_CAP); 2608 2609 if ((cap.b.cap_css & NVME_CAP_CSS_NVM) == 0) { 2610 dev_err(nvme->n_dip, CE_WARN, 2611 "!NVM command set not supported by hardware"); 2612 goto fail; 2613 } 2614 2615 nvme->n_nssr_supported = cap.b.cap_nssrs; 2616 nvme->n_doorbell_stride = 4 << cap.b.cap_dstrd; 2617 nvme->n_timeout = cap.b.cap_to; 2618 nvme->n_arbitration_mechanisms = cap.b.cap_ams; 2619 nvme->n_cont_queues_reqd = cap.b.cap_cqr; 2620 nvme->n_max_queue_entries = cap.b.cap_mqes + 1; 2621 2622 /* 2623 * The MPSMIN and MPSMAX fields in the CAP register use 0 to specify 2624 * the base page size of 4k (1<<12), so add 12 here to get the real 2625 * page size value. 2626 */ 2627 nvme->n_pageshift = MIN(MAX(cap.b.cap_mpsmin + 12, PAGESHIFT), 2628 cap.b.cap_mpsmax + 12); 2629 nvme->n_pagesize = 1UL << (nvme->n_pageshift); 2630 2631 /* 2632 * Set up Queue DMA to transfer at least 1 page-aligned page at a time. 2633 */ 2634 nvme->n_queue_dma_attr.dma_attr_align = nvme->n_pagesize; 2635 nvme->n_queue_dma_attr.dma_attr_minxfer = nvme->n_pagesize; 2636 2637 /* 2638 * Set up PRP DMA to transfer 1 page-aligned page at a time. 2639 * Maxxfer may be increased after we identified the controller limits. 
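 * The dma_attr_seg of n_pagesize - 1 guarantees that no single DMA
 * cookie crosses a page boundary, which is what makes each cookie
 * directly usable as a PRP entry.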
2640 */ 2641 nvme->n_prp_dma_attr.dma_attr_maxxfer = nvme->n_pagesize; 2642 nvme->n_prp_dma_attr.dma_attr_minxfer = nvme->n_pagesize; 2643 nvme->n_prp_dma_attr.dma_attr_align = nvme->n_pagesize; 2644 nvme->n_prp_dma_attr.dma_attr_seg = nvme->n_pagesize - 1; 2645 2646 /* 2647 * Reset controller if it's still in ready state. 2648 */ 2649 if (nvme_reset(nvme, B_FALSE) == B_FALSE) { 2650 dev_err(nvme->n_dip, CE_WARN, "!unable to reset controller"); 2651 ddi_fm_service_impact(nvme->n_dip, DDI_SERVICE_LOST); 2652 nvme->n_dead = B_TRUE; 2653 goto fail; 2654 } 2655 2656 /* 2657 * Create the cq array with one completion queue to be assigned 2658 * to the admin queue pair. 2659 */ 2660 if (nvme_create_cq_array(nvme, 1, nvme->n_admin_queue_len) != 2661 DDI_SUCCESS) { 2662 dev_err(nvme->n_dip, CE_WARN, 2663 "!failed to pre-allocate admin completion queue"); 2664 goto fail; 2665 } 2666 /* 2667 * Create the admin queue pair. 2668 */ 2669 if (nvme_alloc_qpair(nvme, nvme->n_admin_queue_len, &nvme->n_adminq, 0) 2670 != DDI_SUCCESS) { 2671 dev_err(nvme->n_dip, CE_WARN, 2672 "!unable to allocate admin qpair"); 2673 goto fail; 2674 } 2675 nvme->n_ioq = kmem_alloc(sizeof (nvme_qpair_t *), KM_SLEEP); 2676 nvme->n_ioq[0] = nvme->n_adminq; 2677 2678 nvme->n_progress |= NVME_ADMIN_QUEUE; 2679 2680 (void) ddi_prop_update_int(DDI_DEV_T_NONE, nvme->n_dip, 2681 "admin-queue-len", nvme->n_admin_queue_len); 2682 2683 aqa.b.aqa_asqs = aqa.b.aqa_acqs = nvme->n_admin_queue_len - 1; 2684 asq = nvme->n_adminq->nq_sqdma->nd_cookie.dmac_laddress; 2685 acq = nvme->n_adminq->nq_cq->ncq_dma->nd_cookie.dmac_laddress; 2686 2687 ASSERT((asq & (nvme->n_pagesize - 1)) == 0); 2688 ASSERT((acq & (nvme->n_pagesize - 1)) == 0); 2689 2690 nvme_put32(nvme, NVME_REG_AQA, aqa.r); 2691 nvme_put64(nvme, NVME_REG_ASQ, asq); 2692 nvme_put64(nvme, NVME_REG_ACQ, acq); 2693 2694 cc.b.cc_ams = 0; /* use Round-Robin arbitration */ 2695 cc.b.cc_css = 0; /* use NVM command set */ 2696 cc.b.cc_mps = nvme->n_pageshift - 12; 2697 cc.b.cc_shn = 0; /* no shutdown in progress */ 2698 cc.b.cc_en = 1; /* enable controller */ 2699 cc.b.cc_iosqes = 6; /* submission queue entry is 2^6 bytes long */ 2700 cc.b.cc_iocqes = 4; /* completion queue entry is 2^4 bytes long */ 2701 2702 nvme_put32(nvme, NVME_REG_CC, cc.r); 2703 2704 /* 2705 * Wait for the controller to become ready. 2706 */ 2707 csts.r = nvme_get32(nvme, NVME_REG_CSTS); 2708 if (csts.b.csts_rdy == 0) { 2709 for (i = 0; i != nvme->n_timeout * 10; i++) { 2710 delay(drv_usectohz(50000)); 2711 csts.r = nvme_get32(nvme, NVME_REG_CSTS); 2712 2713 if (csts.b.csts_cfs == 1) { 2714 dev_err(nvme->n_dip, CE_WARN, 2715 "!controller fatal status at init"); 2716 ddi_fm_service_impact(nvme->n_dip, 2717 DDI_SERVICE_LOST); 2718 nvme->n_dead = B_TRUE; 2719 goto fail; 2720 } 2721 2722 if (csts.b.csts_rdy == 1) 2723 break; 2724 } 2725 } 2726 2727 if (csts.b.csts_rdy == 0) { 2728 dev_err(nvme->n_dip, CE_WARN, "!controller not ready"); 2729 ddi_fm_service_impact(nvme->n_dip, DDI_SERVICE_LOST); 2730 nvme->n_dead = B_TRUE; 2731 goto fail; 2732 } 2733 2734 /* 2735 * Assume an abort command limit of 1. We'll destroy and re-init 2736 * that later when we know the true abort command limit. 2737 */ 2738 sema_init(&nvme->n_abort_sema, 1, NULL, SEMA_DRIVER, NULL); 2739 2740 /* 2741 * Setup initial interrupt for admin queue. 
2742 */ 2743 if ((nvme_setup_interrupts(nvme, DDI_INTR_TYPE_MSIX, 1) 2744 != DDI_SUCCESS) && 2745 (nvme_setup_interrupts(nvme, DDI_INTR_TYPE_MSI, 1) 2746 != DDI_SUCCESS) && 2747 (nvme_setup_interrupts(nvme, DDI_INTR_TYPE_FIXED, 1) 2748 != DDI_SUCCESS)) { 2749 dev_err(nvme->n_dip, CE_WARN, 2750 "!failed to setup initial interrupt"); 2751 goto fail; 2752 } 2753 2754 /* 2755 * Post an asynchronous event command to catch errors. 2756 * We assume the asynchronous events are supported as required by 2757 * specification (Figure 40 in section 5 of NVMe 1.2). 2758 * However, since at least qemu does not follow the specification, 2759 * we need a mechanism to protect ourselves. 2760 */ 2761 nvme->n_async_event_supported = B_TRUE; 2762 nvme_async_event(nvme); 2763 2764 /* 2765 * Identify Controller 2766 */ 2767 if (nvme_identify(nvme, B_FALSE, 0, (void **)&nvme->n_idctl) != 0) { 2768 dev_err(nvme->n_dip, CE_WARN, 2769 "!failed to identify controller"); 2770 goto fail; 2771 } 2772 2773 /* 2774 * Get Vendor & Product ID 2775 */ 2776 bcopy(nvme->n_idctl->id_model, model, sizeof (nvme->n_idctl->id_model)); 2777 model[sizeof (nvme->n_idctl->id_model)] = '\0'; 2778 sata_split_model(model, &vendor, &product); 2779 2780 if (vendor == NULL) 2781 nvme->n_vendor = strdup("NVMe"); 2782 else 2783 nvme->n_vendor = strdup(vendor); 2784 2785 nvme->n_product = strdup(product); 2786 2787 /* 2788 * Get controller limits. 2789 */ 2790 nvme->n_async_event_limit = MAX(NVME_MIN_ASYNC_EVENT_LIMIT, 2791 MIN(nvme->n_admin_queue_len / 10, 2792 MIN(nvme->n_idctl->id_aerl + 1, nvme->n_async_event_limit))); 2793 2794 (void) ddi_prop_update_int(DDI_DEV_T_NONE, nvme->n_dip, 2795 "async-event-limit", nvme->n_async_event_limit); 2796 2797 nvme->n_abort_command_limit = nvme->n_idctl->id_acl + 1; 2798 2799 /* 2800 * Reinitialize the semaphore with the true abort command limit 2801 * supported by the hardware. It's not necessary to disable interrupts 2802 * as only command aborts use the semaphore, and no commands are 2803 * executed or aborted while we're here. 2804 */ 2805 sema_destroy(&nvme->n_abort_sema); 2806 sema_init(&nvme->n_abort_sema, nvme->n_abort_command_limit - 1, NULL, 2807 SEMA_DRIVER, NULL); 2808 2809 nvme->n_progress |= NVME_CTRL_LIMITS; 2810 2811 if (nvme->n_idctl->id_mdts == 0) 2812 nvme->n_max_data_transfer_size = nvme->n_pagesize * 65536; 2813 else 2814 nvme->n_max_data_transfer_size = 2815 1ull << (nvme->n_pageshift + nvme->n_idctl->id_mdts); 2816 2817 nvme->n_error_log_len = nvme->n_idctl->id_elpe + 1; 2818 2819 /* 2820 * Limit n_max_data_transfer_size to what we can handle in one PRP. 2821 * Chained PRPs are currently unsupported. 2822 * 2823 * This is a no-op on hardware which doesn't support a transfer size 2824 * big enough to require chained PRPs. 2825 */ 2826 nvme->n_max_data_transfer_size = MIN(nvme->n_max_data_transfer_size, 2827 (nvme->n_pagesize / sizeof (uint64_t) * nvme->n_pagesize)); 2828 2829 nvme->n_prp_dma_attr.dma_attr_maxxfer = nvme->n_max_data_transfer_size; 2830 2831 /* 2832 * Make sure the minimum/maximum queue entry sizes are not 2833 * larger/smaller than the default. 2834 */ 2835 2836 if (((1 << nvme->n_idctl->id_sqes.qes_min) > sizeof (nvme_sqe_t)) || 2837 ((1 << nvme->n_idctl->id_sqes.qes_max) < sizeof (nvme_sqe_t)) || 2838 ((1 << nvme->n_idctl->id_cqes.qes_min) > sizeof (nvme_cqe_t)) || 2839 ((1 << nvme->n_idctl->id_cqes.qes_max) < sizeof (nvme_cqe_t))) 2840 goto fail; 2841 2842 /* 2843 * Check for the presence of a Volatile Write Cache. 
If present, 2844 * enable or disable based on the value of the property 2845 * volatile-write-cache-enable (default is enabled). 2846 */ 2847 nvme->n_write_cache_present = 2848 nvme->n_idctl->id_vwc.vwc_present == 0 ? B_FALSE : B_TRUE; 2849 2850 (void) ddi_prop_update_int(DDI_DEV_T_NONE, nvme->n_dip, 2851 "volatile-write-cache-present", 2852 nvme->n_write_cache_present ? 1 : 0); 2853 2854 if (!nvme->n_write_cache_present) { 2855 nvme->n_write_cache_enabled = B_FALSE; 2856 } else if (nvme_write_cache_set(nvme, nvme->n_write_cache_enabled) 2857 != 0) { 2858 dev_err(nvme->n_dip, CE_WARN, 2859 "!failed to %sable volatile write cache", 2860 nvme->n_write_cache_enabled ? "en" : "dis"); 2861 /* 2862 * Assume the cache is (still) enabled. 2863 */ 2864 nvme->n_write_cache_enabled = B_TRUE; 2865 } 2866 2867 (void) ddi_prop_update_int(DDI_DEV_T_NONE, nvme->n_dip, 2868 "volatile-write-cache-enable", 2869 nvme->n_write_cache_enabled ? 1 : 0); 2870 2871 /* 2872 * Assume LBA Range Type feature is supported. If it isn't this 2873 * will be set to B_FALSE by nvme_get_features(). 2874 */ 2875 nvme->n_lba_range_supported = B_TRUE; 2876 2877 /* 2878 * Check support for Autonomous Power State Transition. 2879 */ 2880 if (NVME_VERSION_ATLEAST(&nvme->n_version, 1, 1)) 2881 nvme->n_auto_pst_supported = 2882 nvme->n_idctl->id_apsta.ap_sup == 0 ? B_FALSE : B_TRUE; 2883 2884 /* 2885 * Assume Software Progress Marker feature is supported. If it isn't 2886 * this will be set to B_FALSE by nvme_get_features(). 2887 */ 2888 nvme->n_progress_supported = B_TRUE; 2889 2890 /* 2891 * Identify Namespaces 2892 */ 2893 nvme->n_namespace_count = nvme->n_idctl->id_nn; 2894 2895 if (nvme->n_namespace_count == 0) { 2896 dev_err(nvme->n_dip, CE_WARN, 2897 "!controllers without namespaces are not supported"); 2898 goto fail; 2899 } 2900 2901 if (nvme->n_namespace_count > NVME_MINOR_MAX) { 2902 dev_err(nvme->n_dip, CE_WARN, 2903 "!too many namespaces: %d, limiting to %d\n", 2904 nvme->n_namespace_count, NVME_MINOR_MAX); 2905 nvme->n_namespace_count = NVME_MINOR_MAX; 2906 } 2907 2908 nvme->n_ns = kmem_zalloc(sizeof (nvme_namespace_t) * 2909 nvme->n_namespace_count, KM_SLEEP); 2910 2911 for (i = 0; i != nvme->n_namespace_count; i++) { 2912 mutex_init(&nvme->n_ns[i].ns_minor.nm_mutex, NULL, MUTEX_DRIVER, 2913 NULL); 2914 if (nvme_init_ns(nvme, i + 1) != DDI_SUCCESS) 2915 goto fail; 2916 } 2917 2918 /* 2919 * Try to set up MSI/MSI-X interrupts. 2920 */ 2921 if ((nvme->n_intr_types & (DDI_INTR_TYPE_MSI | DDI_INTR_TYPE_MSIX)) 2922 != 0) { 2923 nvme_release_interrupts(nvme); 2924 2925 nqueues = MIN(UINT16_MAX, ncpus); 2926 2927 if ((nvme_setup_interrupts(nvme, DDI_INTR_TYPE_MSIX, 2928 nqueues) != DDI_SUCCESS) && 2929 (nvme_setup_interrupts(nvme, DDI_INTR_TYPE_MSI, 2930 nqueues) != DDI_SUCCESS)) { 2931 dev_err(nvme->n_dip, CE_WARN, 2932 "!failed to setup MSI/MSI-X interrupts"); 2933 goto fail; 2934 } 2935 } 2936 2937 /* 2938 * Create I/O queue pairs. 2939 */ 2940 2941 if (nvme_set_nqueues(nvme) != 0) { 2942 dev_err(nvme->n_dip, CE_WARN, 2943 "!failed to set number of I/O queues to %d", 2944 nvme->n_intr_cnt); 2945 goto fail; 2946 } 2947 2948 /* 2949 * Reallocate I/O queue array 2950 */ 2951 kmem_free(nvme->n_ioq, sizeof (nvme_qpair_t *)); 2952 nvme->n_ioq = kmem_zalloc(sizeof (nvme_qpair_t *) * 2953 (nvme->n_submission_queues + 1), KM_SLEEP); 2954 nvme->n_ioq[0] = nvme->n_adminq; 2955 2956 /* 2957 * There should always be at least as many submission queues 2958 * as completion queues. 
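 * n_ioq[] is indexed by submission queue id: slot 0 holds the admin
 * qpair and slots 1 .. n_ioq_count hold the I/O qpairs, hence the
 * n_submission_queues + 1 slots allocated above.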
2959 */ 2960 ASSERT(nvme->n_submission_queues >= nvme->n_completion_queues); 2961 2962 nvme->n_ioq_count = nvme->n_submission_queues; 2963 2964 nvme->n_io_squeue_len = 2965 MIN(nvme->n_io_squeue_len, nvme->n_max_queue_entries); 2966 2967 (void) ddi_prop_update_int(DDI_DEV_T_NONE, nvme->n_dip, "io-squeue-len", 2968 nvme->n_io_squeue_len); 2969 2970 /* 2971 * Pre-allocate completion queues. 2972 * When there are the same number of submission and completion 2973 * queues there is no value in having a larger completion 2974 * queue length. 2975 */ 2976 if (nvme->n_submission_queues == nvme->n_completion_queues) 2977 nvme->n_io_cqueue_len = MIN(nvme->n_io_cqueue_len, 2978 nvme->n_io_squeue_len); 2979 2980 nvme->n_io_cqueue_len = MIN(nvme->n_io_cqueue_len, 2981 nvme->n_max_queue_entries); 2982 2983 (void) ddi_prop_update_int(DDI_DEV_T_NONE, nvme->n_dip, "io-cqueue-len", 2984 nvme->n_io_cqueue_len); 2985 2986 if (nvme_create_cq_array(nvme, nvme->n_completion_queues + 1, 2987 nvme->n_io_cqueue_len) != DDI_SUCCESS) { 2988 dev_err(nvme->n_dip, CE_WARN, 2989 "!failed to pre-allocate completion queues"); 2990 goto fail; 2991 } 2992 2993 /* 2994 * If we use less completion queues than interrupt vectors return 2995 * some of the interrupt vectors back to the system. 2996 */ 2997 if (nvme->n_completion_queues + 1 < nvme->n_intr_cnt) { 2998 nvme_release_interrupts(nvme); 2999 3000 if (nvme_setup_interrupts(nvme, nvme->n_intr_type, 3001 nvme->n_completion_queues + 1) != DDI_SUCCESS) { 3002 dev_err(nvme->n_dip, CE_WARN, 3003 "!failed to reduce number of interrupts"); 3004 goto fail; 3005 } 3006 } 3007 3008 /* 3009 * Alloc & register I/O queue pairs 3010 */ 3011 3012 for (i = 1; i != nvme->n_ioq_count + 1; i++) { 3013 if (nvme_alloc_qpair(nvme, nvme->n_io_squeue_len, 3014 &nvme->n_ioq[i], i) != DDI_SUCCESS) { 3015 dev_err(nvme->n_dip, CE_WARN, 3016 "!unable to allocate I/O qpair %d", i); 3017 goto fail; 3018 } 3019 3020 if (nvme_create_io_qpair(nvme, nvme->n_ioq[i], i) != 0) { 3021 dev_err(nvme->n_dip, CE_WARN, 3022 "!unable to create I/O qpair %d", i); 3023 goto fail; 3024 } 3025 } 3026 3027 /* 3028 * Post more asynchronous events commands to reduce event reporting 3029 * latency as suggested by the spec. 3030 */ 3031 if (nvme->n_async_event_supported) { 3032 for (i = 1; i != nvme->n_async_event_limit; i++) 3033 nvme_async_event(nvme); 3034 } 3035 3036 return (DDI_SUCCESS); 3037 3038 fail: 3039 (void) nvme_reset(nvme, B_FALSE); 3040 return (DDI_FAILURE); 3041 } 3042 3043 static uint_t 3044 nvme_intr(caddr_t arg1, caddr_t arg2) 3045 { 3046 /*LINTED: E_PTR_BAD_CAST_ALIGN*/ 3047 nvme_t *nvme = (nvme_t *)arg1; 3048 int inum = (int)(uintptr_t)arg2; 3049 int ccnt = 0; 3050 int qnum; 3051 3052 if (inum >= nvme->n_intr_cnt) 3053 return (DDI_INTR_UNCLAIMED); 3054 3055 if (nvme->n_dead) 3056 return (nvme->n_intr_type == DDI_INTR_TYPE_FIXED ? 3057 DDI_INTR_UNCLAIMED : DDI_INTR_CLAIMED); 3058 3059 /* 3060 * The interrupt vector a queue uses is calculated as queue_idx % 3061 * intr_cnt in nvme_create_io_qpair(). Iterate through the queue array 3062 * in steps of n_intr_cnt to process all queues using this vector. 3063 */ 3064 for (qnum = inum; 3065 qnum < nvme->n_cq_count && nvme->n_cq[qnum] != NULL; 3066 qnum += nvme->n_intr_cnt) { 3067 ccnt += nvme_process_iocq(nvme, nvme->n_cq[qnum]); 3068 } 3069 3070 return (ccnt > 0 ? 
DDI_INTR_CLAIMED : DDI_INTR_UNCLAIMED); 3071 } 3072 3073 static void 3074 nvme_release_interrupts(nvme_t *nvme) 3075 { 3076 int i; 3077 3078 for (i = 0; i < nvme->n_intr_cnt; i++) { 3079 if (nvme->n_inth[i] == NULL) 3080 break; 3081 3082 if (nvme->n_intr_cap & DDI_INTR_FLAG_BLOCK) 3083 (void) ddi_intr_block_disable(&nvme->n_inth[i], 1); 3084 else 3085 (void) ddi_intr_disable(nvme->n_inth[i]); 3086 3087 (void) ddi_intr_remove_handler(nvme->n_inth[i]); 3088 (void) ddi_intr_free(nvme->n_inth[i]); 3089 } 3090 3091 kmem_free(nvme->n_inth, nvme->n_inth_sz); 3092 nvme->n_inth = NULL; 3093 nvme->n_inth_sz = 0; 3094 3095 nvme->n_progress &= ~NVME_INTERRUPTS; 3096 } 3097 3098 static int 3099 nvme_setup_interrupts(nvme_t *nvme, int intr_type, int nqpairs) 3100 { 3101 int nintrs, navail, count; 3102 int ret; 3103 int i; 3104 3105 if (nvme->n_intr_types == 0) { 3106 ret = ddi_intr_get_supported_types(nvme->n_dip, 3107 &nvme->n_intr_types); 3108 if (ret != DDI_SUCCESS) { 3109 dev_err(nvme->n_dip, CE_WARN, 3110 "!%s: ddi_intr_get_supported types failed", 3111 __func__); 3112 return (ret); 3113 } 3114 #ifdef __x86 3115 if (get_hwenv() == HW_VMWARE) 3116 nvme->n_intr_types &= ~DDI_INTR_TYPE_MSIX; 3117 #endif 3118 } 3119 3120 if ((nvme->n_intr_types & intr_type) == 0) 3121 return (DDI_FAILURE); 3122 3123 ret = ddi_intr_get_nintrs(nvme->n_dip, intr_type, &nintrs); 3124 if (ret != DDI_SUCCESS) { 3125 dev_err(nvme->n_dip, CE_WARN, "!%s: ddi_intr_get_nintrs failed", 3126 __func__); 3127 return (ret); 3128 } 3129 3130 ret = ddi_intr_get_navail(nvme->n_dip, intr_type, &navail); 3131 if (ret != DDI_SUCCESS) { 3132 dev_err(nvme->n_dip, CE_WARN, "!%s: ddi_intr_get_navail failed", 3133 __func__); 3134 return (ret); 3135 } 3136 3137 /* We want at most one interrupt per queue pair. 
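 * navail is only an upper bound: ddi_intr_alloc() below may still
 * return fewer vectors than requested, and n_intr_cnt is taken from
 * its count output.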
*/ 3138 if (navail > nqpairs) 3139 navail = nqpairs; 3140 3141 nvme->n_inth_sz = sizeof (ddi_intr_handle_t) * navail; 3142 nvme->n_inth = kmem_zalloc(nvme->n_inth_sz, KM_SLEEP); 3143 3144 ret = ddi_intr_alloc(nvme->n_dip, nvme->n_inth, intr_type, 0, navail, 3145 &count, 0); 3146 if (ret != DDI_SUCCESS) { 3147 dev_err(nvme->n_dip, CE_WARN, "!%s: ddi_intr_alloc failed", 3148 __func__); 3149 goto fail; 3150 } 3151 3152 nvme->n_intr_cnt = count; 3153 3154 ret = ddi_intr_get_pri(nvme->n_inth[0], &nvme->n_intr_pri); 3155 if (ret != DDI_SUCCESS) { 3156 dev_err(nvme->n_dip, CE_WARN, "!%s: ddi_intr_get_pri failed", 3157 __func__); 3158 goto fail; 3159 } 3160 3161 for (i = 0; i < count; i++) { 3162 ret = ddi_intr_add_handler(nvme->n_inth[i], nvme_intr, 3163 (void *)nvme, (void *)(uintptr_t)i); 3164 if (ret != DDI_SUCCESS) { 3165 dev_err(nvme->n_dip, CE_WARN, 3166 "!%s: ddi_intr_add_handler failed", __func__); 3167 goto fail; 3168 } 3169 } 3170 3171 (void) ddi_intr_get_cap(nvme->n_inth[0], &nvme->n_intr_cap); 3172 3173 for (i = 0; i < count; i++) { 3174 if (nvme->n_intr_cap & DDI_INTR_FLAG_BLOCK) 3175 ret = ddi_intr_block_enable(&nvme->n_inth[i], 1); 3176 else 3177 ret = ddi_intr_enable(nvme->n_inth[i]); 3178 3179 if (ret != DDI_SUCCESS) { 3180 dev_err(nvme->n_dip, CE_WARN, 3181 "!%s: enabling interrupt %d failed", __func__, i); 3182 goto fail; 3183 } 3184 } 3185 3186 nvme->n_intr_type = intr_type; 3187 3188 nvme->n_progress |= NVME_INTERRUPTS; 3189 3190 return (DDI_SUCCESS); 3191 3192 fail: 3193 nvme_release_interrupts(nvme); 3194 3195 return (ret); 3196 } 3197 3198 static int 3199 nvme_fm_errcb(dev_info_t *dip, ddi_fm_error_t *fm_error, const void *arg) 3200 { 3201 _NOTE(ARGUNUSED(arg)); 3202 3203 pci_ereport_post(dip, fm_error, NULL); 3204 return (fm_error->fme_status); 3205 } 3206 3207 static int 3208 nvme_attach(dev_info_t *dip, ddi_attach_cmd_t cmd) 3209 { 3210 nvme_t *nvme; 3211 int instance; 3212 int nregs; 3213 off_t regsize; 3214 int i; 3215 char name[32]; 3216 3217 if (cmd != DDI_ATTACH) 3218 return (DDI_FAILURE); 3219 3220 instance = ddi_get_instance(dip); 3221 3222 if (ddi_soft_state_zalloc(nvme_state, instance) != DDI_SUCCESS) 3223 return (DDI_FAILURE); 3224 3225 nvme = ddi_get_soft_state(nvme_state, instance); 3226 ddi_set_driver_private(dip, nvme); 3227 nvme->n_dip = dip; 3228 3229 mutex_init(&nvme->n_minor.nm_mutex, NULL, MUTEX_DRIVER, NULL); 3230 3231 nvme->n_strict_version = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 3232 DDI_PROP_DONTPASS, "strict-version", 1) == 1 ? B_TRUE : B_FALSE; 3233 nvme->n_ignore_unknown_vendor_status = ddi_prop_get_int(DDI_DEV_T_ANY, 3234 dip, DDI_PROP_DONTPASS, "ignore-unknown-vendor-status", 0) == 1 ? 3235 B_TRUE : B_FALSE; 3236 nvme->n_admin_queue_len = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 3237 DDI_PROP_DONTPASS, "admin-queue-len", NVME_DEFAULT_ADMIN_QUEUE_LEN); 3238 nvme->n_io_squeue_len = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 3239 DDI_PROP_DONTPASS, "io-squeue-len", NVME_DEFAULT_IO_QUEUE_LEN); 3240 /* 3241 * Double up the default for completion queues in case of 3242 * queue sharing. 3243 */ 3244 nvme->n_io_cqueue_len = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 3245 DDI_PROP_DONTPASS, "io-cqueue-len", 2 * NVME_DEFAULT_IO_QUEUE_LEN); 3246 nvme->n_async_event_limit = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 3247 DDI_PROP_DONTPASS, "async-event-limit", 3248 NVME_DEFAULT_ASYNC_EVENT_LIMIT); 3249 nvme->n_write_cache_enabled = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 3250 DDI_PROP_DONTPASS, "volatile-write-cache-enable", 1) != 0 ? 
3251 B_TRUE : B_FALSE; 3252 nvme->n_min_block_size = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 3253 DDI_PROP_DONTPASS, "min-phys-block-size", 3254 NVME_DEFAULT_MIN_BLOCK_SIZE); 3255 nvme->n_submission_queues = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 3256 DDI_PROP_DONTPASS, "max-submission-queues", -1); 3257 nvme->n_completion_queues = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 3258 DDI_PROP_DONTPASS, "max-completion-queues", -1); 3259 3260 if (!ISP2(nvme->n_min_block_size) || 3261 (nvme->n_min_block_size < NVME_DEFAULT_MIN_BLOCK_SIZE)) { 3262 dev_err(dip, CE_WARN, "!min-phys-block-size %s, " 3263 "using default %d", ISP2(nvme->n_min_block_size) ? 3264 "too low" : "not a power of 2", 3265 NVME_DEFAULT_MIN_BLOCK_SIZE); 3266 nvme->n_min_block_size = NVME_DEFAULT_MIN_BLOCK_SIZE; 3267 } 3268 3269 if (nvme->n_submission_queues != -1 && 3270 (nvme->n_submission_queues < 1 || 3271 nvme->n_submission_queues > UINT16_MAX)) { 3272 dev_err(dip, CE_WARN, "!\"submission-queues\"=%d is not " 3273 "valid. Must be [1..%d]", nvme->n_submission_queues, 3274 UINT16_MAX); 3275 nvme->n_submission_queues = -1; 3276 } 3277 3278 if (nvme->n_completion_queues != -1 && 3279 (nvme->n_completion_queues < 1 || 3280 nvme->n_completion_queues > UINT16_MAX)) { 3281 dev_err(dip, CE_WARN, "!\"completion-queues\"=%d is not " 3282 "valid. Must be [1..%d]", nvme->n_completion_queues, 3283 UINT16_MAX); 3284 nvme->n_completion_queues = -1; 3285 } 3286 3287 if (nvme->n_admin_queue_len < NVME_MIN_ADMIN_QUEUE_LEN) 3288 nvme->n_admin_queue_len = NVME_MIN_ADMIN_QUEUE_LEN; 3289 else if (nvme->n_admin_queue_len > NVME_MAX_ADMIN_QUEUE_LEN) 3290 nvme->n_admin_queue_len = NVME_MAX_ADMIN_QUEUE_LEN; 3291 3292 if (nvme->n_io_squeue_len < NVME_MIN_IO_QUEUE_LEN) 3293 nvme->n_io_squeue_len = NVME_MIN_IO_QUEUE_LEN; 3294 if (nvme->n_io_cqueue_len < NVME_MIN_IO_QUEUE_LEN) 3295 nvme->n_io_cqueue_len = NVME_MIN_IO_QUEUE_LEN; 3296 3297 if (nvme->n_async_event_limit < 1) 3298 nvme->n_async_event_limit = NVME_DEFAULT_ASYNC_EVENT_LIMIT; 3299 3300 nvme->n_reg_acc_attr = nvme_reg_acc_attr; 3301 nvme->n_queue_dma_attr = nvme_queue_dma_attr; 3302 nvme->n_prp_dma_attr = nvme_prp_dma_attr; 3303 nvme->n_sgl_dma_attr = nvme_sgl_dma_attr; 3304 3305 /* 3306 * Setup FMA support. 3307 */ 3308 nvme->n_fm_cap = ddi_getprop(DDI_DEV_T_ANY, dip, 3309 DDI_PROP_CANSLEEP | DDI_PROP_DONTPASS, "fm-capable", 3310 DDI_FM_EREPORT_CAPABLE | DDI_FM_ACCCHK_CAPABLE | 3311 DDI_FM_DMACHK_CAPABLE | DDI_FM_ERRCB_CAPABLE); 3312 3313 ddi_fm_init(dip, &nvme->n_fm_cap, &nvme->n_fm_ibc); 3314 3315 if (nvme->n_fm_cap) { 3316 if (nvme->n_fm_cap & DDI_FM_ACCCHK_CAPABLE) 3317 nvme->n_reg_acc_attr.devacc_attr_access = 3318 DDI_FLAGERR_ACC; 3319 3320 if (nvme->n_fm_cap & DDI_FM_DMACHK_CAPABLE) { 3321 nvme->n_prp_dma_attr.dma_attr_flags |= DDI_DMA_FLAGERR; 3322 nvme->n_sgl_dma_attr.dma_attr_flags |= DDI_DMA_FLAGERR; 3323 } 3324 3325 if (DDI_FM_EREPORT_CAP(nvme->n_fm_cap) || 3326 DDI_FM_ERRCB_CAP(nvme->n_fm_cap)) 3327 pci_ereport_setup(dip); 3328 3329 if (DDI_FM_ERRCB_CAP(nvme->n_fm_cap)) 3330 ddi_fm_handler_register(dip, nvme_fm_errcb, 3331 (void *)nvme); 3332 } 3333 3334 nvme->n_progress |= NVME_FMA_INIT; 3335 3336 /* 3337 * The spec defines several register sets. Only the controller 3338 * registers (set 1) are currently used. 
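 * Register set 0 of a PCI function is its configuration space, so the
 * memory-mapped controller registers (BAR0/1) appear as set 1.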
3339 */ 3340 if (ddi_dev_nregs(dip, &nregs) == DDI_FAILURE || 3341 nregs < 2 || 3342 ddi_dev_regsize(dip, 1, ®size) == DDI_FAILURE) 3343 goto fail; 3344 3345 if (ddi_regs_map_setup(dip, 1, &nvme->n_regs, 0, regsize, 3346 &nvme->n_reg_acc_attr, &nvme->n_regh) != DDI_SUCCESS) { 3347 dev_err(dip, CE_WARN, "!failed to map regset 1"); 3348 goto fail; 3349 } 3350 3351 nvme->n_progress |= NVME_REGS_MAPPED; 3352 3353 /* 3354 * Create taskq for command completion. 3355 */ 3356 (void) snprintf(name, sizeof (name), "%s%d_cmd_taskq", 3357 ddi_driver_name(dip), ddi_get_instance(dip)); 3358 nvme->n_cmd_taskq = ddi_taskq_create(dip, name, MIN(UINT16_MAX, ncpus), 3359 TASKQ_DEFAULTPRI, 0); 3360 if (nvme->n_cmd_taskq == NULL) { 3361 dev_err(dip, CE_WARN, "!failed to create cmd taskq"); 3362 goto fail; 3363 } 3364 3365 /* 3366 * Create PRP DMA cache 3367 */ 3368 (void) snprintf(name, sizeof (name), "%s%d_prp_cache", 3369 ddi_driver_name(dip), ddi_get_instance(dip)); 3370 nvme->n_prp_cache = kmem_cache_create(name, sizeof (nvme_dma_t), 3371 0, nvme_prp_dma_constructor, nvme_prp_dma_destructor, 3372 NULL, (void *)nvme, NULL, 0); 3373 3374 if (nvme_init(nvme) != DDI_SUCCESS) 3375 goto fail; 3376 3377 /* 3378 * Initialize the driver with the UFM subsystem 3379 */ 3380 if (ddi_ufm_init(dip, DDI_UFM_CURRENT_VERSION, &nvme_ufm_ops, 3381 &nvme->n_ufmh, nvme) != 0) { 3382 dev_err(dip, CE_WARN, "!failed to initialize UFM subsystem"); 3383 goto fail; 3384 } 3385 mutex_init(&nvme->n_fwslot_mutex, NULL, MUTEX_DRIVER, NULL); 3386 ddi_ufm_update(nvme->n_ufmh); 3387 nvme->n_progress |= NVME_UFM_INIT; 3388 3389 /* 3390 * Attach the blkdev driver for each namespace. 3391 */ 3392 for (i = 0; i != nvme->n_namespace_count; i++) { 3393 if (ddi_create_minor_node(nvme->n_dip, nvme->n_ns[i].ns_name, 3394 S_IFCHR, NVME_MINOR(ddi_get_instance(nvme->n_dip), i + 1), 3395 DDI_NT_NVME_ATTACHMENT_POINT, 0) != DDI_SUCCESS) { 3396 dev_err(dip, CE_WARN, 3397 "!failed to create minor node for namespace %d", i); 3398 goto fail; 3399 } 3400 3401 if (nvme->n_ns[i].ns_ignore) 3402 continue; 3403 3404 nvme->n_ns[i].ns_bd_hdl = bd_alloc_handle(&nvme->n_ns[i], 3405 &nvme_bd_ops, &nvme->n_prp_dma_attr, KM_SLEEP); 3406 3407 if (nvme->n_ns[i].ns_bd_hdl == NULL) { 3408 dev_err(dip, CE_WARN, 3409 "!failed to get blkdev handle for namespace %d", i); 3410 goto fail; 3411 } 3412 3413 if (bd_attach_handle(dip, nvme->n_ns[i].ns_bd_hdl) 3414 != DDI_SUCCESS) { 3415 dev_err(dip, CE_WARN, 3416 "!failed to attach blkdev handle for namespace %d", 3417 i); 3418 goto fail; 3419 } 3420 } 3421 3422 if (ddi_create_minor_node(dip, "devctl", S_IFCHR, 3423 NVME_MINOR(ddi_get_instance(dip), 0), DDI_NT_NVME_NEXUS, 0) 3424 != DDI_SUCCESS) { 3425 dev_err(dip, CE_WARN, "nvme_attach: " 3426 "cannot create devctl minor node"); 3427 goto fail; 3428 } 3429 3430 return (DDI_SUCCESS); 3431 3432 fail: 3433 /* attach successful anyway so that FMA can retire the device */ 3434 if (nvme->n_dead) 3435 return (DDI_SUCCESS); 3436 3437 (void) nvme_detach(dip, DDI_DETACH); 3438 3439 return (DDI_FAILURE); 3440 } 3441 3442 static int 3443 nvme_detach(dev_info_t *dip, ddi_detach_cmd_t cmd) 3444 { 3445 int instance, i; 3446 nvme_t *nvme; 3447 3448 if (cmd != DDI_DETACH) 3449 return (DDI_FAILURE); 3450 3451 instance = ddi_get_instance(dip); 3452 3453 nvme = ddi_get_soft_state(nvme_state, instance); 3454 3455 if (nvme == NULL) 3456 return (DDI_FAILURE); 3457 3458 ddi_remove_minor_node(dip, "devctl"); 3459 mutex_destroy(&nvme->n_minor.nm_mutex); 3460 3461 if (nvme->n_ns) { 3462 for (i = 0; i != 
nvme->n_namespace_count; i++) { 3463 ddi_remove_minor_node(dip, nvme->n_ns[i].ns_name); 3464 mutex_destroy(&nvme->n_ns[i].ns_minor.nm_mutex); 3465 3466 if (nvme->n_ns[i].ns_bd_hdl) { 3467 (void) bd_detach_handle( 3468 nvme->n_ns[i].ns_bd_hdl); 3469 bd_free_handle(nvme->n_ns[i].ns_bd_hdl); 3470 } 3471 3472 if (nvme->n_ns[i].ns_idns) 3473 kmem_free(nvme->n_ns[i].ns_idns, 3474 sizeof (nvme_identify_nsid_t)); 3475 if (nvme->n_ns[i].ns_devid) 3476 strfree(nvme->n_ns[i].ns_devid); 3477 } 3478 3479 kmem_free(nvme->n_ns, sizeof (nvme_namespace_t) * 3480 nvme->n_namespace_count); 3481 } 3482 if (nvme->n_progress & NVME_UFM_INIT) { 3483 ddi_ufm_fini(nvme->n_ufmh); 3484 mutex_destroy(&nvme->n_fwslot_mutex); 3485 } 3486 3487 if (nvme->n_progress & NVME_INTERRUPTS) 3488 nvme_release_interrupts(nvme); 3489 3490 if (nvme->n_cmd_taskq) 3491 ddi_taskq_wait(nvme->n_cmd_taskq); 3492 3493 if (nvme->n_ioq_count > 0) { 3494 for (i = 1; i != nvme->n_ioq_count + 1; i++) { 3495 if (nvme->n_ioq[i] != NULL) { 3496 /* TODO: send destroy queue commands */ 3497 nvme_free_qpair(nvme->n_ioq[i]); 3498 } 3499 } 3500 3501 kmem_free(nvme->n_ioq, sizeof (nvme_qpair_t *) * 3502 (nvme->n_ioq_count + 1)); 3503 } 3504 3505 if (nvme->n_prp_cache != NULL) { 3506 kmem_cache_destroy(nvme->n_prp_cache); 3507 } 3508 3509 if (nvme->n_progress & NVME_REGS_MAPPED) { 3510 nvme_shutdown(nvme, NVME_CC_SHN_NORMAL, B_FALSE); 3511 (void) nvme_reset(nvme, B_FALSE); 3512 } 3513 3514 if (nvme->n_cmd_taskq) 3515 ddi_taskq_destroy(nvme->n_cmd_taskq); 3516 3517 if (nvme->n_progress & NVME_CTRL_LIMITS) 3518 sema_destroy(&nvme->n_abort_sema); 3519 3520 if (nvme->n_progress & NVME_ADMIN_QUEUE) 3521 nvme_free_qpair(nvme->n_adminq); 3522 3523 if (nvme->n_cq_count > 0) { 3524 nvme_destroy_cq_array(nvme, 0); 3525 nvme->n_cq = NULL; 3526 nvme->n_cq_count = 0; 3527 } 3528 3529 if (nvme->n_idctl) 3530 kmem_free(nvme->n_idctl, NVME_IDENTIFY_BUFSIZE); 3531 3532 if (nvme->n_progress & NVME_REGS_MAPPED) 3533 ddi_regs_map_free(&nvme->n_regh); 3534 3535 if (nvme->n_progress & NVME_FMA_INIT) { 3536 if (DDI_FM_ERRCB_CAP(nvme->n_fm_cap)) 3537 ddi_fm_handler_unregister(nvme->n_dip); 3538 3539 if (DDI_FM_EREPORT_CAP(nvme->n_fm_cap) || 3540 DDI_FM_ERRCB_CAP(nvme->n_fm_cap)) 3541 pci_ereport_teardown(nvme->n_dip); 3542 3543 ddi_fm_fini(nvme->n_dip); 3544 } 3545 3546 if (nvme->n_vendor != NULL) 3547 strfree(nvme->n_vendor); 3548 3549 if (nvme->n_product != NULL) 3550 strfree(nvme->n_product); 3551 3552 ddi_soft_state_free(nvme_state, instance); 3553 3554 return (DDI_SUCCESS); 3555 } 3556 3557 static int 3558 nvme_quiesce(dev_info_t *dip) 3559 { 3560 int instance; 3561 nvme_t *nvme; 3562 3563 instance = ddi_get_instance(dip); 3564 3565 nvme = ddi_get_soft_state(nvme_state, instance); 3566 3567 if (nvme == NULL) 3568 return (DDI_FAILURE); 3569 3570 nvme_shutdown(nvme, NVME_CC_SHN_ABRUPT, B_TRUE); 3571 3572 (void) nvme_reset(nvme, B_TRUE); 3573 3574 return (DDI_FAILURE); 3575 } 3576 3577 static int 3578 nvme_fill_prp(nvme_cmd_t *cmd, bd_xfer_t *xfer) 3579 { 3580 nvme_t *nvme = cmd->nc_nvme; 3581 int nprp_page, nprp; 3582 uint64_t *prp; 3583 3584 if (xfer->x_ndmac == 0) 3585 return (DDI_FAILURE); 3586 3587 cmd->nc_sqe.sqe_dptr.d_prp[0] = xfer->x_dmac.dmac_laddress; 3588 ddi_dma_nextcookie(xfer->x_dmah, &xfer->x_dmac); 3589 3590 if (xfer->x_ndmac == 1) { 3591 cmd->nc_sqe.sqe_dptr.d_prp[1] = 0; 3592 return (DDI_SUCCESS); 3593 } else if (xfer->x_ndmac == 2) { 3594 cmd->nc_sqe.sqe_dptr.d_prp[1] = xfer->x_dmac.dmac_laddress; 3595 return (DDI_SUCCESS); 3596 } 3597 3598 
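/*
 * More than two cookies: PRP1 already holds the first cookie and PRP2
 * will point at a page-sized PRP list holding the rest. With 4 KiB
 * pages one list page holds 512 eight-byte entries. The first cookie
 * has been consumed above, so drop it from the count before sizing
 * the list.
 */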
xfer->x_ndmac--; 3599 3600 nprp_page = nvme->n_pagesize / sizeof (uint64_t); 3601 ASSERT(nprp_page > 0); 3602 nprp = (xfer->x_ndmac + nprp_page - 1) / nprp_page; 3603 3604 /* 3605 * We currently don't support chained PRPs and set up our DMA 3606 * attributes to reflect that. If we still get an I/O request 3607 * that needs a chained PRP something is very wrong. 3608 */ 3609 VERIFY(nprp == 1); 3610 3611 cmd->nc_dma = kmem_cache_alloc(nvme->n_prp_cache, KM_SLEEP); 3612 bzero(cmd->nc_dma->nd_memp, cmd->nc_dma->nd_len); 3613 3614 cmd->nc_sqe.sqe_dptr.d_prp[1] = cmd->nc_dma->nd_cookie.dmac_laddress; 3615 3616 /*LINTED: E_PTR_BAD_CAST_ALIGN*/ 3617 for (prp = (uint64_t *)cmd->nc_dma->nd_memp; 3618 xfer->x_ndmac > 0; 3619 prp++, xfer->x_ndmac--) { 3620 *prp = xfer->x_dmac.dmac_laddress; 3621 ddi_dma_nextcookie(xfer->x_dmah, &xfer->x_dmac); 3622 } 3623 3624 (void) ddi_dma_sync(cmd->nc_dma->nd_dmah, 0, cmd->nc_dma->nd_len, 3625 DDI_DMA_SYNC_FORDEV); 3626 return (DDI_SUCCESS); 3627 } 3628 3629 static nvme_cmd_t * 3630 nvme_create_nvm_cmd(nvme_namespace_t *ns, uint8_t opc, bd_xfer_t *xfer) 3631 { 3632 nvme_t *nvme = ns->ns_nvme; 3633 nvme_cmd_t *cmd; 3634 3635 /* 3636 * Blkdev only sets BD_XFER_POLL when dumping, so don't sleep. 3637 */ 3638 cmd = nvme_alloc_cmd(nvme, (xfer->x_flags & BD_XFER_POLL) ? 3639 KM_NOSLEEP : KM_SLEEP); 3640 3641 if (cmd == NULL) 3642 return (NULL); 3643 3644 cmd->nc_sqe.sqe_opc = opc; 3645 cmd->nc_callback = nvme_bd_xfer_done; 3646 cmd->nc_xfer = xfer; 3647 3648 switch (opc) { 3649 case NVME_OPC_NVM_WRITE: 3650 case NVME_OPC_NVM_READ: 3651 VERIFY(xfer->x_nblks <= 0x10000); 3652 3653 cmd->nc_sqe.sqe_nsid = ns->ns_id; 3654 3655 cmd->nc_sqe.sqe_cdw10 = xfer->x_blkno & 0xffffffffu; 3656 cmd->nc_sqe.sqe_cdw11 = (xfer->x_blkno >> 32); 3657 cmd->nc_sqe.sqe_cdw12 = (uint16_t)(xfer->x_nblks - 1); 3658 3659 if (nvme_fill_prp(cmd, xfer) != DDI_SUCCESS) 3660 goto fail; 3661 break; 3662 3663 case NVME_OPC_NVM_FLUSH: 3664 cmd->nc_sqe.sqe_nsid = ns->ns_id; 3665 break; 3666 3667 default: 3668 goto fail; 3669 } 3670 3671 return (cmd); 3672 3673 fail: 3674 nvme_free_cmd(cmd); 3675 return (NULL); 3676 } 3677 3678 static void 3679 nvme_bd_xfer_done(void *arg) 3680 { 3681 nvme_cmd_t *cmd = arg; 3682 bd_xfer_t *xfer = cmd->nc_xfer; 3683 int error = 0; 3684 3685 error = nvme_check_cmd_status(cmd); 3686 nvme_free_cmd(cmd); 3687 3688 bd_xfer_done(xfer, error); 3689 } 3690 3691 static void 3692 nvme_bd_driveinfo(void *arg, bd_drive_t *drive) 3693 { 3694 nvme_namespace_t *ns = arg; 3695 nvme_t *nvme = ns->ns_nvme; 3696 3697 /* 3698 * blkdev maintains one queue size per instance (namespace), 3699 * but all namespaces share the I/O queues. 3700 * TODO: need to figure out a sane default, or use per-NS I/O queues, 3701 * or change blkdev to handle EAGAIN 3702 */ 3703 drive->d_qsize = nvme->n_ioq_count * nvme->n_io_squeue_len 3704 / nvme->n_namespace_count; 3705 3706 /* 3707 * d_maxxfer is not set, which means the value is taken from the DMA 3708 * attributes specified to bd_alloc_handle.
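 * In this driver that is n_prp_dma_attr.dma_attr_maxxfer as computed
 * in nvme_init(), i.e. at most what a single PRP list can map.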
	 */

	drive->d_removable = B_FALSE;
	drive->d_hotpluggable = B_FALSE;

	bcopy(ns->ns_eui64, drive->d_eui64, sizeof (drive->d_eui64));
	drive->d_target = ns->ns_id;
	drive->d_lun = 0;

	drive->d_model = nvme->n_idctl->id_model;
	drive->d_model_len = sizeof (nvme->n_idctl->id_model);
	drive->d_vendor = nvme->n_vendor;
	drive->d_vendor_len = strlen(nvme->n_vendor);
	drive->d_product = nvme->n_product;
	drive->d_product_len = strlen(nvme->n_product);
	drive->d_serial = nvme->n_idctl->id_serial;
	drive->d_serial_len = sizeof (nvme->n_idctl->id_serial);
	drive->d_revision = nvme->n_idctl->id_fwrev;
	drive->d_revision_len = sizeof (nvme->n_idctl->id_fwrev);
}

static int
nvme_bd_mediainfo(void *arg, bd_media_t *media)
{
	nvme_namespace_t *ns = arg;

	media->m_nblks = ns->ns_block_count;
	media->m_blksize = ns->ns_block_size;
	media->m_readonly = B_FALSE;
	media->m_solidstate = B_TRUE;

	media->m_pblksize = ns->ns_best_block_size;

	return (0);
}

static int
nvme_bd_cmd(nvme_namespace_t *ns, bd_xfer_t *xfer, uint8_t opc)
{
	nvme_t *nvme = ns->ns_nvme;
	nvme_cmd_t *cmd;
	nvme_qpair_t *ioq;
	boolean_t poll;
	int ret;

	if (nvme->n_dead)
		return (EIO);

	cmd = nvme_create_nvm_cmd(ns, opc, xfer);
	if (cmd == NULL)
		return (ENOMEM);

	cmd->nc_sqid = (CPU->cpu_id % nvme->n_ioq_count) + 1;
	ASSERT(cmd->nc_sqid <= nvme->n_ioq_count);
	ioq = nvme->n_ioq[cmd->nc_sqid];

	/*
	 * Get the polling flag before submitting the command. The command may
	 * complete immediately after it was submitted, which means we must
	 * treat both cmd and xfer as if they have been freed already.
	 */
	poll = (xfer->x_flags & BD_XFER_POLL) != 0;

	ret = nvme_submit_io_cmd(ioq, cmd);

	if (ret != 0)
		return (ret);

	if (!poll)
		return (0);

	do {
		cmd = nvme_retrieve_cmd(nvme, ioq);
		if (cmd != NULL)
			cmd->nc_callback(cmd);
		else
			drv_usecwait(10);
	} while (ioq->nq_active_cmds != 0);

	return (0);
}

static int
nvme_bd_read(void *arg, bd_xfer_t *xfer)
{
	nvme_namespace_t *ns = arg;

	return (nvme_bd_cmd(ns, xfer, NVME_OPC_NVM_READ));
}

static int
nvme_bd_write(void *arg, bd_xfer_t *xfer)
{
	nvme_namespace_t *ns = arg;

	return (nvme_bd_cmd(ns, xfer, NVME_OPC_NVM_WRITE));
}

static int
nvme_bd_sync(void *arg, bd_xfer_t *xfer)
{
	nvme_namespace_t *ns = arg;

	if (ns->ns_nvme->n_dead)
		return (EIO);

	/*
	 * If the volatile write cache is not present or not enabled the FLUSH
	 * command is a no-op, so we can take a shortcut here.
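	 * A missing write cache is completed with ENOTSUP to signal that
	 * flushing is not supported, while a cache that is present but
	 * disabled completes the request successfully without sending a
	 * command to the hardware.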
	 */
	if (!ns->ns_nvme->n_write_cache_present) {
		bd_xfer_done(xfer, ENOTSUP);
		return (0);
	}

	if (!ns->ns_nvme->n_write_cache_enabled) {
		bd_xfer_done(xfer, 0);
		return (0);
	}

	return (nvme_bd_cmd(ns, xfer, NVME_OPC_NVM_FLUSH));
}

static int
nvme_bd_devid(void *arg, dev_info_t *devinfo, ddi_devid_t *devid)
{
	nvme_namespace_t *ns = arg;

	/*LINTED: E_BAD_PTR_CAST_ALIGN*/
	if (*(uint64_t *)ns->ns_eui64 != 0) {
		return (ddi_devid_init(devinfo, DEVID_SCSI3_WWN,
		    sizeof (ns->ns_eui64), ns->ns_eui64, devid));
	} else {
		return (ddi_devid_init(devinfo, DEVID_ENCAP,
		    strlen(ns->ns_devid), ns->ns_devid, devid));
	}
}

static int
nvme_open(dev_t *devp, int flag, int otyp, cred_t *cred_p)
{
#ifndef __lock_lint
	_NOTE(ARGUNUSED(cred_p));
#endif
	minor_t minor = getminor(*devp);
	nvme_t *nvme = ddi_get_soft_state(nvme_state, NVME_MINOR_INST(minor));
	int nsid = NVME_MINOR_NSID(minor);
	nvme_minor_state_t *nm;
	int rv = 0;

	if (otyp != OTYP_CHR)
		return (EINVAL);

	if (nvme == NULL)
		return (ENXIO);

	if (nsid > nvme->n_namespace_count)
		return (ENXIO);

	if (nvme->n_dead)
		return (EIO);

	nm = nsid == 0 ? &nvme->n_minor : &nvme->n_ns[nsid - 1].ns_minor;

	mutex_enter(&nm->nm_mutex);
	if (nm->nm_oexcl) {
		rv = EBUSY;
		goto out;
	}

	if (flag & FEXCL) {
		if (nm->nm_ocnt != 0) {
			rv = EBUSY;
			goto out;
		}
		nm->nm_oexcl = B_TRUE;
	}

	nm->nm_ocnt++;

out:
	mutex_exit(&nm->nm_mutex);
	return (rv);
}

static int
nvme_close(dev_t dev, int flag, int otyp, cred_t *cred_p)
{
#ifndef __lock_lint
	_NOTE(ARGUNUSED(cred_p));
	_NOTE(ARGUNUSED(flag));
#endif
	minor_t minor = getminor(dev);
	nvme_t *nvme = ddi_get_soft_state(nvme_state, NVME_MINOR_INST(minor));
	int nsid = NVME_MINOR_NSID(minor);
	nvme_minor_state_t *nm;

	if (otyp != OTYP_CHR)
		return (ENXIO);

	if (nvme == NULL)
		return (ENXIO);

	if (nsid > nvme->n_namespace_count)
		return (ENXIO);

	nm = nsid == 0 ? &nvme->n_minor : &nvme->n_ns[nsid - 1].ns_minor;

	mutex_enter(&nm->nm_mutex);
	if (nm->nm_oexcl)
		nm->nm_oexcl = B_FALSE;

	ASSERT(nm->nm_ocnt > 0);
	nm->nm_ocnt--;
	mutex_exit(&nm->nm_mutex);

	return (0);
}

static int
nvme_ioctl_identify(nvme_t *nvme, int nsid, nvme_ioctl_t *nioc, int mode,
    cred_t *cred_p)
{
	_NOTE(ARGUNUSED(cred_p));
	int rv = 0;
	void *idctl;

	if ((mode & FREAD) == 0)
		return (EPERM);

	if (nioc->n_len < NVME_IDENTIFY_BUFSIZE)
		return (EINVAL);

	if ((rv = nvme_identify(nvme, B_TRUE, nsid, (void **)&idctl)) != 0)
		return (rv);

	if (ddi_copyout(idctl, (void *)nioc->n_buf, NVME_IDENTIFY_BUFSIZE, mode)
	    != 0)
		rv = EFAULT;

	kmem_free(idctl, NVME_IDENTIFY_BUFSIZE);

	return (rv);
}

/*
 * Execute commands on behalf of the various ioctls.
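 *
 * The command to issue is described by the submission queue entry in sqe;
 * is_admin selects between the admin queue and one of the I/O queues. If the
 * FREAD/FWRITE bits in rwk request a data transfer, a DMA buffer of data_len
 * bytes is allocated and copied from or to data_addr, with FKIOCTL indicating
 * a kernel-space address. The raw completion queue entry is returned through
 * cqe so that callers can inspect the status fields, and timeout bounds how
 * long we wait for the command to complete.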
 */
static int
nvme_ioc_cmd(nvme_t *nvme, nvme_sqe_t *sqe, boolean_t is_admin, void *data_addr,
    uint32_t data_len, int rwk, nvme_cqe_t *cqe, uint_t timeout)
{
	nvme_cmd_t *cmd;
	nvme_qpair_t *ioq;
	int rv = 0;

	cmd = nvme_alloc_cmd(nvme, KM_SLEEP);
	if (is_admin) {
		cmd->nc_sqid = 0;
		ioq = nvme->n_adminq;
	} else {
		cmd->nc_sqid = (CPU->cpu_id % nvme->n_ioq_count) + 1;
		ASSERT(cmd->nc_sqid <= nvme->n_ioq_count);
		ioq = nvme->n_ioq[cmd->nc_sqid];
	}

	cmd->nc_callback = nvme_wakeup_cmd;
	cmd->nc_sqe = *sqe;

	if ((rwk & (FREAD | FWRITE)) != 0) {
		if (data_addr == NULL) {
			rv = EINVAL;
			goto free_cmd;
		}

		/*
		 * Because we use PRPs and haven't implemented PRP
		 * lists here, the maximum data size is restricted to
		 * 2 pages.
		 */
		if (data_len > 2 * nvme->n_pagesize) {
			dev_err(nvme->n_dip, CE_WARN, "!Data size %u is too "
			    "large for nvme_ioc_cmd(). Limit is 2 pages "
			    "(%u bytes)", data_len, 2 * nvme->n_pagesize);

			rv = EINVAL;
			goto free_cmd;
		}

		if (nvme_zalloc_dma(nvme, data_len, DDI_DMA_READ,
		    &nvme->n_prp_dma_attr, &cmd->nc_dma) != DDI_SUCCESS) {
			dev_err(nvme->n_dip, CE_WARN,
			    "!nvme_zalloc_dma failed for nvme_ioc_cmd()");

			rv = ENOMEM;
			goto free_cmd;
		}

		if (cmd->nc_dma->nd_ncookie > 2) {
			dev_err(nvme->n_dip, CE_WARN,
			    "!too many DMA cookies for nvme_ioc_cmd()");
			atomic_inc_32(&nvme->n_too_many_cookies);

			rv = E2BIG;
			goto free_cmd;
		}

		cmd->nc_sqe.sqe_dptr.d_prp[0] =
		    cmd->nc_dma->nd_cookie.dmac_laddress;

		if (cmd->nc_dma->nd_ncookie > 1) {
			ddi_dma_nextcookie(cmd->nc_dma->nd_dmah,
			    &cmd->nc_dma->nd_cookie);
			cmd->nc_sqe.sqe_dptr.d_prp[1] =
			    cmd->nc_dma->nd_cookie.dmac_laddress;
		}

		if ((rwk & FWRITE) != 0) {
			if (ddi_copyin(data_addr, cmd->nc_dma->nd_memp,
			    data_len, rwk & FKIOCTL) != 0) {
				rv = EFAULT;
				goto free_cmd;
			}
		}
	}

	if (is_admin) {
		nvme_admin_cmd(cmd, timeout);
	} else {
		mutex_enter(&cmd->nc_mutex);

		rv = nvme_submit_io_cmd(ioq, cmd);

		if (rv == EAGAIN) {
			mutex_exit(&cmd->nc_mutex);
			dev_err(cmd->nc_nvme->n_dip, CE_WARN,
			    "!nvme_ioc_cmd() failed, I/O Q full");
			goto free_cmd;
		}

		nvme_wait_cmd(cmd, timeout);

		mutex_exit(&cmd->nc_mutex);
	}

	if (cqe != NULL)
		*cqe = cmd->nc_cqe;

	if ((rv = nvme_check_cmd_status(cmd)) != 0) {
		dev_err(nvme->n_dip, CE_WARN,
		    "!nvme_ioc_cmd() failed with sct = %x, sc = %x",
		    cmd->nc_cqe.cqe_sf.sf_sct, cmd->nc_cqe.cqe_sf.sf_sc);

		goto free_cmd;
	}

	if ((rwk & FREAD) != 0) {
		if (ddi_copyout(cmd->nc_dma->nd_memp,
		    data_addr, data_len, rwk & FKIOCTL) != 0)
			rv = EFAULT;
	}

free_cmd:
	nvme_free_cmd(cmd);

	return (rv);
}

static int
nvme_ioctl_capabilities(nvme_t *nvme, int nsid, nvme_ioctl_t *nioc,
    int mode, cred_t *cred_p)
{
	_NOTE(ARGUNUSED(nsid, cred_p));
	int rv = 0;
	nvme_reg_cap_t cap = { 0 };
	nvme_capabilities_t nc;

	if ((mode & FREAD) == 0)
		return (EPERM);

	if (nioc->n_len < sizeof (nc))
		return (EINVAL);

	cap.r = nvme_get64(nvme, NVME_REG_CAP);

	/*
	 * The MPSMIN and MPSMAX fields in the CAP register use 0 to
	 * specify the base page size of 4k (1<<12), so add 12 here to
	 * get the real page size value.
	 */
	nc.mpsmax = 1 << (12 + cap.b.cap_mpsmax);
	nc.mpsmin = 1 << (12 + cap.b.cap_mpsmin);

	if (ddi_copyout(&nc, (void *)nioc->n_buf, sizeof (nc), mode) != 0)
		rv = EFAULT;

	return (rv);
}

static int
nvme_ioctl_get_logpage(nvme_t *nvme, int nsid, nvme_ioctl_t *nioc,
    int mode, cred_t *cred_p)
{
	_NOTE(ARGUNUSED(cred_p));
	void *log = NULL;
	size_t bufsize = 0;
	int rv = 0;

	if ((mode & FREAD) == 0)
		return (EPERM);

	switch (nioc->n_arg) {
	case NVME_LOGPAGE_ERROR:
		if (nsid != 0)
			return (EINVAL);
		break;
	case NVME_LOGPAGE_HEALTH:
		if (nsid != 0 && nvme->n_idctl->id_lpa.lp_smart == 0)
			return (EINVAL);

		if (nsid == 0)
			nsid = (uint32_t)-1;

		break;
	case NVME_LOGPAGE_FWSLOT:
		if (nsid != 0)
			return (EINVAL);
		break;
	default:
		return (EINVAL);
	}

	if (nvme_get_logpage(nvme, B_TRUE, &log, &bufsize, nioc->n_arg, nsid)
	    != DDI_SUCCESS)
		return (EIO);

	if (nioc->n_len < bufsize) {
		kmem_free(log, bufsize);
		return (EINVAL);
	}

	if (ddi_copyout(log, (void *)nioc->n_buf, bufsize, mode) != 0)
		rv = EFAULT;

	nioc->n_len = bufsize;
	kmem_free(log, bufsize);

	return (rv);
}

static int
nvme_ioctl_get_features(nvme_t *nvme, int nsid, nvme_ioctl_t *nioc,
    int mode, cred_t *cred_p)
{
	_NOTE(ARGUNUSED(cred_p));
	void *buf = NULL;
	size_t bufsize = 0;
	uint32_t res = 0;
	uint8_t feature;
	int rv = 0;

	if ((mode & FREAD) == 0)
		return (EPERM);

	if ((nioc->n_arg >> 32) > 0xff)
		return (EINVAL);

	feature = (uint8_t)(nioc->n_arg >> 32);

	switch (feature) {
	case NVME_FEAT_ARBITRATION:
	case NVME_FEAT_POWER_MGMT:
	case NVME_FEAT_TEMPERATURE:
	case NVME_FEAT_ERROR:
	case NVME_FEAT_NQUEUES:
	case NVME_FEAT_INTR_COAL:
	case NVME_FEAT_WRITE_ATOM:
	case NVME_FEAT_ASYNC_EVENT:
	case NVME_FEAT_PROGRESS:
		if (nsid != 0)
			return (EINVAL);
		break;

	case NVME_FEAT_INTR_VECT:
		if (nsid != 0)
			return (EINVAL);

		res = nioc->n_arg & 0xffffffffUL;
		if (res >= nvme->n_intr_cnt)
			return (EINVAL);
		break;

	case NVME_FEAT_LBA_RANGE:
		if (nvme->n_lba_range_supported == B_FALSE)
			return (EINVAL);

		if (nsid == 0 ||
		    nsid > nvme->n_namespace_count)
			return (EINVAL);

		break;

	case NVME_FEAT_WRITE_CACHE:
		if (nsid != 0)
			return (EINVAL);

		if (!nvme->n_write_cache_present)
			return (EINVAL);

		break;

	case NVME_FEAT_AUTO_PST:
		if (nsid != 0)
			return (EINVAL);

		if (!nvme->n_auto_pst_supported)
			return (EINVAL);

		break;

	default:
		return (EINVAL);
	}

	rv = nvme_get_features(nvme, B_TRUE, nsid, feature, &res, &buf,
	    &bufsize);
	if (rv != 0)
		return (rv);

	if (nioc->n_len < bufsize) {
		kmem_free(buf, bufsize);
		return (EINVAL);
	}

	if (buf != NULL && ddi_copyout(buf, (void *)nioc->n_buf, bufsize,
	    mode) != 0)
		rv = EFAULT;

	kmem_free(buf, bufsize);
	nioc->n_arg = res;
	nioc->n_len = bufsize;

	return (rv);
}

static int
nvme_ioctl_intr_cnt(nvme_t *nvme, int nsid, nvme_ioctl_t *nioc, int mode,
    cred_t *cred_p)
{
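	/*
	 * Report the number of interrupt vectors the driver was able to
	 * allocate; no data buffer is involved, the count is returned
	 * directly in n_arg.
	 */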
	_NOTE(ARGUNUSED(nsid, cred_p));

	if ((mode & FREAD) == 0)
		return (EPERM);

	nioc->n_arg = nvme->n_intr_cnt;
	return (0);
}

static int
nvme_ioctl_version(nvme_t *nvme, int nsid, nvme_ioctl_t *nioc, int mode,
    cred_t *cred_p)
{
	_NOTE(ARGUNUSED(nsid, cred_p));
	int rv = 0;

	if ((mode & FREAD) == 0)
		return (EPERM);

	if (nioc->n_len < sizeof (nvme->n_version))
		return (ENOMEM);

	if (ddi_copyout(&nvme->n_version, (void *)nioc->n_buf,
	    sizeof (nvme->n_version), mode) != 0)
		rv = EFAULT;

	return (rv);
}

static int
nvme_ioctl_format(nvme_t *nvme, int nsid, nvme_ioctl_t *nioc, int mode,
    cred_t *cred_p)
{
	nvme_format_nvm_t frmt = { 0 };
	int c_nsid = nsid != 0 ? nsid - 1 : 0;

	if ((mode & FWRITE) == 0 || secpolicy_sys_config(cred_p, B_FALSE) != 0)
		return (EPERM);

	frmt.r = nioc->n_arg & 0xffffffff;

	/*
	 * Check whether the FORMAT NVM command is supported.
	 */
	if (nvme->n_idctl->id_oacs.oa_format == 0)
		return (EINVAL);

	/*
	 * Don't allow format or secure erase of an individual namespace if
	 * that would cause a format or secure erase of all namespaces.
	 */
	if (nsid != 0 && nvme->n_idctl->id_fna.fn_format != 0)
		return (EINVAL);

	if (nsid != 0 && frmt.b.fm_ses != NVME_FRMT_SES_NONE &&
	    nvme->n_idctl->id_fna.fn_sec_erase != 0)
		return (EINVAL);

	/*
	 * Don't allow formatting with Protection Information.
	 */
	if (frmt.b.fm_pi != 0 || frmt.b.fm_pil != 0 || frmt.b.fm_ms != 0)
		return (EINVAL);

	/*
	 * Don't allow formatting using an illegal LBA format, or any LBA
	 * format that uses metadata.
	 */
	if (frmt.b.fm_lbaf > nvme->n_ns[c_nsid].ns_idns->id_nlbaf ||
	    nvme->n_ns[c_nsid].ns_idns->id_lbaf[frmt.b.fm_lbaf].lbaf_ms != 0)
		return (EINVAL);

	/*
	 * Don't allow formatting using an illegal Secure Erase setting.
	 */
	if (frmt.b.fm_ses > NVME_FRMT_MAX_SES ||
	    (frmt.b.fm_ses == NVME_FRMT_SES_CRYPTO &&
	    nvme->n_idctl->id_fna.fn_crypt_erase == 0))
		return (EINVAL);

	if (nsid == 0)
		nsid = (uint32_t)-1;

	return (nvme_format_nvm(nvme, B_TRUE, nsid, frmt.b.fm_lbaf, B_FALSE, 0,
	    B_FALSE, frmt.b.fm_ses));
}

static int
nvme_ioctl_detach(nvme_t *nvme, int nsid, nvme_ioctl_t *nioc, int mode,
    cred_t *cred_p)
{
	_NOTE(ARGUNUSED(nioc));
	int rv = 0;

	if ((mode & FWRITE) == 0 || secpolicy_sys_config(cred_p, B_FALSE) != 0)
		return (EPERM);

	if (nsid == 0)
		return (EINVAL);

	rv = bd_detach_handle(nvme->n_ns[nsid - 1].ns_bd_hdl);
	if (rv != DDI_SUCCESS)
		rv = EBUSY;

	return (rv);
}

static int
nvme_ioctl_attach(nvme_t *nvme, int nsid, nvme_ioctl_t *nioc, int mode,
    cred_t *cred_p)
{
	_NOTE(ARGUNUSED(nioc));
	nvme_identify_nsid_t *idns;
	int rv = 0;

	if ((mode & FWRITE) == 0 || secpolicy_sys_config(cred_p, B_FALSE) != 0)
		return (EPERM);

	if (nsid == 0)
		return (EINVAL);

	/*
	 * Identify the namespace again and free the old identify data.
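	 * A previous FORMAT NVM command may have changed the LBA format and
	 * with it the block size and capacity, so the identify data must be
	 * refreshed before the namespace is handed back to blkdev.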
	 */
	idns = nvme->n_ns[nsid - 1].ns_idns;
	if (nvme_init_ns(nvme, nsid) != DDI_SUCCESS)
		return (EIO);

	kmem_free(idns, sizeof (nvme_identify_nsid_t));

	rv = bd_attach_handle(nvme->n_dip, nvme->n_ns[nsid - 1].ns_bd_hdl);
	if (rv != DDI_SUCCESS)
		rv = EBUSY;

	return (rv);
}

static void
nvme_ufm_update(nvme_t *nvme)
{
	mutex_enter(&nvme->n_fwslot_mutex);
	ddi_ufm_update(nvme->n_ufmh);
	if (nvme->n_fwslot != NULL) {
		kmem_free(nvme->n_fwslot, sizeof (nvme_fwslot_log_t));
		nvme->n_fwslot = NULL;
	}
	mutex_exit(&nvme->n_fwslot_mutex);
}

static int
nvme_ioctl_firmware_download(nvme_t *nvme, int nsid, nvme_ioctl_t *nioc,
    int mode, cred_t *cred_p)
{
	int rv = 0;
	size_t len, copylen;
	offset_t offset;
	uintptr_t buf;
	nvme_sqe_t sqe = {
	    .sqe_opc = NVME_OPC_FW_IMAGE_LOAD
	};

	if ((mode & FWRITE) == 0 || secpolicy_sys_config(cred_p, B_FALSE) != 0)
		return (EPERM);

	if (nsid != 0)
		return (EINVAL);

	/*
	 * The length (in n_len) is restricted to the maximum byte offset
	 * that can be expressed as a 32-bit count of DWORDs.
	 */
	if (nioc->n_len > NVME_FW_OFFSETB_MAX)
		return (EINVAL);

	/* Confirm that both offset and length are a multiple of DWORD bytes */
	if ((nioc->n_len & NVME_DWORD_MASK) != 0 ||
	    (nioc->n_arg & NVME_DWORD_MASK) != 0)
		return (EINVAL);

	len = nioc->n_len;
	offset = nioc->n_arg;
	buf = (uintptr_t)nioc->n_buf;
	while (len > 0 && rv == 0) {
		/*
		 * nvme_ioc_cmd() does not use SGLs or PRP lists.
		 * It is limited to 2 PRPs per NVM command, so limit
		 * the size of the data to 2 pages.
		 */
		copylen = MIN(2 * nvme->n_pagesize, len);

		sqe.sqe_cdw10 = (uint32_t)(copylen >> NVME_DWORD_SHIFT) - 1;
		sqe.sqe_cdw11 = (uint32_t)(offset >> NVME_DWORD_SHIFT);

		rv = nvme_ioc_cmd(nvme, &sqe, B_TRUE, (void *)buf, copylen,
		    FWRITE, NULL, nvme_admin_cmd_timeout);

		buf += copylen;
		offset += copylen;
		len -= copylen;
	}

	/*
	 * Let the DDI UFM subsystem know that the firmware information for
	 * this device has changed.
	 */
	nvme_ufm_update(nvme);

	return (rv);
}

static int
nvme_ioctl_firmware_commit(nvme_t *nvme, int nsid, nvme_ioctl_t *nioc,
    int mode, cred_t *cred_p)
{
	nvme_firmware_commit_dw10_t fc_dw10 = { 0 };
	uint32_t slot = nioc->n_arg & 0xffffffff;
	uint32_t action = nioc->n_arg >> 32;
	nvme_cqe_t cqe = { 0 };
	nvme_sqe_t sqe = {
	    .sqe_opc = NVME_OPC_FW_ACTIVATE
	};
	int timeout;
	int rv;

	if ((mode & FWRITE) == 0 || secpolicy_sys_config(cred_p, B_FALSE) != 0)
		return (EPERM);

	if (nsid != 0)
		return (EINVAL);

	/*
	 * Validate that the slot is in range.
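	 * NVMe numbers firmware slots from 1 up to a maximum of 7, as
	 * reported in id_frmw.fw_nslot; 0 is never a valid slot number in a
	 * Firmware Commit command.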
	 */
	if (slot < NVME_FW_SLOT_MIN || slot > NVME_FW_SLOT_MAX)
		return (EINVAL);

	switch (action) {
	case NVME_FWC_SAVE:
	case NVME_FWC_SAVE_ACTIVATE:
		timeout = nvme_commit_save_cmd_timeout;
		break;
	case NVME_FWC_ACTIVATE:
	case NVME_FWC_ACTIVATE_IMMED:
		timeout = nvme_admin_cmd_timeout;
		break;
	default:
		return (EINVAL);
	}

	fc_dw10.b.fc_slot = slot;
	fc_dw10.b.fc_action = action;
	sqe.sqe_cdw10 = fc_dw10.r;

	rv = nvme_ioc_cmd(nvme, &sqe, B_TRUE, NULL, 0, 0, &cqe, timeout);

	nioc->n_arg = ((uint64_t)cqe.cqe_sf.sf_sct << 16) | cqe.cqe_sf.sf_sc;

	/*
	 * Let the DDI UFM subsystem know that the firmware information for
	 * this device has changed.
	 */
	nvme_ufm_update(nvme);

	return (rv);
}

static int
nvme_ioctl(dev_t dev, int cmd, intptr_t arg, int mode, cred_t *cred_p,
    int *rval_p)
{
#ifndef __lock_lint
	_NOTE(ARGUNUSED(rval_p));
#endif
	minor_t minor = getminor(dev);
	nvme_t *nvme = ddi_get_soft_state(nvme_state, NVME_MINOR_INST(minor));
	int nsid = NVME_MINOR_NSID(minor);
	int rv = 0;
	nvme_ioctl_t nioc;

	int (*nvme_ioctl[])(nvme_t *, int, nvme_ioctl_t *, int, cred_t *) = {
		NULL,
		nvme_ioctl_identify,
		nvme_ioctl_identify,
		nvme_ioctl_capabilities,
		nvme_ioctl_get_logpage,
		nvme_ioctl_get_features,
		nvme_ioctl_intr_cnt,
		nvme_ioctl_version,
		nvme_ioctl_format,
		nvme_ioctl_detach,
		nvme_ioctl_attach,
		nvme_ioctl_firmware_download,
		nvme_ioctl_firmware_commit
	};

	if (nvme == NULL)
		return (ENXIO);

	if (nsid > nvme->n_namespace_count)
		return (ENXIO);

	if (IS_DEVCTL(cmd))
		return (ndi_devctl_ioctl(nvme->n_dip, cmd, arg, mode, 0));

#ifdef _MULTI_DATAMODEL
	switch (ddi_model_convert_from(mode & FMODELS)) {
	case DDI_MODEL_ILP32: {
		nvme_ioctl32_t nioc32;
		if (ddi_copyin((void *)arg, &nioc32, sizeof (nvme_ioctl32_t),
		    mode) != 0)
			return (EFAULT);
		nioc.n_len = nioc32.n_len;
		nioc.n_buf = nioc32.n_buf;
		nioc.n_arg = nioc32.n_arg;
		break;
	}
	case DDI_MODEL_NONE:
#endif
		if (ddi_copyin((void *)arg, &nioc, sizeof (nvme_ioctl_t), mode)
		    != 0)
			return (EFAULT);
#ifdef _MULTI_DATAMODEL
		break;
	}
#endif

	if (nvme->n_dead && cmd != NVME_IOC_DETACH)
		return (EIO);

	if (cmd == NVME_IOC_IDENTIFY_CTRL) {
		/*
		 * This makes NVME_IOC_IDENTIFY_CTRL work the same on devctl
		 * and attachment point nodes.
		 */
		nsid = 0;
	} else if (cmd == NVME_IOC_IDENTIFY_NSID && nsid == 0) {
		/*
		 * This makes NVME_IOC_IDENTIFY_NSID work on a devctl node; it
		 * will always return identify data for namespace 1.
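		 * (Namespace IDs start at 1, so the first namespace of any
		 * controller is always nsid 1.)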
		 */
		nsid = 1;
	}

	if (IS_NVME_IOC(cmd) && nvme_ioctl[NVME_IOC_CMD(cmd)] != NULL)
		rv = nvme_ioctl[NVME_IOC_CMD(cmd)](nvme, nsid, &nioc, mode,
		    cred_p);
	else
		rv = EINVAL;

#ifdef _MULTI_DATAMODEL
	switch (ddi_model_convert_from(mode & FMODELS)) {
	case DDI_MODEL_ILP32: {
		nvme_ioctl32_t nioc32;

		nioc32.n_len = (size32_t)nioc.n_len;
		nioc32.n_buf = (uintptr32_t)nioc.n_buf;
		nioc32.n_arg = nioc.n_arg;

		if (ddi_copyout(&nioc32, (void *)arg, sizeof (nvme_ioctl32_t),
		    mode) != 0)
			return (EFAULT);
		break;
	}
	case DDI_MODEL_NONE:
#endif
		if (ddi_copyout(&nioc, (void *)arg, sizeof (nvme_ioctl_t), mode)
		    != 0)
			return (EFAULT);
#ifdef _MULTI_DATAMODEL
		break;
	}
#endif

	return (rv);
}

/*
 * DDI UFM Callbacks
 */
static int
nvme_ufm_fill_image(ddi_ufm_handle_t *ufmh, void *arg, uint_t imgno,
    ddi_ufm_image_t *img)
{
	nvme_t *nvme = arg;

	if (imgno != 0)
		return (EINVAL);

	ddi_ufm_image_set_desc(img, "Firmware");
	ddi_ufm_image_set_nslots(img, nvme->n_idctl->id_frmw.fw_nslot);

	return (0);
}

/*
 * Fill out firmware slot information for the requested slot. The firmware
 * slot information is gathered by requesting the Firmware Slot Information log
 * page. The format of the page is described in section 5.10.1.3 of the NVMe
 * specification.
 *
 * We lazily cache the log page on the first call and then invalidate the cache
 * data after a successful firmware download or firmware commit command.
 * The cached data is protected by a mutex as the state can change
 * asynchronously to this callback.
 */
static int
nvme_ufm_fill_slot(ddi_ufm_handle_t *ufmh, void *arg, uint_t imgno,
    uint_t slotno, ddi_ufm_slot_t *slot)
{
	nvme_t *nvme = arg;
	void *log = NULL;
	size_t bufsize;
	ddi_ufm_attr_t attr = 0;
	char fw_ver[NVME_FWVER_SZ + 1];
	int ret;

	if (imgno > 0 || slotno > (nvme->n_idctl->id_frmw.fw_nslot - 1))
		return (EINVAL);

	mutex_enter(&nvme->n_fwslot_mutex);
	if (nvme->n_fwslot == NULL) {
		ret = nvme_get_logpage(nvme, B_TRUE, &log, &bufsize,
		    NVME_LOGPAGE_FWSLOT, 0);
		if (ret != DDI_SUCCESS ||
		    bufsize != sizeof (nvme_fwslot_log_t)) {
			if (log != NULL)
				kmem_free(log, bufsize);
			mutex_exit(&nvme->n_fwslot_mutex);
			return (EIO);
		}
		nvme->n_fwslot = (nvme_fwslot_log_t *)log;
	}

	/*
	 * NVMe numbers firmware slots starting at 1.
	 */
	if (slotno == (nvme->n_fwslot->fw_afi - 1))
		attr |= DDI_UFM_ATTR_ACTIVE;

	if (slotno != 0 || nvme->n_idctl->id_frmw.fw_readonly == 0)
		attr |= DDI_UFM_ATTR_WRITEABLE;

	if (nvme->n_fwslot->fw_frs[slotno][0] == '\0') {
		attr |= DDI_UFM_ATTR_EMPTY;
	} else {
		(void) strncpy(fw_ver, nvme->n_fwslot->fw_frs[slotno],
		    NVME_FWVER_SZ);
		fw_ver[NVME_FWVER_SZ] = '\0';
		ddi_ufm_slot_set_version(slot, fw_ver);
	}
	mutex_exit(&nvme->n_fwslot_mutex);

	ddi_ufm_slot_set_attrs(slot, attr);

	return (0);
}

static int
nvme_ufm_getcaps(ddi_ufm_handle_t *ufmh, void *arg, ddi_ufm_cap_t *caps)
{
	*caps = DDI_UFM_CAP_REPORT;
	return (0);
}
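/*
 * The three callbacks above are wired up during attach. A minimal sketch of
 * that registration, assuming the ops structure is laid out as in ddi_ufm.h
 * (ddi_ufm_op_nimages first, which this driver leaves NULL):
 *
 *	static ddi_ufm_ops_t nvme_ufm_ops = {
 *		NULL,
 *		nvme_ufm_fill_image,
 *		nvme_ufm_fill_slot,
 *		nvme_ufm_getcaps
 *	};
 *
 *	if (ddi_ufm_init(dip, DDI_UFM_CURRENT_VERSION, &nvme_ufm_ops,
 *	    &nvme->n_ufmh, nvme) != DDI_SUCCESS)
 *		goto fail;
 *	nvme->n_progress |= NVME_UFM_INIT;
 *
 * Once registered, the UFM framework invokes these callbacks whenever a
 * consumer queries the firmware inventory of the device.
 */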