/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 *
 * nv_sata is a combo SATA HBA driver for ck804/mcp5x (mcp5x = mcp55/mcp51)
 * based chipsets.
 *
 * NCQ
 * ---
 *
 * A portion of the NCQ is in place, but is incomplete.  NCQ is disabled
 * and is likely to be revisited in the future.
 *
 *
 * Power Management
 * ----------------
 *
 * Normally power management would be responsible for ensuring the device
 * is quiescent and then changing power states to the device, such as
 * powering down parts or all of the device.  mcp5x/ck804 is unique in
 * that it is only available as part of a larger southbridge chipset, so
 * removing power to the device isn't possible.  Switches to control
 * power management states D0/D3 in the PCI configuration space appear to
 * be supported but changes to these states are apparently ignored.
49 * The only further PM that the driver _could_ do is shut down the PHY, 50 * but in order to deliver the first rev of the driver sooner than later, 51 * that will be deferred until some future phase. 52 * 53 * Since the driver currently will not directly change any power state to 54 * the device, no power() entry point will be required. However, it is 55 * possible that in ACPI power state S3, aka suspend to RAM, that power 56 * can be removed to the device, and the driver cannot rely on BIOS to 57 * have reset any state. For the time being, there is no known 58 * non-default configurations that need to be programmed. This judgement 59 * is based on the port of the legacy ata driver not having any such 60 * functionality and based on conversations with the PM team. If such a 61 * restoration is later deemed necessary it can be incorporated into the 62 * DDI_RESUME processing. 63 * 64 */ 65 66 #include <sys/scsi/scsi.h> 67 #include <sys/pci.h> 68 #include <sys/byteorder.h> 69 #include <sys/sunddi.h> 70 #include <sys/sata/sata_hba.h> 71 #ifdef SGPIO_SUPPORT 72 #include <sys/sata/adapters/nv_sata/nv_sgpio.h> 73 #include <sys/devctl.h> 74 #include <sys/sdt.h> 75 #endif 76 #include <sys/sata/adapters/nv_sata/nv_sata.h> 77 #include <sys/disp.h> 78 #include <sys/note.h> 79 #include <sys/promif.h> 80 81 82 /* 83 * Function prototypes for driver entry points 84 */ 85 static int nv_attach(dev_info_t *dip, ddi_attach_cmd_t cmd); 86 static int nv_detach(dev_info_t *dip, ddi_detach_cmd_t cmd); 87 static int nv_quiesce(dev_info_t *dip); 88 static int nv_getinfo(dev_info_t *dip, ddi_info_cmd_t infocmd, 89 void *arg, void **result); 90 91 /* 92 * Function prototypes for entry points from sata service module 93 * These functions are distinguished from other local functions 94 * by the prefix "nv_sata_" 95 */ 96 static int nv_sata_start(dev_info_t *dip, sata_pkt_t *spkt); 97 static int nv_sata_abort(dev_info_t *dip, sata_pkt_t *spkt, int); 98 static int nv_sata_reset(dev_info_t 
*dip, sata_device_t *sd); 99 static int nv_sata_activate(dev_info_t *dip, sata_device_t *sd); 100 static int nv_sata_deactivate(dev_info_t *dip, sata_device_t *sd); 101 102 /* 103 * Local function prototypes 104 */ 105 static uint_t mcp5x_intr(caddr_t arg1, caddr_t arg2); 106 static uint_t ck804_intr(caddr_t arg1, caddr_t arg2); 107 static int nv_add_legacy_intrs(nv_ctl_t *nvc); 108 #ifdef NV_MSI_SUPPORTED 109 static int nv_add_msi_intrs(nv_ctl_t *nvc); 110 #endif 111 static void nv_rem_intrs(nv_ctl_t *nvc); 112 static int nv_start_common(nv_port_t *nvp, sata_pkt_t *spkt); 113 static int nv_start_nodata(nv_port_t *nvp, int slot); 114 static void nv_intr_nodata(nv_port_t *nvp, nv_slot_t *spkt); 115 static int nv_start_pio_in(nv_port_t *nvp, int slot); 116 static int nv_start_pio_out(nv_port_t *nvp, int slot); 117 static void nv_intr_pio_in(nv_port_t *nvp, nv_slot_t *spkt); 118 static void nv_intr_pio_out(nv_port_t *nvp, nv_slot_t *spkt); 119 static int nv_start_pkt_pio(nv_port_t *nvp, int slot); 120 static void nv_intr_pkt_pio(nv_port_t *nvp, nv_slot_t *nv_slotp); 121 static int nv_start_dma(nv_port_t *nvp, int slot); 122 static void nv_intr_dma(nv_port_t *nvp, struct nv_slot *spkt); 123 static void nv_log(uint_t flag, nv_ctl_t *nvc, nv_port_t *nvp, char *fmt, ...); 124 static void nv_uninit_ctl(nv_ctl_t *nvc); 125 static void mcp5x_reg_init(nv_ctl_t *nvc, ddi_acc_handle_t pci_conf_handle); 126 static void ck804_reg_init(nv_ctl_t *nvc, ddi_acc_handle_t pci_conf_handle); 127 static void nv_uninit_port(nv_port_t *nvp); 128 static int nv_init_port(nv_port_t *nvp); 129 static int nv_init_ctl(nv_ctl_t *nvc, ddi_acc_handle_t pci_conf_handle); 130 static int mcp5x_packet_complete_intr(nv_ctl_t *nvc, nv_port_t *nvp); 131 #ifdef NCQ 132 static int mcp5x_dma_setup_intr(nv_ctl_t *nvc, nv_port_t *nvp); 133 #endif 134 static void nv_start_dma_engine(nv_port_t *nvp, int slot); 135 static void nv_port_state_change(nv_port_t *nvp, int event, uint8_t addr_type, 136 int state); 137 
static boolean_t nv_check_link(uint32_t sstatus); 138 static void nv_common_reg_init(nv_ctl_t *nvc); 139 static void ck804_intr_process(nv_ctl_t *nvc, uint8_t intr_status); 140 static void nv_reset(nv_port_t *nvp); 141 static void nv_complete_io(nv_port_t *nvp, sata_pkt_t *spkt, int slot); 142 static void nv_timeout(void *); 143 static int nv_poll_wait(nv_port_t *nvp, sata_pkt_t *spkt); 144 static void nv_cmn_err(int ce, nv_ctl_t *nvc, nv_port_t *nvp, char *fmt, ...); 145 static void nv_read_signature(nv_port_t *nvp); 146 static void mcp5x_set_intr(nv_port_t *nvp, int flag); 147 static void ck804_set_intr(nv_port_t *nvp, int flag); 148 static void nv_resume(nv_port_t *nvp); 149 static void nv_suspend(nv_port_t *nvp); 150 static int nv_start_sync(nv_port_t *nvp, sata_pkt_t *spkt); 151 static int nv_abort_active(nv_port_t *nvp, sata_pkt_t *spkt, int abort_reason); 152 static void nv_copy_registers(nv_port_t *nvp, sata_device_t *sd, 153 sata_pkt_t *spkt); 154 static void nv_report_add_remove(nv_port_t *nvp, int flags); 155 static int nv_start_async(nv_port_t *nvp, sata_pkt_t *spkt); 156 static int nv_wait3(nv_port_t *nvp, uchar_t onbits1, uchar_t offbits1, 157 uchar_t failure_onbits2, uchar_t failure_offbits2, 158 uchar_t failure_onbits3, uchar_t failure_offbits3, 159 uint_t timeout_usec, int type_wait); 160 static int nv_wait(nv_port_t *nvp, uchar_t onbits, uchar_t offbits, 161 uint_t timeout_usec, int type_wait); 162 static int nv_start_rqsense_pio(nv_port_t *nvp, nv_slot_t *nv_slotp); 163 164 #ifdef SGPIO_SUPPORT 165 static int nv_open(dev_t *devp, int flag, int otyp, cred_t *credp); 166 static int nv_close(dev_t dev, int flag, int otyp, cred_t *credp); 167 static int nv_ioctl(dev_t dev, int cmd, intptr_t arg, int mode, 168 cred_t *credp, int *rvalp); 169 170 static void nv_sgp_led_init(nv_ctl_t *nvc, ddi_acc_handle_t pci_conf_handle); 171 static int nv_sgp_detect(ddi_acc_handle_t pci_conf_handle, uint16_t *csrpp, 172 uint32_t *cbpp); 173 static int 
nv_sgp_init(nv_ctl_t *nvc); 174 static void nv_sgp_reset(nv_ctl_t *nvc); 175 static int nv_sgp_init_cmd(nv_ctl_t *nvc); 176 static int nv_sgp_check_set_cmn(nv_ctl_t *nvc); 177 static int nv_sgp_csr_read(nv_ctl_t *nvc); 178 static void nv_sgp_csr_write(nv_ctl_t *nvc, uint32_t val); 179 static int nv_sgp_write_data(nv_ctl_t *nvc); 180 static void nv_sgp_activity_led_ctl(void *arg); 181 static void nv_sgp_drive_connect(nv_ctl_t *nvc, int drive); 182 static void nv_sgp_drive_disconnect(nv_ctl_t *nvc, int drive); 183 static void nv_sgp_drive_active(nv_ctl_t *nvc, int drive); 184 static void nv_sgp_locate(nv_ctl_t *nvc, int drive, int value); 185 static void nv_sgp_error(nv_ctl_t *nvc, int drive, int value); 186 static void nv_sgp_cleanup(nv_ctl_t *nvc); 187 #endif 188 189 190 /* 191 * DMA attributes for the data buffer for x86. dma_attr_burstsizes is unused. 192 * Verify if needed if ported to other ISA. 193 */ 194 static ddi_dma_attr_t buffer_dma_attr = { 195 DMA_ATTR_V0, /* dma_attr_version */ 196 0, /* dma_attr_addr_lo: lowest bus address */ 197 0xffffffffull, /* dma_attr_addr_hi: */ 198 NV_BM_64K_BOUNDARY - 1, /* dma_attr_count_max i.e for one cookie */ 199 4, /* dma_attr_align */ 200 1, /* dma_attr_burstsizes. 
*/ 201 1, /* dma_attr_minxfer */ 202 0xffffffffull, /* dma_attr_max xfer including all cookies */ 203 0xffffffffull, /* dma_attr_seg */ 204 NV_DMA_NSEGS, /* dma_attr_sgllen */ 205 512, /* dma_attr_granular */ 206 0, /* dma_attr_flags */ 207 }; 208 209 210 /* 211 * DMA attributes for PRD tables 212 */ 213 ddi_dma_attr_t nv_prd_dma_attr = { 214 DMA_ATTR_V0, /* dma_attr_version */ 215 0, /* dma_attr_addr_lo */ 216 0xffffffffull, /* dma_attr_addr_hi */ 217 NV_BM_64K_BOUNDARY - 1, /* dma_attr_count_max */ 218 4, /* dma_attr_align */ 219 1, /* dma_attr_burstsizes */ 220 1, /* dma_attr_minxfer */ 221 NV_BM_64K_BOUNDARY, /* dma_attr_maxxfer */ 222 NV_BM_64K_BOUNDARY - 1, /* dma_attr_seg */ 223 1, /* dma_attr_sgllen */ 224 1, /* dma_attr_granular */ 225 0 /* dma_attr_flags */ 226 }; 227 228 /* 229 * Device access attributes 230 */ 231 static ddi_device_acc_attr_t accattr = { 232 DDI_DEVICE_ATTR_V0, 233 DDI_STRUCTURE_LE_ACC, 234 DDI_STRICTORDER_ACC 235 }; 236 237 238 #ifdef SGPIO_SUPPORT 239 static struct cb_ops nv_cb_ops = { 240 nv_open, /* open */ 241 nv_close, /* close */ 242 nodev, /* strategy (block) */ 243 nodev, /* print (block) */ 244 nodev, /* dump (block) */ 245 nodev, /* read */ 246 nodev, /* write */ 247 nv_ioctl, /* ioctl */ 248 nodev, /* devmap */ 249 nodev, /* mmap */ 250 nodev, /* segmap */ 251 nochpoll, /* chpoll */ 252 ddi_prop_op, /* prop_op */ 253 NULL, /* streams */ 254 D_NEW | D_MP | 255 D_64BIT | D_HOTPLUG, /* flags */ 256 CB_REV /* rev */ 257 }; 258 #endif /* SGPIO_SUPPORT */ 259 260 261 static struct dev_ops nv_dev_ops = { 262 DEVO_REV, /* devo_rev */ 263 0, /* refcnt */ 264 nv_getinfo, /* info */ 265 nulldev, /* identify */ 266 nulldev, /* probe */ 267 nv_attach, /* attach */ 268 nv_detach, /* detach */ 269 nodev, /* no reset */ 270 #ifdef SGPIO_SUPPORT 271 &nv_cb_ops, /* driver operations */ 272 #else 273 (struct cb_ops *)0, /* driver operations */ 274 #endif 275 NULL, /* bus operations */ 276 NULL, /* power */ 277 nv_quiesce /* quiesce */ 278 }; 
279 280 281 /* 282 * Request Sense CDB for ATAPI 283 */ 284 static const uint8_t nv_rqsense_cdb[16] = { 285 SCMD_REQUEST_SENSE, 286 0, 287 0, 288 0, 289 SATA_ATAPI_MIN_RQSENSE_LEN, 290 0, 291 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 /* pad out to max CDB length */ 292 }; 293 294 295 static sata_tran_hotplug_ops_t nv_hotplug_ops; 296 297 extern struct mod_ops mod_driverops; 298 299 static struct modldrv modldrv = { 300 &mod_driverops, /* driverops */ 301 "Nvidia ck804/mcp51/mcp55 HBA", 302 &nv_dev_ops, /* driver ops */ 303 }; 304 305 static struct modlinkage modlinkage = { 306 MODREV_1, 307 &modldrv, 308 NULL 309 }; 310 311 312 /* 313 * wait between checks of reg status 314 */ 315 int nv_usec_delay = NV_WAIT_REG_CHECK; 316 317 /* 318 * The following is needed for nv_vcmn_err() 319 */ 320 static kmutex_t nv_log_mutex; /* protects nv_log_buf */ 321 static char nv_log_buf[NV_STRING_512]; 322 int nv_debug_flags = NVDBG_ALWAYS; 323 int nv_log_to_console = B_FALSE; 324 325 int nv_log_delay = 0; 326 int nv_prom_print = B_FALSE; 327 328 /* 329 * for debugging 330 */ 331 #ifdef DEBUG 332 int ncq_commands = 0; 333 int non_ncq_commands = 0; 334 #endif 335 336 /* 337 * Opaque state pointer to be initialized by ddi_soft_state_init() 338 */ 339 static void *nv_statep = NULL; 340 341 342 static sata_tran_hotplug_ops_t nv_hotplug_ops = { 343 SATA_TRAN_HOTPLUG_OPS_REV_1, /* structure version */ 344 nv_sata_activate, /* activate port. cfgadm -c connect */ 345 nv_sata_deactivate /* deactivate port. 
cfgadm -c disconnect */ 346 }; 347 348 349 /* 350 * nv module initialization 351 */ 352 int 353 _init(void) 354 { 355 int error; 356 357 error = ddi_soft_state_init(&nv_statep, sizeof (nv_ctl_t), 0); 358 359 if (error != 0) { 360 361 return (error); 362 } 363 364 mutex_init(&nv_log_mutex, NULL, MUTEX_DRIVER, NULL); 365 366 if ((error = sata_hba_init(&modlinkage)) != 0) { 367 ddi_soft_state_fini(&nv_statep); 368 mutex_destroy(&nv_log_mutex); 369 370 return (error); 371 } 372 373 error = mod_install(&modlinkage); 374 if (error != 0) { 375 sata_hba_fini(&modlinkage); 376 ddi_soft_state_fini(&nv_statep); 377 mutex_destroy(&nv_log_mutex); 378 379 return (error); 380 } 381 382 return (error); 383 } 384 385 386 /* 387 * nv module uninitialize 388 */ 389 int 390 _fini(void) 391 { 392 int error; 393 394 error = mod_remove(&modlinkage); 395 396 if (error != 0) { 397 return (error); 398 } 399 400 /* 401 * remove the resources allocated in _init() 402 */ 403 mutex_destroy(&nv_log_mutex); 404 sata_hba_fini(&modlinkage); 405 ddi_soft_state_fini(&nv_statep); 406 407 return (error); 408 } 409 410 411 /* 412 * nv _info entry point 413 */ 414 int 415 _info(struct modinfo *modinfop) 416 { 417 return (mod_info(&modlinkage, modinfop)); 418 } 419 420 421 /* 422 * these wrappers for ddi_{get,put}8 are for observability 423 * with dtrace 424 */ 425 #ifdef DEBUG 426 427 static void 428 nv_put8(ddi_acc_handle_t handle, uint8_t *dev_addr, uint8_t value) 429 { 430 ddi_put8(handle, dev_addr, value); 431 } 432 433 static void 434 nv_put32(ddi_acc_handle_t handle, uint32_t *dev_addr, uint32_t value) 435 { 436 ddi_put32(handle, dev_addr, value); 437 } 438 439 static uint32_t 440 nv_get32(ddi_acc_handle_t handle, uint32_t *dev_addr) 441 { 442 return (ddi_get32(handle, dev_addr)); 443 } 444 445 static void 446 nv_put16(ddi_acc_handle_t handle, uint16_t *dev_addr, uint16_t value) 447 { 448 ddi_put16(handle, dev_addr, value); 449 } 450 451 static uint16_t 452 nv_get16(ddi_acc_handle_t handle, 
uint16_t *dev_addr) 453 { 454 return (ddi_get16(handle, dev_addr)); 455 } 456 457 static uint8_t 458 nv_get8(ddi_acc_handle_t handle, uint8_t *dev_addr) 459 { 460 return (ddi_get8(handle, dev_addr)); 461 } 462 463 #else 464 465 #define nv_put8 ddi_put8 466 #define nv_put32 ddi_put32 467 #define nv_get32 ddi_get32 468 #define nv_put16 ddi_put16 469 #define nv_get16 ddi_get16 470 #define nv_get8 ddi_get8 471 472 #endif 473 474 475 /* 476 * Driver attach 477 */ 478 static int 479 nv_attach(dev_info_t *dip, ddi_attach_cmd_t cmd) 480 { 481 int status, attach_state, intr_types, bar, i, command; 482 int inst = ddi_get_instance(dip); 483 ddi_acc_handle_t pci_conf_handle; 484 nv_ctl_t *nvc; 485 uint8_t subclass; 486 uint32_t reg32; 487 #ifdef SGPIO_SUPPORT 488 pci_regspec_t *regs; 489 int rlen; 490 #endif 491 492 switch (cmd) { 493 494 case DDI_ATTACH: 495 496 NVLOG((NVDBG_INIT, NULL, NULL, 497 "nv_attach(): DDI_ATTACH inst %d", inst)); 498 499 attach_state = ATTACH_PROGRESS_NONE; 500 501 status = ddi_soft_state_zalloc(nv_statep, inst); 502 503 if (status != DDI_SUCCESS) { 504 break; 505 } 506 507 nvc = ddi_get_soft_state(nv_statep, inst); 508 509 nvc->nvc_dip = dip; 510 511 attach_state |= ATTACH_PROGRESS_STATEP_ALLOC; 512 513 if (pci_config_setup(dip, &pci_conf_handle) == DDI_SUCCESS) { 514 nvc->nvc_revid = pci_config_get8(pci_conf_handle, 515 PCI_CONF_REVID); 516 NVLOG((NVDBG_INIT, NULL, NULL, 517 "inst %d: silicon revid is %x nv_debug_flags=%x", 518 inst, nvc->nvc_revid, nv_debug_flags)); 519 } else { 520 break; 521 } 522 523 attach_state |= ATTACH_PROGRESS_CONF_HANDLE; 524 525 /* 526 * If a device is attached after a suspend/resume, sometimes 527 * the command register is zero, as it might not be set by 528 * BIOS or a parent. Set it again here. 
529 */ 530 command = pci_config_get16(pci_conf_handle, PCI_CONF_COMM); 531 532 if (command == 0) { 533 cmn_err(CE_WARN, "nv_sata%d: restoring PCI command" 534 " register", inst); 535 pci_config_put16(pci_conf_handle, PCI_CONF_COMM, 536 PCI_COMM_IO|PCI_COMM_MAE|PCI_COMM_ME); 537 } 538 539 subclass = pci_config_get8(pci_conf_handle, PCI_CONF_SUBCLASS); 540 541 if (subclass & PCI_MASS_RAID) { 542 cmn_err(CE_WARN, 543 "attach failed: RAID mode not supported"); 544 break; 545 } 546 547 /* 548 * the 6 bars of the controller are: 549 * 0: port 0 task file 550 * 1: port 0 status 551 * 2: port 1 task file 552 * 3: port 1 status 553 * 4: bus master for both ports 554 * 5: extended registers for SATA features 555 */ 556 for (bar = 0; bar < 6; bar++) { 557 status = ddi_regs_map_setup(dip, bar + 1, 558 (caddr_t *)&nvc->nvc_bar_addr[bar], 0, 0, &accattr, 559 &nvc->nvc_bar_hdl[bar]); 560 561 if (status != DDI_SUCCESS) { 562 NVLOG((NVDBG_INIT, nvc, NULL, 563 "ddi_regs_map_setup failure for bar" 564 " %d status = %d", bar, status)); 565 break; 566 } 567 } 568 569 attach_state |= ATTACH_PROGRESS_BARS; 570 571 /* 572 * initialize controller and driver core 573 */ 574 status = nv_init_ctl(nvc, pci_conf_handle); 575 576 if (status == NV_FAILURE) { 577 NVLOG((NVDBG_INIT, nvc, NULL, "nv_init_ctl failed")); 578 579 break; 580 } 581 582 attach_state |= ATTACH_PROGRESS_CTL_SETUP; 583 584 /* 585 * initialize mutexes 586 */ 587 mutex_init(&nvc->nvc_mutex, NULL, MUTEX_DRIVER, 588 DDI_INTR_PRI(nvc->nvc_intr_pri)); 589 590 attach_state |= ATTACH_PROGRESS_MUTEX_INIT; 591 592 /* 593 * get supported interrupt types 594 */ 595 if (ddi_intr_get_supported_types(dip, &intr_types) != 596 DDI_SUCCESS) { 597 nv_cmn_err(CE_WARN, nvc, NULL, 598 "!ddi_intr_get_supported_types failed"); 599 NVLOG((NVDBG_INIT, nvc, NULL, 600 "interrupt supported types failed")); 601 602 break; 603 } 604 605 NVLOG((NVDBG_INIT, nvc, NULL, 606 "ddi_intr_get_supported_types() returned: 0x%x", 607 intr_types)); 608 609 #ifdef 
NV_MSI_SUPPORTED 610 if (intr_types & DDI_INTR_TYPE_MSI) { 611 NVLOG((NVDBG_INIT, nvc, NULL, 612 "using MSI interrupt type")); 613 614 /* 615 * Try MSI first, but fall back to legacy if MSI 616 * attach fails 617 */ 618 if (nv_add_msi_intrs(nvc) == DDI_SUCCESS) { 619 nvc->nvc_intr_type = DDI_INTR_TYPE_MSI; 620 attach_state |= ATTACH_PROGRESS_INTR_ADDED; 621 NVLOG((NVDBG_INIT, nvc, NULL, 622 "MSI interrupt setup done")); 623 } else { 624 nv_cmn_err(CE_CONT, nvc, NULL, 625 "!MSI registration failed " 626 "will try Legacy interrupts"); 627 } 628 } 629 #endif 630 631 /* 632 * Either the MSI interrupt setup has failed or only 633 * the fixed interrupts are available on the system. 634 */ 635 if (!(attach_state & ATTACH_PROGRESS_INTR_ADDED) && 636 (intr_types & DDI_INTR_TYPE_FIXED)) { 637 638 NVLOG((NVDBG_INIT, nvc, NULL, 639 "using Legacy interrupt type")); 640 641 if (nv_add_legacy_intrs(nvc) == DDI_SUCCESS) { 642 nvc->nvc_intr_type = DDI_INTR_TYPE_FIXED; 643 attach_state |= ATTACH_PROGRESS_INTR_ADDED; 644 NVLOG((NVDBG_INIT, nvc, NULL, 645 "Legacy interrupt setup done")); 646 } else { 647 nv_cmn_err(CE_WARN, nvc, NULL, 648 "!legacy interrupt setup failed"); 649 NVLOG((NVDBG_INIT, nvc, NULL, 650 "legacy interrupt setup failed")); 651 break; 652 } 653 } 654 655 if (!(attach_state & ATTACH_PROGRESS_INTR_ADDED)) { 656 NVLOG((NVDBG_INIT, nvc, NULL, 657 "no interrupts registered")); 658 break; 659 } 660 661 #ifdef SGPIO_SUPPORT 662 /* 663 * save off the controller number 664 */ 665 (void) ddi_getlongprop(DDI_DEV_T_NONE, dip, DDI_PROP_DONTPASS, 666 "reg", (caddr_t)®s, &rlen); 667 nvc->nvc_ctlr_num = PCI_REG_FUNC_G(regs->pci_phys_hi); 668 kmem_free(regs, rlen); 669 670 /* 671 * initialize SGPIO 672 */ 673 nv_sgp_led_init(nvc, pci_conf_handle); 674 #endif /* SGPIO_SUPPORT */ 675 676 /* 677 * attach to sata module 678 */ 679 if (sata_hba_attach(nvc->nvc_dip, 680 &nvc->nvc_sata_hba_tran, 681 DDI_ATTACH) != DDI_SUCCESS) { 682 attach_state |= ATTACH_PROGRESS_SATA_MODULE; 683 684 
break; 685 } 686 687 pci_config_teardown(&pci_conf_handle); 688 689 NVLOG((NVDBG_INIT, nvc, NULL, "nv_attach DDI_SUCCESS")); 690 691 return (DDI_SUCCESS); 692 693 case DDI_RESUME: 694 695 nvc = ddi_get_soft_state(nv_statep, inst); 696 697 NVLOG((NVDBG_INIT, nvc, NULL, 698 "nv_attach(): DDI_RESUME inst %d", inst)); 699 700 if (pci_config_setup(dip, &pci_conf_handle) != DDI_SUCCESS) { 701 return (DDI_FAILURE); 702 } 703 704 /* 705 * If a device is attached after a suspend/resume, sometimes 706 * the command register is zero, as it might not be set by 707 * BIOS or a parent. Set it again here. 708 */ 709 command = pci_config_get16(pci_conf_handle, PCI_CONF_COMM); 710 711 if (command == 0) { 712 pci_config_put16(pci_conf_handle, PCI_CONF_COMM, 713 PCI_COMM_IO|PCI_COMM_MAE|PCI_COMM_ME); 714 } 715 716 /* 717 * Need to set bit 2 to 1 at config offset 0x50 718 * to enable access to the bar5 registers. 719 */ 720 reg32 = pci_config_get32(pci_conf_handle, NV_SATA_CFG_20); 721 722 if ((reg32 & NV_BAR5_SPACE_EN) != NV_BAR5_SPACE_EN) { 723 pci_config_put32(pci_conf_handle, NV_SATA_CFG_20, 724 reg32 | NV_BAR5_SPACE_EN); 725 } 726 727 nvc->nvc_state &= ~NV_CTRL_SUSPEND; 728 729 for (i = 0; i < NV_MAX_PORTS(nvc); i++) { 730 nv_resume(&(nvc->nvc_port[i])); 731 } 732 733 pci_config_teardown(&pci_conf_handle); 734 735 return (DDI_SUCCESS); 736 737 default: 738 return (DDI_FAILURE); 739 } 740 741 742 /* 743 * DDI_ATTACH failure path starts here 744 */ 745 746 if (attach_state & ATTACH_PROGRESS_INTR_ADDED) { 747 nv_rem_intrs(nvc); 748 } 749 750 if (attach_state & ATTACH_PROGRESS_SATA_MODULE) { 751 /* 752 * Remove timers 753 */ 754 int port = 0; 755 nv_port_t *nvp; 756 757 for (; port < NV_MAX_PORTS(nvc); port++) { 758 nvp = &(nvc->nvc_port[port]); 759 if (nvp->nvp_timeout_id != 0) { 760 (void) untimeout(nvp->nvp_timeout_id); 761 } 762 } 763 } 764 765 if (attach_state & ATTACH_PROGRESS_MUTEX_INIT) { 766 mutex_destroy(&nvc->nvc_mutex); 767 } 768 769 if (attach_state & 
ATTACH_PROGRESS_CTL_SETUP) { 770 nv_uninit_ctl(nvc); 771 } 772 773 if (attach_state & ATTACH_PROGRESS_BARS) { 774 while (--bar >= 0) { 775 ddi_regs_map_free(&nvc->nvc_bar_hdl[bar]); 776 } 777 } 778 779 if (attach_state & ATTACH_PROGRESS_STATEP_ALLOC) { 780 ddi_soft_state_free(nv_statep, inst); 781 } 782 783 if (attach_state & ATTACH_PROGRESS_CONF_HANDLE) { 784 pci_config_teardown(&pci_conf_handle); 785 } 786 787 cmn_err(CE_WARN, "nv_sata%d attach failed", inst); 788 789 return (DDI_FAILURE); 790 } 791 792 793 static int 794 nv_detach(dev_info_t *dip, ddi_detach_cmd_t cmd) 795 { 796 int i, port, inst = ddi_get_instance(dip); 797 nv_ctl_t *nvc; 798 nv_port_t *nvp; 799 800 nvc = ddi_get_soft_state(nv_statep, inst); 801 802 switch (cmd) { 803 804 case DDI_DETACH: 805 806 NVLOG((NVDBG_INIT, nvc, NULL, "nv_detach: DDI_DETACH")); 807 808 /* 809 * Remove interrupts 810 */ 811 nv_rem_intrs(nvc); 812 813 /* 814 * Remove timers 815 */ 816 for (port = 0; port < NV_MAX_PORTS(nvc); port++) { 817 nvp = &(nvc->nvc_port[port]); 818 if (nvp->nvp_timeout_id != 0) { 819 (void) untimeout(nvp->nvp_timeout_id); 820 } 821 } 822 823 /* 824 * Remove maps 825 */ 826 for (i = 0; i < 6; i++) { 827 ddi_regs_map_free(&nvc->nvc_bar_hdl[i]); 828 } 829 830 /* 831 * Destroy mutexes 832 */ 833 mutex_destroy(&nvc->nvc_mutex); 834 835 /* 836 * Uninitialize the controller 837 */ 838 nv_uninit_ctl(nvc); 839 840 #ifdef SGPIO_SUPPORT 841 /* 842 * release SGPIO resources 843 */ 844 nv_sgp_cleanup(nvc); 845 #endif 846 847 /* 848 * unregister from the sata module 849 */ 850 (void) sata_hba_detach(nvc->nvc_dip, DDI_DETACH); 851 852 /* 853 * Free soft state 854 */ 855 ddi_soft_state_free(nv_statep, inst); 856 857 return (DDI_SUCCESS); 858 859 case DDI_SUSPEND: 860 /* 861 * The PM functions for suspend and resume are incomplete 862 * and need additional work. It may or may not work in 863 * the current state. 
864 */ 865 NVLOG((NVDBG_INIT, nvc, NULL, "nv_detach: DDI_SUSPEND")); 866 867 for (i = 0; i < NV_MAX_PORTS(nvc); i++) { 868 nv_suspend(&(nvc->nvc_port[i])); 869 } 870 871 nvc->nvc_state |= NV_CTRL_SUSPEND; 872 873 return (DDI_SUCCESS); 874 875 default: 876 return (DDI_FAILURE); 877 } 878 } 879 880 881 /*ARGSUSED*/ 882 static int 883 nv_getinfo(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result) 884 { 885 nv_ctl_t *nvc; 886 int instance; 887 dev_t dev; 888 889 dev = (dev_t)arg; 890 instance = getminor(dev); 891 892 switch (infocmd) { 893 case DDI_INFO_DEVT2DEVINFO: 894 nvc = ddi_get_soft_state(nv_statep, instance); 895 if (nvc != NULL) { 896 *result = nvc->nvc_dip; 897 return (DDI_SUCCESS); 898 } else { 899 *result = NULL; 900 return (DDI_FAILURE); 901 } 902 case DDI_INFO_DEVT2INSTANCE: 903 *(int *)result = instance; 904 break; 905 default: 906 break; 907 } 908 return (DDI_SUCCESS); 909 } 910 911 912 #ifdef SGPIO_SUPPORT 913 /* ARGSUSED */ 914 static int 915 nv_open(dev_t *devp, int flag, int otyp, cred_t *credp) 916 { 917 nv_ctl_t *nvc = ddi_get_soft_state(nv_statep, getminor(*devp)); 918 919 if (nvc == NULL) { 920 return (ENXIO); 921 } 922 923 return (0); 924 } 925 926 927 /* ARGSUSED */ 928 static int 929 nv_close(dev_t dev, int flag, int otyp, cred_t *credp) 930 { 931 return (0); 932 } 933 934 935 /* ARGSUSED */ 936 static int 937 nv_ioctl(dev_t dev, int cmd, intptr_t arg, int mode, cred_t *credp, int *rvalp) 938 { 939 nv_ctl_t *nvc; 940 int inst; 941 int status; 942 int ctlr, port; 943 int drive; 944 uint8_t curr_led; 945 struct dc_led_ctl led; 946 947 inst = getminor(dev); 948 if (inst == -1) { 949 return (EBADF); 950 } 951 952 nvc = ddi_get_soft_state(nv_statep, inst); 953 if (nvc == NULL) { 954 return (EBADF); 955 } 956 957 if ((nvc->nvc_sgp_cbp == NULL) || (nvc->nvc_sgp_cmn == NULL)) { 958 return (EIO); 959 } 960 961 switch (cmd) { 962 case DEVCTL_SET_LED: 963 status = ddi_copyin((void *)arg, &led, 964 sizeof (struct dc_led_ctl), mode); 965 if 
(status != 0) 966 return (EFAULT); 967 968 /* 969 * Since only the first two controller currently support 970 * SGPIO (as per NVIDIA docs), this code will as well. 971 * Note that this validate the port value within led_state 972 * as well. 973 */ 974 975 ctlr = SGP_DRV_TO_CTLR(led.led_number); 976 if ((ctlr != 0) && (ctlr != 1)) 977 return (ENXIO); 978 979 if ((led.led_state & DCL_STATE_FAST_BLNK) || 980 (led.led_state & DCL_STATE_SLOW_BLNK)) { 981 return (EINVAL); 982 } 983 984 drive = led.led_number; 985 986 if ((led.led_ctl_active == DCL_CNTRL_OFF) || 987 (led.led_state == DCL_STATE_OFF)) { 988 989 if (led.led_type == DCL_TYPE_DEVICE_FAIL) { 990 nv_sgp_error(nvc, drive, TR_ERROR_DISABLE); 991 } else if (led.led_type == DCL_TYPE_DEVICE_OK2RM) { 992 nv_sgp_locate(nvc, drive, TR_LOCATE_DISABLE); 993 } else { 994 return (ENXIO); 995 } 996 997 port = SGP_DRV_TO_PORT(led.led_number); 998 nvc->nvc_port[port].nvp_sgp_ioctl_mod |= led.led_type; 999 } 1000 1001 if (led.led_ctl_active == DCL_CNTRL_ON) { 1002 if (led.led_type == DCL_TYPE_DEVICE_FAIL) { 1003 nv_sgp_error(nvc, drive, TR_ERROR_ENABLE); 1004 } else if (led.led_type == DCL_TYPE_DEVICE_OK2RM) { 1005 nv_sgp_locate(nvc, drive, TR_LOCATE_ENABLE); 1006 } else { 1007 return (ENXIO); 1008 } 1009 1010 port = SGP_DRV_TO_PORT(led.led_number); 1011 nvc->nvc_port[port].nvp_sgp_ioctl_mod |= led.led_type; 1012 } 1013 1014 break; 1015 1016 case DEVCTL_GET_LED: 1017 status = ddi_copyin((void *)arg, &led, 1018 sizeof (struct dc_led_ctl), mode); 1019 if (status != 0) 1020 return (EFAULT); 1021 1022 /* 1023 * Since only the first two controller currently support 1024 * SGPIO (as per NVIDIA docs), this code will as well. 1025 * Note that this validate the port value within led_state 1026 * as well. 
1027 */ 1028 1029 ctlr = SGP_DRV_TO_CTLR(led.led_number); 1030 if ((ctlr != 0) && (ctlr != 1)) 1031 return (ENXIO); 1032 1033 curr_led = SGPIO0_TR_DRV(nvc->nvc_sgp_cbp->sgpio0_tr, 1034 led.led_number); 1035 1036 port = SGP_DRV_TO_PORT(led.led_number); 1037 if (nvc->nvc_port[port].nvp_sgp_ioctl_mod & led.led_type) { 1038 led.led_ctl_active = DCL_CNTRL_ON; 1039 1040 if (led.led_type == DCL_TYPE_DEVICE_FAIL) { 1041 if (TR_ERROR(curr_led) == TR_ERROR_DISABLE) 1042 led.led_state = DCL_STATE_OFF; 1043 else 1044 led.led_state = DCL_STATE_ON; 1045 } else if (led.led_type == DCL_TYPE_DEVICE_OK2RM) { 1046 if (TR_LOCATE(curr_led) == TR_LOCATE_DISABLE) 1047 led.led_state = DCL_STATE_OFF; 1048 else 1049 led.led_state = DCL_STATE_ON; 1050 } else { 1051 return (ENXIO); 1052 } 1053 } else { 1054 led.led_ctl_active = DCL_CNTRL_OFF; 1055 /* 1056 * Not really off, but never set and no constant for 1057 * tri-state 1058 */ 1059 led.led_state = DCL_STATE_OFF; 1060 } 1061 1062 status = ddi_copyout(&led, (void *)arg, 1063 sizeof (struct dc_led_ctl), mode); 1064 if (status != 0) 1065 return (EFAULT); 1066 1067 break; 1068 1069 case DEVCTL_NUM_LEDS: 1070 led.led_number = SGPIO_DRV_CNT_VALUE; 1071 led.led_ctl_active = 1; 1072 led.led_type = 3; 1073 1074 /* 1075 * According to documentation, NVIDIA SGPIO is supposed to 1076 * support blinking, but it does not seem to work in practice. 1077 */ 1078 led.led_state = DCL_STATE_ON; 1079 1080 status = ddi_copyout(&led, (void *)arg, 1081 sizeof (struct dc_led_ctl), mode); 1082 if (status != 0) 1083 return (EFAULT); 1084 1085 break; 1086 1087 default: 1088 return (EINVAL); 1089 } 1090 1091 return (0); 1092 } 1093 #endif /* SGPIO_SUPPORT */ 1094 1095 1096 /* 1097 * Called by sata module to probe a port. Port and device state 1098 * are not changed here... only reported back to the sata module. 
 *
 * If probe confirms a device is present for the first time, it will
 * initiate a device reset, then probe will be called again and the
 * signature will be checked.  If the signature is valid, data structures
 * will be initialized.
 *
 * All per-port state examined below is protected by nvp_mutex.
 */
static int
nv_sata_probe(dev_info_t *dip, sata_device_t *sd)
{
	nv_ctl_t *nvc = ddi_get_soft_state(nv_statep, ddi_get_instance(dip));
	uint8_t cport = sd->satadev_addr.cport;
	uint8_t pmport = sd->satadev_addr.pmport;
	uint8_t qual = sd->satadev_addr.qual;
	clock_t nv_lbolt = ddi_get_lbolt();	/* snapshot for all timing below */
	nv_port_t *nvp;

	/* reject out-of-range port addresses up front */
	if (cport >= NV_MAX_PORTS(nvc)) {
		sd->satadev_type = SATA_DTYPE_NONE;
		sd->satadev_state = SATA_STATE_UNKNOWN;

		return (SATA_FAILURE);
	}

	ASSERT(nvc->nvc_port != NULL);
	nvp = &(nvc->nvc_port[cport]);
	ASSERT(nvp != NULL);

	NVLOG((NVDBG_PROBE, nvc, nvp,
	    "nv_sata_probe: enter cport: 0x%x, pmport: 0x%x, "
	    "qual: 0x%x", cport, pmport, qual));

	mutex_enter(&nvp->nvp_mutex);

	/*
	 * This check seems to be done in the SATA module.
	 * It may not be required here
	 */
	if (nvp->nvp_state & NV_PORT_INACTIVE) {
		nv_cmn_err(CE_WARN, nvc, nvp,
		    "port inactive. Use cfgadm to activate");
		sd->satadev_type = SATA_DTYPE_UNKNOWN;
		sd->satadev_state = SATA_PSTATE_SHUTDOWN;
		mutex_exit(&nvp->nvp_mutex);

		return (SATA_FAILURE);
	}

	/* this controller has no port multiplier support */
	if (qual == SATA_ADDR_PMPORT) {
		sd->satadev_type = SATA_DTYPE_NONE;
		sd->satadev_state = SATA_STATE_UNKNOWN;
		mutex_exit(&nvp->nvp_mutex);
		nv_cmn_err(CE_WARN, nvc, nvp,
		    "controller does not support port multiplier");

		return (SATA_FAILURE);
	}

	sd->satadev_state = SATA_PSTATE_PWRON;

	nv_copy_registers(nvp, sd, NULL);

	/*
	 * determine link status
	 */
	if (nv_check_link(sd->satadev_scr.sstatus) == B_FALSE) {
		uint8_t det;

		/*
		 * Reset will cause the link to go down for a short period of
		 * time.  If link is lost for less than 2 seconds ignore it
		 * so that the reset can progress.
		 */
		if (nvp->nvp_state & NV_PORT_RESET_PROBE) {

			if (nvp->nvp_link_lost_time == 0) {
				nvp->nvp_link_lost_time = nv_lbolt;
			}

			if (TICK_TO_SEC(nv_lbolt -
			    nvp->nvp_link_lost_time) < NV_LINK_LOST_OK) {
				NVLOG((NVDBG_ALWAYS, nvp->nvp_ctlp, nvp,
				    "probe: intermittent link lost while"
				    " resetting"));
				/*
				 * fake status of link so that probe continues
				 */
				SSTATUS_SET_IPM(sd->satadev_scr.sstatus,
				    SSTATUS_IPM_ACTIVE);
				SSTATUS_SET_DET(sd->satadev_scr.sstatus,
				    SSTATUS_DET_DEVPRE_PHYCOM);
				sd->satadev_type = SATA_DTYPE_UNKNOWN;
				mutex_exit(&nvp->nvp_mutex);

				return (SATA_SUCCESS);
			} else {
				/* link stayed down too long; give up reset */
				nvp->nvp_state &=
				    ~(NV_PORT_RESET_PROBE|NV_PORT_RESET);
			}
		}

		/*
		 * no link, so tear down port and abort all active packets
		 */

		det = (sd->satadev_scr.sstatus & SSTATUS_DET) >>
		    SSTATUS_DET_SHIFT;

		switch (det) {
		case SSTATUS_DET_NODEV:
		case SSTATUS_DET_PHYOFFLINE:
			sd->satadev_type = SATA_DTYPE_NONE;
			break;
		default:
			sd->satadev_type = SATA_DTYPE_UNKNOWN;
			break;
		}

		NVLOG((NVDBG_PROBE, nvp->nvp_ctlp, nvp,
		    "probe: link lost invoking nv_abort_active"));

		(void) nv_abort_active(nvp, NULL, SATA_PKT_TIMEOUT);
		nv_uninit_port(nvp);

		mutex_exit(&nvp->nvp_mutex);

		return (SATA_SUCCESS);
	} else {
		nvp->nvp_link_lost_time = 0;
	}

	/*
	 * A device is present so clear hotremoved flag
	 */
	nvp->nvp_state &= ~NV_PORT_HOTREMOVED;

#ifdef SGPIO_SUPPORT
	nv_sgp_drive_connect(nvp->nvp_ctlp, SGP_CTLR_PORT_TO_DRV(
	    nvp->nvp_ctlp->nvc_ctlr_num, nvp->nvp_port_num));
#endif

	/*
	 * If the signature was acquired previously there is no need to
	 * do it again.
	 */
	if (nvp->nvp_signature != 0) {
		NVLOG((NVDBG_PROBE, nvp->nvp_ctlp, nvp,
		    "probe: signature acquired previously"));
		sd->satadev_type = nvp->nvp_type;
		mutex_exit(&nvp->nvp_mutex);

		return (SATA_SUCCESS);
	}

	/*
	 * If NV_PORT_RESET is not set, this is the first time through
	 * so perform reset and return.
	 */
	if ((nvp->nvp_state & NV_PORT_RESET) == 0) {
		NVLOG((NVDBG_PROBE, nvp->nvp_ctlp, nvp,
		    "probe: first reset to get sig"));
		nvp->nvp_state |= NV_PORT_RESET_PROBE;
		nv_reset(nvp);
		sd->satadev_type = nvp->nvp_type = SATA_DTYPE_UNKNOWN;
		nvp->nvp_probe_time = nv_lbolt;
		mutex_exit(&nvp->nvp_mutex);

		return (SATA_SUCCESS);
	}

	/*
	 * Reset was done previously.  see if the signature is
	 * available.
	 */
	nv_read_signature(nvp);
	sd->satadev_type = nvp->nvp_type;

	/*
	 * Some drives may require additional resets to get a
	 * valid signature.  If a drive was not just powered up, the signature
	 * should arrive within half a second of reset.  Therefore if more
	 * than 5 seconds has elapsed while waiting for a signature, reset
	 * again.  These extra resets do not appear to create problems when
	 * the drive is spinning up for more than this reset period.
	 */
	if (nvp->nvp_signature == 0) {
		if (TICK_TO_SEC(nv_lbolt - nvp->nvp_reset_time) > 5) {
			NVLOG((NVDBG_PROBE, nvc, nvp, "additional reset"
			    " during signature acquisition"));
			nv_reset(nvp);
		}

		mutex_exit(&nvp->nvp_mutex);

		return (SATA_SUCCESS);
	}

	NVLOG((NVDBG_PROBE, nvc, nvp, "signature acquired after %d ms",
	    TICK_TO_MSEC(nv_lbolt - nvp->nvp_probe_time)));

	/*
	 * nv_sata only deals with ATA disks and ATAPI CD/DVDs so far.  If
	 * it is not either of those, then just return.
	 */
	if ((nvp->nvp_type != SATA_DTYPE_ATADISK) &&
	    (nvp->nvp_type != SATA_DTYPE_ATAPICD)) {
		NVLOG((NVDBG_PROBE, nvc, nvp, "Driver currently handles only"
		    " disks/CDs/DVDs. Signature acquired was %X",
		    nvp->nvp_signature));
		mutex_exit(&nvp->nvp_mutex);

		return (SATA_SUCCESS);
	}

	/*
	 * make sure structures are initialized
	 */
	if (nv_init_port(nvp) == NV_SUCCESS) {
		NVLOG((NVDBG_PROBE, nvc, nvp,
		    "device detected and set up at port %d", cport));
		mutex_exit(&nvp->nvp_mutex);

		return (SATA_SUCCESS);
	} else {
		nv_cmn_err(CE_WARN, nvc, nvp, "failed to set up data "
		    "structures for port %d", cport);
		mutex_exit(&nvp->nvp_mutex);

		return (SATA_FAILURE);
	}
	/*NOTREACHED*/
}


/*
 * Called by sata module to start a new command.
1334 */ 1335 static int 1336 nv_sata_start(dev_info_t *dip, sata_pkt_t *spkt) 1337 { 1338 int cport = spkt->satapkt_device.satadev_addr.cport; 1339 nv_ctl_t *nvc = ddi_get_soft_state(nv_statep, ddi_get_instance(dip)); 1340 nv_port_t *nvp = &(nvc->nvc_port[cport]); 1341 int ret; 1342 1343 NVLOG((NVDBG_ENTRY, nvc, nvp, "nv_sata_start: opmode: 0x%x cmd=%x", 1344 spkt->satapkt_op_mode, spkt->satapkt_cmd.satacmd_cmd_reg)); 1345 1346 mutex_enter(&nvp->nvp_mutex); 1347 1348 /* 1349 * hotremoved is an intermediate state where the link was lost, 1350 * but the hotplug event has not yet been processed by the sata 1351 * module. Fail the request. 1352 */ 1353 if (nvp->nvp_state & NV_PORT_HOTREMOVED) { 1354 spkt->satapkt_reason = SATA_PKT_PORT_ERROR; 1355 spkt->satapkt_device.satadev_state = SATA_STATE_UNKNOWN; 1356 NVLOG((NVDBG_ERRS, nvc, nvp, 1357 "nv_sata_start: NV_PORT_HOTREMOVED")); 1358 nv_copy_registers(nvp, &spkt->satapkt_device, NULL); 1359 mutex_exit(&nvp->nvp_mutex); 1360 1361 return (SATA_TRAN_PORT_ERROR); 1362 } 1363 1364 if (nvp->nvp_state & NV_PORT_RESET) { 1365 NVLOG((NVDBG_ERRS, nvc, nvp, 1366 "still waiting for reset completion")); 1367 spkt->satapkt_reason = SATA_PKT_BUSY; 1368 mutex_exit(&nvp->nvp_mutex); 1369 1370 /* 1371 * If in panic, timeouts do not occur, so fake one 1372 * so that the signature can be acquired to complete 1373 * the reset handling. 
1374 */ 1375 if (ddi_in_panic()) { 1376 nv_timeout(nvp); 1377 } 1378 1379 return (SATA_TRAN_BUSY); 1380 } 1381 1382 if (nvp->nvp_type == SATA_DTYPE_NONE) { 1383 spkt->satapkt_reason = SATA_PKT_PORT_ERROR; 1384 NVLOG((NVDBG_ERRS, nvc, nvp, 1385 "nv_sata_start: SATA_DTYPE_NONE")); 1386 nv_copy_registers(nvp, &spkt->satapkt_device, NULL); 1387 mutex_exit(&nvp->nvp_mutex); 1388 1389 return (SATA_TRAN_PORT_ERROR); 1390 } 1391 1392 if (spkt->satapkt_device.satadev_type == SATA_DTYPE_PMULT) { 1393 ASSERT(nvp->nvp_type == SATA_DTYPE_PMULT); 1394 nv_cmn_err(CE_WARN, nvc, nvp, 1395 "port multipliers not supported by controller"); 1396 spkt->satapkt_reason = SATA_PKT_CMD_UNSUPPORTED; 1397 mutex_exit(&nvp->nvp_mutex); 1398 1399 return (SATA_TRAN_CMD_UNSUPPORTED); 1400 } 1401 1402 if ((nvp->nvp_state & NV_PORT_INIT) == 0) { 1403 spkt->satapkt_reason = SATA_PKT_PORT_ERROR; 1404 NVLOG((NVDBG_ERRS, nvc, nvp, 1405 "nv_sata_start: port not yet initialized")); 1406 nv_copy_registers(nvp, &spkt->satapkt_device, NULL); 1407 mutex_exit(&nvp->nvp_mutex); 1408 1409 return (SATA_TRAN_PORT_ERROR); 1410 } 1411 1412 if (nvp->nvp_state & NV_PORT_INACTIVE) { 1413 spkt->satapkt_reason = SATA_PKT_PORT_ERROR; 1414 NVLOG((NVDBG_ERRS, nvc, nvp, 1415 "nv_sata_start: NV_PORT_INACTIVE")); 1416 nv_copy_registers(nvp, &spkt->satapkt_device, NULL); 1417 mutex_exit(&nvp->nvp_mutex); 1418 1419 return (SATA_TRAN_PORT_ERROR); 1420 } 1421 1422 if (nvp->nvp_state & NV_PORT_FAILED) { 1423 spkt->satapkt_reason = SATA_PKT_PORT_ERROR; 1424 NVLOG((NVDBG_ERRS, nvc, nvp, 1425 "nv_sata_start: NV_PORT_FAILED state")); 1426 nv_copy_registers(nvp, &spkt->satapkt_device, NULL); 1427 mutex_exit(&nvp->nvp_mutex); 1428 1429 return (SATA_TRAN_PORT_ERROR); 1430 } 1431 1432 /* 1433 * after a device reset, and then when sata module restore processing 1434 * is complete, the sata module will set sata_clear_dev_reset which 1435 * indicates that restore processing has completed and normal 1436 * non-restore related commands should 
be processed. 1437 */ 1438 if (spkt->satapkt_cmd.satacmd_flags.sata_clear_dev_reset) { 1439 nvp->nvp_state &= ~NV_PORT_RESTORE; 1440 NVLOG((NVDBG_ENTRY, nvc, nvp, 1441 "nv_sata_start: clearing NV_PORT_RESTORE")); 1442 } 1443 1444 /* 1445 * if the device was recently reset as indicated by NV_PORT_RESTORE, 1446 * only allow commands which restore device state. The sata module 1447 * marks such commands with with sata_ignore_dev_reset. 1448 * 1449 * during coredump, nv_reset is called and but then the restore 1450 * doesn't happen. For now, workaround by ignoring the wait for 1451 * restore if the system is panicing. 1452 */ 1453 if ((nvp->nvp_state & NV_PORT_RESTORE) && 1454 !(spkt->satapkt_cmd.satacmd_flags.sata_ignore_dev_reset) && 1455 (ddi_in_panic() == 0)) { 1456 spkt->satapkt_reason = SATA_PKT_BUSY; 1457 NVLOG((NVDBG_ENTRY, nvc, nvp, 1458 "nv_sata_start: waiting for restore ")); 1459 mutex_exit(&nvp->nvp_mutex); 1460 1461 return (SATA_TRAN_BUSY); 1462 } 1463 1464 if (nvp->nvp_state & NV_PORT_ABORTING) { 1465 spkt->satapkt_reason = SATA_PKT_BUSY; 1466 NVLOG((NVDBG_ERRS, nvc, nvp, 1467 "nv_sata_start: NV_PORT_ABORTING")); 1468 mutex_exit(&nvp->nvp_mutex); 1469 1470 return (SATA_TRAN_BUSY); 1471 } 1472 1473 if (spkt->satapkt_op_mode & 1474 (SATA_OPMODE_POLLING|SATA_OPMODE_SYNCH)) { 1475 1476 ret = nv_start_sync(nvp, spkt); 1477 1478 mutex_exit(&nvp->nvp_mutex); 1479 1480 return (ret); 1481 } 1482 1483 /* 1484 * start command asynchronous command 1485 */ 1486 ret = nv_start_async(nvp, spkt); 1487 1488 mutex_exit(&nvp->nvp_mutex); 1489 1490 return (ret); 1491 } 1492 1493 1494 /* 1495 * SATA_OPMODE_POLLING implies the driver is in a 1496 * synchronous mode, and SATA_OPMODE_SYNCH is also set. 1497 * If only SATA_OPMODE_SYNCH is set, the driver can use 1498 * interrupts and sleep wait on a cv. 1499 * 1500 * If SATA_OPMODE_POLLING is set, the driver can't use 1501 * interrupts and must busy wait and simulate the 1502 * interrupts by waiting for BSY to be cleared. 
1503 * 1504 * Synchronous mode has to return BUSY if there are 1505 * any other commands already on the drive. 1506 */ 1507 static int 1508 nv_start_sync(nv_port_t *nvp, sata_pkt_t *spkt) 1509 { 1510 nv_ctl_t *nvc = nvp->nvp_ctlp; 1511 int ret; 1512 1513 NVLOG((NVDBG_SYNC, nvp->nvp_ctlp, nvp, "nv_sata_satapkt_sync: entry")); 1514 1515 if (nvp->nvp_ncq_run != 0 || nvp->nvp_non_ncq_run != 0) { 1516 spkt->satapkt_reason = SATA_PKT_BUSY; 1517 NVLOG((NVDBG_SYNC, nvp->nvp_ctlp, nvp, 1518 "nv_sata_satapkt_sync: device is busy, sync cmd rejected" 1519 "ncq_run: %d non_ncq_run: %d spkt: %p", 1520 nvp->nvp_ncq_run, nvp->nvp_non_ncq_run, 1521 (&(nvp->nvp_slot[0]))->nvslot_spkt)); 1522 1523 return (SATA_TRAN_BUSY); 1524 } 1525 1526 /* 1527 * if SYNC but not POLL, verify that this is not on interrupt thread. 1528 */ 1529 if (!(spkt->satapkt_op_mode & SATA_OPMODE_POLLING) && 1530 servicing_interrupt()) { 1531 spkt->satapkt_reason = SATA_PKT_BUSY; 1532 NVLOG((NVDBG_SYNC, nvp->nvp_ctlp, nvp, 1533 "SYNC mode not allowed during interrupt")); 1534 1535 return (SATA_TRAN_BUSY); 1536 1537 } 1538 1539 /* 1540 * disable interrupt generation if in polled mode 1541 */ 1542 if (spkt->satapkt_op_mode & SATA_OPMODE_POLLING) { 1543 (*(nvc->nvc_set_intr))(nvp, NV_INTR_DISABLE); 1544 } 1545 1546 if ((ret = nv_start_common(nvp, spkt)) != SATA_TRAN_ACCEPTED) { 1547 if (spkt->satapkt_op_mode & SATA_OPMODE_POLLING) { 1548 (*(nvc->nvc_set_intr))(nvp, NV_INTR_ENABLE); 1549 } 1550 1551 return (ret); 1552 } 1553 1554 if (spkt->satapkt_op_mode & SATA_OPMODE_POLLING) { 1555 mutex_exit(&nvp->nvp_mutex); 1556 ret = nv_poll_wait(nvp, spkt); 1557 mutex_enter(&nvp->nvp_mutex); 1558 1559 (*(nvc->nvc_set_intr))(nvp, NV_INTR_ENABLE); 1560 1561 NVLOG((NVDBG_SYNC, nvp->nvp_ctlp, nvp, "nv_sata_satapkt_sync:" 1562 " done % reason %d", ret)); 1563 1564 return (ret); 1565 } 1566 1567 /* 1568 * non-polling synchronous mode handling. The interrupt will signal 1569 * when the IO is completed. 
1570 */ 1571 cv_wait(&nvp->nvp_poll_cv, &nvp->nvp_mutex); 1572 1573 if (spkt->satapkt_reason != SATA_PKT_COMPLETED) { 1574 1575 spkt->satapkt_reason = SATA_PKT_TIMEOUT; 1576 } 1577 1578 NVLOG((NVDBG_SYNC, nvp->nvp_ctlp, nvp, "nv_sata_satapkt_sync:" 1579 " done % reason %d", spkt->satapkt_reason)); 1580 1581 return (SATA_TRAN_ACCEPTED); 1582 } 1583 1584 1585 static int 1586 nv_poll_wait(nv_port_t *nvp, sata_pkt_t *spkt) 1587 { 1588 int ret; 1589 nv_ctl_t *nvc = nvp->nvp_ctlp; 1590 #if ! defined(__lock_lint) 1591 nv_slot_t *nv_slotp = &(nvp->nvp_slot[0]); /* not NCQ aware */ 1592 #endif 1593 1594 NVLOG((NVDBG_SYNC, nvc, nvp, "nv_poll_wait: enter")); 1595 1596 for (;;) { 1597 1598 NV_DELAY_NSEC(400); 1599 1600 NVLOG((NVDBG_SYNC, nvc, nvp, "nv_poll_wait: before nv_wait")); 1601 if (nv_wait(nvp, 0, SATA_STATUS_BSY, 1602 NV_SEC2USEC(spkt->satapkt_time), NV_NOSLEEP) == B_FALSE) { 1603 mutex_enter(&nvp->nvp_mutex); 1604 spkt->satapkt_reason = SATA_PKT_TIMEOUT; 1605 nv_copy_registers(nvp, &spkt->satapkt_device, spkt); 1606 nv_reset(nvp); 1607 nv_complete_io(nvp, spkt, 0); 1608 mutex_exit(&nvp->nvp_mutex); 1609 NVLOG((NVDBG_SYNC, nvc, nvp, "nv_poll_wait: " 1610 "SATA_STATUS_BSY")); 1611 1612 return (SATA_TRAN_ACCEPTED); 1613 } 1614 1615 NVLOG((NVDBG_SYNC, nvc, nvp, "nv_poll_wait: before nvc_intr")); 1616 1617 /* 1618 * Simulate interrupt. 1619 */ 1620 ret = (*(nvc->nvc_interrupt))((caddr_t)nvc, NULL); 1621 NVLOG((NVDBG_SYNC, nvc, nvp, "nv_poll_wait: after nvc_intr")); 1622 1623 if (ret != DDI_INTR_CLAIMED) { 1624 NVLOG((NVDBG_SYNC, nvc, nvp, "nv_poll_wait:" 1625 " unclaimed -- resetting")); 1626 mutex_enter(&nvp->nvp_mutex); 1627 nv_copy_registers(nvp, &spkt->satapkt_device, spkt); 1628 nv_reset(nvp); 1629 spkt->satapkt_reason = SATA_PKT_TIMEOUT; 1630 nv_complete_io(nvp, spkt, 0); 1631 mutex_exit(&nvp->nvp_mutex); 1632 1633 return (SATA_TRAN_ACCEPTED); 1634 } 1635 1636 #if ! 
defined(__lock_lint) 1637 if (nv_slotp->nvslot_flags == NVSLOT_COMPLETE) { 1638 /* 1639 * packet is complete 1640 */ 1641 return (SATA_TRAN_ACCEPTED); 1642 } 1643 #endif 1644 } 1645 /*NOTREACHED*/ 1646 } 1647 1648 1649 /* 1650 * Called by sata module to abort outstanding packets. 1651 */ 1652 /*ARGSUSED*/ 1653 static int 1654 nv_sata_abort(dev_info_t *dip, sata_pkt_t *spkt, int flag) 1655 { 1656 int cport = spkt->satapkt_device.satadev_addr.cport; 1657 nv_ctl_t *nvc = ddi_get_soft_state(nv_statep, ddi_get_instance(dip)); 1658 nv_port_t *nvp = &(nvc->nvc_port[cport]); 1659 int c_a, ret; 1660 1661 ASSERT(cport < NV_MAX_PORTS(nvc)); 1662 NVLOG((NVDBG_ENTRY, nvc, nvp, "nv_sata_abort %d %p", flag, spkt)); 1663 1664 mutex_enter(&nvp->nvp_mutex); 1665 1666 if (nvp->nvp_state & NV_PORT_INACTIVE) { 1667 mutex_exit(&nvp->nvp_mutex); 1668 nv_cmn_err(CE_WARN, nvc, nvp, 1669 "abort request failed: port inactive"); 1670 1671 return (SATA_FAILURE); 1672 } 1673 1674 /* 1675 * spkt == NULL then abort all commands 1676 */ 1677 c_a = nv_abort_active(nvp, spkt, SATA_PKT_ABORTED); 1678 1679 if (c_a) { 1680 NVLOG((NVDBG_ENTRY, nvc, nvp, 1681 "packets aborted running=%d", c_a)); 1682 ret = SATA_SUCCESS; 1683 } else { 1684 if (spkt == NULL) { 1685 NVLOG((NVDBG_ENTRY, nvc, nvp, "no spkts to abort")); 1686 } else { 1687 NVLOG((NVDBG_ENTRY, nvc, nvp, 1688 "can't find spkt to abort")); 1689 } 1690 ret = SATA_FAILURE; 1691 } 1692 1693 mutex_exit(&nvp->nvp_mutex); 1694 1695 return (ret); 1696 } 1697 1698 1699 /* 1700 * if spkt == NULL abort all pkts running, otherwise 1701 * abort the requested packet. must be called with nv_mutex 1702 * held and returns with it held. Not NCQ aware. 
1703 */ 1704 static int 1705 nv_abort_active(nv_port_t *nvp, sata_pkt_t *spkt, int abort_reason) 1706 { 1707 int aborted = 0, i, reset_once = B_FALSE; 1708 struct nv_slot *nv_slotp; 1709 sata_pkt_t *spkt_slot; 1710 1711 ASSERT(MUTEX_HELD(&nvp->nvp_mutex)); 1712 1713 /* 1714 * return if the port is not configured 1715 */ 1716 if (nvp->nvp_slot == NULL) { 1717 NVLOG((NVDBG_ENTRY, nvp->nvp_ctlp, nvp, 1718 "nv_abort_active: not configured so returning")); 1719 1720 return (0); 1721 } 1722 1723 NVLOG((NVDBG_ENTRY, nvp->nvp_ctlp, nvp, "nv_abort_active")); 1724 1725 nvp->nvp_state |= NV_PORT_ABORTING; 1726 1727 for (i = 0; i < nvp->nvp_queue_depth; i++) { 1728 1729 nv_slotp = &(nvp->nvp_slot[i]); 1730 spkt_slot = nv_slotp->nvslot_spkt; 1731 1732 /* 1733 * skip if not active command in slot 1734 */ 1735 if (spkt_slot == NULL) { 1736 continue; 1737 } 1738 1739 /* 1740 * if a specific packet was requested, skip if 1741 * this is not a match 1742 */ 1743 if ((spkt != NULL) && (spkt != spkt_slot)) { 1744 continue; 1745 } 1746 1747 /* 1748 * stop the hardware. This could need reworking 1749 * when NCQ is enabled in the driver. 1750 */ 1751 if (reset_once == B_FALSE) { 1752 ddi_acc_handle_t bmhdl = nvp->nvp_bm_hdl; 1753 1754 /* 1755 * stop DMA engine 1756 */ 1757 nv_put8(bmhdl, nvp->nvp_bmicx, 0); 1758 1759 nv_reset(nvp); 1760 reset_once = B_TRUE; 1761 } 1762 1763 spkt_slot->satapkt_reason = abort_reason; 1764 nv_complete_io(nvp, spkt_slot, i); 1765 aborted++; 1766 } 1767 1768 nvp->nvp_state &= ~NV_PORT_ABORTING; 1769 1770 return (aborted); 1771 } 1772 1773 1774 /* 1775 * Called by sata module to reset a port, device, or the controller. 
1776 */ 1777 static int 1778 nv_sata_reset(dev_info_t *dip, sata_device_t *sd) 1779 { 1780 int cport = sd->satadev_addr.cport; 1781 nv_ctl_t *nvc = ddi_get_soft_state(nv_statep, ddi_get_instance(dip)); 1782 nv_port_t *nvp = &(nvc->nvc_port[cport]); 1783 int ret = SATA_SUCCESS; 1784 1785 ASSERT(cport < NV_MAX_PORTS(nvc)); 1786 1787 NVLOG((NVDBG_ENTRY, nvc, nvp, "nv_sata_reset")); 1788 1789 mutex_enter(&nvp->nvp_mutex); 1790 1791 switch (sd->satadev_addr.qual) { 1792 1793 case SATA_ADDR_CPORT: 1794 /*FALLTHROUGH*/ 1795 case SATA_ADDR_DCPORT: 1796 nv_reset(nvp); 1797 (void) nv_abort_active(nvp, NULL, SATA_PKT_RESET); 1798 1799 break; 1800 case SATA_ADDR_CNTRL: 1801 NVLOG((NVDBG_ENTRY, nvc, nvp, 1802 "nv_sata_reset: constroller reset not supported")); 1803 1804 break; 1805 case SATA_ADDR_PMPORT: 1806 case SATA_ADDR_DPMPORT: 1807 NVLOG((NVDBG_ENTRY, nvc, nvp, 1808 "nv_sata_reset: port multipliers not supported")); 1809 /*FALLTHROUGH*/ 1810 default: 1811 /* 1812 * unsupported case 1813 */ 1814 ret = SATA_FAILURE; 1815 break; 1816 } 1817 1818 if (ret == SATA_SUCCESS) { 1819 /* 1820 * If the port is inactive, do a quiet reset and don't attempt 1821 * to wait for reset completion or do any post reset processing 1822 */ 1823 if (nvp->nvp_state & NV_PORT_INACTIVE) { 1824 nvp->nvp_state &= ~NV_PORT_RESET; 1825 nvp->nvp_reset_time = 0; 1826 } 1827 1828 /* 1829 * clear the port failed flag 1830 */ 1831 nvp->nvp_state &= ~NV_PORT_FAILED; 1832 } 1833 1834 mutex_exit(&nvp->nvp_mutex); 1835 1836 return (ret); 1837 } 1838 1839 1840 /* 1841 * Sata entry point to handle port activation. 
cfgadm -c connect 1842 */ 1843 static int 1844 nv_sata_activate(dev_info_t *dip, sata_device_t *sd) 1845 { 1846 int cport = sd->satadev_addr.cport; 1847 nv_ctl_t *nvc = ddi_get_soft_state(nv_statep, ddi_get_instance(dip)); 1848 nv_port_t *nvp = &(nvc->nvc_port[cport]); 1849 1850 ASSERT(cport < NV_MAX_PORTS(nvc)); 1851 NVLOG((NVDBG_ENTRY, nvc, nvp, "nv_sata_activate")); 1852 1853 mutex_enter(&nvp->nvp_mutex); 1854 1855 sd->satadev_state = SATA_STATE_READY; 1856 1857 nv_copy_registers(nvp, sd, NULL); 1858 1859 (*(nvc->nvc_set_intr))(nvp, NV_INTR_ENABLE); 1860 1861 nvp->nvp_state = 0; 1862 1863 mutex_exit(&nvp->nvp_mutex); 1864 1865 return (SATA_SUCCESS); 1866 } 1867 1868 1869 /* 1870 * Sata entry point to handle port deactivation. cfgadm -c disconnect 1871 */ 1872 static int 1873 nv_sata_deactivate(dev_info_t *dip, sata_device_t *sd) 1874 { 1875 int cport = sd->satadev_addr.cport; 1876 nv_ctl_t *nvc = ddi_get_soft_state(nv_statep, ddi_get_instance(dip)); 1877 nv_port_t *nvp = &(nvc->nvc_port[cport]); 1878 1879 ASSERT(cport < NV_MAX_PORTS(nvc)); 1880 NVLOG((NVDBG_ENTRY, nvc, nvp, "nv_sata_deactivate")); 1881 1882 mutex_enter(&nvp->nvp_mutex); 1883 1884 (void) nv_abort_active(nvp, NULL, SATA_PKT_RESET); 1885 1886 /* 1887 * mark the device as inaccessible 1888 */ 1889 nvp->nvp_state &= ~NV_PORT_INACTIVE; 1890 1891 /* 1892 * disable the interrupts on port 1893 */ 1894 (*(nvc->nvc_set_intr))(nvp, NV_INTR_DISABLE); 1895 1896 nv_uninit_port(nvp); 1897 1898 sd->satadev_state = SATA_PSTATE_SHUTDOWN; 1899 nv_copy_registers(nvp, sd, NULL); 1900 1901 mutex_exit(&nvp->nvp_mutex); 1902 1903 return (SATA_SUCCESS); 1904 } 1905 1906 1907 /* 1908 * find an empty slot in the driver's queue, increment counters, 1909 * and then invoke the appropriate PIO or DMA start routine. 
 */
static int
nv_start_common(nv_port_t *nvp, sata_pkt_t *spkt)
{
	sata_cmd_t *sata_cmdp = &spkt->satapkt_cmd;
	int on_bit = 0x01, slot, sactive, ret, ncq = 0;
	uint8_t cmd = spkt->satapkt_cmd.satacmd_cmd_reg;
	int direction = sata_cmdp->satacmd_flags.sata_data_direction;
	nv_ctl_t *nvc = nvp->nvp_ctlp;
	nv_slot_t *nv_slotp;
	boolean_t dma_cmd;

	NVLOG((NVDBG_DELIVER, nvc, nvp, "nv_start_common entered: cmd: 0x%x",
	    sata_cmdp->satacmd_cmd_reg));

	/* NCQ (FPDMA queued) commands claim a free SACTIVE slot */
	if ((cmd == SATAC_WRITE_FPDMA_QUEUED) ||
	    (cmd == SATAC_READ_FPDMA_QUEUED)) {
		nvp->nvp_ncq_run++;
		/*
		 * search for an empty NCQ slot.  by the time, it's already
		 * been determined by the caller that there is room on the
		 * queue.
		 */
		for (slot = 0; slot < nvp->nvp_queue_depth; slot++,
		    on_bit <<= 1) {
			if ((nvp->nvp_sactive_cache & on_bit) == 0) {
				break;
			}
		}

		/*
		 * the first empty slot found, should not exceed the queue
		 * depth of the drive.  if it does it's an error.
		 */
		ASSERT(slot != nvp->nvp_queue_depth);

		sactive = nv_get32(nvc->nvc_bar_hdl[5],
		    nvp->nvp_sactive);
		ASSERT((sactive & on_bit) == 0);
		nv_put32(nvc->nvc_bar_hdl[5], nvp->nvp_sactive, on_bit);
		NVLOG((NVDBG_INIT, nvc, nvp, "setting SACTIVE onbit: %X",
		    on_bit));
		nvp->nvp_sactive_cache |= on_bit;

		ncq = NVSLOT_NCQ;

	} else {
		/* non-NCQ commands always use slot 0 */
		nvp->nvp_non_ncq_run++;
		slot = 0;
	}

	nv_slotp = (nv_slot_t *)&nvp->nvp_slot[slot];

	ASSERT(nv_slotp->nvslot_spkt == NULL);

	nv_slotp->nvslot_spkt = spkt;
	nv_slotp->nvslot_flags = ncq;

	/*
	 * the sata module doesn't indicate which commands utilize the
	 * DMA engine, so find out using this switch table.
	 */
	switch (spkt->satapkt_cmd.satacmd_cmd_reg) {
	case SATAC_READ_DMA_EXT:
	case SATAC_WRITE_DMA_EXT:
	case SATAC_WRITE_DMA:
	case SATAC_READ_DMA:
	case SATAC_READ_DMA_QUEUED:
	case SATAC_READ_DMA_QUEUED_EXT:
	case SATAC_WRITE_DMA_QUEUED:
	case SATAC_WRITE_DMA_QUEUED_EXT:
	case SATAC_READ_FPDMA_QUEUED:
	case SATAC_WRITE_FPDMA_QUEUED:
		dma_cmd = B_TRUE;
		break;
	default:
		dma_cmd = B_FALSE;
	}

	/* select the start/interrupt handler pair for this command type */
	if (sata_cmdp->satacmd_num_dma_cookies != 0 && dma_cmd == B_TRUE) {
		NVLOG((NVDBG_DELIVER, nvc, nvp, "DMA command"));
		nv_slotp->nvslot_start = nv_start_dma;
		nv_slotp->nvslot_intr = nv_intr_dma;
	} else if (spkt->satapkt_cmd.satacmd_cmd_reg == SATAC_PACKET) {
		NVLOG((NVDBG_DELIVER, nvc, nvp, "packet command"));
		nv_slotp->nvslot_start = nv_start_pkt_pio;
		nv_slotp->nvslot_intr = nv_intr_pkt_pio;
		if ((direction == SATA_DIR_READ) ||
		    (direction == SATA_DIR_WRITE)) {
			nv_slotp->nvslot_byte_count =
			    spkt->satapkt_cmd.satacmd_bp->b_bcount;
			nv_slotp->nvslot_v_addr =
			    spkt->satapkt_cmd.satacmd_bp->b_un.b_addr;
			/*
			 * Freeing DMA resources allocated by the framework
			 * now to avoid buffer overwrite (dma sync) problems
			 * when the buffer is released at command completion.
			 * Primarily an issue on systems with more than
			 * 4GB of memory.
			 */
			sata_free_dma_resources(spkt);
		}
	} else if (direction == SATA_DIR_NODATA_XFER) {
		NVLOG((NVDBG_DELIVER, nvc, nvp, "non-data command"));
		nv_slotp->nvslot_start = nv_start_nodata;
		nv_slotp->nvslot_intr = nv_intr_nodata;
	} else if (direction == SATA_DIR_READ) {
		NVLOG((NVDBG_DELIVER, nvc, nvp, "pio in command"));
		nv_slotp->nvslot_start = nv_start_pio_in;
		nv_slotp->nvslot_intr = nv_intr_pio_in;
		nv_slotp->nvslot_byte_count =
		    spkt->satapkt_cmd.satacmd_bp->b_bcount;
		nv_slotp->nvslot_v_addr =
		    spkt->satapkt_cmd.satacmd_bp->b_un.b_addr;
		/*
		 * Freeing DMA resources allocated by the framework now to
		 * avoid buffer overwrite (dma sync) problems when the buffer
		 * is released at command completion.  This is not an issue
		 * for write because write does not update the buffer.
		 * Primarily an issue on systems with more than 4GB of memory.
		 */
		sata_free_dma_resources(spkt);
	} else if (direction == SATA_DIR_WRITE) {
		NVLOG((NVDBG_DELIVER, nvc, nvp, "pio out command"));
		nv_slotp->nvslot_start = nv_start_pio_out;
		nv_slotp->nvslot_intr = nv_intr_pio_out;
		nv_slotp->nvslot_byte_count =
		    spkt->satapkt_cmd.satacmd_bp->b_bcount;
		nv_slotp->nvslot_v_addr =
		    spkt->satapkt_cmd.satacmd_bp->b_un.b_addr;
	} else {
		nv_cmn_err(CE_WARN, nvc, nvp, "malformed command: direction"
		    " %d cookies %d cmd %x",
		    sata_cmdp->satacmd_flags.sata_data_direction,
		    sata_cmdp->satacmd_num_dma_cookies, cmd);
		spkt->satapkt_reason = SATA_PKT_CMD_UNSUPPORTED;
		ret = SATA_TRAN_CMD_UNSUPPORTED;

		goto fail;
	}

	if ((ret = (*nv_slotp->nvslot_start)(nvp, slot)) ==
	    SATA_TRAN_ACCEPTED) {
#ifdef SGPIO_SUPPORT
		nv_sgp_drive_active(nvp->nvp_ctlp,
		    (nvp->nvp_ctlp->nvc_ctlr_num * 2) + nvp->nvp_port_num);
#endif
		nv_slotp->nvslot_stime = ddi_get_lbolt();

		/*
		 * start timer if it's not already running and this packet
		 * is not requesting polled mode.
		 */
		if ((nvp->nvp_timeout_id == 0) &&
		    ((spkt->satapkt_op_mode & SATA_OPMODE_POLLING) == 0)) {
			nvp->nvp_timeout_id = timeout(nv_timeout, (void *)nvp,
			    drv_usectohz(NV_ONE_SEC));
		}

		return (SATA_TRAN_ACCEPTED);
	}

fail:

	/*
	 * NOTE(review): a SATA_TRAN_* value is stored into satapkt_reason
	 * here, which elsewhere holds SATA_PKT_* values -- confirm the
	 * intended constant.
	 */
	spkt->satapkt_reason = SATA_TRAN_PORT_ERROR;

	/* undo the counter/slot bookkeeping done above */
	if (ncq == NVSLOT_NCQ) {
		nvp->nvp_ncq_run--;
		nvp->nvp_sactive_cache &= ~on_bit;
	} else {
		nvp->nvp_non_ncq_run--;
	}
	nv_slotp->nvslot_spkt = NULL;
	nv_slotp->nvslot_flags = 0;

	return (ret);
}


/*
 * Check if the signature is ready and if non-zero translate
 * it into a solaris sata defined type.
 */
static void
nv_read_signature(nv_port_t *nvp)
{
	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;

	/* assemble the 32-bit signature from the four taskfile registers */
	nvp->nvp_signature = nv_get8(cmdhdl, nvp->nvp_count);
	nvp->nvp_signature |= (nv_get8(cmdhdl, nvp->nvp_sect) << 8);
	nvp->nvp_signature |= (nv_get8(cmdhdl, nvp->nvp_lcyl) << 16);
	nvp->nvp_signature |= (nv_get8(cmdhdl, nvp->nvp_hcyl) << 24);

	switch (nvp->nvp_signature) {

	case NV_SIG_DISK:
		NVLOG((NVDBG_INIT, nvp->nvp_ctlp, nvp, "drive is a disk"));
		nvp->nvp_type = SATA_DTYPE_ATADISK;
		break;
	case NV_SIG_ATAPI:
		NVLOG((NVDBG_INIT, nvp->nvp_ctlp, nvp,
		    "drive is an optical device"));
		nvp->nvp_type = SATA_DTYPE_ATAPICD;
		break;
	case NV_SIG_PM:
		NVLOG((NVDBG_INIT, nvp->nvp_ctlp, nvp,
		    "device is a port multiplier"));
		nvp->nvp_type = SATA_DTYPE_PMULT;
		break;
	case NV_SIG_NOTREADY:
		NVLOG((NVDBG_INIT, nvp->nvp_ctlp, nvp,
		    "signature not ready"));
		nvp->nvp_type = SATA_DTYPE_UNKNOWN;
		break;
	default:
		nv_cmn_err(CE_WARN, nvp->nvp_ctlp, nvp, "signature %X not"
		    " recognized", nvp->nvp_signature);
		nvp->nvp_type = SATA_DTYPE_UNKNOWN;
		break;
	}

	if
(nvp->nvp_signature) { 2132 nvp->nvp_state &= ~(NV_PORT_RESET_PROBE|NV_PORT_RESET); 2133 } 2134 } 2135 2136 2137 /* 2138 * Reset the port 2139 */ 2140 static void 2141 nv_reset(nv_port_t *nvp) 2142 { 2143 ddi_acc_handle_t bar5_hdl = nvp->nvp_ctlp->nvc_bar_hdl[5]; 2144 ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl; 2145 nv_ctl_t *nvc = nvp->nvp_ctlp; 2146 uint32_t sctrl; 2147 2148 NVLOG((NVDBG_ENTRY, nvc, nvp, "nv_reset()")); 2149 2150 ASSERT(mutex_owned(&nvp->nvp_mutex)); 2151 2152 /* 2153 * clear signature registers 2154 */ 2155 nv_put8(cmdhdl, nvp->nvp_sect, 0); 2156 nv_put8(cmdhdl, nvp->nvp_lcyl, 0); 2157 nv_put8(cmdhdl, nvp->nvp_hcyl, 0); 2158 nv_put8(cmdhdl, nvp->nvp_count, 0); 2159 2160 nvp->nvp_signature = 0; 2161 nvp->nvp_type = 0; 2162 nvp->nvp_state |= NV_PORT_RESET; 2163 nvp->nvp_reset_time = ddi_get_lbolt(); 2164 nvp->nvp_link_lost_time = 0; 2165 2166 /* 2167 * assert reset in PHY by writing a 1 to bit 0 scontrol 2168 */ 2169 sctrl = nv_get32(bar5_hdl, nvp->nvp_sctrl); 2170 2171 nv_put32(bar5_hdl, nvp->nvp_sctrl, sctrl | SCONTROL_DET_COMRESET); 2172 2173 /* 2174 * wait 1ms 2175 */ 2176 drv_usecwait(1000); 2177 2178 /* 2179 * de-assert reset in PHY 2180 */ 2181 nv_put32(bar5_hdl, nvp->nvp_sctrl, sctrl); 2182 2183 /* 2184 * make sure timer is running 2185 */ 2186 if (nvp->nvp_timeout_id == 0) { 2187 nvp->nvp_timeout_id = timeout(nv_timeout, (void *)nvp, 2188 drv_usectohz(NV_ONE_SEC)); 2189 } 2190 } 2191 2192 2193 /* 2194 * Initialize register handling specific to mcp51/mcp55 2195 */ 2196 /* ARGSUSED */ 2197 static void 2198 mcp5x_reg_init(nv_ctl_t *nvc, ddi_acc_handle_t pci_conf_handle) 2199 { 2200 nv_port_t *nvp; 2201 uchar_t *bar5 = nvc->nvc_bar_addr[5]; 2202 uint8_t off, port; 2203 2204 nvc->nvc_mcp5x_ctl = (uint32_t *)(bar5 + MCP5X_CTL); 2205 nvc->nvc_mcp5x_ncq = (uint32_t *)(bar5 + MCP5X_NCQ); 2206 2207 for (port = 0, off = 0; port < NV_MAX_PORTS(nvc); port++, off += 2) { 2208 nvp = &(nvc->nvc_port[port]); 2209 nvp->nvp_mcp5x_int_status = 2210 (uint16_t 
	    *)(bar5 + MCP5X_INT_STATUS + off);
		nvp->nvp_mcp5x_int_ctl =
		    (uint16_t *)(bar5 + MCP5X_INT_CTL + off);

		/*
		 * clear any previous interrupts asserted
		 */
		nv_put16(nvc->nvc_bar_hdl[5], nvp->nvp_mcp5x_int_status,
		    MCP5X_INT_CLEAR);

		/*
		 * These are the interrupts to accept for now.  The spec
		 * says these are enable bits, but nvidia has indicated
		 * these are masking bits.  Even though they may be masked
		 * out to prevent asserting the main interrupt, they can
		 * still be asserted while reading the interrupt status
		 * register, so that needs to be considered in the interrupt
		 * handler.
		 */
		nv_put16(nvc->nvc_bar_hdl[5], nvp->nvp_mcp5x_int_ctl,
		    ~(MCP5X_INT_IGNORE));
	}

	/*
	 * Allow the driver to program the BM on the first command instead
	 * of waiting for an interrupt.
	 *
	 * NOTE(review): the NCQ block below uses "flags" without a visible
	 * declaration in this function; it would not compile if NCQ were
	 * defined -- verify before enabling NCQ.
	 */
#ifdef NCQ
	flags = MCP_SATA_AE_NCQ_PDEV_FIRST_CMD | MCP_SATA_AE_NCQ_SDEV_FIRST_CMD;
	nv_put32(nvc->nvc_bar_hdl[5], nvc->nvc_mcp5x_ncq, flags);
	flags = MCP_SATA_AE_CTL_PRI_SWNCQ | MCP_SATA_AE_CTL_SEC_SWNCQ;
	nv_put32(nvc->nvc_bar_hdl[5], nvc->nvc_mcp5x_ctl, flags);
#endif


#if 0
	/*
	 * This caused problems on some but not all mcp55 based systems.
	 * DMA writes would never complete.  This happens even on small
	 * mem systems, and only setting NV_40BIT_PRD below and not
	 * buffer_dma_attr.dma_attr_addr_hi, so it seems to be a hardware
	 * issue that needs further investigation.
	 */

	/*
	 * mcp55 rev A03 and above supports 40-bit physical addressing.
	 * Enable DMA to take advantage of that.
	 *
	 */
	if (nvc->nvc_revid >= 0xa3) {
		uint32_t reg32;
		NVLOG((NVDBG_INIT, nvp->nvp_ctlp, nvp, "rev id is %X and"
		    " is capable of 40-bit addressing", nvc->nvc_revid));
		buffer_dma_attr.dma_attr_addr_hi = 0xffffffffffull;
		reg32 = pci_config_get32(pci_conf_handle, NV_SATA_CFG_20);
		pci_config_put32(pci_conf_handle, NV_SATA_CFG_20,
		    reg32 |NV_40BIT_PRD);
	} else {
		NVLOG((NVDBG_INIT, nvp->nvp_ctlp, nvp, "rev is %X and is "
		    "not capable of 40-bit addressing", nvc->nvc_revid));
	}
#endif

}


/*
 * Initialize register handling specific to ck804
 */
static void
ck804_reg_init(nv_ctl_t *nvc, ddi_acc_handle_t pci_conf_handle)
{
	uchar_t *bar5 = nvc->nvc_bar_addr[5];
	uint32_t reg32;
	uint16_t reg16;
	nv_port_t *nvp;
	int j;

	/*
	 * delay hotplug interrupts until PHYRDY.
	 */
	reg32 = pci_config_get32(pci_conf_handle, NV_SATA_CFG_42);
	pci_config_put32(pci_conf_handle, NV_SATA_CFG_42,
	    reg32 | CK804_CFG_DELAY_HOTPLUG_INTR);

	/*
	 * enable hot plug interrupts for channel x and y
	 */
	reg16 = nv_get16(nvc->nvc_bar_hdl[5],
	    (uint16_t *)(bar5 + NV_ADMACTL_X));
	nv_put16(nvc->nvc_bar_hdl[5], (uint16_t *)(bar5 + NV_ADMACTL_X),
	    NV_HIRQ_EN | reg16);


	reg16 = nv_get16(nvc->nvc_bar_hdl[5],
	    (uint16_t *)(bar5 + NV_ADMACTL_Y));
	nv_put16(nvc->nvc_bar_hdl[5], (uint16_t *)(bar5 + NV_ADMACTL_Y),
	    NV_HIRQ_EN | reg16);

	nvc->nvc_ck804_int_status = (uint8_t *)(bar5 + CK804_SATA_INT_STATUS);

	/*
	 * clear any existing interrupt pending then enable
	 */
	for (j = 0; j < NV_MAX_PORTS(nvc); j++) {
		nvp = &(nvc->nvc_port[j]);
		mutex_enter(&nvp->nvp_mutex);
		(*(nvp->nvp_ctlp->nvc_set_intr))(nvp,
		    NV_INTR_CLEAR_ALL|NV_INTR_ENABLE);
		mutex_exit(&nvp->nvp_mutex);
	}
}


/*
 * Initialize the controller and set up driver data structures.
 * determine if ck804 or mcp5x class.
 */
static int
nv_init_ctl(nv_ctl_t *nvc, ddi_acc_handle_t pci_conf_handle)
{
	struct sata_hba_tran stran;
	nv_port_t *nvp;
	int j, ck804;
	uchar_t *cmd_addr, *ctl_addr, *bm_addr;
	ddi_acc_handle_t bar5_hdl = nvc->nvc_bar_hdl[5];
	uchar_t *bar5 = nvc->nvc_bar_addr[5];
	uint32_t reg32;
	uint8_t reg8, reg8_save;

	NVLOG((NVDBG_INIT, nvc, NULL, "nv_init_ctl entered"));

	ck804 = B_TRUE;
#ifdef SGPIO_SUPPORT
	nvc->nvc_mcp5x_flag = B_FALSE;
#endif

	/*
	 * Need to set bit 2 to 1 at config offset 0x50
	 * to enable access to the bar5 registers.
	 */
	reg32 = pci_config_get32(pci_conf_handle, NV_SATA_CFG_20);
	pci_config_put32(pci_conf_handle, NV_SATA_CFG_20,
	    reg32 | NV_BAR5_SPACE_EN);

	/*
	 * Determine if this is ck804 or mcp5x.  ck804 will map in the
	 * task file registers into bar5 while mcp5x won't.  The offset of
	 * the task file registers in mcp5x's space is unused, so it will
	 * return zero.  So check one of the task file registers to see if it is
	 * writable and reads back what was written.  If it's mcp5x it will
	 * return back 0xff whereas ck804 will return the value written.
 * driver data structures.
 * Determine if ck804 or mcp5x class and install the matching interrupt,
 * register-init and interrupt-control vectors.  Returns NV_SUCCESS.
 */
static int
nv_init_ctl(nv_ctl_t *nvc, ddi_acc_handle_t pci_conf_handle)
{
	struct sata_hba_tran stran;
	nv_port_t *nvp;
	int j, ck804;
	uchar_t *cmd_addr, *ctl_addr, *bm_addr;
	ddi_acc_handle_t bar5_hdl = nvc->nvc_bar_hdl[5];
	uchar_t *bar5 = nvc->nvc_bar_addr[5];
	uint32_t reg32;
	uint8_t reg8, reg8_save;

	NVLOG((NVDBG_INIT, nvc, NULL, "nv_init_ctl entered"));

	ck804 = B_TRUE;
#ifdef SGPIO_SUPPORT
	nvc->nvc_mcp5x_flag = B_FALSE;
#endif

	/*
	 * Need to set bit 2 to 1 at config offset 0x50
	 * to enable access to the bar5 registers.
	 */
	reg32 = pci_config_get32(pci_conf_handle, NV_SATA_CFG_20);
	pci_config_put32(pci_conf_handle, NV_SATA_CFG_20,
	    reg32 | NV_BAR5_SPACE_EN);

	/*
	 * Determine if this is ck804 or mcp5x.  ck804 will map in the
	 * task file registers into bar5 while mcp5x won't.  The offset of
	 * the task file registers in mcp5x's space is unused, so it will
	 * return zero.  So check one of the task file registers to see if it is
	 * writable and reads back what was written.  If it's mcp5x it will
	 * return back 0xff whereas ck804 will return the value written.
	 */
	reg8_save = nv_get8(bar5_hdl,
	    (uint8_t *)(bar5 + NV_BAR5_TRAN_LEN_CH_X));


	/*
	 * probe with two distinct values so a stuck register cannot
	 * accidentally match.
	 */
	for (j = 1; j < 3; j++) {

		nv_put8(bar5_hdl, (uint8_t *)(bar5 + NV_BAR5_TRAN_LEN_CH_X), j);
		reg8 = nv_get8(bar5_hdl,
		    (uint8_t *)(bar5 + NV_BAR5_TRAN_LEN_CH_X));

		if (reg8 != j) {
			ck804 = B_FALSE;
			/*
			 * NOTE(review): the B_FALSE initialization above is
			 * guarded by SGPIO_SUPPORT but this store is not;
			 * verify nvc_mcp5x_flag exists unconditionally in
			 * nv_ctl_t.
			 */
			nvc->nvc_mcp5x_flag = B_TRUE;
			break;
		}
	}

	/* restore the probed register to its original value */
	nv_put8(bar5_hdl, (uint8_t *)(bar5 + NV_BAR5_TRAN_LEN_CH_X), reg8_save);

	if (ck804 == B_TRUE) {
		NVLOG((NVDBG_INIT, nvc, NULL, "controller is CK804"));
		nvc->nvc_interrupt = ck804_intr;
		nvc->nvc_reg_init = ck804_reg_init;
		nvc->nvc_set_intr = ck804_set_intr;
	} else {
		NVLOG((NVDBG_INIT, nvc, NULL, "controller is MCP51/MCP55"));
		nvc->nvc_interrupt = mcp5x_intr;
		nvc->nvc_reg_init = mcp5x_reg_init;
		nvc->nvc_set_intr = mcp5x_set_intr;
	}


	/*
	 * fill in the sata module transport structure; copied by value
	 * into the controller soft state below.
	 */
	stran.sata_tran_hba_rev = SATA_TRAN_HBA_REV_2;
	stran.sata_tran_hba_dip = nvc->nvc_dip;
	stran.sata_tran_hba_dma_attr = &buffer_dma_attr;
	stran.sata_tran_hba_num_cports = NV_NUM_CPORTS;
	stran.sata_tran_hba_features_support =
	    SATA_CTLF_HOTPLUG | SATA_CTLF_ASN | SATA_CTLF_ATAPI;
	stran.sata_tran_hba_qdepth = NV_QUEUE_SLOTS;
	stran.sata_tran_probe_port = nv_sata_probe;
	stran.sata_tran_start = nv_sata_start;
	stran.sata_tran_abort = nv_sata_abort;
	stran.sata_tran_reset_dport = nv_sata_reset;
	stran.sata_tran_selftest = NULL;
	stran.sata_tran_hotplug_ops = &nv_hotplug_ops;
	stran.sata_tran_pwrmgt_ops = NULL;
	stran.sata_tran_ioctl = NULL;
	nvc->nvc_sata_hba_tran = stran;

	nvc->nvc_port = kmem_zalloc(sizeof (nv_port_t) * NV_MAX_PORTS(nvc),
	    KM_SLEEP);

	/*
	 * initialize registers common to all chipsets
	 */
	nv_common_reg_init(nvc);

	/*
	 * derive the individual task file / control / bus master register
	 * addresses for each port from the base addresses cached by
	 * nv_common_reg_init() above.
	 */
	for (j = 0; j < NV_MAX_PORTS(nvc); j++) {
		nvp = &(nvc->nvc_port[j]);

		cmd_addr = nvp->nvp_cmd_addr;
		ctl_addr = nvp->nvp_ctl_addr;
		bm_addr = nvp->nvp_bm_addr;

		mutex_init(&nvp->nvp_mutex, NULL, MUTEX_DRIVER,
		    DDI_INTR_PRI(nvc->nvc_intr_pri));

		cv_init(&nvp->nvp_poll_cv, NULL, CV_DRIVER, NULL);

		nvp->nvp_data = cmd_addr + NV_DATA;
		nvp->nvp_error = cmd_addr + NV_ERROR;
		nvp->nvp_feature = cmd_addr + NV_FEATURE;
		nvp->nvp_count = cmd_addr + NV_COUNT;
		nvp->nvp_sect = cmd_addr + NV_SECT;
		nvp->nvp_lcyl = cmd_addr + NV_LCYL;
		nvp->nvp_hcyl = cmd_addr + NV_HCYL;
		nvp->nvp_drvhd = cmd_addr + NV_DRVHD;
		nvp->nvp_status = cmd_addr + NV_STATUS;
		nvp->nvp_cmd = cmd_addr + NV_CMD;
		nvp->nvp_altstatus = ctl_addr + NV_ALTSTATUS;
		nvp->nvp_devctl = ctl_addr + NV_DEVCTL;

		nvp->nvp_bmicx = bm_addr + BMICX_REG;
		nvp->nvp_bmisx = bm_addr + BMISX_REG;
		nvp->nvp_bmidtpx = (uint32_t *)(bm_addr + BMIDTPX_REG);

		nvp->nvp_state = 0;
	}

	/*
	 * initialize register by calling chip specific reg initialization
	 */
	(*(nvc->nvc_reg_init))(nvc, pci_conf_handle);

	return (NV_SUCCESS);
}


/*
 * Initialize data structures with enough slots to handle queuing, if
 * enabled.  NV_QUEUE_SLOTS will be set to 1 or 32, depending on whether
 * NCQ support is built into the driver and enabled.  It might have been
 * better to derive the true size from the drive itself, but the sata
 * module only sends down that information on the first NCQ command,
 * which means possibly re-sizing the structures on an interrupt stack,
 * making error handling more messy.  The easy way is to just allocate
 * all 32 slots, which is what most drives support anyway.
 */
static int
nv_init_port(nv_port_t *nvp)
{
	nv_ctl_t *nvc = nvp->nvp_ctlp;
	size_t prd_size = sizeof (prde_t) * NV_DMA_NSEGS;
	dev_info_t *dip = nvc->nvc_dip;
	ddi_device_acc_attr_t dev_attr;
	size_t buf_size;
	ddi_dma_cookie_t cookie;
	uint_t count;
	int rc, i;

	dev_attr.devacc_attr_version = DDI_DEVICE_ATTR_V0;
	dev_attr.devacc_attr_endian_flags = DDI_NEVERSWAP_ACC;
	dev_attr.devacc_attr_dataorder = DDI_STRICTORDER_ACC;

	/* idempotent: a port already marked NV_PORT_INIT is left alone */
	if (nvp->nvp_state & NV_PORT_INIT) {
		NVLOG((NVDBG_INIT, nvc, nvp,
		    "nv_init_port previously initialized"));

		return (NV_SUCCESS);
	} else {
		NVLOG((NVDBG_INIT, nvc, nvp, "nv_init_port initializing"));
	}

	/*
	 * per-slot parallel arrays: DMA handle, access handle, kernel
	 * virtual address and physical address of each slot's PRD table.
	 */
	nvp->nvp_sg_dma_hdl = kmem_zalloc(sizeof (ddi_dma_handle_t) *
	    NV_QUEUE_SLOTS, KM_SLEEP);

	nvp->nvp_sg_acc_hdl = kmem_zalloc(sizeof (ddi_acc_handle_t) *
	    NV_QUEUE_SLOTS, KM_SLEEP);

	nvp->nvp_sg_addr = kmem_zalloc(sizeof (caddr_t) *
	    NV_QUEUE_SLOTS, KM_SLEEP);

	nvp->nvp_sg_paddr = kmem_zalloc(sizeof (uint32_t) *
	    NV_QUEUE_SLOTS, KM_SLEEP);

	nvp->nvp_slot = kmem_zalloc(sizeof (nv_slot_t) * NV_QUEUE_SLOTS,
	    KM_SLEEP);

	/*
	 * allocate and bind a PRD table per slot.  On any failure,
	 * nv_uninit_port() frees whatever was set up so far (it checks
	 * each array entry for NULL/0 before freeing).
	 */
	for (i = 0; i < NV_QUEUE_SLOTS; i++) {

		rc = ddi_dma_alloc_handle(dip, &nv_prd_dma_attr,
		    DDI_DMA_SLEEP, NULL, &(nvp->nvp_sg_dma_hdl[i]));

		if (rc != DDI_SUCCESS) {
			nv_uninit_port(nvp);

			return (NV_FAILURE);
		}

		rc = ddi_dma_mem_alloc(nvp->nvp_sg_dma_hdl[i], prd_size,
		    &dev_attr, DDI_DMA_CONSISTENT, DDI_DMA_SLEEP,
		    NULL, &(nvp->nvp_sg_addr[i]), &buf_size,
		    &(nvp->nvp_sg_acc_hdl[i]));

		if (rc != DDI_SUCCESS) {
			nv_uninit_port(nvp);

			return (NV_FAILURE);
		}

		rc = ddi_dma_addr_bind_handle(nvp->nvp_sg_dma_hdl[i], NULL,
		    nvp->nvp_sg_addr[i], buf_size,
		    DDI_DMA_WRITE | DDI_DMA_CONSISTENT,
		    DDI_DMA_SLEEP, NULL, &cookie, &count);

		if (rc != DDI_DMA_MAPPED) {
			nv_uninit_port(nvp);

			return (NV_FAILURE);
		}

		/* PRD tables must be physically contiguous and aligned */
		ASSERT(count == 1);
		ASSERT((cookie.dmac_address & (sizeof (int) - 1)) == 0);

		/* nvp_sg_paddr is 32-bit; the binding must fit below 4G */
		ASSERT(cookie.dmac_laddress <= UINT32_MAX);

		nvp->nvp_sg_paddr[i] = cookie.dmac_address;
	}

	/*
	 * nvp_queue_depth represents the actual drive queue depth, not the
	 * number of slots allocated in the structures (which may be more).
	 * Actual queue depth is only learned after the first NCQ command, so
	 * initialize it to 1 for now.
	 */
	nvp->nvp_queue_depth = 1;

	nvp->nvp_state |= NV_PORT_INIT;

	return (NV_SUCCESS);
}


/*
 * Free dynamically allocated structures for port.  Safe to call on a
 * partially initialized port (used as the failure path of nv_init_port)
 * and safe to call more than once.
 */
static void
nv_uninit_port(nv_port_t *nvp)
{
	int i;

	/*
	 * It is possible to reach here before a port has been initialized or
	 * after it has already been uninitialized.  Just return in that case.
	 */
	if (nvp->nvp_slot == NULL) {

		return;
	}

	NVLOG((NVDBG_INIT, nvp->nvp_ctlp, nvp,
	    "nv_uninit_port uninitializing"));

	nvp->nvp_type = SATA_DTYPE_NONE;

	/*
	 * unwind in reverse order of allocation; each entry may be
	 * unset if nv_init_port failed partway through.
	 */
	for (i = 0; i < NV_QUEUE_SLOTS; i++) {
		if (nvp->nvp_sg_paddr[i]) {
			(void) ddi_dma_unbind_handle(nvp->nvp_sg_dma_hdl[i]);
		}

		if (nvp->nvp_sg_acc_hdl[i] != NULL) {
			ddi_dma_mem_free(&(nvp->nvp_sg_acc_hdl[i]));
		}

		if (nvp->nvp_sg_dma_hdl[i] != NULL) {
			ddi_dma_free_handle(&(nvp->nvp_sg_dma_hdl[i]));
		}
	}

	kmem_free(nvp->nvp_slot, sizeof (nv_slot_t) * NV_QUEUE_SLOTS);
	nvp->nvp_slot = NULL;

	kmem_free(nvp->nvp_sg_dma_hdl,
	    sizeof (ddi_dma_handle_t) * NV_QUEUE_SLOTS);
	nvp->nvp_sg_dma_hdl = NULL;

	kmem_free(nvp->nvp_sg_acc_hdl,
	    sizeof (ddi_acc_handle_t) * NV_QUEUE_SLOTS);
	nvp->nvp_sg_acc_hdl = NULL;

	kmem_free(nvp->nvp_sg_addr, sizeof (caddr_t) * NV_QUEUE_SLOTS);
	nvp->nvp_sg_addr = NULL;

	kmem_free(nvp->nvp_sg_paddr, sizeof (uint32_t) * NV_QUEUE_SLOTS);
	nvp->nvp_sg_paddr = NULL;

	nvp->nvp_state &= ~NV_PORT_INIT;
	nvp->nvp_signature = 0;
}


/*
 * Cache register offsets and access handles to frequently accessed registers
 * which are common to either chipset.  Port 0 uses BAR 0/1 and the channel 0
 * sreg block; port 1 uses BAR 2/3, a bus master offset of 8 and the channel 1
 * sreg block.
 */
static void
nv_common_reg_init(nv_ctl_t *nvc)
{
	uchar_t *bar5_addr = nvc->nvc_bar_addr[5];
	uchar_t *bm_addr_offset, *sreg_offset;
	uint8_t bar, port;
	nv_port_t *nvp;

	for (port = 0; port < NV_MAX_PORTS(nvc); port++) {
		if (port == 0) {
			bar = NV_BAR_0;
			bm_addr_offset = 0;
			sreg_offset = (uchar_t *)(CH0_SREG_OFFSET + bar5_addr);
		} else {
			bar = NV_BAR_2;
			bm_addr_offset = (uchar_t *)8;
			sreg_offset = (uchar_t *)(CH1_SREG_OFFSET + bar5_addr);
		}

		nvp = &(nvc->nvc_port[port]);
		nvp->nvp_ctlp = nvc;
		nvp->nvp_port_num = port;
		NVLOG((NVDBG_INIT, nvc, nvp, "setting up port mappings"));

		nvp->nvp_cmd_hdl = nvc->nvc_bar_hdl[bar];
		nvp->nvp_cmd_addr = nvc->nvc_bar_addr[bar];
		nvp->nvp_ctl_hdl = nvc->nvc_bar_hdl[bar + 1];
		nvp->nvp_ctl_addr = nvc->nvc_bar_addr[bar + 1];
		nvp->nvp_bm_hdl = nvc->nvc_bar_hdl[NV_BAR_4];
		nvp->nvp_bm_addr = nvc->nvc_bar_addr[NV_BAR_4] +
		    (long)bm_addr_offset;

		/* SATA status/error/active/control register addresses */
		nvp->nvp_sstatus = (uint32_t *)(sreg_offset + NV_SSTATUS);
		nvp->nvp_serror = (uint32_t *)(sreg_offset + NV_SERROR);
		nvp->nvp_sactive = (uint32_t *)(sreg_offset + NV_SACTIVE);
		nvp->nvp_sctrl = (uint32_t *)(sreg_offset + NV_SCTRL);
	}
}


/*
 * Tear down all ports: free per-port resources, destroy the per-port
 * mutex and cv, then release the port array itself.  Inverse of the
 * port setup done in nv_init_ctl().
 */
static void
nv_uninit_ctl(nv_ctl_t *nvc)
{
	int port;
	nv_port_t *nvp;

	NVLOG((NVDBG_INIT, nvc, NULL, "nv_uninit_ctl entered"));

	for (port = 0; port < NV_MAX_PORTS(nvc); port++) {
		nvp = &(nvc->nvc_port[port]);
		mutex_enter(&nvp->nvp_mutex);
		NVLOG((NVDBG_INIT, nvc, nvp, "uninitializing port"));
		nv_uninit_port(nvp);
		mutex_exit(&nvp->nvp_mutex);
		mutex_destroy(&nvp->nvp_mutex);
		cv_destroy(&nvp->nvp_poll_cv);
	}

	kmem_free(nvc->nvc_port, NV_MAX_PORTS(nvc) * sizeof (nv_port_t));
	nvc->nvc_port = NULL;
}


/*
 * ck804 interrupt.  This is a wrapper around ck804_intr_process so
 * that interrupts from other devices can be disregarded while dtracing.
 * Returns DDI_INTR_UNCLAIMED when the status register shows nothing
 * pending for this controller.
 */
/* ARGSUSED */
static uint_t
ck804_intr(caddr_t arg1, caddr_t arg2)
{
	nv_ctl_t *nvc = (nv_ctl_t *)arg1;
	uint8_t intr_status;
	ddi_acc_handle_t bar5_hdl = nvc->nvc_bar_hdl[5];

	intr_status = ddi_get8(bar5_hdl, nvc->nvc_ck804_int_status);

	if (intr_status == 0) {

		return (DDI_INTR_UNCLAIMED);
	}

	ck804_intr_process(nvc, intr_status);

	return (DDI_INTR_CLAIMED);
}


/*
 * Main interrupt handler for ck804.  handles normal device
 * interrupts as well as port hot plug and remove interrupts.
 *
 */
static void
ck804_intr_process(nv_ctl_t *nvc, uint8_t intr_status)
{

	int port, i;
	nv_port_t *nvp;
	nv_slot_t *nv_slotp;
	uchar_t status;
	sata_pkt_t *spkt;
	uint8_t bmstatus, clear_bits;
	ddi_acc_handle_t bmhdl;
	int nvcleared = 0;
	ddi_acc_handle_t bar5_hdl = nvc->nvc_bar_hdl[5];
	uint32_t sstatus;
	int port_mask_hot[] = {
		CK804_INT_PDEV_HOT, CK804_INT_SDEV_HOT,
	};
	int port_mask_pm[] = {
		CK804_INT_PDEV_PM, CK804_INT_SDEV_PM,
	};

	NVLOG((NVDBG_INTR, nvc, NULL,
	    "ck804_intr_process entered intr_status=%x", intr_status));

	/*
	 * For command completion interrupt, explicit clear is not required.
	 * however, for the error cases explicit clear is performed.
	 */
	for (port = 0; port < NV_MAX_PORTS(nvc); port++) {

		int port_mask[] = {CK804_INT_PDEV_INT, CK804_INT_SDEV_INT};

		if ((port_mask[port] & intr_status) == 0) {
			continue;
		}

		NVLOG((NVDBG_INTR, nvc, NULL,
		    "ck804_intr_process interrupt on port %d", port));

		nvp = &(nvc->nvc_port[port]);

		mutex_enter(&nvp->nvp_mutex);

		/*
		 * there was a corner case found where an interrupt
		 * arrived before nvp_slot was set.  Should probably
		 * track down why that happens and try to eliminate
		 * that source and then get rid of this check.
		 */
		if (nvp->nvp_slot == NULL) {
			/* reading status acknowledges the device */
			status = nv_get8(nvp->nvp_ctl_hdl, nvp->nvp_status);
			NVLOG((NVDBG_ALWAYS, nvc, nvp, "spurious interrupt "
			    "received before initialization "
			    "completed status=%x", status));
			mutex_exit(&nvp->nvp_mutex);

			/*
			 * clear interrupt bits
			 */
			nv_put8(bar5_hdl, nvc->nvc_ck804_int_status,
			    port_mask[port]);

			continue;
		}

		if ((&(nvp->nvp_slot[0]))->nvslot_spkt == NULL)  {
			status = nv_get8(nvp->nvp_ctl_hdl, nvp->nvp_status);
			NVLOG((NVDBG_ALWAYS, nvc, nvp, "spurious interrupt "
			    " no command in progress status=%x", status));
			mutex_exit(&nvp->nvp_mutex);

			/*
			 * clear interrupt bits
			 */
			nv_put8(bar5_hdl, nvc->nvc_ck804_int_status,
			    port_mask[port]);

			continue;
		}

		bmhdl = nvp->nvp_bm_hdl;
		bmstatus = nv_get8(bmhdl, nvp->nvp_bmisx);

		/* not a bus master completion for this port; leave pending */
		if (!(bmstatus & BMISX_IDEINTS)) {
			mutex_exit(&nvp->nvp_mutex);

			continue;
		}

		status = nv_get8(nvp->nvp_ctl_hdl, nvp->nvp_altstatus);

		/* device still busy; the real completion will follow */
		if (status & SATA_STATUS_BSY) {
			mutex_exit(&nvp->nvp_mutex);

			continue;
		}

		/* non-NCQ operation: the active command is always slot 0 */
		nv_slotp = &(nvp->nvp_slot[0]);

		ASSERT(nv_slotp);

		spkt = nv_slotp->nvslot_spkt;

		if (spkt == NULL) {
			mutex_exit(&nvp->nvp_mutex);

			continue;
		}

		(*nv_slotp->nvslot_intr)(nvp, nv_slotp);

		nv_copy_registers(nvp, &spkt->satapkt_device, spkt);

		/*
		 * If there is no link cannot be certain about the completion
		 * of the packet, so abort it.
		 */
		if (nv_check_link((&spkt->satapkt_device)->
		    satadev_scr.sstatus) == B_FALSE) {

			(void) nv_abort_active(nvp, NULL, SATA_PKT_PORT_ERROR);

		} else if (nv_slotp->nvslot_flags == NVSLOT_COMPLETE) {

			nv_complete_io(nvp, spkt, 0);
		}

		mutex_exit(&nvp->nvp_mutex);
	}

	/*
	 * ck804 often doesn't correctly distinguish hot add/remove
	 * interrupts.  Frequently both the ADD and the REMOVE bits
	 * are asserted, whether it was a remove or add.  Use sstatus
	 * to distinguish hot add from hot remove.
	 */

	for (port = 0; port < NV_MAX_PORTS(nvc); port++) {
		clear_bits = 0;

		nvp = &(nvc->nvc_port[port]);
		mutex_enter(&nvp->nvp_mutex);

		if ((port_mask_pm[port] & intr_status) != 0) {
			clear_bits = port_mask_pm[port];
			NVLOG((NVDBG_HOT, nvc, nvp,
			    "clearing PM interrupt bit: %x",
			    intr_status & port_mask_pm[port]));
		}

		if ((port_mask_hot[port] & intr_status) == 0) {
			if (clear_bits != 0) {
				goto clear;
			} else {
				mutex_exit(&nvp->nvp_mutex);
				continue;
			}
		}

		/*
		 * reaching here means there was a hot add or remove.
		 */
		clear_bits |= port_mask_hot[port];

		ASSERT(nvc->nvc_port[port].nvp_sstatus);

		sstatus = nv_get32(bar5_hdl,
		    nvc->nvc_port[port].nvp_sstatus);

		/* device present and PHY communicating => it was an add */
		if ((sstatus & SSTATUS_DET_DEVPRE_PHYCOM) ==
		    SSTATUS_DET_DEVPRE_PHYCOM) {
			nv_report_add_remove(nvp, 0);
		} else {
			nv_report_add_remove(nvp, NV_PORT_HOTREMOVED);
		}
	clear:
		/*
		 * clear interrupt bits.  explicit interrupt clear is
		 * required for hotplug interrupts.
		 */
		nv_put8(bar5_hdl, nvc->nvc_ck804_int_status, clear_bits);

		/*
		 * make sure it's flushed and cleared.  If not try
		 * again.  Sometimes it has been observed to not clear
		 * on the first try.
		 */
		intr_status = nv_get8(bar5_hdl, nvc->nvc_ck804_int_status);

		/*
		 * make 10 additional attempts to clear the interrupt
		 */
		for (i = 0; (intr_status & clear_bits) && (i < 10); i++) {
			NVLOG((NVDBG_ALWAYS, nvc, nvp, "inst_status=%x "
			    "still not clear try=%d", intr_status,
			    ++nvcleared));
			nv_put8(bar5_hdl, nvc->nvc_ck804_int_status,
			    clear_bits);
			intr_status = nv_get8(bar5_hdl,
			    nvc->nvc_ck804_int_status);
		}

		/*
		 * if still not clear, log a message and disable the
		 * port.  highly unlikely that this path is taken, but it
		 * gives protection against a wedged interrupt.
		 */
		if (intr_status & clear_bits) {
			(*(nvc->nvc_set_intr))(nvp, NV_INTR_DISABLE);
			nv_port_state_change(nvp, SATA_EVNT_PORT_FAILED,
			    SATA_ADDR_CPORT, SATA_PSTATE_FAILED);
			nvp->nvp_state |= NV_PORT_FAILED;
			(void) nv_abort_active(nvp, NULL, SATA_PKT_DEV_ERROR);
			nv_cmn_err(CE_WARN, nvc, nvp, "unable to clear "
			    "interrupt. disabling port intr_status=%X",
			    intr_status);
		}

		mutex_exit(&nvp->nvp_mutex);
	}
}


/*
 * Interrupt handler for mcp5x.  It is invoked by the wrapper for each port
 * on the controller, to handle completion and hot plug and remove events.
 *
 */
static uint_t
mcp5x_intr_port(nv_port_t *nvp)
{
	nv_ctl_t *nvc = nvp->nvp_ctlp;
	ddi_acc_handle_t bar5_hdl = nvc->nvc_bar_hdl[5];
	uint8_t clear = 0, intr_cycles = 0;
	int ret = DDI_INTR_UNCLAIMED;
	uint16_t int_status;

	NVLOG((NVDBG_INTR, nvc, nvp, "mcp5x_intr_port entered"));

	/*
	 * loop until the port's interrupt status register reads clear;
	 * a runaway source is detected via intr_cycles and the port is
	 * disabled rather than spinning forever at interrupt level.
	 */
	for (;;) {
		/*
		 * read current interrupt status
		 */
		int_status = nv_get16(bar5_hdl, nvp->nvp_mcp5x_int_status);

		NVLOG((NVDBG_INTR, nvc, nvp, "int_status = %x", int_status));

		/*
		 * MCP5X_INT_IGNORE interrupts will show up in the status,
		 * but are masked out from causing an interrupt to be generated
		 * to the processor.  Ignore them here by masking them out.
		 */
		int_status &= ~(MCP5X_INT_IGNORE);

		/*
		 * exit the loop when no more interrupts to process
		 */
		if (int_status == 0) {

			break;
		}

		if (int_status & MCP5X_INT_COMPLETE) {
			NVLOG((NVDBG_INTR, nvc, nvp,
			    "mcp5x_packet_complete_intr"));
			/*
			 * since int_status was set, return DDI_INTR_CLAIMED
			 * from the DDI's perspective even though the packet
			 * completion may not have succeeded.  If it fails,
			 * need to manually clear the interrupt, otherwise
			 * clearing is implicit.
			 */
			ret = DDI_INTR_CLAIMED;
			if (mcp5x_packet_complete_intr(nvc, nvp) ==
			    NV_FAILURE) {
				clear = MCP5X_INT_COMPLETE;
			} else {
				intr_cycles = 0;
			}
		}

		if (int_status & MCP5X_INT_DMA_SETUP) {
			NVLOG((NVDBG_INTR, nvc, nvp, "mcp5x_dma_setup_intr"));

			/*
			 * Needs to be cleared before starting the BM, so do it
			 * now.  make sure this is still working.
			 */
			nv_put16(bar5_hdl, nvp->nvp_mcp5x_int_status,
			    MCP5X_INT_DMA_SETUP);
#ifdef NCQ
			ret = mcp5x_dma_setup_intr(nvc, nvp);
#endif
		}

		if (int_status & MCP5X_INT_REM) {
			NVLOG((NVDBG_INTR, nvc, nvp, "mcp5x device removed"));
			clear = MCP5X_INT_REM;
			ret = DDI_INTR_CLAIMED;

			mutex_enter(&nvp->nvp_mutex);
			nv_report_add_remove(nvp, NV_PORT_HOTREMOVED);
			mutex_exit(&nvp->nvp_mutex);

		} else if (int_status & MCP5X_INT_ADD) {
			NVLOG((NVDBG_HOT, nvc, nvp, "mcp5x device added"));
			clear = MCP5X_INT_ADD;
			ret = DDI_INTR_CLAIMED;

			mutex_enter(&nvp->nvp_mutex);
			nv_report_add_remove(nvp, 0);
			mutex_exit(&nvp->nvp_mutex);
		}

		if (clear) {
			nv_put16(bar5_hdl, nvp->nvp_mcp5x_int_status, clear);
			clear = 0;
		}

		/*
		 * NOTE(review): by this point "clear" has always been reset
		 * to 0 above, so the clear=%X value logged below is always
		 * zero; consider logging the pre-reset value instead.
		 */
		if (intr_cycles++ == NV_MAX_INTR_LOOP) {
			nv_cmn_err(CE_WARN, nvc, nvp, "excessive interrupt "
			    "processing. Disabling port int_status=%X"
			    " clear=%X", int_status, clear);
			mutex_enter(&nvp->nvp_mutex);
			(*(nvc->nvc_set_intr))(nvp, NV_INTR_DISABLE);
			nv_port_state_change(nvp, SATA_EVNT_PORT_FAILED,
			    SATA_ADDR_CPORT, SATA_PSTATE_FAILED);
			nvp->nvp_state |= NV_PORT_FAILED;
			(void) nv_abort_active(nvp, NULL, SATA_PKT_DEV_ERROR);
			mutex_exit(&nvp->nvp_mutex);
		}
	}

	NVLOG((NVDBG_INTR, nvc, nvp, "mcp5x_intr_port: finished ret=%d", ret));

	return (ret);
}


/*
 * mcp5x interrupt entry point: dispatch to each port's handler.
 * ORing the results is safe because DDI_INTR_UNCLAIMED is zero, so the
 * combined value is CLAIMED if either port claimed the interrupt.
 */
/* ARGSUSED */
static uint_t
mcp5x_intr(caddr_t arg1, caddr_t arg2)
{
	nv_ctl_t *nvc = (nv_ctl_t *)arg1;
	int ret;

	ret = mcp5x_intr_port(&(nvc->nvc_port[0]));
	ret |= mcp5x_intr_port(&(nvc->nvc_port[1]));

	return (ret);
}


#ifdef NCQ
/*
 * with software driven NCQ on mcp5x, an interrupt occurs right
 * before the drive is ready to do a DMA transfer.  At this point,
 * the PRD table needs to be programmed and the DMA engine enabled
 * and ready to go.
 *
 * -- MCP_SATA_AE_INT_STATUS_SDEV_DMA_SETUP indicates the interrupt
 * -- MCP_SATA_AE_NCQ_PDEV_DMA_SETUP_TAG shows which command is ready
 * -- clear bit 0 of master command reg
 * -- program PRD
 * -- clear the interrupt status bit for the DMA Setup FIS
 * -- set bit 0 of the bus master command register
 *
 * NOTE(review): this path deliberately panics (below) because NCQ is
 * incomplete; it must not be reachable in a production build.
 */
static int
mcp5x_dma_setup_intr(nv_ctl_t *nvc, nv_port_t *nvp)
{
	int slot;
	ddi_acc_handle_t bmhdl = nvp->nvp_bm_hdl;
	uint8_t bmicx;
	int port = nvp->nvp_port_num;
	uint8_t tag_shift[] = {MCP_SATA_AE_NCQ_PDEV_DMA_SETUP_TAG_SHIFT,
	    MCP_SATA_AE_NCQ_SDEV_DMA_SETUP_TAG_SHIFT};

	nv_cmn_err(CE_PANIC, nvc, nvp,
	    "this is should not be executed at all until NCQ");

	mutex_enter(&nvp->nvp_mutex);

	/* extract the tag of the command that is ready for DMA */
	slot = nv_get32(nvc->nvc_bar_hdl[5], nvc->nvc_mcp5x_ncq);

	slot = (slot >> tag_shift[port]) & MCP_SATA_AE_NCQ_DMA_SETUP_TAG_MASK;

	NVLOG((NVDBG_INTR, nvc, nvp, "mcp5x_dma_setup_intr slot %d"
	    " nvp_slot_sactive %X", slot, nvp->nvp_sactive_cache));

	/*
	 * halt the DMA engine.  This step is necessary according to
	 * the mcp5x spec, probably since there may have been a "first" packet
	 * that already programmed the DMA engine, but may not turn out to
	 * be the first one processed.
	 */
	bmicx = nv_get8(bmhdl, nvp->nvp_bmicx);

#if 0
	if (bmicx & BMICX_SSBM) {
		NVLOG((NVDBG_INTR, nvc, nvp, "BM was already enabled for "
		    "another packet. Cancelling and reprogramming"));
		nv_put8(bmhdl, nvp->nvp_bmicx, bmicx & ~BMICX_SSBM);
	}
#endif
	nv_put8(bmhdl, nvp->nvp_bmicx, bmicx & ~BMICX_SSBM);

	nv_start_dma_engine(nvp, slot);

	mutex_exit(&nvp->nvp_mutex);

	return (DDI_INTR_CLAIMED);
}
#endif /* NCQ */


/*
 * packet completion interrupt.  If the packet is complete, invoke
 * the packet completion callback.  Returns NV_SUCCESS when the
 * completion was consumed, NV_FAILURE when the caller must clear the
 * interrupt explicitly (see mcp5x_intr_port).
 */
static int
mcp5x_packet_complete_intr(nv_ctl_t *nvc, nv_port_t *nvp)
{
	uint8_t status, bmstatus;
	ddi_acc_handle_t bmhdl = nvp->nvp_bm_hdl;
	int sactive;
	int active_pkt_bit = 0, active_pkt = 0, ncq_command = B_FALSE;
	sata_pkt_t *spkt;
	nv_slot_t *nv_slotp;

	mutex_enter(&nvp->nvp_mutex);

	bmstatus = nv_get8(bmhdl, nvp->nvp_bmisx);

	if (!(bmstatus & BMISX_IDEINTS)) {
		NVLOG((NVDBG_INTR, nvc, nvp, "BMISX_IDEINTS not set"));
		mutex_exit(&nvp->nvp_mutex);

		return (NV_FAILURE);
	}

	/*
	 * If the just completed item is a non-ncq command, the busy
	 * bit should not be set
	 */
	if (nvp->nvp_non_ncq_run) {
		status = nv_get8(nvp->nvp_ctl_hdl, nvp->nvp_altstatus);
		if (status & SATA_STATUS_BSY) {
			nv_cmn_err(CE_WARN, nvc, nvp,
			    "unexpected SATA_STATUS_BSY set");
			mutex_exit(&nvp->nvp_mutex);
			/*
			 * calling function will clear interrupt.  then
			 * the real interrupt will either arrive or the
			 * packet timeout handling will take over and
			 * reset.
			 */
			return (NV_FAILURE);
		}

	} else {
		/*
		 * NCQ check for BSY here and wait if still bsy before
		 * continuing.  Rather than wait for it to be cleared
		 * when starting a packet and wasting CPU time, the starting
		 * thread can exit immediate, but might have to spin here
		 * for a bit possibly.  Needs more work and experimentation.
		 */
		ASSERT(nvp->nvp_ncq_run);
	}


	if (nvp->nvp_ncq_run) {
		ncq_command = B_TRUE;
		ASSERT(nvp->nvp_non_ncq_run == 0);
	} else {
		ASSERT(nvp->nvp_non_ncq_run != 0);
	}

	/*
	 * active_pkt_bit will represent the bitmap of the single completed
	 * packet.  Because of the nature of sw assisted NCQ, only one
	 * command will complete per interrupt.
	 */

	if (ncq_command == B_FALSE) {
		active_pkt = 0;
	} else {
		/*
		 * NCQ: determine which command just completed, by examining
		 * which bit cleared in the register since last written.
		 */
		sactive = nv_get32(nvc->nvc_bar_hdl[5], nvp->nvp_sactive);

		active_pkt_bit = ~sactive & nvp->nvp_sactive_cache;

		ASSERT(active_pkt_bit);


		/*
		 * this failure path needs more work to handle the
		 * error condition and recovery.
		 */
		if (active_pkt_bit == 0) {
			ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;

			nv_cmn_err(CE_CONT, nvc, nvp, "ERROR sactive = %X "
			    "nvp->nvp_sactive %X", sactive,
			    nvp->nvp_sactive_cache);

			/* read status to acknowledge the device interrupt */
			(void) nv_get8(cmdhdl, nvp->nvp_status);

			mutex_exit(&nvp->nvp_mutex);

			return (NV_FAILURE);
		}

		/* convert the single set bit into a slot index */
		for (active_pkt = 0; (active_pkt_bit & 0x1) != 0x1;
		    active_pkt++, active_pkt_bit >>= 1) {
		}

		/*
		 * make sure only one bit is ever turned on
		 */
		ASSERT(active_pkt_bit == 1);

		nvp->nvp_sactive_cache &= ~(0x01 << active_pkt);
	}

	nv_slotp = &(nvp->nvp_slot[active_pkt]);

	spkt = nv_slotp->nvslot_spkt;

	ASSERT(spkt != NULL);

	(*nv_slotp->nvslot_intr)(nvp, nv_slotp);

	nv_copy_registers(nvp, &spkt->satapkt_device, spkt);

	/*
	 * If there is no link cannot be certain about the completion
	 * of the packet, so abort it.
	 */
	if (nv_check_link((&spkt->satapkt_device)->
	    satadev_scr.sstatus) == B_FALSE) {
		(void) nv_abort_active(nvp, NULL, SATA_PKT_PORT_ERROR);

	} else if (nv_slotp->nvslot_flags == NVSLOT_COMPLETE) {

		nv_complete_io(nvp, spkt, active_pkt);
	}

	mutex_exit(&nvp->nvp_mutex);

	return (NV_SUCCESS);
}


/*
 * Common completion: decrement the running-command count, release the
 * slot, then either wake a synchronous waiter or invoke the packet's
 * completion callback.  Caller must hold nvp_mutex; the mutex is
 * dropped around the callback to avoid calling out with it held.
 */
static void
nv_complete_io(nv_port_t *nvp, sata_pkt_t *spkt, int slot)
{

	ASSERT(MUTEX_HELD(&nvp->nvp_mutex));

	if ((&(nvp->nvp_slot[slot]))->nvslot_flags & NVSLOT_NCQ) {
		nvp->nvp_ncq_run--;
	} else {
		nvp->nvp_non_ncq_run--;
	}

	/*
	 * mark the packet slot idle so it can be reused.  Do this before
	 * calling satapkt_comp so the slot can be reused.
	 */
	(&(nvp->nvp_slot[slot]))->nvslot_spkt = NULL;

	if (spkt->satapkt_op_mode & SATA_OPMODE_SYNCH) {
		/*
		 * If this is not timed polled mode cmd, which has an
		 * active thread monitoring for completion, then need
		 * to signal the sleeping thread that the cmd is complete.
		 */
		if ((spkt->satapkt_op_mode & SATA_OPMODE_POLLING) == 0) {
			cv_signal(&nvp->nvp_poll_cv);
		}

		return;
	}

	if (spkt->satapkt_comp != NULL) {
		mutex_exit(&nvp->nvp_mutex);
		(*spkt->satapkt_comp)(spkt);
		mutex_enter(&nvp->nvp_mutex);
	}
}


/*
 * check whether packet is ncq command or not.  for ncq command,
 * start it if there is still room on queue.  for non-ncq command only
 * start if no other command is running.
 */
static int
nv_start_async(nv_port_t *nvp, sata_pkt_t *spkt)
{
	uint8_t cmd, ncq;

	NVLOG((NVDBG_ENTRY, nvp->nvp_ctlp, nvp, "nv_start_async: entry"));

	cmd = spkt->satapkt_cmd.satacmd_cmd_reg;

	/* FPDMA READ/WRITE are the only NCQ commands */
	ncq = ((cmd == SATAC_WRITE_FPDMA_QUEUED) ||
	    (cmd == SATAC_READ_FPDMA_QUEUED));

	if (ncq == B_FALSE) {

		if ((nvp->nvp_non_ncq_run == 1) ||
		    (nvp->nvp_ncq_run > 0)) {
			/*
			 * next command is non-ncq which can't run
			 * concurrently.  exit and return queue full.
			 */
			spkt->satapkt_reason = SATA_PKT_QUEUE_FULL;

			return (SATA_TRAN_QUEUE_FULL);
		}

		return (nv_start_common(nvp, spkt));
	}

	/*
	 * ncq == B_TRUE
	 */
	if (nvp->nvp_non_ncq_run == 1) {
		/*
		 * cannot start any NCQ commands when there
		 * is a non-NCQ command running.
		 */
		spkt->satapkt_reason = SATA_PKT_QUEUE_FULL;

		return (SATA_TRAN_QUEUE_FULL);
	}

#ifdef NCQ
	/*
	 * this is not compiled for now as satapkt_device.satadev_qdepth
	 * is being pulled out until NCQ support is later addressed
	 *
	 * nvp_queue_depth is initialized by the first NCQ command
	 * received.
	 */
	if (nvp->nvp_queue_depth == 1) {
		nvp->nvp_queue_depth =
		    spkt->satapkt_device.satadev_qdepth;

		ASSERT(nvp->nvp_queue_depth > 1);

		NVLOG((NVDBG_ENTRY, nvp->nvp_ctlp, nvp,
		    "nv_process_queue: nvp_queue_depth set to %d",
		    nvp->nvp_queue_depth));
	}
#endif

	if (nvp->nvp_ncq_run >= nvp->nvp_queue_depth) {
		/*
		 * max number of NCQ commands already active
		 */
		spkt->satapkt_reason = SATA_PKT_QUEUE_FULL;

		return (SATA_TRAN_QUEUE_FULL);
	}

	return (nv_start_common(nvp, spkt));
}


/*
 * configure INTx and legacy interrupts.  Allocates the interrupt
 * handle array, adds a handler per vector and enables them.  Returns
 * DDI_SUCCESS/DDI_FAILURE; on failure all allocated vectors and the
 * handle table are released.
 */
static int
nv_add_legacy_intrs(nv_ctl_t *nvc)
{
	dev_info_t *devinfo = nvc->nvc_dip;
	int actual, count = 0;
	int x, y, rc, inum = 0;

	NVLOG((NVDBG_ENTRY, nvc, NULL, "nv_add_legacy_intrs"));

	/*
	 * get number of interrupts
	 */
	rc = ddi_intr_get_nintrs(devinfo, DDI_INTR_TYPE_FIXED, &count);
	if ((rc != DDI_SUCCESS) || (count == 0)) {
		NVLOG((NVDBG_INTR, nvc, NULL,
		    "ddi_intr_get_nintrs() failed, "
		    "rc %d count %d", rc, count));

		return (DDI_FAILURE);
	}

	/*
	 * allocate an array of interrupt handles
	 */
	nvc->nvc_intr_size = count * sizeof (ddi_intr_handle_t);
	nvc->nvc_htable = kmem_zalloc(nvc->nvc_intr_size, KM_SLEEP);

	/*
	 * call ddi_intr_alloc()
	 */
	rc = ddi_intr_alloc(devinfo, nvc->nvc_htable, DDI_INTR_TYPE_FIXED,
	    inum, count, &actual, DDI_INTR_ALLOC_STRICT);

	if ((rc != DDI_SUCCESS) || (actual == 0)) {
		nv_cmn_err(CE_WARN, nvc, NULL,
		    "ddi_intr_alloc() failed, rc %d", rc);
		kmem_free(nvc->nvc_htable, nvc->nvc_intr_size);

		return (DDI_FAILURE);
	}

	if (actual < count) {
		nv_cmn_err(CE_WARN, nvc, NULL,
		    "ddi_intr_alloc: requested: %d, received: %d",
		    count, actual);

		goto failure;
	}

	nvc->nvc_intr_cnt = actual;

	/*
	 * get intr priority
	 */
	if (ddi_intr_get_pri(nvc->nvc_htable[0], &nvc->nvc_intr_pri) !=
	    DDI_SUCCESS) {
		nv_cmn_err(CE_WARN, nvc, NULL, "ddi_intr_get_pri() failed");

		goto failure;
	}

	/*
	 * Test for high level mutex
	 */
	if (nvc->nvc_intr_pri >= ddi_intr_get_hilevel_pri()) {
		nv_cmn_err(CE_WARN, nvc, NULL,
		    "nv_add_legacy_intrs: high level intr not supported");

		goto failure;
	}

	for (x = 0; x < actual; x++) {
		if (ddi_intr_add_handler(nvc->nvc_htable[x],
		    nvc->nvc_interrupt, (caddr_t)nvc, NULL) != DDI_SUCCESS) {
			nv_cmn_err(CE_WARN, nvc, NULL,
			    "ddi_intr_add_handler() failed");

			goto failure;
		}
	}

	/*
	 * call ddi_intr_enable() for legacy interrupts
	 */
	for (x = 0; x < nvc->nvc_intr_cnt; x++) {
		(void) ddi_intr_enable(nvc->nvc_htable[x]);
	}

	return (DDI_SUCCESS);

failure:
	/*
	 * free allocated intr and nvc_htable
	 */
	for (y = 0; y < actual; y++) {
		(void) ddi_intr_free(nvc->nvc_htable[y]);
	}

	kmem_free(nvc->nvc_htable, nvc->nvc_intr_size);

	return (DDI_FAILURE);
}

#ifdef NV_MSI_SUPPORTED
/*
 * configure MSI interrupts
 */
static int
nv_add_msi_intrs(nv_ctl_t *nvc)
{
	dev_info_t *devinfo = nvc->nvc_dip;
	int count, avail, actual;
	int x, y, rc, inum = 0;

	NVLOG((NVDBG_ENTRY, nvc, NULL, "nv_add_msi_intrs"));

	/*
	 * get number of interrupts
	 */
	rc = ddi_intr_get_nintrs(devinfo, DDI_INTR_TYPE_MSI, &count);
	if ((rc != DDI_SUCCESS) || (count == 0)) {
		nv_cmn_err(CE_WARN, nvc, NULL,
		    "ddi_intr_get_nintrs() failed, "
		    "rc %d count %d", rc, count);

		return (DDI_FAILURE);
	}

	/*
	 * get number of available interrupts
	 */
	rc = ddi_intr_get_navail(devinfo, DDI_INTR_TYPE_MSI, &avail);
	if ((rc !=
DDI_SUCCESS) || (avail == 0)) { 3533 nv_cmn_err(CE_WARN, nvc, NULL, 3534 "ddi_intr_get_navail() failed, " 3535 "rc %d avail %d", rc, avail); 3536 3537 return (DDI_FAILURE); 3538 } 3539 3540 if (avail < count) { 3541 nv_cmn_err(CE_WARN, nvc, NULL, 3542 "ddi_intr_get_nvail returned %d ddi_intr_get_nintrs: %d", 3543 avail, count); 3544 } 3545 3546 /* 3547 * allocate an array of interrupt handles 3548 */ 3549 nvc->nvc_intr_size = count * sizeof (ddi_intr_handle_t); 3550 nvc->nvc_htable = kmem_alloc(nvc->nvc_intr_size, KM_SLEEP); 3551 3552 rc = ddi_intr_alloc(devinfo, nvc->nvc_htable, DDI_INTR_TYPE_MSI, 3553 inum, count, &actual, DDI_INTR_ALLOC_NORMAL); 3554 3555 if ((rc != DDI_SUCCESS) || (actual == 0)) { 3556 nv_cmn_err(CE_WARN, nvc, NULL, 3557 "ddi_intr_alloc() failed, rc %d", rc); 3558 kmem_free(nvc->nvc_htable, nvc->nvc_intr_size); 3559 3560 return (DDI_FAILURE); 3561 } 3562 3563 /* 3564 * Use interrupt count returned or abort? 3565 */ 3566 if (actual < count) { 3567 NVLOG((NVDBG_INIT, nvc, NULL, 3568 "Requested: %d, Received: %d", count, actual)); 3569 } 3570 3571 nvc->nvc_intr_cnt = actual; 3572 3573 /* 3574 * get priority for first msi, assume remaining are all the same 3575 */ 3576 if (ddi_intr_get_pri(nvc->nvc_htable[0], &nvc->nvc_intr_pri) != 3577 DDI_SUCCESS) { 3578 nv_cmn_err(CE_WARN, nvc, NULL, "ddi_intr_get_pri() failed"); 3579 3580 goto failure; 3581 } 3582 3583 /* 3584 * test for high level mutex 3585 */ 3586 if (nvc->nvc_intr_pri >= ddi_intr_get_hilevel_pri()) { 3587 nv_cmn_err(CE_WARN, nvc, NULL, 3588 "nv_add_msi_intrs: high level intr not supported"); 3589 3590 goto failure; 3591 } 3592 3593 /* 3594 * Call ddi_intr_add_handler() 3595 */ 3596 for (x = 0; x < actual; x++) { 3597 if (ddi_intr_add_handler(nvc->nvc_htable[x], 3598 nvc->nvc_interrupt, (caddr_t)nvc, NULL) != DDI_SUCCESS) { 3599 nv_cmn_err(CE_WARN, nvc, NULL, 3600 "ddi_intr_add_handler() failed"); 3601 3602 goto failure; 3603 } 3604 } 3605 3606 (void) ddi_intr_get_cap(nvc->nvc_htable[0], 
&nvc->nvc_intr_cap); 3607 3608 if (nvc->nvc_intr_cap & DDI_INTR_FLAG_BLOCK) { 3609 (void) ddi_intr_block_enable(nvc->nvc_htable, 3610 nvc->nvc_intr_cnt); 3611 } else { 3612 /* 3613 * Call ddi_intr_enable() for MSI non block enable 3614 */ 3615 for (x = 0; x < nvc->nvc_intr_cnt; x++) { 3616 (void) ddi_intr_enable(nvc->nvc_htable[x]); 3617 } 3618 } 3619 3620 return (DDI_SUCCESS); 3621 3622 failure: 3623 /* 3624 * free allocated intr and nvc_htable 3625 */ 3626 for (y = 0; y < actual; y++) { 3627 (void) ddi_intr_free(nvc->nvc_htable[y]); 3628 } 3629 3630 kmem_free(nvc->nvc_htable, nvc->nvc_intr_size); 3631 3632 return (DDI_FAILURE); 3633 } 3634 #endif 3635 3636 3637 static void 3638 nv_rem_intrs(nv_ctl_t *nvc) 3639 { 3640 int x, i; 3641 nv_port_t *nvp; 3642 3643 NVLOG((NVDBG_ENTRY, nvc, NULL, "nv_rem_intrs")); 3644 3645 /* 3646 * prevent controller from generating interrupts by 3647 * masking them out. This is an extra precaution. 3648 */ 3649 for (i = 0; i < NV_MAX_PORTS(nvc); i++) { 3650 nvp = (&nvc->nvc_port[i]); 3651 mutex_enter(&nvp->nvp_mutex); 3652 (*(nvc->nvc_set_intr))(nvp, NV_INTR_DISABLE); 3653 mutex_exit(&nvp->nvp_mutex); 3654 } 3655 3656 /* 3657 * disable all interrupts 3658 */ 3659 if ((nvc->nvc_intr_type == DDI_INTR_TYPE_MSI) && 3660 (nvc->nvc_intr_cap & DDI_INTR_FLAG_BLOCK)) { 3661 (void) ddi_intr_block_disable(nvc->nvc_htable, 3662 nvc->nvc_intr_cnt); 3663 } else { 3664 for (x = 0; x < nvc->nvc_intr_cnt; x++) { 3665 (void) ddi_intr_disable(nvc->nvc_htable[x]); 3666 } 3667 } 3668 3669 for (x = 0; x < nvc->nvc_intr_cnt; x++) { 3670 (void) ddi_intr_remove_handler(nvc->nvc_htable[x]); 3671 (void) ddi_intr_free(nvc->nvc_htable[x]); 3672 } 3673 3674 kmem_free(nvc->nvc_htable, nvc->nvc_intr_size); 3675 } 3676 3677 3678 /* 3679 * variable argument wrapper for cmn_err. 
prefixes the instance and port 3680 * number if possible 3681 */ 3682 static void 3683 nv_vcmn_err(int ce, nv_ctl_t *nvc, nv_port_t *nvp, char *fmt, va_list ap) 3684 { 3685 char port[NV_STRING_10]; 3686 char inst[NV_STRING_10]; 3687 3688 mutex_enter(&nv_log_mutex); 3689 3690 if (nvc) { 3691 (void) snprintf(inst, NV_STRING_10, "inst %d", 3692 ddi_get_instance(nvc->nvc_dip)); 3693 } else { 3694 inst[0] = '\0'; 3695 } 3696 3697 if (nvp) { 3698 (void) sprintf(port, " port %d", nvp->nvp_port_num); 3699 } else { 3700 port[0] = '\0'; 3701 } 3702 3703 (void) sprintf(nv_log_buf, "nv_sata %s%s%s", inst, port, 3704 (inst[0]|port[0] ? ": " :"")); 3705 3706 (void) vsnprintf(&nv_log_buf[strlen(nv_log_buf)], 3707 NV_STRING_512 - strlen(nv_log_buf), fmt, ap); 3708 3709 /* 3710 * normally set to log to console but in some debug situations it 3711 * may be useful to log only to a file. 3712 */ 3713 if (nv_log_to_console) { 3714 if (nv_prom_print) { 3715 prom_printf("%s\n", nv_log_buf); 3716 } else { 3717 cmn_err(ce, "%s", nv_log_buf); 3718 } 3719 3720 3721 } else { 3722 cmn_err(ce, "!%s", nv_log_buf); 3723 } 3724 3725 mutex_exit(&nv_log_mutex); 3726 } 3727 3728 3729 /* 3730 * wrapper for cmn_err 3731 */ 3732 static void 3733 nv_cmn_err(int ce, nv_ctl_t *nvc, nv_port_t *nvp, char *fmt, ...) 3734 { 3735 va_list ap; 3736 3737 va_start(ap, fmt); 3738 nv_vcmn_err(ce, nvc, nvp, fmt, ap); 3739 va_end(ap); 3740 } 3741 3742 3743 #if defined(DEBUG) 3744 /* 3745 * prefixes the instance and port number if possible to the debug message 3746 */ 3747 static void 3748 nv_log(uint_t flag, nv_ctl_t *nvc, nv_port_t *nvp, char *fmt, ...) 
3749 { 3750 va_list ap; 3751 3752 if ((nv_debug_flags & flag) == 0) { 3753 return; 3754 } 3755 3756 va_start(ap, fmt); 3757 nv_vcmn_err(CE_NOTE, nvc, nvp, fmt, ap); 3758 va_end(ap); 3759 3760 /* 3761 * useful for some debugging situations 3762 */ 3763 if (nv_log_delay) { 3764 drv_usecwait(nv_log_delay); 3765 } 3766 3767 } 3768 #endif /* DEBUG */ 3769 3770 3771 /* 3772 * program registers which are common to all commands 3773 */ 3774 static void 3775 nv_program_taskfile_regs(nv_port_t *nvp, int slot) 3776 { 3777 nv_slot_t *nv_slotp = &(nvp->nvp_slot[slot]); 3778 sata_pkt_t *spkt; 3779 sata_cmd_t *satacmd; 3780 ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl; 3781 uint8_t cmd, ncq = B_FALSE; 3782 3783 spkt = nv_slotp->nvslot_spkt; 3784 satacmd = &spkt->satapkt_cmd; 3785 cmd = satacmd->satacmd_cmd_reg; 3786 3787 ASSERT(nvp->nvp_slot); 3788 3789 if ((cmd == SATAC_WRITE_FPDMA_QUEUED) || 3790 (cmd == SATAC_READ_FPDMA_QUEUED)) { 3791 ncq = B_TRUE; 3792 } 3793 3794 /* 3795 * select the drive 3796 */ 3797 nv_put8(cmdhdl, nvp->nvp_drvhd, satacmd->satacmd_device_reg); 3798 3799 /* 3800 * make certain the drive selected 3801 */ 3802 if (nv_wait(nvp, SATA_STATUS_DRDY, SATA_STATUS_BSY, 3803 NV_SEC2USEC(5), 0) == B_FALSE) { 3804 3805 return; 3806 } 3807 3808 switch (spkt->satapkt_cmd.satacmd_addr_type) { 3809 3810 case ATA_ADDR_LBA: 3811 NVLOG((NVDBG_DELIVER, nvp->nvp_ctlp, nvp, "ATA_ADDR_LBA mode")); 3812 3813 nv_put8(cmdhdl, nvp->nvp_count, satacmd->satacmd_sec_count_lsb); 3814 nv_put8(cmdhdl, nvp->nvp_hcyl, satacmd->satacmd_lba_high_lsb); 3815 nv_put8(cmdhdl, nvp->nvp_lcyl, satacmd->satacmd_lba_mid_lsb); 3816 nv_put8(cmdhdl, nvp->nvp_sect, satacmd->satacmd_lba_low_lsb); 3817 3818 break; 3819 3820 case ATA_ADDR_LBA28: 3821 NVLOG((NVDBG_DELIVER, nvp->nvp_ctlp, nvp, 3822 "ATA_ADDR_LBA28 mode")); 3823 /* 3824 * NCQ only uses 48-bit addressing 3825 */ 3826 ASSERT(ncq != B_TRUE); 3827 3828 nv_put8(cmdhdl, nvp->nvp_count, satacmd->satacmd_sec_count_lsb); 3829 nv_put8(cmdhdl, 
nvp->nvp_hcyl, satacmd->satacmd_lba_high_lsb); 3830 nv_put8(cmdhdl, nvp->nvp_lcyl, satacmd->satacmd_lba_mid_lsb); 3831 nv_put8(cmdhdl, nvp->nvp_sect, satacmd->satacmd_lba_low_lsb); 3832 3833 break; 3834 3835 case ATA_ADDR_LBA48: 3836 NVLOG((NVDBG_DELIVER, nvp->nvp_ctlp, nvp, 3837 "ATA_ADDR_LBA48 mode")); 3838 3839 /* 3840 * for NCQ, tag goes into count register and real sector count 3841 * into features register. The sata module does the translation 3842 * in the satacmd. 3843 */ 3844 if (ncq == B_TRUE) { 3845 nv_put8(cmdhdl, nvp->nvp_count, slot << 3); 3846 nv_put8(cmdhdl, nvp->nvp_feature, 3847 satacmd->satacmd_features_reg_ext); 3848 nv_put8(cmdhdl, nvp->nvp_feature, 3849 satacmd->satacmd_features_reg); 3850 } else { 3851 nv_put8(cmdhdl, nvp->nvp_count, 3852 satacmd->satacmd_sec_count_msb); 3853 nv_put8(cmdhdl, nvp->nvp_count, 3854 satacmd->satacmd_sec_count_lsb); 3855 } 3856 3857 /* 3858 * send the high-order half first 3859 */ 3860 nv_put8(cmdhdl, nvp->nvp_hcyl, satacmd->satacmd_lba_high_msb); 3861 nv_put8(cmdhdl, nvp->nvp_lcyl, satacmd->satacmd_lba_mid_msb); 3862 nv_put8(cmdhdl, nvp->nvp_sect, satacmd->satacmd_lba_low_msb); 3863 /* 3864 * Send the low-order half 3865 */ 3866 nv_put8(cmdhdl, nvp->nvp_hcyl, satacmd->satacmd_lba_high_lsb); 3867 nv_put8(cmdhdl, nvp->nvp_lcyl, satacmd->satacmd_lba_mid_lsb); 3868 nv_put8(cmdhdl, nvp->nvp_sect, satacmd->satacmd_lba_low_lsb); 3869 3870 break; 3871 3872 case 0: 3873 /* 3874 * non-media access commands such as identify and features 3875 * take this path. 
3876 */ 3877 nv_put8(cmdhdl, nvp->nvp_count, satacmd->satacmd_sec_count_lsb); 3878 nv_put8(cmdhdl, nvp->nvp_feature, 3879 satacmd->satacmd_features_reg); 3880 nv_put8(cmdhdl, nvp->nvp_hcyl, satacmd->satacmd_lba_high_lsb); 3881 nv_put8(cmdhdl, nvp->nvp_lcyl, satacmd->satacmd_lba_mid_lsb); 3882 nv_put8(cmdhdl, nvp->nvp_sect, satacmd->satacmd_lba_low_lsb); 3883 3884 break; 3885 3886 default: 3887 break; 3888 } 3889 3890 ASSERT(nvp->nvp_slot); 3891 } 3892 3893 3894 /* 3895 * start a command that involves no media access 3896 */ 3897 static int 3898 nv_start_nodata(nv_port_t *nvp, int slot) 3899 { 3900 nv_slot_t *nv_slotp = &(nvp->nvp_slot[slot]); 3901 sata_pkt_t *spkt = nv_slotp->nvslot_spkt; 3902 sata_cmd_t *sata_cmdp = &spkt->satapkt_cmd; 3903 ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl; 3904 3905 nv_program_taskfile_regs(nvp, slot); 3906 3907 /* 3908 * This next one sets the controller in motion 3909 */ 3910 nv_put8(cmdhdl, nvp->nvp_cmd, sata_cmdp->satacmd_cmd_reg); 3911 3912 return (SATA_TRAN_ACCEPTED); 3913 } 3914 3915 3916 int 3917 nv_bm_status_clear(nv_port_t *nvp) 3918 { 3919 ddi_acc_handle_t bmhdl = nvp->nvp_bm_hdl; 3920 uchar_t status, ret; 3921 3922 /* 3923 * Get the current BM status 3924 */ 3925 ret = status = nv_get8(bmhdl, nvp->nvp_bmisx); 3926 3927 status = (status & BMISX_MASK) | BMISX_IDERR | BMISX_IDEINTS; 3928 3929 /* 3930 * Clear the latches (and preserve the other bits) 3931 */ 3932 nv_put8(bmhdl, nvp->nvp_bmisx, status); 3933 3934 return (ret); 3935 } 3936 3937 3938 /* 3939 * program the bus master DMA engine with the PRD address for 3940 * the active slot command, and start the DMA engine. 
 */
static void
nv_start_dma_engine(nv_port_t *nvp, int slot)
{
	nv_slot_t *nv_slotp = &(nvp->nvp_slot[slot]);
	ddi_acc_handle_t bmhdl = nvp->nvp_bm_hdl;
	uchar_t direction;

	ASSERT(nv_slotp->nvslot_spkt != NULL);

	/*
	 * The BM direction bit is from the controller's point of view:
	 * a device read writes to host memory.
	 */
	if (nv_slotp->nvslot_spkt->satapkt_cmd.satacmd_flags.sata_data_direction
	    == SATA_DIR_READ) {
		direction = BMICX_RWCON_WRITE_TO_MEMORY;
	} else {
		direction = BMICX_RWCON_READ_FROM_MEMORY;
	}

	NVLOG((NVDBG_DELIVER, nvp->nvp_ctlp, nvp,
	    "nv_start_dma_engine entered"));

	/*
	 * reset the controller's interrupt and error status bits
	 */
	(void) nv_bm_status_clear(nvp);

	/*
	 * program the PRD table physical start address
	 */
	nv_put32(bmhdl, nvp->nvp_bmidtpx, nvp->nvp_sg_paddr[slot]);

	/*
	 * set the direction control and start the DMA controller
	 */
	nv_put8(bmhdl, nvp->nvp_bmicx, direction | BMICX_SSBM);
}

/*
 * start dma command, either in or out
 *
 * Programs the taskfile, issues the command, builds the PRD table in
 * DMA-visible memory and starts the bus master engine.  Returns
 * SATA_TRAN_ACCEPTED, or NV_FAILURE if the cookie count exceeds the
 * PRD table capacity (NV_DMA_NSEGS).
 */
static int
nv_start_dma(nv_port_t *nvp, int slot)
{
	nv_slot_t *nv_slotp = &(nvp->nvp_slot[slot]);
	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
	sata_pkt_t *spkt = nv_slotp->nvslot_spkt;
	sata_cmd_t *sata_cmdp = &spkt->satapkt_cmd;
	uint8_t cmd = sata_cmdp->satacmd_cmd_reg;
#ifdef NCQ
	uint8_t ncq = B_FALSE;
#endif
	ddi_acc_handle_t sghdl = nvp->nvp_sg_acc_hdl[slot];
	uint_t *dstp = (uint_t *)nvp->nvp_sg_addr[slot];
	int sg_count = sata_cmdp->satacmd_num_dma_cookies, idx;
	ddi_dma_cookie_t *srcp = sata_cmdp->satacmd_dma_cookie_list;

	ASSERT(sg_count != 0);

	if (sata_cmdp->satacmd_num_dma_cookies > NV_DMA_NSEGS) {
		nv_cmn_err(CE_WARN, nvp->nvp_ctlp, nvp, "NV_DMA_NSEGS=%d <"
		    " satacmd_num_dma_cookies=%d", NV_DMA_NSEGS,
		    sata_cmdp->satacmd_num_dma_cookies);

		return (NV_FAILURE);
	}

	nv_program_taskfile_regs(nvp, slot);

	/*
	 * start the drive in motion
	 */
	nv_put8(cmdhdl, nvp->nvp_cmd, cmd);

	/*
	 * the drive starts processing the transaction when the cmd register
	 * is written.  This is done here before programming the DMA engine to
	 * parallelize and save some time.  In the event that the drive is ready
	 * before DMA, it will wait.
	 */
#ifdef NCQ
	if ((cmd == SATAC_WRITE_FPDMA_QUEUED) ||
	    (cmd == SATAC_READ_FPDMA_QUEUED)) {
		ncq = B_TRUE;
	}
#endif

	/*
	 * copy the PRD list to PRD table in DMA accessible memory
	 * so that the controller can access it.
	 */
	for (idx = 0; idx < sg_count; idx++, srcp++) {
		uint32_t size;

		ASSERT(srcp->dmac_size <= UINT16_MAX);

		nv_put32(sghdl, dstp++, srcp->dmac_address);

		size = srcp->dmac_size;

		/*
		 * If this is a 40-bit address, copy bits 32-40 of the
		 * physical address to bits 16-24 of the PRD count.
		 */
		if (srcp->dmac_laddress > UINT32_MAX) {
			size |= ((srcp->dmac_laddress & 0xff00000000) >> 16);
		}

		/*
		 * set the end of table flag for the last entry
		 */
		if (idx == (sg_count - 1)) {
			size |= PRDE_EOT;
		}

		nv_put32(sghdl, dstp++, size);
	}

	(void) ddi_dma_sync(nvp->nvp_sg_dma_hdl[slot], 0,
	    sizeof (prde_t) * NV_DMA_NSEGS, DDI_DMA_SYNC_FORDEV);

	nv_start_dma_engine(nvp, slot);

#ifdef NCQ
	/*
	 * optimization: for SWNCQ, start DMA engine if this is the only
	 * command running.  Preliminary NCQ efforts indicated this needs
	 * more debugging.
	 *
	 * if (nvp->nvp_ncq_run <= 1)
	 */

	if (ncq == B_FALSE) {
		NVLOG((NVDBG_DELIVER, nvp->nvp_ctlp, nvp,
		    "NOT NCQ so starting DMA NOW non_ncq_commands=%d"
		    " cmd = %X", non_ncq_commands++, cmd));
		nv_start_dma_engine(nvp, slot);
	} else {
		NVLOG((NVDBG_DELIVER, nvp->nvp_ctlp, nvp, "?NCQ, so program "
		    "DMA later ncq_commands=%d cmd = %X", ncq_commands++, cmd));
	}
#endif /* NCQ */

	return (SATA_TRAN_ACCEPTED);
}


/*
 * start a PIO data-in ATA command
 */
static int
nv_start_pio_in(nv_port_t *nvp, int slot)
{

	nv_slot_t *nv_slotp = &(nvp->nvp_slot[slot]);
	sata_pkt_t *spkt = nv_slotp->nvslot_spkt;
	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;

	nv_program_taskfile_regs(nvp, slot);

	/*
	 * This next one sets the drive in motion
	 */
	nv_put8(cmdhdl, nvp->nvp_cmd, spkt->satapkt_cmd.satacmd_cmd_reg);

	return (SATA_TRAN_ACCEPTED);
}


/*
 * start a PIO data-out ATA command
 *
 * Unlike data-in, the first data chunk must be pushed to the drive here
 * (there is no interrupt for it), so this busy-waits for DRQ and then
 * sends the first block via nv_intr_pio_out().
 */
static int
nv_start_pio_out(nv_port_t *nvp, int slot)
{
	nv_slot_t *nv_slotp = &(nvp->nvp_slot[slot]);
	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
	sata_pkt_t *spkt = nv_slotp->nvslot_spkt;

	nv_program_taskfile_regs(nvp, slot);

	/*
	 * this next one sets the drive in motion
	 */
	nv_put8(cmdhdl, nvp->nvp_cmd, spkt->satapkt_cmd.satacmd_cmd_reg);

	/*
	 * wait for the busy bit to settle
	 */
	NV_DELAY_NSEC(400);

	/*
	 * wait for the drive to assert DRQ to send the first chunk
	 * of data.  Have to busy wait because there's no interrupt for
	 * the first chunk.  This is bad... uses a lot of cycles if the
	 * drive responds too slowly or if the wait loop granularity
	 * is too large.  It's even worse if the drive is defective and
	 * the loop times out.
	 */
	if (nv_wait3(nvp, SATA_STATUS_DRQ, SATA_STATUS_BSY, /* okay */
	    SATA_STATUS_ERR, SATA_STATUS_BSY, /* cmd failed */
	    SATA_STATUS_DF, SATA_STATUS_BSY, /* drive failed */
	    4000000, 0) == B_FALSE) {
		spkt->satapkt_reason = SATA_PKT_TIMEOUT;

		goto error;
	}

	/*
	 * send the first block.
	 */
	nv_intr_pio_out(nvp, nv_slotp);

	/*
	 * If nvslot_flags is not set to COMPLETE yet, then processing
	 * is OK so far, so return.  Otherwise, fall into error handling
	 * below.
	 */
	if (nv_slotp->nvslot_flags != NVSLOT_COMPLETE) {

		return (SATA_TRAN_ACCEPTED);
	}

	error:
	/*
	 * there was an error so reset the device and complete the packet.
	 */
	nv_copy_registers(nvp, &spkt->satapkt_device, spkt);
	nv_complete_io(nvp, spkt, 0);
	nv_reset(nvp);

	return (SATA_TRAN_PORT_ERROR);
}


/*
 * start a ATAPI Packet command (PIO data in or out)
 */
static int
nv_start_pkt_pio(nv_port_t *nvp, int slot)
{
	nv_slot_t *nv_slotp = &(nvp->nvp_slot[slot]);
	sata_pkt_t *spkt = nv_slotp->nvslot_spkt;
	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
	sata_cmd_t *satacmd = &spkt->satapkt_cmd;

	NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
	    "nv_start_pkt_pio: start"));

	/*
	 * Write the PACKET command to the command register.  Normally
	 * this would be done through nv_program_taskfile_regs().  It
	 * is done here because some values need to be overridden.
	 */

	/* select the drive */
	nv_put8(cmdhdl, nvp->nvp_drvhd, satacmd->satacmd_device_reg);

	/* make certain the drive selected */
	if (nv_wait(nvp, SATA_STATUS_DRDY, SATA_STATUS_BSY,
	    NV_SEC2USEC(5), 0) == B_FALSE) {
		NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
		    "nv_start_pkt_pio: drive select failed"));
		return (SATA_TRAN_PORT_ERROR);
	}

	/*
	 * The command is always sent via PIO, despite whatever the SATA
	 * framework sets in the command.  Overwrite the DMA bit to do this.
	 * Also, overwrite the overlay bit to be safe (it shouldn't be set).
	 */
	nv_put8(cmdhdl, nvp->nvp_feature, 0);	/* deassert DMA and OVL */

	/* set appropriately by the sata framework */
	nv_put8(cmdhdl, nvp->nvp_hcyl, satacmd->satacmd_lba_high_lsb);
	nv_put8(cmdhdl, nvp->nvp_lcyl, satacmd->satacmd_lba_mid_lsb);
	nv_put8(cmdhdl, nvp->nvp_sect, satacmd->satacmd_lba_low_lsb);
	nv_put8(cmdhdl, nvp->nvp_count, satacmd->satacmd_sec_count_lsb);

	/* initiate the command by writing the command register last */
	nv_put8(cmdhdl, nvp->nvp_cmd, spkt->satapkt_cmd.satacmd_cmd_reg);

	/* Give the host controller time to do its thing */
	NV_DELAY_NSEC(400);

	/*
	 * Wait for the device to indicate that it is ready for the command
	 * ATAPI protocol state - HP0: Check_Status_A
	 */

	if (nv_wait3(nvp, SATA_STATUS_DRQ, SATA_STATUS_BSY, /* okay */
	    SATA_STATUS_ERR, SATA_STATUS_BSY, /* cmd failed */
	    SATA_STATUS_DF, SATA_STATUS_BSY, /* drive failed */
	    4000000, 0) == B_FALSE) {
		/*
		 * Either an error or device fault occurred or the wait
		 * timed out.  According to the ATAPI protocol, command
		 * completion is also possible.  Other implementations of
		 * this protocol don't handle this last case, so neither
		 * does this code.
		 */

		if (nv_get8(cmdhdl, nvp->nvp_status) &
		    (SATA_STATUS_ERR | SATA_STATUS_DF)) {
			spkt->satapkt_reason = SATA_PKT_DEV_ERROR;

			NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
			    "nv_start_pkt_pio: device error (HP0)"));
		} else {
			spkt->satapkt_reason = SATA_PKT_TIMEOUT;

			NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
			    "nv_start_pkt_pio: timeout (HP0)"));
		}

		nv_copy_registers(nvp, &spkt->satapkt_device, spkt);
		nv_complete_io(nvp, spkt, 0);
		nv_reset(nvp);

		return (SATA_TRAN_PORT_ERROR);
	}

	/*
	 * Put the ATAPI command in the data register
	 * ATAPI protocol state - HP1: Send_Packet
	 */

	ddi_rep_put16(cmdhdl, (ushort_t *)spkt->satapkt_cmd.satacmd_acdb,
	    (ushort_t *)nvp->nvp_data,
	    (spkt->satapkt_cmd.satacmd_acdb_len >> 1), DDI_DEV_NO_AUTOINCR);

	/*
	 * See you in nv_intr_pkt_pio.
	 * ATAPI protocol state - HP3: INTRQ_wait
	 */

	NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
	    "nv_start_pkt_pio: exiting into HP3"));

	return (SATA_TRAN_ACCEPTED);
}


/*
 * Interrupt processing for a non-data ATA command.
 */
static void
nv_intr_nodata(nv_port_t *nvp, nv_slot_t *nv_slotp)
{
	uchar_t status;
	sata_pkt_t *spkt = nv_slotp->nvslot_spkt;
	sata_cmd_t *sata_cmdp = &spkt->satapkt_cmd;
	ddi_acc_handle_t ctlhdl = nvp->nvp_ctl_hdl;
	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;

	NVLOG((NVDBG_INTR, nvp->nvp_ctlp, nvp, "nv_intr_nodata entered"));

	/*
	 * reading the status register also clears the pending interrupt
	 */
	status = nv_get8(cmdhdl, nvp->nvp_status);

	/*
	 * check for errors
	 */
	if (status & (SATA_STATUS_DF | SATA_STATUS_ERR)) {
		spkt->satapkt_reason = SATA_PKT_DEV_ERROR;
		sata_cmdp->satacmd_status_reg = nv_get8(ctlhdl,
		    nvp->nvp_altstatus);
		sata_cmdp->satacmd_error_reg = nv_get8(cmdhdl, nvp->nvp_error);
	} else {
		spkt->satapkt_reason = SATA_PKT_COMPLETED;
	}

	nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
}


/*
 * ATA command, PIO data in
 *
 * Reads the next sector-sized chunk from the data register into the
 * slot's buffer; marks the slot complete when the byte count reaches
 * zero or on error/timeout.
 */
static void
nv_intr_pio_in(nv_port_t *nvp, nv_slot_t *nv_slotp)
{
	uchar_t status;
	sata_pkt_t *spkt = nv_slotp->nvslot_spkt;
	sata_cmd_t *sata_cmdp = &spkt->satapkt_cmd;
	ddi_acc_handle_t ctlhdl = nvp->nvp_ctl_hdl;
	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
	int count;

	status = nv_get8(cmdhdl, nvp->nvp_status);

	if (status & SATA_STATUS_BSY) {
		spkt->satapkt_reason = SATA_PKT_TIMEOUT;
		nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
		sata_cmdp->satacmd_status_reg = nv_get8(ctlhdl,
		    nvp->nvp_altstatus);
		sata_cmdp->satacmd_error_reg = nv_get8(cmdhdl, nvp->nvp_error);
		nv_reset(nvp);

		return;
	}

	/*
	 * check for errors
	 */
	if ((status & (SATA_STATUS_DRQ | SATA_STATUS_DF |
	    SATA_STATUS_ERR)) != SATA_STATUS_DRQ) {
		nv_copy_registers(nvp, &spkt->satapkt_device, spkt);
		nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
		spkt->satapkt_reason = SATA_PKT_DEV_ERROR;

		return;
	}

	/*
	 * read the next chunk of data (if any)
	 */
	count = min(nv_slotp->nvslot_byte_count, NV_BYTES_PER_SEC);

	/*
	 * read count bytes
	 */
	ASSERT(count != 0);

	ddi_rep_get16(cmdhdl, (ushort_t *)nv_slotp->nvslot_v_addr,
	    (ushort_t *)nvp->nvp_data, (count >> 1), DDI_DEV_NO_AUTOINCR);

	nv_slotp->nvslot_v_addr += count;
	nv_slotp->nvslot_byte_count -= count;


	if (nv_slotp->nvslot_byte_count != 0) {
		/*
		 * more to transfer.  Wait for next interrupt.
		 */
		return;
	}

	/*
	 * transfer is complete.  wait for the busy bit to settle.
	 */
	NV_DELAY_NSEC(400);

	spkt->satapkt_reason = SATA_PKT_COMPLETED;
	nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
}


/*
 * ATA command PIO data out
 *
 * Writes the next sector-sized chunk from the slot's buffer to the data
 * register.  Completion is signalled by the drive dropping DRQ with a
 * zero remaining byte count.
 */
static void
nv_intr_pio_out(nv_port_t *nvp, nv_slot_t *nv_slotp)
{
	sata_pkt_t *spkt = nv_slotp->nvslot_spkt;
	sata_cmd_t *sata_cmdp = &spkt->satapkt_cmd;
	uchar_t status;
	ddi_acc_handle_t ctlhdl = nvp->nvp_ctl_hdl;
	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
	int count;

	/*
	 * clear the IRQ
	 */
	status = nv_get8(cmdhdl, nvp->nvp_status);

	if (status & SATA_STATUS_BSY) {
		/*
		 * this should not happen
		 */
		spkt->satapkt_reason = SATA_PKT_TIMEOUT;
		nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
		sata_cmdp->satacmd_status_reg = nv_get8(ctlhdl,
		    nvp->nvp_altstatus);
		sata_cmdp->satacmd_error_reg = nv_get8(cmdhdl, nvp->nvp_error);

		return;
	}

	/*
	 * check for errors
	 */
	if (status & (SATA_STATUS_DF | SATA_STATUS_ERR)) {
		nv_copy_registers(nvp, &spkt->satapkt_device, spkt);
		nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
		spkt->satapkt_reason = SATA_PKT_DEV_ERROR;

		return;
	}

	/*
	 * this is the condition which signals the drive is
	 * no longer ready to transfer.  Likely that the transfer
	 * completed successfully, but check that byte_count is
	 * zero.
	 */
	if ((status & SATA_STATUS_DRQ) == 0) {

		if (nv_slotp->nvslot_byte_count == 0) {
			/*
			 * complete; successful transfer
			 */
			spkt->satapkt_reason = SATA_PKT_COMPLETED;
		} else {
			/*
			 * error condition, incomplete transfer
			 */
			nv_copy_registers(nvp, &spkt->satapkt_device, spkt);
			spkt->satapkt_reason = SATA_PKT_DEV_ERROR;
		}
		nv_slotp->nvslot_flags = NVSLOT_COMPLETE;

		return;
	}

	/*
	 * write the next chunk of data
	 */
	count = min(nv_slotp->nvslot_byte_count, NV_BYTES_PER_SEC);

	/*
	 * read or write count bytes
	 */

	ASSERT(count != 0);

	ddi_rep_put16(cmdhdl, (ushort_t *)nv_slotp->nvslot_v_addr,
	    (ushort_t *)nvp->nvp_data, (count >> 1), DDI_DEV_NO_AUTOINCR);

	nv_slotp->nvslot_v_addr += count;
	nv_slotp->nvslot_byte_count -= count;
}


/*
 * ATAPI PACKET command, PIO in/out interrupt
 *
 * Under normal circumstances, one of four different interrupt scenarios
 * will result in this function being called:
 *
 * 1. Packet command data transfer
 * 2. Packet command completion
 * 3. Request sense data transfer
 * 4.
 * Request sense command completion
 */
static void
nv_intr_pkt_pio(nv_port_t *nvp, nv_slot_t *nv_slotp)
{
	uchar_t status;
	sata_pkt_t *spkt = nv_slotp->nvslot_spkt;
	sata_cmd_t *sata_cmdp = &spkt->satapkt_cmd;
	int direction = sata_cmdp->satacmd_flags.sata_data_direction;
	ddi_acc_handle_t ctlhdl = nvp->nvp_ctl_hdl;
	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
	uint16_t ctlr_count;
	int count;

	/* ATAPI protocol state - HP2: Check_Status_B */

	status = nv_get8(cmdhdl, nvp->nvp_status);
	NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
	    "nv_intr_pkt_pio: status 0x%x", status));

	if (status & SATA_STATUS_BSY) {
		/*
		 * BSY should not be set at interrupt time; treat as a
		 * device error if a request sense was in flight, else a
		 * timeout requiring a reset.
		 */
		if ((nv_slotp->nvslot_flags & NVSLOT_RQSENSE) != 0) {
			nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
			spkt->satapkt_reason = SATA_PKT_DEV_ERROR;
		} else {
			nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
			spkt->satapkt_reason = SATA_PKT_TIMEOUT;

			nv_reset(nvp);
		}

		NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
		    "nv_intr_pkt_pio: busy - status 0x%x", status));

		return;
	}

	if ((status & SATA_STATUS_DF) != 0) {
		/*
		 * On device fault, just clean up and bail.  Request sense
		 * will just default to its NO SENSE initialized value.
		 */

		if ((nv_slotp->nvslot_flags & NVSLOT_RQSENSE) == 0) {
			nv_copy_registers(nvp, &spkt->satapkt_device, spkt);
		}

		nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
		spkt->satapkt_reason = SATA_PKT_DEV_ERROR;

		sata_cmdp->satacmd_status_reg = nv_get8(ctlhdl,
		    nvp->nvp_altstatus);
		sata_cmdp->satacmd_error_reg = nv_get8(cmdhdl,
		    nvp->nvp_error);

		NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
		    "nv_intr_pkt_pio: device fault"));

		return;
	}

	if ((status & SATA_STATUS_ERR) != 0) {
		/*
		 * On command error, figure out whether we are processing a
		 * request sense.  If so, clean up and bail.  Otherwise,
		 * do a REQUEST SENSE.
		 */

		if ((nv_slotp->nvslot_flags & NVSLOT_RQSENSE) == 0) {
			nv_slotp->nvslot_flags |= NVSLOT_RQSENSE;
			if (nv_start_rqsense_pio(nvp, nv_slotp) ==
			    NV_FAILURE) {
				nv_copy_registers(nvp, &spkt->satapkt_device,
				    spkt);
				nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
				spkt->satapkt_reason = SATA_PKT_DEV_ERROR;
			}

			sata_cmdp->satacmd_status_reg = nv_get8(ctlhdl,
			    nvp->nvp_altstatus);
			sata_cmdp->satacmd_error_reg = nv_get8(cmdhdl,
			    nvp->nvp_error);
		} else {
			nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
			spkt->satapkt_reason = SATA_PKT_DEV_ERROR;

			nv_copy_registers(nvp, &spkt->satapkt_device, spkt);
		}

		NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
		    "nv_intr_pkt_pio: error (status 0x%x)", status));

		return;
	}

	if ((nv_slotp->nvslot_flags & NVSLOT_RQSENSE) != 0) {
		/*
		 * REQUEST SENSE command processing
		 */

		if ((status & (SATA_STATUS_DRQ)) != 0) {
			/* ATAPI state - HP4: Transfer_Data */

			/* read the byte count from the controller */
			ctlr_count =
			    (uint16_t)nv_get8(cmdhdl, nvp->nvp_hcyl) << 8;
			ctlr_count |= nv_get8(cmdhdl, nvp->nvp_lcyl);

			NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
			    "nv_intr_pkt_pio: ctlr byte count - %d",
			    ctlr_count));

			if (ctlr_count == 0) {
				/* no data to transfer - some devices do this */

				spkt->satapkt_reason = SATA_PKT_DEV_ERROR;
				nv_slotp->nvslot_flags = NVSLOT_COMPLETE;

				NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
				    "nv_intr_pkt_pio: done (no data)"));

				return;
			}

			count = min(ctlr_count, SATA_ATAPI_RQSENSE_LEN);

			/* transfer the data */
			ddi_rep_get16(cmdhdl,
			    (ushort_t *)nv_slotp->nvslot_rqsense_buff,
			    (ushort_t *)nvp->nvp_data, (count >> 1),
			    DDI_DEV_NO_AUTOINCR);

			/* consume residual bytes */
			ctlr_count -= count;

			if (ctlr_count > 0) {
				for (; ctlr_count > 0; ctlr_count -= 2)
					(void) ddi_get16(cmdhdl,
					    (ushort_t *)nvp->nvp_data);
			}

			NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
			    "nv_intr_pkt_pio: transition to HP2"));
		} else {
			/* still in ATAPI state - HP2 */

			/*
			 * In order to avoid clobbering the rqsense data
			 * set by the SATA framework, the sense data read
			 * from the device is put in a separate buffer and
			 * copied into the packet after the request sense
			 * command successfully completes.
			 */
			bcopy(nv_slotp->nvslot_rqsense_buff,
			    spkt->satapkt_cmd.satacmd_rqsense,
			    SATA_ATAPI_RQSENSE_LEN);

			nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
			spkt->satapkt_reason = SATA_PKT_DEV_ERROR;

			NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
			    "nv_intr_pkt_pio: request sense done"));
		}

		return;
	}

	/*
	 * Normal command processing
	 */

	if ((status & (SATA_STATUS_DRQ)) != 0) {
		/* ATAPI protocol state - HP4: Transfer_Data */

		/* read the byte count from the controller */
		ctlr_count = (uint16_t)nv_get8(cmdhdl, nvp->nvp_hcyl) << 8;
		ctlr_count |= nv_get8(cmdhdl, nvp->nvp_lcyl);

		if (ctlr_count == 0) {
			/* no data to transfer - some devices do this */

			spkt->satapkt_reason = SATA_PKT_COMPLETED;
			nv_slotp->nvslot_flags = NVSLOT_COMPLETE;

			NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
			    "nv_intr_pkt_pio: done (no data)"));

			return;
		}

		count = min(ctlr_count, nv_slotp->nvslot_byte_count);

		NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
		    "nv_intr_pkt_pio: drive_bytes 0x%x", ctlr_count));

		NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
		    "nv_intr_pkt_pio: byte_count 0x%x",
		    nv_slotp->nvslot_byte_count));

		/* transfer the data */

		if (direction == SATA_DIR_READ) {
			ddi_rep_get16(cmdhdl,
			    (ushort_t *)nv_slotp->nvslot_v_addr,
			    (ushort_t *)nvp->nvp_data, (count >> 1),
			    DDI_DEV_NO_AUTOINCR);

			ctlr_count -= count;

			if (ctlr_count > 0) {
				/* consume remaining bytes */

				for (; ctlr_count > 0;
				    ctlr_count -= 2)
					(void) ddi_get16(cmdhdl,
					    (ushort_t *)nvp->nvp_data);

				NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
				    "nv_intr_pkt_pio: bytes remained"));
			}
		} else {
			ddi_rep_put16(cmdhdl,
			    (ushort_t *)nv_slotp->nvslot_v_addr,
			    (ushort_t *)nvp->nvp_data, (count >> 1),
			    DDI_DEV_NO_AUTOINCR);
		}

		nv_slotp->nvslot_v_addr += count;
		nv_slotp->nvslot_byte_count -= count;

		NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
		    "nv_intr_pkt_pio: transition to HP2"));
	} else {
		/* still in ATAPI state - HP2 */

		spkt->satapkt_reason = SATA_PKT_COMPLETED;
		nv_slotp->nvslot_flags = NVSLOT_COMPLETE;

		NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
		    "nv_intr_pkt_pio: done"));
	}
}


/*
 * ATA command, DMA data in/out
 *
 * Stops the bus master engine, clears the interrupt latch, and maps
 * drive or bus master errors to the appropriate packet reason.
 */
static void
nv_intr_dma(nv_port_t *nvp, struct nv_slot *nv_slotp)
{
	uchar_t status;
	sata_pkt_t *spkt = nv_slotp->nvslot_spkt;
	sata_cmd_t *sata_cmdp = &spkt->satapkt_cmd;
	ddi_acc_handle_t ctlhdl = nvp->nvp_ctl_hdl;
	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
	ddi_acc_handle_t bmhdl = nvp->nvp_bm_hdl;
	uchar_t bmicx;
	uchar_t bm_status;

	nv_slotp->nvslot_flags = NVSLOT_COMPLETE;

	/*
	 * stop DMA engine.
	 */
	bmicx = nv_get8(bmhdl, nvp->nvp_bmicx);
	nv_put8(bmhdl, nvp->nvp_bmicx, bmicx & ~BMICX_SSBM);

	/*
	 * get the status and clear the IRQ, and check for DMA error
	 */
	status = nv_get8(cmdhdl, nvp->nvp_status);

	/*
	 * check for drive errors
	 */
	if (status & (SATA_STATUS_DF | SATA_STATUS_ERR)) {
		nv_copy_registers(nvp, &spkt->satapkt_device, spkt);
		spkt->satapkt_reason = SATA_PKT_DEV_ERROR;
		(void) nv_bm_status_clear(nvp);

		return;
	}

	bm_status = nv_bm_status_clear(nvp);

	/*
	 * check for bus master errors
	 */
	if (bm_status & BMISX_IDERR) {
		spkt->satapkt_reason = SATA_PKT_RESET;
		sata_cmdp->satacmd_status_reg = nv_get8(ctlhdl,
		    nvp->nvp_altstatus);
		sata_cmdp->satacmd_error_reg = nv_get8(cmdhdl, nvp->nvp_error);
		nv_reset(nvp);

		return;
	}

	spkt->satapkt_reason = SATA_PKT_COMPLETED;
}


/*
 * Wait for a register of a controller to achieve a specific state.
 * To return normally, all the bits in the first sub-mask must be ON,
 * all the bits in the second sub-mask must be OFF.
 * If timeout_usec microseconds pass without the controller achieving
 * the desired bit configuration, return TRUE, else FALSE.
 *
 * hybrid waiting algorithm: if not in interrupt context, busy looping will
 * occur for the first 250 us, then switch over to a sleeping wait.
4792 * 4793 */ 4794 int 4795 nv_wait(nv_port_t *nvp, uchar_t onbits, uchar_t offbits, uint_t timeout_usec, 4796 int type_wait) 4797 { 4798 ddi_acc_handle_t ctlhdl = nvp->nvp_ctl_hdl; 4799 hrtime_t end, cur, start_sleep, start; 4800 int first_time = B_TRUE; 4801 ushort_t val; 4802 4803 for (;;) { 4804 val = nv_get8(ctlhdl, nvp->nvp_altstatus); 4805 4806 if ((val & onbits) == onbits && (val & offbits) == 0) { 4807 4808 return (B_TRUE); 4809 } 4810 4811 cur = gethrtime(); 4812 4813 /* 4814 * store the start time and calculate the end 4815 * time. also calculate "start_sleep" which is 4816 * the point after which the driver will stop busy 4817 * waiting and change to sleep waiting. 4818 */ 4819 if (first_time) { 4820 first_time = B_FALSE; 4821 /* 4822 * start and end are in nanoseconds 4823 */ 4824 start = cur; 4825 end = start + timeout_usec * 1000; 4826 /* 4827 * add 1 ms to start 4828 */ 4829 start_sleep = start + 250000; 4830 4831 if (servicing_interrupt()) { 4832 type_wait = NV_NOSLEEP; 4833 } 4834 } 4835 4836 if (cur > end) { 4837 4838 break; 4839 } 4840 4841 if ((type_wait != NV_NOSLEEP) && (cur > start_sleep)) { 4842 #if ! defined(__lock_lint) 4843 delay(1); 4844 #endif 4845 } else { 4846 drv_usecwait(nv_usec_delay); 4847 } 4848 } 4849 4850 return (B_FALSE); 4851 } 4852 4853 4854 /* 4855 * This is a slightly more complicated version that checks 4856 * for error conditions and bails-out rather than looping 4857 * until the timeout is exceeded. 4858 * 4859 * hybrid waiting algorithm: if not in interrupt context, busy looping will 4860 * occur for the first 250 us, then switch over to a sleeping wait. 
4861 */ 4862 int 4863 nv_wait3( 4864 nv_port_t *nvp, 4865 uchar_t onbits1, 4866 uchar_t offbits1, 4867 uchar_t failure_onbits2, 4868 uchar_t failure_offbits2, 4869 uchar_t failure_onbits3, 4870 uchar_t failure_offbits3, 4871 uint_t timeout_usec, 4872 int type_wait) 4873 { 4874 ddi_acc_handle_t ctlhdl = nvp->nvp_ctl_hdl; 4875 hrtime_t end, cur, start_sleep, start; 4876 int first_time = B_TRUE; 4877 ushort_t val; 4878 4879 for (;;) { 4880 val = nv_get8(ctlhdl, nvp->nvp_altstatus); 4881 4882 /* 4883 * check for expected condition 4884 */ 4885 if ((val & onbits1) == onbits1 && (val & offbits1) == 0) { 4886 4887 return (B_TRUE); 4888 } 4889 4890 /* 4891 * check for error conditions 4892 */ 4893 if ((val & failure_onbits2) == failure_onbits2 && 4894 (val & failure_offbits2) == 0) { 4895 4896 return (B_FALSE); 4897 } 4898 4899 if ((val & failure_onbits3) == failure_onbits3 && 4900 (val & failure_offbits3) == 0) { 4901 4902 return (B_FALSE); 4903 } 4904 4905 /* 4906 * store the start time and calculate the end 4907 * time. also calculate "start_sleep" which is 4908 * the point after which the driver will stop busy 4909 * waiting and change to sleep waiting. 4910 */ 4911 if (first_time) { 4912 first_time = B_FALSE; 4913 /* 4914 * start and end are in nanoseconds 4915 */ 4916 cur = start = gethrtime(); 4917 end = start + timeout_usec * 1000; 4918 /* 4919 * add 1 ms to start 4920 */ 4921 start_sleep = start + 250000; 4922 4923 if (servicing_interrupt()) { 4924 type_wait = NV_NOSLEEP; 4925 } 4926 } else { 4927 cur = gethrtime(); 4928 } 4929 4930 if (cur > end) { 4931 4932 break; 4933 } 4934 4935 if ((type_wait != NV_NOSLEEP) && (cur > start_sleep)) { 4936 #if ! defined(__lock_lint) 4937 delay(1); 4938 #endif 4939 } else { 4940 drv_usecwait(nv_usec_delay); 4941 } 4942 } 4943 4944 return (B_FALSE); 4945 } 4946 4947 4948 /* 4949 * nv_check_link() checks if a specified link is active device present 4950 * and communicating. 
4951 */ 4952 static boolean_t 4953 nv_check_link(uint32_t sstatus) 4954 { 4955 uint8_t det; 4956 4957 det = (sstatus & SSTATUS_DET) >> SSTATUS_DET_SHIFT; 4958 4959 return (det == SSTATUS_DET_DEVPRE_PHYCOM); 4960 } 4961 4962 4963 /* 4964 * nv_port_state_change() reports the state of the port to the 4965 * sata module by calling sata_hba_event_notify(). This 4966 * function is called any time the state of the port is changed 4967 */ 4968 static void 4969 nv_port_state_change(nv_port_t *nvp, int event, uint8_t addr_type, int state) 4970 { 4971 sata_device_t sd; 4972 4973 bzero((void *)&sd, sizeof (sata_device_t)); 4974 sd.satadev_rev = SATA_DEVICE_REV; 4975 nv_copy_registers(nvp, &sd, NULL); 4976 4977 /* 4978 * When NCQ is implemented sactive and snotific field need to be 4979 * updated. 4980 */ 4981 sd.satadev_addr.cport = nvp->nvp_port_num; 4982 sd.satadev_addr.qual = addr_type; 4983 sd.satadev_state = state; 4984 4985 sata_hba_event_notify(nvp->nvp_ctlp->nvc_dip, &sd, event); 4986 } 4987 4988 4989 /* 4990 * timeout processing: 4991 * 4992 * Check if any packets have crossed a timeout threshold. If so, then 4993 * abort the packet. This function is not NCQ aware. 4994 * 4995 * If reset was invoked in any other place than nv_sata_probe(), then 4996 * monitor for reset completion here. 4997 * 4998 */ 4999 static void 5000 nv_timeout(void *arg) 5001 { 5002 nv_port_t *nvp = arg; 5003 nv_slot_t *nv_slotp; 5004 int restart_timeout = B_FALSE; 5005 5006 mutex_enter(&nvp->nvp_mutex); 5007 5008 /* 5009 * If the probe entry point is driving the reset and signature 5010 * acquisition, just return. 5011 */ 5012 if (nvp->nvp_state & NV_PORT_RESET_PROBE) { 5013 goto finished; 5014 } 5015 5016 /* 5017 * If the port is not in the init state, it likely 5018 * means the link was lost while a timeout was active. 
5019 */ 5020 if ((nvp->nvp_state & NV_PORT_INIT) == 0) { 5021 NVLOG((NVDBG_TIMEOUT, nvp->nvp_ctlp, nvp, 5022 "nv_timeout: port uninitialized")); 5023 5024 goto finished; 5025 } 5026 5027 if (nvp->nvp_state & NV_PORT_RESET) { 5028 ddi_acc_handle_t bar5_hdl = nvp->nvp_ctlp->nvc_bar_hdl[5]; 5029 uint32_t sstatus; 5030 5031 NVLOG((NVDBG_TIMEOUT, nvp->nvp_ctlp, nvp, 5032 "nv_timeout(): port waiting for signature")); 5033 5034 sstatus = nv_get32(bar5_hdl, nvp->nvp_sstatus); 5035 5036 /* 5037 * check for link presence. If the link remains 5038 * missing for more than 2 seconds, send a remove 5039 * event and abort signature acquisition. 5040 */ 5041 if (nv_check_link(sstatus) == B_FALSE) { 5042 clock_t e_link_lost = ddi_get_lbolt(); 5043 5044 if (nvp->nvp_link_lost_time == 0) { 5045 nvp->nvp_link_lost_time = e_link_lost; 5046 } 5047 if (TICK_TO_SEC(e_link_lost - 5048 nvp->nvp_link_lost_time) < NV_LINK_LOST_OK) { 5049 NVLOG((NVDBG_TIMEOUT, nvp->nvp_ctlp, nvp, 5050 "probe: intermittent link lost while" 5051 " resetting")); 5052 restart_timeout = B_TRUE; 5053 } else { 5054 NVLOG((NVDBG_TIMEOUT, nvp->nvp_ctlp, nvp, 5055 "link lost during signature acquisition." 5056 " Giving up")); 5057 nv_port_state_change(nvp, 5058 SATA_EVNT_DEVICE_DETACHED| 5059 SATA_EVNT_LINK_LOST, 5060 SATA_ADDR_CPORT, 0); 5061 nvp->nvp_state |= NV_PORT_HOTREMOVED; 5062 nvp->nvp_state &= ~NV_PORT_RESET; 5063 } 5064 5065 goto finished; 5066 } else { 5067 5068 nvp->nvp_link_lost_time = 0; 5069 } 5070 5071 nv_read_signature(nvp); 5072 5073 if (nvp->nvp_signature != 0) { 5074 if ((nvp->nvp_type == SATA_DTYPE_ATADISK) || 5075 (nvp->nvp_type == SATA_DTYPE_ATAPICD)) { 5076 nvp->nvp_state |= NV_PORT_RESTORE; 5077 nv_port_state_change(nvp, 5078 SATA_EVNT_DEVICE_RESET, 5079 SATA_ADDR_DCPORT, 5080 SATA_DSTATE_RESET|SATA_DSTATE_PWR_ACTIVE); 5081 } 5082 5083 goto finished; 5084 } 5085 5086 /* 5087 * Reset if more than 5 seconds has passed without 5088 * acquiring a signature. 
5089 */ 5090 if (TICK_TO_SEC(ddi_get_lbolt() - nvp->nvp_reset_time) > 5) { 5091 nv_reset(nvp); 5092 } 5093 5094 restart_timeout = B_TRUE; 5095 goto finished; 5096 } 5097 5098 5099 /* 5100 * not yet NCQ aware 5101 */ 5102 nv_slotp = &(nvp->nvp_slot[0]); 5103 5104 /* 5105 * this happens early on before nv_slotp is set 5106 * up OR when a device was unexpectedly removed and 5107 * there was an active packet. 5108 */ 5109 if (nv_slotp == NULL) { 5110 NVLOG((NVDBG_TIMEOUT, nvp->nvp_ctlp, nvp, 5111 "nv_timeout: nv_slotp == NULL")); 5112 5113 goto finished; 5114 } 5115 5116 /* 5117 * perform timeout checking and processing only if there is an 5118 * active packet on the port 5119 */ 5120 if (nv_slotp->nvslot_spkt != NULL) { 5121 sata_pkt_t *spkt = nv_slotp->nvslot_spkt; 5122 sata_cmd_t *satacmd = &spkt->satapkt_cmd; 5123 uint8_t cmd = satacmd->satacmd_cmd_reg; 5124 uint64_t lba; 5125 5126 #if ! defined(__lock_lint) && defined(DEBUG) 5127 5128 lba = (uint64_t)satacmd->satacmd_lba_low_lsb | 5129 ((uint64_t)satacmd->satacmd_lba_mid_lsb << 8) | 5130 ((uint64_t)satacmd->satacmd_lba_high_lsb << 16) | 5131 ((uint64_t)satacmd->satacmd_lba_low_msb << 24) | 5132 ((uint64_t)satacmd->satacmd_lba_mid_msb << 32) | 5133 ((uint64_t)satacmd->satacmd_lba_high_msb << 40); 5134 #endif 5135 5136 /* 5137 * timeout not needed if there is a polling thread 5138 */ 5139 if (spkt->satapkt_op_mode & SATA_OPMODE_POLLING) { 5140 5141 goto finished; 5142 } 5143 5144 if (TICK_TO_SEC(ddi_get_lbolt() - nv_slotp->nvslot_stime) > 5145 spkt->satapkt_time) { 5146 NVLOG((NVDBG_TIMEOUT, nvp->nvp_ctlp, nvp, 5147 "abort timeout: " 5148 "nvslot_stime: %ld max ticks till timeout: " 5149 "%ld cur_time: %ld cmd=%x lba=%d", 5150 nv_slotp->nvslot_stime, drv_usectohz(MICROSEC * 5151 spkt->satapkt_time), ddi_get_lbolt(), cmd, lba)); 5152 5153 (void) nv_abort_active(nvp, spkt, SATA_PKT_TIMEOUT); 5154 5155 } else { 5156 NVLOG((NVDBG_TIMEOUT, nvp->nvp_ctlp, nvp, "nv_timeout:" 5157 " still in use so restarting timeout")); 
5158 } 5159 restart_timeout = B_TRUE; 5160 5161 } else { 5162 /* 5163 * there was no active packet, so do not re-enable timeout 5164 */ 5165 NVLOG((NVDBG_TIMEOUT, nvp->nvp_ctlp, nvp, 5166 "nv_timeout: no active packet so not re-arming timeout")); 5167 } 5168 5169 finished: 5170 5171 if (restart_timeout == B_TRUE) { 5172 nvp->nvp_timeout_id = timeout(nv_timeout, (void *)nvp, 5173 drv_usectohz(NV_ONE_SEC)); 5174 } else { 5175 nvp->nvp_timeout_id = 0; 5176 } 5177 mutex_exit(&nvp->nvp_mutex); 5178 } 5179 5180 5181 /* 5182 * enable or disable the 3 interrupt types the driver is 5183 * interested in: completion, add and remove. 5184 */ 5185 static void 5186 ck804_set_intr(nv_port_t *nvp, int flag) 5187 { 5188 nv_ctl_t *nvc = nvp->nvp_ctlp; 5189 ddi_acc_handle_t bar5_hdl = nvc->nvc_bar_hdl[5]; 5190 uchar_t *bar5 = nvc->nvc_bar_addr[5]; 5191 uint8_t intr_bits[] = { CK804_INT_PDEV_HOT|CK804_INT_PDEV_INT, 5192 CK804_INT_SDEV_HOT|CK804_INT_SDEV_INT }; 5193 uint8_t clear_all_bits[] = { CK804_INT_PDEV_ALL, CK804_INT_SDEV_ALL }; 5194 uint8_t int_en, port = nvp->nvp_port_num, intr_status; 5195 5196 if (flag & NV_INTR_DISABLE_NON_BLOCKING) { 5197 int_en = nv_get8(bar5_hdl, 5198 (uint8_t *)(bar5 + CK804_SATA_INT_EN)); 5199 int_en &= ~intr_bits[port]; 5200 nv_put8(bar5_hdl, (uint8_t *)(bar5 + CK804_SATA_INT_EN), 5201 int_en); 5202 return; 5203 } 5204 5205 ASSERT(mutex_owned(&nvp->nvp_mutex)); 5206 5207 /* 5208 * controller level lock also required since access to an 8-bit 5209 * interrupt register is shared between both channels. 
5210 */ 5211 mutex_enter(&nvc->nvc_mutex); 5212 5213 if (flag & NV_INTR_CLEAR_ALL) { 5214 NVLOG((NVDBG_INTR, nvc, nvp, 5215 "ck804_set_intr: NV_INTR_CLEAR_ALL")); 5216 5217 intr_status = nv_get8(nvc->nvc_bar_hdl[5], 5218 (uint8_t *)(nvc->nvc_ck804_int_status)); 5219 5220 if (intr_status & clear_all_bits[port]) { 5221 5222 nv_put8(nvc->nvc_bar_hdl[5], 5223 (uint8_t *)(nvc->nvc_ck804_int_status), 5224 clear_all_bits[port]); 5225 5226 NVLOG((NVDBG_INTR, nvc, nvp, 5227 "interrupt bits cleared %x", 5228 intr_status & clear_all_bits[port])); 5229 } 5230 } 5231 5232 if (flag & NV_INTR_DISABLE) { 5233 NVLOG((NVDBG_INTR, nvc, nvp, 5234 "ck804_set_intr: NV_INTR_DISABLE")); 5235 int_en = nv_get8(bar5_hdl, 5236 (uint8_t *)(bar5 + CK804_SATA_INT_EN)); 5237 int_en &= ~intr_bits[port]; 5238 nv_put8(bar5_hdl, (uint8_t *)(bar5 + CK804_SATA_INT_EN), 5239 int_en); 5240 } 5241 5242 if (flag & NV_INTR_ENABLE) { 5243 NVLOG((NVDBG_INTR, nvc, nvp, "ck804_set_intr: NV_INTR_ENABLE")); 5244 int_en = nv_get8(bar5_hdl, 5245 (uint8_t *)(bar5 + CK804_SATA_INT_EN)); 5246 int_en |= intr_bits[port]; 5247 nv_put8(bar5_hdl, (uint8_t *)(bar5 + CK804_SATA_INT_EN), 5248 int_en); 5249 } 5250 5251 mutex_exit(&nvc->nvc_mutex); 5252 } 5253 5254 5255 /* 5256 * enable or disable the 3 interrupts the driver is interested in: 5257 * completion interrupt, hot add, and hot remove interrupt. 
5258 */ 5259 static void 5260 mcp5x_set_intr(nv_port_t *nvp, int flag) 5261 { 5262 nv_ctl_t *nvc = nvp->nvp_ctlp; 5263 ddi_acc_handle_t bar5_hdl = nvc->nvc_bar_hdl[5]; 5264 uint16_t intr_bits = 5265 MCP5X_INT_ADD|MCP5X_INT_REM|MCP5X_INT_COMPLETE; 5266 uint16_t int_en; 5267 5268 if (flag & NV_INTR_DISABLE_NON_BLOCKING) { 5269 int_en = nv_get16(bar5_hdl, nvp->nvp_mcp5x_int_ctl); 5270 int_en &= ~intr_bits; 5271 nv_put16(bar5_hdl, nvp->nvp_mcp5x_int_ctl, int_en); 5272 return; 5273 } 5274 5275 ASSERT(mutex_owned(&nvp->nvp_mutex)); 5276 5277 NVLOG((NVDBG_HOT, nvc, nvp, "mcp055_set_intr: enter flag: %d", flag)); 5278 5279 if (flag & NV_INTR_CLEAR_ALL) { 5280 NVLOG((NVDBG_INTR, nvc, nvp, 5281 "mcp5x_set_intr: NV_INTR_CLEAR_ALL")); 5282 nv_put16(bar5_hdl, nvp->nvp_mcp5x_int_status, MCP5X_INT_CLEAR); 5283 } 5284 5285 if (flag & NV_INTR_ENABLE) { 5286 NVLOG((NVDBG_INTR, nvc, nvp, "mcp5x_set_intr: NV_INTR_ENABLE")); 5287 int_en = nv_get16(bar5_hdl, nvp->nvp_mcp5x_int_ctl); 5288 int_en |= intr_bits; 5289 nv_put16(bar5_hdl, nvp->nvp_mcp5x_int_ctl, int_en); 5290 } 5291 5292 if (flag & NV_INTR_DISABLE) { 5293 NVLOG((NVDBG_INTR, nvc, nvp, 5294 "mcp5x_set_intr: NV_INTR_DISABLE")); 5295 int_en = nv_get16(bar5_hdl, nvp->nvp_mcp5x_int_ctl); 5296 int_en &= ~intr_bits; 5297 nv_put16(bar5_hdl, nvp->nvp_mcp5x_int_ctl, int_en); 5298 } 5299 } 5300 5301 5302 /* 5303 * The PM functions for suspend and resume are incomplete and need additional 5304 * work. It may or may not work in the current state. 
5305 */ 5306 static void 5307 nv_resume(nv_port_t *nvp) 5308 { 5309 NVLOG((NVDBG_INIT, nvp->nvp_ctlp, nvp, "nv_resume()")); 5310 5311 mutex_enter(&nvp->nvp_mutex); 5312 5313 if (nvp->nvp_state & NV_PORT_INACTIVE) { 5314 mutex_exit(&nvp->nvp_mutex); 5315 5316 return; 5317 } 5318 5319 #ifdef SGPIO_SUPPORT 5320 nv_sgp_drive_connect(nvp->nvp_ctlp, SGP_CTLR_PORT_TO_DRV( 5321 nvp->nvp_ctlp->nvc_ctlr_num, nvp->nvp_port_num)); 5322 #endif 5323 5324 (*(nvp->nvp_ctlp->nvc_set_intr))(nvp, NV_INTR_CLEAR_ALL|NV_INTR_ENABLE); 5325 5326 /* 5327 * power may have been removed to the port and the 5328 * drive, and/or a drive may have been added or removed. 5329 * Force a reset which will cause a probe and re-establish 5330 * any state needed on the drive. 5331 * nv_reset(nvp); 5332 */ 5333 5334 nv_reset(nvp); 5335 5336 mutex_exit(&nvp->nvp_mutex); 5337 } 5338 5339 /* 5340 * The PM functions for suspend and resume are incomplete and need additional 5341 * work. It may or may not work in the current state. 5342 */ 5343 static void 5344 nv_suspend(nv_port_t *nvp) 5345 { 5346 NVLOG((NVDBG_INIT, nvp->nvp_ctlp, nvp, "nv_suspend()")); 5347 5348 mutex_enter(&nvp->nvp_mutex); 5349 5350 #ifdef SGPIO_SUPPORT 5351 nv_sgp_drive_disconnect(nvp->nvp_ctlp, SGP_CTLR_PORT_TO_DRV( 5352 nvp->nvp_ctlp->nvc_ctlr_num, nvp->nvp_port_num)); 5353 #endif 5354 5355 if (nvp->nvp_state & NV_PORT_INACTIVE) { 5356 mutex_exit(&nvp->nvp_mutex); 5357 5358 return; 5359 } 5360 5361 (*(nvp->nvp_ctlp->nvc_set_intr))(nvp, NV_INTR_DISABLE); 5362 5363 /* 5364 * power may have been removed to the port and the 5365 * drive, and/or a drive may have been added or removed. 5366 * Force a reset which will cause a probe and re-establish 5367 * any state needed on the drive. 
5368 * nv_reset(nvp); 5369 */ 5370 5371 mutex_exit(&nvp->nvp_mutex); 5372 } 5373 5374 5375 static void 5376 nv_copy_registers(nv_port_t *nvp, sata_device_t *sd, sata_pkt_t *spkt) 5377 { 5378 ddi_acc_handle_t bar5_hdl = nvp->nvp_ctlp->nvc_bar_hdl[5]; 5379 sata_cmd_t *scmd = &spkt->satapkt_cmd; 5380 ddi_acc_handle_t ctlhdl = nvp->nvp_ctl_hdl; 5381 ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl; 5382 uchar_t status; 5383 struct sata_cmd_flags flags; 5384 5385 NVLOG((NVDBG_INIT, nvp->nvp_ctlp, nvp, "nv_copy_registers()")); 5386 5387 sd->satadev_scr.sstatus = nv_get32(bar5_hdl, nvp->nvp_sstatus); 5388 sd->satadev_scr.serror = nv_get32(bar5_hdl, nvp->nvp_serror); 5389 sd->satadev_scr.scontrol = nv_get32(bar5_hdl, nvp->nvp_sctrl); 5390 5391 if (spkt == NULL) { 5392 5393 return; 5394 } 5395 5396 /* 5397 * in the error case, implicitly set the return of regs needed 5398 * for error handling. 5399 */ 5400 status = scmd->satacmd_status_reg = nv_get8(ctlhdl, 5401 nvp->nvp_altstatus); 5402 5403 flags = scmd->satacmd_flags; 5404 5405 if (status & SATA_STATUS_ERR) { 5406 flags.sata_copy_out_lba_low_msb = B_TRUE; 5407 flags.sata_copy_out_lba_mid_msb = B_TRUE; 5408 flags.sata_copy_out_lba_high_msb = B_TRUE; 5409 flags.sata_copy_out_lba_low_lsb = B_TRUE; 5410 flags.sata_copy_out_lba_mid_lsb = B_TRUE; 5411 flags.sata_copy_out_lba_high_lsb = B_TRUE; 5412 flags.sata_copy_out_error_reg = B_TRUE; 5413 flags.sata_copy_out_sec_count_msb = B_TRUE; 5414 flags.sata_copy_out_sec_count_lsb = B_TRUE; 5415 scmd->satacmd_status_reg = status; 5416 } 5417 5418 if (scmd->satacmd_addr_type & ATA_ADDR_LBA48) { 5419 5420 /* 5421 * set HOB so that high byte will be read 5422 */ 5423 nv_put8(ctlhdl, nvp->nvp_devctl, ATDC_HOB|ATDC_D3); 5424 5425 /* 5426 * get the requested high bytes 5427 */ 5428 if (flags.sata_copy_out_sec_count_msb) { 5429 scmd->satacmd_sec_count_msb = 5430 nv_get8(cmdhdl, nvp->nvp_count); 5431 } 5432 5433 if (flags.sata_copy_out_lba_low_msb) { 5434 scmd->satacmd_lba_low_msb = 5435 
nv_get8(cmdhdl, nvp->nvp_sect); 5436 } 5437 5438 if (flags.sata_copy_out_lba_mid_msb) { 5439 scmd->satacmd_lba_mid_msb = 5440 nv_get8(cmdhdl, nvp->nvp_lcyl); 5441 } 5442 5443 if (flags.sata_copy_out_lba_high_msb) { 5444 scmd->satacmd_lba_high_msb = 5445 nv_get8(cmdhdl, nvp->nvp_hcyl); 5446 } 5447 } 5448 5449 /* 5450 * disable HOB so that low byte is read 5451 */ 5452 nv_put8(ctlhdl, nvp->nvp_devctl, ATDC_D3); 5453 5454 /* 5455 * get the requested low bytes 5456 */ 5457 if (flags.sata_copy_out_sec_count_lsb) { 5458 scmd->satacmd_sec_count_lsb = nv_get8(cmdhdl, nvp->nvp_count); 5459 } 5460 5461 if (flags.sata_copy_out_lba_low_lsb) { 5462 scmd->satacmd_lba_low_lsb = nv_get8(cmdhdl, nvp->nvp_sect); 5463 } 5464 5465 if (flags.sata_copy_out_lba_mid_lsb) { 5466 scmd->satacmd_lba_mid_lsb = nv_get8(cmdhdl, nvp->nvp_lcyl); 5467 } 5468 5469 if (flags.sata_copy_out_lba_high_lsb) { 5470 scmd->satacmd_lba_high_lsb = nv_get8(cmdhdl, nvp->nvp_hcyl); 5471 } 5472 5473 /* 5474 * get the device register if requested 5475 */ 5476 if (flags.sata_copy_out_device_reg) { 5477 scmd->satacmd_device_reg = nv_get8(cmdhdl, nvp->nvp_drvhd); 5478 } 5479 5480 /* 5481 * get the error register if requested 5482 */ 5483 if (flags.sata_copy_out_error_reg) { 5484 scmd->satacmd_error_reg = nv_get8(cmdhdl, nvp->nvp_error); 5485 } 5486 } 5487 5488 5489 /* 5490 * Hot plug and remove interrupts can occur when the device is reset. Just 5491 * masking the interrupt doesn't always work well because if a 5492 * different interrupt arrives on the other port, the driver can still 5493 * end up checking the state of the other port and discover the hot 5494 * interrupt flag is set even though it was masked. Checking for recent 5495 * reset activity and then ignoring turns out to be the easiest way. 
5496 */ 5497 static void 5498 nv_report_add_remove(nv_port_t *nvp, int flags) 5499 { 5500 ddi_acc_handle_t bar5_hdl = nvp->nvp_ctlp->nvc_bar_hdl[5]; 5501 clock_t time_diff = ddi_get_lbolt() - nvp->nvp_reset_time; 5502 uint32_t sstatus; 5503 int i; 5504 5505 /* 5506 * If reset within last 1 second ignore. This should be 5507 * reworked and improved instead of having this somewhat 5508 * heavy handed clamping job. 5509 */ 5510 if (time_diff < drv_usectohz(NV_ONE_SEC)) { 5511 NVLOG((NVDBG_HOT, nvp->nvp_ctlp, nvp, "nv_report_add_remove()" 5512 "ignoring plug interrupt was %dms ago", 5513 TICK_TO_MSEC(time_diff))); 5514 5515 return; 5516 } 5517 5518 /* 5519 * wait up to 1ms for sstatus to settle and reflect the true 5520 * status of the port. Failure to do so can create confusion 5521 * in probe, where the incorrect sstatus value can still 5522 * persist. 5523 */ 5524 for (i = 0; i < 1000; i++) { 5525 sstatus = nv_get32(bar5_hdl, nvp->nvp_sstatus); 5526 5527 if ((flags == NV_PORT_HOTREMOVED) && 5528 ((sstatus & SSTATUS_DET_DEVPRE_PHYCOM) != 5529 SSTATUS_DET_DEVPRE_PHYCOM)) { 5530 break; 5531 } 5532 5533 if ((flags != NV_PORT_HOTREMOVED) && 5534 ((sstatus & SSTATUS_DET_DEVPRE_PHYCOM) == 5535 SSTATUS_DET_DEVPRE_PHYCOM)) { 5536 break; 5537 } 5538 drv_usecwait(1); 5539 } 5540 5541 NVLOG((NVDBG_HOT, nvp->nvp_ctlp, nvp, 5542 "sstatus took %i us for DEVPRE_PHYCOM to settle", i)); 5543 5544 if (flags == NV_PORT_HOTREMOVED) { 5545 NVLOG((NVDBG_HOT, nvp->nvp_ctlp, nvp, 5546 "nv_report_add_remove() hot removed")); 5547 nv_port_state_change(nvp, 5548 SATA_EVNT_DEVICE_DETACHED, 5549 SATA_ADDR_CPORT, 0); 5550 5551 nvp->nvp_state |= NV_PORT_HOTREMOVED; 5552 } else { 5553 NVLOG((NVDBG_HOT, nvp->nvp_ctlp, nvp, 5554 "nv_report_add_remove() hot plugged")); 5555 nv_port_state_change(nvp, SATA_EVNT_DEVICE_ATTACHED, 5556 SATA_ADDR_CPORT, 0); 5557 } 5558 } 5559 5560 /* 5561 * Get request sense data and stuff it the command's sense buffer. 
5562 * Start a request sense command in order to get sense data to insert 5563 * in the sata packet's rqsense buffer. The command completion 5564 * processing is in nv_intr_pkt_pio. 5565 * 5566 * The sata framework provides a function to allocate and set-up a 5567 * request sense packet command. The reasons it is not being used here is: 5568 * a) it cannot be called in an interrupt context and this function is 5569 * called in an interrupt context. 5570 * b) it allocates DMA resources that are not used here because this is 5571 * implemented using PIO. 5572 * 5573 * If, in the future, this is changed to use DMA, the sata framework should 5574 * be used to allocate and set-up the error retrieval (request sense) 5575 * command. 5576 */ 5577 static int 5578 nv_start_rqsense_pio(nv_port_t *nvp, nv_slot_t *nv_slotp) 5579 { 5580 sata_pkt_t *spkt = nv_slotp->nvslot_spkt; 5581 sata_cmd_t *satacmd = &spkt->satapkt_cmd; 5582 ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl; 5583 int cdb_len = spkt->satapkt_cmd.satacmd_acdb_len; 5584 5585 NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp, 5586 "nv_start_rqsense_pio: start")); 5587 5588 /* clear the local request sense buffer before starting the command */ 5589 bzero(nv_slotp->nvslot_rqsense_buff, SATA_ATAPI_RQSENSE_LEN); 5590 5591 /* Write the request sense PACKET command */ 5592 5593 /* select the drive */ 5594 nv_put8(cmdhdl, nvp->nvp_drvhd, satacmd->satacmd_device_reg); 5595 5596 /* make certain the drive selected */ 5597 if (nv_wait(nvp, SATA_STATUS_DRDY, SATA_STATUS_BSY, 5598 NV_SEC2USEC(5), 0) == B_FALSE) { 5599 NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp, 5600 "nv_start_rqsense_pio: drive select failed")); 5601 return (NV_FAILURE); 5602 } 5603 5604 /* set up the command */ 5605 nv_put8(cmdhdl, nvp->nvp_feature, 0); /* deassert DMA and OVL */ 5606 nv_put8(cmdhdl, nvp->nvp_hcyl, SATA_ATAPI_MAX_BYTES_PER_DRQ >> 8); 5607 nv_put8(cmdhdl, nvp->nvp_lcyl, SATA_ATAPI_MAX_BYTES_PER_DRQ & 0xff); 5608 nv_put8(cmdhdl, nvp->nvp_sect, 0); 5609 
nv_put8(cmdhdl, nvp->nvp_count, 0); /* no tag */ 5610 5611 /* initiate the command by writing the command register last */ 5612 nv_put8(cmdhdl, nvp->nvp_cmd, SATAC_PACKET); 5613 5614 /* Give the host ctlr time to do its thing, according to ATA/ATAPI */ 5615 NV_DELAY_NSEC(400); 5616 5617 /* 5618 * Wait for the device to indicate that it is ready for the command 5619 * ATAPI protocol state - HP0: Check_Status_A 5620 */ 5621 5622 if (nv_wait3(nvp, SATA_STATUS_DRQ, SATA_STATUS_BSY, /* okay */ 5623 SATA_STATUS_ERR, SATA_STATUS_BSY, /* cmd failed */ 5624 SATA_STATUS_DF, SATA_STATUS_BSY, /* drive failed */ 5625 4000000, 0) == B_FALSE) { 5626 if (nv_get8(cmdhdl, nvp->nvp_status) & 5627 (SATA_STATUS_ERR | SATA_STATUS_DF)) { 5628 NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp, 5629 "nv_start_rqsense_pio: rqsense dev error (HP0)")); 5630 } else { 5631 NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp, 5632 "nv_start_rqsense_pio: rqsense timeout (HP0)")); 5633 } 5634 5635 nv_copy_registers(nvp, &spkt->satapkt_device, spkt); 5636 nv_complete_io(nvp, spkt, 0); 5637 nv_reset(nvp); 5638 5639 return (NV_FAILURE); 5640 } 5641 5642 /* 5643 * Put the ATAPI command in the data register 5644 * ATAPI protocol state - HP1: Send_Packet 5645 */ 5646 5647 ddi_rep_put16(cmdhdl, (ushort_t *)nv_rqsense_cdb, 5648 (ushort_t *)nvp->nvp_data, 5649 (cdb_len >> 1), DDI_DEV_NO_AUTOINCR); 5650 5651 NVLOG((NVDBG_ATAPI, nvp->nvp_ctlp, nvp, 5652 "nv_start_rqsense_pio: exiting into HP3")); 5653 5654 return (NV_SUCCESS); 5655 } 5656 5657 /* 5658 * quiesce(9E) entry point. 5659 * 5660 * This function is called when the system is single-threaded at high 5661 * PIL with preemption disabled. Therefore, this function must not be 5662 * blocked. 5663 * 5664 * This function returns DDI_SUCCESS on success, or DDI_FAILURE on failure. 5665 * DDI_FAILURE indicates an error condition and should almost never happen. 
5666 */ 5667 static int 5668 nv_quiesce(dev_info_t *dip) 5669 { 5670 int port, instance = ddi_get_instance(dip); 5671 nv_ctl_t *nvc; 5672 5673 if ((nvc = (nv_ctl_t *)ddi_get_soft_state(nv_statep, instance)) == NULL) 5674 return (DDI_FAILURE); 5675 5676 for (port = 0; port < NV_MAX_PORTS(nvc); port++) { 5677 nv_port_t *nvp = &(nvc->nvc_port[port]); 5678 ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl; 5679 ddi_acc_handle_t bar5_hdl = nvp->nvp_ctlp->nvc_bar_hdl[5]; 5680 uint32_t sctrl; 5681 5682 /* 5683 * Stop the controllers from generating interrupts. 5684 */ 5685 (*(nvc->nvc_set_intr))(nvp, NV_INTR_DISABLE_NON_BLOCKING); 5686 5687 /* 5688 * clear signature registers 5689 */ 5690 nv_put8(cmdhdl, nvp->nvp_sect, 0); 5691 nv_put8(cmdhdl, nvp->nvp_lcyl, 0); 5692 nv_put8(cmdhdl, nvp->nvp_hcyl, 0); 5693 nv_put8(cmdhdl, nvp->nvp_count, 0); 5694 5695 nvp->nvp_signature = 0; 5696 nvp->nvp_type = 0; 5697 nvp->nvp_state |= NV_PORT_RESET; 5698 nvp->nvp_reset_time = ddi_get_lbolt(); 5699 nvp->nvp_link_lost_time = 0; 5700 5701 /* 5702 * assert reset in PHY by writing a 1 to bit 0 scontrol 5703 */ 5704 sctrl = nv_get32(bar5_hdl, nvp->nvp_sctrl); 5705 5706 nv_put32(bar5_hdl, nvp->nvp_sctrl, 5707 sctrl | SCONTROL_DET_COMRESET); 5708 5709 /* 5710 * wait 1ms 5711 */ 5712 drv_usecwait(1000); 5713 5714 /* 5715 * de-assert reset in PHY 5716 */ 5717 nv_put32(bar5_hdl, nvp->nvp_sctrl, sctrl); 5718 } 5719 5720 return (DDI_SUCCESS); 5721 } 5722 5723 5724 #ifdef SGPIO_SUPPORT 5725 /* 5726 * NVIDIA specific SGPIO LED support 5727 * Please refer to the NVIDIA documentation for additional details 5728 */ 5729 5730 /* 5731 * nv_sgp_led_init 5732 * Detect SGPIO support. If present, initialize. 
5733 */ 5734 static void 5735 nv_sgp_led_init(nv_ctl_t *nvc, ddi_acc_handle_t pci_conf_handle) 5736 { 5737 uint16_t csrp; /* SGPIO_CSRP from PCI config space */ 5738 uint32_t cbp; /* SGPIO_CBP from PCI config space */ 5739 nv_sgp_cmn_t *cmn; /* shared data structure */ 5740 char tqname[SGPIO_TQ_NAME_LEN]; 5741 extern caddr_t psm_map_phys_new(paddr_t, size_t, int); 5742 5743 /* 5744 * Initialize with appropriately invalid values in case this function 5745 * exits without initializing SGPIO (for example, there is no SGPIO 5746 * support). 5747 */ 5748 nvc->nvc_sgp_csr = 0; 5749 nvc->nvc_sgp_cbp = NULL; 5750 5751 /* 5752 * Only try to initialize SGPIO LED support if this property 5753 * indicates it should be. 5754 */ 5755 if (ddi_getprop(DDI_DEV_T_ANY, nvc->nvc_dip, DDI_PROP_DONTPASS, 5756 "enable-sgpio-leds", 0) != 1) 5757 return; 5758 5759 /* 5760 * CK804 can pass the sgpio_detect test even though it does not support 5761 * SGPIO, so don't even look at a CK804. 5762 */ 5763 if (nvc->nvc_mcp5x_flag != B_TRUE) 5764 return; 5765 5766 /* 5767 * The NVIDIA SGPIO support can nominally handle 6 drives. 5768 * However, the current implementation only supports 4 drives. 5769 * With two drives per controller, that means only look at the 5770 * first two controllers. 
5771 */ 5772 if ((nvc->nvc_ctlr_num != 0) && (nvc->nvc_ctlr_num != 1)) 5773 return; 5774 5775 /* confirm that the SGPIO registers are there */ 5776 if (nv_sgp_detect(pci_conf_handle, &csrp, &cbp) != NV_SUCCESS) { 5777 NVLOG((NVDBG_INIT, nvc, NULL, 5778 "SGPIO registers not detected")); 5779 return; 5780 } 5781 5782 /* save off the SGPIO_CSR I/O address */ 5783 nvc->nvc_sgp_csr = csrp; 5784 5785 /* map in Command Block */ 5786 nvc->nvc_sgp_cbp = (nv_sgp_cb_t *)psm_map_phys_new(cbp, 5787 sizeof (nv_sgp_cb_t), PROT_READ | PROT_WRITE); 5788 5789 /* initialize the SGPIO h/w */ 5790 if (nv_sgp_init(nvc) == NV_FAILURE) { 5791 nv_cmn_err(CE_WARN, nvc, NULL, 5792 "!Unable to initialize SGPIO"); 5793 } 5794 5795 if (nvc->nvc_ctlr_num == 0) { 5796 /* 5797 * Controller 0 on the MCP5X/IO55 initialized the SGPIO 5798 * and the data that is shared between the controllers. 5799 * The clever thing to do would be to let the first controller 5800 * that comes up be the one that initializes all this. 5801 * However, SGPIO state is not necessarily zeroed between 5802 * between OS reboots, so there might be old data there. 
5803 */ 5804 5805 /* allocate shared space */ 5806 cmn = (nv_sgp_cmn_t *)kmem_zalloc(sizeof (nv_sgp_cmn_t), 5807 KM_SLEEP); 5808 if (cmn == NULL) { 5809 nv_cmn_err(CE_WARN, nvc, NULL, 5810 "!Failed to allocate shared data"); 5811 return; 5812 } 5813 5814 nvc->nvc_sgp_cmn = cmn; 5815 5816 /* initialize the shared data structure */ 5817 cmn->nvs_magic = SGPIO_MAGIC; 5818 cmn->nvs_in_use = (1 << nvc->nvc_ctlr_num); 5819 cmn->nvs_connected = 0; 5820 cmn->nvs_activity = 0; 5821 5822 mutex_init(&cmn->nvs_slock, NULL, MUTEX_DRIVER, NULL); 5823 mutex_init(&cmn->nvs_tlock, NULL, MUTEX_DRIVER, NULL); 5824 cv_init(&cmn->nvs_cv, NULL, CV_DRIVER, NULL); 5825 5826 /* put the address in the SGPIO scratch register */ 5827 #if defined(__amd64) 5828 nvc->nvc_sgp_cbp->sgpio_sr = (uint64_t)cmn; 5829 #else 5830 nvc->nvc_sgp_cbp->sgpio_sr = (uint32_t)cmn; 5831 #endif 5832 5833 /* start the activity LED taskq */ 5834 5835 /* 5836 * The taskq name should be unique and the time 5837 */ 5838 (void) snprintf(tqname, SGPIO_TQ_NAME_LEN, 5839 "nvSataLed%x", (short)(ddi_get_lbolt() & 0xffff)); 5840 cmn->nvs_taskq = ddi_taskq_create(nvc->nvc_dip, tqname, 1, 5841 TASKQ_DEFAULTPRI, 0); 5842 if (cmn->nvs_taskq == NULL) { 5843 cmn->nvs_taskq_delay = 0; 5844 nv_cmn_err(CE_WARN, nvc, NULL, 5845 "!Failed to start activity LED taskq"); 5846 } else { 5847 cmn->nvs_taskq_delay = SGPIO_LOOP_WAIT_USECS; 5848 (void) ddi_taskq_dispatch(cmn->nvs_taskq, 5849 nv_sgp_activity_led_ctl, nvc, DDI_SLEEP); 5850 } 5851 5852 } else if (nvc->nvc_ctlr_num == 1) { 5853 /* 5854 * Controller 1 confirms that SGPIO has been initialized 5855 * and, if so, try to get the shared data pointer, otherwise 5856 * get the shared data pointer when accessing the data. 5857 */ 5858 5859 if (nvc->nvc_sgp_cbp->sgpio_sr != 0) { 5860 cmn = (nv_sgp_cmn_t *)nvc->nvc_sgp_cbp->sgpio_sr; 5861 5862 /* 5863 * It looks like a pointer, but is it the shared data? 
5864 */ 5865 if (cmn->nvs_magic == SGPIO_MAGIC) { 5866 nvc->nvc_sgp_cmn = cmn; 5867 5868 cmn->nvs_in_use |= (1 << nvc->nvc_ctlr_num); 5869 } 5870 } 5871 } 5872 } 5873 5874 /* 5875 * nv_sgp_detect 5876 * Read the SGPIO_CSR and SGPIO_CBP values from PCI config space and 5877 * report back whether both were readable. 5878 */ 5879 static int 5880 nv_sgp_detect(ddi_acc_handle_t pci_conf_handle, uint16_t *csrpp, 5881 uint32_t *cbpp) 5882 { 5883 /* get the SGPIO_CSRP */ 5884 *csrpp = pci_config_get16(pci_conf_handle, SGPIO_CSRP); 5885 if (*csrpp == 0) { 5886 return (NV_FAILURE); 5887 } 5888 5889 /* SGPIO_CSRP is good, get the SGPIO_CBP */ 5890 *cbpp = pci_config_get32(pci_conf_handle, SGPIO_CBP); 5891 if (*cbpp == 0) { 5892 return (NV_FAILURE); 5893 } 5894 5895 /* SGPIO_CBP is good, so we must support SGPIO */ 5896 return (NV_SUCCESS); 5897 } 5898 5899 /* 5900 * nv_sgp_init 5901 * Initialize SGPIO. The process is specified by NVIDIA. 5902 */ 5903 static int 5904 nv_sgp_init(nv_ctl_t *nvc) 5905 { 5906 uint32_t status; 5907 int drive_count; 5908 5909 /* 5910 * if SGPIO status set to SGPIO_STATE_RESET, logic has been 5911 * reset and needs to be initialized. 5912 */ 5913 status = nv_sgp_csr_read(nvc); 5914 if (SGPIO_CSR_SSTAT(status) == SGPIO_STATE_RESET) { 5915 if (nv_sgp_init_cmd(nvc) == NV_FAILURE) { 5916 /* reset and try again */ 5917 nv_sgp_reset(nvc); 5918 if (nv_sgp_init_cmd(nvc) == NV_FAILURE) { 5919 NVLOG((NVDBG_ALWAYS, nvc, NULL, 5920 "SGPIO init failed")); 5921 return (NV_FAILURE); 5922 } 5923 } 5924 } 5925 5926 /* 5927 * NVIDIA recommends reading the supported drive count even 5928 * though they also indicate that it is 4 at this time. 
5929 */ 5930 drive_count = SGP_CR0_DRV_CNT(nvc->nvc_sgp_cbp->sgpio_cr0); 5931 if (drive_count != SGPIO_DRV_CNT_VALUE) { 5932 NVLOG((NVDBG_ALWAYS, nvc, NULL, 5933 "SGPIO reported undocumented drive count - %d", 5934 drive_count)); 5935 } 5936 5937 NVLOG((NVDBG_INIT, nvc, NULL, 5938 "initialized ctlr: %d csr: 0x%08x", 5939 nvc->nvc_ctlr_num, nvc->nvc_sgp_csr)); 5940 5941 return (NV_SUCCESS); 5942 } 5943 5944 static void 5945 nv_sgp_reset(nv_ctl_t *nvc) 5946 { 5947 uint32_t cmd; 5948 uint32_t status; 5949 5950 cmd = SGPIO_CMD_RESET; 5951 nv_sgp_csr_write(nvc, cmd); 5952 5953 status = nv_sgp_csr_read(nvc); 5954 5955 if (SGPIO_CSR_CSTAT(status) != SGPIO_CMD_OK) { 5956 NVLOG((NVDBG_ALWAYS, nvc, NULL, 5957 "SGPIO reset failed: CSR - 0x%x", status)); 5958 } 5959 } 5960 5961 static int 5962 nv_sgp_init_cmd(nv_ctl_t *nvc) 5963 { 5964 int seq; 5965 hrtime_t start, end; 5966 uint32_t status; 5967 uint32_t cmd; 5968 5969 /* get the old sequence value */ 5970 status = nv_sgp_csr_read(nvc); 5971 seq = SGPIO_CSR_SEQ(status); 5972 5973 /* check the state since we have the info anyway */ 5974 if (SGPIO_CSR_SSTAT(status) != SGPIO_STATE_OPERATIONAL) { 5975 NVLOG((NVDBG_ALWAYS, nvc, NULL, 5976 "SGPIO init_cmd: state not operational")); 5977 } 5978 5979 /* issue command */ 5980 cmd = SGPIO_CSR_CMD_SET(SGPIO_CMD_READ_PARAMS); 5981 nv_sgp_csr_write(nvc, cmd); 5982 5983 DTRACE_PROBE2(sgpio__cmd, int, cmd, int, status); 5984 5985 /* poll for completion */ 5986 start = gethrtime(); 5987 end = start + NV_SGP_CMD_TIMEOUT; 5988 for (;;) { 5989 status = nv_sgp_csr_read(nvc); 5990 5991 /* break on error */ 5992 if (SGPIO_CSR_CSTAT(status) == SGPIO_CMD_ERROR) 5993 break; 5994 5995 /* break on command completion (seq changed) */ 5996 if (SGPIO_CSR_SEQ(status) != seq) { 5997 if (SGPIO_CSR_CSTAT(status) == SGPIO_CMD_ACTIVE) { 5998 NVLOG((NVDBG_ALWAYS, nvc, NULL, 5999 "Seq changed but command still active")); 6000 } 6001 6002 break; 6003 } 6004 6005 /* Wait 400 ns and try again */ 6006 
NV_DELAY_NSEC(400); 6007 6008 if (gethrtime() > end) 6009 break; 6010 } 6011 6012 if (SGPIO_CSR_CSTAT(status) == SGPIO_CMD_OK) 6013 return (NV_SUCCESS); 6014 6015 return (NV_FAILURE); 6016 } 6017 6018 static int 6019 nv_sgp_check_set_cmn(nv_ctl_t *nvc) 6020 { 6021 nv_sgp_cmn_t *cmn; 6022 6023 if (nvc->nvc_sgp_cbp == NULL) 6024 return (NV_FAILURE); 6025 6026 /* check to see if Scratch Register is set */ 6027 if (nvc->nvc_sgp_cbp->sgpio_sr != 0) { 6028 nvc->nvc_sgp_cmn = 6029 (nv_sgp_cmn_t *)nvc->nvc_sgp_cbp->sgpio_sr; 6030 6031 if (nvc->nvc_sgp_cmn->nvs_magic != SGPIO_MAGIC) 6032 return (NV_FAILURE); 6033 6034 cmn = nvc->nvc_sgp_cmn; 6035 6036 mutex_enter(&cmn->nvs_slock); 6037 cmn->nvs_in_use |= (1 << nvc->nvc_ctlr_num); 6038 mutex_exit(&cmn->nvs_slock); 6039 6040 return (NV_SUCCESS); 6041 } 6042 6043 return (NV_FAILURE); 6044 } 6045 6046 /* 6047 * nv_sgp_csr_read 6048 * This is just a 32-bit port read from the value that was obtained from the 6049 * PCI config space. 6050 * 6051 * XXX It was advised to use the in[bwl] function for this, even though they 6052 * are obsolete interfaces. 6053 */ 6054 static int 6055 nv_sgp_csr_read(nv_ctl_t *nvc) 6056 { 6057 return (inl(nvc->nvc_sgp_csr)); 6058 } 6059 6060 /* 6061 * nv_sgp_csr_write 6062 * This is just a 32-bit I/O port write. The port number was obtained from 6063 * the PCI config space. 6064 * 6065 * XXX It was advised to use the out[bwl] function for this, even though they 6066 * are obsolete interfaces. 
6067 */ 6068 static void 6069 nv_sgp_csr_write(nv_ctl_t *nvc, uint32_t val) 6070 { 6071 outl(nvc->nvc_sgp_csr, val); 6072 } 6073 6074 /* 6075 * nv_sgp_write_data 6076 * Cause SGPIO to send Command Block data 6077 */ 6078 static int 6079 nv_sgp_write_data(nv_ctl_t *nvc) 6080 { 6081 hrtime_t start, end; 6082 uint32_t status; 6083 uint32_t cmd; 6084 6085 /* issue command */ 6086 cmd = SGPIO_CSR_CMD_SET(SGPIO_CMD_WRITE_DATA); 6087 nv_sgp_csr_write(nvc, cmd); 6088 6089 /* poll for completion */ 6090 start = gethrtime(); 6091 end = start + NV_SGP_CMD_TIMEOUT; 6092 for (;;) { 6093 status = nv_sgp_csr_read(nvc); 6094 6095 /* break on error completion */ 6096 if (SGPIO_CSR_CSTAT(status) == SGPIO_CMD_ERROR) 6097 break; 6098 6099 /* break on successful completion */ 6100 if (SGPIO_CSR_CSTAT(status) == SGPIO_CMD_OK) 6101 break; 6102 6103 /* Wait 400 ns and try again */ 6104 NV_DELAY_NSEC(400); 6105 6106 if (gethrtime() > end) 6107 break; 6108 } 6109 6110 if (SGPIO_CSR_CSTAT(status) == SGPIO_CMD_OK) 6111 return (NV_SUCCESS); 6112 6113 return (NV_FAILURE); 6114 } 6115 6116 /* 6117 * nv_sgp_activity_led_ctl 6118 * This is run as a taskq. It wakes up at a fixed interval and checks to 6119 * see if any of the activity LEDs need to be changed. 
 */
static void
nv_sgp_activity_led_ctl(void *arg)
{
	nv_ctl_t *nvc = (nv_ctl_t *)arg;
	nv_sgp_cmn_t *cmn;		/* shared cross-controller state */
	volatile nv_sgp_cb_t *cbp;	/* SGPIO command block (h/w shared) */
	clock_t ticks;			/* sleep interval; 0 means exit loop */
	uint8_t drv_leds;		/* per-drive LED bits being computed */
	uint32_t old_leds;		/* snapshot of all drives' LED state */
	uint32_t new_led_state;		/* accumulated new state for all drives */
	int i;

	cmn = nvc->nvc_sgp_cmn;
	cbp = nvc->nvc_sgp_cbp;

	do {
		/* save off the old state of all of the LEDs */
		old_leds = cbp->sgpio0_tr;

		DTRACE_PROBE3(sgpio__activity__state,
		    int, cmn->nvs_connected, int, cmn->nvs_activity,
		    int, old_leds);

		new_led_state = 0;

		/* for each drive */
		for (i = 0; i < SGPIO_DRV_CNT_VALUE; i++) {

			/* get the current state of the LEDs for the drive */
			drv_leds = SGPIO0_TR_DRV(old_leds, i);

			if ((cmn->nvs_connected & (1 << i)) == 0) {
				/* if not connected, turn off activity */
				drv_leds &= ~TR_ACTIVE_MASK;
				drv_leds |= TR_ACTIVE_SET(TR_ACTIVE_DISABLE);

				new_led_state &= SGPIO0_TR_DRV_CLR(i);
				new_led_state |=
				    SGPIO0_TR_DRV_SET(drv_leds, i);

				continue;
			}

			if ((cmn->nvs_activity & (1 << i)) == 0) {
				/* connected, but not active: LED solid on */
				drv_leds &= ~TR_ACTIVE_MASK;
				drv_leds |= TR_ACTIVE_SET(TR_ACTIVE_ENABLE);

				new_led_state &= SGPIO0_TR_DRV_CLR(i);
				new_led_state |=
				    SGPIO0_TR_DRV_SET(drv_leds, i);

				continue;
			}

			/*
			 * connected and active: toggle the LED each pass
			 * so that it blinks at the loop interval rate
			 */
			if (TR_ACTIVE(drv_leds) == TR_ACTIVE_ENABLE) {
				/* was enabled, so disable */
				drv_leds &= ~TR_ACTIVE_MASK;
				drv_leds |=
				    TR_ACTIVE_SET(TR_ACTIVE_DISABLE);

				new_led_state &= SGPIO0_TR_DRV_CLR(i);
				new_led_state |=
				    SGPIO0_TR_DRV_SET(drv_leds, i);
			} else {
				/* was disabled, so enable */
				drv_leds &= ~TR_ACTIVE_MASK;
				drv_leds |= TR_ACTIVE_SET(TR_ACTIVE_ENABLE);

				new_led_state &= SGPIO0_TR_DRV_CLR(i);
				new_led_state |=
				    SGPIO0_TR_DRV_SET(drv_leds, i);
			}

			/*
			 * clear the activity bit
			 * if there is drive activity again within the
			 * loop interval (now 1/16 second), nvs_activity
			 * will be reset and the "connected and active"
			 * condition above will cause the LED to blink
			 * off and on at the loop interval rate.  The
			 * rate may be increased (interval shortened) as
			 * long as it is not more than 1/30 second.
			 */
			mutex_enter(&cmn->nvs_slock);
			cmn->nvs_activity &= ~(1 << i);
			mutex_exit(&cmn->nvs_slock);
		}

		DTRACE_PROBE1(sgpio__new__led__state, int, new_led_state);

		/*
		 * write out LED values: only the activity bits are replaced
		 * (TR_ACTIVE_MASK_ALL) so Locate/Error state set elsewhere
		 * is preserved; writing SGP_CR0_ENABLE_MASK re-arms the
		 * command block before the WRITE_DATA command is issued.
		 */

		mutex_enter(&cmn->nvs_slock);
		cbp->sgpio0_tr &= ~TR_ACTIVE_MASK_ALL;
		cbp->sgpio0_tr |= new_led_state;
		cbp->sgpio_cr0 = SGP_CR0_ENABLE_MASK;
		mutex_exit(&cmn->nvs_slock);

		if (nv_sgp_write_data(nvc) == NV_FAILURE) {
			NVLOG((NVDBG_VERBOSE, nvc, NULL,
			    "nv_sgp_write_data failure updating active LED"));
		}

		/*
		 * now rest for the interval; nv_sgp_cleanup() sets
		 * nvs_taskq_delay to 0 and broadcasts nvs_cv to make this
		 * loop terminate (ticks becomes 0)
		 */
		mutex_enter(&cmn->nvs_tlock);
		ticks = drv_usectohz(cmn->nvs_taskq_delay);
		if (ticks > 0)
			(void) cv_timedwait(&cmn->nvs_cv, &cmn->nvs_tlock,
			    ddi_get_lbolt() + ticks);
		mutex_exit(&cmn->nvs_tlock);
	} while (ticks > 0);
}

/*
 * nv_sgp_drive_connect
 *	Set the flag used to indicate that the drive is attached to the HBA.
 *	Used to let the taskq know that it should turn the Activity LED on.
 */
static void
nv_sgp_drive_connect(nv_ctl_t *nvc, int drive)
{
	nv_sgp_cmn_t *cmn;

	/* shared data may not be reachable yet; silently do nothing */
	if (nv_sgp_check_set_cmn(nvc) == NV_FAILURE)
		return;
	cmn = nvc->nvc_sgp_cmn;

	mutex_enter(&cmn->nvs_slock);
	cmn->nvs_connected |= (1 << drive);
	mutex_exit(&cmn->nvs_slock);
}

/*
 * nv_sgp_drive_disconnect
 *	Clears the flag used to indicate that the drive is no longer attached
 *	to the HBA.  Used to let the taskq know that it should turn the
 *	Activity LED off.  The flag that indicates that the drive is in use is
 *	also cleared.
 */
static void
nv_sgp_drive_disconnect(nv_ctl_t *nvc, int drive)
{
	nv_sgp_cmn_t *cmn;

	/* shared data may not be reachable; silently do nothing */
	if (nv_sgp_check_set_cmn(nvc) == NV_FAILURE)
		return;
	cmn = nvc->nvc_sgp_cmn;

	mutex_enter(&cmn->nvs_slock);
	cmn->nvs_connected &= ~(1 << drive);
	cmn->nvs_activity &= ~(1 << drive);
	mutex_exit(&cmn->nvs_slock);
}

/*
 * nv_sgp_drive_active
 *	Sets the flag used to indicate that the drive has been accessed and the
 *	LED should be flicked off, then on.  It is cleared at a fixed time
 *	interval by the LED taskq and set by the sata command start.
 */
static void
nv_sgp_drive_active(nv_ctl_t *nvc, int drive)
{
	nv_sgp_cmn_t *cmn;

	/* shared data may not be reachable; silently do nothing */
	if (nv_sgp_check_set_cmn(nvc) == NV_FAILURE)
		return;
	cmn = nvc->nvc_sgp_cmn;

	DTRACE_PROBE1(sgpio__active, int, drive);

	/* an active drive is implicitly connected as well */
	mutex_enter(&cmn->nvs_slock);
	cmn->nvs_connected |= (1 << drive);
	cmn->nvs_activity |= (1 << drive);
	mutex_exit(&cmn->nvs_slock);
}


/*
 * nv_sgp_locate
 *	Turns the Locate/OK2RM LED off or on for a particular drive.  State is
 *	maintained in the SGPIO Command Block.
 */
static void
nv_sgp_locate(nv_ctl_t *nvc, int drive, int value)
{
	uint8_t leds;			/* this drive's LED bits */
	volatile nv_sgp_cb_t *cb = nvc->nvc_sgp_cbp;
	nv_sgp_cmn_t *cmn;

	/* shared data may not be reachable; silently do nothing */
	if (nv_sgp_check_set_cmn(nvc) == NV_FAILURE)
		return;
	cmn = nvc->nvc_sgp_cmn;

	/* reject out-of-range drive numbers */
	if ((drive < 0) || (drive >= SGPIO_DRV_CNT_VALUE))
		return;

	DTRACE_PROBE2(sgpio__locate, int, drive, int, value);

	mutex_enter(&cmn->nvs_slock);

	/* replace only the Locate bits, preserving Activity/Error state */
	leds = SGPIO0_TR_DRV(cb->sgpio0_tr, drive);

	leds &= ~TR_LOCATE_MASK;
	leds |= TR_LOCATE_SET(value);

	cb->sgpio0_tr &= SGPIO0_TR_DRV_CLR(drive);
	cb->sgpio0_tr |= SGPIO0_TR_DRV_SET(leds, drive);

	/* re-arm the command block before issuing WRITE_DATA */
	cb->sgpio_cr0 = SGP_CR0_ENABLE_MASK;

	mutex_exit(&cmn->nvs_slock);

	if (nv_sgp_write_data(nvc) == NV_FAILURE) {
		nv_cmn_err(CE_WARN, nvc, NULL,
		    "!nv_sgp_write_data failure updating OK2RM/Locate LED");
	}
}

/*
 * nv_sgp_error
 *	Turns the Error/Failure LED off or on for a particular drive.  State is
 *	maintained in the SGPIO Command Block.
 */
static void
nv_sgp_error(nv_ctl_t *nvc, int drive, int value)
{
	uint8_t leds;			/* this drive's LED bits */
	volatile nv_sgp_cb_t *cb = nvc->nvc_sgp_cbp;
	nv_sgp_cmn_t *cmn;

	/* shared data may not be reachable; silently do nothing */
	if (nv_sgp_check_set_cmn(nvc) == NV_FAILURE)
		return;
	cmn = nvc->nvc_sgp_cmn;

	/* reject out-of-range drive numbers */
	if ((drive < 0) || (drive >= SGPIO_DRV_CNT_VALUE))
		return;

	DTRACE_PROBE2(sgpio__error, int, drive, int, value);

	mutex_enter(&cmn->nvs_slock);

	/* replace only the Error bits, preserving Activity/Locate state */
	leds = SGPIO0_TR_DRV(cb->sgpio0_tr, drive);

	leds &= ~TR_ERROR_MASK;
	leds |= TR_ERROR_SET(value);

	cb->sgpio0_tr &= SGPIO0_TR_DRV_CLR(drive);
	cb->sgpio0_tr |= SGPIO0_TR_DRV_SET(leds, drive);

	/* re-arm the command block before issuing WRITE_DATA */
	cb->sgpio_cr0 = SGP_CR0_ENABLE_MASK;

	mutex_exit(&cmn->nvs_slock);

	if (nv_sgp_write_data(nvc) == NV_FAILURE) {
		nv_cmn_err(CE_WARN, nvc, NULL,
		    "!nv_sgp_write_data failure updating Fail/Error LED");
	}
}

/*
 * nv_sgp_cleanup
 *	Tear down this controller's SGPIO LED state: turn off its two
 *	activity LEDs, drop its nvs_in_use bit, and — when this is the last
 *	controller using the shared data — stop the LED taskq, turn off all
 *	LEDs, destroy the synchronization objects, and free the shared
 *	structure.  Finally unmap the SGPIO Command Block.
 */
static void
nv_sgp_cleanup(nv_ctl_t *nvc)
{
	int drive;
	uint8_t drv_leds;
	uint32_t led_state;
	volatile nv_sgp_cb_t *cb = nvc->nvc_sgp_cbp;
	nv_sgp_cmn_t *cmn = nvc->nvc_sgp_cmn;
	extern void psm_unmap_phys(caddr_t, size_t);

	/*
	 * If the SGPIO command block isn't mapped or the shared data
	 * structure isn't present in this instance, there isn't much that
	 * can be cleaned up.
	 */
	if ((cb == NULL) || (cmn == NULL))
		return;

	/* turn off activity LEDs for this controller */
	drv_leds = TR_ACTIVE_SET(TR_ACTIVE_DISABLE);

	/* get the existing LED state */
	led_state = cb->sgpio0_tr;

	/* turn off port 0 */
	drive = SGP_CTLR_PORT_TO_DRV(nvc->nvc_ctlr_num, 0);
	led_state &= SGPIO0_TR_DRV_CLR(drive);
	led_state |= SGPIO0_TR_DRV_SET(drv_leds, drive);

	/* turn off port 1 */
	drive = SGP_CTLR_PORT_TO_DRV(nvc->nvc_ctlr_num, 1);
	led_state &= SGPIO0_TR_DRV_CLR(drive);
	led_state |= SGPIO0_TR_DRV_SET(drv_leds, drive);

	/*
	 * set the new led state, which should turn off this ctrl's LEDs
	 * NOTE(review): led_state is computed above but never written back
	 * to cb->sgpio0_tr before the WRITE_DATA is issued — confirm
	 * whether a "cb->sgpio0_tr = led_state;" assignment was intended
	 * here.
	 */
	cb->sgpio_cr0 = SGP_CR0_ENABLE_MASK;
	(void) nv_sgp_write_data(nvc);

	/* clear the controller's in use bit */
	mutex_enter(&cmn->nvs_slock);
	cmn->nvs_in_use &= ~(1 << nvc->nvc_ctlr_num);
	mutex_exit(&cmn->nvs_slock);

	/*
	 * nvs_in_use is re-read here without the lock; presumably safe
	 * because controllers detach one at a time — verify against the
	 * detach path
	 */
	if (cmn->nvs_in_use == 0) {
		/* if all "in use" bits cleared, take everything down */

		if (cmn->nvs_taskq != NULL) {
			/* allow activity taskq to exit */
			cmn->nvs_taskq_delay = 0;
			cv_broadcast(&cmn->nvs_cv);

			/* then destroy it (waits for the task to finish) */
			ddi_taskq_destroy(cmn->nvs_taskq);
		}

		/* turn off all of the LEDs */
		cb->sgpio0_tr = 0;
		cb->sgpio_cr0 = SGP_CR0_ENABLE_MASK;
		(void) nv_sgp_write_data(nvc);

		/* clear the scratch register so no one finds stale cmn */
		cb->sgpio_sr = NULL;

		/* free resources */
		cv_destroy(&cmn->nvs_cv);
		mutex_destroy(&cmn->nvs_tlock);
		mutex_destroy(&cmn->nvs_slock);

		kmem_free(nvc->nvc_sgp_cmn, sizeof (nv_sgp_cmn_t));
	}

	nvc->nvc_sgp_cmn = NULL;

	/* unmap the SGPIO Command Block */
	psm_unmap_phys((caddr_t)nvc->nvc_sgp_cbp, sizeof (nv_sgp_cb_t));
}
#endif /* SGPIO_SUPPORT */