1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 22 /* Copyright 2010 QLogic Corporation */ 23 24 /* 25 * Copyright 2010 Sun Microsystems, Inc. All rights reserved. 26 * Use is subject to license terms. 27 */ 28 29 #pragma ident "Copyright 2010 QLogic Corporation; ql_api.c" 30 31 /* 32 * ISP2xxx Solaris Fibre Channel Adapter (FCA) driver source file. 33 * 34 * *********************************************************************** 35 * * ** 36 * * NOTICE ** 37 * * COPYRIGHT (C) 1996-2010 QLOGIC CORPORATION ** 38 * * ALL RIGHTS RESERVED ** 39 * * ** 40 * *********************************************************************** 41 * 42 */ 43 44 #include <ql_apps.h> 45 #include <ql_api.h> 46 #include <ql_debug.h> 47 #include <ql_init.h> 48 #include <ql_iocb.h> 49 #include <ql_ioctl.h> 50 #include <ql_isr.h> 51 #include <ql_mbx.h> 52 #include <ql_xioctl.h> 53 54 /* 55 * Solaris external defines. 56 */ 57 extern pri_t minclsyspri; 58 extern pri_t maxclsyspri; 59 60 /* 61 * dev_ops functions prototypes 62 */ 63 static int ql_getinfo(dev_info_t *, ddi_info_cmd_t, void *, void **); 64 static int ql_attach(dev_info_t *, ddi_attach_cmd_t); 65 static int ql_detach(dev_info_t *, ddi_detach_cmd_t); 66 static int ql_power(dev_info_t *, int, int); 67 static int ql_quiesce(dev_info_t *); 68 69 /* 70 * FCA functions prototypes exported by means of the transport table 71 */ 72 static opaque_t ql_bind_port(dev_info_t *, fc_fca_port_info_t *, 73 fc_fca_bind_info_t *); 74 static void ql_unbind_port(opaque_t); 75 static int ql_init_pkt(opaque_t, fc_packet_t *, int); 76 static int ql_un_init_pkt(opaque_t, fc_packet_t *); 77 static int ql_els_send(opaque_t, fc_packet_t *); 78 static int ql_get_cap(opaque_t, char *, void *); 79 static int ql_set_cap(opaque_t, char *, void *); 80 static int ql_getmap(opaque_t, fc_lilpmap_t *); 81 static int ql_transport(opaque_t, fc_packet_t *); 82 static int ql_ub_alloc(opaque_t, uint64_t *, uint32_t, uint32_t *, uint32_t); 83 static int ql_ub_free(opaque_t, uint32_t, uint64_t *); 84 static int ql_ub_release(opaque_t, uint32_t, uint64_t *); 85 static int ql_abort(opaque_t, fc_packet_t *, int); 86 static int ql_reset(opaque_t, uint32_t); 87 static int ql_port_manage(opaque_t, fc_fca_pm_t *); 88 static opaque_t ql_get_device(opaque_t, fc_portid_t); 89 90 /* 91 * FCA Driver Support Function Prototypes. 
92 */ 93 static uint16_t ql_wait_outstanding(ql_adapter_state_t *); 94 static void ql_task_mgmt(ql_adapter_state_t *, ql_tgt_t *, fc_packet_t *, 95 ql_srb_t *); 96 static void ql_task_daemon(void *); 97 static void ql_task_thread(ql_adapter_state_t *); 98 static void ql_unsol_callback(ql_srb_t *); 99 static void ql_free_unsolicited_buffer(ql_adapter_state_t *, 100 fc_unsol_buf_t *); 101 static void ql_timer(void *); 102 static void ql_watchdog(ql_adapter_state_t *, uint32_t *, uint32_t *); 103 static void ql_cmd_timeout(ql_adapter_state_t *, ql_tgt_t *q, ql_srb_t *, 104 uint32_t *, uint32_t *); 105 static void ql_halt(ql_adapter_state_t *, int); 106 static int ql_els_plogi(ql_adapter_state_t *, fc_packet_t *); 107 static int ql_els_flogi(ql_adapter_state_t *, fc_packet_t *); 108 static int ql_els_logo(ql_adapter_state_t *, fc_packet_t *); 109 static int ql_els_prli(ql_adapter_state_t *, fc_packet_t *); 110 static int ql_els_prlo(ql_adapter_state_t *, fc_packet_t *); 111 static int ql_els_adisc(ql_adapter_state_t *, fc_packet_t *); 112 static int ql_els_linit(ql_adapter_state_t *, fc_packet_t *); 113 static int ql_els_lpc(ql_adapter_state_t *, fc_packet_t *); 114 static int ql_els_lsts(ql_adapter_state_t *, fc_packet_t *); 115 static int ql_els_scr(ql_adapter_state_t *, fc_packet_t *); 116 static int ql_els_rscn(ql_adapter_state_t *, fc_packet_t *); 117 static int ql_els_farp_req(ql_adapter_state_t *, fc_packet_t *); 118 static int ql_els_farp_reply(ql_adapter_state_t *, fc_packet_t *); 119 static int ql_els_rls(ql_adapter_state_t *, fc_packet_t *); 120 static int ql_els_rnid(ql_adapter_state_t *, fc_packet_t *); 121 static int ql_login_port(ql_adapter_state_t *, port_id_t); 122 static int ql_login_fabric_port(ql_adapter_state_t *, ql_tgt_t *, uint16_t); 123 static int ql_logout_port(ql_adapter_state_t *, port_id_t); 124 static ql_lun_t *ql_lun_queue(ql_adapter_state_t *, ql_tgt_t *, uint16_t); 125 static int ql_fcp_scsi_cmd(ql_adapter_state_t *, fc_packet_t *, ql_srb_t *); 126 static int ql_fcp_ip_cmd(ql_adapter_state_t *, fc_packet_t *, ql_srb_t *); 127 static int ql_fc_services(ql_adapter_state_t *, fc_packet_t *); 128 static int ql_poll_cmd(ql_adapter_state_t *, ql_srb_t *, time_t); 129 static int ql_start_cmd(ql_adapter_state_t *, ql_tgt_t *, fc_packet_t *, 130 ql_srb_t *); 131 static int ql_kstat_update(kstat_t *, int); 132 static ql_adapter_state_t *ql_fca_handle_to_state(opaque_t); 133 static ql_adapter_state_t *ql_cmd_setup(opaque_t, fc_packet_t *, int *); 134 static int ql_program_flash_address(ql_adapter_state_t *, uint32_t, uint8_t); 135 static void ql_rst_aen(ql_adapter_state_t *); 136 static void ql_restart_queues(ql_adapter_state_t *); 137 static void ql_abort_queues(ql_adapter_state_t *); 138 static void ql_abort_device_queues(ql_adapter_state_t *ha, ql_tgt_t *tq); 139 static void ql_idle_check(ql_adapter_state_t *); 140 static int ql_loop_resync(ql_adapter_state_t *); 141 static size_t ql_24xx_ascii_fw_dump(ql_adapter_state_t *, caddr_t); 142 static size_t ql_2581_ascii_fw_dump(ql_adapter_state_t *, caddr_t); 143 static int ql_save_config_regs(dev_info_t *); 144 static int ql_restore_config_regs(dev_info_t *); 145 static int ql_process_rscn(ql_adapter_state_t *, fc_affected_id_t *); 146 static int ql_handle_rscn_update(ql_adapter_state_t *); 147 static int ql_send_plogi(ql_adapter_state_t *, ql_tgt_t *, ql_head_t *); 148 static int ql_process_rscn_for_device(ql_adapter_state_t *, ql_tgt_t *); 149 static int ql_dump_firmware(ql_adapter_state_t *); 150 static int 
ql_process_logo_for_device(ql_adapter_state_t *, ql_tgt_t *); 151 static int ql_2200_binary_fw_dump(ql_adapter_state_t *, ql_fw_dump_t *); 152 static int ql_2300_binary_fw_dump(ql_adapter_state_t *, ql_fw_dump_t *); 153 static int ql_24xx_binary_fw_dump(ql_adapter_state_t *, ql_24xx_fw_dump_t *); 154 static int ql_25xx_binary_fw_dump(ql_adapter_state_t *, ql_25xx_fw_dump_t *); 155 static int ql_81xx_binary_fw_dump(ql_adapter_state_t *, ql_81xx_fw_dump_t *); 156 static int ql_read_risc_ram(ql_adapter_state_t *, uint32_t, uint32_t, 157 void *); 158 static void *ql_read_regs(ql_adapter_state_t *, void *, void *, uint32_t, 159 uint8_t); 160 static int ql_busy_plogi(ql_adapter_state_t *, fc_packet_t *, ql_tgt_t *); 161 static int ql_suspend_adapter(ql_adapter_state_t *); 162 static int ql_bstr_to_dec(char *, uint32_t *, uint32_t); 163 static void ql_update_rscn(ql_adapter_state_t *, fc_affected_id_t *); 164 int ql_alloc_dma_resouce(ql_adapter_state_t *, dma_mem_t *, int); 165 static int ql_bind_dma_buffer(ql_adapter_state_t *, dma_mem_t *, int); 166 static void ql_unbind_dma_buffer(ql_adapter_state_t *, dma_mem_t *); 167 static void ql_timeout_insert(ql_adapter_state_t *, ql_tgt_t *, ql_srb_t *); 168 static int ql_setup_interrupts(ql_adapter_state_t *); 169 static int ql_setup_msi(ql_adapter_state_t *); 170 static int ql_setup_msix(ql_adapter_state_t *); 171 static int ql_setup_fixed(ql_adapter_state_t *); 172 static void ql_release_intr(ql_adapter_state_t *); 173 static void ql_disable_intr(ql_adapter_state_t *); 174 static int ql_legacy_intr(ql_adapter_state_t *); 175 static int ql_init_mutex(ql_adapter_state_t *); 176 static void ql_destroy_mutex(ql_adapter_state_t *); 177 static void ql_iidma(ql_adapter_state_t *); 178 179 static int ql_n_port_plogi(ql_adapter_state_t *); 180 static void ql_fca_isp_els_request(ql_adapter_state_t *, fc_packet_t *, 181 els_descriptor_t *); 182 static void ql_isp_els_request_ctor(els_descriptor_t *, 183 els_passthru_entry_t *); 184 static int ql_p2p_plogi(ql_adapter_state_t *, fc_packet_t *); 185 static int ql_wait_for_td_stop(ql_adapter_state_t *ha); 186 187 /* 188 * Global data 189 */ 190 static uint8_t ql_enable_pm = 1; 191 static int ql_flash_sbus_fpga = 0; 192 uint32_t ql_os_release_level; 193 uint32_t ql_disable_aif = 0; 194 uint32_t ql_disable_msi = 0; 195 uint32_t ql_disable_msix = 0; 196 197 /* Timer routine variables. */ 198 static timeout_id_t ql_timer_timeout_id = NULL; 199 static clock_t ql_timer_ticks; 200 201 /* Soft state head pointer. */ 202 void *ql_state = NULL; 203 204 /* Head adapter link. */ 205 ql_head_t ql_hba = { 206 NULL, 207 NULL 208 }; 209 210 /* Global hba index */ 211 uint32_t ql_gfru_hba_index = 1; 212 213 /* 214 * Some IP defines and globals 215 */ 216 uint32_t ql_ip_buffer_count = 128; 217 uint32_t ql_ip_low_water = 10; 218 uint8_t ql_ip_fast_post_count = 5; 219 static int ql_ip_mtu = 65280; /* equivalent to FCIPMTU */ 220 221 /* Device AL_PA to Device Head Queue index array. 
*/ 222 uint8_t ql_alpa_to_index[] = { 223 0x7e, 0x7d, 0x7c, 0x00, 0x7b, 0x01, 0x02, 0x03, 0x7a, 0x04, 224 0x05, 0x06, 0x07, 0x08, 0x09, 0x79, 0x78, 0x0a, 0x0b, 0x0c, 225 0x0d, 0x0e, 0x0f, 0x77, 0x76, 0x10, 0x11, 0x75, 0x12, 0x74, 226 0x73, 0x72, 0x13, 0x14, 0x15, 0x71, 0x16, 0x70, 0x6f, 0x6e, 227 0x17, 0x6d, 0x6c, 0x6b, 0x6a, 0x69, 0x68, 0x18, 0x19, 0x67, 228 0x66, 0x65, 0x64, 0x63, 0x62, 0x20, 0x21, 0x61, 0x60, 0x23, 229 0x5f, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x5e, 0x2a, 0x5d, 230 0x5c, 0x5b, 0x2b, 0x5a, 0x59, 0x58, 0x57, 0x56, 0x55, 0x2c, 231 0x2d, 0x54, 0x53, 0x52, 0x51, 0x50, 0x4f, 0x2e, 0x2f, 0x4e, 232 0x4d, 0x30, 0x4c, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x4b, 233 0x37, 0x4a, 0x49, 0x48, 0x38, 0x47, 0x46, 0x45, 0x44, 0x43, 234 0x42, 0x39, 0x3a, 0x41, 0x40, 0x3f, 0x3e, 0x3d, 0x3c, 0x3b, 235 0x3c, 0x3b, 0x3a, 0x3d, 0x39, 0x3e, 0x3f, 0x40, 0x38, 0x37, 236 0x36, 0x41, 0x35, 0x42, 0x43, 0x44, 0x34, 0x45, 0x46, 0x47, 237 0x48, 0x49, 0x4a, 0x33, 0x32, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, 238 0x50, 0x31, 0x30, 0x51, 0x52, 0x2f, 0x53, 0x2e, 0x2d, 0x2c, 239 0x54, 0x55, 0x56, 0x2b, 0x57, 0x2a, 0x29, 0x28, 0x58, 0x27, 240 0x26, 0x25, 0x24, 0x23, 0x22, 0x59, 0x5a, 0x21, 0x20, 0x1f, 241 0x1e, 0x1d, 0x1c, 0x5b, 0x5c, 0x1b, 0x1a, 0x5d, 0x19, 0x5e, 242 0x5f, 0x60, 0x61, 0x62, 0x63, 0x18, 0x64, 0x17, 0x16, 0x15, 243 0x65, 0x14, 0x13, 0x12, 0x11, 0x10, 0x0f, 0x66, 0x67, 0x0e, 244 0x0d, 0x0c, 0x0b, 0x0a, 0x09, 0x68, 0x69, 0x08, 0x07, 0x6a, 245 0x06, 0x6b, 0x6c, 0x6d, 0x05, 0x04, 0x03, 0x6e, 0x02, 0x6f, 246 0x70, 0x71, 0x01, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, 0x00, 247 0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7f, 0x80, 0x00, 0x01, 248 0x02, 0x03, 0x80, 0x7f, 0x7e, 0x04 249 }; 250 251 /* Device loop_id to ALPA array. */ 252 static uint8_t ql_index_to_alpa[] = { 253 0xef, 0xe8, 0xe4, 0xe2, 0xe1, 0xe0, 0xdc, 0xda, 0xd9, 0xd6, 254 0xd5, 0xd4, 0xd3, 0xd2, 0xd1, 0xce, 0xcd, 0xcc, 0xcb, 0xca, 255 0xc9, 0xc7, 0xc6, 0xc5, 0xc3, 0xbc, 0xba, 0xb9, 0xb6, 0xb5, 256 0xb4, 0xb3, 0xb2, 0xb1, 0xae, 0xad, 0xac, 0xab, 0xaa, 0xa9, 257 0xa7, 0xa6, 0xa5, 0xa3, 0x9f, 0x9e, 0x9d, 0x9b, 0x98, 0x97, 258 0x90, 0x8f, 0x88, 0x84, 0x82, 0x81, 0x80, 0x7c, 0x7a, 0x79, 259 0x76, 0x75, 0x74, 0x73, 0x72, 0x71, 0x6e, 0x6d, 0x6c, 0x6b, 260 0x6a, 0x69, 0x67, 0x66, 0x65, 0x63, 0x5c, 0x5a, 0x59, 0x56, 261 0x55, 0x54, 0x53, 0x52, 0x51, 0x4e, 0x4d, 0x4c, 0x4b, 0x4a, 262 0x49, 0x47, 0x46, 0x45, 0x43, 0x3c, 0x3a, 0x39, 0x36, 0x35, 263 0x34, 0x33, 0x32, 0x31, 0x2e, 0x2d, 0x2c, 0x2b, 0x2a, 0x29, 264 0x27, 0x26, 0x25, 0x23, 0x1f, 0x1e, 0x1d, 0x1b, 0x18, 0x17, 265 0x10, 0x0f, 0x08, 0x04, 0x02, 0x01 266 }; 267 268 /* 2200 register offsets */ 269 static reg_off_t reg_off_2200 = { 270 0x00, 0x02, 0x06, 0x08, 0x0a, 0x0c, 0x0e, 271 0x18, 0x18, 0x1A, 0x1A, /* req in, out, resp in, out */ 272 0x00, 0x00, /* intr info lo, hi */ 273 24, /* Number of mailboxes */ 274 /* Mailbox register offsets */ 275 0x10, 0x12, 0x14, 0x16, 0x18, 0x1a, 0x1c, 0x1e, 276 0xe0, 0xe2, 0xe4, 0xe6, 0xe8, 0xea, 0xec, 0xee, 277 0xf0, 0xf2, 0xf4, 0xf6, 0xf8, 0xfa, 0xfc, 0xfe, 278 /* 2200 does not have mailbox 24-31 */ 279 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 280 0x96, 0xa4, 0xb0, 0xb8, 0xc0, 0xcc, 0xce, 281 /* host to host sema */ 282 0x00, 283 /* 2200 does not have pri_req_in, pri_req_out, */ 284 /* atio_req_in, atio_req_out, io_base_addr */ 285 0xff, 0xff, 0xff, 0xff, 0xff 286 }; 287 288 /* 2300 register offsets */ 289 static reg_off_t reg_off_2300 = { 290 0x00, 0x02, 0x06, 0x08, 0x0a, 0x0c, 0x0e, 291 0x10, 0x12, 0x14, 0x16, /* req in, out, resp in, out */ 292 0x18, 0x1A, /* intr 
info lo, hi */ 293 32, /* Number of mailboxes */ 294 /* Mailbox register offsets */ 295 0x40, 0x42, 0x44, 0x46, 0x48, 0x4a, 0x4c, 0x4e, 296 0x50, 0x52, 0x54, 0x56, 0x58, 0x5a, 0x5c, 0x5e, 297 0x60, 0x62, 0x64, 0x66, 0x68, 0x6a, 0x6c, 0x6e, 298 0x70, 0x72, 0x74, 0x76, 0x78, 0x7a, 0x7c, 0x7e, 299 0x96, 0xa4, 0xb0, 0x80, 0xc0, 0xcc, 0xce, 300 /* host to host sema */ 301 0x1c, 302 /* 2300 does not have pri_req_in, pri_req_out, */ 303 /* atio_req_in, atio_req_out, io_base_addr */ 304 0xff, 0xff, 0xff, 0xff, 0xff 305 }; 306 307 /* 2400/2500 register offsets */ 308 reg_off_t reg_off_2400_2500 = { 309 0x00, 0x04, /* flash_address, flash_data */ 310 0x08, 0x0c, 0x10, /* ctrl_status, ictrl, istatus */ 311 /* 2400 does not have semaphore, nvram */ 312 0x14, 0x18, 313 0x1c, 0x20, 0x24, 0x28, /* req_in, req_out, resp_in, resp_out */ 314 0x44, 0x46, /* intr info lo, hi */ 315 32, /* Number of mailboxes */ 316 /* Mailbox register offsets */ 317 0x80, 0x82, 0x84, 0x86, 0x88, 0x8a, 0x8c, 0x8e, 318 0x90, 0x92, 0x94, 0x96, 0x98, 0x9a, 0x9c, 0x9e, 319 0xa0, 0xa2, 0xa4, 0xa6, 0xa8, 0xaa, 0xac, 0xae, 320 0xb0, 0xb2, 0xb4, 0xb6, 0xb8, 0xba, 0xbc, 0xbe, 321 /* 2400 does not have fpm_diag_config, pcr, mctr, fb_cmd */ 322 0xff, 0xff, 0xff, 0xff, 323 0x48, 0x4c, 0x50, /* hccr, gpiod, gpioe */ 324 0xff, /* host to host sema */ 325 0x2c, 0x30, /* pri_req_in, pri_req_out */ 326 0x3c, 0x40, /* atio_req_in, atio_req_out */ 327 0x54 /* io_base_addr */ 328 }; 329 330 /* mutex for protecting variables shared by all instances of the driver */ 331 kmutex_t ql_global_mutex; 332 kmutex_t ql_global_hw_mutex; 333 kmutex_t ql_global_el_mutex; 334 335 /* DMA access attribute structure. */ 336 static ddi_device_acc_attr_t ql_dev_acc_attr = { 337 DDI_DEVICE_ATTR_V0, 338 DDI_STRUCTURE_LE_ACC, 339 DDI_STRICTORDER_ACC 340 }; 341 342 /* I/O DMA attributes structures. 
*/ 343 static ddi_dma_attr_t ql_64bit_io_dma_attr = { 344 DMA_ATTR_V0, /* dma_attr_version */ 345 QL_DMA_LOW_ADDRESS, /* low DMA address range */ 346 QL_DMA_HIGH_64BIT_ADDRESS, /* high DMA address range */ 347 QL_DMA_XFER_COUNTER, /* DMA counter register */ 348 QL_DMA_ADDRESS_ALIGNMENT, /* DMA address alignment */ 349 QL_DMA_BURSTSIZES, /* DMA burstsizes */ 350 QL_DMA_MIN_XFER_SIZE, /* min effective DMA size */ 351 QL_DMA_MAX_XFER_SIZE, /* max DMA xfer size */ 352 QL_DMA_SEGMENT_BOUNDARY, /* segment boundary */ 353 QL_DMA_SG_LIST_LENGTH, /* s/g list length */ 354 QL_DMA_GRANULARITY, /* granularity of device */ 355 QL_DMA_XFER_FLAGS /* DMA transfer flags */ 356 }; 357 358 static ddi_dma_attr_t ql_32bit_io_dma_attr = { 359 DMA_ATTR_V0, /* dma_attr_version */ 360 QL_DMA_LOW_ADDRESS, /* low DMA address range */ 361 QL_DMA_HIGH_32BIT_ADDRESS, /* high DMA address range */ 362 QL_DMA_XFER_COUNTER, /* DMA counter register */ 363 QL_DMA_ADDRESS_ALIGNMENT, /* DMA address alignment */ 364 QL_DMA_BURSTSIZES, /* DMA burstsizes */ 365 QL_DMA_MIN_XFER_SIZE, /* min effective DMA size */ 366 QL_DMA_MAX_XFER_SIZE, /* max DMA xfer size */ 367 QL_DMA_SEGMENT_BOUNDARY, /* segment boundary */ 368 QL_DMA_SG_LIST_LENGTH, /* s/g list length */ 369 QL_DMA_GRANULARITY, /* granularity of device */ 370 QL_DMA_XFER_FLAGS /* DMA transfer flags */ 371 }; 372 373 /* Load the default dma attributes */ 374 static ddi_dma_attr_t ql_32fcsm_cmd_dma_attr; 375 static ddi_dma_attr_t ql_64fcsm_cmd_dma_attr; 376 static ddi_dma_attr_t ql_32fcsm_rsp_dma_attr; 377 static ddi_dma_attr_t ql_64fcsm_rsp_dma_attr; 378 static ddi_dma_attr_t ql_32fcip_cmd_dma_attr; 379 static ddi_dma_attr_t ql_64fcip_cmd_dma_attr; 380 static ddi_dma_attr_t ql_32fcip_rsp_dma_attr; 381 static ddi_dma_attr_t ql_64fcip_rsp_dma_attr; 382 static ddi_dma_attr_t ql_32fcp_cmd_dma_attr; 383 static ddi_dma_attr_t ql_64fcp_cmd_dma_attr; 384 static ddi_dma_attr_t ql_32fcp_rsp_dma_attr; 385 static ddi_dma_attr_t ql_64fcp_rsp_dma_attr; 386 static ddi_dma_attr_t ql_32fcp_data_dma_attr; 387 static ddi_dma_attr_t ql_64fcp_data_dma_attr; 388 389 /* Static declarations of cb_ops entry point functions... */ 390 static struct cb_ops ql_cb_ops = { 391 ql_open, /* b/c open */ 392 ql_close, /* b/c close */ 393 nodev, /* b strategy */ 394 nodev, /* b print */ 395 nodev, /* b dump */ 396 nodev, /* c read */ 397 nodev, /* c write */ 398 ql_ioctl, /* c ioctl */ 399 nodev, /* c devmap */ 400 nodev, /* c mmap */ 401 nodev, /* c segmap */ 402 nochpoll, /* c poll */ 403 nodev, /* cb_prop_op */ 404 NULL, /* streamtab */ 405 D_MP | D_NEW | D_HOTPLUG, /* Driver compatibility flag */ 406 CB_REV, /* cb_ops revision */ 407 nodev, /* c aread */ 408 nodev /* c awrite */ 409 }; 410 411 /* Static declarations of dev_ops entry point functions... */ 412 static struct dev_ops ql_devops = { 413 DEVO_REV, /* devo_rev */ 414 0, /* refcnt */ 415 ql_getinfo, /* getinfo */ 416 nulldev, /* identify */ 417 nulldev, /* probe */ 418 ql_attach, /* attach */ 419 ql_detach, /* detach */ 420 nodev, /* reset */ 421 &ql_cb_ops, /* char/block ops */ 422 NULL, /* bus operations */ 423 ql_power, /* power management */ 424 ql_quiesce /* quiesce device */ 425 }; 426 427 /* ELS command code to text converter */ 428 cmd_table_t els_cmd_tbl[] = ELS_CMD_TABLE(); 429 /* Mailbox command code to text converter */ 430 cmd_table_t mbox_cmd_tbl[] = MBOX_CMD_TABLE(); 431 432 char qlc_driver_version[] = QL_VERSION; 433 434 /* 435 * Loadable Driver Interface Structures. 436 * Declare and initialize the module configuration section... 
437 */ 438 static struct modldrv modldrv = { 439 &mod_driverops, /* type of module: driver */ 440 "SunFC Qlogic FCA v" QL_VERSION, /* name of module */ 441 &ql_devops /* driver dev_ops */ 442 }; 443 444 static struct modlinkage modlinkage = { 445 MODREV_1, 446 &modldrv, 447 NULL 448 }; 449 450 /* ************************************************************************ */ 451 /* Loadable Module Routines. */ 452 /* ************************************************************************ */ 453 454 /* 455 * _init 456 * Initializes a loadable module. It is called before any other 457 * routine in a loadable module. 458 * 459 * Returns: 460 * 0 = success 461 * 462 * Context: 463 * Kernel context. 464 */ 465 int 466 _init(void) 467 { 468 uint16_t w16; 469 int rval = 0; 470 471 /* Get OS major release level. */ 472 for (w16 = 0; w16 < sizeof (utsname.release); w16++) { 473 if (utsname.release[w16] == '.') { 474 w16++; 475 break; 476 } 477 } 478 if (w16 < sizeof (utsname.release)) { 479 (void) ql_bstr_to_dec(&utsname.release[w16], 480 &ql_os_release_level, 0); 481 } else { 482 ql_os_release_level = 0; 483 } 484 if (ql_os_release_level < 6) { 485 cmn_err(CE_WARN, "%s Unsupported OS release level = %d", 486 QL_NAME, ql_os_release_level); 487 rval = EINVAL; 488 } 489 if (ql_os_release_level == 6) { 490 ql_32bit_io_dma_attr.dma_attr_count_max = 0x00ffffff; 491 ql_64bit_io_dma_attr.dma_attr_count_max = 0x00ffffff; 492 } 493 494 if (rval == 0) { 495 rval = ddi_soft_state_init(&ql_state, 496 sizeof (ql_adapter_state_t), 0); 497 } 498 if (rval == 0) { 499 /* allow the FC Transport to tweak the dev_ops */ 500 fc_fca_init(&ql_devops); 501 502 mutex_init(&ql_global_mutex, NULL, MUTEX_DRIVER, NULL); 503 mutex_init(&ql_global_hw_mutex, NULL, MUTEX_DRIVER, NULL); 504 mutex_init(&ql_global_el_mutex, NULL, MUTEX_DRIVER, NULL); 505 rval = mod_install(&modlinkage); 506 if (rval != 0) { 507 mutex_destroy(&ql_global_hw_mutex); 508 mutex_destroy(&ql_global_mutex); 509 mutex_destroy(&ql_global_el_mutex); 510 ddi_soft_state_fini(&ql_state); 511 } else { 512 /*EMPTY*/ 513 ql_32fcsm_cmd_dma_attr = ql_32bit_io_dma_attr; 514 ql_64fcsm_cmd_dma_attr = ql_64bit_io_dma_attr; 515 ql_32fcsm_rsp_dma_attr = ql_32bit_io_dma_attr; 516 ql_64fcsm_rsp_dma_attr = ql_64bit_io_dma_attr; 517 ql_32fcip_cmd_dma_attr = ql_32bit_io_dma_attr; 518 ql_64fcip_cmd_dma_attr = ql_64bit_io_dma_attr; 519 ql_32fcip_rsp_dma_attr = ql_32bit_io_dma_attr; 520 ql_64fcip_rsp_dma_attr = ql_64bit_io_dma_attr; 521 ql_32fcp_cmd_dma_attr = ql_32bit_io_dma_attr; 522 ql_64fcp_cmd_dma_attr = ql_64bit_io_dma_attr; 523 ql_32fcp_rsp_dma_attr = ql_32bit_io_dma_attr; 524 ql_64fcp_rsp_dma_attr = ql_64bit_io_dma_attr; 525 ql_32fcp_data_dma_attr = ql_32bit_io_dma_attr; 526 ql_64fcp_data_dma_attr = ql_64bit_io_dma_attr; 527 ql_32fcsm_cmd_dma_attr.dma_attr_sgllen = 528 ql_64fcsm_cmd_dma_attr.dma_attr_sgllen = 529 QL_FCSM_CMD_SGLLEN; 530 ql_32fcsm_rsp_dma_attr.dma_attr_sgllen = 531 ql_64fcsm_rsp_dma_attr.dma_attr_sgllen = 532 QL_FCSM_RSP_SGLLEN; 533 ql_32fcip_cmd_dma_attr.dma_attr_sgllen = 534 ql_64fcip_cmd_dma_attr.dma_attr_sgllen = 535 QL_FCIP_CMD_SGLLEN; 536 ql_32fcip_rsp_dma_attr.dma_attr_sgllen = 537 ql_64fcip_rsp_dma_attr.dma_attr_sgllen = 538 QL_FCIP_RSP_SGLLEN; 539 ql_32fcp_cmd_dma_attr.dma_attr_sgllen = 540 ql_64fcp_cmd_dma_attr.dma_attr_sgllen = 541 QL_FCP_CMD_SGLLEN; 542 ql_32fcp_rsp_dma_attr.dma_attr_sgllen = 543 ql_64fcp_rsp_dma_attr.dma_attr_sgllen = 544 QL_FCP_RSP_SGLLEN; 545 } 546 } 547 548 if (rval != 0) { 549 cmn_err(CE_CONT, "?Unable to install/attach driver 
'%s'", 550 QL_NAME); 551 } 552 553 return (rval); 554 } 555 556 /* 557 * _fini 558 * Prepares a module for unloading. It is called when the system 559 * wants to unload a module. If the module determines that it can 560 * be unloaded, then _fini() returns the value returned by 561 * mod_remove(). Upon successful return from _fini() no other 562 * routine in the module will be called before _init() is called. 563 * 564 * Returns: 565 * 0 = success 566 * 567 * Context: 568 * Kernel context. 569 */ 570 int 571 _fini(void) 572 { 573 int rval; 574 575 rval = mod_remove(&modlinkage); 576 if (rval == 0) { 577 mutex_destroy(&ql_global_hw_mutex); 578 mutex_destroy(&ql_global_mutex); 579 mutex_destroy(&ql_global_el_mutex); 580 ddi_soft_state_fini(&ql_state); 581 } 582 583 return (rval); 584 } 585 586 /* 587 * _info 588 * Returns information about loadable module. 589 * 590 * Input: 591 * modinfo = pointer to module information structure. 592 * 593 * Returns: 594 * Value returned by mod_info(). 595 * 596 * Context: 597 * Kernel context. 598 */ 599 int 600 _info(struct modinfo *modinfop) 601 { 602 return (mod_info(&modlinkage, modinfop)); 603 } 604 605 /* ************************************************************************ */ 606 /* dev_ops functions */ 607 /* ************************************************************************ */ 608 609 /* 610 * ql_getinfo 611 * Returns the pointer associated with arg when cmd is 612 * set to DDI_INFO_DEVT2DEVINFO, or it should return the 613 * instance number associated with arg when cmd is set 614 * to DDI_INFO_DEV2INSTANCE. 615 * 616 * Input: 617 * dip = Do not use. 618 * cmd = command argument. 619 * arg = command specific argument. 620 * resultp = pointer to where request information is stored. 621 * 622 * Returns: 623 * DDI_SUCCESS or DDI_FAILURE. 624 * 625 * Context: 626 * Kernel context. 627 */ 628 /* ARGSUSED */ 629 static int 630 ql_getinfo(dev_info_t *dip, ddi_info_cmd_t cmd, void *arg, void **resultp) 631 { 632 ql_adapter_state_t *ha; 633 int minor; 634 int rval = DDI_FAILURE; 635 636 minor = (int)(getminor((dev_t)arg)); 637 ha = ddi_get_soft_state(ql_state, minor); 638 if (ha == NULL) { 639 QL_PRINT_2(CE_CONT, "failed, unknown minor=%d\n", 640 getminor((dev_t)arg)); 641 *resultp = NULL; 642 return (rval); 643 } 644 645 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance); 646 647 switch (cmd) { 648 case DDI_INFO_DEVT2DEVINFO: 649 *resultp = ha->dip; 650 rval = DDI_SUCCESS; 651 break; 652 case DDI_INFO_DEVT2INSTANCE: 653 *resultp = (void *)(uintptr_t)(ha->instance); 654 rval = DDI_SUCCESS; 655 break; 656 default: 657 EL(ha, "failed, unsupported cmd=%d\n", cmd); 658 rval = DDI_FAILURE; 659 break; 660 } 661 662 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance); 663 664 return (rval); 665 } 666 667 /* 668 * ql_attach 669 * Configure and attach an instance of the driver 670 * for a port. 671 * 672 * Input: 673 * dip = pointer to device information structure. 674 * cmd = attach type. 675 * 676 * Returns: 677 * DDI_SUCCESS or DDI_FAILURE. 678 * 679 * Context: 680 * Kernel context. 
681 */ 682 static int 683 ql_attach(dev_info_t *dip, ddi_attach_cmd_t cmd) 684 { 685 uint32_t size; 686 int rval; 687 int instance; 688 uint_t progress = 0; 689 char *buf; 690 ushort_t caps_ptr, cap; 691 fc_fca_tran_t *tran; 692 ql_adapter_state_t *ha = NULL; 693 694 static char *pmcomps[] = { 695 NULL, 696 PM_LEVEL_D3_STR, /* Device OFF */ 697 PM_LEVEL_D0_STR, /* Device ON */ 698 }; 699 700 QL_PRINT_3(CE_CONT, "(%d): started, cmd=%xh\n", 701 ddi_get_instance(dip), cmd); 702 703 buf = (char *)(kmem_zalloc(MAXPATHLEN, KM_SLEEP)); 704 705 switch (cmd) { 706 case DDI_ATTACH: 707 /* first get the instance */ 708 instance = ddi_get_instance(dip); 709 710 cmn_err(CE_CONT, "!Qlogic %s(%d) FCA Driver v%s\n", 711 QL_NAME, instance, QL_VERSION); 712 713 /* Correct OS version? */ 714 if (ql_os_release_level != 11) { 715 cmn_err(CE_WARN, "%s(%d): This driver is for Solaris " 716 "11", QL_NAME, instance); 717 goto attach_failed; 718 } 719 720 /* Hardware is installed in a DMA-capable slot? */ 721 if (ddi_slaveonly(dip) == DDI_SUCCESS) { 722 cmn_err(CE_WARN, "%s(%d): slave only", QL_NAME, 723 instance); 724 goto attach_failed; 725 } 726 727 /* No support for high-level interrupts */ 728 if (ddi_intr_hilevel(dip, 0) != 0) { 729 cmn_err(CE_WARN, "%s(%d): High level interrupt" 730 " not supported", QL_NAME, instance); 731 goto attach_failed; 732 } 733 734 /* Allocate our per-device-instance structure */ 735 if (ddi_soft_state_zalloc(ql_state, 736 instance) != DDI_SUCCESS) { 737 cmn_err(CE_WARN, "%s(%d): soft state alloc failed", 738 QL_NAME, instance); 739 goto attach_failed; 740 } 741 progress |= QL_SOFT_STATE_ALLOCED; 742 743 ha = ddi_get_soft_state(ql_state, instance); 744 if (ha == NULL) { 745 cmn_err(CE_WARN, "%s(%d): can't get soft state", 746 QL_NAME, instance); 747 goto attach_failed; 748 } 749 ha->dip = dip; 750 ha->instance = instance; 751 ha->hba.base_address = ha; 752 ha->pha = ha; 753 754 if (ql_el_trace_desc_ctor(ha) != DDI_SUCCESS) { 755 cmn_err(CE_WARN, "%s(%d): can't setup el tracing", 756 QL_NAME, instance); 757 goto attach_failed; 758 } 759 760 /* Get extended logging and dump flags. */ 761 ql_common_properties(ha); 762 763 if (strcmp(ddi_driver_name(ddi_get_parent(dip)), 764 "sbus") == 0) { 765 EL(ha, "%s SBUS card detected", QL_NAME); 766 ha->cfg_flags |= CFG_SBUS_CARD; 767 } 768 769 ha->dev = kmem_zalloc(sizeof (*ha->dev) * 770 DEVICE_HEAD_LIST_SIZE, KM_SLEEP); 771 772 ha->outstanding_cmds = kmem_zalloc( 773 sizeof (*ha->outstanding_cmds) * MAX_OUTSTANDING_COMMANDS, 774 KM_SLEEP); 775 776 ha->ub_array = kmem_zalloc(sizeof (*ha->ub_array) * 777 QL_UB_LIMIT, KM_SLEEP); 778 779 ha->adapter_stats = kmem_zalloc(sizeof (*ha->adapter_stats), 780 KM_SLEEP); 781 782 (void) ddi_pathname(dip, buf); 783 ha->devpath = kmem_zalloc(strlen(buf)+1, KM_SLEEP); 784 if (ha->devpath == NULL) { 785 EL(ha, "devpath mem alloc failed\n"); 786 } else { 787 (void) strcpy(ha->devpath, buf); 788 EL(ha, "devpath is: %s\n", ha->devpath); 789 } 790 791 if (CFG_IST(ha, CFG_SBUS_CARD)) { 792 /* 793 * For cards where PCI is mapped to sbus e.g. Ivory. 
794 * 795 * 0x00 : 0x000 - 0x0FF PCI Config Space for 2200 796 * : 0x100 - 0x3FF PCI IO space for 2200 797 * 0x01 : 0x000 - 0x0FF PCI Config Space for fpga 798 * : 0x100 - 0x3FF PCI IO Space for fpga 799 */ 800 if (ddi_regs_map_setup(dip, 0, (caddr_t *)&ha->iobase, 801 0x100, 0x300, &ql_dev_acc_attr, &ha->dev_handle) 802 != DDI_SUCCESS) { 803 cmn_err(CE_WARN, "%s(%d): Unable to map device" 804 " registers", QL_NAME, instance); 805 goto attach_failed; 806 } 807 if (ddi_regs_map_setup(dip, 1, 808 (caddr_t *)&ha->sbus_fpga_iobase, 0, 0x400, 809 &ql_dev_acc_attr, &ha->sbus_fpga_dev_handle) 810 != DDI_SUCCESS) { 811 /* We should not fail attach here */ 812 cmn_err(CE_WARN, "%s(%d): Unable to map FPGA", 813 QL_NAME, instance); 814 ha->sbus_fpga_iobase = NULL; 815 } 816 progress |= QL_REGS_MAPPED; 817 } else { 818 /* 819 * Setup the ISP2200 registers address mapping to be 820 * accessed by this particular driver. 821 * 0x0 Configuration Space 822 * 0x1 I/O Space 823 * 0x2 32-bit Memory Space address 824 * 0x3 64-bit Memory Space address 825 */ 826 if (ddi_regs_map_setup(dip, 2, (caddr_t *)&ha->iobase, 827 0, 0x100, &ql_dev_acc_attr, 828 &ha->dev_handle) != DDI_SUCCESS) { 829 cmn_err(CE_WARN, "%s(%d): regs_map_setup " 830 "failed", QL_NAME, instance); 831 goto attach_failed; 832 } 833 progress |= QL_REGS_MAPPED; 834 835 /* 836 * We need I/O space mappings for 23xx HBAs for 837 * loading flash (FCode). The chip has a bug due to 838 * which loading flash fails through mem space 839 * mappings in PCI-X mode. 840 */ 841 if (ddi_regs_map_setup(dip, 1, 842 (caddr_t *)&ha->iomap_iobase, 0, 0x100, 843 &ql_dev_acc_attr, 844 &ha->iomap_dev_handle) != DDI_SUCCESS) { 845 cmn_err(CE_WARN, "%s(%d): regs_map_setup(I/O)" 846 " failed", QL_NAME, instance); 847 goto attach_failed; 848 } 849 progress |= QL_IOMAP_IOBASE_MAPPED; 850 } 851 852 /* 853 * We should map config space before adding interrupt 854 * So that the chip type (2200 or 2300) can be determined 855 * before the interrupt routine gets a chance to execute. 
856 */ 857 if (CFG_IST(ha, CFG_SBUS_CARD)) { 858 if (ddi_regs_map_setup(dip, 0, 859 (caddr_t *)&ha->sbus_config_base, 0, 0x100, 860 &ql_dev_acc_attr, &ha->sbus_config_handle) != 861 DDI_SUCCESS) { 862 cmn_err(CE_WARN, "%s(%d): Unable to map sbus " 863 "config registers", QL_NAME, instance); 864 goto attach_failed; 865 } 866 } else { 867 if (pci_config_setup(ha->dip, &ha->pci_handle) != 868 DDI_SUCCESS) { 869 cmn_err(CE_WARN, "%s(%d): can't setup PCI " 870 "config space", QL_NAME, instance); 871 goto attach_failed; 872 } 873 } 874 progress |= QL_CONFIG_SPACE_SETUP; 875 876 ha->subsys_id = (uint16_t)ql_pci_config_get16(ha, 877 PCI_CONF_SUBSYSID); 878 ha->subven_id = (uint16_t)ql_pci_config_get16(ha, 879 PCI_CONF_SUBVENID); 880 ha->ven_id = (uint16_t)ql_pci_config_get16(ha, 881 PCI_CONF_VENID); 882 ha->device_id = (uint16_t)ql_pci_config_get16(ha, 883 PCI_CONF_DEVID); 884 ha->rev_id = (uint8_t)ql_pci_config_get8(ha, 885 PCI_CONF_REVID); 886 887 EL(ha, "ISP%x chip detected (RevID=%x, VenID=%x, SVenID=%x, " 888 "SSysID=%x)\n", ha->device_id, ha->rev_id, ha->ven_id, 889 ha->subven_id, ha->subsys_id); 890 891 switch (ha->device_id) { 892 case 0x2300: 893 case 0x2312: 894 #if !defined(__sparc) || defined(QL_DEBUG_ROUTINES) 895 /* 896 * per marketing, fibre-lite HBA's are not supported 897 * on sparc platforms 898 */ 899 case 0x6312: 900 case 0x6322: 901 #endif /* !defined(__sparc) || defined(QL_DEBUG_ROUTINES) */ 902 if (ql_pci_config_get8(ha, PCI_CONF_IPIN) == 2) { 903 ha->flags |= FUNCTION_1; 904 } 905 if (ha->device_id == 0x6322) { 906 ha->cfg_flags |= CFG_CTRL_6322; 907 ha->fw_class = 0x6322; 908 ha->risc_dump_size = QL_6322_FW_DUMP_SIZE; 909 } else { 910 ha->cfg_flags |= CFG_CTRL_2300; 911 ha->fw_class = 0x2300; 912 ha->risc_dump_size = QL_2300_FW_DUMP_SIZE; 913 } 914 ha->reg_off = ®_off_2300; 915 if (ql_fwmodule_resolve(ha) != QL_SUCCESS) { 916 goto attach_failed; 917 } 918 ha->fcp_cmd = ql_command_iocb; 919 ha->ip_cmd = ql_ip_iocb; 920 ha->ms_cmd = ql_ms_iocb; 921 if (CFG_IST(ha, CFG_SBUS_CARD)) { 922 ha->cmd_segs = CMD_TYPE_2_DATA_SEGMENTS; 923 ha->cmd_cont_segs = CONT_TYPE_0_DATA_SEGMENTS; 924 } else { 925 ha->cmd_segs = CMD_TYPE_3_DATA_SEGMENTS; 926 ha->cmd_cont_segs = CONT_TYPE_1_DATA_SEGMENTS; 927 } 928 break; 929 930 case 0x2200: 931 ha->cfg_flags |= CFG_CTRL_2200; 932 ha->reg_off = ®_off_2200; 933 ha->fw_class = 0x2200; 934 if (ql_fwmodule_resolve(ha) != QL_SUCCESS) { 935 goto attach_failed; 936 } 937 ha->risc_dump_size = QL_2200_FW_DUMP_SIZE; 938 ha->fcp_cmd = ql_command_iocb; 939 ha->ip_cmd = ql_ip_iocb; 940 ha->ms_cmd = ql_ms_iocb; 941 if (CFG_IST(ha, CFG_SBUS_CARD)) { 942 ha->cmd_segs = CMD_TYPE_2_DATA_SEGMENTS; 943 ha->cmd_cont_segs = CONT_TYPE_0_DATA_SEGMENTS; 944 } else { 945 ha->cmd_segs = CMD_TYPE_3_DATA_SEGMENTS; 946 ha->cmd_cont_segs = CONT_TYPE_1_DATA_SEGMENTS; 947 } 948 break; 949 950 case 0x2422: 951 case 0x2432: 952 case 0x5422: 953 case 0x5432: 954 case 0x8432: 955 #ifdef __sparc 956 /* 957 * Per marketing, the QLA/QLE-2440's (which 958 * also use the 2422 & 2432) are only for the 959 * x86 platform (SMB market). 
960 */ 961 if (ha->subsys_id == 0x145 || ha->subsys_id == 0x147 || 962 ha->subsys_id == 0x13e) { 963 cmn_err(CE_WARN, 964 "%s(%d): Unsupported HBA ssid: %x", 965 QL_NAME, instance, ha->subsys_id); 966 goto attach_failed; 967 } 968 #endif /* __sparc */ 969 if (ql_pci_config_get8(ha, PCI_CONF_IPIN) == 2) { 970 ha->flags |= FUNCTION_1; 971 } 972 ha->cfg_flags |= CFG_CTRL_2422; 973 if (ha->device_id == 0x8432) { 974 ha->cfg_flags |= CFG_CTRL_MENLO; 975 } else { 976 ha->flags |= VP_ENABLED; 977 } 978 979 ha->reg_off = ®_off_2400_2500; 980 ha->fw_class = 0x2400; 981 if (ql_fwmodule_resolve(ha) != QL_SUCCESS) { 982 goto attach_failed; 983 } 984 ha->risc_dump_size = QL_24XX_FW_DUMP_SIZE; 985 ha->fcp_cmd = ql_command_24xx_iocb; 986 ha->ip_cmd = ql_ip_24xx_iocb; 987 ha->ms_cmd = ql_ms_24xx_iocb; 988 ha->els_cmd = ql_els_24xx_iocb; 989 ha->cmd_segs = CMD_TYPE_7_DATA_SEGMENTS; 990 ha->cmd_cont_segs = CONT_TYPE_1_DATA_SEGMENTS; 991 break; 992 993 case 0x2522: 994 case 0x2532: 995 if (ql_pci_config_get8(ha, PCI_CONF_IPIN) == 2) { 996 ha->flags |= FUNCTION_1; 997 } 998 ha->cfg_flags |= CFG_CTRL_25XX; 999 ha->flags |= VP_ENABLED; 1000 ha->fw_class = 0x2500; 1001 ha->reg_off = ®_off_2400_2500; 1002 if (ql_fwmodule_resolve(ha) != QL_SUCCESS) { 1003 goto attach_failed; 1004 } 1005 ha->risc_dump_size = QL_25XX_FW_DUMP_SIZE; 1006 ha->fcp_cmd = ql_command_24xx_iocb; 1007 ha->ip_cmd = ql_ip_24xx_iocb; 1008 ha->ms_cmd = ql_ms_24xx_iocb; 1009 ha->els_cmd = ql_els_24xx_iocb; 1010 ha->cmd_segs = CMD_TYPE_7_DATA_SEGMENTS; 1011 ha->cmd_cont_segs = CONT_TYPE_1_DATA_SEGMENTS; 1012 break; 1013 1014 case 0x8001: 1015 if (ql_pci_config_get8(ha, PCI_CONF_IPIN) == 4) { 1016 ha->flags |= FUNCTION_1; 1017 } 1018 ha->cfg_flags |= CFG_CTRL_81XX; 1019 ha->flags |= VP_ENABLED; 1020 ha->fw_class = 0x8100; 1021 ha->reg_off = ®_off_2400_2500; 1022 if (ql_fwmodule_resolve(ha) != QL_SUCCESS) { 1023 goto attach_failed; 1024 } 1025 ha->risc_dump_size = QL_25XX_FW_DUMP_SIZE; 1026 ha->fcp_cmd = ql_command_24xx_iocb; 1027 ha->ip_cmd = ql_ip_24xx_iocb; 1028 ha->ms_cmd = ql_ms_24xx_iocb; 1029 ha->cmd_segs = CMD_TYPE_7_DATA_SEGMENTS; 1030 ha->cmd_cont_segs = CONT_TYPE_1_DATA_SEGMENTS; 1031 break; 1032 1033 default: 1034 cmn_err(CE_WARN, "%s(%d): Unsupported device id: %x", 1035 QL_NAME, instance, ha->device_id); 1036 goto attach_failed; 1037 } 1038 1039 /* Setup hba buffer. */ 1040 1041 size = CFG_IST(ha, CFG_CTRL_242581) ? 1042 (REQUEST_QUEUE_SIZE + RESPONSE_QUEUE_SIZE) : 1043 (REQUEST_QUEUE_SIZE + RESPONSE_QUEUE_SIZE + 1044 RCVBUF_QUEUE_SIZE); 1045 1046 if (ql_get_dma_mem(ha, &ha->hba_buf, size, LITTLE_ENDIAN_DMA, 1047 QL_DMA_RING_ALIGN) != QL_SUCCESS) { 1048 cmn_err(CE_WARN, "%s(%d): request queue DMA memory " 1049 "alloc failed", QL_NAME, instance); 1050 goto attach_failed; 1051 } 1052 progress |= QL_HBA_BUFFER_SETUP; 1053 1054 /* Setup buffer pointers. 
*/ 1055 ha->request_dvma = ha->hba_buf.cookie.dmac_laddress + 1056 REQUEST_Q_BUFFER_OFFSET; 1057 ha->request_ring_bp = (struct cmd_entry *) 1058 ((caddr_t)ha->hba_buf.bp + REQUEST_Q_BUFFER_OFFSET); 1059 1060 ha->response_dvma = ha->hba_buf.cookie.dmac_laddress + 1061 RESPONSE_Q_BUFFER_OFFSET; 1062 ha->response_ring_bp = (struct sts_entry *) 1063 ((caddr_t)ha->hba_buf.bp + RESPONSE_Q_BUFFER_OFFSET); 1064 1065 ha->rcvbuf_dvma = ha->hba_buf.cookie.dmac_laddress + 1066 RCVBUF_Q_BUFFER_OFFSET; 1067 ha->rcvbuf_ring_bp = (struct rcvbuf *) 1068 ((caddr_t)ha->hba_buf.bp + RCVBUF_Q_BUFFER_OFFSET); 1069 1070 /* Allocate resource for QLogic IOCTL */ 1071 (void) ql_alloc_xioctl_resource(ha); 1072 1073 /* Setup interrupts */ 1074 if ((rval = ql_setup_interrupts(ha)) != DDI_SUCCESS) { 1075 cmn_err(CE_WARN, "%s(%d): Failed to add interrupt, " 1076 "rval=%xh", QL_NAME, instance, rval); 1077 goto attach_failed; 1078 } 1079 1080 progress |= (QL_INTR_ADDED | QL_MUTEX_CV_INITED); 1081 1082 if (ql_nvram_cache_desc_ctor(ha) != DDI_SUCCESS) { 1083 cmn_err(CE_WARN, "%s(%d): can't setup nvram cache", 1084 QL_NAME, instance); 1085 goto attach_failed; 1086 } 1087 1088 /* 1089 * Allocate an N Port information structure 1090 * for use when in P2P topology. 1091 */ 1092 ha->n_port = (ql_n_port_info_t *) 1093 kmem_zalloc(sizeof (ql_n_port_info_t), KM_SLEEP); 1094 if (ha->n_port == NULL) { 1095 cmn_err(CE_WARN, "%s(%d): Failed to create N Port info", 1096 QL_NAME, instance); 1097 goto attach_failed; 1098 } 1099 1100 progress |= QL_N_PORT_INFO_CREATED; 1101 1102 /* 1103 * Determine support for Power Management 1104 */ 1105 caps_ptr = (uint8_t)ql_pci_config_get8(ha, PCI_CONF_CAP_PTR); 1106 1107 while (caps_ptr != PCI_CAP_NEXT_PTR_NULL) { 1108 cap = (uint8_t)ql_pci_config_get8(ha, caps_ptr); 1109 if (cap == PCI_CAP_ID_PM) { 1110 ha->pm_capable = 1; 1111 break; 1112 } 1113 caps_ptr = (uint8_t)ql_pci_config_get8(ha, caps_ptr + 1114 PCI_CAP_NEXT_PTR); 1115 } 1116 1117 if (ha->pm_capable) { 1118 /* 1119 * Enable PM for 2200 based HBAs only. 1120 */ 1121 if (ha->device_id != 0x2200) { 1122 ha->pm_capable = 0; 1123 } 1124 } 1125 1126 if (ha->pm_capable) { 1127 ha->pm_capable = ql_enable_pm; 1128 } 1129 1130 if (ha->pm_capable) { 1131 /* 1132 * Initialize power management bookkeeping; 1133 * components are created idle. 1134 */ 1135 (void) sprintf(buf, "NAME=%s(%d)", QL_NAME, instance); 1136 pmcomps[0] = buf; 1137 1138 /*LINTED [Solaris DDI_DEV_T_NONE Lint warning]*/ 1139 if (ddi_prop_update_string_array(DDI_DEV_T_NONE, 1140 dip, "pm-components", pmcomps, 1141 sizeof (pmcomps) / sizeof (pmcomps[0])) != 1142 DDI_PROP_SUCCESS) { 1143 cmn_err(CE_WARN, "%s(%d): failed to create" 1144 " pm-components property", QL_NAME, 1145 instance); 1146 1147 /* Initialize adapter. */ 1148 ha->power_level = PM_LEVEL_D0; 1149 if (ql_initialize_adapter(ha) != QL_SUCCESS) { 1150 cmn_err(CE_WARN, "%s(%d): failed to" 1151 " initialize adapter", QL_NAME, 1152 instance); 1153 goto attach_failed; 1154 } 1155 } else { 1156 ha->power_level = PM_LEVEL_D3; 1157 if (pm_raise_power(dip, QL_POWER_COMPONENT, 1158 PM_LEVEL_D0) != DDI_SUCCESS) { 1159 cmn_err(CE_WARN, "%s(%d): failed to" 1160 " raise power or initialize" 1161 " adapter", QL_NAME, instance); 1162 } 1163 } 1164 } else { 1165 /* Initialize adapter. 
*/ 1166 ha->power_level = PM_LEVEL_D0; 1167 if (ql_initialize_adapter(ha) != QL_SUCCESS) { 1168 cmn_err(CE_WARN, "%s(%d): failed to initialize" 1169 " adapter", QL_NAME, instance); 1170 } 1171 } 1172 1173 if (ha->fw_major_version == 0 && ha->fw_minor_version == 0 && 1174 ha->fw_subminor_version == 0) { 1175 cmn_err(CE_NOTE, "!%s(%d): Firmware not loaded", 1176 QL_NAME, ha->instance); 1177 } else { 1178 int rval; 1179 char ver_fmt[256]; 1180 1181 rval = (int)snprintf(ver_fmt, (size_t)sizeof (ver_fmt), 1182 "Firmware version %d.%d.%d", ha->fw_major_version, 1183 ha->fw_minor_version, ha->fw_subminor_version); 1184 1185 if (CFG_IST(ha, CFG_CTRL_81XX)) { 1186 rval = (int)snprintf(ver_fmt + rval, 1187 (size_t)sizeof (ver_fmt), 1188 ", MPI fw version %d.%d.%d", 1189 ha->mpi_fw_major_version, 1190 ha->mpi_fw_minor_version, 1191 ha->mpi_fw_subminor_version); 1192 1193 if (ha->subsys_id == 0x17B || 1194 ha->subsys_id == 0x17D) { 1195 (void) snprintf(ver_fmt + rval, 1196 (size_t)sizeof (ver_fmt), 1197 ", PHY fw version %d.%d.%d", 1198 ha->phy_fw_major_version, 1199 ha->phy_fw_minor_version, 1200 ha->phy_fw_subminor_version); 1201 } 1202 } 1203 cmn_err(CE_NOTE, "!%s(%d): %s", 1204 QL_NAME, ha->instance, ver_fmt); 1205 } 1206 1207 ha->k_stats = kstat_create(QL_NAME, instance, "statistics", 1208 "controller", KSTAT_TYPE_RAW, 1209 (uint32_t)sizeof (ql_adapter_stat_t), KSTAT_FLAG_VIRTUAL); 1210 if (ha->k_stats == NULL) { 1211 cmn_err(CE_WARN, "%s(%d): Failed to create kstat", 1212 QL_NAME, instance); 1213 goto attach_failed; 1214 } 1215 progress |= QL_KSTAT_CREATED; 1216 1217 ha->adapter_stats->version = 1; 1218 ha->k_stats->ks_data = (void *)ha->adapter_stats; 1219 ha->k_stats->ks_private = ha; 1220 ha->k_stats->ks_update = ql_kstat_update; 1221 ha->k_stats->ks_ndata = 1; 1222 ha->k_stats->ks_data_size = sizeof (ql_adapter_stat_t); 1223 kstat_install(ha->k_stats); 1224 1225 if (ddi_create_minor_node(dip, "devctl", S_IFCHR, 1226 instance, DDI_NT_NEXUS, 0) != DDI_SUCCESS) { 1227 cmn_err(CE_WARN, "%s(%d): failed to create minor node", 1228 QL_NAME, instance); 1229 goto attach_failed; 1230 } 1231 progress |= QL_MINOR_NODE_CREATED; 1232 1233 /* Allocate a transport structure for this instance */ 1234 tran = kmem_zalloc(sizeof (fc_fca_tran_t), KM_SLEEP); 1235 if (tran == NULL) { 1236 cmn_err(CE_WARN, "%s(%d): failed to allocate transport", 1237 QL_NAME, instance); 1238 goto attach_failed; 1239 } 1240 1241 progress |= QL_FCA_TRAN_ALLOCED; 1242 1243 /* fill in the structure */ 1244 tran->fca_numports = 1; 1245 tran->fca_version = FCTL_FCA_MODREV_5; 1246 if (CFG_IST(ha, CFG_CTRL_2422)) { 1247 tran->fca_num_npivports = MAX_24_VIRTUAL_PORTS; 1248 } else if (CFG_IST(ha, CFG_CTRL_2581)) { 1249 tran->fca_num_npivports = MAX_25_VIRTUAL_PORTS; 1250 } 1251 bcopy(ha->loginparams.node_ww_name.raw_wwn, 1252 tran->fca_perm_pwwn.raw_wwn, 8); 1253 1254 EL(ha, "FCA version %d\n", tran->fca_version); 1255 1256 /* Specify the amount of space needed in each packet */ 1257 tran->fca_pkt_size = sizeof (ql_srb_t); 1258 1259 /* command limits are usually dictated by hardware */ 1260 tran->fca_cmd_max = MAX_OUTSTANDING_COMMANDS; 1261 1262 /* dmaattr are static, set elsewhere. 
*/ 1263 if (CFG_IST(ha, CFG_ENABLE_64BIT_ADDRESSING)) { 1264 tran->fca_dma_attr = &ql_64bit_io_dma_attr; 1265 tran->fca_dma_fcp_cmd_attr = &ql_64fcp_cmd_dma_attr; 1266 tran->fca_dma_fcp_rsp_attr = &ql_64fcp_rsp_dma_attr; 1267 tran->fca_dma_fcp_data_attr = &ql_64fcp_data_dma_attr; 1268 tran->fca_dma_fcsm_cmd_attr = &ql_64fcsm_cmd_dma_attr; 1269 tran->fca_dma_fcsm_rsp_attr = &ql_64fcsm_rsp_dma_attr; 1270 tran->fca_dma_fcip_cmd_attr = &ql_64fcip_cmd_dma_attr; 1271 tran->fca_dma_fcip_rsp_attr = &ql_64fcip_rsp_dma_attr; 1272 } else { 1273 tran->fca_dma_attr = &ql_32bit_io_dma_attr; 1274 tran->fca_dma_fcp_cmd_attr = &ql_32fcp_cmd_dma_attr; 1275 tran->fca_dma_fcp_rsp_attr = &ql_32fcp_rsp_dma_attr; 1276 tran->fca_dma_fcp_data_attr = &ql_32fcp_data_dma_attr; 1277 tran->fca_dma_fcsm_cmd_attr = &ql_32fcsm_cmd_dma_attr; 1278 tran->fca_dma_fcsm_rsp_attr = &ql_32fcsm_rsp_dma_attr; 1279 tran->fca_dma_fcip_cmd_attr = &ql_32fcip_cmd_dma_attr; 1280 tran->fca_dma_fcip_rsp_attr = &ql_32fcip_rsp_dma_attr; 1281 } 1282 1283 tran->fca_acc_attr = &ql_dev_acc_attr; 1284 tran->fca_iblock = &(ha->iblock_cookie); 1285 1286 /* the remaining values are simply function vectors */ 1287 tran->fca_bind_port = ql_bind_port; 1288 tran->fca_unbind_port = ql_unbind_port; 1289 tran->fca_init_pkt = ql_init_pkt; 1290 tran->fca_un_init_pkt = ql_un_init_pkt; 1291 tran->fca_els_send = ql_els_send; 1292 tran->fca_get_cap = ql_get_cap; 1293 tran->fca_set_cap = ql_set_cap; 1294 tran->fca_getmap = ql_getmap; 1295 tran->fca_transport = ql_transport; 1296 tran->fca_ub_alloc = ql_ub_alloc; 1297 tran->fca_ub_free = ql_ub_free; 1298 tran->fca_ub_release = ql_ub_release; 1299 tran->fca_abort = ql_abort; 1300 tran->fca_reset = ql_reset; 1301 tran->fca_port_manage = ql_port_manage; 1302 tran->fca_get_device = ql_get_device; 1303 1304 /* give it to the FC transport */ 1305 if (fc_fca_attach(dip, tran) != DDI_SUCCESS) { 1306 cmn_err(CE_WARN, "%s(%d): FCA attach failed", QL_NAME, 1307 instance); 1308 goto attach_failed; 1309 } 1310 progress |= QL_FCA_ATTACH_DONE; 1311 1312 /* Stash the structure so it can be freed at detach */ 1313 ha->tran = tran; 1314 1315 /* Acquire global state lock. */ 1316 GLOBAL_STATE_LOCK(); 1317 1318 /* Add adapter structure to link list. */ 1319 ql_add_link_b(&ql_hba, &ha->hba); 1320 1321 /* Start one second driver timer. */ 1322 if (ql_timer_timeout_id == NULL) { 1323 ql_timer_ticks = drv_usectohz(1000000); 1324 ql_timer_timeout_id = timeout(ql_timer, (void *)0, 1325 ql_timer_ticks); 1326 } 1327 1328 /* Release global state lock. */ 1329 GLOBAL_STATE_UNLOCK(); 1330 1331 /* Determine and populate HBA fru info */ 1332 ql_setup_fruinfo(ha); 1333 1334 /* Setup task_daemon thread. 
*/ 1335 (void) thread_create(NULL, 0, (void (*)())ql_task_daemon, ha, 1336 0, &p0, TS_RUN, minclsyspri); 1337 1338 progress |= QL_TASK_DAEMON_STARTED; 1339 1340 ddi_report_dev(dip); 1341 1342 /* Disable link reset in panic path */ 1343 ha->lip_on_panic = 1; 1344 1345 rval = DDI_SUCCESS; 1346 break; 1347 1348 attach_failed: 1349 if (progress & QL_FCA_ATTACH_DONE) { 1350 (void) fc_fca_detach(dip); 1351 progress &= ~QL_FCA_ATTACH_DONE; 1352 } 1353 1354 if (progress & QL_FCA_TRAN_ALLOCED) { 1355 kmem_free(tran, sizeof (fc_fca_tran_t)); 1356 progress &= ~QL_FCA_TRAN_ALLOCED; 1357 } 1358 1359 if (progress & QL_MINOR_NODE_CREATED) { 1360 ddi_remove_minor_node(dip, "devctl"); 1361 progress &= ~QL_MINOR_NODE_CREATED; 1362 } 1363 1364 if (progress & QL_KSTAT_CREATED) { 1365 kstat_delete(ha->k_stats); 1366 progress &= ~QL_KSTAT_CREATED; 1367 } 1368 1369 if (progress & QL_N_PORT_INFO_CREATED) { 1370 kmem_free(ha->n_port, sizeof (ql_n_port_info_t)); 1371 progress &= ~QL_N_PORT_INFO_CREATED; 1372 } 1373 1374 if (progress & QL_TASK_DAEMON_STARTED) { 1375 TASK_DAEMON_LOCK(ha); 1376 1377 ha->task_daemon_flags |= TASK_DAEMON_STOP_FLG; 1378 1379 cv_signal(&ha->cv_task_daemon); 1380 1381 /* Release task daemon lock. */ 1382 TASK_DAEMON_UNLOCK(ha); 1383 1384 /* Wait for for task daemon to stop running. */ 1385 while (ha->task_daemon_flags & TASK_DAEMON_STOP_FLG) { 1386 ql_delay(ha, 10000); 1387 } 1388 progress &= ~QL_TASK_DAEMON_STARTED; 1389 } 1390 1391 if (progress & QL_IOMAP_IOBASE_MAPPED) { 1392 ddi_regs_map_free(&ha->iomap_dev_handle); 1393 progress &= ~QL_IOMAP_IOBASE_MAPPED; 1394 } 1395 1396 if (progress & QL_CONFIG_SPACE_SETUP) { 1397 if (CFG_IST(ha, CFG_SBUS_CARD)) { 1398 ddi_regs_map_free(&ha->sbus_config_handle); 1399 } else { 1400 pci_config_teardown(&ha->pci_handle); 1401 } 1402 progress &= ~QL_CONFIG_SPACE_SETUP; 1403 } 1404 1405 if (progress & QL_INTR_ADDED) { 1406 ql_disable_intr(ha); 1407 ql_release_intr(ha); 1408 progress &= ~QL_INTR_ADDED; 1409 } 1410 1411 if (progress & QL_MUTEX_CV_INITED) { 1412 ql_destroy_mutex(ha); 1413 progress &= ~QL_MUTEX_CV_INITED; 1414 } 1415 1416 if (progress & QL_HBA_BUFFER_SETUP) { 1417 ql_free_phys(ha, &ha->hba_buf); 1418 progress &= ~QL_HBA_BUFFER_SETUP; 1419 } 1420 1421 if (progress & QL_REGS_MAPPED) { 1422 ddi_regs_map_free(&ha->dev_handle); 1423 if (ha->sbus_fpga_iobase != NULL) { 1424 ddi_regs_map_free(&ha->sbus_fpga_dev_handle); 1425 } 1426 progress &= ~QL_REGS_MAPPED; 1427 } 1428 1429 if (progress & QL_SOFT_STATE_ALLOCED) { 1430 1431 ql_fcache_rel(ha->fcache); 1432 1433 kmem_free(ha->adapter_stats, 1434 sizeof (*ha->adapter_stats)); 1435 1436 kmem_free(ha->ub_array, sizeof (*ha->ub_array) * 1437 QL_UB_LIMIT); 1438 1439 kmem_free(ha->outstanding_cmds, 1440 sizeof (*ha->outstanding_cmds) * 1441 MAX_OUTSTANDING_COMMANDS); 1442 1443 if (ha->devpath != NULL) { 1444 kmem_free(ha->devpath, 1445 strlen(ha->devpath) + 1); 1446 } 1447 1448 kmem_free(ha->dev, sizeof (*ha->dev) * 1449 DEVICE_HEAD_LIST_SIZE); 1450 1451 if (ha->xioctl != NULL) { 1452 ql_free_xioctl_resource(ha); 1453 } 1454 1455 if (ha->fw_module != NULL) { 1456 (void) ddi_modclose(ha->fw_module); 1457 } 1458 (void) ql_el_trace_desc_dtor(ha); 1459 (void) ql_nvram_cache_desc_dtor(ha); 1460 1461 ddi_soft_state_free(ql_state, instance); 1462 progress &= ~QL_SOFT_STATE_ALLOCED; 1463 } 1464 1465 ddi_prop_remove_all(dip); 1466 rval = DDI_FAILURE; 1467 break; 1468 1469 case DDI_RESUME: 1470 rval = DDI_FAILURE; 1471 1472 ha = ddi_get_soft_state(ql_state, ddi_get_instance(dip)); 1473 if (ha == NULL) { 1474 
cmn_err(CE_WARN, "%s(%d): can't get soft state", 1475 QL_NAME, instance); 1476 break; 1477 } 1478 1479 ha->power_level = PM_LEVEL_D3; 1480 if (ha->pm_capable) { 1481 /* 1482 * Get ql_power to do power on initialization 1483 */ 1484 if (pm_raise_power(dip, QL_POWER_COMPONENT, 1485 PM_LEVEL_D0) != DDI_SUCCESS) { 1486 cmn_err(CE_WARN, "%s(%d): can't raise adapter" 1487 " power", QL_NAME, instance); 1488 } 1489 } 1490 1491 /* 1492 * There is a bug in DR that prevents PM framework 1493 * from calling ql_power. 1494 */ 1495 if (ha->power_level == PM_LEVEL_D3) { 1496 ha->power_level = PM_LEVEL_D0; 1497 1498 if (ql_initialize_adapter(ha) != QL_SUCCESS) { 1499 cmn_err(CE_WARN, "%s(%d): can't initialize the" 1500 " adapter", QL_NAME, instance); 1501 } 1502 1503 /* Wake up task_daemon. */ 1504 ql_awaken_task_daemon(ha, NULL, TASK_DAEMON_ALIVE_FLG, 1505 0); 1506 } 1507 1508 /* Acquire global state lock. */ 1509 GLOBAL_STATE_LOCK(); 1510 1511 /* Restart driver timer. */ 1512 if (ql_timer_timeout_id == NULL) { 1513 ql_timer_timeout_id = timeout(ql_timer, (void *)0, 1514 ql_timer_ticks); 1515 } 1516 1517 /* Release global state lock. */ 1518 GLOBAL_STATE_UNLOCK(); 1519 1520 /* Wake up command start routine. */ 1521 ADAPTER_STATE_LOCK(ha); 1522 ha->flags &= ~ADAPTER_SUSPENDED; 1523 ADAPTER_STATE_UNLOCK(ha); 1524 1525 /* 1526 * Transport doesn't make FC discovery in polled 1527 * mode; So we need the daemon thread's services 1528 * right here. 1529 */ 1530 (void) callb_generic_cpr(&ha->cprinfo, CB_CODE_CPR_RESUME); 1531 1532 rval = DDI_SUCCESS; 1533 1534 /* Restart IP if it was running. */ 1535 if (ha->flags & IP_ENABLED && !(ha->flags & IP_INITIALIZED)) { 1536 (void) ql_initialize_ip(ha); 1537 ql_isp_rcvbuf(ha); 1538 } 1539 break; 1540 1541 default: 1542 cmn_err(CE_WARN, "%s(%d): attach, unknown code:" 1543 " %x", QL_NAME, ddi_get_instance(dip), cmd); 1544 rval = DDI_FAILURE; 1545 break; 1546 } 1547 1548 kmem_free(buf, MAXPATHLEN); 1549 1550 if (rval != DDI_SUCCESS) { 1551 /*EMPTY*/ 1552 QL_PRINT_2(CE_CONT, "(%d): failed, rval = %xh\n", 1553 ddi_get_instance(dip), rval); 1554 } else { 1555 /*EMPTY*/ 1556 QL_PRINT_3(CE_CONT, "(%d): done\n", ddi_get_instance(dip)); 1557 } 1558 1559 return (rval); 1560 } 1561 1562 /* 1563 * ql_detach 1564 * Used to remove all the states associated with a given 1565 * instances of a device node prior to the removal of that 1566 * instance from the system. 1567 * 1568 * Input: 1569 * dip = pointer to device information structure. 1570 * cmd = type of detach. 1571 * 1572 * Returns: 1573 * DDI_SUCCESS or DDI_FAILURE. 1574 * 1575 * Context: 1576 * Kernel context. 
1577 */ 1578 static int 1579 ql_detach(dev_info_t *dip, ddi_detach_cmd_t cmd) 1580 { 1581 ql_adapter_state_t *ha, *vha; 1582 ql_tgt_t *tq; 1583 int delay_cnt; 1584 uint16_t index; 1585 ql_link_t *link; 1586 char *buf; 1587 timeout_id_t timer_id = NULL; 1588 int suspend, rval = DDI_SUCCESS; 1589 1590 ha = ddi_get_soft_state(ql_state, ddi_get_instance(dip)); 1591 if (ha == NULL) { 1592 QL_PRINT_2(CE_CONT, "(%d): no adapter\n", 1593 ddi_get_instance(dip)); 1594 return (DDI_FAILURE); 1595 } 1596 1597 QL_PRINT_3(CE_CONT, "(%d): started, cmd=%xh\n", ha->instance, cmd); 1598 1599 buf = (char *)(kmem_zalloc(MAXPATHLEN, KM_SLEEP)); 1600 1601 switch (cmd) { 1602 case DDI_DETACH: 1603 ADAPTER_STATE_LOCK(ha); 1604 ha->flags |= (ADAPTER_SUSPENDED | ABORT_CMDS_LOOP_DOWN_TMO); 1605 ADAPTER_STATE_UNLOCK(ha); 1606 1607 TASK_DAEMON_LOCK(ha); 1608 1609 if (ha->task_daemon_flags & TASK_DAEMON_ALIVE_FLG) { 1610 ha->task_daemon_flags |= TASK_DAEMON_STOP_FLG; 1611 cv_signal(&ha->cv_task_daemon); 1612 1613 TASK_DAEMON_UNLOCK(ha); 1614 1615 (void) ql_wait_for_td_stop(ha); 1616 1617 TASK_DAEMON_LOCK(ha); 1618 if (ha->task_daemon_flags & TASK_DAEMON_STOP_FLG) { 1619 ha->task_daemon_flags &= ~TASK_DAEMON_STOP_FLG; 1620 EL(ha, "failed, could not stop task daemon\n"); 1621 } 1622 } 1623 TASK_DAEMON_UNLOCK(ha); 1624 1625 GLOBAL_STATE_LOCK(); 1626 1627 /* Disable driver timer if no adapters. */ 1628 if (ql_timer_timeout_id && ql_hba.first == &ha->hba && 1629 ql_hba.last == &ha->hba) { 1630 timer_id = ql_timer_timeout_id; 1631 ql_timer_timeout_id = NULL; 1632 } 1633 ql_remove_link(&ql_hba, &ha->hba); 1634 1635 GLOBAL_STATE_UNLOCK(); 1636 1637 if (timer_id) { 1638 (void) untimeout(timer_id); 1639 } 1640 1641 if (ha->pm_capable) { 1642 if (pm_lower_power(dip, QL_POWER_COMPONENT, 1643 PM_LEVEL_D3) != DDI_SUCCESS) { 1644 cmn_err(CE_WARN, "%s(%d): failed to lower the" 1645 " power", QL_NAME, ha->instance); 1646 } 1647 } 1648 1649 /* 1650 * If pm_lower_power shutdown the adapter, there 1651 * isn't much else to do 1652 */ 1653 if (ha->power_level != PM_LEVEL_D3) { 1654 ql_halt(ha, PM_LEVEL_D3); 1655 } 1656 1657 /* Remove virtual ports. */ 1658 while ((vha = ha->vp_next) != NULL) { 1659 ql_vport_destroy(vha); 1660 } 1661 1662 /* Free target queues. */ 1663 for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) { 1664 link = ha->dev[index].first; 1665 while (link != NULL) { 1666 tq = link->base_address; 1667 link = link->next; 1668 ql_dev_free(ha, tq); 1669 } 1670 } 1671 1672 /* 1673 * Free unsolicited buffers. 1674 * If we are here then there are no ULPs still 1675 * alive that wish to talk to ql so free up 1676 * any SRB_IP_UB_UNUSED buffers that are 1677 * lingering around 1678 */ 1679 QL_UB_LOCK(ha); 1680 for (index = 0; index < QL_UB_LIMIT; index++) { 1681 fc_unsol_buf_t *ubp = ha->ub_array[index]; 1682 1683 if (ubp != NULL) { 1684 ql_srb_t *sp = ubp->ub_fca_private; 1685 1686 sp->flags |= SRB_UB_FREE_REQUESTED; 1687 1688 while (!(sp->flags & SRB_UB_IN_FCA) || 1689 (sp->flags & (SRB_UB_CALLBACK | 1690 SRB_UB_ACQUIRED))) { 1691 QL_UB_UNLOCK(ha); 1692 delay(drv_usectohz(100000)); 1693 QL_UB_LOCK(ha); 1694 } 1695 ha->ub_array[index] = NULL; 1696 1697 QL_UB_UNLOCK(ha); 1698 ql_free_unsolicited_buffer(ha, ubp); 1699 QL_UB_LOCK(ha); 1700 } 1701 } 1702 QL_UB_UNLOCK(ha); 1703 1704 /* Free any saved RISC code. 
*/ 1705 if (ha->risc_code != NULL) { 1706 kmem_free(ha->risc_code, ha->risc_code_size); 1707 ha->risc_code = NULL; 1708 ha->risc_code_size = 0; 1709 } 1710 1711 if (ha->fw_module != NULL) { 1712 (void) ddi_modclose(ha->fw_module); 1713 ha->fw_module = NULL; 1714 } 1715 1716 /* Free resources. */ 1717 ddi_prop_remove_all(dip); 1718 (void) fc_fca_detach(dip); 1719 kmem_free(ha->tran, sizeof (fc_fca_tran_t)); 1720 ddi_remove_minor_node(dip, "devctl"); 1721 if (ha->k_stats != NULL) { 1722 kstat_delete(ha->k_stats); 1723 } 1724 1725 if (CFG_IST(ha, CFG_SBUS_CARD)) { 1726 ddi_regs_map_free(&ha->sbus_config_handle); 1727 } else { 1728 ddi_regs_map_free(&ha->iomap_dev_handle); 1729 pci_config_teardown(&ha->pci_handle); 1730 } 1731 1732 ql_disable_intr(ha); 1733 ql_release_intr(ha); 1734 1735 ql_free_xioctl_resource(ha); 1736 1737 ql_destroy_mutex(ha); 1738 1739 ql_free_phys(ha, &ha->hba_buf); 1740 ql_free_phys(ha, &ha->fwexttracebuf); 1741 ql_free_phys(ha, &ha->fwfcetracebuf); 1742 1743 ddi_regs_map_free(&ha->dev_handle); 1744 if (ha->sbus_fpga_iobase != NULL) { 1745 ddi_regs_map_free(&ha->sbus_fpga_dev_handle); 1746 } 1747 1748 ql_fcache_rel(ha->fcache); 1749 if (ha->vcache != NULL) { 1750 kmem_free(ha->vcache, QL_24XX_VPD_SIZE); 1751 } 1752 1753 if (ha->pi_attrs != NULL) { 1754 kmem_free(ha->pi_attrs, sizeof (fca_port_attrs_t)); 1755 } 1756 1757 kmem_free(ha->adapter_stats, sizeof (*ha->adapter_stats)); 1758 1759 kmem_free(ha->ub_array, sizeof (*ha->ub_array) * QL_UB_LIMIT); 1760 1761 kmem_free(ha->outstanding_cmds, 1762 sizeof (*ha->outstanding_cmds) * MAX_OUTSTANDING_COMMANDS); 1763 1764 if (ha->n_port != NULL) { 1765 kmem_free(ha->n_port, sizeof (ql_n_port_info_t)); 1766 } 1767 1768 if (ha->devpath != NULL) { 1769 kmem_free(ha->devpath, strlen(ha->devpath) + 1); 1770 } 1771 1772 kmem_free(ha->dev, sizeof (*ha->dev) * DEVICE_HEAD_LIST_SIZE); 1773 1774 EL(ha, "detached\n"); 1775 1776 ddi_soft_state_free(ql_state, (int)ha->instance); 1777 1778 break; 1779 1780 case DDI_SUSPEND: 1781 ADAPTER_STATE_LOCK(ha); 1782 1783 delay_cnt = 0; 1784 ha->flags |= ADAPTER_SUSPENDED; 1785 while (ha->flags & ADAPTER_TIMER_BUSY && delay_cnt++ < 10) { 1786 ADAPTER_STATE_UNLOCK(ha); 1787 delay(drv_usectohz(1000000)); 1788 ADAPTER_STATE_LOCK(ha); 1789 } 1790 if (ha->busy || ha->flags & ADAPTER_TIMER_BUSY) { 1791 ha->flags &= ~ADAPTER_SUSPENDED; 1792 ADAPTER_STATE_UNLOCK(ha); 1793 rval = DDI_FAILURE; 1794 cmn_err(CE_WARN, "!%s(%d): Fail suspend" 1795 " busy %xh flags %xh", QL_NAME, ha->instance, 1796 ha->busy, ha->flags); 1797 break; 1798 } 1799 1800 ADAPTER_STATE_UNLOCK(ha); 1801 1802 if (ha->flags & IP_INITIALIZED) { 1803 (void) ql_shutdown_ip(ha); 1804 } 1805 1806 if ((suspend = ql_suspend_adapter(ha)) != QL_SUCCESS) { 1807 ADAPTER_STATE_LOCK(ha); 1808 ha->flags &= ~ADAPTER_SUSPENDED; 1809 ADAPTER_STATE_UNLOCK(ha); 1810 cmn_err(CE_WARN, "%s(%d): Fail suspend rval %xh", 1811 QL_NAME, ha->instance, suspend); 1812 1813 /* Restart IP if it was running. */ 1814 if (ha->flags & IP_ENABLED && 1815 !(ha->flags & IP_INITIALIZED)) { 1816 (void) ql_initialize_ip(ha); 1817 ql_isp_rcvbuf(ha); 1818 } 1819 rval = DDI_FAILURE; 1820 break; 1821 } 1822 1823 /* Acquire global state lock. */ 1824 GLOBAL_STATE_LOCK(); 1825 1826 /* Disable driver timer if last adapter. 
*/ 1827 if (ql_timer_timeout_id && ql_hba.first == &ha->hba && 1828 ql_hba.last == &ha->hba) { 1829 timer_id = ql_timer_timeout_id; 1830 ql_timer_timeout_id = NULL; 1831 } 1832 GLOBAL_STATE_UNLOCK(); 1833 1834 if (timer_id) { 1835 (void) untimeout(timer_id); 1836 } 1837 1838 EL(ha, "suspended\n"); 1839 1840 break; 1841 1842 default: 1843 rval = DDI_FAILURE; 1844 break; 1845 } 1846 1847 kmem_free(buf, MAXPATHLEN); 1848 1849 if (rval != DDI_SUCCESS) { 1850 if (ha != NULL) { 1851 EL(ha, "failed, rval = %xh\n", rval); 1852 } else { 1853 /*EMPTY*/ 1854 QL_PRINT_2(CE_CONT, "(%d): failed, rval = %xh\n", 1855 ddi_get_instance(dip), rval); 1856 } 1857 } else { 1858 /*EMPTY*/ 1859 QL_PRINT_3(CE_CONT, "(%d): done\n", ddi_get_instance(dip)); 1860 } 1861 1862 return (rval); 1863 } 1864 1865 1866 /* 1867 * ql_power 1868 * Power a device attached to the system. 1869 * 1870 * Input: 1871 * dip = pointer to device information structure. 1872 * component = device. 1873 * level = power level. 1874 * 1875 * Returns: 1876 * DDI_SUCCESS or DDI_FAILURE. 1877 * 1878 * Context: 1879 * Kernel context. 1880 */ 1881 /* ARGSUSED */ 1882 static int 1883 ql_power(dev_info_t *dip, int component, int level) 1884 { 1885 int rval = DDI_FAILURE; 1886 off_t csr; 1887 uint8_t saved_pm_val; 1888 ql_adapter_state_t *ha; 1889 char *buf; 1890 char *path; 1891 1892 ha = ddi_get_soft_state(ql_state, ddi_get_instance(dip)); 1893 if (ha == NULL || ha->pm_capable == 0) { 1894 QL_PRINT_2(CE_CONT, "(%d): no hba or PM not supported\n", 1895 ddi_get_instance(dip)); 1896 return (rval); 1897 } 1898 1899 QL_PRINT_10(CE_CONT, "(%d,%d): started\n", ha->instance, ha->vp_index); 1900 1901 buf = (char *)(kmem_zalloc(MAXPATHLEN, KM_SLEEP)); 1902 path = (char *)(kmem_zalloc(MAXPATHLEN, KM_SLEEP)); 1903 1904 if (component != QL_POWER_COMPONENT || (level != PM_LEVEL_D0 && 1905 level != PM_LEVEL_D3)) { 1906 EL(ha, "invalid, component=%xh or level=%xh\n", 1907 component, level); 1908 return (rval); 1909 } 1910 1911 GLOBAL_HW_LOCK(); 1912 csr = (uint8_t)ql_pci_config_get8(ha, PCI_CONF_CAP_PTR) + PCI_PMCSR; 1913 GLOBAL_HW_UNLOCK(); 1914 1915 (void) snprintf(buf, sizeof (buf), 1916 "Qlogic %s(%d): %s\n\t", QL_NAME, ddi_get_instance(dip), 1917 ddi_pathname(dip, path)); 1918 1919 switch (level) { 1920 case PM_LEVEL_D0: /* power up to D0 state - fully on */ 1921 1922 QL_PM_LOCK(ha); 1923 if (ha->power_level == PM_LEVEL_D0) { 1924 QL_PM_UNLOCK(ha); 1925 rval = DDI_SUCCESS; 1926 break; 1927 } 1928 1929 /* 1930 * Enable interrupts now 1931 */ 1932 saved_pm_val = ha->power_level; 1933 ha->power_level = PM_LEVEL_D0; 1934 QL_PM_UNLOCK(ha); 1935 1936 GLOBAL_HW_LOCK(); 1937 1938 ql_pci_config_put16(ha, csr, PCI_PMCSR_D0); 1939 1940 /* 1941 * Delay after reset, for chip to recover. 1942 * Otherwise causes system PANIC 1943 */ 1944 drv_usecwait(200000); 1945 1946 GLOBAL_HW_UNLOCK(); 1947 1948 if (ha->config_saved) { 1949 ha->config_saved = 0; 1950 if (QL_RESTORE_CONFIG_REGS(dip) != DDI_SUCCESS) { 1951 QL_PM_LOCK(ha); 1952 ha->power_level = saved_pm_val; 1953 QL_PM_UNLOCK(ha); 1954 cmn_err(CE_WARN, "%s failed to restore " 1955 "config regs", buf); 1956 break; 1957 } 1958 } 1959 1960 if (ql_initialize_adapter(ha) != QL_SUCCESS) { 1961 cmn_err(CE_WARN, "%s adapter initialization failed", 1962 buf); 1963 } 1964 1965 /* Wake up task_daemon. */ 1966 ql_awaken_task_daemon(ha, NULL, TASK_DAEMON_ALIVE_FLG | 1967 TASK_DAEMON_SLEEPING_FLG, 0); 1968 1969 /* Restart IP if it was running. 
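 * IP_ENABLED is set once the transport has handed down IS8802 (IP over FC)
 * unsolicited buffers in ql_ub_alloc(), while IP_INITIALIZED tracks whether
 * the firmware's IP support is currently programmed; the latter is
 * presumably lost when the chip is halted for D3, so on the way back to D0
 * the IP path is re-initialized and receive buffers are re-posted.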
*/ 1970 if (ha->flags & IP_ENABLED && !(ha->flags & IP_INITIALIZED)) { 1971 (void) ql_initialize_ip(ha); 1972 ql_isp_rcvbuf(ha); 1973 } 1974 1975 cmn_err(CE_NOTE, QL_BANG "ql_power(%d): %s is powered ON\n", 1976 ha->instance, QL_NAME); 1977 1978 rval = DDI_SUCCESS; 1979 break; 1980 1981 case PM_LEVEL_D3: /* power down to D3 state - off */ 1982 1983 QL_PM_LOCK(ha); 1984 1985 if (ha->busy || ((ha->task_daemon_flags & 1986 TASK_DAEMON_SLEEPING_FLG) == 0)) { 1987 QL_PM_UNLOCK(ha); 1988 break; 1989 } 1990 1991 if (ha->power_level == PM_LEVEL_D3) { 1992 rval = DDI_SUCCESS; 1993 QL_PM_UNLOCK(ha); 1994 break; 1995 } 1996 QL_PM_UNLOCK(ha); 1997 1998 if (QL_SAVE_CONFIG_REGS(dip) != DDI_SUCCESS) { 1999 cmn_err(CE_WARN, "!Qlogic %s(%d): %s failed to save" 2000 " config regs", QL_NAME, ha->instance, buf); 2001 break; 2002 } 2003 ha->config_saved = 1; 2004 2005 /* 2006 * Don't enable interrupts. Running mailbox commands with 2007 * interrupts enabled could cause hangs since pm_run_scan() 2008 * runs out of a callout thread and on single cpu systems 2009 * cv_timedwait(), called from ql_mailbox_command(), would 2010 * not get to run. 2011 */ 2012 TASK_DAEMON_LOCK(ha); 2013 ha->task_daemon_flags |= TASK_DAEMON_POWERING_DOWN; 2014 TASK_DAEMON_UNLOCK(ha); 2015 2016 ql_halt(ha, PM_LEVEL_D3); 2017 2018 /* 2019 * Setup ql_intr to ignore interrupts from here on. 2020 */ 2021 QL_PM_LOCK(ha); 2022 ha->power_level = PM_LEVEL_D3; 2023 QL_PM_UNLOCK(ha); 2024 2025 /* 2026 * Wait for ISR to complete. 2027 */ 2028 INTR_LOCK(ha); 2029 ql_pci_config_put16(ha, csr, PCI_PMCSR_D3HOT); 2030 INTR_UNLOCK(ha); 2031 2032 cmn_err(CE_NOTE, QL_BANG "ql_power(%d): %s is powered OFF\n", 2033 ha->instance, QL_NAME); 2034 2035 rval = DDI_SUCCESS; 2036 break; 2037 } 2038 2039 kmem_free(buf, MAXPATHLEN); 2040 kmem_free(path, MAXPATHLEN); 2041 2042 QL_PRINT_10(CE_CONT, "(%d,%d): done\n", ha->instance, ha->vp_index); 2043 2044 return (rval); 2045 } 2046 2047 /* 2048 * ql_quiesce 2049 * quiesce a device attached to the system. 2050 * 2051 * Input: 2052 * dip = pointer to device information structure. 2053 * 2054 * Returns: 2055 * DDI_SUCCESS 2056 * 2057 * Context: 2058 * Kernel context. 2059 */ 2060 static int 2061 ql_quiesce(dev_info_t *dip) 2062 { 2063 ql_adapter_state_t *ha; 2064 uint32_t timer; 2065 uint32_t stat; 2066 2067 ha = ddi_get_soft_state(ql_state, ddi_get_instance(dip)); 2068 if (ha == NULL) { 2069 /* Oh well.... */ 2070 QL_PRINT_2(CE_CONT, "(%d): no adapter\n", 2071 ddi_get_instance(dip)); 2072 return (DDI_SUCCESS); 2073 } 2074 2075 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance); 2076 2077 if (CFG_IST(ha, CFG_CTRL_242581)) { 2078 WRT32_IO_REG(ha, hccr, HC24_CLR_RISC_INT); 2079 WRT16_IO_REG(ha, mailbox[0], MBC_STOP_FIRMWARE); 2080 WRT32_IO_REG(ha, hccr, HC24_SET_HOST_INT); 2081 for (timer = 0; timer < 30000; timer++) { 2082 stat = RD32_IO_REG(ha, intr_info_lo); 2083 if (stat & BIT_15) { 2084 if ((stat & 0xff) < 0x12) { 2085 WRT32_IO_REG(ha, hccr, 2086 HC24_CLR_RISC_INT); 2087 break; 2088 } 2089 WRT32_IO_REG(ha, hccr, HC24_CLR_RISC_INT); 2090 } 2091 drv_usecwait(100); 2092 } 2093 /* Reset the chip. */ 2094 WRT32_IO_REG(ha, ctrl_status, ISP_RESET | DMA_SHUTDOWN | 2095 MWB_4096_BYTES); 2096 drv_usecwait(100); 2097 2098 } else { 2099 /* Disable ISP interrupts. */ 2100 WRT16_IO_REG(ha, ictrl, 0); 2101 /* Select RISC module registers. */ 2102 WRT16_IO_REG(ha, ctrl_status, 0); 2103 /* Reset ISP semaphore. */ 2104 WRT16_IO_REG(ha, semaphore, 0); 2105 /* Reset RISC module. 
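 * Pulsing HC_RESET_RISC and then HC_RELEASE_RISC through the host command
 * register is intended to quiet the pre-24xx ISPs (no further DMA or
 * interrupts) without mailbox commands; quiesce(9E) is called for fast
 * reboot with interrupts unusable, so only direct register writes are
 * safe here.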
*/ 2106 WRT16_IO_REG(ha, hccr, HC_RESET_RISC); 2107 /* Release RISC module. */ 2108 WRT16_IO_REG(ha, hccr, HC_RELEASE_RISC); 2109 } 2110 2111 ql_disable_intr(ha); 2112 2113 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance); 2114 2115 return (DDI_SUCCESS); 2116 } 2117 2118 /* ************************************************************************ */ 2119 /* Fibre Channel Adapter (FCA) Transport Functions. */ 2120 /* ************************************************************************ */ 2121 2122 /* 2123 * ql_bind_port 2124 * Handling port binding. The FC Transport attempts to bind an FCA port 2125 * when it is ready to start transactions on the port. The FC Transport 2126 * will call the fca_bind_port() function specified in the fca_transport 2127 * structure it receives. The FCA must fill in the port_info structure 2128 * passed in the call and also stash the information for future calls. 2129 * 2130 * Input: 2131 * dip = pointer to FCA information structure. 2132 * port_info = pointer to port information structure. 2133 * bind_info = pointer to bind information structure. 2134 * 2135 * Returns: 2136 * NULL = failure 2137 * 2138 * Context: 2139 * Kernel context. 2140 */ 2141 static opaque_t 2142 ql_bind_port(dev_info_t *dip, fc_fca_port_info_t *port_info, 2143 fc_fca_bind_info_t *bind_info) 2144 { 2145 ql_adapter_state_t *ha, *vha; 2146 opaque_t fca_handle = NULL; 2147 port_id_t d_id; 2148 int port_npiv = bind_info->port_npiv; 2149 uchar_t *port_nwwn = bind_info->port_nwwn.raw_wwn; 2150 uchar_t *port_pwwn = bind_info->port_pwwn.raw_wwn; 2151 2152 /* get state info based on the dip */ 2153 ha = ddi_get_soft_state(ql_state, ddi_get_instance(dip)); 2154 if (ha == NULL) { 2155 QL_PRINT_2(CE_CONT, "(%d): no adapter\n", 2156 ddi_get_instance(dip)); 2157 return (NULL); 2158 } 2159 QL_PRINT_10(CE_CONT, "(%d,%d): started\n", ha->instance, ha->vp_index); 2160 2161 /* Verify port number is supported. */ 2162 if (port_npiv != 0) { 2163 if (!(ha->flags & VP_ENABLED)) { 2164 QL_PRINT_2(CE_CONT, "(%d): FC_NPIV_NOT_SUPPORTED\n", 2165 ha->instance); 2166 port_info->pi_error = FC_NPIV_NOT_SUPPORTED; 2167 return (NULL); 2168 } 2169 if (!(ha->flags & POINT_TO_POINT)) { 2170 QL_PRINT_2(CE_CONT, "(%d): FC_NPIV_WRONG_TOPOLOGY\n", 2171 ha->instance); 2172 port_info->pi_error = FC_NPIV_WRONG_TOPOLOGY; 2173 return (NULL); 2174 } 2175 if (!(ha->flags & FDISC_ENABLED)) { 2176 QL_PRINT_2(CE_CONT, "(%d): switch does not support " 2177 "FDISC\n", ha->instance); 2178 port_info->pi_error = FC_NPIV_FDISC_FAILED; 2179 return (NULL); 2180 } 2181 if (bind_info->port_num > (CFG_IST(ha, CFG_CTRL_2422) ? 2182 MAX_24_VIRTUAL_PORTS : MAX_25_VIRTUAL_PORTS)) { 2183 QL_PRINT_2(CE_CONT, "(%d): port number=%d " 2184 "FC_OUTOFBOUNDS\n", ha->instance); 2185 port_info->pi_error = FC_OUTOFBOUNDS; 2186 return (NULL); 2187 } 2188 } else if (bind_info->port_num != 0) { 2189 QL_PRINT_2(CE_CONT, "(%d): failed, port number=%d is not " 2190 "supported\n", ha->instance, bind_info->port_num); 2191 port_info->pi_error = FC_OUTOFBOUNDS; 2192 return (NULL); 2193 } 2194 2195 /* Locate port context. */ 2196 for (vha = ha; vha != NULL; vha = vha->vp_next) { 2197 if (vha->vp_index == bind_info->port_num) { 2198 break; 2199 } 2200 } 2201 2202 /* If virtual port does not exist. 
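 * vp_index 0 is the physical port and always exists; NPIV ports are created
 * lazily, the first time the transport binds that port number, with
 * ql_vport_create() presumably allocating a new ql_adapter_state_t and
 * linking it onto the parent's vp_next chain searched above.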
*/ 2203 if (vha == NULL) { 2204 vha = ql_vport_create(ha, (uint8_t)bind_info->port_num); 2205 } 2206 2207 /* make sure this port isn't already bound */ 2208 if (vha->flags & FCA_BOUND) { 2209 port_info->pi_error = FC_ALREADY; 2210 } else { 2211 if (vha->vp_index != 0) { 2212 bcopy(port_nwwn, 2213 vha->loginparams.node_ww_name.raw_wwn, 8); 2214 bcopy(port_pwwn, 2215 vha->loginparams.nport_ww_name.raw_wwn, 8); 2216 } 2217 if (vha->vp_index != 0 && !(vha->flags & VP_ENABLED)) { 2218 if (ql_vport_enable(vha) != QL_SUCCESS) { 2219 QL_PRINT_2(CE_CONT, "(%d): failed to enable " 2220 "virtual port=%d\n", ha->instance, 2221 vha->vp_index); 2222 port_info->pi_error = FC_NPIV_FDISC_FAILED; 2223 return (NULL); 2224 } 2225 cmn_err(CE_CONT, "!Qlogic %s(%d) NPIV(%d) " 2226 "WWPN=%02x%02x%02x%02x%02x%02x%02x%02x : " 2227 "WWNN=%02x%02x%02x%02x%02x%02x%02x%02x\n", 2228 QL_NAME, ha->instance, vha->vp_index, 2229 port_pwwn[0], port_pwwn[1], port_pwwn[2], 2230 port_pwwn[3], port_pwwn[4], port_pwwn[5], 2231 port_pwwn[6], port_pwwn[7], 2232 port_nwwn[0], port_nwwn[1], port_nwwn[2], 2233 port_nwwn[3], port_nwwn[4], port_nwwn[5], 2234 port_nwwn[6], port_nwwn[7]); 2235 } 2236 2237 /* stash the bind_info supplied by the FC Transport */ 2238 vha->bind_info.port_handle = bind_info->port_handle; 2239 vha->bind_info.port_statec_cb = 2240 bind_info->port_statec_cb; 2241 vha->bind_info.port_unsol_cb = bind_info->port_unsol_cb; 2242 2243 /* Set port's source ID. */ 2244 port_info->pi_s_id.port_id = vha->d_id.b24; 2245 2246 /* copy out the default login parameters */ 2247 bcopy((void *)&vha->loginparams, 2248 (void *)&port_info->pi_login_params, 2249 sizeof (la_els_logi_t)); 2250 2251 /* Set port's hard address if enabled. */ 2252 port_info->pi_hard_addr.hard_addr = 0; 2253 if (bind_info->port_num == 0) { 2254 d_id.b24 = ha->d_id.b24; 2255 if (CFG_IST(ha, CFG_CTRL_242581)) { 2256 if (ha->init_ctrl_blk.cb24. 2257 firmware_options_1[0] & BIT_0) { 2258 d_id.b.al_pa = ql_index_to_alpa[ha-> 2259 init_ctrl_blk.cb24. 2260 hard_address[0]]; 2261 port_info->pi_hard_addr.hard_addr = 2262 d_id.b24; 2263 } 2264 } else if (ha->init_ctrl_blk.cb.firmware_options[0] & 2265 BIT_0) { 2266 d_id.b.al_pa = ql_index_to_alpa[ha-> 2267 init_ctrl_blk.cb.hard_address[0]]; 2268 port_info->pi_hard_addr.hard_addr = d_id.b24; 2269 } 2270 2271 /* Set the node id data */ 2272 if (ql_get_rnid_params(ha, 2273 sizeof (port_info->pi_rnid_params.params), 2274 (caddr_t)&port_info->pi_rnid_params.params) == 2275 QL_SUCCESS) { 2276 port_info->pi_rnid_params.status = FC_SUCCESS; 2277 } else { 2278 port_info->pi_rnid_params.status = FC_FAILURE; 2279 } 2280 2281 /* Populate T11 FC-HBA details */ 2282 ql_populate_hba_fru_details(ha, port_info); 2283 ha->pi_attrs = kmem_zalloc(sizeof (fca_port_attrs_t), 2284 KM_SLEEP); 2285 if (ha->pi_attrs != NULL) { 2286 bcopy(&port_info->pi_attrs, ha->pi_attrs, 2287 sizeof (fca_port_attrs_t)); 2288 } 2289 } else { 2290 port_info->pi_rnid_params.status = FC_FAILURE; 2291 if (ha->pi_attrs != NULL) { 2292 bcopy(ha->pi_attrs, &port_info->pi_attrs, 2293 sizeof (fca_port_attrs_t)); 2294 } 2295 } 2296 2297 /* Generate handle for this FCA. */ 2298 fca_handle = (opaque_t)vha; 2299 2300 ADAPTER_STATE_LOCK(ha); 2301 vha->flags |= FCA_BOUND; 2302 ADAPTER_STATE_UNLOCK(ha); 2303 /* Set port's current state. 
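 * This hands the transport the FCA's current view of the link at bind time;
 * any later transitions are reported asynchronously through the
 * port_statec_cb callback stashed from bind_info above.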
*/ 2304 port_info->pi_port_state = vha->state; 2305 } 2306 2307 QL_PRINT_10(CE_CONT, "(%d,%d): done, pi_port_state=%xh, " 2308 "pi_s_id.port_id=%xh\n", ha->instance, ha->vp_index, 2309 port_info->pi_port_state, port_info->pi_s_id.port_id); 2310 2311 return (fca_handle); 2312 } 2313 2314 /* 2315 * ql_unbind_port 2316 * To unbind a Fibre Channel Adapter from an FC Port driver. 2317 * 2318 * Input: 2319 * fca_handle = handle setup by ql_bind_port(). 2320 * 2321 * Context: 2322 * Kernel context. 2323 */ 2324 static void 2325 ql_unbind_port(opaque_t fca_handle) 2326 { 2327 ql_adapter_state_t *ha; 2328 ql_tgt_t *tq; 2329 uint32_t flgs; 2330 2331 ha = ql_fca_handle_to_state(fca_handle); 2332 if (ha == NULL) { 2333 /*EMPTY*/ 2334 QL_PRINT_2(CE_CONT, "failed, no adapter=%ph\n", 2335 (void *)fca_handle); 2336 } else { 2337 QL_PRINT_10(CE_CONT, "(%d,%d): started\n", ha->instance, 2338 ha->vp_index); 2339 2340 if (!(ha->flags & FCA_BOUND)) { 2341 /*EMPTY*/ 2342 QL_PRINT_2(CE_CONT, "(%d): port=%d already unbound\n", 2343 ha->instance, ha->vp_index); 2344 } else { 2345 if (ha->vp_index != 0 && ha->flags & VP_ENABLED) { 2346 if ((tq = ql_loop_id_to_queue(ha, 2347 FL_PORT_24XX_HDL)) != NULL) { 2348 (void) ql_logout_fabric_port(ha, tq); 2349 } 2350 (void) ql_vport_control(ha, (uint8_t) 2351 (CFG_IST(ha, CFG_CTRL_2425) ? 2352 VPC_DISABLE_INIT : VPC_DISABLE_LOGOUT)); 2353 flgs = FCA_BOUND | VP_ENABLED; 2354 } else { 2355 flgs = FCA_BOUND; 2356 } 2357 ADAPTER_STATE_LOCK(ha); 2358 ha->flags &= ~flgs; 2359 ADAPTER_STATE_UNLOCK(ha); 2360 } 2361 2362 QL_PRINT_10(CE_CONT, "(%d,%d): done\n", ha->instance, 2363 ha->vp_index); 2364 } 2365 } 2366 2367 /* 2368 * ql_init_pkt 2369 * Initialize FCA portion of packet. 2370 * 2371 * Input: 2372 * fca_handle = handle setup by ql_bind_port(). 2373 * pkt = pointer to fc_packet. 2374 * 2375 * Returns: 2376 * FC_SUCCESS - the packet has successfully been initialized. 2377 * FC_UNBOUND - the fca_handle specified is not bound. 2378 * FC_NOMEM - the FCA failed initialization due to an allocation error. 2379 * FC_FAILURE - the FCA failed initialization for undisclosed reasons 2380 * 2381 * Context: 2382 * Kernel context. 2383 */ 2384 /* ARGSUSED */ 2385 static int 2386 ql_init_pkt(opaque_t fca_handle, fc_packet_t *pkt, int sleep) 2387 { 2388 ql_adapter_state_t *ha; 2389 ql_srb_t *sp; 2390 2391 ha = ql_fca_handle_to_state(fca_handle); 2392 if (ha == NULL) { 2393 QL_PRINT_2(CE_CONT, "failed, no adapter=%ph\n", 2394 (void *)fca_handle); 2395 return (FC_UNBOUND); 2396 } 2397 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance); 2398 2399 sp = (ql_srb_t *)pkt->pkt_fca_private; 2400 sp->flags = 0; 2401 2402 /* init cmd links */ 2403 sp->cmd.base_address = sp; 2404 sp->cmd.prev = NULL; 2405 sp->cmd.next = NULL; 2406 sp->cmd.head = NULL; 2407 2408 /* init watchdog links */ 2409 sp->wdg.base_address = sp; 2410 sp->wdg.prev = NULL; 2411 sp->wdg.next = NULL; 2412 sp->wdg.head = NULL; 2413 sp->pkt = pkt; 2414 sp->ha = ha; 2415 sp->magic_number = QL_FCA_BRAND; 2416 2417 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance); 2418 2419 return (FC_SUCCESS); 2420 } 2421 2422 /* 2423 * ql_un_init_pkt 2424 * Release all local resources bound to packet. 2425 * 2426 * Input: 2427 * fca_handle = handle setup by ql_bind_port(). 2428 * pkt = pointer to fc_packet. 2429 * 2430 * Returns: 2431 * FC_SUCCESS - the packet has successfully been invalidated. 2432 * FC_UNBOUND - the fca_handle specified is not bound. 2433 * FC_BADPACKET - the packet has not been initialized or has 2434 * already been freed by this FCA. 
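 *	The FCA recognizes its own packets by the QL_FCA_BRAND value that
 *	ql_init_pkt() stored in the packet's FCA-private SRB; clearing that
 *	brand here is what makes a second un-init fail with FC_BADPACKET.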
2435 * 2436 * Context: 2437 * Kernel context. 2438 */ 2439 static int 2440 ql_un_init_pkt(opaque_t fca_handle, fc_packet_t *pkt) 2441 { 2442 ql_adapter_state_t *ha; 2443 int rval; 2444 ql_srb_t *sp; 2445 2446 ha = ql_fca_handle_to_state(fca_handle); 2447 if (ha == NULL) { 2448 QL_PRINT_2(CE_CONT, "failed, no adapter=%ph\n", 2449 (void *)fca_handle); 2450 return (FC_UNBOUND); 2451 } 2452 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance); 2453 2454 sp = (ql_srb_t *)pkt->pkt_fca_private; 2455 2456 if (sp->magic_number != QL_FCA_BRAND) { 2457 EL(ha, "failed, FC_BADPACKET\n"); 2458 rval = FC_BADPACKET; 2459 } else { 2460 sp->magic_number = NULL; 2461 2462 rval = FC_SUCCESS; 2463 } 2464 2465 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance); 2466 2467 return (rval); 2468 } 2469 2470 /* 2471 * ql_els_send 2472 * Issue a extended link service request. 2473 * 2474 * Input: 2475 * fca_handle = handle setup by ql_bind_port(). 2476 * pkt = pointer to fc_packet. 2477 * 2478 * Returns: 2479 * FC_SUCCESS - the command was successful. 2480 * FC_ELS_FREJECT - the command was rejected by a Fabric. 2481 * FC_ELS_PREJECT - the command was rejected by an N-port. 2482 * FC_TRANSPORT_ERROR - a transport error occurred. 2483 * FC_UNBOUND - the fca_handle specified is not bound. 2484 * FC_ELS_BAD - the FCA can not issue the requested ELS. 2485 * 2486 * Context: 2487 * Kernel context. 2488 */ 2489 static int 2490 ql_els_send(opaque_t fca_handle, fc_packet_t *pkt) 2491 { 2492 ql_adapter_state_t *ha; 2493 int rval; 2494 clock_t timer = drv_usectohz(30000000); 2495 ls_code_t els; 2496 la_els_rjt_t rjt; 2497 ql_srb_t *sp = (ql_srb_t *)pkt->pkt_fca_private; 2498 2499 /* Verify proper command. */ 2500 ha = ql_cmd_setup(fca_handle, pkt, &rval); 2501 if (ha == NULL) { 2502 QL_PRINT_2(CE_CONT, "failed, ql_cmd_setup=%xh, fcah=%ph\n", 2503 rval, fca_handle); 2504 return (FC_INVALID_REQUEST); 2505 } 2506 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance); 2507 2508 /* Wait for suspension to end. */ 2509 TASK_DAEMON_LOCK(ha); 2510 while (ha->task_daemon_flags & QL_SUSPENDED) { 2511 ha->task_daemon_flags |= SUSPENDED_WAKEUP_FLG; 2512 2513 /* 30 seconds from now */ 2514 if (cv_reltimedwait(&ha->pha->cv_dr_suspended, 2515 &ha->pha->task_daemon_mutex, timer, TR_CLOCK_TICK) == -1) { 2516 /* 2517 * The timeout time 'timer' was 2518 * reached without the condition 2519 * being signaled. 2520 */ 2521 pkt->pkt_state = FC_PKT_TRAN_BSY; 2522 pkt->pkt_reason = FC_REASON_XCHG_BSY; 2523 2524 /* Release task daemon lock. */ 2525 TASK_DAEMON_UNLOCK(ha); 2526 2527 EL(ha, "QL_SUSPENDED failed=%xh\n", 2528 QL_FUNCTION_TIMEOUT); 2529 return (FC_TRAN_BUSY); 2530 } 2531 } 2532 /* Release task daemon lock. */ 2533 TASK_DAEMON_UNLOCK(ha); 2534 2535 /* Setup response header. 
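 * The response header is pre-built by mirroring the command header and
 * swapping the source and destination IDs, so that ELS requests which the
 * driver completes locally (for example the LS_RJT built in the default
 * case below) still hand the ULP a well-formed response frame header.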
*/ 2536 bcopy((void *)&pkt->pkt_cmd_fhdr, (void *)&pkt->pkt_resp_fhdr, 2537 sizeof (fc_frame_hdr_t)); 2538 2539 if (pkt->pkt_rsplen) { 2540 bzero((void *)pkt->pkt_resp, pkt->pkt_rsplen); 2541 } 2542 2543 pkt->pkt_resp_fhdr.d_id = ha->d_id.b24; 2544 pkt->pkt_resp_fhdr.s_id = pkt->pkt_cmd_fhdr.d_id; 2545 pkt->pkt_resp_fhdr.r_ctl = R_CTL_EXTENDED_SVC | 2546 R_CTL_SOLICITED_CONTROL; 2547 pkt->pkt_resp_fhdr.f_ctl = F_CTL_XCHG_CONTEXT | F_CTL_LAST_SEQ | 2548 F_CTL_END_SEQ; 2549 2550 sp->flags &= ~(SRB_UB_CALLBACK | SRB_UB_RSCN | SRB_UB_FCP | 2551 SRB_FCP_CMD_PKT | SRB_FCP_DATA_PKT | SRB_FCP_RSP_PKT | 2552 SRB_IP_PKT | SRB_COMMAND_TIMEOUT | SRB_UB_ACQUIRED | SRB_MS_PKT); 2553 2554 sp->flags |= SRB_ELS_PKT; 2555 2556 /* map the type of ELS to a function */ 2557 ddi_rep_get8(pkt->pkt_cmd_acc, (uint8_t *)&els, 2558 (uint8_t *)pkt->pkt_cmd, sizeof (els), DDI_DEV_AUTOINCR); 2559 2560 #if 0 2561 QL_PRINT_3(CE_CONT, "(%d): command fhdr:\n", ha->instance); 2562 QL_DUMP_3((uint8_t *)&pkt->pkt_cmd_fhdr, 32, 2563 sizeof (fc_frame_hdr_t) / 4); 2564 QL_PRINT_3(CE_CONT, "(%d): command:\n", ha->instance); 2565 QL_DUMP_3((uint8_t *)&els, 32, sizeof (els) / 4); 2566 #endif 2567 2568 sp->iocb = ha->els_cmd; 2569 sp->req_cnt = 1; 2570 2571 switch (els.ls_code) { 2572 case LA_ELS_RJT: 2573 case LA_ELS_ACC: 2574 EL(ha, "LA_ELS_RJT\n"); 2575 pkt->pkt_state = FC_PKT_SUCCESS; 2576 rval = FC_SUCCESS; 2577 break; 2578 case LA_ELS_PLOGI: 2579 case LA_ELS_PDISC: 2580 rval = ql_els_plogi(ha, pkt); 2581 break; 2582 case LA_ELS_FLOGI: 2583 case LA_ELS_FDISC: 2584 rval = ql_els_flogi(ha, pkt); 2585 break; 2586 case LA_ELS_LOGO: 2587 rval = ql_els_logo(ha, pkt); 2588 break; 2589 case LA_ELS_PRLI: 2590 rval = ql_els_prli(ha, pkt); 2591 break; 2592 case LA_ELS_PRLO: 2593 rval = ql_els_prlo(ha, pkt); 2594 break; 2595 case LA_ELS_ADISC: 2596 rval = ql_els_adisc(ha, pkt); 2597 break; 2598 case LA_ELS_LINIT: 2599 rval = ql_els_linit(ha, pkt); 2600 break; 2601 case LA_ELS_LPC: 2602 rval = ql_els_lpc(ha, pkt); 2603 break; 2604 case LA_ELS_LSTS: 2605 rval = ql_els_lsts(ha, pkt); 2606 break; 2607 case LA_ELS_SCR: 2608 rval = ql_els_scr(ha, pkt); 2609 break; 2610 case LA_ELS_RSCN: 2611 rval = ql_els_rscn(ha, pkt); 2612 break; 2613 case LA_ELS_FARP_REQ: 2614 rval = ql_els_farp_req(ha, pkt); 2615 break; 2616 case LA_ELS_FARP_REPLY: 2617 rval = ql_els_farp_reply(ha, pkt); 2618 break; 2619 case LA_ELS_RLS: 2620 rval = ql_els_rls(ha, pkt); 2621 break; 2622 case LA_ELS_RNID: 2623 rval = ql_els_rnid(ha, pkt); 2624 break; 2625 default: 2626 EL(ha, "LA_ELS_RJT, FC_REASON_CMD_UNSUPPORTED=%xh\n", 2627 els.ls_code); 2628 /* Build RJT. */ 2629 bzero(&rjt, sizeof (rjt)); 2630 rjt.ls_code.ls_code = LA_ELS_RJT; 2631 rjt.reason = FC_REASON_CMD_UNSUPPORTED; 2632 2633 ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&rjt, 2634 (uint8_t *)pkt->pkt_resp, sizeof (rjt), DDI_DEV_AUTOINCR); 2635 2636 pkt->pkt_state = FC_PKT_LOCAL_RJT; 2637 pkt->pkt_reason = FC_REASON_UNSUPPORTED; 2638 rval = FC_SUCCESS; 2639 break; 2640 } 2641 2642 #if 0 2643 QL_PRINT_3(CE_CONT, "(%d): response fhdr:\n", ha->instance); 2644 QL_DUMP_3((uint8_t *)&pkt->pkt_resp_fhdr, 32, 2645 sizeof (fc_frame_hdr_t) / 4); 2646 #endif 2647 /* 2648 * Return success if the srb was consumed by an iocb. The packet 2649 * completion callback will be invoked by the response handler. 
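 * Otherwise the ELS was completed synchronously inside the driver, and the
 * task daemon is woken to run the ULP's pkt_comp callback, unless the caller
 * requested polled (FC_TRAN_NO_INTR) semantics or supplied no callback.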
2650 */ 2651 if (rval == QL_CONSUMED) { 2652 rval = FC_SUCCESS; 2653 } else if (rval == FC_SUCCESS && 2654 !(pkt->pkt_tran_flags & FC_TRAN_NO_INTR) && pkt->pkt_comp) { 2655 /* Do command callback only if no error */ 2656 ql_awaken_task_daemon(ha, sp, 0, 0); 2657 } 2658 2659 if (rval != FC_SUCCESS) { 2660 EL(ha, "failed, rval = %xh\n", rval); 2661 } else { 2662 /*EMPTY*/ 2663 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance); 2664 } 2665 return (rval); 2666 } 2667 2668 /* 2669 * ql_get_cap 2670 * Export FCA hardware and software capabilities. 2671 * 2672 * Input: 2673 * fca_handle = handle setup by ql_bind_port(). 2674 * cap = pointer to the capabilities string. 2675 * ptr = buffer pointer for return capability. 2676 * 2677 * Returns: 2678 * FC_CAP_ERROR - no such capability 2679 * FC_CAP_FOUND - the capability was returned and cannot be set 2680 * FC_CAP_SETTABLE - the capability was returned and can be set 2681 * FC_UNBOUND - the fca_handle specified is not bound. 2682 * 2683 * Context: 2684 * Kernel context. 2685 */ 2686 static int 2687 ql_get_cap(opaque_t fca_handle, char *cap, void *ptr) 2688 { 2689 ql_adapter_state_t *ha; 2690 int rval; 2691 uint32_t *rptr = (uint32_t *)ptr; 2692 2693 ha = ql_fca_handle_to_state(fca_handle); 2694 if (ha == NULL) { 2695 QL_PRINT_2(CE_CONT, "failed, no adapter=%ph\n", 2696 (void *)fca_handle); 2697 return (FC_UNBOUND); 2698 } 2699 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance); 2700 2701 if (strcmp(cap, FC_NODE_WWN) == 0) { 2702 bcopy((void *)&ha->loginparams.node_ww_name.raw_wwn[0], 2703 ptr, 8); 2704 rval = FC_CAP_FOUND; 2705 } else if (strcmp(cap, FC_LOGIN_PARAMS) == 0) { 2706 bcopy((void *)&ha->loginparams, ptr, 2707 sizeof (la_els_logi_t)); 2708 rval = FC_CAP_FOUND; 2709 } else if (strcmp(cap, FC_CAP_UNSOL_BUF) == 0) { 2710 *rptr = (uint32_t)QL_UB_LIMIT; 2711 rval = FC_CAP_FOUND; 2712 } else if (strcmp(cap, FC_CAP_NOSTREAM_ON_UNALIGN_BUF) == 0) { 2713 2714 dev_info_t *psydip = NULL; 2715 #ifdef __sparc 2716 /* 2717 * Disable streaming for certain 2 chip adapters 2718 * below Psycho to handle Psycho byte hole issue. 
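 * The loop below walks up the devinfo tree looking for a "pcipsy" nexus and
 * only applies to multi-chip, non-SBus boards.  When one is found the FCA
 * reports FC_NO_STREAMING, presumably so the transport avoids streaming DMA
 * mappings for unaligned buffers on that hardware.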
2719 */ 2720 if ((CFG_IST(ha, CFG_MULTI_CHIP_ADAPTER)) && 2721 (!CFG_IST(ha, CFG_SBUS_CARD))) { 2722 for (psydip = ddi_get_parent(ha->dip); psydip; 2723 psydip = ddi_get_parent(psydip)) { 2724 if (strcmp(ddi_driver_name(psydip), 2725 "pcipsy") == 0) { 2726 break; 2727 } 2728 } 2729 } 2730 #endif /* __sparc */ 2731 2732 if (psydip) { 2733 *rptr = (uint32_t)FC_NO_STREAMING; 2734 EL(ha, "No Streaming\n"); 2735 } else { 2736 *rptr = (uint32_t)FC_ALLOW_STREAMING; 2737 EL(ha, "Allow Streaming\n"); 2738 } 2739 rval = FC_CAP_FOUND; 2740 } else if (strcmp(cap, FC_CAP_PAYLOAD_SIZE) == 0) { 2741 if (CFG_IST(ha, CFG_CTRL_242581)) { 2742 *rptr = (uint32_t)CHAR_TO_SHORT( 2743 ha->init_ctrl_blk.cb24.max_frame_length[0], 2744 ha->init_ctrl_blk.cb24.max_frame_length[1]); 2745 } else { 2746 *rptr = (uint32_t)CHAR_TO_SHORT( 2747 ha->init_ctrl_blk.cb.max_frame_length[0], 2748 ha->init_ctrl_blk.cb.max_frame_length[1]); 2749 } 2750 rval = FC_CAP_FOUND; 2751 } else if (strcmp(cap, FC_CAP_POST_RESET_BEHAVIOR) == 0) { 2752 *rptr = FC_RESET_RETURN_ALL; 2753 rval = FC_CAP_FOUND; 2754 } else if (strcmp(cap, FC_CAP_FCP_DMA) == 0) { 2755 *rptr = FC_NO_DVMA_SPACE; 2756 rval = FC_CAP_FOUND; 2757 } else { 2758 EL(ha, "unknown=%s, FC_CAP_ERROR\n", cap); 2759 rval = FC_CAP_ERROR; 2760 } 2761 2762 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance); 2763 2764 return (rval); 2765 } 2766 2767 /* 2768 * ql_set_cap 2769 * Allow the FC Transport to set FCA capabilities if possible. 2770 * 2771 * Input: 2772 * fca_handle = handle setup by ql_bind_port(). 2773 * cap = pointer to the capabilities string. 2774 * ptr = buffer pointer for capability. 2775 * 2776 * Returns: 2777 * FC_CAP_ERROR - no such capability 2778 * FC_CAP_FOUND - the capability cannot be set by the FC Transport. 2779 * FC_CAP_SETTABLE - the capability was successfully set. 2780 * FC_UNBOUND - the fca_handle specified is not bound. 2781 * 2782 * Context: 2783 * Kernel context. 2784 */ 2785 /* ARGSUSED */ 2786 static int 2787 ql_set_cap(opaque_t fca_handle, char *cap, void *ptr) 2788 { 2789 ql_adapter_state_t *ha; 2790 int rval; 2791 2792 ha = ql_fca_handle_to_state(fca_handle); 2793 if (ha == NULL) { 2794 QL_PRINT_2(CE_CONT, "failed, no adapter=%ph\n", 2795 (void *)fca_handle); 2796 return (FC_UNBOUND); 2797 } 2798 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance); 2799 2800 if (strcmp(cap, FC_NODE_WWN) == 0) { 2801 rval = FC_CAP_FOUND; 2802 } else if (strcmp(cap, FC_LOGIN_PARAMS) == 0) { 2803 rval = FC_CAP_FOUND; 2804 } else if (strcmp(cap, FC_CAP_UNSOL_BUF) == 0) { 2805 rval = FC_CAP_FOUND; 2806 } else if (strcmp(cap, FC_CAP_PAYLOAD_SIZE) == 0) { 2807 rval = FC_CAP_FOUND; 2808 } else if (strcmp(cap, FC_CAP_POST_RESET_BEHAVIOR) == 0) { 2809 rval = FC_CAP_FOUND; 2810 } else { 2811 EL(ha, "unknown=%s, FC_CAP_ERROR\n", cap); 2812 rval = FC_CAP_ERROR; 2813 } 2814 2815 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance); 2816 2817 return (rval); 2818 } 2819 2820 /* 2821 * ql_getmap 2822 * Request of Arbitrated Loop (AL-PA) map. 2823 * 2824 * Input: 2825 * fca_handle = handle setup by ql_bind_port(). 2826 * mapbuf= buffer pointer for map. 2827 * 2828 * Returns: 2829 * FC_OLDPORT - the specified port is not operating in loop mode. 2830 * FC_OFFLINE - the specified port is not online. 2831 * FC_NOMAP - there is no loop map available for this port. 2832 * FC_UNBOUND - the fca_handle specified is not bound. 2833 * FC_SUCCESS - a valid map has been placed in mapbuf. 2834 * 2835 * Context: 2836 * Kernel context. 
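 *	The map is returned in fc_lilpmap_t form: lilp_magic and lilp_myalpa
 *	are filled in here, and the firmware's loop position map is copied in
 *	starting at lilp_length, followed by the AL_PA list that the debug
 *	print below samples.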
2837 */ 2838 static int 2839 ql_getmap(opaque_t fca_handle, fc_lilpmap_t *mapbuf) 2840 { 2841 ql_adapter_state_t *ha; 2842 clock_t timer = drv_usectohz(30000000); 2843 int rval = FC_SUCCESS; 2844 2845 ha = ql_fca_handle_to_state(fca_handle); 2846 if (ha == NULL) { 2847 QL_PRINT_2(CE_CONT, "failed, no adapter=%ph\n", 2848 (void *)fca_handle); 2849 return (FC_UNBOUND); 2850 } 2851 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance); 2852 2853 mapbuf->lilp_magic = (uint16_t)MAGIC_LIRP; 2854 mapbuf->lilp_myalpa = ha->d_id.b.al_pa; 2855 2856 /* Wait for suspension to end. */ 2857 TASK_DAEMON_LOCK(ha); 2858 while (ha->task_daemon_flags & QL_SUSPENDED) { 2859 ha->task_daemon_flags |= SUSPENDED_WAKEUP_FLG; 2860 2861 /* 30 seconds from now */ 2862 if (cv_reltimedwait(&ha->pha->cv_dr_suspended, 2863 &ha->pha->task_daemon_mutex, timer, TR_CLOCK_TICK) == -1) { 2864 /* 2865 * The timeout time 'timer' was 2866 * reached without the condition 2867 * being signaled. 2868 */ 2869 2870 /* Release task daemon lock. */ 2871 TASK_DAEMON_UNLOCK(ha); 2872 2873 EL(ha, "QL_SUSPENDED failed, FC_TRAN_BUSY\n"); 2874 return (FC_TRAN_BUSY); 2875 } 2876 } 2877 /* Release task daemon lock. */ 2878 TASK_DAEMON_UNLOCK(ha); 2879 2880 if (ql_get_loop_position_map(ha, LOOP_POSITION_MAP_SIZE, 2881 (caddr_t)&mapbuf->lilp_length) != QL_SUCCESS) { 2882 /* 2883 * Now, since transport drivers consider this an 2884 * offline condition, let's wait for a few seconds 2885 * for any loop transitions before we reset the 2886 * chip and restart all over again. 2887 */ 2888 ql_delay(ha, 2000000); 2889 EL(ha, "failed, FC_NOMAP\n"); 2890 rval = FC_NOMAP; 2891 } else { 2892 /*EMPTY*/ 2893 QL_PRINT_3(CE_CONT, "(%d): my_alpa %xh len %xh " 2894 "data %xh %xh %xh %xh\n", ha->instance, 2895 mapbuf->lilp_myalpa, mapbuf->lilp_length, 2896 mapbuf->lilp_alpalist[0], mapbuf->lilp_alpalist[1], 2897 mapbuf->lilp_alpalist[2], mapbuf->lilp_alpalist[3]); 2898 } 2899 2900 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance); 2901 #if 0 2902 QL_DUMP_3((uint8_t *)mapbuf, 8, sizeof (fc_lilpmap_t)); 2903 #endif 2904 return (rval); 2905 } 2906 2907 /* 2908 * ql_transport 2909 * Issue an I/O request. Handles all regular requests. 2910 * 2911 * Input: 2912 * fca_handle = handle setup by ql_bind_port(). 2913 * pkt = pointer to fc_packet. 2914 * 2915 * Returns: 2916 * FC_SUCCESS - the packet was accepted for transport. 2917 * FC_TRANSPORT_ERROR - a transport error occurred. 2918 * FC_BADPACKET - the packet to be transported had not been 2919 * initialized by this FCA. 2920 * FC_UNBOUND - the fca_handle specified is not bound. 2921 * 2922 * Context: 2923 * Kernel context. 2924 */ 2925 static int 2926 ql_transport(opaque_t fca_handle, fc_packet_t *pkt) 2927 { 2928 ql_adapter_state_t *ha; 2929 int rval = FC_TRANSPORT_ERROR; 2930 ql_srb_t *sp = (ql_srb_t *)pkt->pkt_fca_private; 2931 2932 /* Verify proper command. */ 2933 ha = ql_cmd_setup(fca_handle, pkt, &rval); 2934 if (ha == NULL) { 2935 QL_PRINT_2(CE_CONT, "failed, ql_cmd_setup=%xh, fcah=%ph\n", 2936 rval, fca_handle); 2937 return (rval); 2938 } 2939 QL_PRINT_3(CE_CONT, "(%d): started command:\n", ha->instance); 2940 #if 0 2941 QL_DUMP_3((uint8_t *)&pkt->pkt_cmd_fhdr, 32, 2942 sizeof (fc_frame_hdr_t) / 4); 2943 QL_PRINT_3(CE_CONT, "(%d): command:\n", ha->instance); 2944 QL_DUMP_3((uint8_t *)pkt->pkt_cmd, 8, pkt->pkt_cmdlen); 2945 #endif 2946 2947 /* Reset SRB flags.
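 * The SRB lives in the packet's FCA-private area and is reused for every
 * transport call on that packet, so all state and ownership flags from a
 * previous pass are cleared before the request is classified by the
 * R_CTL/type fields below and the matching routing flag is set again.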
*/ 2948 sp->flags &= ~(SRB_ISP_STARTED | SRB_ISP_COMPLETED | SRB_RETRY | 2949 SRB_POLL | SRB_WATCHDOG_ENABLED | SRB_ABORT | SRB_UB_CALLBACK | 2950 SRB_UB_RSCN | SRB_UB_FCP | SRB_FCP_CMD_PKT | SRB_FCP_DATA_PKT | 2951 SRB_FCP_RSP_PKT | SRB_IP_PKT | SRB_GENERIC_SERVICES_PKT | 2952 SRB_COMMAND_TIMEOUT | SRB_ABORTING | SRB_IN_DEVICE_QUEUE | 2953 SRB_IN_TOKEN_ARRAY | SRB_UB_FREE_REQUESTED | SRB_UB_ACQUIRED | 2954 SRB_MS_PKT | SRB_ELS_PKT); 2955 2956 pkt->pkt_resp_fhdr.d_id = ha->d_id.b24; 2957 pkt->pkt_resp_fhdr.r_ctl = R_CTL_STATUS; 2958 pkt->pkt_resp_fhdr.s_id = pkt->pkt_cmd_fhdr.d_id; 2959 pkt->pkt_resp_fhdr.f_ctl = pkt->pkt_cmd_fhdr.f_ctl; 2960 pkt->pkt_resp_fhdr.type = pkt->pkt_cmd_fhdr.type; 2961 2962 switch (pkt->pkt_cmd_fhdr.r_ctl) { 2963 case R_CTL_COMMAND: 2964 if (pkt->pkt_cmd_fhdr.type == FC_TYPE_SCSI_FCP) { 2965 sp->flags |= SRB_FCP_CMD_PKT; 2966 rval = ql_fcp_scsi_cmd(ha, pkt, sp); 2967 } 2968 break; 2969 2970 default: 2971 /* Setup response header and buffer. */ 2972 if (pkt->pkt_rsplen) { 2973 bzero((void *)pkt->pkt_resp, pkt->pkt_rsplen); 2974 } 2975 2976 switch (pkt->pkt_cmd_fhdr.r_ctl) { 2977 case R_CTL_UNSOL_DATA: 2978 if (pkt->pkt_cmd_fhdr.type == FC_TYPE_IS8802_SNAP) { 2979 sp->flags |= SRB_IP_PKT; 2980 rval = ql_fcp_ip_cmd(ha, pkt, sp); 2981 } 2982 break; 2983 2984 case R_CTL_UNSOL_CONTROL: 2985 if (pkt->pkt_cmd_fhdr.type == FC_TYPE_FC_SERVICES) { 2986 sp->flags |= SRB_GENERIC_SERVICES_PKT; 2987 rval = ql_fc_services(ha, pkt); 2988 } 2989 break; 2990 2991 case R_CTL_SOLICITED_DATA: 2992 case R_CTL_STATUS: 2993 default: 2994 pkt->pkt_state = FC_PKT_LOCAL_RJT; 2995 pkt->pkt_reason = FC_REASON_UNSUPPORTED; 2996 rval = FC_TRANSPORT_ERROR; 2997 EL(ha, "unknown, r_ctl=%xh\n", 2998 pkt->pkt_cmd_fhdr.r_ctl); 2999 break; 3000 } 3001 } 3002 3003 if (rval != FC_SUCCESS) { 3004 EL(ha, "failed, rval = %xh\n", rval); 3005 } else { 3006 /*EMPTY*/ 3007 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance); 3008 } 3009 3010 return (rval); 3011 } 3012 3013 /* 3014 * ql_ub_alloc 3015 * Allocate buffers for unsolicited exchanges. 3016 * 3017 * Input: 3018 * fca_handle = handle setup by ql_bind_port(). 3019 * tokens = token array for each buffer. 3020 * size = size of each buffer. 3021 * count = pointer to number of buffers. 3022 * type = the FC-4 type the buffers are reserved for. 3023 * 1 = Extended Link Services, 5 = LLC/SNAP 3024 * 3025 * Returns: 3026 * FC_FAILURE - buffers could not be allocated. 3027 * FC_TOOMANY - the FCA could not allocate the requested 3028 * number of buffers. 3029 * FC_SUCCESS - unsolicited buffers were allocated. 3030 * FC_UNBOUND - the fca_handle specified is not bound. 3031 * 3032 * Context: 3033 * Kernel context. 3034 */ 3035 static int 3036 ql_ub_alloc(opaque_t fca_handle, uint64_t tokens[], uint32_t size, 3037 uint32_t *count, uint32_t type) 3038 { 3039 ql_adapter_state_t *ha; 3040 caddr_t bufp = NULL; 3041 fc_unsol_buf_t *ubp; 3042 ql_srb_t *sp; 3043 uint32_t index; 3044 uint32_t cnt; 3045 uint32_t ub_array_index = 0; 3046 int rval = FC_SUCCESS; 3047 int ub_updated = FALSE; 3048 3049 /* Check handle. 
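 * Besides validating the bind handle, the request is refused while the
 * adapter is not at full power (PM_LEVEL_D0), since the buffer setup below
 * may touch the hardware and the IP receive queues.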
*/ 3050 ha = ql_fca_handle_to_state(fca_handle); 3051 if (ha == NULL) { 3052 QL_PRINT_2(CE_CONT, "failed, no adapter=%ph\n", 3053 (void *)fca_handle); 3054 return (FC_UNBOUND); 3055 } 3056 QL_PRINT_3(CE_CONT, "(%d,%d): started, count = %xh\n", 3057 ha->instance, ha->vp_index, *count); 3058 3059 QL_PM_LOCK(ha); 3060 if (ha->power_level != PM_LEVEL_D0) { 3061 QL_PM_UNLOCK(ha); 3062 QL_PRINT_3(CE_CONT, "(%d,%d): down done\n", ha->instance, 3063 ha->vp_index); 3064 return (FC_FAILURE); 3065 } 3066 QL_PM_UNLOCK(ha); 3067 3068 /* Acquire adapter state lock. */ 3069 ADAPTER_STATE_LOCK(ha); 3070 3071 /* Check the count. */ 3072 if ((*count + ha->ub_allocated) > QL_UB_LIMIT) { 3073 *count = 0; 3074 EL(ha, "failed, FC_TOOMANY\n"); 3075 rval = FC_TOOMANY; 3076 } 3077 3078 /* 3079 * reset ub_array_index 3080 */ 3081 ub_array_index = 0; 3082 3083 /* 3084 * Now proceed to allocate any buffers required 3085 */ 3086 for (index = 0; index < *count && rval == FC_SUCCESS; index++) { 3087 /* Allocate all memory needed. */ 3088 ubp = (fc_unsol_buf_t *)kmem_zalloc(sizeof (fc_unsol_buf_t), 3089 KM_SLEEP); 3090 if (ubp == NULL) { 3091 EL(ha, "failed, FC_FAILURE\n"); 3092 rval = FC_FAILURE; 3093 } else { 3094 sp = kmem_zalloc(sizeof (ql_srb_t), KM_SLEEP); 3095 if (sp == NULL) { 3096 kmem_free(ubp, sizeof (fc_unsol_buf_t)); 3097 rval = FC_FAILURE; 3098 } else { 3099 if (type == FC_TYPE_IS8802_SNAP) { 3100 #ifdef __sparc 3101 if (ql_get_dma_mem(ha, 3102 &sp->ub_buffer, size, 3103 BIG_ENDIAN_DMA, 3104 QL_DMA_DATA_ALIGN) != QL_SUCCESS) { 3105 rval = FC_FAILURE; 3106 kmem_free(ubp, 3107 sizeof (fc_unsol_buf_t)); 3108 kmem_free(sp, 3109 sizeof (ql_srb_t)); 3110 } else { 3111 bufp = sp->ub_buffer.bp; 3112 sp->ub_size = size; 3113 } 3114 #else 3115 if (ql_get_dma_mem(ha, 3116 &sp->ub_buffer, size, 3117 LITTLE_ENDIAN_DMA, 3118 QL_DMA_DATA_ALIGN) != QL_SUCCESS) { 3119 rval = FC_FAILURE; 3120 kmem_free(ubp, 3121 sizeof (fc_unsol_buf_t)); 3122 kmem_free(sp, 3123 sizeof (ql_srb_t)); 3124 } else { 3125 bufp = sp->ub_buffer.bp; 3126 sp->ub_size = size; 3127 } 3128 #endif 3129 } else { 3130 bufp = kmem_zalloc(size, KM_SLEEP); 3131 if (bufp == NULL) { 3132 rval = FC_FAILURE; 3133 kmem_free(ubp, 3134 sizeof (fc_unsol_buf_t)); 3135 kmem_free(sp, 3136 sizeof (ql_srb_t)); 3137 } else { 3138 sp->ub_size = size; 3139 } 3140 } 3141 } 3142 } 3143 3144 if (rval == FC_SUCCESS) { 3145 /* Find next available slot. */ 3146 QL_UB_LOCK(ha); 3147 while (ha->ub_array[ub_array_index] != NULL) { 3148 ub_array_index++; 3149 } 3150 3151 ubp->ub_fca_private = (void *)sp; 3152 3153 /* init cmd links */ 3154 sp->cmd.base_address = sp; 3155 sp->cmd.prev = NULL; 3156 sp->cmd.next = NULL; 3157 sp->cmd.head = NULL; 3158 3159 /* init wdg links */ 3160 sp->wdg.base_address = sp; 3161 sp->wdg.prev = NULL; 3162 sp->wdg.next = NULL; 3163 sp->wdg.head = NULL; 3164 sp->ha = ha; 3165 3166 ubp->ub_buffer = bufp; 3167 ubp->ub_bufsize = size; 3168 ubp->ub_port_handle = fca_handle; 3169 ubp->ub_token = ub_array_index; 3170 3171 /* Save the token. */ 3172 tokens[index] = ub_array_index; 3173 3174 /* Setup FCA private information. */ 3175 sp->ub_type = type; 3176 sp->handle = ub_array_index; 3177 sp->flags |= SRB_UB_IN_FCA; 3178 3179 ha->ub_array[ub_array_index] = ubp; 3180 ha->ub_allocated++; 3181 ub_updated = TRUE; 3182 QL_UB_UNLOCK(ha); 3183 } 3184 } 3185 3186 /* Release adapter state lock. */ 3187 ADAPTER_STATE_UNLOCK(ha); 3188 3189 /* IP buffer. 
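 * For FC_TYPE_IS8802_SNAP allocations on chips with IP support, the MTU,
 * per-buffer size and buffer count are folded into the ISP's IP
 * initialization control block as LSB/MSB byte pairs, e.g.
 *	cb.buf_size[0] = LSB(size);  cb.buf_size[1] = MSB(size);
 * then firmware IP support is initialized once (IP_INITIALIZED) and
 * ql_isp_rcvbuf() posts the new buffers to the receive buffer queue.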
*/ 3190 if (ub_updated) { 3191 if ((type == FC_TYPE_IS8802_SNAP) && 3192 (!(CFG_IST(ha, (CFG_CTRL_6322 | CFG_CTRL_2581))))) { 3193 3194 ADAPTER_STATE_LOCK(ha); 3195 ha->flags |= IP_ENABLED; 3196 ADAPTER_STATE_UNLOCK(ha); 3197 3198 if (!(ha->flags & IP_INITIALIZED)) { 3199 if (CFG_IST(ha, CFG_CTRL_2422)) { 3200 ha->ip_init_ctrl_blk.cb24.mtu_size[0] = 3201 LSB(ql_ip_mtu); 3202 ha->ip_init_ctrl_blk.cb24.mtu_size[1] = 3203 MSB(ql_ip_mtu); 3204 ha->ip_init_ctrl_blk.cb24.buf_size[0] = 3205 LSB(size); 3206 ha->ip_init_ctrl_blk.cb24.buf_size[1] = 3207 MSB(size); 3208 3209 cnt = CHAR_TO_SHORT( 3210 ha->ip_init_ctrl_blk.cb24.cc[0], 3211 ha->ip_init_ctrl_blk.cb24.cc[1]); 3212 3213 if (cnt < *count) { 3214 ha->ip_init_ctrl_blk.cb24.cc[0] 3215 = LSB(*count); 3216 ha->ip_init_ctrl_blk.cb24.cc[1] 3217 = MSB(*count); 3218 } 3219 } else { 3220 ha->ip_init_ctrl_blk.cb.mtu_size[0] = 3221 LSB(ql_ip_mtu); 3222 ha->ip_init_ctrl_blk.cb.mtu_size[1] = 3223 MSB(ql_ip_mtu); 3224 ha->ip_init_ctrl_blk.cb.buf_size[0] = 3225 LSB(size); 3226 ha->ip_init_ctrl_blk.cb.buf_size[1] = 3227 MSB(size); 3228 3229 cnt = CHAR_TO_SHORT( 3230 ha->ip_init_ctrl_blk.cb.cc[0], 3231 ha->ip_init_ctrl_blk.cb.cc[1]); 3232 3233 if (cnt < *count) { 3234 ha->ip_init_ctrl_blk.cb.cc[0] = 3235 LSB(*count); 3236 ha->ip_init_ctrl_blk.cb.cc[1] = 3237 MSB(*count); 3238 } 3239 } 3240 3241 (void) ql_initialize_ip(ha); 3242 } 3243 ql_isp_rcvbuf(ha); 3244 } 3245 } 3246 3247 if (rval != FC_SUCCESS) { 3248 EL(ha, "failed=%xh\n", rval); 3249 } else { 3250 /*EMPTY*/ 3251 QL_PRINT_3(CE_CONT, "(%d,%d): done\n", ha->instance, 3252 ha->vp_index); 3253 } 3254 return (rval); 3255 } 3256 3257 /* 3258 * ql_ub_free 3259 * Free unsolicited buffers. 3260 * 3261 * Input: 3262 * fca_handle = handle setup by ql_bind_port(). 3263 * count = number of buffers. 3264 * tokens = token array for each buffer. 3265 * 3266 * Returns: 3267 * FC_SUCCESS - the requested buffers have been freed. 3268 * FC_UNBOUND - the fca_handle specified is not bound. 3269 * FC_UB_BADTOKEN - an invalid token was encountered. 3270 * No buffers have been released. 3271 * 3272 * Context: 3273 * Kernel context. 3274 */ 3275 static int 3276 ql_ub_free(opaque_t fca_handle, uint32_t count, uint64_t tokens[]) 3277 { 3278 ql_adapter_state_t *ha; 3279 ql_srb_t *sp; 3280 uint32_t index; 3281 uint64_t ub_array_index; 3282 int rval = FC_SUCCESS; 3283 3284 /* Check handle. */ 3285 ha = ql_fca_handle_to_state(fca_handle); 3286 if (ha == NULL) { 3287 QL_PRINT_2(CE_CONT, "failed, no adapter=%ph\n", 3288 (void *)fca_handle); 3289 return (FC_UNBOUND); 3290 } 3291 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance); 3292 3293 /* Acquire adapter state lock. */ 3294 ADAPTER_STATE_LOCK(ha); 3295 3296 /* Check all returned tokens. */ 3297 for (index = 0; index < count; index++) { 3298 fc_unsol_buf_t *ubp; 3299 3300 /* Check the token range. */ 3301 if ((ub_array_index = tokens[index]) >= QL_UB_LIMIT) { 3302 EL(ha, "failed, FC_UB_BADTOKEN\n"); 3303 rval = FC_UB_BADTOKEN; 3304 break; 3305 } 3306 3307 /* Check the unsolicited buffer array. */ 3308 QL_UB_LOCK(ha); 3309 ubp = ha->ub_array[ub_array_index]; 3310 3311 if (ubp == NULL) { 3312 EL(ha, "failed, FC_UB_BADTOKEN-2\n"); 3313 rval = FC_UB_BADTOKEN; 3314 QL_UB_UNLOCK(ha); 3315 break; 3316 } 3317 3318 /* Check the state of the unsolicited buffer. 
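 * SRB_UB_FREE_REQUESTED is meant to tell the ISR and callback paths to hand
 * the buffer back rather than keep using it; the loop below then waits,
 * dropping both locks around a 100ms delay, until the buffer is owned by the
 * FCA again and no callback holds it, before unlinking and freeing it.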
*/ 3319 sp = ha->ub_array[ub_array_index]->ub_fca_private; 3320 sp->flags |= SRB_UB_FREE_REQUESTED; 3321 3322 while (!(sp->flags & SRB_UB_IN_FCA) || 3323 (sp->flags & (SRB_UB_CALLBACK | SRB_UB_ACQUIRED))) { 3324 QL_UB_UNLOCK(ha); 3325 ADAPTER_STATE_UNLOCK(ha); 3326 delay(drv_usectohz(100000)); 3327 ADAPTER_STATE_LOCK(ha); 3328 QL_UB_LOCK(ha); 3329 } 3330 ha->ub_array[ub_array_index] = NULL; 3331 QL_UB_UNLOCK(ha); 3332 ql_free_unsolicited_buffer(ha, ubp); 3333 } 3334 3335 if (rval == FC_SUCCESS) { 3336 /* 3337 * Signal any pending hardware reset when there are 3338 * no more unsolicited buffers in use. 3339 */ 3340 if (ha->ub_allocated == 0) { 3341 cv_broadcast(&ha->pha->cv_ub); 3342 } 3343 } 3344 3345 /* Release adapter state lock. */ 3346 ADAPTER_STATE_UNLOCK(ha); 3347 3348 if (rval != FC_SUCCESS) { 3349 EL(ha, "failed=%xh\n", rval); 3350 } else { 3351 /*EMPTY*/ 3352 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance); 3353 } 3354 return (rval); 3355 } 3356 3357 /* 3358 * ql_ub_release 3359 * Release unsolicited buffers from FC Transport 3360 * to FCA for future use. 3361 * 3362 * Input: 3363 * fca_handle = handle setup by ql_bind_port(). 3364 * count = number of buffers. 3365 * tokens = token array for each buffer. 3366 * 3367 * Returns: 3368 * FC_SUCCESS - the requested buffers have been released. 3369 * FC_UNBOUND - the fca_handle specified is not bound. 3370 * FC_UB_BADTOKEN - an invalid token was encountered. 3371 * No buffers have been released. 3372 * 3373 * Context: 3374 * Kernel context. 3375 */ 3376 static int 3377 ql_ub_release(opaque_t fca_handle, uint32_t count, uint64_t tokens[]) 3378 { 3379 ql_adapter_state_t *ha; 3380 ql_srb_t *sp; 3381 uint32_t index; 3382 uint64_t ub_array_index; 3383 int rval = FC_SUCCESS; 3384 int ub_ip_updated = FALSE; 3385 3386 /* Check handle. */ 3387 ha = q