1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
22 /* Copyright 2010 QLogic Corporation */
23
24 /*
25 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
26 */
27
28 /*
29 * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
30 */
31
32 #pragma ident "Copyright 2010 QLogic Corporation; ql_xioctl.c"
33
34 /*
35 * ISP2xxx Solaris Fibre Channel Adapter (FCA) driver source file.
36 *
37 * ***********************************************************************
38 * * **
39 * * NOTICE **
40 * * COPYRIGHT (C) 1996-2010 QLOGIC CORPORATION **
41 * * ALL RIGHTS RESERVED **
42 * * **
43 * ***********************************************************************
44 *
45 */
46
47 #include <ql_apps.h>
48 #include <ql_api.h>
49 #include <ql_debug.h>
50 #include <ql_init.h>
51 #include <ql_iocb.h>
52 #include <ql_ioctl.h>
53 #include <ql_mbx.h>
54 #include <ql_xioctl.h>
55
56 /*
57 * Local data
58 */
59
60 /*
61 * Local prototypes
62 */
63 static int ql_sdm_ioctl(ql_adapter_state_t *, int, void *, int);
64 static int ql_sdm_setup(ql_adapter_state_t *, EXT_IOCTL **, void *, int,
65 boolean_t (*)(EXT_IOCTL *));
66 static boolean_t ql_validate_signature(EXT_IOCTL *);
67 static int ql_sdm_return(ql_adapter_state_t *, EXT_IOCTL *, void *, int);
68 static void ql_query(ql_adapter_state_t *, EXT_IOCTL *, int);
69 static void ql_qry_hba_node(ql_adapter_state_t *, EXT_IOCTL *, int);
70 static void ql_qry_hba_port(ql_adapter_state_t *, EXT_IOCTL *, int);
71 static void ql_qry_disc_port(ql_adapter_state_t *, EXT_IOCTL *, int);
72 static void ql_qry_disc_tgt(ql_adapter_state_t *, EXT_IOCTL *, int);
73 static void ql_qry_fw(ql_adapter_state_t *, EXT_IOCTL *, int);
74 static void ql_qry_chip(ql_adapter_state_t *, EXT_IOCTL *, int);
75 static void ql_qry_driver(ql_adapter_state_t *, EXT_IOCTL *, int);
76 static void ql_fcct(ql_adapter_state_t *, EXT_IOCTL *, int);
77 static void ql_aen_reg(ql_adapter_state_t *, EXT_IOCTL *, int);
78 static void ql_aen_get(ql_adapter_state_t *, EXT_IOCTL *, int);
79 static void ql_scsi_passthru(ql_adapter_state_t *, EXT_IOCTL *, int);
80 static void ql_wwpn_to_scsiaddr(ql_adapter_state_t *, EXT_IOCTL *, int);
81 static void ql_host_idx(ql_adapter_state_t *, EXT_IOCTL *, int);
82 static void ql_host_drvname(ql_adapter_state_t *, EXT_IOCTL *, int);
83 static void ql_read_nvram(ql_adapter_state_t *, EXT_IOCTL *, int);
84 static void ql_write_nvram(ql_adapter_state_t *, EXT_IOCTL *, int);
85 static void ql_read_flash(ql_adapter_state_t *, EXT_IOCTL *, int);
86 static void ql_write_flash(ql_adapter_state_t *, EXT_IOCTL *, int);
87 static void ql_write_vpd(ql_adapter_state_t *, EXT_IOCTL *, int);
88 static void ql_read_vpd(ql_adapter_state_t *, EXT_IOCTL *, int);
89 static void ql_diagnostic_loopback(ql_adapter_state_t *, EXT_IOCTL *, int);
90 static void ql_send_els_rnid(ql_adapter_state_t *, EXT_IOCTL *, int);
91 static void ql_set_host_data(ql_adapter_state_t *, EXT_IOCTL *, int);
92 static void ql_get_host_data(ql_adapter_state_t *, EXT_IOCTL *, int);
93 static void ql_qry_cna_port(ql_adapter_state_t *, EXT_IOCTL *, int);
94
95 static int ql_lun_count(ql_adapter_state_t *, ql_tgt_t *);
96 static int ql_report_lun(ql_adapter_state_t *, ql_tgt_t *);
97 static int ql_inq_scan(ql_adapter_state_t *, ql_tgt_t *, int);
98 static int ql_inq(ql_adapter_state_t *, ql_tgt_t *, int, ql_mbx_iocb_t *,
99 uint8_t);
100 static uint32_t ql_get_buffer_data(caddr_t, caddr_t, uint32_t, int);
101 static uint32_t ql_send_buffer_data(caddr_t, caddr_t, uint32_t, int);
102 static int ql_24xx_flash_desc(ql_adapter_state_t *);
103 static int ql_setup_flash(ql_adapter_state_t *);
104 static ql_tgt_t *ql_find_port(ql_adapter_state_t *, uint8_t *, uint16_t);
105 static int ql_flash_fcode_load(ql_adapter_state_t *, void *, uint32_t, int);
106 static int ql_flash_fcode_dump(ql_adapter_state_t *, void *, uint32_t,
107 uint32_t, int);
108 static int ql_program_flash_address(ql_adapter_state_t *, uint32_t,
109 uint8_t);
110 static void ql_set_rnid_parameters(ql_adapter_state_t *, EXT_IOCTL *, int);
111 static void ql_get_rnid_parameters(ql_adapter_state_t *, EXT_IOCTL *, int);
112 static int ql_reset_statistics(ql_adapter_state_t *, EXT_IOCTL *);
113 static void ql_get_statistics(ql_adapter_state_t *, EXT_IOCTL *, int);
114 static void ql_get_statistics_fc(ql_adapter_state_t *, EXT_IOCTL *, int);
115 static void ql_get_statistics_fc4(ql_adapter_state_t *, EXT_IOCTL *, int);
116 static void ql_set_led_state(ql_adapter_state_t *, EXT_IOCTL *, int);
117 static void ql_get_led_state(ql_adapter_state_t *, EXT_IOCTL *, int);
118 static void ql_drive_led(ql_adapter_state_t *, uint32_t);
119 static uint32_t ql_setup_led(ql_adapter_state_t *);
120 static uint32_t ql_wrapup_led(ql_adapter_state_t *);
121 static void ql_get_port_summary(ql_adapter_state_t *, EXT_IOCTL *, int);
122 static void ql_get_target_id(ql_adapter_state_t *, EXT_IOCTL *, int);
123 static void ql_get_sfp(ql_adapter_state_t *, EXT_IOCTL *, int);
124 static int ql_dump_sfp(ql_adapter_state_t *, void *, int);
125 static ql_fcache_t *ql_setup_fnode(ql_adapter_state_t *);
126 static void ql_get_fcache(ql_adapter_state_t *, EXT_IOCTL *, int);
127 static void ql_get_fcache_ex(ql_adapter_state_t *, EXT_IOCTL *, int);
128 void ql_update_fcache(ql_adapter_state_t *, uint8_t *, uint32_t);
129 static int ql_check_pci(ql_adapter_state_t *, ql_fcache_t *, uint32_t *);
130 static void ql_flash_layout_table(ql_adapter_state_t *, uint32_t);
131 static void ql_process_flt(ql_adapter_state_t *, uint32_t);
132 static void ql_flash_nvram_defaults(ql_adapter_state_t *);
133 static void ql_port_param(ql_adapter_state_t *, EXT_IOCTL *, int);
134 static int ql_check_pci(ql_adapter_state_t *, ql_fcache_t *, uint32_t *);
135 static void ql_get_pci_data(ql_adapter_state_t *, EXT_IOCTL *, int);
136 static void ql_get_fwfcetrace(ql_adapter_state_t *, EXT_IOCTL *, int);
137 static void ql_get_fwexttrace(ql_adapter_state_t *, EXT_IOCTL *, int);
138 static void ql_menlo_reset(ql_adapter_state_t *, EXT_IOCTL *, int);
139 static void ql_menlo_get_fw_version(ql_adapter_state_t *, EXT_IOCTL *, int);
140 static void ql_menlo_update_fw(ql_adapter_state_t *, EXT_IOCTL *, int);
141 static void ql_menlo_manage_info(ql_adapter_state_t *, EXT_IOCTL *, int);
142 static int ql_suspend_hba(ql_adapter_state_t *, uint32_t);
143 static void ql_restart_hba(ql_adapter_state_t *);
144 static void ql_get_vp_cnt_id(ql_adapter_state_t *, EXT_IOCTL *, int);
145 static void ql_vp_ioctl(ql_adapter_state_t *, EXT_IOCTL *, int);
146 static void ql_qry_vport(ql_adapter_state_t *, EXT_IOCTL *, int);
147 static void ql_access_flash(ql_adapter_state_t *, EXT_IOCTL *, int);
148 static void ql_reset_cmd(ql_adapter_state_t *, EXT_IOCTL *);
149 static void ql_update_flash_caches(ql_adapter_state_t *);
150 static void ql_get_dcbx_parameters(ql_adapter_state_t *, EXT_IOCTL *, int);
151 static void ql_get_xgmac_statistics(ql_adapter_state_t *, EXT_IOCTL *, int);
152 static void ql_get_fcf_list(ql_adapter_state_t *, EXT_IOCTL *, int);
153 static void ql_get_resource_counts(ql_adapter_state_t *, EXT_IOCTL *, int);
154 static void ql_qry_adapter_versions(ql_adapter_state_t *, EXT_IOCTL *, int);
155 static int ql_set_loop_point(ql_adapter_state_t *, uint16_t);
156
157 /* ******************************************************************** */
158 /* External IOCTL support. */
159 /* ******************************************************************** */
160
161 /*
162 * ql_alloc_xioctl_resource
163 * Allocates resources needed by module code.
164 *
165 * Input:
166 * ha: adapter state pointer.
167 *
168 * Returns:
169 * SYS_ERRNO
170 *
171 * Context:
172 * Kernel context.
173 */
174 int
ql_alloc_xioctl_resource(ql_adapter_state_t * ha)175 ql_alloc_xioctl_resource(ql_adapter_state_t *ha)
176 {
177 ql_xioctl_t *xp;
178
179 QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
180
181 if (ha->xioctl != NULL) {
182 QL_PRINT_9(CE_CONT, "(%d): already allocated done\n",
183 ha->instance);
184 return (0);
185 }
186
187 xp = kmem_zalloc(sizeof (ql_xioctl_t), KM_SLEEP);
188 if (xp == NULL) {
189 EL(ha, "failed, kmem_zalloc\n");
190 return (ENOMEM);
191 }
192 ha->xioctl = xp;
193
194 /* Allocate AEN tracking buffer */
195 xp->aen_tracking_queue = kmem_zalloc(EXT_DEF_MAX_AEN_QUEUE *
196 sizeof (EXT_ASYNC_EVENT), KM_SLEEP);
197 if (xp->aen_tracking_queue == NULL) {
198 EL(ha, "failed, kmem_zalloc-2\n");
199 ql_free_xioctl_resource(ha);
200 return (ENOMEM);
201 }
202
203 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
204
205 return (0);
206 }
207
208 /*
209 * ql_free_xioctl_resource
210 * Frees resources used by module code.
211 *
212 * Input:
213 * ha: adapter state pointer.
214 *
215 * Context:
216 * Kernel context.
217 */
218 void
ql_free_xioctl_resource(ql_adapter_state_t * ha)219 ql_free_xioctl_resource(ql_adapter_state_t *ha)
220 {
221 ql_xioctl_t *xp = ha->xioctl;
222
223 QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
224
225 if (xp == NULL) {
226 QL_PRINT_9(CE_CONT, "(%d): already freed\n", ha->instance);
227 return;
228 }
229
230 if (xp->aen_tracking_queue != NULL) {
231 kmem_free(xp->aen_tracking_queue, EXT_DEF_MAX_AEN_QUEUE *
232 sizeof (EXT_ASYNC_EVENT));
233 xp->aen_tracking_queue = NULL;
234 }
235
236 kmem_free(xp, sizeof (ql_xioctl_t));
237 ha->xioctl = NULL;
238
239 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
240 }
241
242 /*
243 * ql_xioctl
244 * External IOCTL processing.
245 *
246 * Input:
247 * ha: adapter state pointer.
248 * cmd: function to perform
249 * arg: data type varies with request
250 * mode: flags
251 * cred_p: credentials pointer
252 * rval_p: pointer to result value
253 *
254 * Returns:
255 * 0: success
256 * ENXIO: No such device or address
257 * ENOPROTOOPT: Protocol not available
258 *
259 * Context:
260 * Kernel context.
261 */
/* ARGSUSED */
int
ql_xioctl(ql_adapter_state_t *ha, int cmd, intptr_t arg, int mode,
    cred_t *cred_p, int *rval_p)
{
	int	rval;

	QL_PRINT_9(CE_CONT, "(%d): started, cmd=%d\n", ha->instance, cmd);

	/* xioctl context must have been set up by attach time. */
	if (ha->xioctl == NULL) {
		QL_PRINT_9(CE_CONT, "(%d): no context\n", ha->instance);
		return (ENXIO);
	}

	/*
	 * Every supported external ioctl code funnels into
	 * ql_sdm_ioctl(), which copies in the EXT_IOCTL argument
	 * block and re-dispatches on the OS-specific code.
	 */
	switch (cmd) {
	case EXT_CC_QUERY:
	case EXT_CC_SEND_FCCT_PASSTHRU:
	case EXT_CC_REG_AEN:
	case EXT_CC_GET_AEN:
	case EXT_CC_SEND_SCSI_PASSTHRU:
	case EXT_CC_WWPN_TO_SCSIADDR:
	case EXT_CC_SEND_ELS_RNID:
	case EXT_CC_SET_DATA:
	case EXT_CC_GET_DATA:
	case EXT_CC_HOST_IDX:
	case EXT_CC_READ_NVRAM:
	case EXT_CC_UPDATE_NVRAM:
	case EXT_CC_READ_OPTION_ROM:
	case EXT_CC_READ_OPTION_ROM_EX:
	case EXT_CC_UPDATE_OPTION_ROM:
	case EXT_CC_UPDATE_OPTION_ROM_EX:
	case EXT_CC_GET_VPD:
	case EXT_CC_SET_VPD:
	case EXT_CC_LOOPBACK:
	case EXT_CC_GET_FCACHE:
	case EXT_CC_GET_FCACHE_EX:
	case EXT_CC_HOST_DRVNAME:
	case EXT_CC_GET_SFP_DATA:
	case EXT_CC_PORT_PARAM:
	case EXT_CC_GET_PCI_DATA:
	case EXT_CC_GET_FWEXTTRACE:
	case EXT_CC_GET_FWFCETRACE:
	case EXT_CC_GET_VP_CNT_ID:
	case EXT_CC_VPORT_CMD:
	case EXT_CC_ACCESS_FLASH:
	case EXT_CC_RESET_FW:
	case EXT_CC_MENLO_MANAGE_INFO:
		rval = ql_sdm_ioctl(ha, cmd, (void *)arg, mode);
		break;
	default:
		/* function not supported. */
		EL(ha, "function=%d not supported\n", cmd);
		rval = ENOPROTOOPT;
	}

	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);

	return (rval);
}
321
322 /*
323 * ql_sdm_ioctl
324 * Provides ioctl functions for SAN/Device Management functions
325 * AKA External Ioctl functions.
326 *
327 * Input:
328 * ha: adapter state pointer.
329 * ioctl_code: ioctl function to perform
330 * arg: Pointer to EXT_IOCTL cmd data in application land.
331 * mode: flags
332 *
333 * Returns:
334 * 0: success
335 * ENOMEM: Alloc of local EXT_IOCTL struct failed.
336 * EFAULT: Copyin of caller's EXT_IOCTL struct failed or
337 * copyout of EXT_IOCTL status info failed.
338 * EINVAL: Signature or version of caller's EXT_IOCTL invalid.
339 * EBUSY: Device busy
340 *
341 * Context:
342 * Kernel context.
343 */
static int
ql_sdm_ioctl(ql_adapter_state_t *ha, int ioctl_code, void *arg, int mode)
{
	EXT_IOCTL		*cmd;
	int			rval;
	ql_adapter_state_t	*vha;

	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);

	/* Copy argument structure (EXT_IOCTL) from application land. */
	if ((rval = ql_sdm_setup(ha, &cmd, arg, mode,
	    ql_validate_signature)) != 0) {
		/*
		 * a non-zero value at this time means a problem getting
		 * the requested information from application land, just
		 * return the error code and hope for the best.
		 */
		EL(ha, "failed, sdm_setup\n");
		return (rval);
	}

	/*
	 * Map the physical ha ptr (which the ioctl is called with)
	 * to the virtual ha that the caller is addressing.
	 */
	if (ha->flags & VP_ENABLED) {
		/*
		 * Check that it is within range.
		 * NOTE(review): the '>' comparison accepts an index
		 * equal to MAX_*_VIRTUAL_PORTS; whether that is a
		 * valid vp index depends on the numbering scheme —
		 * confirm against the vp_index assignment code.
		 */
		if (cmd->HbaSelect > (CFG_IST(ha, CFG_CTRL_2422) ?
		    MAX_24_VIRTUAL_PORTS : MAX_25_VIRTUAL_PORTS)) {
			EL(ha, "Invalid HbaSelect vp index: %xh\n",
			    cmd->HbaSelect);
			cmd->Status = EXT_STATUS_INVALID_VPINDEX;
			cmd->ResponseLen = 0;
			/*
			 * cmd is not freed here; EFAULT is returned
			 * without copying status back to the caller.
			 */
			return (EFAULT);
		}
		/*
		 * Special case: HbaSelect == 0 is physical ha
		 */
		if (cmd->HbaSelect != 0) {
			/* Walk the vp chain for a matching vp_index. */
			vha = ha->vp_next;
			while (vha != NULL) {
				if (vha->vp_index == cmd->HbaSelect) {
					ha = vha;
					break;
				}
				vha = vha->vp_next;
			}
			/*
			 * The specified vp index may be valid(within range)
			 * but it's not in the list. Currently this is all
			 * we can say.
			 */
			if (vha == NULL) {
				cmd->Status = EXT_STATUS_INVALID_VPINDEX;
				cmd->ResponseLen = 0;
				return (EFAULT);
			}
		}
	}

	/*
	 * If driver is suspended, stalled, or powered down rtn BUSY
	 */
	if (ha->flags & ADAPTER_SUSPENDED ||
	    ha->task_daemon_flags & DRIVER_STALL ||
	    ha->power_level != PM_LEVEL_D0) {
		EL(ha, " %s\n", ha->flags & ADAPTER_SUSPENDED ?
		    "driver suspended" :
		    (ha->task_daemon_flags & DRIVER_STALL ? "driver stalled" :
		    "FCA powered down"));
		cmd->Status = EXT_STATUS_BUSY;
		cmd->ResponseLen = 0;
		rval = EBUSY;

		/* Return results to caller */
		if ((ql_sdm_return(ha, cmd, arg, mode)) == -1) {
			EL(ha, "failed, sdm_return\n");
			rval = EFAULT;
		}
		return (rval);
	}

	/*
	 * Dispatch on the OS-specific command code.  Each handler
	 * sets cmd->Status/ResponseLen and copies out any response
	 * payload itself; ql_sdm_return() below copies back only
	 * the status fields.
	 */
	switch (ioctl_code) {
	case EXT_CC_QUERY_OS:
		ql_query(ha, cmd, mode);
		break;
	case EXT_CC_SEND_FCCT_PASSTHRU_OS:
		ql_fcct(ha, cmd, mode);
		break;
	case EXT_CC_REG_AEN_OS:
		ql_aen_reg(ha, cmd, mode);
		break;
	case EXT_CC_GET_AEN_OS:
		ql_aen_get(ha, cmd, mode);
		break;
	case EXT_CC_GET_DATA_OS:
		ql_get_host_data(ha, cmd, mode);
		break;
	case EXT_CC_SET_DATA_OS:
		ql_set_host_data(ha, cmd, mode);
		break;
	case EXT_CC_SEND_ELS_RNID_OS:
		ql_send_els_rnid(ha, cmd, mode);
		break;
	case EXT_CC_SCSI_PASSTHRU_OS:
		ql_scsi_passthru(ha, cmd, mode);
		break;
	case EXT_CC_WWPN_TO_SCSIADDR_OS:
		ql_wwpn_to_scsiaddr(ha, cmd, mode);
		break;
	case EXT_CC_HOST_IDX_OS:
		ql_host_idx(ha, cmd, mode);
		break;
	case EXT_CC_HOST_DRVNAME_OS:
		ql_host_drvname(ha, cmd, mode);
		break;
	case EXT_CC_READ_NVRAM_OS:
		ql_read_nvram(ha, cmd, mode);
		break;
	case EXT_CC_UPDATE_NVRAM_OS:
		ql_write_nvram(ha, cmd, mode);
		break;
	case EXT_CC_READ_OPTION_ROM_OS:
	case EXT_CC_READ_OPTION_ROM_EX_OS:
		ql_read_flash(ha, cmd, mode);
		break;
	case EXT_CC_UPDATE_OPTION_ROM_OS:
	case EXT_CC_UPDATE_OPTION_ROM_EX_OS:
		ql_write_flash(ha, cmd, mode);
		break;
	case EXT_CC_LOOPBACK_OS:
		ql_diagnostic_loopback(ha, cmd, mode);
		break;
	case EXT_CC_GET_VPD_OS:
		ql_read_vpd(ha, cmd, mode);
		break;
	case EXT_CC_SET_VPD_OS:
		ql_write_vpd(ha, cmd, mode);
		break;
	case EXT_CC_GET_FCACHE_OS:
		ql_get_fcache(ha, cmd, mode);
		break;
	case EXT_CC_GET_FCACHE_EX_OS:
		ql_get_fcache_ex(ha, cmd, mode);
		break;
	case EXT_CC_GET_SFP_DATA_OS:
		ql_get_sfp(ha, cmd, mode);
		break;
	case EXT_CC_PORT_PARAM_OS:
		ql_port_param(ha, cmd, mode);
		break;
	case EXT_CC_GET_PCI_DATA_OS:
		ql_get_pci_data(ha, cmd, mode);
		break;
	case EXT_CC_GET_FWEXTTRACE_OS:
		ql_get_fwexttrace(ha, cmd, mode);
		break;
	case EXT_CC_GET_FWFCETRACE_OS:
		ql_get_fwfcetrace(ha, cmd, mode);
		break;
	case EXT_CC_MENLO_RESET:
		ql_menlo_reset(ha, cmd, mode);
		break;
	case EXT_CC_MENLO_GET_FW_VERSION:
		ql_menlo_get_fw_version(ha, cmd, mode);
		break;
	case EXT_CC_MENLO_UPDATE_FW:
		ql_menlo_update_fw(ha, cmd, mode);
		break;
	case EXT_CC_MENLO_MANAGE_INFO:
		ql_menlo_manage_info(ha, cmd, mode);
		break;
	case EXT_CC_GET_VP_CNT_ID_OS:
		ql_get_vp_cnt_id(ha, cmd, mode);
		break;
	case EXT_CC_VPORT_CMD_OS:
		ql_vp_ioctl(ha, cmd, mode);
		break;
	case EXT_CC_ACCESS_FLASH_OS:
		ql_access_flash(ha, cmd, mode);
		break;
	case EXT_CC_RESET_FW_OS:
		ql_reset_cmd(ha, cmd);
		break;
	default:
		/* function not supported. */
		EL(ha, "failed, function not supported=%d\n", ioctl_code);

		cmd->Status = EXT_STATUS_INVALID_REQUEST;
		cmd->ResponseLen = 0;
		break;
	}

	/* Return results to caller (also frees cmd). */
	if (ql_sdm_return(ha, cmd, arg, mode) == -1) {
		EL(ha, "failed, sdm_return\n");
		return (EFAULT);
	}

	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);

	return (0);
}
547
548 /*
549 * ql_sdm_setup
550 * Make a local copy of the EXT_IOCTL struct and validate it.
551 *
552 * Input:
553 * ha: adapter state pointer.
554 * cmd_struct: Pointer to location to store local adrs of EXT_IOCTL.
555 * arg: Address of application EXT_IOCTL cmd data
556 * mode: flags
557 * val_sig: Pointer to a function to validate the ioctl signature.
558 *
559 * Returns:
560 * 0: success
561 * EFAULT: Copy in error of application EXT_IOCTL struct.
562 * EINVAL: Invalid version, signature.
563 * ENOMEM: Local allocation of EXT_IOCTL failed.
564 *
565 * Context:
566 * Kernel context.
567 */
static int
ql_sdm_setup(ql_adapter_state_t *ha, EXT_IOCTL **cmd_struct, void *arg,
    int mode, boolean_t (*val_sig)(EXT_IOCTL *))
{
	int		rval;
	EXT_IOCTL	*cmd;

	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);

	/* Allocate local memory for EXT_IOCTL. */
	*cmd_struct = NULL;
	cmd = (EXT_IOCTL *)kmem_zalloc(sizeof (EXT_IOCTL), KM_SLEEP);
	/*
	 * NOTE(review): kmem_zalloc(9F) with KM_SLEEP never returns
	 * NULL, so this check is defensive dead code.
	 */
	if (cmd == NULL) {
		EL(ha, "failed, kmem_zalloc\n");
		return (ENOMEM);
	}
	/* Get argument structure. */
	rval = ddi_copyin(arg, (void *)cmd, sizeof (EXT_IOCTL), mode);
	if (rval != 0) {
		EL(ha, "failed, ddi_copyin\n");
		rval = EFAULT;
	} else {
		/*
		 * Check signature and the version.
		 * If either are not valid then neither is the
		 * structure so don't attempt to return any error status
		 * because we can't trust what caller's arg points to.
		 * Just return the errno.
		 */
		if (val_sig(cmd) == 0) {
			EL(ha, "failed, signature\n");
			rval = EINVAL;
		} else if (cmd->Version > EXT_VERSION) {
			EL(ha, "failed, version\n");
			rval = EINVAL;
		}
	}

	/*
	 * Hand the validated copy to the caller on success;
	 * otherwise free it here — the caller must not touch
	 * *cmd_struct when a non-zero value is returned.
	 */
	if (rval == 0) {
		QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
		*cmd_struct = cmd;
		cmd->Status = EXT_STATUS_OK;
		cmd->DetailStatus = 0;
	} else {
		kmem_free((void *)cmd, sizeof (EXT_IOCTL));
	}

	return (rval);
}
617
618 /*
619 * ql_validate_signature
620 * Validate the signature string for an external ioctl call.
621 *
622 * Input:
623 * sg: Pointer to EXT_IOCTL signature to validate.
624 *
625 * Returns:
626 * B_TRUE: Signature is valid.
627 * B_FALSE: Signature is NOT valid.
628 *
629 * Context:
630 * Kernel context.
631 */
632 static boolean_t
ql_validate_signature(EXT_IOCTL * cmd_struct)633 ql_validate_signature(EXT_IOCTL *cmd_struct)
634 {
635 /*
636 * Check signature.
637 *
638 * If signature is not valid then neither is the rest of
639 * the structure (e.g., can't trust it), so don't attempt
640 * to return any error status other than the errno.
641 */
642 if (bcmp(&cmd_struct->Signature, "QLOGIC", 6) != 0) {
643 QL_PRINT_2(CE_CONT, "failed,\n");
644 return (B_FALSE);
645 }
646
647 return (B_TRUE);
648 }
649
650 /*
651 * ql_sdm_return
652 * Copies return data/status to application land for
653 * ioctl call using the SAN/Device Management EXT_IOCTL call interface.
654 *
655 * Input:
656 * ha: adapter state pointer.
657 * cmd: Pointer to kernel copy of requestor's EXT_IOCTL struct.
658 * ioctl_code: ioctl function to perform
659 * arg: EXT_IOCTL cmd data in application land.
660 * mode: flags
661 *
662 * Returns:
663 * 0: success
664 * EFAULT: Copy out error.
665 *
666 * Context:
667 * Kernel context.
668 */
/* ARGSUSED */
static int
ql_sdm_return(ql_adapter_state_t *ha, EXT_IOCTL *cmd, void *arg, int mode)
{
	int	rval = 0;

	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);

	/*
	 * Only the three status/length fields are copied back into
	 * the caller's EXT_IOCTL — the response payload itself was
	 * already copied out by the individual command handler.
	 * The results are OR-ed together so any single copyout
	 * failure is detected.
	 */
	rval |= ddi_copyout((void *)&cmd->ResponseLen,
	    (void *)&(((EXT_IOCTL*)arg)->ResponseLen), sizeof (uint32_t),
	    mode);

	rval |= ddi_copyout((void *)&cmd->Status,
	    (void *)&(((EXT_IOCTL*)arg)->Status),
	    sizeof (cmd->Status), mode);
	rval |= ddi_copyout((void *)&cmd->DetailStatus,
	    (void *)&(((EXT_IOCTL*)arg)->DetailStatus),
	    sizeof (cmd->DetailStatus), mode);

	/* The kernel copy is always freed here, success or failure. */
	kmem_free((void *)cmd, sizeof (EXT_IOCTL));

	if (rval != 0) {
		/* Some copyout operation failed */
		EL(ha, "failed, ddi_copyout\n");
		return (EFAULT);
	}

	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);

	return (0);
}
700
701 /*
702 * ql_query
703 * Performs all EXT_CC_QUERY functions.
704 *
705 * Input:
706 * ha: adapter state pointer.
707 * cmd: Local EXT_IOCTL cmd struct pointer.
708 * mode: flags.
709 *
710 * Returns:
711 * None, request status indicated in cmd->Status.
712 *
713 * Context:
714 * Kernel context.
715 */
716 static void
ql_query(ql_adapter_state_t * ha,EXT_IOCTL * cmd,int mode)717 ql_query(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
718 {
719 QL_PRINT_9(CE_CONT, "(%d): started, cmd=%d\n", ha->instance,
720 cmd->SubCode);
721
722 /* case off on command subcode */
723 switch (cmd->SubCode) {
724 case EXT_SC_QUERY_HBA_NODE:
725 ql_qry_hba_node(ha, cmd, mode);
726 break;
727 case EXT_SC_QUERY_HBA_PORT:
728 ql_qry_hba_port(ha, cmd, mode);
729 break;
730 case EXT_SC_QUERY_DISC_PORT:
731 ql_qry_disc_port(ha, cmd, mode);
732 break;
733 case EXT_SC_QUERY_DISC_TGT:
734 ql_qry_disc_tgt(ha, cmd, mode);
735 break;
736 case EXT_SC_QUERY_DRIVER:
737 ql_qry_driver(ha, cmd, mode);
738 break;
739 case EXT_SC_QUERY_FW:
740 ql_qry_fw(ha, cmd, mode);
741 break;
742 case EXT_SC_QUERY_CHIP:
743 ql_qry_chip(ha, cmd, mode);
744 break;
745 case EXT_SC_QUERY_CNA_PORT:
746 ql_qry_cna_port(ha, cmd, mode);
747 break;
748 case EXT_SC_QUERY_ADAPTER_VERSIONS:
749 ql_qry_adapter_versions(ha, cmd, mode);
750 break;
751 case EXT_SC_QUERY_DISC_LUN:
752 default:
753 /* function not supported. */
754 cmd->Status = EXT_STATUS_UNSUPPORTED_SUBCODE;
755 EL(ha, "failed, Unsupported Subcode=%xh\n",
756 cmd->SubCode);
757 break;
758 }
759
760 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
761 }
762
763 /*
764 * ql_qry_hba_node
765 * Performs EXT_SC_QUERY_HBA_NODE subfunction.
766 *
767 * Input:
768 * ha: adapter state pointer.
769 * cmd: EXT_IOCTL cmd struct pointer.
770 * mode: flags.
771 *
772 * Returns:
773 * None, request status indicated in cmd->Status.
774 *
775 * Context:
776 * Kernel context.
777 */
static void
ql_qry_hba_node(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
{
	EXT_HBA_NODE	tmp_node = {0};
	uint_t		len;
	caddr_t		bufp;

	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);

	/* Caller's buffer must hold a full EXT_HBA_NODE. */
	if (cmd->ResponseLen < sizeof (EXT_HBA_NODE)) {
		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
		cmd->DetailStatus = sizeof (EXT_HBA_NODE);
		EL(ha, "failed, ResponseLen < EXT_HBA_NODE, "
		    "Len=%xh\n", cmd->ResponseLen);
		cmd->ResponseLen = 0;
		return;
	}

	/* fill in the values */

	bcopy(ha->loginparams.node_ww_name.raw_wwn, tmp_node.WWNN,
	    EXT_DEF_WWN_NAME_SIZE);

	(void) sprintf((char *)(tmp_node.Manufacturer), "QLogic Corporation");

	(void) sprintf((char *)(tmp_node.Model), "%x", ha->device_id);

	/* Serial number is derived from the low 3 bytes of the WWNN. */
	bcopy(&tmp_node.WWNN[5], tmp_node.SerialNum, 3);

	(void) sprintf((char *)(tmp_node.DriverVersion), QL_VERSION);

	/* SBUS cards: append the FPGA revision to the driver version. */
	if (CFG_IST(ha, CFG_SBUS_CARD)) {
		size_t		verlen;
		uint16_t	w;
		char		*tmpptr;

		verlen = strlen((char *)(tmp_node.DriverVersion));
		if (verlen + 5 > EXT_DEF_MAX_STR_SIZE) {
			EL(ha, "failed, No room for fpga version string\n");
		} else {
			w = (uint16_t)ddi_get16(ha->sbus_fpga_dev_handle,
			    (uint16_t *)
			    (ha->sbus_fpga_iobase + FPGA_REVISION));

			/*
			 * NOTE(review): tmpptr points into the
			 * DriverVersion array and can never be NULL;
			 * this check is defensive dead code.
			 */
			tmpptr = (char *)&(tmp_node.DriverVersion[verlen+1]);
			if (tmpptr == NULL) {
				EL(ha, "Unable to insert fpga version str\n");
			} else {
				(void) sprintf(tmpptr, "%d.%d",
				    ((w & 0xf0) >> 4), (w & 0x0f));
				tmp_node.DriverAttr |= EXT_CC_HBA_NODE_SBUS;
			}
		}
	}

	(void) sprintf((char *)(tmp_node.FWVersion), "%01d.%02d.%02d",
	    ha->fw_major_version, ha->fw_minor_version,
	    ha->fw_subminor_version);

	/* Pre-24xx chips: append the firmware attribute suffix. */
	if ((CFG_IST(ha, CFG_CTRL_24258081)) == 0) {
		switch (ha->fw_attributes) {
		case FWATTRIB_EF:
			(void) strcat((char *)(tmp_node.FWVersion), " EF");
			break;
		case FWATTRIB_TP:
			(void) strcat((char *)(tmp_node.FWVersion), " TP");
			break;
		case FWATTRIB_IP:
			(void) strcat((char *)(tmp_node.FWVersion), " IP");
			break;
		case FWATTRIB_IPX:
			(void) strcat((char *)(tmp_node.FWVersion), " IPX");
			break;
		case FWATTRIB_FL:
			(void) strcat((char *)(tmp_node.FWVersion), " FL");
			break;
		case FWATTRIB_FPX:
			/*
			 * NOTE(review): " FLX" for FWATTRIB_FPX looks
			 * like a possible typo for " FPX", but it
			 * matches the shipped driver string — confirm
			 * before changing.
			 */
			(void) strcat((char *)(tmp_node.FWVersion), " FLX");
			break;
		default:
			break;
		}
	}

	/* FCode version. */
	/*LINTED [Solaris DDI_DEV_T_ANY Lint error]*/
	if (ddi_getlongprop(DDI_DEV_T_ANY, ha->dip, PROP_LEN_AND_VAL_ALLOC |
	    DDI_PROP_DONTPASS | DDI_PROP_CANSLEEP, "version", (caddr_t)&bufp,
	    (int *)&len) == DDI_PROP_SUCCESS) {
		if (len < EXT_DEF_MAX_STR_SIZE) {
			bcopy(bufp, tmp_node.OptRomVersion, len);
		} else {
			/* Truncate and guarantee NUL termination. */
			bcopy(bufp, tmp_node.OptRomVersion,
			    EXT_DEF_MAX_STR_SIZE - 1);
			tmp_node.OptRomVersion[EXT_DEF_MAX_STR_SIZE - 1] =
			    '\0';
		}
		kmem_free(bufp, len);
	} else {
		(void) sprintf((char *)tmp_node.OptRomVersion, "0");
	}
	tmp_node.PortCount = 1;
	tmp_node.InterfaceType = EXT_DEF_FC_INTF_TYPE;

	/* Copy the completed node structure back to the caller. */
	if (ddi_copyout((void *)&tmp_node,
	    (void *)(uintptr_t)(cmd->ResponseAdr),
	    sizeof (EXT_HBA_NODE), mode) != 0) {
		cmd->Status = EXT_STATUS_COPY_ERR;
		cmd->ResponseLen = 0;
		EL(ha, "failed, ddi_copyout\n");
	} else {
		cmd->ResponseLen = sizeof (EXT_HBA_NODE);
		QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
	}
}
893
894 /*
895 * ql_qry_hba_port
896 * Performs EXT_SC_QUERY_HBA_PORT subfunction.
897 *
898 * Input:
899 * ha: adapter state pointer.
900 * cmd: EXT_IOCTL cmd struct pointer.
901 * mode: flags.
902 *
903 * Returns:
904 * None, request status indicated in cmd->Status.
905 *
906 * Context:
907 * Kernel context.
908 */
909 static void
ql_qry_hba_port(ql_adapter_state_t * ha,EXT_IOCTL * cmd,int mode)910 ql_qry_hba_port(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
911 {
912 ql_link_t *link;
913 ql_tgt_t *tq;
914 ql_mbx_data_t mr;
915 EXT_HBA_PORT tmp_port = {0};
916 int rval;
917 uint16_t port_cnt, tgt_cnt, index;
918
919 QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
920
921 if (cmd->ResponseLen < sizeof (EXT_HBA_PORT)) {
922 cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
923 cmd->DetailStatus = sizeof (EXT_HBA_PORT);
924 EL(ha, "failed, ResponseLen < EXT_HBA_NODE, Len=%xh\n",
925 cmd->ResponseLen);
926 cmd->ResponseLen = 0;
927 return;
928 }
929
930 /* fill in the values */
931
932 bcopy(ha->loginparams.nport_ww_name.raw_wwn, tmp_port.WWPN,
933 EXT_DEF_WWN_NAME_SIZE);
934 tmp_port.Id[0] = 0;
935 tmp_port.Id[1] = ha->d_id.b.domain;
936 tmp_port.Id[2] = ha->d_id.b.area;
937 tmp_port.Id[3] = ha->d_id.b.al_pa;
938
939 /* For now we are initiator only driver */
940 tmp_port.Type = EXT_DEF_INITIATOR_DEV;
941
942 if (ha->task_daemon_flags & LOOP_DOWN) {
943 tmp_port.State = EXT_DEF_HBA_LOOP_DOWN;
944 } else if (DRIVER_SUSPENDED(ha)) {
945 tmp_port.State = EXT_DEF_HBA_SUSPENDED;
946 } else {
947 tmp_port.State = EXT_DEF_HBA_OK;
948 }
949
950 if (ha->flags & POINT_TO_POINT) {
951 tmp_port.Mode = EXT_DEF_P2P_MODE;
952 } else {
953 tmp_port.Mode = EXT_DEF_LOOP_MODE;
954 }
955 /*
956 * fill in the portspeed values.
957 *
958 * default to not yet negotiated state
959 */
960 tmp_port.PortSpeed = EXT_PORTSPEED_NOT_NEGOTIATED;
961
962 if (tmp_port.State == EXT_DEF_HBA_OK) {
963 switch (ha->iidma_rate) {
964 case IIDMA_RATE_1GB:
965 tmp_port.PortSpeed = EXT_DEF_PORTSPEED_1GBIT;
966 break;
967 case IIDMA_RATE_2GB:
968 tmp_port.PortSpeed = EXT_DEF_PORTSPEED_2GBIT;
969 break;
970 case IIDMA_RATE_4GB:
971 tmp_port.PortSpeed = EXT_DEF_PORTSPEED_4GBIT;
972 break;
973 case IIDMA_RATE_8GB:
974 tmp_port.PortSpeed = EXT_DEF_PORTSPEED_8GBIT;
975 break;
976 case IIDMA_RATE_10GB:
977 tmp_port.PortSpeed = EXT_DEF_PORTSPEED_10GBIT;
978 break;
979 default:
980 tmp_port.PortSpeed = EXT_DEF_PORTSPEED_UNKNOWN;
981 EL(ha, "failed, data rate=%xh\n", mr.mb[1]);
982 break;
983 }
984 }
985
986 /* Report all supported port speeds */
987 if (CFG_IST(ha, CFG_CTRL_25XX)) {
988 tmp_port.PortSupportedSpeed = (EXT_DEF_PORTSPEED_8GBIT |
989 EXT_DEF_PORTSPEED_4GBIT | EXT_DEF_PORTSPEED_2GBIT |
990 EXT_DEF_PORTSPEED_1GBIT);
991 /*
992 * Correct supported speeds based on type of
993 * sfp that is present
994 */
995 switch (ha->sfp_stat) {
996 case 1:
997 /* no sfp detected */
998 break;
999 case 2:
1000 case 4:
1001 /* 4GB sfp */
1002 tmp_port.PortSupportedSpeed &=
1003 ~EXT_DEF_PORTSPEED_8GBIT;
1004 break;
1005 case 3:
1006 case 5:
1007 /* 8GB sfp */
1008 tmp_port.PortSupportedSpeed &=
1009 ~EXT_DEF_PORTSPEED_1GBIT;
1010 break;
1011 default:
1012 EL(ha, "sfp_stat: %xh\n", ha->sfp_stat);
1013 break;
1014
1015 }
1016 } else if (CFG_IST(ha, CFG_CTRL_8081)) {
1017 tmp_port.PortSupportedSpeed = EXT_DEF_PORTSPEED_10GBIT;
1018 } else if (CFG_IST(ha, CFG_CTRL_2422)) {
1019 tmp_port.PortSupportedSpeed = (EXT_DEF_PORTSPEED_4GBIT |
1020 EXT_DEF_PORTSPEED_2GBIT | EXT_DEF_PORTSPEED_1GBIT);
1021 } else if (CFG_IST(ha, CFG_CTRL_2300)) {
1022 tmp_port.PortSupportedSpeed = (EXT_DEF_PORTSPEED_2GBIT |
1023 EXT_DEF_PORTSPEED_1GBIT);
1024 } else if (CFG_IST(ha, CFG_CTRL_6322)) {
1025 tmp_port.PortSupportedSpeed = EXT_DEF_PORTSPEED_2GBIT;
1026 } else if (CFG_IST(ha, CFG_CTRL_2200)) {
1027 tmp_port.PortSupportedSpeed = EXT_DEF_PORTSPEED_1GBIT;
1028 } else {
1029 tmp_port.PortSupportedSpeed = EXT_DEF_PORTSPEED_UNKNOWN;
1030 EL(ha, "unknown HBA type: %xh\n", ha->device_id);
1031 }
1032 tmp_port.LinkState2 = LSB(ha->sfp_stat);
1033 port_cnt = 0;
1034 tgt_cnt = 0;
1035
1036 for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
1037 for (link = ha->dev[index].first; link != NULL;
1038 link = link->next) {
1039 tq = link->base_address;
1040
1041 if (!VALID_TARGET_ID(ha, tq->loop_id)) {
1042 continue;
1043 }
1044
1045 port_cnt++;
1046 if ((tq->flags & TQF_INITIATOR_DEVICE) == 0) {
1047 tgt_cnt++;
1048 }
1049 }
1050 }
1051
1052 tmp_port.DiscPortCount = port_cnt;
1053 tmp_port.DiscTargetCount = tgt_cnt;
1054
1055 tmp_port.DiscPortNameType = EXT_DEF_USE_NODE_NAME;
1056
1057 rval = ddi_copyout((void *)&tmp_port,
1058 (void *)(uintptr_t)(cmd->ResponseAdr),
1059 sizeof (EXT_HBA_PORT), mode);
1060 if (rval != 0) {
1061 cmd->Status = EXT_STATUS_COPY_ERR;
1062 cmd->ResponseLen = 0;
1063 EL(ha, "failed, ddi_copyout\n");
1064 } else {
1065 cmd->ResponseLen = sizeof (EXT_HBA_PORT);
1066 QL_PRINT_9(CE_CONT, "(%d): done, ports=%d, targets=%d\n",
1067 ha->instance, port_cnt, tgt_cnt);
1068 }
1069 }
1070
1071 /*
1072 * ql_qry_disc_port
1073 * Performs EXT_SC_QUERY_DISC_PORT subfunction.
1074 *
1075 * Input:
1076 * ha: adapter state pointer.
1077 * cmd: EXT_IOCTL cmd struct pointer.
1078 * mode: flags.
1079 *
1080 * cmd->Instance = Port instance in fcport chain.
1081 *
1082 * Returns:
1083 * None, request status indicated in cmd->Status.
1084 *
1085 * Context:
1086 * Kernel context.
1087 */
static void
ql_qry_disc_port(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
{
	EXT_DISC_PORT	tmp_port = {0};
	ql_link_t	*link;
	ql_tgt_t	*tq;
	uint16_t	index;
	uint16_t	inst = 0;

	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);

	/* Caller's buffer must hold a complete EXT_DISC_PORT. */
	if (cmd->ResponseLen < sizeof (EXT_DISC_PORT)) {
		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
		cmd->DetailStatus = sizeof (EXT_DISC_PORT);
		EL(ha, "failed, ResponseLen < EXT_DISC_PORT, Len=%xh\n",
		    cmd->ResponseLen);
		cmd->ResponseLen = 0;
		return;
	}

	/*
	 * Walk the device hash lists counting ports with valid target IDs
	 * until the cmd->Instance'th one is reached.  "link" doubles as the
	 * found/not-found sentinel: the outer loop exits when the inner
	 * loop breaks with link != NULL (match) or exhausts every bucket
	 * (link == NULL).
	 */
	for (link = NULL, index = 0;
	    index < DEVICE_HEAD_LIST_SIZE && link == NULL; index++) {
		for (link = ha->dev[index].first; link != NULL;
		    link = link->next) {
			tq = link->base_address;

			if (!VALID_TARGET_ID(ha, tq->loop_id)) {
				continue;
			}
			/* Skip entries until the requested instance. */
			if (inst != cmd->Instance) {
				inst++;
				continue;
			}

			/* fill in the values */
			bcopy(tq->node_name, tmp_port.WWNN,
			    EXT_DEF_WWN_NAME_SIZE);
			bcopy(tq->port_name, tmp_port.WWPN,
			    EXT_DEF_WWN_NAME_SIZE);

			break;
		}
	}

	if (link == NULL) {
		/* no matching device */
		cmd->Status = EXT_STATUS_DEV_NOT_FOUND;
		EL(ha, "failed, port not found port=%d\n", cmd->Instance);
		cmd->ResponseLen = 0;
		return;
	}

	/* 24-bit FC address, byte 0 unused. */
	tmp_port.Id[0] = 0;
	tmp_port.Id[1] = tq->d_id.b.domain;
	tmp_port.Id[2] = tq->d_id.b.area;
	tmp_port.Id[3] = tq->d_id.b.al_pa;

	/*
	 * Classify the port.  NOTE(review): when the device is neither an
	 * initiator nor already marked as tape, ql_inq_scan() is issued
	 * (presumably to refresh tq->flags), but any TQF_TAPE_DEVICE bit it
	 * sets is not reflected in tmp_port.Type on this pass because the
	 * else-if chain has already been taken — confirm intended.
	 */
	tmp_port.Type = 0;
	if (tq->flags & TQF_INITIATOR_DEVICE) {
		tmp_port.Type = (uint16_t)(tmp_port.Type |
		    EXT_DEF_INITIATOR_DEV);
	} else if ((tq->flags & TQF_TAPE_DEVICE) == 0) {
		(void) ql_inq_scan(ha, tq, 1);
	} else if (tq->flags & TQF_TAPE_DEVICE) {
		tmp_port.Type = (uint16_t)(tmp_port.Type | EXT_DEF_TAPE_DEV);
	}

	if (tq->flags & TQF_FABRIC_DEVICE) {
		tmp_port.Type = (uint16_t)(tmp_port.Type | EXT_DEF_FABRIC_DEV);
	} else {
		tmp_port.Type = (uint16_t)(tmp_port.Type | EXT_DEF_TARGET_DEV);
	}

	tmp_port.Status = 0;
	tmp_port.Bus = 0;	/* Hard-coded for Solaris */

	bcopy(tq->port_name, &tmp_port.TargetId, 8);

	/* Return the filled-in structure to the application. */
	if (ddi_copyout((void *)&tmp_port,
	    (void *)(uintptr_t)(cmd->ResponseAdr),
	    sizeof (EXT_DISC_PORT), mode) != 0) {
		cmd->Status = EXT_STATUS_COPY_ERR;
		cmd->ResponseLen = 0;
		EL(ha, "failed, ddi_copyout\n");
	} else {
		cmd->ResponseLen = sizeof (EXT_DISC_PORT);
		QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
	}
}
1177
1178 /*
1179 * ql_qry_disc_tgt
1180 * Performs EXT_SC_QUERY_DISC_TGT subfunction.
1181 *
1182 * Input:
1183 * ha: adapter state pointer.
1184 * cmd: EXT_IOCTL cmd struct pointer.
1185 * mode: flags.
1186 *
1187 * cmd->Instance = Port instance in fcport chain.
1188 *
1189 * Returns:
1190 * None, request status indicated in cmd->Status.
1191 *
1192 * Context:
1193 * Kernel context.
1194 */
static void
ql_qry_disc_tgt(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
{
	EXT_DISC_TARGET	tmp_tgt = {0};
	ql_link_t	*link;
	ql_tgt_t	*tq;
	uint16_t	index;
	uint16_t	inst = 0;

	QL_PRINT_9(CE_CONT, "(%d): started, target=%d\n", ha->instance,
	    cmd->Instance);

	/* Caller's buffer must hold a complete EXT_DISC_TARGET. */
	if (cmd->ResponseLen < sizeof (EXT_DISC_TARGET)) {
		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
		cmd->DetailStatus = sizeof (EXT_DISC_TARGET);
		EL(ha, "failed, ResponseLen < EXT_DISC_TARGET, Len=%xh\n",
		    cmd->ResponseLen);
		cmd->ResponseLen = 0;
		return;
	}

	/*
	 * Scan port list for requested target and fill in the values.
	 * Unlike ql_qry_disc_port(), initiator devices are excluded from
	 * the instance count.  "link" is the found/not-found sentinel for
	 * the nested loops.
	 */
	for (link = NULL, index = 0;
	    index < DEVICE_HEAD_LIST_SIZE && link == NULL; index++) {
		for (link = ha->dev[index].first; link != NULL;
		    link = link->next) {
			tq = link->base_address;

			if (!VALID_TARGET_ID(ha, tq->loop_id) ||
			    tq->flags & TQF_INITIATOR_DEVICE) {
				continue;
			}
			/* Skip entries until the requested instance. */
			if (inst != cmd->Instance) {
				inst++;
				continue;
			}

			/* fill in the values */
			bcopy(tq->node_name, tmp_tgt.WWNN,
			    EXT_DEF_WWN_NAME_SIZE);
			bcopy(tq->port_name, tmp_tgt.WWPN,
			    EXT_DEF_WWN_NAME_SIZE);

			break;
		}
	}

	if (link == NULL) {
		/* no matching device */
		cmd->Status = EXT_STATUS_DEV_NOT_FOUND;
		cmd->DetailStatus = EXT_DSTATUS_TARGET;
		EL(ha, "failed, not found target=%d\n", cmd->Instance);
		cmd->ResponseLen = 0;
		return;
	}
	/* 24-bit FC address, byte 0 unused. */
	tmp_tgt.Id[0] = 0;
	tmp_tgt.Id[1] = tq->d_id.b.domain;
	tmp_tgt.Id[2] = tq->d_id.b.area;
	tmp_tgt.Id[3] = tq->d_id.b.al_pa;

	tmp_tgt.LunCount = (uint16_t)ql_lun_count(ha, tq);

	/*
	 * Refresh device flags before classifying; here (unlike in
	 * ql_qry_disc_port) the scan runs before the tape check, so a
	 * newly detected tape device is reported as such.
	 */
	if ((tq->flags & TQF_TAPE_DEVICE) == 0) {
		(void) ql_inq_scan(ha, tq, 1);
	}

	tmp_tgt.Type = 0;
	if (tq->flags & TQF_TAPE_DEVICE) {
		tmp_tgt.Type = (uint16_t)(tmp_tgt.Type | EXT_DEF_TAPE_DEV);
	}

	if (tq->flags & TQF_FABRIC_DEVICE) {
		tmp_tgt.Type = (uint16_t)(tmp_tgt.Type | EXT_DEF_FABRIC_DEV);
	} else {
		tmp_tgt.Type = (uint16_t)(tmp_tgt.Type | EXT_DEF_TARGET_DEV);
	}

	tmp_tgt.Status = 0;

	tmp_tgt.Bus = 0;	/* Hard-coded for Solaris. */

	bcopy(tq->port_name, &tmp_tgt.TargetId, 8);

	/* Return the filled-in structure to the application. */
	if (ddi_copyout((void *)&tmp_tgt,
	    (void *)(uintptr_t)(cmd->ResponseAdr),
	    sizeof (EXT_DISC_TARGET), mode) != 0) {
		cmd->Status = EXT_STATUS_COPY_ERR;
		cmd->ResponseLen = 0;
		EL(ha, "failed, ddi_copyout\n");
	} else {
		cmd->ResponseLen = sizeof (EXT_DISC_TARGET);
		QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
	}
}
1289
1290 /*
1291 * ql_qry_fw
1292 * Performs EXT_SC_QUERY_FW subfunction.
1293 *
1294 * Input:
1295 * ha: adapter state pointer.
1296 * cmd: EXT_IOCTL cmd struct pointer.
1297 * mode: flags.
1298 *
1299 * Returns:
1300 * None, request status indicated in cmd->Status.
1301 *
1302 * Context:
1303 * Kernel context.
1304 */
1305 static void
ql_qry_fw(ql_adapter_state_t * ha,EXT_IOCTL * cmd,int mode)1306 ql_qry_fw(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
1307 {
1308 EXT_FW fw_info = {0};
1309
1310 QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
1311
1312 if (cmd->ResponseLen < sizeof (EXT_FW)) {
1313 cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
1314 cmd->DetailStatus = sizeof (EXT_FW);
1315 EL(ha, "failed, ResponseLen < EXT_FW, Len=%xh\n",
1316 cmd->ResponseLen);
1317 cmd->ResponseLen = 0;
1318 return;
1319 }
1320
1321 (void) sprintf((char *)(fw_info.Version), "%d.%02d.%02d",
1322 ha->fw_major_version, ha->fw_minor_version,
1323 ha->fw_subminor_version);
1324
1325 fw_info.Attrib = ha->fw_attributes;
1326
1327 if (ddi_copyout((void *)&fw_info,
1328 (void *)(uintptr_t)(cmd->ResponseAdr),
1329 sizeof (EXT_FW), mode) != 0) {
1330 cmd->Status = EXT_STATUS_COPY_ERR;
1331 cmd->ResponseLen = 0;
1332 EL(ha, "failed, ddi_copyout\n");
1333 return;
1334 } else {
1335 cmd->ResponseLen = sizeof (EXT_FW);
1336 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
1337 }
1338 }
1339
1340 /*
1341 * ql_qry_chip
1342 * Performs EXT_SC_QUERY_CHIP subfunction.
1343 *
1344 * Input:
1345 * ha: adapter state pointer.
1346 * cmd: EXT_IOCTL cmd struct pointer.
1347 * mode: flags.
1348 *
1349 * Returns:
1350 * None, request status indicated in cmd->Status.
1351 *
1352 * Context:
1353 * Kernel context.
1354 */
1355 static void
ql_qry_chip(ql_adapter_state_t * ha,EXT_IOCTL * cmd,int mode)1356 ql_qry_chip(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
1357 {
1358 EXT_CHIP chip = {0};
1359
1360 QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
1361
1362 if (cmd->ResponseLen < sizeof (EXT_CHIP)) {
1363 cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
1364 cmd->DetailStatus = sizeof (EXT_CHIP);
1365 EL(ha, "failed, ResponseLen < EXT_CHIP, Len=%xh\n",
1366 cmd->ResponseLen);
1367 cmd->ResponseLen = 0;
1368 return;
1369 }
1370
1371 chip.VendorId = ha->ven_id;
1372 chip.DeviceId = ha->device_id;
1373 chip.SubVendorId = ha->subven_id;
1374 chip.SubSystemId = ha->subsys_id;
1375 chip.IoAddr = ql_pci_config_get32(ha, PCI_CONF_BASE0);
1376 chip.IoAddrLen = 0x100;
1377 chip.MemAddr = ql_pci_config_get32(ha, PCI_CONF_BASE1);
1378 chip.MemAddrLen = 0x100;
1379 chip.ChipRevID = ha->rev_id;
1380 if (ha->flags & FUNCTION_1) {
1381 chip.FuncNo = 1;
1382 }
1383
1384 if (ddi_copyout((void *)&chip,
1385 (void *)(uintptr_t)(cmd->ResponseAdr),
1386 sizeof (EXT_CHIP), mode) != 0) {
1387 cmd->Status = EXT_STATUS_COPY_ERR;
1388 cmd->ResponseLen = 0;
1389 EL(ha, "failed, ddi_copyout\n");
1390 } else {
1391 cmd->ResponseLen = sizeof (EXT_CHIP);
1392 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
1393 }
1394 }
1395
1396 /*
1397 * ql_qry_driver
1398 * Performs EXT_SC_QUERY_DRIVER subfunction.
1399 *
1400 * Input:
1401 * ha: adapter state pointer.
1402 * cmd: EXT_IOCTL cmd struct pointer.
1403 * mode: flags.
1404 *
1405 * Returns:
1406 * None, request status indicated in cmd->Status.
1407 *
1408 * Context:
1409 * Kernel context.
1410 */
1411 static void
ql_qry_driver(ql_adapter_state_t * ha,EXT_IOCTL * cmd,int mode)1412 ql_qry_driver(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
1413 {
1414 EXT_DRIVER qd = {0};
1415
1416 QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
1417
1418 if (cmd->ResponseLen < sizeof (EXT_DRIVER)) {
1419 cmd->Status = EXT_STATUS_DATA_OVERRUN;
1420 cmd->DetailStatus = sizeof (EXT_DRIVER);
1421 EL(ha, "failed, ResponseLen < EXT_DRIVER, Len=%xh\n",
1422 cmd->ResponseLen);
1423 cmd->ResponseLen = 0;
1424 return;
1425 }
1426
1427 (void) strcpy((void *)&qd.Version[0], QL_VERSION);
1428 qd.NumOfBus = 1; /* Fixed for Solaris */
1429 qd.TargetsPerBus = (uint16_t)
1430 (CFG_IST(ha, (CFG_CTRL_24258081 | CFG_EXT_FW_INTERFACE)) ?
1431 MAX_24_FIBRE_DEVICES : MAX_22_FIBRE_DEVICES);
1432 qd.LunsPerTarget = 2030;
1433 qd.MaxTransferLen = QL_DMA_MAX_XFER_SIZE;
1434 qd.MaxDataSegments = QL_DMA_SG_LIST_LENGTH;
1435
1436 if (ddi_copyout((void *)&qd, (void *)(uintptr_t)cmd->ResponseAdr,
1437 sizeof (EXT_DRIVER), mode) != 0) {
1438 cmd->Status = EXT_STATUS_COPY_ERR;
1439 cmd->ResponseLen = 0;
1440 EL(ha, "failed, ddi_copyout\n");
1441 } else {
1442 cmd->ResponseLen = sizeof (EXT_DRIVER);
1443 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
1444 }
1445 }
1446
1447 /*
1448 * ql_fcct
1449 * IOCTL management server FC-CT passthrough.
1450 *
1451 * Input:
1452 * ha: adapter state pointer.
1453 * cmd: User space CT arguments pointer.
1454 * mode: flags.
1455 *
1456 * Returns:
1457 * None, request status indicated in cmd->Status.
1458 *
1459 * Context:
1460 * Kernel context.
1461 */
1462 static void
ql_fcct(ql_adapter_state_t * ha,EXT_IOCTL * cmd,int mode)1463 ql_fcct(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
1464 {
1465 ql_mbx_iocb_t *pkt;
1466 ql_mbx_data_t mr;
1467 dma_mem_t *dma_mem;
1468 caddr_t pld;
1469 uint32_t pkt_size, pld_byte_cnt, *long_ptr;
1470 int rval;
1471 ql_ct_iu_preamble_t *ct;
1472 ql_xioctl_t *xp = ha->xioctl;
1473 ql_tgt_t tq;
1474 uint16_t comp_status, loop_id;
1475
1476 QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
1477
1478 /* Get CT argument structure. */
1479 if ((ha->topology & QL_SNS_CONNECTION) == 0) {
1480 EL(ha, "failed, No switch\n");
1481 cmd->Status = EXT_STATUS_DEV_NOT_FOUND;
1482 cmd->ResponseLen = 0;
1483 return;
1484 }
1485
1486 if (DRIVER_SUSPENDED(ha)) {
1487 EL(ha, "failed, LOOP_NOT_READY\n");
1488 cmd->Status = EXT_STATUS_BUSY;
1489 cmd->ResponseLen = 0;
1490 return;
1491 }
1492
1493 /* Login management server device. */
1494 if ((xp->flags & QL_MGMT_SERVER_LOGIN) == 0) {
1495 tq.d_id.b.al_pa = 0xfa;
1496 tq.d_id.b.area = 0xff;
1497 tq.d_id.b.domain = 0xff;
1498 tq.loop_id = (uint16_t)(CFG_IST(ha, CFG_CTRL_24258081) ?
1499 MANAGEMENT_SERVER_24XX_LOOP_ID :
1500 MANAGEMENT_SERVER_LOOP_ID);
1501 rval = ql_login_fport(ha, &tq, tq.loop_id, LFF_NO_PRLI, &mr);
1502 if (rval != QL_SUCCESS) {
1503 EL(ha, "failed, server login\n");
1504 cmd->Status = EXT_STATUS_DEV_NOT_FOUND;
1505 cmd->ResponseLen = 0;
1506 return;
1507 } else {
1508 xp->flags |= QL_MGMT_SERVER_LOGIN;
1509 }
1510 }
1511
1512 QL_PRINT_9(CE_CONT, "(%d): cmd\n", ha->instance);
1513 QL_DUMP_9(cmd, 8, sizeof (EXT_IOCTL));
1514
1515 /* Allocate a DMA Memory Descriptor */
1516 dma_mem = (dma_mem_t *)kmem_zalloc(sizeof (dma_mem_t), KM_SLEEP);
1517 if (dma_mem == NULL) {
1518 EL(ha, "failed, kmem_zalloc\n");
1519 cmd->Status = EXT_STATUS_NO_MEMORY;
1520 cmd->ResponseLen = 0;
1521 return;
1522 }
1523 /* Determine maximum buffer size. */
1524 if (cmd->RequestLen < cmd->ResponseLen) {
1525 pld_byte_cnt = cmd->ResponseLen;
1526 } else {
1527 pld_byte_cnt = cmd->RequestLen;
1528 }
1529
1530 /* Allocate command block. */
1531 pkt_size = (uint32_t)(sizeof (ql_mbx_iocb_t) + pld_byte_cnt);
1532 pkt = kmem_zalloc(pkt_size, KM_SLEEP);
1533 if (pkt == NULL) {
1534 EL(ha, "failed, kmem_zalloc\n");
1535 cmd->Status = EXT_STATUS_NO_MEMORY;
1536 cmd->ResponseLen = 0;
1537 return;
1538 }
1539 pld = (caddr_t)pkt + sizeof (ql_mbx_iocb_t);
1540
1541 /* Get command payload data. */
1542 if (ql_get_buffer_data((caddr_t)(uintptr_t)cmd->RequestAdr, pld,
1543 cmd->RequestLen, mode) != cmd->RequestLen) {
1544 EL(ha, "failed, get_buffer_data\n");
1545 kmem_free(pkt, pkt_size);
1546 cmd->Status = EXT_STATUS_COPY_ERR;
1547 cmd->ResponseLen = 0;
1548 return;
1549 }
1550
1551 /* Get DMA memory for the IOCB */
1552 if (ql_get_dma_mem(ha, dma_mem, pkt_size, LITTLE_ENDIAN_DMA,
1553 QL_DMA_RING_ALIGN) != QL_SUCCESS) {
1554 cmn_err(CE_WARN, "%s(%d): DMA memory "
1555 "alloc failed", QL_NAME, ha->instance);
1556 kmem_free(pkt, pkt_size);
1557 kmem_free(dma_mem, sizeof (dma_mem_t));
1558 cmd->Status = EXT_STATUS_MS_NO_RESPONSE;
1559 cmd->ResponseLen = 0;
1560 return;
1561 }
1562
1563 /* Copy out going payload data to IOCB DMA buffer. */
1564 ddi_rep_put8(dma_mem->acc_handle, (uint8_t *)pld,
1565 (uint8_t *)dma_mem->bp, pld_byte_cnt, DDI_DEV_AUTOINCR);
1566
1567 /* Sync IOCB DMA buffer. */
1568 (void) ddi_dma_sync(dma_mem->dma_handle, 0, pld_byte_cnt,
1569 DDI_DMA_SYNC_FORDEV);
1570
1571 /*
1572 * Setup IOCB
1573 */
1574 ct = (ql_ct_iu_preamble_t *)pld;
1575 if (CFG_IST(ha, CFG_CTRL_24258081)) {
1576 pkt->ms24.entry_type = CT_PASSTHRU_TYPE;
1577 pkt->ms24.entry_count = 1;
1578
1579 pkt->ms24.vp_index = ha->vp_index;
1580
1581 /* Set loop ID */
1582 pkt->ms24.n_port_hdl = (uint16_t)
1583 (ct->gs_type == GS_TYPE_DIR_SERVER ?
1584 LE_16(SNS_24XX_HDL) :
1585 LE_16(MANAGEMENT_SERVER_24XX_LOOP_ID));
1586
1587 /* Set ISP command timeout. */
1588 pkt->ms24.timeout = LE_16(120);
1589
1590 /* Set cmd/response data segment counts. */
1591 pkt->ms24.cmd_dseg_count = LE_16(1);
1592 pkt->ms24.resp_dseg_count = LE_16(1);
1593
1594 /* Load ct cmd byte count. */
1595 pkt->ms24.cmd_byte_count = LE_32(cmd->RequestLen);
1596
1597 /* Load ct rsp byte count. */
1598 pkt->ms24.resp_byte_count = LE_32(cmd->ResponseLen);
1599
1600 long_ptr = (uint32_t *)&pkt->ms24.dseg_0_address;
1601
1602 /* Load MS command entry data segments. */
1603 *long_ptr++ = (uint32_t)
1604 LE_32(LSD(dma_mem->cookie.dmac_laddress));
1605 *long_ptr++ = (uint32_t)
1606 LE_32(MSD(dma_mem->cookie.dmac_laddress));
1607 *long_ptr++ = (uint32_t)(LE_32(cmd->RequestLen));
1608
1609 /* Load MS response entry data segments. */
1610 *long_ptr++ = (uint32_t)
1611 LE_32(LSD(dma_mem->cookie.dmac_laddress));
1612 *long_ptr++ = (uint32_t)
1613 LE_32(MSD(dma_mem->cookie.dmac_laddress));
1614 *long_ptr = (uint32_t)LE_32(cmd->ResponseLen);
1615
1616 rval = ql_issue_mbx_iocb(ha, (caddr_t)pkt,
1617 sizeof (ql_mbx_iocb_t));
1618
1619 comp_status = (uint16_t)LE_16(pkt->sts24.comp_status);
1620 if (comp_status == CS_DATA_UNDERRUN) {
1621 if ((BE_16(ct->max_residual_size)) == 0) {
1622 comp_status = CS_COMPLETE;
1623 }
1624 }
1625
1626 if (rval != QL_SUCCESS || (pkt->sts24.entry_status & 0x3c) !=
1627 0) {
1628 EL(ha, "failed, I/O timeout or "
1629 "es=%xh, ss_l=%xh, rval=%xh\n",
1630 pkt->sts24.entry_status,
1631 pkt->sts24.scsi_status_l, rval);
1632 kmem_free(pkt, pkt_size);
1633 ql_free_dma_resource(ha, dma_mem);
1634 kmem_free(dma_mem, sizeof (dma_mem_t));
1635 cmd->Status = EXT_STATUS_MS_NO_RESPONSE;
1636 cmd->ResponseLen = 0;
1637 return;
1638 }
1639 } else {
1640 pkt->ms.entry_type = MS_TYPE;
1641 pkt->ms.entry_count = 1;
1642
1643 /* Set loop ID */
1644 loop_id = (uint16_t)(ct->gs_type == GS_TYPE_DIR_SERVER ?
1645 SIMPLE_NAME_SERVER_LOOP_ID : MANAGEMENT_SERVER_LOOP_ID);
1646 if (CFG_IST(ha, CFG_EXT_FW_INTERFACE)) {
1647 pkt->ms.loop_id_l = LSB(loop_id);
1648 pkt->ms.loop_id_h = MSB(loop_id);
1649 } else {
1650 pkt->ms.loop_id_h = LSB(loop_id);
1651 }
1652
1653 /* Set ISP command timeout. */
1654 pkt->ms.timeout = LE_16(120);
1655
1656 /* Set data segment counts. */
1657 pkt->ms.cmd_dseg_count_l = 1;
1658 pkt->ms.total_dseg_count = LE_16(2);
1659
1660 /* Response total byte count. */
1661 pkt->ms.resp_byte_count = LE_32(cmd->ResponseLen);
1662 pkt->ms.dseg_1_length = LE_32(cmd->ResponseLen);
1663
1664 /* Command total byte count. */
1665 pkt->ms.cmd_byte_count = LE_32(cmd->RequestLen);
1666 pkt->ms.dseg_0_length = LE_32(cmd->RequestLen);
1667
1668 /* Load command/response data segments. */
1669 pkt->ms.dseg_0_address[0] = (uint32_t)
1670 LE_32(LSD(dma_mem->cookie.dmac_laddress));
1671 pkt->ms.dseg_0_address[1] = (uint32_t)
1672 LE_32(MSD(dma_mem->cookie.dmac_laddress));
1673 pkt->ms.dseg_1_address[0] = (uint32_t)
1674 LE_32(LSD(dma_mem->cookie.dmac_laddress));
1675 pkt->ms.dseg_1_address[1] = (uint32_t)
1676 LE_32(MSD(dma_mem->cookie.dmac_laddress));
1677
1678 rval = ql_issue_mbx_iocb(ha, (caddr_t)pkt,
1679 sizeof (ql_mbx_iocb_t));
1680
1681 comp_status = (uint16_t)LE_16(pkt->sts.comp_status);
1682 if (comp_status == CS_DATA_UNDERRUN) {
1683 if ((BE_16(ct->max_residual_size)) == 0) {
1684 comp_status = CS_COMPLETE;
1685 }
1686 }
1687 if (rval != QL_SUCCESS || (pkt->sts.entry_status & 0x7e) != 0) {
1688 EL(ha, "failed, I/O timeout or "
1689 "es=%xh, rval=%xh\n", pkt->sts.entry_status, rval);
1690 kmem_free(pkt, pkt_size);
1691 ql_free_dma_resource(ha, dma_mem);
1692 kmem_free(dma_mem, sizeof (dma_mem_t));
1693 cmd->Status = EXT_STATUS_MS_NO_RESPONSE;
1694 cmd->ResponseLen = 0;
1695 return;
1696 }
1697 }
1698
1699 /* Sync in coming DMA buffer. */
1700 (void) ddi_dma_sync(dma_mem->dma_handle, 0,
1701 pld_byte_cnt, DDI_DMA_SYNC_FORKERNEL);
1702 /* Copy in coming DMA data. */
1703 ddi_rep_get8(dma_mem->acc_handle, (uint8_t *)pld,
1704 (uint8_t *)dma_mem->bp, pld_byte_cnt,
1705 DDI_DEV_AUTOINCR);
1706
1707 /* Copy response payload from DMA buffer to application. */
1708 if (cmd->ResponseLen != 0) {
1709 QL_PRINT_9(CE_CONT, "(%d): ResponseLen=%d\n", ha->instance,
1710 cmd->ResponseLen);
1711 QL_DUMP_9(pld, 8, cmd->ResponseLen);
1712
1713 /* Send response payload. */
1714 if (ql_send_buffer_data(pld,
1715 (caddr_t)(uintptr_t)cmd->ResponseAdr,
1716 cmd->ResponseLen, mode) != cmd->ResponseLen) {
1717 EL(ha, "failed, send_buffer_data\n");
1718 cmd->Status = EXT_STATUS_COPY_ERR;
1719 cmd->ResponseLen = 0;
1720 }
1721 }
1722
1723 kmem_free(pkt, pkt_size);
1724 ql_free_dma_resource(ha, dma_mem);
1725 kmem_free(dma_mem, sizeof (dma_mem_t));
1726
1727 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
1728 }
1729
1730 /*
1731 * ql_aen_reg
1732 * IOCTL management server Asynchronous Event Tracking Enable/Disable.
1733 *
1734 * Input:
1735 * ha: adapter state pointer.
1736 * cmd: EXT_IOCTL cmd struct pointer.
1737 * mode: flags.
1738 *
1739 * Returns:
1740 * None, request status indicated in cmd->Status.
1741 *
1742 * Context:
1743 * Kernel context.
1744 */
1745 static void
ql_aen_reg(ql_adapter_state_t * ha,EXT_IOCTL * cmd,int mode)1746 ql_aen_reg(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
1747 {
1748 EXT_REG_AEN reg_struct;
1749 int rval = 0;
1750 ql_xioctl_t *xp = ha->xioctl;
1751
1752 QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
1753
1754 rval = ddi_copyin((void*)(uintptr_t)cmd->RequestAdr, ®_struct,
1755 cmd->RequestLen, mode);
1756
1757 if (rval == 0) {
1758 if (reg_struct.Enable) {
1759 xp->flags |= QL_AEN_TRACKING_ENABLE;
1760 } else {
1761 xp->flags &= ~QL_AEN_TRACKING_ENABLE;
1762 /* Empty the queue. */
1763 INTR_LOCK(ha);
1764 xp->aen_q_head = 0;
1765 xp->aen_q_tail = 0;
1766 INTR_UNLOCK(ha);
1767 }
1768 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
1769 } else {
1770 cmd->Status = EXT_STATUS_COPY_ERR;
1771 EL(ha, "failed, ddi_copyin\n");
1772 }
1773 }
1774
1775 /*
1776 * ql_aen_get
1777 * IOCTL management server Asynchronous Event Record Transfer.
1778 *
1779 * Input:
1780 * ha: adapter state pointer.
1781 * cmd: EXT_IOCTL cmd struct pointer.
1782 * mode: flags.
1783 *
1784 * Returns:
1785 * None, request status indicated in cmd->Status.
1786 *
1787 * Context:
1788 * Kernel context.
1789 */
static void
ql_aen_get(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
{
	uint32_t	out_size;
	EXT_ASYNC_EVENT	*tmp_q;
	EXT_ASYNC_EVENT	aen[EXT_DEF_MAX_AEN_QUEUE];
	uint8_t		i;
	uint8_t		queue_cnt;
	uint8_t		request_cnt;
	ql_xioctl_t	*xp = ha->xioctl;

	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);

	/* Compute the number of events that can be returned */
	request_cnt = (uint8_t)(cmd->ResponseLen / sizeof (EXT_ASYNC_EVENT));

	/* The caller must be able to accept the whole queue in one shot. */
	if (request_cnt < EXT_DEF_MAX_AEN_QUEUE) {
		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
		cmd->DetailStatus = EXT_DEF_MAX_AEN_QUEUE;
		EL(ha, "failed, request_cnt < EXT_DEF_MAX_AEN_QUEUE, "
		    "Len=%xh\n", request_cnt);
		cmd->ResponseLen = 0;
		return;
	}

	/* 1st: Make a local copy of the entire queue content. */
	tmp_q = (EXT_ASYNC_EVENT *)xp->aen_tracking_queue;
	queue_cnt = 0;

	/*
	 * Drain under the interrupt lock so ql_enqueue_aen() cannot
	 * modify head/tail while we walk from head toward tail,
	 * wrapping at EXT_DEF_MAX_AEN_QUEUE.  A zero AsyncEventCode
	 * marks an empty slot and is skipped.
	 */
	INTR_LOCK(ha);
	i = xp->aen_q_head;

	for (; queue_cnt < EXT_DEF_MAX_AEN_QUEUE; ) {
		if (tmp_q[i].AsyncEventCode != 0) {
			bcopy(&tmp_q[i], &aen[queue_cnt],
			    sizeof (EXT_ASYNC_EVENT));
			queue_cnt++;
			tmp_q[i].AsyncEventCode = 0; /* empty out the slot */
		}
		if (i == xp->aen_q_tail) {
			/* done. */
			break;
		}
		i++;
		if (i == EXT_DEF_MAX_AEN_QUEUE) {
			i = 0;
		}
	}

	/* Empty the queue. */
	xp->aen_q_head = 0;
	xp->aen_q_tail = 0;

	INTR_UNLOCK(ha);

	/* 2nd: Now transfer the queue content to user buffer */
	/* Copy the entire queue to user's buffer. */
	out_size = (uint32_t)(queue_cnt * sizeof (EXT_ASYNC_EVENT));
	if (queue_cnt == 0) {
		/* Nothing queued; report an empty response. */
		cmd->ResponseLen = 0;
	} else if (ddi_copyout((void *)&aen[0],
	    (void *)(uintptr_t)(cmd->ResponseAdr),
	    out_size, mode) != 0) {
		cmd->Status = EXT_STATUS_COPY_ERR;
		cmd->ResponseLen = 0;
		EL(ha, "failed, ddi_copyout\n");
	} else {
		cmd->ResponseLen = out_size;
		QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
	}
}
1861
1862 /*
1863 * ql_enqueue_aen
1864 *
1865 * Input:
1866 * ha: adapter state pointer.
1867 * event_code: async event code of the event to add to queue.
1868 * payload: event payload for the queue.
1869 * INTR_LOCK must be already obtained.
1870 *
1871 * Context:
1872 * Interrupt or Kernel context, no mailbox commands allowed.
1873 */
void
ql_enqueue_aen(ql_adapter_state_t *ha, uint16_t event_code, void *payload)
{
	uint8_t			new_entry;	/* index to current entry */
	uint16_t		*mbx;
	EXT_ASYNC_EVENT		*aen_queue;
	ql_xioctl_t		*xp = ha->xioctl;

	QL_PRINT_9(CE_CONT, "(%d): started, event_code=%d\n", ha->instance,
	    event_code);

	/* The xioctl context may not exist yet (early attach). */
	if (xp == NULL) {
		QL_PRINT_9(CE_CONT, "(%d): no context\n", ha->instance);
		return;
	}
	aen_queue = (EXT_ASYNC_EVENT *)xp->aen_tracking_queue;

	/*
	 * A non-zero AsyncEventCode at the tail means the slot is
	 * occupied; advance tail (and head, when the ring is full and
	 * the oldest entry is being overwritten), wrapping both at
	 * EXT_DEF_MAX_AEN_QUEUE.
	 */
	if (aen_queue[xp->aen_q_tail].AsyncEventCode != 0) {
		/* Need to change queue pointers to make room. */

		/* Increment tail for adding new entry. */
		xp->aen_q_tail++;
		if (xp->aen_q_tail == EXT_DEF_MAX_AEN_QUEUE) {
			xp->aen_q_tail = 0;
		}
		if (xp->aen_q_head == xp->aen_q_tail) {
			/*
			 * We're overwriting the oldest entry, so need to
			 * update the head pointer.
			 */
			xp->aen_q_head++;
			if (xp->aen_q_head == EXT_DEF_MAX_AEN_QUEUE) {
				xp->aen_q_head = 0;
			}
		}
	}

	new_entry = xp->aen_q_tail;
	aen_queue[new_entry].AsyncEventCode = event_code;

	/* Update payload */
	if (payload != NULL) {
		switch (event_code) {
		case MBA_LIP_OCCURRED:
		case MBA_LOOP_UP:
		case MBA_LOOP_DOWN:
		case MBA_LIP_F8:
		case MBA_LIP_RESET:
		case MBA_PORT_UPDATE:
			/* No payload recorded for these events. */
			break;
		case MBA_RSCN_UPDATE:
			/* Payload is the mailbox register array. */
			mbx = (uint16_t *)payload;
			/* al_pa */
			aen_queue[new_entry].Payload.RSCN.RSCNInfo[0] =
			    LSB(mbx[2]);
			/* area */
			aen_queue[new_entry].Payload.RSCN.RSCNInfo[1] =
			    MSB(mbx[2]);
			/* domain */
			aen_queue[new_entry].Payload.RSCN.RSCNInfo[2] =
			    LSB(mbx[1]);
			/* save in big endian */
			BIG_ENDIAN_24(&aen_queue[new_entry].
			    Payload.RSCN.RSCNInfo[0]);

			aen_queue[new_entry].Payload.RSCN.AddrFormat =
			    MSB(mbx[1]);

			break;
		default:
			/* Not supported */
			EL(ha, "failed, event code not supported=%xh\n",
			    event_code);
			/* Discard the entry by marking the slot empty. */
			aen_queue[new_entry].AsyncEventCode = 0;
			break;
		}
	}

	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
}
1954
1955 /*
1956 * ql_scsi_passthru
1957 * IOCTL SCSI passthrough.
1958 *
1959 * Input:
1960 * ha: adapter state pointer.
1961 * cmd: User space SCSI command pointer.
1962 * mode: flags.
1963 *
1964 * Returns:
1965 * None, request status indicated in cmd->Status.
1966 *
1967 * Context:
1968 * Kernel context.
1969 */
1970 static void
ql_scsi_passthru(ql_adapter_state_t * ha,EXT_IOCTL * cmd,int mode)1971 ql_scsi_passthru(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
1972 {
1973 ql_mbx_iocb_t *pkt;
1974 ql_mbx_data_t mr;
1975 dma_mem_t *dma_mem;
1976 caddr_t pld;
1977 uint32_t pkt_size, pld_size;
1978 uint16_t qlnt, retries, cnt, cnt2;
1979 uint8_t *name;
1980 EXT_FC_SCSI_PASSTHRU *ufc_req;
1981 EXT_SCSI_PASSTHRU *usp_req;
1982 int rval;
1983 union _passthru {
1984 EXT_SCSI_PASSTHRU sp_cmd;
1985 EXT_FC_SCSI_PASSTHRU fc_cmd;
1986 } pt_req; /* Passthru request */
1987 uint32_t status, sense_sz = 0;
1988 ql_tgt_t *tq = NULL;
1989 EXT_SCSI_PASSTHRU *sp_req = &pt_req.sp_cmd;
1990 EXT_FC_SCSI_PASSTHRU *fc_req = &pt_req.fc_cmd;
1991
1992 /* SCSI request struct for SCSI passthrough IOs. */
1993 struct {
1994 uint16_t lun;
1995 uint16_t sense_length; /* Sense buffer size */
1996 size_t resid; /* Residual */
1997 uint8_t *cdbp; /* Requestor's CDB */
1998 uint8_t *u_sense; /* Requestor's sense buffer */
1999 uint8_t cdb_len; /* Requestor's CDB length */
2000 uint8_t direction;
2001 } scsi_req;
2002
2003 struct {
2004 uint8_t *rsp_info;
2005 uint8_t *req_sense_data;
2006 uint32_t residual_length;
2007 uint32_t rsp_info_length;
2008 uint32_t req_sense_length;
2009 uint16_t comp_status;
2010 uint8_t state_flags_l;
2011 uint8_t state_flags_h;
2012 uint8_t scsi_status_l;
2013 uint8_t scsi_status_h;
2014 } sts;
2015
2016 QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
2017
2018 /* Verify Sub Code and set cnt to needed request size. */
2019 if (cmd->SubCode == EXT_SC_SEND_SCSI_PASSTHRU) {
2020 pld_size = sizeof (EXT_SCSI_PASSTHRU);
2021 } else if (cmd->SubCode == EXT_SC_SEND_FC_SCSI_PASSTHRU) {
2022 pld_size = sizeof (EXT_FC_SCSI_PASSTHRU);
2023 } else {
2024 EL(ha, "failed, invalid SubCode=%xh\n", cmd->SubCode);
2025 cmd->Status = EXT_STATUS_UNSUPPORTED_SUBCODE;
2026 cmd->ResponseLen = 0;
2027 return;
2028 }
2029
2030 dma_mem = (dma_mem_t *)kmem_zalloc(sizeof (dma_mem_t), KM_SLEEP);
2031 if (dma_mem == NULL) {
2032 EL(ha, "failed, kmem_zalloc\n");
2033 cmd->Status = EXT_STATUS_NO_MEMORY;
2034 cmd->ResponseLen = 0;
2035 return;
2036 }
2037 /* Verify the size of and copy in the passthru request structure. */
2038 if (cmd->RequestLen != pld_size) {
2039 /* Return error */
2040 EL(ha, "failed, RequestLen != cnt, is=%xh, expected=%xh\n",
2041 cmd->RequestLen, pld_size);
2042 cmd->Status = EXT_STATUS_INVALID_PARAM;
2043 cmd->DetailStatus = EXT_DSTATUS_REQUEST_LEN;
2044 cmd->ResponseLen = 0;
2045 return;
2046 }
2047
2048 if (ddi_copyin((void *)(uintptr_t)cmd->RequestAdr, &pt_req,
2049 pld_size, mode) != 0) {
2050 EL(ha, "failed, ddi_copyin\n");
2051 cmd->Status = EXT_STATUS_COPY_ERR;
2052 cmd->ResponseLen = 0;
2053 return;
2054 }
2055
2056 /*
2057 * Find fc_port from SCSI PASSTHRU structure fill in the scsi_req
2058 * request data structure.
2059 */
2060 if (cmd->SubCode == EXT_SC_SEND_SCSI_PASSTHRU) {
2061 scsi_req.lun = sp_req->TargetAddr.Lun;
2062 scsi_req.sense_length = sizeof (sp_req->SenseData);
2063 scsi_req.cdbp = &sp_req->Cdb[0];
2064 scsi_req.cdb_len = sp_req->CdbLength;
2065 scsi_req.direction = sp_req->Direction;
2066 usp_req = (EXT_SCSI_PASSTHRU *)(uintptr_t)cmd->RequestAdr;
2067 scsi_req.u_sense = &usp_req->SenseData[0];
2068 cmd->DetailStatus = EXT_DSTATUS_TARGET;
2069
2070 qlnt = QLNT_PORT;
2071 name = (uint8_t *)&sp_req->TargetAddr.Target;
2072 QL_PRINT_9(CE_CONT, "(%d): SubCode=%xh, Target=%lld\n",
2073 ha->instance, cmd->SubCode, sp_req->TargetAddr.Target);
2074 tq = ql_find_port(ha, name, qlnt);
2075 } else {
2076 /*
2077 * Must be FC PASSTHRU, verified above.
2078 */
2079 if (fc_req->FCScsiAddr.DestType == EXT_DEF_DESTTYPE_WWPN) {
2080 qlnt = QLNT_PORT;
2081 name = &fc_req->FCScsiAddr.DestAddr.WWPN[0];
2082 QL_PRINT_9(CE_CONT, "(%d): SubCode=%xh, "
2083 "wwpn=%02x%02x%02x%02x%02x%02x%02x%02x\n",
2084 ha->instance, cmd->SubCode, name[0], name[1],
2085 name[2], name[3], name[4], name[5], name[6],
2086 name[7]);
2087 tq = ql_find_port(ha, name, qlnt);
2088 } else if (fc_req->FCScsiAddr.DestType ==
2089 EXT_DEF_DESTTYPE_WWNN) {
2090 qlnt = QLNT_NODE;
2091 name = &fc_req->FCScsiAddr.DestAddr.WWNN[0];
2092 QL_PRINT_9(CE_CONT, "(%d): SubCode=%xh, "
2093 "wwnn=%02x%02x%02x%02x%02x%02x%02x%02x\n",
2094 ha->instance, cmd->SubCode, name[0], name[1],
2095 name[2], name[3], name[4], name[5], name[6],
2096 name[7]);
2097 tq = ql_find_port(ha, name, qlnt);
2098 } else if (fc_req->FCScsiAddr.DestType ==
2099 EXT_DEF_DESTTYPE_PORTID) {
2100 qlnt = QLNT_PID;
2101 name = &fc_req->FCScsiAddr.DestAddr.Id[0];
2102 QL_PRINT_9(CE_CONT, "(%d): SubCode=%xh, PID="
2103 "%02x%02x%02x\n", ha->instance, cmd->SubCode,
2104 name[0], name[1], name[2]);
2105 tq = ql_find_port(ha, name, qlnt);
2106 } else {
2107 EL(ha, "failed, SubCode=%xh invalid DestType=%xh\n",
2108 cmd->SubCode, fc_req->FCScsiAddr.DestType);
2109 cmd->Status = EXT_STATUS_INVALID_PARAM;
2110 cmd->ResponseLen = 0;
2111 return;
2112 }
2113 scsi_req.lun = fc_req->FCScsiAddr.Lun;
2114 scsi_req.sense_length = sizeof (fc_req->SenseData);
2115 scsi_req.cdbp = &sp_req->Cdb[0];
2116 scsi_req.cdb_len = sp_req->CdbLength;
2117 ufc_req = (EXT_FC_SCSI_PASSTHRU *)(uintptr_t)cmd->RequestAdr;
2118 scsi_req.u_sense = &ufc_req->SenseData[0];
2119 scsi_req.direction = fc_req->Direction;
2120 }
2121
2122 if (tq == NULL || !VALID_TARGET_ID(ha, tq->loop_id)) {
2123 EL(ha, "failed, fc_port not found\n");
2124 cmd->Status = EXT_STATUS_DEV_NOT_FOUND;
2125 cmd->ResponseLen = 0;
2126 return;
2127 }
2128
2129 if (tq->flags & TQF_NEED_AUTHENTICATION) {
2130 EL(ha, "target not available; loopid=%xh\n", tq->loop_id);
2131 cmd->Status = EXT_STATUS_DEVICE_OFFLINE;
2132 cmd->ResponseLen = 0;
2133 return;
2134 }
2135
2136 /* Allocate command block. */
2137 if ((scsi_req.direction == EXT_DEF_SCSI_PASSTHRU_DATA_IN ||
2138 scsi_req.direction == EXT_DEF_SCSI_PASSTHRU_DATA_OUT) &&
2139 cmd->ResponseLen) {
2140 pld_size = cmd->ResponseLen;
2141 pkt_size = (uint32_t)(sizeof (ql_mbx_iocb_t) + pld_size);
2142 pkt = kmem_zalloc(pkt_size, KM_SLEEP);
2143 if (pkt == NULL) {
2144 EL(ha, "failed, kmem_zalloc\n");
2145 cmd->Status = EXT_STATUS_NO_MEMORY;
2146 cmd->ResponseLen = 0;
2147 return;
2148 }
2149 pld = (caddr_t)pkt + sizeof (ql_mbx_iocb_t);
2150
2151 /* Get DMA memory for the IOCB */
2152 if (ql_get_dma_mem(ha, dma_mem, pld_size, LITTLE_ENDIAN_DMA,
2153 QL_DMA_DATA_ALIGN) != QL_SUCCESS) {
2154 cmn_err(CE_WARN, "%s(%d): request queue DMA memory "
2155 "alloc failed", QL_NAME, ha->instance);
2156 kmem_free(pkt, pkt_size);
2157 cmd->Status = EXT_STATUS_MS_NO_RESPONSE;
2158 cmd->ResponseLen = 0;
2159 return;
2160 }
2161
2162 if (scsi_req.direction == EXT_DEF_SCSI_PASSTHRU_DATA_IN) {
2163 scsi_req.direction = (uint8_t)
2164 (CFG_IST(ha, CFG_CTRL_24258081) ?
2165 CF_RD : CF_DATA_IN | CF_STAG);
2166 } else {
2167 scsi_req.direction = (uint8_t)
2168 (CFG_IST(ha, CFG_CTRL_24258081) ?
2169 CF_WR : CF_DATA_OUT | CF_STAG);
2170 cmd->ResponseLen = 0;
2171
2172 /* Get command payload. */
2173 if (ql_get_buffer_data(
2174 (caddr_t)(uintptr_t)cmd->ResponseAdr,
2175 pld, pld_size, mode) != pld_size) {
2176 EL(ha, "failed, get_buffer_data\n");
2177 cmd->Status = EXT_STATUS_COPY_ERR;
2178
2179 kmem_free(pkt, pkt_size);
2180 ql_free_dma_resource(ha, dma_mem);
2181 kmem_free(dma_mem, sizeof (dma_mem_t));
2182 return;
2183 }
2184
2185 /* Copy out going data to DMA buffer. */
2186 ddi_rep_put8(dma_mem->acc_handle, (uint8_t *)pld,
2187 (uint8_t *)dma_mem->bp, pld_size,
2188 DDI_DEV_AUTOINCR);
2189
2190 /* Sync DMA buffer. */
2191 (void) ddi_dma_sync(dma_mem->dma_handle, 0,
2192 dma_mem->size, DDI_DMA_SYNC_FORDEV);
2193 }
2194 } else {
2195 scsi_req.direction = (uint8_t)
2196 (CFG_IST(ha, CFG_CTRL_24258081) ? 0 : CF_STAG);
2197 cmd->ResponseLen = 0;
2198
2199 pkt_size = sizeof (ql_mbx_iocb_t);
2200 pkt = kmem_zalloc(pkt_size, KM_SLEEP);
2201 if (pkt == NULL) {
2202 EL(ha, "failed, kmem_zalloc-2\n");
2203 cmd->Status = EXT_STATUS_NO_MEMORY;
2204 return;
2205 }
2206 pld = NULL;
2207 pld_size = 0;
2208 }
2209
2210 /* retries = ha->port_down_retry_count; */
2211 retries = 1;
2212 cmd->Status = EXT_STATUS_OK;
2213 cmd->DetailStatus = EXT_DSTATUS_NOADNL_INFO;
2214
2215 QL_PRINT_9(CE_CONT, "(%d): SCSI cdb\n", ha->instance);
2216 QL_DUMP_9(scsi_req.cdbp, 8, scsi_req.cdb_len);
2217
2218 do {
2219 if (DRIVER_SUSPENDED(ha)) {
2220 sts.comp_status = CS_LOOP_DOWN_ABORT;
2221 break;
2222 }
2223
2224 if (CFG_IST(ha, CFG_CTRL_24258081)) {
2225 pkt->cmd24.entry_type = IOCB_CMD_TYPE_7;
2226 pkt->cmd24.entry_count = 1;
2227
2228 /* Set LUN number */
2229 pkt->cmd24.fcp_lun[2] = LSB(scsi_req.lun);
2230 pkt->cmd24.fcp_lun[3] = MSB(scsi_req.lun);
2231
2232 /* Set N_port handle */
2233 pkt->cmd24.n_port_hdl = (uint16_t)LE_16(tq->loop_id);
2234
2235 /* Set VP Index */
2236 pkt->cmd24.vp_index = ha->vp_index;
2237
2238 /* Set target ID */
2239 pkt->cmd24.target_id[0] = tq->d_id.b.al_pa;
2240 pkt->cmd24.target_id[1] = tq->d_id.b.area;
2241 pkt->cmd24.target_id[2] = tq->d_id.b.domain;
2242
2243 /* Set ISP command timeout. */
2244 pkt->cmd24.timeout = (uint16_t)LE_16(15);
2245
2246 /* Load SCSI CDB */
2247 ddi_rep_put8(ha->hba_buf.acc_handle, scsi_req.cdbp,
2248 pkt->cmd24.scsi_cdb, scsi_req.cdb_len,
2249 DDI_DEV_AUTOINCR);
2250 for (cnt = 0; cnt < MAX_CMDSZ;
2251 cnt = (uint16_t)(cnt + 4)) {
2252 ql_chg_endian((uint8_t *)&pkt->cmd24.scsi_cdb
2253 + cnt, 4);
2254 }
2255
2256 /* Set tag queue control flags */
2257 pkt->cmd24.task = TA_STAG;
2258
2259 if (pld_size) {
2260 /* Set transfer direction. */
2261 pkt->cmd24.control_flags = scsi_req.direction;
2262
2263 /* Set data segment count. */
2264 pkt->cmd24.dseg_count = LE_16(1);
2265
2266 /* Load total byte count. */
2267 pkt->cmd24.total_byte_count = LE_32(pld_size);
2268
2269 /* Load data descriptor. */
2270 pkt->cmd24.dseg_0_address[0] = (uint32_t)
2271 LE_32(LSD(dma_mem->cookie.dmac_laddress));
2272 pkt->cmd24.dseg_0_address[1] = (uint32_t)
2273 LE_32(MSD(dma_mem->cookie.dmac_laddress));
2274 pkt->cmd24.dseg_0_length = LE_32(pld_size);
2275 }
2276 } else if (CFG_IST(ha, CFG_ENABLE_64BIT_ADDRESSING)) {
2277 pkt->cmd3.entry_type = IOCB_CMD_TYPE_3;
2278 pkt->cmd3.entry_count = 1;
2279 if (CFG_IST(ha, CFG_EXT_FW_INTERFACE)) {
2280 pkt->cmd3.target_l = LSB(tq->loop_id);
2281 pkt->cmd3.target_h = MSB(tq->loop_id);
2282 } else {
2283 pkt->cmd3.target_h = LSB(tq->loop_id);
2284 }
2285 pkt->cmd3.lun_l = LSB(scsi_req.lun);
2286 pkt->cmd3.lun_h = MSB(scsi_req.lun);
2287 pkt->cmd3.control_flags_l = scsi_req.direction;
2288 pkt->cmd3.timeout = LE_16(15);
2289 for (cnt = 0; cnt < scsi_req.cdb_len; cnt++) {
2290 pkt->cmd3.scsi_cdb[cnt] = scsi_req.cdbp[cnt];
2291 }
2292 if (pld_size) {
2293 pkt->cmd3.dseg_count = LE_16(1);
2294 pkt->cmd3.byte_count = LE_32(pld_size);
2295 pkt->cmd3.dseg_0_address[0] = (uint32_t)
2296 LE_32(LSD(dma_mem->cookie.dmac_laddress));
2297 pkt->cmd3.dseg_0_address[1] = (uint32_t)
2298 LE_32(MSD(dma_mem->cookie.dmac_laddress));
2299 pkt->cmd3.dseg_0_length = LE_32(pld_size);
2300 }
2301 } else {
2302 pkt->cmd.entry_type = IOCB_CMD_TYPE_2;
2303 pkt->cmd.entry_count = 1;
2304 if (CFG_IST(ha, CFG_EXT_FW_INTERFACE)) {
2305 pkt->cmd.target_l = LSB(tq->loop_id);
2306 pkt->cmd.target_h = MSB(tq->loop_id);
2307 } else {
2308 pkt->cmd.target_h = LSB(tq->loop_id);
2309 }
2310 pkt->cmd.lun_l = LSB(scsi_req.lun);
2311 pkt->cmd.lun_h = MSB(scsi_req.lun);
2312 pkt->cmd.control_flags_l = scsi_req.direction;
2313 pkt->cmd.timeout = LE_16(15);
2314 for (cnt = 0; cnt < scsi_req.cdb_len; cnt++) {
2315 pkt->cmd.scsi_cdb[cnt] = scsi_req.cdbp[cnt];
2316 }
2317 if (pld_size) {
2318 pkt->cmd.dseg_count = LE_16(1);
2319 pkt->cmd.byte_count = LE_32(pld_size);
2320 pkt->cmd.dseg_0_address = (uint32_t)
2321 LE_32(LSD(dma_mem->cookie.dmac_laddress));
2322 pkt->cmd.dseg_0_length = LE_32(pld_size);
2323 }
2324 }
2325 /* Go issue command and wait for completion. */
2326 QL_PRINT_9(CE_CONT, "(%d): request pkt\n", ha->instance);
2327 QL_DUMP_9(pkt, 8, pkt_size);
2328
2329 status = ql_issue_mbx_iocb(ha, (caddr_t)pkt, pkt_size);
2330
2331 if (pld_size) {
2332 /* Sync in coming DMA buffer. */
2333 (void) ddi_dma_sync(dma_mem->dma_handle, 0,
2334 dma_mem->size, DDI_DMA_SYNC_FORKERNEL);
2335 /* Copy in coming DMA data. */
2336 ddi_rep_get8(dma_mem->acc_handle, (uint8_t *)pld,
2337 (uint8_t *)dma_mem->bp, pld_size,
2338 DDI_DEV_AUTOINCR);
2339 }
2340
2341 if (CFG_IST(ha, CFG_CTRL_24258081)) {
2342 pkt->sts24.entry_status = (uint8_t)
2343 (pkt->sts24.entry_status & 0x3c);
2344 } else {
2345 pkt->sts.entry_status = (uint8_t)
2346 (pkt->sts.entry_status & 0x7e);
2347 }
2348
2349 if (status == QL_SUCCESS && pkt->sts.entry_status != 0) {
2350 EL(ha, "failed, entry_status=%xh, d_id=%xh\n",
2351 pkt->sts.entry_status, tq->d_id.b24);
2352 status = QL_FUNCTION_PARAMETER_ERROR;
2353 }
2354
2355 sts.comp_status = (uint16_t)(CFG_IST(ha, CFG_CTRL_24258081) ?
2356 LE_16(pkt->sts24.comp_status) :
2357 LE_16(pkt->sts.comp_status));
2358
2359 /*
2360 * We have verified about all the request that can be so far.
2361 * Now we need to start verification of our ability to
2362 * actually issue the CDB.
2363 */
2364 if (DRIVER_SUSPENDED(ha)) {
2365 sts.comp_status = CS_LOOP_DOWN_ABORT;
2366 break;
2367 } else if (status == QL_SUCCESS &&
2368 (sts.comp_status == CS_PORT_LOGGED_OUT ||
2369 sts.comp_status == CS_PORT_UNAVAILABLE)) {
2370 EL(ha, "login retry d_id=%xh\n", tq->d_id.b24);
2371 if (tq->flags & TQF_FABRIC_DEVICE) {
2372 rval = ql_login_fport(ha, tq, tq->loop_id,
2373 LFF_NO_PLOGI, &mr);
2374 if (rval != QL_SUCCESS) {
2375 EL(ha, "failed, login_fport=%xh, "
2376 "d_id=%xh\n", rval, tq->d_id.b24);
2377 }
2378 } else {
2379 rval = ql_login_lport(ha, tq, tq->loop_id,
2380 LLF_NONE);
2381 if (rval != QL_SUCCESS) {
2382 EL(ha, "failed, login_lport=%xh, "
2383 "d_id=%xh\n", rval, tq->d_id.b24);
2384 }
2385 }
2386 } else {
2387 break;
2388 }
2389
2390 bzero((caddr_t)pkt, sizeof (ql_mbx_iocb_t));
2391
2392 } while (retries--);
2393
2394 if (sts.comp_status == CS_LOOP_DOWN_ABORT) {
2395 /* Cannot issue command now, maybe later */
2396 EL(ha, "failed, suspended\n");
2397 kmem_free(pkt, pkt_size);
2398 ql_free_dma_resource(ha, dma_mem);
2399 kmem_free(dma_mem, sizeof (dma_mem_t));
2400 cmd->Status = EXT_STATUS_SUSPENDED;
2401 cmd->ResponseLen = 0;
2402 return;
2403 }
2404
2405 if (status != QL_SUCCESS) {
2406 /* Command error */
2407 EL(ha, "failed, I/O\n");
2408 kmem_free(pkt, pkt_size);
2409 ql_free_dma_resource(ha, dma_mem);
2410 kmem_free(dma_mem, sizeof (dma_mem_t));
2411 cmd->Status = EXT_STATUS_ERR;
2412 cmd->DetailStatus = status;
2413 cmd->ResponseLen = 0;
2414 return;
2415 }
2416
2417 /* Setup status. */
2418 if (CFG_IST(ha, CFG_CTRL_24258081)) {
2419 sts.scsi_status_l = pkt->sts24.scsi_status_l;
2420 sts.scsi_status_h = pkt->sts24.scsi_status_h;
2421
2422 /* Setup residuals. */
2423 sts.residual_length = LE_32(pkt->sts24.residual_length);
2424
2425 /* Setup state flags. */
2426 sts.state_flags_l = pkt->sts24.state_flags_l;
2427 sts.state_flags_h = pkt->sts24.state_flags_h;
2428 if (pld_size && sts.comp_status != CS_DATA_UNDERRUN) {
2429 sts.state_flags_h = (uint8_t)(sts.state_flags_h |
2430 SF_GOT_BUS | SF_GOT_TARGET | SF_SENT_CMD |
2431 SF_XFERRED_DATA | SF_GOT_STATUS);
2432 } else {
2433 sts.state_flags_h = (uint8_t)(sts.state_flags_h |
2434 SF_GOT_BUS | SF_GOT_TARGET | SF_SENT_CMD |
2435 SF_GOT_STATUS);
2436 }
2437 if (scsi_req.direction & CF_WR) {
2438 sts.state_flags_l = (uint8_t)(sts.state_flags_l |
2439 SF_DATA_OUT);
2440 } else if (scsi_req.direction & CF_RD) {
2441 sts.state_flags_l = (uint8_t)(sts.state_flags_l |
2442 SF_DATA_IN);
2443 }
2444 sts.state_flags_l = (uint8_t)(sts.state_flags_l | SF_SIMPLE_Q);
2445
2446 /* Setup FCP response info. */
2447 sts.rsp_info_length = sts.scsi_status_h & FCP_RSP_LEN_VALID ?
2448 LE_32(pkt->sts24.fcp_rsp_data_length) : 0;
2449 sts.rsp_info = &pkt->sts24.rsp_sense_data[0];
2450 for (cnt = 0; cnt < sts.rsp_info_length;
2451 cnt = (uint16_t)(cnt + 4)) {
2452 ql_chg_endian(sts.rsp_info + cnt, 4);
2453 }
2454
2455 /* Setup sense data. */
2456 if (sts.scsi_status_h & FCP_SNS_LEN_VALID) {
2457 sts.req_sense_length =
2458 LE_32(pkt->sts24.fcp_sense_length);
2459 sts.state_flags_h = (uint8_t)(sts.state_flags_h |
2460 SF_ARQ_DONE);
2461 } else {
2462 sts.req_sense_length = 0;
2463 }
2464 sts.req_sense_data =
2465 &pkt->sts24.rsp_sense_data[sts.rsp_info_length];
2466 cnt2 = (uint16_t)(((uintptr_t)pkt + sizeof (sts_24xx_entry_t)) -
2467 (uintptr_t)sts.req_sense_data);
2468 for (cnt = 0; cnt < cnt2; cnt = (uint16_t)(cnt + 4)) {
2469 ql_chg_endian(sts.req_sense_data + cnt, 4);
2470 }
2471 } else {
2472 sts.scsi_status_l = pkt->sts.scsi_status_l;
2473 sts.scsi_status_h = pkt->sts.scsi_status_h;
2474
2475 /* Setup residuals. */
2476 sts.residual_length = LE_32(pkt->sts.residual_length);
2477
2478 /* Setup state flags. */
2479 sts.state_flags_l = pkt->sts.state_flags_l;
2480 sts.state_flags_h = pkt->sts.state_flags_h;
2481
2482 /* Setup FCP response info. */
2483 sts.rsp_info_length = sts.scsi_status_h & FCP_RSP_LEN_VALID ?
2484 LE_16(pkt->sts.rsp_info_length) : 0;
2485 sts.rsp_info = &pkt->sts.rsp_info[0];
2486
2487 /* Setup sense data. */
2488 sts.req_sense_length = sts.scsi_status_h & FCP_SNS_LEN_VALID ?
2489 LE_16(pkt->sts.req_sense_length) : 0;
2490 sts.req_sense_data = &pkt->sts.req_sense_data[0];
2491 }
2492
2493 QL_PRINT_9(CE_CONT, "(%d): response pkt\n", ha->instance);
2494 QL_DUMP_9(&pkt->sts, 8, sizeof (sts_entry_t));
2495
2496 switch (sts.comp_status) {
2497 case CS_INCOMPLETE:
2498 case CS_ABORTED:
2499 case CS_DEVICE_UNAVAILABLE:
2500 case CS_PORT_UNAVAILABLE:
2501 case CS_PORT_LOGGED_OUT:
2502 case CS_PORT_CONFIG_CHG:
2503 case CS_PORT_BUSY:
2504 case CS_LOOP_DOWN_ABORT:
2505 cmd->Status = EXT_STATUS_BUSY;
2506 break;
2507 case CS_RESET:
2508 case CS_QUEUE_FULL:
2509 cmd->Status = EXT_STATUS_ERR;
2510 break;
2511 case CS_TIMEOUT:
2512 cmd->Status = EXT_STATUS_ERR;
2513 break;
2514 case CS_DATA_OVERRUN:
2515 cmd->Status = EXT_STATUS_DATA_OVERRUN;
2516 break;
2517 case CS_DATA_UNDERRUN:
2518 cmd->Status = EXT_STATUS_DATA_UNDERRUN;
2519 break;
2520 }
2521
2522 /*
2523 * If non data transfer commands fix tranfer counts.
2524 */
2525 if (scsi_req.cdbp[0] == SCMD_TEST_UNIT_READY ||
2526 scsi_req.cdbp[0] == SCMD_REZERO_UNIT ||
2527 scsi_req.cdbp[0] == SCMD_SEEK ||
2528 scsi_req.cdbp[0] == SCMD_SEEK_G1 ||
2529 scsi_req.cdbp[0] == SCMD_RESERVE ||
2530 scsi_req.cdbp[0] == SCMD_RELEASE ||
2531 scsi_req.cdbp[0] == SCMD_START_STOP ||
2532 scsi_req.cdbp[0] == SCMD_DOORLOCK ||
2533 scsi_req.cdbp[0] == SCMD_VERIFY ||
2534 scsi_req.cdbp[0] == SCMD_WRITE_FILE_MARK ||
2535 scsi_req.cdbp[0] == SCMD_VERIFY_G0 ||
2536 scsi_req.cdbp[0] == SCMD_SPACE ||
2537 scsi_req.cdbp[0] == SCMD_ERASE ||
2538 (scsi_req.cdbp[0] == SCMD_FORMAT &&
2539 (scsi_req.cdbp[1] & FPB_DATA) == 0)) {
2540 /*
2541 * Non data transfer command, clear sts_entry residual
2542 * length.
2543 */
2544 sts.residual_length = 0;
2545 cmd->ResponseLen = 0;
2546 if (sts.comp_status == CS_DATA_UNDERRUN) {
2547 sts.comp_status = CS_COMPLETE;
2548 cmd->Status = EXT_STATUS_OK;
2549 }
2550 } else {
2551 cmd->ResponseLen = pld_size;
2552 }
2553
2554 /* Correct ISP completion status */
2555 if (sts.comp_status == CS_COMPLETE && sts.scsi_status_l == 0 &&
2556 (sts.scsi_status_h & FCP_RSP_MASK) == 0) {
2557 QL_PRINT_9(CE_CONT, "(%d): Correct completion\n",
2558 ha->instance);
2559 scsi_req.resid = 0;
2560 } else if (sts.comp_status == CS_DATA_UNDERRUN) {
2561 QL_PRINT_9(CE_CONT, "(%d): Correct UNDERRUN\n",
2562 ha->instance);
2563 scsi_req.resid = sts.residual_length;
2564 if (sts.scsi_status_h & FCP_RESID_UNDER) {
2565 cmd->Status = (uint32_t)EXT_STATUS_OK;
2566
2567 cmd->ResponseLen = (uint32_t)
2568 (pld_size - scsi_req.resid);
2569 } else {
2570 EL(ha, "failed, Transfer ERROR\n");
2571 cmd->Status = EXT_STATUS_ERR;
2572 cmd->ResponseLen = 0;
2573 }
2574 } else {
2575 QL_PRINT_9(CE_CONT, "(%d): error d_id=%xh, comp_status=%xh, "
2576 "scsi_status_h=%xh, scsi_status_l=%xh\n", ha->instance,
2577 tq->d_id.b24, sts.comp_status, sts.scsi_status_h,
2578 sts.scsi_status_l);
2579
2580 scsi_req.resid = pld_size;
2581 /*
2582 * Handle residual count on SCSI check
2583 * condition.
2584 *
2585 * - If Residual Under / Over is set, use the
2586 * Residual Transfer Length field in IOCB.
2587 * - If Residual Under / Over is not set, and
2588 * Transferred Data bit is set in State Flags
2589 * field of IOCB, report residual value of 0
2590 * (you may want to do this for tape
2591 * Write-type commands only). This takes care
2592 * of logical end of tape problem and does
2593 * not break Unit Attention.
2594 * - If Residual Under / Over is not set, and
2595 * Transferred Data bit is not set in State
2596 * Flags, report residual value equal to
2597 * original data transfer length.
2598 */
2599 if (sts.scsi_status_l & STATUS_CHECK) {
2600 cmd->Status = EXT_STATUS_SCSI_STATUS;
2601 cmd->DetailStatus = sts.scsi_status_l;
2602 if (sts.scsi_status_h &
2603 (FCP_RESID_OVER | FCP_RESID_UNDER)) {
2604 scsi_req.resid = sts.residual_length;
2605 } else if (sts.state_flags_h &
2606 STATE_XFERRED_DATA) {
2607 scsi_req.resid = 0;
2608 }
2609 }
2610 }
2611
2612 if (sts.scsi_status_l & STATUS_CHECK &&
2613 sts.scsi_status_h & FCP_SNS_LEN_VALID &&
2614 sts.req_sense_length) {
2615 /*
2616 * Check condition with vaild sense data flag set and sense
2617 * length != 0
2618 */
2619 if (sts.req_sense_length > scsi_req.sense_length) {
2620 sense_sz = scsi_req.sense_length;
2621 } else {
2622 sense_sz = sts.req_sense_length;
2623 }
2624
2625 EL(ha, "failed, Check Condition Status, d_id=%xh\n",
2626 tq->d_id.b24);
2627 QL_DUMP_2(sts.req_sense_data, 8, sts.req_sense_length);
2628
2629 if (ddi_copyout(sts.req_sense_data, scsi_req.u_sense,
2630 (size_t)sense_sz, mode) != 0) {
2631 EL(ha, "failed, request sense ddi_copyout\n");
2632 }
2633
2634 cmd->Status = EXT_STATUS_SCSI_STATUS;
2635 cmd->DetailStatus = sts.scsi_status_l;
2636 }
2637
2638 /* Copy response payload from DMA buffer to application. */
2639 if (scsi_req.direction & (CF_RD | CF_DATA_IN) &&
2640 cmd->ResponseLen != 0) {
2641 QL_PRINT_9(CE_CONT, "(%d): Data Return resid=%lu, "
2642 "byte_count=%u, ResponseLen=%xh\n", ha->instance,
2643 scsi_req.resid, pld_size, cmd->ResponseLen);
2644 QL_DUMP_9(pld, 8, cmd->ResponseLen);
2645
2646 /* Send response payload. */
2647 if (ql_send_buffer_data(pld,
2648 (caddr_t)(uintptr_t)cmd->ResponseAdr,
2649 cmd->ResponseLen, mode) != cmd->ResponseLen) {
2650 EL(ha, "failed, send_buffer_data\n");
2651 cmd->Status = EXT_STATUS_COPY_ERR;
2652 cmd->ResponseLen = 0;
2653 }
2654 }
2655
2656 if (cmd->Status != EXT_STATUS_OK) {
2657 EL(ha, "failed, cmd->Status=%xh, comp_status=%xh, "
2658 "d_id=%xh\n", cmd->Status, sts.comp_status, tq->d_id.b24);
2659 } else {
2660 /*EMPTY*/
2661 QL_PRINT_9(CE_CONT, "(%d): done, ResponseLen=%d\n",
2662 ha->instance, cmd->ResponseLen);
2663 }
2664
2665 kmem_free(pkt, pkt_size);
2666 ql_free_dma_resource(ha, dma_mem);
2667 kmem_free(dma_mem, sizeof (dma_mem_t));
2668 }
2669
2670 /*
2671 * ql_wwpn_to_scsiaddr
2672 *
2673 * Input:
2674 * ha: adapter state pointer.
2675 * cmd: EXT_IOCTL cmd struct pointer.
2676 * mode: flags.
2677 *
2678 * Context:
2679 * Kernel context.
2680 */
2681 static void
ql_wwpn_to_scsiaddr(ql_adapter_state_t * ha,EXT_IOCTL * cmd,int mode)2682 ql_wwpn_to_scsiaddr(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
2683 {
2684 int status;
2685 uint8_t wwpn[EXT_DEF_WWN_NAME_SIZE];
2686 EXT_SCSI_ADDR *tmp_addr;
2687 ql_tgt_t *tq;
2688
2689 QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
2690
2691 if (cmd->RequestLen != EXT_DEF_WWN_NAME_SIZE) {
2692 /* Return error */
2693 EL(ha, "incorrect RequestLen\n");
2694 cmd->Status = EXT_STATUS_INVALID_PARAM;
2695 cmd->DetailStatus = EXT_DSTATUS_REQUEST_LEN;
2696 return;
2697 }
2698
2699 status = ddi_copyin((void*)(uintptr_t)cmd->RequestAdr, wwpn,
2700 cmd->RequestLen, mode);
2701
2702 if (status != 0) {
2703 cmd->Status = EXT_STATUS_COPY_ERR;
2704 EL(ha, "failed, ddi_copyin\n");
2705 return;
2706 }
2707
2708 tq = ql_find_port(ha, wwpn, QLNT_PORT);
2709
2710 if (tq == NULL || tq->flags & TQF_INITIATOR_DEVICE) {
2711 /* no matching device */
2712 cmd->Status = EXT_STATUS_DEV_NOT_FOUND;
2713 EL(ha, "failed, device not found\n");
2714 return;
2715 }
2716
2717 /* Copy out the IDs found. For now we can only return target ID. */
2718 tmp_addr = (EXT_SCSI_ADDR *)(uintptr_t)cmd->ResponseAdr;
2719
2720 status = ddi_copyout((void *)wwpn, (void *)&tmp_addr->Target, 8, mode);
2721
2722 if (status != 0) {
2723 cmd->Status = EXT_STATUS_COPY_ERR;
2724 EL(ha, "failed, ddi_copyout\n");
2725 } else {
2726 cmd->Status = EXT_STATUS_OK;
2727 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
2728 }
2729 }
2730
2731 /*
2732 * ql_host_idx
2733 * Gets host order index.
2734 *
2735 * Input:
2736 * ha: adapter state pointer.
2737 * cmd: EXT_IOCTL cmd struct pointer.
2738 * mode: flags.
2739 *
2740 * Returns:
2741 * None, request status indicated in cmd->Status.
2742 *
2743 * Context:
2744 * Kernel context.
2745 */
2746 static void
ql_host_idx(ql_adapter_state_t * ha,EXT_IOCTL * cmd,int mode)2747 ql_host_idx(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
2748 {
2749 uint16_t idx;
2750
2751 QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
2752
2753 if (cmd->ResponseLen < sizeof (uint16_t)) {
2754 cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
2755 cmd->DetailStatus = sizeof (uint16_t);
2756 EL(ha, "failed, ResponseLen < Len=%xh\n", cmd->ResponseLen);
2757 cmd->ResponseLen = 0;
2758 return;
2759 }
2760
2761 idx = (uint16_t)ha->instance;
2762
2763 if (ddi_copyout((void *)&idx, (void *)(uintptr_t)(cmd->ResponseAdr),
2764 sizeof (uint16_t), mode) != 0) {
2765 cmd->Status = EXT_STATUS_COPY_ERR;
2766 cmd->ResponseLen = 0;
2767 EL(ha, "failed, ddi_copyout\n");
2768 } else {
2769 cmd->ResponseLen = sizeof (uint16_t);
2770 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
2771 }
2772 }
2773
2774 /*
2775 * ql_host_drvname
2776 * Gets host driver name
2777 *
2778 * Input:
2779 * ha: adapter state pointer.
2780 * cmd: EXT_IOCTL cmd struct pointer.
2781 * mode: flags.
2782 *
2783 * Returns:
2784 * None, request status indicated in cmd->Status.
2785 *
2786 * Context:
2787 * Kernel context.
2788 */
2789 static void
ql_host_drvname(ql_adapter_state_t * ha,EXT_IOCTL * cmd,int mode)2790 ql_host_drvname(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
2791 {
2792
2793 char drvname[] = QL_NAME;
2794 uint32_t qlnamelen;
2795
2796 QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
2797
2798 qlnamelen = (uint32_t)(strlen(QL_NAME)+1);
2799
2800 if (cmd->ResponseLen < qlnamelen) {
2801 cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
2802 cmd->DetailStatus = qlnamelen;
2803 EL(ha, "failed, ResponseLen: %xh, needed: %xh\n",
2804 cmd->ResponseLen, qlnamelen);
2805 cmd->ResponseLen = 0;
2806 return;
2807 }
2808
2809 if (ddi_copyout((void *)&drvname,
2810 (void *)(uintptr_t)(cmd->ResponseAdr),
2811 qlnamelen, mode) != 0) {
2812 cmd->Status = EXT_STATUS_COPY_ERR;
2813 cmd->ResponseLen = 0;
2814 EL(ha, "failed, ddi_copyout\n");
2815 } else {
2816 cmd->ResponseLen = qlnamelen-1;
2817 }
2818
2819 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
2820 }
2821
2822 /*
2823 * ql_read_nvram
2824 * Get NVRAM contents.
2825 *
2826 * Input:
2827 * ha: adapter state pointer.
2828 * cmd: EXT_IOCTL cmd struct pointer.
2829 * mode: flags.
2830 *
2831 * Returns:
2832 * None, request status indicated in cmd->Status.
2833 *
2834 * Context:
2835 * Kernel context.
2836 */
2837 static void
ql_read_nvram(ql_adapter_state_t * ha,EXT_IOCTL * cmd,int mode)2838 ql_read_nvram(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
2839 {
2840
2841 QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
2842
2843 if (cmd->ResponseLen < ha->nvram_cache->size) {
2844 cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
2845 cmd->DetailStatus = ha->nvram_cache->size;
2846 EL(ha, "failed, ResponseLen != NVRAM, Len=%xh\n",
2847 cmd->ResponseLen);
2848 cmd->ResponseLen = 0;
2849 return;
2850 }
2851
2852 /* Get NVRAM data. */
2853 if (ql_nv_util_dump(ha, (void *)(uintptr_t)(cmd->ResponseAdr),
2854 mode) != 0) {
2855 cmd->Status = EXT_STATUS_COPY_ERR;
2856 cmd->ResponseLen = 0;
2857 EL(ha, "failed, copy error\n");
2858 } else {
2859 cmd->ResponseLen = ha->nvram_cache->size;
2860 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
2861 }
2862 }
2863
2864 /*
2865 * ql_write_nvram
2866 * Loads NVRAM contents.
2867 *
2868 * Input:
2869 * ha: adapter state pointer.
2870 * cmd: EXT_IOCTL cmd struct pointer.
2871 * mode: flags.
2872 *
2873 * Returns:
2874 * None, request status indicated in cmd->Status.
2875 *
2876 * Context:
2877 * Kernel context.
2878 */
2879 static void
ql_write_nvram(ql_adapter_state_t * ha,EXT_IOCTL * cmd,int mode)2880 ql_write_nvram(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
2881 {
2882
2883 QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
2884
2885 if (cmd->RequestLen < ha->nvram_cache->size) {
2886 cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
2887 cmd->DetailStatus = ha->nvram_cache->size;
2888 EL(ha, "failed, RequestLen != NVRAM, Len=%xh\n",
2889 cmd->RequestLen);
2890 return;
2891 }
2892
2893 /* Load NVRAM data. */
2894 if (ql_nv_util_load(ha, (void *)(uintptr_t)(cmd->RequestAdr),
2895 mode) != 0) {
2896 cmd->Status = EXT_STATUS_COPY_ERR;
2897 EL(ha, "failed, copy error\n");
2898 } else {
2899 /*EMPTY*/
2900 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
2901 }
2902 }
2903
2904 /*
2905 * ql_write_vpd
2906 * Loads VPD contents.
2907 *
2908 * Input:
2909 * ha: adapter state pointer.
2910 * cmd: EXT_IOCTL cmd struct pointer.
2911 * mode: flags.
2912 *
2913 * Returns:
2914 * None, request status indicated in cmd->Status.
2915 *
2916 * Context:
2917 * Kernel context.
2918 */
2919 static void
ql_write_vpd(ql_adapter_state_t * ha,EXT_IOCTL * cmd,int mode)2920 ql_write_vpd(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
2921 {
2922 QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
2923
2924 int32_t rval = 0;
2925
2926 if ((CFG_IST(ha, CFG_CTRL_24258081)) == 0) {
2927 cmd->Status = EXT_STATUS_INVALID_REQUEST;
2928 EL(ha, "failed, invalid request for HBA\n");
2929 return;
2930 }
2931
2932 if (cmd->RequestLen < QL_24XX_VPD_SIZE) {
2933 cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
2934 cmd->DetailStatus = QL_24XX_VPD_SIZE;
2935 EL(ha, "failed, RequestLen != VPD len, len passed=%xh\n",
2936 cmd->RequestLen);
2937 return;
2938 }
2939
2940 /* Load VPD data. */
2941 if ((rval = ql_vpd_load(ha, (void *)(uintptr_t)(cmd->RequestAdr),
2942 mode)) != 0) {
2943 cmd->Status = EXT_STATUS_COPY_ERR;
2944 cmd->DetailStatus = rval;
2945 EL(ha, "failed, errno=%x\n", rval);
2946 } else {
2947 /*EMPTY*/
2948 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
2949 }
2950 }
2951
2952 /*
2953 * ql_read_vpd
2954 * Dumps VPD contents.
2955 *
2956 * Input:
2957 * ha: adapter state pointer.
2958 * cmd: EXT_IOCTL cmd struct pointer.
2959 * mode: flags.
2960 *
2961 * Returns:
2962 * None, request status indicated in cmd->Status.
2963 *
2964 * Context:
2965 * Kernel context.
2966 */
2967 static void
ql_read_vpd(ql_adapter_state_t * ha,EXT_IOCTL * cmd,int mode)2968 ql_read_vpd(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
2969 {
2970 QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
2971
2972 if ((CFG_IST(ha, CFG_CTRL_24258081)) == 0) {
2973 cmd->Status = EXT_STATUS_INVALID_REQUEST;
2974 EL(ha, "failed, invalid request for HBA\n");
2975 return;
2976 }
2977
2978 if (cmd->ResponseLen < QL_24XX_VPD_SIZE) {
2979 cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
2980 cmd->DetailStatus = QL_24XX_VPD_SIZE;
2981 EL(ha, "failed, ResponseLen < VPD len, len passed=%xh\n",
2982 cmd->ResponseLen);
2983 return;
2984 }
2985
2986 /* Dump VPD data. */
2987 if ((ql_vpd_dump(ha, (void *)(uintptr_t)(cmd->ResponseAdr),
2988 mode)) != 0) {
2989 cmd->Status = EXT_STATUS_COPY_ERR;
2990 EL(ha, "failed,\n");
2991 } else {
2992 /*EMPTY*/
2993 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
2994 }
2995 }
2996
2997 /*
2998 * ql_get_fcache
2999 * Dumps flash cache contents.
3000 *
3001 * Input:
3002 * ha: adapter state pointer.
3003 * cmd: EXT_IOCTL cmd struct pointer.
3004 * mode: flags.
3005 *
3006 * Returns:
3007 * None, request status indicated in cmd->Status.
3008 *
3009 * Context:
3010 * Kernel context.
3011 */
static void
ql_get_fcache(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
{
	uint32_t	bsize, boff, types, cpsize, hsize;
	ql_fcache_t	*fptr;

	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);

	CACHE_LOCK(ha);

	/* Nothing to dump if the flash cache was never populated. */
	if (ha->fcache == NULL) {
		CACHE_UNLOCK(ha);
		cmd->Status = EXT_STATUS_ERR;
		EL(ha, "failed, adapter fcache not setup\n");
		return;
	}

	/*
	 * Minimum response size: the legacy layout uses fixed 100-byte
	 * slots per image; newer (24xx+) parts need four slots (the
	 * firmware slot is fixed at offset 300 below).
	 */
	if ((CFG_IST(ha, CFG_CTRL_24258081)) == 0) {
		bsize = 100;
	} else {
		bsize = 400;
	}

	if (cmd->ResponseLen < bsize) {
		CACHE_UNLOCK(ha);
		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
		cmd->DetailStatus = bsize;
		EL(ha, "failed, ResponseLen < %d, len passed=%xh\n",
		    bsize, cmd->ResponseLen);
		return;
	}

	/* bsize is reused below as the running count of bytes copied. */
	boff = 0;
	bsize = 0;
	fptr = ha->fcache;

	/*
	 * For backwards compatibility, get one of each image type
	 */
	types = (FTYPE_BIOS | FTYPE_FCODE | FTYPE_EFI);
	while ((fptr != NULL) && (fptr->buf != NULL) && (types != 0)) {
		/* Get the next image */
		if ((fptr = ql_get_fbuf(ha->fcache, types)) != NULL) {

			/* Each image is truncated to its 100-byte slot. */
			cpsize = (fptr->buflen < 100 ? fptr->buflen : 100);

			if (ddi_copyout(fptr->buf,
			    (void *)(uintptr_t)(cmd->ResponseAdr + boff),
			    cpsize, mode) != 0) {
				CACHE_UNLOCK(ha);
				EL(ha, "ddicopy failed, done\n");
				cmd->Status = EXT_STATUS_COPY_ERR;
				cmd->DetailStatus = 0;
				return;
			}
			/* Advance by the fixed slot size, not cpsize. */
			boff += 100;
			bsize += cpsize;
			/* Clear the type bit so each type is taken once. */
			types &= ~(fptr->type);
		}
	}

	/*
	 * Get the firmware image -- it needs to be last in the
	 * buffer at offset 300 for backwards compatibility. Also for
	 * backwards compatibility, the pci header is stripped off.
	 */
	if ((fptr = ql_get_fbuf(ha->fcache, FTYPE_FW)) != NULL) {

		hsize = sizeof (pci_header_t) + sizeof (pci_data_t);
		/* The cached image must be at least header-sized. */
		if (hsize > fptr->buflen) {
			CACHE_UNLOCK(ha);
			EL(ha, "header size (%xh) exceeds buflen (%xh)\n",
			    hsize, fptr->buflen);
			cmd->Status = EXT_STATUS_COPY_ERR;
			cmd->DetailStatus = 0;
			return;
		}

		/* Copy at most 100 bytes starting past the pci header. */
		cpsize = ((fptr->buflen - hsize) < 100 ?
		    fptr->buflen - hsize : 100);

		if (ddi_copyout(fptr->buf+hsize,
		    (void *)(uintptr_t)(cmd->ResponseAdr + 300),
		    cpsize, mode) != 0) {
			CACHE_UNLOCK(ha);
			EL(ha, "fw ddicopy failed, done\n");
			cmd->Status = EXT_STATUS_COPY_ERR;
			cmd->DetailStatus = 0;
			return;
		}
		/*
		 * NOTE(review): the fw slot is accounted as 100 bytes
		 * even when cpsize < 100 -- presumably intentional for
		 * the fixed legacy layout; confirm against consumers.
		 */
		bsize += 100;
	}

	CACHE_UNLOCK(ha);
	cmd->Status = EXT_STATUS_OK;
	/* DetailStatus reports how many bytes were actually returned. */
	cmd->DetailStatus = bsize;

	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
}
3111
3112 /*
3113 * ql_get_fcache_ex
3114 * Dumps flash cache contents.
3115 *
3116 * Input:
3117 * ha: adapter state pointer.
3118 * cmd: EXT_IOCTL cmd struct pointer.
3119 * mode: flags.
3120 *
3121 * Returns:
3122 * None, request status indicated in cmd->Status.
3123 *
3124 * Context:
3125 * Kernel context.
3126 */
3127 static void
ql_get_fcache_ex(ql_adapter_state_t * ha,EXT_IOCTL * cmd,int mode)3128 ql_get_fcache_ex(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
3129 {
3130 uint32_t bsize = 0;
3131 uint32_t boff = 0;
3132 ql_fcache_t *fptr;
3133
3134 QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
3135
3136 CACHE_LOCK(ha);
3137 if (ha->fcache == NULL) {
3138 CACHE_UNLOCK(ha);
3139 cmd->Status = EXT_STATUS_ERR;
3140 EL(ha, "failed, adapter fcache not setup\n");
3141 return;
3142 }
3143
3144 /* Make sure user passed enough buffer space */
3145 for (fptr = ha->fcache; fptr != NULL; fptr = fptr->next) {
3146 bsize += FBUFSIZE;
3147 }
3148
3149 if (cmd->ResponseLen < bsize) {
3150 CACHE_UNLOCK(ha);
3151 if (cmd->ResponseLen != 0) {
3152 EL(ha, "failed, ResponseLen < %d, len passed=%xh\n",
3153 bsize, cmd->ResponseLen);
3154 }
3155 cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
3156 cmd->DetailStatus = bsize;
3157 return;
3158 }
3159
3160 boff = 0;
3161 fptr = ha->fcache;
3162 while ((fptr != NULL) && (fptr->buf != NULL)) {
3163 /* Get the next image */
3164 if (ddi_copyout(fptr->buf,
3165 (void *)(uintptr_t)(cmd->ResponseAdr + boff),
3166 (fptr->buflen < FBUFSIZE ? fptr->buflen : FBUFSIZE),
3167 mode) != 0) {
3168 CACHE_UNLOCK(ha);
3169 EL(ha, "failed, ddicopy at %xh, done\n", boff);
3170 cmd->Status = EXT_STATUS_COPY_ERR;
3171 cmd->DetailStatus = 0;
3172 return;
3173 }
3174 boff += FBUFSIZE;
3175 fptr = fptr->next;
3176 }
3177
3178 CACHE_UNLOCK(ha);
3179 cmd->Status = EXT_STATUS_OK;
3180 cmd->DetailStatus = bsize;
3181
3182 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
3183 }
3184
3185 /*
3186 * ql_read_flash
3187 * Get flash contents.
3188 *
3189 * Input:
3190 * ha: adapter state pointer.
3191 * cmd: EXT_IOCTL cmd struct pointer.
3192 * mode: flags.
3193 *
3194 * Returns:
3195 * None, request status indicated in cmd->Status.
3196 *
3197 * Context:
3198 * Kernel context.
3199 */
3200 static void
ql_read_flash(ql_adapter_state_t * ha,EXT_IOCTL * cmd,int mode)3201 ql_read_flash(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
3202 {
3203 ql_xioctl_t *xp = ha->xioctl;
3204
3205 QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
3206
3207 if (ql_stall_driver(ha, 0) != QL_SUCCESS) {
3208 EL(ha, "ql_stall_driver failed\n");
3209 cmd->Status = EXT_STATUS_BUSY;
3210 cmd->DetailStatus = xp->fdesc.flash_size;
3211 cmd->ResponseLen = 0;
3212 return;
3213 }
3214
3215 if (ql_setup_fcache(ha) != QL_SUCCESS) {
3216 cmd->Status = EXT_STATUS_ERR;
3217 cmd->DetailStatus = xp->fdesc.flash_size;
3218 EL(ha, "failed, ResponseLen=%xh, flash size=%xh\n",
3219 cmd->ResponseLen, xp->fdesc.flash_size);
3220 cmd->ResponseLen = 0;
3221 } else {
3222 /* adjust read size to flash size */
3223 if (cmd->ResponseLen > xp->fdesc.flash_size) {
3224 EL(ha, "adjusting req=%xh, max=%xh\n",
3225 cmd->ResponseLen, xp->fdesc.flash_size);
3226 cmd->ResponseLen = xp->fdesc.