1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at
9  * http://www.opensource.org/licenses/cddl1.txt.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright (c) 2004-2012 Emulex. All rights reserved.
24  * Use is subject to license terms.
25  * Copyright 2020 RackTop Systems, Inc.
26  */
27 
28 #include <emlxs.h>
29 
30 /* Required for EMLXS_CONTEXT in EMLXS_MSGF calls */
31 EMLXS_MSG_DEF(EMLXS_SLI3_C);
32 
/*
 * Forward declarations for the file-local (static) SLI2/SLI3 service
 * routines defined later in this file.
 */
static void emlxs_sli3_issue_iocb(emlxs_hba_t *hba, RING *rp, IOCBQ *iocbq);
static void emlxs_sli3_handle_link_event(emlxs_hba_t *hba);
static void emlxs_sli3_handle_ring_event(emlxs_hba_t *hba, int32_t ring_no,
	uint32_t ha_copy);
#ifdef SFCT_SUPPORT
static uint32_t emlxs_fct_bde_setup(emlxs_port_t *port, emlxs_buf_t *sbp);
#endif /* SFCT_SUPPORT */

static uint32_t	emlxs_bde_setup(emlxs_port_t *port, emlxs_buf_t *sbp);

/*
 * Tunable: defaults to 1 (traffic cop disabled).
 * NOTE(review): the exact effect is defined where this flag is consumed,
 * which is outside this view — confirm before changing the default.
 */
static uint32_t emlxs_disable_traffic_cop = 1;

/* Hardware register mapping/unmapping */
static int			emlxs_sli3_map_hdw(emlxs_hba_t *hba);

static void			emlxs_sli3_unmap_hdw(emlxs_hba_t *hba);

/* Adapter bring-up / shutdown / reset */
static int32_t			emlxs_sli3_online(emlxs_hba_t *hba);

static void			emlxs_sli3_offline(emlxs_hba_t *hba,
					uint32_t reset_requested);

static uint32_t			emlxs_sli3_hba_reset(emlxs_hba_t *hba,
					uint32_t restart, uint32_t skip_post,
					uint32_t quiesce);

static void			emlxs_sli3_hba_kill(emlxs_hba_t *hba);
static void			emlxs_sli3_hba_kill4quiesce(emlxs_hba_t *hba);
static uint32_t			emlxs_sli3_hba_init(emlxs_hba_t *hba);

/* Buffer descriptor (BDE) setup, per SLI mode and FCT vs. initiator path */
static uint32_t			emlxs_sli2_bde_setup(emlxs_port_t *port,
					emlxs_buf_t *sbp);
static uint32_t			emlxs_sli3_bde_setup(emlxs_port_t *port,
					emlxs_buf_t *sbp);
static uint32_t			emlxs_sli2_fct_bde_setup(emlxs_port_t *port,
					emlxs_buf_t *sbp);
static uint32_t			emlxs_sli3_fct_bde_setup(emlxs_port_t *port,
					emlxs_buf_t *sbp);


/* Command submission: IOCBs and mailbox commands */
static void			emlxs_sli3_issue_iocb_cmd(emlxs_hba_t *hba,
					CHANNEL *rp, IOCBQ *iocb_cmd);


static uint32_t			emlxs_sli3_issue_mbox_cmd(emlxs_hba_t *hba,
					MAILBOXQ *mbq, int32_t flg,
					uint32_t tmo);


/* IOCB preparation, one routine per traffic class */
#ifdef SFCT_SUPPORT
static uint32_t			emlxs_sli3_prep_fct_iocb(emlxs_port_t *port,
					emlxs_buf_t *cmd_sbp, int channel);

#endif /* SFCT_SUPPORT */

static uint32_t			emlxs_sli3_prep_fcp_iocb(emlxs_port_t *port,
					emlxs_buf_t *sbp, int ring);

static uint32_t			emlxs_sli3_prep_ip_iocb(emlxs_port_t *port,
					emlxs_buf_t *sbp);

static uint32_t			emlxs_sli3_prep_els_iocb(emlxs_port_t *port,
					emlxs_buf_t *sbp);


static uint32_t			emlxs_sli3_prep_ct_iocb(emlxs_port_t *port,
					emlxs_buf_t *sbp);


/* Interrupt handling: polled, INTx and (optionally) MSI */
static void			emlxs_sli3_poll_intr(emlxs_hba_t *hba);

static int32_t			emlxs_sli3_intx_intr(char *arg);
#ifdef MSI_SUPPORT
static uint32_t			emlxs_sli3_msi_intr(char *arg1, char *arg2);
#endif /* MSI_SUPPORT */

static void			emlxs_sli3_enable_intr(emlxs_hba_t *hba);

static void			emlxs_sli3_disable_intr(emlxs_hba_t *hba,
					uint32_t att);


/* Error, mailbox-event and attention-register processing */
static void			emlxs_handle_ff_error(emlxs_hba_t *hba);

static uint32_t			emlxs_handle_mb_event(emlxs_hba_t *hba);

static void			emlxs_sli3_timer_check_mbox(emlxs_hba_t *hba);

static uint32_t			emlxs_mb_config_port(emlxs_hba_t *hba,
					MAILBOXQ *mbq, uint32_t sli_mode,
					uint32_t hbainit);
static void			emlxs_enable_latt(emlxs_hba_t *hba);

static uint32_t			emlxs_check_attention(emlxs_hba_t *hba);

static uint32_t			emlxs_get_attention(emlxs_hba_t *hba,
					int32_t msgid);
static void			emlxs_proc_attention(emlxs_hba_t *hba,
					uint32_t ha_copy);
/* static int			emlxs_handle_rcv_seq(emlxs_hba_t *hba, */
					/* CHANNEL *cp, IOCBQ *iocbq); */
/* static void			emlxs_update_HBQ_index(emlxs_hba_t *hba, */
					/* uint32_t hbq_id); */
/* static void			emlxs_hbq_free_all(emlxs_hba_t *hba, */
					/* uint32_t hbq_id); */
/* Host buffer queue setup, timers and node registration */
static uint32_t			emlxs_hbq_setup(emlxs_hba_t *hba,
					uint32_t hbq_id);
static void			emlxs_sli3_timer(emlxs_hba_t *hba);

static void			emlxs_sli3_poll_erratt(emlxs_hba_t *hba);

static uint32_t			emlxs_sli3_reg_did(emlxs_port_t *port,
					uint32_t did, SERV_PARM *param,
					emlxs_buf_t *sbp, fc_unsol_buf_t *ubp,
					IOCBQ *iocbq);

static uint32_t			emlxs_sli3_unreg_node(emlxs_port_t *port,
					NODELIST *node, emlxs_buf_t *sbp,
					fc_unsol_buf_t *ubp, IOCBQ *iocbq);

152 
/* Define SLI3 API functions */
/*
 * SLI3 implementation of the driver's SLI abstraction layer.
 * This is a positional initializer: entry order must match the member
 * order of emlxs_sli_api_t (declared elsewhere) — do not reorder.
 */
emlxs_sli_api_t emlxs_sli3_api = {
	emlxs_sli3_map_hdw,		/* map hardware registers */
	emlxs_sli3_unmap_hdw,		/* unmap hardware registers */
	emlxs_sli3_online,		/* bring adapter online */
	emlxs_sli3_offline,		/* take adapter offline */
	emlxs_sli3_hba_reset,		/* reset adapter */
	emlxs_sli3_hba_kill,		/* kill adapter */
	emlxs_sli3_issue_iocb_cmd,	/* issue IOCB command */
	emlxs_sli3_issue_mbox_cmd,	/* issue mailbox command */
#ifdef SFCT_SUPPORT
	emlxs_sli3_prep_fct_iocb,	/* prep FC target-mode IOCB */
#else
	NULL,				/* FCT not compiled in */
#endif /* SFCT_SUPPORT */
	emlxs_sli3_prep_fcp_iocb,	/* prep FCP IOCB */
	emlxs_sli3_prep_ip_iocb,	/* prep IP IOCB */
	emlxs_sli3_prep_els_iocb,	/* prep ELS IOCB */
	emlxs_sli3_prep_ct_iocb,	/* prep CT IOCB */
	emlxs_sli3_poll_intr,		/* polled interrupt service */
	emlxs_sli3_intx_intr,		/* INTx interrupt handler */
	emlxs_sli3_msi_intr,		/* MSI interrupt handler.
					 * NOTE(review): declared only under
					 * MSI_SUPPORT (above) yet listed
					 * unconditionally here — verify this
					 * build always defines MSI_SUPPORT. */
	emlxs_sli3_disable_intr,	/* disable interrupts */
	emlxs_sli3_timer,		/* periodic timer service */
	emlxs_sli3_poll_erratt,		/* poll error attention */
	emlxs_sli3_reg_did,		/* register DID (login) */
	emlxs_sli3_unreg_node		/* unregister node */
};
181 
182 
183 /*
184  * emlxs_sli3_online()
185  *
186  * This routine will start initialization of the SLI2/3 HBA.
187  */
188 static int32_t
emlxs_sli3_online(emlxs_hba_t * hba)189 emlxs_sli3_online(emlxs_hba_t *hba)
190 {
191 	emlxs_port_t *port = &PPORT;
192 	emlxs_config_t *cfg;
193 	emlxs_vpd_t *vpd;
194 	MAILBOX *mb = NULL;
195 	MAILBOXQ *mbq = NULL;
196 	RING *rp;
197 	CHANNEL *cp;
198 	MATCHMAP *mp = NULL;
199 	MATCHMAP *mp1 = NULL;
200 	uint8_t *inptr;
201 	uint8_t *outptr;
202 	uint32_t status;
203 	uint16_t i;
204 	uint32_t j;
205 	uint32_t read_rev_reset;
206 	uint32_t key = 0;
207 	uint32_t fw_check;
208 	uint32_t kern_update = 0;
209 	uint32_t rval = 0;
210 	uint32_t offset;
211 	uint8_t vpd_data[DMP_VPD_SIZE];
212 	uint32_t MaxRbusSize;
213 	uint32_t MaxIbusSize;
214 	uint32_t sli_mode;
215 	uint32_t sli_mode_mask;
216 
217 	cfg = &CFG;
218 	vpd = &VPD;
219 	MaxRbusSize = 0;
220 	MaxIbusSize = 0;
221 	read_rev_reset = 0;
222 	hba->chan_count = MAX_RINGS;
223 
224 	if (hba->bus_type == SBUS_FC) {
225 		(void) READ_SBUS_CSR_REG(hba, FC_SHS_REG(hba));
226 	}
227 
228 	/* Set the fw_check flag */
229 	fw_check = cfg[CFG_FW_CHECK].current;
230 
231 	if ((fw_check & 0x04) ||
232 	    (hba->fw_flag & FW_UPDATE_KERNEL)) {
233 		kern_update = 1;
234 	}
235 
236 	hba->mbox_queue_flag = 0;
237 	hba->sli.sli3.hc_copy = 0;
238 	hba->fc_edtov = FF_DEF_EDTOV;
239 	hba->fc_ratov = FF_DEF_RATOV;
240 	hba->fc_altov = FF_DEF_ALTOV;
241 	hba->fc_arbtov = FF_DEF_ARBTOV;
242 
243 	/*
244 	 * Get a buffer which will be used repeatedly for mailbox commands
245 	 */
246 	mbq = (MAILBOXQ *) kmem_zalloc((sizeof (MAILBOXQ)), KM_SLEEP);
247 
248 	mb = (MAILBOX *)mbq;
249 
250 	/* Initialize sli mode based on configuration parameter */
251 	switch (cfg[CFG_SLI_MODE].current) {
252 	case 2:	/* SLI2 mode */
253 		sli_mode = EMLXS_HBA_SLI2_MODE;
254 		sli_mode_mask = EMLXS_SLI2_MASK;
255 		break;
256 
257 	case 3:	/* SLI3 mode */
258 		sli_mode = EMLXS_HBA_SLI3_MODE;
259 		sli_mode_mask = EMLXS_SLI3_MASK;
260 		break;
261 
262 	case 0:	/* Best available */
263 	case 1:	/* Best available */
264 	default:
265 		if (hba->model_info.sli_mask & EMLXS_SLI3_MASK) {
266 			sli_mode = EMLXS_HBA_SLI3_MODE;
267 			sli_mode_mask = EMLXS_SLI3_MASK;
268 		} else if (hba->model_info.sli_mask & EMLXS_SLI2_MASK) {
269 			sli_mode = EMLXS_HBA_SLI2_MODE;
270 			sli_mode_mask = EMLXS_SLI2_MASK;
271 		} else {
272 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
273 			    "No SLI mode available.");
274 			rval = EIO;
275 			goto failed;
276 		}
277 		break;
278 	}
279 	/* SBUS adapters only available in SLI2 */
280 	if (hba->bus_type == SBUS_FC) {
281 		sli_mode = EMLXS_HBA_SLI2_MODE;
282 		sli_mode_mask = EMLXS_SLI2_MASK;
283 	}
284 
285 reset:
286 	/* Reset & Initialize the adapter */
287 	if (emlxs_sli3_hba_init(hba)) {
288 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
289 		    "Unable to init hba.");
290 
291 		rval = EIO;
292 		goto failed;
293 	}
294 
295 #ifdef FMA_SUPPORT
296 	/* Access handle validation */
297 	if ((emlxs_fm_check_acc_handle(hba, hba->pci_acc_handle)
298 	    != DDI_FM_OK) ||
299 	    (emlxs_fm_check_acc_handle(hba, hba->sli.sli3.slim_acc_handle)
300 	    != DDI_FM_OK) ||
301 	    (emlxs_fm_check_acc_handle(hba, hba->sli.sli3.csr_acc_handle)
302 	    != DDI_FM_OK)) {
303 		EMLXS_MSGF(EMLXS_CONTEXT,
304 		    &emlxs_invalid_access_handle_msg, NULL);
305 
306 		rval = EIO;
307 		goto failed;
308 	}
309 #endif	/* FMA_SUPPORT */
310 
311 	/* Check for PEGASUS (This is a special case) */
312 	/* We need to check for dual channel adapter */
313 	if (hba->model_info.vendor_id == PCI_VENDOR_ID_EMULEX &&
314 	    hba->model_info.device_id == PCI_DEVICE_ID_PEGASUS) {
315 		/* Try to determine if this is a DC adapter */
316 		if (emlxs_get_max_sram(hba, &MaxRbusSize, &MaxIbusSize) == 0) {
317 			if (MaxRbusSize == REDUCED_SRAM_CFG) {
318 				/* LP9802DC */
319 				for (i = 1; i < emlxs_pci_model_count; i++) {
320 					if (emlxs_pci_model[i].id == LP9802DC) {
321 						bcopy(&emlxs_pci_model[i],
322 						    &hba->model_info,
323 						    sizeof (emlxs_model_t));
324 						break;
325 					}
326 				}
327 			} else if (hba->model_info.id != LP9802) {
328 				/* LP9802 */
329 				for (i = 1; i < emlxs_pci_model_count; i++) {
330 					if (emlxs_pci_model[i].id == LP9802) {
331 						bcopy(&emlxs_pci_model[i],
332 						    &hba->model_info,
333 						    sizeof (emlxs_model_t));
334 						break;
335 					}
336 				}
337 			}
338 		}
339 	}
340 
341 	/*
342 	 * Setup and issue mailbox READ REV command
343 	 */
344 	vpd->opFwRev = 0;
345 	vpd->postKernRev = 0;
346 	vpd->sli1FwRev = 0;
347 	vpd->sli2FwRev = 0;
348 	vpd->sli3FwRev = 0;
349 	vpd->sli4FwRev = 0;
350 
351 	vpd->postKernName[0] = 0;
352 	vpd->opFwName[0] = 0;
353 	vpd->sli1FwName[0] = 0;
354 	vpd->sli2FwName[0] = 0;
355 	vpd->sli3FwName[0] = 0;
356 	vpd->sli4FwName[0] = 0;
357 
358 	vpd->opFwLabel[0] = 0;
359 	vpd->sli1FwLabel[0] = 0;
360 	vpd->sli2FwLabel[0] = 0;
361 	vpd->sli3FwLabel[0] = 0;
362 	vpd->sli4FwLabel[0] = 0;
363 
364 	/* Sanity check */
365 	if (hba->model_info.sli_mask & EMLXS_SLI4_MASK) {
366 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
367 		    "Adapter / SLI mode mismatch mask:x%x",
368 		    hba->model_info.sli_mask);
369 
370 		rval = EIO;
371 		goto failed;
372 	}
373 
374 	EMLXS_STATE_CHANGE(hba, FC_INIT_REV);
375 	emlxs_mb_read_rev(hba, mbq, 0);
376 	if (emlxs_sli3_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) != MBX_SUCCESS) {
377 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
378 		    "Unable to read rev. Mailbox cmd=%x status=%x",
379 		    mb->mbxCommand, mb->mbxStatus);
380 
381 		rval = EIO;
382 		goto failed;
383 	}
384 
385 	if (mb->un.varRdRev.rr == 0) {
386 		/* Old firmware */
387 		if (read_rev_reset == 0) {
388 			read_rev_reset = 1;
389 
390 			goto reset;
391 		} else {
392 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg,
393 			    "Outdated firmware detected.");
394 		}
395 
396 		vpd->rBit = 0;
397 	} else {
398 		if (mb->un.varRdRev.un.b.ProgType != FUNC_FIRMWARE) {
399 			if (read_rev_reset == 0) {
400 				read_rev_reset = 1;
401 
402 				goto reset;
403 			} else {
404 				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg,
405 				    "Non-operational firmware detected. "
406 				    "type=%x",
407 				    mb->un.varRdRev.un.b.ProgType);
408 			}
409 		}
410 
411 		vpd->rBit = 1;
412 		vpd->sli1FwRev = mb->un.varRdRev.sliFwRev1;
413 		bcopy((char *)mb->un.varRdRev.sliFwName1, vpd->sli1FwLabel,
414 		    16);
415 		vpd->sli2FwRev = mb->un.varRdRev.sliFwRev2;
416 		bcopy((char *)mb->un.varRdRev.sliFwName2, vpd->sli2FwLabel,
417 		    16);
418 
419 		/*
420 		 * Lets try to read the SLI3 version
421 		 * Setup and issue mailbox READ REV(v3) command
422 		 */
423 		EMLXS_STATE_CHANGE(hba, FC_INIT_REV);
424 
425 		/* Reuse mbq from previous mbox */
426 		bzero(mbq, sizeof (MAILBOXQ));
427 
428 		emlxs_mb_read_rev(hba, mbq, 1);
429 
430 		if (emlxs_sli3_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) !=
431 		    MBX_SUCCESS) {
432 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
433 			    "Unable to read rev (v3). Mailbox cmd=%x status=%x",
434 			    mb->mbxCommand, mb->mbxStatus);
435 
436 			rval = EIO;
437 			goto failed;
438 		}
439 
440 		if (mb->un.varRdRev.rf3) {
441 			/*
442 			 * vpd->sli2FwRev = mb->un.varRdRev.sliFwRev1;
443 			 * Not needed
444 			 */
445 			vpd->sli3FwRev = mb->un.varRdRev.sliFwRev2;
446 			bcopy((char *)mb->un.varRdRev.sliFwName2,
447 			    vpd->sli3FwLabel, 16);
448 		}
449 	}
450 
451 	if ((sli_mode == EMLXS_HBA_SLI3_MODE) && (vpd->sli3FwRev == 0)) {
452 		if (vpd->sli2FwRev) {
453 			sli_mode = EMLXS_HBA_SLI2_MODE;
454 			sli_mode_mask = EMLXS_SLI2_MASK;
455 		} else {
456 			sli_mode = 0;
457 			sli_mode_mask = 0;
458 		}
459 	}
460 
461 	else if ((sli_mode == EMLXS_HBA_SLI2_MODE) && (vpd->sli2FwRev == 0)) {
462 		if (vpd->sli3FwRev) {
463 			sli_mode = EMLXS_HBA_SLI3_MODE;
464 			sli_mode_mask = EMLXS_SLI3_MASK;
465 		} else {
466 			sli_mode = 0;
467 			sli_mode_mask = 0;
468 		}
469 	}
470 
471 	if (!(hba->model_info.sli_mask & sli_mode_mask)) {
472 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
473 		    "Firmware not available. sli-mode=%d",
474 		    cfg[CFG_SLI_MODE].current);
475 
476 		rval = EIO;
477 		goto failed;
478 	}
479 
480 	/* Save information as VPD data */
481 	vpd->postKernRev = mb->un.varRdRev.postKernRev;
482 	vpd->opFwRev = mb->un.varRdRev.opFwRev;
483 	bcopy((char *)mb->un.varRdRev.opFwName, vpd->opFwLabel, 16);
484 	vpd->biuRev = mb->un.varRdRev.biuRev;
485 	vpd->smRev = mb->un.varRdRev.smRev;
486 	vpd->smFwRev = mb->un.varRdRev.un.smFwRev;
487 	vpd->endecRev = mb->un.varRdRev.endecRev;
488 	vpd->fcphHigh = mb->un.varRdRev.fcphHigh;
489 	vpd->fcphLow = mb->un.varRdRev.fcphLow;
490 	vpd->feaLevelHigh = mb->un.varRdRev.feaLevelHigh;
491 	vpd->feaLevelLow = mb->un.varRdRev.feaLevelLow;
492 
493 	/* Decode FW names */
494 	emlxs_decode_version(vpd->postKernRev, vpd->postKernName,
495 	    sizeof (vpd->postKernName));
496 	emlxs_decode_version(vpd->opFwRev, vpd->opFwName,
497 	    sizeof (vpd->opFwName));
498 	emlxs_decode_version(vpd->sli1FwRev, vpd->sli1FwName,
499 	    sizeof (vpd->sli1FwName));
500 	emlxs_decode_version(vpd->sli2FwRev, vpd->sli2FwName,
501 	    sizeof (vpd->sli2FwName));
502 	emlxs_decode_version(vpd->sli3FwRev, vpd->sli3FwName,
503 	    sizeof (vpd->sli3FwName));
504 	emlxs_decode_version(vpd->sli4FwRev, vpd->sli4FwName,
505 	    sizeof (vpd->sli4FwName));
506 
507 	/* Decode FW labels */
508 	emlxs_decode_label(vpd->opFwLabel, vpd->opFwLabel, 1,
509 	    sizeof (vpd->opFwLabel));
510 	emlxs_decode_label(vpd->sli1FwLabel, vpd->sli1FwLabel, 1,
511 	    sizeof (vpd->sli1FwLabel));
512 	emlxs_decode_label(vpd->sli2FwLabel, vpd->sli2FwLabel, 1,
513 	    sizeof (vpd->sli2FwLabel));
514 	emlxs_decode_label(vpd->sli3FwLabel, vpd->sli3FwLabel, 1,
515 	    sizeof (vpd->sli3FwLabel));
516 	emlxs_decode_label(vpd->sli4FwLabel, vpd->sli4FwLabel, 1,
517 	    sizeof (vpd->sli4FwLabel));
518 
519 	/* Reuse mbq from previous mbox */
520 	bzero(mbq, sizeof (MAILBOXQ));
521 
522 	key = emlxs_get_key(hba, mbq);
523 
524 	/* Get adapter VPD information */
525 	offset = 0;
526 	bzero(vpd_data, sizeof (vpd_data));
527 	vpd->port_index = (uint32_t)-1;
528 
529 	while (offset < DMP_VPD_SIZE) {
530 		/* Reuse mbq from previous mbox */
531 		bzero(mbq, sizeof (MAILBOXQ));
532 
533 		emlxs_mb_dump_vpd(hba, mbq, offset);
534 		if (emlxs_sli3_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) !=
535 		    MBX_SUCCESS) {
536 			/*
537 			 * Let it go through even if failed.
538 			 * Not all adapter's have VPD info and thus will
539 			 * fail here. This is not a problem
540 			 */
541 
542 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
543 			    "No VPD found. offset=%x status=%x", offset,
544 			    mb->mbxStatus);
545 			break;
546 		} else {
547 			if (mb->un.varDmp.ra == 1) {
548 				uint32_t *lp1, *lp2;
549 				uint32_t bsize;
550 				uint32_t wsize;
551 
552 				/*
553 				 * mb->un.varDmp.word_cnt is actually byte
554 				 * count for the dump reply
555 				 */
556 				bsize = mb->un.varDmp.word_cnt;
557 
558 				/* Stop if no data was received */
559 				if (bsize == 0) {
560 					break;
561 				}
562 
563 				/* Check limit on byte size */
564 				bsize = (bsize >
565 				    (sizeof (vpd_data) - offset)) ?
566 				    (sizeof (vpd_data) - offset) : bsize;
567 
568 				/*
569 				 * Convert size from bytes to words with
570 				 * minimum of 1 word
571 				 */
572 				wsize = (bsize > 4) ? (bsize >> 2) : 1;
573 
574 				/*
575 				 * Transfer data into vpd_data buffer one
576 				 * word at a time
577 				 */
578 				lp1 = (uint32_t *)&mb->un.varDmp.resp_offset;
579 				lp2 = (uint32_t *)&vpd_data[offset];
580 
581 				for (i = 0; i < wsize; i++) {
582 					status = *lp1++;
583 					*lp2++ = BE_SWAP32(status);
584 				}
585 
586 				/* Increment total byte count saved */
587 				offset += (wsize << 2);
588 
589 				/*
590 				 * Stop if less than a full transfer was
591 				 * received
592 				 */
593 				if (wsize < DMP_VPD_DUMP_WCOUNT) {
594 					break;
595 				}
596 
597 			} else {
598 				EMLXS_MSGF(EMLXS_CONTEXT,
599 				    &emlxs_init_debug_msg,
600 				    "No VPD acknowledgment. offset=%x",
601 				    offset);
602 				break;
603 			}
604 		}
605 
606 	}
607 
608 	if (vpd_data[0]) {
609 		(void) emlxs_parse_vpd(hba, (uint8_t *)vpd_data, offset);
610 
611 		/*
612 		 * If there is a VPD part number, and it does not
613 		 * match the current default HBA model info,
614 		 * replace the default data with an entry that
615 		 * does match.
616 		 *
617 		 * After emlxs_parse_vpd model holds the VPD value
618 		 * for V2 and part_num hold the value for PN. These
619 		 * 2 values are NOT necessarily the same.
620 		 */
621 
622 		rval = 0;
623 		if ((vpd->model[0] != 0) &&
624 		    (strcmp(&vpd->model[0], hba->model_info.model) != 0)) {
625 
626 			/* First scan for a V2 match */
627 
628 			for (i = 1; i < emlxs_pci_model_count; i++) {
629 				if (strcmp(&vpd->model[0],
630 				    emlxs_pci_model[i].model) == 0) {
631 					bcopy(&emlxs_pci_model[i],
632 					    &hba->model_info,
633 					    sizeof (emlxs_model_t));
634 					rval = 1;
635 					break;
636 				}
637 			}
638 		}
639 
640 		if (!rval && (vpd->part_num[0] != 0) &&
641 		    (strcmp(&vpd->part_num[0], hba->model_info.model) != 0)) {
642 
643 			/* Next scan for a PN match */
644 
645 			for (i = 1; i < emlxs_pci_model_count; i++) {
646 				if (strcmp(&vpd->part_num[0],
647 				    emlxs_pci_model[i].model) == 0) {
648 					bcopy(&emlxs_pci_model[i],
649 					    &hba->model_info,
650 					    sizeof (emlxs_model_t));
651 					break;
652 				}
653 			}
654 		}
655 
656 		/*
657 		 * Now lets update hba->model_info with the real
658 		 * VPD data, if any.
659 		 */
660 
661 		/*
662 		 * Replace the default model description with vpd data
663 		 */
664 		if (vpd->model_desc[0] != 0) {
665 			(void) strncpy(hba->model_info.model_desc,
666 			    vpd->model_desc,
667 			    (sizeof (hba->model_info.model_desc)-1));
668 		}
669 
670 		/* Replace the default model with vpd data */
671 		if (vpd->model[0] != 0) {
672 			(void) strncpy(hba->model_info.model, vpd->model,
673 			    (sizeof (hba->model_info.model)-1));
674 		}
675 
676 		/* Replace the default program types with vpd data */
677 		if (vpd->prog_types[0] != 0) {
678 			emlxs_parse_prog_types(hba, vpd->prog_types);
679 		}
680 	}
681 
682 	/*
683 	 * Since the adapter model may have changed with the vpd data
684 	 * lets double check if adapter is not supported
685 	 */
686 	if (hba->model_info.flags & EMLXS_NOT_SUPPORTED) {
687 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
688 		    "Unsupported adapter found.  "
689 		    "Id:%d  Vendor id:0x%x  Device id:0x%x  SSDID:0x%x  "
690 		    "Model:%s", hba->model_info.id, hba->model_info.vendor_id,
691 		    hba->model_info.device_id, hba->model_info.ssdid,
692 		    hba->model_info.model);
693 
694 		rval = EIO;
695 		goto failed;
696 	}
697 
698 	/* Read the adapter's wakeup parms */
699 	(void) emlxs_read_wakeup_parms(hba, &hba->wakeup_parms, 1);
700 	emlxs_decode_version(hba->wakeup_parms.u0.boot_bios_wd[0],
701 	    vpd->boot_version, sizeof (vpd->boot_version));
702 
703 	/* Get fcode version property */
704 	emlxs_get_fcode_version(hba);
705 
706 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
707 	    "Firmware: kern=%08x stub=%08x sli1=%08x", vpd->postKernRev,
708 	    vpd->opFwRev, vpd->sli1FwRev);
709 
710 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
711 	    "Firmware: sli2=%08x sli3=%08x sli4=%08x fl=%x", vpd->sli2FwRev,
712 	    vpd->sli3FwRev, vpd->sli4FwRev, vpd->feaLevelHigh);
713 
714 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
715 	    "BIOS: boot=%s fcode=%s", vpd->boot_version, vpd->fcode_version);
716 
717 	/*
718 	 * If firmware checking is enabled and the adapter model indicates
719 	 * a firmware image, then perform firmware version check
720 	 */
721 	hba->fw_flag = 0;
722 	hba->fw_timer = 0;
723 
724 	if (((fw_check & 0x1) &&
725 	    (hba->model_info.flags & EMLXS_ORACLE_BRANDED) &&
726 	    hba->model_info.fwid) || ((fw_check & 0x2) &&
727 	    hba->model_info.fwid)) {
728 		emlxs_firmware_t *fw;
729 
730 		/* Find firmware image indicated by adapter model */
731 		fw = NULL;
732 		for (i = 0; i < emlxs_fw_count; i++) {
733 			if (emlxs_fw_table[i].id == hba->model_info.fwid) {
734 				fw = &emlxs_fw_table[i];
735 				break;
736 			}
737 		}
738 
739 		/*
740 		 * If the image was found, then verify current firmware
741 		 * versions of adapter
742 		 */
743 		if (fw) {
744 			if (!kern_update &&
745 			    ((fw->kern && (vpd->postKernRev != fw->kern)) ||
746 			    (fw->stub && (vpd->opFwRev != fw->stub)))) {
747 
748 				hba->fw_flag |= FW_UPDATE_NEEDED;
749 
750 			} else if ((fw->kern && (vpd->postKernRev !=
751 			    fw->kern)) ||
752 			    (fw->stub && (vpd->opFwRev != fw->stub)) ||
753 			    (fw->sli1 && (vpd->sli1FwRev != fw->sli1)) ||
754 			    (fw->sli2 && (vpd->sli2FwRev != fw->sli2)) ||
755 			    (fw->sli3 && (vpd->sli3FwRev != fw->sli3)) ||
756 			    (fw->sli4 && (vpd->sli4FwRev != fw->sli4))) {
757 				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg,
758 				    "Firmware update needed. "
759 				    "Updating. id=%d fw=%d",
760 				    hba->model_info.id, hba->model_info.fwid);
761 
762 #ifdef MODFW_SUPPORT
763 				/*
764 				 * Load the firmware image now
765 				 * If MODFW_SUPPORT is not defined, the
766 				 * firmware image will already be defined
767 				 * in the emlxs_fw_table
768 				 */
769 				emlxs_fw_load(hba, fw);
770 #endif /* MODFW_SUPPORT */
771 
772 				if (fw->image && fw->size) {
773 					uint32_t rc;
774 
775 					rc = emlxs_fw_download(hba,
776 					    (char *)fw->image, fw->size, 0);
777 					if ((rc != FC_SUCCESS) &&
778 					    (rc != EMLXS_REBOOT_REQUIRED)) {
779 						EMLXS_MSGF(EMLXS_CONTEXT,
780 						    &emlxs_init_msg,
781 						    "Firmware update failed.");
782 						hba->fw_flag |=
783 						    FW_UPDATE_NEEDED;
784 					}
785 #ifdef MODFW_SUPPORT
786 					/*
787 					 * Unload the firmware image from
788 					 * kernel memory
789 					 */
790 					emlxs_fw_unload(hba, fw);
791 #endif /* MODFW_SUPPORT */
792 
793 					fw_check = 0;
794 
795 					goto reset;
796 				}
797 
798 				hba->fw_flag |= FW_UPDATE_NEEDED;
799 
800 				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg,
801 				    "Firmware image unavailable.");
802 			} else {
803 				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg,
804 				    "Firmware update not needed.");
805 			}
806 		} else {
807 			/* This should not happen */
808 
809 			/*
810 			 * This means either the adapter database is not
811 			 * correct or a firmware image is missing from the
812 			 * compile
813 			 */
814 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg,
815 			    "Firmware image unavailable. id=%d fw=%d",
816 			    hba->model_info.id, hba->model_info.fwid);
817 		}
818 	}
819 
820 	/*
821 	 * Add our interrupt routine to kernel's interrupt chain & enable it
822 	 * If MSI is enabled this will cause Solaris to program the MSI address
823 	 * and data registers in PCI config space
824 	 */
825 	if (EMLXS_INTR_ADD(hba) != DDI_SUCCESS) {
826 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
827 		    "Unable to add interrupt(s).");
828 
829 		rval = EIO;
830 		goto failed;
831 	}
832 
833 	EMLXS_STATE_CHANGE(hba, FC_INIT_CFGPORT);
834 
835 	/* Reuse mbq from previous mbox */
836 	bzero(mbq, sizeof (MAILBOXQ));
837 
838 	(void) emlxs_mb_config_port(hba, mbq, sli_mode, key);
839 	if (emlxs_sli3_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) != MBX_SUCCESS) {
840 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
841 		    "Unable to configure port. "
842 		    "Mailbox cmd=%x status=%x slimode=%d key=%x",
843 		    mb->mbxCommand, mb->mbxStatus, sli_mode, key);
844 
845 		for (sli_mode--; sli_mode > 0; sli_mode--) {
846 			/* Check if sli_mode is supported by this adapter */
847 			if (hba->model_info.sli_mask &
848 			    EMLXS_SLI_MASK(sli_mode)) {
849 				sli_mode_mask = EMLXS_SLI_MASK(sli_mode);
850 				break;
851 			}
852 		}
853 
854 		if (sli_mode) {
855 			fw_check = 0;
856 
857 			goto reset;
858 		}
859 
860 		hba->flag &= ~FC_SLIM2_MODE;
861 
862 		rval = EIO;
863 		goto failed;
864 	}
865 
866 	/* Check if SLI3 mode was achieved */
867 	if (mb->un.varCfgPort.rMA &&
868 	    (mb->un.varCfgPort.sli_mode == EMLXS_HBA_SLI3_MODE)) {
869 
870 		if (mb->un.varCfgPort.vpi_max > 1) {
871 			hba->flag |= FC_NPIV_ENABLED;
872 
873 			if (hba->model_info.chip >= EMLXS_SATURN_CHIP) {
874 				hba->vpi_max =
875 				    min(mb->un.varCfgPort.vpi_max,
876 				    MAX_VPORTS - 1);
877 			} else {
878 				hba->vpi_max =
879 				    min(mb->un.varCfgPort.vpi_max,
880 				    MAX_VPORTS_LIMITED - 1);
881 			}
882 		}
883 
884 #if (EMLXS_MODREV >= EMLXS_MODREV5)
885 		hba->fca_tran->fca_num_npivports =
886 		    (cfg[CFG_NPIV_ENABLE].current) ? hba->vpi_max : 0;
887 #endif /* >= EMLXS_MODREV5 */
888 
889 		if (mb->un.varCfgPort.gerbm && mb->un.varCfgPort.max_hbq) {
890 			hba->flag |= FC_HBQ_ENABLED;
891 		}
892 
893 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
894 		    "SLI3 mode: flag=%x vpi_max=%d", hba->flag, hba->vpi_max);
895 	} else {
896 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
897 		    "SLI2 mode: flag=%x", hba->flag);
898 		sli_mode = EMLXS_HBA_SLI2_MODE;
899 		sli_mode_mask = EMLXS_SLI2_MASK;
900 		hba->sli_mode = sli_mode;
901 #if (EMLXS_MODREV >= EMLXS_MODREV5)
902 		hba->fca_tran->fca_num_npivports = 0;
903 #endif /* >= EMLXS_MODREV5 */
904 
905 	}
906 
907 	/* Get and save the current firmware version (based on sli_mode) */
908 	emlxs_decode_firmware_rev(hba, vpd);
909 
910 	emlxs_pcix_mxr_update(hba, 0);
911 
912 	/* Reuse mbq from previous mbox */
913 	bzero(mbq, sizeof (MAILBOXQ));
914 
915 	emlxs_mb_read_config(hba, mbq);
916 	if (emlxs_sli3_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) != MBX_SUCCESS) {
917 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
918 		    "Unable to read configuration.  Mailbox cmd=%x status=%x",
919 		    mb->mbxCommand, mb->mbxStatus);
920 
921 		rval = EIO;
922 		goto failed;
923 	}
924 
925 	/* Save the link speed capabilities */
926 	vpd->link_speed = (uint16_t)mb->un.varRdConfig.lmt;
927 	emlxs_process_link_speed(hba);
928 
929 	/* Set the max node count */
930 	if (cfg[CFG_NUM_NODES].current > 0) {
931 		hba->max_nodes =
932 		    min(cfg[CFG_NUM_NODES].current,
933 		    mb->un.varRdConfig.max_rpi);
934 	} else {
935 		hba->max_nodes = mb->un.varRdConfig.max_rpi;
936 	}
937 
938 	/* Set the io throttle */
939 	hba->io_throttle = mb->un.varRdConfig.max_xri - IO_THROTTLE_RESERVE;
940 
941 	/* Set max_iotag */
942 	if (cfg[CFG_NUM_IOTAGS].current) {
943 		hba->max_iotag = (uint16_t)cfg[CFG_NUM_IOTAGS].current;
944 	} else {
945 		hba->max_iotag = mb->un.varRdConfig.max_xri;
946 	}
947 
948 	/* Set out-of-range iotag base */
949 	hba->fc_oor_iotag = hba->max_iotag;
950 
951 	/*
952 	 * Allocate some memory for buffers
953 	 */
954 	if (emlxs_mem_alloc_buffer(hba) == 0) {
955 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
956 		    "Unable to allocate memory buffers.");
957 
958 		EMLXS_STATE_CHANGE(hba, FC_ERROR);
959 		return (ENOMEM);
960 	}
961 
962 	/*
963 	 * Setup and issue mailbox RUN BIU DIAG command Setup test buffers
964 	 */
965 	if (((mp = (MATCHMAP *)emlxs_mem_get(hba, MEM_BUF)) == 0) ||
966 	    ((mp1 = (MATCHMAP *)emlxs_mem_get(hba, MEM_BUF)) == 0)) {
967 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
968 		    "Unable to allocate diag buffers.");
969 
970 		rval = ENOMEM;
971 		goto failed;
972 	}
973 
974 	bcopy((caddr_t)&emlxs_diag_pattern[0], (caddr_t)mp->virt,
975 	    MEM_ELSBUF_SIZE);
976 	EMLXS_MPDATA_SYNC(mp->dma_handle, 0, MEM_ELSBUF_SIZE,
977 	    DDI_DMA_SYNC_FORDEV);
978 
979 	bzero(mp1->virt, MEM_ELSBUF_SIZE);
980 	EMLXS_MPDATA_SYNC(mp1->dma_handle, 0, MEM_ELSBUF_SIZE,
981 	    DDI_DMA_SYNC_FORDEV);
982 
983 	/* Reuse mbq from previous mbox */
984 	bzero(mbq, sizeof (MAILBOXQ));
985 
986 	(void) emlxs_mb_run_biu_diag(hba, mbq, mp->phys, mp1->phys);
987 
988 	if (emlxs_sli3_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) != MBX_SUCCESS) {
989 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
990 		    "Unable to run BIU diag.  Mailbox cmd=%x status=%x",
991 		    mb->mbxCommand, mb->mbxStatus);
992 
993 		rval = EIO;
994 		goto failed;
995 	}
996 
997 	EMLXS_MPDATA_SYNC(mp1->dma_handle, 0, MEM_ELSBUF_SIZE,
998 	    DDI_DMA_SYNC_FORKERNEL);
999 
1000 #ifdef FMA_SUPPORT
1001 	if (mp->dma_handle) {
1002 		if (emlxs_fm_check_dma_handle(hba, mp->dma_handle)
1003 		    != DDI_FM_OK) {
1004 			EMLXS_MSGF(EMLXS_CONTEXT,
1005 			    &emlxs_invalid_dma_handle_msg,
1006 			    "sli3_online: hdl=%p",
1007 			    mp->dma_handle);
1008 			rval = EIO;
1009 			goto failed;
1010 		}
1011 	}
1012 
1013 	if (mp1->dma_handle) {
1014 		if (emlxs_fm_check_dma_handle(hba, mp1->dma_handle)
1015 		    != DDI_FM_OK) {
1016 			EMLXS_MSGF(EMLXS_CONTEXT,
1017 			    &emlxs_invalid_dma_handle_msg,
1018 			    "sli3_online: hdl=%p",
1019 			    mp1->dma_handle);
1020 			rval = EIO;
1021 			goto failed;
1022 		}
1023 	}
1024 #endif  /* FMA_SUPPORT */
1025 
1026 	outptr = mp->virt;
1027 	inptr = mp1->virt;
1028 
1029 	for (i = 0; i < MEM_ELSBUF_SIZE; i++) {
1030 		if (*outptr++ != *inptr++) {
1031 			outptr--;
1032 			inptr--;
1033 
1034 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
1035 			    "BIU diagnostic failed. "
1036 			    "offset %x value %x should be %x.",
1037 			    i, (uint32_t)*inptr, (uint32_t)*outptr);
1038 
1039 			rval = EIO;
1040 			goto failed;
1041 		}
1042 	}
1043 
1044 	/* Free the buffers since we were polling */
1045 	emlxs_mem_put(hba, MEM_BUF, (void *)mp);
1046 	mp = NULL;
1047 	emlxs_mem_put(hba, MEM_BUF, (void *)mp1);
1048 	mp1 = NULL;
1049 
1050 	hba->channel_fcp = FC_FCP_RING;
1051 	hba->channel_els = FC_ELS_RING;
1052 	hba->channel_ip = FC_IP_RING;
1053 	hba->channel_ct = FC_CT_RING;
1054 	hba->sli.sli3.ring_count = MAX_RINGS;
1055 
1056 	hba->channel_tx_count = 0;
1057 	hba->io_count = 0;
1058 	hba->fc_iotag = 1;
1059 
1060 	for (i = 0; i < hba->chan_count; i++) {
1061 		cp = &hba->chan[i];
1062 
1063 		/* 1 to 1 mapping between ring and channel */
1064 		cp->iopath = (void *)&hba->sli.sli3.ring[i];
1065 
1066 		cp->hba = hba;
1067 		cp->channelno = i;
1068 	}
1069 
1070 	/*
1071 	 * Setup and issue mailbox CONFIGURE RING command
1072 	 */
1073 	for (i = 0; i < (uint32_t)hba->sli.sli3.ring_count; i++) {
1074 		/*
1075 		 * Initialize cmd/rsp ring pointers
1076 		 */
1077 		rp = &hba->sli.sli3.ring[i];
1078 
1079 		/* 1 to 1 mapping between ring and channel */
1080 		rp->channelp = &hba->chan[i];
1081 
1082 		rp->hba = hba;
1083 		rp->ringno = (uint8_t)i;
1084 
1085 		rp->fc_cmdidx = 0;
1086 		rp->fc_rspidx = 0;
1087 		EMLXS_STATE_CHANGE(hba, FC_INIT_CFGRING);
1088 
1089 		/* Reuse mbq from previous mbox */
1090 		bzero(mbq, sizeof (MAILBOXQ));
1091 
1092 		emlxs_mb_config_ring(hba, i, mbq);
1093 		if (emlxs_sli3_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) !=
1094 		    MBX_SUCCESS) {
1095 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
1096 			    "Unable to configure ring. "
1097 			    "Mailbox cmd=%x status=%x",
1098 			    mb->mbxCommand, mb->mbxStatus);
1099 
1100 			rval = EIO;
1101 			goto failed;
1102 		}
1103 	}
1104 
1105 	/*
1106 	 * Setup link timers
1107 	 */
1108 	EMLXS_STATE_CHANGE(hba, FC_INIT_INITLINK);
1109 
1110 	/* Reuse mbq from previous mbox */
1111 	bzero(mbq, sizeof (MAILBOXQ));
1112 
1113 	emlxs_mb_config_link(hba, mbq);
1114 	if (emlxs_sli3_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) != MBX_SUCCESS) {
1115 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
1116 		    "Unable to configure link. Mailbox cmd=%x status=%x",
1117 		    mb->mbxCommand, mb->mbxStatus);
1118 
1119 		rval = EIO;
1120 		goto failed;
1121 	}
1122 
1123 #ifdef MAX_RRDY_SUPPORT
1124 	/* Set MAX_RRDY if one is provided */
1125 	if (cfg[CFG_MAX_RRDY].current) {
1126 
1127 		/* Reuse mbq from previous mbox */
1128 		bzero(mbq, sizeof (MAILBOXQ));
1129 
1130 		emlxs_mb_set_var(hba, (MAILBOX *)mbq, 0x00060412,
1131 		    cfg[CFG_MAX_RRDY].current);
1132 
1133 		if (emlxs_sli3_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) !=
1134 		    MBX_SUCCESS) {
1135 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
1136 			    "MAX_RRDY: Unable to set.  status=%x " \
1137 			    "value=%d",
1138 			    mb->mbxStatus, cfg[CFG_MAX_RRDY].current);
1139 		} else {
1140 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
1141 			    "MAX_RRDY: %d", cfg[CFG_MAX_RRDY].current);
1142 		}
1143 	}
1144 #endif /* MAX_RRDY_SUPPORT */
1145 
1146 	/* Reuse mbq from previous mbox */
1147 	bzero(mbq, sizeof (MAILBOXQ));
1148 
1149 	/*
1150 	 * We need to get login parameters for NID
1151 	 */
1152 	(void) emlxs_mb_read_sparam(hba, mbq);
1153 	mp = (MATCHMAP *)mbq->bp;
1154 	if (emlxs_sli3_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) != MBX_SUCCESS) {
1155 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
1156 		    "Unable to read parameters. Mailbox cmd=%x status=%x",
1157 		    mb->mbxCommand, mb->mbxStatus);
1158 
1159 		rval = EIO;
1160 		goto failed;
1161 	}
1162 
1163 	/* Free the buffer since we were polling */
1164 	emlxs_mem_put(hba, MEM_BUF, (void *)mp);
1165 	mp = NULL;
1166 
1167 	/* If no serial number in VPD data, then use the WWPN */
1168 	if (vpd->serial_num[0] == 0) {
1169 		outptr = (uint8_t *)&hba->wwpn.IEEE[0];
1170 		for (i = 0; i < 12; i++) {
1171 			status = *outptr++;
1172 			j = ((status & 0xf0) >> 4);
1173 			if (j <= 9) {
1174 				vpd->serial_num[i] =
1175 				    (char)((uint8_t)'0' + (uint8_t)j);
1176 			} else {
1177 				vpd->serial_num[i] =
1178 				    (char)((uint8_t)'A' + (uint8_t)(j - 10));
1179 			}
1180 
1181 			i++;
1182 			j = (status & 0xf);
1183 			if (j <= 9) {
1184 				vpd->serial_num[i] =
1185 				    (char)((uint8_t)'0' + (uint8_t)j);
1186 			} else {
1187 				vpd->serial_num[i] =
1188 				    (char)((uint8_t)'A' + (uint8_t)(j - 10));
1189 			}
1190 		}
1191 
1192 		/*
1193 		 * Set port number and port index to zero
1194 		 * The WWN's are unique to each port and therefore port_num
1195 		 * must equal zero. This effects the hba_fru_details structure
1196 		 * in fca_bind_port()
1197 		 */
1198 		vpd->port_num[0] = 0;
1199 		vpd->port_index = 0;
1200 	}
1201 
1202 	/*
1203 	 * Make first attempt to set a port index
1204 	 * Check if this is a multifunction adapter
1205 	 */
1206 	if ((vpd->port_index == (uint32_t)-1) &&
1207 	    (hba->model_info.chip >= EMLXS_THOR_CHIP)) {
1208 		char *buffer;
1209 		int32_t i;
1210 
1211 		/*
1212 		 * The port address looks like this:
1213 		 * 1	- for port index 0
1214 		 * 1,1	- for port index 1
1215 		 * 1,2	- for port index 2
1216 		 */
1217 		buffer = ddi_get_name_addr(hba->dip);
1218 
1219 		if (buffer) {
1220 			vpd->port_index = 0;
1221 
1222 			/* Reverse scan for a comma */
1223 			for (i = strlen(buffer) - 1; i > 0; i--) {
1224 				if (buffer[i] == ',') {
1225 					/* Comma found - set index now */
1226 					vpd->port_index =
1227 					    emlxs_strtol(&buffer[i + 1], 10);
1228 					break;
1229 				}
1230 			}
1231 		}
1232 	}
1233 
1234 	/* Make final attempt to set a port index */
1235 	if (vpd->port_index == (uint32_t)-1) {
1236 		dev_info_t *p_dip;
1237 		dev_info_t *c_dip;
1238 
1239 		p_dip = ddi_get_parent(hba->dip);
1240 		c_dip = ddi_get_child(p_dip);
1241 
1242 		vpd->port_index = 0;
1243 		while (c_dip && (hba->dip != c_dip)) {
1244 			c_dip = ddi_get_next_sibling(c_dip);
1245 			vpd->port_index++;
1246 		}
1247 	}
1248 
1249 	if (vpd->port_num[0] == 0) {
1250 		if (hba->model_info.channels == EMLXS_MULTI_CHANNEL) {
1251 			(void) snprintf(vpd->port_num,
1252 			    (sizeof (vpd->port_num)-1),
1253 			    "%d", vpd->port_index);
1254 		}
1255 	}
1256 
1257 	if (vpd->id[0] == 0) {
1258 		(void) strncpy(vpd->id, hba->model_info.model_desc,
1259 		    (sizeof (vpd->id)-1));
1260 	}
1261 
1262 	if (vpd->manufacturer[0] == 0) {
1263 		(void) strncpy(vpd->manufacturer, hba->model_info.manufacturer,
1264 		    (sizeof (vpd->manufacturer)-1));
1265 	}
1266 
1267 	if (vpd->part_num[0] == 0) {
1268 		(void) strncpy(vpd->part_num, hba->model_info.model,
1269 		    (sizeof (vpd->part_num)-1));
1270 	}
1271 
1272 	if (vpd->model_desc[0] == 0) {
1273 		(void) strncpy(vpd->model_desc, hba->model_info.model_desc,
1274 		    (sizeof (vpd->model_desc)-1));
1275 	}
1276 
1277 	if (vpd->model[0] == 0) {
1278 		(void) strncpy(vpd->model, hba->model_info.model,
1279 		    (sizeof (vpd->model)-1));
1280 	}
1281 
1282 	if (vpd->prog_types[0] == 0) {
1283 		emlxs_build_prog_types(hba, vpd);
1284 	}
1285 
1286 	/* Create the symbolic names */
1287 	(void) snprintf(hba->snn, (sizeof (hba->snn)-1),
1288 	    "Emulex %s FV%s DV%s %s",
1289 	    hba->model_info.model, hba->vpd.fw_version, emlxs_version,
1290 	    (char *)utsname.nodename);
1291 
1292 	(void) snprintf(hba->spn, (sizeof (hba->spn)-1),
1293 	    "Emulex PPN-%01x%01x:%02x:%02x:%02x:%02x:%02x:%02x:%02x",
1294 	    hba->wwpn.nameType, hba->wwpn.IEEEextMsn, hba->wwpn.IEEEextLsb,
1295 	    hba->wwpn.IEEE[0], hba->wwpn.IEEE[1], hba->wwpn.IEEE[2],
1296 	    hba->wwpn.IEEE[3], hba->wwpn.IEEE[4], hba->wwpn.IEEE[5]);
1297 
1298 	if (cfg[CFG_NETWORK_ON].current) {
1299 		if ((hba->sparam.portName.nameType != NAME_IEEE) ||
1300 		    (hba->sparam.portName.IEEEextMsn != 0) ||
1301 		    (hba->sparam.portName.IEEEextLsb != 0)) {
1302 
1303 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg,
1304 			    "WWPN doesn't conform to IP profile: "
1305 			    "nameType=%x. Disabling networking.",
1306 			    hba->sparam.portName.nameType);
1307 
1308 			cfg[CFG_NETWORK_ON].current = 0;
1309 		}
1310 	}
1311 
1312 	if (cfg[CFG_NETWORK_ON].current) {
1313 		/* Reuse mbq from previous mbox */
1314 		bzero(mbq, sizeof (MAILBOXQ));
1315 
1316 		/* Issue CONFIG FARP */
1317 		emlxs_mb_config_farp(hba, mbq);
1318 		if (emlxs_sli3_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) !=
1319 		    MBX_SUCCESS) {
1320 			/*
1321 			 * Let it go through even if failed.
1322 			 */
1323 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg,
1324 			    "Unable to configure FARP. "
1325 			    "Mailbox cmd=%x status=%x",
1326 			    mb->mbxCommand, mb->mbxStatus);
1327 		}
1328 	}
1329 #ifdef MSI_SUPPORT
1330 	/* Configure MSI map if required */
1331 	if (hba->intr_count > 1) {
1332 
1333 		if (hba->intr_type == DDI_INTR_TYPE_MSIX) {
1334 			/* always start from 0 */
1335 			hba->last_msiid = 0;
1336 		}
1337 
1338 		/* Reuse mbq from previous mbox */
1339 		bzero(mbq, sizeof (MAILBOXQ));
1340 
1341 		emlxs_mb_config_msix(hba, mbq, hba->intr_map, hba->intr_count);
1342 
1343 		if (emlxs_sli3_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) ==
1344 		    MBX_SUCCESS) {
1345 			goto msi_configured;
1346 		}
1347 
1348 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
1349 		    "Unable to config MSIX.  Mailbox cmd=0x%x status=0x%x",
1350 		    mb->mbxCommand, mb->mbxStatus);
1351 
1352 		/* Reuse mbq from previous mbox */
1353 		bzero(mbq, sizeof (MAILBOXQ));
1354 
1355 		emlxs_mb_config_msi(hba, mbq, hba->intr_map, hba->intr_count);
1356 
1357 		if (emlxs_sli3_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) ==
1358 		    MBX_SUCCESS) {
1359 			goto msi_configured;
1360 		}
1361 
1362 
1363 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
1364 		    "Unable to config MSI.  Mailbox cmd=0x%x status=0x%x",
1365 		    mb->mbxCommand, mb->mbxStatus);
1366 
1367 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
1368 		    "Attempting single interrupt mode...");
1369 
1370 		/* First cleanup old interrupts */
1371 		(void) emlxs_msi_remove(hba);
1372 		(void) emlxs_msi_uninit(hba);
1373 
1374 		status = emlxs_msi_init(hba, 1);
1375 
1376 		if (status != DDI_SUCCESS) {
1377 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
1378 			    "Unable to initialize interrupt. status=%d",
1379 			    status);
1380 
1381 			rval = EIO;
1382 			goto failed;
1383 		}
1384 
1385 		/*
1386 		 * Reset adapter - The adapter needs to be reset because
1387 		 * the bus cannot handle the MSI change without handshaking
1388 		 * with the adapter again
1389 		 */
1390 
1391 		(void) emlxs_mem_free_buffer(hba);
1392 		fw_check = 0;
1393 		goto reset;
1394 	}
1395 
1396 msi_configured:
1397 
1398 
1399 	if ((hba->intr_count >= 1) &&
1400 	    (hba->sli_mode == EMLXS_HBA_SLI3_MODE)) {
1401 		/* intr_count is a sequence of msi id */
1402 		/* Setup msi2chan[msi_id] */
1403 		for (i = 0; i < hba->intr_count; i ++) {
1404 			hba->msi2chan[i] = i;
1405 			if (i >= hba->chan_count)
1406 				hba->msi2chan[i] = (i - hba->chan_count);
1407 		}
1408 	}
1409 #endif /* MSI_SUPPORT */
1410 
1411 	/*
1412 	 * We always disable the firmware traffic cop feature
1413 	 */
1414 	if (emlxs_disable_traffic_cop) {
1415 		/* Reuse mbq from previous mbox */
1416 		bzero(mbq, sizeof (MAILBOXQ));
1417 
1418 		emlxs_disable_tc(hba, mbq);
1419 		if (emlxs_sli3_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) !=
1420 		    MBX_SUCCESS) {
1421 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
1422 			    "Unable to disable traffic cop. "
1423 			    "Mailbox cmd=%x status=%x",
1424 			    mb->mbxCommand, mb->mbxStatus);
1425 
1426 			rval = EIO;
1427 			goto failed;
1428 		}
1429 	}
1430 
1431 
1432 	/* Reuse mbq from previous mbox */
1433 	bzero(mbq, sizeof (MAILBOXQ));
1434 
1435 	/* Register for async events */
1436 	emlxs_mb_async_event(hba, mbq);
1437 	if (emlxs_sli3_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) != MBX_SUCCESS) {
1438 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
1439 		    "Async events disabled. Mailbox status=%x",
1440 		    mb->mbxStatus);
1441 	} else {
1442 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
1443 		    "Async events enabled.");
1444 		hba->flag |= FC_ASYNC_EVENTS;
1445 	}
1446 
1447 	EMLXS_STATE_CHANGE(hba, FC_LINK_DOWN);
1448 
1449 	emlxs_sli3_enable_intr(hba);
1450 
1451 	if (hba->flag & FC_HBQ_ENABLED) {
1452 		if (port->flag & EMLXS_TGT_ENABLED) {
1453 			if (emlxs_hbq_setup(hba, EMLXS_FCT_HBQ_ID)) {
1454 				EMLXS_MSGF(EMLXS_CONTEXT,
1455 				    &emlxs_init_failed_msg,
1456 				    "Unable to setup FCT HBQ.");
1457 
1458 				rval = ENOMEM;
1459 
1460 #ifdef SFCT_SUPPORT
1461 				/* Check if we can fall back to just */
1462 				/* initiator mode */
1463 				if ((hba->pm_state == EMLXS_PM_IN_ATTACH) &&
1464 				    (port->flag & EMLXS_INI_ENABLED) &&
1465 				    (cfg[CFG_DTM_ENABLE].current == 1) &&
1466 				    (cfg[CFG_TARGET_MODE].current == 0)) {
1467 
1468 					cfg[CFG_DTM_ENABLE].current = 0;
1469 
1470 					EMLXS_MSGF(EMLXS_CONTEXT,
1471 					    &emlxs_init_failed_msg,
1472 					    "Disabling dynamic target mode. "
1473 					    "Enabling initiator mode only.");
1474 
1475 					/* This will trigger the driver to */
1476 					/* reattach */
1477 					rval = EAGAIN;
1478 				}
1479 #endif /* SFCT_SUPPORT */
1480 				goto failed;
1481 			}
1482 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
1483 			    "FCT Ring: Posted %d buffers.", MEM_FCTBUF_COUNT);
1484 		}
1485 
1486 		if (cfg[CFG_NETWORK_ON].current) {
1487 			if (emlxs_hbq_setup(hba, EMLXS_IP_HBQ_ID)) {
1488 				EMLXS_MSGF(EMLXS_CONTEXT,
1489 				    &emlxs_init_failed_msg,
1490 				    "Unable to setup IP HBQ.");
1491 
1492 				rval = ENOMEM;
1493 				goto failed;
1494 			}
1495 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
1496 			    "IP  Ring: Posted %d buffers.", MEM_IPBUF_COUNT);
1497 		}
1498 
1499 		if (emlxs_hbq_setup(hba, EMLXS_ELS_HBQ_ID)) {
1500 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
1501 			    "Unable to setup ELS HBQ.");
1502 			rval = ENOMEM;
1503 			goto failed;
1504 		}
1505 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
1506 		    "ELS Ring: Posted %d buffers.", MEM_ELSBUF_COUNT);
1507 
1508 		if (emlxs_hbq_setup(hba, EMLXS_CT_HBQ_ID)) {
1509 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
1510 			    "Unable to setup CT HBQ.");
1511 
1512 			rval = ENOMEM;
1513 			goto failed;
1514 		}
1515 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
1516 		    "CT  Ring: Posted %d buffers.", MEM_CTBUF_COUNT);
1517 	} else {
1518 		if (port->flag & EMLXS_TGT_ENABLED) {
1519 			/* Post the FCT unsol buffers */
1520 			rp = &hba->sli.sli3.ring[FC_FCT_RING];
1521 			for (j = 0; j < MEM_FCTBUF_COUNT; j += 2) {
1522 				(void) emlxs_post_buffer(hba, rp, 2);
1523 			}
1524 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
1525 			    "FCP Ring: Posted %d buffers.", MEM_FCTBUF_COUNT);
1526 		}
1527 
1528 		if (cfg[CFG_NETWORK_ON].current) {
1529 			/* Post the IP unsol buffers */
1530 			rp = &hba->sli.sli3.ring[FC_IP_RING];
1531 			for (j = 0; j < MEM_IPBUF_COUNT; j += 2) {
1532 				(void) emlxs_post_buffer(hba, rp, 2);
1533 			}
1534 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
1535 			    "IP  Ring: Posted %d buffers.", MEM_IPBUF_COUNT);
1536 		}
1537 
1538 		/* Post the ELS unsol buffers */
1539 		rp = &hba->sli.sli3.ring[FC_ELS_RING];
1540 		for (j = 0; j < MEM_ELSBUF_COUNT; j += 2) {
1541 			(void) emlxs_post_buffer(hba, rp, 2);
1542 		}
1543 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
1544 		    "ELS Ring: Posted %d buffers.", MEM_ELSBUF_COUNT);
1545 
1546 
1547 		/* Post the CT unsol buffers */
1548 		rp = &hba->sli.sli3.ring[FC_CT_RING];
1549 		for (j = 0; j < MEM_CTBUF_COUNT; j += 2) {
1550 			(void) emlxs_post_buffer(hba, rp, 2);
1551 		}
1552 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
1553 		    "CT  Ring: Posted %d buffers.", MEM_CTBUF_COUNT);
1554 	}
1555 
1556 	(void) kmem_free((uint8_t *)mbq, sizeof (MAILBOXQ));
1557 
1558 	/* Check persist-linkdown */
1559 	if (cfg[CFG_PERSIST_LINKDOWN].current) {
1560 		EMLXS_STATE_CHANGE(hba, FC_LINK_DOWN_PERSIST);
1561 		return (0);
1562 	}
1563 
1564 #ifdef SFCT_SUPPORT
1565 	if ((port->mode == MODE_TARGET) &&
1566 	    !(port->fct_flags & FCT_STATE_PORT_ONLINE)) {
1567 		emlxs_enable_latt(hba);
1568 		return (0);
1569 	}
1570 #endif /* SFCT_SUPPORT */
1571 
1572 	/*
1573 	 * Setup and issue mailbox INITIALIZE LINK command
1574 	 * At this point, the interrupt will be generated by the HW
1575 	 */
1576 	mbq = (MAILBOXQ *)emlxs_mem_get(hba, MEM_MBOX);
1577 	if (mbq == NULL) {
1578 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
1579 		    "Unable to allocate mailbox buffer.");
1580 
1581 		rval = EIO;
1582 		goto failed;
1583 	}
1584 	mb = (MAILBOX *)mbq;
1585 
1586 	emlxs_mb_init_link(hba, mbq, cfg[CFG_TOPOLOGY].current,
1587 	    cfg[CFG_LINK_SPEED].current);
1588 
1589 	rval = emlxs_sli3_issue_mbox_cmd(hba, mbq, MBX_NOWAIT, 0);
1590 	if ((rval != MBX_SUCCESS) && (rval != MBX_BUSY)) {
1591 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
1592 		    "Unable to initialize link. " \
1593 		    "Mailbox cmd=%x status=%x",
1594 		    mb->mbxCommand, mb->mbxStatus);
1595 
1596 		emlxs_mem_put(hba, MEM_MBOX, (void *)mbq);
1597 		mbq = NULL;
1598 		rval = EIO;
1599 		goto failed;
1600 	}
1601 
1602 	/*
1603 	 * Enable link attention interrupt
1604 	 */
1605 	emlxs_enable_latt(hba);
1606 
1607 	/* Wait for link to come up */
1608 	i = cfg[CFG_LINKUP_DELAY].current;
1609 	while (i && (hba->state < FC_LINK_UP)) {
1610 		/* Check for hardware error */
1611 		if (hba->state == FC_ERROR) {
1612 			EMLXS_MSGF(EMLXS_CONTEXT,
1613 			    &emlxs_init_failed_msg,
1614 			    "Adapter error.");
1615 
1616 			mbq = NULL;
1617 			rval = EIO;
1618 			goto failed;
1619 		}
1620 
1621 		BUSYWAIT_MS(1000);
1622 		i--;
1623 	}
1624 
1625 	/*
1626 	 * The leadvile driver will now handle the FLOGI at the driver level
1627 	 */
1628 
1629 	return (0);
1630 
1631 failed:
1632 
1633 	EMLXS_STATE_CHANGE(hba, FC_ERROR);
1634 
1635 	if (hba->intr_flags & EMLXS_MSI_ADDED) {
1636 		(void) EMLXS_INTR_REMOVE(hba);
1637 	}
1638 
1639 	if (mp) {
1640 		emlxs_mem_put(hba, MEM_BUF, (void *)mp);
1641 		mp = NULL;
1642 	}
1643 
1644 	if (mp1) {
1645 		emlxs_mem_put(hba, MEM_BUF, (void *)mp1);
1646 		mp1 = NULL;
1647 	}
1648 
1649 	(void) emlxs_mem_free_buffer(hba);
1650 
1651 	if (mbq) {
1652 		(void) kmem_free((uint8_t *)mbq, sizeof (MAILBOXQ));
1653 		mbq = NULL;
1654 		mb = NULL;
1655 	}
1656 
1657 	if (rval == 0) {
1658 		rval = EIO;
1659 	}
1660 
1661 	return (rval);
1662 
1663 } /* emlxs_sli3_online() */
1664 
1665 
1666 /*ARGSUSED*/
1667 static void
emlxs_sli3_offline(emlxs_hba_t * hba,uint32_t reset_requested)1668 emlxs_sli3_offline(emlxs_hba_t *hba, uint32_t reset_requested)
1669 {
1670 	/* Reverse emlxs_sli3_online */
1671 
1672 	/* Kill the adapter */
1673 	emlxs_sli3_hba_kill(hba);
1674 
1675 	/* Free driver shared memory */
1676 	(void) emlxs_mem_free_buffer(hba);
1677 
1678 } /* emlxs_sli3_offline() */
1679 
1680 
1681 static int
emlxs_sli3_map_hdw(emlxs_hba_t * hba)1682 emlxs_sli3_map_hdw(emlxs_hba_t *hba)
1683 {
1684 	emlxs_port_t		*port = &PPORT;
1685 	dev_info_t		*dip;
1686 	ddi_device_acc_attr_t	dev_attr;
1687 	int			status;
1688 
1689 	dip = (dev_info_t *)hba->dip;
1690 	dev_attr = emlxs_dev_acc_attr;
1691 
1692 	if (hba->bus_type == SBUS_FC) {
1693 
1694 		if (hba->sli.sli3.slim_acc_handle == 0) {
1695 			status = ddi_regs_map_setup(dip,
1696 			    SBUS_DFLY_SLIM_RINDEX,
1697 			    (caddr_t *)&hba->sli.sli3.slim_addr,
1698 			    0, 0, &dev_attr, &hba->sli.sli3.slim_acc_handle);
1699 			if (status != DDI_SUCCESS) {
1700 				EMLXS_MSGF(EMLXS_CONTEXT,
1701 				    &emlxs_attach_failed_msg,
1702 				    "(SBUS) ddi_regs_map_setup SLIM failed. "
1703 				    "status=%x", status);
1704 				goto failed;
1705 			}
1706 		}
1707 		if (hba->sli.sli3.csr_acc_handle == 0) {
1708 			status = ddi_regs_map_setup(dip,
1709 			    SBUS_DFLY_CSR_RINDEX,
1710 			    (caddr_t *)&hba->sli.sli3.csr_addr,
1711 			    0, 0, &dev_attr, &hba->sli.sli3.csr_acc_handle);
1712 			if (status != DDI_SUCCESS) {
1713 				EMLXS_MSGF(EMLXS_CONTEXT,
1714 				    &emlxs_attach_failed_msg,
1715 				    "(SBUS) ddi_regs_map_setup DFLY CSR "
1716 				    "failed. status=%x", status);
1717 				goto failed;
1718 			}
1719 		}
1720 		if (hba->sli.sli3.sbus_flash_acc_handle == 0) {
1721 			status = ddi_regs_map_setup(dip, SBUS_FLASH_RDWR,
1722 			    (caddr_t *)&hba->sli.sli3.sbus_flash_addr, 0, 0,
1723 			    &dev_attr, &hba->sli.sli3.sbus_flash_acc_handle);
1724 			if (status != DDI_SUCCESS) {
1725 				EMLXS_MSGF(EMLXS_CONTEXT,
1726 				    &emlxs_attach_failed_msg,
1727 				    "(SBUS) ddi_regs_map_setup Fcode Flash "
1728 				    "failed. status=%x", status);
1729 				goto failed;
1730 			}
1731 		}
1732 		if (hba->sli.sli3.sbus_core_acc_handle == 0) {
1733 			status = ddi_regs_map_setup(dip, SBUS_TITAN_CORE_RINDEX,
1734 			    (caddr_t *)&hba->sli.sli3.sbus_core_addr, 0, 0,
1735 			    &dev_attr, &hba->sli.sli3.sbus_core_acc_handle);
1736 			if (status != DDI_SUCCESS) {
1737 				EMLXS_MSGF(EMLXS_CONTEXT,
1738 				    &emlxs_attach_failed_msg,
1739 				    "(SBUS) ddi_regs_map_setup TITAN CORE "
1740 				    "failed. status=%x", status);
1741 				goto failed;
1742 			}
1743 		}
1744 
1745 		if (hba->sli.sli3.sbus_csr_handle == 0) {
1746 			status = ddi_regs_map_setup(dip, SBUS_TITAN_CSR_RINDEX,
1747 			    (caddr_t *)&hba->sli.sli3.sbus_csr_addr,
1748 			    0, 0, &dev_attr, &hba->sli.sli3.sbus_csr_handle);
1749 			if (status != DDI_SUCCESS) {
1750 				EMLXS_MSGF(EMLXS_CONTEXT,
1751 				    &emlxs_attach_failed_msg,
1752 				    "(SBUS) ddi_regs_map_setup TITAN CSR "
1753 				    "failed. status=%x", status);
1754 				goto failed;
1755 			}
1756 		}
1757 	} else {	/* ****** PCI ****** */
1758 
1759 		if (hba->sli.sli3.slim_acc_handle == 0) {
1760 			status = ddi_regs_map_setup(dip, PCI_SLIM_RINDEX,
1761 			    (caddr_t *)&hba->sli.sli3.slim_addr,
1762 			    0, 0, &dev_attr, &hba->sli.sli3.slim_acc_handle);
1763 			if (status != DDI_SUCCESS) {
1764 				EMLXS_MSGF(EMLXS_CONTEXT,
1765 				    &emlxs_attach_failed_msg,
1766 				    "(PCI) ddi_regs_map_setup SLIM failed. "
1767 				    "stat=%d mem=%p attr=%p hdl=%p",
1768 				    status, &hba->sli.sli3.slim_addr, &dev_attr,
1769 				    &hba->sli.sli3.slim_acc_handle);
1770 				goto failed;
1771 			}
1772 		}
1773 
1774 		/*
1775 		 * Map in control registers, using memory-mapped version of
1776 		 * the registers rather than the I/O space-mapped registers.
1777 		 */
1778 		if (hba->sli.sli3.csr_acc_handle == 0) {
1779 			status = ddi_regs_map_setup(dip, PCI_CSR_RINDEX,
1780 			    (caddr_t *)&hba->sli.sli3.csr_addr,
1781 			    0, 0, &dev_attr, &hba->sli.sli3.csr_acc_handle);
1782 			if (status != DDI_SUCCESS) {
1783 				EMLXS_MSGF(EMLXS_CONTEXT,
1784 				    &emlxs_attach_failed_msg,
1785 				    "ddi_regs_map_setup CSR failed. status=%x",
1786 				    status);
1787 				goto failed;
1788 			}
1789 		}
1790 	}
1791 
1792 	if (hba->sli.sli3.slim2.virt == 0) {
1793 		MBUF_INFO	*buf_info;
1794 		MBUF_INFO	bufinfo;
1795 
1796 		buf_info = &bufinfo;
1797 
1798 		bzero(buf_info, sizeof (MBUF_INFO));
1799 		buf_info->size = SLI_SLIM2_SIZE;
1800 		buf_info->flags =
1801 		    FC_MBUF_DMA | FC_MBUF_SNGLSG;
1802 		buf_info->align = ddi_ptob(dip, 1L);
1803 
1804 		(void) emlxs_mem_alloc(hba, buf_info);
1805 
1806 		if (buf_info->virt == NULL) {
1807 			goto failed;
1808 		}
1809 
1810 		hba->sli.sli3.slim2.virt = buf_info->virt;
1811 		hba->sli.sli3.slim2.phys = buf_info->phys;
1812 		hba->sli.sli3.slim2.size = SLI_SLIM2_SIZE;
1813 		hba->sli.sli3.slim2.data_handle = buf_info->data_handle;
1814 		hba->sli.sli3.slim2.dma_handle = buf_info->dma_handle;
1815 		bzero((char *)hba->sli.sli3.slim2.virt, SLI_SLIM2_SIZE);
1816 	}
1817 
1818 	/* offset from beginning of register space */
1819 	hba->sli.sli3.ha_reg_addr = (uint32_t *)(hba->sli.sli3.csr_addr +
1820 	    (sizeof (uint32_t) * HA_REG_OFFSET));
1821 	hba->sli.sli3.ca_reg_addr = (uint32_t *)(hba->sli.sli3.csr_addr +
1822 	    (sizeof (uint32_t) * CA_REG_OFFSET));
1823 	hba->sli.sli3.hs_reg_addr = (uint32_t *)(hba->sli.sli3.csr_addr +
1824 	    (sizeof (uint32_t) * HS_REG_OFFSET));
1825 	hba->sli.sli3.hc_reg_addr = (uint32_t *)(hba->sli.sli3.csr_addr +
1826 	    (sizeof (uint32_t) * HC_REG_OFFSET));
1827 	hba->sli.sli3.bc_reg_addr = (uint32_t *)(hba->sli.sli3.csr_addr +
1828 	    (sizeof (uint32_t) * BC_REG_OFFSET));
1829 
1830 	if (hba->bus_type == SBUS_FC) {
1831 		/* offset from beginning of register space */
1832 		/* for TITAN registers */
1833 		hba->sli.sli3.shc_reg_addr =
1834 		    (uint32_t *)(hba->sli.sli3.sbus_csr_addr +
1835 		    (sizeof (uint32_t) * SBUS_CTRL_REG_OFFSET));
1836 		hba->sli.sli3.shs_reg_addr =
1837 		    (uint32_t *)(hba->sli.sli3.sbus_csr_addr +
1838 		    (sizeof (uint32_t) * SBUS_STAT_REG_OFFSET));
1839 		hba->sli.sli3.shu_reg_addr =
1840 		    (uint32_t *)(hba->sli.sli3.sbus_csr_addr +
1841 		    (sizeof (uint32_t) * SBUS_UPDATE_REG_OFFSET));
1842 	}
1843 	hba->chan_count = MAX_RINGS;
1844 
1845 	return (0);
1846 
1847 failed:
1848 
1849 	emlxs_sli3_unmap_hdw(hba);
1850 	return (ENOMEM);
1851 
1852 } /* emlxs_sli3_map_hdw() */
1853 
1854 
1855 static void
emlxs_sli3_unmap_hdw(emlxs_hba_t * hba)1856 emlxs_sli3_unmap_hdw(emlxs_hba_t *hba)
1857 {
1858 	MBUF_INFO	bufinfo;
1859 	MBUF_INFO	*buf_info = &bufinfo;
1860 
1861 	if (hba->sli.sli3.csr_acc_handle) {
1862 		ddi_regs_map_free(&hba->sli.sli3.csr_acc_handle);
1863 		hba->sli.sli3.csr_acc_handle = 0;
1864 	}
1865 
1866 	if (hba->sli.sli3.slim_acc_handle) {
1867 		ddi_regs_map_free(&hba->sli.sli3.slim_acc_handle);
1868 		hba->sli.sli3.slim_acc_handle = 0;
1869 	}
1870 
1871 	if (hba->sli.sli3.sbus_flash_acc_handle) {
1872 		ddi_regs_map_free(&hba->sli.sli3.sbus_flash_acc_handle);
1873 		hba->sli.sli3.sbus_flash_acc_handle = 0;
1874 	}
1875 
1876 	if (hba->sli.sli3.sbus_core_acc_handle) {
1877 		ddi_regs_map_free(&hba->sli.sli3.sbus_core_acc_handle);
1878 		hba->sli.sli3.sbus_core_acc_handle = 0;
1879 	}
1880 
1881 	if (hba->sli.sli3.sbus_csr_handle) {
1882 		ddi_regs_map_free(&hba->sli.sli3.sbus_csr_handle);
1883 		hba->sli.sli3.sbus_csr_handle = 0;
1884 	}
1885 
1886 	if (hba->sli.sli3.slim2.virt) {
1887 		bzero(buf_info, sizeof (MBUF_INFO));
1888 
1889 		if (hba->sli.sli3.slim2.phys) {
1890 			buf_info->phys = hba->sli.sli3.slim2.phys;
1891 			buf_info->data_handle = hba->sli.sli3.slim2.data_handle;
1892 			buf_info->dma_handle = hba->sli.sli3.slim2.dma_handle;
1893 			buf_info->flags = FC_MBUF_DMA;
1894 		}
1895 
1896 		buf_info->virt = hba->sli.sli3.slim2.virt;
1897 		buf_info->size = hba->sli.sli3.slim2.size;
1898 		emlxs_mem_free(hba, buf_info);
1899 
1900 		hba->sli.sli3.slim2.virt = NULL;
1901 	}
1902 
1903 
1904 	return;
1905 
1906 } /* emlxs_sli3_unmap_hdw() */
1907 
1908 
/*
 * Prepare the adapter for SLI3 operation: reset the hardware, lay out
 * the ring-to-channel mapping and unsolicited ring masks, initialize
 * the per-VPI port objects, and establish a default node count.
 *
 * Returns 0 on success, 1 on failure (reset failed or too many ring
 * mask entries were defined).
 */
static uint32_t
emlxs_sli3_hba_init(emlxs_hba_t *hba)
{
	emlxs_port_t *port = &PPORT;
	emlxs_port_t *vport;
	emlxs_config_t *cfg;
	uint16_t i;
	VPIobj_t *vpip;

	cfg = &CFG;
	i = 0;	/* running slot index into the shared ring mask tables */

	/* Restart the adapter */
	if (emlxs_sli3_hba_reset(hba, 1, 0, 0)) {
		return (1);
	}

	/* Fixed 1:1 mapping between driver channels and SLI3 rings */
	hba->channel_fcp = FC_FCP_RING;
	hba->channel_els = FC_ELS_RING;
	hba->channel_ip = FC_IP_RING;
	hba->channel_ct = FC_CT_RING;
	hba->chan_count = MAX_RINGS;
	hba->sli.sli3.ring_count = MAX_RINGS;

	/*
	 * WARNING: There is a max of 6 ring masks allowed
	 *
	 * The ring_rval/ring_rmask/ring_tval/ring_tmask tables are shared
	 * by all rings; 'i' advances through them as each ring registers
	 * its unsolicited R_CTL/TYPE match entries, so the four sections
	 * below must remain in ring order.
	 */
	/* RING 0 - FCP */
	if (port->flag & EMLXS_TGT_ENABLED) {
		/* Target mode: accept unsolicited FCP commands */
		hba->sli.sli3.ring_masks[FC_FCP_RING] = 1;
		hba->sli.sli3.ring_rval[i] = FC_FCP_CMND;
		hba->sli.sli3.ring_rmask[i] = 0;
		hba->sli.sli3.ring_tval[i] = FC_TYPE_SCSI_FCP;
		hba->sli.sli3.ring_tmask[i++] = 0xFF;
	} else {
		hba->sli.sli3.ring_masks[FC_FCP_RING] = 0;
	}

	hba->sli.sli3.ring[FC_FCP_RING].fc_numCiocb = SLIM_IOCB_CMD_R0_ENTRIES;
	hba->sli.sli3.ring[FC_FCP_RING].fc_numRiocb = SLIM_IOCB_RSP_R0_ENTRIES;

	/* RING 1 - IP */
	if (cfg[CFG_NETWORK_ON].current) {
		hba->sli.sli3.ring_masks[FC_IP_RING] = 1;
		hba->sli.sli3.ring_rval[i] = FC_UNSOL_DATA; /* Unsol Data */
		hba->sli.sli3.ring_rmask[i] = 0xFF;
		hba->sli.sli3.ring_tval[i] = FC_TYPE_IS8802_SNAP; /* LLC/SNAP */
		hba->sli.sli3.ring_tmask[i++] = 0xFF;
	} else {
		hba->sli.sli3.ring_masks[FC_IP_RING] = 0;
	}

	hba->sli.sli3.ring[FC_IP_RING].fc_numCiocb = SLIM_IOCB_CMD_R1_ENTRIES;
	hba->sli.sli3.ring[FC_IP_RING].fc_numRiocb = SLIM_IOCB_RSP_R1_ENTRIES;

	/* RING 2 - ELS */
	hba->sli.sli3.ring_masks[FC_ELS_RING] = 1;
	hba->sli.sli3.ring_rval[i] = FC_ELS_REQ;	/* ELS request/rsp */
	hba->sli.sli3.ring_rmask[i] = 0xFE;
	hba->sli.sli3.ring_tval[i] = FC_TYPE_EXTENDED_LS;	/* ELS */
	hba->sli.sli3.ring_tmask[i++] = 0xFF;

	hba->sli.sli3.ring[FC_ELS_RING].fc_numCiocb = SLIM_IOCB_CMD_R2_ENTRIES;
	hba->sli.sli3.ring[FC_ELS_RING].fc_numRiocb = SLIM_IOCB_RSP_R2_ENTRIES;

	/* RING 3 - CT */
	hba->sli.sli3.ring_masks[FC_CT_RING] = 1;
	hba->sli.sli3.ring_rval[i] = FC_UNSOL_CTL;	/* CT request/rsp */
	hba->sli.sli3.ring_rmask[i] = 0xFE;
	hba->sli.sli3.ring_tval[i] = FC_TYPE_FC_SERVICES;	/* CT */
	hba->sli.sli3.ring_tmask[i++] = 0xFF;

	hba->sli.sli3.ring[FC_CT_RING].fc_numCiocb = SLIM_IOCB_CMD_R3_ENTRIES;
	hba->sli.sli3.ring[FC_CT_RING].fc_numRiocb = SLIM_IOCB_RSP_R3_ENTRIES;

	/* Sanity check: at most 6 mask entries may have been consumed */
	if (i > 6) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_failed_msg,
		    "hba_init: Too many ring masks defined. cnt=%d", i);
		return (1);
	}

	/* Initialize all the port objects */
	hba->vpi_max = 0;
	for (i = 0; i < MAX_VPORTS; i++) {
		vport = &VPORT(i);
		vport->hba = hba;
		vport->vpi = i;

		/* Each vport carries its own VPI object, offline at first */
		vpip = &vport->VPIobj;
		vpip->index = i;
		vpip->VPI = i;
		vpip->port = vport;
		vpip->state = VPI_STATE_OFFLINE;
		vport->vpip = vpip;
	}

	/*
	 * Initialize the max_node count to a default value if needed
	 * This determines how many node objects we preallocate in the pool
	 * The actual max_nodes will be set later based on adapter info
	 */
	if (hba->max_nodes == 0) {
		if (cfg[CFG_NUM_NODES].current > 0) {
			hba->max_nodes = cfg[CFG_NUM_NODES].current;
		} else if (hba->model_info.chip >= EMLXS_SATURN_CHIP) {
			hba->max_nodes = 4096;
		} else {
			hba->max_nodes = 512;
		}
	}

	return (0);

} /* emlxs_sli3_hba_init() */
2023 
2024 
/*
 * The 'quiesce' argument selects the kill path:
 *   quiesce == 0: the call did not originate from the quiesce routine.
 *   quiesce == 1: the call originated from the quiesce routine.
 */
2029 static uint32_t
emlxs_sli3_hba_reset(emlxs_hba_t * hba,uint32_t restart,uint32_t skip_post,uint32_t quiesce)2030 emlxs_sli3_hba_reset(emlxs_hba_t *hba, uint32_t restart, uint32_t skip_post,
2031     uint32_t quiesce)
2032 {
2033 	emlxs_port_t *port = &PPORT;
2034 	MAILBOX swpmb;
2035 	MAILBOX *mb;
2036 	uint32_t *word0;
2037 	uint16_t cfg_value;
2038 	uint32_t status = 0;
2039 	uint32_t status1;
2040 	uint32_t status2;
2041 	uint32_t i;
2042 	uint32_t ready;
2043 	emlxs_port_t *vport;
2044 	RING *rp;
2045 	emlxs_config_t *cfg = &CFG;
2046 
2047 	if (!cfg[CFG_RESET_ENABLE].current) {
2048 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_reset_failed_msg,
2049 		    "Adapter reset disabled.");
2050 		EMLXS_STATE_CHANGE(hba, FC_ERROR);
2051 
2052 		return (1);
2053 	}
2054 
2055 	/* Kill the adapter first */
2056 	if (quiesce == 0) {
2057 		emlxs_sli3_hba_kill(hba);
2058 	} else {
2059 		emlxs_sli3_hba_kill4quiesce(hba);
2060 	}
2061 
2062 	if (restart) {
2063 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
2064 		    "Restarting.");
2065 		EMLXS_STATE_CHANGE(hba, FC_INIT_START);
2066 
2067 		ready = (HS_FFRDY | HS_MBRDY);
2068 	} else {
2069 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
2070 		    "Resetting.");
2071 		EMLXS_STATE_CHANGE(hba, FC_WARM_START);
2072 
2073 		ready = HS_MBRDY;
2074 	}
2075 
2076 	hba->flag &= ~(FC_SLIM2_MODE | FC_HARDWARE_ERROR);
2077 
2078 	mb = FC_SLIM1_MAILBOX(hba);
2079 	word0 = (uint32_t *)&swpmb;
2080 
2081 reset:
2082 
2083 	i = 0;
2084 
2085 	/* Save reset time */
2086 	HBASTATS.ResetTime = hba->timer_tics;
2087 
2088 	if (restart) {
2089 		/* First put restart command in mailbox */
2090 		*word0 = 0;
2091 		swpmb.mbxCommand = MBX_RESTART;
2092 		swpmb.mbxHc = 1;
2093 		WRITE_SLIM_ADDR(hba, ((volatile uint32_t *)mb), *word0);
2094 
2095 		/* Only skip post after emlxs_sli3_online is completed */
2096 		if (skip_post) {
2097 			WRITE_SLIM_ADDR(hba, (((volatile uint32_t *)mb) + 1),
2098 			    1);
2099 		} else {
2100 			WRITE_SLIM_ADDR(hba, (((volatile uint32_t *)mb) + 1),
2101 			    0);
2102 		}
2103 
2104 	}
2105 
2106 	/*
2107 	 * Turn off SERR, PERR in PCI cmd register
2108 	 */
2109 	cfg_value = ddi_get16(hba->pci_acc_handle,
2110 	    (uint16_t *)(hba->pci_addr + PCI_COMMAND_REGISTER));
2111 
2112 	ddi_put16(hba->pci_acc_handle,
2113 	    (uint16_t *)(hba->pci_addr + PCI_COMMAND_REGISTER),
2114 	    (uint16_t)(cfg_value & ~(CMD_PARITY_CHK | CMD_SERR_ENBL)));
2115 
2116 	hba->sli.sli3.hc_copy = HC_INITFF;
2117 	WRITE_CSR_REG(hba, FC_HC_REG(hba), hba->sli.sli3.hc_copy);
2118 
2119 	/* Wait 1 msec before restoring PCI config */
2120 	BUSYWAIT_MS(1);
2121 
2122 	/* Restore PCI cmd register */
2123 	ddi_put16(hba->pci_acc_handle,
2124 	    (uint16_t *)(hba->pci_addr + PCI_COMMAND_REGISTER),
2125 	    (uint16_t)cfg_value);
2126 
2127 	/* Wait 3 seconds before checking */
2128 	BUSYWAIT_MS(3000);
2129 	i += 3;
2130 
2131 	/* Wait for reset completion */
2132 	while (i < 30) {
2133 		/* Check status register to see what current state is */
2134 		status = READ_CSR_REG(hba, FC_HS_REG(hba));
2135 
2136 		/* Check to see if any errors occurred during init */
2137 		if (status & HS_FFERM) {
2138 			status1 = READ_SLIM_ADDR(hba, ((volatile uint8_t *)
2139 			    hba->sli.sli3.slim_addr + 0xa8));
2140 			status2 = READ_SLIM_ADDR(hba, ((volatile uint8_t *)
2141 			    hba->sli.sli3.slim_addr + 0xac));
2142 
2143 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_reset_failed_msg,
2144 			    "HS_FFERM: status=0x%x status1=0x%x status2=0x%x",
2145 			    status, status1, status2);
2146 
2147 			EMLXS_STATE_CHANGE(hba, FC_ERROR);
2148 			return (1);
2149 		}
2150 
2151 		if ((status & ready) == ready) {
2152 			/* Reset Done !! */
2153 			goto done;
2154 		}
2155 
2156 		/*
2157 		 * Check every 1 second for 15 seconds, then reset board
2158 		 * again (w/post), then check every 1 second for 15 * seconds.
2159 		 */
2160 		BUSYWAIT_MS(1000);
2161 		i++;
2162 
2163 		/* Reset again (w/post) at 15 seconds */
2164 		if (i == 15) {
2165 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
2166 			    "Reset failed. Retrying...");
2167 
2168 			goto reset;
2169 		}
2170 	}
2171 
2172 #ifdef FMA_SUPPORT
2173 reset_fail:
2174 #endif  /* FMA_SUPPORT */
2175 
2176 	/* Timeout occurred */
2177 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_reset_failed_msg,
2178 	    "Timeout: status=0x%x", status);
2179 	EMLXS_STATE_CHANGE(hba, FC_ERROR);
2180 
2181 	/* Log a dump event */
2182 	emlxs_log_dump_event(port, NULL, 0);
2183 
2184 	return (1);
2185 
2186 done:
2187 
2188 	/* Initialize hc_copy */
2189 	hba->sli.sli3.hc_copy = READ_CSR_REG(hba, FC_HC_REG(hba));
2190 
2191 #ifdef FMA_SUPPORT
2192 	/* Access handle validation */
2193 	if ((emlxs_fm_check_acc_handle(hba, hba->pci_acc_handle)
2194 	    != DDI_FM_OK) ||
2195 	    (emlxs_fm_check_acc_handle(hba, hba->sli.sli3.slim_acc_handle)
2196 	    != DDI_FM_OK) ||
2197 	    (emlxs_fm_check_acc_handle(hba, hba->sli.sli3.csr_acc_handle)
2198 	    != DDI_FM_OK)) {
2199 		EMLXS_MSGF(EMLXS_CONTEXT,
2200 		    &emlxs_invalid_access_handle_msg, NULL);
2201 		goto reset_fail;
2202 	}
2203 #endif  /* FMA_SUPPORT */
2204 
2205 	/* Reset the hba structure */
2206 	hba->flag &= FC_RESET_MASK;
2207 	hba->channel_tx_count = 0;
2208 	hba->io_count = 0;
2209 	hba->iodone_count = 0;
2210 	hba->topology = 0;
2211 	hba->linkspeed = 0;
2212 	hba->heartbeat_active = 0;
2213 	hba->discovery_timer = 0;
2214 	hba->linkup_timer = 0;
2215 	hba->loopback_tics = 0;
2216 
2217 	/* Reset the ring objects */
2218 	for (i = 0; i < MAX_RINGS; i++) {
2219 		rp = &hba->sli.sli3.ring[i];
2220 		rp->fc_mpon = 0;
2221 		rp->fc_mpoff = 0;
2222 	}
2223 
2224 	/* Reset the port objects */
2225 	for (i = 0; i < MAX_VPORTS; i++) {
2226 		vport = &VPORT(i);
2227 
2228 		vport->flag &= EMLXS_PORT_RESET_MASK;
2229 		vport->did = 0;
2230 		vport->prev_did = 0;
2231 		vport->lip_type = 0;
2232 		bzero(&vport->fabric_sparam, sizeof (SERV_PARM));
2233 		bzero(&vport->prev_fabric_sparam, sizeof (SERV_PARM));
2234 
2235 		bzero((caddr_t)&vport->node_base, sizeof (NODELIST));
2236 		vport->node_base.nlp_Rpi = 0;
2237 		vport->node_base.nlp_DID = 0xffffff;
2238 		vport->node_base.nlp_list_next = NULL;
2239 		vport->node_base.nlp_list_prev = NULL;
2240 		vport->node_base.nlp_active = 1;
2241 		vport->node_count = 0;
2242 
2243 		if (vport->ub_count < EMLXS_UB_TOKEN_OFFSET) {
2244 			vport->ub_count = EMLXS_UB_TOKEN_OFFSET;
2245 		}
2246 	}
2247 
2248 	return (0);
2249 
2250 } /* emlxs_sli3_hba_reset */
2251 
2252 
2253 #define	BPL_CMD		0
2254 #define	BPL_RESP	1
2255 #define	BPL_DATA	2
2256 
2257 static ULP_BDE64 *
emlxs_pkt_to_bpl(fc_packet_t * pkt,ULP_BDE64 * bpl,uint32_t bpl_type)2258 emlxs_pkt_to_bpl(fc_packet_t *pkt, ULP_BDE64 *bpl, uint32_t bpl_type)
2259 {
2260 	ddi_dma_cookie_t *cp;
2261 	uint_t	i;
2262 	int32_t	size;
2263 	uint_t	cookie_cnt;
2264 	uint8_t bdeFlags;
2265 
2266 #if (EMLXS_MODREV >= EMLXS_MODREV3)
2267 	switch (bpl_type) {
2268 	case BPL_CMD:
2269 		cp = pkt->pkt_cmd_cookie;
2270 		cookie_cnt = pkt->pkt_cmd_cookie_cnt;
2271 		size = (int32_t)pkt->pkt_cmdlen;
2272 		bdeFlags = 0;
2273 		break;
2274 
2275 	case BPL_RESP:
2276 		cp = pkt->pkt_resp_cookie;
2277 		cookie_cnt = pkt->pkt_resp_cookie_cnt;
2278 		size = (int32_t)pkt->pkt_rsplen;
2279 		bdeFlags = BUFF_USE_RCV;
2280 		break;
2281 
2282 
2283 	case BPL_DATA:
2284 		cp = pkt->pkt_data_cookie;
2285 		cookie_cnt = pkt->pkt_data_cookie_cnt;
2286 		size = (int32_t)pkt->pkt_datalen;
2287 		bdeFlags = (pkt->pkt_tran_type == FC_PKT_FCP_READ) ?
2288 		    BUFF_USE_RCV : 0;
2289 		break;
2290 
2291 	default:
2292 		return (NULL);
2293 	}
2294 
2295 #else
2296 	switch (bpl_type) {
2297 	case BPL_CMD:
2298 		cp = &pkt->pkt_cmd_cookie;
2299 		cookie_cnt = 1;
2300 		size = (int32_t)pkt->pkt_cmdlen;
2301 		bdeFlags = 0;
2302 		break;
2303 
2304 	case BPL_RESP:
2305 		cp = &pkt->pkt_resp_cookie;
2306 		cookie_cnt = 1;
2307 		size = (int32_t)pkt->pkt_rsplen;
2308 		bdeFlags = BUFF_USE_RCV;
2309 		break;
2310 
2311 
2312 	case BPL_DATA:
2313 		cp = &pkt->pkt_data_cookie;
2314 		cookie_cnt = 1;
2315 		size = (int32_t)pkt->pkt_datalen;
2316 		bdeFlags = (pkt->pkt_tran_type == FC_PKT_FCP_READ) ?
2317 		    BUFF_USE_RCV : 0;
2318 		break;
2319 
2320 	default:
2321 		return (NULL);
2322 	}
2323 #endif	/* >= EMLXS_MODREV3 */
2324 
2325 	for (i = 0; i < cookie_cnt && size > 0; i++, cp++) {
2326 		bpl->addrHigh =
2327 		    BE_SWAP32(PADDR_HI(cp->dmac_laddress));
2328 		bpl->addrLow =
2329 		    BE_SWAP32(PADDR_LO(cp->dmac_laddress));
2330 		bpl->tus.f.bdeSize = MIN(size, cp->dmac_size);
2331 		bpl->tus.f.bdeFlags = bdeFlags;
2332 		bpl->tus.w = BE_SWAP32(bpl->tus.w);
2333 
2334 		bpl++;
2335 		size -= cp->dmac_size;
2336 	}
2337 
2338 	return (bpl);
2339 
2340 } /* emlxs_pkt_to_bpl */
2341 
2342 
/*
 * emlxs_sli2_bde_setup() - Build an SLI-2 style buffer descriptor list (BPL)
 * for an I/O and link it into the IOCB via a single BDL descriptor.
 *
 * A BPL buffer is taken from the per-iotag bpl_table when present, otherwise
 * from the MEM_BPL pool.  Which payloads (CMD/RSP/DATA) are mapped depends
 * on the ring the I/O is destined for and the packet transfer type.
 *
 * Returns 0 on success; 1 if no BPL buffer was available or a payload
 * could not be mapped.
 */
static uint32_t
emlxs_sli2_bde_setup(emlxs_port_t *port, emlxs_buf_t *sbp)
{
	emlxs_hba_t	*hba = HBA;
	fc_packet_t	*pkt;
	MATCHMAP	*bmp;
	ULP_BDE64	*bpl;
	uint64_t	bp;
	IOCB		*iocb;
	IOCBQ		*iocbq;
	CHANNEL	*cp;
	uint32_t	data_cookie_cnt;
	uint32_t	channelno;

	cp = sbp->channel;
	iocb = (IOCB *) & sbp->iocbq;
	pkt = PRIV2PKT(sbp);

	/* Prefer the preallocated per-iotag BPL; fall back to the pool */
	if (hba->sli.sli3.bpl_table) {
		bmp = hba->sli.sli3.bpl_table[sbp->iotag];
	} else {
		bmp = (MATCHMAP *)emlxs_mem_get(hba, MEM_BPL);
	}

	if (!bmp) {
		return (1);
	}

	sbp->bmp = bmp;
	bpl = (ULP_BDE64 *)bmp->virt;
	bp = bmp->phys;

#if (EMLXS_MODREV >= EMLXS_MODREV3)
	data_cookie_cnt = pkt->pkt_data_cookie_cnt;
#else
	data_cookie_cnt = 1;
#endif	/* >= EMLXS_MODREV3 */

	iocbq = &sbp->iocbq;

	/* FCP commands always use the FCP ring regardless of channel */
	channelno = (iocbq->flag & IOCB_FCP_CMD)? FC_FCP_RING:cp->channelno;
	switch (channelno) {
		case FC_FCP_RING:

		/* CMD payload */
		bpl = emlxs_pkt_to_bpl(pkt, bpl, BPL_CMD);
		if (! bpl) {
			return (1);
		}

		/* Check if response & data payloads are needed */
		if (pkt->pkt_tran_type == FC_PKT_OUTBOUND) {
			break;
		}

		/* RSP payload */
		bpl = emlxs_pkt_to_bpl(pkt, bpl, BPL_RESP);
		if (! bpl) {
			return (1);
		}

		/* Check if data payload is needed */
		if ((pkt->pkt_datalen == 0) ||
		    (data_cookie_cnt == 0)) {
			break;
		}

		/* DATA payload */
		bpl = emlxs_pkt_to_bpl(pkt, bpl, BPL_DATA);
		if (! bpl) {
			return (1);
		}
		break;

	case FC_IP_RING:

		/* CMD payload */
		bpl = emlxs_pkt_to_bpl(pkt, bpl, BPL_CMD);
		if (! bpl) {
			return (1);
		}
		break;

	case FC_ELS_RING:

		/* CMD payload */
		bpl = emlxs_pkt_to_bpl(pkt, bpl, BPL_CMD);
		if (! bpl) {
			return (1);
		}

		/* Check if response payload is needed */
		if (pkt->pkt_tran_type == FC_PKT_OUTBOUND) {
			break;
		}

		/* RSP payload */
		bpl = emlxs_pkt_to_bpl(pkt, bpl, BPL_RESP);
		if (! bpl) {
			return (1);
		}
		break;

	case FC_CT_RING:

		/* CMD payload */
		bpl = emlxs_pkt_to_bpl(pkt, bpl, BPL_CMD);
		if (! bpl) {
			return (1);
		}

		/* Check if response payload is needed */
		/* (MENLO-type CT frames expect a response even outbound) */
		if ((pkt->pkt_tran_type == FC_PKT_OUTBOUND) &&
		    (pkt->pkt_cmd_fhdr.type != EMLXS_MENLO_TYPE)) {
			break;
		}

		/* RSP payload */
		bpl = emlxs_pkt_to_bpl(pkt, bpl, BPL_RESP);
		if (! bpl) {
			return (1);
		}
		break;

	}

	/* Point the IOCB's BDL descriptor at the BPL just built; */
	/* bdeSize is the number of bytes of BPL entries written */
	iocb->un.genreq64.bdl.bdeFlags = BUFF_TYPE_BDL;
	iocb->un.genreq64.bdl.addrHigh = PADDR_HI(bp);
	iocb->un.genreq64.bdl.addrLow  = PADDR_LO(bp);
	iocb->un.genreq64.bdl.bdeSize  =
	    (uint32_t)(((uintptr_t)bpl - (uintptr_t)bmp->virt) & 0xFFFFFFFF);
	iocb->ULPBDECOUNT = 1;
	iocb->ULPLE = 1;

	return (0);

} /* emlxs_sli2_bde_setup */
2480 
2481 
/*
 * emlxs_sli3_bde_setup() - Build buffer descriptors for an I/O using SLI-3
 * embedded BDEs directly in the IOCB (no external BPL buffer).
 *
 * This fast path only works when the CMD and RSP payloads each fit in a
 * single DMA cookie and the total descriptor count fits within
 * SLI3_MAX_BDE; otherwise the I/O falls back to emlxs_sli2_bde_setup().
 *
 * Returns 0 on success; non-zero only from the SLI-2 fallback path.
 */
static uint32_t
emlxs_sli3_bde_setup(emlxs_port_t *port, emlxs_buf_t *sbp)
{
	ddi_dma_cookie_t *cp_cmd;
	ddi_dma_cookie_t *cp_resp;
	ddi_dma_cookie_t *cp_data;
	fc_packet_t	*pkt;
	ULP_BDE64	*bde;
	int		data_cookie_cnt;
	uint32_t	i;
	uint32_t	channelno;
	IOCB		*iocb;
	IOCBQ		*iocbq;
	CHANNEL		*cp;

	pkt = PRIV2PKT(sbp);
#if (EMLXS_MODREV >= EMLXS_MODREV3)
	/* Too many cookies for embedded BDEs: use the SLI-2 BPL path */
	if ((pkt->pkt_cmd_cookie_cnt > 1) ||
	    (pkt->pkt_resp_cookie_cnt > 1) ||
	    ((pkt->pkt_cmd_cookie_cnt + pkt->pkt_resp_cookie_cnt +
	    pkt->pkt_data_cookie_cnt) > SLI3_MAX_BDE)) {
		i = emlxs_sli2_bde_setup(port, sbp);
		return (i);
	}

	cp_cmd = pkt->pkt_cmd_cookie;
	cp_resp = pkt->pkt_resp_cookie;
	cp_data = pkt->pkt_data_cookie;
	data_cookie_cnt = pkt->pkt_data_cookie_cnt;
#else
	cp_cmd  = &pkt->pkt_cmd_cookie;
	cp_resp = &pkt->pkt_resp_cookie;
	cp_data = &pkt->pkt_data_cookie;
	data_cookie_cnt = 1;
#endif	/* >= EMLXS_MODREV3 */

	cp = sbp->channel;
	iocbq = &sbp->iocbq;
	iocb = (IOCB *)iocbq;
	iocb->unsli3.ext_iocb.ebde_count = 0;

	/* FCP commands always use the FCP ring regardless of channel */
	channelno = (iocbq->flag & IOCB_FCP_CMD)? FC_FCP_RING:cp->channelno;
	switch (channelno) {
	case FC_FCP_RING:
		/* CMD payload */
		iocb->un.fcpi64.bdl.addrHigh =
		    PADDR_HI(cp_cmd->dmac_laddress);
		iocb->un.fcpi64.bdl.addrLow =
		    PADDR_LO(cp_cmd->dmac_laddress);
		iocb->un.fcpi64.bdl.bdeSize  = pkt->pkt_cmdlen;
		iocb->un.fcpi64.bdl.bdeFlags = 0;

		/* Check if a response & data payload are needed */
		if (pkt->pkt_tran_type == FC_PKT_OUTBOUND) {
			break;
		}

		/* RSP payload goes in the first extended BDE */
		iocb->unsli3.ext_iocb.ebde1.addrHigh =
		    PADDR_HI(cp_resp->dmac_laddress);
		iocb->unsli3.ext_iocb.ebde1.addrLow =
		    PADDR_LO(cp_resp->dmac_laddress);
		iocb->unsli3.ext_iocb.ebde1.tus.f.bdeSize = pkt->pkt_rsplen;
		iocb->unsli3.ext_iocb.ebde1.tus.f.bdeFlags = 0;
		iocb->unsli3.ext_iocb.ebde_count = 1;

		/* Check if a data payload is needed */
		if ((pkt->pkt_datalen == 0) ||
		    (data_cookie_cnt == 0)) {
			break;
		}

		/* DATA payload: one extended BDE per cookie, from ebde2 on */
		bde = (ULP_BDE64 *)&iocb->unsli3.ext_iocb.ebde2;
		for (i = 0; i < data_cookie_cnt; i++) {
			bde->addrHigh = PADDR_HI(cp_data->dmac_laddress);
			bde->addrLow = PADDR_LO(cp_data->dmac_laddress);
			bde->tus.f.bdeSize = cp_data->dmac_size;
			bde->tus.f.bdeFlags = 0;
			cp_data++;
			bde++;
		}
		iocb->unsli3.ext_iocb.ebde_count += data_cookie_cnt;

		break;

	case FC_IP_RING:
		/* CMD payload */
		iocb->un.xseq64.bdl.addrHigh =
		    PADDR_HI(cp_cmd->dmac_laddress);
		iocb->un.xseq64.bdl.addrLow =
		    PADDR_LO(cp_cmd->dmac_laddress);
		iocb->un.xseq64.bdl.bdeSize  = pkt->pkt_cmdlen;
		iocb->un.xseq64.bdl.bdeFlags = 0;

		break;

	case FC_ELS_RING:

		/* CMD payload */
		iocb->un.elsreq64.bdl.addrHigh =
		    PADDR_HI(cp_cmd->dmac_laddress);
		iocb->un.elsreq64.bdl.addrLow =
		    PADDR_LO(cp_cmd->dmac_laddress);
		iocb->un.elsreq64.bdl.bdeSize  = pkt->pkt_cmdlen;
		iocb->un.elsreq64.bdl.bdeFlags = 0;

		/* Check if a response payload is needed */
		if (pkt->pkt_tran_type == FC_PKT_OUTBOUND) {
			break;
		}

		/* RSP payload */
		iocb->unsli3.ext_iocb.ebde1.addrHigh =
		    PADDR_HI(cp_resp->dmac_laddress);
		iocb->unsli3.ext_iocb.ebde1.addrLow =
		    PADDR_LO(cp_resp->dmac_laddress);
		iocb->unsli3.ext_iocb.ebde1.tus.f.bdeSize = pkt->pkt_rsplen;
		iocb->unsli3.ext_iocb.ebde1.tus.f.bdeFlags = BUFF_USE_RCV;
		iocb->unsli3.ext_iocb.ebde_count = 1;
		break;

	case FC_CT_RING:

		/* CMD payload */
		iocb->un.genreq64.bdl.addrHigh =
		    PADDR_HI(cp_cmd->dmac_laddress);
		iocb->un.genreq64.bdl.addrLow =
		    PADDR_LO(cp_cmd->dmac_laddress);
		iocb->un.genreq64.bdl.bdeSize  = pkt->pkt_cmdlen;
		iocb->un.genreq64.bdl.bdeFlags = 0;

		/* Check if a response payload is needed */
		/* (MENLO-type CT frames expect a response even outbound) */
		if ((pkt->pkt_tran_type == FC_PKT_OUTBOUND) &&
		    (pkt->pkt_cmd_fhdr.type != EMLXS_MENLO_TYPE)) {
			break;
		}

		/* RSP payload */
		iocb->unsli3.ext_iocb.ebde1.addrHigh =
		    PADDR_HI(cp_resp->dmac_laddress);
		iocb->unsli3.ext_iocb.ebde1.addrLow =
		    PADDR_LO(cp_resp->dmac_laddress);
		iocb->unsli3.ext_iocb.ebde1.tus.f.bdeSize = pkt->pkt_rsplen;
		iocb->unsli3.ext_iocb.ebde1.tus.f.bdeFlags = BUFF_USE_RCV;
		iocb->unsli3.ext_iocb.ebde_count = 1;
		break;
	}

	/* Embedded BDEs do not use the legacy BDL count/LE fields */
	iocb->ULPBDECOUNT = 0;
	iocb->ULPLE = 0;

	return (0);

} /* emlxs_sli3_bde_setup */
2637 
2638 
/* Only used for FCP Data xfers */
#ifdef SFCT_SUPPORT
/*ARGSUSED*/
/*
 * emlxs_sli2_fct_bde_setup() - Build the SLI-2 BPL for an FCP target (FCT)
 * data transfer.
 *
 * When no target data buffer is attached, the FCPT64 descriptor fields are
 * zeroed and 0 is returned.  Otherwise a single-entry BPL describing the
 * whole data buffer is built and linked into the IOCB via a BDL descriptor.
 *
 * Returns 0 on success; 1 if no BPL buffer could be obtained.
 */
static uint32_t
emlxs_sli2_fct_bde_setup(emlxs_port_t *port, emlxs_buf_t *sbp)
{
	emlxs_hba_t *hba = HBA;
	scsi_task_t *fct_task;
	MATCHMAP *bmp;
	ULP_BDE64 *bpl;
	uint64_t bp;
	uint8_t bdeFlags;
	IOCB *iocb;
	uint32_t size;
	MATCHMAP *mp;

	iocb = (IOCB *)&sbp->iocbq.iocb;
	sbp->bmp = NULL;

	/* No data buffer: present an empty FCPT64 descriptor */
	if (!sbp->fct_buf) {
		iocb->un.fcpt64.bdl.addrHigh = 0;
		iocb->un.fcpt64.bdl.addrLow = 0;
		iocb->un.fcpt64.bdl.bdeSize = 0;
		iocb->un.fcpt64.bdl.bdeFlags = 0;
		iocb->un.fcpt64.fcpt_Offset = 0;
		iocb->un.fcpt64.fcpt_Length = 0;
		iocb->ULPBDECOUNT = 0;
		iocb->ULPLE = 1;
		return (0);
	}

	/* Prefer the preallocated per-iotag BPL; fall back to the pool */
	if (hba->sli.sli3.bpl_table) {
		bmp = hba->sli.sli3.bpl_table[sbp->iotag];
	} else {
		bmp = (MATCHMAP *)emlxs_mem_get(hba, MEM_BPL);
	}

	if (!bmp) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_fct_error_msg,
		    "fct_sli2_bde_setup: Unable to BPL buffer. iotag=%d",
		    sbp->iotag);

		/* Leave a safe, empty descriptor behind on failure */
		iocb->un.fcpt64.bdl.addrHigh = 0;
		iocb->un.fcpt64.bdl.addrLow = 0;
		iocb->un.fcpt64.bdl.bdeSize = 0;
		iocb->un.fcpt64.bdl.bdeFlags = 0;
		iocb->un.fcpt64.fcpt_Offset = 0;
		iocb->un.fcpt64.fcpt_Length = 0;
		iocb->ULPBDECOUNT = 0;
		iocb->ULPLE = 1;
		return (1);
	}

	bpl = (ULP_BDE64 *)bmp->virt;
	bp = bmp->phys;

	fct_task = (scsi_task_t *)sbp->fct_cmd->cmd_specific;

	size = sbp->fct_buf->db_data_size;
	mp = (MATCHMAP *)sbp->fct_buf->db_port_private;

	/* Inbound (write) data uses the receive-buffer flag */
	bdeFlags = (fct_task->task_flags & TF_WRITE_DATA) ? BUFF_USE_RCV : 0;

	/* Init the buffer list (single entry covering the whole buffer) */
	bpl->addrHigh = BE_SWAP32(PADDR_HI(mp->phys));
	bpl->addrLow = BE_SWAP32(PADDR_LO(mp->phys));
	bpl->tus.f.bdeSize = size;
	bpl->tus.f.bdeFlags = bdeFlags;
	bpl->tus.w = BE_SWAP32(bpl->tus.w);

	/* Init the IOCB */
	iocb->un.fcpt64.bdl.addrHigh = PADDR_HI(bp);
	iocb->un.fcpt64.bdl.addrLow = PADDR_LO(bp);
	iocb->un.fcpt64.bdl.bdeSize = sizeof (ULP_BDE64);
	iocb->un.fcpt64.bdl.bdeFlags = BUFF_TYPE_BDL;

	iocb->un.fcpt64.fcpt_Length =
	    (fct_task->task_flags & TF_WRITE_DATA) ? size : 0;
	iocb->un.fcpt64.fcpt_Offset = 0;

	iocb->ULPBDECOUNT = 1;
	iocb->ULPLE = 1;
	sbp->bmp = bmp;

	return (0);

} /* emlxs_sli2_fct_bde_setup */
#endif /* SFCT_SUPPORT */
2727 
2728 
#ifdef SFCT_SUPPORT
/*ARGSUSED*/
/*
 * emlxs_sli3_fct_bde_setup() - Build the data descriptor for an SLI-3 FCP
 * target (FCT) transfer using the IOCB's embedded BDE.
 *
 * With no target buffer attached, the FCPT64 BDL and transfer fields are
 * zeroed so the IOCB carries no data descriptor.  Otherwise the buffer's
 * DMA address and length are placed directly in the first BDE; no extended
 * BDEs are used.  Always returns 0.
 */
static uint32_t
emlxs_sli3_fct_bde_setup(emlxs_port_t *port, emlxs_buf_t *sbp)
{
	IOCB		*cmd;
	scsi_task_t	*task;
	MATCHMAP	*dbuf;
	uint32_t	dir_flags;
	uint32_t	xfer_len;

	cmd = (IOCB *)&sbp->iocbq;

	if (sbp->fct_buf == NULL) {
		/* No data phase: present an empty descriptor */
		cmd->un.fcpt64.bdl.addrHigh = 0;
		cmd->un.fcpt64.bdl.addrLow = 0;
		cmd->un.fcpt64.bdl.bdeSize = 0;
		cmd->un.fcpt64.bdl.bdeFlags = 0;
		cmd->un.fcpt64.fcpt_Offset = 0;
		cmd->un.fcpt64.fcpt_Length = 0;
		cmd->ULPBDECOUNT = 0;
		cmd->ULPLE = 0;
		cmd->unsli3.ext_iocb.ebde_count = 0;
		return (0);
	}

	task = (scsi_task_t *)sbp->fct_cmd->cmd_specific;
	dbuf = (MATCHMAP *)sbp->fct_buf->db_port_private;
	xfer_len = sbp->fct_buf->db_data_size;

	/* Inbound (write) data uses the receive-buffer flag */
	dir_flags = (task->task_flags & TF_WRITE_DATA) ? BUFF_USE_RCV : 0;

	/* First (and only) BDE points at the whole data buffer */
	cmd->un.fcpt64.bdl.addrHigh = PADDR_HI(dbuf->phys);
	cmd->un.fcpt64.bdl.addrLow = PADDR_LO(dbuf->phys);
	cmd->un.fcpt64.bdl.bdeSize = xfer_len;
	cmd->un.fcpt64.bdl.bdeFlags = dir_flags;

	cmd->unsli3.ext_iocb.ebde_count = 0;
	cmd->un.fcpt64.fcpt_Length =
	    (task->task_flags & TF_WRITE_DATA) ? xfer_len : 0;
	cmd->un.fcpt64.fcpt_Offset = 0;

	cmd->ULPBDECOUNT = 0;
	cmd->ULPLE = 0;

	return (0);

} /* emlxs_sli3_fct_bde_setup */
#endif /* SFCT_SUPPORT */
2780 
2781 
2782 static void
emlxs_sli3_issue_iocb_cmd(emlxs_hba_t * hba,CHANNEL * cp,IOCBQ * iocbq)2783 emlxs_sli3_issue_iocb_cmd(emlxs_hba_t *hba, CHANNEL *cp, IOCBQ *iocbq)
2784 {
2785 #ifdef FMA_SUPPORT
2786 	emlxs_port_t *port = &PPORT;
2787 #endif	/* FMA_SUPPORT */
2788 	PGP *pgp;
2789 	emlxs_buf_t *sbp;
2790 	SLIM2 *slim2p = (SLIM2 *)hba->sli.sli3.slim2.virt;
2791 	RING *rp;
2792 	uint32_t nextIdx;
2793 	uint32_t status;
2794 	void *ioa2;
2795 	off_t offset;
2796 	uint32_t count = 0;
2797 	uint32_t flag;
2798 	uint32_t channelno;
2799 	int32_t throttle;
2800 #ifdef NODE_THROTTLE_SUPPORT
2801 	int32_t node_throttle;
2802 	NODELIST *marked_node = NULL;
2803 #endif /* NODE_THROTTLE_SUPPORT */
2804 
2805 	channelno = cp->channelno;
2806 	rp = (RING *)cp->iopath;
2807 
2808 	throttle = 0;
2809 
2810 	/* Check if FCP ring and adapter is not ready */
2811 	/* We may use any ring for FCP_CMD */
2812 	if (iocbq && (iocbq->flag & IOCB_FCP_CMD) && (hba->state != FC_READY)) {
2813 		if (!(iocbq->flag & IOCB_SPECIAL) || !iocbq->port ||
2814 		    (((emlxs_port_t *)iocbq->port)->mode == MODE_INITIATOR)) {
2815 			emlxs_tx_put(iocbq, 1);
2816 			return;
2817 		}
2818 	}
2819 
2820 	/* Attempt to acquire CMD_RING lock */
2821 	if (mutex_tryenter(&EMLXS_CMD_RING_LOCK(channelno)) == 0) {
2822 		/* Queue it for later */
2823 		if (iocbq) {
2824 			if ((hba->io_count -
2825 			    hba->channel_tx_count) > 10) {
2826 				emlxs_tx_put(iocbq, 1);
2827 				return;
2828 			} else {
2829 
2830 				/*
2831 				 * EMLXS_MSGF(EMLXS_CONTEXT,
2832 				 * &emlxs_ring_watchdog_msg,
2833 				 * "%s host=%d port=%d cnt=%d,%d  RACE
2834 				 * CONDITION3 DETECTED.",
2835 				 * emlxs_ring_xlate(channelno),
2836 				 * rp->fc_cmdidx, rp->fc_port_cmdidx,
2837 				 * hba->channel_tx_count,
2838 				 * hba->io_count);
2839 				 */
2840 				mutex_enter(&EMLXS_CMD_RING_LOCK(channelno));
2841 			}
2842 		} else {
2843 			return;
2844 		}
2845 	}
2846 	/* CMD_RING_LOCK acquired */
2847 
2848 	/* Throttle check only applies to non special iocb */
2849 	if (iocbq && (!(iocbq->flag & IOCB_SPECIAL))) {
2850 		/* Check if HBA is full */
2851 		throttle = hba->io_throttle - hba->io_active;
2852 		if (throttle <= 0) {
2853 			/* Hitting adapter throttle limit */
2854 			/* Queue it for later */
2855 			if (iocbq) {
2856 				emlxs_tx_put(iocbq, 1);
2857 			}
2858 
2859 			goto busy;
2860 		}
2861 	}
2862 
2863 	/* Read adapter's get index */
2864 	pgp = (PGP *)
2865 	    &((SLIM2 *)hba->sli.sli3.slim2.virt)->mbx.us.s2.port[channelno];
2866 	offset =
2867 	    (off_t)((uint64_t)((unsigned long)&(pgp->cmdGetInx)) -
2868 	    (uint64_t)((unsigned long)hba->sli.sli3.slim2.virt));
2869 	EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.dma_handle, offset, 4,
2870 	    DDI_DMA_SYNC_FORKERNEL);
2871 	rp->fc_port_cmdidx = BE_SWAP32(pgp->cmdGetInx);
2872 
2873 	/* Calculate the next put index */
2874 	nextIdx =
2875 	    (rp->fc_cmdidx + 1 >= rp->fc_numCiocb) ? 0 : rp->fc_cmdidx + 1;
2876 
2877 	/* Check if ring is full */
2878 	if (nextIdx == rp->fc_port_cmdidx) {
2879 		/* Try one more time */
2880 		EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.dma_handle, offset, 4,
2881 		    DDI_DMA_SYNC_FORKERNEL);
2882 		rp->fc_port_cmdidx = BE_SWAP32(pgp->cmdGetInx);
2883 
2884 		if (nextIdx == rp->fc_port_cmdidx) {
2885 			/* Queue it for later */
2886 			if (iocbq) {
2887 				emlxs_tx_put(iocbq, 1);
2888 			}
2889 
2890 			goto busy;
2891 		}
2892 	}
2893 
2894 	/*
2895 	 * We have a command ring slot available
2896 	 * Make sure we have an iocb to send
2897 	 */
2898 	if (iocbq) {
2899 		mutex_enter(&EMLXS_TX_CHANNEL_LOCK);
2900 
2901 		/* Check if the ring already has iocb's waiting */
2902 		if (cp->nodeq.q_first != NULL) {
2903 			/* Put the current iocbq on the tx queue */
2904 			emlxs_tx_put(iocbq, 0);
2905 
2906 			/*
2907 			 * Attempt to replace it with the next iocbq
2908 			 * in the tx queue
2909 			 */
2910 			iocbq = emlxs_tx_get(cp, 0);
2911 		}
2912 
2913 		mutex_exit(&EMLXS_TX_CHANNEL_LOCK);
2914 	} else {
2915 		/* Try to get the next iocb on the tx queue */
2916 		iocbq = emlxs_tx_get(cp, 1);
2917 	}
2918 
2919 sendit:
2920 	count = 0;
2921 
2922 	/* Process each iocbq */
2923 	while (iocbq) {
2924 		sbp = iocbq->sbp;
2925 
2926 #ifdef NODE_THROTTLE_SUPPORT
2927 		if (sbp && sbp->node && sbp->node->io_throttle) {
2928 			node_throttle = sbp->node->io_throttle -
2929 			    sbp->node->io_active;
2930 			if (node_throttle <= 0) {
2931 				/* Node is busy */
2932 				/* Queue this iocb and get next iocb from */
2933 				/* channel */
2934 
2935 				if (!marked_node) {
2936 					marked_node = sbp->node;
2937 				}
2938 
2939 				mutex_enter(&EMLXS_TX_CHANNEL_LOCK);
2940 				emlxs_tx_put(iocbq, 0);
2941 
2942 				if (cp->nodeq.q_first == marked_node) {
2943 					mutex_exit(&EMLXS_TX_CHANNEL_LOCK);
2944 					goto busy;
2945 				}
2946 
2947 				iocbq = emlxs_tx_get(cp, 0);
2948 				mutex_exit(&EMLXS_TX_CHANNEL_LOCK);
2949 				continue;
2950 			}
2951 		}
2952 		marked_node = 0;
2953 #endif /* NODE_THROTTLE_SUPPORT */
2954 
2955 		if (sbp && (sbp->pkt_flags & PACKET_DELAY_REQUIRED)) {
2956 			/*
2957 			 * Update adapter if needed, since we are about to
2958 			 * delay here
2959 			 */
2960 			if (count) {
2961 				count = 0;
2962 
2963 				/* Update the adapter's cmd put index */
2964 				if (hba->bus_type == SBUS_FC) {
2965 					slim2p->mbx.us.s2.host[channelno].
2966 					    cmdPutInx =
2967 					    BE_SWAP32(rp->fc_cmdidx);
2968 
2969 					/* DMA sync the index for the adapter */
2970 					offset = (off_t)
2971 					    ((uint64_t)
2972 					    ((unsigned long)&(slim2p->mbx.us.
2973 					    s2.host[channelno].cmdPutInx)) -
2974 					    (uint64_t)((unsigned long)slim2p));
2975 					EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.
2976 					    dma_handle, offset, 4,
2977 					    DDI_DMA_SYNC_FORDEV);
2978 				} else {
2979 					ioa2 = (void *)
2980 					    ((char *)hba->sli.sli3.slim_addr +
2981 					    hba->sli.sli3.hgp_ring_offset +
2982 					    ((channelno * 2) *
2983 					    sizeof (uint32_t)));
2984 					WRITE_SLIM_ADDR(hba,
2985 					    (volatile uint32_t *)ioa2,
2986 					    rp->fc_cmdidx);
2987 				}
2988 
2989 				status = (CA_R0ATT << (channelno * 4));
2990 				WRITE_CSR_REG(hba, FC_CA_REG(hba),
2991 				    (volatile uint32_t)status);
2992 
2993 			}
2994 			/* Perform delay */
2995 			if ((channelno == FC_ELS_RING) &&
2996 			    !(iocbq->flag & IOCB_FCP_CMD)) {
2997 				drv_usecwait(100000);
2998 			} else {
2999 				drv_usecwait(20000);
3000 			}
3001 		}
3002 
3003 		/*
3004 		 * At this point, we have a command ring slot available
3005 		 * and an iocb to send
3006 		 */
3007 		flag =  iocbq->flag;
3008 
3009 		/* Send the iocb */
3010 		emlxs_sli3_issue_iocb(hba, rp, iocbq);
3011 		/*
3012 		 * After this, the sbp / iocb should not be
3013 		 * accessed in the xmit path.
3014 		 */
3015 
3016 		count++;
3017 		if (iocbq && (!(flag & IOCB_SPECIAL))) {
3018 			/* Check if HBA is full */
3019 			throttle = hba->io_throttle - hba->io_active;
3020 			if (throttle <= 0) {
3021 				goto busy;
3022 			}
3023 		}
3024 
3025 		/* Calculate the next put index */
3026 		nextIdx =
3027 		    (rp->fc_cmdidx + 1 >=
3028 		    rp->fc_numCiocb) ? 0 : rp->fc_cmdidx + 1;
3029 
3030 		/* Check if ring is full */
3031 		if (nextIdx == rp->fc_port_cmdidx) {
3032 			/* Try one more time */
3033 			EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.dma_handle,
3034 			    offset, 4, DDI_DMA_SYNC_FORKERNEL);
3035 			rp->fc_port_cmdidx = BE_SWAP32(pgp->cmdGetInx);
3036 
3037 			if (nextIdx == rp->fc_port_cmdidx) {
3038 				goto busy;
3039 			}
3040 		}
3041 
3042 		/* Get the next iocb from the tx queue if there is one */
3043 		iocbq = emlxs_tx_get(cp, 1);
3044 	}
3045 
3046 	if (count) {
3047 		/* Update the adapter's cmd put index */
3048 		if (hba->bus_type == SBUS_FC) {
3049 			slim2p->mbx.us.s2.host[channelno].
3050 			    cmdPutInx = BE_SWAP32(rp->fc_cmdidx);
3051 
3052 			/* DMA sync the index for the adapter */
3053 			offset = (off_t)
3054 			    ((uint64_t)((unsigned long)&(slim2p->mbx.us.s2.
3055 			    host[channelno].cmdPutInx)) -
3056 			    (uint64_t)((unsigned long)slim2p));
3057 			EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.dma_handle,
3058 			    offset, 4, DDI_DMA_SYNC_FORDEV);
3059 		} else {
3060 			ioa2 =
3061 			    (void *)((char *)hba->sli.sli3.slim_addr +
3062 			    hba->sli.sli3.hgp_ring_offset +
3063 			    ((channelno * 2) * sizeof (uint32_t)));
3064 			WRITE_SLIM_ADDR(hba, (volatile uint32_t *)ioa2,
3065 			    rp->fc_cmdidx);
3066 		}
3067 
3068 		status = (CA_R0ATT << (channelno * 4));
3069 		WRITE_CSR_REG(hba, FC_CA_REG(hba),
3070 		    (volatile uint32_t)status);
3071 
3072 		/* Check tx queue one more time before releasing */
3073 		if ((iocbq = emlxs_tx_get(cp, 1))) {
3074 			/*
3075 			 * EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_ring_watchdog_msg,
3076 			 * "%s host=%d port=%d   RACE CONDITION1
3077 			 * DETECTED.", emlxs_ring_xlate(channelno),
3078 			 * rp->fc_cmdidx, rp->fc_port_cmdidx);
3079 			 */
3080 			goto sendit;
3081 		}
3082 	}
3083 
3084 #ifdef FMA_SUPPORT
3085 	/* Access handle validation */
3086 	EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli3.slim_acc_handle);
3087 	EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli3.csr_acc_handle);
3088 #endif  /* FMA_SUPPORT */
3089 
3090 	mutex_exit(&EMLXS_CMD_RING_LOCK(channelno));
3091 
3092 	return;
3093 
3094 busy:
3095 
3096 	/*
3097 	 * Set ring to SET R0CE_REQ in Chip Att register.
3098 	 * Chip will tell us when an entry is freed.
3099 	 */
3100 	if (count) {
3101 		/* Update the adapter's cmd put index */
3102 		if (hba->bus_type == SBUS_FC) {
3103 			slim2p->mbx.us.s2.host[channelno].cmdPutInx =
3104 			    BE_SWAP32(rp->fc_cmdidx);
3105 
3106 			/* DMA sync the index for the adapter */
3107 			offset = (off_t)
3108 			    ((uint64_t)((unsigned long)&(slim2p->mbx.us.s2.
3109 			    host[