1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at
9 * http://www.opensource.org/licenses/cddl1.txt.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
22 /*
23 * Copyright (c) 2004-2012 Emulex. All rights reserved.
24 * Use is subject to license terms.
25 * Copyright 2018 OmniOS Community Edition (OmniOSce) Association.
26 * Copyright 2020 RackTop Systems, Inc.
27 */
28
29 #include <emlxs.h>
30
31
32 /* Required for EMLXS_CONTEXT in EMLXS_MSGF calls */
33 EMLXS_MSG_DEF(EMLXS_SLI4_C);
34
35 static int emlxs_sli4_init_extents(emlxs_hba_t *hba,
36 MAILBOXQ *mbq);
37 static uint32_t emlxs_sli4_read_status(emlxs_hba_t *hba);
38
39 static int emlxs_init_bootstrap_mb(emlxs_hba_t *hba);
40
41 static uint32_t emlxs_sli4_read_sema(emlxs_hba_t *hba);
42
43 static uint32_t emlxs_sli4_read_mbdb(emlxs_hba_t *hba);
44
45 static void emlxs_sli4_write_mbdb(emlxs_hba_t *hba, uint64_t phys,
46 boolean_t high);
47
48 static void emlxs_sli4_write_wqdb(emlxs_hba_t *hba, uint16_t qid,
49 uint_t posted, uint_t index);
50
51 static void emlxs_sli4_write_mqdb(emlxs_hba_t *hba, uint16_t qid,
52 uint_t count);
53
54 static void emlxs_sli4_write_rqdb(emlxs_hba_t *hba, uint16_t qid,
55 uint_t count);
56
57 static void emlxs_sli4_write_eqdb(emlxs_hba_t *hba, uint16_t qid,
58 uint32_t count, boolean_t arm);
59 static void emlxs_sli4_write_cqdb(emlxs_hba_t *hba, uint16_t qid,
60 uint32_t count, boolean_t arm);
61
62 static int emlxs_sli4_create_queues(emlxs_hba_t *hba,
63 MAILBOXQ *mbq);
64 static int emlxs_sli4_post_hdr_tmplates(emlxs_hba_t *hba,
65 MAILBOXQ *mbq);
66 static int emlxs_sli4_post_sgl_pages(emlxs_hba_t *hba,
67 MAILBOXQ *mbq);
68
69 static int emlxs_sli4_read_eq(emlxs_hba_t *hba, EQ_DESC_t *eq);
70
71 static int emlxs_sli4_map_hdw(emlxs_hba_t *hba);
72
73 static void emlxs_sli4_unmap_hdw(emlxs_hba_t *hba);
74
75 static int32_t emlxs_sli4_online(emlxs_hba_t *hba);
76
77 static void emlxs_sli4_offline(emlxs_hba_t *hba,
78 uint32_t reset_requested);
79
80 static uint32_t emlxs_sli4_hba_reset(emlxs_hba_t *hba, uint32_t restart,
81 uint32_t skip_post, uint32_t quiesce);
82 static void emlxs_sli4_hba_kill(emlxs_hba_t *hba);
83
84 static uint32_t emlxs_sli4_hba_init(emlxs_hba_t *hba);
85
86 static uint32_t emlxs_sli4_bde_setup(emlxs_port_t *port,
87 emlxs_buf_t *sbp);
88
89 static void emlxs_sli4_issue_iocb_cmd(emlxs_hba_t *hba,
90 CHANNEL *cp, IOCBQ *iocb_cmd);
91 static uint32_t emlxs_sli4_issue_mbox_cmd(emlxs_hba_t *hba,
92 MAILBOXQ *mbq, int32_t flg, uint32_t tmo);
93 static uint32_t emlxs_sli4_issue_mbox_cmd4quiesce(emlxs_hba_t *hba,
94 MAILBOXQ *mbq, int32_t flg, uint32_t tmo);
95 #ifdef SFCT_SUPPORT
96 static uint32_t emlxs_sli4_prep_fct_iocb(emlxs_port_t *port,
97 emlxs_buf_t *cmd_sbp, int channel);
98 static uint32_t emlxs_sli4_fct_bde_setup(emlxs_port_t *port,
99 emlxs_buf_t *sbp);
100 #endif /* SFCT_SUPPORT */
101
102 static uint32_t emlxs_sli4_prep_fcp_iocb(emlxs_port_t *port,
103 emlxs_buf_t *sbp, int ring);
104 static uint32_t emlxs_sli4_prep_ip_iocb(emlxs_port_t *port,
105 emlxs_buf_t *sbp);
106 static uint32_t emlxs_sli4_prep_els_iocb(emlxs_port_t *port,
107 emlxs_buf_t *sbp);
108 static uint32_t emlxs_sli4_prep_ct_iocb(emlxs_port_t *port,
109 emlxs_buf_t *sbp);
110 static void emlxs_sli4_poll_intr(emlxs_hba_t *hba);
111 static int32_t emlxs_sli4_intx_intr(char *arg);
112
113 #ifdef MSI_SUPPORT
114 static uint32_t emlxs_sli4_msi_intr(char *arg1, char *arg2);
115 #endif /* MSI_SUPPORT */
116
117 static void emlxs_sli4_resource_free(emlxs_hba_t *hba);
118
119 static int emlxs_sli4_resource_alloc(emlxs_hba_t *hba);
120 extern void emlxs_sli4_zero_queue_stat(emlxs_hba_t *hba);
121
122 static XRIobj_t *emlxs_sli4_alloc_xri(emlxs_port_t *port,
123 emlxs_buf_t *sbp, RPIobj_t *rpip,
124 uint32_t type);
125 static void emlxs_sli4_enable_intr(emlxs_hba_t *hba);
126
127 static void emlxs_sli4_disable_intr(emlxs_hba_t *hba, uint32_t att);
128
129 static void emlxs_sli4_timer(emlxs_hba_t *hba);
130
131 static void emlxs_sli4_timer_check_mbox(emlxs_hba_t *hba);
132
133 static void emlxs_sli4_poll_erratt(emlxs_hba_t *hba);
134
135 extern XRIobj_t *emlxs_sli4_reserve_xri(emlxs_port_t *port,
136 RPIobj_t *rpip, uint32_t type, uint16_t rx_id);
137 static int emlxs_check_hdw_ready(emlxs_hba_t *);
138
139 static uint32_t emlxs_sli4_reg_did(emlxs_port_t *port,
140 uint32_t did, SERV_PARM *param,
141 emlxs_buf_t *sbp, fc_unsol_buf_t *ubp,
142 IOCBQ *iocbq);
143
144 static uint32_t emlxs_sli4_unreg_node(emlxs_port_t *port,
145 emlxs_node_t *node, emlxs_buf_t *sbp,
146 fc_unsol_buf_t *ubp, IOCBQ *iocbq);
147
148 static void emlxs_sli4_handle_fc_link_att(emlxs_hba_t *hba,
149 CQE_ASYNC_t *cqe);
150 static void emlxs_sli4_handle_fcoe_link_event(emlxs_hba_t *hba,
151 CQE_ASYNC_t *cqe);
152
153
154 static uint16_t emlxs_sli4_rqid_to_index(emlxs_hba_t *hba,
155 uint16_t rqid);
156 static uint16_t emlxs_sli4_wqid_to_index(emlxs_hba_t *hba,
157 uint16_t wqid);
158 static uint16_t emlxs_sli4_cqid_to_index(emlxs_hba_t *hba,
159 uint16_t cqid);
160
161 /* Define SLI4 API functions */
162 emlxs_sli_api_t emlxs_sli4_api = {
163 emlxs_sli4_map_hdw,
164 emlxs_sli4_unmap_hdw,
165 emlxs_sli4_online,
166 emlxs_sli4_offline,
167 emlxs_sli4_hba_reset,
168 emlxs_sli4_hba_kill,
169 emlxs_sli4_issue_iocb_cmd,
170 emlxs_sli4_issue_mbox_cmd,
171 #ifdef SFCT_SUPPORT
172 emlxs_sli4_prep_fct_iocb,
173 #else
174 NULL,
175 #endif /* SFCT_SUPPORT */
176 emlxs_sli4_prep_fcp_iocb,
177 emlxs_sli4_prep_ip_iocb,
178 emlxs_sli4_prep_els_iocb,
179 emlxs_sli4_prep_ct_iocb,
180 emlxs_sli4_poll_intr,
181 emlxs_sli4_intx_intr,
182 emlxs_sli4_msi_intr,
183 emlxs_sli4_disable_intr,
184 emlxs_sli4_timer,
185 emlxs_sli4_poll_erratt,
186 emlxs_sli4_reg_did,
187 emlxs_sli4_unreg_node
188 };
189
190
191 /* ************************************************************************** */
192
193 static void
emlxs_sli4_set_default_params(emlxs_hba_t * hba)194 emlxs_sli4_set_default_params(emlxs_hba_t *hba)
195 {
196 emlxs_port_t *port = &PPORT;
197
198 bzero((char *)&hba->sli.sli4.param, sizeof (sli_params_t));
199
200 hba->sli.sli4.param.ProtocolType = 0x3; /* FC/FCoE */
201
202 hba->sli.sli4.param.SliHint2 = 0;
203 hba->sli.sli4.param.SliHint1 = 0;
204 hba->sli.sli4.param.IfType = 0;
205 hba->sli.sli4.param.SliFamily = 0;
206 hba->sli.sli4.param.Revision = 0x4; /* SLI4 */
207 hba->sli.sli4.param.FT = 0;
208
209 hba->sli.sli4.param.EqeCntMethod = 0x1; /* Bit pattern */
210 hba->sli.sli4.param.EqPageSize = 0x1; /* 4096 */
211 hba->sli.sli4.param.EqeSize = 0x1; /* 4 byte */
212 hba->sli.sli4.param.EqPageCnt = 8;
213 hba->sli.sli4.param.EqeCntMask = 0x1F; /* 256-4096 elements */
214
215 hba->sli.sli4.param.CqeCntMethod = 0x1; /* Bit pattern */
216 hba->sli.sli4.param.CqPageSize = 0x1; /* 4096 */
217 hba->sli.sli4.param.CQV = 0;
218 hba->sli.sli4.param.CqeSize = 0x3; /* 16 byte */
219 hba->sli.sli4.param.CqPageCnt = 4;
220 hba->sli.sli4.param.CqeCntMask = 0x70; /* 256-1024 elements */
221
222 hba->sli.sli4.param.MqeCntMethod = 0x1; /* Bit pattern */
223 hba->sli.sli4.param.MqPageSize = 0x1; /* 4096 */
224 hba->sli.sli4.param.MQV = 0;
225 hba->sli.sli4.param.MqPageCnt = 8;
226 hba->sli.sli4.param.MqeCntMask = 0x0F; /* 16-128 elements */
227
228 hba->sli.sli4.param.WqeCntMethod = 0; /* Page Count */
229 hba->sli.sli4.param.WqPageSize = 0x1; /* 4096 */
230 hba->sli.sli4.param.WQV = 0;
231 hba->sli.sli4.param.WqeSize = 0x5; /* 64 byte */
232 hba->sli.sli4.param.WqPageCnt = 4;
233 hba->sli.sli4.param.WqeCntMask = 0x10; /* 256 elements */
234
235 hba->sli.sli4.param.RqeCntMethod = 0; /* Page Count */
236 hba->sli.sli4.param.RqPageSize = 0x1; /* 4096 */
237 hba->sli.sli4.param.RQV = 0;
238 hba->sli.sli4.param.RqeSize = 0x2; /* 8 byte */
239 hba->sli.sli4.param.RqPageCnt = 8;
240 hba->sli.sli4.param.RqDbWin = 1;
241 hba->sli.sli4.param.RqeCntMask = 0x100; /* 4096 elements */
242
243 hba->sli.sli4.param.Loopback = 0xf; /* unsupported */
244 hba->sli.sli4.param.PHWQ = 0;
245 hba->sli.sli4.param.PHON = 0;
246 hba->sli.sli4.param.TRIR = 0;
247 hba->sli.sli4.param.TRTY = 0;
248 hba->sli.sli4.param.TCCA = 0;
249 hba->sli.sli4.param.MWQE = 0;
250 hba->sli.sli4.param.ASSI = 0;
251 hba->sli.sli4.param.TERP = 0;
252 hba->sli.sli4.param.TGT = 0;
253 hba->sli.sli4.param.AREG = 0;
254 hba->sli.sli4.param.FBRR = 0;
255 hba->sli.sli4.param.SGLR = 1;
256 hba->sli.sli4.param.HDRR = 1;
257 hba->sli.sli4.param.EXT = 0;
258 hba->sli.sli4.param.FCOE = 1;
259
260 hba->sli.sli4.param.SgeLength = (64 * 1024);
261 hba->sli.sli4.param.SglAlign = 0x7 /* 4096 */;
262 hba->sli.sli4.param.SglPageSize = 0x1; /* 4096 */
263 hba->sli.sli4.param.SglPageCnt = 2;
264
265 hba->sli.sli4.param.MinRqSize = 128;
266 hba->sli.sli4.param.MaxRqSize = 2048;
267
268 hba->sli.sli4.param.RPIMax = 0x3ff;
269 hba->sli.sli4.param.XRIMax = 0x3ff;
270 hba->sli.sli4.param.VFIMax = 0xff;
271 hba->sli.sli4.param.VPIMax = 0xff;
272
273 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
274 "Default SLI4 parameters set.");
275
276 } /* emlxs_sli4_set_default_params() */
277
278
279 /*
280 * emlxs_sli4_online()
281 *
282 * This routine will start initialization of the SLI4 HBA.
283 */
284 static int32_t
emlxs_sli4_online(emlxs_hba_t *hba)
286 {
287 emlxs_port_t *port = &PPORT;
288 emlxs_config_t *cfg;
289 emlxs_vpd_t *vpd;
290 MAILBOXQ *mbq = NULL;
291 MAILBOX4 *mb = NULL;
292 MATCHMAP *mp = NULL;
293 uint32_t i;
294 uint32_t j;
295 uint32_t rval = 0;
296 uint8_t *vpd_data;
297 uint32_t sli_mode;
298 uint8_t *outptr;
299 uint32_t status;
300 uint32_t fw_check;
301 uint32_t kern_update = 0;
302 emlxs_firmware_t hba_fw;
303 emlxs_firmware_t *fw;
304 uint16_t ssvid;
305 char buf[64];
306
307 cfg = &CFG;
308 vpd = &VPD;
309
310 sli_mode = EMLXS_HBA_SLI4_MODE;
311 hba->sli_mode = sli_mode;
312
313 /* Set the fw_check flag */
314 fw_check = cfg[CFG_FW_CHECK].current;
315
316 if ((fw_check & 0x04) ||
317 (hba->fw_flag & FW_UPDATE_KERNEL)) {
318 kern_update = 1;
319 }
320
321 hba->mbox_queue_flag = 0;
322 hba->fc_edtov = FF_DEF_EDTOV;
323 hba->fc_ratov = FF_DEF_RATOV;
324 hba->fc_altov = FF_DEF_ALTOV;
325 hba->fc_arbtov = FF_DEF_ARBTOV;
326
327 /* Networking not supported */
328 if (cfg[CFG_NETWORK_ON].current) {
329 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg,
330 "Networking is not supported in SLI4, turning it off");
331 cfg[CFG_NETWORK_ON].current = 0;
332 }
333
334 hba->chan_count = hba->intr_count * cfg[CFG_NUM_WQ].current;
335 if (hba->chan_count > MAX_CHANNEL) {
336 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
337 "Max channels exceeded, dropping num-wq from %d to 1",
338 cfg[CFG_NUM_WQ].current);
339 cfg[CFG_NUM_WQ].current = 1;
340 hba->chan_count = hba->intr_count * cfg[CFG_NUM_WQ].current;
341 }
342 hba->channel_fcp = 0; /* First channel */
343
344 /* Default channel for everything else is the last channel */
345 hba->channel_ip = hba->chan_count - 1;
346 hba->channel_els = hba->chan_count - 1;
347 hba->channel_ct = hba->chan_count - 1;
348
349 hba->fc_iotag = 1;
350 hba->io_count = 0;
351 hba->channel_tx_count = 0;
352
353 /* Initialize the local dump region buffer */
354 bzero(&hba->sli.sli4.dump_region, sizeof (MBUF_INFO));
355 hba->sli.sli4.dump_region.size = EMLXS_DUMP_REGION_SIZE;
356 hba->sli.sli4.dump_region.flags = FC_MBUF_DMA | FC_MBUF_SNGLSG;
357 hba->sli.sli4.dump_region.align = ddi_ptob(hba->dip, 1L);
358
359 (void) emlxs_mem_alloc(hba, &hba->sli.sli4.dump_region);
360
361 if (hba->sli.sli4.dump_region.virt == NULL) {
362 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
363 "Unable to allocate dump region buffer.");
364
365 return (ENOMEM);
366 }
367
368 /*
369 * Get a buffer which will be used repeatedly for mailbox commands
370 */
371 mbq = (MAILBOXQ *) kmem_zalloc((sizeof (MAILBOXQ)), KM_SLEEP);
372
373 mb = (MAILBOX4 *)mbq;
374
375 reset:
376 /* Reset & Initialize the adapter */
377 if (emlxs_sli4_hba_init(hba)) {
378 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
379 "Unable to init hba.");
380
381 rval = EIO;
382 goto failed1;
383 }
384
385 #ifdef FMA_SUPPORT
386 /* Access handle validation */
387 switch (hba->sli_intf & SLI_INTF_IF_TYPE_MASK) {
388 case SLI_INTF_IF_TYPE_2:
389 if ((emlxs_fm_check_acc_handle(hba,
390 hba->pci_acc_handle) != DDI_FM_OK) ||
391 (emlxs_fm_check_acc_handle(hba,
392 hba->sli.sli4.bar0_acc_handle) != DDI_FM_OK)) {
393 EMLXS_MSGF(EMLXS_CONTEXT,
394 &emlxs_invalid_access_handle_msg, NULL);
395
396 rval = EIO;
397 goto failed1;
398 }
399 break;
400
401 default :
402 if ((emlxs_fm_check_acc_handle(hba,
403 hba->pci_acc_handle) != DDI_FM_OK) ||
404 (emlxs_fm_check_acc_handle(hba,
405 hba->sli.sli4.bar1_acc_handle) != DDI_FM_OK) ||
406 (emlxs_fm_check_acc_handle(hba,
407 hba->sli.sli4.bar2_acc_handle) != DDI_FM_OK)) {
408 EMLXS_MSGF(EMLXS_CONTEXT,
409 &emlxs_invalid_access_handle_msg, NULL);
410
411 rval = EIO;
412 goto failed1;
413 }
414 break;
415 }
416 #endif /* FMA_SUPPORT */
417
418 /*
419 * Setup and issue mailbox READ REV command
420 */
421 vpd->opFwRev = 0;
422 vpd->postKernRev = 0;
423 vpd->sli1FwRev = 0;
424 vpd->sli2FwRev = 0;
425 vpd->sli3FwRev = 0;
426 vpd->sli4FwRev = 0;
427
428 vpd->postKernName[0] = 0;
429 vpd->opFwName[0] = 0;
430 vpd->sli1FwName[0] = 0;
431 vpd->sli2FwName[0] = 0;
432 vpd->sli3FwName[0] = 0;
433 vpd->sli4FwName[0] = 0;
434
435 vpd->opFwLabel[0] = 0;
436 vpd->sli1FwLabel[0] = 0;
437 vpd->sli2FwLabel[0] = 0;
438 vpd->sli3FwLabel[0] = 0;
439 vpd->sli4FwLabel[0] = 0;
440
441 EMLXS_STATE_CHANGE(hba, FC_INIT_REV);
442
443 emlxs_mb_get_sli4_params(hba, mbq);
444 if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) != MBX_SUCCESS) {
445 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
446 "Unable to read parameters. Mailbox cmd=%x status=%x",
447 mb->mbxCommand, mb->mbxStatus);
448
449 /* Set param defaults */
450 emlxs_sli4_set_default_params(hba);
451
452 } else {
453 /* Save parameters */
454 bcopy((char *)&mb->un.varSLIConfig.payload,
455 (char *)&hba->sli.sli4.param, sizeof (sli_params_t));
456
457 emlxs_data_dump(port, "SLI_PARMS",
458 (uint32_t *)&hba->sli.sli4.param,
459 sizeof (sli_params_t), 0);
460 }
461
462 /* Reuse mbq from previous mbox */
463 bzero(mbq, sizeof (MAILBOXQ));
464
465 emlxs_mb_get_port_name(hba, mbq);
466 if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) != MBX_SUCCESS) {
467 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
468 "Unable to get port names. Mailbox cmd=%x status=%x",
469 mb->mbxCommand, mb->mbxStatus);
470
471 bzero(hba->sli.sli4.port_name,
472 sizeof (hba->sli.sli4.port_name));
473 } else {
474 /* Save port names */
475 bcopy((char *)&mb->un.varSLIConfig.payload,
476 (char *)&hba->sli.sli4.port_name,
477 sizeof (hba->sli.sli4.port_name));
478 }
479
480 /* Reuse mbq from previous mbox */
481 bzero(mbq, sizeof (MAILBOXQ));
482
483 emlxs_mb_read_rev(hba, mbq, 0);
484 if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) != MBX_SUCCESS) {
485 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
486 "Unable to read rev. Mailbox cmd=%x status=%x",
487 mb->mbxCommand, mb->mbxStatus);
488
489 rval = EIO;
490 goto failed1;
491
492 }
493
494 emlxs_data_dump(port, "RD_REV", (uint32_t *)mb, 18, 0);
495 if (mb->un.varRdRev4.sliLevel != 4) {
496 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
497 "Invalid read rev Version for SLI4: 0x%x",
498 mb->un.varRdRev4.sliLevel);
499
500 rval = EIO;
501 goto failed1;
502 }
503
504 switch (mb->un.varRdRev4.dcbxMode) {
505 case EMLXS_DCBX_MODE_CIN: /* Mapped to nonFIP mode */
506 hba->flag &= ~FC_FIP_SUPPORTED;
507 break;
508
509 case EMLXS_DCBX_MODE_CEE: /* Mapped to FIP mode */
510 hba->flag |= FC_FIP_SUPPORTED;
511 break;
512
513 default:
514 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
515 "Invalid read rev dcbx mode for SLI4: 0x%x",
516 mb->un.varRdRev4.dcbxMode);
517
518 rval = EIO;
519 goto failed1;
520 }
521
522 /* Set FC/FCoE mode */
523 if (mb->un.varRdRev4.FCoE) {
524 hba->sli.sli4.flag |= EMLXS_SLI4_FCOE_MODE;
525 } else {
526 hba->sli.sli4.flag &= ~EMLXS_SLI4_FCOE_MODE;
527 }
528
529 /* Save information as VPD data */
530 vpd->rBit = 1;
531
532 vpd->sli4FwRev = (mb->un.varRdRev4.ULPFwId);
533 bcopy((char *)mb->un.varRdRev4.ULPFwName, vpd->sli4FwName, 16);
534
535 vpd->opFwRev = (mb->un.varRdRev4.ULPFwId);
536 bcopy((char *)mb->un.varRdRev4.ULPFwName, vpd->opFwName, 16);
537
538 vpd->postKernRev = (mb->un.varRdRev4.ARMFwId);
539 bcopy((char *)mb->un.varRdRev4.ARMFwName, vpd->postKernName, 16);
540
541 vpd->biuRev = mb->un.varRdRev4.HwRev1;
542 vpd->fcphHigh = mb->un.varRdRev4.fcphHigh;
543 vpd->fcphLow = mb->un.varRdRev4.fcphLow;
544 vpd->feaLevelHigh = mb->un.varRdRev4.feaLevelHigh;
545 vpd->feaLevelLow = mb->un.varRdRev4.feaLevelLow;
546
547 /* Decode FW labels */
548 if ((hba->model_info.chip & EMLXS_LANCER_CHIPS) != 0) {
549 bcopy(vpd->postKernName, vpd->sli4FwName, 16);
550 }
551 emlxs_decode_label(vpd->sli4FwName, vpd->sli4FwName, 0,
552 sizeof (vpd->sli4FwName));
553 emlxs_decode_label(vpd->opFwName, vpd->opFwName, 0,
554 sizeof (vpd->opFwName));
555 emlxs_decode_label(vpd->postKernName, vpd->postKernName, 0,
556 sizeof (vpd->postKernName));
557
558 if (hba->model_info.chip == EMLXS_BE2_CHIP) {
559 (void) strlcpy(vpd->sli4FwLabel, "be2.ufi",
560 sizeof (vpd->sli4FwLabel));
561 } else if (hba->model_info.chip == EMLXS_BE3_CHIP) {
562 (void) strlcpy(vpd->sli4FwLabel, "be3.ufi",
563 sizeof (vpd->sli4FwLabel));
564 } else if (hba->model_info.chip == EMLXS_LANCER_CHIP) {
565 (void) strlcpy(vpd->sli4FwLabel, "xe201.grp",
566 sizeof (vpd->sli4FwLabel));
567 } else if (hba->model_info.chip == EMLXS_LANCERG6_CHIP) {
568 (void) strlcpy(vpd->sli4FwLabel, "xe501.grp",
569 sizeof (vpd->sli4FwLabel));
570 } else {
571 (void) strlcpy(vpd->sli4FwLabel, "sli4.fw",
572 sizeof (vpd->sli4FwLabel));
573 }
574
575 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
576 "VPD ULP:%08x %s ARM:%08x %s f:%d %d %d %d : dcbx %d",
577 vpd->opFwRev, vpd->opFwName, vpd->postKernRev, vpd->postKernName,
578 vpd->fcphHigh, vpd->fcphLow, vpd->feaLevelHigh, vpd->feaLevelLow,
579 mb->un.varRdRev4.dcbxMode);
580
581 /* No key information is needed for SLI4 products */
582
583 /* Get adapter VPD information */
584 vpd->port_index = (uint32_t)-1;
585
586 /* Reuse mbq from previous mbox */
587 bzero(mbq, sizeof (MAILBOXQ));
588
589 emlxs_mb_dump_vpd(hba, mbq, 0);
590 vpd_data = hba->sli.sli4.dump_region.virt;
591
592 if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) !=
593 MBX_SUCCESS) {
594 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
595 "No VPD found. status=%x", mb->mbxStatus);
596 } else {
597 EMLXS_MSGF(EMLXS_CONTEXT,
598 &emlxs_init_debug_msg,
599 "VPD dumped. rsp_cnt=%d status=%x",
600 mb->un.varDmp4.rsp_cnt, mb->mbxStatus);
601
602 if (mb->un.varDmp4.rsp_cnt) {
603 EMLXS_MPDATA_SYNC(hba->sli.sli4.dump_region.dma_handle,
604 0, mb->un.varDmp4.rsp_cnt, DDI_DMA_SYNC_FORKERNEL);
605
606 #ifdef FMA_SUPPORT
607 if (hba->sli.sli4.dump_region.dma_handle) {
608 if (emlxs_fm_check_dma_handle(hba,
609 hba->sli.sli4.dump_region.dma_handle)
610 != DDI_FM_OK) {
611 EMLXS_MSGF(EMLXS_CONTEXT,
612 &emlxs_invalid_dma_handle_msg,
613 "sli4_online: hdl=%p",
614 hba->sli.sli4.dump_region.
615 dma_handle);
616 rval = EIO;
617 goto failed1;
618 }
619 }
620 #endif /* FMA_SUPPORT */
621
622 }
623 }
624
625 if (vpd_data[0]) {
626 (void) emlxs_parse_vpd(hba, (uint8_t *)vpd_data,
627 mb->un.varDmp4.rsp_cnt);
628
629 /*
630 * If there is a VPD part number, and it does not
631 * match the current default HBA model info,
632 * replace the default data with an entry that
633 * does match.
634 *
635 * After emlxs_parse_vpd model holds the VPD value
636 * for V2 and part_num hold the value for PN. These
637 * 2 values are NOT necessarily the same.
638 */
639
640 rval = 0;
641 if ((vpd->model[0] != 0) &&
642 (strcmp(&vpd->model[0], hba->model_info.model) != 0)) {
643
644 /* First scan for a V2 match */
645
646 for (i = 1; i < emlxs_pci_model_count; i++) {
647 if (strcmp(&vpd->model[0],
648 emlxs_pci_model[i].model) == 0) {
649 bcopy(&emlxs_pci_model[i],
650 &hba->model_info,
651 sizeof (emlxs_model_t));
652 rval = 1;
653 break;
654 }
655 }
656 }
657
658 if (!rval && (vpd->part_num[0] != 0) &&
659 (strcmp(&vpd->part_num[0], hba->model_info.model) != 0)) {
660
661 /* Next scan for a PN match */
662
663 for (i = 1; i < emlxs_pci_model_count; i++) {
664 if (strcmp(&vpd->part_num[0],
665 emlxs_pci_model[i].model) == 0) {
666 bcopy(&emlxs_pci_model[i],
667 &hba->model_info,
668 sizeof (emlxs_model_t));
669 break;
670 }
671 }
672 }
673
674 /* HP CNA port indices start at 1 instead of 0 */
675 if (hba->model_info.chip & EMLXS_BE_CHIPS) {
676 ssvid = ddi_get16(hba->pci_acc_handle,
677 (uint16_t *)(hba->pci_addr + PCI_SSVID_REGISTER));
678
679 if ((ssvid == PCI_SSVID_HP) && (vpd->port_index > 0)) {
680 vpd->port_index--;
681 }
682 }
683
684 /*
685 * Now lets update hba->model_info with the real
686 * VPD data, if any.
687 */
688
689 /*
690 * Replace the default model description with vpd data
691 */
692 if (vpd->model_desc[0] != 0) {
693 (void) strncpy(hba->model_info.model_desc,
694 vpd->model_desc,
695 (sizeof (hba->model_info.model_desc)-1));
696 }
697
698 /* Replace the default model with vpd data */
699 if (vpd->model[0] != 0) {
700 (void) strncpy(hba->model_info.model, vpd->model,
701 (sizeof (hba->model_info.model)-1));
702 }
703
704 /* Replace the default program types with vpd data */
705 if (vpd->prog_types[0] != 0) {
706 emlxs_parse_prog_types(hba, vpd->prog_types);
707 }
708 }
709
710 /*
711 * Since the adapter model may have changed with the vpd data
712 * lets double check if adapter is not supported
713 */
714 if (hba->model_info.flags & EMLXS_NOT_SUPPORTED) {
715 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
716 "Unsupported adapter found. "
717 "Id:%d Vendor id:0x%x Device id:0x%x SSDID:0x%x "
718 "Model:%s", hba->model_info.id, hba->model_info.vendor_id,
719 hba->model_info.device_id, hba->model_info.ssdid,
720 hba->model_info.model);
721
722 rval = EIO;
723 goto failed1;
724 }
725
726 (void) strncpy(vpd->boot_version, vpd->sli4FwName,
727 (sizeof (vpd->boot_version)-1));
728
729 /* Get fcode version property */
730 emlxs_get_fcode_version(hba);
731
732 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
733 "Firmware: kern=%08x stub=%08x sli1=%08x", vpd->postKernRev,
734 vpd->opFwRev, vpd->sli1FwRev);
735
736 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
737 "Firmware: sli2=%08x sli3=%08x sli4=%08x fl=%x", vpd->sli2FwRev,
738 vpd->sli3FwRev, vpd->sli4FwRev, vpd->feaLevelHigh);
739
740 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
741 "BIOS: boot=%s fcode=%s", vpd->boot_version, vpd->fcode_version);
742
743 /*
744 * If firmware checking is enabled and the adapter model indicates
745 * a firmware image, then perform firmware version check
746 */
747 hba->fw_flag = 0;
748 hba->fw_timer = 0;
749
750 if (((fw_check & 0x1) &&
751 (hba->model_info.flags & EMLXS_ORACLE_BRANDED) &&
752 hba->model_info.fwid) ||
753 ((fw_check & 0x2) && hba->model_info.fwid)) {
754
755 /* Find firmware image indicated by adapter model */
756 fw = NULL;
757 for (i = 0; i < emlxs_fw_count; i++) {
758 if (emlxs_fw_table[i].id == hba->model_info.fwid) {
759 fw = &emlxs_fw_table[i];
760 break;
761 }
762 }
763
764 /*
765 * If the image was found, then verify current firmware
766 * versions of adapter
767 */
768 if (fw) {
769 /* Obtain current firmware version info */
770 if (hba->model_info.chip & EMLXS_BE_CHIPS) {
771 (void) emlxs_be_read_fw_version(hba, &hba_fw);
772 } else {
773 hba_fw.kern = vpd->postKernRev;
774 hba_fw.stub = vpd->opFwRev;
775 hba_fw.sli1 = vpd->sli1FwRev;
776 hba_fw.sli2 = vpd->sli2FwRev;
777 hba_fw.sli3 = vpd->sli3FwRev;
778 hba_fw.sli4 = vpd->sli4FwRev;
779 }
780
781 if (!kern_update &&
782 ((fw->kern && (hba_fw.kern != fw->kern)) ||
783 (fw->stub && (hba_fw.stub != fw->stub)))) {
784
785 hba->fw_flag |= FW_UPDATE_NEEDED;
786
787 } else if ((fw->kern && (hba_fw.kern != fw->kern)) ||
788 (fw->stub && (hba_fw.stub != fw->stub)) ||
789 (fw->sli1 && (hba_fw.sli1 != fw->sli1)) ||
790 (fw->sli2 && (hba_fw.sli2 != fw->sli2)) ||
791 (fw->sli3 && (hba_fw.sli3 != fw->sli3)) ||
792 (fw->sli4 && (hba_fw.sli4 != fw->sli4))) {
793
794 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg,
795 "Firmware update needed. "
796 "Updating. id=%d fw=%d",
797 hba->model_info.id, hba->model_info.fwid);
798
799 #ifdef MODFW_SUPPORT
800 /*
801 * Load the firmware image now
802 * If MODFW_SUPPORT is not defined, the
803 * firmware image will already be defined
804 * in the emlxs_fw_table
805 */
806 emlxs_fw_load(hba, fw);
807 #endif /* MODFW_SUPPORT */
808
809 if (fw->image && fw->size) {
810 uint32_t rc;
811
812 rc = emlxs_fw_download(hba,
813 (char *)fw->image, fw->size, 0);
814 if ((rc != FC_SUCCESS) &&
815 (rc != EMLXS_REBOOT_REQUIRED)) {
816 EMLXS_MSGF(EMLXS_CONTEXT,
817 &emlxs_init_msg,
818 "Firmware update failed.");
819 hba->fw_flag |=
820 FW_UPDATE_NEEDED;
821 }
822 #ifdef MODFW_SUPPORT
823 /*
824 * Unload the firmware image from
825 * kernel memory
826 */
827 emlxs_fw_unload(hba, fw);
828 #endif /* MODFW_SUPPORT */
829
830 fw_check = 0;
831
832 goto reset;
833 }
834
835 hba->fw_flag |= FW_UPDATE_NEEDED;
836
837 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg,
838 "Firmware image unavailable.");
839 } else {
840 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg,
841 "Firmware update not needed.");
842 }
843 } else {
844 /*
845 * This means either the adapter database is not
846 * correct or a firmware image is missing from the
847 * compile
848 */
849 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg,
850 "Firmware image unavailable. id=%d fw=%d",
851 hba->model_info.id, hba->model_info.fwid);
852 }
853 }
854
855 /* Reuse mbq from previous mbox */
856 bzero(mbq, sizeof (MAILBOXQ));
857
858 emlxs_mb_dump_fcoe(hba, mbq, 0);
859
860 if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) !=
861 MBX_SUCCESS) {
862 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
863 "No FCOE info found. status=%x", mb->mbxStatus);
864 } else {
865 EMLXS_MSGF(EMLXS_CONTEXT,
866 &emlxs_init_debug_msg,
867 "FCOE info dumped. rsp_cnt=%d status=%x",
868 mb->un.varDmp4.rsp_cnt, mb->mbxStatus);
869 (void) emlxs_parse_fcoe(hba,
870 (uint8_t *)hba->sli.sli4.dump_region.virt,
871 mb->un.varDmp4.rsp_cnt);
872 }
873
874 /* Reuse mbq from previous mbox */
875 bzero(mbq, sizeof (MAILBOXQ));
876
877 status = 0;
878 if (port->flag & EMLXS_INI_ENABLED) {
879 status |= SLI4_FEATURE_FCP_INITIATOR;
880 }
881 if (port->flag & EMLXS_TGT_ENABLED) {
882 status |= SLI4_FEATURE_FCP_TARGET;
883 }
884 if (cfg[CFG_NPIV_ENABLE].current) {
885 status |= SLI4_FEATURE_NPIV;
886 }
887 if (cfg[CFG_RQD_MODE].current) {
888 status |= SLI4_FEATURE_RQD;
889 }
890 if (cfg[CFG_PERF_HINT].current) {
891 if (hba->sli.sli4.param.PHON) {
892 status |= SLI4_FEATURE_PERF_HINT;
893 }
894 }
895
896 emlxs_mb_request_features(hba, mbq, status);
897
898 if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) != MBX_SUCCESS) {
899 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
900 "Unable to REQUEST_FEATURES. Mailbox cmd=%x status=%x",
901 mb->mbxCommand, mb->mbxStatus);
902
903 rval = EIO;
904 goto failed1;
905 }
906 emlxs_data_dump(port, "REQ_FEATURE", (uint32_t *)mb, 6, 0);
907
908 /* Check to see if we get the features we requested */
909 if (status != mb->un.varReqFeatures.featuresEnabled) {
910
911 /* Just report descrepencies, don't abort the attach */
912
913 outptr = (uint8_t *)emlxs_request_feature_xlate(
914 mb->un.varReqFeatures.featuresRequested);
915 (void) strlcpy(buf, (char *)outptr, sizeof (buf));
916
917 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
918 "REQUEST_FEATURES: wanted:%s got:%s",
919 &buf[0], emlxs_request_feature_xlate(
920 mb->un.varReqFeatures.featuresEnabled));
921
922 }
923
924 if ((port->flag & EMLXS_INI_ENABLED) &&
925 !(mb->un.varReqFeatures.featuresEnabled &
926 SLI4_FEATURE_FCP_INITIATOR)) {
927 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
928 "Initiator mode not supported by adapter.");
929
930 rval = EIO;
931
932 #ifdef SFCT_SUPPORT
933 /* Check if we can fall back to just target mode */
934 if ((hba->pm_state == EMLXS_PM_IN_ATTACH) &&
935 (mb->un.varReqFeatures.featuresEnabled &
936 SLI4_FEATURE_FCP_TARGET) &&
937 (cfg[CFG_DTM_ENABLE].current == 1) &&
938 (cfg[CFG_TARGET_MODE].current == 1)) {
939
940 cfg[CFG_DTM_ENABLE].current = 0;
941
942 EMLXS_MSGF(EMLXS_CONTEXT,
943 &emlxs_init_failed_msg,
944 "Disabling dynamic target mode. "
945 "Enabling target mode only.");
946
947 /* This will trigger the driver to reattach */
948 rval = EAGAIN;
949 }
950 #endif /* SFCT_SUPPORT */
951 goto failed1;
952 }
953
954 if ((port->flag & EMLXS_TGT_ENABLED) &&
955 !(mb->un.varReqFeatures.featuresEnabled &
956 SLI4_FEATURE_FCP_TARGET)) {
957 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
958 "Target mode not supported by adapter.");
959
960 rval = EIO;
961
962 #ifdef SFCT_SUPPORT
963 /* Check if we can fall back to just initiator mode */
964 if ((hba->pm_state == EMLXS_PM_IN_ATTACH) &&
965 (mb->un.varReqFeatures.featuresEnabled &
966 SLI4_FEATURE_FCP_INITIATOR) &&
967 (cfg[CFG_DTM_ENABLE].current == 1) &&
968 (cfg[CFG_TARGET_MODE].current == 0)) {
969
970 cfg[CFG_DTM_ENABLE].current = 0;
971
972 EMLXS_MSGF(EMLXS_CONTEXT,
973 &emlxs_init_failed_msg,
974 "Disabling dynamic target mode. "
975 "Enabling initiator mode only.");
976
977 /* This will trigger the driver to reattach */
978 rval = EAGAIN;
979 }
980 #endif /* SFCT_SUPPORT */
981 goto failed1;
982 }
983
984 if (mb->un.varReqFeatures.featuresEnabled & SLI4_FEATURE_NPIV) {
985 hba->flag |= FC_NPIV_ENABLED;
986 }
987
988 if (mb->un.varReqFeatures.featuresEnabled & SLI4_FEATURE_PERF_HINT) {
989 hba->sli.sli4.flag |= EMLXS_SLI4_PHON;
990 if (hba->sli.sli4.param.PHWQ) {
991 hba->sli.sli4.flag |= EMLXS_SLI4_PHWQ;
992 }
993 }
994
995 /* Reuse mbq from previous mbox */
996 bzero(mbq, sizeof (MAILBOXQ));
997
998 emlxs_mb_read_config(hba, mbq);
999 if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) != MBX_SUCCESS) {
1000 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
1001 "Unable to READ_CONFIG. Mailbox cmd=%x status=%x",
1002 mb->mbxCommand, mb->mbxStatus);
1003
1004 rval = EIO;
1005 goto failed1;
1006 }
1007 emlxs_data_dump(port, "READ_CONFIG4", (uint32_t *)mb, 18, 0);
1008
1009 /* Set default extents */
1010 hba->sli.sli4.XRICount = mb->un.varRdConfig4.XRICount;
1011 hba->sli.sli4.XRIExtCount = 1;
1012 hba->sli.sli4.XRIExtSize = hba->sli.sli4.XRICount;
1013 hba->sli.sli4.XRIBase[0] = mb->un.varRdConfig4.XRIBase;
1014
1015 hba->sli.sli4.RPICount = mb->un.varRdConfig4.RPICount;
1016 hba->sli.sli4.RPIExtCount = 1;
1017 hba->sli.sli4.RPIExtSize = hba->sli.sli4.RPICount;
1018 hba->sli.sli4.RPIBase[0] = mb->un.varRdConfig4.RPIBase;
1019
1020 hba->sli.sli4.VPICount = mb->un.varRdConfig4.VPICount;
1021 hba->sli.sli4.VPIExtCount = 1;
1022 hba->sli.sli4.VPIExtSize = hba->sli.sli4.VPICount;
1023 hba->sli.sli4.VPIBase[0] = mb->un.varRdConfig4.VPIBase;
1024
1025 hba->sli.sli4.VFICount = mb->un.varRdConfig4.VFICount;
1026 hba->sli.sli4.VFIExtCount = 1;
1027 hba->sli.sli4.VFIExtSize = hba->sli.sli4.VFICount;
1028 hba->sli.sli4.VFIBase[0] = mb->un.varRdConfig4.VFIBase;
1029
1030 hba->sli.sli4.FCFICount = mb->un.varRdConfig4.FCFICount;
1031
1032 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
1033 "CONFIG: xri:%d rpi:%d vpi:%d vfi:%d fcfi:%d",
1034 hba->sli.sli4.XRICount,
1035 hba->sli.sli4.RPICount,
1036 hba->sli.sli4.VPICount,
1037 hba->sli.sli4.VFICount,
1038 hba->sli.sli4.FCFICount);
1039
1040 if ((hba->sli.sli4.XRICount == 0) ||
1041 (hba->sli.sli4.RPICount == 0) ||
1042 (hba->sli.sli4.VPICount == 0) ||
1043 (hba->sli.sli4.VFICount == 0) ||
1044 (hba->sli.sli4.FCFICount == 0)) {
1045 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
1046 "Invalid extent value(s) - xri:%d rpi:%d vpi:%d "
1047 "vfi:%d fcfi:%d",
1048 hba->sli.sli4.XRICount,
1049 hba->sli.sli4.RPICount,
1050 hba->sli.sli4.VPICount,
1051 hba->sli.sli4.VFICount,
1052 hba->sli.sli4.FCFICount);
1053
1054 rval = EIO;
1055 goto failed1;
1056 }
1057
1058 if (mb->un.varRdConfig4.extents) {
1059 if (emlxs_sli4_init_extents(hba, mbq)) {
1060 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
1061 "Unable to initialize extents.");
1062
1063 rval = EIO;
1064 goto failed1;
1065 }
1066 }
1067
1068 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
1069 "CONFIG: port_name:%c %c %c %c",
1070 hba->sli.sli4.port_name[0],
1071 hba->sli.sli4.port_name[1],
1072 hba->sli.sli4.port_name[2],
1073 hba->sli.sli4.port_name[3]);
1074
1075 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
1076 "CONFIG: ldv:%d link_type:%d link_number:%d",
1077 mb->un.varRdConfig4.ldv,
1078 mb->un.varRdConfig4.link_type,
1079 mb->un.varRdConfig4.link_number);
1080
1081 if (mb->un.varRdConfig4.ldv) {
1082 hba->sli.sli4.link_number = mb->un.varRdConfig4.link_number;
1083 } else {
1084 hba->sli.sli4.link_number = (uint32_t)-1;
1085 }
1086
1087 if (hba->sli.sli4.VPICount) {
1088 hba->vpi_max = min(hba->sli.sli4.VPICount, MAX_VPORTS) - 1;
1089 }
1090
1091 /* Set the max node count */
1092 if (cfg[CFG_NUM_NODES].current > 0) {
1093 hba->max_nodes =
1094 min(cfg[CFG_NUM_NODES].current,
1095 hba->sli.sli4.RPICount);
1096 } else {
1097 hba->max_nodes = hba->sli.sli4.RPICount;
1098 }
1099
1100 /* Set the io throttle */
1101 hba->io_throttle = hba->sli.sli4.XRICount - IO_THROTTLE_RESERVE;
1102
1103 /* Set max_iotag */
1104 /* We add 1 in case all XRI's are non-zero */
1105 hba->max_iotag = hba->sli.sli4.XRICount + 1;
1106
1107 if (cfg[CFG_NUM_IOTAGS].current) {
1108 hba->max_iotag = min(hba->max_iotag,
1109 (uint16_t)cfg[CFG_NUM_IOTAGS].current);
1110 }
1111
1112 /* Set out-of-range iotag base */
1113 hba->fc_oor_iotag = hba->max_iotag;
1114
1115 /* Save the link speed capabilities */
1116 vpd->link_speed = (uint16_t)mb->un.varRdConfig4.lmt;
1117 emlxs_process_link_speed(hba);
1118
1119 /*
1120 * Allocate some memory for buffers
1121 */
1122 if (emlxs_mem_alloc_buffer(hba) == 0) {
1123 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
1124 "Unable to allocate memory buffers.");
1125
1126 rval = ENOMEM;
1127 goto failed1;
1128 }
1129
1130 if (emlxs_sli4_resource_alloc(hba)) {
1131 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
1132 "Unable to allocate resources.");
1133
1134 rval = ENOMEM;
1135 goto failed2;
1136 }
1137 emlxs_data_dump(port, "XRIp", (uint32_t *)hba->sli.sli4.XRIp, 18, 0);
1138 emlxs_sli4_zero_queue_stat(hba);
1139
1140 #if (EMLXS_MODREV >= EMLXS_MODREV5)
1141 if ((cfg[CFG_NPIV_ENABLE].current) && (hba->flag & FC_NPIV_ENABLED)) {
1142 hba->fca_tran->fca_num_npivports = hba->vpi_max;
1143 }
1144 #endif /* >= EMLXS_MODREV5 */
1145
1146 /* Reuse mbq from previous mbox */
1147 bzero(mbq, sizeof (MAILBOXQ));
1148
1149 if (emlxs_sli4_post_sgl_pages(hba, mbq)) {
1150 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
1151 "Unable to post sgl pages.");
1152
1153 rval = EIO;
1154 goto failed3;
1155 }
1156
1157 /* Reuse mbq from previous mbox */
1158 bzero(mbq, sizeof (MAILBOXQ));
1159
1160 if (emlxs_sli4_post_hdr_tmplates(hba, mbq)) {
1161 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
1162 "Unable to post header templates.");
1163
1164 rval = EIO;
1165 goto failed3;
1166 }
1167
1168 /*
1169 * Add our interrupt routine to kernel's interrupt chain & enable it
1170 * If MSI is enabled this will cause Solaris to program the MSI address
1171 * and data registers in PCI config space
1172 */
1173 if (EMLXS_INTR_ADD(hba) != DDI_SUCCESS) {
1174 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
1175 "Unable to add interrupt(s).");
1176
1177 rval = EIO;
1178 goto failed3;
1179 }
1180
1181 /* Reuse mbq from previous mbox */
1182 bzero(mbq, sizeof (MAILBOXQ));
1183
1184 /* This MUST be done after EMLXS_INTR_ADD */
1185 if (emlxs_sli4_create_queues(hba, mbq)) {
1186 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
1187 "Unable to create queues.");
1188
1189 rval = EIO;
1190 goto failed3;
1191 }
1192
1193 EMLXS_STATE_CHANGE(hba, FC_INIT_CFGPORT);
1194
1195 /* Get and save the current firmware version (based on sli_mode) */
1196 emlxs_decode_firmware_rev(hba, vpd);
1197
1198
1199 EMLXS_STATE_CHANGE(hba, FC_INIT_INITLINK);
1200
1201 if (SLI4_FC_MODE) {
1202 /* Reuse mbq from previous mbox */
1203 bzero(mbq, sizeof (MAILBOXQ));
1204
1205 emlxs_mb_config_link(hba, mbq);
1206 if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) !=
1207 MBX_SUCCESS) {
1208 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
1209 "Unable to configure link. Mailbox cmd=%x "
1210 "status=%x",
1211 mb->mbxCommand, mb->mbxStatus);
1212
1213 rval = EIO;
1214 goto failed3;
1215 }
1216 }
1217
1218 /* Reuse mbq from previous mbox */
1219 bzero(mbq, sizeof (MAILBOXQ));
1220
1221 /*
1222 * We need to get login parameters for NID
1223 */
1224 (void) emlxs_mb_read_sparam(hba, mbq);
1225 mp = (MATCHMAP *)mbq->bp;
1226 if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) != MBX_SUCCESS) {
1227 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
1228 "Unable to read parameters. Mailbox cmd=%x status=%x",
1229 mb->mbxCommand, mb->mbxStatus);
1230
1231 rval = EIO;
1232 goto failed3;
1233 }
1234
1235 /* Free the buffer since we were polling */
1236 emlxs_mem_put(hba, MEM_BUF, (void *)mp);
1237 mp = NULL;
1238
1239 /* If no serial number in VPD data, then use the WWPN */
1240 if (vpd->serial_num[0] == 0) {
1241 outptr = (uint8_t *)&hba->wwpn.IEEE[0];
1242 for (i = 0; i < 12; i++) {
1243 status = *outptr++;
1244 j = ((status & 0xf0) >> 4);
1245 if (j <= 9) {
1246 vpd->serial_num[i] =
1247 (char)((uint8_t)'0' + (uint8_t)j);
1248 } else {
1249 vpd->serial_num[i] =
1250 (char)((uint8_t)'A' + (uint8_t)(j - 10));
1251 }
1252
1253 i++;
1254 j = (status & 0xf);
1255 if (j <= 9) {
1256 vpd->serial_num[i] =
1257 (char)((uint8_t)'0' + (uint8_t)j);
1258 } else {
1259 vpd->serial_num[i] =
1260 (char)((uint8_t)'A' + (uint8_t)(j - 10));
1261 }
1262 }
1263
1264 /*
1265 * Set port number and port index to zero
1266 * The WWN's are unique to each port and therefore port_num
1267 * must equal zero. This effects the hba_fru_details structure
1268 * in fca_bind_port()
1269 */
1270 vpd->port_num[0] = 0;
1271 vpd->port_index = 0;
1272
1273 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
1274 "CONFIG: WWPN: port_index=0");
1275 }
1276
1277 /* Make final attempt to set a port index */
1278 if (vpd->port_index == (uint32_t)-1) {
1279 dev_info_t *p_dip;
1280 dev_info_t *c_dip;
1281
1282 p_dip = ddi_get_parent(hba->dip);
1283 c_dip = ddi_get_child(p_dip);
1284
1285 vpd->port_index = 0;
1286 while (c_dip && (hba->dip != c_dip)) {
1287 c_dip = ddi_get_next_sibling(c_dip);
1288
1289 if (strcmp(ddi_get_name(c_dip), "ethernet") == 0) {
1290 continue;
1291 }
1292
1293 vpd->port_index++;
1294 }
1295
1296 EMLXS_MSGF(EMLXS_CONTEXT,
1297 &emlxs_init_debug_msg,
1298 "CONFIG: Device tree: port_index=%d",
1299 vpd->port_index);
1300 }
1301
1302 if (vpd->port_num[0] == 0) {
1303 if (hba->model_info.channels == EMLXS_MULTI_CHANNEL) {
1304 (void) snprintf(vpd->port_num,
1305 (sizeof (vpd->port_num)-1),
1306 "%d", vpd->port_index);
1307 }
1308 }
1309
1310 if (vpd->id[0] == 0) {
1311 (void) snprintf(vpd->id, (sizeof (vpd->id)-1),
1312 "%s %d",
1313 hba->model_info.model_desc, vpd->port_index);
1314
1315 }
1316
1317 if (vpd->manufacturer[0] == 0) {
1318 (void) strncpy(vpd->manufacturer, hba->model_info.manufacturer,
1319 (sizeof (vpd->manufacturer)-1));
1320 }
1321
1322 if (vpd->part_num[0] == 0) {
1323 (void) strncpy(vpd->part_num, hba->model_info.model,
1324 (sizeof (vpd->part_num)-1));
1325 }
1326
1327 if (vpd->model_desc[0] == 0) {
1328 (void) snprintf(vpd->model_desc, (sizeof (vpd->model_desc)-1),
1329 "%s %d",
1330 hba->model_info.model_desc, vpd->port_index);
1331 }
1332
1333 if (vpd->model[0] == 0) {
1334 (void) strncpy(vpd->model, hba->model_info.model,
1335 (sizeof (vpd->model)-1));
1336 }
1337
1338 if (vpd->prog_types[0] == 0) {
1339 emlxs_build_prog_types(hba, vpd);
1340 }
1341
1342 /* Create the symbolic names */
1343 (void) snprintf(hba->snn, (sizeof (hba->snn)-1),
1344 "Emulex %s FV%s DV%s %s",
1345 hba->model_info.model, hba->vpd.fw_version, emlxs_version,
1346 (char *)utsname.nodename);
1347
1348 (void) snprintf(hba->spn, (sizeof (hba->spn)-1),
1349 "Emulex PPN-%01x%01x:%02x:%02x:%02x:%02x:%02x:%02x:%02x",
1350 hba->wwpn.nameType, hba->wwpn.IEEEextMsn, hba->wwpn.IEEEextLsb,
1351 hba->wwpn.IEEE[0], hba->wwpn.IEEE[1], hba->wwpn.IEEE[2],
1352 hba->wwpn.IEEE[3], hba->wwpn.IEEE[4], hba->wwpn.IEEE[5]);
1353
1354
1355 EMLXS_STATE_CHANGE(hba, FC_LINK_DOWN);
1356 emlxs_sli4_enable_intr(hba);
1357
1358 /* Check persist-linkdown */
1359 if (cfg[CFG_PERSIST_LINKDOWN].current) {
1360 EMLXS_STATE_CHANGE(hba, FC_LINK_DOWN_PERSIST);
1361 goto done;
1362 }
1363
1364 #ifdef SFCT_SUPPORT
1365 if ((port->mode == MODE_TARGET) &&
1366 !(port->fct_flags & FCT_STATE_PORT_ONLINE)) {
1367 goto done;
1368 }
1369 #endif /* SFCT_SUPPORT */
1370
1371 /* Reuse mbq from previous mbox */
1372 bzero(mbq, sizeof (MAILBOXQ));
1373
1374 /*
1375 * Interupts are enabled, start the timeout timers now.
1376 */
1377 emlxs_timer_start(hba);
1378
1379 /*
1380 * Setup and issue mailbox INITIALIZE LINK command
1381 * At this point, the interrupt will be generated by the HW
1382 */
1383 emlxs_mb_init_link(hba, mbq,
1384 cfg[CFG_TOPOLOGY].current, cfg[CFG_LINK_SPEED].current);
1385
1386 rval = emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0);
1387 if ((rval != MBX_SUCCESS) && (rval != MBX_BUSY)) {
1388 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
1389 "Unable to initialize link. "
1390 "Mailbox cmd=%x status=%x",
1391 mb->mbxCommand, mb->mbxStatus);
1392
1393 rval = EIO;
1394 goto failed4;
1395 }
1396
1397 /* Wait for link to come up */
1398 i = cfg[CFG_LINKUP_DELAY].current;
1399 while (i && (hba->state < FC_LINK_UP)) {
1400 /* Check for hardware error */
1401 if (hba->state == FC_ERROR) {
1402 EMLXS_MSGF(EMLXS_CONTEXT,
1403 &emlxs_init_failed_msg,
1404 "Adapter error.", mb->mbxCommand,
1405 mb->mbxStatus);
1406
1407 rval = EIO;
1408 goto failed4;
1409 }
1410
1411 BUSYWAIT_MS(1000);
1412 i--;
1413 }
1414
1415 done:
1416 /*
1417 * The leadville driver will now handle the FLOGI at the driver level
1418 */
1419
1420 if (mbq) {
1421 (void) kmem_free((uint8_t *)mbq, sizeof (MAILBOXQ));
1422 mbq = NULL;
1423 mb = NULL;
1424 }
1425 return (0);
1426
1427 failed4:
1428 emlxs_timer_stop(hba);
1429
1430 failed3:
1431 EMLXS_STATE_CHANGE(hba, FC_ERROR);
1432
1433 if (mp) {
1434 emlxs_mem_put(hba, MEM_BUF, (void *)mp);
1435 mp = NULL;
1436 }
1437
1438
1439 if (hba->intr_flags & EMLXS_MSI_ADDED) {
1440 (void) EMLXS_INTR_REMOVE(hba);
1441 }
1442
1443 emlxs_sli4_resource_free(hba);
1444
1445 failed2:
1446 (void) emlxs_mem_free_buffer(hba);
1447
1448 failed1:
1449 if (mbq) {
1450 (void) kmem_free((uint8_t *)mbq, sizeof (MAILBOXQ));
1451 mbq = NULL;
1452 mb = NULL;
1453 }
1454
1455 if (hba->sli.sli4.dump_region.virt) {
1456 (void) emlxs_mem_free(hba, &hba->sli.sli4.dump_region);
1457 }
1458
1459 if (rval == 0) {
1460 rval = EIO;
1461 }
1462
1463 return (rval);
1464
1465 } /* emlxs_sli4_online() */
1466
1467
1468 static void
emlxs_sli4_offline(emlxs_hba_t * hba,uint32_t reset_requested)1469 emlxs_sli4_offline(emlxs_hba_t *hba, uint32_t reset_requested)
1470 {
1471 /* Reverse emlxs_sli4_online */
1472
1473 mutex_enter(&EMLXS_PORT_LOCK);
1474 if (hba->flag & FC_INTERLOCKED) {
1475 mutex_exit(&EMLXS_PORT_LOCK);
1476 goto killed;
1477 }
1478 mutex_exit(&EMLXS_PORT_LOCK);
1479
1480 if (reset_requested) {
1481 (void) emlxs_sli4_hba_reset(hba, 0, 0, 0);
1482 }
1483
1484 /* Shutdown the adapter interface */
1485 emlxs_sli4_hba_kill(hba);
1486
1487 killed:
1488
1489 /* Free SLI shared memory */
1490 emlxs_sli4_resource_free(hba);
1491
1492 /* Free driver shared memory */
1493 (void) emlxs_mem_free_buffer(hba);
1494
1495 /* Free the host dump region buffer */
1496 (void) emlxs_mem_free(hba, &hba->sli.sli4.dump_region);
1497
1498 } /* emlxs_sli4_offline() */
1499
1500
/*
 * Map the PCI BARs used to talk to the SLI-4 port and derive the
 * doorbell/status register addresses for the detected interface type.
 * Also allocates the DMA buffer used as the bootstrap mailbox.
 *
 * if_type 0 uses BAR1 (semaphore) and BAR2 (doorbells), with the
 * error registers living in PCI config space.  if_type 2 exposes
 * everything through BAR0.  Any other interface type is rejected.
 *
 * Returns 0 on success or ENOMEM on failure (after unmapping any
 * partial state via emlxs_sli4_unmap_hdw()).
 */
/*ARGSUSED*/
static int
emlxs_sli4_map_hdw(emlxs_hba_t *hba)
{
	emlxs_port_t *port = &PPORT;
	dev_info_t *dip;
	ddi_device_acc_attr_t dev_attr;
	int status;

	dip = (dev_info_t *)hba->dip;
	dev_attr = emlxs_dev_acc_attr;

	switch (hba->sli_intf & SLI_INTF_IF_TYPE_MASK) {
	case SLI_INTF_IF_TYPE_0:

		/* Map in Hardware BAR pages that will be used for */
		/* communication with HBA. */
		if (hba->sli.sli4.bar1_acc_handle == 0) {
			status = ddi_regs_map_setup(dip, PCI_BAR1_RINDEX,
			    (caddr_t *)&hba->sli.sli4.bar1_addr,
			    0, 0, &dev_attr, &hba->sli.sli4.bar1_acc_handle);
			if (status != DDI_SUCCESS) {
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_attach_failed_msg,
				    "(PCI) ddi_regs_map_setup BAR1 failed. "
				    "stat=%d mem=%p attr=%p hdl=%p",
				    status, &hba->sli.sli4.bar1_addr, &dev_attr,
				    &hba->sli.sli4.bar1_acc_handle);
				goto failed;
			}
		}

		if (hba->sli.sli4.bar2_acc_handle == 0) {
			status = ddi_regs_map_setup(dip, PCI_BAR2_RINDEX,
			    (caddr_t *)&hba->sli.sli4.bar2_addr,
			    0, 0, &dev_attr, &hba->sli.sli4.bar2_acc_handle);
			if (status != DDI_SUCCESS) {
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_attach_failed_msg,
				    "ddi_regs_map_setup BAR2 failed. status=%x",
				    status);
				goto failed;
			}
		}

		/* offset from beginning of register space */
		hba->sli.sli4.MPUEPSemaphore_reg_addr =
		    (uint32_t *)(hba->sli.sli4.bar1_addr +
		    CSR_MPU_EP_SEMAPHORE_OFFSET);
		hba->sli.sli4.MBDB_reg_addr =
		    (uint32_t *)(hba->sli.sli4.bar2_addr + PD_MB_DB_OFFSET);
		hba->sli.sli4.CQDB_reg_addr =
		    (uint32_t *)(hba->sli.sli4.bar2_addr + PD_CQ_DB_OFFSET);
		hba->sli.sli4.MQDB_reg_addr =
		    (uint32_t *)(hba->sli.sli4.bar2_addr + PD_MQ_DB_OFFSET);
		hba->sli.sli4.WQDB_reg_addr =
		    (uint32_t *)(hba->sli.sli4.bar2_addr + PD_WQ_DB_OFFSET);
		hba->sli.sli4.RQDB_reg_addr =
		    (uint32_t *)(hba->sli.sli4.bar2_addr + PD_RQ_DB_OFFSET);

		/* if_type 0 has no SLIPORT status/control registers */
		hba->sli.sli4.STATUS_reg_addr = 0;
		hba->sli.sli4.CNTL_reg_addr = 0;

		/* Error registers are read through PCI config space here */
		hba->sli.sli4.ERR1_reg_addr =
		    (uint32_t *)(hba->pci_addr + PCICFG_UE_STATUS_LO_OFFSET);
		hba->sli.sli4.ERR2_reg_addr =
		    (uint32_t *)(hba->pci_addr + PCICFG_UE_STATUS_HI_OFFSET);

		hba->sli.sli4.PHYSDEV_reg_addr = 0;
		break;

	case SLI_INTF_IF_TYPE_2:

		/* Map in Hardware BAR pages that will be used for */
		/* communication with HBA. */
		if (hba->sli.sli4.bar0_acc_handle == 0) {
			status = ddi_regs_map_setup(dip, PCI_BAR0_RINDEX,
			    (caddr_t *)&hba->sli.sli4.bar0_addr,
			    0, 0, &dev_attr, &hba->sli.sli4.bar0_acc_handle);
			if (status != DDI_SUCCESS) {
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_attach_failed_msg,
				    "(PCI) ddi_regs_map_setup BAR0 failed. "
				    "stat=%d mem=%p attr=%p hdl=%p",
				    status, &hba->sli.sli4.bar0_addr, &dev_attr,
				    &hba->sli.sli4.bar0_acc_handle);
				goto failed;
			}
		}

		/* offset from beginning of register space */
		hba->sli.sli4.MPUEPSemaphore_reg_addr =
		    (uint32_t *)(hba->sli.sli4.bar0_addr +
		    SLIPORT_SEMAPHORE_OFFSET);
		hba->sli.sli4.MBDB_reg_addr =
		    (uint32_t *)(hba->sli.sli4.bar0_addr + PD_MB_DB_OFFSET);
		hba->sli.sli4.CQDB_reg_addr =
		    (uint32_t *)(hba->sli.sli4.bar0_addr + PD_CQ_DB_OFFSET);
		hba->sli.sli4.MQDB_reg_addr =
		    (uint32_t *)(hba->sli.sli4.bar0_addr + PD_MQ_DB_OFFSET);
		hba->sli.sli4.WQDB_reg_addr =
		    (uint32_t *)(hba->sli.sli4.bar0_addr + PD_WQ_DB_OFFSET);
		hba->sli.sli4.RQDB_reg_addr =
		    (uint32_t *)(hba->sli.sli4.bar0_addr + PD_RQ_DB_OFFSET);

		hba->sli.sli4.STATUS_reg_addr =
		    (uint32_t *)(hba->sli.sli4.bar0_addr +
		    SLIPORT_STATUS_OFFSET);
		hba->sli.sli4.CNTL_reg_addr =
		    (uint32_t *)(hba->sli.sli4.bar0_addr +
		    SLIPORT_CONTROL_OFFSET);
		hba->sli.sli4.ERR1_reg_addr =
		    (uint32_t *)(hba->sli.sli4.bar0_addr +
		    SLIPORT_ERROR1_OFFSET);
		hba->sli.sli4.ERR2_reg_addr =
		    (uint32_t *)(hba->sli.sli4.bar0_addr +
		    SLIPORT_ERROR2_OFFSET);
		hba->sli.sli4.PHYSDEV_reg_addr =
		    (uint32_t *)(hba->sli.sli4.bar0_addr +
		    PHYSDEV_CONTROL_OFFSET);

		break;

	case SLI_INTF_IF_TYPE_1:
	case SLI_INTF_IF_TYPE_3:
	default:
		EMLXS_MSGF(EMLXS_CONTEXT,
		    &emlxs_attach_failed_msg,
		    "Map hdw: Unsupported if_type %08x",
		    (hba->sli_intf & SLI_INTF_IF_TYPE_MASK));

		goto failed;
	}

	/* Allocate the page-aligned bootstrap mailbox DMA buffer once */
	if (hba->sli.sli4.bootstrapmb.virt == 0) {
		MBUF_INFO *buf_info;
		MBUF_INFO bufinfo;

		buf_info = &bufinfo;

		bzero(buf_info, sizeof (MBUF_INFO));
		buf_info->size = EMLXS_BOOTSTRAP_MB_SIZE + MBOX_EXTENSION_SIZE;
		buf_info->flags = FC_MBUF_DMA | FC_MBUF_SNGLSG;
		/* Align to one page so the low doorbell bits stay clear */
		buf_info->align = ddi_ptob(dip, 1L);

		(void) emlxs_mem_alloc(hba, buf_info);

		if (buf_info->virt == NULL) {
			goto failed;
		}

		hba->sli.sli4.bootstrapmb.virt = buf_info->virt;
		hba->sli.sli4.bootstrapmb.phys = buf_info->phys;
		hba->sli.sli4.bootstrapmb.size = EMLXS_BOOTSTRAP_MB_SIZE +
		    MBOX_EXTENSION_SIZE;
		hba->sli.sli4.bootstrapmb.data_handle = buf_info->data_handle;
		hba->sli.sli4.bootstrapmb.dma_handle = buf_info->dma_handle;
		/* Only the mailbox area proper is cleared, not the extension */
		bzero((char *)hba->sli.sli4.bootstrapmb.virt,
		    EMLXS_BOOTSTRAP_MB_SIZE);
	}

	hba->chan_count = MAX_CHANNEL;

	return (0);

failed:

	emlxs_sli4_unmap_hdw(hba);
	return (ENOMEM);


} /* emlxs_sli4_map_hdw() */
1673
1674
1675 /*ARGSUSED*/
1676 static void
emlxs_sli4_unmap_hdw(emlxs_hba_t * hba)1677 emlxs_sli4_unmap_hdw(emlxs_hba_t *hba)
1678 {
1679 MBUF_INFO bufinfo;
1680 MBUF_INFO *buf_info = &bufinfo;
1681
1682
1683 if (hba->sli.sli4.bar0_acc_handle) {
1684 ddi_regs_map_free(&hba->sli.sli4.bar0_acc_handle);
1685 hba->sli.sli4.bar0_acc_handle = 0;
1686 }
1687
1688 if (hba->sli.sli4.bar1_acc_handle) {
1689 ddi_regs_map_free(&hba->sli.sli4.bar1_acc_handle);
1690 hba->sli.sli4.bar1_acc_handle = 0;
1691 }
1692
1693 if (hba->sli.sli4.bar2_acc_handle) {
1694 ddi_regs_map_free(&hba->sli.sli4.bar2_acc_handle);
1695 hba->sli.sli4.bar2_acc_handle = 0;
1696 }
1697
1698 if (hba->sli.sli4.bootstrapmb.virt) {
1699 bzero(buf_info, sizeof (MBUF_INFO));
1700
1701 if (hba->sli.sli4.bootstrapmb.phys) {
1702 buf_info->phys = hba->sli.sli4.bootstrapmb.phys;
1703 buf_info->data_handle =
1704 hba->sli.sli4.bootstrapmb.data_handle;
1705 buf_info->dma_handle =
1706 hba->sli.sli4.bootstrapmb.dma_handle;
1707 buf_info->flags = FC_MBUF_DMA;
1708 }
1709
1710 buf_info->virt = hba->sli.sli4.bootstrapmb.virt;
1711 buf_info->size = hba->sli.sli4.bootstrapmb.size;
1712 emlxs_mem_free(hba, buf_info);
1713
1714 hba->sli.sli4.bootstrapmb.virt = NULL;
1715 }
1716
1717 return;
1718
1719 } /* emlxs_sli4_unmap_hdw() */
1720
1721
/*
 * Poll the port until the firmware reports ready, or until a fatal
 * condition is detected.  Polls once per second for up to 30 seconds.
 *
 * Returns:
 *   0 - port is ready
 *   1 - POST/init error, unrecoverable error (if_type 0), or
 *       "ready but reset needed" (if_type 2)
 *   2 - unrecoverable port error (if_type 2)
 *   3 - unsupported interface type, or ready timeout
 *
 * Non-zero returns (other than the if_type 2 reset-needed case)
 * leave the adapter in FC_ERROR state.
 */
static int
emlxs_check_hdw_ready(emlxs_hba_t *hba)
{
	emlxs_port_t *port = &PPORT;
	uint32_t status;
	uint32_t i = 0;
	uint32_t err1;
	uint32_t err2;

	/* Wait for reset completion */
	while (i < 30) {

		switch (hba->sli_intf & SLI_INTF_IF_TYPE_MASK) {
		case SLI_INTF_IF_TYPE_0:
			/* if_type 0 reports POST state via the semaphore */
			status = emlxs_sli4_read_sema(hba);

			/* Check to see if any errors occurred during init */
			if (status & ARM_POST_FATAL) {
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_reset_failed_msg,
				    "SEMA Error: status=%x", status);

				EMLXS_STATE_CHANGE(hba, FC_ERROR);

				return (1);
			}

			if ((status & ARM_UNRECOVERABLE_ERROR) ==
			    ARM_UNRECOVERABLE_ERROR) {
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_reset_failed_msg,
				    "Unrecoverable Error: status=%x", status);

				EMLXS_STATE_CHANGE(hba, FC_ERROR);

				return (1);
			}

			if ((status & ARM_POST_MASK) == ARM_POST_READY) {
				/* ARM Ready !! */
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_sli_detail_msg,
				    "ARM Ready: status=%x", status);

				return (0);
			}
			break;

		case SLI_INTF_IF_TYPE_2:
			/* if_type 2 reports state via the SLIPORT status reg */
			status = emlxs_sli4_read_status(hba);

			if (status & SLI_STATUS_READY) {
				if (!(status & SLI_STATUS_ERROR)) {
					/* ARM Ready !! */
					EMLXS_MSGF(EMLXS_CONTEXT,
					    &emlxs_sli_detail_msg,
					    "ARM Ready: status=%x", status);

					return (0);
				}

				/* Ready but errored: collect error details */
				err1 = ddi_get32(hba->sli.sli4.bar0_acc_handle,
				    hba->sli.sli4.ERR1_reg_addr);
				err2 = ddi_get32(hba->sli.sli4.bar0_acc_handle,
				    hba->sli.sli4.ERR2_reg_addr);

				if (status & SLI_STATUS_RESET_NEEDED) {
					EMLXS_MSGF(EMLXS_CONTEXT,
					    &emlxs_sli_detail_msg,
					    "ARM Ready (Reset Needed): "
					    "status=%x err1=%x "
					    "err2=%x",
					    status, err1, err2);

					/* Caller may reset; no state change */
					return (1);
				}

				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_reset_failed_msg,
				    "Unrecoverable Error: status=%x err1=%x "
				    "err2=%x",
				    status, err1, err2);

				EMLXS_STATE_CHANGE(hba, FC_ERROR);

				return (2);
			}

			break;

		default:
			EMLXS_STATE_CHANGE(hba, FC_ERROR);

			return (3);
		}

		BUSYWAIT_MS(1000);
		i++;
	}

	/* Timeout occurred; read the error registers for the log message */
	switch (hba->sli_intf & SLI_INTF_IF_TYPE_MASK) {
	case SLI_INTF_IF_TYPE_0:
		/* if_type 0 error registers live in PCI config space */
		err1 = ddi_get32(hba->pci_acc_handle,
		    hba->sli.sli4.ERR1_reg_addr);
		err2 = ddi_get32(hba->pci_acc_handle,
		    hba->sli.sli4.ERR2_reg_addr);
		break;

	default:
		err1 = ddi_get32(hba->sli.sli4.bar0_acc_handle,
		    hba->sli.sli4.ERR1_reg_addr);
		err2 = ddi_get32(hba->sli.sli4.bar0_acc_handle,
		    hba->sli.sli4.ERR2_reg_addr);
		break;
	}

	/*
	 * NOTE(review): on if_type 0 "status" holds semaphore bits here,
	 * so the SLI_STATUS_ERROR test below is only meaningful for
	 * if_type 2 — confirm; it affects only which message is logged.
	 */
	if (status & SLI_STATUS_ERROR) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_reset_failed_msg,
		    "Ready Timeout: Port Error: status=%x err1=%x err2=%x",
		    status, err1, err2);
	} else {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_reset_failed_msg,
		    "Ready Timeout: status=%x err1=%x err2=%x",
		    status, err1, err2);
	}

	EMLXS_STATE_CHANGE(hba, FC_ERROR);

	return (3);

} /* emlxs_check_hdw_ready() */
1854
1855
1856 static uint32_t
emlxs_sli4_read_status(emlxs_hba_t * hba)1857 emlxs_sli4_read_status(emlxs_hba_t *hba)
1858 {
1859 #ifdef FMA_SUPPORT
1860 emlxs_port_t *port = &PPORT;
1861 #endif /* FMA_SUPPORT */
1862 uint32_t status;
1863
1864 switch (hba->sli_intf & SLI_INTF_IF_TYPE_MASK) {
1865 case SLI_INTF_IF_TYPE_2:
1866 status = ddi_get32(hba->sli.sli4.bar0_acc_handle,
1867 hba->sli.sli4.STATUS_reg_addr);
1868 #ifdef FMA_SUPPORT
1869 /* Access handle validation */
1870 EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli4.bar0_acc_handle);
1871 #endif /* FMA_SUPPORT */
1872 break;
1873 default:
1874 status = 0;
1875 break;
1876 }
1877
1878 return (status);
1879
1880 } /* emlxs_sli4_read_status() */
1881
1882
1883 static uint32_t
emlxs_sli4_read_sema(emlxs_hba_t * hba)1884 emlxs_sli4_read_sema(emlxs_hba_t *hba)
1885 {
1886 #ifdef FMA_SUPPORT
1887 emlxs_port_t *port = &PPORT;
1888 #endif /* FMA_SUPPORT */
1889 uint32_t status;
1890
1891 switch (hba->sli_intf & SLI_INTF_IF_TYPE_MASK) {
1892 case SLI_INTF_IF_TYPE_0:
1893 status = ddi_get32(hba->sli.sli4.bar1_acc_handle,
1894 hba->sli.sli4.MPUEPSemaphore_reg_addr);
1895 #ifdef FMA_SUPPORT
1896 /* Access handle validation */
1897 EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli4.bar1_acc_handle);
1898 #endif /* FMA_SUPPORT */
1899 break;
1900
1901 case SLI_INTF_IF_TYPE_2:
1902 status = ddi_get32(hba->sli.sli4.bar0_acc_handle,
1903 hba->sli.sli4.MPUEPSemaphore_reg_addr);
1904 #ifdef FMA_SUPPORT
1905 /* Access handle validation */
1906 EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli4.bar0_acc_handle);
1907 #endif /* FMA_SUPPORT */
1908 break;
1909 default:
1910 status = 0;
1911 break;
1912 }
1913
1914 return (status);
1915
1916 } /* emlxs_sli4_read_sema() */
1917
1918
1919 static uint32_t
emlxs_sli4_read_mbdb(emlxs_hba_t * hba)1920 emlxs_sli4_read_mbdb(emlxs_hba_t *hba)
1921 {
1922 #ifdef FMA_SUPPORT
1923 emlxs_port_t *port = &PPORT;
1924 #endif /* FMA_SUPPORT */
1925 uint32_t status;
1926
1927 switch (hba->sli_intf & SLI_INTF_IF_TYPE_MASK) {
1928 case SLI_INTF_IF_TYPE_0:
1929 status = ddi_get32(hba->sli.sli4.bar2_acc_handle,
1930 hba->sli.sli4.MBDB_reg_addr);
1931
1932 #ifdef FMA_SUPPORT
1933 /* Access handle validation */
1934 EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli4.bar2_acc_handle);
1935 #endif /* FMA_SUPPORT */
1936 break;
1937
1938 case SLI_INTF_IF_TYPE_2:
1939 status = ddi_get32(hba->sli.sli4.bar0_acc_handle,
1940 hba->sli.sli4.MBDB_reg_addr);
1941 #ifdef FMA_SUPPORT
1942 /* Access handle validation */
1943 EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli4.bar0_acc_handle);
1944 #endif /* FMA_SUPPORT */
1945 break;
1946 default:
1947 status = 0;
1948 break;
1949 }
1950
1951 return (status);
1952
1953 } /* emlxs_sli4_read_mbdb() */
1954
1955
1956 static void
emlxs_sli4_write_mbdb(emlxs_hba_t * hba,uint64_t phys,boolean_t high)1957 emlxs_sli4_write_mbdb(emlxs_hba_t *hba, uint64_t phys, boolean_t high)
1958 {
1959 uint32_t db;
1960 uint_t shift;
1961
1962 /*
1963 * The bootstrap mailbox is posted as 2 x 30 bit values.
1964 * It is required to be 16 bit aligned, and the 2 low order
1965 * bits are used as flags.
1966 */
1967 shift = high ? 32 : 2;
1968
1969 db = (uint32_t)(phys >> shift) & BMBX_ADDR;
1970
1971 if (high)
1972 db |= BMBX_ADDR_HI;
1973
1974 switch (hba->sli_intf & SLI_INTF_IF_TYPE_MASK) {
1975 case SLI_INTF_IF_TYPE_0:
1976 ddi_put32(hba->sli.sli4.bar2_acc_handle,
1977 hba->sli.sli4.MBDB_reg_addr, db);
1978 break;
1979
1980 case SLI_INTF_IF_TYPE_2:
1981 ddi_put32(hba->sli.sli4.bar0_acc_handle,
1982 hba->sli.sli4.MBDB_reg_addr, db);
1983 break;
1984 }
1985
1986 } /* emlxs_sli4_write_mbdb() */
1987
1988
1989 static void
emlxs_sli4_write_eqdb(emlxs_hba_t * hba,uint16_t qid,uint32_t count,boolean_t arm)1990 emlxs_sli4_write_eqdb(emlxs_hba_t *hba, uint16_t qid, uint32_t count,
1991 boolean_t arm)
1992 {
1993 uint32_t db;
1994
1995 /*
1996 * Add the qid to the doorbell. It is split into a low and
1997 * high component.
1998 */
1999
2000 /* Initialize with the low bits */
2001 db = qid & EQ_DB_ID_LO_MASK;
2002
2003 /* drop the low bits */
2004 qid >>= EQ_ID_LO_BITS;
2005
2006 /* Add the high bits */
2007 db |= (qid << EQ_DB_ID_HI_SHIFT) & EQ_DB_ID_HI_MASK;
2008
2009 /*
2010 * Include the number of entries to be popped.
2011 */
2012 db |= (count << EQ_DB_POP_SHIFT) & EQ_DB_POP_MASK;
2013
2014 /* The doorbell is for an event queue */
2015 db |= EQ_DB_EVENT;
2016
2017 /* Arm if asked to do so */
2018 if (arm)
2019 db |= EQ_DB_CLEAR | EQ_DB_REARM;
2020
2021 #ifdef DEBUG_FASTPATH
2022 EMLXS_MSGF(&hba->port[0], _FILENO_, __LINE__, &emlxs_sli_detail_msg,
2023 "EQE: CLEAR db=%08x pops=%d", db, count);
2024 #endif /* DEBUG_FASTPATH */
2025
2026 switch (hba->sli_intf & SLI_INTF_IF_TYPE_MASK) {
2027 case SLI_INTF_IF_TYPE_0:
2028 /* The CQDB_reg_addr is also use for EQs */
2029 ddi_put32(hba->sli.sli4.bar2_acc_handle,
2030 hba->sli.sli4.CQDB_reg_addr, db);
2031 break;
2032
2033 case SLI_INTF_IF_TYPE_2:
2034 /* The CQDB_reg_addr is also use for EQs */
2035 ddi_put32(hba->sli.sli4.bar0_acc_handle,
2036 hba->sli.sli4.CQDB_reg_addr, db);
2037 break;
2038 }
2039 } /* emlxs_sli4_write_eqdb() */
2040
2041 static void
emlxs_sli4_write_cqdb(emlxs_hba_t * hba,uint16_t qid,uint32_t count,boolean_t arm)2042 emlxs_sli4_write_cqdb(emlxs_hba_t *hba, uint16_t qid, uint32_t count,
2043 boolean_t arm)
2044 {
2045 uint32_t db;
2046
2047 /*
2048 * Add the qid to the doorbell. It is split into a low and
2049 * high component.
2050 */
2051
2052 /* Initialize with the low bits */
2053 db = qid & CQ_DB_ID_LO_MASK;
2054
2055 /* drop the low bits */
2056 qid >>= CQ_ID_LO_BITS;
2057
2058 /* Add the high bits */
2059 db |= (qid << CQ_DB_ID_HI_SHIFT) & CQ_DB_ID_HI_MASK;
2060
2061 /*
2062 * Include the number of entries to be popped.
2063 */
2064 db |= (count << CQ_DB_POP_SHIFT) & CQ_DB_POP_MASK;
2065
2066 /* Arm if asked to do so */
2067 if (arm)
2068 db |= CQ_DB_REARM;
2069
2070 #ifdef DEBUG_FASTPATH
2071 EMLXS_MSGF(&hba->port[0], _FILENO_, __LINE__, &emlxs_sli_detail_msg,
2072 "CQE: CLEAR db=%08x: pops=%d", db, count);
2073 #endif /* DEBUG_FASTPATH */
2074
2075 switch (hba->sli_intf & SLI_INTF_IF_TYPE_MASK) {
2076 case SLI_INTF_IF_TYPE_0:
2077 ddi_put32(hba->sli.sli4.bar2_acc_handle,
2078 hba->sli.sli4.CQDB_reg_addr, db);
2079 break;
2080
2081 case SLI_INTF_IF_TYPE_2:
2082 ddi_put32(hba->sli.sli4.bar0_acc_handle,
2083 hba->sli.sli4.CQDB_reg_addr, db);
2084 break;
2085 }
2086 } /* emlxs_sli4_write_cqdb() */
2087
2088
2089 static void
emlxs_sli4_write_rqdb(emlxs_hba_t * hba,uint16_t qid,uint_t count)2090 emlxs_sli4_write_rqdb(emlxs_hba_t *hba, uint16_t qid, uint_t count)
2091 {
2092 emlxs_rqdbu_t rqdb;
2093
2094 rqdb.word = 0;
2095 rqdb.db.Qid = qid;
2096 rqdb.db.NumPosted = count;
2097
2098 switch (hba->sli_intf & SLI_INTF_IF_TYPE_MASK) {
2099 case SLI_INTF_IF_TYPE_0:
2100 ddi_put32(hba->sli.sli4.bar2_acc_handle,
2101 hba->sli.sli4.RQDB_reg_addr, rqdb.word);
2102 break;
2103
2104 case SLI_INTF_IF_TYPE_2:
2105 ddi_put32(hba->sli.sli4.bar0_acc_handle,
2106 hba->sli.sli4.RQDB_reg_addr, rqdb.word);
2107 break;
2108 }
2109
2110 } /* emlxs_sli4_write_rqdb() */
2111
2112
2113 static void
emlxs_sli4_write_mqdb(emlxs_hba_t * hba,uint16_t qid,uint_t count)2114 emlxs_sli4_write_mqdb(emlxs_hba_t *hba, uint16_t qid, uint_t count)
2115 {
2116 uint32_t db;
2117
2118 db = qid;
2119 db |= (count << MQ_DB_POP_SHIFT) & MQ_DB_POP_MASK;
2120
2121 switch (hba->sli_intf & SLI_INTF_IF_TYPE_MASK) {
2122 case SLI_INTF_IF_TYPE_0:
2123 ddi_put32(hba->sli.sli4.bar2_acc_handle,
2124 hba->sli.sli4.MQDB_reg_addr, db);
2125 break;
2126
2127 case SLI_INTF_IF_TYPE_2:
2128 ddi_put32(hba->sli.sli4.bar0_acc_handle,
2129 hba->sli.sli4.MQDB_reg_addr, db);
2130 break;
2131 }
2132
2133 } /* emlxs_sli4_write_mqdb() */
2134
2135
2136 static void
emlxs_sli4_write_wqdb(emlxs_hba_t * hba,uint16_t qid,uint_t posted,uint_t index)2137 emlxs_sli4_write_wqdb(emlxs_hba_t *hba, uint16_t qid, uint_t posted,
2138 uint_t index)
2139 {
2140 uint32_t db;
2141
2142 db = qid;
2143 db |= (posted << WQ_DB_POST_SHIFT) & WQ_DB_POST_MASK;
2144 db |= (index << WQ_DB_IDX_SHIFT) & WQ_DB_IDX_MASK;
2145
2146 switch (hba->sli_intf & SLI_INTF_IF_TYPE_MASK) {
2147 case SLI_INTF_IF_TYPE_0:
2148 ddi_put32(hba->sli.sli4.bar2_acc_handle,
2149 hba->sli.sli4.WQDB_reg_addr, db);
2150 break;
2151
2152 case SLI_INTF_IF_TYPE_2:
2153 ddi_put32(hba->sli.sli4.bar0_acc_handle,
2154 hba->sli.sli4.WQDB_reg_addr, db);
2155 break;
2156 }
2157
2158 #ifdef DEBUG_FASTPATH
2159 EMLXS_MSGF(&hba->port[0], _FILENO_, __LINE__, &emlxs_sli_detail_msg,
2160 "WQ RING: %08x", db);
2161 #endif /* DEBUG_FASTPATH */
2162 } /* emlxs_sli4_write_wqdb() */
2163
2164
/*
 * Wait for the bootstrap mailbox doorbell to report BMBX_READY.
 *
 * "tmo" is the remaining timeout budget in 10ms ticks.  Returns the
 * number of ticks left when the ready bit is seen (always non-zero),
 * or 0 on timeout, in which case the port error registers are logged
 * and the adapter is placed in FC_ERROR state.
 */
static uint32_t
emlxs_check_bootstrap_ready(emlxs_hba_t *hba, uint32_t tmo)
{
	emlxs_port_t *port = &PPORT;
	uint32_t status = 0;
	uint32_t err1;
	uint32_t err2;

	/* Wait for reset completion, tmo is in 10ms ticks */
	while (tmo) {
		status = emlxs_sli4_read_mbdb(hba);

		/* Check to see if any errors occurred during init */
		if (status & BMBX_READY) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
			    "BMBX Ready: status=0x%x", status);

			return (tmo);
		}

		BUSYWAIT_MS(10);
		tmo--;
	}

	/* Timed out; read the error registers for the log message */
	switch (hba->sli_intf & SLI_INTF_IF_TYPE_MASK) {
	case SLI_INTF_IF_TYPE_0:
		/* if_type 0 error registers live in PCI config space */
		err1 = ddi_get32(hba->pci_acc_handle,
		    hba->sli.sli4.ERR1_reg_addr);
		err2 = ddi_get32(hba->pci_acc_handle,
		    hba->sli.sli4.ERR2_reg_addr);
		break;

	default:
		err1 = ddi_get32(hba->sli.sli4.bar0_acc_handle,
		    hba->sli.sli4.ERR1_reg_addr);
		err2 = ddi_get32(hba->sli.sli4.bar0_acc_handle,
		    hba->sli.sli4.ERR2_reg_addr);
		break;
	}

	/* Timeout occurred */
	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_reset_failed_msg,
	    "Timeout waiting for BMailbox: status=%x err1=%x err2=%x",
	    status, err1, err2);

	EMLXS_STATE_CHANGE(hba, FC_ERROR);

	return (0);

} /* emlxs_check_bootstrap_ready() */
2215
2216
/*
 * Hand the command already staged in the bootstrap mailbox to the
 * adapter via the two-phase doorbell handshake (high address bits,
 * then low address bits, each acknowledged by BMBX_READY).
 * tmo is in 10ms ticks; returns the remaining tick budget on success,
 * or 0 if either handshake phase timed out.
 */
static uint32_t
emlxs_issue_bootstrap_mb(emlxs_hba_t *hba, uint32_t tmo)
{
	emlxs_port_t *port = &PPORT;
	uint32_t *iptr;

	/*
	 * This routine assumes the bootstrap mbox is loaded
	 * with the mailbox command to be executed.
	 *
	 * First, load the high 30 bits of bootstrap mailbox
	 */
	emlxs_sli4_write_mbdb(hba, hba->sli.sli4.bootstrapmb.phys, B_TRUE);

	/* Wait for the adapter to acknowledge the high-address write */
	tmo = emlxs_check_bootstrap_ready(hba, tmo);
	if (tmo == 0) {
		return (0);
	}

	/* Load the low 30 bits of bootstrap mailbox */
	emlxs_sli4_write_mbdb(hba, hba->sli.sli4.bootstrapmb.phys, B_FALSE);

	/* Wait for the adapter to complete the command */
	tmo = emlxs_check_bootstrap_ready(hba, tmo);
	if (tmo == 0) {
		return (0);
	}

	/* Command completed; log the first words of the response */
	iptr = (uint32_t *)hba->sli.sli4.bootstrapmb.virt;

	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
	    "BootstrapMB: %p Completed %08x %08x %08x",
	    hba->sli.sli4.bootstrapmb.virt,
	    *iptr, *(iptr+1), *(iptr+2));

	return (tmo);

} /* emlxs_issue_bootstrap_mb() */
2254
2255
/*
 * Initialize the bootstrap mailbox by issuing the FW_INITIALIZE
 * command through it.  Idempotent: returns immediately once
 * FC_BOOTSTRAPMB_INIT has been set on a prior success.
 * Returns 0 on success, 1 on failure.
 */
static int
emlxs_init_bootstrap_mb(emlxs_hba_t *hba)
{
#ifdef FMA_SUPPORT
	emlxs_port_t *port = &PPORT;
#endif /* FMA_SUPPORT */
	uint32_t *iptr;
	uint32_t tmo;

	/* The hardware must be out of error state before we touch it */
	if (emlxs_check_hdw_ready(hba)) {
		return (1);
	}

	if (hba->flag & FC_BOOTSTRAPMB_INIT) {
		return (0);  /* Already initialized */
	}

	/* NOTE: tmo is in 10ms ticks */
	tmo = emlxs_check_bootstrap_ready(hba, 3000);
	if (tmo == 0) {
		return (1);
	}

	/* Issue FW_INITIALIZE command */

	/* Special words to initialize bootstrap mbox MUST be little endian */
	iptr = (uint32_t *)hba->sli.sli4.bootstrapmb.virt;
	*iptr = LE_SWAP32(FW_INITIALIZE_WORD0);
	*(iptr+1) = LE_SWAP32(FW_INITIALIZE_WORD1);

	/* Flush the command to the device before ringing the doorbell */
	EMLXS_MPDATA_SYNC(hba->sli.sli4.bootstrapmb.dma_handle, 0,
	    MAILBOX_CMD_BSIZE, DDI_DMA_SYNC_FORDEV);

	emlxs_data_dump(port, "FW_INIT", (uint32_t *)iptr, 6, 0);
	if (!emlxs_issue_bootstrap_mb(hba, tmo)) {
		return (1);
	}

#ifdef FMA_SUPPORT
	/* Fail the init if the DMA handle has faulted */
	if (emlxs_fm_check_dma_handle(hba, hba->sli.sli4.bootstrapmb.dma_handle)
	    != DDI_FM_OK) {
		EMLXS_MSGF(EMLXS_CONTEXT,
		    &emlxs_invalid_dma_handle_msg,
		    "init_bootstrap_mb: hdl=%p",
		    hba->sli.sli4.bootstrapmb.dma_handle);
		return (1);
	}
#endif
	hba->flag |= FC_BOOTSTRAPMB_INIT;
	return (0);

} /* emlxs_init_bootstrap_mb() */
2308
2309
2310
2311
2312 static uint32_t
emlxs_sli4_hba_init(emlxs_hba_t * hba)2313 emlxs_sli4_hba_init(emlxs_hba_t *hba)
2314 {
2315 int rc;
2316 uint16_t i;
2317 emlxs_port_t *vport;
2318 emlxs_config_t *cfg = &CFG;
2319 CHANNEL *cp;
2320 VPIobj_t *vpip;
2321
2322 /* Restart the adapter */
2323 if (emlxs_sli4_hba_reset(hba, 1, 0, 0)) {
2324 return (1);
2325 }
2326
2327 for (i = 0; i < hba->chan_count; i++) {
2328 cp = &hba->chan[i];
2329 cp->iopath = (void *)&hba->sli.sli4.wq[i];
2330 }
2331
2332 /* Initialize all the port objects */
2333 hba->vpi_max = 0;
2334 for (i = 0; i < MAX_VPORTS; i++) {
2335 vport = &VPORT(i);
2336 vport->hba = hba;
2337 vport->vpi = i;
2338
2339 vpip = &vport->VPIobj;
2340 vpip->index = i;
2341 vpip->VPI = i;
2342 vpip->port = vport;
2343 vpip->state = VPI_STATE_OFFLINE;
2344 vport->vpip = vpip;
2345 }
2346
2347 /* Set the max node count */
2348 if (hba->max_nodes == 0) {
2349 if (cfg[CFG_NUM_NODES].current > 0) {
2350 hba->max_nodes = cfg[CFG_NUM_NODES].current;
2351 } else {
2352 hba->max_nodes = 4096;
2353 }
2354 }
2355
2356 rc = emlxs_init_bootstrap_mb(hba);
2357 if (rc) {
2358 return (rc);
2359 }
2360
2361 hba->sli.sli4.cfgFCOE.FCMap[0] = FCOE_FCF_MAP0;
2362 hba->sli.sli4.cfgFCOE.FCMap[1] = FCOE_FCF_MAP1;
2363 hba->sli.sli4.cfgFCOE.FCMap[2] = FCOE_FCF_MAP2;
2364
2365 if ((hba->sli_intf & SLI_INTF_IF_TYPE_MASK) == SLI_INTF_IF_TYPE_0) {
2366 /* Cache the UE MASK registers value for UE error detection */
2367 hba->sli.sli4.ue_mask_lo = ddi_get32(hba->pci_acc_handle,
2368 (uint32_t *)(hba->pci_addr + PCICFG_UE_MASK_LO_OFFSET));
2369 hba->sli.sli4.ue_mask_hi = ddi_get32(hba->pci_acc_handle,
2370 (uint32_t *)(hba->pci_addr + PCICFG_UE_MASK_HI_OFFSET));
2371 }
2372
2373 return (0);
2374
2375 } /* emlxs_sli4_hba_init() */
2376
2377
/*
 * Reset the adapter and reinitialize the driver's per-HBA and per-port
 * soft state.  The reset mechanism depends on the SLI interface type:
 * if_type 0 uses a RESET mailbox command through the bootstrap mailbox,
 * if_type 2 writes SLI_CNTL_INIT_PORT to the control register.
 * quiesce != 0 selects the gentler (no hba_kill) path.
 * restart and skip_post are unused here (ARGSUSED).
 * Returns 0 on success, non-zero on failure.
 */
/*ARGSUSED*/
static uint32_t
emlxs_sli4_hba_reset(emlxs_hba_t *hba, uint32_t restart, uint32_t skip_post,
    uint32_t quiesce)
{
	emlxs_port_t *port = &PPORT;
	emlxs_port_t *vport;
	CHANNEL *cp;
	emlxs_config_t *cfg = &CFG;
	MAILBOXQ mboxq;
	uint32_t value;
	uint32_t i;
	uint32_t rc;
	uint16_t channelno;
	uint32_t status;
	uint32_t err1;
	uint32_t err2;
	uint8_t generate_event = 0;

	/* Respect the administrative knob disabling adapter resets */
	if (!cfg[CFG_RESET_ENABLE].current) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_reset_failed_msg,
		    "Adapter reset disabled.");
		EMLXS_STATE_CHANGE(hba, FC_ERROR);

		return (1);
	}

	switch (hba->sli_intf & SLI_INTF_IF_TYPE_MASK) {
	case SLI_INTF_IF_TYPE_0:
		if (quiesce == 0) {
			emlxs_sli4_hba_kill(hba);

			/*
			 * Initialize Hardware that will be used to bring
			 * SLI4 online.
			 */
			rc = emlxs_init_bootstrap_mb(hba);
			if (rc) {
				return (rc);
			}
		}

		/* Build and issue the RESET mailbox command */
		bzero((void *)&mboxq, sizeof (MAILBOXQ));
		emlxs_mb_resetport(hba, &mboxq);

		if (quiesce == 0) {
			if (emlxs_sli4_issue_mbox_cmd(hba, &mboxq,
			    MBX_POLL, 0) != MBX_SUCCESS) {
				/* Timeout occurred */
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_reset_failed_msg,
				    "Timeout: RESET");
				EMLXS_STATE_CHANGE(hba, FC_ERROR);
				/* Log a dump event - not supported */
				return (1);
			}
		} else {
			/* Quiesce path uses the non-disruptive issuer */
			if (emlxs_sli4_issue_mbox_cmd4quiesce(hba, &mboxq,
			    MBX_POLL, 0) != MBX_SUCCESS) {
				EMLXS_STATE_CHANGE(hba, FC_ERROR);
				/* Log a dump event - not supported */
				return (1);
			}
		}
		emlxs_data_dump(port, "resetPort", (uint32_t *)&mboxq, 12, 0);
		break;

	case SLI_INTF_IF_TYPE_2:
		if (quiesce == 0) {
			emlxs_sli4_hba_kill(hba);
		}

		rc = emlxs_check_hdw_ready(hba);
		if (rc > 1) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_debug_msg,
			    "Adapter not ready for reset.");
			return (1);
		}

		/* rc == 1 means the adapter reported an error state */
		if (rc == 1) {
			err1 = ddi_get32(hba->sli.sli4.bar0_acc_handle,
			    hba->sli.sli4.ERR1_reg_addr);
			err2 = ddi_get32(hba->sli.sli4.bar0_acc_handle,
			    hba->sli.sli4.ERR2_reg_addr);

			/* Don't generate an event if dump was forced */
			if ((err1 != 0x2) || (err2 != 0x2)) {
				generate_event = 1;
			}
		}

		/* Reset the port now */

		mutex_enter(&EMLXS_PORT_LOCK);
		value = SLI_CNTL_INIT_PORT;

		ddi_put32(hba->sli.sli4.bar0_acc_handle,
		    hba->sli.sli4.CNTL_reg_addr, value);
		mutex_exit(&EMLXS_PORT_LOCK);

		break;
	}

	/* Reset the hba structure */
	hba->flag &= FC_RESET_MASK;

	for (channelno = 0; channelno < hba->chan_count; channelno++) {
		cp = &hba->chan[channelno];
		cp->hba = hba;
		cp->channelno = channelno;
	}

	/* Clear the link/IO accounting state */
	hba->channel_tx_count = 0;
	hba->io_count = 0;
	hba->iodone_count = 0;
	hba->topology = 0;
	hba->linkspeed = 0;
	hba->heartbeat_active = 0;
	hba->discovery_timer = 0;
	hba->linkup_timer = 0;
	hba->loopback_tics = 0;

	/* Reset the port objects */
	for (i = 0; i < MAX_VPORTS; i++) {
		vport = &VPORT(i);

		vport->flag &= EMLXS_PORT_RESET_MASK;
		vport->did = 0;
		vport->prev_did = 0;
		vport->lip_type = 0;
		bzero(&vport->fabric_sparam, sizeof (SERV_PARM));
		bzero(&vport->prev_fabric_sparam, sizeof (SERV_PARM));

		/* Rebuild the base node entry for this port */
		bzero((caddr_t)&vport->node_base, sizeof (NODELIST));
		vport->node_base.nlp_Rpi = 0;
		vport->node_base.nlp_DID = 0xffffff;
		vport->node_base.nlp_list_next = NULL;
		vport->node_base.nlp_list_prev = NULL;
		vport->node_base.nlp_active = 1;
		vport->node_count = 0;

		if (vport->ub_count < EMLXS_UB_TOKEN_OFFSET) {
			vport->ub_count = EMLXS_UB_TOKEN_OFFSET;
		}
	}

	/* The reset must leave the hardware in a usable state */
	if (emlxs_check_hdw_ready(hba)) {
		return (1);
	}

	if (generate_event) {
		status = emlxs_sli4_read_status(hba);
		if (status & SLI_STATUS_DUMP_IMAGE_PRESENT) {
			emlxs_log_dump_event(port, NULL, 0);
		}
	}

	return (0);

} /* emlxs_sli4_hba_reset */
2538
2539
/* SGL selector values for emlxs_pkt_to_sgl(); SGL_LAST is OR-ed in */
#define	SGL_CMD		0
#define	SGL_RESP	1
#define	SGL_DATA	2
#define	SGL_LAST	0x80

/*
 * Convert one of a packet's DMA cookie lists (cmd/resp/data, selected
 * by sgl_type) into a chain of ULP_SGE64 entries starting at sge.
 * Each cookie is split into SGEs of at most EMLXS_MAX_SGE_SIZE bytes.
 * If SGL_LAST is OR-ed into sgl_type the final SGE is marked last.
 * Returns a pointer one past the last SGE written, or NULL on an
 * unknown sgl_type.  If pcnt is non-NULL it receives the total byte
 * count staged.
 * NOTE(review): the trailing copy is unconditional, so when no data
 * bytes are staged one SGE is still emitted — presumably intentional
 * for zero-length payloads; confirm against callers.
 */
/*ARGSUSED*/
static ULP_SGE64 *
emlxs_pkt_to_sgl(emlxs_port_t *port, fc_packet_t *pkt, ULP_SGE64 *sge,
    uint32_t sgl_type, uint32_t *pcnt)
{
#ifdef DEBUG_SGE
	emlxs_hba_t *hba = HBA;
#endif /* DEBUG_SGE */
	ddi_dma_cookie_t *cp;
	uint_t i;
	uint_t last;
	int32_t size;
	int32_t sge_size;
	uint64_t sge_addr;
	int32_t len;
	uint32_t cnt;
	uint_t cookie_cnt;
	ULP_SGE64 stage_sge;

	/* Separate the LAST flag from the payload selector */
	last = sgl_type & SGL_LAST;
	sgl_type &= ~SGL_LAST;

#if (EMLXS_MODREV >= EMLXS_MODREV3)
	/* MODREV3+ exposes full cookie arrays on the packet */
	switch (sgl_type) {
	case SGL_CMD:
		cp = pkt->pkt_cmd_cookie;
		cookie_cnt = pkt->pkt_cmd_cookie_cnt;
		size = (int32_t)pkt->pkt_cmdlen;
		break;

	case SGL_RESP:
		cp = pkt->pkt_resp_cookie;
		cookie_cnt = pkt->pkt_resp_cookie_cnt;
		size = (int32_t)pkt->pkt_rsplen;
		break;


	case SGL_DATA:
		cp = pkt->pkt_data_cookie;
		cookie_cnt = pkt->pkt_data_cookie_cnt;
		size = (int32_t)pkt->pkt_datalen;
		break;

	default:
		return (NULL);
	}

#else
	/* Older framework revisions carry a single embedded cookie */
	switch (sgl_type) {
	case SGL_CMD:
		cp = &pkt->pkt_cmd_cookie;
		cookie_cnt = 1;
		size = (int32_t)pkt->pkt_cmdlen;
		break;

	case SGL_RESP:
		cp = &pkt->pkt_resp_cookie;
		cookie_cnt = 1;
		size = (int32_t)pkt->pkt_rsplen;
		break;


	case SGL_DATA:
		cp = &pkt->pkt_data_cookie;
		cookie_cnt = 1;
		size = (int32_t)pkt->pkt_datalen;
		break;

	default:
		return (NULL);
	}
#endif /* >= EMLXS_MODREV3 */

	/*
	 * Build each SGE in stage_sge first; it is copied (byte-swapped)
	 * into the SGL one step behind, so the last entry can be flagged
	 * before it is committed.
	 */
	stage_sge.offset = 0;
	stage_sge.type = 0;
	stage_sge.last = 0;
	cnt = 0;
	for (i = 0; i < cookie_cnt && size > 0; i++, cp++) {

		sge_size = cp->dmac_size;
		sge_addr = cp->dmac_laddress;
		while (sge_size && size) {
			if (cnt) {
				/* Copy staged SGE before we build next one */
				BE_SWAP32_BCOPY((uint8_t *)&stage_sge,
				    (uint8_t *)sge, sizeof (ULP_SGE64));
				sge++;
			}
			/* Clamp to both the cookie and the payload length */
			len = MIN(EMLXS_MAX_SGE_SIZE, sge_size);
			len = MIN(size, len);

			stage_sge.addrHigh =
			    PADDR_HI(sge_addr);
			stage_sge.addrLow =
			    PADDR_LO(sge_addr);
			stage_sge.length = len;
			if (sgl_type == SGL_DATA) {
				/* Data SGEs carry their relative offset */
				stage_sge.offset = cnt;
			}
#ifdef DEBUG_SGE
			emlxs_data_dump(port, "SGE", (uint32_t *)&stage_sge,
			    4, 0);
#endif /* DEBUG_SGE */
			sge_addr += len;
			sge_size -= len;

			cnt += len;
			size -= len;
		}
	}

	/* Flag and commit the final staged SGE */
	if (last) {
		stage_sge.last = 1;
	}
	BE_SWAP32_BCOPY((uint8_t *)&stage_sge, (uint8_t *)sge,
	    sizeof (ULP_SGE64));

	sge++;

	if (pcnt) {
		*pcnt = cnt;
	}
	return (sge);

} /* emlxs_pkt_to_sgl */
2670
2671
2672 /*ARGSUSED*/
2673 uint32_t
emlxs_sli4_bde_setup(emlxs_port_t * port,emlxs_buf_t * sbp)2674 emlxs_sli4_bde_setup(emlxs_port_t *port, emlxs_buf_t *sbp)
2675 {
2676 emlxs_hba_t *hba = HBA;
2677 fc_packet_t *pkt;
2678 XRIobj_t *xrip;
2679 ULP_SGE64 *sge;
2680 emlxs_wqe_t *wqe;
2681 IOCBQ *iocbq;
2682 ddi_dma_cookie_t *cp_cmd;
2683 ddi_dma_cookie_t *cp_data;
2684 uint64_t sge_addr;
2685 uint32_t cmd_cnt;
2686 uint32_t resp_cnt;
2687
2688 iocbq = (IOCBQ *) &sbp->iocbq;
2689 wqe = &iocbq->wqe;
2690 pkt = PRIV2PKT(sbp);
2691 xrip = sbp->xrip;
2692 sge = xrip->SGList->virt;
2693
2694 #if (EMLXS_MODREV >= EMLXS_MODREV3)
2695 cp_cmd = pkt->pkt_cmd_cookie;
2696 cp_data = pkt->pkt_data_cookie;
2697 #else
2698 cp_cmd = &pkt->pkt_cmd_cookie;
2699 cp_data = &pkt->pkt_data_cookie;
2700 #endif /* >= EMLXS_MODREV3 */
2701
2702 iocbq = &sbp->iocbq;
2703 if (iocbq->flag & IOCB_FCP_CMD) {
2704
2705 if (pkt->pkt_tran_type == FC_PKT_OUTBOUND) {
2706 return (1);
2707 }
2708
2709 /* CMD payload */
2710 sge = emlxs_pkt_to_sgl(port, pkt, sge, SGL_CMD, &cmd_cnt);
2711 if (! sge) {
2712 return (1);
2713 }
2714
2715 /* DATA payload */
2716 if (pkt->pkt_datalen != 0) {
2717 /* RSP payload */
2718 sge = emlxs_pkt_to_sgl(port, pkt, sge,
2719 SGL_RESP, &resp_cnt);
2720 if (! sge) {
2721 return (1);
2722 }
2723
2724 /* Data payload */
2725 sge = emlxs_pkt_to_sgl(port, pkt, sge,
2726 SGL_DATA | SGL_LAST, 0);
2727 if (! sge) {
2728 return (1);
2729 }
2730 sgl_done:
2731 if (hba->sli.sli4.flag & EMLXS_SLI4_PHON) {
2732 sge_addr = cp_data->dmac_laddress;
2733 wqe->FirstData.addrHigh = PADDR_HI(sge_addr);
2734 wqe->FirstData.addrLow = PADDR_LO(sge_addr);
2735 wqe->FirstData.tus.f.bdeSize =
2736 cp_data->dmac_size;
2737 }
2738 } else {
2739 /* RSP payload */
2740 sge = emlxs_pkt_to_sgl(port, pkt, sge,
2741 SGL_RESP | SGL_LAST, &resp_cnt);
2742 if (! sge) {
2743 return (1);
2744 }
2745 }
2746
2747 wqe->un.FcpCmd.Payload.addrHigh =
2748 PADDR_HI(cp_cmd->dmac_laddress);
2749 wqe->un.FcpCmd.Payload.addrLow =
2750 PADDR_LO(cp_cmd->dmac_laddress);
2751 wqe->un.FcpCmd.Payload.tus.f.bdeSize = cmd_cnt;
2752 wqe->un.FcpCmd.PayloadLength = cmd_cnt + resp_cnt;
2753
2754 } else {
2755
2756 if (pkt->pkt_tran_type == FC_PKT_OUTBOUND) {
2757 /* CMD payload */
2758 sge = emlxs_pkt_to_sgl(port, pkt, sge,
2759 SGL_CMD | SGL_LAST, &cmd_cnt);
2760 if (! sge) {
2761 return (1);
2762 }
2763 } else {
2764 /* CMD payload */
2765 sge = emlxs_pkt_to_sgl(port, pkt, sge,
2766 SGL_CMD, &cmd_cnt);
2767 if (! sge) {
2768 return (1);
2769 }
2770
2771 /* RSP payload */
2772 sge = emlxs_pkt_to_sgl(port, pkt, sge,
2773 SGL_RESP | SGL_LAST, &resp_cnt);
2774 if (! sge) {
2775 return (1);
2776 }
2777 wqe->un.GenReq.PayloadLength = cmd_cnt;
2778 }
2779
2780 wqe->un.GenReq.Payload.addrHigh =
2781 PADDR_HI(cp_cmd->dmac_laddress);
2782 wqe->un.GenReq.Payload.addrLow =
2783 PADDR_LO(cp_cmd->dmac_laddress);
2784 wqe->un.GenReq.Payload.tus.f.bdeSize = cmd_cnt;
2785 }
2786 return (0);
2787 } /* emlxs_sli4_bde_setup */
2788
2789
2790
2791
#ifdef SFCT_SUPPORT
/*
 * Build the SGL and WQE payload descriptors for a target-mode (SFCT)
 * data transfer.  For TRECEIVE64 the first SGE carries a driver-built
 * XFER_RDY payload; for TSEND64 the first SGE is a SKIP.  A second
 * SKIP SGE follows in both cases, then the data buffer is carved into
 * SGEs of at most EMLXS_MAX_SGE_SIZE bytes.  Only a single sglist
 * entry on the fct buffer is supported.  Returns 0 on success, 1 on
 * failure.
 */
/*ARGSUSED*/
static uint32_t
emlxs_sli4_fct_bde_setup(emlxs_port_t *port, emlxs_buf_t *sbp)
{
	emlxs_hba_t *hba = HBA;
	emlxs_wqe_t *wqe;
	ULP_SGE64 stage_sge;
	ULP_SGE64 *sge;
	IOCB *iocb;
	IOCBQ *iocbq;
	MATCHMAP *mp;
	MATCHMAP *fct_mp;
	XRIobj_t *xrip;
	uint64_t sge_addr;
	uint32_t sge_size;
	uint32_t cnt;
	uint32_t len;
	uint32_t size;
	uint32_t *xrdy_vaddr;
	stmf_data_buf_t *dbuf;

	iocbq = &sbp->iocbq;
	iocb = &iocbq->iocb;
	wqe = &iocbq->wqe;
	xrip = sbp->xrip;

	/* Nothing to do without an fct data buffer */
	if (!sbp->fct_buf) {
		return (0);
	}

	size = sbp->fct_buf->db_data_size;

	/*
	 * The hardware will automatically round up
	 * to multiple of 4.
	 *
	 * if (size & 3) {
	 *	size = (size + 3) & 0xfffffffc;
	 * }
	 */
	fct_mp = (MATCHMAP *)sbp->fct_buf->db_port_private;

	if (sbp->fct_buf->db_sglist_length != 1) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_fct_error_msg,
		    "fct_bde_setup: Only 1 sglist entry supported: %d",
		    sbp->fct_buf->db_sglist_length);
		return (1);
	}

	sge = xrip->SGList->virt;

	if (iocb->ULPCOMMAND == CMD_FCP_TRECEIVE64_CX) {

		/* TRECEIVE needs an XFER_RDY payload in DMA-able memory */
		mp = emlxs_mem_buf_alloc(hba, EMLXS_XFER_RDY_SIZE);
		if (!mp || !mp->virt || !mp->phys) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_fct_error_msg,
			    "fct_bde_setup: Cannot allocate XRDY memory");
			return (1);
		}
		/* Save the MATCHMAP info to free this memory later */
		iocbq->bp = mp;

		/* Point to XRDY payload */
		xrdy_vaddr = (uint32_t *)(mp->virt);

		/* Fill in burstsize in payload */
		*xrdy_vaddr++ = 0;
		*xrdy_vaddr++ = LE_SWAP32(size);
		*xrdy_vaddr = 0;

		/* First 2 SGEs are XRDY and SKIP */
		stage_sge.addrHigh = PADDR_HI(mp->phys);
		stage_sge.addrLow = PADDR_LO(mp->phys);
		stage_sge.length = EMLXS_XFER_RDY_SIZE;
		stage_sge.offset = 0;
		stage_sge.type = 0;
		stage_sge.last = 0;

		/* Words 0-3 */
		wqe->un.FcpCmd.Payload.addrHigh = stage_sge.addrHigh;
		wqe->un.FcpCmd.Payload.addrLow = stage_sge.addrLow;
		wqe->un.FcpCmd.Payload.tus.f.bdeSize = EMLXS_XFER_RDY_SIZE;
		wqe->un.FcpCmd.PayloadLength = EMLXS_XFER_RDY_SIZE;

	} else {	/* CMD_FCP_TSEND64_CX */
		/* First 2 SGEs are SKIP */
		stage_sge.addrHigh = 0;
		stage_sge.addrLow = 0;
		stage_sge.length = 0;
		stage_sge.offset = 0;
		stage_sge.type = EMLXS_SGE_TYPE_SKIP;
		stage_sge.last = 0;

		/* Words 0-3 */
		wqe->un.FcpCmd.Payload.addrHigh = PADDR_HI(fct_mp->phys);
		wqe->un.FcpCmd.Payload.addrLow = PADDR_LO(fct_mp->phys);

		/* The BDE should match the contents of the first SGE payload */
		len = MIN(EMLXS_MAX_SGE_SIZE, size);
		wqe->un.FcpCmd.Payload.tus.f.bdeSize = len;

		/* The PayloadLength should be set to 0 for TSEND64. */
		wqe->un.FcpCmd.PayloadLength = 0;
	}

	dbuf = sbp->fct_buf;
	/*
	 * TotalTransferCount equals to Relative Offset field (Word 4)
	 * in both TSEND64 and TRECEIVE64 WQE.
	 */
	wqe->un.FcpCmd.TotalTransferCount = dbuf->db_relative_offset;

	/* Copy staged SGE into SGL */
	BE_SWAP32_BCOPY((uint8_t *)&stage_sge,
	    (uint8_t *)sge, sizeof (ULP_SGE64));
	sge++;

	/* Second SGE is always a SKIP */
	stage_sge.addrHigh = 0;
	stage_sge.addrLow = 0;
	stage_sge.length = 0;
	stage_sge.offset = 0;
	stage_sge.type = EMLXS_SGE_TYPE_SKIP;
	stage_sge.last = 0;

	/* Copy staged SGE into SGL */
	BE_SWAP32_BCOPY((uint8_t *)&stage_sge,
	    (uint8_t *)sge, sizeof (ULP_SGE64));
	sge++;

	sge_size = size;
	sge_addr = fct_mp->phys;
	cnt = 0;

	/* Build SGEs */
	while (sge_size) {
		if (cnt) {
			/* Copy staged SGE before we build next one */
			BE_SWAP32_BCOPY((uint8_t *)&stage_sge,
			    (uint8_t *)sge, sizeof (ULP_SGE64));
			sge++;
		}

		len = MIN(EMLXS_MAX_SGE_SIZE, sge_size);

		stage_sge.addrHigh = PADDR_HI(sge_addr);
		stage_sge.addrLow = PADDR_LO(sge_addr);
		stage_sge.length = len;
		stage_sge.offset = cnt;
		stage_sge.type = EMLXS_SGE_TYPE_DATA;

		sge_addr += len;
		sge_size -= len;
		cnt += len;
	}

	/* Mark the final staged SGE before committing it */
	stage_sge.last = 1;

	if (hba->sli.sli4.flag & EMLXS_SLI4_PHON) {
		/* Mirror the last staged SGE into the WQE FirstData BDE */
		wqe->FirstData.addrHigh = stage_sge.addrHigh;
		wqe->FirstData.addrLow = stage_sge.addrLow;
		wqe->FirstData.tus.f.bdeSize = stage_sge.length;
	}
	/* Copy staged SGE into SGL */
	BE_SWAP32_BCOPY((uint8_t *)&stage_sge,
	    (uint8_t *)sge, sizeof (ULP_SGE64));

	return (0);

} /* emlxs_sli4_fct_bde_setup */
#endif /* SFCT_SUPPORT */
2963
2964
2965 static void
emlxs_sli4_issue_iocb_cmd(emlxs_hba_t * hba,CHANNEL * cp,IOCBQ * iocbq)2966 emlxs_sli4_issue_iocb_cmd(emlxs_hba_t *hba, CHANNEL *cp, IOCBQ *iocbq)
2967 {
2968 emlxs_port_t *port = &PPORT;
2969 emlxs_buf_t *sbp;
2970 uint32_t channelno;
2971 int32_t throttle;
2972 emlxs_wqe_t *wqe;
2973 emlxs_wqe_t *wqeslot;
2974 WQ_DESC_t *wq;
2975 uint32_t flag;
2976 uint16_t next_wqe;
2977 off_t offset;
2978 #ifdef NODE_THROTTLE_SUPPORT
2979 int32_t node_throttle;
2980 NODELIST *marked_node = NULL;
2981 #endif /* NODE_THROTTLE_SUPPORT */
2982
2983
2984 channelno = cp->channelno;
2985 wq = (WQ_DESC_t *)cp->iopath;
2986
2987 #ifdef DEBUG_FASTPATH
2988 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
2989 "ISSUE WQE channel: %x %p", channelno, wq);
2990 #endif /* DEBUG_FASTPATH */
2991
2992 throttle = 0;
2993
2994 /* Check if FCP ring and adapter is not ready */
2995 /* We may use any ring for FCP_CMD */
2996 if (iocbq && (iocbq->flag & IOCB_FCP_CMD) && (hba->state != FC_READY)) {
2997 if (!(iocbq->flag & IOCB_SPECIAL) || !iocbq->port ||
2998 (((emlxs_port_t *)iocbq->port)->mode == MODE_INITIATOR)) {
2999 emlxs_tx_put(iocbq, 1);
3000 return;
3001 }
3002 }
3003
3004 /* Attempt to acquire CMD_RING lock */
3005 if (mutex_tryenter(&EMLXS_QUE_LOCK(channelno)) == 0) {
3006 /* Queue it for later */
3007 if (