emlxs_hba.c (728bdc9b) emlxs_hba.c (291a2b48)
1/*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE

--- 6 unchanged lines hidden (view full) ---

15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
22/*
23 * Copyright 2009 Emulex. All rights reserved.
24 * Use is subject to License terms.
25 */
26
27#define EMLXS_FW_TABLE_DEF
28#define EMLXS_MODEL_DEF
29
30#include <emlxs.h>
31
32/* Required for EMLXS_CONTEXT in EMLXS_MSGF calls */
33EMLXS_MSG_DEF(EMLXS_HBA_C);
34
35static uint32_t emlxs_decode_biu_rev(uint32_t rev);
36static uint32_t emlxs_decode_endec_rev(uint32_t rev);
37static void emlxs_parse_prog_types(emlxs_hba_t *hba, char *types);
38static int32_t emlxs_parse_vpd(emlxs_hba_t *hba, uint8_t *vpd,
39 uint32_t size);
40static void emlxs_build_prog_types(emlxs_hba_t *hba, char *prog_types);
41static void emlxs_handle_async_event(emlxs_hba_t *hba, RING *rp,
42 IOCBQ *iocbq);
43static void emlxs_process_link_speed(emlxs_hba_t *hba);
44static void emlxs_decode_label(char *label, char *buffer);
45
46#ifdef MODFW_SUPPORT
47static void emlxs_fw_load(emlxs_hba_t *hba, emlxs_firmware_t *fw);
48static void emlxs_fw_unload(emlxs_hba_t *hba, emlxs_firmware_t *fw);
49#endif /* MODFW_SUPPORT */
50
51#ifdef MSI_SUPPORT
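/*
 * emlxs_msi_map[] holds one interrupt map per supported MSI mode
 * (1, 2, 4 or 8 vectors) and emlxs_msi_mask[] holds the matching
 * vector-0 attention masks; the exact bit layouts come from the
 * EMLXS_MSI_MAP and EMLXS_MSI0_MASK definitions in the headers.
 */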
52uint32_t emlxs_msi_map[EMLXS_MSI_MODES][EMLXS_MSI_MAX_INTRS] =
53 {EMLXS_MSI_MAP1, EMLXS_MSI_MAP2, EMLXS_MSI_MAP4, EMLXS_MSI_MAP8};
54uint32_t emlxs_msi_mask[EMLXS_MSI_MODES] =
55 {EMLXS_MSI0_MASK1, EMLXS_MSI0_MASK2, EMLXS_MSI0_MASK4,
56 EMLXS_MSI0_MASK8};
57#endif /* MSI_SUPPORT */
58
59
60static uint32_t emlxs_disable_traffic_cop = 1;
61
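/*
 * Name tables used to decode ring numbers and FF hardware states
 * into printable strings for log and debug messages.
 */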
62emlxs_table_t emlxs_ring_table[] = {
63 {FC_FCP_RING, "FCP Ring"},
64 {FC_IP_RING, "IP Ring"},
65 {FC_ELS_RING, "ELS Ring"},
66 {FC_CT_RING, "CT Ring"}
67
68}; /* emlxs_ring_table */
69
70
71emlxs_table_t emlxs_ffstate_table[] = {
72 {0, "NULL"},
73 {FC_ERROR, "ERROR"},
74 {FC_KILLED, "KILLED"},
75 {FC_WARM_START, "WARM_START"},
76 {FC_INIT_START, "INIT_START"},
77 {FC_INIT_NVPARAMS, "INIT_NVPARAMS"},
78 {FC_INIT_REV, "INIT_REV"},
79 {FC_INIT_CFGPORT, "INIT_CFGPORT"},
80 {FC_INIT_CFGRING, "INIT_CFGRING"},
81 {FC_INIT_INITLINK, "INIT_INITLINK"},
82 {FC_LINK_DOWN, "LINK_DOWN"},
83 {FC_LINK_UP, "LINK_UP"},
84 {FC_CLEAR_LA, "CLEAR_LA"},
85 {FC_READY, "READY"}
86
87}; /* emlxs_ffstate_table */
88
89
90/*
91 * emlxs_ffinit()
92 *
93 * This routine will start initialization of the FireFly Chipset
94 */
95extern int
96emlxs_ffinit(emlxs_hba_t *hba)
97{
98 emlxs_port_t *port = &PPORT;
99 emlxs_config_t *cfg;
100 emlxs_vpd_t *vpd;
101 MAILBOX *mb;

--- 9 unchanged lines hidden (view full) ---

111 uint32_t key = 0;
112 uint32_t fw_check;
113 uint32_t rval;
114 uint32_t offset;
115 uint8_t vpd_data[DMP_VPD_SIZE];
116 uint32_t MaxRbusSize;
117 uint32_t MaxIbusSize;
118 uint32_t sli_mode;
119 uint32_t sli_mode_mask;
120
121 cfg = &CFG;
122 vpd = &VPD;
123 mb = 0;
124 MaxRbusSize = 0;
125 MaxIbusSize = 0;
126 read_rev_reset = 0;
127
128 if (hba->bus_type == SBUS_FC) {
129 (void) READ_SBUS_CSR_REG(hba, FC_SHS_REG(hba,
130 hba->sbus_csr_addr));
131 }
132
133#ifdef SLI3_SUPPORT
134 /* Initialize sli mode based on configuration parameter */
135 switch (cfg[CFG_SLI_MODE].current) {
136 case 2: /* SLI2 mode */
137 sli_mode = EMLXS_HBA_SLI2_MODE;
138 sli_mode_mask = EMLXS_SLI2_MASK;
139 break;
140
141 case 3: /* SLI3 mode */
142 sli_mode = EMLXS_HBA_SLI3_MODE;
143 sli_mode_mask = EMLXS_SLI3_MASK;
144 break;
145
146 case 0: /* Best available */
147 case 1: /* Best available */
148 default:
149 if (hba->model_info.sli_mask & EMLXS_SLI4_MASK) {
150 sli_mode = EMLXS_HBA_SLI4_MODE;
151 sli_mode_mask = EMLXS_SLI4_MASK;
152 } else if (hba->model_info.sli_mask & EMLXS_SLI3_MASK) {
153 sli_mode = EMLXS_HBA_SLI3_MODE;
154 sli_mode_mask = EMLXS_SLI3_MASK;
155 } else if (hba->model_info.sli_mask & EMLXS_SLI2_MASK) {
156 sli_mode = EMLXS_HBA_SLI2_MODE;
157 sli_mode_mask = EMLXS_SLI2_MASK;
158 }
159 }
160 /* SBUS adapters only available in SLI2 */
161 if (hba->bus_type == SBUS_FC) {
162 sli_mode = EMLXS_HBA_SLI2_MODE;
163 sli_mode_mask = EMLXS_SLI2_MASK;
164 }
165
166#endif /* SLI3_SUPPORT */
167
168 /* Set the fw_check flag */
169 fw_check = cfg[CFG_FW_CHECK].current;
170
171 hba->mbox_queue_flag = 0;
172 hba->hc_copy = 0;
173 hba->fc_edtov = FF_DEF_EDTOV;
174 hba->fc_ratov = FF_DEF_RATOV;
175 hba->fc_altov = FF_DEF_ALTOV;
176 hba->fc_arbtov = FF_DEF_ARBTOV;
177
178reset:
179
180 /* Reset and initialize the adapter */
181 if (emlxs_sli_online(hba)) {
182 return (EIO);
183 }
184
185#ifdef FMA_SUPPORT
186 /* Access handle validation */
187 if (hba->fm_caps & DDI_FM_ACCCHK_CAPABLE) {
188 if ((emlxs_fm_check_acc_handle(hba->pci_acc_handle)
189 != DDI_FM_OK) ||
190 (emlxs_fm_check_acc_handle(hba->slim_acc_handle)
191 != DDI_FM_OK) ||
192 (emlxs_fm_check_acc_handle(hba->csr_acc_handle)
193 != DDI_FM_OK)) {
194 EMLXS_MSGF(EMLXS_CONTEXT,
195 &emlxs_invalid_access_handle_msg, NULL);
196 return (EIO);
197 }
198 }
199#endif /* FMA_SUPPORT */
200
201 /*
202 * Allocate some memory for buffers
203 */
204 if (emlxs_mem_alloc_buffer(hba) == 0) {
205 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
206 "Unable to allocate memory buffers.");
207
208 emlxs_ffstate_change(hba, FC_ERROR);
209
210 return (ENOMEM);
211 }
212
213 /*
214 * Get a buffer which will be used repeatedly for mailbox commands
215 */
216 if ((mb = (MAILBOX *) emlxs_mem_get(hba, MEM_MBOX | MEM_PRI)) == 0) {
217 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
218 "Unable to allocate mailbox buffer.");
219
220 emlxs_ffstate_change(hba, FC_ERROR);
221 (void) emlxs_mem_free_buffer(hba);
222
223 return (ENOMEM);
224 }
225
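	/*
	 * The single- and dual-channel LP9802 report the same PCI device
	 * id; the DC variant is identified below by its reduced SRAM
	 * configuration, and hba->model_info is replaced with the matching
	 * emlxs_pci_model[] entry when it is found.
	 */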
226 /* Check for the LP9802 (This is a special case) */
227 /* We need to check for dual channel adapter */
228 if (hba->model_info.device_id == PCI_DEVICE_ID_LP9802) {
229 /* Try to determine if this is a DC adapter */
230 if (emlxs_get_max_sram(hba, &MaxRbusSize, &MaxIbusSize) == 0) {
231 if (MaxRbusSize == REDUCED_SRAM_CFG) {
232 /* LP9802DC */
233 for (i = 1; i < EMLXS_PCI_MODEL_COUNT; i++) {

--- 12 unchanged lines hidden (view full) ---

246 &hba->model_info,
247 sizeof (emlxs_model_t));
248 break;
249 }
250 }
251 }
252 }
253 }
254
255 /*
256 * Setup and issue mailbox READ REV command
257 */
258 vpd->opFwRev = 0;
259 vpd->postKernRev = 0;
260 vpd->sli1FwRev = 0;
261 vpd->sli2FwRev = 0;
262 vpd->sli3FwRev = 0;

--- 9 unchanged lines hidden (view full) ---

272 vpd->opFwLabel[0] = 0;
273 vpd->sli1FwLabel[0] = 0;
274 vpd->sli2FwLabel[0] = 0;
275 vpd->sli3FwLabel[0] = 0;
276 vpd->sli4FwLabel[0] = 0;
277
278 emlxs_ffstate_change(hba, FC_INIT_REV);
279 emlxs_mb_read_rev(hba, mb, 0);
280 if (emlxs_sli_issue_mbox_cmd(hba, mb, MBX_WAIT, 0) != MBX_SUCCESS) {
281 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
282 "Unable to read rev. Mailbox cmd=%x status=%x",
283 mb->mbxCommand, mb->mbxStatus);
284
285 emlxs_ffstate_change(hba, FC_ERROR);
286 (void) emlxs_mem_put(hba, MEM_MBOX, (uint8_t *)mb);
287 (void) emlxs_mem_free_buffer(hba);
288
289 return (EIO);
290 }
291
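	/*
	 * A clear 'rr' bit in the READ_REV reply means the firmware did
	 * not return extended revision data (old firmware).  The code
	 * below resets the adapter and retries once; if the bit is still
	 * clear, the firmware is reported as non-operational.
	 */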
292 if (mb->un.varRdRev.rr == 0) {
293 /* Old firmware */
294 if (read_rev_reset == 0) {
295 /* Clean up */
296 (void) emlxs_mem_put(hba, MEM_MBOX, (uint8_t *)mb);
297 (void) emlxs_mem_free_buffer(hba);
298
299 read_rev_reset = 1;

--- 18 unchanged lines hidden (view full) ---

318 goto reset;
319 } else {
320 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg,
321 "Non-operational firmware detected. "
322 "type=%x",
323 mb->un.varRdRev.un.b.ProgType);
324 }
325 }
326
327 vpd->rBit = 1;
328 vpd->sli1FwRev = mb->un.varRdRev.sliFwRev1;
329 bcopy((char *)mb->un.varRdRev.sliFwName1, vpd->sli1FwLabel,
330 16);
331 vpd->sli2FwRev = mb->un.varRdRev.sliFwRev2;
332 bcopy((char *)mb->un.varRdRev.sliFwName2, vpd->sli2FwLabel,
333 16);
334
335 /*
336 * Let's try to read the SLI3 version
337 * Setup and issue mailbox READ REV(v3) command
338 */
339 emlxs_ffstate_change(hba, FC_INIT_REV);
340 emlxs_mb_read_rev(hba, mb, 1);
341
342 if (emlxs_sli_issue_mbox_cmd(hba, mb, MBX_WAIT, 0) !=
343 MBX_SUCCESS) {
344 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
345 "Unable to read rev (v3). Mailbox cmd=%x status=%x",
346 mb->mbxCommand, mb->mbxStatus);
347
348 emlxs_ffstate_change(hba, FC_ERROR);
349 (void) emlxs_mem_put(hba, MEM_MBOX, (uint8_t *)mb);
350 (void) emlxs_mem_free_buffer(hba);
351
352 return (EIO);
353 }
354
355 if (mb->un.varRdRev.rf3) {
356 /*
357 * vpd->sli2FwRev = mb->un.varRdRev.sliFwRev1;
358 * Not needed
359 */
360 vpd->sli3FwRev = mb->un.varRdRev.sliFwRev2;
361 bcopy((char *)mb->un.varRdRev.sliFwName2,
362 vpd->sli3FwLabel, 16);
363 }
364 }
365
366
367 if ((sli_mode == EMLXS_HBA_SLI3_MODE) && (vpd->sli3FwRev == 0)) {
368 if (vpd->sli2FwRev) {
369 sli_mode = EMLXS_HBA_SLI2_MODE;
370 sli_mode_mask = EMLXS_SLI2_MASK;
371 } else {
372 sli_mode = 0;
373 sli_mode_mask = 0;
374 }
375 }
376
377 else if ((sli_mode == EMLXS_HBA_SLI2_MODE) && (vpd->sli2FwRev == 0)) {
378 if (vpd->sli3FwRev) {
379 sli_mode = EMLXS_HBA_SLI3_MODE;
380 sli_mode_mask = EMLXS_SLI3_MASK;
381 } else {
382 sli_mode = 0;
383 sli_mode_mask = 0;
384 }
385 }
386
387 if (!(hba->model_info.sli_mask & sli_mode_mask)) {
388#ifdef SLI3_SUPPORT
389 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
390 "Firmware not available. sli-mode=%d",
391 cfg[CFG_SLI_MODE].current);
392#else
393 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
394 "Firmware not available. sli-mode=2");
395#endif /* SLI3_SUPPORT */
396
397 emlxs_ffstate_change(hba, FC_ERROR);
398 (void) emlxs_mem_put(hba, MEM_MBOX, (uint8_t *)mb);
399 (void) emlxs_mem_free_buffer(hba);
400
401 return (EIO);
402 }
403
404 /* Save information as VPD data */
405 vpd->postKernRev = mb->un.varRdRev.postKernRev;
406 vpd->opFwRev = mb->un.varRdRev.opFwRev;
407 bcopy((char *)mb->un.varRdRev.opFwName, vpd->opFwLabel, 16);
408 vpd->biuRev = mb->un.varRdRev.biuRev;
409 vpd->smRev = mb->un.varRdRev.smRev;
410 vpd->smFwRev = mb->un.varRdRev.un.smFwRev;
411 vpd->endecRev = mb->un.varRdRev.endecRev;

--- 21 unchanged lines hidden (view full) ---

433
434 /* Get adapter VPD information */
435 offset = 0;
436 bzero(vpd_data, sizeof (vpd_data));
437 vpd->port_index = (uint32_t)-1;
438
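	/*
	 * The VPD area is read with DUMP mailbox commands in
	 * DMP_VPD_DUMP_WCOUNT-word chunks; the loop stops on a mailbox
	 * failure, a missing acknowledgment, an empty or short transfer,
	 * or once DMP_VPD_SIZE bytes have been collected.
	 */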
439 while (offset < DMP_VPD_SIZE) {
440 emlxs_mb_dump_vpd(hba, mb, offset);
441 if (emlxs_sli_issue_mbox_cmd(hba, mb, MBX_WAIT, 0) !=
442 MBX_SUCCESS) {
443 /*
444 * Let it go through even if failed.
445 * Not all adapters have VPD info and thus will
446 * fail here. This is not a problem
447 */
448
449 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
450 "No VPD found. offset=%x status=%x", offset,
451 mb->mbxStatus);
452 break;
453 } else {
454 if (mb->un.varDmp.ra == 1) {
455 uint32_t *lp1, *lp2;
456 uint32_t bsize;
457 uint32_t wsize;
458
459 /*
460 * mb->un.varDmp.word_cnt is actually byte
461 * count for the dump reply
462 */
463 bsize = mb->un.varDmp.word_cnt;
464
465 /* Stop if no data was received */
466 if (bsize == 0) {
467 break;
468 }
469
470 /* Check limit on byte size */
471 bsize = (bsize >
472 (sizeof (vpd_data) - offset)) ?
473 (sizeof (vpd_data) - offset) : bsize;
474
475 /*
476 * Convert size from bytes to words with
477 * minimum of 1 word
478 */
479 wsize = (bsize > 4) ? (bsize >> 2) : 1;
480

--- 14 unchanged lines hidden (view full) ---

495
496 /*
497 * Stop if less than a full transfer was
498 * received
499 */
500 if (wsize < DMP_VPD_DUMP_WCOUNT) {
501 break;
502 }
503
504 } else {
505 EMLXS_MSGF(EMLXS_CONTEXT,
506 &emlxs_init_debug_msg,
507 "No VPD acknowledgment. offset=%x",
508 offset);
509 break;
510 }
511 }
512
513 }
514
515 if (vpd_data[0]) {
516 (void) emlxs_parse_vpd(hba, (uint8_t *)vpd_data, offset);
517
518 /*
519 * If there is a VPD part number, and it does not
520 * match the current default HBA model info,
521 * replace the default data with an entry that
522 * does match.
523 *
524 * After emlxs_parse_vpd model holds the VPD value
525 * for V2 and part_num holds the value for PN. These
526 * 2 values are NOT necessarily the same.
527 */
528
529 rval = 0;
530 if ((vpd->model[0] != 0) &&
531 (strcmp(&vpd->model[0], hba->model_info.model) != 0)) {
532
533 /* First scan for a V2 match */
534
535 for (i = 1; i < EMLXS_PCI_MODEL_COUNT; i++) {
536 if (strcmp(&vpd->model[0],
537 emlxs_pci_model[i].model) == 0) {
538 bcopy(&emlxs_pci_model[i],
539 &hba->model_info,
540 sizeof (emlxs_model_t));
541 rval = 1;
542 break;
543 }
544 }
545 }
546
547 if (!rval && (vpd->part_num[0] != 0) &&
548 (strcmp(&vpd->part_num[0], hba->model_info.model) != 0)) {
549
550 /* Next scan for a PN match */
551
552 for (i = 1; i < EMLXS_PCI_MODEL_COUNT; i++) {
553 if (strcmp(&vpd->part_num[0],
554 emlxs_pci_model[i].model) == 0) {
555 bcopy(&emlxs_pci_model[i],
556 &hba->model_info,
557 sizeof (emlxs_model_t));
558 break;
559 }
560 }
561 }
562
563 /*
564 * Now lets update hba->model_info with the real
565 * VPD data, if any.
566 */
567
568 /*
569 * Replace the default model description with vpd data
570 */
571 if (vpd->model_desc[0] != 0) {
572 (void) strcpy(hba->model_info.model_desc,
573 vpd->model_desc);
574 }
575
576 /* Replace the default model with vpd data */
577 if (vpd->model[0] != 0) {
578 (void) strcpy(hba->model_info.model, vpd->model);
579 }
580
581 /* Replace the default program types with vpd data */
582 if (vpd->prog_types[0] != 0) {
583 emlxs_parse_prog_types(hba, vpd->prog_types);
584 }
585 }
586
587 /*
588 * Since the adapter model may have changed with the vpd data
589 * let's double-check that the adapter is still supported
590 */
591 if (hba->model_info.flags & EMLXS_NOT_SUPPORTED) {
592 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
593 "Unsupported adapter found. "
594 "Id:%d Device id:0x%x SSDID:0x%x Model:%s",
595 hba->model_info.id, hba->model_info.device_id,
596 hba->model_info.ssdid, hba->model_info.model);
597
598 emlxs_ffstate_change(hba, FC_ERROR);
599 (void) emlxs_mem_put(hba, MEM_MBOX, (uint8_t *)mb);
600 (void) emlxs_mem_free_buffer(hba);
601
602 return (EIO);
603 }
604
605 /* Read the adapter's wakeup parms */
606 (void) emlxs_read_wakeup_parms(hba, &hba->wakeup_parms, 1);
607 emlxs_decode_version(hba->wakeup_parms.u0.boot_bios_wd[0],
608 vpd->boot_version);
609
610 /* Get fcode version property */
611 emlxs_get_fcode_version(hba);
612
613 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
614 "Firmware: kern=%08x stub=%08x sli1=%08x", vpd->postKernRev,
615 vpd->opFwRev, vpd->sli1FwRev);
616
617 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
618 "Firmware: sli2=%08x sli3=%08x sli4=%08x fl=%x", vpd->sli2FwRev,
619 vpd->sli3FwRev, vpd->sli4FwRev, vpd->feaLevelHigh);
620
621 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
622 "BIOS: boot=%s fcode=%s", vpd->boot_version, vpd->fcode_version);
623
624 /*
625 * If firmware checking is enabled and the adapter model indicates
626 * a firmware image, then perform firmware version check
627 */
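	/*
	 * fw_check=1 restricts the version check to Sun-branded adapters
	 * that have a firmware id; fw_check=2 checks any adapter with a
	 * firmware id.
	 */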
628 if (((fw_check == 1) && (hba->model_info.flags & EMLXS_SUN_BRANDED) &&
629 hba->model_info.fwid) || ((fw_check == 2) &&
630 hba->model_info.fwid)) {
631 emlxs_firmware_t *fw;
632
633 /* Find firmware image indicated by adapter model */
634 fw = NULL;
635 for (i = 0; i < EMLXS_FW_COUNT; i++) {
636 if (emlxs_fw_table[i].id == hba->model_info.fwid) {
637 fw = &emlxs_fw_table[i];
638 break;
639 }
640 }
641
642 /*
643 * If the image was found, then verify current firmware
644 * versions of adapter
645 */
646 if (fw) {
647 if ((vpd->postKernRev != fw->kern) ||
648 (vpd->opFwRev != fw->stub) ||
649 (vpd->sli1FwRev != fw->sli1) ||
650 (vpd->sli2FwRev != fw->sli2) ||
651 (fw->sli3 && (vpd->sli3FwRev != fw->sli3)) ||
652 (fw->sli4 && (vpd->sli4FwRev != fw->sli4))) {
653 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg,
654 "Firmware update needed. "
655 "Updating. id=%d fw=%d",
656 hba->model_info.id, hba->model_info.fwid);
657
658#ifdef MODFW_SUPPORT
659 /*
660 * Load the firmware image now
661 * If MODFW_SUPPORT is not defined, the
662 * firmware image will already be defined
663 * in the emlxs_fw_table
664 */
665 emlxs_fw_load(hba, fw);
666#endif /* MODFW_SUPPORT */
667
668 if (fw->image && fw->size) {
669 if (emlxs_fw_download(hba,
670 (char *)fw->image, fw->size, 0)) {
671 EMLXS_MSGF(EMLXS_CONTEXT,
672 &emlxs_init_msg,
673 "Firmware update failed.");
674 }
675#ifdef MODFW_SUPPORT
676 /*
677 * Unload the firmware image from
678 * kernel memory
679 */
680 emlxs_fw_unload(hba, fw);
681#endif /* MODFW_SUPPORT */
682
683 (void) emlxs_mem_put(hba, MEM_MBOX,
684 (uint8_t *)mb);
685 (void) emlxs_mem_free_buffer(hba);
686
687 fw_check = 0;
688
689 goto reset;
690 }
691
692 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg,
693 "Firmware image unavailable.");
694 }
695 } else {
696 /* This should not happen */
697
698 /*
699 * This means either the adapter database is not
700 * correct or a firmware image is missing from the
701 * compile
702 */
703 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg,
704 "Firmware image unavailable. id=%d fw=%d",
705 hba->model_info.id, hba->model_info.fwid);
706 }
707 }
708
709 /*
710 * Add our interrupt routine to kernel's interrupt chain & enable it
711 * If MSI is enabled this will cause Solaris to program the MSI address
712 * and data registers in PCI config space
713 */
714 if (EMLXS_INTR_ADD(hba) != DDI_SUCCESS) {
715 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
716 "Unable to add interrupt(s).");
717
718 emlxs_ffstate_change(hba, FC_ERROR);
719 (void) emlxs_mem_put(hba, MEM_MBOX, (uint8_t *)mb);
720 (void) emlxs_mem_free_buffer(hba);
721
722 return (EIO);
723 }
724
725 emlxs_ffstate_change(hba, FC_INIT_CFGPORT);
726 (void) emlxs_mb_config_port(hba, mb, sli_mode, key);
727 if (emlxs_sli_issue_mbox_cmd(hba, mb, MBX_WAIT, 0) != MBX_SUCCESS) {
728 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
729 "Unable to configure port. "
730 "Mailbox cmd=%x status=%x slimode=%d key=%x",
731 mb->mbxCommand, mb->mbxStatus, sli_mode, key);
732
733#ifdef SLI3_SUPPORT
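		/*
		 * CONFIG_PORT failed: step down to the next lower SLI mode
		 * supported by this adapter and restart initialization from
		 * the reset label above.
		 */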
734 for (sli_mode--; sli_mode > 0; sli_mode--) {
735 /* Check if sli_mode is supported by this adapter */
736 if (hba->model_info.sli_mask &
737 EMLXS_SLI_MASK(sli_mode)) {
738 sli_mode_mask = EMLXS_SLI_MASK(sli_mode);
739 break;
740 }
741 }
742
743 if (sli_mode) {
744 /* Clean up */
745 (void) emlxs_mem_put(hba, MEM_MBOX, (uint8_t *)mb);
746 (void) emlxs_mem_free_buffer(hba);
747
748 fw_check = 0;
749
750 goto reset;
751 }
752#endif /* SLI3_SUPPORT */
753
754 hba->flag &= ~FC_SLIM2_MODE;
755 emlxs_ffstate_change(hba, FC_ERROR);
756 (void) emlxs_mem_put(hba, MEM_MBOX, (uint8_t *)mb);
757 (void) emlxs_mem_free_buffer(hba);
758
759 return (EIO);
760 }
761#ifdef SLI3_SUPPORT
762 /* Check if SLI3 mode was achieved */
763 if (mb->un.varCfgPort.rMA &&
764 (mb->un.varCfgPort.sli_mode == EMLXS_HBA_SLI3_MODE)) {
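		/*
		 * In SLI3 mode the firmware reports the number of virtual
		 * port indexes (VPIs) it supports; a value greater than one
		 * enables NPIV, and vpi_max is capped below based on the
		 * chip type.
		 */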
765
766#ifdef NPIV_SUPPORT
767 if (mb->un.varCfgPort.vpi_max > 1) {
768 hba->flag |= FC_NPIV_ENABLED;
769
770 if (hba->model_info.chip >= EMLXS_SATURN_CHIP) {
771 hba->vpi_max =
772 min(mb->un.varCfgPort.vpi_max,
773 MAX_VPORTS - 1);
774 } else {
775 hba->vpi_max =
776 min(mb->un.varCfgPort.vpi_max,
777 MAX_VPORTS_LIMITED - 1);
778 }
779 }
780
781#if (EMLXS_MODREV >= EMLXS_MODREV5)
782 hba->fca_tran->fca_num_npivports =
783 (cfg[CFG_NPIV_ENABLE].current) ? hba->vpi_max : 0;
784#endif /* >= EMLXS_MODREV5 */
785
786#endif /* NPIV_SUPPORT */
787
788 if (mb->un.varCfgPort.gerbm && mb->un.varCfgPort.max_hbq) {
789 hba->flag |= FC_HBQ_ENABLED;
790 }
791
792 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
793 "SLI3 mode: flag=%x vpi_max=%d", hba->flag, hba->vpi_max);
794 } else {
795 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
796 "SLI2 mode: flag=%x", hba->flag);
797 sli_mode = EMLXS_HBA_SLI2_MODE;
798 sli_mode_mask = EMLXS_SLI2_MASK;
799 hba->sli_mode = sli_mode;
800 }
801#endif /* SLI3_SUPPORT */
802
803 /* Get and save the current firmware version (based on sli_mode) */
804 emlxs_decode_firmware_rev(hba, vpd);
805
806 emlxs_pcix_mxr_update(hba, 0);
807
808 /*
809 * Setup and issue mailbox RUN BIU DIAG command. Setup test buffers.
810 */
811 mp = 0;
812 mp1 = 0;
813 if (((mp = (MATCHMAP *)emlxs_mem_get(hba, MEM_BUF | MEM_PRI)) == 0) ||
814 ((mp1 = (MATCHMAP *)emlxs_mem_get(hba, MEM_BUF | MEM_PRI)) == 0)) {
815 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
816 "Unable to allocate diag buffers.");
817
818 emlxs_ffstate_change(hba, FC_ERROR);
819
820 if (mp) {
821 (void) emlxs_mem_put(hba, MEM_BUF, (uint8_t *)mp);
822 }
823 if (mp1) {
824 (void) emlxs_mem_put(hba, MEM_BUF, (uint8_t *)mp1);
825 }
826
827 (void) emlxs_mem_put(hba, MEM_MBOX, (uint8_t *)mb);
828 (void) emlxs_mem_free_buffer(hba);
829
830 return (ENOMEM);
831 }
832
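	/*
	 * BIU loopback test: a known pattern is copied into mp, the
	 * RUN_BIU_DIAG mailbox command transfers it through the adapter
	 * into mp1, and the two buffers are compared byte for byte.
	 */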
833 bcopy((caddr_t)&emlxs_diag_pattern[0], (caddr_t)mp->virt,
834 MEM_ELSBUF_SIZE);
835 emlxs_mpdata_sync(mp->dma_handle, 0, MEM_ELSBUF_SIZE,
836 DDI_DMA_SYNC_FORDEV);
837
838 bzero(mp1->virt, MEM_ELSBUF_SIZE);
839 emlxs_mpdata_sync(mp1->dma_handle, 0, MEM_ELSBUF_SIZE,
840 DDI_DMA_SYNC_FORDEV);
841
842 (void) emlxs_mb_run_biu_diag(hba, mb, mp->phys, mp1->phys);
843
844 if (emlxs_sli_issue_mbox_cmd(hba, mb, MBX_WAIT, 0) != MBX_SUCCESS) {
845 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
846 "Unable to run BIU diag. Mailbox cmd=%x status=%x",
847 mb->mbxCommand, mb->mbxStatus);
848
849 emlxs_ffstate_change(hba, FC_ERROR);
850
851 (void) emlxs_mem_put(hba, MEM_BUF, (uint8_t *)mp);
852 (void) emlxs_mem_put(hba, MEM_BUF, (uint8_t *)mp1);
853 (void) emlxs_mem_put(hba, MEM_MBOX, (uint8_t *)mb);
854 (void) emlxs_mem_free_buffer(hba);
855
856 return (EIO);
857 }
858
859 emlxs_mpdata_sync(mp1->dma_handle, 0, MEM_ELSBUF_SIZE,
860 DDI_DMA_SYNC_FORKERNEL);
861
862 outptr = mp->virt;
863 inptr = mp1->virt;
864
865 for (i = 0; i < MEM_ELSBUF_SIZE; i++) {
866 if (*outptr++ != *inptr++) {
867 outptr--;
868 inptr--;
869
870 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
871 "BIU diagnostic failed. "
872 "offset %x value %x should be %x.",
873 i, (uint32_t)*inptr, (uint32_t)*outptr);
874
875 emlxs_ffstate_change(hba, FC_ERROR);
876
877 (void) emlxs_mem_put(hba, MEM_BUF, (uint8_t *)mp);
878 (void) emlxs_mem_put(hba, MEM_BUF, (uint8_t *)mp1);
879 (void) emlxs_mem_put(hba, MEM_MBOX, (uint8_t *)mb);
880 (void) emlxs_mem_free_buffer(hba);
881
882 return (EIO);
883 }
884 }
885
886 (void) emlxs_mem_put(hba, MEM_BUF, (uint8_t *)mp);
887 (void) emlxs_mem_put(hba, MEM_BUF, (uint8_t *)mp1);
888
889 /*
890 * Setup and issue mailbox CONFIGURE RING command
891 */
892 for (i = 0; i < (uint32_t)hba->ring_count; i++) {
893 /*
894 * Initialize cmd/rsp ring pointers
895 */
896 rp = &hba->ring[i];
897
898 rp->hba = hba;
899 rp->ringno = (uint8_t)i;
900
901 rp->fc_iocbhd = 0;
902 rp->fc_iocbtl = 0;
903 rp->fc_cmdidx = 0;
904 rp->fc_rspidx = 0;
905 rp->fc_iotag = 1; /* Used for pkt io */
906 rp->fc_abort_iotag = rp->max_iotag; /* Used for abort or */
907 /* close XRI iotags */
908 emlxs_ffstate_change(hba, FC_INIT_CFGRING);
909 emlxs_mb_config_ring(hba, i, mb);
910 if (emlxs_sli_issue_mbox_cmd(hba, mb, MBX_WAIT, 0) !=
911 MBX_SUCCESS) {
912 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
913 "Unable to configure ring. "
914 "Mailbox cmd=%x status=%x",
915 mb->mbxCommand, mb->mbxStatus);
916
917 emlxs_ffstate_change(hba, FC_ERROR);
918 (void) emlxs_mem_put(hba, MEM_MBOX, (uint8_t *)mb);
919 (void) emlxs_mem_free_buffer(hba);
920
921 return (EIO);
922 }
923 }
924
925 /*
926 * Setup link timers
927 */
928 emlxs_ffstate_change(hba, FC_INIT_INITLINK);
929 emlxs_mb_config_link(hba, mb);
930 if (emlxs_sli_issue_mbox_cmd(hba, mb, MBX_WAIT, 0) != MBX_SUCCESS) {
931 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
932 "Unable to configure link. Mailbox cmd=%x status=%x",
933 mb->mbxCommand, mb->mbxStatus);
934
935 emlxs_ffstate_change(hba, FC_ERROR);
936 (void) emlxs_mem_put(hba, MEM_MBOX, (uint8_t *)mb);
937 emlxs_ffcleanup(hba);
938 (void) emlxs_mem_free_buffer(hba);
939
940 return (EIO);
941 }
942#ifdef MAX_RRDY_PATCH
943 /* Set MAX_RRDY if one is provided */
944 if (cfg[CFG_MAX_RRDY].current) {
945 emlxs_mb_set_var(hba, (MAILBOX *)mb, 0x00060412,
946 cfg[CFG_MAX_RRDY].current);
947
948 if (emlxs_sli_issue_mbox_cmd(hba, mb, MBX_WAIT, 0) !=
949 MBX_SUCCESS) {
950 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
951 "MAX_RRDY: Unable to set. status=%x value=%d",
952 mb->mbxStatus, cfg[CFG_MAX_RRDY].current);
953 } else {
954 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
955 "MAX_RRDY: %d", cfg[CFG_MAX_RRDY].current);
956 }
957 }
958#endif /* MAX_RRDY_PATCH */
959
960 /*
961 * We need to get login parameters for NID
962 */
963 (void) emlxs_mb_read_sparam(hba, mb);
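	/*
	 * The service parameters read here supply the adapter's world wide
	 * names, which the code below uses to build a fallback serial
	 * number and the symbolic node and port names.
	 */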
964 mp = (MATCHMAP *)(((MAILBOXQ *)mb)->bp);
965 if (emlxs_sli_issue_mbox_cmd(hba, mb, MBX_WAIT, 0) != MBX_SUCCESS) {
966 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
967 "Unable to read parameters. Mailbox cmd=%x status=%x",
968 mb->mbxCommand, mb->mbxStatus);
969
970 emlxs_ffstate_change(hba, FC_ERROR);
971 (void) emlxs_mem_put(hba, MEM_BUF, (uint8_t *)mp);
972 (void) emlxs_mem_put(hba, MEM_MBOX, (uint8_t *)mb);
973 emlxs_ffcleanup(hba);
974 (void) emlxs_mem_free_buffer(hba);
975
976 return (EIO);
977 }
978
979 /* Free the buffer since we were polling */
980 (void) emlxs_mem_put(hba, MEM_BUF, (uint8_t *)mp);
981
982 /* If no serial number in VPD data, then use the WWPN */
983 if (vpd->serial_num[0] == 0) {
984 outptr = (uint8_t *)&hba->wwpn.IEEE[0];
985 for (i = 0; i < 12; i++) {
986 status = *outptr++;

--- 12 unchanged lines hidden (view full) ---

999 vpd->serial_num[i] =
1000 (char)((uint8_t)'0' + (uint8_t)j);
1001 } else {
1002 vpd->serial_num[i] =
1003 (char)((uint8_t)'A' + (uint8_t)(j - 10));
1004 }
1005 }
1006
1007 /*
1008 * Set port number and port index to zero
1009 * The WWN's are unique to each port and therefore port_num
1010 * must equal zero. This affects the hba_fru_details structure
1011 * in fca_bind_port()
1012 */
1013 vpd->port_num[0] = 0;
1014 vpd->port_index = 0;
1015 }
1016
1017 /*
1018 * Make first attempt to set a port index
1019 * Check if this is a multifunction adapter
1020 */
1021 if ((vpd->port_index == -1) &&
1022 (hba->model_info.chip >= EMLXS_THOR_CHIP)) {
1023 char *buffer;
1024 int32_t i;
1025
1026 /*
1027 * The port address looks like this:
1028 * 1 - for port index 0
1029 * 1,1 - for port index 1
1030 * 1,2 - for port index 2
1031 */
1032 buffer = ddi_get_name_addr(hba->dip);
1033
1034 if (buffer) {
1035 vpd->port_index = 0;
1036
1037 /* Reverse scan for a comma */
1038 for (i = strlen(buffer) - 1; i > 0; i--) {
1039 if (buffer[i] == ',') {
1040 /* Comma found - set index now */
1041 vpd->port_index =
1042 emlxs_strtol(&buffer[i + 1], 10);
1043 break;
1044 }
1045 }
1046 }
1047 }
1048
1049 /* Make final attempt to set a port index */
1050 if (vpd->port_index == -1) {
1051 dev_info_t *p_dip;
1052 dev_info_t *c_dip;
1053
1054 p_dip = ddi_get_parent(hba->dip);
1055 c_dip = ddi_get_child(p_dip);
1056
1057 vpd->port_index = 0;
1058 while (c_dip && (hba->dip != c_dip)) {
1059 c_dip = ddi_get_next_sibling(c_dip);
1060 vpd->port_index++;
1061 }
1062 }
1063
1064 if (vpd->port_num[0] == 0) {
1065 if (hba->model_info.channels > 1) {
1066 (void) sprintf(vpd->port_num, "%d", vpd->port_index);
1067 }
1068 }
1069
1070 if (vpd->id[0] == 0) {
1071 (void) strcpy(vpd->id, hba->model_info.model_desc);
1072 }
1073
1074 if (vpd->manufacturer[0] == 0) {
1075 (void) strcpy(vpd->manufacturer, hba->model_info.manufacturer);
1076 }
1077
1078 if (vpd->part_num[0] == 0) {
1079 (void) strcpy(vpd->part_num, hba->model_info.model);
1080 }
1081
1082 if (vpd->model_desc[0] == 0) {
1083 (void) strcpy(vpd->model_desc, hba->model_info.model_desc);
1084 }
1085
1086 if (vpd->model[0] == 0) {
1087 (void) strcpy(vpd->model, hba->model_info.model);
1088 }
1089
1090 if (vpd->prog_types[0] == 0) {
1091 emlxs_build_prog_types(hba, vpd->prog_types);
1092 }
1093
1089 /* Create the symbolic names */
1090 (void) sprintf(hba->snn, "Emulex %s FV%s DV%s %s",
1091 hba->model_info.model, hba->vpd.fw_version, emlxs_version,
1092 (char *)utsname.nodename);
1093
1094 (void) sprintf(hba->spn,
1095 "Emulex PPN-%01x%01x:%02x:%02x:%02x:%02x:%02x:%02x:%02x",
1096 hba->wwpn.nameType, hba->wwpn.IEEEextMsn, hba->wwpn.IEEEextLsb,

--- 6 unchanged lines hidden ---

1103 (hba->sparam.portName.IEEEextLsb != 0)) {
1104
1105 cfg[CFG_NETWORK_ON].current = 0;
1106
1107 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg,
1108 "WWPN doesn't conform to IP profile: nameType=%x",
1109 hba->sparam.portName.nameType);
1110 }
1111 /* Issue CONFIG FARP */
1112 emlxs_mb_config_farp(hba, mb);
1113 if (emlxs_mb_issue_cmd(hba, mb, MBX_WAIT, 0) != MBX_SUCCESS) {
1119 if (emlxs_sli_issue_mbox_cmd(hba, mb, MBX_WAIT, 0) !=
1120 MBX_SUCCESS) {
1114 /*
1115 * Let it go through even if failed.
1116 */
1117 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg,
1118 "Unable to configure FARP. Mailbox cmd=%x "
1119 "status=%x", mb->mbxCommand, mb->mbxStatus);
1125 "Unable to configure FARP. "
1126 "Mailbox cmd=%x status=%x",
1127 mb->mbxCommand, mb->mbxStatus);
1120 }
1121 }
1122#ifdef MSI_SUPPORT
1123 /* Configure MSI map if required */
1124 if (hba->intr_count > 1) {
1125 emlxs_mb_config_msix(hba, mb, hba->intr_map, hba->intr_count);
1126
1127 if (emlxs_mb_issue_cmd(hba, mb, MBX_WAIT, 0) == MBX_SUCCESS) {
1135 if (emlxs_sli_issue_mbox_cmd(hba, mb, MBX_WAIT, 0) ==
1136 MBX_SUCCESS) {
1128 goto msi_configured;
1129 }
1130 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
1131 "Unable to config MSIX. Mailbox cmd=0x%x status=0x%x",
1132 mb->mbxCommand, mb->mbxStatus);
1133
1134 emlxs_mb_config_msi(hba, mb, hba->intr_map, hba->intr_count);
1135
1136 if (emlxs_mb_issue_cmd(hba, mb, MBX_WAIT, 0) == MBX_SUCCESS) {
1146 if (emlxs_sli_issue_mbox_cmd(hba, mb, MBX_WAIT, 0) ==
1147 MBX_SUCCESS) {
1137 goto msi_configured;
1138 }
1139 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
1140 "Unable to config MSI. Mailbox cmd=0x%x status=0x%x",
1141 mb->mbxCommand, mb->mbxStatus);
1142
1143 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
1144 "Attempting single interrupt mode...");
1145
1146 /* First cleanup old interrupts */

--- 9 unchanged lines hidden ---

1156
1157 emlxs_ffstate_change(hba, FC_ERROR);
1158 (void) emlxs_mem_put(hba, MEM_MBOX, (uint8_t *)mb);
1159 emlxs_ffcleanup(hba);
1160 (void) emlxs_mem_free_buffer(hba);
1161
1162 return (EIO);
1163 }
1164 /*
1165 * Reset adapter - The adapter needs to be reset because the
1166 * bus cannot handle the MSI change without handshaking with
1167 * the adapter again
1168 */
1178 /*
1179 * Reset adapter - The adapter needs to be reset because
1180 * the bus cannot handle the MSI change without handshaking
1181 * with the adapter again
1182 */
1169
1170 (void) emlxs_mem_put(hba, MEM_MBOX, (uint8_t *)mb);
1171 (void) emlxs_mem_free_buffer(hba);
1172 fw_check = 0;
1173 goto reset;
1174 }
1175msi_configured:
1176
1177#endif /* MSI_SUPPORT */
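/*
 * Editor's note (illustrative only): the #ifdef MSI_SUPPORT block above is a
 * "best effort" ladder -- program an MSI-X map first, fall back to an MSI
 * map, and only then drop back to single interrupt mode.  A sketch of that
 * ladder as a hypothetical helper (example_config_intr_map is not a real
 * driver function; the emlxs_* calls are the ones used above):
 */
#if 0	/* example only */
static int
example_config_intr_map(emlxs_hba_t *hba, MAILBOX *mb)
{
	if (hba->intr_count > 1) {
		emlxs_mb_config_msix(hba, mb, hba->intr_map, hba->intr_count);
		if (emlxs_sli_issue_mbox_cmd(hba, mb, MBX_WAIT, 0) ==
		    MBX_SUCCESS) {
			return (0);	/* MSI-X map accepted */
		}

		emlxs_mb_config_msi(hba, mb, hba->intr_map, hba->intr_count);
		if (emlxs_sli_issue_mbox_cmd(hba, mb, MBX_WAIT, 0) ==
		    MBX_SUCCESS) {
			return (0);	/* MSI map accepted */
		}
	}

	return (-1);	/* caller falls back to single interrupt mode */
}
#endif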
1178
1179 /*
1180 * We always disable the firmware traffic cop feature
1181 */
1182 if (emlxs_disable_traffic_cop) {
1183 emlxs_disable_tc(hba, (MAILBOX *) mb);
1184 if (emlxs_mb_issue_cmd(hba, mb, MBX_WAIT, 0) != MBX_SUCCESS) {
1198 emlxs_disable_tc(hba, (MAILBOX *)mb);
1199 if (emlxs_sli_issue_mbox_cmd(hba, mb, MBX_WAIT, 0) !=
1200 MBX_SUCCESS) {
1185 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
1186 "Unable to disable traffic cop. Mailbox cmd=%x "
1187 "status=%x", mb->mbxCommand, mb->mbxStatus);
1202 "Unable to disable traffic cop. "
1203 "Mailbox cmd=%x status=%x",
1204 mb->mbxCommand, mb->mbxStatus);
1188
1189 (void) EMLXS_INTR_REMOVE(hba);
1190 emlxs_ffstate_change(hba, FC_ERROR);
1191 (void) emlxs_mem_put(hba, MEM_MBOX, (uint8_t *)mb);
1192 emlxs_ffcleanup(hba);
1193 (void) emlxs_mem_free_buffer(hba);
1194
1195 return (EIO);
1196 }
1197 }
1198 emlxs_mb_read_config(hba, (MAILBOX *) mb);
1199 if (emlxs_mb_issue_cmd(hba, mb, MBX_WAIT, 0) != MBX_SUCCESS) {
1215
1216 emlxs_mb_read_config(hba, (MAILBOX *)mb);
1217 if (emlxs_sli_issue_mbox_cmd(hba, mb, MBX_WAIT, 0) != MBX_SUCCESS) {
1200 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
1201 "Unable to read configuration. Mailbox cmd=%x status=%x",
1202 mb->mbxCommand, mb->mbxStatus);
1203
1204 (void) EMLXS_INTR_REMOVE(hba);
1205 emlxs_ffstate_change(hba, FC_ERROR);
1206 (void) emlxs_mem_put(hba, MEM_MBOX, (uint8_t *)mb);
1207 emlxs_ffcleanup(hba);
1208 (void) emlxs_mem_free_buffer(hba);
1209
1210 return (EIO);
1211 }
1212 /* Save the link speed capabilities */
1213 vpd->link_speed = mb->un.varRdConfig.lmt;
1214 emlxs_process_link_speed(hba);
1215
1216 /* Set the io throttle */
1217 hba->io_throttle = mb->un.varRdConfig.max_xri - IO_THROTTLE_RESERVE;
1218
1219 /* Set the max node count */
1220 if (cfg[CFG_NUM_NODES].current > 0) {
1221 hba->max_nodes =
1222 min(cfg[CFG_NUM_NODES].current, mb->un.varRdConfig.max_rpi);
1241 min(cfg[CFG_NUM_NODES].current,
1242 mb->un.varRdConfig.max_rpi);
1223 } else {
1224 hba->max_nodes = mb->un.varRdConfig.max_rpi;
1225 }
1226
1247 /* Register for async events */
1248 emlxs_mb_async_event(hba, mb);
1249 if (emlxs_sli_issue_mbox_cmd(hba, mb, MBX_WAIT, 0) != MBX_SUCCESS) {
1250 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
1251 "Async events disabled. Mailbox status=%x",
1252 mb->mbxStatus);
1253 } else {
1254 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
1255 "Async events enabled.");
1256 hba->flag |= FC_ASYNC_EVENTS;
1257 }
1258
1227 emlxs_ffstate_change(hba, FC_LINK_DOWN);
1228
1229 /* Enable mailbox, error attention interrupts */
1230 status = (uint32_t)(HC_MBINT_ENA | HC_ERINT_ENA);
1231
1232 /* Enable ring interrupts */
1233 if (hba->ring_count >= 4) {
1234 status |= (HC_R3INT_ENA | HC_R2INT_ENA | HC_R1INT_ENA |
1235 HC_R0INT_ENA);
1236 } else if (hba->ring_count == 3) {
1237 status |= (HC_R2INT_ENA | HC_R1INT_ENA | HC_R0INT_ENA);
1238 } else if (hba->ring_count == 2) {
1239 status |= (HC_R1INT_ENA | HC_R0INT_ENA);
1240 } else if (hba->ring_count == 1) {
1241 status |= (HC_R0INT_ENA);
1242 }
1243 hba->hc_copy = status;
1244 WRITE_CSR_REG(hba, FC_HC_REG(hba, hba->csr_addr), hba->hc_copy);
1245
1261 emlxs_intr_initialize(hba);
1262
1246#ifdef SLI3_SUPPORT
1247
1248 if (hba->flag & FC_HBQ_ENABLED) {
1249 if (hba->tgt_mode) {
1250 if (emlxs_hbq_setup(hba, EMLXS_FCT_HBQ_ID)) {
1251 return (ENOMEM);
1252 }
1253 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
1254 "FCT Ring: Posted %d buffers.", MEM_FCTBUF_COUNT);
1255 }
1256 if (cfg[CFG_NETWORK_ON].current) {
1257 if (emlxs_hbq_setup(hba, EMLXS_IP_HBQ_ID)) {
1258 return (ENOMEM);
1259 }
1260 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
1261 "IP Ring: Posted %d buffers.", MEM_IPBUF_COUNT);
1262 }
1263 if (emlxs_hbq_setup(hba, EMLXS_ELS_HBQ_ID)) {
1264 return (ENOMEM);
1265 }
1266 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
1267 "ELS Ring: Posted %d buffers.", MEM_ELSBUF_COUNT);
1268
1269 if (emlxs_hbq_setup(hba, EMLXS_CT_HBQ_ID)) {
1270 return (ENOMEM);
1271 }
1272 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
1273 "CT Ring: Posted %d buffers.", MEM_CTBUF_COUNT);
1274 } else
1275#endif /* SLI3_SUPPORT */
1276 {
1277 if (hba->tgt_mode) {
1278 /* Post the FCT unsol buffers */
1279 rp = &hba->ring[FC_FCT_RING];
1280 for (j = 0; j < MEM_FCTBUF_COUNT; j += 2) {
1281 (void) emlxs_post_buffer(hba, rp, 2);
1282 }
1283 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
1284 "FCP Ring: Posted %d buffers.", MEM_FCTBUF_COUNT);
1285 }
1286 if (cfg[CFG_NETWORK_ON].current) {
1287 /* Post the IP unsol buffers */
1288 rp = &hba->ring[FC_IP_RING];
1289 for (j = 0; j < MEM_IPBUF_COUNT; j += 2) {
1290 (void) emlxs_post_buffer(hba, rp, 2);
1291 }
1292 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
1293 "IP Ring: Posted %d buffers.", MEM_IPBUF_COUNT);
1294 }
1295 /* Post the ELS unsol buffers */
1296 rp = &hba->ring[FC_ELS_RING];
1297 for (j = 0; j < MEM_ELSBUF_COUNT; j += 2) {
1298 (void) emlxs_post_buffer(hba, rp, 2);
1299 }
1300 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
1301 "ELS Ring: Posted %d buffers.", MEM_ELSBUF_COUNT);
1302
1303
1304 /* Post the CT unsol buffers */
1305 rp = &hba->ring[FC_CT_RING];
1306 for (j = 0; j < MEM_CTBUF_COUNT; j += 2) {
1307 (void) emlxs_post_buffer(hba, rp, 2);
1308 }
1309 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
1310 "CT Ring: Posted %d buffers.", MEM_CTBUF_COUNT);
1311 }
1312
1313 /* Register for async events */
1314 emlxs_mb_async_event(hba, mb);
1315 if (emlxs_mb_issue_cmd(hba, mb, MBX_WAIT, 0) != MBX_SUCCESS) {
1316 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
1317 "Async events disabled. Mailbox status=%x", mb->mbxStatus);
1318 } else {
1319 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
1320 "Async events enabled.");
1321 hba->flag |= FC_ASYNC_EVENTS;
1322 }
1323
1324
1325 /*
1326 * Setup and issue mailbox INITIALIZE LINK command. At this point, the
1327 * interrupt will be generated by the HW
1328 */
1329 emlxs_mb_init_link(hba, mb, cfg[CFG_TOPOLOGY].current,
1330 cfg[CFG_LINK_SPEED].current);
1335 /*
1336 * Setup and issue mailbox INITIALIZE LINK command. At this point,
1337 * the interrupt will be generated by the HW
1338 */
1339 emlxs_mb_init_link(hba,
1340 mb, cfg[CFG_TOPOLOGY].current, cfg[CFG_LINK_SPEED].current);
1331
1332 rval = emlxs_mb_issue_cmd(hba, mb, MBX_NOWAIT, 0);
1342 rval = emlxs_sli_issue_mbox_cmd(hba, mb, MBX_NOWAIT, 0);
1333
1334 if (rval != MBX_SUCCESS && rval != MBX_BUSY) {
1335 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
1336 "Unable to initialize link. Mailbox cmd=%x status=%x",
1337 mb->mbxCommand, mb->mbxStatus);
1338
1339 (void) EMLXS_INTR_REMOVE(hba);
1340 emlxs_ffstate_change(hba, FC_ERROR);
1341 (void) emlxs_mem_put(hba, MEM_MBOX, (uint8_t *)mb);
1342 emlxs_ffcleanup(hba);
1343 (void) emlxs_mem_free_buffer(hba);
1344
1345 return (EIO);
1346 }
1347 (void) emlxs_mem_put(hba, MEM_MBOX, (uint8_t *)mb);
1348
1349 /*
1350 * Enable link attention interrupt
1351 */
1352 mutex_enter(&EMLXS_PORT_LOCK);
1353 hba->hc_copy |= HC_LAINT_ENA;
1354 WRITE_CSR_REG(hba, FC_HC_REG(hba, hba->csr_addr), hba->hc_copy);
1355 mutex_exit(&EMLXS_PORT_LOCK);
1363 emlxs_enable_latt(hba);
1356
1364
1357
1358 /* Wait for link to come up */
1359 i = cfg[CFG_LINKUP_DELAY].current;
1360 while (i && (hba->state < FC_LINK_UP)) {
1361 /* Check for hardware error */
1362 if (hba->state == FC_ERROR) {
1363 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
1364 "Adapter error.", mb->mbxCommand, mb->mbxStatus);
1365
1366 (void) EMLXS_INTR_REMOVE(hba);
1367 emlxs_ffcleanup(hba);
1368 (void) emlxs_mem_free_buffer(hba);
1369
1370 return (EIO);
1371 }
1372 DELAYMS(1000);
1373 i--;
1374 }
1375
1376out:
1377
1378 /*
1379 * The Leadville driver will now handle the FLOGI at the driver level
1380 */
1381
1382 return (0);
1384} /* emlxs_ffinit() */
1385
1386
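/*
 * Editor's note (illustrative only): emlxs_ffinit() above repeats a single
 * mailbox idiom -- build a command into 'mb', issue it synchronously with
 * MBX_WAIT, and treat anything other than MBX_SUCCESS as a failure while
 * logging mbxCommand/mbxStatus.  A minimal sketch of that idiom, using
 * READ_CONFIG as the example command (example_read_config is not a real
 * driver function):
 */
#if 0	/* example only */
static int
example_read_config(emlxs_hba_t *hba, MAILBOX *mb)
{
	emlxs_port_t *port = &PPORT;

	emlxs_mb_read_config(hba, mb);
	if (emlxs_sli_issue_mbox_cmd(hba, mb, MBX_WAIT, 0) != MBX_SUCCESS) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
		    "Unable to read configuration. Mailbox cmd=%x status=%x",
		    mb->mbxCommand, mb->mbxStatus);
		return (EIO);
	}

	return (0);
}
#endif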
1387#ifdef MSI_SUPPORT
1388
1389/* EMLXS_INTR_INIT */
1390int32_t
1391emlxs_msi_init(emlxs_hba_t *hba, uint32_t max)
1392{
1393 emlxs_port_t *port = &PPORT;
1394 int32_t pass = 0;

--- 13 unchanged lines hidden ---

1408 int32_t *intr_cap = NULL;
1409 int32_t hilevel_pri;
1410 emlxs_config_t *cfg = &CFG;
1411 char buf[64];
1412
1413 if (!(hba->intr_flags & EMLXS_MSI_ENABLED)) {
1414 return (emlxs_intx_init(hba, max));
1415 }
1416 if (hba->intr_flags & EMLXS_MSI_INITED) {
1417 return (DDI_SUCCESS);
1418 }
1419 /* Set max interrupt count if not specified */
1420 if (max == 0) {
1421 if ((cfg[CFG_MSI_MODE].current == 2) ||
1422 (cfg[CFG_MSI_MODE].current == 3)) {
1423 max = EMLXS_MSI_MAX_INTRS;
1424 } else {
1425 max = 1;
1426 }
1427 }
1428 /* Filter max interrupt count with adapter model specification */
1429 if (hba->model_info.intr_limit && (max > hba->model_info.intr_limit)) {
1430 max = hba->model_info.intr_limit;
1431 }
1432 /* Get the available interrupt types from the kernel */
1433 types = 0;
1434 ret = ddi_intr_get_supported_types(hba->dip, &types);
1435
1436 if ((ret != DDI_SUCCESS)) {
1437 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
1438 "MSI: ddi_intr_get_supported_types failed. ret=%d", ret);
1439
1440 /* Default to fixed type */
1441 types = DDI_INTR_TYPE_FIXED;
1442 }
1443 /* Check if fixed interrupts are being forced */
1444 if (cfg[CFG_MSI_MODE].current == 0) {
1445 types &= DDI_INTR_TYPE_FIXED;
1446 }
1447 /* Check if MSI interrupts are being forced */
1448 else if ((cfg[CFG_MSI_MODE].current == 1) ||
1449 (cfg[CFG_MSI_MODE].current == 2)) {
1450 types &= (DDI_INTR_TYPE_MSI | DDI_INTR_TYPE_FIXED);
1451 }
1452begin:
1453
1454 /* Set interrupt type and interrupt count */
1455 type = 0;
1456
1457 /* Check if MSIX is fully supported */
1458 if ((types & DDI_INTR_TYPE_MSIX) &&
1459 (hba->model_info.flags & EMLXS_MSIX_SUPPORTED)) {
1460 /* Get the max interrupt count from the adapter */
1461 nintrs = 0;
1462 ret =
1463 ddi_intr_get_nintrs(hba->dip, DDI_INTR_TYPE_MSIX, &nintrs);
1477 ddi_intr_get_nintrs(hba->dip, DDI_INTR_TYPE_MSIX,
1478 &nintrs);
1464
1465 if (ret == DDI_SUCCESS && nintrs) {
1466 type = DDI_INTR_TYPE_MSIX;
1467 (void) strcpy(s_type, "TYPE_MSIX");
1468 goto initialize;
1469 }
1470 }
1471 /* Check if MSI is fully supported */
1472 if ((types & DDI_INTR_TYPE_MSI) &&
1473 (hba->model_info.flags & EMLXS_MSI_SUPPORTED)) {
1474 /* Get the max interrupt count from the adapter */
1475 nintrs = 0;
1476 ret = ddi_intr_get_nintrs(hba->dip, DDI_INTR_TYPE_MSI, &nintrs);
1492 ret =
1493 ddi_intr_get_nintrs(hba->dip, DDI_INTR_TYPE_MSI, &nintrs);
1477
1478 if (ret == DDI_SUCCESS && nintrs) {
1479 type = DDI_INTR_TYPE_MSI;
1480 (void) strcpy(s_type, "TYPE_MSI");
1481 goto initialize;
1482 }
1483 }
1484 /* Check if fixed interrupts are fully supported */
1485 if ((types & DDI_INTR_TYPE_FIXED) &&
1486 (hba->model_info.flags & EMLXS_INTX_SUPPORTED)) {
1487 /* Get the max interrupt count from the adapter */
1488 nintrs = 0;
1489 ret =
1490 ddi_intr_get_nintrs(hba->dip, DDI_INTR_TYPE_FIXED, &nintrs);
1508 ddi_intr_get_nintrs(hba->dip, DDI_INTR_TYPE_FIXED,
1509 &nintrs);
1491
1492 if (ret == DDI_SUCCESS) {
1493 type = DDI_INTR_TYPE_FIXED;
1494 (void) strcpy(s_type, "TYPE_FIXED");
1495 goto initialize;
1496 }
1497 }
1498 goto init_failed;
1499
1500
1501initialize:
1502
1503 pass++;
1504 mode = 0;
1505 actual = 0;
1506 htable = NULL;
1507 intr_pri = NULL;
1508 intr_cap = NULL;
1509 hilevel_pri = 0;
1510
1511 if (pass == 1) {
1512 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
1513 "MSI: %s: mode=%d types=0x%x nintrs=%d",
1514 s_type, cfg[CFG_MSI_MODE].current, types, nintrs);
1533 "MSI: %s: mode=%d types=0x%x nintrs=%d", s_type,
1534 cfg[CFG_MSI_MODE].current, types, nintrs);
1515 }
1536
1516 /* Validate interrupt count */
1517 count = min(nintrs, max);
1518
1519 if (count >= 8) {
1520 count = 8;
1521 } else if (count >= 4) {
1522 count = 4;
1523 } else if (count >= 2) {
1524 count = 2;
1525 } else {
1526 count = 1;
1527 }
1528
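/*
 * Editor's note (illustrative only): the if/else chain above rounds the
 * negotiated vector count down to the nearest group size the driver's MSI
 * map tables support (8, 4, 2 or 1).  The same rule as a tiny helper
 * (hypothetical, not part of the driver):
 */
#if 0	/* example only */
static uint32_t
example_round_intr_count(uint32_t count)
{
	if (count >= 8)
		return (8);
	if (count >= 4)
		return (4);
	if (count >= 2)
		return (2);
	return (1);
}
#endif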
1529 /* Allocate an array of interrupt handles */
1530 htable =
1531 kmem_alloc((size_t)(count * sizeof (ddi_intr_handle_t)), KM_SLEEP);
1552 kmem_alloc((size_t)(count * sizeof (ddi_intr_handle_t)),
1553 KM_SLEEP);
1532
1533 if (htable == NULL) {
1534 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
1535 "MSI: Unable to allocate interrupt handle table");
1536
1537 goto init_failed;
1538 }
1539 /* Allocate 'count' interrupts */
1540 ret = ddi_intr_alloc(hba->dip, htable, type, EMLXS_MSI_INUMBER, count,
1541 &actual, DDI_INTR_ALLOC_NORMAL);
1563 ret =
1564 ddi_intr_alloc(hba->dip, htable, type, EMLXS_MSI_INUMBER, count,
1565 &actual, DDI_INTR_ALLOC_NORMAL);
1542
1543 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
1544 "MSI: %s: count=%d actual=%d", s_type, count, actual);
1545
1546 if ((ret != DDI_SUCCESS) || (actual == 0)) {
1547 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
1548 "MSI: Unable to allocate interrupts. error=%d", ret);
1549
1550 goto init_failed;
1551 }
1552 if (actual != count) {
1553 /* Validate actual count */
1554 if (actual >= 8) {
1555 new_actual = 8;
1556 } else if (actual >= 4) {
1557 new_actual = 4;
1558 } else if (actual >= 2) {
1559 new_actual = 2;

--- 4 unchanged lines hidden ---

1564 if (new_actual < actual) {
1565 /* Free extra handles */
1566 for (i = new_actual; i < actual; i++) {
1567 (void) ddi_intr_free(htable[i]);
1568 }
1569
1570 actual = new_actual;
1571 }
1572 /* Allocate a new array of interrupt handles */
1573 new_htable =
1574 kmem_alloc((size_t)(actual * sizeof (ddi_intr_handle_t)),
1575 KM_SLEEP);
1576
1577 if (new_htable == NULL) {
1578 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
1579 "MSI: Unable to allocate new interrupt handle "
1580 "table");
1605 "MSI: Unable to allocate new "
1606 "interrupt handle table");
1581
1582 goto init_failed;
1583 }
1584 /* Copy old array to new array */
1585 bcopy((uint8_t *)htable, (uint8_t *)new_htable,
1586 (actual * sizeof (ddi_intr_handle_t)));
1587
1588 /* Free the old array */
1589 kmem_free(htable, (count * sizeof (ddi_intr_handle_t)));
1590
1591 htable = new_htable;
1592 count = actual;
1593 }
1594 /* Allocate interrupt priority table */
1595 intr_pri =
1596 (uint32_t *)kmem_alloc((size_t)(count * sizeof (uint32_t)),
1597 KM_SLEEP);
1598
1599 if (intr_pri == NULL) {
1600 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
1601 "MSI: Unable to allocate interrupt priority table");
1602
1603 goto init_failed;
1604 }
1605 /* Allocate interrupt capability table */
1606 intr_cap = kmem_alloc((size_t)(count * sizeof (uint32_t)), KM_SLEEP);
1607
1608 if (intr_cap == NULL) {
1609 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
1610 "MSI: Unable to allocate interrupt capability table");
1611
1612 goto init_failed;
1613 }
1614 /* Get minimum hilevel priority */
1615 hilevel_pri = ddi_intr_get_hilevel_pri();
1616
1617 /* Fill the priority and capability tables */
1618 for (i = 0; i < count; ++i) {
1619 ret = ddi_intr_get_pri(htable[i], &intr_pri[i]);
1620
1621 if (ret != DDI_SUCCESS) {
1622 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
1623 "MSI: ddi_intr_get_pri(%d) failed. "
1624 "handle=%p ret=%d", i, &htable[i], ret);
1654 "handle=%p ret=%d",
1655 i, &htable[i], ret);
1625
1626 /* Clean up the interrupts */
1627 goto init_failed;
1628 }
1629 if (intr_pri[i] >= hilevel_pri) {
1630 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
1631 "MSI: Interrupt(%d) level too high. "
1632 "pri=0x%x hilevel=0x%x",
1633 i, intr_pri[i], hilevel_pri);
1634
1635 /* Clean up the interrupts */
1636 goto init_failed;
1637 }
1638 ret = ddi_intr_get_cap(htable[i], &intr_cap[i]);
1639
1640 if (ret != DDI_SUCCESS) {
1641 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
1642 "MSI: ddi_intr_get_cap(%d) failed. handle=%p "
1643 "ret=%d", i, &htable[i], ret);
1675 "MSI: ddi_intr_get_cap(%d) failed. "
1676 "handle=%p ret=%d",
1677 i, &htable[i], ret);
1644
1645 /* Clean up the interrupts */
1646 goto init_failed;
1647 }
1682
1648 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
1649 "MSI: %s: %d: cap=0x%x pri=0x%x hilevel=0x%x",
1650 s_type, i, intr_cap[i], intr_pri[i], hilevel_pri);
1684 "MSI: %s: %d: cap=0x%x pri=0x%x hilevel=0x%x", s_type, i,
1685 intr_cap[i], intr_pri[i], hilevel_pri);
1651
1652 }
1653
1654 /* Set mode */
1655 switch (count) {
1656 case 8:
1657 mode = EMLXS_MSI_MODE8;
1658 break;

--- 11 unchanged lines hidden ---

1670 }
1671
1672 /* Save the info */
1673 hba->intr_htable = htable;
1674 hba->intr_count = count;
1675 hba->intr_pri = intr_pri;
1676 hba->intr_cap = intr_cap;
1677 hba->intr_type = type;
1678 hba->intr_arg = (void *)(unsigned long) intr_pri[0];
1713 hba->intr_arg = (void *)((unsigned long)intr_pri[0]);
1679 hba->intr_mask = emlxs_msi_mask[mode];
1680
1681 hba->intr_cond = 0;
1682 for (i = 0; i < EMLXS_MSI_MAX_INTRS; i++) {
1683 hba->intr_map[i] = emlxs_msi_map[mode][i];
1684 hba->intr_cond |= emlxs_msi_map[mode][i];
1685
1686 (void) sprintf(buf, "%s%d_msi%d mutex", DRIVER_NAME,
1687 hba->ddiinst, i);
1688 mutex_init(&hba->intr_lock[i], buf, MUTEX_DRIVER,
1689 (void *) hba->intr_arg);
1724 (void *)hba->intr_arg);
1690 }
1691
1692 /* Set flag to indicate support */
1693 hba->intr_flags |= EMLXS_MSI_INITED;
1694
1695 /* Create the interrupt threads */
1696 for (i = 0; i < MAX_RINGS; i++) {
1697 (void) sprintf(buf, "%s%d_ring%d mutex", DRIVER_NAME,
1698 hba->ddiinst, i);
1699 mutex_init(&hba->ring[i].rsp_lock, buf, MUTEX_DRIVER,
1700 (void *) hba->intr_arg);
1735 (void *)hba->intr_arg);
1701
1702 emlxs_thread_create(hba, &hba->ring[i].intr_thread);
1703 }
1704
1705 return (DDI_SUCCESS);
1706
1707
1708init_failed:
1709
1710 if (intr_cap) {
1711 kmem_free(intr_cap, (count * sizeof (int32_t)));
1712 }
1713 if (intr_pri) {
1714 kmem_free(intr_pri, (count * sizeof (int32_t)));
1715 }
1716 if (htable) {
1717 /* Process the interrupt handlers */
1718 for (i = 0; i < actual; i++) {
1719 /* Free the handle[i] */
1720 (void) ddi_intr_free(htable[i]);
1721 }
1722
1723 kmem_free(htable, (count * sizeof (ddi_intr_handle_t)));
1724 }
1725 /* Initialize */
1726 hba->intr_htable = NULL;
1727 hba->intr_count = 0;
1728 hba->intr_pri = NULL;
1729 hba->intr_cap = NULL;
1730 hba->intr_type = 0;
1731 hba->intr_arg = NULL;
1732 hba->intr_cond = 0;
1733 bzero(hba->intr_map, sizeof (hba->intr_map));
1734 bzero(hba->intr_lock, sizeof (hba->intr_lock));
1735
1736 if (type == DDI_INTR_TYPE_MSIX) {
1737 types &= (DDI_INTR_TYPE_MSI | DDI_INTR_TYPE_FIXED);
1738 goto begin;
1739 } else if (type == DDI_INTR_TYPE_MSI) {
1740 types &= DDI_INTR_TYPE_FIXED;
1741 goto begin;
1742 }
1743 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
1744 "MSI: Unable to initialize interrupts");
1745
1746 return (DDI_FAILURE);
1747
1748
1749} /* emlxs_msi_init() */
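/*
 * Editor's note (illustrative only): stripped of the emlxs bookkeeping,
 * emlxs_msi_init() above follows the standard Solaris DDI interrupt setup
 * sequence: query supported types, query the vector count, allocate
 * handles, then add and enable handlers.  A condensed sketch for a single
 * MSI vector (MSI-X/FIXED fallback and priority checks omitted; my_isr is
 * a hypothetical handler):
 */
#if 0	/* example only */
extern uint_t my_isr(caddr_t, caddr_t);

static int
example_setup_one_msi(dev_info_t *dip, ddi_intr_handle_t *hdlp)
{
	int types, nintrs, actual;

	if (ddi_intr_get_supported_types(dip, &types) != DDI_SUCCESS ||
	    !(types & DDI_INTR_TYPE_MSI))
		return (DDI_FAILURE);

	if (ddi_intr_get_nintrs(dip, DDI_INTR_TYPE_MSI, &nintrs) !=
	    DDI_SUCCESS || nintrs == 0)
		return (DDI_FAILURE);

	if (ddi_intr_alloc(dip, hdlp, DDI_INTR_TYPE_MSI, 0, 1, &actual,
	    DDI_INTR_ALLOC_NORMAL) != DDI_SUCCESS || actual == 0)
		return (DDI_FAILURE);

	if (ddi_intr_add_handler(hdlp[0], my_isr, NULL, NULL) !=
	    DDI_SUCCESS) {
		(void) ddi_intr_free(hdlp[0]);
		return (DDI_FAILURE);
	}

	return (ddi_intr_enable(hdlp[0]));
}
#endif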
1750
1751
1752/* EMLXS_INTR_UNINIT */
1753int32_t
1754emlxs_msi_uninit(emlxs_hba_t *hba)
1755{
1756 uint32_t count;
1757 int32_t i;
1758 ddi_intr_handle_t *htable;
1759 uint32_t *intr_pri;
1760 int32_t *intr_cap;
1761 int32_t ret;
1762
1763 if (!(hba->intr_flags & EMLXS_MSI_ENABLED)) {
1764 return (emlxs_intx_uninit(hba));
1765 }
1766 /*
1767 * EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg, "MSI:
1768 * emlxs_msi_uninit called. flags=%x", hba->intr_flags);
1769 */
1806 /*
1807 * EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
1808 * "MSI: emlxs_msi_uninit called. flags=%x",
1809 * hba->intr_flags);
1810 */
1770
1771 /* Make sure interrupts have been removed first */
1772 if ((hba->intr_flags & EMLXS_MSI_ADDED)) {
1773 ret = emlxs_msi_remove(hba);
1774
1775 if (ret != DDI_SUCCESS) {
1776 return (ret);
1777 }
1778 }
1779 /* Check if the interrupts are still initialized */
1780 if (!(hba->intr_flags & EMLXS_MSI_INITED)) {
1781 return (DDI_SUCCESS);
1782 }
1783 hba->intr_flags &= ~EMLXS_MSI_INITED;
1784
1785 /* Get handle table parameters */
1786 htable = hba->intr_htable;

--- 9 unchanged lines hidden ---

1796 hba->intr_type = 0;
1797 hba->intr_arg = NULL;
1798 hba->intr_cond = 0;
1799 bzero(hba->intr_map, sizeof (hba->intr_map));
1800
1801 if (intr_cap) {
1802 kmem_free(intr_cap, (count * sizeof (int32_t)));
1803 }
1804 if (intr_pri) {
1805 kmem_free(intr_pri, (count * sizeof (int32_t)));
1806 }
1807 if (htable) {
1808 /* Process the interrupt handlers */
1809 for (i = 0; i < count; ++i) {
1810 /* Free the handle[i] */
1811 (void) ddi_intr_free(htable[i]);
1855 ret = ddi_intr_free(htable[i]);
1812 }
1813
1814 kmem_free(htable, (count * sizeof (ddi_intr_handle_t)));
1815 }
1816 /* Destroy the intr locks */
1817 for (i = 0; i < EMLXS_MSI_MAX_INTRS; i++) {
1818 mutex_destroy(&hba->intr_lock[i]);
1819 }
1820
1821 /* Destroy the interrupt threads */
1822 for (i = 0; i < MAX_RINGS; i++) {
1823 emlxs_thread_destroy(&hba->ring[i].intr_thread);
1824 mutex_destroy(&hba->ring[i].rsp_lock);
1825 }
1826
1827 /*
1828 * EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg, "MSI:
1829 * emlxs_msi_uninit done. flags=%x", hba->intr_flags);
1830 */
1872 /*
1873 * EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
1874 * "MSI: emlxs_msi_uninit done. flags=%x",
1875 * hba->intr_flags);
1876 */
1831
1832 return (DDI_SUCCESS);
1833
1834} /* emlxs_msi_uninit() */
1835
1836
1837/* EMLXS_INTR_ADD */
1838int32_t
1839emlxs_msi_add(emlxs_hba_t *hba)
1840{
1841 emlxs_port_t *port = &PPORT;
1842 int32_t count;
1843 int32_t i;
1844 int32_t ret;
1845 ddi_intr_handle_t *htable = NULL;
1846 int32_t *intr_cap = NULL;
1847
1848 if (!(hba->intr_flags & EMLXS_MSI_ENABLED)) {
1849 return (emlxs_intx_add(hba));
1850 }
1851 /* Check if interrupts have already been added */
1852 if (hba->intr_flags & EMLXS_MSI_ADDED) {
1853 return (DDI_SUCCESS);
1854 }
1855 /* Check if interrupts have been initialized */
1856 if (!(hba->intr_flags & EMLXS_MSI_INITED)) {
1857 ret = emlxs_msi_init(hba, 0);
1858
1859 if (ret != DDI_SUCCESS) {
1860 return (ret);
1861 }
1862 }
1863 /* Get handle table parameters */
1864 htable = hba->intr_htable;
1865 count = hba->intr_count;
1866 intr_cap = hba->intr_cap;
1867
1868 /* Add the interrupt handlers */
1869 for (i = 0; i < count; ++i) {
1870 /* add handler for handle[i] */
1871 ret = ddi_intr_add_handler(htable[i], emlxs_msi_intr,
1872 (char *)hba, (char *)(unsigned long)i);
1920 ret =
1921 ddi_intr_add_handler(htable[i], emlxs_sli_msi_intr,
1922 (char *)hba, (char *)((unsigned long)i));
1873
1874 if (ret != DDI_SUCCESS) {
1875 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
1876 "MSI: ddi_intr_add_handler(%d) failed. handle=%p "
1877 "ret=%d", i, &htable[i], ret);
1926 "MSI: ddi_intr_add_handler(%d) failed. "
1927 "handle=%p ret=%d",
1928 i, &htable[i], ret);
1878
1879 /* Process the remaining interrupt handlers */
1880 while (i) {
1881 /* Decrement i */
1882 i--;
1883
1884 /* Remove the handler */
1885 ret = ddi_intr_remove_handler(htable[i]);

--- 15 unchanged lines hidden ---

1901
1902 for (i = 0; i < count; ++i) {
1903 ret = ddi_intr_enable(htable[i]);
1904
1905 if (ret != DDI_SUCCESS) {
1906 EMLXS_MSGF(EMLXS_CONTEXT,
1907 &emlxs_init_debug_msg,
1908 "MSI: ddi_intr_enable(%d) failed. "
1909 "ret=%d", i, ret);
1960 "ret=%d",
1961 i, ret);
1910 }
1911 }
1912 }
1913 } else {
1914 for (i = 0; i < count; ++i) {
1915 ret = ddi_intr_enable(htable[i]);
1916
1917 if (ret != DDI_SUCCESS) {
1918 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
1970 EMLXS_MSGF(EMLXS_CONTEXT,
1971 &emlxs_init_debug_msg,
1919 "MSI: ddi_intr_enable(%d) failed. ret=%d",
1920 i, ret);
1921 }
1922 }
1923 }
1924
1925
1926 /* Set flag to indicate support */
1927 hba->intr_flags |= EMLXS_MSI_ADDED;
1928
1929 return (DDI_SUCCESS);
1930
1931} /* emlxs_msi_add() */
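/*
 * Editor's note (illustrative only): emlxs_msi_add() above registers one
 * handler per allocated vector and, if ddi_intr_add_handler() fails part
 * way through, walks the loop index back down and removes the handlers it
 * had already added.  The register/unwind shape in isolation (my_isr is a
 * hypothetical handler; example_add_handlers is not a driver function):
 */
#if 0	/* example only */
extern uint_t my_isr(caddr_t, caddr_t);

static int
example_add_handlers(ddi_intr_handle_t *htable, int count, void *arg)
{
	int i;

	for (i = 0; i < count; i++) {
		if (ddi_intr_add_handler(htable[i], my_isr, arg,
		    (void *)(uintptr_t)i) != DDI_SUCCESS) {
			/* Unwind the handlers already added */
			while (i) {
				i--;
				(void) ddi_intr_remove_handler(htable[i]);
			}
			return (DDI_FAILURE);
		}
	}

	return (DDI_SUCCESS);
}
#endif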
1932
1933
1934
1935/* EMLXS_INTR_REMOVE */
1936int32_t
1937emlxs_msi_remove(emlxs_hba_t *hba)
1938{
1939 emlxs_port_t *port = &PPORT;
1940 uint32_t count;
1941 int32_t i;
1942 ddi_intr_handle_t *htable;
1943 int32_t *intr_cap;
1944 int32_t ret;
1945
1946 if (!(hba->intr_flags & EMLXS_MSI_ENABLED)) {
1947 return (emlxs_intx_remove(hba));
1948 }
1949 /*
1950 * EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg, "MSI:
1951 * emlxs_msi_remove called. flags=%x", hba->intr_flags);
1952 */
2003 /*
2004 * EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
2005 * "MSI: emlxs_msi_remove called. flags=%x",
2006 * hba->intr_flags);
2007 */
1953
1954 /* Check if interrupts have already been removed */
1955 if (!(hba->intr_flags & EMLXS_MSI_ADDED)) {
1956 return (DDI_SUCCESS);
1957 }
1958 hba->intr_flags &= ~EMLXS_MSI_ADDED;
1959
1960 /* Disable all adapter interrupts */
1961 hba->hc_copy = 0;
1962 WRITE_CSR_REG(hba, FC_HC_REG(hba, hba->csr_addr), hba->hc_copy);
2016 emlxs_disable_intr(hba, 0);
1963
1964 /* Get handle table parameters */
1965 htable = hba->intr_htable;
1966 count = hba->intr_count;
1967 intr_cap = hba->intr_cap;
1968
1969 /* Disable the interrupts */
1970 if (intr_cap[0] & DDI_INTR_FLAG_BLOCK) {

--- 6 unchanged lines hidden ---

1977
1978 for (i = 0; i < count; i++) {
1979 ret = ddi_intr_disable(htable[i]);
1980
1981 if (ret != DDI_SUCCESS) {
1982 EMLXS_MSGF(EMLXS_CONTEXT,
1983 &emlxs_init_debug_msg,
1984 "MSI: ddi_intr_disable(%d) failed. "
1985 "ret=%d", i, ret);
2039 "ret=%d",
2040 i, ret);
1986 }
1987 }
1988 }
1989 } else {
1990 for (i = 0; i < count; i++) {
1991 ret = ddi_intr_disable(htable[i]);
1992
1993 if (ret != DDI_SUCCESS) {
1994 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
2049 EMLXS_MSGF(EMLXS_CONTEXT,
2050 &emlxs_init_debug_msg,
1995 "MSI: ddi_intr_disable(%d) failed. ret=%d",
1996 i, ret);
1997 }
1998 }
1999 }
2000
2001 /* Process the interrupt handlers */
2002 for (i = 0; i < count; i++) {
2003 /* Remove the handler */
2004 ret = ddi_intr_remove_handler(htable[i]);
2005
2006
2007 }
2008
2009 return (DDI_SUCCESS);
2010
2011} /* emlxs_msi_remove() */
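/*
 * Editor's note (illustrative only): emlxs_msi_remove() above tears down in
 * the reverse order of emlxs_msi_add(): quiet the adapter, disable the
 * vectors (as a block when DDI_INTR_FLAG_BLOCK is set), then remove the
 * handlers; freeing the handles and tables is left to emlxs_msi_uninit().
 * A generic sketch of that ordering (not necessarily line-for-line what the
 * hidden region above does):
 */
#if 0	/* example only */
static void
example_remove_vectors(ddi_intr_handle_t *htable, int count, int cap0)
{
	int i;

	if (cap0 & DDI_INTR_FLAG_BLOCK) {
		/* Block-capable vectors must be disabled as a group */
		(void) ddi_intr_block_disable(htable, count);
	} else {
		for (i = 0; i < count; i++) {
			(void) ddi_intr_disable(htable[i]);
		}
	}

	for (i = 0; i < count; i++) {
		(void) ddi_intr_remove_handler(htable[i]);
	}
}
#endif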
2012
2013
2014#endif /* MSI_SUPPORT */
2015
2016
2017/* EMLXS_INTR_INIT */
2018/* ARGSUSED */
2019int32_t
2020emlxs_intx_init(emlxs_hba_t *hba, uint32_t max)
2021{
2022 emlxs_port_t *port = &PPORT;
2023 int32_t ret;
2024 uint32_t i;
2025 char buf[64];
2026
2027 /* Check if interrupts have already been initialized */
2028 if (hba->intr_flags & EMLXS_INTX_INITED) {
2029 return (DDI_SUCCESS);
2030 }
2031 /* Check if adapter is flagged for INTX support */
2032 if (!(hba->model_info.flags & EMLXS_INTX_SUPPORTED)) {
2033 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
2034 "INTX: %s does not support INTX. flags=0x%x",
2035 hba->model_info.model, hba->model_info.flags);
2036
2037 return (DDI_FAILURE);
2038 }
2039 /*
2040 * Interrupt number '0' is a high-level interrupt. This driver does
2041 * not support having its interrupts mapped above scheduler priority;
2042 * i.e., we always expect to be able to call general kernel routines
2043 * that may invoke the scheduler.
2044 */
2097 /*
2098 * Interrupt number '0' is a high-level interrupt. This driver
2099 * does not support having its interrupts mapped above scheduler
2100 * priority; i.e., we always expect to be able to call general
2101 * kernel routines that may invoke the scheduler.
2102 */
2045 if (ddi_intr_hilevel(hba->dip, EMLXS_INUMBER) != 0) {
2046 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
2047 "INTX: High-level interrupt not supported.");
2048
2049 return (DDI_FAILURE);
2050 }
2051 /* Get an iblock cookie */
2052 ret = ddi_get_iblock_cookie(hba->dip, (uint32_t)EMLXS_INUMBER,
2111 ret =
2112 ddi_get_iblock_cookie(hba->dip, (uint32_t)EMLXS_INUMBER,
2053 (ddi_iblock_cookie_t *)&hba->intr_arg);
2054 if (ret != DDI_SUCCESS) {
2055 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
2056 "INTX: ddi_get_iblock_cookie failed. ret=%d", ret);
2057
2058 return (ret);
2059 }
2060 hba->intr_flags |= EMLXS_INTX_INITED;
2061
2062 /* Create the interrupt threads */
2063 for (i = 0; i < MAX_RINGS; i++) {
2064 (void) sprintf(buf, "%s%d_ring%d mutex", DRIVER_NAME,
2065 hba->ddiinst, i);
2066 mutex_init(&hba->ring[i].rsp_lock, buf, MUTEX_DRIVER,
2067 (void *)hba->intr_arg);
2068
2069 emlxs_thread_create(hba, &hba->ring[i].intr_thread);
2070 }
2071
2072 return (DDI_SUCCESS);
2073
2074} /* emlxs_intx_init() */
2075
2076
2077/* EMLXS_INTR_UNINIT */
2078int32_t
2079emlxs_intx_uninit(emlxs_hba_t *hba)
2080{
2081 int32_t ret;
2082 uint32_t i;
2083
2084 /* Make sure interrupts have been removed */
2085 if ((hba->intr_flags & EMLXS_INTX_ADDED)) {
2086 ret = emlxs_intx_remove(hba);
2087
2088 if (ret != DDI_SUCCESS) {
2089 return (ret);
2090 }
2091 }
2092 /* Check if the interrupts are still initialized */
2093 if (!(hba->intr_flags & EMLXS_INTX_INITED)) {
2094 return (DDI_SUCCESS);
2095 }
2096 hba->intr_flags &= ~EMLXS_INTX_INITED;
2097
2098 hba->intr_arg = NULL;
2099
2100 /* Destroy the interrupt threads */
2101 for (i = 0; i < MAX_RINGS; i++) {
2102 emlxs_thread_destroy(&hba->ring[i].intr_thread);
2103 mutex_destroy(&hba->ring[i].rsp_lock);
2104 }
2105
2106 return (DDI_SUCCESS);
2107
2108} /* emlxs_intx_uninit() */
2109
2110
2111/* This is the legacy method for adding interrupts in Solaris */
2112/* EMLXS_INTR_ADD */
2173/*
2174 * This is the legacy method for adding interrupts in Solaris
2175 * EMLXS_INTR_ADD
2176 */
2113int32_t
2114emlxs_intx_add(emlxs_hba_t *hba)
2115{
2116 emlxs_port_t *port = &PPORT;
2117 int32_t ret;
2118
2119 /* Check if interrupts have already been added */
2120 if (hba->intr_flags & EMLXS_INTX_ADDED) {
2121 return (DDI_SUCCESS);
2122 }
2123 /* Check if interrupts have been initialized */
2124 if (!(hba->intr_flags & EMLXS_INTX_INITED)) {
2125 ret = emlxs_intx_init(hba, 0);
2126
2127 if (ret != DDI_SUCCESS) {
2128 return (ret);
2129 }
2130 }
2131 /* Add interrupt handler routine */
2132 ret = ddi_add_intr((void *)hba->dip, (uint_t)EMLXS_INUMBER,
2133 (ddi_iblock_cookie_t *)&hba->intr_arg, (ddi_idevice_cookie_t *)0,
2134 (uint_t(*) ())emlxs_intx_intr, (caddr_t)hba);
2198 ret = ddi_add_intr((void *)hba->dip,
2199 (uint_t)EMLXS_INUMBER,
2200 (ddi_iblock_cookie_t *)&hba->intr_arg,
2201 (ddi_idevice_cookie_t *)0,
2202 (uint_t(*)())emlxs_sli_intx_intr, (caddr_t)hba);
2135
2136 if (ret != DDI_SUCCESS) {
2137 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_failed_msg,
2138 "INTX: ddi_add_intr failed. ret=%d", ret);
2139
2140 return (ret);
2141 }
2142 hba->intr_flags |= EMLXS_INTX_ADDED;
2143
2144 return (DDI_SUCCESS);
2145
2146} /* emlxs_intx_add() */
2215} /* emlxs_intx_add() */
2147
2148
2149/* EMLXS_INTR_REMOVE */
2150int32_t
2151emlxs_intx_remove(emlxs_hba_t *hba)
2152{
2216
2217
2218/* EMLXS_INTR_REMOVE */
2219int32_t
2220emlxs_intx_remove(emlxs_hba_t *hba)
2221{
2153
2154 /* Check if interrupts have already been removed */
2155 if (!(hba->intr_flags & EMLXS_INTX_ADDED)) {
2156 return (DDI_SUCCESS);
2157 }
2158 hba->intr_flags &= ~EMLXS_INTX_ADDED;
2159
2160 /* Disable all adapter interrupts */
2222 /* Check if interrupts have already been removed */
2223 if (!(hba->intr_flags & EMLXS_INTX_ADDED)) {
2224 return (DDI_SUCCESS);
2225 }
2226 hba->intr_flags &= ~EMLXS_INTX_ADDED;
2227
2228 /* Disable all adapter interrupts */
2161 hba->hc_copy = 0;
2162 WRITE_CSR_REG(hba, FC_HC_REG(hba, hba->csr_addr), hba->hc_copy);
2229 emlxs_disable_intr(hba, 0);
2163
2164 /* Remove the interrupt */
2165 (void) ddi_remove_intr((void *)hba->dip, (uint_t)EMLXS_INUMBER,
2166 hba->intr_arg);
2167
2168 return (DDI_SUCCESS);
2169
2230
2231 /* Remove the interrupt */
2232 (void) ddi_remove_intr((void *)hba->dip, (uint_t)EMLXS_INUMBER,
2233 hba->intr_arg);
2234
2235 return (DDI_SUCCESS);
2236
2170} /* emlxs_intx_remove() */
2237} /* emlxs_intx_remove() */
2171
2172
2238
2239
2173extern int
2174emlxs_hba_init(emlxs_hba_t *hba)
2175{
2176 emlxs_port_t *port = &PPORT;
2177 emlxs_port_t *vport;
2178 emlxs_config_t *cfg;
2179 int32_t i;
2180
2181 cfg = &CFG;
2182 i = 0;
2183
2184 /* Restart the adapter */
2185 if (emlxs_hba_reset(hba, 1, 0)) {
2186 return (1);
2187 }
2188 hba->ring_count = MAX_RINGS; /* number of rings used */
2189
2190 /* WARNING: There is a max of 6 ring masks allowed */
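	/*
	 * Each mask entry below pairs an R_CTL value/mask (ring_rval/
	 * ring_rmask) with a TYPE value/mask (ring_tval/ring_tmask),
	 * apparently used to steer unsolicited frames to this ring.
	 */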
2191 /*
2192 * RING 0 - FCP
2193 */
2194 if (hba->tgt_mode) {
2195 hba->ring_masks[FC_FCP_RING] = 1;
2196 hba->ring_rval[i] = FC_FCP_CMND;
2197 hba->ring_rmask[i] = 0;
2198 hba->ring_tval[i] = FC_FCP_DATA;
2199 hba->ring_tmask[i++] = 0xFF;
2200 } else {
2201 hba->ring_masks[FC_FCP_RING] = 0;
2202 }
2203
2204 hba->ring[FC_FCP_RING].fc_numCiocb = SLIM_IOCB_CMD_R0_ENTRIES;
2205 hba->ring[FC_FCP_RING].fc_numRiocb = SLIM_IOCB_RSP_R0_ENTRIES;
2206
2207 /*
2208 * RING 1 - IP
2209 */
2210 if (cfg[CFG_NETWORK_ON].current) {
2211 hba->ring_masks[FC_IP_RING] = 1;
2212 hba->ring_rval[i] = FC_UNSOL_DATA; /* Unsolicited Data */
2213 hba->ring_rmask[i] = 0xFF;
2214 hba->ring_tval[i] = FC_LLC_SNAP; /* LLC/SNAP */
2215 hba->ring_tmask[i++] = 0xFF;
2216 } else {
2217 hba->ring_masks[FC_IP_RING] = 0;
2218 }
2219
2220 hba->ring[FC_IP_RING].fc_numCiocb = SLIM_IOCB_CMD_R1_ENTRIES;
2221 hba->ring[FC_IP_RING].fc_numRiocb = SLIM_IOCB_RSP_R1_ENTRIES;
2222
2223 /*
2224 * RING 2 - ELS
2225 */
2226 hba->ring_masks[FC_ELS_RING] = 1;
2227 hba->ring_rval[i] = FC_ELS_REQ; /* ELS request/response */
2228 hba->ring_rmask[i] = 0xFE;
2229 hba->ring_tval[i] = FC_ELS_DATA; /* ELS */
2230 hba->ring_tmask[i++] = 0xFF;
2231
2232 hba->ring[FC_ELS_RING].fc_numCiocb = SLIM_IOCB_CMD_R2_ENTRIES;
2233 hba->ring[FC_ELS_RING].fc_numRiocb = SLIM_IOCB_RSP_R2_ENTRIES;
2234
2235 /*
2236 * RING 3 - CT
2237 */
2238 hba->ring_masks[FC_CT_RING] = 1;
2239 hba->ring_rval[i] = FC_UNSOL_CTL; /* CT request/response */
2240 hba->ring_rmask[i] = 0xFE;
2241 hba->ring_tval[i] = FC_CT_TYPE; /* CT */
2242 hba->ring_tmask[i++] = 0xFF;
2243
2244 hba->ring[FC_CT_RING].fc_numCiocb = SLIM_IOCB_CMD_R3_ENTRIES;
2245 hba->ring[FC_CT_RING].fc_numRiocb = SLIM_IOCB_RSP_R3_ENTRIES;
2246
2247 if (i > 6) {
2248 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_failed_msg,
2249 "emlxs_hba_init: Too many ring masks defined. cnt=%d", i);
2250 return (1);
2251 }
2252 /* Initialize all the port objects */
2253 hba->vpi_max = 1;
2254 for (i = 0; i < MAX_VPORTS; i++) {
2255 vport = &VPORT(i);
2256 vport->hba = hba;
2257 vport->vpi = i;
2258 }
2259
2260 /*
2261 * Initialize the max_node count to a default value if needed
2262 * This determines how many node objects we preallocate in the pool
2263 * The actual max_nodes will be set later based on adapter info
2264 */
2265 if (hba->max_nodes == 0) {
2266 if (cfg[CFG_NUM_NODES].current > 0) {
2267 hba->max_nodes = cfg[CFG_NUM_NODES].current;
2268 } else if (hba->model_info.chip >= EMLXS_SATURN_CHIP) {
2269 hba->max_nodes = 4096;
2270 } else {
2271 hba->max_nodes = 512;
2272 }
2273 }
2274 return (0);
2275
2276} /* emlxs_hba_init() */
2277
2278
2279static void
2280emlxs_process_link_speed(emlxs_hba_t *hba)
2281{
2282 emlxs_vpd_t *vpd;
2283 emlxs_config_t *cfg;
2284 char *cptr;
2285 uint32_t hi;
2286
2287 /*
2240static void
2241emlxs_process_link_speed(emlxs_hba_t *hba)
2242{
2243 emlxs_vpd_t *vpd;
2244 emlxs_config_t *cfg;
2245 char *cptr;
2246 uint32_t hi;
2247
2248 /*
2288 * This routine modifies the link-speed config parameter entry based
2289 * on adapter capabilities
2249 * This routine modifies the link-speed config parameter entry
2250 * based on adapter capabilities
2290 */
2291 vpd = &VPD;
2292 cfg = &hba->config[CFG_LINK_SPEED];
2293
2294 cptr = cfg->help;
2295 (void) strcpy(cptr, "Select link speed. [0=Auto");
2296 cptr += 26;
2297 hi = 0;
2298
2299 if (vpd->link_speed & LMT_1GB_CAPABLE) {
2300 (void) strcpy(cptr, ", 1=1Gb");
2301 cptr += 7;
2302 hi = 1;
2303 }
2251 */
2252 vpd = &VPD;
2253 cfg = &hba->config[CFG_LINK_SPEED];
2254
2255 cptr = cfg->help;
2256 (void) strcpy(cptr, "Select link speed. [0=Auto");
2257 cptr += 26;
2258 hi = 0;
2259
2260 if (vpd->link_speed & LMT_1GB_CAPABLE) {
2261 (void) strcpy(cptr, ", 1=1Gb");
2262 cptr += 7;
2263 hi = 1;
2264 }
2265
2304 if (vpd->link_speed & LMT_2GB_CAPABLE) {
2305 (void) strcpy(cptr, ", 2=2Gb");
2306 cptr += 7;
2307 hi = 2;
2308 }
2266 if (vpd->link_speed & LMT_2GB_CAPABLE) {
2267 (void) strcpy(cptr, ", 2=2Gb");
2268 cptr += 7;
2269 hi = 2;
2270 }
2271
2309 if (vpd->link_speed & LMT_4GB_CAPABLE) {
2310 (void) strcpy(cptr, ", 4=4Gb");
2311 cptr += 7;
2312 hi = 4;
2313 }
2272 if (vpd->link_speed & LMT_4GB_CAPABLE) {
2273 (void) strcpy(cptr, ", 4=4Gb");
2274 cptr += 7;
2275 hi = 4;
2276 }
2277
2314 if (vpd->link_speed & LMT_8GB_CAPABLE) {
2315 (void) strcpy(cptr, ", 8=8Gb");
2316 cptr += 7;
2317 hi = 8;
2318 }
2278 if (vpd->link_speed & LMT_8GB_CAPABLE) {
2279 (void) strcpy(cptr, ", 8=8Gb");
2280 cptr += 7;
2281 hi = 8;
2282 }
2283
2319 if (vpd->link_speed & LMT_10GB_CAPABLE) {
2320 (void) strcpy(cptr, ", 10=10Gb");
2321 cptr += 9;
2322 hi = 10;
2323 }
2284 if (vpd->link_speed & LMT_10GB_CAPABLE) {
2285 (void) strcpy(cptr, ", 10=10Gb");
2286 cptr += 9;
2287 hi = 10;
2288 }
2289
2324 (void) strcpy(cptr, "]");
2325 cfg->hi = hi;
2326
2327 /* Now revalidate the current parameter setting */
2328 cfg->current = emlxs_check_parm(hba, CFG_LINK_SPEED, cfg->current);
2329
2330 return;
2331
2290 (void) strcpy(cptr, "]");
2291 cfg->hi = hi;
2292
2293 /* Now revalidate the current parameter setting */
2294 cfg->current = emlxs_check_parm(hba, CFG_LINK_SPEED, cfg->current);
2295
2296 return;
2297
2332} /* emlxs_process_link_speed() */
2298} /* emlxs_process_link_speed() */
2333
2334
2335/*
2299
2300
2301/*
2302 * emlxs_parse_vpd()
2336 *
2303 *
2337 * emlxs_parse_vpd
2338 * This routine will parse the VPD data
2304 * This routine will parse the VPD data
2339 *
2340 */
2341extern int
2342emlxs_parse_vpd(emlxs_hba_t *hba, uint8_t *vpd_buf, uint32_t size)
2343{
2344 emlxs_port_t *port = &PPORT;
2345 char tag[3];
2346 uint8_t lenlo, lenhi;
2347 uint32_t n;
2348 uint16_t block_size;
2349 uint32_t block_index = 0;
2350 uint8_t sub_size;
2351 uint32_t sub_index;
2352 int32_t finished = 0;
2353 int32_t index = 0;
2354 char buffer[128];
2355 emlxs_vpd_t *vpd;
2305 */
2306extern int
2307emlxs_parse_vpd(emlxs_hba_t *hba, uint8_t *vpd_buf, uint32_t size)
2308{
2309 emlxs_port_t *port = &PPORT;
2310 char tag[3];
2311 uint8_t lenlo, lenhi;
2312 uint32_t n;
2313 uint16_t block_size;
2314 uint32_t block_index = 0;
2315 uint8_t sub_size;
2316 uint32_t sub_index;
2317 int32_t finished = 0;
2318 int32_t index = 0;
2319 char buffer[128];
2320 emlxs_vpd_t *vpd;
2356 emlxs_config_t *cfg;
2357
2358 vpd = &VPD;
2321
2322 vpd = &VPD;
2359 cfg = &CFG;
2360
2323
2361#ifdef MENLO_TEST
2362 /* Check if VPD is disabled for Hornet adapters */
2363 if ((hba->model_info.device_id == PCI_DEVICE_ID_LP21000_M) &&
2364 (cfg[CFG_HORNET_VPD].current == 0)) {
2365 return (1);
2366 }
2367#endif /* MENLO_TEST */
2368
2324
2369
2370 while (!finished && (block_index < size)) {
2371 /*
2325 while (!finished && (block_index < size)) {
2326 /*
2372 * EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_vpd_msg, "block_index =
2373 * %x", block_index);
2327 * EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_vpd_msg,
2328 * "block_index = %x", block_index);
2374 */
2375
2376 switch (vpd_buf[block_index]) {
2377 case 0x82:
2378 index = block_index;
2379 index += 1;
2380 lenlo = vpd_buf[index];
2381 index += 1;
2382 lenhi = vpd_buf[index];
2383 index += 1;
2384 block_index = index;
2385
2386 block_size = ((((uint16_t)lenhi) << 8) + lenlo);
2387 block_index += block_size;
2388
2389 /*
2390 * EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_vpd_msg,
2329 */
2330
2331 switch (vpd_buf[block_index]) {
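	/*
	 * Standard PCI VPD resource tags: 0x82 = Identifier String,
	 * 0x90 = VPD-R (read-only data), 0x78 = End tag.
	 */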
2332 case 0x82:
2333 index = block_index;
2334 index += 1;
2335 lenlo = vpd_buf[index];
2336 index += 1;
2337 lenhi = vpd_buf[index];
2338 index += 1;
2339 block_index = index;
2340
2341 block_size = ((((uint16_t)lenhi) << 8) + lenlo);
2342 block_index += block_size;
2343
2344 /*
2345 * EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_vpd_msg,
2391 * "block_size = %x", block_size);
2346 * "block_size = %x", block_size);
2392 */
2393
2394 n = sizeof (buffer);
2395 bzero(buffer, n);
2396 bcopy(&vpd_buf[index], buffer,
2397 (block_size < (n - 1)) ? block_size : (n - 1));
2398
2399 (void) strcpy(vpd->id, buffer);
2347 */
2348
2349 n = sizeof (buffer);
2350 bzero(buffer, n);
2351 bcopy(&vpd_buf[index], buffer,
2352 (block_size < (n - 1)) ? block_size : (n - 1));
2353
2354 (void) strcpy(vpd->id, buffer);
2400 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_vpd_msg,
2401 "ID: %s", vpd->id);
2355 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_vpd_msg, "ID: %s",
2356 vpd->id);
2402
2403 break;
2404
2405 case 0x90:
2406 index = block_index;
2407 index += 1;
2408 lenlo = vpd_buf[index];
2409 index += 1;
2410 lenhi = vpd_buf[index];
2411 index += 1;
2412 block_index = index;
2413 sub_index = index;
2414
2415 block_size = ((((uint16_t)lenhi) << 8) + lenlo);
2416 block_index += block_size;
2417
2418 /*
2419 * EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_vpd_msg,
2357
2358 break;
2359
2360 case 0x90:
2361 index = block_index;
2362 index += 1;
2363 lenlo = vpd_buf[index];
2364 index += 1;
2365 lenhi = vpd_buf[index];
2366 index += 1;
2367 block_index = index;
2368 sub_index = index;
2369
2370 block_size = ((((uint16_t)lenhi) << 8) + lenlo);
2371 block_index += block_size;
2372
2373 /*
2374 * EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_vpd_msg,
2420 * "block_size = %x", block_size);
2375 * "block_size = %x", block_size);
2421 */
2422
2423 /* Scan for sub-blocks */
2424 while ((sub_index < block_index) &&
2425 (sub_index < size)) {
2426 /*
2427 * EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_vpd_msg,
2376 */
2377
2378 /* Scan for sub-blocks */
2379 while ((sub_index < block_index) &&
2380 (sub_index < size)) {
2381 /*
2382 * EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_vpd_msg,
2428 * "sub_index = %x", sub_index);
2383 * "sub_index = %x", sub_index);
2429 */
2430
2431 index = sub_index;
2432 tag[0] = vpd_buf[index++];
2433 tag[1] = vpd_buf[index++];
2434 tag[2] = 0;
2435 sub_size = vpd_buf[index++];
2436
2437 /*
2438 * EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_vpd_msg,
2384 */
2385
2386 index = sub_index;
2387 tag[0] = vpd_buf[index++];
2388 tag[1] = vpd_buf[index++];
2389 tag[2] = 0;
2390 sub_size = vpd_buf[index++];
2391
2392 /*
2393 * EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_vpd_msg,
2439 * "sub_size = %x", sub_size);
2394 * "sub_size = %x", sub_size);
2440 */
2441
2442 sub_index = (index + sub_size);
2443
2444 n = sizeof (buffer);
2445 bzero(buffer, n);
2446 bcopy(&vpd_buf[index], buffer,
2447 (sub_size < (n - 1)) ? sub_size : (n - 1));
2448
2449 /*
2450 * Look for Engineering Change (EC)
2451 */
2452 if (strcmp(tag, "EC") == 0) {
2453 (void) strcpy(vpd->eng_change, buffer);
2454 EMLXS_MSGF(EMLXS_CONTEXT,
2395 */
2396
2397 sub_index = (index + sub_size);
2398
2399 n = sizeof (buffer);
2400 bzero(buffer, n);
2401 bcopy(&vpd_buf[index], buffer,
2402 (sub_size < (n - 1)) ? sub_size : (n - 1));
2403
2404 /*
2405 * Look for Engineering Change (EC)
2406 */
2407 if (strcmp(tag, "EC") == 0) {
2408 (void) strcpy(vpd->eng_change, buffer);
2409 EMLXS_MSGF(EMLXS_CONTEXT,
2455 &emlxs_vpd_msg,
2456 "EC: %s", vpd->eng_change);
2410 &emlxs_vpd_msg, "EC: %s",
2411 vpd->eng_change);
2457 }
2458 /*
2459 * Look for Manufacturer (MN)
2460 */
2461 else if (strcmp(tag, "MN") == 0) {
2462 (void) strcpy(vpd->manufacturer,
2463 buffer);
2464 EMLXS_MSGF(EMLXS_CONTEXT,
2412 }
2413 /*
2414 * Look for Manufacturer (MN)
2415 */
2416 else if (strcmp(tag, "MN") == 0) {
2417 (void) strcpy(vpd->manufacturer,
2418 buffer);
2419 EMLXS_MSGF(EMLXS_CONTEXT,
2465 &emlxs_vpd_msg,
2466 "MN: %s", vpd->manufacturer);
2420 &emlxs_vpd_msg, "MN: %s",
2421 vpd->manufacturer);
2467 }
2468 /*
2469 * Look for Serial Number (SN)
2470 */
2471 else if (strcmp(tag, "SN") == 0) {
2472 (void) strcpy(vpd->serial_num, buffer);
2473 EMLXS_MSGF(EMLXS_CONTEXT,
2422 }
2423 /*
2424 * Look for Serial Number (SN)
2425 */
2426 else if (strcmp(tag, "SN") == 0) {
2427 (void) strcpy(vpd->serial_num, buffer);
2428 EMLXS_MSGF(EMLXS_CONTEXT,
2474 &emlxs_vpd_msg,
2475 "SN: %s", vpd->serial_num);
2429 &emlxs_vpd_msg, "SN: %s",
2430 vpd->serial_num);
2476
2477 /* Validate the serial number */
2431
2432 /* Validate the serial number */
2478 if ((strncmp(buffer, "FFFFFFFFFF",
2479 10) == 0) ||
2480 (strncmp(buffer, "0000000000",
2481 10) == 0)) {
2433 if (strncmp(buffer, "FFFFFFFFFF", 10) ==
2434 0 ||
2435 strncmp(buffer, "0000000000", 10) ==
2436 0) {
2482 vpd->serial_num[0] = 0;
2483 }
2484 }
2485 /*
2486 * Look for Part Number (PN)
2487 */
2488 else if (strcmp(tag, "PN") == 0) {
2489 (void) strcpy(vpd->part_num, buffer);
2490 EMLXS_MSGF(EMLXS_CONTEXT,
2437 vpd->serial_num[0] = 0;
2438 }
2439 }
2440 /*
2441 * Look for Part Number (PN)
2442 */
2443 else if (strcmp(tag, "PN") == 0) {
2444 (void) strcpy(vpd->part_num, buffer);
2445 EMLXS_MSGF(EMLXS_CONTEXT,
2491 &emlxs_vpd_msg,
2492 "PN: %s", vpd->part_num);
2446 &emlxs_vpd_msg, "PN: %s",
2447 vpd->part_num);
2493 }
2494 /*
2495 * Look for (V0)
2496 */
2497 else if (strcmp(tag, "V0") == 0) {
2498 /* Not used */
2499 EMLXS_MSGF(EMLXS_CONTEXT,
2448 }
2449 /*
2450 * Look for (V0)
2451 */
2452 else if (strcmp(tag, "V0") == 0) {
2453 /* Not used */
2454 EMLXS_MSGF(EMLXS_CONTEXT,
2500 &emlxs_vpd_msg,
2501 "V0: %s", buffer);
2455 &emlxs_vpd_msg, "V0: %s", buffer);
2502 }
2503 /*
2504 * Look for model description (V1)
2505 */
2506 else if (strcmp(tag, "V1") == 0) {
2507 (void) strcpy(vpd->model_desc, buffer);
2508 EMLXS_MSGF(EMLXS_CONTEXT,
2456 }
2457 /*
2458 * Look for model description (V1)
2459 */
2460 else if (strcmp(tag, "V1") == 0) {
2461 (void) strcpy(vpd->model_desc, buffer);
2462 EMLXS_MSGF(EMLXS_CONTEXT,
2509 &emlxs_vpd_msg,
2510 "Desc: %s", vpd->model_desc);
2463 &emlxs_vpd_msg, "Desc: %s",
2464 vpd->model_desc);
2511 }
2512 /*
2513 * Look for model (V2)
2514 */
2515 else if (strcmp(tag, "V2") == 0) {
2516 (void) strcpy(vpd->model, buffer);
2517 EMLXS_MSGF(EMLXS_CONTEXT,
2465 }
2466 /*
2467 * Look for model (V2)
2468 */
2469 else if (strcmp(tag, "V2") == 0) {
2470 (void) strcpy(vpd->model, buffer);
2471 EMLXS_MSGF(EMLXS_CONTEXT,
2518 &emlxs_vpd_msg,
2519 "Model: %s", vpd->model);
2472 &emlxs_vpd_msg, "Model: %s",
2473 vpd->model);
2520 }
2521 /*
2522 * Look for program type (V3)
2523 */
2524
2525 else if (strcmp(tag, "V3") == 0) {
2526 (void) strcpy(vpd->prog_types, buffer);
2527 EMLXS_MSGF(EMLXS_CONTEXT,
2474 }
2475 /*
2476 * Look for program type (V3)
2477 */
2478
2479 else if (strcmp(tag, "V3") == 0) {
2480 (void) strcpy(vpd->prog_types, buffer);
2481 EMLXS_MSGF(EMLXS_CONTEXT,
2528 &emlxs_vpd_msg,
2529 "Prog Types: %s", vpd->prog_types);
2482 &emlxs_vpd_msg, "Prog Types: %s",
2483 vpd->prog_types);
2530 }
2531 /*
2532 * Look for port number (V4)
2533 */
2534 else if (strcmp(tag, "V4") == 0) {
2535 (void) strcpy(vpd->port_num, buffer);
2536 vpd->port_index =
2537 emlxs_strtol(vpd->port_num, 10);
2538
2539 EMLXS_MSGF(EMLXS_CONTEXT,
2484 }
2485 /*
2486 * Look for port number (V4)
2487 */
2488 else if (strcmp(tag, "V4") == 0) {
2489 (void) strcpy(vpd->port_num, buffer);
2490 vpd->port_index =
2491 emlxs_strtol(vpd->port_num, 10);
2492
2493 EMLXS_MSGF(EMLXS_CONTEXT,
2540 &emlxs_vpd_msg,
2541 "Port: %s",
2542 (vpd->port_num[0]) ?
2543 vpd->port_num : "not applicable");
2494 &emlxs_vpd_msg, "Port: %s",
2495 (vpd->port_num[0]) ? vpd->
2496 port_num : "not applicable");
2544 }
2545 /*
2546 * Look for checksum (RV)
2547 */
2548 else if (strcmp(tag, "RV") == 0) {
2549 /* Not used */
2550 EMLXS_MSGF(EMLXS_CONTEXT,
2497 }
2498 /*
2499 * Look for checksum (RV)
2500 */
2501 else if (strcmp(tag, "RV") == 0) {
2502 /* Not used */
2503 EMLXS_MSGF(EMLXS_CONTEXT,
2551 &emlxs_vpd_msg,
2552 "Checksum: 0x%x", buffer[0]);
2553 } else {
2504 &emlxs_vpd_msg, "Checksum: 0x%x",
2505 buffer[0]);
2506 }
2507
2508 else {
2554 /* Generic */
2555 EMLXS_MSGF(EMLXS_CONTEXT,
2509 /* Generic */
2510 EMLXS_MSGF(EMLXS_CONTEXT,
2556 &emlxs_vpd_msg,
2557 "Tag: %s: %s", tag, buffer);
2511 &emlxs_vpd_msg, "Tag: %s: %s",
2512 tag, buffer);
2558 }
2559 }
2560
2561 break;
2562
2563 case 0x78:
2564 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_vpd_msg, "End Tag.");
2565 finished = 1;

--- 7 unchanged lines hidden (view full) ---

2573 vpd_buf[index + 4], vpd_buf[index + 5],
2574 vpd_buf[index + 6], vpd_buf[index + 7]);
2575 return (0);
2576 }
2577 }
2578
2579 return (1);
2580
2513 }
2514 }
2515
2516 break;
2517
2518 case 0x78:
2519 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_vpd_msg, "End Tag.");
2520 finished = 1;

--- 7 unchanged lines hidden (view full) ---

2528 vpd_buf[index + 4], vpd_buf[index + 5],
2529 vpd_buf[index + 6], vpd_buf[index + 7]);
2530 return (0);
2531 }
2532 }
2533
2534 return (1);
2535
2581} /* emlxs_parse_vpd */
2536} /* emlxs_parse_vpd */
2582
2583
2584
2585static uint32_t
2586emlxs_decode_biu_rev(uint32_t rev)
2587{
2588 return (rev & 0xf);
2537
2538
2539
2540static uint32_t
2541emlxs_decode_biu_rev(uint32_t rev)
2542{
2543 return (rev & 0xf);
2589} /* End emlxs_decode_biu_rev */
2544} /* End emlxs_decode_biu_rev */
2590
2591
2592static uint32_t
2593emlxs_decode_endec_rev(uint32_t rev)
2594{
2595 return ((rev >> 28) & 0xf);
2545
2546
2547static uint32_t
2548emlxs_decode_endec_rev(uint32_t rev)
2549{
2550 return ((rev >> 28) & 0xf);
2596} /* End emlxs_decode_endec_rev */
2551} /* End emlxs_decode_endec_rev */
2597
2598
2599extern void
2600emlxs_decode_firmware_rev(emlxs_hba_t *hba, emlxs_vpd_t *vpd)
2601{
2602 if (vpd->rBit) {
2603 switch (hba->sli_mode) {
2552
2553
2554extern void
2555emlxs_decode_firmware_rev(emlxs_hba_t *hba, emlxs_vpd_t *vpd)
2556{
2557 if (vpd->rBit) {
2558 switch (hba->sli_mode) {
2604 case 4:
2559 case EMLXS_HBA_SLI4_MODE:
2605 (void) strcpy(vpd->fw_version, vpd->sli4FwName);
2606 (void) strcpy(vpd->fw_label, vpd->sli4FwLabel);
2607 break;
2560 (void) strcpy(vpd->fw_version, vpd->sli4FwName);
2561 (void) strcpy(vpd->fw_label, vpd->sli4FwLabel);
2562 break;
2608 case 3:
2563 case EMLXS_HBA_SLI3_MODE:
2609 (void) strcpy(vpd->fw_version, vpd->sli3FwName);
2610 (void) strcpy(vpd->fw_label, vpd->sli3FwLabel);
2611 break;
2564 (void) strcpy(vpd->fw_version, vpd->sli3FwName);
2565 (void) strcpy(vpd->fw_label, vpd->sli3FwLabel);
2566 break;
2612 case 2:
2567 case EMLXS_HBA_SLI2_MODE:
2613 (void) strcpy(vpd->fw_version, vpd->sli2FwName);
2614 (void) strcpy(vpd->fw_label, vpd->sli2FwLabel);
2615 break;
2568 (void) strcpy(vpd->fw_version, vpd->sli2FwName);
2569 (void) strcpy(vpd->fw_label, vpd->sli2FwLabel);
2570 break;
2616 case 1:
2571 case EMLXS_HBA_SLI1_MODE:
2617 (void) strcpy(vpd->fw_version, vpd->sli1FwName);
2618 (void) strcpy(vpd->fw_label, vpd->sli1FwLabel);
2619 break;
2620 default:
2621 (void) strcpy(vpd->fw_version, "unknown");
2622 (void) strcpy(vpd->fw_label, vpd->fw_version);
2623 }
2624 } else {
2625 emlxs_decode_version(vpd->smFwRev, vpd->fw_version);
2626 (void) strcpy(vpd->fw_label, vpd->fw_version);
2627 }
2628
2629 return;
2630
2572 (void) strcpy(vpd->fw_version, vpd->sli1FwName);
2573 (void) strcpy(vpd->fw_label, vpd->sli1FwLabel);
2574 break;
2575 default:
2576 (void) strcpy(vpd->fw_version, "unknown");
2577 (void) strcpy(vpd->fw_label, vpd->fw_version);
2578 }
2579 } else {
2580 emlxs_decode_version(vpd->smFwRev, vpd->fw_version);
2581 (void) strcpy(vpd->fw_label, vpd->fw_version);
2582 }
2583
2584 return;
2585
2631} /* emlxs_decode_firmware_rev() */
2586} /* emlxs_decode_firmware_rev() */
2632
2633
2634
2635extern void
2636emlxs_decode_version(uint32_t version, char *buffer)
2637{
2638 uint32_t b1, b2, b3, b4;
2639 char c;
2640
2641 b1 = (version & 0x0000f000) >> 12;
2642 b2 = (version & 0x00000f00) >> 8;
2643 b3 = (version & 0x000000c0) >> 6;
2644 b4 = (version & 0x00000030) >> 4;
2645
2646 if (b1 == 0 && b2 == 0) {
2647 (void) sprintf(buffer, "none");
2648 return;
2649 }
2587
2588
2589
2590extern void
2591emlxs_decode_version(uint32_t version, char *buffer)
2592{
2593 uint32_t b1, b2, b3, b4;
2594 char c;
2595
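	/*
	 * As decoded below, bits 15:12 give the digit before the dot,
	 * bits 11:8 and 7:6 the two digits after it, and bits 5:4 select
	 * a revision letter ('n', 'a', ...) that is appended along with
	 * its index, yielding strings such as "2.82a1".
	 */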
2596 b1 = (version & 0x0000f000) >> 12;
2597 b2 = (version & 0x00000f00) >> 8;
2598 b3 = (version & 0x000000c0) >> 6;
2599 b4 = (version & 0x00000030) >> 4;
2600
2601 if (b1 == 0 && b2 == 0) {
2602 (void) sprintf(buffer, "none");
2603 return;
2604 }
2605
2650 c = 0;
2651 switch (b4) {
2652 case 0:
2653 c = 'n';
2654 break;
2655 case 1:
2656 c = 'a';
2657 break;

--- 12 unchanged lines hidden (view full) ---

2670 if (c == 0) {
2671 (void) sprintf(buffer, "%d.%d%d", b1, b2, b3);
2672 } else {
2673 (void) sprintf(buffer, "%d.%d%d%c%d", b1, b2, b3, c, b4);
2674 }
2675
2676 return;
2677
2606 c = 0;
2607 switch (b4) {
2608 case 0:
2609 c = 'n';
2610 break;
2611 case 1:
2612 c = 'a';
2613 break;

--- 12 unchanged lines hidden (view full) ---

2626 if (c == 0) {
2627 (void) sprintf(buffer, "%d.%d%d", b1, b2, b3);
2628 } else {
2629 (void) sprintf(buffer, "%d.%d%d%c%d", b1, b2, b3, c, b4);
2630 }
2631
2632 return;
2633
2678} /* emlxs_decode_version() */
2634} /* emlxs_decode_version() */
2679
2680
2681static void
2682emlxs_decode_label(char *label, char *buffer)
2683{
2684 uint32_t i;
2685 char name[16];
2686#ifdef EMLXS_LITTLE_ENDIAN
2687 uint32_t *wptr;
2688 uint32_t word;
2635
2636
2637static void
2638emlxs_decode_label(char *label, char *buffer)
2639{
2640 uint32_t i;
2641 char name[16];
2642#ifdef EMLXS_LITTLE_ENDIAN
2643 uint32_t *wptr;
2644 uint32_t word;
2689#endif /* EMLXS_LITTLE_ENDIAN */
2645#endif /* EMLXS_LITTLE_ENDIAN */
2690
2691 bcopy(label, name, 16);
2692
2693#ifdef EMLXS_LITTLE_ENDIAN
2694 wptr = (uint32_t *)name;
2695 for (i = 0; i < 3; i++) {
2696 word = *wptr;
2697 word = SWAP_DATA32(word);
2698 *wptr++ = word;
2699 }
2646
2647 bcopy(label, name, 16);
2648
2649#ifdef EMLXS_LITTLE_ENDIAN
2650 wptr = (uint32_t *)name;
2651 for (i = 0; i < 3; i++) {
2652 word = *wptr;
2653 word = SWAP_DATA32(word);
2654 *wptr++ = word;
2655 }
2700#endif /* EMLXS_LITTLE_ENDIAN */
2656#endif /* EMLXS_LITTLE_ENDIAN */
2701
2702 for (i = 0; i < 16; i++) {
2703 if (name[i] == 0x20) {
2704 name[i] = 0;
2705 }
2706 }
2707
2708 (void) strcpy(buffer, name);
2709
2710 return;
2711
2657
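	/*
	 * Replace spaces with NULLs; the strcpy() below then truncates
	 * the label at the first space.
	 */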
2658 for (i = 0; i < 16; i++) {
2659 if (name[i] == 0x20) {
2660 name[i] = 0;
2661 }
2662 }
2663
2664 (void) strcpy(buffer, name);
2665
2666 return;
2667
2712} /* emlxs_decode_label() */
2668} /* emlxs_decode_label() */
2713
2714
2715extern uint32_t
2716emlxs_strtol(char *str, uint32_t base)
2717{
2718 uint32_t value = 0;
2719 char *ptr;
2720 uint32_t factor = 1;
2721 uint32_t digits;
2722
2723 if (*str == 0) {
2724 return (0);
2725 }
2669
2670
2671extern uint32_t
2672emlxs_strtol(char *str, uint32_t base)
2673{
2674 uint32_t value = 0;
2675 char *ptr;
2676 uint32_t factor = 1;
2677 uint32_t digits;
2678
2679 if (*str == 0) {
2680 return (0);
2681 }
2682
2726 if (base != 10 && base != 16) {
2727 return (0);
2728 }
2683 if (base != 10 && base != 16) {
2684 return (0);
2685 }
2686
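	/*
	 * The string is parsed right to left using 'factor' as the place
	 * value; 'digits' caps the scan at 9 decimal or 8 hex digits so
	 * the 32-bit result cannot overflow.
	 */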
2729 /* Get max digits of value */
2730 digits = (base == 10) ? 9 : 8;
2731
2732 /* Position pointer to end of string */
2733 ptr = str + strlen(str);
2734
2735 /* Process string backwards */
2736 while ((ptr-- > str) && digits) {
2737 /* check for base 10 numbers */
2738 if (*ptr >= '0' && *ptr <= '9') {
2739 value += ((uint32_t)(*ptr - '0')) * factor;
2740 factor *= base;
2741 digits--;
2742 } else if (base == 16) {
2743 /* Check for base 16 numbers */
2744 if (*ptr >= 'a' && *ptr <= 'f') {
2687 /* Get max digits of value */
2688 digits = (base == 10) ? 9 : 8;
2689
2690 /* Position pointer to end of string */
2691 ptr = str + strlen(str);
2692
2693 /* Process string backwards */
2694 while ((ptr-- > str) && digits) {
2695 /* check for base 10 numbers */
2696 if (*ptr >= '0' && *ptr <= '9') {
2697 value += ((uint32_t)(*ptr - '0')) * factor;
2698 factor *= base;
2699 digits--;
2700 } else if (base == 16) {
2701 /* Check for base 16 numbers */
2702 if (*ptr >= 'a' && *ptr <= 'f') {
2745 value += ((uint32_t)(*ptr - 'a') + 10) * factor;
2703 value +=
2704 ((uint32_t)(*ptr - 'a') + 10) * factor;
2746 factor *= base;
2747 digits--;
2748 } else if (*ptr >= 'A' && *ptr <= 'F') {
2705 factor *= base;
2706 digits--;
2707 } else if (*ptr >= 'A' && *ptr <= 'F') {
2749 value += ((uint32_t)(*ptr - 'A') + 10) * factor;
2708 value +=
2709 ((uint32_t)(*ptr - 'A') + 10) * factor;
2750 factor *= base;
2751 digits--;
2752 } else if (factor > 1) {
2753 break;
2754 }
2755 } else if (factor > 1) {
2756 break;
2757 }
2758 }
2759
2760 return (value);
2761
2710 factor *= base;
2711 digits--;
2712 } else if (factor > 1) {
2713 break;
2714 }
2715 } else if (factor > 1) {
2716 break;
2717 }
2718 }
2719
2720 return (value);
2721
2762} /* emlxs_strtol() */
2722} /* emlxs_strtol() */
2763
2764
2765extern uint64_t
2766emlxs_strtoll(char *str, uint32_t base)
2767{
2768 uint64_t value = 0;
2769 char *ptr;
2770 uint32_t factor = 1;
2771 uint32_t digits;
2772
2773 if (*str == 0) {
2774 return (0);
2775 }
2723
2724
2725extern uint64_t
2726emlxs_strtoll(char *str, uint32_t base)
2727{
2728 uint64_t value = 0;
2729 char *ptr;
2730 uint32_t factor = 1;
2731 uint32_t digits;
2732
2733 if (*str == 0) {
2734 return (0);
2735 }
2736
2776 if (base != 10 && base != 16) {
2777 return (0);
2778 }
2737 if (base != 10 && base != 16) {
2738 return (0);
2739 }
2740
2779 /* Get max digits of value */
2780 digits = (base == 10) ? 19 : 16;
2781
2782 /* Position pointer to end of string */
2783 ptr = str + strlen(str);
2784
2785 /* Process string backwards */
2786 while ((ptr-- > str) && digits) {
2787 /* check for base 10 numbers */
2788 if (*ptr >= '0' && *ptr <= '9') {
2789 value += ((uint32_t)(*ptr - '0')) * factor;
2790 factor *= base;
2791 digits--;
2792 } else if (base == 16) {
2793 /* Check for base 16 numbers */
2794 if (*ptr >= 'a' && *ptr <= 'f') {
2741 /* Get max digits of value */
2742 digits = (base == 10) ? 19 : 16;
2743
2744 /* Position pointer to end of string */
2745 ptr = str + strlen(str);
2746
2747 /* Process string backwards */
2748 while ((ptr-- > str) && digits) {
2749 /* check for base 10 numbers */
2750 if (*ptr >= '0' && *ptr <= '9') {
2751 value += ((uint32_t)(*ptr - '0')) * factor;
2752 factor *= base;
2753 digits--;
2754 } else if (base == 16) {
2755 /* Check for base 16 numbers */
2756 if (*ptr >= 'a' && *ptr <= 'f') {
2795 value += ((uint32_t)(*ptr - 'a') + 10) * factor;
2757 value +=
2758 ((uint32_t)(*ptr - 'a') + 10) * factor;
2796 factor *= base;
2797 digits--;
2798 } else if (*ptr >= 'A' && *ptr <= 'F') {
2759 factor *= base;
2760 digits--;
2761 } else if (*ptr >= 'A' && *ptr <= 'F') {
2799 value += ((uint32_t)(*ptr - 'A') + 10) * factor;
2762 value +=
2763 ((uint32_t)(*ptr - 'A') + 10) * factor;
2800 factor *= base;
2801 digits--;
2802 } else if (factor > 1) {
2803 break;
2804 }
2805 } else if (factor > 1) {
2806 break;
2807 }
2808 }
2809
2810 return (value);
2811
2764 factor *= base;
2765 digits--;
2766 } else if (factor > 1) {
2767 break;
2768 }
2769 } else if (factor > 1) {
2770 break;
2771 }
2772 }
2773
2774 return (value);
2775
2812} /* emlxs_strtoll() */
2776} /* emlxs_strtoll() */
2813
2814static void
2815emlxs_parse_prog_types(emlxs_hba_t *hba, char *prog_types)
2816{
2817 emlxs_port_t *port = &PPORT;
2818 uint32_t i;
2819 char *ptr;
2820 emlxs_model_t *model;

--- 172 unchanged lines hidden (view full) ---

2993 /*
2994 * EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_vpd_msg,
2995 * "TF[%d]: 0x%x", i-1, model->pt_FF[i-1]);
2996 */
2997
2998 /* Move the str pointer */
2999 types = ptr + 1;
3000 }
2777
2778static void
2779emlxs_parse_prog_types(emlxs_hba_t *hba, char *prog_types)
2780{
2781 emlxs_port_t *port = &PPORT;
2782 uint32_t i;
2783 char *ptr;
2784 emlxs_model_t *model;

--- 172 unchanged lines hidden (view full) ---

2957 /*
2958 * EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_vpd_msg,
2959 * "TF[%d]: 0x%x", i-1, model->pt_FF[i-1]);
2960 */
2961
2962 /* Move the str pointer */
2963 types = ptr + 1;
2964 }
2965 } else if (strncmp(types, "T20:", 4) == 0) {
2966 bzero(model->pt_20, sizeof (model->pt_20));
2967 types += 4;
2968
2969 i = 0;
2970 while (*types && *types != 'T') {
2971 /* Null terminate the next value */
2972 ptr = types;
2973 while (*ptr && (*ptr != ','))
2974 ptr++;
2975 *ptr = 0;
2976
2977 /* Save the value */
2978 model->pt_20[i++] =
2979 (uint8_t)emlxs_strtol(types, 16);
2980 model->pt_20[i] = 0;
2981
2982 /*
2983 * EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_vpd_msg,
2984 * "T20[%d]: 0x%x", i-1, model->pt_20[i-1]);
2985 */
2986
2987 /* Move the str pointer */
2988 types = ptr + 1;
2989 }
3001 } else {
3002 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_vpd_msg,
3003 "Unknown prog type string = %s", types);
3004 break;
3005 }
3006 }
3007
3008 return;
3009
2990 } else {
2991 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_vpd_msg,
2992 "Unknown prog type string = %s", types);
2993 break;
2994 }
2995 }
2996
2997 return;
2998
3010} /* emlxs_parse_prog_types() */
2999} /* emlxs_parse_prog_types() */
3011
3012
3013static void
3014emlxs_build_prog_types(emlxs_hba_t *hba, char *prog_types)
3015{
3016 uint32_t i;
3017 uint32_t found = 0;
3018 char buffer[256];

--- 7 unchanged lines hidden (view full) ---

3026
3027 i = 0;
3028 while (hba->model_info.pt_2[i] && i < 8) {
3029 (void) sprintf(buffer, "%X,", hba->model_info.pt_2[i]);
3030 (void) strcat(prog_types, buffer);
3031 i++;
3032 }
3033 }
3000
3001
3002static void
3003emlxs_build_prog_types(emlxs_hba_t *hba, char *prog_types)
3004{
3005 uint32_t i;
3006 uint32_t found = 0;
3007 char buffer[256];

--- 7 unchanged lines hidden (view full) ---

3015
3016 i = 0;
3017 while (hba->model_info.pt_2[i] && i < 8) {
3018 (void) sprintf(buffer, "%X,", hba->model_info.pt_2[i]);
3019 (void) strcat(prog_types, buffer);
3020 i++;
3021 }
3022 }
3023
3034 if (hba->model_info.pt_3[0]) {
3035 (void) strcat(prog_types, "T3:");
3036 found = 1;
3037
3038 i = 0;
3039 while (hba->model_info.pt_3[i] && i < 8) {
3040 (void) sprintf(buffer, "%X,", hba->model_info.pt_3[i]);
3041 (void) strcat(prog_types, buffer);
3042 i++;
3043
3044 }
3045 }
3024 if (hba->model_info.pt_3[0]) {
3025 (void) strcat(prog_types, "T3:");
3026 found = 1;
3027
3028 i = 0;
3029 while (hba->model_info.pt_3[i] && i < 8) {
3030 (void) sprintf(buffer, "%X,", hba->model_info.pt_3[i]);
3031 (void) strcat(prog_types, buffer);
3032 i++;
3033
3034 }
3035 }
3036
3046 if (hba->model_info.pt_6[0]) {
3047 (void) strcat(prog_types, "T6:");
3048 found = 1;
3049
3050 i = 0;
3051 while (hba->model_info.pt_6[i] && i < 8) {
3052 (void) sprintf(buffer, "%X,", hba->model_info.pt_6[i]);
3053 (void) strcat(prog_types, buffer);
3054 i++;
3055 }
3056 }
3037 if (hba->model_info.pt_6[0]) {
3038 (void) strcat(prog_types, "T6:");
3039 found = 1;
3040
3041 i = 0;
3042 while (hba->model_info.pt_6[i] && i < 8) {
3043 (void) sprintf(buffer, "%X,", hba->model_info.pt_6[i]);
3044 (void) strcat(prog_types, buffer);
3045 i++;
3046 }
3047 }
3048
3057 if (hba->model_info.pt_7[0]) {
3058 (void) strcat(prog_types, "T7:");
3059 found = 1;
3060
3061 i = 0;
3062 while (hba->model_info.pt_7[i] && i < 8) {
3063 (void) sprintf(buffer, "%X,", hba->model_info.pt_7[i]);
3064 (void) strcat(prog_types, buffer);
3065 i++;
3066 }
3067 }
3049 if (hba->model_info.pt_7[0]) {
3050 (void) strcat(prog_types, "T7:");
3051 found = 1;
3052
3053 i = 0;
3054 while (hba->model_info.pt_7[i] && i < 8) {
3055 (void) sprintf(buffer, "%X,", hba->model_info.pt_7[i]);
3056 (void) strcat(prog_types, buffer);
3057 i++;
3058 }
3059 }
3060
3068 if (hba->model_info.pt_A[0]) {
3069 (void) strcat(prog_types, "TA:");
3070 found = 1;
3071
3072 i = 0;
3073 while (hba->model_info.pt_A[i] && i < 8) {
3074 (void) sprintf(buffer, "%X,", hba->model_info.pt_A[i]);
3075 (void) strcat(prog_types, buffer);
3076 i++;
3077 }
3078 }
3061 if (hba->model_info.pt_A[0]) {
3062 (void) strcat(prog_types, "TA:");
3063 found = 1;
3064
3065 i = 0;
3066 while (hba->model_info.pt_A[i] && i < 8) {
3067 (void) sprintf(buffer, "%X,", hba->model_info.pt_A[i]);
3068 (void) strcat(prog_types, buffer);
3069 i++;
3070 }
3071 }
3072
3073
3079 if (hba->model_info.pt_B[0]) {
3080 (void) strcat(prog_types, "TB:");
3081 found = 1;
3082
3083 i = 0;
3084 while (hba->model_info.pt_B[i] && i < 8) {
3085 (void) sprintf(buffer, "%X,", hba->model_info.pt_B[i]);
3086 (void) strcat(prog_types, buffer);
3087 i++;
3088 }
3089 }
3074 if (hba->model_info.pt_B[0]) {
3075 (void) strcat(prog_types, "TB:");
3076 found = 1;
3077
3078 i = 0;
3079 while (hba->model_info.pt_B[i] && i < 8) {
3080 (void) sprintf(buffer, "%X,", hba->model_info.pt_B[i]);
3081 (void) strcat(prog_types, buffer);
3082 i++;
3083 }
3084 }
3085
3086 if (hba->model_info.pt_20[0]) {
3087 (void) strcat(prog_types, "T20:");
3088 found = 1;
3089
3090 i = 0;
3091 while (hba->model_info.pt_20[i] && i < 8) {
3092 (void) sprintf(buffer, "%X,", hba->model_info.pt_20[i]);
3093 (void) strcat(prog_types, buffer);
3094 i++;
3095 }
3096 }
3097
3090 if (hba->model_info.pt_FF[0]) {
3091 (void) strcat(prog_types, "TFF:");
3092 found = 1;
3093
3094 i = 0;
3095 while (hba->model_info.pt_FF[i] && i < 8) {
3096 (void) sprintf(buffer, "%X,", hba->model_info.pt_FF[i]);
3097 (void) strcat(prog_types, buffer);
3098 i++;
3099 }
3100 }
3098 if (hba->model_info.pt_FF[0]) {
3099 (void) strcat(prog_types, "TFF:");
3100 found = 1;
3101
3102 i = 0;
3103 while (hba->model_info.pt_FF[i] && i < 8) {
3104 (void) sprintf(buffer, "%X,", hba->model_info.pt_FF[i]);
3105 (void) strcat(prog_types, buffer);
3106 i++;
3107 }
3108 }
3109
3101 if (found) {
3102 /* Terminate at the last comma in string */
3103 prog_types[(strlen(prog_types) - 1)] = 0;
3104 }
3110 if (found) {
3111 /* Terminate at the last comma in string */
3112 prog_types[(strlen(prog_types) - 1)] = 0;
3113 }
3114
3105 return;
3106
3115 return;
3116
3107} /* emlxs_build_prog_types() */
3117} /* emlxs_build_prog_types() */
3108
3109
3110
3111
3112extern uint32_t
3113emlxs_init_adapter_info(emlxs_hba_t *hba)
3114{
3115 emlxs_port_t *port = &PPORT;
3118
3119
3120
3121
3122extern uint32_t
3123emlxs_init_adapter_info(emlxs_hba_t *hba)
3124{
3125 emlxs_port_t *port = &PPORT;
3116 emlxs_config_t *cfg;
3117 uint32_t pci_id;
3118 uint32_t cache_line;
3119 uint32_t channels;
3120 uint16_t device_id;
3121 uint16_t ssdid;
3122 uint32_t i;
3123 uint32_t found = 0;
3124
3126 uint32_t pci_id;
3127 uint32_t cache_line;
3128 uint32_t channels;
3129 uint16_t device_id;
3130 uint16_t ssdid;
3131 uint32_t i;
3132 uint32_t found = 0;
3133
3125 cfg = &CFG;
3126
3127 if (hba->bus_type == SBUS_FC) {
3128 if (hba->pci_acc_handle == NULL) {
3129 bcopy(&emlxs_sbus_model[0], &hba->model_info,
3130 sizeof (emlxs_model_t));
3131
3132 hba->model_info.device_id = 0;
3133
3134 return (0);
3135 }
3134 if (hba->bus_type == SBUS_FC) {
3135 if (hba->pci_acc_handle == NULL) {
3136 bcopy(&emlxs_sbus_model[0], &hba->model_info,
3137 sizeof (emlxs_model_t));
3138
3139 hba->model_info.device_id = 0;
3140
3141 return (0);
3142 }
3143
3136 /* Read the PCI device id */
3144 /* Read the PCI device id */
3137 pci_id = ddi_get32(hba->pci_acc_handle,
3145 pci_id =
3146 ddi_get32(hba->pci_acc_handle,
3138 (uint32_t *)(hba->pci_addr + PCI_VENDOR_ID_REGISTER));
3139 device_id = (uint16_t)(pci_id >> 16);
3140
3141 /* Find matching adapter model */
3142 for (i = 1; i < EMLXS_SBUS_MODEL_COUNT; i++) {
3143 if (emlxs_sbus_model[i].device_id == device_id) {
3144 bcopy(&emlxs_sbus_model[i], &hba->model_info,
3145 sizeof (emlxs_model_t));

--- 7 unchanged lines hidden (view full) ---

3153 bcopy(&emlxs_sbus_model[0], &hba->model_info,
3154 sizeof (emlxs_model_t));
3155
3156 hba->model_info.device_id = device_id;
3157
3158 return (0);
3159 }
3160 } else { /* PCI model */
3147 (uint32_t *)(hba->pci_addr + PCI_VENDOR_ID_REGISTER));
3148 device_id = (uint16_t)(pci_id >> 16);
3149
3150 /* Find matching adapter model */
3151 for (i = 1; i < EMLXS_SBUS_MODEL_COUNT; i++) {
3152 if (emlxs_sbus_model[i].device_id == device_id) {
3153 bcopy(&emlxs_sbus_model[i], &hba->model_info,
3154 sizeof (emlxs_model_t));

--- 7 unchanged lines hidden (view full) ---

3162 bcopy(&emlxs_sbus_model[0], &hba->model_info,
3163 sizeof (emlxs_model_t));
3164
3165 hba->model_info.device_id = device_id;
3166
3167 return (0);
3168 }
3169 } else { /* PCI model */
3170
3161 if (hba->pci_acc_handle == NULL) {
3162 bcopy(&emlxs_pci_model[0], &hba->model_info,
3163 sizeof (emlxs_model_t));
3164
3165 hba->model_info.device_id = 0;
3166
3167 return (0);
3168 }
3171 if (hba->pci_acc_handle == NULL) {
3172 bcopy(&emlxs_pci_model[0], &hba->model_info,
3173 sizeof (emlxs_model_t));
3174
3175 hba->model_info.device_id = 0;
3176
3177 return (0);
3178 }
3179
3169 /* Read the PCI device id */
3180 /* Read the PCI device id */
3170 device_id = ddi_get16(hba->pci_acc_handle,
3181 device_id =
3182 ddi_get16(hba->pci_acc_handle,
3171 (uint16_t *)(hba->pci_addr + PCI_DEVICE_ID_REGISTER));
3172
3173 /* Read the PCI Subsystem id */
3183 (uint16_t *)(hba->pci_addr + PCI_DEVICE_ID_REGISTER));
3184
3185 /* Read the PCI Subsystem id */
3174 ssdid = ddi_get16(hba->pci_acc_handle,
3186 ssdid =
3187 ddi_get16(hba->pci_acc_handle,
3175 (uint16_t *)(hba->pci_addr + PCI_SSDID_REGISTER));
3176
3177 if (ssdid == 0 || ssdid == 0xffff) {
3178 ssdid = device_id;
3179 }
3188 (uint16_t *)(hba->pci_addr + PCI_SSDID_REGISTER));
3189
3190 if (ssdid == 0 || ssdid == 0xffff) {
3191 ssdid = device_id;
3192 }
3193
3180 /* Read the Cache Line reg */
3194 /* Read the Cache Line reg */
3181 cache_line = ddi_get32(hba->pci_acc_handle,
3195 cache_line =
3196 ddi_get32(hba->pci_acc_handle,
3182 (uint32_t *)(hba->pci_addr + PCI_CACHE_LINE_REGISTER));
3183
3184 /* Check for the multifunction bit being set */
3185 if ((cache_line & 0x00ff0000) == 0x00800000) {
3186 channels = 2;
3187 } else {
3188 channels = 1;
3189 }
3190
3197 (uint32_t *)(hba->pci_addr + PCI_CACHE_LINE_REGISTER));
3198
3199 /* Check for the multifunction bit being set */
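	/* (byte 2 of this dword is the PCI Header Type; 0x80 is the multifunction flag) */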
3200 if ((cache_line & 0x00ff0000) == 0x00800000) {
3201 channels = 2;
3202 } else {
3203 channels = 1;
3204 }
3205
3191#ifdef MENLO_TEST
3192 /* Convert Zephyr adapters to Hornet adapters */
3193 if ((device_id == PCI_DEVICE_ID_LPe11000_M4) &&
3194 (cfg[CFG_HORNET_ID].current == 0)) {
3195 device_id = PCI_DEVICE_ID_LP21000_M;
3196 ssdid = PCI_SSDID_LP21000_M;
3197 }
3198#endif /* MENLO_TEST */
3199
3200 /* If device ids are unique, then use them for search */
3201 if (device_id != ssdid) {
3202 if (channels > 1) {
3203 /*
3204 * Find matching adapter model using
3205 * device_id, ssdid and channels
3206 */
3207 for (i = 1; i < EMLXS_PCI_MODEL_COUNT; i++) {
3206 /* If device ids are unique, then use them for search */
3207 if (device_id != ssdid) {
3208 if (channels > 1) {
3209 /*
3210 * Find matching adapter model using
3211 * device_id, ssdid and channels
3212 */
3213 for (i = 1; i < EMLXS_PCI_MODEL_COUNT; i++) {
3208 if ((emlxs_pci_model[i].device_id ==
3209 device_id) &&
3210 (emlxs_pci_model[i].ssdid ==
3211 ssdid) &&
3212 (emlxs_pci_model[i].channels ==
3213 channels)) {
3214 if (emlxs_pci_model[i].device_id ==
3215 device_id &&
3216 emlxs_pci_model[i].ssdid == ssdid &&
3217 emlxs_pci_model[i].channels ==
3218 channels) {
3214 bcopy(&emlxs_pci_model[i],
3215 &hba->model_info,
3216 sizeof (emlxs_model_t));
3217 found = 1;
3218 break;
3219 }
3220 }
3221 } else {
3222 /*
3223 * Find matching adapter model using
3224 * device_id and ssdid
3225 */
3226 for (i = 1; i < EMLXS_PCI_MODEL_COUNT; i++) {
3219 bcopy(&emlxs_pci_model[i],
3220 &hba->model_info,
3221 sizeof (emlxs_model_t));
3222 found = 1;
3223 break;
3224 }
3225 }
3226 } else {
3227 /*
3228 * Find matching adapter model using
3229 * device_id and ssdid
3230 */
3231 for (i = 1; i < EMLXS_PCI_MODEL_COUNT; i++) {
3227 if ((emlxs_pci_model[i].device_id ==
3228 device_id) &&
3229 (emlxs_pci_model[i].ssdid ==
3230 ssdid)) {
3232 if (emlxs_pci_model[i].device_id ==
3233 device_id &&
3234 emlxs_pci_model[i].ssdid == ssdid) {
3231 bcopy(&emlxs_pci_model[i],
3232 &hba->model_info,
3233 sizeof (emlxs_model_t));
3234 found = 1;
3235 break;
3236 }
3237 }
3238 }
3239 }
3235 bcopy(&emlxs_pci_model[i],
3236 &hba->model_info,
3237 sizeof (emlxs_model_t));
3238 found = 1;
3239 break;
3240 }
3241 }
3242 }
3243 }
3244
3240 /* If adapter not found, try again */
3241 if (!found) {
3242 /* Find matching adapter model */
3243 for (i = 1; i < EMLXS_PCI_MODEL_COUNT; i++) {
3244 if (emlxs_pci_model[i].device_id == device_id &&
3245 emlxs_pci_model[i].channels == channels) {
3246 bcopy(&emlxs_pci_model[i],
3247 &hba->model_info,
3248 sizeof (emlxs_model_t));
3249 found = 1;
3250 break;
3251 }
3252 }
3253 }
3245 /* If adapter not found, try again */
3246 if (!found) {
3247 /* Find matching adapter model */
3248 for (i = 1; i < EMLXS_PCI_MODEL_COUNT; i++) {
3249 if (emlxs_pci_model[i].device_id == device_id &&
3250 emlxs_pci_model[i].channels == channels) {
3251 bcopy(&emlxs_pci_model[i],
3252 &hba->model_info,
3253 sizeof (emlxs_model_t));
3254 found = 1;
3255 break;
3256 }
3257 }
3258 }
3259
3254 /* If adapter not found, try one last time */
3255 if (!found) {
3256 /* Find matching adapter model */
3257 for (i = 1; i < EMLXS_PCI_MODEL_COUNT; i++) {
3258 if (emlxs_pci_model[i].device_id == device_id) {
3259 bcopy(&emlxs_pci_model[i],
3260 &hba->model_info,
3261 sizeof (emlxs_model_t));
3262 found = 1;
3263 break;
3264 }
3265 }
3266 }
3260 /* If adapter not found, try one last time */
3261 if (!found) {
3262 /* Find matching adapter model */
3263 for (i = 1; i < EMLXS_PCI_MODEL_COUNT; i++) {
3264 if (emlxs_pci_model[i].device_id == device_id) {
3265 bcopy(&emlxs_pci_model[i],
3266 &hba->model_info,
3267 sizeof (emlxs_model_t));
3268 found = 1;
3269 break;
3270 }
3271 }
3272 }
3273
3267 /* If not found, set adapter to unknown */
3268 if (!found) {
3269 bcopy(&emlxs_pci_model[0], &hba->model_info,
3270 sizeof (emlxs_model_t));
3271
3272 hba->model_info.device_id = device_id;
3273 hba->model_info.ssdid = ssdid;
3274
3275 return (0);
3276 }
3274 /* If not found, set adapter to unknown */
3275 if (!found) {
3276 bcopy(&emlxs_pci_model[0], &hba->model_info,
3277 sizeof (emlxs_model_t));
3278
3279 hba->model_info.device_id = device_id;
3280 hba->model_info.ssdid = ssdid;
3281
3282 return (0);
3283 }
3277#ifdef MENLO_TEST
3278 /* Convert Hornet program types to Zephyr program types */
3279 if ((hba->model_info.device_id == PCI_DEVICE_ID_LP21000_M) &&
3280 (cfg[CFG_HORNET_PTYPES].current == 0)) {
3281 /*
3282 * Find matching Zephyr card and copy Zephyr program
3283 * types
3284 */
3285 for (i = 1; i < EMLXS_PCI_MODEL_COUNT; i++) {
3286 if ((emlxs_pci_model[i].device_id ==
3287 PCI_DEVICE_ID_LPe11000_M4) &&
3288 (emlxs_pci_model[i].ssdid ==
3289 PCI_SSDID_LPe11000_M4) &&
3290 (emlxs_pci_model[i].channels == channels)) {
3291 bcopy(emlxs_pci_model[i].pt_2,
3292 hba->model_info.pt_2, 8);
3293 bcopy(emlxs_pci_model[i].pt_3,
3294 hba->model_info.pt_3, 8);
3295 bcopy(emlxs_pci_model[i].pt_6,
3296 hba->model_info.pt_6, 8);
3297 bcopy(emlxs_pci_model[i].pt_7,
3298 hba->model_info.pt_7, 8);
3299 bcopy(emlxs_pci_model[i].pt_A,
3300 hba->model_info.pt_A, 8);
3301 bcopy(emlxs_pci_model[i].pt_B,
3302 hba->model_info.pt_B, 8);
3303 bcopy(emlxs_pci_model[i].pt_E,
3304 hba->model_info.pt_E, 8);
3305 bcopy(emlxs_pci_model[i].pt_FF,
3306 hba->model_info.pt_FF, 8);
3307 break;
3308 }
3309 }
3310 }
3311#endif /* MENLO_TEST */
3312
3313#ifndef SATURN_MSI_SUPPORT
3314 /*
3284
3285#ifndef SATURN_MSI_SUPPORT
3286 /*
3315 * This will disable MSI support for Saturn adapters due to
3316 * a PCI bus issue
3287 * This will disable MSI support for Saturn adapters
3288 * due to a PCI bus issue
3317 */
3318 if (hba->model_info.chip == EMLXS_SATURN_CHIP) {
3319 hba->model_info.flags &=
3320 ~(EMLXS_MSI_SUPPORTED | EMLXS_MSIX_SUPPORTED);
3321 }
3289 */
3290 if (hba->model_info.chip == EMLXS_SATURN_CHIP) {
3291 hba->model_info.flags &=
3292 ~(EMLXS_MSI_SUPPORTED | EMLXS_MSIX_SUPPORTED);
3293 }
3322#endif /* !SATURN_MSI_SUPPORT */
3294#endif /* !SATURN_MSI_SUPPORT */
3323
3324
3325#ifdef MSI_SUPPORT
3326 /* Verify MSI support */
3327 if (hba->model_info.flags & EMLXS_MSI_SUPPORTED) {
3328 uint32_t offset;
3329 uint32_t reg;
3330
3331 /* Scan for MSI capabilities register */
3295
3296
3297#ifdef MSI_SUPPORT
3298 /* Verify MSI support */
3299 if (hba->model_info.flags & EMLXS_MSI_SUPPORTED) {
3300 uint32_t offset;
3301 uint32_t reg;
3302
3303 /* Scan for MSI capabilities register */
3332 offset = ddi_get32(hba->pci_acc_handle,
3304 offset =
3305 ddi_get32(hba->pci_acc_handle,
3333 (uint32_t *)(hba->pci_addr + PCI_CAP_POINTER));
3334 offset &= 0xff;
3335
3336 while (offset) {
3306 (uint32_t *)(hba->pci_addr + PCI_CAP_POINTER));
3307 offset &= 0xff;
3308
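	/*
	 * Walk the PCI capability list: the low byte of each capability
	 * header is the capability ID and the next byte is the offset of
	 * the following capability (0 terminates the list).
	 */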
3309 while (offset) {
3337 reg = ddi_get32(hba->pci_acc_handle,
3310 reg =
3311 ddi_get32(hba->pci_acc_handle,
3338 (uint32_t *)(hba->pci_addr + offset));
3339
3340 if ((reg & 0xff) == MSI_CAP_ID) {
3341 break;
3342 }
3343 offset = (reg >> 8) & 0xff;
3344 }
3345
3346 if (offset) {
3347 hba->msi_cap_offset = offset + 2;
3348 } else {
3349 hba->msi_cap_offset = 0;
3350 hba->model_info.flags &= ~EMLXS_MSI_SUPPORTED;
3351
3312 (uint32_t *)(hba->pci_addr + offset));
3313
3314 if ((reg & 0xff) == MSI_CAP_ID) {
3315 break;
3316 }
3317 offset = (reg >> 8) & 0xff;
3318 }
3319
3320 if (offset) {
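	/*
	 * +2 skips the capability ID and next-pointer bytes, leaving the
	 * offset of the MSI Message Control register.
	 */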
3321 hba->msi_cap_offset = offset + 2;
3322 } else {
3323 hba->msi_cap_offset = 0;
3324 hba->model_info.flags &= ~EMLXS_MSI_SUPPORTED;
3325
3352 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
3326 EMLXS_MSGF(EMLXS_CONTEXT,
3327 &emlxs_init_debug_msg,
3353 "MSI: control_reg capability not found!");
3354 }
3355 }
3328 "MSI: control_reg capability not found!");
3329 }
3330 }
3331
3356 /* Verify MSI-X support */
3357 if (hba->model_info.flags & EMLXS_MSIX_SUPPORTED) {
3358 uint32_t offset;
3359 uint32_t reg;
3360
3361 /* Scan for MSI-X capabilities register */
3332 /* Verify MSI-X support */
3333 if (hba->model_info.flags & EMLXS_MSIX_SUPPORTED) {
3334 uint32_t offset;
3335 uint32_t reg;
3336
3337 /* Scan for MSI-X capabilities register */
3362 offset = ddi_get32(hba->pci_acc_handle,
3338 offset =
3339 ddi_get32(hba->pci_acc_handle,
3363 (uint32_t *)(hba->pci_addr + PCI_CAP_POINTER));
3364 offset &= 0xff;
3365
3366 while (offset) {
3340 (uint32_t *)(hba->pci_addr + PCI_CAP_POINTER));
3341 offset &= 0xff;
3342
3343 while (offset) {
3367 reg = ddi_get32(hba->pci_acc_handle,
3344 reg =
3345 ddi_get32(hba->pci_acc_handle,
3368 (uint32_t *)(hba->pci_addr + offset));
3369
3370 if ((reg & 0xff) == MSIX_CAP_ID) {
3371 break;
3372 }
3373 offset = (reg >> 8) & 0xff;
3374 }
3375
3376 if (offset) {
3377 hba->msix_cap_offset = offset;
3378 } else {
3379 hba->msix_cap_offset = 0;
3346 (uint32_t *)(hba->pci_addr + offset));
3347
3348 if ((reg & 0xff) == MSIX_CAP_ID) {
3349 break;
3350 }
3351 offset = (reg >> 8) & 0xff;
3352 }
3353
3354 if (offset) {
3355 hba->msix_cap_offset = offset;
3356 } else {
3357 hba->msix_cap_offset = 0;
3380 hba->model_info.flags &= ~EMLXS_MSIX_SUPPORTED;
3358 hba->model_info.flags &=
3359 ~EMLXS_MSIX_SUPPORTED;
3381
3360
3382 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
3361 EMLXS_MSGF(EMLXS_CONTEXT,
3362 &emlxs_init_debug_msg,
3383 "MSIX: control_reg capability not found!");
3384 }
3385 }
3363 "MSIX: control_reg capability not found!");
3364 }
3365 }
3386#endif /* MSI_SUPPORT */
3366#endif /* MSI_SUPPORT */
3387
3388 }
3389
3367
3368 }
3369
3390 return (1);
3391
3392} /* emlxs_init_adapter_info() */
3393
3394
3395/* EMLXS_PORT_LOCK must be held when call this routine */
3396static uint32_t
3397emlxs_get_attention(emlxs_hba_t *hba, uint32_t msgid)
3398{
3399 uint32_t ha_copy = 0;
3400 uint32_t ha_copy2;
3401 uint32_t mask = hba->hc_copy;
3402
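	/*
	 * 'mask' is the driver's shadow of the Host Control register;
	 * attention bits whose interrupt enables are clear are filtered
	 * out below.
	 */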
3403#ifdef MSI_SUPPORT
3404
3405read_ha_register:
3406
3407 /* Check for default MSI interrupt */
3408 if (msgid == 0) {
3409 /* Read host attention register to determine interrupt source */
3410 ha_copy2 = READ_CSR_REG(hba, FC_HA_REG(hba, hba->csr_addr));
3411
3412 /* Filter out MSI non-default attention bits */
3413 ha_copy2 &= ~(hba->intr_cond);
3370 if (hba->model_info.sli_mask & EMLXS_SLI4_MASK) {
3371 return (0);
3414 }
3372 }
3415 /* Check for polled or fixed type interrupt */
3416 else if (msgid == -1) {
3417 /* Read host attention register to determine interrupt source */
3418 ha_copy2 = READ_CSR_REG(hba, FC_HA_REG(hba, hba->csr_addr));
3419 }
3420 /* Otherwise, assume a mapped MSI interrupt */
3421 else {
3422 /* Convert MSI msgid to mapped attention bits */
3423 ha_copy2 = hba->intr_map[msgid];
3424 }
3425
3373
3426#else /* !MSI_SUPPORT */
3374 /* For now we just support SLI2 and SLI3 */
3375 hba->emlxs_sli_api_map_hdw = emlxs_sli3_map_hdw;
3376 hba->emlxs_sli_api_unmap_hdw = emlxs_sli3_unmap_hdw;
3377 hba->emlxs_sli_api_online = emlxs_sli3_online;
3378 hba->emlxs_sli_api_offline = emlxs_sli3_offline;
3379 hba->emlxs_sli_api_hba_reset = emlxs_sli3_hba_reset;
3380 hba->emlxs_sli_api_issue_iocb_cmd = emlxs_sli3_issue_iocb_cmd;
3381 hba->emlxs_sli_api_issue_mbox_cmd = emlxs_sli3_issue_mbox_cmd;
3382#ifdef SFCT_SUPPORT
3383 hba->emlxs_sli_api_prep_fct_iocb = emlxs_sli3_prep_fct_iocb;
3384#endif /* SFCT_SUPPORT */
3385 hba->emlxs_sli_api_prep_fcp_iocb = emlxs_sli3_prep_fcp_iocb;
3386 hba->emlxs_sli_api_prep_ip_iocb = emlxs_sli3_prep_ip_iocb;
3387 hba->emlxs_sli_api_prep_els_iocb = emlxs_sli3_prep_els_iocb;
3388 hba->emlxs_sli_api_prep_ct_iocb = emlxs_sli3_prep_ct_iocb;
3389 hba->emlxs_sli_api_poll_intr = emlxs_sli3_poll_intr;
3390 hba->emlxs_sli_api_intx_intr = emlxs_sli3_intx_intr;
3391 hba->emlxs_sli_api_msi_intr = emlxs_sli3_msi_intr;
3392 return (1);
3427
3393
3428 /* Read host attention register to determine interrupt source */
3429 ha_copy2 = READ_CSR_REG(hba, FC_HA_REG(hba, hba->csr_addr));
3394} /* emlxs_init_adapter_info() */
3430
3395
3431#endif /* MSI_SUPPORT */
3432
3396
3433 /* Check if Hardware error interrupt is enabled */
3434 if ((ha_copy2 & HA_ERATT) && !(mask & HC_ERINT_ENA)) {
3435 ha_copy2 &= ~HA_ERATT;
3436 }
3437 /* Check if link interrupt is enabled */
3438 if ((ha_copy2 & HA_LATT) && !(mask & HC_LAINT_ENA)) {
3439 ha_copy2 &= ~HA_LATT;
3440 }
3441 /* Check if Mailbox interrupt is enabled */
3442 if ((ha_copy2 & HA_MBATT) && !(mask & HC_MBINT_ENA)) {
3443 ha_copy2 &= ~HA_MBATT;
3444 }
3445 /* Check if ring0 interrupt is enabled */
3446 if ((ha_copy2 & HA_R0ATT) && !(mask & HC_R0INT_ENA)) {
3447 ha_copy2 &= ~HA_R0ATT;
3448 }
3449 /* Check if ring1 interrupt is enabled */
3450 if ((ha_copy2 & HA_R1ATT) && !(mask & HC_R1INT_ENA)) {
3451 ha_copy2 &= ~HA_R1ATT;
3452 }
3453 /* Check if ring2 interrupt is enabled */
3454 if ((ha_copy2 & HA_R2ATT) && !(mask & HC_R2INT_ENA)) {
3455 ha_copy2 &= ~HA_R2ATT;
3456 }
3457 /* Check if ring3 interrupt is enabled */
3458 if ((ha_copy2 & HA_R3ATT) && !(mask & HC_R3INT_ENA)) {
3459 ha_copy2 &= ~HA_R3ATT;
3460 }
3461 /* Accumulate attention bits */
3462 ha_copy |= ha_copy2;
3463
3464 /* Clear attentions except for error, link, and autoclear(MSIX) */
3465 ha_copy2 &= ~(HA_ERATT | HA_LATT /* | hba->intr_autoClear */);
3466
3467 if (ha_copy2) {
3468 WRITE_CSR_REG(hba, FC_HA_REG(hba, hba->csr_addr), ha_copy2);
3469 }
3470 return (ha_copy);
3471
3472} /* emlxs_get_attention() */
3473
3474
3475static void
3476emlxs_proc_attention(emlxs_hba_t *hba, uint32_t ha_copy)
3477{
3478 /* ha_copy should be pre-filtered */
3479
3480 /*
3481 * EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
3482 * "emlxs_proc_attention: ha_copy=%x", ha_copy);
3483 */
3484
3485 if (hba->state < FC_WARM_START) {
3486 return;
3487 }
3488 if (!ha_copy) {
3489 return;
3490 }
3491 if (hba->bus_type == SBUS_FC) {
3492 (void) READ_SBUS_CSR_REG(hba, FC_SHS_REG(hba,
3493 hba->sbus_csr_addr));
3494 }
3495 /* Adapter error */
3496 if (ha_copy & HA_ERATT) {
3497 HBASTATS.IntrEvent[6]++;
3498 emlxs_handle_ff_error(hba);
3499 return;
3500 }
3501 /* Mailbox interrupt */
3502 if (ha_copy & HA_MBATT) {
3503 HBASTATS.IntrEvent[5]++;
3504 (void) emlxs_handle_mb_event(hba);
3505 }
3506 /* Link Attention interrupt */
3507 if (ha_copy & HA_LATT) {
3508 HBASTATS.IntrEvent[4]++;
3509 emlxs_handle_link_event(hba);
3510 }
3511 /* event on ring 0 - FCP Ring */
3512 if (ha_copy & HA_R0ATT) {
3513 HBASTATS.IntrEvent[0]++;
3514 emlxs_handle_ring_event(hba, 0, ha_copy);
3515 }
3516 /* event on ring 1 - IP Ring */
3517 if (ha_copy & HA_R1ATT) {
3518 HBASTATS.IntrEvent[1]++;
3519 emlxs_handle_ring_event(hba, 1, ha_copy);
3520 }
3521 /* event on ring 2 - ELS Ring */
3522 if (ha_copy & HA_R2ATT) {
3523 HBASTATS.IntrEvent[2]++;
3524 emlxs_handle_ring_event(hba, 2, ha_copy);
3525 }
3526 /* event on ring 3 - CT Ring */
3527 if (ha_copy & HA_R3ATT) {
3528 HBASTATS.IntrEvent[3]++;
3529 emlxs_handle_ring_event(hba, 3, ha_copy);
3530 }
3531 if (hba->bus_type == SBUS_FC) {
3532 WRITE_SBUS_CSR_REG(hba,
3533 FC_SHS_REG(hba, hba->sbus_csr_addr),
3534 SBUS_STAT_IP);
3535 }
3536 /* Set heartbeat flag to show activity */
3537 hba->heartbeat_flag = 1;
3538
3539 return;
3540
3541} /* emlxs_proc_attention() */
3542
3543
3544#ifdef MSI_SUPPORT
3545
3546static uint32_t
3547emlxs_msi_intr(char *arg1, char *arg2)
3548{
3549 emlxs_hba_t *hba = (emlxs_hba_t *)arg1;
3550 uint16_t msgid;
3551 uint32_t hc_copy;
3552 uint32_t ha_copy;
3553 uint32_t restore = 0;
3554
3555 /*
3556 * EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg, "emlxs_msi_intr:
3557 * arg1=%p arg2=%p", arg1, arg2);
3558 */
3559
3560 /* Check for legacy interrupt handling */
3561 if (hba->intr_type == DDI_INTR_TYPE_FIXED) {
3562 mutex_enter(&EMLXS_PORT_LOCK);
3563
3564 if (hba->flag & FC_OFFLINE_MODE) {
3565 mutex_exit(&EMLXS_PORT_LOCK);
3566
3567 if (hba->bus_type == SBUS_FC) {
3568 return (DDI_INTR_CLAIMED);
3569 } else {
3570 return (DDI_INTR_UNCLAIMED);
3571 }
3572 }
3573 /* Get host attention bits */
3574 ha_copy = emlxs_get_attention(hba, -1);
3575
3576 if (ha_copy == 0) {
3577 if (hba->intr_unclaimed) {
3578 mutex_exit(&EMLXS_PORT_LOCK);
3579 return (DDI_INTR_UNCLAIMED);
3580 }
3581 hba->intr_unclaimed = 1;
3582 } else {
3583 hba->intr_unclaimed = 0;
3584 }
3585
3586 mutex_exit(&EMLXS_PORT_LOCK);
3587
3588 /* Process the interrupt */
3589 emlxs_proc_attention(hba, ha_copy);
3590
3591 return (DDI_INTR_CLAIMED);
3592 }
3593 /* DDI_INTR_TYPE_MSI */
3594 /* DDI_INTR_TYPE_MSIX */
3595
3596 /* Get MSI message id */
3597 msgid = (uint16_t)(unsigned long)arg2;
3598
3599 /* Validate the message id */
3600 if (msgid >= hba->intr_count) {
3601 msgid = 0;
3602 }
3603 mutex_enter(&EMLXS_INTR_LOCK(msgid));
3604
3605 mutex_enter(&EMLXS_PORT_LOCK);
3606
3607 /* Check if adapter is offline */
3608 if (hba->flag & FC_OFFLINE_MODE) {
3609 mutex_exit(&EMLXS_PORT_LOCK);
3610 mutex_exit(&EMLXS_INTR_LOCK(msgid));
3611
3612 /* Always claim an MSI interrupt */
3613 return (DDI_INTR_CLAIMED);
3614 }
3615 /* Disable interrupts associated with this msgid */
3616 if (msgid == 0 && (hba->model_info.chip == EMLXS_ZEPHYR_CHIP)) {
3617 hc_copy = hba->hc_copy & ~hba->intr_mask;
3618 WRITE_CSR_REG(hba, FC_HC_REG(hba, hba->csr_addr), hc_copy);
3619 restore = 1;
3620 }
3621 /* Get host attention bits */
3622 ha_copy = emlxs_get_attention(hba, msgid);
3623
3624 mutex_exit(&EMLXS_PORT_LOCK);
3625
3626 /* Process the interrupt */
3627 emlxs_proc_attention(hba, ha_copy);
3628
3629 /* Restore interrupts */
3630 if (restore) {
3631 mutex_enter(&EMLXS_PORT_LOCK);
3632 WRITE_CSR_REG(hba, FC_HC_REG(hba, hba->csr_addr), hba->hc_copy);
3633 mutex_exit(&EMLXS_PORT_LOCK);
3634 }
3635 mutex_exit(&EMLXS_INTR_LOCK(msgid));
3636
3637 return (DDI_INTR_CLAIMED);
3638
3639} /* emlxs_msi_intr() */
3640
3641#endif /* MSI_SUPPORT */
3642
3643static int
3644emlxs_intx_intr(char *arg)
3645{
3646 emlxs_hba_t *hba = (emlxs_hba_t *)arg;
3647 uint32_t ha_copy = 0;
3648
3649 mutex_enter(&EMLXS_PORT_LOCK);
3650
3651 if (hba->flag & FC_OFFLINE_MODE) {
3652 mutex_exit(&EMLXS_PORT_LOCK);
3653
3654 if (hba->bus_type == SBUS_FC) {
3655 return (DDI_INTR_CLAIMED);
3656 } else {
3657 return (DDI_INTR_UNCLAIMED);
3658 }
3659 }
3660 /* Get host attention bits */
3661 ha_copy = emlxs_get_attention(hba, -1);
3662
3663 if (ha_copy == 0) {
3664 if (hba->intr_unclaimed) {
3665 mutex_exit(&EMLXS_PORT_LOCK);
3666 return (DDI_INTR_UNCLAIMED);
3667 }
3668 hba->intr_unclaimed = 1;
3669 } else {
3670 hba->intr_unclaimed = 0;
3671 }
3672
3673 mutex_exit(&EMLXS_PORT_LOCK);
3674
3675 /* Process the interrupt */
3676 emlxs_proc_attention(hba, ha_copy);
3677
3678 return (DDI_INTR_CLAIMED);
3679
3680} /* emlxs_intx_intr() */
3681
3682
3683/* ARGSUSED */
3684static void
3685emlxs_handle_async_event(emlxs_hba_t *hba, RING *rp, IOCBQ *iocbq)
3686{
3687 emlxs_port_t *port = &PPORT;
3688 IOCB *iocb;
3397/* ARGSUSED */
3398static void
3399emlxs_handle_async_event(emlxs_hba_t *hba, RING *rp, IOCBQ *iocbq)
3400{
3401 emlxs_port_t *port = &PPORT;
3402 IOCB *iocb;
3403 uint32_t *w;
3404 int i, j;
3689
3690 iocb = &iocbq->iocb;
3691
3692 if (iocb->ulpStatus != 0) {
3693 return;
3694 }
3405
3406 iocb = &iocbq->iocb;
3407
3408 if (iocb->ulpStatus != 0) {
3409 return;
3410 }
3411
3695 switch (iocb->un.astat.EventCode) {
3696 case 0x0100: /* Temp Warning */
3697
3698 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_temp_warning_msg,
3699 "Adapter is very hot (%d �C). Take corrective action.",
3700 iocb->ulpContext);
3701
3702 emlxs_log_temp_event(port, 0x02, iocb->ulpContext);

--- 5 unchanged lines hidden ---

3708
3709 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_temp_msg,
3710 "Adapter temperature now safe (%d �C).",
3711 iocb->ulpContext);
3712
3713 emlxs_log_temp_event(port, 0x03, iocb->ulpContext);
3714
3715 break;
3412 switch (iocb->un.astat.EventCode) {
3413 case 0x0100: /* Temp Warning */
3414
3415 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_temp_warning_msg,
3416 "Adapter is very hot (%d �C). Take corrective action.",
3417 iocb->ulpContext);
3418
3419 emlxs_log_temp_event(port, 0x02, iocb->ulpContext);

--- 5 unchanged lines hidden ---

3425
3426 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_temp_msg,
3427 "Adapter temperature now safe (%d �C).",
3428 iocb->ulpContext);
3429
3430 emlxs_log_temp_event(port, 0x03, iocb->ulpContext);
3431
3432 break;
3716 }
3717
3718 return;
3719
3720} /* emlxs_handle_async_event() */
3721
3722
3723/*
3724 * emlxs_handle_ff_error
3725 *
3726 * Description: Processes a FireFly error
3727 * Runs at Interrupt level
3728 *
3729 */
3730extern void
3731emlxs_handle_ff_error(emlxs_hba_t *hba)
3732{
3733 emlxs_port_t *port = &PPORT;
3734 uint32_t status;
3735 uint32_t status1;
3736 uint32_t status2;
3737
3738 /* do what needs to be done, get error from STATUS REGISTER */
3739 status = READ_CSR_REG(hba, FC_HS_REG(hba, hba->csr_addr));
3740
3741 /* Clear Chip error bit */
3742 WRITE_CSR_REG(hba, FC_HA_REG(hba, hba->csr_addr), HA_ERATT);
3743
3744 if (status & HS_OVERTEMP) {
3745 status1 = READ_SLIM_ADDR(hba,
3746 ((volatile uint8_t *) hba->slim_addr + 0xb0));
3747
3748 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_hardware_error_msg,
3749 "Maximum adapter temperature exceeded (%d °C).",
3750 status1);
3751
3752 hba->flag |= FC_OVERTEMP_EVENT;
3753 emlxs_log_temp_event(port, 0x01, status1);
3754 } else {
3755 status1 = READ_SLIM_ADDR(hba,
3756 ((volatile uint8_t *) hba->slim_addr + 0xa8));
3757 status2 = READ_SLIM_ADDR(hba,
3758 ((volatile uint8_t *) hba->slim_addr + 0xac));
3759
3760 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_hardware_error_msg,
3761 "Host Error Attention: status=0x%x status1=0x%x "
3762 "status2=0x%x", status, status1, status2);
3763 }
3764
3765 emlxs_ffstate_change(hba, FC_ERROR);
3766
3767 if (status & HS_FFER6) {
3768 (void) thread_create(NULL, 0, emlxs_restart_thread,
3769 (char *)hba, 0, &p0, TS_RUN, v.v_maxsyspri - 2);
3770 } else {
3771 (void) thread_create(NULL, 0, emlxs_shutdown_thread,
3772 (char *)hba, 0, &p0, TS_RUN, v.v_maxsyspri - 2);
3773 }
3774
3775} /* emlxs_handle_ff_error() */
3776
3777
3778
3433
3434 default:
3435
3436 w = (uint32_t *)iocb;
3437 for (i = 0, j = 0; i < 8; i++, j += 2) {
3438 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_async_msg,
3439 "(Word[%d]=%x Word[%d]=%x)", j, w[j], j + 1,
3440 w[j + 1]);
3441 }
3442
3443 emlxs_log_async_event(port, iocb);
3444 }
3445
3446 return;
3447
3448} /* emlxs_handle_async_event() */
3449
3450
3779extern void
3780emlxs_reset_link_thread(void *arg)
3781{
3782 emlxs_hba_t *hba = (emlxs_hba_t *)arg;
3783 emlxs_port_t *port = &PPORT;
3784
3785 /* Attempt a link reset to recover */
3786 (void) emlxs_reset(port, FC_FCA_LINK_RESET);
3787
3451extern void
3452emlxs_reset_link_thread(void *arg)
3453{
3454 emlxs_hba_t *hba = (emlxs_hba_t *)arg;
3455 emlxs_port_t *port = &PPORT;
3456
3457 /* Attempt a link reset to recover */
3458 (void) emlxs_reset(port, FC_FCA_LINK_RESET);
3459
3788 (void) thread_exit();
3460 thread_exit();
3789
3461
3790} /* emlxs_reset_link_thread() */
3462} /* emlxs_reset_link_thread() */
3791
3792
3793extern void
3794emlxs_restart_thread(void *arg)
3795{
3796 emlxs_hba_t *hba = (emlxs_hba_t *)arg;
3797 emlxs_port_t *port = &PPORT;
3798
3799 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_adapter_trans_msg, "Restarting...");
3800
3801 /* Attempt a full hardware reset to recover */
3802 if (emlxs_reset(port, FC_FCA_RESET) != FC_SUCCESS) {
3803 emlxs_ffstate_change(hba, FC_ERROR);
3804
3463
3464
3465extern void
3466emlxs_restart_thread(void *arg)
3467{
3468 emlxs_hba_t *hba = (emlxs_hba_t *)arg;
3469 emlxs_port_t *port = &PPORT;
3470
3471 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_adapter_trans_msg, "Restarting...");
3472
3473 /* Attempt a full hardware reset to recover */
3474 if (emlxs_reset(port, FC_FCA_RESET) != FC_SUCCESS) {
3475 emlxs_ffstate_change(hba, FC_ERROR);
3476
3805 (void) thread_create(NULL, 0, emlxs_shutdown_thread,
3806 (char *)hba, 0, &p0, TS_RUN, v.v_maxsyspri - 2);
3477 thread_create(NULL, 0, emlxs_shutdown_thread, (char *)hba, 0,
3478 &p0, TS_RUN, v.v_maxsyspri - 2);
3807 }
3479 }
3808 (void) thread_exit();
3809
3480
3810} /* emlxs_restart_thread() */
3481 thread_exit();
3811
3482
3483} /* emlxs_restart_thread() */
3812
3484
3485
3813extern void
3814emlxs_shutdown_thread(void *arg)
3815{
3816 emlxs_hba_t *hba = (emlxs_hba_t *)arg;
3817 emlxs_port_t *port = &PPORT;
3818
3819 mutex_enter(&EMLXS_PORT_LOCK);
3820 if (hba->flag & FC_SHUTDOWN) {
3821 mutex_exit(&EMLXS_PORT_LOCK);
3486extern void
3487emlxs_shutdown_thread(void *arg)
3488{
3489 emlxs_hba_t *hba = (emlxs_hba_t *)arg;
3490 emlxs_port_t *port = &PPORT;
3491
3492 mutex_enter(&EMLXS_PORT_LOCK);
3493 if (hba->flag & FC_SHUTDOWN) {
3494 mutex_exit(&EMLXS_PORT_LOCK);
3822 (void) thread_exit();
3495 thread_exit();
3823 }
3824 hba->flag |= FC_SHUTDOWN;
3825 mutex_exit(&EMLXS_PORT_LOCK);
3826
3496 }
3497 hba->flag |= FC_SHUTDOWN;
3498 mutex_exit(&EMLXS_PORT_LOCK);
3499
3827 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_adapter_trans_msg, "Shutting down...");
3500 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_adapter_trans_msg,
3501 "Shutting down...");
3828
3829 /* Take adapter offline and leave it there */
3830 (void) emlxs_offline(hba);
3831
3832 /* Log a dump event */
3833 emlxs_log_dump_event(port, NULL, 0);
3834
3835 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_shutdown_msg, "Reboot required.");
3836
3502
3503 /* Take adapter offline and leave it there */
3504 (void) emlxs_offline(hba);
3505
3506 /* Log a dump event */
3507 emlxs_log_dump_event(port, NULL, 0);
3508
3509 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_shutdown_msg, "Reboot required.");
3510
3837 (void) thread_exit();
3511 thread_exit();
3838
3512
3839} /* emlxs_shutdown_thread() */
3513} /* emlxs_shutdown_thread() */
3840
3841
3514
3515
3842
3843/*
3844 * emlxs_handle_link_event
3845 *
3846 * Description: Process a Link Attention.
3847 *
3848 */
3849static void
3850emlxs_handle_link_event(emlxs_hba_t *hba)
3851{
3852 emlxs_port_t *port = &PPORT;
3853 MAILBOX *mb;
3854
3855 HBASTATS.LinkEvent++;
3856
3857 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_link_event_msg,
3858 "event=%x", HBASTATS.LinkEvent);
3859
3860
3861 /* Get a buffer which will be used for mailbox commands */
3862 if ((mb = (MAILBOX *) emlxs_mem_get(hba, MEM_MBOX | MEM_PRI))) {
3863 /* Get link attention message */
3864 if (emlxs_mb_read_la(hba, mb) == 0) {
3865 if (emlxs_mb_issue_cmd(hba, mb, MBX_NOWAIT, 0) !=
3866 MBX_BUSY) {
3867 (void) emlxs_mem_put(hba, MEM_MBOX,
3868 (uint8_t *)mb);
3869 }
3870 mutex_enter(&EMLXS_PORT_LOCK);
3871
3872
3873 /*
3874 * Clear Link Attention in HA REG
3875 */
3876 WRITE_CSR_REG(hba,
3877 FC_HA_REG(hba, hba->csr_addr), HA_LATT);
3878
3879 mutex_exit(&EMLXS_PORT_LOCK);
3880 } else {
3881 (void) emlxs_mem_put(hba, MEM_MBOX, (uint8_t *)mb);
3882 }
3883 }
3884} /* emlxs_handle_link_event() */
3885
3886
3887/*
3888 * emlxs_handle_ring_event
3889 *
3890 * Description: Process a Ring Attention.
3891 *
3892 */
3893static void
3894emlxs_handle_ring_event(emlxs_hba_t *hba, int32_t ring_no, uint32_t ha_copy)
3895{
3896 emlxs_port_t *port = &PPORT;
3897 RING *rp;
3898 IOCB *entry;
3899 IOCBQ *iocbq;
3900 IOCBQ local_iocbq;
3901 PGP *pgp;
3902 uint32_t count;
3903 volatile uint32_t chipatt;
3904 void *ioa2;
3905 uint32_t reg;
3906 off_t offset;
3907 IOCBQ *rsp_head = NULL;
3908 IOCBQ *rsp_tail = NULL;
3909 emlxs_buf_t *sbp;
3910
3911 count = 0;
3912 rp = &hba->ring[ring_no];
3913
3914 /* Isolate this ring's host attention bits */
3915 /* This makes all ring attention bits equal to Ring0 attention bits */
3916 reg = (ha_copy >> (ring_no * 4)) & 0x0f;
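	/*
	 * Note: each ring owns a 4-bit field in the Host Attention register,
	 * so after this shift the ring-0 bit definitions (HA_R0ATT,
	 * HA_R0RE_REQ, HA_R0CE_RSP) can be tested for any ring.
	 */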
3917
3918 /*
3919 * Gather iocb entries off response ring. Ensure entry is owned by
3920 * the host.
3921 */
3922 pgp = (PGP *) & ((SLIM2 *) hba->slim2.virt)->mbx.us.s2.port[ring_no];
3923 offset = (off_t)((uint64_t)(unsigned long)&(pgp->rspPutInx) -
3924 (uint64_t)(unsigned long)hba->slim2.virt);
3925 emlxs_mpdata_sync(hba->slim2.dma_handle, offset, 4,
3926 DDI_DMA_SYNC_FORKERNEL);
3927 rp->fc_port_rspidx = PCIMEM_LONG(pgp->rspPutInx);
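	/*
	 * Note: the adapter advances rspPutInx as it posts completions; the
	 * loop below consumes response entries until the driver's local get
	 * index (fc_rspidx) catches up with it.
	 */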
3928
3929 /* While ring is not empty */
3930 while (rp->fc_rspidx != rp->fc_port_rspidx) {
3931 HBASTATS.IocbReceived[ring_no]++;
3932
3933 /* Get the next response ring iocb */
3934 entry = (IOCB *) (((char *)rp->fc_rspringaddr +
3935 (rp->fc_rspidx * hba->iocb_rsp_size)));
3936
3937 /* DMA sync the response ring iocb for the adapter */
3938 offset = (off_t)((uint64_t)(unsigned long)entry -
3939 (uint64_t)(unsigned long)hba->slim2.virt);
3940 emlxs_mpdata_sync(hba->slim2.dma_handle, offset,
3941 hba->iocb_rsp_size, DDI_DMA_SYNC_FORKERNEL);
3942
3943 count++;
3944
3945 /* Copy word6 and word7 to local iocb for now */
3946 iocbq = &local_iocbq;
3947 emlxs_pcimem_bcopy((uint32_t *)entry + 6, (uint32_t *)iocbq + 6,
3948 (sizeof (uint32_t) * 2));
3949
3950 /* When LE is not set, the entire command has not been received */
3951 if (!iocbq->iocb.ulpLe) {
3952 /* This should never happen */
3953 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_ring_error_msg,
3954 "ulpLE is not set. ring=%d iotag=%x cmd=%x "
3955 "status=%x", ring_no, iocbq->iocb.ulpIoTag,
3956 iocbq->iocb.ulpCommand, iocbq->iocb.ulpStatus);
3957
3958 goto next;
3959 }
3960 switch (iocbq->iocb.ulpCommand) {
3961 /* Ring 0 registered commands */
3962 case CMD_FCP_ICMND_CR:
3963 case CMD_FCP_ICMND_CX:
3964 case CMD_FCP_IREAD_CR:
3965 case CMD_FCP_IREAD_CX:
3966 case CMD_FCP_IWRITE_CR:
3967 case CMD_FCP_IWRITE_CX:
3968 case CMD_FCP_ICMND64_CR:
3969 case CMD_FCP_ICMND64_CX:
3970 case CMD_FCP_IREAD64_CR:
3971 case CMD_FCP_IREAD64_CX:
3972 case CMD_FCP_IWRITE64_CR:
3973 case CMD_FCP_IWRITE64_CX:
3974#ifdef SFCT_SUPPORT
3975 case CMD_FCP_TSEND_CX:
3976 case CMD_FCP_TSEND64_CX:
3977 case CMD_FCP_TRECEIVE_CX:
3978 case CMD_FCP_TRECEIVE64_CX:
3979 case CMD_FCP_TRSP_CX:
3980 case CMD_FCP_TRSP64_CX:
3981#endif /* SFCT_SUPPORT */
3982
3983 /* Ring 1 registered commands */
3984 case CMD_XMIT_BCAST_CN:
3985 case CMD_XMIT_BCAST_CX:
3986 case CMD_XMIT_SEQUENCE_CX:
3987 case CMD_XMIT_SEQUENCE_CR:
3988 case CMD_XMIT_BCAST64_CN:
3989 case CMD_XMIT_BCAST64_CX:
3990 case CMD_XMIT_SEQUENCE64_CX:
3991 case CMD_XMIT_SEQUENCE64_CR:
3992 case CMD_CREATE_XRI_CR:
3993 case CMD_CREATE_XRI_CX:
3994
3995 /* Ring 2 registered commands */
3996 case CMD_ELS_REQUEST_CR:
3997 case CMD_ELS_REQUEST_CX:
3998 case CMD_XMIT_ELS_RSP_CX:
3999 case CMD_ELS_REQUEST64_CR:
4000 case CMD_ELS_REQUEST64_CX:
4001 case CMD_XMIT_ELS_RSP64_CX:
4002
4003 /* Ring 3 registered commands */
4004 case CMD_GEN_REQUEST64_CR:
4005 case CMD_GEN_REQUEST64_CX:
4006
4007 sbp = emlxs_unregister_pkt(rp, iocbq->iocb.ulpIoTag, 0);
4008 break;
4009
4010 default:
4011 sbp = NULL;
4012 }
4013
4014 /* If packet is stale, then drop it. */
4015 if (sbp == STALE_PACKET) {
4016 /* Copy entry to the local iocbq */
4017 emlxs_pcimem_bcopy((uint32_t *)entry, (uint32_t *)iocbq,
4018 hba->iocb_rsp_size);
4019
4020 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_iocb_stale_msg,
4021 "ringno=%d iocb=%p cmd=%x status=%x error=%x "
4022 "iotag=%x context=%x info=%x", ring_no, iocbq,
4023 (uint8_t)iocbq->iocb.ulpCommand,
4024 iocbq->iocb.ulpStatus,
4025 (uint8_t)iocbq->iocb.un.grsp.perr.statLocalError,
4026 (uint16_t)iocbq->iocb.ulpIoTag,
4027 (uint16_t)iocbq->iocb.ulpContext,
4028 (uint8_t)iocbq->iocb.ulpRsvdByte);
4029
4030 goto next;
4031 }
4032 /*
4033 * If a packet was found, then queue the packet's iocb for
4034 * deferred processing
4035 */
4036 else if (sbp) {
4037 atomic_add_32(&hba->io_active, -1);
4038
4039 /* Copy entry to sbp's iocbq */
4040 iocbq = &sbp->iocbq;
4041 emlxs_pcimem_bcopy((uint32_t *)entry, (uint32_t *)iocbq,
4042 hba->iocb_rsp_size);
4043
4044 iocbq->next = NULL;
4045
4046 /*
4047 * If this is NOT a polled command completion or a
4048 * driver allocated pkt, then defer pkt completion.
4049 */
4050 if (!(sbp->pkt_flags &
4051 (PACKET_POLLED | PACKET_ALLOCATED))) {
4052 /* Add the IOCB to the local list */
4053 if (!rsp_head) {
4054 rsp_head = iocbq;
4055 } else {
4056 rsp_tail->next = iocbq;
4057 }
4058
4059 rsp_tail = iocbq;
4060
4061 goto next;
4062 }
4063 } else {
4064 /* Copy entry to the local iocbq */
4065 emlxs_pcimem_bcopy((uint32_t *)entry, (uint32_t *)iocbq,
4066 hba->iocb_rsp_size);
4067
4068 iocbq->next = NULL;
4069 iocbq->bp = NULL;
4070 iocbq->port = &PPORT;
4071 iocbq->ring = rp;
4072 iocbq->node = NULL;
4073 iocbq->sbp = NULL;
4074 iocbq->flag = 0;
4075 }
4076
4077 /* process the ring event now */
4078 emlxs_proc_ring_event(hba, rp, iocbq);
4079
4080next:
4081 /* Increment the driver's local response get index */
4082 if (++rp->fc_rspidx >= rp->fc_numRiocb) {
4083 rp->fc_rspidx = 0;
4084 }
4085 } /* while(TRUE) */
4086
4087 if (rsp_head) {
4088 mutex_enter(&rp->rsp_lock);
4089 if (rp->rsp_head == NULL) {
4090 rp->rsp_head = rsp_head;
4091 rp->rsp_tail = rsp_tail;
4092 } else {
4093 rp->rsp_tail->next = rsp_head;
4094 rp->rsp_tail = rsp_tail;
4095 }
4096 mutex_exit(&rp->rsp_lock);
4097
4098 emlxs_thread_trigger2(&rp->intr_thread, emlxs_proc_ring, rp);
4099 }
4100 /* Check if at least one response entry was processed */
4101 if (count) {
4102 /* Update response get index for the adapter */
4103 if (hba->bus_type == SBUS_FC) {
4104 ((SLIM2 *) hba->slim2.virt)->mbx.us.s2.host[ring_no].
4105 rspGetInx = PCIMEM_LONG(rp->fc_rspidx);
4106
4107 /* DMA sync the index for the adapter */
4108 offset = (off_t)((uint64_t)(unsigned long)&((
4109 (SLIM2 *)hba->slim2.virt)->mbx.us.s2.host[ring_no].
4110 rspGetInx) -
4111 (uint64_t)(unsigned long)hba->slim2.virt);
4112 emlxs_mpdata_sync(hba->slim2.dma_handle, offset, 4,
4113 DDI_DMA_SYNC_FORDEV);
4114 } else {
4115 ioa2 = (void *) ((char *)hba->slim_addr +
4116 hba->hgp_ring_offset + (((ring_no * 2) + 1) *
4117 sizeof (uint32_t)));
4118 WRITE_SLIM_ADDR(hba, (volatile uint32_t *) ioa2,
4119 rp->fc_rspidx);
4120 }
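	/*
	 * Note: the update above goes to the host-memory SLIM2 copy (with a
	 * DMA sync) on SBUS adapters, and directly into the adapter's SLIM
	 * on PCI adapters.
	 */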
4121
4122 if (reg & HA_R0RE_REQ) {
4123 /* HBASTATS.chipRingFree++; */
4124
4125 mutex_enter(&EMLXS_PORT_LOCK);
4126
4127 /* Tell the adapter we serviced the ring */
4128 chipatt = ((CA_R0ATT | CA_R0RE_RSP) <<
4129 (ring_no * 4));
4130 WRITE_CSR_REG(hba, FC_CA_REG(hba, hba->csr_addr),
4131 chipatt);
4132
4133 mutex_exit(&EMLXS_PORT_LOCK);
4134 }
4135 }
4136 if (reg & HA_R0CE_RSP) {
4137 /* HBASTATS.hostRingFree++; */
4138
4139 /* Cmd ring may be available. Try sending more iocbs */
4140 emlxs_issue_iocb_cmd(hba, rp, 0);
4141 }
4142 /* HBASTATS.ringEvent++; */
4143
4144 return;
4145
4146} /* emlxs_handle_ring_event() */
4147
4148
4149/* ARGSUSED */
4150extern void
4151emlxs_proc_ring(emlxs_hba_t *hba, RING *rp, void *arg2)
4152{
4153 IOCBQ *iocbq;
4154 IOCBQ *rsp_head;
4155
4156 /*
3516/* ARGSUSED */
3517extern void
3518emlxs_proc_ring(emlxs_hba_t *hba, RING *rp, void *arg2)
3519{
3520 IOCBQ *iocbq;
3521 IOCBQ *rsp_head;
3522
3523 /*
4157 * EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg, "emlxs_proc_ring:
4158 * ringo=%d", rp->ringno);
3524 * EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
3525 * "emlxs_proc_ring: ringo=%d", rp->ringno);
4159 */
4160
4161 mutex_enter(&rp->rsp_lock);
4162
4163 while ((rsp_head = rp->rsp_head) != NULL) {
4164 rp->rsp_head = NULL;
4165 rp->rsp_tail = NULL;
4166

--- 5 unchanged lines hidden ---

4172 emlxs_proc_ring_event(hba, rp, iocbq);
4173 }
4174
4175 mutex_enter(&rp->rsp_lock);
4176 }
4177
4178 mutex_exit(&rp->rsp_lock);
4179
3526 */
3527
3528 mutex_enter(&rp->rsp_lock);
3529
3530 while ((rsp_head = rp->rsp_head) != NULL) {
3531 rp->rsp_head = NULL;
3532 rp->rsp_tail = NULL;
3533

--- 5 unchanged lines hidden (view full) ---

3539 emlxs_proc_ring_event(hba, rp, iocbq);
3540 }
3541
3542 mutex_enter(&rp->rsp_lock);
3543 }
3544
3545 mutex_exit(&rp->rsp_lock);
3546
4180 emlxs_issue_iocb_cmd(hba, rp, 0);
3547 emlxs_sli_issue_iocb_cmd(hba, rp, 0);
4181
4182 return;
4183
3548
3549 return;
3550
4184} /* emlxs_proc_ring() */
3551} /* emlxs_proc_ring() */
4185
4186
4187/*
3552
3553
3554/*
4188 * Called from SLI-1 and SLI-2 ring event routines to process a rsp ring IOCB.
3555 * Called from SLI ring event routines to process a rsp ring IOCB.
4189 */
3556 */
4190static void
3557void
4191emlxs_proc_ring_event(emlxs_hba_t *hba, RING *rp, IOCBQ *iocbq)
4192{
4193 emlxs_port_t *port = &PPORT;
4194 char buffer[MAX_MSG_DATA + 1];
4195 IOCB *iocb;
4196
4197 iocb = &iocbq->iocb;
4198
4199 /* Check for IOCB local error */
4200 if (iocb->ulpStatus == IOSTAT_LOCAL_REJECT) {
4201 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_iocb_event_msg,
3558emlxs_proc_ring_event(emlxs_hba_t *hba, RING *rp, IOCBQ *iocbq)
3559{
3560 emlxs_port_t *port = &PPORT;
3561 char buffer[MAX_MSG_DATA + 1];
3562 IOCB *iocb;
3563
3564 iocb = &iocbq->iocb;
3565
3566 /* Check for IOCB local error */
3567 if (iocb->ulpStatus == IOSTAT_LOCAL_REJECT) {
3568 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_iocb_event_msg,
4202 "Local reject. ringno=%d iocb=%p cmd=%x iotag=%x "
4203 "context=%x info=%x error=%x",
3569 "Local reject. ringno=%d iocb=%p cmd=%x "
3570 "iotag=%x context=%x info=%x error=%x",
4204 rp->ringno, iocb, (uint8_t)iocb->ulpCommand,
4205 (uint16_t)iocb->ulpIoTag, (uint16_t)iocb->ulpContext,
4206 (uint8_t)iocb->ulpRsvdByte,
4207 (uint8_t)iocb->un.grsp.perr.statLocalError);
4208 } else if (iocb->ulpStatus == IOSTAT_ILLEGAL_FRAME_RCVD) {
4209 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_iocb_event_msg,
3571 rp->ringno, iocb, (uint8_t)iocb->ulpCommand,
3572 (uint16_t)iocb->ulpIoTag, (uint16_t)iocb->ulpContext,
3573 (uint8_t)iocb->ulpRsvdByte,
3574 (uint8_t)iocb->un.grsp.perr.statLocalError);
3575 } else if (iocb->ulpStatus == IOSTAT_ILLEGAL_FRAME_RCVD) {
3576 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_iocb_event_msg,
4210 "Illegal frame. ringno=%d iocb=%p cmd=%x iotag=%x "
4211 "context=%x info=%x error=%x",
3577 "Illegal frame. ringno=%d iocb=%p cmd=%x "
3578 "iotag=%x context=%x info=%x error=%x",
4212 rp->ringno, iocb, (uint8_t)iocb->ulpCommand,
4213 (uint16_t)iocb->ulpIoTag, (uint16_t)iocb->ulpContext,
4214 (uint8_t)iocb->ulpRsvdByte,
4215 (uint8_t)iocb->un.grsp.perr.statLocalError);
4216 }
3579 rp->ringno, iocb, (uint8_t)iocb->ulpCommand,
3580 (uint16_t)iocb->ulpIoTag, (uint16_t)iocb->ulpContext,
3581 (uint8_t)iocb->ulpRsvdByte,
3582 (uint8_t)iocb->un.grsp.perr.statLocalError);
3583 }
3584
4217 switch (iocb->ulpCommand) {
4218 /* RING 0 FCP commands */
4219 case CMD_FCP_ICMND_CR:
4220 case CMD_FCP_ICMND_CX:
4221 case CMD_FCP_IREAD_CR:
4222 case CMD_FCP_IREAD_CX:
4223 case CMD_FCP_IWRITE_CR:
4224 case CMD_FCP_IWRITE_CX:
4225 case CMD_FCP_ICMND64_CR:
4226 case CMD_FCP_ICMND64_CX:
4227 case CMD_FCP_IREAD64_CR:
4228 case CMD_FCP_IREAD64_CX:
4229 case CMD_FCP_IWRITE64_CR:
4230 case CMD_FCP_IWRITE64_CX:
3585 switch (iocb->ulpCommand) {
3586 /* RING 0 FCP commands */
3587 case CMD_FCP_ICMND_CR:
3588 case CMD_FCP_ICMND_CX:
3589 case CMD_FCP_IREAD_CR:
3590 case CMD_FCP_IREAD_CX:
3591 case CMD_FCP_IWRITE_CR:
3592 case CMD_FCP_IWRITE_CX:
3593 case CMD_FCP_ICMND64_CR:
3594 case CMD_FCP_ICMND64_CX:
3595 case CMD_FCP_IREAD64_CR:
3596 case CMD_FCP_IREAD64_CX:
3597 case CMD_FCP_IWRITE64_CR:
3598 case CMD_FCP_IWRITE64_CX:
4231 (void) emlxs_handle_fcp_event(hba, rp, iocbq);
3599 emlxs_handle_fcp_event(hba, rp, iocbq);
4232 break;
4233
4234#ifdef SFCT_SUPPORT
3600 break;
3601
3602#ifdef SFCT_SUPPORT
4235 case CMD_FCP_TSEND_CX: /* FCP_TARGET IOCB command */
3603 case CMD_FCP_TSEND_CX: /* FCP_TARGET IOCB command */
4236 case CMD_FCP_TSEND64_CX: /* FCP_TARGET IOCB command */
4237 case CMD_FCP_TRECEIVE_CX: /* FCP_TARGET IOCB command */
4238 case CMD_FCP_TRECEIVE64_CX: /* FCP_TARGET IOCB command */
3604 case CMD_FCP_TSEND64_CX: /* FCP_TARGET IOCB command */
3605 case CMD_FCP_TRECEIVE_CX: /* FCP_TARGET IOCB command */
3606 case CMD_FCP_TRECEIVE64_CX: /* FCP_TARGET IOCB command */
4239 case CMD_FCP_TRSP_CX: /* FCP_TARGET IOCB command */
4240 case CMD_FCP_TRSP64_CX: /* FCP_TARGET IOCB command */
3607 case CMD_FCP_TRSP_CX: /* FCP_TARGET IOCB command */
3608 case CMD_FCP_TRSP64_CX: /* FCP_TARGET IOCB command */
4241 (void) emlxs_fct_handle_fcp_event(hba, rp, iocbq);
4242 break;
3609 (void) emlxs_fct_handle_fcp_event(hba, rp, iocbq);
3610 break;
4243#endif /* SFCT_SUPPORT */
3611#endif /* SFCT_SUPPORT */
4244
4245 /* RING 1 IP commands */
4246 case CMD_XMIT_BCAST_CN:
4247 case CMD_XMIT_BCAST_CX:
4248 case CMD_XMIT_BCAST64_CN:
4249 case CMD_XMIT_BCAST64_CX:
4250 (void) emlxs_ip_handle_event(hba, rp, iocbq);
4251 break;

--- 10 unchanged lines hidden ---

4262 case FC_TYPE_FC_SERVICES:
4263 (void) emlxs_ct_handle_event(hba, rp, iocbq);
4264 break;
4265
4266 default:
4267 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_iocb_invalid_msg,
4268 "cmd=%x type=%x status=%x iotag=%x context=%x ",
4269 iocb->ulpCommand, iocb->un.rcvseq64.w5.hcsw.Type,
3612
3613 /* RING 1 IP commands */
3614 case CMD_XMIT_BCAST_CN:
3615 case CMD_XMIT_BCAST_CX:
3616 case CMD_XMIT_BCAST64_CN:
3617 case CMD_XMIT_BCAST64_CX:
3618 (void) emlxs_ip_handle_event(hba, rp, iocbq);
3619 break;

--- 10 unchanged lines hidden (view full) ---

3630 case FC_TYPE_FC_SERVICES:
3631 (void) emlxs_ct_handle_event(hba, rp, iocbq);
3632 break;
3633
3634 default:
3635 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_iocb_invalid_msg,
3636 "cmd=%x type=%x status=%x iotag=%x context=%x ",
3637 iocb->ulpCommand, iocb->un.rcvseq64.w5.hcsw.Type,
4270 iocb->ulpStatus, iocb->ulpIoTag, iocb->ulpContext);
3638 iocb->ulpStatus, iocb->ulpIoTag,
3639 iocb->ulpContext);
4271 }
4272 break;
4273
4274 case CMD_RCV_SEQUENCE_CX:
4275 case CMD_RCV_SEQUENCE64_CX:
4276 case CMD_RCV_SEQ64_CX:
4277 case CMD_RCV_ELS_REQ_CX: /* Unsolicited ELS frame */
4278 case CMD_RCV_ELS_REQ64_CX: /* Unsolicited ELS frame */
3640 }
3641 break;
3642
3643 case CMD_RCV_SEQUENCE_CX:
3644 case CMD_RCV_SEQUENCE64_CX:
3645 case CMD_RCV_SEQ64_CX:
3646 case CMD_RCV_ELS_REQ_CX: /* Unsolicited ELS frame */
3647 case CMD_RCV_ELS_REQ64_CX: /* Unsolicited ELS frame */
4279 case CMD_RCV_ELS64_CX: /* Unsolicited ELS frame */
3648 case CMD_RCV_ELS64_CX: /* Unsolicited ELS frame */
4280 (void) emlxs_handle_rcv_seq(hba, rp, iocbq);
4281 break;
4282
4283 case CMD_RCV_SEQ_LIST64_CX:
4284 (void) emlxs_ip_handle_rcv_seq_list(hba, rp, iocbq);
4285 break;
4286
4287 case CMD_CREATE_XRI_CR:

--- 14 unchanged lines hidden ---

4302 /* RING 3 CT commands */
4303 case CMD_GEN_REQUEST64_CR:
4304 case CMD_GEN_REQUEST64_CX:
4305 switch (iocb->un.rcvseq64.w5.hcsw.Type) {
4306#ifdef MENLO_SUPPORT
4307 case EMLXS_MENLO_TYPE:
4308 (void) emlxs_menlo_handle_event(hba, rp, iocbq);
4309 break;
3649 (void) emlxs_handle_rcv_seq(hba, rp, iocbq);
3650 break;
3651
3652 case CMD_RCV_SEQ_LIST64_CX:
3653 (void) emlxs_ip_handle_rcv_seq_list(hba, rp, iocbq);
3654 break;
3655
3656 case CMD_CREATE_XRI_CR:

--- 14 unchanged lines hidden ---

3671 /* RING 3 CT commands */
3672 case CMD_GEN_REQUEST64_CR:
3673 case CMD_GEN_REQUEST64_CX:
3674 switch (iocb->un.rcvseq64.w5.hcsw.Type) {
3675#ifdef MENLO_SUPPORT
3676 case EMLXS_MENLO_TYPE:
3677 (void) emlxs_menlo_handle_event(hba, rp, iocbq);
3678 break;
4310#endif /* MENLO_SUPPORT */
3679#endif /* MENLO_SUPPORT */
4311
4312 case FC_TYPE_FC_SERVICES:
4313 (void) emlxs_ct_handle_event(hba, rp, iocbq);
4314 break;
4315
4316 default:
4317 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_iocb_invalid_msg,
4318 "cmd=%x type=%x status=%x iotag=%x context=%x ",
4319 iocb->ulpCommand, iocb->un.rcvseq64.w5.hcsw.Type,
3680
3681 case FC_TYPE_FC_SERVICES:
3682 (void) emlxs_ct_handle_event(hba, rp, iocbq);
3683 break;
3684
3685 default:
3686 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_iocb_invalid_msg,
3687 "cmd=%x type=%x status=%x iotag=%x context=%x ",
3688 iocb->ulpCommand, iocb->un.rcvseq64.w5.hcsw.Type,
4320 iocb->ulpStatus, iocb->ulpIoTag, iocb->ulpContext);
3689 iocb->ulpStatus, iocb->ulpIoTag,
3690 iocb->ulpContext);
4321 }
4322 break;
4323
4324 case CMD_ABORT_XRI_CN: /* Abort fcp command */
4325
4326 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_flushed_msg,
4327 "ABORT_XRI_CN: rpi=%d iotag=%x status=%x parm=%x",
4328 (uint32_t)iocb->un.acxri.abortContextTag,
3691 }
3692 break;
3693
3694 case CMD_ABORT_XRI_CN: /* Abort fcp command */
3695
3696 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_flushed_msg,
3697 "ABORT_XRI_CN: rpi=%d iotag=%x status=%x parm=%x",
3698 (uint32_t)iocb->un.acxri.abortContextTag,
4329 (uint32_t)iocb->un.acxri.abortIoTag,
4330 iocb->ulpStatus, iocb->un.acxri.parm);
3699 (uint32_t)iocb->un.acxri.abortIoTag, iocb->ulpStatus,
3700 iocb->un.acxri.parm);
4331
3701
3702#ifdef SFCT_SUPPORT
3703 if (port->tgt_mode) {
3704 (void) emlxs_fct_handle_abort(hba, rp, iocbq);
3705 }
3706#endif /* SFCT_SUPPORT */
4332 break;
4333
4334 case CMD_ABORT_XRI_CX: /* Abort command */
4335
4336 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_flushed_msg,
3707 break;
3708
3709 case CMD_ABORT_XRI_CX: /* Abort command */
3710
3711 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_flushed_msg,
4337 "ABORT_XRI_CX: rpi=%d iotag=%x status=%x parm=%x",
3712 "ABORT_XRI_CX: rpi=%d iotag=%x status=%x parm=%x sbp=%p",
4338 (uint32_t)iocb->un.acxri.abortContextTag,
3713 (uint32_t)iocb->un.acxri.abortContextTag,
4339 (uint32_t)iocb->un.acxri.abortIoTag,
4340 iocb->ulpStatus, iocb->un.acxri.parm);
3714 (uint32_t)iocb->un.acxri.abortIoTag, iocb->ulpStatus,
3715 iocb->un.acxri.parm, iocbq->sbp);
4341
3716
3717#ifdef SFCT_SUPPORT
3718 if (port->tgt_mode) {
3719 (void) emlxs_fct_handle_abort(hba, rp, iocbq);
3720 }
3721#endif /* SFCT_SUPPORT */
4342 break;
4343
4344 case CMD_XRI_ABORTED_CX: /* Handle ABORT condition */
4345
4346 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_flushed_msg,
4347 "XRI_ABORTED_CX: rpi=%d iotag=%x status=%x parm=%x",
4348 (uint32_t)iocb->un.acxri.abortContextTag,
3722 break;
3723
3724 case CMD_XRI_ABORTED_CX: /* Handle ABORT condition */
3725
3726 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_flushed_msg,
3727 "XRI_ABORTED_CX: rpi=%d iotag=%x status=%x parm=%x",
3728 (uint32_t)iocb->un.acxri.abortContextTag,
4349 (uint32_t)iocb->un.acxri.abortIoTag,
4350 iocb->ulpStatus, iocb->un.acxri.parm);
3729 (uint32_t)iocb->un.acxri.abortIoTag, iocb->ulpStatus,
3730 iocb->un.acxri.parm);
4351
3731
3732#ifdef SFCT_SUPPORT
3733 if (port->tgt_mode) {
3734 (void) emlxs_fct_handle_abort(hba, rp, iocbq);
3735 }
3736#endif /* SFCT_SUPPORT */
4352 break;
4353
4354 case CMD_CLOSE_XRI_CN: /* Handle CLOSE condition */
4355
4356 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_flushed_msg,
4357 "CLOSE_XRI_CR: rpi=%d iotag=%x status=%x parm=%x",
4358 (uint32_t)iocb->un.acxri.abortContextTag,
3737 break;
3738
3739 case CMD_CLOSE_XRI_CN: /* Handle CLOSE condition */
3740
3741 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_flushed_msg,
3742 "CLOSE_XRI_CR: rpi=%d iotag=%x status=%x parm=%x",
3743 (uint32_t)iocb->un.acxri.abortContextTag,
4359 (uint32_t)iocb->un.acxri.abortIoTag,
4360 iocb->ulpStatus, iocb->un.acxri.parm);
3744 (uint32_t)iocb->un.acxri.abortIoTag, iocb->ulpStatus,
3745 iocb->un.acxri.parm);
4361
3746
3747#ifdef SFCT_SUPPORT
3748 if (port->tgt_mode) {
3749 (void) emlxs_fct_handle_abort(hba, rp, iocbq);
3750 }
3751#endif /* SFCT_SUPPORT */
4362 break;
4363
4364 case CMD_CLOSE_XRI_CX: /* Handle CLOSE condition */
4365
4366 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_flushed_msg,
3752 break;
3753
3754 case CMD_CLOSE_XRI_CX: /* Handle CLOSE condition */
3755
3756 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_flushed_msg,
4367 "CLOSE_XRI_CX: rpi=%d iotag=%x status=%x parm=%x",
3757 "CLOSE_XRI_CX: rpi=%d iotag=%x status=%x parm=%x sbp=%p",
4368 (uint32_t)iocb->un.acxri.abortContextTag,
3758 (uint32_t)iocb->un.acxri.abortContextTag,
4369 (uint32_t)iocb->un.acxri.abortIoTag,
4370 iocb->ulpStatus, iocb->un.acxri.parm);
3759 (uint32_t)iocb->un.acxri.abortIoTag, iocb->ulpStatus,
3760 iocb->un.acxri.parm, iocbq->sbp);
4371
3761
3762#ifdef SFCT_SUPPORT
3763 if (port->tgt_mode) {
3764 (void) emlxs_fct_handle_abort(hba, rp, iocbq);
3765 }
3766#endif /* SFCT_SUPPORT */
4372 break;
4373
4374 case CMD_ADAPTER_MSG:
4375 /* Allows debug adapter firmware messages to print on host */
4376 bzero(buffer, sizeof (buffer));
4377 bcopy((uint8_t *)iocb, buffer, MAX_MSG_DATA);
4378
4379 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_adapter_msg, "%s", buffer);
4380
4381 break;
4382
4383 case CMD_QUE_RING_LIST64_CN:
4384 case CMD_QUE_RING_BUF64_CN:
4385 break;
4386
4387 case CMD_ASYNC_STATUS:
3767 break;
3768
3769 case CMD_ADAPTER_MSG:
3770 /* Allows debug adapter firmware messages to print on host */
3771 bzero(buffer, sizeof (buffer));
3772 bcopy((uint8_t *)iocb, buffer, MAX_MSG_DATA);
3773
3774 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_adapter_msg, "%s", buffer);
3775
3776 break;
3777
3778 case CMD_QUE_RING_LIST64_CN:
3779 case CMD_QUE_RING_BUF64_CN:
3780 break;
3781
3782 case CMD_ASYNC_STATUS:
4388 (void) emlxs_handle_async_event(hba, rp, iocbq);
3783 emlxs_handle_async_event(hba, rp, iocbq);
4389 break;
4390
4391 default:
4392 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_iocb_invalid_msg,
3784 break;
3785
3786 default:
3787 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_iocb_invalid_msg,
4393 "cmd=%x status=%x iotag=%x context=%x",
4394 iocb->ulpCommand, iocb->ulpStatus, iocb->ulpIoTag,
4395 iocb->ulpContext);
3788 "cmd=%x status=%x iotag=%x context=%x", iocb->ulpCommand,
3789 iocb->ulpStatus, iocb->ulpIoTag, iocb->ulpContext);
4396
4397 break;
4398 } /* switch(entry->ulpCommand) */
4399
4400 return;
4401
3790
3791 break;
3792 } /* switch(entry->ulpCommand) */
3793
3794 return;
3795
4402} /* emlxs_proc_ring_event() */
3796} /* emlxs_proc_ring_event() */
4403
4404
4405
3797
3798
3799
4406static int
4407emlxs_handle_rcv_seq(emlxs_hba_t *hba, RING *rp, IOCBQ *iocbq)
4408{
4409 emlxs_port_t *port = &PPORT;
4410 IOCB *iocb;
4411 MATCHMAP *mp = NULL;
4412 uint64_t bdeAddr;
4413 uint32_t vpi = 0;
4414 uint32_t ringno;
4415 uint32_t size = 0;
4416 uint32_t *RcvError;
4417 uint32_t *RcvDropped;
4418 uint32_t *UbPosted;
4419 emlxs_msg_t *dropped_msg;
4420 char error_str[64];
4421 uint32_t buf_type;
4422 uint32_t *word;
4423
4424#ifdef SLI3_SUPPORT
4425 uint32_t hbq_id;
4426#endif /* SLI3_SUPPORT */
4427
4428 ringno = rp->ringno;
4429 iocb = &iocbq->iocb;
4430 word = (uint32_t *)iocb;
4431
4432 switch (ringno) {
4433#ifdef SFCT_SUPPORT
4434 case FC_FCT_RING:
4435 HBASTATS.FctRingEvent++;
4436 RcvError = &HBASTATS.FctRingError;
4437 RcvDropped = &HBASTATS.FctRingDropped;
4438 UbPosted = &HBASTATS.FctUbPosted;
4439 dropped_msg = &emlxs_fct_detail_msg;
4440 buf_type = MEM_FCTBUF;
4441 break;
4442#endif /* SFCT_SUPPORT */
4443
4444 case FC_IP_RING:
4445 HBASTATS.IpRcvEvent++;
4446 RcvError = &HBASTATS.IpDropped;
4447 RcvDropped = &HBASTATS.IpDropped;
4448 UbPosted = &HBASTATS.IpUbPosted;
4449 dropped_msg = &emlxs_unsol_ip_dropped_msg;
4450 buf_type = MEM_IPBUF;
4451 break;
4452
4453 case FC_ELS_RING:
4454 HBASTATS.ElsRcvEvent++;
4455 RcvError = &HBASTATS.ElsRcvError;
4456 RcvDropped = &HBASTATS.ElsRcvDropped;
4457 UbPosted = &HBASTATS.ElsUbPosted;
4458 dropped_msg = &emlxs_unsol_els_dropped_msg;
4459 buf_type = MEM_ELSBUF;
4460 break;
4461
4462 case FC_CT_RING:
4463 HBASTATS.CtRcvEvent++;
4464 RcvError = &HBASTATS.CtRcvError;
4465 RcvDropped = &HBASTATS.CtRcvDropped;
4466 UbPosted = &HBASTATS.CtUbPosted;
4467 dropped_msg = &emlxs_unsol_ct_dropped_msg;
4468 buf_type = MEM_CTBUF;
4469 break;
4470
4471 default:
4472 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_iocb_invalid_msg,
4473 "ring=%d cmd=%x %s %x %x %x %x",
4474 ringno, iocb->ulpCommand,
4475 emlxs_state_xlate(iocb->ulpStatus),
4476 word[4], word[5], word[6], word[7]);
4477 return (1);
4478 }
4479
4480 if (iocb->ulpStatus) {
4481 if ((iocb->ulpStatus == IOSTAT_LOCAL_REJECT) &&
4482 (iocb->un.grsp.perr.statLocalError ==
4483 IOERR_RCV_BUFFER_TIMEOUT)) {
4484 (void) strcpy(error_str, "Out of posted buffers:");
4485 } else if ((iocb->ulpStatus == IOSTAT_LOCAL_REJECT) &&
4486 (iocb->un.grsp.perr.statLocalError ==
4487 IOERR_RCV_BUFFER_WAITING)) {
4488 (void) strcpy(error_str, "Buffer waiting:");
4489 goto done;
4490 } else if (iocb->ulpStatus == IOSTAT_ILLEGAL_FRAME_RCVD) {
4491 (void) strcpy(error_str, "Illegal frame:");
4492 } else {
4493 (void) strcpy(error_str, "General error:");
4494 }
4495
4496 goto failed;
4497 }
4498#ifdef SLI3_SUPPORT
4499 if (hba->flag & FC_HBQ_ENABLED) {
4500 HBQ_INIT_t *hbq;
4501 HBQE_t *hbqE;
4502 uint32_t hbqe_tag;
4503
4504 *UbPosted -= 1;
4505
4506 hbqE = (HBQE_t *)iocb;
4507 hbq_id = hbqE->unt.ext.HBQ_tag;
4508 hbqe_tag = hbqE->unt.ext.HBQE_tag;
4509
4510 hbq = &hba->hbq_table[hbq_id];
4511
4512 if (hbqe_tag >= hbq->HBQ_numEntries) {
4513 (void) sprintf(error_str, "Invalid HBQE tag=%x:",
4514 hbqe_tag);
4515 goto dropped;
4516 }
4517 mp = hba->hbq_table[hbq_id].HBQ_PostBufs[hbqe_tag];
4518
4519 size = iocb->unsli3.ext_rcv.seq_len;
4520 } else
4521#endif /* SLI3_SUPPORT */
4522 {
4523 bdeAddr = getPaddr(iocb->un.cont64[0].addrHigh,
4524 iocb->un.cont64[0].addrLow);
4525
4526 /* Check for invalid buffer */
4527 if (iocb->un.cont64[0].tus.f.bdeFlags & BUFF_TYPE_INVALID) {
4528 (void) strcpy(error_str, "Invalid buffer:");
4529 goto dropped;
4530 }
4531 mp = emlxs_mem_get_vaddr(hba, rp, bdeAddr);
4532
4533 size = iocb->un.rcvseq64.rcvBde.tus.f.bdeSize;
4534 }
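	/*
	 * Note: with host buffer queues (HBQs) the receive buffer is found by
	 * the HBQ id/tag carried in the extended IOCB; otherwise the 64-bit
	 * BDE address is mapped back to the posted buffer via
	 * emlxs_mem_get_vaddr().
	 */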
4535
4536 if (!mp) {
4537 (void) strcpy(error_str, "Buffer not mapped:");
4538 goto dropped;
4539 }
4540 if (!size) {
4541 (void) strcpy(error_str, "Buffer empty:");
4542 goto dropped;
4543 }
4544#ifdef SLI3_SUPPORT
4545 /* Avoid dropping broadcast packets */
4546 if (ringno != FC_IP_RING) {
4547 /* Get virtual port */
4548 if (hba->flag & FC_NPIV_ENABLED) {
4549 vpi = iocb->unsli3.ext_rcv.vpi;
4550 if (vpi >= hba->vpi_max) {
4551 (void) sprintf(error_str, "Invalid VPI=%d:",
4552 vpi);
4553 goto dropped;
4554 }
4555 port = &VPORT(vpi);
4556 }
4557 }
4558#endif /* SLI3_SUPPORT */
4559
4560 /* Process request */
4561 switch (ringno) {
4562#ifdef SFCT_SUPPORT
4563 case FC_FCT_RING:
4564 (void) emlxs_fct_handle_unsol_req(port, rp, iocbq, mp, size);
4565 break;
4566#endif /* SFCT_SUPPORT */
4567
4568 case FC_IP_RING:
4569 (void) emlxs_ip_handle_unsol_req(port, rp, iocbq, mp, size);
4570 break;
4571
4572 case FC_ELS_RING:
4573 /* If this is a target port, then let fct handle this */
4574#ifdef SFCT_SUPPORT
4575 if (port->tgt_mode) {
4576 (void) emlxs_fct_handle_unsol_els(port, rp, iocbq,
4577 mp, size);
4578 } else {
4579 (void) emlxs_els_handle_unsol_req(port, rp, iocbq,
4580 mp, size);
4581 }
4582#else
4583 (void) emlxs_els_handle_unsol_req(port, rp, iocbq,
4584 mp, size);
4585#endif /* SFCT_SUPPORT */
4586 break;
4587
4588 case FC_CT_RING:
4589 (void) emlxs_ct_handle_unsol_req(port, rp, iocbq, mp, size);
4590 break;
4591 }
4592
4593 goto done;
4594
4595dropped:
4596 *RcvDropped += 1;
4597
4598 EMLXS_MSGF(EMLXS_CONTEXT, dropped_msg,
4599 "%s: cmd=%x %s %x %x %x %x",
4600 error_str, iocb->ulpCommand, emlxs_state_xlate(iocb->ulpStatus),
4601 word[4], word[5], word[6], word[7]);
4602
4603 if (ringno == FC_FCT_RING) {
4604 uint32_t sid;
4605
4606#ifdef SLI3_SUPPORT
4607 if (hba->sli_mode >= 3) {
4608 emlxs_node_t *ndlp;
4609 ndlp = emlxs_node_find_rpi(port, iocb->ulpIoTag);
4610 sid = ndlp->nlp_DID;
4611 } else
4612#endif /* SLI3_SUPPORT */
4613 {
4614 sid = iocb->un.ulpWord[4] & 0xFFFFFF;
4615 }
4616
4617 emlxs_send_logo(port, sid);
4618 }
4619 goto done;
4620
4621failed:
4622 *RcvError += 1;
4623
4624 EMLXS_MSGF(EMLXS_CONTEXT, dropped_msg,
4625 "%s: cmd=%x %s %x %x %x %x hba:%x %x",
4626 error_str, iocb->ulpCommand, emlxs_state_xlate(iocb->ulpStatus),
4627 word[4], word[5], word[6], word[7], hba->state, hba->flag);
4628
4629done:
4630
4631#ifdef SLI3_SUPPORT
4632 if (hba->flag & FC_HBQ_ENABLED) {
4633 emlxs_update_HBQ_index(hba, hbq_id);
4634 } else
4635#endif /* SLI3_SUPPORT */
4636 {
4637 if (mp) {
4638 (void) emlxs_mem_put(hba, buf_type, (uint8_t *)mp);
4639 }
4640 (void) emlxs_post_buffer(hba, rp, 1);
4641 }
4642
4643 return (0);
4644
4645} /* emlxs_handle_rcv_seq() */
4646
4647
4648
4649extern void
4650emlxs_issue_iocb_cmd(emlxs_hba_t *hba, RING *rp, IOCBQ *iocbq)
4651{
4652 PGP *pgp;
4653 emlxs_buf_t *sbp;
4654 SLIM2 *slim2p = (SLIM2 *)hba->slim2.virt;
4655 uint32_t nextIdx;
4656 uint32_t status;
4657 void *ioa2;
4658 off_t offset;
4659 uint32_t count;
4660 uint32_t ringno;
4661 int32_t throttle;
4662
4663 ringno = rp->ringno;
4664 throttle = 0;
4665
4666begin:
4667
4668 /* Check if FCP ring and adapter is not ready */
4669 if ((ringno == FC_FCP_RING) && (hba->state != FC_READY)) {
4670 if (!iocbq) {
4671 return;
4672 }
4673 if (!(iocbq->flag & IOCB_SPECIAL) || !iocbq->port ||
4674 !(((emlxs_port_t *)iocbq->port)->tgt_mode)) {
4675 emlxs_tx_put(iocbq, 1);
4676 return;
4677 }
4678 }
4679 /* Attempt to acquire CMD_RING lock */
4680 if (mutex_tryenter(&EMLXS_CMD_RING_LOCK(ringno)) == 0) {
4681 /* Queue it for later */
4682 if (iocbq) {
4683 if ((hba->io_count[ringno] -
4684 hba->ring_tx_count[ringno]) > 10) {
4685 emlxs_tx_put(iocbq, 1);
4686 return;
4687 } else {
4688
4689 /*
4690 * EMLXS_MSGF(EMLXS_CONTEXT,
4691 * &emlxs_ring_watchdog_msg, "%s host=%d
4692 * port=%d cnt=%d,%d RACE CONDITION3
4693 * DETECTED.", emlxs_ring_xlate(ringno),
4694 * rp->fc_cmdidx, rp->fc_port_cmdidx,
4695 * hba->ring_tx_count[ringno],
4696 * hba->io_count[ringno]);
4697 */
4698 mutex_enter(&EMLXS_CMD_RING_LOCK(ringno));
4699 }
4700 } else {
4701 return;
4702 }
4703 }
4704 /* CMD_RING_LOCK acquired */
4705
4706 /* Check if HBA is full */
4707 throttle = hba->io_throttle - hba->io_active;
4708 if (throttle <= 0) {
4709 /* Hitting adapter throttle limit */
4710 /* Queue it for later */
4711 if (iocbq) {
4712 emlxs_tx_put(iocbq, 1);
4713 }
4714 goto busy;
4715 }
4716 /* Read adapter's get index */
4717 pgp = (PGP *) & ((SLIM2 *) hba->slim2.virt)->mbx.us.s2.port[ringno];
4718 offset = (off_t)((uint64_t)(unsigned long)&(pgp->cmdGetInx) -
4719 (uint64_t)(unsigned long)hba->slim2.virt);
4720 emlxs_mpdata_sync(hba->slim2.dma_handle, offset, 4,
4721 DDI_DMA_SYNC_FORKERNEL);
4722 rp->fc_port_cmdidx = PCIMEM_LONG(pgp->cmdGetInx);
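	/*
	 * Note: cmdGetInx is the adapter's consumer index for this command
	 * ring; it is re-read here (after a DMA sync) so the ring-full check
	 * below sees the adapter's latest position.
	 */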
4723
4724 /* Calculate the next put index */
4725 nextIdx = (rp->fc_cmdidx + 1 >= rp->fc_numCiocb) ?
4726 0 : rp->fc_cmdidx + 1;
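	/*
	 * Note: the ring is treated as full when advancing the put index
	 * would make it equal to the adapter's get index, so one slot is
	 * always left unused to distinguish a full ring from an empty one.
	 */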
4727
4728 /* Check if ring is full */
4729 if (nextIdx == rp->fc_port_cmdidx) {
4730 /* Try one more time */
4731 emlxs_mpdata_sync(hba->slim2.dma_handle, offset, 4,
4732 DDI_DMA_SYNC_FORKERNEL);
4733 rp->fc_port_cmdidx = PCIMEM_LONG(pgp->cmdGetInx);
4734
4735 if (nextIdx == rp->fc_port_cmdidx) {
4736 /* Queue it for later */
4737 if (iocbq) {
4738 emlxs_tx_put(iocbq, 1);
4739 }
4740 goto busy;
4741 }
4742 }
4743 /* We have a command ring slot available */
4744 /* Make sure we have an iocb to send */
4745
4746 if (iocbq) {
4747 mutex_enter(&EMLXS_RINGTX_LOCK);
4748
4749 /* Check if the ring already has iocbs waiting */
4750 if (rp->nodeq.q_first != NULL) {
4751 /* Put the current iocbq on the tx queue */
4752 emlxs_tx_put(iocbq, 0);
4753
4754 /*
4755 * Attempt to replace it with the next iocbq in the
4756 * tx queue
4757 */
4758 iocbq = emlxs_tx_get(rp, 0);
4759 }
4760 mutex_exit(&EMLXS_RINGTX_LOCK);
4761 } else {
4762 /* Try to get the next iocb on the tx queue */
4763 iocbq = emlxs_tx_get(rp, 1);
4764 }
4765
4766sendit:
4767 count = 0;
4768
4769 /* Process each iocbq */
4770 while (iocbq) {
4771
4772#ifdef NPIV_SUPPORT
4773 sbp = iocbq->sbp;
4774 if (sbp && (sbp->pkt_flags & PACKET_DELAY_REQUIRED)) {
4775 /*
4776 * Update adapter if needed, since we are about to
4777 * delay here
4778 */
4779 if (count) {
4780 count = 0;
4781
4782 /* Update the adapter's cmd put index */
4783 if (hba->bus_type == SBUS_FC) {
4784 slim2p->mbx.us.s2.host[ringno].
4785 cmdPutInx =
4786 PCIMEM_LONG(rp->fc_cmdidx);
4787
4788 /* DMA sync the index for the adapter */
4789 offset =
4790 (off_t)
4791 ((uint64_t)(unsigned long)&(slim2p->
4792 mbx.us.s2.host[ringno].cmdPutInx) -
4793 (uint64_t)(unsigned long)slim2p);
4794 emlxs_mpdata_sync(hba->slim2.dma_handle,
4795 offset, 4, DDI_DMA_SYNC_FORDEV);
4796 } else {
4797 ioa2 = (void *)((char *)hba->slim_addr +
4798 hba->hgp_ring_offset +
4799 ((ringno * 2) * sizeof (uint32_t)));
4800 WRITE_SLIM_ADDR(hba,
4801 (volatile uint32_t *)ioa2,
4802 rp->fc_cmdidx);
4803 }
4804
4805 status = (CA_R0ATT << (ringno * 4));
4806 WRITE_CSR_REG(hba,
4807 FC_CA_REG(hba, hba->csr_addr),
4808 (volatile uint32_t)status);
4809
4810 }
4811 /* Perform delay */
4812 if (ringno == FC_ELS_RING) {
4813 (void) drv_usecwait(100000);
4814 } else {
4815 (void) drv_usecwait(20000);
4816 }
4817 }
4818#endif /* NPIV_SUPPORT */
4819
4820 /* At this point, we have a command ring slot available */
4821 /* and an iocb to send */
4822
4823 /* Send the iocb */
4824 emlxs_issue_iocb(hba, rp, iocbq);
4825
4826 count++;
4827
4828 /* Check if HBA is full */
4829 throttle = hba->io_throttle - hba->io_active;
4830 if (throttle <= 0) {
4831 goto busy;
4832 }
4833 /* Calculate the next put index */
4834 nextIdx = (rp->fc_cmdidx + 1 >= rp->fc_numCiocb) ?
4835 0 : rp->fc_cmdidx + 1;
4836
4837 /* Check if ring is full */
4838 if (nextIdx == rp->fc_port_cmdidx) {
4839 /* Try one more time */
4840 emlxs_mpdata_sync(hba->slim2.dma_handle, offset, 4,
4841 DDI_DMA_SYNC_FORKERNEL);
4842 rp->fc_port_cmdidx = PCIMEM_LONG(pgp->cmdGetInx);
4843
4844 if (nextIdx == rp->fc_port_cmdidx) {
4845 goto busy;
4846 }
4847 }
4848 /* Get the next iocb from the tx queue if there is one */
4849 iocbq = emlxs_tx_get(rp, 1);
4850 }
4851
4852 if (count) {
4853 /* Update the adapter's cmd put index */
4854 if (hba->bus_type == SBUS_FC) {
4855 slim2p->mbx.us.s2.host[ringno].
4856 cmdPutInx = PCIMEM_LONG(rp->fc_cmdidx);
4857
4858 /* DMA sync the index for the adapter */
4859 offset = (off_t)
4860 ((uint64_t)(unsigned long)&(slim2p->mbx.us.s2.
4861 host[ringno].cmdPutInx) -
4862 (uint64_t)(unsigned long)slim2p);
4863 emlxs_mpdata_sync(hba->slim2.dma_handle, offset, 4,
4864 DDI_DMA_SYNC_FORDEV);
4865 } else {
4866 ioa2 = (void *) ((char *)hba->slim_addr +
4867 hba->hgp_ring_offset + ((ringno * 2) *
4868 sizeof (uint32_t)));
4869 WRITE_SLIM_ADDR(hba,
4870 (volatile uint32_t *)ioa2, rp->fc_cmdidx);
4871 }
4872
4873 status = (CA_R0ATT << (ringno * 4));
4874 WRITE_CSR_REG(hba, FC_CA_REG(hba, hba->csr_addr),
4875 (volatile uint32_t)status);
4876
4877 /* Check tx queue one more time before releasing */
4878 if ((iocbq = emlxs_tx_get(rp, 1))) {
4879 /*
4880 * EMLXS_MSGF(EMLXS_CONTEXT,
4881 * &emlxs_ring_watchdog_msg, "%s host=%d port=%d
4882 * RACE CONDITION1 DETECTED.",
4883 * emlxs_ring_xlate(ringno), rp->fc_cmdidx,
4884 * rp->fc_port_cmdidx);
4885 */
4886 goto sendit;
4887 }
4888 }
4889 mutex_exit(&EMLXS_CMD_RING_LOCK(ringno));
4890
4891 return;
4892
4893busy:
4894
4895 /*
4896 * Set R0CE_REQ for this ring in the Chip Attention register. The chip
4897 * will tell us when a command ring entry is freed.
4898 */
4899 if (count) {
4900 /* Update the adapter's cmd put index */
4901 if (hba->bus_type == SBUS_FC) {
4902 slim2p->mbx.us.s2.host[ringno].cmdPutInx =
4903 PCIMEM_LONG(rp->fc_cmdidx);
4904
4905 /* DMA sync the index for the adapter */
4906 offset = (off_t)
4907 ((uint64_t)(unsigned long)&(slim2p->mbx.us.s2.
4908 host[ringno].cmdPutInx) -
4909 (uint64_t)(unsigned long)slim2p);
4910 emlxs_mpdata_sync(hba->slim2.dma_handle, offset, 4,
4911 DDI_DMA_SYNC_FORDEV);
4912 } else {
4913 ioa2 = (void *) ((char *)hba->slim_addr +
4914 hba->hgp_ring_offset + ((ringno * 2) *
4915 sizeof (uint32_t)));
4916 WRITE_SLIM_ADDR(hba, (volatile uint32_t *) ioa2,
4917 rp->fc_cmdidx);
4918 }
4919 }
4920 status = ((CA_R0ATT | CA_R0CE_REQ) << (ringno * 4));
4921 WRITE_CSR_REG(hba, FC_CA_REG(hba, hba->csr_addr),
4922 (volatile uint32_t) status);
4923
4924 if (throttle <= 0) {
4925 HBASTATS.IocbThrottled++;
4926 } else {
4927 HBASTATS.IocbRingFull[ringno]++;
4928 }
4929
4930 mutex_exit(&EMLXS_CMD_RING_LOCK(ringno));
4931
4932 return;
4933
4934} /* emlxs_issue_iocb_cmd() */
4935
4936
4937
4938/* EMLXS_CMD_RING_LOCK must be held when calling this function */
4939static void
4940emlxs_issue_iocb(emlxs_hba_t *hba, RING *rp, IOCBQ *iocbq)
4941{
4942 emlxs_port_t *port;
4943 IOCB *icmd;
4944 IOCB *iocb;
4945 emlxs_buf_t *sbp;
4946 off_t offset;
4947 uint32_t ringno;
4948
4949 ringno = rp->ringno;
4950 sbp = iocbq->sbp;
4951 icmd = &iocbq->iocb;
4952 port = iocbq->port;
4953
4954 HBASTATS.IocbIssued[ringno]++;
4955
4956 /* Check for ULP pkt request */
4957 if (sbp) {
4958 mutex_enter(&sbp->mtx);
4959
4960 if (sbp->node == NULL) {
4961 /* Set node to base node by default */
4962 iocbq->node = (void *) &port->node_base;
4963 sbp->node = (void *) &port->node_base;
4964 }
4965 sbp->pkt_flags |= PACKET_IN_CHIPQ;
4966 mutex_exit(&sbp->mtx);
4967
4968 atomic_add_32(&hba->io_active, 1);
4969 }
4970 /* get the next available command ring iocb */
4971 iocb = (IOCB *) (((char *)rp->fc_cmdringaddr +
4972 (rp->fc_cmdidx * hba->iocb_cmd_size)));
4973
4974 /* Copy the local iocb to the command ring iocb */
4975 emlxs_pcimem_bcopy((uint32_t *)icmd, (uint32_t *)iocb,
4976 hba->iocb_cmd_size);
4977
4978 /* DMA sync the command ring iocb for the adapter */
4979 offset = (off_t)((uint64_t)(unsigned long)iocb -
4980 (uint64_t)(unsigned long)hba->slim2.virt);
4981 emlxs_mpdata_sync(hba->slim2.dma_handle, offset,
4982 hba->iocb_cmd_size, DDI_DMA_SYNC_FORDEV);
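	/*
	 * Note: the IOCB is copied into the command ring and synced to the
	 * device here; the caller later updates cmdPutInx and rings the Chip
	 * Attention doorbell (CA_R0ATT) for the whole batch.
	 */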
4983
4984 /* Free the local iocb if there is no sbp tracking it */
4985 if (!sbp) {
4986 (void) emlxs_mem_put(hba, MEM_IOCB, (uint8_t *)iocbq);
4987 }
4988 /* update local ring index to next available ring index */
4989 rp->fc_cmdidx = (rp->fc_cmdidx + 1 >= rp->fc_numCiocb) ?
4990 0 : rp->fc_cmdidx + 1;
4991
4992
4993 return;
4994
4995} /* emlxs_issue_iocb() */
4996
4997
4998extern uint32_t
4999emlxs_interlock(emlxs_hba_t *hba)
5000{
5001 emlxs_port_t *port = &PPORT;
5002 MAILBOX *swpmb;
5003 MAILBOX *mb2;
5004 MAILBOX *mb1;
5005 uint32_t word0;
5006 uint32_t j;
5007 uint32_t interlock_failed;
5008 uint32_t ha_copy;
5009 uint32_t value;
5010 off_t offset;
5011 uint32_t size;
5012
5013 interlock_failed = 0;
5014
5015 mutex_enter(&EMLXS_PORT_LOCK);
5016 if (hba->flag & FC_INTERLOCKED) {
5017 emlxs_ffstate_change_locked(hba, FC_KILLED);
5018
5019 mutex_exit(&EMLXS_PORT_LOCK);
5020
5021 return (FC_SUCCESS);
5022 }
5023 j = 0;
5024 while (j++ < 10000) {
5025 if (hba->mbox_queue_flag == 0) {
5026 break;
5027 }
5028 mutex_exit(&EMLXS_PORT_LOCK);
5029 DELAYUS(100);
5030 mutex_enter(&EMLXS_PORT_LOCK);
5031 }
5032
5033 if (hba->mbox_queue_flag != 0) {
5034 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
5035 "Interlock failed. Mailbox busy.");
5036 mutex_exit(&EMLXS_PORT_LOCK);
5037 return (FC_SUCCESS);
5038 }
5039 hba->flag |= FC_INTERLOCKED;
5040 hba->mbox_queue_flag = 1;
5041
5042 /* Disable all host interrupts */
5043 hba->hc_copy = 0;
5044 WRITE_CSR_REG(hba, FC_HC_REG(hba, hba->csr_addr), hba->hc_copy);
5045 WRITE_CSR_REG(hba, FC_HA_REG(hba, hba->csr_addr), 0xffffffff);
5046
5047 mb2 = FC_SLIM2_MAILBOX(hba);
5048 mb1 = FC_SLIM1_MAILBOX(hba);
5049 swpmb = (MAILBOX *) & word0;
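	/*
	 * Note: swpmb overlays the local word0, so the mbxCommand/mbxOwner
	 * bit-fields can be composed in a local variable and then written to
	 * SLIM with a single 32-bit store.
	 */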
5050
5051 if (!(hba->flag & FC_SLIM2_MODE)) {
5052 goto mode_B;
5053 }
5054mode_A:
5055
5056 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
5057 "Attempting SLIM2 Interlock...");
5058
5059interlock_A:
5060
5061 value = 0xFFFFFFFF;
5062 word0 = 0;
5063 swpmb->mbxCommand = MBX_KILL_BOARD;
5064 swpmb->mbxOwner = OWN_CHIP;
5065
5066 /* Write value to SLIM */
5067 WRITE_SLIM_ADDR(hba, (((volatile uint32_t *) mb1) + 1), value);
5068 WRITE_SLIM_ADDR(hba, (((volatile uint32_t *) mb1)), word0);
5069
5070 /* Send Kill board request */
5071 mb2->un.varWords[0] = value;
5072 mb2->mbxCommand = MBX_KILL_BOARD;
5073 mb2->mbxOwner = OWN_CHIP;
5074
5075 /* Sync the memory */
5076 offset = (off_t)((uint64_t)(unsigned long)mb2 -
5077 (uint64_t)(unsigned long)hba->slim2.virt);
5078 size = (sizeof (uint32_t) * 2);
5079 emlxs_pcimem_bcopy((uint32_t *)mb2, (uint32_t *)mb2, size);
5080 emlxs_mpdata_sync(hba->slim2.dma_handle, offset, size,
5081 DDI_DMA_SYNC_FORDEV);
5082
5083 /* interrupt board to do it right away */
5084 WRITE_CSR_REG(hba, FC_CA_REG(hba, hba->csr_addr), CA_MBATT);
5085
5086 /* First wait for command acceptance */
5087 j = 0;
5088 while (j++ < 1000) {
5089 value = READ_SLIM_ADDR(hba, (((volatile uint32_t *) mb1) + 1));
5090
5091 if (value == 0) {
5092 break;
5093 }
5094 DELAYUS(50);
5095 }
5096
5097 if (value == 0) {
5098 /* Now wait for mailbox ownership to clear */
5099 while (j++ < 10000) {
5100 word0 = READ_SLIM_ADDR(hba,
5101 ((volatile uint32_t *)mb1));
5102
5103 if (swpmb->mbxOwner == 0) {
5104 break;
5105 }
5106 DELAYUS(50);
5107 }
5108
5109 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
5110 "Interlock succeeded.");
5111
5112 goto done;
5113 }
5114 /* Interlock failed !!! */
5115 interlock_failed = 1;
5116
5117 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
5118 "Interlock failed.");
5119
5120mode_B:
5121
5122 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
5123 "Attempting SLIM1 Interlock...");
5124
5125interlock_B:
5126
5127 value = 0xFFFFFFFF;
5128 word0 = 0;
5129 swpmb->mbxCommand = MBX_KILL_BOARD;
5130 swpmb->mbxOwner = OWN_CHIP;
5131
5132 /* Write KILL BOARD to mailbox */
5133 WRITE_SLIM_ADDR(hba, (((volatile uint32_t *) mb1) + 1), value);
5134 WRITE_SLIM_ADDR(hba, ((volatile uint32_t *) mb1), word0);
5135
5136 /* interrupt board to do it right away */
5137 WRITE_CSR_REG(hba, FC_CA_REG(hba, hba->csr_addr), CA_MBATT);
5138
5139 /* First wait for command acceptance */
5140 j = 0;
5141 while (j++ < 1000) {
5142 value = READ_SLIM_ADDR(hba, (((volatile uint32_t *) mb1) + 1));
5143
5144 if (value == 0) {
5145 break;
5146 }
5147 DELAYUS(50);
5148 }
5149
5150 if (value == 0) {
5151 /* Now wait for mailbox ownership to clear */
5152 while (j++ < 10000) {
5153 word0 = READ_SLIM_ADDR(hba,
5154 ((volatile uint32_t *)mb1));
5155
5156 if (swpmb->mbxOwner == 0) {
5157 break;
5158 }
5159 DELAYUS(50);
5160 }
5161
5162 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
5163 "Interlock succeeded.");
5164
5165 goto done;
5166 }
5167 /* Interlock failed !!! */
5168
5169 /* If this is the first time then try again */
5170 if (interlock_failed == 0) {
5171 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
5172 "Interlock failed. Retrying...");
5173
5174 /* Try again */
5175 interlock_failed = 1;
5176 goto interlock_B;
5177 }
5178 /*
5179 * Now check for error attention to indicate the board has been
5180 * killed
5181 */
5182 j = 0;
5183 while (j++ < 10000) {
5184 ha_copy = READ_CSR_REG(hba, FC_HA_REG(hba, hba->csr_addr));
5185
5186 if (ha_copy & HA_ERATT) {
5187 break;
5188 }
5189 DELAYUS(50);
5190 }
5191
5192 if (ha_copy & HA_ERATT) {
5193 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
5194 "Interlock failed. Board killed.");
5195 } else {
5196 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
5197 "Interlock failed. Board not killed.");
5198 }
5199
5200done:
5201
5202 hba->mbox_queue_flag = 0;
5203
5204 emlxs_ffstate_change_locked(hba, FC_KILLED);
5205
5206 mutex_exit(&EMLXS_PORT_LOCK);
5207
5208 return (FC_SUCCESS);
5209
5210} /* emlxs_interlock() */
5211
5212
5213
5214extern uint32_t
5215emlxs_hba_reset(emlxs_hba_t *hba, uint32_t restart, uint32_t skip_post)
5216{
5217 emlxs_port_t *port = &PPORT;
5218 MAILBOX *swpmb;
5219 MAILBOX *mb;
5220 uint32_t word0;
5221 uint16_t cfg_value;
5222 uint32_t status;
5223 uint32_t status1;
5224 uint32_t status2;
5225 uint32_t i;
5226 uint32_t ready;
5227 emlxs_port_t *vport;
5228 RING *rp;
5229 emlxs_config_t *cfg = &CFG;
5230
5231 i = 0;
5232
5233 if (!cfg[CFG_RESET_ENABLE].current) {
5234 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_reset_failed_msg,
5235 "Adapter reset disabled.");
5236 emlxs_ffstate_change(hba, FC_ERROR);
5237
5238 return (1);
5239 }
5240 /* Make sure we have called interlock */
5241 (void) emlxs_interlock(hba);
5242
5243 if (restart) {
5244 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg, "Restarting.");
5245 emlxs_ffstate_change(hba, FC_INIT_START);
5246
5247 ready = (HS_FFRDY | HS_MBRDY);
5248 } else {
5249 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg, "Resetting.");
5250 emlxs_ffstate_change(hba, FC_WARM_START);
5251
5252 ready = HS_MBRDY;
5253 }
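	/*
	 * Note: a restart waits for both firmware-ready and mailbox-ready
	 * (HS_FFRDY | HS_MBRDY) in the status register, while a plain reset
	 * only waits for mailbox-ready.
	 */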
5254
5255 hba->flag &= ~(FC_SLIM2_MODE | FC_HARDWARE_ERROR);
5256
5257 mb = FC_SLIM1_MAILBOX(hba);
5258 swpmb = (MAILBOX *) & word0;
5259
5260reset:
5261
5262 /* Save reset time */
5263 HBASTATS.ResetTime = hba->timer_tics;
5264
5265 if (restart) {
5266 /* First put restart command in mailbox */
5267 word0 = 0;
5268 swpmb->mbxCommand = MBX_RESTART;
5269 swpmb->mbxHc = 1;
5270 WRITE_SLIM_ADDR(hba, ((volatile uint32_t *) mb), word0);
5271
5272 /* Only skip post after emlxs_ffinit is completed */
5273 if (skip_post) {
5274 WRITE_SLIM_ADDR(hba,
5275 (((volatile uint32_t *)mb) + 1), 1);
5276 } else {
5277 WRITE_SLIM_ADDR(hba,
5278 (((volatile uint32_t *)mb) + 1), 0);
5279 }
5280
5281 }
5282 /*
5283 * Turn off SERR, PERR in PCI cmd register
5284 */
5285 cfg_value = ddi_get16(hba->pci_acc_handle,
5286 (uint16_t *)(hba->pci_addr + PCI_COMMAND_REGISTER));
5287
5288 (void) ddi_put16(hba->pci_acc_handle,
5289 (uint16_t *)(hba->pci_addr + PCI_COMMAND_REGISTER),
5290 (uint16_t)(cfg_value & ~(CMD_PARITY_CHK | CMD_SERR_ENBL)));
5291
5292 hba->hc_copy = HC_INITFF;
5293 WRITE_CSR_REG(hba, FC_HC_REG(hba, hba->csr_addr), hba->hc_copy);
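	/*
	 * Note: writing HC_INITFF to the Host Control register starts the
	 * chip reset; SERR/PERR were masked above, presumably so the reset
	 * does not trigger PCI error reporting, and are restored below.
	 */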
5294
5295 /* Wait 1 msec before restoring PCI config */
5296 DELAYMS(1);
5297
5298 /* Restore PCI cmd register */
5299 (void) ddi_put16(hba->pci_acc_handle,
5300 (uint16_t *)(hba->pci_addr + PCI_COMMAND_REGISTER),
5301 (uint16_t)cfg_value);
5302
5303 /* Wait 3 seconds before checking */
5304 DELAYMS(3000);
5305 i += 3;
5306
5307 /* Wait for reset completion */
5308 while (i < 30) {
5309 /* Check status register to see what current state is */
5310 status = READ_CSR_REG(hba, FC_HS_REG(hba, hba->csr_addr));
5311
5312 /* Check to see if any errors occurred during init */
5313 if (status & HS_FFERM) {
5314 status1 = READ_SLIM_ADDR(hba,
5315 ((volatile uint8_t *) hba->slim_addr + 0xa8));
5316 status2 = READ_SLIM_ADDR(hba,
5317 ((volatile uint8_t *) hba->slim_addr + 0xac));
5318
5319 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_reset_failed_msg,
5320 "HS_FFERM: status=0x%x status1=0x%x status2=0x%x",
5321 status, status1, status2);
5322
5323 emlxs_ffstate_change(hba, FC_ERROR);
5324 return (1);
5325 }
5326 if ((status & ready) == ready) {
5327 /* Reset Done !! */
5328 goto done;
5329 }
5330 /*
5331 * Check every 1 second for 15 seconds, then reset board
5332 * again (w/post), then check every 1 second for 15 seconds.
5333 */
5334 DELAYMS(1000);
5335 i++;
5336
5337 /* Reset again (w/post) at 15 seconds */
5338 if (i == 15) {
5339 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
5340 "Reset failed. Retrying...");
5341
5342 goto reset;
5343 }
5344 }
5345
5346 /* Timeout occurred */
5347 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_reset_failed_msg,
5348 "Timeout: status=0x%x", status);
5349 emlxs_ffstate_change(hba, FC_ERROR);
5350
5351 /* Log a dump event */
5352 emlxs_log_dump_event(port, NULL, 0);
5353
5354 return (1);
5355
5356done:
5357
5358 /* Reset the hba structure */
5359 hba->flag &= FC_RESET_MASK;
5360 bzero(hba->ring_tx_count, sizeof (hba->ring_tx_count));
5361 bzero(hba->io_count, sizeof (hba->io_count));
5362 hba->iodone_count = 0;
5363 hba->topology = 0;
5364 hba->linkspeed = 0;
5365 hba->heartbeat_active = 0;
5366 hba->discovery_timer = 0;
5367 hba->linkup_timer = 0;
5368 hba->loopback_tics = 0;
5369
5370 /* Initialize hc_copy */
5371 hba->hc_copy = READ_CSR_REG(hba, FC_HC_REG(hba, hba->csr_addr));
5372
5373 /* Reset the ring objects */
5374 for (i = 0; i < MAX_RINGS; i++) {
5375 rp = &hba->ring[i];
5376 rp->fc_mpon = 0;
5377 rp->fc_mpoff = 0;
5378 }
5379
5380 /* Reset the port objects */
5381 for (i = 0; i < MAX_VPORTS; i++) {
5382 vport = &VPORT(i);
5383
5384 vport->flag &= EMLXS_PORT_RESET_MASK;
5385 vport->did = 0;
5386 vport->prev_did = 0;
5387 vport->lip_type = 0;
5388 bzero(&vport->fabric_sparam, sizeof (SERV_PARM));
5389
5390 bzero((caddr_t)&vport->node_base, sizeof (NODELIST));
5391 vport->node_base.nlp_Rpi = 0;
5392 vport->node_base.nlp_DID = 0xffffff;
5393 vport->node_base.nlp_list_next = NULL;
5394 vport->node_base.nlp_list_prev = NULL;
5395 vport->node_base.nlp_active = 1;
5396 vport->node_count = 0;
5397
5398 if (vport->ub_count < EMLXS_UB_TOKEN_OFFSET) {
5399 vport->ub_count = EMLXS_UB_TOKEN_OFFSET;
5400 }
5401 }
5402
5403 return (0);
5404
5405} /* emlxs_hba_reset */
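
/*
 * Illustrative sketch (not part of the driver source): the reset path
 * above writes an MBX_RESTART request into SLIM, pulses HC_INITFF with
 * SERR/PERR masked in PCI config space, waits 3 seconds, and then polls
 * the host status register once per second -- retrying the reset once at
 * the 15 second mark and giving up (FC_ERROR plus a dump event) at 30
 * seconds.  The standalone helper below mirrors only that timing policy;
 * check_ready() and issue_reset() are hypothetical stand-ins for the
 * register accesses, and the HS_FFERM error check is omitted.
 */
#include <unistd.h>

static int
wait_for_ready(int (*check_ready)(void), void (*issue_reset)(void))
{
	int i = 0;

reset:
	issue_reset();
	sleep(3);		/* settle time before the first status read */
	i += 3;

	while (i < 30) {
		if (check_ready()) {
			return (0);	/* reset completed */
		}

		sleep(1);
		i++;

		if (i == 15) {
			goto reset;	/* one mid-way retry */
		}
	}

	return (-1);		/* 30 second timeout; caller logs and fails */
}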
5406
5407
5408
5409extern void
5410emlxs_poll_intr(emlxs_hba_t *hba, uint32_t att_bit)
5411{
5412 uint32_t ha_copy;
5413
5414 /*
5415 * Polling a specific attention bit.
5416 */
5417 for (;;) {
5418 ha_copy = READ_CSR_REG(hba, FC_HA_REG(hba, hba->csr_addr));
5419
5420 if (ha_copy & att_bit) {
5421 break;
5422 }
5423 }
5424
5425 mutex_enter(&EMLXS_PORT_LOCK);
5426 ha_copy = emlxs_get_attention(hba, -1);
5427 mutex_exit(&EMLXS_PORT_LOCK);
5428
5429 /* Process the attentions */
5430 emlxs_proc_attention(hba, ha_copy);
5431
5432 return;
5433
5434} /* emlxs_poll_intr() */
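
/*
 * Note on emlxs_poll_intr() above: it spins on the host attention
 * register with no delay and no timeout, so it is only safe to call when
 * the requested attention bit is guaranteed to be raised.  Once the bit
 * is observed, the pending attention conditions are collected under
 * EMLXS_PORT_LOCK and handed to emlxs_proc_attention() for processing.
 */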
5435
5436
5437extern uint32_t
5438emlxs_reset_ring(emlxs_hba_t *hba, uint32_t ringno)
5439{
5440 emlxs_port_t *port = &PPORT;
5441 RING *rp;
5442 MAILBOX *mb;
5443 PGP *pgp;
5444 off_t offset;
5445 NODELIST *ndlp;
5446 uint32_t i, j;
5447 emlxs_port_t *vport;
5448
5449 rp = &hba->ring[ringno];
5450 pgp = (PGP *) & ((SLIM2 *) hba->slim2.virt)->mbx.us.s2.port[ringno];
5451
5452 if ((mb = (MAILBOX *) emlxs_mem_get(hba, MEM_MBOX | MEM_PRI)) == 0) {
5453 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_ring_reset_msg,
5454 "%s: Unable to allocate mailbox buffer.",
5455 emlxs_ring_xlate(ringno));
5456
5457 return ((uint32_t)FC_FAILURE);
5458 }
5459 emlxs_mb_reset_ring(hba, mb, ringno);
5460 if (emlxs_mb_issue_cmd(hba, mb, MBX_WAIT, 0) != MBX_SUCCESS) {
5461 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_ring_reset_msg,
5462 "%s: Unable to reset ring. Mailbox cmd=%x status=%x",
5463 emlxs_ring_xlate(ringno), mb->mbxCommand, mb->mbxStatus);
5464
5465 (void) emlxs_mem_put(hba, MEM_MBOX, (uint8_t *)mb);
5466 return ((uint32_t)FC_FAILURE);
5467 }
5468 /* Free the mailbox */
5469 (void) emlxs_mem_put(hba, MEM_MBOX, (uint8_t *)mb);
5470
5471 /* Update the response ring indices */
5472 offset = (off_t)((uint64_t)(unsigned long)&(pgp->rspPutInx) -
5473 (uint64_t)(unsigned long)hba->slim2.virt);
5474 emlxs_mpdata_sync(hba->slim2.dma_handle, offset, 4,
5475 DDI_DMA_SYNC_FORKERNEL);
5476 rp->fc_rspidx = rp->fc_port_rspidx = PCIMEM_LONG(pgp->rspPutInx);
5477
5478 /* Update the command ring indices */
5479 offset = (off_t)((uint64_t)(unsigned long)&(pgp->cmdGetInx) -
5480 (uint64_t)(unsigned long)hba->slim2.virt);
5481 emlxs_mpdata_sync(hba->slim2.dma_handle, offset, 4,
5482 DDI_DMA_SYNC_FORKERNEL);
5483 rp->fc_cmdidx = rp->fc_port_cmdidx = PCIMEM_LONG(pgp->cmdGetInx);
5484
5485
5486 for (i = 0; i < MAX_VPORTS; i++) {
5487 vport = &VPORT(i);
5488
5489 if (!(vport->flag & EMLXS_PORT_BOUND)) {
5490 continue;
5491 }
5492 /* Clear all node XRI contexts */
5493 rw_enter(&vport->node_rwlock, RW_WRITER);
5494 mutex_enter(&EMLXS_RINGTX_LOCK);
5495 for (j = 0; j < EMLXS_NUM_HASH_QUES; j++) {
5496 ndlp = vport->node_table[j];
5497 while (ndlp != NULL) {
5498 ndlp->nlp_flag[FC_IP_RING] &= ~NLP_RPI_XRI;
5499 ndlp = ndlp->nlp_list_next;
5500 }
5501 }
5502 mutex_exit(&EMLXS_RINGTX_LOCK);
5503 rw_exit(&vport->node_rwlock);
5504 }
5505
5506 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_ring_reset_msg,
5507 "%s", emlxs_ring_xlate(ringno));
5508
5509 return (FC_SUCCESS);
5510
5511} /* emlxs_reset_ring() */
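
/*
 * Note on emlxs_reset_ring() above: the reset is done in three steps --
 * a reset-ring mailbox command (built by emlxs_mb_reset_ring()) is issued
 * and must complete successfully, the ring's command/response indices are
 * re-synchronized from the Port Get/Put (PGP) area in SLIM2 (DMA-synced
 * before each read), and the cached XRI binding flag (NLP_RPI_XRI) is
 * cleared on the IP ring for every node of every bound vport, under the
 * node rwlock and ring TX lock.
 */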
5512
5513
5514extern char *
5515emlxs_ffstate_xlate(uint32_t state)
5516{
5517 static char buffer[32];
5518 uint32_t i;
5519 uint32_t count;
5520
5521 count = sizeof (emlxs_ffstate_table) / sizeof (emlxs_table_t);
5522 for (i = 0; i < count; i++) {
5523 if (state == emlxs_ffstate_table[i].code) {
5524 return (emlxs_ffstate_table[i].string);
5525 }
5526 }
5527
5528 (void) sprintf(buffer, "state=0x%x", state);
5529 return (buffer);
5530
5531} /* emlxs_ffstate_xlate() */
5532
5533
5534extern char *
5535emlxs_ring_xlate(uint32_t ringno)
5536{
5537 static char buffer[32];
5538 uint32_t i;
5539 uint32_t count;
5540
5541 count = sizeof (emlxs_ring_table) / sizeof (emlxs_table_t);
5542 for (i = 0; i < count; i++) {
5543 if (ringno == emlxs_ring_table[i].code) {
5544 return (emlxs_ring_table[i].string);
5545 }
5546 }
5547
5548 (void) sprintf(buffer, "ring=0x%x", ringno);
5549 return (buffer);
5550
5551} /* emlxs_ring_xlate() */
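
/*
 * Illustrative usage (not from this file): the two xlate helpers above
 * format a state or ring number for log messages, falling back to a
 * shared static buffer for unknown codes, so the returned string should
 * be consumed immediately, e.g.:
 *
 *	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
 *	    "state=%s %s", emlxs_ffstate_xlate(hba->state),
 *	    emlxs_ring_xlate(ringno));
 */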
5552
5553
5554
5555extern void
5556emlxs_pcix_mxr_update(emlxs_hba_t *hba, uint32_t verbose)
5557{
5558 emlxs_port_t *port = &PPORT;
5559 MAILBOX *mb;

--- 25 unchanged lines hidden (view full) ---

5585 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
5586 "PCI_MAX_READ: Invalid parameter value. old=%d new=%d",
5587 cfg[CFG_PCI_MAX_READ].current, cfg[CFG_PCI_MAX_READ].def);
5588
5589 cfg[CFG_PCI_MAX_READ].current = cfg[CFG_PCI_MAX_READ].def;
5590 goto xlate;
5591 }
5592
5593 if ((mb = (MAILBOX *) emlxs_mem_get(hba, MEM_MBOX | MEM_PRI)) == 0) {
3879 if ((mb = (MAILBOX *)emlxs_mem_get(hba, MEM_MBOX | MEM_PRI)) == 0) {
5594 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
5595 "PCI_MAX_READ: Unable to allocate mailbox buffer.");
5596 return;
5597 }
5598 emlxs_mb_set_var(hba, (MAILBOX *) mb, 0x00100506, value);
3885 emlxs_mb_set_var(hba, (MAILBOX *)mb, 0x00100506, value);
5599
5600 if (emlxs_mb_issue_cmd(hba, mb, MBX_WAIT, 0) != MBX_SUCCESS) {
3887 if (emlxs_sli_issue_mbox_cmd(hba, mb, MBX_WAIT, 0) != MBX_SUCCESS) {
5601 if (verbose || (mb->mbxStatus != 0x12)) {
5602 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
5603 "PCI_MAX_READ: Unable to update. status=%x "
5604 "value=%d (%d bytes)", mb->mbxStatus, value,
3890 "PCI_MAX_READ: Unable to update. "
3891 "status=%x value=%d (%d bytes)",
3892 mb->mbxStatus, value,
5605 cfg[CFG_PCI_MAX_READ].current);
5606 }
5607 } else {
5608 if (verbose && (cfg[CFG_PCI_MAX_READ].current !=
3896 if (verbose &&
3897 (cfg[CFG_PCI_MAX_READ].current !=
5609 cfg[CFG_PCI_MAX_READ].def)) {
5610 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
5611 "PCI_MAX_READ: Updated. %d bytes",
5612 cfg[CFG_PCI_MAX_READ].current);
5613 }
5614 }
5615
5616 (void) emlxs_mem_put(hba, MEM_MBOX, (uint8_t *)mb);
5617
5618 return;
5619
5620} /* emlxs_pcix_mxr_update */
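
/*
 * Note on emlxs_pcix_mxr_update() above: the requested PCI-X maximum
 * memory read byte count is taken from cfg[CFG_PCI_MAX_READ] (reset to
 * its default if the configured value fails the checks in the hidden
 * lines) and pushed to the adapter with a set-variable mailbox command
 * against address 0x00100506.  A mailbox status of 0x12 is treated as
 * "not supported" and is only reported when verbose is set.  The newer
 * revision only renames the mailbox call from emlxs_mb_issue_cmd() to
 * emlxs_sli_issue_mbox_cmd() and rewraps the messages.
 */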
5621
5622
5623
5624extern uint32_t
5625emlxs_get_key(emlxs_hba_t *hba, MAILBOX *mb)
5626{
5627 emlxs_port_t *port = &PPORT;
5628 uint32_t npname0, npname1;
5629 uint32_t tmpkey, theKey;
5630 uint16_t key850;
5631 uint32_t t1, t2, t3, t4;
5632 uint32_t ts;
5633
5634#define SEED 0x876EDC21
5635
5636 /* This key is only used currently for SBUS adapters */
5637 if (hba->bus_type != SBUS_FC) {
5638 return (0);
5639 }
5640 tmpkey = mb->un.varWords[30];
5641 emlxs_ffstate_change(hba, FC_INIT_NVPARAMS);
5642
5643 emlxs_mb_read_nv(hba, mb);
5644 if (emlxs_mb_issue_cmd(hba, mb, MBX_WAIT, 0) != MBX_SUCCESS) {
3934 if (emlxs_sli_issue_mbox_cmd(hba, mb, MBX_WAIT, 0) != MBX_SUCCESS) {
5645 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
5646 "Unable to read nvram. cmd=%x status=%x",
5647 mb->mbxCommand, mb->mbxStatus);
3936 "Unable to read nvram. cmd=%x status=%x", mb->mbxCommand,
3937 mb->mbxStatus);
5648
5649 return (0);
5650 }
5651 npname0 = mb->un.varRDnvp.portname[0];
5652 npname1 = mb->un.varRDnvp.portname[1];
5653
5654 key850 = (uint16_t)((tmpkey & 0x00FFFF00) >> 8);
5655 ts = (uint16_t)(npname1 + 1);
5656 t1 = ts * key850;
5657 ts = (uint16_t)((npname1 >> 16) + 1);
5658 t2 = ts * key850;
5659 ts = (uint16_t)(npname0 + 1);
5660 t3 = ts * key850;
5661 ts = (uint16_t)((npname0 >> 16) + 1);
5662 t4 = ts * key850;
5663 theKey = SEED + t1 + t2 + t3 + t4;
5664
5665 return (theKey);
5666
5667} /* emlxs_get_key() */
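
/*
 * Illustrative sketch (not part of the driver source): the SBUS key
 * computed above is plain integer arithmetic over the two NVRAM
 * port-name words and the key material in mb->un.varWords[30].  The
 * standalone helper below reproduces that arithmetic so it can be
 * checked in isolation; the inputs in main() are made-up sample values,
 * not real adapter data.
 */
#include <stdint.h>
#include <stdio.h>

#define	KEY_SEED	0x876EDC21	/* same constant as SEED above */

static uint32_t
compute_sbus_key(uint32_t npname0, uint32_t npname1, uint32_t tmpkey)
{
	uint16_t key850;
	uint32_t ts, t1, t2, t3, t4;

	key850 = (uint16_t)((tmpkey & 0x00FFFF00) >> 8);
	ts = (uint16_t)(npname1 + 1);
	t1 = ts * key850;
	ts = (uint16_t)((npname1 >> 16) + 1);
	t2 = ts * key850;
	ts = (uint16_t)(npname0 + 1);
	t3 = ts * key850;
	ts = (uint16_t)((npname0 >> 16) + 1);
	t4 = ts * key850;

	return (KEY_SEED + t1 + t2 + t3 + t4);
}

int
main(void)
{
	/* Made-up example inputs; real values come from the adapter NVRAM */
	(void) printf("key=0x%x\n",
	    compute_sbus_key(0x10000000U, 0xc9123456U, 0x00abcdefU));

	return (0);
}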
3958
3959
3960extern void
3961emlxs_fw_show(emlxs_hba_t *hba)
3962{
3963 emlxs_port_t *port = &PPORT;
3964 uint32_t i;
3965
3966 /* Display firmware library one time */
3967 for (i = 0; i < EMLXS_FW_COUNT; i++) {
3968 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_image_library_msg, "%s",
3969 emlxs_fw_table[i].label);
3970 }
3971
3972 return;
3973
3974} /* emlxs_fw_show() */
3975
3976
3977#ifdef MODFW_SUPPORT
3978static void
3979emlxs_fw_load(emlxs_hba_t *hba, emlxs_firmware_t *fw)
3980{
3981 emlxs_port_t *port = &PPORT;
3982 int (*emlxs_fw_get)(emlxs_firmware_t *);
3983 int err;
3984
3985 /* Make sure image is unloaded and image buffer pointer is clear */
3986 emlxs_fw_unload(hba, fw);
3987
3988 err = 0;
3989 hba->fw_modhandle =
3990 ddi_modopen(EMLXS_FW_MODULE, KRTLD_MODE_FIRST, &err);
3991 if (!hba->fw_modhandle) {
3992 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
3993 "Unable to load firmware module. error=%d", err);
3994
3995 return;
3996 } else {
3997 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg,
3998 "Firmware module loaded.");
3999 }
4000
4001 err = 0;
4002 emlxs_fw_get =
4003 (int (*)())ddi_modsym(hba->fw_modhandle, "emlxs_fw_get", &err);
4004 if ((void *)emlxs_fw_get == NULL) {
4005 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
4006 "emlxs_fw_get not present. error=%d", err);
4007
4008 emlxs_fw_unload(hba, fw);
4009 return;
4010 }
4011
4012 if (emlxs_fw_get(fw)) {
4013 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
4014 "Invalid firmware image module found. %s", fw->label);
4015
4016 emlxs_fw_unload(hba, fw);
4017 return;
4018 }
4019
4020 return;
4021
4022} /* emlxs_fw_load() */
4023
4024
4025static void
4026emlxs_fw_unload(emlxs_hba_t *hba, emlxs_firmware_t *fw)
4027{
4028 emlxs_port_t *port = &PPORT;
4029
4030 /* Clear the firmware image */
4031 fw->image = NULL;
4032 fw->size = 0;
4033
4034 if (hba->fw_modhandle) {
4035 /* Close the module */
4036 (void) ddi_modclose(hba->fw_modhandle);
4037 hba->fw_modhandle = NULL;
4038
4039 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg,
4040 "Firmware module unloaded.");
4041 }
4042
4043 return;
4044
4045} /* emlxs_fw_unload() */
4046#endif /* MODFW_SUPPORT */