xref: /illumos-gate/usr/src/uts/common/io/qede/qede_main.c (revision 14b24e2b79293068c8e016a69ef1d872fb5e2fd5)
1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License, v.1,  (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://opensource.org/licenses/CDDL-1.0.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21 
22 /*
23 * Copyright 2014-2017 Cavium, Inc.
24 * The contents of this file are subject to the terms of the Common Development
25 * and Distribution License, v.1,  (the "License").
26 
27 * You may not use this file except in compliance with the License.
28 
29 * You can obtain a copy of the License available
30 * at http://opensource.org/licenses/CDDL-1.0
31 
32 * See the License for the specific language governing permissions and
33 * limitations under the License.
34 */
35 
36 
37 #include "qede.h"
38 
39 ddi_device_acc_attr_t qede_regs_acc_attr = {
40 	DDI_DEVICE_ATTR_V1,     // devacc_attr_version;
41 	DDI_STRUCTURE_LE_ACC,   // devacc_attr_endian_flags;
42 	DDI_STRICTORDER_ACC,    // devacc_attr_dataorder;
43 	DDI_FLAGERR_ACC         // devacc_attr_access;
44 };
45 
46 ddi_device_acc_attr_t qede_desc_acc_attr = {
47 	DDI_DEVICE_ATTR_V0,    // devacc_attr_version;
48 	DDI_STRUCTURE_LE_ACC,  // devacc_attr_endian_flags;
49 	DDI_STRICTORDER_ACC    // devacc_attr_dataorder;
50 };
51 
52 /*
53  * DMA access attributes for BUFFERS.
54  */
55 ddi_device_acc_attr_t qede_buf_acc_attr =
56 {
57 	DDI_DEVICE_ATTR_V0,   // devacc_attr_version;
58 	DDI_NEVERSWAP_ACC,    // devacc_attr_endian_flags;
59 	DDI_STRICTORDER_ACC   // devacc_attr_dataorder;
60 };
61 
62 
63 ddi_dma_attr_t qede_desc_dma_attr =
64 {
65 	DMA_ATTR_V0,
66 	0x0000000000000000ull,
67 	0xFFFFFFFFFFFFFFFFull,
68 	0x00000000FFFFFFFFull,
69 	QEDE_PAGE_ALIGNMENT,
70 	0x00000FFF,
71 	0x00000001,
72 	0x00000000FFFFFFFFull,
73 	0xFFFFFFFFFFFFFFFFull,
74 	1,
75 	0x00000001,
76 	DDI_DMA_FLAGERR
77 };
78 
79 ddi_dma_attr_t qede_gen_buf_dma_attr =
80 {
81 	DMA_ATTR_V0,
82 	0x0000000000000000ull,
83 	0xFFFFFFFFFFFFFFFFull,
84 	0x00000000FFFFFFFFull,
85 	QEDE_PAGE_ALIGNMENT,
86 	0x00000FFF,
87 	0x00000001,
88 	0x00000000FFFFFFFFull,
89 	0xFFFFFFFFFFFFFFFFull,
90 	1,
91 	0x00000001,
92 	DDI_DMA_FLAGERR
93 };
94 
95 /*
96  * DMA attributes for transmit.
97  */
98 ddi_dma_attr_t qede_tx_buf_dma_attr =
99 {
100 	DMA_ATTR_V0,
101 	0x0000000000000000ull,
102 	0xFFFFFFFFFFFFFFFFull,
103 	0x00000000FFFFFFFFull,
104 	1,
105 	0x00000FFF,
106 	0x00000001,
107 	0x00000000FFFFFFFFull,
108 	0xFFFFFFFFFFFFFFFFull,
109 	ETH_TX_MAX_BDS_PER_NON_LSO_PACKET - 1,
110 	0x00000001,
111 	DDI_DMA_FLAGERR
112 };
113 
114 
115 ddi_dma_attr_t qede_dma_attr_desc =
116 {
117 	DMA_ATTR_V0,		/* dma_attr_version */
118 	0,			/* dma_attr_addr_lo */
119 	0xffffffffffffffffull,	/* dma_attr_addr_hi */
120 	0x000fffffull,		/* dma_attr_count_max */
121 	4096,			/* dma_attr_align */
122 	0x000fffffull,		/* dma_attr_burstsizes */
123 	4,			/* dma_attr_minxfer */
124 	0xffffffffull,		/* dma_attr_maxxfer */
125 	0xffffffffull,		/* dma_attr_seg */
126 	1,			/* dma_attr_sgllen */
127 	1,			/* dma_attr_granular */
128 	DDI_DMA_FLAGERR		/* dma_attr_flags */
129 };
130 
131 static ddi_dma_attr_t qede_dma_attr_txbuf =
132 {
133 	DMA_ATTR_V0,		/* dma_attr_version */
134 	0,			/* dma_attr_addr_lo */
135 	0xffffffffffffffffull,	/* dma_attr_addr_hi */
136 	0x00000000FFFFFFFFull,	/* dma_attr_count_max */
137 	QEDE_PAGE_ALIGNMENT, /* dma_attr_align */
138 	0xfff8ull,		/* dma_attr_burstsizes */
139 	1,			/* dma_attr_minxfer */
140 	0xffffffffull,		/* dma_attr_maxxfer */
141 	0xFFFFFFFFFFFFFFFFull,	/* maximum segment size */
142 	1,			/* dma_attr_sgllen */
143 	1,			/* dma_attr_granular */
144 	0			/* dma_attr_flags */
145 };
146 
147 ddi_dma_attr_t qede_dma_attr_rxbuf =
148 {
149 	DMA_ATTR_V0,		/* dma_attr_version */
150 	0,			/* dma_attr_addr_lo */
151 	0xffffffffffffffffull,	/* dma_attr_addr_hi */
152 	0x00000000FFFFFFFFull,	/* dma counter max */
153 	QEDE_PAGE_ALIGNMENT,	/* dma_attr_align */
154 	0xfff8ull,		/* dma_attr_burstsizes */
155 	1,			/* dma_attr_minxfer */
156 	0xffffffffull,		/* dma_attr_maxxfer */
157 	0xFFFFFFFFFFFFFFFFull,	/* maximum segment size */
158 	1,			/* dma_attr_sgllen */
159 	1,			/* dma_attr_granular */
160 	DDI_DMA_RELAXED_ORDERING	/* dma_attr_flags */
161 };
162 
163 /* LINTED E_STATIC_UNUSED */
164 static ddi_dma_attr_t qede_dma_attr_cmddesc =
165 {
166 	DMA_ATTR_V0,		/* dma_attr_version */
167 	0,			/* dma_attr_addr_lo */
168 	0xffffffffffffffffull,	/* dma_attr_addr_hi */
169 	0xffffffffull,		/* dma_attr_count_max */
170 	1,			/* dma_attr_align */
171 	0xfff8ull,		/* dma_attr_burstsizes */
172 	1,			/* dma_attr_minxfer */
173 	0xffffffff,		/* dma_attr_maxxfer */
174 	0xffffffff,		/* dma_attr_seg */
175 	ETH_TX_MAX_BDS_PER_NON_LSO_PACKET,	/* dma_attr_sgllen */
176 	1,			/* dma_attr_granular */
177 	0			/* dma_attr_flags */
178 };
179 
180 
181 
182 /*
183  * Generic dma attribute for single sg
184  */
185 /* LINTED E_STATIC_UNUSED */
186 static ddi_dma_attr_t qede_gen_dma_attr_desc =
187 {
188 	DMA_ATTR_V0,            /* dma_attr_version */
189 	0,                      /* dma_attr_addr_lo */
190 	0xffffffffffffffffull,	/* dma_attr_addr_hi */
191 	0x000fffffull,          /* dma_attr_count_max */
192 	4096,                   /* dma_attr_align */
193 	0x000fffffull,          /* dma_attr_burstsizes */
194 	4,                      /* dma_attr_minxfer */
195 	0xffffffffull,          /* dma_attr_maxxfer */
196 	0xffffffffull,          /* dma_attr_seg */
197 	1,                      /* dma_attr_sgllen */
198 	1,                      /* dma_attr_granular */
199 	DDI_DMA_FLAGERR         /* dma_attr_flags */
200 };
201 
202 ddi_dma_attr_t qede_buf2k_dma_attr_txbuf =
203 {
204 	DMA_ATTR_V0,		/* dma_attr_version */
205 	0,			/* dma_attr_addr_lo */
206 	0xffffffffffffffffull,	/* dma_attr_addr_hi */
207 	0x00000000FFFFFFFFull,	/* dma_attr_count_max */
208 	BUF_2K_ALIGNMENT,	/* dma_attr_align */
209 	0xfff8ull,		/* dma_attr_burstsizes */
210 	1,			/* dma_attr_minxfer */
211 	0xffffffffull,		/* dma_attr_maxxfer */
212 	0xFFFFFFFFFFFFFFFFull,	/* maximum segment size */
213 	1,			/* dma_attr_sgllen */
214 	0x00000001,		/* dma_attr_granular */
215 	0			/* dma_attr_flags */
216 };
217 
218 char *
219 qede_get_ddi_fail(int status)
220 {
221 	switch (status) {
222 	case DDI_FAILURE:
223 		return ("DDI_FAILURE");
224 	case DDI_NOT_WELL_FORMED:
225 		return ("DDI_NOT_WELL_FORMED");
226 	case DDI_EAGAIN:
227 		return ("DDI_EAGAIN");
228 	case DDI_EINVAL:
229 		return ("DDI_EINVAL");
230 	case DDI_ENOTSUP:
231 		return ("DDI_ENOTSUP");
232 	case DDI_EPENDING:
233 		return ("DDI_EPENDING");
234 	case DDI_EALREADY:
235 		return ("DDI_EALREADY");
236 	case DDI_ENOMEM:
237 		return ("DDI_ENOMEM");
238 	case DDI_EBUSY:
239 		return ("DDI_EBUSY");
240 	case DDI_ETRANSPORT:
241 		return ("DDI_ETRANSPORT");
242 	case DDI_ECONTEXT:
243 		return ("DDI_ECONTEXT");
244 	default:
245 		return ("ERROR CODE NOT FOUND!");
246 	}
247 }
248 
249 char *
250 qede_get_ecore_fail(int status)
251 {
252 	switch (status) {
253 	case ECORE_UNKNOWN_ERROR:
254 		return ("ECORE_UNKNOWN_ERROR");
255 	case ECORE_NORESOURCES:
256 		return ("ECORE_NORESOURCES");
257 	case ECORE_NODEV:
258 		return ("ECORE_NODEV");
259 	case ECORE_ABORTED:
260 		return ("ECORE_ABORTED");
261 	case ECORE_AGAIN:
262 		return ("ECORE_AGAIN");
263 	case ECORE_NOTIMPL:
264 		return ("ECORE_NOTIMPL");
265 	case ECORE_EXISTS:
266 		return ("ECORE_EXISTS");
267 	case ECORE_IO:
268 		return ("ECORE_IO");
269 	case ECORE_TIMEOUT:
270 		return ("ECORE_TIMEOUT");
271 	case ECORE_INVAL:
272 		return ("ECORE_INVAL");
273 	case ECORE_BUSY:
274 		return ("ECORE_BUSY");
275 	case ECORE_NOMEM:
276 		return ("ECORE_NOMEM");
277 	case ECORE_SUCCESS:
278 		return ("ECORE_SUCCESS");
279 	case ECORE_PENDING:
280 		return ("ECORE_PENDING");
281 	default:
282 		return ("ECORE ERROR CODE NOT FOUND!");
283 	}
284 }
285 
286 #define	QEDE_CHIP_NUM(_p)	\
287 	(((_p)->edev.chip_num) & 0xffff)
288 
289 char *
290 qede_chip_name(qede_t *qede)
291 {
292 	switch (QEDE_CHIP_NUM(qede)) {
293 	case 0x1634:
294 		return ("BCM57980E");
295 
296 	case 0x1629:
297 		return ("BCM57980S");
298 
299 	case 0x1630:
300 		return ("BCM57940_KR2");
301 
302 	case 0x8070:
303 		return ("ARROWHEAD");
304 
305 	case 0x8071:
306 		return ("ARROWHEAD");
307 
308 	case 0x8072:
309 		return ("ARROWHEAD");
310 
311 	case 0x8073:
312 		return ("ARROWHEAD");
313 
314 	default:
315 		return ("UNKNOWN");
316 	}
317 }
318 
319 
320 
321 
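/*
 * Destroy the per-instance, per-fastpath and per-ring mutexes that
 * were created by qede_init_locks().
 */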
322 static void
323 qede_destroy_locks(qede_t *qede)
324 {
325 	qede_fastpath_t *fp = &qede->fp_array[0];
326 	qede_rx_ring_t *rx_ring;
327 	qede_tx_ring_t *tx_ring;
328 	int i, j;
329 
330 	mutex_destroy(&qede->drv_lock);
331 	mutex_destroy(&qede->watch_lock);
332 
333 	for (i = 0; i < qede->num_fp; i++, fp++) {
334 		mutex_destroy(&fp->fp_lock);
335 
336 		rx_ring = fp->rx_ring;
337 		mutex_destroy(&rx_ring->rx_lock);
338 		mutex_destroy(&rx_ring->rx_replen_lock);
339 
340 		for (j = 0; j < qede->num_tc; j++) {
341 			tx_ring = fp->tx_ring[j];
342 			mutex_destroy(&tx_ring->tx_lock);
343 		}
344 	}
345 	mutex_destroy(&qede->gld_lock);
346 	mutex_destroy(&qede->kstat_lock);
347 }
348 
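/*
 * Initialize all driver mutexes at the interrupt priority obtained
 * during interrupt allocation so they are safe to take from the
 * interrupt handlers.
 */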
349 static void
350 qede_init_locks(qede_t *qede)
351 {
352 	qede_intr_context_t *intr_ctx = &qede->intr_ctx;
353 	qede_fastpath_t *fp = &qede->fp_array[0];
354 	qede_rx_ring_t *rx_ring;
355 	qede_tx_ring_t *tx_ring;
356 	int i, tc;
357 
358 	mutex_init(&qede->drv_lock, NULL,
359 	    MUTEX_DRIVER, DDI_INTR_PRI(intr_ctx->intr_pri));
360 	mutex_init(&qede->watch_lock, NULL,
361 	    MUTEX_DRIVER, DDI_INTR_PRI(intr_ctx->intr_pri));
362 
363 	for (i = 0; i < qede->num_fp; i++, fp++) {
364 		mutex_init(&fp->fp_lock, NULL,
365 		    MUTEX_DRIVER, DDI_INTR_PRI(intr_ctx->intr_pri));
366 
367 		rx_ring = fp->rx_ring;
368 		mutex_init(&rx_ring->rx_lock, NULL,
369 		    MUTEX_DRIVER, DDI_INTR_PRI(intr_ctx->intr_pri));
370 		mutex_init(&rx_ring->rx_replen_lock, NULL,
371 		    MUTEX_DRIVER, DDI_INTR_PRI(intr_ctx->intr_pri));
372 
373 		for (tc = 0; tc < qede->num_tc; tc++) {
374 			tx_ring = fp->tx_ring[tc];
375 			mutex_init(&tx_ring->tx_lock, NULL,
376 		    	    MUTEX_DRIVER, DDI_INTR_PRI(intr_ctx->intr_pri));
377 		}
378 	}
379 
380 	mutex_init(&qede->gld_lock, NULL,
381 	    MUTEX_DRIVER, DDI_INTR_PRI(intr_ctx->intr_pri));
382 	mutex_init(&qede->kstat_lock, NULL,
383 	    MUTEX_DRIVER, DDI_INTR_PRI(intr_ctx->intr_pri));
384 }
385 
386 /* LINTED E_FUNC_ARG_UNUSED */
387 static void qede_free_io_structs(qede_t *qede)
388 {
389 }
390 
391 static int
392 qede_alloc_io_structs(qede_t *qede)
393 {
394 	qede_fastpath_t *fp;
395 	qede_rx_ring_t *rx_ring;
396 	qede_tx_ring_t *tx_array, *tx_ring;
397 	int i, tc;
398 
399 	/*
400 	 * Put rx ring + tx_ring pointers paired
401 	 * into the fp data structure array
402 	 */
403 	for (i = 0; i < qede->num_fp; i++) {
404 		fp = &qede->fp_array[i];
405 		rx_ring = &qede->rx_array[i];
406 
407 		for (tc = 0; tc < qede->num_tc; tc++) {
408 			tx_array = qede->tx_array[tc];
409 			tx_ring = &tx_array[i];
410 			fp->tx_ring[tc] = tx_ring;
411 		}
412 
413 		fp->rx_ring = rx_ring;
414 		rx_ring->group_index = 0;
415 	}
416 
417 	return (DDI_SUCCESS);
418 }
419 
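/*
 * Load the qede.conf tunables via qede_cfg_init() and derive the
 * remaining per-instance parameters (ring/buffer sizes, LSO/LRO,
 * checksum, debug level), logging the resulting values.
 */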
420 static int
421 qede_get_config_params(qede_t *qede)
422 {
423 	struct ecore_dev *edev = &qede->edev;
424 
425 	qede_cfg_init(qede);
426 
427 	qede->num_tc = DEFAULT_TRFK_CLASS_COUNT;
428 	qede->num_hwfns = edev->num_hwfns;
429 	qede->rx_buf_count = qede->rx_ring_size;
430 	qede->rx_buf_size = DEFAULT_RX_BUF_SIZE;
431 	qede_print("!%s:%d: qede->num_fp = %d\n", __func__, qede->instance,
432 		qede->num_fp);
433 	qede_print("!%s:%d: qede->rx_ring_size = %d\n", __func__,
434 		qede->instance, qede->rx_ring_size);
435 	qede_print("!%s:%d: qede->rx_buf_count = %d\n", __func__,
436 		qede->instance, qede->rx_buf_count);
437 	qede_print("!%s:%d: qede->rx_buf_size = %d\n", __func__,
438 		qede->instance, qede->rx_buf_size);
439 	qede_print("!%s:%d: qede->rx_copy_threshold = %d\n", __func__,
440 		qede->instance, qede->rx_copy_threshold);
441 	qede_print("!%s:%d: qede->tx_ring_size = %d\n", __func__,
442 		qede->instance, qede->tx_ring_size);
443 	qede_print("!%s:%d: qede->tx_bcopy_threshold = %d\n", __func__,
444 		qede->instance, qede->tx_bcopy_threshold);
445 	qede_print("!%s:%d: qede->lso_enable = %d\n", __func__,
446 		qede->instance, qede->lso_enable);
447 	qede_print("!%s:%d: qede->lro_enable = %d\n", __func__,
448 		qede->instance, qede->lro_enable);
449 	qede_print("!%s:%d: qede->jumbo_enable = %d\n", __func__,
450 		qede->instance, qede->jumbo_enable);
451 	qede_print("!%s:%d: qede->log_enable = %d\n", __func__,
452 		qede->instance, qede->log_enable);
453 	qede_print("!%s:%d: qede->checksum = %d\n", __func__,
454 		qede->instance, qede->checksum);
455 	qede_print("!%s:%d: qede->debug_level = 0x%x\n", __func__,
456 		qede->instance, qede->ecore_debug_level);
457 	qede_print("!%s:%d: qede->num_hwfns = %d\n", __func__,
458 		qede->instance, qede->num_hwfns);
459 
460 	//qede->tx_buf_size = qede->mtu + QEDE_MAX_ETHER_HDR;
461 	qede->tx_buf_size = BUF_2K_SIZE;
462 	return (DDI_SUCCESS);
463 }
464 
465 void
466 qede_config_debug(qede_t *qede)
467 {
468 
469 	struct ecore_dev *edev = &qede->edev;
470 	u32 dp_level = 0;
471 	u8 dp_module = 0;
472 
473 	dp_level = qede->ecore_debug_level;
474 	dp_module = qede->ecore_debug_module;
475 	ecore_init_dp(edev, dp_module, dp_level, NULL);
476 }
477 
478 
479 
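/*
 * Fetch the user configuration, set up the ecore debug level and
 * compute the interrupt vector counts and unicast/multicast filter
 * limits used by the rest of the attach path.
 */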
480 static int
481 qede_set_operating_params(qede_t *qede)
482 {
483 	int status = 0;
484 	qede_intr_context_t *intr_ctx = &qede->intr_ctx;
485 
486 	/* Get qede.conf parameters from user */
487 	status = qede_get_config_params(qede);
488 	if (status != DDI_SUCCESS) {
489 		return (DDI_FAILURE);
490 	}
491 	/* config debug level */
492 	qede_config_debug(qede);
493 
494 
495 	intr_ctx->intr_vect_to_request =
496 		qede->num_fp + qede->num_hwfns;
497 	intr_ctx->intr_fp_vector_count = qede->num_fp - qede->num_hwfns;
498 
499 	/* set max number of Unicast list */
500 	qede->ucst_total = QEDE_MAX_UCST_CNT;
501 	qede->ucst_avail = QEDE_MAX_UCST_CNT;
502 	bzero(&qede->ucst_mac[0], sizeof (qede_mac_addr_t) * qede->ucst_total);
503 	qede->params.multi_promisc_fl = B_FALSE;
504 	qede->params.promisc_fl = B_FALSE;
505 	qede->mc_cnt = 0;
506 	qede->rx_low_buffer_threshold = RX_LOW_BUFFER_THRESHOLD;
507 
508 	return (status);
509 }
510 
511 /* Resume the interface */
512 static int
513 qede_resume(qede_t *qede)
514 {
515 	mutex_enter(&qede->drv_lock);
516 	cmn_err(CE_NOTE, "%s:%d Enter\n", __func__, qede->instance);
517 	qede->qede_state = QEDE_STATE_ATTACHED;
518 	mutex_exit(&qede->drv_lock);
519 	return (DDI_FAILURE);
520 }
521 
522 /*
523  * Write dword to doorbell from tx_path
524  * Avoid use of qede_t * pointer
525  */
526 #pragma inline(qede_bar2_write32_tx_doorbell)
527 void
528 qede_bar2_write32_tx_doorbell(qede_tx_ring_t *tx_ring, u32 val)
529 {
530 	u64 addr = (u64)tx_ring->doorbell_addr;
531 	ddi_put32(tx_ring->doorbell_handle, (u32 *)addr, val);
532 }
533 
534 static void
535 qede_unconfig_pci(qede_t *qede)
536 {
537 	if (qede->doorbell_handle != NULL) {
538 		ddi_regs_map_free(&(qede->doorbell_handle));
539 		qede->doorbell_handle = NULL;
540 	}
541 
542 	if (qede->regs_handle != NULL) {
543 		ddi_regs_map_free(&qede->regs_handle);
544 		qede->regs_handle = NULL;
545 	}
546 	if (qede->pci_cfg_handle != NULL) {
547 		pci_config_teardown(&qede->pci_cfg_handle);
548 		qede->pci_cfg_handle = NULL;
549 	}
550 }
551 
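/*
 * Set up PCI config space access and map BAR0 (device registers) and
 * BAR2 (doorbells).  Any partial setup is undone via qede_unconfig_pci()
 * on failure.
 */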
552 static int
553 qede_config_pci(qede_t *qede)
554 {
555 	int ret;
556 
557 	ret = pci_config_setup(qede->dip, &qede->pci_cfg_handle);
558 	if (ret != DDI_SUCCESS) {
559 		cmn_err(CE_NOTE, "%s:%d Failed to get PCI config handle\n",
560 			__func__, qede->instance);
561 		return (DDI_FAILURE);
562 	}
563 
564 	/* get register size */
565 	ret = ddi_dev_regsize(qede->dip, 1, &qede->regview_size);
566 	if (ret != DDI_SUCCESS) {
567 		cmn_err(CE_WARN, "%s%d: failed to read reg size for bar0",
568 			__func__, qede->instance);
569 		goto err_exit;
570 	}
571 
572 	/* get doorbell size */
573 	ret = ddi_dev_regsize(qede->dip, 3, &qede->doorbell_size);
574 	if (ret != DDI_SUCCESS) {
575 		cmn_err(CE_WARN, "%s%d: failed to read doorbell size for bar2",
576 			__func__, qede->instance);
577 		goto err_exit;
578 	}
579 
580 	/* map register space */
581 	ret = ddi_regs_map_setup(
582 	/* Pointer to the device's dev_info structure. */
583 	    qede->dip,
584 	/*
585 	 * Index number to the register address space  set.
586 	 * A  value of 0 indicates PCI configuration space,
587 	 * while a value of 1 indicates the real  start  of
588 	 * device register sets.
589 	 */
590 	    1,
591 	/*
592 	 * A platform-dependent value that, when  added  to
593 	 * an  offset that is less than or equal to the len
594 	 * parameter (see below), is used for the  dev_addr
595 	 * argument   to   the  ddi_get,  ddi_mem_get,  and
596 	 * ddi_io_get/put routines.
597 	 */
598 	    &qede->regview,
599 	/*
600 	 * Offset into the register address space.
601 	 */
602 	    0,
603 	/* Length to be mapped. */
604 	    qede->regview_size,
605 	/*
606 	 * Pointer to a device access  attribute  structure
607 	 * of this mapping.
608 	 */
609 	    &qede_regs_acc_attr,
610 	/* Pointer to a data access handle. */
611 	    &qede->regs_handle);
612 
613 	if (ret != DDI_SUCCESS) {
614 		cmn_err(CE_WARN, "!qede(%d): failed to map registers, err %d",
615 		    qede->instance, ret);
616 		goto err_exit;
617 	}
618 
619 	qede->pci_bar0_base = (unsigned long)qede->regview;
620 
621 	/* map doorbell space */
622 	ret = ddi_regs_map_setup(qede->dip,
623 	    2,
624 	    &qede->doorbell,
625 	    0,
626 	    qede->doorbell_size,
627 	    &qede_regs_acc_attr,
628 	    &qede->doorbell_handle);
629 
630 	if (ret != DDI_SUCCESS) {
631 		cmn_err(CE_WARN, "qede%d: failed to map doorbell, err %d",
632 		    qede->instance, ret);
633 		goto err_exit;
634 	}
635 
636 	qede->pci_bar2_base = (unsigned long)qede->doorbell;
637 
638 	return (ret);
639 err_exit:
640 	qede_unconfig_pci(qede);
641 	return (DDI_FAILURE);
642 }
643 
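/*
 * Slowpath (per-hwfn) MSI-X interrupt handler.  Updates the interrupt
 * statistics and hands the slowpath status block processing off to
 * ecore_int_sp_dpc().  Always returns DDI_INTR_CLAIMED.
 */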
644 static uint_t
645 qede_sp_handler(caddr_t arg1, caddr_t arg2)
646 {
647 	/*LINTED E_BAD_PTR_CAST_ALIGN*/
648 	struct ecore_hwfn *p_hwfn = (struct ecore_hwfn *)arg1;
649 	/* LINTED E_BAD_PTR_CAST_ALIGN */
650 	qede_vector_info_t *vect_info = (qede_vector_info_t *)arg2;
651 	struct ecore_dev *edev;
652 	qede_t *qede;
653 
654 	if ((arg1 == NULL) || (arg2 == NULL)) {
655 		cmn_err(CE_WARN, "qede_sp_handler: invalid parameters");
656 		/*
657 		 * MSIX intr should always
658 		 * return DDI_INTR_CLAIMED
659 		 */
660         	return (DDI_INTR_CLAIMED);
661 	}
662 	edev = p_hwfn->p_dev;
663 	qede = (qede_t *)edev;
664 	vect_info->in_isr = B_TRUE;
665 
666 	atomic_add_64((volatile uint64_t *)&qede->intrFired, 1);
667 	qede->intrSbCnt[vect_info->vect_index]++;
668 
669 
670 	ecore_int_sp_dpc((osal_int_ptr_t)p_hwfn);
671 
672 	vect_info->in_isr = B_FALSE;
673 
674     	return (DDI_INTR_CLAIMED);
675 }
676 
677 void
678 qede_enable_hw_intr(qede_fastpath_t *fp)
679 {
680 	ecore_sb_ack(fp->sb_info, IGU_INT_ENABLE, 1);
681 	ddi_dma_sync(fp->sb_dma_handle, 0, 0, DDI_DMA_SYNC_FORDEV);
682 }
683 
684 void
685 qede_disable_hw_intr(qede_fastpath_t *fp)
686 {
687 	ddi_dma_sync(fp->sb_dma_handle, 0, 0, DDI_DMA_SYNC_FORKERNEL);
688 	ecore_sb_ack(fp->sb_info, IGU_INT_DISABLE, 0);
689 }
690 
691 
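/*
 * Fastpath MSI-X interrupt handler.  Disables the hardware interrupt,
 * polls the fastpath for rx/tx completions, passes any received mblk
 * chain up to the MAC layer, and re-enables the interrupt unless the
 * ring has been switched to polling mode.
 */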
692 static uint_t
693 qede_fp_handler(caddr_t arg1, caddr_t arg2)
694 {
695 	/* LINTED E_BAD_PTR_CAST_ALIGN */
696 	qede_vector_info_t *vect_info = (qede_vector_info_t *)arg1;
697 	/* LINTED E_BAD_PTR_CAST_ALIGN */
698 	qede_t *qede = (qede_t *)arg2;
699 	qede_fastpath_t *fp;
700 	qede_rx_ring_t *rx_ring;
701 	mblk_t *mp;
702 	int work_done = 0;
703 
704 	if ((vect_info == NULL) || (vect_info->fp == NULL)) {
705 		cmn_err(CE_WARN, "qede_fp_handler: invalid parameters");
706         	return (DDI_INTR_UNCLAIMED);
707 	}
708 
709 	fp = (qede_fastpath_t *)vect_info->fp;
710 	rx_ring = fp->rx_ring;
711 
712 	mutex_enter(&fp->fp_lock);
713 
714 	atomic_add_64((volatile uint64_t *)&qede->intrFired, 1);
715 	qede->intrSbCnt[vect_info->vect_index]++;
716 
717 	mutex_enter(&fp->qede->drv_lock);
718 	qede_disable_hw_intr(fp);
719 	mutex_exit(&fp->qede->drv_lock);
720 
721 	mp = qede_process_fastpath(fp, QEDE_POLL_ALL,
722 	    QEDE_MAX_RX_PKTS_PER_INTR, &work_done);
723 
724 	if (mp)
725 #ifndef NO_CROSSBOW
726 	{
727 		mac_rx_ring(rx_ring->qede->mac_handle,
728 		    rx_ring->mac_ring_handle,
729 		    mp,
730 		    rx_ring->mr_gen_num);
731 	}
732 #else
733 	{
734 		mac_rx(qede->mac_handle, NULL, mp);
735 	}
736 #endif
737 	else if (!mp && (work_done == 0)) {
738 		qede->intrSbNoChangeCnt[vect_info->vect_index]++;
739 	}
740 
741 
742 	mutex_enter(&fp->qede->drv_lock);
743 	/*
744 	 * The mac layer may have disabled interrupts
745 	 * in the context of the mac_rx_ring call
746 	 * above while preparing to switch to polling.
747 	 * In this case we do not want to
748 	 * re-enable them here.
749 	 */
750 	if (fp->disabled_by_poll == 0) {
751 		qede_enable_hw_intr(fp);
752 	}
753 	mutex_exit(&fp->qede->drv_lock);
754 
755 	mutex_exit(&fp->fp_lock);
756 
757 	return (work_done ? DDI_INTR_CLAIMED : DDI_INTR_UNCLAIMED);
758 }
759 
760 static int
761 qede_disable_intr(qede_t *qede, uint32_t index)
762 {
763 	int status;
764 	qede_intr_context_t *intr_ctx = &qede->intr_ctx;
765 
766 	status = ddi_intr_disable(intr_ctx->intr_hdl_array[index]);
767 	if (status != DDI_SUCCESS) {
768 		cmn_err(CE_WARN, "qede:%s: Failed ddi_intr_disable with %s"
769 		    " for index %d\n",
770 		    __func__, qede_get_ddi_fail(status), index);
771 		return (status);
772 	}
773 	atomic_and_32(&intr_ctx->intr_state, ~(1 << index));
774 
775 	return (status);
776 }
777 
778 static int
779 qede_enable_intr(qede_t *qede, int index)
780 {
781 	int status = 0;
782 
783 	qede_intr_context_t *intr_ctx = &qede->intr_ctx;
784 
785 	status = ddi_intr_enable(intr_ctx->intr_hdl_array[index]);
786 
787 	if (status != DDI_SUCCESS) {
788 		cmn_err(CE_WARN, "qede:%s: Failed ddi_intr_enable with %s"
789 		    " for index %d\n",
790 		    __func__, qede_get_ddi_fail(status), index);
791 		return (status);
792 	}
793 
794 	atomic_or_32(&intr_ctx->intr_state, (1 << index));
795 
796 	return (status);
797 }
798 
799 static int
800 qede_disable_all_fastpath_intrs(qede_t *qede)
801 {
802 	int i, status;
803 
804 	for (i = qede->num_hwfns; i <= qede->num_fp; i++) {
805 		status = qede_disable_intr(qede, i);
806 		if (status != DDI_SUCCESS) {
807 			return (status);
808 		}
809 	}
810 	return (DDI_SUCCESS);
811 }
812 
813 static int
814 qede_enable_all_fastpath_intrs(qede_t *qede)
815 {
816 	int status = 0, i;
817 
818 	for (i = qede->num_hwfns; i <= qede->num_fp; i++) {
819 		status = qede_enable_intr(qede, i);
820 		if (status != DDI_SUCCESS) {
821 			return (status);
822 		}
823 	}
824 	return (DDI_SUCCESS);
825 }
826 
827 static int
828 qede_disable_slowpath_intrs(qede_t *qede)
829 {
830 	int i, status;
831 
832 	for (i = 0; i < qede->num_hwfns; i++) {
833 		status = qede_disable_intr(qede, i);
834 		if (status != DDI_SUCCESS) {
835 			return (status);
836 		}
837 	}
838 	return (DDI_SUCCESS);
839 }
840 
841 static int
842 qede_enable_slowpath_intrs(qede_t *qede)
843 {
844 	int i, status;
845 
846 	for (i = 0; i < qede->num_hwfns; i++) {
847 		status = qede_enable_intr(qede, i);
848 		if (status != DDI_SUCCESS) {
849 			return (status);
850 		}
851 	}
852 	return (DDI_SUCCESS);
853 }
854 
855 static int
856 qede_prepare_edev(qede_t *qede)
857 {
858 	struct ecore_dev *edev = &qede->edev;
859 	struct ecore_hw_prepare_params p_params;
860 
861 	/*
862 	 * Setup the bar0 and bar2 base address
863 	 * in ecore_device
864 	 */
865 	edev->regview = (void *)qede->regview;
866 	edev->doorbells = (void *)qede->doorbell;
867 
868 	/* LINTED E_FUNC_RET_MAYBE_IGNORED2 */
869 	strcpy(edev->name, qede->name);
870 	ecore_init_struct(edev);
871 
872 	p_params.personality = ECORE_PCI_ETH;
873 	p_params.drv_resc_alloc = 0;
874 	p_params.chk_reg_fifo = 1;
875 	p_params.initiate_pf_flr = 1;
876 	//p_params->epoch = time(&epoch);
877 	p_params.allow_mdump = 1;
878 	p_params.b_relaxed_probe = 0;
879 	return (ecore_hw_prepare(edev, &p_params));
880 }
881 
882 static int
883 qede_config_edev(qede_t *qede)
884 {
885 	int status, i;
886 	struct ecore_dev *edev = &qede->edev;
887 	struct ecore_pf_params *params;
888 
889 	for (i = 0; i < qede->num_hwfns; i++) {
890 		struct ecore_hwfn *p_hwfn = &edev->hwfns[i];
891 		params = &p_hwfn->pf_params;
892 		memset((void *)params, 0, sizeof (struct ecore_pf_params));
893 		params->eth_pf_params.num_cons = 32;
894 	}
895 	status = ecore_resc_alloc(edev);
896 	if (status != ECORE_SUCCESS) {
897 		cmn_err(CE_NOTE, "%s: Could not allocate ecore resources\n",
898 		 __func__);
899 		return (DDI_ENOMEM);
900 	}
901 	ecore_resc_setup(edev);
902 	return (DDI_SUCCESS);
903 }
904 
905 static void
906 qede_unconfig_intrs(qede_t *qede)
907 {
908 	qede_intr_context_t *intr_ctx = &qede->intr_ctx;
909 	qede_vector_info_t *vect_info;
910 	int i, status = 0;
911 
912 	for (i = 0; i < intr_ctx->intr_vect_allocated; i++) {
913 		vect_info = &intr_ctx->intr_vect_info[i];
914 		if (intr_ctx->intr_vect_info[i].handler_added == B_TRUE) {
915 			status = ddi_intr_remove_handler(
916 				intr_ctx->intr_hdl_array[i]);
917 			if (status != DDI_SUCCESS) {
918 				cmn_err(CE_WARN, "qede:%s: Failed"
919 					" ddi_intr_remove_handler with %s"
920 					" for index %d\n",
921 				__func__, qede_get_ddi_fail(
922 				status), i);
923 			}
924 
925 			(void) ddi_intr_free(intr_ctx->intr_hdl_array[i]);
926 
927 			vect_info->handler_added = B_FALSE;
928 			intr_ctx->intr_hdl_array[i] = NULL;
929 		}
930 	}
931 }
932 
933 static int
934 qede_config_intrs(qede_t *qede)
935 {
936 	qede_intr_context_t *intr_ctx = &qede->intr_ctx;
937 	qede_vector_info_t *vect_info;
938 	struct ecore_dev *edev = &qede->edev;
939 	int i, status = DDI_FAILURE;
940 	ddi_intr_handler_t *handler;
941 	void *arg1, *arg2;
942 
943 	/*
944 	 * Set up the interrupt handler argument
945 	 * for the slowpath
946 	 */
947 	for (i = 0; i < intr_ctx->intr_vect_allocated; i++) {
948 		vect_info = &intr_ctx->intr_vect_info[i];
949 		/* Store the table index */
950 		vect_info->vect_index = i;
951 		vect_info->qede = qede;
952 		/*
953 		 * Store the interrupt handler's argument.
954 		 * This will be a pointer to ecore_dev->hwfns
955 		 * for slowpath, a pointer to the fastpath
956 		 * structure for fastpath.
957 		 */
958 		if (i < qede->num_hwfns) {
959 		   	vect_info->fp = (void *)&edev->hwfns[i];
960 			handler = qede_sp_handler;
961 			arg1 = (caddr_t)&qede->edev.hwfns[i];
962 			arg2 = (caddr_t)vect_info;
963 		} else {
964 			/*
965 			 * loop index includes hwfns
966 			 * so they need to be subtracted
967 			 * for fp_array
968 			 */
969 			vect_info->fp =
970 			    (void *)&qede->fp_array[i - qede->num_hwfns];
971 			handler = qede_fp_handler;
972 			arg1 = (caddr_t)vect_info;
973 			arg2 = (caddr_t)qede;
974 		}
975 
976 		status = ddi_intr_add_handler(
977 		    intr_ctx->intr_hdl_array[i],
978 		    handler,
979 		    arg1,
980 		    arg2);
981 		if (status != DDI_SUCCESS) {
982 			cmn_err(CE_WARN, "qede:%s: Failed "
983 			    " ddi_intr_add_handler with %s"
984 			    " for index %d\n",
985 			    __func__, qede_get_ddi_fail(
986 			    status), i);
987 			qede_unconfig_intrs(qede);
988 			return (DDI_FAILURE);
989 		}
990 		vect_info->handler_added = B_TRUE;
991 	}
992 
993 	return (status);
994 }
995 
996 static void
997 qede_free_intrs(qede_t *qede)
998 {
999 	qede_intr_context_t *intr_ctx;
1000 	int i, status;
1001 
1002 	ASSERT(qede != NULL);
1003 	intr_ctx = &qede->intr_ctx;
1004 	ASSERT(intr_ctx != NULL);
1005 
1006 	if (intr_ctx->intr_hdl_array) {
1007 		for (i = 0; i < intr_ctx->intr_vect_allocated; i++) {
1008 			if (intr_ctx->intr_hdl_array[i]) {
1009 				status =
1010 				    ddi_intr_free(intr_ctx->intr_hdl_array[i]);
1011 				if (status != DDI_SUCCESS) {
1012 					cmn_err(CE_NOTE,
1013 					    "qede:%s: Failed ddi_intr_free"
1014 					    " with %s\n",
1015 					    __func__,
1016 					    qede_get_ddi_fail(status));
1017 				}
1018 			}
1019 		}
1021 	}
1022 
1023 	if (intr_ctx->intr_hdl_array) {
1024 		kmem_free(intr_ctx->intr_hdl_array,
1025 		    intr_ctx->intr_hdl_array_size);
1026 		intr_ctx->intr_hdl_array = NULL;
1027 	}
1028 
1029 	if (intr_ctx->intr_vect_info) {
1030 		kmem_free(intr_ctx->intr_vect_info,
1031 		    intr_ctx->intr_vect_info_array_size);
1032 		intr_ctx->intr_vect_info = NULL;
1033 	}
1034 }
1035 
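/*
 * Allocate MSI-X interrupt vectors: one per hardware function for the
 * slowpath plus one per fastpath ring.  If fewer vectors are available,
 * the number of fastpath rings is reduced to match.  Only MSI-X is
 * supported.
 */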
1036 static int
1037 qede_alloc_intrs(qede_t *qede)
1038 {
1039 	int status, type_supported, num_supported;
1040 	int actual, num_available, num_to_request;
1041 	dev_info_t *dip;
1042 	qede_intr_context_t *intr_ctx = &qede->intr_ctx;
1043 
1044 	dip = qede->dip;
1045 
1046 	status = ddi_intr_get_supported_types(dip, &type_supported);
1047 	if (status != DDI_SUCCESS) {
1048 		cmn_err(CE_WARN,
1049 		    "qede:%s: Failed ddi_intr_get_supported_types with %s\n",
1050 		    __func__, qede_get_ddi_fail(status));
1051 		return (status);
1052 	}
1053 	intr_ctx->intr_types_available = type_supported;
1054 
1055 	if (type_supported & DDI_INTR_TYPE_MSIX) {
1056 		intr_ctx->intr_type_in_use = DDI_INTR_TYPE_MSIX;
1057 
1058 		/*
1059 		 * get the total number of vectors
1060 		 * supported by the device
1061 		 */
1062 		status = ddi_intr_get_nintrs(qede->dip,
1063 		             DDI_INTR_TYPE_MSIX, &num_supported);
1064 		if (status != DDI_SUCCESS) {
1065 			cmn_err(CE_WARN,
1066 			    "qede:%s: Failed ddi_intr_get_nintrs with %s\n",
1067 			    __func__, qede_get_ddi_fail(status));
1068 			return (status);
1069 		}
1070 		intr_ctx->intr_vect_supported = num_supported;
1071 
1072 		/*
1073 		 * get the total number of vectors
1074 		 * available for this instance
1075 		 */
1076 		status = ddi_intr_get_navail(dip, DDI_INTR_TYPE_MSIX,
1077 		             &num_available);
1078 		if (status != DDI_SUCCESS) {
1079 			cmn_err(CE_WARN,
1080 			    "qede:%s: Failed ddi_intr_get_navail with %s\n",
1081 			    __func__, qede_get_ddi_fail(status));
1082 			return (status);
1083 		}
1084 
1085 		if ((num_available < intr_ctx->intr_vect_to_request) &&
1086 		    (num_available >= 2)) {
1087 			qede->num_fp = num_available - qede->num_hwfns;
1088 			cmn_err(CE_NOTE,
1089 			    "qede:%s: allocated %d interrupts"
1090 			    " requested was %d\n",
1091 			    __func__, num_available,
1092 			    intr_ctx->intr_vect_to_request);
1093 			intr_ctx->intr_vect_to_request = num_available;
1094 		} else if (num_available < 2) {
1095 			cmn_err(CE_WARN,
1096 			    "qede:%s: only %d MSIX vectors available\n",
1097 			    __func__, num_available);
1098 			return (DDI_FAILURE);
1099 		}
1100 
1101 		intr_ctx->intr_vect_available = num_available;
1102 		num_to_request = intr_ctx->intr_vect_to_request;
1103 		intr_ctx->intr_hdl_array_size = num_to_request *
1104 		    sizeof (ddi_intr_handle_t);
1105 		intr_ctx->intr_vect_info_array_size = num_to_request *
1106 		    sizeof (qede_vector_info_t);
1107 
1108 		/* Allocate an array big enough for maximum supported */
1109 		intr_ctx->intr_hdl_array = kmem_zalloc(
1110 		    intr_ctx->intr_hdl_array_size, KM_SLEEP);
1111 		if (intr_ctx->intr_hdl_array == NULL) {
1112 			cmn_err(CE_WARN,
1113 			    "qede:%s: Failed to allocate"
1114 			    " intr_ctx->intr_hdl_array\n",
1115 				__func__);
1116 			return (DDI_FAILURE);
1117 		}
1118 		intr_ctx->intr_vect_info = kmem_zalloc(
1119 		    intr_ctx->intr_vect_info_array_size, KM_SLEEP);
1120 		if (intr_ctx->intr_vect_info == NULL) {
1121 			cmn_err(CE_WARN,
1122 			    "qede:%s: Failed to allocate"
1123 			    " intr_ctx->intr_vect_info\n",
1124 				__func__);
1125 			goto err_exit;
1126 		}
1127 
1128 		/*
1129 		 * Use strict allocation. It will fail if we do not get
1130 		 * exactly what we want.  Later we can shift through with
1131 		 * power of two like this:
1132 		 *   for (i = intr_ctx->intr_requested; i > 0; i >>= 1)
1133 		 * (Though we would need to account for the slowpath vector)
1134 		 */
1135 		status = ddi_intr_alloc(qede->dip,
1136 			intr_ctx->intr_hdl_array,
1137 			DDI_INTR_TYPE_MSIX,
1138 			0,
1139 			num_to_request,
1140 			&actual,
1141 			DDI_INTR_ALLOC_STRICT);
1142 		if (status != DDI_SUCCESS) {
1143 			cmn_err(CE_WARN,
1144 			    "qede:%s: Failed to allocate"
1145 			    " %d interrupts with %s\n",
1146 			    __func__, num_to_request,
1147 			    qede_get_ddi_fail(status));
1148 			cmn_err(CE_WARN,
1149 			    "qede:%s: Only %d interrupts available.\n",
1150 			    __func__, actual);
1151 			goto err_exit;
1152 		}
1153 		intr_ctx->intr_vect_allocated = num_to_request;
1154 
1155 		status = ddi_intr_get_pri(intr_ctx->intr_hdl_array[0],
1156 			    &intr_ctx->intr_pri);
1157 		if (status != DDI_SUCCESS) {
1158 			cmn_err(CE_WARN,
1159 			    "qede:%s: Failed ddi_intr_get_pri with %s\n",
1160 			    __func__, qede_get_ddi_fail(status));
1161 			goto err_exit;
1162 		}
1163 
1164 		status = ddi_intr_get_cap(intr_ctx->intr_hdl_array[0],
1165 			    &intr_ctx->intr_cap);
1166 		if (status != DDI_SUCCESS) {
1167 			cmn_err(CE_WARN,
1168 			    "qede:%s: Failed ddi_intr_get_cap with %s\n",
1169 				__func__, qede_get_ddi_fail(status));
1170 			goto err_exit;
1171 		}
1172 
1173 	} else {
1174 		/* For now we only support type MSIX */
1175 		cmn_err(CE_WARN,
1176 		    "qede:%s: MSIX interrupts not supported\n",
1177 			__func__);
1178 		return (DDI_FAILURE);
1179 	}
1180 
1181 	intr_ctx->intr_mode = ECORE_INT_MODE_MSIX;
1182 	return (status);
1183 err_exit:
1184 	qede_free_intrs(qede);
1185 	return (status);
1186 }
1187 
1188 static void
1189 /* LINTED E_FUNC_ARG_UNUSED */
1190 qede_unconfig_fm(qede_t *qede)
1191 {
1192 }
1193 
1194 /* LINTED E_FUNC_ARG_UNUSED */
1195 static int
1196 qede_fm_err_cb(dev_info_t *dip, ddi_fm_error_t *err,
1197     const void *impl_data)
1198 {
1199 	pci_ereport_post(dip, err, NULL);
1200 	return (err->fme_status);
1201 }
1202 
1203 
1204 static int
1205 qede_config_fm(qede_t *qede)
1206 {
1207 	ddi_iblock_cookie_t iblk;
1208 
1209 	cmn_err(CE_NOTE, "Entered qede_config_fm\n");
1210 	qede_regs_acc_attr.devacc_attr_access = DDI_FLAGERR_ACC;
1211 	qede_desc_acc_attr.devacc_attr_access = DDI_FLAGERR_ACC;
1212 	qede_buf_acc_attr.devacc_attr_access = DDI_FLAGERR_ACC;
1213 	qede_desc_dma_attr.dma_attr_flags = DDI_DMA_FLAGERR;
1214 	qede_gen_buf_dma_attr.dma_attr_flags = DDI_DMA_FLAGERR;
1215 	qede_tx_buf_dma_attr.dma_attr_flags = DDI_DMA_FLAGERR;
1216 	qede_dma_attr_desc.dma_attr_flags = DDI_DMA_FLAGERR;
1217 	qede_dma_attr_txbuf.dma_attr_flags = DDI_DMA_FLAGERR;
1218 	qede_dma_attr_rxbuf.dma_attr_flags = DDI_DMA_FLAGERR;
1219 	qede_dma_attr_cmddesc.dma_attr_flags = DDI_DMA_FLAGERR;
1220 	qede_gen_dma_attr_desc.dma_attr_flags = DDI_DMA_FLAGERR;
1221 	qede_buf2k_dma_attr_txbuf.dma_attr_flags = DDI_DMA_FLAGERR;
1222 
1223 	ddi_fm_init(qede->dip, &qede->fm_cap, &iblk);
1224 
1225 	if (DDI_FM_EREPORT_CAP(qede->fm_cap) ||
1226 	    DDI_FM_ERRCB_CAP(qede->fm_cap)) {
1227 		pci_ereport_setup(qede->dip);
1228 	}
1229 
1230 	if (DDI_FM_ERRCB_CAP(qede->fm_cap)) {
1231 		ddi_fm_handler_register(qede->dip,
1232 		    qede_fm_err_cb, (void *)qede);
1233 	}
1234 	return (DDI_SUCCESS);
1235 }
1237 
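/*
 * Allocate, map and bind a single physically contiguous DMA area,
 * returning the kernel address, DMA cookie and the dma/access handles.
 * On any failure the partial allocation is released and DDI_ENOMEM or
 * DDI_FAILURE is returned.
 */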
1238 int
1239 qede_dma_mem_alloc(qede_t *qede,
1240     int size, uint_t dma_flags, caddr_t *address, ddi_dma_cookie_t *cookie,
1241     ddi_dma_handle_t *dma_handle, ddi_acc_handle_t *handlep,
1242     ddi_dma_attr_t *dma_attr, ddi_device_acc_attr_t *dev_acc_attr)
1243 {
1244 	int err;
1245 	uint32_t ncookies;
1246 	size_t ring_len;
1247 
1248 	*dma_handle = NULL;
1249 
1250 	if (size <= 0) {
1251 		return (DDI_ENOMEM);
1252 	}
1253 
1254 	err = ddi_dma_alloc_handle(qede->dip,
1255 	    dma_attr,
1256 	    DDI_DMA_DONTWAIT, NULL, dma_handle);
1257 	if (err != DDI_SUCCESS) {
1258 		cmn_err(CE_WARN, "!qede(%d): pci_alloc_consistent: "
1259 		    "ddi_dma_alloc_handle FAILED: %d", qede->instance, err);
1260 		*dma_handle = NULL;
1261 		return (DDI_ENOMEM);
1262 	}
1263 
1264 	err = ddi_dma_mem_alloc(*dma_handle,
1265 	    size, dev_acc_attr,
1266 	    dma_flags,
1267 	    DDI_DMA_DONTWAIT, NULL, address, &ring_len,
1268 	    handlep);
1269 	if (err != DDI_SUCCESS) {
1270 		cmn_err(CE_WARN, "!qede(%d): pci_alloc_consistent: "
1271 		    "ddi_dma_mem_alloc FAILED: %d, request size: %d",
1272 		    qede->instance, err, size);
1273 		ddi_dma_free_handle(dma_handle);
1274 		*dma_handle = NULL;
1275 		*handlep = NULL;
1276 		return (DDI_ENOMEM);
1277 	}
1278 
1279 	if (ring_len < size) {
1280 		cmn_err(CE_WARN, "!qede(%d): pci_alloc_consistent: "
1281 		    "could only allocate %lu bytes, request size: %d",
1282 		    qede->instance, (ulong_t)ring_len, size);
1283 		ddi_dma_mem_free(handlep);
1284 		ddi_dma_free_handle(dma_handle);
1285 		*dma_handle = NULL;
1286 		*handlep = NULL;
1287 		return (DDI_FAILURE);
1288 	}
1289 
1290 	(void) memset(*address, 0, size);
1291 
1292 	if (((err = ddi_dma_addr_bind_handle(*dma_handle,
1293 	    NULL, *address, ring_len,
1294 	    dma_flags,
1295 	    DDI_DMA_DONTWAIT, NULL,
1296 	    cookie, &ncookies)) != DDI_DMA_MAPPED) ||
1297 	    (ncookies != 1)) {
1298 		cmn_err(CE_WARN, "!qede(%d): pci_alloc_consistent: "
1299 		    "ddi_dma_addr_bind_handle Failed: %d",
1300 		    qede->instance, err);
1301 		ddi_dma_mem_free(handlep);
1302 		ddi_dma_free_handle(dma_handle);
1303 		*dma_handle = NULL;
1304 		*handlep = NULL;
1305 		return (DDI_FAILURE);
1306 	}
1307 
1308 	return (DDI_SUCCESS);
1309 }
1310 
1311 void
1312 qede_pci_free_consistent(ddi_dma_handle_t *dma_handle,
1313     ddi_acc_handle_t *acc_handle)
1314 {
1315 	int err;
1316 
1317 	if (*dma_handle != NULL) {
1318 		err = ddi_dma_unbind_handle(*dma_handle);
1319 		if (err != DDI_SUCCESS) {
1320 			cmn_err(CE_WARN, "!pci_free_consistent: "
1321 			    "Error unbinding memory, err %d", err);
1322 			return;
1323 		}
1324 	} else {
1325 		goto exit;
1326 	}
1327 	ddi_dma_mem_free(acc_handle);
1328 	ddi_dma_free_handle(dma_handle);
1329 exit:
1330 	*dma_handle = NULL;
1331 	*acc_handle = NULL;
1332 }
1333 
1334 static int
1335 qede_vport_stop(qede_t *qede)
1336 {
1337 	struct ecore_dev *edev = &qede->edev;
1338 	struct ecore_hwfn *p_hwfn;
1339 	int i, status = ECORE_BUSY;
1340 
1341 	for (i = 0; i < edev->num_hwfns; i++) {
1342 		p_hwfn = &edev->hwfns[i];
1343 
1344 		if (qede->vport_state[i] !=
1345 		    QEDE_VPORT_STARTED) {
1346 			qede_info(qede, "vport %d not started", i);
1347 			continue;
1348 		}
1349 
1350 		status = ecore_sp_vport_stop(p_hwfn,
1351 			p_hwfn->hw_info.opaque_fid,
1352 			i); /* vport needs fix */
1353 		if (status != ECORE_SUCCESS) {
1354 			cmn_err(CE_WARN, "!qede_vport_stop: "
1355 			    "FAILED for hwfn%d ", i);
1356 			return (DDI_FAILURE);
1357 		}
1358 		cmn_err(CE_WARN, "!qede_vport_stop: "
1359 		    "SUCCESS for hwfn%d ", i);
1360 
1361 		qede->vport_state[i] =
1362 		    QEDE_VPORT_STOPPED;
1363 	}
1364 
1365 	return (status);
1366 }
1367 
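/*
 * Build the RSS parameters for the given hwfn.  RSS is only enabled
 * when more than one fastpath ring is configured; the indirection
 * table entries are spread round-robin across the available rx rings.
 * Returns 1 if RSS should be enabled, 0 otherwise.
 */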
1368 static uint8_t
1369 qede_get_active_rss_params(qede_t *qede, u8 hwfn_id)
1370 {
1371 	struct ecore_rss_params rss_params;
1372 	qede_fastpath_t *fp;
1373 	int i;
1374 	const uint64_t hash_key[] =
1375 	{
1376 		0xbeac01fa6a42b73bULL, 0x8030f20c77cb2da3ULL,
1377 		0xae7b30b4d0ca2bcbULL, 0x43a38fb04167253dULL,
1378 		0x255b0ec26d5a56daULL
1379 	};
1380 	uint8_t enable_rss = 0;
1381 
1382 	bzero(&rss_params, sizeof (rss_params));
1383 	if (qede->num_fp > 1) {
1384 		qede_info(qede, "Configuring RSS parameters");
1385 		enable_rss = 1;
1386 	} else {
1387 		qede_info(qede, "RSS configuration not needed");
1388 		enable_rss = 0;
1389 		goto exit;
1390 	}
1391 
1392 	rss_params.update_rss_config = 1;
1393 	rss_params.rss_enable = 1;
1394 	rss_params.update_rss_capabilities = 1;
1395 	rss_params.update_rss_ind_table = 1;
1396 	rss_params.update_rss_key = 1;
1397 
1398 	rss_params.rss_caps = ECORE_RSS_IPV4 |
1399 	    ECORE_RSS_IPV6 |
1400 	    ECORE_RSS_IPV4_TCP |
1401 	    ECORE_RSS_IPV6_TCP |
1402 	    ECORE_RSS_IPV4_UDP |
1403 	    ECORE_RSS_IPV6_UDP;
1404 
1405 	rss_params.rss_table_size_log = 7; /* 2^7 = 128 */
1406 
1407 	bcopy(&hash_key[0], &rss_params.rss_key[0],
1408 		sizeof (rss_params.rss_key));
1409 
1410 	for (i = 0; i < ECORE_RSS_IND_TABLE_SIZE; i++) {
1411 		fp = &qede->fp_array[i % qede->num_fp];
1412 		rss_params.rss_ind_table[i] = (void *)(fp->rx_ring->p_cid);
1413 	}
1414 exit:
1415 	bcopy(&rss_params, &qede->rss_params[hwfn_id], sizeof (rss_params));
1416 	return (enable_rss);
1417 }
1418 
1419 static int
1420 qede_vport_update(qede_t *qede,
1421     enum qede_vport_state state)
1422 {
1423 	struct ecore_dev *edev = &qede->edev;
1424 	struct ecore_hwfn *p_hwfn;
1425 	struct ecore_sp_vport_update_params *vport_params;
1426 	struct ecore_sge_tpa_params tpa_params;
1427 	int  status = DDI_SUCCESS;
1428 	bool new_state;
1429 	uint8_t i;
1430 
1431 	cmn_err(CE_NOTE, "qede_vport_update: "
1432 	    "Enter, state = %s%s%s%s%s",
1433 	    state == QEDE_VPORT_STARTED ? "QEDE_VPORT_STARTED" : "",
1434 	    state == QEDE_VPORT_ON ? "QEDE_VPORT_ON" : "",
1435 	    state == QEDE_VPORT_OFF ? "QEDE_VPORT_OFF" : "",
1436 	    state == QEDE_VPORT_STOPPED ? "QEDE_VPORT_STOPPED" : "",
1437 	    state == QEDE_VPORT_UNKNOWN ? "QEDE_VPORT_UNKNOWN" : "");
1438 
1439 	/*
1440 	 * Update only does on and off.
1441 	 * For now we combine TX and RX
1442 	 * together.  Later we can split them
1443 	 * and set other params as well.
1444 	 */
1445 	if (state == QEDE_VPORT_ON) {
1446 	    new_state = B_TRUE;
1447 	} else if (state == QEDE_VPORT_OFF) {
1448 	    new_state = B_FALSE;
1449 	} else {
1450 		cmn_err(CE_WARN, "qede_vport_update: "
1451 		    "invalid, state = %d", state);
1452 		return (DDI_EINVAL);
1453 	}
1454 
1455 	for (i = 0; i < edev->num_hwfns; i++) {
1456 		p_hwfn = &edev->hwfns[i];
1457 		vport_params = &qede->vport_params[i];
1458 
1459 		vport_params->opaque_fid =
1460 		    p_hwfn->hw_info.opaque_fid;
1461 		vport_params->vport_id =
1462 		    i;
1463 
1464 		vport_params->update_vport_active_rx_flg =
1465 		    1;
1466                 if (new_state == B_TRUE)
1467                         vport_params->vport_active_rx_flg = 1;
1468                 else
1469                         vport_params->vport_active_rx_flg = 0;
1470 
1471 		vport_params->update_vport_active_tx_flg =
1472 		    1;
1473                 if (new_state == B_TRUE)
1474                         vport_params->vport_active_tx_flg = 1;
1475                 else
1476                         vport_params->vport_active_tx_flg = 0;
1477 
1478 		vport_params->update_inner_vlan_removal_flg =
1479 		    0;
1480 		vport_params->inner_vlan_removal_flg =
1481 		    0;
1482 		vport_params->update_default_vlan_enable_flg =
1483 		    0;
1484 		vport_params->default_vlan_enable_flg =
1485 		    0;
1486 		vport_params->update_default_vlan_flg =
1487 		    1;
1488 		vport_params->default_vlan =
1489 		    0;
1490 		vport_params->update_tx_switching_flg =
1491 		    0;
1492 		vport_params->tx_switching_flg =
1493 		    0;
1494 		vport_params->update_approx_mcast_flg =
1495 		    0;
1496 		vport_params->update_anti_spoofing_en_flg =
1497 		    0;
1498 		vport_params->anti_spoofing_en = 0;
1499 		vport_params->update_accept_any_vlan_flg =
1500 		    1;
1501 		vport_params->accept_any_vlan = 1;
1502 
1503 		vport_params->accept_flags.update_rx_mode_config = 1;
1504 		vport_params->accept_flags.update_tx_mode_config = 1;
1505 		vport_params->accept_flags.rx_accept_filter =
1506 		    ECORE_ACCEPT_BCAST |
1507 		    ECORE_ACCEPT_UCAST_UNMATCHED |
1508 		    ECORE_ACCEPT_MCAST_UNMATCHED;
1509 		vport_params->accept_flags.tx_accept_filter =
1510 		    ECORE_ACCEPT_BCAST |
1511 		    ECORE_ACCEPT_UCAST_UNMATCHED |
1512 		    ECORE_ACCEPT_MCAST_UNMATCHED;
1513 
1514 		vport_params->sge_tpa_params = NULL;
1515 
1516 		if (qede->lro_enable &&
1517 		    (new_state == B_TRUE)) {
1518 			qede_print("!%s(%d): enabling LRO ",
1519 				__func__, qede->instance);
1520 
1521 			memset(&tpa_params, 0,
1522 			    sizeof (struct ecore_sge_tpa_params));
1523 			tpa_params.max_buffers_per_cqe = 5;
1524 			tpa_params.update_tpa_en_flg = 1;
1525 			tpa_params.tpa_ipv4_en_flg = 1;
1526 			tpa_params.tpa_ipv6_en_flg = 1;
1527 			tpa_params.tpa_ipv4_tunn_en_flg = 0;
1528 			tpa_params.tpa_ipv6_tunn_en_flg = 0;
1529 			tpa_params.update_tpa_param_flg = 1;
1530 			tpa_params.tpa_pkt_split_flg = 0;
1531 			tpa_params.tpa_hdr_data_split_flg = 0;
1532 			tpa_params.tpa_gro_consistent_flg = 0;
1533 			tpa_params.tpa_max_aggs_num = ETH_TPA_MAX_AGGS_NUM;
1534 			tpa_params.tpa_max_size = 65535;
1535 			tpa_params.tpa_min_size_to_start = qede->mtu/2;
1536 			tpa_params.tpa_min_size_to_cont = qede->mtu/2;
1537 			vport_params->sge_tpa_params = &tpa_params;
1538 		}
1539 
1540 		/*
1541 		 * Get the rss_params to be configured
1542 		 */
1543 		if (qede_get_active_rss_params(qede, i /* hwfn id */)) {
1544 			vport_params->rss_params = &qede->rss_params[i];
1545 		} else {
1546 			vport_params->rss_params = NULL;
1547 		}
1548 
1549 		status = ecore_sp_vport_update(p_hwfn,
1550 		    vport_params,
1551 		    ECORE_SPQ_MODE_EBLOCK,
1552 		    NULL);
1553 
1554 		if (status != ECORE_SUCCESS) {
1555 			cmn_err(CE_WARN, "ecore_sp_vport_update: "
1556 			    "FAILED for hwfn%d with %s", i,
1557 			    qede_get_ecore_fail(status));
1558 			return (DDI_FAILURE);
1559 		}
1560 		cmn_err(CE_NOTE, "!ecore_sp_vport_update: "
1561 		    "SUCCESS for hwfn%d ", i);
1562 
1563 
1564 	}
1565 	return (DDI_SUCCESS);
1566 }
1567 
1568 
1569 static int
1570 qede_vport_start(qede_t *qede)
1571 {
1572 	struct ecore_dev *edev = &qede->edev;
1573 	struct ecore_hwfn *p_hwfn;
1574 	struct ecore_sp_vport_start_params params;
1575 	uint8_t i;
1576 	int  status = ECORE_BUSY;
1577 
1578 	for (i = 0; i < edev->num_hwfns; i++) {
1579 		p_hwfn = &edev->hwfns[i];
1580 		if ((qede->vport_state[i] !=
1581 		    QEDE_VPORT_UNKNOWN) &&
1582 		    (qede->vport_state[i] !=
1583 		    QEDE_VPORT_STOPPED)) {
1584 		    continue;
1585 		}
1586 
1587 		params.tpa_mode = ECORE_TPA_MODE_NONE;
1588 		params.remove_inner_vlan = 0;
1589 		params.tx_switching = 0;
1590 		params.handle_ptp_pkts = 0;
1591 		params.only_untagged = 0;
1592 		params.drop_ttl0 = 1;
1593 		params.max_buffers_per_cqe = 16;
1594 		params.concrete_fid = p_hwfn->hw_info.concrete_fid;
1595 		params.opaque_fid = p_hwfn->hw_info.opaque_fid;
1596 		params.vport_id = i;
1597 		params.mtu = qede->mtu;
1598 		status = ecore_sp_vport_start(p_hwfn, &params);
1599 		if (status != ECORE_SUCCESS) {
1600 			cmn_err(CE_WARN, "qede_vport_start: "
1601 			    "FAILED for hwfn%d", i);
1602 			return (DDI_FAILURE);
1603 		}
1604 		cmn_err(CE_NOTE, "!ecore_sp_vport_start: "
1605 		    "SUCCESS for hwfn%d ", i);
1606 
1607 		ecore_hw_start_fastpath(p_hwfn);
1608 		qede->vport_state[i] = QEDE_VPORT_STARTED;
1609 	}
1610 	ecore_reset_vport_stats(edev);
1611 	return (status);
1612 }
1613 
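/*
 * Publish the current rx BD and CQE producer indices to the hardware
 * rx queue producer address.
 */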
1614 void
1615 qede_update_rx_q_producer(qede_rx_ring_t *rx_ring)
1616 {
1617 	u16 bd_prod = ecore_chain_get_prod_idx(&rx_ring->rx_bd_ring);
1618 	u16 cqe_prod = ecore_chain_get_prod_idx(&rx_ring->rx_cqe_ring);
1619 	/* LINTED E_FUNC_SET_NOT_USED */
1620 	struct eth_rx_prod_data rx_prod_cmd = { 0 };
1621 
1622 
1623 	rx_prod_cmd.bd_prod = HOST_TO_LE_32(bd_prod);
1624 	rx_prod_cmd.cqe_prod = HOST_TO_LE_32(cqe_prod);
1625 	UPDATE_RX_PROD(rx_ring, rx_prod_cmd);
1626 }
1627 
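/*
 * Quiesce the data path: mark the vports inactive (QEDE_VPORT_OFF),
 * stop every started tx and rx queue, stop the vports and finally
 * halt the ecore fastpath.
 */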
1628 static int
1629 qede_fastpath_stop_queues(qede_t *qede)
1630 {
1631 	int i, j;
1632 	int status = DDI_FAILURE;
1633 	struct ecore_dev *edev;
1634 	struct ecore_hwfn *p_hwfn;
1635 	struct ecore_queue_cid *p_tx_cid, *p_rx_cid;
1636 
1637 	qede_fastpath_t *fp;
1638 	qede_rx_ring_t *rx_ring;
1639 	qede_tx_ring_t *tx_ring;
1640 
1641 	ASSERT(qede != NULL);
1642 	/* ASSERT(qede->edev != NULL); */
1643 
1644 	edev = &qede->edev;
1645 
1646 	status = qede_vport_update(qede, QEDE_VPORT_OFF);
1647 	if (status != DDI_SUCCESS) {
1648 		cmn_err(CE_WARN, "FAILED to "
1649 		    "update vports");
1650 		return (DDI_FAILURE);
1651 	}
1652 
1653 	for (i = 0; i < qede->num_fp; i++) {
1654 		fp = &qede->fp_array[i];
1655 		rx_ring = fp->rx_ring;
1656 		p_hwfn = &edev->hwfns[fp->fp_hw_eng_index];
1657 		for (j = 0; j < qede->num_tc; j++) {
1658 			tx_ring = fp->tx_ring[j];
1659 			if (tx_ring->queue_started == B_TRUE) {
1660 				cmn_err(CE_WARN, "Stopping tx queue "
1661 				    "%d:%d. ", i, j);
1662 				p_tx_cid = tx_ring->p_cid;
1663 				status = ecore_eth_tx_queue_stop(p_hwfn,
1664 					(void *)p_tx_cid);
1665 				if (status != ECORE_SUCCESS) {
1666 					cmn_err(CE_WARN, "FAILED to "
1667 			    	    	    "stop tx queue %d:%d", i, j);
1668 					return (DDI_FAILURE);
1669 				}
1670 				tx_ring->queue_started = B_FALSE;
1671 				cmn_err(CE_NOTE, "tx_ring %d:%d stopped\n", i,
1672 				    j);
1673 			}
1674 		}
1675 
1676 		if (rx_ring->queue_started == B_TRUE) {
1677 			cmn_err(CE_WARN, "Stopping rx queue "
1678 			    "%d. ", i);
1679 			p_rx_cid = rx_ring->p_cid;
1680 			status = ecore_eth_rx_queue_stop(p_hwfn,
1681 			    (void *)p_rx_cid, B_TRUE, B_FALSE);
1682 			if (status != ECORE_SUCCESS) {
1683 				cmn_err(CE_WARN, "FAILED to "
1684 			    	    "stop rx queue %d "
1685 			    	    "with ecore status %s",
1686 				    i, qede_get_ecore_fail(status));
1687 				return (DDI_FAILURE);
1688 			}
1689 			rx_ring->queue_started = B_FALSE;
1690 			cmn_err(CE_NOTE, "rx_ring%d stopped\n", i);
1691 		}
1692 	}
1693 
1694 	status = qede_vport_stop(qede);
1695 	if (status != DDI_SUCCESS) {
1696 		cmn_err(CE_WARN, "qede_vport_stop "
1697 		    "FAILED to stop vports");
1698 		return (DDI_FAILURE);
1699 	}
1700 
1701 	ecore_hw_stop_fastpath(edev);
1702 
1703 	return (DDI_SUCCESS);
1704 }
1705 
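/*
 * Bring up the data path: start the vports, start an rx queue and the
 * per-TC tx queues for every fastpath, program the tx doorbell data,
 * and finally mark the vports active (which also applies the RSS and
 * LRO settings).
 */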
1706 static int
1707 qede_fastpath_start_queues(qede_t *qede)
1708 {
1709 	int i, j;
1710 	int status = DDI_FAILURE;
1711 	struct ecore_dev *edev;
1712 	struct ecore_hwfn *p_hwfn;
1713 	struct ecore_queue_start_common_params params;
1714 	struct ecore_txq_start_ret_params tx_ret_params;
1715 	struct ecore_rxq_start_ret_params rx_ret_params;
1716 	qede_fastpath_t *fp;
1717 	qede_rx_ring_t *rx_ring;
1718 	qede_tx_ring_t *tx_ring;
1719 	dma_addr_t p_phys_table;
1720 	u16 page_cnt;
1721 
1722 	ASSERT(qede != NULL);
1723 	/* ASSERT(qede->edev != NULL); */
1724 	edev = &qede->edev;
1725 
1726 	status = qede_vport_start(qede);
1727 	if (status != DDI_SUCCESS) {
1728 		cmn_err(CE_WARN, "Failed to "
1729 		    "start vports");
1730 		return (DDI_FAILURE);
1731 	}
1732 
1733 	for (i = 0; i < qede->num_fp; i++) {
1734 		fp = &qede->fp_array[i];
1735 		rx_ring = fp->rx_ring;
1736 		p_hwfn = &edev->hwfns[fp->fp_hw_eng_index];
1737 
1738 		params.vport_id = fp->vport_id;
1739 		params.queue_id = fp->rx_queue_index;
1740 		params.stats_id = fp->stats_id;
1741 		params.p_sb = fp->sb_info;
1742 		params.sb_idx = RX_PI;
1743 		p_phys_table = ecore_chain_get_pbl_phys(&rx_ring->rx_cqe_ring);
1744 		page_cnt = ecore_chain_get_page_cnt(&rx_ring->rx_cqe_ring);
1745 
1746 		status = ecore_eth_rx_queue_start(p_hwfn,
1747 		    p_hwfn->hw_info.opaque_fid,
1748 		    &params,
1749 		    qede->rx_buf_size,
1750 		    rx_ring->rx_bd_ring.p_phys_addr,
1751 		    p_phys_table,
1752 		    page_cnt,
1753 		    &rx_ret_params);
1754 
1755 		rx_ring->hw_rxq_prod_addr = rx_ret_params.p_prod;
1756 		rx_ring->p_cid = rx_ret_params.p_handle;
1757 		if (status != DDI_SUCCESS) {
1758 			cmn_err(CE_WARN, "ecore_eth_rx_queue_start "
1759 			    "FAILED for rxq%d", i);
1760 			return (DDI_FAILURE);
1761 		}
1762 		rx_ring->hw_cons_ptr = &fp->sb_info->sb_virt->pi_array[RX_PI];
1763 
1764 		OSAL_MSLEEP(20);
1765 		*rx_ring->hw_cons_ptr = 0;
1766 
1767 		qede_update_rx_q_producer(rx_ring);
1768 		rx_ring->queue_started = B_TRUE;
1769 		cmn_err(CE_NOTE, "rx_ring%d started\n", i);
1770 
1771 		for (j = 0; j < qede->num_tc; j++) {
1772 			tx_ring = fp->tx_ring[j];
1773 
1774 			params.vport_id = fp->vport_id;
1775 			params.queue_id = tx_ring->tx_queue_index;
1776 			params.stats_id = fp->stats_id;
1777 			params.p_sb = fp->sb_info;
1778 			params.sb_idx = TX_PI(j);
1779 
1780 			p_phys_table = ecore_chain_get_pbl_phys(
1781 			    &tx_ring->tx_bd_ring);
1782 			page_cnt = ecore_chain_get_page_cnt(
1783 			    &tx_ring->tx_bd_ring);
1784 			status = ecore_eth_tx_queue_start(p_hwfn,
1785 			    p_hwfn->hw_info.opaque_fid,
1786 			    &params,
1787 			    0,
1788 			    p_phys_table,
1789 			    page_cnt,
1790 			    &tx_ret_params);
1791 			tx_ring->doorbell_addr = tx_ret_params.p_doorbell;
1792 			tx_ring->p_cid = tx_ret_params.p_handle;
1793 			if (status != DDI_SUCCESS) {
1794 				cmn_err(CE_WARN, "ecore_eth_tx_queue_start "
1795 				    "FAILED for txq%d:%d", i, j);
1796 				return (DDI_FAILURE);
1797 			}
1798 			tx_ring->hw_cons_ptr =
1799 			    &fp->sb_info->sb_virt->pi_array[TX_PI(j)];
1800 			/* LINTED E_CONSTANT_CONDITION */
1801 			SET_FIELD(tx_ring->tx_db.data.params,
1802 			    ETH_DB_DATA_DEST, DB_DEST_XCM);
1803 			/* LINTED E_CONSTANT_CONDITION */
1804 			SET_FIELD(tx_ring->tx_db.data.params,
1805 			    ETH_DB_DATA_AGG_CMD, DB_AGG_CMD_SET);
1806 			/* LINTED E_CONSTANT_CONDITION */
1807 			SET_FIELD(tx_ring->tx_db.data.params,
1808 			    ETH_DB_DATA_AGG_VAL_SEL, DQ_XCM_ETH_TX_BD_PROD_CMD);
1809 			tx_ring->tx_db.data.agg_flags = DQ_XCM_ETH_DQ_CF_CMD;
1810 			tx_ring->queue_started = B_TRUE;
1811 			cmn_err(CE_NOTE, "tx_ring %d:%d started\n", i, j);
1812 		}
1813 	}
1814 
1815 	status = qede_vport_update(qede, QEDE_VPORT_ON);
1816 	if (status != DDI_SUCCESS) {
1817 		cmn_err(CE_WARN, "Failed to "
1818 		    "update vports");
1819 		return (DDI_FAILURE);
1820 	}
1821 	return (status);
1822 }
1823 
1824 static void
1825 qede_free_mag_elem(qede_rx_ring_t *rx_ring, qede_rx_buffer_t *rx_buffer,
1826     struct eth_rx_bd *bd)
1827 {
1828 	int i;
1829 
1830 	if (bd != NULL) {
1831 		bzero(bd, sizeof (*bd));
1832 	}
1833 
1834 	if (rx_buffer->mp != NULL) {
1835 		freemsg(rx_buffer->mp);
1836 		rx_buffer->mp = NULL;
1837 	}
1838 }
1839 
1840 static void
1841 qede_free_lro_rx_buffers(qede_rx_ring_t *rx_ring)
1842 {
1843 	int i, j;
1844 	qede_lro_info_t *lro_info;
1845 
1846 	for (i = 0; i < ETH_TPA_MAX_AGGS_NUM; i++) {
1847 		lro_info = &rx_ring->lro_info[i];
1848 		if (lro_info->agg_state == QEDE_AGG_STATE_NONE) {
1849 			continue;
1850 		}
1851 		for (j = 0; j < QEDE_MAX_BD_PER_AGG; j++) {
1852 			if (lro_info->rx_buffer[j] == NULL) {
1853 				break;
1854 			}
1855 			qede_recycle_copied_rx_buffer(
1856 			    lro_info->rx_buffer[j]);
1857 			lro_info->rx_buffer[j] = NULL;
1858 		}
1859 		lro_info->agg_state = QEDE_AGG_STATE_NONE;
1860 	}
1861 }
1862 
1863 static void
1864 qede_free_rx_buffers_legacy(qede_t *qede, qede_rx_buf_area_t *rx_buf_area)
1865 {
1866 	int i, j;
1867 	u32 ref_cnt, bufs_per_page;
1868 	qede_rx_buffer_t *rx_buffer, *first_rx_buf_in_page = 0;
1869 	qede_rx_ring_t *rx_ring;
1870 	bool free_rx_buffer;
1871 
1872 	if (rx_buf_area) {
1873 		rx_ring = rx_buf_area->rx_ring;
1874 		bufs_per_page = rx_buf_area->bufs_per_page;
1875 		rx_buffer = &rx_buf_area->rx_buf_pool[0];
1876 
1877 		for (i = 0; i < rx_ring->rx_buf_count; i += bufs_per_page) {
1878 			free_rx_buffer = B_TRUE;
1879 			for (j = 0; j < bufs_per_page; j++) {
1880 				if (!j) {
1881 					first_rx_buf_in_page = rx_buffer;
1882 				}
1883 				if (rx_buffer->ref_cnt != 0) {
1884 					ref_cnt = atomic_dec_32_nv(
1885 					    &rx_buffer->ref_cnt);
1886 					if (ref_cnt == 0) {
1887 						/*
1888 						 * Buffer is now
1889 						 * completely free
1890 						 */
1891 						if (rx_buffer->mp) {
1892 							freemsg(rx_buffer->mp);
1893 							rx_buffer->mp = NULL;
1894 						}
1895 					} else {
1896 						/*
1897 						 * Since Buffer still
1898 						 * held up in Stack,
1899 						 * we cant free the whole page
1900 						 */
1901 						free_rx_buffer = B_FALSE;
1902 					}
1903 				}
1904 				rx_buffer++;
1905 			}
1906 
1907 			if (free_rx_buffer == B_TRUE) {
1908 				qede_pci_free_consistent(
1909 				    &first_rx_buf_in_page->dma_info.dma_handle,
1910 				    &first_rx_buf_in_page->dma_info.acc_handle);
1911 			}
1912 		}
1913 
1914 		/*
1915 		 * If no more buffers are held by the stack,
1916 		 * free the buffer pools.
1917 		 */
1918 		if (rx_buf_area->buf_upstream == 0) {
1919 			mutex_destroy(&rx_buf_area->active_buf_list.lock);
1920 			mutex_destroy(&rx_buf_area->passive_buf_list.lock);
1921 
1922 			kmem_free(rx_buf_area, sizeof (qede_rx_buf_area_t));
1923 			rx_buf_area = NULL;
1924 			if (atomic_cas_32(&qede->detach_unsafe, 2, 2)) {
1925 				atomic_dec_32(&qede->detach_unsafe);
1926 			}
1927 
1928 		}
1929 	}
1930 }
1931 
1932 
1933 static void
1934 qede_free_rx_buffers(qede_t *qede, qede_rx_ring_t *rx_ring)
1935 {
1936 	qede_rx_buf_area_t *rx_buf_area = rx_ring->rx_buf_area;
1937 
1938 	qede_free_lro_rx_buffers(rx_ring);
1939 	qede_free_rx_buffers_legacy(qede, rx_buf_area);
1939 }
1940 
1941 static void
1942 qede_free_rx_ring_phys(qede_t *qede, qede_fastpath_t *fp)
1943 {
1944 	qede_rx_ring_t *rx_ring;
1945 
1946 	ASSERT(qede != NULL);
1947 	ASSERT(fp != NULL);
1948 
1949 
1950 	rx_ring = fp->rx_ring;
1951 	rx_ring->rx_buf_area->inactive = 1;
1952 
1953 	qede_free_rx_buffers(qede, rx_ring);
1954 
1955 
1956 	if (rx_ring->rx_bd_ring.p_virt_addr) {
1957 		ecore_chain_free(&qede->edev, &rx_ring->rx_bd_ring);
1958 		rx_ring->rx_bd_ring.p_virt_addr = NULL;
1959 	}
1960 
1961 	if (rx_ring->rx_cqe_ring.p_virt_addr) {
1962 		ecore_chain_free(&qede->edev, &rx_ring->rx_cqe_ring);
1963 		rx_ring->rx_cqe_ring.p_virt_addr = NULL;
1964 		if (rx_ring->rx_cqe_ring.pbl_sp.p_virt_table) {
1965 			rx_ring->rx_cqe_ring.pbl_sp.p_virt_table = NULL;
1966 		}
1967 	}
1968 	rx_ring->hw_cons_ptr = NULL;
1969 	rx_ring->hw_rxq_prod_addr = NULL;
1970 	rx_ring->sw_rx_cons = 0;
1971 	rx_ring->sw_rx_prod = 0;
1972 
1973 }
1974 
1975 
1976 static int
1977 qede_init_bd(qede_t *qede, qede_rx_ring_t *rx_ring)
1978 {
1979 	struct eth_rx_bd *bd = NULL;
1980 	int ret = DDI_SUCCESS;
1981 	int i;
1982 	qede_rx_buffer_t *rx_buffer;
1983 	qede_rx_buf_area_t *rx_buf_area = rx_ring->rx_buf_area;
1984 	qede_rx_buf_list_t *active_buf_list = &rx_buf_area->active_buf_list;
1985 
1986 	for (i = 0; i < rx_ring->rx_buf_count; i++) {
1987 		rx_buffer = &rx_buf_area->rx_buf_pool[i];
1988 		active_buf_list->buf_list[i] = rx_buffer;
1989 		active_buf_list->num_entries++;
1990 		bd = ecore_chain_produce(&rx_ring->rx_bd_ring);
1991 		if (bd == NULL) {
1992 			qede_print_err("!%s(%d): invalid NULL bd in "
1993 			    "rx_bd_ring", __func__, qede->instance);
1994 			ret = DDI_FAILURE;
1995 			goto err;
1996 		}
1997 
1998 		bd->addr.lo = HOST_TO_LE_32(U64_LO(
1999 				rx_buffer->dma_info.phys_addr));
2000 		bd->addr.hi = HOST_TO_LE_32(U64_HI(
2001 				rx_buffer->dma_info.phys_addr));
2002 
2003 	}
2004 	active_buf_list->tail = 0;
2005 err:
2006 	return (ret);
2007 }
2008 
2009 
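/*
 * Rx buffers cycle between two lists: the active list holds buffers that
 * are (or are about to be) posted to the firmware, while the passive list
 * collects buffers returned by the stack until they can be re-posted by
 * qede_replenish_rx_buffers(). Taking a buffer from the active list bumps
 * its ref_cnt and hands its attached mblk up to the OS.
 */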
2010 qede_rx_buffer_t *
2011 qede_get_from_active_list(qede_rx_ring_t *rx_ring,
2012     uint32_t *num_entries)
2013 {
2014 	qede_rx_buffer_t *rx_buffer;
2015 	qede_rx_buf_list_t *active_buf_list =
2016 	    &rx_ring->rx_buf_area->active_buf_list;
2017 	u16 head = active_buf_list->head;
2018 
2019 	rx_buffer = active_buf_list->buf_list[head];
2020 	active_buf_list->buf_list[head] = NULL;
2021 	head = (head + 1) & RX_RING_MASK;
2022 
2023 	if (rx_buffer) {
2024 		atomic_dec_32(&active_buf_list->num_entries);
2025 		atomic_inc_32(&rx_ring->rx_buf_area->buf_upstream);
2026 		atomic_inc_32(&rx_buffer->ref_cnt);
2027 		rx_buffer->buf_state = RX_BUF_STATE_WITH_OS;
2028 
2029 		if (rx_buffer->mp == NULL) {
2030 			rx_buffer->mp =
2031 			    desballoc(rx_buffer->dma_info.virt_addr,
2032 			    rx_ring->rx_buf_size, 0, &rx_buffer->recycle);
2033 		}
2034 	}
2035 
2036 	*num_entries = active_buf_list->num_entries;
2037 	active_buf_list->head = head;
2038 
2039 	return (rx_buffer);
2040 }
2041 
2042 qede_rx_buffer_t *
2043 qede_get_from_passive_list(qede_rx_ring_t *rx_ring)
2044 {
2045 	qede_rx_buf_list_t *passive_buf_list =
2046 	    &rx_ring->rx_buf_area->passive_buf_list;
2047 	qede_rx_buffer_t *rx_buffer;
2048 	u32 head;
2049 
2050 	mutex_enter(&passive_buf_list->lock);
2051 	head = passive_buf_list->head;
2052 	if (passive_buf_list->buf_list[head] == NULL) {
2053 		mutex_exit(&passive_buf_list->lock);
2054 		return (NULL);
2055 	}
2056 
2057 	rx_buffer = passive_buf_list->buf_list[head];
2058 	passive_buf_list->buf_list[head] = NULL;
2059 
2060 	passive_buf_list->head = (passive_buf_list->head + 1) & RX_RING_MASK;
2061 	mutex_exit(&passive_buf_list->lock);
2062 
2063 	atomic_dec_32(&passive_buf_list->num_entries);
2064 
2065 	return (rx_buffer);
2066 }
2067 
2068 void
2069 qede_put_to_active_list(qede_rx_ring_t *rx_ring, qede_rx_buffer_t *rx_buffer)
2070 {
2071 	qede_rx_buf_list_t *active_buf_list =
2072 	    &rx_ring->rx_buf_area->active_buf_list;
2073 	u16 tail = active_buf_list->tail;
2074 
2075 	active_buf_list->buf_list[tail] = rx_buffer;
2076 	tail = (tail + 1) & RX_RING_MASK;
2077 
2078 	active_buf_list->tail = tail;
2079 	atomic_inc_32(&active_buf_list->num_entries);
2080 }
2081 
2082 void
2083 qede_replenish_rx_buffers(qede_rx_ring_t *rx_ring)
2084 {
2085 	qede_rx_buffer_t *rx_buffer;
2086 	int count = 0;
2087 	struct eth_rx_bd *bd;
2088 
2089 	/*
2090 	 * Only replenish when we have at least
2091 	 * 1/4th of the ring to do.  We don't want
2092 	 * to incur many lock contentions and
2093 	 * cycles for just a few buffers.
2094 	 * We don't bother with the passive area lock
2095 	 * here because we're just getting an
2096 	 * estimate.  Also, we only pull from
2097 	 * the passive list in this function.
2098 	 */
2099 
2100 	/*
2101 	 * Use a replenish lock because we can do the
2102 	 * replenish operation at the end of
2103 	 * processing the rx_ring, but also when
2104 	 * we get buffers back from the upper
2105 	 * layers.
2106 	 */
2107 	if (mutex_tryenter(&rx_ring->rx_replen_lock) == 0) {
2108 		qede_info(rx_ring->qede, "!%s(%d): Failed to take"
2109 			" replenish_lock",
2110 			__func__, rx_ring->qede->instance);
2111 		return;
2112 	}
2113 
2114 	rx_buffer = qede_get_from_passive_list(rx_ring);
2115 
2116 	while (rx_buffer != NULL) {
2117 		bd = ecore_chain_produce(&rx_ring->rx_bd_ring);
2118 		if (bd == NULL) {
2119 			qede_info(rx_ring->qede, "!%s(%d): bd = null",
2120 				__func__, rx_ring->qede->instance);
2121 			qede_put_to_passive_list(rx_ring, rx_buffer);
2122 			break;
2123 		}
2124 
2125 		bd->addr.lo = HOST_TO_LE_32(U64_LO(
2126 				rx_buffer->dma_info.phys_addr));
2127 		bd->addr.hi = HOST_TO_LE_32(
2128 				U64_HI(rx_buffer->dma_info.phys_addr));
2129 
2130 		/*
2131 		 * Put the buffer in active list since it will be
2132 		 * posted to fw now
2133 		 */
2134 		qede_put_to_active_list(rx_ring, rx_buffer);
2135 		rx_buffer->buf_state = RX_BUF_STATE_WITH_FW;
2136 		count++;
2137 		rx_buffer = qede_get_from_passive_list(rx_ring);
2138 	}
2139 	mutex_exit(&rx_ring->rx_replen_lock);
2140 }
2141 
2142 /*
2143  * Put the rx_buffer to the passive_buf_list
2144  */
2145 int
2146 qede_put_to_passive_list(qede_rx_ring_t *rx_ring, qede_rx_buffer_t *rx_buffer)
2147 {
2148 	qede_rx_buf_list_t *passive_buf_list =
2149 	    &rx_ring->rx_buf_area->passive_buf_list;
2150 	qede_rx_buf_area_t *rx_buf_area = rx_ring->rx_buf_area;
2151 	int tail = 0;
2152 
2153 	mutex_enter(&passive_buf_list->lock);
2154 
2155 	tail = passive_buf_list->tail;
2156 	passive_buf_list->tail = (passive_buf_list->tail + 1) & RX_RING_MASK;
2157 
2158 	rx_buf_area->passive_buf_list.buf_list[tail] = rx_buffer;
2159 	atomic_inc_32(&passive_buf_list->num_entries);
2160 
2161 	if (passive_buf_list->num_entries > rx_ring->rx_buf_count) {
2162 		/* Sanity check */
2163 		qede_info(rx_ring->qede, "ERROR: num_entries (%d)"
2164 		    " > max count (%d)",
2165 		    passive_buf_list->num_entries,
2166 		    rx_ring->rx_buf_count);
2167 	}
2168 	mutex_exit(&passive_buf_list->lock);
2169 	return (passive_buf_list->num_entries);
2170 }
2171 
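/*
 * desballoc() free_func callback, invoked when the stack frees an mblk
 * that points into one of our rx buffers. A fresh mblk is allocated for
 * the buffer and its ref_cnt is dropped: a result of 1 means the buffer
 * belongs to the current buffer area and goes back on the passive list
 * (triggering a replenish once enough buffers have accumulated); a result
 * of 0 means the buffer came from a previous plumb of the interface, so
 * it is freed, and eventually its buffer area is freed as well.
 */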
2172 void
2173 qede_recycle_rx_buffer(char *arg)
2174 {
2175 	/* LINTED E_BAD_PTR_CAST_ALIGN */
2176 	qede_rx_buffer_t *rx_buffer = (qede_rx_buffer_t *)arg;
2177 	qede_rx_ring_t *rx_ring = rx_buffer->rx_ring;
2178 	qede_rx_buf_area_t *rx_buf_area = rx_buffer->rx_buf_area;
2179 	qede_t *qede = rx_ring->qede;
2180 	u32 buf_upstream = 0, ref_cnt;
2181 	u32 num_entries;
2182 
2183 	if (rx_buffer->ref_cnt == 0) {
2184 		return;
2185 	}
2186 
2187 	/*
2188 	 * Since the data buffer associated with the mblk is freed
2189 	 * by the upper layer, allocate it again so that it carries
2190 	 * the proper free_func pointer.
2191 	 */
2192 	rx_buffer->mp = desballoc(rx_buffer->dma_info.virt_addr,
2193 	    rx_ring->rx_buf_size, 0, &rx_buffer->recycle);
2194 
2195 	ref_cnt = atomic_dec_32_nv(&rx_buffer->ref_cnt);
2196 	if (ref_cnt == 1) {
2197 		/* Put the buffer into passive_buf_list to be reused */
2198 		num_entries = qede_put_to_passive_list(rx_ring, rx_buffer);
2199 		if (num_entries >= 32) {
2200 			if (mutex_tryenter(&rx_ring->rx_lock) != 0) {
2201 				qede_replenish_rx_buffers(rx_ring);
2202 				qede_update_rx_q_producer(rx_ring);
2203 				mutex_exit(&rx_ring->rx_lock);
2204 			}
2205 		}
2206 	} else if (ref_cnt == 0) {
2207 		/*
2208 		 * This is a buffer from a previous load instance of
2209 		 * rx_buf_area. Free the rx_buffer and, if no more
2210 		 * buffers are upstream from this rx_buf_area instance,
2211 		 * free the rx_buf_area as well.
2212 		 */
2213 		if (rx_buffer->mp != NULL) {
2214 			freemsg(rx_buffer->mp);
2215 			rx_buffer->mp = NULL;
2216 		}
2217 		mutex_enter(&qede->drv_lock);
2218 
2219 		buf_upstream = atomic_cas_32(&rx_buf_area->buf_upstream, 1, 1);
2220 		if (buf_upstream >= 1) {
2221 			atomic_dec_32(&rx_buf_area->buf_upstream);
2222 		}
2223 		if (rx_buf_area->inactive && (rx_buf_area->buf_upstream == 0)) {
2224 			qede_free_rx_buffers_legacy(qede, rx_buf_area);
2225 		}
2226 
2227 		mutex_exit(&qede->drv_lock);
2228 	} else {
2229 		/* Sanity check */
2230 		qede_info(rx_ring->qede, "rx_buffer %p"
2231 		    " ref_cnt %d is invalid",
2232 		    rx_buffer, ref_cnt);
2233 	}
2234 }
2235 
2236 void
2237 qede_recycle_copied_rx_buffer(qede_rx_buffer_t *rx_buffer)
2238 {
2239 	qede_rx_ring_t *rx_ring = rx_buffer->rx_ring;
2240 	qede_rx_buf_area_t *rx_buf_area = rx_buffer->rx_buf_area;
2241 	qede_t *qede = rx_ring->qede;
2242 	u32 buf_upstream = 0, ref_cnt;
2243 
2244 	if (rx_buffer->ref_cnt == 0) {
2245 		/*
2246 		 * This can happen if the buffer is being freed
2247 		 * in the stop routine.
2248 		 */
2249 		qede_info(qede, "!%s(%d): rx_buffer->ref_cnt = 0",
2250 		    __func__, qede->instance);
2251 		return;
2252 	}
2253 
2254 	buf_upstream = atomic_cas_32(&rx_buf_area->buf_upstream, 1, 1);
2255 	if (buf_upstream >= 1) {
2256 		atomic_dec_32(&rx_buf_area->buf_upstream);
2257 	}
2258 
2259 	/*
2260 	 * The data buffer associated with the mblk may have been freed
2261 	 * by the upper layer, in which case it needs a fresh mblk with
2262 	 * the proper free_func pointer. We may also be recycling a
2263 	 * buffer that was copied, in which case the mp is still intact.
2265 	 */
2266 
2267 	ref_cnt = atomic_dec_32_nv(&rx_buffer->ref_cnt);
2268 	if (ref_cnt == 1) {
2269 		/* Put the buffer into passive_buf_list to be reused */
2270 		qede_put_to_passive_list(rx_ring, rx_buffer);
2271 	} else if (ref_cnt == 0) {
2272 		/*
2273 		 * This is a buffer from a previous load instance of
2274 		 * rx_buf_area. Free the rx_buffer and, if no more
2275 		 * buffers are upstream from this rx_buf_area instance,
2276 		 * free the rx_buf_area as well.
2277 		 */
2278 		qede_info(rx_ring->qede, "Free up rx_buffer %p, index %d"
2279 		    " ref_cnt %d from a previous driver iteration",
2280 		    rx_buffer, rx_buffer->index, ref_cnt);
2281 		if (rx_buffer->mp != NULL) {
2282 			freemsg(rx_buffer->mp);
2283 			rx_buffer->mp = NULL;
2284 		}
2285 
2286 		if (rx_buf_area->inactive && (rx_buf_area->buf_upstream == 0)) {
2287 			mutex_enter(&qede->drv_lock);
2288 			qede_free_rx_buffers_legacy(qede, rx_buf_area);
2289 			mutex_exit(&qede->drv_lock);
2290 		}
2291 	} else {
2292 		/* Sanity check */
2293 		qede_info(rx_ring->qede, "rx_buffer %p"
2294 		    " ref_cnt %d is invalid",
2295 		    rx_buffer, ref_cnt);
2296 	}
2297 }
2298 
2299 
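/*
 * Allocate the rx data buffers for a ring. When the rx buffer size is
 * smaller than the page size, one DMA-consistent page is allocated and
 * sliced into bufs_per_page buffers that share the same DMA and access
 * handles, each at its own offset. Every buffer gets a desballoc()'ed
 * mblk wired to qede_recycle_rx_buffer() so it can be reclaimed when the
 * stack is done with it.
 */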
2300 static int
2301 qede_alloc_rx_buffers(qede_t *qede, qede_rx_ring_t *rx_ring)
2302 {
2303 	int ret = DDI_SUCCESS, i, j;
2304 	qede_rx_buffer_t *rx_buffer;
2305 	qede_rx_buf_area_t *rx_buf_area = rx_ring->rx_buf_area;
2306 	u32 bufs_per_page, buf_size;
2307 	int page_size = (int)ddi_ptob(qede->dip, 1);
2308 	qede_dma_info_t *dma_info;
2309 	ddi_dma_cookie_t temp_cookie;
2310 	int allocated = 0;
2311 	u64 dma_addr;
2312 	u8 *vaddr;
2313 	ddi_dma_handle_t dma_handle;
2314 	ddi_acc_handle_t acc_handle;
2315 
2316 	if (rx_ring->rx_buf_size > page_size) {
2317 		bufs_per_page = 1;
2318 		buf_size = rx_ring->rx_buf_size;
2319 	} else {
2320 		bufs_per_page = page_size / DEFAULT_RX_BUF_SIZE;
2322 		buf_size = page_size;
2323 	}
2324 
2325 	rx_buffer = &rx_buf_area->rx_buf_pool[0];
2326 	rx_buf_area->bufs_per_page = bufs_per_page;
2327 
2328 	mutex_init(&rx_buf_area->active_buf_list.lock, NULL,
2329 	    MUTEX_DRIVER, 0);
2330 	mutex_init(&rx_buf_area->passive_buf_list.lock, NULL,
2331 	    MUTEX_DRIVER, 0);
2332 
2333 	for (i = 0; i < rx_ring->rx_buf_count; i += bufs_per_page) {
2334 		dma_info = &rx_buffer->dma_info;
2335 
2336 		ret = qede_dma_mem_alloc(qede,
2337 			buf_size,
2338 			DDI_DMA_READ | DDI_DMA_STREAMING | DDI_DMA_CONSISTENT,
2339 			(caddr_t *)&dma_info->virt_addr,
2340 			&temp_cookie,
2341 			&dma_info->dma_handle,
2342 			&dma_info->acc_handle,
2343 			&qede_dma_attr_rxbuf,
2344 			&qede_buf_acc_attr);
2345 		if (ret != DDI_SUCCESS) {
2346 			goto err;
2347 		}
2348 
2349 		allocated++;
2350 		vaddr = dma_info->virt_addr;
2351 		dma_addr = temp_cookie.dmac_laddress;
2352 		dma_handle = dma_info->dma_handle;
2353 		acc_handle = dma_info->acc_handle;
2354 
2355 		for (j = 0; j < bufs_per_page; j++) {
2356 			dma_info = &rx_buffer->dma_info;
2357 			dma_info->virt_addr = vaddr;
2358 			dma_info->phys_addr = dma_addr;
2359 			dma_info->dma_handle = dma_handle;
2360 			dma_info->acc_handle = acc_handle;
2361 			dma_info->offset = j * rx_ring->rx_buf_size;
2362 			/* Populate the recycle func and arg for the buffer */
2363 			rx_buffer->recycle.free_func = qede_recycle_rx_buffer;
2364 			rx_buffer->recycle.free_arg = (caddr_t)rx_buffer;
2365 
2366 			rx_buffer->mp = desballoc(dma_info->virt_addr,
2367 			    rx_ring->rx_buf_size, 0, &rx_buffer->recycle);
2369 			if (rx_buffer->mp == NULL) {
2370 				qede_warn(qede, "desballoc() failed, index %d",
2371 				     i);
2372 			}
2373 			rx_buffer->rx_ring = rx_ring;
2374 			rx_buffer->rx_buf_area = rx_buf_area;
2375 			rx_buffer->index = i + j;
2376 			rx_buffer->ref_cnt = 1;
2377 			rx_buffer++;
2378 
2379 			vaddr += rx_ring->rx_buf_size;
2380 			dma_addr += rx_ring->rx_buf_size;
2381 		}
2382 		rx_ring->sw_rx_prod++;
2383 	}
2384 
2385 	/*
2386 	 * Fill the rx_bd_ring with the allocated
2387 	 * buffers
2388 	 */
2389 	ret = qede_init_bd(qede, rx_ring);
2390 	if (ret != DDI_SUCCESS) {
2391 		goto err;
2392 	}
2393 
2394 	rx_buf_area->buf_upstream = 0;
2395 
2396 	return (ret);
2397 err:
2398 	qede_free_rx_buffers(qede, rx_ring);
2399 	return (ret);
2400 }
2401 
2402 static int
2403 qede_alloc_rx_ring_phys(qede_t *qede, qede_fastpath_t *fp)
2404 {
2405 	qede_rx_ring_t *rx_ring;
2406 	qede_rx_buf_area_t *rx_buf_area;
2408 
2409 	ASSERT(qede != NULL);
2410 	ASSERT(fp != NULL);
2411 
2412 	rx_ring = fp->rx_ring;
2413 
2414 	atomic_inc_32(&qede->detach_unsafe);
2415 	/*
2416 	 * Allocate rx_buf_area for the plumb instance
2417 	 */
2418 	rx_buf_area = kmem_zalloc(sizeof (*rx_buf_area), KM_SLEEP);
2419 	if (rx_buf_area == NULL) {
2420 		qede_info(qede, "!%s(%d): Cannot alloc rx_buf_area",
2421 			__func__, qede->instance);
2422 		return (DDI_FAILURE);
2423 	}
2424 
2425 	rx_buf_area->inactive = 0;
2426 	rx_buf_area->rx_ring = rx_ring;
2427 	rx_ring->rx_buf_area = rx_buf_area;
2428 	/* Rx Buffer descriptor queue */
2429 	if (ecore_chain_alloc(&qede->edev,
2430 			ECORE_CHAIN_USE_TO_CONSUME_PRODUCE,
2431 			ECORE_CHAIN_MODE_NEXT_PTR,
2432 			ECORE_CHAIN_CNT_TYPE_U16,
2433 			qede->rx_ring_size,
2434 			sizeof (struct eth_rx_bd),
2435 			&rx_ring->rx_bd_ring,
2436 			NULL) != ECORE_SUCCESS) {
2437 		cmn_err(CE_WARN, "Failed to allocate "
2438 		    "ecore cqe chain");
2439 		    "ecore rx bd chain");
2440 	}
2441 
2442 	/* Rx Completion Descriptor queue */
2443 	if (ecore_chain_alloc(&qede->edev,
2444 			ECORE_CHAIN_USE_TO_CONSUME,
2445 			ECORE_CHAIN_MODE_PBL,
2446 			ECORE_CHAIN_CNT_TYPE_U16,
2447 			qede->rx_ring_size,
2448 			sizeof (union eth_rx_cqe),
2449 			&rx_ring->rx_cqe_ring,
2450 			NULL) != ECORE_SUCCESS) {
2451 		cmn_err(CE_WARN, "Failed to allocate "
2452 		    "ecore rx cqe chain");
2453 		return (DDI_FAILURE);
2454 	}
2455 
2456 	/* Rx Data buffers */
2457 	if (qede_alloc_rx_buffers(qede, rx_ring) != DDI_SUCCESS) {
2458 		qede_print_err("!%s(%d): Failed to alloc rx buffers",
2459 		    __func__, qede->instance);
2460 		return (DDI_FAILURE);
2461 	}
2462 	return (DDI_SUCCESS);
2463 }
2464 
2465 static void
2466 qede_free_tx_bd_ring(qede_t *qede, qede_fastpath_t *fp)
2467 {
2468 	int i;
2469 	qede_tx_ring_t *tx_ring;
2470 
2471 	ASSERT(qede != NULL);
2472 	ASSERT(fp != NULL);
2473 
2474 	for (i = 0; i < qede->num_tc; i++) {
2475 		tx_ring = fp->tx_ring[i];
2476 
2477 		if (tx_ring->tx_bd_ring.p_virt_addr) {
2478 			ecore_chain_free(&qede->edev, &tx_ring->tx_bd_ring);
2479 			tx_ring->tx_bd_ring.p_virt_addr = NULL;
2480 		}
2481 		tx_ring->hw_cons_ptr = NULL;
2482 		tx_ring->sw_tx_cons = 0;
2483 		tx_ring->sw_tx_prod = 0;
2484 
2485 	}
2486 }
2487 
2488 static u32
2489 qede_alloc_tx_bd_ring(qede_t *qede, qede_tx_ring_t *tx_ring)
2490 {
2491 	u32 ret = 0;
2492 
2493 	ret = ecore_chain_alloc(&qede->edev,
2494 	    ECORE_CHAIN_USE_TO_CONSUME_PRODUCE,
2495 	    ECORE_CHAIN_MODE_PBL,
2496 	    ECORE_CHAIN_CNT_TYPE_U16,
2497 	    tx_ring->bd_ring_size,
2498 	    sizeof (union eth_tx_bd_types),
2499 	    &tx_ring->tx_bd_ring,
2500 	    NULL);
2501 	if (ret) {
2502 		cmn_err(CE_WARN, "!%s(%d): Failed to alloc tx bd chain",
2503 		    __func__, qede->instance);
2504 		goto error;
2505 	}
2506 
2507 
2508 error:
2509 	return (ret);
2510 }
2511 
2512 static void
2513 qede_free_tx_bcopy_buffers(qede_tx_ring_t *tx_ring)
2514 {
2515 	qede_tx_bcopy_pkt_t *bcopy_pkt;
2516 	int i;
2517 
2518 	for (i = 0; i < tx_ring->tx_ring_size; i++) {
2519 		bcopy_pkt = &tx_ring->bcopy_list.bcopy_pool[i];
2520 		if (bcopy_pkt->dma_handle != NULL) {
2521 			(void) ddi_dma_unbind_handle(bcopy_pkt->dma_handle);
2522 		}
2523 		if (bcopy_pkt->acc_handle != NULL) {
2524 			ddi_dma_mem_free(&bcopy_pkt->acc_handle);
2525 			bcopy_pkt->acc_handle = NULL;
2526 		}
2527 		if (bcopy_pkt->dma_handle != NULL) {
2528 			ddi_dma_free_handle(&bcopy_pkt->dma_handle);
2529 			bcopy_pkt->dma_handle = NULL;
2530 		}
2531 		if (bcopy_pkt->mp != NULL) {
2532 			freemsg(bcopy_pkt->mp);
2533 			bcopy_pkt->mp = NULL;
2534 		}
2535 	}
2536 
2537 	if (tx_ring->bcopy_list.bcopy_pool != NULL) {
2538 		kmem_free(tx_ring->bcopy_list.bcopy_pool,
2539 		    tx_ring->bcopy_list.size);
2540 		tx_ring->bcopy_list.bcopy_pool = NULL;
2541 	}
2542 
2543 	mutex_destroy(&tx_ring->bcopy_list.lock);
2544 }
2545 
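/*
 * Allocate the copy-mode tx buffers. Packets below the copy threshold are
 * bcopy'ed into these preallocated DMA-consistent buffers instead of
 * being DMA-bound, avoiding a handle bind/unbind per packet. One buffer
 * of tx_buf_size bytes is allocated per tx descriptor.
 */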
2546 static u32
2547 qede_alloc_tx_bcopy_buffers(qede_t *qede, qede_tx_ring_t *tx_ring)
2548 {
2549 	u32 ret = DDI_SUCCESS;
2550 	int page_size = (int)ddi_ptob(qede->dip, 1);
2551 	size_t size;
2552 	qede_tx_bcopy_pkt_t *bcopy_pkt, *bcopy_list;
2553 	int i;
2554 	qede_dma_info_t dma_info;
2555 	ddi_dma_cookie_t temp_cookie;
2556 
2557 	/*
2558 	 * If the tx buffer size is less than the page size,
2559 	 * try to use multiple copy buffers inside the
2560 	 * same page. Otherwise use the whole page (or more)
2561 	 * for the copy buffers.
2562 	 */
2563 	if (qede->tx_buf_size > page_size) {
2564 		size = qede->tx_buf_size;
2565 	} else {
2566 		size = page_size;
2567 	}
2568 
2569 	size = sizeof (qede_tx_bcopy_pkt_t) * qede->tx_ring_size;
2570 	bcopy_list = kmem_zalloc(size, KM_SLEEP);
2571 	if (bcopy_list == NULL) {
2572 		qede_warn(qede, "!%s(%d): Failed to allocate bcopy_list",
2573 		    __func__, qede->instance);
2574 		ret = DDI_FAILURE;
2575 		goto exit;
2576 	}
2577 
2578 	tx_ring->bcopy_list.size = size;
2579 	tx_ring->bcopy_list.bcopy_pool = bcopy_list;
2580 	bcopy_pkt = bcopy_list;
2581 
2582 	tx_ring->bcopy_list.head = 0;
2583 	tx_ring->bcopy_list.tail = 0;
2584 	mutex_init(&tx_ring->bcopy_list.lock, NULL, MUTEX_DRIVER, 0);
2585 
2586 	for (i = 0; i < qede->tx_ring_size; i++) {
2587 
2588 		ret = qede_dma_mem_alloc(qede,
2589 					qede->tx_buf_size,
2590 					DDI_DMA_READ | DDI_DMA_STREAMING | DDI_DMA_CONSISTENT,
2591 					(caddr_t *)&dma_info.virt_addr,
2592 					&temp_cookie,
2593 					&dma_info.dma_handle,
2594 					&dma_info.acc_handle,
2595 					&qede_dma_attr_txbuf,
2596 					&qede_buf_acc_attr);
2597 		if (ret) {
2598 			ret = DDI_FAILURE;
2599 			goto exit;
2600 		}
2601 
2602 
2603 		bcopy_pkt->virt_addr = dma_info.virt_addr;
2604 		bcopy_pkt->phys_addr = temp_cookie.dmac_laddress;
2605 		bcopy_pkt->dma_handle = dma_info.dma_handle;
2606 		bcopy_pkt->acc_handle = dma_info.acc_handle;
2607 
2608 		tx_ring->bcopy_list.free_list[i] = bcopy_pkt;
2609 		bcopy_pkt++;
2610 	}
2611 
2612 exit:
2613 	return (ret);
2614 }
2615 
2616 static void
2617 qede_free_tx_dma_handles(qede_t *qede, qede_tx_ring_t *tx_ring)
2618 {
2619 	qede_dma_handle_entry_t *dmah_entry;
2620 	int i;
2621 
2622 	for (i = 0; i < tx_ring->tx_ring_size; i++) {
2623 		dmah_entry = &tx_ring->dmah_list.dmah_pool[i];
2624 		if (dmah_entry) {
2625 			if (dmah_entry->dma_handle != NULL) {
2626 				ddi_dma_free_handle(&dmah_entry->dma_handle);
2627 				dmah_entry->dma_handle = NULL;
2628 			} else {
2629 				qede_info(qede, "dmah_entry %p, handle is NULL",
2630 				     dmah_entry);
2631 			}
2632 		}
2633 	}
2634 
2635 	if (tx_ring->dmah_list.dmah_pool != NULL) {
2636 		kmem_free(tx_ring->dmah_list.dmah_pool,
2637 		    tx_ring->dmah_list.size);
2638 		tx_ring->dmah_list.dmah_pool = NULL;
2639 	}
2640 
2641 	mutex_destroy(&tx_ring->dmah_list.lock);
2642 }
2643 
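/*
 * Allocate the DMA handles used by the mapped (zero-copy) tx path. One
 * handle is preallocated per tx descriptor so that transmit never has to
 * allocate a handle in the hot path; the handles are parked on a free
 * list protected by dmah_list.lock.
 */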
2644 static u32
2645 qede_alloc_tx_dma_handles(qede_t *qede, qede_tx_ring_t *tx_ring)
2646 {
2647 	int i;
2648 	size_t size;
2649 	u32 ret = DDI_SUCCESS;
2650 	qede_dma_handle_entry_t *dmah_entry, *dmah_list;
2651 
2652 	size = sizeof (qede_dma_handle_entry_t) * qede->tx_ring_size;
2653 	dmah_list = kmem_zalloc(size, KM_SLEEP);
2654 	if (dmah_list == NULL) {
2655 		qede_warn(qede, "!%s(%d): Failed to allocate dmah_list",
2656 		    __func__, qede->instance);
2657                 /* LINTED E_CONST_TRUNCATED_BY_ASSIGN */
2658 		ret = DDI_FAILURE;
2659 		goto exit;
2660 	}
2661 
2662 	tx_ring->dmah_list.size = size;
2663 	tx_ring->dmah_list.dmah_pool = dmah_list;
2664 	dmah_entry = dmah_list;
2665 
2666 	tx_ring->dmah_list.head = 0;
2667 	tx_ring->dmah_list.tail = 0;
2668 	mutex_init(&tx_ring->dmah_list.lock, NULL, MUTEX_DRIVER, 0);
2669 
2673 	for (i = 0; i < qede->tx_ring_size; i++) {
2674 		ret = ddi_dma_alloc_handle(qede->dip,
2675 		    &qede_tx_buf_dma_attr,
2676 		    DDI_DMA_DONTWAIT,
2677 		    NULL,
2678 		    &dmah_entry->dma_handle);
2679 		if (ret != DDI_SUCCESS) {
2680 			qede_print_err("!%s(%d): dma alloc handle failed "
2681 			    "for index %d",
2682 			    __func__, qede->instance, i);
2683 			/* LINTED E_CONST_TRUNCATED_BY_ASSIGN */
2684 			ret = DDI_FAILURE;
2685 			goto exit;
2686 		}
2687 
2688 		tx_ring->dmah_list.free_list[i] = dmah_entry;
2689 		dmah_entry++;
2690 	}
2691 exit:
2692 	return (ret);
2693 }
2694 
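/*
 * Allocate the per-TC tx ring resources for a fastpath instance: the
 * tx buffer descriptor chain, the copy-mode buffer pool, the DMA handle
 * pool for mapped sends, and the tx recycle list.
 */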
2695 static u32
2696 qede_alloc_tx_ring_phys(qede_t *qede, qede_fastpath_t *fp)
2697 {
2698 	int i;
2699 	qede_tx_ring_t *tx_ring;
2700 	u32 ret = DDI_SUCCESS;
2701 	size_t size;
2702 	qede_tx_recycle_list_t *recycle_list;
2703 
2704 	ASSERT(qede != NULL);
2705 	ASSERT(fp != NULL);
2706 
2707 	for (i = 0; i < qede->num_tc; i++) {
2708 		tx_ring = fp->tx_ring[i];
2709 		tx_ring->bd_ring_size = qede->tx_ring_size;
2710 
2711 		/*
2712 		 * Allocate the buffer descriptor chain
2713 		 */
2714 		ret = qede_alloc_tx_bd_ring(qede, tx_ring);
2715 		if (ret) {
2716 			cmn_err(CE_WARN, "!%s(%d): failed, %s",
2717 			    __func__, qede->instance, qede_get_ddi_fail(ret));
2718 			return (ret);
2719 		}
2720 
2721 		/*
2722 		 * Allocate copy mode buffers
2723 		 */
2724 		ret = qede_alloc_tx_bcopy_buffers(qede, tx_ring);
2725 		if (ret) {
2726 			qede_print_err("!%s(%d): Failed to alloc tx copy "
2727 			    "buffers", __func__, qede->instance);
2728 			/* LINTED E_CONST_TRUNCATED_BY_ASSIGN */
2729 			ret = DDI_FAILURE;
2730 			goto exit;
2731 		}
2732 
2733 		/*
2734 		 * Allocate dma handles for mapped mode
2735 		 */
2736 		ret = qede_alloc_tx_dma_handles(qede, tx_ring);
2737 		if (ret) {
2738 			qede_print_err("!%s(%d): Failed to alloc tx dma "
2739 			    "handles", __func__, qede->instance);
2740 			/* LINTED E_CONST_TRUNCATED_BY_ASSIGN */
2741 			ret = DDI_FAILURE;
2742 			goto exit;
2743 		}
2744 
2745 		/* Allocate tx_recycle list */
2746 		size = sizeof (qede_tx_recycle_list_t) * qede->tx_ring_size;
2747 		recycle_list = kmem_zalloc(size, KM_SLEEP);
2748 		if (recycle_list == NULL) {
2749 			qede_warn(qede, "!%s(%d): Failed to allocate"
2750 			    " tx_recycle_list", __func__, qede->instance);
2751 			/* LINTED E_CONST_TRUNCATED_BY_ASSIGN */
2752 			ret = DDI_FAILURE;
2753 			goto exit;
2754 		}
2755 
2756 		tx_ring->tx_recycle_list = recycle_list;
2757 	}
2758 exit:
2759 	return (ret);
2760 }
2761 
2762 static void
2763 /* LINTED E_FUNC_ARG_UNUSED */
2764 qede_free_sb_phys(qede_t *qede, qede_fastpath_t *fp)
2765 {
2766 	qede_pci_free_consistent(&fp->sb_dma_handle, &fp->sb_acc_handle);
2767 	fp->sb_virt = NULL;
2768 	fp->sb_phys = 0;
2769 }
2770 
2771 static int
2772 qede_alloc_sb_phys(qede_t *qede, qede_fastpath_t *fp)
2773 {
2774 	int status;
2775 	int sb_id;
2776 	struct ecore_dev *edev = &qede->edev;
2777 	struct ecore_hwfn *p_hwfn;
2778 	qede_vector_info_t *vect_info = fp->vect_info;
2779 	ddi_dma_cookie_t sb_cookie;
2780 
2781 	ASSERT(qede != NULL);
2782 	ASSERT(fp != NULL);
2783 
2784 	/*
2785 	 * In the case of multiple hardware engines,
2786 	 * interrupts are spread across all of them.
2787 	 * In the case of only one engine, all
2788 	 * interrupts are handled by that engine.
2789 	 * In the case of 2 engines, each has half
2790 	 * of the interrupts.
2791 	 */
2792 	sb_id = vect_info->vect_index;
2793 	p_hwfn = &edev->hwfns[sb_id % qede->num_hwfns];
2794 
2795 	/* Allocate dma mem. for status_block */
2796 	status = qede_dma_mem_alloc(qede,
2797 	    sizeof (struct status_block),
2798 	    (DDI_DMA_RDWR | DDI_DMA_CONSISTENT | DDI_DMA_STREAMING),
2799 	    (caddr_t *)&fp->sb_virt,
2800 	    &sb_cookie,
2801 	    &fp->sb_dma_handle,
2802 	    &fp->sb_acc_handle,
2803 	    &qede_desc_dma_attr,
2804 	    &qede_desc_acc_attr);
2805 
2806 	if (status != DDI_SUCCESS) {
2807 		qede_info(qede, "Failed to allocate status_block dma mem");
2808 		return (status);
2809 	}
2810 
2811 	fp->sb_phys = sb_cookie.dmac_laddress;
2812 
2813 
2814 	status = ecore_int_sb_init(p_hwfn,
2815 			p_hwfn->p_main_ptt,
2816 			fp->sb_info,
2817 			(void *)fp->sb_virt,
2818 			fp->sb_phys,
2819 			fp->fp_index);
2820 	if (status != ECORE_SUCCESS) {
2821 		cmn_err(CE_WARN, "Failed ecore_int_sb_init");
2822 		return (DDI_FAILURE);
2823 	}
2824 
2825 	return (status);
2826 }
2827 
2828 static void
2829 qede_free_tx_ring_phys(qede_t *qede, qede_fastpath_t *fp)
2830 {
2831 	qede_tx_ring_t *tx_ring;
2832 	int i;
2833 
2834 	for (i = 0; i < qede->num_tc; i++) {
2835 		tx_ring = fp->tx_ring[i];
2836 		qede_free_tx_dma_handles(qede, tx_ring);
2837 		qede_free_tx_bcopy_buffers(tx_ring);
2838 
2839 		if (tx_ring->tx_recycle_list) {
2840 			kmem_free(tx_ring->tx_recycle_list,
2841 			    sizeof (qede_tx_recycle_list_t)
2842 			    * qede->tx_ring_size);
2843 			tx_ring->tx_recycle_list = NULL;
2844 		}
2845 	}
2846 
2847 	/* qede_free_tx_bd_ring() iterates over all TCs itself */
2848 	qede_free_tx_bd_ring(qede, fp);
2846 }
2847 
2848 static void
2849 qede_fastpath_free_phys_mem(qede_t *qede)
2850 {
2851 	int  i;
2852 	qede_fastpath_t *fp;
2853 
2854 	for (i = 0; i < qede->num_fp; i++) {
2855 		fp = &qede->fp_array[i];
2856 
2857 		qede_free_rx_ring_phys(qede, fp);
2858 		qede_free_tx_ring_phys(qede, fp);
2859 		qede_free_sb_phys(qede, fp);
2860 	}
2861 }
2862 
2863 /*
2864  * Save dma_handles associated with the fastpath elements
2865  * allocate by ecore for doing dma_sync in the fast_path
2866  */
2867 static int
2868 qede_save_fp_dma_handles(qede_t *qede, qede_fastpath_t *fp)
2869 {
2870 	int ret, i;
2871 	qede_rx_ring_t *rx_ring;
2872 	qede_tx_ring_t *tx_ring;
2873 
2874 	rx_ring = fp->rx_ring;
2875 
2876 	/* Rx bd ring dma_handle */
2877 	ret = qede_osal_find_dma_handle_for_block(qede,
2878 	    (void *)rx_ring->rx_bd_ring.p_phys_addr,
2879 	    &rx_ring->rx_bd_dmah);
2880 	if (ret != DDI_SUCCESS) {
2881 		qede_print_err("!%s(%d): Cannot find dma_handle for "
2882 		    "rx_bd_ring, addr %p", __func__, qede->instance,
2883 		    rx_ring->rx_bd_ring.p_phys_addr);
2884 		goto exit;
2885 	}
2886 
2887 	/* rx cqe ring dma_handle */
2888 	ret = qede_osal_find_dma_handle_for_block(qede,
2889 	    (void *)rx_ring->rx_cqe_ring.p_phys_addr,
2890 	    &rx_ring->rx_cqe_dmah);
2891 	if (ret != DDI_SUCCESS) {
2892 		qede_print_err("!%s(%d): Cannot find dma_handle for "
2893 		    "rx_cqe_ring, addr %p", __func__, qede->instance,
2894 		    rx_ring->rx_cqe_ring.p_phys_addr);
2895 		goto exit;
2896 	}
2897 	/* rx cqe ring pbl */
2898 	ret = qede_osal_find_dma_handle_for_block(qede,
2899 	    (void *)rx_ring->rx_cqe_ring.pbl_sp.p_phys_table,
2900 	    &rx_ring->rx_cqe_pbl_dmah);
2901 	if (ret) {
2902 		qede_print_err("!%s(%d): Cannot find dma_handle for "
2903 		    "rx_cqe pbl, addr %p", __func__, qede->instance,
2904 		    rx_ring->rx_cqe_ring.pbl_sp.p_phys_table);
2905 		goto exit;
2906 	}
2907 
2908 	/* tx_bd ring dma_handle(s) */
2909 	for (i = 0; i < qede->num_tc; i++) {
2910 		tx_ring = fp->tx_ring[i];
2911 
2912 		ret = qede_osal_find_dma_handle_for_block(qede,
2913 		    (void *)tx_ring->tx_bd_ring.p_phys_addr,
2914 		    &tx_ring->tx_bd_dmah);
2915 		if (ret != DDI_SUCCESS) {
2916 			qede_print_err("!%s(%d): Cannot find dma_handle "
2917 			    "for tx_bd_ring, addr %p", __func__,
2918 			    qede->instance,
2919 			    tx_ring->tx_bd_ring.p_phys_addr);
2920 			goto exit;
2921 		}
2922 
2923 		ret = qede_osal_find_dma_handle_for_block(qede,
2924 		    (void *)tx_ring->tx_bd_ring.pbl_sp.p_phys_table,
2925 		    &tx_ring->tx_pbl_dmah);
2926 		if (ret) {
2927 			qede_print_err("!%s(%d): Cannot find dma_handle for "
2928 			    "tx_bd pbl, addr %p", __func__, qede->instance,
2929 			    tx_ring->tx_bd_ring.pbl_sp.p_phys_table);
2930 			goto exit;
2931 		}
2932 	}
2933 
2934 exit:
2935 	return (ret);
2936 }
2937 
2938 int
2939 qede_fastpath_alloc_phys_mem(qede_t *qede)
2940 {
2941 	int status = 0, i;
2942 	qede_fastpath_t *fp;
2943 
2944 	for (i = 0; i < qede->num_fp; i++) {
2945 		fp = &qede->fp_array[i];
2946 
2947 		status = qede_alloc_sb_phys(qede, fp);
2948 		if (status != DDI_SUCCESS) {
2949 			goto err;
2950 		}
2951 
2952 		status = qede_alloc_rx_ring_phys(qede, fp);
2953 		if (status != DDI_SUCCESS) {
2954 			goto err;
2955 		}
2956 
2957 		status = qede_alloc_tx_ring_phys(qede, fp);
2958 		if (status != DDI_SUCCESS) {
2959 			goto err;
2960 		}
2961 		status = qede_save_fp_dma_handles(qede, fp);
2962 		if (status != DDI_SUCCESS) {
2963 			goto err;
2964 		}
2965 	}
2966 	return (status);
2967 err:
2968 	qede_fastpath_free_phys_mem(qede);
2969 	return (status);
2970 }
2971 
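/*
 * Wire up the fastpath structures: bind each fastpath instance to its
 * status block and interrupt vector, assign it to a hardware engine
 * (round-robin across hwfns), and point it at its rx ring and at one tx
 * ring per traffic class.
 */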
2972 static int
2973 qede_fastpath_config(qede_t *qede)
2974 {
2975 	int i, j;
2976 	qede_fastpath_t *fp;
2977 	qede_rx_ring_t *rx_ring;
2978 	qede_tx_ring_t *tx_ring;
2979 	qede_vector_info_t *vect_info;
2980 	int num_fp, num_hwfns;
2981 
2982 	ASSERT(qede != NULL);
2983 
2984 	num_fp = qede->num_fp;
2985 	num_hwfns = qede->num_hwfns;
2986 
2987 	vect_info = &qede->intr_ctx.intr_vect_info[num_hwfns];
2988 	fp = &qede->fp_array[0];
2989 	tx_ring = &qede->tx_array[0][0];
2990 
2991 	for (i = 0; i < num_fp; i++, fp++, vect_info++) {
2992 		fp->sb_info = &qede->sb_array[i];
2993 		fp->qede = qede;
2994 		fp->fp_index = i;
2995 		/*
2996 		 * With a single hwfn, the hwfn index should be zero
2997 		 * for all fp entries. If there are two engines this
2998 		 * index should alternate between 0 and 1.
2999 		 */
3000 		fp->fp_hw_eng_index = fp->fp_index % num_hwfns;
3001 		fp->vport_id = 0;
3002 		fp->stats_id = 0;
3003 		fp->rss_id = fp->fp_index;
3004 		fp->rx_queue_index = fp->fp_index;
3005 		fp->vect_info = vect_info;
3006 		/*
3007 		 * After vport update, interrupts will be
3008 		 * running, so we need to initialize our
3009 		 * enable/disable gate as such.
3010 		 */
3011 		fp->disabled_by_poll = 0;
3012 
3013 		/* rx_ring setup */
3014 		rx_ring = &qede->rx_array[i];
3015 		fp->rx_ring = rx_ring;
3016 		rx_ring->fp = fp;
3017 		rx_ring->rx_buf_count = qede->rx_buf_count;
3018 		rx_ring->rx_buf_size = qede->rx_buf_size;
3019 		rx_ring->qede = qede;
3020 		rx_ring->sw_rx_cons = 0;
3021 		rx_ring->rx_copy_threshold = qede->rx_copy_threshold;
3022 		rx_ring->rx_low_buffer_threshold =
3023 		    qede->rx_low_buffer_threshold;
3024 		rx_ring->queue_started = B_FALSE;
3025 
3026 		/* tx_ring setup */
3027 		for (j = 0; j < qede->num_tc; j++) {
3028 			tx_ring = &qede->tx_array[j][i];
3029 			fp->tx_ring[j] = tx_ring;
3030 			tx_ring->qede = qede;
3031 			tx_ring->fp = fp;
3032 			tx_ring->fp_idx = i;
3033 			tx_ring->tx_queue_index = i * qede->num_fp +
3034 			    fp->fp_index;
3035 			tx_ring->tx_buf_size = qede->tx_buf_size;
3036 			tx_ring->tx_ring_size = qede->tx_ring_size;
3037 			tx_ring->queue_started = B_FALSE;
3038 #ifdef	DBLK_DMA_PREMAP
3039 			tx_ring->pm_handle = qede->pm_handle;
3040 #endif
3041 
3042 			tx_ring->doorbell_addr =
3043 			    qede->doorbell;
3044 			tx_ring->doorbell_handle =
3045 			    qede->doorbell_handle;
3046 		}
3047 	}
3048 
3049 	return (DDI_SUCCESS);
3050 }
3051 
3052 /*
3053  * op = 1, Initialize link
3054  * op = 0, Destroy link
3055  */
3056 int
3057 qede_configure_link(qede_t *qede, bool op)
3058 {
3059 	struct ecore_dev *edev = &qede->edev;
3060 	struct ecore_hwfn *hwfn;
3061 	struct ecore_ptt *ptt = NULL;
3062 	int i, ret = DDI_SUCCESS;
3063 
3064 	for_each_hwfn(edev, i) {
3065 		hwfn = &edev->hwfns[i];
3066 		qede_info(qede, "Configuring link for hwfn#%d", i);
3067 
3068 		ptt = ecore_ptt_acquire(hwfn);
3069 		if (ptt == NULL) {
3070 			qede_info(qede, "Cannot reserve ptt from ecore");
3071 			ret = DDI_FAILURE;
3072 			goto exit;
3073 		}
3074 
3075 		ret = ecore_mcp_set_link(hwfn, ptt, op);
3076 
3077 		ecore_ptt_release(hwfn, ptt);
3078 		if (ret) {
3079 			/* if link config fails, make sure ptt is released */
3080 			goto exit;
3081 		}
3082 	}
3083 exit:
3084 	return (ret);
3085 }
3086 
3087 /*
3088  * drv_lock must be held by the caller.
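 *
 * Teardown order: disable the fastpath interrupts, reset the link, clear
 * the rx filters, stop the rx/tx queues in firmware, then free the
 * fastpath DMA memory.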
3089  */
3090 int
3091 qede_stop(qede_t *qede)
3092 {
3093 	int status;
3094 
3095 	ASSERT(mutex_owned(&qede->drv_lock));
3096 	qede->qede_state = QEDE_STATE_STOPPING;
3097 
3098 	mac_link_update(qede->mac_handle, LINK_STATE_DOWN);
3099 
3100 	qede_disable_all_fastpath_intrs(qede);
3101 	status = qede_configure_link(qede, 0 /* Re-Set */);
3102 	if (status) {
3103 		/* LINTED E_BAD_FORMAT_ARG_TYPE2 */
3104 		cmn_err(CE_NOTE, "!%s(%d): Failed to reset link",
3105 		    __func__, qede->instance);
3106 		return (status);
3107 	}
3108 	qede_clear_filters(qede);
3109 	status = qede_fastpath_stop_queues(qede);
3110 	if (status != DDI_SUCCESS) {
3111 		/* LINTED E_BAD_FORMAT_ARG_TYPE2 */
3112 		cmn_err(CE_WARN, "qede_stop:"
3113 		    " qede_fastpath_stop_queues FAILED "
3114 		    " qede=%p\n",
3115 		    qede);
3116 		return (status);
3117 	}
3118 
3119 	qede_fastpath_free_phys_mem(qede);
3120 
3121 	qede->qede_state = QEDE_STATE_STOPPED;
3122 	/* LINTED E_BAD_FORMAT_ARG_TYPE2 */
3123 	cmn_err(CE_NOTE, "qede_stop SUCCESS, qede=%p\n", qede);
3124 	return (DDI_SUCCESS);
3125 }
3126 
3127 /*
3128  * drv_lock must be held by the caller.
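 *
 * Bring-up order: configure the fastpath structures, allocate the
 * fastpath DMA memory, start the rx/tx queues in firmware, bring up the
 * link, program the rx filter mode, then enable the fastpath interrupts.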
3129  */
3130 int
3131 qede_start(qede_t *qede)
3132 {
3133 	int status;
3134 
3135 	ASSERT(mutex_owned(&qede->drv_lock));
3136 
3137 	qede->qede_state = QEDE_STATE_STARTING;
3138 
3139 	mac_link_update(qede->mac_handle, LINK_STATE_DOWN);
3140 
3141 	/*
3142 	 * Configure the fastpath blocks with
3143 	 * the sb_info, rx_ring and tx_rings
3144 	 */
3145 	if (qede_fastpath_config(qede) != DDI_SUCCESS) {
3146 		/* LINTED E_BAD_FORMAT_ARG_TYPE2 */
3147 		qede_print_err("!%s(%d): qede_fastpath_config failed",
3148 		    __func__, qede->instance);
3149 		return (DDI_FAILURE);
3150 	}
3151 
3152 
3153 	/*
3154 	 * Allocate the physical memory
3155 	 * for fastpath.
3156 	 */
3157 	status = qede_fastpath_alloc_phys_mem(qede);
3158 	if (status) {
3159 		cmn_err(CE_NOTE, "fastpath_alloc_phys_mem "
3160 		    " failed qede=%p\n", qede);
3161 		return (DDI_FAILURE);
3162 	}
3163 
3164 	status = qede_fastpath_start_queues(qede);
3165 	if (status) {
3166 		cmn_err(CE_NOTE, "fp_start_queues "
3167 		    " failed qede=%p\n", qede);
3168 		goto err_out1;
3169 	}
3170 
3171 	cmn_err(CE_NOTE, "qede_start fp_start_queues qede=%p\n", qede);
3172 
3173 	status = qede_configure_link(qede, 1 /* Set */);
3174 	if (status) {
3175 		cmn_err(CE_NOTE, "!%s(%d): Failed to configure link",
3176 		    __func__, qede->instance);
3177 		goto err_out1;
3178 	}
3179 
3180 	/*
3181 	 * Put interface in regular mode
3182 	 */
3183 	if (qede_set_filter_rx_mode(qede,
3184 		QEDE_FILTER_RX_MODE_REGULAR) != DDI_SUCCESS) {
3185 		cmn_err(CE_NOTE, "!%s(%d): Failed to set filter mode",
3186 		    __func__, qede->instance);
3187 		goto err_out1;
3188 	}
3189 
3190 	status = qede_enable_all_fastpath_intrs(qede);
3191 	if (status) {
3192 		/* LINTED E_BAD_FORMAT_ARG_TYPE2 */
3193 		cmn_err(CE_NOTE, "!%s(%d): Failed to enable intrs",
3194 		    __func__, qede->instance);
3195 		goto err_out2;
3196 	}
3197 	qede->qede_state = QEDE_STATE_STARTED;
3198 	cmn_err(CE_NOTE, "!%s(%d): SUCCESS",
3199 		    __func__, qede->instance);
3200 
3201 	return (status);
3202 
3203 err_out2:
3204 	(void) qede_fastpath_stop_queues(qede);
3205 err_out1:
3206 	qede_fastpath_free_phys_mem(qede);
3207 	return (DDI_FAILURE);
3208 }
3209 
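/*
 * Undo whatever qede_attach() managed to set up, in reverse order. Each
 * attach step records a flag in qede->attach_resources, so this routine
 * can be called from both the detach path and the attach error path and
 * only tears down what was actually initialized.
 */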
3210 static void
3211 qede_free_attach_resources(qede_t *qede)
3212 {
3213 	struct ecore_dev *edev;
3214 	int status;
3215 
3216 	edev = &qede->edev;
3217 
3218 	if (qede->attach_resources & QEDE_ECORE_HW_INIT) {
3219 		if (ecore_hw_stop(edev) != ECORE_SUCCESS) {
3220 			cmn_err(CE_NOTE, "%s(%d): ecore_hw_stop: failed\n",
3221 			    __func__, qede->instance);
3222 		}
3223 		qede->attach_resources &= ~QEDE_ECORE_HW_INIT;
3224 	}
3225 
3226 	if (qede->attach_resources & QEDE_SP_INTR_ENBL) {
3227 		status = qede_disable_slowpath_intrs(qede);
3228 		if (status != DDI_SUCCESS) {
3229 			qede_print("%s(%d): qede_disable_slowpath_intrs Failed",
3230 			    __func__, qede->instance);
3231 		}
3232 		qede->attach_resources &= ~QEDE_SP_INTR_ENBL;
3233 	}
3234 	if (qede->attach_resources & QEDE_KSTAT_INIT) {
3235 		qede_kstat_fini(qede);
3236 		qede->attach_resources &= ~QEDE_KSTAT_INIT;
3237 	}
3238 
3239 
3240 	if (qede->attach_resources & QEDE_GLD_INIT) {
3241 		status = mac_unregister(qede->mac_handle);
3242 		if (status != 0) {
3243 			qede_print("%s(%d): mac_unregister Failed",
3244 			    __func__, qede->instance);
3245 		}
3246 		qede->attach_resources &= ~QEDE_GLD_INIT;
3247 	}
3248 
3249 	if (qede->attach_resources & QEDE_EDEV_CONFIG) {
3250 		ecore_resc_free(edev);
3251 		qede->attach_resources &= ~QEDE_EDEV_CONFIG;
3252 	}
3253 
3254 	if (qede->attach_resources & QEDE_INTR_CONFIG) {
3255 		qede_unconfig_intrs(qede);
3256 		qede->attach_resources &= ~QEDE_INTR_CONFIG;
3257 	}
3258 
3259 	if (qede->attach_resources & QEDE_INTR_ALLOC) {
3260 		qede_free_intrs(qede);
3261 		qede->attach_resources &= ~QEDE_INTR_ALLOC;
3262 	}
3263 
3264 	if (qede->attach_resources & QEDE_INIT_LOCKS) {
3265 		qede_destroy_locks(qede);
3266 		qede->attach_resources &= ~QEDE_INIT_LOCKS;
3267 	}
3268 
3269 	if (qede->attach_resources & QEDE_IO_STRUCT_ALLOC) {
3270 		qede_free_io_structs(qede);
3271 		qede->attach_resources &= ~QEDE_IO_STRUCT_ALLOC;
3272 	}
3273 #ifdef QEDE_LSR
3274 	if (qede->attach_resources & QEDE_CALLBACK) {
3275 
3276 
3277 		status = ddi_cb_unregister(qede->callback_hdl);
3278 		if (status != DDI_SUCCESS) {
3279 		}
3280 		qede->attach_resources &= ~QEDE_CALLBACK;
3281 	}
3282 #endif
3283 	if (qede->attach_resources & QEDE_ECORE_HW_PREP) {
3284 		ecore_hw_remove(edev);
3285 		qede->attach_resources &= ~QEDE_ECORE_HW_PREP;
3286 	}
3287 
3288 	if (qede->attach_resources & QEDE_PCI) {
3289 		qede_unconfig_pci(qede);
3290 		qede->attach_resources &= ~QEDE_PCI;
3291 	}
3292 
3293 	if (qede->attach_resources & QEDE_FM) {
3294 		qede_unconfig_fm(qede);
3295 		qede->attach_resources &= ~QEDE_FM;
3296 	}
3297 
3298 	/*
3299 	 * Check for possible mem. left behind by ecore
3300 	 */
3301 	(void) qede_osal_cleanup(qede);
3302 
3303 	if (qede->attach_resources & QEDE_STRUCT_ALLOC) {
3304 		ddi_set_driver_private(qede->dip, NULL);
3305 		qede->attach_resources &= ~QEDE_STRUCT_ALLOC;
3306 		kmem_free(qede, sizeof (qede_t));
3307 	}
3308 }
3309 
3310 /*
3311  * drv_lock must be held by the caller.
3312  */
3313 static int
3314 qede_suspend(qede_t *qede)
3315 {
3316 	// STUB
3317 	ASSERT(mutex_owned(&qede->drv_lock));
3318 	printf("in qede_suspend\n");
3319 	return (DDI_FAILURE);
3320 }
3321 
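/*
 * DDI attach entry point. DDI_ATTACH walks through FM/PCI setup, ecore
 * device preparation, interrupt allocation and configuration, GLDv3
 * registration and finally ecore_hw_init(); each completed step is
 * flagged in attach_resources so that a failure can unwind through
 * qede_free_attach_resources().
 */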
3322 static int
3323 qede_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
3324 {
3325     	qede_t *qede;
3326 	struct ecore_dev *edev;
3327 	int instance;
3328 	uint32_t vendor_id;
3329 	uint32_t device_id;
3330 	struct ecore_hwfn *p_hwfn;
3331 	struct ecore_ptt *p_ptt;
3332 	struct ecore_mcp_link_params *link_params;
3333 	struct ecore_hw_init_params hw_init_params;
3334 	struct ecore_drv_load_params load_params;
3335 	int *props;
3336        	uint32_t num_props;
3337 	int rc = 0;
3338 
3339     	switch (cmd) {
3340     	default:
3341        		return (DDI_FAILURE);
3342 
3343 	case DDI_RESUME:
3344 	{
3345        		qede = (qede_t * )ddi_get_driver_private(dip);
3346         	if (qede == NULL || qede->dip != dip) {
3347 			cmn_err(CE_NOTE, "qede:%s: Could not find"
3348 			    " adapter structure\n", __func__);
3349 			return (DDI_FAILURE);
3350         	}
3351 
3352 		mutex_enter(&qede->drv_lock);
3353 		if (qede->qede_state != QEDE_STATE_SUSPENDED) {
3354 			mutex_exit(&qede->drv_lock);
3355         		return (DDI_FAILURE);
3356 		}
3357 
3358 		if (qede_resume(qede) != DDI_SUCCESS) {
3359 			cmn_err(CE_NOTE, "%s:%d resume operation failure\n",
3360 			    __func__, qede->instance);
3361 			mutex_exit(&qede->drv_lock);
3362             		return (DDI_FAILURE);
3363         	}
3364 
3365 		qede->qede_state = QEDE_STATE_ATTACHED;
3366 		mutex_exit(&qede->drv_lock);
3367         	return (DDI_SUCCESS);
3368 	}
3369 	case DDI_ATTACH:
3370 	{
3371     		instance = ddi_get_instance(dip);
3372 	    	cmn_err(CE_NOTE, "qede_attach(%d): Enter",
3373 		    instance);
3374 
3375     		/* Allocate and initialize the per-instance soft state structure */
3376     		if ((qede = kmem_zalloc(sizeof (qede_t), KM_SLEEP)) == NULL) {
3377 			cmn_err(CE_NOTE, "!%s(%d): Could not allocate adapter "
3378 			    "structure\n", __func__, instance);
3379         		return (DDI_FAILURE);
3380     		}
3381 
3382 		qede->attach_resources |= QEDE_STRUCT_ALLOC;
3383     		ddi_set_driver_private(dip, qede);
3384 		qede->dip = dip;
3385    		qede->instance = instance;
3386     		snprintf(qede->name, sizeof (qede->name), "qede%d", instance);
3387 		edev = &qede->edev;
3388 
3389 		if (qede_config_fm(qede) != DDI_SUCCESS) {
3390         		goto exit_with_err;
3391 		}
3392 		qede->attach_resources |= QEDE_FM;
3393 
3394 		/*
3395 		 * Do PCI config setup and map the register
3396 		 * and doorbell space */
3397 		if (qede_config_pci(qede) != DDI_SUCCESS) {
3398         		goto exit_with_err;
3399 		}
3400 		qede->attach_resources |= QEDE_PCI;
3401 
3402 		/*
3403 		 * Setup OSAL mem alloc related locks.
3404 		 * Do not call any ecore functions without
3405 		 * initializing these locks
3406 		 */
3407 		mutex_init(&qede->mem_list.mem_list_lock, NULL,
3408 		    MUTEX_DRIVER, 0);
3409 		mutex_init(&qede->phys_mem_list.lock, NULL,
3410 		    MUTEX_DRIVER, 0);
3411 		QEDE_INIT_LIST_HEAD(&qede->mem_list.mem_list_head);
3412 		QEDE_INIT_LIST_HEAD(&qede->phys_mem_list.head);
3413 		QEDE_INIT_LIST_HEAD(&qede->mclist.head);
3414 
3415 
3416 		/*
3417 		 * FIXME: this function calls ecore api, but
3418 		 * dp_level and module are not yet set
3419 		 */
3420 		if (qede_prepare_edev(qede) != ECORE_SUCCESS) {
3421 			// report fma
3422         		goto exit_with_err;
3423 		}
3424 
3425 		qede->num_hwfns = edev->num_hwfns;
3426 		qede->num_tc = 1;
3427 		memcpy(qede->ether_addr, edev->hwfns->hw_info.hw_mac_addr,
3428 		    ETHERADDRL);
3429 		qede_info(qede, "Interface mac_addr : " MAC_STRING,
3430 		    MACTOSTR(qede->ether_addr));
3431 		qede->attach_resources |= QEDE_ECORE_HW_PREP;
3432 
3433 		if (qede_set_operating_params(qede) != DDI_SUCCESS) {
3434         		goto exit_with_err;
3435 		}
3436 		qede->attach_resources |= QEDE_SET_PARAMS;
3437 #ifdef QEDE_LSR
3438 		if (ddi_cb_register(qede->dip,
3439 	    	    qede->callback_flags,
3440 	    	    qede_callback,
3441 		    qede,
3442 	    	    NULL,
3443 	    	    &qede->callback_hdl)) {
3444 			goto exit_with_err;
3445 		}
3446 		qede->attach_resources |= QEDE_CALLBACK;
3447 #endif
3448 		qede_cfg_reset(qede);
3449 
3450 		if (qede_alloc_intrs(qede)) {
3451 			cmn_err(CE_NOTE, "%s: Could not allocate interrupts\n",
3452 			    __func__);
3453         		goto exit_with_err;
3454 		}
3455 
3456 		qede->attach_resources |= QEDE_INTR_ALLOC;
3457 
3458 		if (qede_config_intrs(qede)) {
3459 			cmn_err(CE_NOTE, "%s: Could not configure interrupts\n",
3460 			    __func__);
3461         		goto exit_with_err;
3462 		}
3463 		qede->attach_resources |= QEDE_INTR_CONFIG;
3464 
3465     		if (qede_alloc_io_structs(qede) != DDI_SUCCESS) {
3466 			cmn_err(CE_NOTE, "%s: Could not allocate data"
3467 			    " path structures\n", __func__);
3468         		goto exit_with_err;
3469     		}
3470 
3471 		qede->attach_resources |= QEDE_IO_STRUCT_ALLOC;
3472 
3473 		/* Lock init cannot fail */
3474 		qede_init_locks(qede);
3475 		qede->attach_resources |= QEDE_INIT_LOCKS;
3476 
3477 
3478 		if (qede_config_edev(qede)) {
3479 			cmn_err(CE_NOTE, "%s: Could not configure ecore \n",
3480 			    __func__);
3481 			goto exit_with_err;
3482 		}
3483 		qede->attach_resources |= QEDE_EDEV_CONFIG;
3484 
3485 		if (qede_kstat_init(qede) == B_FALSE) {
3486 			cmn_err(CE_NOTE, "%s: Could not initialize kstat \n",
3487 			    __func__);
3488 			goto exit_with_err;
3489 
3490 		}
3491 		qede->attach_resources |= QEDE_KSTAT_INIT;
3492 
3493 		if (qede_gld_init(qede) == B_FALSE) {
3494 			cmn_err(CE_NOTE, "%s: Failed call to qede_gld_init",
3495 			    __func__);
3496 			goto exit_with_err;
3497 		}
3498 
3499 		qede->attach_resources |= QEDE_GLD_INIT;
3500 
3501 		if (qede_enable_slowpath_intrs(qede)) {
3502 			cmn_err(CE_NOTE, "%s: Could not enable interrupts\n",
3503 			    __func__);
3504 			goto exit_with_err;
3505 		}
3506 
3507 		qede->attach_resources |= QEDE_SP_INTR_ENBL;
3508 
3509 		cmn_err(CE_NOTE, "qede->attach_resources = %x\n",
3510 		    qede->attach_resources);
3511 
3512 		memset((void *)&hw_init_params, 0,
3513 		    sizeof (struct ecore_hw_init_params));
3514 		hw_init_params.p_drv_load_params = &load_params;
3515 
3516 		hw_init_params.p_tunn = NULL;
3517 		hw_init_params.b_hw_start = true;
3518 		hw_init_params.int_mode = qede->intr_ctx.intr_mode;
3519 		hw_init_params.allow_npar_tx_switch = false;
3520 		hw_init_params.bin_fw_data = NULL;
3521 		load_params.is_crash_kernel = false;
3522 		load_params.mfw_timeout_val = 0;
3523 		load_params.avoid_eng_reset = false;
3524 		load_params.override_force_load =
3525 		    ECORE_OVERRIDE_FORCE_LOAD_NONE;
3526 
3527 		if (ecore_hw_init(edev, &hw_init_params) != ECORE_SUCCESS) {
3528 			cmn_err(CE_NOTE,
3529 			    "%s: Could not initialize ecore block\n",
3530 			     __func__);
3531 			goto exit_with_err;
3532 		}
3533 		qede->attach_resources |= QEDE_ECORE_HW_INIT;
3534 		qede->qede_state = QEDE_STATE_ATTACHED;
3535 
3536 		qede->detach_unsafe = 0;
3537 
3538 		snprintf(qede->version,
3539              		sizeof (qede->version),
3540              		"%d.%d.%d",
3541              		MAJVERSION,
3542              		MINVERSION,
3543              		REVVERSION);
3544 
3545 		snprintf(qede->versionFW,
3546              		sizeof (qede->versionFW),
3547              		"%d.%d.%d.%d",
3548              		FW_MAJOR_VERSION,
3549              		FW_MINOR_VERSION,
3550              		FW_REVISION_VERSION,
3551              		FW_ENGINEERING_VERSION);
3552 
3553 		p_hwfn = &qede->edev.hwfns[0];
3554 		p_ptt = ecore_ptt_acquire(p_hwfn);
3555 		/*
3556 		 * (test) : saving the default link_input params
3557 		 */
3558 		link_params = ecore_mcp_get_link_params(p_hwfn);
3559 		memset(&qede->link_input_params, 0,
3560 		    sizeof (qede_link_input_params_t));
3561 		memcpy(&qede->link_input_params.default_link_params,
3562 		    link_params,
3563 		    sizeof (struct ecore_mcp_link_params));
3564 
3565 		p_hwfn = ECORE_LEADING_HWFN(edev);
3566         	ecore_mcp_get_mfw_ver(p_hwfn, p_ptt, &qede->mfw_ver, NULL);
3567 
3568 		ecore_ptt_release(p_hwfn, p_ptt);
3569 
3570 		snprintf(qede->versionMFW,
3571 			sizeof (qede->versionMFW),
3572 			"%d.%d.%d.%d",
3573 			(qede->mfw_ver >> 24) & 0xFF,
3574 	        	(qede->mfw_ver >> 16) & 0xFF,
3575 			(qede->mfw_ver >> 8) & 0xFF,
3576 			qede->mfw_ver & 0xFF);
3577 
3578 		snprintf(qede->chip_name,
3579              		sizeof (qede->chip_name),
3580 			"%s",
3581 			ECORE_IS_BB(edev) ? "BB" : "AH");
3582 
3583 	   	snprintf(qede->chipID,
3584 			sizeof (qede->chipID),
3585              		"0x%x",
3586              		qede->edev.chip_num);
3587 
3588 		*qede->bus_dev_func = 0;
3589 		vendor_id = 0;
3590 		device_id = 0;
3591 
3592 
3593 		rc = ddi_prop_lookup_int_array(DDI_DEV_T_ANY, qede->dip,
3594 					0, "reg", &props, &num_props);
3595 		if ((rc == DDI_PROP_SUCCESS) && (num_props > 0)) {
3596 			snprintf(qede->bus_dev_func,
3597 			    sizeof (qede->bus_dev_func),
3598 			    "%04x:%02x:%02x",
3599 			    PCI_REG_BUS_G(props[0]),
3600 			    PCI_REG_DEV_G(props[0]),
3601 			    PCI_REG_FUNC_G(props[0]));
3602 
3603 			/*
3604 			 * This information is used
3605 			 * in the QEDE_FUNC_INFO ioctl
3606 			 */
3607 			qede->pci_func = (uint8_t)PCI_REG_FUNC_G(props[0]);
3608 
3609 			ddi_prop_free(props);
3610 		}
3613 
3614 		rc = ddi_prop_lookup_int_array(DDI_DEV_T_ANY, qede->dip,
3615 					0, "vendor-id", &props, &num_props);
3616 		if ((rc == DDI_PROP_SUCCESS) && (num_props > 0)) {
3617 			vendor_id = props[0];
3618 			ddi_prop_free(props);
3619 		}
3620 		rc = ddi_prop_lookup_int_array(DDI_DEV_T_ANY, qede->dip,
3621 					0, "device-id", &props, &num_props);
3622 		if ((rc == DDI_PROP_SUCCESS) && (num_props > 0)) {
3623 			device_id = props[0];
3624 			ddi_prop_free(props);
3625 		}
3626 
3627 
3628 		snprintf(qede->vendor_device,
3629 			sizeof (qede->vendor_device),
3630 			"%04x:%04x",
3631 			vendor_id,
3632 			device_id);
3633 
3634 
3635 		snprintf(qede->intrAlloc,
3636 			sizeof (qede->intrAlloc), "%d %s",
3637 			(qede->intr_ctx.intr_type_in_use == DDI_INTR_TYPE_FIXED)
3638  			? 1 :
3639 			qede->intr_ctx.intr_vect_allocated,
3640 			(qede->intr_ctx.intr_type_in_use == DDI_INTR_TYPE_MSIX)
3641 			? "MSIX" :
3642 			(qede->intr_ctx.intr_type_in_use == DDI_INTR_TYPE_MSI)
3643 			? "MSI"  : "Fixed");
3644 
3645 	        qede_print("%s(%d): success, addr %p chip %s id %s intr %s\n",
3646 		    __func__, qede->instance, qede, qede->chip_name,
3647 		    qede->vendor_device,qede->intrAlloc);
3648 
3649 	        qede_print("%s(%d): version %s FW %s MFW %s\n",
3650 		    __func__, qede->instance, qede->version,
3651 		    qede->versionFW, qede->versionMFW);
3652 
3653 		return (DDI_SUCCESS);
3654 	}
3655 	}
3656 exit_with_err:
3657 	cmn_err(CE_WARN, "%s:%d failed, attach_resources = %x\n",
3658 	    __func__, qede->instance, qede->attach_resources);
3659 	qede_free_attach_resources(qede);
3660 	return (DDI_FAILURE);
3661 }
3662 
3663 static int
3664 qede_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
3665 {
3666 
3667 	qede_t *qede;
3668 	int status;
3669 	uint32_t count = 0;
3670 
3671 	qede = (qede_t *)ddi_get_driver_private(dip);
3672 	if ((qede == NULL) || (qede->dip != dip)) {
3673 		return (DDI_FAILURE);
3674 	}
3675 
3676 	switch (cmd) {
3677 	default:
3678 		return (DDI_FAILURE);
3679 	case DDI_SUSPEND:
3680 		mutex_enter(&qede->drv_lock);
3681 		status = qede_suspend(qede);
3682 		if (status != DDI_SUCCESS) {
3683 			mutex_exit(&qede->drv_lock);
3684 			return (DDI_FAILURE);
3685 		}
3686 
3687 		qede->qede_state = QEDE_STATE_SUSPENDED;
3688 		mutex_exit(&qede->drv_lock);
3689 		return (DDI_SUCCESS);
3690 
3691 	case DDI_DETACH:
3692 		mutex_enter(&qede->drv_lock);
3693 		if (qede->qede_state == QEDE_STATE_STARTED) {
3694 			qede->plumbed = 0;
3695 			status = qede_stop(qede);
3696 			if (status != DDI_SUCCESS) {
3697 				qede->qede_state = QEDE_STATE_FAILED;
3698 				mutex_exit(&qede->drv_lock);
3699 				return (DDI_FAILURE);
3700 			}
3701 		}
3702 		mutex_exit(&qede->drv_lock);
3703 		if (qede->detach_unsafe) {
3704 			/*
3705 			 * Wait for rx buffers to be returned from
3706 			 * the upper layers.
3707 			 */
3708 			count = 0;
3709 			while ((qede->detach_unsafe) && (count < 100)) {
3710 				qede_delay(100);
3711 				count++;
3712 			}
3713 			if (qede->detach_unsafe) {
3714 				qede_info(qede, "!%s(%d) : Buffers still with"
3715 				    " OS, failing detach\n",
3716 				    qede->name, qede->instance);
3717 				return (DDI_FAILURE);
3718 			}
3719 		}
3720 		qede_free_attach_resources(qede);
3721 		return (DDI_SUCCESS);
3722 	}
3723 }
3724 
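/*
 * Fast-reboot quiesce entry point. The link is marked down and the
 * firmware recovery process is kicked off so that the device stops DMA
 * activity before control is handed over to the new kernel.
 */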
3725 static int
3726 /* LINTED E_FUNC_ARG_UNUSED */
3727 qede_quiesce(dev_info_t *dip)
3728 {
3729 	qede_t *qede = (qede_t *)ddi_get_driver_private(dip);
3730 	struct ecore_dev *edev = &qede->edev;
3731 	int status = DDI_SUCCESS;
3732 	struct ecore_hwfn *p_hwfn;
3733 	struct ecore_ptt *p_ptt = NULL;
3734 
3735 	mac_link_update(qede->mac_handle, LINK_STATE_DOWN);
3736 	p_hwfn = ECORE_LEADING_HWFN(edev);
3737 	p_ptt = ecore_ptt_acquire(p_hwfn);
3738 	if (p_ptt) {
3739 		status = ecore_start_recovery_process(p_hwfn, p_ptt);
3740 		ecore_ptt_release(p_hwfn, p_ptt);
3741 		OSAL_MSLEEP(5000);
3742 	}
3743 	return (status);
3744 
3745 }
3746 
3747 
3748 DDI_DEFINE_STREAM_OPS(qede_dev_ops, nulldev, nulldev, qede_attach, qede_detach,
3749     nodev, NULL, D_MP, NULL, qede_quiesce);
3750 
3751 static struct modldrv qede_modldrv =
3752 {
3753     &mod_driverops,    /* drv_modops (must be mod_driverops for drivers) */
3754     QEDE_PRODUCT_INFO, /* drv_linkinfo (string displayed by modinfo) */
3755     &qede_dev_ops      /* drv_dev_ops */
3756 };
3757 
3758 
3759 static struct modlinkage qede_modlinkage =
3760 {
3761     MODREV_1,        /* ml_rev */
3762     (&qede_modldrv), /* ml_linkage */
3763     NULL           /* NULL termination */
3764 };
3765 
3766 int
3767 _init(void)
3768 {
3769     int rc;
3770 
3771     qede_dev_ops.devo_cb_ops->cb_str = NULL;
3772     mac_init_ops(&qede_dev_ops, "qede");
3773 
3774     /* Install module information with O/S */
3775     if ((rc = mod_install(&qede_modlinkage)) != DDI_SUCCESS) {
3776         mac_fini_ops(&qede_dev_ops);
3777 	cmn_err(CE_NOTE, "mod_install failed");
3778         return (rc);
3779     }
3780 
3781     return (rc);
3782 }
3783 
3784 
3785 int
3786 _fini(void)
3787 {
3788     int rc;
3789 
3790     if ((rc = mod_remove(&qede_modlinkage)) == DDI_SUCCESS) {
3791         mac_fini_ops(&qede_dev_ops);
3792     }
3793 
3794     return (rc);
3795 }
3796 
3797 
3798 int
3799 _info(struct modinfo * modinfop)
3800 {
3801     return (mod_info(&qede_modlinkage, modinfop));
3802 }
3803