xref: /illumos-gate/usr/src/uts/common/io/qede/qede_fp.c (revision 12c2600c)
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, v.1,  (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://opensource.org/licenses/CDDL-1.0.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2014-2017 Cavium, Inc.
 * The contents of this file are subject to the terms of the Common Development
 * and Distribution License, v.1,  (the "License").
 *
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the License
 * at http://opensource.org/licenses/CDDL-1.0
 *
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "qede.h"

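/*
 * Pull the next free DMA handle entry off the tx ring's free list.
 * The list is a circular array indexed by head/tail and protected
 * by list->lock.
 */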
static qede_dma_handle_entry_t *
qede_get_dmah_entry(qede_tx_ring_t *tx_ring)
{
	qede_dma_handles_list_t *list = &tx_ring->dmah_list;
	qede_dma_handle_entry_t *dmah;

	mutex_enter(&list->lock);
	dmah = list->free_list[list->head];
	list->free_list[list->head] = NULL;
	list->head = (list->head + 1) & TX_RING_MASK;
	mutex_exit(&list->lock);

	return (dmah);
}

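/*
 * Return a chain of DMA handle entries (linked through dmah->next)
 * to the free list, starting at the current tail.
 */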
static void
qede_put_dmah_entries(qede_tx_ring_t *tx_ring, qede_dma_handle_entry_t *dmah)
{
	qede_dma_handles_list_t *list = &tx_ring->dmah_list;
	qede_dma_handle_entry_t *next;
	u16 index;

	mutex_enter(&list->lock);
	index = list->tail;

	while (dmah != NULL) {
		next = dmah->next;
		dmah->next = NULL;
		list->free_list[index] = dmah;
		index = (index + 1) & TX_RING_MASK;
		dmah = next;
	}

	list->tail = index;

	mutex_exit(&list->lock);
}

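/*
 * Grab the next preallocated bcopy packet buffer from the tx ring's
 * free list.
 */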
static qede_tx_bcopy_pkt_t *
qede_get_bcopy_pkt(qede_tx_ring_t *tx_ring)
{
	qede_tx_bcopy_list_t *list = &tx_ring->bcopy_list;
	qede_tx_bcopy_pkt_t *pkt;

	mutex_enter(&list->lock);
	pkt = list->free_list[list->head];
	list->free_list[list->head] = NULL;
	list->head = (list->head + 1) & TX_RING_MASK;
	mutex_exit(&list->lock);

	return (pkt);
}

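/*
 * Return a bcopy packet buffer to the free list once its
 * transmission has completed.
 */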
static void
qede_put_bcopy_pkt(qede_tx_ring_t *tx_ring, qede_tx_bcopy_pkt_t *pkt)
{
	qede_tx_bcopy_list_t *list = &tx_ring->bcopy_list;

	mutex_enter(&list->lock);
	list->free_list[list->tail] = pkt;
	list->tail = (list->tail + 1) & TX_RING_MASK;
	mutex_exit(&list->lock);
}

void
qede_print_tx_indexes(qede_tx_ring_t *tx_ring)
{
	uint16_t hw_consumer = LE_16(*tx_ring->hw_cons_ptr);
	uint16_t chain_idx = ecore_chain_get_cons_idx(&tx_ring->tx_bd_ring);
	hw_consumer &= TX_RING_MASK;
	chain_idx &= TX_RING_MASK;
	qede_print_err("!indices: hw_cons %d, chain_cons = %d, sw_prod = %d",
	    hw_consumer, chain_idx, tx_ring->sw_tx_prod);
}

void
qede_print_rx_indexes(qede_rx_ring_t *rx_ring)
{
	u16 hw_bd_cons = HOST_TO_LE_16(*rx_ring->hw_cons_ptr);
	u16 sw_bd_cons = ecore_chain_get_cons_idx(&rx_ring->rx_cqe_ring);

	hw_bd_cons &= (rx_ring->qede->rx_ring_size - 1);
	sw_bd_cons &= (rx_ring->qede->rx_ring_size - 1);
	qede_print_err("!RX indices: hw_cons %d, chain_cons = %d",
	    hw_bd_cons, sw_bd_cons);
}

/*
 * Called from tx_completion intr handler.
 * NOTE: status_block dma mem. must be sync'ed
 * in the interrupt handler
 */
int
qede_process_tx_completions(qede_tx_ring_t *tx_ring)
{
	int count = 0;
	u16 hw_consumer;
	struct eth_tx_bd *tx_bd;
	uint16_t chain_idx;
	u16 nbd, sw_consumer = tx_ring->sw_tx_cons;
	struct eth_tx_1st_bd *first_bd;
	u16 bd_consumed = 0;
	qede_tx_recycle_list_t *recycle_entry;
	qede_dma_handle_entry_t *dmah, *head = NULL, *tail = NULL;
	qede_tx_bcopy_pkt_t *bcopy_pkt;

	hw_consumer = LE_16(*tx_ring->hw_cons_ptr);
	chain_idx = ecore_chain_get_cons_idx(&tx_ring->tx_bd_ring);

	while (hw_consumer != chain_idx) {
		nbd = 0;
		bd_consumed = 0;
		first_bd = NULL;

		recycle_entry = &tx_ring->tx_recycle_list[sw_consumer];
		if (recycle_entry->dmah_entry != NULL) {
			dmah = recycle_entry->dmah_entry;

			head = dmah;

			if (head->mp) {
				freemsg(head->mp);
			}

			while (dmah != NULL) {
				(void) ddi_dma_unbind_handle(dmah->dma_handle);
				dmah = dmah->next;
			}

			qede_put_dmah_entries(tx_ring,
			    head);
			recycle_entry->dmah_entry = NULL;
		} else if (recycle_entry->bcopy_pkt != NULL) {
			bcopy_pkt = recycle_entry->bcopy_pkt;

			qede_put_bcopy_pkt(tx_ring, bcopy_pkt);
			recycle_entry->bcopy_pkt = NULL;
		} else {
			qede_warn(tx_ring->qede,
			    "Invalid completion at index %d",
			    sw_consumer);
		}

		sw_consumer = (sw_consumer + 1) & TX_RING_MASK;

		first_bd =
		    (struct eth_tx_1st_bd *)ecore_chain_consume(
		    &tx_ring->tx_bd_ring);
		bd_consumed++;

		nbd = first_bd->data.nbds;

		while (bd_consumed++ < nbd) {
			ecore_chain_consume(&tx_ring->tx_bd_ring);
		}

		chain_idx = ecore_chain_get_cons_idx(&tx_ring->tx_bd_ring);
		count++;
	}

	tx_ring->sw_tx_cons = sw_consumer;

	if (count && tx_ring->tx_q_sleeping) {
		tx_ring->tx_q_sleeping = 0;
#ifndef NO_CROSSBOW
		RESUME_TX(tx_ring);
#else
		mac_tx_update(tx_ring->qede->mac_handle);
#endif
	}

	return (count);
}

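/*
 * Check whether any tx completions are outstanding by comparing
 * the hardware consumer index against the tx BD chain's consumer
 * index.
 */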
static int
qede_has_tx_work(qede_tx_ring_t *tx_ring)
{
	u16 hw_bd_cons = LE_16(*tx_ring->hw_cons_ptr);
	u16 sw_bd_cons = ecore_chain_get_cons_idx(&tx_ring->tx_bd_ring);

	if (sw_bd_cons == (hw_bd_cons + 1)) {
		return (0);
	}
	return (hw_bd_cons != sw_bd_cons);
}

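/*
 * Returns non-zero when the rx completion ring has new entries,
 * i.e. the hardware consumer index differs from the software one.
 */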
static int
qede_has_rx_work(qede_rx_ring_t *rx_ring)
{
	u16 hw_bd_cons = HOST_TO_LE_16(*rx_ring->hw_cons_ptr);
	u16 sw_bd_cons = ecore_chain_get_cons_idx(&rx_ring->rx_cqe_ring);
	return (hw_bd_cons != sw_bd_cons);
}

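/*
 * Translate the parsing/error flags reported by the hardware into
 * mac layer checksum flags for this mblk. Full L4 and IPv4 header
 * checksum results are advertised only when the hardware actually
 * calculated the L4 checksum and reported no errors.
 */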
static void
qede_set_cksum_flags(mblk_t *mp,
    uint16_t parse_flags)
{
	uint32_t cksum_flags = 0;
	int error = 0;
	bool l4_is_calc, l4_csum_err, iphdr_len_err;

	l4_is_calc =
	    (parse_flags >> PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_SHIFT)
	    & PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_MASK;
	l4_csum_err = (parse_flags >> PARSING_AND_ERR_FLAGS_L4CHKSMERROR_SHIFT)
	    & PARSING_AND_ERR_FLAGS_L4CHKSMERROR_MASK;
	iphdr_len_err = (parse_flags >> PARSING_AND_ERR_FLAGS_IPHDRERROR_SHIFT)
	    & PARSING_AND_ERR_FLAGS_IPHDRERROR_MASK;

	if (l4_is_calc) {
		if (l4_csum_err) {
			error = 1;
		} else if (iphdr_len_err) {
			error = 2;
		} else {
			cksum_flags = HCK_FULLCKSUM_OK | HCK_IPV4_HDRCKSUM_OK;
		}
	}

	if (error == 1) {
		qede_print_err("!%s: got L4 csum error", __func__);
	} else if (error == 2) {
		qede_print_err("!%s: got IP header csum error", __func__);
	}

	mac_hcksum_set(mp, 0, 0, 0, 0, cksum_flags);
}

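/*
 * Take the next posted rx buffer off the active list and consume
 * the matching BD from the rx BD chain. The remaining entry count
 * is passed back so callers can detect when the ring is running
 * low on buffers.
 */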
static qede_rx_buffer_t *
qede_get_next_rx_buffer(qede_rx_ring_t *rx_ring,
    uint32_t *free_buffer_count)
{
	qede_rx_buffer_t *rx_buffer;
	uint32_t num_entries;

	rx_buffer = qede_get_from_active_list(rx_ring, &num_entries);
	ASSERT(rx_buffer != NULL);
	ecore_chain_consume(&rx_ring->rx_bd_ring);
	*free_buffer_count = num_entries;

	return (rx_buffer);
}

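/*
 * Add the next rx buffer to the LRO aggregation state tracked in
 * lro_info, bumping its BD count.
 */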
static uint32_t
qede_get_next_lro_buffer(qede_rx_ring_t *rx_ring,
    qede_lro_info_t *lro_info)
{
	lro_info->rx_buffer[lro_info->bd_count] =
	    qede_get_next_rx_buffer(rx_ring,
	    &lro_info->free_buffer_count);
	lro_info->bd_count++;
	return (DDI_SUCCESS);
}
#ifdef DEBUG_LRO
int agg_count = 0;
bool agg_print = B_TRUE;
#endif
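/*
 * Handle a TPA_START completion: reset the aggregation state for
 * this agg index, record the parsing flags, placement offset,
 * header length, vlan tag and rss hash from the cqe, and claim the
 * buffer descriptors used by the first segment.
 */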
static void
qede_lro_start(qede_rx_ring_t *rx_ring,
    struct eth_fast_path_rx_tpa_start_cqe *cqe)
{
	qede_lro_info_t *lro_info;
	int i, len_on_first_bd, seg_len;

	lro_info = &rx_ring->lro_info[cqe->tpa_agg_index];

	/* ASSERT(lro_info->agg_state != QEDE_AGG_STATE_NONE); */

#ifdef DEBUG_LRO
	if (agg_count++ < 30) {
		qede_dump_start_lro_cqe(cqe);
	} else {
		agg_print = B_FALSE;
	}
#endif

	memset(lro_info, 0, sizeof (qede_lro_info_t));
	lro_info->agg_state = QEDE_AGG_STATE_START;
	rx_ring->lro_active_count++;

	/* Parsing and error flags from the parser */
	lro_info->pars_flags = LE_16(cqe->pars_flags.flags);
	lro_info->pad = LE_16(cqe->placement_offset);
	lro_info->header_len = (uint32_t)cqe->header_len;
	lro_info->vlan_tag = LE_16(cqe->vlan_tag);
	lro_info->rss_hash = LE_32(cqe->rss_hash);

	seg_len = (int)LE_16(cqe->seg_len);
	len_on_first_bd = (int)LE_16(cqe->len_on_first_bd);
	/*
	 * Get the first bd
	 */
	qede_get_next_lro_buffer(rx_ring, lro_info);

	if (len_on_first_bd < seg_len) {
		/*
		 * We end up here with jumbo frames
		 * since a TCP segment can span
		 * multiple buffer descriptors.
		 */
		for (i = 0; i < ETH_TPA_CQE_START_LEN_LIST_SIZE; i++) {
			if (cqe->ext_bd_len_list[i] == 0) {
				break;
			}
			qede_get_next_lro_buffer(rx_ring, lro_info);
		}
	}
}

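/*
 * Handle a TPA_CONT completion: claim the additional buffer
 * descriptors that the hardware appended to this aggregation.
 */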
static void
qede_lro_cont(qede_rx_ring_t *rx_ring,
    struct eth_fast_path_rx_tpa_cont_cqe *cqe)
{
	qede_lro_info_t *lro_info;
	int i;

	lro_info = &rx_ring->lro_info[cqe->tpa_agg_index];

	/* ASSERT(lro_info->agg_state != QEDE_AGG_STATE_START); */
#ifdef DEBUG_LRO
	if (agg_print) {
		qede_dump_cont_lro_cqe(cqe);
	}
#endif

	for (i = 0; i < ETH_TPA_CQE_CONT_LEN_LIST_SIZE; i++) {
		if (cqe->len_list[i] == 0) {
			break;
		}
		qede_get_next_lro_buffer(rx_ring, lro_info);
	}
}

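/*
 * Handle a TPA_END completion: collect any remaining buffer
 * descriptors, then stitch all of the aggregation's buffers into a
 * single b_cont-linked mblk chain and return it along with the
 * total packet length. If the ring is low on buffers the
 * aggregated packet is dropped and its buffers are recycled.
 */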
static mblk_t *
qede_lro_end(qede_rx_ring_t *rx_ring,
    struct eth_fast_path_rx_tpa_end_cqe *cqe,
    int *pkt_bytes)
{
	qede_lro_info_t *lro_info;
	mblk_t *head = NULL, *tail = NULL, *mp = NULL;
	qede_rx_buffer_t *rx_buffer;
	int i, bd_len;
	uint16_t work_length, total_packet_length;
	uint32_t rx_buf_size = rx_ring->rx_buf_size;
	qede_dma_info_t *dma_info;

	lro_info = &rx_ring->lro_info[cqe->tpa_agg_index];

	/* ASSERT(lro_info->agg_state != QEDE_AGG_STATE_START); */

#ifdef DEBUG_LRO
	if (agg_print) {
		qede_dump_end_lro_cqe(cqe);
	}
#endif

	work_length = total_packet_length = LE_16(cqe->total_packet_len);

	/*
	 * Get any buffer descriptors for this cqe
	 */
	for (i = 0; i < ETH_TPA_CQE_END_LEN_LIST_SIZE; i++) {
		if (cqe->len_list[i] == 0) {
			break;
		}
		qede_get_next_lro_buffer(rx_ring, lro_info);
	}

	/* ASSERT(lro_info->bd_count != cqe->num_of_bds); */

	if (lro_info->free_buffer_count <
	    rx_ring->rx_low_buffer_threshold) {
		for (i = 0; i < lro_info->bd_count; i++) {
			qede_recycle_copied_rx_buffer(
			    lro_info->rx_buffer[i]);
			lro_info->rx_buffer[i] = NULL;
		}
		rx_ring->rx_low_water_cnt++;
		lro_info->agg_state = QEDE_AGG_STATE_NONE;
		return (NULL);
	}
	/*
	 * Loop through list of buffers for this
	 * aggregation.  For each one:
	 * 1. Calculate the buffer length
	 * 2. Adjust the mblk read/write pointers
	 * 3. Link the mblk to the local chain using
	 *    b_cont pointers.
	 * Note: each buffer will be rx_buf_size except
	 * the first (subtract the placement_offset)
	 * and the last which contains the remainder
	 * of cqe_end->total_packet_len minus length
	 * of all other buffers.
	 */
	for (i = 0; i < lro_info->bd_count; i++) {

		rx_buffer = lro_info->rx_buffer[i];

		bd_len =
		    (work_length > rx_buf_size) ? rx_buf_size : work_length;
		if (i == 0 &&
		    (cqe->num_of_bds > 1)) {
			bd_len -= lro_info->pad;
		}

		dma_info = &rx_buffer->dma_info;
		ddi_dma_sync(dma_info->dma_handle,
		    dma_info->offset,
		    rx_buf_size,
		    DDI_DMA_SYNC_FORKERNEL);

		mp = rx_buffer->mp;
		mp->b_next = mp->b_cont = NULL;

		if (head == NULL) {
			head = tail = mp;
			mp->b_rptr += lro_info->pad;
		} else {
			tail->b_cont = mp;
			tail = mp;
		}

		mp->b_wptr = (uchar_t *)((unsigned long)mp->b_rptr + bd_len);
		work_length -= bd_len;
	}

	qede_set_cksum_flags(head, lro_info->pars_flags);

	rx_ring->rx_lro_pkt_cnt++;
	rx_ring->lro_active_count--;
	lro_info->agg_state = QEDE_AGG_STATE_NONE;

#ifdef DEBUG_LRO
	if (agg_print) {
		qede_dump_mblk_chain_bcont_ptr(rx_ring->qede, head);
	}
#endif
	*pkt_bytes = (int)total_packet_length;
	return (head);
}

#ifdef DEBUG_JUMBO
int jumbo_count = 0;
bool jumbo_print = B_TRUE;
#endif
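/*
 * Handle a regular completion that spans more than one rx buffer
 * (jumbo frame). The buffers are linked into a single mblk chain
 * via b_cont, with the placement offset applied to the first one.
 */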
static mblk_t *
qede_reg_jumbo_cqe(qede_rx_ring_t *rx_ring,
    struct eth_fast_path_rx_reg_cqe *cqe)
{
	int i;
	qede_rx_buffer_t *rx_buf, *rx_buffer[ETH_RX_MAX_BUFF_PER_PKT];
	mblk_t *mp = NULL, *head = NULL, *tail = NULL;
	uint32_t free_buffer_count = 0;
	uint16_t work_length;
	uint32_t rx_buf_size = rx_ring->rx_buf_size, bd_len;
	qede_dma_info_t *dma_info;
	u8 pad = cqe->placement_offset;

#ifdef DEBUG_JUMBO
	if (jumbo_count++ < 8) {
		qede_dump_reg_cqe(cqe);
	} else {
		jumbo_print = B_FALSE;
	}
#endif

	work_length = HOST_TO_LE_16(cqe->pkt_len);

	/*
	 * Get the buffers/mps for this cqe
	 */
	for (i = 0; i < cqe->bd_num; i++) {
		rx_buffer[i] =
		    qede_get_next_rx_buffer(rx_ring, &free_buffer_count);
	}

	/*
	 * If the buffer ring is running low, drop the
	 * packet and return these buffers.
	 */
	if (free_buffer_count <
	    rx_ring->rx_low_buffer_threshold) {
		for (i = 0; i < cqe->bd_num; i++) {
			qede_recycle_copied_rx_buffer(rx_buffer[i]);
		}
		rx_ring->rx_low_water_cnt++;
		return (NULL);
	}

	for (i = 0; i < cqe->bd_num; i++) {
		rx_buf = rx_buffer[i];

		bd_len =
		    (work_length > rx_buf_size) ? rx_buf_size : work_length;

		/*
		 * Adjust for placement offset
		 * on first buffer.
		 */
		if (i == 0) {
			bd_len -= pad;
		}

		dma_info = &rx_buf->dma_info;
		ddi_dma_sync(dma_info->dma_handle,
		    dma_info->offset,
		    rx_buf_size,
		    DDI_DMA_SYNC_FORKERNEL);

		mp = rx_buf->mp;
		mp->b_next = mp->b_cont = NULL;
		/*
		 * Adjust for placement offset
		 * on first buffer.
		 */
		if (i == 0) {
			mp->b_rptr += pad;
		}

		mp->b_wptr = (uchar_t *)((unsigned long)mp->b_rptr + bd_len);

		if (head == NULL) {
			head = tail = mp;
		} else {
			tail->b_cont = mp;
			tail = mp;
		}

		work_length -= bd_len;
	}

	qede_set_cksum_flags(head,
	    HOST_TO_LE_16(cqe->pars_flags.flags));
#ifdef DEBUG_JUMBO
	if (jumbo_print) {
		qede_dump_mblk_chain_bcont_ptr(rx_ring->qede, head);
	}
#endif
	rx_ring->rx_jumbo_pkt_cnt++;
	return (head);
}

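/*
 * Handle a regular (non-TPA) rx completion. Single-buffer packets
 * are either copied into a freshly allocated mblk (at or below the
 * copy threshold) or passed up in the DMA buffer's own mblk;
 * packets spanning multiple buffers are handed off to
 * qede_reg_jumbo_cqe().
 */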
static mblk_t *
qede_reg_cqe(qede_rx_ring_t *rx_ring,
    struct eth_fast_path_rx_reg_cqe *cqe,
    int *pkt_bytes)
{
	qede_t *qede = rx_ring->qede;
	qede_rx_buffer_t *rx_buffer;
	uint32_t free_buffer_count;
	mblk_t *mp;
	uint16_t pkt_len = HOST_TO_LE_16(cqe->pkt_len);
	u8 pad = cqe->placement_offset;
	qede_dma_info_t *dma_info;
	ddi_dma_handle_t dma_handle;
	char *virt_addr;

	/*
	 * Update the byte count as it will
	 * be the same for normal and jumbo
	 */
	*pkt_bytes = (int)pkt_len;

	if (cqe->bd_num > 1) {
		/*
		 * If this cqe uses more than one
		 * rx buffer then it must be
		 * jumbo.  Call another handler
		 * for this because the process is
		 * quite different.
		 */
		return (qede_reg_jumbo_cqe(rx_ring, cqe));
	}

	rx_buffer = qede_get_next_rx_buffer(rx_ring,
	    &free_buffer_count);

	if (free_buffer_count <
	    rx_ring->rx_low_buffer_threshold) {
		qede_recycle_copied_rx_buffer(rx_buffer);
		rx_ring->rx_low_water_cnt++;
		*pkt_bytes = 0;
		return (NULL);
	}

	dma_info = &rx_buffer->dma_info;
	virt_addr = dma_info->virt_addr;
	dma_handle = dma_info->dma_handle;
	ddi_dma_sync(dma_handle,
	    0, 0, DDI_DMA_SYNC_FORKERNEL);

	if (pkt_len <= rx_ring->rx_copy_threshold) {
		mp = allocb(pkt_len + 2, 0); /* IP HDR_ALIGN */
		if (mp != NULL) {
			virt_addr += pad;
			bcopy(virt_addr, mp->b_rptr, pkt_len);
		} else {
			/*
			 * Post the buffer back to fw and
			 * drop packet
			 */
			qede_print_err("!%s(%d): allocb failed",
			    __func__,
			    rx_ring->qede->instance);
			qede->allocbFailures++;
			goto freebuf;
		}
		/*
		 * We've copied it (or not) and are done with it
		 * so put it back into the passive list.
		 */
		ddi_dma_sync(dma_handle,
		    0, 0, DDI_DMA_SYNC_FORDEV);
		qede_recycle_copied_rx_buffer(rx_buffer);
		rx_ring->rx_copy_cnt++;
	} else {
		/*
		 * We are going to send this mp/buffer
		 * up to the mac layer.  Adjust the
		 * pointers and link it to our chain.
		 * The rx_buffer is returned to us in
		 * the recycle function so we drop it
		 * here.
		 */
		mp = rx_buffer->mp;
		mp->b_rptr += pad;
	}
	mp->b_cont = mp->b_next = NULL;
	mp->b_wptr = (uchar_t *)((unsigned long)mp->b_rptr + pkt_len);

	qede_set_cksum_flags(mp,
	    HOST_TO_LE_16(cqe->pars_flags.flags));
#ifdef DEBUG_JUMBO
	if (jumbo_print) {
		qede_dump_mblk_chain_bnext_ptr(rx_ring->qede, mp);
	}
#endif

	rx_ring->rx_reg_pkt_cnt++;
	return (mp);

freebuf:
	qede_recycle_copied_rx_buffer(rx_buffer);
	return (NULL);
}

/*
 * Routine to process the rx packets on the
 * passed rx_ring. Can be called from intr or
 * poll context/routines
 */
static mblk_t *
qede_process_rx_ring(qede_rx_ring_t *rx_ring, int nbytes, int npkts)
{
	union eth_rx_cqe *cqe;
	u16 last_cqe_consumer = rx_ring->last_cqe_consumer;
	enum eth_rx_cqe_type cqe_type;
	u16 sw_comp_cons, hw_comp_cons;
	mblk_t *mp = NULL, *first_mp = NULL, *last_mp = NULL;
	int pkt_bytes = 0, byte_cnt = 0, pkt_cnt = 0;

	hw_comp_cons = HOST_TO_LE_16(*rx_ring->hw_cons_ptr);

	/* Completion ring sw consumer */
	sw_comp_cons = ecore_chain_get_cons_idx(&rx_ring->rx_cqe_ring);

	while (sw_comp_cons != hw_comp_cons) {
		if ((byte_cnt >= nbytes) ||
		    (pkt_cnt >= npkts)) {
			break;
		}

		/* Get next element and increment the cons_idx */
		cqe = (union eth_rx_cqe *)
		    ecore_chain_consume(&rx_ring->rx_cqe_ring);

		(void) ddi_dma_sync(rx_ring->rx_cqe_dmah,
		    last_cqe_consumer, sizeof (*cqe),
		    DDI_DMA_SYNC_FORKERNEL);

		cqe_type = cqe->fast_path_regular.type;

		switch (cqe_type) {
		case ETH_RX_CQE_TYPE_SLOW_PATH:
			ecore_eth_cqe_completion(&rx_ring->qede->edev.hwfns[0],
			    (struct eth_slow_path_rx_cqe *)cqe);
			goto next_cqe;
		case ETH_RX_CQE_TYPE_REGULAR:
			mp = qede_reg_cqe(rx_ring,
			    &cqe->fast_path_regular,
			    &pkt_bytes);
			break;
		case ETH_RX_CQE_TYPE_TPA_START:
			qede_lro_start(rx_ring,
			    &cqe->fast_path_tpa_start);
			goto next_cqe;
		case ETH_RX_CQE_TYPE_TPA_CONT:
			qede_lro_cont(rx_ring,
			    &cqe->fast_path_tpa_cont);
			goto next_cqe;
		case ETH_RX_CQE_TYPE_TPA_END:
			mp = qede_lro_end(rx_ring,
			    &cqe->fast_path_tpa_end,
			    &pkt_bytes);
			break;
		default:
			if (cqe_type != 0) {
				qede_print_err("!%s(%d): cqe_type %x not "
				    "supported", __func__,
				    rx_ring->qede->instance,
				    cqe_type);
			}
			goto exit_rx;
		}

		/*
		 * If we arrive here with no mp,
		 * then we hit an RX buffer threshold
		 * where we had to drop the packet and
		 * give the buffers back to the device.
		 */
		if (mp == NULL) {
			rx_ring->rx_drop_cnt++;
			goto next_cqe;
		}

		if (first_mp) {
			last_mp->b_next = mp;
		} else {
			first_mp = mp;
		}
		last_mp = mp;
		pkt_cnt++;
		byte_cnt += pkt_bytes;
next_cqe:
		ecore_chain_recycle_consumed(&rx_ring->rx_cqe_ring);
		last_cqe_consumer = sw_comp_cons;
		sw_comp_cons = ecore_chain_get_cons_idx(&rx_ring->rx_cqe_ring);
		if (!(qede_has_rx_work(rx_ring))) {
			ecore_sb_update_sb_idx(rx_ring->fp->sb_info);
		}
		hw_comp_cons = HOST_TO_LE_16(*rx_ring->hw_cons_ptr);
	}
	rx_ring->rx_pkt_cnt += pkt_cnt;
	rx_ring->rx_byte_cnt += byte_cnt;

exit_rx:
	if (first_mp) {
		last_mp->b_next = NULL;
	}

	/*
	 * Since prod update will result in
	 * reading of the bd's, do a dma_sync
	 */
	qede_replenish_rx_buffers(rx_ring);
	qede_update_rx_q_producer(rx_ring);
	rx_ring->last_cqe_consumer = last_cqe_consumer;

	return (first_mp);
}

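/*
 * Service one fastpath instance: reap tx completions on each of its
 * tx rings (one per traffic class) and then pull any received
 * packets off its rx ring, bounded by the nbytes/npkts budget.
 */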
mblk_t *
qede_process_fastpath(qede_fastpath_t *fp,
    int nbytes, int npkts, int *work_done)
{
	int i = 0;
	qede_tx_ring_t *tx_ring;
	qede_rx_ring_t *rx_ring;
	mblk_t *mp = NULL;

	rx_ring = fp->rx_ring;

	for (i = 0; i < fp->qede->num_tc; i++) {
		tx_ring = fp->tx_ring[i];
		if (qede_has_tx_work(tx_ring)) {
			/* process tx completions */
			if (mutex_tryenter(&tx_ring->tx_lock) != 0) {
				*work_done +=
				    qede_process_tx_completions(tx_ring);
				mutex_exit(&tx_ring->tx_lock);
			}
		}
	}

	if (!(qede_has_rx_work(rx_ring))) {
		ecore_sb_update_sb_idx(fp->sb_info);
	}

	rx_ring = fp->rx_ring;
	if (qede_has_rx_work(rx_ring)) {
		mutex_enter(&rx_ring->rx_lock);
		mp = qede_process_rx_ring(rx_ring,
		    nbytes, npkts);
		if (mp) {
			*work_done += 1;
		}
		mutex_exit(&rx_ring->rx_lock);
	}

	return (mp);
}

/*
 * Parse the mblk to extract information
 * from the protocol headers.
 * The routine assumes that the l4 header is tcp. Also
 * it does not account for ipv6 headers since ipv6 lso is
 * unsupported
 */
static void
qede_pkt_parse_lso_headers(qede_tx_pktinfo_t *pktinfo, mblk_t *mp)
{
	struct ether_header *eth_hdr =
	    (struct ether_header *)(void *)mp->b_rptr;
	ipha_t *ip_hdr;
	struct tcphdr *tcp_hdr;

	/* mac header type and len */
	if (ntohs(eth_hdr->ether_type) == ETHERTYPE_IP) {
		pktinfo->ether_type = ntohs(eth_hdr->ether_type);
		pktinfo->mac_hlen = sizeof (struct ether_header);
	} else if (ntohs(eth_hdr->ether_type) == ETHERTYPE_VLAN) {
		struct ether_vlan_header *vlan_hdr =
		    (struct ether_vlan_header *)(void *)mp->b_rptr;
		pktinfo->ether_type = ntohs(vlan_hdr->ether_type);
		pktinfo->mac_hlen = sizeof (struct ether_vlan_header);
	}

	/* ip header type and len */
	ip_hdr = (ipha_t *)(void *)((u8 *)mp->b_rptr + pktinfo->mac_hlen);
	pktinfo->ip_hlen = IPH_HDR_LENGTH(ip_hdr);

	/* Assume TCP protocol */
	pktinfo->l4_proto = 0x06;

	tcp_hdr = (struct tcphdr *)(void *)
	    ((u8 *)mp->b_rptr + pktinfo->mac_hlen + pktinfo->ip_hlen);
	pktinfo->l4_hlen = TCP_HDR_LENGTH(tcp_hdr);

	pktinfo->total_hlen =
	    pktinfo->mac_hlen +
	    pktinfo->ip_hlen +
	    pktinfo->l4_hlen;
}

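/*
 * Query the mac layer for the checksum and LSO offload requests
 * attached to this mblk. LSO information is only looked at when
 * the driver has LSO enabled.
 */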
static void
qede_get_pkt_offload_info(qede_t *qede, mblk_t *mp,
    u32 *use_cksum, boolean_t *use_lso, uint16_t *mss)
{
	u32 pflags;

	mac_hcksum_get(mp, NULL, NULL, NULL, NULL, &pflags);

	*use_cksum = pflags;
	if (qede->lso_enable) {
		u32 pkt_mss = 0;
		u32 lso_flags = 0;

		mac_lso_get(mp, &pkt_mss, &lso_flags);
		*use_lso = (lso_flags == HW_LSO);
		*mss = (u16)pkt_mss;
	}
}

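/*
 * Walk the mblk chain to total up the packet length and count the
 * number of fragments, then record the mac header type and length.
 */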
static void
/* LINTED E_FUNC_ARG_UNUSED */
qede_get_pkt_info(qede_t *qede, mblk_t *mp,
    qede_tx_pktinfo_t *pktinfo)
{
	mblk_t *bp;
	size_t size;
	struct ether_header *eth_hdr =
	    (struct ether_header *)(void *)mp->b_rptr;

	pktinfo->total_len = 0;
	pktinfo->mblk_no = 0;

	/*
	 * Count the total length and the number of
	 * chained mblks in the packet
	 */
	for (bp = mp; bp != NULL; bp = bp->b_cont) {
		size = MBLKL(bp);
		if (size == 0) {
			continue;
		}

		pktinfo->total_len += size;
		pktinfo->mblk_no++;
	}
	/* mac header type and len */
	if (ntohs(eth_hdr->ether_type) == ETHERTYPE_IP) {
		pktinfo->ether_type = ntohs(eth_hdr->ether_type);
		pktinfo->mac_hlen = sizeof (struct ether_header);
	} else if (ntohs(eth_hdr->ether_type) == ETHERTYPE_VLAN) {
		struct ether_vlan_header *vlan_hdr =
		    (struct ether_vlan_header *)(void *)mp->b_rptr;
		pktinfo->ether_type = ntohs(vlan_hdr->ether_type);
		pktinfo->mac_hlen = sizeof (struct ether_vlan_header);
	}
}

/*
 * Routine to sync dma mem for multiple
 * descriptors in a chain
 */
void
qede_desc_dma_mem_sync(ddi_dma_handle_t *dma_handle,
    uint_t start, uint_t count, uint_t range,
    uint_t unit_size, uint_t direction)
{
	if ((start + count) < range) {
		(void) ddi_dma_sync(*dma_handle,
		    start * unit_size, count * unit_size, direction);
	} else {
		(void) ddi_dma_sync(*dma_handle, start * unit_size,
		    0, direction);
		(void) ddi_dma_sync(*dma_handle, 0,
		    (start + count - range) * unit_size,
		    direction);
	}
}

97214b24e2bSVaishali Kulkarni /*
97314b24e2bSVaishali Kulkarni  * Send tx pkt by copying incoming packet in a
97414b24e2bSVaishali Kulkarni  * preallocated and mapped dma buffer
97514b24e2bSVaishali Kulkarni  * Not designed to handle lso for now
97614b24e2bSVaishali Kulkarni  */
97714b24e2bSVaishali Kulkarni static enum qede_xmit_status
qede_tx_bcopy(qede_tx_ring_t * tx_ring,mblk_t * mp,qede_tx_pktinfo_t * pktinfo)97814b24e2bSVaishali Kulkarni qede_tx_bcopy(qede_tx_ring_t *tx_ring, mblk_t *mp, qede_tx_pktinfo_t *pktinfo)
97914b24e2bSVaishali Kulkarni {
98014b24e2bSVaishali Kulkarni 	qede_tx_bcopy_pkt_t *bcopy_pkt = NULL;
98114b24e2bSVaishali Kulkarni 	/* Only one bd will be needed for bcopy packets */
98214b24e2bSVaishali Kulkarni 	struct eth_tx_1st_bd *first_bd;
98314b24e2bSVaishali Kulkarni 	u16 last_producer = tx_ring->sw_tx_prod;
98414b24e2bSVaishali Kulkarni 	uint8_t *txb;
98514b24e2bSVaishali Kulkarni 	mblk_t *bp;
98614b24e2bSVaishali Kulkarni 	u32 mblen;
98714b24e2bSVaishali Kulkarni 
98814b24e2bSVaishali Kulkarni 	bcopy_pkt = qede_get_bcopy_pkt(tx_ring);
98914b24e2bSVaishali Kulkarni 	if (bcopy_pkt == NULL) {
99014b24e2bSVaishali Kulkarni 		qede_print_err("!%s(%d): entry NULL at _tx_ bcopy_list head",
99114b24e2bSVaishali Kulkarni 		    __func__, tx_ring->qede->instance);
99214b24e2bSVaishali Kulkarni 		return (XMIT_FAILED);
99314b24e2bSVaishali Kulkarni 	}
99414b24e2bSVaishali Kulkarni 
99514b24e2bSVaishali Kulkarni 	/*
99614b24e2bSVaishali Kulkarni 	 * Copy the packet data to our copy
99714b24e2bSVaishali Kulkarni 	 * buffer
99814b24e2bSVaishali Kulkarni 	 */
99914b24e2bSVaishali Kulkarni 	txb = bcopy_pkt->virt_addr;
100014b24e2bSVaishali Kulkarni 
100114b24e2bSVaishali Kulkarni 	for (bp = mp; bp != NULL; bp = bp->b_cont) {
100214b24e2bSVaishali Kulkarni 		mblen = MBLKL(bp);
100314b24e2bSVaishali Kulkarni 		if (mblen == 0) {
100414b24e2bSVaishali Kulkarni 			continue;
100514b24e2bSVaishali Kulkarni 		}
100614b24e2bSVaishali Kulkarni 		bcopy(bp->b_rptr, txb, mblen);
100714b24e2bSVaishali Kulkarni 		txb += mblen;
100814b24e2bSVaishali Kulkarni 	}
100914b24e2bSVaishali Kulkarni 
101014b24e2bSVaishali Kulkarni 	(void) ddi_dma_sync(bcopy_pkt->dma_handle,
101114b24e2bSVaishali Kulkarni 	    0, pktinfo->total_len,
101214b24e2bSVaishali Kulkarni 	    DDI_DMA_SYNC_FORDEV);
101314b24e2bSVaishali Kulkarni 
101414b24e2bSVaishali Kulkarni 
101514b24e2bSVaishali Kulkarni 	mutex_enter(&tx_ring->tx_lock);
101614b24e2bSVaishali Kulkarni 	if (ecore_chain_get_elem_left(&tx_ring->tx_bd_ring)<
101714b24e2bSVaishali Kulkarni 	    QEDE_TX_COPY_PATH_PAUSE_THRESHOLD) {
101814b24e2bSVaishali Kulkarni 		tx_ring->tx_q_sleeping = 1;
101914b24e2bSVaishali Kulkarni 		qede_put_bcopy_pkt(tx_ring, bcopy_pkt);
102014b24e2bSVaishali Kulkarni 		mutex_exit(&tx_ring->tx_lock);
102114b24e2bSVaishali Kulkarni #ifdef	DEBUG_TX_RECYCLE
102214b24e2bSVaishali Kulkarni 		qede_print_err("!%s(%d): Pausing tx queue",
102314b24e2bSVaishali Kulkarni 		    __func__, tx_ring->qede->instance);
102414b24e2bSVaishali Kulkarni #endif
102514b24e2bSVaishali Kulkarni 		return (XMIT_PAUSE_QUEUE);
102614b24e2bSVaishali Kulkarni 	}
102714b24e2bSVaishali Kulkarni 
102814b24e2bSVaishali Kulkarni 	first_bd = ecore_chain_produce(&tx_ring->tx_bd_ring);
102914b24e2bSVaishali Kulkarni 	bzero(first_bd, sizeof (*first_bd));
103014b24e2bSVaishali Kulkarni 	first_bd->data.nbds = 1;
103114b24e2bSVaishali Kulkarni 	first_bd->data.bd_flags.bitfields =
103214b24e2bSVaishali Kulkarni 	    (1 << ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT);
103314b24e2bSVaishali Kulkarni 
103414b24e2bSVaishali Kulkarni 	if (pktinfo->cksum_flags & HCK_IPV4_HDRCKSUM) {
103514b24e2bSVaishali Kulkarni 		first_bd->data.bd_flags.bitfields |=
103614b24e2bSVaishali Kulkarni 		    (1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT);
103714b24e2bSVaishali Kulkarni 	}
103814b24e2bSVaishali Kulkarni 
103914b24e2bSVaishali Kulkarni 	if (pktinfo->cksum_flags & HCK_FULLCKSUM) {
104014b24e2bSVaishali Kulkarni 		first_bd->data.bd_flags.bitfields |=
104114b24e2bSVaishali Kulkarni 		    (1 << ETH_TX_1ST_BD_FLAGS_L4_CSUM_SHIFT);
104214b24e2bSVaishali Kulkarni 	}
104314b24e2bSVaishali Kulkarni 
104414b24e2bSVaishali Kulkarni 	BD_SET_ADDR_LEN(first_bd,
104514b24e2bSVaishali Kulkarni 	    bcopy_pkt->phys_addr,
104614b24e2bSVaishali Kulkarni 	    pktinfo->total_len);
104714b24e2bSVaishali Kulkarni 
104814b24e2bSVaishali Kulkarni 	first_bd->data.bitfields |=
104914b24e2bSVaishali Kulkarni 	    (pktinfo->total_len & ETH_TX_DATA_1ST_BD_PKT_LEN_MASK)
105014b24e2bSVaishali Kulkarni 	    << ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT;
105114b24e2bSVaishali Kulkarni 
105214b24e2bSVaishali Kulkarni 	tx_ring->tx_db.data.bd_prod =
105314b24e2bSVaishali Kulkarni 	    HOST_TO_LE_16(ecore_chain_get_prod_idx(&tx_ring->tx_bd_ring));
105414b24e2bSVaishali Kulkarni 
105514b24e2bSVaishali Kulkarni 	tx_ring->tx_recycle_list[tx_ring->sw_tx_prod].bcopy_pkt = bcopy_pkt;
105614b24e2bSVaishali Kulkarni 	tx_ring->tx_recycle_list[tx_ring->sw_tx_prod].dmah_entry = NULL;
105714b24e2bSVaishali Kulkarni 
105814b24e2bSVaishali Kulkarni 	tx_ring->sw_tx_prod++;
105914b24e2bSVaishali Kulkarni 	tx_ring->sw_tx_prod &= TX_RING_MASK;
106014b24e2bSVaishali Kulkarni 
106114b24e2bSVaishali Kulkarni 	(void) ddi_dma_sync(tx_ring->tx_bd_dmah,
106214b24e2bSVaishali Kulkarni 	    last_producer * sizeof (*first_bd), sizeof (*first_bd),
106314b24e2bSVaishali Kulkarni 	    DDI_DMA_SYNC_FORDEV);
106414b24e2bSVaishali Kulkarni 
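	/*
	 * Writing the new producer index to the doorbell bar hands the
	 * bd to the hardware, so the bd and packet data must already be
	 * synced for the device at this point.
	 */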
106514b24e2bSVaishali Kulkarni 	QEDE_DOORBELL_WR(tx_ring, tx_ring->tx_db.raw);
106614b24e2bSVaishali Kulkarni 	mutex_exit(&tx_ring->tx_lock);
106714b24e2bSVaishali Kulkarni 
106814b24e2bSVaishali Kulkarni 	freemsg(mp);
106914b24e2bSVaishali Kulkarni 
107014b24e2bSVaishali Kulkarni 	return (XMIT_DONE);
107114b24e2bSVaishali Kulkarni }
107214b24e2bSVaishali Kulkarni 
107314b24e2bSVaishali Kulkarni /*
107414b24e2bSVaishali Kulkarni  * Send tx packet by mapping the mp(kernel addr)
107514b24e2bSVaishali Kulkarni  * to an existing dma_handle in the driver
107614b24e2bSVaishali Kulkarni  */
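/*
 * Cookies for each mblk come either from the pre-mapped dblk (when
 * DBLK_DMA_PREMAP is set) or from binding the mblk to a handle taken
 * from the tx ring's dmah list.  On a bind failure, or when too many
 * cookies would be needed, the caller retries after a msgpullup().
 */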
107714b24e2bSVaishali Kulkarni static enum qede_xmit_status
107814b24e2bSVaishali Kulkarni qede_tx_mapped(qede_tx_ring_t *tx_ring, mblk_t *mp, qede_tx_pktinfo_t *pktinfo)
107914b24e2bSVaishali Kulkarni {
108014b24e2bSVaishali Kulkarni 	enum qede_xmit_status status = XMIT_FAILED;
108114b24e2bSVaishali Kulkarni 	int ret;
108214b24e2bSVaishali Kulkarni 	qede_dma_handle_entry_t *dmah_entry = NULL;
108314b24e2bSVaishali Kulkarni 	qede_dma_handle_entry_t *head = NULL, *tail = NULL, *hdl;
108414b24e2bSVaishali Kulkarni 	struct eth_tx_1st_bd *first_bd;
108514b24e2bSVaishali Kulkarni 	struct eth_tx_2nd_bd *second_bd = NULL;
108614b24e2bSVaishali Kulkarni 	struct eth_tx_3rd_bd *third_bd = NULL;
108714b24e2bSVaishali Kulkarni 	struct eth_tx_bd *tx_data_bd;
108814b24e2bSVaishali Kulkarni 	struct eth_tx_bd local_bd[64] = { 0 };
108914b24e2bSVaishali Kulkarni 	ddi_dma_cookie_t cookie[64];
109014b24e2bSVaishali Kulkarni 	u32 ncookies, total_cookies = 0, max_cookies = 0, index = 0;
109114b24e2bSVaishali Kulkarni 	ddi_dma_handle_t dma_handle;
109214b24e2bSVaishali Kulkarni 	mblk_t *bp;
109314b24e2bSVaishali Kulkarni 	u32 mblen;
109414b24e2bSVaishali Kulkarni 	boolean_t is_premapped = B_FALSE;
109514b24e2bSVaishali Kulkarni 	u64 dma_premapped = 0, dma_bound = 0;
109614b24e2bSVaishali Kulkarni 	u32 hdl_reserved = 0;
109714b24e2bSVaishali Kulkarni 	u8 nbd = 0;
109814b24e2bSVaishali Kulkarni 	int i, bd_index;
109914b24e2bSVaishali Kulkarni 	u16 last_producer;
110014b24e2bSVaishali Kulkarni 	qede_tx_recycle_list_t *tx_recycle_list = tx_ring->tx_recycle_list;
110114b24e2bSVaishali Kulkarni 	u64 data_addr;
110214b24e2bSVaishali Kulkarni 	size_t data_size;
110314b24e2bSVaishali Kulkarni 
110414b24e2bSVaishali Kulkarni 	if (pktinfo->use_lso) {
110514b24e2bSVaishali Kulkarni 		/*
110614b24e2bSVaishali Kulkarni 		 * TSO allows up to 255 bds; the driver uses the non-LSO limit
110714b24e2bSVaishali Kulkarni 		 */
110814b24e2bSVaishali Kulkarni 		max_cookies = ETH_TX_MAX_BDS_PER_NON_LSO_PACKET - 1;
110914b24e2bSVaishali Kulkarni 		qede_pkt_parse_lso_headers(pktinfo, mp);
111014b24e2bSVaishali Kulkarni 	} else {
111114b24e2bSVaishali Kulkarni 		/*
111214b24e2bSVaishali Kulkarni 		 * For a non-TSO packet, only 18 bds can be used
111314b24e2bSVaishali Kulkarni 		 */
111414b24e2bSVaishali Kulkarni 		max_cookies = ETH_TX_MAX_BDS_PER_NON_LSO_PACKET - 1;
111514b24e2bSVaishali Kulkarni 	}
111614b24e2bSVaishali Kulkarni 
111714b24e2bSVaishali Kulkarni 	for (bp = mp; bp != NULL; bp = bp->b_cont) {
111814b24e2bSVaishali Kulkarni 		mblen = MBLKL(bp);
111914b24e2bSVaishali Kulkarni 		if (mblen == 0) {
112014b24e2bSVaishali Kulkarni 			continue;
112114b24e2bSVaishali Kulkarni 		}
112214b24e2bSVaishali Kulkarni 		is_premapped = B_FALSE;
112314b24e2bSVaishali Kulkarni 		/*
112414b24e2bSVaishali Kulkarni 		 * If the mblk is premapped, get its dma_handle
112514b24e2bSVaishali Kulkarni 		 * and sync the dma memory; otherwise reserve a
112614b24e2bSVaishali Kulkarni 		 * handle from the driver's dma handles list and
112714b24e2bSVaishali Kulkarni 		 * bind the mblk to it.
112814b24e2bSVaishali Kulkarni 		 */
112914b24e2bSVaishali Kulkarni #ifdef	DBLK_DMA_PREMAP
113014b24e2bSVaishali Kulkarni 		if (bp->b_datap->db_flags & DBLK_DMA_PREMAP) {
113114b24e2bSVaishali Kulkarni #ifdef	DEBUG_PREMAP
113214b24e2bSVaishali Kulkarni 			qede_info(tx_ring->qede, "mp is premapped");
113314b24e2bSVaishali Kulkarni #endif
113414b24e2bSVaishali Kulkarni 			tx_ring->tx_premap_count++;
113514b24e2bSVaishali Kulkarni 			ret = dblk_dma_info_get(tx_ring->pm_handle,
113614b24e2bSVaishali Kulkarni 			    bp->b_rptr, mblen,
113714b24e2bSVaishali Kulkarni 			    bp->b_datap, &cookie[index],
113814b24e2bSVaishali Kulkarni 			    &ncookies, &dma_handle);
113914b24e2bSVaishali Kulkarni 			if (ret == DDI_DMA_MAPPED) {
114014b24e2bSVaishali Kulkarni 				is_premapped = B_TRUE;
114114b24e2bSVaishali Kulkarni 				dma_premapped++;
114214b24e2bSVaishali Kulkarni 				(void) ddi_dma_sync(dma_handle, 0, 0,
114314b24e2bSVaishali Kulkarni 				    DDI_DMA_SYNC_FORDEV);
114414b24e2bSVaishali Kulkarni 			} else {
114514b24e2bSVaishali Kulkarni 				tx_ring->tx_premap_fail++;
114614b24e2bSVaishali Kulkarni 			}
114714b24e2bSVaishali Kulkarni 		}
114814b24e2bSVaishali Kulkarni #endif	/* DBLK_DMA_PREMAP */
114914b24e2bSVaishali Kulkarni 
115014b24e2bSVaishali Kulkarni 		if (!is_premapped) {
115114b24e2bSVaishali Kulkarni 			dmah_entry = qede_get_dmah_entry(tx_ring);
115214b24e2bSVaishali Kulkarni 			if (dmah_entry == NULL) {
115314b24e2bSVaishali Kulkarni 				qede_info(tx_ring->qede, "dmah_entry NULL, "
115414b24e2bSVaishali Kulkarni 				    "dropping pkt");
115514b24e2bSVaishali Kulkarni 				status = XMIT_FAILED;
115614b24e2bSVaishali Kulkarni 				goto err_map;
115714b24e2bSVaishali Kulkarni 			}
115814b24e2bSVaishali Kulkarni 
115914b24e2bSVaishali Kulkarni 			if (ddi_dma_addr_bind_handle(dmah_entry->dma_handle,
116014b24e2bSVaishali Kulkarni 			    NULL, (caddr_t)bp->b_rptr, mblen,
116114b24e2bSVaishali Kulkarni 			    DDI_DMA_STREAMING | DDI_DMA_WRITE,
116214b24e2bSVaishali Kulkarni 			    DDI_DMA_DONTWAIT, NULL, &cookie[index], &ncookies)
116314b24e2bSVaishali Kulkarni 			    != DDI_DMA_MAPPED) {
116414b24e2bSVaishali Kulkarni 
116514b24e2bSVaishali Kulkarni #ifdef DEBUG_PULLUP
116614b24e2bSVaishali Kulkarni 				qede_info(tx_ring->qede, "addr_bind() failed for "
116714b24e2bSVaishali Kulkarni 				    "handle %p, len %d mblk_no %d tot_len 0x%x"
116814b24e2bSVaishali Kulkarni 				    " use_lso %d", dmah_entry->dma_handle,
116914b24e2bSVaishali Kulkarni 				    mblen, pktinfo->mblk_no, pktinfo->total_len,
117014b24e2bSVaishali Kulkarni 				    pktinfo->use_lso);
117114b24e2bSVaishali Kulkarni 
117214b24e2bSVaishali Kulkarni 				qede_info(tx_ring->qede, "Falling back to pullup");
117314b24e2bSVaishali Kulkarni #endif
117414b24e2bSVaishali Kulkarni 				status = XMIT_FALLBACK_PULLUP;
117514b24e2bSVaishali Kulkarni 				tx_ring->tx_bind_fail++;
117614b24e2bSVaishali Kulkarni 				goto err_map;
117714b24e2bSVaishali Kulkarni 			}
117814b24e2bSVaishali Kulkarni 			tx_ring->tx_bind_count++;
117914b24e2bSVaishali Kulkarni 
118014b24e2bSVaishali Kulkarni 			if (index == 0) {
118114b24e2bSVaishali Kulkarni 				dmah_entry->mp = mp;
118214b24e2bSVaishali Kulkarni 			} else {
118314b24e2bSVaishali Kulkarni 				dmah_entry->mp = NULL;
118414b24e2bSVaishali Kulkarni 			}
118514b24e2bSVaishali Kulkarni 
118614b24e2bSVaishali Kulkarni 			/* queue into recycle list for tx completion routine */
118714b24e2bSVaishali Kulkarni 			if (tail == NULL) {
118814b24e2bSVaishali Kulkarni 				head = tail = dmah_entry;
118914b24e2bSVaishali Kulkarni 			} else {
119014b24e2bSVaishali Kulkarni 				tail->next = dmah_entry;
119114b24e2bSVaishali Kulkarni 				tail = dmah_entry;
119214b24e2bSVaishali Kulkarni 			}
119314b24e2bSVaishali Kulkarni 
119414b24e2bSVaishali Kulkarni 			hdl_reserved++;
119514b24e2bSVaishali Kulkarni 			dma_bound++;
119614b24e2bSVaishali Kulkarni 		}
119714b24e2bSVaishali Kulkarni 
119814b24e2bSVaishali Kulkarni 		total_cookies += ncookies;
119914b24e2bSVaishali Kulkarni 		if (total_cookies > max_cookies) {
120014b24e2bSVaishali Kulkarni 			tx_ring->tx_too_many_cookies++;
120114b24e2bSVaishali Kulkarni #ifdef DEBUG_PULLUP
120214b24e2bSVaishali Kulkarni 			qede_info(tx_ring->qede,
120314b24e2bSVaishali Kulkarni 			    "total_cookies > max_cookies, "
120414b24e2bSVaishali Kulkarni 			    "pktlen %d, mb num %d",
120514b24e2bSVaishali Kulkarni 			    pktinfo->total_len, pktinfo->mblk_no);
120614b24e2bSVaishali Kulkarni #endif
120714b24e2bSVaishali Kulkarni 			status = XMIT_TOO_MANY_COOKIES;
120814b24e2bSVaishali Kulkarni 			goto err_map_sec;
120914b24e2bSVaishali Kulkarni 		}
121014b24e2bSVaishali Kulkarni 
121114b24e2bSVaishali Kulkarni 		if (is_premapped) {
121214b24e2bSVaishali Kulkarni 			index += ncookies;
121314b24e2bSVaishali Kulkarni 		} else {
121414b24e2bSVaishali Kulkarni 			index++;
121514b24e2bSVaishali Kulkarni 			/*
121614b24e2bSVaishali Kulkarni 			 * Dec. ncookies since we already stored cookie[0]
121714b24e2bSVaishali Kulkarni 			 */
121814b24e2bSVaishali Kulkarni 			ncookies--;
121914b24e2bSVaishali Kulkarni 
122014b24e2bSVaishali Kulkarni 			for (i = 0; i < ncookies; i++, index++)
122114b24e2bSVaishali Kulkarni 				ddi_dma_nextcookie(dmah_entry->dma_handle,
122214b24e2bSVaishali Kulkarni 				    &cookie[index]);
122314b24e2bSVaishali Kulkarni 		}
122414b24e2bSVaishali Kulkarni 	}
122514b24e2bSVaishali Kulkarni 
122614b24e2bSVaishali Kulkarni 	/*
122714b24e2bSVaishali Kulkarni 	 * Guard against the case where we get a series of mblks that cause us
122814b24e2bSVaishali Kulkarni 	 * not to end up with any mapped data.
122914b24e2bSVaishali Kulkarni 	 */
123014b24e2bSVaishali Kulkarni 	if (total_cookies == 0) {
123114b24e2bSVaishali Kulkarni 		status = XMIT_FAILED;
123214b24e2bSVaishali Kulkarni 		goto err_map_sec;
123314b24e2bSVaishali Kulkarni 	}
123414b24e2bSVaishali Kulkarni 
123514b24e2bSVaishali Kulkarni 	if (total_cookies > max_cookies) {
123614b24e2bSVaishali Kulkarni 		tx_ring->tx_too_many_cookies++;
123714b24e2bSVaishali Kulkarni 		status = XMIT_TOO_MANY_COOKIES;
123814b24e2bSVaishali Kulkarni 		goto err_map_sec;
123914b24e2bSVaishali Kulkarni 	}
124014b24e2bSVaishali Kulkarni 	first_bd = (struct eth_tx_1st_bd *)&local_bd[0];
124114b24e2bSVaishali Kulkarni 
124214b24e2bSVaishali Kulkarni 	/*
124314b24e2bSVaishali Kulkarni 	 * Mark this bd as start bd
124414b24e2bSVaishali Kulkarni 	 */
124514b24e2bSVaishali Kulkarni 	first_bd->data.bd_flags.bitfields =
124614b24e2bSVaishali Kulkarni 	    (1 << ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT);
124714b24e2bSVaishali Kulkarni 
124814b24e2bSVaishali Kulkarni 	if (pktinfo->cksum_flags & HCK_IPV4_HDRCKSUM) {
124914b24e2bSVaishali Kulkarni 		first_bd->data.bd_flags.bitfields |=
125014b24e2bSVaishali Kulkarni 		    (1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT);
125114b24e2bSVaishali Kulkarni 	}
125214b24e2bSVaishali Kulkarni 
125314b24e2bSVaishali Kulkarni 	if (pktinfo->cksum_flags & HCK_FULLCKSUM) {
125414b24e2bSVaishali Kulkarni 		first_bd->data.bd_flags.bitfields |=
125514b24e2bSVaishali Kulkarni 		    (1 << ETH_TX_1ST_BD_FLAGS_L4_CSUM_SHIFT);
125614b24e2bSVaishali Kulkarni 	}
125714b24e2bSVaishali Kulkarni 
125814b24e2bSVaishali Kulkarni 
125914b24e2bSVaishali Kulkarni 	/* Fill-up local bds with the tx data and flags */
126014b24e2bSVaishali Kulkarni 	for (i = 0, bd_index = 0; i < total_cookies; i++, bd_index++) {
126114b24e2bSVaishali Kulkarni 		if (bd_index == 0) {
126214b24e2bSVaishali Kulkarni 			BD_SET_ADDR_LEN(first_bd,
126314b24e2bSVaishali Kulkarni 			    cookie[i].dmac_laddress,
126414b24e2bSVaishali Kulkarni 			    cookie[i].dmac_size);
126514b24e2bSVaishali Kulkarni 
126614b24e2bSVaishali Kulkarni 			if (pktinfo->use_lso) {
126714b24e2bSVaishali Kulkarni 			first_bd->data.bd_flags.bitfields |=
126814b24e2bSVaishali Kulkarni 			    1 << ETH_TX_1ST_BD_FLAGS_LSO_SHIFT;
126914b24e2bSVaishali Kulkarni 
127014b24e2bSVaishali Kulkarni 			second_bd = (struct eth_tx_2nd_bd *)&local_bd[1];
127114b24e2bSVaishali Kulkarni 
127214b24e2bSVaishali Kulkarni 			/*
127314b24e2bSVaishali Kulkarni 			 * If the first bd contains
127414b24e2bSVaishali Kulkarni 			 * hdr + data (partial or full data), then split
127514b24e2bSVaishali Kulkarni 			 * the hdr and data between the 1st and 2nd
127614b24e2bSVaishali Kulkarni 			 * bd respectively
127714b24e2bSVaishali Kulkarni 			 */
127814b24e2bSVaishali Kulkarni 			if (first_bd->nbytes > pktinfo->total_hlen) {
127914b24e2bSVaishali Kulkarni 				data_addr = cookie[0].dmac_laddress
128014b24e2bSVaishali Kulkarni 				    + pktinfo->total_hlen;
128114b24e2bSVaishali Kulkarni 				data_size = cookie[i].dmac_size
128214b24e2bSVaishali Kulkarni 				    - pktinfo->total_hlen;
128314b24e2bSVaishali Kulkarni 
128414b24e2bSVaishali Kulkarni 				BD_SET_ADDR_LEN(second_bd,
128514b24e2bSVaishali Kulkarni 				    data_addr,
128614b24e2bSVaishali Kulkarni 				    data_size);
128714b24e2bSVaishali Kulkarni 
128814b24e2bSVaishali Kulkarni 				/*
128914b24e2bSVaishali Kulkarni 				 * First bd already contains the addr of
129014b24e2bSVaishali Kulkarni 				 * the start of the pkt, so just adjust
129114b24e2bSVaishali Kulkarni 				 * the dma len of first_bd
129214b24e2bSVaishali Kulkarni 				 */
129314b24e2bSVaishali Kulkarni 				first_bd->nbytes = pktinfo->total_hlen;
129414b24e2bSVaishali Kulkarni 				bd_index++;
129514b24e2bSVaishali Kulkarni 			} else if (first_bd->nbytes < pktinfo->total_hlen) {
129614b24e2bSVaishali Kulkarni #ifdef DEBUG_PULLUP
129714b24e2bSVaishali Kulkarni 				qede_info(tx_ring->qede,
129814b24e2bSVaishali Kulkarni 				    "Headers not in single bd");
129914b24e2bSVaishali Kulkarni #endif
130014b24e2bSVaishali Kulkarni 				status = XMIT_FALLBACK_PULLUP;
130114b24e2bSVaishali Kulkarni 				goto err_map_sec;
130214b24e2bSVaishali Kulkarni 
130314b24e2bSVaishali Kulkarni 			}
130414b24e2bSVaishali Kulkarni 
130514b24e2bSVaishali Kulkarni 			/*
130614b24e2bSVaishali Kulkarni 			 * The third bd indicates to the fw
130714b24e2bSVaishali Kulkarni 			 * that tso needs to be performed. It must
130814b24e2bSVaishali Kulkarni 			 * be present even if only two cookies are
130914b24e2bSVaishali Kulkarni 			 * needed for the mblk
131014b24e2bSVaishali Kulkarni 			 */
131114b24e2bSVaishali Kulkarni 			third_bd = (struct eth_tx_3rd_bd *)&local_bd[2];
131214b24e2bSVaishali Kulkarni 			third_bd->data.lso_mss |=
131314b24e2bSVaishali Kulkarni 			    HOST_TO_LE_16(pktinfo->mss);
131414b24e2bSVaishali Kulkarni 			third_bd->data.bitfields |=
131514b24e2bSVaishali Kulkarni 			    1 << ETH_TX_DATA_3RD_BD_HDR_NBD_SHIFT;
131614b24e2bSVaishali Kulkarni 			}
131714b24e2bSVaishali Kulkarni 
131814b24e2bSVaishali Kulkarni 			continue;
131914b24e2bSVaishali Kulkarni 		}
132014b24e2bSVaishali Kulkarni 
132114b24e2bSVaishali Kulkarni 		tx_data_bd = &local_bd[bd_index];
132214b24e2bSVaishali Kulkarni 		BD_SET_ADDR_LEN(tx_data_bd,
132314b24e2bSVaishali Kulkarni 		    cookie[i].dmac_laddress,
132414b24e2bSVaishali Kulkarni 		    cookie[i].dmac_size);
132514b24e2bSVaishali Kulkarni 	}
132614b24e2bSVaishali Kulkarni 
132714b24e2bSVaishali Kulkarni 	if (pktinfo->use_lso) {
132814b24e2bSVaishali Kulkarni 		if (bd_index < 3) {
132914b24e2bSVaishali Kulkarni 			nbd = 3;
133014b24e2bSVaishali Kulkarni 		} else {
133114b24e2bSVaishali Kulkarni 			nbd = bd_index;
133214b24e2bSVaishali Kulkarni 		}
133314b24e2bSVaishali Kulkarni 	} else {
133414b24e2bSVaishali Kulkarni 		nbd = total_cookies;
133514b24e2bSVaishali Kulkarni 		first_bd->data.bitfields |=
133614b24e2bSVaishali Kulkarni 		    (pktinfo->total_len & ETH_TX_DATA_1ST_BD_PKT_LEN_MASK)
133714b24e2bSVaishali Kulkarni 		    << ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT;
133814b24e2bSVaishali Kulkarni 	}
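	/*
	 * For LSO at least three bds are always consumed: the first bd
	 * carries only the headers, the third carries the MSS/TSO
	 * indication for the firmware (see where third_bd is filled in
	 * above), and nbd is rounded up to 3 even if fewer cookies were
	 * needed.
	 */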
133914b24e2bSVaishali Kulkarni 
134014b24e2bSVaishali Kulkarni 	first_bd->data.nbds = nbd;
134114b24e2bSVaishali Kulkarni 
134214b24e2bSVaishali Kulkarni 	mutex_enter(&tx_ring->tx_lock);
134314b24e2bSVaishali Kulkarni 
134414b24e2bSVaishali Kulkarni 	/*
134514b24e2bSVaishali Kulkarni 	 * Before copying the local bds into actual,
134614b24e2bSVaishali Kulkarni 	 * check if we have enough on the bd_chain
134714b24e2bSVaishali Kulkarni 	 */
134814b24e2bSVaishali Kulkarni 	if (ecore_chain_get_elem_left(&tx_ring->tx_bd_ring) <
134914b24e2bSVaishali Kulkarni 	    nbd) {
135014b24e2bSVaishali Kulkarni 		tx_ring->tx_q_sleeping = 1;
135114b24e2bSVaishali Kulkarni 		status = XMIT_PAUSE_QUEUE;
135214b24e2bSVaishali Kulkarni #ifdef	DEBUG_TX_RECYCLE
135314b24e2bSVaishali Kulkarni 			qede_info(tx_ring->qede, "Pausing tx queue...");
135414b24e2bSVaishali Kulkarni #endif
135514b24e2bSVaishali Kulkarni 		mutex_exit(&tx_ring->tx_lock);
135614b24e2bSVaishali Kulkarni 		goto err_map_sec;
135714b24e2bSVaishali Kulkarni 	}
135814b24e2bSVaishali Kulkarni 
135914b24e2bSVaishali Kulkarni 	/* Copy the local_bd(s) into the actual bds */
136014b24e2bSVaishali Kulkarni 	for (i = 0; i < nbd; i++) {
136114b24e2bSVaishali Kulkarni 		tx_data_bd = ecore_chain_produce(&tx_ring->tx_bd_ring);
136214b24e2bSVaishali Kulkarni 		bcopy(&local_bd[i], tx_data_bd, sizeof (*tx_data_bd));
136314b24e2bSVaishali Kulkarni 	}
136414b24e2bSVaishali Kulkarni 
136514b24e2bSVaishali Kulkarni 	last_producer = tx_ring->sw_tx_prod;
136614b24e2bSVaishali Kulkarni 
136714b24e2bSVaishali Kulkarni 	tx_ring->tx_recycle_list[tx_ring->sw_tx_prod].dmah_entry = head;
136814b24e2bSVaishali Kulkarni 	tx_ring->tx_recycle_list[tx_ring->sw_tx_prod].bcopy_pkt = NULL;
136914b24e2bSVaishali Kulkarni 	tx_ring->sw_tx_prod = (tx_ring->sw_tx_prod + 1) & TX_RING_MASK;
137014b24e2bSVaishali Kulkarni 
137114b24e2bSVaishali Kulkarni 	tx_ring->tx_db.data.bd_prod =
137214b24e2bSVaishali Kulkarni 	    HOST_TO_LE_16(ecore_chain_get_prod_idx(&tx_ring->tx_bd_ring));
137314b24e2bSVaishali Kulkarni 
137414b24e2bSVaishali Kulkarni 	/* Sync the tx_bd dma mem */
137514b24e2bSVaishali Kulkarni 	qede_desc_dma_mem_sync(&tx_ring->tx_bd_dmah,
137614b24e2bSVaishali Kulkarni 	    last_producer, nbd,
137714b24e2bSVaishali Kulkarni 	    tx_ring->tx_ring_size,
137814b24e2bSVaishali Kulkarni 	    sizeof (struct eth_tx_bd),
137914b24e2bSVaishali Kulkarni 	    DDI_DMA_SYNC_FORDEV);
138014b24e2bSVaishali Kulkarni 
138114b24e2bSVaishali Kulkarni 	/*
138214b24e2bSVaishali Kulkarni 	 * Write to doorbell bar
138314b24e2bSVaishali Kulkarni 	 */
138414b24e2bSVaishali Kulkarni 	QEDE_DOORBELL_WR(tx_ring, tx_ring->tx_db.raw);
138514b24e2bSVaishali Kulkarni 
138614b24e2bSVaishali Kulkarni 	mutex_exit(&tx_ring->tx_lock);
138714b24e2bSVaishali Kulkarni 
138814b24e2bSVaishali Kulkarni 	return (XMIT_DONE);
138914b24e2bSVaishali Kulkarni err_map:
139014b24e2bSVaishali Kulkarni 	if (dmah_entry != NULL) {
139114b24e2bSVaishali Kulkarni 		if (tail == NULL) {
139214b24e2bSVaishali Kulkarni 			head = tail = dmah_entry;
139314b24e2bSVaishali Kulkarni 		} else {
139414b24e2bSVaishali Kulkarni 			tail->next = dmah_entry;
139514b24e2bSVaishali Kulkarni 			tail = dmah_entry;
139614b24e2bSVaishali Kulkarni 		}
139714b24e2bSVaishali Kulkarni 		hdl_reserved++;
139814b24e2bSVaishali Kulkarni 	}
139914b24e2bSVaishali Kulkarni 
140014b24e2bSVaishali Kulkarni err_map_sec:
140114b24e2bSVaishali Kulkarni 
140214b24e2bSVaishali Kulkarni 	hdl = head;
140314b24e2bSVaishali Kulkarni 
140414b24e2bSVaishali Kulkarni 	while (hdl != NULL) {
140514b24e2bSVaishali Kulkarni 		(void) ddi_dma_unbind_handle(hdl->dma_handle);
140614b24e2bSVaishali Kulkarni 		hdl = hdl->next;
140714b24e2bSVaishali Kulkarni 	}
140814b24e2bSVaishali Kulkarni 
140914b24e2bSVaishali Kulkarni 	if (head != NULL) {
141014b24e2bSVaishali Kulkarni 		qede_put_dmah_entries(tx_ring, head);
141114b24e2bSVaishali Kulkarni 	}
141214b24e2bSVaishali Kulkarni 
141314b24e2bSVaishali Kulkarni 	return (status);
141414b24e2bSVaishali Kulkarni }
141514b24e2bSVaishali Kulkarni 
141614b24e2bSVaishali Kulkarni static enum qede_xmit_status
141714b24e2bSVaishali Kulkarni qede_send_tx_packet(qede_t *qede, qede_tx_ring_t *tx_ring, mblk_t *mp)
141814b24e2bSVaishali Kulkarni {
141914b24e2bSVaishali Kulkarni 	boolean_t force_pullup = B_FALSE;
142014b24e2bSVaishali Kulkarni 	enum qede_xmit_status status = XMIT_FAILED;
142114b24e2bSVaishali Kulkarni 	enum qede_xmit_mode xmit_mode = USE_BCOPY;
142214b24e2bSVaishali Kulkarni 	qede_tx_pktinfo_t pktinfo;
142314b24e2bSVaishali Kulkarni 	mblk_t *original_mp = NULL, *pulled_up_mp = NULL;
142414b24e2bSVaishali Kulkarni 	struct ether_vlan_header *ethvhdr;
142514b24e2bSVaishali Kulkarni 
142614b24e2bSVaishali Kulkarni 	mutex_enter(&tx_ring->tx_lock);
142714b24e2bSVaishali Kulkarni 	if (ecore_chain_get_elem_left(&tx_ring->tx_bd_ring) <
142814b24e2bSVaishali Kulkarni 	    qede->tx_recycle_threshold) {
142914b24e2bSVaishali Kulkarni #ifdef	DEBUG_TX_RECYCLE
143014b24e2bSVaishali Kulkarni 		qede_info(qede, "Recycling from tx routine");
143114b24e2bSVaishali Kulkarni #endif
143214b24e2bSVaishali Kulkarni 		if (qede_process_tx_completions(tx_ring) <
143314b24e2bSVaishali Kulkarni 		    qede->tx_recycle_threshold) {
143414b24e2bSVaishali Kulkarni #ifdef	DEBUG_TX_RECYCLE
143514b24e2bSVaishali Kulkarni 			qede_info(qede, "Still not enough bd after cleanup, "
143614b24e2bSVaishali Kulkarni 			    "pausing tx queue...");
143714b24e2bSVaishali Kulkarni #endif
143814b24e2bSVaishali Kulkarni 			tx_ring->tx_q_sleeping = 1;
143914b24e2bSVaishali Kulkarni 			mutex_exit(&tx_ring->tx_lock);
144014b24e2bSVaishali Kulkarni 			return (XMIT_PAUSE_QUEUE);
144114b24e2bSVaishali Kulkarni 		}
144214b24e2bSVaishali Kulkarni 	}
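	/*
	 * When XMIT_PAUSE_QUEUE is returned above, the tx completion
	 * path is expected to clear tx_q_sleeping and wake the ring
	 * once enough bds have been reclaimed.
	 */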
144314b24e2bSVaishali Kulkarni 
144414b24e2bSVaishali Kulkarni 	mutex_exit(&tx_ring->tx_lock);
144514b24e2bSVaishali Kulkarni 
144614b24e2bSVaishali Kulkarni 	bzero(&pktinfo, sizeof (pktinfo));
144714b24e2bSVaishali Kulkarni 
144814b24e2bSVaishali Kulkarni 	/* Get the offload reqd. on the pkt */
144914b24e2bSVaishali Kulkarni 	qede_get_pkt_offload_info(qede, mp, &pktinfo.cksum_flags,
145014b24e2bSVaishali Kulkarni 	    &pktinfo.use_lso, &pktinfo.mss);
145114b24e2bSVaishali Kulkarni 
145214b24e2bSVaishali Kulkarni do_pullup:
145314b24e2bSVaishali Kulkarni 	if (force_pullup) {
145414b24e2bSVaishali Kulkarni 		tx_ring->tx_pullup_count++;
145514b24e2bSVaishali Kulkarni #ifdef	DEBUG_PULLUP
145614b24e2bSVaishali Kulkarni 		qede_info(qede, "Pulling up original mp %p", mp);
145714b24e2bSVaishali Kulkarni #endif
145814b24e2bSVaishali Kulkarni 		/*
145914b24e2bSVaishali Kulkarni 		 * Try to accumulate all mblks of this pkt
146014b24e2bSVaishali Kulkarni 		 * into a single mblk
146114b24e2bSVaishali Kulkarni 		 */
146214b24e2bSVaishali Kulkarni 		original_mp = mp;
146314b24e2bSVaishali Kulkarni 		if ((pulled_up_mp = msgpullup(mp, -1)) != NULL) {
146414b24e2bSVaishali Kulkarni #ifdef	DEBUG_PULLUP
146514b24e2bSVaishali Kulkarni 			qede_info(qede, "New mp %p, ori %p", pulled_up_mp, mp);
146614b24e2bSVaishali Kulkarni #endif
146714b24e2bSVaishali Kulkarni 			/*
146814b24e2bSVaishali Kulkarni 			 * Proceed with the new single
146914b24e2bSVaishali Kulkarni 			 * mp
147014b24e2bSVaishali Kulkarni 			 */
147114b24e2bSVaishali Kulkarni 			mp = pulled_up_mp;
147214b24e2bSVaishali Kulkarni 			xmit_mode = XMIT_MODE_UNUSED;
147314b24e2bSVaishali Kulkarni 			pktinfo.pulled_up = B_TRUE;
147414b24e2bSVaishali Kulkarni 		} else {
147514b24e2bSVaishali Kulkarni #ifdef	DEBUG_PULLUP
147614b24e2bSVaishali Kulkarni 			qede_info(tx_ring->qede, "Pullup failed");
147714b24e2bSVaishali Kulkarni #endif
147814b24e2bSVaishali Kulkarni 			status = XMIT_FAILED;
147914b24e2bSVaishali Kulkarni 			goto exit;
148014b24e2bSVaishali Kulkarni 		}
148114b24e2bSVaishali Kulkarni 	}
148214b24e2bSVaishali Kulkarni 
148314b24e2bSVaishali Kulkarni 	qede_get_pkt_info(qede, mp, &pktinfo);
148414b24e2bSVaishali Kulkarni 
148514b24e2bSVaishali Kulkarni 
148614b24e2bSVaishali Kulkarni 	if ((!pktinfo.use_lso) &&
148714b24e2bSVaishali Kulkarni 	    (pktinfo.total_len > (qede->mtu + pktinfo.mac_hlen))) {
148814b24e2bSVaishali Kulkarni 		qede_info(tx_ring->qede,
148914b24e2bSVaishali Kulkarni 		    "Packet drop as packet len 0x%x > 0x%x",
149014b24e2bSVaishali Kulkarni 		    pktinfo.total_len, (qede->mtu + pktinfo.mac_hlen));
149114b24e2bSVaishali Kulkarni 		status = XMIT_FAILED;
149214b24e2bSVaishali Kulkarni 		goto exit;
149314b24e2bSVaishali Kulkarni 	}
149414b24e2bSVaishali Kulkarni 
149514b24e2bSVaishali Kulkarni 
149614b24e2bSVaishali Kulkarni #ifdef	DEBUG_PULLUP
149714b24e2bSVaishali Kulkarni 	if (force_pullup) {
149814b24e2bSVaishali Kulkarni 		qede_print_err("!%s: mp %p, pktinfo : total_len %d,"
149914b24e2bSVaishali Kulkarni 		    " mblk_no %d, ether_type %d\n"
150014b24e2bSVaishali Kulkarni 		    "mac_hlen %d, ip_hlen %d, l4_hlen %d\n"
150114b24e2bSVaishali Kulkarni 		    "l4_proto %d, use_cksum:use_lso %d:%d mss %d", __func__, mp,
150214b24e2bSVaishali Kulkarni 		    pktinfo.total_len, pktinfo.mblk_no, pktinfo.ether_type,
150314b24e2bSVaishali Kulkarni 		    pktinfo.mac_hlen, pktinfo.ip_hlen, pktinfo.l4_hlen,
150414b24e2bSVaishali Kulkarni 		    pktinfo.l4_proto, pktinfo.cksum_flags, pktinfo.use_lso,
150514b24e2bSVaishali Kulkarni 		    pktinfo.mss);
150614b24e2bSVaishali Kulkarni 	}
150714b24e2bSVaishali Kulkarni #endif
150814b24e2bSVaishali Kulkarni 
150914b24e2bSVaishali Kulkarni #ifdef	DEBUG_PREMAP
151014b24e2bSVaishali Kulkarni 	if (DBLK_IS_PREMAPPED(mp->b_datap)) {
151114b24e2bSVaishali Kulkarni 		qede_print_err("!%s(%d): mp %p is PREMAPPED",
151214b24e2bSVaishali Kulkarni 		    __func__, qede->instance, mp);
151314b24e2bSVaishali Kulkarni 	}
151414b24e2bSVaishali Kulkarni #endif
151514b24e2bSVaishali Kulkarni 
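	/*
	 * Select the transmit mode: large or pre-mapped packets use dma
	 * binding, packets at or below tx_bcopy_threshold are copied,
	 * and fragmented, misaligned or over-long chains fall back to
	 * copy mode or to a pullup retry below.
	 */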
151614b24e2bSVaishali Kulkarni #ifdef	DBLK_DMA_PREMAP
151714b24e2bSVaishali Kulkarni 	if (DBLK_IS_PREMAPPED(mp->b_datap) ||
151814b24e2bSVaishali Kulkarni 	    pktinfo.total_len > qede->tx_bcopy_threshold) {
151914b24e2bSVaishali Kulkarni 		xmit_mode = USE_DMA_BIND;
152014b24e2bSVaishali Kulkarni 	}
152114b24e2bSVaishali Kulkarni #else
152214b24e2bSVaishali Kulkarni 	if (pktinfo.total_len > qede->tx_bcopy_threshold) {
152314b24e2bSVaishali Kulkarni 		xmit_mode = USE_DMA_BIND;
152414b24e2bSVaishali Kulkarni 	}
152514b24e2bSVaishali Kulkarni #endif
152614b24e2bSVaishali Kulkarni 
152714b24e2bSVaishali Kulkarni 	if (pktinfo.total_len <= qede->tx_bcopy_threshold) {
152814b24e2bSVaishali Kulkarni 		xmit_mode = USE_BCOPY;
152914b24e2bSVaishali Kulkarni 	}
153014b24e2bSVaishali Kulkarni 
153114b24e2bSVaishali Kulkarni 	/*
153214b24e2bSVaishali Kulkarni 	 * if mac + ip hdr not in one contiguous block,
153314b24e2bSVaishali Kulkarni 	 * use copy mode
153414b24e2bSVaishali Kulkarni 	 */
153514b24e2bSVaishali Kulkarni 	if (MBLKL(mp) < (ETHER_HEADER_LEN + IP_HEADER_LEN)) {
153614b24e2bSVaishali Kulkarni 		/*qede_info(qede, "mblk too small, using copy mode, len = %d", MBLKL(mp));*/
153714b24e2bSVaishali Kulkarni 		xmit_mode = USE_BCOPY;
153814b24e2bSVaishali Kulkarni 	}
153914b24e2bSVaishali Kulkarni 
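	/* Use copy mode if the payload does not start on a 2-byte boundary */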
154014b24e2bSVaishali Kulkarni 	if ((uintptr_t)mp->b_rptr & 1) {
154114b24e2bSVaishali Kulkarni 		xmit_mode = USE_BCOPY;
154214b24e2bSVaishali Kulkarni 	}
154314b24e2bSVaishali Kulkarni 
154414b24e2bSVaishali Kulkarni 	/*
154514b24e2bSVaishali Kulkarni 	 * If the packet has too many mblks (and hence would need too
154614b24e2bSVaishali Kulkarni 	 * many dma cookies), pull it up into a single mblk.
154714b24e2bSVaishali Kulkarni 	 * currently, ETH_TX_MAX_BDS_PER_NON_LSO_PACKET = 18
154814b24e2bSVaishali Kulkarni 	 */
154914b24e2bSVaishali Kulkarni 	if (pktinfo.mblk_no > (ETH_TX_MAX_BDS_PER_NON_LSO_PACKET - 1)) {
155014b24e2bSVaishali Kulkarni 		if (force_pullup) {
155114b24e2bSVaishali Kulkarni 			tx_ring->tx_too_many_mblks++;
155214b24e2bSVaishali Kulkarni 			status = XMIT_FAILED;
155314b24e2bSVaishali Kulkarni 			goto exit;
155414b24e2bSVaishali Kulkarni 		} else {
155514b24e2bSVaishali Kulkarni 			xmit_mode = USE_PULLUP;
155614b24e2bSVaishali Kulkarni 		}
155714b24e2bSVaishali Kulkarni 	}
155814b24e2bSVaishali Kulkarni 
155914b24e2bSVaishali Kulkarni #ifdef	TX_FORCE_COPY_MODE
156014b24e2bSVaishali Kulkarni 	xmit_mode = USE_BCOPY;
156114b24e2bSVaishali Kulkarni #elif	TX_FORCE_MAPPED_MODE
156214b24e2bSVaishali Kulkarni 	xmit_mode = USE_DMA_BIND;
156314b24e2bSVaishali Kulkarni #endif
156414b24e2bSVaishali Kulkarni 
156514b24e2bSVaishali Kulkarni #ifdef	DEBUG_PULLUP
156614b24e2bSVaishali Kulkarni 	if (force_pullup) {
156714b24e2bSVaishali Kulkarni 		qede_info(qede, "using mode %d on pulled mp %p",
156814b24e2bSVaishali Kulkarni 		    xmit_mode, mp);
156914b24e2bSVaishali Kulkarni 	}
157014b24e2bSVaishali Kulkarni #endif
157114b24e2bSVaishali Kulkarni 
157214b24e2bSVaishali Kulkarni 	/*
157314b24e2bSVaishali Kulkarni 	 * Use Mapped mode for the packet
157414b24e2bSVaishali Kulkarni 	 */
157514b24e2bSVaishali Kulkarni 	if (xmit_mode == USE_DMA_BIND) {
157614b24e2bSVaishali Kulkarni 		status = qede_tx_mapped(tx_ring, mp, &pktinfo);
157714b24e2bSVaishali Kulkarni 		if (status == XMIT_DONE) {
157814b24e2bSVaishali Kulkarni 			if (pktinfo.use_lso) {
157914b24e2bSVaishali Kulkarni 				tx_ring->tx_lso_pkt_count++;
158014b24e2bSVaishali Kulkarni 			} else if (pktinfo.total_len > 1518) {
158114b24e2bSVaishali Kulkarni 				tx_ring->tx_jumbo_pkt_count++;
158214b24e2bSVaishali Kulkarni 			}
158314b24e2bSVaishali Kulkarni 			tx_ring->tx_mapped_pkts++;
158414b24e2bSVaishali Kulkarni 			goto exit;
158514b24e2bSVaishali Kulkarni 		} else if ((status == XMIT_TOO_MANY_COOKIES ||
158614b24e2bSVaishali Kulkarni 		    status == XMIT_FALLBACK_PULLUP) && !force_pullup) {
158714b24e2bSVaishali Kulkarni 			xmit_mode = USE_PULLUP;
158814b24e2bSVaishali Kulkarni 		} else {
158914b24e2bSVaishali Kulkarni 			status = XMIT_FAILED;
159014b24e2bSVaishali Kulkarni 			goto exit;
159114b24e2bSVaishali Kulkarni 		}
159214b24e2bSVaishali Kulkarni 	}
159314b24e2bSVaishali Kulkarni 
159414b24e2bSVaishali Kulkarni 	if (xmit_mode == USE_BCOPY) {
159514b24e2bSVaishali Kulkarni 		status = qede_tx_bcopy(tx_ring, mp, &pktinfo);
159614b24e2bSVaishali Kulkarni 		if (status == XMIT_DONE) {
159714b24e2bSVaishali Kulkarni 			tx_ring->tx_copy_count++;
159814b24e2bSVaishali Kulkarni 			goto exit;
159914b24e2bSVaishali Kulkarni 		} else if ((status == XMIT_FALLBACK_PULLUP) &&
160014b24e2bSVaishali Kulkarni 		    !force_pullup) {
160114b24e2bSVaishali Kulkarni 			xmit_mode = USE_PULLUP;
160214b24e2bSVaishali Kulkarni 		} else {
160314b24e2bSVaishali Kulkarni 			goto exit;
160414b24e2bSVaishali Kulkarni 		}
160514b24e2bSVaishali Kulkarni 	}
160614b24e2bSVaishali Kulkarni 
160714b24e2bSVaishali Kulkarni 	if (xmit_mode == USE_PULLUP) {
160814b24e2bSVaishali Kulkarni 		force_pullup = B_TRUE;
160914b24e2bSVaishali Kulkarni 		tx_ring->tx_pullup_count++;
161014b24e2bSVaishali Kulkarni 		goto do_pullup;
161114b24e2bSVaishali Kulkarni 	}
161214b24e2bSVaishali Kulkarni 
161314b24e2bSVaishali Kulkarni exit:
161414b24e2bSVaishali Kulkarni 	if (status != XMIT_DONE) {
161514b24e2bSVaishali Kulkarni 		/*
161614b24e2bSVaishali Kulkarni 		 * if msgpullup succeeded, but something else failed,
161714b24e2bSVaishali Kulkarni 		 * free the pulled-up msg and return original mblk to
161814b24e2bSVaishali Kulkarni 		 * stack, indicating tx failure
161914b24e2bSVaishali Kulkarni 		 */
162014b24e2bSVaishali Kulkarni 		if (pulled_up_mp) {
162114b24e2bSVaishali Kulkarni 			qede_info(qede, "tx failed, free pullup pkt %p", mp);
162214b24e2bSVaishali Kulkarni 			freemsg(pulled_up_mp);
162314b24e2bSVaishali Kulkarni 			mp = original_mp;
162414b24e2bSVaishali Kulkarni 		}
162514b24e2bSVaishali Kulkarni 	} else {
162614b24e2bSVaishali Kulkarni 		tx_ring->tx_byte_count += pktinfo.total_len;
162714b24e2bSVaishali Kulkarni 		/*
162814b24e2bSVaishali Kulkarni 		 * If tx was successful after a pullup, then free the
162914b24e2bSVaishali Kulkarni 		 * original mp. The pulled-up mp will be freed as part
163014b24e2bSVaishali Kulkarni 		 * of tx completions processing
163114b24e2bSVaishali Kulkarni 		 */
163214b24e2bSVaishali Kulkarni 		if (pulled_up_mp) {
163314b24e2bSVaishali Kulkarni #ifdef	DEBUG_PULLUP
163414b24e2bSVaishali Kulkarni 			qede_info(qede,
163514b24e2bSVaishali Kulkarni 			    "success, free ori mp %p", original_mp);
163614b24e2bSVaishali Kulkarni #endif
163714b24e2bSVaishali Kulkarni 			freemsg(original_mp);
163814b24e2bSVaishali Kulkarni 		}
163914b24e2bSVaishali Kulkarni 	}
164014b24e2bSVaishali Kulkarni 
164114b24e2bSVaishali Kulkarni 	return (status);
164214b24e2bSVaishali Kulkarni }
164314b24e2bSVaishali Kulkarni 
164414b24e2bSVaishali Kulkarni typedef	uint32_t	ub4; /* unsigned 4-byte quantities */
164514b24e2bSVaishali Kulkarni typedef	uint8_t		ub1;
164614b24e2bSVaishali Kulkarni 
164714b24e2bSVaishali Kulkarni #define	hashsize(n)	((ub4)1<<(n))
164814b24e2bSVaishali Kulkarni #define	hashmask(n)	(hashsize(n)-1)
164914b24e2bSVaishali Kulkarni 
165014b24e2bSVaishali Kulkarni #define	mix(a, b, c) \
165114b24e2bSVaishali Kulkarni { \
165214b24e2bSVaishali Kulkarni 	a -= b; a -= c; a ^= (c>>13); \
165314b24e2bSVaishali Kulkarni 	b -= c; b -= a; b ^= (a<<8); \
165414b24e2bSVaishali Kulkarni 	c -= a; c -= b; c ^= (b>>13); \
165514b24e2bSVaishali Kulkarni 	a -= b; a -= c; a ^= (c>>12);  \
165614b24e2bSVaishali Kulkarni 	b -= c; b -= a; b ^= (a<<16); \
165714b24e2bSVaishali Kulkarni 	c -= a; c -= b; c ^= (b>>5); \
165814b24e2bSVaishali Kulkarni 	a -= b; a -= c; a ^= (c>>3);  \
165914b24e2bSVaishali Kulkarni 	b -= c; b -= a; b ^= (a<<10); \
166014b24e2bSVaishali Kulkarni 	c -= a; c -= b; c ^= (b>>15); \
166114b24e2bSVaishali Kulkarni }
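
/*
 * mix() and hash() below appear to be Bob Jenkins' public-domain
 * lookup2 hash (note the 0x9e3779b9 golden-ratio initializer); the
 * driver uses it to spread flows across tx rings.
 */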
166214b24e2bSVaishali Kulkarni 
166314b24e2bSVaishali Kulkarni ub4
166414b24e2bSVaishali Kulkarni hash(k, length, initval)
166514b24e2bSVaishali Kulkarni register ub1 *k;	/* the key */
166614b24e2bSVaishali Kulkarni register ub4 length;	/* the length of the key */
166714b24e2bSVaishali Kulkarni register ub4 initval;	/* the previous hash, or an arbitrary value */
166814b24e2bSVaishali Kulkarni {
166914b24e2bSVaishali Kulkarni 	register ub4 a, b, c, len;
167014b24e2bSVaishali Kulkarni 
167114b24e2bSVaishali Kulkarni 	/* Set up the internal state */
167214b24e2bSVaishali Kulkarni 	len = length;
167314b24e2bSVaishali Kulkarni 	a = b = 0x9e3779b9;	/* the golden ratio; an arbitrary value */
167414b24e2bSVaishali Kulkarni 	c = initval;		/* the previous hash value */
167514b24e2bSVaishali Kulkarni 
167614b24e2bSVaishali Kulkarni 	/* handle most of the key */
167714b24e2bSVaishali Kulkarni 	while (len >= 12)
167814b24e2bSVaishali Kulkarni 	{
167914b24e2bSVaishali Kulkarni 		a += (k[0] +((ub4)k[1]<<8) +((ub4)k[2]<<16) +((ub4)k[3]<<24));
168014b24e2bSVaishali Kulkarni 		b += (k[4] +((ub4)k[5]<<8) +((ub4)k[6]<<16) +((ub4)k[7]<<24));
168114b24e2bSVaishali Kulkarni 		c += (k[8] +((ub4)k[9]<<8) +((ub4)k[10]<<16)+((ub4)k[11]<<24));
168214b24e2bSVaishali Kulkarni 		mix(a, b, c);
168314b24e2bSVaishali Kulkarni 		k += 12;
168414b24e2bSVaishali Kulkarni 		len -= 12;
168514b24e2bSVaishali Kulkarni 	}
168614b24e2bSVaishali Kulkarni 
168714b24e2bSVaishali Kulkarni 	/* handle the last 11 bytes */
168814b24e2bSVaishali Kulkarni 	c += length;
168914b24e2bSVaishali Kulkarni 	/* all the case statements fall through */
169014b24e2bSVaishali Kulkarni 	switch (len)
169114b24e2bSVaishali Kulkarni 	{
169214b24e2bSVaishali Kulkarni 	/* FALLTHRU */
169314b24e2bSVaishali Kulkarni 	case 11:
169414b24e2bSVaishali Kulkarni 		c += ((ub4)k[10]<<24);
169514b24e2bSVaishali Kulkarni 	/* FALLTHRU */
169614b24e2bSVaishali Kulkarni 	case 10:
169714b24e2bSVaishali Kulkarni 		c += ((ub4)k[9]<<16);
169814b24e2bSVaishali Kulkarni 	/* FALLTHRU */
169914b24e2bSVaishali Kulkarni 	case 9 :
170014b24e2bSVaishali Kulkarni 		c += ((ub4)k[8]<<8);
170114b24e2bSVaishali Kulkarni 	/* the first byte of c is reserved for the length */
170214b24e2bSVaishali Kulkarni 	/* FALLTHRU */
170314b24e2bSVaishali Kulkarni 	case 8 :
170414b24e2bSVaishali Kulkarni 		b += ((ub4)k[7]<<24);
170514b24e2bSVaishali Kulkarni 	/* FALLTHRU */
170614b24e2bSVaishali Kulkarni 	case 7 :
170714b24e2bSVaishali Kulkarni 		b += ((ub4)k[6]<<16);
170814b24e2bSVaishali Kulkarni 	/* FALLTHRU */
170914b24e2bSVaishali Kulkarni 	case 6 :
171014b24e2bSVaishali Kulkarni 		b += ((ub4)k[5]<<8);
171114b24e2bSVaishali Kulkarni 	/* FALLTHRU */
171214b24e2bSVaishali Kulkarni 	case 5 :
171314b24e2bSVaishali Kulkarni 		b += k[4];
171414b24e2bSVaishali Kulkarni 	/* FALLTHRU */
171514b24e2bSVaishali Kulkarni 	case 4 :
171614b24e2bSVaishali Kulkarni 		a += ((ub4)k[3]<<24);
171714b24e2bSVaishali Kulkarni 	/* FALLTHRU */
171814b24e2bSVaishali Kulkarni 	case 3 :
171914b24e2bSVaishali Kulkarni 		a += ((ub4)k[2]<<16);
172014b24e2bSVaishali Kulkarni 	/* FALLTHRU */
172114b24e2bSVaishali Kulkarni 	case 2 :
172214b24e2bSVaishali Kulkarni 		a += ((ub4)k[1]<<8);
172314b24e2bSVaishali Kulkarni 	/* FALLTHRU */
172414b24e2bSVaishali Kulkarni 	case 1 :
172514b24e2bSVaishali Kulkarni 		a += k[0];
172614b24e2bSVaishali Kulkarni 	/* case 0: nothing left to add */
172714b24e2bSVaishali Kulkarni 	}
172814b24e2bSVaishali Kulkarni 	mix(a, b, c);
172914b24e2bSVaishali Kulkarni 	/* report the result */
173014b24e2bSVaishali Kulkarni 	return (c);
173114b24e2bSVaishali Kulkarni }
173214b24e2bSVaishali Kulkarni 
173314b24e2bSVaishali Kulkarni #ifdef	NO_CROSSBOW
173414b24e2bSVaishali Kulkarni static uint8_t
173514b24e2bSVaishali Kulkarni qede_hash_get_txq(qede_t *qede, caddr_t bp)
173614b24e2bSVaishali Kulkarni {
173714b24e2bSVaishali Kulkarni 	struct ip *iphdr = NULL;
173814b24e2bSVaishali Kulkarni 	struct ether_header *ethhdr;
173914b24e2bSVaishali Kulkarni 	struct ether_vlan_header *ethvhdr;
174014b24e2bSVaishali Kulkarni 	struct tcphdr *tcp_hdr;
174114b24e2bSVaishali Kulkarni 	struct udphdr *udp_hdr;
174214b24e2bSVaishali Kulkarni 	uint32_t etherType;
174314b24e2bSVaishali Kulkarni 	int mac_hdr_len, ip_hdr_len;
174414b24e2bSVaishali Kulkarni 	uint32_t h = 0; /* 0 by default */
174514b24e2bSVaishali Kulkarni 	uint8_t tx_ring_id = 0;
174614b24e2bSVaishali Kulkarni 	uint32_t ip_src_addr = 0;
174714b24e2bSVaishali Kulkarni 	uint32_t ip_desc_addr = 0;
174814b24e2bSVaishali Kulkarni 	uint16_t src_port = 0;
174914b24e2bSVaishali Kulkarni 	uint16_t dest_port = 0;
175014b24e2bSVaishali Kulkarni 	uint8_t key[12];
175114b24e2bSVaishali Kulkarni 
175214b24e2bSVaishali Kulkarni 	if (qede->num_fp == 1) {
175314b24e2bSVaishali Kulkarni 		return (tx_ring_id);
175414b24e2bSVaishali Kulkarni 	}
175514b24e2bSVaishali Kulkarni 
175614b24e2bSVaishali Kulkarni 	ethhdr = (struct ether_header *)((void *)bp);
175714b24e2bSVaishali Kulkarni 	ethvhdr = (struct ether_vlan_header *)((void *)bp);
175814b24e2bSVaishali Kulkarni 
175914b24e2bSVaishali Kulkarni 	/* Is this vlan packet? */
176014b24e2bSVaishali Kulkarni 	/* Is this a vlan packet? */
176114b24e2bSVaishali Kulkarni 		mac_hdr_len = sizeof (struct ether_vlan_header);
176214b24e2bSVaishali Kulkarni 		etherType = ntohs(ethvhdr->ether_type);
176314b24e2bSVaishali Kulkarni 	} else {
176414b24e2bSVaishali Kulkarni 		mac_hdr_len = sizeof (struct ether_header);
176514b24e2bSVaishali Kulkarni 		etherType = ntohs(ethhdr->ether_type);
176614b24e2bSVaishali Kulkarni 	}
176714b24e2bSVaishali Kulkarni 	/* Is this IPv4 or IPv6 packet? */
176814b24e2bSVaishali Kulkarni 	/* Is this an IPv4 or IPv6 packet? */
176914b24e2bSVaishali Kulkarni 		if (IPH_HDR_VERSION((ipha_t *)(void *)(bp+mac_hdr_len))
177014b24e2bSVaishali Kulkarni 		    == IPV4_VERSION) {
177114b24e2bSVaishali Kulkarni 			iphdr = (struct ip *)(void *)(bp+mac_hdr_len);
177214b24e2bSVaishali Kulkarni 		}
177314b24e2bSVaishali Kulkarni 		if (((unsigned long)iphdr) & 0x3) {
177414b24e2bSVaishali Kulkarni 			/*  IP hdr not 4-byte aligned */
177514b24e2bSVaishali Kulkarni 			return (tx_ring_id);
177614b24e2bSVaishali Kulkarni 		}
177714b24e2bSVaishali Kulkarni 	}
177814b24e2bSVaishali Kulkarni 	/* ipV4 packets */
177914b24e2bSVaishali Kulkarni 	if (iphdr) {
178014b24e2bSVaishali Kulkarni 
178114b24e2bSVaishali Kulkarni 		ip_hdr_len = IPH_HDR_LENGTH(iphdr);
178214b24e2bSVaishali Kulkarni 		ip_src_addr = iphdr->ip_src.s_addr;
178314b24e2bSVaishali Kulkarni 		ip_desc_addr = iphdr->ip_dst.s_addr;
178414b24e2bSVaishali Kulkarni 
178514b24e2bSVaishali Kulkarni 		if (iphdr->ip_p == IPPROTO_TCP) {
178614b24e2bSVaishali Kulkarni 			tcp_hdr = (struct tcphdr *)(void *)
178714b24e2bSVaishali Kulkarni 			    ((uint8_t *)iphdr + ip_hdr_len);
178814b24e2bSVaishali Kulkarni 			src_port = tcp_hdr->th_sport;
178914b24e2bSVaishali Kulkarni 			dest_port = tcp_hdr->th_dport;
179014b24e2bSVaishali Kulkarni 		} else if (iphdr->ip_p == IPPROTO_UDP) {
179114b24e2bSVaishali Kulkarni 			udp_hdr = (struct udphdr *)(void *)
179214b24e2bSVaishali Kulkarni 			    ((uint8_t *)iphdr + ip_hdr_len);
179314b24e2bSVaishali Kulkarni 			src_port = udp_hdr->uh_sport;
179414b24e2bSVaishali Kulkarni 			dest_port = udp_hdr->uh_dport;
179514b24e2bSVaishali Kulkarni 		}
179614b24e2bSVaishali Kulkarni 		key[0] = (uint8_t)((ip_src_addr) &0xFF);
179714b24e2bSVaishali Kulkarni 		key[1] = (uint8_t)((ip_src_addr >> 8) &0xFF);
179814b24e2bSVaishali Kulkarni 		key[2] = (uint8_t)((ip_src_addr >> 16) &0xFF);
179914b24e2bSVaishali Kulkarni 		key[3] = (uint8_t)((ip_src_addr >> 24) &0xFF);
180014b24e2bSVaishali Kulkarni 		key[4] = (uint8_t)((ip_desc_addr) &0xFF);
180114b24e2bSVaishali Kulkarni 		key[5] = (uint8_t)((ip_desc_addr >> 8) &0xFF);
180214b24e2bSVaishali Kulkarni 		key[6] = (uint8_t)((ip_desc_addr >> 16) &0xFF);
180314b24e2bSVaishali Kulkarni 		key[7] = (uint8_t)((ip_desc_addr >> 24) &0xFF);
180414b24e2bSVaishali Kulkarni 		key[8] = (uint8_t)((src_port) &0xFF);
180514b24e2bSVaishali Kulkarni 		key[9] = (uint8_t)((src_port >> 8) &0xFF);
180614b24e2bSVaishali Kulkarni 		key[10] = (uint8_t)((dest_port) &0xFF);
180714b24e2bSVaishali Kulkarni 		key[11] = (uint8_t)((dest_port >> 8) &0xFF);
180814b24e2bSVaishali Kulkarni 		h = hash(key, 12, 0); /* return 32 bit */
180914b24e2bSVaishali Kulkarni 		tx_ring_id = (h & (qede->num_fp - 1));
181014b24e2bSVaishali Kulkarni 		if (tx_ring_id >= qede->num_fp) {
181114b24e2bSVaishali Kulkarni 			cmn_err(CE_WARN, "%s bad tx_ring_id %d\n",
181214b24e2bSVaishali Kulkarni 			    __func__, tx_ring_id);
181314b24e2bSVaishali Kulkarni 			tx_ring_id = 0;
181414b24e2bSVaishali Kulkarni 		}
181514b24e2bSVaishali Kulkarni 	}
181614b24e2bSVaishali Kulkarni 	return (tx_ring_id);
181714b24e2bSVaishali Kulkarni }
181814b24e2bSVaishali Kulkarni #endif
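
/*
 * qede_hash_get_txq() maps the 32-bit hash onto a ring with
 * (h & (num_fp - 1)).  The result is always in range, but the fanout
 * is uniform only when qede->num_fp is a power of two; for other
 * values some ring indices can never be produced by the mask.
 */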
181914b24e2bSVaishali Kulkarni 
182014b24e2bSVaishali Kulkarni mblk_t *
182114b24e2bSVaishali Kulkarni qede_ring_tx(void *arg, mblk_t *mp)
182214b24e2bSVaishali Kulkarni {
182314b24e2bSVaishali Kulkarni 	qede_fastpath_t *fp = (qede_fastpath_t *)arg;
182414b24e2bSVaishali Kulkarni 	qede_t *qede = fp->qede;
182514b24e2bSVaishali Kulkarni #ifndef	NO_CROSSBOW
182614b24e2bSVaishali Kulkarni 	qede_tx_ring_t *tx_ring = fp->tx_ring[0];
182714b24e2bSVaishali Kulkarni #else
182814b24e2bSVaishali Kulkarni 	qede_tx_ring_t *tx_ring;
182914b24e2bSVaishali Kulkarni #endif
183014b24e2bSVaishali Kulkarni 	uint32_t ring_id;
183114b24e2bSVaishali Kulkarni 	mblk_t *next = NULL;
183214b24e2bSVaishali Kulkarni 	enum qede_xmit_status status = XMIT_FAILED;
183314b24e2bSVaishali Kulkarni 	caddr_t bp;
183414b24e2bSVaishali Kulkarni 
183514b24e2bSVaishali Kulkarni 	ASSERT(mp->b_next == NULL);
183614b24e2bSVaishali Kulkarni 
183714b24e2bSVaishali Kulkarni #ifndef	NO_CROSSBOW
183814b24e2bSVaishali Kulkarni 	if (!fp || !tx_ring) {
183914b24e2bSVaishali Kulkarni 		qede_print_err("!%s: error, fp %p, tx_ring %p",
184014b24e2bSVaishali Kulkarni 		    __func__, fp, tx_ring);
184114b24e2bSVaishali Kulkarni 		goto exit;
184214b24e2bSVaishali Kulkarni 	}
184314b24e2bSVaishali Kulkarni #endif
184414b24e2bSVaishali Kulkarni 	if (qede->qede_state != QEDE_STATE_STARTED) {
184514b24e2bSVaishali Kulkarni 		qede_print_err("!%s(%d): qede_state %d invalid",
184614b24e2bSVaishali Kulkarni 		    __func__, qede->instance, qede->qede_state);
184714b24e2bSVaishali Kulkarni 		goto exit;
184814b24e2bSVaishali Kulkarni 	}
184914b24e2bSVaishali Kulkarni 
185014b24e2bSVaishali Kulkarni 	if (!qede->params.link_state) {
185114b24e2bSVaishali Kulkarni 		goto exit;
185214b24e2bSVaishali Kulkarni 	}
185314b24e2bSVaishali Kulkarni 
185414b24e2bSVaishali Kulkarni 	while (mp != NULL) {
185514b24e2bSVaishali Kulkarni #ifdef	NO_CROSSBOW
185614b24e2bSVaishali Kulkarni 		/*
185714b24e2bSVaishali Kulkarni 		 * Figure out which tx ring to send this packet to.
185814b24e2bSVaishali Kulkarni 		 * Currently multiple rings are not exposed to mac layer
185914b24e2bSVaishali Kulkarni 		 * and fanout done by driver
186014b24e2bSVaishali Kulkarni 		 * and the fanout is done by the driver
186114b24e2bSVaishali Kulkarni 		bp = (caddr_t)mp->b_rptr;
186214b24e2bSVaishali Kulkarni 		ring_id = qede_hash_get_txq(qede, bp);
186314b24e2bSVaishali Kulkarni 		fp = &qede->fp_array[ring_id];
186414b24e2bSVaishali Kulkarni 		tx_ring = fp->tx_ring[0];
186514b24e2bSVaishali Kulkarni 
186614b24e2bSVaishali Kulkarni 		if (qede->num_tc > 1) {
186714b24e2bSVaishali Kulkarni 			qede_info(qede,
186814b24e2bSVaishali Kulkarni 			    "Traffic classes(%d) > 1 not supported",
186914b24e2bSVaishali Kulkarni 			    qede->num_tc);
187014b24e2bSVaishali Kulkarni 			goto exit;
187114b24e2bSVaishali Kulkarni 		}
187214b24e2bSVaishali Kulkarni #endif
187314b24e2bSVaishali Kulkarni 		next = mp->b_next;
187414b24e2bSVaishali Kulkarni 		mp->b_next = NULL;
187514b24e2bSVaishali Kulkarni 
187614b24e2bSVaishali Kulkarni 		status = qede_send_tx_packet(qede, tx_ring, mp);
187714b24e2bSVaishali Kulkarni 		if (status == XMIT_DONE) {
187814b24e2bSVaishali Kulkarni 			tx_ring->tx_pkt_count++;
187914b24e2bSVaishali Kulkarni 			mp = next;
188014b24e2bSVaishali Kulkarni 		} else if (status == XMIT_PAUSE_QUEUE) {
188114b24e2bSVaishali Kulkarni 			tx_ring->tx_ring_pause++;
188214b24e2bSVaishali Kulkarni 			mp->b_next = next;
188314b24e2bSVaishali Kulkarni 			break;
188414b24e2bSVaishali Kulkarni 		} else if (status == XMIT_FAILED) {
188514b24e2bSVaishali Kulkarni 			goto exit;
188614b24e2bSVaishali Kulkarni 		}
188714b24e2bSVaishali Kulkarni 	}
188814b24e2bSVaishali Kulkarni 
188914b24e2bSVaishali Kulkarni 	return (mp);
189014b24e2bSVaishali Kulkarni exit:
189114b24e2bSVaishali Kulkarni 	tx_ring->tx_pkt_dropped++;
189214b24e2bSVaishali Kulkarni 	freemsgchain(mp);
189314b24e2bSVaishali Kulkarni 	mp = NULL;
189414b24e2bSVaishali Kulkarni 	return (mp);
189514b24e2bSVaishali Kulkarni }
1896