1*d14abf15SRobert Mustacchi /*******************************************************************************
2*d14abf15SRobert Mustacchi *
3*d14abf15SRobert Mustacchi * CDDL HEADER START
4*d14abf15SRobert Mustacchi *
5*d14abf15SRobert Mustacchi * The contents of this file are subject to the terms of the
6*d14abf15SRobert Mustacchi * Common Development and Distribution License (the "License").
7*d14abf15SRobert Mustacchi * You may not use this file except in compliance with the License.
8*d14abf15SRobert Mustacchi *
9*d14abf15SRobert Mustacchi * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
10*d14abf15SRobert Mustacchi * or http://www.opensolaris.org/os/licensing.
11*d14abf15SRobert Mustacchi * See the License for the specific language governing permissions
12*d14abf15SRobert Mustacchi * and limitations under the License.
13*d14abf15SRobert Mustacchi *
14*d14abf15SRobert Mustacchi * When distributing Covered Code, include this CDDL HEADER in each
15*d14abf15SRobert Mustacchi * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
16*d14abf15SRobert Mustacchi * If applicable, add the following below this CDDL HEADER, with the
17*d14abf15SRobert Mustacchi * fields enclosed by brackets "[]" replaced with your own identifying
18*d14abf15SRobert Mustacchi * information: Portions Copyright [yyyy] [name of copyright owner]
19*d14abf15SRobert Mustacchi *
20*d14abf15SRobert Mustacchi * CDDL HEADER END
21*d14abf15SRobert Mustacchi *
22*d14abf15SRobert Mustacchi * Copyright 2014 QLogic Corporation
23*d14abf15SRobert Mustacchi * The contents of this file are subject to the terms of the
24*d14abf15SRobert Mustacchi * QLogic End User License (the "License").
25*d14abf15SRobert Mustacchi * You may not use this file except in compliance with the License.
26*d14abf15SRobert Mustacchi *
27*d14abf15SRobert Mustacchi * You can obtain a copy of the License at
28*d14abf15SRobert Mustacchi * http://www.qlogic.com/Resources/Documents/DriverDownloadHelp/
29*d14abf15SRobert Mustacchi * QLogic_End_User_Software_License.txt
30*d14abf15SRobert Mustacchi * See the License for the specific language governing permissions
31*d14abf15SRobert Mustacchi * and limitations under the License.
32*d14abf15SRobert Mustacchi *
33*d14abf15SRobert Mustacchi * Module Description:
34*d14abf15SRobert Mustacchi *
35*d14abf15SRobert Mustacchi *
36*d14abf15SRobert Mustacchi * History:
37*d14abf15SRobert Mustacchi * 11/15/01 Hav Khauv Inception.
38*d14abf15SRobert Mustacchi * 4/4/06 Eliezer begin modifying
39*d14abf15SRobert Mustacchi ******************************************************************************/
40*d14abf15SRobert Mustacchi
41*d14abf15SRobert Mustacchi #include "lm5710.h"
42*d14abf15SRobert Mustacchi #include "microcode_constants.h"
43*d14abf15SRobert Mustacchi #include "eth_constants.h"
44*d14abf15SRobert Mustacchi #include "bd_chain.h"
45*d14abf15SRobert Mustacchi #include "ecore_common.h"
46*d14abf15SRobert Mustacchi
lm_is_tx_completion(lm_device_t * pdev,u8_t chain_idx)47*d14abf15SRobert Mustacchi u8_t lm_is_tx_completion(lm_device_t *pdev, u8_t chain_idx)
48*d14abf15SRobert Mustacchi {
49*d14abf15SRobert Mustacchi u8_t result = FALSE;
50*d14abf15SRobert Mustacchi lm_tx_chain_t *tx_chain = &LM_TXQ(pdev, chain_idx);
51*d14abf15SRobert Mustacchi
52*d14abf15SRobert Mustacchi DbgBreakIf(!(pdev && tx_chain));
53*d14abf15SRobert Mustacchi
54*d14abf15SRobert Mustacchi //the hw_con_idx_ptr of the rcq_chain points directly to the Rx index in the USTORM part of the non-default status block
55*d14abf15SRobert Mustacchi //changed from *tx_chain->hw_con_idx_ptr != tx_chain->cons_idx
56*d14abf15SRobert Mustacchi if ( tx_chain->hw_con_idx_ptr && (mm_le16_to_cpu(*tx_chain->hw_con_idx_ptr) != tx_chain->pkt_idx))
57*d14abf15SRobert Mustacchi {
58*d14abf15SRobert Mustacchi result = TRUE;
59*d14abf15SRobert Mustacchi }
60*d14abf15SRobert Mustacchi DbgMessage(pdev, INFORMi, "lm_is_tx_completion: result is:%s\n", result? "TRUE" : "FALSE");
61*d14abf15SRobert Mustacchi
62*d14abf15SRobert Mustacchi return result;
63*d14abf15SRobert Mustacchi }
64*d14abf15SRobert Mustacchi
lm_handle_lso_split(IN lm_address_t frag_addr_data_offset,IN u16_t data_part_size,IN lm_tx_chain_t * tx_chain,IN struct eth_tx_start_bd * start_bd,IN struct eth_tx_bd * generic_bd)65*d14abf15SRobert Mustacchi static void lm_handle_lso_split(IN lm_address_t frag_addr_data_offset,
66*d14abf15SRobert Mustacchi IN u16_t data_part_size,
67*d14abf15SRobert Mustacchi IN lm_tx_chain_t *tx_chain,
68*d14abf15SRobert Mustacchi IN struct eth_tx_start_bd *start_bd,
69*d14abf15SRobert Mustacchi IN struct eth_tx_bd *generic_bd
70*d14abf15SRobert Mustacchi )
71*d14abf15SRobert Mustacchi {
72*d14abf15SRobert Mustacchi struct eth_tx_bd *prod_bd;
73*d14abf15SRobert Mustacchi u16_t old_nbd = mm_le16_to_cpu(start_bd->nbd);
74*d14abf15SRobert Mustacchi u16_t old_nbytes = mm_le16_to_cpu(generic_bd->nbytes);
75*d14abf15SRobert Mustacchi
76*d14abf15SRobert Mustacchi ASSERT_STATIC(OFFSETOF(struct eth_tx_bd, nbytes) == OFFSETOF(struct eth_tx_start_bd, nbytes)) ;
77*d14abf15SRobert Mustacchi DbgBreakIfFastPath(!(start_bd && generic_bd));
78*d14abf15SRobert Mustacchi
79*d14abf15SRobert Mustacchi //increase nbd on account of the split BD
80*d14abf15SRobert Mustacchi start_bd->nbd = mm_cpu_to_le16(old_nbd + 1);
81*d14abf15SRobert Mustacchi
82*d14abf15SRobert Mustacchi //fix the num of bytes of the BD which has the headers+data to correspond only to the headers part
83*d14abf15SRobert Mustacchi generic_bd->nbytes = mm_cpu_to_le16(old_nbytes - data_part_size);
84*d14abf15SRobert Mustacchi //this is phys addr which points to the start of the data part right after the end of the headers
85*d14abf15SRobert Mustacchi LM_INC64(&frag_addr_data_offset, mm_le16_to_cpu(generic_bd->nbytes));
86*d14abf15SRobert Mustacchi
87*d14abf15SRobert Mustacchi //Advance to the next BD.
88*d14abf15SRobert Mustacchi prod_bd = (struct eth_tx_bd *)lm_bd_chain_produce_bd(&tx_chain->bd_chain);
89*d14abf15SRobert Mustacchi
90*d14abf15SRobert Mustacchi //fill the fields of the new additional BD which holds _only_ data
91*d14abf15SRobert Mustacchi prod_bd->addr_lo = mm_cpu_to_le32(frag_addr_data_offset.as_u32.low);
92*d14abf15SRobert Mustacchi prod_bd->addr_hi = mm_cpu_to_le32(frag_addr_data_offset.as_u32.high);
93*d14abf15SRobert Mustacchi prod_bd->nbytes = mm_cpu_to_le16(data_part_size);
94*d14abf15SRobert Mustacchi
95*d14abf15SRobert Mustacchi tx_chain->lso_split_used++;
96*d14abf15SRobert Mustacchi
97*d14abf15SRobert Mustacchi DbgMessage(NULL, WARNl2tx, "#lm_handle_lso_split: after split: original bd nbytes=0x%x,new bd nbytes=0x%x\n",
98*d14abf15SRobert Mustacchi mm_le16_to_cpu(generic_bd->nbytes), mm_le16_to_cpu(prod_bd->nbytes));
99*d14abf15SRobert Mustacchi }
100*d14abf15SRobert Mustacchi
lm_pre_process_lso_packet(IN lm_device_t * pdev,IN lm_packet_t * packet,IN lm_frag_list_t * frags,OUT u8_t * split_required,IN u16_t total_hlen_bytes)101*d14abf15SRobert Mustacchi static void lm_pre_process_lso_packet(
102*d14abf15SRobert Mustacchi IN lm_device_t *pdev,
103*d14abf15SRobert Mustacchi IN lm_packet_t *packet,
104*d14abf15SRobert Mustacchi IN lm_frag_list_t *frags,
105*d14abf15SRobert Mustacchi OUT u8_t *split_required,
106*d14abf15SRobert Mustacchi IN u16_t total_hlen_bytes
107*d14abf15SRobert Mustacchi )
108*d14abf15SRobert Mustacchi {
109*d14abf15SRobert Mustacchi /* find headers nbds, for that calc eth_hlen and total_hlen_bytes,
110*d14abf15SRobert Mustacchi and take the opportunity to decide if header data separation is required */
111*d14abf15SRobert Mustacchi u32_t cnt;
112*d14abf15SRobert Mustacchi u16_t sum_frag_size = 0;
113*d14abf15SRobert Mustacchi u8_t hdr_nbds = 0;
114*d14abf15SRobert Mustacchi
115*d14abf15SRobert Mustacchi *split_required = FALSE;
116*d14abf15SRobert Mustacchi
117*d14abf15SRobert Mustacchi for(cnt = 0; cnt < frags->cnt; cnt++)
118*d14abf15SRobert Mustacchi {
119*d14abf15SRobert Mustacchi hdr_nbds++;
120*d14abf15SRobert Mustacchi sum_frag_size += (u16_t)frags->frag_arr[cnt].size;
121*d14abf15SRobert Mustacchi if (total_hlen_bytes <= sum_frag_size)
122*d14abf15SRobert Mustacchi {
123*d14abf15SRobert Mustacchi if (total_hlen_bytes < sum_frag_size)
124*d14abf15SRobert Mustacchi {
125*d14abf15SRobert Mustacchi *split_required = TRUE;
126*d14abf15SRobert Mustacchi }
127*d14abf15SRobert Mustacchi break;
128*d14abf15SRobert Mustacchi }
129*d14abf15SRobert Mustacchi }
130*d14abf15SRobert Mustacchi DbgBreakIfFastPath(total_hlen_bytes > sum_frag_size);
131*d14abf15SRobert Mustacchi packet->u1.tx.hdr_nbds = hdr_nbds;
132*d14abf15SRobert Mustacchi }
133*d14abf15SRobert Mustacchi
lm_process_lso_packet(IN lm_packet_t * packet,IN lm_device_t * pdev,IN lm_tx_chain_t * tx_chain,IN lm_frag_list_t * frags,IN void * parse_bd,IN struct eth_tx_start_bd * start_bd,OUT lm_frag_t ** frag,IN u16_t total_hlen_bytes,IN u8_t split_required)134*d14abf15SRobert Mustacchi static void lm_process_lso_packet(IN lm_packet_t *packet,
135*d14abf15SRobert Mustacchi IN lm_device_t *pdev,
136*d14abf15SRobert Mustacchi IN lm_tx_chain_t *tx_chain,
137*d14abf15SRobert Mustacchi IN lm_frag_list_t *frags,
138*d14abf15SRobert Mustacchi IN void *parse_bd,
139*d14abf15SRobert Mustacchi IN struct eth_tx_start_bd *start_bd,
140*d14abf15SRobert Mustacchi OUT lm_frag_t **frag,
141*d14abf15SRobert Mustacchi IN u16_t total_hlen_bytes,
142*d14abf15SRobert Mustacchi IN u8_t split_required)
143*d14abf15SRobert Mustacchi {
144*d14abf15SRobert Mustacchi struct eth_tx_bd *prod_bd = NULL;
145*d14abf15SRobert Mustacchi u32_t cnt = 0;
146*d14abf15SRobert Mustacchi u16_t hlen_reminder = total_hlen_bytes;
147*d14abf15SRobert Mustacchi
148*d14abf15SRobert Mustacchi /* "Sanity check. Maximum total length for IP and TCP headers
149*d14abf15SRobert Mustacchi * is 120 bytes." was here. The sanity check is removed. Corresponding statistics is added */
150*d14abf15SRobert Mustacchi if ((packet->l2pkt_tx_info->lso_ip_hdr_len + packet->l2pkt_tx_info->lso_tcp_hdr_len) > 120) {
151*d14abf15SRobert Mustacchi pdev->debug_info.number_of_long_LSO_headers++;
152*d14abf15SRobert Mustacchi }
153*d14abf15SRobert Mustacchi start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
154*d14abf15SRobert Mustacchi
155*d14abf15SRobert Mustacchi if (CHIP_IS_E1x(pdev))
156*d14abf15SRobert Mustacchi {
157*d14abf15SRobert Mustacchi struct eth_tx_parse_bd_e1x *parse_bd_e1x = (struct eth_tx_parse_bd_e1x *)parse_bd;
158*d14abf15SRobert Mustacchi parse_bd_e1x->lso_mss = mm_cpu_to_le16(packet->l2pkt_tx_info->lso_mss);
159*d14abf15SRobert Mustacchi parse_bd_e1x->ip_id = mm_cpu_to_le16(packet->l2pkt_tx_info->lso_ipid);
160*d14abf15SRobert Mustacchi parse_bd_e1x->tcp_send_seq = mm_cpu_to_le32(packet->l2pkt_tx_info->lso_tcp_send_seq);
161*d14abf15SRobert Mustacchi parse_bd_e1x->tcp_flags = packet->l2pkt_tx_info->lso_tcp_flags; // no endianity since it is u8_t
162*d14abf15SRobert Mustacchi
163*d14abf15SRobert Mustacchi
164*d14abf15SRobert Mustacchi //in case of LSO it is required according to fw to toggle the ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN flag since the TCP seg len is 0
165*d14abf15SRobert Mustacchi parse_bd_e1x->global_data |= ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN;
166*d14abf15SRobert Mustacchi
167*d14abf15SRobert Mustacchi if GET_FLAGS(packet->l2pkt_tx_info->flags, LM_TX_FLAG_TCP_LSO_SNAP_FRAME)
168*d14abf15SRobert Mustacchi {
169*d14abf15SRobert Mustacchi parse_bd_e1x->global_data |= ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN;
170*d14abf15SRobert Mustacchi }
171*d14abf15SRobert Mustacchi }
172*d14abf15SRobert Mustacchi else
173*d14abf15SRobert Mustacchi {
174*d14abf15SRobert Mustacchi struct eth_tx_parse_bd_e2 *parse_bd_e2 = (struct eth_tx_parse_bd_e2 *)parse_bd;
175*d14abf15SRobert Mustacchi parse_bd_e2->parsing_data |= ETH_TX_PARSE_BD_E2_LSO_MSS & (packet->l2pkt_tx_info->lso_mss << ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT);
176*d14abf15SRobert Mustacchi }
177*d14abf15SRobert Mustacchi
178*d14abf15SRobert Mustacchi
179*d14abf15SRobert Mustacchi //enforce this due to miniport design in case of LSO and CSUM
180*d14abf15SRobert Mustacchi SET_FLAGS(packet->l2pkt_tx_info->flags, LM_TX_FLAG_COMPUTE_TCP_UDP_CKSUM);
181*d14abf15SRobert Mustacchi
182*d14abf15SRobert Mustacchi if (!GET_FLAGS(packet->l2pkt_tx_info->flags, LM_TX_FLAG_IPV6_PACKET))
183*d14abf15SRobert Mustacchi {
184*d14abf15SRobert Mustacchi SET_FLAGS(packet->l2pkt_tx_info->flags, LM_TX_FLAG_COMPUTE_IP_CKSUM);
185*d14abf15SRobert Mustacchi }
186*d14abf15SRobert Mustacchi
187*d14abf15SRobert Mustacchi
188*d14abf15SRobert Mustacchi //required only in case of LSO - num of bds the headers occupy all together
189*d14abf15SRobert Mustacchi RESET_FLAGS(start_bd->general_data, ETH_TX_START_BD_HDR_NBDS);
190*d14abf15SRobert Mustacchi start_bd->general_data |= ((packet->u1.tx.hdr_nbds & ETH_TX_START_BD_HDR_NBDS) << ETH_TX_START_BD_HDR_NBDS_SHIFT);
191*d14abf15SRobert Mustacchi
192*d14abf15SRobert Mustacchi //check for split in START BD
193*d14abf15SRobert Mustacchi if (split_required)
194*d14abf15SRobert Mustacchi {
195*d14abf15SRobert Mustacchi if ((start_bd->general_data & ETH_TX_START_BD_HDR_NBDS) == 1)
196*d14abf15SRobert Mustacchi {
197*d14abf15SRobert Mustacchi lm_handle_lso_split(frags->frag_arr[0].addr,
198*d14abf15SRobert Mustacchi mm_le16_to_cpu(start_bd->nbytes) - hlen_reminder,
199*d14abf15SRobert Mustacchi tx_chain,
200*d14abf15SRobert Mustacchi start_bd,
201*d14abf15SRobert Mustacchi (struct eth_tx_bd *)start_bd );
202*d14abf15SRobert Mustacchi split_required = FALSE;
203*d14abf15SRobert Mustacchi }
204*d14abf15SRobert Mustacchi else
205*d14abf15SRobert Mustacchi {
206*d14abf15SRobert Mustacchi u16_t start_bd_nbytes = mm_le16_to_cpu(start_bd->nbytes);
207*d14abf15SRobert Mustacchi
208*d14abf15SRobert Mustacchi DbgBreakIfFastPath(hlen_reminder <= start_bd_nbytes);
209*d14abf15SRobert Mustacchi hlen_reminder -= start_bd_nbytes;
210*d14abf15SRobert Mustacchi }
211*d14abf15SRobert Mustacchi }
212*d14abf15SRobert Mustacchi
213*d14abf15SRobert Mustacchi for(cnt = 1; cnt < frags->cnt; cnt++)
214*d14abf15SRobert Mustacchi {
215*d14abf15SRobert Mustacchi DbgBreakIfFastPath((*frag)->size >= 0x10000 || (*frag)->size == 0);
216*d14abf15SRobert Mustacchi
217*d14abf15SRobert Mustacchi //Advance to the next BD.
218*d14abf15SRobert Mustacchi prod_bd = (struct eth_tx_bd *)lm_bd_chain_produce_bd(&tx_chain->bd_chain);
219*d14abf15SRobert Mustacchi
220*d14abf15SRobert Mustacchi prod_bd->addr_lo = mm_cpu_to_le32((*frag)->addr.as_u32.low);
221*d14abf15SRobert Mustacchi prod_bd->addr_hi = mm_cpu_to_le32((*frag)->addr.as_u32.high);
222*d14abf15SRobert Mustacchi prod_bd->nbytes = mm_cpu_to_le16((u16_t) (*frag)->size);
223*d14abf15SRobert Mustacchi
224*d14abf15SRobert Mustacchi //if there is a split condition and we are on the exact BD, do it! we don't enter here if there was a split already!
225*d14abf15SRobert Mustacchi if (split_required)
226*d14abf15SRobert Mustacchi {
227*d14abf15SRobert Mustacchi if (cnt == ((start_bd->general_data & ETH_TX_START_BD_HDR_NBDS) - 1))
228*d14abf15SRobert Mustacchi {
229*d14abf15SRobert Mustacchi lm_handle_lso_split((*frag)->addr,
230*d14abf15SRobert Mustacchi mm_le16_to_cpu(prod_bd->nbytes) - hlen_reminder,
231*d14abf15SRobert Mustacchi tx_chain,
232*d14abf15SRobert Mustacchi start_bd,
233*d14abf15SRobert Mustacchi prod_bd
234*d14abf15SRobert Mustacchi );
235*d14abf15SRobert Mustacchi split_required = FALSE;
236*d14abf15SRobert Mustacchi }
237*d14abf15SRobert Mustacchi else
238*d14abf15SRobert Mustacchi {
239*d14abf15SRobert Mustacchi u16_t prod_bd_nbytes = mm_le16_to_cpu(prod_bd->nbytes);
240*d14abf15SRobert Mustacchi
241*d14abf15SRobert Mustacchi DbgBreakIfFastPath(hlen_reminder <= prod_bd_nbytes);
242*d14abf15SRobert Mustacchi hlen_reminder -= prod_bd_nbytes;
243*d14abf15SRobert Mustacchi }
244*d14abf15SRobert Mustacchi }
245*d14abf15SRobert Mustacchi
246*d14abf15SRobert Mustacchi packet->size += (*frag)->size;
247*d14abf15SRobert Mustacchi (*frag)++;
248*d14abf15SRobert Mustacchi }
249*d14abf15SRobert Mustacchi
250*d14abf15SRobert Mustacchi
251*d14abf15SRobert Mustacchi
252*d14abf15SRobert Mustacchi //statistics
253*d14abf15SRobert Mustacchi //since this is fast path, we do not use ATOMIC INC.
254*d14abf15SRobert Mustacchi //therefore the statistic might not be completely accurate
255*d14abf15SRobert Mustacchi //possible fix (FUTURE, if required): count the statistic item per RSS/TSS
256*d14abf15SRobert Mustacchi LM_COMMON_DRV_STATS_INC_ETH(pdev, tx_lso_frames);
257*d14abf15SRobert Mustacchi }
258*d14abf15SRobert Mustacchi
259*d14abf15SRobert Mustacchi /**
260*d14abf15SRobert Mustacchi * @Description:
261*d14abf15SRobert Mustacchi * returns coalesce buffer of size >= buf_size, or NULL if none available
262*d14abf15SRobert Mustacchi * @Assumptions:
263*d14abf15SRobert Mustacchi * txq lock is taken by the caller
264*d14abf15SRobert Mustacchi */
265*d14abf15SRobert Mustacchi lm_coalesce_buffer_t *
lm_get_coalesce_buffer(IN lm_device_t * pdev,IN lm_tx_chain_t * txq,IN u32_t buf_size)266*d14abf15SRobert Mustacchi lm_get_coalesce_buffer(
267*d14abf15SRobert Mustacchi IN lm_device_t *pdev,
268*d14abf15SRobert Mustacchi IN lm_tx_chain_t *txq,
269*d14abf15SRobert Mustacchi IN u32_t buf_size)
270*d14abf15SRobert Mustacchi {
271*d14abf15SRobert Mustacchi lm_coalesce_buffer_t *coalesce_buf = NULL;
272*d14abf15SRobert Mustacchi u32_t coalesce_buf_cnt, cnt;
273*d14abf15SRobert Mustacchi
274*d14abf15SRobert Mustacchi if (ERR_IF(CHK_NULL(pdev) || CHK_NULL(txq) || !buf_size)) {
275*d14abf15SRobert Mustacchi DbgBreakFastPath();
276*d14abf15SRobert Mustacchi return NULL;
277*d14abf15SRobert Mustacchi }
278*d14abf15SRobert Mustacchi
279*d14abf15SRobert Mustacchi coalesce_buf_cnt = s_list_entry_cnt(&txq->coalesce_buf_list);
280*d14abf15SRobert Mustacchi for(cnt = 0; cnt < coalesce_buf_cnt; cnt++)
281*d14abf15SRobert Mustacchi {
282*d14abf15SRobert Mustacchi coalesce_buf = (lm_coalesce_buffer_t *) s_list_pop_head(
283*d14abf15SRobert Mustacchi &txq->coalesce_buf_list);
284*d14abf15SRobert Mustacchi
285*d14abf15SRobert Mustacchi DbgBreakIfFastPath(coalesce_buf == NULL);
286*d14abf15SRobert Mustacchi if(NULL == coalesce_buf)
287*d14abf15SRobert Mustacchi {
288*d14abf15SRobert Mustacchi //this case were coalesce buffer in the list is equal to null shouldn't happen.
289*d14abf15SRobert Mustacchi DbgMessage(pdev, FATAL, "lm_get_coalesce_buffer:coalesce buffer was null\n");
290*d14abf15SRobert Mustacchi break;
291*d14abf15SRobert Mustacchi }
292*d14abf15SRobert Mustacchi if(coalesce_buf->buf_size >= buf_size)
293*d14abf15SRobert Mustacchi {
294*d14abf15SRobert Mustacchi txq->coalesce_buf_used++;
295*d14abf15SRobert Mustacchi break;
296*d14abf15SRobert Mustacchi }
297*d14abf15SRobert Mustacchi
298*d14abf15SRobert Mustacchi s_list_push_tail(&txq->coalesce_buf_list, &coalesce_buf->link);
299*d14abf15SRobert Mustacchi
300*d14abf15SRobert Mustacchi coalesce_buf = NULL;
301*d14abf15SRobert Mustacchi }
302*d14abf15SRobert Mustacchi
303*d14abf15SRobert Mustacchi return coalesce_buf;
304*d14abf15SRobert Mustacchi } /* lm_get_coalesce_buffer */
305*d14abf15SRobert Mustacchi
306*d14abf15SRobert Mustacchi /**
307*d14abf15SRobert Mustacchi * @Description:
308*d14abf15SRobert Mustacchi * returns coalesce_buf into txq list
309*d14abf15SRobert Mustacchi * @Assumptions:
310*d14abf15SRobert Mustacchi * txq lock is taken by the caller
311*d14abf15SRobert Mustacchi */
312*d14abf15SRobert Mustacchi void
lm_put_coalesce_buffer(IN lm_device_t * pdev,IN lm_tx_chain_t * txq,IN lm_coalesce_buffer_t * coalesce_buf)313*d14abf15SRobert Mustacchi lm_put_coalesce_buffer(
314*d14abf15SRobert Mustacchi IN lm_device_t *pdev,
315*d14abf15SRobert Mustacchi IN lm_tx_chain_t *txq,
316*d14abf15SRobert Mustacchi IN lm_coalesce_buffer_t *coalesce_buf)
317*d14abf15SRobert Mustacchi {
318*d14abf15SRobert Mustacchi if (ERR_IF(CHK_NULL(pdev) || CHK_NULL(txq) || CHK_NULL(coalesce_buf))) {
319*d14abf15SRobert Mustacchi DbgBreakFastPath();
320*d14abf15SRobert Mustacchi return;
321*d14abf15SRobert Mustacchi }
322*d14abf15SRobert Mustacchi
323*d14abf15SRobert Mustacchi s_list_push_tail(&txq->coalesce_buf_list, &coalesce_buf->link);
324*d14abf15SRobert Mustacchi
325*d14abf15SRobert Mustacchi return;
326*d14abf15SRobert Mustacchi } /* lm_put_coalesce_buffer */
327*d14abf15SRobert Mustacchi
328*d14abf15SRobert Mustacchi /**
329*d14abf15SRobert Mustacchi * @Description:
330*d14abf15SRobert Mustacchi * copy given packet into available coalesce buffer of given txq
331*d14abf15SRobert Mustacchi * @Assumptions:
332*d14abf15SRobert Mustacchi * txq lock is taken by the caller
333*d14abf15SRobert Mustacchi * @Returns:
334*d14abf15SRobert Mustacchi * - SUCCESS -
335*d14abf15SRobert Mustacchi * - The OUT parameter coal_buf will be set to point the allocated
336*d14abf15SRobert Mustacchi * coalesce buffer
337*d14abf15SRobert Mustacchi * - The coalesce buffer frag size will be set to the given packet size
338*d14abf15SRobert Mustacchi * - RESOURCE - no available coalecse buffer for given packet
339*d14abf15SRobert Mustacchi * (according to packet size)
340*d14abf15SRobert Mustacchi */
341*d14abf15SRobert Mustacchi static lm_status_t
lm_copy_packet_to_coalesce_buffer(IN lm_device_t * pdev,IN lm_tx_chain_t * txq,IN lm_packet_t * lmpkt,IN lm_frag_list_t * frags,OUT lm_coalesce_buffer_t ** coal_buf)342*d14abf15SRobert Mustacchi lm_copy_packet_to_coalesce_buffer(
343*d14abf15SRobert Mustacchi IN lm_device_t *pdev,
344*d14abf15SRobert Mustacchi IN lm_tx_chain_t *txq,
345*d14abf15SRobert Mustacchi IN lm_packet_t *lmpkt,
346*d14abf15SRobert Mustacchi IN lm_frag_list_t *frags,
347*d14abf15SRobert Mustacchi OUT lm_coalesce_buffer_t **coal_buf
348*d14abf15SRobert Mustacchi )
349*d14abf15SRobert Mustacchi {
350*d14abf15SRobert Mustacchi lm_coalesce_buffer_t *coalesce_buf;
351*d14abf15SRobert Mustacchi lm_frag_t* frag;
352*d14abf15SRobert Mustacchi u32_t pkt_size = 0;
353*d14abf15SRobert Mustacchi u32_t copied_bytes;
354*d14abf15SRobert Mustacchi u32_t cnt;
355*d14abf15SRobert Mustacchi
356*d14abf15SRobert Mustacchi if (ERR_IF(CHK_NULL(pdev) || CHK_NULL(txq) ||
357*d14abf15SRobert Mustacchi CHK_NULL(lmpkt) || CHK_NULL(frags)))
358*d14abf15SRobert Mustacchi {
359*d14abf15SRobert Mustacchi DbgBreakFastPath();
360*d14abf15SRobert Mustacchi return LM_STATUS_FAILURE;
361*d14abf15SRobert Mustacchi }
362*d14abf15SRobert Mustacchi
363*d14abf15SRobert Mustacchi /* Determine packet size. */
364*d14abf15SRobert Mustacchi frag = &frags->frag_arr[0];
365*d14abf15SRobert Mustacchi for (cnt = 0; cnt < frags->cnt; cnt++, frag++) {
366*d14abf15SRobert Mustacchi pkt_size += frag->size;
367*d14abf15SRobert Mustacchi }
368*d14abf15SRobert Mustacchi
369*d14abf15SRobert Mustacchi /* Find a buffer large enough for copying this packet. In the case
370*d14abf15SRobert Mustacchi * of an LSO frame, we should have at least one 64k coalesce buffer. */
371*d14abf15SRobert Mustacchi coalesce_buf = lm_get_coalesce_buffer(pdev, txq, pkt_size);
372*d14abf15SRobert Mustacchi if(coalesce_buf == NULL)
373*d14abf15SRobert Mustacchi {
374*d14abf15SRobert Mustacchi DbgMessage(pdev, INFORMl2tx,
375*d14abf15SRobert Mustacchi "#copy to coalesce buffer FAILED, (lmpkt=0x%p,pkt_size=%d)\n",
376*d14abf15SRobert Mustacchi lmpkt, pkt_size);
377*d14abf15SRobert Mustacchi LM_COMMON_DRV_STATS_ATOMIC_INC_ETH(pdev, tx_no_coalesce_buf);
378*d14abf15SRobert Mustacchi return LM_STATUS_RESOURCE;
379*d14abf15SRobert Mustacchi }
380*d14abf15SRobert Mustacchi
381*d14abf15SRobert Mustacchi /* copy the packet into the coalesce buffer */
382*d14abf15SRobert Mustacchi copied_bytes = mm_copy_packet_buf(
383*d14abf15SRobert Mustacchi pdev, lmpkt, coalesce_buf->mem_virt, pkt_size);
384*d14abf15SRobert Mustacchi if (ERR_IF(copied_bytes != pkt_size)) {
385*d14abf15SRobert Mustacchi DbgBreakFastPath();
386*d14abf15SRobert Mustacchi lm_put_coalesce_buffer(pdev, txq, coalesce_buf);
387*d14abf15SRobert Mustacchi return LM_STATUS_FAILURE;
388*d14abf15SRobert Mustacchi }
389*d14abf15SRobert Mustacchi
390*d14abf15SRobert Mustacchi /* adjust frag size in coalesce buf */
391*d14abf15SRobert Mustacchi coalesce_buf->frags.frag_arr[0].size = pkt_size;
392*d14abf15SRobert Mustacchi
393*d14abf15SRobert Mustacchi *coal_buf = coalesce_buf;
394*d14abf15SRobert Mustacchi return LM_STATUS_SUCCESS;
395*d14abf15SRobert Mustacchi } /* lm_copy_packet_to_coalesce_buffer */
396*d14abf15SRobert Mustacchi
397*d14abf15SRobert Mustacchi /**
398*d14abf15SRobert Mustacchi * @Description:
399*d14abf15SRobert Mustacchi * check if packet requires copying to coalesce buf (packet too fregmented)
400*d14abf15SRobert Mustacchi * @Returns:
401*d14abf15SRobert Mustacchi * TRUE or FALSE
402*d14abf15SRobert Mustacchi */
403*d14abf15SRobert Mustacchi static u8_t
lm_is_packet_coalescing_required(IN lm_device_t * pdev,IN lm_packet_t * lmpkt,IN lm_frag_list_t * frags,IN u8_t num_parsing_bds)404*d14abf15SRobert Mustacchi lm_is_packet_coalescing_required(
405*d14abf15SRobert Mustacchi IN lm_device_t *pdev,
406*d14abf15SRobert Mustacchi IN lm_packet_t *lmpkt,
407*d14abf15SRobert Mustacchi IN lm_frag_list_t *frags,
408*d14abf15SRobert Mustacchi IN u8_t num_parsing_bds
409*d14abf15SRobert Mustacchi )
410*d14abf15SRobert Mustacchi {
411*d14abf15SRobert Mustacchi u8_t to_copy = FALSE;
412*d14abf15SRobert Mustacchi u8_t wnd_size = 0;
413*d14abf15SRobert Mustacchi static u32_t const MAX_FETCH_BD = 13; /* HW max bds per packet capabitily */
414*d14abf15SRobert Mustacchi
415*d14abf15SRobert Mustacchi // each window size consective TCP payload BDs, must hold payload size
416*d14abf15SRobert Mustacchi // which is greater than, or equal to MSS size.
417*d14abf15SRobert Mustacchi wnd_size = MAX_FETCH_BD - lmpkt->u1.tx.hdr_nbds - num_parsing_bds - 1;
418*d14abf15SRobert Mustacchi
419*d14abf15SRobert Mustacchi if (frags->cnt > wnd_size)
420*d14abf15SRobert Mustacchi {
421*d14abf15SRobert Mustacchi if GET_FLAGS(lmpkt->l2pkt_tx_info->flags, LM_TX_FLAG_TCP_LSO_FRAME)
422*d14abf15SRobert Mustacchi {
423*d14abf15SRobert Mustacchi /* Too fragmented LSO packet, check if it needs to be copied: */
424*d14abf15SRobert Mustacchi u8_t num_frags = (u8_t)frags->cnt;
425*d14abf15SRobert Mustacchi u8_t wnd_idx = 0;
426*d14abf15SRobert Mustacchi u8_t frag_idx = 0;
427*d14abf15SRobert Mustacchi u32_t wnd_sum = 0;
428*d14abf15SRobert Mustacchi
429*d14abf15SRobert Mustacchi for (wnd_idx = lmpkt->u1.tx.hdr_nbds; wnd_idx <= (num_frags - wnd_size); wnd_idx++)
430*d14abf15SRobert Mustacchi {
431*d14abf15SRobert Mustacchi for (frag_idx = 0; frag_idx < wnd_size; frag_idx++)
432*d14abf15SRobert Mustacchi {
433*d14abf15SRobert Mustacchi wnd_sum += frags->frag_arr[wnd_idx + frag_idx].size;
434*d14abf15SRobert Mustacchi }
435*d14abf15SRobert Mustacchi
436*d14abf15SRobert Mustacchi if (wnd_sum < lmpkt->l2pkt_tx_info->lso_mss)
437*d14abf15SRobert Mustacchi {
438*d14abf15SRobert Mustacchi DbgMessage(pdev, WARNl2tx,
439*d14abf15SRobert Mustacchi "#copy to coalesce buffer IS REQUIRED for LSO packet, (lmpkt=0x%p,num_frags=%d)\n",
440*d14abf15SRobert Mustacchi lmpkt, num_frags);
441*d14abf15SRobert Mustacchi to_copy = TRUE;
442*d14abf15SRobert Mustacchi break;
443*d14abf15SRobert Mustacchi }
444*d14abf15SRobert Mustacchi wnd_sum = 0;
445*d14abf15SRobert Mustacchi }
446*d14abf15SRobert Mustacchi }
447*d14abf15SRobert Mustacchi else
448*d14abf15SRobert Mustacchi {
449*d14abf15SRobert Mustacchi /* in non LSO, too fragmented packet should always
450*d14abf15SRobert Mustacchi be copied to coalesce buffer */
451*d14abf15SRobert Mustacchi DbgMessage(pdev, INFORMl2tx,
452*d14abf15SRobert Mustacchi "#copy to coalesce buffer IS REQUIRED for NON LSO packet, (lmpkt=0x%p,num_frags=%d)\n",
453*d14abf15SRobert Mustacchi lmpkt, frags->cnt);
454*d14abf15SRobert Mustacchi to_copy = TRUE;
455*d14abf15SRobert Mustacchi }
456*d14abf15SRobert Mustacchi }
457*d14abf15SRobert Mustacchi
458*d14abf15SRobert Mustacchi return to_copy;
459*d14abf15SRobert Mustacchi } /* lm_is_packet_coalescing_required */
460*d14abf15SRobert Mustacchi
461*d14abf15SRobert Mustacchi #define LM_VLAN_PRI_BIT_LOCATION (13)
462*d14abf15SRobert Mustacchi #define LM_GET_PRI_FROM_VLAN(_vlan) ((_vlan) >> LM_VLAN_PRI_BIT_LOCATION)
463*d14abf15SRobert Mustacchi /**
464*d14abf15SRobert Mustacchi * @description
465*d14abf15SRobert Mustacchi * Check if VLAN exist and if the VLAN exists get priority.
466*d14abf15SRobert Mustacchi * @param pdev
467*d14abf15SRobert Mustacchi * @param packet
468*d14abf15SRobert Mustacchi *
469*d14abf15SRobert Mustacchi * @return u32_t
470*d14abf15SRobert Mustacchi */
471*d14abf15SRobert Mustacchi u8_t
lm_get_pri_from_send_packet_param(lm_device_t * pdev,lm_packet_t * packet)472*d14abf15SRobert Mustacchi lm_get_pri_from_send_packet_param(
473*d14abf15SRobert Mustacchi lm_device_t *pdev,
474*d14abf15SRobert Mustacchi lm_packet_t *packet)
475*d14abf15SRobert Mustacchi {
476*d14abf15SRobert Mustacchi //untagged packets should be treated as priority 0
477*d14abf15SRobert Mustacchi u8_t pri = 0;
478*d14abf15SRobert Mustacchi
479*d14abf15SRobert Mustacchi if GET_FLAGS(packet->l2pkt_tx_info->flags , (LM_TX_FLAG_INSERT_VLAN_TAG | LM_TX_FLAG_VLAN_TAG_EXISTS))
480*d14abf15SRobert Mustacchi {
481*d14abf15SRobert Mustacchi DbgMessage(pdev, INFORMl2, "Outband vlan 0X%x\n",packet->l2pkt_tx_info->vlan_tag);
482*d14abf15SRobert Mustacchi
483*d14abf15SRobert Mustacchi pri = LM_GET_PRI_FROM_VLAN(packet->l2pkt_tx_info->vlan_tag);
484*d14abf15SRobert Mustacchi }
485*d14abf15SRobert Mustacchi
486*d14abf15SRobert Mustacchi return pri;
487*d14abf15SRobert Mustacchi }
488*d14abf15SRobert Mustacchi
489*d14abf15SRobert Mustacchi void
fill_bds_for_encapsulated_packet(lm_device_t * pdev,lm_packet_t * packet,struct eth_tunnel_data * tunnel_data,struct eth_tx_parse_2nd_bd * parse_bd_2nd_ptr,u8_t eth_hlen)490*d14abf15SRobert Mustacchi fill_bds_for_encapsulated_packet(
491*d14abf15SRobert Mustacchi lm_device_t *pdev,
492*d14abf15SRobert Mustacchi lm_packet_t *packet,
493*d14abf15SRobert Mustacchi struct eth_tunnel_data *tunnel_data,
494*d14abf15SRobert Mustacchi struct eth_tx_parse_2nd_bd *parse_bd_2nd_ptr,
495*d14abf15SRobert Mustacchi u8_t eth_hlen)
496*d14abf15SRobert Mustacchi {
497*d14abf15SRobert Mustacchi DbgBreakIf(CHIP_IS_E1x(pdev));
498*d14abf15SRobert Mustacchi
499*d14abf15SRobert Mustacchi ecore_set_fw_mac_addr(&tunnel_data->dst_hi,
500*d14abf15SRobert Mustacchi &tunnel_data->dst_mid,
501*d14abf15SRobert Mustacchi &tunnel_data->dst_lo,
502*d14abf15SRobert Mustacchi packet->l2pkt_tx_info->dst_mac_addr);
503*d14abf15SRobert Mustacchi
504*d14abf15SRobert Mustacchi // Inner IP header offset in WORDs (16-bit) from start of packet
505*d14abf15SRobert Mustacchi tunnel_data->ip_hdr_start_inner_w = (packet->l2pkt_tx_info->encap_packet_inner_frame_offset +
506*d14abf15SRobert Mustacchi packet->l2pkt_tx_info->encap_packet_inner_ip_relative_offset) >> 1;
507*d14abf15SRobert Mustacchi
508*d14abf15SRobert Mustacchi // Checksum of pseudo header with length field = 0
509*d14abf15SRobert Mustacchi tunnel_data->pseudo_csum = mm_cpu_to_le16(packet->l2pkt_tx_info->tcp_pseudo_csum);
510*d14abf15SRobert Mustacchi // Outer ip header checksum (with ALL ip header fields) for non-lso encaulated packet
511*d14abf15SRobert Mustacchi tunnel_data->fw_ip_hdr_csum = mm_cpu_to_le16(packet->l2pkt_tx_info->fw_ip_csum);
512*d14abf15SRobert Mustacchi
513*d14abf15SRobert Mustacchi if (GET_FLAGS(packet->l2pkt_tx_info->flags, LM_TX_FLAG_IPV6_PACKET))
514*d14abf15SRobert Mustacchi {
515*d14abf15SRobert Mustacchi // Set in case outer IP header is ipV6
516*d14abf15SRobert Mustacchi SET_FLAGS(tunnel_data->flags, ETH_TUNNEL_DATA_IP_HDR_TYPE_OUTER);
517*d14abf15SRobert Mustacchi }
518*d14abf15SRobert Mustacchi
519*d14abf15SRobert Mustacchi if (!parse_bd_2nd_ptr)
520*d14abf15SRobert Mustacchi {
521*d14abf15SRobert Mustacchi return;
522*d14abf15SRobert Mustacchi }
523*d14abf15SRobert Mustacchi
524*d14abf15SRobert Mustacchi // Outer IP header offset in WORDs (16-bit) from start of packet
525*d14abf15SRobert Mustacchi parse_bd_2nd_ptr->global_data |= ( ((eth_hlen) >> 1) << ETH_TX_PARSE_2ND_BD_IP_HDR_START_OUTER_W_SHIFT);
526*d14abf15SRobert Mustacchi
527*d14abf15SRobert Mustacchi if (!(GET_FLAGS(packet->l2pkt_tx_info->flags, LM_TX_FLAG_IPV6_PACKET)))
528*d14abf15SRobert Mustacchi {
529*d14abf15SRobert Mustacchi // Outer ipV4 header length in words
530*d14abf15SRobert Mustacchi parse_bd_2nd_ptr->global_data |= ( ((packet->l2pkt_tx_info->lso_ip_hdr_len) >> 1) << ETH_TX_PARSE_2ND_BD_IP_HDR_LEN_OUTER_W_SHIFT);
531*d14abf15SRobert Mustacchi }
532*d14abf15SRobert Mustacchi
533*d14abf15SRobert Mustacchi // An optional addition to ECN that protects against accidental or malicious concealment of marked packets from the TCP sender
534*d14abf15SRobert Mustacchi parse_bd_2nd_ptr->global_data |= (packet->l2pkt_tx_info->tcp_nonce_sum_bit << ETH_TX_PARSE_2ND_BD_NS_FLG_SHIFT);
535*d14abf15SRobert Mustacchi
536*d14abf15SRobert Mustacchi // Checksum of pseudo header with length field=0
537*d14abf15SRobert Mustacchi parse_bd_2nd_ptr->tcp_send_seq = mm_cpu_to_le32(packet->l2pkt_tx_info->lso_tcp_send_seq);
538*d14abf15SRobert Mustacchi parse_bd_2nd_ptr->tcp_flags = packet->l2pkt_tx_info->lso_tcp_flags; // no endianity since it is u8_t
539*d14abf15SRobert Mustacchi
540*d14abf15SRobert Mustacchi /* We set tunnel_lso_inc_ip_id as constant, INT_HEADER, so the "HW IP header" is the inner header.
541*d14abf15SRobert Mustacchi Assuming "FW IP header" is the outer IP header, and "HW IP header" is the inner IP header:
542*d14abf15SRobert Mustacchi fw_ip_csum_wo_len_flags_frag - is the IP checksum without length, flags and fragment offset of the outer ip header
543*d14abf15SRobert Mustacchi hw_ip_id - is the ip id of the inner ip id */
544*d14abf15SRobert Mustacchi parse_bd_2nd_ptr->fw_ip_csum_wo_len_flags_frag = mm_cpu_to_le16(packet->l2pkt_tx_info->fw_ip_csum);
545*d14abf15SRobert Mustacchi parse_bd_2nd_ptr->hw_ip_id = mm_cpu_to_le16(packet->l2pkt_tx_info->lso_ipid);
546*d14abf15SRobert Mustacchi
547*d14abf15SRobert Mustacchi parse_bd_2nd_ptr->fw_ip_hdr_to_payload_w = (packet->l2pkt_tx_info->encap_packet_inner_frame_offset +
548*d14abf15SRobert Mustacchi packet->l2pkt_tx_info->encap_packet_inner_ip_relative_offset +
549*d14abf15SRobert Mustacchi packet->l2pkt_tx_info->encap_packet_inner_tcp_relative_offset +
550*d14abf15SRobert Mustacchi packet->l2pkt_tx_info->lso_tcp_hdr_len -
551*d14abf15SRobert Mustacchi eth_hlen) >> 1;
552*d14abf15SRobert Mustacchi
553*d14abf15SRobert Mustacchi if (GET_FLAGS(packet->l2pkt_tx_info->flags, LM_TX_FLAG_IPV6_PACKET))
554*d14abf15SRobert Mustacchi {
555*d14abf15SRobert Mustacchi /* In IpV4, the length (in WORDs) from the FW IpV4 header start to the payload start. In IpV6, the length (in WORDs) from the FW IpV6 header end to the payload start. However, if extension headers are included, their length is counted here as well. */;
556*d14abf15SRobert Mustacchi
557*d14abf15SRobert Mustacchi // if the outer header (fw header) is ipv4 than fw_ip_hdr_to_payload_w will be set to:
558*d14abf15SRobert Mustacchi // the length in words from start of outer IP header to start of payload
559*d14abf15SRobert Mustacchi // = outer ip header + gre header + inner mac header + inner ip header + tcp header length
560*d14abf15SRobert Mustacchi //
561*d14abf15SRobert Mustacchi // If the outer header is ipv6 than fw_ip_hdr_to_payload_w will be set to:
562*d14abf15SRobert Mustacchi // the length in words from end of inner IP header to start of payload + extension headers (if exists)
563*d14abf15SRobert Mustacchi // = outer ip header - fixed ip header + gre header + inner mac header + inner ip header + tcp header length
564*d14abf15SRobert Mustacchi // fixed ipv6 header length is 40 bytes = 20 words
565*d14abf15SRobert Mustacchi parse_bd_2nd_ptr->fw_ip_hdr_to_payload_w -= 20;
566*d14abf15SRobert Mustacchi }
567*d14abf15SRobert Mustacchi }
568*d14abf15SRobert Mustacchi
569*d14abf15SRobert Mustacchi /*******************************************************************************
570*d14abf15SRobert Mustacchi * Description:
571*d14abf15SRobert Mustacchi *
572*d14abf15SRobert Mustacchi * Return:
573*d14abf15SRobert Mustacchi ******************************************************************************/
574*d14abf15SRobert Mustacchi lm_status_t
lm_send_packet(lm_device_t * pdev,u32_t chain_idx,lm_packet_t * packet,lm_frag_list_t * frags)575*d14abf15SRobert Mustacchi lm_send_packet(
576*d14abf15SRobert Mustacchi lm_device_t *pdev,
577*d14abf15SRobert Mustacchi u32_t chain_idx,
578*d14abf15SRobert Mustacchi lm_packet_t *packet,
579*d14abf15SRobert Mustacchi lm_frag_list_t *frags)
580*d14abf15SRobert Mustacchi {
581*d14abf15SRobert Mustacchi lm_tx_chain_t *tx_chain = NULL;
582*d14abf15SRobert Mustacchi struct eth_tx_start_bd *start_bd = NULL;
583*d14abf15SRobert Mustacchi struct eth_tx_parse_bd_e1x *parse_bd_e1x = NULL;
584*d14abf15SRobert Mustacchi struct eth_tx_parse_bd_e2 *parse_bd_e2 = NULL;
585*d14abf15SRobert Mustacchi struct eth_tx_parse_2nd_bd *parse_bd_2nd_ptr = NULL;
586*d14abf15SRobert Mustacchi struct eth_tx_bd *prod_bd = NULL;
587*d14abf15SRobert Mustacchi lm_frag_t *frag = NULL;
588*d14abf15SRobert Mustacchi u16_t old_prod_idx = 0;
589*d14abf15SRobert Mustacchi u32_t cnt = 0;
590*d14abf15SRobert Mustacchi #if defined(__BIG_ENDIAN)
591*d14abf15SRobert Mustacchi struct doorbell_set_prod dq_msg = {0, 0, {0}};
592*d14abf15SRobert Mustacchi #elif defined(__LITTLE_ENDIAN)
593*d14abf15SRobert Mustacchi struct doorbell_set_prod dq_msg = {{0}, 0, 0};
594*d14abf15SRobert Mustacchi #endif
595*d14abf15SRobert Mustacchi
596*d14abf15SRobert Mustacchi u8_t eth_hlen = ETHERNET_PACKET_HEADER_SIZE;
597*d14abf15SRobert Mustacchi u8_t split_required = FALSE;
598*d14abf15SRobert Mustacchi u8_t eth_addr_type = UNKNOWN_ADDRESS;
599*d14abf15SRobert Mustacchi u16_t total_hlen_bytes = 0;
600*d14abf15SRobert Mustacchi u16_t start_bd_nbd = 0;
601*d14abf15SRobert Mustacchi u16_t vlan_tag = 0;
602*d14abf15SRobert Mustacchi void* parse_bd_ptr = NULL;
603*d14abf15SRobert Mustacchi u8_t is_encapsulated_offload = 0;
604*d14abf15SRobert Mustacchi u8_t num_parsing_bds = 1;
605*d14abf15SRobert Mustacchi
606*d14abf15SRobert Mustacchi //DbgBreakIfFastPath(chain_idx >= pdev->params.rss_chain_cnt);
607*d14abf15SRobert Mustacchi
608*d14abf15SRobert Mustacchi DbgMessage(pdev, VERBOSEl2tx | VERBOSEl4tx, "### lm_send_packet\n");
609*d14abf15SRobert Mustacchi
610*d14abf15SRobert Mustacchi tx_chain = &LM_TXQ(pdev, chain_idx);
611*d14abf15SRobert Mustacchi old_prod_idx = lm_bd_chain_prod_idx(&tx_chain->bd_chain);
612*d14abf15SRobert Mustacchi
613*d14abf15SRobert Mustacchi // Compute Ethernet Header Len
614*d14abf15SRobert Mustacchi if GET_FLAGS(packet->l2pkt_tx_info->flags, LM_TX_FLAG_VLAN_TAG_EXISTS)
615*d14abf15SRobert Mustacchi {
616*d14abf15SRobert Mustacchi eth_hlen += ETHERNET_VLAN_TAG_SIZE;
617*d14abf15SRobert Mustacchi }
618*d14abf15SRobert Mustacchi
619*d14abf15SRobert Mustacchi if GET_FLAGS(packet->l2pkt_tx_info->flags, LM_TX_FLAG_TCP_LSO_SNAP_FRAME)
620*d14abf15SRobert Mustacchi {
621*d14abf15SRobert Mustacchi eth_hlen += ETHERNET_LLC_SNAP_SIZE;
622*d14abf15SRobert Mustacchi }
623*d14abf15SRobert Mustacchi
624*d14abf15SRobert Mustacchi is_encapsulated_offload = (GET_FLAGS(packet->l2pkt_tx_info->flags, LM_TX_FLAG_IS_ENCAP_PACKET) &&
625*d14abf15SRobert Mustacchi GET_FLAGS(packet->l2pkt_tx_info->flags, (LM_TX_FLAG_COMPUTE_IP_CKSUM | LM_TX_FLAG_COMPUTE_TCP_UDP_CKSUM | LM_TX_FLAG_TCP_LSO_FRAME)));
626*d14abf15SRobert Mustacchi
627*d14abf15SRobert Mustacchi if (is_encapsulated_offload)
628*d14abf15SRobert Mustacchi {
629*d14abf15SRobert Mustacchi if (GET_FLAGS(packet->l2pkt_tx_info->flags, LM_TX_FLAG_TCP_LSO_FRAME))
630*d14abf15SRobert Mustacchi {
631*d14abf15SRobert Mustacchi // only for encapsulated packet with lso offload we need second parsing bd
632*d14abf15SRobert Mustacchi num_parsing_bds = 2;
633*d14abf15SRobert Mustacchi }
634*d14abf15SRobert Mustacchi // encapsulated packet header size includes both outer and inner headers
635*d14abf15SRobert Mustacchi total_hlen_bytes = packet->l2pkt_tx_info->encap_packet_inner_frame_offset +
636*d14abf15SRobert Mustacchi packet->l2pkt_tx_info->encap_packet_inner_ip_relative_offset +
637*d14abf15SRobert Mustacchi packet->l2pkt_tx_info->encap_packet_inner_tcp_relative_offset +
638*d14abf15SRobert Mustacchi packet->l2pkt_tx_info->lso_tcp_hdr_len;
639*d14abf15SRobert Mustacchi }
640*d14abf15SRobert Mustacchi else
641*d14abf15SRobert Mustacchi {
642*d14abf15SRobert Mustacchi //calculate the total sum of ETH + IP + TCP headers in term of bytes
643*d14abf15SRobert Mustacchi total_hlen_bytes = packet->l2pkt_tx_info->lso_ip_hdr_len + packet->l2pkt_tx_info->lso_tcp_hdr_len + eth_hlen;
644*d14abf15SRobert Mustacchi }
645*d14abf15SRobert Mustacchi
646*d14abf15SRobert Mustacchi if (GET_FLAGS(packet->l2pkt_tx_info->flags, LM_TX_FLAG_TCP_LSO_FRAME))
647*d14abf15SRobert Mustacchi {
648*d14abf15SRobert Mustacchi lm_pre_process_lso_packet(pdev, packet, frags, &split_required, total_hlen_bytes);
649*d14abf15SRobert Mustacchi }
650*d14abf15SRobert Mustacchi
651*d14abf15SRobert Mustacchi /* handle packet coalescing - if required, copy the too fregmented packet
652*d14abf15SRobert Mustacchi into a pre-allocated coalesce buffer */
653*d14abf15SRobert Mustacchi if (lm_is_packet_coalescing_required(pdev, packet, frags, num_parsing_bds))
654*d14abf15SRobert Mustacchi {
655*d14abf15SRobert Mustacchi lm_coalesce_buffer_t *coalesce_buf = NULL;
656*d14abf15SRobert Mustacchi lm_status_t lm_status;
657*d14abf15SRobert Mustacchi
658*d14abf15SRobert Mustacchi if (ERR_IF(packet->u1.tx.coalesce_buf != NULL))
659*d14abf15SRobert Mustacchi {
660*d14abf15SRobert Mustacchi /* pkt coal buf can't already be set */
661*d14abf15SRobert Mustacchi DbgBreakFastPath();
662*d14abf15SRobert Mustacchi return LM_STATUS_FAILURE;
663*d14abf15SRobert Mustacchi }
664*d14abf15SRobert Mustacchi
665*d14abf15SRobert Mustacchi lm_status = lm_copy_packet_to_coalesce_buffer(
666*d14abf15SRobert Mustacchi pdev, tx_chain, packet, frags, &coalesce_buf);
667*d14abf15SRobert Mustacchi
668*d14abf15SRobert Mustacchi if (lm_status == LM_STATUS_SUCCESS)
669*d14abf15SRobert Mustacchi {
670*d14abf15SRobert Mustacchi LM_COMMON_DRV_STATS_ATOMIC_INC_ETH(pdev, tx_l2_assembly_buf_use);
671*d14abf15SRobert Mustacchi
672*d14abf15SRobert Mustacchi packet->u1.tx.coalesce_buf = coalesce_buf; /* saved to be freed upon completion */
673*d14abf15SRobert Mustacchi
674*d14abf15SRobert Mustacchi packet->u1.tx.hdr_nbds = 1;
675*d14abf15SRobert Mustacchi split_required = 1;
676*d14abf15SRobert Mustacchi
677*d14abf15SRobert Mustacchi /* from here on, use the coalesce buf frags list
678*d14abf15SRobert Mustacchi instead of the frags list given by the caller */
679*d14abf15SRobert Mustacchi frags = &coalesce_buf->frags;
680*d14abf15SRobert Mustacchi }
681*d14abf15SRobert Mustacchi else
682*d14abf15SRobert Mustacchi {
683*d14abf15SRobert Mustacchi return lm_status; /* no coalesce buf available, can't continue */
684*d14abf15SRobert Mustacchi }
685*d14abf15SRobert Mustacchi }
686*d14abf15SRobert Mustacchi
687*d14abf15SRobert Mustacchi // stringent heuristic - number of parsing bds + a split of hdr & data
688*d14abf15SRobert Mustacchi if ((frags->cnt + num_parsing_bds + 1) > lm_bd_chain_avail_bds(&tx_chain->bd_chain))
689*d14abf15SRobert Mustacchi {
690*d14abf15SRobert Mustacchi LM_COMMON_DRV_STATS_ATOMIC_INC_ETH(pdev, tx_no_l2_bd);
691*d14abf15SRobert Mustacchi if (packet->u1.tx.coalesce_buf)
692*d14abf15SRobert Mustacchi {
693*d14abf15SRobert Mustacchi /* TODO: change this to "goto out_err:" */
694*d14abf15SRobert Mustacchi lm_put_coalesce_buffer(pdev, tx_chain, packet->u1.tx.coalesce_buf);
695*d14abf15SRobert Mustacchi packet->u1.tx.coalesce_buf = NULL;
696*d14abf15SRobert Mustacchi }
697*d14abf15SRobert Mustacchi return LM_STATUS_RESOURCE;
698*d14abf15SRobert Mustacchi }
699*d14abf15SRobert Mustacchi
700*d14abf15SRobert Mustacchi packet->size = 0;
701*d14abf15SRobert Mustacchi start_bd = (struct eth_tx_start_bd *)lm_bd_chain_produce_bd(&tx_chain->bd_chain);
702*d14abf15SRobert Mustacchi mm_mem_zero(start_bd, sizeof(union eth_tx_bd_types));
703*d14abf15SRobert Mustacchi //initialize the start BD
704*d14abf15SRobert Mustacchi frag = frags->frag_arr;
705*d14abf15SRobert Mustacchi start_bd->addr_lo = mm_cpu_to_le32(frag->addr.as_u32.low);
706*d14abf15SRobert Mustacchi start_bd->addr_hi = mm_cpu_to_le32(frag->addr.as_u32.high);
707*d14abf15SRobert Mustacchi start_bd->nbytes = mm_cpu_to_le16((u16_t) frag->size);
708*d14abf15SRobert Mustacchi start_bd->bd_flags.as_bitfield = (u8_t) ETH_TX_BD_FLAGS_START_BD;
709*d14abf15SRobert Mustacchi start_bd->nbd = 0;
710*d14abf15SRobert Mustacchi // set the number of parsing BDs in packet.
711*d14abf15SRobert Mustacchi // parse_nbds is set to: the number of parsing BDs in packet - 1
712*d14abf15SRobert Mustacchi start_bd->general_data |= ((num_parsing_bds - 1) << ETH_TX_START_BD_PARSE_NBDS_SHIFT);
713*d14abf15SRobert Mustacchi if (is_encapsulated_offload)
714*d14abf15SRobert Mustacchi {
715*d14abf15SRobert Mustacchi // tunnel_exist should be set iff the packet is encapsulated
716*d14abf15SRobert Mustacchi start_bd->general_data |= ETH_TX_START_BD_TUNNEL_EXIST;
717*d14abf15SRobert Mustacchi
718*d14abf15SRobert Mustacchi // for encapsulated packets ETH_TX_BD_FLAGS_IPV6 refers to the inner header
719*d14abf15SRobert Mustacchi if (GET_FLAGS(packet->l2pkt_tx_info->flags, LM_TX_FLAG_ENCAP_PACKET_IS_INNER_IPV6))
720*d14abf15SRobert Mustacchi {
721*d14abf15SRobert Mustacchi start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IPV6;
722*d14abf15SRobert Mustacchi }
723*d14abf15SRobert Mustacchi }
724*d14abf15SRobert Mustacchi else
725*d14abf15SRobert Mustacchi {
726*d14abf15SRobert Mustacchi // set in case ipV6 packet
727*d14abf15SRobert Mustacchi if (GET_FLAGS(packet->l2pkt_tx_info->flags, LM_TX_FLAG_IPV6_PACKET))
728*d14abf15SRobert Mustacchi {
729*d14abf15SRobert Mustacchi start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IPV6;
730*d14abf15SRobert Mustacchi }
731*d14abf15SRobert Mustacchi }
732*d14abf15SRobert Mustacchi
733*d14abf15SRobert Mustacchi if (GET_FLAGS(packet->l2pkt_tx_info->flags , LM_TX_FLAG_INSERT_VLAN_TAG))
734*d14abf15SRobert Mustacchi {
735*d14abf15SRobert Mustacchi DbgMessage(pdev, INFORMl2, "Outband vlan 0X%x\n",packet->l2pkt_tx_info->vlan_tag);
736*d14abf15SRobert Mustacchi start_bd->bd_flags.as_bitfield |= (ETH_TX_BD_FLAGS_VLAN_MODE & (X_ETH_OUTBAND_VLAN << ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT));
737*d14abf15SRobert Mustacchi
738*d14abf15SRobert Mustacchi vlan_tag = packet->l2pkt_tx_info->vlan_tag;
739*d14abf15SRobert Mustacchi }
740*d14abf15SRobert Mustacchi else if (GET_FLAGS(packet->l2pkt_tx_info->flags, LM_TX_FLAG_VLAN_TAG_EXISTS))
741*d14abf15SRobert Mustacchi {
742*d14abf15SRobert Mustacchi DbgMessage(pdev, INFORMl2, "Inband vlan 0X%x\n",packet->l2pkt_tx_info->vlan_tag);
743*d14abf15SRobert Mustacchi start_bd->bd_flags.as_bitfield |= (ETH_TX_BD_FLAGS_VLAN_MODE & (X_ETH_INBAND_VLAN << ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT));
744*d14abf15SRobert Mustacchi
745*d14abf15SRobert Mustacchi vlan_tag = packet->l2pkt_tx_info->vlan_tag;
746*d14abf15SRobert Mustacchi }
747*d14abf15SRobert Mustacchi else
748*d14abf15SRobert Mustacchi {
749*d14abf15SRobert Mustacchi if (IS_VFDEV(pdev)) {
750*d14abf15SRobert Mustacchi ((u8_t*)&vlan_tag)[0] = packet->l2pkt_tx_info->eth_type[1]; //VF is in secure mode
751*d14abf15SRobert Mustacchi ((u8_t*)&vlan_tag)[1] = packet->l2pkt_tx_info->eth_type[0]; //VF is in secure mode
752*d14abf15SRobert Mustacchi if (vlan_tag == VLAN_TAGGED_FRAME_ETH_TYPE) {
753*d14abf15SRobert Mustacchi ((u8_t*)&vlan_tag)[0] = packet->l2pkt_tx_info->eth_type[3]; //VF is in secure mode
754*d14abf15SRobert Mustacchi ((u8_t*)&vlan_tag)[1] = packet->l2pkt_tx_info->eth_type[2]; //VF is in secure mode
755*d14abf15SRobert Mustacchi DbgMessage(pdev, INFORMl2, "Inband vlan (from packet) 0X%x\n",vlan_tag);
756*d14abf15SRobert Mustacchi start_bd->bd_flags.as_bitfield |= (ETH_TX_BD_FLAGS_VLAN_MODE & (X_ETH_INBAND_VLAN << ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT));
757*d14abf15SRobert Mustacchi }
758*d14abf15SRobert Mustacchi } else {
759*d14abf15SRobert Mustacchi /* for debug only - to discover driver/fw lack of synchronization */
760*d14abf15SRobert Mustacchi vlan_tag = (u16_t)(pdev->tx_info.chain[chain_idx].eth_tx_prods.packets_prod);
761*d14abf15SRobert Mustacchi }
762*d14abf15SRobert Mustacchi }
763*d14abf15SRobert Mustacchi start_bd->vlan_or_ethertype = mm_cpu_to_le16(vlan_tag);
764*d14abf15SRobert Mustacchi
765*d14abf15SRobert Mustacchi if (GET_FLAGS(packet->l2pkt_tx_info->flags, LM_TX_FLAG_FORCE_VLAN_MODE))
766*d14abf15SRobert Mustacchi {
767*d14abf15SRobert Mustacchi //force vlan mode according to bds (vlan mode can change accroding to global configuration)
768*d14abf15SRobert Mustacchi SET_FLAGS(start_bd->general_data, ETH_TX_START_BD_FORCE_VLAN_MODE);
769*d14abf15SRobert Mustacchi }
770*d14abf15SRobert Mustacchi
771*d14abf15SRobert Mustacchi packet->size += frag->size;
772*d14abf15SRobert Mustacchi frag++;
773*d14abf15SRobert Mustacchi
774*d14abf15SRobert Mustacchi //SNAP
775*d14abf15SRobert Mustacchi //parse bd is always present for FW simplicity
776*d14abf15SRobert Mustacchi //adjust the parse BD pointer
777*d14abf15SRobert Mustacchi /////////////////start parse BD handling ////////////////////////////////////////////
778*d14abf15SRobert Mustacchi parse_bd_ptr = lm_bd_chain_produce_bd(&tx_chain->bd_chain);
779*d14abf15SRobert Mustacchi mm_mem_zero(parse_bd_ptr, sizeof(union eth_tx_bd_types));
780*d14abf15SRobert Mustacchi
781*d14abf15SRobert Mustacchi if (CHIP_IS_E1x(pdev))
782*d14abf15SRobert Mustacchi {
783*d14abf15SRobert Mustacchi parse_bd_e1x = parse_bd_ptr;
784*d14abf15SRobert Mustacchi parse_bd_e1x->global_data = (UNICAST_ADDRESS << ETH_TX_PARSE_BD_E1X_ETH_ADDR_TYPE_SHIFT);
785*d14abf15SRobert Mustacchi }
786*d14abf15SRobert Mustacchi else
787*d14abf15SRobert Mustacchi {
788*d14abf15SRobert Mustacchi parse_bd_e2 = parse_bd_ptr;
789*d14abf15SRobert Mustacchi parse_bd_e2->parsing_data = (UNICAST_ADDRESS << ETH_TX_PARSE_BD_E2_ETH_ADDR_TYPE_SHIFT);
790*d14abf15SRobert Mustacchi }
791*d14abf15SRobert Mustacchi // first parse BD taken into account
792*d14abf15SRobert Mustacchi start_bd_nbd++;
793*d14abf15SRobert Mustacchi
794*d14abf15SRobert Mustacchi if (num_parsing_bds > 1)
795*d14abf15SRobert Mustacchi {
796*d14abf15SRobert Mustacchi // lso offload for encapsulated packet - two parsing bds are required
797*d14abf15SRobert Mustacchi parse_bd_2nd_ptr = lm_bd_chain_produce_bd(&tx_chain->bd_chain);
798*d14abf15SRobert Mustacchi mm_mem_zero(parse_bd_2nd_ptr, sizeof(union eth_tx_bd_types));
799*d14abf15SRobert Mustacchi //second parse BD taken into account
800*d14abf15SRobert Mustacchi start_bd_nbd++;
801*d14abf15SRobert Mustacchi }
802*d14abf15SRobert Mustacchi
803*d14abf15SRobert Mustacchi if (is_encapsulated_offload)
804*d14abf15SRobert Mustacchi {
805*d14abf15SRobert Mustacchi fill_bds_for_encapsulated_packet(pdev, packet, &parse_bd_e2->data.tunnel_data, parse_bd_2nd_ptr, eth_hlen);
806*d14abf15SRobert Mustacchi }
807*d14abf15SRobert Mustacchi /////////////////end parse BD handling ////////////////////////////////////////////
808*d14abf15SRobert Mustacchi
809*d14abf15SRobert Mustacchi if (IS_PFDEV(pdev) && (tx_chain->idx == FWD_CID(pdev)))
810*d14abf15SRobert Mustacchi {
811*d14abf15SRobert Mustacchi pdev->tx_info.forward_packets++;
812*d14abf15SRobert Mustacchi }
813*d14abf15SRobert Mustacchi if GET_FLAGS(packet->l2pkt_tx_info->flags, LM_TX_FLAG_TCP_LSO_FRAME)
814*d14abf15SRobert Mustacchi {
815*d14abf15SRobert Mustacchi start_bd->nbd = mm_cpu_to_le16(start_bd_nbd);
816*d14abf15SRobert Mustacchi lm_process_lso_packet(packet, pdev, tx_chain, frags, parse_bd_ptr, start_bd,
817*d14abf15SRobert Mustacchi &frag, total_hlen_bytes, split_required);
818*d14abf15SRobert Mustacchi start_bd_nbd = mm_cpu_to_le16(start_bd->nbd);
819*d14abf15SRobert Mustacchi if (IS_PFDEV(pdev) && (tx_chain->idx == FWD_CID(pdev)))
820*d14abf15SRobert Mustacchi {
821*d14abf15SRobert Mustacchi pdev->tx_info.lso_forward_packets++;
822*d14abf15SRobert Mustacchi }
823*d14abf15SRobert Mustacchi
824*d14abf15SRobert Mustacchi }
825*d14abf15SRobert Mustacchi else //This is the regular path in case we're not LSO
826*d14abf15SRobert Mustacchi {
827*d14abf15SRobert Mustacchi // In non-LSO packets, if there are more than 1 data bds, the second data bd (the one after
828*d14abf15SRobert Mustacchi // the parsing bd) will be of the above type.total_pkt_bytes will hold the total packet length,
829*d14abf15SRobert Mustacchi // without outer vlan and without vlan in case there is vlan offload.
830*d14abf15SRobert Mustacchi struct eth_tx_bd *total_pkt_bytes_bd = NULL;
831*d14abf15SRobert Mustacchi
832*d14abf15SRobert Mustacchi //pass on all frags except the first one
833*d14abf15SRobert Mustacchi for(cnt = 1; cnt < frags->cnt; cnt++)
834*d14abf15SRobert Mustacchi {
835*d14abf15SRobert Mustacchi DbgMessage(pdev, VERBOSEl2tx | VERBOSEl4tx, " frag %d, hi 0x%x, lo 0x%x, size %d\n",
836*d14abf15SRobert Mustacchi cnt, frag->addr.as_u32.high, frag->addr.as_u32.low, frag->size);
837*d14abf15SRobert Mustacchi
838*d14abf15SRobert Mustacchi DbgBreakIfFastPath(frag->size >= 0x10000 || frag->size == 0);
839*d14abf15SRobert Mustacchi // TODO: assert/ fixup if to many SGE's per MTU
840*d14abf15SRobert Mustacchi
841*d14abf15SRobert Mustacchi //Advance to the next BD.
842*d14abf15SRobert Mustacchi prod_bd = (struct eth_tx_bd *)lm_bd_chain_produce_bd(&tx_chain->bd_chain);
843*d14abf15SRobert Mustacchi
844*d14abf15SRobert Mustacchi prod_bd->addr_lo = mm_cpu_to_le32(frag->addr.as_u32.low);
845*d14abf15SRobert Mustacchi prod_bd->addr_hi = mm_cpu_to_le32(frag->addr.as_u32.high);
846*d14abf15SRobert Mustacchi prod_bd->nbytes = mm_cpu_to_le16((u16_t) frag->size);
847*d14abf15SRobert Mustacchi if (NULL == total_pkt_bytes_bd)
848*d14abf15SRobert Mustacchi {
849*d14abf15SRobert Mustacchi //second data bd saved for updating total_pkt_bytes.
850*d14abf15SRobert Mustacchi total_pkt_bytes_bd = prod_bd;
851*d14abf15SRobert Mustacchi }
852*d14abf15SRobert Mustacchi packet->size += frag->size;
853*d14abf15SRobert Mustacchi
854*d14abf15SRobert Mustacchi frag++;
855*d14abf15SRobert Mustacchi }
856*d14abf15SRobert Mustacchi
857*d14abf15SRobert Mustacchi if (NULL != total_pkt_bytes_bd)
858*d14abf15SRobert Mustacchi {
859*d14abf15SRobert Mustacchi //we have a second data bd
860*d14abf15SRobert Mustacchi total_pkt_bytes_bd->total_pkt_bytes = mm_cpu_to_le16((u16_t) packet->size);
861*d14abf15SRobert Mustacchi }
862*d14abf15SRobert Mustacchi }
863*d14abf15SRobert Mustacchi
864*d14abf15SRobert Mustacchi //we might have IP csum, TCP csum, both or none.
865*d14abf15SRobert Mustacchi //It is definitely legit for a packet to be csum offloaded with or without LSO!
866*d14abf15SRobert Mustacchi //If the packet is LSO, we must enter here!!!!
867*d14abf15SRobert Mustacchi if (GET_FLAGS(packet->l2pkt_tx_info->flags, (LM_TX_FLAG_COMPUTE_IP_CKSUM | LM_TX_FLAG_COMPUTE_TCP_UDP_CKSUM)))
868*d14abf15SRobert Mustacchi {
869*d14abf15SRobert Mustacchi // non-encapsulated packet: set bit if LM_TX_FLAG_COMPUTE_IP_CKSUM is on (LM_TX_FLAG_ENCAP_PACKET_IS_INNER_IPV6 is always equal to zero)
870*d14abf15SRobert Mustacchi // encapsulated packet: set bit if LM_TX_FLAG_COMPUTE_IP_CKSUM is on and inner ip header is ipv4
871*d14abf15SRobert Mustacchi if (GET_FLAGS(packet->l2pkt_tx_info->flags, LM_TX_FLAG_COMPUTE_IP_CKSUM) &&
872*d14abf15SRobert Mustacchi (!GET_FLAGS(packet->l2pkt_tx_info->flags, LM_TX_FLAG_ENCAP_PACKET_IS_INNER_IPV6)))
873*d14abf15SRobert Mustacchi {
874*d14abf15SRobert Mustacchi start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IP_CSUM;
875*d14abf15SRobert Mustacchi }
876*d14abf15SRobert Mustacchi
877*d14abf15SRobert Mustacchi if (GET_FLAGS(packet->l2pkt_tx_info->flags, LM_TX_FLAG_COMPUTE_TCP_UDP_CKSUM))
878*d14abf15SRobert Mustacchi {
879*d14abf15SRobert Mustacchi start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
880*d14abf15SRobert Mustacchi if(packet->l2pkt_tx_info->cs_any_offset)
881*d14abf15SRobert Mustacchi {
882*d14abf15SRobert Mustacchi start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IS_UDP;
883*d14abf15SRobert Mustacchi }
884*d14abf15SRobert Mustacchi }
885*d14abf15SRobert Mustacchi
886*d14abf15SRobert Mustacchi if (CHIP_IS_E1x(pdev)) {
887*d14abf15SRobert Mustacchi struct eth_tx_parse_bd_e1x *parse_bd_e1x = parse_bd_ptr;
888*d14abf15SRobert Mustacchi
889*d14abf15SRobert Mustacchi if (CHK_NULL(parse_bd_ptr)) {
890*d14abf15SRobert Mustacchi DbgBreakIfFastPath( !parse_bd_ptr ) ;
891*d14abf15SRobert Mustacchi return LM_STATUS_FAILURE ;
892*d14abf15SRobert Mustacchi }
893*d14abf15SRobert Mustacchi
894*d14abf15SRobert Mustacchi parse_bd_e1x->ip_hlen_w = packet->l2pkt_tx_info->lso_ip_hdr_len >> 1;
895*d14abf15SRobert Mustacchi parse_bd_e1x->global_data |= (( (eth_hlen) >> 1) << ETH_TX_PARSE_BD_E1X_IP_HDR_START_OFFSET_W_SHIFT);
896*d14abf15SRobert Mustacchi parse_bd_e1x->total_hlen_w = mm_cpu_to_le16((packet->l2pkt_tx_info->lso_ip_hdr_len >> 1) + ( (eth_hlen) >> 1));
897*d14abf15SRobert Mustacchi
898*d14abf15SRobert Mustacchi if(packet->l2pkt_tx_info->flags & LM_TX_FLAG_TCP_LSO_SNAP_FRAME) {
899*d14abf15SRobert Mustacchi parse_bd_e1x->global_data |= ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN;
900*d14abf15SRobert Mustacchi }
901*d14abf15SRobert Mustacchi
902*d14abf15SRobert Mustacchi if (packet->l2pkt_tx_info->flags & LM_TX_FLAG_COMPUTE_TCP_UDP_CKSUM)
903*d14abf15SRobert Mustacchi {
904*d14abf15SRobert Mustacchi parse_bd_e1x->tcp_pseudo_csum = mm_cpu_to_le16(packet->l2pkt_tx_info->tcp_pseudo_csum);
905*d14abf15SRobert Mustacchi parse_bd_e1x->global_data |= (packet->l2pkt_tx_info->tcp_nonce_sum_bit << ETH_TX_PARSE_BD_E1X_NS_FLG_SHIFT);
906*d14abf15SRobert Mustacchi parse_bd_e1x->total_hlen_w = mm_cpu_to_le16((total_hlen_bytes) >> 1);
907*d14abf15SRobert Mustacchi }
908*d14abf15SRobert Mustacchi
909*d14abf15SRobert Mustacchi } else {
910*d14abf15SRobert Mustacchi struct eth_tx_parse_bd_e2 *parse_bd_e2 = parse_bd_ptr;
911*d14abf15SRobert Mustacchi u32_t val;
912*d14abf15SRobert Mustacchi
913*d14abf15SRobert Mustacchi // TCP header Offset in WORDs from start of packet
914*d14abf15SRobert Mustacchi if (is_encapsulated_offload)
915*d14abf15SRobert Mustacchi {
916*d14abf15SRobert Mustacchi val = (( packet->l2pkt_tx_info->encap_packet_inner_frame_offset +
917*d14abf15SRobert Mustacchi packet->l2pkt_tx_info->encap_packet_inner_ip_relative_offset +
918*d14abf15SRobert Mustacchi packet->l2pkt_tx_info->encap_packet_inner_tcp_relative_offset ) >> 1 );
919*d14abf15SRobert Mustacchi
920*d14abf15SRobert Mustacchi /* set if the inner ip header is ipv6 with extension headers */
921*d14abf15SRobert Mustacchi if (packet->l2pkt_tx_info->encap_packet_inner_tcp_relative_offset > 40) {
922*d14abf15SRobert Mustacchi parse_bd_e2->parsing_data |= ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR;
923*d14abf15SRobert Mustacchi }
924*d14abf15SRobert Mustacchi }
925*d14abf15SRobert Mustacchi else
926*d14abf15SRobert Mustacchi {
927*d14abf15SRobert Mustacchi val = ((packet->l2pkt_tx_info->lso_ip_hdr_len + eth_hlen) >> 1);
928*d14abf15SRobert Mustacchi
929*d14abf15SRobert Mustacchi if (packet->l2pkt_tx_info->lso_ip_hdr_len > 40) {
930*d14abf15SRobert Mustacchi parse_bd_e2->parsing_data |= ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR;
931*d14abf15SRobert Mustacchi }
932*d14abf15SRobert Mustacchi }
933*d14abf15SRobert Mustacchi
934*d14abf15SRobert Mustacchi parse_bd_e2->parsing_data |= ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W & (val << ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W_SHIFT);
935*d14abf15SRobert Mustacchi
936*d14abf15SRobert Mustacchi val = (packet->l2pkt_tx_info->lso_tcp_hdr_len >> 2);
937*d14abf15SRobert Mustacchi parse_bd_e2->parsing_data |= ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW & (val << ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT);
938*d14abf15SRobert Mustacchi
939*d14abf15SRobert Mustacchi parse_bd_e2->parsing_data = mm_cpu_to_le32(parse_bd_e2->parsing_data);
940*d14abf15SRobert Mustacchi }
941*d14abf15SRobert Mustacchi }
942*d14abf15SRobert Mustacchi
943*d14abf15SRobert Mustacchi if ((!is_encapsulated_offload) &&
944*d14abf15SRobert Mustacchi ((!CHIP_IS_E1x(pdev) || IS_VFDEV(pdev))))
945*d14abf15SRobert Mustacchi {
946*d14abf15SRobert Mustacchi struct eth_tx_parse_bd_e2 *parse_bd_e2 = parse_bd_ptr;
947*d14abf15SRobert Mustacchi
948*d14abf15SRobert Mustacchi ecore_set_fw_mac_addr(&parse_bd_e2->data.mac_addr.dst_hi,
949*d14abf15SRobert Mustacchi &parse_bd_e2->data.mac_addr.dst_mid,
950*d14abf15SRobert Mustacchi &parse_bd_e2->data.mac_addr.dst_lo,
951*d14abf15SRobert Mustacchi packet->l2pkt_tx_info->dst_mac_addr);
952*d14abf15SRobert Mustacchi ecore_set_fw_mac_addr(&parse_bd_e2->data.mac_addr.src_hi,
953*d14abf15SRobert Mustacchi &parse_bd_e2->data.mac_addr.src_mid,
954*d14abf15SRobert Mustacchi &parse_bd_e2->data.mac_addr.src_lo,
955*d14abf15SRobert Mustacchi packet->l2pkt_tx_info->src_mac_addr);
956*d14abf15SRobert Mustacchi if (pdev->params.mac_spoof_test) {
957*d14abf15SRobert Mustacchi parse_bd_e2->data.mac_addr.src_lo++;
958*d14abf15SRobert Mustacchi }
959*d14abf15SRobert Mustacchi
960*d14abf15SRobert Mustacchi }
961*d14abf15SRobert Mustacchi
962*d14abf15SRobert Mustacchi /* set dst addr type, if different from unicast */
963*d14abf15SRobert Mustacchi if (IS_ETH_MULTICAST(packet->l2pkt_tx_info->dst_mac_addr))
964*d14abf15SRobert Mustacchi {
965*d14abf15SRobert Mustacchi if (IS_ETH_BROADCAST(packet->l2pkt_tx_info->dst_mac_addr))
966*d14abf15SRobert Mustacchi {
967*d14abf15SRobert Mustacchi eth_addr_type = BROADCAST_ADDRESS;
968*d14abf15SRobert Mustacchi }
969*d14abf15SRobert Mustacchi else
970*d14abf15SRobert Mustacchi {
971*d14abf15SRobert Mustacchi eth_addr_type = MULTICAST_ADDRESS;
972*d14abf15SRobert Mustacchi }
973*d14abf15SRobert Mustacchi
974*d14abf15SRobert Mustacchi if (CHIP_IS_E1x(pdev))
975*d14abf15SRobert Mustacchi {
976*d14abf15SRobert Mustacchi struct eth_tx_parse_bd_e1x *parse_bd_e1x = parse_bd_ptr;
977*d14abf15SRobert Mustacchi RESET_FLAGS(parse_bd_e1x->global_data, ETH_TX_PARSE_BD_E1X_ETH_ADDR_TYPE);
978*d14abf15SRobert Mustacchi parse_bd_e1x->global_data |= (eth_addr_type << ETH_TX_PARSE_BD_E1X_ETH_ADDR_TYPE_SHIFT);
979*d14abf15SRobert Mustacchi }
980*d14abf15SRobert Mustacchi else
981*d14abf15SRobert Mustacchi {
982*d14abf15SRobert Mustacchi struct eth_tx_parse_bd_e2 *parse_bd_e2 = parse_bd_ptr;
983*d14abf15SRobert Mustacchi RESET_FLAGS(parse_bd_e2->parsing_data, ETH_TX_PARSE_BD_E2_ETH_ADDR_TYPE);
984*d14abf15SRobert Mustacchi parse_bd_e2->parsing_data |= (eth_addr_type << ETH_TX_PARSE_BD_E2_ETH_ADDR_TYPE_SHIFT);
985*d14abf15SRobert Mustacchi }
986*d14abf15SRobert Mustacchi }
987*d14abf15SRobert Mustacchi
988*d14abf15SRobert Mustacchi // Save the number of BDs used. Later we need to add this value back
989*d14abf15SRobert Mustacchi // to tx_chain->bd_left when the packet is sent.
990*d14abf15SRobert Mustacchi packet->u1.tx.bd_used = start_bd_nbd += (u16_t)frags->cnt;
991*d14abf15SRobert Mustacchi
992*d14abf15SRobert Mustacchi packet->u1.tx.next_bd_idx = lm_bd_chain_prod_idx(&tx_chain->bd_chain);
993*d14abf15SRobert Mustacchi tx_chain->prod_bseq += packet->size;
994*d14abf15SRobert Mustacchi
995*d14abf15SRobert Mustacchi /* There is a PBF limitation on minimum packet size (9B)
996*d14abf15SRobert Mustacchi * We assert since we do not expect packet length < 14 */
997*d14abf15SRobert Mustacchi DbgBreakIfFastPath(packet->size < ETHERNET_PACKET_HEADER_SIZE);
998*d14abf15SRobert Mustacchi
999*d14abf15SRobert Mustacchi #if DBG
1000*d14abf15SRobert Mustacchi for(cnt = 0; cnt < start_bd_nbd; cnt++)
1001*d14abf15SRobert Mustacchi {
1002*d14abf15SRobert Mustacchi if (parse_bd_ptr && (cnt == 1))
1003*d14abf15SRobert Mustacchi {
1004*d14abf15SRobert Mustacchi if (CHIP_IS_E1x(pdev))
1005*d14abf15SRobert Mustacchi {
1006*d14abf15SRobert Mustacchi DbgMessage(pdev, VERBOSEl2tx,
1007*d14abf15SRobert Mustacchi " parse_bd: global_data 0x%x",
1008*d14abf15SRobert Mustacchi ((struct eth_tx_parse_bd_e1x *)(&start_bd[cnt]))->global_data);
1009*d14abf15SRobert Mustacchi }
1010*d14abf15SRobert Mustacchi else /* E2 */
1011*d14abf15SRobert Mustacchi {
1012*d14abf15SRobert Mustacchi DbgMessage(pdev, VERBOSEl2tx,
1013*d14abf15SRobert Mustacchi " parse_bd: parsing_data 0x%08x",
1014*d14abf15SRobert Mustacchi mm_le32_to_cpu(((struct eth_tx_parse_bd_e2 *)(&start_bd[cnt]))->parsing_data));
1015*d14abf15SRobert Mustacchi }
1016*d14abf15SRobert Mustacchi }
1017*d14abf15SRobert Mustacchi else
1018*d14abf15SRobert Mustacchi {
1019*d14abf15SRobert Mustacchi DbgMessage(pdev, VERBOSEl2tx,
1020*d14abf15SRobert Mustacchi "-> frag: %d, bd_flags: %d, nbytes: %d, hi: 0x%x, lo: 0x%x",
1021*d14abf15SRobert Mustacchi cnt, start_bd[cnt].bd_flags.as_bitfield, mm_le16_to_cpu(start_bd[cnt].nbytes),
1022*d14abf15SRobert Mustacchi mm_le32_to_cpu(start_bd[cnt].addr_hi), mm_le32_to_cpu(start_bd[cnt].addr_lo));
1023*d14abf15SRobert Mustacchi if (cnt == 0)
1024*d14abf15SRobert Mustacchi {
1025*d14abf15SRobert Mustacchi DbgMessage(pdev, VERBOSEl2tx,
1026*d14abf15SRobert Mustacchi " start bd info: nbds: %d, vlan: 0x%x, hdr_nbds: %d",
1027*d14abf15SRobert Mustacchi start_bd_nbd, mm_le16_to_cpu(start_bd->vlan_or_ethertype),
1028*d14abf15SRobert Mustacchi (start_bd->general_data & ETH_TX_START_BD_HDR_NBDS));
1029*d14abf15SRobert Mustacchi }
1030*d14abf15SRobert Mustacchi }
1031*d14abf15SRobert Mustacchi }
1032*d14abf15SRobert Mustacchi #endif
1033*d14abf15SRobert Mustacchi
1034*d14abf15SRobert Mustacchi start_bd->nbd = mm_cpu_to_le16(start_bd_nbd);
1035*d14abf15SRobert Mustacchi
1036*d14abf15SRobert Mustacchi s_list_push_tail(&tx_chain->active_descq, &packet->link);
1037*d14abf15SRobert Mustacchi
1038*d14abf15SRobert Mustacchi //in case of a packet consisting of 1 frag only, but with the use of parsing info BD,
1039*d14abf15SRobert Mustacchi //the last_bd will point to the START BD!
1040*d14abf15SRobert Mustacchi //this is since we need to mark both the START & END on the START BD.
1041*d14abf15SRobert Mustacchi //Only the start BD can fill the flags and we always have 2 BDs.
1042*d14abf15SRobert Mustacchi
1043*d14abf15SRobert Mustacchi // Debug message on the parsed_bd
1044*d14abf15SRobert Mustacchi //DbgMessage(pdev, INFORM, "lm_send_packet() parse_bd: total_hlen %d ip_hlen %d lso_mss %d tcp_flags 0x%x\n",
1045*d14abf15SRobert Mustacchi // parse_bd->total_hlen, parse_bd->ip_hlen, parse_bd->lso_mss, parse_bd->tcp_flags);
1046*d14abf15SRobert Mustacchi //DbgMessage(pdev, INFORM, "lm_send_packet() start_bd: bd_flags 0x%x\n",start_bd->bd_flags);
1047*d14abf15SRobert Mustacchi
1048*d14abf15SRobert Mustacchi // Make sure that the BD data is updated before updating the producer
1049*d14abf15SRobert Mustacchi // since FW might read the BD right after the producer is updated.
1050*d14abf15SRobert Mustacchi // This is only applicable for weak-ordered memory model archs such
1051*d14abf15SRobert Mustacchi // as IA-64, The following barrier is also mandatory since FW will
1052*d14abf15SRobert Mustacchi // assumes packets must have BDs
1053*d14abf15SRobert Mustacchi //order is crucial in case of preemption
1054*d14abf15SRobert Mustacchi pdev->tx_info.chain[chain_idx].eth_tx_prods.bds_prod = pdev->tx_info.chain[chain_idx].eth_tx_prods.bds_prod +
1055*d14abf15SRobert Mustacchi S16_SUB(lm_bd_chain_prod_idx(&tx_chain->bd_chain), old_prod_idx);
1056*d14abf15SRobert Mustacchi pdev->tx_info.chain[chain_idx].eth_tx_prods.packets_prod = pdev->tx_info.chain[chain_idx].eth_tx_prods.packets_prod + 1;
1057*d14abf15SRobert Mustacchi
1058*d14abf15SRobert Mustacchi //DB
1059*d14abf15SRobert Mustacchi dq_msg.header.data = DOORBELL_HDR_T_DB_TYPE; /* tx doorbell normal doorbell type eth */
1060*d14abf15SRobert Mustacchi dq_msg.zero_fill1 = 0;
1061*d14abf15SRobert Mustacchi dq_msg.prod = pdev->tx_info.chain[chain_idx].eth_tx_prods.bds_prod;
1062*d14abf15SRobert Mustacchi
1063*d14abf15SRobert Mustacchi // Make sure that the BD data is updated before updating the producer
1064*d14abf15SRobert Mustacchi // since FW might read the BD right after the producer is updated.
1065*d14abf15SRobert Mustacchi // This is only applicable for weak-ordered memory model archs such
1066*d14abf15SRobert Mustacchi // as IA-64, The following barrier is also mandatory since FW will
1067*d14abf15SRobert Mustacchi // assumes packets must have BDs
1068*d14abf15SRobert Mustacchi //order is crucial in case of preemption
1069*d14abf15SRobert Mustacchi mm_write_barrier();
1070*d14abf15SRobert Mustacchi DOORBELL(pdev, chain_idx, *((u32_t *)&dq_msg));
1071*d14abf15SRobert Mustacchi
1072*d14abf15SRobert Mustacchi return LM_STATUS_SUCCESS;
1073*d14abf15SRobert Mustacchi } /* lm_send_packet */
1074*d14abf15SRobert Mustacchi
1075*d14abf15SRobert Mustacchi /*******************************************************************************
1076*d14abf15SRobert Mustacchi * Description:
1077*d14abf15SRobert Mustacchi *
1078*d14abf15SRobert Mustacchi * Return:
1079*d14abf15SRobert Mustacchi ******************************************************************************/
1080*d14abf15SRobert Mustacchi u32_t
lm_get_packets_sent(struct _lm_device_t * pdev,u32_t chain_idx,s_list_t * sent_list)1081*d14abf15SRobert Mustacchi lm_get_packets_sent( struct _lm_device_t* pdev,
1082*d14abf15SRobert Mustacchi u32_t chain_idx,
1083*d14abf15SRobert Mustacchi s_list_t *sent_list)
1084*d14abf15SRobert Mustacchi {
1085*d14abf15SRobert Mustacchi lm_tx_chain_t* tx_chain = &LM_TXQ(pdev, chain_idx);
1086*d14abf15SRobert Mustacchi lm_packet_t* pkt = 0;
1087*d14abf15SRobert Mustacchi u32_t pkt_cnt = 0;
1088*d14abf15SRobert Mustacchi u16_t old_idx = lm_bd_chain_cons_idx(&tx_chain->bd_chain);
1089*d14abf15SRobert Mustacchi
1090*d14abf15SRobert Mustacchi /* Get the new consumer idx. The bd's between new_idx and old_idx
1091*d14abf15SRobert Mustacchi * are bd's that have been consumered by the chip. */
1092*d14abf15SRobert Mustacchi u16_t new_idx = mm_le16_to_cpu(*(tx_chain->hw_con_idx_ptr));
1093*d14abf15SRobert Mustacchi u16_t pkt_num = S16_SUB(new_idx,tx_chain->pkt_idx);
1094*d14abf15SRobert Mustacchi
1095*d14abf15SRobert Mustacchi //We work here with packets granularity(pkt_idx) as opposed to Teton which
1096*d14abf15SRobert Mustacchi //work in BDs granularity. the cons_idx is not relevant anymore in Tx chain, but
1097*d14abf15SRobert Mustacchi //we keep it for debugging as the firmware still maintains a BD consumer.
1098*d14abf15SRobert Mustacchi
1099*d14abf15SRobert Mustacchi
1100*d14abf15SRobert Mustacchi
1101*d14abf15SRobert Mustacchi DbgBreakIfFastPath(pkt_num == 0);
1102*d14abf15SRobert Mustacchi
1103*d14abf15SRobert Mustacchi while(pkt_num > 0)
1104*d14abf15SRobert Mustacchi {
1105*d14abf15SRobert Mustacchi pkt = (lm_packet_t *) s_list_peek_head(&tx_chain->active_descq);
1106*d14abf15SRobert Mustacchi
1107*d14abf15SRobert Mustacchi //instead of the assert, lets check the db counter in the hw!
1108*d14abf15SRobert Mustacchi //DbgBreakIfFastPath(pkt == NULL);
1109*d14abf15SRobert Mustacchi if (pkt == NULL)
1110*d14abf15SRobert Mustacchi {
1111*d14abf15SRobert Mustacchi
1112*d14abf15SRobert Mustacchi lm_collect_idle_storms_dorrbell_asserts(PFDEV(pdev), TRUE, TRUE, TRUE);
1113*d14abf15SRobert Mustacchi
1114*d14abf15SRobert Mustacchi DbgBreakIfFastPath(pkt == NULL);
1115*d14abf15SRobert Mustacchi
1116*d14abf15SRobert Mustacchi return pkt_cnt;
1117*d14abf15SRobert Mustacchi }
1118*d14abf15SRobert Mustacchi
1119*d14abf15SRobert Mustacchi // TODO check LSO condition as in teton
1120*d14abf15SRobert Mustacchi pkt = (lm_packet_t *) s_list_pop_head(&tx_chain->active_descq);
1121*d14abf15SRobert Mustacchi
1122*d14abf15SRobert Mustacchi /* Advance the old_idx to the start bd_idx of the next packet. */
1123*d14abf15SRobert Mustacchi old_idx = pkt->u1.tx.next_bd_idx;
1124*d14abf15SRobert Mustacchi
1125*d14abf15SRobert Mustacchi pkt->status = LM_STATUS_SUCCESS;
1126*d14abf15SRobert Mustacchi
1127*d14abf15SRobert Mustacchi lm_bd_chain_bds_consumed(&tx_chain->bd_chain, pkt->u1.tx.bd_used);
1128*d14abf15SRobert Mustacchi
1129*d14abf15SRobert Mustacchi if (pkt->u1.tx.coalesce_buf) {
1130*d14abf15SRobert Mustacchi /* return coalesce buffer to the chain's pool */
1131*d14abf15SRobert Mustacchi lm_put_coalesce_buffer(pdev, tx_chain, pkt->u1.tx.coalesce_buf);
1132*d14abf15SRobert Mustacchi pkt->u1.tx.coalesce_buf = NULL;
1133*d14abf15SRobert Mustacchi }
1134*d14abf15SRobert Mustacchi
1135*d14abf15SRobert Mustacchi /* Get an updated new_idx from the status block. The index may
1136*d14abf15SRobert Mustacchi * end at the last BD of a page. This BD is a pointer to the next
1137*d14abf15SRobert Mustacchi * BD page which we need to skip over. */
1138*d14abf15SRobert Mustacchi //TODO: need to verify that we have fairness among other protocols since we are also using the
1139*d14abf15SRobert Mustacchi // in_dpc_loop_cnt - so don't starve!
1140*d14abf15SRobert Mustacchi new_idx = mm_le16_to_cpu(*(tx_chain->hw_con_idx_ptr));
1141*d14abf15SRobert Mustacchi tx_chain->pkt_idx++;
1142*d14abf15SRobert Mustacchi pkt_num = S16_SUB(new_idx,tx_chain->pkt_idx);
1143*d14abf15SRobert Mustacchi pkt_cnt++;
1144*d14abf15SRobert Mustacchi s_list_push_tail(sent_list, &pkt->link);
1145*d14abf15SRobert Mustacchi }
1146*d14abf15SRobert Mustacchi
1147*d14abf15SRobert Mustacchi // TODO: currently bd_chain doesn't maintain the cons_idx...
1148*d14abf15SRobert Mustacchi tx_chain->bd_chain.cons_idx = old_idx;
1149*d14abf15SRobert Mustacchi
1150*d14abf15SRobert Mustacchi DbgMessage(pdev, INFORMl2tx , "lm_get_packets_sent()- func: %d, txidx: %d, txbd con: %d txbd prod: %d \n",
1151*d14abf15SRobert Mustacchi FUNC_ID(pdev), chain_idx , lm_bd_chain_cons_idx(&tx_chain->bd_chain), lm_bd_chain_prod_idx(&tx_chain->bd_chain));
1152*d14abf15SRobert Mustacchi
1153*d14abf15SRobert Mustacchi return pkt_cnt;
1154*d14abf15SRobert Mustacchi } /* lm_get_packets_sent */
1155