/*******************************************************************************
 *
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 *
 * Copyright 2014 QLogic Corporation
 * The contents of this file are subject to the terms of the
 * QLogic End User License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the License at
 * http://www.qlogic.com/Resources/Documents/DriverDownloadHelp/
 * QLogic_End_User_Software_License.txt
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * Module Description:
 *
 *
 * History:
 *    11/15/01 Hav Khauv        Inception.
 *    4/4/06  Eliezer           begin modifying
 ******************************************************************************/

#include "lm5710.h"
#include "microcode_constants.h"
#include "eth_constants.h"
#include "bd_chain.h"
#include "ecore_common.h"

u8_t lm_is_tx_completion(lm_device_t *pdev, u8_t chain_idx)
{
    u8_t result             = FALSE;
    lm_tx_chain_t *tx_chain = &LM_TXQ(pdev, chain_idx);

    DbgBreakIf(!(pdev && tx_chain));

    //hw_con_idx_ptr points directly to this tx chain's completion index in the status block
    //changed from *tx_chain->hw_con_idx_ptr != tx_chain->cons_idx
    if ( tx_chain->hw_con_idx_ptr && (mm_le16_to_cpu(*tx_chain->hw_con_idx_ptr) != tx_chain->pkt_idx))
    {
        result = TRUE;
    }
    DbgMessage(pdev, INFORMi, "lm_is_tx_completion: result is:%s\n", result? "TRUE" : "FALSE");

    return result;
}

static void lm_handle_lso_split(IN  lm_address_t frag_addr_data_offset,
                                IN  u16_t data_part_size,
                                IN  lm_tx_chain_t *tx_chain,
                                IN  struct eth_tx_start_bd *start_bd,
                                IN  struct eth_tx_bd *generic_bd
                                )
{
    struct eth_tx_bd *prod_bd;
    u16_t old_nbd = mm_le16_to_cpu(start_bd->nbd);
    u16_t old_nbytes = mm_le16_to_cpu(generic_bd->nbytes);

    ASSERT_STATIC(OFFSETOF(struct eth_tx_bd, nbytes) == OFFSETOF(struct eth_tx_start_bd, nbytes)) ;
    DbgBreakIfFastPath(!(start_bd && generic_bd));

    //increase nbd to account for the split BD
    start_bd->nbd = mm_cpu_to_le16(old_nbd + 1);

    //fix the byte count of the BD which held headers+data so that it covers only the headers part
    generic_bd->nbytes = mm_cpu_to_le16(old_nbytes - data_part_size);
    //advance the phys addr so it points to the start of the data part, right past the end of the headers
    LM_INC64(&frag_addr_data_offset, mm_le16_to_cpu(generic_bd->nbytes));

    //Advance to the next BD.
    prod_bd = (struct eth_tx_bd *)lm_bd_chain_produce_bd(&tx_chain->bd_chain);

    //fill the fields of the new additional BD which holds _only_ data
    prod_bd->addr_lo              = mm_cpu_to_le32(frag_addr_data_offset.as_u32.low);
    prod_bd->addr_hi              = mm_cpu_to_le32(frag_addr_data_offset.as_u32.high);
    prod_bd->nbytes               = mm_cpu_to_le16(data_part_size);

    tx_chain->lso_split_used++;

    DbgMessage(NULL, WARNl2tx, "#lm_handle_lso_split: after split: original bd nbytes=0x%x,new bd nbytes=0x%x\n",
               mm_le16_to_cpu(generic_bd->nbytes), mm_le16_to_cpu(prod_bd->nbytes));
}
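
/*
 * Worked example (hypothetical sizes, for illustration only): suppose the
 * BD passed in maps a 190-byte fragment whose first 54 bytes are headers.
 * The caller computes data_part_size = 190 - 54 = 136; this routine then
 * shrinks the original BD to nbytes = 54, produces a new data-only BD
 * pointing 54 bytes into the same fragment with nbytes = 136, and
 * increments start_bd->nbd by one for the extra BD.
 */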

static void lm_pre_process_lso_packet(
    IN  lm_device_t     *pdev,
    IN  lm_packet_t     *packet,
    IN  lm_frag_list_t  *frags,
    OUT u8_t            *split_required,
    IN  u16_t            total_hlen_bytes
    )
{
    /* find the headers' nbds, based on eth_hlen and total_hlen_bytes,
       and take the opportunity to decide if header/data separation is required */
    u32_t cnt;
    u16_t sum_frag_size = 0;
    u8_t  hdr_nbds      = 0;

    *split_required = FALSE;

    for(cnt = 0; cnt < frags->cnt; cnt++)
    {
        hdr_nbds++;
        sum_frag_size += (u16_t)frags->frag_arr[cnt].size;
        if (total_hlen_bytes <= sum_frag_size)
        {
            if (total_hlen_bytes < sum_frag_size)
            {
                *split_required = TRUE;
            }
            break;
        }
    }
    DbgBreakIfFastPath(total_hlen_bytes > sum_frag_size);
    packet->u1.tx.hdr_nbds = hdr_nbds;
}
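
/*
 * Example (hypothetical fragment sizes): with total_hlen_bytes = 134 and
 * frags = {64, 128, 1460, ...}, the loop stops at the second fragment
 * (64 + 128 = 192 >= 134), so hdr_nbds = 2, and since 134 < 192 that
 * fragment also carries payload, so *split_required becomes TRUE.  Only
 * when a fragment boundary falls exactly at the end of the headers is
 * the header/data split avoided.
 */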

static void lm_process_lso_packet(IN  lm_packet_t *packet,
                                  IN  lm_device_t *pdev,
                                  IN  lm_tx_chain_t *tx_chain,
                                  IN  lm_frag_list_t *frags,
                                  IN  void *parse_bd,
                                  IN  struct eth_tx_start_bd *start_bd,
                                  OUT lm_frag_t **frag,
                                  IN  u16_t total_hlen_bytes,
                                  IN  u8_t split_required)
{
    struct eth_tx_bd *prod_bd = NULL;
    u32_t cnt                 = 0;
    u16_t hlen_remainder      = total_hlen_bytes;

    /* A sanity check ("Maximum total length for IP and TCP headers is
     * 120 bytes.") was here.  The sanity check is removed; a corresponding
     * statistic is counted instead. */
    if ((packet->l2pkt_tx_info->lso_ip_hdr_len + packet->l2pkt_tx_info->lso_tcp_hdr_len) > 120) {
        pdev->debug_info.number_of_long_LSO_headers++;
    }
    start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;

    if (CHIP_IS_E1x(pdev))
    {
        struct eth_tx_parse_bd_e1x *parse_bd_e1x = (struct eth_tx_parse_bd_e1x *)parse_bd;
        parse_bd_e1x->lso_mss          = mm_cpu_to_le16(packet->l2pkt_tx_info->lso_mss);
        parse_bd_e1x->ip_id            = mm_cpu_to_le16(packet->l2pkt_tx_info->lso_ipid);
        parse_bd_e1x->tcp_send_seq     = mm_cpu_to_le32(packet->l2pkt_tx_info->lso_tcp_send_seq);
        parse_bd_e1x->tcp_flags        = packet->l2pkt_tx_info->lso_tcp_flags; // no endianness swap since it is u8_t

        //in case of LSO, the FW requires the ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN flag to be set, since the TCP seg len is 0
        parse_bd_e1x->global_data |= ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN;

        if GET_FLAGS(packet->l2pkt_tx_info->flags, LM_TX_FLAG_TCP_LSO_SNAP_FRAME)
        {
            parse_bd_e1x->global_data |= ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN;
        }
    }
    else
    {
        struct eth_tx_parse_bd_e2 *parse_bd_e2 = (struct eth_tx_parse_bd_e2 *)parse_bd;
        parse_bd_e2->parsing_data |= ETH_TX_PARSE_BD_E2_LSO_MSS & (packet->l2pkt_tx_info->lso_mss << ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT);
    }

    //enforce this due to miniport design in case of LSO and CSUM
    SET_FLAGS(packet->l2pkt_tx_info->flags, LM_TX_FLAG_COMPUTE_TCP_UDP_CKSUM);

    if (!GET_FLAGS(packet->l2pkt_tx_info->flags, LM_TX_FLAG_IPV6_PACKET))
    {
        SET_FLAGS(packet->l2pkt_tx_info->flags, LM_TX_FLAG_COMPUTE_IP_CKSUM);
    }

    //required only in case of LSO - the number of BDs the headers occupy all together
    RESET_FLAGS(start_bd->general_data, ETH_TX_START_BD_HDR_NBDS);
    start_bd->general_data |= ((packet->u1.tx.hdr_nbds & ETH_TX_START_BD_HDR_NBDS) << ETH_TX_START_BD_HDR_NBDS_SHIFT);

    //check for split in START BD
    if (split_required)
    {
        if ((start_bd->general_data & ETH_TX_START_BD_HDR_NBDS) == 1)
        {
            lm_handle_lso_split(frags->frag_arr[0].addr,
                                mm_le16_to_cpu(start_bd->nbytes) - hlen_remainder,
                                tx_chain,
                                start_bd,
                                (struct eth_tx_bd *)start_bd );
            split_required = FALSE;
        }
        else
        {
            u16_t start_bd_nbytes = mm_le16_to_cpu(start_bd->nbytes);

            DbgBreakIfFastPath(hlen_remainder <= start_bd_nbytes);
            hlen_remainder -= start_bd_nbytes;
        }
    }

    for(cnt = 1; cnt < frags->cnt; cnt++)
    {
        DbgBreakIfFastPath((*frag)->size >= 0x10000 || (*frag)->size == 0);

        //Advance to the next BD.
        prod_bd = (struct eth_tx_bd *)lm_bd_chain_produce_bd(&tx_chain->bd_chain);

        prod_bd->addr_lo              = mm_cpu_to_le32((*frag)->addr.as_u32.low);
        prod_bd->addr_hi              = mm_cpu_to_le32((*frag)->addr.as_u32.high);
        prod_bd->nbytes               = mm_cpu_to_le16((u16_t) (*frag)->size);

        //if there is a split condition and we are on the exact BD, do the split here. We don't enter if a split was already done!
        if (split_required)
        {
            if (cnt == ((start_bd->general_data & ETH_TX_START_BD_HDR_NBDS) - 1))
            {
                lm_handle_lso_split((*frag)->addr,
                                    mm_le16_to_cpu(prod_bd->nbytes) - hlen_remainder,
                                    tx_chain,
                                    start_bd,
                                    prod_bd
                                    );
                split_required = FALSE;
            }
            else
            {
                u16_t prod_bd_nbytes = mm_le16_to_cpu(prod_bd->nbytes);

                DbgBreakIfFastPath(hlen_remainder <= prod_bd_nbytes);
                hlen_remainder -= prod_bd_nbytes;
            }
        }

        packet->size += (*frag)->size;
        (*frag)++;
    }

    //statistics
    //since this is the fast path, we do not use an ATOMIC INC;
    //therefore the statistic might not be completely accurate.
    //possible fix (FUTURE, if required): count the statistic item per RSS/TSS
    LM_COMMON_DRV_STATS_INC_ETH(pdev, tx_lso_frames);
}
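
/*
 * Example of the hlen_remainder walk (hypothetical sizes): with
 * hdr_nbds = 3, total_hlen_bytes = 134 and fragment sizes {64, 32, 190},
 * the start BD consumes 64 header bytes (hlen_remainder = 70), the next
 * BD consumes 32 (hlen_remainder = 38), and the third BD
 * (cnt == hdr_nbds - 1 == 2) is split by lm_handle_lso_split() into a
 * 38-byte header BD and a 152-byte data-only BD.
 */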

/**
 * @Description:
 *   returns a coalesce buffer of size >= buf_size, or NULL if none is available
 * @Assumptions:
 *   txq lock is taken by the caller
 */
lm_coalesce_buffer_t *
lm_get_coalesce_buffer(
    IN lm_device_t      *pdev,
    IN lm_tx_chain_t    *txq,
    IN u32_t            buf_size)
{
    lm_coalesce_buffer_t *coalesce_buf = NULL;
    u32_t coalesce_buf_cnt, cnt;

    if (ERR_IF(CHK_NULL(pdev) || CHK_NULL(txq) || !buf_size)) {
        DbgBreakFastPath();
        return NULL;
    }

    coalesce_buf_cnt = s_list_entry_cnt(&txq->coalesce_buf_list);
    for(cnt = 0; cnt < coalesce_buf_cnt; cnt++)
    {
        coalesce_buf = (lm_coalesce_buffer_t *) s_list_pop_head(
            &txq->coalesce_buf_list);

        DbgBreakIfFastPath(coalesce_buf == NULL);
        if(NULL == coalesce_buf)
        {
            //a NULL coalesce buffer in the list should never happen.
            DbgMessage(pdev, FATAL, "lm_get_coalesce_buffer: coalesce buffer was null\n");
            break;
        }
        if(coalesce_buf->buf_size >= buf_size)
        {
            txq->coalesce_buf_used++;
            break;
        }

        s_list_push_tail(&txq->coalesce_buf_list, &coalesce_buf->link);

        coalesce_buf = NULL;
    }

    return coalesce_buf;
} /* lm_get_coalesce_buffer */

/**
 * @Description:
 *   returns coalesce_buf into the txq list
 * @Assumptions:
 *   txq lock is taken by the caller
 */
void
lm_put_coalesce_buffer(
    IN lm_device_t          *pdev,
    IN lm_tx_chain_t        *txq,
    IN lm_coalesce_buffer_t *coalesce_buf)
{
    if (ERR_IF(CHK_NULL(pdev) || CHK_NULL(txq) || CHK_NULL(coalesce_buf))) {
        DbgBreakFastPath();
        return;
    }

    s_list_push_tail(&txq->coalesce_buf_list, &coalesce_buf->link);

    return;
} /* lm_put_coalesce_buffer */

/**
 * @Description:
 *   copies the given packet into an available coalesce buffer of the given txq
 * @Assumptions:
 *   txq lock is taken by the caller
 * @Returns:
 *   - SUCCESS -
 *      - The OUT parameter coal_buf will be set to point to the allocated
 *        coalesce buffer
 *      - The coalesce buffer frag size will be set to the given packet size
 *   - RESOURCE - no coalesce buffer is available for the given packet
 *                (according to packet size)
 */
static lm_status_t
lm_copy_packet_to_coalesce_buffer(
    IN  lm_device_t             *pdev,
    IN  lm_tx_chain_t           *txq,
    IN  lm_packet_t             *lmpkt,
    IN  lm_frag_list_t          *frags,
    OUT lm_coalesce_buffer_t    **coal_buf
    )
{
    lm_coalesce_buffer_t *coalesce_buf;
    lm_frag_t*            frag;
    u32_t                 pkt_size      = 0;
    u32_t                 copied_bytes;
    u32_t                 cnt;

    if (ERR_IF(CHK_NULL(pdev) || CHK_NULL(txq) ||
               CHK_NULL(lmpkt) || CHK_NULL(frags)))
    {
        DbgBreakFastPath();
        return LM_STATUS_FAILURE;
    }

    /* Determine packet size. */
    frag = &frags->frag_arr[0];
    for (cnt = 0; cnt < frags->cnt; cnt++, frag++) {
        pkt_size += frag->size;
    }

    /* Find a buffer large enough for copying this packet.  In the case
     * of an LSO frame, we should have at least one 64k coalesce buffer. */
    coalesce_buf = lm_get_coalesce_buffer(pdev, txq, pkt_size);
    if(coalesce_buf == NULL)
    {
        DbgMessage(pdev, INFORMl2tx,
                    "#copy to coalesce buffer FAILED, (lmpkt=0x%p,pkt_size=%d)\n",
                    lmpkt, pkt_size);
        LM_COMMON_DRV_STATS_ATOMIC_INC_ETH(pdev, tx_no_coalesce_buf);
        return LM_STATUS_RESOURCE;
    }

    /* copy the packet into the coalesce buffer */
    copied_bytes = mm_copy_packet_buf(
        pdev, lmpkt, coalesce_buf->mem_virt, pkt_size);
    if (ERR_IF(copied_bytes != pkt_size)) {
        DbgBreakFastPath();
        lm_put_coalesce_buffer(pdev, txq, coalesce_buf);
        return LM_STATUS_FAILURE;
    }

    /* adjust the frag size in the coalesce buf */
    coalesce_buf->frags.frag_arr[0].size = pkt_size;

    *coal_buf = coalesce_buf;
    return LM_STATUS_SUCCESS;
} /* lm_copy_packet_to_coalesce_buffer */

/**
 * @Description:
 *   checks if the packet requires copying to a coalesce buf (packet too fragmented)
 * @Returns:
 *   TRUE or FALSE
 */
static u8_t
lm_is_packet_coalescing_required(
    IN     lm_device_t    *pdev,
    IN     lm_packet_t    *lmpkt,
    IN     lm_frag_list_t *frags,
    IN     u8_t           num_parsing_bds
    )
{
    u8_t to_copy                    = FALSE;
    u8_t wnd_size                   = 0;
    static u32_t const MAX_FETCH_BD = 13;  /* HW capability: max BDs per packet */

    // every wnd_size consecutive TCP payload BDs must hold a total payload
    // size which is greater than, or equal to, the MSS size.
    wnd_size  = MAX_FETCH_BD - lmpkt->u1.tx.hdr_nbds - num_parsing_bds - 1;

    if (frags->cnt > wnd_size)
    {
        if GET_FLAGS(lmpkt->l2pkt_tx_info->flags, LM_TX_FLAG_TCP_LSO_FRAME)
        {
            /* Too fragmented LSO packet, check if it needs to be copied: */
            u8_t num_frags = (u8_t)frags->cnt;
            u8_t wnd_idx   = 0;
            u8_t frag_idx  = 0;
            u32_t wnd_sum  = 0;

            for (wnd_idx = lmpkt->u1.tx.hdr_nbds; wnd_idx <= (num_frags - wnd_size); wnd_idx++)
            {
                for (frag_idx = 0; frag_idx < wnd_size; frag_idx++)
                {
                    wnd_sum += frags->frag_arr[wnd_idx + frag_idx].size;
                }

                if (wnd_sum < lmpkt->l2pkt_tx_info->lso_mss)
                {
                    DbgMessage(pdev, WARNl2tx,
                                "#copy to coalesce buffer IS REQUIRED for LSO packet, (lmpkt=0x%p,num_frags=%d)\n",
                                lmpkt, num_frags);
                    to_copy = TRUE;
                    break;
                }
                wnd_sum = 0;
            }
        }
        else
        {
            /* in the non-LSO case, a too fragmented packet should always
               be copied to a coalesce buffer */
            DbgMessage(pdev, INFORMl2tx,
                        "#copy to coalesce buffer IS REQUIRED for NON LSO packet, (lmpkt=0x%p,num_frags=%d)\n",
                        lmpkt, frags->cnt);
            to_copy = TRUE;
        }
    }

    return to_copy;
} /* lm_is_packet_coalescing_required */
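
/*
 * Example (hypothetical numbers): with MAX_FETCH_BD = 13, hdr_nbds = 1 and
 * num_parsing_bds = 1, wnd_size = 13 - 1 - 1 - 1 = 10.  An LSO packet with
 * 14 fragments slides a 10-BD window across the payload fragments
 * (wnd_idx = 1..4); if any window's bytes sum to less than lso_mss, the
 * HW could be asked to fetch more than MAX_FETCH_BD BDs for a single
 * segment, so the packet is copied to a coalesce buffer instead.
 */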

#define LM_VLAN_PRI_BIT_LOCATION            (13)
#define LM_GET_PRI_FROM_VLAN(_vlan)         ((_vlan) >> LM_VLAN_PRI_BIT_LOCATION)
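/*
 * Note: in the 802.1Q tag control information the 3-bit priority code
 * point (PCP) occupies bits 15:13, which is why LM_GET_PRI_FROM_VLAN
 * simply shifts the tag right by 13.
 */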
/**
 * @Description:
 *   Checks if a VLAN tag exists and, if it does, extracts the priority.
 * @param pdev
 * @param packet
 *
 * @return u8_t
 */
u8_t
lm_get_pri_from_send_packet_param(
    lm_device_t *pdev,
    lm_packet_t *packet)
{
    //untagged packets should be treated as priority 0
    u8_t pri = 0;

    if GET_FLAGS(packet->l2pkt_tx_info->flags , (LM_TX_FLAG_INSERT_VLAN_TAG | LM_TX_FLAG_VLAN_TAG_EXISTS))
    {
        DbgMessage(pdev, INFORMl2, "Outband vlan 0X%x\n",packet->l2pkt_tx_info->vlan_tag);

        pri = LM_GET_PRI_FROM_VLAN(packet->l2pkt_tx_info->vlan_tag);
    }

    return pri;
}

void
fill_bds_for_encapsulated_packet(
    lm_device_t                *pdev,
    lm_packet_t                *packet,
    struct eth_tunnel_data     *tunnel_data,
    struct eth_tx_parse_2nd_bd *parse_bd_2nd_ptr,
    u8_t                       eth_hlen)
{
    DbgBreakIf(CHIP_IS_E1x(pdev));

    ecore_set_fw_mac_addr(&tunnel_data->dst_hi,
                          &tunnel_data->dst_mid,
                          &tunnel_data->dst_lo,
                          packet->l2pkt_tx_info->dst_mac_addr);

    // Inner IP header offset in WORDs (16-bit) from start of packet
    tunnel_data->ip_hdr_start_inner_w = (packet->l2pkt_tx_info->encap_packet_inner_frame_offset +
                                         packet->l2pkt_tx_info->encap_packet_inner_ip_relative_offset) >> 1;

    // Checksum of pseudo header with length field = 0
    tunnel_data->pseudo_csum = mm_cpu_to_le16(packet->l2pkt_tx_info->tcp_pseudo_csum);
    // Outer IP header checksum (over ALL IP header fields) for a non-LSO encapsulated packet
    tunnel_data->fw_ip_hdr_csum = mm_cpu_to_le16(packet->l2pkt_tx_info->fw_ip_csum);

    if (GET_FLAGS(packet->l2pkt_tx_info->flags, LM_TX_FLAG_IPV6_PACKET))
    {
        // Set in case the outer IP header is ipV6
        SET_FLAGS(tunnel_data->flags, ETH_TUNNEL_DATA_IP_HDR_TYPE_OUTER);
    }

    if (!parse_bd_2nd_ptr)
    {
        return;
    }

    // Outer IP header offset in WORDs (16-bit) from start of packet
    parse_bd_2nd_ptr->global_data |= ( ((eth_hlen) >> 1) << ETH_TX_PARSE_2ND_BD_IP_HDR_START_OUTER_W_SHIFT);

    if (!(GET_FLAGS(packet->l2pkt_tx_info->flags, LM_TX_FLAG_IPV6_PACKET)))
    {
        // Outer ipV4 header length in words
        parse_bd_2nd_ptr->global_data |= ( ((packet->l2pkt_tx_info->lso_ip_hdr_len) >> 1) << ETH_TX_PARSE_2ND_BD_IP_HDR_LEN_OUTER_W_SHIFT);
    }

    // NS flag: an optional addition to ECN that protects against accidental or malicious concealment of marked packets from the TCP sender
    parse_bd_2nd_ptr->global_data |= (packet->l2pkt_tx_info->tcp_nonce_sum_bit << ETH_TX_PARSE_2ND_BD_NS_FLG_SHIFT);

    parse_bd_2nd_ptr->tcp_send_seq    = mm_cpu_to_le32(packet->l2pkt_tx_info->lso_tcp_send_seq);
    parse_bd_2nd_ptr->tcp_flags       = packet->l2pkt_tx_info->lso_tcp_flags; // no endianness swap since it is u8_t

    /* We set tunnel_lso_inc_ip_id to a constant, INT_HEADER, so the "HW IP header" is the inner header.
       Assuming the "FW IP header" is the outer IP header and the "HW IP header" is the inner IP header:
       fw_ip_csum_wo_len_flags_frag - the IP checksum without length, flags and fragment offset of the outer IP header
       hw_ip_id - the IP id of the inner IP header */
    parse_bd_2nd_ptr->fw_ip_csum_wo_len_flags_frag = mm_cpu_to_le16(packet->l2pkt_tx_info->fw_ip_csum);
    parse_bd_2nd_ptr->hw_ip_id                     = mm_cpu_to_le16(packet->l2pkt_tx_info->lso_ipid);

    parse_bd_2nd_ptr->fw_ip_hdr_to_payload_w = (packet->l2pkt_tx_info->encap_packet_inner_frame_offset +
                                                packet->l2pkt_tx_info->encap_packet_inner_ip_relative_offset +
                                                packet->l2pkt_tx_info->encap_packet_inner_tcp_relative_offset +
                                                packet->l2pkt_tx_info->lso_tcp_hdr_len -
                                                eth_hlen) >> 1;

    if (GET_FLAGS(packet->l2pkt_tx_info->flags, LM_TX_FLAG_IPV6_PACKET))
    {
        /* fw_ip_hdr_to_payload_w: in IpV4, the length (in WORDs) from the FW IpV4 header
           start to the payload start.  In IpV6, the length (in WORDs) from the FW IpV6
           header end to the payload start.  However, if extension headers are included,
           their length is counted here as well. */

        // if the outer header (fw header) is ipv4 then fw_ip_hdr_to_payload_w will be set to:
        // the length in words from start of outer IP header to start of payload
        // = outer ip header + gre header + inner mac header + inner ip header + tcp header length
        //
        // If the outer header is ipv6 then fw_ip_hdr_to_payload_w will be set to:
        // the length in words from end of the fixed outer IP header to start of payload + extension headers (if present)
        // = outer ip header - fixed ip header + gre header + inner mac header + inner ip header + tcp header length
        // fixed ipv6 header length is 40 bytes = 20 words
        parse_bd_2nd_ptr->fw_ip_hdr_to_payload_w -= 20;
    }
}
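
/*
 * Layout reference (GRE example; field names are from l2pkt_tx_info,
 * positions are illustrative):
 *
 *   | outer ETH | outer IP | GRE | inner ETH | inner IP | TCP | payload |
 *   0           eth_hlen         ^           ^          ^
 *                                |           |          inner TCP start
 *                                |           inner IP start
 *                                encap_packet_inner_frame_offset
 *
 * encap_packet_inner_ip_relative_offset is relative to the inner frame
 * start, and encap_packet_inner_tcp_relative_offset to the inner IP
 * start; the WORD-sized fields written above are these byte offsets
 * shifted right by 1.
 */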

/*******************************************************************************
 * Description:
 *
 * Return:
 ******************************************************************************/
lm_status_t
lm_send_packet(
    lm_device_t *pdev,
    u32_t chain_idx,
    lm_packet_t *packet,
    lm_frag_list_t *frags)
{
    lm_tx_chain_t              *tx_chain         = NULL;
    struct eth_tx_start_bd     *start_bd         = NULL;
    struct eth_tx_parse_bd_e1x *parse_bd_e1x     = NULL;
    struct eth_tx_parse_bd_e2  *parse_bd_e2      = NULL;
    struct eth_tx_parse_2nd_bd *parse_bd_2nd_ptr = NULL;
    struct eth_tx_bd           *prod_bd          = NULL;
    lm_frag_t                  *frag             = NULL;
    u16_t                      old_prod_idx      = 0;
    u32_t                      cnt               = 0;
#if defined(__BIG_ENDIAN)
    struct doorbell_set_prod  dq_msg   = {0, 0, {0}};
#elif defined(__LITTLE_ENDIAN)
    struct doorbell_set_prod  dq_msg   = {{0}, 0, 0};
#endif
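    /* The two initializers differ because the field order of struct
     * doorbell_set_prod in the FW headers is declared per endianness;
     * the brace initializer must match whichever layout is active. */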

    u8_t eth_hlen                                = ETHERNET_PACKET_HEADER_SIZE;
    u8_t split_required                          = FALSE;
    u8_t eth_addr_type                           = UNKNOWN_ADDRESS;
    u16_t total_hlen_bytes                       = 0;
    u16_t start_bd_nbd                           = 0;
    u16_t vlan_tag                               = 0;
    void* parse_bd_ptr                           = NULL;
    u8_t  is_encapsulated_offload                = 0;
    u8_t  num_parsing_bds                        = 1;

    //DbgBreakIfFastPath(chain_idx >= pdev->params.rss_chain_cnt);

    DbgMessage(pdev, VERBOSEl2tx | VERBOSEl4tx, "### lm_send_packet\n");

    tx_chain = &LM_TXQ(pdev, chain_idx);
    old_prod_idx = lm_bd_chain_prod_idx(&tx_chain->bd_chain);

    // Compute the Ethernet header length
    if GET_FLAGS(packet->l2pkt_tx_info->flags, LM_TX_FLAG_VLAN_TAG_EXISTS)
    {
        eth_hlen += ETHERNET_VLAN_TAG_SIZE;
    }

    if GET_FLAGS(packet->l2pkt_tx_info->flags, LM_TX_FLAG_TCP_LSO_SNAP_FRAME)
    {
        eth_hlen += ETHERNET_LLC_SNAP_SIZE;
    }

    is_encapsulated_offload = (GET_FLAGS(packet->l2pkt_tx_info->flags, LM_TX_FLAG_IS_ENCAP_PACKET) &&
                               GET_FLAGS(packet->l2pkt_tx_info->flags, (LM_TX_FLAG_COMPUTE_IP_CKSUM | LM_TX_FLAG_COMPUTE_TCP_UDP_CKSUM | LM_TX_FLAG_TCP_LSO_FRAME)));

    if (is_encapsulated_offload)
    {
        if (GET_FLAGS(packet->l2pkt_tx_info->flags, LM_TX_FLAG_TCP_LSO_FRAME))
        {
            // only for an encapsulated packet with LSO offload do we need a second parsing BD
            num_parsing_bds = 2;
        }
        // the encapsulated packet header size includes both outer and inner headers
        total_hlen_bytes = packet->l2pkt_tx_info->encap_packet_inner_frame_offset +
                           packet->l2pkt_tx_info->encap_packet_inner_ip_relative_offset +
                           packet->l2pkt_tx_info->encap_packet_inner_tcp_relative_offset +
                           packet->l2pkt_tx_info->lso_tcp_hdr_len;
    }
    else
    {
        //calculate the total sum of the ETH + IP + TCP headers in terms of bytes
        total_hlen_bytes = packet->l2pkt_tx_info->lso_ip_hdr_len + packet->l2pkt_tx_info->lso_tcp_hdr_len + eth_hlen;
    }

    if (GET_FLAGS(packet->l2pkt_tx_info->flags, LM_TX_FLAG_TCP_LSO_FRAME))
    {
        lm_pre_process_lso_packet(pdev, packet, frags, &split_required, total_hlen_bytes);
    }

    /* handle packet coalescing - if required, copy the too fragmented packet
       into a pre-allocated coalesce buffer */
    if (lm_is_packet_coalescing_required(pdev, packet, frags, num_parsing_bds))
    {
        lm_coalesce_buffer_t *coalesce_buf = NULL;
        lm_status_t lm_status;

        if (ERR_IF(packet->u1.tx.coalesce_buf != NULL))
        {
            /* the pkt coal buf can't already be set */
            DbgBreakFastPath();
            return LM_STATUS_FAILURE;
        }

        lm_status = lm_copy_packet_to_coalesce_buffer(
            pdev, tx_chain, packet, frags, &coalesce_buf);

        if (lm_status == LM_STATUS_SUCCESS)
        {
            LM_COMMON_DRV_STATS_ATOMIC_INC_ETH(pdev, tx_l2_assembly_buf_use);

            packet->u1.tx.coalesce_buf = coalesce_buf; /* saved to be freed upon completion */

            packet->u1.tx.hdr_nbds = 1;
            split_required = 1;

            /* from here on, use the coalesce buf frags list
               instead of the frags list given by the caller */
            frags = &coalesce_buf->frags;
        }
        else
        {
            return lm_status; /* no coalesce buf available, can't continue */
        }
    }

    // stringent heuristic - number of parsing BDs + a split of hdr & data
    if ((frags->cnt + num_parsing_bds + 1) > lm_bd_chain_avail_bds(&tx_chain->bd_chain))
    {
        LM_COMMON_DRV_STATS_ATOMIC_INC_ETH(pdev, tx_no_l2_bd);
        if (packet->u1.tx.coalesce_buf)
        {
            /* TODO: change this to "goto out_err:" */
            lm_put_coalesce_buffer(pdev, tx_chain, packet->u1.tx.coalesce_buf);
            packet->u1.tx.coalesce_buf = NULL;
        }
        return LM_STATUS_RESOURCE;
    }
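
    /* BD budget illustration (hypothetical): an E2 LSO packet carrying
     * frags->cnt = 5 fragments with num_parsing_bds = 2 must find
     * 5 + 2 + 1 = 8 free BDs here: five data BDs, two parsing BDs, and one
     * spare for the data-only BD a header/data split may produce. */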

    packet->size = 0;
    start_bd = (struct eth_tx_start_bd *)lm_bd_chain_produce_bd(&tx_chain->bd_chain);
    mm_mem_zero(start_bd, sizeof(union eth_tx_bd_types));
    //initialize the start BD
    frag = frags->frag_arr;
    start_bd->addr_lo              = mm_cpu_to_le32(frag->addr.as_u32.low);
    start_bd->addr_hi              = mm_cpu_to_le32(frag->addr.as_u32.high);
    start_bd->nbytes               = mm_cpu_to_le16((u16_t) frag->size);
    start_bd->bd_flags.as_bitfield = (u8_t) ETH_TX_BD_FLAGS_START_BD;
    start_bd->nbd = 0;
    // set the number of parsing BDs in the packet.
    // parse_nbds is set to: the number of parsing BDs in the packet - 1
    start_bd->general_data |= ((num_parsing_bds - 1) << ETH_TX_START_BD_PARSE_NBDS_SHIFT);
    if (is_encapsulated_offload)
    {
        // tunnel_exist should be set iff the packet is encapsulated
        start_bd->general_data |= ETH_TX_START_BD_TUNNEL_EXIST;

        // for encapsulated packets, ETH_TX_BD_FLAGS_IPV6 refers to the inner header
        if (GET_FLAGS(packet->l2pkt_tx_info->flags, LM_TX_FLAG_ENCAP_PACKET_IS_INNER_IPV6))
        {
            start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IPV6;
        }
    }
    else
    {
        // set in case of an ipV6 packet
        if (GET_FLAGS(packet->l2pkt_tx_info->flags, LM_TX_FLAG_IPV6_PACKET))
        {
            start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IPV6;
        }
    }

    if (GET_FLAGS(packet->l2pkt_tx_info->flags , LM_TX_FLAG_INSERT_VLAN_TAG))
    {
        DbgMessage(pdev, INFORMl2, "Outband vlan 0X%x\n",packet->l2pkt_tx_info->vlan_tag);
        start_bd->bd_flags.as_bitfield |= (ETH_TX_BD_FLAGS_VLAN_MODE & (X_ETH_OUTBAND_VLAN << ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT));

        vlan_tag = packet->l2pkt_tx_info->vlan_tag;
    }
    else if (GET_FLAGS(packet->l2pkt_tx_info->flags, LM_TX_FLAG_VLAN_TAG_EXISTS))
    {
        DbgMessage(pdev, INFORMl2, "Inband vlan 0X%x\n",packet->l2pkt_tx_info->vlan_tag);
        start_bd->bd_flags.as_bitfield |= (ETH_TX_BD_FLAGS_VLAN_MODE & (X_ETH_INBAND_VLAN << ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT));

        vlan_tag = packet->l2pkt_tx_info->vlan_tag;
    }
    else
    {
        if (IS_VFDEV(pdev)) {
            //the VF is in secure mode: read the ethertype (and a possible
            //inband tag) directly from the packet bytes
            ((u8_t*)&vlan_tag)[0] = packet->l2pkt_tx_info->eth_type[1];
            ((u8_t*)&vlan_tag)[1] = packet->l2pkt_tx_info->eth_type[0];
            if (vlan_tag == VLAN_TAGGED_FRAME_ETH_TYPE) {
                ((u8_t*)&vlan_tag)[0] = packet->l2pkt_tx_info->eth_type[3];
                ((u8_t*)&vlan_tag)[1] = packet->l2pkt_tx_info->eth_type[2];
                DbgMessage(pdev, INFORMl2, "Inband vlan (from packet) 0X%x\n",vlan_tag);
                start_bd->bd_flags.as_bitfield |= (ETH_TX_BD_FLAGS_VLAN_MODE & (X_ETH_INBAND_VLAN << ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT));
            }
        } else {
            /* for debug only - to discover driver/fw lack of synchronization */
            vlan_tag = (u16_t)(pdev->tx_info.chain[chain_idx].eth_tx_prods.packets_prod);
        }
    }
    start_bd->vlan_or_ethertype = mm_cpu_to_le16(vlan_tag);

    if (GET_FLAGS(packet->l2pkt_tx_info->flags, LM_TX_FLAG_FORCE_VLAN_MODE))
    {
        //force vlan mode according to the BDs (vlan mode can change according to global configuration)
        SET_FLAGS(start_bd->general_data, ETH_TX_START_BD_FORCE_VLAN_MODE);
    }

    packet->size += frag->size;
    frag++;

    //a parse BD is always present, for FW simplicity (even for SNAP frames)
    //produce and fill the parse BD
    /////////////////start parse BD handling ////////////////////////////////////////////
    parse_bd_ptr = lm_bd_chain_produce_bd(&tx_chain->bd_chain);
    mm_mem_zero(parse_bd_ptr, sizeof(union eth_tx_bd_types));

    if (CHIP_IS_E1x(pdev))
    {
        parse_bd_e1x = parse_bd_ptr;
        parse_bd_e1x->global_data = (UNICAST_ADDRESS << ETH_TX_PARSE_BD_E1X_ETH_ADDR_TYPE_SHIFT);
    }
    else
    {
        parse_bd_e2 = parse_bd_ptr;
        parse_bd_e2->parsing_data = (UNICAST_ADDRESS << ETH_TX_PARSE_BD_E2_ETH_ADDR_TYPE_SHIFT);
    }
    // first parse BD taken into account
    start_bd_nbd++;

    if (num_parsing_bds > 1)
    {
        // LSO offload for an encapsulated packet - two parsing BDs are required
        parse_bd_2nd_ptr = lm_bd_chain_produce_bd(&tx_chain->bd_chain);
        mm_mem_zero(parse_bd_2nd_ptr, sizeof(union eth_tx_bd_types));
        //second parse BD taken into account
        start_bd_nbd++;
    }

    if (is_encapsulated_offload)
    {
        fill_bds_for_encapsulated_packet(pdev, packet, &parse_bd_e2->data.tunnel_data, parse_bd_2nd_ptr, eth_hlen);
    }
    /////////////////end parse BD handling ////////////////////////////////////////////

    if (IS_PFDEV(pdev) && (tx_chain->idx == FWD_CID(pdev)))
    {
        pdev->tx_info.forward_packets++;
    }
    if GET_FLAGS(packet->l2pkt_tx_info->flags, LM_TX_FLAG_TCP_LSO_FRAME)
    {
        start_bd->nbd = mm_cpu_to_le16(start_bd_nbd);
        lm_process_lso_packet(packet, pdev, tx_chain, frags, parse_bd_ptr, start_bd,
                              &frag, total_hlen_bytes, split_required);
        start_bd_nbd = mm_le16_to_cpu(start_bd->nbd); //read back: an LSO split may have incremented nbd
        if (IS_PFDEV(pdev) && (tx_chain->idx == FWD_CID(pdev)))
        {
            pdev->tx_info.lso_forward_packets++;
        }

    }
    else //This is the regular path, in case we're not LSO
    {
        // In non-LSO packets, if there is more than 1 data BD, the second data BD
        // (the one right after the parsing BD) is used for total_pkt_bytes: the
        // total packet length, without the outer vlan and, in case of vlan
        // offload, without the vlan.
        struct eth_tx_bd *total_pkt_bytes_bd        = NULL;

        //pass on all frags except the first one
        for(cnt = 1; cnt < frags->cnt; cnt++)
        {
            DbgMessage(pdev, VERBOSEl2tx | VERBOSEl4tx, "   frag %d, hi 0x%x, lo 0x%x, size %d\n",
                cnt, frag->addr.as_u32.high, frag->addr.as_u32.low, frag->size);

            DbgBreakIfFastPath(frag->size >= 0x10000 || frag->size == 0);
            // TODO: assert / fix up if there are too many SGEs per MTU

            //Advance to the next BD.
            prod_bd = (struct eth_tx_bd *)lm_bd_chain_produce_bd(&tx_chain->bd_chain);

            prod_bd->addr_lo              = mm_cpu_to_le32(frag->addr.as_u32.low);
            prod_bd->addr_hi              = mm_cpu_to_le32(frag->addr.as_u32.high);
            prod_bd->nbytes               = mm_cpu_to_le16((u16_t) frag->size);
            if (NULL == total_pkt_bytes_bd)
            {
                //second data bd saved for updating total_pkt_bytes.
                total_pkt_bytes_bd = prod_bd;
            }
            packet->size += frag->size;

            frag++;
        }

        if (NULL != total_pkt_bytes_bd)
        {
            //we have a second data bd
            total_pkt_bytes_bd->total_pkt_bytes = mm_cpu_to_le16((u16_t) packet->size);
        }
    }

    //we might have an IP csum, a TCP csum, both, or none.
    //It is perfectly legitimate for a packet to be csum offloaded with or without LSO!
    //If the packet is LSO, we must enter here!!!!
    if (GET_FLAGS(packet->l2pkt_tx_info->flags, (LM_TX_FLAG_COMPUTE_IP_CKSUM | LM_TX_FLAG_COMPUTE_TCP_UDP_CKSUM)))
    {
        // non-encapsulated packet: set the bit if LM_TX_FLAG_COMPUTE_IP_CKSUM is on (LM_TX_FLAG_ENCAP_PACKET_IS_INNER_IPV6 is always zero)
        // encapsulated packet: set the bit if LM_TX_FLAG_COMPUTE_IP_CKSUM is on and the inner IP header is ipv4
        if (GET_FLAGS(packet->l2pkt_tx_info->flags, LM_TX_FLAG_COMPUTE_IP_CKSUM) &&
           (!GET_FLAGS(packet->l2pkt_tx_info->flags, LM_TX_FLAG_ENCAP_PACKET_IS_INNER_IPV6)))
        {
            start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IP_CSUM;
        }

        if (GET_FLAGS(packet->l2pkt_tx_info->flags, LM_TX_FLAG_COMPUTE_TCP_UDP_CKSUM))
        {
            start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
            if(packet->l2pkt_tx_info->cs_any_offset)
            {
                start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IS_UDP;
            }
        }

        if (CHIP_IS_E1x(pdev)) {
            struct eth_tx_parse_bd_e1x *parse_bd_e1x = parse_bd_ptr;

            if (CHK_NULL(parse_bd_ptr)) {
                DbgBreakIfFastPath( !parse_bd_ptr ) ;
                return LM_STATUS_FAILURE ;
            }

            parse_bd_e1x->ip_hlen_w    = packet->l2pkt_tx_info->lso_ip_hdr_len >> 1;
            parse_bd_e1x->global_data |= (( (eth_hlen) >> 1) << ETH_TX_PARSE_BD_E1X_IP_HDR_START_OFFSET_W_SHIFT);
            parse_bd_e1x->total_hlen_w = mm_cpu_to_le16((packet->l2pkt_tx_info->lso_ip_hdr_len >> 1) + ( (eth_hlen) >> 1));

            if(packet->l2pkt_tx_info->flags & LM_TX_FLAG_TCP_LSO_SNAP_FRAME) {
                parse_bd_e1x->global_data |= ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN;
            }

            if (packet->l2pkt_tx_info->flags & LM_TX_FLAG_COMPUTE_TCP_UDP_CKSUM)
            {
                parse_bd_e1x->tcp_pseudo_csum = mm_cpu_to_le16(packet->l2pkt_tx_info->tcp_pseudo_csum);
                parse_bd_e1x->global_data     |= (packet->l2pkt_tx_info->tcp_nonce_sum_bit << ETH_TX_PARSE_BD_E1X_NS_FLG_SHIFT);
                parse_bd_e1x->total_hlen_w     = mm_cpu_to_le16((total_hlen_bytes) >> 1);
            }

        } else {
            struct eth_tx_parse_bd_e2 *parse_bd_e2 = parse_bd_ptr;
            u32_t val;

            // TCP header offset in WORDs from the start of the packet
            if (is_encapsulated_offload)
            {
                val = (( packet->l2pkt_tx_info->encap_packet_inner_frame_offset +
                         packet->l2pkt_tx_info->encap_packet_inner_ip_relative_offset +
                         packet->l2pkt_tx_info->encap_packet_inner_tcp_relative_offset ) >> 1 );

                /* set if the inner IP header is ipv6 with extension headers */
                if (packet->l2pkt_tx_info->encap_packet_inner_tcp_relative_offset > 40) {
                    parse_bd_e2->parsing_data |= ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR;
                }
            }
            else
            {
                val = ((packet->l2pkt_tx_info->lso_ip_hdr_len + eth_hlen) >> 1);

                if (packet->l2pkt_tx_info->lso_ip_hdr_len > 40) {
                    parse_bd_e2->parsing_data |= ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR;
                }
            }

            parse_bd_e2->parsing_data |= ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W & (val << ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W_SHIFT);

            val = (packet->l2pkt_tx_info->lso_tcp_hdr_len >> 2);
            parse_bd_e2->parsing_data |= ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW & (val << ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT);

            parse_bd_e2->parsing_data = mm_cpu_to_le32(parse_bd_e2->parsing_data);
        }
    }

    if ((!is_encapsulated_offload) &&
        (!CHIP_IS_E1x(pdev) || IS_VFDEV(pdev)))
    {
        struct eth_tx_parse_bd_e2 *parse_bd_e2 = parse_bd_ptr;

        ecore_set_fw_mac_addr(&parse_bd_e2->data.mac_addr.dst_hi,
                              &parse_bd_e2->data.mac_addr.dst_mid,
                              &parse_bd_e2->data.mac_addr.dst_lo,
                              packet->l2pkt_tx_info->dst_mac_addr);
        ecore_set_fw_mac_addr(&parse_bd_e2->data.mac_addr.src_hi,
                              &parse_bd_e2->data.mac_addr.src_mid,
                              &parse_bd_e2->data.mac_addr.src_lo,
                              packet->l2pkt_tx_info->src_mac_addr);
        if (pdev->params.mac_spoof_test) {
            parse_bd_e2->data.mac_addr.src_lo++;
        }
    }

    /* set the dst addr type, if different from unicast */
    if (IS_ETH_MULTICAST(packet->l2pkt_tx_info->dst_mac_addr))
    {
        if (IS_ETH_BROADCAST(packet->l2pkt_tx_info->dst_mac_addr))
        {
            eth_addr_type = BROADCAST_ADDRESS;
        }
        else
        {
            eth_addr_type = MULTICAST_ADDRESS;
        }

        if (CHIP_IS_E1x(pdev))
        {
            struct eth_tx_parse_bd_e1x *parse_bd_e1x = parse_bd_ptr;
            RESET_FLAGS(parse_bd_e1x->global_data, ETH_TX_PARSE_BD_E1X_ETH_ADDR_TYPE);
            parse_bd_e1x->global_data |= (eth_addr_type << ETH_TX_PARSE_BD_E1X_ETH_ADDR_TYPE_SHIFT);
        }
        else
        {
            struct eth_tx_parse_bd_e2 *parse_bd_e2 = parse_bd_ptr;
            RESET_FLAGS(parse_bd_e2->parsing_data, ETH_TX_PARSE_BD_E2_ETH_ADDR_TYPE);
            parse_bd_e2->parsing_data |= (eth_addr_type << ETH_TX_PARSE_BD_E2_ETH_ADDR_TYPE_SHIFT);
        }
    }

    // Save the number of BDs used.  Later we need to add this value back
    // to tx_chain->bd_left when the packet is sent.
    packet->u1.tx.bd_used = start_bd_nbd += (u16_t)frags->cnt;

    packet->u1.tx.next_bd_idx = lm_bd_chain_prod_idx(&tx_chain->bd_chain);
    tx_chain->prod_bseq += packet->size;

    /* There is a PBF limitation on minimum packet size (9B)
     * We assert since we do not expect packet length < 14 */
    DbgBreakIfFastPath(packet->size < ETHERNET_PACKET_HEADER_SIZE);

#if DBG
    for(cnt = 0; cnt < start_bd_nbd; cnt++)
    {
        if (parse_bd_ptr && (cnt == 1))
        {
            if (CHIP_IS_E1x(pdev))
            {
                DbgMessage(pdev, VERBOSEl2tx,
                            "   parse_bd: global_data 0x%x",
                            ((struct eth_tx_parse_bd_e1x *)(&start_bd[cnt]))->global_data);
            }
            else /* E2 */
            {
                DbgMessage(pdev, VERBOSEl2tx,
                            "   parse_bd: parsing_data 0x%08x",
                            mm_le32_to_cpu(((struct eth_tx_parse_bd_e2 *)(&start_bd[cnt]))->parsing_data));
            }
        }
        else
        {
            DbgMessage(pdev, VERBOSEl2tx,
                        "-> frag: %d, bd_flags: %d, nbytes: %d, hi: 0x%x, lo: 0x%x",
                        cnt, start_bd[cnt].bd_flags.as_bitfield, mm_le16_to_cpu(start_bd[cnt].nbytes),
                        mm_le32_to_cpu(start_bd[cnt].addr_hi), mm_le32_to_cpu(start_bd[cnt].addr_lo));
            if (cnt == 0)
            {
                DbgMessage(pdev, VERBOSEl2tx,
                            "      start bd info: nbds: %d, vlan: 0x%x, hdr_nbds: %d",
                            start_bd_nbd, mm_le16_to_cpu(start_bd->vlan_or_ethertype),
                            (start_bd->general_data & ETH_TX_START_BD_HDR_NBDS));
            }
        }
    }
#endif

    start_bd->nbd = mm_cpu_to_le16(start_bd_nbd);

    s_list_push_tail(&tx_chain->active_descq, &packet->link);

    //in the case of a packet consisting of 1 frag only, but with the use of the parsing info BD,
    //the last_bd will point to the START BD!
    //this is because we need to mark both START & END on the START BD.
    //Only the start BD can fill the flags, and we always have at least 2 BDs.

    // Debug message on the parsed_bd
    //DbgMessage(pdev, INFORM, "lm_send_packet() parse_bd: total_hlen %d ip_hlen %d lso_mss %d tcp_flags 0x%x\n",
    //        parse_bd->total_hlen, parse_bd->ip_hlen, parse_bd->lso_mss, parse_bd->tcp_flags);
    //DbgMessage(pdev, INFORM, "lm_send_packet() start_bd: bd_flags 0x%x\n",start_bd->bd_flags);

    // Make sure that the BD data is updated before updating the producer,
    // since the FW might read the BD right after the producer is updated.
    // This is only applicable for weak-ordered memory model archs such
    // as IA-64.  The barrier below is also mandatory since the FW
    // assumes packets must have BDs.
    //order is crucial in case of preemption
    pdev->tx_info.chain[chain_idx].eth_tx_prods.bds_prod = pdev->tx_info.chain[chain_idx].eth_tx_prods.bds_prod +
                                S16_SUB(lm_bd_chain_prod_idx(&tx_chain->bd_chain), old_prod_idx);
    pdev->tx_info.chain[chain_idx].eth_tx_prods.packets_prod = pdev->tx_info.chain[chain_idx].eth_tx_prods.packets_prod + 1;

    //DB
    dq_msg.header.data  = DOORBELL_HDR_T_DB_TYPE; /* tx doorbell normal doorbell type eth */
    dq_msg.zero_fill1   = 0;
    dq_msg.prod         = pdev->tx_info.chain[chain_idx].eth_tx_prods.bds_prod;

    // Make sure that the BD data is updated before updating the producer,
    // since the FW might read the BD right after the producer is updated.
    // This is only applicable for weak-ordered memory model archs such
    // as IA-64.  The following barrier is also mandatory since the FW
    // assumes packets must have BDs.
    //order is crucial in case of preemption
    mm_write_barrier();
    DOORBELL(pdev, chain_idx, *((u32_t *)&dq_msg));

    return LM_STATUS_SUCCESS;
} /* lm_send_packet */

/*******************************************************************************
 * Description:
 *
 * Return:
 ******************************************************************************/
u32_t
lm_get_packets_sent( struct _lm_device_t* pdev,
    u32_t chain_idx,
    s_list_t *sent_list)
{
    lm_tx_chain_t* tx_chain = &LM_TXQ(pdev, chain_idx);
    lm_packet_t*   pkt      = 0;
    u32_t          pkt_cnt  = 0;
    u16_t          old_idx  = lm_bd_chain_cons_idx(&tx_chain->bd_chain);

    /* Get the new consumer idx.  The BDs between new_idx and old_idx
     * are BDs that have been consumed by the chip. */
    u16_t new_idx = mm_le16_to_cpu(*(tx_chain->hw_con_idx_ptr));
    u16_t pkt_num = S16_SUB(new_idx,tx_chain->pkt_idx);

    //We work here at packet granularity (pkt_idx), as opposed to Teton which
    //works at BD granularity. The cons_idx is not relevant anymore in the Tx chain,
    //but we keep it for debugging, as the firmware still maintains a BD consumer.

    DbgBreakIfFastPath(pkt_num == 0);

    while(pkt_num > 0)
    {
        pkt = (lm_packet_t *) s_list_peek_head(&tx_chain->active_descq);

        //instead of the assert, lets check the db counter in the hw!
        //DbgBreakIfFastPath(pkt == NULL);
        if (pkt == NULL)
        {
            lm_collect_idle_storms_dorrbell_asserts(PFDEV(pdev), TRUE, TRUE, TRUE);

            DbgBreakIfFastPath(pkt == NULL);

            return pkt_cnt;
        }

        // TODO check the LSO condition as in Teton
        pkt = (lm_packet_t *) s_list_pop_head(&tx_chain->active_descq);

        /* Advance the old_idx to the start bd_idx of the next packet. */
        old_idx = pkt->u1.tx.next_bd_idx;

        pkt->status = LM_STATUS_SUCCESS;

        lm_bd_chain_bds_consumed(&tx_chain->bd_chain, pkt->u1.tx.bd_used);

        if (pkt->u1.tx.coalesce_buf) {
            /* return the coalesce buffer to the chain's pool */
            lm_put_coalesce_buffer(pdev, tx_chain, pkt->u1.tx.coalesce_buf);
            pkt->u1.tx.coalesce_buf = NULL;
        }

        /* Get an updated new_idx from the status block.  The index may
         * end at the last BD of a page.  This BD is a pointer to the next
         * BD page which we need to skip over. */
        //TODO: need to verify that we have fairness among other protocols since we are also using the
        //      in_dpc_loop_cnt - so don't starve!
        new_idx = mm_le16_to_cpu(*(tx_chain->hw_con_idx_ptr));
        tx_chain->pkt_idx++;
        pkt_num = S16_SUB(new_idx,tx_chain->pkt_idx);
        pkt_cnt++;
        s_list_push_tail(sent_list, &pkt->link);
    }

    // TODO: currently the bd_chain doesn't maintain the cons_idx...
    tx_chain->bd_chain.cons_idx = old_idx;

    DbgMessage(pdev, INFORMl2tx , "lm_get_packets_sent()- func: %d, txidx: %d, txbd con: %d txbd prod: %d \n",
        FUNC_ID(pdev), chain_idx , lm_bd_chain_cons_idx(&tx_chain->bd_chain), lm_bd_chain_prod_idx(&tx_chain->bd_chain));

    return pkt_cnt;
} /* lm_get_packets_sent */