/*
 * Copyright 2014-2017 Cavium, Inc.
 * The contents of this file are subject to the terms of the Common Development
 * and Distribution License, v.1, (the "License").
 *
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the License available at
 * http://opensource.org/licenses/CDDL-1.0
 *
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "lm5706.h"
#include <sys/crc32.h>

/*
 * When using "crc32" or "crc16", these initial CRC values must be given to
 * the respective function the first time it is called. The function can
 * then be called with the return value from the last call of the function
 * to generate a running CRC over multiple data blocks.
 * When the last data block has been processed using the "crc32" algorithm,
 * the CRC value should be inverted to produce the final CRC value,
 * i.e. CRC = ~CRC.
 */

#define startCRC32  (0xFFFFFFFF)    /* CRC initialised to all 1s */

/*
 * Calculating the CRC-32 residual correctly requires that the CRC value
 * be stored in memory little-endian, due to the byte-read, bit-ordering
 * nature of the algorithm.
 */
#define CRC32residual   (0xDEBB20E3)    /* good CRC-32 residual */
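
/*
 * Illustrative sketch only (not compiled in): how a running CRC-32 is
 * accumulated over multiple blocks with the CRC32() macro from
 * <sys/crc32.h>, and how the residual check above is used.  The block
 * names and lengths here are hypothetical.
 */
#if 0
static u32_t
example_running_crc32(u8_t *blk1, u32_t len1, u8_t *blk2, u32_t len2)
{
    u32_t crc;
    u32_t prev;

    /* Seed with startCRC32 for the first block... */
    CRC32(crc, blk1, len1, startCRC32, crc32_table);

    /* ...then feed the previous result back in for later blocks. */
    prev = crc;
    CRC32(crc, blk2, len2, prev, crc32_table);

    /*
     * If the data ends with the frame's own 4-byte FCS, the raw
     * (uninverted) result equals CRC32residual for a good frame.
     * Otherwise, invert to produce the final CRC value: crc = ~crc.
     */
    return crc;
}
#endif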


/*******************************************************************************
 * Description:
 *      Post a single receive buffer at the current producer BD and advance
 *      the producer index, skipping over the next-page pointer BD at the
 *      end of a BD page.
 *
 * Return:
 *      None.
 ******************************************************************************/
static void
post_bd_buffer(
    lm_rx_chain_t *rxq,
    u64_t phy_addr,
    u32_t bd_len)
{
    rx_bd_t *prod_bd;
    rx_bd_t *cur_bd;
    u16_t prod_idx;

    prod_bd = rxq->prod_bd;
    prod_idx = rxq->prod_idx;

    cur_bd = prod_bd;

    prod_bd++;
    prod_idx++;

    /* Check for the last bd on this BD page. */
    if((prod_idx & MAX_BD_PER_PAGE) == MAX_BD_PER_PAGE)
    {
        prod_idx++;
        prod_bd = *((rx_bd_t **) ((tx_bd_next_t *)
            prod_bd)->tx_bd_next_reserved);
    }

    cur_bd->rx_bd_haddr_lo = ((lm_u64_t *)&phy_addr)->as_u32.low;
    cur_bd->rx_bd_haddr_hi = ((lm_u64_t *)&phy_addr)->as_u32.high;
    cur_bd->rx_bd_len = bd_len;
    cur_bd->rx_bd_flags = (RX_BD_FLAGS_END | RX_BD_FLAGS_START);

    rxq->bd_left--;
    rxq->prod_idx = prod_idx;
    rxq->prod_bd = prod_bd;
} /* post_bd_buffer */
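
/*
 * Illustrative sketch only (not compiled in): why the page-end test in
 * post_bd_buffer() works.  It assumes, for illustration, that each BD page
 * holds a power-of-two number of BDs and that MAX_BD_PER_PAGE is that
 * count minus one (e.g. 0xff), with the last BD on every page reserved as
 * a next-page pointer.
 */
#if 0
static int
example_is_next_page_ptr_bd(u16_t bd_idx)
{
    /*
     * With MAX_BD_PER_PAGE == 0xff, indices 0xff, 0x1ff, 0x2ff, ...
     * land on the pointer BD, so both the index and the BD pointer
     * must skip it before a buffer can be posted there.
     */
    return ((bd_idx & MAX_BD_PER_PAGE) == MAX_BD_PER_PAGE);
}
#endif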

#ifndef LM_NON_LEGACY_MODE_SUPPORT
u32_t
lm_post_buffers(
    lm_device_t *pdev,
    u32_t chain_idx,
    lm_packet_t *packet)    /* optional. */
{
    lm_rx_chain_t *rxq;
    u32_t pkt_queued;
    rx_bd_t *cur_bd;
    u16_t cur_idx;

    rxq = &pdev->rx_info.chain[chain_idx];

    pkt_queued = 0;

    /* Make sure we have a bd left for posting a receive buffer. */
    if(packet)
    {
        DbgBreakIf(SIG(packet) != L2PACKET_RX_SIG);

        if(rxq->bd_left == 0)
        {
            s_list_push_tail(&rxq->free_descq, &packet->link);
            packet = NULL;
        }
    }
    else if(rxq->bd_left)
    {
        packet = (lm_packet_t *) s_list_pop_head(&rxq->free_descq);
    }

    while(packet)
    {
        cur_bd = rxq->prod_bd;
        cur_idx = rxq->prod_idx;
        #if DBG
        ((u32_t *) packet->u1.rx.mem_virt)[0] = 0;
        ((u32_t *) packet->u1.rx.mem_virt)[1] = 0;
        ((u32_t *) packet->u1.rx.mem_virt)[2] = 0;
        ((u32_t *) packet->u1.rx.mem_virt)[3] = 0;

        packet->u1.rx.dbg_bd = cur_bd;

        DbgBreakIf(SIG(packet) != L2PACKET_RX_SIG);
        #endif
        post_bd_buffer(
            rxq,
            packet->u1.rx.mem_phy.as_u64,
            packet->u1.rx.buf_size);
        rxq->prod_bseq += packet->u1.rx.buf_size;
        packet->u1.rx.next_bd_idx = rxq->prod_idx;

        /* Tag this bd for debugging.  The last nibble is the chain cid. */
        if(pdev->params.test_mode & TEST_MODE_RX_BD_TAGGING)
        {
            /* Put the bd index in the 12 msb of the flags. */
            cur_bd->rx_bd_flags |= (u16_t)cur_idx << 4;

            cur_bd->unused_0 = (u16_t) (rxq->cid_addr);
        }
        else
        {
            cur_bd->unused_0 = 0;
        }

        /* Move on to the next packet. */
        s_list_push_tail(&rxq->active_descq, &packet->link);
        pkt_queued++;

        if(rxq->bd_left == 0)
        {
            break;
        }

        packet = (lm_packet_t *) s_list_pop_head(&rxq->free_descq);
    }

    if(pkt_queued)
    {
        MBQ_WR16(
            pdev,
            GET_CID(rxq->cid_addr),
            OFFSETOF(l2_bd_chain_context_t, l2ctx_host_bdidx),
            rxq->prod_idx);

        MBQ_WR32(
            pdev,
            GET_CID(rxq->cid_addr),
            OFFSETOF(l2_bd_chain_context_t, l2ctx_host_bseq),
            rxq->prod_bseq);
    }

    return pkt_queued;
} /* lm_post_buffers */
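
/*
 * Illustrative sketch only (not compiled in): a typical replenish call in
 * the legacy path.  Passing a NULL packet makes lm_post_buffers() drain
 * rxq->free_descq until either the free descriptors or the BDs run out;
 * the function name here is hypothetical.
 */
#if 0
static void
example_replenish_rx_chain(lm_device_t *pdev, u32_t chain_idx)
{
    u32_t posted;

    /* Recycle whatever is sitting on the chain's free descriptor queue. */
    posted = lm_post_buffers(pdev, chain_idx, NULL);

    /* The hardware doorbell was already written inside lm_post_buffers(). */
    (void) posted;
}
#endif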


/*******************************************************************************
 * Description:
 *      Walk the RX chain from the driver's consumer index up to the
 *      hardware consumer index, completing each received packet and
 *      appending it to rcvd_list.
 *
 * Return:
 *      The number of packets received.
 ******************************************************************************/
STATIC u32_t
get_packets_rcvd(
    struct _lm_device_t *pdev,
    lm_rx_chain_t *rxq,
    u16_t hw_con_idx,
    s_list_t *rcvd_list)
{
    l2_fhdr_t *rx_hdr;
    lm_packet_t *pkt;
    u32_t byte_cnt;
    u32_t pkt_cnt;

    pkt_cnt = 0;
    byte_cnt = 0;

    /* The consumer index may stop at the end of a page boundary.  In
     * this case, we need to advance it past the next-page pointer BD. */
    if((hw_con_idx & MAX_BD_PER_PAGE) == MAX_BD_PER_PAGE)
    {
        hw_con_idx++;
    }

    while(rxq->con_idx != hw_con_idx)
    {
        DbgBreakIf(S16_SUB(hw_con_idx, rxq->con_idx) <= 0);

        pkt = (lm_packet_t *) s_list_pop_head(&rxq->active_descq);

        DbgBreakIf(pkt == NULL);
        DbgBreakIf(SIG(pkt) != L2PACKET_RX_SIG);

        mm_flush_cache(
            pdev,
            pkt->u1.rx.mem_virt,
            pkt->u1.rx.mem_phy,
            pkt->u1.rx.buf_size,
            FLUSH_CACHE_AFTER_DMA_WRITE);

        rxq->bd_left++;

        /* Advance the rxq->con_idx to the start bd_idx of the next packet. */
        rxq->con_idx = pkt->u1.rx.next_bd_idx;

        rx_hdr = (l2_fhdr_t *) pkt->u1.rx.mem_virt;
        pkt->status = LM_STATUS_SUCCESS;
        pkt->size = rx_hdr->l2_fhdr_pkt_len - 4 /* CRC32 */;
        pkt->u1.rx.flags = 0;

        DbgBreakIf(
            (rx_hdr->l2_fhdr_status & L2_FHDR_STATUS_L2_VLAN_TAG) &&
            pdev->params.keep_vlan_tag &&
            (pkt->size < MIN_ETHERNET_PACKET_SIZE ||
            pkt->size > pdev->params.mtu+4));
        DbgBreakIf(
            (rx_hdr->l2_fhdr_status & L2_FHDR_STATUS_L2_VLAN_TAG) &&
            pdev->params.keep_vlan_tag == 0 &&
            (pkt->size < MIN_ETHERNET_PACKET_SIZE-4 ||
            pkt->size > pdev->params.mtu));
        DbgBreakIf(
            (rx_hdr->l2_fhdr_status & L2_FHDR_STATUS_L2_VLAN_TAG) == 0 &&
            (pkt->size < MIN_ETHERNET_PACKET_SIZE ||
            pkt->size > pdev->params.mtu));

        if(rx_hdr->l2_fhdr_status & L2_FHDR_STATUS_RSS_HASH)
        {
            pkt->u1.rx.flags |= LM_RX_FLAG_VALID_HASH_VALUE;
            pkt->u1.rx.hash_value = rx_hdr->l2_fhdr_hash;
        }

        if(rx_hdr->l2_fhdr_status & L2_FHDR_STATUS_L2_VLAN_TAG)
        {
            pkt->u1.rx.flags |= LM_RX_FLAG_VALID_VLAN_TAG;
            pkt->u1.rx.vlan_tag = rx_hdr->l2_fhdr_vlan_tag;
        }

        if(rx_hdr->l2_fhdr_status & L2_FHDR_STATUS_IP_DATAGRAM)
        {
            /* Bit 0x40 of the error field distinguishes IPv6 from IPv4. */
            if(rx_hdr->l2_fhdr_errors & 0x40)
            {
                pkt->u1.rx.flags |= LM_RX_FLAG_IS_IPV6_DATAGRAM;
            }
            else
            {
                pkt->u1.rx.flags |= LM_RX_FLAG_IS_IPV4_DATAGRAM;
            }

            if(rx_hdr->l2_fhdr_errors & L2_FHDR_ERRORS_IP_BAD_XSUM)
            {
                pkt->u1.rx.ip_cksum = rx_hdr->l2_fhdr_ip_xsum;
            }
            else
            {
                pkt->u1.rx.ip_cksum = 0xffff;
            }
        }

        if(rx_hdr->l2_fhdr_status & L2_FHDR_STATUS_TCP_SEGMENT)
        {
            pkt->u1.rx.flags |= LM_RX_FLAG_IS_TCP_SEGMENT;

            if(rx_hdr->l2_fhdr_errors & L2_FHDR_ERRORS_TCP_BAD_XSUM)
            {
                pkt->u1.rx.tcp_or_udp_cksum = rx_hdr->l2_fhdr_tcp_udp_xsum;
            }
            else
            {
                pkt->u1.rx.tcp_or_udp_cksum = 0xffff;
            }
        }
        else if(rx_hdr->l2_fhdr_status & L2_FHDR_STATUS_UDP_DATAGRAM)
        {
            pkt->u1.rx.flags |= LM_RX_FLAG_IS_UDP_DATAGRAM;

            if(rx_hdr->l2_fhdr_errors & L2_FHDR_ERRORS_UDP_BAD_XSUM)
            {
                pkt->u1.rx.tcp_or_udp_cksum = rx_hdr->l2_fhdr_tcp_udp_xsum;
            }
            else
            {
                pkt->u1.rx.tcp_or_udp_cksum = 0xffff;
            }
        }

        if((rx_hdr->l2_fhdr_errors & (
            L2_FHDR_ERRORS_BAD_CRC |
            L2_FHDR_ERRORS_PHY_DECODE |
            L2_FHDR_ERRORS_ALIGNMENT |
            L2_FHDR_ERRORS_TOO_SHORT |
            L2_FHDR_ERRORS_GIANT_FRAME)) == 0)
        {
            if(pdev->params.test_mode & TEST_MODE_VERIFY_RX_CRC)
            {
                uint32_t crc;

                /* The CRC offset depends on whether there is a lookahead
                 * buffer, since the L2 frame header could be in the
                 * lookahead buffer. */
                CRC32(crc, (u8_t *)(pkt->u1.rx.mem_virt + L2RX_FRAME_HDR_LEN),
                    rx_hdr->l2_fhdr_pkt_len, startCRC32, crc32_table);
                if(crc != CRC32residual)
                {
                    TRIGGER(pdev, TEST_MODE_VERIFY_RX_CRC);

                    DbgBreakMsg("Bad CRC32 in rx packet.\n");

                    pkt->status = LM_STATUS_FAILURE;
                }
            }
        }
        else
        {
            if(!(pdev->rx_info.mask[rxq->idx] & LM_RX_MASK_ACCEPT_ERROR_PACKET))
            {
                pkt->status = LM_STATUS_FAILURE;
            }

            pdev->rx_info.stats.err++;

            if(rx_hdr->l2_fhdr_errors & L2_FHDR_ERRORS_BAD_CRC)
            {
                pdev->rx_info.stats.crc++;
            }

            if(rx_hdr->l2_fhdr_errors & L2_FHDR_ERRORS_PHY_DECODE)
            {
                pdev->rx_info.stats.phy_err++;
            }

            if(rx_hdr->l2_fhdr_errors & L2_FHDR_ERRORS_ALIGNMENT)
            {
                pdev->rx_info.stats.alignment++;
            }

            if(rx_hdr->l2_fhdr_errors & L2_FHDR_ERRORS_TOO_SHORT)
            {
                pdev->rx_info.stats.short_packet++;
            }

            if(rx_hdr->l2_fhdr_errors & L2_FHDR_ERRORS_GIANT_FRAME)
            {
                pdev->rx_info.stats.giant_packet++;
            }

            DbgBreakIf(
                rx_hdr->l2_fhdr_errors & ~(L2_FHDR_ERRORS_BAD_CRC |
                L2_FHDR_ERRORS_PHY_DECODE | L2_FHDR_ERRORS_ALIGNMENT |
                L2_FHDR_ERRORS_TOO_SHORT | L2_FHDR_ERRORS_GIANT_FRAME));
        }

        pkt_cnt++;
        byte_cnt += pkt->size;

        s_list_push_tail(rcvd_list, &pkt->link);
    }

    return pkt_cnt;
} /* get_packets_rcvd */


/*******************************************************************************
 * Description:
 *      Retrieve received packets from the RX chain.  If con_idx is
 *      non-zero it is used as the hardware consumer index; otherwise the
 *      hardware consumer index is polled until no more packets arrive.
 *
 * Return:
 *      The number of packets received.
 ******************************************************************************/
u32_t
lm_get_packets_rcvd(
    struct _lm_device_t *pdev,
    u32_t qidx,
    u32_t con_idx,
    s_list_t *rcvd_list)
{
    lm_rx_chain_t *rxq;
    u16_t hw_con_idx;
    u32_t pkts_added;
    u32_t pkt_cnt;

    rxq = &pdev->rx_info.chain[qidx];

    if(con_idx)
    {
        hw_con_idx = con_idx & 0xffff;

        pkt_cnt = get_packets_rcvd(pdev, rxq, hw_con_idx, rcvd_list);
    }
    else
    {
        pkt_cnt = 0;

        for(; ;)
        {
            hw_con_idx = *rxq->hw_con_idx_ptr;

            pkts_added = get_packets_rcvd(pdev, rxq, hw_con_idx, rcvd_list);
            if(pkts_added == 0)
            {
                break;
            }

            pkt_cnt += pkts_added;
        }
    }

    return pkt_cnt;
} /* lm_get_packets_rcvd */


/*******************************************************************************
 * Description:
 *      Service a receive interrupt: collect all received packets and
 *      indicate them to the upper layer in batches of up to
 *      MAX_PACKETS_PER_INDICATION.
 *
 * Return:
 *      None.
 ******************************************************************************/
void
lm_service_rx_int(
    lm_device_t *pdev,
    u32_t chain_idx)
{
    lm_packet_t *pkt_arr[MAX_PACKETS_PER_INDICATION];
    lm_packet_t **pkt_arr_ptr;
    s_list_t rcvd_list;
    lm_packet_t *pkt;
    u32_t pkt_cnt;

    s_list_init(&rcvd_list, NULL, NULL, 0);

    (void) lm_get_packets_rcvd(pdev, chain_idx, 0, &rcvd_list);

    while(!s_list_is_empty(&rcvd_list))
    {
        pkt_arr_ptr = pkt_arr;

        for(pkt_cnt = 0; pkt_cnt < MAX_PACKETS_PER_INDICATION; pkt_cnt++)
        {
            pkt = (lm_packet_t *) s_list_pop_head(&rcvd_list);
            if(pkt == NULL)
            {
                break;
            }

            *pkt_arr_ptr = pkt;
            pkt_arr_ptr++;
        }

        mm_indicate_rx(pdev, chain_idx, pkt_arr, pkt_cnt);
    }
} /* lm_service_rx_int */
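
/*
 * Illustrative sketch only (not compiled in): how the legacy receive path
 * fits together in an interrupt handler.  The handler name and the way a
 * chain is selected here are hypothetical.
 */
#if 0
static void
example_rx_isr(lm_device_t *pdev, u32_t chain_idx)
{
    /* Complete and indicate everything the hardware has finished... */
    lm_service_rx_int(pdev, chain_idx);

    /* ...then repost recycled buffers so the chain does not run dry. */
    (void) lm_post_buffers(pdev, chain_idx, NULL);
}
#endif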


/*******************************************************************************
 * Description:
 *      Abort all outstanding receive packets on the given chain and
 *      return them to the free descriptor queue.
 *
 * Return:
 *      None.
 ******************************************************************************/
void
lm_recv_abort(
    struct _lm_device_t *pdev,
    u32_t idx)
{
    lm_rx_chain_t *rxq;
    lm_packet_t *pkt;

    DbgBreakIf(idx >= pdev->rx_info.num_rxq);

    rxq = &pdev->rx_info.chain[idx];

    for(; ;)
    {
        pkt = (lm_packet_t *) s_list_pop_head(&rxq->active_descq);
        if(pkt == NULL)
        {
            break;
        }

        pkt->status = LM_STATUS_ABORTED;
        rxq->bd_left++;
        pdev->rx_info.stats.aborted++;

        s_list_push_tail(&rxq->free_descq, &pkt->link);
    }
} /* lm_recv_abort */
#else /* LM_NON_LEGACY_MODE_SUPPORT */
/*******************************************************************************
 * Description:
 *      Post a receive buffer described by a fragment list.  With VMQ
 *      lookahead headers the buffer is split across two BDs; otherwise a
 *      single BD is used.
 *
 * Return:
 *      The number of packets queued (0 or 1).
 ******************************************************************************/
u32_t
lm_post_buffers(
    lm_device_t *pdev,
    u32_t chain_idx,
    lm_packet_t *packet,
    lm_frag_list_t *frags)
{
    lm_rx_chain_t *rxq;
    u32_t pkt_queued;
    rx_bd_t *cur_bd;
    u16_t cur_idx;
    lm_pkt_rx_info_t *pkt_info;
    lm_address_t mem_phy;

    rxq = &pdev->rx_info.chain[chain_idx];

    pkt_queued = 0;

    /* Make sure we have a bd left for posting a receive buffer. */
    if(packet)
    {
        if(rxq->vmq_lookahead_size && rxq->bd_left < 2)
        {
            return pkt_queued;
        }
        else if(rxq->bd_left == 0)
        {
            return pkt_queued;
        }

        pkt_info = packet->u1.rx.rx_pkt_info;

        cur_bd = rxq->prod_bd;
        cur_idx = rxq->prod_idx;
        #if DBG
        ((u32_t *) pkt_info->mem_virt)[0] = 0;
        ((u32_t *) pkt_info->mem_virt)[1] = 0;
        ((u32_t *) pkt_info->mem_virt)[2] = 0;
        ((u32_t *) pkt_info->mem_virt)[3] = 0;
        packet->u1.rx.dbg_bd = cur_bd;
        packet->u1.rx.dbg_bd1 = NULL;
        #endif
        if(rxq->vmq_lookahead_size)
        {
            /* Split the buffer across 2 BDs for lookahead header support.
             * We cannot allow an odd number of BDs, and the first BD must
             * at least fit the L2 frame header. */
            DbgBreakIf(frags->cnt != 2);
            DbgBreakIf(frags->frag_arr[0].size < rxq->vmq_lookahead_size);

            post_bd_buffer(
                rxq,
                frags->frag_arr[0].addr.as_u64,
                frags->frag_arr[0].size);
            cur_bd->rx_bd_flags |= RX_BD_FLAGS_HEADERSPLIT;
            rxq->prod_bseq += frags->frag_arr[0].size;

            #if DBG
            packet->u1.rx.dbg_bd1 = rxq->prod_bd;
            #endif
            post_bd_buffer(
                rxq,
                frags->frag_arr[1].addr.as_u64,
                frags->frag_arr[1].size);
            rxq->prod_bseq += frags->frag_arr[1].size;
        }
        else
        {
            DbgBreakIf(frags->cnt != 1);
            post_bd_buffer(
                rxq,
                frags->frag_arr[0].addr.as_u64,
                frags->frag_arr[0].size);
            rxq->prod_bseq += frags->frag_arr[0].size;
            if(pdev->params.test_mode & TEST_MODE_RX_BD_TAGGING)
            {
                /* Put the bd index in the 12 msb of the flags. */
                cur_bd->rx_bd_flags |= (u16_t)cur_idx << 4;
            }
        }

        packet->u1.rx.next_bd_idx = rxq->prod_idx;

        /* Tag this bd for debugging.  The last nibble is the chain cid. */
        if(pdev->params.test_mode & TEST_MODE_RX_BD_TAGGING)
        {
            cur_bd->unused_0 = (u16_t) (rxq->cid_addr);
        }
        else
        {
            cur_bd->unused_0 = 0;
        }

        /* Move on to the next packet. */
        s_list_push_tail(&rxq->active_descq, &packet->link);
        pkt_queued++;
    }

    return pkt_queued;
} /* lm_post_buffers */

/*******************************************************************************
 * Description:
 *      Write the RX producer BD index and byte sequence to the chain's
 *      context, notifying the hardware of newly posted buffers.
 *
 * Return:
 *      None.
 ******************************************************************************/
void
lm_post_rx_bd(
    lm_device_t *pdev,
    lm_rx_chain_t *rxq)
{
    MBQ_WR16(
        pdev,
        GET_CID(rxq->cid_addr),
        OFFSETOF(l2_bd_chain_context_t, l2ctx_host_bdidx),
        rxq->prod_idx);

    MBQ_WR32(
        pdev,
        GET_CID(rxq->cid_addr),
        OFFSETOF(l2_bd_chain_context_t, l2ctx_host_bseq),
        rxq->prod_bseq);
} /* lm_post_rx_bd */
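
/*
 * Illustrative sketch only (not compiled in): posting a lookahead-split
 * receive buffer in the non-legacy path and then ringing the doorbell.
 * The function name, fragment addresses, and sizes here are hypothetical;
 * the only firm requirements, per the checks above, are frags->cnt == 2
 * and a first fragment at least vmq_lookahead_size bytes long.
 */
#if 0
static void
example_post_vmq_buffer(
    lm_device_t *pdev,
    u32_t chain_idx,
    lm_packet_t *pkt,
    lm_frag_list_t *frags,   /* caller-provided, with room for 2 frags */
    lm_address_t hdr_phy,    /* lookahead header buffer */
    lm_address_t data_phy,   /* remainder of the receive buffer */
    u32_t data_size)
{
    lm_rx_chain_t *rxq = &pdev->rx_info.chain[chain_idx];

    frags->cnt = 2;
    frags->frag_arr[0].addr = hdr_phy;
    frags->frag_arr[0].size = rxq->vmq_lookahead_size;
    frags->frag_arr[1].addr = data_phy;
    frags->frag_arr[1].size = data_size;

    if(lm_post_buffers(pdev, chain_idx, pkt, frags))
    {
        /* The doorbell is a separate step in the non-legacy path. */
        lm_post_rx_bd(pdev, rxq);
    }
}
#endif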

/*******************************************************************************
 * Description:
 *      Non-legacy version of get_packets_rcvd().  Also handles packets
 *      aborted by the hardware and lookahead-split BDs.
 *
 * Return:
 *      The number of packets received.
 ******************************************************************************/
STATIC u32_t
get_packets_rcvd(
    struct _lm_device_t *pdev,
    lm_rx_chain_t *rxq,
    u16_t hw_con_idx,
    s_list_t *rcvd_list)
{
    l2_fhdr_t *rx_hdr;
    lm_packet_t *pkt;
    u32_t byte_cnt;
    u32_t pkt_cnt;
    lm_pkt_rx_info_t *pkt_info;
    u8_t l2_abort_packet = FALSE;

    pkt_cnt = 0;
    byte_cnt = 0;

    /* The consumer index may stop at the end of a page boundary.  In
     * this case, we need to advance it past the next-page pointer BD. */
    if((hw_con_idx & MAX_BD_PER_PAGE) == MAX_BD_PER_PAGE)
    {
        hw_con_idx++;
    }

    while(rxq->con_idx != hw_con_idx)
    {
        DbgBreakIf(S16_SUB(hw_con_idx, rxq->con_idx) <= 0);

        pkt = (lm_packet_t *) s_list_pop_head(&rxq->active_descq);

        DbgBreakIf(pkt == NULL);
        if(!pkt)
        {
            DbgBreakIf(!s_list_is_empty(&rxq->active_descq));
            break;
        }
        pkt_info = pkt->u1.rx.rx_pkt_info;

        //mm_flush_cache(
        //    pdev,
        //    pkt_info->mem_virt,
        //    pkt->sgl->Elements[0].Address,
        //    pkt_info->size,
        //    FLUSH_CACHE_AFTER_DMA_WRITE);

        /* With lookahead header support, each packet was split into 2 BDs. */
        rxq->bd_left += rxq->vmq_lookahead_size ? 2 : 1;

        /* Advance the rxq->con_idx to the start bd_idx of the next packet. */
        rxq->con_idx = pkt->u1.rx.next_bd_idx;

        rx_hdr = (l2_fhdr_t *) pkt_info->mem_virt;
        if(l2_abort_packet == FALSE &&
           rx_hdr->l2_fhdr_pkt_len == 0)
        {
            DbgBreakIf(!(rx_hdr->l2_fhdr_errors & L2_FHDR_ERRORS_ABORT_PKT));
            /* Set upon the first BD detecting L2_FHDR_ERRORS_ABORT_PKT. */
            l2_abort_packet = TRUE;
        }

        if(l2_abort_packet)
        {
            pkt->status = LM_STATUS_ABORTED;
            pkt_info->size = 0;
            pdev->rx_info.stats.aborted++;
        }
        else
        {
            pkt->status = LM_STATUS_SUCCESS;
            pkt_info->size = rx_hdr->l2_fhdr_pkt_len - 4 /* CRC32 */;
            pkt_info->flags = 0;

            DbgBreakIf(
                (rx_hdr->l2_fhdr_status & L2_FHDR_STATUS_L2_VLAN_TAG) &&
                pdev->params.keep_vlan_tag &&
                (pkt_info->size < MIN_ETHERNET_PACKET_SIZE ||
                pkt_info->size > pdev->params.mtu+4));
            DbgBreakIf(
                (rx_hdr->l2_fhdr_status & L2_FHDR_STATUS_L2_VLAN_TAG) &&
                pdev->params.keep_vlan_tag == 0 &&
                (pkt_info->size < MIN_ETHERNET_PACKET_SIZE-4 ||
                pkt_info->size > pdev->params.mtu));
            DbgBreakIf(
                (rx_hdr->l2_fhdr_status & L2_FHDR_STATUS_L2_VLAN_TAG) == 0 &&
                (pkt_info->size < MIN_ETHERNET_PACKET_SIZE ||
                pkt_info->size > pdev->params.mtu));

            if(rx_hdr->l2_fhdr_status & L2_FHDR_STATUS_RSS_HASH)
            {
                pkt_info->flags |= LM_RX_FLAG_VALID_HASH_VALUE;
                pkt->u1.rx.hash_value = rx_hdr->l2_fhdr_hash;
            }

            if(rx_hdr->l2_fhdr_status & L2_FHDR_STATUS_L2_VLAN_TAG)
            {
                pkt_info->flags |= LM_RX_FLAG_VALID_VLAN_TAG;
                pkt_info->vlan_tag = rx_hdr->l2_fhdr_vlan_tag;
            }

            if(rx_hdr->l2_fhdr_status & L2_FHDR_STATUS_IP_DATAGRAM)
            {
                /* Bit 0x40 of the error field distinguishes IPv6 from IPv4. */
                if(rx_hdr->l2_fhdr_errors & 0x40)
                {
                    pkt_info->flags |= LM_RX_FLAG_IS_IPV6_DATAGRAM;
                }
                else
                {
                    pkt_info->flags |= LM_RX_FLAG_IS_IPV4_DATAGRAM;
                }

                if(rx_hdr->l2_fhdr_errors & L2_FHDR_ERRORS_IP_BAD_XSUM)
                {
                    pkt_info->flags |= LM_RX_FLAG_IP_CKSUM_IS_BAD;
                }
                else
                {
                    pkt_info->flags |= LM_RX_FLAG_IP_CKSUM_IS_GOOD;
                }
            }

            if(rx_hdr->l2_fhdr_status & L2_FHDR_STATUS_TCP_SEGMENT)
            {
                pkt_info->flags |= LM_RX_FLAG_IS_TCP_SEGMENT;

                if(rx_hdr->l2_fhdr_errors & L2_FHDR_ERRORS_TCP_BAD_XSUM)
                {
                    pkt_info->flags |= LM_RX_FLAG_TCP_CKSUM_IS_BAD;
                }
                else
                {
                    pkt_info->flags |= LM_RX_FLAG_TCP_CKSUM_IS_GOOD;
                }
            }
            else if(rx_hdr->l2_fhdr_status & L2_FHDR_STATUS_UDP_DATAGRAM)
            {
                pkt_info->flags |= LM_RX_FLAG_IS_UDP_DATAGRAM;

                if(rx_hdr->l2_fhdr_errors & L2_FHDR_ERRORS_UDP_BAD_XSUM)
                {
                    pkt_info->flags |= LM_RX_FLAG_UDP_CKSUM_IS_BAD;
                }
                else
                {
                    pkt_info->flags |= LM_RX_FLAG_UDP_CKSUM_IS_GOOD;
                }
            }

            if((rx_hdr->l2_fhdr_errors & (
                L2_FHDR_ERRORS_BAD_CRC |
                L2_FHDR_ERRORS_PHY_DECODE |
                L2_FHDR_ERRORS_ALIGNMENT |
                L2_FHDR_ERRORS_TOO_SHORT |
                L2_FHDR_ERRORS_GIANT_FRAME)) == 0)
            {
                if(pdev->params.test_mode & TEST_MODE_VERIFY_RX_CRC)
                {
                    uint32_t crc;

                    /* The CRC offset depends on whether there is a lookahead
                     * buffer, since the L2 frame header could be in the
                     * lookahead buffer. */
                    CRC32(crc, (u8_t *)pkt_info->mem_virt + L2RX_FRAME_HDR_LEN,
                        rx_hdr->l2_fhdr_pkt_len, startCRC32, crc32_table);
                    if(crc != CRC32residual)
                    {
                        TRIGGER(pdev, TEST_MODE_VERIFY_RX_CRC);

                        DbgBreakMsg("Bad CRC32 in rx packet.\n");

                        pkt->status = LM_STATUS_FAILURE;
                    }
                }
            }
            else
            {
                if(!(pdev->rx_info.mask[rxq->idx] & LM_RX_MASK_ACCEPT_ERROR_PACKET))
                {
                    pkt->status = LM_STATUS_FAILURE;
                }

                pdev->rx_info.stats.err++;

                if(rx_hdr->l2_fhdr_errors & L2_FHDR_ERRORS_BAD_CRC)
                {
                    pdev->rx_info.stats.crc++;
                }

                if(rx_hdr->l2_fhdr_errors & L2_FHDR_ERRORS_PHY_DECODE)
                {
                    pdev->rx_info.stats.phy_err++;
                }

                if(rx_hdr->l2_fhdr_errors & L2_FHDR_ERRORS_ALIGNMENT)
                {
                    pdev->rx_info.stats.alignment++;
                }

                if(rx_hdr->l2_fhdr_errors & L2_FHDR_ERRORS_TOO_SHORT)
                {
                    pdev->rx_info.stats.short_packet++;
                }

                if(rx_hdr->l2_fhdr_errors & L2_FHDR_ERRORS_GIANT_FRAME)
                {
                    pdev->rx_info.stats.giant_packet++;
                }

                DbgBreakIf(
                    rx_hdr->l2_fhdr_errors & ~(L2_FHDR_ERRORS_BAD_CRC |
                    L2_FHDR_ERRORS_PHY_DECODE | L2_FHDR_ERRORS_ALIGNMENT |
                    L2_FHDR_ERRORS_TOO_SHORT | L2_FHDR_ERRORS_GIANT_FRAME));
            }
        }

        pkt_cnt++;
        byte_cnt += pkt_info->size;

        s_list_push_tail(rcvd_list, &pkt->link);
    }

    return pkt_cnt;
} /* get_packets_rcvd */


/*******************************************************************************
 * Description:
 *      Retrieve received packets from the RX chain.  If con_idx is
 *      non-zero it is used as the hardware consumer index; otherwise the
 *      hardware consumer index is polled until no more packets arrive.
 *
 * Return:
 *      The number of packets received.
 ******************************************************************************/
u32_t
lm_get_packets_rcvd(
    struct _lm_device_t *pdev,
    u32_t qidx,
    u32_t con_idx,
    s_list_t *rcvd_list)
{
    lm_rx_chain_t *rxq;
    u16_t hw_con_idx;
    u32_t pkts_added;
    u32_t pkt_cnt;

    rxq = &pdev->rx_info.chain[qidx];

    if(con_idx)
    {
        hw_con_idx = con_idx & 0xffff;

        pkt_cnt = get_packets_rcvd(pdev, rxq, hw_con_idx, rcvd_list);
    }
    else
    {
        pkt_cnt = 0;

        for(; ;)
        {
            hw_con_idx = *rxq->hw_con_idx_ptr;

            pkts_added = get_packets_rcvd(pdev, rxq, hw_con_idx, rcvd_list);
            if(pkts_added == 0)
            {
                break;
            }

            pkt_cnt += pkts_added;
        }
    }

    return pkt_cnt;
} /* lm_get_packets_rcvd */


/*******************************************************************************
 * Description:
 *      Service a receive interrupt: collect all received packets and
 *      indicate them to the upper layer in batches of up to
 *      MAX_PACKETS_PER_INDICATION.
 *
 * Return:
 *      None.
 ******************************************************************************/
void
lm_service_rx_int(
    lm_device_t *pdev,
    u32_t chain_idx)
{
    lm_packet_t *pkt_arr[MAX_PACKETS_PER_INDICATION];
    lm_packet_t **pkt_arr_ptr;
    s_list_t rcvd_list;
    lm_packet_t *pkt;
    u32_t pkt_cnt;

    s_list_init(&rcvd_list, NULL, NULL, 0);

    (void) lm_get_packets_rcvd(pdev, chain_idx, 0, &rcvd_list);

    while(!s_list_is_empty(&rcvd_list))
    {
        pkt_arr_ptr = pkt_arr;

        for(pkt_cnt = 0; pkt_cnt < MAX_PACKETS_PER_INDICATION; pkt_cnt++)
        {
            pkt = (lm_packet_t *) s_list_pop_head(&rcvd_list);
            if(pkt == NULL)
            {
                break;
            }

            *pkt_arr_ptr = pkt;
            pkt_arr_ptr++;
        }

        mm_indicate_rx(pdev, chain_idx, pkt_arr, pkt_cnt, TRUE);
    }
} /* lm_service_rx_int */


/*******************************************************************************
 * Description:
 *      Abort all outstanding receive packets on the given chain and
 *      indicate them to the upper layer in batches of up to
 *      MAX_PACKETS_PER_INDICATION.
 *
 * Return:
 *      None.
 ******************************************************************************/
void
lm_recv_abort(
    struct _lm_device_t *pdev,
    u32_t idx)
{
    lm_rx_chain_t *rxq;
    lm_packet_t *pkt;
    lm_packet_t *pkt_arr[MAX_PACKETS_PER_INDICATION];
    lm_packet_t **pkt_arr_ptr;
    u32_t pkt_cnt;

    rxq = &pdev->rx_info.chain[idx];

    while(!s_list_is_empty(&rxq->active_descq))
    {
        pkt_arr_ptr = pkt_arr;

        for(pkt_cnt = 0; pkt_cnt < MAX_PACKETS_PER_INDICATION; pkt_cnt++)
        {
            pkt = (lm_packet_t *) s_list_pop_head(&rxq->active_descq);
            if(pkt == NULL)
            {
                break;
            }

            pkt->status = LM_STATUS_ABORTED;
            /* With lookahead header support, each packet was split into 2 BDs. */
            rxq->bd_left += rxq->vmq_lookahead_size ? 2 : 1;
            pdev->rx_info.stats.aborted++;

            *pkt_arr_ptr = pkt;
            pkt_arr_ptr++;
        }

        mm_indicate_rx(pdev, idx, pkt_arr, pkt_cnt, FALSE);
    }
} /* lm_recv_abort */

#endif /* LM_NON_LEGACY_MODE_SUPPORT */