1d14abf15SRobert Mustacchi #include "lm5710.h"
2d14abf15SRobert Mustacchi #include "command.h"
3d14abf15SRobert Mustacchi #include "bd_chain.h"
4d14abf15SRobert Mustacchi #include "ecore_common.h"
5d14abf15SRobert Mustacchi #include "mm.h"
6d14abf15SRobert Mustacchi 
7d14abf15SRobert Mustacchi #define OOO_CID_USTRORM_PROD_DIFF           (0x4000)
8d14abf15SRobert Mustacchi 
u8_t lm_is_rx_completion(lm_device_t *pdev, u8_t chain_idx)
{
    lm_rcq_chain_t *rcq_chain = &LM_RCQ(pdev, chain_idx);
    u8_t            has_work  = FALSE;

    DbgBreakIf(!(pdev && rcq_chain));

    /* The hw_con_idx_ptr of the rcq_chain points directly to the Rx index in
     * the USTORM part of the non-default status block: a completion is
     * pending whenever the hardware consumer differs from our own consumer. */
    if (rcq_chain->hw_con_idx_ptr)
    {
        u16_t const hw_cons = mm_le16_to_cpu(*rcq_chain->hw_con_idx_ptr);

        if (hw_cons != lm_bd_chain_cons_idx(&rcq_chain->bd_chain))
        {
            has_work = TRUE;
        }
    }

    DbgMessage(pdev, INFORMi, "lm_is_rx_completion: result is:%s\n", has_work ? "TRUE" : "FALSE");

    return has_work;
}
27d14abf15SRobert Mustacchi 
28d14abf15SRobert Mustacchi /*******************************************************************************
29d14abf15SRobert Mustacchi  * Description:
30d14abf15SRobert Mustacchi  *  set both rcq, rx bd and rx sge (if valid) prods
31d14abf15SRobert Mustacchi  * Return:
32d14abf15SRobert Mustacchi  ******************************************************************************/
lm_rx_set_prods(lm_device_t * pdev,u16_t const iro_prod_offset,lm_bd_chain_t * rcq_chain_bd,lm_bd_chain_t * rx_chain_bd,lm_bd_chain_t * rx_chain_sge,const u32_t chain_idx)33d14abf15SRobert Mustacchi static void FORCEINLINE lm_rx_set_prods( lm_device_t     *pdev,
34d14abf15SRobert Mustacchi                                          u16_t const     iro_prod_offset,
35d14abf15SRobert Mustacchi                                          lm_bd_chain_t   *rcq_chain_bd,
36d14abf15SRobert Mustacchi                                          lm_bd_chain_t   *rx_chain_bd,
37d14abf15SRobert Mustacchi                                          lm_bd_chain_t   *rx_chain_sge,
38d14abf15SRobert Mustacchi                                          const u32_t     chain_idx )
39d14abf15SRobert Mustacchi {
40d14abf15SRobert Mustacchi     lm_rx_chain_t*  rxq_chain           = &LM_RXQ(pdev, chain_idx);
41d14abf15SRobert Mustacchi     u32_t           val32               = 0;
42d14abf15SRobert Mustacchi     u64_t           val64               = 0;
43d14abf15SRobert Mustacchi     u16_t           val16_lo            = lm_bd_chain_prod_idx(rcq_chain_bd);
44d14abf15SRobert Mustacchi     u16_t           val16_hi            = lm_bd_chain_prod_idx(rx_chain_bd);
45d14abf15SRobert Mustacchi     u32_t const     ustorm_bar_offset   = (IS_CHANNEL_VFDEV(pdev)) ? VF_BAR0_USDM_QUEUES_OFFSET: BAR_USTRORM_INTMEM ;
46d14abf15SRobert Mustacchi 
47d14abf15SRobert Mustacchi     if(OOO_CID(pdev) == chain_idx)
48d14abf15SRobert Mustacchi     {
49d14abf15SRobert Mustacchi         DbgBreakIfFastPath( NULL != rx_chain_sge );
50d14abf15SRobert Mustacchi         DbgBreakIfFastPath(IS_CHANNEL_VFDEV(pdev));
51d14abf15SRobert Mustacchi 
52d14abf15SRobert Mustacchi         LM_INTMEM_WRITE16(PFDEV(pdev),
53d14abf15SRobert Mustacchi                           TSTORM_ISCSI_L2_ISCSI_OOO_PROD_OFFSET(FUNC_ID(pdev)),
54d14abf15SRobert Mustacchi                           rxq_chain->common.bd_prod_without_next,
55d14abf15SRobert Mustacchi                           BAR_TSTRORM_INTMEM);
56d14abf15SRobert Mustacchi 
57d14abf15SRobert Mustacchi         // Ugly FW solution OOO FW wants the
58d14abf15SRobert Mustacchi         val16_lo    += OOO_CID_USTRORM_PROD_DIFF;
59d14abf15SRobert Mustacchi         val16_hi    += OOO_CID_USTRORM_PROD_DIFF;
60d14abf15SRobert Mustacchi     }
61d14abf15SRobert Mustacchi 
62d14abf15SRobert Mustacchi     val32       = ((u32_t)(val16_hi << 16) | val16_lo);
63d14abf15SRobert Mustacchi 
64d14abf15SRobert Mustacchi     //notify the fw of the prod of the RCQ. No need to do that for the Rx bd chain.
65d14abf15SRobert Mustacchi     if( rx_chain_sge )
66d14abf15SRobert Mustacchi     {
67d14abf15SRobert Mustacchi         val64 = (((u64_t)lm_bd_chain_prod_idx(rx_chain_sge))<<32) | val32 ;
68d14abf15SRobert Mustacchi 
69d14abf15SRobert Mustacchi         LM_INTMEM_WRITE64(PFDEV(pdev),
70d14abf15SRobert Mustacchi                           iro_prod_offset,
71d14abf15SRobert Mustacchi                           val64,
72d14abf15SRobert Mustacchi                           ustorm_bar_offset);
73d14abf15SRobert Mustacchi     }
74d14abf15SRobert Mustacchi     else
75d14abf15SRobert Mustacchi     {
76d14abf15SRobert Mustacchi         LM_INTMEM_WRITE32(PFDEV(pdev),
77d14abf15SRobert Mustacchi                           iro_prod_offset,
78d14abf15SRobert Mustacchi                           val32,
79d14abf15SRobert Mustacchi                           ustorm_bar_offset);
80d14abf15SRobert Mustacchi     }
81d14abf15SRobert Mustacchi }
82d14abf15SRobert Mustacchi /*******************************************************************************
83d14abf15SRobert Mustacchi  * Description:
84d14abf15SRobert Mustacchi  *  rx_chain_bd always valid, rx_chain_sge valid only in case we are LAH enabled in this queue
 *  all if() checking will always be done on rx_chain_bd since it is always valid, and the sge chain must stay consistent with it
86d14abf15SRobert Mustacchi  *  We verify it in case sge is valid
87d14abf15SRobert Mustacchi  *  all bd_xxx operations will be done on both
88d14abf15SRobert Mustacchi  * Return:
89d14abf15SRobert Mustacchi  ******************************************************************************/
90d14abf15SRobert Mustacchi u32_t
lm_post_buffers(lm_device_t * pdev,u32_t chain_idx,lm_packet_t * packet,u8_t const is_tpa)91d14abf15SRobert Mustacchi lm_post_buffers(
92d14abf15SRobert Mustacchi     lm_device_t *pdev,
93d14abf15SRobert Mustacchi     u32_t chain_idx,
94d14abf15SRobert Mustacchi     lm_packet_t *packet,/* optional. */
95d14abf15SRobert Mustacchi     u8_t const  is_tpa)
96d14abf15SRobert Mustacchi {
97d14abf15SRobert Mustacchi     lm_rx_chain_common_t*   rxq_chain_common    = NULL;
98d14abf15SRobert Mustacchi     lm_bd_chain_t*          rx_chain_bd         = NULL;
99d14abf15SRobert Mustacchi     lm_rx_chain_t*          rxq_chain           = NULL;
100d14abf15SRobert Mustacchi     lm_tpa_chain_t *        tpa_chain           = NULL;
101d14abf15SRobert Mustacchi     lm_bd_chain_t*          bd_chain_to_check   = NULL;
102d14abf15SRobert Mustacchi     lm_rcq_chain_t*         rcq_chain           = &LM_RCQ(pdev, chain_idx);
103d14abf15SRobert Mustacchi     lm_bd_chain_t*          rx_chain_sge        = NULL;
104d14abf15SRobert Mustacchi     u32_t                   pkt_queued          = 0;
105d14abf15SRobert Mustacchi     struct eth_rx_bd*       cur_bd              = NULL;
106d14abf15SRobert Mustacchi     struct eth_rx_sge*      cur_sge             = NULL;
107d14abf15SRobert Mustacchi     u32_t                   prod_bseq           = 0;
108d14abf15SRobert Mustacchi     u32_t                   rcq_prod_bseq       = 0;
109d14abf15SRobert Mustacchi     u16_t                   current_prod        = 0;
110d14abf15SRobert Mustacchi     u16_t                   active_entry        = 0;
111d14abf15SRobert Mustacchi 
112d14abf15SRobert Mustacchi     DbgMessage(pdev, INFORMl2 , "### lm_post_buffers\n");
113d14abf15SRobert Mustacchi 
114d14abf15SRobert Mustacchi     // Verify BD's consistent
115d14abf15SRobert Mustacchi     DbgBreakIfFastPath( rx_chain_sge && !lm_bd_chains_are_consistent( rx_chain_sge, rx_chain_bd ) );
116d14abf15SRobert Mustacchi 
117d14abf15SRobert Mustacchi     if(FALSE == is_tpa)
118d14abf15SRobert Mustacchi     {
119d14abf15SRobert Mustacchi         rxq_chain_common    = &LM_RXQ_COMMON(pdev, chain_idx);
120d14abf15SRobert Mustacchi         rx_chain_bd         = &LM_RXQ_CHAIN_BD(pdev, chain_idx);
121d14abf15SRobert Mustacchi         rx_chain_sge        = LM_RXQ_SGE_PTR_IF_VALID(pdev, chain_idx);
122d14abf15SRobert Mustacchi         rxq_chain           = &LM_RXQ(pdev, chain_idx);
123d14abf15SRobert Mustacchi         tpa_chain           = NULL;
124d14abf15SRobert Mustacchi         /* the assumption is that the number of cqes is less or equal to the corresponding rx bds,
125d14abf15SRobert Mustacchi            therefore if there no cqes left, break */
126d14abf15SRobert Mustacchi         bd_chain_to_check   = &rcq_chain->bd_chain;
127d14abf15SRobert Mustacchi     }
128d14abf15SRobert Mustacchi     else
129d14abf15SRobert Mustacchi     {
130d14abf15SRobert Mustacchi         rxq_chain_common    = &LM_TPA_COMMON(pdev, chain_idx);
131d14abf15SRobert Mustacchi         rx_chain_bd         = &LM_TPA_CHAIN_BD(pdev, chain_idx);
132d14abf15SRobert Mustacchi         rx_chain_sge        = NULL;
133d14abf15SRobert Mustacchi         rxq_chain           = NULL;
134d14abf15SRobert Mustacchi         tpa_chain           = &LM_TPA(pdev, chain_idx);
135d14abf15SRobert Mustacchi         // In TPA we don't add to the RCQ when posting buffers
136d14abf15SRobert Mustacchi         bd_chain_to_check   = rx_chain_bd;
137d14abf15SRobert Mustacchi     }
138d14abf15SRobert Mustacchi     /* Make sure we have a bd left for posting a receive buffer. */
139d14abf15SRobert Mustacchi     if(packet)
140d14abf15SRobert Mustacchi     {
141d14abf15SRobert Mustacchi         // Insert given packet.
142d14abf15SRobert Mustacchi         DbgBreakIfFastPath(SIG(packet) != L2PACKET_RX_SIG);
143d14abf15SRobert Mustacchi 
144d14abf15SRobert Mustacchi         if(lm_bd_chain_is_empty(bd_chain_to_check))
145d14abf15SRobert Mustacchi         {
146d14abf15SRobert Mustacchi             s_list_push_tail(&rxq_chain_common->free_descq, &packet->link);
147d14abf15SRobert Mustacchi             packet = NULL;
148d14abf15SRobert Mustacchi         }
149d14abf15SRobert Mustacchi     }
150d14abf15SRobert Mustacchi     else if(!lm_bd_chain_is_empty(bd_chain_to_check))
151d14abf15SRobert Mustacchi     {
152d14abf15SRobert Mustacchi         packet = (lm_packet_t *) s_list_pop_head(&rxq_chain_common->free_descq);
153d14abf15SRobert Mustacchi     }
154d14abf15SRobert Mustacchi     prod_bseq     = rxq_chain_common->prod_bseq;
155d14abf15SRobert Mustacchi 
156d14abf15SRobert Mustacchi     // In TPA we won't increment rcq_prod_bseq
157d14abf15SRobert Mustacchi     rcq_prod_bseq = rcq_chain->prod_bseq;
158d14abf15SRobert Mustacchi 
159d14abf15SRobert Mustacchi     while(packet)
160d14abf15SRobert Mustacchi     {
161d14abf15SRobert Mustacchi 
162d14abf15SRobert Mustacchi         current_prod = lm_bd_chain_prod_idx(rx_chain_bd);
163d14abf15SRobert Mustacchi         cur_bd  = lm_bd_chain_produce_bd(rx_chain_bd);
164d14abf15SRobert Mustacchi         rxq_chain_common->bd_prod_without_next++;
165d14abf15SRobert Mustacchi         cur_sge = rx_chain_sge ? lm_bd_chain_produce_bd(rx_chain_sge) : NULL;
166d14abf15SRobert Mustacchi 
167d14abf15SRobert Mustacchi         prod_bseq += packet->l2pkt_rx_info->mem_size;
168d14abf15SRobert Mustacchi 
169d14abf15SRobert Mustacchi         if(FALSE == is_tpa)
170d14abf15SRobert Mustacchi         {
171d14abf15SRobert Mustacchi             //take care of the RCQ related prod stuff.
172d14abf15SRobert Mustacchi 
173d14abf15SRobert Mustacchi             //update the prod of the RCQ only AFTER the Rx bd!
174d14abf15SRobert Mustacchi             rcq_prod_bseq += packet->l2pkt_rx_info->mem_size;
175d14abf15SRobert Mustacchi 
176d14abf15SRobert Mustacchi             /* These were actually produced before by fw, but we only produce them now to make sure they're synced with the rx-chain */
177d14abf15SRobert Mustacchi             lm_bd_chain_bd_produced(&rcq_chain->bd_chain);
178d14abf15SRobert Mustacchi         }
179d14abf15SRobert Mustacchi 
180d14abf15SRobert Mustacchi         packet->u1.rx.next_bd_idx = lm_bd_chain_prod_idx(rx_chain_bd);
181d14abf15SRobert Mustacchi #if L2_RX_BUF_SIG
182d14abf15SRobert Mustacchi         /* make sure signitures exist before and after the buffer */
183d14abf15SRobert Mustacchi         DbgBreakIfFastPath(SIG(packet->u1.rx.mem_virt - pdev->params.rcv_buffer_offset) != L2PACKET_RX_SIG);
184d14abf15SRobert Mustacchi         DbgBreakIfFastPath(END_SIG(packet->u1.rx.mem_virt, MAX_L2_CLI_BUFFER_SIZE(pdev, chain_idx)) != L2PACKET_RX_SIG);
185d14abf15SRobert Mustacchi #endif /* L2_RX_BUF_SIG */
186d14abf15SRobert Mustacchi 
187d14abf15SRobert Mustacchi         cur_bd->addr_lo  = mm_cpu_to_le32(packet->u1.rx.mem_phys[0].as_u32.low);
188d14abf15SRobert Mustacchi         cur_bd->addr_hi  = mm_cpu_to_le32(packet->u1.rx.mem_phys[0].as_u32.high);
189d14abf15SRobert Mustacchi 
190d14abf15SRobert Mustacchi         if( cur_sge )
191d14abf15SRobert Mustacchi         {
192d14abf15SRobert Mustacchi             cur_sge->addr_lo = mm_cpu_to_le32(packet->u1.rx.mem_phys[1].as_u32.low);
193d14abf15SRobert Mustacchi             cur_sge->addr_hi = mm_cpu_to_le32(packet->u1.rx.mem_phys[1].as_u32.high);
194d14abf15SRobert Mustacchi         }
195d14abf15SRobert Mustacchi 
196d14abf15SRobert Mustacchi         pkt_queued++;
197d14abf15SRobert Mustacchi 
198d14abf15SRobert Mustacchi         if(FALSE == is_tpa)
199d14abf15SRobert Mustacchi         {
200d14abf15SRobert Mustacchi             s_list_push_tail(&rxq_chain->active_descq, &packet->link);
201d14abf15SRobert Mustacchi         }
202d14abf15SRobert Mustacchi         else
203d14abf15SRobert Mustacchi         {
204d14abf15SRobert Mustacchi             // Active descriptor must sit in the same entry
205d14abf15SRobert Mustacchi             active_entry = LM_TPA_BD_ENTRY_TO_ACTIVE_ENTRY(pdev, chain_idx, current_prod);
206d14abf15SRobert Mustacchi 
207d14abf15SRobert Mustacchi             LM_TPA_ACTIVE_ENTRY_BOUNDARIES_VERIFY(pdev, chain_idx,active_entry);
208d14abf15SRobert Mustacchi             tpa_chain->sge_chain.active_descq_array[active_entry] = packet;
209d14abf15SRobert Mustacchi         }
210d14abf15SRobert Mustacchi 
211d14abf15SRobert Mustacchi         if(lm_bd_chain_is_empty(bd_chain_to_check))
212d14abf15SRobert Mustacchi             {
213d14abf15SRobert Mustacchi                 break;
214d14abf15SRobert Mustacchi             }
215d14abf15SRobert Mustacchi 
216d14abf15SRobert Mustacchi         /* Make sure we have a bd left for posting a receive buffer. */
217d14abf15SRobert Mustacchi         packet = (lm_packet_t *) s_list_pop_head(&rxq_chain_common->free_descq);
218d14abf15SRobert Mustacchi     }
219d14abf15SRobert Mustacchi 
220d14abf15SRobert Mustacchi     rxq_chain_common->prod_bseq = prod_bseq;
221d14abf15SRobert Mustacchi 
222d14abf15SRobert Mustacchi 
223d14abf15SRobert Mustacchi     //update the prod of the RCQ only AFTER the Rx bd!
224d14abf15SRobert Mustacchi     // This code seems unnecessary maybe should be deleted.
225d14abf15SRobert Mustacchi     // Im TPA we won't increment rcq_prod_bseq
226d14abf15SRobert Mustacchi     rcq_chain->prod_bseq = rcq_prod_bseq;
227d14abf15SRobert Mustacchi 
228d14abf15SRobert Mustacchi     if(pkt_queued)
229d14abf15SRobert Mustacchi     {
230d14abf15SRobert Mustacchi         //notify the fw of the prod
231d14abf15SRobert Mustacchi         if(FALSE == is_tpa)
232d14abf15SRobert Mustacchi         {
233d14abf15SRobert Mustacchi             lm_rx_set_prods(pdev, rcq_chain->iro_prod_offset, &rcq_chain->bd_chain, rx_chain_bd, rx_chain_sge ,chain_idx);
234d14abf15SRobert Mustacchi         }
235d14abf15SRobert Mustacchi         else
236d14abf15SRobert Mustacchi         {
237d14abf15SRobert Mustacchi             lm_rx_set_prods(pdev, rcq_chain->iro_prod_offset, &rcq_chain->bd_chain, &LM_RXQ_CHAIN_BD(pdev, chain_idx), &LM_TPA_CHAIN_BD(pdev, chain_idx) ,chain_idx);
238d14abf15SRobert Mustacchi         }
239d14abf15SRobert Mustacchi     }
240d14abf15SRobert Mustacchi 
241d14abf15SRobert Mustacchi     DbgMessage(pdev, INFORMl2 , "lm_post_buffers - bd con: %d bd prod: %d \n",
242d14abf15SRobert Mustacchi                 lm_bd_chain_cons_idx(rx_chain_bd),lm_bd_chain_prod_idx(rx_chain_bd));
243d14abf15SRobert Mustacchi     DbgMessage(pdev, INFORMl2 , "lm_post_buffers - cq con: %d cq prod: %d \n",
244d14abf15SRobert Mustacchi                 lm_bd_chain_cons_idx(&rcq_chain->bd_chain) ,lm_bd_chain_prod_idx(&rcq_chain->bd_chain));
245d14abf15SRobert Mustacchi 
246d14abf15SRobert Mustacchi     return pkt_queued;
247d14abf15SRobert Mustacchi } /* lm_post_buffers */
248d14abf15SRobert Mustacchi 
249d14abf15SRobert Mustacchi /**
250d14abf15SRobert Mustacchi  * @description
 * Updates tpa_chain->last_max_con if there is a new max.
 * Basic assumption is that the BD prod is always ahead of the BD
 * cons.
 * The subtraction tells us which index is closer to the BD prod.
255d14abf15SRobert Mustacchi  * @param pdev
256d14abf15SRobert Mustacchi  * @param chain_idx
257d14abf15SRobert Mustacchi  * @param new_index
258d14abf15SRobert Mustacchi  *
259d14abf15SRobert Mustacchi  * @return STATIC void
260d14abf15SRobert Mustacchi  */
261d14abf15SRobert Mustacchi __inline STATIC void
lm_tpa_sge_update_last_max(IN lm_device_t * pdev,IN const u32_t chain_idx,IN const u16_t new_index)262d14abf15SRobert Mustacchi lm_tpa_sge_update_last_max(IN       lm_device_t*  pdev,
263d14abf15SRobert Mustacchi                            IN const u32_t         chain_idx,
264d14abf15SRobert Mustacchi                            IN const u16_t         new_index)
265d14abf15SRobert Mustacchi {
266d14abf15SRobert Mustacchi     lm_tpa_sge_chain_t* sge_tpa_chain       = &LM_SGE_TPA_CHAIN(pdev, chain_idx);
267d14abf15SRobert Mustacchi     u16_t const         prod_idx            = lm_bd_chain_prod_idx(&LM_TPA_CHAIN_BD(pdev, chain_idx));
268d14abf15SRobert Mustacchi     u16_t const         prod_minus_new_sge  = prod_idx - new_index;
269d14abf15SRobert Mustacchi     u16_t const         prod_minus_saved    = prod_idx - sge_tpa_chain->last_max_con;
270d14abf15SRobert Mustacchi 
271d14abf15SRobert Mustacchi     if(prod_minus_new_sge < prod_minus_saved)
272d14abf15SRobert Mustacchi     {
273d14abf15SRobert Mustacchi         sge_tpa_chain->last_max_con = new_index;
274d14abf15SRobert Mustacchi     }
275d14abf15SRobert Mustacchi 
276d14abf15SRobert Mustacchi     /*
277d14abf15SRobert Mustacchi     Cyclic would have been a nicer sulotion, but adds a limitation on bd ring size that would be (2^15) instead of 2^16
278d14abf15SRobert Mustacchi     This limitation should be closed done when allocating the TPA BD chain
279d14abf15SRobert Mustacchi     DbgBreakIf(LM_TPA_CHAIN_BD_NUM_ELEM(_pdev, chain_idx) < (2^15) );
280d14abf15SRobert Mustacchi     if (CYCLIC_GT_16(sge_index, sge_tpa_chain->last_max_con))
281d14abf15SRobert Mustacchi         sge_tpa_chain->last_max_con = sge_index;
282d14abf15SRobert Mustacchi     */
283d14abf15SRobert Mustacchi }
284d14abf15SRobert Mustacchi 
285d14abf15SRobert Mustacchi /**
286d14abf15SRobert Mustacchi  * @description
 * The TPA sge consumer is incremented in 64-bit (one mask
 * element) steps.
 * @param pdev
 * @param chain_idx
 *
 * @return STATIC void
293d14abf15SRobert Mustacchi  */
__inline STATIC void
lm_tpa_incr_sge_cons( IN        lm_device_t*    pdev,
                      IN const  u32_t           chain_idx,
                      IN const  u16_t           mask_entry_idx)
{
    lm_tpa_sge_chain_t* sge_tpa_chain   = &LM_SGE_TPA_CHAIN(pdev, chain_idx);
    lm_bd_chain_t*      bd_chain        = &LM_TPA_CHAIN_BD(pdev, chain_idx);
    u16_t               bd_entry        = 0;
    u16_t               active_entry    = 0;
    u16_t               i               = 0;

    /* Advance the raw consumer by one full mask element (64 entries). The
     * page-boundary handling below compensates for the "next"-pointer bds
     * that lm_bd_chain_bds_consumed() accounts for. */
    bd_chain->cons_idx += BIT_VEC64_ELEM_SZ;

    /* Re-arm the mask element: all 64 entries become available again. */
    DbgBreakIf(LM_TPA_MASK_LEN(pdev, chain_idx) <= mask_entry_idx);
    sge_tpa_chain->mask_array[mask_entry_idx] = BIT_VEC64_ELEM_ONE_MASK;

    // Make sure bds_per_page_mask is a power of 2 that is higher than 64
    DbgBreakIf(0 != (lm_bd_chain_bds_per_page(bd_chain) & BIT_VEC64_ELEM_MASK));
    DbgBreakIf(BIT_VEC64_ELEM_SZ >= lm_bd_chain_bds_per_page(bd_chain));

    if((lm_bd_chain_cons_idx(bd_chain) & lm_bd_chain_bds_per_page_mask(bd_chain)) == 0)
    {
        // Just closed a page - must refer to page end entries:
        // consume 64 minus the skipped next-pointer bds at the page end.
        lm_bd_chain_bds_consumed(bd_chain, (BIT_VEC64_ELEM_SZ - lm_bd_chain_bds_skip_eop(bd_chain)));

        /* clear page-end entries (the next-pointer bds never carry buffers,
         * so their active bits must not remain set) */
        for(i = 1; i <= lm_bd_chain_bds_skip_eop(bd_chain); i++ )
        {
            bd_entry = lm_bd_chain_cons_idx(bd_chain) - i;
            active_entry = LM_TPA_BD_ENTRY_TO_ACTIVE_ENTRY(pdev, chain_idx, bd_entry);
            LM_TPA_MASK_CLEAR_ACTIVE_BIT(pdev, chain_idx, active_entry);
        }
    }
    else
    {
        // Same page - consume a plain 64 entries.
        lm_bd_chain_bds_consumed(bd_chain, BIT_VEC64_ELEM_SZ);
    }
}
333d14abf15SRobert Mustacchi /**
334d14abf15SRobert Mustacchi  * @description
335d14abf15SRobert Mustacchi  * Handle TPA stop code.
336d14abf15SRobert Mustacchi  * @param pdev
337d14abf15SRobert Mustacchi  * @param rcvd_list -Global receive list
338d14abf15SRobert Mustacchi  * @param cqe
339d14abf15SRobert Mustacchi  * @param chain_idx
340d14abf15SRobert Mustacchi  * @param pkt_cnt
341d14abf15SRobert Mustacchi  * @param queue_index
342d14abf15SRobert Mustacchi  *
 * @return STATIC u32_t pkt_cnt number of packets. The count is
 *         received as an input parameter and incremented for each
 *         packet added to the global receive list.
346d14abf15SRobert Mustacchi  */
STATIC u32_t
lm_tpa_stop( IN         lm_device_t*                pdev,
             INOUT      s_list_t*                   rcvd_list,
             IN const   struct eth_end_agg_rx_cqe*  cqe,
             IN const   u32_t                       chain_idx,
             IN         u32_t                       pkt_cnt,
             IN const   u8_t                        queue_index)
{
    lm_tpa_chain_t*     tpa_chain           = &LM_TPA(pdev, chain_idx);
    lm_tpa_sge_chain_t* sge_tpa_chain       = &LM_SGE_TPA_CHAIN(pdev, chain_idx);
    lm_bd_chain_t*      bd_chain            = &LM_TPA_CHAIN_BD(pdev, chain_idx);
    lm_packet_t*        pkt                 = tpa_chain->start_coales_bd[queue_index].packet;//Reads the TPA start coalesce array(PD_R)
    // Payload bytes carried by SGE pages, beyond the start-bd portion.
    u32_t               sge_size            = mm_le16_to_cpu(cqe->pkt_len) - pkt->l2pkt_rx_info->size;
    // Number of SGE pages used by this aggregation (rounded up to whole pages).
    u32_t const         sge_num_elem        = DIV_ROUND_UP_BITS(sge_size, LM_TPA_PAGE_BITS);
    u32_t               fw_sge_index        = 0;
    u16_t               active_entry        = 0;
    u16_t               first_max_set       = 0;
    u16_t               last_max_set        = 0;
    u16_t               i                   = 0;
    u8_t                b_force_first_enter = FALSE;
    u16_t               loop_cnt_dbg        = 0;
    const u32_t         lm_tpa_page_size    = LM_TPA_PAGE_SIZE;

    // Total packet size given in end aggregation must be larger than the size given in start aggregation.
    // The only case where both sizes are equal is if stop aggregation doesn't contain data.
    DbgBreakIf( mm_le16_to_cpu(cqe->pkt_len) < pkt->l2pkt_rx_info->size);

    // Release the start-coalesce slot for this hardware aggregation queue.
    DbgBreakIf( TRUE != tpa_chain->start_coales_bd[queue_index].is_entry_used);
    tpa_chain->start_coales_bd[queue_index].is_entry_used = FALSE;

    // Indicate to upper layer this is a TPA packet
    SET_FLAGS(pkt->l2pkt_rx_info->flags ,LM_RX_FLAG_START_RSC_TPA);
    // Updates the TPA only fields from the CQE
    pkt->l2pkt_rx_info->total_packet_size   = mm_le16_to_cpu(cqe->pkt_len);
    pkt->l2pkt_rx_info->coal_seg_cnt        = mm_le16_to_cpu(cqe->num_of_coalesced_segs);
    pkt->l2pkt_rx_info->dup_ack_cnt         = cqe->pure_ack_count;
    pkt->l2pkt_rx_info->ts_delta            = mm_le32_to_cpu(cqe->timestamp_delta);

    /* make sure packet size is larger than header size */
    DbgBreakIfFastPath(pkt->l2pkt_rx_info->total_packet_size < MIN_ETHERNET_PACKET_SIZE);

    // Adds this packet descriptor to the global receive list (rcvd_list that is later indicated to miniport).
    s_list_push_tail(rcvd_list, &pkt->link);
    pkt_cnt++;

    ASSERT_STATIC(LM_TPA_MAX_AGG_SIZE == ARRSIZE(cqe->sgl_or_raw_data.sgl));
    DbgBreakIf(ARRSIZE(cqe->sgl_or_raw_data.sgl) < sge_num_elem);

    // If the TPA stop doesn't contain any new BDs.
    if(0 == sge_num_elem )
    {
        // Total packet size given in end aggregation must be equal to the size given in start aggregation
        // if stop aggregation doesn't contain data.
        DbgBreakIf( mm_le16_to_cpu(cqe->pkt_len) != pkt->l2pkt_rx_info->size);

        return pkt_cnt;
    }

    // Walk the firmware-provided SGL: reclaim each SGE page's packet, size
    // it, and chain it behind the head packet on the receive list.
    for(fw_sge_index = 0; fw_sge_index < sge_num_elem; fw_sge_index++)
    {
        DbgBreakIf(ARRSIZE(cqe->sgl_or_raw_data.sgl) <= fw_sge_index);
        active_entry = LM_TPA_BD_ENTRY_TO_ACTIVE_ENTRY(pdev, chain_idx, mm_le16_to_cpu(cqe->sgl_or_raw_data.sgl[fw_sge_index]));

        LM_TPA_ACTIVE_ENTRY_BOUNDARIES_VERIFY(pdev, chain_idx, active_entry);
        pkt = tpa_chain->sge_chain.active_descq_array[active_entry];
        LM_TPA_MASK_CLEAR_ACTIVE_BIT(pdev, chain_idx, active_entry);

#if (DBG)
        /************start TPA debug code******************************/
        tpa_chain->dbg_params.pck_ret_from_chip++;
        /************end TPA debug code******************************/
#endif //(DBG)
        // For last SGE: every SGE but the last must carry a full page;
        // the last carries the remainder.
        DbgBreakIf((fw_sge_index != (sge_num_elem - 1)) && (sge_size < LM_TPA_PAGE_SIZE ));
        pkt->l2pkt_rx_info->size = min(sge_size ,lm_tpa_page_size);
        s_list_push_tail(rcvd_list, &(pkt->link));
        pkt_cnt++;
        // sge_size may wrap (unsigned) on the last iteration; it is not read after the loop.
        sge_size -= LM_TPA_PAGE_SIZE;
    }

#if defined(_NTDDK_)
//PreFast 28182 :Prefast reviewed and suppress this situation shouldn't occur.
#pragma warning (push)
#pragma warning( disable:6385 )
#endif // !_NTDDK_
    /* Here we assume that the last SGE index is the biggest  */
    lm_tpa_sge_update_last_max(pdev,
                              chain_idx,
                              mm_le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_num_elem -1]));

#if defined(_NTDDK_)
#pragma warning (pop)
#endif // !_NTDDK_
    // Find the first consumer mask element that is a candidate to free and the last.
    first_max_set = LM_TPA_BD_ENTRY_TO_MASK_ENTRY(pdev, chain_idx, lm_bd_chain_cons_idx(bd_chain));
    last_max_set  = LM_TPA_BD_ENTRY_TO_MASK_ENTRY(pdev, chain_idx, sge_tpa_chain->last_max_con);

    // The consumer must be 64-entry aligned (moved only in whole mask elements).
    DbgBreakIf(0 != (lm_bd_chain_cons_idx(bd_chain) & BIT_VEC64_ELEM_MASK));
    /* If ring is full enter anyway*/
    if((last_max_set == first_max_set) && (lm_bd_chain_is_full(bd_chain)))
    {
        b_force_first_enter = TRUE;
    }
    /* Now update the cons: advance over fully-consumed mask elements,
       stopping at the first element that still has in-use entries. */
    for (i = first_max_set;((i != last_max_set) || (TRUE == b_force_first_enter)); i = LM_TPA_MASK_NEXT_ELEM(pdev, chain_idx, i))
    {
        DbgBreakIf(LM_TPA_MASK_LEN(pdev, chain_idx) <= i);
        if (sge_tpa_chain->mask_array[i])
        {
            break;
        }
        b_force_first_enter = FALSE;

        lm_tpa_incr_sge_cons(pdev,
                             chain_idx,
                             i);
        // Guard against looping more times than there are mask elements.
        loop_cnt_dbg++;
        DbgBreakIf(LM_TPA_MASK_LEN(pdev,chain_idx) < loop_cnt_dbg);
    }

    return pkt_cnt;
}
469d14abf15SRobert Mustacchi /**
470d14abf15SRobert Mustacchi  * @description
471d14abf15SRobert Mustacchi  * Handle TPA start code.
472d14abf15SRobert Mustacchi  * @param pdev
473d14abf15SRobert Mustacchi  * @param pkt
474d14abf15SRobert Mustacchi  * @param chain_idx
475d14abf15SRobert Mustacchi  * @param queue_index
476d14abf15SRobert Mustacchi  *
477d14abf15SRobert Mustacchi  * @return STATIC void
478d14abf15SRobert Mustacchi  */
479d14abf15SRobert Mustacchi __inline STATIC void
lm_tpa_start(IN lm_device_t * pdev,IN lm_packet_t * pkt,IN const u32_t chain_idx,IN const u8_t queue_index)480d14abf15SRobert Mustacchi lm_tpa_start( IN        lm_device_t*    pdev,
481d14abf15SRobert Mustacchi               IN        lm_packet_t*    pkt,
482d14abf15SRobert Mustacchi               IN const  u32_t           chain_idx,
483d14abf15SRobert Mustacchi               IN const  u8_t            queue_index)
484d14abf15SRobert Mustacchi {
485d14abf15SRobert Mustacchi     lm_tpa_chain_t*   tpa_chain    = &LM_TPA(pdev, chain_idx);
486d14abf15SRobert Mustacchi 
487d14abf15SRobert Mustacchi     DbgBreakIf( FALSE != tpa_chain->start_coales_bd[queue_index].is_entry_used);
488d14abf15SRobert Mustacchi 
489d14abf15SRobert Mustacchi     tpa_chain->start_coales_bd[queue_index].is_entry_used   = TRUE;
490d14abf15SRobert Mustacchi     tpa_chain->start_coales_bd[queue_index].packet          = pkt;
491d14abf15SRobert Mustacchi }
492d14abf15SRobert Mustacchi /**
493d14abf15SRobert Mustacchi  * @description
494d14abf15SRobert Mustacchi  * Set TPA start known flags.
495d14abf15SRobert Mustacchi  * This is only an optimization to avoid known if's
496d14abf15SRobert Mustacchi  * @param pdev
497d14abf15SRobert Mustacchi  *
498d14abf15SRobert Mustacchi  * @return STATIC void
499d14abf15SRobert Mustacchi  */
500d14abf15SRobert Mustacchi __inline STATIC void
lm_tpa_start_flags_handle(IN lm_device_t * pdev,IN const struct eth_fast_path_rx_cqe * cqe,INOUT lm_packet_t * pkt,IN const u16_t parse_flags)501d14abf15SRobert Mustacchi lm_tpa_start_flags_handle( IN       lm_device_t*                    pdev,
502d14abf15SRobert Mustacchi                            IN const struct eth_fast_path_rx_cqe*    cqe,
503d14abf15SRobert Mustacchi                            INOUT    lm_packet_t*                    pkt,
504d14abf15SRobert Mustacchi                            IN const u16_t                           parse_flags)
505d14abf15SRobert Mustacchi {
506d14abf15SRobert Mustacchi     // TPA is always(only) above IPV4 or IPV6.
507d14abf15SRobert Mustacchi     DbgBreakIf(FALSE ==
508d14abf15SRobert Mustacchi                ((GET_FLAGS_WITH_OFFSET(parse_flags,PARSING_FLAGS_OVER_ETHERNET_PROTOCOL,
509d14abf15SRobert Mustacchi                    PARSING_FLAGS_OVER_ETHERNET_PROTOCOL_SHIFT) == PRS_FLAG_OVERETH_IPV4) ||
510d14abf15SRobert Mustacchi                  (GET_FLAGS_WITH_OFFSET(parse_flags,PARSING_FLAGS_OVER_ETHERNET_PROTOCOL,
511d14abf15SRobert Mustacchi                    PARSING_FLAGS_OVER_ETHERNET_PROTOCOL_SHIFT) == PRS_FLAG_OVERETH_IPV6)));
512d14abf15SRobert Mustacchi 
513d14abf15SRobert Mustacchi     if(PRS_FLAG_OVERETH_IPV4 == GET_FLAGS_WITH_OFFSET(parse_flags,PARSING_FLAGS_OVER_ETHERNET_PROTOCOL,
514d14abf15SRobert Mustacchi          PARSING_FLAGS_OVER_ETHERNET_PROTOCOL_SHIFT))
515d14abf15SRobert Mustacchi     {
516d14abf15SRobert Mustacchi         SET_FLAGS(pkt->l2pkt_rx_info->flags, LM_RX_FLAG_IS_IPV4_DATAGRAM);
517d14abf15SRobert Mustacchi 
518d14abf15SRobert Mustacchi         DbgBreakIf(GET_FLAGS(cqe->status_flags, ETH_FAST_PATH_RX_CQE_IP_XSUM_NO_VALIDATION_FLG));
519d14abf15SRobert Mustacchi         // In IPV4 there is always a checksum
520d14abf15SRobert Mustacchi         // TPA ip cksum is always valid
521d14abf15SRobert Mustacchi         DbgBreakIf(GET_FLAGS(cqe->type_error_flags, ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG));
522d14abf15SRobert Mustacchi 
523d14abf15SRobert Mustacchi         SET_FLAGS(pkt->l2pkt_rx_info->flags, LM_RX_FLAG_IP_CKSUM_IS_GOOD);
524d14abf15SRobert Mustacchi     }
525d14abf15SRobert Mustacchi     else
526d14abf15SRobert Mustacchi     {
527d14abf15SRobert Mustacchi         SET_FLAGS(pkt->l2pkt_rx_info->flags, LM_RX_FLAG_IS_IPV6_DATAGRAM);
528d14abf15SRobert Mustacchi         // In IPV6 there is no checksum
529d14abf15SRobert Mustacchi         DbgBreakIf(0 == GET_FLAGS(cqe->status_flags, ETH_FAST_PATH_RX_CQE_IP_XSUM_NO_VALIDATION_FLG));
530d14abf15SRobert Mustacchi     }
531d14abf15SRobert Mustacchi 
532d14abf15SRobert Mustacchi 
533d14abf15SRobert Mustacchi     // If there was a fagmentation it will be delivered by a regular BD (the TPA aggregation is stoped).
534d14abf15SRobert Mustacchi     DbgBreakIf( GET_FLAGS(parse_flags,PARSING_FLAGS_FRAGMENTATION_STATUS));
535d14abf15SRobert Mustacchi     /* check if TCP segment */
536d14abf15SRobert Mustacchi     // TPA is always above TCP.
537d14abf15SRobert Mustacchi     DbgBreakIf(PRS_FLAG_OVERIP_TCP != GET_FLAGS_WITH_OFFSET(parse_flags,PARSING_FLAGS_OVER_IP_PROTOCOL,
538d14abf15SRobert Mustacchi                                                             PARSING_FLAGS_OVER_IP_PROTOCOL_SHIFT));
539d14abf15SRobert Mustacchi 
540d14abf15SRobert Mustacchi     SET_FLAGS(pkt->l2pkt_rx_info->flags, LM_RX_FLAG_IS_TCP_SEGMENT);
541d14abf15SRobert Mustacchi 
542d14abf15SRobert Mustacchi 
543d14abf15SRobert Mustacchi     // TCP was checked before. TCP checksum must be done by FW in TPA.
544d14abf15SRobert Mustacchi     DbgBreakIf(GET_FLAGS(cqe->status_flags, ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG));
545d14abf15SRobert Mustacchi     // TCP checksum must be valid in a successful TPA aggregation.
546d14abf15SRobert Mustacchi     DbgBreakIf(GET_FLAGS(cqe->type_error_flags, ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG));
547d14abf15SRobert Mustacchi 
548d14abf15SRobert Mustacchi /* IN TPA tcp cksum is always validated */
549d14abf15SRobert Mustacchi /* valid tcp/udp cksum */
550d14abf15SRobert Mustacchi #define SHIFT_IS_GOOD  1
551d14abf15SRobert Mustacchi #define SHIFT_IS_BAD   2
552d14abf15SRobert Mustacchi     ASSERT_STATIC(LM_RX_FLAG_UDP_CKSUM_IS_GOOD == LM_RX_FLAG_IS_UDP_DATAGRAM << SHIFT_IS_GOOD);
553d14abf15SRobert Mustacchi     ASSERT_STATIC(LM_RX_FLAG_UDP_CKSUM_IS_BAD  == LM_RX_FLAG_IS_UDP_DATAGRAM << SHIFT_IS_BAD);
554d14abf15SRobert Mustacchi     ASSERT_STATIC(LM_RX_FLAG_TCP_CKSUM_IS_GOOD == LM_RX_FLAG_IS_TCP_SEGMENT  << SHIFT_IS_GOOD);
555d14abf15SRobert Mustacchi     ASSERT_STATIC(LM_RX_FLAG_TCP_CKSUM_IS_BAD  == LM_RX_FLAG_IS_TCP_SEGMENT  << SHIFT_IS_BAD);
556d14abf15SRobert Mustacchi 
557d14abf15SRobert Mustacchi     SET_FLAGS(pkt->l2pkt_rx_info->flags , ( GET_FLAGS(pkt->l2pkt_rx_info->flags, (LM_RX_FLAG_IS_TCP_SEGMENT)) << SHIFT_IS_GOOD ) );
558d14abf15SRobert Mustacchi }
559d14abf15SRobert Mustacchi 
560d14abf15SRobert Mustacchi /**
561d14abf15SRobert Mustacchi  * @description
562d14abf15SRobert Mustacchi  * Set regular flags.
563d14abf15SRobert Mustacchi  * This is only an optimization
564d14abf15SRobert Mustacchi  * @param pdev
565d14abf15SRobert Mustacchi  *
566d14abf15SRobert Mustacchi  * @return STATIC void
567d14abf15SRobert Mustacchi  */
568d14abf15SRobert Mustacchi STATIC void
lm_regular_flags_handle(IN lm_device_t * pdev,IN const struct eth_fast_path_rx_cqe * cqe,INOUT lm_packet_t * pkt,IN const u16_t parse_flags)569d14abf15SRobert Mustacchi lm_regular_flags_handle( IN         lm_device_t*    pdev,
570d14abf15SRobert Mustacchi                          IN const struct eth_fast_path_rx_cqe*    cqe,
571d14abf15SRobert Mustacchi                          INOUT      lm_packet_t*    pkt,
572d14abf15SRobert Mustacchi                          IN const   u16_t           parse_flags)
573d14abf15SRobert Mustacchi {
574d14abf15SRobert Mustacchi     /* check if IP datagram (either IPv4 or IPv6) */
575d14abf15SRobert Mustacchi     if(((GET_FLAGS(parse_flags,PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) >>
576d14abf15SRobert Mustacchi         PARSING_FLAGS_OVER_ETHERNET_PROTOCOL_SHIFT) == PRS_FLAG_OVERETH_IPV4) ||
577d14abf15SRobert Mustacchi        ((GET_FLAGS(parse_flags,PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) >>
578d14abf15SRobert Mustacchi         PARSING_FLAGS_OVER_ETHERNET_PROTOCOL_SHIFT) == PRS_FLAG_OVERETH_IPV6))
579d14abf15SRobert Mustacchi     {
580d14abf15SRobert Mustacchi         pkt->l2pkt_rx_info->flags  |=
581d14abf15SRobert Mustacchi             (GET_FLAGS(parse_flags,PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) >>
582d14abf15SRobert Mustacchi              PARSING_FLAGS_OVER_ETHERNET_PROTOCOL_SHIFT) == PRS_FLAG_OVERETH_IPV4 ?
583d14abf15SRobert Mustacchi             LM_RX_FLAG_IS_IPV4_DATAGRAM :
584d14abf15SRobert Mustacchi             LM_RX_FLAG_IS_IPV6_DATAGRAM;
585d14abf15SRobert Mustacchi         if(!GET_FLAGS(cqe->status_flags, ETH_FAST_PATH_RX_CQE_IP_XSUM_NO_VALIDATION_FLG))
586d14abf15SRobert Mustacchi         {
587d14abf15SRobert Mustacchi             /* ip cksum validated */
588d14abf15SRobert Mustacchi             if GET_FLAGS(cqe->type_error_flags, ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG)
589d14abf15SRobert Mustacchi             {
590d14abf15SRobert Mustacchi                 /* invalid ip cksum */
591d14abf15SRobert Mustacchi                 SET_FLAGS(pkt->l2pkt_rx_info->flags, LM_RX_FLAG_IP_CKSUM_IS_BAD);
592d14abf15SRobert Mustacchi 
593d14abf15SRobert Mustacchi                 LM_COMMON_DRV_STATS_ATOMIC_INC_ETH(pdev, rx_ip_cs_error_count);
594d14abf15SRobert Mustacchi             }
595d14abf15SRobert Mustacchi             else
596d14abf15SRobert Mustacchi             {
597d14abf15SRobert Mustacchi                 /* valid ip cksum */
598d14abf15SRobert Mustacchi                 SET_FLAGS(pkt->l2pkt_rx_info->flags, LM_RX_FLAG_IP_CKSUM_IS_GOOD);
599d14abf15SRobert Mustacchi             }
600d14abf15SRobert Mustacchi         }
601d14abf15SRobert Mustacchi     }
602d14abf15SRobert Mustacchi 
603d14abf15SRobert Mustacchi     // TCP or UDP segment.
604d14abf15SRobert Mustacchi     if(!GET_FLAGS(parse_flags,PARSING_FLAGS_FRAGMENTATION_STATUS))
605d14abf15SRobert Mustacchi     {
606d14abf15SRobert Mustacchi         /* check if TCP segment */
607d14abf15SRobert Mustacchi         if((GET_FLAGS(parse_flags,PARSING_FLAGS_OVER_IP_PROTOCOL) >>
608d14abf15SRobert Mustacchi             PARSING_FLAGS_OVER_IP_PROTOCOL_SHIFT) == PRS_FLAG_OVERIP_TCP)
609d14abf15SRobert Mustacchi         {
610d14abf15SRobert Mustacchi             SET_FLAGS(pkt->l2pkt_rx_info->flags, LM_RX_FLAG_IS_TCP_SEGMENT);
611d14abf15SRobert Mustacchi             DbgMessage(pdev, INFORM, "--- TCP Packet --- \n");
612d14abf15SRobert Mustacchi         }
613d14abf15SRobert Mustacchi         /* check if UDP segment */
614d14abf15SRobert Mustacchi         else if((GET_FLAGS(parse_flags,PARSING_FLAGS_OVER_IP_PROTOCOL) >>
615d14abf15SRobert Mustacchi                  PARSING_FLAGS_OVER_IP_PROTOCOL_SHIFT) == PRS_FLAG_OVERIP_UDP)
616d14abf15SRobert Mustacchi         {
617d14abf15SRobert Mustacchi             SET_FLAGS(pkt->l2pkt_rx_info->flags , LM_RX_FLAG_IS_UDP_DATAGRAM);
618d14abf15SRobert Mustacchi             DbgMessage(pdev, INFORM, "--- UDP Packet --- \n");
619d14abf15SRobert Mustacchi         }
620d14abf15SRobert Mustacchi     }
621d14abf15SRobert Mustacchi 
622d14abf15SRobert Mustacchi 
623d14abf15SRobert Mustacchi     if( GET_FLAGS(pkt->l2pkt_rx_info->flags, (LM_RX_FLAG_IS_TCP_SEGMENT | LM_RX_FLAG_IS_UDP_DATAGRAM)) &&
624d14abf15SRobert Mustacchi        !GET_FLAGS(cqe->status_flags, ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG))
625d14abf15SRobert Mustacchi     {
626d14abf15SRobert Mustacchi         ASSERT_STATIC(LM_RX_FLAG_UDP_CKSUM_IS_GOOD == LM_RX_FLAG_IS_UDP_DATAGRAM << SHIFT_IS_GOOD);
627d14abf15SRobert Mustacchi         ASSERT_STATIC(LM_RX_FLAG_UDP_CKSUM_IS_BAD  == LM_RX_FLAG_IS_UDP_DATAGRAM << SHIFT_IS_BAD);
628d14abf15SRobert Mustacchi         ASSERT_STATIC(LM_RX_FLAG_TCP_CKSUM_IS_GOOD == LM_RX_FLAG_IS_TCP_SEGMENT  << SHIFT_IS_GOOD);
629d14abf15SRobert Mustacchi         ASSERT_STATIC(LM_RX_FLAG_TCP_CKSUM_IS_BAD  == LM_RX_FLAG_IS_TCP_SEGMENT  << SHIFT_IS_BAD);
630d14abf15SRobert Mustacchi 
631d14abf15SRobert Mustacchi         DbgMessage(pdev, INFORM, "  Checksum validated.\n");
632d14abf15SRobert Mustacchi 
633d14abf15SRobert Mustacchi         /* tcp/udp cksum validated */
634d14abf15SRobert Mustacchi         if GET_FLAGS(cqe->type_error_flags, ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG)
635d14abf15SRobert Mustacchi         {
636d14abf15SRobert Mustacchi             /* invalid tcp/udp cksum */
637d14abf15SRobert Mustacchi             SET_FLAGS(pkt->l2pkt_rx_info->flags , ( GET_FLAGS(pkt->l2pkt_rx_info->flags, (LM_RX_FLAG_IS_TCP_SEGMENT | LM_RX_FLAG_IS_UDP_DATAGRAM)) << SHIFT_IS_BAD ) );
638d14abf15SRobert Mustacchi 
639d14abf15SRobert Mustacchi             LM_COMMON_DRV_STATS_ATOMIC_INC_ETH(pdev, rx_tcp_cs_error_count);
640d14abf15SRobert Mustacchi             DbgMessage(pdev, INFORM, "  BAD checksum.\n");
641d14abf15SRobert Mustacchi         }
642d14abf15SRobert Mustacchi         else if (GET_FLAGS(pkt->l2pkt_rx_info->flags , LM_RX_FLAG_IP_CKSUM_IS_BAD))
643d14abf15SRobert Mustacchi         {
644d14abf15SRobert Mustacchi             /* invalid tcp/udp cksum due to invalid ip cksum */
645d14abf15SRobert Mustacchi             SET_FLAGS(pkt->l2pkt_rx_info->flags , ( GET_FLAGS(pkt->l2pkt_rx_info->flags, (LM_RX_FLAG_IS_TCP_SEGMENT | LM_RX_FLAG_IS_UDP_DATAGRAM)) << SHIFT_IS_BAD ) );
646d14abf15SRobert Mustacchi             DbgMessage(pdev, INFORM, "  BAD IP checksum\n");
647d14abf15SRobert Mustacchi         }
648d14abf15SRobert Mustacchi         else
649d14abf15SRobert Mustacchi         {
650d14abf15SRobert Mustacchi             /* valid tcp/udp cksum */
651d14abf15SRobert Mustacchi             SET_FLAGS(pkt->l2pkt_rx_info->flags , ( GET_FLAGS(pkt->l2pkt_rx_info->flags, (LM_RX_FLAG_IS_TCP_SEGMENT | LM_RX_FLAG_IS_UDP_DATAGRAM)) << SHIFT_IS_GOOD ) );
652d14abf15SRobert Mustacchi             DbgMessage(pdev, INFORM, "  GOOD checksum.\n");
653d14abf15SRobert Mustacchi         }
654d14abf15SRobert Mustacchi     }
655d14abf15SRobert Mustacchi     else
656d14abf15SRobert Mustacchi     {
657d14abf15SRobert Mustacchi         DbgMessage(pdev, INFORM, "  Checksum NOT validated.\n");
658d14abf15SRobert Mustacchi         /*Packets with invalid TCP options are reported with L4_XSUM_NO_VALIDATION due to HW limitation. In this case we assume that
659d14abf15SRobert Mustacchi           their checksum is OK.*/
660d14abf15SRobert Mustacchi         if(GET_FLAGS(pkt->l2pkt_rx_info->flags, (LM_RX_FLAG_IS_TCP_SEGMENT | LM_RX_FLAG_IS_UDP_DATAGRAM)) &&
661d14abf15SRobert Mustacchi            GET_FLAGS(cqe->status_flags, ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG) &&
662d14abf15SRobert Mustacchi            GET_FLAGS(cqe->pars_flags.flags, PARSING_FLAGS_TCP_OPTIONS_EXIST))
663d14abf15SRobert Mustacchi         {
664d14abf15SRobert Mustacchi             DbgMessage(pdev, INFORM, "  TCP Options exist - forcing return value.\n");
665d14abf15SRobert Mustacchi             if(GET_FLAGS(pkt->l2pkt_rx_info->flags , LM_RX_FLAG_IP_CKSUM_IS_BAD))
666d14abf15SRobert Mustacchi             {
667d14abf15SRobert Mustacchi                 DbgMessage(pdev, INFORM, "  IP checksum invalid - reporting BAD checksum.\n");
668d14abf15SRobert Mustacchi                 SET_FLAGS(pkt->l2pkt_rx_info->flags , ( GET_FLAGS(pkt->l2pkt_rx_info->flags, (LM_RX_FLAG_IS_TCP_SEGMENT | LM_RX_FLAG_IS_UDP_DATAGRAM)) << SHIFT_IS_BAD ) );
669d14abf15SRobert Mustacchi             }
670d14abf15SRobert Mustacchi             else
671d14abf15SRobert Mustacchi             {
672d14abf15SRobert Mustacchi                 DbgMessage(pdev, INFORM, "  IP checksum ok - reporting GOOD checksum.\n");
673d14abf15SRobert Mustacchi                 SET_FLAGS(pkt->l2pkt_rx_info->flags , ( GET_FLAGS(pkt->l2pkt_rx_info->flags, (LM_RX_FLAG_IS_TCP_SEGMENT | LM_RX_FLAG_IS_UDP_DATAGRAM)) << SHIFT_IS_GOOD ) );
674d14abf15SRobert Mustacchi             }
675d14abf15SRobert Mustacchi         }
676d14abf15SRobert Mustacchi     }
677d14abf15SRobert Mustacchi }
678d14abf15SRobert Mustacchi 
/* Record the frame length reported by the hardware on the packet's
 * rx-info.  pdev is only used for the debug trace; chain_idx is
 * currently unused here. */
__inline STATIC void
lm_recv_set_pkt_len( IN       lm_device_t*   pdev,
                     INOUT    lm_packet_t*   pkt,
                     IN const u16_t          pkt_len,
                     IN const u32_t          chain_idx)
{
    //changed, as we dont have fhdr infrastructure
    pkt->l2pkt_rx_info->size = pkt_len; //- 4; /* CRC32 */

    DbgMessage(pdev, VERBOSEl2, "pkt_size: %d\n",pkt->l2pkt_rx_info->size);
}
690d14abf15SRobert Mustacchi 
691d14abf15SRobert Mustacchi INLINE STATIC u32_t
calc_cksum(u16_t * hdr,u32_t len_in_bytes,u32_t sum)692d14abf15SRobert Mustacchi calc_cksum(u16_t *hdr, u32_t len_in_bytes, u32_t sum)
693d14abf15SRobert Mustacchi {
694d14abf15SRobert Mustacchi     // len_in_bytes - the length in bytes of the header
695d14abf15SRobert Mustacchi     // sum - initial checksum
696d14abf15SRobert Mustacchi     while (len_in_bytes > 1)
697d14abf15SRobert Mustacchi     {
698d14abf15SRobert Mustacchi         sum += NTOH16(*hdr);
699d14abf15SRobert Mustacchi         len_in_bytes -= 2;
700d14abf15SRobert Mustacchi         hdr++;
701d14abf15SRobert Mustacchi     }
702d14abf15SRobert Mustacchi 
703d14abf15SRobert Mustacchi     /* add left-over byte, if any */
704d14abf15SRobert Mustacchi     if (len_in_bytes)
705d14abf15SRobert Mustacchi     {
706d14abf15SRobert Mustacchi         sum += ((NTOH16(*hdr)) & 0xFF00);
707d14abf15SRobert Mustacchi     }
708d14abf15SRobert Mustacchi 
709d14abf15SRobert Mustacchi     return sum;
710d14abf15SRobert Mustacchi }
711d14abf15SRobert Mustacchi 
712d14abf15SRobert Mustacchi INLINE STATIC u8_t
validate_cksum(u32_t sum)713d14abf15SRobert Mustacchi validate_cksum(u32_t sum)
714d14abf15SRobert Mustacchi {
715d14abf15SRobert Mustacchi     // len - the length in words of the header
716d14abf15SRobert Mustacchi     // returns true iff the checksum (already written in the headr) is valid
717d14abf15SRobert Mustacchi 
718d14abf15SRobert Mustacchi     // fold 32-bit sum to 16 bits
719d14abf15SRobert Mustacchi     while (sum >> 16)
720d14abf15SRobert Mustacchi     {
721d14abf15SRobert Mustacchi         sum = (sum & 0xffff) + (sum >> 16);
722d14abf15SRobert Mustacchi     }
723d14abf15SRobert Mustacchi 
724d14abf15SRobert Mustacchi     return ((u16_t)(sum) == 0xffff);
725d14abf15SRobert Mustacchi }
726d14abf15SRobert Mustacchi 
727d14abf15SRobert Mustacchi INLINE STATIC u16_t
get_ip_hdr_len(u8_t * hdr)728d14abf15SRobert Mustacchi get_ip_hdr_len(u8_t *hdr)
729d14abf15SRobert Mustacchi {
730d14abf15SRobert Mustacchi     // returns the ip header length in bytes
731d14abf15SRobert Mustacchi     u16_t ip_hdr_len = 40; // ipv6 header length, we won't support ipv6 with extension header for now
732d14abf15SRobert Mustacchi 
733d14abf15SRobert Mustacchi     if ((hdr[0] & 0xf0) == 0x40)
734d14abf15SRobert Mustacchi     {
735d14abf15SRobert Mustacchi         // ipv4, the lower 4 bit of the 1st byte of ip header
736d14abf15SRobert Mustacchi         // contains the ip header length in unit of dword(32-bit)
737d14abf15SRobert Mustacchi         ip_hdr_len = ((hdr[0] & 0xf) << 2);
738d14abf15SRobert Mustacchi     }
739d14abf15SRobert Mustacchi     return ip_hdr_len;
740d14abf15SRobert Mustacchi }
741d14abf15SRobert Mustacchi 
/**
 * @description
 * Parse a received frame for a GRE-encapsulated TCP packet
 * (outer mac | outer ip | gre | inner mac | inner ip | tcp) and, when
 * one is found, validate the inner IPv4 header checksum and the inner
 * TCP checksum in software, updating pkt->l2pkt_rx_info->flags.
 * Returns early (flags untouched) whenever the frame is not a
 * supported GRE/TCP encapsulation.
 * NOTE(review): assumes the frame data starts at
 * pkt->l2pkt_rx_info->mem_virt + pdev->params.rcv_buffer_offset -
 * confirm against the buffer-posting code.
 * @param pdev
 * @param pkt - the received packet to inspect.
 */
INLINE void
encap_pkt_parsing(struct _lm_device_t *pdev,
                  lm_packet_t         *pkt)
{
    u16_t tmp, inner_ip_hdr_len, tcp_length;
    u32_t psuedo_cksum;  /* pseudo-header checksum accumulator */
    u8_t *hdr;

    // encapsulated packet:
    // outer mac | outer ip | gre | inner mac | inner ip | tcp
    // minimum encapsulated packet size is:
    // two mac headers + gre header size + tcp header size + two ipv4 headers
    if (pkt->l2pkt_rx_info->total_packet_size < (2*ETHERNET_PACKET_HEADER_SIZE + 2*20 + ETHERNET_GRE_SIZE + 20))
    {
        return;
    }


    // set hdr to the outer ip header
    hdr = pkt->l2pkt_rx_info->mem_virt + pdev->params.rcv_buffer_offset + ETHERNET_PACKET_HEADER_SIZE;
    if (pkt->l2pkt_rx_info->flags & LM_RX_FLAG_VALID_VLAN_TAG)
    {
        hdr += ETHERNET_VLAN_TAG_SIZE;
    }

    // in case this is not a standard ETH packet (e.g. management, or in
    // general non ipv4/ipv6), it is for sure not gre so we can end here.
    // if outer header is ipv4, protocol is the ninth octet (0x2f == GRE)
    // if outer header is ipv6, next header is the sixth octet
    if (!(((pkt->l2pkt_rx_info->flags & LM_RX_FLAG_IS_IPV4_DATAGRAM) && (hdr[9] == 0x2f)) ||
          ((pkt->l2pkt_rx_info->flags & LM_RX_FLAG_IS_IPV6_DATAGRAM) && (hdr[6] == 0x2f))))
    {
        // this is not an encapsulated packet, no gre tunneling
        // on ipv6 we don't support extension headers
        return;
    }

    // get the length of the outer ip header and set hdr to the gre header
    hdr += get_ip_hdr_len(hdr);

/* GRE header
   | Bits 0-4 | 5-7   | 8-12  | 13-15   | 16-31         |
   | C|0|K|S  | Recur | Flags | Version | Protocol Type |
   |           Checksum (optional)      | Reserved      |
   |           Key (optional)                           |
   |           Sequence Number (optional)               | */

    // check that:
    // checksum present bit is set to 0
    // key present bit is set to 1
    // sequence number present bit is set to 0
    // protocol type should be always equal to 0x6558 (for encapsulating ethernet packets in GRE)
    if (((hdr[0] & 0xb0) != 0x20) || (hdr[2] != 0x65) || (hdr[3] != 0x58))
    {
        return;
    }
    // set hdr to the inner mac header
    hdr += ETHERNET_GRE_SIZE;

    // The first two octets of the tag are the Tag Protocol Identifier (TPID) value of 0x8100.
    // This is located in the same place as the EtherType/Length field in untagged frames,
    // so a matching TPID means the inner frame is VLAN tagged.
    if ((hdr[12] == 0x81) && (hdr[13] == 0x00))
    {
        hdr += ETHERNET_VLAN_TAG_SIZE;
    }
    // set hdr to the inner ip header
    hdr += ETHERNET_PACKET_HEADER_SIZE;

    // get the length of the inner ip header
    inner_ip_hdr_len = get_ip_hdr_len(hdr);

    if ((hdr[0] & 0xf0) == 0x40)
    {
        // inner ip header is ipv4
        // if the ip header checksum of the outer header is ok then validate
        // the ip checksum of the inner header as well
        if (pkt->l2pkt_rx_info->flags & LM_RX_FLAG_IP_CKSUM_IS_GOOD)
        {
            // validate the checksum; on failure flip GOOD -> BAD
            if (!validate_cksum(calc_cksum((u16_t*)hdr, inner_ip_hdr_len, 0)))
            {
                SET_FLAGS(pkt->l2pkt_rx_info->flags, LM_RX_FLAG_IP_CKSUM_IS_BAD);
                RESET_FLAGS(pkt->l2pkt_rx_info->flags, LM_RX_FLAG_IP_CKSUM_IS_GOOD);
            }
        }
        // check if protocol field is tcp
        if (hdr[9] == 0x06)
        {
            // create the pseudo header
/* | Bit offset | 0-7    |    8-15  |    16-31   |
   |     0      |    Source address              |
   |    32      |  Destination address           |
   |    64      | Zeros  | Protocol | TCP length | */

            // adding 1 byte of zeros + protocol (0x06) as the initial sum
            // and summing the source and destination addresses (8 bytes)
            psuedo_cksum = calc_cksum((u16_t*)&hdr[12], 8, 0x06);
            // calculate the tcp length from the ipv4 total-length field
            mm_memcpy(&tmp, &hdr[2], sizeof(u16_t));
            tcp_length = NTOH16(tmp) - inner_ip_hdr_len;
            // the TCP length field is the length of the TCP header and data (measured in octets).
            psuedo_cksum += tcp_length;
        }
        else
        {
            // no tcp over ip
            return;
        }
    }
    else if ((hdr[0] & 0xf0) == 0x60)
    {
        // inner ip header is ipv6
        // check if next header field is tcp
        if (hdr[6] == 0x06)
        {
            // tcp over ipv6
            // create the pseudo header
/* | Bit offset | 0-7 | 8-15 | 16-23 |  24-31     |
   |     0      |     Source address              |
   |    32      |                                 |
   |    64      |                                 |
   |    96      |                                 |
   |   128      |   Destination address           |
   |   160      |                                 |
   |   192      |                                 |
   |   224      |                                 |
   |   256      |        TCP length               |
   |   288      |        Zeros       |Next header |*/

            // adding 3 bytes of zeros + next header (0x06) as the initial sum
            // and summing the source and destination addresses (32 bytes)
            psuedo_cksum = calc_cksum((u16_t*)&hdr[8], 32, 0x06);
            // calculate the tcp length
            // in the ip header: the size of the payload in octets, including any extension headers
            mm_memcpy(&tmp, &hdr[4], sizeof(u16_t));
            // subtract the length of the extension headers (if any) beyond
            // the fixed 40-byte ipv6 header
            tcp_length = NTOH16(tmp) - (inner_ip_hdr_len - 40);
            psuedo_cksum += tcp_length;
        }
        else
        {
            // no tcp over ip
            return;
        }
    }
    else
    {
        // neither ipv4 nor ipv6
        return;
    }
    // set hdr to the tcp header
    hdr += inner_ip_hdr_len;

    SET_FLAGS(pkt->l2pkt_rx_info->flags, LM_RX_FLAG_IS_TCP_SEGMENT);
    // calculate the checksum of the rest of the packet (tcp header + data)
    // on top of the pseudo-header sum, then validate it and set exactly one
    // of the GOOD/BAD flags
    if (validate_cksum(calc_cksum((u16_t*)hdr, tcp_length, psuedo_cksum)))
    {
        SET_FLAGS(pkt->l2pkt_rx_info->flags, LM_RX_FLAG_TCP_CKSUM_IS_GOOD);
        RESET_FLAGS(pkt->l2pkt_rx_info->flags, LM_RX_FLAG_TCP_CKSUM_IS_BAD);
    }
    else
    {
        SET_FLAGS(pkt->l2pkt_rx_info->flags, LM_RX_FLAG_TCP_CKSUM_IS_BAD);
        RESET_FLAGS(pkt->l2pkt_rx_info->flags, LM_RX_FLAG_TCP_CKSUM_IS_GOOD);
    }
}
908d14abf15SRobert Mustacchi 
909d14abf15SRobert Mustacchi /*******************************************************************************
910d14abf15SRobert Mustacchi  * Description:
911d14abf15SRobert Mustacchi  * Here the RCQ chain is the chain coordinated with the status block, that is,
912d14abf15SRobert Mustacchi  * the index in the status block describes the RCQ and NOT the rx_bd chain as in
913d14abf15SRobert Mustacchi  * the case of Teton. We run on the delta between the new consumer index of the RCQ
914d14abf15SRobert Mustacchi  * which we get from the sb and the old consumer index of the RCQ.
915d14abf15SRobert Mustacchi  * In cases of both slow and fast path, the consumer of the RCQ is always incremented.
916d14abf15SRobert Mustacchi  *
917d14abf15SRobert Mustacchi  * The assumption which we must stick to all the way is: RCQ and Rx bd chain
918d14abf15SRobert Mustacchi  * have the same size at all times! Otherwise, so help us Alan Bertkey!
919d14abf15SRobert Mustacchi  *
920d14abf15SRobert Mustacchi  * Return:
921d14abf15SRobert Mustacchi  ******************************************************************************/
922d14abf15SRobert Mustacchi u32_t
lm_get_packets_rcvd(struct _lm_device_t * pdev,u32_t const chain_idx,s_list_t * rcvd_list,struct _sp_cqes_info * sp_cqes)923d14abf15SRobert Mustacchi lm_get_packets_rcvd( struct _lm_device_t  *pdev,
924d14abf15SRobert Mustacchi                      u32_t const          chain_idx,
925d14abf15SRobert Mustacchi                      s_list_t             *rcvd_list,
926d14abf15SRobert Mustacchi                      struct _sp_cqes_info *sp_cqes)
927d14abf15SRobert Mustacchi {
928d14abf15SRobert Mustacchi     lm_rx_chain_t*          rxq_chain    = &LM_RXQ(pdev, chain_idx); //get a hold of the matching Rx bd chain according to index
929d14abf15SRobert Mustacchi     lm_rcq_chain_t*         rcq_chain    = &LM_RCQ(pdev, chain_idx); //get a hold of the matching RCQ chain according to index
930d14abf15SRobert Mustacchi     lm_bd_chain_t*          rx_chain_bd  = &LM_RXQ_CHAIN_BD(pdev, chain_idx);
931d14abf15SRobert Mustacchi     lm_bd_chain_t*          rx_chain_sge = LM_RXQ_SGE_PTR_IF_VALID(pdev, chain_idx);
932d14abf15SRobert Mustacchi     lm_tpa_chain_t*         tpa_chain    = &LM_TPA(pdev, chain_idx);
933d14abf15SRobert Mustacchi     union eth_rx_cqe*       cqe          = NULL;
934d14abf15SRobert Mustacchi     lm_packet_t*            pkt          = NULL;
935d14abf15SRobert Mustacchi     u32_t                   pkt_cnt      = 0;
936d14abf15SRobert Mustacchi     u16_t                   rx_old_idx   = 0;
937d14abf15SRobert Mustacchi     u16_t                   cq_new_idx   = 0;
938d14abf15SRobert Mustacchi     u16_t                   cq_old_idx   = 0;
939d14abf15SRobert Mustacchi     enum eth_rx_cqe_type    cqe_type     = MAX_ETH_RX_CQE_TYPE;
940d14abf15SRobert Mustacchi 
941d14abf15SRobert Mustacchi     DbgMessage(pdev, INFORMl2 , "lm_get_packets_rcvd inside!\n");
942d14abf15SRobert Mustacchi 
943d14abf15SRobert Mustacchi     /* make sure to zeroize the sp_cqes... */
944d14abf15SRobert Mustacchi     mm_mem_zero( sp_cqes, sizeof(struct _sp_cqes_info) );
945d14abf15SRobert Mustacchi 
946d14abf15SRobert Mustacchi     /* Get the new consumer idx.  The bd's between rcq_new_idx and rcq_old_idx
947d14abf15SRobert Mustacchi      * are bd's containing receive packets.
948d14abf15SRobert Mustacchi      */
949d14abf15SRobert Mustacchi     cq_new_idx = mm_le16_to_cpu(*(rcq_chain->hw_con_idx_ptr));
950d14abf15SRobert Mustacchi 
951d14abf15SRobert Mustacchi     /* The consumer index of the RCQ only, may stop at the end of a page boundary.  In
952d14abf15SRobert Mustacchi      * this case, we need to advance the next to the next one.
953d14abf15SRobert Mustacchi      * In here we do not increase the cons_bd as well! this is since we're dealing here
954d14abf15SRobert Mustacchi      * with the new cons index and not with the actual old one for which, as we progress, we
955d14abf15SRobert Mustacchi      * need to maintain the bd_cons as well.
956d14abf15SRobert Mustacchi      */
957d14abf15SRobert Mustacchi     if((cq_new_idx & lm_bd_chain_usable_bds_per_page(&rcq_chain->bd_chain)) == lm_bd_chain_usable_bds_per_page(&rcq_chain->bd_chain))
958d14abf15SRobert Mustacchi     {
959d14abf15SRobert Mustacchi         cq_new_idx+= lm_bd_chain_bds_skip_eop(&rcq_chain->bd_chain);
960d14abf15SRobert Mustacchi     }
961d14abf15SRobert Mustacchi 
962d14abf15SRobert Mustacchi     DbgBreakIfFastPath( rx_chain_sge && !lm_bd_chains_are_consistent( rx_chain_sge, rx_chain_bd ) );
963d14abf15SRobert Mustacchi 
964d14abf15SRobert Mustacchi     rx_old_idx = lm_bd_chain_cons_idx(rx_chain_bd);
965d14abf15SRobert Mustacchi     cq_old_idx = lm_bd_chain_cons_idx(&rcq_chain->bd_chain);
966d14abf15SRobert Mustacchi 
967d14abf15SRobert Mustacchi     //there is no change in the RCQ consumer index so exit!
968d14abf15SRobert Mustacchi     if (cq_old_idx == cq_new_idx)
969d14abf15SRobert Mustacchi     {
970d14abf15SRobert Mustacchi         DbgMessage(pdev, INFORMl2rx , "there is no change in the RCQ consumer index so exit!\n");
971d14abf15SRobert Mustacchi         return pkt_cnt;
972d14abf15SRobert Mustacchi     }
973d14abf15SRobert Mustacchi 
974d14abf15SRobert Mustacchi     while(cq_old_idx != cq_new_idx)
975d14abf15SRobert Mustacchi     {
976d14abf15SRobert Mustacchi         DbgBreakIfFastPath(S16_SUB(cq_new_idx, cq_old_idx) <= 0);
977d14abf15SRobert Mustacchi         //get hold of the cqe, and find out what it's type corresponds to
978d14abf15SRobert Mustacchi         cqe = (union eth_rx_cqe *)lm_bd_chain_consume_bd(&rcq_chain->bd_chain);
979d14abf15SRobert Mustacchi         DbgBreakIfFastPath(cqe == NULL);
980d14abf15SRobert Mustacchi 
981d14abf15SRobert Mustacchi         //update the cons of the RCQ and the bd_prod pointer of the RCQ as well!
982d14abf15SRobert Mustacchi         //this holds both for slow and fast path!
983d14abf15SRobert Mustacchi         cq_old_idx = lm_bd_chain_cons_idx(&rcq_chain->bd_chain);
984d14abf15SRobert Mustacchi 
985d14abf15SRobert Mustacchi         cqe_type = GET_FLAGS_WITH_OFFSET(cqe->ramrod_cqe.ramrod_type, COMMON_RAMROD_ETH_RX_CQE_TYPE, COMMON_RAMROD_ETH_RX_CQE_TYPE_SHIFT);
986d14abf15SRobert Mustacchi         DbgBreakIf(MAX_ETH_RX_CQE_TYPE <= cqe_type);
987d14abf15SRobert Mustacchi 
988d14abf15SRobert Mustacchi         //the cqe is a ramrod, so do the ramrod and recycle the cqe.
989d14abf15SRobert Mustacchi         //TODO: replace this with the #defines: 1- eth ramrod, 2- toe init ofld ramrod
990d14abf15SRobert Mustacchi         switch(cqe_type)
991d14abf15SRobert Mustacchi         {
992d14abf15SRobert Mustacchi         case RX_ETH_CQE_TYPE_ETH_RAMROD:
993d14abf15SRobert Mustacchi         {
994d14abf15SRobert Mustacchi             /* 13/08/08 NirV: bugbug, temp workaround for dpc watch dog bug,
995d14abf15SRobert Mustacchi              * ignore toe completions on L2 ring - initiate offload */
996d14abf15SRobert Mustacchi             if (cqe->ramrod_cqe.conn_type != TOE_CONNECTION_TYPE)
997d14abf15SRobert Mustacchi             {
998d14abf15SRobert Mustacchi                 if (ERR_IF(sp_cqes->idx >= MAX_NUM_SPE))
999d14abf15SRobert Mustacchi                 {
1000d14abf15SRobert Mustacchi                     DbgBreakMsgFastPath("too many spe completed\n");
1001d14abf15SRobert Mustacchi                     /* we shouldn't get here - there is something very wrong if we did... in this case we will risk
1002d14abf15SRobert Mustacchi                      * completing the ramrods - even though we're holding a lock!!! */
1003d14abf15SRobert Mustacchi                     /* bugbug... */
1004d14abf15SRobert Mustacchi                     DbgBreakIfAll(sp_cqes->idx >= MAX_NUM_SPE);
1005d14abf15SRobert Mustacchi                     return pkt_cnt;
1006d14abf15SRobert Mustacchi                 }
1007d14abf15SRobert Mustacchi                 mm_memcpy((void*)(&(sp_cqes->sp_cqe[sp_cqes->idx++])), (const void*)cqe, sizeof(*cqe));
1008d14abf15SRobert Mustacchi             }
1009d14abf15SRobert Mustacchi 
1010d14abf15SRobert Mustacchi             //update the prod of the RCQ - by this, we recycled the CQE.
1011d14abf15SRobert Mustacchi             lm_bd_chain_bd_produced(&rcq_chain->bd_chain);
1012d14abf15SRobert Mustacchi 
1013d14abf15SRobert Mustacchi #if 0
1014d14abf15SRobert Mustacchi             //in case of ramrod, pop out the Rx bd and push it to the free descriptors list
1015d14abf15SRobert Mustacchi             pkt = (lm_packet_t *) s_list_pop_head(&rxq_chain->active_descq);
1016d14abf15SRobert Mustacchi 
1017d14abf15SRobert Mustacchi             DbgBreakIfFastPath(pkt == NULL);
1018d14abf15SRobert Mustacchi 
1019d14abf15SRobert Mustacchi             s_list_push_tail( &LM_RXQ(pdev, chain_idx).free_descq,
1020d14abf15SRobert Mustacchi                               &pkt->link);
1021d14abf15SRobert Mustacchi #endif
1022d14abf15SRobert Mustacchi             break;
1023d14abf15SRobert Mustacchi         }
1024d14abf15SRobert Mustacchi         case RX_ETH_CQE_TYPE_ETH_FASTPATH:
1025d14abf15SRobert Mustacchi         case RX_ETH_CQE_TYPE_ETH_START_AGG: //Fall through case
1026d14abf15SRobert Mustacchi         { //enter here in case the cqe is a fast path type (data)
1027d14abf15SRobert Mustacchi             u16_t parse_flags = 0;
1028d14abf15SRobert Mustacchi 
1029d14abf15SRobert Mustacchi             DbgMessage(pdev, INFORMl2rx, "lm_get_packets_rcvd- it is fast path, func=%d\n", FUNC_ID(pdev));
1030d14abf15SRobert Mustacchi 
1031d14abf15SRobert Mustacchi             DbgBreakIf( (RX_ETH_CQE_TYPE_ETH_START_AGG == cqe_type)&&
1032d14abf15SRobert Mustacchi                         (lm_tpa_state_disable == tpa_chain->state));
1033d14abf15SRobert Mustacchi 
1034d14abf15SRobert Mustacchi             pkt = (lm_packet_t *) s_list_pop_head(&rxq_chain->active_descq);
1035d14abf15SRobert Mustacchi             parse_flags = mm_le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags);
1036d14abf15SRobert Mustacchi 
1037d14abf15SRobert Mustacchi             DbgBreakIfFastPath( NULL == pkt );
1038d14abf15SRobert Mustacchi 
1039d14abf15SRobert Mustacchi #if DBG
1040d14abf15SRobert Mustacchi             if CHK_NULL( pkt )
1041d14abf15SRobert Mustacchi             {
1042d14abf15SRobert Mustacchi                 return 0;
1043d14abf15SRobert Mustacchi             }
1044d14abf15SRobert Mustacchi #endif // DBG
1045d14abf15SRobert Mustacchi 
1046d14abf15SRobert Mustacchi             DbgBreakIfFastPath(SIG(pkt) != L2PACKET_RX_SIG);
1047d14abf15SRobert Mustacchi 
1048d14abf15SRobert Mustacchi #if L2_RX_BUF_SIG
1049d14abf15SRobert Mustacchi             /* make sure signitures exist before and after the buffer */
1050d14abf15SRobert Mustacchi             DbgBreakIfFastPath(SIG(pkt->u1.rx.mem_virt - pdev->params.rcv_buffer_offset) != L2PACKET_RX_SIG);
1051d14abf15SRobert Mustacchi             DbgBreakIfFastPath(END_SIG(pkt->u1.rx.mem_virt, MAX_L2_CLI_BUFFER_SIZE(pdev, chain_idx)) != L2PACKET_RX_SIG);
1052d14abf15SRobert Mustacchi #endif /* L2_RX_BUF_SIG */
1053d14abf15SRobert Mustacchi 
1054d14abf15SRobert Mustacchi             lm_bd_chain_bds_consumed(rx_chain_bd, 1);
1055d14abf15SRobert Mustacchi             if( rx_chain_sge )
1056d14abf15SRobert Mustacchi             {
1057d14abf15SRobert Mustacchi                 lm_bd_chain_bds_consumed(rx_chain_sge, 1);
1058d14abf15SRobert Mustacchi             }
1059d14abf15SRobert Mustacchi #if defined(_NTDDK_)
1060d14abf15SRobert Mustacchi //PreFast 28182 :Prefast reviewed and suppress this situation shouldn't occur.
1061d14abf15SRobert Mustacchi #pragma warning (push)
1062d14abf15SRobert Mustacchi #pragma warning( disable:28182 )
1063d14abf15SRobert Mustacchi #endif // !_NTDDK_
1064d14abf15SRobert Mustacchi             /* Advance the rx_old_idx to the start bd_idx of the next packet. */
1065d14abf15SRobert Mustacchi             rx_old_idx = pkt->u1.rx.next_bd_idx;
1066d14abf15SRobert Mustacchi             //cq_old_idx = pkt->u1.rx.next_bd_idx;
1067d14abf15SRobert Mustacchi 
1068d14abf15SRobert Mustacchi             CLEAR_FLAGS( pkt->l2pkt_rx_info->flags );
1069d14abf15SRobert Mustacchi 
1070d14abf15SRobert Mustacchi 
1071d14abf15SRobert Mustacchi             if(RX_ETH_CQE_TYPE_ETH_START_AGG == cqe_type)
1072d14abf15SRobert Mustacchi             {
1073d14abf15SRobert Mustacchi                 lm_recv_set_pkt_len(pdev, pkt, mm_le16_to_cpu(cqe->fast_path_cqe.len_on_bd), chain_idx);
1074d14abf15SRobert Mustacchi                 // total_packet_size is only known in stop_TPA
1075d14abf15SRobert Mustacchi 
1076d14abf15SRobert Mustacchi                 DbgBreakIf(0 != cqe->fast_path_cqe.pkt_len_or_gro_seg_len);
1077d14abf15SRobert Mustacchi 
1078d14abf15SRobert Mustacchi                 lm_tpa_start(pdev,
1079d14abf15SRobert Mustacchi                              pkt,
1080d14abf15SRobert Mustacchi                              chain_idx,
1081d14abf15SRobert Mustacchi                              cqe->fast_path_cqe.queue_index);
1082d14abf15SRobert Mustacchi 
1083d14abf15SRobert Mustacchi                 lm_tpa_start_flags_handle(pdev,
1084d14abf15SRobert Mustacchi                                           &(cqe->fast_path_cqe),
1085d14abf15SRobert Mustacchi                                           pkt,
1086d14abf15SRobert Mustacchi                                           parse_flags);
1087d14abf15SRobert Mustacchi             }
1088d14abf15SRobert Mustacchi             else
1089d14abf15SRobert Mustacchi             {
1090d14abf15SRobert Mustacchi                 lm_recv_set_pkt_len(pdev, pkt, mm_le16_to_cpu(cqe->fast_path_cqe.pkt_len_or_gro_seg_len), chain_idx);
1091d14abf15SRobert Mustacchi 
1092d14abf15SRobert Mustacchi                 // In regular mode pkt->l2pkt_rx_info->size == pkt->l2pkt_rx_info->total_packet_size
1093d14abf15SRobert Mustacchi                 // We need total_packet_size for Dynamic HC in order not to ask a question there if we are RSC or regular flow.
1094d14abf15SRobert Mustacchi                 pkt->l2pkt_rx_info->total_packet_size = pkt->l2pkt_rx_info->size;
1095d14abf15SRobert Mustacchi 
1096d14abf15SRobert Mustacchi                 /* make sure packet size if larger than header size and smaller than max packet size of the specific L2 client */
1097d14abf15SRobert Mustacchi                 DbgBreakIfFastPath((pkt->l2pkt_rx_info->total_packet_size < MIN_ETHERNET_PACKET_SIZE) || (pkt->l2pkt_rx_info->total_packet_size > MAX_CLI_PACKET_SIZE(pdev, chain_idx)));
1098d14abf15SRobert Mustacchi 
1099d14abf15SRobert Mustacchi                 // ShayH:packet->size isn't useed anymore by windows we directly put the data on l2pkt_rx_info->size and l2pkt_rx_info->total_packet_size.
1100d14abf15SRobert Mustacchi                 // Need to ask if other UM clients use/need packet->size.
1101d14abf15SRobert Mustacchi                 pkt->size = pkt->l2pkt_rx_info->size;
1102d14abf15SRobert Mustacchi 
1103d14abf15SRobert Mustacchi                 if(OOO_CID(pdev) == chain_idx)
1104d14abf15SRobert Mustacchi                 {
1105d14abf15SRobert Mustacchi                     DbgBreakIfFastPath( ETH_FP_CQE_RAW != (GET_FLAGS( cqe->fast_path_cqe.type_error_flags, ETH_FAST_PATH_RX_CQE_SGL_RAW_SEL ) >>
1106d14abf15SRobert Mustacchi                                                            ETH_FAST_PATH_RX_CQE_SGL_RAW_SEL_SHIFT));
1107d14abf15SRobert Mustacchi 
1108d14abf15SRobert Mustacchi                     //optimized
1109d14abf15SRobert Mustacchi                     /* make sure packet size if larger than header size and smaller than max packet size of the specific L2 client */
1110d14abf15SRobert Mustacchi                     // TODO_OOO - check with flag
1111d14abf15SRobert Mustacchi                     ASSERT_STATIC( sizeof(pkt->u1.rx.sgl_or_raw_data.raw_data) == sizeof(cqe->fast_path_cqe.sgl_or_raw_data.raw_data) );
1112d14abf15SRobert Mustacchi                     mm_memcpy( pkt->u1.rx.sgl_or_raw_data.raw_data, cqe->fast_path_cqe.sgl_or_raw_data.raw_data, sizeof(pkt->u1.rx.sgl_or_raw_data.raw_data) );
1113d14abf15SRobert Mustacchi                 }
1114d14abf15SRobert Mustacchi                 else
1115d14abf15SRobert Mustacchi                 {
1116d14abf15SRobert Mustacchi                     DbgBreakIfFastPath( ETH_FP_CQE_REGULAR != (GET_FLAGS( cqe->fast_path_cqe.type_error_flags, ETH_FAST_PATH_RX_CQE_SGL_RAW_SEL )>>
1117d14abf15SRobert Mustacchi                                                            ETH_FAST_PATH_RX_CQE_SGL_RAW_SEL_SHIFT)  ) ;
1118d14abf15SRobert Mustacchi                 }
1119d14abf15SRobert Mustacchi 
1120d14abf15SRobert Mustacchi                 lm_regular_flags_handle(pdev,
1121d14abf15SRobert Mustacchi                                         &(cqe->fast_path_cqe),
1122d14abf15SRobert Mustacchi                                         pkt,
1123d14abf15SRobert Mustacchi                                         parse_flags);
1124d14abf15SRobert Mustacchi 
1125d14abf15SRobert Mustacchi                 if (GET_FLAGS(pdev->params.ofld_cap_to_ndis, LM_OFFLOAD_ENCAP_PACKET))
1126d14abf15SRobert Mustacchi                 {
1127d14abf15SRobert Mustacchi                     // SW rx checksum for gre encapsulated packets
1128d14abf15SRobert Mustacchi                     encap_pkt_parsing(pdev, pkt);
1129d14abf15SRobert Mustacchi                 }
1130d14abf15SRobert Mustacchi 
1131d14abf15SRobert Mustacchi                 pkt_cnt++;
1132d14abf15SRobert Mustacchi                 s_list_push_tail(rcvd_list, &pkt->link);
1133d14abf15SRobert Mustacchi             }
1134d14abf15SRobert Mustacchi 
1135d14abf15SRobert Mustacchi             if GET_FLAGS(cqe->fast_path_cqe.status_flags, ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG)
1136d14abf15SRobert Mustacchi             {
1137d14abf15SRobert Mustacchi                 SET_FLAGS(pkt->l2pkt_rx_info->flags, LM_RX_FLAG_VALID_HASH_VALUE );
1138d14abf15SRobert Mustacchi                 *pkt->u1.rx.hash_val_ptr = mm_le32_to_cpu(cqe->fast_path_cqe.rss_hash_result);
1139d14abf15SRobert Mustacchi             }
1140d14abf15SRobert Mustacchi 
1141d14abf15SRobert Mustacchi             if(GET_FLAGS(parse_flags,PARSING_FLAGS_INNER_VLAN_EXIST))
1142d14abf15SRobert Mustacchi             {
1143d14abf15SRobert Mustacchi                 u16_t vlan_tag = mm_le16_to_cpu(cqe->fast_path_cqe.vlan_tag);
1144d14abf15SRobert Mustacchi 
1145d14abf15SRobert Mustacchi                 DbgMessage(pdev, INFORMl2, "vlan frame recieved: %x\n",vlan_tag);
1146d14abf15SRobert Mustacchi                   /* fw always set ETH_FAST_PATH_RX_CQE_VLAN_TAG_FLG and pass vlan tag when
1147d14abf15SRobert Mustacchi                      packet with vlan arrives but it remove the vlan from the packet only when
1148d14abf15SRobert Mustacchi                      it configured to remove vlan using params.vlan_removal_enable
1149d14abf15SRobert Mustacchi                   */
1150d14abf15SRobert Mustacchi                   if ((!pdev->params.keep_vlan_tag) &&
1151d14abf15SRobert Mustacchi                       ( OOO_CID(pdev) != chain_idx))
1152d14abf15SRobert Mustacchi                   {
1153d14abf15SRobert Mustacchi                       SET_FLAGS(pkt->l2pkt_rx_info->flags , LM_RX_FLAG_VALID_VLAN_TAG);
1154d14abf15SRobert Mustacchi                       pkt->l2pkt_rx_info->vlan_tag = vlan_tag;
1155d14abf15SRobert Mustacchi                       DbgMessage(pdev, INFORMl2rx, "vlan removed from frame: %x\n",vlan_tag);
1156d14abf15SRobert Mustacchi                   }
1157d14abf15SRobert Mustacchi             }
1158d14abf15SRobert Mustacchi 
1159d14abf15SRobert Mustacchi #if defined(_NTDDK_)
1160d14abf15SRobert Mustacchi #pragma warning (pop)
1161d14abf15SRobert Mustacchi #endif // !_NTDDK_
1162d14abf15SRobert Mustacchi #if DBG
1163d14abf15SRobert Mustacchi             if(GET_FLAGS(parse_flags,PARSING_FLAGS_FRAGMENTATION_STATUS))
1164d14abf15SRobert Mustacchi             {
1165d14abf15SRobert Mustacchi                 LM_COMMON_DRV_STATS_ATOMIC_INC_ETH(pdev, rx_ipv4_frag_count);
1166d14abf15SRobert Mustacchi             }
1167d14abf15SRobert Mustacchi             if(GET_FLAGS(parse_flags,PARSING_FLAGS_LLC_SNAP))
1168d14abf15SRobert Mustacchi             {
1169d14abf15SRobert Mustacchi                 LM_COMMON_DRV_STATS_ATOMIC_INC_ETH(pdev, rx_llc_snap_count);
1170d14abf15SRobert Mustacchi             }
1171d14abf15SRobert Mustacchi             if(GET_FLAGS(parse_flags,PARSING_FLAGS_IP_OPTIONS) &&
1172d14abf15SRobert Mustacchi                 GET_FLAGS(pkt->l2pkt_rx_info->flags ,LM_RX_FLAG_IS_IPV6_DATAGRAM))
1173d14abf15SRobert Mustacchi             {
1174d14abf15SRobert Mustacchi                 LM_COMMON_DRV_STATS_ATOMIC_INC_ETH(pdev, rx_ipv6_ext_count);
1175d14abf15SRobert Mustacchi             }
1176d14abf15SRobert Mustacchi #endif // DBG
1177d14abf15SRobert Mustacchi 
1178d14abf15SRobert Mustacchi             /* We use to assert that if we got the PHY_DECODE_ERROR it was always a result of DROP_MAC_ERR, since we don't configure
1179d14abf15SRobert Mustacchi              * DROP_MAC_ERR anymore, we don't expect this flag to ever be on.*/
1180d14abf15SRobert Mustacchi             DbgBreakIfFastPath( GET_FLAGS(cqe->fast_path_cqe.type_error_flags, ETH_FAST_PATH_RX_CQE_PHY_DECODE_ERR_FLG) );
1181d14abf15SRobert Mustacchi 
1182d14abf15SRobert Mustacchi             DbgBreakIfFastPath(cqe->fast_path_cqe.type_error_flags &
1183d14abf15SRobert Mustacchi                             ~(ETH_FAST_PATH_RX_CQE_TYPE |
1184d14abf15SRobert Mustacchi                               ETH_FAST_PATH_RX_CQE_PHY_DECODE_ERR_FLG |
1185d14abf15SRobert Mustacchi                               ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG |
1186d14abf15SRobert Mustacchi                               ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG |
1187d14abf15SRobert Mustacchi                               ETH_FAST_PATH_RX_CQE_SGL_RAW_SEL));
1188d14abf15SRobert Mustacchi 
1189d14abf15SRobert Mustacchi 
1190d14abf15SRobert Mustacchi             break;
1191d14abf15SRobert Mustacchi         }
1192d14abf15SRobert Mustacchi         case RX_ETH_CQE_TYPE_ETH_STOP_AGG:
1193d14abf15SRobert Mustacchi         {//TPA stop
1194d14abf15SRobert Mustacchi             DbgBreakIf( lm_tpa_state_disable == tpa_chain->state);
1195d14abf15SRobert Mustacchi 
1196d14abf15SRobert Mustacchi             pkt_cnt = lm_tpa_stop(pdev,
1197d14abf15SRobert Mustacchi                                   rcvd_list,
1198d14abf15SRobert Mustacchi                                   &(cqe->end_agg_cqe),
1199d14abf15SRobert Mustacchi                                   chain_idx,
1200d14abf15SRobert Mustacchi                                   pkt_cnt,
1201d14abf15SRobert Mustacchi                                   cqe->end_agg_cqe.queue_index);
1202d14abf15SRobert Mustacchi 
1203d14abf15SRobert Mustacchi             //update the prod of the RCQ - by this, we recycled the CQE.
1204d14abf15SRobert Mustacchi             lm_bd_chain_bd_produced(&rcq_chain->bd_chain);
1205d14abf15SRobert Mustacchi             break;
1206d14abf15SRobert Mustacchi         }
1207d14abf15SRobert Mustacchi         case MAX_ETH_RX_CQE_TYPE:
1208d14abf15SRobert Mustacchi         default:
1209d14abf15SRobert Mustacchi             {
1210d14abf15SRobert Mustacchi                 DbgBreakMsg("CQE type not supported");
1211d14abf15SRobert Mustacchi             }
1212d14abf15SRobert Mustacchi 
1213d14abf15SRobert Mustacchi         }
1214d14abf15SRobert Mustacchi     }
1215d14abf15SRobert Mustacchi 
1216d14abf15SRobert Mustacchi     // TODO: Move index update to a more suitable place
1217d14abf15SRobert Mustacchi     rx_chain_bd->cons_idx = rx_old_idx;
1218d14abf15SRobert Mustacchi     if( rx_chain_sge )
1219d14abf15SRobert Mustacchi     {
1220d14abf15SRobert Mustacchi         rx_chain_sge->cons_idx = rx_old_idx;
1221d14abf15SRobert Mustacchi     }
1222d14abf15SRobert Mustacchi 
1223d14abf15SRobert Mustacchi     //notify the fw of the prod
1224d14abf15SRobert Mustacchi     lm_rx_set_prods(pdev, rcq_chain->iro_prod_offset, &rcq_chain->bd_chain, rx_chain_bd, rx_chain_sge ,chain_idx);
1225d14abf15SRobert Mustacchi 
1226d14abf15SRobert Mustacchi     DbgMessage(pdev, INFORMl2rx, "lm_get_packets_rcvd- bd con: %d bd prod: %d \n",
1227d14abf15SRobert Mustacchi                                 lm_bd_chain_cons_idx(rx_chain_bd), lm_bd_chain_prod_idx(rx_chain_bd));
1228d14abf15SRobert Mustacchi     DbgMessage(pdev, INFORMl2rx, "lm_get_packets_rcvd- cq con: %d cq prod: %d \n",
1229d14abf15SRobert Mustacchi                                 lm_bd_chain_cons_idx(&rcq_chain->bd_chain), lm_bd_chain_prod_idx(&rcq_chain->bd_chain));
1230d14abf15SRobert Mustacchi     return pkt_cnt;
1231d14abf15SRobert Mustacchi } /* lm_get_packets_rcvd */
1232d14abf15SRobert Mustacchi 
lm_complete_ramrods(struct _lm_device_t * pdev,struct _sp_cqes_info * sp_cqes)1233d14abf15SRobert Mustacchi lm_status_t lm_complete_ramrods(
1234d14abf15SRobert Mustacchi     struct _lm_device_t *pdev,
1235d14abf15SRobert Mustacchi     struct _sp_cqes_info *sp_cqes)
1236d14abf15SRobert Mustacchi {
1237d14abf15SRobert Mustacchi     u8_t idx;
1238d14abf15SRobert Mustacchi 
1239d14abf15SRobert Mustacchi     for (idx = 0; idx < sp_cqes->idx; idx++) {
1240d14abf15SRobert Mustacchi         lm_eth_init_command_comp(pdev, &(sp_cqes->sp_cqe[idx].ramrod_cqe));
1241d14abf15SRobert Mustacchi     }
1242d14abf15SRobert Mustacchi 
1243d14abf15SRobert Mustacchi     return LM_STATUS_SUCCESS;
1244d14abf15SRobert Mustacchi }
1245d14abf15SRobert Mustacchi 
1246d14abf15SRobert Mustacchi /* called by um whenever packets are returned by client
1247d14abf15SRobert Mustacchi    rxq lock is taken by caller */
1248d14abf15SRobert Mustacchi void
lm_return_packet_bytes(struct _lm_device_t * pdev,u32_t const qidx,u32_t const returned_bytes)1249d14abf15SRobert Mustacchi lm_return_packet_bytes( struct _lm_device_t *pdev,
1250d14abf15SRobert Mustacchi                         u32_t const          qidx,
1251d14abf15SRobert Mustacchi                         u32_t const          returned_bytes)
1252d14abf15SRobert Mustacchi {
1253d14abf15SRobert Mustacchi     lm_rx_chain_t *rxq = &LM_RXQ(pdev, qidx);
1254d14abf15SRobert Mustacchi 
1255d14abf15SRobert Mustacchi     rxq->ret_bytes += returned_bytes;
1256d14abf15SRobert Mustacchi 
1257d14abf15SRobert Mustacchi     /* aggregate updates over PCI */
1258d14abf15SRobert Mustacchi 
1259d14abf15SRobert Mustacchi     /* HC_RET_BYTES_TH = min(l2_hc_threshold0 / 2 , 16KB) */
1260d14abf15SRobert Mustacchi     #define HC_RET_BYTES_TH(pdev) (((pdev)->params.hc_threshold0[SM_RX_ID] < 32768) ? ((pdev)->params.hc_threshold0[SM_RX_ID] >> 1) : 16384)
1261d14abf15SRobert Mustacchi 
1262d14abf15SRobert Mustacchi     /* TODO: Future: Add #updatesTH = 20 */
1263d14abf15SRobert Mustacchi 
1264d14abf15SRobert Mustacchi     /* time to update fw ? */
1265d14abf15SRobert Mustacchi     if(S32_SUB(rxq->ret_bytes, rxq->ret_bytes_last_fw_update + HC_RET_BYTES_TH(pdev)) >= 0)
1266d14abf15SRobert Mustacchi     {
1267d14abf15SRobert Mustacchi         /*
1268d14abf15SRobert Mustacchi           !!DP
1269d14abf15SRobert Mustacchi           The test below is to disable dynamic HC for the iSCSI chains
1270d14abf15SRobert Mustacchi         */
1271d14abf15SRobert Mustacchi         // TODO: VF dhc
1272d14abf15SRobert Mustacchi         if (qidx < LM_MAX_RSS_CHAINS(pdev) && IS_PFDEV(pdev)) /* should be fine, if not, you can go for less robust case of != LM_CLI_RX_CHAIN_IDX(pdev, LM_CLI_IDX_ISCSI) */
1273d14abf15SRobert Mustacchi         {
1274d14abf15SRobert Mustacchi             /* There are HC_USTORM_SB_NUM_INDICES (4) index values for each SB to set and we're using the corresponding U indexes from the microcode consts */
1275d14abf15SRobert Mustacchi             LM_INTMEM_WRITE32(PFDEV(pdev), rxq->hc_sb_info.iro_dhc_offset, rxq->ret_bytes, BAR_CSTRORM_INTMEM);
1276d14abf15SRobert Mustacchi             rxq->ret_bytes_last_fw_update = rxq->ret_bytes;
1277d14abf15SRobert Mustacchi         } else if (IS_VFDEV(pdev)) {
1278d14abf15SRobert Mustacchi             VF_REG_WR(pdev, VF_BAR0_CSDM_QUEUES_OFFSET + rxq->hc_sb_info.iro_dhc_offset, rxq->ret_bytes);
1279d14abf15SRobert Mustacchi             rxq->ret_bytes_last_fw_update = rxq->ret_bytes;
1280d14abf15SRobert Mustacchi         }
1281d14abf15SRobert Mustacchi     }
1282d14abf15SRobert Mustacchi }
1283d14abf15SRobert Mustacchi 
1284