1 
2 /*******************************************************************************
3 * lm_l4fp.h - l4 common fast path interface
4 *******************************************************************************/
5 #ifndef _LM_L4FP_H
6 #define _LM_L4FP_H
7 
/* Maximum size of the buffer a single SGE BD may point at */
#define TCP_MAX_SGE_SIZE                   0xffff   /* 64KB - 1 */
/* Maximum size of the SGL */
#define TCP_MAX_SGL_SIZE                   0xffff   /* 64KB - 1: bd_used field is u16_t */
12 
/** Description
 *  Completes buffers on the given connection (presumably towards the mm
 *  layer - NOTE(review): summary inferred from the name, confirm in the .c).
 *  Assumptions: Called only from DPC flow OR deferred cqes. Holds the fp-lock.
 */
void lm_tcp_complete_bufs(
    struct _lm_device_t *pdev,
    lm_tcp_state_t      *tcp,
    lm_tcp_con_t        *con);
18 
/** Description
 *  Accounts 'completed_bytes' against the connection's posted buffers.
 *  Return value semantics are not visible here - NOTE(review): confirm what
 *  the u32_t result represents against the implementation.
 *  Assumptions: Called only from DPC flow OR deferred cqes. Does not hold the
 *  fp-lock.
 */
u32_t lm_tcp_complete_nbytes(
    struct _lm_device_t *pdev,
	lm_tcp_state_t      *tcp,
	lm_tcp_con_t        *con,            /* Rx OR Tx connection */
	u32_t               completed_bytes, /* num bytes completed (might be 0) */
	u8_t                push             /* if == 0, don't complete partially
	                                        completed buffers towards mm */);
27 
/**
 * Description:
 *      Aborts the pending buffers on the given connection:
 *      immediately completes them with the given status.
 */
void lm_tcp_abort_bufs(
    struct _lm_device_t * pdev,   /* device handle */
    lm_tcp_state_t      * tcp,    /* L4 state */
    lm_tcp_con_t    * con,        /* L4 connection to abort buffers on */
    lm_status_t       stat        /* status to abort with */
    );
39 
40 
/******** qe_buffer interface: cyclic NO-OVERRIDE buffer  ****************/
/** Description
 *  Returns the next free cqe slot in the cqe_buffer and updates the buffer
 *  params (head). No-override semantics: never advances past occupied slots.
 */
char * lm_tcp_qe_buffer_next_free_cqe(lm_tcp_qe_buffer_t * cqe_buffer);
47 
/** Description
 *  Returns the next occupied cqe in the cqe_buffer and updates the buffer
 *  params (tail).
 */
char * lm_tcp_qe_buffer_next_occupied_cqe(lm_tcp_qe_buffer_t * cqe_buffer);
53 
/** Description
 *  Returns whether the buffer is empty or not (head == tail).
 */
u8_t lm_tcp_qe_buffer_is_empty(lm_tcp_qe_buffer_t * cqe_buffer);
58 
59 
/******** qe_buffer interface: cyclic OVERRIDE buffer  ****************/
/** Description
 *  Returns the next head location in a cyclic manner. This is an override
 *  function, meaning that the returned head could be overriding a previously
 *  written cqe.
 */
char * lm_tcp_qe_buffer_next_cqe_override(lm_tcp_qe_buffer_t * cqe_buffer);
67 
/** Description
 *  Processes a single rx cqe.
 *  Called as a result of deferred cqes.
 */
void lm_tcp_rx_process_cqe(
    lm_device_t       * pdev,
    struct toe_rx_cqe * cqe,
    lm_tcp_state_t    * tcp,
    u8_t                sb_idx  /* NOTE(review): presumably the status-block index - confirm */
    );
/** Description
 *  Processes a single tx cqe.
 *  Called as a result of deferred cqes.
 */
void lm_tcp_tx_process_cqe(
    lm_device_t        * pdev,
    struct toe_tx_cqe  * cqe,
    lm_tcp_state_t     * tcp
    );
87 
/** Description
 *  Rx fast-path completion for the given tcp state / connection.
 *  NOTE(review): summary inferred from the name only - confirm in the .c file.
 */
void lm_tcp_rx_complete_tcp_fp(
    lm_device_t * pdev,
    lm_tcp_state_t * tcp,
    lm_tcp_con_t * con
    );
93 
/** Description
 *  Tx fast-path completion for the given tcp state / connection.
 *  NOTE(review): summary inferred from the name only - confirm in the .c file.
 */
void lm_tcp_tx_complete_tcp_fp(
    lm_device_t * pdev,
    lm_tcp_state_t * tcp,
    lm_tcp_con_t * con
    );
99 
/** Description
 *  Adds another nbytes to the sws counter, and posts a doorbell if we're
 *  above a certain threshold.
 *  Assumptions: caller took the rx-lock.
 */
void lm_tcp_rx_post_sws (
    lm_device_t    * pdev,
    lm_tcp_state_t * tcp,
    lm_tcp_con_t   * rx_con,
    u32_t            nbytes,
    u8_t             op /* TCP_RX_POST_SWS_INC / _DEC / _SET (defined below) */
    );
#define TCP_RX_POST_SWS_INC 0
#define TCP_RX_POST_SWS_DEC 1
#define TCP_RX_POST_SWS_SET 2
115 
116 /** Description
117  *  while we are in a DPC, we don't pop buffers from the active_tb_list, this function
118  *  helps in determining the next buffer in the active tb list that is valid (i.e. the
119  *  head of active_tb_list had we popped buffers)
120  */
lm_tcp_next_entry_dpc_active_list(lm_tcp_con_t * con)121 static __inline lm_tcp_buffer_t * lm_tcp_next_entry_dpc_active_list(lm_tcp_con_t * con)
122 {
123     if (con->dpc_info.dpc_completed_tail) {
124         return (lm_tcp_buffer_t *)s_list_next_entry(con->dpc_info.dpc_completed_tail);
125     } else {
126         return (lm_tcp_buffer_t *)s_list_peek_head(&con->active_tb_list);
127     }
128 }
129 #endif /* _LM_L4FP_H */
130