
#include "lm5710.h"
#include "lm.h"
#include "lm_l4sp.h"
#include "command.h"
#include "context.h"
#include "bd_chain.h"
#include "mm.h"
#include "mm_l4if.h"
#include "lm_l4fp.h"
#include "lm_l4sp.h"
#include "everest_l5cm_constants.h"
#include "l4debug.h"

/* Sizes of objects that need to be allocated in physical memory */
#define TOE_SP_PHYS_DATA_SIZE ((sizeof(lm_tcp_slow_path_phys_data_t) + CACHE_LINE_SIZE_MASK) & ~CACHE_LINE_SIZE_MASK)
#define TOE_DB_RX_DATA_SIZE   ((sizeof(struct toe_rx_db_data) + CACHE_LINE_SIZE_MASK) & ~CACHE_LINE_SIZE_MASK)
#define TOE_DB_TX_DATA_SIZE   ((sizeof(struct toe_tx_db_data) + CACHE_LINE_SIZE_MASK) & ~CACHE_LINE_SIZE_MASK)
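/* Each of the sizes above is sizeof() rounded up to a whole multiple of the
 * cache line size, so the physical data blocks stay cache-line aligned. */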

#define TCP_XCM_DEFAULT_DEL_ACK_MAX_CNT 2

l4_tcp_con_state_t lm_tcp_calc_state (
    lm_device_t    * pdev,
    lm_tcp_state_t * tcp,
    u8_t             fin_was_sent
    );

/** Description
 *  Callback function for an SPE that is completed internally in the VBD
 *  driver (not via FW).
 */
void lm_tcp_comp_cb(
    struct _lm_device_t *pdev,
    struct sq_pending_command *pending);


/* GilR 11/13/2006 - TODO - ttl is temporarily overloaded for Ethereal capture L4/L2 debugging */
#define TOE_DBG_TTL 200
#define ISCSI_DBG_TTL 222

#define TIMERS_TICKS_PER_SEC        (u32_t)(1000)//(1 / TIMERS_TICK_SIZE_CHIP)
#define TSEMI_CLK1_TICKS_PER_SEC    (u32_t)(1000)//(1 / TSEMI_CLK1_RESUL_CHIP)

u32_t lm_get_num_of_cashed_grq_bds(struct _lm_device_t *pdev)
{
    return USTORM_TOE_GRQ_CACHE_NUM_BDS;
}

// this function is used only to verify that the defines above are correct (at compile time - saves the runtime checks...)
static void _fake_func_verify_defines(void)
{
    ASSERT_STATIC( TIMERS_TICKS_PER_SEC     == (1 / TIMERS_TICK_SIZE_CHIP) ) ;
    ASSERT_STATIC( TSEMI_CLK1_TICKS_PER_SEC == (1 / TSEMI_CLK1_RESUL_CHIP) ) ;
}

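/* Convert a time value given in src_ticks_per_sec units into trg_ticks_per_sec
 * units, e.g. 50 ticks at 100 ticks/sec become 500 ticks at 1000 ticks/sec.
 * A non-zero source time is never converted to 0 (it is rounded up to 1). */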
static __inline u32_t lm_time_resolution(
    lm_device_t *pdev,
    u32_t src_time,
    u32_t src_ticks_per_sec,
    u32_t trg_ticks_per_sec)
{
    u64_t result;
    u64_t tmp_result;
    u32_t dev_factor;

    DbgBreakIf(!(src_ticks_per_sec && trg_ticks_per_sec));

    if (trg_ticks_per_sec > src_ticks_per_sec){
        dev_factor =  trg_ticks_per_sec / src_ticks_per_sec;
        result = src_time * dev_factor;
    } else {
        tmp_result = src_time * trg_ticks_per_sec;

#if defined(_VBD_)
        result = CEIL_DIV(tmp_result, src_ticks_per_sec);
#else
        /* Here we try to avoid a 64-bit division operation */
        if (tmp_result < 0xffffffff) {
            result = (u32_t)tmp_result / src_ticks_per_sec;
        } else {
            /* src_ticks_per_sec and trg_ticks_per_sec parameters come
               from NDIS and so far the values observed were 100 or 1000,
               depending on Windows version. These parameters define
               TCP timers resolution and are unlikely to change significantly
               in the future.
               So, here we assume that if the (src_time * trg_ticks_per_sec)
               product is out of 32-bit range it is because of the src_time value.
            */
            DbgBreakIf(src_time < src_ticks_per_sec);
            result = ((u64_t)(src_time / src_ticks_per_sec)) * trg_ticks_per_sec;
        }
#endif
    }

    if(src_time && !result) {
        result = 1;
    }
    DbgMessage(pdev, VERBOSEl4sp,
                "lm_time_resolution: src_time=%d, src_ticks_per_sec=%d, trg_ticks_per_sec=%d, result=%d\n",
                src_time, src_ticks_per_sec, trg_ticks_per_sec, result);

    DbgBreakIf(result > 0xffffffff);
    return (u32_t)result;
}

lm_status_t lm_tcp_erase_connection(
    IN    struct _lm_device_t   * pdev,
    IN    lm_tcp_state_t        * tcp)
{
    lm_status_t status = LM_STATUS_SUCCESS;
    lm_tcp_con_t *rx_con;
    lm_tcp_con_t *tx_con;
    MM_INIT_TCP_LOCK_HANDLE();
    if (!lm_fl_reset_is_inprogress(pdev)) {
        return LM_STATUS_FAILURE;
    }

    DbgMessage(pdev, FATAL, "##lm_tcp_erase_connection(0x%x)\n",tcp->cid);
    if (tcp->ulp_type == TOE_CONNECTION_TYPE) {
        rx_con = tcp->rx_con;
        tx_con = tcp->tx_con;
        mm_acquire_tcp_lock(pdev, tx_con);
        tx_con->flags |= TCP_POST_BLOCKED;
        lm_tcp_abort_bufs(pdev, tcp, tx_con, LM_STATUS_CONNECTION_CLOSED);
        if (tx_con->abortion_under_flr) {
            DbgMessage(pdev, FATAL, "##lm_tcp_erase_connection(0x%x): Tx aborted\n",tcp->cid);
        }
        mm_release_tcp_lock(pdev, tx_con);

        /* Rx abortive part... */

        mm_acquire_tcp_lock(pdev, rx_con);
        /* Abort pending buffers */
        rx_con->flags |= TCP_POST_BLOCKED;
        if (mm_tcp_indicating_bufs(rx_con)) {
            DbgMessage(pdev, FATAL, "##lm_tcp_erase_connection(0x%x): under indication\n",tcp->cid);
            DbgBreak();
            mm_release_tcp_lock(pdev, rx_con);
            return LM_STATUS_FAILURE;
        }
        lm_tcp_abort_bufs(pdev, tcp, rx_con, LM_STATUS_CONNECTION_CLOSED);
        if (rx_con->abortion_under_flr) {
            DbgMessage(pdev, FATAL, "##lm_tcp_erase_connection(0x%x): Rx aborted\n",tcp->cid);
        }

        mm_release_tcp_lock(pdev, rx_con);
    }
    mm_tcp_del_tcp_state(pdev,tcp);
    return status;
}

void lm_tcp_flush_db(
    struct _lm_device_t * pdev,
    lm_tcp_state_t *tcp)
{
    struct toe_tx_doorbell  dq_flush_msg;
    lm_tcp_con_t *rx_con, *tx_con;
    MM_INIT_TCP_LOCK_HANDLE();

    DbgBreakIf(!(pdev && tcp));

    if (tcp->ulp_type != TOE_CONNECTION_TYPE) {
        DbgMessage(pdev, WARNl4sp, "##lm_tcp_flush_db is not sent for connection(0x%x) of type %d\n",tcp->cid, tcp->ulp_type);
        return;
    }

    DbgMessage(pdev, INFORMl4sp, "##lm_tcp_flush_db (cid=0x%x)\n",tcp->cid);
    rx_con = tcp->rx_con;
    tx_con = tcp->tx_con;

    dq_flush_msg.hdr.data = (TOE_CONNECTION_TYPE << DOORBELL_HDR_T_CONN_TYPE_SHIFT);
    dq_flush_msg.params = TOE_TX_DOORBELL_FLUSH;
    dq_flush_msg.nbytes = 0;


    mm_acquire_tcp_lock(pdev, tx_con);
    tx_con->flags |= TCP_DB_BLOCKED;
    mm_release_tcp_lock(pdev, tx_con);

    mm_acquire_tcp_lock(pdev, rx_con);
    rx_con->flags |= TCP_DB_BLOCKED;
    mm_release_tcp_lock(pdev, rx_con);

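    /* Both connections are now blocked from posting further doorbells;
     * ring the flush doorbell for this CID. */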
    DOORBELL(pdev, tcp->cid, *((u32_t *)&dq_flush_msg));
}

/* Description:
 *  allocate l4 resources
 * Assumptions:
 *  - lm_init_params was already called
 * Returns:
 *  SUCCESS or any failure */
static lm_status_t lm_tcp_alloc_resc(lm_device_t *pdev)
{
    lm_toe_info_t *toe_info;
    lm_bd_chain_t *bd_chain;
    u32_t mem_size;
    long i;
    u8_t mm_cli_idx       = 0;

    DbgMessage(pdev, VERBOSEl4sp, "##lm_tcp_alloc_resc\n");

    // NOP, call this function only to prevent a compile warning.
    _fake_func_verify_defines();

    mm_cli_idx = LM_RESOURCE_NDIS;//!!DP mm_cli_idx_to_um_idx(LM_CLI_IDX_NDIS);

    toe_info = &pdev->toe_info;
    LM_TOE_FOREACH_TSS_IDX(pdev, i)
    {
        /* allocate SCQs */
        bd_chain = &toe_info->scqs[i].bd_chain;
        mem_size = pdev->params.l4_scq_page_cnt * LM_PAGE_SIZE;
        bd_chain->bd_chain_virt = mm_alloc_phys_mem(pdev, mem_size, &bd_chain->bd_chain_phy, 0, mm_cli_idx);
        if (!bd_chain->bd_chain_virt) {
            DbgBreakIf(DBG_BREAK_ON(MEMORY_ALLOCATION_FAILURE));
            return LM_STATUS_RESOURCE;
        }
        mm_memset(bd_chain->bd_chain_virt, 0, mem_size);
    }

    LM_TOE_FOREACH_RSS_IDX(pdev, i)
    {
        /* allocate RCQs */
        bd_chain = &toe_info->rcqs[i].bd_chain;
        mem_size = pdev->params.l4_rcq_page_cnt * LM_PAGE_SIZE;
        bd_chain->bd_chain_virt = mm_alloc_phys_mem(pdev, mem_size, &bd_chain->bd_chain_phy, 0, mm_cli_idx);
        if (!bd_chain->bd_chain_virt) {
            DbgBreakIf(DBG_BREAK_ON(MEMORY_ALLOCATION_FAILURE));
            return LM_STATUS_RESOURCE;
        }
        mm_memset(bd_chain->bd_chain_virt, 0, mem_size);

        /* allocate GRQs */
        bd_chain = &toe_info->grqs[i].bd_chain;
        mem_size = pdev->params.l4_grq_page_cnt * LM_PAGE_SIZE;
        bd_chain->bd_chain_virt = mm_alloc_phys_mem(pdev, mem_size, &bd_chain->bd_chain_phy, 0, mm_cli_idx);
        if (!bd_chain->bd_chain_virt) {
            DbgBreakIf(DBG_BREAK_ON(MEMORY_ALLOCATION_FAILURE));
            return LM_STATUS_RESOURCE;
        }
        mm_memset(bd_chain->bd_chain_virt, 0, mem_size);

        DbgBreakIf(toe_info->grqs[i].isles_pool);
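        /* Size the isle pool: default to twice T_TCP_ISLE_ARRAY_SIZE, and
         * clamp a configured value to at least T_TCP_ISLE_ARRAY_SIZE. */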
        if (!pdev->params.l4_isles_pool_size) {
            pdev->params.l4_isles_pool_size = 2 * T_TCP_ISLE_ARRAY_SIZE;
        } else if (pdev->params.l4_isles_pool_size < T_TCP_ISLE_ARRAY_SIZE) {
            pdev->params.l4_isles_pool_size = T_TCP_ISLE_ARRAY_SIZE;
        }
        mem_size = pdev->params.l4_isles_pool_size * sizeof(lm_isle_t);
        toe_info->grqs[i].isles_pool = (lm_isle_t*)mm_alloc_mem(pdev, mem_size, mm_cli_idx);
        if (!toe_info->grqs[i].isles_pool) {
            DbgBreakIf(DBG_BREAK_ON(MEMORY_ALLOCATION_FAILURE));
            return LM_STATUS_RESOURCE;
        }
        mm_memset(toe_info->grqs[i].isles_pool, 0, mem_size);
    }
    if (pdev->params.l4_data_integrity) {
        u32_t pb_idx;
        pdev->toe_info.integrity_info.pattern_size = 256;
        pdev->toe_info.integrity_info.pattern_buf_size = 0x10000 + pdev->toe_info.integrity_info.pattern_size;
        pdev->toe_info.integrity_info.pattern_buf = mm_alloc_mem(pdev, pdev->toe_info.integrity_info.pattern_buf_size, mm_cli_idx);
        if (!pdev->toe_info.integrity_info.pattern_buf) {
            DbgBreakIf(DBG_BREAK_ON(MEMORY_ALLOCATION_FAILURE));
            return LM_STATUS_RESOURCE;
        }
        for (pb_idx = 0; pb_idx < pdev->toe_info.integrity_info.pattern_buf_size; pb_idx++) {
            pdev->toe_info.integrity_info.pattern_buf[pb_idx] = pb_idx %  pdev->toe_info.integrity_info.pattern_size;
        }
    }

    /* Allocate rss-update physical data */
    pdev->toe_info.rss_update_data = (struct toe_rss_update_ramrod_data *)
                                      mm_alloc_phys_mem(pdev, sizeof(*pdev->toe_info.rss_update_data),
                                                        &pdev->toe_info.rss_update_data_phys,
                                                        0,0);

    if (pdev->toe_info.rss_update_data == NULL)
    {
        DbgBreakIf(DBG_BREAK_ON(MEMORY_ALLOCATION_FAILURE));
        return LM_STATUS_RESOURCE;
    }

    return LM_STATUS_SUCCESS;
}

static void _lm_get_default_l4cli_params(lm_device_t *pdev, l4_ofld_params_t *l4_params)
{
    lm_params_t *def_params = &pdev->params;

    DbgBreakIf(def_params->l4cli_ack_frequency > 0xff);
    l4_params->ack_frequency = def_params->l4cli_ack_frequency & 0xff;

    DbgBreakIf(def_params->l4cli_delayed_ack_ticks > 0xff);
    l4_params->delayed_ack_ticks = def_params->l4cli_delayed_ack_ticks & 0xff;

    DbgBreakIf(def_params->l4cli_doubt_reachability_retx > 0xff);
    l4_params->doubt_reachability_retx = def_params->l4cli_doubt_reachability_retx & 0xff;

    l4_params->dup_ack_threshold = def_params->l4cli_dup_ack_threshold;

    DbgBreakIf((def_params->l4cli_flags != 0) &&
               (def_params->l4cli_flags != OFLD_PARAM_FLAG_SNAP_ENCAP));
    l4_params->flags = def_params->l4cli_flags;

    DbgBreakIf(def_params->l4cli_max_retx > 0xff);
    l4_params->max_retx = def_params->l4cli_max_retx & 0xff;

    l4_params->nce_stale_ticks = def_params->l4cli_nce_stale_ticks;
    l4_params->push_ticks = def_params->l4cli_push_ticks;

    DbgBreakIf(def_params->l4cli_starting_ip_id > 0xffff);
    l4_params->starting_ip_id = def_params->l4cli_starting_ip_id & 0xffff;

    l4_params->sws_prevention_ticks = def_params->l4cli_sws_prevention_ticks;
    l4_params->ticks_per_second = def_params->l4cli_ticks_per_second;

}

/** Description
 *  requests generic buffers from the generic buffer pool and attaches the generic buffers
 *  to the grq-bd chain. It attaches the amount of buffers received, no matter if they were
 *  less than requested. Function always tries to fill the bd-chain (i.e. requests bd_chain->bd_left)
 * Assumptions:
 *  - called after the generic buffer pool is ready to deliver generic buffers
 *  - whoever calls this function will handle checking if a work item for allocating more
 *    buffers is needed.
 * Returns:
 *  - TRUE: buffers were written
 *  - FALSE: o/w
 */
u8_t lm_tcp_rx_fill_grq(struct _lm_device_t * pdev, u8_t sb_idx, d_list_t * bypass_gen_pool_list, u8_t filling_mode)
{
    lm_toe_info_t        * toe_info;
    lm_tcp_grq_t         * grq;
    struct toe_rx_grq_bd * grq_bd;
    lm_tcp_gen_buf_t     * curr_gen_buf;
    lm_bd_chain_t        * bd_chain;
    d_list_t               tmp_gen_buf_list;
    d_list_t               free_gen_buf_list;
    u16_t                  num_bufs; /* limited by bd_chain->bd_left */
    u16_t                  num_bufs_threshold;
    u32_t                  num_bypass_buffs;
    u32_t                  avg_dpc_cnt;

    toe_info = &pdev->toe_info;
    grq      = &toe_info->grqs[sb_idx];
    bd_chain = &grq->bd_chain;
    num_bufs = bd_chain->bd_left; /* required number of bufs from grq pool */

    DbgMessage(pdev, VERBOSEl4rx, "###lm_tcp_rx_fill_grq bd_left (to be filled)= %d\n", bd_chain->bd_left);

    if (!pdev->params.l4_grq_filling_threshold_divider) {
        num_bufs_threshold = 1;
    } else {
        if (pdev->params.l4_grq_filling_threshold_divider < 2) {
            pdev->params.l4_grq_filling_threshold_divider = 2;
        }
        num_bufs_threshold = bd_chain->capacity / pdev->params.l4_grq_filling_threshold_divider;
    }

    d_list_init(&tmp_gen_buf_list, NULL, NULL, 0);
    d_list_init(&free_gen_buf_list, NULL, NULL, 0);
    if (bypass_gen_pool_list != NULL) {
        num_bypass_buffs = d_list_entry_cnt(bypass_gen_pool_list);
    } else {
        num_bypass_buffs = 0;
    }

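    /* Three filling modes are handled below: FILL_GRQ_MIN_CASHED_BDS tops the
     * chain up only to the ustorm BD-cache size, FILL_GRQ_LOW_THRESHOLD tops
     * it up to the GRQ low threshold, and the default (full) mode fills the
     * whole chain, or up to high_bds_threshold when one is configured. */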
    if (filling_mode == FILL_GRQ_MIN_CASHED_BDS) {
        u16_t bufs_in_chain = bd_chain->capacity - num_bufs;
        if (bufs_in_chain >= USTORM_TOE_GRQ_CACHE_NUM_BDS) {
            return 0;
        } else {
            num_bufs = USTORM_TOE_GRQ_CACHE_NUM_BDS - bufs_in_chain;
        }
    } else if (filling_mode == FILL_GRQ_LOW_THRESHOLD) {
        u16_t bufs_in_chain = bd_chain->capacity - num_bufs;
        DbgBreakIf(grq->low_bds_threshold < USTORM_TOE_GRQ_CACHE_NUM_BDS);
        if (grq->low_bds_threshold < USTORM_TOE_GRQ_CACHE_NUM_BDS) {
            grq->low_bds_threshold = 3*GRQ_XOFF_TH;
        }
        if (bufs_in_chain >= grq->low_bds_threshold) {
            return 0;
        } else {
            num_bufs = grq->low_bds_threshold - bufs_in_chain;
        }
    } else {
        if (grq->high_bds_threshold) {
            u16_t bufs_in_chain = bd_chain->capacity - num_bufs;
            if (bufs_in_chain >= grq->high_bds_threshold) {
                return 0;
            } else {
                num_bufs = grq->high_bds_threshold - bufs_in_chain;
            }
        }
        if (num_bufs < num_bufs_threshold) {
            if (num_bufs > num_bypass_buffs) {
                num_bufs = (u16_t)num_bypass_buffs; /* Partly fill grq from bypass only */
                grq->gen_bufs_compensated_from_bypass_only += num_bypass_buffs;
            }
            if (!num_bufs) {
                return 0; /* nothing to fill, or defer filling to avoid
                             acquiring the GEN_POOL_LOCK too often */
            }
        }
    }

    if (num_bypass_buffs < num_bufs) {
        /* we can safely cast the returned value since we know we ask for max 2^16 */
        u16_t num_required_buffs = num_bufs - num_bypass_buffs;
        mm_tcp_get_gen_bufs(pdev, &tmp_gen_buf_list, num_required_buffs, sb_idx);
    }
    while ((d_list_entry_cnt(&tmp_gen_buf_list) < num_bufs) && num_bypass_buffs) {
        lm_tcp_gen_buf_t * tmp_buf = NULL;
        d_list_entry_t * curr_entry = d_list_pop_head(bypass_gen_pool_list);
        tmp_buf = (lm_tcp_gen_buf_t *)curr_entry;
        DbgBreakIf(!curr_entry);
        if (tmp_buf->flags & GEN_FLAG_FREE_WHEN_DONE)
        {
            d_list_push_head(&free_gen_buf_list, curr_entry);
        }
        else
        {
            d_list_push_head(&tmp_gen_buf_list, curr_entry);
        }
        num_bypass_buffs--;
    }
    num_bufs = (u16_t)d_list_entry_cnt(&tmp_gen_buf_list);
    if ((bypass_gen_pool_list != NULL) && d_list_entry_cnt(&free_gen_buf_list))
    {
        d_list_add_tail(bypass_gen_pool_list, &free_gen_buf_list);
    }
    /* stats... */
    grq->num_grqs_last_dpc = num_bufs;
    if (grq->num_grqs_last_dpc) {  /* Exclude zeroed value from statistics */
        if (grq->num_grqs_last_dpc > grq->max_grqs_per_dpc) {
            grq->max_grqs_per_dpc = grq->num_grqs_last_dpc;
        }
        /* we don't want to wrap around... */
        if ((grq->sum_grqs_last_x_dpcs + grq->num_grqs_last_dpc) < grq->sum_grqs_last_x_dpcs) {
            grq->avg_dpc_cnt = 0;
            grq->sum_grqs_last_x_dpcs = 0;
        }
        grq->sum_grqs_last_x_dpcs += grq->num_grqs_last_dpc;
        grq->avg_dpc_cnt++;
        avg_dpc_cnt = grq->avg_dpc_cnt;
        if (avg_dpc_cnt) { /* Prevent division by 0 */
            grq->avg_grqs_per_dpc = grq->sum_grqs_last_x_dpcs / avg_dpc_cnt;
        } else {
            grq->sum_grqs_last_x_dpcs = 0;
        }
    }

    DbgBreakIf(num_bufs != tmp_gen_buf_list.cnt);

    if (num_bufs < bd_chain->bd_left) {
        grq->num_deficient++;
    }

    if (!num_bufs) {
        DbgMessage(pdev, WARNl4rx, "no buffers returned from generic pool\n");
        return 0; /* nothing to do */
    }
    curr_gen_buf = (lm_tcp_gen_buf_t *)d_list_peek_head(&tmp_gen_buf_list);

    if (filling_mode == FILL_GRQ_LOW_THRESHOLD) {
        grq->gen_bufs_compensated_till_low_threshold += num_bufs;
    }
    while (num_bufs--) {
        DbgBreakIf(SIG(curr_gen_buf->buf_virt) != L4GEN_BUFFER_SIG);
        DbgMessage(pdev, VERBOSEl4rx, "curr_gen_buf->buf_virt=0x%p, END_SIG=0x%x\n", curr_gen_buf->buf_virt,
                    END_SIG(curr_gen_buf->buf_virt, LM_TCP_GEN_BUF_SIZE(pdev)));
        DbgBreakIf(END_SIG(curr_gen_buf->buf_virt, LM_TCP_GEN_BUF_SIZE(pdev)) != L4GEN_BUFFER_SIG_END);

        /* initialize curr_gen_buf */
        curr_gen_buf->ind_bytes    = 0;
        curr_gen_buf->ind_nbufs    = 0;
        curr_gen_buf->placed_bytes = 0;
        curr_gen_buf->refcnt       = 0;
        curr_gen_buf->tcp          = NULL;

        grq_bd = (struct toe_rx_grq_bd *)lm_toe_bd_chain_produce_bd(bd_chain);
        DbgBreakIf(!grq_bd);
        /* attach gen buf to grq */
        DbgBreakIf(!curr_gen_buf || !curr_gen_buf->buf_phys.as_u64);
        grq_bd->addr_hi = curr_gen_buf->buf_phys.as_u32.high;
        grq_bd->addr_lo = curr_gen_buf->buf_phys.as_u32.low;

        curr_gen_buf = (lm_tcp_gen_buf_t *)d_list_next_entry(&curr_gen_buf->link);
        /* enlisting the gen bufs on the active list is done after the loop (more efficient) */
    }

    if (bd_chain->bd_left) {
        DbgMessage(pdev, INFORMl4rx, "GRQ bd-chain wasn't filled completely\n");
    }
    if (d_list_entry_cnt(&tmp_gen_buf_list))
    {
        d_list_add_tail(&grq->active_gen_list, &tmp_gen_buf_list);
    }
    return (tmp_gen_buf_list.cnt != 0); /* TRUE if any buffers were actually placed */
}

/* Description:
 *  initialize l4 VBD resources
 * Assumptions:
 *  - lm_init_params was already called
 *  - lm_tcp_alloc_resc was already called
 *  - um GRQ pool is ready to supply buffers to lm (?)
 * Returns:
 *  SUCCESS or any failure */
lm_status_t lm_tcp_init_resc(struct _lm_device_t *pdev, u8_t b_is_init )
{
    lm_toe_info_t *toe_info;
    lm_bd_chain_t *bd_chain;
    long i;
    u16_t volatile * sb_indexes;
    u32_t sb_id;

    DbgMessage(pdev, VERBOSEl4sp, "##lm_tcp_init_resc\n");
    toe_info = &pdev->toe_info;
    toe_info->state = LM_TOE_STATE_INIT;

    /* init rest of toe_info fields */
    toe_info->rss_update_cnt = 0;
    toe_info->gen_buf_size = lm_tcp_calc_gen_buf_size(pdev);
    LM_TCP_SET_UPDATE_WINDOW_MODE(pdev, LM_TOE_UPDATE_MODE_SHORT_LOOP);

    if( b_is_init )
    {
        d_list_init(&toe_info->state_blk.neigh_list, NULL, NULL, 0);
        d_list_init(&toe_info->state_blk.path_list, NULL, NULL, 0);
        d_list_init(&toe_info->state_blk.tcp_list, NULL, NULL, 0);
    }

    /* TODO: consider enabling the assertion */
    //DbgBreakIf(pdev->ofld_info.state_blks[STATE_BLOCK_TOE]);
    pdev->ofld_info.state_blks[STATE_BLOCK_TOE] = &toe_info->state_blk;

    LM_TOE_FOREACH_TSS_IDX(pdev, i)
    {
        /* init SCQs */
        lm_tcp_scq_t *scq = &toe_info->scqs[i];
        bd_chain = &scq->bd_chain;
        lm_bd_chain_setup(pdev, bd_chain, bd_chain->bd_chain_virt,
                          bd_chain->bd_chain_phy, (u16_t)pdev->params.l4_scq_page_cnt, sizeof(struct toe_tx_cqe), 1, TRUE);
        /* Assign the SCQ chain consumer pointer to the consumer index in the status block. */
        sb_id = RSS_ID_TO_SB_ID(i);
#ifdef _VBD_
        if (!CHIP_IS_E1x(pdev) && (pdev->params.l4_enable_rss == L4_RSS_DISABLED))
        {
            sb_id = LM_NON_RSS_SB(pdev);
        }
#endif
        sb_indexes = lm_get_sb_indexes(pdev, (u8_t)sb_id);
        sb_indexes[HC_INDEX_TOE_TX_CQ_CONS] = 0;
        scq->hw_con_idx_ptr = sb_indexes + HC_INDEX_TOE_TX_CQ_CONS;
        scq->hc_sb_info.hc_sb = STATUS_BLOCK_NORMAL_TYPE;
        scq->hc_sb_info.hc_index_value = HC_INDEX_TOE_TX_CQ_CONS;
    }


    /* Before initializing the GRQs we need to check whether there are left-overs from before
     * (in case this isn't the initial 'init'); if so, we need to clear them - but outside the loop... */
    if ( !b_is_init ) {
        /* we need to return whatever buffers are still on the grq back to the pool before
         * the new initialization... */
         lm_tcp_clear_grqs(pdev);
    }

    LM_TOE_FOREACH_RSS_IDX(pdev, i)
    {
        lm_tcp_rcq_t *rcq = &toe_info->rcqs[i];
        lm_tcp_grq_t *grq = &toe_info->grqs[i];
        u8_t byte_counter_id;

        sb_id = RSS_ID_TO_SB_ID(i);
#ifdef _VBD_
        if (!CHIP_IS_E1x(pdev) && (pdev->params.l4_enable_rss == L4_RSS_DISABLED))
        {
            sb_id = LM_NON_RSS_SB(pdev);
        }
#endif
        byte_counter_id = CHIP_IS_E1x(pdev)? LM_FW_SB_ID(pdev, sb_id) : LM_FW_DHC_QZONE_ID(pdev, sb_id);

        /* init RCQs */
        bd_chain = &rcq->bd_chain;
        lm_bd_chain_setup(pdev, bd_chain, bd_chain->bd_chain_virt,
                          bd_chain->bd_chain_phy, (u16_t)pdev->params.l4_rcq_page_cnt, sizeof(struct toe_rx_cqe), 1, TRUE);
        rcq->rss_update_pending = 0;
        rcq->suspend_processing = FALSE;
        rcq->update_cid = 0;

        /* Assign the RCQ chain consumer pointer to the consumer index in the status block. */
        sb_indexes = lm_get_sb_indexes(pdev, (u8_t)sb_id);
        sb_indexes[HC_INDEX_TOE_RX_CQ_CONS] = 0;
        rcq->hw_con_idx_ptr = sb_indexes + HC_INDEX_TOE_RX_CQ_CONS;
        rcq->hc_sb_info.hc_sb = STATUS_BLOCK_NORMAL_SL_TYPE;
        rcq->hc_sb_info.hc_index_value = HC_INDEX_TOE_RX_CQ_CONS;
        if (IS_PFDEV(pdev))
        {
            rcq->hc_sb_info.iro_dhc_offset = CSTORM_BYTE_COUNTER_OFFSET(byte_counter_id, HC_INDEX_TOE_RX_CQ_CONS);
        }
        else
        {
            DbgMessage(pdev, FATAL, "Dhc not implemented for VF yet\n");
        }

        /* init GRQs */
        if( b_is_init )
        {
            d_list_init(&grq->active_gen_list, NULL, NULL, 0);
            d_list_init(&grq->aux_gen_list, NULL, NULL, 0);
            if ((u8_t)i != LM_TOE_BASE_RSS_ID(pdev)  ) {
                grq->grq_compensate_on_alloc = TRUE;
                pdev->toe_info.grqs[i].high_bds_threshold = 3*GRQ_XOFF_TH + 1;
            } else {
                grq->grq_compensate_on_alloc = FALSE;
                pdev->toe_info.grqs[i].high_bds_threshold = 0;
            }
            grq->low_bds_threshold = 3*GRQ_XOFF_TH;
        }

        bd_chain = &grq->bd_chain;
        lm_bd_chain_setup(pdev, bd_chain, bd_chain->bd_chain_virt,
                          bd_chain->bd_chain_phy, (u16_t)pdev->params.l4_grq_page_cnt, sizeof(struct toe_rx_grq_bd), 0, TRUE);
        /* fill GRQ (minimum mode) */
        lm_tcp_rx_fill_grq(pdev, (u8_t)i, NULL, FILL_GRQ_MIN_CASHED_BDS);
    }


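    /* Second pass: now that every GRQ holds at least the cached minimum,
     * top each one up to its full (or high-threshold) level. */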
    LM_TOE_FOREACH_RSS_IDX(pdev, i)
    {
        // lm_tcp_grq_t *grq = &toe_info->grqs[i];
        lm_tcp_rx_fill_grq(pdev, (u8_t)i, NULL, FILL_GRQ_FULL);
    }

    return LM_STATUS_SUCCESS;
}


/* init cstorm internal memory for toe
 * assumption - the storm's common intmem (if any) was already initialized */
static void _lm_tcp_init_cstorm_intmem(lm_device_t *pdev)
{
    lm_toe_info_t *toe_info;
    lm_address_t phys_addr;
    lm_tcp_scq_t *scq;
    u16_t idx;
    u8_t drv_toe_rss_id;
    u8_t port;
    u8_t fw_sb_id;

    toe_info = &pdev->toe_info;
    port = PORT_ID(pdev);

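    /* For every TSS index, program cstorm internal memory with the SCQ page
     * addresses, producer/consumer indices, and the status-block id and index
     * that the completion queue is bound to. */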
    LM_TOE_FOREACH_TSS_IDX(pdev, drv_toe_rss_id)
    {
        scq = &toe_info->scqs[drv_toe_rss_id];

        /* SCQ consumer ptr - scq first page addr */
        phys_addr = lm_bd_chain_phys_addr(&scq->bd_chain, 0);
        DbgBreakIf(CSTORM_TOE_CQ_CONS_PTR_LO_SIZE != 4);

        LM_INTMEM_WRITE32(pdev, CSTORM_TOE_CQ_CONS_PTR_LO_OFFSET(LM_TOE_FW_RSS_ID(pdev,drv_toe_rss_id), port), phys_addr.as_u32.low, BAR_CSTRORM_INTMEM);

        DbgBreakIf (CSTORM_TOE_CQ_CONS_PTR_HI_SIZE != 4);
        LM_INTMEM_WRITE32(pdev, CSTORM_TOE_CQ_CONS_PTR_HI_OFFSET(LM_TOE_FW_RSS_ID(pdev,drv_toe_rss_id), port), phys_addr.as_u32.high, BAR_CSTRORM_INTMEM);

        /* SCQ producer idx */
        idx = lm_bd_chain_prod_idx(&scq->bd_chain);

        DbgBreakIf(CSTORM_TOE_CQ_PROD_SIZE != 2);
        LM_INTMEM_WRITE16(pdev, CSTORM_TOE_CQ_PROD_OFFSET(LM_TOE_FW_RSS_ID(pdev,drv_toe_rss_id), port), idx, BAR_CSTRORM_INTMEM);

        /* SCQ consumer idx */
        idx = lm_bd_chain_cons_idx(&scq->bd_chain);
        DbgBreakIf(idx != 0);

        DbgBreakIf(CSTORM_TOE_CQ_CONS_SIZE != 2);
        LM_INTMEM_WRITE16(pdev, CSTORM_TOE_CQ_CONS_OFFSET(LM_TOE_FW_RSS_ID(pdev,drv_toe_rss_id), port), idx, BAR_CSTRORM_INTMEM);

        /* SCQ second page addr */
        phys_addr = lm_bd_chain_phys_addr(&scq->bd_chain, 1);

        DbgBreakIf(CSTORM_TOE_CQ_NEXT_PAGE_BASE_ADDR_LO_SIZE != 4);
        LM_INTMEM_WRITE32(pdev, CSTORM_TOE_CQ_NEXT_PAGE_BASE_ADDR_LO_OFFSET(LM_TOE_FW_RSS_ID(pdev,drv_toe_rss_id), port), phys_addr.as_u32.low, BAR_CSTRORM_INTMEM);

        DbgBreakIf(CSTORM_TOE_CQ_NEXT_PAGE_BASE_ADDR_HI_SIZE != 4);
        LM_INTMEM_WRITE32(pdev, CSTORM_TOE_CQ_NEXT_PAGE_BASE_ADDR_HI_OFFSET(LM_TOE_FW_RSS_ID(pdev,drv_toe_rss_id), port), phys_addr.as_u32.high, BAR_CSTRORM_INTMEM);

        DbgBreakIf(CSTORM_TOE_CQ_NXT_PAGE_ADDR_VALID_SIZE != 1);

        LM_INTMEM_WRITE8(pdev, CSTORM_TOE_CQ_NXT_PAGE_ADDR_VALID_OFFSET(LM_TOE_FW_RSS_ID(pdev,drv_toe_rss_id), port), 1, BAR_CSTRORM_INTMEM);

        //LM_INTMEM_WRITE8(pdev, CSTORM_TOE_STATUS_BLOCK_ID_OFFSET(LM_TOE_FW_RSS_ID(pdev,drv_toe_rss_id), port), LM_TOE_FW_RSS_ID(pdev,drv_toe_rss_id), BAR_CSTRORM_INTMEM);
        fw_sb_id = LM_FW_SB_ID(pdev, RSS_ID_TO_SB_ID(drv_toe_rss_id));
#ifdef _VBD_
        if (!CHIP_IS_E1x(pdev) && (pdev->params.l4_enable_rss == L4_RSS_DISABLED))
        {
            fw_sb_id = LM_FW_SB_ID(pdev, RSS_ID_TO_SB_ID(LM_NON_RSS_SB(pdev)));
            if (drv_toe_rss_id != LM_NON_RSS_CHAIN(pdev))
            {
                DbgBreak();
            }
        }
#endif
        LM_INTMEM_WRITE8(pdev, CSTORM_TOE_STATUS_BLOCK_ID_OFFSET(LM_TOE_FW_RSS_ID(pdev,drv_toe_rss_id), port), fw_sb_id, BAR_CSTRORM_INTMEM);
        LM_INTMEM_WRITE8(pdev, CSTORM_TOE_STATUS_BLOCK_INDEX_OFFSET(LM_TOE_FW_RSS_ID(pdev,drv_toe_rss_id), port), HC_INDEX_TOE_TX_CQ_CONS, BAR_CSTRORM_INTMEM);
    }
}

/* init ustorm offload params private to TOE */
static void _lm_set_ofld_params_ustorm_toe(lm_device_t *pdev, l4_ofld_params_t *l4_params)
{
    u8_t func;
    u32_t val32;

    func = FUNC_ID(pdev);

    /* global push timer ticks */
    /* This value is in milliseconds instead of ticks in SNP
     * and Longhorn.  In the future Microsoft will change these
     * values to ticks. TBA: When the fix takes place, uncomment the first line and remove the second line */
    /* val32 = lm_time_resolution(pdev, l4_params->push_ticks, l4_params->ticks_per_second, 1000); */
    val32 = lm_time_resolution(pdev, l4_params->push_ticks, 1000, 1000);

    DbgBreakIf (USTORM_TOE_TCP_PUSH_TIMER_TICKS_SIZE != 4);
    LM_INTMEM_WRITE32(pdev, USTORM_TOE_TCP_PUSH_TIMER_TICKS_OFFSET(func), val32, BAR_USTRORM_INTMEM);
}

/* init ustorm internal memory for toe
 * assumption - the storm's common intmem (if any) was already initialized */
static void _lm_tcp_init_ustorm_intmem(lm_device_t *pdev)
{
    lm_toe_info_t *toe_info;
    lm_address_t phys_addr;
    lm_tcp_rcq_t *rcq;
    lm_tcp_grq_t *grq;
    struct toe_rx_grq_bd *grq_bd;
    u16_t idx;
    u8_t drv_toe_rss_id, grq_bd_idx;
    u8_t port;
    u8_t fw_sb_id;
    u8_t sw_sb_id;

    toe_info = &pdev->toe_info;
    port = PORT_ID(pdev);

    _lm_set_ofld_params_ustorm_toe(pdev, &(pdev->ofld_info.l4_params));

    LM_TOE_FOREACH_RSS_IDX(pdev,drv_toe_rss_id)
    {

        rcq = &toe_info->rcqs[drv_toe_rss_id];
        grq = &toe_info->grqs[drv_toe_rss_id];

        /* GRQ cache bds */
        grq_bd = (struct toe_rx_grq_bd *)grq->bd_chain.bd_chain_virt;

        DbgBreakIf( USTORM_TOE_GRQ_CACHE_NUM_BDS > lm_bd_chain_usable_bds_per_page(&grq->bd_chain));

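        /* Mirror the first USTORM_TOE_GRQ_CACHE_NUM_BDS GRQ BDs into ustorm
         * internal memory (the FW-side GRQ BD cache) for this RSS index. */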
        for(grq_bd_idx = 0; grq_bd_idx < USTORM_TOE_GRQ_CACHE_NUM_BDS; grq_bd_idx++) {
            LM_INTMEM_WRITE32(pdev, USTORM_GRQ_CACHE_BD_LO_OFFSET(LM_TOE_FW_RSS_ID(pdev,drv_toe_rss_id) ,port,grq_bd_idx), grq_bd->addr_lo, BAR_USTRORM_INTMEM);
            LM_INTMEM_WRITE32(pdev, USTORM_GRQ_CACHE_BD_HI_OFFSET(LM_TOE_FW_RSS_ID(pdev,drv_toe_rss_id),port,grq_bd_idx), grq_bd->addr_hi, BAR_USTRORM_INTMEM);
            grq_bd++;
        }

        /* GRQ cache prod idx */
        DbgBreakIf (USTORM_TOE_GRQ_LOCAL_PROD_SIZE != 1);
        LM_INTMEM_WRITE8(pdev, USTORM_TOE_GRQ_LOCAL_PROD_OFFSET(LM_TOE_FW_RSS_ID(pdev,drv_toe_rss_id), port),  (u8_t)USTORM_TOE_GRQ_CACHE_NUM_BDS, BAR_USTRORM_INTMEM);

        /* GRQ cache cons idx */
        DbgBreakIf (USTORM_TOE_GRQ_LOCAL_CONS_SIZE != 1);
        LM_INTMEM_WRITE8(pdev, USTORM_TOE_GRQ_LOCAL_CONS_OFFSET(LM_TOE_FW_RSS_ID(pdev,drv_toe_rss_id), port),  0, BAR_USTRORM_INTMEM);

        /* GRQ producer idx */
        idx = lm_bd_chain_prod_idx(&grq->bd_chain);
        DbgBreakIf (USTORM_TOE_GRQ_PROD_SIZE != 2);
        LM_INTMEM_WRITE16(pdev, USTORM_TOE_GRQ_PROD_OFFSET(LM_TOE_FW_RSS_ID(pdev,drv_toe_rss_id), port), idx, BAR_USTRORM_INTMEM);

        /* GRQ consumer idx */
        DbgBreakIf (USTORM_TOE_GRQ_CONS_SIZE != 2);
        LM_INTMEM_WRITE16(pdev, USTORM_TOE_GRQ_CONS_OFFSET(LM_TOE_FW_RSS_ID(pdev,drv_toe_rss_id), port), (u8_t)USTORM_TOE_GRQ_CACHE_NUM_BDS, BAR_USTRORM_INTMEM);

        /* GRQ consumer ptr */
        phys_addr = lm_bd_chain_phys_addr(&grq->bd_chain, 0);
        LM_INC64(&phys_addr, sizeof(struct toe_rx_grq_bd) * USTORM_TOE_GRQ_CACHE_NUM_BDS);

        DbgBreakIf (USTORM_TOE_GRQ_CONS_PTR_LO_SIZE != 4);
        LM_INTMEM_WRITE32(pdev, USTORM_TOE_GRQ_CONS_PTR_LO_OFFSET(LM_TOE_FW_RSS_ID(pdev,drv_toe_rss_id), port), phys_addr.as_u32.low, BAR_USTRORM_INTMEM);

        DbgBreakIf (USTORM_TOE_GRQ_CONS_PTR_HI_SIZE != 4);
        LM_INTMEM_WRITE32(pdev, USTORM_TOE_GRQ_CONS_PTR_HI_OFFSET(LM_TOE_FW_RSS_ID(pdev,drv_toe_rss_id), port), phys_addr.as_u32.high, BAR_USTRORM_INTMEM);

        /* Generic buffer size */
        DbgBreakIf (USTORM_TOE_GRQ_BUF_SIZE_SIZE != 2);

        DbgBreakIf(LM_TCP_GEN_BUF_SIZE(pdev) > 0xffff); /* the size available in ustorm */
        LM_INTMEM_WRITE16(pdev, USTORM_TOE_GRQ_BUF_SIZE_OFFSET(LM_TOE_FW_RSS_ID(pdev,drv_toe_rss_id), port), (u16_t)LM_TCP_GEN_BUF_SIZE(pdev), BAR_USTRORM_INTMEM);

        /* RCQ consumer ptr - rcq first page addr */
        phys_addr = lm_bd_chain_phys_addr(&rcq->bd_chain, 0);

        DbgBreakIf (USTORM_TOE_CQ_CONS_PTR_LO_SIZE != 4);
        LM_INTMEM_WRITE32(pdev, USTORM_TOE_CQ_CONS_PTR_LO_OFFSET(LM_TOE_FW_RSS_ID(pdev,drv_toe_rss_id), port), phys_addr.as_u32.low, BAR_USTRORM_INTMEM);

        DbgBreakIf (USTORM_TOE_CQ_CONS_PTR_HI_SIZE != 4);
        LM_INTMEM_WRITE32(pdev, USTORM_TOE_CQ_CONS_PTR_HI_OFFSET(LM_TOE_FW_RSS_ID(pdev,drv_toe_rss_id), port), phys_addr.as_u32.high, BAR_USTRORM_INTMEM);

        /* RCQ second page addr */
        phys_addr = lm_bd_chain_phys_addr(&rcq->bd_chain, 1);

        DbgBreakIf (USTORM_TOE_CQ_NEXT_PAGE_BASE_ADDR_LO_SIZE != 4);
        LM_INTMEM_WRITE32(pdev, USTORM_TOE_CQ_NEXT_PAGE_BASE_ADDR_LO_OFFSET(LM_TOE_FW_RSS_ID(pdev,drv_toe_rss_id), port), phys_addr.as_u32.low, BAR_USTRORM_INTMEM);

        DbgBreakIf (USTORM_TOE_CQ_NEXT_PAGE_BASE_ADDR_HI_SIZE != 4);
        LM_INTMEM_WRITE32(pdev, USTORM_TOE_CQ_NEXT_PAGE_BASE_ADDR_HI_OFFSET(LM_TOE_FW_RSS_ID(pdev,drv_toe_rss_id), port), phys_addr.as_u32.high, BAR_USTRORM_INTMEM);

        DbgBreakIf (USTORM_TOE_CQ_NXT_PAGE_ADDR_VALID_SIZE != 1);
        LM_INTMEM_WRITE8(pdev, USTORM_TOE_CQ_NXT_PAGE_ADDR_VALID_OFFSET(LM_TOE_FW_RSS_ID(pdev,drv_toe_rss_id), port), 1, BAR_USTRORM_INTMEM);

        /* RCQ producer idx */
        idx = lm_bd_chain_prod_idx(&rcq->bd_chain);

        DbgBreakIf (USTORM_TOE_CQ_PROD_SIZE != 2);
        LM_INTMEM_WRITE16(pdev, USTORM_TOE_CQ_PROD_OFFSET(LM_TOE_FW_RSS_ID(pdev,drv_toe_rss_id), port), idx, BAR_USTRORM_INTMEM);
827d14abf15SRobert Mustacchi         if (pdev->params.enable_dynamic_hc[HC_INDEX_TOE_RX_CQ_CONS]) {
828d14abf15SRobert Mustacchi             u32_t l4_quasi_byte_counter;
829d14abf15SRobert Mustacchi             u16_t prod_idx_diff = lm_bd_chain_prod_idx(&rcq->bd_chain) - rcq->bd_chain.bds_per_page * rcq->bd_chain.page_cnt;
830d14abf15SRobert Mustacchi             l4_quasi_byte_counter = prod_idx_diff;
831d14abf15SRobert Mustacchi             l4_quasi_byte_counter <<= 16;
832d14abf15SRobert Mustacchi //            LM_INTMEM_WRITE32(pdev, CSTORM_BYTE_COUNTER_OFFSET(LM_TOE_FW_RSS_ID(pdev,drv_toe_rss_id), HC_INDEX_TOE_RX_CQ_CONS), l4_quasi_byte_counter, BAR_CSTRORM_INTMEM);
833d14abf15SRobert Mustacchi             LM_INTMEM_WRITE32(pdev, rcq->hc_sb_info.iro_dhc_offset, l4_quasi_byte_counter, BAR_CSTRORM_INTMEM);
834d14abf15SRobert Mustacchi         }
835d14abf15SRobert Mustacchi         /* RCQ consumer idx */
836d14abf15SRobert Mustacchi         idx = lm_bd_chain_cons_idx(&rcq->bd_chain);
837d14abf15SRobert Mustacchi         DbgBreakIf(idx != 0);
838d14abf15SRobert Mustacchi 
839d14abf15SRobert Mustacchi         DbgBreakIf (USTORM_TOE_CQ_CONS_SIZE != 2);
840d14abf15SRobert Mustacchi         LM_INTMEM_WRITE16(pdev, USTORM_TOE_CQ_CONS_OFFSET(LM_TOE_FW_RSS_ID(pdev,drv_toe_rss_id), port), idx, BAR_USTRORM_INTMEM);
841d14abf15SRobert Mustacchi 
842d14abf15SRobert Mustacchi         fw_sb_id = LM_FW_SB_ID(pdev, RSS_ID_TO_SB_ID(drv_toe_rss_id));
843d14abf15SRobert Mustacchi 	sw_sb_id = RSS_ID_TO_SB_ID(drv_toe_rss_id);
844d14abf15SRobert Mustacchi         if (RSS_ID_TO_SB_ID(drv_toe_rss_id) >= MAX_NDSB) { //To suppress Prefast warning
845d14abf15SRobert Mustacchi             DbgBreak();
846d14abf15SRobert Mustacchi             break;
847d14abf15SRobert Mustacchi         }
848d14abf15SRobert Mustacchi #ifdef _VBD_
849*48bbca81SDaniel Hoffman 	if (!CHIP_IS_E1x(pdev) && (pdev->params.l4_enable_rss == L4_RSS_DISABLED))
850d14abf15SRobert Mustacchi 	{
851d14abf15SRobert Mustacchi 		fw_sb_id = LM_FW_SB_ID(pdev, RSS_ID_TO_SB_ID(LM_NON_RSS_SB(pdev)));
852d14abf15SRobert Mustacchi 		sw_sb_id = LM_NON_RSS_SB(pdev);
853*48bbca81SDaniel Hoffman 		if (drv_toe_rss_id != LM_NON_RSS_CHAIN(pdev))
854d14abf15SRobert Mustacchi 		{
855d14abf15SRobert Mustacchi 			DbgBreak();
856d14abf15SRobert Mustacchi 		}
857d14abf15SRobert Mustacchi 	}
858d14abf15SRobert Mustacchi #endif
859d14abf15SRobert Mustacchi         if (CHIP_IS_E1x(pdev)) {
860d14abf15SRobert Mustacchi 
861d14abf15SRobert Mustacchi             if (pdev->params.enable_dynamic_hc[HC_INDEX_TOE_RX_CQ_CONS]) {
862d14abf15SRobert Mustacchi                 pdev->vars.status_blocks_arr[RSS_ID_TO_SB_ID(drv_toe_rss_id)].hc_status_block_data.e1x_sb_data.index_data[HC_INDEX_TOE_RX_CQ_CONS].flags |= HC_INDEX_DATA_DYNAMIC_HC_ENABLED;
863d14abf15SRobert Mustacchi             } else {
864d14abf15SRobert Mustacchi                 pdev->vars.status_blocks_arr[RSS_ID_TO_SB_ID(drv_toe_rss_id)].hc_status_block_data.e1x_sb_data.index_data[HC_INDEX_TOE_RX_CQ_CONS].flags &= ~HC_INDEX_DATA_DYNAMIC_HC_ENABLED;
865d14abf15SRobert Mustacchi             }
866d14abf15SRobert Mustacchi             LM_INTMEM_WRITE8(PFDEV(pdev), CSTORM_STATUS_BLOCK_DATA_OFFSET(fw_sb_id)
867d14abf15SRobert Mustacchi                               + OFFSETOF(struct hc_status_block_data_e1x, index_data)
868d14abf15SRobert Mustacchi                               + sizeof(struct hc_index_data)*HC_INDEX_TOE_RX_CQ_CONS
869d14abf15SRobert Mustacchi                               + OFFSETOF(struct hc_index_data,flags),
870d14abf15SRobert Mustacchi                               pdev->vars.status_blocks_arr[RSS_ID_TO_SB_ID(drv_toe_rss_id)].hc_status_block_data.e1x_sb_data.index_data[HC_INDEX_ETH_RX_CQ_CONS].flags, BAR_CSTRORM_INTMEM);
871d14abf15SRobert Mustacchi         } else {
872d14abf15SRobert Mustacchi 
873d14abf15SRobert Mustacchi             if (pdev->params.enable_dynamic_hc[HC_INDEX_TOE_RX_CQ_CONS]) {
874d14abf15SRobert Mustacchi                 pdev->vars.status_blocks_arr[sw_sb_id].hc_status_block_data.e2_sb_data.index_data[HC_INDEX_TOE_RX_CQ_CONS].flags |= HC_INDEX_DATA_DYNAMIC_HC_ENABLED;
875d14abf15SRobert Mustacchi             } else {
876d14abf15SRobert Mustacchi                 pdev->vars.status_blocks_arr[sw_sb_id].hc_status_block_data.e2_sb_data.index_data[HC_INDEX_TOE_RX_CQ_CONS].flags &= ~HC_INDEX_DATA_DYNAMIC_HC_ENABLED;
877d14abf15SRobert Mustacchi             }
878d14abf15SRobert Mustacchi             LM_INTMEM_WRITE8(PFDEV(pdev), CSTORM_STATUS_BLOCK_DATA_OFFSET(fw_sb_id)
879d14abf15SRobert Mustacchi                               + OFFSETOF(struct hc_status_block_data_e2, index_data)
880d14abf15SRobert Mustacchi                               + sizeof(struct hc_index_data)*HC_INDEX_TOE_RX_CQ_CONS
881d14abf15SRobert Mustacchi                               + OFFSETOF(struct hc_index_data,flags),
882d14abf15SRobert Mustacchi                               pdev->vars.status_blocks_arr[sw_sb_id].hc_status_block_data.e2_sb_data.index_data[HC_INDEX_ETH_RX_CQ_CONS].flags, BAR_CSTRORM_INTMEM);
883d14abf15SRobert Mustacchi 
884d14abf15SRobert Mustacchi         }
885d14abf15SRobert Mustacchi 
886d14abf15SRobert Mustacchi //        LM_INTMEM_WRITE8(pdev, USTORM_TOE_STATUS_BLOCK_ID_OFFSET(LM_TOE_FW_RSS_ID(pdev,drv_toe_rss_id), port),LM_TOE_FW_RSS_ID(pdev,drv_toe_rss_id), BAR_USTRORM_INTMEM);
887d14abf15SRobert Mustacchi         LM_INTMEM_WRITE8(pdev, USTORM_TOE_STATUS_BLOCK_ID_OFFSET(LM_TOE_FW_RSS_ID(pdev,drv_toe_rss_id), port),fw_sb_id, BAR_USTRORM_INTMEM);
888d14abf15SRobert Mustacchi         LM_INTMEM_WRITE8(pdev, USTORM_TOE_STATUS_BLOCK_INDEX_OFFSET(LM_TOE_FW_RSS_ID(pdev,drv_toe_rss_id), port), HC_INDEX_TOE_RX_CQ_CONS, BAR_USTRORM_INTMEM);
889d14abf15SRobert Mustacchi     }
890d14abf15SRobert Mustacchi 
891d14abf15SRobert Mustacchi     /* Initialize the indirection table: only the entries that correspond to the L4 status blocks (L4 base --> L4 base + cnt) */
892d14abf15SRobert Mustacchi     DbgBreakIf (USTORM_INDIRECTION_TABLE_ENTRY_SIZE != 1);
893d14abf15SRobert Mustacchi 
894d14abf15SRobert Mustacchi     if (pdev->params.l4_enable_rss == L4_RSS_DISABLED) {
895d14abf15SRobert Mustacchi         LM_TOE_FOREACH_RSS_IDX(pdev, idx)
896d14abf15SRobert Mustacchi         {
897d14abf15SRobert Mustacchi             LM_INTMEM_WRITE8(pdev, USTORM_INDIRECTION_TABLE_OFFSET(port) + LM_TOE_FW_RSS_ID(pdev,idx), LM_TOE_FW_RSS_ID(pdev,(u8_t)idx), BAR_USTRORM_INTMEM);
898d14abf15SRobert Mustacchi         }
899d14abf15SRobert Mustacchi     } else {
900d14abf15SRobert Mustacchi         for (idx = 0; idx < RSS_INDIRECTION_TABLE_SIZE; idx++) {
901d14abf15SRobert Mustacchi             LM_INTMEM_WRITE8(pdev,USTORM_INDIRECTION_TABLE_OFFSET(port) + idx, pdev->toe_info.indirection_table[idx], BAR_USTRORM_INTMEM);
902d14abf15SRobert Mustacchi         }
903d14abf15SRobert Mustacchi     }
904d14abf15SRobert Mustacchi }
905d14abf15SRobert Mustacchi 
906d14abf15SRobert Mustacchi /* init tstorm offload params common to TOE/RDMA/ISCSI */
907d14abf15SRobert Mustacchi static void _lm_set_ofld_params_tstorm_common(lm_device_t *pdev, l4_ofld_params_t *l4_params)
908d14abf15SRobert Mustacchi {
909d14abf15SRobert Mustacchi     u8_t func;
910d14abf15SRobert Mustacchi     u32_t dup_ack_threshold;
911d14abf15SRobert Mustacchi 
912d14abf15SRobert Mustacchi     func = FUNC_ID(pdev);
913d14abf15SRobert Mustacchi 
914d14abf15SRobert Mustacchi     dup_ack_threshold = l4_params->dup_ack_threshold;
915d14abf15SRobert Mustacchi     if(dup_ack_threshold > TCP_TSTORM_MAX_DUP_ACK_TH) {
916d14abf15SRobert Mustacchi         DbgMessage(pdev, WARNl4sp,
917d14abf15SRobert Mustacchi                    "given dup_ack_threshold (=%d) too high. setting it to maximum allowed (=%d)\n",
918d14abf15SRobert Mustacchi                    dup_ack_threshold, TCP_TSTORM_MAX_DUP_ACK_TH);
919d14abf15SRobert Mustacchi         dup_ack_threshold = TCP_TSTORM_MAX_DUP_ACK_TH;
920d14abf15SRobert Mustacchi     }
921d14abf15SRobert Mustacchi 
922d14abf15SRobert Mustacchi     DbgBreakIf (TSTORM_TCP_DUPLICATE_ACK_THRESHOLD_SIZE != 4);
923d14abf15SRobert Mustacchi     LM_INTMEM_WRITE32(pdev, TSTORM_TCP_DUPLICATE_ACK_THRESHOLD_OFFSET(func), dup_ack_threshold, BAR_TSTRORM_INTMEM);
924d14abf15SRobert Mustacchi 
925d14abf15SRobert Mustacchi     /* MaxCwnd  */
926d14abf15SRobert Mustacchi     DbgBreakIf (TSTORM_TCP_MAX_CWND_SIZE != 4);
927d14abf15SRobert Mustacchi     if(pdev->params.network_type == LM_NETOWRK_TYPE_WAN) {
928d14abf15SRobert Mustacchi         LM_INTMEM_WRITE32(pdev, TSTORM_TCP_MAX_CWND_OFFSET(func), pdev->params.max_cwnd_wan, BAR_TSTRORM_INTMEM);
929d14abf15SRobert Mustacchi     } else {
930d14abf15SRobert Mustacchi         DbgBreakIf(pdev->params.network_type != LM_NETOWRK_TYPE_LAN);
931d14abf15SRobert Mustacchi         LM_INTMEM_WRITE32(pdev, TSTORM_TCP_MAX_CWND_OFFSET(func), pdev->params.max_cwnd_lan, BAR_TSTRORM_INTMEM);
932d14abf15SRobert Mustacchi     }
933d14abf15SRobert Mustacchi }
934d14abf15SRobert Mustacchi 
935d14abf15SRobert Mustacchi /* init tstorm offload params private to TOE */
936d14abf15SRobert Mustacchi static void _lm_set_ofld_params_tstorm_toe(lm_device_t *pdev, l4_ofld_params_t *l4_params)
937d14abf15SRobert Mustacchi {
938d14abf15SRobert Mustacchi     u8_t func;
939d14abf15SRobert Mustacchi 
940d14abf15SRobert Mustacchi     func = FUNC_ID(pdev);
941d14abf15SRobert Mustacchi 
942d14abf15SRobert Mustacchi     /* max retransmit (TOE param only) */
943d14abf15SRobert Mustacchi     DbgBreakIf (TSTORM_TOE_MAX_SEG_RETRANSMIT_SIZE != 4);
944d14abf15SRobert Mustacchi     LM_INTMEM_WRITE32(pdev, TSTORM_TOE_MAX_SEG_RETRANSMIT_OFFSET(func), l4_params->max_retx, BAR_TSTRORM_INTMEM);
945d14abf15SRobert Mustacchi 
946d14abf15SRobert Mustacchi     /* TcpDoubtReachability (TOE param only) */
947d14abf15SRobert Mustacchi     DbgBreakIf (TSTORM_TOE_DOUBT_REACHABILITY_SIZE != 1);
948d14abf15SRobert Mustacchi     LM_INTMEM_WRITE8(pdev, TSTORM_TOE_DOUBT_REACHABILITY_OFFSET(func), l4_params->doubt_reachability_retx, BAR_TSTRORM_INTMEM);
949d14abf15SRobert Mustacchi 
950d14abf15SRobert Mustacchi }
951d14abf15SRobert Mustacchi 
952d14abf15SRobert Mustacchi /* init tstorm internal memory for toe
953d14abf15SRobert Mustacchi  * assumption - storm's common intmem already initiated */
954d14abf15SRobert Mustacchi static void _lm_tcp_init_tstorm_intmem(lm_device_t *pdev)
955d14abf15SRobert Mustacchi {
956d14abf15SRobert Mustacchi     _lm_set_ofld_params_tstorm_toe(pdev, &(pdev->ofld_info.l4_params));
957d14abf15SRobert Mustacchi 
958d14abf15SRobert Mustacchi     DbgBreakIf (TSTORM_TOE_MAX_DOMINANCE_VALUE_SIZE != 1);
959d14abf15SRobert Mustacchi     LM_INTMEM_WRITE8(pdev, TSTORM_TOE_MAX_DOMINANCE_VALUE_OFFSET, (u8_t)pdev->params.l4_max_dominance_value, BAR_TSTRORM_INTMEM);
960d14abf15SRobert Mustacchi     DbgBreakIf (TSTORM_TOE_DOMINANCE_THRESHOLD_SIZE != 1);
961d14abf15SRobert Mustacchi     LM_INTMEM_WRITE8(pdev, TSTORM_TOE_DOMINANCE_THRESHOLD_OFFSET, (u8_t)pdev->params.l4_dominance_threshold, BAR_TSTRORM_INTMEM);
962d14abf15SRobert Mustacchi 
963d14abf15SRobert Mustacchi }
964d14abf15SRobert Mustacchi 
965d14abf15SRobert Mustacchi 
966d14abf15SRobert Mustacchi /* init xstorm offload params common to TOE/RDMA/ISCSI */
967d14abf15SRobert Mustacchi static void _lm_set_ofld_params_xstorm_common(lm_device_t *pdev, l4_ofld_params_t *l4_params)
968d14abf15SRobert Mustacchi {
969d14abf15SRobert Mustacchi     u8_t func, ack_frequency;
970d14abf15SRobert Mustacchi     u32_t val32, max_reg, tmr_reg, delayed_ack_ticks;
971d14abf15SRobert Mustacchi 
972d14abf15SRobert Mustacchi     func = FUNC_ID(pdev);
973d14abf15SRobert Mustacchi     if (PORT_ID(pdev)) {
974d14abf15SRobert Mustacchi         max_reg = XCM_REG_GLB_DEL_ACK_MAX_CNT_1;
975d14abf15SRobert Mustacchi         tmr_reg = XCM_REG_GLB_DEL_ACK_TMR_VAL_1;
976d14abf15SRobert Mustacchi     } else {
977d14abf15SRobert Mustacchi         max_reg = XCM_REG_GLB_DEL_ACK_MAX_CNT_0;
978d14abf15SRobert Mustacchi         tmr_reg = XCM_REG_GLB_DEL_ACK_TMR_VAL_0;
979d14abf15SRobert Mustacchi     }
980d14abf15SRobert Mustacchi     /* if ack_frequency is 0, it means use default value of 2. */
981d14abf15SRobert Mustacchi     /* delayed max ack count, (both in internal ram and in XCM!!!) */
982d14abf15SRobert Mustacchi     ack_frequency = l4_params->ack_frequency;
983d14abf15SRobert Mustacchi     if(ack_frequency < TCP_XCM_MIN_GLB_DEL_ACK_MAX_CNT) {
984d14abf15SRobert Mustacchi         DbgMessage(pdev, WARNl4sp,
985d14abf15SRobert Mustacchi                    "given ack_frequency (=%d) too low. setting it to the default (=%d)\n",
986d14abf15SRobert Mustacchi                    ack_frequency, TCP_XCM_DEFAULT_DEL_ACK_MAX_CNT);
987d14abf15SRobert Mustacchi         ack_frequency = TCP_XCM_DEFAULT_DEL_ACK_MAX_CNT;
988d14abf15SRobert Mustacchi     }
989d14abf15SRobert Mustacchi 
990d14abf15SRobert Mustacchi 
991d14abf15SRobert Mustacchi     DbgBreakIf (XSTORM_TCP_GLOBAL_DEL_ACK_COUNTER_MAX_COUNT_SIZE != 1);
992d14abf15SRobert Mustacchi     LM_INTMEM_WRITE8(pdev, XSTORM_TCP_GLOBAL_DEL_ACK_COUNTER_MAX_COUNT_OFFSET(func), ack_frequency, BAR_XSTRORM_INTMEM);
993d14abf15SRobert Mustacchi     REG_WR(pdev,  max_reg, ack_frequency);
994d14abf15SRobert Mustacchi 
995d14abf15SRobert Mustacchi     /* This value is in milliseconds instead of ticks in SNP
996d14abf15SRobert Mustacchi      * and Longhorn. In the future Microsoft will change these
997d14abf15SRobert Mustacchi      * values to ticks. TBA: when the fix takes place, uncomment the first line and remove the second */
998d14abf15SRobert Mustacchi     /* delayed_ack_ticks = lm_time_resolution(pdev, l4_params->delayed_ack_ticks, l4_params->ticks_per_second, 1000); */
999d14abf15SRobert Mustacchi     delayed_ack_ticks = lm_time_resolution(pdev, l4_params->delayed_ack_ticks, 1000, TIMERS_TICKS_PER_SEC);
1000d14abf15SRobert Mustacchi 
1001d14abf15SRobert Mustacchi     /* delayed ack timer */
1002d14abf15SRobert Mustacchi     REG_WR(pdev,   tmr_reg, delayed_ack_ticks);
1003d14abf15SRobert Mustacchi 
1004d14abf15SRobert Mustacchi     /* sws timer */
1005d14abf15SRobert Mustacchi     /* This value (sws_prevention_ticks) is in milliseconds instead of ticks in SNP
1006d14abf15SRobert Mustacchi      * and Longhorn. In the future Microsoft will change these
1007d14abf15SRobert Mustacchi      * values to ticks. TBA: when the fix takes place, uncomment the first line and remove the second */
1008d14abf15SRobert Mustacchi     /* val32 = lm_time_resolution(pdev, l4_params->sws_prevention_ticks, l4_params->ticks_per_second, TIMERS_TICKS_PER_SEC); */
1009d14abf15SRobert Mustacchi     val32 = lm_time_resolution(pdev, l4_params->sws_prevention_ticks, 1000 , TIMERS_TICKS_PER_SEC);
1010d14abf15SRobert Mustacchi 
1011d14abf15SRobert Mustacchi     DbgBreakIf (XSTORM_TCP_TX_SWS_TIMER_VAL_SIZE != 4);
1012d14abf15SRobert Mustacchi     LM_INTMEM_WRITE32(pdev, XSTORM_TCP_TX_SWS_TIMER_VAL_OFFSET(func), val32, BAR_XSTRORM_INTMEM);
1013d14abf15SRobert Mustacchi 
1014d14abf15SRobert Mustacchi     DbgBreakIf (XSTORM_COMMON_RTC_RESOLUTION_SIZE != 2);
1015d14abf15SRobert Mustacchi     LM_INTMEM_WRITE16(pdev, XSTORM_COMMON_RTC_RESOLUTION_OFFSET, 1000 / l4_params->ticks_per_second , BAR_XSTRORM_INTMEM);
1016d14abf15SRobert Mustacchi }
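/* A rough, illustrative example of the conversions above (parameter values are made up, not the
 * driver defaults): lm_time_resolution() rescales a time value from a source tick rate to a target
 * tick rate, roughly src_time * trg_ticks_per_sec / src_ticks_per_sec, e.g.
 *
 *     lm_time_resolution(pdev, 200, 1000, TIMERS_TICKS_PER_SEC)   -> ~200  (1:1 when both are 1000/sec)
 *     lm_time_resolution(pdev, 200,  100, TIMERS_TICKS_PER_SEC)   -> ~2000
 *
 * so as long as the OS hands the driver milliseconds (see the comments above), a 1000-per-second
 * source rate keeps the values unchanged. */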
1017d14abf15SRobert Mustacchi 
1018d14abf15SRobert Mustacchi /* init xstorm offload params private to TOE */
1019d14abf15SRobert Mustacchi static void _lm_set_ofld_params_xstorm_toe(lm_device_t *pdev, l4_ofld_params_t *l4_params)
1020d14abf15SRobert Mustacchi {
1021d14abf15SRobert Mustacchi     u8_t func;
1022d14abf15SRobert Mustacchi 
1023d14abf15SRobert Mustacchi     func = FUNC_ID(pdev);
1024d14abf15SRobert Mustacchi 
1025d14abf15SRobert Mustacchi     DbgBreakIf (XSTORM_TOE_LLC_SNAP_ENABLED_SIZE != 1);
1026d14abf15SRobert Mustacchi     if(l4_params->flags & OFLD_PARAM_FLAG_SNAP_ENCAP) {
1027d14abf15SRobert Mustacchi         LM_INTMEM_WRITE8(pdev, XSTORM_TOE_LLC_SNAP_ENABLED_OFFSET(func), 1, BAR_XSTRORM_INTMEM);
1028d14abf15SRobert Mustacchi     } else {
1029d14abf15SRobert Mustacchi         LM_INTMEM_WRITE8(pdev, XSTORM_TOE_LLC_SNAP_ENABLED_OFFSET(func), 0, BAR_XSTRORM_INTMEM);
1030d14abf15SRobert Mustacchi     }
1031d14abf15SRobert Mustacchi }
1032d14abf15SRobert Mustacchi 
1033d14abf15SRobert Mustacchi /* init xstorm internal memory for toe
1034d14abf15SRobert Mustacchi  * assumption - storm's common intmem already initiated */
1035d14abf15SRobert Mustacchi static void _lm_tcp_init_xstorm_intmem(lm_device_t *pdev)
1036d14abf15SRobert Mustacchi {
1037d14abf15SRobert Mustacchi     _lm_set_ofld_params_xstorm_toe(pdev, &(pdev->ofld_info.l4_params));
1038d14abf15SRobert Mustacchi }
1039d14abf15SRobert Mustacchi 
1040d14abf15SRobert Mustacchi /* Description:
1041d14abf15SRobert Mustacchi  *  init chip internal memory and hw that is common for TOE, ISCSI and RDMA
1042d14abf15SRobert Mustacchi  * Assumptions:
1043d14abf15SRobert Mustacchi  *  - lm_init_params was already called
1044d14abf15SRobert Mustacchi  * Returns:
1045d14abf15SRobert Mustacchi  *  SUCCESS or any failure  */
1046d14abf15SRobert Mustacchi lm_status_t lm_tcp_init_chip_common(lm_device_t *pdev)
1047d14abf15SRobert Mustacchi {
1048d14abf15SRobert Mustacchi     l4_ofld_params_t l4_params;
1049d14abf15SRobert Mustacchi     u8_t func;
1050d14abf15SRobert Mustacchi 
1051d14abf15SRobert Mustacchi     DbgMessage(pdev, VERBOSEl4sp, "###lm_tcp_init_chip_common\n");
1052d14abf15SRobert Mustacchi     DbgBreakIf(!pdev);
1053d14abf15SRobert Mustacchi 
1054d14abf15SRobert Mustacchi     func = FUNC_ID(pdev);
1055d14abf15SRobert Mustacchi 
1056d14abf15SRobert Mustacchi     _lm_get_default_l4cli_params(pdev, &l4_params);
1057d14abf15SRobert Mustacchi 
1058d14abf15SRobert Mustacchi     pdev->ofld_info.l4_params = l4_params;
1059*48bbca81SDaniel Hoffman 
1060d14abf15SRobert Mustacchi     /* init common internal memory/hw for each storm
1061d14abf15SRobert Mustacchi      * (c+u storms do not have common offload params) */
1062d14abf15SRobert Mustacchi     _lm_set_ofld_params_xstorm_common(pdev, &l4_params);
1063d14abf15SRobert Mustacchi     _lm_set_ofld_params_tstorm_common(pdev, &l4_params);
1064d14abf15SRobert Mustacchi 
1065d14abf15SRobert Mustacchi 
1066d14abf15SRobert Mustacchi     /* init internal memory constants (not dependent on l4_params) */
1067d14abf15SRobert Mustacchi 
1068d14abf15SRobert Mustacchi     /* enable delayed acks */
1069d14abf15SRobert Mustacchi     DbgBreakIf (XSTORM_TCP_GLOBAL_DEL_ACK_COUNTER_ENABLED_SIZE != 1);
1070d14abf15SRobert Mustacchi     LM_INTMEM_WRITE8(pdev, XSTORM_TCP_GLOBAL_DEL_ACK_COUNTER_ENABLED_OFFSET(func), 1 /* always enabled */, BAR_XSTRORM_INTMEM);
1071d14abf15SRobert Mustacchi 
1072d14abf15SRobert Mustacchi     /* ip id (init value currently constant: 0x8000) */
1073d14abf15SRobert Mustacchi     DbgBreakIf (XSTORM_TCP_IPID_SIZE != 2);
1074d14abf15SRobert Mustacchi     LM_INTMEM_WRITE16(pdev, XSTORM_TCP_IPID_OFFSET(func), TOE_XSTORM_IP_ID_INIT_HI, BAR_XSTRORM_INTMEM);
1075d14abf15SRobert Mustacchi 
1076d14abf15SRobert Mustacchi     return LM_STATUS_SUCCESS;
1077d14abf15SRobert Mustacchi }
1078d14abf15SRobert Mustacchi 
1079d14abf15SRobert Mustacchi /* Description:
1080d14abf15SRobert Mustacchi  *  init chip internal memory for L4
1081d14abf15SRobert Mustacchi  * Returns:
1082d14abf15SRobert Mustacchi  *  SUCCESS or any failure  */
1083d14abf15SRobert Mustacchi lm_status_t lm_tcp_init_chip(lm_device_t *pdev)
1084d14abf15SRobert Mustacchi {
1085d14abf15SRobert Mustacchi     DbgMessage(pdev, VERBOSEl4sp, "##lm_tcp_init_chip\n");
1086d14abf15SRobert Mustacchi 
1087d14abf15SRobert Mustacchi     /* GilR 4/9/2006 - TODO - Assaf - RSS indirection table default initialization, done in L2? */
1088d14abf15SRobert Mustacchi 
1089d14abf15SRobert Mustacchi     /* init XSTORM internal RAM */
1090d14abf15SRobert Mustacchi     _lm_tcp_init_xstorm_intmem(pdev);
1091d14abf15SRobert Mustacchi 
1092d14abf15SRobert Mustacchi     /* init CSTORM internal RAM */
1093d14abf15SRobert Mustacchi     _lm_tcp_init_cstorm_intmem(pdev);
1094d14abf15SRobert Mustacchi 
1095d14abf15SRobert Mustacchi     /* init TSTORM internal RAM */
1096d14abf15SRobert Mustacchi     _lm_tcp_init_tstorm_intmem(pdev);
1097d14abf15SRobert Mustacchi 
1098d14abf15SRobert Mustacchi     /* init USTORM internal RAM */
1099d14abf15SRobert Mustacchi     _lm_tcp_init_ustorm_intmem(pdev);
1100d14abf15SRobert Mustacchi 
1101d14abf15SRobert Mustacchi     return LM_STATUS_SUCCESS;
1102d14abf15SRobert Mustacchi }
1103d14abf15SRobert Mustacchi 
1104d14abf15SRobert Mustacchi /* Description:
1105d14abf15SRobert Mustacchi  *  send the TOE INIT ramrod, wait for its completion and return
1106d14abf15SRobert Mustacchi  * Assumptions:
1107d14abf15SRobert Mustacchi  *  - there is no pending slow path request for the leading connection (cid=0)
1108d14abf15SRobert Mustacchi  *  - interrupts are already enabled
1109d14abf15SRobert Mustacchi  * Returns:
1110d14abf15SRobert Mustacchi  *  SUCCESS or any failure  */
1111d14abf15SRobert Mustacchi lm_status_t lm_tcp_start_chip(lm_device_t *pdev)
1112d14abf15SRobert Mustacchi {
1113d14abf15SRobert Mustacchi     lm_toe_info_t *toe_info;
1114d14abf15SRobert Mustacchi     u32_t to_cnt = 100000; /* GilR 4/9/2006 - TBA - 'to_cnt' in lm_tcp_init_chip need to be removed? */
1115d14abf15SRobert Mustacchi     u64_t data;
1116d14abf15SRobert Mustacchi     struct toe_init_ramrod_data toe_init_data;
1117d14abf15SRobert Mustacchi 
1118d14abf15SRobert Mustacchi     DbgMessage(pdev, VERBOSEl4sp, "##lm_tcp_start_chip\n");
1119d14abf15SRobert Mustacchi 
1120d14abf15SRobert Mustacchi     toe_info = &pdev->toe_info;
1121d14abf15SRobert Mustacchi 
1122d14abf15SRobert Mustacchi     /* send TOE INIT ramrod and wait for completion */
1123d14abf15SRobert Mustacchi     DbgBreakIf(toe_info->state != LM_TOE_STATE_INIT);
1124d14abf15SRobert Mustacchi 
1125d14abf15SRobert Mustacchi     toe_init_data.rss_num = LM_TOE_FW_RSS_ID(pdev,LM_TOE_BASE_RSS_ID(pdev));
1126d14abf15SRobert Mustacchi     data = *((u64_t*)(&toe_init_data));
1127d14abf15SRobert Mustacchi     lm_command_post(pdev, LM_SW_LEADING_RSS_CID(pdev), RAMROD_OPCODE_TOE_INIT, CMD_PRIORITY_NORMAL, TOE_CONNECTION_TYPE, data);
1128d14abf15SRobert Mustacchi     while (toe_info->state != LM_TOE_STATE_NORMAL && to_cnt) {
1129d14abf15SRobert Mustacchi         mm_wait(pdev,100);
1130d14abf15SRobert Mustacchi         to_cnt--;
1131d14abf15SRobert Mustacchi     }
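    /* With to_cnt == 100000 and mm_wait(pdev, 100) per iteration, the loop above polls for roughly
     * 100000 * 100 wait units (about 10 seconds if mm_wait() counts microseconds; the actual unit is
     * defined by the mm layer). */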
1132d14abf15SRobert Mustacchi     /* GilR 5/16/2006 - TODO - DbgBreakIf(toe_info->state != LM_TOE_STATE_NORMAL); commented out for windows user mode */
1133d14abf15SRobert Mustacchi     if(toe_info->state != LM_TOE_STATE_NORMAL) {
1134d14abf15SRobert Mustacchi #ifndef _VBD_CMD_
1135d14abf15SRobert Mustacchi         DbgMessage(pdev, FATAL, "TOE init ramrod did not complete\n");
1136d14abf15SRobert Mustacchi #else
1137d14abf15SRobert Mustacchi         toe_info->state = LM_TOE_STATE_NORMAL;
1138d14abf15SRobert Mustacchi         lm_sq_complete(pdev, CMD_PRIORITY_NORMAL, RAMROD_OPCODE_TOE_INIT, TOE_CONNECTION_TYPE, LM_SW_LEADING_RSS_CID(pdev));
1139d14abf15SRobert Mustacchi #endif
1140d14abf15SRobert Mustacchi 
1141d14abf15SRobert Mustacchi         #if defined(_VBD_)
1142d14abf15SRobert Mustacchi         DbgBreak();
1143d14abf15SRobert Mustacchi         #endif
1144d14abf15SRobert Mustacchi     }
1145d14abf15SRobert Mustacchi 
1146d14abf15SRobert Mustacchi     /* cid recycled cb registration  */
1147d14abf15SRobert Mustacchi     lm_cid_recycled_cb_register(pdev, TOE_CONNECTION_TYPE, lm_tcp_recycle_cid_cb);
1148d14abf15SRobert Mustacchi 
1149d14abf15SRobert Mustacchi     /* SQ-completion cb registration (for SQ commands that get completed internally in the driver) */
1150d14abf15SRobert Mustacchi     lm_sq_comp_cb_register(pdev, TOE_CONNECTION_TYPE, lm_tcp_comp_cb);
1151d14abf15SRobert Mustacchi 
1152d14abf15SRobert Mustacchi     return LM_STATUS_SUCCESS;
1153d14abf15SRobert Mustacchi }
1154d14abf15SRobert Mustacchi 
1155d14abf15SRobert Mustacchi /* Description:
1156d14abf15SRobert Mustacchi  *  allocate and initiate l4 (lm driver and chip)
1157d14abf15SRobert Mustacchi  * Assumptions:
1158d14abf15SRobert Mustacchi  *  - lm_init_params was already called
1159d14abf15SRobert Mustacchi  *  - um GRQ pool is ready to supply buffers to lm (?)
1160d14abf15SRobert Mustacchi  *  - there is no pending slow path request for the leading connection (cid=0)
1161d14abf15SRobert Mustacchi  *  - interrupts are already enabled
1162d14abf15SRobert Mustacchi  * Returns:
1163d14abf15SRobert Mustacchi  *  SUCCESS or any failure */
1164d14abf15SRobert Mustacchi lm_status_t lm_tcp_init(lm_device_t *pdev)
1165d14abf15SRobert Mustacchi {
1166d14abf15SRobert Mustacchi     lm_toe_info_t *toe_info;
1167d14abf15SRobert Mustacchi     lm_status_t lm_status;
1168d14abf15SRobert Mustacchi 
1169d14abf15SRobert Mustacchi     DbgMessage(pdev, VERBOSEl4sp, "###lm_tcp_init\n");
1170d14abf15SRobert Mustacchi     if (IS_VFDEV(pdev)) {
1171d14abf15SRobert Mustacchi         DbgMessage(pdev, FATAL, "###lm_tcp_init is not supported for VF\n");
1172d14abf15SRobert Mustacchi         return LM_STATUS_SUCCESS;
1173d14abf15SRobert Mustacchi     }
1174*48bbca81SDaniel Hoffman 
1175d14abf15SRobert Mustacchi     toe_info = &pdev->toe_info;
1176d14abf15SRobert Mustacchi     mm_memset(toe_info, 0 , sizeof(lm_toe_info_t));
1177d14abf15SRobert Mustacchi     toe_info->pdev = pdev;
1178d14abf15SRobert Mustacchi 
1179d14abf15SRobert Mustacchi     /* allocate resources */
1180d14abf15SRobert Mustacchi     lm_status = lm_tcp_alloc_resc(pdev);
1181d14abf15SRobert Mustacchi     DbgBreakIf((lm_status!=LM_STATUS_SUCCESS) && DBG_BREAK_ON(MEMORY_ALLOCATION_FAILURE));
1182d14abf15SRobert Mustacchi     if (lm_status != LM_STATUS_SUCCESS) {
1183d14abf15SRobert Mustacchi         return lm_status;
1184d14abf15SRobert Mustacchi     }
1185d14abf15SRobert Mustacchi 
1186d14abf15SRobert Mustacchi     /* initialize resources */
1187d14abf15SRobert Mustacchi     lm_status = lm_tcp_init_resc(pdev, TRUE);
1188d14abf15SRobert Mustacchi     DbgBreakIf(lm_status!=LM_STATUS_SUCCESS);
1189d14abf15SRobert Mustacchi     if (lm_status != LM_STATUS_SUCCESS) {
1190d14abf15SRobert Mustacchi         return lm_status;
1191d14abf15SRobert Mustacchi     }
1192d14abf15SRobert Mustacchi 
1193d14abf15SRobert Mustacchi     /* initialize chip resources */
1194d14abf15SRobert Mustacchi     lm_status = lm_tcp_init_chip(pdev);
1195d14abf15SRobert Mustacchi     DbgBreakIf(lm_status!=LM_STATUS_SUCCESS);
1196d14abf15SRobert Mustacchi     if (lm_status != LM_STATUS_SUCCESS) {
1197d14abf15SRobert Mustacchi         return lm_status;
1198d14abf15SRobert Mustacchi     }
1199d14abf15SRobert Mustacchi 
1200d14abf15SRobert Mustacchi     /* activate chip for tcp */
1201d14abf15SRobert Mustacchi     lm_status = lm_tcp_start_chip(pdev);
1202d14abf15SRobert Mustacchi     DbgBreakIf(lm_status!=LM_STATUS_SUCCESS);
1203d14abf15SRobert Mustacchi     if (lm_status != LM_STATUS_SUCCESS) {
1204d14abf15SRobert Mustacchi         return lm_status;
1205d14abf15SRobert Mustacchi     }
1206d14abf15SRobert Mustacchi 
1207d14abf15SRobert Mustacchi     return lm_status;
1208d14abf15SRobert Mustacchi }
1209d14abf15SRobert Mustacchi 
1210d14abf15SRobert Mustacchi /* Description:
1211d14abf15SRobert Mustacchi  *  handle TOE init protocol ramrod completion */
1212d14abf15SRobert Mustacchi void lm_tcp_init_ramrod_comp(lm_device_t *pdev)
1213d14abf15SRobert Mustacchi {
1214d14abf15SRobert Mustacchi     lm_toe_info_t *toe_info;
1215d14abf15SRobert Mustacchi 
1216d14abf15SRobert Mustacchi     DbgMessage(pdev, VERBOSEl4sp, "##lm_tcp_init_ramrod_comp\n");
1217d14abf15SRobert Mustacchi     DbgBreakIf(!pdev);
1218d14abf15SRobert Mustacchi 
1219d14abf15SRobert Mustacchi     toe_info = &pdev->toe_info;
1220d14abf15SRobert Mustacchi     DbgBreakIf(toe_info->state != LM_TOE_STATE_INIT);
1221d14abf15SRobert Mustacchi     toe_info->state = LM_TOE_STATE_NORMAL;
1222d14abf15SRobert Mustacchi }
1223d14abf15SRobert Mustacchi 
1224d14abf15SRobert Mustacchi /* Description:
1225d14abf15SRobert Mustacchi  *  handle TOE RSS-update ramrod completion
1226d14abf15SRobert Mustacchi  * Assumptions:
1227d14abf15SRobert Mustacchi  * - called once for each RCQ
1228d14abf15SRobert Mustacchi  */
1229d14abf15SRobert Mustacchi void lm_tcp_rss_update_ramrod_comp(
1230d14abf15SRobert Mustacchi     struct _lm_device_t *pdev,
1231d14abf15SRobert Mustacchi     lm_tcp_rcq_t *rcq,
1232d14abf15SRobert Mustacchi     u32_t cid,
1233d14abf15SRobert Mustacchi     u32_t update_stats_type,
1234d14abf15SRobert Mustacchi     u8_t update_suspend_rcq)
1235d14abf15SRobert Mustacchi {
1236d14abf15SRobert Mustacchi 
1237d14abf15SRobert Mustacchi    /* decrement the completion count and check if we need to suspend processing */
1238d14abf15SRobert Mustacchi    DbgBreakIf(rcq->suspend_processing == TRUE);
1239d14abf15SRobert Mustacchi 
1240d14abf15SRobert Mustacchi    /* Update the RSS-update statistics - these indicate which FW flow was taken and also count the overall number of updates */
1241d14abf15SRobert Mustacchi     DbgMessage(pdev, INFORMl4sp, "lm_tcp_rss_update_ramrod_comp(): %d\n",update_stats_type);
1242d14abf15SRobert Mustacchi     switch (update_stats_type) {
1243d14abf15SRobert Mustacchi     case TOE_RSS_UPD_QUIET:
1244d14abf15SRobert Mustacchi         rcq->rss_update_stats_quiet++;
1245d14abf15SRobert Mustacchi         break;
1246d14abf15SRobert Mustacchi     case TOE_RSS_UPD_SLEEPING:
1247d14abf15SRobert Mustacchi         rcq->rss_update_stats_sleeping++;
1248d14abf15SRobert Mustacchi         break;
1249d14abf15SRobert Mustacchi     case TOE_RSS_UPD_DELAYED:
1250d14abf15SRobert Mustacchi         rcq->rss_update_stats_delayed++;
1251d14abf15SRobert Mustacchi         break;
1252d14abf15SRobert Mustacchi     default:
1253d14abf15SRobert Mustacchi         DbgBreak();
1254d14abf15SRobert Mustacchi         break;
1255d14abf15SRobert Mustacchi     }
1256d14abf15SRobert Mustacchi 
1257d14abf15SRobert Mustacchi     /* This is a hack due to the fact that the FW has a hard time providing the cid on which the ramrod was sent */
1258d14abf15SRobert Mustacchi     /* We know the ramrod was sent on the leading connection, so we use it here instead of the cid on the cqe (update cid) */
1259d14abf15SRobert Mustacchi     /* If the driver ever changes the cid on which the ramrod is sent, this line will have to be changed as well - UGLY, UGLY */
1260d14abf15SRobert Mustacchi     rcq->update_cid = LM_SW_LEADING_RSS_CID(pdev);
1261d14abf15SRobert Mustacchi 
1262d14abf15SRobert Mustacchi     /* This is what it would have been if the FW always put the ramrod cid on these completions
1263d14abf15SRobert Mustacchi     rcq->update_cid = cid;
1264d14abf15SRobert Mustacchi     */
1265d14abf15SRobert Mustacchi     if (update_suspend_rcq) {
1266d14abf15SRobert Mustacchi         lm_tcp_rss_update_suspend_rcq(pdev, rcq);
1267d14abf15SRobert Mustacchi     } else {
1268d14abf15SRobert Mustacchi         rcq->rss_update_processing_delayed++;
1269d14abf15SRobert Mustacchi     }
1270d14abf15SRobert Mustacchi }
1271d14abf15SRobert Mustacchi 
1272d14abf15SRobert Mustacchi /* Description:
1273d14abf15SRobert Mustacchi  *  Checks whether the rcq processing should be suspended as a result of an rss update
1274d14abf15SRobert Mustacchi  */
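/* A sketch of the counting scheme, as read from the code below (all field names are the driver's own):
 * - pdev->params.update_toe_comp_cnt counts the TOE RCQs that still owe an RSS-update completion;
 *   when the last one arrives, the slowpath ring credit is returned via lm_sq_complete().
 * - each rcq stays suspended (rcq->suspend_processing) until update_toe_comp_cnt reaches zero.
 * - pdev->params.update_comp_cnt and update_suspend_cnt gate the overall RSS-update flow; only when
 *   update_suspend_cnt drops to zero is the stored set_rss_cookie completed back via mm_set_done(). */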
1275d14abf15SRobert Mustacchi void lm_tcp_rss_update_suspend_rcq(
1276d14abf15SRobert Mustacchi     IN    struct _lm_device_t * pdev,
1277d14abf15SRobert Mustacchi     IN    lm_tcp_rcq_t        * rcq)
1278d14abf15SRobert Mustacchi {
1279d14abf15SRobert Mustacchi     void * cookie = NULL;
1280d14abf15SRobert Mustacchi     /*  This function is called once when an update completion is encountered and the rcq processing is not suspended yet.
1281d14abf15SRobert Mustacchi      *  At all other times it is called only if the rcq processing is already suspended. */
1282d14abf15SRobert Mustacchi     if (rcq->suspend_processing == FALSE)
1283d14abf15SRobert Mustacchi     {
1284d14abf15SRobert Mustacchi         /* decrement the expected completion counter */
1285d14abf15SRobert Mustacchi         mm_atomic_dec(&pdev->params.update_comp_cnt);
1286d14abf15SRobert Mustacchi         /* Toe specific... to determine who completes the ramrod. */
1287d14abf15SRobert Mustacchi         if (mm_atomic_dec(&pdev->params.update_toe_comp_cnt) == 0)
1288d14abf15SRobert Mustacchi         {
1289d14abf15SRobert Mustacchi             /* Everyone is done. Time to return credit to the slowpath ring... */
1290d14abf15SRobert Mustacchi             lm_sq_complete(pdev, CMD_PRIORITY_NORMAL, RAMROD_OPCODE_TOE_RSS_UPDATE,
1291d14abf15SRobert Mustacchi                            TOE_CONNECTION_TYPE, LM_TOE_FW_RSS_ID(pdev, LM_TOE_BASE_RSS_ID(pdev)));
1292d14abf15SRobert Mustacchi         }
1293d14abf15SRobert Mustacchi     }
1294d14abf15SRobert Mustacchi     rcq->suspend_processing = pdev->params.update_toe_comp_cnt ? TRUE : FALSE;
1295d14abf15SRobert Mustacchi 
1296d14abf15SRobert Mustacchi     if (rcq->suspend_processing == FALSE)
1297d14abf15SRobert Mustacchi     {
1298d14abf15SRobert Mustacchi         /* processing was suspended and can now be resumed, try to complete the update ramrod */
1299d14abf15SRobert Mustacchi         DbgMessage(pdev, INFORMl4sp, "lm_tcp_rss_update_suspend_rcq(): calling lm_eth_update_ramrod_comp\n");
1300d14abf15SRobert Mustacchi         if (mm_atomic_dec(&pdev->params.update_suspend_cnt) == 0)
1301d14abf15SRobert Mustacchi         {
1302d14abf15SRobert Mustacchi             if (pdev->slowpath_info.set_rss_cookie)
1303d14abf15SRobert Mustacchi             {
1304d14abf15SRobert Mustacchi                 cookie = (void *)pdev->slowpath_info.set_rss_cookie;
1305d14abf15SRobert Mustacchi                 pdev->slowpath_info.set_rss_cookie = NULL;
1306d14abf15SRobert Mustacchi                 mm_set_done(pdev, rcq->update_cid, cookie);
1307d14abf15SRobert Mustacchi             }
1308d14abf15SRobert Mustacchi         }
1309d14abf15SRobert Mustacchi     }
1310d14abf15SRobert Mustacchi }
1311d14abf15SRobert Mustacchi 
1312d14abf15SRobert Mustacchi 
1313d14abf15SRobert Mustacchi 
1314d14abf15SRobert Mustacchi /* Description:
1315d14abf15SRobert Mustacchi  *  initiate a caller allocated lm neighbor state
1316d14abf15SRobert Mustacchi  * Assumptions:
1317d14abf15SRobert Mustacchi  *  - caller already zeroed given neigh state
1318d14abf15SRobert Mustacchi  * Returns:
1319d14abf15SRobert Mustacchi  *  SUCCESS or any failure */
1320d14abf15SRobert Mustacchi lm_status_t lm_tcp_init_neigh_state(
1321d14abf15SRobert Mustacchi     struct _lm_device_t *pdev,
1322d14abf15SRobert Mustacchi     lm_state_block_t *state_blk,
1323d14abf15SRobert Mustacchi     lm_neigh_state_t *neigh,
1324d14abf15SRobert Mustacchi     l4_neigh_const_state_t *neigh_const,
1325d14abf15SRobert Mustacchi     l4_neigh_cached_state_t *neigh_cached,
1326d14abf15SRobert Mustacchi     l4_neigh_delegated_state_t *neigh_delegated)
1327d14abf15SRobert Mustacchi {
1328d14abf15SRobert Mustacchi     DbgMessage(pdev, VERBOSEl4sp, "###lm_tcp_init_neigh_state\n");
1329d14abf15SRobert Mustacchi     DbgBreakIf(!(pdev && state_blk && neigh && neigh_const && neigh_cached && neigh_delegated));
1330d14abf15SRobert Mustacchi 
1331d14abf15SRobert Mustacchi     neigh->hdr.state_blk    = state_blk;
1332d14abf15SRobert Mustacchi     neigh->hdr.state_id     = STATE_ID_NEIGH;
1333d14abf15SRobert Mustacchi     neigh->hdr.status       = STATE_STATUS_NORMAL;
1334d14abf15SRobert Mustacchi     d_list_push_tail(&state_blk->neigh_list, &neigh->hdr.link);
1335d14abf15SRobert Mustacchi     neigh->num_dependents   = 0;
1336d14abf15SRobert Mustacchi 
1337d14abf15SRobert Mustacchi     mm_memcpy(&neigh->neigh_cached, neigh_cached, sizeof(neigh->neigh_cached));
1338d14abf15SRobert Mustacchi     mm_memcpy(&neigh->neigh_const, neigh_const, sizeof(neigh->neigh_const));
1339d14abf15SRobert Mustacchi     mm_memcpy(&neigh->neigh_delegated, neigh_delegated, sizeof(neigh->neigh_delegated));
1340d14abf15SRobert Mustacchi 
1341d14abf15SRobert Mustacchi     neigh->host_reachability_time   = 0; /* SHOULD BE: (mm_get_current_time() - neigh_cached->host_reachability_delta)   */
1342d14abf15SRobert Mustacchi     neigh->nic_reachability_time    = 0; /* SHOULD BE: (mm_get_current_time() - neigh_delegated->nic_reachability_delta) */
1343d14abf15SRobert Mustacchi     neigh->stale                    = 0;
1344d14abf15SRobert Mustacchi 
1345d14abf15SRobert Mustacchi     return LM_STATUS_SUCCESS;
1346d14abf15SRobert Mustacchi }
1347d14abf15SRobert Mustacchi 
1348d14abf15SRobert Mustacchi /* Description:
1349d14abf15SRobert Mustacchi  *  initiate a caller allocated lm path state
1350d14abf15SRobert Mustacchi  * Assumptions:
1351d14abf15SRobert Mustacchi  *  - caller already zeroed given path state
1352d14abf15SRobert Mustacchi  * Returns:
1353d14abf15SRobert Mustacchi  *  SUCCESS or any failure */
1354d14abf15SRobert Mustacchi lm_status_t lm_tcp_init_path_state(
1355d14abf15SRobert Mustacchi     struct _lm_device_t *pdev,
1356d14abf15SRobert Mustacchi     lm_state_block_t *state_blk,
1357d14abf15SRobert Mustacchi     lm_path_state_t *path,
1358d14abf15SRobert Mustacchi     lm_neigh_state_t *neigh,
1359d14abf15SRobert Mustacchi     l4_path_const_state_t *path_const,
1360d14abf15SRobert Mustacchi     l4_path_cached_state_t *path_cached,
1361d14abf15SRobert Mustacchi     l4_path_delegated_state_t *path_delegated)
1362d14abf15SRobert Mustacchi {
1363d14abf15SRobert Mustacchi     DbgMessage(pdev, VERBOSEl4sp, "###lm_tcp_init_path_state\n");
1364d14abf15SRobert Mustacchi     DbgBreakIf(!(pdev && state_blk && path && neigh && path_const && path_cached && path_delegated));
1365d14abf15SRobert Mustacchi     DbgBreakIf(neigh->hdr.state_id != STATE_ID_NEIGH || neigh->hdr.status != STATE_STATUS_NORMAL);
1366d14abf15SRobert Mustacchi 
1367d14abf15SRobert Mustacchi     path->hdr.state_blk     = state_blk;
1368d14abf15SRobert Mustacchi     path->hdr.state_id      = STATE_ID_PATH;
1369d14abf15SRobert Mustacchi     path->hdr.status        = STATE_STATUS_NORMAL;
1370d14abf15SRobert Mustacchi     d_list_push_tail(&state_blk->path_list, &path->hdr.link);
1371d14abf15SRobert Mustacchi     path->neigh             = neigh;
1372d14abf15SRobert Mustacchi     neigh->num_dependents++;
1373d14abf15SRobert Mustacchi     path->num_dependents    = 0;
1374d14abf15SRobert Mustacchi 
1375d14abf15SRobert Mustacchi     mm_memcpy(&path->path_cached, path_cached, sizeof(path->path_cached));
1376d14abf15SRobert Mustacchi     mm_memcpy(&path->path_const, path_const, sizeof(path->path_const));
1377d14abf15SRobert Mustacchi     mm_memcpy(&path->path_delegated, path_delegated, sizeof(path->path_delegated));
1378d14abf15SRobert Mustacchi 
1379d14abf15SRobert Mustacchi    return LM_STATUS_SUCCESS;
1380d14abf15SRobert Mustacchi }
1381d14abf15SRobert Mustacchi 
1382d14abf15SRobert Mustacchi /* Description:
1383d14abf15SRobert Mustacchi  *  initiate a caller allocated lm tcp state
1384d14abf15SRobert Mustacchi  * Assumptions:
1385d14abf15SRobert Mustacchi  *  - caller already zeroed given tcp state
1386d14abf15SRobert Mustacchi  *  - caller already set the tx/rx_con pointers of the given
1387d14abf15SRobert Mustacchi  *    tcp state to pre-allocated tx/rx cons
1388d14abf15SRobert Mustacchi  * Returns:
1389d14abf15SRobert Mustacchi  *  SUCCESS or any failure */
1390d14abf15SRobert Mustacchi lm_status_t lm_tcp_init_tcp_state(
1391d14abf15SRobert Mustacchi     struct _lm_device_t *pdev,
1392d14abf15SRobert Mustacchi     lm_state_block_t *state_blk,
1393d14abf15SRobert Mustacchi     lm_tcp_state_t *tcp,
1394d14abf15SRobert Mustacchi     lm_path_state_t *path,
1395d14abf15SRobert Mustacchi     l4_tcp_const_state_t *tcp_const,
1396d14abf15SRobert Mustacchi     l4_tcp_cached_state_t *tcp_cached,
1397d14abf15SRobert Mustacchi     l4_tcp_delegated_state_t *tcp_delegated,
1398d14abf15SRobert Mustacchi     u32_t tcp_cid_addr)
1399d14abf15SRobert Mustacchi {
1400d14abf15SRobert Mustacchi     DbgMessage(pdev, VERBOSEl4sp, "###lm_tcp_init_tcp_state, ptr=%p, src_port=%d\n", tcp, tcp_const->src_port);
1401d14abf15SRobert Mustacchi     DbgBreakIf(!(pdev && state_blk && tcp && path && tcp_const && tcp_cached && tcp_delegated));
1402d14abf15SRobert Mustacchi     DbgBreakIf(path->hdr.state_id != STATE_ID_PATH || path->hdr.status != STATE_STATUS_NORMAL);
1403d14abf15SRobert Mustacchi 
1404d14abf15SRobert Mustacchi     /* We need to determine the ULP_TYPE and get ourselves a cid if one doesn't already exist */
1405d14abf15SRobert Mustacchi     if (!tcp_cid_addr)
1406d14abf15SRobert Mustacchi     {
1407d14abf15SRobert Mustacchi         tcp->ulp_type = TOE_CONNECTION_TYPE;
1408d14abf15SRobert Mustacchi     }
1409d14abf15SRobert Mustacchi     else
1410d14abf15SRobert Mustacchi     {
1411d14abf15SRobert Mustacchi         tcp->ulp_type = lm_map_cid_to_proto(pdev, tcp_cid_addr);
1412d14abf15SRobert Mustacchi         tcp->cid = tcp_cid_addr;
1413d14abf15SRobert Mustacchi         lm_set_cid_resc(pdev, TOE_CONNECTION_TYPE, tcp, tcp_cid_addr);
1414d14abf15SRobert Mustacchi     }
1415d14abf15SRobert Mustacchi 
1416d14abf15SRobert Mustacchi     tcp->hdr.state_blk     = state_blk;
1417d14abf15SRobert Mustacchi     tcp->hdr.state_id      = STATE_ID_TCP;
1418d14abf15SRobert Mustacchi     tcp->hdr.status        = STATE_STATUS_INIT;
1419d14abf15SRobert Mustacchi     d_list_push_tail(&state_blk->tcp_list, &tcp->hdr.link);
1420d14abf15SRobert Mustacchi     tcp->path = path;
1421d14abf15SRobert Mustacchi     path->num_dependents++;
1422d14abf15SRobert Mustacchi 
1423d14abf15SRobert Mustacchi     if (tcp->ulp_type == TOE_CONNECTION_TYPE)
1424d14abf15SRobert Mustacchi     {
1425d14abf15SRobert Mustacchi         pdev->toe_info.stats.total_ofld++;
1426d14abf15SRobert Mustacchi     }
1427d14abf15SRobert Mustacchi     else if (tcp->ulp_type == ISCSI_CONNECTION_TYPE)
1428d14abf15SRobert Mustacchi     {
1429d14abf15SRobert Mustacchi         pdev->iscsi_info.run_time.stats.total_ofld++;
1430d14abf15SRobert Mustacchi     }
1431d14abf15SRobert Mustacchi 
1432d14abf15SRobert Mustacchi     mm_memcpy(&tcp->tcp_cached, tcp_cached, sizeof(tcp->tcp_cached));
1433d14abf15SRobert Mustacchi     mm_memcpy(&tcp->tcp_const, tcp_const, sizeof(tcp->tcp_const));
1434d14abf15SRobert Mustacchi     mm_memcpy(&tcp->tcp_delegated, tcp_delegated, sizeof(tcp->tcp_delegated));
1435d14abf15SRobert Mustacchi 
1436d14abf15SRobert Mustacchi     /* the rest of the tcp state's fields that require an initialization value other than 0,
1437d14abf15SRobert Mustacchi      * will be initialized later (when lm_tcp_init_tx_con/lm_tcp_init_rx_con/lm_tcp_init_tcp_context are called) */
1438d14abf15SRobert Mustacchi 
1439d14abf15SRobert Mustacchi     return LM_STATUS_SUCCESS;
1440d14abf15SRobert Mustacchi }
1441d14abf15SRobert Mustacchi 
1442d14abf15SRobert Mustacchi /* calc connection's mss according to path_mtu and remote MSS */
1443d14abf15SRobert Mustacchi static u32_t _lm_tcp_calc_mss(u32_t path_mtu, u16_t remote_mss, u8_t is_ipv6, u8_t ts_enabled,
1444d14abf15SRobert Mustacchi                               u8_t llc_snap_enabled, u8_t vlan_enabled)
1445d14abf15SRobert Mustacchi {
1446d14abf15SRobert Mustacchi #define MIN_MTU         576 /* rfc 793 */
1447d14abf15SRobert Mustacchi #define IPV4_HDR_LEN    20
1448d14abf15SRobert Mustacchi #define IPV6_HDR_LEN    40
1449d14abf15SRobert Mustacchi #define TCP_HDR_LEN     20
1450d14abf15SRobert Mustacchi #define TCP_OPTION_LEN  12
1451d14abf15SRobert Mustacchi #define LLC_SNAP_LEN     8
1452d14abf15SRobert Mustacchi #define VLAN_LEN         4
1453d14abf15SRobert Mustacchi 
1454d14abf15SRobert Mustacchi     u32_t mss  = 0;
1455d14abf15SRobert Mustacchi     u32_t hdrs = TCP_HDR_LEN;
1456d14abf15SRobert Mustacchi 
1457d14abf15SRobert Mustacchi     UNREFERENCED_PARAMETER_(vlan_enabled);
1458d14abf15SRobert Mustacchi     UNREFERENCED_PARAMETER_(llc_snap_enabled);
1459d14abf15SRobert Mustacchi 
1460d14abf15SRobert Mustacchi     if(is_ipv6) {
1461d14abf15SRobert Mustacchi         hdrs += IPV6_HDR_LEN;
1462d14abf15SRobert Mustacchi     } else {
1463d14abf15SRobert Mustacchi         hdrs += IPV4_HDR_LEN;
1464d14abf15SRobert Mustacchi     }
1465d14abf15SRobert Mustacchi #ifdef LLC_SNAP_HEADER_ROOMS_WITH_PAYLOAD
1466d14abf15SRobert Mustacchi /*
1467d14abf15SRobert Mustacchi     LLC_SNAP_HEADER_ROOMS_WITH_PAYLOAD was never defined. Nobody remembers when the LLC/SNAP protocol was last tested, but
1468d14abf15SRobert Mustacchi     in any case do not carve room for the LLC/SNAP header out of the payload
1469d14abf15SRobert Mustacchi */
1470d14abf15SRobert Mustacchi     if (llc_snap_enabled) {
1471d14abf15SRobert Mustacchi         hdrs += LLC_SNAP_LEN;
1472d14abf15SRobert Mustacchi     }
1473d14abf15SRobert Mustacchi #endif
1474d14abf15SRobert Mustacchi #ifdef VLAN_HEADER_ROOMS_WITH_PAYLOAD
1475d14abf15SRobert Mustacchi /*
1476d14abf15SRobert Mustacchi     VLAN_HEADER_ROOMS_WITH_PAYLOAD was never defined; the lines below are a reminder that there was once a problem of the
1477d14abf15SRobert Mustacchi     data payload size being reduced by 4 bytes because the VLAN header was carved out of the payload (CQ39709)
1478d14abf15SRobert Mustacchi */
1479d14abf15SRobert Mustacchi     if (vlan_enabled) {
1480d14abf15SRobert Mustacchi         hdrs += VLAN_LEN;
1481d14abf15SRobert Mustacchi     }
1482d14abf15SRobert Mustacchi #endif
1483d14abf15SRobert Mustacchi     DbgBreakIf(path_mtu < MIN_MTU);
1484d14abf15SRobert Mustacchi     mss = path_mtu - hdrs;
1485d14abf15SRobert Mustacchi 
1486d14abf15SRobert Mustacchi     if(mss > remote_mss) {
1487d14abf15SRobert Mustacchi         mss = remote_mss;
1488d14abf15SRobert Mustacchi     }
1489d14abf15SRobert Mustacchi     if(ts_enabled) {
1490d14abf15SRobert Mustacchi         mss -= TCP_OPTION_LEN;
1491d14abf15SRobert Mustacchi     }
1492d14abf15SRobert Mustacchi     if (!mss) {
1493d14abf15SRobert Mustacchi         DbgBreakIf(!mss);
1494d14abf15SRobert Mustacchi         mss = 1; /*mss may be used as divider, so let's prevent division by zero*/
1495d14abf15SRobert Mustacchi     }
1496d14abf15SRobert Mustacchi     return mss;
1497d14abf15SRobert Mustacchi }
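/* Worked example for _lm_tcp_calc_mss() above (illustrative values only): an IPv4 path with
 * path_mtu = 1500, remote_mss = 65495 and timestamps enabled gives
 *     hdrs = TCP_HDR_LEN + IPV4_HDR_LEN = 20 + 20 = 40
 *     mss  = 1500 - 40 = 1460;  min(1460, 65495) = 1460;  1460 - TCP_OPTION_LEN = 1448
 * With the LLC/SNAP and VLAN blocks compiled out (as above), those headers do not reduce the MSS. */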
1498d14abf15SRobert Mustacchi 
1499d14abf15SRobert Mustacchi /** Description
1500d14abf15SRobert Mustacchi  *  calculate the fragment count for a given initial receive window and mss
1501d14abf15SRobert Mustacchi  *  The fragment count is based on the maximum amount of data we may need to hold for a single
1502d14abf15SRobert Mustacchi  *  indication
1503d14abf15SRobert Mustacchi  */
1504d14abf15SRobert Mustacchi static u32_t _lm_tcp_calc_frag_cnt(lm_device_t * pdev, u32_t initial_rcv_wnd, u32_t mss)
1505d14abf15SRobert Mustacchi {
1506d14abf15SRobert Mustacchi     u32_t frag_cnt;
1507d14abf15SRobert Mustacchi 
1508d14abf15SRobert Mustacchi     frag_cnt = initial_rcv_wnd / mss;
1509d14abf15SRobert Mustacchi     if (frag_cnt < (0x10000 / mss)) {
1510d14abf15SRobert Mustacchi         frag_cnt = 0x10000 / mss;
1511d14abf15SRobert Mustacchi     }
1512d14abf15SRobert Mustacchi 
1513d14abf15SRobert Mustacchi     if ((pdev->params.l4_max_rcv_wnd_size > 0x10000) && (frag_cnt > (pdev->params.l4_max_rcv_wnd_size / mss))) {
1514d14abf15SRobert Mustacchi         frag_cnt = pdev->params.l4_max_rcv_wnd_size / mss;
1515d14abf15SRobert Mustacchi     }
1516d14abf15SRobert Mustacchi     frag_cnt = frag_cnt * 2 + 1;
1517d14abf15SRobert Mustacchi 
1518d14abf15SRobert Mustacchi     if (pdev->params.l4_max_gen_buf_cnt && (frag_cnt > pdev->params.l4_max_gen_buf_cnt)) {
1519d14abf15SRobert Mustacchi         frag_cnt = pdev->params.l4_max_gen_buf_cnt;
1520d14abf15SRobert Mustacchi     }
1521d14abf15SRobert Mustacchi     return frag_cnt;
1522d14abf15SRobert Mustacchi }
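/* Worked example for _lm_tcp_calc_frag_cnt() above (illustrative values only): with
 * initial_rcv_wnd = 0x10000 (64KB), mss = 1448 and neither the l4_max_rcv_wnd_size nor the
 * l4_max_gen_buf_cnt limit kicking in:
 *     frag_cnt = 0x10000 / 1448 = 45   (already at the 0x10000 / mss floor)
 *     frag_cnt = 45 * 2 + 1 = 91
 */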
1523d14abf15SRobert Mustacchi 
1524d14abf15SRobert Mustacchi u32_t lm_tcp_calc_frag_cnt(
1525d14abf15SRobert Mustacchi         lm_device_t * pdev,
1526d14abf15SRobert Mustacchi         lm_tcp_state_t * tcp
1527d14abf15SRobert Mustacchi     )
1528d14abf15SRobert Mustacchi {
1529d14abf15SRobert Mustacchi     u32_t mss, frag_cnt;
1530d14abf15SRobert Mustacchi     DbgBreakIf(!(pdev && tcp));
1531d14abf15SRobert Mustacchi     mss = _lm_tcp_calc_mss(tcp->path->path_cached.path_mtu,
1532d14abf15SRobert Mustacchi                            tcp->tcp_const.remote_mss,
1533d14abf15SRobert Mustacchi                            (tcp->path->path_const.ip_version == IP_VERSION_IPV6),
1534d14abf15SRobert Mustacchi                            tcp->tcp_const.tcp_flags & TCP_FLAG_ENABLE_TIME_STAMP,
1535d14abf15SRobert Mustacchi                            FALSE,
1536d14abf15SRobert Mustacchi                            tcp->path->neigh->neigh_const.vlan_tag != 0);
1537d14abf15SRobert Mustacchi 
1538d14abf15SRobert Mustacchi     frag_cnt = _lm_tcp_calc_frag_cnt(pdev, tcp->tcp_cached.initial_rcv_wnd, mss);
1539d14abf15SRobert Mustacchi 
1540d14abf15SRobert Mustacchi     return frag_cnt;
1541d14abf15SRobert Mustacchi }
1542d14abf15SRobert Mustacchi 
1543d14abf15SRobert Mustacchi 
1544d14abf15SRobert Mustacchi 
1545d14abf15SRobert Mustacchi static void _lm_tcp_init_qe_buffer(
1546d14abf15SRobert Mustacchi     struct _lm_device_t * pdev,
1547d14abf15SRobert Mustacchi     lm_tcp_qe_buffer_t  * qe_buffer,
1548d14abf15SRobert Mustacchi     u8_t                * mem_virt,
1549d14abf15SRobert Mustacchi     u32_t                 cnt,
1550d14abf15SRobert Mustacchi     u8_t                  cqe_size)
1551d14abf15SRobert Mustacchi {
1552d14abf15SRobert Mustacchi     UNREFERENCED_PARAMETER_(pdev);
1553d14abf15SRobert Mustacchi 
1554d14abf15SRobert Mustacchi     qe_buffer->left    = cnt;
1555d14abf15SRobert Mustacchi     qe_buffer->first   = (char *)mem_virt;
1556d14abf15SRobert Mustacchi     qe_buffer->head    = qe_buffer->first;
1557d14abf15SRobert Mustacchi     qe_buffer->tail    = qe_buffer->first;
1558d14abf15SRobert Mustacchi     qe_buffer->last    = qe_buffer->first;
1559d14abf15SRobert Mustacchi     qe_buffer->last   += (qe_buffer->left-1)*cqe_size;
1560d14abf15SRobert Mustacchi     qe_buffer->qe_size = cqe_size;
1561d14abf15SRobert Mustacchi }
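/* After this init the qe buffer is empty: head == tail == first, 'left' holds the total entry count
 * and 'last' points at the start of the final entry (first + (cnt - 1) * cqe_size). */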
1562d14abf15SRobert Mustacchi 
1563d14abf15SRobert Mustacchi /** Description
1564d14abf15SRobert Mustacchi  *  function calculates the amount of virtual memory required for the RX connection
1565d14abf15SRobert Mustacchi  * Return
1566d14abf15SRobert Mustacchi  *  amount of virtual memory required
1567d14abf15SRobert Mustacchi  */
1568d14abf15SRobert Mustacchi u32_t lm_tcp_rx_con_get_virt_size(struct _lm_device_t * pdev, lm_tcp_state_t * tcp)
1569d14abf15SRobert Mustacchi {
1570d14abf15SRobert Mustacchi     u32_t frag_cnt;
1571d14abf15SRobert Mustacchi     u32_t mem_size;
1572d14abf15SRobert Mustacchi     u32_t mss;
1573d14abf15SRobert Mustacchi 
1574d14abf15SRobert Mustacchi     /* The calculation for frag_cnt is based on the calculation from Teton's init_rx_tcp_resc()
1575d14abf15SRobert Mustacchi      * also the assertion is taken from Teton */
1576d14abf15SRobert Mustacchi     DbgBreakIf(tcp->tcp_cached.initial_rcv_wnd == 0);
1577d14abf15SRobert Mustacchi     /* the rx_con may not be initialized at this state, therefore we can't rely on the mss being initialized. */
1578d14abf15SRobert Mustacchi     mss = _lm_tcp_calc_mss(tcp->path->path_cached.path_mtu,
1579d14abf15SRobert Mustacchi                            tcp->tcp_const.remote_mss,
1580d14abf15SRobert Mustacchi                            (tcp->path->path_const.ip_version == IP_VERSION_IPV6),
1581d14abf15SRobert Mustacchi                            tcp->tcp_const.tcp_flags & TCP_FLAG_ENABLE_TIME_STAMP,
1582d14abf15SRobert Mustacchi                            pdev->ofld_info.l4_params.flags & OFLD_PARAM_FLAG_SNAP_ENCAP,
1583d14abf15SRobert Mustacchi                            tcp->path->neigh->neigh_const.vlan_tag  != 0);
1584d14abf15SRobert Mustacchi 
1585d14abf15SRobert Mustacchi     frag_cnt = _lm_tcp_calc_frag_cnt(pdev, tcp->tcp_cached.initial_rcv_wnd, mss);
1586d14abf15SRobert Mustacchi 
1587d14abf15SRobert Mustacchi 
1588d14abf15SRobert Mustacchi     DbgMessage(pdev, INFORMl4rx, "Calc #frags for rx-con initial_rcv_wnd: %d frag_cnt: %d\n", tcp->tcp_cached.initial_rcv_wnd, frag_cnt);
1589d14abf15SRobert Mustacchi 
1590d14abf15SRobert Mustacchi     mem_size = sizeof(lm_frag_list_t) + (frag_cnt - 1)*sizeof(lm_frag_t);
1591d14abf15SRobert Mustacchi 
1592d14abf15SRobert Mustacchi     return mem_size;
1593d14abf15SRobert Mustacchi }
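/* Continuing the example above: with frag_cnt = 91 this works out to
 *     mem_size = sizeof(lm_frag_list_t) + 90 * sizeof(lm_frag_t)
 * i.e. one frag-list header (which, judging by the formula, already embeds the first lm_frag_t) plus
 * 90 additional fragment descriptors. */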
1594d14abf15SRobert Mustacchi 
1595d14abf15SRobert Mustacchi void lm_tcp_init_tcp_sp_data_mem(
1596d14abf15SRobert Mustacchi     struct _lm_device_t *pdev,
1597d14abf15SRobert Mustacchi     lm_tcp_state_t *tcp
1598d14abf15SRobert Mustacchi     )
1599d14abf15SRobert Mustacchi {
1600d14abf15SRobert Mustacchi     /* slow-path physical memory */
1601d14abf15SRobert Mustacchi     /* allocation of physical area for sp request */
1602d14abf15SRobert Mustacchi     lm_sp_req_manager_t *sp_req_mgr = NULL;
1603d14abf15SRobert Mustacchi 
1604d14abf15SRobert Mustacchi     sp_req_mgr = lm_cid_sp_req_mgr(pdev, tcp->cid);
1605d14abf15SRobert Mustacchi     if CHK_NULL(sp_req_mgr)
1606d14abf15SRobert Mustacchi     {
1607d14abf15SRobert Mustacchi         DbgBreakIf(!sp_req_mgr);
1608d14abf15SRobert Mustacchi         return;
1609d14abf15SRobert Mustacchi     }
1610d14abf15SRobert Mustacchi     DbgBreakIf(sp_req_mgr->sp_data_phys_addr.as_u32.low & CACHE_LINE_SIZE_MASK);
1611d14abf15SRobert Mustacchi     tcp->sp_req_data.phys_addr = sp_req_mgr->sp_data_phys_addr;
1612d14abf15SRobert Mustacchi     tcp->sp_req_data.virt_addr = sp_req_mgr->sp_data_virt_addr;
1613d14abf15SRobert Mustacchi }
1614d14abf15SRobert Mustacchi 
1615d14abf15SRobert Mustacchi 
1616d14abf15SRobert Mustacchi void lm_tcp_init_tcp_phys_mem(
1617d14abf15SRobert Mustacchi     struct _lm_device_t *pdev,
1618d14abf15SRobert Mustacchi     lm_tcp_state_t *tcp,
1619d14abf15SRobert Mustacchi     lm_tcp_phy_mem_block_t * phy_mblk)
1620d14abf15SRobert Mustacchi {
1621d14abf15SRobert Mustacchi     lm_tcp_con_t * con;
1622d14abf15SRobert Mustacchi     u32_t mem_size;
1623d14abf15SRobert Mustacchi     u16_t page_cnt,page_idx;
1624d14abf15SRobert Mustacchi     u32_t idx = 0;
1625d14abf15SRobert Mustacchi     u8_t  bd_size;
1626d14abf15SRobert Mustacchi     u8_t  block_idx;
1627d14abf15SRobert Mustacchi 
1628d14abf15SRobert Mustacchi     #if (LM_PAGE_SIZE != 4096)
1629d14abf15SRobert Mustacchi     #error (LM_PAGE_SIZE != 4096) /* currently FW assumes a tx chain page is 4KB */
1630d14abf15SRobert Mustacchi     #endif
1631d14abf15SRobert Mustacchi 
1632d14abf15SRobert Mustacchi     /* Init physical memory */
1633d14abf15SRobert Mustacchi     /* bd-chains */
1634d14abf15SRobert Mustacchi     con = tcp->tx_con;
1635d14abf15SRobert Mustacchi     page_cnt = (u16_t)pdev->params.l4_tx_chain_page_cnt;
1636d14abf15SRobert Mustacchi     bd_size = sizeof(struct toe_tx_bd);
1637d14abf15SRobert Mustacchi     block_idx = 0;
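    /* Two passes over the same loop body: the first iteration carves the tx bd-chain
     * pages out of the physical memory blocks; the loop tail then switches
     * con/page_cnt/bd_size to the rx values so the second iteration builds the
     * rx bd-chain the same way. */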
1638d14abf15SRobert Mustacchi     for (idx = 0 ; idx < 2; idx++) {
1639d14abf15SRobert Mustacchi         mem_size = LM_PAGE_SIZE;
1640d14abf15SRobert Mustacchi         for (page_idx = 0; page_idx < page_cnt; page_idx++) {
1641d14abf15SRobert Mustacchi             if (phy_mblk[block_idx].left < mem_size) {
1642d14abf15SRobert Mustacchi                 block_idx++;
1643d14abf15SRobert Mustacchi                 DbgBreakIf(block_idx == pdev->params.l4_num_of_blocks_per_connection);
1644d14abf15SRobert Mustacchi             }
1645d14abf15SRobert Mustacchi             DbgBreakIf(phy_mblk[block_idx].left < mem_size);
1646d14abf15SRobert Mustacchi             lm_bd_chain_add_page(pdev,&con->bd_chain,phy_mblk[block_idx].free, phy_mblk[block_idx].free_phy, bd_size, TRUE);
1647d14abf15SRobert Mustacchi             phy_mblk[block_idx].free += mem_size;
1648d14abf15SRobert Mustacchi             phy_mblk[block_idx].left -= mem_size;
1649d14abf15SRobert Mustacchi             LM_INC64(&phy_mblk[block_idx].free_phy, mem_size);
1650d14abf15SRobert Mustacchi         }
1651d14abf15SRobert Mustacchi         /* rx-con */
1652d14abf15SRobert Mustacchi         con = tcp->rx_con;
1653d14abf15SRobert Mustacchi         page_cnt = (u16_t)pdev->params.l4_rx_chain_page_cnt;
1654d14abf15SRobert Mustacchi         bd_size = sizeof(struct toe_rx_bd);
1655d14abf15SRobert Mustacchi     }
1656d14abf15SRobert Mustacchi 
1657d14abf15SRobert Mustacchi     /* slow-path physical memory */
1658d14abf15SRobert Mustacchi     /* allocation of physical area for sp request */
1659d14abf15SRobert Mustacchi     mem_size = TOE_SP_PHYS_DATA_SIZE;
1660d14abf15SRobert Mustacchi 
1661d14abf15SRobert Mustacchi     if (phy_mblk[block_idx].left < mem_size) {
1662d14abf15SRobert Mustacchi         block_idx++;
1663d14abf15SRobert Mustacchi         DbgBreakIf(block_idx == pdev->params.l4_num_of_blocks_per_connection);
1664d14abf15SRobert Mustacchi     }
1665d14abf15SRobert Mustacchi     DbgBreakIf(mem_size > phy_mblk[block_idx].left);
1666d14abf15SRobert Mustacchi     DbgBreakIf(phy_mblk[block_idx].free_phy.as_u32.low & CACHE_LINE_SIZE_MASK);
1667d14abf15SRobert Mustacchi     tcp->sp_req_data.phys_addr = phy_mblk[block_idx].free_phy;
1668d14abf15SRobert Mustacchi     tcp->sp_req_data.virt_addr = (lm_tcp_slow_path_phys_data_t *)phy_mblk[block_idx].free;
1669d14abf15SRobert Mustacchi     mm_memset(tcp->sp_req_data.virt_addr, 0, mem_size);
1670d14abf15SRobert Mustacchi     phy_mblk[block_idx].free += mem_size;
1671d14abf15SRobert Mustacchi     phy_mblk[block_idx].left -= mem_size;
1672d14abf15SRobert Mustacchi     LM_INC64(&phy_mblk[block_idx].free_phy, mem_size);
1673d14abf15SRobert Mustacchi 
1674d14abf15SRobert Mustacchi     /* doorbell data */
1675d14abf15SRobert Mustacchi     /* init tx part */
1676d14abf15SRobert Mustacchi     mem_size = TOE_DB_TX_DATA_SIZE;
1677d14abf15SRobert Mustacchi     if (phy_mblk[block_idx].left < mem_size) {
1678d14abf15SRobert Mustacchi         block_idx++;
1679d14abf15SRobert Mustacchi         DbgBreakIf(block_idx == pdev->params.l4_num_of_blocks_per_connection);
1680d14abf15SRobert Mustacchi     }
1681d14abf15SRobert Mustacchi     DbgBreakIf(mem_size > phy_mblk[block_idx].left);
1682d14abf15SRobert Mustacchi     DbgBreakIf(phy_mblk[block_idx].free_phy.as_u32.low & CACHE_LINE_SIZE_MASK);
1683d14abf15SRobert Mustacchi     tcp->tx_con->phys_db_data = phy_mblk[block_idx].free_phy;
1684d14abf15SRobert Mustacchi     tcp->tx_con->db_data.tx = (volatile struct toe_tx_db_data *)phy_mblk[block_idx].free;
1685d14abf15SRobert Mustacchi     tcp->tx_con->db_data.tx->flags = 0;
1686d14abf15SRobert Mustacchi     tcp->tx_con->db_data.tx->bds_prod = 0;
1687d14abf15SRobert Mustacchi     /* init tx db data to snd.una (the size of sent-but-unacked data will be
1688d14abf15SRobert Mustacchi      * added when that unacked data is posted): */
1689d14abf15SRobert Mustacchi     tcp->tx_con->db_data.tx->bytes_prod_seq = tcp->tcp_delegated.send_una;
1690d14abf15SRobert Mustacchi     phy_mblk[block_idx].free += mem_size;
1691d14abf15SRobert Mustacchi     phy_mblk[block_idx].left -= mem_size;
1692d14abf15SRobert Mustacchi     LM_INC64(&phy_mblk[block_idx].free_phy, mem_size);
1693d14abf15SRobert Mustacchi 
1694d14abf15SRobert Mustacchi 
1695d14abf15SRobert Mustacchi     /* init rx part */
1696d14abf15SRobert Mustacchi     if (phy_mblk[block_idx].left < mem_size) {
1697d14abf15SRobert Mustacchi         block_idx++;
1698d14abf15SRobert Mustacchi         DbgBreakIf(block_idx == pdev->params.l4_num_of_blocks_per_connection);
1699d14abf15SRobert Mustacchi     }
1700d14abf15SRobert Mustacchi     mem_size = TOE_DB_RX_DATA_SIZE;
1701d14abf15SRobert Mustacchi     DbgBreakIf(mem_size > phy_mblk[block_idx].left);
1702d14abf15SRobert Mustacchi     DbgBreakIf(phy_mblk[block_idx].free_phy.as_u32.low & CACHE_LINE_SIZE_MASK);
1703d14abf15SRobert Mustacchi     tcp->rx_con->phys_db_data = phy_mblk[block_idx].free_phy;
1704d14abf15SRobert Mustacchi     tcp->rx_con->db_data.rx = (volatile struct toe_rx_db_data *)phy_mblk[block_idx].free;
1705d14abf15SRobert Mustacchi     phy_mblk[block_idx].free += mem_size;
1706d14abf15SRobert Mustacchi     phy_mblk[block_idx].left -= mem_size;
1707d14abf15SRobert Mustacchi     LM_INC64(&phy_mblk[block_idx].free_phy, mem_size);
1708d14abf15SRobert Mustacchi     tcp->rx_con->db_data.rx->rcv_win_right_edge = tcp->tcp_delegated.recv_win_seq;
1709d14abf15SRobert Mustacchi     /* we also need to initialize the driver copy of the rcv_win_right_edge */
1710d14abf15SRobert Mustacchi     tcp->rx_con->u.rx.sws_info.drv_rcv_win_right_edge = tcp->tcp_delegated.recv_win_seq;
1711d14abf15SRobert Mustacchi     tcp->rx_con->db_data.rx->bds_prod = 0;
1712d14abf15SRobert Mustacchi     tcp->rx_con->db_data.rx->bytes_prod = 0;
1713d14abf15SRobert Mustacchi     tcp->rx_con->db_data.rx->consumed_grq_bytes = 0;
1714d14abf15SRobert Mustacchi     tcp->rx_con->db_data.rx->flags = 0;
1715d14abf15SRobert Mustacchi     tcp->rx_con->db_data.rx->reserved1 = 0;
1716d14abf15SRobert Mustacchi }
1717d14abf15SRobert Mustacchi 
1718d14abf15SRobert Mustacchi void lm_tcp_init_tcp_virt_mem(
1719d14abf15SRobert Mustacchi     struct _lm_device_t *pdev,
1720d14abf15SRobert Mustacchi     lm_tcp_state_t *tcp,
1721d14abf15SRobert Mustacchi     lm_tcp_mem_block_t * mblk)
1722d14abf15SRobert Mustacchi {
1723d14abf15SRobert Mustacchi     lm_tcp_con_t * con;
1724d14abf15SRobert Mustacchi     u32_t mem_size;
1725d14abf15SRobert Mustacchi 
1726d14abf15SRobert Mustacchi     u32_t idx = 0;
1727d14abf15SRobert Mustacchi     u8_t  cqe_size;
1728d14abf15SRobert Mustacchi 
1729d14abf15SRobert Mustacchi     con = tcp->tx_con;
1730d14abf15SRobert Mustacchi     cqe_size = sizeof(struct toe_tx_cqe);
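    /* Same two-pass pattern as the physical-memory init: the first iteration allocates
     * the tx history-CQE buffer, the loop tail switches to the rx connection and CQE size. */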
1731d14abf15SRobert Mustacchi     for (idx = 0; idx < 2; idx++) {
1732d14abf15SRobert Mustacchi         /* allocation of buffers for history CQEs */
1733d14abf15SRobert Mustacchi         if (pdev->params.l4_history_cqe_cnt) {
1734d14abf15SRobert Mustacchi             mem_size = pdev->params.l4_history_cqe_cnt*cqe_size;
1735d14abf15SRobert Mustacchi             DbgBreakIf(mblk->left < mem_size);
1736d14abf15SRobert Mustacchi             _lm_tcp_init_qe_buffer(pdev, &con->history_cqes, mblk->free, pdev->params.l4_history_cqe_cnt, cqe_size);
1737d14abf15SRobert Mustacchi             mblk->free += mem_size;
1738d14abf15SRobert Mustacchi             mblk->left -= mem_size;
1739d14abf15SRobert Mustacchi         } else {
1740d14abf15SRobert Mustacchi             DbgBreakMsg("MichalS: Currently History Count = 0 is not SUPPORTED\n");
1741d14abf15SRobert Mustacchi         }
1742d14abf15SRobert Mustacchi         con = tcp->rx_con;
1743d14abf15SRobert Mustacchi         cqe_size = sizeof(struct toe_rx_cqe);
1744d14abf15SRobert Mustacchi     }
1745d14abf15SRobert Mustacchi 
1746d14abf15SRobert Mustacchi     /* rx frag list */
1747d14abf15SRobert Mustacchi     mem_size = lm_tcp_rx_con_get_virt_size(pdev, tcp);
1748d14abf15SRobert Mustacchi     DbgBreakIf(mblk->left < mem_size);
1749d14abf15SRobert Mustacchi 
1750d14abf15SRobert Mustacchi     tcp->rx_con->u.rx.gen_info.frag_list = (lm_frag_list_t *)mblk->free;
1751d14abf15SRobert Mustacchi     mblk->free += mem_size;
1752d14abf15SRobert Mustacchi     mblk->left -= mem_size;
1753d14abf15SRobert Mustacchi 
1754d14abf15SRobert Mustacchi }
1755d14abf15SRobert Mustacchi lm_status_t lm_tcp_init_tcp_resc(
1756d14abf15SRobert Mustacchi     struct _lm_device_t *pdev,
1757d14abf15SRobert Mustacchi     lm_tcp_state_t *tcp,
1758d14abf15SRobert Mustacchi     lm_tcp_mem_block_t * mblk,
1759d14abf15SRobert Mustacchi     lm_tcp_phy_mem_block_t * phy_mblk)
1760d14abf15SRobert Mustacchi {
1761d14abf15SRobert Mustacchi     DbgMessage(pdev, VERBOSEl4sp, "###lm_tcp_init_tx_con\n");
1762d14abf15SRobert Mustacchi     DbgBreakIf(!(pdev && tcp));
1763d14abf15SRobert Mustacchi 
1764d14abf15SRobert Mustacchi     /* tx-specific */
1765d14abf15SRobert Mustacchi     tcp->tx_con->type = TCP_CON_TYPE_TX;
1766d14abf15SRobert Mustacchi     mm_memset(&tcp->tx_con->u.tx, 0, sizeof(lm_tcp_con_tx_t));
1767d14abf15SRobert Mustacchi 
1768d14abf15SRobert Mustacchi     tcp->tx_con->flags = (TCP_POST_BLOCKED | TCP_COMP_BLOCKED);
1769d14abf15SRobert Mustacchi     tcp->tx_con->tcp_state = tcp;
1770d14abf15SRobert Mustacchi     s_list_init(&tcp->tx_con->active_tb_list, NULL, NULL, 0);
1771d14abf15SRobert Mustacchi 
1772d14abf15SRobert Mustacchi     /* rx-specific */
1773d14abf15SRobert Mustacchi     tcp->rx_con->type = TCP_CON_TYPE_RX;
1774d14abf15SRobert Mustacchi     mm_memset(&tcp->rx_con->u.rx, 0, sizeof(lm_tcp_con_rx_t));
1775d14abf15SRobert Mustacchi 
1776d14abf15SRobert Mustacchi     tcp->rx_con->flags = (TCP_POST_BLOCKED | TCP_COMP_BLOCKED);
1777d14abf15SRobert Mustacchi     tcp->rx_con->tcp_state = tcp;
1778d14abf15SRobert Mustacchi     s_list_init(&tcp->rx_con->active_tb_list, NULL, NULL, 0);
1779d14abf15SRobert Mustacchi 
1780d14abf15SRobert Mustacchi     lm_tcp_init_tcp_phys_mem(pdev,tcp,phy_mblk);
1781d14abf15SRobert Mustacchi 
1782d14abf15SRobert Mustacchi     lm_tcp_init_tcp_virt_mem(pdev,tcp,mblk);
1783d14abf15SRobert Mustacchi 
1784d14abf15SRobert Mustacchi 
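    /* rx (SWS) and tx share the same MSS, derived from the path MTU, the remote MSS,
     * the IP version, the timestamp option, SNAP encapsulation and VLAN presence. */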
1785d14abf15SRobert Mustacchi     tcp->rx_con->u.rx.sws_info.mss = tcp->tx_con->u.tx.mss =
1786d14abf15SRobert Mustacchi         _lm_tcp_calc_mss(tcp->path->path_cached.path_mtu,
1787d14abf15SRobert Mustacchi                          tcp->tcp_const.remote_mss,
1788d14abf15SRobert Mustacchi                          (tcp->path->path_const.ip_version == IP_VERSION_IPV6),
1789d14abf15SRobert Mustacchi                          tcp->tcp_const.tcp_flags & TCP_FLAG_ENABLE_TIME_STAMP,
1790d14abf15SRobert Mustacchi                          pdev->ofld_info.l4_params.flags & OFLD_PARAM_FLAG_SNAP_ENCAP,
1791d14abf15SRobert Mustacchi                          tcp->path->neigh->neigh_const.vlan_tag  != 0);
1792d14abf15SRobert Mustacchi 
1793d14abf15SRobert Mustacchi 
1794d14abf15SRobert Mustacchi 
1795d14abf15SRobert Mustacchi     tcp->rx_con->u.rx.gen_info.max_frag_count  = _lm_tcp_calc_frag_cnt(pdev, tcp->tcp_cached.initial_rcv_wnd, tcp->rx_con->u.rx.sws_info.mss);
1796d14abf15SRobert Mustacchi     return LM_STATUS_SUCCESS;
1797d14abf15SRobert Mustacchi }
1798d14abf15SRobert Mustacchi 
1799d14abf15SRobert Mustacchi /* Returns the required virtual-memory size for a connection. If tcp_state is given,
1800d14abf15SRobert Mustacchi  * the size is calculated for that specific connection; otherwise the default size is returned.
1801d14abf15SRobert Mustacchi  */
1802d14abf15SRobert Mustacchi u32_t lm_tcp_get_virt_size(
1803d14abf15SRobert Mustacchi     struct _lm_device_t * pdev,
1804d14abf15SRobert Mustacchi     lm_tcp_state_t * tcp_state)
1805d14abf15SRobert Mustacchi {
1806d14abf15SRobert Mustacchi     u32_t       virt_size = 0;
1807d14abf15SRobert Mustacchi     u32_t       mss       = 0;
1808d14abf15SRobert Mustacchi     u32_t const chain_idx = LM_SW_LEADING_RSS_CID(pdev);
1809d14abf15SRobert Mustacchi 
1810d14abf15SRobert Mustacchi     virt_size =
1811d14abf15SRobert Mustacchi         pdev->params.l4_history_cqe_cnt*sizeof(struct toe_tx_cqe)   +
1812d14abf15SRobert Mustacchi         pdev->params.l4_history_cqe_cnt*sizeof(struct toe_rx_cqe);
1813d14abf15SRobert Mustacchi 
1814d14abf15SRobert Mustacchi     if (tcp_state)
1815d14abf15SRobert Mustacchi     {
1816d14abf15SRobert Mustacchi         virt_size += lm_tcp_rx_con_get_virt_size(pdev,tcp_state);
1817d14abf15SRobert Mustacchi     }
1818d14abf15SRobert Mustacchi     else
1819d14abf15SRobert Mustacchi     {
1820d14abf15SRobert Mustacchi         #define LM_TCP_DEFAULT_WINDOW_SIZE 0x10000
1821d14abf15SRobert Mustacchi 
1822d14abf15SRobert Mustacchi         if(CHK_NULL(pdev) ||
1823d14abf15SRobert Mustacchi         ERR_IF((ARRSIZE(pdev->params.l2_cli_con_params) <= chain_idx) ||
1824d14abf15SRobert Mustacchi                 (CHIP_IS_E1H(pdev) && (chain_idx >= ETH_MAX_RX_CLIENTS_E1H)) || /* TODO E2 add IS_E2*/
1825d14abf15SRobert Mustacchi                 (CHIP_IS_E1(pdev) && (chain_idx >= ETH_MAX_RX_CLIENTS_E1)) ))
1826d14abf15SRobert Mustacchi         {
1827d14abf15SRobert Mustacchi             DbgBreakIf(1);
1828d14abf15SRobert Mustacchi             return 0;
1829d14abf15SRobert Mustacchi         }
1830d14abf15SRobert Mustacchi 
1831d14abf15SRobert Mustacchi         mss = _lm_tcp_calc_mss(pdev->params.l2_cli_con_params[chain_idx].mtu, 0xffff, FALSE, FALSE, FALSE, FALSE);
1832d14abf15SRobert Mustacchi         virt_size += sizeof(lm_frag_list_t) +
1833d14abf15SRobert Mustacchi             (_lm_tcp_calc_frag_cnt(pdev, LM_TCP_DEFAULT_WINDOW_SIZE, mss) - 1)*sizeof(lm_frag_t);
1834d14abf15SRobert Mustacchi     }
1835d14abf15SRobert Mustacchi     return virt_size;
1836d14abf15SRobert Mustacchi }
1837d14abf15SRobert Mustacchi 
1838d14abf15SRobert Mustacchi u32_t lm_tcp_get_phys_size(
1839d14abf15SRobert Mustacchi     struct _lm_device_t * pdev)
1840d14abf15SRobert Mustacchi {
1841d14abf15SRobert Mustacchi     u32_t mem_size = TOE_SP_PHYS_DATA_SIZE + TOE_DB_TX_DATA_SIZE + TOE_DB_RX_DATA_SIZE;
1842d14abf15SRobert Mustacchi 
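    /* Round the slow-path + doorbell area up to a whole page. Note that the formula
     * below always adds a page, i.e. it allocates an extra page when mem_size happens
     * to already be a multiple of LM_PAGE_SIZE. */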
1843d14abf15SRobert Mustacchi     mem_size = ((mem_size / LM_PAGE_SIZE) + 1) * LM_PAGE_SIZE;
1844d14abf15SRobert Mustacchi 
1845d14abf15SRobert Mustacchi     mem_size += pdev->params.l4_rx_chain_page_cnt*LM_PAGE_SIZE + /* rx bd-chain */
1846d14abf15SRobert Mustacchi             pdev->params.l4_tx_chain_page_cnt*LM_PAGE_SIZE; /* tx bd-chain */
1847d14abf15SRobert Mustacchi 
1848d14abf15SRobert Mustacchi     return mem_size;
1849d14abf15SRobert Mustacchi }
1850d14abf15SRobert Mustacchi 
1851d14abf15SRobert Mustacchi lm_status_t lm_tcp_post_buffered_data(
1852d14abf15SRobert Mustacchi     struct _lm_device_t *pdev,
1853d14abf15SRobert Mustacchi     lm_tcp_state_t *tcp,
1854d14abf15SRobert Mustacchi     d_list_t *buffered_data)
1855d14abf15SRobert Mustacchi {
1856d14abf15SRobert Mustacchi     lm_tcp_con_rx_gen_info_t * gen_info     = NULL;
1857d14abf15SRobert Mustacchi     lm_tcp_gen_buf_t         * curr_gen_buf = NULL;
1858d14abf15SRobert Mustacchi 
1859d14abf15SRobert Mustacchi     DbgBreakIf(!buffered_data);
1860d14abf15SRobert Mustacchi     if(!d_list_is_empty(buffered_data)) {
1861d14abf15SRobert Mustacchi         gen_info = &tcp->rx_con->u.rx.gen_info;
1862d14abf15SRobert Mustacchi         curr_gen_buf = (lm_tcp_gen_buf_t *)d_list_peek_head(buffered_data);
1863d14abf15SRobert Mustacchi         DbgBreakIf(!d_list_is_empty(&gen_info->peninsula_list));
1864d14abf15SRobert Mustacchi         d_list_add_head(&gen_info->peninsula_list, buffered_data);
1865d14abf15SRobert Mustacchi         /* initialize peninsula_nbytes */
1866d14abf15SRobert Mustacchi         while (curr_gen_buf) {
1867d14abf15SRobert Mustacchi             gen_info->peninsula_nbytes += curr_gen_buf->placed_bytes;
1868d14abf15SRobert Mustacchi             curr_gen_buf = (lm_tcp_gen_buf_t *)d_list_next_entry(&curr_gen_buf->link);
1869d14abf15SRobert Mustacchi         }
1870d14abf15SRobert Mustacchi 
1871d14abf15SRobert Mustacchi         DbgBreakIf(tcp->rx_con->flags & TCP_INDICATE_REJECTED);
1872d14abf15SRobert Mustacchi         tcp->rx_con->flags |= TCP_RX_COMP_DEFERRED; /* TCP_INDICATE_REJECTED was used here to wait for rx buffers from the OS.
1873d14abf15SRobert Mustacchi                                                        With the TCP_RX_COMP_DEFERRED flag, processing of the
1874d14abf15SRobert Mustacchi                                                        SP_REQUEST_INITIATE_OFFLOAD completion will indicate the buffered data
1875d14abf15SRobert Mustacchi                                                        if needed */
1876d14abf15SRobert Mustacchi     }
1877d14abf15SRobert Mustacchi 
1878d14abf15SRobert Mustacchi 
1879d14abf15SRobert Mustacchi     return LM_STATUS_SUCCESS;
1880d14abf15SRobert Mustacchi }
1881d14abf15SRobert Mustacchi 
1882d14abf15SRobert Mustacchi /* calculate the tcp pseudo checksum.
1883d14abf15SRobert Mustacchi  * input and returned value are in _network_ order */
1884d14abf15SRobert Mustacchi static u16_t lm_tcp_calc_tcp_pseudo_checksum(
1885d14abf15SRobert Mustacchi     struct _lm_device_t *pdev,
1886d14abf15SRobert Mustacchi     u32_t n_src_ip[4],
1887d14abf15SRobert Mustacchi     u32_t n_dst_ip[4],
1888d14abf15SRobert Mustacchi     u8_t ip_type)
1889d14abf15SRobert Mustacchi {
1890d14abf15SRobert Mustacchi #define D_IP_PROTOCOL_TCP 6
1891d14abf15SRobert Mustacchi     u32_t sum = 0;
1892d14abf15SRobert Mustacchi     int i;
1893d14abf15SRobert Mustacchi 
1894d14abf15SRobert Mustacchi     if(ip_type == IP_VERSION_IPV4) { /* IPV4 */
1895d14abf15SRobert Mustacchi         sum += n_src_ip[0] & 0xffff;
1896d14abf15SRobert Mustacchi         sum += (n_src_ip[0]>>16) & 0xffff;
1897d14abf15SRobert Mustacchi 
1898d14abf15SRobert Mustacchi         sum += n_dst_ip[0] & 0xffff;
1899d14abf15SRobert Mustacchi         sum += (n_dst_ip[0]>>16) & 0xffff;
1900d14abf15SRobert Mustacchi     } else {
1901d14abf15SRobert Mustacchi         for (i = 0; i < 4; i++) {
1902d14abf15SRobert Mustacchi             sum += n_src_ip[i] & 0xffff;
1903d14abf15SRobert Mustacchi             sum += (n_src_ip[i]>>16) & 0xffff;
1904d14abf15SRobert Mustacchi         }
1905d14abf15SRobert Mustacchi         for (i = 0; i < 4; i++) {
1906d14abf15SRobert Mustacchi             sum += n_dst_ip[i] & 0xffff;
1907d14abf15SRobert Mustacchi             sum += (n_dst_ip[i]>>16) & 0xffff;
1908d14abf15SRobert Mustacchi         }
1909d14abf15SRobert Mustacchi     }
1910d14abf15SRobert Mustacchi 
1911d14abf15SRobert Mustacchi     sum +=  HTON16((u16_t)(D_IP_PROTOCOL_TCP));
1912d14abf15SRobert Mustacchi 
1913d14abf15SRobert Mustacchi    /* Fold 32-bit sum to 16 bits */
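   /* e.g. (illustrative value only): sum = 0x1B3C8 folds to 0xB3C8 + 0x1 = 0xB3C9,
    * which fits in 16 bits, so the loop stops after one iteration. */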
1914d14abf15SRobert Mustacchi    while( sum >> 16 ) {
1915d14abf15SRobert Mustacchi        sum = (sum & 0xffff) + (sum >> 16);
1916d14abf15SRobert Mustacchi    }
1917d14abf15SRobert Mustacchi 
1918d14abf15SRobert Mustacchi    DbgMessage(pdev, VERBOSEl4sp,
1919d14abf15SRobert Mustacchi                "_lm_tcp_calc_tcp_pseudo_checksum: n_src_ip=%x, n_dst_ip=%x, (u16_t)sum=%x\n",
1920d14abf15SRobert Mustacchi                n_src_ip[0], n_dst_ip[0], (u16_t)sum);
1921d14abf15SRobert Mustacchi 
1922d14abf15SRobert Mustacchi    return (u16_t)sum;
1923d14abf15SRobert Mustacchi }
1924d14abf15SRobert Mustacchi 
1925d14abf15SRobert Mustacchi /* find the bd in the bd chain that contains snd_nxt, the offset of snd_nxt
1926d14abf15SRobert Mustacchi  * within this bd, and the base address of the page that contains this bd. */
1927d14abf15SRobert Mustacchi static lm_status_t lm_locate_snd_next_info(
1928d14abf15SRobert Mustacchi     lm_tcp_con_t * tx_con,
1929d14abf15SRobert Mustacchi     u32_t          snd_nxt,
1930d14abf15SRobert Mustacchi     u32_t          snd_una,
1931d14abf15SRobert Mustacchi     u16_t        * bd_idx,
1932d14abf15SRobert Mustacchi     u16_t        * bd_offset,
1933d14abf15SRobert Mustacchi     lm_address_t * page_addr)
1934d14abf15SRobert Mustacchi {
1935d14abf15SRobert Mustacchi     u32_t              cur_seq   = 0;
1936d14abf15SRobert Mustacchi     struct toe_tx_bd * cur_tx_bd = NULL;
1937d14abf15SRobert Mustacchi 
1938d14abf15SRobert Mustacchi     /* we assume that the first byte of the first application buffer equals SND.UNA;
1939d14abf15SRobert Mustacchi      * we need to find SND.NXT relative to it */
1940d14abf15SRobert Mustacchi     DbgMessage(NULL, VERBOSEl4sp, "### lm_locate_snd_next_info\n");
1941d14abf15SRobert Mustacchi 
1942d14abf15SRobert Mustacchi     /* want to make sure the consumer is still zero ... */
1943d14abf15SRobert Mustacchi     if ((tx_con->bd_chain.cons_idx != 0) ||
1944d14abf15SRobert Mustacchi         (S32_SUB(tx_con->bytes_post_cnt ,S32_SUB(snd_nxt, snd_una)) < 0) ||
1945d14abf15SRobert Mustacchi         (tx_con->bytes_comp_cnt))
1946d14abf15SRobert Mustacchi     {
1947d14abf15SRobert Mustacchi         DbgBreakIf(tx_con->bd_chain.cons_idx != 0);
1948d14abf15SRobert Mustacchi         DbgBreakIf(S32_SUB(tx_con->bytes_post_cnt ,S32_SUB(snd_nxt, snd_una)) < 0);
1949d14abf15SRobert Mustacchi         DbgBreakIf(tx_con->bytes_comp_cnt); /* nothing should be completed yet */
1950d14abf15SRobert Mustacchi         return LM_STATUS_INVALID_PARAMETER;
1951d14abf15SRobert Mustacchi     }
1952d14abf15SRobert Mustacchi 
1953d14abf15SRobert Mustacchi     *bd_idx = 0;
1954d14abf15SRobert Mustacchi     *bd_offset = 0;
1955d14abf15SRobert Mustacchi     *page_addr = tx_con->bd_chain.bd_chain_phy;
1956d14abf15SRobert Mustacchi 
1957d14abf15SRobert Mustacchi     if (lm_bd_chain_prod_idx(&tx_con->bd_chain) == 0) {
1958d14abf15SRobert Mustacchi         /* If the producer is '0', chain is empty. bd_idx/offset are 0 */
1959d14abf15SRobert Mustacchi         if ((tx_con->bytes_post_cnt > 0) ||
1960d14abf15SRobert Mustacchi             (snd_nxt != snd_una))
1961d14abf15SRobert Mustacchi         {
1962d14abf15SRobert Mustacchi             DbgBreakIf(tx_con->bytes_post_cnt > 0);
1963d14abf15SRobert Mustacchi             /* Notice: This case was seen and it's a bug in the MS stack: delegated: snd_nxt > snd_una but WITHOUT unacked data */
1964d14abf15SRobert Mustacchi             DbgBreakIf(snd_nxt != snd_una);
1965d14abf15SRobert Mustacchi             return LM_STATUS_INVALID_PARAMETER;
1966d14abf15SRobert Mustacchi         }
1967d14abf15SRobert Mustacchi         return LM_STATUS_SUCCESS;
1968d14abf15SRobert Mustacchi     }
1969d14abf15SRobert Mustacchi 
1970d14abf15SRobert Mustacchi     cur_seq    = snd_una;
1971d14abf15SRobert Mustacchi     cur_tx_bd  = (struct toe_tx_bd *)tx_con->bd_chain.bd_chain_virt;
1972d14abf15SRobert Mustacchi 
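    /* Walk the bd chain from the first bd, accumulating bd sizes, until reaching
     * the bd that contains snd_nxt (or until the producer index is hit). */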
1973d14abf15SRobert Mustacchi     while ((*bd_idx < lm_bd_chain_prod_idx(&tx_con->bd_chain))
1974d14abf15SRobert Mustacchi         && S32_SUB(snd_nxt, cur_seq + cur_tx_bd->size) >= 0) {
1975d14abf15SRobert Mustacchi         /* Advance to the next bd. */
1976d14abf15SRobert Mustacchi         cur_seq += cur_tx_bd->size;
1977d14abf15SRobert Mustacchi         lm_bd_chain_incr_bd(&tx_con->bd_chain, page_addr, (void**)&cur_tx_bd, bd_idx);
1978d14abf15SRobert Mustacchi     }
1979d14abf15SRobert Mustacchi 
1980d14abf15SRobert Mustacchi     /* make sure assignment is legit. */
1981d14abf15SRobert Mustacchi     if ((S32_SUB(snd_nxt, cur_seq) < 0) ||
1982d14abf15SRobert Mustacchi         (S32_SUB(snd_nxt, cur_seq) > 0xffff))
1983d14abf15SRobert Mustacchi     {
1984d14abf15SRobert Mustacchi         DbgBreakIf(S32_SUB(snd_nxt, cur_seq) < 0 );
1985d14abf15SRobert Mustacchi         DbgBreakIf(S32_SUB(snd_nxt, cur_seq) > 0xffff );
1986d14abf15SRobert Mustacchi         return LM_STATUS_INVALID_PARAMETER;
1987d14abf15SRobert Mustacchi     }
1988d14abf15SRobert Mustacchi 
1989d14abf15SRobert Mustacchi     *bd_offset = S32_SUB(snd_nxt, cur_seq);
1990d14abf15SRobert Mustacchi     return LM_STATUS_SUCCESS;
1991d14abf15SRobert Mustacchi }
1992d14abf15SRobert Mustacchi 
1993d14abf15SRobert Mustacchi static lm_status_t _lm_tcp_init_xstorm_toe_context(
1994d14abf15SRobert Mustacchi     struct _lm_device_t *pdev,
1995d14abf15SRobert Mustacchi     lm_tcp_state_t * tcp)
1996d14abf15SRobert Mustacchi {
1997d14abf15SRobert Mustacchi     struct toe_context * ctx                = (struct toe_context *)tcp->ctx_virt;
1998d14abf15SRobert Mustacchi     struct xstorm_toe_ag_context * xctx_ag  = &ctx->xstorm_ag_context;
1999d14abf15SRobert Mustacchi     struct xstorm_toe_st_context * xctx_st  = &ctx->xstorm_st_context.context;
2000d14abf15SRobert Mustacchi     lm_address_t                  mem_phys  = {{0}};
2001d14abf15SRobert Mustacchi     u16_t                         bd_idx    = 0;
2002d14abf15SRobert Mustacchi     u16_t                         bd_offset = 0;
2003d14abf15SRobert Mustacchi     lm_status_t                   lm_status = LM_STATUS_SUCCESS;
2004d14abf15SRobert Mustacchi 
2005d14abf15SRobert Mustacchi     /* xstorm ag context */
2006d14abf15SRobert Mustacchi     mm_memset(xctx_ag, 0, sizeof(struct xstorm_toe_ag_context));
2007d14abf15SRobert Mustacchi 
2008d14abf15SRobert Mustacchi     if(tcp->tcp_cached.tcp_flags & TCP_FLAG_ENABLE_NAGLING)
2009d14abf15SRobert Mustacchi     {
2010d14abf15SRobert Mustacchi         xctx_ag->agg_vars1 |= XSTORM_TOE_AG_CONTEXT_NAGLE_EN;
2011d14abf15SRobert Mustacchi     }
2012d14abf15SRobert Mustacchi     /* Initialize Send-Una info */
2013d14abf15SRobert Mustacchi     mem_phys = lm_bd_chain_phys_addr(&tcp->tx_con->bd_chain, 0);
2014d14abf15SRobert Mustacchi     xctx_ag->cmp_bd_cons           = 0;                           /* idx of bd with snd.una - always 0 */
2015d14abf15SRobert Mustacchi     xctx_ag->cmp_bd_page_0_to_31   = mem_phys.as_u32.low;         /* page that includes the snd.una */
2016d14abf15SRobert Mustacchi     xctx_ag->cmp_bd_page_32_to_63  = mem_phys.as_u32.high;        /* page that includes the snd.una */
2017d14abf15SRobert Mustacchi     xctx_ag->cmp_bd_start_seq      = tcp->tcp_delegated.send_una; /* the sequence number of the first byte in the bd which holds SndUna */
2018d14abf15SRobert Mustacchi 
2019d14abf15SRobert Mustacchi     /* more_to_send: the difference between SndNxt and the last byte in the bd pointed to by the bd producer */
2020d14abf15SRobert Mustacchi     if (tcp->tx_con->bytes_comp_cnt)
2021d14abf15SRobert Mustacchi     {
2022d14abf15SRobert Mustacchi         DbgBreakIf(tcp->tx_con->bytes_comp_cnt);
2023d14abf15SRobert Mustacchi         return LM_STATUS_INVALID_PARAMETER;
2024d14abf15SRobert Mustacchi     }
2025d14abf15SRobert Mustacchi     xctx_ag->more_to_send = S32_SUB(tcp->tx_con->bytes_post_cnt,(S32_SUB(tcp->tcp_delegated.send_next,tcp->tcp_delegated.send_una)));
2026d14abf15SRobert Mustacchi     if ((tcp->tx_con->flags & TCP_FIN_REQ_POSTED) && !(tcp->tx_con->flags & TCP_FIN_REQ_COMPLETED)) {
2027d14abf15SRobert Mustacchi         xctx_ag->more_to_send--; /* the fin byte on the bd chain is not counted */
2028d14abf15SRobert Mustacchi     }
2029d14abf15SRobert Mustacchi 
2030d14abf15SRobert Mustacchi     /* xstorm st context */
2031d14abf15SRobert Mustacchi     mm_memset(xctx_st, 0, sizeof(struct xstorm_toe_st_context));
2032d14abf15SRobert Mustacchi     lm_status = lm_locate_snd_next_info(tcp->tx_con, tcp->tcp_delegated.send_next, tcp->tcp_delegated.send_una,
2033d14abf15SRobert Mustacchi                             &bd_idx, &bd_offset, &mem_phys);
2034d14abf15SRobert Mustacchi     if (lm_status != LM_STATUS_SUCCESS)
2035d14abf15SRobert Mustacchi     {
2036d14abf15SRobert Mustacchi         return lm_status;
2037d14abf15SRobert Mustacchi     }
2038d14abf15SRobert Mustacchi     xctx_st->toe.tx_bd_cons                   = bd_idx;    /* index of bd that includes snd_nxt */
2039d14abf15SRobert Mustacchi     xctx_st->toe.tx_bd_offset                 = bd_offset; /* offset of snd_nxt within its bd */
2040d14abf15SRobert Mustacchi     xctx_st->toe.tx_bd_page_base_hi           = mem_phys.as_u32.high;
2041d14abf15SRobert Mustacchi     xctx_st->toe.tx_bd_page_base_lo           = mem_phys.as_u32.low;
2042d14abf15SRobert Mustacchi 
2043d14abf15SRobert Mustacchi     xctx_st->toe.bd_prod                      = lm_bd_chain_prod_idx(&tcp->tx_con->bd_chain); /* Bd containing the last byte the application wishes to transmit */
2044d14abf15SRobert Mustacchi     xctx_st->toe.driver_doorbell_info_ptr_lo  = tcp->tx_con->phys_db_data.as_u32.low;
2045d14abf15SRobert Mustacchi     xctx_st->toe.driver_doorbell_info_ptr_hi  = tcp->tx_con->phys_db_data.as_u32.high;
2046d14abf15SRobert Mustacchi 
2047d14abf15SRobert Mustacchi     return LM_STATUS_SUCCESS;
2048d14abf15SRobert Mustacchi }
2049d14abf15SRobert Mustacchi 
2050d14abf15SRobert Mustacchi 
2051d14abf15SRobert Mustacchi static lm_status_t _lm_tcp_init_ustorm_toe_context(
2052d14abf15SRobert Mustacchi     struct _lm_device_t *pdev,
2053d14abf15SRobert Mustacchi     lm_tcp_state_t *tcp)
2054d14abf15SRobert Mustacchi {
2055d14abf15SRobert Mustacchi     struct toe_context *          ctx      = (struct toe_context *)tcp->ctx_virt;
2056d14abf15SRobert Mustacchi     struct ustorm_toe_ag_context *uctx_ag  = &ctx->ustorm_ag_context;
2057d14abf15SRobert Mustacchi     struct ustorm_toe_st_context *uctx_st  = &ctx->ustorm_st_context.context;
2058d14abf15SRobert Mustacchi     lm_address_t                  mem_phys = {{0}};
2059d14abf15SRobert Mustacchi 
2060d14abf15SRobert Mustacchi     /* Calculate the crc8 for CDU Validation */
2061d14abf15SRobert Mustacchi     mm_memset(uctx_ag, 0, sizeof(struct ustorm_toe_ag_context));
2062d14abf15SRobert Mustacchi 
2063d14abf15SRobert Mustacchi     /* ustorm_ag_context */
2064d14abf15SRobert Mustacchi     uctx_ag->rq_prod                     = 0;
2065d14abf15SRobert Mustacchi     uctx_ag->driver_doorbell_info_ptr_hi = tcp->rx_con->phys_db_data.as_u32.high;
2066d14abf15SRobert Mustacchi     uctx_ag->driver_doorbell_info_ptr_lo = tcp->rx_con->phys_db_data.as_u32.low;
2067d14abf15SRobert Mustacchi 
2068d14abf15SRobert Mustacchi     /* ustorm_st_context */
2069d14abf15SRobert Mustacchi     mm_memset(uctx_st, 0, sizeof(struct ustorm_toe_st_context));
2070d14abf15SRobert Mustacchi     uctx_st->indirection_ram_offset   = (u16_t)tcp->tcp_const.hash_value;
2071d14abf15SRobert Mustacchi     uctx_st->pen_grq_placed_bytes     = tcp->rx_con->u.rx.gen_info.peninsula_nbytes;
2072d14abf15SRobert Mustacchi     DbgMessage(pdev, INFORMl4sp, "_lm_tcp_init_ustorm_toe_context: IRO is 0x%x, IS is %d\n",
2073d14abf15SRobert Mustacchi                 uctx_st->indirection_ram_offset, uctx_st->__indirection_shift);
2074d14abf15SRobert Mustacchi     if ((tcp->tcp_cached.rcv_indication_size > 0xffff) ||
2075d14abf15SRobert Mustacchi         (tcp->tcp_cached.rcv_indication_size != 0))
2076d14abf15SRobert Mustacchi     {
2077d14abf15SRobert Mustacchi         DbgBreakIf(tcp->tcp_cached.rcv_indication_size > 0xffff);
2078d14abf15SRobert Mustacchi         DbgBreakIf(tcp->tcp_cached.rcv_indication_size != 0); /* TBA receive_indication_size != 0 not supported : if it is we need to change initialization below */
2079d14abf15SRobert Mustacchi         return LM_STATUS_INVALID_PARAMETER;
2080d14abf15SRobert Mustacchi     }
2081d14abf15SRobert Mustacchi     /* We set the ustorm context to rcv_indication_size = 1 byte. This means that the first packet placed on the GRQ
2082d14abf15SRobert Mustacchi      * that equals or exceeds 1 byte is indicated immediately, without arming the push timer. The first packet is identified as
2083d14abf15SRobert Mustacchi      * a packet that is placed while there are no GRQ placed bytes; every time the driver advertises 'consumedGRQ', GRQ placed bytes
2084d14abf15SRobert Mustacchi      * is decreased by that number, and bringing it back to '0' brings us back to the state where the next packet with 1 byte will be indicated.
2085d14abf15SRobert Mustacchi      * We added this feature due to a sparta test called ReceiveIndication, which sends a fairly small packet and expects it to be indicated straight
2086d14abf15SRobert Mustacchi      * away; for some reason the small RQ buffer doesn't make its way to the VBD... */
2087d14abf15SRobert Mustacchi     uctx_st->rcv_indication_size      = 1;
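    /* The peninsula RQ consumer starts at index 0, pointing at the first page of the rx bd-chain. */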
2088d14abf15SRobert Mustacchi     mem_phys = lm_bd_chain_phys_addr(&tcp->rx_con->bd_chain, 0);
2089d14abf15SRobert Mustacchi     uctx_st->pen_ring_params.rq_cons  = 0;
2090d14abf15SRobert Mustacchi     uctx_st->pen_ring_params.rq_cons_addr_hi = mem_phys.as_u32.high;
2091d14abf15SRobert Mustacchi     uctx_st->pen_ring_params.rq_cons_addr_lo = mem_phys.as_u32.low;
2092d14abf15SRobert Mustacchi 
2093d14abf15SRobert Mustacchi     uctx_st->prev_rcv_win_right_edge = tcp->rx_con->db_data.rx->rcv_win_right_edge;
2094d14abf15SRobert Mustacchi 
2095d14abf15SRobert Mustacchi     if (pdev->params.l4_ignore_grq_push_enabled)
2096d14abf15SRobert Mustacchi     {
2097d14abf15SRobert Mustacchi         SET_FLAGS(uctx_st->flags2, USTORM_TOE_ST_CONTEXT_IGNORE_GRQ_PUSH);
2098d14abf15SRobert Mustacchi     }
2099d14abf15SRobert Mustacchi 
2100d14abf15SRobert Mustacchi     if (pdev->params.l4_enable_rss == L4_RSS_DYNAMIC)
2101d14abf15SRobert Mustacchi     {
2102d14abf15SRobert Mustacchi         SET_FLAGS( uctx_st->flags2, USTORM_TOE_ST_CONTEXT_RSS_UPDATE_ENABLED );
2103d14abf15SRobert Mustacchi     }
2104d14abf15SRobert Mustacchi     /*DbgMessage(pdev, FATAL, "_lm_tcp_init_ustorm_toe_context(): uctx_st->initial_rcv_wnd=%d\n", tcp->tcp_cached.initial_rcv_wnd);*/
2105d14abf15SRobert Mustacchi     uctx_st->initial_rcv_wnd = tcp->tcp_cached.initial_rcv_wnd;
2106d14abf15SRobert Mustacchi     uctx_st->rcv_nxt         = tcp->tcp_delegated.recv_next;
2107d14abf15SRobert Mustacchi 
2108d14abf15SRobert Mustacchi     return LM_STATUS_SUCCESS;
2109d14abf15SRobert Mustacchi }
2110d14abf15SRobert Mustacchi 
2111d14abf15SRobert Mustacchi static lm_status_t _lm_tcp_init_cstorm_toe_context(
2112d14abf15SRobert Mustacchi     struct _lm_device_t *pdev,
2113d14abf15SRobert Mustacchi     lm_tcp_state_t *tcp)
2114d14abf15SRobert Mustacchi {
2115d14abf15SRobert Mustacchi     struct toe_context           *ctx      = (struct toe_context *)tcp->ctx_virt;
2116d14abf15SRobert Mustacchi     struct cstorm_toe_ag_context *cctx_ag  = &ctx->cstorm_ag_context;
2117d14abf15SRobert Mustacchi     struct cstorm_toe_st_context *cctx_st  = &ctx->cstorm_st_context.context;
2118d14abf15SRobert Mustacchi     lm_address_t                  mem_phys = {{0}};
2119d14abf15SRobert Mustacchi 
2120d14abf15SRobert Mustacchi     mm_memset(cctx_ag, 0, sizeof(struct cstorm_toe_ag_context));
2121d14abf15SRobert Mustacchi 
2122d14abf15SRobert Mustacchi     if (tcp->tcp_cached.initial_rcv_wnd > MAX_INITIAL_RCV_WND)
2123d14abf15SRobert Mustacchi     {
2124d14abf15SRobert Mustacchi         /* we can't support more than the maximum receive window due to cyclic counters we use for
2125d14abf15SRobert Mustacchi          * recv_next, recv_win_seq, updates, window increase */
2126d14abf15SRobert Mustacchi         DbgBreakIfAll(tcp->tcp_cached.initial_rcv_wnd > MAX_INITIAL_RCV_WND);
2127d14abf15SRobert Mustacchi         return LM_STATUS_INVALID_PARAMETER;
2128d14abf15SRobert Mustacchi     }
2129d14abf15SRobert Mustacchi 
2130d14abf15SRobert Mustacchi     /* cstorm_ag_context */
2131d14abf15SRobert Mustacchi     cctx_ag->bd_prod = lm_bd_chain_prod_idx(&tcp->tx_con->bd_chain); /* Bd containing the last byte the application wishes to transmit */
2132d14abf15SRobert Mustacchi     cctx_ag->rel_seq = tcp->tcp_delegated.send_una;
2133d14abf15SRobert Mustacchi     cctx_ag->snd_max = tcp->tcp_delegated.send_max;
2134d14abf15SRobert Mustacchi 
2135d14abf15SRobert Mustacchi     /* cstorm_st_context */
2136d14abf15SRobert Mustacchi     mm_memset(cctx_st, 0, sizeof(struct cstorm_toe_st_context));
2137d14abf15SRobert Mustacchi     mem_phys = lm_bd_chain_phys_addr(&tcp->tx_con->bd_chain, 0);
2138d14abf15SRobert Mustacchi     cctx_st->bds_ring_page_base_addr_hi = mem_phys.as_u32.high; /* page that includes the snd.una */
2139d14abf15SRobert Mustacchi     cctx_st->bds_ring_page_base_addr_lo = mem_phys.as_u32.low;  /* page that includes the snd.una */
2140d14abf15SRobert Mustacchi     cctx_st->bd_cons          = 0; /* idx of bd with snd.una - always 0 */
2141d14abf15SRobert Mustacchi     if (ERR_IF(tcp->tcp_const.hash_value >= (u8_t)USTORM_INDIRECTION_TABLE_SIZE)) {
2142d14abf15SRobert Mustacchi         if (tcp->tcp_const.hash_value >= (u8_t)USTORM_INDIRECTION_TABLE_SIZE)
2143d14abf15SRobert Mustacchi         {
2144d14abf15SRobert Mustacchi             DbgBreakIfAll(tcp->tcp_const.hash_value >= (u8_t)USTORM_INDIRECTION_TABLE_SIZE);
2145d14abf15SRobert Mustacchi             return LM_STATUS_INVALID_PARAMETER;
2146d14abf15SRobert Mustacchi         }
2147d14abf15SRobert Mustacchi         tcp->tcp_const.hash_value = LM_TOE_FW_RSS_ID(pdev,LM_TOE_BASE_RSS_ID(pdev));
2148d14abf15SRobert Mustacchi     }
2149d14abf15SRobert Mustacchi 
2150d14abf15SRobert Mustacchi     cctx_st->prev_snd_max = tcp->tcp_delegated.send_una;
2151d14abf15SRobert Mustacchi 
2152d14abf15SRobert Mustacchi 
2153d14abf15SRobert Mustacchi 
2154d14abf15SRobert Mustacchi 
2155d14abf15SRobert Mustacchi     /* For TOE RSS the values in the USTORM (RSS) must differ from the ones in CSTORM (TSS).
2156d14abf15SRobert Mustacchi        2 options:
2157d14abf15SRobert Mustacchi         a. base chain.
2158d14abf15SRobert Mustacchi         b. value of the most up-to-date indirection table.
2159d14abf15SRobert Mustacchi     */
2160d14abf15SRobert Mustacchi     if (pdev->params.l4_enable_rss == L4_RSS_DISABLED)
2161d14abf15SRobert Mustacchi     {
2162d14abf15SRobert Mustacchi         cctx_st->cpu_id = LM_TOE_FW_RSS_ID(pdev,LM_TOE_BASE_RSS_ID(pdev));
2163d14abf15SRobert Mustacchi     }
2164d14abf15SRobert Mustacchi     else
2165d14abf15SRobert Mustacchi     {
2166d14abf15SRobert Mustacchi         cctx_st->cpu_id = pdev->toe_info.indirection_table[tcp->tcp_const.hash_value];
2167d14abf15SRobert Mustacchi     }
2168d14abf15SRobert Mustacchi 
2169d14abf15SRobert Mustacchi     cctx_st->free_seq = tcp->tcp_delegated.send_una - 1; /* (snd.una - 1 - offset of snd.una byte in its buffer (which is always 0)) */
2170d14abf15SRobert Mustacchi 
2171d14abf15SRobert Mustacchi     return LM_STATUS_SUCCESS;
2172d14abf15SRobert Mustacchi }
2173d14abf15SRobert Mustacchi 
2174d14abf15SRobert Mustacchi static lm_status_t _lm_tcp_init_tstorm_toe_context(
2175d14abf15SRobert Mustacchi     struct _lm_device_t *pdev,
2176d14abf15SRobert Mustacchi     lm_tcp_state_t * tcp)
2177d14abf15SRobert Mustacchi {
2178d14abf15SRobert Mustacchi     struct toe_context * ctx = (struct toe_context *)tcp->ctx_virt;
2179d14abf15SRobert Mustacchi     struct tstorm_toe_ag_context * tctx_ag = &ctx->tstorm_ag_context;
2180d14abf15SRobert Mustacchi     struct tstorm_toe_st_context * tctx_st = &ctx->tstorm_st_context.context;
2181d14abf15SRobert Mustacchi 
2182d14abf15SRobert Mustacchi     UNREFERENCED_PARAMETER_(pdev);
2183d14abf15SRobert Mustacchi 
2184d14abf15SRobert Mustacchi     /* tstorm ag context */
2185d14abf15SRobert Mustacchi     mm_mem_zero(tctx_ag, sizeof(struct tstorm_toe_ag_context));
2186d14abf15SRobert Mustacchi 
2187d14abf15SRobert Mustacchi     /* tstorm st context */
2188d14abf15SRobert Mustacchi     mm_mem_zero(tctx_st, sizeof(struct tstorm_toe_st_context));
2189d14abf15SRobert Mustacchi 
2190d14abf15SRobert Mustacchi     return LM_STATUS_SUCCESS;
2191d14abf15SRobert Mustacchi }
2192d14abf15SRobert Mustacchi 
2193d14abf15SRobert Mustacchi static lm_status_t _lm_tcp_init_timers_context(
2194d14abf15SRobert Mustacchi     struct _lm_device_t *pdev,
2195d14abf15SRobert Mustacchi     lm_tcp_state_t *tcp)
2196d14abf15SRobert Mustacchi {
2197d14abf15SRobert Mustacchi     struct toe_context * ctx = (struct toe_context *)tcp->ctx_virt;
2198d14abf15SRobert Mustacchi     /* timers_context */
2199d14abf15SRobert Mustacchi     SET_FLAGS(ctx->timers_context.flags, TIMERS_BLOCK_CONTEXT_CONN_VALID_FLG);
2200d14abf15SRobert Mustacchi 
2201d14abf15SRobert Mustacchi     UNREFERENCED_PARAMETER_(pdev);
2202d14abf15SRobert Mustacchi 
2203d14abf15SRobert Mustacchi     return LM_STATUS_SUCCESS;
2204d14abf15SRobert Mustacchi }
2205d14abf15SRobert Mustacchi 
2206d14abf15SRobert Mustacchi static lm_status_t _lm_tcp_init_toe_context(
2207d14abf15SRobert Mustacchi     struct _lm_device_t *pdev,
2208d14abf15SRobert Mustacchi     lm_tcp_state_t *tcp)
2209d14abf15SRobert Mustacchi {
2210d14abf15SRobert Mustacchi     lm_status_t lm_status = LM_STATUS_SUCCESS;
2211d14abf15SRobert Mustacchi 
2212d14abf15SRobert Mustacchi     lm_status = _lm_tcp_init_xstorm_toe_context(pdev, tcp);
2213d14abf15SRobert Mustacchi     if (lm_status != LM_STATUS_SUCCESS) {
2214d14abf15SRobert Mustacchi         return lm_status;
2215d14abf15SRobert Mustacchi     }
2216d14abf15SRobert Mustacchi     lm_status = _lm_tcp_init_ustorm_toe_context(pdev, tcp);
2217d14abf15SRobert Mustacchi     if (lm_status != LM_STATUS_SUCCESS) {
2218d14abf15SRobert Mustacchi         return lm_status;
2219d14abf15SRobert Mustacchi     }
2220d14abf15SRobert Mustacchi     lm_status = _lm_tcp_init_cstorm_toe_context(pdev, tcp);
2221d14abf15SRobert Mustacchi     if (lm_status != LM_STATUS_SUCCESS) {
2222d14abf15SRobert Mustacchi         return lm_status;
2223d14abf15SRobert Mustacchi     }
2224d14abf15SRobert Mustacchi     lm_status = _lm_tcp_init_tstorm_toe_context(pdev, tcp);
2225d14abf15SRobert Mustacchi     if (lm_status != LM_STATUS_SUCCESS) {
2226d14abf15SRobert Mustacchi         return lm_status;
2227d14abf15SRobert Mustacchi     }
2228d14abf15SRobert Mustacchi     lm_status = _lm_tcp_init_timers_context(pdev, tcp);
2229d14abf15SRobert Mustacchi     if (lm_status != LM_STATUS_SUCCESS) {
2230d14abf15SRobert Mustacchi         return lm_status;
2231d14abf15SRobert Mustacchi     }
2232d14abf15SRobert Mustacchi 
2233d14abf15SRobert Mustacchi     /* now we need to configure the cdu-validation data */
2234d14abf15SRobert Mustacchi     lm_set_cdu_validation_data(pdev, tcp->cid, FALSE /* don't invalidate */);
2235d14abf15SRobert Mustacchi     return LM_STATUS_SUCCESS;
2236d14abf15SRobert Mustacchi }
2237d14abf15SRobert Mustacchi 
2238d14abf15SRobert Mustacchi 
2239d14abf15SRobert Mustacchi static lm_status_t _lm_tcp_init_tstorm_tcp_context(
2240d14abf15SRobert Mustacchi     struct _lm_device_t *pdev,
2241d14abf15SRobert Mustacchi     lm_tcp_state_t *tcp
2242d14abf15SRobert Mustacchi     )
2243d14abf15SRobert Mustacchi {
2244d14abf15SRobert Mustacchi     /* TODO: unify iscsi + toe structure name */
2245d14abf15SRobert Mustacchi     struct tstorm_toe_tcp_ag_context_section *ttcp_ag;
2246d14abf15SRobert Mustacchi     struct tstorm_tcp_st_context_section *ttcp_st;
2247d14abf15SRobert Mustacchi     l4_ofld_params_t *l4_params = &pdev->ofld_info.l4_params;
2248d14abf15SRobert Mustacchi     lm_path_state_t *path = tcp->path;
2249d14abf15SRobert Mustacchi     lm_neigh_state_t *neigh = path->neigh;
2250d14abf15SRobert Mustacchi     u32_t sm_rtt, sm_delta;
2251d14abf15SRobert Mustacchi     u32_t snd_wnd;
2252d14abf15SRobert Mustacchi 
2253d14abf15SRobert Mustacchi     ASSERT_STATIC(sizeof(struct tstorm_toe_tcp_ag_context_section) == sizeof(struct tstorm_tcp_tcp_ag_context_section) );
2254d14abf15SRobert Mustacchi     if (tcp->ulp_type == TOE_CONNECTION_TYPE)
2255d14abf15SRobert Mustacchi     {
2256d14abf15SRobert Mustacchi         ttcp_ag = &((struct toe_context *)tcp->ctx_virt)->tstorm_ag_context.tcp;
2257d14abf15SRobert Mustacchi         ttcp_st = &((struct toe_context *)tcp->ctx_virt)->tstorm_st_context.context.tcp;
2258d14abf15SRobert Mustacchi     }
2259d14abf15SRobert Mustacchi     else
2260d14abf15SRobert Mustacchi     {
2261d14abf15SRobert Mustacchi         ttcp_ag = (struct tstorm_toe_tcp_ag_context_section *)&((struct iscsi_context *)tcp->ctx_virt)->tstorm_ag_context.tcp;
2262d14abf15SRobert Mustacchi         ttcp_st = &((struct iscsi_context *)tcp->ctx_virt)->tstorm_st_context.tcp;
2263d14abf15SRobert Mustacchi     }
2264d14abf15SRobert Mustacchi     mm_mem_zero(ttcp_ag, sizeof(struct tstorm_toe_tcp_ag_context_section));
2265d14abf15SRobert Mustacchi     mm_mem_zero(ttcp_st, sizeof(struct tstorm_tcp_st_context_section));
2266d14abf15SRobert Mustacchi 
2267d14abf15SRobert Mustacchi     /* tstorm_ag_context */
2268d14abf15SRobert Mustacchi     ttcp_ag->snd_max      = tcp->tcp_delegated.send_max;
2269d14abf15SRobert Mustacchi     ttcp_ag->snd_nxt      = tcp->tcp_delegated.send_next;
2270d14abf15SRobert Mustacchi     ttcp_ag->snd_una      = tcp->tcp_delegated.send_una;
2271d14abf15SRobert Mustacchi 
2272d14abf15SRobert Mustacchi     /* tstorm_st_context*/
2273d14abf15SRobert Mustacchi     // Starting with FW 7.6.5, DA_EN is a "don't care" for iSCSI as it is passed to the FW at pf init;
2274d14abf15SRobert Mustacchi     // the iSCSI FW overrides this flag according to the pf-init value regardless of the context init here.
2275d14abf15SRobert Mustacchi     ttcp_st->flags2 |= TSTORM_TCP_ST_CONTEXT_SECTION_DA_EN;         /* DA timer always on */
2276d14abf15SRobert Mustacchi 
2277d14abf15SRobert Mustacchi     // DA_COUNTER_EN should stay always on since FW will not use it in case DA_EN is off.
2278d14abf15SRobert Mustacchi     ttcp_st->flags2 |= TSTORM_TCP_ST_CONTEXT_SECTION_DA_COUNTER_EN; /* DA counter always on */
2279d14abf15SRobert Mustacchi     ttcp_st->dup_ack_count = tcp->tcp_delegated.dup_ack_count;
2280d14abf15SRobert Mustacchi 
2281d14abf15SRobert Mustacchi     if(tcp->tcp_const.tcp_flags & TCP_FLAG_ENABLE_TIME_STAMP) {
2282d14abf15SRobert Mustacchi         ttcp_st->flags1 |= TSTORM_TCP_ST_CONTEXT_SECTION_TIMESTAMP_EXISTS;
2283d14abf15SRobert Mustacchi     }
2284d14abf15SRobert Mustacchi     if(tcp->tcp_cached.tcp_flags & TCP_FLAG_ENABLE_KEEP_ALIVE) {
2285d14abf15SRobert Mustacchi         ttcp_st->flags1 |= TSTORM_TCP_ST_CONTEXT_SECTION_KA_ENABLED;
2286d14abf15SRobert Mustacchi         if ((tcp->tcp_cached.ka_time_out == 0) ||
2287d14abf15SRobert Mustacchi             (tcp->tcp_cached.ka_interval == 0))
2288d14abf15SRobert Mustacchi         {
2289d14abf15SRobert Mustacchi             DbgBreakIf(tcp->tcp_cached.ka_time_out == 0);
2290d14abf15SRobert Mustacchi             DbgBreakIf(tcp->tcp_cached.ka_interval == 0);
2291d14abf15SRobert Mustacchi             return LM_STATUS_INVALID_PARAMETER;
2292d14abf15SRobert Mustacchi         }
2293d14abf15SRobert Mustacchi     }
2294d14abf15SRobert Mustacchi     if(tcp->tcp_const.tcp_flags & TCP_FLAG_ENABLE_WIN_SCALING) {
2295d14abf15SRobert Mustacchi         ttcp_st->snd_wnd_scale = tcp->tcp_const.snd_seg_scale;
2296d14abf15SRobert Mustacchi     }
2297d14abf15SRobert Mustacchi 
2298d14abf15SRobert Mustacchi     ttcp_st->cwnd                 = tcp->tcp_delegated.send_cwin - tcp->tcp_delegated.send_una; /* i.e. ndis_tcp_delegated->CWnd */
2299d14abf15SRobert Mustacchi     /* bugbug: driver workaround - wnd may be 0xffffffff, in which case we change it to 2^30 - since the FW assumes this value
2300d14abf15SRobert Mustacchi      * doesn't wrap around, configuring it to 0xffffffff may cause it to wrap around and then change from a very large cwnd to a very
2301d14abf15SRobert Mustacchi      * small one - we give 2^30, which is the largest cwnd that can be advertised.  */
2302d14abf15SRobert Mustacchi     if (ttcp_st->cwnd == 0xffffffff) {
2303d14abf15SRobert Mustacchi         ttcp_st->cwnd = 0x40000000;
2304d14abf15SRobert Mustacchi     }
2305d14abf15SRobert Mustacchi 
2306d14abf15SRobert Mustacchi     ttcp_st->ka_interval          =
2307d14abf15SRobert Mustacchi         lm_time_resolution(pdev, tcp->tcp_cached.ka_interval, l4_params->ticks_per_second, TIMERS_TICKS_PER_SEC);
2308d14abf15SRobert Mustacchi     ttcp_st->ka_max_probe_count   = tcp->tcp_cached.ka_probe_cnt;
2309d14abf15SRobert Mustacchi     if(tcp->tcp_delegated.send_una == tcp->tcp_delegated.send_max) { /* KA is running (?) */
2310d14abf15SRobert Mustacchi         ttcp_st->ka_probe_count   = tcp->tcp_delegated.u.keep_alive.probe_cnt;
2311d14abf15SRobert Mustacchi     } else {   /* retransmit is running (?) */
2312d14abf15SRobert Mustacchi         ttcp_st->ka_probe_count   = 0;
2313d14abf15SRobert Mustacchi     }
2314d14abf15SRobert Mustacchi     ttcp_st->ka_timeout           =
2315d14abf15SRobert Mustacchi         lm_time_resolution(pdev, tcp->tcp_cached.ka_time_out, l4_params->ticks_per_second, TIMERS_TICKS_PER_SEC);
2316d14abf15SRobert Mustacchi 
2317d14abf15SRobert Mustacchi     /* Set the src mac addr in the tstorm context:
2318d14abf15SRobert Mustacchi      * In both big and little endian architectures, the mac addr is given by the client as an array of
2319d14abf15SRobert Mustacchi      * 6 chars. Therefore, regardless of the endian architecture, we need to swap this array into the little endian
2320d14abf15SRobert Mustacchi      * convention of the tstorm context. */
2321d14abf15SRobert Mustacchi     ttcp_st->msb_mac_address = mm_cpu_to_le16(NTOH16(*(u16 *)(&neigh->neigh_const.src_addr[0])));
2322d14abf15SRobert Mustacchi     ttcp_st->mid_mac_address = mm_cpu_to_le16(NTOH16(*(u16 *)(&neigh->neigh_const.src_addr[2])));
2323d14abf15SRobert Mustacchi     ttcp_st->lsb_mac_address = mm_cpu_to_le16(NTOH16(*(u16 *)(&neigh->neigh_const.src_addr[4])));
2324d14abf15SRobert Mustacchi 
2325d14abf15SRobert Mustacchi     ttcp_st->max_rt_time          =
2326d14abf15SRobert Mustacchi         lm_time_resolution(pdev, tcp->tcp_cached.max_rt, l4_params->ticks_per_second, TSEMI_CLK1_TICKS_PER_SEC);
2327d14abf15SRobert Mustacchi     /* GilR: place holder, to be enabled in v0_18_1 when proper FW support is included */
2328d14abf15SRobert Mustacchi     //ttcp_st->max_seg_retransmit_en = 0;
2329d14abf15SRobert Mustacchi     if (ttcp_st->max_rt_time == 0) { /* GilR 9/19/2006 - TBD - currently FW does not handle the '0' case correctly. */
2330d14abf15SRobert Mustacchi         ttcp_st->max_rt_time = 0xffffffff;
2331d14abf15SRobert Mustacchi         ttcp_st->flags1 |= TSTORM_TCP_ST_CONTEXT_SECTION_MAX_SEG_RETRANSMIT_EN;
2332d14abf15SRobert Mustacchi         //ctx->tstorm_st_context.tcp.max_seg_retransmit_en = 1;
2333d14abf15SRobert Mustacchi     }
2334d14abf15SRobert Mustacchi 
2335d14abf15SRobert Mustacchi     if (tcp->ulp_type == TOE_CONNECTION_TYPE) {
2336d14abf15SRobert Mustacchi         if (tcp->rx_con->u.rx.sws_info.mss > 0xffff)
2337d14abf15SRobert Mustacchi         {
2338d14abf15SRobert Mustacchi             DbgBreakIf(tcp->rx_con->u.rx.sws_info.mss > 0xffff);
2339d14abf15SRobert Mustacchi             return LM_STATUS_INVALID_PARAMETER;
2340d14abf15SRobert Mustacchi         }
2341d14abf15SRobert Mustacchi         ttcp_st->mss = tcp->rx_con->u.rx.sws_info.mss & 0xffff;
2342d14abf15SRobert Mustacchi     } else {
2343d14abf15SRobert Mustacchi         /* we must calc mss here since it is possible that we don't have rx_con (iscsi) */
2344d14abf15SRobert Mustacchi         ttcp_st->mss = _lm_tcp_calc_mss(tcp->path->path_cached.path_mtu,
2345d14abf15SRobert Mustacchi                                     tcp->tcp_const.remote_mss,
2346d14abf15SRobert Mustacchi                                     (tcp->path->path_const.ip_version == IP_VERSION_IPV6),
2347d14abf15SRobert Mustacchi                                     tcp->tcp_const.tcp_flags & TCP_FLAG_ENABLE_TIME_STAMP,
2348d14abf15SRobert Mustacchi                                     pdev->ofld_info.l4_params.flags & OFLD_PARAM_FLAG_SNAP_ENCAP,
2349d14abf15SRobert Mustacchi                                     tcp->path->neigh->neigh_const.vlan_tag  != 0) & 0xffff;
2350d14abf15SRobert Mustacchi 
2351d14abf15SRobert Mustacchi         /* NirV: set expected release sequence parameter that's being set in the toe fw but not in the iscsi fw */
2352d14abf15SRobert Mustacchi         /* should be done in the iscsi initiate offload handler in the fw as in toe */
2353d14abf15SRobert Mustacchi         ttcp_st->expected_rel_seq = tcp->tcp_delegated.send_una;
2354d14abf15SRobert Mustacchi     }
2355d14abf15SRobert Mustacchi 
2356d14abf15SRobert Mustacchi     DbgMessage(pdev, INFORMl4sp, "offload num_retx=%d, snd_wnd_probe_cnt=%d\n",tcp->tcp_delegated.u.retransmit.num_retx,tcp->tcp_delegated.snd_wnd_probe_count);
2357d14abf15SRobert Mustacchi 
2358d14abf15SRobert Mustacchi     ttcp_st->persist_probe_count  = tcp->tcp_delegated.snd_wnd_probe_count;
2359d14abf15SRobert Mustacchi     ttcp_st->prev_seg_seq         = tcp->tcp_delegated.send_wl1;
2360d14abf15SRobert Mustacchi     ttcp_st->rcv_nxt              = tcp->tcp_delegated.recv_next;
2361d14abf15SRobert Mustacchi     /*ttcp_st->reserved_slowpath    = 0;  This value is the 7 LSBs of the toeplitz hash result for this connection's 4 tuple.
2362d14abf15SRobert Mustacchi                                                                     required in order to give the L2-completion on the correct RSS ring
2363d14abf15SRobert Mustacchi                                                                     TBD - toeplitz hash calc not implemented for this yet, but no harm done */
2364d14abf15SRobert Mustacchi 
2365d14abf15SRobert Mustacchi     // calculate the effective send window: min(SndWnd, CWnd) relative to snd.una
2366d14abf15SRobert Mustacchi     snd_wnd = (S32_SUB(tcp->tcp_delegated.send_cwin, tcp->tcp_delegated.send_win) > 0) ?
2367d14abf15SRobert Mustacchi         (tcp->tcp_delegated.send_win - tcp->tcp_delegated.send_una) : /* i.e. ndis_tcp_delegated->SndWnd */
2368d14abf15SRobert Mustacchi         (tcp->tcp_delegated.send_cwin - tcp->tcp_delegated.send_una); /* i.e. ndis_tcp_delegated->CWnd */
2369d14abf15SRobert Mustacchi 
2370d14abf15SRobert Mustacchi     if(tcp->tcp_delegated.send_una == tcp->tcp_delegated.send_max && snd_wnd > 0) { /* KA is running (?) */
2371d14abf15SRobert Mustacchi         ttcp_st->rto_exp = 0;
2372d14abf15SRobert Mustacchi         ttcp_st->retransmit_count = 0;
2373d14abf15SRobert Mustacchi     } else {   /* retransmit is running (?) */
2374d14abf15SRobert Mustacchi         ttcp_st->retransmit_count = tcp->tcp_delegated.u.retransmit.num_retx;
2375d14abf15SRobert Mustacchi         ttcp_st->rto_exp = tcp->tcp_delegated.u.retransmit.num_retx;
2376d14abf15SRobert Mustacchi     }
2377d14abf15SRobert Mustacchi     ttcp_st->retransmit_start_time =
2378d14abf15SRobert Mustacchi         lm_time_resolution(pdev, tcp->tcp_delegated.total_rt, l4_params->ticks_per_second, TSEMI_CLK1_TICKS_PER_SEC);
2379d14abf15SRobert Mustacchi 
2380d14abf15SRobert Mustacchi     /* convert to ms.
2381d14abf15SRobert Mustacchi      * the /8 and /4 are a result of some shifts that MSFT does; these numbers were received from MSFT through emails and are
2382d14abf15SRobert Mustacchi      * handled the same way in Teton. */
2383d14abf15SRobert Mustacchi     sm_rtt = lm_time_resolution(pdev, tcp->tcp_delegated.sm_rtt, l4_params->ticks_per_second, TIMERS_TICKS_PER_SEC)/8;
2384d14abf15SRobert Mustacchi     if (sm_rtt > 30000) {   /* reduce to 30sec */
2385d14abf15SRobert Mustacchi         sm_rtt = 30000;
2386d14abf15SRobert Mustacchi     }
2387d14abf15SRobert Mustacchi     sm_delta = lm_time_resolution(pdev, tcp->tcp_delegated.sm_delta, l4_params->ticks_per_second, TIMERS_TICKS_PER_SEC)/4;
2388d14abf15SRobert Mustacchi     if (sm_delta > 30000) {   /* reduce to 30sec */
2389d14abf15SRobert Mustacchi         sm_delta = 30000;
2390d14abf15SRobert Mustacchi     }
2391d14abf15SRobert Mustacchi 
2392d14abf15SRobert Mustacchi     ttcp_st->flags1 |= (sm_rtt << TSTORM_TCP_ST_CONTEXT_SECTION_RTT_SRTT_SHIFT);  /* given in ticks, no conversion is required */
2393d14abf15SRobert Mustacchi     ttcp_st->flags2 |= (sm_delta << TSTORM_TCP_ST_CONTEXT_SECTION_RTT_VARIATION_SHIFT); /* given in ticks, no conversion is required */
2394d14abf15SRobert Mustacchi     if ((tcp->ulp_type == TOE_CONNECTION_TYPE) && (tcp->rx_con->flags & TCP_REMOTE_FIN_RECEIVED)) {
2395d14abf15SRobert Mustacchi         ttcp_st->flags1 |= TSTORM_TCP_ST_CONTEXT_SECTION_STOP_RX_PAYLOAD;
2396d14abf15SRobert Mustacchi     }
2397d14abf15SRobert Mustacchi 
2398d14abf15SRobert Mustacchi     ttcp_st->ss_thresh            = tcp->tcp_delegated.ss_thresh;
2399d14abf15SRobert Mustacchi     ttcp_st->timestamp_recent     = tcp->tcp_delegated.ts_recent;
2400d14abf15SRobert Mustacchi     ttcp_st->timestamp_recent_time =
2401d14abf15SRobert Mustacchi         lm_time_resolution(pdev, tcp->tcp_delegated.ts_recent_age, l4_params->ticks_per_second, TSEMI_CLK1_TICKS_PER_SEC);
2402d14abf15SRobert Mustacchi     ttcp_st->vlan_id              = neigh->neigh_const.vlan_tag;
2403d14abf15SRobert Mustacchi     ttcp_st->recent_seg_wnd       = tcp->tcp_delegated.send_win - tcp->tcp_delegated.send_una;
2404d14abf15SRobert Mustacchi     ttcp_st->ooo_support_mode      = (tcp->ulp_type == TOE_CONNECTION_TYPE)? TCP_TSTORM_OOO_SUPPORTED : TCP_TSTORM_OOO_DROP_AND_PROC_ACK;
2405d14abf15SRobert Mustacchi     ttcp_st->statistics_counter_id = (tcp->ulp_type == TOE_CONNECTION_TYPE)? LM_STATS_CNT_ID(pdev) : LM_CLI_IDX_ISCSI;
2406d14abf15SRobert Mustacchi 
2407d14abf15SRobert Mustacchi     // Set statistics params
2408d14abf15SRobert Mustacchi     if( TOE_CONNECTION_TYPE == tcp->ulp_type )
2409d14abf15SRobert Mustacchi     {
2410d14abf15SRobert Mustacchi         // set enable L2
2411d14abf15SRobert Mustacchi         SET_FLAGS( ttcp_st->flags2, 1<<TSTORM_TCP_ST_CONTEXT_SECTION_UPDATE_L2_STATSTICS_SHIFT );
2412d14abf15SRobert Mustacchi 
2413d14abf15SRobert Mustacchi         // set enable L4
2414d14abf15SRobert Mustacchi         SET_FLAGS( ttcp_st->flags2, 1<<TSTORM_TCP_ST_CONTEXT_SECTION_UPDATE_L4_STATSTICS_SHIFT );
2415d14abf15SRobert Mustacchi     }
2416d14abf15SRobert Mustacchi 
2417d14abf15SRobert Mustacchi     return LM_STATUS_SUCCESS;
2418d14abf15SRobert Mustacchi }
2419d14abf15SRobert Mustacchi 
2420d14abf15SRobert Mustacchi 
_lm_tcp_init_xstorm_tcp_context(struct _lm_device_t * pdev,lm_tcp_state_t * tcp)2421d14abf15SRobert Mustacchi static lm_status_t _lm_tcp_init_xstorm_tcp_context(
2422d14abf15SRobert Mustacchi     struct _lm_device_t *pdev,
2423d14abf15SRobert Mustacchi     lm_tcp_state_t *tcp)
2424d14abf15SRobert Mustacchi {
2425d14abf15SRobert Mustacchi     /* TODO: unify iscsi + toe structure name */
2426d14abf15SRobert Mustacchi     struct xstorm_toe_tcp_ag_context_section * xtcp_ag;
2427d14abf15SRobert Mustacchi     struct xstorm_common_context_section     * xtcp_st;
2428d14abf15SRobert Mustacchi     lm_path_state_t  * path  = tcp->path;
2429d14abf15SRobert Mustacchi     lm_neigh_state_t * neigh = path->neigh;
2430d14abf15SRobert Mustacchi     l4_ofld_params_t * l4_params = &(pdev->ofld_info.l4_params);
2431d14abf15SRobert Mustacchi     u32_t src_ip[4], dst_ip[4];
2432d14abf15SRobert Mustacchi     u16_t pseudo_cs, i;
2433d14abf15SRobert Mustacchi     u32_t sm_rtt, sm_delta;
2434d14abf15SRobert Mustacchi 
2435d14abf15SRobert Mustacchi     ASSERT_STATIC(sizeof(struct xstorm_toe_tcp_ag_context_section) == sizeof(struct xstorm_tcp_tcp_ag_context_section));
2436d14abf15SRobert Mustacchi     if (tcp->ulp_type == TOE_CONNECTION_TYPE) {
2437d14abf15SRobert Mustacchi         xtcp_ag = &((struct toe_context *)tcp->ctx_virt)->xstorm_ag_context.tcp;
2438d14abf15SRobert Mustacchi         xtcp_st = &((struct toe_context *)tcp->ctx_virt)->xstorm_st_context.context.common;
2439d14abf15SRobert Mustacchi     } else {
2440d14abf15SRobert Mustacchi         xtcp_ag = (struct xstorm_toe_tcp_ag_context_section *)&((struct iscsi_context *)tcp->ctx_virt)->xstorm_ag_context.tcp;
2441d14abf15SRobert Mustacchi         xtcp_st = &((struct iscsi_context *)tcp->ctx_virt)->xstorm_st_context.common;
2442d14abf15SRobert Mustacchi     }
2443d14abf15SRobert Mustacchi 
2444d14abf15SRobert Mustacchi     mm_mem_zero(xtcp_ag, sizeof(struct xstorm_toe_tcp_ag_context_section));
2445d14abf15SRobert Mustacchi     mm_mem_zero(xtcp_st, sizeof(struct xstorm_common_context_section));
2446d14abf15SRobert Mustacchi 
2447d14abf15SRobert Mustacchi     xtcp_ag->ack_to_far_end       = tcp->tcp_delegated.recv_next;
2448d14abf15SRobert Mustacchi     if(tcp->tcp_delegated.send_una == tcp->tcp_delegated.send_max) { /* KA is running (?) */
2449d14abf15SRobert Mustacchi         if ((tcp->tcp_cached.ka_probe_cnt > 0) && (tcp->tcp_delegated.u.keep_alive.timeout_delta == 0)) {
2450d14abf15SRobert Mustacchi             xtcp_ag->ka_timer = 1;
2451d14abf15SRobert Mustacchi         } else if ((tcp->tcp_cached.ka_probe_cnt == 0) && (tcp->tcp_delegated.u.keep_alive.timeout_delta == 0)) {
2452d14abf15SRobert Mustacchi             if (tcp->tcp_cached.ka_time_out == 0) {/* KA disabled */
2453d14abf15SRobert Mustacchi                 xtcp_ag->ka_timer = 0xffffffff;
2454d14abf15SRobert Mustacchi             } else {
2455d14abf15SRobert Mustacchi                 if (tcp->tcp_cached.ka_time_out == 0xffffffff) {
2456d14abf15SRobert Mustacchi                     xtcp_ag->ka_timer  = 0xffffffff;
2457d14abf15SRobert Mustacchi                 } else {
2458d14abf15SRobert Mustacchi                     xtcp_ag->ka_timer =
2459d14abf15SRobert Mustacchi                         tcp->tcp_cached.ka_time_out ?
2460d14abf15SRobert Mustacchi                         lm_time_resolution(pdev, tcp->tcp_cached.ka_time_out, l4_params->ticks_per_second, TIMERS_TICKS_PER_SEC) :
2461d14abf15SRobert Mustacchi                         1 /* value of 0 is not allowed by FW */;
2462d14abf15SRobert Mustacchi                 }
2463d14abf15SRobert Mustacchi             }
2464d14abf15SRobert Mustacchi         } else {
2465d14abf15SRobert Mustacchi             if (tcp->tcp_delegated.u.keep_alive.timeout_delta == 0xffffffff) {
2466d14abf15SRobert Mustacchi                 xtcp_ag->ka_timer  = 0xffffffff;
2467d14abf15SRobert Mustacchi             } else {
2468d14abf15SRobert Mustacchi                 xtcp_ag->ka_timer = lm_time_resolution(pdev, tcp->tcp_delegated.u.keep_alive.timeout_delta, l4_params->ticks_per_second, TIMERS_TICKS_PER_SEC);
2469d14abf15SRobert Mustacchi             }
2470d14abf15SRobert Mustacchi         }
2471d14abf15SRobert Mustacchi     } else {   /* retransmit is running (?) */
2472d14abf15SRobert Mustacchi         xtcp_ag->ka_timer         = 0xffffffff;
2473d14abf15SRobert Mustacchi     }
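    /*
     * Keep-alive timer selection above, summarized (descriptive only):
     *   - send_una != send_max (retransmit pending)      -> ka_timer = 0xffffffff (KA parked)
     *   - probe(s) already sent, timeout_delta expired   -> ka_timer = 1 (fire as soon as possible)
     *   - no probe sent yet, timeout_delta expired       -> ka_timer = ka_time_out converted to
     *                                                       TIMERS_TICKS_PER_SEC; 0 (disabled) or
     *                                                       0xffffffff map to 0xffffffff
     *   - timeout_delta still running                    -> ka_timer = remaining delta, converted;
     *                                                       0xffffffff stays 0xffffffff
     */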
2474d14abf15SRobert Mustacchi 
2475d14abf15SRobert Mustacchi     if (tcp->ulp_type == TOE_CONNECTION_TYPE) {
2476d14abf15SRobert Mustacchi         xtcp_ag->local_adv_wnd        = tcp->tcp_delegated.recv_win_seq;
2477d14abf15SRobert Mustacchi     } else if (tcp->ulp_type == ISCSI_CONNECTION_TYPE) {
2478d14abf15SRobert Mustacchi         /* NirV: Add define to the iscsi HSI */
2479d14abf15SRobert Mustacchi         xtcp_ag->local_adv_wnd        = 0xFFFF << ((u16_t)tcp->tcp_const.rcv_seg_scale & 0xf); /* rcv_seg_scale is only 4b long */
2480d14abf15SRobert Mustacchi     }
2481d14abf15SRobert Mustacchi 
2482d14abf15SRobert Mustacchi     if (tcp->ulp_type == TOE_CONNECTION_TYPE) {
2483d14abf15SRobert Mustacchi         if (tcp->rx_con->u.rx.sws_info.mss > 0xffff)
2484d14abf15SRobert Mustacchi         {
2485d14abf15SRobert Mustacchi             DbgBreakIf(tcp->rx_con->u.rx.sws_info.mss > 0xffff);
2486d14abf15SRobert Mustacchi             return LM_STATUS_INVALID_PARAMETER;
2487d14abf15SRobert Mustacchi         }
2488d14abf15SRobert Mustacchi         xtcp_ag->mss = tcp->rx_con->u.rx.sws_info.mss & 0xffff;
2489d14abf15SRobert Mustacchi     } else {
2490d14abf15SRobert Mustacchi         /* we must calc mss here since it is possible that we don't have rx_con (iscsi) */
2491d14abf15SRobert Mustacchi         xtcp_ag->mss = _lm_tcp_calc_mss(tcp->path->path_cached.path_mtu,
2492d14abf15SRobert Mustacchi                                     tcp->tcp_const.remote_mss,
2493d14abf15SRobert Mustacchi                                     (tcp->path->path_const.ip_version == IP_VERSION_IPV6),
2494d14abf15SRobert Mustacchi                                     tcp->tcp_const.tcp_flags & TCP_FLAG_ENABLE_TIME_STAMP,
2495d14abf15SRobert Mustacchi                                     pdev->ofld_info.l4_params.flags & OFLD_PARAM_FLAG_SNAP_ENCAP,
2496d14abf15SRobert Mustacchi                                     tcp->path->neigh->neigh_const.vlan_tag  != 0) & 0xfffc;     /* MSS value set in the XStorm must be a multiple of 4 */
2497d14abf15SRobert Mustacchi 
2498d14abf15SRobert Mustacchi         if (tcp->ulp_type == ISCSI_CONNECTION_TYPE)
2499d14abf15SRobert Mustacchi         {
2500d14abf15SRobert Mustacchi             if (xtcp_ag->mss < 4)
2501d14abf15SRobert Mustacchi             {
2502d14abf15SRobert Mustacchi                 DbgBreakIf(xtcp_ag->mss < 4);
2503d14abf15SRobert Mustacchi                 return LM_STATUS_INVALID_PARAMETER;
2504d14abf15SRobert Mustacchi             }
2505d14abf15SRobert Mustacchi             xtcp_ag->mss -= 4;  // -4 for data digest
2506d14abf15SRobert Mustacchi         }
2507d14abf15SRobert Mustacchi     }
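    /*
     * Worked example with hypothetical numbers: if _lm_tcp_calc_mss() came back
     * with 1449, the "& 0xfffc" rounds down to the 4-byte multiple 1448, and the
     * iSCSI data-digest adjustment above leaves mss = 1444.
     */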
2508d14abf15SRobert Mustacchi 
2509d14abf15SRobert Mustacchi     if (tcp->ulp_type == TOE_CONNECTION_TYPE) {
2510d14abf15SRobert Mustacchi         /* if persist probes were sent, xstorm should be blocked */
2511d14abf15SRobert Mustacchi         if (tcp->tcp_delegated.snd_wnd_probe_count == 0) {
2512d14abf15SRobert Mustacchi             xtcp_ag->tcp_agg_vars2 |= __XSTORM_TOE_TCP_AG_CONTEXT_SECTION_TX_UNBLOCKED;
2513d14abf15SRobert Mustacchi         }
2514d14abf15SRobert Mustacchi     }
2515d14abf15SRobert Mustacchi 
2516d14abf15SRobert Mustacchi     /* calculate transmission window */
2517d14abf15SRobert Mustacchi     xtcp_ag->tx_wnd               =
2518d14abf15SRobert Mustacchi         (S32_SUB(tcp->tcp_delegated.send_cwin, tcp->tcp_delegated.send_win) > 0) ?
2519d14abf15SRobert Mustacchi         (tcp->tcp_delegated.send_win - tcp->tcp_delegated.send_una) : /* i.e. ndis_tcp_delegated->SndWnd */
2520d14abf15SRobert Mustacchi         (tcp->tcp_delegated.send_cwin - tcp->tcp_delegated.send_una); /* i.e. ndis_tcp_delegated->CWnd */
2521d14abf15SRobert Mustacchi 
2522d14abf15SRobert Mustacchi     /* bugbug: driver workaround - wnd may be 0xffffffff; in this case we change it to 2^30, since the FW assumes this value
2523d14abf15SRobert Mustacchi      * doesn't wrap around. Configuring it to 0xffffffff may cause it to wrap around and then change from a very large cwnd to a very
2524d14abf15SRobert Mustacchi      * small one - we give 2^30, which is the largest cwnd that can be advertised.  */
2525d14abf15SRobert Mustacchi     if (xtcp_ag->tx_wnd == 0xffffffff) {
2526d14abf15SRobert Mustacchi         xtcp_ag->tx_wnd = 0x40000000;
2527d14abf15SRobert Mustacchi     }
2528d14abf15SRobert Mustacchi 
2529d14abf15SRobert Mustacchi     /* check if we are in keepalive. */
2530d14abf15SRobert Mustacchi     if ((tcp->tcp_delegated.send_una == tcp->tcp_delegated.send_max) && ((xtcp_ag->tx_wnd > 0) || (tcp->tcp_delegated.u.retransmit.retx_ms == 0xffffffff))) { /* KA is enabled (?) */
2531d14abf15SRobert Mustacchi        /* convert to ms.
2532d14abf15SRobert Mustacchi         * the /8 and /4 are a result of some shifts that MSFT does; these numbers were received from MSFT through e-mails and the
2533d14abf15SRobert Mustacchi         * same is done in Teton. */
2534d14abf15SRobert Mustacchi         sm_rtt = lm_time_resolution(pdev, tcp->tcp_delegated.sm_rtt, l4_params->ticks_per_second, TIMERS_TICKS_PER_SEC)/8;
2535d14abf15SRobert Mustacchi         if (sm_rtt > 30000) {   /* reduce to 30sec */
2536d14abf15SRobert Mustacchi             sm_rtt = 30000;
2537d14abf15SRobert Mustacchi         }
2538d14abf15SRobert Mustacchi         sm_delta = lm_time_resolution(pdev, tcp->tcp_delegated.sm_delta, l4_params->ticks_per_second, TIMERS_TICKS_PER_SEC)/4;
2539d14abf15SRobert Mustacchi         if (sm_delta > 30000) { /* reduce to 30sec */
2540d14abf15SRobert Mustacchi             sm_delta = 30000;
2541d14abf15SRobert Mustacchi         }
2542d14abf15SRobert Mustacchi         xtcp_ag->rto_timer  = (sm_rtt + (sm_delta << 2));
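        /*
         * The line above has the usual RTO = SRTT + 4 * RTTVAR form, e.g.
         * sm_rtt = 300 ms and sm_delta = 50 ms give rto_timer = 300 + 200 = 500 ms.
         */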
2543d14abf15SRobert Mustacchi     } else {   /* retransmit is running (?) */
2544d14abf15SRobert Mustacchi         if (tcp->tcp_delegated.u.retransmit.retx_ms == 0xffffffff) {
2545d14abf15SRobert Mustacchi             xtcp_ag->rto_timer       = 0xffffffff;
2546d14abf15SRobert Mustacchi         } else {
2547d14abf15SRobert Mustacchi             xtcp_ag->rto_timer        = tcp->tcp_delegated.u.retransmit.retx_ms ? tcp->tcp_delegated.u.retransmit.retx_ms : 1 /* value of 0 is not allowed by FW*/;
2548d14abf15SRobert Mustacchi                 /* TODO: retx_ms is already converted in the Miniport;
2549d14abf15SRobert Mustacchi                  * we need to convert retx_ms to clock ticks in VBD instead of
2550d14abf15SRobert Mustacchi                  * doing this conversion in NDIS (same as Teton). */
2551d14abf15SRobert Mustacchi                 /*tcp->tcp_delegated.u.retransmit.retx_ms ?
2552d14abf15SRobert Mustacchi                 lm_time_resolution(pdev, tcp->tcp_delegated.u.retransmit.retx_ms,
2553d14abf15SRobert Mustacchi                                    1000, TIMERS_TICKS_PER_SEC) :
2554d14abf15SRobert Mustacchi                 1 *//* value of 0 is not allowed by FW*/;
2555d14abf15SRobert Mustacchi         }
2556d14abf15SRobert Mustacchi     }
2557d14abf15SRobert Mustacchi     xtcp_ag->snd_nxt              = tcp->tcp_delegated.send_next;
2558d14abf15SRobert Mustacchi     xtcp_ag->snd_una              = tcp->tcp_delegated.send_una;
2559d14abf15SRobert Mustacchi     xtcp_ag->tcp_agg_vars2        |= XSTORM_TOE_TCP_AG_CONTEXT_SECTION_DA_ENABLE; /* Delayed Acks always on */
2560d14abf15SRobert Mustacchi     xtcp_ag->ts_to_echo           = tcp->tcp_delegated.ts_recent;
2561d14abf15SRobert Mustacchi 
2562d14abf15SRobert Mustacchi 
2563d14abf15SRobert Mustacchi     /* xstorm_st_context */
2564d14abf15SRobert Mustacchi     xtcp_st->ethernet.remote_addr_0      = neigh->neigh_cached.dst_addr[0];
2565d14abf15SRobert Mustacchi     xtcp_st->ethernet.remote_addr_1      = neigh->neigh_cached.dst_addr[1];
2566d14abf15SRobert Mustacchi     xtcp_st->ethernet.remote_addr_2      = neigh->neigh_cached.dst_addr[2];
2567d14abf15SRobert Mustacchi     xtcp_st->ethernet.remote_addr_3      = neigh->neigh_cached.dst_addr[3];
2568d14abf15SRobert Mustacchi     xtcp_st->ethernet.remote_addr_4      = neigh->neigh_cached.dst_addr[4];
2569d14abf15SRobert Mustacchi     xtcp_st->ethernet.remote_addr_5      = neigh->neigh_cached.dst_addr[5];
2570d14abf15SRobert Mustacchi 
2571d14abf15SRobert Mustacchi     if (neigh->neigh_const.vlan_tag > 0xfff)
2572d14abf15SRobert Mustacchi     {
2573d14abf15SRobert Mustacchi         DbgBreakIf(neigh->neigh_const.vlan_tag > 0xfff);
2574d14abf15SRobert Mustacchi         return LM_STATUS_INVALID_PARAMETER;
2575d14abf15SRobert Mustacchi     }
2576d14abf15SRobert Mustacchi     xtcp_st->ethernet.vlan_params |= (neigh->neigh_const.vlan_tag << XSTORM_ETH_CONTEXT_SECTION_VLAN_ID_SHIFT);
2577d14abf15SRobert Mustacchi 
2578d14abf15SRobert Mustacchi     if (tcp->tcp_cached.user_priority > 0x7)
2579d14abf15SRobert Mustacchi     {
2580d14abf15SRobert Mustacchi         DbgBreakIf(tcp->tcp_cached.user_priority > 0x7);
2581d14abf15SRobert Mustacchi         return LM_STATUS_INVALID_PARAMETER;
2582d14abf15SRobert Mustacchi     }
2583d14abf15SRobert Mustacchi     xtcp_st->ethernet.vlan_params  |= (tcp->tcp_cached.user_priority << XSTORM_ETH_CONTEXT_SECTION_PRIORITY_SHIFT);
2584d14abf15SRobert Mustacchi 
2585d14abf15SRobert Mustacchi     if ((0 != GET_FLAGS(xtcp_st->ethernet.vlan_params, XSTORM_ETH_CONTEXT_SECTION_VLAN_ID)) ||
2586d14abf15SRobert Mustacchi         (0 != GET_FLAGS(xtcp_st->ethernet.vlan_params, XSTORM_ETH_CONTEXT_SECTION_CFI))     ||
2587d14abf15SRobert Mustacchi         (0 != GET_FLAGS(xtcp_st->ethernet.vlan_params, XSTORM_ETH_CONTEXT_SECTION_PRIORITY)))
2588d14abf15SRobert Mustacchi     {
2589*48bbca81SDaniel Hoffman         // This field should be set to 1 whenever an inner VLAN is provided by the OS.
2590d14abf15SRobert Mustacchi         // This flag is relevant for all function modes.
2591d14abf15SRobert Mustacchi         SET_FLAGS( xtcp_st->flags, XSTORM_COMMON_CONTEXT_SECTION_VLAN_MODE);
2592d14abf15SRobert Mustacchi     }
2593d14abf15SRobert Mustacchi 
2594d14abf15SRobert Mustacchi     xtcp_st->ethernet.local_addr_0   = neigh->neigh_const.src_addr[0];
2595d14abf15SRobert Mustacchi     xtcp_st->ethernet.local_addr_1   = neigh->neigh_const.src_addr[1];
2596d14abf15SRobert Mustacchi     xtcp_st->ethernet.local_addr_2   = neigh->neigh_const.src_addr[2];
2597d14abf15SRobert Mustacchi     xtcp_st->ethernet.local_addr_3   = neigh->neigh_const.src_addr[3];
2598d14abf15SRobert Mustacchi     xtcp_st->ethernet.local_addr_4   = neigh->neigh_const.src_addr[4];
2599d14abf15SRobert Mustacchi     xtcp_st->ethernet.local_addr_5   = neigh->neigh_const.src_addr[5];
2600d14abf15SRobert Mustacchi     xtcp_st->ethernet.reserved_vlan_type = 0x8100;
2601d14abf15SRobert Mustacchi 
2602d14abf15SRobert Mustacchi     xtcp_st->ip_version_1b           = (tcp->path->path_const.ip_version == IP_VERSION_IPV4)? 0 : 1;
2603d14abf15SRobert Mustacchi     if (tcp->path->path_const.ip_version == IP_VERSION_IPV4) {
2604d14abf15SRobert Mustacchi         /* IPv4*/
2605d14abf15SRobert Mustacchi         xtcp_st->ip_union.padded_ip_v4.ip_v4.ip_remote_addr      = path->path_const.u.ipv4.dst_ip;
2606d14abf15SRobert Mustacchi         xtcp_st->ip_union.padded_ip_v4.ip_v4.ip_local_addr       = path->path_const.u.ipv4.src_ip;
2607d14abf15SRobert Mustacchi         xtcp_st->ip_union.padded_ip_v4.ip_v4.tos                 = tcp->tcp_cached.tos_or_traffic_class;
2608d14abf15SRobert Mustacchi #if DBG
2609d14abf15SRobert Mustacchi         xtcp_st->ip_union.padded_ip_v4.ip_v4.ttl                 = (tcp->ulp_type == TOE_CONNECTION_TYPE) ? TOE_DBG_TTL : ISCSI_DBG_TTL;
2610d14abf15SRobert Mustacchi #else
2611d14abf15SRobert Mustacchi         xtcp_st->ip_union.padded_ip_v4.ip_v4.ttl                 = tcp->tcp_cached.ttl_or_hop_limit;
2612d14abf15SRobert Mustacchi #endif
2613d14abf15SRobert Mustacchi         src_ip[0] = HTON32(path->path_const.u.ipv4.src_ip);
2614d14abf15SRobert Mustacchi         dst_ip[0] = HTON32(path->path_const.u.ipv4.dst_ip);
2615*48bbca81SDaniel Hoffman         pseudo_cs = lm_tcp_calc_tcp_pseudo_checksum(pdev, src_ip, dst_ip, IP_VERSION_IPV4);
2616d14abf15SRobert Mustacchi     } else {
2617d14abf15SRobert Mustacchi         /* IPv6*/
2618d14abf15SRobert Mustacchi         xtcp_st->ip_union.ip_v6.ip_remote_addr_lo_lo = path->path_const.u.ipv6.dst_ip[0];
2619d14abf15SRobert Mustacchi         xtcp_st->ip_union.ip_v6.ip_remote_addr_lo_hi = path->path_const.u.ipv6.dst_ip[1];
2620d14abf15SRobert Mustacchi         xtcp_st->ip_union.ip_v6.ip_remote_addr_hi_lo = path->path_const.u.ipv6.dst_ip[2];
2621d14abf15SRobert Mustacchi         xtcp_st->ip_union.ip_v6.ip_remote_addr_hi_hi = path->path_const.u.ipv6.dst_ip[3];
2622d14abf15SRobert Mustacchi 
2623d14abf15SRobert Mustacchi         xtcp_st->ip_union.ip_v6.ip_local_addr_lo_lo  = path->path_const.u.ipv6.src_ip[0];
2624d14abf15SRobert Mustacchi         xtcp_st->ip_union.ip_v6.ip_local_addr_lo_hi  = path->path_const.u.ipv6.src_ip[1];
2625d14abf15SRobert Mustacchi         xtcp_st->ip_union.ip_v6.ip_local_addr_hi_lo  = path->path_const.u.ipv6.src_ip[2];
2626d14abf15SRobert Mustacchi         xtcp_st->ip_union.ip_v6.ip_local_addr_hi_hi  = path->path_const.u.ipv6.src_ip[3];
2627d14abf15SRobert Mustacchi 
2628d14abf15SRobert Mustacchi #if DBG
2629d14abf15SRobert Mustacchi         xtcp_st->ip_union.ip_v6.hop_limit                        = (tcp->ulp_type == TOE_CONNECTION_TYPE) ? TOE_DBG_TTL : ISCSI_DBG_TTL;
2630d14abf15SRobert Mustacchi #else
2631d14abf15SRobert Mustacchi         xtcp_st->ip_union.ip_v6.hop_limit                        = tcp->tcp_cached.ttl_or_hop_limit;
2632d14abf15SRobert Mustacchi #endif
2633d14abf15SRobert Mustacchi         DbgBreakIf(tcp->tcp_cached.flow_label > 0xffff);
2634d14abf15SRobert Mustacchi         xtcp_st->ip_union.ip_v6.priority_flow_label =
2635d14abf15SRobert Mustacchi             tcp->tcp_cached.flow_label << XSTORM_IP_V6_CONTEXT_SECTION_FLOW_LABEL_SHIFT |
2636d14abf15SRobert Mustacchi             tcp->tcp_cached.tos_or_traffic_class << XSTORM_IP_V6_CONTEXT_SECTION_TRAFFIC_CLASS_SHIFT;
2637d14abf15SRobert Mustacchi 
2638d14abf15SRobert Mustacchi         for (i = 0; i < 4; i++) {
2639d14abf15SRobert Mustacchi             src_ip[i] = HTON32(path->path_const.u.ipv6.src_ip[i]);
2640d14abf15SRobert Mustacchi             dst_ip[i] = HTON32(path->path_const.u.ipv6.dst_ip[i]);
2641d14abf15SRobert Mustacchi         }
2642d14abf15SRobert Mustacchi         pseudo_cs = lm_tcp_calc_tcp_pseudo_checksum(pdev, src_ip, dst_ip, IP_VERSION_IPV6);
2643d14abf15SRobert Mustacchi     }
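    /*
     * Illustrative sketch only, not the driver's implementation:
     * lm_tcp_calc_tcp_pseudo_checksum() is assumed to produce a 16-bit
     * ones'-complement sum over the network-order address words, roughly:
     *
     *     u32_t sum = 0;
     *     int   i;
     *     for (i = 0; i < ndwords; i++) {        ndwords: 1 for IPv4, 4 for IPv6
     *         sum += (src_ip[i] >> 16) + (src_ip[i] & 0xffff);
     *         sum += (dst_ip[i] >> 16) + (dst_ip[i] & 0xffff);
     *     }
     *     while (sum >> 16)
     *         sum = (sum & 0xffff) + (sum >> 16);
     *     pseudo_cs = (u16_t)sum;
     *
     * The real helper may also fold in the protocol number and TCP length.
     */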
2644d14abf15SRobert Mustacchi 
2645d14abf15SRobert Mustacchi     xtcp_st->tcp.local_port            = tcp->tcp_const.src_port;
2646d14abf15SRobert Mustacchi 
2647d14abf15SRobert Mustacchi 
2648d14abf15SRobert Mustacchi     xtcp_st->tcp.pseudo_csum           = NTOH16(pseudo_cs);
2649d14abf15SRobert Mustacchi     xtcp_st->tcp.remote_port           = tcp->tcp_const.dst_port;
2650d14abf15SRobert Mustacchi     xtcp_st->tcp.snd_max               = tcp->tcp_delegated.send_max;
2651d14abf15SRobert Mustacchi     if(tcp->tcp_const.tcp_flags & TCP_FLAG_ENABLE_TIME_STAMP) {
2652d14abf15SRobert Mustacchi         xtcp_st->tcp.ts_enabled  = 1;
2653d14abf15SRobert Mustacchi     }
2654d14abf15SRobert Mustacchi     if(tcp->tcp_const.tcp_flags & TCP_FLAG_ENABLE_SACK) {
2655d14abf15SRobert Mustacchi         xtcp_st->tcp.tcp_params |= XSTORM_TCP_CONTEXT_SECTION_SACK_ENABLED;
2656d14abf15SRobert Mustacchi     }
2657d14abf15SRobert Mustacchi     if ((tcp->ulp_type == TOE_CONNECTION_TYPE) && (tcp->tx_con->flags & TCP_FIN_REQ_POSTED)) {
2658d14abf15SRobert Mustacchi         xtcp_st->tcp.tcp_params |= XSTORM_TCP_CONTEXT_SECTION_FIN_SENT_FLAG;
2659d14abf15SRobert Mustacchi     }
2660d14abf15SRobert Mustacchi     xtcp_st->tcp.ts_time_diff          = tcp->tcp_delegated.tstamp; /* time conversion not required */
2661d14abf15SRobert Mustacchi     xtcp_st->tcp.window_scaling_factor = (u16_t)tcp->tcp_const.rcv_seg_scale & 0xf; /* rcv_seg_scale is only 4b long */
2662d14abf15SRobert Mustacchi 
2663d14abf15SRobert Mustacchi     // Set statistics params
2664d14abf15SRobert Mustacchi     if( TOE_CONNECTION_TYPE == tcp->ulp_type )
2665d14abf15SRobert Mustacchi     {
2666d14abf15SRobert Mustacchi         // set counter id
2667d14abf15SRobert Mustacchi         xtcp_st->tcp.statistics_counter_id = LM_STATS_CNT_ID(pdev);
2668d14abf15SRobert Mustacchi 
2669d14abf15SRobert Mustacchi         // set enable L2
2670d14abf15SRobert Mustacchi         SET_FLAGS( xtcp_st->tcp.statistics_params, 1<<XSTORM_TCP_CONTEXT_SECTION_UPDATE_L2_STATSTICS_SHIFT );
2671d14abf15SRobert Mustacchi 
2672d14abf15SRobert Mustacchi         // set enable L4
2673d14abf15SRobert Mustacchi         SET_FLAGS( xtcp_st->tcp.statistics_params, 1<<XSTORM_TCP_CONTEXT_SECTION_UPDATE_L4_STATSTICS_SHIFT );
2674d14abf15SRobert Mustacchi     }
2675d14abf15SRobert Mustacchi     if (tcp->ulp_type == ISCSI_CONNECTION_TYPE)
2676d14abf15SRobert Mustacchi     {
2677d14abf15SRobert Mustacchi         SET_FLAGS( xtcp_st->flags,(1 << XSTORM_COMMON_CONTEXT_SECTION_PHYSQ_INITIALIZED_SHIFT ));
2678d14abf15SRobert Mustacchi 
2679d14abf15SRobert Mustacchi         SET_FLAGS( xtcp_st->flags,(PORT_ID(pdev) << XSTORM_COMMON_CONTEXT_SECTION_PBF_PORT_SHIFT));
2680d14abf15SRobert Mustacchi     }
2681d14abf15SRobert Mustacchi     return LM_STATUS_SUCCESS;
2682d14abf15SRobert Mustacchi }
2683d14abf15SRobert Mustacchi 
2684d14abf15SRobert Mustacchi 
2685d14abf15SRobert Mustacchi /* init the tcp-related content of the context (xstorm + tstorm tcp sections) */
_lm_tcp_init_tcp_context(struct _lm_device_t * pdev,lm_tcp_state_t * tcp)2686d14abf15SRobert Mustacchi static lm_status_t _lm_tcp_init_tcp_context(
2687d14abf15SRobert Mustacchi     struct _lm_device_t *pdev,
2688d14abf15SRobert Mustacchi     lm_tcp_state_t *tcp)
2689d14abf15SRobert Mustacchi {
2690d14abf15SRobert Mustacchi     lm_status_t lm_status ;
2691d14abf15SRobert Mustacchi 
2692d14abf15SRobert Mustacchi     lm_status = _lm_tcp_init_xstorm_tcp_context(pdev, tcp);
2693d14abf15SRobert Mustacchi     if (lm_status != LM_STATUS_SUCCESS) {
2694d14abf15SRobert Mustacchi         return lm_status;
2695d14abf15SRobert Mustacchi     }
2696d14abf15SRobert Mustacchi 
2697d14abf15SRobert Mustacchi     lm_status = _lm_tcp_init_tstorm_tcp_context(pdev, tcp);
2698d14abf15SRobert Mustacchi     if (lm_status != LM_STATUS_SUCCESS) {
2699d14abf15SRobert Mustacchi         return lm_status;
2700d14abf15SRobert Mustacchi     }
2701d14abf15SRobert Mustacchi 
2702d14abf15SRobert Mustacchi     return LM_STATUS_SUCCESS;
2703d14abf15SRobert Mustacchi }
2704d14abf15SRobert Mustacchi 
_lm_tcp_init_iscsi_tcp_related_context(struct _lm_device_t * pdev,lm_tcp_state_t * tcp)2705d14abf15SRobert Mustacchi static lm_status_t _lm_tcp_init_iscsi_tcp_related_context(
2706d14abf15SRobert Mustacchi     struct _lm_device_t *pdev,
2707d14abf15SRobert Mustacchi     lm_tcp_state_t *tcp)
2708d14abf15SRobert Mustacchi {
2709d14abf15SRobert Mustacchi     struct cstorm_iscsi_ag_context * ciscsi_ag = &((struct iscsi_context *)tcp->ctx_virt)->cstorm_ag_context;
2710d14abf15SRobert Mustacchi     struct cstorm_iscsi_st_context * ciscsi_st = &((struct iscsi_context *)tcp->ctx_virt)->cstorm_st_context;
2711d14abf15SRobert Mustacchi     struct xstorm_iscsi_ag_context * xiscsi_ag = &((struct iscsi_context *)tcp->ctx_virt)->xstorm_ag_context;
2712d14abf15SRobert Mustacchi     struct xstorm_iscsi_st_context * xiscsi_st = &((struct iscsi_context *)tcp->ctx_virt)->xstorm_st_context;
2713d14abf15SRobert Mustacchi     struct tstorm_iscsi_ag_context * tiscsi_ag = &((struct iscsi_context *)tcp->ctx_virt)->tstorm_ag_context;
2714d14abf15SRobert Mustacchi     struct tstorm_iscsi_st_context * tiscsi_st = &((struct iscsi_context *)tcp->ctx_virt)->tstorm_st_context;
2715d14abf15SRobert Mustacchi 
2716d14abf15SRobert Mustacchi     UNREFERENCED_PARAMETER_(pdev);
2717d14abf15SRobert Mustacchi 
2718d14abf15SRobert Mustacchi     ASSERT_STATIC(sizeof(struct cstorm_toe_ag_context) == sizeof(struct cstorm_iscsi_ag_context));
2719d14abf15SRobert Mustacchi //  ASSERT_STATIC(sizeof(struct cstorm_toe_st_context) == sizeof(struct cstorm_iscsi_st_context));
2720d14abf15SRobert Mustacchi //  ASSERT_STATIC(OFFSETOF(struct iscsi_context, cstorm_ag_context)== OFFSETOF(struct toe_context, cstorm_ag_context) ) ;
2721d14abf15SRobert Mustacchi //  ASSERT_STATIC(OFFSETOF(struct iscsi_context, cstorm_st_context)== OFFSETOF(struct toe_context, cstorm_st_context) ) ;
2722d14abf15SRobert Mustacchi 
2723d14abf15SRobert Mustacchi     /* cstorm */
2724d14abf15SRobert Mustacchi     ciscsi_ag->rel_seq      = tcp->tcp_delegated.send_next; //pTcpParams->sndNext;
2725d14abf15SRobert Mustacchi     ciscsi_ag->rel_seq_th   = tcp->tcp_delegated.send_next; //pTcpParams->sndNext;
2726d14abf15SRobert Mustacchi     ciscsi_st->hq_tcp_seq   = tcp->tcp_delegated.send_next; //pTcpParams->sndNext;
2727d14abf15SRobert Mustacchi 
2728d14abf15SRobert Mustacchi     /* xstorm */
2729d14abf15SRobert Mustacchi     xiscsi_ag->hq_cons_tcp_seq = tcp->tcp_delegated.send_next; //pTcpParams->sndNext;
2730d14abf15SRobert Mustacchi 
2731d14abf15SRobert Mustacchi     /* tstorm */
2732d14abf15SRobert Mustacchi     /* in toe the window right edge is initialized by the doorbell */
2733d14abf15SRobert Mustacchi     /* wnd_right_edge = (local_adv_wnd [recv_win_seq] << window_scaling_factor) + ack_to_far_end [recv_next] */
2734d14abf15SRobert Mustacchi     tiscsi_ag->tcp.wnd_right_edge = (xiscsi_ag->tcp.local_adv_wnd << xiscsi_st->common.tcp.window_scaling_factor) + xiscsi_ag->tcp.ack_to_far_end;
2735d14abf15SRobert Mustacchi 
2736d14abf15SRobert Mustacchi     tiscsi_ag->tcp.wnd_right_edge_local = tiscsi_ag->tcp.wnd_right_edge;
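    /*
     * Purely to illustrate the arithmetic above (hypothetical values):
     * local_adv_wnd = 0xFFFF, window_scaling_factor = 2 and
     * ack_to_far_end = 0x10000000 give
     * wnd_right_edge = 0x10000000 + (0xFFFF << 2) = 0x1003FFFC.
     */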
2737d14abf15SRobert Mustacchi 
2738d14abf15SRobert Mustacchi     tiscsi_st->iscsi.process_nxt = tcp->tcp_delegated.recv_next; // same value as rcv_nxt
2739d14abf15SRobert Mustacchi 
2740d14abf15SRobert Mustacchi     //xAgCtx->mss = pTcpParams->mss - 4; // -4 for data digest
2741d14abf15SRobert Mustacchi 
2742d14abf15SRobert Mustacchi     return LM_STATUS_SUCCESS;
2743d14abf15SRobert Mustacchi }
2744d14abf15SRobert Mustacchi 
2745d14abf15SRobert Mustacchi /* Description:
2746d14abf15SRobert Mustacchi  *  Allocation of a CID for a new TCP connection to be offloaded,
2747d14abf15SRobert Mustacchi  *  and initialization of the connection's context line as required by the FW.
2748d14abf15SRobert Mustacchi  * Assumptions:
2749d14abf15SRobert Mustacchi  *  - lm_tcp_init_tcp_state, lm_tcp_init_rx_con/tx_con already called
2750d14abf15SRobert Mustacchi  *  - send unacked data already posted
2751d14abf15SRobert Mustacchi  *  - If the TCP is in states FinWait1, Closing or LastAck,
2752d14abf15SRobert Mustacchi  *    FIN is already posted to the tx chain
2753d14abf15SRobert Mustacchi  *  - Called under connection lock: since it can be called from either initiate-ofld
2754d14abf15SRobert Mustacchi  *    or recycle-cid (before ofld had the chance to complete)
2755d14abf15SRobert Mustacchi  * Returns:
2756d14abf15SRobert Mustacchi  *  SUCCESS or any failure */
lm_tcp_init_tcp_context(struct _lm_device_t * pdev,lm_tcp_state_t * tcp)2757d14abf15SRobert Mustacchi static lm_status_t lm_tcp_init_tcp_context(
2758d14abf15SRobert Mustacchi     struct _lm_device_t *pdev,
2759d14abf15SRobert Mustacchi     lm_tcp_state_t *tcp)
2760d14abf15SRobert Mustacchi {
2761d14abf15SRobert Mustacchi     s32_t cid;
2762d14abf15SRobert Mustacchi     lm_status_t lm_status;
2763d14abf15SRobert Mustacchi     lm_4tuple_t tuple = {{0}};
2764d14abf15SRobert Mustacchi     u32_t expect_rwin;
2765d14abf15SRobert Mustacchi     u8_t i;
2766d14abf15SRobert Mustacchi 
2767d14abf15SRobert Mustacchi     DbgMessage(pdev, VERBOSEl4sp, "##lm_tcp_init_tcp_context\n");
2768d14abf15SRobert Mustacchi 
2769d14abf15SRobert Mustacchi     /* NirV: allocate cid is getting back here */
2770d14abf15SRobert Mustacchi     /* allocate cid only if cid==0: we may re-enter this function after a cid has already been allocated */
2771d14abf15SRobert Mustacchi     if (tcp->cid == 0)
2772d14abf15SRobert Mustacchi     {
2773d14abf15SRobert Mustacchi         lm_status = lm_allocate_cid(pdev, TOE_CONNECTION_TYPE, (void*)tcp, &cid);
2774d14abf15SRobert Mustacchi         if(lm_status == LM_STATUS_RESOURCE){
2775d14abf15SRobert Mustacchi             DbgMessage(pdev, WARNl4sp, "lm_tcp_init_tcp_state: Failed in allocating cid\n");
2776d14abf15SRobert Mustacchi             return LM_STATUS_RESOURCE;
2777d14abf15SRobert Mustacchi         } else if (lm_status == LM_STATUS_PENDING) {
2778d14abf15SRobert Mustacchi             lm_sp_req_manager_block(pdev, (u32_t)cid);
2779d14abf15SRobert Mustacchi         }
2780d14abf15SRobert Mustacchi         tcp->cid = (u32_t)cid;
2781d14abf15SRobert Mustacchi     }
2782d14abf15SRobert Mustacchi 
2783d14abf15SRobert Mustacchi     if (lm_cid_state(pdev, tcp->cid) == LM_CID_STATE_PENDING) {
2784d14abf15SRobert Mustacchi         return LM_STATUS_SUCCESS; /* Too soon to initialize context */
2785d14abf15SRobert Mustacchi     }
2786d14abf15SRobert Mustacchi 
2787d14abf15SRobert Mustacchi     /* Validate some of the offload parameters - only relevant for TOE. */
2788d14abf15SRobert Mustacchi     if (tcp->ulp_type == TOE_CONNECTION_TYPE) {
2789d14abf15SRobert Mustacchi         tcp->rx_con->u.rx.sws_info.extra_bytes = 0;
2790d14abf15SRobert Mustacchi         if (tcp->rx_con->u.rx.gen_info.peninsula_nbytes > tcp->tcp_cached.initial_rcv_wnd) {
2791d14abf15SRobert Mustacchi             tcp->rx_con->u.rx.sws_info.extra_bytes = tcp->rx_con->u.rx.gen_info.peninsula_nbytes - tcp->tcp_cached.initial_rcv_wnd;
2792d14abf15SRobert Mustacchi             tcp->rx_con->u.rx.sws_info.drv_rcv_win_right_edge = tcp->tcp_delegated.recv_next;
2793d14abf15SRobert Mustacchi             tcp->rx_con->db_data.rx->rcv_win_right_edge = tcp->tcp_delegated.recv_next;
2794d14abf15SRobert Mustacchi             DbgMessage(pdev, INFORMl4sp, "lm_tcp_init_tcp_state: pnb:%x, irw:%x, ext:%x, rnx:%x\n",tcp->rx_con->u.rx.gen_info.peninsula_nbytes,
2795d14abf15SRobert Mustacchi                        tcp->tcp_cached.initial_rcv_wnd,tcp->rx_con->u.rx.sws_info.extra_bytes,tcp->tcp_delegated.recv_next);
2796d14abf15SRobert Mustacchi         } else {
2797d14abf15SRobert Mustacchi             expect_rwin = (u32_t)S32_SUB(
2798d14abf15SRobert Mustacchi                 tcp->tcp_delegated.recv_win_seq,
2799d14abf15SRobert Mustacchi                 tcp->tcp_delegated.recv_next);
2800d14abf15SRobert Mustacchi             expect_rwin += tcp->rx_con->u.rx.gen_info.peninsula_nbytes;
2801d14abf15SRobert Mustacchi 
2802d14abf15SRobert Mustacchi             /* WorkAround for LH: the fields received at offload should match the equation below; in LH this is not the case
2803d14abf15SRobert Mustacchi              * (worked example below). TBA: add an assert that we are on the LH operating system. */
2804d14abf15SRobert Mustacchi             DbgMessage(pdev, INFORMl4sp, "lm_tcp_init_tcp_state: pnb:%x, irw:%x, rws:%x, rnx:%x\n",tcp->rx_con->u.rx.gen_info.peninsula_nbytes,
2805d14abf15SRobert Mustacchi                         tcp->tcp_cached.initial_rcv_wnd,
2806d14abf15SRobert Mustacchi                         tcp->tcp_delegated.recv_win_seq,
2807d14abf15SRobert Mustacchi                         tcp->tcp_delegated.recv_next);
2808d14abf15SRobert Mustacchi             if (ERR_IF(expect_rwin != tcp->tcp_cached.initial_rcv_wnd)) {
2809d14abf15SRobert Mustacchi                 u32_t delta;
2810d14abf15SRobert Mustacchi                 /* move tcp_delegated.recv_next accordingly */
2811d14abf15SRobert Mustacchi                 if (expect_rwin > tcp->tcp_cached.initial_rcv_wnd) {
2812d14abf15SRobert Mustacchi                     delta = expect_rwin - tcp->tcp_cached.initial_rcv_wnd;
2813d14abf15SRobert Mustacchi                     tcp->tcp_delegated.recv_win_seq -= delta;
2814d14abf15SRobert Mustacchi                 } else {
2815d14abf15SRobert Mustacchi                     delta = tcp->tcp_cached.initial_rcv_wnd - expect_rwin;
2816d14abf15SRobert Mustacchi                     tcp->tcp_delegated.recv_win_seq += delta;
2817d14abf15SRobert Mustacchi                 }
2818d14abf15SRobert Mustacchi                 /* Need to also update the driver win right edge */
2819d14abf15SRobert Mustacchi                 tcp->rx_con->db_data.rx->rcv_win_right_edge = tcp->tcp_delegated.recv_win_seq;
2820d14abf15SRobert Mustacchi                 tcp->rx_con->u.rx.sws_info.drv_rcv_win_right_edge = tcp->tcp_delegated.recv_win_seq;
2821d14abf15SRobert Mustacchi             }
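            /*
             * Worked example with hypothetical values: initial_rcv_wnd = 65536,
             * recv_next = 5000, recv_win_seq = 70000, peninsula_nbytes = 0.
             * expect_rwin = (70000 - 5000) + 0 = 65000, i.e. 536 short, so
             * recv_win_seq (and the driver's right edge) is moved up to 70536.
             */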
2822d14abf15SRobert Mustacchi         }
2823d14abf15SRobert Mustacchi     }
2824d14abf15SRobert Mustacchi     /* insert 4 tuple to searcher's mirror hash */
2825d14abf15SRobert Mustacchi     if(tcp->path->path_const.ip_version == IP_VERSION_IPV4) { /* IPV4 */
2826d14abf15SRobert Mustacchi         tuple.ip_type = LM_IP_TYPE_V4;
2827d14abf15SRobert Mustacchi         tuple.dst_ip[0] = tcp->path->path_const.u.ipv4.dst_ip;
2828d14abf15SRobert Mustacchi         tuple.src_ip[0] = tcp->path->path_const.u.ipv4.src_ip;
2829d14abf15SRobert Mustacchi     } else {
2830d14abf15SRobert Mustacchi         tuple.ip_type = LM_IP_TYPE_V6;
2831d14abf15SRobert Mustacchi         for (i = 0; i < 4; i++) {
2832d14abf15SRobert Mustacchi             tuple.dst_ip[i] = tcp->path->path_const.u.ipv6.dst_ip[i];
2833d14abf15SRobert Mustacchi             tuple.src_ip[i] = tcp->path->path_const.u.ipv6.src_ip[i];
2834d14abf15SRobert Mustacchi         }
2835d14abf15SRobert Mustacchi     }
2836d14abf15SRobert Mustacchi     tuple.src_port = tcp->tcp_const.src_port;
2837d14abf15SRobert Mustacchi     tuple.dst_port = tcp->tcp_const.dst_port;
2838d14abf15SRobert Mustacchi     if (lm_searcher_mirror_hash_insert(pdev, tcp->cid, &tuple) != LM_STATUS_SUCCESS) {
2839d14abf15SRobert Mustacchi         DbgMessage(pdev, WARNl4sp, "lm_tcp_init_tcp_context: Failed inserting tuple to SRC hash\n");
2840d14abf15SRobert Mustacchi         tcp->in_searcher = 0;
2841d14abf15SRobert Mustacchi         return LM_STATUS_RESOURCE;
2842d14abf15SRobert Mustacchi     }
2843d14abf15SRobert Mustacchi     tcp->in_searcher = 1;
2844d14abf15SRobert Mustacchi 
2845d14abf15SRobert Mustacchi     /* get context */
2846d14abf15SRobert Mustacchi     tcp->ctx_virt = (struct toe_context *)lm_get_context(pdev, tcp->cid);
2847d14abf15SRobert Mustacchi     if (!tcp->ctx_virt) {
2848d14abf15SRobert Mustacchi         DbgBreakIf(!tcp->ctx_virt);
2849d14abf15SRobert Mustacchi         return LM_STATUS_FAILURE;
2850d14abf15SRobert Mustacchi     }
2851d14abf15SRobert Mustacchi 
2852d14abf15SRobert Mustacchi     tcp->ctx_phys.as_u64 = lm_get_context_phys(pdev, tcp->cid);
2853d14abf15SRobert Mustacchi     if (!tcp->ctx_phys.as_u64) {
2854d14abf15SRobert Mustacchi         DbgBreakIf(!tcp->ctx_phys.as_u64);
2855d14abf15SRobert Mustacchi         return LM_STATUS_FAILURE;
2856d14abf15SRobert Mustacchi     }
2857d14abf15SRobert Mustacchi     DbgMessage(pdev, VERBOSEl4sp,
2858d14abf15SRobert Mustacchi                 "tcp->ctx_virt=%p, tcp->ctx_phys_high=%x, tcp->ctx_phys_low=%x\n",
2859d14abf15SRobert Mustacchi                 tcp->ctx_virt, tcp->ctx_phys.as_u32.high, tcp->ctx_phys.as_u32.low);
2860d14abf15SRobert Mustacchi 
2861d14abf15SRobert Mustacchi     /* init the content of the context */
2862d14abf15SRobert Mustacchi     if (tcp->ulp_type == TOE_CONNECTION_TYPE) {
2863d14abf15SRobert Mustacchi         lm_status = _lm_tcp_init_toe_context(pdev, tcp);
2864d14abf15SRobert Mustacchi         if (lm_status != LM_STATUS_SUCCESS) {
2865d14abf15SRobert Mustacchi             return lm_status;
2866d14abf15SRobert Mustacchi         }
2867d14abf15SRobert Mustacchi     }
2868d14abf15SRobert Mustacchi 
2869d14abf15SRobert Mustacchi     lm_status = _lm_tcp_init_tcp_context(pdev, tcp);
2870d14abf15SRobert Mustacchi     if (lm_status != LM_STATUS_SUCCESS) {
2871d14abf15SRobert Mustacchi         return lm_status;
2872d14abf15SRobert Mustacchi     }
2873d14abf15SRobert Mustacchi 
2874d14abf15SRobert Mustacchi     /* iscsi / toe contexts are initialized separately, only the tcp section is common, HOWEVER, in iscsi
2875d14abf15SRobert Mustacchi      * most of the context is initialized in the l5_ofld_stage, but some of the context initialization is based on tcp
2876d14abf15SRobert Mustacchi      * params, that's why we need to complete it here...  */
2877d14abf15SRobert Mustacchi     if (tcp->ulp_type == ISCSI_CONNECTION_TYPE) {
2878d14abf15SRobert Mustacchi         lm_status = _lm_tcp_init_iscsi_tcp_related_context(pdev, tcp);
2879d14abf15SRobert Mustacchi         if (lm_status != LM_STATUS_SUCCESS) {
2880d14abf15SRobert Mustacchi             return lm_status;
2881d14abf15SRobert Mustacchi         }
2882d14abf15SRobert Mustacchi     }
2883d14abf15SRobert Mustacchi 
2884d14abf15SRobert Mustacchi     return LM_STATUS_SUCCESS;
2885d14abf15SRobert Mustacchi }
2886d14abf15SRobert Mustacchi 
2887d14abf15SRobert Mustacchi /** Description
2888d14abf15SRobert Mustacchi  *  Callback function for CIDs being recycled
2889d14abf15SRobert Mustacchi  */
lm_tcp_recycle_cid_cb(struct _lm_device_t * pdev,void * cookie,s32_t cid)2890d14abf15SRobert Mustacchi void lm_tcp_recycle_cid_cb(
2891d14abf15SRobert Mustacchi     struct _lm_device_t *pdev,
2892d14abf15SRobert Mustacchi     void *cookie,
2893d14abf15SRobert Mustacchi     s32_t cid)
2894d14abf15SRobert Mustacchi {
2895d14abf15SRobert Mustacchi     lm_tcp_state_t       *tcp    = (lm_tcp_state_t *)cookie;
2896d14abf15SRobert Mustacchi     lm_sp_req_common_t   *sp_req = NULL;
2897d14abf15SRobert Mustacchi     MM_ACQUIRE_TOE_LOCK(pdev);
2898d14abf15SRobert Mustacchi 
2899d14abf15SRobert Mustacchi     /* un-block the manager... */
2900d14abf15SRobert Mustacchi     lm_set_cid_state(pdev, tcp->cid, LM_CID_STATE_VALID);
2901d14abf15SRobert Mustacchi 
2902d14abf15SRobert Mustacchi     /* if the ofld flow already got to the ofld workitem, only now can we use the context;
2903d14abf15SRobert Mustacchi        otherwise, we'll get to init_tcp_context later on */
2904d14abf15SRobert Mustacchi     if (tcp->hdr.status == STATE_STATUS_INIT_CONTEXT)
2905d14abf15SRobert Mustacchi     {
2906d14abf15SRobert Mustacchi         lm_tcp_init_tcp_context(pdev,tcp);
2907d14abf15SRobert Mustacchi     }
2908d14abf15SRobert Mustacchi 
2909d14abf15SRobert Mustacchi     /* we can now unblock any pending slow-paths */
2910d14abf15SRobert Mustacchi     lm_sp_req_manager_unblock(pdev,cid, &sp_req);
2911d14abf15SRobert Mustacchi 
2912d14abf15SRobert Mustacchi 
2913d14abf15SRobert Mustacchi     MM_RELEASE_TOE_LOCK(pdev);
2914d14abf15SRobert Mustacchi }
2915d14abf15SRobert Mustacchi 
2916d14abf15SRobert Mustacchi /* This function needs to complete a pending slowpath toe request. Unfortunately it needs
2917d14abf15SRobert Mustacchi  * to take care of all the steps done in lm_toe_service_rx_intr and lm_toe_service_tx_intr,
2918d14abf15SRobert Mustacchi  * process the cqe, and complete slowpath...
2919d14abf15SRobert Mustacchi  */
lm_tcp_comp_cb(struct _lm_device_t * pdev,struct sq_pending_command * pending)2920d14abf15SRobert Mustacchi void lm_tcp_comp_cb(struct _lm_device_t *pdev, struct sq_pending_command *pending)
2921d14abf15SRobert Mustacchi {
2922d14abf15SRobert Mustacchi     lm_tcp_state_t  * tcp    = NULL;
2923d14abf15SRobert Mustacchi     lm_tcp_con_t    * rx_con = NULL;
2924d14abf15SRobert Mustacchi     lm_tcp_con_t    * tx_con = NULL;
2925d14abf15SRobert Mustacchi     struct toe_rx_cqe rx_cqe = {0};
2926d14abf15SRobert Mustacchi     struct toe_tx_cqe tx_cqe = {0};
2927d14abf15SRobert Mustacchi     u8_t              i      = 0;
2928d14abf15SRobert Mustacchi     u8_t              cmp_rx = FALSE;
2929d14abf15SRobert Mustacchi     u8_t              cmp_tx = FALSE;
2930d14abf15SRobert Mustacchi 
2931d14abf15SRobert Mustacchi     MM_INIT_TCP_LOCK_HANDLE();
2932d14abf15SRobert Mustacchi 
2933d14abf15SRobert Mustacchi     tcp = lm_cid_cookie(pdev, TOE_CONNECTION_TYPE, pending->cid);
2934d14abf15SRobert Mustacchi     /* tcp may be NULL for ramrods that are context-less (RSS for example) */
2935d14abf15SRobert Mustacchi     if (tcp)
2936d14abf15SRobert Mustacchi     {
2937d14abf15SRobert Mustacchi         rx_con = tcp->rx_con;
2938d14abf15SRobert Mustacchi         tx_con = tcp->tx_con;
2939d14abf15SRobert Mustacchi     }
2940d14abf15SRobert Mustacchi 
2941d14abf15SRobert Mustacchi     #define LM_TCP_SET_CQE(_param, _cid, _cmd) \
2942d14abf15SRobert Mustacchi         (_param) = (((_cid) << TOE_RX_CQE_CID_SHIFT) & TOE_RX_CQE_CID) | \
2943d14abf15SRobert Mustacchi                    (((_cmd) << TOE_RX_CQE_COMPLETION_OPCODE_SHIFT) & TOE_RX_CQE_COMPLETION_OPCODE);
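    /* The macro above packs the connection id and the ramrod opcode into a CQE
     * params word, so that a completion built here by the driver can be fed
     * through the regular lm_tcp_rx/tx_process_cqe() paths below. */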
2944d14abf15SRobert Mustacchi 
2945d14abf15SRobert Mustacchi     switch (pending->cmd)
2946d14abf15SRobert Mustacchi     {
2947d14abf15SRobert Mustacchi     case RAMROD_OPCODE_TOE_INIT:
2948d14abf15SRobert Mustacchi         DbgBreakMsg("Not Supported\n");
2949d14abf15SRobert Mustacchi         break;
2950d14abf15SRobert Mustacchi     case RAMROD_OPCODE_TOE_INITIATE_OFFLOAD:
2951d14abf15SRobert Mustacchi         LM_TCP_SET_CQE(rx_cqe.params1, pending->cid, RAMROD_OPCODE_TOE_INITIATE_OFFLOAD);
2952d14abf15SRobert Mustacchi         cmp_rx = TRUE;
2953d14abf15SRobert Mustacchi         break;
2954d14abf15SRobert Mustacchi     case RAMROD_OPCODE_TOE_SEARCHER_DELETE:
2955d14abf15SRobert Mustacchi         LM_TCP_SET_CQE(rx_cqe.params1, pending->cid, RAMROD_OPCODE_TOE_SEARCHER_DELETE);
2956d14abf15SRobert Mustacchi         cmp_rx = TRUE;
2957d14abf15SRobert Mustacchi         break;
2958d14abf15SRobert Mustacchi     case RAMROD_OPCODE_TOE_TERMINATE:
2959d14abf15SRobert Mustacchi         /* Completion may have completed on tx / rx only, so whether or not to complete it depends not
2960d14abf15SRobert Mustacchi          * only on type but on state of sp_request as well... */
2961d14abf15SRobert Mustacchi         LM_TCP_SET_CQE(rx_cqe.params1, pending->cid, RAMROD_OPCODE_TOE_TERMINATE);
2962d14abf15SRobert Mustacchi         cmp_rx = (GET_FLAGS(tcp->sp_flags, SP_REQUEST_COMPLETED_RX) == 0);
2963d14abf15SRobert Mustacchi         LM_TCP_SET_CQE(tx_cqe.params, pending->cid, RAMROD_OPCODE_TOE_TERMINATE);
2964d14abf15SRobert Mustacchi         cmp_tx = (GET_FLAGS(tcp->sp_flags, SP_REQUEST_COMPLETED_TX) == 0);
2965d14abf15SRobert Mustacchi         break;
2966d14abf15SRobert Mustacchi     case RAMROD_OPCODE_TOE_QUERY:
2967d14abf15SRobert Mustacchi         LM_TCP_SET_CQE(rx_cqe.params1, pending->cid, RAMROD_OPCODE_TOE_QUERY);
2968d14abf15SRobert Mustacchi         cmp_rx = TRUE;
2969d14abf15SRobert Mustacchi         break;
2970d14abf15SRobert Mustacchi     case RAMROD_OPCODE_TOE_RESET_SEND:
2971d14abf15SRobert Mustacchi         LM_TCP_SET_CQE(rx_cqe.params1, pending->cid, RAMROD_OPCODE_TOE_RESET_SEND);
2972d14abf15SRobert Mustacchi         cmp_rx = (GET_FLAGS(tcp->sp_flags, SP_REQUEST_COMPLETED_RX) == 0);
2973d14abf15SRobert Mustacchi         LM_TCP_SET_CQE(tx_cqe.params, pending->cid, RAMROD_OPCODE_TOE_RESET_SEND);
2974d14abf15SRobert Mustacchi         cmp_tx = (GET_FLAGS(tcp->sp_flags, SP_REQUEST_COMPLETED_TX) == 0);
2975d14abf15SRobert Mustacchi         break;
2976d14abf15SRobert Mustacchi     case RAMROD_OPCODE_TOE_EMPTY_RAMROD:
2977d14abf15SRobert Mustacchi         LM_TCP_SET_CQE(rx_cqe.params1, pending->cid, RAMROD_OPCODE_TOE_EMPTY_RAMROD);
2978d14abf15SRobert Mustacchi         cmp_rx = (GET_FLAGS(tcp->sp_flags, SP_REQUEST_COMPLETED_RX) == 0);
2979d14abf15SRobert Mustacchi         LM_TCP_SET_CQE(tx_cqe.params, pending->cid, RAMROD_OPCODE_TOE_EMPTY_RAMROD);
2980d14abf15SRobert Mustacchi         cmp_tx = (GET_FLAGS(tcp->sp_flags, SP_REQUEST_COMPLETED_TX) == 0);
2981d14abf15SRobert Mustacchi         break;
2982d14abf15SRobert Mustacchi     case RAMROD_OPCODE_TOE_INVALIDATE:
2983d14abf15SRobert Mustacchi         LM_TCP_SET_CQE(rx_cqe.params1, pending->cid, RAMROD_OPCODE_TOE_INVALIDATE);
2984d14abf15SRobert Mustacchi         cmp_rx = (GET_FLAGS(tcp->sp_flags, SP_REQUEST_COMPLETED_RX) == 0);
2985d14abf15SRobert Mustacchi         LM_TCP_SET_CQE(tx_cqe.params, pending->cid, RAMROD_OPCODE_TOE_INVALIDATE);
2986d14abf15SRobert Mustacchi         cmp_tx = (GET_FLAGS(tcp->sp_flags, SP_REQUEST_COMPLETED_TX) == 0);
2987d14abf15SRobert Mustacchi         break;
2988d14abf15SRobert Mustacchi     case RAMROD_OPCODE_TOE_UPDATE:
2989d14abf15SRobert Mustacchi         LM_TCP_SET_CQE(rx_cqe.params1, pending->cid, RAMROD_OPCODE_TOE_UPDATE);
2990d14abf15SRobert Mustacchi         cmp_rx = TRUE;
2991d14abf15SRobert Mustacchi         break;
2992d14abf15SRobert Mustacchi     case RAMROD_OPCODE_TOE_RSS_UPDATE:
2993d14abf15SRobert Mustacchi         /* This one is special; it's not treated like other ramrods - we return and don't break
2994d14abf15SRobert Mustacchi          * at the end of this case... */
2995d14abf15SRobert Mustacchi         /* A bit of a hack here... we only want to give one completion and not on all
2996d14abf15SRobert Mustacchi          * rcq-chains, so we update the counters and decrease all l4 rss chains
2997d14abf15SRobert Mustacchi          * except one. Then we give the completion to just one chain, which should take care
2998d14abf15SRobert Mustacchi          * of completing the sq, and if the L2 ramrod has already completed it will also comp
2999d14abf15SRobert Mustacchi          * back to the OS. */
3000d14abf15SRobert Mustacchi         for (i = 0; i < pdev->params.l4_rss_chain_cnt-1; i++)
3001d14abf15SRobert Mustacchi         {
3002d14abf15SRobert Mustacchi             mm_atomic_dec(&pdev->params.update_toe_comp_cnt);
3003d14abf15SRobert Mustacchi             mm_atomic_dec(&pdev->params.update_comp_cnt);
3004d14abf15SRobert Mustacchi             mm_atomic_dec(&pdev->params.update_suspend_cnt);
3005d14abf15SRobert Mustacchi         }
3006d14abf15SRobert Mustacchi         lm_tcp_rss_update_ramrod_comp(pdev,
3007d14abf15SRobert Mustacchi                                       &pdev->toe_info.rcqs[LM_TOE_BASE_RSS_ID(pdev)],
3008d14abf15SRobert Mustacchi                                       pending->cid,
3009d14abf15SRobert Mustacchi                                       TOE_RSS_UPD_QUIET /* doesn't really matter*/,
3010d14abf15SRobert Mustacchi                                       TRUE);
3011d14abf15SRobert Mustacchi 
3012d14abf15SRobert Mustacchi         return;
3013d14abf15SRobert Mustacchi     }
3014d14abf15SRobert Mustacchi     /* process the cqes and initialize connections with all the connections that appeared
3015d14abf15SRobert Mustacchi      * in the DPC */
3016d14abf15SRobert Mustacchi     if (cmp_rx)
3017d14abf15SRobert Mustacchi     {
3018d14abf15SRobert Mustacchi         lm_tcp_rx_process_cqe(pdev, &rx_cqe, tcp, 0 /* d/c for slowpath */);
3019d14abf15SRobert Mustacchi         /* FP: no need to call complete_tcp_fp since we're only completing slowpath, but we do
3020d14abf15SRobert Mustacchi          * need to move the flags for the sake of the next function */
3021d14abf15SRobert Mustacchi         rx_con->dpc_info.snapshot_flags = rx_con->dpc_info.dpc_flags;
3022d14abf15SRobert Mustacchi         rx_con->dpc_info.dpc_flags = 0;
3023d14abf15SRobert Mustacchi 
3024d14abf15SRobert Mustacchi         /* we access snapshot and not dpc, since once the dpc_flags were copied
3025d14abf15SRobert Mustacchi          * to snapshot they were zeroized */
3026d14abf15SRobert Mustacchi         lm_tcp_rx_complete_tcp_sp(pdev, tcp, rx_con);
3027d14abf15SRobert Mustacchi     }
3028d14abf15SRobert Mustacchi 
3029d14abf15SRobert Mustacchi     /* process the cqes and initialize connections with all the connections that appeared
3030d14abf15SRobert Mustacchi      * in the DPC */
3031d14abf15SRobert Mustacchi     if (cmp_tx)
3032d14abf15SRobert Mustacchi     {
3033d14abf15SRobert Mustacchi         lm_tcp_tx_process_cqe(pdev, &tx_cqe, tcp);
3034d14abf15SRobert Mustacchi         /* FP: no need to call complete_tcp_fp since we're only completing slowpath, but we do
3035d14abf15SRobert Mustacchi          * need to move the flags for the sake of the next function */
3036d14abf15SRobert Mustacchi         tx_con->dpc_info.snapshot_flags = tx_con->dpc_info.dpc_flags;
3037d14abf15SRobert Mustacchi         tx_con->dpc_info.dpc_flags = 0;
3038d14abf15SRobert Mustacchi 
3039d14abf15SRobert Mustacchi         /* we access snapshot and not dpc, since once the dpc_flags were copied
3040d14abf15SRobert Mustacchi          * to snapshot they were zeroized */
3041d14abf15SRobert Mustacchi         lm_tcp_tx_complete_tcp_sp(pdev, tcp, tx_con);
3042d14abf15SRobert Mustacchi     }
3043d14abf15SRobert Mustacchi 
3044d14abf15SRobert Mustacchi }
3045d14abf15SRobert Mustacchi 
3046d14abf15SRobert Mustacchi /* Description:
3047d14abf15SRobert Mustacchi  *  - init TCP state according to its TCP state machine's state
3048d14abf15SRobert Mustacchi  * Assumptions:
3049d14abf15SRobert Mustacchi  *  - lm_tcp_init_tcp_state, lm_tcp_init_rx_con/tx_con already called
3050d14abf15SRobert Mustacchi  *  - send unacked data already posted
3051d14abf15SRobert Mustacchi  * Returns:
3052d14abf15SRobert Mustacchi  *  SUCCESS or any failure */
lm_tcp_init_tcp_state_machine(struct _lm_device_t * pdev,lm_tcp_state_t * tcp)3053d14abf15SRobert Mustacchi static lm_status_t lm_tcp_init_tcp_state_machine(
3054d14abf15SRobert Mustacchi     struct _lm_device_t *pdev,
3055d14abf15SRobert Mustacchi     lm_tcp_state_t *tcp)
3056d14abf15SRobert Mustacchi {
3057d14abf15SRobert Mustacchi     lm_tcp_con_t                *con        = tcp->rx_con;
3058d14abf15SRobert Mustacchi     lm_tcp_state_calculation_t  *state_calc = &tcp->tcp_state_calc;
3059d14abf15SRobert Mustacchi     u64_t                       curr_time   = 0;
3060d14abf15SRobert Mustacchi     lm_status_t                 lm_status   = LM_STATUS_SUCCESS;
3061d14abf15SRobert Mustacchi 
3062d14abf15SRobert Mustacchi     DbgMessage(pdev, VERBOSEl4sp, "##lm_tcp_init_tcp_state_machine\n");
3063d14abf15SRobert Mustacchi 
3064d14abf15SRobert Mustacchi     /* initialize the times in the state calculation struct
3065d14abf15SRobert Mustacchi       according to delegated.con_state */
3066d14abf15SRobert Mustacchi 
3067d14abf15SRobert Mustacchi     state_calc->fin_request_time = state_calc->fin_completed_time =
3068d14abf15SRobert Mustacchi         state_calc->fin_reception_time = 0;
3069d14abf15SRobert Mustacchi     curr_time = mm_get_current_time(pdev);
3070d14abf15SRobert Mustacchi 
3071d14abf15SRobert Mustacchi     switch (tcp->tcp_delegated.con_state) {
3072d14abf15SRobert Mustacchi     case L4_TCP_CON_STATE_ESTABLISHED:
3073d14abf15SRobert Mustacchi         break;
3074d14abf15SRobert Mustacchi     case L4_TCP_CON_STATE_FIN_WAIT1:
3075d14abf15SRobert Mustacchi         DbgMessage(pdev, WARNl4sp, "#tcp state offloaded in state FIN_WAIT1 (tcp=%p)\n", tcp);
3076d14abf15SRobert Mustacchi         state_calc->fin_request_time = curr_time;
3077d14abf15SRobert Mustacchi         break;
3078d14abf15SRobert Mustacchi     case L4_TCP_CON_STATE_FIN_WAIT2:
3079d14abf15SRobert Mustacchi         DbgMessage(pdev, WARNl4sp, "#tcp state offloaded in state FIN_WAIT2 (tcp=%p)\n", tcp);
3080d14abf15SRobert Mustacchi         state_calc->fin_request_time = curr_time - 1;
3081d14abf15SRobert Mustacchi         state_calc->fin_completed_time = curr_time;
3082d14abf15SRobert Mustacchi         break;
3083d14abf15SRobert Mustacchi     case L4_TCP_CON_STATE_CLOSE_WAIT:
3084d14abf15SRobert Mustacchi         DbgMessage(pdev, WARNl4sp, "#tcp state offloaded in state CLOSE_WAIT (tcp=%p)\n", tcp);
3085d14abf15SRobert Mustacchi         state_calc->fin_reception_time = curr_time;
3086d14abf15SRobert Mustacchi         break;
3087d14abf15SRobert Mustacchi     case L4_TCP_CON_STATE_CLOSING:
3088d14abf15SRobert Mustacchi         DbgMessage(pdev, WARNl4sp, "#tcp state offloaded in state CLOSING (tcp=%p)\n", tcp);
3089d14abf15SRobert Mustacchi         state_calc->fin_request_time = curr_time - 1;
3090d14abf15SRobert Mustacchi         state_calc->fin_reception_time = curr_time;
3091d14abf15SRobert Mustacchi         break;
3092d14abf15SRobert Mustacchi     case L4_TCP_CON_STATE_LAST_ACK:
3093d14abf15SRobert Mustacchi         DbgMessage(pdev, WARNl4sp, "#tcp state offloaded in state LAST_ACK (tcp=%p)\n", tcp);
3094d14abf15SRobert Mustacchi         state_calc->fin_reception_time = curr_time - 1;
3095d14abf15SRobert Mustacchi         state_calc->fin_request_time = curr_time;
3096d14abf15SRobert Mustacchi         break;
3097d14abf15SRobert Mustacchi     default:
3098d14abf15SRobert Mustacchi         DbgMessage(pdev, FATAL,
3099d14abf15SRobert Mustacchi                     "Initiate offload in con state=%d is not allowed by WDK!\n",
3100d14abf15SRobert Mustacchi                     tcp->tcp_delegated.con_state);
3101d14abf15SRobert Mustacchi         DbgBreak();
3102d14abf15SRobert Mustacchi         return LM_STATUS_FAILURE;
3103d14abf15SRobert Mustacchi     }
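    /*
     * Timestamp ordering established by the switch above (descriptive only;
     * the "t-1" values exist solely to order the FIN events, the absolute
     * times are not meaningful):
     *
     *   state        fin_request_time  fin_completed_time  fin_reception_time
     *   ESTABLISHED  0                 0                   0
     *   FIN_WAIT1    t                 0                   0
     *   FIN_WAIT2    t-1               t                   0
     *   CLOSE_WAIT   0                 0                   t
     *   CLOSING      t-1               0                   t
     *   LAST_ACK     t                 0                   t-1
     */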
3104d14abf15SRobert Mustacchi 
3105d14abf15SRobert Mustacchi     /* In case the TCP state is CloseWait, Closing or LastAck, the Rx con
3106d14abf15SRobert Mustacchi      * should be initialized as if a remote FIN was already received */
3107d14abf15SRobert Mustacchi 
3108d14abf15SRobert Mustacchi     if (state_calc->fin_reception_time) {
3109d14abf15SRobert Mustacchi         /* remote FIN was already received */
3110d14abf15SRobert Mustacchi         DbgBreakIf(con->flags & TCP_REMOTE_FIN_RECEIVED);
3111d14abf15SRobert Mustacchi         con->flags |= TCP_REMOTE_FIN_RECEIVED;
3112d14abf15SRobert Mustacchi 
3113d14abf15SRobert Mustacchi         if (con->flags & TCP_INDICATE_REJECTED) {
3114d14abf15SRobert Mustacchi             /* GilR: TODO - is this case really possible [fin received+buffered data given]? If so, does NDIS really expect the fin received indication? */
3115d14abf15SRobert Mustacchi             /* buffered data exists, defer FIN indication */
3116d14abf15SRobert Mustacchi             con->u.rx.flags |= TCP_CON_FIN_IND_PENDING;
3117d14abf15SRobert Mustacchi         } else {
3118d14abf15SRobert Mustacchi             /* no buffered data, simulate that remote FIN already indicated */
3119d14abf15SRobert Mustacchi             con->flags |= TCP_REMOTE_FIN_RECEIVED_ALL_RX_INDICATED;
3120d14abf15SRobert Mustacchi             con->flags |= TCP_BUFFERS_ABORTED;
3121d14abf15SRobert Mustacchi         }
3122d14abf15SRobert Mustacchi     }
3123d14abf15SRobert Mustacchi 
3124d14abf15SRobert Mustacchi     con = tcp->tx_con;
3125d14abf15SRobert Mustacchi     /* check if local FIN was already sent, and if it was acknowledged */
3126d14abf15SRobert Mustacchi     if (state_calc->fin_completed_time) {
3127d14abf15SRobert Mustacchi         /* FIN already sent and acked */
3128d14abf15SRobert Mustacchi         volatile struct toe_tx_db_data *db_data = con->db_data.tx;
3129d14abf15SRobert Mustacchi         DbgBreakIf(!state_calc->fin_request_time);
3130d14abf15SRobert Mustacchi         DbgBreakIf(!s_list_is_empty(&con->active_tb_list));
3131d14abf15SRobert Mustacchi         con->flags |= (TCP_FIN_REQ_POSTED | TCP_FIN_REQ_COMPLETED);
3132d14abf15SRobert Mustacchi         db_data->flags |= (TOE_TX_DB_DATA_FIN << TOE_TX_DB_DATA_FIN_SHIFT);
3133d14abf15SRobert Mustacchi         db_data->bytes_prod_seq--;
3134d14abf15SRobert Mustacchi     } else if (state_calc->fin_request_time) {
3135d14abf15SRobert Mustacchi         /* FIN was already sent but not acked */
3136d14abf15SRobert Mustacchi 
3137d14abf15SRobert Mustacchi         /* GilR 11/12/2006 - TODO - we do not take the tx lock here, verify that it's ok... */
3138d14abf15SRobert Mustacchi         /* We want to make sure we'll be able to post the tcp buffer but
3139d14abf15SRobert Mustacchi          * NOT ring the doorbell */
3140d14abf15SRobert Mustacchi         DbgBreakIf(con->flags & TCP_DB_BLOCKED);
3141d14abf15SRobert Mustacchi         con->flags |= TCP_DB_BLOCKED;
3142d14abf15SRobert Mustacchi         DbgBreakIf(!(con->flags & TCP_POST_BLOCKED));
3143d14abf15SRobert Mustacchi         con->flags &= ~TCP_POST_BLOCKED; /* posting is temporary allowed */
3144d14abf15SRobert Mustacchi 
3145d14abf15SRobert Mustacchi         con->u.tx.flags |= TCP_CON_FIN_REQ_LM_INTERNAL;
3146d14abf15SRobert Mustacchi         lm_status = lm_tcp_graceful_disconnect(pdev, tcp);
3147d14abf15SRobert Mustacchi         DbgBreakIf(lm_status != LM_STATUS_SUCCESS);
3148d14abf15SRobert Mustacchi 
3149d14abf15SRobert Mustacchi         /* restore the initial flags state */
3150d14abf15SRobert Mustacchi         con->flags &= ~TCP_DB_BLOCKED;
3151d14abf15SRobert Mustacchi         con->flags |= TCP_POST_BLOCKED; /* posting is no longer allowed */
3152d14abf15SRobert Mustacchi     }
3153d14abf15SRobert Mustacchi 
3154d14abf15SRobert Mustacchi     return LM_STATUS_SUCCESS;
3155d14abf15SRobert Mustacchi }
3156d14abf15SRobert Mustacchi 
3157d14abf15SRobert Mustacchi 
3158d14abf15SRobert Mustacchi /* Description:
3159d14abf15SRobert Mustacchi  *  - call lm_tcp_init_tcp_state_machine
3160d14abf15SRobert Mustacchi  *  - call lm_tcp_init_tcp_context
3161d14abf15SRobert Mustacchi  * Assumptions:
3162d14abf15SRobert Mustacchi  *  - lm_tcp_init_tcp_state, lm_tcp_init_rx_con/tx_con already called
3163d14abf15SRobert Mustacchi  *  - send unacked data already posted
3164d14abf15SRobert Mustacchi  * Returns:
3165d14abf15SRobert Mustacchi  *  SUCCESS or any failure */
lm_tcp_init_tcp_common(struct _lm_device_t * pdev,lm_tcp_state_t * tcp)3166d14abf15SRobert Mustacchi lm_status_t lm_tcp_init_tcp_common(
3167d14abf15SRobert Mustacchi     struct _lm_device_t *pdev,
3168d14abf15SRobert Mustacchi     lm_tcp_state_t *tcp)
3169d14abf15SRobert Mustacchi {
3170d14abf15SRobert Mustacchi     lm_status_t lm_status = LM_STATUS_SUCCESS;
3171d14abf15SRobert Mustacchi 
3172d14abf15SRobert Mustacchi     DbgMessage(pdev, VERBOSEl4sp, "###lm_tcp_init_tcp_common\n");
3173d14abf15SRobert Mustacchi     DbgBreakIf(!(pdev && tcp));
3174d14abf15SRobert Mustacchi 
3175d14abf15SRobert Mustacchi     lm_status = lm_tcp_init_tcp_state_machine(pdev, tcp);
3176d14abf15SRobert Mustacchi     if (lm_status != LM_STATUS_SUCCESS) {
3177d14abf15SRobert Mustacchi         return lm_status;
3178d14abf15SRobert Mustacchi     }
3179d14abf15SRobert Mustacchi 
3180d14abf15SRobert Mustacchi     lm_status = lm_tcp_init_tcp_context(pdev, tcp);
3181d14abf15SRobert Mustacchi     if (lm_status != LM_STATUS_SUCCESS) {
3182d14abf15SRobert Mustacchi         return lm_status;
3183d14abf15SRobert Mustacchi     }
3184d14abf15SRobert Mustacchi 
3185d14abf15SRobert Mustacchi     if (tcp->ulp_type == TOE_CONNECTION_TYPE) {
3186d14abf15SRobert Mustacchi         tcp->rx_con->u.rx.gen_info.dont_send_to_system_more_then_rwin = FALSE; //TRUE;
3187d14abf15SRobert Mustacchi     }
3188d14abf15SRobert Mustacchi 
3189d14abf15SRobert Mustacchi     return LM_STATUS_SUCCESS;
3190d14abf15SRobert Mustacchi }
3191d14abf15SRobert Mustacchi 
3192d14abf15SRobert Mustacchi 
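/** Description
 *  completes a pending neighbor upload request: marks the neigh state
 *  UPLOAD_DONE and notifies the mm layer via
 *  mm_tcp_complete_neigh_upload_request. Assumes the neighbor has no
 *  remaining dependents (invoked from the path upload-completion flow,
 *  so presumably under the same TOE-lock).
 */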
_lm_tcp_comp_upload_neigh_request(struct _lm_device_t * pdev,lm_neigh_state_t * neigh_state)3193d14abf15SRobert Mustacchi static void _lm_tcp_comp_upload_neigh_request(
3194d14abf15SRobert Mustacchi     struct _lm_device_t * pdev,
3195d14abf15SRobert Mustacchi     lm_neigh_state_t    * neigh_state)
3196d14abf15SRobert Mustacchi {
3197d14abf15SRobert Mustacchi     DbgBreakIf(neigh_state->hdr.status != STATE_STATUS_UPLOAD_PENDING);
3198d14abf15SRobert Mustacchi     DbgBreakIf(neigh_state->hdr.state_id != STATE_ID_NEIGH);
3199d14abf15SRobert Mustacchi 
3200d14abf15SRobert Mustacchi     DbgBreakIf(neigh_state->num_dependents);
3201d14abf15SRobert Mustacchi 
3202d14abf15SRobert Mustacchi     neigh_state->hdr.status = STATE_STATUS_UPLOAD_DONE;
3203d14abf15SRobert Mustacchi     mm_tcp_complete_neigh_upload_request(pdev, neigh_state);
3204d14abf15SRobert Mustacchi }
3205d14abf15SRobert Mustacchi 
3206d14abf15SRobert Mustacchi 
3207d14abf15SRobert Mustacchi /** Description
3208d14abf15SRobert Mustacchi  *  completes a pending path-state upload request
3209d14abf15SRobert Mustacchi  * Assumptions:
3210d14abf15SRobert Mustacchi  *   called under TOE-lock
3211d14abf15SRobert Mustacchi  */
_lm_tcp_comp_upload_path_request(struct _lm_device_t * pdev,lm_path_state_t * path_state)3212d14abf15SRobert Mustacchi static void _lm_tcp_comp_upload_path_request(
3213d14abf15SRobert Mustacchi     struct _lm_device_t * pdev,
3214d14abf15SRobert Mustacchi     lm_path_state_t     * path_state)
3215d14abf15SRobert Mustacchi {
3216d14abf15SRobert Mustacchi     lm_neigh_state_t * neigh = NULL;
3217d14abf15SRobert Mustacchi 
3218d14abf15SRobert Mustacchi     DbgBreakIf(path_state->hdr.status != STATE_STATUS_UPLOAD_PENDING);
3219d14abf15SRobert Mustacchi     DbgBreakIf(path_state->hdr.state_id != STATE_ID_PATH);
3220d14abf15SRobert Mustacchi 
3221d14abf15SRobert Mustacchi     path_state->hdr.status = STATE_STATUS_UPLOAD_DONE;
3222d14abf15SRobert Mustacchi 
3223d14abf15SRobert Mustacchi     DbgBreakIf(path_state->neigh->num_dependents == 0);
3224d14abf15SRobert Mustacchi     path_state->neigh->num_dependents--;
3225d14abf15SRobert Mustacchi     if ((path_state->neigh->num_dependents == 0) &&
3226d14abf15SRobert Mustacchi         (path_state->neigh->hdr.status == STATE_STATUS_UPLOAD_PENDING)) {
3227d14abf15SRobert Mustacchi         /* Time to release the neighbor resources...*/
3228d14abf15SRobert Mustacchi         neigh = path_state->neigh;
3229d14abf15SRobert Mustacchi     }
3230d14abf15SRobert Mustacchi     path_state->neigh = NULL;
3231d14abf15SRobert Mustacchi 
3232d14abf15SRobert Mustacchi     DbgBreakIf(path_state->num_dependents);
3233d14abf15SRobert Mustacchi 
3234d14abf15SRobert Mustacchi     mm_tcp_complete_path_upload_request(pdev, path_state);
3235d14abf15SRobert Mustacchi 
3236d14abf15SRobert Mustacchi     if (neigh) {
3237d14abf15SRobert Mustacchi         _lm_tcp_comp_upload_neigh_request(pdev, neigh);
3238d14abf15SRobert Mustacchi     }
3239d14abf15SRobert Mustacchi }
3240d14abf15SRobert Mustacchi 
3241d14abf15SRobert Mustacchi 
3242d14abf15SRobert Mustacchi /* post the initiate offload slow path ramrod
3243d14abf15SRobert Mustacchi  * returns PENDING (the ramrod completes asynchronously) */
lm_tcp_post_initiate_offload_request(struct _lm_device_t * pdev,lm_tcp_state_t * tcp,u8_t * command,u64_t * data)3244d14abf15SRobert Mustacchi static lm_status_t lm_tcp_post_initiate_offload_request(
3245d14abf15SRobert Mustacchi     struct _lm_device_t *pdev,
3246d14abf15SRobert Mustacchi     lm_tcp_state_t *tcp,
3247d14abf15SRobert Mustacchi     u8_t *command,
3248d14abf15SRobert Mustacchi     u64_t *data)
3249d14abf15SRobert Mustacchi {
3250d14abf15SRobert Mustacchi     lm_tcp_con_t *con = tcp->tx_con;
3251d14abf15SRobert Mustacchi     int           i   = 0;
3252d14abf15SRobert Mustacchi     MM_INIT_TCP_LOCK_HANDLE();
3253d14abf15SRobert Mustacchi 
3254d14abf15SRobert Mustacchi     DbgMessage(pdev, VERBOSEl4sp, "##lm_tcp_post_initiate_offload_request\n");
3255d14abf15SRobert Mustacchi     DbgBreakIf(tcp->hdr.status != STATE_STATUS_INIT_CONTEXT);
3256d14abf15SRobert Mustacchi     tcp->hdr.status = STATE_STATUS_OFFLOAD_PENDING;
3257d14abf15SRobert Mustacchi 
3258d14abf15SRobert Mustacchi     if (tcp->ulp_type == TOE_CONNECTION_TYPE) {
3259d14abf15SRobert Mustacchi         con = tcp->tx_con;
3260d14abf15SRobert Mustacchi         for (i = 0; i < 2; i++) {
3261d14abf15SRobert Mustacchi             mm_acquire_tcp_lock(pdev, con);
3262d14abf15SRobert Mustacchi             DbgBreakIf(!(con->flags & TCP_POST_BLOCKED));
3263d14abf15SRobert Mustacchi             DbgBreakIf(!(con->flags & TCP_COMP_BLOCKED));
3264d14abf15SRobert Mustacchi             con->flags &= ~TCP_COMP_BLOCKED;
3265d14abf15SRobert Mustacchi             con->flags |= TCP_COMP_DEFERRED; /* completions are now allowed but deferred */
3266d14abf15SRobert Mustacchi             mm_release_tcp_lock(pdev, con);
3267d14abf15SRobert Mustacchi             con = tcp->rx_con;
3268d14abf15SRobert Mustacchi         }
3269d14abf15SRobert Mustacchi     }
3270d14abf15SRobert Mustacchi 
3271d14abf15SRobert Mustacchi     tcp->sp_flags |= SP_TCP_OFLD_REQ_POSTED;
3272d14abf15SRobert Mustacchi     *command = (tcp->ulp_type == TOE_CONNECTION_TYPE)? RAMROD_OPCODE_TOE_INITIATE_OFFLOAD : L5CM_RAMROD_CMD_ID_ADD_NEW_CONNECTION;
3273d14abf15SRobert Mustacchi     *data = tcp->ctx_phys.as_u64;
3274d14abf15SRobert Mustacchi 
3275d14abf15SRobert Mustacchi     return LM_STATUS_PENDING;
3276d14abf15SRobert Mustacchi }
3277d14abf15SRobert Mustacchi 
3278d14abf15SRobert Mustacchi 
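/** Description
 *  posts the terminate ramrod for a connection that is already in
 *  UPLOAD_PENDING state: flushes the doorbell, marks SP_TCP_TRM_REQ_POSTED
 *  and selects the TOE/L5CM terminate command. Always returns PENDING.
 */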
lm_tcp_post_terminate_tcp_request(IN struct _lm_device_t * pdev,IN lm_tcp_state_t * tcp,OUT u8_t * command,OUT u64_t * data)3279d14abf15SRobert Mustacchi static lm_status_t lm_tcp_post_terminate_tcp_request (
3280d14abf15SRobert Mustacchi     IN    struct _lm_device_t   * pdev,
3281d14abf15SRobert Mustacchi     IN    lm_tcp_state_t        * tcp,
3282d14abf15SRobert Mustacchi     OUT   u8_t                  * command,
3283d14abf15SRobert Mustacchi     OUT   u64_t                 * data
3284d14abf15SRobert Mustacchi     )
3285d14abf15SRobert Mustacchi {
3286d14abf15SRobert Mustacchi     DbgMessage(pdev, VERBOSEl4sp, "## lm_tcp_post_terminate_tcp_request\n");
3287d14abf15SRobert Mustacchi 
3288d14abf15SRobert Mustacchi     DbgBreakIf(tcp->hdr.status != STATE_STATUS_UPLOAD_PENDING);
3289d14abf15SRobert Mustacchi 
3290d14abf15SRobert Mustacchi     lm_tcp_flush_db(pdev,tcp);
3291d14abf15SRobert Mustacchi 
3292d14abf15SRobert Mustacchi     SET_FLAGS(tcp->sp_flags, SP_TCP_TRM_REQ_POSTED );
3293d14abf15SRobert Mustacchi 
3294d14abf15SRobert Mustacchi     *command = (tcp->ulp_type == TOE_CONNECTION_TYPE)? RAMROD_OPCODE_TOE_TERMINATE : L5CM_RAMROD_CMD_ID_TERMINATE_OFFLOAD;
3295d14abf15SRobert Mustacchi     *data = 0;
3296d14abf15SRobert Mustacchi 
3297d14abf15SRobert Mustacchi     return LM_STATUS_PENDING;
3298d14abf15SRobert Mustacchi }
3299d14abf15SRobert Mustacchi 
3300d14abf15SRobert Mustacchi /**
3301d14abf15SRobert Mustacchi  Description:
3302d14abf15SRobert Mustacchi  *  Posts an abortive disconnect (RST) request.
3303d14abf15SRobert Mustacchi  *
3304d14abf15SRobert Mustacchi  * Assumptions:
3305d14abf15SRobert Mustacchi  *  - Global TOE lock is already taken by the caller.
3306d14abf15SRobert Mustacchi  *
3307d14abf15SRobert Mustacchi  * Returns:
3308d14abf15SRobert Mustacchi  *  PENDING (the RST ramrod completes asynchronously)
3309d14abf15SRobert Mustacchi  *
3310d14abf15SRobert Mustacchi  */
lm_tcp_post_abortive_disconnect_request(IN struct _lm_device_t * pdev,IN lm_tcp_state_t * tcp,OUT u8_t * command,OUT u64_t * data)3311d14abf15SRobert Mustacchi static lm_status_t lm_tcp_post_abortive_disconnect_request (
3312d14abf15SRobert Mustacchi     IN    struct _lm_device_t   * pdev,
3313d14abf15SRobert Mustacchi     IN    lm_tcp_state_t        * tcp,
3314d14abf15SRobert Mustacchi     OUT   u8_t                  * command,
3315d14abf15SRobert Mustacchi     OUT   u64_t                 * data
3316d14abf15SRobert Mustacchi     )
3317d14abf15SRobert Mustacchi {
3318d14abf15SRobert Mustacchi     /* Get Rx and Tx connections */
3319d14abf15SRobert Mustacchi     lm_tcp_con_t *rx_con = tcp->rx_con;
3320d14abf15SRobert Mustacchi     lm_tcp_con_t *tx_con = tcp->tx_con;
3321d14abf15SRobert Mustacchi 
3322d14abf15SRobert Mustacchi     MM_INIT_TCP_LOCK_HANDLE();
3323d14abf15SRobert Mustacchi 
3324d14abf15SRobert Mustacchi     DbgMessage(pdev, VERBOSEl4sp, "## lm_tcp_post_abortive_disconnect_request\n");
3325d14abf15SRobert Mustacchi     DbgBreakIf( (tcp->hdr.status != STATE_STATUS_NORMAL ) &&
3326d14abf15SRobert Mustacchi                 (tcp->hdr.status != STATE_STATUS_ABORTED) );
3327d14abf15SRobert Mustacchi 
3328d14abf15SRobert Mustacchi /*********************** Tx **********************/
3329d14abf15SRobert Mustacchi     /* Take Tx lock */
3330d14abf15SRobert Mustacchi     mm_acquire_tcp_lock(pdev, tx_con);
3331d14abf15SRobert Mustacchi 
3332d14abf15SRobert Mustacchi     /* This will imply Tx POST_BLOCKED */
3333d14abf15SRobert Mustacchi     tx_con->flags |= TCP_RST_REQ_POSTED;
3334d14abf15SRobert Mustacchi 
3335d14abf15SRobert Mustacchi     /* Release Tx lock */
3336d14abf15SRobert Mustacchi     mm_release_tcp_lock(pdev, tx_con);
3337d14abf15SRobert Mustacchi 
3338d14abf15SRobert Mustacchi /*********************** Rx **********************/
3339d14abf15SRobert Mustacchi     /* Take Rx lock */
3340d14abf15SRobert Mustacchi     mm_acquire_tcp_lock(pdev, rx_con);
3341d14abf15SRobert Mustacchi 
3342d14abf15SRobert Mustacchi     /* This will imply Rx POST_BLOCKED and IND_BLOCKED */
3343d14abf15SRobert Mustacchi     rx_con->flags |= TCP_RST_REQ_POSTED;
3344d14abf15SRobert Mustacchi 
3345d14abf15SRobert Mustacchi     /* Release Rx lock */
3346d14abf15SRobert Mustacchi     mm_release_tcp_lock(pdev, rx_con);
3347d14abf15SRobert Mustacchi /**************Post the ramrod *******************/
3348d14abf15SRobert Mustacchi     *command = RAMROD_OPCODE_TOE_RESET_SEND;
3349d14abf15SRobert Mustacchi     *data = 0;
3350d14abf15SRobert Mustacchi 
3351d14abf15SRobert Mustacchi     return LM_STATUS_PENDING;
3352d14abf15SRobert Mustacchi }
3353d14abf15SRobert Mustacchi 
3354d14abf15SRobert Mustacchi 
3355d14abf15SRobert Mustacchi /**
3356d14abf15SRobert Mustacchi  Description:
3357d14abf15SRobert Mustacchi  *  Initiates the TCP connection upload process.
3358d14abf15SRobert Mustacchi  *  Posts a Searcher ramrod to the chip.
3359d14abf15SRobert Mustacchi  *
3360d14abf15SRobert Mustacchi  * Assumptions:
3361d14abf15SRobert Mustacchi  *  - Global TOE lock is already taken by the caller.
3362d14abf15SRobert Mustacchi  *  - UM caller has allocated "struct toe_context" phys. cont. buffer
3363d14abf15SRobert Mustacchi  *    and put its address in "data.phys_addr".
3364d14abf15SRobert Mustacchi  * Returns:
3365d14abf15SRobert Mustacchi  *  PENDING (the searcher-delete ramrod completes asynchronously)
3366d14abf15SRobert Mustacchi  *
3367d14abf15SRobert Mustacchi  */
lm_tcp_post_upload_tcp_request(IN struct _lm_device_t * pdev,IN lm_tcp_state_t * tcp,OUT u8_t * command,OUT u64_t * data)3368d14abf15SRobert Mustacchi static lm_status_t lm_tcp_post_upload_tcp_request (
3369d14abf15SRobert Mustacchi     IN    struct _lm_device_t   * pdev,
3370d14abf15SRobert Mustacchi     IN    lm_tcp_state_t        * tcp,
3371d14abf15SRobert Mustacchi     OUT   u8_t                  * command,
3372d14abf15SRobert Mustacchi     OUT   u64_t                 * data
3373d14abf15SRobert Mustacchi     )
3374d14abf15SRobert Mustacchi {
3375d14abf15SRobert Mustacchi     lm_tcp_con_t *rx_con, *tx_con = NULL;
3376d14abf15SRobert Mustacchi     struct toe_spe         spe    = {{0}};
3377d14abf15SRobert Mustacchi     MM_INIT_TCP_LOCK_HANDLE();
3378d14abf15SRobert Mustacchi 
3379d14abf15SRobert Mustacchi     DbgMessage(pdev, VERBOSEl4sp, "## lm_tcp_post_upload_tcp_request\n");
3380d14abf15SRobert Mustacchi     DbgBreakIf(tcp->hdr.status < STATE_STATUS_NORMAL);
3381d14abf15SRobert Mustacchi     DbgBreakIf(tcp->hdr.status >= STATE_STATUS_UPLOAD_PENDING);
3382d14abf15SRobert Mustacchi     DbgBreakIf(tcp->hdr.state_id != STATE_ID_TCP);
3383d14abf15SRobert Mustacchi 
3384d14abf15SRobert Mustacchi 
3385d14abf15SRobert Mustacchi     /* Set the status of the connection to UPLOAD_PENDING */
3386d14abf15SRobert Mustacchi     tcp->hdr.status = STATE_STATUS_UPLOAD_PENDING;
3387d14abf15SRobert Mustacchi 
3388d14abf15SRobert Mustacchi     if (tcp->ulp_type == TOE_CONNECTION_TYPE) {
3389d14abf15SRobert Mustacchi         /* Get Rx and Tx connections */
3390d14abf15SRobert Mustacchi         rx_con = tcp->rx_con;
3391d14abf15SRobert Mustacchi         tx_con = tcp->tx_con;
3392d14abf15SRobert Mustacchi 
3393d14abf15SRobert Mustacchi         /* Set the flags for the connections (Rx and Tx) */
3394d14abf15SRobert Mustacchi         /* Tx */
3395d14abf15SRobert Mustacchi         mm_acquire_tcp_lock(pdev, tx_con);
3396d14abf15SRobert Mustacchi         DbgBreakIf(tx_con->flags & TCP_TRM_REQ_POSTED);
3397d14abf15SRobert Mustacchi         tx_con->flags |= TCP_TRM_REQ_POSTED;
3398d14abf15SRobert Mustacchi         mm_release_tcp_lock(pdev, tx_con);
3399d14abf15SRobert Mustacchi         /* Rx */
3400d14abf15SRobert Mustacchi         mm_acquire_tcp_lock(pdev, rx_con);
3401d14abf15SRobert Mustacchi         DbgBreakIf(rx_con->flags & TCP_TRM_REQ_POSTED);
3402d14abf15SRobert Mustacchi         rx_con->flags |= TCP_TRM_REQ_POSTED;
3403d14abf15SRobert Mustacchi         mm_release_tcp_lock(pdev, rx_con);
3404d14abf15SRobert Mustacchi     }
3405d14abf15SRobert Mustacchi 
3406d14abf15SRobert Mustacchi     tcp->sp_flags |= SP_TCP_SRC_REQ_POSTED;
3407d14abf15SRobert Mustacchi 
3408d14abf15SRobert Mustacchi     *command = (tcp->ulp_type == TOE_CONNECTION_TYPE)? RAMROD_OPCODE_TOE_SEARCHER_DELETE : L5CM_RAMROD_CMD_ID_SEARCHER_DELETE;
3409d14abf15SRobert Mustacchi     spe.toe_data.rx_completion.hash_value = (u16_t)(tcp->tcp_const.hash_value);
3410d14abf15SRobert Mustacchi     *data = *((u64_t*)(&(spe.toe_data.rx_completion)));
3411d14abf15SRobert Mustacchi 
3412d14abf15SRobert Mustacchi     return LM_STATUS_PENDING;
3413d14abf15SRobert Mustacchi }
3414d14abf15SRobert Mustacchi 
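/** Description
 *  posts the query ramrod: zeroes the per-connection slow path data buffer
 *  and passes its physical address to the firmware. Always returns PENDING.
 */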
lm_tcp_post_query_request(IN struct _lm_device_t * pdev,IN lm_tcp_state_t * tcp,OUT u8_t * command,OUT u64_t * data,IN lm_tcp_slow_path_request_t * request)3415d14abf15SRobert Mustacchi static lm_status_t lm_tcp_post_query_request (
3416d14abf15SRobert Mustacchi     IN    struct _lm_device_t        * pdev,
3417d14abf15SRobert Mustacchi     IN    lm_tcp_state_t             * tcp,
3418d14abf15SRobert Mustacchi     OUT   u8_t                       * command,
3419d14abf15SRobert Mustacchi     OUT   u64_t                      * data,
3420d14abf15SRobert Mustacchi     IN    lm_tcp_slow_path_request_t * request
3421d14abf15SRobert Mustacchi     )
3422d14abf15SRobert Mustacchi {
3423d14abf15SRobert Mustacchi     struct toe_spe spe = {{0}};
3424d14abf15SRobert Mustacchi 
3425d14abf15SRobert Mustacchi     UNREFERENCED_PARAMETER_(request);
3426d14abf15SRobert Mustacchi 
3427d14abf15SRobert Mustacchi     DbgMessage(pdev, VERBOSEl4sp, "## lm_tcp_post_query_request\n");
3428d14abf15SRobert Mustacchi 
3429d14abf15SRobert Mustacchi     tcp->sp_flags |= SP_TCP_QRY_REQ_POSTED;
3430d14abf15SRobert Mustacchi     *command = (tcp->ulp_type == TOE_CONNECTION_TYPE)? RAMROD_OPCODE_TOE_QUERY : L5CM_RAMROD_CMD_ID_QUERY;
3431d14abf15SRobert Mustacchi 
3432d14abf15SRobert Mustacchi     mm_memset(tcp->sp_req_data.virt_addr, 0, TOE_SP_PHYS_DATA_SIZE);
3433d14abf15SRobert Mustacchi 
3434d14abf15SRobert Mustacchi     spe.toe_data.phys_addr.hi = tcp->sp_req_data.phys_addr.as_u32.high;
3435d14abf15SRobert Mustacchi     spe.toe_data.phys_addr.lo = tcp->sp_req_data.phys_addr.as_u32.low;
3436d14abf15SRobert Mustacchi     *data = *((u64_t*)(&(spe.toe_data.phys_addr)));
3437d14abf15SRobert Mustacchi 
3438d14abf15SRobert Mustacchi     return LM_STATUS_PENDING;
3439d14abf15SRobert Mustacchi }
3440d14abf15SRobert Mustacchi 
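/** Description
 *  initiates the upload of a path state and returns its delegated
 *  parameters. Completes synchronously (SUCCESS) when the path has no
 *  dependent connections, otherwise marks it UPLOAD_PENDING and returns
 *  PENDING.
 */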
lm_tcp_post_upload_path_request(struct _lm_device_t * pdev,lm_path_state_t * path_state,l4_path_delegated_state_t * ret_delegated)3441d14abf15SRobert Mustacchi lm_status_t lm_tcp_post_upload_path_request (
3442d14abf15SRobert Mustacchi     struct _lm_device_t * pdev,
3443d14abf15SRobert Mustacchi     lm_path_state_t * path_state,
3444d14abf15SRobert Mustacchi     l4_path_delegated_state_t * ret_delegated)
3445d14abf15SRobert Mustacchi {
3446d14abf15SRobert Mustacchi 
3447d14abf15SRobert Mustacchi     DbgBreakIf(path_state->hdr.status != STATE_STATUS_NORMAL);
3448d14abf15SRobert Mustacchi     DbgBreakIf(path_state->hdr.state_id != STATE_ID_PATH);
3449d14abf15SRobert Mustacchi 
3450d14abf15SRobert Mustacchi     /* MichalS TBA: do we need this? (also in spec ('ipv4_current_ip_id' unclear)) */
3451d14abf15SRobert Mustacchi     *ret_delegated = path_state->path_delegated;
3452d14abf15SRobert Mustacchi 
3453d14abf15SRobert Mustacchi     DbgMessage(pdev, INFORMl4sp, "lm_tcp_post_upload_path_request: num_dependents=%d\n", path_state->num_dependents);
3454d14abf15SRobert Mustacchi 
3455d14abf15SRobert Mustacchi     if (path_state->num_dependents == 0) {
3456d14abf15SRobert Mustacchi         path_state->hdr.status = STATE_STATUS_UPLOAD_DONE;
3457d14abf15SRobert Mustacchi         return LM_STATUS_SUCCESS;
3458d14abf15SRobert Mustacchi     }
3459d14abf15SRobert Mustacchi     path_state->hdr.status = STATE_STATUS_UPLOAD_PENDING;
3460d14abf15SRobert Mustacchi     return LM_STATUS_PENDING;
3461d14abf15SRobert Mustacchi 
3462d14abf15SRobert Mustacchi }
3463d14abf15SRobert Mustacchi 
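/** Description
 *  initiates the upload of a neighbor state. Completes synchronously
 *  (SUCCESS) when no paths depend on it, otherwise marks it UPLOAD_PENDING
 *  and returns PENDING.
 */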
lm_tcp_post_upload_neigh_request(struct _lm_device_t * pdev,lm_neigh_state_t * neigh_state)3464d14abf15SRobert Mustacchi lm_status_t lm_tcp_post_upload_neigh_request(
3465d14abf15SRobert Mustacchi     struct _lm_device_t * pdev,
3466d14abf15SRobert Mustacchi     lm_neigh_state_t * neigh_state
3467d14abf15SRobert Mustacchi     )
3468d14abf15SRobert Mustacchi {
3469d14abf15SRobert Mustacchi     DbgBreakIf(neigh_state->hdr.status != STATE_STATUS_NORMAL);
3470d14abf15SRobert Mustacchi     DbgBreakIf(neigh_state->hdr.state_id != STATE_ID_NEIGH);
3471d14abf15SRobert Mustacchi 
3472d14abf15SRobert Mustacchi     DbgMessage(pdev, INFORMl4sp, "lm_tcp_post_upload_neigh_request: num_dependents=%d\n", neigh_state->num_dependents);
3473d14abf15SRobert Mustacchi 
3474d14abf15SRobert Mustacchi     #if DBG
3475d14abf15SRobert Mustacchi     {
3476d14abf15SRobert Mustacchi         /* NirV: multi client todo */
3477d14abf15SRobert Mustacchi         lm_path_state_t * path = (lm_path_state_t *) d_list_peek_head(&pdev->toe_info.state_blk.path_list);
3478d14abf15SRobert Mustacchi         while(path) {
3479d14abf15SRobert Mustacchi             if(path->neigh == neigh_state) {
3480d14abf15SRobert Mustacchi                 DbgBreakIf(path->hdr.status == STATE_STATUS_NORMAL);
3481d14abf15SRobert Mustacchi             }
3482d14abf15SRobert Mustacchi             path = (lm_path_state_t *) d_list_next_entry(&path->hdr.link);
3483d14abf15SRobert Mustacchi         }
3484d14abf15SRobert Mustacchi     }
3485d14abf15SRobert Mustacchi     #endif
3486d14abf15SRobert Mustacchi 
3487d14abf15SRobert Mustacchi     if (neigh_state->num_dependents == 0) {
3488d14abf15SRobert Mustacchi         neigh_state->hdr.status = STATE_STATUS_UPLOAD_DONE;
3489d14abf15SRobert Mustacchi         return LM_STATUS_SUCCESS;
3490d14abf15SRobert Mustacchi     }
3491d14abf15SRobert Mustacchi     neigh_state->hdr.status = STATE_STATUS_UPLOAD_PENDING;
3492d14abf15SRobert Mustacchi     return LM_STATUS_PENDING;
3493d14abf15SRobert Mustacchi 
3494d14abf15SRobert Mustacchi }
3495d14abf15SRobert Mustacchi 
3496d14abf15SRobert Mustacchi /* sets the cached TCP parameters and fills the update-ramrod context (which is initially all zeros) */
lm_tcp_set_tcp_cached(struct _lm_device_t * pdev,lm_tcp_state_t * tcp,l4_tcp_cached_state_t * tcp_cached,void * mem_virt)3497d14abf15SRobert Mustacchi static lm_status_t lm_tcp_set_tcp_cached(
3498d14abf15SRobert Mustacchi     struct _lm_device_t     * pdev,
3499d14abf15SRobert Mustacchi     lm_tcp_state_t          * tcp,
3500d14abf15SRobert Mustacchi     l4_tcp_cached_state_t   * tcp_cached,
3501d14abf15SRobert Mustacchi     void                    * mem_virt        /* firmware context */
3502d14abf15SRobert Mustacchi     )
3503d14abf15SRobert Mustacchi {
3504d14abf15SRobert Mustacchi     struct toe_update_ramrod_cached_params * ctx       = mem_virt;
3505d14abf15SRobert Mustacchi     l4_ofld_params_t                       * l4_params = &(pdev->ofld_info.l4_params);
3506d14abf15SRobert Mustacchi 
3507d14abf15SRobert Mustacchi     MM_INIT_TCP_LOCK_HANDLE();
3508d14abf15SRobert Mustacchi 
3509d14abf15SRobert Mustacchi     /* tcp-flags */
3510d14abf15SRobert Mustacchi     DbgMessage(pdev, INFORMl4sp, "## lm_tcp_set_tcp_cached cid=%d\n", tcp->cid);
3511d14abf15SRobert Mustacchi 
3512d14abf15SRobert Mustacchi     if ((tcp->tcp_cached.tcp_flags & TCP_FLAG_ENABLE_KEEP_ALIVE) !=
3513d14abf15SRobert Mustacchi         (tcp_cached->tcp_flags & TCP_FLAG_ENABLE_KEEP_ALIVE)) {
3514d14abf15SRobert Mustacchi         if (tcp_cached->tcp_flags & TCP_FLAG_ENABLE_KEEP_ALIVE) {
3515d14abf15SRobert Mustacchi             ctx->enable_keepalive = 1;
3516d14abf15SRobert Mustacchi         } else {
3517d14abf15SRobert Mustacchi             ctx->enable_keepalive = 0;
3518d14abf15SRobert Mustacchi         }
3519d14abf15SRobert Mustacchi         ctx->changed_fields |= TOE_UPDATE_RAMROD_CACHED_PARAMS_ENABLE_KEEPALIVE_CHANGED;
3520d14abf15SRobert Mustacchi         DbgMessage(pdev, INFORMl4sp, "## tcp_cached: [cid=%d] update : flag TCP_FLAG_ENABLE_KEEP_ALIVE changed to %d\n",
3521d14abf15SRobert Mustacchi                     tcp->cid, ctx->enable_keepalive);
3522d14abf15SRobert Mustacchi     }
3523d14abf15SRobert Mustacchi     if ((tcp->tcp_cached.tcp_flags & TCP_FLAG_ENABLE_NAGLING) !=
3524d14abf15SRobert Mustacchi         (tcp_cached->tcp_flags & TCP_FLAG_ENABLE_NAGLING)) {
3525d14abf15SRobert Mustacchi         if (tcp_cached->tcp_flags & TCP_FLAG_ENABLE_NAGLING) {
3526d14abf15SRobert Mustacchi             ctx->enable_nagle = 1;
3527d14abf15SRobert Mustacchi         } else {
3528d14abf15SRobert Mustacchi             ctx->enable_nagle = 0;
3529d14abf15SRobert Mustacchi         }
3530d14abf15SRobert Mustacchi         ctx->changed_fields |= TOE_UPDATE_RAMROD_CACHED_PARAMS_ENABLE_NAGLE_CHANGED;
3531d14abf15SRobert Mustacchi         DbgMessage(pdev, INFORMl4sp, "## tcp_cached [cid=%d] update : flag TCP_FLAG_ENABLE_NAGLING changed to %d\n",
3532d14abf15SRobert Mustacchi                     tcp->cid, ctx->enable_nagle);
3533d14abf15SRobert Mustacchi     }
3534d14abf15SRobert Mustacchi     if (tcp_cached->tcp_flags & TCP_FLAG_RESTART_KEEP_ALIVE) {
3535d14abf15SRobert Mustacchi         ctx->ka_restart = 1;
3536d14abf15SRobert Mustacchi         DbgMessage(pdev, INFORMl4sp, "## tcp_cached [cid=%d] update : flag TCP_FLAG_RESTART_KEEP_ALIVE set\n",
3537d14abf15SRobert Mustacchi                     tcp->cid);
3538d14abf15SRobert Mustacchi     } else {
3539d14abf15SRobert Mustacchi         ctx->ka_restart = 0;
3540d14abf15SRobert Mustacchi     }
3541d14abf15SRobert Mustacchi     if (tcp_cached->tcp_flags & TCP_FLAG_RESTART_MAX_RT) {
3542d14abf15SRobert Mustacchi         ctx->retransmit_restart = 1;
3543d14abf15SRobert Mustacchi         DbgMessage(pdev, INFORMl4sp, "## tcp_cached [cid=%d] update : flag TCP_FLAG_RESTART_MAX_RT set\n",
3544d14abf15SRobert Mustacchi                     tcp->cid);
3545d14abf15SRobert Mustacchi     } else {
3546d14abf15SRobert Mustacchi         ctx->retransmit_restart = 0;
3547d14abf15SRobert Mustacchi     }
3548d14abf15SRobert Mustacchi     if (tcp_cached->tcp_flags & TCP_FLAG_UPDATE_RCV_WINDOW) {
3549d14abf15SRobert Mustacchi         /* for debugging purposes */
3550d14abf15SRobert Mustacchi         DbgMessage(pdev, INFORMl4sp, "## tcp_cached [cid=%d] update : flag TCP_FLAG_UPDATE_RCV_WINDOW set\n",
3551d14abf15SRobert Mustacchi                     tcp->cid);
3552d14abf15SRobert Mustacchi     }
3553d14abf15SRobert Mustacchi 
3554d14abf15SRobert Mustacchi     tcp->tcp_cached.tcp_flags = tcp_cached->tcp_flags;
3555d14abf15SRobert Mustacchi 
3556d14abf15SRobert Mustacchi     /* flow label ipv6 only */
3557d14abf15SRobert Mustacchi     if (tcp->path->path_const.ip_version == IP_VERSION_IPV6) {
3558d14abf15SRobert Mustacchi         if (tcp->tcp_cached.flow_label != tcp_cached->flow_label) {
3559d14abf15SRobert Mustacchi             DbgMessage(pdev, INFORMl4sp, "## tcp_cached [cid=%d] update : flow_label changed from %d to %d\n",
3560d14abf15SRobert Mustacchi                         tcp->cid, tcp->tcp_cached.flow_label, tcp_cached->flow_label);
3561d14abf15SRobert Mustacchi             tcp->tcp_cached.flow_label = tcp_cached->flow_label;
3562d14abf15SRobert Mustacchi             ctx->flow_label= tcp->tcp_cached.flow_label;
3563d14abf15SRobert Mustacchi             ctx->changed_fields |= TOE_UPDATE_RAMROD_CACHED_PARAMS_FLOW_LABEL_CHANGED;
3564d14abf15SRobert Mustacchi         }
3565d14abf15SRobert Mustacchi     }
3566d14abf15SRobert Mustacchi 
3567d14abf15SRobert Mustacchi     /* initial_rcv_wnd */
3568d14abf15SRobert Mustacchi     if (tcp->tcp_cached.initial_rcv_wnd != tcp_cached->initial_rcv_wnd) {
3569d14abf15SRobert Mustacchi         DbgMessage(pdev, INFORMl4sp, "## tcp_cached [cid=%d] update : initial_rcv_wnd changed from %d to %d\n",
3570d14abf15SRobert Mustacchi                     tcp->cid, tcp->tcp_cached.initial_rcv_wnd, tcp_cached->initial_rcv_wnd);
3571d14abf15SRobert Mustacchi         /* no change to firmware */
3572d14abf15SRobert Mustacchi         mm_tcp_update_required_gen_bufs(pdev,
3573d14abf15SRobert Mustacchi                                         tcp->rx_con->u.rx.sws_info.mss,   /* new-mss(no change)*/
3574d14abf15SRobert Mustacchi                                         tcp->rx_con->u.rx.sws_info.mss,   /* old-mss*/
3575d14abf15SRobert Mustacchi                                         tcp_cached->initial_rcv_wnd,      /* new initial receive window */
3576d14abf15SRobert Mustacchi                                         tcp->tcp_cached.initial_rcv_wnd); /* old initial receive window */
3577d14abf15SRobert Mustacchi 
3578d14abf15SRobert Mustacchi         /* In VISTA and higher, window CAN decrease! */
3579d14abf15SRobert Mustacchi         if ERR_IF(tcp_cached->initial_rcv_wnd > MAX_INITIAL_RCV_WND) {
3580d14abf15SRobert Mustacchi             /* TBD: Miniport doesn't handle any parameter other than SUCCESS / PENDING... */
3581d14abf15SRobert Mustacchi             /* TODO: return LM_STATUS_INVALID_PARAMETER; */
3582d14abf15SRobert Mustacchi             DbgBreakIfAll(tcp_cached->initial_rcv_wnd > MAX_INITIAL_RCV_WND);
3583d14abf15SRobert Mustacchi         }
3584d14abf15SRobert Mustacchi         /* update the sws bytes accordingly */
3585d14abf15SRobert Mustacchi         mm_acquire_tcp_lock(pdev, tcp->rx_con);
3586d14abf15SRobert Mustacchi         /* it's now time to ring the window doorbell in case there was a window update - the update could be negative, in which case special handling is required... */
3587d14abf15SRobert Mustacchi         if (tcp->tcp_cached.initial_rcv_wnd < tcp_cached->initial_rcv_wnd) {
3588d14abf15SRobert Mustacchi             /* regular window update */
3589d14abf15SRobert Mustacchi             lm_tcp_rx_post_sws(pdev, tcp, tcp->rx_con, tcp_cached->initial_rcv_wnd - tcp->tcp_cached.initial_rcv_wnd, TCP_RX_POST_SWS_INC);
3590d14abf15SRobert Mustacchi         } else {
3591d14abf15SRobert Mustacchi             lm_tcp_rx_post_sws(pdev, tcp, tcp->rx_con, tcp->tcp_cached.initial_rcv_wnd - tcp_cached->initial_rcv_wnd, TCP_RX_POST_SWS_DEC);
3592d14abf15SRobert Mustacchi             pdev->toe_info.toe_events |= LM_TOE_EVENT_WINDOW_DECREASE;
3593d14abf15SRobert Mustacchi         }
3594d14abf15SRobert Mustacchi         mm_release_tcp_lock(pdev, tcp->rx_con);
3595d14abf15SRobert Mustacchi         tcp->tcp_cached.initial_rcv_wnd = tcp_cached->initial_rcv_wnd;
3596d14abf15SRobert Mustacchi         ctx->initial_rcv_wnd = tcp->tcp_cached.initial_rcv_wnd;
3597d14abf15SRobert Mustacchi         ctx->changed_fields |= TOE_UPDATE_RAMROD_CACHED_PARAMS_INITIAL_RCV_WND_CHANGED;
3598d14abf15SRobert Mustacchi     }
3599d14abf15SRobert Mustacchi 
3600d14abf15SRobert Mustacchi     /*ttl_or_hop_limit*/
3601d14abf15SRobert Mustacchi     if (tcp->tcp_cached.ttl_or_hop_limit != tcp_cached->ttl_or_hop_limit) {
3602d14abf15SRobert Mustacchi         DbgMessage(pdev, INFORMl4sp, "## tcp_cached [cid=%d] update : ttl_or_hop_limit changed from %d to %d\n",
3603d14abf15SRobert Mustacchi                         tcp->cid, tcp->tcp_cached.ttl_or_hop_limit, tcp_cached->ttl_or_hop_limit);
3604d14abf15SRobert Mustacchi         tcp->tcp_cached.ttl_or_hop_limit = tcp_cached->ttl_or_hop_limit;
3605d14abf15SRobert Mustacchi         if (tcp->path->path_const.ip_version == IP_VERSION_IPV4) {
3606d14abf15SRobert Mustacchi             ctx->ttl= tcp->tcp_cached.ttl_or_hop_limit;
3607d14abf15SRobert Mustacchi             ctx->changed_fields |= TOE_UPDATE_RAMROD_CACHED_PARAMS_TTL_CHANGED;
3608d14abf15SRobert Mustacchi         } else {
3609d14abf15SRobert Mustacchi             ctx->hop_limit = tcp->tcp_cached.ttl_or_hop_limit;
3610d14abf15SRobert Mustacchi             ctx->changed_fields |= TOE_UPDATE_RAMROD_CACHED_PARAMS_HOP_LIMIT_CHANGED;
3611d14abf15SRobert Mustacchi         }
3612d14abf15SRobert Mustacchi     }
3613d14abf15SRobert Mustacchi 
3614d14abf15SRobert Mustacchi     /* tos_or_traffic_class */
3615d14abf15SRobert Mustacchi     if (tcp->tcp_cached.tos_or_traffic_class != tcp_cached->tos_or_traffic_class) {
3616d14abf15SRobert Mustacchi         DbgMessage(pdev, INFORMl4sp, "## tcp_cached [cid=%d] update : tos_or_traffic_class changed from %d to %d\n",
3617d14abf15SRobert Mustacchi                     tcp->cid, tcp->tcp_cached.tos_or_traffic_class, tcp_cached->tos_or_traffic_class);
3618d14abf15SRobert Mustacchi         tcp->tcp_cached.tos_or_traffic_class = tcp_cached->tos_or_traffic_class;
3619d14abf15SRobert Mustacchi 
3620d14abf15SRobert Mustacchi         if (tcp->path->path_const.ip_version == IP_VERSION_IPV4) {
3621d14abf15SRobert Mustacchi             ctx->tos = tcp_cached->tos_or_traffic_class;
3622d14abf15SRobert Mustacchi             ctx->changed_fields |= TOE_UPDATE_RAMROD_CACHED_PARAMS_TOS_CHANGED;
3623d14abf15SRobert Mustacchi         } else {
3624d14abf15SRobert Mustacchi             ctx->traffic_class = tcp_cached->tos_or_traffic_class;
3625d14abf15SRobert Mustacchi             ctx->changed_fields |= TOE_UPDATE_RAMROD_CACHED_PARAMS_TRAFFIC_CLASS_CHANGED;
3626d14abf15SRobert Mustacchi         }
3627d14abf15SRobert Mustacchi     }
3628d14abf15SRobert Mustacchi 
3629d14abf15SRobert Mustacchi     /* ka_probe_cnt */
3630d14abf15SRobert Mustacchi     if (tcp->tcp_cached.ka_probe_cnt != tcp_cached->ka_probe_cnt) {
3631d14abf15SRobert Mustacchi         DbgMessage(pdev, INFORMl4sp, "## tcp_cached [cid=%d] update : ka_probe_cnt changed from %d to %d\n",
3632d14abf15SRobert Mustacchi                     tcp->cid, tcp->tcp_cached.ka_probe_cnt, tcp_cached->ka_probe_cnt);
3633d14abf15SRobert Mustacchi         tcp->tcp_cached.ka_probe_cnt = tcp_cached->ka_probe_cnt;
3634d14abf15SRobert Mustacchi         ctx->ka_max_probe_count = tcp_cached->ka_probe_cnt;
3635d14abf15SRobert Mustacchi         ctx->changed_fields |= TOE_UPDATE_RAMROD_CACHED_PARAMS_KA_MAX_PROBE_COUNT_CHANGED;
3636d14abf15SRobert Mustacchi     }
3637d14abf15SRobert Mustacchi 
3638d14abf15SRobert Mustacchi     /* user_priority */
3639d14abf15SRobert Mustacchi     if (tcp->tcp_cached.user_priority != tcp_cached->user_priority) {
3640d14abf15SRobert Mustacchi         DbgMessage(pdev, INFORMl4sp, "## tcp_cached [cid=%d] update : user_priority changed from %d to %d\n",
3641d14abf15SRobert Mustacchi                     tcp->cid, tcp->tcp_cached.user_priority, tcp_cached->user_priority);
3642d14abf15SRobert Mustacchi         DbgBreakIf(tcp_cached->user_priority > 0x7);
3643d14abf15SRobert Mustacchi         tcp->tcp_cached.user_priority = tcp_cached->user_priority;
3644d14abf15SRobert Mustacchi         ctx->user_priority = tcp_cached->user_priority;
3645d14abf15SRobert Mustacchi         ctx->changed_fields |= TOE_UPDATE_RAMROD_CACHED_PARAMS_USER_PRIORITY_CHANGED;
3646d14abf15SRobert Mustacchi     }
3647d14abf15SRobert Mustacchi 
3648d14abf15SRobert Mustacchi     /* rcv_indication_size */
3649d14abf15SRobert Mustacchi     DbgBreakIf(tcp_cached->rcv_indication_size != 0);
3650d14abf15SRobert Mustacchi     if (tcp->tcp_cached.rcv_indication_size != tcp_cached->rcv_indication_size) {
3651d14abf15SRobert Mustacchi         DbgMessage(pdev, INFORMl4sp, "## tcp_cached [cid=%d] update : rcv_indication_size changed from %d to %d\n",
3652d14abf15SRobert Mustacchi                     tcp->cid, tcp->tcp_cached.rcv_indication_size, tcp_cached->rcv_indication_size);
3653d14abf15SRobert Mustacchi         DbgBreakIf(tcp->tcp_cached.rcv_indication_size > 0xffff);
3654d14abf15SRobert Mustacchi         tcp->tcp_cached.rcv_indication_size = tcp_cached->rcv_indication_size;
3655d14abf15SRobert Mustacchi         ctx->rcv_indication_size = (u16_t)tcp_cached->rcv_indication_size;
3656d14abf15SRobert Mustacchi         ctx->changed_fields |= TOE_UPDATE_RAMROD_CACHED_PARAMS_RCV_INDICATION_SIZE_CHANGED;
3657d14abf15SRobert Mustacchi     }
3658d14abf15SRobert Mustacchi 
3659d14abf15SRobert Mustacchi     /* ka_time_out */
3660d14abf15SRobert Mustacchi     if (tcp->tcp_cached.ka_time_out != tcp_cached->ka_time_out) {
3661d14abf15SRobert Mustacchi         DbgMessage(pdev, INFORMl4sp, "## tcp_cached [cid=%d] update : ka_time_out changed from %d to %d\n",
3662d14abf15SRobert Mustacchi                     tcp->cid, tcp->tcp_cached.ka_time_out, tcp_cached->ka_time_out);
3663d14abf15SRobert Mustacchi         tcp->tcp_cached.ka_time_out = tcp_cached->ka_time_out;
3664d14abf15SRobert Mustacchi         ctx->ka_timeout =
3665d14abf15SRobert Mustacchi             lm_time_resolution(pdev, tcp->tcp_cached.ka_time_out, l4_params->ticks_per_second, TIMERS_TICKS_PER_SEC);
3666d14abf15SRobert Mustacchi         ctx->changed_fields |= TOE_UPDATE_RAMROD_CACHED_PARAMS_KA_TIMEOUT_CHANGED;
3667d14abf15SRobert Mustacchi     }
3668d14abf15SRobert Mustacchi 
3669d14abf15SRobert Mustacchi     /* ka_interval */
3670d14abf15SRobert Mustacchi     if (tcp->tcp_cached.ka_interval != tcp_cached->ka_interval) {
3671d14abf15SRobert Mustacchi         DbgMessage(pdev, INFORMl4sp, "## tcp_cached [cid=%d] update : ka_interval changed from %d to %d\n",
3672d14abf15SRobert Mustacchi                     tcp->cid, tcp->tcp_cached.ka_interval, tcp_cached->ka_interval);
3673d14abf15SRobert Mustacchi         tcp->tcp_cached.ka_interval = tcp_cached->ka_interval;
3674d14abf15SRobert Mustacchi         ctx->ka_interval =
3675d14abf15SRobert Mustacchi             lm_time_resolution(pdev, tcp->tcp_cached.ka_interval, l4_params->ticks_per_second, TIMERS_TICKS_PER_SEC);
3676d14abf15SRobert Mustacchi         ctx->changed_fields |= TOE_UPDATE_RAMROD_CACHED_PARAMS_KA_INTERVAL_CHANGED;
3677d14abf15SRobert Mustacchi     }
3678d14abf15SRobert Mustacchi 
3679d14abf15SRobert Mustacchi     /* max_rt */
3680d14abf15SRobert Mustacchi     if (tcp->tcp_cached.max_rt != tcp_cached->max_rt) {
3681d14abf15SRobert Mustacchi         DbgMessage(pdev, INFORMl4sp, "## tcp_cached [cid=%d] update : max_rt changed from %d to %d\n",
3682d14abf15SRobert Mustacchi                     tcp->cid, tcp->tcp_cached.max_rt, tcp_cached->max_rt);
3683d14abf15SRobert Mustacchi         tcp->tcp_cached.max_rt = tcp_cached->max_rt;
3684d14abf15SRobert Mustacchi         ctx->max_rt =
3685d14abf15SRobert Mustacchi             lm_time_resolution(pdev, tcp->tcp_cached.max_rt, l4_params->ticks_per_second, TSEMI_CLK1_TICKS_PER_SEC);
3686d14abf15SRobert Mustacchi         ctx->changed_fields |= TOE_UPDATE_RAMROD_CACHED_PARAMS_MAX_RT_CHANGED;
3687d14abf15SRobert Mustacchi     }
3688d14abf15SRobert Mustacchi 
3689d14abf15SRobert Mustacchi     if (!ctx->changed_fields && !ctx->ka_restart && !ctx->retransmit_restart) {
3690d14abf15SRobert Mustacchi         DbgMessage(pdev, INFORMl4sp, "## tcp_cached [cid=%d] update : nothing changed,  completing synchronously\n", tcp->cid);
3691d14abf15SRobert Mustacchi         return LM_STATUS_SUCCESS; /* synchronous complete */
3692d14abf15SRobert Mustacchi     }
3693d14abf15SRobert Mustacchi     //DbgMessage(pdev, WARNl4sp, "## lm_tcp_set_tcp_cached cid=%d DONE!\n", tcp->cid);
3694d14abf15SRobert Mustacchi     return LM_STATUS_PENDING;
3695d14abf15SRobert Mustacchi }
3696d14abf15SRobert Mustacchi 
3697d14abf15SRobert Mustacchi /* sets the cached path parameters (recalculated MSS) and fills the update-ramrod context (which is initially all zeros) */
lm_tcp_set_path_cached(struct _lm_device_t * pdev,lm_tcp_state_t * tcp,l4_path_cached_state_t * path_cached,void * mem_virt)3698d14abf15SRobert Mustacchi static lm_status_t lm_tcp_set_path_cached(
3699d14abf15SRobert Mustacchi     struct _lm_device_t     * pdev,
3700d14abf15SRobert Mustacchi     lm_tcp_state_t          * tcp,
3701d14abf15SRobert Mustacchi     l4_path_cached_state_t  * path_cached,
3702d14abf15SRobert Mustacchi     void                    * mem_virt        /* firmware context */
3703d14abf15SRobert Mustacchi     )
3704d14abf15SRobert Mustacchi {
3705d14abf15SRobert Mustacchi     struct toe_update_ramrod_cached_params * ctx    = mem_virt;
3706d14abf15SRobert Mustacchi     u32_t                                   new_mss = 0;
3707d14abf15SRobert Mustacchi 
3708d14abf15SRobert Mustacchi     new_mss = _lm_tcp_calc_mss(path_cached->path_mtu,
3709d14abf15SRobert Mustacchi                                tcp->tcp_const.remote_mss,
3710d14abf15SRobert Mustacchi                                (tcp->path->path_const.ip_version == IP_VERSION_IPV6),
3711d14abf15SRobert Mustacchi                                tcp->tcp_const.tcp_flags & TCP_FLAG_ENABLE_TIME_STAMP,
3712d14abf15SRobert Mustacchi                                pdev->ofld_info.l4_params.flags & OFLD_PARAM_FLAG_SNAP_ENCAP,
3713d14abf15SRobert Mustacchi                                tcp->path->neigh->neigh_const.vlan_tag  != 0);
3714d14abf15SRobert Mustacchi 
3715d14abf15SRobert Mustacchi     if (new_mss != tcp->rx_con->u.rx.sws_info.mss) {
3716d14abf15SRobert Mustacchi         /* also need to notify um, since this may affect the number of generic buffers
3717d14abf15SRobert Mustacchi          * required. */
3718d14abf15SRobert Mustacchi         DbgMessage(pdev, INFORMl4sp, "## path_cached: tcp [cid=%d] update : mss (as a result of pathMtu) from %d to %d\n",
3719d14abf15SRobert Mustacchi                     tcp->cid, tcp->rx_con->u.rx.sws_info.mss, new_mss);
3720d14abf15SRobert Mustacchi         mm_tcp_update_required_gen_bufs(pdev,
3721d14abf15SRobert Mustacchi                                         new_mss,
3722d14abf15SRobert Mustacchi                                         tcp->rx_con->u.rx.sws_info.mss,   /* old-mss*/
3723d14abf15SRobert Mustacchi                                         tcp->tcp_cached.initial_rcv_wnd,  /* new initial receive window */
3724d14abf15SRobert Mustacchi                                         tcp->tcp_cached.initial_rcv_wnd); /* old initial receive window */
3725d14abf15SRobert Mustacchi 
3726d14abf15SRobert Mustacchi         tcp->rx_con->u.rx.sws_info.mss = new_mss;
3727d14abf15SRobert Mustacchi         DbgBreakIf(new_mss > 0xffff);
3728d14abf15SRobert Mustacchi         ctx->mss = (u16_t)new_mss;
3729d14abf15SRobert Mustacchi         ctx->changed_fields |= TOE_UPDATE_RAMROD_CACHED_PARAMS_MSS_CHANGED;
3730d14abf15SRobert Mustacchi     }
3731d14abf15SRobert Mustacchi 
3732d14abf15SRobert Mustacchi     if (ctx->changed_fields == 0) {
3733d14abf15SRobert Mustacchi         return LM_STATUS_SUCCESS; /* synchronous complete */
3734d14abf15SRobert Mustacchi     }
3735d14abf15SRobert Mustacchi 
3736d14abf15SRobert Mustacchi     return LM_STATUS_PENDING;
3737d14abf15SRobert Mustacchi }
3738d14abf15SRobert Mustacchi 
3739d14abf15SRobert Mustacchi /* sets the cached neighbor parameters (destination MAC address) and fills the update-ramrod context (which is initially all zeros)
3740d14abf15SRobert Mustacchi  * Assumption: this function is only called if, in fact, the destination address changed.
3741d14abf15SRobert Mustacchi  */
lm_tcp_set_neigh_cached(struct _lm_device_t * pdev,lm_tcp_state_t * tcp,l4_neigh_cached_state_t * neigh_cached,void * mem_virt)3742d14abf15SRobert Mustacchi static lm_status_t lm_tcp_set_neigh_cached(
3743d14abf15SRobert Mustacchi     struct _lm_device_t     * pdev,
3744d14abf15SRobert Mustacchi     lm_tcp_state_t          * tcp,
3745d14abf15SRobert Mustacchi     l4_neigh_cached_state_t * neigh_cached,
3746d14abf15SRobert Mustacchi     void                    * mem_virt        /* firmware context */
3747d14abf15SRobert Mustacchi     )
3748d14abf15SRobert Mustacchi {
3749d14abf15SRobert Mustacchi     struct toe_update_ramrod_cached_params * ctx = mem_virt;
3750d14abf15SRobert Mustacchi     int    i                                     = 0;
3751d14abf15SRobert Mustacchi 
3752d14abf15SRobert Mustacchi     DbgMessage(pdev, INFORMl4sp, "## neigh_cached: tcp [cid=%d] update : neighbor dst_addr\n", tcp->cid);
3753d14abf15SRobert Mustacchi 
3754d14abf15SRobert Mustacchi     for (i = 0; i < 6; i++) {
3755d14abf15SRobert Mustacchi         ctx->dest_addr[i] = (u8_t)neigh_cached->dst_addr[i]; /* TBA Michals : is this init correct? order of assignment*/
3756d14abf15SRobert Mustacchi     }
3757d14abf15SRobert Mustacchi     ctx->changed_fields |= TOE_UPDATE_RAMROD_CACHED_PARAMS_DEST_ADDR_CHANGED;
3758d14abf15SRobert Mustacchi 
3759d14abf15SRobert Mustacchi     return LM_STATUS_PENDING;
3760d14abf15SRobert Mustacchi }
3761d14abf15SRobert Mustacchi 
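/** Description
 *  prepares the update ramrod: zeroes the slow path data buffer, points the
 *  ramrod data at its physical address and dispatches to the tcp/path/neigh
 *  cached-parameter setters. Returns SUCCESS when nothing actually changed
 *  (synchronous completion), PENDING otherwise.
 */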
lm_tcp_post_update_request(IN struct _lm_device_t * pdev,IN lm_tcp_state_t * tcp,OUT u8_t * command,OUT u64_t * data,IN lm_tcp_slow_path_request_t * request)3762d14abf15SRobert Mustacchi static lm_status_t lm_tcp_post_update_request (
3763d14abf15SRobert Mustacchi     IN    struct _lm_device_t        * pdev,
3764d14abf15SRobert Mustacchi     IN    lm_tcp_state_t             * tcp,
3765d14abf15SRobert Mustacchi     OUT   u8_t                       * command,
3766d14abf15SRobert Mustacchi     OUT   u64_t                      * data,
3767d14abf15SRobert Mustacchi     IN    lm_tcp_slow_path_request_t * request
3768d14abf15SRobert Mustacchi     )
3769d14abf15SRobert Mustacchi {
3770d14abf15SRobert Mustacchi     struct toe_spe spe       = {{0}};
3771d14abf15SRobert Mustacchi     lm_status_t    lm_status = LM_STATUS_FAILURE ;
3772d14abf15SRobert Mustacchi 
3773d14abf15SRobert Mustacchi     DbgBreakIf(tcp->hdr.state_id != STATE_ID_TCP);
3774d14abf15SRobert Mustacchi 
3775d14abf15SRobert Mustacchi     *command = RAMROD_OPCODE_TOE_UPDATE;
3776d14abf15SRobert Mustacchi     spe.toe_data.phys_addr.hi = tcp->sp_req_data.phys_addr.as_u32.high;
3777d14abf15SRobert Mustacchi     spe.toe_data.phys_addr.lo = tcp->sp_req_data.phys_addr.as_u32.low;
3778d14abf15SRobert Mustacchi     *data = *((u64_t*)(&(spe.toe_data.phys_addr)));
3779d14abf15SRobert Mustacchi     mm_memset(tcp->sp_req_data.virt_addr, 0, sizeof(struct toe_update_ramrod_cached_params));
3780d14abf15SRobert Mustacchi 
3781d14abf15SRobert Mustacchi     DbgBreakIf((tcp->hdr.status != STATE_STATUS_NORMAL) &&
3782d14abf15SRobert Mustacchi                (tcp->hdr.status != STATE_STATUS_ABORTED));
3783d14abf15SRobert Mustacchi 
3784d14abf15SRobert Mustacchi     /* we need to initialize the data for firmware */
3785d14abf15SRobert Mustacchi     switch(request->type) {
3786d14abf15SRobert Mustacchi     case SP_REQUEST_UPDATE_TCP:
3787d14abf15SRobert Mustacchi         lm_status = lm_tcp_set_tcp_cached(pdev, tcp,
3788d14abf15SRobert Mustacchi                                           request->sent_data.tcp_update_data.data,
3789d14abf15SRobert Mustacchi                                           tcp->sp_req_data.virt_addr);
3790d14abf15SRobert Mustacchi         break;
3791d14abf15SRobert Mustacchi     case SP_REQUEST_UPDATE_PATH:
3792d14abf15SRobert Mustacchi         DbgBreakIf(tcp->path->hdr.status != STATE_STATUS_NORMAL);
3793d14abf15SRobert Mustacchi         DbgBreakIf(tcp->path->neigh->hdr.status != STATE_STATUS_NORMAL);
3794d14abf15SRobert Mustacchi         lm_status = lm_tcp_set_path_cached(pdev, tcp,
3795d14abf15SRobert Mustacchi                                            request->sent_data.tcp_update_data.data,
3796d14abf15SRobert Mustacchi                                            tcp->sp_req_data.virt_addr);
3797d14abf15SRobert Mustacchi         break;
3798d14abf15SRobert Mustacchi     case SP_REQUEST_UPDATE_NEIGH:
3799d14abf15SRobert Mustacchi         DbgBreakIf(tcp->path->neigh->hdr.status != STATE_STATUS_NORMAL);
3800d14abf15SRobert Mustacchi 
3801d14abf15SRobert Mustacchi         lm_status = lm_tcp_set_neigh_cached(pdev, tcp,
3802d14abf15SRobert Mustacchi                                             request->sent_data.tcp_update_data.data,
3803d14abf15SRobert Mustacchi                                             tcp->sp_req_data.virt_addr);
3804d14abf15SRobert Mustacchi         break;
3805d14abf15SRobert Mustacchi     case SP_REQUEST_UPDATE_PATH_RELINK:
3806d14abf15SRobert Mustacchi         /* we will always return PENDING status */
3807d14abf15SRobert Mustacchi         DbgBreakIf(tcp->path->neigh->hdr.status != STATE_STATUS_NORMAL);
3808d14abf15SRobert Mustacchi         lm_status = lm_tcp_set_neigh_cached(pdev, tcp,
3809d14abf15SRobert Mustacchi                                             &((lm_tcp_path_relink_cached_t *)request->sent_data.tcp_update_data.data)->neigh_cached,
3810d14abf15SRobert Mustacchi                                             tcp->sp_req_data.virt_addr);
3811d14abf15SRobert Mustacchi 
3812d14abf15SRobert Mustacchi         DbgBreakIf(tcp->path->hdr.status != STATE_STATUS_NORMAL);
3813d14abf15SRobert Mustacchi         DbgBreakIf(tcp->path->neigh->hdr.status != STATE_STATUS_NORMAL);
3814d14abf15SRobert Mustacchi         lm_tcp_set_path_cached(pdev, tcp, &((lm_tcp_path_relink_cached_t *)request->sent_data.tcp_update_data.data)->path_cached,
3815d14abf15SRobert Mustacchi                                            tcp->sp_req_data.virt_addr);
3816d14abf15SRobert Mustacchi         break;
3817d14abf15SRobert Mustacchi     }
3818d14abf15SRobert Mustacchi 
3819d14abf15SRobert Mustacchi     return lm_status;
3820d14abf15SRobert Mustacchi }
3821d14abf15SRobert Mustacchi 
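/** Description
 *  posts an empty ramrod carrying the connection's hash value. Used for the
 *  pending local-FIN / remote-disconnect / TX-RST slow path requests.
 *  Always returns PENDING.
 */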
lm_tcp_post_empty_ramrod_request(IN struct _lm_device_t * pdev,IN lm_tcp_state_t * tcp,OUT u8_t * command,OUT u64_t * data)3822d14abf15SRobert Mustacchi static lm_status_t lm_tcp_post_empty_ramrod_request(
3823d14abf15SRobert Mustacchi     IN struct _lm_device_t         * pdev,
3824d14abf15SRobert Mustacchi     IN lm_tcp_state_t              * tcp,
3825d14abf15SRobert Mustacchi     OUT u8_t                       * command,
3826d14abf15SRobert Mustacchi     OUT u64_t                      * data)
3827d14abf15SRobert Mustacchi {
3828d14abf15SRobert Mustacchi     struct toe_spe spe = {{0}};
3829d14abf15SRobert Mustacchi 
3830d14abf15SRobert Mustacchi     DbgMessage(pdev, VERBOSEl4sp, "## lm_tcp_post_empty_ramrod_request\n");
3831d14abf15SRobert Mustacchi 
3832d14abf15SRobert Mustacchi     *command = RAMROD_OPCODE_TOE_EMPTY_RAMROD;
3833d14abf15SRobert Mustacchi     spe.toe_data.rx_completion.hash_value = (u16_t)(tcp->tcp_const.hash_value);
3834d14abf15SRobert Mustacchi     *data = *((u64_t*)(&(spe.toe_data.rx_completion)));
3835d14abf15SRobert Mustacchi 
3836d14abf15SRobert Mustacchi     return LM_STATUS_PENDING;
3837d14abf15SRobert Mustacchi }
3838d14abf15SRobert Mustacchi 
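/** Description
 *  posts the invalidate ramrod: marks both the Tx and Rx connections with
 *  TCP_INV_REQ_POSTED (each under its own lock) and selects
 *  RAMROD_OPCODE_TOE_INVALIDATE. Always returns PENDING.
 */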
lm_tcp_post_invalidate_request(IN struct _lm_device_t * pdev,IN lm_tcp_state_t * tcp,OUT u8_t * command,OUT u64_t * data)3839d14abf15SRobert Mustacchi static lm_status_t lm_tcp_post_invalidate_request(
3840d14abf15SRobert Mustacchi     IN struct _lm_device_t         * pdev,
3841d14abf15SRobert Mustacchi     IN lm_tcp_state_t              * tcp,
3842d14abf15SRobert Mustacchi     OUT u8_t                       * command,
3843d14abf15SRobert Mustacchi     OUT u64_t                      * data)
3844d14abf15SRobert Mustacchi {
3845d14abf15SRobert Mustacchi     /* Get Rx and Tx connections */
3846d14abf15SRobert Mustacchi     lm_tcp_con_t * rx_con = tcp->rx_con;
3847d14abf15SRobert Mustacchi     lm_tcp_con_t * tx_con = tcp->tx_con;
3848d14abf15SRobert Mustacchi     MM_INIT_TCP_LOCK_HANDLE();
3849d14abf15SRobert Mustacchi 
3850d14abf15SRobert Mustacchi     DbgMessage(pdev, INFORMl4sp, "## lm_tcp_post_invalidate_request cid=%d\n", tcp->cid);
3851d14abf15SRobert Mustacchi 
3852d14abf15SRobert Mustacchi     DbgBreakIf(tcp->hdr.status != STATE_STATUS_NORMAL &&
3853d14abf15SRobert Mustacchi                tcp->hdr.status != STATE_STATUS_ABORTED);
3854d14abf15SRobert Mustacchi 
3855d14abf15SRobert Mustacchi     /* Set the flags for the connections (Rx and Tx) */
3856d14abf15SRobert Mustacchi     /* Tx */
3857d14abf15SRobert Mustacchi     mm_acquire_tcp_lock(pdev, tx_con);
3858d14abf15SRobert Mustacchi     DbgBreakIf(tx_con->flags & TCP_INV_REQ_POSTED);
3859d14abf15SRobert Mustacchi     tx_con->flags |= TCP_INV_REQ_POSTED;
3860d14abf15SRobert Mustacchi     mm_release_tcp_lock(pdev, tx_con);
3861d14abf15SRobert Mustacchi     /* Rx */
3862d14abf15SRobert Mustacchi     mm_acquire_tcp_lock(pdev, rx_con);
3863d14abf15SRobert Mustacchi     DbgBreakIf(rx_con->flags & TCP_INV_REQ_POSTED);
3864d14abf15SRobert Mustacchi     rx_con->flags |= TCP_INV_REQ_POSTED;
3865d14abf15SRobert Mustacchi     mm_release_tcp_lock(pdev, rx_con);
3866d14abf15SRobert Mustacchi 
3867d14abf15SRobert Mustacchi 
3868d14abf15SRobert Mustacchi     *command = RAMROD_OPCODE_TOE_INVALIDATE;
3869d14abf15SRobert Mustacchi     *data = 0;
3870d14abf15SRobert Mustacchi 
3871d14abf15SRobert Mustacchi     return LM_STATUS_PENDING;
3872d14abf15SRobert Mustacchi }
3873d14abf15SRobert Mustacchi 
3874d14abf15SRobert Mustacchi 
3875d14abf15SRobert Mustacchi /* Description:
3876d14abf15SRobert Mustacchi  *  post slow path request of given type for given tcp state
3877d14abf15SRobert Mustacchi  * Assumptions:
3878*48bbca81SDaniel Hoffman  *  - caller initialized request->type according to its specific request
3879d14abf15SRobert Mustacchi  *  - caller allocated space for request->data, according to the specific request type
3880d14abf15SRobert Mustacchi  *  - all previous slow path requests for given tcp state are already completed
3881d14abf15SRobert Mustacchi  * Returns:
3882d14abf15SRobert Mustacchi  *  PENDING, SUCCESS or any failure */
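/* Usage sketch (illustrative only - the caller context, request allocation
 * and completion handling below are assumptions, not code taken from this
 * module):
 *
 *     lm_tcp_slow_path_request_t req = {0};
 *     req.type = SP_REQUEST_INVALIDATE;
 *     if (lm_tcp_post_slow_path_request(pdev, tcp, &req) == LM_STATUS_PENDING) {
 *         // the request stays referenced as tcp->sp_request until the
 *         // ramrod completion arrives; do not reuse 'req' before then
 *     }
 */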
lm_tcp_post_slow_path_request(struct _lm_device_t * pdev,lm_tcp_state_t * tcp,lm_tcp_slow_path_request_t * request)3883d14abf15SRobert Mustacchi lm_status_t lm_tcp_post_slow_path_request(
3884d14abf15SRobert Mustacchi     struct _lm_device_t *pdev,
3885d14abf15SRobert Mustacchi     lm_tcp_state_t *tcp,
3886d14abf15SRobert Mustacchi     lm_tcp_slow_path_request_t *request)
3887d14abf15SRobert Mustacchi {
3888d14abf15SRobert Mustacchi     lm_status_t lm_status = LM_STATUS_INVALID_PARAMETER;
3889d14abf15SRobert Mustacchi     u64_t       data      = 0;
3890d14abf15SRobert Mustacchi     u8_t        command   = 0;
3891d14abf15SRobert Mustacchi 
3892d14abf15SRobert Mustacchi     DbgBreakIf(!(pdev && tcp && request));
3893d14abf15SRobert Mustacchi     DbgBreakIf(tcp->sp_request); /* lm supports only one pending slow path request per connection */
3894d14abf15SRobert Mustacchi     DbgMessage(pdev, VERBOSEl4sp, "### lm_tcp_post_slow_path_request cid=%d, type=%d\n", tcp->cid, request->type);
3895d14abf15SRobert Mustacchi     DbgBreakIf(tcp->cid && (tcp != lm_cid_cookie(pdev, TOE_CONNECTION_TYPE, tcp->cid)));
3896d14abf15SRobert Mustacchi     tcp->sp_request = request;
3897d14abf15SRobert Mustacchi 
3898d14abf15SRobert Mustacchi     switch(request->type) {
3899d14abf15SRobert Mustacchi     /* call the type-specific post function that:
3900d14abf15SRobert Mustacchi        - executes any actions required for the specific sp request (possibly taking the tx/rx locks)
3901d14abf15SRobert Mustacchi        - depending on state, possibly sets the request status and completes the request synchronously
3902d14abf15SRobert Mustacchi        - fills the appropriate content in the lm information structure of the request */
3903d14abf15SRobert Mustacchi     case SP_REQUEST_INITIATE_OFFLOAD:
3904d14abf15SRobert Mustacchi         lm_status = lm_tcp_post_initiate_offload_request(pdev, tcp, &command, &data);
3905d14abf15SRobert Mustacchi         break;
3906d14abf15SRobert Mustacchi     case SP_REQUEST_TERMINATE1_OFFLOAD:
3907d14abf15SRobert Mustacchi         lm_status = lm_tcp_post_terminate_tcp_request(pdev, tcp, &command, &data);
3908d14abf15SRobert Mustacchi         break;
3909d14abf15SRobert Mustacchi     case SP_REQUEST_TERMINATE_OFFLOAD:
3910d14abf15SRobert Mustacchi         lm_status = lm_tcp_post_upload_tcp_request(pdev, tcp, &command, &data);
3911d14abf15SRobert Mustacchi         break;
3912d14abf15SRobert Mustacchi     case SP_REQUEST_QUERY:
3913d14abf15SRobert Mustacchi         lm_status = lm_tcp_post_query_request(pdev, tcp, &command, &data, request);
3914d14abf15SRobert Mustacchi         break;
3915d14abf15SRobert Mustacchi     case SP_REQUEST_UPDATE_TCP:
3916d14abf15SRobert Mustacchi     case SP_REQUEST_UPDATE_PATH:
3917d14abf15SRobert Mustacchi     case SP_REQUEST_UPDATE_NEIGH:
3918d14abf15SRobert Mustacchi     case SP_REQUEST_UPDATE_PATH_RELINK:
3919d14abf15SRobert Mustacchi         lm_status = lm_tcp_post_update_request(pdev, tcp, &command, &data, request);
3920d14abf15SRobert Mustacchi         break;
3921d14abf15SRobert Mustacchi     case SP_REQUEST_INVALIDATE:
3922d14abf15SRobert Mustacchi         lm_status = lm_tcp_post_invalidate_request(pdev, tcp, &command, &data);
3923d14abf15SRobert Mustacchi         break;
3924d14abf15SRobert Mustacchi     case SP_REQUEST_ABORTIVE_DISCONNECT:
3925d14abf15SRobert Mustacchi         lm_status = lm_tcp_post_abortive_disconnect_request(pdev,tcp, &command, &data);
3926d14abf15SRobert Mustacchi         break;
3927d14abf15SRobert Mustacchi     case SP_REQUEST_PENDING_LOCAL_FIN_DISCONNECT:
3928d14abf15SRobert Mustacchi     case SP_REQUEST_PENDING_REMOTE_DISCONNECT:
3929d14abf15SRobert Mustacchi     case SP_REQUEST_PENDING_TX_RST:
3930d14abf15SRobert Mustacchi         lm_status = lm_tcp_post_empty_ramrod_request(pdev, tcp, &command, &data);
3931d14abf15SRobert Mustacchi         break;
3932d14abf15SRobert Mustacchi     default:
3933d14abf15SRobert Mustacchi         DbgBreakMsg("Illegal slow path request type!\n");
3934d14abf15SRobert Mustacchi     }
3935d14abf15SRobert Mustacchi     if(lm_status == LM_STATUS_PENDING) {
3936d14abf15SRobert Mustacchi         DbgMessage(pdev, VERBOSEl4sp,
3937d14abf15SRobert Mustacchi                    "calling lm_command_post, cid=%d, command=%d, con_type=%d, data=%lx\n",
3938d14abf15SRobert Mustacchi                    tcp->cid, command, tcp->ulp_type, data);
3939d14abf15SRobert Mustacchi         if (tcp->hdr.status == STATE_STATUS_UPLOAD_DONE)
3940d14abf15SRobert Mustacchi         {
3941d14abf15SRobert Mustacchi             /* no slow path request can be posted after connection is uploaded */
3942d14abf15SRobert Mustacchi             DbgBreakIf(tcp->hdr.status == STATE_STATUS_UPLOAD_DONE);
3943d14abf15SRobert Mustacchi             tcp->sp_request = NULL;
3944d14abf15SRobert Mustacchi             lm_status = LM_STATUS_INVALID_PARAMETER;
3945d14abf15SRobert Mustacchi         } else
3946d14abf15SRobert Mustacchi         {
3947d14abf15SRobert Mustacchi             lm_command_post(pdev, tcp->cid, command, CMD_PRIORITY_NORMAL, tcp->ulp_type, data);
3948d14abf15SRobert Mustacchi         }
3949d14abf15SRobert Mustacchi     } else {
3950d14abf15SRobert Mustacchi         tcp->sp_request = NULL;
3951d14abf15SRobert Mustacchi     }
3952d14abf15SRobert Mustacchi 
3953d14abf15SRobert Mustacchi     request->status = lm_status;
3954d14abf15SRobert Mustacchi     return lm_status;
3955d14abf15SRobert Mustacchi }
3956d14abf15SRobert Mustacchi 
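/* Illustrative caller sketch (not part of the driver): a minimal example of how a slow
 * path request might be posted via lm_tcp_post_slow_path_request(), assuming the caller
 * owns an already-offloaded tcp state and has allocated the request object as described
 * in the function header above. The wrapper name below is hypothetical.
 */
// static lm_status_t example_post_invalidate(
//     struct _lm_device_t        *pdev,
//     lm_tcp_state_t             *tcp,
//     lm_tcp_slow_path_request_t *req)   /* caller-allocated, no prior request pending */
// {
//     req->type = SP_REQUEST_INVALIDATE;  /* caller initializes the type */
//     /* SP_REQUEST_INVALIDATE carries no request->data; other request types require
//      * type-specific data to be set up by the caller. */
//     return lm_tcp_post_slow_path_request(pdev, tcp, req);
//     /* LM_STATUS_PENDING - a ramrod was posted, completion arrives later;
//      * any other status  - the request completed (or was rejected) synchronously. */
// }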
3957d14abf15SRobert Mustacchi /* slow path request completion template */
3958d14abf15SRobert Mustacchi // lm_status_t lm_tcp_comp_XXX_slow_path_request(struct _lm_device_t *pdev,
3959d14abf15SRobert Mustacchi //                                               lm_tcp_state_t *tcp,
3960d14abf15SRobert Mustacchi //                                               ...cqe...)
3961d14abf15SRobert Mustacchi // {
3962d14abf15SRobert Mustacchi //     lm_tcp_slow_path_request_t *sp_request;
3963d14abf15SRobert Mustacchi //
3964d14abf15SRobert Mustacchi //     DbgMessage(pdev, VERBOSEl4sp, "###lm_tcp_comp_XXX_slow_path_request\n");
3965d14abf15SRobert Mustacchi //     MM_ACQUIRE_TOE_LOCK(pdev);
3966d14abf15SRobert Mustacchi //     DbgBreakIf(tcp->hdr.status != STATE_STATUS_YYY);
3967d14abf15SRobert Mustacchi //     tcp->hdr.status = STATE_STATUS_ZZZ;
3968d14abf15SRobert Mustacchi //     execute lm state actions if required
3969d14abf15SRobert Mustacchi //     lm_sp_ring_command_completed (*) [not here, automatically in 'process CQ']
3970d14abf15SRobert Mustacchi //     MM_RELEASE_TOE_LOCK(pdev);
3971d14abf15SRobert Mustacchi //     under tx lock, execute any Tx actions required (possibly call mm_*)
3972d14abf15SRobert Mustacchi //     under rx lock, execute any Rx actions required (possibly call mm_*)
3973d14abf15SRobert Mustacchi //     MM_ACQUIRE_TOE_LOCK(pdev);
3974d14abf15SRobert Mustacchi //     tcp->sp_flags ~= (SP_REQ_COMPLETED_RX | SP_REQ_COMPLETED_TX)
3975d14abf15SRobert Mustacchi //     tcp->sp_request->status = completion status;
3976d14abf15SRobert Mustacchi //     sp_request = tcp->sp_request;
3977d14abf15SRobert Mustacchi //     tcp->sp_request = NULL
3978d14abf15SRobert Mustacchi //     mm_tcp_comp_slow_path_request(tcp, sp_request)
3979d14abf15SRobert Mustacchi //     MM_RELEASE_TOE_LOCK(pdev);
3980d14abf15SRobert Mustacchi // }
3981d14abf15SRobert Mustacchi void lm_tcp_service_deferred_cqes(lm_device_t * pdev, lm_tcp_state_t * tcp)
3982d14abf15SRobert Mustacchi {
3983d14abf15SRobert Mustacchi     lm_tcp_con_t * con         = tcp->tx_con;
3984d14abf15SRobert Mustacchi     u8_t           idx = 0, dead=FALSE;
3985d14abf15SRobert Mustacchi     MM_INIT_TCP_LOCK_HANDLE();
3986d14abf15SRobert Mustacchi 
3987d14abf15SRobert Mustacchi     DbgMessage(pdev, INFORMl4sp, "### lm_tcp_service_deferred_cqes cid=%d\n", tcp->cid);
3988d14abf15SRobert Mustacchi 
3989d14abf15SRobert Mustacchi 
3990d14abf15SRobert Mustacchi 
3991d14abf15SRobert Mustacchi     for (idx = 0; idx < 2; idx++) {
3992d14abf15SRobert Mustacchi         mm_acquire_tcp_lock(pdev, con);
3993d14abf15SRobert Mustacchi         while(con->flags & TCP_DEFERRED_PROCESSING) {
3994d14abf15SRobert Mustacchi             /* Consistent state: at this stage, since we hold the lock and deferred cqes need the lock
3995d14abf15SRobert Mustacchi              * for processing, it is as if we have just processed X cqes and are about to complete the fp
3996d14abf15SRobert Mustacchi              * of these cqes... While completing the fp and sp the lock may be released; in that case
3997d14abf15SRobert Mustacchi              * more cqes may be processed, and TCP_DEFERRED_PROCESSING will be switched back on. */
3998d14abf15SRobert Mustacchi             con->flags &= ~TCP_DEFERRED_PROCESSING;
3999d14abf15SRobert Mustacchi             DbgMessage(pdev, INFORMl4sp, "### deferred cid=%d\n", tcp->cid);
4000d14abf15SRobert Mustacchi 
4001d14abf15SRobert Mustacchi             if (con->type == TCP_CON_TYPE_RX) {
4002d14abf15SRobert Mustacchi                 lm_tcp_rx_complete_tcp_fp(pdev, con->tcp_state, con);
4003d14abf15SRobert Mustacchi             } else {
4004d14abf15SRobert Mustacchi                 lm_tcp_tx_complete_tcp_fp(pdev, con->tcp_state, con);
4005d14abf15SRobert Mustacchi             }
4006d14abf15SRobert Mustacchi 
4007d14abf15SRobert Mustacchi             if (con->dpc_info.snapshot_flags) {
4008d14abf15SRobert Mustacchi                 mm_release_tcp_lock(pdev, con);
4009d14abf15SRobert Mustacchi 
4010d14abf15SRobert Mustacchi                 if (con->type == TCP_CON_TYPE_RX) {
4011d14abf15SRobert Mustacchi                     lm_tcp_rx_complete_tcp_sp(pdev,tcp, con);
4012d14abf15SRobert Mustacchi                 } else {
4013d14abf15SRobert Mustacchi                     lm_tcp_tx_complete_tcp_sp(pdev,tcp, con);
4014d14abf15SRobert Mustacchi                 }
4015d14abf15SRobert Mustacchi 
4016d14abf15SRobert Mustacchi                 mm_acquire_tcp_lock(pdev, con);
4017d14abf15SRobert Mustacchi             }
4018d14abf15SRobert Mustacchi         }
4019d14abf15SRobert Mustacchi 
4020d14abf15SRobert Mustacchi         con->flags &= ~TCP_COMP_DEFERRED; /* completions are no longer deferred */
4021d14abf15SRobert Mustacchi 
4022d14abf15SRobert Mustacchi         /* It is possible that while processing the deferred cqes the connection was uploaded;
4023d14abf15SRobert Mustacchi          * since the TCP_COMP_DEFERRED flag was still on we didn't delete it yet, so now is the time
4024d14abf15SRobert Mustacchi          * to delete it... Note that this can only happen while we're handling the deferred cqes of
4025d14abf15SRobert Mustacchi          * rx_con, since query only completes on RX and not on TX; that's why it's safe to check here,
4026d14abf15SRobert Mustacchi          * and after handling rx we won't access this connection anymore. */
4027d14abf15SRobert Mustacchi         dead = lm_tcp_is_tcp_dead(pdev, tcp, TCP_IS_DEAD_OP_OFLD_COMP_DFRD);
4028d14abf15SRobert Mustacchi 
4029d14abf15SRobert Mustacchi 
4030d14abf15SRobert Mustacchi         mm_release_tcp_lock(pdev, con);
4031d14abf15SRobert Mustacchi 
4032d14abf15SRobert Mustacchi         con = tcp->rx_con;
4033d14abf15SRobert Mustacchi 
4034d14abf15SRobert Mustacchi         if (dead) {
4035d14abf15SRobert Mustacchi             mm_tcp_del_tcp_state(pdev, tcp);
4036d14abf15SRobert Mustacchi         }
4037d14abf15SRobert Mustacchi 
4038d14abf15SRobert Mustacchi     }
4039d14abf15SRobert Mustacchi }
4040d14abf15SRobert Mustacchi 
4041d14abf15SRobert Mustacchi /* initiate offload request completion */
4042d14abf15SRobert Mustacchi void lm_tcp_comp_initiate_offload_request(
4043d14abf15SRobert Mustacchi     struct _lm_device_t *pdev,
4044d14abf15SRobert Mustacchi     lm_tcp_state_t *tcp,
4045d14abf15SRobert Mustacchi     u32_t comp_status)
4046d14abf15SRobert Mustacchi {
4047d14abf15SRobert Mustacchi     lm_tcp_slow_path_request_t *sp_request;
4048d14abf15SRobert Mustacchi     lm_tcp_con_t *con;
4049d14abf15SRobert Mustacchi     lm_status_t lm_status = LM_STATUS_SUCCESS;
4050d14abf15SRobert Mustacchi     int i;
4051d14abf15SRobert Mustacchi     MM_INIT_TCP_LOCK_HANDLE();
4052d14abf15SRobert Mustacchi 
4053d14abf15SRobert Mustacchi     DbgMessage(pdev, VERBOSEl4sp, "##lm_tcp_comp_initiate_offload_request\n");
4054d14abf15SRobert Mustacchi 
4055d14abf15SRobert Mustacchi     MM_ACQUIRE_TOE_LOCK(pdev);
4056d14abf15SRobert Mustacchi 
4057d14abf15SRobert Mustacchi     DbgBreakIf(tcp->hdr.status != STATE_STATUS_OFFLOAD_PENDING);
4058d14abf15SRobert Mustacchi 
4059d14abf15SRobert Mustacchi     if(!comp_status)
4060d14abf15SRobert Mustacchi     { /* successful completion */
4061d14abf15SRobert Mustacchi         tcp->hdr.status = STATE_STATUS_NORMAL;
4062d14abf15SRobert Mustacchi 
4063d14abf15SRobert Mustacchi         if (tcp->ulp_type == TOE_CONNECTION_TYPE)
4064d14abf15SRobert Mustacchi         {
4065d14abf15SRobert Mustacchi             con = tcp->tx_con;
4066d14abf15SRobert Mustacchi             for (i = 0; i < 2; i++)
4067d14abf15SRobert Mustacchi             {
4068d14abf15SRobert Mustacchi                 mm_acquire_tcp_lock(pdev, con);
4069d14abf15SRobert Mustacchi                 DbgBreakIf(!(con->flags & TCP_COMP_DEFERRED));
4070d14abf15SRobert Mustacchi                 DbgBreakIf(!(con->flags & TCP_POST_BLOCKED));
4071d14abf15SRobert Mustacchi                 con->flags &= ~TCP_POST_BLOCKED; /* posting is now allowed */
4072d14abf15SRobert Mustacchi                 mm_release_tcp_lock(pdev, con);
4073d14abf15SRobert Mustacchi                 con = tcp->rx_con;
4074d14abf15SRobert Mustacchi             }
4075d14abf15SRobert Mustacchi 
4076d14abf15SRobert Mustacchi             // update stats counters if TOE
4077d14abf15SRobert Mustacchi             if( IP_VERSION_IPV4 == tcp->path->path_const.ip_version )
4078d14abf15SRobert Mustacchi             {
4079d14abf15SRobert Mustacchi                 ++pdev->vars.stats.stats_mirror.stats_drv.drv_toe.ipv[STATS_IP_4_IDX].currently_established;
4080d14abf15SRobert Mustacchi             }
4081d14abf15SRobert Mustacchi             else if( IP_VERSION_IPV6 == tcp->path->path_const.ip_version )
4082d14abf15SRobert Mustacchi             {
4083d14abf15SRobert Mustacchi                 ++pdev->vars.stats.stats_mirror.stats_drv.drv_toe.ipv[STATS_IP_6_IDX].currently_established;
4084d14abf15SRobert Mustacchi             }
4085d14abf15SRobert Mustacchi         }
4086d14abf15SRobert Mustacchi     }
4087d14abf15SRobert Mustacchi     else
4088d14abf15SRobert Mustacchi     {
4089d14abf15SRobert Mustacchi #ifndef _VBD_CMD_
4090d14abf15SRobert Mustacchi         DbgMessage(pdev, FATAL, "initiate offload failed. err=%x\n", comp_status);
4091d14abf15SRobert Mustacchi #endif // _VBD_CMD_
4092d14abf15SRobert Mustacchi         tcp->hdr.status = STATE_STATUS_INIT_OFFLOAD_ERR;
4093d14abf15SRobert Mustacchi 
4094d14abf15SRobert Mustacchi         if (tcp->ulp_type == TOE_CONNECTION_TYPE)
4095d14abf15SRobert Mustacchi         {
4096d14abf15SRobert Mustacchi             con = tcp->tx_con;
4097d14abf15SRobert Mustacchi             for (i = 0; i < 2; i++)
4098d14abf15SRobert Mustacchi             {
4099d14abf15SRobert Mustacchi                 mm_acquire_tcp_lock(pdev, con);
4100d14abf15SRobert Mustacchi                 DbgBreakIf((con->flags & ~TCP_INDICATE_REJECTED) != (TCP_POST_BLOCKED | TCP_COMP_DEFERRED));
4101d14abf15SRobert Mustacchi                 con->flags &= ~TCP_COMP_DEFERRED;
4102d14abf15SRobert Mustacchi                 con->flags |= TCP_COMP_BLOCKED; /* completions are blocked */
4103d14abf15SRobert Mustacchi                 mm_release_tcp_lock(pdev, con);
4104d14abf15SRobert Mustacchi                 con = tcp->rx_con;
4105d14abf15SRobert Mustacchi             }
4106d14abf15SRobert Mustacchi         }
4107d14abf15SRobert Mustacchi 
4108d14abf15SRobert Mustacchi         lm_status = LM_STATUS_FAILURE;
4109d14abf15SRobert Mustacchi     }
4110d14abf15SRobert Mustacchi 
4111d14abf15SRobert Mustacchi     DbgBreakIf(tcp->sp_flags & (SP_REQUEST_COMPLETED_RX | SP_REQUEST_COMPLETED_TX));
4112d14abf15SRobert Mustacchi     tcp->sp_request->status = lm_status;
4113d14abf15SRobert Mustacchi //    DbgMessage(pdev, FATAL, "#lm_tcp_comp_initiate_offload_request cid=%d, sp_request->status=%d\n", tcp->cid, tcp->sp_request->status);
4114d14abf15SRobert Mustacchi     sp_request = tcp->sp_request;
4115d14abf15SRobert Mustacchi     tcp->sp_request = NULL;
4116d14abf15SRobert Mustacchi 
4117d14abf15SRobert Mustacchi     DbgBreakIf(!(tcp->sp_flags & SP_TCP_OFLD_REQ_POSTED));
4118d14abf15SRobert Mustacchi     tcp->sp_flags |= SP_TCP_OFLD_REQ_COMP;
4119d14abf15SRobert Mustacchi     mm_tcp_comp_slow_path_request(pdev, tcp, sp_request);
4120d14abf15SRobert Mustacchi 
4121d14abf15SRobert Mustacchi     MM_RELEASE_TOE_LOCK(pdev);
4122d14abf15SRobert Mustacchi 
4123d14abf15SRobert Mustacchi     /* handle deferred CQEs */
4124d14abf15SRobert Mustacchi     if(!comp_status && (tcp->ulp_type == TOE_CONNECTION_TYPE)) {
4125d14abf15SRobert Mustacchi         lm_tcp_service_deferred_cqes(pdev, tcp);
4126d14abf15SRobert Mustacchi     }
4127d14abf15SRobert Mustacchi }
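
/* Orientation note (inferred from the code above rather than from a spec): while the
 * offload ramrod is outstanding the connections are marked TCP_COMP_DEFERRED, so fast
 * path completions that arrive in that window are not reported immediately; once the
 * offload completes successfully they are replayed by lm_tcp_service_deferred_cqes(). */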
4128d14abf15SRobert Mustacchi 
4129d14abf15SRobert Mustacchi void lm_tcp_collect_stats(
4130d14abf15SRobert Mustacchi     struct _lm_device_t *pdev,
4131d14abf15SRobert Mustacchi     lm_tcp_state_t *tcp)
4132d14abf15SRobert Mustacchi {
4133d14abf15SRobert Mustacchi 
4134d14abf15SRobert Mustacchi     if (tcp->tx_con && tcp->rx_con) {
4135d14abf15SRobert Mustacchi         pdev->toe_info.stats.tx_bytes_posted_total += tcp->tx_con->bytes_post_cnt;
4136d14abf15SRobert Mustacchi         pdev->toe_info.stats.tx_rq_complete_calls += tcp->tx_con->rq_completion_calls;
4137d14abf15SRobert Mustacchi         pdev->toe_info.stats.tx_bytes_completed_total += tcp->tx_con->bytes_comp_cnt;
4138d14abf15SRobert Mustacchi         pdev->toe_info.stats.tx_rq_bufs_completed += tcp->tx_con->buffer_completed_cnt;
4139d14abf15SRobert Mustacchi         pdev->toe_info.stats.total_tx_abortion_under_flr += tcp->tx_con->abortion_under_flr;
4140d14abf15SRobert Mustacchi 
4141d14abf15SRobert Mustacchi         pdev->toe_info.stats.rx_rq_complete_calls += tcp->rx_con->rq_completion_calls;
4142d14abf15SRobert Mustacchi         pdev->toe_info.stats.rx_rq_bufs_completed += tcp->rx_con->buffer_completed_cnt;
4143d14abf15SRobert Mustacchi         pdev->toe_info.stats.rx_bytes_completed_total += tcp->rx_con->bytes_comp_cnt;
4144d14abf15SRobert Mustacchi 
4145d14abf15SRobert Mustacchi         pdev->toe_info.stats.rx_accepted_indications += tcp->rx_con->u.rx.gen_info.num_success_indicates;
4146d14abf15SRobert Mustacchi         pdev->toe_info.stats.rx_bufs_indicated_accepted += tcp->rx_con->u.rx.gen_info.num_buffers_indicated;
4147d14abf15SRobert Mustacchi         pdev->toe_info.stats.rx_bytes_indicated_accepted += tcp->rx_con->u.rx.gen_info.bytes_indicated_accepted;
4148d14abf15SRobert Mustacchi 
4149d14abf15SRobert Mustacchi         pdev->toe_info.stats.rx_rejected_indications += tcp->rx_con->u.rx.gen_info.num_failed_indicates;
4150d14abf15SRobert Mustacchi         pdev->toe_info.stats.rx_bufs_indicated_rejected += tcp->rx_con->u.rx.gen_info.bufs_indicated_rejected;
4151d14abf15SRobert Mustacchi         pdev->toe_info.stats.rx_bytes_indicated_rejected += tcp->rx_con->u.rx.gen_info.bytes_indicated_rejected;
4152d14abf15SRobert Mustacchi         pdev->toe_info.stats.total_num_non_full_indications += tcp->rx_con->u.rx.gen_info.num_non_full_indications;
4153d14abf15SRobert Mustacchi 
4154d14abf15SRobert Mustacchi         pdev->toe_info.stats.rx_zero_byte_recv_reqs += tcp->rx_con->u.rx.rx_zero_byte_recv_reqs;
4155d14abf15SRobert Mustacchi         pdev->toe_info.stats.rx_bufs_copied_grq += tcp->rx_con->u.rx.gen_info.num_buffers_copied_grq;
4156d14abf15SRobert Mustacchi         pdev->toe_info.stats.rx_bufs_copied_rq += tcp->rx_con->u.rx.gen_info.num_buffers_copied_rq;
4157d14abf15SRobert Mustacchi         pdev->toe_info.stats.rx_bytes_copied_in_comp += tcp->rx_con->u.rx.gen_info.bytes_copied_cnt_in_comp;
4158d14abf15SRobert Mustacchi         pdev->toe_info.stats.rx_bytes_copied_in_post += tcp->rx_con->u.rx.gen_info.bytes_copied_cnt_in_post;
4159d14abf15SRobert Mustacchi         pdev->toe_info.stats.rx_bytes_copied_in_process += tcp->rx_con->u.rx.gen_info.bytes_copied_cnt_in_process;
4160d14abf15SRobert Mustacchi         if (pdev->toe_info.stats.max_number_of_isles_in_single_con < tcp->rx_con->u.rx.gen_info.max_number_of_isles) {
4161d14abf15SRobert Mustacchi             pdev->toe_info.stats.max_number_of_isles_in_single_con = tcp->rx_con->u.rx.gen_info.max_number_of_isles;
4162d14abf15SRobert Mustacchi         }
4163d14abf15SRobert Mustacchi         pdev->toe_info.stats.rx_bufs_posted_total += tcp->rx_con->buffer_post_cnt;
4164d14abf15SRobert Mustacchi         pdev->toe_info.stats.rx_bytes_posted_total += tcp->rx_con->bytes_post_cnt;
4165d14abf15SRobert Mustacchi         pdev->toe_info.stats.rx_bufs_skipped_post += tcp->rx_con->buffer_skip_post_cnt;
4166d14abf15SRobert Mustacchi         pdev->toe_info.stats.rx_bytes_skipped_post += tcp->rx_con->bytes_skip_post_cnt;
4167d14abf15SRobert Mustacchi 
4168d14abf15SRobert Mustacchi         pdev->toe_info.stats.rx_bytes_skipped_push += tcp->rx_con->bytes_push_skip_cnt;
4169d14abf15SRobert Mustacchi         pdev->toe_info.stats.rx_partially_completed_buf_cnt += tcp->rx_con->partially_completed_buf_cnt;
4170d14abf15SRobert Mustacchi         pdev->toe_info.stats.total_droped_empty_isles += tcp->rx_con->droped_empty_isles;
4171d14abf15SRobert Mustacchi         pdev->toe_info.stats.total_droped_non_empty_isles += tcp->rx_con->droped_non_empty_isles;
4172d14abf15SRobert Mustacchi         pdev->toe_info.stats.total_rx_post_blocked += tcp->rx_con->rx_post_blocked;
4173d14abf15SRobert Mustacchi         pdev->toe_info.stats.total_zb_rx_post_blocked += tcp->rx_con->zb_rx_post_blocked;
4174d14abf15SRobert Mustacchi         if (tcp->aux_mem_flag & TCP_CON_AUX_RT_MEM_SUCCSESS_ALLOCATION) {
4175d14abf15SRobert Mustacchi             pdev->toe_info.stats.total_aux_mem_success_allocations++;
4176d14abf15SRobert Mustacchi         } else if (tcp->aux_mem_flag & TCP_CON_AUX_RT_MEM_FAILED_ALLOCATION) {
4177d14abf15SRobert Mustacchi             pdev->toe_info.stats.total_aux_mem_failed_allocations++;
4178d14abf15SRobert Mustacchi         }
4179d14abf15SRobert Mustacchi         pdev->toe_info.stats.total_rx_abortion_under_flr += tcp->rx_con->abortion_under_flr;
4180d14abf15SRobert Mustacchi     }
4181d14abf15SRobert Mustacchi }
4182d14abf15SRobert Mustacchi 
4183d14abf15SRobert Mustacchi 
4184d14abf15SRobert Mustacchi 
4185d14abf15SRobert Mustacchi /* Description:
4186d14abf15SRobert Mustacchi  *  delete tcp state from lm _except_ for the actual freeing of memory.
4187d14abf15SRobert Mustacchi  *  the task of freeing the memory is done in lm_tcp_free_tcp_state()
4188d14abf15SRobert Mustacchi  * Assumptions:
4189d14abf15SRobert Mustacchi  *  global toe lock is taken by the caller
4190d14abf15SRobert Mustacchi  */
4191d14abf15SRobert Mustacchi void lm_tcp_del_tcp_state(
4192d14abf15SRobert Mustacchi     struct _lm_device_t *pdev,
4193d14abf15SRobert Mustacchi     lm_tcp_state_t *tcp)
4194d14abf15SRobert Mustacchi {
4195d14abf15SRobert Mustacchi     DbgMessage(pdev, VERBOSEl4sp, "###lm_tcp_del_tcp_state\n");
4196d14abf15SRobert Mustacchi     DbgBreakIf(!(pdev && tcp));
4197d14abf15SRobert Mustacchi 
4198d14abf15SRobert Mustacchi     if (!lm_fl_reset_is_inprogress(pdev))
4199d14abf15SRobert Mustacchi     {
4200d14abf15SRobert Mustacchi         DbgBreakIf(tcp->hdr.status >= STATE_STATUS_OFFLOAD_PENDING &&
4201d14abf15SRobert Mustacchi                tcp->hdr.status < STATE_STATUS_UPLOAD_DONE);
4202d14abf15SRobert Mustacchi     }
4203d14abf15SRobert Mustacchi     else
4204d14abf15SRobert Mustacchi     {
4205d14abf15SRobert Mustacchi         DbgMessage(pdev, FATAL, "###lm_tcp_del_tcp_state under FLR\n");
4206d14abf15SRobert Mustacchi     }
4207d14abf15SRobert Mustacchi 
4208d14abf15SRobert Mustacchi     /* just a moment before we delete this connection, let's collect its stats... */
4209d14abf15SRobert Mustacchi     lm_tcp_collect_stats(pdev, tcp);
4210d14abf15SRobert Mustacchi 
4211d14abf15SRobert Mustacchi     d_list_remove_entry(
4212d14abf15SRobert Mustacchi         &tcp->hdr.state_blk->tcp_list,
4213d14abf15SRobert Mustacchi         &tcp->hdr.link);
4214d14abf15SRobert Mustacchi 
4215d14abf15SRobert Mustacchi     if (tcp->ulp_type == TOE_CONNECTION_TYPE)
4216d14abf15SRobert Mustacchi     {
4217d14abf15SRobert Mustacchi         pdev->toe_info.stats.total_upld++;
4218d14abf15SRobert Mustacchi     }
4219d14abf15SRobert Mustacchi     else if (tcp->ulp_type == ISCSI_CONNECTION_TYPE)
4220d14abf15SRobert Mustacchi     {
4221d14abf15SRobert Mustacchi         pdev->iscsi_info.run_time.stats.total_upld++;
4222d14abf15SRobert Mustacchi     }
4223d14abf15SRobert Mustacchi 
4224d14abf15SRobert Mustacchi     if (!lm_fl_reset_is_inprogress(pdev) && (tcp->path != NULL)) {
4225d14abf15SRobert Mustacchi         /* This is called as a result of a failed offload and not an upload...;
4226d14abf15SRobert Mustacchi          * if the connection was uploaded it means the path must have been taken care of
4227d14abf15SRobert Mustacchi          * already. */
4228d14abf15SRobert Mustacchi         DbgBreakIf((tcp->hdr.status != STATE_STATUS_INIT_OFFLOAD_ERR) &&
4229d14abf15SRobert Mustacchi                    (tcp->hdr.status != STATE_STATUS_INIT) &&
4230d14abf15SRobert Mustacchi                    (tcp->hdr.status != STATE_STATUS_INIT_CONTEXT));
4231d14abf15SRobert Mustacchi         DbgBreakIf(tcp->path->hdr.status != STATE_STATUS_NORMAL);
4232d14abf15SRobert Mustacchi         tcp->path->num_dependents--;
4233d14abf15SRobert Mustacchi         tcp->path = NULL;
4234d14abf15SRobert Mustacchi     }
4235d14abf15SRobert Mustacchi 
4236d14abf15SRobert Mustacchi     if (tcp->in_searcher) {
4237d14abf15SRobert Mustacchi         /* remove 4tuple from searcher */
4238d14abf15SRobert Mustacchi         lm_searcher_mirror_hash_remove(pdev, tcp->cid);
4239d14abf15SRobert Mustacchi         tcp->in_searcher = 0;
4240d14abf15SRobert Mustacchi     }
4241d14abf15SRobert Mustacchi 
4242d14abf15SRobert Mustacchi     if (tcp->cid != 0) {
4243d14abf15SRobert Mustacchi         u8_t notify_fw = 0;
4244d14abf15SRobert Mustacchi 
4245d14abf15SRobert Mustacchi         /* we only notify FW if this delete is a result of upload, otherwise
4246d14abf15SRobert Mustacchi          * (err_offload / error in init stage) we don't*/
4247d14abf15SRobert Mustacchi         if (!lm_fl_reset_is_inprogress(pdev) && (tcp->hdr.status == STATE_STATUS_UPLOAD_DONE)) {
4248d14abf15SRobert Mustacchi             notify_fw = 1;
4249d14abf15SRobert Mustacchi         }
4250d14abf15SRobert Mustacchi         lm_free_cid_resc(pdev, TOE_CONNECTION_TYPE, tcp->cid, notify_fw);
4251d14abf15SRobert Mustacchi     }
4252d14abf15SRobert Mustacchi 
4253d14abf15SRobert Mustacchi     tcp->hdr.state_blk     = NULL;
4254d14abf15SRobert Mustacchi     tcp->cid = 0;
4255d14abf15SRobert Mustacchi     tcp->ctx_virt = NULL;
4256d14abf15SRobert Mustacchi     tcp->ctx_phys.as_u64 = 0;
4257d14abf15SRobert Mustacchi     if (tcp->aux_memory != NULL) {
4258d14abf15SRobert Mustacchi         switch (tcp->type_of_aux_memory) {
4259d14abf15SRobert Mustacchi         case TCP_CON_AUX_RT_MEM:
4260d14abf15SRobert Mustacchi             DbgMessage(pdev, WARNl4sp,
4261d14abf15SRobert Mustacchi                         "###lm_tcp_del_tcp_state: delete aux_mem (%d)\n",
4262d14abf15SRobert Mustacchi                         tcp->aux_mem_size);
4263d14abf15SRobert Mustacchi             tcp->type_of_aux_memory = 0;
4264d14abf15SRobert Mustacchi             mm_rt_free_mem(pdev,tcp->aux_memory,tcp->aux_mem_size,LM_RESOURCE_NDIS);
4265d14abf15SRobert Mustacchi             break;
4266d14abf15SRobert Mustacchi         default:
4267d14abf15SRobert Mustacchi             break;
4268d14abf15SRobert Mustacchi         }
4269d14abf15SRobert Mustacchi     }
4270d14abf15SRobert Mustacchi } /* lm_tcp_del_tcp_state */
4271d14abf15SRobert Mustacchi 
4272d14abf15SRobert Mustacchi /* Description:
4273d14abf15SRobert Mustacchi  *  delete path state from lm
4274d14abf15SRobert Mustacchi  * Assumptions:
4275d14abf15SRobert Mustacchi  *  global toe lock is taken by the caller
4276d14abf15SRobert Mustacchi  */
4277d14abf15SRobert Mustacchi void lm_tcp_del_path_state(
4278d14abf15SRobert Mustacchi     struct _lm_device_t *pdev,
4279d14abf15SRobert Mustacchi     lm_path_state_t *path)
4280d14abf15SRobert Mustacchi {
4281d14abf15SRobert Mustacchi     UNREFERENCED_PARAMETER_(pdev);
4282d14abf15SRobert Mustacchi 
4283d14abf15SRobert Mustacchi     if (path->neigh != NULL) {
4284d14abf15SRobert Mustacchi 
4285d14abf15SRobert Mustacchi         DbgBreakIf(path->neigh->hdr.status != STATE_STATUS_NORMAL);
4286d14abf15SRobert Mustacchi         /* This is called as a result of a synchronous path upload */
4287d14abf15SRobert Mustacchi         path->neigh->num_dependents--;
4288d14abf15SRobert Mustacchi         path->neigh = NULL;
4289d14abf15SRobert Mustacchi     }
4290d14abf15SRobert Mustacchi 
4291d14abf15SRobert Mustacchi     DbgBreakIf(!lm_fl_reset_is_inprogress(pdev) && (path->hdr.status != STATE_STATUS_UPLOAD_DONE));
4292d14abf15SRobert Mustacchi     d_list_remove_entry(&path->hdr.state_blk->path_list, &path->hdr.link);
4293d14abf15SRobert Mustacchi }
4294d14abf15SRobert Mustacchi 
4295d14abf15SRobert Mustacchi /* Description:
4296d14abf15SRobert Mustacchi  *  delete neigh state from lm
4297d14abf15SRobert Mustacchi  * Assumptions:
4298d14abf15SRobert Mustacchi  *  global toe lock is taken by the caller
4299d14abf15SRobert Mustacchi  */
4300d14abf15SRobert Mustacchi void lm_tcp_del_neigh_state(
4301d14abf15SRobert Mustacchi     struct _lm_device_t *pdev,
4302d14abf15SRobert Mustacchi     lm_neigh_state_t *neigh)
4303d14abf15SRobert Mustacchi {
4304d14abf15SRobert Mustacchi     UNREFERENCED_PARAMETER_(pdev);
4305d14abf15SRobert Mustacchi 
4306d14abf15SRobert Mustacchi     DbgBreakIf(!lm_fl_reset_is_inprogress(pdev) && (neigh->hdr.status != STATE_STATUS_UPLOAD_DONE));
4307d14abf15SRobert Mustacchi     d_list_remove_entry(&neigh->hdr.state_blk->neigh_list, &neigh->hdr.link);
4308d14abf15SRobert Mustacchi }
4309d14abf15SRobert Mustacchi 
4310d14abf15SRobert Mustacchi /* Description:
4311d14abf15SRobert Mustacchi  *  free lm tcp state resources
4312d14abf15SRobert Mustacchi  * Assumptions:
4313d14abf15SRobert Mustacchi  *  lm_tcp_del_tcp_state() already called  */
4314d14abf15SRobert Mustacchi void lm_tcp_free_tcp_resc(
4315d14abf15SRobert Mustacchi     struct _lm_device_t *pdev,
4316d14abf15SRobert Mustacchi     lm_tcp_state_t *tcp)
4317d14abf15SRobert Mustacchi {
4318d14abf15SRobert Mustacchi     lm_tcp_con_t *tcp_con;
4319d14abf15SRobert Mustacchi     d_list_t      released_list_of_gen_bufs;
4320d14abf15SRobert Mustacchi     u8_t reset_in_progress = lm_reset_is_inprogress(pdev);
4321d14abf15SRobert Mustacchi     u32_t   num_isles = 0;
4322d14abf15SRobert Mustacchi     u32_t   num_bytes_in_isles = 0;
4323d14abf15SRobert Mustacchi     u32_t   num_gen_bufs_in_isles = 0;
4324d14abf15SRobert Mustacchi 
4325d14abf15SRobert Mustacchi     DbgMessage(pdev, VERBOSEl4sp, "###lm_tcp_free_tcp_resc tcp=%p\n", tcp);
4326d14abf15SRobert Mustacchi     DbgBreakIf(!(pdev && tcp));
4327d14abf15SRobert Mustacchi     DbgBreakIf(!reset_in_progress && tcp->hdr.status >= STATE_STATUS_OFFLOAD_PENDING &&
4328d14abf15SRobert Mustacchi                tcp->hdr.status < STATE_STATUS_UPLOAD_DONE);
4329d14abf15SRobert Mustacchi     DbgBreakIf(tcp->cid); /* i.e lm_tcp_del_tcp_state wasn't called */
4330d14abf15SRobert Mustacchi 
4331d14abf15SRobert Mustacchi     tcp_con = tcp->rx_con;
4332d14abf15SRobert Mustacchi     if (tcp_con) {
4333d14abf15SRobert Mustacchi         /* need to return the generic buffers of the isle list to the pool */
4334d14abf15SRobert Mustacchi         d_list_init(&released_list_of_gen_bufs, NULL, NULL, 0);
4335d14abf15SRobert Mustacchi         num_isles = d_list_entry_cnt(&tcp_con->u.rx.gen_info.isles_list);
4336d14abf15SRobert Mustacchi         num_bytes_in_isles = tcp_con->u.rx.gen_info.isle_nbytes;
4337d14abf15SRobert Mustacchi         lm_tcp_rx_clear_isles(pdev, tcp, &released_list_of_gen_bufs);
4338d14abf15SRobert Mustacchi         num_gen_bufs_in_isles = d_list_entry_cnt(&released_list_of_gen_bufs);
4339d14abf15SRobert Mustacchi         if(!d_list_is_empty(&tcp_con->u.rx.gen_info.dpc_peninsula_list)) {
4340d14abf15SRobert Mustacchi             if (!reset_in_progress) {
4341d14abf15SRobert Mustacchi                 DbgBreak();
4342d14abf15SRobert Mustacchi             }
4343d14abf15SRobert Mustacchi             d_list_add_tail(&released_list_of_gen_bufs,&tcp_con->u.rx.gen_info.dpc_peninsula_list);
4344d14abf15SRobert Mustacchi             d_list_init(&tcp->rx_con->u.rx.gen_info.dpc_peninsula_list, NULL, NULL, 0);
4345d14abf15SRobert Mustacchi         }
4346d14abf15SRobert Mustacchi         if (!d_list_is_empty(&tcp_con->u.rx.gen_info.peninsula_list)) {
4347d14abf15SRobert Mustacchi             d_list_add_tail(&released_list_of_gen_bufs,&tcp_con->u.rx.gen_info.peninsula_list);
4348d14abf15SRobert Mustacchi             d_list_init(&tcp->rx_con->u.rx.gen_info.peninsula_list, NULL, NULL, 0);
4349d14abf15SRobert Mustacchi             if (!reset_in_progress) {
4350d14abf15SRobert Mustacchi                 /* we can only have data in the peninsula if we didn't go via the upload flow (i.e. offload failure of some sort...)*/
4351d14abf15SRobert Mustacchi                 DbgBreakIf(tcp->hdr.status == STATE_STATUS_UPLOAD_DONE);
4352d14abf15SRobert Mustacchi                 if (tcp->hdr.status == STATE_STATUS_UPLOAD_DONE) {
4353d14abf15SRobert Mustacchi                     pdev->toe_info.stats.total_bytes_lost_on_upload += tcp_con->u.rx.gen_info.peninsula_nbytes;
4354d14abf15SRobert Mustacchi                 }
4355d14abf15SRobert Mustacchi             }
4356d14abf15SRobert Mustacchi         }
4357d14abf15SRobert Mustacchi 
4358d14abf15SRobert Mustacchi         if (!d_list_is_empty(&released_list_of_gen_bufs)) {
4359d14abf15SRobert Mustacchi             mm_tcp_return_list_of_gen_bufs(pdev, &released_list_of_gen_bufs, 0, NON_EXISTENT_SB_IDX);
4360d14abf15SRobert Mustacchi             if (!reset_in_progress && num_isles) {
4361d14abf15SRobert Mustacchi                 s32_t delta = -(s32_t)num_gen_bufs_in_isles;
4362d14abf15SRobert Mustacchi                 MM_ACQUIRE_ISLES_CONTROL_LOCK(pdev);
4363d14abf15SRobert Mustacchi                 lm_tcp_update_isles_cnts(pdev, -(s32_t)num_isles, delta);
4364d14abf15SRobert Mustacchi                 MM_RELEASE_ISLES_CONTROL_LOCK(pdev);
4365d14abf15SRobert Mustacchi             }
4366d14abf15SRobert Mustacchi         }
4367d14abf15SRobert Mustacchi     }
4368d14abf15SRobert Mustacchi 
4369d14abf15SRobert Mustacchi } /* lm_tcp_free_tcp_resc */
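
/* Orientation note (summarizing the header comments of lm_tcp_del_tcp_state() and
 * lm_tcp_free_tcp_resc() above; the mm-side caller shown below is only a hypothetical
 * sketch of the intended ordering):
 */
// MM_ACQUIRE_TOE_LOCK(pdev);
// lm_tcp_del_tcp_state(pdev, tcp);    /* unlink from lists, release cid / searcher entry */
// MM_RELEASE_TOE_LOCK(pdev);
// lm_tcp_free_tcp_resc(pdev, tcp);    /* return isle / peninsula generic buffers to the pool */
// /* the lm_tcp_state_t memory itself is freed later, in lm_tcp_free_tcp_state() */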
4370d14abf15SRobert Mustacchi 
4371d14abf15SRobert Mustacchi /* Description:
4372d14abf15SRobert Mustacchi  *  update chip internal memory and hw with given offload params
4373d14abf15SRobert Mustacchi  * Assumptions:
4374d14abf15SRobert Mustacchi  *  - lm_tcp_init was already called
4375d14abf15SRobert Mustacchi  * Returns:
4376d14abf15SRobert Mustacchi  *  SUCCESS or any failure  */
4377d14abf15SRobert Mustacchi lm_status_t
4378d14abf15SRobert Mustacchi lm_tcp_set_ofld_params(
4379d14abf15SRobert Mustacchi     lm_device_t *pdev,
4380d14abf15SRobert Mustacchi     lm_state_block_t *state_blk,
4381d14abf15SRobert Mustacchi     l4_ofld_params_t *params)
4382d14abf15SRobert Mustacchi {
4383d14abf15SRobert Mustacchi     l4_ofld_params_t *curr_params = &pdev->ofld_info.l4_params;
4384d14abf15SRobert Mustacchi 
4385d14abf15SRobert Mustacchi     UNREFERENCED_PARAMETER_(state_blk);
4386d14abf15SRobert Mustacchi 
4387d14abf15SRobert Mustacchi     DbgMessage(pdev, VERBOSE, "###lm_tcp_set_ofld_params\n");
4388d14abf15SRobert Mustacchi 
4389d14abf15SRobert Mustacchi     /* we assume all timers periods can't be 0 */
4390d14abf15SRobert Mustacchi     DbgBreakIf(!(params->delayed_ack_ticks &&
4391d14abf15SRobert Mustacchi                  params->nce_stale_ticks &&
4392d14abf15SRobert Mustacchi                  params->push_ticks &&
4393d14abf15SRobert Mustacchi                  params->sws_prevention_ticks &&
4394d14abf15SRobert Mustacchi                  params->ticks_per_second));
4395d14abf15SRobert Mustacchi 
4396d14abf15SRobert Mustacchi     /* <MichalK> Here we override the ofld info. In theory this affects iscsi as well; however, since ftsk
4397d14abf15SRobert Mustacchi      * does not really use timers, and passes '0' for ka / rt in the delegate/cached params, it's ok that
4398*48bbca81SDaniel Hoffman      * we're overriding the parameters here. The correct solution is to maintain this per cli-idx,
4399*48bbca81SDaniel Hoffman      * but that would require major changes in l4 context initialization and is not worth the effort.
4400d14abf15SRobert Mustacchi      */
4401d14abf15SRobert Mustacchi     *curr_params = *params;
4402d14abf15SRobert Mustacchi 
4403d14abf15SRobert Mustacchi     /* update internal memory/hw for each storm both with
4404d14abf15SRobert Mustacchi      * toe/rdma/iscsi common params and with toe private params (where applicable) */
4405d14abf15SRobert Mustacchi 
4406d14abf15SRobert Mustacchi     _lm_set_ofld_params_xstorm_common(pdev, curr_params);
4407d14abf15SRobert Mustacchi 
4408d14abf15SRobert Mustacchi     _lm_set_ofld_params_tstorm_common(pdev, curr_params);
4409d14abf15SRobert Mustacchi 
4410d14abf15SRobert Mustacchi     _lm_set_ofld_params_tstorm_toe(pdev, curr_params);
4411d14abf15SRobert Mustacchi 
4412d14abf15SRobert Mustacchi     _lm_set_ofld_params_ustorm_toe(pdev, curr_params);
4413d14abf15SRobert Mustacchi 
4414d14abf15SRobert Mustacchi     _lm_set_ofld_params_xstorm_toe(pdev, curr_params);
4415d14abf15SRobert Mustacchi 
4416d14abf15SRobert Mustacchi     /* GilR 6/7/2006 - TBD - usage of params->starting_ip_id is not clear. Currently we ignore it. */
4417d14abf15SRobert Mustacchi 
4418d14abf15SRobert Mustacchi     return LM_STATUS_SUCCESS;
4419d14abf15SRobert Mustacchi } /* lm_tcp_set_ofld_params */
4420d14abf15SRobert Mustacchi 
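/* Illustrative caller sketch (all values below are made up for the example; the only
 * requirement enforced above is that none of the timer periods may be zero):
 */
// l4_ofld_params_t params = {0};
// params.ticks_per_second     = 1000;
// params.delayed_ack_ticks    = 200;
// params.sws_prevention_ticks = 1000;
// params.nce_stale_ticks      = 30000;
// params.push_ticks           = 100;
// /* ... remaining l4_ofld_params_t fields set as required by the client ... */
// lm_tcp_set_ofld_params(pdev, state_blk, &params);   /* currently always returns LM_STATUS_SUCCESS */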
4421d14abf15SRobert Mustacchi 
4422d14abf15SRobert Mustacchi /** Description
4423d14abf15SRobert Mustacchi  *  indicates that a remote RST was received. Called from several
4424d14abf15SRobert Mustacchi  *  functions. Could also be called as a result of a delayed RST.
4425d14abf15SRobert Mustacchi  *  Assumptions:
4426d14abf15SRobert Mustacchi  */
4427d14abf15SRobert Mustacchi void lm_tcp_indicate_rst_received(
4428d14abf15SRobert Mustacchi     struct _lm_device_t * pdev,
4429d14abf15SRobert Mustacchi     lm_tcp_state_t      * tcp
4430d14abf15SRobert Mustacchi     )
4431d14abf15SRobert Mustacchi {
4432d14abf15SRobert Mustacchi     lm_tcp_con_t *rx_con, *tx_con;
4433d14abf15SRobert Mustacchi     u8_t ip_version;
4434d14abf15SRobert Mustacchi     MM_INIT_TCP_LOCK_HANDLE();
4435d14abf15SRobert Mustacchi 
4436d14abf15SRobert Mustacchi     //DbgMessage(pdev, WARNl4rx , "##lm_tcp_indicate_rst_received cid=%d\n", tcp->cid);
4437d14abf15SRobert Mustacchi 
4438d14abf15SRobert Mustacchi     /* Update the Reset Received statistic*/
4439d14abf15SRobert Mustacchi     ip_version = (tcp->path->path_const.ip_version == IP_VERSION_IPV4)? STATS_IP_4_IDX : STATS_IP_6_IDX;
4440d14abf15SRobert Mustacchi     LM_COMMON_DRV_STATS_ATOMIC_INC_TOE(pdev, ipv[ip_version].in_reset);
4441d14abf15SRobert Mustacchi 
4442d14abf15SRobert Mustacchi     rx_con = tcp->rx_con;
4443d14abf15SRobert Mustacchi     tx_con = tcp->tx_con;
4444d14abf15SRobert Mustacchi 
4445d14abf15SRobert Mustacchi     DbgBreakIf( ! (pdev && tcp) );
4446d14abf15SRobert Mustacchi     /* The state may only be NORMAL or UPLOAD_PENDING */
4447d14abf15SRobert Mustacchi     DbgBreakIf( (tcp->hdr.status != STATE_STATUS_NORMAL) &&
4448d14abf15SRobert Mustacchi                 (tcp->hdr.status != STATE_STATUS_UPLOAD_PENDING) );
4449d14abf15SRobert Mustacchi 
4450d14abf15SRobert Mustacchi     /* Get the global TOE lock */
4451d14abf15SRobert Mustacchi     MM_ACQUIRE_TOE_LOCK(pdev);
4452d14abf15SRobert Mustacchi 
4453d14abf15SRobert Mustacchi     /* Change the state status if needed: NORMAL->ABORTED */
4454d14abf15SRobert Mustacchi     if ( tcp->hdr.status == STATE_STATUS_NORMAL ) {
4455d14abf15SRobert Mustacchi         tcp->hdr.status = STATE_STATUS_ABORTED;
4456d14abf15SRobert Mustacchi     }
4457d14abf15SRobert Mustacchi 
4458d14abf15SRobert Mustacchi     /* Release the global TOE lock */
4459d14abf15SRobert Mustacchi     MM_RELEASE_TOE_LOCK(pdev);
4460d14abf15SRobert Mustacchi /*********************** Tx **********************/
4461d14abf15SRobert Mustacchi     /* Take Tx lock */
4462d14abf15SRobert Mustacchi     mm_acquire_tcp_lock(pdev, tx_con);
4463d14abf15SRobert Mustacchi 
4464d14abf15SRobert Mustacchi     /* Implies POST Tx blocked */
4465d14abf15SRobert Mustacchi     DbgBreakIf(tx_con->flags & TCP_REMOTE_RST_RECEIVED_ALL_RX_INDICATED);
4466d14abf15SRobert Mustacchi     tx_con->flags |= TCP_REMOTE_RST_RECEIVED_ALL_RX_INDICATED;
4467d14abf15SRobert Mustacchi 
4468d14abf15SRobert Mustacchi     /* Abort Tx buffers */
4469d14abf15SRobert Mustacchi     lm_tcp_abort_bufs(pdev, tcp, tx_con, LM_STATUS_CONNECTION_RESET);
4470d14abf15SRobert Mustacchi 
4471d14abf15SRobert Mustacchi     /* Clear delayed RST flag */
4472d14abf15SRobert Mustacchi     tx_con->u.tx.flags &= ~ TCP_CON_RST_IND_NOT_SAFE;
4473d14abf15SRobert Mustacchi 
4474d14abf15SRobert Mustacchi     /* Release Tx lock */
4475d14abf15SRobert Mustacchi     mm_release_tcp_lock(pdev, tx_con);
4476d14abf15SRobert Mustacchi /*********************** Rx **********************/
4477d14abf15SRobert Mustacchi     /* Take Rx lock */
4478d14abf15SRobert Mustacchi     mm_acquire_tcp_lock(pdev, rx_con);
4479d14abf15SRobert Mustacchi 
4480d14abf15SRobert Mustacchi     /* Clear delayed FIN and RST */
4481d14abf15SRobert Mustacchi     rx_con->u.rx.flags &= ~ (TCP_CON_RST_IND_PENDING | TCP_CON_FIN_IND_PENDING);
4482d14abf15SRobert Mustacchi 
4483d14abf15SRobert Mustacchi     /* Implies POST Rx blocked */
4484d14abf15SRobert Mustacchi     DbgBreakIf(rx_con->flags & TCP_REMOTE_RST_RECEIVED_ALL_RX_INDICATED);
4485d14abf15SRobert Mustacchi     rx_con->flags |= TCP_REMOTE_RST_RECEIVED_ALL_RX_INDICATED;
4486d14abf15SRobert Mustacchi 
4487d14abf15SRobert Mustacchi     /* Abort Rx buffers */
4488d14abf15SRobert Mustacchi     lm_tcp_abort_bufs(pdev, tcp, rx_con, LM_STATUS_CONNECTION_RESET);
4489d14abf15SRobert Mustacchi 
4490d14abf15SRobert Mustacchi     /* Release Rx lock */
4491d14abf15SRobert Mustacchi     mm_release_tcp_lock(pdev, rx_con);
4492d14abf15SRobert Mustacchi 
4493d14abf15SRobert Mustacchi     /* Indicate the Remote Abortive Disconnect to the Client */
4494d14abf15SRobert Mustacchi     mm_tcp_indicate_rst_received(pdev, tcp);
4495d14abf15SRobert Mustacchi }
4496d14abf15SRobert Mustacchi 
4497d14abf15SRobert Mustacchi void lm_tcp_searcher_ramrod_complete(
4498d14abf15SRobert Mustacchi     IN    struct _lm_device_t * pdev,
4499d14abf15SRobert Mustacchi     IN    lm_tcp_state_t      * tcp
4500d14abf15SRobert Mustacchi     )
4501d14abf15SRobert Mustacchi {
4502d14abf15SRobert Mustacchi     lm_tcp_slow_path_request_t * request = tcp->sp_request;
4503d14abf15SRobert Mustacchi 
4504d14abf15SRobert Mustacchi     DbgMessage(pdev, VERBOSEl4, "## lm_tcp_searcher_ramrod_comp\n");
4505d14abf15SRobert Mustacchi 
4506d14abf15SRobert Mustacchi     DbgBreakIf(tcp->hdr.status != STATE_STATUS_UPLOAD_PENDING);
4507d14abf15SRobert Mustacchi     DbgBreakIf(request->type != SP_REQUEST_TERMINATE_OFFLOAD);
4508d14abf15SRobert Mustacchi 
4509d14abf15SRobert Mustacchi     tcp->sp_request = NULL;
4510d14abf15SRobert Mustacchi     request->type = SP_REQUEST_TERMINATE1_OFFLOAD;
4511d14abf15SRobert Mustacchi 
4512d14abf15SRobert Mustacchi 
4513d14abf15SRobert Mustacchi     MM_ACQUIRE_TOE_LOCK(pdev);
4514d14abf15SRobert Mustacchi     /* remove 4tuple from searcher */
4515d14abf15SRobert Mustacchi     DbgBreakIf(!tcp->in_searcher);
4516d14abf15SRobert Mustacchi     lm_searcher_mirror_hash_remove(pdev, tcp->cid);
4517d14abf15SRobert Mustacchi     tcp->in_searcher = 0;
4518d14abf15SRobert Mustacchi     DbgBreakIf(!(tcp->sp_flags & SP_TCP_SRC_REQ_POSTED));
4519d14abf15SRobert Mustacchi     tcp->sp_flags |= SP_TCP_SRC_REQ_COMP;
4520d14abf15SRobert Mustacchi     lm_tcp_post_slow_path_request(pdev, tcp, request);
4521d14abf15SRobert Mustacchi     MM_RELEASE_TOE_LOCK(pdev);
4522d14abf15SRobert Mustacchi }
4523d14abf15SRobert Mustacchi 
4524d14abf15SRobert Mustacchi void lm_tcp_terminate_ramrod_complete(
4525d14abf15SRobert Mustacchi     IN    struct _lm_device_t * pdev,
4526d14abf15SRobert Mustacchi     IN    lm_tcp_state_t      * tcp)
4527d14abf15SRobert Mustacchi {
4528d14abf15SRobert Mustacchi     lm_tcp_slow_path_request_t * request = tcp->sp_request;
4529d14abf15SRobert Mustacchi     MM_ACQUIRE_TOE_LOCK(pdev);
4530d14abf15SRobert Mustacchi     tcp->sp_request = NULL;
4531d14abf15SRobert Mustacchi     request->type = SP_REQUEST_QUERY;
4532d14abf15SRobert Mustacchi     /* Clear the flags */
4533d14abf15SRobert Mustacchi     DbgBreakIf(tcp->sp_flags & ( SP_REQUEST_COMPLETED_TX | SP_REQUEST_COMPLETED_RX ));
4534d14abf15SRobert Mustacchi 
4535d14abf15SRobert Mustacchi     DbgBreakIf(!(tcp->sp_flags & SP_TCP_TRM_REQ_POSTED));
4536d14abf15SRobert Mustacchi     tcp->sp_flags |= SP_TCP_TRM_REQ_COMP;
4537d14abf15SRobert Mustacchi 
4538d14abf15SRobert Mustacchi     /* Part of the fast-terminate flow is to zero the timers context: turn off the number of active timers */
4539d14abf15SRobert Mustacchi     if (tcp->ulp_type == TOE_CONNECTION_TYPE) {
4540d14abf15SRobert Mustacchi         RESET_FLAGS(((struct toe_context *)tcp->ctx_virt)->timers_context.flags, __TIMERS_BLOCK_CONTEXT_NUM_OF_ACTIVE_TIMERS);
4541d14abf15SRobert Mustacchi     }
4542d14abf15SRobert Mustacchi 
4543d14abf15SRobert Mustacchi     lm_tcp_post_slow_path_request(pdev, tcp, request);
4544d14abf15SRobert Mustacchi 
4545d14abf15SRobert Mustacchi     MM_RELEASE_TOE_LOCK(pdev);
4546d14abf15SRobert Mustacchi }
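
/* Orientation note (inferred from the two completion handlers above, not from a spec):
 * during upload the same sp request object is recycled through consecutive ramrods -
 *   SP_REQUEST_TERMINATE_OFFLOAD  (searcher ramrod)  -> re-posted as
 *   SP_REQUEST_TERMINATE1_OFFLOAD (terminate ramrod) -> re-posted as
 *   SP_REQUEST_QUERY              (final query of the connection state)
 * with each re-post going through lm_tcp_post_slow_path_request() under the global TOE lock. */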
4547d14abf15SRobert Mustacchi 
4548d14abf15SRobert Mustacchi static void lm_tcp_rx_terminate_ramrod_complete(
4549d14abf15SRobert Mustacchi     IN    struct _lm_device_t * pdev,
4550d14abf15SRobert Mustacchi     IN    lm_tcp_state_t      * tcp)
4551d14abf15SRobert Mustacchi {
4552d14abf15SRobert Mustacchi     lm_tcp_con_t * rx_con = tcp->rx_con;
4553d14abf15SRobert Mustacchi     MM_INIT_TCP_LOCK_HANDLE();
4554d14abf15SRobert Mustacchi 
4555d14abf15SRobert Mustacchi     DbgMessage(pdev, VERBOSEl4rx, "## lm_tcp_terminate_ramrod_comp_rx\n");
4556d14abf15SRobert Mustacchi 
4557d14abf15SRobert Mustacchi     DbgBreakIf(tcp->hdr.status != STATE_STATUS_UPLOAD_PENDING);
4558d14abf15SRobert Mustacchi 
4559d14abf15SRobert Mustacchi     mm_acquire_tcp_lock(pdev, rx_con);
4560d14abf15SRobert Mustacchi     DbgBreakIf( mm_tcp_indicating_bufs(rx_con) );
4561d14abf15SRobert Mustacchi     DbgBreakIf(rx_con->flags & TCP_TRM_REQ_COMPLETED);
4562d14abf15SRobert Mustacchi     rx_con->flags |= TCP_TRM_REQ_COMPLETED;
4563d14abf15SRobert Mustacchi     mm_release_tcp_lock(pdev, rx_con);
4564d14abf15SRobert Mustacchi }
4565d14abf15SRobert Mustacchi 
4566d14abf15SRobert Mustacchi static void lm_tcp_tx_terminate_ramrod_complete(
4567d14abf15SRobert Mustacchi     IN    struct _lm_device_t * pdev,
4568d14abf15SRobert Mustacchi     IN    lm_tcp_state_t      * tcp)
4569d14abf15SRobert Mustacchi {
4570d14abf15SRobert Mustacchi     lm_tcp_con_t * tx_con = tcp->tx_con;
4571d14abf15SRobert Mustacchi     MM_INIT_TCP_LOCK_HANDLE();
4572d14abf15SRobert Mustacchi 
4573d14abf15SRobert Mustacchi     DbgMessage(pdev, VERBOSEl4tx, "## lm_tcp_terminate_ramrod_comp_tx\n");
4574d14abf15SRobert Mustacchi 
4575d14abf15SRobert Mustacchi     DbgBreakIf(tcp->hdr.status != STATE_STATUS_UPLOAD_PENDING);
4576d14abf15SRobert Mustacchi 
4577d14abf15SRobert Mustacchi     mm_acquire_tcp_lock(pdev, tx_con);
4578d14abf15SRobert Mustacchi     DbgBreakIf(tx_con->flags & TCP_TRM_REQ_COMPLETED);
4579d14abf15SRobert Mustacchi     tx_con->flags |= TCP_TRM_REQ_COMPLETED;
4580d14abf15SRobert Mustacchi     mm_release_tcp_lock(pdev, tx_con);
4581d14abf15SRobert Mustacchi 
4582d14abf15SRobert Mustacchi }
4583d14abf15SRobert Mustacchi 
4584d14abf15SRobert Mustacchi /** Description
4585d14abf15SRobert Mustacchi  *  indicates that a fin request was received. Called from several
4586d14abf15SRobert Mustacchi  *  functions. Could also be called as a result of a delayed fin
4587d14abf15SRobert Mustacchi  *  Assumptions: called without any lock taken
4588d14abf15SRobert Mustacchi  */
4589d14abf15SRobert Mustacchi static void lm_tcp_indicate_fin_received(
4590d14abf15SRobert Mustacchi     struct _lm_device_t * pdev,
4591d14abf15SRobert Mustacchi     lm_tcp_state_t      * tcp
4592d14abf15SRobert Mustacchi     )
4593d14abf15SRobert Mustacchi {
4594d14abf15SRobert Mustacchi     lm_tcp_con_t        * rx_con;
4595d14abf15SRobert Mustacchi     u8_t ip_version;
4596d14abf15SRobert Mustacchi     MM_INIT_TCP_LOCK_HANDLE();
4597d14abf15SRobert Mustacchi 
4598d14abf15SRobert Mustacchi     DbgMessage(pdev, INFORMl4rx , "##lm_tcp_indicate_fin_received cid=%d\n", tcp->cid);
4599d14abf15SRobert Mustacchi     DbgBreakIf( ! ( pdev && tcp ) );
4600d14abf15SRobert Mustacchi 
4601d14abf15SRobert Mustacchi     ip_version = (tcp->path->path_const.ip_version == IP_VERSION_IPV4)? STATS_IP_4_IDX : STATS_IP_6_IDX;
4602d14abf15SRobert Mustacchi     LM_COMMON_DRV_STATS_ATOMIC_INC_TOE(pdev, ipv[ip_version].in_fin);
4603d14abf15SRobert Mustacchi 
4604d14abf15SRobert Mustacchi     rx_con = tcp->rx_con;
4605d14abf15SRobert Mustacchi 
4606d14abf15SRobert Mustacchi     mm_acquire_tcp_lock(pdev, rx_con);
4607d14abf15SRobert Mustacchi 
4608d14abf15SRobert Mustacchi     rx_con->u.rx.flags &= ~TCP_CON_FIN_IND_PENDING;
4609d14abf15SRobert Mustacchi 
4610d14abf15SRobert Mustacchi     /* Mark the connection as POST_BLOCKED due to Remote FIN Received */
4611d14abf15SRobert Mustacchi     DbgBreakIf(rx_con->flags & TCP_REMOTE_FIN_RECEIVED_ALL_RX_INDICATED);
4612d14abf15SRobert Mustacchi     rx_con->flags |= TCP_REMOTE_FIN_RECEIVED_ALL_RX_INDICATED;
4613d14abf15SRobert Mustacchi     /* Abort pending Rx buffers */
4614d14abf15SRobert Mustacchi     lm_tcp_abort_bufs(pdev, tcp, rx_con, LM_STATUS_SUCCESS);
4615d14abf15SRobert Mustacchi 
4616d14abf15SRobert Mustacchi     mm_release_tcp_lock(pdev, rx_con);
4617d14abf15SRobert Mustacchi 
4618d14abf15SRobert Mustacchi     /* Indicate the Remote FIN up to the client */
4619d14abf15SRobert Mustacchi     mm_tcp_indicate_fin_received(pdev, tcp);
4620d14abf15SRobert Mustacchi }
4621d14abf15SRobert Mustacchi 
4622d14abf15SRobert Mustacchi void lm_tcp_process_retrieve_indication_cqe(
4623d14abf15SRobert Mustacchi     struct _lm_device_t * pdev,
4624d14abf15SRobert Mustacchi     lm_tcp_state_t      * tcp,
4625d14abf15SRobert Mustacchi     l4_upload_reason_t    upload_reason)
4626d14abf15SRobert Mustacchi {
4627d14abf15SRobert Mustacchi     u32_t rx_flags = 0;
4628d14abf15SRobert Mustacchi     u32_t tx_flags = 0;
4629d14abf15SRobert Mustacchi     DbgMessage(pdev, INFORMl4, "###lm_tcp_process_retrieve_indication_cqe cid=%d upload_reason=%d\n", tcp->cid, upload_reason);
4630d14abf15SRobert Mustacchi 
4631d14abf15SRobert Mustacchi     /* assert that this CQE is allowed */
4632d14abf15SRobert Mustacchi     /* we could receive this cqe after an RST / UPL, in which case we will not notify about it. */
4633d14abf15SRobert Mustacchi     SET_FLAGS(rx_flags, TCP_RX_COMP_BLOCKED | TCP_UPLOAD_REQUESTED);
4634d14abf15SRobert Mustacchi     SET_FLAGS(tx_flags, TCP_TX_COMP_BLOCKED);
4635d14abf15SRobert Mustacchi 
4636d14abf15SRobert Mustacchi     /* we do need to notify about it even if it's after a FIN... */
4637d14abf15SRobert Mustacchi     RESET_FLAGS(rx_flags, TCP_REMOTE_FIN_RECEIVED);
4638d14abf15SRobert Mustacchi     RESET_FLAGS(tx_flags, TCP_FIN_REQ_COMPLETED);
4639d14abf15SRobert Mustacchi 
4640d14abf15SRobert Mustacchi     if (!GET_FLAGS(tcp->rx_con->flags, rx_flags) && !GET_FLAGS(tcp->tx_con->flags,tx_flags)) {
4641d14abf15SRobert Mustacchi         SET_FLAGS(tcp->rx_con->flags, TCP_UPLOAD_REQUESTED);
4642d14abf15SRobert Mustacchi         DbgMessage(pdev, INFORMl4, "###Indicating UP: cid=%d upload_reason=%d\n", tcp->cid, upload_reason);
4643d14abf15SRobert Mustacchi         mm_tcp_indicate_retrieve_indication(pdev, tcp, upload_reason);
4644d14abf15SRobert Mustacchi     }
4645d14abf15SRobert Mustacchi }
4646d14abf15SRobert Mustacchi 
4647d14abf15SRobert Mustacchi /* Assumption: called without any lock taken */
4648d14abf15SRobert Mustacchi static void lm_tcp_rx_fin_received_complete(
4649d14abf15SRobert Mustacchi     struct _lm_device_t * pdev,
4650d14abf15SRobert Mustacchi     lm_tcp_state_t      * tcp,
4651d14abf15SRobert Mustacchi     u8_t                  upload
4652d14abf15SRobert Mustacchi     )
4653d14abf15SRobert Mustacchi {
4654d14abf15SRobert Mustacchi     lm_tcp_con_t * rx_con;
4655d14abf15SRobert Mustacchi     u8_t indicate = 1;
4656d14abf15SRobert Mustacchi     u8_t is_empty_peninsula;
4657d14abf15SRobert Mustacchi     MM_INIT_TCP_LOCK_HANDLE();
4658d14abf15SRobert Mustacchi 
4659d14abf15SRobert Mustacchi     DbgMessage(pdev, INFORMl4rx, "###lm_tcp_rx_fin_received_complete cid=%d\n", tcp->cid);
4660d14abf15SRobert Mustacchi     DbgBreakIf( ! (pdev && tcp) );
4661d14abf15SRobert Mustacchi     DbgBreakIf( tcp->hdr.status != STATE_STATUS_NORMAL && tcp->hdr.status != STATE_STATUS_UPLOAD_PENDING);
4662d14abf15SRobert Mustacchi 
4663d14abf15SRobert Mustacchi     rx_con = tcp->rx_con;
4664d14abf15SRobert Mustacchi 
4665d14abf15SRobert Mustacchi     mm_acquire_tcp_lock(pdev, rx_con);
4666d14abf15SRobert Mustacchi 
4667d14abf15SRobert Mustacchi     /* break if we received a fin on the cqe and we still have an 'unreleased' generic buffer in our peninsula */
4668d14abf15SRobert Mustacchi     DbgBreakIf( !d_list_is_empty(&tcp->rx_con->u.rx.gen_info.dpc_peninsula_list) );
4669d14abf15SRobert Mustacchi 
4670d14abf15SRobert Mustacchi     /* Mark the connection as 'COMP_BLOCKED' and 'DB BLOCKED'  */
4671d14abf15SRobert Mustacchi     DbgBreakIf(rx_con->flags & TCP_REMOTE_FIN_RECEIVED);
4672d14abf15SRobert Mustacchi     rx_con->flags |= TCP_REMOTE_FIN_RECEIVED;
4673d14abf15SRobert Mustacchi     is_empty_peninsula = (rx_con->u.rx.gen_info.peninsula_nbytes > 0 ? 0 : 1);
4674d14abf15SRobert Mustacchi     if (!is_empty_peninsula || mm_tcp_indicating_bufs(rx_con) ) {
4675d14abf15SRobert Mustacchi         DbgMessage(pdev, INFORMl4, "lm_tcp_process_fin_received_cqe - postponing fin indication cid=%d\n", tcp->cid);
4676d14abf15SRobert Mustacchi         rx_con->u.rx.flags |= TCP_CON_FIN_IND_PENDING;
4677d14abf15SRobert Mustacchi         indicate = 0;
4678d14abf15SRobert Mustacchi     }
4679d14abf15SRobert Mustacchi 
4680d14abf15SRobert Mustacchi     tcp->tcp_state_calc.fin_reception_time = mm_get_current_time(pdev);
4681d14abf15SRobert Mustacchi     if (tcp->tcp_state_calc.fin_reception_time == tcp->tcp_state_calc.fin_request_time) {
4682d14abf15SRobert Mustacchi         tcp->tcp_state_calc.fin_request_time -= 1;
4683d14abf15SRobert Mustacchi     }
4684d14abf15SRobert Mustacchi 
4685d14abf15SRobert Mustacchi     mm_release_tcp_lock(pdev, rx_con);
4686d14abf15SRobert Mustacchi 
4687d14abf15SRobert Mustacchi     if (indicate)
4688d14abf15SRobert Mustacchi     {
4689d14abf15SRobert Mustacchi         lm_tcp_indicate_fin_received(pdev, tcp);
4690d14abf15SRobert Mustacchi     } else if(upload && !is_empty_peninsula)
4691d14abf15SRobert Mustacchi     {
4692d14abf15SRobert Mustacchi         /* we did not indicate the received fin, AND we got upload request from FW, AND peninsula is not empty,
4693d14abf15SRobert Mustacchi            i.e. we _may_ be waiting for RQ buffers to be posted before we indicate the fin.
4694d14abf15SRobert Mustacchi            Thus, we _may_ need to request for upload:  */
4695d14abf15SRobert Mustacchi 
4696d14abf15SRobert Mustacchi         /* imitate as if FW has sent an upload request CQE: */
4697d14abf15SRobert Mustacchi         lm_tcp_process_retrieve_indication_cqe(pdev, tcp, L4_UPLOAD_REASON_UPLOAD_REQUESTED);
4698d14abf15SRobert Mustacchi         pdev->toe_info.stats.total_fin_upld_requested++;
4699d14abf15SRobert Mustacchi     }
4700d14abf15SRobert Mustacchi }
4701d14abf15SRobert Mustacchi 
4702d14abf15SRobert Mustacchi 
4703d14abf15SRobert Mustacchi static void lm_tcp_comp_empty_ramrod_request(
4704d14abf15SRobert Mustacchi     IN struct _lm_device_t * pdev,
4705d14abf15SRobert Mustacchi     IN lm_tcp_state_t      * tcp)
4706d14abf15SRobert Mustacchi {
4707d14abf15SRobert Mustacchi     lm_tcp_slow_path_request_t * sp_req = tcp->sp_request;
4708d14abf15SRobert Mustacchi 
4709d14abf15SRobert Mustacchi     MM_ACQUIRE_TOE_LOCK(pdev);
4710d14abf15SRobert Mustacchi 
4711d14abf15SRobert Mustacchi     DbgBreakIf(tcp->sp_flags & ( SP_REQUEST_COMPLETED_TX | SP_REQUEST_COMPLETED_RX ));
4712d14abf15SRobert Mustacchi     sp_req->status = LM_STATUS_SUCCESS;
4713d14abf15SRobert Mustacchi     tcp->sp_request = NULL;
4714d14abf15SRobert Mustacchi     mm_tcp_comp_slow_path_request(pdev, tcp, sp_req);
4715d14abf15SRobert Mustacchi 
4716d14abf15SRobert Mustacchi     MM_RELEASE_TOE_LOCK(pdev);
4717d14abf15SRobert Mustacchi }
4718d14abf15SRobert Mustacchi 
4719d14abf15SRobert Mustacchi static void lm_tcp_rx_empty_ramrod_complete(
4720d14abf15SRobert Mustacchi     IN    struct _lm_device_t * pdev,
4721d14abf15SRobert Mustacchi     IN    lm_tcp_state_t      * tcp,
4722d14abf15SRobert Mustacchi     IN    u32_t                 sp_type)
4723d14abf15SRobert Mustacchi {
4724d14abf15SRobert Mustacchi     u8_t indicate = 0;
4725d14abf15SRobert Mustacchi 
4726d14abf15SRobert Mustacchi     DbgBreakIf(!tcp);
4727d14abf15SRobert Mustacchi 
4728d14abf15SRobert Mustacchi     DbgMessage(pdev, INFORMl4rx | INFORMl4sp,
4729d14abf15SRobert Mustacchi                 "###lm_tcp_process_empty_slow_path_rcqe cid=%d, request->type=%d\n",
4730d14abf15SRobert Mustacchi                 tcp->cid, sp_type);
4731d14abf15SRobert Mustacchi 
4732d14abf15SRobert Mustacchi     switch (sp_type) {
4733d14abf15SRobert Mustacchi     case SP_REQUEST_PENDING_LOCAL_FIN_DISCONNECT:
4734d14abf15SRobert Mustacchi     case SP_REQUEST_PENDING_TX_RST:
4735d14abf15SRobert Mustacchi         break; /* relevant to scqe only */
4736d14abf15SRobert Mustacchi     case SP_REQUEST_PENDING_REMOTE_DISCONNECT:
4737d14abf15SRobert Mustacchi         if ( tcp->rx_con->u.rx.flags & TCP_CON_RST_IND_PENDING ) {
4738d14abf15SRobert Mustacchi             /* process it */
4739d14abf15SRobert Mustacchi             MM_ACQUIRE_TOE_LOCK(pdev);
4740d14abf15SRobert Mustacchi 
4741d14abf15SRobert Mustacchi             /* Mark Rx ready for RST indication - before it was marked as 'delayed' */
4742d14abf15SRobert Mustacchi             tcp->sp_flags |= REMOTE_RST_INDICATED_RX;
4743d14abf15SRobert Mustacchi 
4744d14abf15SRobert Mustacchi             if ( (tcp->sp_flags & REMOTE_RST_INDICATED_RX) && (tcp->sp_flags & REMOTE_RST_INDICATED_TX) ) {
4745d14abf15SRobert Mustacchi                 indicate = 1;
4746d14abf15SRobert Mustacchi             }
4747d14abf15SRobert Mustacchi 
4748d14abf15SRobert Mustacchi             /* Release global TOE lock */
4749d14abf15SRobert Mustacchi             MM_RELEASE_TOE_LOCK(pdev);
4750d14abf15SRobert Mustacchi             if (indicate) {
4751d14abf15SRobert Mustacchi                 lm_tcp_indicate_rst_received(pdev, tcp);
4752d14abf15SRobert Mustacchi             } /* o/w we haven't seen the TX yet... */
4753d14abf15SRobert Mustacchi         }
4754d14abf15SRobert Mustacchi         else if ( tcp->rx_con->u.rx.flags & TCP_CON_FIN_IND_PENDING ) {
4755d14abf15SRobert Mustacchi             /* process it */
4756d14abf15SRobert Mustacchi             lm_tcp_indicate_fin_received(pdev, tcp);
4757d14abf15SRobert Mustacchi         }
4758d14abf15SRobert Mustacchi         break;
4759d14abf15SRobert Mustacchi     default:
4760d14abf15SRobert Mustacchi         {
4761d14abf15SRobert Mustacchi             DbgMessage(pdev, FATAL,
4762d14abf15SRobert Mustacchi                     "'empty ramrod' opcode in cqe doesn't fit with sp_request->type %d\n",
4763d14abf15SRobert Mustacchi                     sp_type);
4764d14abf15SRobert Mustacchi             DbgBreak();
4765d14abf15SRobert Mustacchi         }
4766d14abf15SRobert Mustacchi     }
4767d14abf15SRobert Mustacchi }
4768d14abf15SRobert Mustacchi 
4769d14abf15SRobert Mustacchi static void lm_tcp_tx_empty_ramrod_complete(
4770d14abf15SRobert Mustacchi     IN    struct _lm_device_t * pdev,
4771d14abf15SRobert Mustacchi     IN    lm_tcp_state_t      * tcp,
4772d14abf15SRobert Mustacchi     IN    u32_t                 sp_type)
4773d14abf15SRobert Mustacchi {
4774d14abf15SRobert Mustacchi     u8_t indicate = 0;
4775d14abf15SRobert Mustacchi     MM_INIT_TCP_LOCK_HANDLE();
4776d14abf15SRobert Mustacchi 
4777d14abf15SRobert Mustacchi     DbgBreakIf(!tcp);
4778d14abf15SRobert Mustacchi 
4779d14abf15SRobert Mustacchi     DbgMessage(pdev, INFORMl4tx | INFORMl4sp,
4780d14abf15SRobert Mustacchi                 "###lm_tcp_process_empty_slow_path_scqe cid=%d, request->type=%d\n",
4781d14abf15SRobert Mustacchi                 tcp->cid, sp_type);
4782d14abf15SRobert Mustacchi 
4783d14abf15SRobert Mustacchi     switch (sp_type) {
4784d14abf15SRobert Mustacchi     case SP_REQUEST_PENDING_LOCAL_FIN_DISCONNECT:
4785d14abf15SRobert Mustacchi         /* process it */
4786d14abf15SRobert Mustacchi         mm_acquire_tcp_lock(pdev, tcp->tx_con);
4787d14abf15SRobert Mustacchi         lm_tcp_abort_bufs(pdev,tcp,tcp->tx_con,LM_STATUS_ABORTED);
4788d14abf15SRobert Mustacchi         mm_release_tcp_lock(pdev, tcp->tx_con);
4789d14abf15SRobert Mustacchi         break;
4790d14abf15SRobert Mustacchi     case SP_REQUEST_PENDING_REMOTE_DISCONNECT:
4791d14abf15SRobert Mustacchi         break; /* rcqe only */
4792d14abf15SRobert Mustacchi     case SP_REQUEST_PENDING_TX_RST:
4793d14abf15SRobert Mustacchi         /* safe to abort buffers at this stage - we know none are pending on pbf */
4794d14abf15SRobert Mustacchi         if (tcp->tx_con->u.tx.flags & TCP_CON_RST_IND_NOT_SAFE ) {
4795d14abf15SRobert Mustacchi             /* process it */
4796d14abf15SRobert Mustacchi             MM_ACQUIRE_TOE_LOCK(pdev);
4797d14abf15SRobert Mustacchi 
4798d14abf15SRobert Mustacchi             /* Mark Tx ready for RST indication - before it was marked as 'delayed' */
4799d14abf15SRobert Mustacchi             tcp->sp_flags |= REMOTE_RST_INDICATED_TX;
4800d14abf15SRobert Mustacchi 
4801d14abf15SRobert Mustacchi             if ( (tcp->sp_flags & REMOTE_RST_INDICATED_RX) && (tcp->sp_flags & REMOTE_RST_INDICATED_TX) ) {
4802d14abf15SRobert Mustacchi                 indicate = 1;
4803d14abf15SRobert Mustacchi             }
4804d14abf15SRobert Mustacchi 
4805d14abf15SRobert Mustacchi             mm_acquire_tcp_lock(pdev, tcp->tx_con);
4806d14abf15SRobert Mustacchi             tcp->tx_con->u.tx.flags &= ~TCP_CON_RST_IND_NOT_SAFE;
4807d14abf15SRobert Mustacchi             mm_release_tcp_lock(pdev, tcp->tx_con);
4808d14abf15SRobert Mustacchi 
4809d14abf15SRobert Mustacchi             /* Release global TOE lock */
4810d14abf15SRobert Mustacchi             MM_RELEASE_TOE_LOCK(pdev);
4811d14abf15SRobert Mustacchi             if (indicate) {
4812d14abf15SRobert Mustacchi                 lm_tcp_indicate_rst_received(pdev, tcp);
4813d14abf15SRobert Mustacchi             } /* o/w we haven't seen the RX yet... */
4814d14abf15SRobert Mustacchi         }
4815d14abf15SRobert Mustacchi         break;
4816d14abf15SRobert Mustacchi     default:
4817d14abf15SRobert Mustacchi         {
4818d14abf15SRobert Mustacchi             DbgMessage(pdev, FATAL,
4819d14abf15SRobert Mustacchi                     "'empty ramrod' opcode in cqe doesn't fit with sp_request->type %d\n",
4820d14abf15SRobert Mustacchi                    sp_type);
4821d14abf15SRobert Mustacchi             DbgBreak();
4822d14abf15SRobert Mustacchi         }
4823d14abf15SRobert Mustacchi     }
4824d14abf15SRobert Mustacchi }
4825d14abf15SRobert Mustacchi 
4826d14abf15SRobert Mustacchi static void lm_tcp_comp_abortive_disconnect_request(
4827d14abf15SRobert Mustacchi     struct _lm_device_t        * pdev,
4828d14abf15SRobert Mustacchi     lm_tcp_state_t             * tcp,
4829d14abf15SRobert Mustacchi     lm_tcp_slow_path_request_t * request
4830d14abf15SRobert Mustacchi     )
4831d14abf15SRobert Mustacchi {
4832d14abf15SRobert Mustacchi     lm_tcp_con_t *rx_con, *tx_con;
4833d14abf15SRobert Mustacchi     u8_t delayed_rst = 0;
4834d14abf15SRobert Mustacchi     u8_t ip_version;
4835d14abf15SRobert Mustacchi     u8_t complete_sp_request = TRUE;
4836d14abf15SRobert Mustacchi     MM_INIT_TCP_LOCK_HANDLE();
4837d14abf15SRobert Mustacchi 
4838d14abf15SRobert Mustacchi     DbgBreakIf( ! (pdev && tcp && request) );
4839d14abf15SRobert Mustacchi 
4840d14abf15SRobert Mustacchi     /* Update the statistics */
4841d14abf15SRobert Mustacchi     ip_version = (tcp->path->path_const.ip_version == IP_VERSION_IPV4)? STATS_IP_4_IDX : STATS_IP_6_IDX;
4842d14abf15SRobert Mustacchi     LM_COMMON_DRV_STATS_ATOMIC_INC_TOE(pdev, ipv[ip_version].out_resets);
4843d14abf15SRobert Mustacchi 
4844d14abf15SRobert Mustacchi     rx_con = tcp->rx_con;
4845d14abf15SRobert Mustacchi     tx_con = tcp->tx_con;
4846d14abf15SRobert Mustacchi 
4847d14abf15SRobert Mustacchi     /* Get global TOE lock */
4848d14abf15SRobert Mustacchi     MM_ACQUIRE_TOE_LOCK(pdev);
4849d14abf15SRobert Mustacchi 
4850d14abf15SRobert Mustacchi     /* The state may only be NORMAL or ABORTED (due to remote RST) */
4851d14abf15SRobert Mustacchi     DbgBreakIf( ( tcp->hdr.status != STATE_STATUS_NORMAL ) && ( tcp->hdr.status != STATE_STATUS_ABORTED ) );
4852d14abf15SRobert Mustacchi     /* the FW will always post a RST packet whether or not a
4853d14abf15SRobert Mustacchi        remote RST was already received; therefore the
4854d14abf15SRobert Mustacchi        completion status of the request is always SUCCESS */
4855d14abf15SRobert Mustacchi     request->status = LM_STATUS_SUCCESS;
4856d14abf15SRobert Mustacchi 
4857d14abf15SRobert Mustacchi     tcp->hdr.status = STATE_STATUS_ABORTED;
4858d14abf15SRobert Mustacchi 
4859d14abf15SRobert Mustacchi     tcp->tcp_state_calc.con_rst_flag = TRUE;
4860d14abf15SRobert Mustacchi 
4861d14abf15SRobert Mustacchi     /* Release global TOE lock */
4862d14abf15SRobert Mustacchi     MM_RELEASE_TOE_LOCK(pdev);
4863d14abf15SRobert Mustacchi 
4864d14abf15SRobert Mustacchi  /***************** Tx ********************/
4865d14abf15SRobert Mustacchi     /* Get Tx lock */
4866d14abf15SRobert Mustacchi     mm_acquire_tcp_lock(pdev, tx_con);
4867d14abf15SRobert Mustacchi 
4868d14abf15SRobert Mustacchi     /* Clear delayed RST flag */
4869d14abf15SRobert Mustacchi     tx_con->u.tx.flags &= ~ TCP_CON_RST_IND_NOT_SAFE;
4870d14abf15SRobert Mustacchi     /* safe to abort buffers anyway, even if we have a non-safe tx abort, since by this point a ramrod has been sent, so the queues are clear */
4871d14abf15SRobert Mustacchi     lm_tcp_abort_bufs(pdev,tcp,tx_con, LM_STATUS_ABORTED);
4872d14abf15SRobert Mustacchi 
4873d14abf15SRobert Mustacchi     /* Release Tx lock */
4874d14abf15SRobert Mustacchi     mm_release_tcp_lock(pdev, tx_con);
4875d14abf15SRobert Mustacchi 
4876d14abf15SRobert Mustacchi /***************** Rx ********************/
4877d14abf15SRobert Mustacchi     /* Get Rx lock */
4878d14abf15SRobert Mustacchi     mm_acquire_tcp_lock(pdev, rx_con);
4879d14abf15SRobert Mustacchi 
4880d14abf15SRobert Mustacchi     /* 'POST/IND BLOCKED' in the request. Even if a post was in the middle, it must be done by now */
4881d14abf15SRobert Mustacchi     if (mm_tcp_indicating_bufs(rx_con)) {
4882d14abf15SRobert Mustacchi         if (pdev->params.l4_support_pending_sp_req_complete) {
4883d14abf15SRobert Mustacchi             DbgBreakIf(DBG_BREAK_ON(ABORTIVE_DISCONNECT_DURING_IND));
4884d14abf15SRobert Mustacchi             complete_sp_request = FALSE;
4885d14abf15SRobert Mustacchi             tcp->sp_request_pending_completion = TRUE;
4886d14abf15SRobert Mustacchi             tcp->pending_abortive_disconnect++;
4887d14abf15SRobert Mustacchi             mm_atomic_inc(&pdev->toe_info.stats.total_aborive_disconnect_during_completion);
4888d14abf15SRobert Mustacchi             DbgMessage(pdev, INFORMl4sp, "Abortive disconnect completion during indication(%d)\n", tcp->cid);
4889d14abf15SRobert Mustacchi         } else {
4890d14abf15SRobert Mustacchi             DbgBreak();
4891d14abf15SRobert Mustacchi         }
4892d14abf15SRobert Mustacchi     }
4893d14abf15SRobert Mustacchi 
4894d14abf15SRobert Mustacchi     if ( rx_con->u.rx.flags & TCP_CON_RST_IND_PENDING ) {
4895d14abf15SRobert Mustacchi         delayed_rst = 1;
4896d14abf15SRobert Mustacchi     }
4897d14abf15SRobert Mustacchi 
4898d14abf15SRobert Mustacchi     /* Clear delayed RST and FIN flags */
4899d14abf15SRobert Mustacchi     rx_con->u.rx.flags &= ~ (TCP_CON_RST_IND_PENDING  | TCP_CON_FIN_IND_PENDING);
4900d14abf15SRobert Mustacchi 
4901d14abf15SRobert Mustacchi     lm_tcp_abort_bufs(pdev,tcp, rx_con, LM_STATUS_ABORTED);
4902d14abf15SRobert Mustacchi 
4903d14abf15SRobert Mustacchi     /* Release Rx lock */
4904d14abf15SRobert Mustacchi     mm_release_tcp_lock(pdev, rx_con);
4905d14abf15SRobert Mustacchi /*****************************************/
4906d14abf15SRobert Mustacchi 
4907d14abf15SRobert Mustacchi     if ( delayed_rst ) {
4908d14abf15SRobert Mustacchi         /* GilR 10/15/2006 - TBD - since we complete the request
4909d14abf15SRobert Mustacchi           with status SUCCESS anyway, we do not need to indicate a remote RST
4910d14abf15SRobert Mustacchi           that was delayed. Therefore the following call to
4911d14abf15SRobert Mustacchi           mm_tcp_indicate_rst_received is canceled */
4912d14abf15SRobert Mustacchi       //mm_tcp_indicate_rst_received(pdev, tcp);
4913d14abf15SRobert Mustacchi     }
4914d14abf15SRobert Mustacchi 
4915d14abf15SRobert Mustacchi     if (complete_sp_request) {
4916d14abf15SRobert Mustacchi         /* Get global TOE lock */
4917d14abf15SRobert Mustacchi         MM_ACQUIRE_TOE_LOCK(pdev);
4918d14abf15SRobert Mustacchi 
4919d14abf15SRobert Mustacchi         DbgBreakIf(tcp->sp_flags & ( SP_REQUEST_COMPLETED_TX | SP_REQUEST_COMPLETED_RX ));
4920d14abf15SRobert Mustacchi 
4921d14abf15SRobert Mustacchi         tcp->sp_request = NULL;
4922d14abf15SRobert Mustacchi 
4923d14abf15SRobert Mustacchi         mm_tcp_comp_slow_path_request(pdev, tcp, request);
4924d14abf15SRobert Mustacchi 
4925d14abf15SRobert Mustacchi         /* Release global TOE lock */
4926d14abf15SRobert Mustacchi         MM_RELEASE_TOE_LOCK(pdev);
4927d14abf15SRobert Mustacchi     }
4928d14abf15SRobert Mustacchi }
4929d14abf15SRobert Mustacchi 
4930d14abf15SRobert Mustacchi static void lm_tcp_rx_rst_received_complete (
4931d14abf15SRobert Mustacchi     struct _lm_device_t * pdev,
4932d14abf15SRobert Mustacchi     lm_tcp_state_t      * tcp
4933d14abf15SRobert Mustacchi     )
4934d14abf15SRobert Mustacchi {
4935d14abf15SRobert Mustacchi     lm_tcp_con_t * rx_con;
4936d14abf15SRobert Mustacchi     u8_t indicate = 0;
4937d14abf15SRobert Mustacchi     MM_INIT_TCP_LOCK_HANDLE();
4938d14abf15SRobert Mustacchi 
4939d14abf15SRobert Mustacchi     DbgMessage(pdev, INFORMl4rx , "###lm_tcp_process_rst_received_rcqe cid=%d\n", tcp->cid);
4940d14abf15SRobert Mustacchi     DbgBreakIf( ! (pdev && tcp) );
4941d14abf15SRobert Mustacchi     /* The state may only be NORMAL or UPLOAD_PENDING */
4942d14abf15SRobert Mustacchi     DbgBreakIf( (tcp->hdr.status != STATE_STATUS_NORMAL) &&
4943d14abf15SRobert Mustacchi                 (tcp->hdr.status != STATE_STATUS_UPLOAD_PENDING) );
4944d14abf15SRobert Mustacchi 
4945d14abf15SRobert Mustacchi     rx_con = tcp->rx_con;
4946d14abf15SRobert Mustacchi 
4947d14abf15SRobert Mustacchi     /* Get global TOE lock */
4948d14abf15SRobert Mustacchi     MM_ACQUIRE_TOE_LOCK(pdev);
4949d14abf15SRobert Mustacchi 
4950d14abf15SRobert Mustacchi     /* Take the Rx lock */
4951d14abf15SRobert Mustacchi     mm_acquire_tcp_lock(pdev, rx_con);
4952d14abf15SRobert Mustacchi 
4953d14abf15SRobert Mustacchi     /* break if we received a rst on the cqe and we still have an 'unreleased' generic buffer in our peninsula */
4954d14abf15SRobert Mustacchi     DbgBreakIf( !d_list_is_empty(&tcp->rx_con->u.rx.gen_info.dpc_peninsula_list) );
4955d14abf15SRobert Mustacchi 
4956d14abf15SRobert Mustacchi 
4957d14abf15SRobert Mustacchi     /* This will imply RX_COMP_LOCKED and RX_DB_BLOCKED */
4958d14abf15SRobert Mustacchi     DbgBreakIf(rx_con->flags & TCP_REMOTE_RST_RECEIVED);
4959d14abf15SRobert Mustacchi     rx_con->flags |= TCP_REMOTE_RST_RECEIVED;
4960d14abf15SRobert Mustacchi 
4961d14abf15SRobert Mustacchi     /* Clear pending FIN */
4962d14abf15SRobert Mustacchi     rx_con->u.rx.flags &= ~ TCP_CON_FIN_IND_PENDING;
4963d14abf15SRobert Mustacchi 
4964d14abf15SRobert Mustacchi     /* Check if all received data has been completed towards the Client */
4965d14abf15SRobert Mustacchi     if (rx_con->u.rx.gen_info.peninsula_nbytes || mm_tcp_indicating_bufs(rx_con) ) {
4966d14abf15SRobert Mustacchi         DbgMessage(pdev, INFORMl4rx , "lm_tcp_process_rst_received_cqe - postponing rst indication cid=%d\n", tcp->cid);
4967d14abf15SRobert Mustacchi         rx_con->u.rx.flags |= TCP_CON_RST_IND_PENDING;
4968d14abf15SRobert Mustacchi     } else {
4969d14abf15SRobert Mustacchi         /* Mark Rx ready for RST indication */
4970d14abf15SRobert Mustacchi         tcp->sp_flags |= REMOTE_RST_INDICATED_RX;
4971d14abf15SRobert Mustacchi     }
4972d14abf15SRobert Mustacchi 
4973d14abf15SRobert Mustacchi     /* Release the Rx lock */
4974d14abf15SRobert Mustacchi     mm_release_tcp_lock(pdev, rx_con);
4975d14abf15SRobert Mustacchi 
4976d14abf15SRobert Mustacchi     if ( (tcp->sp_flags & REMOTE_RST_INDICATED_RX) && (tcp->sp_flags & REMOTE_RST_INDICATED_TX) ) {
4977d14abf15SRobert Mustacchi         indicate = 1;
4978d14abf15SRobert Mustacchi         tcp->tcp_state_calc.con_rst_flag = TRUE;
4979d14abf15SRobert Mustacchi     }
4980d14abf15SRobert Mustacchi 
4981d14abf15SRobert Mustacchi     /* Release global TOE lock */
4982d14abf15SRobert Mustacchi     MM_RELEASE_TOE_LOCK(pdev);
4983d14abf15SRobert Mustacchi 
4984d14abf15SRobert Mustacchi     /* Indicate the RST to the Client if it was the second completion (see the sketch after this function) */
4985d14abf15SRobert Mustacchi     if ( indicate ) {
4986d14abf15SRobert Mustacchi         lm_tcp_indicate_rst_received(pdev,tcp);
4987d14abf15SRobert Mustacchi     }
4988d14abf15SRobert Mustacchi }
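
/*
 * Illustrative sketch (compiled out): a remote RST completes separately on the
 * Rx and Tx CQEs. Each side marks itself done (or defers with a *_PENDING /
 * *_NOT_SAFE flag while data or buffers are still outstanding), and the client
 * is notified exactly once - by whichever side completes second, when both
 * REMOTE_RST_INDICATED_RX and REMOTE_RST_INDICATED_TX are set. The helper
 * below is a local assumption used only to show the pattern.
 */
#if 0
static int
rst_ready_to_indicate(unsigned int sp_flags,
                      unsigned int rx_done_flag,  /* e.g. REMOTE_RST_INDICATED_RX */
                      unsigned int tx_done_flag)  /* e.g. REMOTE_RST_INDICATED_TX */
{
    /* Only the second of the two completions sees both flags set. */
    return ((sp_flags & rx_done_flag) != 0) && ((sp_flags & tx_done_flag) != 0);
}
#endif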
4989d14abf15SRobert Mustacchi 
4990d14abf15SRobert Mustacchi static void lm_tcp_tx_rst_received_complete (
4991d14abf15SRobert Mustacchi     struct _lm_device_t * pdev,
4992d14abf15SRobert Mustacchi     lm_tcp_state_t      * tcp
4993d14abf15SRobert Mustacchi     )
4994d14abf15SRobert Mustacchi {
4995d14abf15SRobert Mustacchi     lm_tcp_con_t * tx_con;
4996d14abf15SRobert Mustacchi     lm_status_t lm_status;
4997d14abf15SRobert Mustacchi     u8_t indicate = 0;
4998d14abf15SRobert Mustacchi     u8_t send_empty_ramrod = 0;
4999d14abf15SRobert Mustacchi     u8_t upload_on_fail = 0;
5000d14abf15SRobert Mustacchi 
5001d14abf15SRobert Mustacchi     MM_INIT_TCP_LOCK_HANDLE();
5002d14abf15SRobert Mustacchi 
5003d14abf15SRobert Mustacchi     DbgMessage(pdev, INFORMl4tx, "###lm_tcp_tx_rst_received_complete cid=%d\n", tcp->cid);
5004d14abf15SRobert Mustacchi     DbgBreakIf( ! (pdev && tcp) );
5005d14abf15SRobert Mustacchi     /* The state may only be NORMAL or UPLOAD_PENDING */
5006d14abf15SRobert Mustacchi     DbgBreakIf( (tcp->hdr.status != STATE_STATUS_NORMAL) &&
5007d14abf15SRobert Mustacchi                 (tcp->hdr.status != STATE_STATUS_UPLOAD_PENDING) );
5008d14abf15SRobert Mustacchi 
5009d14abf15SRobert Mustacchi     tx_con = tcp->tx_con;
5010d14abf15SRobert Mustacchi 
5011d14abf15SRobert Mustacchi     /* Get global TOE lock */
5012d14abf15SRobert Mustacchi     MM_ACQUIRE_TOE_LOCK(pdev);
5013d14abf15SRobert Mustacchi 
5014d14abf15SRobert Mustacchi     /* Take the Tx lock */
5015d14abf15SRobert Mustacchi     mm_acquire_tcp_lock(pdev, tx_con);
5016d14abf15SRobert Mustacchi 
5017d14abf15SRobert Mustacchi     /* This will imply TX_COMP_LOCKED and TX_DB_BLOCKED */
5018d14abf15SRobert Mustacchi     DbgBreakIf(tx_con->flags & TCP_REMOTE_RST_RECEIVED);
5019d14abf15SRobert Mustacchi     tx_con->flags |= TCP_REMOTE_RST_RECEIVED;
5020d14abf15SRobert Mustacchi 
5021d14abf15SRobert Mustacchi     /* There is a potential race between receiving a reset and aborting buffers: once the reset is received from the CSTORM it doesn't mean that
5022d14abf15SRobert Mustacchi      * the pbf isn't still trying to transmit other buffers. To make sure it flushes the remaining buffers we need to pass a ramrod - any ramrod -
5023d14abf15SRobert Mustacchi      * if the active_tb_list is not empty. If the tx post is blocked already, it means it's too late: rst / fin / trm / inv were posted, so we don't
5024d14abf15SRobert Mustacchi      * abort the buffers - they will be aborted later on. To make sure buffers aren't aborted now we turn on the TCP_CON_RST_IND_NOT_SAFE flag; they'll
5025d14abf15SRobert Mustacchi      * be aborted in terminate later on. We won't send the indication either; we'll send it when completing the terminate / empty ramrod later on.
5026d14abf15SRobert Mustacchi      * (See the sketch after this function.) */
5027d14abf15SRobert Mustacchi     /* Check if all received data has been completed towards the Client + terminate ramrod has not been posted yet */
5028d14abf15SRobert Mustacchi     if ( s_list_entry_cnt(&tx_con->active_tb_list) > 0 ) {
5029d14abf15SRobert Mustacchi         DbgMessage(pdev, INFORMl4rx, "TX lm_tcp_process_rst_received_cqe - postponing rst indication cid=%d sending empty ramrod\n", tcp->cid);
5030d14abf15SRobert Mustacchi         tx_con->u.tx.flags |= TCP_CON_RST_IND_NOT_SAFE;
5031d14abf15SRobert Mustacchi         /* send the empty ramrod only if we're not blocked already.
5032d14abf15SRobert Mustacchi          * TCP_TX_POST_BLOCKED includes the FIN_REQ_POSTED case, in which we should still send the empty ramrod,
5033d14abf15SRobert Mustacchi          * and REMOTE_RST_RECEIVED_ALL_RX_INDICATED and TCP_POST_BLOCKED, which shouldn't be set when reaching this point,
5034d14abf15SRobert Mustacchi          * so we'll check all the other relevant flags.
5035d14abf15SRobert Mustacchi          * Here we determine whether to send the ramrod according to the lm flags; it is possible that the ramrod will be dropped later
5036d14abf15SRobert Mustacchi          * in mm_tcp_post_empty_slow_path_request() due to an upload request pending in the um */
5037d14abf15SRobert Mustacchi         if (!(tx_con->flags & (TCP_RST_REQ_POSTED | TCP_INV_REQ_POSTED | TCP_TRM_REQ_POSTED))) {
5038d14abf15SRobert Mustacchi             send_empty_ramrod = TRUE;
5039d14abf15SRobert Mustacchi         }
5040d14abf15SRobert Mustacchi     } else {
5041d14abf15SRobert Mustacchi         /* Mark Tx ready for RST indication */
5042d14abf15SRobert Mustacchi         tcp->sp_flags |= REMOTE_RST_INDICATED_TX;
5043d14abf15SRobert Mustacchi     }
5044d14abf15SRobert Mustacchi 
5045d14abf15SRobert Mustacchi     /* Release the Tx lock */
5046d14abf15SRobert Mustacchi     mm_release_tcp_lock(pdev, tx_con);
5047d14abf15SRobert Mustacchi 
5048d14abf15SRobert Mustacchi     if ( (tcp->sp_flags & REMOTE_RST_INDICATED_RX) && (tcp->sp_flags & REMOTE_RST_INDICATED_TX) ) {
5049d14abf15SRobert Mustacchi         indicate = 1;
5050d14abf15SRobert Mustacchi         tcp->tcp_state_calc.con_rst_flag = TRUE;
5051d14abf15SRobert Mustacchi     } else if ( tcp->sp_flags & REMOTE_RST_INDICATED_RX ) {
5052d14abf15SRobert Mustacchi         upload_on_fail = 1; /* RX is done; the only reason TX isn't is that it still has buffers to abort. If we can't postpone TX (the empty ramrod can't be posted), ask for an upload instead. */
5053d14abf15SRobert Mustacchi         tcp->tcp_state_calc.con_rst_flag = TRUE;
5054d14abf15SRobert Mustacchi     }
5055d14abf15SRobert Mustacchi 
5056d14abf15SRobert Mustacchi     /* Indicate the RST to the Client if it was the second completion */
5057d14abf15SRobert Mustacchi     if ( indicate ) {
5058d14abf15SRobert Mustacchi         /* Release global TOE lock */
5059d14abf15SRobert Mustacchi         MM_RELEASE_TOE_LOCK(pdev);
5060d14abf15SRobert Mustacchi 
5061d14abf15SRobert Mustacchi         lm_tcp_indicate_rst_received(pdev,tcp);
5062d14abf15SRobert Mustacchi     } else if (send_empty_ramrod) {
5063d14abf15SRobert Mustacchi         /* Send an empty ramrod; only when it completes can we complete the reset, i.e. tx reset received.
5064d14abf15SRobert Mustacchi          * It is possible that the ramrod will be dropped due to an upload request pending in the um */
5065d14abf15SRobert Mustacchi         DbgMessage(pdev, INFORMl4tx, "Sending Empty Ramrod TX\n");
5066d14abf15SRobert Mustacchi         lm_status = mm_tcp_post_empty_slow_path_request(pdev, tcp, SP_REQUEST_PENDING_TX_RST);
5067d14abf15SRobert Mustacchi 
5068d14abf15SRobert Mustacchi         /* Release global TOE lock */
5069d14abf15SRobert Mustacchi         MM_RELEASE_TOE_LOCK(pdev);
5070d14abf15SRobert Mustacchi 
5071d14abf15SRobert Mustacchi         if ((lm_status != LM_STATUS_PENDING) && (lm_status != LM_STATUS_UPLOAD_IN_PROGRESS)) { /* we expect the posting of an empty ramrod to be pending... */
5072d14abf15SRobert Mustacchi             /* This is a bit of a problem: we don't want to risk the pbf accessing released data, so instead
5073d14abf15SRobert Mustacchi              * we risk the application seeing an error and delay the abort of buffers till the terminate stage.
5074d14abf15SRobert Mustacchi              * We don't remove the RST_IND_PENDING... we'll look at that before aborting buffers... */
5075d14abf15SRobert Mustacchi             if (upload_on_fail) {
5076d14abf15SRobert Mustacchi                 DbgMessage(pdev, WARNl4sp, "Couldn't send empty ramrod on TX when we needed\n");
5077d14abf15SRobert Mustacchi 
5078d14abf15SRobert Mustacchi                 /* instead of indicating the rst, which is NOT possible at this stage, ask for connection upload */
5079d14abf15SRobert Mustacchi                 mm_tcp_indicate_retrieve_indication(pdev, tcp, L4_UPLOAD_REASON_UPLOAD_REQUESTED);
5080d14abf15SRobert Mustacchi                 pdev->toe_info.stats.total_rst_upld_requested++;
5081d14abf15SRobert Mustacchi             }
5082d14abf15SRobert Mustacchi         }
5083d14abf15SRobert Mustacchi     }
5084d14abf15SRobert Mustacchi     else
5085d14abf15SRobert Mustacchi     {
5086d14abf15SRobert Mustacchi         /* Release global TOE lock */
5087d14abf15SRobert Mustacchi         MM_RELEASE_TOE_LOCK(pdev);
5088d14abf15SRobert Mustacchi     }
5089d14abf15SRobert Mustacchi }
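
/*
 * Illustrative sketch (compiled out): on the Tx side a remote RST is not safe
 * to indicate while the PBF may still be transmitting queued buffers, so the
 * handler above either marks Tx done immediately (active_tb_list empty) or
 * defers and flushes the queue with an empty ramrod - unless a RST/INV/TRM
 * ramrod was already posted and will flush it anyway (and even then the um
 * layer may drop the post if an upload is pending). The helper below is a
 * local assumption, not a driver API.
 */
#if 0
static int
tx_rst_needs_empty_ramrod(unsigned int active_tb_cnt,
                          unsigned int tx_flags,
                          unsigned int flush_ramrod_posted_mask) /* RST|INV|TRM posted */
{
    if (active_tb_cnt == 0) {
        return 0;   /* Tx half can complete right away */
    }
    /* Buffers still queued: send the empty ramrod only if no other
     * queue-flushing ramrod has been posted already. */
    return (tx_flags & flush_ramrod_posted_mask) == 0;
}
#endif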
5090d14abf15SRobert Mustacchi 
5091d14abf15SRobert Mustacchi 
5092d14abf15SRobert Mustacchi static void lm_tcp_rx_abortive_disconnect_ramrod_complete (
5093d14abf15SRobert Mustacchi     struct _lm_device_t * pdev,
5094d14abf15SRobert Mustacchi     lm_tcp_state_t      * tcp)
5095d14abf15SRobert Mustacchi {
5096d14abf15SRobert Mustacchi     lm_tcp_con_t * rx_con;
5097d14abf15SRobert Mustacchi     MM_INIT_TCP_LOCK_HANDLE();
5098d14abf15SRobert Mustacchi 
5099d14abf15SRobert Mustacchi     DbgMessage(pdev, INFORMl4rx, "###lm_tcp_process_abortive_disconnect_request_rcqe cid=%d\n", tcp->cid);
5100d14abf15SRobert Mustacchi     DbgBreakIf( ! (pdev && tcp) );
5101d14abf15SRobert Mustacchi 
5102d14abf15SRobert Mustacchi     rx_con = tcp->rx_con;
5103d14abf15SRobert Mustacchi 
5104d14abf15SRobert Mustacchi     /* Take the Rx lock */
5105d14abf15SRobert Mustacchi     mm_acquire_tcp_lock(pdev, rx_con);
5106d14abf15SRobert Mustacchi 
5107d14abf15SRobert Mustacchi     /* break if we received a rst on the cqe and we still have an 'unreleased' generic buffer in our peninsula */
5108d14abf15SRobert Mustacchi     DbgBreakIf( !d_list_is_empty(&tcp->rx_con->u.rx.gen_info.peninsula_list) &&
5109d14abf15SRobert Mustacchi                 (((lm_tcp_gen_buf_t *)(d_list_peek_tail(&tcp->rx_con->u.rx.gen_info.peninsula_list)))->placed_bytes == 0));
5110d14abf15SRobert Mustacchi 
5111d14abf15SRobert Mustacchi     /* This implies COMP_BLOCKED */
5112d14abf15SRobert Mustacchi     rx_con->flags |= TCP_RST_REQ_COMPLETED;
5113d14abf15SRobert Mustacchi 
5114d14abf15SRobert Mustacchi     /* Release the Rx lock */
5115d14abf15SRobert Mustacchi     mm_release_tcp_lock(pdev, rx_con);
5116d14abf15SRobert Mustacchi }
5117d14abf15SRobert Mustacchi 
5118d14abf15SRobert Mustacchi static void lm_tcp_tx_abortive_disconnect_ramrod_complete (
5119d14abf15SRobert Mustacchi     struct _lm_device_t * pdev,
5120d14abf15SRobert Mustacchi     lm_tcp_state_t      * tcp)
5121d14abf15SRobert Mustacchi {
5122d14abf15SRobert Mustacchi     lm_tcp_con_t * tx_con;
5123d14abf15SRobert Mustacchi     MM_INIT_TCP_LOCK_HANDLE();
5124d14abf15SRobert Mustacchi 
5125d14abf15SRobert Mustacchi     DbgMessage(pdev, INFORMl4tx, "###lm_tcp_tx_abortive_disconnect_request_complete cid=%d\n", tcp->cid);
5126d14abf15SRobert Mustacchi     DbgBreakIf( ! (pdev && tcp) );
5127d14abf15SRobert Mustacchi 
5128d14abf15SRobert Mustacchi     tx_con = tcp->tx_con;
5129d14abf15SRobert Mustacchi 
5130d14abf15SRobert Mustacchi     /* Take the Tx lock */
5131d14abf15SRobert Mustacchi     mm_acquire_tcp_lock(pdev, tx_con);
5132d14abf15SRobert Mustacchi 
5133d14abf15SRobert Mustacchi     /* This implies COMP_BLOCKED */
5134d14abf15SRobert Mustacchi     tx_con->flags |= TCP_RST_REQ_COMPLETED;
5135d14abf15SRobert Mustacchi 
5136d14abf15SRobert Mustacchi     /* Release the Tx lock */
5137d14abf15SRobert Mustacchi     mm_release_tcp_lock(pdev, tx_con);
5138d14abf15SRobert Mustacchi }
5139d14abf15SRobert Mustacchi 
5140d14abf15SRobert Mustacchi 
5141d14abf15SRobert Mustacchi 
5142d14abf15SRobert Mustacchi static void lm_tcp_comp_invalidate_request(
5143d14abf15SRobert Mustacchi     struct _lm_device_t        * pdev,
5144d14abf15SRobert Mustacchi     lm_tcp_state_t             * tcp,
5145d14abf15SRobert Mustacchi     lm_tcp_slow_path_request_t * request)
5146d14abf15SRobert Mustacchi {
5147d14abf15SRobert Mustacchi     DbgMessage(pdev, INFORMl4sp, "### Completing invalidate request cid=%d\n", tcp->cid);
5148d14abf15SRobert Mustacchi 
5149d14abf15SRobert Mustacchi     MM_ACQUIRE_TOE_LOCK(pdev);
5150d14abf15SRobert Mustacchi 
5151d14abf15SRobert Mustacchi     DbgBreakIf(!pdev || !tcp);
5152d14abf15SRobert Mustacchi     DbgBreakIf(tcp->hdr.status != STATE_STATUS_NORMAL && tcp->hdr.status != STATE_STATUS_ABORTED);
5153d14abf15SRobert Mustacchi 
5154d14abf15SRobert Mustacchi     tcp->hdr.status = STATE_STATUS_INVALIDATED;
5155d14abf15SRobert Mustacchi 
5156d14abf15SRobert Mustacchi     tcp->sp_request = NULL;
5157d14abf15SRobert Mustacchi 
5158d14abf15SRobert Mustacchi     request->status = LM_STATUS_SUCCESS;
5159d14abf15SRobert Mustacchi 
5160d14abf15SRobert Mustacchi     DbgBreakIf(tcp->sp_flags & ( SP_REQUEST_COMPLETED_TX | SP_REQUEST_COMPLETED_RX ));
5161d14abf15SRobert Mustacchi 
5162d14abf15SRobert Mustacchi     mm_tcp_comp_slow_path_request(pdev, tcp, request);
5163d14abf15SRobert Mustacchi 
5164d14abf15SRobert Mustacchi     MM_RELEASE_TOE_LOCK(pdev);
5165d14abf15SRobert Mustacchi }
5166d14abf15SRobert Mustacchi 
5167d14abf15SRobert Mustacchi 
5168d14abf15SRobert Mustacchi static void lm_tcp_tx_invalidate_ramrod_complete (
5169d14abf15SRobert Mustacchi     struct _lm_device_t * pdev,
5170d14abf15SRobert Mustacchi     lm_tcp_state_t      * tcp)
5171d14abf15SRobert Mustacchi {
5172d14abf15SRobert Mustacchi     lm_tcp_con_t * tx_con;
5173d14abf15SRobert Mustacchi     MM_INIT_TCP_LOCK_HANDLE();
5174d14abf15SRobert Mustacchi 
5175d14abf15SRobert Mustacchi     DbgMessage(pdev, INFORMl4tx, "###lm_tcp_tx_invalidate_request_complete cid=%d\n", tcp->cid);
5176d14abf15SRobert Mustacchi 
5177d14abf15SRobert Mustacchi     DbgBreakIf( ! (pdev && tcp) );
5178d14abf15SRobert Mustacchi 
5179d14abf15SRobert Mustacchi     tx_con = tcp->tx_con;
5180d14abf15SRobert Mustacchi 
5181d14abf15SRobert Mustacchi     /* Take the Tx lock */
5182d14abf15SRobert Mustacchi     mm_acquire_tcp_lock(pdev, tx_con);
5183d14abf15SRobert Mustacchi 
5184d14abf15SRobert Mustacchi     /* This implies COMP_BLOCKED */
5185d14abf15SRobert Mustacchi     DbgBreakIf(tx_con->flags & TCP_INV_REQ_COMPLETED);
5186d14abf15SRobert Mustacchi     tx_con->flags |= TCP_INV_REQ_COMPLETED;
5187d14abf15SRobert Mustacchi 
5188d14abf15SRobert Mustacchi     /* Release the Tx lock */
5189d14abf15SRobert Mustacchi     mm_release_tcp_lock(pdev, tx_con);
5190d14abf15SRobert Mustacchi }
5191d14abf15SRobert Mustacchi 
5192d14abf15SRobert Mustacchi 
5193d14abf15SRobert Mustacchi static void lm_tcp_rx_invalidate_ramrod_complete (
5194d14abf15SRobert Mustacchi     struct _lm_device_t * pdev,
5195d14abf15SRobert Mustacchi     lm_tcp_state_t      * tcp)
5196d14abf15SRobert Mustacchi {
5197d14abf15SRobert Mustacchi     lm_tcp_con_t * rx_con;
5198d14abf15SRobert Mustacchi     MM_INIT_TCP_LOCK_HANDLE();
5199d14abf15SRobert Mustacchi 
5200d14abf15SRobert Mustacchi     DbgMessage(pdev, INFORMl4rx, "###lm_tcp_process_invalidate_request_rcqe cid=%d\n", tcp->cid);
5201d14abf15SRobert Mustacchi     DbgBreakIf( ! (pdev && tcp) );
5202d14abf15SRobert Mustacchi 
5203d14abf15SRobert Mustacchi     rx_con = tcp->rx_con;
5204d14abf15SRobert Mustacchi 
5205d14abf15SRobert Mustacchi 
5206d14abf15SRobert Mustacchi     /* Take the Rx lock */
5207d14abf15SRobert Mustacchi     mm_acquire_tcp_lock(pdev, rx_con);
5208d14abf15SRobert Mustacchi     /* 'POST/IND BLOCKED' in the request.
5209d14abf15SRobert Mustacchi        Even if a post was in the middle, it must be done by now
5210d14abf15SRobert Mustacchi        */
5211d14abf15SRobert Mustacchi     DbgBreakIf( mm_tcp_indicating_bufs(rx_con) );
5212d14abf15SRobert Mustacchi 
5213d14abf15SRobert Mustacchi     /* break if we received an invalidate on the cqe and we still have an 'unreleased' generic buffer in our peninsula */
5214d14abf15SRobert Mustacchi     DbgBreakIf( !d_list_is_empty(&tcp->rx_con->u.rx.gen_info.peninsula_list) &&
5215d14abf15SRobert Mustacchi                 (((lm_tcp_gen_buf_t *)(d_list_peek_tail(&tcp->rx_con->u.rx.gen_info.peninsula_list)))->placed_bytes == 0));
5216d14abf15SRobert Mustacchi 
5217d14abf15SRobert Mustacchi     /* This implies COMP_BLOCKED */
5218d14abf15SRobert Mustacchi     DbgBreakIf(rx_con->flags & TCP_INV_REQ_COMPLETED);
5219d14abf15SRobert Mustacchi     rx_con->flags |= TCP_INV_REQ_COMPLETED;
5220d14abf15SRobert Mustacchi 
5221d14abf15SRobert Mustacchi     /* Release the Rx lock */
5222d14abf15SRobert Mustacchi     mm_release_tcp_lock(pdev, rx_con);
5223d14abf15SRobert Mustacchi }
5224d14abf15SRobert Mustacchi 
5225d14abf15SRobert Mustacchi 
5226d14abf15SRobert Mustacchi static void lm_tcp_get_delegated(
5227d14abf15SRobert Mustacchi     IN    struct _lm_device_t * pdev,
5228d14abf15SRobert Mustacchi     IN    lm_tcp_state_t      * tcp,
5229d14abf15SRobert Mustacchi     IN    void                * ctx_p /* context with updated data */
5230d14abf15SRobert Mustacchi     )
5231d14abf15SRobert Mustacchi {
5232d14abf15SRobert Mustacchi     struct xstorm_toe_tcp_ag_context_section * xag_tcp = NULL;
5233d14abf15SRobert Mustacchi     struct tstorm_tcp_st_context_section     * tst_tcp = NULL;
5234d14abf15SRobert Mustacchi     struct xstorm_tcp_context_section        * xst_tcp = NULL;
5235d14abf15SRobert Mustacchi     struct tstorm_toe_tcp_ag_context_section * tag_tcp = NULL;
5236d14abf15SRobert Mustacchi 
5237d14abf15SRobert Mustacchi     struct ustorm_toe_st_context             * ust_toe = NULL;
5238d14abf15SRobert Mustacchi     struct cstorm_toe_st_context             * cst_toe = NULL;
5239d14abf15SRobert Mustacchi     struct xstorm_toe_ag_context             * xag_toe = NULL;
5240d14abf15SRobert Mustacchi     struct xstorm_toe_context_section        * xst_toe = NULL;
5241d14abf15SRobert Mustacchi 
5242d14abf15SRobert Mustacchi     u32_t send_wnd;
5243d14abf15SRobert Mustacchi     u8_t  sanity_check;
5244d14abf15SRobert Mustacchi 
5245d14abf15SRobert Mustacchi     ASSERT_STATIC(sizeof(struct xstorm_toe_tcp_ag_context_section) == sizeof(struct xstorm_tcp_tcp_ag_context_section));
5246d14abf15SRobert Mustacchi     ASSERT_STATIC(sizeof(struct tstorm_toe_tcp_ag_context_section) == sizeof(struct tstorm_tcp_tcp_ag_context_section));
5247d14abf15SRobert Mustacchi 
5248d14abf15SRobert Mustacchi     sanity_check = FALSE;
5249d14abf15SRobert Mustacchi 
5250d14abf15SRobert Mustacchi     /* Set shortcuts... and take care of driver delegated params. */
5251d14abf15SRobert Mustacchi     if (tcp->ulp_type == TOE_CONNECTION_TYPE)
5252d14abf15SRobert Mustacchi     {
5253d14abf15SRobert Mustacchi         xst_tcp = &((struct toe_context *)ctx_p)->xstorm_st_context.context.common.tcp;
5254d14abf15SRobert Mustacchi         xag_tcp = &((struct toe_context *)ctx_p)->xstorm_ag_context.tcp;
5255d14abf15SRobert Mustacchi         tst_tcp = &((struct toe_context *)ctx_p)->tstorm_st_context.context.tcp;
5256d14abf15SRobert Mustacchi         tag_tcp = &((struct toe_context *)ctx_p)->tstorm_ag_context.tcp;
5257d14abf15SRobert Mustacchi 
5258d14abf15SRobert Mustacchi         xst_toe = &((struct toe_context *)ctx_p)->xstorm_st_context.context.toe;
5259d14abf15SRobert Mustacchi         xag_toe = &((struct toe_context *)ctx_p)->xstorm_ag_context;
5260d14abf15SRobert Mustacchi         cst_toe = &((struct toe_context *)ctx_p)->cstorm_st_context.context;
5261d14abf15SRobert Mustacchi         ust_toe = &((struct toe_context *)ctx_p)->ustorm_st_context.context;
5262d14abf15SRobert Mustacchi 
5263d14abf15SRobert Mustacchi         if (S32_SUB(tcp->rx_con->u.rx.sws_info.drv_rcv_win_right_edge, tcp->rx_con->db_data.rx->rcv_win_right_edge) < 0) {
5264d14abf15SRobert Mustacchi             /* due to window decrease issues... */
5265d14abf15SRobert Mustacchi             tcp->rx_con->u.rx.sws_info.drv_rcv_win_right_edge = tcp->rx_con->db_data.rx->rcv_win_right_edge;
5266d14abf15SRobert Mustacchi         }
5267d14abf15SRobert Mustacchi 
5268d14abf15SRobert Mustacchi         /* RcvWnd = WndRightEdge - RcvNext */
5269d14abf15SRobert Mustacchi         /* recv_win_seq is determined by the driver, and therefore is the most up-to-date value.
5270d14abf15SRobert Mustacchi         * We also have to add any pending indicated bytes to this value, because we don't
5271d14abf15SRobert Mustacchi         * add them immediately, only when the buffer is returned, to help limit our GRQ pool. */
5272d14abf15SRobert Mustacchi         tcp->tcp_delegated.recv_win_seq = tcp->rx_con->u.rx.sws_info.drv_rcv_win_right_edge +
5273d14abf15SRobert Mustacchi             tcp->rx_con->u.rx.gen_info.pending_indicated_bytes;
5274d14abf15SRobert Mustacchi 
5275d14abf15SRobert Mustacchi         if (!lm_reset_is_inprogress(pdev))
5276d14abf15SRobert Mustacchi         {
5277d14abf15SRobert Mustacchi             sanity_check = TRUE;
5278d14abf15SRobert Mustacchi         }
5279d14abf15SRobert Mustacchi 
5280d14abf15SRobert Mustacchi     }
5281d14abf15SRobert Mustacchi     else if (tcp->ulp_type == ISCSI_CONNECTION_TYPE)
5282d14abf15SRobert Mustacchi     {
5283d14abf15SRobert Mustacchi         xst_tcp = &((struct iscsi_context *)ctx_p)->xstorm_st_context.common.tcp;
5284d14abf15SRobert Mustacchi         xag_tcp = (struct xstorm_toe_tcp_ag_context_section *)&((struct iscsi_context *)ctx_p)->xstorm_ag_context.tcp;
5285d14abf15SRobert Mustacchi         tst_tcp = &((struct iscsi_context *)ctx_p)->tstorm_st_context.tcp;
5286d14abf15SRobert Mustacchi         tag_tcp = (struct tstorm_toe_tcp_ag_context_section *)&((struct toe_context *)ctx_p)->tstorm_ag_context.tcp;
5287d14abf15SRobert Mustacchi 
5288d14abf15SRobert Mustacchi                            /* RcvWnd    =         WndRightEdge    -         RcvNext */
5289d14abf15SRobert Mustacchi         tcp->tcp_delegated.recv_win_seq = tag_tcp->wnd_right_edge - tst_tcp->rcv_nxt;
5290d14abf15SRobert Mustacchi     }
5291d14abf15SRobert Mustacchi     else
5292d14abf15SRobert Mustacchi     {
5293d14abf15SRobert Mustacchi        DbgBreakMsg("lm_tcp_get_delegated: Unsupported protocol type \n") ;
5294d14abf15SRobert Mustacchi        return;
5295d14abf15SRobert Mustacchi     }
5296d14abf15SRobert Mustacchi 
5297d14abf15SRobert Mustacchi     /* Sanity Checks: (block below)
5298d14abf15SRobert Mustacchi      * The purpose of the sanity checks below (debug only) is to find a problem in the FW delegated params before
5299d14abf15SRobert Mustacchi      * we send them to the OS, where it may assert later on, or worse, only after several offloads.
5300d14abf15SRobert Mustacchi      * Perform sanity checks only if the chip isn't under reset. In case of error recovery, for example, these delegated
5301d14abf15SRobert Mustacchi      * params may be rubbish; that's ok since in the same case we'll also send a LM_STATUS_FAILURE in the upload completion.
5302d14abf15SRobert Mustacchi      */
5303d14abf15SRobert Mustacchi     if (sanity_check)
5304d14abf15SRobert Mustacchi     {
5305d14abf15SRobert Mustacchi 
5306d14abf15SRobert Mustacchi         /* context sanity checks */
5307d14abf15SRobert Mustacchi #if !defined(_VBD_CMD_)
5308d14abf15SRobert Mustacchi         /* check that DMA write towards host is done */
5309d14abf15SRobert Mustacchi         DbgBreakIf(((struct toe_context *)ctx_p)->ustorm_ag_context.__state == 0);
5310d14abf15SRobert Mustacchi         DbgBreakIf(((struct toe_context *)ctx_p)->tstorm_ag_context.__state == 0);
5311d14abf15SRobert Mustacchi         DbgBreakIf(((struct toe_context *)ctx_p)->xstorm_ag_context.__state == 0);
5312d14abf15SRobert Mustacchi         /* needs to be: t <= x <= u <= drv  */
5313d14abf15SRobert Mustacchi         /* driver window right edge >= ust.prev_rcv_win_right_edge >= xag.local_adv_wnd >= tag.wnd_right_edge (cyclic)*/
5314d14abf15SRobert Mustacchi // apply in w2k3
5315d14abf15SRobert Mustacchi //        DbgBreakIf(S32_SUB(xag_tcp->local_adv_wnd, tag_tcp->wnd_right_edge) < 0);
5316d14abf15SRobert Mustacchi //        DbgBreakIf(S32_SUB(ust_toe->prev_rcv_win_right_edge, xag_tcp->local_adv_wnd) < 0);
5317d14abf15SRobert Mustacchi //        DbgBreakIf(S32_SUB(tcp->rx_con->u.rx.sws_info.drv_rcv_win_right_edge, ust_toe->prev_rcv_win_right_edge) < 0);
5318d14abf15SRobert Mustacchi         /* xag.snd_nxt <= xst.snd_max */
5319d14abf15SRobert Mustacchi         DbgBreakIf(S32_SUB(xag_tcp->snd_nxt, xst_tcp->snd_max) > 0);
5320d14abf15SRobert Mustacchi         /* xag.snd_una <= tag.snd_una <= tag.snd_max <= xst.snd_max */
5321d14abf15SRobert Mustacchi         DbgBreakIf(S32_SUB(xag_tcp->snd_una, tag_tcp->snd_una) != 0);
5322d14abf15SRobert Mustacchi         DbgBreakIf(S32_SUB(tag_tcp->snd_una, tag_tcp->snd_max) > 0);
5323d14abf15SRobert Mustacchi         // TBD: the assert is not valid, discuss with FW regarding a change. DbgBreakIf(S32_SUB(tag_tcp->snd_max, xst_tcp->snd_max) > 0);
5324d14abf15SRobert Mustacchi         /* xag.cmp_bd_start_seq <= c.cmp_bd_start_seq <= tag.snd_una */
5325d14abf15SRobert Mustacchi         DbgBreakIf(S32_SUB(xag_toe->cmp_bd_start_seq, tag_tcp->snd_una) > 0);
5326d14abf15SRobert Mustacchi         /* tst.rcv_nxt >= xag.ack_to_far_end */
5327d14abf15SRobert Mustacchi         DbgBreakIf(S32_SUB(tst_tcp->rcv_nxt, xag_tcp->ack_to_far_end) != 0);
5328d14abf15SRobert Mustacchi         /* tst.rcv_nxt >= tst.prev_seg_seq  */
5329d14abf15SRobert Mustacchi         //DbgBreakIf(S32_SUB(tst_tcp->rcv_nxt, tst_tcp->prev_seg_seq) < 0);
5330d14abf15SRobert Mustacchi         /* xag.cmp_bd_cons <= cst.bd_cons <= xst.tx_bd_cons <= xst.bd_prod <= Driver bd prod (16 bit cyclic) */
5331d14abf15SRobert Mustacchi         DbgBreakIf(S16_SUB(xag_toe->cmp_bd_cons, cst_toe->bd_cons) > 0);
5332d14abf15SRobert Mustacchi         DbgBreakIf(S16_SUB(xst_toe->tx_bd_cons, xst_toe->bd_prod) > 0);
5333d14abf15SRobert Mustacchi         DbgBreakIf(S16_SUB(xst_toe->bd_prod, tcp->tx_con->db_data.tx->bds_prod) > 0);
5334d14abf15SRobert Mustacchi         DbgBreakIf(S32_SUB(tag_tcp->snd_una, xag_tcp->snd_nxt) > 0);
5335d14abf15SRobert Mustacchi         /* timestamp: */
5336d14abf15SRobert Mustacchi         /* tst.timestamp_exists == xst.ts_enable -- ? can't find fields in fw*/
5337d14abf15SRobert Mustacchi 
5338d14abf15SRobert Mustacchi         /* tst.timestamp_recent >= xag.ts_to_echo (cyclic) */
5339d14abf15SRobert Mustacchi         DbgBreakIf(S32_SUB(tst_tcp->timestamp_recent, xag_tcp->ts_to_echo) < 0);
5340d14abf15SRobert Mustacchi 
5341d14abf15SRobert Mustacchi         /* fin: ?? can't find fields in fw */
5342d14abf15SRobert Mustacchi         /* if (xst.fin_sent_flag) then bds should contain bd with fin // driver flag 'sent-fin' */
5343d14abf15SRobert Mustacchi         /* if (tag.fin_sent_flag) then xst.fin_sent_flag */
5344d14abf15SRobert Mustacchi 
5345d14abf15SRobert Mustacchi 
5346d14abf15SRobert Mustacchi         /* check that rcv nxt has the expected value compared to bytes that were completed on rx application buffers and generic buffers */
5347d14abf15SRobert Mustacchi /*        rx_bytes_recv = tcp->rx_con->bytes_comp_cnt +
5348d14abf15SRobert Mustacchi                         tcp->rx_con->u.rx.gen_info.bytes_indicated_accepted +
5349d14abf15SRobert Mustacchi                         (tcp->sp_request->ret_data.tcp_upload_data.frag_list ? tcp->sp_request->ret_data.tcp_upload_data.frag_list->size : 0) -
5350d14abf15SRobert Mustacchi                         tcp->rx_con->bytes_push_skip_cnt -
5351d14abf15SRobert Mustacchi         if (tcp->rx_con->flags & TCP_REMOTE_FIN_RECEIVED)
5352d14abf15SRobert Mustacchi         {
5353d14abf15SRobert Mustacchi             DbgBreakIf(((u32_t)(tcp->tcp_delegated.recv_next + (u32_t)rx_bytes_recv + 1) != tst_tcp->rcv_nxt));
5354d14abf15SRobert Mustacchi         } else
5355d14abf15SRobert Mustacchi         {
5356d14abf15SRobert Mustacchi             DbgBreakIf(((u32_t)(tcp->tcp_delegated.recv_next + (u32_t)rx_bytes_recv) != tst_tcp->rcv_nxt));
5357d14abf15SRobert Mustacchi         }
5358d14abf15SRobert Mustacchi */
5359d14abf15SRobert Mustacchi         /* check that cstrom rel seq is equal to tstorm snd una */
5360d14abf15SRobert Mustacchi         DbgBreakIf(((struct toe_context *)ctx_p)->cstorm_ag_context.rel_seq != tag_tcp->snd_una);
5361d14abf15SRobert Mustacchi 
5362d14abf15SRobert Mustacchi         /* check that snd una has the expected value compared to bytes that were completed on tx application buffers */
5363d14abf15SRobert Mustacchi         DbgBreakIf((u32_t)(tcp->tcp_delegated.send_una + (u32_t)tcp->tx_con->bytes_comp_cnt + (u32_t)tcp->tx_con->bytes_trm_aborted_cnt - (u32_t)tcp->tx_con->bytes_aborted_cnt) != tag_tcp->snd_una);
5364d14abf15SRobert Mustacchi #endif
5365d14abf15SRobert Mustacchi 
5366d14abf15SRobert Mustacchi     }
5367d14abf15SRobert Mustacchi 
5368d14abf15SRobert Mustacchi     /* Set the updated delegated parameters */
5369d14abf15SRobert Mustacchi     tcp->tcp_delegated.recv_next      = tst_tcp->rcv_nxt;
5370d14abf15SRobert Mustacchi 
5371d14abf15SRobert Mustacchi     tcp->tcp_delegated.send_una       = tag_tcp->snd_una;
5372d14abf15SRobert Mustacchi     tcp->tcp_delegated.send_next      = xag_tcp->snd_nxt;
5373d14abf15SRobert Mustacchi     tcp->tcp_delegated.send_max       = xst_tcp->snd_max;
5374d14abf15SRobert Mustacchi     /* recent_seg_wnd is the value received in the last packet from the other side. This means the value is scaled,
5375d14abf15SRobert Mustacchi      * therefore we need to get the absolute value by 'unscaling' it (see the sketch after this function) */
5376d14abf15SRobert Mustacchi     tcp->tcp_delegated.send_win       = (tst_tcp->recent_seg_wnd << tcp->tcp_const.snd_seg_scale)
5377d14abf15SRobert Mustacchi                                         + tcp->tcp_delegated.send_una;
5378d14abf15SRobert Mustacchi     send_wnd = tst_tcp->recent_seg_wnd << tcp->tcp_const.snd_seg_scale;
5379d14abf15SRobert Mustacchi 
5380d14abf15SRobert Mustacchi     /* Does not come from chip! Driver uses what the chip returned for SndWnd,
5381d14abf15SRobert Mustacchi        and takes the maximum between that, all past query results for this parameter,
5382d14abf15SRobert Mustacchi        and 2 * MSS.
5383d14abf15SRobert Mustacchi      */
5384d14abf15SRobert Mustacchi     if ( tcp->tcp_delegated.max_send_win < tcp->tcp_delegated.send_win - tcp->tcp_delegated.send_una) {
5385d14abf15SRobert Mustacchi         tcp->tcp_delegated.max_send_win = tcp->tcp_delegated.send_win - tcp->tcp_delegated.send_una;
5386d14abf15SRobert Mustacchi     }
5387d14abf15SRobert Mustacchi 
5388d14abf15SRobert Mustacchi     tcp->tcp_delegated.send_wl1                   = tst_tcp->prev_seg_seq;
5389d14abf15SRobert Mustacchi     tcp->tcp_delegated.send_cwin                  = tst_tcp->cwnd + tcp->tcp_delegated.send_una;
5390d14abf15SRobert Mustacchi     tcp->tcp_delegated.ss_thresh                  = tst_tcp->ss_thresh;
5391d14abf15SRobert Mustacchi 
5392d14abf15SRobert Mustacchi     tcp->tcp_delegated.sm_rtt    = (tst_tcp->flags1 & TSTORM_TCP_ST_CONTEXT_SECTION_RTT_SRTT)
5393d14abf15SRobert Mustacchi                                    >> TSTORM_TCP_ST_CONTEXT_SECTION_RTT_SRTT_SHIFT;
5394d14abf15SRobert Mustacchi     tcp->tcp_delegated.sm_delta    = (tst_tcp->flags2 & TSTORM_TCP_ST_CONTEXT_SECTION_RTT_VARIATION)
5395d14abf15SRobert Mustacchi                                      >> TSTORM_TCP_ST_CONTEXT_SECTION_RTT_VARIATION_SHIFT;
5396d14abf15SRobert Mustacchi     /* convert ms to ticks. */
5397d14abf15SRobert Mustacchi     //16/09/2008 NirV: Assert removed, return upon fw fix
5398d14abf15SRobert Mustacchi     //DbgBreakIf(tcp->tcp_delegated.sm_rtt > (35*TIMERS_TICKS_PER_SEC));
5399d14abf15SRobert Mustacchi     //DbgBreakIf(tcp->tcp_delegated.sm_delta > (35*TIMERS_TICKS_PER_SEC));
5400d14abf15SRobert Mustacchi 
5401d14abf15SRobert Mustacchi     tcp->tcp_delegated.sm_rtt =
5402d14abf15SRobert Mustacchi         lm_time_resolution(pdev, tcp->tcp_delegated.sm_rtt, TIMERS_TICKS_PER_SEC, pdev->ofld_info.l4_params.ticks_per_second)*8;
5403d14abf15SRobert Mustacchi     tcp->tcp_delegated.sm_delta =
5404d14abf15SRobert Mustacchi         lm_time_resolution(pdev, tcp->tcp_delegated.sm_delta, TIMERS_TICKS_PER_SEC, pdev->ofld_info.l4_params.ticks_per_second)*4;
5405d14abf15SRobert Mustacchi 
5406d14abf15SRobert Mustacchi     tcp->tcp_delegated.ts_recent     = tst_tcp->timestamp_recent;
5407d14abf15SRobert Mustacchi     /* convert ms to ticks. */
5408d14abf15SRobert Mustacchi     tcp->tcp_delegated.ts_recent_age =
5409d14abf15SRobert Mustacchi         lm_time_resolution(pdev, tst_tcp->timestamp_recent_time, TSEMI_CLK1_TICKS_PER_SEC, pdev->ofld_info.l4_params.ticks_per_second);
5410d14abf15SRobert Mustacchi 
5411d14abf15SRobert Mustacchi     tcp->tcp_delegated.tstamp   = xst_tcp->ts_time_diff;
5412d14abf15SRobert Mustacchi     /* convert ms to ticks. */
5413d14abf15SRobert Mustacchi     tcp->tcp_delegated.total_rt =
5414d14abf15SRobert Mustacchi         lm_time_resolution(pdev, tst_tcp->retransmit_start_time, TIMERS_TICKS_PER_SEC, pdev->ofld_info.l4_params.ticks_per_second);
5415d14abf15SRobert Mustacchi 
5416d14abf15SRobert Mustacchi     tcp->tcp_delegated.dup_ack_count        = tst_tcp->dup_ack_count;
5417d14abf15SRobert Mustacchi     tcp->tcp_delegated.snd_wnd_probe_count  = tst_tcp->persist_probe_count;
5418d14abf15SRobert Mustacchi 
5419d14abf15SRobert Mustacchi     if(tcp->tcp_delegated.send_una == tcp->tcp_delegated.send_max && (send_wnd > 0)) { /* KA is running (?) */
5420d14abf15SRobert Mustacchi         if ( (tcp->tcp_cached.tcp_flags & TCP_FLAG_ENABLE_KEEP_ALIVE)) {
5421d14abf15SRobert Mustacchi 
5422d14abf15SRobert Mustacchi            tcp->tcp_delegated.u.keep_alive.probe_cnt     = tst_tcp->ka_probe_count;
5423d14abf15SRobert Mustacchi 
5424d14abf15SRobert Mustacchi             /* convert ms to ticks. */
5425d14abf15SRobert Mustacchi             tcp->tcp_delegated.u.keep_alive.timeout_delta =
5426d14abf15SRobert Mustacchi             lm_time_resolution(pdev, xag_tcp->ka_timer, TIMERS_TICKS_PER_SEC, pdev->ofld_info.l4_params.ticks_per_second);
5427d14abf15SRobert Mustacchi 
5428d14abf15SRobert Mustacchi             /* The ka timeout may be negative in cases where it expired and the timer was re-armed for other purposes. In this case we write 0 to the
5429d14abf15SRobert Mustacchi              * timeout delta - the OS will treat this as if the timer has just expired */
5430d14abf15SRobert Mustacchi             /* bugbug: the FW returns a 28-bit value (the timer block's bus width is 28 bits - ariel), so a value such as 0xffffff9 is actually negative.
5431d14abf15SRobert Mustacchi              * Instead of checking for negative we just check whether it's larger than 0x8000000 (see the sketch after this function). */
5432d14abf15SRobert Mustacchi             if ((tcp->tcp_delegated.u.keep_alive.timeout_delta != 0xffffffff) &&
5433d14abf15SRobert Mustacchi                 (tcp->tcp_delegated.u.keep_alive.timeout_delta > 0x8000000)) {
5434d14abf15SRobert Mustacchi                 tcp->tcp_delegated.u.keep_alive.timeout_delta = 0;
5435d14abf15SRobert Mustacchi             }
5436d14abf15SRobert Mustacchi         } else { //ka disabled
5437d14abf15SRobert Mustacchi             tcp->tcp_delegated.u.keep_alive.probe_cnt     = 0;
5438d14abf15SRobert Mustacchi             tcp->tcp_delegated.u.keep_alive.timeout_delta = 0xffffffff;
5439d14abf15SRobert Mustacchi         }
5440d14abf15SRobert Mustacchi     } else {
5441d14abf15SRobert Mustacchi         tcp->tcp_delegated.u.retransmit.num_retx      = tst_tcp->retransmit_count;
5442d14abf15SRobert Mustacchi         //TBD: Ariel, why it comes from the same place as TotalRT?
5443d14abf15SRobert Mustacchi         /* TODO: we need to convert retx_ms to clock ticks in VBD instead of
5444d14abf15SRobert Mustacchi          * doing this conversion in NDIS (same as Teton) */
5445d14abf15SRobert Mustacchi 
5446d14abf15SRobert Mustacchi         /* The rto_timer may be negative in cases where it expired and the timer was re-armed for other purposes. In this case we write 0 to
5447d14abf15SRobert Mustacchi          * retx_ms - the OS will treat this as if the timer has just expired and immediately retransmit. */
5448d14abf15SRobert Mustacchi         /* bugbug: the FW returns a 28-bit value, so a value such as 0xffffff9 is actually negative. Instead of checking for
5449d14abf15SRobert Mustacchi          * negative we just check whether it's larger than 0x8000000, matching the check below. */
5450d14abf15SRobert Mustacchi         if ((xag_tcp->rto_timer != 0xffffffff) && (xag_tcp->rto_timer > 0x8000000)) {
5451d14abf15SRobert Mustacchi             tcp->tcp_delegated.u.retransmit.retx_ms = 0;
5452d14abf15SRobert Mustacchi         } else {
5453d14abf15SRobert Mustacchi             tcp->tcp_delegated.u.retransmit.retx_ms = xag_tcp->rto_timer;
5454d14abf15SRobert Mustacchi         }
5455d14abf15SRobert Mustacchi     }
5456d14abf15SRobert Mustacchi 
5457d14abf15SRobert Mustacchi     /* Calculate the TCP connection state */
5458d14abf15SRobert Mustacchi     tcp->tcp_delegated.con_state = lm_tcp_calc_state(pdev, tcp,
5459d14abf15SRobert Mustacchi                                                      xst_tcp->tcp_params & XSTORM_TCP_CONTEXT_SECTION_FIN_SENT_FLAG ? 1 : 0);
5460d14abf15SRobert Mustacchi     pdev->toe_info.stats.con_state_on_upload[tcp->tcp_delegated.con_state]++;
5461d14abf15SRobert Mustacchi }
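
/*
 * Illustrative sketch (compiled out) of two conversions done above when
 * reading the delegated params: (1) the FW reports the peer's window as the
 * raw value from the last received segment, so the absolute window is that
 * value shifted left by the negotiated send window scale; (2) the timer
 * fields come from a 28-bit timer block, so an already-expired (negative)
 * timer shows up as a large positive 28-bit value and is clamped to 0.
 * The function names below are local assumptions for this sketch.
 */
#if 0
static unsigned int
unscale_send_wnd(unsigned int recent_seg_wnd, unsigned int snd_seg_scale)
{
    return recent_seg_wnd << snd_seg_scale;   /* absolute window in bytes */
}

static unsigned int
clamp_28bit_timer(unsigned int timer_val)
{
    if (timer_val == 0xffffffff) {
        return timer_val;                     /* sentinel: leave untouched */
    }
    /* Values above 0x8000000 are negative in the 28-bit timer domain,
     * i.e. the timer already expired - report 0. */
    return (timer_val > 0x8000000) ? 0 : timer_val;
}
#endif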
5462d14abf15SRobert Mustacchi 
5463d14abf15SRobert Mustacchi 
5464d14abf15SRobert Mustacchi void lm_init_sp_req_type(
5465d14abf15SRobert Mustacchi     struct _lm_device_t        * pdev,
5466d14abf15SRobert Mustacchi     lm_tcp_state_t             * tcp,
5467d14abf15SRobert Mustacchi     lm_tcp_slow_path_request_t * lm_req,
5468d14abf15SRobert Mustacchi     void                       * req_input_data)
5469d14abf15SRobert Mustacchi {
5470d14abf15SRobert Mustacchi 
5471d14abf15SRobert Mustacchi     UNREFERENCED_PARAMETER_(pdev);
5472d14abf15SRobert Mustacchi 
5473d14abf15SRobert Mustacchi     switch(lm_req->type) {
5474d14abf15SRobert Mustacchi     case SP_REQUEST_INITIATE_OFFLOAD:
5475d14abf15SRobert Mustacchi     case SP_REQUEST_TERMINATE_OFFLOAD:
5476d14abf15SRobert Mustacchi     case SP_REQUEST_PENDING_LOCAL_FIN_DISCONNECT:
5477d14abf15SRobert Mustacchi     case SP_REQUEST_PENDING_REMOTE_DISCONNECT:
5478d14abf15SRobert Mustacchi     case SP_REQUEST_PENDING_TX_RST:
5479d14abf15SRobert Mustacchi     case SP_REQUEST_ABORTIVE_DISCONNECT:
5480d14abf15SRobert Mustacchi     case SP_REQUEST_INVALIDATE:
5481d14abf15SRobert Mustacchi         break;
5482d14abf15SRobert Mustacchi     case SP_REQUEST_UPDATE_TCP:
5483d14abf15SRobert Mustacchi     case SP_REQUEST_UPDATE_PATH:
5484d14abf15SRobert Mustacchi     case SP_REQUEST_UPDATE_NEIGH:
5485d14abf15SRobert Mustacchi     case SP_REQUEST_UPDATE_PATH_RELINK:
5486d14abf15SRobert Mustacchi         lm_req->sent_data.tcp_update_data.data = req_input_data;
5487d14abf15SRobert Mustacchi         break;
5488d14abf15SRobert Mustacchi     case SP_REQUEST_QUERY:
5489d14abf15SRobert Mustacchi         DbgBreakMsg("GilR - NOT IMPLEMENTED!\n");
5490d14abf15SRobert Mustacchi         break;
5491d14abf15SRobert Mustacchi     default:
5492d14abf15SRobert Mustacchi         DbgBreakMsg("Illegal slow path request type!\n");
5493d14abf15SRobert Mustacchi     }
5494d14abf15SRobert Mustacchi 
5495d14abf15SRobert Mustacchi     /* initialize common section of the sp request */
5496d14abf15SRobert Mustacchi     lm_req->sp_req_common.req_post_func = (void *)lm_tcp_post_slow_path_request;
5497d14abf15SRobert Mustacchi     lm_req->sp_req_common.req_post_ctx  = tcp;
5498d14abf15SRobert Mustacchi }
5499d14abf15SRobert Mustacchi 
5500d14abf15SRobert Mustacchi 
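/* Illustrative sketch (assumptions noted, not part of the driver build): a hypothetical
 * caller preparing an UPDATE slow-path request. Only the UPDATE_* request types carry an
 * input payload (sent_data.tcp_update_data.data); offload/terminate/invalidate requests
 * do not. Allocation of the request itself is OS-specific and not shown. */
#if 0 /* illustrative only */
static void _example_prepare_update_request(
    struct _lm_device_t        * pdev,
    lm_tcp_state_t             * tcp,
    lm_tcp_slow_path_request_t * lm_req,
    void                       * update_data)
{
    lm_req->type = SP_REQUEST_UPDATE_TCP;
    lm_init_sp_req_type(pdev, tcp, lm_req, update_data);
    /* at this point sp_req_common.req_post_func/req_post_ctx are set and the
     * request can be posted via the common posting path */
}
#endif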
5501d14abf15SRobert Mustacchi 
5502d14abf15SRobert Mustacchi static void _lm_tcp_comp_upload_tcp_request (
5503d14abf15SRobert Mustacchi     IN    struct _lm_device_t * pdev,
5504d14abf15SRobert Mustacchi     IN    lm_tcp_state_t      * tcp
5505d14abf15SRobert Mustacchi     )
5506d14abf15SRobert Mustacchi {
5507d14abf15SRobert Mustacchi     lm_tcp_con_t               * rx_con    = tcp->rx_con;
5508d14abf15SRobert Mustacchi     lm_tcp_con_t               * tx_con    = tcp->tx_con;
5509d14abf15SRobert Mustacchi     u8_t                         has_fin   = 0;
5510d14abf15SRobert Mustacchi     u8_t                         has_rst   = 0;
5511d14abf15SRobert Mustacchi     lm_tcp_slow_path_request_t * sp_req    = tcp->sp_request;
5512d14abf15SRobert Mustacchi     lm_path_state_t            * path      = NULL;
5513d14abf15SRobert Mustacchi     lm_status_t                  lm_status = LM_STATUS_SUCCESS;
5514d14abf15SRobert Mustacchi     #if 0 // TODO: add WINDOW_DEC validation check in w2k3, implement upon os type identification in the lm
5515d14abf15SRobert Mustacchi     #if (DBG && !defined(_VBD_CMD_) && !defined(__USER_MODE_DEBUG))
5516d14abf15SRobert Mustacchi     u32_t expect_rwin;
5517d14abf15SRobert Mustacchi     #endif
5518d14abf15SRobert Mustacchi     #endif
5519d14abf15SRobert Mustacchi     MM_INIT_TCP_LOCK_HANDLE();
5520d14abf15SRobert Mustacchi 
5521d14abf15SRobert Mustacchi     /* status will be changed only after upload completion returns from the client */
5522d14abf15SRobert Mustacchi 
5523d14abf15SRobert Mustacchi     if (tcp->ulp_type == TOE_CONNECTION_TYPE) {
5524d14abf15SRobert Mustacchi         /* Abort Tx buffers and pending graceful disconnect request if any */
5525d14abf15SRobert Mustacchi         mm_acquire_tcp_lock(pdev, tx_con);
5526d14abf15SRobert Mustacchi         lm_tcp_abort_bufs(pdev, tcp, tx_con, (tx_con->flags & TCP_CON_RST_IND_NOT_SAFE)? LM_STATUS_CONNECTION_RESET : LM_STATUS_UPLOAD_IN_PROGRESS);
5527d14abf15SRobert Mustacchi 
5528d14abf15SRobert Mustacchi         /* Remember pending RST if any */
5529d14abf15SRobert Mustacchi         has_rst |= (tx_con->u.tx.flags & TCP_CON_RST_IND_NOT_SAFE) ? 1 : 0;
5530d14abf15SRobert Mustacchi 
5531d14abf15SRobert Mustacchi         /* Clear pending RST */
5532d14abf15SRobert Mustacchi         tx_con->u.tx.flags &= ~(TCP_CON_RST_IND_NOT_SAFE);
5533d14abf15SRobert Mustacchi 
5534d14abf15SRobert Mustacchi         mm_release_tcp_lock(pdev, tx_con);
5535d14abf15SRobert Mustacchi 
5536d14abf15SRobert Mustacchi         /* Rx abortive part... */
5537d14abf15SRobert Mustacchi         mm_acquire_tcp_lock(pdev, rx_con);
5538d14abf15SRobert Mustacchi         /* Abort pending buffers */
5539d14abf15SRobert Mustacchi         lm_tcp_abort_bufs(pdev, tcp, rx_con, LM_STATUS_UPLOAD_IN_PROGRESS);
5540d14abf15SRobert Mustacchi 
5541d14abf15SRobert Mustacchi         /* Remember pending FIN if any */
5542d14abf15SRobert Mustacchi         has_fin = rx_con->u.rx.flags & TCP_CON_FIN_IND_PENDING ? 1 : 0;
5543d14abf15SRobert Mustacchi 
5544d14abf15SRobert Mustacchi         /* Remember pending RST if any */
5545d14abf15SRobert Mustacchi         has_rst |= (rx_con->u.rx.flags & TCP_CON_RST_IND_PENDING) ? 1 : 0;
5546d14abf15SRobert Mustacchi 
5547d14abf15SRobert Mustacchi         /* Clear pending FIN and RST */
5548d14abf15SRobert Mustacchi         rx_con->u.rx.flags &= ~(TCP_CON_FIN_IND_PENDING | TCP_CON_RST_IND_PENDING);
5549d14abf15SRobert Mustacchi 
5550d14abf15SRobert Mustacchi         /* Get generic data that hasn't been indicated so far */
5551d14abf15SRobert Mustacchi         lm_status = lm_tcp_rx_get_buffered_data_from_terminate(pdev, tcp,
5552d14abf15SRobert Mustacchi                                      &(tcp->sp_request->ret_data.tcp_upload_data.frag_list),
5553d14abf15SRobert Mustacchi                                      &(tcp->sp_request->ret_data.tcp_upload_data.ret_buf_ctx)
5554d14abf15SRobert Mustacchi                                      );
5555d14abf15SRobert Mustacchi         mm_release_tcp_lock(pdev, rx_con);
5556d14abf15SRobert Mustacchi 
5557d14abf15SRobert Mustacchi         /* check if we have a delayed fin */
5558d14abf15SRobert Mustacchi         /* assumption: if we have a delayed-fin, it means we have buffered data*/
5559d14abf15SRobert Mustacchi         /* the OS can't handle a fin indication followed by buffered data */
5560d14abf15SRobert Mustacchi         /* DbgBreakIf(has_fin && !sp_req->ret_data.tcp_upload_data.frag_list); */
5561d14abf15SRobert Mustacchi         /* DbgBreakIf(has_rst && !sp_req->ret_data.tcp_upload_data.frag_list); */
5562d14abf15SRobert Mustacchi 
5563d14abf15SRobert Mustacchi         /* check if we have a delayed rst (rst is sp so no locks) */
5564d14abf15SRobert Mustacchi         if ( has_rst ) {
5565d14abf15SRobert Mustacchi             mm_tcp_indicate_rst_received(pdev, tcp);
5566d14abf15SRobert Mustacchi         }
5567d14abf15SRobert Mustacchi     }
5568d14abf15SRobert Mustacchi 
5569d14abf15SRobert Mustacchi     /* Indication part */
5570d14abf15SRobert Mustacchi     MM_ACQUIRE_TOE_LOCK(pdev);
5571d14abf15SRobert Mustacchi 
5572d14abf15SRobert Mustacchi     DbgBreakIf(!(tcp->sp_flags & SP_TCP_QRY_REQ_POSTED));
5573d14abf15SRobert Mustacchi     tcp->sp_flags |= SP_TCP_QRY_REQ_COMP;
5574d14abf15SRobert Mustacchi 
5575d14abf15SRobert Mustacchi     /* Update delegated parameters */
5576d14abf15SRobert Mustacchi     lm_tcp_get_delegated(pdev, tcp, &tcp->sp_req_data.virt_addr->toe_ctx);
5577d14abf15SRobert Mustacchi 
5578d14abf15SRobert Mustacchi     tcp->sp_request = NULL;
5579d14abf15SRobert Mustacchi     sp_req->status = lm_status;
5580d14abf15SRobert Mustacchi 
5581d14abf15SRobert Mustacchi     /* Indicate SP request completion up to the client */
5582d14abf15SRobert Mustacchi     /* Restore the request type to TERMINATE_OFFLOAD - the type set by UM when the request was posted */
5583d14abf15SRobert Mustacchi     sp_req->type = SP_REQUEST_TERMINATE_OFFLOAD;
5584d14abf15SRobert Mustacchi 
5585d14abf15SRobert Mustacchi     DbgBreakIf(tcp->path->num_dependents == 0);
5586d14abf15SRobert Mustacchi     tcp->path->num_dependents--;
5587d14abf15SRobert Mustacchi 
5588d14abf15SRobert Mustacchi     // update stats counters if TOE
5589d14abf15SRobert Mustacchi     if (TOE_CONNECTION_TYPE == tcp->ulp_type )
5590d14abf15SRobert Mustacchi     {
5591d14abf15SRobert Mustacchi         if( IP_VERSION_IPV4 == tcp->path->path_const.ip_version )
5592d14abf15SRobert Mustacchi         {
5593d14abf15SRobert Mustacchi             --pdev->vars.stats.stats_mirror.stats_drv.drv_toe.ipv[STATS_IP_4_IDX].currently_established;
5594d14abf15SRobert Mustacchi         }
5595d14abf15SRobert Mustacchi         else if( IP_VERSION_IPV6 == tcp->path->path_const.ip_version )
5596d14abf15SRobert Mustacchi         {
5597d14abf15SRobert Mustacchi             --pdev->vars.stats.stats_mirror.stats_drv.drv_toe.ipv[STATS_IP_6_IDX].currently_established;
5598d14abf15SRobert Mustacchi         }
5599d14abf15SRobert Mustacchi     }
5600d14abf15SRobert Mustacchi 
5601d14abf15SRobert Mustacchi     if (tcp->path->hdr.status == STATE_STATUS_UPLOAD_PENDING &&
5602d14abf15SRobert Mustacchi         tcp->path->num_dependents == 0) {
5603d14abf15SRobert Mustacchi         /* last dependent of a pending-upload path... */
5604d14abf15SRobert Mustacchi         path = tcp->path;
5605d14abf15SRobert Mustacchi     }
5606d14abf15SRobert Mustacchi     tcp->path = NULL;
5607d14abf15SRobert Mustacchi 
5608d14abf15SRobert Mustacchi     #if 0 // TODO: add WINDOW_DEC validation check in w2k3, implement upon os type identification in the lm
5609d14abf15SRobert Mustacchi     if (tcp->ulp_type == TOE_CONNECTION_TYPE) {
5610d14abf15SRobert Mustacchi         #if (DBG && !defined(_VBD_CMD_) && !defined(__USER_MODE_DEBUG))
5611d14abf15SRobert Mustacchi         expect_rwin = (u32_t) S32_SUB(
5612d14abf15SRobert Mustacchi             tcp->tcp_delegated.recv_win_seq,
5613d14abf15SRobert Mustacchi             tcp->tcp_delegated.recv_next);
5614d14abf15SRobert Mustacchi         /* These asserts are not valid for WSD connections. */
5615d14abf15SRobert Mustacchi         if(sp_req->ret_data.tcp_upload_data.frag_list)
5616d14abf15SRobert Mustacchi         {
5617d14abf15SRobert Mustacchi             expect_rwin += (u32_t)sp_req->ret_data.tcp_upload_data.frag_list->size;
5618d14abf15SRobert Mustacchi         }
5619d14abf15SRobert Mustacchi 
5620d14abf15SRobert Mustacchi         /* If we received a fin / rst we may be down by one on the initial_rcv_wnd... */
5621d14abf15SRobert Mustacchi         if((tcp->rx_con->flags & TCP_REMOTE_FIN_RECEIVED) ||
5622d14abf15SRobert Mustacchi            (tcp->rx_con->flags & TCP_REMOTE_RST_RECEIVED))
5623d14abf15SRobert Mustacchi         {
5624d14abf15SRobert Mustacchi             DbgBreakIf(
5625d14abf15SRobert Mustacchi                 (expect_rwin != tcp->tcp_cached.initial_rcv_wnd) &&
5626d14abf15SRobert Mustacchi                 (expect_rwin != tcp->tcp_cached.initial_rcv_wnd - 1));
5627d14abf15SRobert Mustacchi         }
5628d14abf15SRobert Mustacchi         else
5629d14abf15SRobert Mustacchi         {
5630d14abf15SRobert Mustacchi             DbgBreakIf(expect_rwin != tcp->tcp_cached.initial_rcv_wnd);
5631d14abf15SRobert Mustacchi         }
5632d14abf15SRobert Mustacchi         #endif
5633d14abf15SRobert Mustacchi     }
5634d14abf15SRobert Mustacchi     #endif
5635d14abf15SRobert Mustacchi 
5636d14abf15SRobert Mustacchi     mm_tcp_comp_slow_path_request(pdev, tcp, sp_req);
5637d14abf15SRobert Mustacchi 
5638d14abf15SRobert Mustacchi     if (path) {
5639d14abf15SRobert Mustacchi         DbgMessage(pdev, INFORMl4sp, "_lm_tcp_comp_upload_request: last tcp dependent of pending path %p\n", path);
5640d14abf15SRobert Mustacchi         _lm_tcp_comp_upload_path_request(pdev, path);
5641d14abf15SRobert Mustacchi     }
5642d14abf15SRobert Mustacchi 
5643d14abf15SRobert Mustacchi     MM_RELEASE_TOE_LOCK(pdev);
5644d14abf15SRobert Mustacchi 
5645d14abf15SRobert Mustacchi 
5646d14abf15SRobert Mustacchi }
5647d14abf15SRobert Mustacchi 
5648d14abf15SRobert Mustacchi lm_tcp_state_t * lm_tcp_get_next_path_dependent(
5649d14abf15SRobert Mustacchi     struct _lm_device_t *pdev,
5650d14abf15SRobert Mustacchi     void   *path_state,
5651d14abf15SRobert Mustacchi     lm_tcp_state_t * tcp_state)
5652d14abf15SRobert Mustacchi {
5653d14abf15SRobert Mustacchi     if (tcp_state == NULL) {
5654d14abf15SRobert Mustacchi         tcp_state = (lm_tcp_state_t *) d_list_peek_head(&pdev->toe_info.state_blk.tcp_list);
5655d14abf15SRobert Mustacchi     } else {
5656d14abf15SRobert Mustacchi         tcp_state = (lm_tcp_state_t *) d_list_next_entry(&tcp_state->hdr.link);
5657d14abf15SRobert Mustacchi     }
5658d14abf15SRobert Mustacchi 
5659d14abf15SRobert Mustacchi     while(tcp_state)  {
5660d14abf15SRobert Mustacchi         /* Return this tcp state only if it is a dependent of the given
5661d14abf15SRobert Mustacchi          * path state. */
5662d14abf15SRobert Mustacchi         if (tcp_state->path == (lm_path_state_t*)path_state) {
5663d14abf15SRobert Mustacchi             return tcp_state;
5664d14abf15SRobert Mustacchi         }
5665d14abf15SRobert Mustacchi         tcp_state = (lm_tcp_state_t *) d_list_next_entry(&tcp_state->hdr.link);
5666d14abf15SRobert Mustacchi     }
5667d14abf15SRobert Mustacchi     return NULL;
5668d14abf15SRobert Mustacchi 
5669d14abf15SRobert Mustacchi }
5670d14abf15SRobert Mustacchi 
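/* Illustrative sketch (not part of the driver build): a hypothetical iteration pattern
 * over all tcp states that depend on a given path state. Passing NULL starts the walk
 * from the head of the tcp list; each subsequent call continues from the previous state. */
#if 0 /* illustrative only */
static u32_t _example_count_path_dependents(
    struct _lm_device_t * pdev,
    lm_path_state_t     * path)
{
    lm_tcp_state_t * tcp = NULL;
    u32_t            cnt = 0;

    while ((tcp = lm_tcp_get_next_path_dependent(pdev, path, tcp)) != NULL) {
        cnt++;
    }
    return cnt;
}
#endif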
5671d14abf15SRobert Mustacchi 
5672d14abf15SRobert Mustacchi lm_tcp_state_t * lm_tcp_get_next_neigh_dependent(
5673d14abf15SRobert Mustacchi     struct _lm_device_t *pdev,
5674d14abf15SRobert Mustacchi     void * neigh_state,
5675d14abf15SRobert Mustacchi     lm_tcp_state_t * tcp_state)
5676d14abf15SRobert Mustacchi {
5677d14abf15SRobert Mustacchi     if (tcp_state == NULL) {
5678d14abf15SRobert Mustacchi         tcp_state = (lm_tcp_state_t *) d_list_peek_head(&pdev->toe_info.state_blk.tcp_list);
5679d14abf15SRobert Mustacchi     } else {
5680d14abf15SRobert Mustacchi         tcp_state = (lm_tcp_state_t *) d_list_next_entry(&tcp_state->hdr.link);
5681d14abf15SRobert Mustacchi     }
5682d14abf15SRobert Mustacchi 
5683d14abf15SRobert Mustacchi     while(tcp_state)  {
5684d14abf15SRobert Mustacchi         /* Return this tcp state only if it is a dependent of the given
5685d14abf15SRobert Mustacchi          * neighbor state. */
5686d14abf15SRobert Mustacchi         if (tcp_state->path && (tcp_state->path->neigh == (lm_neigh_state_t*)neigh_state)) {
5687d14abf15SRobert Mustacchi             return tcp_state;
5688d14abf15SRobert Mustacchi         }
5689d14abf15SRobert Mustacchi         tcp_state = (lm_tcp_state_t *) d_list_next_entry(&tcp_state->hdr.link);
5690d14abf15SRobert Mustacchi     }
5691d14abf15SRobert Mustacchi     return NULL;
5692d14abf15SRobert Mustacchi }
5693d14abf15SRobert Mustacchi 
5694d14abf15SRobert Mustacchi 
5695d14abf15SRobert Mustacchi void lm_tcp_update_ramrod_complete(lm_device_t * pdev, lm_tcp_state_t * tcp)
5696d14abf15SRobert Mustacchi {
5697d14abf15SRobert Mustacchi     lm_tcp_slow_path_request_t  *sp_req;
5698d14abf15SRobert Mustacchi     MM_INIT_TCP_LOCK_HANDLE();
5699d14abf15SRobert Mustacchi 
5700d14abf15SRobert Mustacchi     DbgMessage(pdev, INFORMl4sp, "###lm_tcp_update_ramrod_complete cid=%d \n", tcp->cid);
5701d14abf15SRobert Mustacchi 
5702d14abf15SRobert Mustacchi     MM_ACQUIRE_TOE_LOCK(pdev);
5703d14abf15SRobert Mustacchi 
5704d14abf15SRobert Mustacchi     /* assert state status is NORMAL */
5705d14abf15SRobert Mustacchi     DbgBreakIf( (tcp->hdr.status != STATE_STATUS_NORMAL) &&
5706d14abf15SRobert Mustacchi                 (tcp->hdr.status != STATE_STATUS_ABORTED));
5707d14abf15SRobert Mustacchi     DbgBreakIf(tcp->sp_request == NULL);
5708d14abf15SRobert Mustacchi     DbgBreakIf((tcp->sp_request->type != SP_REQUEST_UPDATE_NEIGH) &&
5709d14abf15SRobert Mustacchi                (tcp->sp_request->type != SP_REQUEST_UPDATE_PATH) &&
5710d14abf15SRobert Mustacchi                (tcp->sp_request->type != SP_REQUEST_UPDATE_TCP) &&
5711d14abf15SRobert Mustacchi                (tcp->sp_request->type != SP_REQUEST_UPDATE_PATH_RELINK));
5712d14abf15SRobert Mustacchi 
5713d14abf15SRobert Mustacchi     sp_req = tcp->sp_request;
5714d14abf15SRobert Mustacchi     DbgBreakIf(tcp->sp_flags & ( SP_REQUEST_COMPLETED_TX | SP_REQUEST_COMPLETED_RX ));
5715d14abf15SRobert Mustacchi     sp_req->status = LM_STATUS_SUCCESS;
5716d14abf15SRobert Mustacchi     tcp->sp_request = NULL;
5717d14abf15SRobert Mustacchi 
5718d14abf15SRobert Mustacchi     /* Take the Rx lock */
5719d14abf15SRobert Mustacchi     mm_acquire_tcp_lock(pdev, tcp->rx_con);
5720d14abf15SRobert Mustacchi     if ((sp_req->type == SP_REQUEST_UPDATE_TCP) && (GET_FLAGS(tcp->rx_con->db_data.rx->flags, TOE_RX_DB_DATA_IGNORE_WND_UPDATES)))
5721d14abf15SRobert Mustacchi     {
5722d14abf15SRobert Mustacchi         lm_tcp_rx_post_sws(pdev, tcp, tcp->rx_con, tcp->rx_con->dpc_info.dpc_fw_wnd_after_dec, TCP_RX_POST_SWS_SET);
5723d14abf15SRobert Mustacchi     }
5724d14abf15SRobert Mustacchi     /* Release the Rx lock */
5725d14abf15SRobert Mustacchi     mm_release_tcp_lock(pdev, tcp->rx_con);
5726d14abf15SRobert Mustacchi 
5727d14abf15SRobert Mustacchi     mm_tcp_comp_slow_path_request(pdev, tcp, sp_req);
5728d14abf15SRobert Mustacchi 
5729d14abf15SRobert Mustacchi     MM_RELEASE_TOE_LOCK(pdev);
5730d14abf15SRobert Mustacchi }
5731d14abf15SRobert Mustacchi 
5732d14abf15SRobert Mustacchi 
5733d14abf15SRobert Mustacchi void lm_tcp_query_ramrod_complete(
5734d14abf15SRobert Mustacchi     IN    struct _lm_device_t * pdev,
5735d14abf15SRobert Mustacchi     IN    lm_tcp_state_t      * tcp
5736d14abf15SRobert Mustacchi     )
5737d14abf15SRobert Mustacchi {
5738d14abf15SRobert Mustacchi     DbgMessage(pdev, VERBOSEl4, "## lm_tcp_query_ramrod_comp\n");
5739d14abf15SRobert Mustacchi     DbgBreakIf(! tcp->sp_request );
5740d14abf15SRobert Mustacchi     DbgBreakIf(tcp->sp_request->type != SP_REQUEST_QUERY);
5741d14abf15SRobert Mustacchi 
5742d14abf15SRobert Mustacchi     if (tcp->hdr.status == STATE_STATUS_UPLOAD_PENDING) {
5743d14abf15SRobert Mustacchi         _lm_tcp_comp_upload_tcp_request(pdev, tcp);
5744d14abf15SRobert Mustacchi     } else {
5745d14abf15SRobert Mustacchi         DbgBreakMsg("Vladz: Not implemented yet!\n");
5746d14abf15SRobert Mustacchi     }
5747d14abf15SRobert Mustacchi }
5748d14abf15SRobert Mustacchi 
5749d14abf15SRobert Mustacchi /* TOE lock should be taken by the caller */
5750d14abf15SRobert Mustacchi void lm_tcp_internal_query(
5751d14abf15SRobert Mustacchi     IN    struct _lm_device_t * pdev)
5752d14abf15SRobert Mustacchi {
5753d14abf15SRobert Mustacchi     lm_tcp_state_t *tcp_state;
5754d14abf15SRobert Mustacchi     u32_t status_arr[STATE_STATUS_ERR+1] = {0};
5755d14abf15SRobert Mustacchi     u32_t status, num_tcps, i;
5756d14abf15SRobert Mustacchi 
5757d14abf15SRobert Mustacchi     DbgMessage(pdev, FATAL, "## lm_tcp_debug_query START version %d.%d.%d\n",
5758d14abf15SRobert Mustacchi                 LM_DRIVER_MAJOR_VER, LM_DRIVER_MINOR_VER, LM_DRIVER_FIX_NUM);
5759d14abf15SRobert Mustacchi 
5760d14abf15SRobert Mustacchi     num_tcps = d_list_entry_cnt(&pdev->toe_info.state_blk.tcp_list);
5761d14abf15SRobert Mustacchi     tcp_state = (lm_tcp_state_t *)d_list_peek_head(&pdev->toe_info.state_blk.tcp_list);
5762d14abf15SRobert Mustacchi     i = 0;
5763d14abf15SRobert Mustacchi     while (tcp_state) {
5764d14abf15SRobert Mustacchi         status = tcp_state->hdr.status;
5765d14abf15SRobert Mustacchi         status_arr[status]++;
5766d14abf15SRobert Mustacchi 
5767d14abf15SRobert Mustacchi         /* check state's status */
5768d14abf15SRobert Mustacchi         if(status != STATE_STATUS_NORMAL) {
5769d14abf15SRobert Mustacchi             DbgMessage(pdev, FATAL, "# tcp ptr 0x%p (cid %d), has status=%d (!= normal)\n",
5770d14abf15SRobert Mustacchi                         tcp_state, tcp_state->cid, status);
5771d14abf15SRobert Mustacchi         }
5772d14abf15SRobert Mustacchi 
5773d14abf15SRobert Mustacchi         /* verify there is no pending slow path request */
5774d14abf15SRobert Mustacchi         if(tcp_state->sp_request) {
5775d14abf15SRobert Mustacchi             DbgMessage(pdev, FATAL, "# tcp ptr 0x%p (cid %d), has slow path request of type %d, not completed by FW (sp comp flags=0x%x)\n",
5776d14abf15SRobert Mustacchi                         tcp_state, tcp_state->cid, tcp_state->sp_request->type, tcp_state->sp_flags);
5777d14abf15SRobert Mustacchi         }
5778d14abf15SRobert Mustacchi 
5779d14abf15SRobert Mustacchi         /* verify there are no bytes pending completion */
5780d14abf15SRobert Mustacchi         if(tcp_state->tx_con->bytes_post_cnt != tcp_state->tx_con->bytes_comp_cnt) {
5781d14abf15SRobert Mustacchi             DbgMessage(pdev, FATAL, "# tcp ptr 0x%p (cid %d), has TX pending bytes (%d). (con->flags=0x%x)\n",
5782d14abf15SRobert Mustacchi                         tcp_state, tcp_state->cid,
5783d14abf15SRobert Mustacchi                         S64_SUB(tcp_state->tx_con->bytes_post_cnt, tcp_state->tx_con->bytes_comp_cnt),
5784d14abf15SRobert Mustacchi                         tcp_state->tx_con->flags);
5785d14abf15SRobert Mustacchi         }
5786d14abf15SRobert Mustacchi         if(tcp_state->rx_con->bytes_post_cnt != tcp_state->rx_con->bytes_comp_cnt) {
5787d14abf15SRobert Mustacchi             DbgMessage(pdev, FATAL, "# tcp ptr 0x%p (cid %d), has RX pending bytes (%d). (con->flags=0x%x)\n",
5788d14abf15SRobert Mustacchi                         tcp_state, tcp_state->cid,
5789d14abf15SRobert Mustacchi                         S64_SUB(tcp_state->rx_con->bytes_post_cnt, tcp_state->rx_con->bytes_comp_cnt),
5790d14abf15SRobert Mustacchi                         tcp_state->rx_con->flags);
5791d14abf15SRobert Mustacchi         }
5792d14abf15SRobert Mustacchi 
5793d14abf15SRobert Mustacchi         tcp_state = (lm_tcp_state_t *)d_list_next_entry((d_list_entry_t*)tcp_state);
5794d14abf15SRobert Mustacchi     }
5795d14abf15SRobert Mustacchi 
5796d14abf15SRobert Mustacchi     /* print statistics */
5797d14abf15SRobert Mustacchi     DbgMessage(pdev, FATAL, "# num offloaded connections=%d\n", num_tcps);
5798d14abf15SRobert Mustacchi     for (i = 0; i < STATE_STATUS_ERR+1; i++) {
5799d14abf15SRobert Mustacchi         if (status_arr[i]) {
5800d14abf15SRobert Mustacchi             DbgMessage(pdev, FATAL, "#    num connections in status %d=%d\n", i, status_arr[i]);
5801d14abf15SRobert Mustacchi         }
5802d14abf15SRobert Mustacchi     }
5803d14abf15SRobert Mustacchi 
5804d14abf15SRobert Mustacchi     DbgMessage(pdev, FATAL, "## lm_tcp_debug_query END\n");
5805d14abf15SRobert Mustacchi }
5806d14abf15SRobert Mustacchi 
5807d14abf15SRobert Mustacchi 
5808d14abf15SRobert Mustacchi void lm_tcp_upld_close_received_complete(
5809d14abf15SRobert Mustacchi     struct _lm_device_t * pdev,
5810d14abf15SRobert Mustacchi     lm_tcp_state_t      * tcp,
5811d14abf15SRobert Mustacchi     l4_upload_reason_t    upload_reason)
5812d14abf15SRobert Mustacchi {
5813d14abf15SRobert Mustacchi     DbgMessage(pdev, INFORMl4sp , "###lm_tcp_drv_upl_received_complete cid=%d \n", tcp->cid);
5814d14abf15SRobert Mustacchi 
5815d14abf15SRobert Mustacchi     MM_ACQUIRE_TOE_LOCK(pdev);
5816d14abf15SRobert Mustacchi 
5817d14abf15SRobert Mustacchi     tcp->tcp_state_calc.con_upld_close_flag = TRUE;
5818d14abf15SRobert Mustacchi 
5819d14abf15SRobert Mustacchi     MM_RELEASE_TOE_LOCK(pdev);
5820d14abf15SRobert Mustacchi 
5821d14abf15SRobert Mustacchi     lm_tcp_process_retrieve_indication_cqe(pdev, tcp, upload_reason);
5822d14abf15SRobert Mustacchi     pdev->toe_info.stats.total_close_upld_requested++;
5823d14abf15SRobert Mustacchi }
5824d14abf15SRobert Mustacchi 
5825d14abf15SRobert Mustacchi 
5826d14abf15SRobert Mustacchi /** Description
5827d14abf15SRobert Mustacchi  *   completes the slow-path part of a connection
5828d14abf15SRobert Mustacchi  */
5829d14abf15SRobert Mustacchi void lm_tcp_tx_complete_tcp_sp(
5830d14abf15SRobert Mustacchi     IN    struct _lm_device_t * pdev,
5831d14abf15SRobert Mustacchi     IN    lm_tcp_state_t      * tcp,
5832d14abf15SRobert Mustacchi     IN    lm_tcp_con_t        * con)
5833d14abf15SRobert Mustacchi {
5834d14abf15SRobert Mustacchi     u8_t complete_ramrod;
5835d14abf15SRobert Mustacchi     u32_t sp_type,sp_flags,flags,snapshot_flags;
5836d14abf15SRobert Mustacchi     lm_tcp_slow_path_request_t * request = NULL;
5837d14abf15SRobert Mustacchi 
5838d14abf15SRobert Mustacchi     snapshot_flags = con->dpc_info.snapshot_flags;
5839d14abf15SRobert Mustacchi     if (con->dpc_info.snapshot_flags & LM_TCP_DPC_RESET_RECV) {
5840d14abf15SRobert Mustacchi         con->dpc_info.snapshot_flags &= ~LM_TCP_DPC_RESET_RECV;
5841d14abf15SRobert Mustacchi         lm_tcp_tx_rst_received_complete(pdev, con->tcp_state);
5842d14abf15SRobert Mustacchi     }
5843d14abf15SRobert Mustacchi     if (con->dpc_info.snapshot_flags & LM_TCP_DPC_RAMROD_CMP) {
5844d14abf15SRobert Mustacchi         /* clean the dpc_info: we're done with it */
5845d14abf15SRobert Mustacchi         con->dpc_info.snapshot_flags = 0;
5846d14abf15SRobert Mustacchi 
5847d14abf15SRobert Mustacchi         /* all ramrods on the SCQ also complete on the RCQ */
5848d14abf15SRobert Mustacchi         complete_ramrod = FALSE;
5849d14abf15SRobert Mustacchi         /* Get global TOE lock */
5850d14abf15SRobert Mustacchi         MM_ACQUIRE_TOE_LOCK(pdev);
5851d14abf15SRobert Mustacchi 
5852d14abf15SRobert Mustacchi         /* save the type under the lock because a subsequent ramrod may change it */
5853d14abf15SRobert Mustacchi         sp_type = tcp->sp_request->type;
5854d14abf15SRobert Mustacchi         MM_RELEASE_TOE_LOCK(pdev);
5855d14abf15SRobert Mustacchi 
5856d14abf15SRobert Mustacchi         switch(sp_type) {
5857d14abf15SRobert Mustacchi         case SP_REQUEST_ABORTIVE_DISCONNECT:
5858d14abf15SRobert Mustacchi             lm_tcp_tx_abortive_disconnect_ramrod_complete(pdev, tcp);
5859d14abf15SRobert Mustacchi             break;
5860d14abf15SRobert Mustacchi         case SP_REQUEST_INVALIDATE:
5861d14abf15SRobert Mustacchi             lm_tcp_tx_invalidate_ramrod_complete(pdev, tcp);
5862d14abf15SRobert Mustacchi             break;
5863d14abf15SRobert Mustacchi         case SP_REQUEST_TERMINATE1_OFFLOAD:
5864d14abf15SRobert Mustacchi             lm_tcp_tx_terminate_ramrod_complete(pdev, tcp);
5865d14abf15SRobert Mustacchi             break;
5866d14abf15SRobert Mustacchi         case SP_REQUEST_PENDING_LOCAL_FIN_DISCONNECT:
5867d14abf15SRobert Mustacchi         case SP_REQUEST_PENDING_REMOTE_DISCONNECT:
5868d14abf15SRobert Mustacchi         case SP_REQUEST_PENDING_TX_RST:
5869d14abf15SRobert Mustacchi             lm_tcp_tx_empty_ramrod_complete(pdev, tcp, sp_type);
5870d14abf15SRobert Mustacchi             break;
5871d14abf15SRobert Mustacchi         default:
5872d14abf15SRobert Mustacchi             DbgMessage(pdev, FATAL, "unexpected sp completion type=%d\n", tcp->sp_request->type);
5873d14abf15SRobert Mustacchi             DbgBreak();
5874d14abf15SRobert Mustacchi         }
5875d14abf15SRobert Mustacchi         /* Get global TOE lock */
5876d14abf15SRobert Mustacchi         MM_ACQUIRE_TOE_LOCK(pdev);
5877d14abf15SRobert Mustacchi 
5878d14abf15SRobert Mustacchi         /* verify under the lock that the request type has not changed in the meantime */
5879d14abf15SRobert Mustacchi         DbgBreakIf(sp_type != tcp->sp_request->type);
5880d14abf15SRobert Mustacchi 
5881d14abf15SRobert Mustacchi         tcp->sp_flags |= SP_REQUEST_COMPLETED_TX;
5882d14abf15SRobert Mustacchi 
5883d14abf15SRobert Mustacchi         /* If this is the second completion, complete the ramrod */
5884d14abf15SRobert Mustacchi         if ( tcp->sp_flags & SP_REQUEST_COMPLETED_RX ) {
5885d14abf15SRobert Mustacchi             complete_ramrod = TRUE;
5886d14abf15SRobert Mustacchi             tcp->sp_flags &= ~ ( SP_REQUEST_COMPLETED_TX | SP_REQUEST_COMPLETED_RX );
5887d14abf15SRobert Mustacchi         }
5888d14abf15SRobert Mustacchi         sp_flags = tcp->sp_flags;
5889d14abf15SRobert Mustacchi         flags = tcp->tx_con->flags;
5890d14abf15SRobert Mustacchi         MM_RELEASE_TOE_LOCK(pdev);
5891d14abf15SRobert Mustacchi         if (complete_ramrod) {
5892d14abf15SRobert Mustacchi             request = tcp->sp_request;
5893d14abf15SRobert Mustacchi             DbgBreakIf(request == NULL);
5894d14abf15SRobert Mustacchi             switch(sp_type) {
5895d14abf15SRobert Mustacchi             case SP_REQUEST_ABORTIVE_DISCONNECT:
5896d14abf15SRobert Mustacchi                 DbgBreakIf(request->type != SP_REQUEST_ABORTIVE_DISCONNECT);
5897d14abf15SRobert Mustacchi                 lm_tcp_comp_abortive_disconnect_request(pdev, tcp, request);
5898d14abf15SRobert Mustacchi                 lm_sq_complete(pdev, CMD_PRIORITY_NORMAL, RAMROD_OPCODE_TOE_RESET_SEND, tcp->ulp_type, tcp->cid);
5899d14abf15SRobert Mustacchi             break;
5900d14abf15SRobert Mustacchi             case SP_REQUEST_INVALIDATE:
5901d14abf15SRobert Mustacchi                 DbgBreakIf(request->type != SP_REQUEST_INVALIDATE);
5902d14abf15SRobert Mustacchi                 lm_tcp_comp_invalidate_request(pdev, tcp, request);
5903d14abf15SRobert Mustacchi                 lm_sq_complete(pdev, CMD_PRIORITY_NORMAL, RAMROD_OPCODE_TOE_INVALIDATE, tcp->ulp_type, tcp->cid);
5904d14abf15SRobert Mustacchi             break;
5905d14abf15SRobert Mustacchi             case SP_REQUEST_TERMINATE1_OFFLOAD:
5906d14abf15SRobert Mustacchi                 DbgBreakIf(request->type != SP_REQUEST_TERMINATE1_OFFLOAD);
5907d14abf15SRobert Mustacchi                 lm_tcp_terminate_ramrod_complete(pdev, tcp);
5908d14abf15SRobert Mustacchi                 lm_sq_complete(pdev, CMD_PRIORITY_NORMAL, RAMROD_OPCODE_TOE_TERMINATE, tcp->ulp_type, tcp->cid);
5909d14abf15SRobert Mustacchi             break;
5910d14abf15SRobert Mustacchi             case SP_REQUEST_PENDING_LOCAL_FIN_DISCONNECT:
5911d14abf15SRobert Mustacchi             case SP_REQUEST_PENDING_REMOTE_DISCONNECT:
5912d14abf15SRobert Mustacchi             case SP_REQUEST_PENDING_TX_RST:
5913d14abf15SRobert Mustacchi                 lm_tcp_comp_empty_ramrod_request(pdev, tcp);
5914d14abf15SRobert Mustacchi                 lm_sq_complete(pdev, CMD_PRIORITY_NORMAL, RAMROD_OPCODE_TOE_EMPTY_RAMROD, tcp->ulp_type, tcp->cid);
5915d14abf15SRobert Mustacchi             break;
5916d14abf15SRobert Mustacchi             default:
5917d14abf15SRobert Mustacchi                 DbgMessage(pdev, FATAL, "unexpected sp completion type=%d\n", tcp->sp_request->type);
5918d14abf15SRobert Mustacchi                 DbgBreak();
5919d14abf15SRobert Mustacchi             }
5920d14abf15SRobert Mustacchi         }
5921d14abf15SRobert Mustacchi     }
5922d14abf15SRobert Mustacchi }
5923d14abf15SRobert Mustacchi 
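/* Illustrative sketch (not part of the driver build): a hypothetical condensed form of
 * the TX/RX completion handshake used by lm_tcp_tx_complete_tcp_sp() above and
 * lm_tcp_rx_complete_tcp_sp() below. Each side sets its own flag under the global TOE
 * lock, and only the side that finds the peer flag already set clears both flags and
 * goes on to complete the ramrod. */
#if 0 /* illustrative only */
static u8_t _example_mark_sp_side_done(
    struct _lm_device_t * pdev,
    lm_tcp_state_t      * tcp,
    u32_t                 my_flag,   /* SP_REQUEST_COMPLETED_TX or SP_REQUEST_COMPLETED_RX */
    u32_t                 peer_flag) /* the other side's flag */
{
    u8_t complete_ramrod = FALSE;

    MM_ACQUIRE_TOE_LOCK(pdev);
    tcp->sp_flags |= my_flag;
    if (tcp->sp_flags & peer_flag) {
        complete_ramrod = TRUE;
        tcp->sp_flags &= ~(my_flag | peer_flag);
    }
    MM_RELEASE_TOE_LOCK(pdev);

    return complete_ramrod;
}
#endif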
5924d14abf15SRobert Mustacchi /** Description
5925d14abf15SRobert Mustacchi  *   completes the slow-path part of a connection
5926d14abf15SRobert Mustacchi  *   completes ramrods if ramrod is completed.
5927d14abf15SRobert Mustacchi  *   function logic: every stage turns off its flag; if at the end of the check the flags are zero
5928d14abf15SRobert Mustacchi  *   there is nothing left to do and we can return. We will rarely have more than
5929d14abf15SRobert Mustacchi  *   one or two flags set, so it is not worth checking every combination (too many if/jumps)
5930d14abf15SRobert Mustacchi  */
5931d14abf15SRobert Mustacchi void lm_tcp_rx_complete_tcp_sp(
5932d14abf15SRobert Mustacchi     IN    struct _lm_device_t * pdev,
5933d14abf15SRobert Mustacchi     IN    lm_tcp_state_t      * tcp,
5934d14abf15SRobert Mustacchi     IN    lm_tcp_con_t        * con
5935d14abf15SRobert Mustacchi     )
5936d14abf15SRobert Mustacchi {
5937d14abf15SRobert Mustacchi     u8_t complete_ramrod;
5938d14abf15SRobert Mustacchi     u32_t sp_type,sp_flags,flags,snapshot_flags;
5939d14abf15SRobert Mustacchi     lm_tcp_slow_path_request_t * request = NULL;
5940d14abf15SRobert Mustacchi     u32_t cid;
5941d14abf15SRobert Mustacchi     u8_t  ulp_type;
5942d14abf15SRobert Mustacchi 
5943d14abf15SRobert Mustacchi     /* handle fin recv */
5944d14abf15SRobert Mustacchi     snapshot_flags = con->dpc_info.snapshot_flags;
5945d14abf15SRobert Mustacchi     if (con->dpc_info.snapshot_flags & LM_TCP_DPC_FIN_RECV) {
5946d14abf15SRobert Mustacchi         lm_tcp_rx_fin_received_complete(pdev, tcp, 0);
5947d14abf15SRobert Mustacchi         con->dpc_info.snapshot_flags &= ~LM_TCP_DPC_FIN_RECV;
5948d14abf15SRobert Mustacchi     }
5949d14abf15SRobert Mustacchi     if (con->dpc_info.snapshot_flags & LM_TCP_DPC_FIN_RECV_UPL) {
5950d14abf15SRobert Mustacchi         lm_tcp_rx_fin_received_complete(pdev, tcp, 1);
5951d14abf15SRobert Mustacchi         con->dpc_info.snapshot_flags &= ~LM_TCP_DPC_FIN_RECV_UPL;
5952d14abf15SRobert Mustacchi     }
5953d14abf15SRobert Mustacchi 
5954d14abf15SRobert Mustacchi     DbgMessage(pdev, INFORMl4rx, "lm_tcp_rx_complete_tcp_sp tcp=%p cid=%d \n", tcp, tcp->cid);
5955d14abf15SRobert Mustacchi     /* reset recv needs to be checked first */
5956d14abf15SRobert Mustacchi     if (con->dpc_info.snapshot_flags & LM_TCP_DPC_RESET_RECV) {
5957d14abf15SRobert Mustacchi         con->dpc_info.snapshot_flags &= ~LM_TCP_DPC_RESET_RECV;
5958d14abf15SRobert Mustacchi         lm_tcp_rx_rst_received_complete(pdev, tcp);
5959d14abf15SRobert Mustacchi     }
5960d14abf15SRobert Mustacchi 
5961d14abf15SRobert Mustacchi     /* check if we have some sort of retrieve indication (the flags are effectively checked twice) */
5962d14abf15SRobert Mustacchi     /* Rx completions (from ustorm) will not arrive after the following indications,
5963d14abf15SRobert Mustacchi      * so we can assume they were already received; the same cannot be
5964d14abf15SRobert Mustacchi      * assumed for ramrods */
5965d14abf15SRobert Mustacchi     if (con->dpc_info.snapshot_flags & (LM_TCP_DPC_URG | LM_TCP_DPC_RT_TO | LM_TCP_DPC_KA_TO | LM_TCP_DPC_DBT_RE | LM_TCP_DPC_OPT_ERR | LM_TCP_DPC_UPLD_CLOSE)) {
5966d14abf15SRobert Mustacchi         con->dpc_info.snapshot_flags &= ~(LM_TCP_DPC_TOO_BIG_ISLE | LM_TCP_DPC_TOO_MANY_ISLES);
5967d14abf15SRobert Mustacchi         if (con->dpc_info.snapshot_flags & LM_TCP_DPC_URG) {
5968d14abf15SRobert Mustacchi             con->dpc_info.snapshot_flags &= ~LM_TCP_DPC_URG;
5969d14abf15SRobert Mustacchi             lm_tcp_process_retrieve_indication_cqe(pdev, tcp, L4_UPLOAD_REASON_URG);
5970d14abf15SRobert Mustacchi         }
5971d14abf15SRobert Mustacchi 
5972d14abf15SRobert Mustacchi         if (con->dpc_info.snapshot_flags & LM_TCP_DPC_RT_TO) {
5973d14abf15SRobert Mustacchi             con->dpc_info.snapshot_flags &= ~LM_TCP_DPC_RT_TO;
5974d14abf15SRobert Mustacchi             lm_tcp_process_retrieve_indication_cqe(pdev, tcp, L4_UPLOAD_REASON_RETRANSMIT_TIMEOUT);
5975d14abf15SRobert Mustacchi         }
5976d14abf15SRobert Mustacchi 
5977d14abf15SRobert Mustacchi         if (con->dpc_info.snapshot_flags & LM_TCP_DPC_KA_TO) {
5978d14abf15SRobert Mustacchi             con->dpc_info.snapshot_flags &= ~LM_TCP_DPC_KA_TO;
5979d14abf15SRobert Mustacchi             lm_tcp_upld_close_received_complete(pdev, tcp, L4_UPLOAD_REASON_KEEP_ALIVE_TIMEOUT);
5980d14abf15SRobert Mustacchi         }
5981d14abf15SRobert Mustacchi 
5982d14abf15SRobert Mustacchi         if (con->dpc_info.snapshot_flags & LM_TCP_DPC_DBT_RE) {
5983d14abf15SRobert Mustacchi             con->dpc_info.snapshot_flags &= ~LM_TCP_DPC_DBT_RE;
5984d14abf15SRobert Mustacchi             lm_tcp_process_retrieve_indication_cqe(pdev, tcp, L4_UPLOAD_REASON_UPLOAD_REQUESTED);
5985d14abf15SRobert Mustacchi             pdev->toe_info.stats.total_dbt_upld_requested++;
5986d14abf15SRobert Mustacchi         }
5987d14abf15SRobert Mustacchi 
5988d14abf15SRobert Mustacchi         if (con->dpc_info.snapshot_flags & LM_TCP_DPC_OPT_ERR) {
5989d14abf15SRobert Mustacchi             con->dpc_info.snapshot_flags &= ~LM_TCP_DPC_OPT_ERR;
5990d14abf15SRobert Mustacchi             lm_tcp_process_retrieve_indication_cqe(pdev, tcp, L4_UPLOAD_REASON_UPLOAD_REQUESTED);
5991d14abf15SRobert Mustacchi             pdev->toe_info.stats.total_opt_upld_requested++;
5992d14abf15SRobert Mustacchi         }
5993d14abf15SRobert Mustacchi 
5994d14abf15SRobert Mustacchi         if (con->dpc_info.snapshot_flags & LM_TCP_DPC_UPLD_CLOSE) {
5995d14abf15SRobert Mustacchi             con->dpc_info.snapshot_flags &= ~LM_TCP_DPC_UPLD_CLOSE;
5996d14abf15SRobert Mustacchi             lm_tcp_upld_close_received_complete(pdev, tcp, L4_UPLOAD_REASON_UPLOAD_REQUESTED);
5997d14abf15SRobert Mustacchi         }
5998d14abf15SRobert Mustacchi     } else if (con->dpc_info.snapshot_flags & LM_TCP_DPC_TOO_BIG_ISLE) {
5999d14abf15SRobert Mustacchi         con->dpc_info.snapshot_flags &= ~(LM_TCP_DPC_TOO_BIG_ISLE | LM_TCP_DPC_TOO_MANY_ISLES);
6000d14abf15SRobert Mustacchi         lm_tcp_process_retrieve_indication_cqe(pdev, tcp, L4_UPLOAD_REASON_UPLOAD_REQUESTED);
6001d14abf15SRobert Mustacchi         pdev->toe_info.stats.total_big_isle_upld_requesed++;
6002d14abf15SRobert Mustacchi     } else if (con->dpc_info.snapshot_flags & LM_TCP_DPC_TOO_MANY_ISLES) {
6003d14abf15SRobert Mustacchi         con->dpc_info.snapshot_flags &= ~LM_TCP_DPC_TOO_MANY_ISLES;
6004d14abf15SRobert Mustacchi         lm_tcp_process_retrieve_indication_cqe(pdev, tcp, L4_UPLOAD_REASON_UPLOAD_REQUESTED);
6005d14abf15SRobert Mustacchi         pdev->toe_info.stats.total_many_isles_upld_requesed++;
6006d14abf15SRobert Mustacchi     }
6007d14abf15SRobert Mustacchi 
6008d14abf15SRobert Mustacchi 
6009d14abf15SRobert Mustacchi     if (con->dpc_info.snapshot_flags & LM_TCP_DPC_RAMROD_CMP) {
6010d14abf15SRobert Mustacchi         con->dpc_info.snapshot_flags &= ~LM_TCP_DPC_RAMROD_CMP;
6011d14abf15SRobert Mustacchi         DbgBreakIf(con->dpc_info.snapshot_flags != 0);
6012d14abf15SRobert Mustacchi         /* Keep these before completing as the completion calls themselves can cause tcp state to be
6013d14abf15SRobert Mustacchi          * deleted... */
6014d14abf15SRobert Mustacchi         cid = tcp->cid;
6015d14abf15SRobert Mustacchi         ulp_type = tcp->ulp_type;
6016d14abf15SRobert Mustacchi         switch (tcp->sp_request->type) {
6017d14abf15SRobert Mustacchi             case SP_REQUEST_UPDATE_NEIGH:
6018d14abf15SRobert Mustacchi             case SP_REQUEST_UPDATE_PATH:
6019d14abf15SRobert Mustacchi             case SP_REQUEST_UPDATE_TCP:
6020d14abf15SRobert Mustacchi             case SP_REQUEST_UPDATE_PATH_RELINK:
6021d14abf15SRobert Mustacchi                 lm_tcp_update_ramrod_complete(pdev, tcp);
6022d14abf15SRobert Mustacchi                 lm_sq_complete(pdev, CMD_PRIORITY_NORMAL, RAMROD_OPCODE_TOE_UPDATE, ulp_type, cid);
6023d14abf15SRobert Mustacchi                 return;
6024d14abf15SRobert Mustacchi             case SP_REQUEST_QUERY:
6025d14abf15SRobert Mustacchi                 lm_tcp_query_ramrod_complete(pdev, tcp); /*  this may delete tcp !! */
6026d14abf15SRobert Mustacchi                 lm_sq_complete(pdev, CMD_PRIORITY_NORMAL, RAMROD_OPCODE_TOE_QUERY, ulp_type, cid);
6027d14abf15SRobert Mustacchi                 return;
6028d14abf15SRobert Mustacchi             case SP_REQUEST_TERMINATE_OFFLOAD:
6029d14abf15SRobert Mustacchi                 lm_tcp_searcher_ramrod_complete(pdev, tcp);
6030d14abf15SRobert Mustacchi                 lm_sq_complete(pdev, CMD_PRIORITY_NORMAL, RAMROD_OPCODE_TOE_SEARCHER_DELETE, ulp_type, cid);
6031d14abf15SRobert Mustacchi                 return;
6032d14abf15SRobert Mustacchi             case SP_REQUEST_INITIATE_OFFLOAD:
6033d14abf15SRobert Mustacchi                 /* Completion of initiate offload request can reach this point only if there was a license error, */
6034d14abf15SRobert Mustacchi                 /* otherwise its being completed earlier during 'process' stage                                   */
6035d14abf15SRobert Mustacchi                 lm_tcp_comp_initiate_offload_request(pdev, tcp, TOE_INITIATE_OFFLOAD_RAMROD_DATA_LICENSE_FAILURE);
6036d14abf15SRobert Mustacchi                 lm_sq_complete(pdev, CMD_PRIORITY_NORMAL, RAMROD_OPCODE_TOE_INITIATE_OFFLOAD, ulp_type, cid); /* use the saved ulp_type - tcp may have been deleted */
6037d14abf15SRobert Mustacchi                 return;
6038d14abf15SRobert Mustacchi         }
6039d14abf15SRobert Mustacchi 
6040d14abf15SRobert Mustacchi         /* The rest of the ramrods on RCQ also complete on SCQ */
6041d14abf15SRobert Mustacchi         complete_ramrod = FALSE;
6042d14abf15SRobert Mustacchi         MM_ACQUIRE_TOE_LOCK(pdev);
6043d14abf15SRobert Mustacchi 
6044d14abf15SRobert Mustacchi         /* save the type under the lock because a subsequent ramrod may change it */
6045d14abf15SRobert Mustacchi         sp_type = tcp->sp_request->type;
6046d14abf15SRobert Mustacchi         MM_RELEASE_TOE_LOCK(pdev);
6047d14abf15SRobert Mustacchi 
6048d14abf15SRobert Mustacchi         switch(sp_type) {
6049d14abf15SRobert Mustacchi         case SP_REQUEST_ABORTIVE_DISCONNECT:
6050d14abf15SRobert Mustacchi             lm_tcp_rx_abortive_disconnect_ramrod_complete(pdev, tcp);
6051d14abf15SRobert Mustacchi             break;
6052d14abf15SRobert Mustacchi         case SP_REQUEST_INVALIDATE:
6053d14abf15SRobert Mustacchi             lm_tcp_rx_invalidate_ramrod_complete(pdev, tcp);
6054d14abf15SRobert Mustacchi             break;
6055d14abf15SRobert Mustacchi         case SP_REQUEST_TERMINATE1_OFFLOAD:
6056d14abf15SRobert Mustacchi             lm_tcp_rx_terminate_ramrod_complete(pdev, tcp);
6057d14abf15SRobert Mustacchi             break;
6058d14abf15SRobert Mustacchi         case SP_REQUEST_PENDING_LOCAL_FIN_DISCONNECT:
6059d14abf15SRobert Mustacchi         case SP_REQUEST_PENDING_REMOTE_DISCONNECT:
6060d14abf15SRobert Mustacchi         case SP_REQUEST_PENDING_TX_RST:
6061d14abf15SRobert Mustacchi             lm_tcp_rx_empty_ramrod_complete(pdev,tcp, sp_type);
6062d14abf15SRobert Mustacchi             break;
6063d14abf15SRobert Mustacchi         default:
6064d14abf15SRobert Mustacchi             DbgMessage(pdev, FATAL, "unexpected sp completion type=%d\n", tcp->sp_request->type);
6065d14abf15SRobert Mustacchi             DbgBreak();
6066d14abf15SRobert Mustacchi         }
6067d14abf15SRobert Mustacchi         /* Get global TOE lock */
6068d14abf15SRobert Mustacchi         MM_ACQUIRE_TOE_LOCK(pdev);
6069d14abf15SRobert Mustacchi 
6070d14abf15SRobert Mustacchi         DbgBreakIf(sp_type != tcp->sp_request->type);
6071d14abf15SRobert Mustacchi 
6072d14abf15SRobert Mustacchi         tcp->sp_flags |= SP_REQUEST_COMPLETED_RX;
6073d14abf15SRobert Mustacchi 
6074d14abf15SRobert Mustacchi         /* If this is the second completion, complete the ramrod */
6075d14abf15SRobert Mustacchi         if ( tcp->sp_flags & SP_REQUEST_COMPLETED_TX ) {
6076d14abf15SRobert Mustacchi             complete_ramrod = TRUE;
6077d14abf15SRobert Mustacchi             tcp->sp_flags &= ~ ( SP_REQUEST_COMPLETED_TX | SP_REQUEST_COMPLETED_RX );
6078d14abf15SRobert Mustacchi         }
6079d14abf15SRobert Mustacchi         sp_flags = tcp->sp_flags;
6080d14abf15SRobert Mustacchi         flags = tcp->rx_con->flags;
6081d14abf15SRobert Mustacchi         MM_RELEASE_TOE_LOCK(pdev);
6082d14abf15SRobert Mustacchi         if (complete_ramrod) {
6083d14abf15SRobert Mustacchi             request = tcp->sp_request;
6084d14abf15SRobert Mustacchi             DbgBreakIf(request == NULL);
6085d14abf15SRobert Mustacchi             switch(sp_type) {
6086d14abf15SRobert Mustacchi             case SP_REQUEST_ABORTIVE_DISCONNECT:
6087d14abf15SRobert Mustacchi                 DbgBreakIf(request->type != SP_REQUEST_ABORTIVE_DISCONNECT);
6088d14abf15SRobert Mustacchi                 lm_tcp_comp_abortive_disconnect_request(pdev, tcp, request);
6089d14abf15SRobert Mustacchi                 lm_sq_complete(pdev, CMD_PRIORITY_NORMAL, RAMROD_OPCODE_TOE_RESET_SEND, tcp->ulp_type, tcp->cid);
6090d14abf15SRobert Mustacchi                 break;
6091d14abf15SRobert Mustacchi             case SP_REQUEST_INVALIDATE:
6092d14abf15SRobert Mustacchi                 DbgBreakIf(request->type != SP_REQUEST_INVALIDATE);
6093d14abf15SRobert Mustacchi                 lm_tcp_comp_invalidate_request(pdev, tcp, request);
6094d14abf15SRobert Mustacchi                 lm_sq_complete(pdev, CMD_PRIORITY_NORMAL, RAMROD_OPCODE_TOE_INVALIDATE, tcp->ulp_type, tcp->cid);
6095d14abf15SRobert Mustacchi                 break;
6096d14abf15SRobert Mustacchi             case SP_REQUEST_TERMINATE1_OFFLOAD:
6097d14abf15SRobert Mustacchi                 DbgBreakIf(request->type != SP_REQUEST_TERMINATE1_OFFLOAD);
6098d14abf15SRobert Mustacchi                 lm_tcp_terminate_ramrod_complete(pdev, tcp);
6099d14abf15SRobert Mustacchi                 lm_sq_complete(pdev, CMD_PRIORITY_NORMAL, RAMROD_OPCODE_TOE_TERMINATE, tcp->ulp_type, tcp->cid);
6100d14abf15SRobert Mustacchi                 break;
6101d14abf15SRobert Mustacchi             case SP_REQUEST_PENDING_LOCAL_FIN_DISCONNECT:
6102d14abf15SRobert Mustacchi             case SP_REQUEST_PENDING_REMOTE_DISCONNECT:
6103d14abf15SRobert Mustacchi             case SP_REQUEST_PENDING_TX_RST:
6104d14abf15SRobert Mustacchi                 lm_tcp_comp_empty_ramrod_request(pdev, tcp);
6105d14abf15SRobert Mustacchi                 lm_sq_complete(pdev, CMD_PRIORITY_NORMAL, RAMROD_OPCODE_TOE_EMPTY_RAMROD, tcp->ulp_type, tcp->cid);
6106d14abf15SRobert Mustacchi                 break;
6107d14abf15SRobert Mustacchi             default:
6108d14abf15SRobert Mustacchi                 DbgMessage(pdev, FATAL, "unexpected sp completion type=%d\n", tcp->sp_request->type);
6109d14abf15SRobert Mustacchi                 DbgBreak();
6110d14abf15SRobert Mustacchi             }
6111d14abf15SRobert Mustacchi         }
6112d14abf15SRobert Mustacchi     }
6113d14abf15SRobert Mustacchi }
6114d14abf15SRobert Mustacchi 
6115d14abf15SRobert Mustacchi #define MSL        4  /* 4 seconds */
6116d14abf15SRobert Mustacchi 
6117d14abf15SRobert Mustacchi l4_tcp_con_state_t lm_tcp_calc_state (
6118d14abf15SRobert Mustacchi     lm_device_t    * pdev,
6119d14abf15SRobert Mustacchi     lm_tcp_state_t * tcp,
6120d14abf15SRobert Mustacchi     u8_t             fin_was_sent
6121d14abf15SRobert Mustacchi     )
6122d14abf15SRobert Mustacchi {
6123d14abf15SRobert Mustacchi     enum {
6124d14abf15SRobert Mustacchi         NO_CLOSE = 0,
6125d14abf15SRobert Mustacchi         ACTIVE_CLOSE,
6126d14abf15SRobert Mustacchi         PASSIVE_CLOSE,
6127d14abf15SRobert Mustacchi         PASSIVE_BY_ACTIVE_CLOSE
6128d14abf15SRobert Mustacchi     } closing_type;
6129d14abf15SRobert Mustacchi 
6130d14abf15SRobert Mustacchi     u32_t snd_max = tcp->tcp_delegated.send_max;
6131d14abf15SRobert Mustacchi     u32_t snd_una = tcp->tcp_delegated.send_una;
6132d14abf15SRobert Mustacchi     u8_t  con_rst = tcp->tcp_state_calc.con_rst_flag;
6133d14abf15SRobert Mustacchi     u8_t  con_upld_close = tcp->tcp_state_calc.con_upld_close_flag;
6134d14abf15SRobert Mustacchi     u64_t fin_completed_time = tcp->tcp_state_calc.fin_completed_time;
6135d14abf15SRobert Mustacchi     u64_t fin_reception_time = tcp->tcp_state_calc.fin_reception_time;
6136d14abf15SRobert Mustacchi     u64_t fin_request_time = tcp->tcp_state_calc.fin_request_time;
6137d14abf15SRobert Mustacchi     u64_t time_wait_state_entering_time = fin_completed_time > fin_reception_time ?
6138d14abf15SRobert Mustacchi                                       fin_completed_time : fin_reception_time;
6139d14abf15SRobert Mustacchi     l4_tcp_con_state_t tcp_state;
6140d14abf15SRobert Mustacchi 
6141d14abf15SRobert Mustacchi     /* Set closing type */
6142d14abf15SRobert Mustacchi     closing_type = NO_CLOSE;
6143d14abf15SRobert Mustacchi     if ( fin_reception_time == 0 ) {
6144d14abf15SRobert Mustacchi         if ( fin_request_time > 0 ) {
6145d14abf15SRobert Mustacchi             closing_type = ACTIVE_CLOSE;
6146d14abf15SRobert Mustacchi         }
6147d14abf15SRobert Mustacchi     } else if ( ( fin_reception_time < fin_request_time ) || (fin_request_time == 0) ) {
6148d14abf15SRobert Mustacchi         closing_type = PASSIVE_CLOSE;
6149d14abf15SRobert Mustacchi     } else if ( ( fin_reception_time >= fin_request_time ) && (fin_request_time > 0) ){
6150d14abf15SRobert Mustacchi         closing_type = PASSIVE_BY_ACTIVE_CLOSE;
6151d14abf15SRobert Mustacchi     }
6152d14abf15SRobert Mustacchi 
6153d14abf15SRobert Mustacchi     if ((con_rst) || (con_upld_close)) {
6154d14abf15SRobert Mustacchi         tcp_state = L4_TCP_CON_STATE_CLOSED;
6155d14abf15SRobert Mustacchi     } else if ( closing_type == NO_CLOSE ) {
6156d14abf15SRobert Mustacchi         tcp_state = L4_TCP_CON_STATE_ESTABLISHED;
6157d14abf15SRobert Mustacchi     } else if ( ( closing_type == ACTIVE_CLOSE ) && fin_was_sent ) {
6158d14abf15SRobert Mustacchi         if  ( snd_una == snd_max ){
6159d14abf15SRobert Mustacchi             tcp_state = L4_TCP_CON_STATE_FIN_WAIT2;
6160d14abf15SRobert Mustacchi         } else {
6161d14abf15SRobert Mustacchi             tcp_state = L4_TCP_CON_STATE_FIN_WAIT1;
6162d14abf15SRobert Mustacchi         }
6163d14abf15SRobert Mustacchi     } else if ( ( closing_type == PASSIVE_BY_ACTIVE_CLOSE ) && (! fin_was_sent ) ) {
6164d14abf15SRobert Mustacchi         tcp_state = L4_TCP_CON_STATE_CLOSE_WAIT;
6165d14abf15SRobert Mustacchi     } else if (closing_type == PASSIVE_BY_ACTIVE_CLOSE ) {
6166d14abf15SRobert Mustacchi         if (snd_una == snd_max) {
6167d14abf15SRobert Mustacchi             if ( mm_get_current_time(pdev) - time_wait_state_entering_time > 2*pdev->ofld_info.l4_params.ticks_per_second *MSL ) {
6168d14abf15SRobert Mustacchi                 tcp_state = L4_TCP_CON_STATE_CLOSED;
6169d14abf15SRobert Mustacchi             } else  {
6170d14abf15SRobert Mustacchi                 tcp_state = L4_TCP_CON_STATE_TIME_WAIT;
6171d14abf15SRobert Mustacchi             }
6172d14abf15SRobert Mustacchi         } else {
6173d14abf15SRobert Mustacchi             tcp_state = L4_TCP_CON_STATE_CLOSING;
6174d14abf15SRobert Mustacchi         }
6175d14abf15SRobert Mustacchi     } else if (closing_type == PASSIVE_CLOSE ) {
6176d14abf15SRobert Mustacchi             if ( ! fin_was_sent ) {
6177d14abf15SRobert Mustacchi                 tcp_state = L4_TCP_CON_STATE_CLOSE_WAIT;
6178d14abf15SRobert Mustacchi             } else if ( snd_una == snd_max ) {
6179d14abf15SRobert Mustacchi                 tcp_state = L4_TCP_CON_STATE_CLOSED;
6180d14abf15SRobert Mustacchi             } else {
6181d14abf15SRobert Mustacchi                 tcp_state = L4_TCP_CON_STATE_LAST_ACK;
6182d14abf15SRobert Mustacchi             }
6183d14abf15SRobert Mustacchi     } else {
6184d14abf15SRobert Mustacchi         tcp_state = L4_TCP_CON_STATE_ESTABLISHED;
6185d14abf15SRobert Mustacchi     }
6186d14abf15SRobert Mustacchi 
6187d14abf15SRobert Mustacchi     return tcp_state;
6188d14abf15SRobert Mustacchi }
6189d14abf15SRobert Mustacchi 
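/* Illustrative sketch (not part of the driver build): the TIME_WAIT -> CLOSED decision in
 * lm_tcp_calc_state() above compares the time elapsed since TIME_WAIT was entered against
 * 2*MSL expressed in ticks. For example, assuming ticks_per_second = 100 (value picked
 * purely for illustration) and MSL = 4, the connection is reported CLOSED once more than
 * 2 * 100 * 4 = 800 ticks have passed. A hypothetical helper for the same test: */
#if 0 /* illustrative only */
static __inline u8_t _example_time_wait_expired(
    lm_device_t * pdev,
    u64_t         time_wait_entered)
{
    return (mm_get_current_time(pdev) - time_wait_entered) >
           (2 * pdev->ofld_info.l4_params.ticks_per_second * MSL);
}
#endif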
6190d14abf15SRobert Mustacchi void lm_tcp_clear_grqs(lm_device_t * pdev)
6191d14abf15SRobert Mustacchi {
6192d14abf15SRobert Mustacchi     lm_tcp_grq_t     * grq;
6193d14abf15SRobert Mustacchi //    lm_tcp_gen_buf_t * gen_buf;
6194d14abf15SRobert Mustacchi     u8_t              idx;
6195d14abf15SRobert Mustacchi 
6196d14abf15SRobert Mustacchi     DbgBreakIf(!(pdev->params.ofld_cap & LM_OFFLOAD_CHIMNEY));
6197d14abf15SRobert Mustacchi 
6198d14abf15SRobert Mustacchi     /* shutdown bug - only assert (which would BSOD) if shutdown is not in progress */
6199d14abf15SRobert Mustacchi     if (!lm_reset_is_inprogress(pdev)){
6200d14abf15SRobert Mustacchi         DbgBreakIf(!d_list_is_empty(&pdev->toe_info.state_blk.tcp_list));
6201d14abf15SRobert Mustacchi         DbgBreakIf(!d_list_is_empty(&pdev->toe_info.state_blk.path_list));
6202d14abf15SRobert Mustacchi         DbgBreakIf(!d_list_is_empty(&pdev->toe_info.state_blk.neigh_list));
6203d14abf15SRobert Mustacchi     }
6204d14abf15SRobert Mustacchi 
6205d14abf15SRobert Mustacchi     /* we need to go over all the buffers in the GRQs and return them to the pool. We also need
6206d14abf15SRobert Mustacchi      * to clear the consumer of the grq in the FW to make sure this grq isn't treated in the xon test. */
6207d14abf15SRobert Mustacchi     /* This function is called after all work-items have finished, and the driver
6208d14abf15SRobert Mustacchi      * state is no longer running, therefore there is no risk in accessing the grqs without
6209d14abf15SRobert Mustacchi      * a lock */
6210d14abf15SRobert Mustacchi 
6211d14abf15SRobert Mustacchi     if (IS_PFDEV(pdev)) {
6212d14abf15SRobert Mustacchi         DbgBreakIf(USTORM_TOE_GRQ_CONS_PTR_LO_SIZE != 4);
6213d14abf15SRobert Mustacchi         DbgBreakIf(USTORM_TOE_GRQ_CONS_PTR_HI_SIZE != 4);
6214d14abf15SRobert Mustacchi     }
6215d14abf15SRobert Mustacchi 
6216d14abf15SRobert Mustacchi     LM_TOE_FOREACH_RSS_IDX(pdev, idx)
6217d14abf15SRobert Mustacchi     {
6218d14abf15SRobert Mustacchi         grq = &pdev->toe_info.grqs[idx];
6219d14abf15SRobert Mustacchi         MM_ACQUIRE_TOE_GRQ_LOCK(pdev, idx);
6220d14abf15SRobert Mustacchi         grq->grq_compensate_on_alloc = FALSE;
6221d14abf15SRobert Mustacchi         MM_RELEASE_TOE_GRQ_LOCK(pdev, idx);
6222d14abf15SRobert Mustacchi     }
6223d14abf15SRobert Mustacchi 
6224d14abf15SRobert Mustacchi     LM_TOE_FOREACH_RSS_IDX(pdev, idx)
6225d14abf15SRobert Mustacchi     {
6226d14abf15SRobert Mustacchi         if (IS_PFDEV(pdev)) {
6227d14abf15SRobert Mustacchi            /* nullify the consumer pointer of all inactive GRQs (required by FW); it will be overridden for the active ones */
6228d14abf15SRobert Mustacchi             LM_INTMEM_WRITE32(pdev, USTORM_TOE_GRQ_CONS_PTR_LO_OFFSET(LM_TOE_FW_RSS_ID(pdev,idx), PORT_ID(pdev)), 0, BAR_USTRORM_INTMEM);
6229d14abf15SRobert Mustacchi             LM_INTMEM_WRITE32(pdev, USTORM_TOE_GRQ_CONS_PTR_HI_OFFSET(LM_TOE_FW_RSS_ID(pdev,idx), PORT_ID(pdev)), 0, BAR_USTRORM_INTMEM);
6230d14abf15SRobert Mustacchi         }
6231d14abf15SRobert Mustacchi 
6232d14abf15SRobert Mustacchi         grq = &pdev->toe_info.grqs[idx];
6233d14abf15SRobert Mustacchi         if (!d_list_is_empty(&grq->aux_gen_list)) {
6234d14abf15SRobert Mustacchi             mm_tcp_return_list_of_gen_bufs(pdev, &grq->aux_gen_list, 0, NON_EXISTENT_SB_IDX);
6235d14abf15SRobert Mustacchi             d_list_clear(&grq->aux_gen_list);
6236d14abf15SRobert Mustacchi         }
6237d14abf15SRobert Mustacchi         if (!d_list_is_empty(&grq->active_gen_list)) {
6238d14abf15SRobert Mustacchi             mm_tcp_return_list_of_gen_bufs(pdev, &grq->active_gen_list, 0, NON_EXISTENT_SB_IDX);
6239d14abf15SRobert Mustacchi             d_list_clear(&grq->active_gen_list);
6240d14abf15SRobert Mustacchi             lm_bd_chain_reset(pdev, &grq->bd_chain);
6241d14abf15SRobert Mustacchi         }
6242d14abf15SRobert Mustacchi     }
6243d14abf15SRobert Mustacchi }
6244d14abf15SRobert Mustacchi 
6245d14abf15SRobert Mustacchi /**
6246d14abf15SRobert Mustacchi  * @Description: Update TOE RSS. This call originates from an OS RSS update;
6247d14abf15SRobert Mustacchi  *               it actually arrives via the L2 interface and not
6248d14abf15SRobert Mustacchi  *               L4. However, the ramrods are separate for L4 and L2 because the
6249d14abf15SRobert Mustacchi  *               two protocols make different assumptions about what the
6250d14abf15SRobert Mustacchi  *               indirection table contains.
6251d14abf15SRobert Mustacchi  *
6252d14abf15SRobert Mustacchi  * @Assumptions: Called BEFORE calling L2
6253d14abf15SRobert Mustacchi  *                 enable-rss!!
6254d14abf15SRobert Mustacchi  *
6255d14abf15SRobert Mustacchi  * @param pdev
6256d14abf15SRobert Mustacchi  * @param chain_indirection_table - table of TOE RCQ chain values
6257d14abf15SRobert Mustacchi  * @param table_size    - size of table above
6258d14abf15SRobert Mustacchi  * @param enable    - whether this enables or disables RSS; on disable, the
6259d14abf15SRobert Mustacchi  *                    table entries will all point to the same chain
6260d14abf15SRobert Mustacchi  *
6261d14abf15SRobert Mustacchi  * @return lm_status_t - PENDING if the completion will arrive asynchronously
6262d14abf15SRobert Mustacchi  *                     - SUCCESS if no ramrod is sent (for example table didn't change)
6263d14abf15SRobert Mustacchi  *                     - FAILURE o/w
6264d14abf15SRobert Mustacchi  */
6265d14abf15SRobert Mustacchi lm_status_t lm_tcp_update_rss(struct _lm_device_t * pdev, u8_t * chain_indirection_table,
6266d14abf15SRobert Mustacchi                               u32_t table_size, u8_t  enable)
6267d14abf15SRobert Mustacchi {
6268d14abf15SRobert Mustacchi     struct toe_rss_update_ramrod_data *data = pdev->toe_info.rss_update_data;
6269d14abf15SRobert Mustacchi     lm_status_t lm_status   = LM_STATUS_SUCCESS;
6270d14abf15SRobert Mustacchi     u8_t        value       = 0;
6271d14abf15SRobert Mustacchi     u8_t        send_ramrod = 0;
6272d14abf15SRobert Mustacchi     u8_t        rss_idx     = 0;
6273d14abf15SRobert Mustacchi     u16_t       bitmap      = 0;
6274d14abf15SRobert Mustacchi     u8_t        i,j;
6275d14abf15SRobert Mustacchi 
6276d14abf15SRobert Mustacchi     /* If data is NULL (allocation failed...) we don't want to fail this operation for L2 */
6277d14abf15SRobert Mustacchi     if (pdev->params.l4_enable_rss == L4_RSS_DISABLED || data == NULL)
6278d14abf15SRobert Mustacchi     {
6279d14abf15SRobert Mustacchi         return LM_STATUS_SUCCESS;
6280d14abf15SRobert Mustacchi     }
6281d14abf15SRobert Mustacchi 
6282d14abf15SRobert Mustacchi     DbgBreakIf(pdev->params.l4_enable_rss != L4_RSS_DYNAMIC);
6283d14abf15SRobert Mustacchi 
6284d14abf15SRobert Mustacchi     if (enable)
6285d14abf15SRobert Mustacchi     {
6286d14abf15SRobert Mustacchi         if (pdev->params.l4_grq_page_cnt > 2)
6287d14abf15SRobert Mustacchi         {
6288d14abf15SRobert Mustacchi             LM_TOE_FOREACH_RSS_IDX(pdev, rss_idx)
6289d14abf15SRobert Mustacchi             {
6290d14abf15SRobert Mustacchi                 pdev->toe_info.grqs[rss_idx].high_bds_threshold = 2 * 512;
6291d14abf15SRobert Mustacchi             }
6292d14abf15SRobert Mustacchi         }
6293d14abf15SRobert Mustacchi     }
6294d14abf15SRobert Mustacchi     else
6295d14abf15SRobert Mustacchi     {
6296d14abf15SRobert Mustacchi         pdev->toe_info.grqs[LM_TOE_BASE_RSS_ID(pdev)].high_bds_threshold = 0;
6297d14abf15SRobert Mustacchi     }
6298d14abf15SRobert Mustacchi 
6299d14abf15SRobert Mustacchi 
6300d14abf15SRobert Mustacchi     for (j = 0; j < TOE_INDIRECTION_TABLE_SIZE/table_size; j++)
6301d14abf15SRobert Mustacchi     {
6302d14abf15SRobert Mustacchi         for (i = 0; i < table_size; i++)
6303d14abf15SRobert Mustacchi         {
6304d14abf15SRobert Mustacchi             value = LM_TOE_FW_RSS_ID(pdev,chain_indirection_table[i]);
6305d14abf15SRobert Mustacchi 
6306d14abf15SRobert Mustacchi             if (pdev->toe_info.indirection_table[(j*table_size)+i] != value) {
6307d14abf15SRobert Mustacchi                 pdev->toe_info.indirection_table[(j*table_size)+i] = value;
6308d14abf15SRobert Mustacchi                 send_ramrod = TRUE;
6309d14abf15SRobert Mustacchi             }
6310d14abf15SRobert Mustacchi         }
6311d14abf15SRobert Mustacchi     }
6312d14abf15SRobert Mustacchi 
6313d14abf15SRobert Mustacchi     /* send update ramrod */
6314d14abf15SRobert Mustacchi     if (send_ramrod)
6315d14abf15SRobert Mustacchi     {
6316d14abf15SRobert Mustacchi         pdev->params.update_comp_cnt = 0;
6317d14abf15SRobert Mustacchi         pdev->params.update_suspend_cnt = 0;
6318d14abf15SRobert Mustacchi         pdev->params.update_toe_comp_cnt = 0; /* We need a separate one for TOE to determine when to update sq credit */
6319d14abf15SRobert Mustacchi 
6320d14abf15SRobert Mustacchi         /* Two global update counters:
6321d14abf15SRobert Mustacchi          * update_comp_cnt -    Set initially to the number of expected completions, decremented every time an update completion is processed.
6322d14abf15SRobert Mustacchi          *                      Processing for all chains is suspended until this counter reaches 0.
6323d14abf15SRobert Mustacchi          * update_suspend_cnt - Set initially to the number of potentially suspended chains. Decremented when each chain resumes processing. The ramrod completion
6324d14abf15SRobert Mustacchi          *                      is indicated back only when this counter reaches 0.
6325d14abf15SRobert Mustacchi          *
6326d14abf15SRobert Mustacchi          * Only one update ramrod can be pending at a time, so the completion and suspend counters can be accessed here and below without grabbing a lock.
6327d14abf15SRobert Mustacchi          */
6328d14abf15SRobert Mustacchi 
6329d14abf15SRobert Mustacchi         /* Update once for Eth... */
6330d14abf15SRobert Mustacchi         pdev->params.update_comp_cnt++;
6331d14abf15SRobert Mustacchi         pdev->params.update_suspend_cnt++;
6332d14abf15SRobert Mustacchi 
6333d14abf15SRobert Mustacchi 
6334d14abf15SRobert Mustacchi         /* TODO: Enhancement - send only on the chains that take part, and on the ones removed... */
6335d14abf15SRobert Mustacchi         LM_TOE_FOREACH_RSS_IDX(pdev, rss_idx)
6336d14abf15SRobert Mustacchi         {
6337d14abf15SRobert Mustacchi             bitmap |= (1<<LM_TOE_FW_RSS_ID(pdev,rss_idx));
6338d14abf15SRobert Mustacchi         }
6339d14abf15SRobert Mustacchi 
6340d14abf15SRobert Mustacchi         mm_memcpy(data->indirection_table, pdev->toe_info.indirection_table, sizeof(data->indirection_table));
6341d14abf15SRobert Mustacchi         data->toe_rss_bitmap = bitmap;
6342d14abf15SRobert Mustacchi 
6343d14abf15SRobert Mustacchi         pdev->params.update_comp_cnt += pdev->params.l4_rss_chain_cnt;
6344d14abf15SRobert Mustacchi         pdev->params.update_suspend_cnt += pdev->params.l4_rss_chain_cnt;
6345d14abf15SRobert Mustacchi         pdev->params.update_toe_comp_cnt = pdev->params.l4_rss_chain_cnt; /* TOE only! */
6346d14abf15SRobert Mustacchi 
6347d14abf15SRobert Mustacchi         lm_status = lm_command_post(pdev,
6348d14abf15SRobert Mustacchi                                     LM_TOE_FW_RSS_ID(pdev, LM_TOE_BASE_RSS_ID(pdev)),
6349d14abf15SRobert Mustacchi                                     RAMROD_OPCODE_TOE_RSS_UPDATE,
6350d14abf15SRobert Mustacchi                                     CMD_PRIORITY_MEDIUM,
6351d14abf15SRobert Mustacchi                                     TOE_CONNECTION_TYPE,
6352d14abf15SRobert Mustacchi                                     pdev->toe_info.rss_update_data_phys.as_u64);
6353d14abf15SRobert Mustacchi 
6354d14abf15SRobert Mustacchi         if (lm_status == LM_STATUS_SUCCESS)
6355d14abf15SRobert Mustacchi         {
6356d14abf15SRobert Mustacchi             lm_status = LM_STATUS_PENDING;
6357d14abf15SRobert Mustacchi         }
6358d14abf15SRobert Mustacchi     }
6359d14abf15SRobert Mustacchi 
6360d14abf15SRobert Mustacchi     return lm_status;
6361d14abf15SRobert Mustacchi }
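/*
 * Illustrative caller sketch (not part of the original driver): shows how the
 * return codes documented above might be handled. The um_example_* name and
 * the calling context are assumptions made purely for illustration, so the
 * sketch is kept out of the build with #if 0.
 */
#if 0
static void um_example_update_toe_rss(struct _lm_device_t *pdev,
                                      u8_t  *toe_indirection_table,
                                      u32_t  table_size)
{
    /* Per the assumption documented above, TOE RSS is updated BEFORE L2 enable-rss. */
    lm_status_t status = lm_tcp_update_rss(pdev, toe_indirection_table, table_size, TRUE);

    if (status == LM_STATUS_PENDING) {
        /* A TOE_RSS_UPDATE ramrod was posted; the completion arrives asynchronously. */
    } else if (status == LM_STATUS_SUCCESS) {
        /* No ramrod was needed (table unchanged, or L4 RSS disabled) - proceed to L2. */
    } else {
        /* Posting the ramrod failed. */
    }
}
#endif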
6362d14abf15SRobert Mustacchi 
6363d14abf15SRobert Mustacchi 
6364d14abf15SRobert Mustacchi /** Description
6365d14abf15SRobert Mustacchi  *  This function is called whenever the UM allocates more generic buffers; each GRQ marked for compensation is refilled and, if BDs were added, the FW GRQ producer is updated.
6366d14abf15SRobert Mustacchi  */
6367d14abf15SRobert Mustacchi void lm_tcp_rx_gen_bufs_alloc_cb(lm_device_t * pdev)
6368d14abf15SRobert Mustacchi {
6369d14abf15SRobert Mustacchi    u8_t i;
6370d14abf15SRobert Mustacchi 
6371d14abf15SRobert Mustacchi    LM_TOE_FOREACH_RSS_IDX(pdev, i)
6372d14abf15SRobert Mustacchi    {
6373d14abf15SRobert Mustacchi 
6374d14abf15SRobert Mustacchi         lm_tcp_grq_t *grq = &pdev->toe_info.grqs[i];
6375d14abf15SRobert Mustacchi         MM_ACQUIRE_TOE_GRQ_LOCK(pdev, i);
6376d14abf15SRobert Mustacchi         if (grq->grq_compensate_on_alloc) {
6377d14abf15SRobert Mustacchi             /* fill GRQ */
6378d14abf15SRobert Mustacchi             if (lm_tcp_rx_fill_grq(pdev, i, NULL, FILL_GRQ_LOW_THRESHOLD)) {
6379d14abf15SRobert Mustacchi                 DbgMessage(pdev, INFORMl4rx, "lm_tcp_rx_gen_bufs_alloc_cb: Updating GRQ producer\n");
6380d14abf15SRobert Mustacchi                 /* notify the fw of the prod of the GRQ */
6381d14abf15SRobert Mustacchi                 LM_INTMEM_WRITE16(pdev, USTORM_TOE_GRQ_PROD_OFFSET(LM_TOE_FW_RSS_ID(pdev,i), PORT_ID(pdev)),
6382d14abf15SRobert Mustacchi                                   lm_bd_chain_prod_idx(&pdev->toe_info.grqs[i].bd_chain), BAR_USTRORM_INTMEM);
6383d14abf15SRobert Mustacchi             }
6384d14abf15SRobert Mustacchi         }
6385d14abf15SRobert Mustacchi         MM_RELEASE_TOE_GRQ_LOCK(pdev, i);
6386d14abf15SRobert Mustacchi     }
6387d14abf15SRobert Mustacchi }
6388d14abf15SRobert Mustacchi 
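/** Description
 *  Updates the archipelago isle counters by the given deltas and tracks their
 *  high-water marks. If the number of generic buffers held in isles crosses the
 *  configured l4_max_gen_bufs_in_archipelago limit, the function optionally
 *  breaks into the debugger (L4_LI_NOTIFY) and/or arms l4_decrease_archipelago
 *  (L4_LI_MAX_GEN_BUFS_IN_ARCHIPELAGO); the flag is cleared again once the
 *  count drops back to l4_valid_gen_bufs_in_archipelago.
 */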
6389d14abf15SRobert Mustacchi void lm_tcp_update_isles_cnts(struct _lm_device_t * pdev, s16_t number_of_isles, s32_t number_of_gen_bufs)
6390d14abf15SRobert Mustacchi {
6391d14abf15SRobert Mustacchi     lm_toe_isles_t  *archipelago = &pdev->toe_info.archipelago;
6392d14abf15SRobert Mustacchi 
6393d14abf15SRobert Mustacchi     archipelago->number_of_isles += number_of_isles;
6394d14abf15SRobert Mustacchi     archipelago->gen_bufs_in_isles += number_of_gen_bufs;
6395d14abf15SRobert Mustacchi     if (archipelago->number_of_isles > archipelago->max_number_of_isles) {
6396d14abf15SRobert Mustacchi         archipelago->max_number_of_isles = archipelago->number_of_isles;
6397d14abf15SRobert Mustacchi     }
6398d14abf15SRobert Mustacchi 
6399d14abf15SRobert Mustacchi     if (archipelago->gen_bufs_in_isles > archipelago->max_gen_bufs_in_isles) {
6400d14abf15SRobert Mustacchi         archipelago->max_gen_bufs_in_isles = archipelago->gen_bufs_in_isles;
6401d14abf15SRobert Mustacchi     }
6402d14abf15SRobert Mustacchi     if (pdev->params.l4_max_gen_bufs_in_archipelago
6403d14abf15SRobert Mustacchi             && (archipelago->gen_bufs_in_isles > (s32_t)pdev->params.l4_max_gen_bufs_in_archipelago)) {
6404d14abf15SRobert Mustacchi         if (pdev->params.l4_limit_isles & L4_LI_NOTIFY) {
6405d14abf15SRobert Mustacchi             DbgBreak();
6406d14abf15SRobert Mustacchi         }
6407d14abf15SRobert Mustacchi         if (pdev->params.l4_limit_isles & L4_LI_MAX_GEN_BUFS_IN_ARCHIPELAGO) {
6408d14abf15SRobert Mustacchi             archipelago->l4_decrease_archipelago = TRUE;
6409d14abf15SRobert Mustacchi         }
6410d14abf15SRobert Mustacchi     } else if (archipelago->l4_decrease_archipelago) {
6411d14abf15SRobert Mustacchi         if (archipelago->gen_bufs_in_isles <= (s32_t)pdev->params.l4_valid_gen_bufs_in_archipelago) {
6412d14abf15SRobert Mustacchi             archipelago->l4_decrease_archipelago = FALSE;
6413d14abf15SRobert Mustacchi         }
6414d14abf15SRobert Mustacchi     }
6415d14abf15SRobert Mustacchi 
6416d14abf15SRobert Mustacchi }
6417d14abf15SRobert Mustacchi 
6418d14abf15SRobert Mustacchi void lm_tcp_init_num_of_blocks_per_connection(
6419d14abf15SRobert Mustacchi     struct _lm_device_t *pdev,
6420d14abf15SRobert Mustacchi     u8_t    num)
6421d14abf15SRobert Mustacchi {
6422d14abf15SRobert Mustacchi     pdev->params.l4_num_of_blocks_per_connection = num;
6423d14abf15SRobert Mustacchi }
6424d14abf15SRobert Mustacchi 
6425d14abf15SRobert Mustacchi u8_t lm_tcp_get_num_of_blocks_per_connection(
6426d14abf15SRobert Mustacchi     struct _lm_device_t *pdev)
6427d14abf15SRobert Mustacchi {
6428d14abf15SRobert Mustacchi     return pdev->params.l4_num_of_blocks_per_connection;
6429d14abf15SRobert Mustacchi }
6430d14abf15SRobert Mustacchi 
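/** Description
 *  Iterator over the TOE neighbor state list: pass NULL to get the first
 *  entry, or a previously returned entry to get the one after it. Returns
 *  NULL when the end of the list is reached.
 */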
6431d14abf15SRobert Mustacchi lm_neigh_state_t * lm_tcp_get_next_neigh(
6432d14abf15SRobert Mustacchi     struct _lm_device_t *pdev,
6433d14abf15SRobert Mustacchi     lm_neigh_state_t * neigh_state)
6434d14abf15SRobert Mustacchi {
6435d14abf15SRobert Mustacchi     if (neigh_state == NULL) {
6436d14abf15SRobert Mustacchi         neigh_state = (lm_neigh_state_t *) d_list_peek_head(&pdev->toe_info.state_blk.neigh_list);
6437d14abf15SRobert Mustacchi     } else {
6438d14abf15SRobert Mustacchi         neigh_state = (lm_neigh_state_t *) d_list_next_entry(&neigh_state->hdr.link);
6439d14abf15SRobert Mustacchi     }
6440d14abf15SRobert Mustacchi     return neigh_state;
6441d14abf15SRobert Mustacchi }
6442d14abf15SRobert Mustacchi 
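/** Description
 *  Iterator over the TOE path state list: pass NULL path_state to start from
 *  the head, or a previously returned entry to continue. If neigh_state is not
 *  NULL, only paths attached to that neighbor are returned. Returns NULL when
 *  no further matching path exists.
 */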
6443d14abf15SRobert Mustacchi lm_path_state_t * lm_tcp_get_next_path(
6444d14abf15SRobert Mustacchi     struct _lm_device_t *pdev,
6445d14abf15SRobert Mustacchi     lm_neigh_state_t * neigh_state,
6446d14abf15SRobert Mustacchi     lm_path_state_t * path_state)
6447d14abf15SRobert Mustacchi {
6448d14abf15SRobert Mustacchi     if (path_state == NULL) {
6449d14abf15SRobert Mustacchi         path_state = (lm_path_state_t *) d_list_peek_head(&pdev->toe_info.state_blk.path_list);
6450d14abf15SRobert Mustacchi     } else {
6451d14abf15SRobert Mustacchi         path_state = (lm_path_state_t *) d_list_next_entry(&path_state->hdr.link);
6452d14abf15SRobert Mustacchi     }
6453d14abf15SRobert Mustacchi 
6454d14abf15SRobert Mustacchi     if (neigh_state != NULL) {
6455d14abf15SRobert Mustacchi         while (path_state) {
6456d14abf15SRobert Mustacchi             if (path_state->neigh == neigh_state) {
6457d14abf15SRobert Mustacchi                 return path_state;
6458d14abf15SRobert Mustacchi             }
6459d14abf15SRobert Mustacchi             path_state = (lm_path_state_t *) d_list_next_entry(&path_state->hdr.link);
6460d14abf15SRobert Mustacchi         }
6461d14abf15SRobert Mustacchi     }
6462d14abf15SRobert Mustacchi     return path_state;
6463d14abf15SRobert Mustacchi }
6464d14abf15SRobert Mustacchi 
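/** Description
 *  Iterator over the TOE tcp state list: pass NULL to start from the head, or
 *  a previously returned entry to continue. Returns NULL at the end of the list.
 */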
6465d14abf15SRobert Mustacchi lm_tcp_state_t * lm_tcp_get_next_tcp(
6466d14abf15SRobert Mustacchi     struct _lm_device_t *pdev,
6467d14abf15SRobert Mustacchi     lm_tcp_state_t * tcp_state)
6468d14abf15SRobert Mustacchi {
6469d14abf15SRobert Mustacchi     if (tcp_state == NULL) {
6470d14abf15SRobert Mustacchi         tcp_state = (lm_tcp_state_t *) d_list_peek_head(&pdev->toe_info.state_blk.tcp_list);
6471d14abf15SRobert Mustacchi     } else {
6472d14abf15SRobert Mustacchi         tcp_state = (lm_tcp_state_t *) d_list_next_entry(&tcp_state->hdr.link);
6473d14abf15SRobert Mustacchi     }
6474d14abf15SRobert Mustacchi     return tcp_state;
6475d14abf15SRobert Mustacchi }
6476d14abf15SRobert Mustacchi 
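/** Description
 *  Returns the least significant byte of the path's source IP address
 *  (the IPv4 address, or the first dword of an IPv6 address), as used for
 *  tuple matching in lm_tcp_find_offloaded_tcp_tuple().
 */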
6477d14abf15SRobert Mustacchi u8_t lm_tcp_get_src_ip_cam_byte(
6478d14abf15SRobert Mustacchi     IN    struct _lm_device_t   * pdev,
6479d14abf15SRobert Mustacchi     IN    lm_path_state_t        * path)
6480d14abf15SRobert Mustacchi {
6481d14abf15SRobert Mustacchi     u8_t src_ip_byte;
6482d14abf15SRobert Mustacchi 
6483d14abf15SRobert Mustacchi     DbgBreakIf(!(pdev && path));
6484d14abf15SRobert Mustacchi 
6485d14abf15SRobert Mustacchi     if (path->path_const.ip_version ==  IP_VERSION_IPV4) {
6486d14abf15SRobert Mustacchi         src_ip_byte = path->path_const.u.ipv4.src_ip & 0x000000FF;
6487d14abf15SRobert Mustacchi     } else {
6488d14abf15SRobert Mustacchi         src_ip_byte = path->path_const.u.ipv6.src_ip[0] & 0x000000FF;
6489d14abf15SRobert Mustacchi     }
6490d14abf15SRobert Mustacchi     return src_ip_byte;
6491d14abf15SRobert Mustacchi }
6492d14abf15SRobert Mustacchi 
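/** Description
 *  Scans the offloaded TCP state list (starting after prev_tcp, or from the
 *  head when prev_tcp is NULL) for a connection whose low source/destination
 *  port bytes match src_tcp_b/dst_tcp_b and whose source-IP CAM byte matches
 *  src_ip_byte (connections without a path match on ports alone). Returns the
 *  matching state, or NULL if none is found.
 */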
6493d14abf15SRobert Mustacchi lm_tcp_state_t* lm_tcp_find_offloaded_tcp_tuple(struct _lm_device_t   * pdev, u8_t src_ip_byte, u8_t src_tcp_b, u8_t dst_tcp_b, lm_tcp_state_t * prev_tcp)
6494d14abf15SRobert Mustacchi {
6495d14abf15SRobert Mustacchi     lm_tcp_state_t *connection_found = NULL;
6496d14abf15SRobert Mustacchi     lm_tcp_state_t *current_tcp = NULL;
6497d14abf15SRobert Mustacchi 
6498d14abf15SRobert Mustacchi     while ((current_tcp = lm_tcp_get_next_tcp(pdev, prev_tcp))) {
6499d14abf15SRobert Mustacchi         u8_t c_src_tcp_b;
6500d14abf15SRobert Mustacchi         u8_t c_dst_tcp_b;
6501d14abf15SRobert Mustacchi         prev_tcp = current_tcp;
6502d14abf15SRobert Mustacchi         c_src_tcp_b = current_tcp->tcp_const.src_port & 0x00FF;
6503d14abf15SRobert Mustacchi         c_dst_tcp_b = current_tcp->tcp_const.dst_port & 0x00FF;
6504d14abf15SRobert Mustacchi         if ((c_src_tcp_b == src_tcp_b) && (c_dst_tcp_b == dst_tcp_b)) {
6505d14abf15SRobert Mustacchi             if ((current_tcp->path == NULL) || (lm_tcp_get_src_ip_cam_byte(pdev,current_tcp->path) == src_ip_byte)) {
6506d14abf15SRobert Mustacchi                 connection_found = current_tcp;
6507d14abf15SRobert Mustacchi                 break;
6508d14abf15SRobert Mustacchi             }
6509d14abf15SRobert Mustacchi         }
6510d14abf15SRobert Mustacchi     }
6511d14abf15SRobert Mustacchi 
6512d14abf15SRobert Mustacchi     return connection_found;
6513d14abf15SRobert Mustacchi }
6514d14abf15SRobert Mustacchi 
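/** Description
 *  Returns a pointer into the global integrity pattern buffer at the
 *  connection's current offset for pattern_idx plus the given offset,
 *  wrapped modulo the pattern size. *pattern_size is clamped to at most
 *  (pattern_buf_size - pattern_size) so the returned region stays within
 *  the pattern buffer.
 */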
6515d14abf15SRobert Mustacchi u8_t * lm_tcp_get_pattern(struct _lm_device_t * pdev,
6516d14abf15SRobert Mustacchi                           lm_tcp_state_t * tcp,
6517d14abf15SRobert Mustacchi                           u8_t  pattern_idx,
6518d14abf15SRobert Mustacchi                           u32_t offset,
6519d14abf15SRobert Mustacchi                           u32_t * pattern_size)
6520d14abf15SRobert Mustacchi {
6521d14abf15SRobert Mustacchi     offset = tcp->integrity_info.current_offset_in_pattern_buf[pattern_idx] + offset;
6522d14abf15SRobert Mustacchi     offset = offset % pdev->toe_info.integrity_info.pattern_size;
6523d14abf15SRobert Mustacchi     if (*pattern_size > (pdev->toe_info.integrity_info.pattern_buf_size - pdev->toe_info.integrity_info.pattern_size)) {
6524d14abf15SRobert Mustacchi         *pattern_size = pdev->toe_info.integrity_info.pattern_buf_size - pdev->toe_info.integrity_info.pattern_size;
6525d14abf15SRobert Mustacchi     }
6526d14abf15SRobert Mustacchi     return (pdev->toe_info.integrity_info.pattern_buf + offset);
6527d14abf15SRobert Mustacchi }
6528d14abf15SRobert Mustacchi 
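/** Description
 *  Advances the connection's current offset into the integrity pattern buffer
 *  for pattern_idx by 'offset', wrapping modulo the pattern size.
 */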
6529d14abf15SRobert Mustacchi void lm_tcp_set_pattern_offset(struct _lm_device_t * pdev,
6530d14abf15SRobert Mustacchi                           lm_tcp_state_t * tcp,
6531d14abf15SRobert Mustacchi                           u8_t  pattern_idx,
6532d14abf15SRobert Mustacchi                           u32_t offset)
6533d14abf15SRobert Mustacchi {
6534d14abf15SRobert Mustacchi     tcp->integrity_info.current_offset_in_pattern_buf[pattern_idx] += offset;
6535d14abf15SRobert Mustacchi     tcp->integrity_info.current_offset_in_pattern_buf[pattern_idx] =
6536d14abf15SRobert Mustacchi         tcp->integrity_info.current_offset_in_pattern_buf[pattern_idx] % pdev->toe_info.integrity_info.pattern_size;
6537d14abf15SRobert Mustacchi 
6538d14abf15SRobert Mustacchi     return;
6539d14abf15SRobert Mustacchi }
6540d14abf15SRobert Mustacchi 
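/** Description
 *  Searches the integrity pattern buffer for the given sub-buffer and returns
 *  the offset at which it matches, or 0xFFFFFFFF if no match is found.
 */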
6541d14abf15SRobert Mustacchi u32_t lm_tcp_find_pattern_offset(struct _lm_device_t * pdev, u8_t * sub_buf, u32_t sub_buf_size)
6542d14abf15SRobert Mustacchi {
6543d14abf15SRobert Mustacchi     u32_t i,j;
6544d14abf15SRobert Mustacchi     for (j = 0; j < pdev->toe_info.integrity_info.pattern_size; j++) {
6545d14abf15SRobert Mustacchi         for (i = 0; i < sub_buf_size; i++) {
6546d14abf15SRobert Mustacchi             if (sub_buf[i] != pdev->toe_info.integrity_info.pattern_buf[j+i]) {
6547d14abf15SRobert Mustacchi                 break;
6548d14abf15SRobert Mustacchi             }
6549d14abf15SRobert Mustacchi         }
6550d14abf15SRobert Mustacchi         if (i == sub_buf_size) {
6551d14abf15SRobert Mustacchi             return j;
6552d14abf15SRobert Mustacchi         }
6553d14abf15SRobert Mustacchi     }
6554d14abf15SRobert Mustacchi     return 0xFFFFFFFF;
6555d14abf15SRobert Mustacchi }
6556