/*******************************************************************************
 * lm_l4st.h - L4 lm data structures
 ******************************************************************************/
#ifndef _LM_L4ST_H
#define _LM_L4ST_H

#include "l4states.h"
#include "bd_chain_st.h"
#include "lm_sp_req_mgr.h"
#include "toe_constants.h"

/* Maximum number of L4 (TOE) rx / tx chains supported by the driver. */
#define MAX_L4_RX_CHAIN 16
#define MAX_L4_TX_CHAIN 16

/* Indices into the per-pattern arrays of lm_tcp_integrity_info_t below:
 * one slot is used when verifying data at completion time, the other when
 * verifying at CQE (receive) time. */
#define PATTERN_COUNTER_IDX_COMPLETION 0
#define PATTERN_COUNTER_IDX_CQE 1
#define MAX_PATTERN_IDX 2

/* Per-connection bookkeeping for data-integrity verification: for each
 * pattern index, the current offset inside the expected-pattern buffer and
 * how many bytes of the next incoming buffer should be skipped before
 * comparison resumes. (NOTE(review): exact comparison algorithm lives in the
 * integrity-check code — confirm there.) */
typedef struct _lm_tcp_integrity_info_t
{
    u32_t current_offset_in_pattern_buf[MAX_PATTERN_IDX];
    u32_t skip_bytes_in_incoming_buf[MAX_PATTERN_IDX];
    u32_t is_offsets_initialized; /* non-zero once the offsets above are valid */
} lm_tcp_integrity_info_t;


/* Global integrity-check state: the expected data pattern that received
 * data is compared against. */
typedef struct _lm_toe_integrity_info_t
{
    u8_t * pattern_buf;      /* buffer holding the expected pattern data */
    u32_t pattern_buf_size;  /* allocated size of pattern_buf, in bytes */
    u32_t pattern_size;      /* size of a single pattern within the buffer */
} lm_toe_integrity_info_t;


/*******************************************************************************
 * 'Posting' TCP buffer.
 ******************************************************************************/
/* A single posted TCP buffer. Linked into a connection's active_tb_list and
 * mapped onto one or more BDs of the connection's bd chain. */
typedef struct _lm_tcp_buffer_t
{
    /* Must be the first entry in this structure. */
    s_list_entry_t link;

    /* Corresponds to l4buffer_t buffer_size.
     * The number of bytes in this buffer may not correspond to the
     * number of bytes of the application buffer. An application buffer
     * could span multiple tcp_bufs. The flags field is used to mark the
     * starting and the end of an application buffer. */
    u32_t size;

    /* Number of bytes that were not completed yet */
    u32_t more_to_comp;

    u32_t flags; /* Flags for indicating the start and end of an io buffer. */
#define TCP_BUF_FLAG_NONE 0x00
#define TCP_BUF_FLAG_L4_POST_START 0x01
#define TCP_BUF_FLAG_L4_POST_END 0x02
#define TCP_BUF_FLAG_L4_RESERVED1 0x04 /* used in Teton for dummy buffer. */
#define TCP_BUF_FLAG_L4_SPLIT 0x04 /* Used in everest for split buffer Everest cleans before completing to miniport */
/* NOTE: RESERVED1 and SPLIT intentionally share the value 0x04 — one is a
 * Teton-only use, the other an Everest-only use of the same bit. */
#define TCP_BUF_FLAG_L4_RESERVED2 0x08 /* used only in Miniport as 'last post' */
#define TCP_BUF_FLAG_L4_RX_NO_PUSH 0x10
#define TCP_BUF_FLAG_L4_PARTIAL_FILLED 0x20
/* NOTE: lm_tcp_buffer flags values must correspond to flags definition in l4buffer_t */

    u16_t bd_used; /* Number of BDs consumed in the bd chain for this tcp buffer */
    u16_t _pad;

    /* These fields are valid when TCP_BUF_FLAG_L4_POST_END flag is set. */
    u32_t app_buf_size;    /* Number of bytes of all buffers from BUFFER_START till BUFFER_END */
    u32_t app_buf_xferred; /* Number of bytes xferred on all buffers from BUFFER_START till BUFFER_END */
} lm_tcp_buffer_t;

/*******************************************************************************
 * state header.
 * Each state must start with this entry, which is used for chaining
 * states together and for identifying a particular state.
 ******************************************************************************/
typedef struct _lm_state_header_t
{
    d_list_entry_t link;                 /* chaining of states of the same kind */
    struct _lm_state_block_t *state_blk; /* owning state block */

    /* Which kind of state object this header begins. */
    u32_t state_id;
#define STATE_ID_UNKNOWN 0
#define STATE_ID_TCP 1
#define STATE_ID_PATH 2
#define STATE_ID_NEIGH 3

    /* Offload life-cycle status of this state object. */
    u32_t status;
#define STATE_STATUS_UNKNOWN 0
#define STATE_STATUS_INIT 1
#define STATE_STATUS_INIT_CONTEXT 2
#define STATE_STATUS_OFFLOAD_PENDING 3
#define STATE_STATUS_NORMAL 4
#define STATE_STATUS_ABORTED 5
#define STATE_STATUS_INVALIDATED 6
#define STATE_STATUS_UPLOAD_PENDING 7
#define STATE_STATUS_UPLOAD_DONE 8
#define STATE_STATUS_INIT_OFFLOAD_ERR 9
#define STATE_STATUS_ERR 10
} lm_state_header_t;

/*******************************************************************************
 * neighbor state
 ******************************************************************************/
typedef struct _lm_neigh_state_t
{
    lm_state_header_t hdr; /* must be first (generic state header) */

    /* const / cached / delegated portions of the L4 neighbor state */
    l4_neigh_const_state_t neigh_const;
    l4_neigh_cached_state_t neigh_cached;
    l4_neigh_delegated_state_t neigh_delegated;

    /* network reachability */
    u32_t host_reachability_time;
    u32_t nic_reachability_time;
    u8_t stale;
    u8_t _pad[3];

    /* debug */
    u32_t num_dependents; /* number of dependent path states */
} lm_neigh_state_t;

/*******************************************************************************
 * path state
 ******************************************************************************/
typedef struct _lm_path_state_t
{
    lm_state_header_t hdr; /* must be first (generic state header) */

    lm_neigh_state_t *neigh; /* parent neighbor state */

    /* const / cached / delegated portions of the L4 path state */
    l4_path_const_state_t path_const;
    l4_path_cached_state_t path_cached;
    l4_path_delegated_state_t path_delegated;

    /* debug */
    u32_t num_dependents; /* number of dependent tcp states */
} lm_path_state_t;

/*******************************************************************************
 * queue element buffer - for buffering queue elements (of any type)
 ******************************************************************************/
/* Cyclic buffer of fixed-size queue elements. 'first'/'last' bound the
 * underlying storage; 'head'/'tail' delimit the occupied region.
 * (NOTE(review): which of head/tail is the producer end is not evident from
 * this header — confirm against the qe-buffer manipulation code.) */
typedef struct _lm_tcp_qe_buffer_t
{
    char *first;
    char *tail;
    char *head;
    char *last;

    u32_t qe_size; /* queue element size */
    u32_t left;    /* number of elements still available */
} lm_tcp_qe_buffer_t;


/*******************************************************************************
 * Memory Blocks
 ******************************************************************************/
/* Bump-style virtual memory block: allocations advance 'free' and
 * decrement 'left'. */
typedef struct _lm_tcp_mem_block_t
{
    s_list_entry_t link; /* Must be the first entry... */

    u8_t * start; /* Start of the memory block */
    u8_t * free;  /* Pointer to the start of the remaining free space of the block */
    u32_t total;  /* Size of the entire block */
    u32_t left;   /* free bytes left in the block */
    u8_t flags;   /* virt-memblock-pool member or not */
#define MBLK_RETURN_TO_POOL 0x1
} lm_tcp_mem_block_t;

/* Same as lm_tcp_mem_block_t but for physically-contiguous memory: also
 * tracks the physical addresses matching 'start' and 'free'. */
typedef struct _lm_tcp_phy_mem_block_t
{
    s_list_entry_t link;

    u8_t * start; /* Start of the memory block */
    u8_t * free;  /* Pointer to the start of the remaining free space of the block */
    u32_t total;  /* Size of the entire block */
    u32_t left;   /* free bytes left in the block */

    lm_address_t start_phy; /* physical address of 'start' */
    lm_address_t free_phy;  /* physical address of 'free' */
} lm_tcp_phy_mem_block_t;

#define DEBUG_OOO_CQE
/* An "isle" of out-of-order received data: a list of generic buffers held
 * aside until the gap before them is filled. */
typedef struct _lm_isle_t
{
    d_list_entry_t isle_link;         /* link in the connection's isles_list */
    d_list_t isle_gen_bufs_list_head; /* generic buffers that make up this isle */
    u32_t isle_nbytes;                /* total bytes held in this isle */
#ifdef DEBUG_OOO_CQE
    u32_t dedicated_cid;
    u32_t recent_ooo_combined_cqe;    /* last OOO opcode+nbytes, packed (see macros below) */
#endif
} lm_isle_t;

#ifdef DEBUG_OOO_CQE
/* Pack the most recent OOO CQE command and its nbytes into the single debug
 * word recent_ooo_combined_cqe, reusing the FW's CQE field layout. */
#define SET_DEBUG_OOO_INFO(_isle, _cmd, _data) \
    (_isle)->recent_ooo_combined_cqe = ((((_cmd) << TOE_RX_CQE_COMPLETION_OPCODE_SHIFT) & TOE_RX_CQE_COMPLETION_OPCODE) \
                                      | (((_data) << TOE_RX_CQE_OOO_PARAMS_NBYTES_SHIFT) & TOE_RX_CQE_OOO_PARAMS_NBYTES))
#define GET_RECENT_OOO_CMD(_isle) \
    (((_isle)->recent_ooo_combined_cqe & TOE_RX_CQE_COMPLETION_OPCODE) >> TOE_RX_CQE_COMPLETION_OPCODE_SHIFT)
#define GET_RECENT_OOO_DATA(_isle) \
    (((_isle)->recent_ooo_combined_cqe & TOE_RX_CQE_OOO_PARAMS_NBYTES) >> TOE_RX_CQE_OOO_PARAMS_NBYTES_SHIFT)
#endif

/*******************************************************************************
 * Rx connection's generic buffers info.
 ******************************************************************************/
typedef struct _lm_tcp_con_rx_gen_info_t
{
    d_list_t peninsula_list;     /* accessed only via lock */
    d_list_t dpc_peninsula_list; /* accessed lock-free only in dpc */

    /* out-of-order data "isles"; first_isle is embedded to avoid an
     * allocation for the common single-isle case */
    d_list_t isles_list;
    lm_isle_t first_isle;
    lm_isle_t * current_isle;
    u8_t current_isle_number;
    u8_t max_number_of_isles;
    u8_t _isle_pad[2];

    lm_frag_list_t * frag_list; /* allocated in initialization of connection */
    u32_t max_frag_count;       /* the number of frags statically allocated */

    u32_t peninsula_nbytes;     /* bytes currently held in peninsula_list */
    u32_t dpc_peninsula_nbytes; /* bytes currently held in dpc_peninsula_list */
    u32_t isle_nbytes;          /* bytes currently held in all isles */
    u16_t first_buf_offset;     /* offset of first unconsumed byte in the head buffer */

    /* How many buffers (head of indications) were indicated for this connection and haven't
     * returned yet from NDIS. We need to know that to make sure we don't delete the connection
     * before all buffers pointing to it have returned.
     */
    u16_t pending_return_indications;
    /* bytes indicated that their buffers have not yet been returned, this is a value that will increase
     * the window. If we're uploaded and we still have pending_indicated_bytes we need to increase them immediately
     * and not wait...
     */
    u32_t pending_indicated_bytes;

    /* Each indication may result in us updating the window - this depends on the #of bytes accepted AND the update_window_mode
     * we're in. We aggregate this over all indications (mm_tcp_rx_indicate_gen may be called several times if more generic data
     * was received during indicate). This field is updated ONLY by the function lm_tcp_rx_buffered_data_indicated, and is accessed
     * once the mm_tcp_rx_indicate_gen function completes. The main reason for this aggregation, unfortunately, is for passing
     * SpartaTest - receive_indications, which expects a specific number of indications. */
    u32_t add_sws_bytes;

    u8_t wait_for_isle_left;
    u8_t _padding;

    /* The update window mode is taken from the toe information before an indication
     * We can't use the main copy because it may change between the time we indicate
     * (after we've marked the buffer) and the time we get an answer (and need to determine
     * whether to update the window or not) */
    u8_t update_window_mode;

    /* debug/statistics */
    /* DEFINITION: A generic buffer can be 'done' with as a result of a successful indicate or as a result of a copy
     * operation to an application buffer. (regardless of its state before: partially indicated/partially copied).
     * We count the number of times generic buffers were 'done' with */
    u8_t peninsula_blocked;      /* peninsula is blocked as a result of terminate (get_buffered_data) */
    u32_t num_buffers_indicated; /* 'done' with as a result of an indicate */
    u32_t num_buffers_copied_grq;/* # grq buffers copied */
    u32_t num_buffers_copied_rq; /* # rq buffers copied TBD how to count*/
    u32_t num_bytes_indicated;   /* all bytes indicated in either full/partial indications */
    u32_t copy_gen_buf_fail_cnt; /* counts the number of times a client.copy operation failed */
    u32_t copy_gen_buf_dmae_cnt; /* counts the number of times dmae copy operation was used */
    u32_t num_success_indicates; /* number of times indicate succeeded */
    u32_t num_failed_indicates;  /* number of times indicate failed */
    u32_t bufs_indicated_rejected; /* number of rejected bufs */
    u64_t bytes_copied_cnt_in_process;
    u64_t bytes_copied_cnt_in_post;
    u64_t bytes_copied_cnt_in_comp;
    u64_t bytes_indicated_accepted;
    u64_t bytes_indicated_rejected;
    u32_t dont_send_to_system_more_then_rwin;
    u32_t num_non_full_indications;

} lm_tcp_con_rx_gen_info_t;

/*******************************************************************************
 * Rx connection's receive window information for silly window syndrome avoidance
******************************************************************************/ 281*d14abf15SRobert Mustacchi #define MAX_INITIAL_RCV_WND 0x80000000 /* 2GB (due to cyclic counters and window-update algorithm */ 282*d14abf15SRobert Mustacchi 283*d14abf15SRobert Mustacchi /* DWA: Delayed Window Update Algorithm : the twin of DCA, delay the window updates according to the delayed completions. */ 284*d14abf15SRobert Mustacchi 285*d14abf15SRobert Mustacchi #define MAX_DW_THRESH_ENTRY_CNT 16 /* a new entry is created each time we see a NDC completion (non-delayed-complete). We 286*d14abf15SRobert Mustacchi * can limit these to 16 'active completions' i.e. completions that haven't received a 287*d14abf15SRobert Mustacchi * window-update yet. FW-DCA works with quad-buffer, therefore 16 is more than enough. */ 288*d14abf15SRobert Mustacchi 289*d14abf15SRobert Mustacchi typedef struct _lm_tcp_rx_dwa_info { 290*d14abf15SRobert Mustacchi u32_t dw_thresh[MAX_DW_THRESH_ENTRY_CNT]; /* delayed window update thresholds. 
*/ 291*d14abf15SRobert Mustacchi u8_t head; /* head of the the cyclic buffer dw_thresh (next empty entry) */ 292*d14abf15SRobert Mustacchi u8_t tail; /* tail of the the cyclic buffer dw_thresh */ 293*d14abf15SRobert Mustacchi u16_t _pad; 294*d14abf15SRobert Mustacchi } lm_tcp_rx_dwa_info; 295*d14abf15SRobert Mustacchi 296*d14abf15SRobert Mustacchi typedef struct _lm_tcp_con_rx_sws_info_t 297*d14abf15SRobert Mustacchi { 298*d14abf15SRobert Mustacchi u32_t drv_rcv_win_right_edge; /* The drivers window right edge (shadow of fw, and may be 299*d14abf15SRobert Mustacchi * larger if the difference is smaller than mss) */ 300*d14abf15SRobert Mustacchi u32_t mss; /* min(tcp_const.remote_mss, 301*d14abf15SRobert Mustacchi parent_path->path_cached.path_mtu - HEADERS size) */ 302*d14abf15SRobert Mustacchi u32_t extra_bytes; 303*d14abf15SRobert Mustacchi 304*d14abf15SRobert Mustacchi u8_t timer_on; 305*d14abf15SRobert Mustacchi } lm_tcp_con_rx_sws_info_t; 306*d14abf15SRobert Mustacchi 307*d14abf15SRobert Mustacchi /******************************************************************************* 308*d14abf15SRobert Mustacchi * Rx connection's special information 309*d14abf15SRobert Mustacchi ******************************************************************************/ 310*d14abf15SRobert Mustacchi typedef struct _lm_tcp_con_rx_t 311*d14abf15SRobert Mustacchi { 312*d14abf15SRobert Mustacchi lm_tcp_con_rx_gen_info_t gen_info; 313*d14abf15SRobert Mustacchi lm_tcp_con_rx_sws_info_t sws_info; 314*d14abf15SRobert Mustacchi 315*d14abf15SRobert Mustacchi /* Last bd written to: required in spcecial case of very large application buffers 316*d14abf15SRobert Mustacchi * not fitting into the bd-chain . 
*/ 317*d14abf15SRobert Mustacchi struct toe_rx_bd * last_rx_bd; 318*d14abf15SRobert Mustacchi 319*d14abf15SRobert Mustacchi /* Remember a remote disconnect event until all received data is 320*d14abf15SRobert Mustacchi * completed/indicated successfully to the client */ 321*d14abf15SRobert Mustacchi u8_t flags; 322*d14abf15SRobert Mustacchi #define TCP_CON_RST_IND_PENDING 0x1 323*d14abf15SRobert Mustacchi #define TCP_CON_FIN_IND_PENDING 0x2 324*d14abf15SRobert Mustacchi u8_t zero_byte_posted_during_ind; 325*d14abf15SRobert Mustacchi u8_t check_data_integrity_on_complete; 326*d14abf15SRobert Mustacchi u8_t check_data_integrity_on_receive; 327*d14abf15SRobert Mustacchi u32_t compared_bytes; 328*d14abf15SRobert Mustacchi 329*d14abf15SRobert Mustacchi u32_t skp_bytes_copied; /* counter of bytes that were already copied to the buffer at post that we 330*d14abf15SRobert Mustacchi * will receive a skip for which we'll need to ignore...This counter must be protected 331*d14abf15SRobert Mustacchi * by a lock */ 332*d14abf15SRobert Mustacchi /* GilR 4/3/2006 - TBA - add lm tcp con rx debug/stats fields? 
*/ 333*d14abf15SRobert Mustacchi u32_t rx_zero_byte_recv_reqs; /* #Zero byte receeive requests */ 334*d14abf15SRobert Mustacchi } lm_tcp_con_rx_t; 335*d14abf15SRobert Mustacchi 336*d14abf15SRobert Mustacchi /******************************************************************************* 337*d14abf15SRobert Mustacchi * Tx connection's special information 338*d14abf15SRobert Mustacchi ******************************************************************************/ 339*d14abf15SRobert Mustacchi typedef struct _lm_tcp_con_tx_t 340*d14abf15SRobert Mustacchi { 341*d14abf15SRobert Mustacchi u16_t bds_without_comp_flag; /* counter of consecutive BDs without CompFlag */ 342*d14abf15SRobert Mustacchi u8_t flags; 343*d14abf15SRobert Mustacchi #define TCP_CON_FIN_REQ_LM_INTERNAL 0x1 /* FIN request completion should 344*d14abf15SRobert Mustacchi * not be indicated to mm */ 345*d14abf15SRobert Mustacchi #define TCP_CON_RST_IND_NOT_SAFE 0x2 346*d14abf15SRobert Mustacchi 347*d14abf15SRobert Mustacchi 348*d14abf15SRobert Mustacchi u8_t _pad; 349*d14abf15SRobert Mustacchi u32_t mss; 350*d14abf15SRobert Mustacchi } lm_tcp_con_tx_t; 351*d14abf15SRobert Mustacchi 352*d14abf15SRobert Mustacchi 353*d14abf15SRobert Mustacchi /******************************************************************************* 354*d14abf15SRobert Mustacchi * TCP connection - rx OR tx 355*d14abf15SRobert Mustacchi ******************************************************************************/ 356*d14abf15SRobert Mustacchi /* This structure is used to collect information during a DPC without taking the 357*d14abf15SRobert Mustacchi * fp-lock. 
 * All fields in this structure must be accessed ONLY from within the
 * the DPC
 */
typedef struct _lm_tcp_dpc_info_t
{
    s_list_entry_t link; /* must be the first entry here */
    s_list_entry_t * dpc_completed_tail; /* points to the tail of the sub-list of active_tb_list that needs to
                                          * be completed. */
    u32_t dpc_bufs_completed; /* number of buffers completed in the dpc (aggregated during process
                               * stage for fast splitting of the active_tb_list at completion stage)*/
    u32_t dpc_rq_placed_bytes; /* how many bytes were placed on rq as a result of rq-cmp / copying from grq->rq */
    u32_t dpc_actual_bytes_completed; /* number of bytes completed to client - aggregated during process stage */
    u16_t dpc_bd_used; /* number of bds used - aggregated during process stage */
    u16_t dpc_flags; /* flags marked during cqe processing - only accessed during processing and
                      * snapshot-ed under a lock */
#define LM_TCP_DPC_RESET_RECV 0x1
#define LM_TCP_DPC_FIN_RECV 0x2
#define LM_TCP_DPC_FIN_CMP 0x4
#define LM_TCP_DPC_KA_TO 0x8
#define LM_TCP_DPC_RT_TO 0x10
#define LM_TCP_DPC_URG 0x20
#define LM_TCP_DPC_RAMROD_CMP 0x40
// #define LM_TCP_DPC_NDC 0x80
#define LM_TCP_DPC_DBT_RE 0x100
#define LM_TCP_DPC_OPT_ERR 0x200
#define LM_TCP_DPC_UPLD_CLOSE 0x400
#define LM_TCP_DPC_FIN_RECV_UPL 0x800
#define LM_TCP_DPC_TOO_BIG_ISLE 0x1000
#define LM_TCP_DPC_TOO_MANY_ISLES 0x2000

/*
#define LM_TCP_COMPLETE_FP (LM_TCP_DPC_RESET_RECV | LM_TCP_DPC_FIN_RECV | LM_TCP_DPC_FIN_RECV_UPL | LM_TCP_DPC_FIN_CMP | \
                            LM_TCP_DPC_KA_TO | LM_TCP_DPC_RT_TO | LM_TCP_DPC_URG | LM_TCP_DPC_RAMROD_CMP | LM_TCP_DPC_NDC | \
                            LM_TCP_DPC_DBT_RE | LM_TCP_DPC_OPT_ERR | LM_TCP_DPC_UPLD_CLOSE | \
                            LM_TCP_DPC_TOO_BIG_ISLE | LM_TCP_DPC_TOO_MANY_ISLES)
*/
    /* dpc snapshot parameters: taken before an operation that can release a lock is done
     * in lm_tcp_xx_complete_fp */
    u16_t snapshot_flags; /* only accessed under lock */

    /* we have special cases where lm blocks um from posting until a specific buffer gets completed, we have a flag for this
     * this flag is accessed with the post flow, so it should be protected by a lock, therefore we remember we have to unset it
     * in the completion stage (under a lock) */
    u8_t dpc_unblock_post;
    /* debug / stats */
    u8_t dpc_comp_blocked;

    /* the window size returned from the fw after window size decreasment request returned, written back to the fw */
    u32_t dpc_fw_wnd_after_dec;

} lm_tcp_dpc_info_t;

/* A single rx OR tx half of a TCP connection (see 'type' field below). */
typedef struct _lm_tcp_con_t
{
    lm_tcp_dpc_info_t dpc_info; /* must be the first field */

    struct _lm_tcp_state_t * tcp_state; /* The tcp state associated with this connection */

    /* doorbell data shared with hw - rx or tx form depending on 'type' */
    union {
        volatile struct toe_rx_db_data *rx;
        volatile struct toe_tx_db_data *tx;
    } db_data;
    lm_address_t phys_db_data; /* physical address of db_data */

    /* rx/tx tcp connection info. */
    union
    {
        lm_tcp_con_rx_t rx;
        lm_tcp_con_tx_t tx;
    } u;

    lm_bd_chain_t bd_chain;

    /* List of posted buffers (i.e. attached to the bd chain) */
    s_list_t active_tb_list;
    u32_t rq_nbytes; /* how many bytes are in the active-tb-list */

    /* buffer of cqes that represent the last X cqes received */
    lm_tcp_qe_buffer_t history_cqes;

    u32_t type;
#define TCP_CON_TYPE_RX 1
#define TCP_CON_TYPE_TX 2

    /* accumulator of currently posted application buffer bytes.
     * accumulated in order to set lm_tcp_buffer.app_buf_size of
     * the last tcp buffer of the application buffer */
    u32_t app_buf_bytes_acc_post;

    /* accumulator of currently completed application buffer bytes.
     * accumulated in order to set lm_tcp_buffer.app_buf_xferred of
     * the last tcp buffer of the application buffer */
    u32_t app_buf_bytes_acc_comp;

    u32_t db_more_bytes; /* number of bytes to be produced in next doorbell */
    u16_t db_more_bufs;  /* number of tcp buffers to be produced in next doorbell */
    u16_t db_more_bds;   /* number of bds to be produced in next doorbell */

    /* flags are used for managing the connection's posting/completing/indicating state machines */
    u32_t flags;
#define TCP_FIN_REQ_POSTED 0x0001
#define TCP_RST_REQ_POSTED 0x0002
#define TCP_INV_REQ_POSTED 0x0004
#define TCP_TRM_REQ_POSTED 0x0008
#define TCP_FIN_REQ_COMPLETED 0x0010
#define TCP_RST_REQ_COMPLETED 0x0020
#define TCP_INV_REQ_COMPLETED 0x0040
#define TCP_TRM_REQ_COMPLETED 0x0080
#define TCP_REMOTE_FIN_RECEIVED 0x0100
#define TCP_REMOTE_RST_RECEIVED 0x0200
#define TCP_REMOTE_FIN_RECEIVED_ALL_RX_INDICATED 0x0400
#define TCP_REMOTE_RST_RECEIVED_ALL_RX_INDICATED 0x0800
#define TCP_INDICATE_REJECTED 0x1000
#define TCP_POST_BLOCKED 0x2000
#define TCP_COMP_BLOCKED 0x4000
#define TCP_COMP_DEFERRED 0x8000
#define TCP_BUFFERS_ABORTED 0x10000
#define TCP_DEFERRED_PROCESSING 0x20000
#define TCP_POST_DELAYED 0x40000 /* lm sets this when posting buffers is delay for some reason */
#define TCP_POST_COMPLETE_SPLIT 0x80000 /* lm sets this when every split buffer that'll be posted will be completed immediately */
#define TCP_POST_NO_SKP 0x100000 /* lm sets this when there will be no more skp completions from fw (comp blocked...) */
#define TCP_UPLOAD_REQUESTED 0x200000 /* lm sets this when FW requests an upload for any reason - after this is set, no more uploads will be requested*/
#define TCP_DB_BLOCKED 0x400000
/* composite masks built from the flags above */
#define TCP_RX_DB_BLOCKED (TCP_REMOTE_FIN_RECEIVED | TCP_REMOTE_RST_RECEIVED | TCP_DB_BLOCKED)
#define TCP_TX_DB_BLOCKED (TCP_REMOTE_RST_RECEIVED | TCP_DB_BLOCKED)
#define TCP_TX_POST_BLOCKED (TCP_FIN_REQ_POSTED | TCP_RST_REQ_POSTED | TCP_INV_REQ_POSTED | TCP_TRM_REQ_POSTED | \
                             TCP_REMOTE_RST_RECEIVED_ALL_RX_INDICATED | TCP_POST_BLOCKED \
                             )
/* GilR 4/4/2006 - TBD - open issue with Hav, for Tx POST BLOCKED we might not wait for 'rx indicated' after RST received */
#define TCP_RX_POST_BLOCKED (TCP_RST_REQ_POSTED | TCP_INV_REQ_POSTED | TCP_TRM_REQ_POSTED | \
                             TCP_REMOTE_RST_RECEIVED_ALL_RX_INDICATED | TCP_REMOTE_FIN_RECEIVED_ALL_RX_INDICATED | \
                             TCP_POST_BLOCKED)
#define TCP_TX_COMP_BLOCKED (TCP_RST_REQ_COMPLETED | TCP_FIN_REQ_COMPLETED | TCP_REMOTE_RST_RECEIVED | \
                             TCP_INV_REQ_COMPLETED | TCP_TRM_REQ_COMPLETED | TCP_COMP_BLOCKED \
                             )
#define TCP_RX_COMP_BLOCKED (TCP_RST_REQ_COMPLETED | TCP_REMOTE_FIN_RECEIVED | TCP_REMOTE_RST_RECEIVED | \
                             TCP_INV_REQ_COMPLETED | TCP_TRM_REQ_COMPLETED | TCP_COMP_BLOCKED \
                             )
#define TCP_TX_COMP_DEFERRED TCP_COMP_DEFERRED
#define TCP_RX_COMP_DEFERRED TCP_COMP_DEFERRED
#define TCP_RX_IND_BLOCKED (TCP_RST_REQ_POSTED | TCP_INV_REQ_POSTED | TCP_TRM_REQ_POSTED | TCP_INDICATE_REJECTED)

    /* GilR 4/3/2006 - TBA - add lm con debug/statistics */
    u64_t bytes_post_cnt; /* cyclic counter of posted application buffer bytes */
    u64_t bytes_comp_cnt; /* cyclic counter of completed application buffer bytes (including skipped bytes due to push) */
    u64_t bytes_push_skip_cnt;
    u64_t bytes_skip_post_cnt; /* skipped post because of generic data */
    u32_t buffer_skip_post_cnt; /* skipped post because of generic data */
    u32_t buffer_post_cnt;
    u32_t buffer_completed_cnt;
    u32_t rq_completion_calls;
    u32_t partially_completed_buf_cnt; /* included in 'buffer_completed_cnt' above */
    u32_t buffer_aborted_cnt;
    u64_t bytes_aborted_cnt; /* cyclic counter of aborted application buffer bytes */
    u32_t bytes_trm_aborted_cnt; /* cyclic counter of bytes
received with rst ramrod completion */
    u32_t fp_db_cnt;                 /* Fast path doorbell counter - doesn't count Adv. Wnd. doorbells */
    u32_t indicate_once_more_cnt;
    u32_t droped_non_empty_isles;    /* (sic "dropped") isles discarded while still holding data */
    u32_t droped_empty_isles;        /* (sic "dropped") empty isles discarded */
    u32_t rx_post_blocked;
    u32_t zb_rx_post_blocked;        /* zero-byte rx posts that were blocked */
    u32_t partially_filled_buf_sent;
    u32_t abortion_under_flr;        /* buffers aborted while a function-level reset was in progress */
} lm_tcp_con_t;


/*******************************************************************************
 * Slow path request information
 ******************************************************************************/
/* structure used for storing the data returned by a completion of a slow-path request */
typedef union _lm_tcp_slow_path_ret_data_t
{
    struct {
        lm_frag_list_t           * frag_list;   /* buffered data returned on upload */
        struct _lm_tcp_gen_buf_t * ret_buf_ctx; /* generic buffer context backing frag_list */
    } tcp_upload_data;
} lm_tcp_slow_path_ret_data_t;

/* structure used for storing the data required for a slow-path request */
typedef struct _lm_tcp_path_relink_cached_t
{
    l4_path_cached_state_t  path_cached;
    l4_neigh_cached_state_t neigh_cached;
} lm_tcp_path_relink_cached_t;

/* input data attached to a slow-path request when it is sent */
typedef union _lm_tcp_slow_path_sent_data_t {
    struct {
        void * data;
    } tcp_update_data;
} lm_tcp_slow_path_sent_data_t ;

/* DMA-able scratch area used by slow-path ramrods */
typedef union _lm_tcp_slow_path_phys_data_t
{
    struct toe_context                     toe_ctx;    /* used by query slow path request */
    struct toe_update_ramrod_cached_params update_ctx; /* used by update slow path request */

} lm_tcp_slow_path_phys_data_t;

/* virtual/physical address pair for the slow-path scratch area above */
typedef struct _lm_tcp_slow_path_data_t {
    lm_tcp_slow_path_phys_data_t * virt_addr;
    lm_address_t                   phys_addr;
}lm_tcp_slow_path_data_t ;

/* A single outstanding slow-path request and its completion status. */
typedef struct _lm_tcp_slow_path_request_t
{
    lm_sp_req_common_t           sp_req_common;
    lm_tcp_slow_path_ret_data_t  ret_data;  /* SP req. output data */
    lm_tcp_slow_path_sent_data_t sent_data; /* SP req. input data */

    u32_t type;
#define SP_REQUEST_NONE                         0
#define SP_REQUEST_INITIATE_OFFLOAD             1
#define SP_REQUEST_TERMINATE_OFFLOAD            2
#define SP_REQUEST_QUERY                        3
#define SP_REQUEST_UPDATE_TCP                   4
#define SP_REQUEST_UPDATE_PATH                  5
#define SP_REQUEST_UPDATE_NEIGH                 6
#define SP_REQUEST_INVALIDATE                   7
#define SP_REQUEST_ABORTIVE_DISCONNECT          8
#define SP_REQUEST_TERMINATE1_OFFLOAD           9
#define SP_REQUEST_PENDING_LOCAL_FIN_DISCONNECT 10 /* used only for LOCAL graceful disconnect */
#define SP_REQUEST_PENDING_REMOTE_DISCONNECT    11 /* used for both abortive and graceful disconnect */
#define SP_REQUEST_PENDING_TX_RST               12 /* used for TX Reset received while buffers in the active-tb-list */
#define SP_REQUEST_BLOCKED                      13 /* when there is no pending connection, we just want to block sp-command,
                                                    * for example, delay offload */
#define SP_REQUEST_UPDATE_PATH_RELINK           14
    lm_status_t status; /* request completion status */
} lm_tcp_slow_path_request_t;

/*******************************************************************************
 * information required for calculating the TCP state on 'query'
 * and 'terminate' completions
 ******************************************************************************/
typedef struct _lm_tcp_state_calculation_t
{
    u64_t fin_request_time;    /* written by Tx path, when a fin request is posted to the chip */
    u64_t fin_completed_time;  /* written by Tx path, when a fin request is completed by the chip */
    u64_t fin_reception_time;  /* written by Rx path, when a remote fin is received */
    u8_t  con_rst_flag;        /* set whenever chip reports RST reception or RST sent completion */
    u8_t  con_upld_close_flag; /* set whenever chip reports request to upload a connection after SYN was received or FIN_WAIT2 timer expired */
    u8_t  _pad[2];
} lm_tcp_state_calculation_t;

/*******************************************************************************
 * tcp state
 ******************************************************************************/
typedef struct _lm_tcp_state_t
{
    lm_state_header_t           hdr;
    lm_path_state_t            *path;       /* path (route) state this tcp state depends on */
    lm_tcp_con_t               *rx_con;
    lm_tcp_con_t               *tx_con;
    lm_tcp_slow_path_request_t *sp_request; /* currently outstanding slow-path request, if any */
    lm_tcp_slow_path_data_t     sp_req_data;
    lm_tcp_state_calculation_t  tcp_state_calc;
    void                       *ctx_virt;   /* Can point to different structures depending on the ulp_type */
    lm_address_t                ctx_phys;
    l4_tcp_delegated_state_t    tcp_delegated;
    l4_tcp_const_state_t        tcp_const;
    l4_tcp_cached_state_t       tcp_cached;

    u32_t cid;                  /* connection id; only the low 24 bits are significant */
#define TCP_CID_MASK 0xffffff

    /* synchronization between Tx and Rx completions of slow path events */
    u16_t sp_flags;
#define SP_REQUEST_COMPLETED_RX 0x001
#define SP_REQUEST_COMPLETED_TX 0x002
#define REMOTE_RST_INDICATED_RX 0x004
#define REMOTE_RST_INDICATED_TX 0x008
/* mainly for debugging purposes... slow-path indications when there is no fp... */
#define SP_TCP_OFLD_REQ_POSTED  0x010
#define SP_TCP_SRC_REQ_POSTED   0x020
#define SP_TCP_TRM_REQ_POSTED   0x040
#define SP_TCP_QRY_REQ_POSTED   0x080
#define SP_TCP_OFLD_REQ_COMP    0x100
#define SP_TCP_SRC_REQ_COMP     0x200
#define SP_TCP_TRM_REQ_COMP     0x400
#define SP_TCP_QRY_REQ_COMP     0x800

    u8_t  in_searcher;          /* was the tcp state added to searcher hash */
    u8_t  ulp_type;
    void * aux_memory;          /* auxiliary memory, kind given by type_of_aux_memory */
    u32_t aux_mem_size;
    u8_t  type_of_aux_memory;
#define TCP_CON_AUX_RT_MEM 0x1
    u8_t  aux_mem_flag;
/* (sic "SUCCESS") macro name kept for source compatibility */
#define TCP_CON_AUX_RT_MEM_SUCCSESS_ALLOCATION 0x1
#define TCP_CON_AUX_RT_MEM_FAILED_ALLOCATION   0x2

    u8_t sp_request_pending_completion;
    u8_t pending_abortive_disconnect;
    lm_tcp_integrity_info_t integrity_info;
    /* GilR 4/3/2006 - TBA - add lm tcp state debug/statistics */
} lm_tcp_state_t;

/*******************************************************************************
 * Generic TCP buffer.
 ******************************************************************************/
typedef struct _lm_tcp_gen_buf_t
{
    d_list_entry_t link; /* MUST be the first field in this structure */
    /* generic buffers create a list of generic buffers. The next element is in fact a d_list_entry,
     * however, the generic buffer list is not always accessed as a d_list, it is sometimes traversed as
     * a list ending with NULL */
#define NEXT_GEN_BUF(_gen_buf) (struct _lm_tcp_gen_buf_t *)d_list_next_entry(&((_gen_buf)->link))
#define PREV_GEN_BUF(_gen_buf) (struct _lm_tcp_gen_buf_t *)d_list_prev_entry(&((_gen_buf)->link))

    lm_address_t     buf_phys;  /* physical address of the data buffer */
    lm_tcp_state_t * tcp;       /* mainly for updating pending_return_indications */
    u8_t           * buf_virt;  /* virtual address of the data buffer */

    /* Following 4 fields are used for supporting SWS, accessed when buffer is returned */
    u32_t ind_bytes;   /* set only in buffer that is head of indication - how many bytes were indicated */
    u32_t ind_nbufs;   /** how many buffers were included in the indication. Needed for:
                        * - returning buffers to generic pool
                        * - efficiently restore the peninsula list */
    /** refcnt required only if we support RcvIndicationSize > 0 */
    u16_t refcnt;      /* reference count for number of times the buffer was successfully indicated to um */
    u16_t placed_bytes;

    /* The FREE_WHEN_DONE flag indicates that this generic buffer
     * contains the buffered data received when doing tcp_offload and when it is completed, this
     * generic buffer is freed back into system memory instead of the generic buffer pool. */
    u8_t flags;
#define GEN_FLAG_FREE_WHEN_DONE 0x01
#define GEN_FLAG_SWS_UPDATE     0x02 /* In certain cases a successful indication updates the window immediately, however
                                      * when we enter a 'safe-mode' we wait for the generic buffers to return before we
                                      * update the window. This flag indicates whether or not we have to update. */

    u16_t phys_offset; /* When allocating gen bufs for buffered data, save the offset
                        * from the original phys addr, and use it when we free the gen buf */

} lm_tcp_gen_buf_t;


/*******************************************************************************
 * generic buffer queue
 ******************************************************************************/
typedef struct _lm_tcp_grq_t
{
    lm_bd_chain_t bd_chain;

    /* List of posted generic buffers (i.e. attached to the bd chain) */
    d_list_t active_gen_list;

    /* List of returned generic buffers, may be used to immediate compensation this grq */
    d_list_t aux_gen_list;

    lm_isle_t* isles_pool;
    /* Flag indicating that the grq needs to be compensated after generic buffers are allocated...
 */
    u8_t  grq_compensate_on_alloc;
    u8_t  grq_invloved_in_rss;  /* (sic "involved") nonzero if this grq participates in RSS */

    u16_t low_bds_threshold;
    u16_t high_bds_threshold;

    s16_t number_of_isles_delta;
    s32_t gen_bufs_in_isles_delta;

    /* statistics */
    u16_t max_grqs_per_dpc;     /* maximum grqs compensated in dpc */
    u16_t num_grqs_last_dpc;
    u16_t num_deficient;        /* number of times compensation wasn't complete */
    u16_t avg_grqs_per_dpc;
    u32_t avg_dpc_cnt;
    u32_t sum_grqs_last_x_dpcs;
    u32_t gen_bufs_compensated_from_bypass_only;
    u32_t gen_bufs_compensated_till_low_threshold;
    u32_t gen_bufs_collected_to_later_compensation;
} lm_tcp_grq_t;

/*******************************************************************************
 * L4 receive completion queue
 ******************************************************************************/
typedef struct _lm_tcp_rcq_t
{
    lm_bd_chain_t bd_chain;

    /* points directly to the TOE Rx index in the USTORM part
     * of the non-default status block */
    u16_t volatile *hw_con_idx_ptr;

    /* for RSS indirection table update synchronization */
    u8_t  rss_update_pending;   /* unused */
    u8_t  suspend_processing;
    u32_t update_cid;
    u32_t rss_update_stats_quiet;
    u32_t rss_update_stats_sleeping;
    u32_t rss_update_stats_delayed;
    u32_t rss_update_processing_delayed;
    u32_t rss_update_processing_continued;
    u32_t rss_update_processing_max_continued;

    /* statistics */
    u16_t max_cqes_per_dpc;
    u16_t num_cqes_last_dpc;
    u16_t avg_cqes_per_dpc;
    u16_t _pad16;
    u32_t avg_dpc_cnt;
    u32_t sum_cqes_last_x_dpcs;

    lm_hc_sb_info_t hc_sb_info;

} lm_tcp_rcq_t;

/*******************************************************************************
 * L4 send completion queue
 ******************************************************************************/
typedef struct _lm_tcp_scq_t
{
    lm_bd_chain_t bd_chain;

    /* points directly to the TOE Tx index in the CSTORM part
     * of the non-default status block */
    u16_t volatile *hw_con_idx_ptr;

    /* statistics */
    u16_t max_cqes_per_dpc;
    u16_t num_cqes_last_dpc;
    u16_t avg_cqes_per_dpc;
    u16_t _pad16;
    u32_t avg_dpc_cnt;
    u32_t sum_cqes_last_x_dpcs;

    lm_hc_sb_info_t hc_sb_info;

} lm_tcp_scq_t;

/*******************************************************************************
 * states block - includes all offloaded states and possibly other offload
 * information of a specific client.
 ******************************************************************************/
typedef struct _lm_state_block_t
{
    d_list_t tcp_list;
    d_list_t path_list;
    d_list_t neigh_list;
} lm_state_block_t;


/* Device-global TOE statistics, aggregated across all connections. */
typedef struct _lm_toe_statistics_t
{
    u32_t total_ofld;      /* cyclic counter of number of offloaded tcp states */
    u32_t total_upld;      /* cyclic counter of number of uploaded tcp states */
    s32_t total_indicated; /* cyclic counter of number of generic indications (sum of connections pending...)
 */
    s32_t total_indicated_returned; /* cyclic counter of number of generic indications that have returned */

    /* aggregative per-connections statistics */
    u32_t rx_rq_complete_calls;        /* #RQ completion calls (total, copy + zero copy) */
    u32_t rx_rq_bufs_completed;        /* #RQ completion buffers */
    u64_t rx_bytes_completed_total;    /* #RQ completion bytes */

    u32_t rx_accepted_indications;     /* #GRQ completion calls (indicate) */
    u32_t rx_bufs_indicated_accepted;  /* #GRQ completion buffers */
    u64_t rx_bytes_indicated_accepted; /* #GRQ completion bytes */

    u32_t rx_rejected_indications;     /* #failed or partially consumed indicate calls */
    u32_t rx_bufs_indicated_rejected;  /* #GRQ completion buffers */
    u64_t rx_bytes_indicated_rejected; /* #GRQ completion bytes */

    u32_t rx_zero_byte_recv_reqs;      /* #Zero byte receive requests */
    u32_t rx_bufs_copied_grq;          /* #VBD copy bufs total */
    u32_t rx_bufs_copied_rq;           /* #VBD copy bufs total */
    u32_t _pad32_1;
    u64_t rx_bytes_copied_in_post;     /* #VBD copy bytes in post phase */
    u64_t rx_bytes_copied_in_comp;     /* #VBD copy bytes in completion phase */
    u64_t rx_bytes_copied_in_process;  /* #VBD copy bytes in process phase */

    /* post */
    u32_t rx_bufs_posted_total;
    u32_t rx_bufs_skipped_post;
    u64_t rx_bytes_skipped_post;
    u64_t rx_bytes_posted_total;

    /* push related */
    u64_t rx_bytes_skipped_push;
    u32_t rx_partially_completed_buf_cnt;

    /* abort */
    u32_t rx_buffer_aborted_cnt;

    u32_t tx_rq_complete_calls;
    u32_t tx_rq_bufs_completed;
    u64_t tx_bytes_posted_total;
    u64_t tx_bytes_completed_total;

    /* upload-request counters, broken down by the reason the upload was requested */
    u32_t total_dbg_upld_requested;
    u32_t total_fin_upld_requested;
    u32_t total_rst_upld_requested;
    u32_t total_close_upld_requested;
    u32_t total_dbt_upld_requested;
    u32_t total_opt_upld_requested;
    u32_t total_big_isle_upld_requesed;   /* (sic "requested") */
    u32_t total_many_isles_upld_requesed; /* (sic "requested") */
    u32_t total_upld_requested[L4_UPLOAD_REASON_MAX];
    u32_t con_state_on_upload[L4_TCP_CON_STATE_MAX];
    u32_t total_bytes_lost_on_upload;
    u32_t total_droped_non_empty_isles;   /* (sic "dropped") */
    u32_t total_droped_empty_isles;       /* (sic "dropped") */
    u32_t total_rx_post_blocked;
    u32_t total_zb_rx_post_blocked;
    u32_t total_cfc_delete_error;
    u32_t total_num_non_full_indications;
    u32_t total_aux_mem_success_allocations;
    u32_t total_aux_mem_failed_allocations;
    u32_t total_rx_abortion_under_flr;
    u32_t total_tx_abortion_under_flr;
    u32_t max_number_of_isles_in_single_con;
    u32_t total_aborive_disconnect_during_completion; /* (sic "abortive") */
    u32_t total_pending_aborive_disconnect_completed; /* (sic "abortive") */
    u32_t total_aborive_disconnect_completed;         /* (sic "abortive") */

    u64_t total_buffered_data;
} lm_toe_statistics_t;

/* Archipelago (collection of isles) accounting for out-of-order data. */
typedef struct _lm_toe_isles_t
{
    s32_t gen_bufs_in_isles;
    s32_t max_gen_bufs_in_isles;
    s16_t number_of_isles;
    s16_t max_number_of_isles;
    u8_t  l4_decrease_archipelago;
    u8_t  __pad[3];
} lm_toe_isles_t;

/*******************************************************************************
 * toe info - all TOE (L4) information/data structures of the lm_device
 ******************************************************************************/
typedef struct _lm_toe_info_t
{
    struct _lm_device_t *pdev;      /* back-pointer to the owning device */
    lm_state_block_t     state_blk; /* all offloaded tcp/path/neigh states */

    lm_toe_statistics_t stats;
    lm_toe_isles_t      archipelago;

    lm_tcp_scq_t scqs[MAX_L4_TX_CHAIN]; /* send completion queues */
    lm_tcp_rcq_t rcqs[MAX_L4_RX_CHAIN]; /* receive completion queues */
    lm_tcp_grq_t grqs[MAX_L4_RX_CHAIN]; /* generic buffer queues */

    u8_t  indirection_table[TOE_INDIRECTION_TABLE_SIZE];
    u32_t rss_update_cnt; /* GilR 4/4/2006 - TBD on RSS indirection table update implementation */
    u32_t gen_buf_size;   /* The size of a generic buffer based on gen_buf_min_size and mtu */

    u8_t state;
#define LM_TOE_STATE_NONE   0
#define LM_TOE_STATE_INIT   1
#define LM_TOE_STATE_NORMAL 2

    /* Once a generic indication succeeded and the buffers are given to the client we have to choose whether we want
     * to give a window-update immediately (short-loop) or wait for the buffer to return (long-loop). The mode is determined
     * by a set of rules in the UM related to the generic buffer pool and its state. The UM sets this parameter for the lm,
     * and at each indication the lm checks which mode it is in, marks the generic buffer and gives a window-update accordingly */
    u8_t update_window_mode;
#define LM_TOE_UPDATE_MODE_LONG_LOOP  0
#define LM_TOE_UPDATE_MODE_SHORT_LOOP 1

    /* This field is used to indicate that certain events have occurred in TOE.
     * Should be updated under TOE-LOCK */
    u8_t toe_events;
#define LM_TOE_EVENT_WINDOW_DECREASE 0x1
    u8_t __pad[1];

    lm_toe_integrity_info_t integrity_info;

    /* Slow-path data for toe-rss (common and not per connection, therefore located here!) */
    struct toe_rss_update_ramrod_data * rss_update_data;
    lm_address_t                        rss_update_data_phys;
} lm_toe_info_t;


#endif /* _LM_L4ST_H */