1 
2 /*
3 functions for managing Chip per-connection context
4 */
5 #include "context.h"
6 #include "command.h"
7 #include "cdu_def.h"
8 #include "bd_chain.h"
9 
/* returns a pointer to a connection's chip context */
/* Returns a pointer to the chip (CDU) context of connection `cid`.
 * The CDU context memory is a table of pages, each holding
 * params.num_context_in_page contexts of params.context_line_size bytes. */
void * lm_get_context(struct _lm_device_t *pdev, u32_t cid){

    void * ret = NULL;
    u32_t page,off;

    /* valid cids are 0..max_func_connections-1; use >= so that
     * cid == max_func_connections (one past the last entry) is caught too */
    DbgBreakIf(cid >= pdev->params.max_func_connections);
    DbgBreakIf(pdev->context_info->array[cid].invalid != LM_CONTEXT_VALID);

    /* calculate which context page the CID is on */
    page = cid / (pdev->params.num_context_in_page);

    /* calculate at what offset inside the page the CID is */
    off = cid % (pdev->params.num_context_in_page);

    /* now goto page,off */
    ret = (void*)((char*)pdev->vars.context_cdu_virt_addr_table[page] + (pdev->params.context_line_size * off));
    /* NOTE(review): assumes context_line_size is in bytes -- confirm */

    return ret;
}
31 
32 /* same as above but returns phys address in 64 bit pointer */
lm_get_context_phys(struct _lm_device_t * pdev,u32_t cid)33 u64_t lm_get_context_phys(struct _lm_device_t *pdev, u32_t cid){
34 
35     u64_t ret = 0;
36     u32_t page,off;
37 
38     DbgBreakIf(cid > pdev->params.max_func_connections);
39     DbgBreakIf(pdev->context_info->array[cid].invalid != LM_CONTEXT_VALID);
40 
41     /* calculate which context page the CID is on*/
42     page = cid / (pdev->params.num_context_in_page);
43 
44     /* calculate at what offset inside the page CID is on*/
45     off = cid % (pdev->params.num_context_in_page);
46 
47     /* now goto page,off */
48     ret = (pdev->vars.context_cdu_phys_addr_table[page].as_u64 + (pdev->params.context_line_size * off));
49     /* warrning, this assumes context line size is in chars, need to check!!!*/
50 
51     return ret;
52 }
53 
54 extern u32_t LOG2(u32_t v);
/* Initialize the searcher mirror-hash state: zero the (already allocated)
 * mirror table, program the fixed Toeplitz key, pre-expand the key into a
 * one-byte-per-bit array used by the hash calculation, and derive the number
 * of hash bits from the connection count.
 * Returns LM_STATUS_SUCCESS, or an error status on bad/missing state. */
static lm_status_t lm_setup_searcher_hash_info(struct _lm_device_t *pdev)
{
    u32_t                    num_con    = 0 ;
    u32_t                    alloc_size = 0 ;
    lm_context_info_t*       context    = NULL;
    lm_searcher_hash_info_t* hash_info  = NULL;
    int                      offset     = 0 ;

    /* sanity */
    if ( CHK_NULL(pdev) || CHK_NULL( pdev->context_info ) )
    {
        DbgBreakMsg("Invalid Parameters") ;
        return LM_STATUS_INVALID_PARAMETER ;
    }
    context   = pdev->context_info;
    hash_info = &context->searcher_hash;

    DbgBreakIf(!pdev->params.max_func_connections);

    /* the mirror table must have been allocated by lm_alloc_searcher_hash_info() */
    if CHK_NULL( hash_info->searcher_table)
    {
        DbgBreakIf(!( hash_info->searcher_table));
        return LM_STATUS_FAILURE;
    }
    num_con    = pdev->params.max_func_connections;
    alloc_size = sizeof(lm_searcher_hash_entry_t) * num_con;
    mm_mem_zero(hash_info->searcher_table, alloc_size);

    /* init value for searcher key (40 bytes, written as ten 32-bit words) */
    // TODO: for now a fixed key, need to change at runtime
    *(u32_t *)(&hash_info->searcher_key[0])  = 0x63285672;
    *(u32_t *)(&hash_info->searcher_key[4])  = 0x24B8F2CC;
    *(u32_t *)(&hash_info->searcher_key[8])  = 0x223AEF9B;
    *(u32_t *)(&hash_info->searcher_key[12]) = 0x26001E3A;
    *(u32_t *)(&hash_info->searcher_key[16]) = 0x7AE91116;
    *(u32_t *)(&hash_info->searcher_key[20]) = 0x5CE5230B;
    *(u32_t *)(&hash_info->searcher_key[24]) = 0x298D8ADF;
    *(u32_t *)(&hash_info->searcher_key[28]) = 0x6EB0FF09;
    *(u32_t *)(&hash_info->searcher_key[32]) = 0x1830F82F;
    *(u32_t *)(&hash_info->searcher_key[36]) = 0x1E46BE7;

    /* Microsoft's example key */
//      *(u32_t *)(&hash_info->searcher_key[0]) = 0xda565a6d;
//      *(u32_t *)(&hash_info->searcher_key[4]) = 0xc20e5b25;
//      *(u32_t *)(&hash_info->searcher_key[8]) = 0x3d256741;
//      *(u32_t *)(&hash_info->searcher_key[12]) = 0xb08fa343;
//      *(u32_t *)(&hash_info->searcher_key[16]) = 0xcb2bcad0;
//      *(u32_t *)(&hash_info->searcher_key[20]) = 0xb4307bae;
//      *(u32_t *)(&hash_info->searcher_key[24]) = 0xa32dcb77;
//      *(u32_t *)(&hash_info->searcher_key[28]) = 0x0cf23080;
//      *(u32_t *)(&hash_info->searcher_key[32]) = 0x3bb7426a;
//      *(u32_t *)(&hash_info->searcher_key[36]) = 0xfa01acbe;

    /* init searcher_key_bits array: expand each key bit (MSB of each byte
     * first) into its own array entry, so the hash loop can index bits
     * directly instead of shifting/masking on every iteration */
    for (offset = 0; offset < 10; offset++)
    {
        int j,k;
        u32_t bitsOffset = 32*offset;
        u8_t _byte;

        for (j= 0; j < 4; j++)
        {
            /* j-th byte of the offset-th 32-bit key word */
            _byte  = (u8_t)((*(u32_t *)(&hash_info->searcher_key[offset*4]) >> (j*8)) & 0xff);
            for (k = 0; k < 8; k++)
            {
                /* 0x80 selects the leftmost (most significant) bit */
                hash_info->searcher_key_bits[bitsOffset+(j*8)+k] = ((_byte<<(k%8))& 0x80) ? 1 : 0;
            }
        }
    }

    /* init value for num hash bits: log2 of the per-function connection count */
    hash_info->num_hash_bits = (u8_t)LOG2(num_con);

    return LM_STATUS_SUCCESS ;
}
130 
lm_alloc_searcher_hash_info(struct _lm_device_t * pdev)131 static lm_status_t lm_alloc_searcher_hash_info(struct _lm_device_t *pdev)
132 {
133     u32_t                    num_con    = 0 ;
134     u32_t                    alloc_size = 0 ;
135     lm_searcher_hash_info_t* hash_info  = NULL ;
136     u8_t                     mm_cli_idx = 0 ;
137 
138     if CHK_NULL(pdev)
139     {
140         return LM_STATUS_INVALID_PARAMETER ;
141     }
142 
143     mm_cli_idx = LM_RESOURCE_COMMON;//!!DP mm_cli_idx_to_um_idx(LM_CLI_IDX_MAX);
144 
145     /* searcher is defined with per-function #connections */
146     num_con    = pdev->params.max_func_connections;
147     alloc_size = sizeof(lm_searcher_hash_entry_t) * num_con;
148 
149     hash_info  = &pdev->context_info->searcher_hash;
150 
151     if CHK_NULL(hash_info)
152     {
153         return LM_STATUS_INVALID_PARAMETER ;
154     }
155 
156     /* allocate searcher mirror hash table */
157     hash_info->searcher_table = mm_alloc_mem(pdev, alloc_size, mm_cli_idx);
158 
159     if CHK_NULL( hash_info->searcher_table )
160     {
161         DbgBreakIf(DBG_BREAK_ON(MEMORY_ALLOCATION_FAILURE));
162         return  LM_STATUS_RESOURCE ;
163     }
164     return LM_STATUS_SUCCESS ;
165 }
166 
lm_init_cid_resc(struct _lm_device_t * pdev,u32_t cid)167 lm_status_t lm_init_cid_resc(struct _lm_device_t *pdev, u32_t cid)
168 {
169     lm_cid_resc_t *cid_resc = NULL;
170     int            i        = 0;
171 
172     if CHK_NULL(pdev)
173     {
174         return LM_STATUS_INVALID_PARAMETER;
175     }
176 
177     cid_resc = &pdev->context_info->array[cid].cid_resc;
178     if CHK_NULL(cid_resc)
179     {
180         return LM_STATUS_INVALID_PARAMETER;
181     }
182 
183     for (i = 0; i < ARRSIZE(cid_resc->cookies); i++)
184     {
185         cid_resc->cookies[i] = NULL;
186     }
187 
188     cid_resc->cid_pending = LM_CID_STATE_VALID;
189     lm_sp_req_manager_init(pdev, cid);
190 
191     return LM_STATUS_SUCCESS;
192 }
193 
/* (Re)initialize the already-allocated context pool: carve the cid space into
 * per-protocol ranges, thread each range into a free list, reset every cid's
 * resources, fix up the ETH doorbell BAR pointers, and finally set up the
 * searcher mirror hash. Returns the status of the searcher-hash setup, or an
 * error status on bad input. Assumes lm_alloc_context_pool() ran first. */
lm_status_t lm_setup_context_pool(struct _lm_device_t *pdev)
{
    u32_t                     num_con         = 0;
    lm_context_info_t *       context         = NULL ;
    u32_t                     i,j;
    struct lm_context_cookie* array           = NULL ;
    lm_searcher_hash_entry_t* searcher_table  = NULL ;

    if CHK_NULL(pdev)
    {
        DbgBreakIf(!pdev);
        return LM_STATUS_INVALID_PARAMETER;
    }

    context = pdev->context_info;

    if CHK_NULL(context)
    {
        DbgBreakIf( context == NULL );
        return LM_STATUS_INVALID_PARAMETER;
    }

    num_con = pdev->params.max_func_connections;

    /* save the two allocated sub-arrays, zero the whole struct, then restore
     * them -- this resets all bookkeeping without losing the allocations */
    array           = context->array ;
    searcher_table  = context->searcher_hash.searcher_table ;

    mm_mem_zero( context, sizeof(lm_context_info_t) ) ;

    context->array                        = array ;
    context->searcher_hash.searcher_table = searcher_table ;

    /* carve the cid space into contiguous per-protocol [start,end] ranges:
     * ETH first, then TOE, RDMA, iSCSI, FCoE, each starting right after the
     * previous one's end */
    context->proto_start[ETH_CONNECTION_TYPE]   = 0;
    context->proto_end  [ETH_CONNECTION_TYPE]   = pdev->params.max_eth_including_vfs_conns - 1;
    context->proto_start[TOE_CONNECTION_TYPE]   = context->proto_end  [ETH_CONNECTION_TYPE]   + 1;
    context->proto_end  [TOE_CONNECTION_TYPE]   = context->proto_start[TOE_CONNECTION_TYPE]   + pdev->params.max_func_toe_cons - 1;
    context->proto_start[RDMA_CONNECTION_TYPE]  = context->proto_end  [TOE_CONNECTION_TYPE]   + 1;
    context->proto_end  [RDMA_CONNECTION_TYPE]  = context->proto_start[RDMA_CONNECTION_TYPE]  + pdev->params.max_func_rdma_cons - 1;
    context->proto_start[ISCSI_CONNECTION_TYPE] = context->proto_end  [RDMA_CONNECTION_TYPE]  + 1;
    context->proto_end  [ISCSI_CONNECTION_TYPE] = context->proto_start[ISCSI_CONNECTION_TYPE] + pdev->params.max_func_iscsi_cons - 1;
    context->proto_start[FCOE_CONNECTION_TYPE]  = context->proto_end  [ISCSI_CONNECTION_TYPE] + 1;
    context->proto_end  [FCOE_CONNECTION_TYPE]  = context->proto_start[FCOE_CONNECTION_TYPE]  + pdev->params.max_func_fcoe_cons - 1;
    /* the combined ranges must fit inside the allocated cid space */
    DbgBreakIf(context->proto_end[MAX_PROTO - 1] > pdev->params.max_func_connections -1);

    if CHK_NULL(context->array)
    {
        DbgBreakIf(!( context->array));
        return LM_STATUS_INVALID_PARAMETER;
    }

    mm_mem_zero(context->array, sizeof(struct lm_context_cookie)*num_con);

    ASSERT_STATIC( ARRSIZE(context->proto_start) == ARRSIZE(context->proto_end) );

    /* zero cookies and populate the free lists: each cid's `next` points to
     * the following cid, so each protocol range becomes a singly-linked
     * free list */
    for (i = 0; i < ARRSIZE(context->proto_start); i++ )
    {
        for (j = context->proto_start[i]; j <= context->proto_end[i]; j++)
        {
            context->array[j].next    = j+1;
            context->array[j].invalid = LM_CONTEXT_VALID;
            context->array[j].ip_type = 0;
            context->array[j].h_val   = 0;
            lm_init_cid_resc(pdev, j);
        }
        /* set the first free item if max_func_XX_cons > 0 */
        if (context->proto_start[i] <= context->proto_end[i]) {
            context->proto_ffree[i] = context->proto_start[i];
        }
        else
        {
            /* empty range: 0 means "no free cid" for this protocol */
            context->proto_ffree[i] = 0;
        }
        context->proto_pending[i] = 0;
        /* put 0 (end of freelist) in the last entry for the proto */
        context->array[context->proto_end[i]].next = 0;
    }
    /* The ETH cid doorbell space was remapped, just fixing the pointers. */
    for (j = context->proto_start[ETH_CONNECTION_TYPE]; j <= context->proto_end[ETH_CONNECTION_TYPE]; j++)
    {
#ifdef VF_INVOLVED
        if (IS_CHANNEL_VFDEV(pdev)) {
            /* VF: doorbells live in BAR 0 at a per-VF offset */
            context->array[j].cid_resc.mapped_cid_bar_addr =
                (volatile void *)((u8_t*)pdev->vars.mapped_bar_addr[BAR_0] + j*lm_vf_get_doorbell_size(pdev) + VF_BAR0_DB_OFFSET);
#ifdef __SunOS
            context->array[j].cid_resc.reg_handle = pdev->vars.reg_handle[BAR_0];
#endif /* __SunOS */
        } else
#endif /* VF_INVOLVED */
        {
            /* PF: doorbells live in BAR 1, one LM_DQ_CID_SIZE slot per cid */
            context->array[j].cid_resc.mapped_cid_bar_addr =
                (volatile void *)((u8_t*)pdev->vars.mapped_bar_addr[BAR_1] + j*LM_DQ_CID_SIZE);
#ifdef __SunOS
            context->array[j].cid_resc.reg_handle = pdev->vars.reg_handle[BAR_1];
#endif /* __SunOS */
        }
    }
    return lm_setup_searcher_hash_info(pdev) ;
}
293 
294 /* context pool initializer */
lm_alloc_context_pool(struct _lm_device_t * pdev)295 lm_status_t lm_alloc_context_pool(struct _lm_device_t *pdev){
296 
297     u32_t               num_con    = 0 ;
298     lm_context_info_t * context    = NULL ;
299     u8_t                mm_cli_idx = 0;
300 
301     if CHK_NULL(pdev)
302     {
303         DbgBreakIf(!pdev);
304         return LM_STATUS_INVALID_PARAMETER ;
305     }
306 
307     /* must not be called if allready initialized */
308     if ERR_IF( NULL != pdev->context_info )
309     {
310         DbgBreakIf( pdev->context_info != NULL ) ;
311         return LM_STATUS_FAILURE ;
312     }
313 
314     mm_cli_idx = LM_RESOURCE_COMMON;//!!DP mm_cli_idx_to_um_idx(LM_CLI_IDX_MAX);
315 
316     /* number of context is per-function, the cdu has a per-port register that can be set to be higher than the max_func_connections, but
317      * the amount of memory actually allocated for the CDU matches max_func_connections. */
318     num_con = pdev->params.max_func_connections ;
319 
320     /* allocate context info and cookie array */
321     context = mm_alloc_mem(pdev, sizeof(lm_context_info_t), mm_cli_idx);
322     if CHK_NULL(context)
323     {
324         DbgBreakIf(DBG_BREAK_ON(MEMORY_ALLOCATION_FAILURE));
325         return LM_STATUS_RESOURCE ;
326     }
327 
328     /* allocate list entries */
329     context->array = mm_alloc_mem(pdev, sizeof(struct lm_context_cookie)*num_con, mm_cli_idx);
330     if CHK_NULL(context->array)
331     {
332         DbgBreakIf(DBG_BREAK_ON(MEMORY_ALLOCATION_FAILURE));
333         return LM_STATUS_RESOURCE ;
334     }
335 
336     /* initilize the lock */
337 
338     /* put the context where it belongs */
339     pdev->context_info = context;
340 
341     /* init searcher hash info */
342     return lm_alloc_searcher_hash_info(pdev);
343     /* return success */
344 }
345 
346 /* context pool release function */
lm_release_context_pool(struct _lm_device_t * pdev)347 void lm_release_context_pool(struct _lm_device_t *pdev){
348 
349     lm_context_info_t* context = NULL;
350     u32_t i, j;
351 
352     /* must only be called if initialized */
353     DbgBreakIf( pdev->context_info == NULL );
354 
355     /* first make a copy and kill the original refference */
356     context            = pdev->context_info;
357     pdev->context_info = NULL;
358 
359     /* free context cookie array
360        sanity check: scan it and make sure it is empty */
361     for (i=0; i<(pdev->params.max_func_connections); i++  )
362     {
363         for (j = 0; j < MAX_PROTO; j++)
364         {
365             DbgBreakIf( context->array[i].cid_resc.cookies[j] != NULL );
366         }
367 
368         /* NirV: can't call from here, context_info is NULL */
369         /*DbgBreakIf(lm_sp_req_manager_shutdown(pdev, i) != LM_STATUS_SUCCESS);*/
370     }
371     /* mm_free_mem(context->array); */
372 
373     /* sanity check - searcher mirror hash must be empty */
374     DbgBreakIf(context->searcher_hash.num_tuples);
375 
376     /* de-initilize the lock? if in debug mode we can leave it taken to chatch errors */
377 
378     /* free context info */
379     /* mm_free_mem(context); */
380 
381 
382     /* return success */
383 
384 }
385 
_lm_searcher_mirror_hash_calc(lm_searcher_hash_info_t * hash_info,lm_4tuple_t * tuple)386 static u32_t _lm_searcher_mirror_hash_calc(lm_searcher_hash_info_t *hash_info, lm_4tuple_t *tuple)
387 {
388     u8_t  in_str[MAX_SEARCHER_IN_STR] = {0};
389     u8_t* in_str_bits                 = hash_info->searcher_in_str_bits;
390     u8_t* key_bits                    = hash_info->searcher_key_bits;
391     u32_t in_bits                     = 0;
392     u32_t result                      = 0;
393     u16_t i                           = 0;
394     u16_t j                           = 0;
395 
396     /* prepare input string */
397     if (tuple->ip_type == LM_IP_TYPE_V4)
398     {
399         *(u32_t *)(&in_str[0])  = HTON32(tuple->src_ip[0]);
400         *(u32_t *)(&in_str[4])  = HTON32(tuple->dst_ip[0]);
401         *(u16_t *)(&in_str[8])  = tuple->src_port;
402         *(u16_t *)(&in_str[10]) = tuple->dst_port;
403         in_bits = 12 * 8;
404     }
405     else
406     {
407         *(u32_t *)(&in_str[0])   = HTON32(tuple->src_ip[0]);
408         *(u32_t *)(&in_str[4])   = HTON32(tuple->src_ip[1]);
409         *(u32_t *)(&in_str[8])   = HTON32(tuple->src_ip[2]);
410         *(u32_t *)(&in_str[12])  = HTON32(tuple->src_ip[3]);
411 
412         *(u32_t *)(&in_str[16])  = HTON32(tuple->dst_ip[0]);
413         *(u32_t *)(&in_str[20])  = HTON32(tuple->dst_ip[1]);
414         *(u32_t *)(&in_str[24])  = HTON32(tuple->dst_ip[2]);
415         *(u32_t *)(&in_str[28])  = HTON32(tuple->dst_ip[3]);
416 
417         *(u16_t *)(&in_str[32]) = tuple->src_port;
418         *(u16_t *)(&in_str[34]) = tuple->dst_port;
419         in_bits = 36 * 8;
420     }
421 
422     /* prepare searcher_in_str_bits from in_str */
423     for (i = 0; i < in_bits; i++)
424     {
425         /* 0x80 - the leftmost bit. */
426         in_str_bits[i] = ((in_str[i/8]<<(i%8)) & 0x80) ? 1 : 0;
427     }
428 
429     /* calc ToeplitzHash */
430     for (i = 0; i < 32; i++)
431     {
432         u8_t h = 0;
433 
434         for (j = 0; j < in_bits; j++)
435         {
436             h ^= key_bits[i+j] & in_str_bits[j];
437         }
438 
439         result |= (h<<(32-i-1));
440     }
441 
442     return result;
443 }
444 
445 /* assumption: CID lock NOT taken by caller */
/* Account for connection `cid` (described by `tuple`) in the searcher mirror
 * hash. Each hash bucket has SRC_HASH_DEPTH_TH slots; a slot holds one IPv6
 * connection or two IPv4 connections. On success the cid's ip_type/h_val are
 * recorded for later removal. Takes and releases the CID lock internally.
 * Returns LM_STATUS_SUCCESS, or LM_STATUS_FAILURE when the bucket is full. */
lm_status_t lm_searcher_mirror_hash_insert(struct _lm_device_t *pdev, u32_t cid, lm_4tuple_t *tuple)
{
    lm_context_info_t        *context    = NULL;
    lm_searcher_hash_entry_t *hash_entry = NULL;
    u32_t                    h_val       = 0;
    u8_t temp_ipv6, temp_ipv4, temp_depth_ipv4, is_ipv4;
    lm_status_t              lm_status   = LM_STATUS_SUCCESS;
    #define SRC_HASH_DEPTH_TH 15 /* that is searcher's default MaxNumHops - 1 */

    /* take spinlock */
    MM_ACQUIRE_CID_LOCK(pdev);

    context = pdev->context_info;
    is_ipv4 = (tuple->ip_type == LM_IP_TYPE_V4 ? 1 : 0);

    /* calc hash val */
    h_val = _lm_searcher_mirror_hash_calc(&context->searcher_hash, tuple);

    /* take only num_hash_bits LSBs */
    h_val &= ((1 << context->searcher_hash.num_hash_bits) - 1);

    /* init num_hash_bits in the searcher: if the h_val is all FFFFs - set it to 0 */
    if (h_val == ((1 << context->searcher_hash.num_hash_bits) - 1)) {
        h_val = 0;
    }

    /* get the hash entry (bucket) for this hash value */
    hash_entry = &context->searcher_hash.searcher_table[h_val];

    /* start the alg. to find if there is a place available in that entry:
     * compute what the occupancy would be after adding this connection */
    temp_ipv6 = hash_entry->num_ipv6 + (is_ipv4 ? 0 : 1);
    temp_ipv4 = hash_entry->num_ipv4 + is_ipv4;

    /* tempDepthIpv4 = max ( depthIpv4(H), roundup(tempIpv4/2) )
     * (two IPv4 connections share one slot, hence the rounded-up half) */
    temp_depth_ipv4 = (temp_ipv4 / 2) + (temp_ipv4 % 2);
    if (temp_depth_ipv4 < hash_entry->depth_ipv4) {
        temp_depth_ipv4 = hash_entry->depth_ipv4;
    }

    if (temp_depth_ipv4 + temp_ipv6 > SRC_HASH_DEPTH_TH) {
        /* each hash entry has SRC_HASH_DEPTH_TH available places.
         * each place can contain 1 ipv6 connection or 2 ipv4 connections */
        DbgBreakMsg("Reached searcher hash limit\n");
        lm_status = LM_STATUS_FAILURE;
    } else {
        /* commit the new occupancy */
        hash_entry->num_ipv6 = temp_ipv6;
        hash_entry->num_ipv4 = temp_ipv4;
        hash_entry->depth_ipv4 = temp_depth_ipv4;

        /* for debug, save the max depth reached */
        if (context->searcher_hash.hash_depth_reached < hash_entry->depth_ipv4 + hash_entry->num_ipv6) {
            context->searcher_hash.hash_depth_reached = hash_entry->depth_ipv4 + hash_entry->num_ipv6;
        }
        context->searcher_hash.num_tuples++;

        /* remember the IP type and h_val to know where and how much
         * to decrease upon CID recycling */
        DbgBreakIf(context->array[cid].ip_type); /* cid can't be inserted twice */
        context->array[cid].ip_type = tuple->ip_type;
        context->array[cid].h_val = h_val;
    }

    /* release spinlock */
    MM_RELEASE_CID_LOCK(pdev);

    return lm_status;
}
513 
514 /* assumption: CID lock NOT taken by caller */
/* Remove connection `cid`'s accounting from the searcher mirror hash, using
 * the ip_type/h_val recorded by lm_searcher_mirror_hash_insert(). A cid that
 * was never inserted (ip_type == 0) is logged and skipped. Takes and releases
 * the CID lock internally. */
void lm_searcher_mirror_hash_remove(struct _lm_device_t *pdev, u32_t cid)
{
    lm_context_info_t        *ctx    = NULL;
    lm_searcher_hash_entry_t *entry  = NULL;
    u32_t                    bucket  = 0;

    MM_ACQUIRE_CID_LOCK(pdev);

    ctx = pdev->context_info;

    if(!ctx->array[cid].ip_type) {
        /* i.e lm_searcher_mirror_hash_insert was not called for this cid */
        DbgMessage(pdev, WARN,
                   "not removing CID %d from SRC hash (hash insert was not called for this cid)\n"
                   ,cid);

        MM_RELEASE_CID_LOCK(pdev);
        return;
    }

    bucket = ctx->array[cid].h_val;
    entry  = &ctx->searcher_hash.searcher_table[bucket];

    if (ctx->array[cid].ip_type == LM_IP_TYPE_V6) {
        DbgBreakIf(!entry->num_ipv6);
        entry->num_ipv6--;
    } else {
        DbgBreakIf(!entry->num_ipv4);
        entry->num_ipv4--;
        /* shrink the recorded ipv4 depth when the count drops below it */
        if (entry->num_ipv4 < entry->depth_ipv4) {
            entry->depth_ipv4 = entry->num_ipv4;
        }
    }

    /* for debug */
    ctx->searcher_hash.num_tuples--;

    /* forget the per-cid hash bookkeeping */
    ctx->array[cid].ip_type = 0;
    ctx->array[cid].h_val = 0;

    MM_RELEASE_CID_LOCK(pdev);
}
565 
566 /*  allocate a free context by type
567     returns CID in the out_cid param
568     return LM_STATUS_SUCCESS for available cid
569     LM_STATUS_RESOURCE if no cids are available
570     LM_STATUS_PENDING if there is a pending cfc-delete cid
571     takes the list spinlock */
lm_status_t lm_allocate_cid(struct _lm_device_t *pdev, u32_t type, void * cookie, s32_t * out_cid){

    lm_context_info_t  *context  = NULL;
    lm_status_t        lm_status = LM_STATUS_SUCCESS;
    u32_t              cid       = (u32_t)-1;
    lm_address_t       phy_addr  = {{0}} ;

    /* sanity: all pointers present and `type` within the protocol table */
    if ( CHK_NULL(out_cid) ||
         CHK_NULL(pdev) ||
         CHK_NULL(pdev->context_info) ||
         CHK_NULL(pdev->context_info->array) ||
         CHK_NULL(cookie) ||
         ERR_IF(type >= ARRSIZE(pdev->context_info->proto_pending)) )

    {
        DbgBreakIf(!out_cid) ;
        DbgBreakIf(!pdev);
        DbgBreakIf(!pdev->context_info);
        DbgBreakIf(!pdev->context_info->array);
        DbgBreakIf(!cookie);
        DbgBreakIf(type >= ARRSIZE(pdev->context_info->proto_pending)) ;
        return LM_STATUS_INVALID_PARAMETER ;
    }

    context = pdev->context_info;
    *out_cid = 0;
    /* take spinlock */
    MM_ACQUIRE_CID_LOCK(pdev);

    // if the free list is empty return error
    if (context->proto_ffree[type]==0) {
        if ((pdev->params.cid_allocation_mode == LM_CID_ALLOC_REGULAR) || (context->proto_pending[type] == 0)) {
            // if the free list is empty AND the pending list is empty return error OR
            // the free list is empty and we're in the regular allocating mode
            lm_status = LM_STATUS_RESOURCE;
        }
        else
        {
            /* pop pendinglist entry and place cookie */
            /* we only use the cid to connect between the pending connection and this cid, but
             * the connection can't know of this cid before it is acually freed, for this reason
             * we return cid = 0, which means, 'pending' */
            cid = context->proto_pending[type];
            context->proto_pending[type] = context->array[cid].next;
            context->array[cid].next = 0;
            context->array[cid].cid_resc.cookies[type] = cookie;
            context->array[cid].cid_resc.cid_pending = LM_CID_STATE_PENDING;
            lm_sp_req_manager_init(pdev, cid);
            *out_cid = cid;

            /* make sure the first cid previous is set correctly */
            cid = context->proto_pending[type];
            if (cid) {
                context->array[cid].prev = 0;
            }
            lm_status = LM_STATUS_PENDING;
        }
    }else{
        /* pop freelist entry and place cookie */
        cid = context->proto_ffree[type];
        context->proto_ffree[type] = context->array[cid].next;
        context->array[cid].next = 0;
        context->array[cid].prev = 0;
        context->array[cid].cid_resc.cookies[type] = cookie;
        lm_sp_req_manager_init(pdev, cid);
        *out_cid = cid;
        lm_status = LM_STATUS_SUCCESS;
    }

    MM_RELEASE_CID_LOCK(pdev);

    if(LM_STATUS_SUCCESS == lm_status)
    {
        //If the function allocated a new free CID, (not pending) the function MmMapIoSpace will be called
        //to map the specific physical cid doorbell space to a virtual address.
        //In case of a pending CID, the map doorbell space will not be remapped. The pending CID will use
        //the old mapping cid doorbell space.
        /* physical doorbell address = BAR 1 base (16-byte aligned) + cid slot */
        phy_addr.as_u32.low = (pdev->hw_info.mem_base[BAR_1].as_u32.low) & 0xfffffff0;
        phy_addr.as_u32.high = pdev->hw_info.mem_base[BAR_1].as_u32.high;

        LM_INC64(&phy_addr,(cid*LM_DQ_CID_SIZE));

#ifdef __SunOS

        context->array[cid].cid_resc.mapped_cid_bar_addr =
#ifdef VF_INVOLVED
            /* VF path: BAR already mapped, just offset into it */
            (volatile void *)((u8_t*)pdev->vars.mapped_bar_addr[BAR_1] + cid*LM_DQ_CID_SIZE);
        context->array[cid].cid_resc.reg_handle = pdev->vars.reg_handle[BAR_1];
#else /* !VF_INVOLVED */
            /* Solaris PF path: map just this cid's doorbell slot */
            (volatile void *)mm_map_io_space_solaris(pdev,
                                                     phy_addr,
                                                     BAR_1,
                                                     (cid * LM_DQ_CID_SIZE),
                                                     LM_DQ_CID_SIZE,
                                                     &context->array[cid].cid_resc.reg_handle);
#endif /* VF_INVOLVED */

#else /* !__SunOS */

        context->array[cid].cid_resc.mapped_cid_bar_addr =
#ifdef VF_INVOLVED
            /* VF path: BAR already mapped, just offset into it */
            (volatile void *)((u8_t*)pdev->vars.mapped_bar_addr[BAR_1] + cid*LM_DQ_CID_SIZE);
#else /* !VF_INVOLVED */
            /* PF path: map just this cid's doorbell slot */
            (volatile void *)mm_map_io_space(pdev, phy_addr, LM_DQ_CID_SIZE);
#endif /* VF_INVOLVED */

#endif /* __SunOS */

        // If the mapping failed we will return LM_STATUS_RESOURCE and return the cid resource.
        if CHK_NULL(context->array[cid].cid_resc.mapped_cid_bar_addr)
        {
            DbgMessage(pdev, FATAL, "lm_allocate_cid: mm_map_io_space failed. address low=%d address high=%d\n", phy_addr.as_u32.low,phy_addr.as_u32.high );

            /* take spinlock */
            MM_ACQUIRE_CID_LOCK(pdev);
            /* return the cid to free list */
            context->array[cid].next = pdev->context_info->proto_ffree[type];
            context->proto_ffree[type] = cid;
            context->array[cid].invalid = LM_CONTEXT_VALID;
            MM_RELEASE_CID_LOCK(pdev);

            lm_status = LM_STATUS_RESOURCE;
            *out_cid =0;
        }
    }
    return lm_status;
}
699 
lm_cfc_delete(struct _lm_device_t * pdev,void * param)700 void lm_cfc_delete(struct _lm_device_t *pdev, void *param)
701 {
702     u32_t cid             = (u32_t)(uintptr_t)param;
703     u8_t  flr_in_progress = lm_fl_reset_is_inprogress(pdev);
704 
705     if ( CHK_NULL(pdev) ||
706          ERR_IF(cid > pdev->params.max_func_connections) ||
707          ERR_IF(pdev->context_info->array[cid].invalid != LM_CONTEXT_INVALID_WAIT) )
708     {
709         DbgBreakIf(!pdev);
710         DbgBreakIf(cid > pdev->params.max_func_connections);
711 
712         if (!flr_in_progress)
713         {
714             DbgBreakIf(pdev->context_info->array[cid].invalid != LM_CONTEXT_INVALID_WAIT);
715         }
716         else
717         {
718             DbgMessage(pdev, FATAL, "lm_cfc_delete: invalid %d for cid=%d\n", pdev->context_info->array[cid].invalid,cid);
719 
720             if (pdev->context_info->array[cid].invalid != LM_CONTEXT_INVALID_DELETE)
721             {
722                 DbgBreakIf(1);
723             }
724         }
725     }
726 
727     DbgMessage(pdev, WARN, "lm_cfc_delete: cid=0x%x\n",cid);
728     pdev->context_info->array[cid].invalid = LM_CONTEXT_INVALID_DELETE;
729 
730     if (lm_fl_reset_is_inprogress(pdev))
731     {
732          lm_recycle_cid(pdev, cid);
733     }
734     else
735     {
736         /* use common bit */
737         lm_command_post(pdev,
738                         cid,
739                         RAMROD_CMD_ID_COMMON_CFC_DEL,
740                         CMD_PRIORITY_NORMAL,
741                         NONE_CONNECTION_TYPE,
742                         0 );
743     }
744     return;
745 }
746 
747 /* free a context
748    takes the list spinlock */
void lm_free_cid(struct _lm_device_t *pdev, u32_t type, u32_t cid, u8_t notify_fw){
    u32_t delay_time  = 0;
    u32_t curr_cid    = 0;
    u8_t  recycle_now = 0;
    u8_t  proto_idx   = 0;

    /* sanity: cid must belong to `type`'s range and be in VALID state
     * (state check skipped while a function-level reset is in progress) */
    if ( CHK_NULL(pdev) ||
         CHK_NULL(pdev->context_info) ||
         ERR_IF(type >= ARRSIZE(pdev->context_info->proto_end)) ||
         ERR_IF(cid > (pdev->context_info->proto_end[type])) ||
         ERR_IF(cid < (pdev->context_info->proto_start[type])) ||
         (!lm_fl_reset_is_inprogress(pdev) && (pdev->context_info->array[cid].invalid != LM_CONTEXT_VALID)))
    {
        DbgBreakIf(!pdev);
        DbgBreakIf(!pdev->context_info);
        DbgBreakIf(type >= ARRSIZE(pdev->context_info->proto_end));
        DbgBreakIf(cid > (pdev->context_info->proto_end[type]));
        DbgBreakIf(cid < (pdev->context_info->proto_start[type]));
        DbgBreakIf(pdev->context_info->array[cid].invalid != LM_CONTEXT_VALID);
        return;
    }
    MM_ACQUIRE_CID_LOCK(pdev);

    /* all protocol cookies must have been released before freeing the cid */
    for (proto_idx = 0; proto_idx < MAX_PROTO; proto_idx++)
    {
        DbgBreakIf(pdev->context_info->array[cid].cid_resc.cookies[proto_idx]);
    }

    lm_sp_req_manager_shutdown(pdev, cid);

    if (notify_fw)
    {
        /* fw-notified path: the cid is only reusable after CFC-delete
         * completes, so park it on the pending list meanwhile */
        /* Vladz: Added in order to optimize CID release in DOS */
#if !(defined(DOS) || defined(__LINUX))
        delay_time = LM_FREE_CID_DELAY_TIME(pdev);
#else
        delay_time = 0;
#endif

        pdev->context_info->array[cid].invalid = LM_CONTEXT_INVALID_WAIT;

        recycle_now = FALSE;
        /* add the cid to proto-pending: it'll be freed soon when cfc-delete is done
         * (push at the head of the doubly-linked pending list) */
        curr_cid = pdev->context_info->proto_pending[type];
        pdev->context_info->array[cid].next = curr_cid;
        pdev->context_info->array[cid].prev = 0;
        if (curr_cid != 0)
        {
            pdev->context_info->array[curr_cid].prev = cid;
        }
        pdev->context_info->proto_pending[type] = cid;
    }
    else
    {
        pdev->context_info->array[cid].invalid = LM_CONTEXT_INVALID_DELETE;
        recycle_now = TRUE;
        /* If we're recycling now, there's no point in adding it to the pending list */
    }

    MM_RELEASE_CID_LOCK(pdev);

    if (recycle_now) {
        lm_recycle_cid(pdev, cid);
    }
    else
    {
        if (type == TOE_CONNECTION_TYPE)
        {
            /* TOE: issue the CFC delete synchronously */
            DbgMessage(pdev, WARN, "lm_free_cid: CFC delete: cid=0x%x\n",cid);
            lm_cfc_delete(pdev,(void *)(uintptr_t)cid);
        }
        else
        {
            /* other protocols: defer the CFC delete by delay_time */
            DbgMessage(pdev, WARN, "lm_free_cid: schedule CFC delete: cid=0x%x\n",cid);
            mm_schedule_task(pdev,delay_time,lm_cfc_delete,
		(void *)(uintptr_t)cid);
        }
    }

}
829 
/* Return a cid to its protocol's free list once cfc-delete has completed,
 * or re-validate it for the protocol still waiting on it.  Also clears the
 * CDU activity bit and, when a cookie is pending on the cid, invokes the
 * protocol's cid-recycled callback. */
void lm_recycle_cid(struct _lm_device_t *pdev, u32_t cid){

    u32_t type = MAX_PROTO+1;
    u32_t prev_cid, next_cid;
    u32_t i;
    u8_t  call_cb = TRUE;

    /* Bug fix: validate the cid range BEFORE touching
     * context_info->array[cid]; the previous check order performed the
     * array access first, an out-of-bounds read for a bad cid. */
    if ( CHK_NULL(pdev) ||
         ERR_IF(cid > pdev->params.max_func_connections) )
    {
        DbgBreakIf(!pdev);
        DbgBreakIf(cid > pdev->params.max_func_connections);
        return;
    }
    if ERR_IF(pdev->context_info->array[cid].invalid != LM_CONTEXT_INVALID_DELETE)
    {
        DbgBreakIf(pdev->context_info->array[cid].invalid != LM_CONTEXT_INVALID_DELETE);
        return;
    }

    /* map the cid back to its protocol by range */
    for (i=0; i < MAX_PROTO; i++ ) {
        if ((cid >= pdev->context_info->proto_start[i]) && (cid <= pdev->context_info->proto_end[i]))
        {
            type = i;
            break;
        }
    }
    if ERR_IF(type >= ARRSIZE(pdev->context_info->proto_pending))
    {
        DbgBreakIf(type >= ARRSIZE(pdev->context_info->proto_pending)) ;
        return;
    }
    /* take spinlock */
    MM_ACQUIRE_CID_LOCK(pdev);
#ifdef _VBD_
    if ((type == TOE_CONNECTION_TYPE) && (pdev->ofld_info.l4_params.ticks_per_second != 0))
    {
        pdev->vars.last_recycling_timestamp = mm_get_current_time(pdev) * 1000 / pdev->ofld_info.l4_params.ticks_per_second; /*time in ms*/
    }
#endif
    /* If no cookie is waiting on this cid, extract it from the pending list and push the entry onto the free list */
    if (pdev->context_info->array[cid].cid_resc.cid_pending == FALSE) {
        /* take the cid out of the proto_pending cids if it's there */
        prev_cid = pdev->context_info->array[cid].prev;
        next_cid = pdev->context_info->array[cid].next;
        if (prev_cid) {
            pdev->context_info->array[prev_cid].next = next_cid;
        }
        if (next_cid) {
            pdev->context_info->array[next_cid].prev = prev_cid;
        }
        if (pdev->context_info->proto_pending[type] == cid) {
            /* cid is the list head, so it must have no predecessor */
            DbgBreakIf(prev_cid != 0);
            pdev->context_info->proto_pending[type] = next_cid;
        }
        pdev->context_info->array[cid].prev = pdev->context_info->array[cid].next = 0;
        /* add to free list */
        pdev->context_info->array[cid].next = pdev->context_info->proto_ffree[type];
        pdev->context_info->array[cid].invalid = LM_CONTEXT_VALID;
        pdev->context_info->array[cid].cfc_delete_cnt = 0;
        pdev->context_info->proto_ffree[type] = cid;
        call_cb = FALSE; /* no one is waiting on this... */
        /* free virtual memory for cids not in use. */
#ifndef VF_INVOLVED
        mm_unmap_io_space(pdev,(void *)pdev->context_info->array[cid].cid_resc.mapped_cid_bar_addr, LM_DQ_CID_SIZE);
#endif
    }
    else
    {
        /* No need to extract from pending - it's not there. */

        /* NirV: we still can't set cid_resc.cid_pending to false, */
        /* will be possible only in the callback */

        pdev->context_info->array[cid].invalid = LM_CONTEXT_VALID;
        call_cb = TRUE;
    }

    /* time to clear the active bit (cdu-validation); we can only do this after cfc-delete has completed - at this point, invalid==LM_CONTEXT_VALID */
    lm_set_cdu_validation_data(pdev, cid, TRUE /* Invalidate */);


    /* release spinlock */
    MM_RELEASE_CID_LOCK(pdev);

    /* call the cid-recycled callback of this protocol type, if one is registered */
    if (pdev->cid_recycled_callbacks[type] && call_cb) {
        pdev->cid_recycled_callbacks[type](pdev, pdev->context_info->array[cid].cid_resc.cookies[type], cid);
    }

    return;
}
920 
/* lookup the protocol cookie for a given CID
   does not take a lock
   will DbgBreakIf if the CID is not allocated. */
/* Look up the protocol cookie for a given CID.
 * Does not take a lock; DbgBreaks and returns NULL on invalid input,
 * and returns NULL while the cid is still pending. */
void * lm_cid_cookie(struct _lm_device_t *pdev, u32_t type, u32_t cid){

    if ( CHK_NULL(pdev) ||
         CHK_NULL(pdev->context_info) ||
         ERR_IF(type >= MAX_PROTO) ||
         ERR_IF(cid > (pdev->context_info->proto_end[MAX_PROTO - 1])) ||
         CHK_NULL(pdev->context_info->array[cid].cid_resc.cookies[type]) ||
         ERR_IF(pdev->context_info->array[cid].invalid != LM_CONTEXT_VALID) )
    {
        DbgBreakIf(!pdev);
        DbgBreakIf(!pdev->context_info);
        DbgBreakIf(type >= MAX_PROTO);
        DbgBreakIf(cid > (pdev->context_info->proto_end[MAX_PROTO - 1]));
        DbgBreakIf(pdev->context_info->array[cid].invalid != LM_CONTEXT_VALID);
        /* Bug fix: bail out here instead of falling through - in a free
         * (non-debug) build the code below would dereference the very
         * pdev/context_info that just failed the check.  A NULL cookie
         * also lands here, preserving the original NULL return. */
        return NULL;
    }

    /* if the cid is pending, return null */
    if (pdev->context_info->array[cid].cid_resc.cid_pending != LM_CID_STATE_VALID)
    {
        return NULL;
    }

    return pdev->context_info->array[cid].cid_resc.cookies[type];
}
954 
/* lookup the protocol cid_resc for a given CID
   does not take a lock
   will DbgBreakIf if the CID is not allocated */
lm_cid_resc(struct _lm_device_t * pdev,u32_t cid)958 lm_cid_resc_t * lm_cid_resc(struct _lm_device_t *pdev, u32_t cid){
959 
960     if ( CHK_NULL(pdev) ||
961          CHK_NULL(pdev->context_info) ||
962          ERR_IF(cid > (pdev->context_info->proto_end[MAX_PROTO - 1])) )
963     {
964         DbgBreakIf(!pdev);
965         DbgBreakIf(!pdev->context_info);
966         DbgBreakIf(cid > (pdev->context_info->proto_end[MAX_PROTO - 1]));
967     }
968 
969     return &pdev->context_info->array[cid].cid_resc;
970 }
971 
lm_map_cid_to_proto(struct _lm_device_t * pdev,u32_t cid)972 u8_t lm_map_cid_to_proto(struct _lm_device_t * pdev, u32_t cid)
973 {
974     u8_t type = MAX_PROTO+1;
975     u8_t i;
976 
977     if (!pdev || cid > pdev->params.max_func_connections) {
978         return type;
979     }
980 
981     for (i=0; i < MAX_PROTO; i++ ) {
982         if ((cid >= pdev->context_info->proto_start[i]) && (cid <= pdev->context_info->proto_end[i]))  {
983             type = i;
984             break;
985         }
986     }
987     return type;
988 }
989 
/* Zero the ETH context of sw_cid and write its CDU validation bytes.
 * sw_cid must lie in the ETH connection range of the parent PF. */
void lm_init_connection_context(struct _lm_device_t *pdev, u32_t const sw_cid, u8_t sb_id)
{
    struct eth_context * context      = NULL;

    if ( CHK_NULL(pdev) ||
         ERR_IF(sw_cid < PFDEV(pdev)->context_info->proto_start[ETH_CONNECTION_TYPE]) ||
         ERR_IF(sw_cid > PFDEV(pdev)->context_info->proto_end[ETH_CONNECTION_TYPE]) )
    {
        DbgBreakIf(!pdev);
        DbgBreakIf(sw_cid < PFDEV(pdev)->context_info->proto_start[ETH_CONNECTION_TYPE]); /* first legal NIC CID */
        DbgBreakIf(sw_cid > PFDEV(pdev)->context_info->proto_end[ETH_CONNECTION_TYPE]);   /* last legal NIC CID */
        /* Bug fix: don't fall through and zero a context for an
         * out-of-range cid (or crash on a NULL pdev in a free build). */
        return;
    }

    context = lm_get_context(PFDEV(pdev), VF_TO_PF_CID(pdev,sw_cid));

    mm_mem_zero( context, sizeof(struct eth_context) ) ;

    /* calculate the cdu-validation value. */
    lm_set_cdu_validation_data(pdev, VF_TO_PF_CID(pdev,sw_cid), FALSE /* don't invalidate */);

}
1011 
1012 lm_status_t
lm_set_cid_resc(IN struct _lm_device_t * pdev,IN u32_t type,IN void * cookie,IN u32_t cid)1013 lm_set_cid_resc(
1014     IN struct _lm_device_t *pdev,
1015     IN u32_t type,
1016     IN void *cookie,
1017     IN u32_t cid)
1018 {
1019     lm_status_t     lm_status  = LM_STATUS_SUCCESS;
1020     lm_cid_resc_t   *cid_resc  = NULL;
1021 
1022     if CHK_NULL(pdev)
1023     {
1024         return LM_STATUS_INVALID_PARAMETER;
1025     }
1026 
1027     /* take spinlock */
1028     MM_ACQUIRE_CID_LOCK(pdev);
1029 
1030     cid_resc = lm_cid_resc(pdev, cid);
1031 
1032     if CHK_NULL(cid_resc)
1033     {
1034         MM_RELEASE_CID_LOCK(pdev);
1035         return LM_STATUS_INVALID_PARAMETER;
1036     }
1037 
1038     cid_resc->cookies[type] = cookie;
1039 
1040     /* rlease spinlock */
1041     MM_RELEASE_CID_LOCK(pdev);
1042 
1043     return lm_status;
1044 }
1045 
1046 lm_status_t
lm_free_cid_resc(IN struct _lm_device_t * pdev,IN u32_t type,IN u32_t cid,IN u8_t notify_fw)1047 lm_free_cid_resc(
1048     IN    struct _lm_device_t *pdev,
1049     IN    u32_t type,
1050     IN    u32_t cid,
1051     IN    u8_t notify_fw)
1052 {
1053     lm_cid_resc_t   *cid_resc = NULL;
1054     u8_t            proto_idx = 0;
1055 
1056 
1057     if (CHK_NULL(pdev) || (cid == 0))
1058     {
1059         return LM_STATUS_INVALID_PARAMETER;
1060     }
1061 
1062     /* take spinlock */
1063     MM_ACQUIRE_CID_LOCK(pdev);
1064 
1065     cid_resc = lm_cid_resc(pdev, cid);
1066 
1067     if CHK_NULL(cid_resc)
1068     {
1069         MM_RELEASE_CID_LOCK(pdev);
1070         return LM_STATUS_INVALID_PARAMETER;
1071     }
1072 
1073     cid_resc->cookies[type] = NULL;
1074 
1075     while ((proto_idx < MAX_PROTO) && (cid_resc->cookies[proto_idx] == NULL))
1076     {
1077         proto_idx++;
1078     }
1079     /* rlease spinlock */
1080     MM_RELEASE_CID_LOCK(pdev);
1081 
1082     if (proto_idx == MAX_PROTO)
1083     {
1084         /* We'll call lm_map_cid_to_proto() to compute the appropriate type that was associated with that CID,
1085          * this is done to avoid assert upon race scenarios in which the last cookie resource that gets freed is not from the type of the CID */
1086         lm_free_cid(pdev, lm_map_cid_to_proto(pdev, cid), cid, notify_fw);
1087     }
1088 
1089     return LM_STATUS_SUCCESS;
1090 }
1091 
1092 
1093 
1094 lm_sp_req_manager_t *
lm_cid_sp_req_mgr(IN struct _lm_device_t * pdev,IN u32_t cid)1095 lm_cid_sp_req_mgr(
1096     IN struct _lm_device_t *pdev,
1097     IN u32_t cid
1098     )
1099 {
1100     lm_cid_resc_t   *cid_resc   = NULL;
1101 
1102     if CHK_NULL(pdev)
1103     {
1104         return NULL;
1105     }
1106 
1107     cid_resc = lm_cid_resc(pdev, cid);
1108 
1109     if CHK_NULL(cid_resc)
1110     {
1111         return NULL;
1112     }
1113 
1114     return &cid_resc->sp_req_mgr;
1115 }
1116 
1117 
1118 
1119 lm_cid_state_enum
lm_cid_state(IN struct _lm_device_t * pdev,IN u32_t cid)1120 lm_cid_state(
1121     IN struct _lm_device_t *pdev,
1122     IN u32_t cid
1123     )
1124 {
1125     lm_cid_resc_t   *cid_resc   = NULL;
1126 
1127     if CHK_NULL(pdev)
1128     {
1129         return LM_CID_STATE_ERROR;
1130     }
1131 
1132     cid_resc = lm_cid_resc(pdev, cid);
1133 
1134     if CHK_NULL(cid_resc)
1135     {
1136         return LM_CID_STATE_ERROR;
1137     }
1138 
1139     return (lm_cid_state_enum)cid_resc->cid_pending;
1140 }
1141 
1142 
1143 
1144 lm_status_t
lm_set_cid_state(IN struct _lm_device_t * pdev,IN u32_t cid,IN lm_cid_state_enum state)1145 lm_set_cid_state(
1146     IN struct _lm_device_t *pdev,
1147     IN u32_t cid,
1148     IN lm_cid_state_enum state
1149     )
1150 {
1151     lm_cid_resc_t   *cid_resc   = NULL;
1152 
1153     if CHK_NULL(pdev)
1154     {
1155         return LM_STATUS_INVALID_PARAMETER;
1156     }
1157 
1158     /* take spinlock */
1159     MM_ACQUIRE_CID_LOCK(pdev);
1160 
1161     cid_resc = lm_cid_resc(pdev, cid);
1162 
1163     if CHK_NULL(cid_resc)
1164     {
1165         MM_RELEASE_CID_LOCK(pdev);
1166         return LM_STATUS_INVALID_PARAMETER;
1167     }
1168 
1169     cid_resc->cid_pending = state;
1170 
1171     /* rlease spinlock */
1172     MM_RELEASE_CID_LOCK(pdev);
1173 
1174     return LM_STATUS_SUCCESS;
1175 }
1176 
/**
 * sets the CDU validation data to be valid for a given cid
 *
 * @param pdev - the physical device handle
 * @param cid - the context of this cid will be initialized with the cdu validation data
 *
 * @return lm_status_t
 */
lm_set_cdu_validation_data(struct _lm_device_t * pdev,s32_t cid,u8_t invalidate)1185 lm_status_t lm_set_cdu_validation_data(struct _lm_device_t *pdev, s32_t cid, u8_t invalidate)
1186 {
1187     lm_status_t lm_status = LM_STATUS_SUCCESS;
1188     void        *context        = NULL;
1189     u8_t        *cdu_reserved   = NULL; /* Pointer to the actual location of cdu_reserved field according to protocol */
1190     u8_t        *cdu_usage      = NULL; /* Pointer to the actual location of cdu_usage field according to protocol */
1191     u8_t        proto_type      = 0;
1192 
1193     context = lm_get_context(PFDEV(pdev), cid);
1194 
1195     if (!context) {
1196         return LM_STATUS_FAILURE;
1197     }
1198 
1199     proto_type = lm_map_cid_to_proto(PFDEV(pdev), cid);
1200 
1201     switch (proto_type) {
1202     case TOE_CONNECTION_TYPE:
1203         cdu_reserved = &((struct toe_context *)context)->xstorm_ag_context.cdu_reserved;
1204         cdu_usage = &(((struct toe_context *)context)->ustorm_ag_context.cdu_usage);
1205         break;
1206     case ETH_CONNECTION_TYPE:
1207         cdu_reserved = &(((struct eth_context *)context)->xstorm_ag_context.cdu_reserved);
1208         cdu_usage =  &(((struct eth_context *)context)->ustorm_ag_context.cdu_usage);
1209         break;
1210     case ISCSI_CONNECTION_TYPE:
1211         cdu_reserved = &(((struct iscsi_context *)context)->xstorm_ag_context.cdu_reserved);
1212         cdu_usage = &(((struct iscsi_context *)context)->ustorm_ag_context.cdu_usage);
1213         break;
1214     case FCOE_CONNECTION_TYPE:
1215         cdu_reserved = &(((struct fcoe_context *)context)->xstorm_ag_context.cdu_reserved);
1216         cdu_usage = &(((struct fcoe_context *)context)->ustorm_ag_context.cdu_usage);
1217         break;
1218     default:
1219         lm_status = LM_STATUS_FAILURE;
1220         break;
1221     }
1222 
1223     if (cdu_reserved && cdu_usage) {
1224         if (invalidate) {
1225             *cdu_reserved = CDU_RSRVD_INVALIDATE_CONTEXT_VALUE(*cdu_reserved);
1226             *cdu_usage    = CDU_RSRVD_INVALIDATE_CONTEXT_VALUE(*cdu_usage);
1227         } else {
1228             *cdu_reserved = CDU_RSRVD_VALUE_TYPE_A(HW_CID(pdev, cid), CDU_REGION_NUMBER_XCM_AG, proto_type);
1229             *cdu_usage    = CDU_RSRVD_VALUE_TYPE_A(HW_CID(pdev, cid), CDU_REGION_NUMBER_UCM_AG, proto_type);
1230         }
1231     }
1232 
1233     return lm_status;
1234 }
1235 
1236 
lm_get_context_size(struct _lm_device_t * pdev,s32_t * context_size)1237 lm_status_t lm_get_context_size(struct _lm_device_t *pdev, s32_t * context_size)
1238 {
1239     *context_size = LM_CONTEXT_SIZE;
1240     return LM_STATUS_SUCCESS;
1241 }
1242 
lm_set_con_state(struct _lm_device_t * pdev,u32_t cid,u32_t state)1243 lm_status_t lm_set_con_state(struct _lm_device_t *pdev, u32_t cid, u32_t state)
1244 {
1245     lm_cid_resc_t * cid_resc = lm_cid_resc(pdev, cid);
1246 
1247     if CHK_NULL(cid_resc)
1248     {
1249         return LM_STATUS_INVALID_PARAMETER;
1250     }
1251 
1252     cid_resc->con_state = state;
1253 
1254     return LM_STATUS_SUCCESS;
1255 }
1256 
/* Read back the connection state of a cid; LM_CON_STATE_CLOSE is returned
 * when the cid's resources can't be found. */
u32_t lm_get_con_state(struct _lm_device_t *pdev, u32_t cid)
{
    const lm_cid_resc_t *resc = lm_cid_resc(pdev, cid);

    if CHK_NULL(resc)
    {
        return LM_CON_STATE_CLOSE;
    }

    return resc->con_state;
}
1268 
1269 
1270