/******************************************************************************

  Copyright (c) 2013-2018, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/
/*$FreeBSD$*/

#include "i40e_osdep.h"
#include "i40e_register.h"
#include "i40e_type.h"
#include "i40e_hmc.h"
#include "i40e_lan_hmc.h"
#include "i40e_prototype.h"

/* lan specific interface functions */
/**
 * i40e_align_l2obj_base - aligns base object pointer to 512 bytes
 * @offset: base address offset needing alignment
 *
 * Aligns the layer 2 function private memory so it's 512-byte aligned.
 **/
static u64 i40e_align_l2obj_base(u64 offset)
{
	u64 aligned_offset = offset;

	if ((offset % I40E_HMC_L2OBJ_BASE_ALIGNMENT) > 0)
		aligned_offset += (I40E_HMC_L2OBJ_BASE_ALIGNMENT -
				   (offset % I40E_HMC_L2OBJ_BASE_ALIGNMENT));

	return aligned_offset;
}
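
/*
 * Worked example (illustrative only): with I40E_HMC_L2OBJ_BASE_ALIGNMENT
 * equal to 512, an offset of 4660 (0x1234) is not a multiple of 512, so
 * 512 - (4660 % 512) = 460 is added, giving 5120 (0x1400).  An offset that
 * is already 512-byte aligned is returned unchanged.
 */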

/**
 * i40e_calculate_l2fpm_size - calculates layer 2 FPM memory size
 * @txq_num: number of Tx queues needing backing context
 * @rxq_num: number of Rx queues needing backing context
 * @fcoe_cntx_num: amount of FCoE stateful contexts needing backing context
 * @fcoe_filt_num: number of FCoE filters needing backing context
 *
 * Calculates the maximum amount of memory required for the function, based
 * on the number of resources it must provide context for.
 **/
u64 i40e_calculate_l2fpm_size(u32 txq_num, u32 rxq_num,
			      u32 fcoe_cntx_num, u32 fcoe_filt_num)
{
	u64 fpm_size = 0;

	fpm_size = txq_num * I40E_HMC_OBJ_SIZE_TXQ;
	fpm_size = i40e_align_l2obj_base(fpm_size);

	fpm_size += (rxq_num * I40E_HMC_OBJ_SIZE_RXQ);
	fpm_size = i40e_align_l2obj_base(fpm_size);

	fpm_size += (fcoe_cntx_num * I40E_HMC_OBJ_SIZE_FCOE_CNTX);
	fpm_size = i40e_align_l2obj_base(fpm_size);

	fpm_size += (fcoe_filt_num * I40E_HMC_OBJ_SIZE_FCOE_FILT);
	fpm_size = i40e_align_l2obj_base(fpm_size);

	return fpm_size;
}
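
/*
 * The FPM layout that falls out of the calculation above is four
 * consecutive regions, each starting on a 512-byte boundary:
 *
 *	[Tx queue contexts][pad][Rx queue contexts][pad]
 *	[FCoE DDP contexts][pad][FCoE filters][pad]
 *
 * A sketch, not normative: the compile-time I40E_HMC_OBJ_SIZE_* constants
 * used here are expected to match the per-object sizes that
 * i40e_init_lan_hmc() below reads back from the GLHMC_*OBJSZ registers as
 * power-of-two exponents.
 */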

/**
 * i40e_init_lan_hmc - initialize i40e_hmc_info struct
 * @hw: pointer to the HW structure
 * @txq_num: number of Tx queues needing backing context
 * @rxq_num: number of Rx queues needing backing context
 * @fcoe_cntx_num: amount of FCoE stateful contexts needing backing context
 * @fcoe_filt_num: number of FCoE filters needing backing context
 *
 * This function will be called once per physical function initialization.
 * It will fill out the i40e_hmc_obj_info structure for LAN objects based on
 * the driver's provided input, as well as information from the HMC itself
 * loaded from NVRAM.
 *
 * Assumptions:
 *   - HMC Resource Profile has been selected before calling this function.
 **/
enum i40e_status_code i40e_init_lan_hmc(struct i40e_hw *hw, u32 txq_num,
					u32 rxq_num, u32 fcoe_cntx_num,
					u32 fcoe_filt_num)
{
	struct i40e_hmc_obj_info *obj, *full_obj;
	enum i40e_status_code ret_code = I40E_SUCCESS;
	u64 l2fpm_size;
	u32 size_exp;

	hw->hmc.signature = I40E_HMC_INFO_SIGNATURE;
	hw->hmc.hmc_fn_id = hw->pf_id;

	/* allocate memory for hmc_obj */
	ret_code = i40e_allocate_virt_mem(hw, &hw->hmc.hmc_obj_virt_mem,
			sizeof(struct i40e_hmc_obj_info) * I40E_HMC_LAN_MAX);
	if (ret_code)
		goto init_lan_hmc_out;
	hw->hmc.hmc_obj = (struct i40e_hmc_obj_info *)
			  hw->hmc.hmc_obj_virt_mem.va;

	/* The full object will be used to create the LAN HMC SD */
	full_obj = &hw->hmc.hmc_obj[I40E_HMC_LAN_FULL];
	full_obj->max_cnt = 0;
	full_obj->cnt = 0;
	full_obj->base = 0;
	full_obj->size = 0;

	/* Tx queue context information */
	obj = &hw->hmc.hmc_obj[I40E_HMC_LAN_TX];
	obj->max_cnt = rd32(hw, I40E_GLHMC_LANQMAX);
	obj->cnt = txq_num;
	obj->base = 0;
	size_exp = rd32(hw, I40E_GLHMC_LANTXOBJSZ);
	obj->size = BIT_ULL(size_exp);

	/* validate values requested by driver don't exceed HMC capacity */
	if (txq_num > obj->max_cnt) {
		ret_code = I40E_ERR_INVALID_HMC_OBJ_COUNT;
		DEBUGOUT3("i40e_init_lan_hmc: Tx context: asks for 0x%x but max allowed is 0x%x, returns error %d\n",
			  txq_num, obj->max_cnt, ret_code);
		goto free_hmc_out;
	}

	/* aggregate values into the full LAN object for later */
	full_obj->max_cnt += obj->max_cnt;
	full_obj->cnt += obj->cnt;

	/* Rx queue context information */
	obj = &hw->hmc.hmc_obj[I40E_HMC_LAN_RX];
	obj->max_cnt = rd32(hw, I40E_GLHMC_LANQMAX);
	obj->cnt = rxq_num;
	obj->base = hw->hmc.hmc_obj[I40E_HMC_LAN_TX].base +
		    (hw->hmc.hmc_obj[I40E_HMC_LAN_TX].cnt *
		     hw->hmc.hmc_obj[I40E_HMC_LAN_TX].size);
	obj->base = i40e_align_l2obj_base(obj->base);
	size_exp = rd32(hw, I40E_GLHMC_LANRXOBJSZ);
	obj->size = BIT_ULL(size_exp);

	/* validate values requested by driver don't exceed HMC capacity */
	if (rxq_num > obj->max_cnt) {
		ret_code = I40E_ERR_INVALID_HMC_OBJ_COUNT;
		DEBUGOUT3("i40e_init_lan_hmc: Rx context: asks for 0x%x but max allowed is 0x%x, returns error %d\n",
			  rxq_num, obj->max_cnt, ret_code);
		goto free_hmc_out;
	}

	/* aggregate values into the full LAN object for later */
	full_obj->max_cnt += obj->max_cnt;
	full_obj->cnt += obj->cnt;

	/* FCoE context information */
	obj = &hw->hmc.hmc_obj[I40E_HMC_FCOE_CTX];
	obj->max_cnt = rd32(hw, I40E_GLHMC_FCOEMAX);
	obj->cnt = fcoe_cntx_num;
	obj->base = hw->hmc.hmc_obj[I40E_HMC_LAN_RX].base +
		    (hw->hmc.hmc_obj[I40E_HMC_LAN_RX].cnt *
		     hw->hmc.hmc_obj[I40E_HMC_LAN_RX].size);
	obj->base = i40e_align_l2obj_base(obj->base);
	size_exp = rd32(hw, I40E_GLHMC_FCOEDDPOBJSZ);
	obj->size = BIT_ULL(size_exp);

	/* validate values requested by driver don't exceed HMC capacity */
	if (fcoe_cntx_num > obj->max_cnt) {
		ret_code = I40E_ERR_INVALID_HMC_OBJ_COUNT;
		DEBUGOUT3("i40e_init_lan_hmc: FCoE context: asks for 0x%x but max allowed is 0x%x, returns error %d\n",
			  fcoe_cntx_num, obj->max_cnt, ret_code);
		goto free_hmc_out;
	}

	/* aggregate values into the full LAN object for later */
	full_obj->max_cnt += obj->max_cnt;
	full_obj->cnt += obj->cnt;

	/* FCoE filter information */
	obj = &hw->hmc.hmc_obj[I40E_HMC_FCOE_FILT];
	obj->max_cnt = rd32(hw, I40E_GLHMC_FCOEFMAX);
	obj->cnt = fcoe_filt_num;
	obj->base = hw->hmc.hmc_obj[I40E_HMC_FCOE_CTX].base +
		    (hw->hmc.hmc_obj[I40E_HMC_FCOE_CTX].cnt *
		     hw->hmc.hmc_obj[I40E_HMC_FCOE_CTX].size);
	obj->base = i40e_align_l2obj_base(obj->base);
	size_exp = rd32(hw, I40E_GLHMC_FCOEFOBJSZ);
	obj->size = BIT_ULL(size_exp);

	/* validate values requested by driver don't exceed HMC capacity */
	if (fcoe_filt_num > obj->max_cnt) {
		ret_code = I40E_ERR_INVALID_HMC_OBJ_COUNT;
		DEBUGOUT3("i40e_init_lan_hmc: FCoE filter: asks for 0x%x but max allowed is 0x%x, returns error %d\n",
			  fcoe_filt_num, obj->max_cnt, ret_code);
		goto free_hmc_out;
	}

	/* aggregate values into the full LAN object for later */
	full_obj->max_cnt += obj->max_cnt;
	full_obj->cnt += obj->cnt;

	hw->hmc.first_sd_index = 0;
	hw->hmc.sd_table.ref_cnt = 0;
	l2fpm_size = i40e_calculate_l2fpm_size(txq_num, rxq_num, fcoe_cntx_num,
					       fcoe_filt_num);
	if (NULL == hw->hmc.sd_table.sd_entry) {
		hw->hmc.sd_table.sd_cnt = (u32)
				   (l2fpm_size + I40E_HMC_DIRECT_BP_SIZE - 1) /
				   I40E_HMC_DIRECT_BP_SIZE;

		/* allocate the sd_entry members in the sd_table */
		ret_code = i40e_allocate_virt_mem(hw, &hw->hmc.sd_table.addr,
					  (sizeof(struct i40e_hmc_sd_entry) *
					  hw->hmc.sd_table.sd_cnt));
		if (ret_code)
			goto free_hmc_out;
		hw->hmc.sd_table.sd_entry =
			(struct i40e_hmc_sd_entry *)hw->hmc.sd_table.addr.va;
	}
	/* store in the LAN full object for later */
	full_obj->size = l2fpm_size;

init_lan_hmc_out:
	return ret_code;
free_hmc_out:
	if (hw->hmc.hmc_obj_virt_mem.va)
		i40e_free_virt_mem(hw, &hw->hmc.hmc_obj_virt_mem);

	return ret_code;
}
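
/*
 * Minimal usage sketch (illustrative, not part of the driver): the doc
 * comments above and on i40e_configure_lan_hmc() describe a once-per-PF
 * bring-up sequence.  The queue-count variables are made-up placeholders.
 */
#if 0
	enum i40e_status_code status;

	/* size and describe the LAN HMC objects (no FCoE in this sketch) */
	status = i40e_init_lan_hmc(hw, num_tx_queues, num_rx_queues, 0, 0);
	if (status == I40E_SUCCESS)
		/* allocate backing pages and program the SD/PD tables */
		status = i40e_configure_lan_hmc(hw,
						I40E_HMC_MODEL_DIRECT_PREFERRED);

	/* ... on driver unload, tear everything down again */
	status = i40e_shutdown_lan_hmc(hw);
#endif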

/**
 * i40e_remove_pd_page - Remove a page from the page descriptor table
 * @hw: pointer to the HW structure
 * @hmc_info: pointer to the HMC configuration information structure
 * @idx: segment descriptor index to find the relevant page descriptor
 *
 * This function:
 *	1. Marks the entry in the pd table (for paged address mode) invalid.
 *	2. Writes to the PMPDINV register to invalidate the backing page in
 *	   the FV cache.
 *	3. Decrements the ref count for the pd_entry.
 * Assumptions:
 *	1. Caller can deallocate the memory used by the pd after this
 *	   function returns.
 **/
static enum i40e_status_code i40e_remove_pd_page(struct i40e_hw *hw,
						 struct i40e_hmc_info *hmc_info,
						 u32 idx)
{
	enum i40e_status_code ret_code = I40E_SUCCESS;

	if (i40e_prep_remove_pd_page(hmc_info, idx) == I40E_SUCCESS)
		ret_code = i40e_remove_pd_page_new(hw, hmc_info, idx, TRUE);

	return ret_code;
}

/**
 * i40e_remove_sd_bp - remove a backing page from a segment descriptor
 * @hw: pointer to our HW structure
 * @hmc_info: pointer to the HMC configuration information structure
 * @idx: the page index
 *
 * This function:
 *	1. Marks the entry in the sd table (for direct address mode) invalid.
 *	2. Writes to the PMSDCMD, PMSDDATALOW (with PMSDDATALOW.PMSDVALID set
 *	   to 0) and PMSDDATAHIGH registers to invalidate the sd page.
 *	3. Decrements the ref count for the sd_entry.
 * Assumptions:
 *	1. Caller can deallocate the memory used by backing storage after
 *	   this function returns.
 **/
static enum i40e_status_code i40e_remove_sd_bp(struct i40e_hw *hw,
					       struct i40e_hmc_info *hmc_info,
					       u32 idx)
{
	enum i40e_status_code ret_code = I40E_SUCCESS;

	if (i40e_prep_remove_sd_bp(hmc_info, idx) == I40E_SUCCESS)
		ret_code = i40e_remove_sd_bp_new(hw, hmc_info, idx, TRUE);

	return ret_code;
}

/**
 * i40e_create_lan_hmc_object - allocate backing store for hmc objects
 * @hw: pointer to the HW structure
 * @info: pointer to i40e_hmc_lan_create_obj_info struct
 *
 * This will allocate memory for PDs and backing pages and populate
 * the sd and pd entries.
 **/
enum i40e_status_code i40e_create_lan_hmc_object(struct i40e_hw *hw,
				struct i40e_hmc_lan_create_obj_info *info)
{
	enum i40e_status_code ret_code = I40E_SUCCESS;
	struct i40e_hmc_sd_entry *sd_entry;
	u32 pd_idx1 = 0, pd_lmt1 = 0;
	u32 pd_idx = 0, pd_lmt = 0;
	bool pd_error = FALSE;
	u32 sd_idx, sd_lmt;
	u64 sd_size;
	u32 i, j;

	if (NULL == info) {
		ret_code = I40E_ERR_BAD_PTR;
		DEBUGOUT("i40e_create_lan_hmc_object: bad info ptr\n");
		goto exit;
	}
	if (NULL == info->hmc_info) {
		ret_code = I40E_ERR_BAD_PTR;
		DEBUGOUT("i40e_create_lan_hmc_object: bad hmc_info ptr\n");
		goto exit;
	}
	if (I40E_HMC_INFO_SIGNATURE != info->hmc_info->signature) {
		ret_code = I40E_ERR_BAD_PTR;
		DEBUGOUT("i40e_create_lan_hmc_object: bad signature\n");
		goto exit;
	}

	if (info->start_idx >= info->hmc_info->hmc_obj[info->rsrc_type].cnt) {
		ret_code = I40E_ERR_INVALID_HMC_OBJ_INDEX;
		DEBUGOUT1("i40e_create_lan_hmc_object: returns error %d\n",
			  ret_code);
		goto exit;
	}
	if ((info->start_idx + info->count) >
	    info->hmc_info->hmc_obj[info->rsrc_type].cnt) {
		ret_code = I40E_ERR_INVALID_HMC_OBJ_COUNT;
		DEBUGOUT1("i40e_create_lan_hmc_object: returns error %d\n",
			  ret_code);
		goto exit;
	}

	/* find sd index and limit */
	I40E_FIND_SD_INDEX_LIMIT(info->hmc_info, info->rsrc_type,
				 info->start_idx, info->count,
				 &sd_idx, &sd_lmt);
	if (sd_idx >= info->hmc_info->sd_table.sd_cnt ||
	    sd_lmt > info->hmc_info->sd_table.sd_cnt) {
		ret_code = I40E_ERR_INVALID_SD_INDEX;
		goto exit;
	}
	/* find pd index */
	I40E_FIND_PD_INDEX_LIMIT(info->hmc_info, info->rsrc_type,
				 info->start_idx, info->count, &pd_idx,
				 &pd_lmt);

	/* This covers cases where the SD should back something smaller than
	 * the full 2M of memory.  If no size is filled out, the SD size
	 * defaults to 2M.
	 */
	if (info->direct_mode_sz == 0)
		sd_size = I40E_HMC_DIRECT_BP_SIZE;
	else
		sd_size = info->direct_mode_sz;

	/* check if all the sds are valid. If not, allocate a page and
	 * initialize it.
	 */
	for (j = sd_idx; j < sd_lmt; j++) {
		/* update the sd table entry */
		ret_code = i40e_add_sd_table_entry(hw, info->hmc_info, j,
						   info->entry_type,
						   sd_size);
		if (I40E_SUCCESS != ret_code)
			goto exit_sd_error;
		sd_entry = &info->hmc_info->sd_table.sd_entry[j];
		if (I40E_SD_TYPE_PAGED == sd_entry->entry_type) {
			/* check if all the pds in this sd are valid. If not,
			 * allocate a page and initialize it.
			 */

			/* find pd_idx and pd_lmt in this sd */
			pd_idx1 = max(pd_idx, (j * I40E_HMC_MAX_BP_COUNT));
			pd_lmt1 = min(pd_lmt,
				      ((j + 1) * I40E_HMC_MAX_BP_COUNT));
			for (i = pd_idx1; i < pd_lmt1; i++) {
				/* update the pd table entry */
				ret_code = i40e_add_pd_table_entry(hw,
								info->hmc_info,
								i, NULL);
				if (I40E_SUCCESS != ret_code) {
					pd_error = TRUE;
					break;
				}
			}
			if (pd_error) {
				/* remove the backing pages from pd_idx1 to i */
				while (i && (i > pd_idx1)) {
					i40e_remove_pd_bp(hw, info->hmc_info,
							  (i - 1));
					i--;
				}
			}
		}
		if (!sd_entry->valid) {
			sd_entry->valid = TRUE;
			switch (sd_entry->entry_type) {
			case I40E_SD_TYPE_PAGED:
				I40E_SET_PF_SD_ENTRY(hw,
					sd_entry->u.pd_table.pd_page_addr.pa,
					j, sd_entry->entry_type);
				break;
			case I40E_SD_TYPE_DIRECT:
				I40E_SET_PF_SD_ENTRY(hw, sd_entry->u.bp.addr.pa,
						     j, sd_entry->entry_type);
				break;
			default:
				ret_code = I40E_ERR_INVALID_SD_TYPE;
				goto exit;
			}
		}
	}
	goto exit;

exit_sd_error:
	/* cleanup for sd entries from j to sd_idx */
	while (j && (j > sd_idx)) {
		sd_entry = &info->hmc_info->sd_table.sd_entry[j - 1];
		switch (sd_entry->entry_type) {
		case I40E_SD_TYPE_PAGED:
			pd_idx1 = max(pd_idx,
				      ((j - 1) * I40E_HMC_MAX_BP_COUNT));
			pd_lmt1 = min(pd_lmt, (j * I40E_HMC_MAX_BP_COUNT));
			for (i = pd_idx1; i < pd_lmt1; i++)
				i40e_remove_pd_bp(hw, info->hmc_info, i);
			i40e_remove_pd_page(hw, info->hmc_info, (j - 1));
			break;
		case I40E_SD_TYPE_DIRECT:
			i40e_remove_sd_bp(hw, info->hmc_info, (j - 1));
			break;
		default:
			ret_code = I40E_ERR_INVALID_SD_TYPE;
			break;
		}
		j--;
	}
exit:
	return ret_code;
}
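
/*
 * Addressing geometry assumed above (a sketch, not normative): in direct
 * mode a single SD points at one contiguous 2MB backing page
 * (I40E_HMC_DIRECT_BP_SIZE); in paged mode an SD points at a page of up to
 * I40E_HMC_MAX_BP_COUNT page descriptors, each backed by a 4KB page
 * (I40E_HMC_PAGED_BP_SIZE), covering the same 2MB span.  That is why the
 * loops above clamp the PD range of SD j to
 * [j * I40E_HMC_MAX_BP_COUNT, (j + 1) * I40E_HMC_MAX_BP_COUNT).
 */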

/**
 * i40e_configure_lan_hmc - prepare the HMC backing store
 * @hw: pointer to the hw structure
 * @model: the model for the layout of the SD/PD tables
 *
 * - This function will be called once per physical function initialization.
 * - This function will be called after i40e_init_lan_hmc() and before
 *   any LAN/FCoE HMC objects can be created.
 **/
enum i40e_status_code i40e_configure_lan_hmc(struct i40e_hw *hw,
					     enum i40e_hmc_model model)
{
	struct i40e_hmc_lan_create_obj_info info;
	u8 hmc_fn_id = hw->hmc.hmc_fn_id;
	struct i40e_hmc_obj_info *obj;
	enum i40e_status_code ret_code = I40E_SUCCESS;

	/* Initialize part of the create object info struct */
	info.hmc_info = &hw->hmc;
	info.rsrc_type = I40E_HMC_LAN_FULL;
	info.start_idx = 0;
	info.direct_mode_sz = hw->hmc.hmc_obj[I40E_HMC_LAN_FULL].size;

	/* Build the SD entry for the LAN objects */
	switch (model) {
	case I40E_HMC_MODEL_DIRECT_PREFERRED:
	case I40E_HMC_MODEL_DIRECT_ONLY:
		info.entry_type = I40E_SD_TYPE_DIRECT;
		/* Make one big object, a single SD */
		info.count = 1;
		ret_code = i40e_create_lan_hmc_object(hw, &info);
		if ((ret_code != I40E_SUCCESS) &&
		    (model == I40E_HMC_MODEL_DIRECT_PREFERRED))
			goto try_type_paged;
		else if (ret_code != I40E_SUCCESS)
			goto configure_lan_hmc_out;
		/* else clause falls through the break */
		break;
	case I40E_HMC_MODEL_PAGED_ONLY:
try_type_paged:
		info.entry_type = I40E_SD_TYPE_PAGED;
		/* Make one big object in the PD table */
		info.count = 1;
		ret_code = i40e_create_lan_hmc_object(hw, &info);
		if (ret_code != I40E_SUCCESS)
			goto configure_lan_hmc_out;
		break;
	default:
		/* unsupported type */
		ret_code = I40E_ERR_INVALID_SD_TYPE;
		DEBUGOUT1("i40e_configure_lan_hmc: Unknown SD type: %d\n",
			  ret_code);
		goto configure_lan_hmc_out;
	}

	/* Configure and program the FPM registers so objects can be created */

	/* Tx contexts */
	obj = &hw->hmc.hmc_obj[I40E_HMC_LAN_TX];
	wr32(hw, I40E_GLHMC_LANTXBASE(hmc_fn_id),
	     (u32)((obj->base & I40E_GLHMC_LANTXBASE_FPMLANTXBASE_MASK) / 512));
	wr32(hw, I40E_GLHMC_LANTXCNT(hmc_fn_id), obj->cnt);

	/* Rx contexts */
	obj = &hw->hmc.hmc_obj[I40E_HMC_LAN_RX];
	wr32(hw, I40E_GLHMC_LANRXBASE(hmc_fn_id),
	     (u32)((obj->base & I40E_GLHMC_LANRXBASE_FPMLANRXBASE_MASK) / 512));
	wr32(hw, I40E_GLHMC_LANRXCNT(hmc_fn_id), obj->cnt);

	/* FCoE contexts */
	obj = &hw->hmc.hmc_obj[I40E_HMC_FCOE_CTX];
	wr32(hw, I40E_GLHMC_FCOEDDPBASE(hmc_fn_id),
	 (u32)((obj->base & I40E_GLHMC_FCOEDDPBASE_FPMFCOEDDPBASE_MASK) / 512));
	wr32(hw, I40E_GLHMC_FCOEDDPCNT(hmc_fn_id), obj->cnt);

	/* FCoE filters */
	obj = &hw->hmc.hmc_obj[I40E_HMC_FCOE_FILT];
	wr32(hw, I40E_GLHMC_FCOEFBASE(hmc_fn_id),
	     (u32)((obj->base & I40E_GLHMC_FCOEFBASE_FPMFCOEFBASE_MASK) / 512));
	wr32(hw, I40E_GLHMC_FCOEFCNT(hmc_fn_id), obj->cnt);

configure_lan_hmc_out:
	return ret_code;
}

/**
 * i40e_delete_lan_hmc_object - remove hmc objects
 * @hw: pointer to the HW structure
 * @info: pointer to i40e_hmc_lan_delete_obj_info struct
 *
 * This will de-populate the SDs and PDs.  It frees
 * the memory for PDs and backing storage.  After this function returns,
 * the caller should deallocate the memory previously allocated for
 * book-keeping information about PDs and backing storage.
 **/
enum i40e_status_code i40e_delete_lan_hmc_object(struct i40e_hw *hw,
				struct i40e_hmc_lan_delete_obj_info *info)
{
	enum i40e_status_code ret_code = I40E_SUCCESS;
	struct i40e_hmc_pd_table *pd_table;
	u32 pd_idx, pd_lmt, rel_pd_idx;
	u32 sd_idx, sd_lmt;
	u32 i, j;

	if (NULL == info) {
		ret_code = I40E_ERR_BAD_PTR;
		DEBUGOUT("i40e_delete_hmc_object: bad info ptr\n");
		goto exit;
	}
	if (NULL == info->hmc_info) {
		ret_code = I40E_ERR_BAD_PTR;
		DEBUGOUT("i40e_delete_hmc_object: bad info->hmc_info ptr\n");
		goto exit;
	}
	if (I40E_HMC_INFO_SIGNATURE != info->hmc_info->signature) {
		ret_code = I40E_ERR_BAD_PTR;
		DEBUGOUT("i40e_delete_hmc_object: bad hmc_info->signature\n");
		goto exit;
	}

	if (NULL == info->hmc_info->sd_table.sd_entry) {
		ret_code = I40E_ERR_BAD_PTR;
		DEBUGOUT("i40e_delete_hmc_object: bad sd_entry\n");
		goto exit;
	}

	if (NULL == info->hmc_info->hmc_obj) {
		ret_code = I40E_ERR_BAD_PTR;
		DEBUGOUT("i40e_delete_hmc_object: bad hmc_info->hmc_obj\n");
		goto exit;
	}
	if (info->start_idx >= info->hmc_info->hmc_obj[info->rsrc_type].cnt) {
		ret_code = I40E_ERR_INVALID_HMC_OBJ_INDEX;
		DEBUGOUT1("i40e_delete_hmc_object: returns error %d\n",
			  ret_code);
		goto exit;
	}

	if ((info->start_idx + info->count) >
	    info->hmc_info->hmc_obj[info->rsrc_type].cnt) {
		ret_code = I40E_ERR_INVALID_HMC_OBJ_COUNT;
		DEBUGOUT1("i40e_delete_hmc_object: returns error %d\n",
			  ret_code);
		goto exit;
	}

	I40E_FIND_PD_INDEX_LIMIT(info->hmc_info, info->rsrc_type,
				 info->start_idx, info->count, &pd_idx,
				 &pd_lmt);

	for (j = pd_idx; j < pd_lmt; j++) {
		sd_idx = j / I40E_HMC_PD_CNT_IN_SD;

		if (I40E_SD_TYPE_PAGED !=
		    info->hmc_info->sd_table.sd_entry[sd_idx].entry_type)
			continue;

		rel_pd_idx = j % I40E_HMC_PD_CNT_IN_SD;

		pd_table =
			&info->hmc_info->sd_table.sd_entry[sd_idx].u.pd_table;
		if (pd_table->pd_entry[rel_pd_idx].valid) {
			ret_code = i40e_remove_pd_bp(hw, info->hmc_info, j);
			if (I40E_SUCCESS != ret_code)
				goto exit;
		}
	}

	/* find sd index and limit */
	I40E_FIND_SD_INDEX_LIMIT(info->hmc_info, info->rsrc_type,
				 info->start_idx, info->count,
				 &sd_idx, &sd_lmt);
	if (sd_idx >= info->hmc_info->sd_table.sd_cnt ||
	    sd_lmt > info->hmc_info->sd_table.sd_cnt) {
		ret_code = I40E_ERR_INVALID_SD_INDEX;
		goto exit;
	}

	for (i = sd_idx; i < sd_lmt; i++) {
		if (!info->hmc_info->sd_table.sd_entry[i].valid)
			continue;
		switch (info->hmc_info->sd_table.sd_entry[i].entry_type) {
		case I40E_SD_TYPE_DIRECT:
			ret_code = i40e_remove_sd_bp(hw, info->hmc_info, i);
			if (I40E_SUCCESS != ret_code)
				goto exit;
			break;
		case I40E_SD_TYPE_PAGED:
			ret_code = i40e_remove_pd_page(hw, info->hmc_info, i);
			if (I40E_SUCCESS != ret_code)
				goto exit;
			break;
		default:
			break;
		}
	}
exit:
	return ret_code;
}

/**
 * i40e_shutdown_lan_hmc - Remove HMC backing store, free allocated memory
 * @hw: pointer to the hw structure
 *
 * This must be called by drivers as they are shutting down and being
 * removed from the OS.
 **/
enum i40e_status_code i40e_shutdown_lan_hmc(struct i40e_hw *hw)
{
	struct i40e_hmc_lan_delete_obj_info info;
	enum i40e_status_code ret_code;

	info.hmc_info = &hw->hmc;
	info.rsrc_type = I40E_HMC_LAN_FULL;
	info.start_idx = 0;
	info.count = 1;

	/* delete the object */
	ret_code = i40e_delete_lan_hmc_object(hw, &info);

	/* free the SD table entry for LAN */
	i40e_free_virt_mem(hw, &hw->hmc.sd_table.addr);
	hw->hmc.sd_table.sd_cnt = 0;
	hw->hmc.sd_table.sd_entry = NULL;

	/* free memory used for hmc_obj */
	i40e_free_virt_mem(hw, &hw->hmc.hmc_obj_virt_mem);
	hw->hmc.hmc_obj = NULL;

	return ret_code;
}

#define I40E_HMC_STORE(_struct, _ele)		\
	offsetof(struct _struct, _ele),		\
	FIELD_SIZEOF(struct _struct, _ele)

struct i40e_context_ele {
	u16 offset;
	u16 size_of;
	u16 width;
	u16 lsb;
};

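/*
 * Illustrative expansion (not compiled): each table row below pairs a
 * struct member's location/size with its bit position in the packed HMC
 * context image, so
 *
 *	{I40E_HMC_STORE(i40e_hmc_obj_txq, head), 13, 0}
 *
 * expands to
 *
 *	{offsetof(struct i40e_hmc_obj_txq, head),
 *	 FIELD_SIZEOF(struct i40e_hmc_obj_txq, head), 13, 0}
 *
 * i.e. offset/size_of describe the host struct field, while width/lsb
 * describe the 13-bit field starting at bit 0 of the context.
 */
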
/* LAN Tx Queue Context */
static struct i40e_context_ele i40e_hmc_txq_ce_info[] = {
					     /* Field      Width    LSB */
	{I40E_HMC_STORE(i40e_hmc_obj_txq, head),           13,      0 },
	{I40E_HMC_STORE(i40e_hmc_obj_txq, new_context),     1,     30 },
	{I40E_HMC_STORE(i40e_hmc_obj_txq, base),           57,     32 },
	{I40E_HMC_STORE(i40e_hmc_obj_txq, fc_ena),          1,     89 },
	{I40E_HMC_STORE(i40e_hmc_obj_txq, timesync_ena),    1,     90 },
	{I40E_HMC_STORE(i40e_hmc_obj_txq, fd_ena),          1,     91 },
	{I40E_HMC_STORE(i40e_hmc_obj_txq, alt_vlan_ena),    1,     92 },
	{I40E_HMC_STORE(i40e_hmc_obj_txq, cpuid),           8,     96 },
/* line 1 */
	{I40E_HMC_STORE(i40e_hmc_obj_txq, thead_wb),       13,  0 + 128 },
	{I40E_HMC_STORE(i40e_hmc_obj_txq, head_wb_ena),     1, 32 + 128 },
	{I40E_HMC_STORE(i40e_hmc_obj_txq, qlen),           13, 33 + 128 },
	{I40E_HMC_STORE(i40e_hmc_obj_txq, tphrdesc_ena),    1, 46 + 128 },
	{I40E_HMC_STORE(i40e_hmc_obj_txq, tphrpacket_ena),  1, 47 + 128 },
	{I40E_HMC_STORE(i40e_hmc_obj_txq, tphwdesc_ena),    1, 48 + 128 },
	{I40E_HMC_STORE(i40e_hmc_obj_txq, head_wb_addr),   64, 64 + 128 },
/* line 7 */
	{I40E_HMC_STORE(i40e_hmc_obj_txq, crc),            32,  0 + (7 * 128) },
	{I40E_HMC_STORE(i40e_hmc_obj_txq, rdylist),        10, 84 + (7 * 128) },
	{I40E_HMC_STORE(i40e_hmc_obj_txq, rdylist_act),     1, 94 + (7 * 128) },
	{ 0 }
};

/* LAN Rx Queue Context */
static struct i40e_context_ele i40e_hmc_rxq_ce_info[] = {
					 /* Field      Width    LSB */
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, head),        13,	0   },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, cpuid),        8,	13  },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, base),        57,	32  },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, qlen),        13,	89  },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, dbuff),        7,	102 },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, hbuff),        5,	109 },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, dtype),        2,	114 },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, dsize),        1,	116 },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, crcstrip),     1,	117 },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, fc_ena),       1,	118 },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, l2tsel),       1,	119 },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, hsplit_0),     4,	120 },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, hsplit_1),     2,	124 },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, showiv),       1,	127 },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, rxmax),       14,	174 },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, tphrdesc_ena), 1,	193 },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, tphwdesc_ena), 1,	194 },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, tphdata_ena),  1,	195 },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, tphhead_ena),  1,	196 },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, lrxqthresh),   3,	198 },
	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, prefena),      1,	201 },
	{ 0 }
};

/**
 * i40e_write_byte - replace HMC context byte
 * @hmc_bits: pointer to the HMC memory
 * @ce_info: a description of the struct to be read from
 * @src: the struct to be read from
 **/
static void i40e_write_byte(u8 *hmc_bits,
			    struct i40e_context_ele *ce_info,
			    u8 *src)
{
	u8 src_byte, dest_byte, mask;
	u8 *from, *dest;
	u16 shift_width;

	/* copy from the next struct field */
	from = src + ce_info->offset;

	/* prepare the bits and mask */
	shift_width = ce_info->lsb % 8;
	mask = (u8)(BIT(ce_info->width) - 1);

	src_byte = *from;
	src_byte &= mask;

	/* shift to correct alignment */
	mask <<= shift_width;
	src_byte <<= shift_width;

	/* get the current bits from the target bit string */
	dest = hmc_bits + (ce_info->lsb / 8);

	i40e_memcpy(&dest_byte, dest, sizeof(dest_byte), I40E_DMA_TO_NONDMA);

	dest_byte &= ~mask;	/* get the bits not changing */
	dest_byte |= src_byte;	/* add in the new bits */

	/* put it all back */
	i40e_memcpy(dest, &dest_byte, sizeof(dest_byte), I40E_NONDMA_TO_DMA);
}
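
/*
 * Worked example (illustrative): for a 1-bit field with lsb = 30 stored in
 * a 1-byte struct member, shift_width = 30 % 8 = 6 and the target byte is
 * hmc_bits[30 / 8] = hmc_bits[3].  A source value of 1 becomes 0x40 after
 * masking and shifting, and is OR'd into byte 3 after bit 6 is cleared.
 */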

/**
 * i40e_write_word - replace HMC context word
 * @hmc_bits: pointer to the HMC memory
 * @ce_info: a description of the struct to be read from
 * @src: the struct to be read from
 **/
static void i40e_write_word(u8 *hmc_bits,
			    struct i40e_context_ele *ce_info,
			    u8 *src)
{
	u16 src_word, mask;
	u8 *from, *dest;
	u16 shift_width;
	__le16 dest_word;

	/* copy from the next struct field */
	from = src + ce_info->offset;

	/* prepare the bits and mask */
	shift_width = ce_info->lsb % 8;
	mask = BIT(ce_info->width) - 1;

	/* don't swizzle the bits until after the mask because the mask bits
	 * will be in a different bit position on big endian machines
	 */
	src_word = *(u16 *)from;
	src_word &= mask;

	/* shift to correct alignment */
	mask <<= shift_width;
	src_word <<= shift_width;

	/* get the current bits from the target bit string */
	dest = hmc_bits + (ce_info->lsb / 8);

	i40e_memcpy(&dest_word, dest, sizeof(dest_word), I40E_DMA_TO_NONDMA);

	dest_word &= ~(CPU_TO_LE16(mask));	/* get the bits not changing */
	dest_word |= CPU_TO_LE16(src_word);	/* add in the new bits */

	/* put it all back */
	i40e_memcpy(dest, &dest_word, sizeof(dest_word), I40E_NONDMA_TO_DMA);
}

/**
 * i40e_write_dword - replace HMC context dword
 * @hmc_bits: pointer to the HMC memory
 * @ce_info: a description of the struct to be read from
 * @src: the struct to be read from
 **/
static void i40e_write_dword(u8 *hmc_bits,
			     struct i40e_context_ele *ce_info,
			     u8 *src)
{
	u32 src_dword, mask;
	u8 *from, *dest;
	u16 shift_width;
	__le32 dest_dword;

	/* copy from the next struct field */
	from = src + ce_info->offset;

	/* prepare the bits and mask */
	shift_width = ce_info->lsb % 8;

	/* if the field width is exactly 32 on an x86 machine, then the shift
	 * operation will not work because the SHL instruction's count is
	 * masked to 5 bits so the shift will do nothing
	 */
	if (ce_info->width < 32)
		mask = BIT(ce_info->width) - 1;
	else
		mask = ~(u32)0;

	/* don't swizzle the bits until after the mask because the mask bits
	 * will be in a different bit position on big endian machines
	 */
	src_dword = *(u32 *)from;
	src_dword &= mask;

	/* shift to correct alignment */
	mask <<= shift_width;
	src_dword <<= shift_width;

	/* get the current bits from the target bit string */
	dest = hmc_bits + (ce_info->lsb / 8);

	i40e_memcpy(&dest_dword, dest, sizeof(dest_dword), I40E_DMA_TO_NONDMA);

	dest_dword &= ~(CPU_TO_LE32(mask));	/* get the bits not changing */
	dest_dword |= CPU_TO_LE32(src_dword);	/* add in the new bits */

	/* put it all back */
	i40e_memcpy(dest, &dest_dword, sizeof(dest_dword), I40E_NONDMA_TO_DMA);
}

/**
 * i40e_write_qword - replace HMC context qword
 * @hmc_bits: pointer to the HMC memory
 * @ce_info: a description of the struct to be read from
 * @src: the struct to be read from
 **/
static void i40e_write_qword(u8 *hmc_bits,
			     struct i40e_context_ele *ce_info,
			     u8 *src)
{
	u64 src_qword, mask;
	u8 *from, *dest;
	u16 shift_width;
	__le64 dest_qword;

	/* copy from the next struct field */
	from = src + ce_info->offset;

	/* prepare the bits and mask */
	shift_width = ce_info->lsb % 8;

	/* if the field width is exactly 64 on an x86 machine, then the shift
	 * operation will not work because the SHL instruction's count is
	 * masked to 6 bits so the shift will do nothing
	 */
	if (ce_info->width < 64)
		mask = BIT_ULL(ce_info->width) - 1;
	else
		mask = ~(u64)0;

	/* don't swizzle the bits until after the mask because the mask bits
	 * will be in a different bit position on big endian machines
	 */
	src_qword = *(u64 *)from;
	src_qword &= mask;

	/* shift to correct alignment */
	mask <<= shift_width;
	src_qword <<= shift_width;

	/* get the current bits from the target bit string */
	dest = hmc_bits + (ce_info->lsb / 8);

	i40e_memcpy(&dest_qword, dest, sizeof(dest_qword), I40E_DMA_TO_NONDMA);

	dest_qword &= ~(CPU_TO_LE64(mask));	/* get the bits not changing */
	dest_qword |= CPU_TO_LE64(src_qword);	/* add in the new bits */

	/* put it all back */
	i40e_memcpy(dest, &dest_qword, sizeof(dest_qword), I40E_NONDMA_TO_DMA);
}

/**
 * i40e_read_byte - read HMC context byte into struct
 * @hmc_bits: pointer to the HMC memory
 * @ce_info: a description of the struct to be filled
 * @dest: the struct to be filled
 **/
static void i40e_read_byte(u8 *hmc_bits,
			   struct i40e_context_ele *ce_info,
			   u8 *dest)
{
	u8 dest_byte, mask;
	u8 *src, *target;
	u16 shift_width;

	/* prepare the bits and mask */
	shift_width = ce_info->lsb % 8;
	mask = (u8)(BIT(ce_info->width) - 1);

	/* shift to correct alignment */
	mask <<= shift_width;

	/* get the current bits from the src bit string */
	src = hmc_bits + (ce_info->lsb / 8);

	i40e_memcpy(&dest_byte, src, sizeof(dest_byte), I40E_DMA_TO_NONDMA);

	/* keep only the field bits, then shift them down into place */
	dest_byte &= mask;

	dest_byte >>= shift_width;

	/* get the address from the struct field */
	target = dest + ce_info->offset;

	/* put it back in the struct */
	i40e_memcpy(target, &dest_byte, sizeof(dest_byte), I40E_NONDMA_TO_DMA);
}

/**
 * i40e_read_word - read HMC context word into struct
 * @hmc_bits: pointer to the HMC memory
 * @ce_info: a description of the struct to be filled
 * @dest: the struct to be filled
 **/
static void i40e_read_word(u8 *hmc_bits,
			   struct i40e_context_ele *ce_info,
			   u8 *dest)
{
	u16 dest_word, mask;
	u8 *src, *target;
	u16 shift_width;
	__le16 src_word;

	/* prepare the bits and mask */
	shift_width = ce_info->lsb % 8;
	mask = BIT(ce_info->width) - 1;

	/* shift to correct alignment */
	mask <<= shift_width;

	/* get the current bits from the src bit string */
	src = hmc_bits + (ce_info->lsb / 8);

	i40e_memcpy(&src_word, src, sizeof(src_word), I40E_DMA_TO_NONDMA);

	/* the data in the memory is stored as little endian so mask it
	 * correctly, keeping only the field bits
	 */
	src_word &= CPU_TO_LE16(mask);

	/* get the data back into host order before shifting */
	dest_word = LE16_TO_CPU(src_word);

	dest_word >>= shift_width;

	/* get the address from the struct field */
	target = dest + ce_info->offset;

	/* put it back in the struct */
	i40e_memcpy(target, &dest_word, sizeof(dest_word), I40E_NONDMA_TO_DMA);
}

/**
 * i40e_read_dword - read HMC context dword into struct
 * @hmc_bits: pointer to the HMC memory
 * @ce_info: a description of the struct to be filled
 * @dest: the struct to be filled
 **/
static void i40e_read_dword(u8 *hmc_bits,
			    struct i40e_context_ele *ce_info,
			    u8 *dest)
{
	u32 dest_dword, mask;
	u8 *src, *target;
	u16 shift_width;
	__le32 src_dword;

	/* prepare the bits and mask */
	shift_width = ce_info->lsb % 8;

	/* if the field width is exactly 32 on an x86 machine, then the shift
	 * operation will not work because the SHL instruction's count is
	 * masked to 5 bits so the shift will do nothing
	 */
	if (ce_info->width < 32)
		mask = BIT(ce_info->width) - 1;
	else
		mask = ~(u32)0;

	/* shift to correct alignment */
	mask <<= shift_width;

	/* get the current bits from the src bit string */
	src = hmc_bits + (ce_info->lsb / 8);

	i40e_memcpy(&src_dword, src, sizeof(src_dword), I40E_DMA_TO_NONDMA);

	/* the data in the memory is stored as little endian so mask it
	 * correctly, keeping only the field bits
	 */
	src_dword &= CPU_TO_LE32(mask);

	/* get the data back into host order before shifting */
	dest_dword = LE32_TO_CPU(src_dword);

	dest_dword >>= shift_width;

	/* get the address from the struct field */
	target = dest + ce_info->offset;

	/* put it back in the struct */
	i40e_memcpy(target, &dest_dword, sizeof(dest_dword),
		    I40E_NONDMA_TO_DMA);
}

/**
 * i40e_read_qword - read HMC context qword into struct
 * @hmc_bits: pointer to the HMC memory
 * @ce_info: a description of the struct to be filled
 * @dest: the struct to be filled
 **/
static void i40e_read_qword(u8 *hmc_bits,
			    struct i40e_context_ele *ce_info,
			    u8 *dest)
{
	u64 dest_qword, mask;
	u8 *src, *target;
	u16 shift_width;
	__le64 src_qword;

	/* prepare the bits and mask */
	shift_width = ce_info->lsb % 8;

	/* if the field width is exactly 64 on an x86 machine, then the shift
	 * operation will not work because the SHL instruction's count is
	 * masked to 6 bits so the shift will do nothing
	 */
	if (ce_info->width < 64)
		mask = BIT_ULL(ce_info->width) - 1;
	else
		mask = ~(u64)0;

	/* shift to correct alignment */
	mask <<= shift_width;

	/* get the current bits from the src bit string */
	src = hmc_bits + (ce_info->lsb / 8);

	i40e_memcpy(&src_qword, src, sizeof(src_qword), I40E_DMA_TO_NONDMA);

	/* the data in the memory is stored as little endian so mask it
	 * correctly, keeping only the field bits
	 */
	src_qword &= CPU_TO_LE64(mask);

	/* get the data back into host order before shifting */
	dest_qword = LE64_TO_CPU(src_qword);

	dest_qword >>= shift_width;

	/* get the address from the struct field */
	target = dest + ce_info->offset;

	/* put it back in the struct */
	i40e_memcpy(target, &dest_qword, sizeof(dest_qword),
		    I40E_NONDMA_TO_DMA);
}

/**
 * i40e_get_hmc_context - extract HMC context bits
 * @context_bytes: pointer to the context bit array
 * @ce_info: a description of the struct to be filled
 * @dest: the struct to be filled
 **/
static enum i40e_status_code i40e_get_hmc_context(u8 *context_bytes,
					struct i40e_context_ele *ce_info,
					u8 *dest)
{
	int f;

	for (f = 0; ce_info[f].width != 0; f++) {
		switch (ce_info[f].size_of) {
		case 1:
			i40e_read_byte(context_bytes, &ce_info[f], dest);
			break;
		case 2:
			i40e_read_word(context_bytes, &ce_info[f], dest);
			break;
		case 4:
			i40e_read_dword(context_bytes, &ce_info[f], dest);
			break;
		case 8:
			i40e_read_qword(context_bytes, &ce_info[f], dest);
			break;
		default:
			/* nothing to do, just keep going */
			break;
		}
	}

	return I40E_SUCCESS;
}

/**
 * i40e_clear_hmc_context - zero out the HMC context bits
 * @hw:       the hardware struct
 * @context_bytes: pointer to the context bit array (DMA memory)
 * @hmc_type: the type of HMC resource
 **/
static enum i40e_status_code i40e_clear_hmc_context(struct i40e_hw *hw,
					u8 *context_bytes,
					enum i40e_hmc_lan_rsrc_type hmc_type)
{
	/* clean the bit array */
	i40e_memset(context_bytes, 0, (u32)hw->hmc.hmc_obj[hmc_type].size,
		    I40E_DMA_MEM);

	return I40E_SUCCESS;
}

/**
 * i40e_set_hmc_context - replace HMC context bits
 * @context_bytes: pointer to the context bit array
 * @ce_info:  a description of the struct whose fields are to be written
 * @dest:     the struct holding the values to write into the context
 **/
static enum i40e_status_code i40e_set_hmc_context(u8 *context_bytes,
					struct i40e_context_ele *ce_info,
					u8 *dest)
{
	int f;

	for (f = 0; ce_info[f].width != 0; f++) {

		/* we have to deal with each element of the HMC using the
		 * correct size so that we are correct regardless of the
		 * endianness of the machine
		 */
		switch (ce_info[f].size_of) {
		case 1:
			i40e_write_byte(context_bytes, &ce_info[f], dest);
			break;
		case 2:
			i40e_write_word(context_bytes, &ce_info[f], dest);
			break;
		case 4:
			i40e_write_dword(context_bytes, &ce_info[f], dest);
			break;
		case 8:
			i40e_write_qword(context_bytes, &ce_info[f], dest);
			break;
		}
	}

	return I40E_SUCCESS;
}
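
/*
 * Both walkers above rely on the { 0 } sentinel that terminates the
 * i40e_hmc_txq_ce_info/i40e_hmc_rxq_ce_info tables: a width of 0 ends the
 * loop.  Dispatching on size_of keeps the byte swapping correct on both
 * little- and big-endian hosts, since each field is copied, masked and
 * shifted at its native width.
 */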

/**
 * i40e_hmc_get_object_va - retrieves an object's virtual address
 * @hw: pointer to the hw structure
 * @object_base: pointer used to return the object's virtual address
 * @rsrc_type: the hmc resource type
 * @obj_idx: hmc object index
 *
 * This function retrieves the object's virtual address from the object
 * base pointer.  This function is used for LAN Queue contexts.
 **/
static
enum i40e_status_code i40e_hmc_get_object_va(struct i40e_hw *hw,
					u8 **object_base,
					enum i40e_hmc_lan_rsrc_type rsrc_type,
					u32 obj_idx)
{
	u32 obj_offset_in_sd, obj_offset_in_pd;
	struct i40e_hmc_info     *hmc_info = &hw->hmc;
	struct i40e_hmc_sd_entry *sd_entry;
	struct i40e_hmc_pd_entry *pd_entry;
	u32 pd_idx, pd_lmt, rel_pd_idx;
	enum i40e_status_code ret_code = I40E_SUCCESS;
	u64 obj_offset_in_fpm;
	u32 sd_idx, sd_lmt;

	if (NULL == hmc_info->hmc_obj) {
		ret_code = I40E_ERR_BAD_PTR;
		DEBUGOUT("i40e_hmc_get_object_va: bad hmc_info->hmc_obj ptr\n");
		goto exit;
	}
	if (NULL == object_base) {
		ret_code = I40E_ERR_BAD_PTR;
		DEBUGOUT("i40e_hmc_get_object_va: bad object_base ptr\n");
		goto exit;
	}
	if (I40E_HMC_INFO_SIGNATURE != hmc_info->signature) {
		ret_code = I40E_ERR_BAD_PTR;
		DEBUGOUT("i40e_hmc_get_object_va: bad hmc_info->signature\n");
		goto exit;
	}
	if (obj_idx >= hmc_info->hmc_obj[rsrc_type].cnt) {
		ret_code = I40E_ERR_INVALID_HMC_OBJ_INDEX;
		DEBUGOUT1("i40e_hmc_get_object_va: returns error %d\n",
			  ret_code);
		goto exit;
	}
	/* find sd index and limit */
	I40E_FIND_SD_INDEX_LIMIT(hmc_info, rsrc_type, obj_idx, 1,
				 &sd_idx, &sd_lmt);

	sd_entry = &hmc_info->sd_table.sd_entry[sd_idx];
	obj_offset_in_fpm = hmc_info->hmc_obj[rsrc_type].base +
			    hmc_info->hmc_obj[rsrc_type].size * obj_idx;

	if (I40E_SD_TYPE_PAGED == sd_entry->entry_type) {
		I40E_FIND_PD_INDEX_LIMIT(hmc_info, rsrc_type, obj_idx, 1,
					 &pd_idx, &pd_lmt);
		rel_pd_idx = pd_idx % I40E_HMC_PD_CNT_IN_SD;
		pd_entry = &sd_entry->u.pd_table.pd_entry[rel_pd_idx];
		obj_offset_in_pd = (u32)(obj_offset_in_fpm %
					 I40E_HMC_PAGED_BP_SIZE);
		*object_base = (u8 *)pd_entry->bp.addr.va + obj_offset_in_pd;
	} else {
		obj_offset_in_sd = (u32)(obj_offset_in_fpm %
					 I40E_HMC_DIRECT_BP_SIZE);
		*object_base = (u8 *)sd_entry->u.bp.addr.va + obj_offset_in_sd;
	}
exit:
	return ret_code;
}
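
/*
 * Offset math sketch (illustrative numbers): if the Tx context object size
 * read back at init time is 128 bytes and queue 40 is requested, the
 * object lives at FPM offset base + 40 * 128.  In direct mode the VA is
 * the 2MB backing page's VA plus that offset modulo
 * I40E_HMC_DIRECT_BP_SIZE; in paged mode it is the 4KB page's VA plus the
 * offset modulo I40E_HMC_PAGED_BP_SIZE.
 */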

/**
 * i40e_get_lan_tx_queue_context - return the HMC context for the queue
 * @hw:    the hardware struct
 * @queue: the queue we care about
 * @s:     the struct to be filled
 **/
enum i40e_status_code i40e_get_lan_tx_queue_context(struct i40e_hw *hw,
						    u16 queue,
						    struct i40e_hmc_obj_txq *s)
{
	enum i40e_status_code err;
	u8 *context_bytes;

	err = i40e_hmc_get_object_va(hw, &context_bytes, I40E_HMC_LAN_TX,
				     queue);
	if (err < 0)
		return err;

	return i40e_get_hmc_context(context_bytes,
				    i40e_hmc_txq_ce_info, (u8 *)s);
}

/**
 * i40e_clear_lan_tx_queue_context - clear the HMC context for the queue
 * @hw:    the hardware struct
 * @queue: the queue we care about
 **/
enum i40e_status_code i40e_clear_lan_tx_queue_context(struct i40e_hw *hw,
						      u16 queue)
{
	enum i40e_status_code err;
	u8 *context_bytes;

	err = i40e_hmc_get_object_va(hw, &context_bytes, I40E_HMC_LAN_TX,
				     queue);
	if (err < 0)
		return err;

	return i40e_clear_hmc_context(hw, context_bytes, I40E_HMC_LAN_TX);
}

/**
 * i40e_set_lan_tx_queue_context - set the HMC context for the queue
 * @hw:    the hardware struct
 * @queue: the queue we care about
 * @s:     the struct holding the queue settings to write
 **/
enum i40e_status_code i40e_set_lan_tx_queue_context(struct i40e_hw *hw,
						    u16 queue,
						    struct i40e_hmc_obj_txq *s)
{
	enum i40e_status_code err;
	u8 *context_bytes;

	err = i40e_hmc_get_object_va(hw, &context_bytes, I40E_HMC_LAN_TX,
				     queue);
	if (err < 0)
		return err;

	return i40e_set_hmc_context(context_bytes,
				    i40e_hmc_txq_ce_info, (u8 *)s);
}
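
/*
 * Usage sketch (illustrative, not part of the driver): when a PF driver
 * brings up a Tx queue it typically clears the stale context and writes a
 * fresh one.  The variables and i40e_hmc_obj_txq fields shown are
 * placeholders; per the context table above, base is a 57-bit field that
 * drivers program in 128-byte units.
 */
#if 0
	struct i40e_hmc_obj_txq txq_ctx = {0};
	enum i40e_status_code status;

	txq_ctx.base = ring_dma_addr / 128;	/* base is in 128-byte units */
	txq_ctx.qlen = ring_len;

	status = i40e_clear_lan_tx_queue_context(hw, queue);
	if (status == I40E_SUCCESS)
		status = i40e_set_lan_tx_queue_context(hw, queue, &txq_ctx);
#endif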

/**
 * i40e_get_lan_rx_queue_context - return the HMC context for the queue
 * @hw:    the hardware struct
 * @queue: the queue we care about
 * @s:     the struct to be filled
 **/
enum i40e_status_code i40e_get_lan_rx_queue_context(struct i40e_hw *hw,
						    u16 queue,
						    struct i40e_hmc_obj_rxq *s)
{
	enum i40e_status_code err;
	u8 *context_bytes;

	err = i40e_hmc_get_object_va(hw, &context_bytes, I40E_HMC_LAN_RX,
				     queue);
	if (err < 0)
		return err;

	return i40e_get_hmc_context(context_bytes,
				    i40e_hmc_rxq_ce_info, (u8 *)s);
}

/**
 * i40e_clear_lan_rx_queue_context - clear the HMC context for the queue
 * @hw:    the hardware struct
 * @queue: the queue we care about
 **/
enum i40e_status_code i40e_clear_lan_rx_queue_context(struct i40e_hw *hw,
						      u16 queue)
{
	enum i40e_status_code err;
	u8 *context_bytes;

	err = i40e_hmc_get_object_va(hw, &context_bytes, I40E_HMC_LAN_RX,
				     queue);
	if (err < 0)
		return err;

	return i40e_clear_hmc_context(hw, context_bytes, I40E_HMC_LAN_RX);
}

/**
 * i40e_set_lan_rx_queue_context - set the HMC context for the queue
 * @hw:    the hardware struct
 * @queue: the queue we care about
 * @s:     the struct holding the queue settings to write
 **/
enum i40e_status_code i40e_set_lan_rx_queue_context(struct i40e_hw *hw,
						    u16 queue,
						    struct i40e_hmc_obj_rxq *s)
{
	enum i40e_status_code err;
	u8 *context_bytes;

	err = i40e_hmc_get_object_va(hw, &context_bytes, I40E_HMC_LAN_RX,
				     queue);
	if (err < 0)
		return err;

	return i40e_set_hmc_context(context_bytes,
				    i40e_hmc_rxq_ce_info, (u8 *)s);
}