/******************************************************************************

  Copyright (c) 2013-2015, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/
/*$FreeBSD: head/sys/dev/ixl/i40e_adminq.c 284049 2015-06-05 22:52:42Z jfv $*/

#include "i40e_status.h"
#include "i40e_type.h"
#include "i40e_register.h"
#include "i40e_adminq.h"
#include "i40e_prototype.h"

/**
 * i40e_is_nvm_update_op - return TRUE if this is an NVM update operation
 * @desc: API request descriptor
 **/
static INLINE bool i40e_is_nvm_update_op(struct i40e_aq_desc *desc)
{
	return (desc->opcode == CPU_TO_LE16(i40e_aqc_opc_nvm_erase) ||
		desc->opcode == CPU_TO_LE16(i40e_aqc_opc_nvm_update));
}

/**
 *  i40e_adminq_init_regs - Initialize AdminQ registers
 *  @hw: pointer to the hardware structure
 *
 *  This assumes the alloc_asq and alloc_arq functions have already been called
 **/
static void i40e_adminq_init_regs(struct i40e_hw *hw)
{
	/* set head and tail registers in our local struct */
	if (i40e_is_vf(hw)) {
		hw->aq.asq.tail = I40E_VF_ATQT1;
		hw->aq.asq.head = I40E_VF_ATQH1;
		hw->aq.asq.len  = I40E_VF_ATQLEN1;
		hw->aq.asq.bal  = I40E_VF_ATQBAL1;
		hw->aq.asq.bah  = I40E_VF_ATQBAH1;
		hw->aq.arq.tail = I40E_VF_ARQT1;
		hw->aq.arq.head = I40E_VF_ARQH1;
		hw->aq.arq.len  = I40E_VF_ARQLEN1;
		hw->aq.arq.bal  = I40E_VF_ARQBAL1;
		hw->aq.arq.bah  = I40E_VF_ARQBAH1;
	} else {
		hw->aq.asq.tail = I40E_PF_ATQT;
		hw->aq.asq.head = I40E_PF_ATQH;
		hw->aq.asq.len  = I40E_PF_ATQLEN;
		hw->aq.asq.bal  = I40E_PF_ATQBAL;
		hw->aq.asq.bah  = I40E_PF_ATQBAH;
		hw->aq.arq.tail = I40E_PF_ARQT;
		hw->aq.arq.head = I40E_PF_ARQH;
		hw->aq.arq.len  = I40E_PF_ARQLEN;
		hw->aq.arq.bal  = I40E_PF_ARQBAL;
		hw->aq.arq.bah  = I40E_PF_ARQBAH;
	}
}

/**
 *  i40e_alloc_adminq_asq_ring - Allocate Admin Queue send rings
 *  @hw: pointer to the hardware structure
 **/
enum i40e_status_code i40e_alloc_adminq_asq_ring(struct i40e_hw *hw)
{
	enum i40e_status_code ret_code;

	ret_code = i40e_allocate_dma_mem(hw, &hw->aq.asq.desc_buf,
					 i40e_mem_atq_ring,
					 (hw->aq.num_asq_entries *
					 sizeof(struct i40e_aq_desc)),
					 I40E_ADMINQ_DESC_ALIGNMENT);
	if (ret_code)
		return ret_code;

	ret_code = i40e_allocate_virt_mem(hw, &hw->aq.asq.cmd_buf,
					  (hw->aq.num_asq_entries *
					  sizeof(struct i40e_asq_cmd_details)));
	if (ret_code) {
		i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf);
		return ret_code;
	}

	return ret_code;
}

/**
 *  i40e_alloc_adminq_arq_ring - Allocate Admin Queue receive rings
 *  @hw: pointer to the hardware structure
 **/
enum i40e_status_code i40e_alloc_adminq_arq_ring(struct i40e_hw *hw)
{
	enum i40e_status_code ret_code;

	ret_code = i40e_allocate_dma_mem(hw, &hw->aq.arq.desc_buf,
					 i40e_mem_arq_ring,
					 (hw->aq.num_arq_entries *
					 sizeof(struct i40e_aq_desc)),
					 I40E_ADMINQ_DESC_ALIGNMENT);

	return ret_code;
}

/**
 *  i40e_free_adminq_asq - Free Admin Queue send rings
 *  @hw: pointer to the hardware structure
 *
 *  This assumes the posted send buffers have already been cleaned
 *  and de-allocated
 **/
void i40e_free_adminq_asq(struct i40e_hw *hw)
{
	i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf);
}

/**
 *  i40e_free_adminq_arq - Free Admin Queue receive rings
 *  @hw: pointer to the hardware structure
 *
 *  This assumes the posted receive buffers have already been cleaned
 *  and de-allocated
 **/
void i40e_free_adminq_arq(struct i40e_hw *hw)
{
	i40e_free_dma_mem(hw, &hw->aq.arq.desc_buf);
}

/**
 *  i40e_alloc_arq_bufs - Allocate pre-posted buffers for the receive queue
 *  @hw: pointer to the hardware structure
 **/
static enum i40e_status_code i40e_alloc_arq_bufs(struct i40e_hw *hw)
{
	enum i40e_status_code ret_code;
	struct i40e_aq_desc *desc;
	struct i40e_dma_mem *bi;
	int i;

	/* We'll be allocating the buffer info memory first, then we can
	 * allocate the mapped buffers for the event processing
	 */

	/* buffer_info structures do not need alignment */
	ret_code = i40e_allocate_virt_mem(hw, &hw->aq.arq.dma_head,
		(hw->aq.num_arq_entries * sizeof(struct i40e_dma_mem)));
	if (ret_code)
		goto alloc_arq_bufs;
	hw->aq.arq.r.arq_bi = (struct i40e_dma_mem *)hw->aq.arq.dma_head.va;

	/* allocate the mapped buffers */
	for (i = 0; i < hw->aq.num_arq_entries; i++) {
		bi = &hw->aq.arq.r.arq_bi[i];
		ret_code = i40e_allocate_dma_mem(hw, bi,
						 i40e_mem_arq_buf,
						 hw->aq.arq_buf_size,
						 I40E_ADMINQ_DESC_ALIGNMENT);
		if (ret_code)
			goto unwind_alloc_arq_bufs;

		/* now configure the descriptors for use */
		desc = I40E_ADMINQ_DESC(hw->aq.arq, i);

		desc->flags = CPU_TO_LE16(I40E_AQ_FLAG_BUF);
		if (hw->aq.arq_buf_size > I40E_AQ_LARGE_BUF)
			desc->flags |= CPU_TO_LE16(I40E_AQ_FLAG_LB);
		desc->opcode = 0;
		/* This is in accordance with the Admin queue design; there is
		 * no register for buffer size configuration
		 */
		desc->datalen = CPU_TO_LE16((u16)bi->size);
		desc->retval = 0;
		desc->cookie_high = 0;
		desc->cookie_low = 0;
		desc->params.external.addr_high =
			CPU_TO_LE32(I40E_HI_DWORD(bi->pa));
		desc->params.external.addr_low =
			CPU_TO_LE32(I40E_LO_DWORD(bi->pa));
		desc->params.external.param0 = 0;
		desc->params.external.param1 = 0;
	}

alloc_arq_bufs:
	return ret_code;

unwind_alloc_arq_bufs:
	/* don't try to free the one that failed... */
	i--;
	for (; i >= 0; i--)
		i40e_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);
	i40e_free_virt_mem(hw, &hw->aq.arq.dma_head);

	return ret_code;
}

/**
 *  i40e_alloc_asq_bufs - Allocate empty buffer structs for the send queue
 *  @hw: pointer to the hardware structure
 **/
static enum i40e_status_code i40e_alloc_asq_bufs(struct i40e_hw *hw)
{
	enum i40e_status_code ret_code;
	struct i40e_dma_mem *bi;
	int i;

	/* No mapped memory needed yet, just the buffer info structures */
	ret_code = i40e_allocate_virt_mem(hw, &hw->aq.asq.dma_head,
		(hw->aq.num_asq_entries * sizeof(struct i40e_dma_mem)));
	if (ret_code)
		goto alloc_asq_bufs;
	hw->aq.asq.r.asq_bi = (struct i40e_dma_mem *)hw->aq.asq.dma_head.va;

	/* allocate the mapped buffers */
	for (i = 0; i < hw->aq.num_asq_entries; i++) {
		bi = &hw->aq.asq.r.asq_bi[i];
		ret_code = i40e_allocate_dma_mem(hw, bi,
						 i40e_mem_asq_buf,
						 hw->aq.asq_buf_size,
						 I40E_ADMINQ_DESC_ALIGNMENT);
		if (ret_code)
			goto unwind_alloc_asq_bufs;
	}
alloc_asq_bufs:
	return ret_code;

unwind_alloc_asq_bufs:
	/* don't try to free the one that failed... */
	i--;
	for (; i >= 0; i--)
		i40e_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);
	i40e_free_virt_mem(hw, &hw->aq.asq.dma_head);

	return ret_code;
}

/**
 *  i40e_free_arq_bufs - Free receive queue buffer info elements
 *  @hw: pointer to the hardware structure
 **/
static void i40e_free_arq_bufs(struct i40e_hw *hw)
{
	int i;

	/* free descriptors */
	for (i = 0; i < hw->aq.num_arq_entries; i++)
		i40e_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);

	/* free the descriptor memory */
	i40e_free_dma_mem(hw, &hw->aq.arq.desc_buf);

	/* free the dma header */
	i40e_free_virt_mem(hw, &hw->aq.arq.dma_head);
}

/**
 *  i40e_free_asq_bufs - Free send queue buffer info elements
 *  @hw: pointer to the hardware structure
 **/
static void i40e_free_asq_bufs(struct i40e_hw *hw)
{
	int i;

	/* only unmap if the address is non-NULL */
	for (i = 0; i < hw->aq.num_asq_entries; i++)
		if (hw->aq.asq.r.asq_bi[i].pa)
			i40e_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);

	/* free the buffer info list */
	i40e_free_virt_mem(hw, &hw->aq.asq.cmd_buf);

	/* free the descriptor memory */
	i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf);

	/* free the dma header */
	i40e_free_virt_mem(hw, &hw->aq.asq.dma_head);
}

/**
 *  i40e_config_asq_regs - configure ASQ registers
 *  @hw: pointer to the hardware structure
 *
 *  Configure base address and length registers for the transmit queue
 **/
static enum i40e_status_code i40e_config_asq_regs(struct i40e_hw *hw)
{
	enum i40e_status_code ret_code = I40E_SUCCESS;
	u32 reg = 0;

	/* Clear Head and Tail */
	wr32(hw, hw->aq.asq.head, 0);
	wr32(hw, hw->aq.asq.tail, 0);

	/* set starting point */
	if (!i40e_is_vf(hw))
		wr32(hw, hw->aq.asq.len, (hw->aq.num_asq_entries |
					  I40E_PF_ATQLEN_ATQENABLE_MASK));
	if (i40e_is_vf(hw))
		wr32(hw, hw->aq.asq.len, (hw->aq.num_asq_entries |
					  I40E_VF_ATQLEN1_ATQENABLE_MASK));
	wr32(hw, hw->aq.asq.bal, I40E_LO_DWORD(hw->aq.asq.desc_buf.pa));
	wr32(hw, hw->aq.asq.bah, I40E_HI_DWORD(hw->aq.asq.desc_buf.pa));

	/* Check one register to verify that config was applied */
	reg = rd32(hw, hw->aq.asq.bal);
	if (reg != I40E_LO_DWORD(hw->aq.asq.desc_buf.pa))
		ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;

	return ret_code;
}

/**
 *  i40e_config_arq_regs - ARQ register configuration
 *  @hw: pointer to the hardware structure
 *
 *  Configure base address and length registers for the receive (event) queue
 **/
static enum i40e_status_code i40e_config_arq_regs(struct i40e_hw *hw)
{
	enum i40e_status_code ret_code = I40E_SUCCESS;
	u32 reg = 0;

	/* Clear Head and Tail */
	wr32(hw, hw->aq.arq.head, 0);
	wr32(hw, hw->aq.arq.tail, 0);

	/* set starting point */
	if (!i40e_is_vf(hw))
		wr32(hw, hw->aq.arq.len, (hw->aq.num_arq_entries |
					  I40E_PF_ARQLEN_ARQENABLE_MASK));
	if (i40e_is_vf(hw))
		wr32(hw, hw->aq.arq.len, (hw->aq.num_arq_entries |
					  I40E_VF_ARQLEN1_ARQENABLE_MASK));
	wr32(hw, hw->aq.arq.bal, I40E_LO_DWORD(hw->aq.arq.desc_buf.pa));
	wr32(hw, hw->aq.arq.bah, I40E_HI_DWORD(hw->aq.arq.desc_buf.pa));

	/* Update tail in the HW to post pre-allocated buffers */
	wr32(hw, hw->aq.arq.tail, hw->aq.num_arq_entries - 1);

	/* Check one register to verify that config was applied */
	reg = rd32(hw, hw->aq.arq.bal);
	if (reg != I40E_LO_DWORD(hw->aq.arq.desc_buf.pa))
		ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;

	return ret_code;
}

/**
 *  i40e_init_asq - main initialization routine for ASQ
 *  @hw: pointer to the hardware structure
 *
 *  This is the main initialization routine for the Admin Send Queue.
 *  Prior to calling this function, drivers *MUST* set the following fields
 *  in the hw->aq structure:
 *     - hw->aq.num_asq_entries
 *     - hw->aq.asq_buf_size
 *
 *  Do *NOT* hold the lock when calling this as the memory allocation routines
 *  called are not atomic-context safe
 **/
enum i40e_status_code i40e_init_asq(struct i40e_hw *hw)
{
	enum i40e_status_code ret_code = I40E_SUCCESS;

	if (hw->aq.asq.count > 0) {
		/* queue already initialized */
		ret_code = I40E_ERR_NOT_READY;
		goto init_adminq_exit;
	}

	/* verify input for valid configuration */
	if ((hw->aq.num_asq_entries == 0) ||
	    (hw->aq.asq_buf_size == 0)) {
		ret_code = I40E_ERR_CONFIG;
		goto init_adminq_exit;
	}

	hw->aq.asq.next_to_use = 0;
	hw->aq.asq.next_to_clean = 0;
	hw->aq.asq.count = hw->aq.num_asq_entries;

	/* allocate the ring memory */
	ret_code = i40e_alloc_adminq_asq_ring(hw);
	if (ret_code != I40E_SUCCESS)
		goto init_adminq_exit;

	/* allocate buffers in the rings */
	ret_code = i40e_alloc_asq_bufs(hw);
	if (ret_code != I40E_SUCCESS)
		goto init_adminq_free_rings;

	/* initialize base registers */
	ret_code = i40e_config_asq_regs(hw);
	if (ret_code != I40E_SUCCESS)
		goto init_adminq_free_rings;

	/* success! */
	goto init_adminq_exit;

init_adminq_free_rings:
	i40e_free_adminq_asq(hw);

init_adminq_exit:
	return ret_code;
}

/**
 *  i40e_init_arq - initialize ARQ
 *  @hw: pointer to the hardware structure
 *
 *  The main initialization routine for the Admin Receive (Event) Queue.
 *  Prior to calling this function, drivers *MUST* set the following fields
 *  in the hw->aq structure:
 *     - hw->aq.num_arq_entries
 *     - hw->aq.arq_buf_size
 *
 *  Do *NOT* hold the lock when calling this as the memory allocation routines
 *  called are not atomic-context safe
 **/
enum i40e_status_code i40e_init_arq(struct i40e_hw *hw)
{
	enum i40e_status_code ret_code = I40E_SUCCESS;

	if (hw->aq.arq.count > 0) {
		/* queue already initialized */
		ret_code = I40E_ERR_NOT_READY;
		goto init_adminq_exit;
	}

	/* verify input for valid configuration */
	if ((hw->aq.num_arq_entries == 0) ||
	    (hw->aq.arq_buf_size == 0)) {
		ret_code = I40E_ERR_CONFIG;
		goto init_adminq_exit;
	}

	hw->aq.arq.next_to_use = 0;
	hw->aq.arq.next_to_clean = 0;
	hw->aq.arq.count = hw->aq.num_arq_entries;

	/* allocate the ring memory */
	ret_code = i40e_alloc_adminq_arq_ring(hw);
	if (ret_code != I40E_SUCCESS)
		goto init_adminq_exit;

	/* allocate buffers in the rings */
	ret_code = i40e_alloc_arq_bufs(hw);
	if (ret_code != I40E_SUCCESS)
		goto init_adminq_free_rings;

	/* initialize base registers */
	ret_code = i40e_config_arq_regs(hw);
	if (ret_code != I40E_SUCCESS)
		goto init_adminq_free_rings;

	/* success! */
	goto init_adminq_exit;

init_adminq_free_rings:
	i40e_free_adminq_arq(hw);

init_adminq_exit:
	return ret_code;
}

/**
 *  i40e_shutdown_asq - shutdown the ASQ
 *  @hw: pointer to the hardware structure
 *
 *  The main shutdown routine for the Admin Send Queue
 **/
enum i40e_status_code i40e_shutdown_asq(struct i40e_hw *hw)
{
	enum i40e_status_code ret_code = I40E_SUCCESS;

	if (hw->aq.asq.count == 0)
		return I40E_ERR_NOT_READY;

	/* Stop firmware AdminQ processing */
	wr32(hw, hw->aq.asq.head, 0);
	wr32(hw, hw->aq.asq.tail, 0);
	wr32(hw, hw->aq.asq.len, 0);
	wr32(hw, hw->aq.asq.bal, 0);
	wr32(hw, hw->aq.asq.bah, 0);

	/* make sure spinlock is available */
	i40e_acquire_spinlock(&hw->aq.asq_spinlock);

	hw->aq.asq.count = 0; /* to indicate uninitialized queue */

	/* free ring buffers */
	i40e_free_asq_bufs(hw);

	i40e_release_spinlock(&hw->aq.asq_spinlock);

	return ret_code;
}

/**
 *  i40e_shutdown_arq - shutdown ARQ
 *  @hw: pointer to the hardware structure
 *
 *  The main shutdown routine for the Admin Receive Queue
 **/
enum i40e_status_code i40e_shutdown_arq(struct i40e_hw *hw)
{
	enum i40e_status_code ret_code = I40E_SUCCESS;

	if (hw->aq.arq.count == 0)
		return I40E_ERR_NOT_READY;

	/* Stop firmware AdminQ processing */
	wr32(hw, hw->aq.arq.head, 0);
	wr32(hw, hw->aq.arq.tail, 0);
	wr32(hw, hw->aq.arq.len, 0);
	wr32(hw, hw->aq.arq.bal, 0);
	wr32(hw, hw->aq.arq.bah, 0);

	/* make sure spinlock is available */
	i40e_acquire_spinlock(&hw->aq.arq_spinlock);

	hw->aq.arq.count = 0; /* to indicate uninitialized queue */

	/* free ring buffers */
	i40e_free_arq_bufs(hw);

	i40e_release_spinlock(&hw->aq.arq_spinlock);

	return ret_code;
}

/**
 *  i40e_init_adminq - main initialization routine for Admin Queue
 *  @hw: pointer to the hardware structure
 *
 *  Prior to calling this function, drivers *MUST* set the following fields
 *  in the hw->aq structure:
 *     - hw->aq.num_asq_entries
 *     - hw->aq.num_arq_entries
 *     - hw->aq.arq_buf_size
 *     - hw->aq.asq_buf_size
 **/
enum i40e_status_code i40e_init_adminq(struct i40e_hw *hw)
{
	enum i40e_status_code ret_code;
	u16 eetrack_lo, eetrack_hi;
	u16 cfg_ptr, oem_hi, oem_lo;
	int retry = 0;

	/* verify input for valid configuration */
	if ((hw->aq.num_arq_entries == 0) ||
	    (hw->aq.num_asq_entries == 0) ||
	    (hw->aq.arq_buf_size == 0) ||
	    (hw->aq.asq_buf_size == 0)) {
		ret_code = I40E_ERR_CONFIG;
		goto init_adminq_exit;
	}

	/* initialize spin locks */
	i40e_init_spinlock(&hw->aq.asq_spinlock);
	i40e_init_spinlock(&hw->aq.arq_spinlock);

	/* Set up register offsets */
	i40e_adminq_init_regs(hw);

	/* setup ASQ command write back timeout */
	hw->aq.asq_cmd_timeout = I40E_ASQ_CMD_TIMEOUT;

	/* allocate the ASQ */
	ret_code = i40e_init_asq(hw);
	if (ret_code != I40E_SUCCESS)
		goto init_adminq_destroy_spinlocks;

	/* allocate the ARQ */
	ret_code = i40e_init_arq(hw);
	if (ret_code != I40E_SUCCESS)
		goto init_adminq_free_asq;

	/* VF has no need of firmware */
	if (i40e_is_vf(hw))
		goto init_adminq_exit;

	/* There are some cases where the firmware may not be quite ready
	 * for AdminQ operations, so we retry the AdminQ setup a few times
	 * if we see timeouts in this first AQ call.
	 */
	do {
		ret_code = i40e_aq_get_firmware_version(hw,
							&hw->aq.fw_maj_ver,
							&hw->aq.fw_min_ver,
							&hw->aq.fw_build,
							&hw->aq.api_maj_ver,
							&hw->aq.api_min_ver,
							NULL);
		if (ret_code != I40E_ERR_ADMIN_QUEUE_TIMEOUT)
			break;
		retry++;
		i40e_msec_delay(100);
		i40e_resume_aq(hw);
	} while (retry < 10);
	if (ret_code != I40E_SUCCESS)
		goto init_adminq_free_arq;

	/* get the NVM version info */
	i40e_read_nvm_word(hw, I40E_SR_NVM_DEV_STARTER_VERSION,
			   &hw->nvm.version);
	i40e_read_nvm_word(hw, I40E_SR_NVM_EETRACK_LO, &eetrack_lo);
	i40e_read_nvm_word(hw, I40E_SR_NVM_EETRACK_HI, &eetrack_hi);
	hw->nvm.eetrack = (eetrack_hi << 16) | eetrack_lo;
	i40e_read_nvm_word(hw, I40E_SR_BOOT_CONFIG_PTR, &cfg_ptr);
	i40e_read_nvm_word(hw, (cfg_ptr + I40E_NVM_OEM_VER_OFF),
			   &oem_hi);
	i40e_read_nvm_word(hw, (cfg_ptr + (I40E_NVM_OEM_VER_OFF + 1)),
			   &oem_lo);
	hw->nvm.oem_ver = ((u32)oem_hi << 16) | oem_lo;

	if (hw->aq.api_maj_ver > I40E_FW_API_VERSION_MAJOR) {
		ret_code = I40E_ERR_FIRMWARE_API_VERSION;
		goto init_adminq_free_arq;
	}

	/* pre-emptively release the NVM resource lock, in case a previous
	 * owner did not release it
	 */
	i40e_aq_release_resource(hw, I40E_NVM_RESOURCE_ID, 0, NULL);
	hw->aq.nvm_release_on_done = FALSE;
	hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;

	ret_code = i40e_aq_set_hmc_resource_profile(hw,
						    I40E_HMC_PROFILE_DEFAULT,
						    0,
						    NULL);
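	/* the result is deliberately discarded below; failure to set the
	 * default HMC profile is not treated as fatal during init
	 */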
	ret_code = I40E_SUCCESS;

	/* success! */
	goto init_adminq_exit;

init_adminq_free_arq:
	i40e_shutdown_arq(hw);
init_adminq_free_asq:
	i40e_shutdown_asq(hw);
init_adminq_destroy_spinlocks:
	i40e_destroy_spinlock(&hw->aq.asq_spinlock);
	i40e_destroy_spinlock(&hw->aq.arq_spinlock);

init_adminq_exit:
	return ret_code;
}
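
/*
 * Usage sketch (illustrative only, not part of the driver): a caller fills
 * in the required sizing fields before bringing up the Admin Queue.  The
 * entry counts and buffer sizes below are example values, not requirements.
 *
 *	hw->aq.num_asq_entries = 128;
 *	hw->aq.num_arq_entries = 128;
 *	hw->aq.asq_buf_size = 512;
 *	hw->aq.arq_buf_size = 512;
 *	if (i40e_init_adminq(hw) != I40E_SUCCESS)
 *		return (EIO);
 *	...
 *	i40e_shutdown_adminq(hw);
 */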

/**
 *  i40e_shutdown_adminq - shutdown routine for the Admin Queue
 *  @hw: pointer to the hardware structure
 **/
enum i40e_status_code i40e_shutdown_adminq(struct i40e_hw *hw)
{
	enum i40e_status_code ret_code = I40E_SUCCESS;

	if (i40e_check_asq_alive(hw))
		i40e_aq_queue_shutdown(hw, TRUE);

	i40e_shutdown_asq(hw);
	i40e_shutdown_arq(hw);

	/* destroy the spinlocks */
	i40e_destroy_spinlock(&hw->aq.asq_spinlock);
	i40e_destroy_spinlock(&hw->aq.arq_spinlock);

	if (hw->nvm_buff.va)
		i40e_free_virt_mem(hw, &hw->nvm_buff);

	return ret_code;
}

/**
 *  i40e_clean_asq - cleans Admin send queue
 *  @hw: pointer to the hardware structure
 *
 *  Returns the number of free descriptors
 **/
u16 i40e_clean_asq(struct i40e_hw *hw)
{
	struct i40e_adminq_ring *asq = &(hw->aq.asq);
	struct i40e_asq_cmd_details *details;
	u16 ntc = asq->next_to_clean;
	struct i40e_aq_desc desc_cb;
	struct i40e_aq_desc *desc;

	desc = I40E_ADMINQ_DESC(*asq, ntc);
	details = I40E_ADMINQ_DETAILS(*asq, ntc);

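	/* pop descriptors the FW has consumed (between next_to_clean and the
	 * head it reports), firing any completion callback and zeroing each
	 * entry before advancing
	 */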
	while (rd32(hw, hw->aq.asq.head) != ntc) {
		i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
			   "ntc %d head %d.\n", ntc, rd32(hw, hw->aq.asq.head));

		if (details->callback) {
			I40E_ADMINQ_CALLBACK cb_func =
					(I40E_ADMINQ_CALLBACK)details->callback;
			i40e_memcpy(&desc_cb, desc, sizeof(struct i40e_aq_desc),
				    I40E_DMA_TO_DMA);
			cb_func(hw, &desc_cb);
		}
		i40e_memset(desc, 0, sizeof(*desc), I40E_DMA_MEM);
		i40e_memset(details, 0, sizeof(*details), I40E_NONDMA_MEM);
		ntc++;
		if (ntc == asq->count)
			ntc = 0;
		desc = I40E_ADMINQ_DESC(*asq, ntc);
		details = I40E_ADMINQ_DETAILS(*asq, ntc);
	}

	asq->next_to_clean = ntc;

	return I40E_DESC_UNUSED(asq);
}

/**
 *  i40e_asq_done - check if FW has processed the Admin Send Queue
 *  @hw: pointer to the hw struct
 *
 *  Returns TRUE if the firmware has processed all descriptors on the
 *  admin send queue. Returns FALSE if there are still requests pending.
 **/
bool i40e_asq_done(struct i40e_hw *hw)
{
	/* AQ designers suggest use of head for better
	 * timing reliability than DD bit
	 */
	return rd32(hw, hw->aq.asq.head) == hw->aq.asq.next_to_use;
}

/**
 *  i40e_asq_send_command - send command to Admin Queue
 *  @hw: pointer to the hw struct
 *  @desc: prefilled descriptor describing the command (non DMA mem)
 *  @buff: buffer to use for indirect commands
 *  @buff_size: size of buffer for indirect commands
 *  @cmd_details: pointer to command details structure
 *
 *  This is the main send command driver routine for the Admin Queue send
 *  queue.  It runs the queue, cleans the queue, etc.
 **/
enum i40e_status_code i40e_asq_send_command(struct i40e_hw *hw,
				struct i40e_aq_desc *desc,
				void *buff, /* can be NULL */
				u16  buff_size,
				struct i40e_asq_cmd_details *cmd_details)
{
	enum i40e_status_code status = I40E_SUCCESS;
	struct i40e_dma_mem *dma_buff = NULL;
	struct i40e_asq_cmd_details *details;
	struct i40e_aq_desc *desc_on_ring;
	bool cmd_completed = FALSE;
	u16  retval = 0;
	u32  val = 0;

	hw->aq.asq_last_status = I40E_AQ_RC_OK;

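	/* sanity-check the FW-reported head before touching the ring */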
	val = rd32(hw, hw->aq.asq.head);
	if (val >= hw->aq.num_asq_entries) {
		i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
			   "AQTX: head overrun at %d\n", val);
		status = I40E_ERR_QUEUE_EMPTY;
		goto asq_send_command_exit;
	}

	if (hw->aq.asq.count == 0) {
		i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
			   "AQTX: Admin queue not initialized.\n");
		status = I40E_ERR_QUEUE_EMPTY;
		goto asq_send_command_exit;
	}

	details = I40E_ADMINQ_DETAILS(hw->aq.asq, hw->aq.asq.next_to_use);
	if (cmd_details) {
		i40e_memcpy(details,
			    cmd_details,
			    sizeof(struct i40e_asq_cmd_details),
			    I40E_NONDMA_TO_NONDMA);

		/* If cmd_details are provided, copy the cookie.  The
		 * CPU_TO_LE32 is not strictly needed here because the cookie
		 * is ignored by the FW and used only by the driver
		 */
		if (details->cookie) {
			desc->cookie_high =
				CPU_TO_LE32(I40E_HI_DWORD(details->cookie));
			desc->cookie_low =
				CPU_TO_LE32(I40E_LO_DWORD(details->cookie));
		}
	} else {
		i40e_memset(details, 0,
			    sizeof(struct i40e_asq_cmd_details),
			    I40E_NONDMA_MEM);
	}

	/* clear requested flags and then set additional flags if defined */
	desc->flags &= ~CPU_TO_LE16(details->flags_dis);
	desc->flags |= CPU_TO_LE16(details->flags_ena);

	i40e_acquire_spinlock(&hw->aq.asq_spinlock);

	if (buff_size > hw->aq.asq_buf_size) {
		i40e_debug(hw,
			   I40E_DEBUG_AQ_MESSAGE,
			   "AQTX: Invalid buffer size: %d.\n",
			   buff_size);
		status = I40E_ERR_INVALID_SIZE;
		goto asq_send_command_error;
	}

	if (details->postpone && !details->async) {
		i40e_debug(hw,
			   I40E_DEBUG_AQ_MESSAGE,
			   "AQTX: Async flag not set along with postpone flag\n");
		status = I40E_ERR_PARAM;
		goto asq_send_command_error;
	}

	/* call clean and check queue available function to reclaim the
	 * descriptors that were processed by FW; the function returns the
	 * number of descriptors available
	 */
	/* the clean function called here could be called in a separate thread
	 * in case of asynchronous completions
	 */
	if (i40e_clean_asq(hw) == 0) {
		i40e_debug(hw,
			   I40E_DEBUG_AQ_MESSAGE,
			   "AQTX: Error queue is full.\n");
		status = I40E_ERR_ADMIN_QUEUE_FULL;
		goto asq_send_command_error;
	}

	/* initialize the temp desc pointer with the right desc */
	desc_on_ring = I40E_ADMINQ_DESC(hw->aq.asq, hw->aq.asq.next_to_use);

	/* if the desc is available copy the temp desc to the right place */
	i40e_memcpy(desc_on_ring, desc, sizeof(struct i40e_aq_desc),
		    I40E_NONDMA_TO_DMA);

	/* if buff is not NULL assume indirect command */
	if (buff != NULL) {
		dma_buff = &(hw->aq.asq.r.asq_bi[hw->aq.asq.next_to_use]);
		/* copy the user buff into the respective DMA buff */
		i40e_memcpy(dma_buff->va, buff, buff_size,
			    I40E_NONDMA_TO_DMA);
		desc_on_ring->datalen = CPU_TO_LE16(buff_size);

		/* Update the address values in the desc with the pa value
		 * for respective buffer
		 */
		desc_on_ring->params.external.addr_high =
				CPU_TO_LE32(I40E_HI_DWORD(dma_buff->pa));
		desc_on_ring->params.external.addr_low =
				CPU_TO_LE32(I40E_LO_DWORD(dma_buff->pa));
	}

	/* bump the tail */
	i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, "AQTX: desc and buffer:\n");
	i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc_on_ring,
		      buff, buff_size);
	(hw->aq.asq.next_to_use)++;
	if (hw->aq.asq.next_to_use == hw->aq.asq.count)
		hw->aq.asq.next_to_use = 0;
	if (!details->postpone)
		wr32(hw, hw->aq.asq.tail, hw->aq.asq.next_to_use);

	/* if cmd_details are not defined or async flag is not set,
	 * we need to wait for desc write back
	 */
	if (!details->async && !details->postpone) {
		u32 total_delay = 0;

		do {
			/* AQ designers suggest use of head for better
			 * timing reliability than DD bit
			 */
			if (i40e_asq_done(hw))
				break;
			/* ugh! delay while spin_lock */
			i40e_msec_delay(1);
			total_delay++;
		} while (total_delay < hw->aq.asq_cmd_timeout);
	}

	/* if ready, copy the desc back to temp */
	if (i40e_asq_done(hw)) {
		i40e_memcpy(desc, desc_on_ring, sizeof(struct i40e_aq_desc),
			    I40E_DMA_TO_NONDMA);
		if (buff != NULL)
			i40e_memcpy(buff, dma_buff->va, buff_size,
				    I40E_DMA_TO_NONDMA);
		retval = LE16_TO_CPU(desc->retval);
		if (retval != 0) {
			i40e_debug(hw,
				   I40E_DEBUG_AQ_MESSAGE,
				   "AQTX: Command completed with error 0x%X.\n",
				   retval);

			/* strip off FW internal code */
			retval &= 0xff;
		}
		cmd_completed = TRUE;
		if ((enum i40e_admin_queue_err)retval == I40E_AQ_RC_OK)
			status = I40E_SUCCESS;
		else
			status = I40E_ERR_ADMIN_QUEUE_ERROR;
		hw->aq.asq_last_status = (enum i40e_admin_queue_err)retval;
	}

	i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
		   "AQTX: desc and buffer writeback:\n");
	i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, buff, buff_size);

	/* save writeback aq if requested */
	if (details->wb_desc)
		i40e_memcpy(details->wb_desc, desc_on_ring,
			    sizeof(struct i40e_aq_desc), I40E_DMA_TO_NONDMA);

	/* update the error if time out occurred */
	if ((!cmd_completed) &&
	    (!details->async && !details->postpone)) {
		i40e_debug(hw,
			   I40E_DEBUG_AQ_MESSAGE,
			   "AQTX: Writeback timeout.\n");
		status = I40E_ERR_ADMIN_QUEUE_TIMEOUT;
	}

asq_send_command_error:
	i40e_release_spinlock(&hw->aq.asq_spinlock);
asq_send_command_exit:
	return status;
}
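
/*
 * Usage sketch (illustrative only, not part of the driver): issuing a
 * direct command that carries no attached buffer.  The opcode shown is
 * just an example.
 *
 *	struct i40e_aq_desc desc;
 *	enum i40e_status_code status;
 *
 *	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_queue_shutdown);
 *	status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL);
 */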

/**
 *  i40e_fill_default_direct_cmd_desc - AQ descriptor helper function
 *  @desc:     pointer to the temp descriptor (non DMA mem)
 *  @opcode:   the opcode can be used to decide which flags to turn off or on
 *
 *  Fill the desc with default values
 **/
void i40e_fill_default_direct_cmd_desc(struct i40e_aq_desc *desc,
				       u16 opcode)
{
	/* zero out the desc */
	i40e_memset((void *)desc, 0, sizeof(struct i40e_aq_desc),
		    I40E_NONDMA_MEM);
	desc->opcode = CPU_TO_LE16(opcode);
	desc->flags = CPU_TO_LE16(I40E_AQ_FLAG_SI);
}

/**
 *  i40e_clean_arq_element
 *  @hw: pointer to the hw struct
 *  @e: event info from the receive descriptor, includes any buffers
 *  @pending: number of events that could be left to process
 *
 *  This function cleans one Admin Receive Queue element and returns
 *  the contents through e.  It can also return how many events are
 *  left to process through 'pending'.
 **/
enum i40e_status_code i40e_clean_arq_element(struct i40e_hw *hw,
					     struct i40e_arq_event_info *e,
					     u16 *pending)
{
	enum i40e_status_code ret_code = I40E_SUCCESS;
	u16 ntc = hw->aq.arq.next_to_clean;
	struct i40e_aq_desc *desc;
	struct i40e_dma_mem *bi;
	u16 desc_idx;
	u16 datalen;
	u16 flags;
	u16 ntu;

	/* take the lock before we start messing with the ring */
	i40e_acquire_spinlock(&hw->aq.arq_spinlock);

	/* set next_to_use to head */
	if (!i40e_is_vf(hw))
		ntu = (rd32(hw, hw->aq.arq.head) & I40E_PF_ARQH_ARQH_MASK);
	if (i40e_is_vf(hw))
		ntu = (rd32(hw, hw->aq.arq.head) & I40E_VF_ARQH1_ARQH_MASK);
	if (ntu == ntc) {
		/* nothing to do - shouldn't need to update ring's values */
		ret_code = I40E_ERR_ADMIN_QUEUE_NO_WORK;
		goto clean_arq_element_out;
	}

	/* now clean the next descriptor */
	desc = I40E_ADMINQ_DESC(hw->aq.arq, ntc);
	desc_idx = ntc;

	flags = LE16_TO_CPU(desc->flags);
	if (flags & I40E_AQ_FLAG_ERR) {
		ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;
		hw->aq.arq_last_status =
			(enum i40e_admin_queue_err)LE16_TO_CPU(desc->retval);
		i40e_debug(hw,
			   I40E_DEBUG_AQ_MESSAGE,
			   "AQRX: Event received with error 0x%X.\n",
			   hw->aq.arq_last_status);
	}

	i40e_memcpy(&e->desc, desc, sizeof(struct i40e_aq_desc),
		    I40E_DMA_TO_NONDMA);
	datalen = LE16_TO_CPU(desc->datalen);
	e->msg_len = min(datalen, e->buf_len);
	if (e->msg_buf != NULL && (e->msg_len != 0))
		i40e_memcpy(e->msg_buf,
			    hw->aq.arq.r.arq_bi[desc_idx].va,
			    e->msg_len, I40E_DMA_TO_NONDMA);

	i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, "AQRX: desc and buffer:\n");
	i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, e->msg_buf,
		      hw->aq.arq_buf_size);

	/* Restore the original datalen and buffer address in the desc;
	 * FW updates datalen to indicate the event message size
	 */
	bi = &hw->aq.arq.r.arq_bi[ntc];
	i40e_memset((void *)desc, 0, sizeof(struct i40e_aq_desc), I40E_DMA_MEM);

	desc->flags = CPU_TO_LE16(I40E_AQ_FLAG_BUF);
	if (hw->aq.arq_buf_size > I40E_AQ_LARGE_BUF)
		desc->flags |= CPU_TO_LE16(I40E_AQ_FLAG_LB);
	desc->datalen = CPU_TO_LE16((u16)bi->size);
	desc->params.external.addr_high = CPU_TO_LE32(I40E_HI_DWORD(bi->pa));
	desc->params.external.addr_low = CPU_TO_LE32(I40E_LO_DWORD(bi->pa));

	/* set tail = the last cleaned desc index. */
	wr32(hw, hw->aq.arq.tail, ntc);
	/* ntc is updated to tail + 1 */
	ntc++;
	if (ntc == hw->aq.num_arq_entries)
		ntc = 0;
	hw->aq.arq.next_to_clean = ntc;
	hw->aq.arq.next_to_use = ntu;

clean_arq_element_out:
	/* Set pending if needed, unlock and return */
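	/* pending = distance from the updated next_to_clean to the FW head,
	 * with wrap-around handled by adding the ring size when ntc has
	 * passed ntu
	 */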
	if (pending != NULL)
		*pending = (ntc > ntu ? hw->aq.arq.count : 0) + (ntu - ntc);
	i40e_release_spinlock(&hw->aq.arq_spinlock);

	if (i40e_is_nvm_update_op(&e->desc)) {
		if (hw->aq.nvm_release_on_done) {
			i40e_release_nvm(hw);
			hw->aq.nvm_release_on_done = FALSE;
		}

		switch (hw->nvmupd_state) {
		case I40E_NVMUPD_STATE_INIT_WAIT:
			hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
			break;

		case I40E_NVMUPD_STATE_WRITE_WAIT:
			hw->nvmupd_state = I40E_NVMUPD_STATE_WRITING;
			break;

		default:
			break;
		}
	}

	return ret_code;
}

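/*
 * Usage sketch (illustrative only, not part of the driver): draining
 * pending events.  "buf" stands for a caller-allocated buffer of
 * arq_buf_size bytes and is an assumption of this sketch.
 *
 *	struct i40e_arq_event_info e;
 *	u16 pending = 0;
 *
 *	e.buf_len = hw->aq.arq_buf_size;
 *	e.msg_buf = buf;
 *	do {
 *		if (i40e_clean_arq_element(hw, &e, &pending))
 *			break;
 *		... process e.msg_len bytes of e.msg_buf ...
 *	} while (pending);
 */

/**
 *  i40e_resume_aq - resume AQ processing from 0
 *  @hw: pointer to the hardware structure
 **/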
void i40e_resume_aq(struct i40e_hw *hw)
{
	/* Registers are reset after PF reset */
	hw->aq.asq.next_to_use = 0;
	hw->aq.asq.next_to_clean = 0;

	i40e_config_asq_regs(hw);

	hw->aq.arq.next_to_use = 0;
	hw->aq.arq.next_to_clean = 0;

	i40e_config_arq_regs(hw);
}