/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, v.1, (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://opensource.org/licenses/CDDL-1.0.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2014-2017 Cavium, Inc.
 * The contents of this file are subject to the terms of the Common Development
 * and Distribution License, v.1, (the "License").
 *
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the License at
 * http://opensource.org/licenses/CDDL-1.0
 *
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "qede.h"
#include <sys/pci.h>
#include <sys/pcie.h>

extern ddi_dma_attr_t qede_gen_buf_dma_attr;
extern struct ddi_device_acc_attr qede_desc_acc_attr;

/*
 * Find the dma_handle corresponding to the tx, rx data structures
 */
int
qede_osal_find_dma_handle_for_block(qede_t *qede, void *addr,
    ddi_dma_handle_t *dma_handle)
{
	qede_phys_mem_entry_t *entry;
	int ret = DDI_FAILURE;

	mutex_enter(&qede->phys_mem_list.lock);
	/* LINTED E_BAD_PTR_CAST_ALIGN */
	QEDE_LIST_FOR_EACH_ENTRY(entry, &qede->phys_mem_list.head,
	    qede_phys_mem_entry_t, list_entry) {
		if (entry->paddr == addr) {
			*dma_handle = entry->dma_handle;
			ret = DDI_SUCCESS;
			break;
		}
	}

	mutex_exit(&qede->phys_mem_list.lock);

	return (ret);
}

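/*
 * Sync a tracked DMA block (looked up by its physical address) for the
 * device before it accesses the memory (is_post == false), or for the
 * kernel after the device has written it (is_post == true).
 */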
void
qede_osal_dma_sync(struct ecore_dev *edev, void *addr, u32 size, bool is_post)
{
	qede_t *qede = (qede_t *)edev;
	qede_phys_mem_entry_t *entry;
	ddi_dma_handle_t *dma_handle = NULL;
	uint_t type = (is_post == false) ? DDI_DMA_SYNC_FORDEV :
	    DDI_DMA_SYNC_FORKERNEL;

	mutex_enter(&qede->phys_mem_list.lock);

	/* LINTED E_BAD_PTR_CAST_ALIGN */
	QEDE_LIST_FOR_EACH_ENTRY(entry, &qede->phys_mem_list.head,
	    qede_phys_mem_entry_t, list_entry) {
		if (entry->paddr == addr) {
			dma_handle = &entry->dma_handle;
			break;
		}
	}

	if (dma_handle == NULL) {
		qede_print_err("!%s(%d): addr %p not found in list",
		    __func__, qede->instance, addr);
		mutex_exit(&qede->phys_mem_list.lock);
		return;
	}

	(void) ddi_dma_sync(*dma_handle,
	    0 /* offset into the mem block */,
	    size, type);

	mutex_exit(&qede->phys_mem_list.lock);
}

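/*
 * kmem allocations made on behalf of ecore are tracked on
 * qede->mem_list so that qede_osal_cleanup() can reclaim anything
 * that is left behind when ecore unloads.
 */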
void *
qede_osal_zalloc(struct ecore_dev *edev, int flags, size_t size)
{
	qede_t *qede = (qede_t *)edev;
	qede_mem_list_entry_t *new_entry;
	void *buf;

	if ((new_entry = kmem_zalloc(sizeof (qede_mem_list_entry_t), flags))
	    == NULL) {
		qede_print_err("%s(%d): Failed to alloc new list entry",
		    __func__, qede->instance);
		return (NULL);
	}

	if ((buf = kmem_zalloc(size, flags)) == NULL) {
		qede_print_err("%s(%d): Failed to alloc mem, size %lu",
		    __func__, qede->instance, (ulong_t)size);
		kmem_free(new_entry, sizeof (qede_mem_list_entry_t));
		return (NULL);
	}

	new_entry->size = size;
	new_entry->buf = buf;

	mutex_enter(&qede->mem_list.mem_list_lock);
	QEDE_LIST_ADD(&new_entry->mem_entry, &qede->mem_list.mem_list_head);
	mutex_exit(&qede->mem_list.mem_list_lock);

	return (buf);
}

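/*
 * Same as qede_osal_zalloc() except that the returned buffer is not
 * zeroed.
 */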
void *
qede_osal_alloc(struct ecore_dev *edev, int flags, size_t size)
{
	qede_t *qede = (qede_t *)edev;
	qede_mem_list_entry_t *new_entry;
	void *buf;

	if ((new_entry = kmem_zalloc(sizeof (qede_mem_list_entry_t), flags))
	    == NULL) {
		qede_print_err("%s(%d): Failed to alloc new list entry",
		    __func__, qede->instance);
		return (NULL);
	}

	if ((buf = kmem_alloc(size, flags)) == NULL) {
		qede_print_err("%s(%d): Failed to alloc %lu bytes",
		    __func__, qede->instance, (ulong_t)size);
		kmem_free(new_entry, sizeof (qede_mem_list_entry_t));
		return (NULL);
	}

	new_entry->size = size;
	new_entry->buf = buf;

	mutex_enter(&qede->mem_list.mem_list_lock);
	QEDE_LIST_ADD(&new_entry->mem_entry, &qede->mem_list.mem_list_head);
	mutex_exit(&qede->mem_list.mem_list_lock);

	return (buf);
}

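/*
 * Free a buffer obtained from qede_osal_alloc()/qede_osal_zalloc() and
 * drop its tracking entry.
 */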
void
qede_osal_free(struct ecore_dev *edev, void *addr)
{
	qede_t *qede = (qede_t *)edev;
	qede_mem_list_entry_t *mem_entry;

	mutex_enter(&qede->mem_list.mem_list_lock);

	/* LINTED E_BAD_PTR_CAST_ALIGN */
	QEDE_LIST_FOR_EACH_ENTRY(mem_entry, &qede->mem_list.mem_list_head,
	    qede_mem_list_entry_t, mem_entry) {
		if (mem_entry->buf == addr) {
			QEDE_LIST_REMOVE(&mem_entry->mem_entry,
			    &qede->mem_list.mem_list_head);
			kmem_free(addr, mem_entry->size);
			kmem_free(mem_entry, sizeof (qede_mem_list_entry_t));
			break;
		}
	}

	mutex_exit(&qede->mem_list.mem_list_lock);
}

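/*
 * Allocate a physically contiguous, DMA-coherent buffer: allocate a
 * DMA handle, allocate the memory, then bind it, requiring a single
 * cookie. Each allocation is tracked on qede->phys_mem_list so it can
 * be found again by qede_osal_dma_sync() and freed by
 * qede_osal_dma_free_coherent().
 */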
/*
 * @VB: What are the alignment requirements here ??
 */
void *
qede_osal_dma_alloc_coherent(struct ecore_dev *edev, dma_addr_t *paddr,
    size_t size)
{
	qede_t *qede = (qede_t *)edev;
	qede_phys_mem_entry_t *new_entry;
	ddi_dma_handle_t *dma_handle;
	ddi_acc_handle_t *dma_acc_handle;
	ddi_dma_cookie_t cookie;
	int ret;
	caddr_t pbuf;
	unsigned int count;

	memset(&cookie, 0, sizeof (cookie));

	if ((new_entry =
	    kmem_zalloc(sizeof (qede_phys_mem_entry_t), KM_NOSLEEP)) == NULL) {
		qede_print_err("%s(%d): Failed to alloc new list entry",
		    __func__, qede->instance);
		return (NULL);
	}

	dma_handle = &new_entry->dma_handle;
	dma_acc_handle = &new_entry->dma_acc_handle;

	if ((ret = ddi_dma_alloc_handle(qede->dip, &qede_gen_buf_dma_attr,
	    DDI_DMA_DONTWAIT, NULL, dma_handle)) != DDI_SUCCESS) {
		qede_print_err("%s(%d): Failed to alloc dma handle",
		    __func__, qede->instance);
		qede_stacktrace(qede);
		goto free;
	}

	if ((ret = ddi_dma_mem_alloc(*dma_handle, size, &qede_desc_acc_attr,
	    DDI_DMA_CONSISTENT, DDI_DMA_DONTWAIT, NULL, &pbuf, &size,
	    dma_acc_handle)) != DDI_SUCCESS) {
		qede_print_err("%s(%d): Failed to alloc dma mem %lu bytes",
		    __func__, qede->instance, (ulong_t)size);
		qede_stacktrace(qede);
		goto free_hdl;
	}

	if ((ret = ddi_dma_addr_bind_handle(*dma_handle, NULL, pbuf, size,
	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
	    DDI_DMA_DONTWAIT, NULL, &cookie, &count)) != DDI_DMA_MAPPED) {
		qede_print("!%s(%d): failed to bind dma addr to handle,"
		    " ret %d",
		    __func__, qede->instance, ret);
		goto free_dma_mem;
	}

	if (count != 1) {
		qede_print("%s(%d): ncookies = %u for phys addr 0x%llx, "
		    "discard dma buffer",
		    __func__, qede->instance, count,
		    (unsigned long long)cookie.dmac_laddress);
		(void) ddi_dma_unbind_handle(*dma_handle);
		goto free_dma_mem;
	}

	new_entry->size = size;
	new_entry->virt_addr = pbuf;

	new_entry->paddr = (void *)cookie.dmac_laddress;

	*paddr = (dma_addr_t)new_entry->paddr;

	mutex_enter(&qede->phys_mem_list.lock);
	QEDE_LIST_ADD(&new_entry->list_entry, &qede->phys_mem_list.head);
	mutex_exit(&qede->phys_mem_list.lock);

	return (new_entry->virt_addr);

free_dma_mem:
	ddi_dma_mem_free(dma_acc_handle);
free_hdl:
	ddi_dma_free_handle(dma_handle);
free:
	kmem_free(new_entry, sizeof (qede_phys_mem_entry_t));
	return (NULL);
}

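/*
 * Tear down a coherent DMA allocation made by
 * qede_osal_dma_alloc_coherent(): unbind the handle, free the memory
 * and the handle, and drop the tracking entry.
 */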
void
qede_osal_dma_free_coherent(struct ecore_dev *edev, void *vaddr,
    dma_addr_t paddr, size_t size)
{
	qede_t *qede = (qede_t *)edev;
	qede_phys_mem_entry_t *entry;

	mutex_enter(&qede->phys_mem_list.lock);

	/* LINTED E_BAD_PTR_CAST_ALIGN */
	QEDE_LIST_FOR_EACH_ENTRY(entry, &qede->phys_mem_list.head,
	    qede_phys_mem_entry_t, list_entry) {
		if (entry->virt_addr == vaddr) {
			QEDE_LIST_REMOVE(&entry->list_entry,
			    &qede->phys_mem_list.head);
			(void) ddi_dma_unbind_handle(entry->dma_handle);
			ddi_dma_mem_free(&entry->dma_acc_handle);
			ddi_dma_free_handle(&entry->dma_handle);
			kmem_free(entry, sizeof (qede_phys_mem_entry_t));
			break;
		}
	}

	mutex_exit(&qede->phys_mem_list.lock);
}

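/*
 * Map the firmware-reported media type to a GLD media/port type.
 */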
static int
qede_get_port_type(uint32_t media_type)
{
	uint32_t port_type;

	switch (media_type) {
	case MEDIA_SFPP_10G_FIBER:
	case MEDIA_SFP_1G_FIBER:
	case MEDIA_XFP_FIBER:
	case MEDIA_KR:
		port_type = GLDM_FIBER;
		break;
	case MEDIA_DA_TWINAX:
		port_type = GLDM_BNC; /* Check? */
		break;
	case MEDIA_BASE_T:
		port_type = GLDM_TP;
		break;
	case MEDIA_NOT_PRESENT:
	case MEDIA_UNSPECIFIED:
	default:
		port_type = GLDM_UNKNOWN;
		break;
	}
	return (port_type);
}

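/*
 * Fill in a qede_link_cfg from the MCP's view of the link: port type,
 * link state, autonegotiation/pause settings, and the local supported,
 * locally advertised and link-partner speed capabilities.
 */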
void
qede_get_link_info(struct ecore_hwfn *hwfn, struct qede_link_cfg *lnkCfg)
{
	struct ecore_dev *edev = (struct ecore_dev *)hwfn->p_dev;
	qede_t *qede = (qede_t *)(void *)edev;
	uint32_t media_type;
	struct ecore_mcp_link_state lnk_state;
	struct ecore_mcp_link_params lnk_params;
	struct ecore_mcp_link_capabilities lnk_caps;

	ecore_mcp_get_media_type(edev, &media_type);
	lnkCfg->port = qede_get_port_type(media_type);

	memcpy(&lnk_state, ecore_mcp_get_link_state(hwfn),
	    sizeof (lnk_state));
	memcpy(&lnk_params, ecore_mcp_get_link_params(hwfn),
	    sizeof (lnk_params));
	memcpy(&lnk_caps, ecore_mcp_get_link_capabilities(hwfn),
	    sizeof (lnk_caps));

	if (lnk_state.link_up) {
		lnkCfg->link_up = B_TRUE;
		lnkCfg->speed = lnk_state.speed;
		lnkCfg->duplex = DUPLEX_FULL;
	}

	if (lnk_params.speed.autoneg) {
		lnkCfg->supp_capab.autoneg = B_TRUE;
		lnkCfg->adv_capab.autoneg = B_TRUE;
	}
	if (lnk_params.speed.autoneg ||
	    (lnk_params.pause.forced_rx && lnk_params.pause.forced_tx)) {
		lnkCfg->supp_capab.asym_pause = B_TRUE;
		lnkCfg->adv_capab.asym_pause = B_TRUE;
	}
	if (lnk_params.speed.autoneg ||
	    lnk_params.pause.forced_rx || lnk_params.pause.forced_tx) {
		lnkCfg->supp_capab.pause = B_TRUE;
		lnkCfg->adv_capab.pause = B_TRUE;
	}

	if (lnk_params.speed.advertised_speeds &
	    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G) {
		lnkCfg->adv_capab.param_10000fdx = B_TRUE;
	}
	if (lnk_params.speed.advertised_speeds &
	    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G) {
		lnkCfg->adv_capab.param_25000fdx = B_TRUE;
	}
	if (lnk_params.speed.advertised_speeds &
	    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G) {
		lnkCfg->adv_capab.param_40000fdx = B_TRUE;
	}
	if (lnk_params.speed.advertised_speeds &
	    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G) {
		lnkCfg->adv_capab.param_50000fdx = B_TRUE;
	}
	if (lnk_params.speed.advertised_speeds &
	    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G) {
		lnkCfg->adv_capab.param_100000fdx = B_TRUE;
	}
	if (lnk_params.speed.advertised_speeds &
	    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G) {
		lnkCfg->adv_capab.param_1000fdx = B_TRUE;
		lnkCfg->adv_capab.param_1000hdx = B_TRUE;
	}

	lnkCfg->autoneg = lnk_params.speed.autoneg;

	if (lnk_caps.speed_capabilities &
	    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G) {
		lnkCfg->supp_capab.param_10000fdx = B_TRUE;
	}
	if (lnk_caps.speed_capabilities &
	    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G) {
		lnkCfg->supp_capab.param_25000fdx = B_TRUE;
	}
	if (lnk_caps.speed_capabilities &
	    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G) {
		lnkCfg->supp_capab.param_40000fdx = B_TRUE;
	}
	if (lnk_caps.speed_capabilities &
	    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G) {
		lnkCfg->supp_capab.param_50000fdx = B_TRUE;
	}
	if (lnk_caps.speed_capabilities &
	    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G) {
		lnkCfg->supp_capab.param_100000fdx = B_TRUE;
	}
	if (lnk_caps.speed_capabilities &
	    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G) {
		lnkCfg->supp_capab.param_1000fdx = B_TRUE;
		lnkCfg->supp_capab.param_1000hdx = B_TRUE;
	}

	if (lnk_params.pause.autoneg) {
		lnkCfg->pause_cfg |= QEDE_LINK_PAUSE_AUTONEG_ENABLE;
	}
	if (lnk_params.pause.forced_rx) {
		lnkCfg->pause_cfg |= QEDE_LINK_PAUSE_RX_ENABLE;
	}
	if (lnk_params.pause.forced_tx) {
		lnkCfg->pause_cfg |= QEDE_LINK_PAUSE_TX_ENABLE;
	}

	if (lnk_state.partner_adv_speed &
	    ECORE_LINK_PARTNER_SPEED_1G_HD) {
		lnkCfg->rem_capab.param_1000hdx = B_TRUE;
	}
	if (lnk_state.partner_adv_speed &
	    ECORE_LINK_PARTNER_SPEED_1G_FD) {
		lnkCfg->rem_capab.param_1000fdx = B_TRUE;
	}
	if (lnk_state.partner_adv_speed &
	    ECORE_LINK_PARTNER_SPEED_10G) {
		lnkCfg->rem_capab.param_10000fdx = B_TRUE;
	}
	if (lnk_state.partner_adv_speed &
	    ECORE_LINK_PARTNER_SPEED_40G) {
		lnkCfg->rem_capab.param_40000fdx = B_TRUE;
	}
	if (lnk_state.partner_adv_speed &
	    ECORE_LINK_PARTNER_SPEED_50G) {
		lnkCfg->rem_capab.param_50000fdx = B_TRUE;
	}
	if (lnk_state.partner_adv_speed &
	    ECORE_LINK_PARTNER_SPEED_100G) {
		lnkCfg->rem_capab.param_100000fdx = B_TRUE;
	}

	if (lnk_state.an_complete) {
		lnkCfg->rem_capab.autoneg = B_TRUE;
	}

	if (lnk_state.partner_adv_pause) {
		lnkCfg->rem_capab.pause = B_TRUE;
	}
	if (lnk_state.partner_adv_pause ==
	    ECORE_LINK_PARTNER_ASYMMETRIC_PAUSE ||
	    lnk_state.partner_adv_pause == ECORE_LINK_PARTNER_BOTH_PAUSE) {
		lnkCfg->rem_capab.asym_pause = B_TRUE;
	}
}

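/*
 * ecore link-change callback: refresh the cached link properties and
 * notify the MAC layer of the new link state.
 */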
void
qede_osal_link_update(struct ecore_hwfn *hwfn)
{
	struct ecore_dev *edev = (struct ecore_dev *)hwfn->p_dev;
	qede_t *qede = (qede_t *)(void *)edev;
	struct qede_link_cfg link_cfg;

	memset(&link_cfg, 0, sizeof (struct qede_link_cfg));
	qede_get_link_info(hwfn, &link_cfg);

	if (link_cfg.duplex == DUPLEX_FULL) {
		qede->props.link_duplex = DUPLEX_FULL;
	} else {
		qede->props.link_duplex = DUPLEX_HALF;
	}

	if (!link_cfg.link_up) {
		qede_print("!%s(%d): Link marked down",
		    __func__, qede->instance);
		qede->params.link_state = 0;
		qede->props.link_duplex = B_FALSE;
		qede->props.link_speed = 0;
		qede->props.tx_pause = B_FALSE;
		qede->props.rx_pause = B_FALSE;
		qede->props.uptime = 0;
		mac_link_update(qede->mac_handle, LINK_STATE_DOWN);
	} else {
		qede_print("!%s(%d): Link marked up",
		    __func__, qede->instance);
		qede->params.link_state = 1;
		qede->props.link_speed = link_cfg.speed;
		qede->props.link_duplex = link_cfg.duplex;
		qede->props.tx_pause = (link_cfg.pause_cfg &
		    QEDE_LINK_PAUSE_TX_ENABLE) ? B_TRUE : B_FALSE;
		qede->props.rx_pause = (link_cfg.pause_cfg &
		    QEDE_LINK_PAUSE_RX_ENABLE) ? B_TRUE : B_FALSE;
		qede->props.uptime = ddi_get_time();
		mac_link_update(qede->mac_handle, LINK_STATE_UP);
	}
}

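/*
 * Round n up to the nearest power of two; 0 stays 0.
 * e.g. log2_align(5) == 8, log2_align(8) == 8.
 */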
unsigned long
log2_align(unsigned long n)
{
	unsigned long ret = n ? 1 : 0;
	unsigned long _n = n >> 1;

	while (_n) {
		_n >>= 1;
		ret <<= 1;
	}

	if (ret < n) {
		ret <<= 1;
	}

	return (ret);
}

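/*
 * Integer base-2 logarithm, rounded down; LOG2(0) is 0.
 * e.g. LOG2(8) == 3, LOG2(9) == 3.
 */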
u32
LOG2(u32 v)
{
	u32 r = 0;

	while (v >>= 1) {
		r++;
	}
	return (r);
}

int
/* LINTED E_FUNC_ARG_UNUSED */
qede_osal_pci_find_ext_capab(struct ecore_dev *edev, u16 pcie_id)
{
	int offset = 0;

	return (offset);
}

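/*
 * Register access helpers. Offsets are byte offsets from the mapped
 * BAR0 base (most registers); qede_osal_pci_bar2_write32() uses the
 * BAR2 (doorbell) space instead.
 */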
void
qede_osal_pci_write32(struct ecore_hwfn *hwfn, u32 offset, u32 val)
{
	struct ecore_dev *edev = (struct ecore_dev *)hwfn->p_dev;
	qede_t *qede = (qede_t *)(void *)edev;
	u64 addr = qede->pci_bar0_base;

	addr += offset;

	ddi_put32(qede->regs_handle, (u32 *)addr, val);
}

void
qede_osal_pci_write16(struct ecore_hwfn *hwfn, u32 offset, u16 val)
{
	struct ecore_dev *edev = (struct ecore_dev *)hwfn->p_dev;
	qede_t *qede = (qede_t *)(void *)edev;
	u64 addr = qede->pci_bar0_base;

	addr += offset;

	ddi_put16(qede->regs_handle, (u16 *)addr, val);
}

u32
qede_osal_pci_read32(struct ecore_hwfn *hwfn, u32 offset)
{
	struct ecore_dev *edev = (struct ecore_dev *)hwfn->p_dev;
	qede_t *qede = (qede_t *)(void *)edev;
	u32 val = 0;
	u64 addr = qede->pci_bar0_base;

	addr += offset;

	val = ddi_get32(qede->regs_handle, (u32 *)addr);

	return (val);
}

void
qede_osal_pci_bar2_write32(struct ecore_hwfn *hwfn, u32 offset, u32 val)
{
	struct ecore_dev *edev = (struct ecore_dev *)hwfn->p_dev;
	qede_t *qede = (qede_t *)(void *)edev;
	u64 addr = qede->pci_bar2_base;

	addr += offset;
	ddi_put32(qede->doorbell_handle, (u32 *)addr, val);
}

u32
qede_osal_direct_reg_read32(struct ecore_hwfn *hwfn, void *addr)
{
	struct ecore_dev *edev = (struct ecore_dev *)hwfn->p_dev;
	qede_t *qede = (qede_t *)(void *)edev;

	return (ddi_get32(qede->regs_handle, (u32 *)addr));
}

void
qede_osal_direct_reg_write32(struct ecore_hwfn *hwfn, void *addr, u32 value)
{
	struct ecore_dev *edev = (struct ecore_dev *)hwfn->p_dev;
	qede_t *qede = (qede_t *)(void *)edev;

	ddi_put32(qede->regs_handle, (u32 *)addr, value);
}

u32 *
qede_osal_reg_addr(struct ecore_hwfn *hwfn, u32 addr)
{
	struct ecore_dev *edev = (struct ecore_dev *)hwfn->p_dev;
	qede_t *qede = (qede_t *)(void *)edev;

	return ((u32 *)(qede->pci_bar0_base + addr));
}

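/*
 * PCI configuration space accessors.
 */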
void
qede_osal_pci_read_config_byte(struct ecore_dev *edev, u32 addr, u8 *val)
{
	qede_t *qede = (qede_t *)edev;

	*val = pci_config_get8(qede->pci_cfg_handle, (off_t)addr);
}

void
qede_osal_pci_read_config_word(struct ecore_dev *edev, u32 addr, u16 *val)
{
	qede_t *qede = (qede_t *)edev;

	*val = pci_config_get16(qede->pci_cfg_handle, (off_t)addr);
}

void
qede_osal_pci_read_config_dword(struct ecore_dev *edev, u32 addr, u32 *val)
{
	qede_t *qede = (qede_t *)edev;

	*val = pci_config_get32(qede->pci_cfg_handle, (off_t)addr);
}

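/*
 * Logging helpers. Both route through cmn_err(), so the usual "!",
 * "^" and "?" prefix conventions apply to the format string.
 */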
void
qede_print(char *format, ...)
{
	va_list ap;

	va_start(ap, format);
	vcmn_err(CE_NOTE, format, ap);
	va_end(ap);
}

void
qede_print_err(char *format, ...)
{
	va_list ap;

	va_start(ap, format);
	vcmn_err(CE_WARN, format, ap);
	va_end(ap);
}

/*
 * Check whether any mem/dma entries were left behind after unloading
 * the ecore, and free any that are found.
 */
u32
qede_osal_cleanup(qede_t *qede)
{
	qede_mem_list_entry_t *entry = NULL;
	qede_mem_list_entry_t *temp = NULL;
	qede_phys_mem_entry_t *entry_phys;
	qede_phys_mem_entry_t *temp_phys;

	/*
	 * Check for misplaced mem. blocks (if any)
	 */
	mutex_enter(&qede->mem_list.mem_list_lock);

	if (!QEDE_LIST_EMPTY(&qede->mem_list.mem_list_head)) {
		/*
		 * Something went wrong either in ecore
		 * or the osal mem management routines
		 * and the mem entry was not freed
		 */
		qede_print_err("!%s(%d): Mem entries left behind",
		    __func__, qede->instance);

		QEDE_LIST_FOR_EACH_ENTRY_SAFE(entry,
		    temp,
		    /* LINTED E_BAD_PTR_CAST_ALIGN */
		    &qede->mem_list.mem_list_head,
		    mem_entry,
		    qede_mem_list_entry_t) {
			qede_print("!%s(%d): Cleaning-up entry %p",
			    __func__, qede->instance, entry);
			QEDE_LIST_REMOVE(&entry->mem_entry,
			    &qede->mem_list.mem_list_head);
			if (entry->buf) {
				kmem_free(entry->buf, entry->size);
			}
			kmem_free(entry, sizeof (qede_mem_list_entry_t));
		}
	}

	mutex_exit(&qede->mem_list.mem_list_lock);

	/*
	 * Check for misplaced dma blocks (if any)
	 */
	mutex_enter(&qede->phys_mem_list.lock);

	if (!QEDE_LIST_EMPTY(&qede->phys_mem_list.head)) {
		qede_print("!%s(%d): Dma entries left behind",
		    __func__, qede->instance);

		QEDE_LIST_FOR_EACH_ENTRY_SAFE(entry_phys,
		    temp_phys,
		    /* LINTED E_BAD_PTR_CAST_ALIGN */
		    &qede->phys_mem_list.head,
		    list_entry,
		    qede_phys_mem_entry_t) {
			qede_print("!%s(%d): Cleaning-up entry %p",
			    __func__, qede->instance, entry_phys);
			QEDE_LIST_REMOVE(&entry_phys->list_entry,
			    &qede->phys_mem_list.head);

			if (entry_phys->virt_addr) {
				(void) ddi_dma_unbind_handle(
				    entry_phys->dma_handle);
				ddi_dma_mem_free(&entry_phys->dma_acc_handle);
				ddi_dma_free_handle(&entry_phys->dma_handle);
			}
			kmem_free(entry_phys,
			    sizeof (qede_phys_mem_entry_t));
		}
	}

	mutex_exit(&qede->phys_mem_list.lock);

	return (0);
}

void
qede_osal_recovery_handler(struct ecore_hwfn *hwfn)
{
	struct ecore_dev *edev = (struct ecore_dev *)hwfn->p_dev;
	qede_t *qede = (qede_t *)(void *)edev;

	cmn_err(CE_WARN, "!%s(%d): Not implemented",
	    __func__, qede->instance);
}

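/*
 * Most of the remaining OSAL entry points are not needed on illumos
 * and are stubbed out.
 */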
enum _ecore_status_t
qede_osal_iov_vf_acquire(struct ecore_hwfn *p_hwfn, int vf_id)
{
	return (ECORE_SUCCESS);
}

void
qede_osal_pci_write_config_word(struct ecore_dev *dev, u32 addr, u16 pcie_id)
{
	qede_t *qede = (qede_t *)dev;
	ddi_acc_handle_t pci_cfg_handle = qede->pci_cfg_handle;

	pci_config_put16(pci_cfg_handle, (off_t)addr, pcie_id);
}

void *
qede_osal_valloc(struct ecore_dev *dev, u32 size)
{
	void *ptr = NULL;

	return (ptr);
}

void
qede_osal_vfree(struct ecore_dev *dev, void *mem)
{
}

int
/* LINTED E_FUNC_ARG_UNUSED */
qede_osal_pci_find_capability(struct ecore_dev *dev, u16 pcie_id)
{
	return (1);
}

void
qede_osal_poll_mode_dpc(struct ecore_hwfn *p_hwfn)
{
}

int
/* LINTED E_FUNC_ARG_UNUSED */
qede_osal_bitmap_weight(unsigned long *bitmap, uint32_t nbits)
{
	uint32_t count = 0;

	return (count);
}

void
/* LINTED E_FUNC_ARG_UNUSED */
qede_osal_mfw_tlv_req(struct ecore_hwfn *p_hwfn)
{
}

u32
/* LINTED E_FUNC_ARG_UNUSED */
qede_osal_crc32(u32 crc, u8 *buf, u64 length)
{
	return (1);
}

void
/* LINTED E_FUNC_ARG_UNUSED */
qede_osal_hw_info_change(struct ecore_hwfn *p_hwfn, int change)
{
}

void
/* LINTED E_FUNC_ARG_UNUSED */
OSAL_CRC8_POPULATE(u8 *cdu_crc8_table, u8 polynomial)
{
}

u8
/* LINTED E_FUNC_ARG_UNUSED */
OSAL_CRC8(u8 *cdu_crc8_table, u8 *data_to_crc, int data_to_crc_len,
    u8 init_value)
{
	return (0);
}

void
/* LINTED E_FUNC_ARG_UNUSED */
OSAL_DPC_SYNC(struct ecore_hwfn *p_hwfn)
{
	/* Do nothing right now. */
}