/*
 * Copyright 2014-2017 Cavium, Inc.
 * The contents of this file are subject to the terms of the Common Development
 * and Distribution License, v.1, (the "License").
 *
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the License at
 * http://opensource.org/licenses/CDDL-1.0
 *
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/*
 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2019, Joyent, Inc.
 */

#include "bnxrcv.h"


#define	BNX_RECV_INIT_FAIL_THRESH 1

#ifndef	NUM_RX_CHAIN
#error NUM_RX_CHAIN is not defined.
#else
/*
 * Range check NUM_RX_CHAIN.  Technically the LM controls this definition,
 * but it makes sense to use what the LM uses.
 */
#if NUM_RX_CHAIN < 0
#error Invalid NUM_RX_CHAIN definition.
#elif NUM_RX_CHAIN > 1
#warning NUM_RX_CHAIN is greater than 1.
#endif
#endif

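/* DMA attributes used when allocating receive (jumbo) buffers. */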
static ddi_dma_attr_t bnx_rx_jmb_dma_attrib = {
	DMA_ATTR_V0,			/* dma_attr_version */
	0,				/* dma_attr_addr_lo */
	0xffffffffffffffff,		/* dma_attr_addr_hi */
	0x0ffffff,			/* dma_attr_count_max */
	BNX_DMA_ALIGNMENT,		/* dma_attr_align */
	0xffffffff,			/* dma_attr_burstsizes */
	1,				/* dma_attr_minxfer */
	0x00ffffff,			/* dma_attr_maxxfer */
	0xffffffff,			/* dma_attr_seg */
	BNX_RECV_MAX_FRAGS,		/* dma_attr_sgllen */
	BNX_MIN_BYTES_PER_FRAGMENT,	/* dma_attr_granular */
	0,				/* dma_attr_flags */
};

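/*
 * Allocate a DMA-able receive buffer for a packet descriptor using the
 * standard three-step DDI sequence: allocate a DMA handle, allocate
 * memory against it, then bind the memory to the handle.  On success the
 * buffer's physical address is recorded in the lm_packet_t.  Returns 0
 * on success and -1 on failure, undoing any partial work.
 */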
static int
bnx_rxbuffer_alloc(um_device_t *const umdevice, um_rxpacket_t *const umpacket)
{
	int rc;
	size_t pktsize;
	size_t reallen;
	uint_t dc_count;
	lm_packet_t *lmpacket;
	ddi_dma_cookie_t cookie;

	lmpacket = &(umpacket->lmpacket);

	rc = ddi_dma_alloc_handle(umdevice->os_param.dip,
	    &bnx_rx_jmb_dma_attrib, DDI_DMA_DONTWAIT,
	    (void *)0, &(umpacket->dma_handle));
	if (rc != DDI_SUCCESS) {
		return (-1);
	}

	/*
	 * The buffer size as set by the lower module is the actual buffer
	 * size plus room for a small, 16 byte inline rx buffer descriptor
	 * header plus an implied two byte TCP shift optimization.  We
	 * don't need to adjust the size at all.
	 */
	pktsize = lmpacket->u1.rx.buf_size;

	rc = ddi_dma_mem_alloc(umpacket->dma_handle, pktsize,
	    &bnxAccessAttribBUF, DDI_DMA_STREAMING, DDI_DMA_DONTWAIT,
	    (void *)0, (caddr_t *)&lmpacket->u1.rx.mem_virt, &reallen,
	    &umpacket->dma_acc_handle);
	if (rc != DDI_SUCCESS) {
		goto error1;
	}

	/* Bind the message block buffer address to the handle. */
	rc = ddi_dma_addr_bind_handle(umpacket->dma_handle, NULL,
	    (caddr_t)lmpacket->u1.rx.mem_virt, pktsize,
	    DDI_DMA_READ | DDI_DMA_STREAMING, DDI_DMA_DONTWAIT, NULL,
	    &cookie, &dc_count);
	if (rc != DDI_DMA_MAPPED) {
		goto error2;
	}

	lmpacket->u1.rx.mem_phy.as_u64 = cookie.dmac_laddress;

	return (0);

error2:
	ddi_dma_mem_free(&(umpacket->dma_acc_handle));

error1:
	ddi_dma_free_handle(&(umpacket->dma_handle));

	return (-1);
}

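/*
 * Release a packet descriptor's DMA buffer in the reverse order of
 * allocation: unbind the handle, free the memory, then free the handle.
 */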
static void
bnx_rxbuffer_free(um_device_t * const umdevice, um_rxpacket_t * const umpacket)
{
	lm_packet_t *lmpacket;

	lmpacket = &(umpacket->lmpacket);

	lmpacket->u1.rx.mem_phy.as_u64 = 0;
	lmpacket->u1.rx.buf_size = 0;

	(void) ddi_dma_unbind_handle(umpacket->dma_handle);

	lmpacket->u1.rx.mem_virt = NULL;
	ddi_dma_mem_free(&umpacket->dma_acc_handle);

	ddi_dma_free_handle(&(umpacket->dma_handle));
}

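/*
 * Prepare a receive ring for use: mark it idle, move every descriptor
 * from the LM's free descriptor queue to the buffer queue (descriptors
 * that do not yet have buffers attached), and reset the wait queue of
 * packets pending a send-up.
 */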
static void
bnx_recv_ring_init(um_device_t * const umdevice, const unsigned int ringidx)
{
	s_list_t *srcq;
	s_list_t *dstq;
	lm_rx_chain_t *lmrxring;
	um_recv_qinfo *recvinfo;
	um_rxpacket_t *umpacket;

	recvinfo = &_RX_QINFO(umdevice, ringidx);

	recvinfo->processing = B_FALSE;

	lmrxring = &umdevice->lm_dev.rx_info.chain[ringidx];

	srcq = &(lmrxring->free_descq);

	dstq = &(recvinfo->buffq);

	s_list_init(dstq, NULL, NULL, 0);

	/* CONSTANTCONDITION */
	/*
	 * Put all available packet descriptors in the buffer queue.  The
	 * buffer queue is an area to store packet descriptors that do
	 * not yet have buffers associated with them.
	 */
	while (1) {
		umpacket = (um_rxpacket_t *)s_list_pop_head(srcq);
		if (umpacket == NULL) {
			break;
		}

		s_list_push_tail(dstq, &(umpacket->lmpacket.link));
	}

	dstq  = &(recvinfo->waitq);

	s_list_init(dstq, NULL, NULL, 0);
}

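/*
 * Attach a DMA buffer to as many waiting descriptors as possible and
 * move them to the LM's free descriptor queue.  Allocation stops at the
 * first failure; the remaining descriptors stay on the buffer queue so
 * a later call can retry them.
 */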
static void
bnx_recv_ring_fill(um_device_t * const umdevice, const unsigned int ringidx)
{
	s_list_t *srcq;
	s_list_t *dstq;
	um_rxpacket_t *umpacket;
	um_recv_qinfo *recvinfo;

	recvinfo = &(_RX_QINFO(umdevice, ringidx));

	srcq = &(recvinfo->buffq);

	dstq = &(umdevice->lm_dev.rx_info.chain[ringidx].free_descq);

	/* CONSTANTCONDITION */
	/* Populate as many of the packet descriptors as we can. */
	while (1) {
		umpacket = (um_rxpacket_t *)s_list_pop_head(srcq);
		if (umpacket == NULL) {
			break;
		}

		if (bnx_rxbuffer_alloc(umdevice, umpacket) != 0) {
			s_list_push_head(srcq, &umpacket->lmpacket.link);
			break;
		}

		s_list_push_tail(dstq, &umpacket->lmpacket.link);
	}
}

/*
 * Take received packets off the wait queue and pass them up to the MAC
 * layer, copying small packets (and reinserting any stripped VLAN tag)
 * along the way.
 *
 * NOTE!!!  This function assumes the rcv_mutex is already held.
 */
static void
bnx_recv_ring_recv(um_device_t *const umdevice, const unsigned int ringidx)
{
	mblk_t *head = NULL;
	mblk_t *tail = NULL;
	s_list_t *srcq;
	s_list_t *recvq;
	s_list_t *freeq;
	boolean_t dcopy;
	boolean_t lm_rcvq_empty;
	lm_packet_t *lmpacket;
	um_rxpacket_t *umpacket;
	um_recv_qinfo *recvinfo;

	recvinfo = &(_RX_QINFO(umdevice, ringidx));

	/*
	 * We can't hold the receive mutex across the receive function or
	 * deadlock results.  So that other threads know we are still doing
	 * business, toggle a flag they can look at.  If the flag says
	 * we're processing, other threads should back off.
	 */
	recvinfo->processing = B_TRUE;

	srcq  = &(recvinfo->waitq);
	freeq = &(umdevice->lm_dev.rx_info.chain[ringidx].free_descq);

	recvq = &(umdevice->lm_dev.rx_info.chain[ringidx].active_descq);
	if (s_list_entry_cnt(recvq)) {
		lm_rcvq_empty = B_FALSE;
	} else {
		lm_rcvq_empty = B_TRUE;
	}

	/* CONSTANTCONDITION */
	/* Send the rx packets up. */
	while (1) {
		mblk_t *mp = NULL;
		unsigned int pktlen;
		int ofld_flags;

		umpacket = (um_rxpacket_t *)s_list_pop_head(srcq);
		if (umpacket == NULL) {
			break;
		}

		lmpacket = &(umpacket->lmpacket);

		if (lmpacket->status != LM_STATUS_SUCCESS) {
			s_list_push_tail(freeq, &(lmpacket->link));
			continue;
		}

		pktlen = lmpacket->size;

		/*
		 * FIXME -- Implement mm_flush_cache().
		 *
		 * The LM uses mm_flush_cache() to make sure the processor is
		 * working with current data.  The call to ddi_dma_sync should
		 * go there instead.  How mm_flush_cache() should be
		 * implemented depends on what test mode we are in.
		 *
		 * if (lmdevice->params.test_mode & TEST_MODE_VERIFY_RX_CRC) {
		 *	// The LM will need access to the complete rx buffer.
		 * } else {
		 *	// The LM only needs access to the 16 byte inline rx BD.
		 *	// Be sure in this case to ddi_dma_sync() as many
		 *	// fragments as necessary to get the full rx BD in
		 *	// host memory.
		 * }
		 */
		(void) ddi_dma_sync(umpacket->dma_handle, 0,
		    pktlen + L2RX_FRAME_HDR_LEN, DDI_DMA_SYNC_FORKERNEL);

		dcopy = B_FALSE;

		if (pktlen < umdevice->rx_copy_threshold) {
			lm_device_t *lmdevice;
			lmdevice = &(umdevice->lm_dev);

			if ((lmdevice->params.keep_vlan_tag == 0) &&
			    (lmpacket->u1.rx.flags &
			    LM_RX_FLAG_VALID_VLAN_TAG)) {

				/*
				 * The hardware stripped the VLAN tag, so we
				 * must now reinsert it.  This is done for
				 * compatibility with older firmware that
				 * could not handle VLAN tags.  Allocate two
				 * extra bytes for the TCP alignment pad and
				 * four for the reinserted 802.1Q tag.
				 */
				mp = allocb(pktlen + 6, BPRI_MED);
				if (mp != NULL) {
					uint8_t *dataptr;
					const uint16_t tpid = htons(0x8100);
					uint16_t vlan_tag;

					vlan_tag =
					    htons(lmpacket->u1.rx.vlan_tag);

					/*
					 * For analysis of the packet contents,
					 * we first need to advance
					 * the pointer beyond the inlined return
					 * buffer descriptor.
					 */
					dataptr = lmpacket->u1.rx.mem_virt +
					    L2RX_FRAME_HDR_LEN;

					/* TCP alignment optimization. */
					mp->b_rptr += 2;

					/*
					 * First, copy the destination and
					 * source MAC addresses.
					 */
					bcopy(dataptr, mp->b_rptr, 12);

					/* Second, copy the VLAN tag. */
					bcopy(&tpid, mp->b_rptr + 12, 2);
					bcopy(&vlan_tag, mp->b_rptr + 14, 2);
					/* Third, copy the rest of the packet. */
					dataptr = dataptr + 12;

					bcopy(dataptr, mp->b_rptr + 16,
					    pktlen - 12);
					mp->b_wptr = mp->b_rptr + pktlen + 4;

					dcopy = B_TRUE;

					goto sendup;
				}
			} else {
				/* The hardware didn't strip the VLAN tag. */
				mp = allocb(pktlen + 2, BPRI_MED);
				if (mp != NULL) {
					uint8_t *dataptr;

					/*
					 * For analysis of the packet contents,
					 * we first need to advance
					 * the pointer beyond the inlined return
					 * buffer descriptor.
					 */
					dataptr = lmpacket->u1.rx.mem_virt +
					    L2RX_FRAME_HDR_LEN;

					/* TCP alignment optimization. */
					mp->b_rptr += 2;

					bcopy(dataptr, mp->b_rptr, pktlen);
					mp->b_wptr = mp->b_rptr + pktlen;

					dcopy = B_TRUE;

					goto sendup;
				}
			}

			umdevice->recv_discards++;

			s_list_push_tail(freeq, &(lmpacket->link));

			continue;
		}

		if (lm_rcvq_empty == B_TRUE && !(s_list_entry_cnt(srcq))) {
			/*
			 * If the hardware is out of receive buffers and we are
			 * on the last receive packet, we need to drop the
			 * packet.  We do this because we might not be able to
			 * allocate _any_ new receive buffers before the ISR
			 * completes.  If this happens, the driver will enter
			 * an infinite interrupt loop where the hardware is
			 * requesting rx buffers the driver cannot allocate.
			 * So that the system doesn't livelock, we leave one
			 * buffer perpetually available.  Note that we do this
			 * _after_ giving the double copy code a chance to
			 * claim the packet.
			 */

			/*
			 * FIXME -- Make sure to add one more to the rx packet
			 * descriptor count before allocating them.
			 */

			umdevice->recv_discards++;

			s_list_push_tail(freeq, &(lmpacket->link));

			continue;
		}

sendup:

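		/*
		 * Only the copy paths above ever set mp; no zero-copy
		 * send-up is implemented.  A packet at or above the copy
		 * threshold can fall through to here without an mblk, so
		 * drop it rather than chain a NULL mblk below.  (This
		 * guard is a safety addition; the copy threshold is
		 * normally expected to cover every rx buffer size.)
		 */
		if (mp == NULL) {
			umdevice->recv_discards++;
			s_list_push_tail(freeq, &(lmpacket->link));
			continue;
		}
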
		/*
		 * Check if the checksum was offloaded.
		 * If so, pass the result to the stack.
		 */
		ofld_flags = 0;
		if ((umdevice->dev_var.enabled_oflds &
		    LM_OFFLOAD_RX_IP_CKSUM) &&
		    (lmpacket->u1.rx.flags & LM_RX_FLAG_IP_CKSUM_IS_GOOD)) {
			ofld_flags |= HCK_IPV4_HDRCKSUM_OK;
		}

		if (((umdevice->dev_var.enabled_oflds &
		    LM_OFFLOAD_RX_TCP_CKSUM) &&
		    (lmpacket->u1.rx.flags & LM_RX_FLAG_TCP_CKSUM_IS_GOOD)) ||
		    ((umdevice->dev_var.enabled_oflds &
		    LM_OFFLOAD_RX_UDP_CKSUM) &&
		    (lmpacket->u1.rx.flags & LM_RX_FLAG_UDP_CKSUM_IS_GOOD))) {
			ofld_flags |= HCK_FULLCKSUM_OK;
		}

		if (ofld_flags != 0) {
			mac_hcksum_set(mp, 0, 0, 0, 0, ofld_flags);
		}

		/*
		 * Push the packet descriptor onto one of the queues before we
		 * attempt to send the packet up.  If the send-up function
		 * hangs during driver unload, we want all our packet
		 * descriptors to be available for deallocation.
		 */
		if (dcopy == B_TRUE) {
			s_list_push_tail(freeq, &(lmpacket->link));
		}

		if (head == NULL) {
			head = mp;
			tail = mp;
		} else {
			tail->b_next = mp;
			tail = mp;
		}
		tail->b_next = NULL;
	}

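	/*
	 * Hand the whole chain to the MAC layer in one call.  The
	 * receive mutex is dropped across mac_rx() to avoid the
	 * deadlock described at the top of this function.
	 */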
	if (head) {
		mutex_exit(&umdevice->os_param.rcv_mutex);

		mac_rx(umdevice->os_param.macp,
		    umdevice->os_param.rx_resc_handle[ringidx], head);

		mutex_enter(&umdevice->os_param.rcv_mutex);
	}

	recvinfo->processing = B_FALSE;
}

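/*
 * Return every packet still pending a send-up from the wait queue to
 * the free descriptor queue.
 */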
static void
bnx_recv_ring_dump(um_device_t *const umdevice, const unsigned int ringidx)
{
	s_list_t *srcq;
	s_list_t *dstq;
	um_rxpacket_t *umpacket;

	srcq = &(_RX_QINFO(umdevice, ringidx).waitq);
	dstq = &(umdevice->lm_dev.rx_info.chain[ringidx].free_descq);

	/* CONSTANTCONDITION */
	/* Dump all the packets pending a send-up. */
	while (1) {
		umpacket = (um_rxpacket_t *)s_list_pop_head(srcq);
		if (umpacket == NULL) {
			break;
		}

		s_list_push_tail(dstq, &(umpacket->lmpacket.link));
	}
}

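/*
 * Reclaim the DMA buffer from every descriptor on the free descriptor
 * queue, returning the now buffer-less descriptors to the buffer queue.
 */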
static void
bnx_recv_ring_free(um_device_t *const umdevice, const unsigned int ringidx)
{
	s_list_t *srcq;
	s_list_t *dstq;
	um_rxpacket_t *umpacket;

	srcq = &(umdevice->lm_dev.rx_info.chain[ringidx].free_descq);

	dstq = &(_RX_QINFO(umdevice, ringidx).buffq);

	/* CONSTANTCONDITION */
	/*
	 * Back out all the packets submitted to the "available for hardware
	 * use" queue.  Free the buffers associated with the descriptors as
	 * we go.
	 */
	while (1) {
		umpacket = (um_rxpacket_t *)s_list_pop_head(srcq);
		if (umpacket == NULL) {
			break;
		}

		bnx_rxbuffer_free(umdevice, umpacket);

		s_list_push_tail(dstq, &umpacket->lmpacket.link);
	}
}

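/*
 * Drain the buffer queue.  The descriptor memory itself is owned by the
 * lower module and is reclaimed during final memory cleanup.
 */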
static void
bnx_recv_ring_fini(um_device_t *const umdevice, const unsigned int ringidx)
{
	s_list_t *srcq;
	um_rxpacket_t *umpacket;
	um_recv_qinfo *recvinfo;

	recvinfo = &(_RX_QINFO(umdevice, ringidx));

	srcq = &(recvinfo->buffq);

	/* CONSTANTCONDITION */
	while (1) {
		umpacket = (um_rxpacket_t *)s_list_pop_head(srcq);
		if (umpacket == NULL) {
			break;
		}

		/*
		 * Intentionally throw the packet away.  The memory was
		 * allocated by the lower module and will be reclaimed when
		 * we do our final memory cleanup.
		 */
	}
}

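/*
 * Initialize and fill every rx ring.  Returns -1 only when fewer than
 * BNX_RECV_INIT_FAIL_THRESH buffers could be allocated across all
 * rings, 0 otherwise.
 */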
int
bnx_rxpkts_init(um_device_t *const umdevice)
{
	int i;
	int alloccnt;
	lm_device_t *lmdevice;

	lmdevice = &(umdevice->lm_dev);

	alloccnt = 0;

	for (i = RX_CHAIN_IDX0; i < NUM_RX_CHAIN; i++) {
		int post_count = 0;

		bnx_recv_ring_init(umdevice, i);

		bnx_recv_ring_fill(umdevice, i);

		post_count =
		    s_list_entry_cnt(&lmdevice->rx_info.chain[i].free_descq);

		if (post_count != lmdevice->params.l2_rx_desc_cnt[i]) {
			cmn_err(CE_NOTE,
			    "!%s: %d rx buffers requested.  %d allocated.\n",
			    umdevice->dev_name,
			    umdevice->lm_dev.params.l2_rx_desc_cnt[i],
			    post_count);
		}

		alloccnt += post_count;
	}

	/* FIXME -- Set rxbuffer allocation failure threshold. */
	if (alloccnt < BNX_RECV_INIT_FAIL_THRESH) {
		cmn_err(CE_WARN,
		    "%s: Failed to allocate minimum number of RX buffers.\n",
		    umdevice->dev_name);

/* BEGIN CSTYLED */
#if BNX_RECV_INIT_FAIL_THRESH > 1
#warning Need to implement code to free previously allocated rx buffers in bnx_rxpkts_init error path.
#endif
/* END CSTYLED */

		return (-1);
	}

	return (0);
}

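/*
 * Receive interrupt entry point: for every ring not already being
 * processed, pass any completed packets up the stack.
 */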
void
bnx_rxpkts_intr(um_device_t *const umdevice)
{
	int i;
	um_recv_qinfo *recvinfo;

	for (i = RX_CHAIN_IDX0; i < NUM_RX_CHAIN; i++) {
		recvinfo = &(_RX_QINFO(umdevice, i));

		if (recvinfo->processing == B_FALSE) {
			/* Send the packets up the stack. */
			bnx_recv_ring_recv(umdevice, i);
		}
	}
}

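/*
 * Replenish the rings: attach fresh buffers to any empty descriptors
 * and post them to the hardware.
 */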
void
bnx_rxpkts_post(um_device_t *const umdevice)
{
	int i;
	um_recv_qinfo *recvinfo;

	for (i = RX_CHAIN_IDX0; i < NUM_RX_CHAIN; i++) {
		recvinfo = &(_RX_QINFO(umdevice, i));

		if (recvinfo->processing == B_FALSE) {
			/* Allocate new rx buffers. */
			bnx_recv_ring_fill(umdevice, i);

			/* Submit the rx buffers to the hardware. */
			(void) lm_post_buffers(&(umdevice->lm_dev), i, NULL);
		}
	}
}

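/*
 * Recycle all rx rings: return packets pending a send-up to the free
 * descriptor queue and have the LM abort each rx chain.
 */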
void
bnx_rxpkts_recycle(um_device_t *const umdevice)
{
	int i;

	for (i = NUM_RX_CHAIN - 1; i >= RX_CHAIN_IDX0; i--) {
		bnx_recv_ring_dump(umdevice, i);

		lm_abort(&(umdevice->lm_dev), ABORT_OP_RX_CHAIN, i);
	}
}

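/*
 * Final teardown of the rx rings: dump, recycle, free the DMA buffers,
 * and drain the per-ring queues.
 */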
void
bnx_rxpkts_fini(um_device_t *const umdevice)
{
	int i;

	for (i = NUM_RX_CHAIN - 1; i >= RX_CHAIN_IDX0; i--) {
		/* Dump shouldn't be necessary, but just to be safe... */
		bnx_recv_ring_dump(umdevice, i);

		/* Recycle shouldn't be necessary, but just to be safe... */
		lm_abort(&(umdevice->lm_dev), ABORT_OP_RX_CHAIN, i);

		bnx_recv_ring_free(umdevice, i);
		bnx_recv_ring_fini(umdevice, i);
	}
}