/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2010 Emulex.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * Source file containing the Receive Path handling
 * functions
 */
#include <oce_impl.h>

void oce_rx_pool_free(char *arg);
static void oce_rqb_dtor(oce_rq_bdesc_t *rqbd);
static int oce_rqb_ctor(oce_rq_bdesc_t *rqbd, struct oce_rq *rq,
    size_t size, int flags);

static inline mblk_t *oce_rx(struct oce_dev *dev, struct oce_rq *rq,
    struct oce_nic_rx_cqe *cqe);
static inline mblk_t *oce_rx_bcopy(struct oce_dev *dev,
	struct oce_rq *rq, struct oce_nic_rx_cqe *cqe);
static int oce_rq_charge(struct oce_rq *rq, uint32_t nbufs, boolean_t repost);
static void oce_rx_insert_tag(mblk_t *mp, uint16_t vtag);
static void oce_set_rx_oflags(mblk_t *mp, struct oce_nic_rx_cqe *cqe);
static inline void oce_rx_drop_pkt(struct oce_rq *rq,
    struct oce_nic_rx_cqe *cqe);
static oce_rq_bdesc_t *oce_rqb_alloc(struct oce_rq *rq);
static void oce_rqb_free(struct oce_rq *rq, oce_rq_bdesc_t *rqbd);
static void oce_rq_post_buffer(struct oce_rq *rq, int nbufs);

#pragma	inline(oce_rx)
#pragma	inline(oce_rx_bcopy)
#pragma	inline(oce_rq_charge)
#pragma	inline(oce_rx_insert_tag)
#pragma	inline(oce_set_rx_oflags)
#pragma	inline(oce_rx_drop_pkt)
#pragma	inline(oce_rqb_alloc)
#pragma	inline(oce_rqb_free)
#pragma	inline(oce_rq_post_buffer)

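/*
 * DMA attributes for the receive buffers: full 64-bit addressing, a single
 * cookie per buffer (scatter/gather length of 1), flag-based error reporting
 * and relaxed ordering for better DMA throughput.
 */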
static ddi_dma_attr_t oce_rx_buf_attr = {
	DMA_ATTR_V0,		/* version number */
	0x0000000000000000ull,	/* low address */
	0xFFFFFFFFFFFFFFFFull,	/* high address */
	0x00000000FFFFFFFFull,	/* dma counter max */
	OCE_DMA_ALIGNMENT,	/* alignment */
	0x000007FF,		/* burst sizes */
	0x00000001,		/* minimum transfer size */
	0x00000000FFFFFFFFull,	/* maximum transfer size */
	0xFFFFFFFFFFFFFFFFull,	/* maximum segment size */
	1,			/* scatter/gather list length */
	0x00000001,		/* granularity */
	DDI_DMA_FLAGERR|DDI_DMA_RELAXED_ORDERING	/* DMA flags */
};

/*
 * function to create a DMA buffer pool for the RQ
 *
 * rq - pointer to the RQ for which buffers are allocated
 * buf_size - buffer size (currently unused; the size is derived from the
 * RQ fragment size configuration)
 *
 * return DDI_SUCCESS => success, DDI_FAILURE otherwise
 */
int
oce_rqb_cache_create(struct oce_rq *rq, size_t buf_size)
{
	int size;
	int cnt;
	int ret;
	oce_rq_bdesc_t *rqbd;

	_NOTE(ARGUNUSED(buf_size));
	rqbd = rq->rq_bdesc_array;
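	/*
	 * Each buffer is the RQ fragment size plus OCE_RQE_BUF_HEADROOM.
	 * The headroom sits in front of the received frame so that
	 * oce_rx_insert_tag() can later prepend the VLAN header in place
	 * without another allocation or copy.
	 */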
	size = rq->cfg.frag_size + OCE_RQE_BUF_HEADROOM;
	for (cnt = 0; cnt < rq->cfg.nbufs; cnt++, rqbd++) {
		rq->rqb_freelist[cnt] = rqbd;
		ret = oce_rqb_ctor(rqbd, rq,
		    size, (DDI_DMA_RDWR|DDI_DMA_STREAMING));
		if (ret != DDI_SUCCESS) {
			goto rqb_fail;
		}
	}
	rq->rqb_free = rq->cfg.nbufs;
	rq->rqb_rc_head = 0;
	rq->rqb_next_free = 0;
	return (DDI_SUCCESS);

rqb_fail:
	oce_rqb_cache_destroy(rq);
	return (DDI_FAILURE);
} /* oce_rqb_cache_create */

/*
 * function to destroy the RQ DMA buffer cache
 *
 * rq - pointer to rq structure
 *
 * return none
 */
void
oce_rqb_cache_destroy(struct oce_rq *rq)
{
	oce_rq_bdesc_t *rqbd = NULL;
	int cnt;

	rqbd = rq->rq_bdesc_array;
	for (cnt = 0; cnt < rq->cfg.nbufs; cnt++, rqbd++) {
		oce_rqb_dtor(rqbd);
	}
} /* oce_rqb_cache_destroy */

/*
 * RQ buffer destructor function
 *
 * rqbd - pointer to rq buffer descriptor
 *
 * return none
 */
static	void
oce_rqb_dtor(oce_rq_bdesc_t *rqbd)
{
	if ((rqbd == NULL) || (rqbd->rq == NULL)) {
		return;
	}
	if (rqbd->mp != NULL) {
		rqbd->fr_rtn.free_arg = NULL;
		freemsg(rqbd->mp);
		rqbd->mp = NULL;
	}
	oce_free_dma_buffer(rqbd->rq->parent, rqbd->rqb);
} /* oce_rqb_dtor */

/*
 * RQ buffer constructor function
 *
 * rqbd - pointer to rq buffer descriptor
 * rq - pointer to RQ structure
 * size - size of the buffer
 * flags - DDI DMA allocation flags (e.g. DDI_DMA_RDWR | DDI_DMA_STREAMING)
 *
 * return DDI_SUCCESS => success, DDI_FAILURE otherwise
 */
static int
oce_rqb_ctor(oce_rq_bdesc_t *rqbd, struct oce_rq *rq, size_t size, int flags)
{
	struct oce_dev *dev;
	oce_dma_buf_t *dbuf;

	dev = rq->parent;

	dbuf  = oce_alloc_dma_buffer(dev, size, &oce_rx_buf_attr, flags);
	if (dbuf == NULL) {
		return (DDI_FAILURE);
	}

	/* Set the callback function parameters */
	rqbd->fr_rtn.free_func = (void (*)())oce_rx_pool_free;
	rqbd->fr_rtn.free_arg = (caddr_t)(void *)rqbd;
	rqbd->mp = desballoc((uchar_t *)(dbuf->base),
	    dbuf->size, 0, &rqbd->fr_rtn);
	if (rqbd->mp == NULL) {
		oce_free_dma_buffer(dev, dbuf);
		return (DDI_FAILURE);
	}
	rqbd->rqb = dbuf;
	rqbd->rq = rq;
	rqbd->frag_addr.dw.addr_lo = ADDR_LO(dbuf->addr + OCE_RQE_BUF_HEADROOM);
	rqbd->frag_addr.dw.addr_hi = ADDR_HI(dbuf->addr + OCE_RQE_BUF_HEADROOM);
	rqbd->mp->b_rptr = (uchar_t *)rqbd->rqb->base + OCE_RQE_BUF_HEADROOM;

	return (DDI_SUCCESS);
} /* oce_rqb_ctor */

/*
 * RQ buffer allocator function
 *
 * rq - pointer to RQ structure
 *
 * return pointer to RQ buffer descriptor
 */
static inline oce_rq_bdesc_t *
oce_rqb_alloc(struct oce_rq *rq)
{
	oce_rq_bdesc_t *rqbd;
	uint32_t free_index;
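	/*
	 * The freelist is consumed here at rqb_next_free and replenished by
	 * oce_rqb_free() at rqb_rc_head under rc_lock.  Callers reserve
	 * entries by decrementing rqb_free before calling (see
	 * oce_drain_rq_cq() and oce_start_rq()), so the list cannot run dry;
	 * advancing the consumer index without a lock appears to rely on
	 * there being a single allocating context.
	 */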
	free_index = rq->rqb_next_free;
	rqbd = rq->rqb_freelist[free_index];
	rq->rqb_freelist[free_index] = NULL;
	rq->rqb_next_free = GET_Q_NEXT(free_index, 1, rq->cfg.nbufs);
	return (rqbd);
} /* oce_rqb_alloc */

/*
 * function to free the RQ buffer
 *
 * rq - pointer to RQ structure
 * rqbd - pointer to receive buffer descriptor
 *
 * return none
 */
static inline void
oce_rqb_free(struct oce_rq *rq, oce_rq_bdesc_t *rqbd)
{
	uint32_t free_index;
	mutex_enter(&rq->rc_lock);
	free_index = rq->rqb_rc_head;
	rq->rqb_freelist[free_index] = rqbd;
	rq->rqb_rc_head = GET_Q_NEXT(free_index, 1, rq->cfg.nbufs);
	mutex_exit(&rq->rc_lock);
	atomic_add_32(&rq->rqb_free, 1);
} /* oce_rqb_free */

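/*
 * function to notify the hardware of newly posted receive buffers
 *
 * rq - pointer to RQ structure
 * nbufs - number of receive buffers being posted
 *
 * return none
 *
 * num_posted is reported through the RXULP doorbell in chunks of at most
 * OCE_MAX_RQ_POSTS per doorbell write.
 */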
static void
oce_rq_post_buffer(struct oce_rq *rq, int nbufs)
{
	pd_rxulp_db_t rxdb_reg;
	int count;
	struct oce_dev *dev = rq->parent;

	rxdb_reg.dw0 = 0;
	rxdb_reg.bits.qid = rq->rq_id & DB_RQ_ID_MASK;

	for (count = nbufs/OCE_MAX_RQ_POSTS; count > 0; count--) {
		rxdb_reg.bits.num_posted = OCE_MAX_RQ_POSTS;
		OCE_DB_WRITE32(dev, PD_RXULP_DB, rxdb_reg.dw0);
		rq->buf_avail += OCE_MAX_RQ_POSTS;
		nbufs -= OCE_MAX_RQ_POSTS;
	}
	if (nbufs > 0) {
		rxdb_reg.bits.num_posted = nbufs;
		OCE_DB_WRITE32(dev, PD_RXULP_DB, rxdb_reg.dw0);
		rq->buf_avail += nbufs;
	}
}

/*
 * function to charge a given RQ with buffers from the pool's free list
 *
 * rq - pointer to the RQ to charge
 * nbufs - number of buffers to be charged
 * repost - B_TRUE to repost the buffers already parked in the shadow ring
 *
 * return number of rqes charged
 */
static inline int
oce_rq_charge(struct oce_rq *rq, uint32_t nbufs, boolean_t repost)
{
	struct oce_nic_rqe *rqe;
	oce_rq_bdesc_t *rqbd;
	oce_rq_bdesc_t **shadow_rq;
	int cnt;
	int cur_index;
	oce_ring_buffer_t *ring;

	shadow_rq = rq->shadow_ring;
	ring = rq->ring;
	cur_index = ring->cidx;

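	/*
	 * With repost set, the descriptors already recorded in the shadow
	 * ring are handed back to the hardware (their contents were copied
	 * out or dropped); otherwise fresh descriptors are pulled from the
	 * freelist and the shadow ring entries are replaced.
	 */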
	for (cnt = 0; cnt < nbufs; cnt++) {
		if (!repost) {
			rqbd = oce_rqb_alloc(rq);
		} else {
			/* just repost the buffers from shadow ring */
			rqbd = shadow_rq[cur_index];
			cur_index = GET_Q_NEXT(cur_index, 1, ring->num_items);
		}
		/* fill the rqes */
		rqe = RING_GET_PRODUCER_ITEM_VA(rq->ring,
		    struct oce_nic_rqe);
		rqe->u0.s.frag_pa_lo = rqbd->frag_addr.dw.addr_lo;
		rqe->u0.s.frag_pa_hi = rqbd->frag_addr.dw.addr_hi;
		shadow_rq[rq->ring->pidx] = rqbd;
		DW_SWAP(u32ptr(rqe), sizeof (struct oce_nic_rqe));
		RING_PUT(rq->ring, 1);
	}

	return (cnt);
} /* oce_rq_charge */

/*
 * function to release the posted buffers
 *
 * rq - pointer to the RQ to discharge
 *
 * return none
 */
void
oce_rq_discharge(struct oce_rq *rq)
{
	oce_rq_bdesc_t *rqbd;
	oce_rq_bdesc_t **shadow_rq;

	shadow_rq = rq->shadow_ring;
	/* Free the posted buffers since the RQ is already destroyed */
	while ((int32_t)rq->buf_avail > 0) {
		rqbd = shadow_rq[rq->ring->cidx];
		oce_rqb_free(rq, rqbd);
		RING_GET(rq->ring, 1);
		rq->buf_avail--;
	}
}

/*
 * function to process a single packet
 *
 * dev - software handle to the device
 * rq - pointer to the RQ on which the packet arrived
 * cqe - pointer to the completion queue entry
 *
 * return mblk pointer => success, NULL => error
 */
static inline mblk_t *
oce_rx(struct oce_dev *dev, struct oce_rq *rq, struct oce_nic_rx_cqe *cqe)
{
	mblk_t *mp;
	int pkt_len;
	int32_t frag_cnt = 0;
	mblk_t **mblk_tail;
	mblk_t	*mblk_head;
	int frag_size;
	oce_rq_bdesc_t *rqbd;
	uint16_t cur_index;
	oce_ring_buffer_t *ring;
	int i;

	frag_cnt  = cqe->u0.s.num_fragments & 0x7;
	mblk_head = NULL;
	mblk_tail = &mblk_head;

	ring = rq->ring;
	cur_index = ring->cidx;

	/* Get the packet length from the CQE */
	pkt_len = cqe->u0.s.pkt_size;
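	/*
	 * Walk the fragments recorded in the shadow ring and loan each DMA
	 * buffer upstream as a zero-copy mblk (set up with desballoc() in
	 * the constructor), chaining the fragments with b_cont.
	 */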
	for (i = 0; i < frag_cnt; i++) {
		rqbd = rq->shadow_ring[cur_index];
		if (rqbd->mp == NULL) {
			rqbd->mp = desballoc((uchar_t *)rqbd->rqb->base,
			    rqbd->rqb->size, 0, &rqbd->fr_rtn);
			if (rqbd->mp == NULL) {
				return (NULL);
			}

			rqbd->mp->b_rptr =
			    (uchar_t *)rqbd->rqb->base + OCE_RQE_BUF_HEADROOM;
		}

		mp = rqbd->mp;
		frag_size  = (pkt_len > rq->cfg.frag_size) ?
		    rq->cfg.frag_size : pkt_len;
		mp->b_wptr = mp->b_rptr + frag_size;
		pkt_len   -= frag_size;
		mp->b_next = mp->b_cont = NULL;
		/* Chain the message mblks */
		*mblk_tail = mp;
		mblk_tail = &mp->b_cont;
		(void) DBUF_SYNC(rqbd->rqb, DDI_DMA_SYNC_FORCPU);
		cur_index = GET_Q_NEXT(cur_index, 1, ring->num_items);
	}

	if (mblk_head == NULL) {
		oce_log(dev, CE_WARN, MOD_RX, "%s", "oce_rx:no frags?");
		return (NULL);
	}

	/* replace the buffers with new ones */
	(void) oce_rq_charge(rq, frag_cnt, B_FALSE);
	atomic_add_32(&rq->pending, frag_cnt);
	return (mblk_head);
} /* oce_rx */

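/*
 * function to process a single packet by copying it out of the DMA buffers
 *
 * dev - software handle to the device (unused)
 * rq - pointer to the RQ on which the packet arrived
 * cqe - pointer to the completion queue entry
 *
 * return mblk pointer => success, NULL => error
 *
 * This copy path is taken for frames smaller than the bcopy limit, when not
 * enough replacement buffers can be reserved, or when the zero-copy path
 * fails; the ring buffers are reposted unchanged once the data is copied out.
 */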
static inline mblk_t *
oce_rx_bcopy(struct oce_dev *dev, struct oce_rq *rq, struct oce_nic_rx_cqe *cqe)
{
	mblk_t *mp;
	int pkt_len;
	int alloc_len;
	int32_t frag_cnt = 0;
	int frag_size;
	oce_rq_bdesc_t *rqbd;
	unsigned char  *rptr;
	uint32_t cur_index;
	oce_ring_buffer_t *ring;
	oce_rq_bdesc_t **shadow_rq;
	int cnt = 0;

	_NOTE(ARGUNUSED(dev));

	shadow_rq = rq->shadow_ring;
	pkt_len = cqe->u0.s.pkt_size;
	alloc_len = pkt_len + OCE_RQE_BUF_HEADROOM;
	frag_cnt = cqe->u0.s.num_fragments & 0x7;

	mp = allocb(alloc_len, BPRI_HI);
	if (mp == NULL) {
		return (NULL);
	}

	mp->b_rptr += OCE_RQE_BUF_HEADROOM;
	rptr = mp->b_rptr;
	mp->b_wptr = mp->b_rptr + pkt_len;
	ring = rq->ring;

	cur_index = ring->cidx;
	for (cnt = 0; cnt < frag_cnt; cnt++) {
		rqbd = shadow_rq[cur_index];
		frag_size  = (pkt_len > rq->cfg.frag_size) ?
		    rq->cfg.frag_size : pkt_len;
		(void) DBUF_SYNC(rqbd->rqb, DDI_DMA_SYNC_FORCPU);
		bcopy(rqbd->rqb->base + OCE_RQE_BUF_HEADROOM, rptr, frag_size);
		rptr += frag_size;
		pkt_len   -= frag_size;
		cur_index = GET_Q_NEXT(cur_index, 1, ring->num_items);
	}
	(void) oce_rq_charge(rq, frag_cnt, B_TRUE);
	return (mp);
}

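/*
 * function to set the hardware checksum offload flags on a received mblk
 *
 * mp - mblk carrying the received packet
 * cqe - pointer to the completion queue entry
 *
 * return none
 */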
static inline void
oce_set_rx_oflags(mblk_t *mp, struct oce_nic_rx_cqe *cqe)
{
	int csum_flags = 0;

	/* set flags */
	if (cqe->u0.s.ip_cksum_pass) {
		csum_flags |= HCK_IPV4_HDRCKSUM_OK;
	}

	if (cqe->u0.s.l4_cksum_pass) {
		csum_flags |= (HCK_FULLCKSUM | HCK_FULLCKSUM_OK);
	}

	if (csum_flags) {
		(void) mac_hcksum_set(mp, 0, 0, 0, 0, csum_flags);
	}
}

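/*
 * function to insert the VLAN tag into a received frame
 *
 * mp - mblk carrying the received packet
 * vtag - VLAN tag from the completion queue entry
 *
 * return none
 *
 * The destination and source MAC addresses are shifted back into the buffer
 * headroom and the 802.1Q TPID/TCI pair is written into the gap.
 */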
static inline void
oce_rx_insert_tag(mblk_t *mp, uint16_t vtag)
{
	struct ether_vlan_header *ehp;

	(void) memmove(mp->b_rptr - VLAN_TAGSZ,
	    mp->b_rptr, 2 * ETHERADDRL);
	mp->b_rptr -= VLAN_TAGSZ;
	ehp = (struct ether_vlan_header *)voidptr(mp->b_rptr);
	ehp->ether_tpid = htons(ETHERTYPE_VLAN);
	ehp->ether_tci = LE_16(vtag);
}

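/*
 * function to drop a packet by returning its buffers to the free pool
 *
 * rq - pointer to the RQ on which the packet arrived
 * cqe - pointer to the completion queue entry
 *
 * return none
 */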
static inline void
oce_rx_drop_pkt(struct oce_rq *rq, struct oce_nic_rx_cqe *cqe)
{
	int frag_cnt;
	oce_rq_bdesc_t *rqbd;
	oce_rq_bdesc_t  **shadow_rq;

	shadow_rq = rq->shadow_ring;
	for (frag_cnt = 0; frag_cnt < cqe->u0.s.num_fragments; frag_cnt++) {
		rqbd = shadow_rq[rq->ring->cidx];
		oce_rqb_free(rq, rqbd);
		RING_GET(rq->ring, 1);
	}
}

/*
 * function to drain the completion queue of a Receive queue
 *
 * arg - pointer to the RQ to drain
 *
 * return number of cqes processed
 */
uint16_t
oce_drain_rq_cq(void *arg)
{
	struct oce_nic_rx_cqe *cqe;
	struct oce_rq *rq;
	mblk_t *mp = NULL;
	mblk_t *mblk_head;
	mblk_t **mblk_tail;
	uint16_t num_cqe = 0;
	struct oce_cq  *cq;
	struct oce_dev *dev;
	int32_t frag_cnt;
	uint32_t nbufs = 0;

	rq = (struct oce_rq *)arg;
	dev = rq->parent;
	cq = rq->cq;
	mblk_head = NULL;
	mblk_tail = &mblk_head;

	cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_rx_cqe);

	(void) DBUF_SYNC(cq->ring->dbuf, DDI_DMA_SYNC_FORKERNEL);
	/* dequeue till you reach an invalid cqe */
	while (RQ_CQE_VALID(cqe)) {
		DW_SWAP(u32ptr(cqe), sizeof (struct oce_nic_rx_cqe));
		frag_cnt = cqe->u0.s.num_fragments & 0x7;
		/*
		 * Copy small frames, or any frame for which enough
		 * replacement buffers cannot be reserved; otherwise loan
		 * the DMA buffers upstream via oce_rx().
		 */
		if ((cqe->u0.s.pkt_size < dev->rx_bcopy_limit) ||
		    (oce_atomic_reserve(&rq->rqb_free, frag_cnt) < 0)) {
			mp = oce_rx_bcopy(dev, rq, cqe);
		} else {
			mp = oce_rx(dev, rq, cqe);
			if (mp == NULL) {
				atomic_add_32(&rq->rqb_free, frag_cnt);
				mp = oce_rx_bcopy(dev, rq, cqe);
			}
		}
		if (mp != NULL) {
			if (cqe->u0.s.vlan_tag_present) {
				oce_rx_insert_tag(mp, cqe->u0.s.vlan_tag);
			}
			oce_set_rx_oflags(mp, cqe);

			*mblk_tail = mp;
			mblk_tail = &mp->b_next;
		} else {
			(void) oce_rq_charge(rq, frag_cnt, B_TRUE);
		}
		RING_GET(rq->ring, frag_cnt);
		rq->buf_avail -= frag_cnt;
		nbufs += frag_cnt;

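		/* Replenish the hardware with the fragments just consumed */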
		oce_rq_post_buffer(rq, frag_cnt);
		RQ_CQE_INVALIDATE(cqe);
		RING_GET(cq->ring, 1);
		cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring,
		    struct oce_nic_rx_cqe);
		num_cqe++;
		/* bound the work done per invocation */
		if (num_cqe > dev->rx_pkt_per_intr) {
			break;
		}
	} /* for all valid CQEs */

	if (mblk_head) {
		mac_rx(dev->mac_handle, NULL, mblk_head);
	}
	oce_arm_cq(dev, cq->cq_id, num_cqe, B_TRUE);
	return (num_cqe);
} /* oce_drain_rq_cq */

/*
 * function to free an mblk data buffer back to the RQ pool
 *
 * arg - pointer to the receive buffer descriptor
 *
 * return none
 */
void
oce_rx_pool_free(char *arg)
{
	oce_rq_bdesc_t *rqbd;
	struct oce_rq  *rq;

	/* During destroy, arg will be NULL */
	if (arg == NULL) {
		return;
	}

	/* retrieve the pointers from arg */
	rqbd = (oce_rq_bdesc_t *)(void *)arg;
	rq = rqbd->rq;
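	/*
	 * This is the desballoc() free routine invoked when the networking
	 * stack frees a loaned mblk.  Re-arm the descriptor with a fresh
	 * mblk so it can be loaned out again; if desballoc() fails here,
	 * oce_rx() retries the allocation when the buffer is next used.
	 */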
	rqbd->mp = desballoc((uchar_t *)rqbd->rqb->base,
	    rqbd->rqb->size, 0, &rqbd->fr_rtn);

	if (rqbd->mp) {
		rqbd->mp->b_rptr =
		    (uchar_t *)rqbd->rqb->base + OCE_RQE_BUF_HEADROOM;
	}

	oce_rqb_free(rq, rqbd);
	(void) atomic_add_32(&rq->pending, -1);
} /* oce_rx_pool_free */

/*
 * function to clean out the RQ completion queue during RX shutdown
 *
 * rq - pointer to RQ structure
 *
 * return none
 */
void
oce_clean_rq(struct oce_rq *rq)
{
	uint16_t num_cqe = 0;
	struct oce_cq  *cq;
	struct oce_dev *dev;
	struct oce_nic_rx_cqe *cqe;
	int32_t ti = 0;

	dev = rq->parent;
	cq = rq->cq;
	cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_rx_cqe);
	/* poll for DEFAULT_DRAIN_TIME iterations, dropping completed packets */
	for (ti = 0; ti < DEFAULT_DRAIN_TIME; ti++) {

		while (RQ_CQE_VALID(cqe)) {
			DW_SWAP(u32ptr(cqe), sizeof (struct oce_nic_rx_cqe));
			oce_rx_drop_pkt(rq, cqe);
			atomic_add_32(&rq->buf_avail,
			    -(cqe->u0.s.num_fragments & 0x7));
			oce_arm_cq(dev, cq->cq_id, 1, B_TRUE);
			RQ_CQE_INVALIDATE(cqe);
			RING_GET(cq->ring, 1);
			cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring,
			    struct oce_nic_rx_cqe);
			num_cqe++;
		}
		OCE_MSDELAY(1);
	}
} /* oce_clean_rq */

/*
 * function to start the RX
 *
 * rq - pointer to RQ structure
 *
 * return 0 on success
 */
int
oce_start_rq(struct oce_rq *rq)
{
	int ret = 0;
	int to_charge = 0;
	struct oce_dev *dev = rq->parent;
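
	/*
	 * Charge as many rqes as there is both room left in the ring
	 * and buffers available on the freelist.
	 */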
	to_charge = rq->cfg.q_len - rq->buf_avail;
	to_charge = min(to_charge, rq->rqb_free);
	atomic_add_32(&rq->rqb_free, -to_charge);
	(void) oce_rq_charge(rq, to_charge, B_FALSE);
	/* ok to do it here since Rx has not even started */
	oce_rq_post_buffer(rq, to_charge);
	oce_arm_cq(dev, rq->cq->cq_id, 0, B_TRUE);
	return (ret);
} /* oce_start_rq */

/* Checks for rx buffers still pending with the stack */
int
oce_rx_pending(struct oce_dev *dev, struct oce_rq *rq, int32_t timeout)
{
	int ti;
	_NOTE(ARGUNUSED(dev));

	for (ti = 0; ti < timeout; ti++) {
		if (rq->pending > 0) {
			OCE_MSDELAY(10);
			continue;
		} else {
			rq->pending = 0;
			break;
		}
	}
	return (rq->pending);
}