1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2010 Emulex.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 /*
28  * Source file containing the Receive Path handling
29  * functions
30  */
31 #include <oce_impl.h>
32 
33 
34 static void rx_pool_free(char *arg);
35 static inline mblk_t *oce_rx(struct oce_dev *dev, struct oce_rq *rq,
36     struct oce_nic_rx_cqe *cqe);
37 static inline mblk_t *oce_rx_bcopy(struct oce_dev *dev,
38 	struct oce_rq *rq, struct oce_nic_rx_cqe *cqe);
39 static int oce_rq_charge(struct oce_dev *dev, struct oce_rq *rq,
40     uint32_t nbufs);
41 static oce_rq_bdesc_t *oce_rqb_alloc(struct oce_rq *rq);
42 static void oce_rqb_free(struct oce_rq *rq, oce_rq_bdesc_t *rqbd);
43 static void oce_rqb_dtor(oce_rq_bdesc_t *rqbd);
44 static int oce_rqb_ctor(oce_rq_bdesc_t *rqbd, struct oce_rq *rq,
45     size_t size, int flags);
46 static void oce_rx_insert_tag(mblk_t *mp, uint16_t vtag);
47 static void oce_set_rx_oflags(mblk_t *mp, struct oce_nic_rx_cqe *cqe);
48 static inline void oce_rx_drop_pkt(struct oce_rq *rq,
49     struct oce_nic_rx_cqe *cqe);
50 
51 
52 /*
 * function to create a DMA buffer pool for an RQ
 *
 * rq - pointer to the RQ for which the pool is created
 * buf_size - size of each buffer in the pool
58  *
59  * return DDI_SUCCESS => success, DDI_FAILURE otherwise
60  */
61 int
62 oce_rqb_cache_create(struct oce_rq *rq, size_t buf_size)
63 {
64 	struct oce_dev *dev = rq->parent;
65 	int size;
66 	int cnt;
67 	int ret;
68 	int nitems;
69 
70 	nitems = rq->cfg.nbufs;
71 	size = nitems * sizeof (oce_rq_bdesc_t);
72 	rq->rq_bdesc_array = kmem_zalloc(size, KM_NOSLEEP);
73 	if (rq->rq_bdesc_array == NULL) {
74 		return (DDI_FAILURE);
75 	}
76 
77 	/* Create the free buffer list */
78 	OCE_LIST_CREATE(&rq->rq_buf_list, DDI_INTR_PRI(dev->intr_pri));
79 
80 	for (cnt = 0; cnt < nitems; cnt++) {
81 		ret = oce_rqb_ctor(&rq->rq_bdesc_array[cnt],
82 		    rq, buf_size, DDI_DMA_STREAMING);
83 		if (ret != DDI_SUCCESS) {
84 			goto rqb_fail;
85 		}
86 		OCE_LIST_INSERT_TAIL(&rq->rq_buf_list,
87 		    &(rq->rq_bdesc_array[cnt].link));
88 	}
89 	return (DDI_SUCCESS);
90 
91 rqb_fail:
92 	oce_rqb_cache_destroy(rq);
93 	return (DDI_FAILURE);
94 } /* oce_rqb_cache_create */
95 
96 /*
 * function to destroy the RQ DMA buffer cache
98  *
99  * rq - pointer to rq structure
100  *
101  * return none
102  */
103 void
104 oce_rqb_cache_destroy(struct oce_rq *rq)
105 {
106 	oce_rq_bdesc_t *rqbd = NULL;
107 
108 	while ((rqbd = (oce_rq_bdesc_t *)OCE_LIST_REM_HEAD(&rq->rq_buf_list))
109 	    != NULL) {
110 		oce_rqb_dtor(rqbd);
111 	}
112 	kmem_free(rq->rq_bdesc_array,
113 	    rq->cfg.nbufs * sizeof (oce_rq_bdesc_t));
114 	OCE_LIST_DESTROY(&rq->rq_buf_list);
115 } /* oce_rqb_cache_destroy */
116 
117 /*
118  * RQ buffer destructor function
119  *
120  * rqbd - pointer to rq buffer descriptor
121  *
122  * return none
123  */
124 static	void
125 oce_rqb_dtor(oce_rq_bdesc_t *rqbd)
126 {
127 	if ((rqbd == NULL) || (rqbd->rq == NULL)) {
128 		return;
129 	}
130 	oce_free_dma_buffer(rqbd->rq->parent, rqbd->rqb);
131 	if (rqbd->mp != NULL) {
		/* dma buffer already freed; keep rx_pool_free from using it */
133 		rqbd->fr_rtn.free_arg = NULL;
134 		freeb(rqbd->mp);
135 	}
136 } /* oce_rqb_dtor */
137 
138 /*
139  * RQ buffer constructor function
140  *
141  * rqbd - pointer to rq buffer descriptor
142  * rq - pointer to RQ structure
143  * size - size of the buffer
 * flags - allocation flags passed to oce_alloc_dma_buffer
145  *
146  * return DDI_SUCCESS => success, DDI_FAILURE otherwise
147  */
148 static int
149 oce_rqb_ctor(oce_rq_bdesc_t *rqbd, struct oce_rq *rq, size_t size, int flags)
150 {
151 	struct oce_dev *dev;
152 	oce_dma_buf_t *dbuf;
153 
154 	dev = rq->parent;
155 
156 	dbuf  = oce_alloc_dma_buffer(dev, size, flags);
157 	if (dbuf == NULL) {
158 		return (DDI_FAILURE);
159 	}
160 
	/* program the fragment address past the reserved headroom */
	rqbd->rqb = dbuf;
	rqbd->rq = rq;
	rqbd->frag_addr.dw.addr_lo = ADDR_LO(dbuf->addr + OCE_RQE_BUF_HEADROOM);
	rqbd->frag_addr.dw.addr_hi = ADDR_HI(dbuf->addr + OCE_RQE_BUF_HEADROOM);
	rqbd->fr_rtn.free_func = (void (*)())rx_pool_free;
	rqbd->fr_rtn.free_arg = (caddr_t)(void *)rqbd;
	rqbd->mp = desballoc((uchar_t *)(rqbd->rqb->base),
	    rqbd->rqb->size, 0, &rqbd->fr_rtn);
	/*
	 * desballoc() can fail; the charge path retries the allocation
	 * later, so only set the read pointer when an mblk was obtained.
	 */
	if (rqbd->mp != NULL) {
		rqbd->mp->b_rptr =
		    (uchar_t *)rqbd->rqb->base + OCE_RQE_BUF_HEADROOM;
	}

	return (DDI_SUCCESS);
173 } /* oce_rqb_ctor */
174 
175 /*
176  * RQ buffer allocator function
177  *
178  * rq - pointer to RQ structure
179  *
180  * return pointer to RQ buffer descriptor
181  */
182 static inline oce_rq_bdesc_t *
183 oce_rqb_alloc(struct oce_rq *rq)
184 {
185 	oce_rq_bdesc_t *rqbd;
186 	rqbd = OCE_LIST_REM_HEAD(&rq->rq_buf_list);
187 	return (rqbd);
188 } /* oce_rqb_alloc */
189 
190 /*
191  * function to free the RQ buffer
192  *
193  * rq - pointer to RQ structure
 * rqbd - pointer to receive buffer descriptor
195  *
196  * return none
197  */
198 static inline void
199 oce_rqb_free(struct oce_rq *rq, oce_rq_bdesc_t *rqbd)
200 {
201 	OCE_LIST_INSERT_TAIL(&rq->rq_buf_list, rqbd);
202 } /* oce_rqb_free */
203 
204 
205 /*
206  * function to charge a given rq with buffers from a pool's free list
207  *
208  * dev - software handle to the device
209  * rq - pointer to the RQ to charge
210  * nbufs - numbers of buffers to be charged
211  *
 * return number of rqes charged
213  */
214 static inline int
215 oce_rq_charge(struct oce_dev *dev,
216     struct oce_rq *rq, uint32_t nbufs)
217 {
218 	struct oce_nic_rqe *rqe;
219 	oce_rq_bdesc_t *rqbd;
220 	struct rq_shadow_entry	*shadow_rq;
221 	int32_t num_bufs = 0;
222 	int32_t total_bufs = 0;
223 	pd_rxulp_db_t rxdb_reg;
224 	uint32_t cnt;
225 
226 	shadow_rq = rq->shadow_ring;
227 	/* check number of slots free and recharge */
228 	nbufs = ((rq->buf_avail + nbufs) > rq->cfg.q_len) ?
229 	    (rq->cfg.q_len - rq->buf_avail) : nbufs;
230 	for (cnt = 0; cnt < nbufs; cnt++) {
231 		rqbd = oce_rqb_alloc(rq);
232 		if (rqbd == NULL) {
233 			oce_log(dev, CE_NOTE, MOD_RX, "%s %x",
234 			    "rqb pool empty @ ticks",
235 			    (uint32_t)ddi_get_lbolt());
236 			break;
237 		}
238 		if (rqbd->mp == NULL) {
239 			rqbd->mp = desballoc((uchar_t *)(rqbd->rqb->base),
240 			    rqbd->rqb->size, 0, &rqbd->fr_rtn);
241 			if (rqbd->mp != NULL) {
242 				rqbd->mp->b_rptr =
243 				    (uchar_t *)rqbd->rqb->base +
244 				    OCE_RQE_BUF_HEADROOM;
245 			}
246 
			/*
			 * desballoc failed again; return the buffer to the
			 * pool and continue. The loop is bounded by nbufs,
			 * so it stays finite.
			 */
251 
252 			if (rqbd->mp == NULL) {
253 				oce_rqb_free(rq, rqbd);
254 				continue;
255 			}
256 		}
257 
258 		/* fill the rqes */
259 		rqe = RING_GET_PRODUCER_ITEM_VA(rq->ring,
260 		    struct oce_nic_rqe);
261 		rqe->u0.s.frag_pa_lo = rqbd->frag_addr.dw.addr_lo;
262 		rqe->u0.s.frag_pa_hi = rqbd->frag_addr.dw.addr_hi;
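		/* save the descriptor so the completion path can find it */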
263 		shadow_rq[rq->ring->pidx].rqbd = rqbd;
264 		DW_SWAP(u32ptr(rqe), sizeof (struct oce_nic_rqe));
265 		RING_PUT(rq->ring, 1);
266 
267 		/* if we have reached the max allowed posts, post */
268 		if (cnt && !(cnt % OCE_MAX_RQ_POSTS)) {
269 			rxdb_reg.dw0 = 0;
270 			rxdb_reg.bits.num_posted = num_bufs;
271 			rxdb_reg.bits.qid = rq->rq_id & DB_RQ_ID_MASK;
272 			OCE_DB_WRITE32(dev, PD_RXULP_DB, rxdb_reg.dw0);
273 			if (oce_fm_check_acc_handle(dev, dev->db_handle) !=
274 			    DDI_FM_OK) {
275 				ddi_fm_service_impact(dev->dip,
276 				    DDI_SERVICE_DEGRADED);
277 			}
278 			num_bufs = 0;
279 		}
280 		num_bufs++;
281 		total_bufs++;
282 	}
283 
284 	/* post pending bufs */
285 	if (num_bufs) {
286 		rxdb_reg.dw0 = 0;
287 		rxdb_reg.bits.num_posted = num_bufs;
288 		rxdb_reg.bits.qid = rq->rq_id & DB_RQ_ID_MASK;
289 		OCE_DB_WRITE32(dev, PD_RXULP_DB, rxdb_reg.dw0);
290 		if (oce_fm_check_acc_handle(dev, dev->db_handle) !=
291 		    DDI_FM_OK) {
292 			ddi_fm_service_impact(dev->dip, DDI_SERVICE_DEGRADED);
293 		}
294 	}
295 	atomic_add_32(&rq->buf_avail, total_bufs);
296 
297 	return (total_bufs);
298 } /* oce_rq_charge */
299 
300 /*
301  * function to release the posted buffers
302  *
 * rq - pointer to the RQ whose posted buffers are to be released
304  *
305  * return none
306  */
307 void
308 oce_rq_discharge(struct oce_rq *rq)
309 {
310 	oce_rq_bdesc_t *rqbd;
311 	struct rq_shadow_entry *shadow_rq;
312 
313 	shadow_rq = rq->shadow_ring;
314 	/* Free the posted buffer since RQ is destroyed already */
315 	while ((int32_t)rq->buf_avail > 0) {
316 		rqbd = shadow_rq[rq->ring->cidx].rqbd;
317 		oce_rqb_free(rq, rqbd);
318 		RING_GET(rq->ring, 1);
319 		rq->buf_avail--;
320 	}
} /* oce_rq_discharge */

322 /*
323  * function to process a single packet
324  *
325  * dev - software handle to the device
 * rq - pointer to the RQ on which the packet was received
327  * cqe - Pointer to Completion Q entry
328  *
329  * return mblk pointer =>  success, NULL  => error
330  */
331 static inline mblk_t *
332 oce_rx(struct oce_dev *dev, struct oce_rq *rq, struct oce_nic_rx_cqe *cqe)
333 {
334 	mblk_t *mp;
335 	int pkt_len;
336 	int32_t frag_cnt = 0;
337 	mblk_t *mblk_prev = NULL;
338 	mblk_t	*mblk_head = NULL;
339 	int frag_size;
340 	struct rq_shadow_entry *shadow_rq;
341 	struct rq_shadow_entry *shadow_rqe;
342 	oce_rq_bdesc_t *rqbd;
343 
344 	/* Get the relevant Queue pointers */
345 	shadow_rq = rq->shadow_ring;
346 	pkt_len = cqe->u0.s.pkt_size;
347 	for (; frag_cnt < cqe->u0.s.num_fragments; frag_cnt++) {
348 		shadow_rqe = &shadow_rq[rq->ring->cidx];
349 		rqbd = shadow_rqe->rqbd;
350 		mp = rqbd->mp;
351 		if (mp == NULL)
352 			return (NULL);
353 		frag_size  = (pkt_len > rq->cfg.frag_size) ?
354 		    rq->cfg.frag_size : pkt_len;
355 		mp->b_wptr = mp->b_rptr + frag_size;
356 		pkt_len   -= frag_size;
357 		/* Chain the message mblks */
358 		if (mblk_head == NULL) {
359 			mblk_head = mblk_prev = mp;
360 		} else {
361 			mblk_prev->b_cont = mp;
362 			mblk_prev = mp;
363 		}
364 		(void) ddi_dma_sync(rqbd->rqb->dma_handle, 0, frag_size,
365 		    DDI_DMA_SYNC_FORKERNEL);
366 		RING_GET(rq->ring, 1);
367 	}
368 
369 	if (mblk_head == NULL) {
370 		oce_log(dev, CE_WARN, MOD_RX, "%s", "oce_rx:no frags?");
371 		return (NULL);
372 	}
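	/* these buffers are loaned to the stack; rx_pool_free returns them */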
373 	atomic_add_32(&rq->pending, (cqe->u0.s.num_fragments & 0x7));
374 	mblk_head->b_next = NULL;
375 	return (mblk_head);
376 } /* oce_rx */
377 
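/*
 * function to copy a received packet into a new mblk and
 * return its rq buffers to the pool
 *
 * dev - software handle to the device
 * rq - pointer to the RQ on which the packet was received
 * cqe - pointer to Completion Q entry
 *
 * return mblk pointer => success, NULL => allocation failure
 */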
378 /* ARGSUSED */
379 static inline mblk_t *
380 oce_rx_bcopy(struct oce_dev *dev, struct oce_rq *rq, struct oce_nic_rx_cqe *cqe)
381 {
382 	mblk_t *mp;
383 	int pkt_len;
384 	int alloc_len;
385 	int32_t frag_cnt = 0;
386 	int frag_size;
387 	struct rq_shadow_entry *shadow_rq;
388 	struct rq_shadow_entry *shadow_rqe;
389 	oce_rq_bdesc_t *rqbd;
390 	boolean_t tag_present =  B_FALSE;
391 	unsigned char  *rptr;
392 
393 	shadow_rq = rq->shadow_ring;
394 	pkt_len = cqe->u0.s.pkt_size;
395 	alloc_len = pkt_len;
396 
	/* the hardware always strips the VLAN tag; leave room to reinsert it */
398 	if (cqe->u0.s.vlan_tag_present) {
399 		alloc_len += VLAN_TAGSZ;
400 		tag_present = B_TRUE;
401 	}
402 	mp = allocb(alloc_len, BPRI_HI);
403 	if (mp == NULL)
404 		return (NULL);
405 	if (tag_present) {
		/* reserve VLAN_TAGSZ bytes so the tag can be inserted later */
407 		mp->b_rptr += VLAN_TAGSZ;
408 	}
409 	rptr = mp->b_rptr;
410 	mp->b_wptr = mp->b_wptr + alloc_len;
411 
412 	for (frag_cnt = 0; frag_cnt < cqe->u0.s.num_fragments; frag_cnt++) {
413 		shadow_rqe = &shadow_rq[rq->ring->cidx];
414 		rqbd = shadow_rqe->rqbd;
415 		frag_size  = (pkt_len > rq->cfg.frag_size) ?
416 		    rq->cfg.frag_size : pkt_len;
417 		(void) ddi_dma_sync(rqbd->rqb->dma_handle, 0, frag_size,
418 		    DDI_DMA_SYNC_FORKERNEL);
419 		bcopy(rqbd->rqb->base + OCE_RQE_BUF_HEADROOM,
420 		    rptr, frag_size);
421 		rptr += frag_size;
422 		pkt_len   -= frag_size;
423 		oce_rqb_free(rq, rqbd);
424 		RING_GET(rq->ring, 1);
425 	}
426 	return (mp);
} /* oce_rx_bcopy */
428 
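/*
 * function to set the hardware checksum results on a received mblk
 *
 * mp - mblk carrying the received packet
 * cqe - pointer to Completion Q entry
 *
 * return none
 */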
429 static inline void
430 oce_set_rx_oflags(mblk_t *mp, struct oce_nic_rx_cqe *cqe)
431 {
432 	int csum_flags = 0;
433 
434 	/* set flags */
435 	if (cqe->u0.s.ip_cksum_pass) {
436 		csum_flags |= HCK_IPV4_HDRCKSUM_OK;
437 	}
438 
439 	if (cqe->u0.s.l4_cksum_pass) {
440 		csum_flags |= (HCK_FULLCKSUM | HCK_FULLCKSUM_OK);
441 	}
442 
443 	if (csum_flags) {
444 		(void) mac_hcksum_set(mp, 0, 0, 0, 0, csum_flags);
445 	}
} /* oce_set_rx_oflags */
447 
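/*
 * function to reinsert the VLAN tag stripped by the hardware
 *
 * mp - mblk carrying the received packet
 * vtag - VLAN tag from the completion entry
 *
 * return none
 */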
448 static inline void
449 oce_rx_insert_tag(mblk_t *mp, uint16_t vtag)
450 {
451 	struct ether_vlan_header *ehp;
452 
453 	(void) memmove(mp->b_rptr - VLAN_TAGSZ,
454 	    mp->b_rptr, 2 * ETHERADDRL);
455 	mp->b_rptr -= VLAN_TAGSZ;
456 	ehp = (struct ether_vlan_header *)voidptr(mp->b_rptr);
457 	ehp->ether_tpid = htons(ETHERTYPE_VLAN);
458 	ehp->ether_tci = LE_16(vtag);
} /* oce_rx_insert_tag */
460 
461 
462 
463 /*
 * function to process a Receive queue's completion queue
 *
 * arg - pointer to the RQ to drain
467  *
468  * return number of cqes processed
469  */
470 uint16_t
471 oce_drain_rq_cq(void *arg)
472 {
473 	struct oce_nic_rx_cqe *cqe;
474 	struct oce_rq *rq;
475 	mblk_t *mp = NULL;
476 	mblk_t *mblk_head  = NULL;
477 	mblk_t *mblk_prev  = NULL;
478 	uint16_t num_cqe = 0;
479 	struct oce_cq  *cq;
480 	struct oce_dev *dev;
481 
482 	if (arg == NULL)
483 		return (0);
484 
485 	rq = (struct oce_rq *)arg;
486 	dev = rq->parent;
487 	cq = rq->cq;
488 	mutex_enter(&rq->rx_lock);
489 	cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_rx_cqe);
490 
491 	/* dequeue till you reach an invalid cqe */
492 	while (RQ_CQE_VALID(cqe) && (num_cqe < rq->cfg.q_len)) {
493 		DW_SWAP(u32ptr(cqe), sizeof (struct oce_nic_rx_cqe));
		/* copy small frames, or when not enough free buffers remain */
495 		if (cqe->u0.s.pkt_size < dev->rx_bcopy_limit ||
496 		    OCE_LIST_SIZE(&rq->rq_buf_list) < cqe->u0.s.num_fragments) {
497 			mp = oce_rx_bcopy(dev, rq, cqe);
498 		} else {
499 			mp = oce_rx(dev, rq, cqe);
500 		}
501 		if (mp != NULL) {
502 			if (cqe->u0.s.vlan_tag_present) {
503 				oce_rx_insert_tag(mp, cqe->u0.s.vlan_tag);
504 			}
505 			oce_set_rx_oflags(mp, cqe);
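			/* chain the packets for a single mac_rx call */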
506 			if (mblk_head == NULL) {
507 				mblk_head = mblk_prev  = mp;
508 			} else {
509 				mblk_prev->b_next = mp;
510 				mblk_prev = mp;
511 			}
512 
513 		} else {
514 			oce_rx_drop_pkt(rq, cqe);
515 		}
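		/* replenish the RQ with as many buffers as were consumed */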
516 		atomic_add_32(&rq->buf_avail, -(cqe->u0.s.num_fragments & 0x7));
517 		(void) oce_rq_charge(dev, rq,
518 		    (cqe->u0.s.num_fragments & 0x7));
519 		RQ_CQE_INVALIDATE(cqe);
520 		RING_GET(cq->ring, 1);
521 		cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring,
522 		    struct oce_nic_rx_cqe);
523 		num_cqe++;
524 	} /* for all valid CQEs */
525 	mutex_exit(&rq->rx_lock);
526 	if (mblk_head) {
527 		mac_rx(dev->mac_handle, NULL, mblk_head);
528 	}
529 	oce_arm_cq(dev, cq->cq_id, num_cqe, B_TRUE);
530 	return (num_cqe);
531 } /* oce_drain_rq_cq */
532 
533 /*
 * function to return an mblk data buffer to the RQ pool
535  *
536  * arg - pointer to the receive buffer descriptor
537  *
538  * return none
539  */
540 static void
541 rx_pool_free(char *arg)
542 {
543 	oce_rq_bdesc_t *rqbd;
544 	struct oce_rq  *rq;
545 
546 	/* During destroy, arg will be NULL */
547 	if (arg == NULL) {
548 		return;
549 	}
550 
551 	/* retrieve the pointers from arg */
552 	rqbd = (oce_rq_bdesc_t *)(void *)arg;
553 	rq = rqbd->rq;
554 
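	/* attach a fresh mblk so the buffer can be loaned out again */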
555 	rqbd->mp = desballoc((uchar_t *)(rqbd->rqb->base),
556 	    rqbd->rqb->size, 0, &rqbd->fr_rtn);
557 	if (rqbd->mp != NULL) {
558 		rqbd->mp->b_rptr = (uchar_t *)rqbd->rqb->base +
559 		    OCE_RQE_BUF_HEADROOM;
560 	}
561 	oce_rqb_free(rq, rqbd);
562 	(void) atomic_add_32(&rq->pending, -1);
563 } /* rx_pool_free */
564 
565 /*
 * function to clean the RQ by draining and dropping pending completions
567  *
568  * rq - pointer to RQ structure
569  *
570  * return none
571  */
572 void
573 oce_clean_rq(struct oce_rq *rq)
574 {
575 	uint16_t num_cqe = 0;
576 	struct oce_cq  *cq;
577 	struct oce_dev *dev;
578 	struct oce_nic_rx_cqe *cqe;
579 	int32_t ti = 0;
580 
581 	dev = rq->parent;
582 	cq = rq->cq;
583 	cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_rx_cqe);
	/* poll for up to DEFAULT_DRAIN_TIME ms, dequeueing all valid cqes */
585 	for (ti = 0; ti < DEFAULT_DRAIN_TIME; ti++) {
586 
587 		while (RQ_CQE_VALID(cqe)) {
588 			DW_SWAP(u32ptr(cqe), sizeof (struct oce_nic_rx_cqe));
589 			oce_rx_drop_pkt(rq, cqe);
590 			atomic_add_32(&rq->buf_avail,
591 			    -(cqe->u0.s.num_fragments & 0x7));
592 			oce_arm_cq(dev, cq->cq_id, 1, B_TRUE);
593 			RQ_CQE_INVALIDATE(cqe);
594 			RING_GET(cq->ring, 1);
595 			cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring,
596 			    struct oce_nic_rx_cqe);
597 			num_cqe++;
598 		}
599 		OCE_MSDELAY(1);
600 	}
601 #if 0
602 	if (num_cqe) {
603 		oce_arm_cq(dev, cq->cq_id, num_cqe, B_FALSE);
604 	}
605 	/* Drain the Event queue now */
606 	oce_drain_eq(rq->cq->eq);
607 	return (num_cqe);
608 #endif
609 } /* oce_clean_rq */
610 
611 /*
 * function to start the RX
 *
 * rq - pointer to RQ structure
 *
 * return 0 (DDI_SUCCESS)
617  */
618 int
619 oce_start_rq(struct oce_rq *rq)
620 {
621 	int ret = 0;
622 	struct oce_dev *dev = rq->parent;
623 
624 	(void) oce_rq_charge(dev, rq, rq->cfg.q_len);
625 	oce_arm_cq(dev, rq->cq->cq_id, 0, B_TRUE);
626 	return (ret);
627 } /* oce_start_rq */
628 
/*
 * function to check for rx buffers still held by the stack
 *
 * dev - software handle to the device
 *
 * return number of rx buffers pending with the stack
 */
630 int
631 oce_rx_pending(struct oce_dev *dev)
632 {
633 	int ti;
634 
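	/* poll up to 200 times, ~1 ms apart, for buffers to be returned */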
635 	for (ti = 0; ti < 200; ti++) {
636 		if (dev->rq[0]->pending > 0) {
637 			OCE_MSDELAY(1);
638 			continue;
639 		} else {
640 			dev->rq[0]->pending = 0;
641 			break;
642 		}
643 	}
644 	return (dev->rq[0]->pending);
} /* oce_rx_pending */
646 
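/*
 * function to drop a received packet by returning its
 * buffers to the RQ pool
 *
 * rq - pointer to the RQ on which the packet was received
 * cqe - pointer to Completion Q entry
 *
 * return none
 */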
647 static inline void
648 oce_rx_drop_pkt(struct oce_rq *rq, struct oce_nic_rx_cqe *cqe)
649 {
650 	int frag_cnt;
651 	oce_rq_bdesc_t *rqbd;
652 	struct rq_shadow_entry *shadow_rq;
653 	shadow_rq = rq->shadow_ring;
654 	for (frag_cnt = 0; frag_cnt < cqe->u0.s.num_fragments; frag_cnt++) {
655 		rqbd = shadow_rq[rq->ring->cidx].rqbd;
656 		oce_rqb_free(rq, rqbd);
657 		RING_GET(rq->ring, 1);
658 	}
} /* oce_rx_drop_pkt */
660