/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2009 Emulex.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * Source file containing the Receive Path handling
 * functions
 */
#include <oce_impl.h>


static void rx_pool_free(char *arg);
static inline mblk_t *oce_rx(struct oce_dev *dev, struct oce_rq *rq,
    struct oce_nic_rx_cqe *cqe);
static int oce_rq_charge(struct oce_dev *dev, struct oce_rq *rq,
    uint32_t nbufs);
static oce_rq_bdesc_t *oce_rqb_alloc(struct oce_rq *rq);
static void oce_rqb_free(struct oce_rq *rq, oce_rq_bdesc_t *rqbd);
static void oce_rqb_dtor(oce_rq_bdesc_t *rqbd);
static int oce_rqb_ctor(oce_rq_bdesc_t *rqbd, struct oce_rq *rq,
    size_t size, int flags);

/*
 * function to create a DMA buffer pool for an RQ
 *
 * rq - pointer to the RQ for which the buffer pool is created
 * buf_size - size of each buffer
 *
 * return DDI_SUCCESS => success, DDI_FAILURE otherwise
 */
int
oce_rqb_cache_create(struct oce_rq *rq, size_t buf_size)
{
	struct oce_dev *dev = rq->parent;
	int size;
	int cnt;
	int ret;
	int nitems;

	nitems = rq->cfg.nbufs;
	size = nitems * sizeof (oce_rq_bdesc_t);
	rq->rq_bdesc_array = kmem_zalloc(size, KM_SLEEP);

	/* Create the free buffer list */
	OCE_LIST_CREATE(&rq->rq_buf_list, DDI_INTR_PRI(dev->intr_pri));

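	/*
	 * Construct every buffer descriptor up front and queue it on the
	 * free list; if any constructor fails, rqb_fail tears down whatever
	 * was built so far.
	 */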
	for (cnt = 0; cnt < nitems; cnt++) {
		ret = oce_rqb_ctor(&rq->rq_bdesc_array[cnt],
		    rq, buf_size, DDI_DMA_STREAMING);
		if (ret != DDI_SUCCESS) {
			goto rqb_fail;
		}
		OCE_LIST_INSERT_TAIL(&rq->rq_buf_list,
		    &(rq->rq_bdesc_array[cnt].link));
	}
	return (DDI_SUCCESS);

rqb_fail:
	oce_rqb_cache_destroy(rq);
	return (DDI_FAILURE);
} /* oce_rqb_cache_create */

/*
 * function to destroy the RQ DMA buffer cache
 *
 * rq - pointer to the RQ structure
 *
 * return none
 */
void
oce_rqb_cache_destroy(struct oce_rq *rq)
{
	oce_rq_bdesc_t *rqbd = NULL;

	while ((rqbd = (oce_rq_bdesc_t *)OCE_LIST_REM_HEAD(&rq->rq_buf_list))
	    != NULL) {
		oce_rqb_dtor(rqbd);
	}
	kmem_free(rq->rq_bdesc_array,
	    rq->cfg.nbufs * sizeof (oce_rq_bdesc_t));
	OCE_LIST_DESTROY(&rq->rq_buf_list);
} /* oce_rqb_cache_destroy */

/*
 * RQ buffer destructor function
 *
 * rqbd - pointer to rq buffer descriptor
 *
 * return none
 */
static	void
oce_rqb_dtor(oce_rq_bdesc_t *rqbd)
{
	if ((rqbd == NULL) || (rqbd->rq == NULL)) {
		return;
	}
	oce_free_dma_buffer(rqbd->rq->parent, rqbd->rqb);
	if (rqbd->mp != NULL) {
		/*
		 * The DMA buffer is already freed; clear free_arg so that
		 * rx_pool_free() is a no-op when freeb() runs the mblk's
		 * free routine.
		 */
		rqbd->fr_rtn.free_arg = NULL;
		freeb(rqbd->mp);
	}
} /* oce_rqb_dtor */

/*
 * RQ buffer constructor function
 *
 * rqbd - pointer to rq buffer descriptor
 * rq - pointer to RQ structure
 * size - size of the buffer
 * flags - DMA flags passed to oce_alloc_dma_buffer()
 *         (the caller uses DDI_DMA_STREAMING)
 *
 * return DDI_SUCCESS => success, DDI_FAILURE otherwise
 */
static int
oce_rqb_ctor(oce_rq_bdesc_t *rqbd, struct oce_rq *rq, size_t size, int flags)
{
	struct oce_dev *dev;
	oce_dma_buf_t *dbuf;

	dev = rq->parent;

	dbuf  = oce_alloc_dma_buffer(dev, size, flags);
	if (dbuf == NULL) {
		return (DDI_FAILURE);
	}

	/* save the buffer and point the fragment address past the headroom */
	rqbd->rqb = dbuf;
	rqbd->rq = rq;
	rqbd->frag_addr.dw.addr_lo = ADDR_LO(dbuf->addr + OCE_RQE_BUF_HEADROOM);
	rqbd->frag_addr.dw.addr_hi = ADDR_HI(dbuf->addr + OCE_RQE_BUF_HEADROOM);
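	/*
	 * The headroom skipped here is what oce_rx() later uses when it
	 * moves b_rptr back by VLAN_TAGSZ to reinsert the VLAN tag that
	 * the hardware strips on receive.
	 */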
	rqbd->fr_rtn.free_func = (void (*)())rx_pool_free;
	rqbd->fr_rtn.free_arg = (caddr_t)(void *)rqbd;
	rqbd->mp = desballoc((uchar_t *)(rqbd->rqb->base),
	    rqbd->rqb->size, 0, &rqbd->fr_rtn);
	/* desballoc() may fail here; oce_rq_charge() retries it later */
	if (rqbd->mp != NULL) {
		rqbd->mp->b_rptr = (uchar_t *)rqbd->rqb->base +
		    OCE_RQE_BUF_HEADROOM;
	}

	return (DDI_SUCCESS);
} /* oce_rqb_ctor */

/*
 * RQ buffer allocator function
 *
 * rq - pointer to RQ structure
 *
 * return pointer to RQ buffer descriptor
 */
static inline oce_rq_bdesc_t *
oce_rqb_alloc(struct oce_rq *rq)
{
	oce_rq_bdesc_t *rqbd;
	rqbd = OCE_LIST_REM_HEAD(&rq->rq_buf_list);
	return (rqbd);
} /* oce_rqb_alloc */

/*
 * function to free the RQ buffer
 *
 * rq - pointer to RQ structure
 * rqbd - pointer to receive buffer descriptor
 *
 * return none
 */
static inline void
oce_rqb_free(struct oce_rq *rq, oce_rq_bdesc_t *rqbd)
{
	OCE_LIST_INSERT_TAIL(&rq->rq_buf_list, rqbd);
} /* oce_rqb_free */


/*
 * function to charge a given RQ with buffers from the pool's free list
 *
 * dev - software handle to the device
 * rq - pointer to the RQ to charge
 * nbufs - number of buffers to post
 *
 * return number of rqes charged.
 */
static inline int
oce_rq_charge(struct oce_dev *dev,
    struct oce_rq *rq, uint32_t nbufs)
{
	struct oce_nic_rqe *rqe;
	oce_rq_bdesc_t *rqbd;
	struct rq_shadow_entry	*shadow_rq;
	int32_t num_bufs = 0;
	int32_t total_bufs = 0;
	pd_rxulp_db_t rxdb_reg;
	uint32_t cnt;

	shadow_rq = rq->shadow_ring;
	mutex_enter(&rq->lock);

	/* clamp nbufs so we never post more than the free slots in the ring */
	nbufs = ((rq->buf_avail + nbufs) > rq->cfg.q_len) ?
	    (rq->cfg.q_len - rq->buf_avail) : nbufs;

	for (cnt = 0; cnt < nbufs; cnt++) {

		int i = 0;
		const int retries = 1000;

		do {
			rqbd = oce_rqb_alloc(rq);
			if (rqbd != NULL) {
				break;
			}
		} while ((++i) < retries);

		if (rqbd == NULL) {
			oce_log(dev, CE_NOTE, MOD_RX, "%s %x",
			    "rqb pool empty @ ticks",
			    (uint32_t)ddi_get_lbolt());

			break;
		}

		i = 0;

		if (rqbd->mp == NULL) {

			do {
				rqbd->mp =
				    desballoc((uchar_t *)(rqbd->rqb->base),
				    rqbd->rqb->size, 0, &rqbd->fr_rtn);
				if (rqbd->mp != NULL) {
					rqbd->mp->b_rptr =
					    (uchar_t *)rqbd->rqb->base +
					    OCE_RQE_BUF_HEADROOM;
					break;
				}
			} while ((++i) < retries);
		}

		/*
		 * desballoc() failed again; return the buffer to the pool
		 * and continue.  The loop is bounded by nbufs, so it
		 * remains finite.
		 */

		if (rqbd->mp == NULL) {
			oce_rqb_free(rq, rqbd);
			continue;
		}

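		/*
		 * The shadow ring records which buffer descriptor backs each
		 * RQE so that oce_rx() can recover it when the completion
		 * arrives.
		 */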
		/* fill the rqes */
		rqe = RING_GET_PRODUCER_ITEM_VA(rq->ring,
		    struct oce_nic_rqe);
		rqe->u0.s.frag_pa_lo = rqbd->frag_addr.dw.addr_lo;
		rqe->u0.s.frag_pa_hi = rqbd->frag_addr.dw.addr_hi;
		shadow_rq[rq->ring->pidx].rqbd = rqbd;
		DW_SWAP(u32ptr(rqe), sizeof (struct oce_nic_rqe));
		RING_PUT(rq->ring, 1);

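		/*
		 * Doorbell writes are batched: every OCE_MAX_RQ_POSTS
		 * buffers, the count accumulated in num_bufs is written to
		 * PD_RXULP_DB, and any remainder is posted after the loop
		 * (presumably because the num_posted doorbell field can only
		 * encode a limited count).
		 */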
		/* if we have reached the max allowed posts, post */
		if (cnt && !(cnt % OCE_MAX_RQ_POSTS)) {
			rxdb_reg.dw0 = 0;
			rxdb_reg.bits.num_posted = num_bufs;
			rxdb_reg.bits.qid = rq->rq_id & DB_RQ_ID_MASK;
			OCE_DB_WRITE32(dev, PD_RXULP_DB, rxdb_reg.dw0);
			num_bufs = 0;
		}
		num_bufs++;
		total_bufs++;
	}

	/* post pending bufs */
	if (num_bufs) {
		rxdb_reg.dw0 = 0;
		rxdb_reg.bits.num_posted = num_bufs;
		rxdb_reg.bits.qid = rq->rq_id & DB_RQ_ID_MASK;
		OCE_DB_WRITE32(dev, PD_RXULP_DB, rxdb_reg.dw0);
	}
	mutex_exit(&rq->lock);
	atomic_add_32(&rq->buf_avail, total_bufs);
	return (total_bufs);
} /* oce_rq_charge */

/*
 * function to release the posted buffers
 *
 * rq - pointer to the RQ to discharge
 *
 * return none
 */
void
oce_rq_discharge(struct oce_rq *rq)
{
	oce_rq_bdesc_t *rqbd;
	struct rq_shadow_entry *shadow_rq;

	shadow_rq = rq->shadow_ring;
	mutex_enter(&rq->lock);

	/* Free the posted buffer since RQ is destroyed already */
	while ((int32_t)rq->buf_avail > 0) {
		rqbd = shadow_rq[rq->ring->cidx].rqbd;
		oce_rqb_free(rq, rqbd);
		RING_GET(rq->ring, 1);
		rq->buf_avail--;
	}
	mutex_exit(&rq->lock);
}
/*
 * function to process a single packet
 *
 * dev - software handle to the device
 * rq - pointer to the RQ on which the packet was received
 * cqe - pointer to the completion queue entry
 *
 * return mblk pointer => success, NULL => error
 */
static inline mblk_t *
oce_rx(struct oce_dev *dev, struct oce_rq *rq, struct oce_nic_rx_cqe *cqe)
{
	mblk_t *mp;
	uint32_t csum_flags = 0;
	int pkt_len;
	uint16_t vtag;
	int32_t frag_cnt = 0;
	mblk_t *mblk_prev = NULL;
	mblk_t	*mblk_head = NULL;
	int frag_size;
	struct rq_shadow_entry *shadow_rq;
	struct rq_shadow_entry *shadow_rqe;
	oce_rq_bdesc_t *rqbd;
	struct ether_vlan_header *ehp;

	/* Get the relevant Queue pointers */
	shadow_rq = rq->shadow_ring;
	pkt_len = cqe->u0.s.pkt_size;

	/* The hardware always strips the VLAN tag, so reinsert it here */
	if (cqe->u0.s.vlan_tag_present) {
		shadow_rqe = &shadow_rq[rq->ring->cidx];
		/* retrieve the Rx buffer from the shadow ring */
		rqbd = shadow_rqe->rqbd;
		mp = rqbd->mp;
		if (mp == NULL)
			return (NULL);
		vtag = cqe->u0.s.vlan_tag;
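		/*
		 * Slide the destination and source MAC addresses back by
		 * VLAN_TAGSZ into the buffer headroom so the VLAN TPID and
		 * TCI can be written between them and the original
		 * ethertype.
		 */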
		(void) memmove(mp->b_rptr - VLAN_TAGSZ,
		    mp->b_rptr, 2 * ETHERADDRL);
		mp->b_rptr -= VLAN_TAGSZ;
		ehp = (struct ether_vlan_header *)voidptr(mp->b_rptr);
		ehp->ether_tpid = htons(ETHERTYPE_VLAN);
		ehp->ether_tci = LE_16(vtag);

		frag_size = (pkt_len > rq->cfg.frag_size) ?
		    rq->cfg.frag_size : pkt_len;
		mp->b_wptr =  mp->b_rptr + frag_size + VLAN_TAGSZ;
		mblk_head = mblk_prev = mp;
		/* Move the pointers */
		RING_GET(rq->ring, 1);
		frag_cnt++;
		pkt_len -= frag_size;
		(void) ddi_dma_sync(rqbd->rqb->dma_handle, 0, frag_size,
		    DDI_DMA_SYNC_FORKERNEL);
	}

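	/*
	 * Walk the remaining fragments of this packet, sizing each mblk to
	 * the data it holds and chaining the mblks with b_cont.
	 */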
	for (; frag_cnt < cqe->u0.s.num_fragments; frag_cnt++) {
		shadow_rqe = &shadow_rq[rq->ring->cidx];
		rqbd = shadow_rqe->rqbd;
		mp = rqbd->mp;
		if (mp == NULL)
			return (NULL);
		frag_size  = (pkt_len > rq->cfg.frag_size) ?
		    rq->cfg.frag_size : pkt_len;
		mp->b_wptr = mp->b_rptr + frag_size;
		pkt_len   -= frag_size;
		/* Chain the message mblks */
		if (mblk_head == NULL) {
			mblk_head = mblk_prev = mp;
		} else {
			mblk_prev->b_cont = mp;
			mblk_prev = mp;
		}
		(void) ddi_dma_sync(rqbd->rqb->dma_handle, 0, frag_size,
		    DDI_DMA_SYNC_FORKERNEL);
		RING_GET(rq->ring, 1);
	}

	if (mblk_head == NULL) {
		oce_log(dev, CE_WARN, MOD_RX, "%s", "oce_rx:no frags?");
		return (NULL);
	}

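	/* account for the consumed buffers and refill the RQ right away */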
	atomic_add_32(&rq->buf_avail, -frag_cnt);
	(void) oce_rq_charge(dev, rq, frag_cnt);

	/* check dma handle */
	if (oce_fm_check_dma_handle(dev, rqbd->rqb->dma_handle) !=
	    DDI_FM_OK) {
		ddi_fm_service_impact(dev->dip, DDI_SERVICE_DEGRADED);
		return (NULL);
	}

	/* set flags */
	if (cqe->u0.s.ip_cksum_pass) {
		csum_flags |= HCK_IPV4_HDRCKSUM;
	}

	if (cqe->u0.s.l4_cksum_pass) {
		csum_flags |= (HCK_FULLCKSUM | HCK_FULLCKSUM_OK);
	}

	if (csum_flags) {
		(void) hcksum_assoc(mblk_head, NULL, NULL, 0, 0, 0, 0,
		    csum_flags, 0);
	}
	mblk_head->b_next = NULL;
	return (mblk_head);
} /* oce_rx */


/*
 * function to process a Receive queue
 *
 * arg - pointer to the RQ to process
 *
 * return number of cqes processed
 */
uint16_t
oce_drain_rq_cq(void *arg)
{
	struct oce_nic_rx_cqe *cqe;
	struct oce_rq *rq;
	mblk_t *mp = NULL;
	mblk_t *mblk_head  = NULL;
	mblk_t *mblk_prev  = NULL;
	uint16_t num_cqe = 0;
	struct oce_cq  *cq;
	struct oce_dev *dev;
	int32_t buf_used = 0;

	if (arg == NULL)
		return (0);

	rq = (struct oce_rq *)arg;
	dev = rq->parent;
	cq = rq->cq;

	if (dev == NULL || cq == NULL)
		return (0);

	mutex_enter(&cq->lock);
	cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_rx_cqe);

	/* dequeue till you reach an invalid cqe */
	while (RQ_CQE_VALID(cqe) && (num_cqe < rq->cfg.q_len)) {
		DW_SWAP(u32ptr(cqe), sizeof (struct oce_nic_rx_cqe));
		ASSERT(rq->ring->cidx != cqe->u0.s.frag_index);
		mp = oce_rx(dev, rq, cqe);
		if (mp != NULL) {
			if (mblk_head == NULL) {
				mblk_head = mblk_prev  = mp;
			} else {
				mblk_prev->b_next = mp;
				mblk_prev = mp;
			}
		}
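		/*
		 * num_fragments is masked to its low three bits; the CQE
		 * field appears to be only three bits wide.
		 */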
		buf_used +=  (cqe->u0.s.num_fragments & 0x7);
		RQ_CQE_INVALIDATE(cqe);
		RING_GET(cq->ring, 1);
		cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring,
		    struct oce_nic_rx_cqe);
		num_cqe++;
	} /* for all valid CQEs */

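	/*
	 * rq->pending counts the buffers loaned upstream; each one comes
	 * back through rx_pool_free(), and oce_stop_rq() waits for the
	 * count to drain before tearing the RQ down.
	 */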
	atomic_add_32(&rq->pending, buf_used);
	mutex_exit(&cq->lock);
	if (mblk_head) {
		mac_rx(dev->mac_handle, NULL, mblk_head);
	}
	oce_arm_cq(dev, cq->cq_id, num_cqe, B_TRUE);
	return (num_cqe);
} /* oce_drain_rq_cq */

/*
 * function to return an mblk's data buffer to the RQ pool
 *
 * arg - pointer to the receive buffer descriptor
 *
 * return none
 */
static void
rx_pool_free(char *arg)
{
	oce_rq_bdesc_t *rqbd;
	struct oce_rq  *rq;
	struct oce_dev *dev;
	int i = 0;
	const int retries = 1000;

	/* During destroy, arg will be NULL */
	if (arg == NULL) {
		return;
	}

	/* retrieve the pointers from arg */
	rqbd = (oce_rq_bdesc_t *)(void *)arg;
	rq = rqbd->rq;
	dev = rq->parent;

	if ((dev->state & STATE_MAC_STARTED) == 0) {
		return;
	}

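	/*
	 * The mblk just consumed upstream belonged to this descriptor, so
	 * bind a fresh one to the data buffer before returning the
	 * descriptor to the free list; if desballoc() keeps failing here,
	 * oce_rq_charge() retries the allocation when the buffer is posted.
	 */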
	do {
		rqbd->mp = desballoc((uchar_t *)(rqbd->rqb->base),
		    rqbd->rqb->size, 0, &rqbd->fr_rtn);
		if (rqbd->mp != NULL) {
			rqbd->mp->b_rptr = (uchar_t *)rqbd->rqb->base +
			    OCE_RQE_BUF_HEADROOM;
			break;
		}
	} while ((++i) < retries);

	oce_rqb_free(rq, rqbd);
	(void) atomic_add_32(&rq->pending, -1);
	if (atomic_add_32_nv(&rq->buf_avail, 0) == 0 &&
	    OCE_LIST_SIZE(&rq->rq_buf_list) > 16) {
		/*
		 * Rx has stalled because of a lack of buffers,
		 * so try to charge the RQ fully.
		 */
		(void) oce_rq_charge(dev, rq, rq->cfg.q_len);
	}
} /* rx_pool_free */

/*
 * function to stop the RX
 *
 * rq - pointer to RQ structure
 *
 * return none
 */
void
oce_stop_rq(struct oce_rq *rq)
{
	/*
	 * Wait for the packets sent upstream to be freed
	 */
	while (rq->pending > 0) {
		drv_usecwait(10 * 1000);
	}

	rq->pending = 0;
	/* Drain the Event queue now */
	oce_drain_eq(rq->cq->eq);
} /* oce_stop_rq */

/*
 * function to start the RX
 *
 * rq - pointer to RQ structure
 *
 * return number of rqes charged.
 */
int
oce_start_rq(struct oce_rq *rq)
{
	int ret = 0;
	struct oce_dev *dev = rq->parent;

	oce_arm_cq(dev, rq->cq->cq_id, 0, B_TRUE);
	ret = oce_rq_charge(dev, rq, rq->cfg.q_len);
	return (ret);
} /* oce_start_rq */