/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include "rge.h"

#define	U32TOPTR(x)	((void *)(uintptr_t)(uint32_t)(x))
#define	PTRTOU32(x)	((uint32_t)(uintptr_t)(void *)(x))

/*
 * ========== RX side routines ==========
 */

#define	RGE_DBG		RGE_DBG_RECV	/* debug flag for this code	*/

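/*
 * Atomically decrement a counter: reserve <n> units from *count_p.
 * Returns the new count on success, or 0 (leaving the counter
 * unchanged) if the reservation can't be satisfied.  A reservation
 * that would leave the count at exactly zero is also refused, so a
 * return value of 0 always means failure.
 */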
static uint32_t rge_atomic_reserve(uint32_t *count_p, uint32_t n);
#pragma	inline(rge_atomic_reserve)

static uint32_t
rge_atomic_reserve(uint32_t *count_p, uint32_t n)
{
	uint32_t oldval;
	uint32_t newval;

	/* ATOMICALLY */
	do {
		oldval = *count_p;
		newval = oldval - n;
		if (oldval <= n)
			return (0);		/* no resources left	*/
	} while (cas32(count_p, oldval, newval) != oldval);

	return (newval);
}

/*
 * Atomically increment a counter
 */
static void rge_atomic_renounce(uint32_t *count_p, uint32_t n);
#pragma	inline(rge_atomic_renounce)

static void
rge_atomic_renounce(uint32_t *count_p, uint32_t n)
{
	uint32_t oldval;
	uint32_t newval;

	/* ATOMICALLY */
	do {
		oldval = *count_p;
		newval = oldval + n;
	} while (cas32(count_p, oldval, newval) != oldval);
}

/*
 * Callback code invoked from STREAMS when the recv data buffer is free
 * for recycling.
 */

void
rge_rx_recycle(caddr_t arg)
{
	rge_t *rgep;
	dma_buf_t *rx_buf;
	sw_rbd_t *free_rbdp;
	uint32_t slot_recy;

	rx_buf = (dma_buf_t *)arg;
	rgep = (rge_t *)rx_buf->private;

	/*
	 * If rge_unattach() has been called, this callback will also be
	 * invoked when we free the mp in rge_fini_rings().  In that case
	 * we must not call desballoc() below, otherwise the buffer would
	 * be leaked.
	 */
	if (rgep->rge_mac_state == RGE_MAC_UNATTACH)
		return;

	/*
	 * Re-wrap the data buffer in a fresh mblk and return it
	 * to the free ring
	 */
	rx_buf->mp = desballoc(DMA_VPTR(rx_buf->pbuf),
	    rgep->rxbuf_size, 0, &rx_buf->rx_recycle);
	if (rx_buf->mp == NULL) {
		rge_problem(rgep, "rge_rx_recycle: desballoc() failed");
		return;
	}
	mutex_enter(rgep->rc_lock);
	slot_recy = rgep->rc_next;
	free_rbdp = &rgep->free_rbds[slot_recy];
	if (free_rbdp->rx_buf == NULL) {
		free_rbdp->rx_buf = rx_buf;
		rgep->rc_next = NEXT(slot_recy, RGE_BUF_SLOTS);
		rge_atomic_renounce(&rgep->rx_free, 1);
		if (rgep->rx_bcopy && rgep->rx_free == RGE_BUF_SLOTS)
			rgep->rx_bcopy = B_FALSE;
		ASSERT(rgep->rx_free <= RGE_BUF_SLOTS);
	} else {
		/*
		 * This situation shouldn't happen
		 */
		rge_problem(rgep, "rge_rx_recycle: buffer %d recycle error",
		    slot_recy);
		rgep->stats.recycle_err++;
	}
	mutex_exit(rgep->rc_lock);
}

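/*
 * Refill the receive BD at <slot> with a buffer taken from the
 * free-buffer ring.  Returns 1 on success, or 0 if no free buffer
 * was available (in which case the driver falls back to bcopy
 * receive until buffers are recycled).
 */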
static int rge_rx_refill(rge_t *rgep, uint32_t slot);
#pragma	inline(rge_rx_refill)

static int
rge_rx_refill(rge_t *rgep, uint32_t slot)
{
	dma_buf_t *free_buf;
	rge_bd_t *hw_rbd_p;
	sw_rbd_t *srbdp;
	uint32_t free_slot;

	srbdp = &rgep->sw_rbds[slot];
	hw_rbd_p = &rgep->rx_ring[slot];
	free_slot = rgep->rf_next;
	free_buf = rgep->free_rbds[free_slot].rx_buf;
	if (free_buf != NULL) {
		srbdp->rx_buf = free_buf;
		rgep->free_rbds[free_slot].rx_buf = NULL;
		hw_rbd_p->host_buf_addr = RGE_BSWAP_32(RGE_HEADROOM +
		    free_buf->pbuf.cookie.dmac_laddress);
		hw_rbd_p->host_buf_addr_hi =
		    RGE_BSWAP_32(free_buf->pbuf.cookie.dmac_laddress >> 32);
		rgep->rf_next = NEXT(free_slot, RGE_BUF_SLOTS);
		return (1);
	} else {
		/*
		 * This situation shouldn't happen
		 */
		rge_problem(rgep, "rge_rx_refill: free buffer %d is NULL",
		    free_slot);
		rgep->rx_bcopy = B_TRUE;
		return (0);
	}
}

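/*
 * Process the packet in receive ring slot <slot>: validate the BD
 * status and length, either bcopy the data into a fresh mblk or loan
 * the DMA buffer upstream, reinsert any VLAN tag the chip stripped,
 * and attach the h/w checksum results.  Returns the mblk, or NULL if
 * the packet was bad or resources were unavailable.
 */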
static mblk_t *rge_receive_packet(rge_t *rgep, uint32_t slot);
#pragma	inline(rge_receive_packet)

static mblk_t *
rge_receive_packet(rge_t *rgep, uint32_t slot)
{
	rge_bd_t *hw_rbd_p;
	sw_rbd_t *srbdp;
	uchar_t *dp;
	mblk_t *mp;
	uint8_t *rx_ptr;
	uint32_t rx_status;
	uint_t packet_len;
	uint_t minsize;
	uint_t maxsize;
	uint32_t proto;
	uint32_t pflags;
	struct ether_vlan_header *ehp;
	uint16_t vtag = 0;

	hw_rbd_p = &rgep->rx_ring[slot];
	srbdp = &rgep->sw_rbds[slot];
	packet_len = RGE_BSWAP_32(hw_rbd_p->flags_len) & RBD_LEN_MASK;

	/*
	 * Read receive status
	 */
	rx_status = RGE_BSWAP_32(hw_rbd_p->flags_len) & RBD_FLAGS_MASK;

	/*
	 * Handle error packet
	 */
	if (!(rx_status & BD_FLAG_PKT_END)) {
		RGE_DEBUG(("rge_receive_packet: not a complete packet"));
		return (NULL);
	}
	if (rx_status & RBD_FLAG_ERROR) {
		if (rx_status & RBD_FLAG_CRC_ERR)
			rgep->stats.crc_err++;
		if (rx_status & RBD_FLAG_RUNT)
			rgep->stats.in_short++;
		/*
		 * Set chip_error flag to reset chip:
		 * (suggested in Realtek programming guide.)
		 */
		RGE_DEBUG(("rge_receive_packet: error packet, status = %x",
		    rx_status));
		mutex_enter(rgep->genlock);
		rgep->rge_chip_state = RGE_CHIP_ERROR;
		mutex_exit(rgep->genlock);
		return (NULL);
	}

	/*
	 * Handle size error packet
	 */
	minsize = ETHERMIN - VLAN_TAGSZ + ETHERFCSL;
	maxsize = rgep->ethmax_size + ETHERFCSL;

	if (packet_len < minsize || packet_len > maxsize) {
		RGE_DEBUG(("rge_receive_packet: len err = %d", packet_len));
		return (NULL);
	}

	DMA_SYNC(srbdp->rx_buf->pbuf, DDI_DMA_SYNC_FORKERNEL);
	if (packet_len <= RGE_RECV_COPY_SIZE || rgep->rx_bcopy ||
	    !rge_atomic_reserve(&rgep->rx_free, 1)) {
		/*
		 * Allocate buffer to receive this good packet
		 */
		mp = allocb(packet_len + RGE_HEADROOM, 0);
		if (mp == NULL) {
			RGE_DEBUG(("rge_receive_packet: allocate buffer fail"));
			rgep->stats.no_rcvbuf++;
			return (NULL);
		}

		/*
		 * Copy the data found into the new cluster
		 */
		rx_ptr = DMA_VPTR(srbdp->rx_buf->pbuf);
		mp->b_rptr = dp = mp->b_rptr + RGE_HEADROOM;
		bcopy(rx_ptr + RGE_HEADROOM, dp, packet_len);
		mp->b_wptr = dp + packet_len - ETHERFCSL;
	} else {
		mp = srbdp->rx_buf->mp;
		mp->b_rptr += RGE_HEADROOM;
		mp->b_wptr = mp->b_rptr + packet_len - ETHERFCSL;
		mp->b_next = mp->b_cont = NULL;
		/*
		 * Refill this receive BD with a buffer from the free ring;
		 * if the refill fails, just keep the mp (the packet is
		 * dropped).
		 */
		if (!rge_rx_refill(rgep, slot))
			return (NULL);
	}
	rgep->stats.rbytes += packet_len;

	/*
	 * VLAN packet ?
	 */
	pflags = RGE_BSWAP_32(hw_rbd_p->vlan_tag);
	if (pflags & RBD_VLAN_PKT)
		vtag = pflags & RBD_VLAN_TAG;
	if (vtag) {
		vtag = TCI_CHIP2OS(vtag);
		/*
		 * The h/w strips the VLAN tag from the incoming packet,
		 * so we must reinsert the tag before sending the packet up.
		 */
		(void) memmove(mp->b_rptr - VLAN_TAGSZ, mp->b_rptr,
		    2 * ETHERADDRL);
		mp->b_rptr -= VLAN_TAGSZ;
		ehp = (struct ether_vlan_header *)mp->b_rptr;
		ehp->ether_tpid = htons(VLAN_TPID);
		ehp->ether_tci = htons(vtag);
	}

	/*
	 * Check h/w checksum offload status
	 */
	pflags = 0;
	proto = rx_status & RBD_FLAG_PROTOCOL;
	if ((proto == RBD_FLAG_TCP && !(rx_status & RBD_TCP_CKSUM_ERR)) ||
	    (proto == RBD_FLAG_UDP && !(rx_status & RBD_UDP_CKSUM_ERR)))
		pflags |= HCK_FULLCKSUM | HCK_FULLCKSUM_OK;
	if (proto != RBD_FLAG_NONE_IP && !(rx_status & RBD_IP_CKSUM_ERR))
		pflags |= HCK_IPV4_HDRCKSUM;
	if (pflags != 0) {
		(void) hcksum_assoc(mp, NULL, NULL, 0, 0, 0, 0, pflags, 0);
	}

	return (mp);
}

/*
 * Accept the packets received in the rx ring.
 *
 * Returns a chain of mblks containing the received data, to be
 * passed up to mac_rx().
 * The routine returns only when a complete scan has been performed
 * without finding any more packets to receive.
 * This function must SET the OWN bit of each BD it has accepted,
 * to hand those slots back to the hardware.
 */
static mblk_t *rge_receive_ring(rge_t *rgep);
#pragma	inline(rge_receive_ring)

static mblk_t *
rge_receive_ring(rge_t *rgep)
{
	rge_bd_t *hw_rbd_p;
	mblk_t *head;
	mblk_t **tail;
	mblk_t *mp;
	uint32_t slot;

	ASSERT(mutex_owned(rgep->rx_lock));

	/*
	 * Sync (all) the receive ring descriptors
	 * before accepting the packets they describe
	 */
	DMA_SYNC(rgep->rx_desc, DDI_DMA_SYNC_FORKERNEL);
	slot = rgep->rx_next;
	hw_rbd_p = &rgep->rx_ring[slot];
	head = NULL;
	tail = &head;

	while (!(hw_rbd_p->flags_len & RGE_BSWAP_32(BD_FLAG_HW_OWN))) {
		if ((mp = rge_receive_packet(rgep, slot)) != NULL) {
			*tail = mp;
			tail = &mp->b_next;
		}

		/*
		 * Clear RBD flags
		 */
		hw_rbd_p->flags_len =
		    RGE_BSWAP_32(rgep->rxbuf_size - RGE_HEADROOM);
		HW_RBD_INIT(hw_rbd_p, slot);
		slot = NEXT(slot, RGE_RECV_SLOTS);
		hw_rbd_p = &rgep->rx_ring[slot];
	}

	rgep->rx_next = slot;
	return (head);
}

/*
 * Receive all ready packets.
 */
void rge_receive(rge_t *rgep);
#pragma	no_inline(rge_receive)

void
rge_receive(rge_t *rgep)
{
	mblk_t *mp;

	mutex_enter(rgep->rx_lock);
	mp = rge_receive_ring(rgep);
	mutex_exit(rgep->rx_lock);

	if (mp != NULL)
		mac_rx(rgep->mh, rgep->handle, mp);
}


#undef	RGE_DBG
#define	RGE_DBG		RGE_DBG_SEND	/* debug flag for this code	*/


/*
 * ========== Send-side recycle routines ==========
 */
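
/*
 * Claim the next slot in the transmit ring.  The caller must already
 * hold a reservation (via rge_atomic_reserve(&rgep->tx_free, 1)), so
 * this step cannot fail.  Returns the index of the claimed slot.
 */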
static uint32_t rge_send_claim(rge_t *rgep);
#pragma	inline(rge_send_claim)

static uint32_t
rge_send_claim(rge_t *rgep)
{
	uint32_t slot;
	uint32_t next;

	mutex_enter(rgep->tx_lock);
	slot = rgep->tx_next;
	next = NEXT(slot, RGE_SEND_SLOTS);
	rgep->tx_next = next;
	rgep->tx_flow++;
	mutex_exit(rgep->tx_lock);

	/*
	 * We check that our invariants still hold:
	 * +	the slot and next indexes are in range
	 * +	the slot must not be the last one (i.e. the *next*
	 *	index must not match the next-recycle index), 'cos
	 *	there must always be at least one free slot in a ring
	 */
	ASSERT(slot < RGE_SEND_SLOTS);
	ASSERT(next < RGE_SEND_SLOTS);
	ASSERT(next != rgep->tc_next);

	return (slot);
}

/*
 * We don't want to call this function from the ISR after every
 * successful h/w transmit.  Instead, it is called from rge_send()
 * when few or no free tx BDs remain.
 */
static void rge_send_recycle(rge_t *rgep);
#pragma	inline(rge_send_recycle)

static void
rge_send_recycle(rge_t *rgep)
{
	rge_bd_t *hw_sbd_p;
	uint32_t tc_tail;
	uint32_t tc_head;
	uint32_t n;

	if (rgep->tx_free == RGE_SEND_SLOTS)
		return;

	mutex_enter(rgep->tc_lock);
	tc_head = rgep->tc_next;
	tc_tail = rgep->tc_tail;

	do {
		tc_tail = LAST(tc_tail, RGE_SEND_SLOTS);
		hw_sbd_p = &rgep->tx_ring[tc_tail];
		if (tc_tail == tc_head) {
			if (hw_sbd_p->flags_len &
			    RGE_BSWAP_32(BD_FLAG_HW_OWN)) {
				/*
				 * Bump the watchdog counter, thus guaranteeing
				 * that it's nonzero (watchdog activated).
				 */
				rgep->watchdog += 1;
				mutex_exit(rgep->tc_lock);
				return;
			}
			break;
		}
	} while (hw_sbd_p->flags_len & RGE_BSWAP_32(BD_FLAG_HW_OWN));

	rgep->tc_next = NEXT(tc_tail, RGE_SEND_SLOTS);
	n = rgep->tc_next - tc_head;
	if (rgep->tc_next < tc_head)
		n += RGE_SEND_SLOTS;
	rge_atomic_renounce(&rgep->tx_free, n);
	rgep->watchdog = 0;
	mutex_exit(rgep->tc_lock);

	if (rgep->resched_needed) {
		rgep->resched_needed = 0;
		ddi_trigger_softintr(rgep->resched_id);
	}
}

/*
 * Send a message by copying it into a preallocated (and premapped) buffer
 */
static void rge_send_copy(rge_t *rgep, mblk_t *mp, uint16_t tci, uchar_t proto);
#pragma	inline(rge_send_copy)

static void
rge_send_copy(rge_t *rgep, mblk_t *mp, uint16_t tci, uchar_t proto)
{
	rge_bd_t *hw_sbd_p;
	sw_sbd_t *ssbdp;
	mblk_t *bp;
	char *txb;
	uint32_t slot;
	size_t totlen;
	size_t mblen;
	uint32_t pflags;

	/*
	 * IMPORTANT:
	 *	Up to the point where it claims a place, a send_msg()
	 *	routine can indicate failure by returning B_FALSE.  Once it's
	 *	claimed a place, it mustn't fail.
	 *
	 * In this version, there's no setup to be done here, and there's
	 * nothing that can fail, so we can go straight to claiming our
	 * already-reserved place on the train.
	 *
	 * This is the point of no return!
	 */
	slot = rge_send_claim(rgep);
	ssbdp = &rgep->sw_sbds[slot];

	/*
	 * Copy the data into a pre-mapped buffer, which avoids the
	 * overhead (and complication) of mapping/unmapping STREAMS
	 * buffers and keeping hold of them until the DMA has completed.
	 *
	 * Because all buffers are the same size, and larger than the
	 * longest single valid message, we don't have to bother about
	 * splitting the message across multiple buffers either.
	 */
	txb = DMA_VPTR(ssbdp->pbuf);
	for (totlen = 0, bp = mp; bp != NULL; bp = bp->b_cont) {
		mblen = bp->b_wptr - bp->b_rptr;
		if ((totlen += mblen) <= rgep->ethmax_size) {
			bcopy(bp->b_rptr, txb, mblen);
			txb += mblen;
		}
	}
	rgep->stats.obytes += totlen + ETHERFCSL;

	/*
	 * We've reached the end of the chain; we should have collected
	 * no more than ethmax_size bytes into our buffer.
	 */
	ASSERT(bp == NULL);
	ASSERT(totlen <= rgep->ethmax_size);
	DMA_SYNC(ssbdp->pbuf, DDI_DMA_SYNC_FORDEV);

	/*
	 * Update the hardware send buffer descriptor; then we're done
	 * and return. The message can be freed right away in rge_send(),
	 * as we've already copied the contents ...
	 */
	hw_sbd_p = &rgep->tx_ring[slot];
	hw_sbd_p->flags_len = RGE_BSWAP_32(totlen & SBD_LEN_MASK);
	if (tci != 0) {
		tci = TCI_OS2CHIP(tci);
		hw_sbd_p->vlan_tag = RGE_BSWAP_32(tci);
		hw_sbd_p->vlan_tag |= RGE_BSWAP_32(SBD_VLAN_PKT);
	} else {
		hw_sbd_p->vlan_tag = 0;
	}

	hcksum_retrieve(mp, NULL, NULL, NULL, NULL, NULL, NULL, &pflags);
	if (pflags & HCK_FULLCKSUM) {
		switch (proto) {
		case IS_UDP_PKT:
			hw_sbd_p->flags_len |= RGE_BSWAP_32(SBD_FLAG_UDP_CKSUM);
			proto = IS_IPV4_PKT;
			break;
		case IS_TCP_PKT:
			hw_sbd_p->flags_len |= RGE_BSWAP_32(SBD_FLAG_TCP_CKSUM);
			proto = IS_IPV4_PKT;
			break;
		default:
			break;
		}
	}
	if ((pflags & HCK_IPV4_HDRCKSUM) && (proto == IS_IPV4_PKT))
		hw_sbd_p->flags_len |= RGE_BSWAP_32(SBD_FLAG_IP_CKSUM);

	HW_SBD_SET(hw_sbd_p, slot);
}

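/*
 * Send a single message: strip any VLAN tag (saving the TCI so the
 * chip can reinsert it), reserve and fill a transmit slot, and
 * trigger the h/w once the current burst of packets has been queued.
 * Returns B_TRUE and frees the mblk on success; returns B_FALSE
 * (leaving the mblk for the caller to retry) on failure.
 */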
static boolean_t
rge_send(rge_t *rgep, mblk_t *mp)
{
	struct ether_vlan_header *ehp;
	boolean_t need_strip = B_FALSE;
	uint16_t tci = 0;
	uchar_t proto = UNKNOWN_PKT;
	struct ether_header *ethhdr;
	struct ip *ip_hdr;

	ASSERT(mp->b_next == NULL);

	/*
	 * Determine if the packet is VLAN tagged.
	 */
	ASSERT(MBLKL(mp) >= sizeof (struct ether_header));
	ehp = (struct ether_vlan_header *)mp->b_rptr;

	if (ehp->ether_tpid == htons(VLAN_TPID)) {
		if (MBLKL(mp) < sizeof (struct ether_vlan_header)) {
			uint32_t pflags;

			/*
			 * Need to preserve checksum flags across pullup.
			 */
			hcksum_retrieve(mp, NULL, NULL, NULL, NULL, NULL,
			    NULL, &pflags);

			if (!pullupmsg(mp,
			    sizeof (struct ether_vlan_header))) {
				RGE_DEBUG(("rge_send: pullup failure"));
				rgep->resched_needed = B_TRUE;
				return (B_FALSE);
			}

			(void) hcksum_assoc(mp, NULL, NULL, NULL, NULL, NULL,
			    NULL, pflags, KM_NOSLEEP);
		}

		ehp = (struct ether_vlan_header *)mp->b_rptr;
		need_strip = B_TRUE;
	}

	/*
	 * Try to reserve a place in the transmit ring.
	 */
	if (!rge_atomic_reserve(&rgep->tx_free, 1)) {
		RGE_DEBUG(("rge_send: no free slots"));
		rgep->stats.defer++;
		rgep->resched_needed = B_TRUE;
		rge_send_recycle(rgep);
		return (B_FALSE);
	}

	/*
	 * We've reserved a place :-)
	 * These ASSERTions check that our invariants still hold:
	 *	there must still be at least one free place
	 *	there must be at least one place NOT free (ours!)
	 */
	ASSERT(rgep->tx_free < RGE_SEND_SLOTS);

	/*
	 * Now that we know that there is space to transmit the packet
	 * strip any VLAN tag that is present.
	 */
	if (need_strip) {
		tci = ntohs(ehp->ether_tci);
		(void) memmove(mp->b_rptr + VLAN_TAGSZ, mp->b_rptr,
		    2 * ETHERADDRL);
		mp->b_rptr += VLAN_TAGSZ;
	}

	/*
	 * Check the packet's protocol type to select the appropriate
	 * h/w checksum offload flags
	 */
	if (MBLKL(mp) >= sizeof (struct ether_header) +
	    sizeof (struct ip)) {
		ethhdr = (struct ether_header *)(mp->b_rptr);
		/*
		 * Is the packet an IP(v4) packet?
		 */
		if (ntohs(ethhdr->ether_type) == ETHERTYPE_IP) {
			proto = IS_IPV4_PKT;
			ip_hdr = (struct ip *)(mp->b_rptr +
			    sizeof (struct ether_header));
			if (ip_hdr->ip_p == IPPROTO_TCP)
				proto = IS_TCP_PKT;
			else if (ip_hdr->ip_p == IPPROTO_UDP)
				proto = IS_UDP_PKT;
		}
	}

	rge_send_copy(rgep, mp, tci, proto);

	/*
	 * Trigger chip h/w transmit ...
	 */
	mutex_enter(rgep->tx_lock);
	if (--rgep->tx_flow == 0) {
		DMA_SYNC(rgep->tx_desc, DDI_DMA_SYNC_FORDEV);
		rge_tx_trigger(rgep);
		if (rgep->tx_free < RGE_SEND_SLOTS/32)
			rge_send_recycle(rgep);
		rgep->tc_tail = rgep->tx_next;
	}
	mutex_exit(rgep->tx_lock);

	freemsg(mp);
	return (B_TRUE);
}

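/*
 * Soft interrupt handler (triggered from rge_send_recycle() via
 * ddi_trigger_softintr()): if a send was previously deferred for
 * lack of resources, ask the MAC layer to retry via mac_tx_update().
 */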
uint_t
rge_reschedule(caddr_t arg)
{
	rge_t *rgep;
	uint_t rslt;

	rgep = (rge_t *)arg;
	rslt = DDI_INTR_UNCLAIMED;

	if (rgep->rge_mac_state == RGE_MAC_STARTED && rgep->resched_needed) {
		mac_tx_update(rgep->mh);
		rgep->resched_needed = B_FALSE;
		rslt = DDI_INTR_CLAIMED;
	}

	return (rslt);
}

/*
 * rge_m_tx() - send a chain of packets
 */
mblk_t *
rge_m_tx(void *arg, mblk_t *mp)
{
	rge_t *rgep = arg;		/* private device info	*/
	mblk_t *next;

	ASSERT(mp != NULL);
	ASSERT(rgep->rge_mac_state == RGE_MAC_STARTED);

	if (rgep->rge_chip_state != RGE_CHIP_RUNNING) {
		RGE_DEBUG(("rge_m_tx: chip not running"));
		return (mp);
	}

	rw_enter(rgep->errlock, RW_READER);
	while (mp != NULL) {
		next = mp->b_next;
		mp->b_next = NULL;

		if (!rge_send(rgep, mp)) {
			mp->b_next = next;
			break;
		}

		mp = next;
	}
	rw_exit(rgep->errlock);

	return (mp);
}
729