xref: /illumos-gate/usr/src/uts/common/io/igb/igb_tx.c (revision 85f496fa)
1c869993eSxy /*
2c869993eSxy  * CDDL HEADER START
3c869993eSxy  *
4c869993eSxy  * The contents of this file are subject to the terms of the
5c869993eSxy  * Common Development and Distribution License (the "License").
6c869993eSxy  * You may not use this file except in compliance with the License.
7c869993eSxy  *
80dc2366fSVenugopal Iyer  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
90dc2366fSVenugopal Iyer  * or http://www.opensolaris.org/os/licensing.
10c869993eSxy  * See the License for the specific language governing permissions
11c869993eSxy  * and limitations under the License.
12c869993eSxy  *
130dc2366fSVenugopal Iyer  * When distributing Covered Code, include this CDDL HEADER in each
140dc2366fSVenugopal Iyer  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15c869993eSxy  * If applicable, add the following below this CDDL HEADER, with the
16c869993eSxy  * fields enclosed by brackets "[]" replaced with your own identifying
17c869993eSxy  * information: Portions Copyright [yyyy] [name of copyright owner]
18c869993eSxy  *
19c869993eSxy  * CDDL HEADER END
20c869993eSxy  */
21c869993eSxy 
22c869993eSxy /*
2369b2d733SGuoqing Zhu  * Copyright(c) 2007-2010 Intel Corporation. All rights reserved.
2469b2d733SGuoqing Zhu  */
2569b2d733SGuoqing Zhu 
2669b2d733SGuoqing Zhu /*
2769b2d733SGuoqing Zhu  * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
28da14cebeSEric Cheng  */
29c869993eSxy 
30c869993eSxy #include "igb_sw.h"
31c869993eSxy 
32c869993eSxy static boolean_t igb_tx(igb_tx_ring_t *, mblk_t *);
33c869993eSxy static int igb_tx_copy(igb_tx_ring_t *, tx_control_block_t *, mblk_t *,
34fa25784cSxy     uint32_t, boolean_t);
35c869993eSxy static int igb_tx_bind(igb_tx_ring_t *, tx_control_block_t *, mblk_t *,
36c869993eSxy     uint32_t);
37d11274aaSPaul Guo static int igb_tx_fill_ring(igb_tx_ring_t *, link_list_t *, tx_context_t *,
38d11274aaSPaul Guo     size_t);
39c869993eSxy static void igb_save_desc(tx_control_block_t *, uint64_t, size_t);
40c869993eSxy static tx_control_block_t *igb_get_free_list(igb_tx_ring_t *);
41d11274aaSPaul Guo static int igb_get_tx_context(mblk_t *, tx_context_t *);
42d11274aaSPaul Guo static boolean_t igb_check_tx_context(igb_tx_ring_t *, tx_context_t *);
43d11274aaSPaul Guo static void igb_fill_tx_context(struct e1000_adv_tx_context_desc *,
44d11274aaSPaul Guo     tx_context_t *, uint32_t);
45c869993eSxy 
46c869993eSxy #ifndef IGB_DEBUG
47c869993eSxy #pragma inline(igb_save_desc)
48d11274aaSPaul Guo #pragma inline(igb_get_tx_context)
49d11274aaSPaul Guo #pragma inline(igb_check_tx_context)
50d11274aaSPaul Guo #pragma inline(igb_fill_tx_context)
51c869993eSxy #endif
52c869993eSxy 
53c869993eSxy mblk_t *
54da14cebeSEric Cheng igb_tx_ring_send(void *arg, mblk_t *mp)
55c869993eSxy {
56da14cebeSEric Cheng 	igb_tx_ring_t *tx_ring = (igb_tx_ring_t *)arg;
57b607c8a3SKeith M Wesolowski 	igb_t *igb;
58c869993eSxy 
59da14cebeSEric Cheng 	ASSERT(tx_ring != NULL);
60c869993eSxy 
61b607c8a3SKeith M Wesolowski 	igb = tx_ring->igb;
62b607c8a3SKeith M Wesolowski 
63b607c8a3SKeith M Wesolowski 	if ((igb->igb_state & IGB_SUSPENDED) ||
64b607c8a3SKeith M Wesolowski 	    (igb->igb_state & IGB_ERROR) ||
65b607c8a3SKeith M Wesolowski 	    !(igb->igb_state & IGB_STARTED) ||
66b607c8a3SKeith M Wesolowski 	    igb->link_state != LINK_STATE_UP) {
67cf8dcc9bSzhefeng xu - Sun Microsystems - Beijing China 		freemsg(mp);
68cf8dcc9bSzhefeng xu - Sun Microsystems - Beijing China 		return (NULL);
69cf8dcc9bSzhefeng xu - Sun Microsystems - Beijing China 	}
70cf8dcc9bSzhefeng xu - Sun Microsystems - Beijing China 
71da14cebeSEric Cheng 	return ((igb_tx(tx_ring, mp)) ? NULL : mp);
72c869993eSxy }
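
/*
 * Note on the return convention (a summary of the logic above): returning
 * NULL indicates the mblk was consumed (queued for transmit or freed),
 * while returning the original mp tells the MAC framework that transmit
 * resources were exhausted and the packet should be retried later; in
 * that case igb_tx() has already set tx_ring->reschedule.
 */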
73c869993eSxy 
74c869993eSxy /*
75c869993eSxy  * igb_tx - Main transmit processing
76c869993eSxy  *
77c869993eSxy  * Called from igb_m_tx with an mblk ready to transmit. This
78c869993eSxy  * routine sets up the transmit descriptors and sends data to
79c869993eSxy  * the wire.
80c869993eSxy  *
81c869993eSxy  * One mblk can consist of several fragments; each fragment
82c869993eSxy  * will be processed with a different method based on its size.
83c869993eSxy  * Fragments with a size less than the bcopy threshold
84c869993eSxy  * will be processed by using bcopy; otherwise, they will
85c869993eSxy  * be processed by using DMA binding.
86c869993eSxy  *
87c869993eSxy  * To process the mblk, a tx control block is taken from the
88c869993eSxy  * free list. One tx control block contains one tx buffer, which
89c869993eSxy  * is used to copy mblk fragments' data, and one tx DMA handle,
90c869993eSxy  * which is used to bind an mblk fragment to a DMA resource.
91c869993eSxy  *
92c869993eSxy  * Several small mblk fragments can be copied into one tx control
93c869993eSxy  * block's buffer, and then the buffer will be transmitted with
94c869993eSxy  * one tx descriptor.
95c869993eSxy  *
96c869993eSxy  * A large fragment only binds with one tx control block's DMA
97c869993eSxy  * handle, and it can span several tx descriptors for transmitting.
98c869993eSxy  *
99c869993eSxy  * So to transmit a packet (mblk), several tx control blocks can
100c869993eSxy  * be used. After the processing, those tx control blocks will
101c869993eSxy  * be put on the work list.
102c869993eSxy  */
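/*
 * Illustrative example (sizes and the threshold value are hypothetical;
 * the real threshold is the tunable igb->tx_copy_thresh): with a copy
 * threshold of 512 bytes, an mblk chain of 14 + 20 + 20 + 1400 bytes
 * would have its three small fragments bcopy'd into a single tx buffer
 * (one descriptor) while the 1400-byte fragment is DMA-bound and takes
 * one descriptor per DMA cookie.
 */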
103c869993eSxy static boolean_t
104c869993eSxy igb_tx(igb_tx_ring_t *tx_ring, mblk_t *mp)
105c869993eSxy {
106c869993eSxy 	igb_t *igb = tx_ring->igb;
107c869993eSxy 	tx_type_t current_flag, next_flag;
108c869993eSxy 	uint32_t current_len, next_len;
109c869993eSxy 	uint32_t desc_total;
110c869993eSxy 	size_t mbsize;
111c869993eSxy 	int desc_num;
112c869993eSxy 	boolean_t copy_done, eop;
113c869993eSxy 	mblk_t *current_mp, *next_mp, *nmp;
114c869993eSxy 	tx_control_block_t *tcb;
115d11274aaSPaul Guo 	tx_context_t tx_context, *ctx;
116c869993eSxy 	link_list_t pending_list;
117ac7f5757Schenlu chen - Sun Microsystems - Beijing China 	mblk_t *hdr_new_mp = NULL;
118ac7f5757Schenlu chen - Sun Microsystems - Beijing China 	mblk_t *hdr_previous_mp = NULL;
119ac7f5757Schenlu chen - Sun Microsystems - Beijing China 	mblk_t *hdr_current_mp = NULL;
120d11274aaSPaul Guo 	uint32_t hdr_frag_len;
121d11274aaSPaul Guo 	uint32_t hdr_len, len;
122d11274aaSPaul Guo 	uint32_t copy_thresh;
123d11274aaSPaul Guo 
124ac7f5757Schenlu chen - Sun Microsystems - Beijing China 	copy_thresh = igb->tx_copy_thresh;
125c869993eSxy 
126c869993eSxy 	/* Get the mblk size */
127c869993eSxy 	mbsize = 0;
128c869993eSxy 	for (nmp = mp; nmp != NULL; nmp = nmp->b_cont) {
129d11274aaSPaul Guo 		mbsize += MBLKL(nmp);
130c869993eSxy 	}
131c869993eSxy 
132d11274aaSPaul Guo 	if (igb->tx_hcksum_enable) {
133d11274aaSPaul Guo 		ctx = &tx_context;
134d11274aaSPaul Guo 		/*
135d11274aaSPaul Guo 		 * Retrieve offloading context information from the mblk
136d11274aaSPaul Guo 		 * that will be used to decide whether/how to fill the
137d11274aaSPaul Guo 		 * context descriptor.
138d11274aaSPaul Guo 		 */
139d11274aaSPaul Guo 		if (igb_get_tx_context(mp, ctx) != TX_CXT_SUCCESS) {
140d11274aaSPaul Guo 			freemsg(mp);
141d11274aaSPaul Guo 			return (B_TRUE);
142d11274aaSPaul Guo 		}
143d11274aaSPaul Guo 
144d11274aaSPaul Guo 		if ((ctx->lso_flag &&
145d11274aaSPaul Guo 		    (mbsize > (ctx->mac_hdr_len + IGB_LSO_MAXLEN))) ||
146d11274aaSPaul Guo 		    (!ctx->lso_flag &&
147d11274aaSPaul Guo 		    (mbsize > (igb->max_frame_size - ETHERFCSL)))) {
148d11274aaSPaul Guo 			freemsg(mp);
149fa4e188eSYuri Pankov 			igb_log(igb, IGB_LOG_INFO, "igb_tx: packet oversize");
150d11274aaSPaul Guo 			return (B_TRUE);
151d11274aaSPaul Guo 		}
152d11274aaSPaul Guo 	} else {
153d11274aaSPaul Guo 		ctx = NULL;
154d11274aaSPaul Guo 		if (mbsize > (igb->max_frame_size - ETHERFCSL)) {
155d11274aaSPaul Guo 			freemsg(mp);
156fa4e188eSYuri Pankov 			igb_log(igb, IGB_LOG_INFO, "igb_tx: packet oversize");
157d11274aaSPaul Guo 			return (B_TRUE);
158d11274aaSPaul Guo 		}
159c869993eSxy 	}
160c869993eSxy 
161c869993eSxy 	/*
162c869993eSxy 	 * Check and recycle tx descriptors.
163c869993eSxy 	 * The recycle threshold here should be selected carefully
164c869993eSxy 	 */
165ac7f5757Schenlu chen - Sun Microsystems - Beijing China 	if (tx_ring->tbd_free < igb->tx_recycle_thresh)
166c869993eSxy 		tx_ring->tx_recycle(tx_ring);
167c869993eSxy 
168c869993eSxy 	/*
169c869993eSxy 	 * After the recycling, if the tbd_free is less than the
170ac7f5757Schenlu chen - Sun Microsystems - Beijing China 	 * tx_overload_threshold, assert overload and return B_FALSE;
171c869993eSxy 	 * the tx will then need to be re-scheduled.
172c869993eSxy 	 */
173ac7f5757Schenlu chen - Sun Microsystems - Beijing China 	if (tx_ring->tbd_free < igb->tx_overload_thresh) {
174c869993eSxy 		tx_ring->reschedule = B_TRUE;
175c869993eSxy 		IGB_DEBUG_STAT(tx_ring->stat_overload);
176c869993eSxy 		return (B_FALSE);
177c869993eSxy 	}
178c869993eSxy 
179d11274aaSPaul Guo 	/*
180d11274aaSPaul Guo 	 * The software should guarantee that the LSO packet header
181d11274aaSPaul Guo 	 * (MAC+IP+TCP) is within one descriptor - this is required by h/w.
182d11274aaSPaul Guo 	 * Here we will reallocate and refill the header if
183d11274aaSPaul Guo 	 * the headers (MAC+IP+TCP) are not physically contiguous.
184d11274aaSPaul Guo 	 */
185d11274aaSPaul Guo 	if (ctx && ctx->lso_flag) {
186ac7f5757Schenlu chen - Sun Microsystems - Beijing China 		hdr_len = ctx->mac_hdr_len + ctx->ip_hdr_len + ctx->l4_hdr_len;
187d11274aaSPaul Guo 		len = MBLKL(mp);
188ac7f5757Schenlu chen - Sun Microsystems - Beijing China 		hdr_current_mp = mp;
189d11274aaSPaul Guo 		while (len < hdr_len) {
190ac7f5757Schenlu chen - Sun Microsystems - Beijing China 			hdr_previous_mp = hdr_current_mp;
191ac7f5757Schenlu chen - Sun Microsystems - Beijing China 			hdr_current_mp = hdr_current_mp->b_cont;
192ac7f5757Schenlu chen - Sun Microsystems - Beijing China 			len += MBLKL(hdr_current_mp);
193d11274aaSPaul Guo 		}
194d11274aaSPaul Guo 		/*
195ac7f5757Schenlu chen - Sun Microsystems - Beijing China 		 * If the header and the payload are in different mblks,
196ac7f5757Schenlu chen - Sun Microsystems - Beijing China 		 * we simply force the header to be copied into the pre-allocated
197ac7f5757Schenlu chen - Sun Microsystems - Beijing China 		 * page-aligned buffer.
198d11274aaSPaul Guo 		 */
199ac7f5757Schenlu chen - Sun Microsystems - Beijing China 		if (len == hdr_len)
200ac7f5757Schenlu chen - Sun Microsystems - Beijing China 			goto adjust_threshold;
201d11274aaSPaul Guo 
202ac7f5757Schenlu chen - Sun Microsystems - Beijing China 		hdr_frag_len = hdr_len - (len - MBLKL(hdr_current_mp));
203ac7f5757Schenlu chen - Sun Microsystems - Beijing China 		/*
204ac7f5757Schenlu chen - Sun Microsystems - Beijing China 		 * There are two cases where we will reallocate
205ac7f5757Schenlu chen - Sun Microsystems - Beijing China 		 * an mblk for the last header fragment.
206ac7f5757Schenlu chen - Sun Microsystems - Beijing China 		 * 1. the header is in multiple mblks and
207ac7f5757Schenlu chen - Sun Microsystems - Beijing China 		 *    the last fragment shares the same mblk
208ac7f5757Schenlu chen - Sun Microsystems - Beijing China 		 *    with the payload
209ac7f5757Schenlu chen - Sun Microsystems - Beijing China 		 * 2. the header is in a single mblk shared
210ac7f5757Schenlu chen - Sun Microsystems - Beijing China 		 *    with the payload but the header crosses
211ac7f5757Schenlu chen - Sun Microsystems - Beijing China 		 *    a page.
212ac7f5757Schenlu chen - Sun Microsystems - Beijing China 		 */
213ac7f5757Schenlu chen - Sun Microsystems - Beijing China 		if ((hdr_current_mp != mp) ||
214ac7f5757Schenlu chen - Sun Microsystems - Beijing China 		    (P2NPHASE((uintptr_t)hdr_current_mp->b_rptr, igb->page_size)
215ac7f5757Schenlu chen - Sun Microsystems - Beijing China 		    < hdr_len)) {
216ac7f5757Schenlu chen - Sun Microsystems - Beijing China 			/*
217ac7f5757Schenlu chen - Sun Microsystems - Beijing China 			 * reallocate the mblk for the last header fragment,
218ac7f5757Schenlu chen - Sun Microsystems - Beijing China 			 * and expect it to be copied into the pre-allocated
219ac7f5757Schenlu chen - Sun Microsystems - Beijing China 			 * page-aligned buffer
220ac7f5757Schenlu chen - Sun Microsystems - Beijing China 			 */
221d5f5d513SToomas Soome 			hdr_new_mp = allocb(hdr_frag_len, 0);
222ac7f5757Schenlu chen - Sun Microsystems - Beijing China 			if (!hdr_new_mp) {
223ac7f5757Schenlu chen - Sun Microsystems - Beijing China 				return (B_FALSE);
224d11274aaSPaul Guo 			}
225d11274aaSPaul Guo 
226ac7f5757Schenlu chen - Sun Microsystems - Beijing China 			/* link the new header fragment with the other parts */
227ac7f5757Schenlu chen - Sun Microsystems - Beijing China 			bcopy(hdr_current_mp->b_rptr,
228ac7f5757Schenlu chen - Sun Microsystems - Beijing China 			    hdr_new_mp->b_rptr, hdr_frag_len);
229ac7f5757Schenlu chen - Sun Microsystems - Beijing China 			hdr_new_mp->b_wptr = hdr_new_mp->b_rptr + hdr_frag_len;
230ac7f5757Schenlu chen - Sun Microsystems - Beijing China 			hdr_new_mp->b_cont = hdr_current_mp;
231ac7f5757Schenlu chen - Sun Microsystems - Beijing China 			if (hdr_previous_mp)
232ac7f5757Schenlu chen - Sun Microsystems - Beijing China 				hdr_previous_mp->b_cont = hdr_new_mp;
233ac7f5757Schenlu chen - Sun Microsystems - Beijing China 			else
234ac7f5757Schenlu chen - Sun Microsystems - Beijing China 				mp = hdr_new_mp;
235ac7f5757Schenlu chen - Sun Microsystems - Beijing China 			hdr_current_mp->b_rptr += hdr_frag_len;
236d11274aaSPaul Guo 		}
237ac7f5757Schenlu chen - Sun Microsystems - Beijing China adjust_threshold:
238ac7f5757Schenlu chen - Sun Microsystems - Beijing China 		/*
239ac7f5757Schenlu chen - Sun Microsystems - Beijing China 		 * adjust the bcopy threshold to guarantee
240ac7f5757Schenlu chen - Sun Microsystems - Beijing China 		 * that the header uses the bcopy path
241ac7f5757Schenlu chen - Sun Microsystems - Beijing China 		 */
242ac7f5757Schenlu chen - Sun Microsystems - Beijing China 		if (copy_thresh < hdr_len)
243ac7f5757Schenlu chen - Sun Microsystems - Beijing China 			copy_thresh = hdr_len;
244d11274aaSPaul Guo 	}
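	/*
	 * Example of the adjustment above (hypothetical sizes): for an LSO
	 * packet whose MAC+IP+TCP headers total 54 bytes but whose first
	 * mblk holds only the 14-byte MAC header, the remaining 40 header
	 * bytes are copied into the freshly allocated hdr_new_mp and linked
	 * in ahead of the remaining payload, so the headers end up
	 * physically contiguous; copy_thresh is then raised to at least
	 * hdr_len so the header is always handled by the bcopy path.
	 */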
245d11274aaSPaul Guo 
246c869993eSxy 	/*
247c869993eSxy 	 * The pending_list is a linked list that is used to save
248c869993eSxy 	 * the tx control blocks that have packet data processed
249c869993eSxy 	 * but have not put the data to the tx descriptor ring.
250c869993eSxy 	 * It is used to reduce the lock contention of the tx_lock.
251c869993eSxy 	 */
252c869993eSxy 	LINK_LIST_INIT(&pending_list);
253c869993eSxy 	desc_num = 0;
254c869993eSxy 	desc_total = 0;
255c869993eSxy 
256c869993eSxy 	current_mp = mp;
257d11274aaSPaul Guo 	current_len = MBLKL(current_mp);
258c869993eSxy 	/*
259c869993eSxy 	 * Decide which method to use for the first fragment
260c869993eSxy 	 */
261d11274aaSPaul Guo 	current_flag = (current_len <= copy_thresh) ?
262c869993eSxy 	    USE_COPY : USE_DMA;
263c869993eSxy 	/*
264c869993eSxy 	 * If the mblk includes several contiguous small fragments,
265c869993eSxy 	 * they may be copied into one buffer. This flag is used to
266c869993eSxy 	 * indicate whether there are pending fragments that need to
267c869993eSxy 	 * be copied to the current tx buffer.
268c869993eSxy 	 *
269c869993eSxy 	 * If this flag is B_TRUE, it indicates that a new tx control
270c869993eSxy 	 * block is needed to process the next fragment using either
271c869993eSxy 	 * copy or DMA binding.
272c869993eSxy 	 *
273c869993eSxy 	 * Otherwise, it indicates that the next fragment will be
274c869993eSxy 	 * copied to the current tx buffer that is maintained by the
275c869993eSxy 	 * current tx control block. No new tx control block is needed.
276c869993eSxy 	 */
277c869993eSxy 	copy_done = B_TRUE;
278c869993eSxy 	while (current_mp) {
279c869993eSxy 		next_mp = current_mp->b_cont;
280c869993eSxy 		eop = (next_mp == NULL); /* Last fragment of the packet? */
281d11274aaSPaul Guo 		next_len = eop ? 0: MBLKL(next_mp);
282c869993eSxy 
283c869993eSxy 		/*
284c869993eSxy 		 * When the current fragment is an empty fragment, if
285c869993eSxy 		 * the next fragment will still be copied to the current
286c869993eSxy 		 * tx buffer, we cannot skip this fragment here, because
287c869993eSxy 		 * the copy processing has not yet completed. We have
288c869993eSxy 		 * to process this empty fragment in the tx_copy routine.
289c869993eSxy 		 *
290c869993eSxy 		 * If the copy processing is completed or a DMA binding
291c869993eSxy 		 * processing is just completed, we can just skip this
292c869993eSxy 		 * empty fragment.
293c869993eSxy 		 */
294c869993eSxy 		if ((current_len == 0) && (copy_done)) {
295c869993eSxy 			current_mp = next_mp;
296c869993eSxy 			current_len = next_len;
297d11274aaSPaul Guo 			current_flag = (current_len <= copy_thresh) ?
298c869993eSxy 			    USE_COPY : USE_DMA;
299c869993eSxy 			continue;
300c869993eSxy 		}
301c869993eSxy 
302c869993eSxy 		if (copy_done) {
303c869993eSxy 			/*
304c869993eSxy 			 * Get a new tx control block from the free list
305c869993eSxy 			 */
306c869993eSxy 			tcb = igb_get_free_list(tx_ring);
307c869993eSxy 
308c869993eSxy 			if (tcb == NULL) {
309c869993eSxy 				IGB_DEBUG_STAT(tx_ring->stat_fail_no_tcb);
310c869993eSxy 				goto tx_failure;
311c869993eSxy 			}
312c869993eSxy 
313c869993eSxy 			/*
314c869993eSxy 			 * Push the tx control block to the pending list
315c869993eSxy 			 * to avoid using lock too early
316c869993eSxy 			 */
317c869993eSxy 			LIST_PUSH_TAIL(&pending_list, &tcb->link);
318c869993eSxy 		}
319c869993eSxy 
320c869993eSxy 		if (current_flag == USE_COPY) {
321c869993eSxy 			/*
322c869993eSxy 			 * Check whether to use bcopy or DMA binding to process
323c869993eSxy 			 * the next fragment, and if using bcopy, whether we
324c869993eSxy 			 * need to continue copying the next fragment into the
325c869993eSxy 			 * current tx buffer.
326c869993eSxy 			 */
327c869993eSxy 			ASSERT((tcb->tx_buf.len + current_len) <=
328c869993eSxy 			    tcb->tx_buf.size);
329c869993eSxy 
330c869993eSxy 			if (eop) {
331c869993eSxy 				/*
332c869993eSxy 				 * This is the last fragment of the packet, so
333c869993eSxy 				 * the copy processing will be completed with
334c869993eSxy 				 * this fragment.
335c869993eSxy 				 */
336c869993eSxy 				next_flag = USE_NONE;
337c869993eSxy 				copy_done = B_TRUE;
338c869993eSxy 			} else if ((tcb->tx_buf.len + current_len + next_len) >
339c869993eSxy 			    tcb->tx_buf.size) {
340c869993eSxy 				/*
341c869993eSxy 				 * If the next fragment is too large to be
342c869993eSxy 				 * copied to the current tx buffer, we need
343c869993eSxy 				 * to complete the current copy processing.
344c869993eSxy 				 */
345d11274aaSPaul Guo 				next_flag = (next_len > copy_thresh) ?
346c869993eSxy 				    USE_DMA: USE_COPY;
347c869993eSxy 				copy_done = B_TRUE;
348d11274aaSPaul Guo 			} else if (next_len > copy_thresh) {
349c869993eSxy 				/*
350c869993eSxy 				 * The next fragment needs to be processed with
351c869993eSxy 				 * DMA binding. So the copy processing will be
352c869993eSxy 				 * completed with the current fragment.
353c869993eSxy 				 */
354c869993eSxy 				next_flag = USE_DMA;
355c869993eSxy 				copy_done = B_TRUE;
356c869993eSxy 			} else {
357c869993eSxy 				/*
358c869993eSxy 				 * Continue to copy the next fragment to the
359c869993eSxy 				 * current tx buffer.
360c869993eSxy 				 */
361c869993eSxy 				next_flag = USE_COPY;
362c869993eSxy 				copy_done = B_FALSE;
363c869993eSxy 			}
364c869993eSxy 
365c869993eSxy 			desc_num = igb_tx_copy(tx_ring, tcb, current_mp,
366fa25784cSxy 			    current_len, copy_done);
367c869993eSxy 		} else {
368c869993eSxy 			/*
369c869993eSxy 			 * Check whether to use bcopy or DMA binding to process
370c869993eSxy 			 * the next fragment.
371c869993eSxy 			 */
372d11274aaSPaul Guo 			next_flag = (next_len > copy_thresh) ?
373c869993eSxy 			    USE_DMA: USE_COPY;
374c869993eSxy 			ASSERT(copy_done == B_TRUE);
375c869993eSxy 
376c869993eSxy 			desc_num = igb_tx_bind(tx_ring, tcb, current_mp,
377c869993eSxy 			    current_len);
378c869993eSxy 		}
379c869993eSxy 
380c869993eSxy 		if (desc_num > 0)
381c869993eSxy 			desc_total += desc_num;
382c869993eSxy 		else if (desc_num < 0)
383c869993eSxy 			goto tx_failure;
384c869993eSxy 
385c869993eSxy 		current_mp = next_mp;
386c869993eSxy 		current_len = next_len;
387c869993eSxy 		current_flag = next_flag;
388c869993eSxy 	}
389c869993eSxy 
390c869993eSxy 	/*
391c869993eSxy 	 * Attach the mblk to the last tx control block
392c869993eSxy 	 */
393c869993eSxy 	ASSERT(tcb);
394c869993eSxy 	ASSERT(tcb->mp == NULL);
395c869993eSxy 	tcb->mp = mp;
396c869993eSxy 
397c869993eSxy 	/*
398c869993eSxy 	 * Before filling the tx descriptor ring with the data, we need to
399c869993eSxy 	 * ensure there are adequate free descriptors for transmit
400c869993eSxy 	 * (including one context descriptor).
40169b2d733SGuoqing Zhu 	 * Do not use up all the tx descriptors.
40269b2d733SGuoqing Zhu 	 * Otherwise, tx recycle will fail and cause a false hang.
403c869993eSxy 	 */
40469b2d733SGuoqing Zhu 	if (tx_ring->tbd_free <= (desc_total + 1)) {
405c869993eSxy 		tx_ring->tx_recycle(tx_ring);
406c869993eSxy 	}
407c869993eSxy 
408c869993eSxy 	mutex_enter(&tx_ring->tx_lock);
409c869993eSxy 
410c869993eSxy 	/*
411c869993eSxy 	 * If the number of free tx descriptors is not enough for transmit
412c869993eSxy 	 * then return failure.
413c869993eSxy 	 *
414c869993eSxy 	 * Note: we must put this check under the mutex protection to
415c869993eSxy 	 * ensure the correctness when multiple threads access it in
416c869993eSxy 	 * parallel.
417c869993eSxy 	 */
41869b2d733SGuoqing Zhu 	if (tx_ring->tbd_free <= (desc_total + 1)) {
419c869993eSxy 		IGB_DEBUG_STAT(tx_ring->stat_fail_no_tbd);
420c869993eSxy 		mutex_exit(&tx_ring->tx_lock);
421c869993eSxy 		goto tx_failure;
422c869993eSxy 	}
423c869993eSxy 
424d11274aaSPaul Guo 	desc_num = igb_tx_fill_ring(tx_ring, &pending_list, ctx, mbsize);
425c869993eSxy 
426c869993eSxy 	ASSERT((desc_num == desc_total) || (desc_num == (desc_total + 1)));
427c869993eSxy 
4280dc2366fSVenugopal Iyer 	/* Update per-ring tx statistics */
4290dc2366fSVenugopal Iyer 	tx_ring->tx_pkts++;
4300dc2366fSVenugopal Iyer 	tx_ring->tx_bytes += mbsize;
4310dc2366fSVenugopal Iyer 
432c869993eSxy 	mutex_exit(&tx_ring->tx_lock);
433c869993eSxy 
434c869993eSxy 	return (B_TRUE);
435c869993eSxy 
436c869993eSxy tx_failure:
437ac7f5757Schenlu chen - Sun Microsystems - Beijing China 	/*
438ac7f5757Schenlu chen - Sun Microsystems - Beijing China 	 * If a new mblk has been allocated for the last header
439ac7f5757Schenlu chen - Sun Microsystems - Beijing China 	 * fragment of an LSO packet, we should restore the
440ac7f5757Schenlu chen - Sun Microsystems - Beijing China 	 * modified mp.
441ac7f5757Schenlu chen - Sun Microsystems - Beijing China 	 */
442ac7f5757Schenlu chen - Sun Microsystems - Beijing China 	if (hdr_new_mp) {
443ac7f5757Schenlu chen - Sun Microsystems - Beijing China 		hdr_new_mp->b_cont = NULL;
444ac7f5757Schenlu chen - Sun Microsystems - Beijing China 		freeb(hdr_new_mp);
445ac7f5757Schenlu chen - Sun Microsystems - Beijing China 		hdr_current_mp->b_rptr -= hdr_frag_len;
446ac7f5757Schenlu chen - Sun Microsystems - Beijing China 		if (hdr_previous_mp)
447ac7f5757Schenlu chen - Sun Microsystems - Beijing China 			hdr_previous_mp->b_cont = hdr_current_mp;
448ac7f5757Schenlu chen - Sun Microsystems - Beijing China 		else
449ac7f5757Schenlu chen - Sun Microsystems - Beijing China 			mp = hdr_current_mp;
450ac7f5757Schenlu chen - Sun Microsystems - Beijing China 	}
451ac7f5757Schenlu chen - Sun Microsystems - Beijing China 
452c869993eSxy 	/*
453c869993eSxy 	 * Discard the mblk and free the used resources
454c869993eSxy 	 */
455c869993eSxy 	tcb = (tx_control_block_t *)LIST_GET_HEAD(&pending_list);
456c869993eSxy 	while (tcb) {
457c869993eSxy 		tcb->mp = NULL;
458c869993eSxy 
459c869993eSxy 		igb_free_tcb(tcb);
460c869993eSxy 
461c869993eSxy 		tcb = (tx_control_block_t *)
462c869993eSxy 		    LIST_GET_NEXT(&pending_list, &tcb->link);
463c869993eSxy 	}
464c869993eSxy 
465c869993eSxy 	/*
466c869993eSxy 	 * Return the tx control blocks in the pending list to the free list.
467c869993eSxy 	 */
468c869993eSxy 	igb_put_free_list(tx_ring, &pending_list);
469c869993eSxy 
470c869993eSxy 	/* Transmit failed, do not drop the mblk, reschedule the transmit */
471c869993eSxy 	tx_ring->reschedule = B_TRUE;
472c869993eSxy 
473c869993eSxy 	return (B_FALSE);
474c869993eSxy }
475c869993eSxy 
476c869993eSxy /*
477c869993eSxy  * igb_tx_copy
478c869993eSxy  *
479c869993eSxy  * Copy the mblk fragment to the pre-allocated tx buffer
480c869993eSxy  */
481c869993eSxy static int
482c869993eSxy igb_tx_copy(igb_tx_ring_t *tx_ring, tx_control_block_t *tcb, mblk_t *mp,
483fa25784cSxy     uint32_t len, boolean_t copy_done)
484c869993eSxy {
485c869993eSxy 	dma_buffer_t *tx_buf;
486c869993eSxy 	uint32_t desc_num;
487c869993eSxy 	_NOTE(ARGUNUSED(tx_ring));
488c869993eSxy 
489c869993eSxy 	tx_buf = &tcb->tx_buf;
490c869993eSxy 
491c869993eSxy 	/*
492c869993eSxy 	 * Copy the packet data of the mblk fragment into the
493c869993eSxy 	 * pre-allocated tx buffer, which is maintained by the
494c869993eSxy 	 * tx control block.
495c869993eSxy 	 *
496c869993eSxy 	 * Several mblk fragments can be copied into one tx buffer.
497c869993eSxy 	 * The destination address of the current copied fragment in
498c869993eSxy 	 * the tx buffer is next to the end of the previous copied
499c869993eSxy 	 * fragment.
500c869993eSxy 	 */
501c869993eSxy 	if (len > 0) {
502c869993eSxy 		bcopy(mp->b_rptr, tx_buf->address + tx_buf->len, len);
503c869993eSxy 
504c869993eSxy 		tx_buf->len += len;
505c869993eSxy 		tcb->frag_num++;
506c869993eSxy 	}
507c869993eSxy 
508c869993eSxy 	desc_num = 0;
509c869993eSxy 
510c869993eSxy 	/*
511c869993eSxy 	 * If it is the last fragment copied to the current tx buffer,
512c869993eSxy 	 * in other words, if there's no remaining fragment or the remaining
513c869993eSxy 	 * fragment requires a new tx control block to process, we need to
514c869993eSxy 	 * complete the current copy processing by syncing up the current
515c869993eSxy 	 * DMA buffer and saving the descriptor data.
516c869993eSxy 	 */
517c869993eSxy 	if (copy_done) {
518c869993eSxy 		/*
519c869993eSxy 		 * Sync the DMA buffer of the packet data
520c869993eSxy 		 */
521c869993eSxy 		DMA_SYNC(tx_buf, DDI_DMA_SYNC_FORDEV);
522c869993eSxy 
523c869993eSxy 		tcb->tx_type = USE_COPY;
524c869993eSxy 
525c869993eSxy 		/*
526c869993eSxy 		 * Save the address and length to the private data structure
527c869993eSxy 		 * of the tx control block, which will be used to fill the
528c869993eSxy 		 * tx descriptor ring after all the fragments are processed.
529c869993eSxy 		 */
530c869993eSxy 		igb_save_desc(tcb, tx_buf->dma_address, tx_buf->len);
531c869993eSxy 		desc_num++;
532c869993eSxy 	}
533c869993eSxy 
534c869993eSxy 	return (desc_num);
535c869993eSxy }
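
/*
 * Note on usage: igb_tx() may call igb_tx_copy() several times in a row
 * with copy_done set to B_FALSE to accumulate small fragments in the same
 * tx buffer; only the final call with copy_done set to B_TRUE syncs the
 * DMA buffer and saves a single descriptor for the accumulated data.
 */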
536c869993eSxy 
537c869993eSxy /*
538c869993eSxy  * igb_tx_bind
539c869993eSxy  *
540c869993eSxy  * Bind the mblk fragment with DMA
541c869993eSxy  */
542c869993eSxy static int
543c869993eSxy igb_tx_bind(igb_tx_ring_t *tx_ring, tx_control_block_t *tcb, mblk_t *mp,
544c869993eSxy     uint32_t len)
545c869993eSxy {
546c869993eSxy 	int status, i;
547c869993eSxy 	ddi_dma_cookie_t dma_cookie;
548c869993eSxy 	uint_t ncookies;
549c869993eSxy 	int desc_num;
550c869993eSxy 
551c869993eSxy 	/*
552c869993eSxy 	 * Use DMA binding to process the mblk fragment
553c869993eSxy 	 */
554c869993eSxy 	status = ddi_dma_addr_bind_handle(tcb->tx_dma_handle, NULL,
555c869993eSxy 	    (caddr_t)mp->b_rptr, len,
556c869993eSxy 	    DDI_DMA_WRITE | DDI_DMA_STREAMING, DDI_DMA_DONTWAIT,
557c869993eSxy 	    0, &dma_cookie, &ncookies);
558c869993eSxy 
559c869993eSxy 	if (status != DDI_DMA_MAPPED) {
560c869993eSxy 		IGB_DEBUG_STAT(tx_ring->stat_fail_dma_bind);
561c869993eSxy 		return (-1);
562c869993eSxy 	}
563c869993eSxy 
564c869993eSxy 	tcb->frag_num++;
565c869993eSxy 	tcb->tx_type = USE_DMA;
566c869993eSxy 	/*
567c869993eSxy 	 * Each fragment can span several cookies. One cookie will have
568c869993eSxy 	 * one tx descriptor to transmit.
569c869993eSxy 	 */
570c869993eSxy 	desc_num = 0;
571c869993eSxy 	for (i = ncookies; i > 0; i--) {
572c869993eSxy 		/*
573c869993eSxy 		 * Save the address and length to the private data structure
574c869993eSxy 		 * of the tx control block, which will be used to fill the
575c869993eSxy 		 * tx descriptor ring after all the fragments are processed.
576c869993eSxy 		 */
577c869993eSxy 		igb_save_desc(tcb,
578c869993eSxy 		    dma_cookie.dmac_laddress,
579c869993eSxy 		    dma_cookie.dmac_size);
580c869993eSxy 
581c869993eSxy 		desc_num++;
582c869993eSxy 
583c869993eSxy 		if (i > 1)
584c869993eSxy 			ddi_dma_nextcookie(tcb->tx_dma_handle, &dma_cookie);
585c869993eSxy 	}
586c869993eSxy 
587c869993eSxy 	return (desc_num);
588c869993eSxy }
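
/*
 * Note on usage: unlike the copy path, a bound fragment may span several
 * DMA cookies, and each cookie consumes one tx descriptor. For example
 * (hypothetical sizes), a 4000-byte fragment that crosses two page
 * boundaries may be mapped to three cookies and therefore take three
 * descriptors.
 */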
589c869993eSxy 
590c869993eSxy /*
591d11274aaSPaul Guo  * igb_get_tx_context
592c869993eSxy  *
593d11274aaSPaul Guo  * Get the tx context information from the mblk
594c869993eSxy  */
595d11274aaSPaul Guo static int
596d11274aaSPaul Guo igb_get_tx_context(mblk_t *mp, tx_context_t *ctx)
597c869993eSxy {
598c869993eSxy 	uint32_t start;
599c869993eSxy 	uint32_t flags;
600d11274aaSPaul Guo 	uint32_t lso_flag;
601*85f496faSRobert Mustacchi 	uint32_t lso_cksum;
602d11274aaSPaul Guo 	uint32_t mss;
603c869993eSxy 	uint32_t len;
604c869993eSxy 	uint32_t size;
605c869993eSxy 	uint32_t offset;
606c869993eSxy 	unsigned char *pos;
607c869993eSxy 	ushort_t etype;
608c869993eSxy 	uint32_t mac_hdr_len;
609c869993eSxy 	uint32_t l4_proto;
610d11274aaSPaul Guo 	uint32_t l4_hdr_len;
611c869993eSxy 
612c869993eSxy 	ASSERT(mp != NULL);
613c869993eSxy 
6140dc2366fSVenugopal Iyer 	mac_hcksum_get(mp, &start, NULL, NULL, NULL, &flags);
615d11274aaSPaul Guo 	bzero(ctx, sizeof (tx_context_t));
616c869993eSxy 
617d11274aaSPaul Guo 	ctx->hcksum_flags = flags;
618c869993eSxy 
619c869993eSxy 	if (flags == 0)
620d11274aaSPaul Guo 		return (TX_CXT_SUCCESS);
621d11274aaSPaul Guo 
6220dc2366fSVenugopal Iyer 	mac_lso_get(mp, &mss, &lso_flag);
623d11274aaSPaul Guo 	ctx->mss = mss;
624d11274aaSPaul Guo 	ctx->lso_flag = (lso_flag == HW_LSO);
625d11274aaSPaul Guo 
626c869993eSxy 	etype = 0;
627c869993eSxy 	mac_hdr_len = 0;
628c869993eSxy 	l4_proto = 0;
629c869993eSxy 
630c869993eSxy 	/*
631c869993eSxy 	 * First, get the position of the ether_type/ether_tpid.
632c869993eSxy 	 * Here we don't assume the ether (VLAN) header is fully included
633c869993eSxy 	 * in one mblk fragment, so we go through the fragments to parse
634c869993eSxy 	 * the ether type.
635c869993eSxy 	 */
636d11274aaSPaul Guo 	size = len = MBLKL(mp);
637c869993eSxy 	offset = offsetof(struct ether_header, ether_type);
638c869993eSxy 	while (size <= offset) {
639c869993eSxy 		mp = mp->b_cont;
640c869993eSxy 		ASSERT(mp != NULL);
641d11274aaSPaul Guo 		len = MBLKL(mp);
642c869993eSxy 		size += len;
643c869993eSxy 	}
644c869993eSxy 	pos = mp->b_rptr + offset + len - size;
645c869993eSxy 
646c869993eSxy 	etype = ntohs(*(ushort_t *)(uintptr_t)pos);
647c869993eSxy 	if (etype == ETHERTYPE_VLAN) {
648c869993eSxy 		/*
649c869993eSxy 		 * Get the position of the ether_type in VLAN header
650c869993eSxy 		 */
651c869993eSxy 		offset = offsetof(struct ether_vlan_header, ether_type);
652c869993eSxy 		while (size <= offset) {
653c869993eSxy 			mp = mp->b_cont;
654c869993eSxy 			ASSERT(mp != NULL);
655d11274aaSPaul Guo 			len = MBLKL(mp);
656c869993eSxy 			size += len;
657c869993eSxy 		}
658c869993eSxy 		pos = mp->b_rptr + offset + len - size;
659c869993eSxy 
660c869993eSxy 		etype = ntohs(*(ushort_t *)(uintptr_t)pos);
661c869993eSxy 		mac_hdr_len = sizeof (struct ether_vlan_header);
662c869993eSxy 	} else {
663c869993eSxy 		mac_hdr_len = sizeof (struct ether_header);
664c869993eSxy 	}
665c869993eSxy 
666c869993eSxy 	/*
667d11274aaSPaul Guo 	 * Here we assume the IP(V6) header is fully included in one
668d11274aaSPaul Guo 	 * mblk fragment.
669c869993eSxy 	 */
670*85f496faSRobert Mustacchi 	lso_cksum = HCK_PARTIALCKSUM;
671*85f496faSRobert Mustacchi 	ctx->l3_proto = etype;
672c869993eSxy 	switch (etype) {
673c869993eSxy 	case ETHERTYPE_IP:
674d11274aaSPaul Guo 		offset = mac_hdr_len;
675c869993eSxy 		while (size <= offset) {
676c869993eSxy 			mp = mp->b_cont;
677c869993eSxy 			ASSERT(mp != NULL);
678d11274aaSPaul Guo 			len = MBLKL(mp);
679c869993eSxy 			size += len;
680c869993eSxy 		}
681c869993eSxy 		pos = mp->b_rptr + offset + len - size;
682c869993eSxy 
683d11274aaSPaul Guo 		if (ctx->lso_flag) {
684d11274aaSPaul Guo 			*((uint16_t *)(uintptr_t)(pos + offsetof(ipha_t,
685d11274aaSPaul Guo 			    ipha_length))) = 0;
686d11274aaSPaul Guo 
687d11274aaSPaul Guo 			/*
688d11274aaSPaul Guo 			 * To utilize igb LSO, we need to fill
689d11274aaSPaul Guo 			 * the tcp checksum field of the packet with the
690d11274aaSPaul Guo 			 * following pseudo-header checksum:
691d11274aaSPaul Guo 			 * (ip_source_addr, ip_destination_addr, l4_proto)
692d11274aaSPaul Guo 			 * and also need to fill the ip header checksum
693d11274aaSPaul Guo 			 * with zero. Currently the tcp/ip stack has already
694d11274aaSPaul Guo 			 * done this.
695d11274aaSPaul Guo 			 */
696*85f496faSRobert Mustacchi 			lso_cksum |= HCK_IPV4_HDRCKSUM;
697d11274aaSPaul Guo 		}
698d11274aaSPaul Guo 
699d11274aaSPaul Guo 		l4_proto = *(uint8_t *)(pos + offsetof(ipha_t, ipha_protocol));
700c869993eSxy 		break;
701c869993eSxy 	case ETHERTYPE_IPV6:
702*85f496faSRobert Mustacchi 		/*
703*85f496faSRobert Mustacchi 		 * We need to zero out the length in the header.
704*85f496faSRobert Mustacchi 		 */
705*85f496faSRobert Mustacchi 		if (ctx->lso_flag) {
706*85f496faSRobert Mustacchi 			offset = offsetof(ip6_t, ip6_plen) + mac_hdr_len;
707*85f496faSRobert Mustacchi 			while (size <= offset) {
708*85f496faSRobert Mustacchi 				mp = mp->b_cont;
709*85f496faSRobert Mustacchi 				ASSERT(mp != NULL);
710*85f496faSRobert Mustacchi 				len = MBLKL(mp);
711*85f496faSRobert Mustacchi 				size += len;
712*85f496faSRobert Mustacchi 			}
713*85f496faSRobert Mustacchi 			pos = mp->b_rptr + offset + len - size;
714*85f496faSRobert Mustacchi 			*((uint16_t *)(uintptr_t)(pos)) = 0;
715*85f496faSRobert Mustacchi 		}
716*85f496faSRobert Mustacchi 
717c869993eSxy 		offset = offsetof(ip6_t, ip6_nxt) + mac_hdr_len;
718c869993eSxy 		while (size <= offset) {
719c869993eSxy 			mp = mp->b_cont;
720c869993eSxy 			ASSERT(mp != NULL);
721d11274aaSPaul Guo 			len = MBLKL(mp);
722c869993eSxy 			size += len;
723c869993eSxy 		}
724c869993eSxy 		pos = mp->b_rptr + offset + len - size;
725c869993eSxy 
726c869993eSxy 		l4_proto = *(uint8_t *)pos;
727c869993eSxy 		break;
728c869993eSxy 	default:
729c869993eSxy 		/* Unrecoverable error */
730fa4e188eSYuri Pankov 		igb_log(NULL, IGB_LOG_INFO, "Ethernet type field error with "
731d11274aaSPaul Guo 		    "tx hcksum flag set");
732d11274aaSPaul Guo 		return (TX_CXT_E_ETHER_TYPE);
733d11274aaSPaul Guo 	}
734d11274aaSPaul Guo 
735d11274aaSPaul Guo 	if (ctx->lso_flag) {
736*85f496faSRobert Mustacchi 		/*
737*85f496faSRobert Mustacchi 		 * LSO relies on tx h/w checksum, so here the packet will be
738*85f496faSRobert Mustacchi 		 * dropped if the h/w checksum flags are not set.
739*85f496faSRobert Mustacchi 		 */
740*85f496faSRobert Mustacchi 		if ((ctx->hcksum_flags & lso_cksum) != lso_cksum) {
741*85f496faSRobert Mustacchi 			igb_log(NULL, IGB_LOG_INFO, "igb_tx: h/w "
742*85f496faSRobert Mustacchi 			    "checksum flags are not set for LSO, found "
743*85f496faSRobert Mustacchi 			    "0x%x, needed bits 0x%x", ctx->hcksum_flags,
744*85f496faSRobert Mustacchi 			    lso_cksum);
745*85f496faSRobert Mustacchi 			return (TX_CXT_E_LSO_CSUM);
746*85f496faSRobert Mustacchi 		}
747*85f496faSRobert Mustacchi 
748d11274aaSPaul Guo 		offset = mac_hdr_len + start;
749d11274aaSPaul Guo 		while (size <= offset) {
750d11274aaSPaul Guo 			mp = mp->b_cont;
751d11274aaSPaul Guo 			ASSERT(mp != NULL);
752d11274aaSPaul Guo 			len = MBLKL(mp);
753d11274aaSPaul Guo 			size += len;
754d11274aaSPaul Guo 		}
755d11274aaSPaul Guo 		pos = mp->b_rptr + offset + len - size;
756d11274aaSPaul Guo 
757d11274aaSPaul Guo 		l4_hdr_len = TCP_HDR_LENGTH((tcph_t *)pos);
758d11274aaSPaul Guo 	} else {
759d11274aaSPaul Guo 		/*
760d11274aaSPaul Guo 		 * l4 header length is only required for LSO
761d11274aaSPaul Guo 		 */
762d11274aaSPaul Guo 		l4_hdr_len = 0;
763c869993eSxy 	}
764c869993eSxy 
765d11274aaSPaul Guo 	ctx->mac_hdr_len = mac_hdr_len;
766d11274aaSPaul Guo 	ctx->ip_hdr_len = start;
767d11274aaSPaul Guo 	ctx->l4_proto = l4_proto;
768d11274aaSPaul Guo 	ctx->l4_hdr_len = l4_hdr_len;
769d11274aaSPaul Guo 
770d11274aaSPaul Guo 	return (TX_CXT_SUCCESS);
771c869993eSxy }
772c869993eSxy 
773c869993eSxy /*
774d11274aaSPaul Guo  * igb_check_tx_context
775c869993eSxy  *
776c869993eSxy  * Check if a new context descriptor is needed
777c869993eSxy  */
778c869993eSxy static boolean_t
779d11274aaSPaul Guo igb_check_tx_context(igb_tx_ring_t *tx_ring, tx_context_t *ctx)
780c869993eSxy {
781d11274aaSPaul Guo 	tx_context_t *last;
782c869993eSxy 
783d11274aaSPaul Guo 	if (ctx == NULL)
784c869993eSxy 		return (B_FALSE);
785c869993eSxy 
786c869993eSxy 	/*
787d11274aaSPaul Guo 	 * Compare the context data retrieved from the mblk and the
788d11274aaSPaul Guo 	 * stored context data of the last context descriptor. The data
789c869993eSxy 	 * that need to be checked are:
790c869993eSxy 	 *	hcksum_flags
791c869993eSxy 	 *	l4_proto
792*85f496faSRobert Mustacchi 	 *	l3_proto
793d11274aaSPaul Guo 	 *	mss (only check for LSO)
794d11274aaSPaul Guo 	 *	l4_hdr_len (only check for LSO)
795c869993eSxy 	 *	ip_hdr_len
796d11274aaSPaul Guo 	 *	mac_hdr_len
797c869993eSxy 	 * If any one of the above items changes, a new context descriptor
798c869993eSxy 	 * will be needed.
799c869993eSxy 	 */
800d11274aaSPaul Guo 	last = &tx_ring->tx_context;
801d11274aaSPaul Guo 
802d11274aaSPaul Guo 	if (ctx->hcksum_flags != 0) {
803d11274aaSPaul Guo 		if ((ctx->hcksum_flags != last->hcksum_flags) ||
804d11274aaSPaul Guo 		    (ctx->l4_proto != last->l4_proto) ||
805*85f496faSRobert Mustacchi 		    (ctx->l3_proto != last->l3_proto) ||
806d11274aaSPaul Guo 		    (ctx->lso_flag && ((ctx->mss != last->mss) ||
807d11274aaSPaul Guo 		    (ctx->l4_hdr_len != last->l4_hdr_len))) ||
808d11274aaSPaul Guo 		    (ctx->ip_hdr_len != last->ip_hdr_len) ||
809d11274aaSPaul Guo 		    (ctx->mac_hdr_len != last->mac_hdr_len)) {
810c869993eSxy 			return (B_TRUE);
811c869993eSxy 		}
812c869993eSxy 	}
813c869993eSxy 
814c869993eSxy 	return (B_FALSE);
815c869993eSxy }
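
/*
 * Note: for a typical bulk TCP transmit, consecutive packets share the
 * same checksum flags, header lengths and protocol, so the comparison
 * above returns B_FALSE and the context descriptor loaded for an earlier
 * packet is reused; a new context descriptor is only written when one of
 * the compared fields changes (for example, the MSS of an LSO stream).
 */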
816c869993eSxy 
817c869993eSxy /*
818d11274aaSPaul Guo  * igb_fill_tx_context
819c869993eSxy  *
820c869993eSxy  * Fill the context descriptor with hardware checksum information
821c869993eSxy  */
822c869993eSxy static void
823d11274aaSPaul Guo igb_fill_tx_context(struct e1000_adv_tx_context_desc *ctx_tbd,
824d11274aaSPaul Guo     tx_context_t *ctx, uint32_t ring_index)
825c869993eSxy {
826c869993eSxy 	/*
827c869993eSxy 	 * Fill the context descriptor with the checksum
828c869993eSxy 	 * context information we've got
829c869993eSxy 	 */
830d11274aaSPaul Guo 	ctx_tbd->vlan_macip_lens = ctx->ip_hdr_len;
831d11274aaSPaul Guo 	ctx_tbd->vlan_macip_lens |= ctx->mac_hdr_len <<
832c869993eSxy 	    E1000_ADVTXD_MACLEN_SHIFT;
833c869993eSxy 
834c869993eSxy 	ctx_tbd->type_tucmd_mlhl =
835c869993eSxy 	    E1000_ADVTXD_DCMD_DEXT | E1000_ADVTXD_DTYP_CTXT;
836c869993eSxy 
837*85f496faSRobert Mustacchi 	/*
838*85f496faSRobert Mustacchi 	 * When we have a TX context set up, we enforce that the ethertype is
839*85f496faSRobert Mustacchi 	 * either IPv4 or IPv6 in igb_get_tx_context().
840*85f496faSRobert Mustacchi 	 */
841*85f496faSRobert Mustacchi 	if (ctx->lso_flag || ctx->hcksum_flags & HCK_IPV4_HDRCKSUM) {
842*85f496faSRobert Mustacchi 		if (ctx->l3_proto == ETHERTYPE_IP) {
843*85f496faSRobert Mustacchi 			ctx_tbd->type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_IPV4;
844*85f496faSRobert Mustacchi 		} else {
845*85f496faSRobert Mustacchi 			ctx_tbd->type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_IPV6;
846*85f496faSRobert Mustacchi 		}
847*85f496faSRobert Mustacchi 	}
848c869993eSxy 
849*85f496faSRobert Mustacchi 	if (ctx->lso_flag || ctx->hcksum_flags & HCK_PARTIALCKSUM) {
850d11274aaSPaul Guo 		switch (ctx->l4_proto) {
851c869993eSxy 		case IPPROTO_TCP:
852c869993eSxy 			ctx_tbd->type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_TCP;
853c869993eSxy 			break;
854c869993eSxy 		case IPPROTO_UDP:
855c869993eSxy 			/*
856c869993eSxy 			 * We don't have to explicitly set:
857c869993eSxy 			 *	ctx_tbd->type_tucmd_mlhl |=
858c869993eSxy 			 *	    E1000_ADVTXD_TUCMD_L4T_UDP;
859c869993eSxy 			 * Because E1000_ADVTXD_TUCMD_L4T_UDP == 0b
860c869993eSxy 			 */
861c869993eSxy 			break;
862c869993eSxy 		default:
863c869993eSxy 			/* Unrecoverable error */
864fa4e188eSYuri Pankov 			igb_log(NULL, IGB_LOG_INFO,
865fa4e188eSYuri Pankov 			    "L4 type error with tx hcksum");
866c869993eSxy 			break;
867c869993eSxy 		}
868c869993eSxy 	}
869c869993eSxy 
870c869993eSxy 	ctx_tbd->seqnum_seed = 0;
871da14cebeSEric Cheng 	ctx_tbd->mss_l4len_idx = ring_index << 4;
872d11274aaSPaul Guo 	if (ctx->lso_flag) {
873d11274aaSPaul Guo 		ctx_tbd->mss_l4len_idx |=
874d11274aaSPaul Guo 		    (ctx->l4_hdr_len << E1000_ADVTXD_L4LEN_SHIFT) |
875d11274aaSPaul Guo 		    (ctx->mss << E1000_ADVTXD_MSS_SHIFT);
876d11274aaSPaul Guo 	}
877c869993eSxy }
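
/*
 * For instance, an LSO TCP/IPv4 packet ends up with DEXT/CTXT,
 * TUCMD_IPV4 and TUCMD_L4T_TCP set in type_tucmd_mlhl, the IP and MAC
 * header lengths packed into vlan_macip_lens, and the TCP header length,
 * MSS and ring index packed into mss_l4len_idx, as set up above.
 */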
878c869993eSxy 
879c869993eSxy /*
880c869993eSxy  * igb_tx_fill_ring
881c869993eSxy  *
882c869993eSxy  * Fill the tx descriptor ring with the data
883c869993eSxy  */
884c869993eSxy static int
885c869993eSxy igb_tx_fill_ring(igb_tx_ring_t *tx_ring, link_list_t *pending_list,
886d11274aaSPaul Guo     tx_context_t *ctx, size_t mbsize)
887c869993eSxy {
888c869993eSxy 	struct e1000_hw *hw = &tx_ring->igb->hw;
889c869993eSxy 	boolean_t load_context;
890c869993eSxy 	uint32_t index, tcb_index, desc_num;
891c869993eSxy 	union e1000_adv_tx_desc *tbd, *first_tbd;
892c869993eSxy 	tx_control_block_t *tcb, *first_tcb;
893c869993eSxy 	uint32_t hcksum_flags;
894c869993eSxy 	int i;
8958bb4b220Sgl 	igb_t *igb = tx_ring->igb;
896c869993eSxy 
897c869993eSxy 	ASSERT(mutex_owned(&tx_ring->tx_lock));
898c869993eSxy 
899c869993eSxy 	tbd = NULL;
900c869993eSxy 	first_tbd = NULL;
901c869993eSxy 	first_tcb = NULL;
902c869993eSxy 	desc_num = 0;
903c869993eSxy 	hcksum_flags = 0;
904c869993eSxy 	load_context = B_FALSE;
905c869993eSxy 
906c869993eSxy 	/*
907c869993eSxy 	 * Get the index of the first tx descriptor that will be filled,
908c869993eSxy 	 * and the index of the first work list item that will be attached
909c869993eSxy 	 * with the first used tx control block in the pending list.
910c869993eSxy 	 * Note: the two indexes are the same.
911c869993eSxy 	 */
912c869993eSxy 	index = tx_ring->tbd_tail;
913c869993eSxy 	tcb_index = tx_ring->tbd_tail;
914c869993eSxy 
915d11274aaSPaul Guo 	if (ctx != NULL) {
916d11274aaSPaul Guo 		hcksum_flags = ctx->hcksum_flags;
917c869993eSxy 
918c869993eSxy 		/*
919c869993eSxy 		 * Check if a new context descriptor is needed for this packet
920c869993eSxy 		 */
921d11274aaSPaul Guo 		load_context = igb_check_tx_context(tx_ring, ctx);
922c869993eSxy 		if (load_context) {
923c869993eSxy 			tbd = &tx_ring->tbd_ring[index];
924c869993eSxy 
925c869993eSxy 			/*
926c869993eSxy 			 * Fill the context descriptor with the
927c869993eSxy 			 * hardware checksum offload informations.
928c869993eSxy 			 */
929d11274aaSPaul Guo 			igb_fill_tx_context(
930d11274aaSPaul Guo 			    (struct e1000_adv_tx_context_desc *)tbd,
931d11274aaSPaul Guo 			    ctx, tx_ring->index);
932c869993eSxy 
933c869993eSxy 			index = NEXT_INDEX(index, 1, tx_ring->ring_size);
934c869993eSxy 			desc_num++;
935c869993eSxy 
936c869993eSxy 			/*
937c869993eSxy 			 * Store the checksum context data if
938c869993eSxy 			 * a new context descriptor is added
939c869993eSxy 			 */
940d11274aaSPaul Guo 			tx_ring->tx_context = *ctx;
941c869993eSxy 		}
942c869993eSxy 	}
943c869993eSxy 
944c869993eSxy 	first_tbd = &tx_ring->tbd_ring[index];
945c869993eSxy 
946c869993eSxy 	/*
947c869993eSxy 	 * Fill tx data descriptors with the data saved in the pending list.
948c869993eSxy 	 * The tx control blocks in the pending list are added to the work list
949c869993eSxy 	 * at the same time.
950c869993eSxy 	 *
951c869993eSxy 	 * The work list is strictly 1:1 corresponding to the descriptor ring.
952c869993eSxy 	 * One item of the work list corresponds to one tx descriptor. Because
953c869993eSxy 	 * one tx control block can span multiple tx descriptors, the tx
954c869993eSxy 	 * control block will be added to the first work list item that
955c869993eSxy 	 * corresponds to the first tx descriptor generated from that tx
956c869993eSxy 	 * control block.
957c869993eSxy 	 */
958c869993eSxy 	tcb = (tx_control_block_t *)LIST_POP_HEAD(pending_list);
95969b2d733SGuoqing Zhu 	first_tcb = tcb;
960c869993eSxy 	while (tcb != NULL) {
961c869993eSxy 
962c869993eSxy 		for (i = 0; i < tcb->desc_num; i++) {
963c869993eSxy 			tbd = &tx_ring->tbd_ring[index];
964c869993eSxy 
965c869993eSxy 			tbd->read.buffer_addr = tcb->desc[i].address;
966c869993eSxy 			tbd->read.cmd_type_len = tcb->desc[i].length;
967c869993eSxy 
968c869993eSxy 			tbd->read.cmd_type_len |= E1000_ADVTXD_DCMD_RS |
96980a11ad2Schenlu chen - Sun Microsystems - Beijing China 			    E1000_ADVTXD_DCMD_DEXT | E1000_ADVTXD_DTYP_DATA |
97080a11ad2Schenlu chen - Sun Microsystems - Beijing China 			    E1000_ADVTXD_DCMD_IFCS;
971c869993eSxy 
972c869993eSxy 			tbd->read.olinfo_status = 0;
973c869993eSxy 
974c869993eSxy 			index = NEXT_INDEX(index, 1, tx_ring->ring_size);
975c869993eSxy 			desc_num++;
976c869993eSxy 		}
977c869993eSxy 
978c869993eSxy 		/*
979c869993eSxy 		 * Add the tx control block to the work list
980c869993eSxy 		 */
981c869993eSxy 		ASSERT(tx_ring->work_list[tcb_index] == NULL);
982c869993eSxy 		tx_ring->work_list[tcb_index] = tcb;
983c869993eSxy 
984c869993eSxy 		tcb_index = index;
985c869993eSxy 		tcb = (tx_control_block_t *)LIST_POP_HEAD(pending_list);
986c869993eSxy 	}
987c869993eSxy 
98869b2d733SGuoqing Zhu 	if (load_context) {
98969b2d733SGuoqing Zhu 		/*
99069b2d733SGuoqing Zhu 		 * Count the checksum context descriptor for
99169b2d733SGuoqing Zhu 		 * the first tx control block.
99269b2d733SGuoqing Zhu 		 */
99369b2d733SGuoqing Zhu 		first_tcb->desc_num++;
99469b2d733SGuoqing Zhu 	}
99569b2d733SGuoqing Zhu 	first_tcb->last_index = PREV_INDEX(index, 1, tx_ring->ring_size);
99669b2d733SGuoqing Zhu 
997c869993eSxy 	/*
998c869993eSxy 	 * The Insert Ethernet CRC (IFCS) bit and the checksum fields are only
999c869993eSxy 	 * valid in the first descriptor of the packet.
1000d11274aaSPaul Guo 	 * The 82576 also requires the payload length setting even without LSO.
1001c869993eSxy 	 */
1002c869993eSxy 	ASSERT(first_tbd != NULL);
1003c869993eSxy 	first_tbd->read.cmd_type_len |= E1000_ADVTXD_DCMD_IFCS;
1004d11274aaSPaul Guo 	if (ctx != NULL && ctx->lso_flag) {
1005d11274aaSPaul Guo 		first_tbd->read.cmd_type_len |= E1000_ADVTXD_DCMD_TSE;
1006d11274aaSPaul Guo 		first_tbd->read.olinfo_status |=
1007d11274aaSPaul Guo 		    (mbsize - ctx->mac_hdr_len - ctx->ip_hdr_len
1008d11274aaSPaul Guo 		    - ctx->l4_hdr_len) << E1000_ADVTXD_PAYLEN_SHIFT;
1009d11274aaSPaul Guo 	} else {
10103f7e60a6Szhefeng xu - Sun Microsystems - Beijing China 		if (hw->mac.type >= e1000_82576) {
1011d11274aaSPaul Guo 			first_tbd->read.olinfo_status |=
1012d11274aaSPaul Guo 			    (mbsize << E1000_ADVTXD_PAYLEN_SHIFT);
1013d11274aaSPaul Guo 		}
101480a11ad2Schenlu chen - Sun Microsystems - Beijing China 	}
1015c869993eSxy 
1016c869993eSxy 	/* Set hardware checksum bits */
1017c869993eSxy 	if (hcksum_flags != 0) {
1018c869993eSxy 		if (hcksum_flags & HCK_IPV4_HDRCKSUM)
1019c869993eSxy 			first_tbd->read.olinfo_status |=
1020c869993eSxy 			    E1000_TXD_POPTS_IXSM << 8;
1021c869993eSxy 		if (hcksum_flags & HCK_PARTIALCKSUM)
1022c869993eSxy 			first_tbd->read.olinfo_status |=
1023c869993eSxy 			    E1000_TXD_POPTS_TXSM << 8;
1024da14cebeSEric Cheng 		first_tbd->read.olinfo_status |= tx_ring->index << 4;
1025c869993eSxy 	}
1026c869993eSxy 
1027c869993eSxy 	/*
1028c869993eSxy 	 * The last descriptor of packet needs End Of Packet (EOP),
1029c869993eSxy 	 * and Report Status (RS) bits set
1030c869993eSxy 	 */
1031c869993eSxy 	ASSERT(tbd != NULL);
1032c869993eSxy 	tbd->read.cmd_type_len |=
1033c869993eSxy 	    E1000_ADVTXD_DCMD_EOP | E1000_ADVTXD_DCMD_RS;
1034c869993eSxy 
1035da14cebeSEric Cheng 	IGB_DEBUG_STAT(tx_ring->stat_pkt_cnt);
1036da14cebeSEric Cheng 
1037c869993eSxy 	/*
1038c869993eSxy 	 * Sync the DMA buffer of the tx descriptor ring
1039c869993eSxy 	 */
1040c869993eSxy 	DMA_SYNC(&tx_ring->tbd_area, DDI_DMA_SYNC_FORDEV);
1041c869993eSxy 
1042c869993eSxy 	/*
1043c869993eSxy 	 * Update the number of the free tx descriptors.
1044c869993eSxy 	 * The mutual exclusion between the transmission and the recycling
1045c869993eSxy 	 * (for the tx descriptor ring and the work list) is implemented
1046c869993eSxy 	 * with the atomic operation on the number of the free tx descriptors.
1047c869993eSxy 	 *
1048c869993eSxy 	 * Note: we should always decrement the counter tbd_free before
1049c869993eSxy 	 * advancing the hardware TDT pointer to avoid the race condition -
1050c869993eSxy 	 * otherwise the transmit of the tx descriptors could complete
1051c869993eSxy 	 * and the counter tbd_free be increased by the tx recycling
1052c869993eSxy 	 * before the counter is decremented here.
1053c869993eSxy 	 */
1054c869993eSxy 	i = igb_atomic_reserve(&tx_ring->tbd_free, desc_num);
1055c869993eSxy 	ASSERT(i >= 0);
1056c869993eSxy 
1057c869993eSxy 	tx_ring->tbd_tail = index;
1058c869993eSxy 
1059c869993eSxy 	/*
1060c869993eSxy 	 * Advance the hardware TDT pointer of the tx descriptor ring
1061c869993eSxy 	 */
1062c869993eSxy 	E1000_WRITE_REG(hw, E1000_TDT(tx_ring->index), index);
1063c869993eSxy 
10648bb4b220Sgl 	if (igb_check_acc_handle(igb->osdep.reg_handle) != DDI_FM_OK) {
10658bb4b220Sgl 		ddi_fm_service_impact(igb->dip, DDI_SERVICE_DEGRADED);
1066cf8dcc9bSzhefeng xu - Sun Microsystems - Beijing China 		atomic_or_32(&igb->igb_state, IGB_ERROR);
10678bb4b220Sgl 	}
10688bb4b220Sgl 
1069c869993eSxy 	return (desc_num);
1070c869993eSxy }
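
/*
 * Note: the descriptor count returned above includes the optional context
 * descriptor, which is why the caller accepts either desc_total or
 * desc_total + 1 from this function.
 */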
1071c869993eSxy 
1072c869993eSxy /*
1073c869993eSxy  * igb_save_desc
1074c869993eSxy  *
1075c869993eSxy  * Save the address/length pair to the private array
1076c869993eSxy  * of the tx control block. The address/length pairs
1077c869993eSxy  * will be filled into the tx descriptor ring later.
1078c869993eSxy  */
1079c869993eSxy static void
1080c869993eSxy igb_save_desc(tx_control_block_t *tcb, uint64_t address, size_t length)
1081c869993eSxy {
1082c869993eSxy 	sw_desc_t *desc;
1083c869993eSxy 
1084c869993eSxy 	desc = &tcb->desc[tcb->desc_num];
1085c869993eSxy 	desc->address = address;
1086c869993eSxy 	desc->length = length;
1087c869993eSxy 
1088c869993eSxy 	tcb->desc_num++;
1089c869993eSxy }
1090c869993eSxy 
1091c869993eSxy /*
1092c869993eSxy  * igb_tx_recycle_legacy
1093c869993eSxy  *
1094c869993eSxy  * Recycle the tx descriptors and tx control blocks.
1095c869993eSxy  *
1096c869993eSxy  * The work list is traversed to check if the corresponding
1097c869993eSxy  * tx descriptors have been transmitted. If so, the resources
1098c869993eSxy  * bound to the tx control blocks will be freed, and those
1099c869993eSxy  * tx control blocks will be returned to the free list.
1100c869993eSxy  */
1101c869993eSxy uint32_t
1102c869993eSxy igb_tx_recycle_legacy(igb_tx_ring_t *tx_ring)
1103c869993eSxy {
110469b2d733SGuoqing Zhu 	uint32_t index, last_index, next_index;
1105c869993eSxy 	int desc_num;
1106c869993eSxy 	boolean_t desc_done;
1107c869993eSxy 	tx_control_block_t *tcb;
1108c869993eSxy 	link_list_t pending_list;
11098bb4b220Sgl 	igb_t *igb = tx_ring->igb;
1110c869993eSxy 
1111c869993eSxy 	/*
1112c869993eSxy 	 * The mutex_tryenter() is used to avoid unnecessary
1113c869993eSxy 	 * lock contention.
1114c869993eSxy 	 */
1115c869993eSxy 	if (mutex_tryenter(&tx_ring->recycle_lock) == 0)
1116c869993eSxy 		return (0);
1117c869993eSxy 
1118c869993eSxy 	ASSERT(tx_ring->tbd_free <= tx_ring->ring_size);
1119c869993eSxy 
1120c869993eSxy 	if (tx_ring->tbd_free == tx_ring->ring_size) {
1121c869993eSxy 		tx_ring->recycle_fail = 0;
1122c869993eSxy 		tx_ring->stall_watchdog = 0;
1123c869993eSxy 		mutex_exit(&tx_ring->recycle_lock);
1124c869993eSxy 		return (0);
1125c869993eSxy 	}
1126c869993eSxy 
1127c869993eSxy 	/*
1128c869993eSxy 	 * Sync the DMA buffer of the tx descriptor ring
1129c869993eSxy 	 */
1130c869993eSxy 	DMA_SYNC(&tx_ring->tbd_area, DDI_DMA_SYNC_FORKERNEL);
1131c869993eSxy 
11328bb4b220Sgl 	if (igb_check_dma_handle(
11338bb4b220Sgl 	    tx_ring->tbd_area.dma_handle) != DDI_FM_OK) {
1134b227c420Schenlu chen - Sun Microsystems - Beijing China 		mutex_exit(&tx_ring->recycle_lock);
11358bb4b220Sgl 		ddi_fm_service_impact(igb->dip, DDI_SERVICE_DEGRADED);
1136cf8dcc9bSzhefeng xu - Sun Microsystems - Beijing China 		atomic_or_32(&igb->igb_state, IGB_ERROR);
1137cf8dcc9bSzhefeng xu - Sun Microsystems - Beijing China 		return (0);
11388bb4b220Sgl 	}
11398bb4b220Sgl 
1140c869993eSxy 	LINK_LIST_INIT(&pending_list);
1141c869993eSxy 	desc_num = 0;
1142c869993eSxy 	index = tx_ring->tbd_head;	/* Index of next tbd/tcb to recycle */
1143c869993eSxy 
1144c869993eSxy 	tcb = tx_ring->work_list[index];
1145c869993eSxy 	ASSERT(tcb != NULL);
1146c869993eSxy 
114769b2d733SGuoqing Zhu 	while (tcb != NULL) {
1148c869993eSxy 
1149c869993eSxy 		/*
115069b2d733SGuoqing Zhu 		 * Get the last tx descriptor of this packet.
115169b2d733SGuoqing Zhu 		 * If the last tx descriptor is done, then
115269b2d733SGuoqing Zhu 		 * we can recycle all descriptors of a packet
115369b2d733SGuoqing Zhu 		 * which usually includes several tx control blocks.
115469b2d733SGuoqing Zhu 		 * For some chips, LSO descriptors cannot be recycled
115569b2d733SGuoqing Zhu 		 * unless the whole packet's transmission is done.
115669b2d733SGuoqing Zhu 		 * That's why packet-level recycling is used here.
1157c869993eSxy 		 */
115869b2d733SGuoqing Zhu 		last_index = tcb->last_index;
115969b2d733SGuoqing Zhu 		/*
116069b2d733SGuoqing Zhu 		 * MAX_TX_RING_SIZE is used to judge whether
116169b2d733SGuoqing Zhu 		 * the index is a valid value or not.
116269b2d733SGuoqing Zhu 		 */
116369b2d733SGuoqing Zhu 		if (last_index == MAX_TX_RING_SIZE)
116469b2d733SGuoqing Zhu 			break;
116569b2d733SGuoqing Zhu 
116669b2d733SGuoqing Zhu 		next_index = NEXT_INDEX(last_index, 1, tx_ring->ring_size);
1167c869993eSxy 
1168c869993eSxy 		/*
1169c869993eSxy 		 * Check if the Descriptor Done bit is set
1170c869993eSxy 		 */
1171c869993eSxy 		desc_done = tx_ring->tbd_ring[last_index].wb.status &
1172c869993eSxy 		    E1000_TXD_STAT_DD;
1173c869993eSxy 		if (desc_done) {
117469b2d733SGuoqing Zhu 			while (tcb != NULL) {
117569b2d733SGuoqing Zhu 				/*
117669b2d733SGuoqing Zhu 				 * Strip off the tx control block from the work
117769b2d733SGuoqing Zhu 				 * list, and add it to the pending list.
117869b2d733SGuoqing Zhu 				 */
117969b2d733SGuoqing Zhu 				tx_ring->work_list[index] = NULL;
118069b2d733SGuoqing Zhu 				LIST_PUSH_TAIL(&pending_list, &tcb->link);
1181c869993eSxy 
118269b2d733SGuoqing Zhu 				/*
118369b2d733SGuoqing Zhu 				 * Count the total number of the tx descriptors
118469b2d733SGuoqing Zhu 				 * recycled.
118569b2d733SGuoqing Zhu 				 */
118669b2d733SGuoqing Zhu 				desc_num += tcb->desc_num;
1187c869993eSxy 
118869b2d733SGuoqing Zhu 				/*
118969b2d733SGuoqing Zhu 				 * Advance the index of the tx descriptor ring
119069b2d733SGuoqing Zhu 				 */
119169b2d733SGuoqing Zhu 				index = NEXT_INDEX(index, tcb->desc_num,
119269b2d733SGuoqing Zhu 				    tx_ring->ring_size);
1193c869993eSxy 
119469b2d733SGuoqing Zhu 				tcb = tx_ring->work_list[index];
119569b2d733SGuoqing Zhu 				if (index == next_index)
119669b2d733SGuoqing Zhu 					break;
119769b2d733SGuoqing Zhu 			}
119869b2d733SGuoqing Zhu 		} else {
119969b2d733SGuoqing Zhu 			break;
1200c869993eSxy 		}
1201c869993eSxy 	}
1202c869993eSxy 
1203c869993eSxy 	/*
1204c869993eSxy 	 * If no tx descriptors are recycled, no need to do more processing
1205c869993eSxy 	 */
1206c869993eSxy 	if (desc_num == 0) {
1207c869993eSxy 		tx_ring->recycle_fail++;
1208c869993eSxy 		mutex_exit(&tx_ring->recycle_lock);
1209c869993eSxy 		return (0);
1210c869993eSxy 	}
1211c869993eSxy 
1212c869993eSxy 	tx_ring->recycle_fail = 0;
1213c869993eSxy 	tx_ring->stall_watchdog = 0;
1214c869993eSxy 
1215c869993eSxy 	/*
1216c869993eSxy 	 * Update the head index of the tx descriptor ring
1217c869993eSxy 	 */
1218c869993eSxy 	tx_ring->tbd_head = index;
1219c869993eSxy 
1220c869993eSxy 	/*
1221c869993eSxy 	 * Update the number of free tx descriptors with atomic operations
1222c869993eSxy 	 */
1223c869993eSxy 	atomic_add_32(&tx_ring->tbd_free, desc_num);
1224c869993eSxy 
1225c869993eSxy 	mutex_exit(&tx_ring->recycle_lock);
1226c869993eSxy 
1227c869993eSxy 	/*
1228c869993eSxy 	 * Free the resources used by the tx control blocks
1229c869993eSxy 	 * in the pending list
1230c869993eSxy 	 */
1231c869993eSxy 	tcb = (tx_control_block_t *)LIST_GET_HEAD(&pending_list);
1232c869993eSxy 	while (tcb != NULL) {
1233c869993eSxy 		/*
1234c869993eSxy 		 * Release the resources occupied by the tx control block
1235c869993eSxy 		 */
1236c869993eSxy 		igb_free_tcb(tcb);
1237c869993eSxy 
1238c869993eSxy 		tcb = (tx_control_block_t *)
1239c869993eSxy 		    LIST_GET_NEXT(&pending_list, &tcb->link);
1240c869993eSxy 	}
1241c869993eSxy 
1242c869993eSxy 	/*
1243c869993eSxy 	 * Add the tx control blocks in the pending list to the free list.
1244c869993eSxy 	 */
1245c869993eSxy 	igb_put_free_list(tx_ring, &pending_list);
1246c869993eSxy 
1247c869993eSxy 	return (desc_num);
1248c869993eSxy }
1249c869993eSxy 
1250c869993eSxy /*
1251c869993eSxy  * igb_tx_recycle_head_wb
1252c869993eSxy  *
1253c869993eSxy  * Check the head write-back, and recycle all the transmitted
1254c869993eSxy  * tx descriptors and tx control blocks.
1255c869993eSxy  */
1256c869993eSxy uint32_t
1257c869993eSxy igb_tx_recycle_head_wb(igb_tx_ring_t *tx_ring)
1258c869993eSxy {
1259c869993eSxy 	uint32_t index;
1260c869993eSxy 	uint32_t head_wb;
1261c869993eSxy 	int desc_num;
1262c869993eSxy 	tx_control_block_t *tcb;
1263c869993eSxy 	link_list_t pending_list;
12648bb4b220Sgl 	igb_t *igb = tx_ring->igb;
1265c869993eSxy 
1266c869993eSxy 	/*
1267c869993eSxy 	 * The mutex_tryenter() is used to avoid unnecessary
1268c869993eSxy 	 * lock contention.
1269c869993eSxy 	 */
1270c869993eSxy 	if (mutex_tryenter(&tx_ring->recycle_lock) == 0)
1271c869993eSxy 		return (0);
1272c869993eSxy 
1273c869993eSxy 	ASSERT(tx_ring->tbd_free <= tx_ring->ring_size);
1274c869993eSxy 
1275c869993eSxy 	if (tx_ring->tbd_free == tx_ring->ring_size) {
1276c869993eSxy 		tx_ring->recycle_fail = 0;
1277c869993eSxy 		tx_ring->stall_watchdog = 0;
1278c869993eSxy 		mutex_exit(&tx_ring->recycle_lock);
1279c869993eSxy 		return (0);
1280c869993eSxy 	}
1281c869993eSxy 
1282c869993eSxy 	/*
1283c869993eSxy 	 * Sync the DMA buffer of the tx descriptor ring
1284c869993eSxy 	 *
1285c869993eSxy 	 * Note: In head write-back mode the tx descriptors are not
1286c869993eSxy 	 * written back by the hardware; the head write-back value is
1287c869993eSxy 	 * stored in the extra tbd at the end of the DMA area instead,
1288c869993eSxy 	 * so we still need to sync that value for the kernel.
1289c869993eSxy 	 *
1290c869993eSxy 	 * DMA_SYNC(&tx_ring->tbd_area, DDI_DMA_SYNC_FORKERNEL);
1291c869993eSxy 	 */
1292c869993eSxy 	(void) ddi_dma_sync(tx_ring->tbd_area.dma_handle,
1293c869993eSxy 	    sizeof (union e1000_adv_tx_desc) * tx_ring->ring_size,
1294c869993eSxy 	    sizeof (uint32_t),
1295c869993eSxy 	    DDI_DMA_SYNC_FORKERNEL);
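	/*
	 * The offset above skips all ring_size descriptors, and the length
	 * is a single uint32_t: the head value the hardware writes back
	 * there, which tbd_head_wb points at.
	 */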
1296c869993eSxy 
12978bb4b220Sgl 	if (igb_check_dma_handle(
12988bb4b220Sgl 	    tx_ring->tbd_area.dma_handle) != DDI_FM_OK) {
1299b227c420Schenlu chen - Sun Microsystems - Beijing China 		mutex_exit(&tx_ring->recycle_lock);
13008bb4b220Sgl 		ddi_fm_service_impact(igb->dip, DDI_SERVICE_DEGRADED);
1301cf8dcc9bSzhefeng xu - Sun Microsystems - Beijing China 		atomic_or_32(&igb->igb_state, IGB_ERROR);
1302cf8dcc9bSzhefeng xu - Sun Microsystems - Beijing China 		return (0);
13038bb4b220Sgl 	}
13048bb4b220Sgl 
1305c869993eSxy 	LINK_LIST_INIT(&pending_list);
1306c869993eSxy 	desc_num = 0;
1307c869993eSxy 	index = tx_ring->tbd_head;	/* Next index to clean */
1308c869993eSxy 
1309c869993eSxy 	/*
1310c869993eSxy 	 * Get the value of head write-back
1311c869993eSxy 	 */
1312c869993eSxy 	head_wb = *tx_ring->tbd_head_wb;
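	/*
	 * head_wb is the ring index the hardware will process next; every
	 * descriptor strictly before it has been transmitted.  Recycling
	 * walks forward from tbd_head until it catches up with head_wb,
	 * and the OFFSET() check below stops early if a control block's
	 * descriptors straddle head_wb.
	 */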
1313c869993eSxy 	while (index != head_wb) {
1314c869993eSxy 		tcb = tx_ring->work_list[index];
1315c869993eSxy 		ASSERT(tcb != NULL);
1316c869993eSxy 
1317c869993eSxy 		if (OFFSET(index, head_wb, tx_ring->ring_size) <
1318c869993eSxy 		    tcb->desc_num) {
1319c869993eSxy 			/*
1320c869993eSxy 			 * The current tx control block is not
1321c869993eSxy 			 * completely transmitted, stop recycling
1322c869993eSxy 			 */
1323c869993eSxy 			break;
1324c869993eSxy 		}
1325c869993eSxy 
1326c869993eSxy 		/*
1327c869993eSxy 		 * Strip off the tx control block from the work list,
1328c869993eSxy 		 * and add it to the pending list.
1329c869993eSxy 		 */
1330c869993eSxy 		tx_ring->work_list[index] = NULL;
1331c869993eSxy 		LIST_PUSH_TAIL(&pending_list, &tcb->link);
1332c869993eSxy 
1333c869993eSxy 		/*
1334c869993eSxy 		 * Advance the index of the tx descriptor ring
1335c869993eSxy 		 */
1336c869993eSxy 		index = NEXT_INDEX(index, tcb->desc_num, tx_ring->ring_size);
1337c869993eSxy 
1338c869993eSxy 		/*
1339c869993eSxy 		 * Count the total number of the tx descriptors recycled
1340c869993eSxy 		 */
1341c869993eSxy 		desc_num += tcb->desc_num;
1342c869993eSxy 	}
1343c869993eSxy 
1344c869993eSxy 	/*
1345c869993eSxy 	 * If no tx descriptors are recycled, no need to do more processing
1346c869993eSxy 	 */
1347c869993eSxy 	if (desc_num == 0) {
1348c869993eSxy 		tx_ring->recycle_fail++;
1349c869993eSxy 		mutex_exit(&tx_ring->recycle_lock);
1350c869993eSxy 		return (0);
1351c869993eSxy 	}
1352c869993eSxy 
1353c869993eSxy 	tx_ring->recycle_fail = 0;
1354c869993eSxy 	tx_ring->stall_watchdog = 0;
1355c869993eSxy 
1356c869993eSxy 	/*
1357c869993eSxy 	 * Update the head index of the tx descriptor ring
1358c869993eSxy 	 */
1359c869993eSxy 	tx_ring->tbd_head = index;
1360c869993eSxy 
1361c869993eSxy 	/*
1362c869993eSxy 	 * Update the number of free tx descriptors with atomic operations
1363c869993eSxy 	 */
1364c869993eSxy 	atomic_add_32(&tx_ring->tbd_free, desc_num);
1365c869993eSxy 
1366c869993eSxy 	mutex_exit(&tx_ring->recycle_lock);
1367c869993eSxy 
1368c869993eSxy 	/*
1369c869993eSxy 	 * Free the resources used by the tx control blocks
1370c869993eSxy 	 * in the pending list
1371c869993eSxy 	 */
1372c869993eSxy 	tcb = (tx_control_block_t *)LIST_GET_HEAD(&pending_list);
1373c869993eSxy 	while (tcb != NULL) {
1374c869993eSxy 		/*
1375c869993eSxy 		 * Release the resources occupied by the tx control block
1376c869993eSxy 		 */
1377c869993eSxy 		igb_free_tcb(tcb);
1378c869993eSxy 
1379c869993eSxy 		tcb = (tx_control_block_t *)
1380c869993eSxy 		    LIST_GET_NEXT(&pending_list, &tcb->link);
1381c869993eSxy 	}
1382c869993eSxy 
1383c869993eSxy 	/*
1384c869993eSxy 	 * Add the tx control blocks in the pending list to the free list.
1385c869993eSxy 	 */
1386c869993eSxy 	igb_put_free_list(tx_ring, &pending_list);
1387c869993eSxy 
1388c869993eSxy 	return (desc_num);
1389c869993eSxy }
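/*
 * Note on the two recycle paths above: the descriptor write-back path polls
 * the DD bit in the last descriptor of each packet, while the head
 * write-back path reads a single head value maintained by the hardware and
 * so never has to inspect individual descriptors.  Both paths collect the
 * finished control blocks on a local pending list and hand them to
 * igb_put_free_list() in one batch.
 */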
1390c869993eSxy 
1391c869993eSxy /*
1392c869993eSxy  * igb_free_tcb - free up the tx control block
1393c869993eSxy  *
1394c869993eSxy  * Free the resources of the tx control block, including
1395c869993eSxy  * unbinding the previously bound DMA handle and resetting
1396c869993eSxy  * other control fields.
1397c869993eSxy  */
1398c869993eSxy void
1399c869993eSxy igb_free_tcb(tx_control_block_t *tcb)
1400c869993eSxy {
1401c869993eSxy 	switch (tcb->tx_type) {
1402c869993eSxy 	case USE_COPY:
1403c869993eSxy 		/*
1404c869993eSxy 		 * Reset the buffer length that is used for copy
1405c869993eSxy 		 */
1406c869993eSxy 		tcb->tx_buf.len = 0;
1407c869993eSxy 		break;
1408c869993eSxy 	case USE_DMA:
1409c869993eSxy 		/*
1410c869993eSxy 		 * Release the DMA resource that is used for
1411c869993eSxy 		 * DMA binding.
1412c869993eSxy 		 */
1413c869993eSxy 		(void) ddi_dma_unbind_handle(tcb->tx_dma_handle);
1414c869993eSxy 		break;
1415c869993eSxy 	default:
1416c869993eSxy 		break;
1417c869993eSxy 	}
1418c869993eSxy 
1419c869993eSxy 	/*
1420c869993eSxy 	 * Free the mblk
1421c869993eSxy 	 */
1422c869993eSxy 	if (tcb->mp != NULL) {
1423c869993eSxy 		freemsg(tcb->mp);
1424c869993eSxy 		tcb->mp = NULL;
1425c869993eSxy 	}
1426c869993eSxy 
1427c869993eSxy 	tcb->tx_type = USE_NONE;
142869b2d733SGuoqing Zhu 	tcb->last_index = MAX_TX_RING_SIZE;
1429c869993eSxy 	tcb->frag_num = 0;
1430c869993eSxy 	tcb->desc_num = 0;
1431c869993eSxy }
1432c869993eSxy 
1433c869993eSxy /*
1434c869993eSxy  * igb_get_free_list - Get a free tx control block from the free list
1435c869993eSxy  *
1436c869993eSxy  * The atomic operation on the number of available tx control blocks
1437c869993eSxy  * in the free list is used to keep this routine mutually exclusive
1438c869993eSxy  * with the routine igb_put_free_list.
1439c869993eSxy  */
1440c869993eSxy static tx_control_block_t *
1441c869993eSxy igb_get_free_list(igb_tx_ring_t *tx_ring)
1442c869993eSxy {
1443c869993eSxy 	tx_control_block_t *tcb;
1444c869993eSxy 
1445c869993eSxy 	/*
1446c869993eSxy 	 * Check and update the number of free tx control blocks
1447c869993eSxy 	 * in the free list.
1448c869993eSxy 	 */
1449c869993eSxy 	if (igb_atomic_reserve(&tx_ring->tcb_free, 1) < 0)
1450c869993eSxy 		return (NULL);
1451c869993eSxy 
1452c869993eSxy 	mutex_enter(&tx_ring->tcb_head_lock);
1453c869993eSxy 
1454c869993eSxy 	tcb = tx_ring->free_list[tx_ring->tcb_head];
1455c869993eSxy 	ASSERT(tcb != NULL);
1456c869993eSxy 	tx_ring->free_list[tx_ring->tcb_head] = NULL;
1457c869993eSxy 	tx_ring->tcb_head = NEXT_INDEX(tx_ring->tcb_head, 1,
1458c869993eSxy 	    tx_ring->free_list_size);
1459c869993eSxy 
1460c869993eSxy 	mutex_exit(&tx_ring->tcb_head_lock);
1461c869993eSxy 
1462c869993eSxy 	return (tcb);
1463c869993eSxy }
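/*
 * A minimal sketch of the reservation step assumed above (igb_atomic_reserve()
 * is defined elsewhere in the driver): atomically decrement the counter only
 * if the result stays non-negative, otherwise fail without touching it.
 * Something along the lines of:
 *
 *	do {
 *		oldval = *count;
 *		if (oldval == 0)
 *			return (-1);
 *	} while (atomic_cas_32(count, oldval, oldval - 1) != oldval);
 *	return (oldval - 1);
 */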
1464c869993eSxy 
1465c869993eSxy /*
1466c869993eSxy  * igb_put_free_list
1467c869993eSxy  *
1468c869993eSxy  * Put a list of used tx control blocks back to the free list
1469c869993eSxy  *
1470c869993eSxy  * A mutex is used here to ensure serialization. The mutual exclusion
1471c869993eSxy  * between igb_get_free_list and igb_put_free_list is implemented with
1472c869993eSxy  * the atomic operation on the counter tcb_free.
1473c869993eSxy  */
1474c869993eSxy void
1475c869993eSxy igb_put_free_list(igb_tx_ring_t *tx_ring, link_list_t *pending_list)
1476c869993eSxy {
1477c869993eSxy 	uint32_t index;
1478c869993eSxy 	int tcb_num;
1479c869993eSxy 	tx_control_block_t *tcb;
1480c869993eSxy 
1481c869993eSxy 	mutex_enter(&tx_ring->tcb_tail_lock);
1482c869993eSxy 
1483c869993eSxy 	index = tx_ring->tcb_tail;
1484c869993eSxy 
1485c869993eSxy 	tcb_num = 0;
1486c869993eSxy 	tcb = (tx_control_block_t *)LIST_POP_HEAD(pending_list);
1487c869993eSxy 	while (tcb != NULL) {
1488c869993eSxy 		ASSERT(tx_ring->free_list[index] == NULL);
1489c869993eSxy 		tx_ring->free_list[index] = tcb;
1490c869993eSxy 
1491c869993eSxy 		tcb_num++;
1492c869993eSxy 
1493c869993eSxy 		index = NEXT_INDEX(index, 1, tx_ring->free_list_size);
1494c869993eSxy 
1495c869993eSxy 		tcb = (tx_control_block_t *)LIST_POP_HEAD(pending_list);
1496c869993eSxy 	}
1497c869993eSxy 
1498c869993eSxy 	tx_ring->tcb_tail = index;
1499c869993eSxy 
1500c869993eSxy 	/*
1501c869993eSxy 	 * Update the number of free tx control blocks
1502c869993eSxy 	 * in the free list. This operation must be placed
1503c869993eSxy 	 * under the protection of the lock.
1504c869993eSxy 	 */
1505c869993eSxy 	atomic_add_32(&tx_ring->tcb_free, tcb_num);
1506c869993eSxy 
1507c869993eSxy 	mutex_exit(&tx_ring->tcb_tail_lock);
1508c869993eSxy }
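/*
 * Design note: the free list is a circular buffer with separate head and
 * tail locks.  igb_get_free_list() pops at the head under tcb_head_lock,
 * igb_put_free_list() pushes at the tail under tcb_tail_lock, and the
 * atomic tcb_free counter keeps the head from overtaking the tail, so the
 * two routines never contend on the same lock.
 */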
1509