/*
 * Copyright 2014-2017 Cavium, Inc.
 * The contents of this file are subject to the terms of the Common Development
 * and Distribution License, v.1 (the "License").
 *
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the License at
 * http://opensource.org/licenses/CDDL-1.0
 *
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/*
 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2019, Joyent, Inc.
 */

#include "bnxsnd.h"


/* Transmit thresholds and double-copy buffer alignment. */
#define	BNX_DCOPY_ALIGN			32
#define	BNX_XMIT_INIT_FAIL_THRESH	1
#define	BNX_PDWM_THRESHOLD		8


#ifndef NUM_TX_CHAIN
#error NUM_TX_CHAIN is not defined.
#else
/*
 * Range check NUM_TX_CHAIN.  Technically the LM controls this definition,
 * but it makes sense to use what the LM uses.
 */
#if NUM_TX_CHAIN < 0
#error Invalid NUM_TX_CHAIN definition.
#elif NUM_TX_CHAIN > 1
#warning NUM_TX_CHAIN is greater than 1.
#endif
#endif


static ddi_dma_attr_t bnx_snd_dma_attrib = {
	DMA_ATTR_V0,			/* dma_attr_version */
	0,				/* dma_attr_addr_lo */
	0xffffffffffffffff,		/* dma_attr_addr_hi */
	0x0ffffff,			/* dma_attr_count_max */
	BNX_DMA_ALIGNMENT,		/* dma_attr_align */
	0xffffffff,			/* dma_attr_burstsizes */
	1,				/* dma_attr_minxfer */
	0x00ffffff,			/* dma_attr_maxxfer */
	0xffffffff,			/* dma_attr_seg */
	BNX_MAX_SGL_ENTRIES,		/* dma_attr_sgllen */
	BNX_MIN_BYTES_PER_FRAGMENT,	/* dma_attr_granular */
	0,				/* dma_attr_flags */
};

/*
 * Description:  Maps the data in a single message block for DMA and appends
 *               the resulting cookies to the packet's fragment list.
 *
 * Return:  DDI_DMA_MAPPED:   Success.
 *          DDI_DMA_INUSE:    Another I/O transaction is using the DMA handle.
 *          DDI_DMA_NORESOURCES: No resources are available at the present
 *                            time.
 *          DDI_DMA_NOMAPPING: The object cannot be reached by the device
 *                            requesting the resources.
 *          DDI_DMA_TOOBIG:   The object is too big.  A request of this size
 *                            can never be satisfied on this particular
 *                            system.  The maximum size varies depending on
 *                            machine and configuration.
 */
static int
bnx_xmit_frag_map(mblk_t *mp, ddi_dma_handle_t *handle,
    lm_frag_list_t *fraglist)
{
	int i;
	uint_t ccount;
	ddi_dma_cookie_t cookie;
	lm_frag_t *fragment;

	if (fraglist->cnt >= BNX_MAX_SGL_ENTRIES) {
		return (DDI_DMA_NOMAPPING);
	}

	i = ddi_dma_addr_bind_handle(*handle, NULL,
	    (caddr_t)mp->b_rptr, mp->b_wptr - mp->b_rptr,
	    DDI_DMA_WRITE | DDI_DMA_STREAMING, DDI_DMA_DONTWAIT, NULL,
	    &cookie, &ccount);
	if (i != DDI_DMA_MAPPED) {
		return (i);
	}

	/*
	 * This check looks redundant at first, but it is needed.
	 * ddi_dma_addr_bind_handle() correctly returns an error if
	 * the physical fragment count for the current mp exceeds the
	 * maximum fragment count specified in the DMA attributes.
	 * However, a packet can span multiple mp's.  The check below
	 * makes sure the packet-wide fragment count limit is not
	 * exceeded.
	 */
	if (fraglist->cnt + ccount > BNX_MAX_SGL_ENTRIES) {
		/* We hit our fragment count limit. */
		(void) ddi_dma_unbind_handle(*handle);

		return (DDI_DMA_NOMAPPING);
	}

	fragment = &(fraglist->frag_arr[fraglist->cnt]);
	fraglist->cnt += ccount;

	for (i = 0; i < ccount - 1; i++) {
		fragment->addr.as_u64 = cookie.dmac_laddress;
		fragment->size = cookie.dmac_size;

		fragment++;

		ddi_dma_nextcookie(*handle, &cookie);
	}

	fragment->addr.as_u64 = cookie.dmac_laddress;
	fragment->size = cookie.dmac_size;

	return (0);
}

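/*
 * Releases every DMA binding that bnx_xmit_pkt_map() established for the
 * packet and resets its handle count.
 */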
static void
bnx_xmit_pkt_unmap(um_txpacket_t * const umpacket)
{
	int i;

	for (i = 0; i < umpacket->num_handles; i++) {
		(void) ddi_dma_unbind_handle(umpacket->dma_handle[i]);
	}

	umpacket->num_handles = 0;
}

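/*
 * Binds one message block to the packet's next free DMA handle and records
 * the resulting fragments.  Returns 0 on success or a BNX_TX_RESOURCES_*
 * code if the packet is out of handles or the bind fails.
 */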
int
bnx_xmit_pkt_map(um_txpacket_t * const umpacket, mblk_t *mp)
{
	int rc;
	u32_t num_dma_handle;

	num_dma_handle = umpacket->num_handles;

	if (num_dma_handle == BNX_MAX_SGL_ENTRIES) {
		return (BNX_TX_RESOURCES_TOO_MANY_FRAGS);
	}

	rc = bnx_xmit_frag_map(mp, &umpacket->dma_handle[num_dma_handle++],
	    &(umpacket->frag_list));
	if (rc) {
		return (BNX_TX_RESOURCES_NO_OS_DMA_RES);
	}

	umpacket->num_handles = num_dma_handle;

	return (0);
}

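/*
 * Walks the message blocks of a packet and either DMA-maps each block or
 * copies it into the packet's preallocated double-copy buffer.  Blocks
 * larger than tx_copy_threshold are mapped first; once a mapping attempt
 * fails, the remainder of the packet is copied.  If nothing was mapped, the
 * original mblk chain is freed here because the copy buffer now holds the
 * entire frame.
 */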
static void
bnx_xmit_pkt_cpy(um_device_t * const umdevice, um_txpacket_t * const umpacket)
{
	size_t msgsize;
	u32_t cpysize;
	lm_frag_t *cpyfrag;
	boolean_t map_enable;
	mblk_t *mp;
	int rc;

	map_enable = B_TRUE;
	cpysize = 0;
	cpyfrag = NULL;

	for (mp = umpacket->mp; mp; mp = mp->b_cont) {
		msgsize = MBLKL(mp);

		if (msgsize == 0)
			continue;

		if (map_enable && msgsize > umdevice->tx_copy_threshold) {
			rc = bnx_xmit_pkt_map(umpacket, mp);
			if (rc == 0) {
				cpyfrag = NULL;
				continue;
			} else {
				map_enable = B_FALSE;
			}
		}

		ASSERT(cpysize + msgsize <= umdevice->dev_var.mtu +
		    sizeof (struct ether_vlan_header));

		bcopy(mp->b_rptr, (char *)umpacket->cpymem + cpysize, msgsize);

		if (cpyfrag != NULL) {
			cpyfrag->size += msgsize;
		} else {
			cpyfrag = &umpacket->frag_list.frag_arr[
			    umpacket->frag_list.cnt++];
			ASSERT(umpacket->frag_list.cnt <= BNX_MAX_SGL_ENTRIES +
			    1);
			cpyfrag->size = msgsize;

			cpyfrag->addr.as_u64 = umpacket->cpyphy.as_u64 +
			    cpysize;
		}

		cpysize += msgsize;
	}

	if (cpysize > 0) {
		(void) ddi_dma_sync(*(umpacket->cpyhdl), umpacket->cpyoff,
		    cpysize, DDI_DMA_SYNC_FORDEV);
	}

	if (umpacket->num_handles == 0) {
		freemsg(umpacket->mp);
		umpacket->mp = NULL;
	}
}

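/*
 * One-time setup of a TX packet descriptor: preallocates a DMA handle per
 * possible SGL entry and records the slice of the ring's double-copy buffer
 * (virtual address, physical address and offset) that belongs to this
 * descriptor.
 */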
static int
bnx_xmit_pkt_init(um_device_t * const umdevice, um_txpacket_t * const umpacket,
    int num, lm_u64_t memphys)
{
	int i;
	int rc;
	um_xmit_qinfo *xmitinfo;

	xmitinfo = &_TX_QINFO(umdevice, 0);

	for (i = 0; i < BNX_MAX_SGL_ENTRIES; i++) {
		rc = ddi_dma_alloc_handle(umdevice->os_param.dip,
		    &bnx_snd_dma_attrib, DDI_DMA_DONTWAIT,
		    (void *)0, &umpacket->dma_handle[i]);
		if (rc != DDI_SUCCESS) {
			cmn_err(CE_WARN, "%s:%s failed. (errno=%d)",
			    umdevice->dev_name, __func__, rc);
			goto error;
		}
	}

	/* Initialize the relevant fields of the packet structure. */
	umpacket->mp = NULL;
	umpacket->num_handles = 0;
	umpacket->frag_list.cnt = 0;

	umpacket->cpyhdl = &(xmitinfo->dcpyhndl);
	umpacket->cpyoff = num * xmitinfo->dcpyhard;
	umpacket->cpymem = xmitinfo->dcpyvirt + umpacket->cpyoff;
	umpacket->cpyphy = memphys;

	return (rc);

error:
	for (i--; i >= 0; i--) {
		ddi_dma_free_handle(&umpacket->dma_handle[i]);
	}

	return (-1);
}

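/*
 * Undoes bnx_xmit_pkt_init(): frees the descriptor's DMA handles and clears
 * its bookkeeping fields.
 */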
static void
bnx_xmit_pkt_fini(um_txpacket_t * const umpacket)
{
	int i;

	for (i = BNX_MAX_SGL_ENTRIES - 1; i >= 0; i--) {
		ddi_dma_free_handle(&umpacket->dma_handle[i]);
	}

	umpacket->mp = NULL;
	umpacket->num_handles = 0;
	umpacket->frag_list.cnt = 0;

	umpacket->cpyhdl = NULL;
	umpacket->cpyoff = 0;
	umpacket->cpymem = NULL;
}

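/*
 * Hands a prepared packet to the LM layer for transmission on the given
 * ring.  If buffer descriptors are running low, completed packets are
 * reclaimed first; if the ring still cannot take the packet, it is queued
 * on the ring's deferred-send list and BNX_SEND_DEFERPKT is returned when
 * it could not be sent immediately.
 */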
static int
bnx_xmit_packet(um_device_t * const umdevice, const unsigned int ringidx,
    um_txpacket_t * const umpacket)
{
	int rc;
	s_list_t *waitq;
	lm_tx_chain_t *txq;
	lm_packet_t *lmpacket;
	lm_device_t *lmdevice;
	lm_frag_list_t *lmfraglist;

	lmdevice = &(umdevice->lm_dev);
	lmpacket = &(umpacket->lm_pkt);

	lmfraglist = &(umpacket->frag_list);
	txq = &lmdevice->tx_info.chain[ringidx];

	/* Recycle completed packets if the available BD count is low. */
	if (txq->bd_left < BNX_MAX_SGL_ENTRIES) {
		s_list_t xmitpkts;

		s_list_init(&xmitpkts, NULL, NULL, 0);

		rc = lm_get_packets_sent(lmdevice, ringidx, 0, &xmitpkts);

		if (rc) {
			bnx_xmit_ring_reclaim(umdevice, ringidx, &xmitpkts);
		}
	}

	waitq = &_TXQ_RESC_DESC(umdevice, ringidx);
	if (s_list_is_empty(waitq) && txq->bd_left >= lmfraglist->cnt) {
		(void) lm_send_packet(lmdevice, ringidx, lmpacket, lmfraglist);

		return (BNX_SEND_GOODXMIT);
	}

	s_list_push_tail(waitq, &umpacket->lm_pkt.link);

	if (txq->bd_left >= BNX_MAX_SGL_ENTRIES) {
		rc = bnx_xmit_ring_xmit_qpkt(umdevice, ringidx);
		if (rc == BNX_SEND_GOODXMIT) {
			return (BNX_SEND_GOODXMIT);
		}
	}

	umdevice->no_tx_credits |= BNX_TX_RESOURCES_NO_CREDIT;

	return (BNX_SEND_DEFERPKT);
}

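/*
 * Allocates one physically contiguous DMA buffer that is carved into
 * per-descriptor double-copy regions, each aligned to BNX_DCOPY_ALIGN and
 * large enough to hold buffsize bytes.  Returns 0 on success, -1 on failure.
 */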
static int
bnx_xmit_ring_cpybuf_alloc(um_device_t * const umdevice,
    um_xmit_qinfo * const xmitinfo,
    unsigned int buffsize)
{
	int rc;
	size_t actualsize;
	unsigned int alignedsize;
	unsigned int count;
	ddi_dma_cookie_t cookie;

	ASSERT(buffsize > 0);

	alignedsize = buffsize;
	alignedsize += (BNX_DCOPY_ALIGN - 1);
	alignedsize &= ~((unsigned int)(BNX_DCOPY_ALIGN - 1));

	/* We want double copy buffers to be completely contiguous. */
	rc = ddi_dma_alloc_handle(umdevice->os_param.dip, &bnx_std_dma_attrib,
	    DDI_DMA_DONTWAIT, (void *)0, &xmitinfo->dcpyhndl);
	if (rc != DDI_SUCCESS) {
		cmn_err(CE_WARN,
		    "%s: %s: Failed to alloc phys dma handle.\n",
		    umdevice->dev_name, __func__);
		return (-1);
	}

	rc = ddi_dma_mem_alloc(xmitinfo->dcpyhndl,
	    alignedsize * xmitinfo->desc_cnt, &bnxAccessAttribBUF,
	    DDI_DMA_STREAMING, DDI_DMA_DONTWAIT, (void *)0,
	    &xmitinfo->dcpyvirt, &actualsize, &xmitinfo->dcpyahdl);
	if (rc != DDI_SUCCESS) {
		cmn_err(CE_WARN,
		    "%s: %s: Failed to alloc phys memory.\n",
		    umdevice->dev_name, __func__);
		goto error1;
	}

	rc = ddi_dma_addr_bind_handle(xmitinfo->dcpyhndl,
	    (struct as *)0, xmitinfo->dcpyvirt, actualsize,
	    DDI_DMA_WRITE | DDI_DMA_STREAMING, DDI_DMA_DONTWAIT, (void *)0,
	    &cookie, &count);
	if (rc != DDI_SUCCESS) {
		cmn_err(CE_WARN,
		    "%s: %s: Failed to bind DMA address.\n",
		    umdevice->dev_name, __func__);
		goto error2;
	}

	xmitinfo->dcpyhard = alignedsize;
	xmitinfo->dcpyphys.as_u64 = (u64_t)cookie.dmac_laddress;

	return (0);

error2:
	ddi_dma_mem_free(&xmitinfo->dcpyahdl);

error1:
	ddi_dma_free_handle(&xmitinfo->dcpyhndl);

	return (-1);
}

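/*
 * Tears down the double-copy buffer set up by bnx_xmit_ring_cpybuf_alloc().
 */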
static void
bnx_xmit_ring_cpybuf_free(um_device_t * const umdevice,
    um_xmit_qinfo * const xmitinfo)
{
	(void) ddi_dma_unbind_handle(xmitinfo->dcpyhndl);
	ddi_dma_mem_free(&xmitinfo->dcpyahdl);
	ddi_dma_free_handle(&xmitinfo->dcpyhndl);

	xmitinfo->dcpyvirt = NULL;
	xmitinfo->dcpyphys.as_u64 = 0;
	xmitinfo->dcpyhard = 0;
}

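/*
 * Allocates and initializes all transmit resources for one ring: the packet
 * descriptor array, the shared double-copy buffer and the free/deferred
 * descriptor lists.  Returns 0 on success, -1 on failure.
 */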
static int
bnx_xmit_ring_init(um_device_t * const umdevice, const unsigned int ringidx)
{
	int i;
	size_t memsize;
	void *memvirt;
	s_list_t *freeq;
	lm_u64_t memphys;
	um_txpacket_t *umpacket;
	um_xmit_qinfo *xmitinfo;

	xmitinfo = &_TX_QINFO(umdevice, ringidx);

	s_list_init(&_TXQ_FREE_DESC(umdevice, ringidx), NULL, NULL, 0);
	s_list_init(&_TXQ_RESC_DESC(umdevice, ringidx), NULL, NULL, 0);

	if (xmitinfo->desc_cnt == 0) {
		return (0);
	}

	xmitinfo->thresh_pdwm = BNX_PDWM_THRESHOLD;

	memsize = xmitinfo->desc_cnt * sizeof (um_txpacket_t);
	memvirt = kmem_zalloc(memsize, KM_NOSLEEP);
	if (memvirt == NULL) {
		cmn_err(CE_WARN, "%s: Failed to allocate TX packet "
		    "descriptor memory (%d).\n", umdevice->dev_name, ringidx);
		return (-1);
	}

	xmitinfo->desc_mem.addr = memvirt;
	xmitinfo->desc_mem.size = memsize;

	if (bnx_xmit_ring_cpybuf_alloc(umdevice, xmitinfo,
	    umdevice->dev_var.mtu + sizeof (struct ether_vlan_header))) {
		kmem_free(xmitinfo->desc_mem.addr, xmitinfo->desc_mem.size);
		xmitinfo->desc_mem.addr = NULL;
		xmitinfo->desc_mem.size = 0;

		return (-1);
	}

	/*
	 * The driver successfully allocated memory for this transmit queue.
	 * Now initialize the packet descriptors, link them together and
	 * place them in the free pool.
	 */

	freeq = &_TXQ_FREE_DESC(umdevice, ringidx);
	umpacket = (um_txpacket_t *)memvirt;

	memphys = xmitinfo->dcpyphys;

	for (i = 0; i < xmitinfo->desc_cnt; i++) {
		if (bnx_xmit_pkt_init(umdevice, umpacket, i, memphys)) {
			break;
		}

		LM_INC64(&memphys, xmitinfo->dcpyhard);

		s_list_push_tail(freeq, &umpacket->lm_pkt.link);

		umpacket++;
	}

	mutex_init(&xmitinfo->free_mutex, NULL, MUTEX_DRIVER,
	    DDI_INTR_PRI(umdevice->intrPriority));

	return (0);
}

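/*
 * Returns a list of completed transmit packets to the ring's free pool,
 * unbinding any DMA handles and freeing the attached mblk chains first.
 */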
void
bnx_xmit_ring_reclaim(um_device_t * const umdevice,
    const unsigned int ringidx, s_list_t *srcq)
{
	s_list_t *freeq;
	s_list_entry_t *lmpacket;
	um_txpacket_t *umpacket;
	um_xmit_qinfo *xmitinfo;

	if (s_list_entry_cnt(srcq) == 0) {
		return;
	}

	for (lmpacket = s_list_peek_head(srcq); lmpacket;
	    lmpacket = s_list_next_entry(lmpacket)) {

		umpacket = (um_txpacket_t *)lmpacket;

		if (umpacket->num_handles > 0) {
			bnx_xmit_pkt_unmap(umpacket);
		}

		if (umpacket->mp != NULL) {
			freemsg(umpacket->mp);
			umpacket->mp = NULL;
		}
	}

	freeq = &_TXQ_FREE_DESC(umdevice, ringidx);
	xmitinfo = &_TX_QINFO(umdevice, ringidx);

	mutex_enter(&xmitinfo->free_mutex);
	s_list_add_tail(freeq, srcq);
	mutex_exit(&xmitinfo->free_mutex);
}

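/*
 * Drains the ring's deferred-send queue, submitting packets to the LM layer
 * until the queue is empty or the ring runs out of buffer descriptors.
 * Returns BNX_SEND_DEFERPKT if packets remain queued, 0 otherwise.
 */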
int
bnx_xmit_ring_xmit_qpkt(um_device_t * const umdevice,
    const unsigned int ringidx)
{
	s_list_t *waitq;
	lm_tx_chain_t *txq;
	lm_packet_t *lmpacket;
	lm_device_t *lmdevice;
	lm_frag_list_t *lmfraglist;
	um_txpacket_t *umpacket;
	int rc = 0;

	lmdevice = &(umdevice->lm_dev);
	waitq = &_TXQ_RESC_DESC(umdevice, ringidx);
	txq = &lmdevice->tx_info.chain[ringidx];

	while (s_list_entry_cnt(waitq)) {
		umpacket = (um_txpacket_t *)s_list_peek_head(waitq);
		lmfraglist = &(umpacket->frag_list);

		if (lmfraglist->cnt > txq->bd_left) {
			rc = BNX_SEND_DEFERPKT;
			break;
		}

		umpacket = (um_txpacket_t *)s_list_pop_head(waitq);
		lmpacket = &(umpacket->lm_pkt);

		/*
		 * The main way that this can fail is in the check we just
		 * performed around the fragment list versus txq, so we ignore
		 * the return value.
		 */
		(void) lm_send_packet(lmdevice, ringidx, lmpacket, lmfraglist);
	}

	return (rc);
}

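/*
 * Entry point for transmitting one mblk chain on a ring.  Grabs a free
 * packet descriptor (recycling completed packets if the free list is
 * empty), maps or copies the message, records checksum-offload flags and
 * submits the packet.  Returns BNX_SEND_GOODXMIT, BNX_SEND_DEFERPKT or
 * BNX_SEND_HDWRFULL.
 */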
int
bnx_xmit_ring_xmit_mblk(um_device_t * const umdevice,
    const unsigned int ringidx, mblk_t *mp)
{
	int rc;
	uint32_t pflags;
	s_list_t *txfreeq;
	lm_packet_t *lmpacket;
	um_txpacket_t *umpacket;
	um_xmit_qinfo *xmitinfo;

	xmitinfo = &_TX_QINFO(umdevice, ringidx);

	txfreeq = &_TXQ_FREE_DESC(umdevice, ringidx);

	mutex_enter(&xmitinfo->free_mutex);
	umpacket = (um_txpacket_t *)s_list_pop_head(txfreeq);
	mutex_exit(&xmitinfo->free_mutex);

	/* If no free descriptor is available, try to recycle sent packets. */
	if (umpacket == NULL) {
		s_list_t xmitpkts;
		lm_device_t *lmdevice;

		lmdevice = &(umdevice->lm_dev);

		s_list_init(&xmitpkts, NULL, NULL, 0);

		mutex_enter(&umdevice->os_param.xmit_mutex);
		rc = lm_get_packets_sent(lmdevice, ringidx, 0, &xmitpkts);
		if (rc == 0) {
			umdevice->no_tx_credits |= BNX_TX_RESOURCES_NO_DESC;

			mutex_exit(&umdevice->os_param.xmit_mutex);
			return (BNX_SEND_HDWRFULL);
		}
		mutex_exit(&umdevice->os_param.xmit_mutex);

		umpacket = (um_txpacket_t *)s_list_pop_head(&xmitpkts);
		if (umpacket->num_handles > 0) {
			bnx_xmit_pkt_unmap(umpacket);
		}
		if (umpacket->mp != NULL) {
			freemsg(umpacket->mp);
			umpacket->mp = NULL;
		}

		/* Return the remaining recycled descriptors to the pool. */
		bnx_xmit_ring_reclaim(umdevice, ringidx, &xmitpkts);
	}

	umpacket->lm_pkt.link.next = NULL;
	ASSERT(umpacket->mp == NULL);
	ASSERT(umpacket->num_handles == 0);
	umpacket->frag_list.cnt = 0;
	umpacket->mp = mp;

	mac_hcksum_get(mp, NULL, NULL, NULL, NULL, &pflags);

	bnx_xmit_pkt_cpy(umdevice, umpacket);

	lmpacket = &(umpacket->lm_pkt);

	lmpacket->u1.tx.flags   = 0;
	lmpacket->u1.tx.lso_mss = 0;

	lmpacket->u1.tx.vlan_tag = 0;

	if (pflags & HCK_IPV4_HDRCKSUM) {
		lmpacket->u1.tx.flags |= LM_TX_FLAG_COMPUTE_IP_CKSUM;
	}

	if (pflags & HCK_FULLCKSUM) {
		lmpacket->u1.tx.flags |= LM_TX_FLAG_COMPUTE_TCP_UDP_CKSUM;
	}

	mutex_enter(&umdevice->os_param.xmit_mutex);
	rc = bnx_xmit_packet(umdevice, ringidx, umpacket);
	mutex_exit(&umdevice->os_param.xmit_mutex);

	return (rc);
}

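/*
 * Transmit completion handler: collects packets the hardware has finished
 * sending and returns them to the free pool.
 */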
void
bnx_xmit_ring_intr(um_device_t * const umdevice, const unsigned int ringidx)
{
	u32_t rc;
	s_list_t xmitpkts;
	lm_device_t *lmdevice;

	lmdevice = &(umdevice->lm_dev);

	s_list_init(&xmitpkts, NULL, NULL, 0);

	mutex_enter(&umdevice->os_param.xmit_mutex);

	rc = lm_get_packets_sent(lmdevice, ringidx, 0, &xmitpkts);

	mutex_exit(&umdevice->os_param.xmit_mutex);

	if (rc) {
		bnx_xmit_ring_reclaim(umdevice, ringidx, &xmitpkts);
	}
}

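/*
 * Called after completions have been processed.  Retries any deferred
 * packets, clears the no-credit / no-descriptor conditions once resources
 * are available again, and tells the MAC layer to resume transmission.
 */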
void
bnx_xmit_ring_post(um_device_t * const umdevice, const unsigned int ringidx)
{
	int rc;
	s_list_t *freeq;
	lm_device_t *lmdevice;
	um_xmit_qinfo *xmitinfo;
	lm_tx_chain_t *lmtxring;

	if (umdevice->no_tx_credits != 0) {
		if (umdevice->no_tx_credits & BNX_TX_RESOURCES_NO_CREDIT) {
			rc = bnx_xmit_ring_xmit_qpkt(umdevice, ringidx);

			if (rc == BNX_SEND_GOODXMIT) {
				lmdevice = &(umdevice->lm_dev);
				lmtxring = &(lmdevice->tx_info.chain[ringidx]);

				if (lmtxring->bd_left >= BNX_MAX_SGL_ENTRIES) {
					umdevice->no_tx_credits &=
					    ~BNX_TX_RESOURCES_NO_CREDIT;
				}
			}
		}

		if (umdevice->no_tx_credits & BNX_TX_RESOURCES_NO_DESC) {
			freeq = &_TXQ_FREE_DESC(umdevice, ringidx);
			xmitinfo = &_TX_QINFO(umdevice, ringidx);

			if (s_list_entry_cnt(freeq) > xmitinfo->thresh_pdwm) {
				umdevice->no_tx_credits &=
				    ~BNX_TX_RESOURCES_NO_DESC;
			}
		}

		if (umdevice->no_tx_credits == 0) {
			mac_tx_update(umdevice->os_param.macp);
		}
	}
}

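/*
 * Releases all transmit resources for one ring: the packet descriptors on
 * the free list, the shared double-copy buffer and the descriptor memory.
 */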
static void
bnx_xmit_ring_fini(um_device_t * const umdevice, const unsigned int ringidx)
{
	s_list_t *srcq;
	um_txpacket_t *umpacket;
	um_xmit_qinfo *xmitinfo;

	xmitinfo = &_TX_QINFO(umdevice, ringidx);

	mutex_destroy(&xmitinfo->free_mutex);

	srcq = &_TXQ_FREE_DESC(umdevice, ringidx);

	/* CONSTANTCONDITION */
	/* Pop all packet descriptors off the free list and discard them. */
	while (1) {
		umpacket = (um_txpacket_t *)s_list_pop_head(srcq);
		if (umpacket == NULL) {
			break;
		}

		bnx_xmit_pkt_fini(umpacket);
	}

	bnx_xmit_ring_cpybuf_free(umdevice, xmitinfo);

	kmem_free(xmitinfo->desc_mem.addr, xmitinfo->desc_mem.size);
	xmitinfo->desc_mem.addr = NULL;
	xmitinfo->desc_mem.size = 0;
}

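/*
 * Allocates transmit resources for every TX chain and verifies that at
 * least the minimum number of packet descriptors could be allocated.
 * Returns 0 on success, -1 on failure.
 */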
int
bnx_txpkts_init(um_device_t * const umdevice)
{
	int i;
	int alloccnt;
	um_xmit_qinfo *xmitinfo;

	xmitinfo = &_TX_QINFO(umdevice, 0);

	mutex_init(&umdevice->os_param.xmit_mutex, NULL,
	    MUTEX_DRIVER, DDI_INTR_PRI(umdevice->intrPriority));

	alloccnt = 0;

	/* Allocate packet descriptors for the TX queue. */
	for (i = TX_CHAIN_IDX0; i < NUM_TX_CHAIN; i++) {
		int desc_cnt;

		if (bnx_xmit_ring_init(umdevice, i)) {
			goto error;
		}

		desc_cnt = s_list_entry_cnt(&_TXQ_FREE_DESC(umdevice, i));

		if (desc_cnt != xmitinfo->desc_cnt) {
			cmn_err(CE_NOTE,
			    "%s: %d tx buffers requested.  %d allocated.\n",
			    umdevice->dev_name, xmitinfo->desc_cnt, desc_cnt);
		}

		alloccnt += desc_cnt;
	}

	/* FIXME -- Review TX buffer allocation failure threshold. */
	if (alloccnt < BNX_XMIT_INIT_FAIL_THRESH) {
		cmn_err(CE_WARN,
		    "%s: Failed to allocate minimum number of TX buffers.\n",
		    umdevice->dev_name);

		goto error;
	}

	return (0);

error:
	for (i--; i >= TX_CHAIN_IDX0; i--) {
		bnx_xmit_ring_fini(umdevice, i);
	}

	mutex_destroy(&umdevice->os_param.xmit_mutex);

	return (-1);
}

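/*
 * Aborts every TX chain, reclaims any packets still on the deferred-send
 * lists, clears the transmit flow-control flags and, if transmission had
 * been throttled, tells the MAC layer it may send again.
 */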
void
bnx_txpkts_flush(um_device_t * const umdevice)
{
	int i;
	boolean_t notx_fl = B_FALSE;

	for (i = NUM_TX_CHAIN - 1; i >= TX_CHAIN_IDX0; i--) {
		lm_abort(&(umdevice->lm_dev), ABORT_OP_TX_CHAIN, i);

		bnx_xmit_ring_reclaim(umdevice, i,
		    &_TXQ_RESC_DESC(umdevice, i));

		s_list_init(&_TXQ_RESC_DESC(umdevice, i), NULL, NULL, 0);

		if (umdevice->no_tx_credits & BNX_TX_RESOURCES_NO_CREDIT) {
			umdevice->no_tx_credits &= ~BNX_TX_RESOURCES_NO_CREDIT;
			notx_fl = B_TRUE;
		}
		if (umdevice->no_tx_credits & BNX_TX_RESOURCES_NO_DESC) {
			umdevice->no_tx_credits &= ~BNX_TX_RESOURCES_NO_DESC;
			notx_fl = B_TRUE;
		}
		if (umdevice->no_tx_credits == 0 && notx_fl == B_TRUE) {
			mac_tx_update(umdevice->os_param.macp);
		}
	}
}

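/*
 * Per-interrupt hook: runs the post-completion processing on every TX chain.
 */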
void
bnx_txpkts_intr(um_device_t * const umdevice)
{
	int i;

	for (i = TX_CHAIN_IDX0; i < NUM_TX_CHAIN; i++) {
		bnx_xmit_ring_post(umdevice, i);
	}
}

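/*
 * Releases the transmit resources of every TX chain and destroys the
 * transmit mutex.
 */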
void
bnx_txpkts_fini(um_device_t * const umdevice)
{
	int i;

	for (i = NUM_TX_CHAIN - 1; i >= TX_CHAIN_IDX0; i--) {
		bnx_xmit_ring_fini(umdevice, i);
	}

	mutex_destroy(&umdevice->os_param.xmit_mutex);
}
