xref: /illumos-gate/usr/src/uts/common/io/xge/drv/xgell.c (revision 1a5e258f)
1a23fd118Syl /*
2a23fd118Syl  * CDDL HEADER START
3a23fd118Syl  *
4a23fd118Syl  * The contents of this file are subject to the terms of the
5a23fd118Syl  * Common Development and Distribution License (the "License").
6a23fd118Syl  * You may not use this file except in compliance with the License.
7a23fd118Syl  *
8a23fd118Syl  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9a23fd118Syl  * or http://www.opensolaris.org/os/licensing.
10a23fd118Syl  * See the License for the specific language governing permissions
11a23fd118Syl  * and limitations under the License.
12a23fd118Syl  *
13a23fd118Syl  * When distributing Covered Code, include this CDDL HEADER in each
14a23fd118Syl  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15a23fd118Syl  * If applicable, add the following below this CDDL HEADER, with the
16a23fd118Syl  * fields enclosed by brackets "[]" replaced with your own identifying
17a23fd118Syl  * information: Portions Copyright [yyyy] [name of copyright owner]
18a23fd118Syl  *
19a23fd118Syl  * CDDL HEADER END
20a23fd118Syl  */
21a23fd118Syl 
22a23fd118Syl /*
230dc2366fSVenugopal Iyer  * Copyright 2010 Sun Microsystems, Inc.  All rights reserved.
24a23fd118Syl  * Use is subject to license terms.
25a23fd118Syl  */
26a23fd118Syl 
27a23fd118Syl /*
28f30c160eSRoamer  *  Copyright (c) 2002-2009 Neterion, Inc.
29a23fd118Syl  *  All right Reserved.
30a23fd118Syl  *
31a23fd118Syl  *  FileName :    xgell.c
32a23fd118Syl  *
33a23fd118Syl  *  Description:  Xge Link Layer data path implementation
34a23fd118Syl  *
35a23fd118Syl  */
36a23fd118Syl 
37a23fd118Syl #include "xgell.h"
38a23fd118Syl 
39a23fd118Syl #include <netinet/ip.h>
40a23fd118Syl #include <netinet/tcp.h>
418347601bSyl #include <netinet/udp.h>
42a23fd118Syl 
/*
 * Largest frame the device handles: the configured MTU plus a
 * VLAN-tagged Ethernet header.  Used below to size rx pool buffers
 * (see xgell_rx_create_buffer_pool()).
 */
#define	XGELL_MAX_FRAME_SIZE(hldev)	((hldev)->config.mtu +	\
    sizeof (struct ether_vlan_header))

#define	HEADROOM		2	/* for DIX-only packets */
47a23fd118Syl 
/*
 * No-op free routine paired with header_frtn.  The header mblk's backing
 * storage is not separately allocated, so there is nothing to release
 * when the framework frees it.  NOTE(review): presumably passed to
 * desballoc() for copied packet headers -- confirm against the data
 * paths later in this file.
 */
void header_free_func(void *arg) { }
frtn_t header_frtn = {header_free_func, NULL};
50a23fd118Syl 
/*
 * DMA attributes used for Tx side.
 * NOTE(review): sgllen of 18 presumably reflects the device's Tx gather
 * limit, and alignment is one page (8K on sparc, 4K elsewhere) --
 * confirm against the Xframe HAL requirements.
 */
static struct ddi_dma_attr tx_dma_attr = {
	DMA_ATTR_V0,			/* dma_attr_version */
	0x0ULL,				/* dma_attr_addr_lo */
	0xFFFFFFFFFFFFFFFFULL,		/* dma_attr_addr_hi */
	0xFFFFFFFFFFFFFFFFULL,		/* dma_attr_count_max */
#if defined(__sparc)
	0x2000,				/* dma_attr_align */
#else
	0x1000,				/* dma_attr_align */
#endif
	0xFC00FC,			/* dma_attr_burstsizes */
	0x1,				/* dma_attr_minxfer */
	0xFFFFFFFFFFFFFFFFULL,		/* dma_attr_maxxfer */
	0xFFFFFFFFFFFFFFFFULL,		/* dma_attr_seg */
	18,				/* dma_attr_sgllen */
	(unsigned int)1,		/* dma_attr_granular */
	0				/* dma_attr_flags */
};
70a23fd118Syl 
/*
 * DMA attributes used when using ddi_dma_mem_alloc to
 * allocate HAL descriptors and Rx buffers during replenish.
 * Unlike tx_dma_attr, these mappings must be a single cookie
 * (sgllen 1) and allow relaxed ordering.
 */
static struct ddi_dma_attr hal_dma_attr = {
	DMA_ATTR_V0,			/* dma_attr_version */
	0x0ULL,				/* dma_attr_addr_lo */
	0xFFFFFFFFFFFFFFFFULL,		/* dma_attr_addr_hi */
	0xFFFFFFFFFFFFFFFFULL,		/* dma_attr_count_max */
#if defined(__sparc)
	0x2000,				/* dma_attr_align */
#else
	0x1000,				/* dma_attr_align */
#endif
	0xFC00FC,			/* dma_attr_burstsizes */
	0x1,				/* dma_attr_minxfer */
	0xFFFFFFFFFFFFFFFFULL,		/* dma_attr_maxxfer */
	0xFFFFFFFFFFFFFFFFULL,		/* dma_attr_seg */
	1,				/* dma_attr_sgllen */
	(unsigned int)1,		/* dma_attr_granular */
	DDI_DMA_RELAXED_ORDERING	/* dma_attr_flags */
};
93a23fd118Syl 
/* Exported handle to hal_dma_attr; used by xgell_rx_buffer_alloc() below. */
struct ddi_dma_attr *p_hal_dma_attr = &hal_dma_attr;
95a23fd118Syl 
/* GLDv3 (mac(9E)) entry points implemented by this driver. */
static int		xgell_m_stat(void *, uint_t, uint64_t *);
static int		xgell_m_start(void *);
static void		xgell_m_stop(void *);
static int		xgell_m_promisc(void *, boolean_t);
static int		xgell_m_multicst(void *, boolean_t, const uint8_t *);
static void		xgell_m_ioctl(void *, queue_t *, mblk_t *);
static boolean_t	xgell_m_getcapab(void *, mac_capab_t, void *);

/* Flag only the optional callbacks that are actually provided. */
#define	XGELL_M_CALLBACK_FLAGS	(MC_IOCTL | MC_GETCAPAB)

/*
 * Callback table handed to mac_register().  The NULL slots are optional
 * entry points this driver does not supply here.  NOTE(review):
 * presumably the unicast-address and Tx entry points are provided via
 * the rings/groups capability instead -- confirm field order against
 * mac_callbacks_t in mac_provider.h.
 */
static mac_callbacks_t xgell_m_callbacks = {
	XGELL_M_CALLBACK_FLAGS,
	xgell_m_stat,
	xgell_m_start,
	xgell_m_stop,
	xgell_m_promisc,
	xgell_m_multicst,
	NULL,
	NULL,
	NULL,
	xgell_m_ioctl,
	xgell_m_getcapab
};
119ba2e4443Sseb 
120a23fd118Syl /*
121a23fd118Syl  * xge_device_poll
122a23fd118Syl  *
123da14cebeSEric Cheng  * Timeout should call me every 1s. xge_callback_event_queued should call me
124a23fd118Syl  * when HAL hope event was rescheduled.
125a23fd118Syl  */
126a23fd118Syl /*ARGSUSED*/
127a23fd118Syl void
xge_device_poll(void * data)128a23fd118Syl xge_device_poll(void *data)
129a23fd118Syl {
130a23fd118Syl 	xgelldev_t *lldev = xge_hal_device_private(data);
131a23fd118Syl 
132a23fd118Syl 	mutex_enter(&lldev->genlock);
133a23fd118Syl 	if (lldev->is_initialized) {
134a23fd118Syl 		xge_hal_device_poll(data);
135a23fd118Syl 		lldev->timeout_id = timeout(xge_device_poll, data,
136a23fd118Syl 		    XGE_DEV_POLL_TICKS);
1378347601bSyl 	} else if (lldev->in_reset == 1) {
1388347601bSyl 		lldev->timeout_id = timeout(xge_device_poll, data,
1398347601bSyl 		    XGE_DEV_POLL_TICKS);
1408347601bSyl 	} else {
1418347601bSyl 		lldev->timeout_id = 0;
142a23fd118Syl 	}
143a23fd118Syl 	mutex_exit(&lldev->genlock);
144a23fd118Syl }
145a23fd118Syl 
146a23fd118Syl /*
147a23fd118Syl  * xge_device_poll_now
148a23fd118Syl  *
149a23fd118Syl  * Will call xge_device_poll() immediately
150a23fd118Syl  */
151a23fd118Syl void
xge_device_poll_now(void * data)152a23fd118Syl xge_device_poll_now(void *data)
153a23fd118Syl {
154a23fd118Syl 	xgelldev_t *lldev = xge_hal_device_private(data);
155a23fd118Syl 
156a23fd118Syl 	mutex_enter(&lldev->genlock);
1578347601bSyl 	if (lldev->is_initialized) {
1588347601bSyl 		xge_hal_device_poll(data);
1598347601bSyl 	}
160a23fd118Syl 	mutex_exit(&lldev->genlock);
161a23fd118Syl }
162a23fd118Syl 
163a23fd118Syl /*
164a23fd118Syl  * xgell_callback_link_up
165a23fd118Syl  *
166a23fd118Syl  * This function called by HAL to notify HW link up state change.
167a23fd118Syl  */
168a23fd118Syl void
xgell_callback_link_up(void * userdata)169a23fd118Syl xgell_callback_link_up(void *userdata)
170a23fd118Syl {
171a23fd118Syl 	xgelldev_t *lldev = (xgelldev_t *)userdata;
172a23fd118Syl 
173ba2e4443Sseb 	mac_link_update(lldev->mh, LINK_STATE_UP);
174a23fd118Syl }
175a23fd118Syl 
176a23fd118Syl /*
177a23fd118Syl  * xgell_callback_link_down
178a23fd118Syl  *
179a23fd118Syl  * This function called by HAL to notify HW link down state change.
180a23fd118Syl  */
181a23fd118Syl void
xgell_callback_link_down(void * userdata)182a23fd118Syl xgell_callback_link_down(void *userdata)
183a23fd118Syl {
184a23fd118Syl 	xgelldev_t *lldev = (xgelldev_t *)userdata;
185a23fd118Syl 
186ba2e4443Sseb 	mac_link_update(lldev->mh, LINK_STATE_DOWN);
187a23fd118Syl }
188a23fd118Syl 
/*
 * xgell_rx_buffer_replenish_all
 *
 * To replenish all freed dtr(s) with buffers in free pool. It's called by
 * xgell_rx_buffer_recycle() or xgell_rx_1b_callback().
 * Must be called with pool_lock held (asserted below).
 */
static void
xgell_rx_buffer_replenish_all(xgell_rx_ring_t *ring)
{
	xgell_rx_buffer_pool_t *bf_pool = &ring->bf_pool;
	xge_hal_dtr_h dtr;
	xgell_rx_buffer_t *rx_buffer;
	xgell_rxd_priv_t *rxd_priv;

	xge_assert(mutex_owned(&bf_pool->pool_lock));

	/*
	 * Keep going while the free list is non-empty and the HAL can
	 * still hand out a descriptor for this channel.
	 */
	while ((bf_pool->free > 0) &&
	    (xge_hal_ring_dtr_reserve(ring->channelh, &dtr) == XGE_HAL_OK)) {
		xge_assert(bf_pool->head);

		/* Pop one buffer off the free list. */
		rx_buffer = bf_pool->head;

		bf_pool->head = rx_buffer->next;
		bf_pool->free--;

		xge_assert(rx_buffer->dma_addr);

		/*
		 * Attach the buffer's DMA address to the descriptor,
		 * remember the buffer in the descriptor's private area,
		 * then post it to the hardware ring.
		 */
		rxd_priv = (xgell_rxd_priv_t *)
		    xge_hal_ring_dtr_private(ring->channelh, dtr);
		xge_hal_ring_dtr_1b_set(dtr, rx_buffer->dma_addr,
		    bf_pool->size);

		rxd_priv->rx_buffer = rx_buffer;
		xge_hal_ring_dtr_post(ring->channelh, dtr);
	}
}
226a23fd118Syl 
227a23fd118Syl /*
228a23fd118Syl  * xgell_rx_buffer_release
229a23fd118Syl  *
230a23fd118Syl  * The only thing done here is to put the buffer back to the pool.
2318347601bSyl  * Calling this function need be protected by mutex, bf_pool.pool_lock.
232a23fd118Syl  */
233a23fd118Syl static void
xgell_rx_buffer_release(xgell_rx_buffer_t * rx_buffer)234a23fd118Syl xgell_rx_buffer_release(xgell_rx_buffer_t *rx_buffer)
235a23fd118Syl {
236da14cebeSEric Cheng 	xgell_rx_ring_t *ring = rx_buffer->ring;
237da14cebeSEric Cheng 	xgell_rx_buffer_pool_t *bf_pool = &ring->bf_pool;
238a23fd118Syl 
239da14cebeSEric Cheng 	xge_assert(mutex_owned(&bf_pool->pool_lock));
240a23fd118Syl 
241a23fd118Syl 	/* Put the buffer back to pool */
242da14cebeSEric Cheng 	rx_buffer->next = bf_pool->head;
243da14cebeSEric Cheng 	bf_pool->head = rx_buffer;
244a23fd118Syl 
245da14cebeSEric Cheng 	bf_pool->free++;
246a23fd118Syl }
247a23fd118Syl 
/*
 * xgell_rx_buffer_recycle
 *
 * Called by desballoc() to "free" the resource.
 * We will try to replenish all descriptors.
 */

/*
 * Previously there were much lock contention between xgell_rx_1b_compl() and
 * xgell_rx_buffer_recycle(), which consumed a lot of CPU resources and had bad
 * effect on rx performance. A separate recycle list is introduced to overcome
 * this. The recycle list is used to record the rx buffer that has been recycled
 * and these buffers will be returned back to the free list in bulk instead of
 * one-by-one.
 *
 * Lock order here: recycle_lock is taken first, then pool_lock nested
 * inside it when the recycle list is drained into the free list.
 */

static void
xgell_rx_buffer_recycle(char *arg)
{
	xgell_rx_buffer_t *rx_buffer = (xgell_rx_buffer_t *)arg;
	xgell_rx_ring_t *ring = rx_buffer->ring;
	xgelldev_t *lldev = ring->lldev;
	xgell_rx_buffer_pool_t *bf_pool = &ring->bf_pool;

	mutex_enter(&bf_pool->recycle_lock);

	/* Prepend the buffer to the recycle list, tracking the tail. */
	rx_buffer->next = bf_pool->recycle_head;
	bf_pool->recycle_head = rx_buffer;
	if (bf_pool->recycle_tail == NULL)
		bf_pool->recycle_tail = rx_buffer;
	bf_pool->recycle++;

	/*
	 * Before finding a good way to set this hiwat, just always call to
	 * replenish_all. *TODO*
	 */
	if ((lldev->is_initialized != 0) && (ring->live) &&
	    (bf_pool->recycle >= XGELL_RX_BUFFER_RECYCLE_CACHE)) {
		/*
		 * Splice the whole recycle list onto the free list in one
		 * shot, adjust the post/free accounting, then replenish
		 * hardware descriptors while pool_lock is held.
		 */
		mutex_enter(&bf_pool->pool_lock);
		bf_pool->recycle_tail->next = bf_pool->head;
		bf_pool->head = bf_pool->recycle_head;
		bf_pool->recycle_head = bf_pool->recycle_tail = NULL;
		bf_pool->post -= bf_pool->recycle;
		bf_pool->free += bf_pool->recycle;
		bf_pool->recycle = 0;
		xgell_rx_buffer_replenish_all(ring);
		mutex_exit(&bf_pool->pool_lock);
	}

	mutex_exit(&bf_pool->recycle_lock);
}
299a23fd118Syl 
/*
 * xgell_rx_buffer_alloc
 *
 * Allocate one rx buffer and return with the pointer to the buffer.
 * Return NULL if failed.
 *
 * Layout of the single DMA-able allocation:
 *   [HEADROOM][bf_pool->size receive area][xgell_rx_buffer_t bookkeeping]
 * Only the receive area is bound for DMA; the trailing xgell_rx_buffer_t
 * lives at the very end of the allocation and records the handles needed
 * to tear the buffer down again.
 */
static xgell_rx_buffer_t *
xgell_rx_buffer_alloc(xgell_rx_ring_t *ring)
{
	xgelldev_t *lldev = ring->lldev;
	xgell_rx_buffer_pool_t *bf_pool = &ring->bf_pool;
	xge_hal_device_t *hldev;
	void *vaddr;
	ddi_dma_handle_t dma_handle;
	ddi_acc_handle_t dma_acch;
	dma_addr_t dma_addr;
	uint_t ncookies;
	ddi_dma_cookie_t dma_cookie;
	size_t real_size;
	extern ddi_device_acc_attr_t *p_xge_dev_attr;
	xgell_rx_buffer_t *rx_buffer;

	hldev = (xge_hal_device_t *)lldev->devh;

	/* Step 1: DMA handle. */
	if (ddi_dma_alloc_handle(hldev->pdev, p_hal_dma_attr, DDI_DMA_SLEEP,
	    0, &dma_handle) != DDI_SUCCESS) {
		xge_debug_ll(XGE_ERR, "%s%d: can not allocate DMA handle",
		    XGELL_IFNAME, lldev->instance);
		goto handle_failed;
	}

	/* reserve some space at the end of the buffer for recycling */
	if (ddi_dma_mem_alloc(dma_handle, HEADROOM + bf_pool->size +
	    sizeof (xgell_rx_buffer_t), p_xge_dev_attr, DDI_DMA_STREAMING,
	    DDI_DMA_SLEEP, 0, (caddr_t *)&vaddr, &real_size, &dma_acch) !=
	    DDI_SUCCESS) {
		xge_debug_ll(XGE_ERR, "%s%d: can not allocate DMA-able memory",
		    XGELL_IFNAME, lldev->instance);
		goto mem_failed;
	}

	/* ddi_dma_mem_alloc() may round up; verify we got enough room. */
	if (HEADROOM + bf_pool->size + sizeof (xgell_rx_buffer_t) >
	    real_size) {
		xge_debug_ll(XGE_ERR, "%s%d: can not allocate DMA-able memory",
		    XGELL_IFNAME, lldev->instance);
		goto bind_failed;
	}

	/* Step 3: bind only the receive area (after HEADROOM) for DMA. */
	if (ddi_dma_addr_bind_handle(dma_handle, NULL, (char *)vaddr + HEADROOM,
	    bf_pool->size, DDI_DMA_READ | DDI_DMA_STREAMING,
	    DDI_DMA_SLEEP, 0, &dma_cookie, &ncookies) != DDI_SUCCESS) {
		xge_debug_ll(XGE_ERR, "%s%d: out of mapping for mblk",
		    XGELL_IFNAME, lldev->instance);
		goto bind_failed;
	}

	/* hal_dma_attr asks for sgllen 1; reject any fragmented mapping. */
	if (ncookies != 1 || dma_cookie.dmac_size < bf_pool->size) {
		xge_debug_ll(XGE_ERR, "%s%d: can not handle partial DMA",
		    XGELL_IFNAME, lldev->instance);
		goto check_failed;
	}

	dma_addr = dma_cookie.dmac_laddress;

	/*
	 * Place the bookkeeping struct at the tail of the allocation and
	 * record everything needed to recycle and later free the buffer.
	 */
	rx_buffer = (xgell_rx_buffer_t *)((char *)vaddr + real_size -
	    sizeof (xgell_rx_buffer_t));
	rx_buffer->next = NULL;
	rx_buffer->vaddr = vaddr;
	rx_buffer->dma_addr = dma_addr;
	rx_buffer->dma_handle = dma_handle;
	rx_buffer->dma_acch = dma_acch;
	rx_buffer->ring = ring;
	rx_buffer->frtn.free_func = xgell_rx_buffer_recycle;
	rx_buffer->frtn.free_arg = (void *)rx_buffer;

	return (rx_buffer);

	/* Unwind in reverse order of acquisition. */
check_failed:
	(void) ddi_dma_unbind_handle(dma_handle);
bind_failed:
	XGE_OS_MEMORY_CHECK_FREE(vaddr, 0);
	ddi_dma_mem_free(&dma_acch);
mem_failed:
	ddi_dma_free_handle(&dma_handle);
handle_failed:

	return (NULL);
}
388a23fd118Syl 
/*
 * xgell_rx_destroy_buffer_pool
 *
 * Destroy buffer pool. If there is still any buffer hold by upper layer,
 * recorded by bf_pool.post, return DDI_FAILURE to reject to be unloaded.
 *
 * Returns B_TRUE when the pool is fully torn down (or was already dead),
 * B_FALSE when buffers are still outstanding or an unbind fails, in
 * which case the pool is left in a consistent state for a later retry.
 */
static boolean_t
xgell_rx_destroy_buffer_pool(xgell_rx_ring_t *ring)
{
	xgelldev_t *lldev = ring->lldev;
	xgell_rx_buffer_pool_t *bf_pool = &ring->bf_pool;
	xgell_rx_buffer_t *rx_buffer;
	ddi_dma_handle_t  dma_handle;
	ddi_acc_handle_t  dma_acch;
	int i;

	/*
	 * If the pool has been destroyed, just return B_TRUE
	 */
	if (!bf_pool->live)
		return (B_TRUE);

	/* First drain any pending recycle list back into the free list. */
	mutex_enter(&bf_pool->recycle_lock);
	if (bf_pool->recycle > 0) {
		mutex_enter(&bf_pool->pool_lock);
		bf_pool->recycle_tail->next = bf_pool->head;
		bf_pool->head = bf_pool->recycle_head;
		bf_pool->recycle_tail = bf_pool->recycle_head = NULL;
		bf_pool->post -= bf_pool->recycle;
		bf_pool->free += bf_pool->recycle;
		bf_pool->recycle = 0;
		mutex_exit(&bf_pool->pool_lock);
	}
	mutex_exit(&bf_pool->recycle_lock);

	/*
	 * If there is any posted buffer, the driver should reject to be
	 * detached. Need notice upper layer to release them.
	 */
	if (bf_pool->post != 0) {
		xge_debug_ll(XGE_ERR,
		    "%s%d has some buffers not be recycled, try later!",
		    XGELL_IFNAME, lldev->instance);
		return (B_FALSE);
	}

	/*
	 * Release buffers one by one.
	 */
	for (i = bf_pool->total; i > 0; i--) {
		rx_buffer = bf_pool->head;
		xge_assert(rx_buffer != NULL);

		bf_pool->head = rx_buffer->next;

		dma_handle = rx_buffer->dma_handle;
		dma_acch = rx_buffer->dma_acch;

		if (ddi_dma_unbind_handle(dma_handle) != DDI_SUCCESS) {
			xge_debug_ll(XGE_ERR, "failed to unbind DMA handle!");
			/* Put the buffer back so a retry can find it. */
			bf_pool->head = rx_buffer;
			return (B_FALSE);
		}
		/* Freeing the memory also frees the embedded rx_buffer. */
		ddi_dma_mem_free(&dma_acch);
		ddi_dma_free_handle(&dma_handle);

		bf_pool->total--;
		bf_pool->free--;
	}

	xge_assert(!mutex_owned(&bf_pool->pool_lock));

	mutex_destroy(&bf_pool->recycle_lock);
	mutex_destroy(&bf_pool->pool_lock);
	bf_pool->live = B_FALSE;

	return (B_TRUE);
}
467a23fd118Syl 
/*
 * xgell_rx_create_buffer_pool
 *
 * Initialize RX buffer pool for all RX rings. Refer to rx_buffer_pool_t.
 *
 * Returns B_TRUE on success (or if the pool is already live), B_FALSE
 * if any buffer allocation fails -- in which case the partially built
 * pool is destroyed before returning.
 */
static boolean_t
xgell_rx_create_buffer_pool(xgell_rx_ring_t *ring)
{
	xgelldev_t *lldev = ring->lldev;
	xgell_rx_buffer_pool_t *bf_pool = &ring->bf_pool;
	xge_hal_device_t *hldev;
	xgell_rx_buffer_t *rx_buffer;
	int i;

	/* Idempotent: a live pool is left untouched. */
	if (bf_pool->live)
		return (B_TRUE);

	hldev = (xge_hal_device_t *)lldev->devh;

	/* Reset all pool accounting; buffer size tracks the device MTU. */
	bf_pool->total = 0;
	bf_pool->size = XGELL_MAX_FRAME_SIZE(hldev);
	bf_pool->head = NULL;
	bf_pool->free = 0;
	bf_pool->post = 0;
	bf_pool->post_hiwat = lldev->config.rx_buffer_post_hiwat;
	bf_pool->recycle = 0;
	bf_pool->recycle_head = NULL;
	bf_pool->recycle_tail = NULL;
	bf_pool->live = B_TRUE;

	/* Both locks are initialized at interrupt priority. */
	mutex_init(&bf_pool->pool_lock, NULL, MUTEX_DRIVER,
	    DDI_INTR_PRI(hldev->irqh));
	mutex_init(&bf_pool->recycle_lock, NULL, MUTEX_DRIVER,
	    DDI_INTR_PRI(hldev->irqh));

	/*
	 * Allocate buffers one by one. If failed, destroy whole pool by
	 * call to xgell_rx_destroy_buffer_pool().
	 */

	for (i = 0; i < lldev->config.rx_buffer_total; i++) {
		if ((rx_buffer = xgell_rx_buffer_alloc(ring)) == NULL) {
			(void) xgell_rx_destroy_buffer_pool(ring);
			return (B_FALSE);
		}

		rx_buffer->next = bf_pool->head;
		bf_pool->head = rx_buffer;

		bf_pool->total++;
		bf_pool->free++;
	}

	return (B_TRUE);
}
523a23fd118Syl 
524a23fd118Syl /*
525a23fd118Syl  * xgell_rx_dtr_replenish
526a23fd118Syl  *
527a23fd118Syl  * Replenish descriptor with rx_buffer in RX buffer pool.
528a23fd118Syl  * The dtr should be post right away.
529a23fd118Syl  */
530a23fd118Syl xge_hal_status_e
xgell_rx_dtr_replenish(xge_hal_channel_h channelh,xge_hal_dtr_h dtr,int index,void * userdata,xge_hal_channel_reopen_e reopen)531a23fd118Syl xgell_rx_dtr_replenish(xge_hal_channel_h channelh, xge_hal_dtr_h dtr, int index,
532a23fd118Syl     void *userdata, xge_hal_channel_reopen_e reopen)
533a23fd118Syl {
534da14cebeSEric Cheng 	xgell_rx_ring_t *ring = userdata;
535da14cebeSEric Cheng 	xgell_rx_buffer_pool_t *bf_pool = &ring->bf_pool;
536a23fd118Syl 	xgell_rx_buffer_t *rx_buffer;
537a23fd118Syl 	xgell_rxd_priv_t *rxd_priv;
538a23fd118Syl 
539da14cebeSEric Cheng 	mutex_enter(&bf_pool->pool_lock);
540da14cebeSEric Cheng 	if (bf_pool->head == NULL) {
541da14cebeSEric Cheng 		xge_debug_ll(XGE_ERR, "no more available rx DMA buffer!");
542a23fd118Syl 		return (XGE_HAL_FAIL);
543a23fd118Syl 	}
544da14cebeSEric Cheng 	rx_buffer = bf_pool->head;
545a23fd118Syl 	xge_assert(rx_buffer);
546a23fd118Syl 	xge_assert(rx_buffer->dma_addr);
547a23fd118Syl 
548da14cebeSEric Cheng 	bf_pool->head = rx_buffer->next;
549da14cebeSEric Cheng 	bf_pool->free--;
550da14cebeSEric Cheng 	mutex_exit(&bf_pool->pool_lock);
551da14cebeSEric Cheng 
5527eced415Sxw 	rxd_priv = (xgell_rxd_priv_t *)xge_hal_ring_dtr_private(channelh, dtr);
553