xref: /illumos-gate/usr/src/uts/common/io/xge/drv/xgell.c (revision 1a5e258f)
1a23fd118Syl /*
2a23fd118Syl  * CDDL HEADER START
3a23fd118Syl  *
4a23fd118Syl  * The contents of this file are subject to the terms of the
5a23fd118Syl  * Common Development and Distribution License (the "License").
6a23fd118Syl  * You may not use this file except in compliance with the License.
7a23fd118Syl  *
8a23fd118Syl  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9a23fd118Syl  * or http://www.opensolaris.org/os/licensing.
10a23fd118Syl  * See the License for the specific language governing permissions
11a23fd118Syl  * and limitations under the License.
12a23fd118Syl  *
13a23fd118Syl  * When distributing Covered Code, include this CDDL HEADER in each
14a23fd118Syl  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15a23fd118Syl  * If applicable, add the following below this CDDL HEADER, with the
16a23fd118Syl  * fields enclosed by brackets "[]" replaced with your own identifying
17a23fd118Syl  * information: Portions Copyright [yyyy] [name of copyright owner]
18a23fd118Syl  *
19a23fd118Syl  * CDDL HEADER END
20a23fd118Syl  */
21a23fd118Syl 
22a23fd118Syl /*
230dc2366fSVenugopal Iyer  * Copyright 2010 Sun Microsystems, Inc.  All rights reserved.
24a23fd118Syl  * Use is subject to license terms.
25a23fd118Syl  */
26a23fd118Syl 
27a23fd118Syl /*
28f30c160eSRoamer  *  Copyright (c) 2002-2009 Neterion, Inc.
29a23fd118Syl  *  All rights reserved.
30a23fd118Syl  *
31a23fd118Syl  *  FileName :    xgell.c
32a23fd118Syl  *
33a23fd118Syl  *  Description:  Xge Link Layer data path implementation
34a23fd118Syl  *
35a23fd118Syl  */
36a23fd118Syl 
37a23fd118Syl #include "xgell.h"
38a23fd118Syl 
39a23fd118Syl #include <netinet/ip.h>
40a23fd118Syl #include <netinet/tcp.h>
418347601bSyl #include <netinet/udp.h>
42a23fd118Syl 
43ba2e4443Sseb #define	XGELL_MAX_FRAME_SIZE(hldev)	((hldev)->config.mtu +	\
44a23fd118Syl     sizeof (struct ether_vlan_header))
45a23fd118Syl 
46a23fd118Syl #define	HEADROOM		2	/* for DIX-only packets */
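/*
 * Note: the 2-byte headroom presumably keeps the IP header that follows a
 * 14-byte DIX Ethernet header aligned on a 4-byte boundary.
 */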
47a23fd118Syl 
48a23fd118Syl void header_free_func(void *arg) { }
49a23fd118Syl frtn_t header_frtn = {header_free_func, NULL};
50a23fd118Syl 
51a23fd118Syl /* DMA attributes used for Tx side */
52a23fd118Syl static struct ddi_dma_attr tx_dma_attr = {
53a23fd118Syl 	DMA_ATTR_V0,			/* dma_attr_version */
54a23fd118Syl 	0x0ULL,				/* dma_attr_addr_lo */
55a23fd118Syl 	0xFFFFFFFFFFFFFFFFULL,		/* dma_attr_addr_hi */
567eced415Sxw 	0xFFFFFFFFFFFFFFFFULL,		/* dma_attr_count_max */
577eced415Sxw #if defined(__sparc)
587eced415Sxw 	0x2000,				/* dma_attr_align */
597eced415Sxw #else
607eced415Sxw 	0x1000,				/* dma_attr_align */
617eced415Sxw #endif
627eced415Sxw 	0xFC00FC,			/* dma_attr_burstsizes */
637eced415Sxw 	0x1,				/* dma_attr_minxfer */
647eced415Sxw 	0xFFFFFFFFFFFFFFFFULL,		/* dma_attr_maxxfer */
65a23fd118Syl 	0xFFFFFFFFFFFFFFFFULL,		/* dma_attr_seg */
668347601bSyl 	18,				/* dma_attr_sgllen */
677eced415Sxw 	(unsigned int)1,		/* dma_attr_granular */
68a23fd118Syl 	0				/* dma_attr_flags */
69a23fd118Syl };
70a23fd118Syl 
71a23fd118Syl /*
72a23fd118Syl  * DMA attributes used when calling ddi_dma_mem_alloc to
73a23fd118Syl  * allocate HAL descriptors and Rx buffers during replenish
74a23fd118Syl  */
75a23fd118Syl static struct ddi_dma_attr hal_dma_attr = {
76a23fd118Syl 	DMA_ATTR_V0,			/* dma_attr_version */
77a23fd118Syl 	0x0ULL,				/* dma_attr_addr_lo */
78a23fd118Syl 	0xFFFFFFFFFFFFFFFFULL,		/* dma_attr_addr_hi */
797eced415Sxw 	0xFFFFFFFFFFFFFFFFULL,		/* dma_attr_count_max */
807eced415Sxw #if defined(__sparc)
817eced415Sxw 	0x2000,				/* dma_attr_align */
827eced415Sxw #else
837eced415Sxw 	0x1000,				/* dma_attr_align */
847eced415Sxw #endif
857eced415Sxw 	0xFC00FC,			/* dma_attr_burstsizes */
867eced415Sxw 	0x1,				/* dma_attr_minxfer */
877eced415Sxw 	0xFFFFFFFFFFFFFFFFULL,		/* dma_attr_maxxfer */
88a23fd118Syl 	0xFFFFFFFFFFFFFFFFULL,		/* dma_attr_seg */
89a23fd118Syl 	1,				/* dma_attr_sgllen */
907eced415Sxw 	(unsigned int)1,		/* dma_attr_granular */
917eced415Sxw 	DDI_DMA_RELAXED_ORDERING	/* dma_attr_flags */
92a23fd118Syl };
93a23fd118Syl 
94a23fd118Syl struct ddi_dma_attr *p_hal_dma_attr = &hal_dma_attr;
95a23fd118Syl 
96ba2e4443Sseb static int		xgell_m_stat(void *, uint_t, uint64_t *);
97ba2e4443Sseb static int		xgell_m_start(void *);
98ba2e4443Sseb static void		xgell_m_stop(void *);
99ba2e4443Sseb static int		xgell_m_promisc(void *, boolean_t);
100ba2e4443Sseb static int		xgell_m_multicst(void *, boolean_t, const uint8_t *);
101ba2e4443Sseb static void		xgell_m_ioctl(void *, queue_t *, mblk_t *);
102ba2e4443Sseb static boolean_t	xgell_m_getcapab(void *, mac_capab_t, void *);
103ba2e4443Sseb 
104ba2e4443Sseb #define	XGELL_M_CALLBACK_FLAGS	(MC_IOCTL | MC_GETCAPAB)
105ba2e4443Sseb 
106ba2e4443Sseb static mac_callbacks_t xgell_m_callbacks = {
107ba2e4443Sseb 	XGELL_M_CALLBACK_FLAGS,
108ba2e4443Sseb 	xgell_m_stat,
109ba2e4443Sseb 	xgell_m_start,
110ba2e4443Sseb 	xgell_m_stop,
111ba2e4443Sseb 	xgell_m_promisc,
112ba2e4443Sseb 	xgell_m_multicst,
113da14cebeSEric Cheng 	NULL,
114ba2e4443Sseb 	NULL,
1150dc2366fSVenugopal Iyer 	NULL,
116ba2e4443Sseb 	xgell_m_ioctl,
117ba2e4443Sseb 	xgell_m_getcapab
118ba2e4443Sseb };
119ba2e4443Sseb 
120a23fd118Syl /*
121a23fd118Syl  * xge_device_poll
122a23fd118Syl  *
123da14cebeSEric Cheng  * Timeout should call this every 1s. xge_callback_event_queued should call it
124a23fd118Syl  * when a HAL event has been rescheduled.
125a23fd118Syl  */
126a23fd118Syl /*ARGSUSED*/
127a23fd118Syl void
128a23fd118Syl xge_device_poll(void *data)
129a23fd118Syl {
130a23fd118Syl 	xgelldev_t *lldev = xge_hal_device_private(data);
131a23fd118Syl 
132a23fd118Syl 	mutex_enter(&lldev->genlock);
133a23fd118Syl 	if (lldev->is_initialized) {
134a23fd118Syl 		xge_hal_device_poll(data);
135a23fd118Syl 		lldev->timeout_id = timeout(xge_device_poll, data,
136a23fd118Syl 		    XGE_DEV_POLL_TICKS);
1378347601bSyl 	} else if (lldev->in_reset == 1) {
1388347601bSyl 		lldev->timeout_id = timeout(xge_device_poll, data,
1398347601bSyl 		    XGE_DEV_POLL_TICKS);
1408347601bSyl 	} else {
1418347601bSyl 		lldev->timeout_id = 0;
142a23fd118Syl 	}
143a23fd118Syl 	mutex_exit(&lldev->genlock);
144a23fd118Syl }
145a23fd118Syl 
146a23fd118Syl /*
147a23fd118Syl  * xge_device_poll_now
148a23fd118Syl  *
149a23fd118Syl  * Will call xge_device_poll() immediately
150a23fd118Syl  */
151a23fd118Syl void
152a23fd118Syl xge_device_poll_now(void *data)
153a23fd118Syl {
154a23fd118Syl 	xgelldev_t *lldev = xge_hal_device_private(data);
155a23fd118Syl 
156a23fd118Syl 	mutex_enter(&lldev->genlock);
1578347601bSyl 	if (lldev->is_initialized) {
1588347601bSyl 		xge_hal_device_poll(data);
1598347601bSyl 	}
160a23fd118Syl 	mutex_exit(&lldev->genlock);
161a23fd118Syl }
162a23fd118Syl 
163a23fd118Syl /*
164a23fd118Syl  * xgell_callback_link_up
165a23fd118Syl  *
166a23fd118Syl  * This function is called by the HAL to report a HW link-up state change.
167a23fd118Syl  */
168a23fd118Syl void
169a23fd118Syl xgell_callback_link_up(void *userdata)
170a23fd118Syl {
171a23fd118Syl 	xgelldev_t *lldev = (xgelldev_t *)userdata;
172a23fd118Syl 
173ba2e4443Sseb 	mac_link_update(lldev->mh, LINK_STATE_UP);
174a23fd118Syl }
175a23fd118Syl 
176a23fd118Syl /*
177a23fd118Syl  * xgell_callback_link_down
178a23fd118Syl  *
179a23fd118Syl  * This function is called by the HAL to report a HW link-down state change.
180a23fd118Syl  */
181a23fd118Syl void
182a23fd118Syl xgell_callback_link_down(void *userdata)
183a23fd118Syl {
184a23fd118Syl 	xgelldev_t *lldev = (xgelldev_t *)userdata;
185a23fd118Syl 
186ba2e4443Sseb 	mac_link_update(lldev->mh, LINK_STATE_DOWN);
187a23fd118Syl }
188a23fd118Syl 
189a23fd118Syl /*
190a23fd118Syl  * xgell_rx_buffer_replenish_all
191a23fd118Syl  *
192a23fd118Syl  * Replenish all freed dtr(s) with buffers from the free pool. It's called by
193da14cebeSEric Cheng  * xgell_rx_buffer_recycle() or xgell_rx_1b_callback().
194a23fd118Syl  * Must be called with pool_lock held.
195a23fd118Syl  */
196a23fd118Syl static void
197da14cebeSEric Cheng xgell_rx_buffer_replenish_all(xgell_rx_ring_t *ring)
198a23fd118Syl {
199da14cebeSEric Cheng 	xgell_rx_buffer_pool_t *bf_pool = &ring->bf_pool;
200a23fd118Syl 	xge_hal_dtr_h dtr;
201a23fd118Syl 	xgell_rx_buffer_t *rx_buffer;
202a23fd118Syl 	xgell_rxd_priv_t *rxd_priv;
203a23fd118Syl 
204da14cebeSEric Cheng 	xge_assert(mutex_owned(&bf_pool->pool_lock));
205da14cebeSEric Cheng 
206da14cebeSEric Cheng 	while ((bf_pool->free > 0) &&
207da14cebeSEric Cheng 	    (xge_hal_ring_dtr_reserve(ring->channelh, &dtr) == XGE_HAL_OK)) {
208da14cebeSEric Cheng 		xge_assert(bf_pool->head);
2098347601bSyl 
210da14cebeSEric Cheng 		rx_buffer = bf_pool->head;
211da14cebeSEric Cheng 
212da14cebeSEric Cheng 		bf_pool->head = rx_buffer->next;
213da14cebeSEric Cheng 		bf_pool->free--;
214a23fd118Syl 
215a23fd118Syl 		xge_assert(rx_buffer->dma_addr);
216a23fd118Syl 
217a23fd118Syl 		rxd_priv = (xgell_rxd_priv_t *)
2187eced415Sxw 		    xge_hal_ring_dtr_private(ring->channelh, dtr);
219a23fd118Syl 		xge_hal_ring_dtr_1b_set(dtr, rx_buffer->dma_addr,
220da14cebeSEric Cheng 		    bf_pool->size);
221a23fd118Syl 
222a23fd118Syl 		rxd_priv->rx_buffer = rx_buffer;
2237eced415Sxw 		xge_hal_ring_dtr_post(ring->channelh, dtr);
224a23fd118Syl 	}
225a23fd118Syl }
226a23fd118Syl 
227a23fd118Syl /*
228a23fd118Syl  * xgell_rx_buffer_release
229a23fd118Syl  *
230a23fd118Syl  * The only thing done here is to put the buffer back to the pool.
2318347601bSyl  * Callers must hold the bf_pool.pool_lock mutex.
232a23fd118Syl  */
233a23fd118Syl static void
234a23fd118Syl xgell_rx_buffer_release(xgell_rx_buffer_t *rx_buffer)
235a23fd118Syl {
236da14cebeSEric Cheng 	xgell_rx_ring_t *ring = rx_buffer->ring;
237da14cebeSEric Cheng 	xgell_rx_buffer_pool_t *bf_pool = &ring->bf_pool;
238a23fd118Syl 
239da14cebeSEric Cheng 	xge_assert(mutex_owned(&bf_pool->pool_lock));
240a23fd118Syl 
241a23fd118Syl 	/* Put the buffer back to pool */
242da14cebeSEric Cheng 	rx_buffer->next = bf_pool->head;
243da14cebeSEric Cheng 	bf_pool->head = rx_buffer;
244a23fd118Syl 
245da14cebeSEric Cheng 	bf_pool->free++;
246a23fd118Syl }
247a23fd118Syl 
248a23fd118Syl /*
249a23fd118Syl  * xgell_rx_buffer_recycle
250a23fd118Syl  *
251a23fd118Syl  * Called by desballoc() to "free" the resource.
252a23fd118Syl  * We will try to replenish all descriptors.
253a23fd118Syl  */
2547eced415Sxw 
2557eced415Sxw /*
2567eced415Sxw  * Previously there was heavy lock contention between xgell_rx_1b_compl() and
2577eced415Sxw  * xgell_rx_buffer_recycle(), which consumed a lot of CPU resources and had a
2587eced415Sxw  * bad effect on rx performance. A separate recycle list is introduced to
2597eced415Sxw  * overcome this. The recycle list records the rx buffers that have been
2607eced415Sxw  * recycled, and these buffers are returned to the free list in bulk instead
2617eced415Sxw  * of one-by-one.
2627eced415Sxw  */
2637eced415Sxw 
264a23fd118Syl static void
265a23fd118Syl xgell_rx_buffer_recycle(char *arg)
266a23fd118Syl {
267a23fd118Syl 	xgell_rx_buffer_t *rx_buffer = (xgell_rx_buffer_t *)arg;
268da14cebeSEric Cheng 	xgell_rx_ring_t *ring = rx_buffer->ring;
2697eced415Sxw 	xgelldev_t *lldev = ring->lldev;
2707eced415Sxw 	xgell_rx_buffer_pool_t *bf_pool = &ring->bf_pool;
271a23fd118Syl 
2727eced415Sxw 	mutex_enter(&bf_pool->recycle_lock);
2738347601bSyl 
2747eced415Sxw 	rx_buffer->next = bf_pool->recycle_head;
2757eced415Sxw 	bf_pool->recycle_head = rx_buffer;
2767eced415Sxw 	if (bf_pool->recycle_tail == NULL)
2777eced415Sxw 		bf_pool->recycle_tail = rx_buffer;
2787eced415Sxw 	bf_pool->recycle++;
279a23fd118Syl 
280a23fd118Syl 	/*
281a23fd118Syl 	 * Until a good way to set this hiwat is found, just always call
282a23fd118Syl 	 * replenish_all. *TODO*
283a23fd118Syl 	 */
284da14cebeSEric Cheng 	if ((lldev->is_initialized != 0) && (ring->live) &&
2857eced415Sxw 	    (bf_pool->recycle >= XGELL_RX_BUFFER_RECYCLE_CACHE)) {
286da14cebeSEric Cheng 		mutex_enter(&bf_pool->pool_lock);
287da14cebeSEric Cheng 		bf_pool->recycle_tail->next = bf_pool->head;
288da14cebeSEric Cheng 		bf_pool->head = bf_pool->recycle_head;
289da14cebeSEric Cheng 		bf_pool->recycle_head = bf_pool->recycle_tail = NULL;
290da14cebeSEric Cheng 		bf_pool->post -= bf_pool->recycle;
291da14cebeSEric Cheng 		bf_pool->free += bf_pool->recycle;
292da14cebeSEric Cheng 		bf_pool->recycle = 0;
293da14cebeSEric Cheng 		xgell_rx_buffer_replenish_all(ring);
294da14cebeSEric Cheng 		mutex_exit(&bf_pool->pool_lock);
295a23fd118Syl 	}
296a23fd118Syl 
2977eced415Sxw 	mutex_exit(&bf_pool->recycle_lock);
298a23fd118Syl }
299a23fd118Syl 
300a23fd118Syl /*
301a23fd118Syl  * xgell_rx_buffer_alloc
302a23fd118Syl  *
303a23fd118Syl  * Allocate one rx buffer and return with the pointer to the buffer.
304a23fd118Syl  * Return NULL if failed.
305a23fd118Syl  */
306a23fd118Syl static xgell_rx_buffer_t *
307da14cebeSEric Cheng xgell_rx_buffer_alloc(xgell_rx_ring_t *ring)
308a23fd118Syl {
309da14cebeSEric Cheng 	xgelldev_t *lldev = ring->lldev;
310da14cebeSEric Cheng 	xgell_rx_buffer_pool_t *bf_pool = &ring->bf_pool;
311a23fd118Syl 	xge_hal_device_t *hldev;
312a23fd118Syl 	void *vaddr;
313a23fd118Syl 	ddi_dma_handle_t dma_handle;
314a23fd118Syl 	ddi_acc_handle_t dma_acch;
315a23fd118Syl 	dma_addr_t dma_addr;
316a23fd118Syl 	uint_t ncookies;
317a23fd118Syl 	ddi_dma_cookie_t dma_cookie;
318a23fd118Syl 	size_t real_size;
319a23fd118Syl 	extern ddi_device_acc_attr_t *p_xge_dev_attr;
320a23fd118Syl 	xgell_rx_buffer_t *rx_buffer;
321a23fd118Syl 
3228347601bSyl 	hldev = (xge_hal_device_t *)lldev->devh;
323a23fd118Syl 
324a23fd118Syl 	if (ddi_dma_alloc_handle(hldev->pdev, p_hal_dma_attr, DDI_DMA_SLEEP,
325a23fd118Syl 	    0, &dma_handle) != DDI_SUCCESS) {
326a23fd118Syl 		xge_debug_ll(XGE_ERR, "%s%d: can not allocate DMA handle",
327a23fd118Syl 		    XGELL_IFNAME, lldev->instance);
328a23fd118Syl 		goto handle_failed;
329a23fd118Syl 	}
330a23fd118Syl 
331a23fd118Syl 	/* reserve some space at the end of the buffer for recycling */
332da14cebeSEric Cheng 	if (ddi_dma_mem_alloc(dma_handle, HEADROOM + bf_pool->size +
333a23fd118Syl 	    sizeof (xgell_rx_buffer_t), p_xge_dev_attr, DDI_DMA_STREAMING,
334a23fd118Syl 	    DDI_DMA_SLEEP, 0, (caddr_t *)&vaddr, &real_size, &dma_acch) !=
335a23fd118Syl 	    DDI_SUCCESS) {
336a23fd118Syl 		xge_debug_ll(XGE_ERR, "%s%d: can not allocate DMA-able memory",
337a23fd118Syl 		    XGELL_IFNAME, lldev->instance);
338a23fd118Syl 		goto mem_failed;
339a23fd118Syl 	}
340a23fd118Syl 
341da14cebeSEric Cheng 	if (HEADROOM + bf_pool->size + sizeof (xgell_rx_buffer_t) >
342a23fd118Syl 	    real_size) {
343a23fd118Syl 		xge_debug_ll(XGE_ERR, "%s%d: can not allocate DMA-able memory",
344a23fd118Syl 		    XGELL_IFNAME, lldev->instance);
345a23fd118Syl 		goto bind_failed;
346a23fd118Syl 	}
347a23fd118Syl 
348a23fd118Syl 	if (ddi_dma_addr_bind_handle(dma_handle, NULL, (char *)vaddr + HEADROOM,
349da14cebeSEric Cheng 	    bf_pool->size, DDI_DMA_READ | DDI_DMA_STREAMING,
350a23fd118Syl 	    DDI_DMA_SLEEP, 0, &dma_cookie, &ncookies) != DDI_SUCCESS) {
351a23fd118Syl 		xge_debug_ll(XGE_ERR, "%s%d: out of mapping for mblk",
352a23fd118Syl 		    XGELL_IFNAME, lldev->instance);
353a23fd118Syl 		goto bind_failed;
354a23fd118Syl 	}
355a23fd118Syl 
356da14cebeSEric Cheng 	if (ncookies != 1 || dma_cookie.dmac_size < bf_pool->size) {
357a23fd118Syl 		xge_debug_ll(XGE_ERR, "%s%d: can not handle partial DMA",
358a23fd118Syl 		    XGELL_IFNAME, lldev->instance);
359a23fd118Syl 		goto check_failed;
360a23fd118Syl 	}
361a23fd118Syl 
362a23fd118Syl 	dma_addr = dma_cookie.dmac_laddress;
363a23fd118Syl 
364a23fd118Syl 	rx_buffer = (xgell_rx_buffer_t *)((char *)vaddr + real_size -
365a23fd118Syl 	    sizeof (xgell_rx_buffer_t));
366a23fd118Syl 	rx_buffer->next = NULL;
367a23fd118Syl 	rx_buffer->vaddr = vaddr;
368a23fd118Syl 	rx_buffer->dma_addr = dma_addr;
369a23fd118Syl 	rx_buffer->dma_handle = dma_handle;
370a23fd118Syl 	rx_buffer->dma_acch = dma_acch;
3717eced415Sxw 	rx_buffer->ring = ring;
372a23fd118Syl 	rx_buffer->frtn.free_func = xgell_rx_buffer_recycle;
373a23fd118Syl 	rx_buffer->frtn.free_arg = (void *)rx_buffer;
374a23fd118Syl 
375a23fd118Syl 	return (rx_buffer);
376a23fd118Syl 
377a23fd118Syl check_failed:
378a23fd118Syl 	(void) ddi_dma_unbind_handle(dma_handle);
379a23fd118Syl bind_failed:
380a23fd118Syl 	XGE_OS_MEMORY_CHECK_FREE(vaddr, 0);
381a23fd118Syl 	ddi_dma_mem_free(&dma_acch);
382a23fd118Syl mem_failed:
383a23fd118Syl 	ddi_dma_free_handle(&dma_handle);
384a23fd118Syl handle_failed:
385a23fd118Syl 
386a23fd118Syl 	return (NULL);
387a23fd118Syl }
388a23fd118Syl 
389a23fd118Syl /*
390a23fd118Syl  * xgell_rx_destroy_buffer_pool
391a23fd118Syl  *
392a23fd118Syl  * Destroy the buffer pool. If any buffers are still held by the upper layer,
393a23fd118Syl  * as recorded by bf_pool.post, return B_FALSE to reject the unload.
394a23fd118Syl  */
395da14cebeSEric Cheng static boolean_t
396da14cebeSEric Cheng xgell_rx_destroy_buffer_pool(xgell_rx_ring_t *ring)
397a23fd118Syl {
398da14cebeSEric Cheng 	xgelldev_t *lldev = ring->lldev;
399da14cebeSEric Cheng 	xgell_rx_buffer_pool_t *bf_pool = &ring->bf_pool;
400a23fd118Syl 	xgell_rx_buffer_t *rx_buffer;
401a23fd118Syl 	ddi_dma_handle_t  dma_handle;
402a23fd118Syl 	ddi_acc_handle_t  dma_acch;
403a23fd118Syl 	int i;
404a23fd118Syl 
405da14cebeSEric Cheng 	/*
406da14cebeSEric Cheng 	 * If the pool has been destroyed, just return B_TRUE
407da14cebeSEric Cheng 	 */
408da14cebeSEric Cheng 	if (!bf_pool->live)
409da14cebeSEric Cheng 		return (B_TRUE);
410da14cebeSEric Cheng 
411da14cebeSEric Cheng 	mutex_enter(&bf_pool->recycle_lock);
412da14cebeSEric Cheng 	if (bf_pool->recycle > 0) {
413da14cebeSEric Cheng 		mutex_enter(&bf_pool->pool_lock);
414da14cebeSEric Cheng 		bf_pool->recycle_tail->next = bf_pool->head;
415da14cebeSEric Cheng 		bf_pool->head = bf_pool->recycle_head;
416da14cebeSEric Cheng 		bf_pool->recycle_tail = bf_pool->recycle_head = NULL;
417da14cebeSEric Cheng 		bf_pool->post -= bf_pool->recycle;
418da14cebeSEric Cheng 		bf_pool->free += bf_pool->recycle;
419da14cebeSEric Cheng 		bf_pool->recycle = 0;
420da14cebeSEric Cheng 		mutex_exit(&bf_pool->pool_lock);
4217eced415Sxw 	}
422da14cebeSEric Cheng 	mutex_exit(&bf_pool->recycle_lock);
4237eced415Sxw 
424a23fd118Syl 	/*
425a23fd118Syl 	 * If there are any posted buffers, the driver should refuse to be
426a23fd118Syl 	 * detached. The upper layer needs to be notified to release them.
427a23fd118Syl 	 */
428da14cebeSEric Cheng 	if (bf_pool->post != 0) {
429a23fd118Syl 		xge_debug_ll(XGE_ERR,
430a23fd118Syl 		    "%s%d has some buffers not be recycled, try later!",
431a23fd118Syl 		    XGELL_IFNAME, lldev->instance);
432da14cebeSEric Cheng 		return (B_FALSE);
433a23fd118Syl 	}
434a23fd118Syl 
435a23fd118Syl 	/*
436da14cebeSEric Cheng 	 * Release buffers one by one.
437a23fd118Syl 	 */
438da14cebeSEric Cheng 	for (i = bf_pool->total; i > 0; i--) {
439da14cebeSEric Cheng 		rx_buffer = bf_pool->head;
440a23fd118Syl 		xge_assert(rx_buffer != NULL);
441a23fd118Syl 
442da14cebeSEric Cheng 		bf_pool->head = rx_buffer->next;
443a23fd118Syl 
444a23fd118Syl 		dma_handle = rx_buffer->dma_handle;
445a23fd118Syl 		dma_acch = rx_buffer->dma_acch;
446a23fd118Syl 
447a23fd118Syl 		if (ddi_dma_unbind_handle(dma_handle) != DDI_SUCCESS) {
448da14cebeSEric Cheng 			xge_debug_ll(XGE_ERR, "failed to unbind DMA handle!");
449da14cebeSEric Cheng 			bf_pool->head = rx_buffer;
450da14cebeSEric Cheng 			return (B_FALSE);
451a23fd118Syl 		}
452a23fd118Syl 		ddi_dma_mem_free(&dma_acch);
453a23fd118Syl 		ddi_dma_free_handle(&dma_handle);
454a23fd118Syl 
455da14cebeSEric Cheng 		bf_pool->total--;
456da14cebeSEric Cheng 		bf_pool->free--;
457a23fd118Syl 	}
458a23fd118Syl 
459da14cebeSEric Cheng 	xge_assert(!mutex_owned(&bf_pool->pool_lock));
460da14cebeSEric Cheng 
461da14cebeSEric Cheng 	mutex_destroy(&bf_pool->recycle_lock);
462da14cebeSEric Cheng 	mutex_destroy(&bf_pool->pool_lock);
463da14cebeSEric Cheng 	bf_pool->live = B_FALSE;
464da14cebeSEric Cheng 
465da14cebeSEric Cheng 	return (B_TRUE);
466a23fd118Syl }
467a23fd118Syl 
468a23fd118Syl /*
469a23fd118Syl  * xgell_rx_create_buffer_pool
470a23fd118Syl  *
471a23fd118Syl  * Initialize the RX buffer pool for this RX ring. Refer to xgell_rx_buffer_pool_t.
472a23fd118Syl  */
473da14cebeSEric Cheng static boolean_t
474da14cebeSEric Cheng xgell_rx_create_buffer_pool(xgell_rx_ring_t *ring)
475a23fd118Syl {
476da14cebeSEric Cheng 	xgelldev_t *lldev = ring->lldev;
477da14cebeSEric Cheng 	xgell_rx_buffer_pool_t *bf_pool = &ring->bf_pool;
478a23fd118Syl 	xge_hal_device_t *hldev;
479a23fd118Syl 	xgell_rx_buffer_t *rx_buffer;
480a23fd118Syl 	int i;
481a23fd118Syl 
482da14cebeSEric Cheng 	if (bf_pool->live)
483da14cebeSEric Cheng 		return (B_TRUE);
484da14cebeSEric Cheng 
485ba2e4443Sseb 	hldev = (xge_hal_device_t *)lldev->devh;
486a23fd118Syl 
487da14cebeSEric Cheng 	bf_pool->total = 0;
488da14cebeSEric Cheng 	bf_pool->size = XGELL_MAX_FRAME_SIZE(hldev);
489da14cebeSEric Cheng 	bf_pool->head = NULL;
490da14cebeSEric Cheng 	bf_pool->free = 0;
491da14cebeSEric Cheng 	bf_pool->post = 0;
492da14cebeSEric Cheng 	bf_pool->post_hiwat = lldev->config.rx_buffer_post_hiwat;
493da14cebeSEric Cheng 	bf_pool->recycle = 0;
494da14cebeSEric Cheng 	bf_pool->recycle_head = NULL;
495da14cebeSEric Cheng 	bf_pool->recycle_tail = NULL;
496da14cebeSEric Cheng 	bf_pool->live = B_TRUE;
497da14cebeSEric Cheng 
498da14cebeSEric Cheng 	mutex_init(&bf_pool->pool_lock, NULL, MUTEX_DRIVER,
4997eced415Sxw 	    DDI_INTR_PRI(hldev->irqh));
500da14cebeSEric Cheng 	mutex_init(&bf_pool->recycle_lock, NULL, MUTEX_DRIVER,
5017eced415Sxw 	    DDI_INTR_PRI(hldev->irqh));
502a23fd118Syl 
503a23fd118Syl 	/*
504a23fd118Syl 	 * Allocate buffers one by one. If failed, destroy whole pool by
505a23fd118Syl 	 * call to xgell_rx_destroy_buffer_pool().
506a23fd118Syl 	 */
5077eced415Sxw 
508a23fd118Syl 	for (i = 0; i < lldev->config.rx_buffer_total; i++) {
5097eced415Sxw 		if ((rx_buffer = xgell_rx_buffer_alloc(ring)) == NULL) {
5107eced415Sxw 			(void) xgell_rx_destroy_buffer_pool(ring);
511da14cebeSEric Cheng 			return (B_FALSE);
512a23fd118Syl 		}
513a23fd118Syl 
514da14cebeSEric Cheng 		rx_buffer->next = bf_pool->head;
515da14cebeSEric Cheng 		bf_pool->head = rx_buffer;
516a23fd118Syl 
517da14cebeSEric Cheng 		bf_pool->total++;
518da14cebeSEric Cheng 		bf_pool->free++;
519a23fd118Syl 	}
520a23fd118Syl 
521da14cebeSEric Cheng 	return (B_TRUE);
522a23fd118Syl }
523a23fd118Syl 
524a23fd118Syl /*
525a23fd118Syl  * xgell_rx_dtr_replenish
526a23fd118Syl  *
527a23fd118Syl  * Replenish the descriptor with an rx_buffer from the RX buffer pool.
528a23fd118Syl  * The dtr should be posted right away.
529a23fd118Syl  */
530a23fd118Syl xge_hal_status_e
531a23fd118Syl xgell_rx_dtr_replenish(xge_hal_channel_h channelh, xge_hal_dtr_h dtr, int index,
532a23fd118Syl     void *userdata, xge_hal_channel_reopen_e reopen)
533a23fd118Syl {
534da14cebeSEric Cheng 	xgell_rx_ring_t *ring = userdata;
535da14cebeSEric Cheng 	xgell_rx_buffer_pool_t *bf_pool = &ring->bf_pool;
536a23fd118Syl 	xgell_rx_buffer_t *rx_buffer;
537a23fd118Syl 	xgell_rxd_priv_t *rxd_priv;
538a23fd118Syl 
539da14cebeSEric Cheng 	mutex_enter(&bf_pool->pool_lock);
540da14cebeSEric Cheng 	if (bf_pool->head == NULL) {
541da14cebeSEric Cheng 		xge_debug_ll(XGE_ERR, "no more available rx DMA buffer!");
542a23fd118Syl 		return (XGE_HAL_FAIL);
543a23fd118Syl 	}
544da14cebeSEric Cheng 	rx_buffer = bf_pool->head;
545a23fd118Syl 	xge_assert(rx_buffer);
546a23fd118Syl 	xge_assert(rx_buffer->dma_addr);
547a23fd118Syl 
548da14cebeSEric Cheng 	bf_pool->head = rx_buffer->next;
549da14cebeSEric Cheng 	bf_pool->free--;
550da14cebeSEric Cheng 	mutex_exit(&bf_pool->pool_lock);
551da14cebeSEric Cheng 
5527eced415Sxw 	rxd_priv = (xgell_rxd_priv_t *)xge_hal_ring_dtr_private(channelh, dtr);
553da14cebeSEric Cheng 	xge_hal_ring_dtr_1b_set(dtr, rx_buffer->dma_addr, bf_pool->size);
554a23fd118Syl 
555a23fd118Syl 	rxd_priv->rx_buffer = rx_buffer;
556a23fd118Syl 
557a23fd118Syl 	return (XGE_HAL_OK);
558a23fd118Syl }
559a23fd118Syl 
560a23fd118Syl /*
561a23fd118Syl  * xgell_get_ip_offset
562a23fd118Syl  *
563a23fd118Syl  * Calculate the offset to IP header.
564a23fd118Syl  */
565a23fd118Syl static inline int
566a23fd118Syl xgell_get_ip_offset(xge_hal_dtr_info_t *ext_info)
567a23fd118Syl {
568a23fd118Syl 	int ip_off;
569a23fd118Syl 
570a23fd118Syl 	/* get IP-header offset */
571a23fd118Syl 	switch (ext_info->frame) {
572a23fd118Syl 	case XGE_HAL_FRAME_TYPE_DIX:
573a23fd118Syl 		ip_off = XGE_HAL_HEADER_ETHERNET_II_802_3_SIZE;
574a23fd118Syl 		break;
575a23fd118Syl 	case XGE_HAL_FRAME_TYPE_IPX:
576a23fd118Syl 		ip_off = (XGE_HAL_HEADER_ETHERNET_II_802_3_SIZE +
577a23fd118Syl 		    XGE_HAL_HEADER_802_2_SIZE +
578a23fd118Syl 		    XGE_HAL_HEADER_SNAP_SIZE);
579a23fd118Syl 		break;
580a23fd118Syl 	case XGE_HAL_FRAME_TYPE_LLC:
581a23fd118Syl 		ip_off = (XGE_HAL_HEADER_ETHERNET_II_802_3_SIZE +
582a23fd118Syl 		    XGE_HAL_HEADER_802_2_SIZE);
583a23fd118Syl 		break;
584a23fd118Syl 	case XGE_HAL_FRAME_TYPE_SNAP:
585a23fd118Syl 		ip_off = (XGE_HAL_HEADER_ETHERNET_II_802_3_SIZE +
586a23fd118Syl 		    XGE_HAL_HEADER_SNAP_SIZE);
587a23fd118Syl 		break;
588a23fd118Syl 	default:
589a23fd118Syl 		ip_off = 0;
590a23fd118Syl 		break;
591a23fd118Syl 	}
592a23fd118Syl 
593a23fd118Syl 	if ((ext_info->proto & XGE_HAL_FRAME_PROTO_IPV4 ||
594a23fd118Syl 	    ext_info->proto & XGE_HAL_FRAME_PROTO_IPV6) &&
595a23fd118Syl 	    (ext_info->proto & XGE_HAL_FRAME_PROTO_VLAN_TAGGED)) {
596a23fd118Syl 		ip_off += XGE_HAL_HEADER_VLAN_SIZE;
597a23fd118Syl 	}
598a23fd118Syl 
599a23fd118Syl 	return (ip_off);
600a23fd118Syl }
601a23fd118Syl 
602a23fd118Syl /*
603a23fd118Syl  * xgell_rx_hcksum_assoc
604a23fd118Syl  *
605a23fd118Syl  * Determine the packet type and then call mac_hcksum_set() to associate
606a23fd118Syl  * h/w checksum information.
607a23fd118Syl  */
608a23fd118Syl static inline void
609a23fd118Syl xgell_rx_hcksum_assoc(mblk_t *mp, char *vaddr, int pkt_length,
610a23fd118Syl     xge_hal_dtr_info_t *ext_info)
611a23fd118Syl {
612a23fd118Syl 	int cksum_flags = 0;
613a23fd118Syl 
614a23fd118Syl 	if (!(ext_info->proto & XGE_HAL_FRAME_PROTO_IP_FRAGMENTED)) {
615a23fd118Syl 		if (ext_info->proto & XGE_HAL_FRAME_PROTO_TCP_OR_UDP) {
616a23fd118Syl 			if (ext_info->l3_cksum == XGE_HAL_L3_CKSUM_OK) {
6170dc2366fSVenugopal Iyer 				cksum_flags |= HCK_IPV4_HDRCKSUM_OK;
618a23fd118Syl 			}
619a23fd118Syl 			if (ext_info->l4_cksum == XGE_HAL_L4_CKSUM_OK) {
620a23fd118Syl 				cksum_flags |= HCK_FULLCKSUM_OK;
621a23fd118Syl 			}
6220dc2366fSVenugopal Iyer 			if (cksum_flags != 0) {
6230dc2366fSVenugopal Iyer 				mac_hcksum_set(mp, 0, 0, 0, 0, cksum_flags);
624a23fd118Syl 			}
625a23fd118Syl 		}
626a23fd118Syl 	} else if (ext_info->proto &
627a23fd118Syl 	    (XGE_HAL_FRAME_PROTO_IPV4 | XGE_HAL_FRAME_PROTO_IPV6)) {
628a23fd118Syl 		/*
629a23fd118Syl 		 * Just pass the partial cksum up to IP.
630a23fd118Syl 		 */
6318347601bSyl 		int ip_off = xgell_get_ip_offset(ext_info);
632a23fd118Syl 		int start, end = pkt_length - ip_off;
633a23fd118Syl 
634a23fd118Syl 		if (ext_info->proto & XGE_HAL_FRAME_PROTO_IPV4) {
635a23fd118Syl 			struct ip *ip =
636a23fd118Syl 			    (struct ip *)(vaddr + ip_off);
637f30c160eSRoamer 			start = ip->ip_hl * 4;
638a23fd118Syl 		} else {
639f30c160eSRoamer 			start = 40;
640a23fd118Syl 		}
641a23fd118Syl 		cksum_flags |= HCK_PARTIALCKSUM;
6420dc2366fSVenugopal Iyer 		mac_hcksum_set(mp, start, 0, end,
6430dc2366fSVenugopal Iyer 		    ntohs(ext_info->l4_cksum), cksum_flags);
644a23fd118Syl 	}
645a23fd118Syl }
646a23fd118Syl 
647a23fd118Syl /*
648a23fd118Syl  * xgell_rx_1b_msg_alloc
649a23fd118Syl  *
650a23fd118Syl  * Allocate a message header for the data buffer, and decide whether to copy
651a23fd118Syl  * the packet to a new data buffer to release the big rx_buffer and save memory.
652a23fd118Syl  *
6538347601bSyl  * If the pkt_length <= XGELL_RX_DMA_LOWAT, call allocb() to allocate
654a23fd118Syl  * a new message and copy the payload in.
655a23fd118Syl  */
656a23fd118Syl static mblk_t *
657da14cebeSEric Cheng xgell_rx_1b_msg_alloc(xgell_rx_ring_t *ring, xgell_rx_buffer_t *rx_buffer,
6588347601bSyl     int pkt_length, xge_hal_dtr_info_t *ext_info, boolean_t *copyit)
659a23fd118Syl {
660da14cebeSEric Cheng 	xgelldev_t *lldev = ring->lldev;
661a23fd118Syl 	mblk_t *mp;
662a23fd118Syl 	char *vaddr;
663a23fd118Syl 
664a23fd118Syl 	vaddr = (char *)rx_buffer->vaddr + HEADROOM;
665a23fd118Syl 	/*
666a23fd118Syl 	 * Copy the packet into a newly allocated message buffer, if pkt_length
6678347601bSyl 	 * is less than XGELL_RX_DMA_LOWAT
668a23fd118Syl 	 */
6698347601bSyl 	if (*copyit || pkt_length <= lldev->config.rx_dma_lowat) {
6707eced415Sxw 		if ((mp = allocb(pkt_length + HEADROOM, 0)) == NULL) {
671a23fd118Syl 			return (NULL);
672a23fd118Syl 		}
6737eced415Sxw 		mp->b_rptr += HEADROOM;
674a23fd118Syl 		bcopy(vaddr, mp->b_rptr, pkt_length);
675a23fd118Syl 		mp->b_wptr = mp->b_rptr + pkt_length;
676a23fd118Syl 		*copyit = B_TRUE;
677a23fd118Syl 		return (mp);
678a23fd118Syl 	}
679a23fd118Syl 
680a23fd118Syl 	/*
681a23fd118Syl 	 * Just allocate mblk for current data buffer
682a23fd118Syl 	 */
6833c785c4cSyl 	if ((mp = (mblk_t *)desballoc((unsigned char *)vaddr, pkt_length, 0,
684a23fd118Syl 	    &rx_buffer->frtn)) == NULL) {
685a23fd118Syl 		/* Drop it */
686a23fd118Syl 		return (NULL);
687a23fd118Syl 	}
688a23fd118Syl 	/*
6893c785c4cSyl 	 * Adjust the b_rptr/b_wptr in the mblk_t structure.
690a23fd118Syl 	 */
6913c785c4cSyl 	mp->b_wptr += pkt_length;
692a23fd118Syl 
693a23fd118Syl 	return (mp);
694a23fd118Syl }
695a23fd118Syl 
696a23fd118Syl /*
697da14cebeSEric Cheng  * xgell_rx_1b_callback
698a23fd118Syl  *
699a23fd118Syl  * This function is called if the interrupt is due to a received frame or if
700a23fd118Syl  * the receive ring contains fresh, as yet unprocessed frames.
701a23fd118Syl  */
702a23fd118Syl static xge_hal_status_e
703da14cebeSEric Cheng xgell_rx_1b_callback(xge_hal_channel_h channelh, xge_hal_dtr_h dtr, u8 t_code,
704a23fd118Syl     void *userdata)
705a23fd118Syl {
706da14cebeSEric Cheng 	xgell_rx_ring_t *ring = (xgell_rx_ring_t *)userdata;
7077eced415Sxw 	xgelldev_t *lldev = ring->lldev;
708a23fd118Syl 	xgell_rx_buffer_t *rx_buffer;
709a23fd118Syl 	mblk_t *mp_head = NULL;
710a23fd118Syl 	mblk_t *mp_end  = NULL;
7118347601bSyl 	int pkt_burst = 0;
7128347601bSyl 
713da14cebeSEric Cheng 	xge_debug_ll(XGE_TRACE, "xgell_rx_1b_callback on ring %d", ring->index);
714a23fd118Syl 
715da14cebeSEric Cheng 	mutex_enter(&ring->bf_pool.pool_lock);
716a23fd118Syl 	do {
717a23fd118Syl 		int pkt_length;
718a23fd118Syl 		dma_addr_t dma_data;
719a23fd118Syl 		mblk_t *mp;
720a23fd118Syl 		boolean_t copyit = B_FALSE;
721a23fd118Syl 
722a23fd118Syl 		xgell_rxd_priv_t *rxd_priv = ((xgell_rxd_priv_t *)
723a23fd118Syl 		    xge_hal_ring_dtr_private(channelh, dtr));
724a23fd118Syl 		xge_hal_dtr_info_t ext_info;
725a23fd118Syl 
726a23fd118Syl 		rx_buffer = rxd_priv->rx_buffer;
727a23fd118Syl 
728a23fd118Syl 		xge_hal_ring_dtr_1b_get(channelh, dtr, &dma_data, &pkt_length);
729a23fd118Syl 		xge_hal_ring_dtr_info_get(channelh, dtr, &ext_info);
730a23fd118Syl 
731a23fd118Syl 		xge_assert(dma_data == rx_buffer->dma_addr);
732a23fd118Syl 
733a23fd118Syl 		if (t_code != 0) {
734a23fd118Syl 			xge_debug_ll(XGE_ERR, "%s%d: rx: dtr 0x%"PRIx64
735a23fd118Syl 			    " completed due to error t_code %01x", XGELL_IFNAME,
736a23fd118Syl 			    lldev->instance, (uint64_t)(uintptr_t)dtr, t_code);
737a23fd118Syl 
738a23fd118Syl 			(void) xge_hal_device_handle_tcode(channelh, dtr,
739a23fd118Syl 			    t_code);
740a23fd118Syl 			xge_hal_ring_dtr_free(channelh, dtr); /* drop it */
741a23fd118Syl 			xgell_rx_buffer_release(rx_buffer);
742a23fd118Syl 			continue;
743a23fd118Syl 		}
744a23fd118Syl 
745a23fd118Syl 		/*
746a23fd118Syl 		 * Sync the DMA memory
747a23fd118Syl 		 */
7488347601bSyl 		if (ddi_dma_sync(rx_buffer->dma_handle, 0, pkt_length,
7498347601bSyl 		    DDI_DMA_SYNC_FORKERNEL) != DDI_SUCCESS) {
750a23fd118Syl 			xge_debug_ll(XGE_ERR, "%s%d: rx: can not do DMA sync",
751a23fd118Syl 			    XGELL_IFNAME, lldev->instance);
752a23fd118Syl 			xge_hal_ring_dtr_free(channelh, dtr); /* drop it */
753a23fd118Syl 			xgell_rx_buffer_release(rx_buffer);
754a23fd118Syl 			continue;
755a23fd118Syl 		}
756a23fd118Syl 
757a23fd118Syl 		/*
758a23fd118Syl 		 * Allocate message for the packet.
759a23fd118Syl 		 */
7607eced415Sxw 		if (ring->bf_pool.post > ring->bf_pool.post_hiwat) {
761a23fd118Syl 			copyit = B_TRUE;
762a23fd118Syl 		} else {
763a23fd118Syl 			copyit = B_FALSE;
764a23fd118Syl 		}
765a23fd118Syl 
766da14cebeSEric Cheng 		mp = xgell_rx_1b_msg_alloc(ring, rx_buffer, pkt_length,
7678347601bSyl 		    &ext_info, &copyit);
768a23fd118Syl 
769a23fd118Syl 		xge_hal_ring_dtr_free(channelh, dtr);
770a23fd118Syl 
771a23fd118Syl 		/*
772a23fd118Syl 		 * Release the buffer and recycle it later
773a23fd118Syl 		 */
774a23fd118Syl 		if ((mp == NULL) || copyit) {
775a23fd118Syl 			xgell_rx_buffer_release(rx_buffer);
776a23fd118Syl 		} else {
777a23fd118Syl 			/*
778a23fd118Syl 			 * Count it since the buffer should be loaned up.
779a23fd118Syl 			 */
7807eced415Sxw 			ring->bf_pool.post++;
781a23fd118Syl 		}
782a23fd118Syl 		if (mp == NULL) {
783a23fd118Syl 			xge_debug_ll(XGE_ERR,
7848347601bSyl 			    "%s%d: rx: can not allocate mp mblk",
7858347601bSyl 			    XGELL_IFNAME, lldev->instance);
786a23fd118Syl 			continue;
787a23fd118Syl 		}
788a23fd118Syl 
789a23fd118Syl 		/*
7908347601bSyl 		 * Associate cksum_flags per packet type and h/w
7918347601bSyl 		 * cksum flags.
792a23fd118Syl 		 */
793da14cebeSEric Cheng 		xgell_rx_hcksum_assoc(mp, (char *)rx_buffer->vaddr + HEADROOM,
794da14cebeSEric Cheng 		    pkt_length, &ext_info);
795da14cebeSEric Cheng 
7960dc2366fSVenugopal Iyer 		ring->rx_pkts++;
7970dc2366fSVenugopal Iyer 		ring->rx_bytes += pkt_length;
798a23fd118Syl 
799a23fd118Syl 		if (mp_head == NULL) {
800a23fd118Syl 			mp_head = mp;
801a23fd118Syl 			mp_end = mp;
802a23fd118Syl 		} else {
803a23fd118Syl 			mp_end->b_next = mp;
804a23fd118Syl 			mp_end = mp;
805a23fd118Syl 		}
806a23fd118Syl 
807da14cebeSEric Cheng 		/*
808da14cebeSEric Cheng 		 * Inlined implementation of the polling function.
809da14cebeSEric Cheng 		 */
810da14cebeSEric Cheng 		if ((ring->poll_mp == NULL) && (ring->poll_bytes > 0)) {
811da14cebeSEric Cheng 			ring->poll_mp = mp_head;
812da14cebeSEric Cheng 		}
813da14cebeSEric Cheng 		if (ring->poll_mp != NULL) {
814da14cebeSEric Cheng 			if ((ring->poll_bytes -= pkt_length) <= 0) {
815da14cebeSEric Cheng 				/* have polled enough packets. */
816da14cebeSEric Cheng 				break;
817da14cebeSEric Cheng 			} else {
818da14cebeSEric Cheng 				/* continue polling packets. */
819da14cebeSEric Cheng 				continue;
820da14cebeSEric Cheng 			}
821da14cebeSEric Cheng 		}
822da14cebeSEric Cheng 
823da14cebeSEric Cheng 		/*
824da14cebeSEric Cheng 		 * We're not in polling mode, so try to chain more messages
825da14cebeSEric Cheng 		 * or send the chain up according to pkt_burst.
826da14cebeSEric Cheng 		 */
8278347601bSyl 		if (++pkt_burst < lldev->config.rx_pkt_burst)
8288347601bSyl 			continue;
8298347601bSyl 
8307eced415Sxw 		if (ring->bf_pool.post > ring->bf_pool.post_hiwat) {
8318347601bSyl 			/* Replenish rx buffers */
8327eced415Sxw 			xgell_rx_buffer_replenish_all(ring);
8338347601bSyl 		}
8347eced415Sxw 		mutex_exit(&ring->bf_pool.pool_lock);
8358347601bSyl 		if (mp_head != NULL) {
836da14cebeSEric Cheng 			mac_rx_ring(lldev->mh, ring->ring_handle, mp_head,
837da14cebeSEric Cheng 			    ring->ring_gen_num);
8388347601bSyl 		}
8398347601bSyl 		mp_head = mp_end  = NULL;
8408347601bSyl 		pkt_burst = 0;
8417eced415Sxw 		mutex_enter(&ring->bf_pool.pool_lock);
8428347601bSyl 
843a23fd118Syl 	} while (xge_hal_ring_dtr_next_completed(channelh, &dtr, &t_code) ==
844a23fd118Syl 	    XGE_HAL_OK);
845a23fd118Syl 
846a23fd118Syl 	/*
847a23fd118Syl 	 * Always call replenish_all to recycle rx_buffers.
848a23fd118Syl 	 */
8497eced415Sxw 	xgell_rx_buffer_replenish_all(ring);
8507eced415Sxw 	mutex_exit(&ring->bf_pool.pool_lock);
851a23fd118Syl 
852da14cebeSEric Cheng 	/*
853da14cebeSEric Cheng 	 * If we're not in a polling cycle, call mac_rx_ring(), otherwise
854da14cebeSEric Cheng 	 * just return while leaving packets chained to ring->poll_mp.
855da14cebeSEric Cheng 	 */
856da14cebeSEric Cheng 	if ((ring->poll_mp == NULL) && (mp_head != NULL)) {
857da14cebeSEric Cheng 		mac_rx_ring(lldev->mh, ring->ring_handle, mp_head,
858da14cebeSEric Cheng 		    ring->ring_gen_num);
8598347601bSyl 	}
8608347601bSyl 
861a23fd118Syl 	return (XGE_HAL_OK);
862a23fd118Syl }
863a23fd118Syl 
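/*
 * xgell_rx_poll
 *
 * Polling entry point for an rx ring. Let the HAL process up to
 * bytes_to_pickup bytes of completed descriptors; xgell_rx_1b_callback()
 * chains the resulting packets to ring->poll_mp instead of sending them up,
 * and that chain is returned to the caller here.
 */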
864da14cebeSEric Cheng mblk_t *
865da14cebeSEric Cheng xgell_rx_poll(void *arg, int bytes_to_pickup)
866da14cebeSEric Cheng {
867da14cebeSEric Cheng 	xgell_rx_ring_t *ring = (xgell_rx_ring_t *)arg;
868da14cebeSEric Cheng 	int got_rx = 0;
869da14cebeSEric Cheng 	mblk_t *mp;
870da14cebeSEric Cheng 
871da14cebeSEric Cheng 	xge_debug_ll(XGE_TRACE, "xgell_rx_poll on ring %d", ring->index);
872da14cebeSEric Cheng 
873da14cebeSEric Cheng 	ring->poll_mp = NULL;
874da14cebeSEric Cheng 	ring->poll_bytes = bytes_to_pickup;
875da14cebeSEric Cheng 	(void) xge_hal_device_poll_rx_channel(ring->channelh, &got_rx);
876da14cebeSEric Cheng 
877da14cebeSEric Cheng 	mp = ring->poll_mp;
878da14cebeSEric Cheng 	ring->poll_bytes = -1;
879da14cebeSEric Cheng 	ring->polled_bytes += got_rx;
880da14cebeSEric Cheng 	ring->poll_mp = NULL;
881da14cebeSEric Cheng 
882da14cebeSEric Cheng 	return (mp);
883da14cebeSEric Cheng }
884da14cebeSEric Cheng 
885a23fd118Syl /*
886a23fd118Syl  * xgell_xmit_compl
887a23fd118Syl  *
888a23fd118Syl  * If an interrupt was raised to indicate DMA completion of the Tx packet,
889a23fd118Syl  * this function is called. It identifies the last TxD whose buffer was
890a23fd118Syl  * freed and frees all mblks whose data have already been DMA'ed into the
891a23fd118Syl  * NIC's internal memory.
892a23fd118Syl  */
893a23fd118Syl static xge_hal_status_e
894a23fd118Syl xgell_xmit_compl(xge_hal_channel_h channelh, xge_hal_dtr_h dtr, u8 t_code,
895a23fd118Syl     void *userdata)
896a23fd118Syl {
897da14cebeSEric Cheng 	xgell_tx_ring_t *ring = userdata;
898da14cebeSEric Cheng 	xgelldev_t *lldev = ring->lldev;
899a23fd118Syl 
900a23fd118Syl 	do {
901a23fd118Syl 		xgell_txd_priv_t *txd_priv = ((xgell_txd_priv_t *)
902a23fd118Syl 		    xge_hal_fifo_dtr_private(dtr));
903a23fd118Syl 		int i;
904a23fd118Syl 
905a23fd118Syl 		if (t_code) {
906a23fd118Syl 			xge_debug_ll(XGE_TRACE, "%s%d: tx: dtr 0x%"PRIx64
907a23fd118Syl 			    " completed due to error t_code %01x", XGELL_IFNAME,
908a23fd118Syl 			    lldev->instance, (uint64_t)(uintptr_t)dtr, t_code);
909a23fd118Syl 
910a23fd118Syl 			(void) xge_hal_device_handle_tcode(channelh, dtr,
911a23fd118Syl 			    t_code);
912a23fd118Syl 		}
913a23fd118Syl 
914a23fd118Syl 		for (i = 0; i < txd_priv->handle_cnt; i++) {
9157eced415Sxw 			if (txd_priv->dma_handles[i] != NULL) {
9167eced415Sxw 				xge_assert(txd_priv->dma_handles[i]);
9177eced415Sxw 				(void) ddi_dma_unbind_handle(
9187eced415Sxw 				    txd_priv->dma_handles[i]);
9197eced415Sxw 				ddi_dma_free_handle(&txd_priv->dma_handles[i]);
9207eced415Sxw 				txd_priv->dma_handles[i] = 0;
9217eced415Sxw 			}
922a23fd118Syl 		}
9237eced415Sxw 		txd_priv->handle_cnt = 0;
924a23fd118Syl 
925a23fd118Syl 		xge_hal_fifo_dtr_free(channelh, dtr);
926a23fd118Syl 
9277eced415Sxw 		if (txd_priv->mblk != NULL) {
9287eced415Sxw 			freemsg(txd_priv->mblk);
9297eced415Sxw 			txd_priv->mblk = NULL;
9307eced415Sxw 		}
9317eced415Sxw 
932a23fd118Syl 	} while (xge_hal_fifo_dtr_next_completed(channelh, &dtr, &t_code) ==
933a23fd118Syl 	    XGE_HAL_OK);
934a23fd118Syl 
935da14cebeSEric Cheng 	if (ring->need_resched)
936da14cebeSEric Cheng 		mac_tx_ring_update(lldev->mh, ring->ring_handle);
937a23fd118Syl 
938a23fd118Syl 	return (XGE_HAL_OK);
939a23fd118Syl }
940a23fd118Syl 
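/*
 * xgell_ring_tx
 *
 * Transmit entry point for a tx ring. Small fragments are bcopy()'ed into
 * the fifo descriptor and larger ones are DMA-bound, then the descriptor is
 * posted. Returns NULL once the packet has been consumed (posted, or dropped
 * on error), or returns mp when it cannot be queued yet so the caller may
 * retry later.
 */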
941da14cebeSEric Cheng mblk_t *
942da14cebeSEric Cheng xgell_ring_tx(void *arg, mblk_t *mp)
943a23fd118Syl {
944da14cebeSEric Cheng 	xgell_tx_ring_t *ring = (xgell_tx_ring_t *)arg;
945a23fd118Syl 	mblk_t *bp;
946da14cebeSEric Cheng 	xgelldev_t *lldev = ring->lldev;
9478347601bSyl 	xge_hal_device_t *hldev = lldev->devh;
948a23fd118Syl 	xge_hal_status_e status;
949a23fd118Syl 	xge_hal_dtr_h dtr;
950a23fd118Syl 	xgell_txd_priv_t *txd_priv;
9518347601bSyl 	uint32_t hckflags;
952da14cebeSEric Cheng 	uint32_t lsoflags;
9538347601bSyl 	uint32_t mss;
9548347601bSyl 	int handle_cnt, frag_cnt, ret, i, copied;
9558347601bSyl 	boolean_t used_copy;
9560dc2366fSVenugopal Iyer 	uint64_t sent_bytes;
957a23fd118Syl 
958a23fd118Syl _begin:
959a23fd118Syl 	handle_cnt = frag_cnt = 0;
9600dc2366fSVenugopal Iyer 	sent_bytes = 0;
961a23fd118Syl 
962a23fd118Syl 	if (!lldev->is_initialized || lldev->in_reset)
963da14cebeSEric Cheng 		return (mp);
9647eced415Sxw 
965a23fd118Syl 	/*
966a23fd118Syl 	 * If the free Tx dtr count reaches the lower threshold, inform
967a23fd118Syl 	 * the MAC layer to stop sending more packets till the free dtr
968a23fd118Syl 	 * count exceeds the higher threshold. The driver notifies the
969a23fd118Syl 	 * MAC layer through mac_tx_ring_update(), from xgell_xmit_compl(),
970a23fd118Syl 	 * when descriptors have been freed.
971a23fd118Syl 	 */
972da14cebeSEric Cheng 	if (xge_hal_channel_dtr_count(ring->channelh)
973a23fd118Syl 	    <= XGELL_TX_LEVEL_LOW) {
974da14cebeSEric Cheng 		xge_debug_ll(XGE_TRACE, "%s%d: queue %d: err on xmit,"
975da14cebeSEric Cheng 		    "free descriptors count at low threshold %d",
976da14cebeSEric Cheng 		    XGELL_IFNAME, lldev->instance,
977da14cebeSEric Cheng 		    ((xge_hal_channel_t *)ring->channelh)->post_qid,
978da14cebeSEric Cheng 		    XGELL_TX_LEVEL_LOW);
979da14cebeSEric Cheng 		goto _exit;
980a23fd118Syl 	}
981a23fd118Syl 
982da14cebeSEric Cheng 	status = xge_hal_fifo_dtr_reserve(ring->channelh, &dtr);
983a23fd118Syl 	if (status != XGE_HAL_OK) {
984a23fd118Syl 		switch (status) {
985a23fd118Syl 		case XGE_HAL_INF_CHANNEL_IS_NOT_READY:
986a23fd118Syl 			xge_debug_ll(XGE_ERR,
987a23fd118Syl 			    "%s%d: channel %d is not ready.", XGELL_IFNAME,
988a23fd118Syl 			    lldev->instance,
989a23fd118Syl 			    ((xge_hal_channel_t *)
990da14cebeSEric Cheng 			    ring->channelh)->post_qid);
991a23fd118Syl 			goto _exit;
992a23fd118Syl 		case XGE_HAL_INF_OUT_OF_DESCRIPTORS:
993a23fd118Syl 			xge_debug_ll(XGE_TRACE, "%s%d: queue %d: error in xmit,"
994a23fd118Syl 			    " out of descriptors.", XGELL_IFNAME,
995a23fd118Syl 			    lldev->instance,
996a23fd118Syl 			    ((xge_hal_channel_t *)
997da14cebeSEric Cheng 			    ring->channelh)->post_qid);
998a23fd118Syl 			goto _exit;
999a23fd118Syl 		default:
1000da14cebeSEric Cheng 			return (mp);
1001a23fd118Syl 		}
1002a23fd118Syl 	}
1003a23fd118Syl 
1004a23fd118Syl 	txd_priv = xge_hal_fifo_dtr_private(dtr);
1005a23fd118Syl 	txd_priv->mblk = mp;
1006a23fd118Syl 
1007a23fd118Syl 	/*
1008a23fd118Syl 	 * VLAN tag should be passed down along with MAC header, so h/w needn't
1009a23fd118Syl 	 * do insertion.
1010a23fd118Syl 	 *
1011a23fd118Syl 	 * For a NIC driver that has to strip and re-insert the VLAN tag, the
1012a23fd118Syl 	 * example is the other implementation for xge. The driver can simply
1013a23fd118Syl 	 * bcopy() the ether_vlan_header to overwrite the VLAN tag and let h/w
1014a23fd118Syl 	 * insert the tag automatically, since GLD never sends down mp(s) with
1015a23fd118Syl 	 * a split ether_vlan_header.
1016a23fd118Syl 	 *
1017a23fd118Syl 	 * struct ether_vlan_header *evhp;
1018a23fd118Syl 	 * uint16_t tci;
1019a23fd118Syl 	 *
1020a23fd118Syl 	 * evhp = (struct ether_vlan_header *)mp->b_rptr;
1021a23fd118Syl 	 * if (evhp->ether_tpid == htons(VLAN_TPID)) {
10228347601bSyl 	 *	tci = ntohs(evhp->ether_tci);
10238347601bSyl 	 *	(void) bcopy(mp->b_rptr, mp->b_rptr + VLAN_TAGSZ,
1024a23fd118Syl 	 *	    2 * ETHERADDRL);
10258347601bSyl 	 *	mp->b_rptr += VLAN_TAGSZ;
1026a23fd118Syl 	 *
10278347601bSyl 	 *	xge_hal_fifo_dtr_vlan_set(dtr, tci);
1028a23fd118Syl 	 * }
1029a23fd118Syl 	 */
1030a23fd118Syl 
10318347601bSyl 	copied = 0;
10328347601bSyl 	used_copy = B_FALSE;
1033a23fd118Syl 	for (bp = mp; bp != NULL; bp = bp->b_cont) {
1034a23fd118Syl 		int mblen;
1035a23fd118Syl 		uint_t ncookies;
1036a23fd118Syl 		ddi_dma_cookie_t dma_cookie;
1037a23fd118Syl 		ddi_dma_handle_t dma_handle;
1038a23fd118Syl 
1039a23fd118Syl 		/* skip zero-length message blocks */
1040a23fd118Syl 		mblen = MBLKL(bp);
1041a23fd118Syl 		if (mblen == 0) {
1042a23fd118Syl 			continue;
1043a23fd118Syl 		}
1044a23fd118Syl 
10450dc2366fSVenugopal Iyer 		sent_bytes += mblen;
1046da14cebeSEric Cheng 
10478347601bSyl 		/*
10488347601bSyl 		 * Check the message length to decide whether to DMA or bcopy() data
10498347601bSyl 		 * to tx descriptor(s).
10508347601bSyl 		 */
10518347601bSyl 		if (mblen < lldev->config.tx_dma_lowat &&
10528347601bSyl 		    (copied + mblen) < lldev->tx_copied_max) {
10538347601bSyl 			xge_hal_status_e rc;
1054da14cebeSEric Cheng 			rc = xge_hal_fifo_dtr_buffer_append(ring->channelh,
10558347601bSyl 			    dtr, bp->b_rptr, mblen);
10568347601bSyl 			if (rc == XGE_HAL_OK) {
10578347601bSyl 				used_copy = B_TRUE;
10588347601bSyl 				copied += mblen;
10598347601bSyl 				continue;
10608347601bSyl 			} else if (used_copy) {
10618347601bSyl 				xge_hal_fifo_dtr_buffer_finalize(
1062da14cebeSEric Cheng 				    ring->channelh, dtr, frag_cnt++);
10638347601bSyl 				used_copy = B_FALSE;
10648347601bSyl 			}
10658347601bSyl 		} else if (used_copy) {
1066da14cebeSEric Cheng 			xge_hal_fifo_dtr_buffer_finalize(ring->channelh,
10678347601bSyl 			    dtr, frag_cnt++);
10688347601bSyl 			used_copy = B_FALSE;
10698347601bSyl 		}
10708347601bSyl 
1071ba2e4443Sseb 		ret = ddi_dma_alloc_handle(lldev->dev_info, &tx_dma_attr,
1072a23fd118Syl 		    DDI_DMA_DONTWAIT, 0, &dma_handle);
1073a23fd118Syl 		if (ret != DDI_SUCCESS) {
1074a23fd118Syl 			xge_debug_ll(XGE_ERR,
10758347601bSyl 			    "%s%d: can not allocate dma handle", XGELL_IFNAME,
10768347601bSyl 			    lldev->instance);
1077a23fd118Syl 			goto _exit_cleanup;
1078a23fd118Syl 		}
1079a23fd118Syl 
1080a23fd118Syl 		ret = ddi_dma_addr_bind_handle(dma_handle, NULL,
1081a23fd118Syl 		    (caddr_t)bp->b_rptr, mblen,
1082a23fd118Syl 		    DDI_DMA_WRITE | DDI_DMA_STREAMING, DDI_DMA_DONTWAIT, 0,
1083a23fd118Syl 		    &dma_cookie, &ncookies);
1084a23fd118Syl 
1085a23fd118Syl 		switch (ret) {
1086a23fd118Syl 		case DDI_DMA_MAPPED:
1087a23fd118Syl 			/* everything's fine */
1088a23fd118Syl 			break;
1089a23fd118Syl 
1090a23fd118Syl 		case DDI_DMA_NORESOURCES:
1091a23fd118Syl 			xge_debug_ll(XGE_ERR,
1092a23fd118Syl 			    "%s%d: can not bind dma address",
1093a23fd118Syl 			    XGELL_IFNAME, lldev->instance);
1094a23fd118Syl 			ddi_dma_free_handle(&dma_handle);
1095a23fd118Syl 			goto _exit_cleanup;
1096a23fd118Syl 
1097a23fd118Syl 		case DDI_DMA_NOMAPPING:
1098a23fd118Syl 		case DDI_DMA_INUSE:
1099a23fd118Syl 		case DDI_DMA_TOOBIG:
1100a23fd118Syl 		default:
1101a23fd118Syl 			/* drop packet, don't retry */
1102a23fd118Syl 			xge_debug_ll(XGE_ERR,
1103a23fd118Syl 			    "%s%d: can not map message buffer",
1104a23fd118Syl 			    XGELL_IFNAME, lldev->instance);
1105a23fd118Syl 			ddi_dma_free_handle(&dma_handle);
1106a23fd118Syl 			goto _exit_cleanup;
1107a23fd118Syl 		}
1108a23fd118Syl 
11098347601bSyl 		if (ncookies + frag_cnt > hldev->config.fifo.max_frags) {
1110a23fd118Syl 			xge_debug_ll(XGE_ERR, "%s%d: too many fragments, "
1111a23fd118Syl 			    "requested c:%d+f:%d", XGELL_IFNAME,
1112a23fd118Syl 			    lldev->instance, ncookies, frag_cnt);
1113a23fd118Syl 			(void) ddi_dma_unbind_handle(dma_handle);
1114a23fd118Syl 			ddi_dma_free_handle(&dma_handle);
1115a23fd118Syl 			goto _exit_cleanup;
1116a23fd118Syl 		}
1117a23fd118Syl 
1118a23fd118Syl 		/* setup the descriptors for this data buffer */
1119a23fd118Syl 		while (ncookies) {
1120da14cebeSEric Cheng 			xge_hal_fifo_dtr_buffer_set(ring->channelh, dtr,
1121a23fd118Syl 			    frag_cnt++, dma_cookie.dmac_laddress,
1122a23fd118Syl 			    dma_cookie.dmac_size);
1123a23fd118Syl 			if (--ncookies) {
1124a23fd118Syl 				ddi_dma_nextcookie(dma_handle, &dma_cookie);
1125a23fd118Syl 			}
1126a23fd118Syl 
1127a23fd118Syl 		}
1128a23fd118Syl 
1129a23fd118Syl 		txd_priv->dma_handles[handle_cnt++] = dma_handle;
1130a23fd118Syl 
1131a23fd118Syl 		if (bp->b_cont &&
1132a23fd118Syl 		    (frag_cnt + XGE_HAL_DEFAULT_FIFO_FRAGS_THRESHOLD >=
11337eced415Sxw 		    hldev->config.fifo.max_frags)) {
1134a23fd118Syl 			mblk_t *nmp;
1135a23fd118Syl 
1136a23fd118Syl 			xge_debug_ll(XGE_TRACE,
1137a23fd118Syl 			    "too many FRAGs [%d], pull up them", frag_cnt);
1138a23fd118Syl 
1139a23fd118Syl 			if ((nmp = msgpullup(bp->b_cont, -1)) == NULL) {
1140a23fd118Syl 				/* Drop packet, don't retry */
1141a23fd118Syl 				xge_debug_ll(XGE_ERR,
1142a23fd118Syl 				    "%s%d: can not pullup message buffer",
1143a23fd118Syl 				    XGELL_IFNAME, lldev->instance);
1144a23fd118Syl 				goto _exit_cleanup;
1145a23fd118Syl 			}
1146a23fd118Syl 			freemsg(bp->b_cont);
1147a23fd118Syl 			bp->b_cont = nmp;
1148a23fd118Syl 		}
1149a23fd118Syl 	}
1150a23fd118Syl 
11518347601bSyl 	/* finalize unfinished copies */
11528347601bSyl 	if (used_copy) {
1153da14cebeSEric Cheng 		xge_hal_fifo_dtr_buffer_finalize(ring->channelh, dtr,
11548347601bSyl 		    frag_cnt++);
11558347601bSyl 	}
11568347601bSyl 
1157a23fd118Syl 	txd_priv->handle_cnt = handle_cnt;
1158a23fd118Syl 
11598347601bSyl 	/*
11608347601bSyl 	 * If LSO is required, just call xge_hal_fifo_dtr_mss_set(dtr, mss) to
11618347601bSyl 	 * do all necessary work.
11628347601bSyl 	 */
11630dc2366fSVenugopal Iyer 	mac_lso_get(mp, &mss, &lsoflags);
1164da14cebeSEric Cheng 
1165da14cebeSEric Cheng 	if (lsoflags & HW_LSO) {
1166da14cebeSEric Cheng 		xge_assert((mss != 0) && (mss <= XGE_HAL_DEFAULT_MTU));
11678347601bSyl 		xge_hal_fifo_dtr_mss_set(dtr, mss);
11688347601bSyl 	}
11698347601bSyl 
11700dc2366fSVenugopal Iyer 	mac_hcksum_get(mp, NULL, NULL, NULL, NULL, &hckflags);
11718347601bSyl 	if (hckflags & HCK_IPV4_HDRCKSUM) {
1172a23fd118Syl 		xge_hal_fifo_dtr_cksum_set_bits(dtr,
1173a23fd118Syl 		    XGE_HAL_TXD_TX_CKO_IPV4_EN);
1174a23fd118Syl 	}
11758347601bSyl 	if (hckflags & HCK_FULLCKSUM) {
1176a23fd118Syl 		xge_hal_fifo_dtr_cksum_set_bits(dtr, XGE_HAL_TXD_TX_CKO_TCP_EN |
1177a23fd118Syl 		    XGE_HAL_TXD_TX_CKO_UDP_EN);
1178a23fd118Syl 	}
1179a23fd118Syl 
1180da14cebeSEric Cheng 	xge_hal_fifo_dtr_post(ring->channelh, dtr);
1181a23fd118Syl 
11820dc2366fSVenugopal Iyer 	/* Update per-ring tx statistics */
1183*1a5e258fSJosef 'Jeff' Sipek 	atomic_inc_64(&ring->tx_pkts);
11840dc2366fSVenugopal Iyer 	atomic_add_64(&ring->tx_bytes, sent_bytes);
11850dc2366fSVenugopal Iyer 
1186da14cebeSEric Cheng 	return (NULL);
1187a23fd118Syl 
1188a23fd118Syl _exit_cleanup:
1189da14cebeSEric Cheng 	/*
1190da14cebeSEric Cheng 	 * Could not successfully transmit but have changed the message,
1191da14cebeSEric Cheng 	 * so just free it and return NULL
1192da14cebeSEric Cheng 	 */
1193a23fd118Syl 	for (i = 0; i < handle_cnt; i++) {
1194a23fd118Syl 		(void) ddi_dma_unbind_handle(txd_priv->dma_handles[i]);
1195a23fd118Syl 		ddi_dma_free_handle(&txd_priv->dma_handles[i]);
1196a23fd118Syl 		txd_priv->dma_handles[i] = 0;
1197a23fd118Syl 	}
1198a23fd118Syl 
1199da14cebeSEric Cheng 	xge_hal_fifo_dtr_free(ring->channelh, dtr);
1200da14cebeSEric Cheng 
1201da14cebeSEric Cheng 	freemsg(mp);
1202da14cebeSEric Cheng 	return (NULL);
1203a23fd118Syl 
1204a23fd118Syl _exit:
1205da14cebeSEric Cheng 	ring->need_resched = B_TRUE;
1206da14cebeSEric Cheng 	return (mp);
1207da14cebeSEric Cheng }
1208da14cebeSEric Cheng 
1209da14cebeSEric Cheng /*
1210da14cebeSEric Cheng  * xgell_rx_ring_maddr_init
1211da14cebeSEric Cheng  */
1212da14cebeSEric Cheng static void
1213da14cebeSEric Cheng xgell_rx_ring_maddr_init(xgell_rx_ring_t *ring)
1214da14cebeSEric Cheng {
1215da14cebeSEric Cheng 	int i;
1216da14cebeSEric Cheng 	xgelldev_t *lldev = ring->lldev;
1217da14cebeSEric Cheng 	xge_hal_device_t *hldev = lldev->devh;
1218da14cebeSEric Cheng 	int slot_start;
1219da14cebeSEric Cheng 
1220da14cebeSEric Cheng 	xge_debug_ll(XGE_TRACE, "%s", "xgell_rx_ring_maddr_init");
1221da14cebeSEric Cheng 
1222da14cebeSEric Cheng 	ring->mmac.naddr = XGE_RX_MULTI_MAC_ADDRESSES_MAX;
1223da14cebeSEric Cheng 	ring->mmac.naddrfree = ring->mmac.naddr;
1224da14cebeSEric Cheng 
1225da14cebeSEric Cheng 	/*
1226da14cebeSEric Cheng 	 * For the default rx ring, the first MAC address is the factory one.
1227da14cebeSEric Cheng 	 * This will be set by the framework, so need to clear it for now.
1228da14cebeSEric Cheng 	 * This will be set by the framework, so it needs to be cleared for now.
1229da14cebeSEric Cheng 	(void) xge_hal_device_macaddr_clear(hldev, 0);
1230da14cebeSEric Cheng 
1231da14cebeSEric Cheng 	/*
1232da14cebeSEric Cheng 	 * Read the MAC address Configuration Memory from HAL.
1233da14cebeSEric Cheng 	 * The first slot will hold a factory MAC address, contents in other
1234da14cebeSEric Cheng 	 * slots will be FF:FF:FF:FF:FF:FF.
1235da14cebeSEric Cheng 	 */
1236da14cebeSEric Cheng 	slot_start = ring->index * 32;
1237da14cebeSEric Cheng 	for (i = 0; i < ring->mmac.naddr; i++) {
1238da14cebeSEric Cheng 		(void) xge_hal_device_macaddr_get(hldev, slot_start + i,
1239da14cebeSEric Cheng 		    ring->mmac.mac_addr + i);
1240da14cebeSEric Cheng 		ring->mmac.mac_addr_set[i] = B_FALSE;
1241da14cebeSEric Cheng 	}
1242da14cebeSEric Cheng }
1243da14cebeSEric Cheng 
1244da14cebeSEric Cheng static int xgell_maddr_set(xgelldev_t *, int, uint8_t *);
1245da14cebeSEric Cheng 
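/*
 * xgell_addmac
 *
 * Add a unicast MAC address to this rx ring (group): program it into a free
 * slot of the ring's section (32 slots per ring) of the RTS MAC address
 * table, enable RTS for that section, and read the address back to keep the
 * local array up to date.
 */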
1246da14cebeSEric Cheng static int
1247da14cebeSEric Cheng xgell_addmac(void *arg, const uint8_t *mac_addr)
1248da14cebeSEric Cheng {
1249da14cebeSEric Cheng 	xgell_rx_ring_t *ring = arg;
1250da14cebeSEric Cheng 	xgelldev_t *lldev = ring->lldev;
1251da14cebeSEric Cheng 	xge_hal_device_t *hldev = lldev->devh;
1252da14cebeSEric Cheng 	int slot;
1253da14cebeSEric Cheng 	int slot_start;
1254da14cebeSEric Cheng 
1255da14cebeSEric Cheng 	xge_debug_ll(XGE_TRACE, "%s", "xgell_addmac");
1256da14cebeSEric Cheng 
1257da14cebeSEric Cheng 	mutex_enter(&lldev->genlock);
1258da14cebeSEric Cheng 
1259da14cebeSEric Cheng 	if (ring->mmac.naddrfree == 0) {
1260da14cebeSEric Cheng 		mutex_exit(&lldev->genlock);
1261da14cebeSEric Cheng 		return (ENOSPC);
1262da14cebeSEric Cheng 	}
1263da14cebeSEric Cheng 
1264da14cebeSEric Cheng 	/* First slot is for factory MAC address */
1265da14cebeSEric Cheng 	for (slot = 0; slot < ring->mmac.naddr; slot++) {
1266da14cebeSEric Cheng 		if (ring->mmac.mac_addr_set[slot] == B_FALSE) {
1267da14cebeSEric Cheng 			break;
1268a23fd118Syl 		}
1269a23fd118Syl 	}
1270a23fd118Syl 
1271da14cebeSEric Cheng 	ASSERT(slot < ring->mmac.naddr);
1272da14cebeSEric Cheng 
1273da14cebeSEric Cheng 	slot_start = ring->index * 32;
1274da14cebeSEric Cheng 
1275da14cebeSEric Cheng 	if (xgell_maddr_set(lldev, slot_start + slot, (uint8_t *)mac_addr) !=
1276da14cebeSEric Cheng 	    0) {
1277da14cebeSEric Cheng 		mutex_exit(&lldev->genlock);
1278da14cebeSEric Cheng 		return (EIO);
1279da14cebeSEric Cheng 	}
1280da14cebeSEric Cheng 
1281da14cebeSEric Cheng 	/* Simply enable RTS for the whole section. */
1282da14cebeSEric Cheng 	(void) xge_hal_device_rts_section_enable(hldev, slot_start + slot);
1283da14cebeSEric Cheng 
1284da14cebeSEric Cheng 	/*
1285da14cebeSEric Cheng 	 * Read back the MAC address from HAL to keep the array up to date.
1286da14cebeSEric Cheng 	 */
1287da14cebeSEric Cheng 	if (xge_hal_device_macaddr_get(hldev, slot_start + slot,
1288da14cebeSEric Cheng 	    ring->mmac.mac_addr + slot) != XGE_HAL_OK) {
1289da14cebeSEric Cheng 		(void) xge_hal_device_macaddr_clear(hldev, slot_start + slot);
1290da14cebeSEric Cheng 		return (EIO);
1291da14cebeSEric Cheng 	}
1292da14cebeSEric Cheng 
1293da14cebeSEric Cheng 	ring->mmac.mac_addr_set[slot] = B_TRUE;
1294da14cebeSEric Cheng 	ring->mmac.naddrfree--;
1295da14cebeSEric Cheng 
1296da14cebeSEric Cheng 	mutex_exit(&lldev->genlock);
1297da14cebeSEric Cheng 
1298da14cebeSEric Cheng 	return (0);
1299da14cebeSEric Cheng }
1300da14cebeSEric Cheng 
1301da14cebeSEric Cheng static int
xgell_remmac(void * arg,const uint8_t * mac_addr)1302da14cebeSEric Cheng xgell_remmac(void *arg, const uint8_t *mac_addr)
1303da14cebeSEric Cheng {
1304da14cebeSEric Cheng 	xgell_rx_ring_t *ring = arg;
1305da14cebeSEric Cheng 	xgelldev_t *lldev = ring->lldev;
1306da14cebeSEric Cheng 	xge_hal_device_t *hldev = lldev->devh;
1307da14cebeSEric Cheng 	xge_hal_status_e status;
1308da14cebeSEric Cheng 	int slot;
1309da14cebeSEric Cheng 	int slot_start;
1310da14cebeSEric Cheng 
1311da14cebeSEric Cheng 	xge_debug_ll(XGE_TRACE, "%s", "xgell_remmac");
1312da14cebeSEric Cheng 
1313da14cebeSEric Cheng 	slot = xge_hal_device_macaddr_find(hldev, (uint8_t *)mac_addr);
1314da14cebeSEric Cheng 	if (slot == -1)
1315da14cebeSEric Cheng 		return (EINVAL);
1316da14cebeSEric Cheng 
1317da14cebeSEric Cheng 	slot_start = ring->index * 32;
1318da14cebeSEric Cheng 
1319da14cebeSEric Cheng 	/*
1320da14cebeSEric Cheng 	 * Adjust slot to the offset in the MAC array of this ring (group).
1321da14cebeSEric Cheng 	 */
1322da14cebeSEric Cheng 	slot -= slot_start;
1323da14cebeSEric Cheng 
1324da14cebeSEric Cheng 	/*
1325da14cebeSEric Cheng 	 * Only a pre-set MAC address can be removed for this ring (group).
1326da14cebeSEric Cheng 	 */
1327da14cebeSEric Cheng 	if (slot < 0 || slot >= ring->mmac.naddr)
1328da14cebeSEric Cheng 		return (EINVAL);
1329da14cebeSEric Cheng 
1331da14cebeSEric Cheng 	xge_assert(ring->mmac.mac_addr_set[slot]);
1332da14cebeSEric Cheng 
1333da14cebeSEric Cheng 	mutex_enter(&lldev->genlock);
1334da14cebeSEric Cheng 	if (!ring->mmac.mac_addr_set[slot]) {
1335da14cebeSEric Cheng 		mutex_exit(&lldev->genlock);
1336da14cebeSEric Cheng 		/*
1337da14cebeSEric Cheng 		 * WARNING: we should never reach here; the result is unexpected if we do.
1338da14cebeSEric Cheng 		 */
1339da14cebeSEric Cheng 		xge_debug_ll(XGE_ERR,
1340da14cebeSEric Cheng 		    "%s%d: caller is trying to remove an unset MAC address",
1341da14cebeSEric Cheng 		    XGELL_IFNAME, lldev->instance);
1342da14cebeSEric Cheng 		return (ENXIO);
1343da14cebeSEric Cheng 	}
1344da14cebeSEric Cheng 
1345da14cebeSEric Cheng 	status = xge_hal_device_macaddr_clear(hldev, slot_start + slot);
1346da14cebeSEric Cheng 	if (status != XGE_HAL_OK) {
1347da14cebeSEric Cheng 		mutex_exit(&lldev->genlock);
1348da14cebeSEric Cheng 		return (EIO);
1349da14cebeSEric Cheng 	}
1350da14cebeSEric Cheng 
1351da14cebeSEric Cheng 	ring->mmac.mac_addr_set[slot] = B_FALSE;
1352da14cebeSEric Cheng 	ring->mmac.naddrfree++;
1353da14cebeSEric Cheng 
1354da14cebeSEric Cheng 	/*
1355da14cebeSEric Cheng 	 * TODO: Disable MAC RTS if all addresses have been cleared.
1356da14cebeSEric Cheng 	 */
1357da14cebeSEric Cheng 
1358da14cebeSEric Cheng 	/*
1359da14cebeSEric Cheng 	 * Read back the MAC address from HAL to keep the array up to date.
1360da14cebeSEric Cheng 	 */
1361da14cebeSEric Cheng 	(void) xge_hal_device_macaddr_get(hldev, slot_start + slot,
1362da14cebeSEric Cheng 	    ring->mmac.mac_addr + slot);
1363da14cebeSEric Cheng 	mutex_exit(&lldev->genlock);
1364da14cebeSEric Cheng 
1365da14cebeSEric Cheng 	return (0);
1366a23fd118Syl }
1367a23fd118Syl 
1368a23fd118Syl /*
1369da14cebeSEric Cheng  * Temporarily calling hal functions directly.  Note that the MAC layer uses
1370a23fd118Syl  * these hooks to switch a ring between interrupt and polling mode, so
1371da14cebeSEric Cheng  * "interrupt enable" turns ring polling off and vice versa.  With an MSI-X
1372da14cebeSEric Cheng  * implementation no lock would be needed, making interrupt handling faster.
1373a23fd118Syl  */
1374da14cebeSEric Cheng int
xgell_rx_ring_intr_enable(mac_intr_handle_t ih)1375da14cebeSEric Cheng xgell_rx_ring_intr_enable(mac_intr_handle_t ih)
1376a23fd118Syl {
1377da14cebeSEric Cheng 	xgell_rx_ring_t *ring = (xgell_rx_ring_t *)ih;
1378a23fd118Syl 
1379da14cebeSEric Cheng 	mutex_enter(&ring->ring_lock);
1380da14cebeSEric Cheng 	xge_hal_device_rx_channel_disable_polling(ring->channelh);
1381da14cebeSEric Cheng 	mutex_exit(&ring->ring_lock);
1382a23fd118Syl 
1383da14cebeSEric Cheng 	return (0);
1384da14cebeSEric Cheng }
1385da14cebeSEric Cheng 
1386da14cebeSEric Cheng int
xgell_rx_ring_intr_disable(mac_intr_handle_t ih)1387da14cebeSEric Cheng xgell_rx_ring_intr_disable(mac_intr_handle_t ih)
1388da14cebeSEric Cheng {
1389da14cebeSEric Cheng 	xgell_rx_ring_t *ring = (xgell_rx_ring_t *)ih;
1390da14cebeSEric Cheng 
1391da14cebeSEric Cheng 	mutex_enter(&ring->ring_lock);
1392da14cebeSEric Cheng 	xge_hal_device_rx_channel_enable_polling(ring->channelh);
1393da14cebeSEric Cheng 	mutex_exit(&ring->ring_lock);
1394da14cebeSEric Cheng 
1395da14cebeSEric Cheng 	return (0);
1396da14cebeSEric Cheng }
1397da14cebeSEric Cheng 
1398da14cebeSEric Cheng static int
xgell_rx_ring_start(mac_ring_driver_t rh,uint64_t mr_gen_num)1399da14cebeSEric Cheng xgell_rx_ring_start(mac_ring_driver_t rh, uint64_t mr_gen_num)
1400da14cebeSEric Cheng {
1401da14cebeSEric Cheng 	xgell_rx_ring_t *rx_ring = (xgell_rx_ring_t *)rh;
1402da14cebeSEric Cheng 
1403da14cebeSEric Cheng 	rx_ring->ring_gen_num = mr_gen_num;
1404da14cebeSEric Cheng 
1405da14cebeSEric Cheng 	return (0);
1406da14cebeSEric Cheng }
1407da14cebeSEric Cheng 
1408da14cebeSEric Cheng /*ARGSUSED*/
1409da14cebeSEric Cheng static void
xgell_rx_ring_stop(mac_ring_driver_t rh)1410da14cebeSEric Cheng xgell_rx_ring_stop(mac_ring_driver_t rh)
1411da14cebeSEric Cheng {
1412da14cebeSEric Cheng }
1413da14cebeSEric Cheng 
1414da14cebeSEric Cheng /*ARGSUSED*/
1415da14cebeSEric Cheng static int
xgell_tx_ring_start(mac_ring_driver_t rh,uint64_t useless)1416da14cebeSEric Cheng xgell_tx_ring_start(mac_ring_driver_t rh, uint64_t useless)
1417da14cebeSEric Cheng {
1418da14cebeSEric Cheng 	return (0);
1419da14cebeSEric Cheng }
1420da14cebeSEric Cheng 
1421da14cebeSEric Cheng /*ARGSUSED*/
1422da14cebeSEric Cheng static void
xgell_tx_ring_stop(mac_ring_driver_t rh)1423da14cebeSEric Cheng xgell_tx_ring_stop(mac_ring_driver_t rh)
1424da14cebeSEric Cheng {
1425da14cebeSEric Cheng }
1426da14cebeSEric Cheng 
1427da14cebeSEric Cheng /*
1428da14cebeSEric Cheng  * Callback function for the MAC layer to register all rings.
1429da14cebeSEric Cheng  *
1430da14cebeSEric Cheng  * Xframe hardware doesn't support grouping explicitly, so the driver needs
1431da14cebeSEric Cheng  * to pretend it has resource groups. We may either group all 8 rx rings
1432da14cebeSEric Cheng  * into a single group for increased scalability on CMT architectures, or
1433da14cebeSEric Cheng  * put one rx ring in each group for maximum virtualization.
1434da14cebeSEric Cheng  *
1435da14cebeSEric Cheng  * TX grouping is actually done by the framework, so just register all TX
1436da14cebeSEric Cheng  * resources without grouping them.
1437da14cebeSEric Cheng  */
1438da14cebeSEric Cheng void
xgell_fill_ring(void * arg,mac_ring_type_t rtype,const int rg_index,const int index,mac_ring_info_t * infop,mac_ring_handle_t rh)1439da14cebeSEric Cheng xgell_fill_ring(void *arg, mac_ring_type_t rtype, const int rg_index,
1440da14cebeSEric Cheng     const int index, mac_ring_info_t *infop, mac_ring_handle_t rh)
1441da14cebeSEric Cheng {
1442da14cebeSEric Cheng 	xgelldev_t *lldev = (xgelldev_t *)arg;
1443da14cebeSEric Cheng 	mac_intr_t *mintr;
1444da14cebeSEric Cheng 
1445da14cebeSEric Cheng 	switch (rtype) {
1446da14cebeSEric Cheng 	case MAC_RING_TYPE_RX: {
1447da14cebeSEric Cheng 		xgell_rx_ring_t *rx_ring;
1448da14cebeSEric Cheng 
1449da14cebeSEric Cheng 		xge_assert(index < lldev->init_rx_rings);
1450da14cebeSEric Cheng 		xge_assert(rg_index < lldev->init_rx_groups);
1451da14cebeSEric Cheng 
1452da14cebeSEric Cheng 		/*
1453da14cebeSEric Cheng 		 * Performance vs. Virtualization
1454da14cebeSEric Cheng 		 */
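		/*
		 * Note: when the number of rx rings equals the number of rx
		 * groups, each group holds exactly one ring and the ring is
		 * picked by its group index (maximum virtualization);
		 * otherwise the rings presumably share a group and are
		 * picked by the ring index itself (maximum performance).
		 */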
1455da14cebeSEric Cheng 		if (lldev->init_rx_rings == lldev->init_rx_groups)
1456da14cebeSEric Cheng 			rx_ring = lldev->rx_ring + rg_index;
1457da14cebeSEric Cheng 		else
1458da14cebeSEric Cheng 			rx_ring = lldev->rx_ring + index;
1459da14cebeSEric Cheng 
1460da14cebeSEric Cheng 		rx_ring->ring_handle = rh;
1461da14cebeSEric Cheng 
1462da14cebeSEric Cheng 		infop->mri_driver = (mac_ring_driver_t)rx_ring;
1463da14cebeSEric Cheng 		infop->mri_start = xgell_rx_ring_start;
1464da14cebeSEric Cheng 		infop->mri_stop = xgell_rx_ring_stop;
1465da14cebeSEric Cheng 		infop->mri_poll = xgell_rx_poll;
14660dc2366fSVenugopal Iyer 		infop->mri_stat = xgell_rx_ring_stat;
1467da14cebeSEric Cheng 
1468da14cebeSEric Cheng 		mintr = &infop->mri_intr;
1469da14cebeSEric Cheng 		mintr->mi_handle = (mac_intr_handle_t)rx_ring;
1470da14cebeSEric Cheng 		mintr->mi_enable = xgell_rx_ring_intr_enable;
1471da14cebeSEric Cheng 		mintr->mi_disable = xgell_rx_ring_intr_disable;
1472da14cebeSEric Cheng 
1473da14cebeSEric Cheng 		break;
1474a23fd118Syl 	}
1475da14cebeSEric Cheng 	case MAC_RING_TYPE_TX: {
1476da14cebeSEric Cheng 		xgell_tx_ring_t *tx_ring;
1477a23fd118Syl 
1478da14cebeSEric Cheng 		xge_assert(rg_index == -1);
1479da14cebeSEric Cheng 
1480da14cebeSEric Cheng 		xge_assert((index >= 0) && (index < lldev->init_tx_rings));
1481da14cebeSEric Cheng 
1482da14cebeSEric Cheng 		tx_ring = lldev->tx_ring + index;
1483da14cebeSEric Cheng 		tx_ring->ring_handle = rh;
1484da14cebeSEric Cheng 
1485da14cebeSEric Cheng 		infop->mri_driver = (mac_ring_driver_t)tx_ring;
1486da14cebeSEric Cheng 		infop->mri_start = xgell_tx_ring_start;
1487da14cebeSEric Cheng 		infop->mri_stop = xgell_tx_ring_stop;
1488da14cebeSEric Cheng 		infop->mri_tx = xgell_ring_tx;
14890dc2366fSVenugopal Iyer 		infop->mri_stat = xgell_tx_ring_stat;
1490da14cebeSEric Cheng 
1491da14cebeSEric Cheng 		break;
1492da14cebeSEric Cheng 	}
1493da14cebeSEric Cheng 	default:
1494da14cebeSEric Cheng 		break;
1495da14cebeSEric Cheng 	}
1496da14cebeSEric Cheng }
1497da14cebeSEric Cheng 
1498da14cebeSEric Cheng void
xgell_fill_group(void * arg,mac_ring_type_t rtype,const int index,mac_group_info_t * infop,mac_group_handle_t gh)1499da14cebeSEric Cheng xgell_fill_group(void *arg, mac_ring_type_t rtype, const int index,
1500da14cebeSEric Cheng     mac_group_info_t *infop, mac_group_handle_t gh)
1501da14cebeSEric Cheng {
1502da14cebeSEric Cheng 	xgelldev_t *lldev = (xgelldev_t *)arg;
1503da14cebeSEric Cheng 
1504da14cebeSEric Cheng 	switch (rtype) {
1505da14cebeSEric Cheng 	case MAC_RING_TYPE_RX: {
1506da14cebeSEric Cheng 		xgell_rx_ring_t *rx_ring;
1507da14cebeSEric Cheng 
1508da14cebeSEric Cheng 		xge_assert(index < lldev->init_rx_groups);
1509da14cebeSEric Cheng 
1510da14cebeSEric Cheng 		rx_ring = lldev->rx_ring + index;
1511da14cebeSEric Cheng 
1512da14cebeSEric Cheng 		rx_ring->group_handle = gh;
1513da14cebeSEric Cheng 
1514da14cebeSEric Cheng 		infop->mgi_driver = (mac_group_driver_t)rx_ring;
1515da14cebeSEric Cheng 		infop->mgi_start = NULL;
1516da14cebeSEric Cheng 		infop->mgi_stop = NULL;
1517da14cebeSEric Cheng 		infop->mgi_addmac = xgell_addmac;
1518da14cebeSEric Cheng 		infop->mgi_remmac = xgell_remmac;
1519da14cebeSEric Cheng 		infop->mgi_count = lldev->init_rx_rings / lldev->init_rx_groups;
1520da14cebeSEric Cheng 
1521da14cebeSEric Cheng 		break;
1522da14cebeSEric Cheng 	}
1523da14cebeSEric Cheng 	case MAC_RING_TYPE_TX:
1524da14cebeSEric Cheng 		xge_assert(0);
1525da14cebeSEric Cheng 		break;
1526da14cebeSEric Cheng 	default:
1527da14cebeSEric Cheng 		break;
1528da14cebeSEric Cheng 	}
1529da14cebeSEric Cheng }
1530da14cebeSEric Cheng 
1531da14cebeSEric Cheng /*
1532da14cebeSEric Cheng  * xgell_maddr_set
1533da14cebeSEric Cheng  */
1534da14cebeSEric Cheng static int
xgell_maddr_set(xgelldev_t * lldev,int index,uint8_t * macaddr)1535da14cebeSEric Cheng xgell_maddr_set(xgelldev_t *lldev, int index, uint8_t *macaddr)
1536da14cebeSEric Cheng {
1537da14cebeSEric Cheng 	xge_hal_device_t *hldev = lldev->devh;
1538da14cebeSEric Cheng 	xge_hal_status_e status;
1539da14cebeSEric Cheng 
1540da14cebeSEric Cheng 	xge_debug_ll(XGE_TRACE, "%s", "xgell_maddr_set");
1541da14cebeSEric Cheng 
1542da14cebeSEric Cheng 	xge_debug_ll(XGE_TRACE,
1543da14cebeSEric Cheng 	    "setting macaddr: 0x%02x-%02x-%02x-%02x-%02x-%02x",
1544da14cebeSEric Cheng 	    macaddr[0], macaddr[1], macaddr[2],
1545da14cebeSEric Cheng 	    macaddr[3], macaddr[4], macaddr[5]);
1546da14cebeSEric Cheng 
1547da14cebeSEric Cheng 	status = xge_hal_device_macaddr_set(hldev, index, (uchar_t *)macaddr);
1548da14cebeSEric Cheng 
1549da14cebeSEric Cheng 	if (status != XGE_HAL_OK) {
1550da14cebeSEric Cheng 		xge_debug_ll(XGE_ERR, "%s%d: can not set mac address",
1551da14cebeSEric Cheng 		    XGELL_IFNAME, lldev->instance);
1552da14cebeSEric Cheng 		return (EIO);
1553da14cebeSEric Cheng 	}
1554da14cebeSEric Cheng 
1555da14cebeSEric Cheng 	return (0);
1556a23fd118Syl }
1557a23fd118Syl 
1558a23fd118Syl /*
1559a23fd118Syl  * xgell_rx_dtr_term
1560a23fd118Syl  *
1561a23fd118Syl  * Function will be called by HAL to terminate all DTRs for
1562a23fd118Syl  * Ring(s) type of channels.
1563a23fd118Syl  */
1564a23fd118Syl static void
xgell_rx_dtr_term(xge_hal_channel_h channelh,xge_hal_dtr_h dtrh,xge_hal_dtr_state_e state,void * userdata,xge_hal_channel_reopen_e reopen)1565a23fd118Syl xgell_rx_dtr_term(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh,
1566a23fd118Syl     xge_hal_dtr_state_e state, void *userdata, xge_hal_channel_reopen_e reopen)
1567a23fd118Syl {
1568a23fd118Syl 	xgell_rxd_priv_t *rxd_priv =
1569a23fd118Syl 	    ((xgell_rxd_priv_t *)xge_hal_ring_dtr_private(channelh, dtrh));
1570a23fd118Syl 	xgell_rx_buffer_t *rx_buffer = rxd_priv->rx_buffer;
1571a23fd118Syl 
1572a23fd118Syl 	if (state == XGE_HAL_DTR_STATE_POSTED) {
1573da14cebeSEric Cheng 		xgell_rx_ring_t *ring = rx_buffer->ring;
1574da14cebeSEric Cheng 
15757eced415Sxw 		mutex_enter(&ring->bf_pool.pool_lock);
1576a23fd118Syl 		xge_hal_ring_dtr_free(channelh, dtrh);
1577a23fd118Syl 		xgell_rx_buffer_release(rx_buffer);
15787eced415Sxw 		mutex_exit(&ring->bf_pool.pool_lock);
1579a23fd118Syl 	}
1580a23fd118Syl }
1581a23fd118Syl 
1582da14cebeSEric Cheng /*
1583da14cebeSEric Cheng  * To open a rx ring.
1584da14cebeSEric Cheng  */
1585da14cebeSEric Cheng static boolean_t
xgell_rx_ring_open(xgell_rx_ring_t * rx_ring)1586da14cebeSEric Cheng xgell_rx_ring_open(xgell_rx_ring_t *rx_ring)
1587da14cebeSEric Cheng {
1588da14cebeSEric Cheng 	xge_hal_status_e status;
1589da14cebeSEric Cheng 	xge_hal_channel_attr_t attr;
1590da14cebeSEric Cheng 	xgelldev_t *lldev = rx_ring->lldev;
1591da14cebeSEric Cheng 	xge_hal_device_t *hldev = lldev->devh;
1592da14cebeSEric Cheng 
1593da14cebeSEric Cheng 	if (rx_ring->live)
1594da14cebeSEric Cheng 		return (B_TRUE);
1595da14cebeSEric Cheng 
1596da14cebeSEric Cheng 	/* Create the buffer pool first */
1597da14cebeSEric Cheng 	if (!xgell_rx_create_buffer_pool(rx_ring)) {
1598da14cebeSEric Cheng 		xge_debug_ll(XGE_ERR, "can not create buffer pool for ring: %d",
1599da14cebeSEric Cheng 		    rx_ring->index);
1600da14cebeSEric Cheng 		return (B_FALSE);
1601da14cebeSEric Cheng 	}
1602da14cebeSEric Cheng 
1603da14cebeSEric Cheng 	/* Default ring initialization */
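	/*
	 * Note: dtr_init/dtr_term are the HAL hooks that replenish rx
	 * buffers when descriptors are initialized and release them when
	 * the channel is terminated; the callback presumably delivers
	 * 1-buffer mode receive completions.
	 */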
1604da14cebeSEric Cheng 	attr.post_qid		= rx_ring->index;
1605da14cebeSEric Cheng 	attr.compl_qid		= 0;
1606da14cebeSEric Cheng 	attr.callback		= xgell_rx_1b_callback;
1607da14cebeSEric Cheng 	attr.per_dtr_space	= sizeof (xgell_rxd_priv_t);
1608da14cebeSEric Cheng 	attr.flags		= 0;
1609da14cebeSEric Cheng 	attr.type		= XGE_HAL_CHANNEL_TYPE_RING;
1610da14cebeSEric Cheng 	attr.dtr_init		= xgell_rx_dtr_replenish;
1611da14cebeSEric Cheng 	attr.dtr_term		= xgell_rx_dtr_term;
1612da14cebeSEric Cheng 	attr.userdata		= rx_ring;
1613da14cebeSEric Cheng 
1614da14cebeSEric Cheng 	status = xge_hal_channel_open(lldev->devh, &attr, &rx_ring->channelh,
1615da14cebeSEric Cheng 	    XGE_HAL_CHANNEL_OC_NORMAL);
1616da14cebeSEric Cheng 	if (status != XGE_HAL_OK) {
1617da14cebeSEric Cheng 		xge_debug_ll(XGE_ERR, "%s%d: cannot open Rx channel, got status "
1618da14cebeSEric Cheng 		    "code %d", XGELL_IFNAME, lldev->instance, status);
1619da14cebeSEric Cheng 		(void) xgell_rx_destroy_buffer_pool(rx_ring);
1620da14cebeSEric Cheng 		return (B_FALSE);
1621da14cebeSEric Cheng 	}
1622da14cebeSEric Cheng 
1623da14cebeSEric Cheng 	xgell_rx_ring_maddr_init(rx_ring);
1624da14cebeSEric Cheng 
1625da14cebeSEric Cheng 	mutex_init(&rx_ring->ring_lock, NULL, MUTEX_DRIVER,
1626da14cebeSEric Cheng 	    DDI_INTR_PRI(hldev->irqh));
1627da14cebeSEric Cheng 
1628da14cebeSEric Cheng 	rx_ring->poll_bytes = -1;
1629da14cebeSEric Cheng 	rx_ring->polled_bytes = 0;
1630da14cebeSEric Cheng 	rx_ring->poll_mp = NULL;
1631da14cebeSEric Cheng 	rx_ring->live = B_TRUE;
1632da14cebeSEric Cheng 
1633da14cebeSEric Cheng 	xge_debug_ll(XGE_TRACE, "RX ring [%d] is opened successfully",
1634da14cebeSEric Cheng 	    rx_ring->index);
1635da14cebeSEric Cheng 
1636da14cebeSEric Cheng 	return (B_TRUE);
1637da14cebeSEric Cheng }
1638da14cebeSEric Cheng 
1639da14cebeSEric Cheng static void
xgell_rx_ring_close(xgell_rx_ring_t * rx_ring)1640da14cebeSEric Cheng xgell_rx_ring_close(xgell_rx_ring_t *rx_ring)
1641da14cebeSEric Cheng {
1642da14cebeSEric Cheng 	if (!rx_ring->live)
1643da14cebeSEric Cheng 		return;
1644da14cebeSEric Cheng 	xge_hal_channel_close(rx_ring->channelh, XGE_HAL_CHANNEL_OC_NORMAL);
1645da14cebeSEric Cheng 	rx_ring->channelh = NULL;
1646da14cebeSEric Cheng 	/* This may not clean up all used buffers; the driver will handle it */
1647da14cebeSEric Cheng 	if (xgell_rx_destroy_buffer_pool(rx_ring))
1648da14cebeSEric Cheng 		rx_ring->live = B_FALSE;
1649da14cebeSEric Cheng 
1650da14cebeSEric Cheng 	mutex_destroy(&rx_ring->ring_lock);
1651da14cebeSEric Cheng }
1652da14cebeSEric Cheng 
1653da14cebeSEric Cheng /*
1654da14cebeSEric Cheng  * xgell_rx_open
1655da14cebeSEric Cheng  * @lldev: the link layer object
1656da14cebeSEric Cheng  *
1657da14cebeSEric Cheng  * Initialize and open all RX channels.
1658da14cebeSEric Cheng  */
1659da14cebeSEric Cheng static boolean_t
xgell_rx_open(xgelldev_t * lldev)1660da14cebeSEric Cheng xgell_rx_open(xgelldev_t *lldev)
1661da14cebeSEric Cheng {
1662da14cebeSEric Cheng 	xgell_rx_ring_t *rx_ring;
1663da14cebeSEric Cheng 	int i;
1664da14cebeSEric Cheng 
1665da14cebeSEric Cheng 	if (lldev->live_rx_rings != 0)
1666da14cebeSEric Cheng 		return (B_TRUE);
1667da14cebeSEric Cheng 
1668da14cebeSEric Cheng 	lldev->live_rx_rings = 0;
1669da14cebeSEric Cheng 
1670da14cebeSEric Cheng 	/*
1671da14cebeSEric Cheng 	 * Initialize all rings
1672da14cebeSEric Cheng 	 */
1673da14cebeSEric Cheng 	for (i = 0; i < lldev->init_rx_rings; i++) {
1674da14cebeSEric Cheng 		rx_ring = &lldev->rx_ring[i];
1675da14cebeSEric Cheng 		rx_ring->index = i;
1676da14cebeSEric Cheng 		rx_ring->lldev = lldev;
1677da14cebeSEric Cheng 		rx_ring->live = B_FALSE;
1678da14cebeSEric Cheng 
1679da14cebeSEric Cheng 		if (!xgell_rx_ring_open(rx_ring))
1680da14cebeSEric Cheng 			return (B_FALSE);
1681da14cebeSEric Cheng 
1682da14cebeSEric Cheng 		lldev->live_rx_rings++;
1683da14cebeSEric Cheng 	}
1684da14cebeSEric Cheng 
1685da14cebeSEric Cheng 	return (B_TRUE);
1686da14cebeSEric Cheng }
1687da14cebeSEric Cheng 
1688da14cebeSEric Cheng static void
xgell_rx_close(xgelldev_t * lldev)1689da14cebeSEric Cheng xgell_rx_close(xgelldev_t *lldev)
1690da14cebeSEric Cheng {
1691da14cebeSEric Cheng 	xgell_rx_ring_t *rx_ring;
1692da14cebeSEric Cheng 	int i;
1693da14cebeSEric Cheng 
1694da14cebeSEric Cheng 	if (lldev->live_rx_rings == 0)
1695da14cebeSEric Cheng 		return;
1696da14cebeSEric Cheng 
1697da14cebeSEric Cheng 	/*
1698da14cebeSEric Cheng 	 * Close all rx rings
1699da14cebeSEric Cheng 	 */
1700da14cebeSEric Cheng 	for (i = 0; i < lldev->init_rx_rings; i++) {
1701da14cebeSEric Cheng 		rx_ring = &lldev->rx_ring[i];
1702da14cebeSEric Cheng 
1703da14cebeSEric Cheng 		if (rx_ring->live) {
1704da14cebeSEric Cheng 			xgell_rx_ring_close(rx_ring);
1705da14cebeSEric Cheng 			lldev->live_rx_rings--;
1706da14cebeSEric Cheng 		}
1707da14cebeSEric Cheng 	}
1708da14cebeSEric Cheng 
1709da14cebeSEric Cheng 	xge_assert(lldev->live_rx_rings == 0);
1710da14cebeSEric Cheng }
1711da14cebeSEric Cheng 
1712a23fd118Syl /*
1713a23fd118Syl  * xgell_tx_term
1714a23fd118Syl  *
1715a23fd118Syl  * Function will be called by HAL to terminate all DTRs for
1716a23fd118Syl  * Fifo(s) type of channels.
1717a23fd118Syl  */
1718a23fd118Syl static void
xgell_tx_term(xge_hal_channel_h channelh,xge_hal_dtr_h dtrh,xge_hal_dtr_state_e state,void * userdata,xge_hal_channel_reopen_e reopen)1719a23fd118Syl xgell_tx_term(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh,
1720a23fd118Syl     xge_hal_dtr_state_e state, void *userdata, xge_hal_channel_reopen_e reopen)
1721a23fd118Syl {
1722a23fd118Syl 	xgell_txd_priv_t *txd_priv =
1723a23fd118Syl 	    ((xgell_txd_priv_t *)xge_hal_fifo_dtr_private(dtrh));
1724a23fd118Syl 	mblk_t *mp = txd_priv->mblk;
1725a23fd118Syl 	int i;
17268347601bSyl 
1727a23fd118Syl 	/*
1728a23fd118Syl 	 * for Tx we must clean up the DTR *only* if it has been
1729a23fd118Syl 	 * posted!
1730a23fd118Syl 	 */
1731a23fd118Syl 	if (state != XGE_HAL_DTR_STATE_POSTED) {
1732a23fd118Syl 		return;
1733a23fd118Syl 	}
1734a23fd118Syl 
1735a23fd118Syl 	for (i = 0; i < txd_priv->handle_cnt; i++) {
1736a23fd118Syl 		xge_assert(txd_priv->dma_handles[i]);
1737a23fd118Syl 		(void) ddi_dma_unbind_handle(txd_priv->dma_handles[i]);
1738a23fd118Syl 		ddi_dma_free_handle(&txd_priv->dma_handles[i]);
1739a23fd118Syl 		txd_priv->dma_handles[i] = 0;
1740a23fd118Syl 	}
1741a23fd118Syl 
1742a23fd118Syl 	xge_hal_fifo_dtr_free(channelh, dtrh);
1743a23fd118Syl 
17447eced415Sxw 	if (mp) {
17457eced415Sxw 		txd_priv->mblk = NULL;
17467eced415Sxw 		freemsg(mp);
17477eced415Sxw 	}
17487eced415Sxw }
17497eced415Sxw 
1750a23fd118Syl static boolean_t
xgell_tx_ring_open(xgell_tx_ring_t * tx_ring)1751da14cebeSEric Cheng xgell_tx_ring_open(xgell_tx_ring_t *tx_ring)
1752a23fd118Syl {
1753a23fd118Syl 	xge_hal_status_e status;
1754a23fd118Syl 	xge_hal_channel_attr_t attr;
1755da14cebeSEric Cheng 	xgelldev_t *lldev = tx_ring->lldev;
1756da14cebeSEric Cheng 
1757da14cebeSEric Cheng 	if (tx_ring->live)
1758da14cebeSEric Cheng 		return (B_TRUE);
1759a23fd118Syl 
1760da14cebeSEric Cheng 	attr.post_qid		= tx_ring->index;
1761a23fd118Syl 	attr.compl_qid		= 0;
1762a23fd118Syl 	attr.callback		= xgell_xmit_compl;
1763a23fd118Syl 	attr.per_dtr_space	= sizeof (xgell_txd_priv_t);
1764a23fd118Syl 	attr.flags		= 0;
1765a23fd118Syl 	attr.type		= XGE_HAL_CHANNEL_TYPE_FIFO;
1766a23fd118Syl 	attr.dtr_init		= NULL;
1767a23fd118Syl 	attr.dtr_term		= xgell_tx_term;
1768da14cebeSEric Cheng 	attr.userdata		= tx_ring;
1769a23fd118Syl 
1770da14cebeSEric Cheng 	status = xge_hal_channel_open(lldev->devh, &attr, &tx_ring->channelh,
1771da14cebeSEric Cheng 	    XGE_HAL_CHANNEL_OC_NORMAL);
1772da14cebeSEric Cheng 	if (status != XGE_HAL_OK) {
1773da14cebeSEric Cheng 		xge_debug_ll(XGE_ERR, "%s%d: cannot open Tx channel, got status "
1774da14cebeSEric Cheng 		    "code %d", XGELL_IFNAME, lldev->instance, status);
1775a23fd118Syl 		return (B_FALSE);
1776a23fd118Syl 	}
1777a23fd118Syl 
1778da14cebeSEric Cheng 	tx_ring->live = B_TRUE;
1779a23fd118Syl 
1780a23fd118Syl 	return (B_TRUE);
1781a23fd118Syl }
1782a23fd118Syl 
17837eced415Sxw static void
xgell_tx_ring_close(xgell_tx_ring_t * tx_ring)1784da14cebeSEric Cheng xgell_tx_ring_close(xgell_tx_ring_t *tx_ring)
17857eced415Sxw {
1786da14cebeSEric Cheng 	if (!tx_ring->live)
1787da14cebeSEric Cheng 		return;
1788da14cebeSEric Cheng 	xge_hal_channel_close(tx_ring->channelh, XGE_HAL_CHANNEL_OC_NORMAL);
1789da14cebeSEric Cheng 	tx_ring->live = B_FALSE;
17907eced415Sxw }
17917eced415Sxw 
1792a23fd118Syl /*
1793da14cebeSEric Cheng  * xgell_tx_open
1794a23fd118Syl  * @lldev: the link layer object
1795a23fd118Syl  *
1796da14cebeSEric Cheng  * Initialize and open all TX channels.
1797a23fd118Syl  */
1798a23fd118Syl static boolean_t
xgell_tx_open(xgelldev_t * lldev)1799da14cebeSEric Cheng xgell_tx_open(xgelldev_t *lldev)
1800a23fd118Syl {
1801da14cebeSEric Cheng 	xgell_tx_ring_t *tx_ring;
1802da14cebeSEric Cheng 	int i;
1803a23fd118Syl 
1804da14cebeSEric Cheng 	if (lldev->live_tx_rings != 0)
1805da14cebeSEric Cheng 		return (B_TRUE);
1806a23fd118Syl 
1807da14cebeSEric Cheng 	lldev->live_tx_rings = 0;
1808a23fd118Syl 
18097eced415Sxw 	/*
1810da14cebeSEric Cheng 	 * Open the rings in the order they were reserved, to match the h/w sequence.
18117eced415Sxw 	 */
1812da14cebeSEric Cheng 	for (i = 0; i < lldev->init_tx_rings; i++) {
1813da14cebeSEric Cheng 		tx_ring = &lldev->tx_ring[i];
1814da14cebeSEric Cheng 		tx_ring->index = i;
1815da14cebeSEric Cheng 		tx_ring->lldev = lldev;
1816da14cebeSEric Cheng 		tx_ring->live = B_FALSE;
18177eced415Sxw 
1818da14cebeSEric Cheng 		if (!xgell_tx_ring_open(tx_ring))
18197eced415Sxw 			return (B_FALSE);
18207eced415Sxw 
1821da14cebeSEric Cheng 		lldev->live_tx_rings++;
1822a23fd118Syl 	}
1823a23fd118Syl 
1824a23fd118Syl 	return (B_TRUE);
1825a23fd118Syl }
1826a23fd118Syl 
1827da14cebeSEric Cheng static void
xgell_tx_close(xgelldev_t * lldev)1828da14cebeSEric Cheng xgell_tx_close(xgelldev_t *lldev)
1829da14cebeSEric Cheng {
1830da14cebeSEric Cheng 	xgell_tx_ring_t *tx_ring;
1831da14cebeSEric Cheng 	int i;
1832da14cebeSEric Cheng 
1833da14cebeSEric Cheng 	if (lldev->live_tx_rings == 0)
1834da14cebeSEric Cheng 		return;
1835da14cebeSEric Cheng 
1836da14cebeSEric Cheng 	/*
1837da14cebeSEric Cheng 	 * Close all tx rings.
1838da14cebeSEric Cheng 	 */
1839da14cebeSEric Cheng 	for (i = 0; i < lldev->init_tx_rings; i++) {
1840da14cebeSEric Cheng 		tx_ring = &lldev->tx_ring[i];
1841da14cebeSEric Cheng 		if (tx_ring->live) {
1842da14cebeSEric Cheng 			xgell_tx_ring_close(tx_ring);
1843da14cebeSEric Cheng 			lldev->live_tx_rings--;
1844da14cebeSEric Cheng 		}
1845da14cebeSEric Cheng 	}
1846da14cebeSEric Cheng }
1847da14cebeSEric Cheng 
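/*
 * xgell_initiate_start
 * @lldev: the link layer object
 *
 * Bring the device up: validate and program the MTU, tune the rx UFC
 * counters for jumbo vs. normal frames, enable the device, open all rx and
 * tx rings, and finally enable interrupts.  Both callers hold genlock.
 */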
1848a23fd118Syl static int
xgell_initiate_start(xgelldev_t * lldev)1849a23fd118Syl xgell_initiate_start(xgelldev_t *lldev)
1850a23fd118Syl {
1851a23fd118Syl 	xge_hal_status_e status;
1852a23fd118Syl 	xge_hal_device_t *hldev = lldev->devh;
1853ba2e4443Sseb 	int maxpkt = hldev->config.mtu;
1854a23fd118Syl 
1855a23fd118Syl 	/* check initial mtu before enabling the device */
1856a23fd118Syl 	status = xge_hal_device_mtu_check(lldev->devh, maxpkt);
1857a23fd118Syl 	if (status != XGE_HAL_OK) {
1858a23fd118Syl 		xge_debug_ll(XGE_ERR, "%s%d: MTU size %d is invalid",
1859a23fd118Syl 		    XGELL_IFNAME, lldev->instance, maxpkt);
1860a23fd118Syl 		return (EINVAL);
1861a23fd118Syl 	}
1862a23fd118Syl 
1863a23fd118Syl 	/* set initial mtu before enabling the device */
1864a23fd118Syl 	status = xge_hal_device_mtu_set(lldev->devh, maxpkt);
1865a23fd118Syl 	if (status != XGE_HAL_OK) {
1866a23fd118Syl 		xge_debug_ll(XGE_ERR, "%s%d: can not set new MTU %d",
1867a23fd118Syl 		    XGELL_IFNAME, lldev->instance, maxpkt);
1868a23fd118Syl 		return (EIO);
1869a23fd118Syl 	}
1870a23fd118Syl 
18718347601bSyl 	/* tune jumbo/normal frame UFC counters */
1872da14cebeSEric Cheng 	hldev->config.ring.queue[XGELL_RX_RING_MAIN].rti.ufc_b =
1873da14cebeSEric Cheng 	    (maxpkt > XGE_HAL_DEFAULT_MTU) ?
18747eced415Sxw 	    XGE_HAL_DEFAULT_RX_UFC_B_J :
18757eced415Sxw 	    XGE_HAL_DEFAULT_RX_UFC_B_N;
18768347601bSyl 
1877da14cebeSEric Cheng 	hldev->config.ring.queue[XGELL_RX_RING_MAIN].rti.ufc_c =
1878da14cebeSEric Cheng 	    (maxpkt > XGE_HAL_DEFAULT_MTU) ?
18797eced415Sxw 	    XGE_HAL_DEFAULT_RX_UFC_C_J :
18807eced415Sxw 	    XGE_HAL_DEFAULT_RX_UFC_C_N;
18818347601bSyl 
1882a23fd118Syl 	/* now, enable the device */
1883a23fd118Syl 	status = xge_hal_device_enable(lldev->devh);
1884a23fd118Syl 	if (status != XGE_HAL_OK) {
1885a23fd118Syl 		xge_debug_ll(XGE_ERR, "%s%d: can not enable the device",
1886a23fd118Syl 		    XGELL_IFNAME, lldev->instance);
1887a23fd118Syl 		return (EIO);
1888a23fd118Syl 	}
1889a23fd118Syl 
1890a23fd118Syl 	if (!xgell_rx_open(lldev)) {
1891a23fd118Syl 		status = xge_hal_device_disable(lldev->devh);
1892a23fd118Syl 		if (status != XGE_HAL_OK) {
1893a23fd118Syl 			u64 adapter_status;
1894a23fd118Syl 			(void) xge_hal_device_status(lldev->devh,
1895a23fd118Syl 			    &adapter_status);
1896a23fd118Syl 			xge_debug_ll(XGE_ERR, "%s%d: can not safely disable "
1897a23fd118Syl 			    "the device. adapter status 0x%"PRIx64
1898a23fd118Syl 			    " returned status %d",
1899a23fd118Syl 			    XGELL_IFNAME, lldev->instance,
1900a23fd118Syl 			    (uint64_t)adapter_status, status);
1901a23fd118Syl 		}
1902da14cebeSEric Cheng 		xgell_rx_close(lldev);
1903a23fd118Syl 		xge_os_mdelay(1500);
1904a23fd118Syl 		return (ENOMEM);
1905a23fd118Syl 	}
1906a23fd118Syl 
1907a23fd118Syl 	if (!xgell_tx_open(lldev)) {
1908a23fd118Syl 		status = xge_hal_device_disable(lldev->devh);
1909a23fd118Syl 		if (status != XGE_HAL_OK) {
1910a23fd118Syl 			u64 adapter_status;
1911a23fd118Syl 			(void) xge_hal_device_status(lldev->devh,
1912a23fd118Syl 			    &adapter_status);
1913a23fd118Syl 			xge_debug_ll(XGE_ERR, "%s%d: can not safely disable "
1914a23fd118Syl 			    "the device. adapter status 0x%"PRIx64
1915a23fd118Syl 			    " returned status %d",
1916a23fd118Syl 			    XGELL_IFNAME, lldev->instance,
1917a23fd118Syl 			    (uint64_t)adapter_status, status);
1918a23fd118Syl 		}
1919da14cebeSEric Cheng 		xgell_tx_close(lldev);
19207eced415Sxw 		xgell_rx_close(lldev);
1921da14cebeSEric Cheng 		xge_os_mdelay(1500);
1922a23fd118Syl 		return (ENOMEM);
1923a23fd118Syl 	}
1924a23fd118Syl 
1925a23fd118Syl 	/* time to enable interrupts */
19267eced415Sxw 	(void) xge_enable_intrs(lldev);
1927a23fd118Syl 	xge_hal_device_intr_enable(lldev->devh);
1928a23fd118Syl 
1929a23fd118Syl 	lldev->is_initialized = 1;
1930a23fd118Syl 
1931a23fd118Syl 	return (0);
1932a23fd118Syl }
1933a23fd118Syl 
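/*
 * xgell_initiate_stop
 * @lldev: the link layer object
 *
 * Reverse of xgell_initiate_start(): disable the device and its interrupts,
 * wait for the device irq to quiesce, flush the HAL event queue and close
 * all rx and tx rings.  Both callers hold genlock.
 */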
1934a23fd118Syl static void
xgell_initiate_stop(xgelldev_t * lldev)1935a23fd118Syl xgell_initiate_stop(xgelldev_t *lldev)
1936a23fd118Syl {
1937a23fd118Syl 	xge_hal_status_e status;
1938a23fd118Syl 
1939a23fd118Syl 	lldev->is_initialized = 0;
1940a23fd118Syl 
1941a23fd118Syl 	status = xge_hal_device_disable(lldev->devh);
1942a23fd118Syl 	if (status != XGE_HAL_OK) {
1943a23fd118Syl 		u64 adapter_status;
1944a23fd118Syl 		(void) xge_hal_device_status(lldev->devh, &adapter_status);
1945a23fd118Syl 		xge_debug_ll(XGE_ERR, "%s%d: can not safely disable "
1946a23fd118Syl 		    "the device. adapter status 0x%"PRIx64" returned status %d",
1947a23fd118Syl 		    XGELL_IFNAME, lldev->instance,
1948a23fd118Syl 		    (uint64_t)adapter_status, status);
1949a23fd118Syl 	}
1950a23fd118Syl 	xge_hal_device_intr_disable(lldev->devh);
19517eced415Sxw 	/* disable OS ISR's */
19527eced415Sxw 	xge_disable_intrs(lldev);
1953a23fd118Syl 
1954a23fd118Syl 	xge_debug_ll(XGE_TRACE, "%s",
1955a23fd118Syl 	    "waiting for device irq to become quiescent...");
1956a23fd118Syl 	xge_os_mdelay(1500);
1957a23fd118Syl 
1958a23fd118Syl 	xge_queue_flush(xge_hal_device_queue(lldev->devh));
1959a23fd118Syl 
19607eced415Sxw 	xgell_rx_close(lldev);
19617eced415Sxw 	xgell_tx_close(lldev);
1962a23fd118Syl }
1963a23fd118Syl 
1964a23fd118Syl /*
1965a23fd118Syl  * xgell_m_start
1966a23fd118Syl  * @arg: pointer to device private structure (xgelldev_t)
1967a23fd118Syl  *
1968a23fd118Syl  * This function is called by MAC Layer to enable the XFRAME
1969a23fd118Syl  * firmware to generate interrupts and also prepare the
1970a23fd118Syl  * driver to call mac_rx for delivering receive packets
1971a23fd118Syl  * to MAC Layer.
1972a23fd118Syl  */
1973a23fd118Syl static int
xgell_m_start(void * arg)1974a23fd118Syl xgell_m_start(void *arg)
1975a23fd118Syl {
19768347601bSyl 	xgelldev_t *lldev = arg;
19778347601bSyl 	xge_hal_device_t *hldev = lldev->devh;
1978a23fd118Syl 	int ret;
1979a23fd118Syl 
1980a23fd118Syl 	xge_debug_ll(XGE_TRACE, "%s%d: M_START", XGELL_IFNAME,
1981a23fd118Syl 	    lldev->instance);
1982a23fd118Syl 
1983a23fd118Syl 	mutex_enter(&lldev->genlock);
1984a23fd118Syl 
1985a23fd118Syl 	if (lldev->is_initialized) {
1986a23fd118Syl 		xge_debug_ll(XGE_ERR, "%s%d: device is already initialized",
1987a23fd118Syl 		    XGELL_IFNAME, lldev->instance);
1988a23fd118Syl 		mutex_exit(&lldev->genlock);
1989a23fd118Syl 		return (EINVAL);
1990a23fd118Syl 	}
1991a23fd118Syl 
1992a23fd118Syl 	hldev->terminating = 0;
1993a23fd118Syl 	if (ret = xgell_initiate_start(lldev)) {
1994a23fd118Syl 		mutex_exit(&lldev->genlock);
1995a23fd118Syl 		return (ret);
1996a23fd118Syl 	}
1997a23fd118Syl 
1998a23fd118Syl 	lldev->timeout_id = timeout(xge_device_poll, hldev, XGE_DEV_POLL_TICKS);
1999a23fd118Syl 
2000a23fd118Syl 	mutex_exit(&lldev->genlock);
2001a23fd118Syl 
2002a23fd118Syl 	return (0);
2003a23fd118Syl }
2004a23fd118Syl 
2005a23fd118Syl /*
2006a23fd118Syl  * xgell_m_stop
2007a23fd118Syl  * @arg: pointer to device private data (xgelldev_t)
2008a23fd118Syl  *
2009a23fd118Syl  * This function is called by the MAC Layer to disable
2010a23fd118Syl  * the XFRAME firmware for generating any interrupts and
2011a23fd118Syl  * also stop the driver from calling mac_rx() for
2012a23fd118Syl  * delivering data packets to the MAC Layer.
2013a23fd118Syl  */
2014a23fd118Syl static void
xgell_m_stop(void * arg)2015a23fd118Syl xgell_m_stop(void *arg)
2016a23fd118Syl {
20178347601bSyl 	xgelldev_t *lldev = arg;
20188347601bSyl 	xge_hal_device_t *hldev = lldev->devh;
2019a23fd118Syl 
2020a23fd118Syl 	xge_debug_ll(XGE_TRACE, "%s", "MAC_STOP");
2021a23fd118Syl 
2022a23fd118Syl 	mutex_enter(&lldev->genlock);
2023a23fd118Syl 	if (!lldev->is_initialized) {
2024a23fd118Syl 		xge_debug_ll(XGE_ERR, "%s", "device is not initialized...");
2025a23fd118Syl 		mutex_exit(&lldev->genlock);
2026a23fd118Syl 		return;
2027a23fd118Syl 	}
2028a23fd118Syl 
2029a23fd118Syl 	xge_hal_device_terminating(hldev);
2030a23fd118Syl 	xgell_initiate_stop(lldev);
2031a23fd118Syl 
2032a23fd118Syl 	/* reset device */
2033a23fd118Syl 	(void) xge_hal_device_reset(lldev->devh);
2034a23fd118Syl 
2035a23fd118Syl 	mutex_exit(&lldev->genlock);
2036a23fd118Syl 
20378347601bSyl 	if (lldev->timeout_id != 0) {
20388347601bSyl 		(void) untimeout(lldev->timeout_id);
20398347601bSyl 	}
2040a23fd118Syl 
2041a23fd118Syl 	xge_debug_ll(XGE_TRACE, "%s", "returning back to MAC Layer...");
2042a23fd118Syl }
2043a23fd118Syl 
2044a23fd118Syl /*
2045a23fd118Syl  * xgell_onerr_reset
2046a23fd118Syl  * @lldev: pointer to xgelldev_t structure
2047a23fd118Syl  *
2048a23fd118Syl  * This function is called by HAL Event framework to reset the HW
2049a23fd118Syl  * This function must be called with genlock held.
2050a23fd118Syl  */
2051a23fd118Syl int
xgell_onerr_reset(xgelldev_t * lldev)2052a23fd118Syl xgell_onerr_reset(xgelldev_t *lldev)
2053a23fd118Syl {
2054a23fd118Syl 	int rc = 0;
2055a23fd118Syl 
2056a23fd118Syl 	if (!lldev->is_initialized) {
2057a23fd118Syl 		xge_debug_ll(XGE_ERR, "%s%d: can not reset",
2058a23fd118Syl 		    XGELL_IFNAME, lldev->instance);
2059a23fd118Syl 		return (rc);
2060a23fd118Syl 	}
2061a23fd118Syl 
2062a23fd118Syl 	lldev->in_reset = 1;
2063a23fd118Syl 	xgell_initiate_stop(lldev);
2064a23fd118Syl 
2065a23fd118Syl 	/* reset device */
2066a23fd118Syl 	(void) xge_hal_device_reset(lldev->devh);
2067a23fd118Syl 
2068a23fd118Syl 	rc = xgell_initiate_start(lldev);
2069a23fd118Syl 	lldev->in_reset = 0;
2070a23fd118Syl 
2071a23fd118Syl 	return (rc);
2072a23fd118Syl }
2073a23fd118Syl 
2074a23fd118Syl /*
2075a23fd118Syl  * xgell_m_multicst
2076a23fd118Syl  * @arg: pointer to device private structure (xgelldev_t)
2077a23fd118Syl  * @add: B_TRUE to enable reception, B_FALSE to disable it
2078a23fd118Syl  * @mc_addr: the multicast address (currently unused, see FIXME below)
2079a23fd118Syl  *
2080a23fd118Syl  * This function is called by MAC Layer to enable or
2081a23fd118Syl  * disable device-level reception of specific multicast addresses.
2082a23fd118Syl  */
2083a23fd118Syl static int
xgell_m_multicst(void * arg,boolean_t add,const uint8_t * mc_addr)2084a23fd118Syl xgell_m_multicst(void *arg, boolean_t add, const uint8_t *mc_addr)
2085a23fd118Syl {
2086a23fd118Syl 	xge_hal_status_e status;
20878347601bSyl 	xgelldev_t *lldev = (xgelldev_t *)arg;
20888347601bSyl 	xge_hal_device_t *hldev = lldev->devh;
2089a23fd118Syl 
2090a23fd118Syl 	xge_debug_ll(XGE_TRACE, "M_MULTICAST add %d", add);
2091a23fd118Syl 
2092a23fd118Syl 	mutex_enter(&lldev->genlock);
2093a23fd118Syl 
2094a23fd118Syl 	if (!lldev->is_initialized) {
2095a23fd118Syl 		xge_debug_ll(XGE_ERR, "%s%d: can not set multicast",
2096a23fd118Syl 		    XGELL_IFNAME, lldev->instance);
2097a23fd118Syl 		mutex_exit(&lldev->genlock);
2098a23fd118Syl 		return (EIO);
2099a23fd118Syl 	}
2100a23fd118Syl 
2101a23fd118Syl 	/* FIXME: missing HAL functionality: enable_one() */
2102a23fd118Syl 
2103a23fd118Syl 	status = (add) ?
2104a23fd118Syl 	    xge_hal_device_mcast_enable(hldev) :
2105a23fd118Syl 	    xge_hal_device_mcast_disable(hldev);
2106a23fd118Syl 
2107a23fd118Syl 	if (status != XGE_HAL_OK) {
2108a23fd118Syl 		xge_debug_ll(XGE_ERR, "failed to %s multicast, status %d",
2109a23fd118Syl 		    add ? "enable" : "disable", status);
2110a23fd118Syl 		mutex_exit(&lldev->genlock);
2111a23fd118Syl 		return (EIO);
2112a23fd118Syl 	}
2113a23fd118Syl 
2114a23fd118Syl 	mutex_exit(&lldev->genlock);
2115a23fd118Syl 
2116a23fd118Syl 	return (0);
2117a23fd118Syl }
2118a23fd118Syl 
2119a23fd118Syl 
2120a23fd118Syl /*
2121a23fd118Syl  * xgell_m_promisc
2122a23fd118Syl  * @arg: pointer to device private structure (xgelldev_t)
2123a23fd118Syl  * @on: B_TRUE to enable promiscuous mode, B_FALSE to disable it
2124a23fd118Syl  *
2125a23fd118Syl  * This function is called by MAC Layer to enable or
2126a23fd118Syl  * disable the reception of all the packets on the medium
2127a23fd118Syl  */
2128a23fd118Syl static int
xgell_m_promisc(void * arg,boolean_t on)2129a23fd118Syl xgell_m_promisc(void *arg, boolean_t on)
2130a23fd118Syl {
21318347601bSyl 	xgelldev_t *lldev = (xgelldev_t *)arg;
21328347601bSyl 	xge_hal_device_t *hldev = lldev->devh;
2133a23fd118Syl 
2134a23fd118Syl 	mutex_enter(&lldev->genlock);
2135a23fd118Syl 
2136a23fd118Syl 	xge_debug_ll(XGE_TRACE, "%s", "MAC_PROMISC_SET");
2137a23fd118Syl 
2138a23fd118Syl 	if (!lldev->is_initialized) {
2139a23fd118Syl 		xge_debug_ll(XGE_ERR, "%s%d: can not set promiscuous",
2140a23fd118Syl 		    XGELL_IFNAME, lldev->instance);
2141a23fd118Syl 		mutex_exit(&lldev->genlock);
2142a23fd118Syl 		return (EIO);
2143a23fd118Syl 	}
2144a23fd118Syl 
2145a23fd118Syl 	if (on) {
2146a23fd118Syl 		xge_hal_device_promisc_enable(hldev);
2147a23fd118Syl 	} else {
2148a23fd118Syl 		xge_hal_device_promisc_disable(hldev);
2149a23fd118Syl 	}
2150a23fd118Syl 
2151a23fd118Syl 	mutex_exit(&lldev->genlock);
2152a23fd118Syl 
2153a23fd118Syl 	return (0);
2154a23fd118Syl }
2155a23fd118Syl 
2156a23fd118Syl /*
2157ba2e4443Sseb  * xgell_m_stat
2158a23fd118Syl  * @arg: pointer to device private structure (xgelldev_t)
2159a23fd118Syl  *
2160ba2e4443Sseb  * This function is called by MAC Layer to get network statistics
2161a23fd118Syl  * from the driver.
2162a23fd118Syl  */
2163ba2e4443Sseb static int
xgell_m_stat(void * arg,uint_t stat,uint64_t * val)2164ba2e4443Sseb xgell_m_stat(void *arg, uint_t stat, uint64_t *val)
2165a23fd118Syl {
2166a23fd118Syl 	xge_hal_stats_hw_info_t *hw_info;
21678347601bSyl 	xgelldev_t *lldev = (xgelldev_t *)arg;
21688347601bSyl 	xge_hal_device_t *hldev = lldev->devh;
2169a23fd118Syl 
2170a23fd118Syl 	xge_debug_ll(XGE_TRACE, "%s", "MAC_STATS_GET");
2171a23fd118Syl 
21727eced415Sxw 	mutex_enter(&lldev->genlock);
2173a23fd118Syl 
2174a23fd118Syl 	if (!lldev->is_initialized) {
2175a23fd118Syl 		mutex_exit(&lldev->genlock);
2176ba2e4443Sseb 		return (EAGAIN);
2177a23fd118Syl 	}
2178a23fd118Syl 
2179a23fd118Syl 	if (xge_hal_stats_hw(hldev, &hw_info) != XGE_HAL_OK) {
2180a23fd118Syl 		mutex_exit(&lldev->genlock);
2181ba2e4443Sseb 		return (EAGAIN);
2182a23fd118Syl 	}
2183a23fd118Syl 
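	/*
	 * Note: the hardware statistics are evidently kept as 32-bit
	 * counters with a separate overflow word each; the *_oflow value
	 * supplies the upper 32 bits, so the two are combined into a single
	 * 64-bit statistic below.
	 */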
2184a23fd118Syl 	switch (stat) {
2185a23fd118Syl 	case MAC_STAT_IFSPEED:
2186ba2e4443Sseb 		*val = 10000000000ull; /* 10G */
2187a23fd118Syl 		break;
2188a23fd118Syl 
2189a23fd118Syl 	case MAC_STAT_MULTIRCV:
21908347601bSyl 		*val = ((u64) hw_info->rmac_vld_mcst_frms_oflow << 32) |
21918347601bSyl 		    hw_info->rmac_vld_mcst_frms;
2192a23fd118Syl 		break;
2193a23fd118Syl 
2194a23fd118Syl 	case MAC_STAT_BRDCSTRCV:
21958347601bSyl 		*val = ((u64) hw_info->rmac_vld_bcst_frms_oflow << 32) |
21968347601bSyl 		    hw_info->rmac_vld_bcst_frms;
2197a23fd118Syl 		break;
2198a23fd118Syl 
2199a23fd118Syl 	case MAC_STAT_MULTIXMT:
22008347601bSyl 		*val = ((u64) hw_info->tmac_mcst_frms_oflow << 32) |
22018347601bSyl 		    hw_info->tmac_mcst_frms;
2202a23fd118Syl 		break;
2203a23fd118Syl 
2204a23fd118Syl 	case MAC_STAT_BRDCSTXMT:
22058347601bSyl 		*val = ((u64) hw_info->tmac_bcst_frms_oflow << 32) |
22068347601bSyl 		    hw_info->tmac_bcst_frms;
2207a23fd118Syl 		break;
2208a23fd118Syl 
2209a23fd118Syl 	case MAC_STAT_RBYTES:
22108347601bSyl 		*val = ((u64) hw_info->rmac_ttl_octets_oflow << 32) |
22118347601bSyl 		    hw_info->rmac_ttl_octets;
2212a23fd118Syl 		break;
2213a23fd118Syl 
2214a23fd118Syl 	case MAC_STAT_NORCVBUF:
2215ba2e4443Sseb 		*val = hw_info->rmac_drop_frms;
2216a23fd118Syl 		break;
2217a23fd118Syl 
2218a23fd118Syl 	case MAC_STAT_IERRORS:
22198347601bSyl 		*val = ((u64) hw_info->rmac_discarded_frms_oflow << 32) |
22208347601bSyl 		    hw_info->rmac_discarded_frms;
2221a23fd118Syl 		break;
2222a23fd118Syl 
2223a23fd118Syl 	case MAC_STAT_OBYTES:
22248347601bSyl 		*val = ((u64) hw_info->tmac_ttl_octets_oflow << 32) |
22258347601bSyl 		    hw_info->tmac_ttl_octets;
2226a23fd118Syl 		break;
2227a23fd118Syl 
2228a23fd118Syl 	case MAC_STAT_NOXMTBUF:
2229ba2e4443Sseb 		*val = hw_info->tmac_drop_frms;
2230a23fd118Syl 		break;
2231a23fd118Syl 
2232a23fd118Syl 	case MAC_STAT_OERRORS:
22338347601bSyl 		*val = ((u64) hw_info->tmac_any_err_frms_oflow << 32) |
22348347601bSyl 		    hw_info->tmac_any_err_frms;
2235a23fd118Syl 		break;
2236a23fd118Syl 
2237a23fd118Syl 	case MAC_STAT_IPACKETS:
22388347601bSyl 		*val = ((u64) hw_info->rmac_vld_frms_oflow << 32) |
22398347601bSyl 		    hw_info->rmac_vld_frms;
2240a23fd118Syl 		break;
2241a23fd118Syl 
2242a23fd118Syl 	case MAC_STAT_OPACKETS:
22438347601bSyl 		*val = ((u64) hw_info->tmac_frms_oflow << 32) |
22448347601bSyl 		    hw_info->tmac_frms;
2245ba2e4443Sseb 		break;
2246ba2e4443Sseb 
2247ba2e4443Sseb 	case ETHER_STAT_FCS_ERRORS:
2248ba2e4443Sseb 		*val = hw_info->rmac_fcs_err_frms;
2249a23fd118Syl 		break;
2250a23fd118Syl 
2251ba2e4443Sseb 	case ETHER_STAT_TOOLONG_ERRORS:
2252ba2e4443Sseb 		*val = hw_info->rmac_long_frms;
2253a23fd118Syl 		break;
2254a23fd118Syl 
2255ba2e4443Sseb 	case ETHER_STAT_LINK_DUPLEX:
2256ba2e4443Sseb 		*val = LINK_DUPLEX_FULL;
2257a23fd118Syl 		break;
2258a23fd118Syl 
2259a23fd118Syl 	default:
2260ba2e4443Sseb 		mutex_exit(&lldev->genlock);
2261ba2e4443Sseb 		return (ENOTSUP);
2262a23fd118Syl 	}
2263a23fd118Syl 
2264a23fd118Syl 	mutex_exit(&lldev->genlock);
2265a23fd118Syl 
2266ba2e4443Sseb 	return (0);
2267a23fd118Syl }
2268a23fd118Syl 
22690dc2366fSVenugopal Iyer /*
22700dc2366fSVenugopal Iyer  * Retrieve a value for one of the statistics for a particular rx ring
22710dc2366fSVenugopal Iyer  */
22720dc2366fSVenugopal Iyer int
xgell_rx_ring_stat(mac_ring_driver_t rh,uint_t stat,uint64_t * val)22730dc2366fSVenugopal Iyer xgell_rx_ring_stat(mac_ring_driver_t rh, uint_t stat, uint64_t *val)
22740dc2366fSVenugopal Iyer {
22750dc2366fSVenugopal Iyer 	xgell_rx_ring_t	*rx_ring = (xgell_rx_ring_t *)rh;
22760dc2366fSVenugopal Iyer 
22770dc2366fSVenugopal Iyer 	switch (stat) {
22780dc2366fSVenugopal Iyer 	case MAC_STAT_RBYTES:
22790dc2366fSVenugopal Iyer 		*val = rx_ring->rx_bytes;
22800dc2366fSVenugopal Iyer 		break;
22810dc2366fSVenugopal Iyer 
22820dc2366fSVenugopal Iyer 	case MAC_STAT_IPACKETS:
22830dc2366fSVenugopal Iyer 		*val = rx_ring->rx_pkts;
22840dc2366fSVenugopal Iyer 		break;
22850dc2366fSVenugopal Iyer 
22860dc2366fSVenugopal Iyer 	default:
22870dc2366fSVenugopal Iyer 		*val = 0;
22880dc2366fSVenugopal Iyer 		return (ENOTSUP);
22890dc2366fSVenugopal Iyer 	}
22900dc2366fSVenugopal Iyer 
22910dc2366fSVenugopal Iyer 	return (0);
22920dc2366fSVenugopal Iyer }
22930dc2366fSVenugopal Iyer 
22940dc2366fSVenugopal Iyer /*
22950dc2366fSVenugopal Iyer  * Retrieve a value for one of the statistics for a particular tx ring
22960dc2366fSVenugopal Iyer  */
22970dc2366fSVenugopal Iyer int
xgell_tx_ring_stat(mac_ring_driver_t rh,uint_t stat,uint64_t * val)22980dc2366fSVenugopal Iyer xgell_tx_ring_stat(mac_ring_driver_t rh, uint_t stat, uint64_t *val)
22990dc2366fSVenugopal Iyer {
23000dc2366fSVenugopal Iyer 	xgell_tx_ring_t	*tx_ring = (xgell_tx_ring_t *)rh;
23010dc2366fSVenugopal Iyer 
23020dc2366fSVenugopal Iyer 	switch (stat) {
23030dc2366fSVenugopal Iyer 	case MAC_STAT_OBYTES:
23040dc2366fSVenugopal Iyer 		*val = tx_ring->tx_bytes;
23050dc2366fSVenugopal Iyer 		break;
23060dc2366fSVenugopal Iyer 
23070dc2366fSVenugopal Iyer 	case MAC_STAT_OPACKETS:
23080dc2366fSVenugopal Iyer 		*val = tx_ring->tx_pkts;
23090dc2366fSVenugopal Iyer 		break;
23100dc2366fSVenugopal Iyer 
23110dc2366fSVenugopal Iyer 	default:
23120dc2366fSVenugopal Iyer 		*val = 0;
23130dc2366fSVenugopal Iyer 		return (ENOTSUP);
23140dc2366fSVenugopal Iyer 	}
23150dc2366fSVenugopal Iyer 
23160dc2366fSVenugopal Iyer 	return (0);
23170dc2366fSVenugopal Iyer }
23180dc2366fSVenugopal Iyer 
2319a23fd118Syl /*
2320a23fd118Syl  * xgell_device_alloc - Allocate new LL device
2321a23fd118Syl  */
2322a23fd118Syl int
xgell_device_alloc(xge_hal_device_h devh,dev_info_t * dev_info,xgelldev_t ** lldev_out)2323a23fd118Syl xgell_device_alloc(xge_hal_device_h devh,
2324a23fd118Syl     dev_info_t *dev_info, xgelldev_t **lldev_out)
2325a23fd118Syl {
2326a23fd118Syl 	xgelldev_t *lldev;
2327a23fd118Syl 	xge_hal_device_t *hldev = (xge_hal_device_t *)devh;
2328a23fd118Syl 	int instance = ddi_get_instance(dev_info);
2329a23fd118Syl 
2330a23fd118Syl 	*lldev_out = NULL;
2331a23fd118Syl 
2332a23fd118Syl 	xge_debug_ll(XGE_TRACE, "trying to register ethernet device %s%d...",
2333a23fd118Syl 	    XGELL_IFNAME, instance);
2334a23fd118Syl 
2335a23fd118Syl 	lldev = kmem_zalloc(sizeof (xgelldev_t), KM_SLEEP);
2336a23fd118Syl 
2337a23fd118Syl 	lldev->devh = hldev;
2338a23fd118Syl 	lldev->instance = instance;
2339a23fd118Syl 	lldev->dev_info = dev_info;
2340a23fd118Syl 
2341a23fd118Syl 	*lldev_out = lldev;
2342a23fd118Syl 
2343a23fd118Syl 	ddi_set_driver_private(dev_info, (caddr_t)hldev);
2344a23fd118Syl 
2345a23fd118Syl 	return (DDI_SUCCESS);
2346a23fd118Syl }
2347a23fd118Syl 
2348a23fd118Syl /*
2349a23fd118Syl  * xgell_device_free
2350a23fd118Syl  */
2351a23fd118Syl void
xgell_device_free(xgelldev_t * lldev)2352a23fd118Syl xgell_device_free(xgelldev_t *lldev)
2353a23fd118Syl {
2354a23fd118Syl 	xge_debug_ll(XGE_TRACE, "freeing device %s%d",
2355a23fd118Syl 	    XGELL_IFNAME, lldev->instance);
2356a23fd118Syl 
2357a23fd118Syl 	kmem_free(lldev, sizeof (xgelldev_t));
2358a23fd118Syl }
2359a23fd118Syl 
2360a23fd118Syl /*
2361a23fd118Syl  * xgell_m_ioctl
2362a23fd118Syl  */
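/*
 * Note: only ndd(1M)-style ND_GET/ND_SET ioctls are handled here; everything
 * else is NAKed.  The nd parameter list is expected to be registered on
 * lldev->ndp elsewhere in the driver, with handlers such as xgell_stats_get()
 * and xgell_bar0_set() below, and nd_getset() builds the reply.
 */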
2363a23fd118Syl static void
xgell_m_ioctl(void * arg,queue_t * wq,mblk_t * mp)2364a23fd118Syl xgell_m_ioctl(void *arg, queue_t *wq, mblk_t *mp)
2365a23fd118Syl {
23668347601bSyl 	xgelldev_t *lldev = arg;
2367a23fd118Syl 	struct iocblk *iocp;
2368a23fd118Syl 	int err = 0;
2369a23fd118Syl 	int cmd;
2370a23fd118Syl 	int need_privilege = 1;
2371a23fd118Syl 	int ret = 0;
2372a23fd118Syl 
2373a23fd118Syl 
2374a23fd118Syl 	iocp = (struct iocblk *)mp->b_rptr;
2375a23fd118Syl 	iocp->ioc_error = 0;
2376a23fd118Syl 	cmd = iocp->ioc_cmd;
2377a23fd118Syl 	xge_debug_ll(XGE_TRACE, "MAC_IOCTL cmd 0x%x", cmd);
2378a23fd118Syl 	switch (cmd) {
2379a23fd118Syl 	case ND_GET:
2380a23fd118Syl 		need_privilege = 0;
2381a23fd118Syl 		/* FALLTHRU */
2382a23fd118Syl 	case ND_SET:
2383a23fd118Syl 		break;
2384a23fd118Syl 	default:
2385a23fd118Syl 		xge_debug_ll(XGE_TRACE, "unknown cmd 0x%x", cmd);
2386a23fd118Syl 		miocnak(wq, mp, 0, EINVAL);
2387a23fd118Syl 		return;
2388a23fd118Syl 	}
2389a23fd118Syl 
2390a23fd118Syl 	if (need_privilege) {
2391a23fd118Syl 		err = secpolicy_net_config(iocp->ioc_cr, B_FALSE);
2392a23fd118Syl 		if (err != 0) {
2393a23fd118Syl 			xge_debug_ll(XGE_ERR,
2394a23fd118Syl 			    "drv_priv(): rejected cmd 0x%x, err %d",
2395a23fd118Syl 			    cmd, err);
2396a23fd118Syl 			miocnak(wq, mp, 0, err);
2397a23fd118Syl 			return;
2398a23fd118Syl 		}
2399a23fd118Syl 	}
2400a23fd118Syl 
2401a23fd118Syl 	switch (cmd) {
2402a23fd118Syl 	case ND_GET:
2403a23fd118Syl 		/*
2404a23fd118Syl 		 * If nd_getset() returns B_FALSE, the command was
2405a23fd118Syl 		 * not valid (e.g. unknown name), so we just tell the
2406a23fd118Syl 		 * top-level ioctl code to send a NAK (with code EINVAL).
2407a23fd118Syl 		 *
2408a23fd118Syl 		 * Otherwise, nd_getset() will have built the reply to
2409a23fd118Syl 		 * be sent (but not actually sent it), so we tell the
2410a23fd118Syl 		 * caller to send the prepared reply.
2411a23fd118Syl 		 */
2412a23fd118Syl 		ret = nd_getset(wq, lldev->ndp, mp);
24137eced415Sxw 		xge_debug_ll(XGE_TRACE, "%s", "got ndd get ioctl");
2414a23fd118Syl 		break;
2415a23fd118Syl 
2416a23fd118Syl 	case ND_SET:
2417a23fd118Syl 		ret = nd_getset(wq, lldev->ndp, mp);
24187eced415Sxw 		xge_debug_ll(XGE_TRACE, "%s", "got ndd set ioctl");
2419a23fd118Syl 		break;
2420a23fd118Syl 
2421a23fd118Syl 	default:
2422a23fd118Syl 		break;
2423a23fd118Syl 	}
2424a23fd118Syl 
2425a23fd118Syl 	if (ret == B_FALSE) {
2426a23fd118Syl 		xge_debug_ll(XGE_ERR,
2427a23fd118Syl 		    "nd_getset(): rejected cmd 0x%x, err %d",
2428a23fd118Syl 		    cmd, err);
2429a23fd118Syl 		miocnak(wq, mp, 0, EINVAL);
2430a23fd118Syl 	} else {
2431a23fd118Syl 		mp->b_datap->db_type = iocp->ioc_error == 0 ?
2432a23fd118Syl 		    M_IOCACK : M_IOCNAK;
2433a23fd118Syl 		qreply(wq, mp);
2434a23fd118Syl 	}
2435a23fd118Syl }
2436a23fd118Syl 
2437da14cebeSEric Cheng 
2438ba2e4443Sseb static boolean_t
xgell_m_getcapab(void * arg,mac_capab_t cap,void * cap_data)2439ba2e4443Sseb xgell_m_getcapab(void *arg, mac_capab_t cap, void *cap_data)
2440a23fd118Syl {
24418347601bSyl 	xgelldev_t *lldev = arg;
24428347601bSyl 
2443da14cebeSEric Cheng 	xge_debug_ll(XGE_TRACE, "xgell_m_getcapab: %x", cap);
2444da14cebeSEric Cheng 
2445ba2e4443Sseb 	switch (cap) {
2446ba2e4443Sseb 	case MAC_CAPAB_HCKSUM: {
2447ba2e4443Sseb 		uint32_t *hcksum_txflags = cap_data;
2448ba2e4443Sseb 		*hcksum_txflags = HCKSUM_INET_FULL_V4 | HCKSUM_INET_FULL_V6 |
2449ba2e4443Sseb 		    HCKSUM_IPHDRCKSUM;
2450ba2e4443Sseb 		break;
2451ba2e4443Sseb 	}
24528347601bSyl 	case MAC_CAPAB_LSO: {
24538347601bSyl 		mac_capab_lso_t *cap_lso = cap_data;
24548347601bSyl 
24558347601bSyl 		if (lldev->config.lso_enable) {
24568347601bSyl 			cap_lso->lso_flags = LSO_TX_BASIC_TCP_IPV4;
24578347601bSyl 			cap_lso->lso_basic_tcp_ipv4.lso_max = XGELL_LSO_MAXLEN;
24588347601bSyl 			break;
24598347601bSyl 		} else {
24608347601bSyl 			return (B_FALSE);
24618347601bSyl 		}
24628347601bSyl 	}
2463da14cebeSEric Cheng 	case MAC_CAPAB_RINGS: {
2464da14cebeSEric Cheng 		mac_capab_rings_t *cap_rings = cap_data;
2465da14cebeSEric Cheng 
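		/*
		 * Note: mr_rget/mr_gget are the callbacks through which the
		 * MAC layer later retrieves per-ring and per-group
		 * information; they are serviced by xgell_fill_ring() and
		 * xgell_fill_group() above.
		 */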
2466da14cebeSEric Cheng 		switch (cap_rings->mr_type) {
2467da14cebeSEric Cheng 		case MAC_RING_TYPE_RX:
2468da14cebeSEric Cheng 			cap_rings->mr_group_type = MAC_GROUP_TYPE_STATIC;
2469da14cebeSEric Cheng 			cap_rings->mr_rnum = lldev->init_rx_rings;
2470da14cebeSEric Cheng 			cap_rings->mr_gnum = lldev->init_rx_groups;
2471da14cebeSEric Cheng 			cap_rings->mr_rget = xgell_fill_ring;
2472da14cebeSEric Cheng 			cap_rings->mr_gget = xgell_fill_group;
2473da14cebeSEric Cheng 			break;
2474da14cebeSEric Cheng 		case MAC_RING_TYPE_TX:
2475da14cebeSEric Cheng 			cap_rings->mr_group_type = MAC_GROUP_TYPE_STATIC;
2476da14cebeSEric Cheng 			cap_rings->mr_rnum = lldev->init_tx_rings;
2477da14cebeSEric Cheng 			cap_rings->mr_gnum = 0;
2478da14cebeSEric Cheng 			cap_rings->mr_rget = xgell_fill_ring;
2479da14cebeSEric Cheng 			cap_rings->mr_gget = NULL;
2480da14cebeSEric Cheng 			break;
2481da14cebeSEric Cheng 		default:
2482da14cebeSEric Cheng 			break;
2483da14cebeSEric Cheng 		}
2484da14cebeSEric Cheng 		break;
2485da14cebeSEric Cheng 	}
2486ba2e4443Sseb 	default:
2487ba2e4443Sseb 		return (B_FALSE);
2488ba2e4443Sseb 	}
2489ba2e4443Sseb 	return (B_TRUE);
2490a23fd118Syl }
2491a23fd118Syl 
2492a23fd118Syl static int
xgell_stats_get(queue_t * q,mblk_t * mp,caddr_t cp,cred_t * credp)2493a23fd118Syl xgell_stats_get(queue_t *q, mblk_t *mp, caddr_t cp, cred_t *credp)
2494a23fd118Syl {
2495a23fd118Syl 	xgelldev_t *lldev = (xgelldev_t *)cp;
2496a23fd118Syl 	xge_hal_status_e status;
2497a23fd118Syl 	int count = 0, retsize;
2498a23fd118Syl 	char *buf;
2499a23fd118Syl 
2500a23fd118Syl 	buf = kmem_alloc(XGELL_STATS_BUFSIZE, KM_SLEEP);
2501a23fd118Syl 	if (buf == NULL) {
2502a23fd118Syl 		return (ENOSPC);
2503a23fd118Syl 	}
2504a23fd118Syl 
2505a23fd118Syl 	status = xge_hal_aux_stats_tmac_read(lldev->devh, XGELL_STATS_BUFSIZE,
2506a23fd118Syl 	    buf, &retsize);
2507a23fd118Syl 	if (status != XGE_HAL_OK) {
2508a23fd118Syl 		kmem_free(buf, XGELL_STATS_BUFSIZE);
2509a23fd118Syl 		xge_debug_ll(XGE_ERR, "tmac_read(): status %d", status);
2510a23fd118Syl 		return (EINVAL);
2511a23fd118Syl 	}
2512a23fd118Syl 	count += retsize;
2513a23fd118Syl 
2514a23fd118Syl 	status = xge_hal_aux_stats_rmac_read(lldev->devh,
2515a23fd118Syl 	    XGELL_STATS_BUFSIZE - count,
2516a23fd118Syl 	    buf+count, &retsize);
2517a23fd118Syl 	if (status != XGE_HAL_OK) {
2518a23fd118Syl 		kmem_free(buf, XGELL_STATS_BUFSIZE);
2519a23fd118Syl 		xge_debug_ll(XGE_ERR, "rmac_read(): status %d", status);
2520a23fd118Syl 		return (EINVAL);
2521a23fd118Syl 	}
2522a23fd118Syl 	count += retsize;
2523a23fd118Syl 
2524a23fd118Syl 	status = xge_hal_aux_stats_pci_read(lldev->devh,
2525a23fd118Syl 	    XGELL_STATS_BUFSIZE - count, buf + count, &retsize);
2526a23fd118Syl 	if (status != XGE_HAL_OK) {
2527a23fd118Syl 		kmem_free(buf, XGELL_STATS_BUFSIZE);
2528a23fd118Syl 		xge_debug_ll(XGE_ERR, "pci_read(): status %d", status);
2529a23fd118Syl 		return (EINVAL);
2530a23fd118Syl 	}
2531a23fd118Syl 	count += retsize;
2532a23fd118Syl 
2533a23fd118Syl 	status = xge_hal_aux_stats_sw_dev_read(lldev->devh,
2534a23fd118Syl 	    XGELL_STATS_BUFSIZE - count, buf + count, &retsize);
2535a23fd118Syl 	if (status != XGE_HAL_OK) {
2536a23fd118Syl 		kmem_free(buf, XGELL_STATS_BUFSIZE);
2537a23fd118Syl 		xge_debug_ll(XGE_ERR, "sw_dev_read(): status %d", status);
2538a23fd118Syl 		return (EINVAL);
2539a23fd118Syl 	}
2540a23fd118Syl 	count += retsize;
2541a23fd118Syl 
2542a23fd118Syl 	status = xge_hal_aux_stats_hal_read(lldev->devh,
2543a23fd118Syl 	    XGELL_STATS_BUFSIZE - count, buf + count, &retsize);
2544a23fd118Syl 	if (status != XGE_HAL_OK) {
2545a23fd118Syl 		kmem_free(buf, XGELL_STATS_BUFSIZE);
2546a23fd118Syl 		xge_debug_ll(XGE_ERR, "hal_read(): status %d", status);
2547a23fd118Syl 		return (EINVAL);
2548a23fd118Syl 	}
2549a23fd118Syl 	count += retsize;
2550a23fd118Syl 
2551a23fd118Syl 	*(buf + count - 1) = '\0'; /* remove last '\n' */
2552a23fd118Syl 	(void) mi_mpprintf(mp, "%s", buf);
2553a23fd118Syl 	kmem_free(buf, XGELL_STATS_BUFSIZE);
2554a23fd118Syl 
2555a23fd118Syl 	return (0);
2556a23fd118Syl }
2557a23fd118Syl 
2558a23fd118Syl static int
xgell_pciconf_get(queue_t * q,mblk_t * mp,caddr_t cp,cred_t * credp)2559a23fd118Syl xgell_pciconf_get(queue_t *q, mblk_t *mp, caddr_t cp, cred_t *credp)
2560a23fd118Syl {
2561a23fd118Syl 	xgelldev_t *lldev = (xgelldev_t *)cp;
2562a23fd118Syl 	xge_hal_status_e status;
2563a23fd118Syl 	int retsize;
2564a23fd118Syl 	char *buf;
2565a23fd118Syl 
2566a23fd118Syl 	buf = kmem_alloc(XGELL_PCICONF_BUFSIZE, KM_SLEEP);
2567a23fd118Syl 	if (buf == NULL) {
2568a23fd118Syl 		return (ENOSPC);
2569a23fd118Syl 	}
2570a23fd118Syl 	status = xge_hal_aux_pci_config_read(lldev->devh, XGELL_PCICONF_BUFSIZE,
2571a23fd118Syl 	    buf, &retsize);
2572a23fd118Syl 	if (status != XGE_HAL_OK) {
2573a23fd118Syl 		kmem_free(buf, XGELL_PCICONF_BUFSIZE);
2574a23fd118Syl 		xge_debug_ll(XGE_ERR, "pci_config_read(): status %d", status);
2575a23fd118Syl 		return (EINVAL);
2576a23fd118Syl 	}
2577a23fd118Syl 	*(buf + retsize - 1) = '\0'; /* remove last '\n' */
2578a23fd118Syl 	(void) mi_mpprintf(mp, "%s", buf);
2579a23fd118Syl 	kmem_free(buf, XGELL_PCICONF_BUFSIZE);
2580a23fd118Syl 
2581a23fd118Syl 	return (0);
2582a23fd118Syl }
2583a23fd118Syl 
2584a23fd118Syl static int
xgell_about_get(queue_t * q,mblk_t * mp,caddr_t cp,cred_t * credp)2585a23fd118Syl xgell_about_get(queue_t *q, mblk_t *mp, caddr_t cp, cred_t *credp)
2586a23fd118Syl {
2587a23fd118Syl 	xgelldev_t *lldev = (xgelldev_t *)cp;
2588a23fd118Syl 	xge_hal_status_e status;
2589a23fd118Syl 	int retsize;
2590a23fd118Syl 	char *buf;
2591a23fd118Syl 
2592a23fd118Syl 	buf = kmem_alloc(XGELL_ABOUT_BUFSIZE, KM_SLEEP);
2593a23fd118Syl 	if (buf == NULL) {
2594a23fd118Syl 		return (ENOSPC);
2595a23fd118Syl 	}
2596a23fd118Syl 	status = xge_hal_aux_about_read(lldev->devh, XGELL_ABOUT_BUFSIZE,
2597a23fd118Syl 	    buf, &retsize);
2598a23fd118Syl 	if (status != XGE_HAL_OK) {
2599a23fd118Syl 		kmem_free(buf, XGELL_ABOUT_BUFSIZE);
2600a23fd118Syl 		xge_debug_ll(XGE_ERR, "about_read(): status %d", status);
2601a23fd118Syl 		return (EINVAL);
2602a23fd118Syl 	}
2603a23fd118Syl 	*(buf + retsize - 1) = '\0'; /* remove last '\n' */
2604a23fd118Syl 	(void) mi_mpprintf(mp, "%s", buf);
2605a23fd118Syl 	kmem_free(buf, XGELL_ABOUT_BUFSIZE);
2606a23fd118Syl 
2607a23fd118Syl 	return (0);
2608a23fd118Syl }
2609a23fd118Syl 
2610a23fd118Syl static unsigned long bar0_offset = 0x110; /* adapter_control */
2611a23fd118Syl 
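/*
 * xgell_bar0_get
 *
 * Read handler for the "bar0" NDD parameter.  Dumps the BAR0 register at
 * the currently selected bar0_offset (the adapter_control register by
 * default); the offset is changed by writing to "bar0", see
 * xgell_bar0_set() below.
 */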
2612a23fd118Syl static int
2613a23fd118Syl xgell_bar0_get(queue_t *q, mblk_t *mp, caddr_t cp, cred_t *credp)
2614a23fd118Syl {
2615a23fd118Syl 	xgelldev_t *lldev = (xgelldev_t *)cp;
2616a23fd118Syl 	xge_hal_status_e status;
2617a23fd118Syl 	int retsize;
2618a23fd118Syl 	char *buf;
2619a23fd118Syl 
2620a23fd118Syl 	buf = kmem_alloc(XGELL_IOCTL_BUFSIZE, KM_SLEEP);
2621a23fd118Syl 	if (buf == NULL) {
2622a23fd118Syl 		return (ENOSPC);
2623a23fd118Syl 	}
2624a23fd118Syl 	status = xge_hal_aux_bar0_read(lldev->devh, bar0_offset,
2625a23fd118Syl 	    XGELL_IOCTL_BUFSIZE, buf, &retsize);
2626a23fd118Syl 	if (status != XGE_HAL_OK) {
2627a23fd118Syl 		kmem_free(buf, XGELL_IOCTL_BUFSIZE);
2628a23fd118Syl 		xge_debug_ll(XGE_ERR, "bar0_read(): status %d", status);
2629a23fd118Syl 		return (EINVAL);
2630a23fd118Syl 	}
2631a23fd118Syl 	*(buf + retsize - 1) = '\0'; /* remove last '\n' */
2632a23fd118Syl 	(void) mi_mpprintf(mp, "%s", buf);
2633a23fd118Syl 	kmem_free(buf, XGELL_IOCTL_BUFSIZE);
2634a23fd118Syl 
2635a23fd118Syl 	return (0);
2636a23fd118Syl }
2637a23fd118Syl 
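/*
 * xgell_bar0_set
 *
 * Write handler for the "bar0" NDD parameter.  Parses a hexadecimal
 * register offset (an optional "0x"/"0X" prefix is accepted) and stores
 * it in bar0_offset for subsequent "bar0" reads.  The previous offset is
 * restored if the value does not parse.
 */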
2638a23fd118Syl static int
2639a23fd118Syl xgell_bar0_set(queue_t *q, mblk_t *mp, char *value, caddr_t cp, cred_t *credp)
2640a23fd118Syl {
2641a23fd118Syl 	unsigned long old_offset = bar0_offset;
2642a23fd118Syl 	char *end;
2643a23fd118Syl 
2644a23fd118Syl 	if (value && *value == '0' &&
2645a23fd118Syl 	    (*(value + 1) == 'x' || *(value + 1) == 'X')) {
2646a23fd118Syl 		value += 2;
2647a23fd118Syl 	}
2648a23fd118Syl 
2649a23fd118Syl 	bar0_offset = mi_strtol(value, &end, 16);
2650a23fd118Syl 	if (end == value) {
2651a23fd118Syl 		bar0_offset = old_offset;
2652a23fd118Syl 		return (EINVAL);
2653a23fd118Syl 	}
2654a23fd118Syl 
2655a23fd118Syl 	xge_debug_ll(XGE_TRACE, "bar0: new value %s:%lX", value, bar0_offset);
2656a23fd118Syl 
2657a23fd118Syl 	return (0);
2658a23fd118Syl }
2659a23fd118Syl 
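/*
 * xgell_debug_level_get
 *
 * Read handler for the "debug_level" NDD parameter.  Reports the HAL
 * driver's current debug level.
 */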
2660a23fd118Syl static int
2661a23fd118Syl xgell_debug_level_get(queue_t *q, mblk_t *mp, caddr_t cp, cred_t *credp)
2662a23fd118Syl {
2663a23fd118Syl 	char *buf;
2664a23fd118Syl 
2665a23fd118Syl 	buf = kmem_alloc(XGELL_IOCTL_BUFSIZE, KM_SLEEP);
2666a23fd118Syl 	if (buf == NULL) {
2667a23fd118Syl 		return (ENOSPC);
2668a23fd118Syl 	}
2669a23fd118Syl 	(void) mi_mpprintf(mp, "debug_level %d", xge_hal_driver_debug_level());
2670a23fd118Syl 	kmem_free(buf, XGELL_IOCTL_BUFSIZE);
2671a23fd118Syl 
2672a23fd118Syl 	return (0);
2673a23fd118Syl }
2674a23fd118Syl 
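/*
 * xgell_debug_level_set
 *
 * Write handler for the "debug_level" NDD parameter.  Accepts a decimal
 * level between XGE_NONE and XGE_ERR and passes it to the HAL.
 */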
2675a23fd118Syl static int
2676a23fd118Syl xgell_debug_level_set(queue_t *q, mblk_t *mp, char *value, caddr_t cp,
2677a23fd118Syl     cred_t *credp)
2678a23fd118Syl {
2679a23fd118Syl 	int level;
2680a23fd118Syl 	char *end;
2681a23fd118Syl 
2682a23fd118Syl 	level = mi_strtol(value, &end, 10);
2683a23fd118Syl 	if (level < XGE_NONE || level > XGE_ERR || end == value) {
2684a23fd118Syl 		return (EINVAL);
2685a23fd118Syl 	}
2686a23fd118Syl 
2687a23fd118Syl 	xge_hal_driver_debug_level_set(level);
2688a23fd118Syl 
2689a23fd118Syl 	return (0);
2690a23fd118Syl }
2691a23fd118Syl 
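/*
 * xgell_debug_module_mask_get
 *
 * Read handler for the "debug_module_mask" NDD parameter.  Reports the
 * HAL driver's current debug module mask in hexadecimal.
 */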
2692a23fd118Syl static int
2693a23fd118Syl xgell_debug_module_mask_get(queue_t *q, mblk_t *mp, caddr_t cp, cred_t *credp)
2694a23fd118Syl {
2695a23fd118Syl 	char *buf;
2696a23fd118Syl 
2697a23fd118Syl 	buf = kmem_alloc(XGELL_IOCTL_BUFSIZE, KM_SLEEP);
2698a23fd118Syl 	if (buf == NULL) {
2699a23fd118Syl 		return (ENOSPC);
2700a23fd118Syl 	}
2701a23fd118Syl 	(void) mi_mpprintf(mp, "debug_module_mask 0x%08x",
2702a23fd118Syl 	    xge_hal_driver_debug_module_mask());
2703a23fd118Syl 	kmem_free(buf, XGELL_IOCTL_BUFSIZE);
2704a23fd118Syl 
2705a23fd118Syl 	return (0);
2706a23fd118Syl }
2707a23fd118Syl 
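/*
 * xgell_debug_module_mask_set
 *
 * Write handler for the "debug_module_mask" NDD parameter.  Parses a
 * hexadecimal mask (an optional "0x"/"0X" prefix is accepted) and passes
 * it to the HAL.
 */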
2708a23fd118Syl static int
2709a23fd118Syl xgell_debug_module_mask_set(queue_t *q, mblk_t *mp, char *value, caddr_t cp,
2710a23fd118Syl     cred_t *credp)
2711a23fd118Syl {
2712a23fd118Syl 	u32 mask;
2713a23fd118Syl 	char *end;
2714a23fd118Syl 
2715a23fd118Syl 	if (value && *value == '0' &&
2716a23fd118Syl 	    (*(value + 1) == 'x' || *(value + 1) == 'X')) {
2717a23fd118Syl 		value += 2;
2718a23fd118Syl 	}
2719a23fd118Syl 
2720a23fd118Syl 	mask = mi_strtol(value, &end, 16);
2721a23fd118Syl 	if (end == value) {
2722a23fd118Syl 		return (EINVAL);
2723a23fd118Syl 	}
2724a23fd118Syl 
2725a23fd118Syl 	xge_hal_driver_debug_module_mask_set(mask);
2726a23fd118Syl 
2727a23fd118Syl 	return (0);
2728a23fd118Syl }
2729a23fd118Syl 
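/*
 * xgell_devconfig_get
 *
 * Read handler for the "devconfig" NDD parameter.  Prints the current HAL
 * device configuration returned by xge_hal_aux_device_config_read().
 */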
2730a23fd118Syl static int
2731a23fd118Syl xgell_devconfig_get(queue_t *q, mblk_t *mp, caddr_t cp, cred_t *credp)
2732a23fd118Syl {
2733a23fd118Syl 	xgelldev_t *lldev = (xgelldev_t *)(void *)cp;
2734a23fd118Syl 	xge_hal_status_e status;
2735a23fd118Syl 	int retsize;
2736a23fd118Syl 	char *buf;
2737a23fd118Syl 
2738a23fd118Syl 	buf = kmem_alloc(XGELL_DEVCONF_BUFSIZE, KM_SLEEP);
2739a23fd118Syl 	if (buf == NULL) {
2740a23fd118Syl 		return (ENOSPC);
2741a23fd118Syl 	}
2742a23fd118Syl 	status = xge_hal_aux_device_config_read(lldev->devh,
2743da14cebeSEric Cheng 	    XGELL_DEVCONF_BUFSIZE, buf, &retsize);
2744a23fd118Syl 	if (status != XGE_HAL_OK) {
2745a23fd118Syl 		kmem_free(buf, XGELL_DEVCONF_BUFSIZE);
2746a23fd118Syl 		xge_debug_ll(XGE_ERR, "device_config_read(): status %d",
2747a23fd118Syl 		    status);
2748a23fd118Syl 		return (EINVAL);
2749a23fd118Syl 	}
2750a23fd118Syl 	*(buf + retsize - 1) = '\0'; /* remove last '\n' */
2751a23fd118Syl 	(void) mi_mpprintf(mp, "%s", buf);
2752a23fd118Syl 	kmem_free(buf, XGELL_DEVCONF_BUFSIZE);
2753a23fd118Syl 
2754a23fd118Syl 	return (0);
2755a23fd118Syl }
2756a23fd118Syl 
2757a23fd118Syl /*
2758a23fd118Syl  * xgell_device_register
2759a23fd118Syl  * @lldev: pointer to the link-layer (LL) device
2760a23fd118Syl  * @config: pointer to this network device's configuration
2762a23fd118Syl  *
2763a23fd118Syl  * This function sets up the NDD parameters and registers the network device with the MAC layer.
2764a23fd118Syl  */
2765a23fd118Syl int
2766a23fd118Syl xgell_device_register(xgelldev_t *lldev, xgell_config_t *config)
2767a23fd118Syl {
27688347601bSyl 	mac_register_t *macp = NULL;
2769a23fd118Syl 	xge_hal_device_t *hldev = (xge_hal_device_t *)lldev->devh;
2770a23fd118Syl 
2771da14cebeSEric Cheng 	/*
2772da14cebeSEric Cheng 	 * Initialize the NDD parameters used for internal debugging.
2773da14cebeSEric Cheng 	 */
2774a23fd118Syl 	if (nd_load(&lldev->ndp, "pciconf", xgell_pciconf_get, NULL,
2775ba2e4443Sseb 	    (caddr_t)lldev) == B_FALSE)
2776ba2e4443Sseb 		goto xgell_ndd_fail;
2777a23fd118Syl 
2778a23fd118Syl 	if (nd_load(&lldev->ndp, "about", xgell_about_get, NULL,
2779ba2e4443Sseb 	    (caddr_t)lldev) == B_FALSE)
2780ba2e4443Sseb 		goto xgell_ndd_fail;
2781a23fd118Syl 
2782a23fd118Syl 	if (nd_load(&lldev->ndp, "stats", xgell_stats_get, NULL,
2783ba2e4443Sseb 	    (caddr_t)lldev) == B_FALSE)
2784ba2e4443Sseb 		goto xgell_ndd_fail;
2785a23fd118Syl 
2786a23fd118Syl 	if (nd_load(&lldev->ndp, "bar0", xgell_bar0_get, xgell_bar0_set,
2787ba2e4443Sseb 	    (caddr_t)lldev) == B_FALSE)
2788ba2e4443Sseb 		goto xgell_ndd_fail;
2789a23fd118Syl 
2790a23fd118Syl 	if (nd_load(&lldev->ndp, "debug_level", xgell_debug_level_get,
2791ba2e4443Sseb 	    xgell_debug_level_set, (caddr_t)lldev) == B_FALSE)
2792ba2e4443Sseb 		goto xgell_ndd_fail;
2793a23fd118Syl 
2794a23fd118Syl 	if (nd_load(&lldev->ndp, "debug_module_mask",
2795a23fd118Syl 	    xgell_debug_module_mask_get, xgell_debug_module_mask_set,
2796ba2e4443Sseb 	    (caddr_t)lldev) == B_FALSE)
2797ba2e4443Sseb 		goto xgell_ndd_fail;
2798a23fd118Syl 
2799a23fd118Syl 	if (nd_load(&lldev->ndp, "devconfig", xgell_devconfig_get, NULL,
2800ba2e4443Sseb 	    (caddr_t)lldev) == B_FALSE)
2801ba2e4443Sseb 		goto xgell_ndd_fail;
2802a23fd118Syl 
2803a23fd118Syl 	bcopy(config, &lldev->config, sizeof (xgell_config_t));
2804a23fd118Syl 
28057eced415Sxw 	mutex_init(&lldev->genlock, NULL, MUTEX_DRIVER,
28067eced415Sxw 	    DDI_INTR_PRI(hldev->irqh));
2807a23fd118Syl 
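	/*
	 * Allocate a GLDv3 mac_register_t and describe this instance to the
	 * MAC layer: driver handle, devinfo node, factory MAC address,
	 * callback vector, SDU range taken from the HAL MTU, VLAN margin
	 * and level-1 virtualization capability.
	 */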
2808ba2e4443Sseb 	if ((macp = mac_alloc(MAC_VERSION)) == NULL)
2809ba2e4443Sseb 		goto xgell_register_fail;
2810ba2e4443Sseb 	macp->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
28118347601bSyl 	macp->m_driver = lldev;
2812ba2e4443Sseb 	macp->m_dip = lldev->dev_info;
2813ba2e4443Sseb 	macp->m_src_addr = hldev->macaddr[0];
2814ba2e4443Sseb 	macp->m_callbacks = &xgell_m_callbacks;
2815ba2e4443Sseb 	macp->m_min_sdu = 0;
2816ba2e4443Sseb 	macp->m_max_sdu = hldev->config.mtu;
2817d62bc4baSyz 	macp->m_margin = VLAN_TAGSZ;
2818da14cebeSEric Cheng 	macp->m_v12n = MAC_VIRT_LEVEL1;
2819da14cebeSEric Cheng 
2820a23fd118Syl 	/*
2821da14cebeSEric Cheng 	 * MAC Registration.
2822a23fd118Syl 	 */
28238347601bSyl 	if (mac_register(macp, &lldev->mh) != 0)
2824ba2e4443Sseb 		goto xgell_register_fail;
2825a23fd118Syl 
28263c785c4cSyl 	/* Always free macp after a successful registration. */
28273c785c4cSyl 	if (macp != NULL)
28283c785c4cSyl 		mac_free(macp);
28293c785c4cSyl 
28308347601bSyl 	/* Largest Tx frame that can be bcopy'ed into the fifo's aligned buffers. */
28318347601bSyl 	lldev->tx_copied_max = hldev->config.fifo.max_frags *
28327eced415Sxw 	    hldev->config.fifo.alignment_size *
28337eced415Sxw 	    hldev->config.fifo.max_aligned_frags;
28348347601bSyl 
2835a23fd118Syl 	xge_debug_ll(XGE_TRACE, "ethernet device %s%d registered",
2836a23fd118Syl 	    XGELL_IFNAME, lldev->instance);
2837a23fd118Syl 
2838a23fd118Syl 	return (DDI_SUCCESS);
2839ba2e4443Sseb 
2840ba2e4443Sseb xgell_ndd_fail:
2841ba2e4443Sseb 	nd_free(&lldev->ndp);
2842ba2e4443Sseb 	xge_debug_ll(XGE_ERR, "%s", "unable to load ndd parameter");
2843ba2e4443Sseb 	return (DDI_FAILURE);
2844ba2e4443Sseb 
2845ba2e4443Sseb xgell_register_fail:
28468347601bSyl 	if (macp != NULL)
28478347601bSyl 		mac_free(macp);
2848ba2e4443Sseb 	nd_free(&lldev->ndp);
2849ba2e4443Sseb 	mutex_destroy(&lldev->genlock);
2850ba2e4443Sseb 	xge_debug_ll(XGE_ERR, "%s", "unable to register networking device");
2851ba2e4443Sseb 	return (DDI_FAILURE);
2852a23fd118Syl }
2853a23fd118Syl 
2854a23fd118Syl /*
2855a23fd118Syl  * xgell_device_unregister
2856a23fd118Syl  * @lldev: pointer to a valid LL device.
2858a23fd118Syl  *
2859a23fd118Syl  * This function will unregister the network device and free its NDD parameters.
2860a23fd118Syl  */
2861a23fd118Syl int
2862a23fd118Syl xgell_device_unregister(xgelldev_t *lldev)
2863a23fd118Syl {
2864ba2e4443Sseb 	if (mac_unregister(lldev->mh) != 0) {
2865a23fd118Syl 		xge_debug_ll(XGE_ERR, "unable to unregister device %s%d",
2866a23fd118Syl 		    XGELL_IFNAME, lldev->instance);
2867a23fd118Syl 		return (DDI_FAILURE);
2868a23fd118Syl 	}
2869a23fd118Syl 
2870a23fd118Syl 	mutex_destroy(&lldev->genlock);
2871a23fd118Syl 
2872a23fd118Syl 	nd_free(&lldev->ndp);
2873a23fd118Syl 
2874a23fd118Syl 	xge_debug_ll(XGE_TRACE, "ethernet device %s%d unregistered",
2875a23fd118Syl 	    XGELL_IFNAME, lldev->instance);
2876a23fd118Syl 
2877a23fd118Syl 	return (DDI_SUCCESS);
2878a23fd118Syl }
2879