xref: /illumos-gate/usr/src/uts/common/io/xge/drv/xgell.c (revision 8347601b)
1a23fd118Syl /*
2a23fd118Syl  * CDDL HEADER START
3a23fd118Syl  *
4a23fd118Syl  * The contents of this file are subject to the terms of the
5a23fd118Syl  * Common Development and Distribution License (the "License").
6a23fd118Syl  * You may not use this file except in compliance with the License.
7a23fd118Syl  *
8a23fd118Syl  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9a23fd118Syl  * or http://www.opensolaris.org/os/licensing.
10a23fd118Syl  * See the License for the specific language governing permissions
11a23fd118Syl  * and limitations under the License.
12a23fd118Syl  *
13a23fd118Syl  * When distributing Covered Code, include this CDDL HEADER in each
14a23fd118Syl  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15a23fd118Syl  * If applicable, add the following below this CDDL HEADER, with the
16a23fd118Syl  * fields enclosed by brackets "[]" replaced with your own identifying
17a23fd118Syl  * information: Portions Copyright [yyyy] [name of copyright owner]
18a23fd118Syl  *
19a23fd118Syl  * CDDL HEADER END
20a23fd118Syl  */
21a23fd118Syl 
22a23fd118Syl /*
23a23fd118Syl  * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
24a23fd118Syl  * Use is subject to license terms.
25a23fd118Syl  */
26a23fd118Syl 
27a23fd118Syl #pragma ident	"%Z%%M%	%I%	%E% SMI"
28a23fd118Syl 
29a23fd118Syl /*
30a23fd118Syl  *  Copyright (c) 2002-2005 Neterion, Inc.
31a23fd118Syl  *  All rights reserved.
32a23fd118Syl  *
33a23fd118Syl  *  FileName :    xgell.c
34a23fd118Syl  *
35a23fd118Syl  *  Description:  Xge Link Layer data path implementation
36a23fd118Syl  *
37a23fd118Syl  */
38a23fd118Syl 
39a23fd118Syl #include "xgell.h"
40a23fd118Syl 
41a23fd118Syl #include <netinet/ip.h>
42a23fd118Syl #include <netinet/tcp.h>
43*8347601bSyl #include <netinet/udp.h>
44a23fd118Syl 
45ba2e4443Sseb #define	XGELL_MAX_FRAME_SIZE(hldev)	((hldev)->config.mtu +	\
46a23fd118Syl     sizeof (struct ether_vlan_header))
47a23fd118Syl 
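/*
 * A 2-byte headroom before the 14-byte DIX Ethernet header leaves the IP
 * header 4-byte aligned in the receive buffer.
 */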
48a23fd118Syl #define	HEADROOM		2	/* for DIX-only packets */
49a23fd118Syl 
50a23fd118Syl #ifdef XGELL_L3_ALIGNED
51a23fd118Syl void header_free_func(void *arg) { }
52a23fd118Syl frtn_t header_frtn = {header_free_func, NULL};
53a23fd118Syl #endif
54a23fd118Syl 
55a23fd118Syl /* DMA attributes used for Tx side */
56a23fd118Syl static struct ddi_dma_attr tx_dma_attr = {
57a23fd118Syl 	DMA_ATTR_V0,			/* dma_attr_version */
58a23fd118Syl 	0x0ULL,				/* dma_attr_addr_lo */
59a23fd118Syl 	0xFFFFFFFFFFFFFFFFULL,		/* dma_attr_addr_hi */
60a23fd118Syl 	0xFFFFFFFFULL,			/* dma_attr_count_max */
61a23fd118Syl 	0x1ULL,				/* dma_attr_align */
62a23fd118Syl 	0xFFF,				/* dma_attr_burstsizes */
63a23fd118Syl 	1,				/* dma_attr_minxfer */
64a23fd118Syl 	0xFFFFFFFFULL,			/* dma_attr_maxxfer */
65a23fd118Syl 	0xFFFFFFFFFFFFFFFFULL,		/* dma_attr_seg */
66*8347601bSyl 	18,				/* dma_attr_sgllen */
67a23fd118Syl 	1,				/* dma_attr_granular */
68a23fd118Syl 	0				/* dma_attr_flags */
69a23fd118Syl };
70a23fd118Syl 
71a23fd118Syl /* Aligned DMA attributes used for Tx side */
72a23fd118Syl struct ddi_dma_attr tx_dma_attr_align = {
73a23fd118Syl 	DMA_ATTR_V0,			/* dma_attr_version */
74a23fd118Syl 	0x0ULL,				/* dma_attr_addr_lo */
75a23fd118Syl 	0xFFFFFFFFFFFFFFFFULL,		/* dma_attr_addr_hi */
76a23fd118Syl 	0xFFFFFFFFULL,			/* dma_attr_count_max */
77a23fd118Syl 	4096,				/* dma_attr_align */
78a23fd118Syl 	0xFFF,				/* dma_attr_burstsizes */
79a23fd118Syl 	1,				/* dma_attr_minxfer */
80a23fd118Syl 	0xFFFFFFFFULL,			/* dma_attr_maxxfer */
81a23fd118Syl 	0xFFFFFFFFFFFFFFFFULL,		/* dma_attr_seg */
82a23fd118Syl 	4,				/* dma_attr_sgllen */
83a23fd118Syl 	1,				/* dma_attr_granular */
84a23fd118Syl 	0				/* dma_attr_flags */
85a23fd118Syl };
86a23fd118Syl 
87a23fd118Syl /*
88a23fd118Syl  * DMA attributes used when using ddi_dma_mem_alloc to
89a23fd118Syl  * allocate HAL descriptors and Rx buffers during replenish
90a23fd118Syl  */
91a23fd118Syl static struct ddi_dma_attr hal_dma_attr = {
92a23fd118Syl 	DMA_ATTR_V0,			/* dma_attr_version */
93a23fd118Syl 	0x0ULL,				/* dma_attr_addr_lo */
94a23fd118Syl 	0xFFFFFFFFFFFFFFFFULL,		/* dma_attr_addr_hi */
95a23fd118Syl 	0xFFFFFFFFULL,			/* dma_attr_count_max */
96a23fd118Syl 	0x1ULL,				/* dma_attr_align */
97a23fd118Syl 	0xFFF,				/* dma_attr_burstsizes */
98a23fd118Syl 	1,				/* dma_attr_minxfer */
99a23fd118Syl 	0xFFFFFFFFULL,			/* dma_attr_maxxfer */
100a23fd118Syl 	0xFFFFFFFFFFFFFFFFULL,		/* dma_attr_seg */
101a23fd118Syl 	1,				/* dma_attr_sgllen */
102a23fd118Syl 	1,				/* dma_attr_granular */
103a23fd118Syl 	0				/* dma_attr_flags */
104a23fd118Syl };
105a23fd118Syl 
106a23fd118Syl /*
107a23fd118Syl  * Aligned DMA attributes used when using ddi_dma_mem_alloc to
108a23fd118Syl  * allocate HAL descriptors and Rx buffers during replenish
109a23fd118Syl  */
110a23fd118Syl struct ddi_dma_attr hal_dma_attr_aligned = {
111a23fd118Syl 	DMA_ATTR_V0,			/* dma_attr_version */
112a23fd118Syl 	0x0ULL,				/* dma_attr_addr_lo */
113a23fd118Syl 	0xFFFFFFFFFFFFFFFFULL,		/* dma_attr_addr_hi */
114a23fd118Syl 	0xFFFFFFFFULL,			/* dma_attr_count_max */
115a23fd118Syl 	4096,				/* dma_attr_align */
116a23fd118Syl 	0xFFF,				/* dma_attr_burstsizes */
117a23fd118Syl 	1,				/* dma_attr_minxfer */
118a23fd118Syl 	0xFFFFFFFFULL,			/* dma_attr_maxxfer */
119a23fd118Syl 	0xFFFFFFFFFFFFFFFFULL,		/* dma_attr_seg */
120a23fd118Syl 	1,				/* dma_attr_sgllen */
121a23fd118Syl 	1,				/* dma_attr_granular */
122a23fd118Syl 	0				/* dma_attr_flags */
123a23fd118Syl };
124a23fd118Syl 
125a23fd118Syl struct ddi_dma_attr *p_hal_dma_attr = &hal_dma_attr;
126a23fd118Syl struct ddi_dma_attr *p_hal_dma_attr_aligned = &hal_dma_attr_aligned;
127a23fd118Syl 
128ba2e4443Sseb static int		xgell_m_stat(void *, uint_t, uint64_t *);
129ba2e4443Sseb static int		xgell_m_start(void *);
130ba2e4443Sseb static void		xgell_m_stop(void *);
131ba2e4443Sseb static int		xgell_m_promisc(void *, boolean_t);
132ba2e4443Sseb static int		xgell_m_multicst(void *, boolean_t, const uint8_t *);
133ba2e4443Sseb static int		xgell_m_unicst(void *, const uint8_t *);
134ba2e4443Sseb static void		xgell_m_ioctl(void *, queue_t *, mblk_t *);
135ba2e4443Sseb static mblk_t 		*xgell_m_tx(void *, mblk_t *);
136ba2e4443Sseb static boolean_t	xgell_m_getcapab(void *, mac_capab_t, void *);
137ba2e4443Sseb 
138ba2e4443Sseb #define	XGELL_M_CALLBACK_FLAGS	(MC_IOCTL | MC_GETCAPAB)
139ba2e4443Sseb 
140ba2e4443Sseb static mac_callbacks_t xgell_m_callbacks = {
141ba2e4443Sseb 	XGELL_M_CALLBACK_FLAGS,
142ba2e4443Sseb 	xgell_m_stat,
143ba2e4443Sseb 	xgell_m_start,
144ba2e4443Sseb 	xgell_m_stop,
145ba2e4443Sseb 	xgell_m_promisc,
146ba2e4443Sseb 	xgell_m_multicst,
147ba2e4443Sseb 	xgell_m_unicst,
148ba2e4443Sseb 	xgell_m_tx,
149ba2e4443Sseb 	NULL,
150ba2e4443Sseb 	xgell_m_ioctl,
151ba2e4443Sseb 	xgell_m_getcapab
152ba2e4443Sseb };
153ba2e4443Sseb 
154a23fd118Syl /*
155a23fd118Syl  * xge_device_poll
156a23fd118Syl  *
157a23fd118Syl  * The cyclic should call this every 1s. xge_callback_event_queued should
158a23fd118Syl  * call this when a HAL event has been rescheduled.
159a23fd118Syl  */
160a23fd118Syl /*ARGSUSED*/
161a23fd118Syl void
162a23fd118Syl xge_device_poll(void *data)
163a23fd118Syl {
164a23fd118Syl 	xgelldev_t *lldev = xge_hal_device_private(data);
165a23fd118Syl 
166a23fd118Syl 	mutex_enter(&lldev->genlock);
167a23fd118Syl 	if (lldev->is_initialized) {
168a23fd118Syl 		xge_hal_device_poll(data);
169a23fd118Syl 		lldev->timeout_id = timeout(xge_device_poll, data,
170a23fd118Syl 		    XGE_DEV_POLL_TICKS);
171*8347601bSyl 	} else if (lldev->in_reset == 1) {
172*8347601bSyl 		lldev->timeout_id = timeout(xge_device_poll, data,
173*8347601bSyl 		    XGE_DEV_POLL_TICKS);
174*8347601bSyl 	} else {
175*8347601bSyl 		lldev->timeout_id = 0;
176a23fd118Syl 	}
177a23fd118Syl 	mutex_exit(&lldev->genlock);
178a23fd118Syl }
179a23fd118Syl 
180a23fd118Syl /*
181a23fd118Syl  * xge_device_poll_now
182a23fd118Syl  *
183a23fd118Syl  * Will call xge_device_poll() immediately
184a23fd118Syl  */
185a23fd118Syl void
186a23fd118Syl xge_device_poll_now(void *data)
187a23fd118Syl {
188a23fd118Syl 	xgelldev_t *lldev = xge_hal_device_private(data);
189a23fd118Syl 
190a23fd118Syl 	mutex_enter(&lldev->genlock);
191*8347601bSyl 	if (lldev->is_initialized) {
192*8347601bSyl 		xge_hal_device_poll(data);
193*8347601bSyl 	}
194a23fd118Syl 	mutex_exit(&lldev->genlock);
195a23fd118Syl }
196a23fd118Syl 
197a23fd118Syl /*
198a23fd118Syl  * xgell_callback_link_up
199a23fd118Syl  *
200a23fd118Syl  * This function is called by the HAL to report a HW link-up state change.
201a23fd118Syl  */
202a23fd118Syl void
203a23fd118Syl xgell_callback_link_up(void *userdata)
204a23fd118Syl {
205a23fd118Syl 	xgelldev_t *lldev = (xgelldev_t *)userdata;
206a23fd118Syl 
207ba2e4443Sseb 	mac_link_update(lldev->mh, LINK_STATE_UP);
208a23fd118Syl 	/* Link states should be reported to user whenever it changes */
209a23fd118Syl 	cmn_err(CE_NOTE, "!%s%d: Link is up [10 Gbps Full Duplex]",
210a23fd118Syl 	    XGELL_IFNAME, lldev->instance);
211a23fd118Syl }
212a23fd118Syl 
213a23fd118Syl /*
214a23fd118Syl  * xgell_callback_link_down
215a23fd118Syl  *
216a23fd118Syl  * This function is called by the HAL to report a HW link-down state change.
217a23fd118Syl  */
218a23fd118Syl void
219a23fd118Syl xgell_callback_link_down(void *userdata)
220a23fd118Syl {
221a23fd118Syl 	xgelldev_t *lldev = (xgelldev_t *)userdata;
222a23fd118Syl 
223ba2e4443Sseb 	mac_link_update(lldev->mh, LINK_STATE_DOWN);
224a23fd118Syl 	/* Link states should be reported to user whenever it changes */
225a23fd118Syl 	cmn_err(CE_NOTE, "!%s%d: Link is down", XGELL_IFNAME,
226a23fd118Syl 	    lldev->instance);
227a23fd118Syl }
228a23fd118Syl 
229a23fd118Syl /*
230a23fd118Syl  * xgell_rx_buffer_replenish_all
231a23fd118Syl  *
232a23fd118Syl  * Replenish all freed dtr(s) with buffers from the free pool. It's called by
233a23fd118Syl  * xgell_rx_buffer_recycle() or xgell_rx_1b_compl().
234a23fd118Syl  * Must be called with pool_lock held.
235a23fd118Syl  */
236a23fd118Syl static void
237a23fd118Syl xgell_rx_buffer_replenish_all(xgelldev_t *lldev)
238a23fd118Syl {
239a23fd118Syl 	xge_hal_dtr_h dtr;
240a23fd118Syl 	xgell_rx_buffer_t *rx_buffer;
241a23fd118Syl 	xgell_rxd_priv_t *rxd_priv;
242a23fd118Syl 
243*8347601bSyl 	xge_assert(mutex_owned(&lldev->bf_pool.pool_lock));
244*8347601bSyl 
245a23fd118Syl 	while ((lldev->bf_pool.free > 0) &&
246a23fd118Syl 	    (xge_hal_ring_dtr_reserve(lldev->ring_main.channelh, &dtr) ==
247a23fd118Syl 	    XGE_HAL_OK)) {
248a23fd118Syl 		rx_buffer = lldev->bf_pool.head;
249a23fd118Syl 		lldev->bf_pool.head = rx_buffer->next;
250a23fd118Syl 		lldev->bf_pool.free--;
251a23fd118Syl 
252a23fd118Syl 		xge_assert(rx_buffer);
253a23fd118Syl 		xge_assert(rx_buffer->dma_addr);
254a23fd118Syl 
255a23fd118Syl 		rxd_priv = (xgell_rxd_priv_t *)
256a23fd118Syl 		    xge_hal_ring_dtr_private(lldev->ring_main.channelh, dtr);
257a23fd118Syl 		xge_hal_ring_dtr_1b_set(dtr, rx_buffer->dma_addr,
258a23fd118Syl 		    lldev->bf_pool.size);
259a23fd118Syl 
260a23fd118Syl 		rxd_priv->rx_buffer = rx_buffer;
261a23fd118Syl 		xge_hal_ring_dtr_post(lldev->ring_main.channelh, dtr);
262a23fd118Syl 	}
263a23fd118Syl }
264a23fd118Syl 
265a23fd118Syl /*
266a23fd118Syl  * xgell_rx_buffer_release
267a23fd118Syl  *
268a23fd118Syl  * The only thing done here is to put the buffer back to the pool.
269*8347601bSyl  * Callers must hold the bf_pool.pool_lock mutex.
270a23fd118Syl  */
271a23fd118Syl static void
272a23fd118Syl xgell_rx_buffer_release(xgell_rx_buffer_t *rx_buffer)
273a23fd118Syl {
274a23fd118Syl 	xgelldev_t *lldev = rx_buffer->lldev;
275a23fd118Syl 
276*8347601bSyl 	xge_assert(mutex_owned(&lldev->bf_pool.pool_lock));
277a23fd118Syl 
278a23fd118Syl 	/* Put the buffer back to pool */
279a23fd118Syl 	rx_buffer->next = lldev->bf_pool.head;
280a23fd118Syl 	lldev->bf_pool.head = rx_buffer;
281a23fd118Syl 
282a23fd118Syl 	lldev->bf_pool.free++;
283a23fd118Syl }
284a23fd118Syl 
285a23fd118Syl /*
286a23fd118Syl  * xgell_rx_buffer_recycle
287a23fd118Syl  *
288a23fd118Syl  * Registered with desballoc() and called to "free" the resource.
289a23fd118Syl  * We will try to replenish all descriptors.
290a23fd118Syl  */
291a23fd118Syl static void
292a23fd118Syl xgell_rx_buffer_recycle(char *arg)
293a23fd118Syl {
294a23fd118Syl 	xgell_rx_buffer_t *rx_buffer = (xgell_rx_buffer_t *)arg;
295a23fd118Syl 	xgelldev_t *lldev = rx_buffer->lldev;
296a23fd118Syl 
297a23fd118Syl 	mutex_enter(&lldev->bf_pool.pool_lock);
298*8347601bSyl 
299*8347601bSyl 	xgell_rx_buffer_release(rx_buffer);
300a23fd118Syl 	lldev->bf_pool.post--;
301a23fd118Syl 
302a23fd118Syl 	/*
303a23fd118Syl 	 * Before finding a good way to set this hiwat, just always call to
304a23fd118Syl 	 * replenish_all. *TODO*
305a23fd118Syl 	 */
306a23fd118Syl 	if (lldev->is_initialized != 0) {
307a23fd118Syl 		xgell_rx_buffer_replenish_all(lldev);
308a23fd118Syl 	}
309a23fd118Syl 
310a23fd118Syl 	mutex_exit(&lldev->bf_pool.pool_lock);
311a23fd118Syl }
312a23fd118Syl 
313a23fd118Syl /*
314a23fd118Syl  * xgell_rx_buffer_alloc
315a23fd118Syl  *
316a23fd118Syl  * Allocate one rx buffer and return a pointer to it.
317a23fd118Syl  * Return NULL on failure.
318a23fd118Syl  */
319a23fd118Syl static xgell_rx_buffer_t *
320a23fd118Syl xgell_rx_buffer_alloc(xgelldev_t *lldev)
321a23fd118Syl {
322a23fd118Syl 	xge_hal_device_t *hldev;
323a23fd118Syl 	void *vaddr;
324a23fd118Syl 	ddi_dma_handle_t dma_handle;
325a23fd118Syl 	ddi_acc_handle_t dma_acch;
326a23fd118Syl 	dma_addr_t dma_addr;
327a23fd118Syl 	uint_t ncookies;
328a23fd118Syl 	ddi_dma_cookie_t dma_cookie;
329a23fd118Syl 	size_t real_size;
330a23fd118Syl 	extern ddi_device_acc_attr_t *p_xge_dev_attr;
331a23fd118Syl 	xgell_rx_buffer_t *rx_buffer;
332a23fd118Syl 
333*8347601bSyl 	hldev = (xge_hal_device_t *)lldev->devh;
334a23fd118Syl 
335a23fd118Syl 	if (ddi_dma_alloc_handle(hldev->pdev, p_hal_dma_attr, DDI_DMA_SLEEP,
336a23fd118Syl 	    0, &dma_handle) != DDI_SUCCESS) {
337a23fd118Syl 		xge_debug_ll(XGE_ERR, "%s%d: can not allocate DMA handle",
338a23fd118Syl 		    XGELL_IFNAME, lldev->instance);
339a23fd118Syl 		goto handle_failed;
340a23fd118Syl 	}
341a23fd118Syl 
342a23fd118Syl 	/* reserve some space at the end of the buffer for recycling */
343a23fd118Syl 	if (ddi_dma_mem_alloc(dma_handle, HEADROOM + lldev->bf_pool.size +
344a23fd118Syl 	    sizeof (xgell_rx_buffer_t), p_xge_dev_attr, DDI_DMA_STREAMING,
345a23fd118Syl 	    DDI_DMA_SLEEP, 0, (caddr_t *)&vaddr, &real_size, &dma_acch) !=
346a23fd118Syl 	    DDI_SUCCESS) {
347a23fd118Syl 		xge_debug_ll(XGE_ERR, "%s%d: can not allocate DMA-able memory",
348a23fd118Syl 		    XGELL_IFNAME, lldev->instance);
349a23fd118Syl 		goto mem_failed;
350a23fd118Syl 	}
351a23fd118Syl 
352a23fd118Syl 	if (HEADROOM + lldev->bf_pool.size + sizeof (xgell_rx_buffer_t) >
353a23fd118Syl 	    real_size) {
354a23fd118Syl 		xge_debug_ll(XGE_ERR, "%s%d: can not allocate DMA-able memory",
355a23fd118Syl 		    XGELL_IFNAME, lldev->instance);
356a23fd118Syl 		goto bind_failed;
357a23fd118Syl 	}
358a23fd118Syl 
359a23fd118Syl 	if (ddi_dma_addr_bind_handle(dma_handle, NULL, (char *)vaddr + HEADROOM,
360a23fd118Syl 	    lldev->bf_pool.size, DDI_DMA_READ | DDI_DMA_STREAMING,
361a23fd118Syl 	    DDI_DMA_SLEEP, 0, &dma_cookie, &ncookies) != DDI_SUCCESS) {
362a23fd118Syl 		xge_debug_ll(XGE_ERR, "%s%d: out of mapping for mblk",
363a23fd118Syl 		    XGELL_IFNAME, lldev->instance);
364a23fd118Syl 		goto bind_failed;
365a23fd118Syl 	}
366a23fd118Syl 
367a23fd118Syl 	if (ncookies != 1 || dma_cookie.dmac_size < lldev->bf_pool.size) {
368a23fd118Syl 		xge_debug_ll(XGE_ERR, "%s%d: can not handle partial DMA",
369a23fd118Syl 		    XGELL_IFNAME, lldev->instance);
370a23fd118Syl 		goto check_failed;
371a23fd118Syl 	}
372a23fd118Syl 
373a23fd118Syl 	dma_addr = dma_cookie.dmac_laddress;
374a23fd118Syl 
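	/*
	 * The xgell_rx_buffer_t control structure lives in the tail of this
	 * same DMA allocation, in the extra space reserved above.
	 */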
375a23fd118Syl 	rx_buffer = (xgell_rx_buffer_t *)((char *)vaddr + real_size -
376a23fd118Syl 	    sizeof (xgell_rx_buffer_t));
377a23fd118Syl 	rx_buffer->next = NULL;
378a23fd118Syl 	rx_buffer->vaddr = vaddr;
379a23fd118Syl 	rx_buffer->dma_addr = dma_addr;
380a23fd118Syl 	rx_buffer->dma_handle = dma_handle;
381a23fd118Syl 	rx_buffer->dma_acch = dma_acch;
382a23fd118Syl 	rx_buffer->lldev = lldev;
383a23fd118Syl 	rx_buffer->frtn.free_func = xgell_rx_buffer_recycle;
384a23fd118Syl 	rx_buffer->frtn.free_arg = (void *)rx_buffer;
385a23fd118Syl 
386a23fd118Syl 	return (rx_buffer);
387a23fd118Syl 
388a23fd118Syl check_failed:
389a23fd118Syl 	(void) ddi_dma_unbind_handle(dma_handle);
390a23fd118Syl bind_failed:
391a23fd118Syl 	XGE_OS_MEMORY_CHECK_FREE(vaddr, 0);
392a23fd118Syl 	ddi_dma_mem_free(&dma_acch);
393a23fd118Syl mem_failed:
394a23fd118Syl 	ddi_dma_free_handle(&dma_handle);
395a23fd118Syl handle_failed:
396a23fd118Syl 
397a23fd118Syl 	return (NULL);
398a23fd118Syl }
399a23fd118Syl 
400a23fd118Syl /*
401a23fd118Syl  * xgell_rx_destroy_buffer_pool
402a23fd118Syl  *
403a23fd118Syl  * Destroy the buffer pool. If any buffers are still held by the upper layer,
404a23fd118Syl  * as recorded by bf_pool.post, return DDI_FAILURE to refuse unloading.
405a23fd118Syl  */
406a23fd118Syl static int
407a23fd118Syl xgell_rx_destroy_buffer_pool(xgelldev_t *lldev)
408a23fd118Syl {
409a23fd118Syl 	xgell_rx_buffer_t *rx_buffer;
410a23fd118Syl 	ddi_dma_handle_t  dma_handle;
411a23fd118Syl 	ddi_acc_handle_t  dma_acch;
412a23fd118Syl 	int i;
413a23fd118Syl 
414a23fd118Syl 	/*
415a23fd118Syl 	 * If any buffers are still posted, the driver must refuse to be
416a23fd118Syl 	 * detached. The upper layer needs to release them first.
417a23fd118Syl 	 */
418a23fd118Syl 	if (lldev->bf_pool.post != 0) {
419a23fd118Syl 		xge_debug_ll(XGE_ERR,
420a23fd118Syl 		    "%s%d has some buffers not recycled yet, try later!",
421a23fd118Syl 		    XGELL_IFNAME, lldev->instance);
422a23fd118Syl 		return (DDI_FAILURE);
423a23fd118Syl 	}
424a23fd118Syl 
425a23fd118Syl 	/*
426a23fd118Syl 	 * Release buffers one by one.
427a23fd118Syl 	 */
428a23fd118Syl 	for (i = lldev->bf_pool.total; i > 0; i--) {
429a23fd118Syl 		rx_buffer = lldev->bf_pool.head;
430a23fd118Syl 		xge_assert(rx_buffer != NULL);
431a23fd118Syl 
432a23fd118Syl 		lldev->bf_pool.head = rx_buffer->next;
433a23fd118Syl 
434a23fd118Syl 		dma_handle = rx_buffer->dma_handle;
435a23fd118Syl 		dma_acch = rx_buffer->dma_acch;
436a23fd118Syl 
437a23fd118Syl 		if (ddi_dma_unbind_handle(dma_handle) != DDI_SUCCESS) {
438a23fd118Syl 			xge_debug_ll(XGE_ERR, "failed to unbind DMA handle!");
439a23fd118Syl 			lldev->bf_pool.head = rx_buffer;
440a23fd118Syl 			return (DDI_FAILURE);
441a23fd118Syl 		}
442a23fd118Syl 		ddi_dma_mem_free(&dma_acch);
443a23fd118Syl 		ddi_dma_free_handle(&dma_handle);
444a23fd118Syl 
445a23fd118Syl 		lldev->bf_pool.total--;
446a23fd118Syl 		lldev->bf_pool.free--;
447a23fd118Syl 	}
448a23fd118Syl 
449a23fd118Syl 	mutex_destroy(&lldev->bf_pool.pool_lock);
450a23fd118Syl 	return (DDI_SUCCESS);
451a23fd118Syl }
452a23fd118Syl 
453a23fd118Syl /*
454a23fd118Syl  * xgell_rx_create_buffer_pool
455a23fd118Syl  *
456a23fd118Syl  * Initialize RX buffer pool for all RX rings. Refer to rx_buffer_pool_t.
457a23fd118Syl  */
458a23fd118Syl static int
459a23fd118Syl xgell_rx_create_buffer_pool(xgelldev_t *lldev)
460a23fd118Syl {
461a23fd118Syl 	xge_hal_device_t *hldev;
462a23fd118Syl 	xgell_rx_buffer_t *rx_buffer;
463a23fd118Syl 	int i;
464a23fd118Syl 
465ba2e4443Sseb 	hldev = (xge_hal_device_t *)lldev->devh;
466a23fd118Syl 
467a23fd118Syl 	lldev->bf_pool.total = 0;
468ba2e4443Sseb 	lldev->bf_pool.size = XGELL_MAX_FRAME_SIZE(hldev);
469a23fd118Syl 	lldev->bf_pool.head = NULL;
470a23fd118Syl 	lldev->bf_pool.free = 0;
471a23fd118Syl 	lldev->bf_pool.post = 0;
472a23fd118Syl 	lldev->bf_pool.post_hiwat = lldev->config.rx_buffer_post_hiwat;
473a23fd118Syl 
474a23fd118Syl 	mutex_init(&lldev->bf_pool.pool_lock, NULL, MUTEX_DRIVER,
475a23fd118Syl 	    hldev->irqh);
476a23fd118Syl 
477a23fd118Syl 	/*
478a23fd118Syl 	 * Allocate buffers one by one. If failed, destroy whole pool by
479a23fd118Syl 	 * call to xgell_rx_destroy_buffer_pool().
480a23fd118Syl 	 */
481a23fd118Syl 	for (i = 0; i < lldev->config.rx_buffer_total; i++) {
482a23fd118Syl 		if ((rx_buffer = xgell_rx_buffer_alloc(lldev)) == NULL) {
483a23fd118Syl 			(void) xgell_rx_destroy_buffer_pool(lldev);
484a23fd118Syl 			return (DDI_FAILURE);
485a23fd118Syl 		}
486a23fd118Syl 
487a23fd118Syl 		rx_buffer->next = lldev->bf_pool.head;
488a23fd118Syl 		lldev->bf_pool.head = rx_buffer;
489a23fd118Syl 
490a23fd118Syl 		lldev->bf_pool.total++;
491a23fd118Syl 		lldev->bf_pool.free++;
492a23fd118Syl 	}
493a23fd118Syl 
494a23fd118Syl 	return (DDI_SUCCESS);
495a23fd118Syl }
496a23fd118Syl 
497a23fd118Syl /*
498a23fd118Syl  * xgell_rx_dtr_replenish
499a23fd118Syl  *
500a23fd118Syl  * Replenish descriptor with rx_buffer in RX buffer pool.
501a23fd118Syl  * Replenish the descriptor with an rx_buffer from the RX buffer pool.
502a23fd118Syl  * The dtr should be posted right away.
503a23fd118Syl xge_hal_status_e
504a23fd118Syl xgell_rx_dtr_replenish(xge_hal_channel_h channelh, xge_hal_dtr_h dtr, int index,
505a23fd118Syl     void *userdata, xge_hal_channel_reopen_e reopen)
506a23fd118Syl {
507a23fd118Syl 	xgell_ring_t *ring = userdata;
508ba2e4443Sseb 	xgelldev_t *lldev = ring->lldev;
509a23fd118Syl 	xgell_rx_buffer_t *rx_buffer;
510a23fd118Syl 	xgell_rxd_priv_t *rxd_priv;
511a23fd118Syl 
512a23fd118Syl 	if (lldev->bf_pool.head == NULL) {
513a23fd118Syl 		xge_debug_ll(XGE_ERR, "no more available rx DMA buffer!");
514a23fd118Syl 		return (XGE_HAL_FAIL);
515a23fd118Syl 	}
516a23fd118Syl 	rx_buffer = lldev->bf_pool.head;
517a23fd118Syl 	lldev->bf_pool.head = rx_buffer->next;
518a23fd118Syl 	lldev->bf_pool.free--;
519a23fd118Syl 
520a23fd118Syl 	xge_assert(rx_buffer);
521a23fd118Syl 	xge_assert(rx_buffer->dma_addr);
522a23fd118Syl 
523a23fd118Syl 	rxd_priv = (xgell_rxd_priv_t *)
524a23fd118Syl 	    xge_hal_ring_dtr_private(lldev->ring_main.channelh, dtr);
525a23fd118Syl 	xge_hal_ring_dtr_1b_set(dtr, rx_buffer->dma_addr, lldev->bf_pool.size);
526a23fd118Syl 
527a23fd118Syl 	rxd_priv->rx_buffer = rx_buffer;
528a23fd118Syl 
529a23fd118Syl 	return (XGE_HAL_OK);
530a23fd118Syl }
531a23fd118Syl 
532a23fd118Syl /*
533a23fd118Syl  * xgell_get_ip_offset
534a23fd118Syl  *
535a23fd118Syl  * Calculate the offset to the IP header.
536a23fd118Syl  */
537a23fd118Syl static inline int
538a23fd118Syl xgell_get_ip_offset(xge_hal_dtr_info_t *ext_info)
539a23fd118Syl {
540a23fd118Syl 	int ip_off;
541a23fd118Syl 
542a23fd118Syl 	/* get IP-header offset */
543a23fd118Syl 	switch (ext_info->frame) {
544a23fd118Syl 	case XGE_HAL_FRAME_TYPE_DIX:
545a23fd118Syl 		ip_off = XGE_HAL_HEADER_ETHERNET_II_802_3_SIZE;
546a23fd118Syl 		break;
547a23fd118Syl 	case XGE_HAL_FRAME_TYPE_IPX:
548a23fd118Syl 		ip_off = (XGE_HAL_HEADER_ETHERNET_II_802_3_SIZE +
549a23fd118Syl 		    XGE_HAL_HEADER_802_2_SIZE +
550a23fd118Syl 		    XGE_HAL_HEADER_SNAP_SIZE);
551a23fd118Syl 		break;
552a23fd118Syl 	case XGE_HAL_FRAME_TYPE_LLC:
553a23fd118Syl 		ip_off = (XGE_HAL_HEADER_ETHERNET_II_802_3_SIZE +
554a23fd118Syl 		    XGE_HAL_HEADER_802_2_SIZE);
555a23fd118Syl 		break;
556a23fd118Syl 	case XGE_HAL_FRAME_TYPE_SNAP:
557a23fd118Syl 		ip_off = (XGE_HAL_HEADER_ETHERNET_II_802_3_SIZE +
558a23fd118Syl 		    XGE_HAL_HEADER_SNAP_SIZE);
559a23fd118Syl 		break;
560a23fd118Syl 	default:
561a23fd118Syl 		ip_off = 0;
562a23fd118Syl 		break;
563a23fd118Syl 	}
564a23fd118Syl 
565a23fd118Syl 	if ((ext_info->proto & XGE_HAL_FRAME_PROTO_IPV4 ||
566a23fd118Syl 	    ext_info->proto & XGE_HAL_FRAME_PROTO_IPV6) &&
567a23fd118Syl 	    (ext_info->proto & XGE_HAL_FRAME_PROTO_VLAN_TAGGED)) {
568a23fd118Syl 		ip_off += XGE_HAL_HEADER_VLAN_SIZE;
569a23fd118Syl 	}
570a23fd118Syl 
571a23fd118Syl 	return (ip_off);
572a23fd118Syl }
573a23fd118Syl 
574a23fd118Syl /*
575a23fd118Syl  * xgell_rx_hcksum_assoc
576a23fd118Syl  *
577a23fd118Syl  * Judge the packet type and then call to hcksum_assoc() to associate
578a23fd118Syl  * Determine the packet type and then call hcksum_assoc() to associate
579a23fd118Syl  */
580a23fd118Syl static inline void
581a23fd118Syl xgell_rx_hcksum_assoc(mblk_t *mp, char *vaddr, int pkt_length,
582a23fd118Syl     xge_hal_dtr_info_t *ext_info)
583a23fd118Syl {
584a23fd118Syl 	int cksum_flags = 0;
585a23fd118Syl 
586a23fd118Syl 	if (!(ext_info->proto & XGE_HAL_FRAME_PROTO_IP_FRAGMENTED)) {
587a23fd118Syl 		if (ext_info->proto & XGE_HAL_FRAME_PROTO_TCP_OR_UDP) {
588a23fd118Syl 			if (ext_info->l3_cksum == XGE_HAL_L3_CKSUM_OK) {
589a23fd118Syl 				cksum_flags |= HCK_IPV4_HDRCKSUM;
590a23fd118Syl 			}
591a23fd118Syl 			if (ext_info->l4_cksum == XGE_HAL_L4_CKSUM_OK) {
592a23fd118Syl 				cksum_flags |= HCK_FULLCKSUM_OK;
593a23fd118Syl 			}
594a23fd118Syl 			if (cksum_flags) {
595a23fd118Syl 				cksum_flags |= HCK_FULLCKSUM;
596a23fd118Syl 				(void) hcksum_assoc(mp, NULL, NULL, 0,
597a23fd118Syl 				    0, 0, 0, cksum_flags, 0);
598a23fd118Syl 			}
599a23fd118Syl 		}
600a23fd118Syl 	} else if (ext_info->proto &
601a23fd118Syl 	    (XGE_HAL_FRAME_PROTO_IPV4 | XGE_HAL_FRAME_PROTO_IPV6)) {
602a23fd118Syl 		/*
603a23fd118Syl 		 * Just pass the partial cksum up to IP.
604a23fd118Syl 		 */
605*8347601bSyl 		int ip_off = xgell_get_ip_offset(ext_info);
606a23fd118Syl 		int start, end = pkt_length - ip_off;
607a23fd118Syl 
608a23fd118Syl 		if (ext_info->proto & XGE_HAL_FRAME_PROTO_IPV4) {
609a23fd118Syl 			struct ip *ip =
610a23fd118Syl 			    (struct ip *)(vaddr + ip_off);
611a23fd118Syl 			start = ip->ip_hl * 4 + ip_off;
612a23fd118Syl 		} else {
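			/* IPv6 has a fixed 40-byte header */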
613a23fd118Syl 			start = ip_off + 40;
614a23fd118Syl 		}
615a23fd118Syl 		cksum_flags |= HCK_PARTIALCKSUM;
616a23fd118Syl 		(void) hcksum_assoc(mp, NULL, NULL, start, 0,
617a23fd118Syl 		    end, ntohs(ext_info->l4_cksum), cksum_flags,
618a23fd118Syl 		    0);
619a23fd118Syl 	}
620a23fd118Syl }
621a23fd118Syl 
622a23fd118Syl /*
623a23fd118Syl  * xgell_rx_1b_msg_alloc
624a23fd118Syl  *
625a23fd118Syl  * Allocate message header for data buffer, and decide if copy the packet to
626a23fd118Syl  * Allocate a message header for the data buffer, and decide whether to copy
627a23fd118Syl  * the packet into a new data buffer to release the big rx_buffer and save memory.
628*8347601bSyl  * If the pkt_length <= XGELL_RX_DMA_LOWAT, call allocb() to allocate
629a23fd118Syl  * new message and copy the payload in.
630a23fd118Syl  * a new message and copy the payload in.
631a23fd118Syl static mblk_t *
632*8347601bSyl xgell_rx_1b_msg_alloc(xgelldev_t *lldev, xgell_rx_buffer_t *rx_buffer,
633*8347601bSyl     int pkt_length, xge_hal_dtr_info_t *ext_info, boolean_t *copyit)
634a23fd118Syl {
635a23fd118Syl 	mblk_t *mp;
636a23fd118Syl 	mblk_t *nmp = NULL;
637a23fd118Syl 	char *vaddr;
638a23fd118Syl 	int hdr_length = 0;
639a23fd118Syl 
640a23fd118Syl #ifdef XGELL_L3_ALIGNED
641*8347601bSyl 	boolean_t doalign = B_TRUE;
642a23fd118Syl 	struct ip *ip;
643a23fd118Syl 	struct tcphdr *tcp;
644a23fd118Syl 	int tcp_off;
645a23fd118Syl 	int mp_align_len;
646a23fd118Syl 	int ip_off;
647a23fd118Syl #endif
648a23fd118Syl 
649a23fd118Syl 	vaddr = (char *)rx_buffer->vaddr + HEADROOM;
650a23fd118Syl #ifdef XGELL_L3_ALIGNED
651a23fd118Syl 	ip_off = xgell_get_ip_offset(ext_info);
652a23fd118Syl 
653a23fd118Syl 	/* Check ip_off with HEADROOM */
654a23fd118Syl 	if ((ip_off & 3) == HEADROOM) {
655*8347601bSyl 		doalign = B_FALSE;
656a23fd118Syl 	}
657a23fd118Syl 
658a23fd118Syl 	/*
659a23fd118Syl 	 * Doalign? Check for types of packets.
660a23fd118Syl 	 */
661a23fd118Syl 	/* Is IPv4 or IPv6? */
662a23fd118Syl 	if (doalign && !(ext_info->proto & XGE_HAL_FRAME_PROTO_IPV4 ||
663a23fd118Syl 	    ext_info->proto & XGE_HAL_FRAME_PROTO_IPV6)) {
664*8347601bSyl 		doalign = B_FALSE;
665a23fd118Syl 	}
666a23fd118Syl 
667a23fd118Syl 	/* Is TCP? */
668a23fd118Syl 	if (doalign &&
669a23fd118Syl 	    ((ip = (struct ip *)(vaddr + ip_off))->ip_p == IPPROTO_TCP)) {
670a23fd118Syl 		tcp_off = ip->ip_hl * 4 + ip_off;
671a23fd118Syl 		tcp = (struct tcphdr *)(vaddr + tcp_off);
672a23fd118Syl 		hdr_length = tcp_off + tcp->th_off * 4;
673a23fd118Syl 		if (pkt_length < (XGE_HAL_TCPIP_HEADER_MAX_SIZE +
674a23fd118Syl 		    XGE_HAL_MAC_HEADER_MAX_SIZE)) {
675a23fd118Syl 			hdr_length = pkt_length;
676a23fd118Syl 		}
677a23fd118Syl 	} else {
678*8347601bSyl 		doalign = B_FALSE;
679a23fd118Syl 	}
680a23fd118Syl #endif
681a23fd118Syl 
682a23fd118Syl 	/*
683a23fd118Syl 	 * Copy the packet into a newly allocated message buffer if pkt_length
684*8347601bSyl 	 * is less than XGELL_RX_DMA_LOWAT.
685a23fd118Syl 	 */
686*8347601bSyl 	if (*copyit || pkt_length <= lldev->config.rx_dma_lowat) {
687a23fd118Syl 		/* Keep room for alignment */
688a23fd118Syl 		if ((mp = allocb(pkt_length + HEADROOM + 4, 0)) == NULL) {
689a23fd118Syl 			return (NULL);
690a23fd118Syl 		}
691a23fd118Syl #ifdef XGELL_L3_ALIGNED
692a23fd118Syl 		if (doalign) {
693a23fd118Syl 			mp_align_len =
694a23fd118Syl 			    (4 - ((uint64_t)(mp->b_rptr + ip_off) & 3));
695a23fd118Syl 			mp->b_rptr += mp_align_len;
696a23fd118Syl 		}
697a23fd118Syl #endif
698a23fd118Syl 		bcopy(vaddr, mp->b_rptr, pkt_length);
699a23fd118Syl 		mp->b_wptr = mp->b_rptr + pkt_length;
700a23fd118Syl 		*copyit = B_TRUE;
701a23fd118Syl 		return (mp);
702a23fd118Syl 	}
703a23fd118Syl 
704a23fd118Syl 	/*
705a23fd118Syl 	 * Just allocate mblk for current data buffer
706a23fd118Syl 	 */
707a23fd118Syl 	if ((nmp = (mblk_t *)desballoc((unsigned char *)vaddr, pkt_length, 0,
708a23fd118Syl 	    &rx_buffer->frtn)) == NULL) {
709a23fd118Syl 		/* Drop it */
710a23fd118Syl 		return (NULL);
711a23fd118Syl 	}
712a23fd118Syl 
713a23fd118Syl 	/*
714a23fd118Syl 	 * Adjust the b_rptr/b_wptr in the mblk_t structure to point to
715a23fd118Syl 	 * payload.
716a23fd118Syl 	 */
717a23fd118Syl 	nmp->b_rptr += hdr_length;
718a23fd118Syl 	nmp->b_wptr += pkt_length;
719a23fd118Syl 
720a23fd118Syl #ifdef XGELL_L3_ALIGNED
721a23fd118Syl 	if (doalign) {
722a23fd118Syl 		if ((mp = esballoc(rx_buffer->header, hdr_length + 4, 0,
723a23fd118Syl 		    &header_frtn)) == NULL) {
724a23fd118Syl 			/* can not align! */
725a23fd118Syl 			mp = nmp;
726a23fd118Syl 			mp->b_rptr = (u8 *)vaddr;
727a23fd118Syl 			mp->b_wptr = mp->b_rptr + pkt_length;
728a23fd118Syl 			mp->b_next = NULL;
729a23fd118Syl 			mp->b_cont = NULL;
730a23fd118Syl 		} else {
731a23fd118Syl 			/* align packet's ip-header offset */
732a23fd118Syl 			mp_align_len =
733a23fd118Syl 			    (4 - ((uint64_t)(mp->b_rptr + ip_off) & 3));
734a23fd118Syl 			mp->b_rptr += mp_align_len;
735a23fd118Syl 			mp->b_wptr += mp_align_len + hdr_length;
736a23fd118Syl 			mp->b_cont = nmp;
737a23fd118Syl 			mp->b_next = NULL;
738a23fd118Syl 			nmp->b_cont = NULL;
739a23fd118Syl 			nmp->b_next = NULL;
740a23fd118Syl 
741a23fd118Syl 			bcopy(vaddr, mp->b_rptr, hdr_length);
742a23fd118Syl 		}
743a23fd118Syl 	} else {
744a23fd118Syl 		/* no need to align */
745a23fd118Syl 		mp = nmp;
746a23fd118Syl 		mp->b_next = NULL;
747a23fd118Syl 		mp->b_cont = NULL;
748a23fd118Syl 	}
749a23fd118Syl #else
750a23fd118Syl 	mp = nmp;
751a23fd118Syl 	mp->b_next = NULL;
752a23fd118Syl 	mp->b_cont = NULL;
753a23fd118Syl #endif
754a23fd118Syl 
755a23fd118Syl 	return (mp);
756a23fd118Syl }
757a23fd118Syl 
758a23fd118Syl /*
759a23fd118Syl  * xgell_rx_1b_compl
760a23fd118Syl  *
761a23fd118Syl  * This function is called if the interrupt is due to a received frame or if
762a23fd118Syl  * the receive ring contains fresh, as yet unprocessed frames.
763a23fd118Syl  */
764a23fd118Syl static xge_hal_status_e
765a23fd118Syl xgell_rx_1b_compl(xge_hal_channel_h channelh, xge_hal_dtr_h dtr, u8 t_code,
766a23fd118Syl     void *userdata)
767a23fd118Syl {
768ba2e4443Sseb 	xgelldev_t *lldev = ((xgell_ring_t *)userdata)->lldev;
769a23fd118Syl 	xgell_rx_buffer_t *rx_buffer;
770a23fd118Syl 	mblk_t *mp_head = NULL;
771a23fd118Syl 	mblk_t *mp_end  = NULL;
772*8347601bSyl 	int pkt_burst = 0;
773*8347601bSyl 
774*8347601bSyl 	mutex_enter(&lldev->bf_pool.pool_lock);
775a23fd118Syl 
776a23fd118Syl 	do {
777a23fd118Syl 		int pkt_length;
778a23fd118Syl 		dma_addr_t dma_data;
779a23fd118Syl 		mblk_t *mp;
780a23fd118Syl 		boolean_t copyit = B_FALSE;
781a23fd118Syl 
782a23fd118Syl 		xgell_rxd_priv_t *rxd_priv = ((xgell_rxd_priv_t *)
783a23fd118Syl 		    xge_hal_ring_dtr_private(channelh, dtr));
784a23fd118Syl 		xge_hal_dtr_info_t ext_info;
785a23fd118Syl 
786a23fd118Syl 		rx_buffer = rxd_priv->rx_buffer;
787a23fd118Syl 
788a23fd118Syl 		xge_hal_ring_dtr_1b_get(channelh, dtr, &dma_data, &pkt_length);
789a23fd118Syl 		xge_hal_ring_dtr_info_get(channelh, dtr, &ext_info);
790a23fd118Syl 
791a23fd118Syl 		xge_assert(dma_data == rx_buffer->dma_addr);
792a23fd118Syl 
793a23fd118Syl 		if (t_code != 0) {
794a23fd118Syl 			xge_debug_ll(XGE_ERR, "%s%d: rx: dtr 0x%"PRIx64
795a23fd118Syl 			    " completed due to error t_code %01x", XGELL_IFNAME,
796a23fd118Syl 			    lldev->instance, (uint64_t)(uintptr_t)dtr, t_code);
797a23fd118Syl 
798a23fd118Syl 			(void) xge_hal_device_handle_tcode(channelh, dtr,
799a23fd118Syl 			    t_code);
800a23fd118Syl 			xge_hal_ring_dtr_free(channelh, dtr); /* drop it */
801a23fd118Syl 			xgell_rx_buffer_release(rx_buffer);
802a23fd118Syl 			continue;
803a23fd118Syl 		}
804a23fd118Syl 
805a23fd118Syl 		/*
806a23fd118Syl 		 * Sync the DMA memory
807a23fd118Syl 		 */
808*8347601bSyl 		if (ddi_dma_sync(rx_buffer->dma_handle, 0, pkt_length,
809*8347601bSyl 		    DDI_DMA_SYNC_FORKERNEL) != DDI_SUCCESS) {
810a23fd118Syl 			xge_debug_ll(XGE_ERR, "%s%d: rx: can not do DMA sync",
811a23fd118Syl 			    XGELL_IFNAME, lldev->instance);
812a23fd118Syl 			xge_hal_ring_dtr_free(channelh, dtr); /* drop it */
813a23fd118Syl 			xgell_rx_buffer_release(rx_buffer);
814a23fd118Syl 			continue;
815a23fd118Syl 		}
816a23fd118Syl 
817a23fd118Syl 		/*
818a23fd118Syl 		 * Allocate message for the packet.
819a23fd118Syl 		 */
820a23fd118Syl 		if (lldev->bf_pool.post > lldev->bf_pool.post_hiwat) {
821a23fd118Syl 			copyit = B_TRUE;
822a23fd118Syl 		} else {
823a23fd118Syl 			copyit = B_FALSE;
824a23fd118Syl 		}
825a23fd118Syl 
826*8347601bSyl 		mp = xgell_rx_1b_msg_alloc(lldev, rx_buffer, pkt_length,
827*8347601bSyl 		    &ext_info, &copyit);
828a23fd118Syl 
829a23fd118Syl 		xge_hal_ring_dtr_free(channelh, dtr);
830a23fd118Syl 
831a23fd118Syl 		/*
832a23fd118Syl 		 * Release the buffer and recycle it later
833a23fd118Syl 		 */
834a23fd118Syl 		if ((mp == NULL) || copyit) {
835a23fd118Syl 			xgell_rx_buffer_release(rx_buffer);
836a23fd118Syl 		} else {
837a23fd118Syl 			/*
838a23fd118Syl 			 * Count it, since the buffer is being loaned up to the stack.
839a23fd118Syl 			 */
840a23fd118Syl 			lldev->bf_pool.post++;
841a23fd118Syl 		}
842a23fd118Syl 		if (mp == NULL) {
843a23fd118Syl 			xge_debug_ll(XGE_ERR,
844*8347601bSyl 			    "%s%d: rx: can not allocate mp mblk",
845*8347601bSyl 			    XGELL_IFNAME, lldev->instance);
846a23fd118Syl 			continue;
847a23fd118Syl 		}
848a23fd118Syl 
849a23fd118Syl 		/*
850*8347601bSyl 		 * Associate cksum_flags per packet type and h/w
851*8347601bSyl 		 * cksum flags.
852a23fd118Syl 		 */
853a23fd118Syl 		xgell_rx_hcksum_assoc(mp, (char *)rx_buffer->vaddr +
854a23fd118Syl 		    HEADROOM, pkt_length, &ext_info);
855a23fd118Syl 
856a23fd118Syl 		if (mp_head == NULL) {
857a23fd118Syl 			mp_head = mp;
858a23fd118Syl 			mp_end = mp;
859a23fd118Syl 		} else {
860a23fd118Syl 			mp_end->b_next = mp;
861a23fd118Syl 			mp_end = mp;
862a23fd118Syl 		}
863a23fd118Syl 
864*8347601bSyl 		if (++pkt_burst < lldev->config.rx_pkt_burst)
865*8347601bSyl 			continue;
866*8347601bSyl 
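		/*
		 * Packet burst limit reached: replenish posted buffers if
		 * needed, then drop the pool lock and hand the accumulated
		 * chain up to the MAC layer before continuing.
		 */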
867*8347601bSyl 		if (lldev->bf_pool.post > lldev->bf_pool.post_hiwat) {
868*8347601bSyl 			/* Replenish rx buffers */
869*8347601bSyl 			xgell_rx_buffer_replenish_all(lldev);
870*8347601bSyl 		}
871*8347601bSyl 		mutex_exit(&lldev->bf_pool.pool_lock);
872*8347601bSyl 		if (mp_head != NULL) {
873*8347601bSyl 			mac_rx(lldev->mh, ((xgell_ring_t *)userdata)->handle,
874*8347601bSyl 			    mp_head);
875*8347601bSyl 		}
876*8347601bSyl 		mp_head = mp_end  = NULL;
877*8347601bSyl 		pkt_burst = 0;
878*8347601bSyl 		mutex_enter(&lldev->bf_pool.pool_lock);
879*8347601bSyl 
880a23fd118Syl 	} while (xge_hal_ring_dtr_next_completed(channelh, &dtr, &t_code) ==
881a23fd118Syl 	    XGE_HAL_OK);
882a23fd118Syl 
883a23fd118Syl 	/*
884a23fd118Syl 	 * Always call replenish_all to recycle rx_buffers.
885a23fd118Syl 	 */
886a23fd118Syl 	xgell_rx_buffer_replenish_all(lldev);
887a23fd118Syl 	mutex_exit(&lldev->bf_pool.pool_lock);
888a23fd118Syl 
889*8347601bSyl 	if (mp_head != NULL) {
890*8347601bSyl 		mac_rx(lldev->mh, ((xgell_ring_t *)userdata)->handle, mp_head);
891*8347601bSyl 	}
892*8347601bSyl 
893a23fd118Syl 	return (XGE_HAL_OK);
894a23fd118Syl }
895a23fd118Syl 
896a23fd118Syl /*
897a23fd118Syl  * xgell_xmit_compl
898a23fd118Syl  *
899a23fd118Syl  * This function is called when an interrupt is raised to indicate DMA
900a23fd118Syl  * completion of a Tx packet. It identifies the last TxD whose buffer was
901a23fd118Syl  * freed and frees all mblks whose data have already been DMA'ed into the
902a23fd118Syl  * NIC's internal memory.
903a23fd118Syl  */
904a23fd118Syl static xge_hal_status_e
905a23fd118Syl xgell_xmit_compl(xge_hal_channel_h channelh, xge_hal_dtr_h dtr, u8 t_code,
906a23fd118Syl     void *userdata)
907a23fd118Syl {
908a23fd118Syl 	xgelldev_t *lldev = userdata;
909a23fd118Syl 
910a23fd118Syl 	do {
911a23fd118Syl 		xgell_txd_priv_t *txd_priv = ((xgell_txd_priv_t *)
912a23fd118Syl 		    xge_hal_fifo_dtr_private(dtr));
913a23fd118Syl 		mblk_t *mp = txd_priv->mblk;
914a23fd118Syl 		int i;
915a23fd118Syl 
916a23fd118Syl 		if (t_code) {
917a23fd118Syl 			xge_debug_ll(XGE_TRACE, "%s%d: tx: dtr 0x%"PRIx64
918a23fd118Syl 			    " completed due to error t_code %01x", XGELL_IFNAME,
919a23fd118Syl 			    lldev->instance, (uint64_t)(uintptr_t)dtr, t_code);
920a23fd118Syl 
921a23fd118Syl 			(void) xge_hal_device_handle_tcode(channelh, dtr,
922a23fd118Syl 			    t_code);
923a23fd118Syl 		}
924a23fd118Syl 
925a23fd118Syl 		for (i = 0; i < txd_priv->handle_cnt; i++) {
926a23fd118Syl 			xge_assert(txd_priv->dma_handles[i]);
927a23fd118Syl 			(void) ddi_dma_unbind_handle(txd_priv->dma_handles[i]);
928a23fd118Syl 			ddi_dma_free_handle(&txd_priv->dma_handles[i]);
929a23fd118Syl 			txd_priv->dma_handles[i] = 0;
930a23fd118Syl 		}
931a23fd118Syl 
932a23fd118Syl 		xge_hal_fifo_dtr_free(channelh, dtr);
933a23fd118Syl 
934a23fd118Syl 		freemsg(mp);
935a23fd118Syl 		lldev->resched_avail++;
936a23fd118Syl 
937a23fd118Syl 	} while (xge_hal_fifo_dtr_next_completed(channelh, &dtr, &t_code) ==
938a23fd118Syl 	    XGE_HAL_OK);
939a23fd118Syl 
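	/*
	 * If a send was deferred earlier because descriptors ran low, queue
	 * a RESCHED event so the transmit path can be resumed.
	 */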
940a23fd118Syl 	if (lldev->resched_retry &&
941a23fd118Syl 	    xge_queue_produce_context(xge_hal_device_queue(lldev->devh),
942a23fd118Syl 	    XGELL_EVENT_RESCHED_NEEDED, lldev) == XGE_QUEUE_OK) {
943a23fd118Syl 		xge_debug_ll(XGE_TRACE, "%s%d: IRQ produced event for queue %d",
944a23fd118Syl 		    XGELL_IFNAME, lldev->instance,
945a23fd118Syl 		    ((xge_hal_channel_t *)lldev->fifo_channel)->post_qid);
946a23fd118Syl 		lldev->resched_send = lldev->resched_avail;
947a23fd118Syl 		lldev->resched_retry = 0;
948a23fd118Syl 	}
949a23fd118Syl 
950a23fd118Syl 	return (XGE_HAL_OK);
951a23fd118Syl }
952a23fd118Syl 
953a23fd118Syl /*
954a23fd118Syl  * xgell_send
955*8347601bSyl  * @lldev: pointer to xgelldev_t structure
956a23fd118Syl  * @mp: pointer to network buffer, i.e. mblk_t structure
957a23fd118Syl  *
958a23fd118Syl  * Called by xgell_m_tx() to transmit the packet to the XFRAME firmware.
959a23fd118Syl  * A pointer to an M_DATA message that contains the packet is passed to
960a23fd118Syl  * this routine.
961a23fd118Syl  */
962a23fd118Syl static boolean_t
963*8347601bSyl xgell_send(xgelldev_t *lldev, mblk_t *mp)
964a23fd118Syl {
965a23fd118Syl 	mblk_t *bp;
966*8347601bSyl 	boolean_t retry;
967*8347601bSyl 	xge_hal_device_t *hldev = lldev->devh;
968a23fd118Syl 	xge_hal_status_e status;
969a23fd118Syl 	xge_hal_dtr_h dtr;
970a23fd118Syl 	xgell_txd_priv_t *txd_priv;
971*8347601bSyl 	uint32_t hckflags;
972*8347601bSyl 	uint32_t mss;
973*8347601bSyl 	int handle_cnt, frag_cnt, ret, i, copied;
974*8347601bSyl 	boolean_t used_copy;
975a23fd118Syl 
976a23fd118Syl _begin:
977*8347601bSyl 	retry = B_FALSE;
978a23fd118Syl 	handle_cnt = frag_cnt = 0;
979a23fd118Syl 
980a23fd118Syl 	if (!lldev->is_initialized || lldev->in_reset)
981a23fd118Syl 		return (B_FALSE);
982a23fd118Syl 
983a23fd118Syl 	/*
984a23fd118Syl 	 * If the free Tx dtr count reaches the lower threshold, inform
985a23fd118Syl 	 * the GLD to stop sending more packets until the free dtr count
986a23fd118Syl 	 * exceeds the higher threshold. The driver informs the GLD via a
987a23fd118Syl 	 * gld_sched call when the free dtr count exceeds the higher
988a23fd118Syl 	 * threshold.
989a23fd118Syl 	 */
990*8347601bSyl 	if (xge_hal_channel_dtr_count(lldev->fifo_channel)
991a23fd118Syl 	    <= XGELL_TX_LEVEL_LOW) {
992a23fd118Syl 		xge_debug_ll(XGE_TRACE, "%s%d: queue %d: err on xmit,"
993a23fd118Syl 		    "free descriptors count at low threshold %d",
994a23fd118Syl 		    XGELL_IFNAME, lldev->instance,
995a23fd118Syl 		    ((xge_hal_channel_t *)lldev->fifo_channel)->post_qid,
996a23fd118Syl 		    XGELL_TX_LEVEL_LOW);
997*8347601bSyl 		retry = B_TRUE;
998a23fd118Syl 		goto _exit;
999a23fd118Syl 	}
1000a23fd118Syl 
1001a23fd118Syl 	status = xge_hal_fifo_dtr_reserve(lldev->fifo_channel, &dtr);
1002a23fd118Syl 	if (status != XGE_HAL_OK) {
1003a23fd118Syl 		switch (status) {
1004a23fd118Syl 		case XGE_HAL_INF_CHANNEL_IS_NOT_READY:
1005a23fd118Syl 			xge_debug_ll(XGE_ERR,
1006a23fd118Syl 			    "%s%d: channel %d is not ready.", XGELL_IFNAME,
1007a23fd118Syl 			    lldev->instance,
1008a23fd118Syl 			    ((xge_hal_channel_t *)
1009a23fd118Syl 			    lldev->fifo_channel)->post_qid);
1010*8347601bSyl 			retry = B_TRUE;
1011a23fd118Syl 			goto _exit;
1012a23fd118Syl 		case XGE_HAL_INF_OUT_OF_DESCRIPTORS:
1013a23fd118Syl 			xge_debug_ll(XGE_TRACE, "%s%d: queue %d: error in xmit,"
1014a23fd118Syl 			    " out of descriptors.", XGELL_IFNAME,
1015a23fd118Syl 			    lldev->instance,
1016a23fd118Syl 			    ((xge_hal_channel_t *)
1017a23fd118Syl 			    lldev->fifo_channel)->post_qid);
1018*8347601bSyl 			retry = B_TRUE;
1019a23fd118Syl 			goto _exit;
1020a23fd118Syl 		default:
1021a23fd118Syl 			return (B_FALSE);
1022a23fd118Syl 		}
1023a23fd118Syl 	}
1024a23fd118Syl 
1025a23fd118Syl 	txd_priv = xge_hal_fifo_dtr_private(dtr);
1026a23fd118Syl 	txd_priv->mblk = mp;
1027a23fd118Syl 
1028a23fd118Syl 	/*
1029a23fd118Syl 	 * VLAN tag should be passed down along with MAC header, so h/w needn't
1030a23fd118Syl 	 * do insertion.
1031a23fd118Syl 	 *
1032a23fd118Syl 	 * For a NIC driver that has to strip and re-insert the VLAN tag, the
1033a23fd118Syl 	 * example is the other implementation for xge. The driver can simply
1034a23fd118Syl 	 * bcopy() the ether_vlan_header to overwrite the VLAN tag and let h/w
1035a23fd118Syl 	 * insert the tag automatically, since GLD never sends down mp(s) with
1036a23fd118Syl 	 * a split ether_vlan_header.
1037a23fd118Syl 	 *
1038a23fd118Syl 	 * struct ether_vlan_header *evhp;
1039a23fd118Syl 	 * uint16_t tci;
1040a23fd118Syl 	 *
1041a23fd118Syl 	 * evhp = (struct ether_vlan_header *)mp->b_rptr;
1042a23fd118Syl 	 * if (evhp->ether_tpid == htons(VLAN_TPID)) {
1043*8347601bSyl 	 *	tci = ntohs(evhp->ether_tci);
1044*8347601bSyl 	 *	(void) bcopy(mp->b_rptr, mp->b_rptr + VLAN_TAGSZ,
1045a23fd118Syl 	 *	    2 * ETHERADDRL);
1046*8347601bSyl 	 *	mp->b_rptr += VLAN_TAGSZ;
1047a23fd118Syl 	 *
1048*8347601bSyl 	 *	xge_hal_fifo_dtr_vlan_set(dtr, tci);
1049a23fd118Syl 	 * }
1050a23fd118Syl 	 */
1051a23fd118Syl 
1052*8347601bSyl 	copied = 0;
1053*8347601bSyl 	used_copy = B_FALSE;
1054a23fd118Syl 	for (bp = mp; bp != NULL; bp = bp->b_cont) {
1055a23fd118Syl 		int mblen;
1056a23fd118Syl 		uint_t ncookies;
1057a23fd118Syl 		ddi_dma_cookie_t dma_cookie;
1058a23fd118Syl 		ddi_dma_handle_t dma_handle;
1059a23fd118Syl 
1060a23fd118Syl 		/* skip zero-length message blocks */
1061a23fd118Syl 		mblen = MBLKL(bp);
1062a23fd118Syl 		if (mblen == 0) {
1063a23fd118Syl 			continue;
1064a23fd118Syl 		}
1065a23fd118Syl 
1066*8347601bSyl 		/*
1067*8347601bSyl 		 * Check the message length to decide to DMA or bcopy() data
1068*8347601bSyl 		 * to tx descriptor(s).
1069*8347601bSyl 		 */
1070*8347601bSyl 		if (mblen < lldev->config.tx_dma_lowat &&
1071*8347601bSyl 		    (copied + mblen) < lldev->tx_copied_max) {
1072*8347601bSyl 			xge_hal_status_e rc;
1073*8347601bSyl 			rc = xge_hal_fifo_dtr_buffer_append(lldev->fifo_channel,
1074*8347601bSyl 			    dtr, bp->b_rptr, mblen);
1075*8347601bSyl 			if (rc == XGE_HAL_OK) {
1076*8347601bSyl 				used_copy = B_TRUE;
1077*8347601bSyl 				copied += mblen;
1078*8347601bSyl 				continue;
1079*8347601bSyl 			} else if (used_copy) {
1080*8347601bSyl 				xge_hal_fifo_dtr_buffer_finalize(
1081*8347601bSyl 					lldev->fifo_channel, dtr, frag_cnt++);
1082*8347601bSyl 				used_copy = B_FALSE;
1083*8347601bSyl 			}
1084*8347601bSyl 		} else if (used_copy) {
1085*8347601bSyl 			xge_hal_fifo_dtr_buffer_finalize(lldev->fifo_channel,
1086*8347601bSyl 			    dtr, frag_cnt++);
1087*8347601bSyl 			used_copy = B_FALSE;
1088*8347601bSyl 		}
1089*8347601bSyl 
1090ba2e4443Sseb 		ret = ddi_dma_alloc_handle(lldev->dev_info, &tx_dma_attr,
1091a23fd118Syl 		    DDI_DMA_DONTWAIT, 0, &dma_handle);
1092a23fd118Syl 		if (ret != DDI_SUCCESS) {
1093a23fd118Syl 			xge_debug_ll(XGE_ERR,
1094*8347601bSyl 			    "%s%d: can not allocate dma handle", XGELL_IFNAME,
1095*8347601bSyl 			    lldev->instance);
1096a23fd118Syl 			goto _exit_cleanup;
1097a23fd118Syl 		}
1098a23fd118Syl 
1099a23fd118Syl 		ret = ddi_dma_addr_bind_handle(dma_handle, NULL,
1100a23fd118Syl 		    (caddr_t)bp->b_rptr, mblen,
1101a23fd118Syl 		    DDI_DMA_WRITE | DDI_DMA_STREAMING, DDI_DMA_DONTWAIT, 0,
1102a23fd118Syl 		    &dma_cookie, &ncookies);
1103a23fd118Syl 
1104a23fd118Syl 		switch (ret) {
1105a23fd118Syl 		case DDI_DMA_MAPPED:
1106a23fd118Syl 			/* everything's fine */
1107a23fd118Syl 			break;
1108a23fd118Syl 
1109a23fd118Syl 		case DDI_DMA_NORESOURCES:
1110a23fd118Syl 			xge_debug_ll(XGE_ERR,
1111a23fd118Syl 			    "%s%d: can not bind dma address",
1112a23fd118Syl 			    XGELL_IFNAME, lldev->instance);
1113a23fd118Syl 			ddi_dma_free_handle(&dma_handle);
1114a23fd118Syl 			goto _exit_cleanup;
1115a23fd118Syl 
1116a23fd118Syl 		case DDI_DMA_NOMAPPING:
1117a23fd118Syl 		case DDI_DMA_INUSE:
1118a23fd118Syl 		case DDI_DMA_TOOBIG:
1119a23fd118Syl 		default:
1120a23fd118Syl 			/* drop packet, don't retry */
1121a23fd118Syl 			xge_debug_ll(XGE_ERR,
1122a23fd118Syl 			    "%s%d: can not map message buffer",
1123a23fd118Syl 			    XGELL_IFNAME, lldev->instance);
1124a23fd118Syl 			ddi_dma_free_handle(&dma_handle);
1125a23fd118Syl 			goto _exit_cleanup;
1126a23fd118Syl 		}
1127a23fd118Syl 
1128*8347601bSyl 		if (ncookies + frag_cnt > hldev->config.fifo.max_frags) {
1129a23fd118Syl 			xge_debug_ll(XGE_ERR, "%s%d: too many fragments, "
1130a23fd118Syl 			    "requested c:%d+f:%d", XGELL_IFNAME,
1131a23fd118Syl 			    lldev->instance, ncookies, frag_cnt);
1132a23fd118Syl 			(void) ddi_dma_unbind_handle(dma_handle);
1133a23fd118Syl 			ddi_dma_free_handle(&dma_handle);
1134a23fd118Syl 			goto _exit_cleanup;
1135a23fd118Syl 		}
1136a23fd118Syl 
1137a23fd118Syl 		/* setup the descriptors for this data buffer */
1138a23fd118Syl 		while (ncookies) {
1139a23fd118Syl 			xge_hal_fifo_dtr_buffer_set(lldev->fifo_channel, dtr,
1140a23fd118Syl 			    frag_cnt++, dma_cookie.dmac_laddress,
1141a23fd118Syl 			    dma_cookie.dmac_size);
1142a23fd118Syl 			if (--ncookies) {
1143a23fd118Syl 				ddi_dma_nextcookie(dma_handle, &dma_cookie);
1144a23fd118Syl 			}
1145a23fd118Syl 
1146a23fd118Syl 		}
1147a23fd118Syl 
1148a23fd118Syl 		txd_priv->dma_handles[handle_cnt++] = dma_handle;
1149a23fd118Syl 
1150a23fd118Syl 		if (bp->b_cont &&
1151a23fd118Syl 		    (frag_cnt + XGE_HAL_DEFAULT_FIFO_FRAGS_THRESHOLD >=
1152*8347601bSyl 			hldev->config.fifo.max_frags)) {
1153a23fd118Syl 			mblk_t *nmp;
1154a23fd118Syl 
1155a23fd118Syl 			xge_debug_ll(XGE_TRACE,
1156a23fd118Syl 			    "too many FRAGs [%d], pull up them", frag_cnt);
1157a23fd118Syl 
1158a23fd118Syl 			if ((nmp = msgpullup(bp->b_cont, -1)) == NULL) {
1159a23fd118Syl 				/* Drop packet, don't retry */
1160a23fd118Syl 				xge_debug_ll(XGE_ERR,
1161a23fd118Syl 				    "%s%d: can not pullup message buffer",
1162a23fd118Syl 				    XGELL_IFNAME, lldev->instance);
1163a23fd118Syl 				goto _exit_cleanup;
1164a23fd118Syl 			}
1165a23fd118Syl 			freemsg(bp->b_cont);
1166a23fd118Syl 			bp->b_cont = nmp;
1167a23fd118Syl 		}
1168a23fd118Syl 	}
1169a23fd118Syl 
1170*8347601bSyl 	/* finalize unfinished copies */
1171*8347601bSyl 	if (used_copy) {
1172*8347601bSyl 		xge_hal_fifo_dtr_buffer_finalize(lldev->fifo_channel, dtr,
1173*8347601bSyl 		    frag_cnt++);
1174*8347601bSyl 	}
1175*8347601bSyl 
1176a23fd118Syl 	txd_priv->handle_cnt = handle_cnt;
1177a23fd118Syl 
1178*8347601bSyl 	/*
1179*8347601bSyl 	 * If LSO is required, just call xge_hal_fifo_dtr_mss_set(dtr, mss) to
1180*8347601bSyl 	 * do all necessary work.
1181*8347601bSyl 	 */
1182*8347601bSyl 	hcksum_retrieve(mp, NULL, NULL, NULL, NULL, NULL, &mss, &hckflags);
1183*8347601bSyl 	if ((hckflags & HW_LSO) && (mss != 0)) {
1184*8347601bSyl 		xge_hal_fifo_dtr_mss_set(dtr, mss);
1185*8347601bSyl 	}
1186*8347601bSyl 
1187*8347601bSyl 	if (hckflags & HCK_IPV4_HDRCKSUM) {
1188a23fd118Syl 		xge_hal_fifo_dtr_cksum_set_bits(dtr,
1189a23fd118Syl 		    XGE_HAL_TXD_TX_CKO_IPV4_EN);
1190a23fd118Syl 	}
1191*8347601bSyl 	if (hckflags & HCK_FULLCKSUM) {
1192a23fd118Syl 		xge_hal_fifo_dtr_cksum_set_bits(dtr, XGE_HAL_TXD_TX_CKO_TCP_EN |
1193a23fd118Syl 		    XGE_HAL_TXD_TX_CKO_UDP_EN);
1194a23fd118Syl 	}
1195a23fd118Syl 
1196a23fd118Syl 	xge_hal_fifo_dtr_post(lldev->fifo_channel, dtr);
1197a23fd118Syl 
1198a23fd118Syl 	return (B_TRUE);
1199a23fd118Syl 
1200a23fd118Syl _exit_cleanup:
1201a23fd118Syl 
1202a23fd118Syl 	for (i = 0; i < handle_cnt; i++) {
1203a23fd118Syl 		(void) ddi_dma_unbind_handle(txd_priv->dma_handles[i]);
1204a23fd118Syl 		ddi_dma_free_handle(&txd_priv->dma_handles[i]);
1205a23fd118Syl 		txd_priv->dma_handles[i] = 0;
1206a23fd118Syl 	}
1207a23fd118Syl 
1208a23fd118Syl 	xge_hal_fifo_dtr_free(lldev->fifo_channel, dtr);
1209a23fd118Syl 
1210a23fd118Syl _exit:
1211a23fd118Syl 	if (retry) {
1212a23fd118Syl 		if (lldev->resched_avail != lldev->resched_send &&
1213a23fd118Syl 		    xge_queue_produce_context(xge_hal_device_queue(lldev->devh),
1214a23fd118Syl 		    XGELL_EVENT_RESCHED_NEEDED, lldev) == XGE_QUEUE_OK) {
1215a23fd118Syl 			lldev->resched_send = lldev->resched_avail;
1216a23fd118Syl 			return (B_FALSE);
1217a23fd118Syl 		} else {
1218a23fd118Syl 			lldev->resched_retry = 1;
1219a23fd118Syl 		}
1220a23fd118Syl 	}
1221a23fd118Syl 
1222a23fd118Syl 	freemsg(mp);
1223a23fd118Syl 	return (B_TRUE);
1224a23fd118Syl }
1225a23fd118Syl 
1226a23fd118Syl /*
1227a23fd118Syl  * xgell_m_tx
1228*8347601bSyl  * @arg: pointer to the xgelldev_t structure
1230a23fd118Syl  * @mp: pointer to the message buffer
1231a23fd118Syl  *
1232a23fd118Syl  * Called by the MAC layer to send a chain of packets
1233a23fd118Syl  */
1234a23fd118Syl static mblk_t *
1235a23fd118Syl xgell_m_tx(void *arg, mblk_t *mp)
1236a23fd118Syl {
1237*8347601bSyl 	xgelldev_t *lldev = arg;
1238a23fd118Syl 	mblk_t *next;
1239a23fd118Syl 
1240a23fd118Syl 	while (mp != NULL) {
1241a23fd118Syl 		next = mp->b_next;
1242a23fd118Syl 		mp->b_next = NULL;
1243a23fd118Syl 
1244*8347601bSyl 		if (!xgell_send(lldev, mp)) {
1245a23fd118Syl 			mp->b_next = next;
1246a23fd118Syl 			break;
1247a23fd118Syl 		}
1248a23fd118Syl 		mp = next;
1249a23fd118Syl 	}
1250a23fd118Syl 
1251a23fd118Syl 	return (mp);
1252a23fd118Syl }
1253a23fd118Syl 
1254a23fd118Syl /*
1255a23fd118Syl  * xgell_rx_dtr_term
1256a23fd118Syl  *
1257a23fd118Syl  * This function is called by the HAL to terminate all DTRs for
1258a23fd118Syl  * Ring-type channels.
1259a23fd118Syl  */
1260a23fd118Syl static void
1261a23fd118Syl xgell_rx_dtr_term(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh,
1262a23fd118Syl     xge_hal_dtr_state_e state, void *userdata, xge_hal_channel_reopen_e reopen)
1263a23fd118Syl {
1264a23fd118Syl 	xgell_rxd_priv_t *rxd_priv =
1265a23fd118Syl 	    ((xgell_rxd_priv_t *)xge_hal_ring_dtr_private(channelh, dtrh));
1266a23fd118Syl 	xgell_rx_buffer_t *rx_buffer = rxd_priv->rx_buffer;
1267a23fd118Syl 
1268a23fd118Syl 	if (state == XGE_HAL_DTR_STATE_POSTED) {
1269*8347601bSyl 		xgelldev_t *lldev = rx_buffer->lldev;
1270*8347601bSyl 
1271*8347601bSyl 		mutex_enter(&lldev->bf_pool.pool_lock);
1272a23fd118Syl 		xge_hal_ring_dtr_free(channelh, dtrh);
1273a23fd118Syl 		xgell_rx_buffer_release(rx_buffer);
1274*8347601bSyl 		mutex_exit(&lldev->bf_pool.pool_lock);
1275a23fd118Syl 	}
1276a23fd118Syl }
1277a23fd118Syl 
1278a23fd118Syl /*
1279a23fd118Syl  * xgell_tx_term
1280a23fd118Syl  *
1281a23fd118Syl  * This function is called by the HAL to terminate all DTRs for
1282a23fd118Syl  * Fifo-type channels.
1283a23fd118Syl  */
1284a23fd118Syl static void
1285a23fd118Syl xgell_tx_term(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh,
1286a23fd118Syl     xge_hal_dtr_state_e state, void *userdata, xge_hal_channel_reopen_e reopen)
1287a23fd118Syl {
1288a23fd118Syl 	xgell_txd_priv_t *txd_priv =
1289a23fd118Syl 	    ((xgell_txd_priv_t *)xge_hal_fifo_dtr_private(dtrh));
1290a23fd118Syl 	mblk_t *mp = txd_priv->mblk;
1291a23fd118Syl 	int i;
1292*8347601bSyl 
1293a23fd118Syl 	/*
1294a23fd118Syl 	 * for Tx we must clean up the DTR *only* if it has been
1295a23fd118Syl 	 * posted!
1296a23fd118Syl 	 */
1297a23fd118Syl 	if (state != XGE_HAL_DTR_STATE_POSTED) {
1298a23fd118Syl 		return;
1299a23fd118Syl 	}
1300a23fd118Syl 
1301a23fd118Syl 	for (i = 0; i < txd_priv->handle_cnt; i++) {
1302a23fd118Syl 		xge_assert(txd_priv->dma_handles[i]);
1303a23fd118Syl 		(void) ddi_dma_unbind_handle(txd_priv->dma_handles[i]);
1304a23fd118Syl 		ddi_dma_free_handle(&txd_priv->dma_handles[i]);
1305a23fd118Syl 		txd_priv->dma_handles[i] = 0;
1306a23fd118Syl 	}
1307a23fd118Syl 
1308a23fd118Syl 	xge_hal_fifo_dtr_free(channelh, dtrh);
1309a23fd118Syl 
1310a23fd118Syl 	freemsg(mp);
1311a23fd118Syl }
1312a23fd118Syl 
1313a23fd118Syl /*
1314a23fd118Syl  * xgell_tx_open
1315a23fd118Syl  * @lldev: the link layer object
1316a23fd118Syl  *
1317a23fd118Syl  * Initialize and open all Tx channels;
1318a23fd118Syl  */
1319a23fd118Syl static boolean_t
1320a23fd118Syl xgell_tx_open(xgelldev_t *lldev)
1321a23fd118Syl {
1322a23fd118Syl 	xge_hal_status_e status;
1323a23fd118Syl 	u64 adapter_status;
1324a23fd118Syl 	xge_hal_channel_attr_t attr;
1325a23fd118Syl 
1326a23fd118Syl 	attr.post_qid		= 0;
1327a23fd118Syl 	attr.compl_qid		= 0;
1328a23fd118Syl 	attr.callback		= xgell_xmit_compl;
1329a23fd118Syl 	attr.per_dtr_space	= sizeof (xgell_txd_priv_t);
1330a23fd118Syl 	attr.flags		= 0;
1331a23fd118Syl 	attr.type		= XGE_HAL_CHANNEL_TYPE_FIFO;
1332a23fd118Syl 	attr.userdata		= lldev;
1333a23fd118Syl 	attr.dtr_init		= NULL;
1334a23fd118Syl 	attr.dtr_term		= xgell_tx_term;
1335a23fd118Syl 
1336a23fd118Syl 	if (xge_hal_device_status(lldev->devh, &adapter_status)) {
1337a23fd118Syl 		xge_debug_ll(XGE_ERR, "%s%d: device is not ready "
1338a23fd118Syl 		    "adapter status reads 0x%"PRIx64, XGELL_IFNAME,
1339a23fd118Syl 		    lldev->instance, (uint64_t)adapter_status);
1340a23fd118Syl 		return (B_FALSE);
1341a23fd118Syl 	}
1342a23fd118Syl 
1343a23fd118Syl 	status = xge_hal_channel_open(lldev->devh, &attr,
1344a23fd118Syl 	    &lldev->fifo_channel, XGE_HAL_CHANNEL_OC_NORMAL);
1345a23fd118Syl 	if (status != XGE_HAL_OK) {
1346a23fd118Syl 		xge_debug_ll(XGE_ERR, "%s%d: cannot open Tx channel "
1347a23fd118Syl 		    "got status code %d", XGELL_IFNAME,
1348a23fd118Syl 		    lldev->instance, status);
1349a23fd118Syl 		return (B_FALSE);
1350a23fd118Syl 	}
1351a23fd118Syl 
1352a23fd118Syl 	return (B_TRUE);
1353a23fd118Syl }
1354a23fd118Syl 
1355a23fd118Syl /*
1356a23fd118Syl  * xgell_rx_open
1357a23fd118Syl  * @lldev: the link layer object
1358a23fd118Syl  *
1359a23fd118Syl  * Initialize and open all Rx channels;
1360a23fd118Syl  */
1361a23fd118Syl static boolean_t
1362a23fd118Syl xgell_rx_open(xgelldev_t *lldev)
1363a23fd118Syl {
1364a23fd118Syl 	xge_hal_status_e status;
1365a23fd118Syl 	u64 adapter_status;
1366a23fd118Syl 	xge_hal_channel_attr_t attr;
1367a23fd118Syl 
1368a23fd118Syl 	attr.post_qid		= XGELL_RING_MAIN_QID;
1369a23fd118Syl 	attr.compl_qid		= 0;
1370a23fd118Syl 	attr.callback		= xgell_rx_1b_compl;
1371a23fd118Syl 	attr.per_dtr_space	= sizeof (xgell_rxd_priv_t);
1372a23fd118Syl 	attr.flags		= 0;
1373a23fd118Syl 	attr.type		= XGE_HAL_CHANNEL_TYPE_RING;
1374a23fd118Syl 	attr.dtr_init		= xgell_rx_dtr_replenish;
1375a23fd118Syl 	attr.dtr_term		= xgell_rx_dtr_term;
1376a23fd118Syl 
1377a23fd118Syl 	if (xge_hal_device_status(lldev->devh, &adapter_status)) {
1378a23fd118Syl 		xge_debug_ll(XGE_ERR,
1379a23fd118Syl 		    "%s%d: device is not ready adaper status reads 0x%"PRIx64,
1380a23fd118Syl 		    "%s%d: device is not ready, adapter status reads 0x%"PRIx64,
1381a23fd118Syl 		    (uint64_t)adapter_status);
1382a23fd118Syl 		return (B_FALSE);
1383a23fd118Syl 	}
1384a23fd118Syl 
1385ba2e4443Sseb 	lldev->ring_main.lldev = lldev;
1386a23fd118Syl 	attr.userdata = &lldev->ring_main;
1387a23fd118Syl 
1388a23fd118Syl 	status = xge_hal_channel_open(lldev->devh, &attr,
1389a23fd118Syl 	    &lldev->ring_main.channelh, XGE_HAL_CHANNEL_OC_NORMAL);
1390a23fd118Syl 	if (status != XGE_HAL_OK) {
1391a23fd118Syl 		xge_debug_ll(XGE_ERR, "%s%d: cannot open Rx channel got status "
1392a23fd118Syl 		    "code %d", XGELL_IFNAME, lldev->instance, status);
1393a23fd118Syl 		return (B_FALSE);
1394a23fd118Syl 	}
1395a23fd118Syl 
1396a23fd118Syl 	return (B_TRUE);
1397a23fd118Syl }
1398a23fd118Syl 
1399a23fd118Syl static int
1400a23fd118Syl xgell_initiate_start(xgelldev_t *lldev)
1401a23fd118Syl {
1402a23fd118Syl 	xge_hal_status_e status;
1403a23fd118Syl 	xge_hal_device_t *hldev = lldev->devh;
1404ba2e4443Sseb 	int maxpkt = hldev->config.mtu;
1405a23fd118Syl 
1406a23fd118Syl 	/* check initial mtu before enabling the device */
1407a23fd118Syl 	status = xge_hal_device_mtu_check(lldev->devh, maxpkt);
1408a23fd118Syl 	if (status != XGE_HAL_OK) {
1409a23fd118Syl 		xge_debug_ll(XGE_ERR, "%s%d: MTU size %d is invalid",
1410a23fd118Syl 		    XGELL_IFNAME, lldev->instance, maxpkt);
1411a23fd118Syl 		return (EINVAL);
1412a23fd118Syl 	}
1413a23fd118Syl 
1414a23fd118Syl 	/* set initial mtu before enabling the device */
1415a23fd118Syl 	status = xge_hal_device_mtu_set(lldev->devh, maxpkt);
1416a23fd118Syl 	if (status != XGE_HAL_OK) {
1417a23fd118Syl 		xge_debug_ll(XGE_ERR, "%s%d: can not set new MTU %d",
1418a23fd118Syl 		    XGELL_IFNAME, lldev->instance, maxpkt);
1419a23fd118Syl 		return (EIO);
1420a23fd118Syl 	}
1421a23fd118Syl 
1422*8347601bSyl 	/* tune jumbo/normal frame UFC counters */
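	/*
	 * These appear to be the ring's utilization-based frame count
	 * thresholds for Rx interrupt moderation: the *_J defaults are
	 * used with jumbo MTUs, the *_N defaults otherwise.
	 */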
1423*8347601bSyl 	hldev->config.ring.queue[XGELL_RING_MAIN_QID].rti.ufc_b = \
1424*8347601bSyl 		maxpkt > XGE_HAL_DEFAULT_MTU ?
1425*8347601bSyl 			XGE_HAL_DEFAULT_RX_UFC_B_J :
1426*8347601bSyl 			XGE_HAL_DEFAULT_RX_UFC_B_N;
1427*8347601bSyl 
1428*8347601bSyl 	hldev->config.ring.queue[XGELL_RING_MAIN_QID].rti.ufc_c = \
1429*8347601bSyl 		maxpkt > XGE_HAL_DEFAULT_MTU ?
1430*8347601bSyl 			XGE_HAL_DEFAULT_RX_UFC_C_J :
1431*8347601bSyl 			XGE_HAL_DEFAULT_RX_UFC_C_N;
1432*8347601bSyl 
1433a23fd118Syl 	/* now, enable the device */
1434a23fd118Syl 	status = xge_hal_device_enable(lldev->devh);
1435a23fd118Syl 	if (status != XGE_HAL_OK) {
1436a23fd118Syl 		xge_debug_ll(XGE_ERR, "%s%d: can not enable the device",
1437a23fd118Syl 		    XGELL_IFNAME, lldev->instance);
1438a23fd118Syl 		return (EIO);
1439a23fd118Syl 	}
1440a23fd118Syl 
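	/*
	 * If the Rx channel cannot be opened, disable the hardware again
	 * and give it time to quiesce before returning the error.
	 */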
1441a23fd118Syl 	if (!xgell_rx_open(lldev)) {
1442a23fd118Syl 		status = xge_hal_device_disable(lldev->devh);
1443a23fd118Syl 		if (status != XGE_HAL_OK) {
1444a23fd118Syl 			u64 adapter_status;
1445a23fd118Syl 			(void) xge_hal_device_status(lldev->devh,
1446a23fd118Syl 			    &adapter_status);
1447a23fd118Syl 			xge_debug_ll(XGE_ERR, "%s%d: can not safely disable "
1448a23fd118Syl 			    "the device. adapter status 0x%"PRIx64
1449a23fd118Syl 			    " returned status %d",
1450a23fd118Syl 			    XGELL_IFNAME, lldev->instance,
1451a23fd118Syl 			    (uint64_t)adapter_status, status);
1452a23fd118Syl 		}
1453a23fd118Syl 		xge_os_mdelay(1500);
1454a23fd118Syl 		return (ENOMEM);
1455a23fd118Syl 	}
1456a23fd118Syl 
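	/*
	 * If the Tx channel cannot be opened, disable the hardware, let it
	 * quiesce and close the already-opened Rx channel before failing.
	 */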
1457a23fd118Syl 	if (!xgell_tx_open(lldev)) {
1458a23fd118Syl 		status = xge_hal_device_disable(lldev->devh);
1459a23fd118Syl 		if (status != XGE_HAL_OK) {
1460a23fd118Syl 			u64 adapter_status;
1461a23fd118Syl 			(void) xge_hal_device_status(lldev->devh,
1462a23fd118Syl 			    &adapter_status);
1463a23fd118Syl 			xge_debug_ll(XGE_ERR, "%s%d: can not safely disable "
1464a23fd118Syl 			    "the device. adapter status 0x%"PRIx64
1465a23fd118Syl 			    " returned status %d",
1466a23fd118Syl 			    XGELL_IFNAME, lldev->instance,
1467a23fd118Syl 			    (uint64_t)adapter_status, status);
1468a23fd118Syl 		}
1469a23fd118Syl 		xge_os_mdelay(1500);
1470a23fd118Syl 		xge_hal_channel_close(lldev->ring_main.channelh,
1471a23fd118Syl 		    XGE_HAL_CHANNEL_OC_NORMAL);
1472a23fd118Syl 		return (ENOMEM);
1473a23fd118Syl 	}
1474a23fd118Syl 
1475a23fd118Syl 	/* time to enable interrupts */
1476a23fd118Syl 	xge_hal_device_intr_enable(lldev->devh);
1477a23fd118Syl 
1478a23fd118Syl 	lldev->is_initialized = 1;
1479a23fd118Syl 
1480a23fd118Syl 	return (0);
1481a23fd118Syl }
1482a23fd118Syl 
1483a23fd118Syl static void
1484a23fd118Syl xgell_initiate_stop(xgelldev_t *lldev)
1485a23fd118Syl {
1486a23fd118Syl 	xge_hal_status_e status;
1487a23fd118Syl 
1488a23fd118Syl 	lldev->is_initialized = 0;
1489a23fd118Syl 
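	/*
	 * Stop order: disable the adapter, mask its interrupts, wait for
	 * in-flight interrupts to drain, flush the HAL event queue, then
	 * close the Rx and Tx channels.
	 */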
1490a23fd118Syl 	status = xge_hal_device_disable(lldev->devh);
1491a23fd118Syl 	if (status != XGE_HAL_OK) {
1492a23fd118Syl 		u64 adapter_status;
1493a23fd118Syl 		(void) xge_hal_device_status(lldev->devh, &adapter_status);
1494a23fd118Syl 		xge_debug_ll(XGE_ERR, "%s%d: can not safely disable "
1495a23fd118Syl 		    "the device. adapter status 0x%"PRIx64" returned status %d",
1496a23fd118Syl 		    XGELL_IFNAME, lldev->instance,
1497a23fd118Syl 		    (uint64_t)adapter_status, status);
1498a23fd118Syl 	}
1499a23fd118Syl 	xge_hal_device_intr_disable(lldev->devh);
1500a23fd118Syl 
1501a23fd118Syl 	xge_debug_ll(XGE_TRACE, "%s",
1502a23fd118Syl 	    "waiting for device irq to become quiescent...");
1503a23fd118Syl 	xge_os_mdelay(1500);
1504a23fd118Syl 
1505a23fd118Syl 	xge_queue_flush(xge_hal_device_queue(lldev->devh));
1506a23fd118Syl 
1507a23fd118Syl 	xge_hal_channel_close(lldev->ring_main.channelh,
1508a23fd118Syl 	    XGE_HAL_CHANNEL_OC_NORMAL);
1509a23fd118Syl 
1510a23fd118Syl 	xge_hal_channel_close(lldev->fifo_channel,
1511a23fd118Syl 	    XGE_HAL_CHANNEL_OC_NORMAL);
1512a23fd118Syl }
1513a23fd118Syl 
1514a23fd118Syl /*
1515a23fd118Syl  * xgell_m_start
1516a23fd118Syl  * @arg: pointer to device private structure (lldev)
1517a23fd118Syl  *
1518a23fd118Syl  * This function is called by MAC Layer to enable the XFRAME
1519a23fd118Syl  * firmware to generate interrupts and also prepare the
1520a23fd118Syl  * driver to call mac_rx for delivering receive packets
1521a23fd118Syl  * to MAC Layer.
1522a23fd118Syl  */
1523a23fd118Syl static int
1524a23fd118Syl xgell_m_start(void *arg)
1525a23fd118Syl {
1526*8347601bSyl 	xgelldev_t *lldev = arg;
1527*8347601bSyl 	xge_hal_device_t *hldev = lldev->devh;
1528a23fd118Syl 	int ret;
1529a23fd118Syl 
1530a23fd118Syl 	xge_debug_ll(XGE_TRACE, "%s%d: M_START", XGELL_IFNAME,
1531a23fd118Syl 	    lldev->instance);
1532a23fd118Syl 
1533a23fd118Syl 	mutex_enter(&lldev->genlock);
1534a23fd118Syl 
1535a23fd118Syl 	if (lldev->is_initialized) {
1536a23fd118Syl 		xge_debug_ll(XGE_ERR, "%s%d: device is already initialized",
1537a23fd118Syl 		    XGELL_IFNAME, lldev->instance);
1538a23fd118Syl 		mutex_exit(&lldev->genlock);
1539a23fd118Syl 		return (EINVAL);
1540a23fd118Syl 	}
1541a23fd118Syl 
1542a23fd118Syl 	hldev->terminating = 0;
1543a23fd118Syl 	if (ret = xgell_initiate_start(lldev)) {
1544a23fd118Syl 		mutex_exit(&lldev->genlock);
1545a23fd118Syl 		return (ret);
1546a23fd118Syl 	}
1547a23fd118Syl 
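	/* Arm the periodic timer that drives xge_device_poll() for the HAL. */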
1548a23fd118Syl 	lldev->timeout_id = timeout(xge_device_poll, hldev, XGE_DEV_POLL_TICKS);
1549a23fd118Syl 
1550a23fd118Syl 	mutex_exit(&lldev->genlock);
1551a23fd118Syl 
1552a23fd118Syl 	return (0);
1553a23fd118Syl }
1554a23fd118Syl 
1555a23fd118Syl /*
1556a23fd118Syl  * xgell_m_stop
1557a23fd118Syl  * @arg: pointer to device private data (lldev)
1558a23fd118Syl  *
1559a23fd118Syl  * This function is called by the MAC Layer to disable
1560a23fd118Syl  * the XFRAME firmware for generating any interrupts and
1561a23fd118Syl  * also stop the driver from calling mac_rx() for
1562a23fd118Syl  * delivering data packets to the MAC Layer.
1563a23fd118Syl  */
1564a23fd118Syl static void
1565a23fd118Syl xgell_m_stop(void *arg)
1566a23fd118Syl {
1567*8347601bSyl 	xgelldev_t *lldev = arg;
1568*8347601bSyl 	xge_hal_device_t *hldev = lldev->devh;
1569a23fd118Syl 
1570a23fd118Syl 	xge_debug_ll(XGE_TRACE, "%s", "MAC_STOP");
1571a23fd118Syl 
1572a23fd118Syl 	mutex_enter(&lldev->genlock);
1573a23fd118Syl 	if (!lldev->is_initialized) {
1574a23fd118Syl 		xge_debug_ll(XGE_ERR, "%s", "device is not initialized...");
1575a23fd118Syl 		mutex_exit(&lldev->genlock);
1576a23fd118Syl 		return;
1577a23fd118Syl 	}
1578a23fd118Syl 
1579a23fd118Syl 	xge_hal_device_terminating(hldev);
1580a23fd118Syl 	xgell_initiate_stop(lldev);
1581a23fd118Syl 
1582a23fd118Syl 	/* reset device */
1583a23fd118Syl 	(void) xge_hal_device_reset(lldev->devh);
1584a23fd118Syl 
1585a23fd118Syl 	mutex_exit(&lldev->genlock);
1586a23fd118Syl 
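	/* Cancel the periodic device-poll timer, if it was armed. */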
1587*8347601bSyl 	if (lldev->timeout_id != 0) {
1588*8347601bSyl 		(void) untimeout(lldev->timeout_id);
1589*8347601bSyl 	}
1590a23fd118Syl 
1591a23fd118Syl 	xge_debug_ll(XGE_TRACE, "%s", "returning back to MAC Layer...");
1592a23fd118Syl }
1593a23fd118Syl 
1594a23fd118Syl /*
1595a23fd118Syl  * xgell_onerr_reset
1596a23fd118Syl  * @lldev: pointer to xgelldev_t structure
1597a23fd118Syl  *
1598a23fd118Syl  * This function is called by the HAL event framework to reset the HW.
1599a23fd118Syl  * It must be called with genlock taken.
1600a23fd118Syl  */
1601a23fd118Syl int
1602a23fd118Syl xgell_onerr_reset(xgelldev_t *lldev)
1603a23fd118Syl {
1604a23fd118Syl 	int rc = 0;
1605a23fd118Syl 
1606a23fd118Syl 	if (!lldev->is_initialized) {
1607a23fd118Syl 		xge_debug_ll(XGE_ERR, "%s%d: can not reset",
1608a23fd118Syl 		    XGELL_IFNAME, lldev->instance);
1609a23fd118Syl 		return (rc);
1610a23fd118Syl 	}
1611a23fd118Syl 
1612a23fd118Syl 	lldev->in_reset = 1;
1613a23fd118Syl 	xgell_initiate_stop(lldev);
1614a23fd118Syl 
1615a23fd118Syl 	/* reset device */
1616a23fd118Syl 	(void) xge_hal_device_reset(lldev->devh);
1617a23fd118Syl 
1618a23fd118Syl 	rc = xgell_initiate_start(lldev);
1619a23fd118Syl 	lldev->in_reset = 0;
1620a23fd118Syl 
1621a23fd118Syl 	return (rc);
1622a23fd118Syl }
1623a23fd118Syl 
1624a23fd118Syl 
1625a23fd118Syl /*
1626a23fd118Syl  * xgell_m_unicst
1627a23fd118Syl  * @arg: pointer to device private structure (lldev)
1628a23fd118Syl  * @macaddr: MAC address to be programmed into the adapter
1629a23fd118Syl  *
1630a23fd118Syl  * This function is called by MAC Layer to set the physical address
1631a23fd118Syl  * of the XFRAME firmware.
1632a23fd118Syl  */
1633a23fd118Syl static int
1634a23fd118Syl xgell_m_unicst(void *arg, const uint8_t *macaddr)
1635a23fd118Syl {
1636a23fd118Syl 	xge_hal_status_e status;
1637*8347601bSyl 	xgelldev_t *lldev = (xgelldev_t *)arg;
1638*8347601bSyl 	xge_hal_device_t *hldev = lldev->devh;
1639a23fd118Syl 	xge_debug_ll(XGE_TRACE, "%s", "MAC_UNICST");
1640a23fd118Syl 
1641a23fd118Syl 	xge_debug_ll(XGE_TRACE, "%s", "M_UNICAST");
1642a23fd118Syl 
1643a23fd118Syl 	mutex_enter(&lldev->genlock);
1644a23fd118Syl 
1645a23fd118Syl 	xge_debug_ll(XGE_TRACE,
1646a23fd118Syl 	    "setting macaddr: 0x%02x-%02x-%02x-%02x-%02x-%02x",
1647a23fd118Syl 	    macaddr[0], macaddr[1], macaddr[2],
1648a23fd118Syl 	    macaddr[3], macaddr[4], macaddr[5]);
1649a23fd118Syl 
1650a23fd118Syl 	status = xge_hal_device_macaddr_set(hldev, 0, (uchar_t *)macaddr);
1651a23fd118Syl 	if (status != XGE_HAL_OK) {
1652a23fd118Syl 		xge_debug_ll(XGE_ERR, "%s%d: can not set mac address",
1653a23fd118Syl 		    XGELL_IFNAME, lldev->instance);
1654a23fd118Syl 		mutex_exit(&lldev->genlock);
1655a23fd118Syl 		return (EIO);
1656a23fd118Syl 	}
1657a23fd118Syl 
1658a23fd118Syl 	mutex_exit(&lldev->genlock);
1659a23fd118Syl 
1660a23fd118Syl 	return (0);
1661a23fd118Syl }
1662a23fd118Syl 
1663a23fd118Syl 
1664a23fd118Syl /*
1665a23fd118Syl  * xgell_m_multicst
1666a23fd118Syl  * @arg: pointer to device private structure (lldev)
1667a23fd118Syl  * @add: B_TRUE to enable, B_FALSE to disable reception of the address
1668a23fd118Syl  * @mc_addr: multicast address to add or remove
1669a23fd118Syl  *
1670a23fd118Syl  * This function is called by MAC Layer to enable or
1671a23fd118Syl  * disable device-level reception of specific multicast addresses.
1672a23fd118Syl  */
1673a23fd118Syl static int
1674a23fd118Syl xgell_m_multicst(void *arg, boolean_t add, const uint8_t *mc_addr)
1675a23fd118Syl {
1676a23fd118Syl 	xge_hal_status_e status;
1677*8347601bSyl 	xgelldev_t *lldev = (xgelldev_t *)arg;
1678*8347601bSyl 	xge_hal_device_t *hldev = lldev->devh;
1679a23fd118Syl 
1680a23fd118Syl 	xge_debug_ll(XGE_TRACE, "M_MULTICAST add %d", add);
1681a23fd118Syl 
1682a23fd118Syl 	mutex_enter(&lldev->genlock);
1683a23fd118Syl 
1684a23fd118Syl 	if (!lldev->is_initialized) {
1685a23fd118Syl 		xge_debug_ll(XGE_ERR, "%s%d: can not set multicast",
1686a23fd118Syl 		    XGELL_IFNAME, lldev->instance);
1687a23fd118Syl 		mutex_exit(&lldev->genlock);
1688a23fd118Syl 		return (EIO);
1689a23fd118Syl 	}
1690a23fd118Syl 
1691a23fd118Syl 	/* FIXME: missing HAL functionality: enable_one() */
1692a23fd118Syl 
1693a23fd118Syl 	status = (add) ?
1694a23fd118Syl 	    xge_hal_device_mcast_enable(hldev) :
1695a23fd118Syl 	    xge_hal_device_mcast_disable(hldev);
1696a23fd118Syl 
1697a23fd118Syl 	if (status != XGE_HAL_OK) {
1698a23fd118Syl 		xge_debug_ll(XGE_ERR, "failed to %s multicast, status %d",
1699a23fd118Syl 		    add ? "enable" : "disable", status);
1700a23fd118Syl 		mutex_exit(&lldev->genlock);
1701a23fd118Syl 		return (EIO);
1702a23fd118Syl 	}
1703a23fd118Syl 
1704a23fd118Syl 	mutex_exit(&lldev->genlock);
1705a23fd118Syl 
1706a23fd118Syl 	return (0);
1707a23fd118Syl }
1708a23fd118Syl 
1709a23fd118Syl 
1710a23fd118Syl /*
1711a23fd118Syl  * xgell_m_promisc
1712a23fd118Syl  * @arg: pointer to device private structure (lldev)
1713a23fd118Syl  * @on: B_TRUE to enable promiscuous mode, B_FALSE to disable it
1714a23fd118Syl  *
1715a23fd118Syl  * This function is called by MAC Layer to enable or
1716a23fd118Syl  * disable the reception of all the packets on the medium
1717a23fd118Syl  */
1718a23fd118Syl static int
1719a23fd118Syl xgell_m_promisc(void *arg, boolean_t on)
1720a23fd118Syl {
1721*8347601bSyl 	xgelldev_t *lldev = (xgelldev_t *)arg;
1722*8347601bSyl 	xge_hal_device_t *hldev = lldev->devh;
1723a23fd118Syl 
1724a23fd118Syl 	mutex_enter(&lldev->genlock);
1725a23fd118Syl 
1726a23fd118Syl 	xge_debug_ll(XGE_TRACE, "%s", "MAC_PROMISC_SET");
1727a23fd118Syl 
1728a23fd118Syl 	if (!lldev->is_initialized) {
1729a23fd118Syl 		xge_debug_ll(XGE_ERR, "%s%d: can not set promiscuous",
1730a23fd118Syl 		    XGELL_IFNAME, lldev->instance);
1731a23fd118Syl 		mutex_exit(&lldev->genlock);
1732a23fd118Syl 		return (EIO);
1733a23fd118Syl 	}
1734a23fd118Syl 
1735a23fd118Syl 	if (on) {
1736a23fd118Syl 		xge_hal_device_promisc_enable(hldev);
1737a23fd118Syl 	} else {
1738a23fd118Syl 		xge_hal_device_promisc_disable(hldev);
1739a23fd118Syl 	}
1740a23fd118Syl 
1741a23fd118Syl 	mutex_exit(&lldev->genlock);
1742a23fd118Syl 
1743a23fd118Syl 	return (0);
1744a23fd118Syl }
1745a23fd118Syl 
1746a23fd118Syl /*
1747ba2e4443Sseb  * xgell_m_stat
1748a23fd118Syl  * @arg: pointer to device private structure (lldev)
1749a23fd118Syl  *
1750ba2e4443Sseb  * This function is called by MAC Layer to get network statistics
1751a23fd118Syl  * from the driver.
1752a23fd118Syl  */
1753ba2e4443Sseb static int
1754ba2e4443Sseb xgell_m_stat(void *arg, uint_t stat, uint64_t *val)
1755a23fd118Syl {
1756a23fd118Syl 	xge_hal_stats_hw_info_t *hw_info;
1757*8347601bSyl 	xgelldev_t *lldev = (xgelldev_t *)arg;
1758*8347601bSyl 	xge_hal_device_t *hldev = lldev->devh;
1759a23fd118Syl 
1760a23fd118Syl 	xge_debug_ll(XGE_TRACE, "%s", "MAC_STATS_GET");
1761a23fd118Syl 
1762a23fd118Syl 	if (!mutex_tryenter(&lldev->genlock))
1763ba2e4443Sseb 		return (EAGAIN);
1764a23fd118Syl 
1765a23fd118Syl 	if (!lldev->is_initialized) {
1766a23fd118Syl 		mutex_exit(&lldev->genlock);
1767ba2e4443Sseb 		return (EAGAIN);
1768a23fd118Syl 	}
1769a23fd118Syl 
1770a23fd118Syl 	if (xge_hal_stats_hw(hldev, &hw_info) != XGE_HAL_OK) {
1771a23fd118Syl 		mutex_exit(&lldev->genlock);
1772ba2e4443Sseb 		return (EAGAIN);
1773a23fd118Syl 	}
1774a23fd118Syl 
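	/*
	 * Most hardware counters are kept as a 32-bit value plus a separate
	 * overflow register; combine the two into the 64-bit value that the
	 * MAC layer expects.
	 */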
1775a23fd118Syl 	switch (stat) {
1776a23fd118Syl 	case MAC_STAT_IFSPEED:
1777ba2e4443Sseb 		*val = 10000000000ull; /* 10G */
1778a23fd118Syl 		break;
1779a23fd118Syl 
1780a23fd118Syl 	case MAC_STAT_MULTIRCV:
1781*8347601bSyl 		*val = ((u64) hw_info->rmac_vld_mcst_frms_oflow << 32) |
1782*8347601bSyl 		    hw_info->rmac_vld_mcst_frms;
1783a23fd118Syl 		break;
1784a23fd118Syl 
1785a23fd118Syl 	case MAC_STAT_BRDCSTRCV:
1786*8347601bSyl 		*val = ((u64) hw_info->rmac_vld_bcst_frms_oflow << 32) |
1787*8347601bSyl 		    hw_info->rmac_vld_bcst_frms;
1788a23fd118Syl 		break;
1789a23fd118Syl 
1790a23fd118Syl 	case MAC_STAT_MULTIXMT:
1791*8347601bSyl 		*val = ((u64) hw_info->tmac_mcst_frms_oflow << 32) |
1792*8347601bSyl 		    hw_info->tmac_mcst_frms;
1793a23fd118Syl 		break;
1794a23fd118Syl 
1795a23fd118Syl 	case MAC_STAT_BRDCSTXMT:
1796*8347601bSyl 		*val = ((u64) hw_info->tmac_bcst_frms_oflow << 32) |
1797*8347601bSyl 		    hw_info->tmac_bcst_frms;
1798a23fd118Syl 		break;
1799a23fd118Syl 
1800a23fd118Syl 	case MAC_STAT_RBYTES:
1801*8347601bSyl 		*val = ((u64) hw_info->rmac_ttl_octets_oflow << 32) |
1802*8347601bSyl 		    hw_info->rmac_ttl_octets;
1803a23fd118Syl 		break;
1804a23fd118Syl 
1805a23fd118Syl 	case MAC_STAT_NORCVBUF:
1806ba2e4443Sseb 		*val = hw_info->rmac_drop_frms;
1807a23fd118Syl 		break;
1808a23fd118Syl 
1809a23fd118Syl 	case MAC_STAT_IERRORS:
1810*8347601bSyl 		*val = ((u64) hw_info->rmac_discarded_frms_oflow << 32) |
1811*8347601bSyl 		    hw_info->rmac_discarded_frms;
1812a23fd118Syl 		break;
1813a23fd118Syl 
1814a23fd118Syl 	case MAC_STAT_OBYTES:
1815*8347601bSyl 		*val = ((u64) hw_info->tmac_ttl_octets_oflow << 32) |
1816*8347601bSyl 		    hw_info->tmac_ttl_octets;
1817a23fd118Syl 		break;
1818a23fd118Syl 
1819a23fd118Syl 	case MAC_STAT_NOXMTBUF:
1820ba2e4443Sseb 		*val = hw_info->tmac_drop_frms;
1821a23fd118Syl 		break;
1822a23fd118Syl 
1823a23fd118Syl 	case MAC_STAT_OERRORS:
1824*8347601bSyl 		*val = ((u64) hw_info->tmac_any_err_frms_oflow << 32) |
1825*8347601bSyl 		    hw_info->tmac_any_err_frms;
1826a23fd118Syl 		break;
1827a23fd118Syl 
1828a23fd118Syl 	case MAC_STAT_IPACKETS:
1829*8347601bSyl 		*val = ((u64) hw_info->rmac_vld_frms_oflow << 32) |
1830*8347601bSyl 		    hw_info->rmac_vld_frms;
1831a23fd118Syl 		break;
1832a23fd118Syl 
1833a23fd118Syl 	case MAC_STAT_OPACKETS:
1834*8347601bSyl 		*val = ((u64) hw_info->tmac_frms_oflow << 32) |
1835*8347601bSyl 		    hw_info->tmac_frms;
1836ba2e4443Sseb 		break;
1837ba2e4443Sseb 
1838ba2e4443Sseb 	case ETHER_STAT_FCS_ERRORS:
1839ba2e4443Sseb 		*val = hw_info->rmac_fcs_err_frms;
1840a23fd118Syl 		break;
1841a23fd118Syl 
1842ba2e4443Sseb 	case ETHER_STAT_TOOLONG_ERRORS:
1843ba2e4443Sseb 		*val = hw_info->rmac_long_frms;
1844a23fd118Syl 		break;
1845a23fd118Syl 
1846ba2e4443Sseb 	case ETHER_STAT_LINK_DUPLEX:
1847ba2e4443Sseb 		*val = LINK_DUPLEX_FULL;
1848a23fd118Syl 		break;
1849a23fd118Syl 
1850a23fd118Syl 	default:
1851ba2e4443Sseb 		mutex_exit(&lldev->genlock);
1852ba2e4443Sseb 		return (ENOTSUP);
1853a23fd118Syl 	}
1854a23fd118Syl 
1855a23fd118Syl 	mutex_exit(&lldev->genlock);
1856a23fd118Syl 
1857ba2e4443Sseb 	return (0);
1858a23fd118Syl }
1859a23fd118Syl 
1860a23fd118Syl /*
1861a23fd118Syl  * xgell_device_alloc - Allocate new LL device
1862a23fd118Syl  */
1863a23fd118Syl int
1864a23fd118Syl xgell_device_alloc(xge_hal_device_h devh,
1865a23fd118Syl     dev_info_t *dev_info, xgelldev_t **lldev_out)
1866a23fd118Syl {
1867a23fd118Syl 	xgelldev_t *lldev;
1868a23fd118Syl 	xge_hal_device_t *hldev = (xge_hal_device_t *)devh;
1869a23fd118Syl 	int instance = ddi_get_instance(dev_info);
1870a23fd118Syl 
1871a23fd118Syl 	*lldev_out = NULL;
1872a23fd118Syl 
1873a23fd118Syl 	xge_debug_ll(XGE_TRACE, "trying to register ethernet device %s%d...",
1874a23fd118Syl 	    XGELL_IFNAME, instance);
1875a23fd118Syl 
1876a23fd118Syl 	lldev = kmem_zalloc(sizeof (xgelldev_t), KM_SLEEP);
1877a23fd118Syl 
1878a23fd118Syl 	lldev->devh = hldev;
1879a23fd118Syl 	lldev->instance = instance;
1880a23fd118Syl 	lldev->dev_info = dev_info;
1881a23fd118Syl 
1882a23fd118Syl 	*lldev_out = lldev;
1883a23fd118Syl 
1884a23fd118Syl 	ddi_set_driver_private(dev_info, (caddr_t)hldev);
1885a23fd118Syl 
1886a23fd118Syl 	return (DDI_SUCCESS);
1887a23fd118Syl }
1888a23fd118Syl 
1889a23fd118Syl /*
1890a23fd118Syl  * xgell_device_free
1891a23fd118Syl  */
1892a23fd118Syl void
1893a23fd118Syl xgell_device_free(xgelldev_t *lldev)
1894a23fd118Syl {
1895a23fd118Syl 	xge_debug_ll(XGE_TRACE, "freeing device %s%d",
1896a23fd118Syl 	    XGELL_IFNAME, lldev->instance);
1897a23fd118Syl 
1898a23fd118Syl 	kmem_free(lldev, sizeof (xgelldev_t));
1899a23fd118Syl }
1900a23fd118Syl 
1901a23fd118Syl /*
1902a23fd118Syl  * xgell_ioctl
1903a23fd118Syl  */
1904a23fd118Syl static void
1905a23fd118Syl xgell_m_ioctl(void *arg, queue_t *wq, mblk_t *mp)
1906a23fd118Syl {
1907*8347601bSyl 	xgelldev_t *lldev = arg;
1908a23fd118Syl 	struct iocblk *iocp;
1909a23fd118Syl 	int err = 0;
1910a23fd118Syl 	int cmd;
1911a23fd118Syl 	int need_privilege = 1;
1912a23fd118Syl 	int ret = 0;
1913a23fd118Syl 
1914a23fd118Syl 
1915a23fd118Syl 	iocp = (struct iocblk *)mp->b_rptr;
1916a23fd118Syl 	iocp->ioc_error = 0;
1917a23fd118Syl 	cmd = iocp->ioc_cmd;
1918a23fd118Syl 	xge_debug_ll(XGE_TRACE, "MAC_IOCTL cmd 0x%x", cmd);
1919a23fd118Syl 	switch (cmd) {
1920a23fd118Syl 	case ND_GET:
1921a23fd118Syl 		need_privilege = 0;
1922a23fd118Syl 		/* FALLTHRU */
1923a23fd118Syl 	case ND_SET:
1924a23fd118Syl 		break;
1925a23fd118Syl 	default:
1926a23fd118Syl 		xge_debug_ll(XGE_TRACE, "unknown cmd 0x%x", cmd);
1927a23fd118Syl 		miocnak(wq, mp, 0, EINVAL);
1928a23fd118Syl 		return;
1929a23fd118Syl 	}
1930a23fd118Syl 
1931a23fd118Syl 	if (need_privilege) {
1932a23fd118Syl 		err = secpolicy_net_config(iocp->ioc_cr, B_FALSE);
1933a23fd118Syl 		if (err != 0) {
1934a23fd118Syl 			xge_debug_ll(XGE_ERR,
1935a23fd118Syl 			    "drv_priv(): rejected cmd 0x%x, err %d",
1936a23fd118Syl 			    cmd, err);
1937a23fd118Syl 			miocnak(wq, mp, 0, err);
1938a23fd118Syl 			return;
1939a23fd118Syl 		}
1940a23fd118Syl 	}
1941a23fd118Syl 
1942a23fd118Syl 	switch (cmd) {
1943a23fd118Syl 	case ND_GET:
1944a23fd118Syl 		/*
1945a23fd118Syl 		 * If nd_getset() returns B_FALSE, the command was
1946a23fd118Syl 		 * not valid (e.g. unknown name), so we just tell the
1947a23fd118Syl 		 * top-level ioctl code to send a NAK (with code EINVAL).
1948a23fd118Syl 		 *
1949a23fd118Syl 		 * Otherwise, nd_getset() will have built the reply to
1950a23fd118Syl 		 * be sent (but not actually sent it), so we tell the
1951a23fd118Syl 		 * caller to send the prepared reply.
1952a23fd118Syl 		 */
1953a23fd118Syl 		ret = nd_getset(wq, lldev->ndp, mp);
1954a23fd118Syl 		xge_debug_ll(XGE_TRACE, "got ndd get ioctl");
1955a23fd118Syl 		break;
1956a23fd118Syl 
1957a23fd118Syl 	case ND_SET:
1958a23fd118Syl 		ret = nd_getset(wq, lldev->ndp, mp);
1959a23fd118Syl 		xge_debug_ll(XGE_TRACE, "got ndd set ioctl");
1960a23fd118Syl 		break;
1961a23fd118Syl 
1962a23fd118Syl 	default:
1963a23fd118Syl 		break;
1964a23fd118Syl 	}
1965a23fd118Syl 
1966a23fd118Syl 	if (ret == B_FALSE) {
1967a23fd118Syl 		xge_debug_ll(XGE_ERR,
1968a23fd118Syl 		    "nd_getset(): rejected cmd 0x%x, err %d",
1969a23fd118Syl 		    cmd, err);
1970a23fd118Syl 		miocnak(wq, mp, 0, EINVAL);
1971a23fd118Syl 	} else {
1972a23fd118Syl 		mp->b_datap->db_type = iocp->ioc_error == 0 ?
1973a23fd118Syl 		    M_IOCACK : M_IOCNAK;
1974a23fd118Syl 		qreply(wq, mp);
1975a23fd118Syl 	}
1976a23fd118Syl }
1977a23fd118Syl 
1978ba2e4443Sseb /* ARGSUSED */
1979ba2e4443Sseb static boolean_t
1980ba2e4443Sseb xgell_m_getcapab(void *arg, mac_capab_t cap, void *cap_data)
1981a23fd118Syl {
1982*8347601bSyl 	xgelldev_t *lldev = arg;
1983*8347601bSyl 
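	/*
	 * Advertise hardware offloads: full TCP/UDP checksum for IPv4 and
	 * IPv6 plus IP header checksum, and basic TCP/IPv4 LSO when it is
	 * enabled in the driver configuration.
	 */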
1984ba2e4443Sseb 	switch (cap) {
1985ba2e4443Sseb 	case MAC_CAPAB_HCKSUM: {
1986ba2e4443Sseb 		uint32_t *hcksum_txflags = cap_data;
1987ba2e4443Sseb 		*hcksum_txflags = HCKSUM_INET_FULL_V4 | HCKSUM_INET_FULL_V6 |
1988ba2e4443Sseb 		    HCKSUM_IPHDRCKSUM;
1989ba2e4443Sseb 		break;
1990ba2e4443Sseb 	}
1991*8347601bSyl 	case MAC_CAPAB_LSO: {
1992*8347601bSyl 		mac_capab_lso_t *cap_lso = cap_data;
1993*8347601bSyl 
1994*8347601bSyl 		if (lldev->config.lso_enable) {
1995*8347601bSyl 			cap_lso->lso_flags = LSO_TX_BASIC_TCP_IPV4;
1996*8347601bSyl 			cap_lso->lso_basic_tcp_ipv4.lso_max = XGELL_LSO_MAXLEN;
1997*8347601bSyl 			break;
1998*8347601bSyl 		} else {
1999*8347601bSyl 			return (B_FALSE);
2000*8347601bSyl 		}
2001*8347601bSyl 	}
2002ba2e4443Sseb 	default:
2003ba2e4443Sseb 		return (B_FALSE);
2004ba2e4443Sseb 	}
2005ba2e4443Sseb 	return (B_TRUE);
2006a23fd118Syl }
2007a23fd118Syl 
2008a23fd118Syl static int
2009a23fd118Syl xgell_stats_get(queue_t *q, mblk_t *mp, caddr_t cp, cred_t *credp)
2010a23fd118Syl {
2011a23fd118Syl 	xgelldev_t *lldev = (xgelldev_t *)cp;
2012a23fd118Syl 	xge_hal_status_e status;
2013a23fd118Syl 	int count = 0, retsize;
2014a23fd118Syl 	char *buf;
2015a23fd118Syl 
2016a23fd118Syl 	buf = kmem_alloc(XGELL_STATS_BUFSIZE, KM_SLEEP);
2017a23fd118Syl 	if (buf == NULL) {
2018a23fd118Syl 		return (ENOSPC);
2019a23fd118Syl 	}
2020a23fd118Syl 
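	/*
	 * Concatenate the TMAC, RMAC, PCI, software-device and HAL
	 * statistics dumps into one buffer and return it in the ndd reply.
	 */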
2021a23fd118Syl 	status = xge_hal_aux_stats_tmac_read(lldev->devh, XGELL_STATS_BUFSIZE,
2022a23fd118Syl 	    buf, &retsize);
2023a23fd118Syl 	if (status != XGE_HAL_OK) {
2024a23fd118Syl 		kmem_free(buf, XGELL_STATS_BUFSIZE);
2025a23fd118Syl 		xge_debug_ll(XGE_ERR, "tmac_read(): status %d", status);
2026a23fd118Syl 		return (EINVAL);
2027a23fd118Syl 	}
2028a23fd118Syl 	count += retsize;
2029a23fd118Syl 
2030a23fd118Syl 	status = xge_hal_aux_stats_rmac_read(lldev->devh,
2031a23fd118Syl 	    XGELL_STATS_BUFSIZE - count,
2032a23fd118Syl 	    buf+count, &retsize);
2033a23fd118Syl 	if (status != XGE_HAL_OK) {
2034a23fd118Syl 		kmem_free(buf, XGELL_STATS_BUFSIZE);
2035a23fd118Syl 		xge_debug_ll(XGE_ERR, "rmac_read(): status %d", status);
2036a23fd118Syl 		return (EINVAL);
2037a23fd118Syl 	}
2038a23fd118Syl 	count += retsize;
2039a23fd118Syl 
2040a23fd118Syl 	status = xge_hal_aux_stats_pci_read(lldev->devh,
2041a23fd118Syl 	    XGELL_STATS_BUFSIZE - count, buf + count, &retsize);
2042a23fd118Syl 	if (status != XGE_HAL_OK) {
2043a23fd118Syl 		kmem_free(buf, XGELL_STATS_BUFSIZE);
2044a23fd118Syl 		xge_debug_ll(XGE_ERR, "pci_read(): status %d", status);
2045a23fd118Syl 		return (EINVAL);
2046a23fd118Syl 	}
2047a23fd118Syl 	count += retsize;
2048a23fd118Syl 
2049a23fd118Syl 	status = xge_hal_aux_stats_sw_dev_read(lldev->devh,
2050a23fd118Syl 	    XGELL_STATS_BUFSIZE - count, buf + count, &retsize);
2051a23fd118Syl 	if (status != XGE_HAL_OK) {
2052a23fd118Syl 		kmem_free(buf, XGELL_STATS_BUFSIZE);
2053a23fd118Syl 		xge_debug_ll(XGE_ERR, "sw_dev_read(): status %d", status);
2054a23fd118Syl 		return (EINVAL);
2055a23fd118Syl 	}
2056a23fd118Syl 	count += retsize;
2057a23fd118Syl 
2058a23fd118Syl 	status = xge_hal_aux_stats_hal_read(lldev->devh,
2059a23fd118Syl 	    XGELL_STATS_BUFSIZE - count, buf + count, &retsize);
2060a23fd118Syl 	if (status != XGE_HAL_OK) {
2061a23fd118Syl 		kmem_free(buf, XGELL_STATS_BUFSIZE);
2062a23fd118Syl 		xge_debug_ll(XGE_ERR, "pci_read(): status %d", status);
2063a23fd118Syl 		return (EINVAL);
2064a23fd118Syl 	}
2065a23fd118Syl 	count += retsize;
2066a23fd118Syl 
2067a23fd118Syl 	*(buf + count - 1) = '\0'; /* remove last '\n' */
2068a23fd118Syl 	(void) mi_mpprintf(mp, "%s", buf);
2069a23fd118Syl 	kmem_free(buf, XGELL_STATS_BUFSIZE);
2070a23fd118Syl 
2071a23fd118Syl 	return (0);
2072a23fd118Syl }
2073a23fd118Syl 
2074a23fd118Syl static int
2075a23fd118Syl xgell_pciconf_get(queue_t *q, mblk_t *mp, caddr_t cp, cred_t *credp)
2076a23fd118Syl {
2077a23fd118Syl 	xgelldev_t *lldev = (xgelldev_t *)cp;
2078a23fd118Syl 	xge_hal_status_e status;
2079a23fd118Syl 	int retsize;
2080a23fd118Syl 	char *buf;
2081a23fd118Syl 
2082a23fd118Syl 	buf = kmem_alloc(XGELL_PCICONF_BUFSIZE, KM_SLEEP);
2083a23fd118Syl 	if (buf == NULL) {
2084a23fd118Syl 		return (ENOSPC);
2085a23fd118Syl 	}
2086a23fd118Syl 	status = xge_hal_aux_pci_config_read(lldev->devh, XGELL_PCICONF_BUFSIZE,
2087a23fd118Syl 	    buf, &retsize);
2088a23fd118Syl 	if (status != XGE_HAL_OK) {
2089a23fd118Syl 		kmem_free(buf, XGELL_PCICONF_BUFSIZE);
2090a23fd118Syl 		xge_debug_ll(XGE_ERR, "pci_config_read(): status %d", status);
2091a23fd118Syl 		return (EINVAL);
2092a23fd118Syl 	}
2093a23fd118Syl 	*(buf + retsize - 1) = '\0'; /* remove last '\n' */
2094a23fd118Syl 	(void) mi_mpprintf(mp, "%s", buf);
2095a23fd118Syl 	kmem_free(buf, XGELL_PCICONF_BUFSIZE);
2096a23fd118Syl 
2097a23fd118Syl 	return (0);
2098a23fd118Syl }
2099a23fd118Syl 
2100a23fd118Syl static int
2101a23fd118Syl xgell_about_get(queue_t *q, mblk_t *mp, caddr_t cp, cred_t *credp)
2102a23fd118Syl {
2103a23fd118Syl 	xgelldev_t *lldev = (xgelldev_t *)cp;
2104a23fd118Syl 	xge_hal_status_e status;
2105a23fd118Syl 	int retsize;
2106a23fd118Syl 	char *buf;
2107a23fd118Syl 
2108a23fd118Syl 	buf = kmem_alloc(XGELL_ABOUT_BUFSIZE, KM_SLEEP);
2109a23fd118Syl 	if (buf == NULL) {
2110a23fd118Syl 		return (ENOSPC);
2111a23fd118Syl 	}
2112a23fd118Syl 	status = xge_hal_aux_about_read(lldev->devh, XGELL_ABOUT_BUFSIZE,
2113a23fd118Syl 	    buf, &retsize);
2114a23fd118Syl 	if (status != XGE_HAL_OK) {
2115a23fd118Syl 		kmem_free(buf, XGELL_ABOUT_BUFSIZE);
2116a23fd118Syl 		xge_debug_ll(XGE_ERR, "about_read(): status %d", status);
2117a23fd118Syl 		return (EINVAL);
2118a23fd118Syl 	}
2119a23fd118Syl 	*(buf + retsize - 1) = '\0'; /* remove last '\n' */
2120a23fd118Syl 	(void) mi_mpprintf(mp, "%s", buf);
2121a23fd118Syl 	kmem_free(buf, XGELL_ABOUT_BUFSIZE);
2122a23fd118Syl 
2123a23fd118Syl 	return (0);
2124a23fd118Syl }
2125a23fd118Syl 
2126a23fd118Syl static unsigned long bar0_offset = 0x110; /* adapter_control */
2127a23fd118Syl 
2128a23fd118Syl static int
2129a23fd118Syl xgell_bar0_get(queue_t *q, mblk_t *mp, caddr_t cp, cred_t *credp)
2130a23fd118Syl {
2131a23fd118Syl 	xgelldev_t *lldev = (xgelldev_t *)cp;
2132a23fd118Syl 	xge_hal_status_e status;
2133a23fd118Syl 	int retsize;
2134a23fd118Syl 	char *buf;
2135a23fd118Syl 
2136a23fd118Syl 	buf = kmem_alloc(XGELL_IOCTL_BUFSIZE, KM_SLEEP);
2137a23fd118Syl 	if (buf == NULL) {
2138a23fd118Syl 		return (ENOSPC);
2139a23fd118Syl 	}
2140a23fd118Syl 	status = xge_hal_aux_bar0_read(lldev->devh, bar0_offset,
2141a23fd118Syl 	    XGELL_IOCTL_BUFSIZE, buf, &retsize);
2142a23fd118Syl 	if (status != XGE_HAL_OK) {
2143a23fd118Syl 		kmem_free(buf, XGELL_IOCTL_BUFSIZE);
2144a23fd118Syl 		xge_debug_ll(XGE_ERR, "bar0_read(): status %d", status);
2145a23fd118Syl 		return (EINVAL);
2146a23fd118Syl 	}
2147a23fd118Syl 	*(buf + retsize - 1) = '\0'; /* remove last '\n' */
2148a23fd118Syl 	(void) mi_mpprintf(mp, "%s", buf);
2149a23fd118Syl 	kmem_free(buf, XGELL_IOCTL_BUFSIZE);
2150a23fd118Syl 
2151a23fd118Syl 	return (0);
2152a23fd118Syl }
2153a23fd118Syl 
2154a23fd118Syl static int
2155a23fd118Syl xgell_bar0_set(queue_t *q, mblk_t *mp, char *value, caddr_t cp, cred_t *credp)
2156a23fd118Syl {
2157a23fd118Syl 	unsigned long old_offset = bar0_offset;
2158a23fd118Syl 	char *end;
2159a23fd118Syl 
2160a23fd118Syl 	if (value && *value == '0' &&
2161a23fd118Syl 	    (*(value + 1) == 'x' || *(value + 1) == 'X')) {
2162a23fd118Syl 		value += 2;
2163a23fd118Syl 	}
2164a23fd118Syl 
2165a23fd118Syl 	bar0_offset = mi_strtol(value, &end, 16);
2166a23fd118Syl 	if (end == value) {
2167a23fd118Syl 		bar0_offset = old_offset;
2168a23fd118Syl 		return (EINVAL);
2169a23fd118Syl 	}
2170a23fd118Syl 
2171a23fd118Syl 	xge_debug_ll(XGE_TRACE, "bar0: new value %s:%lX", value, bar0_offset);
2172a23fd118Syl 
2173a23fd118Syl 	return (0);
2174a23fd118Syl }
2175a23fd118Syl 
2176a23fd118Syl static int
2177a23fd118Syl xgell_debug_level_get(queue_t *q, mblk_t *mp, caddr_t cp, cred_t *credp)
2178a23fd118Syl {
2179a23fd118Syl 	char *buf;
2180a23fd118Syl 
2181a23fd118Syl 	buf = kmem_alloc(XGELL_IOCTL_BUFSIZE, KM_SLEEP);
2182a23fd118Syl 	if (buf == NULL) {
2183a23fd118Syl 		return (ENOSPC);
2184a23fd118Syl 	}
2185a23fd118Syl 	(void) mi_mpprintf(mp, "debug_level %d", xge_hal_driver_debug_level());
2186a23fd118Syl 	kmem_free(buf, XGELL_IOCTL_BUFSIZE);
2187a23fd118Syl 
2188a23fd118Syl 	return (0);
2189a23fd118Syl }
2190a23fd118Syl 
2191a23fd118Syl static int
2192a23fd118Syl xgell_debug_level_set(queue_t *q, mblk_t *mp, char *value, caddr_t cp,
2193a23fd118Syl     cred_t *credp)
2194a23fd118Syl {
2195a23fd118Syl 	int level;
2196a23fd118Syl 	char *end;
2197a23fd118Syl 
2198a23fd118Syl 	level = mi_strtol(value, &end, 10);
2199a23fd118Syl 	if (level < XGE_NONE || level > XGE_ERR || end == value) {
2200a23fd118Syl 		return (EINVAL);
2201a23fd118Syl 	}
2202a23fd118Syl 
2203a23fd118Syl 	xge_hal_driver_debug_level_set(level);
2204a23fd118Syl 
2205a23fd118Syl 	return (0);
2206a23fd118Syl }
2207a23fd118Syl 
2208a23fd118Syl static int
2209a23fd118Syl xgell_debug_module_mask_get(queue_t *q, mblk_t *mp, caddr_t cp, cred_t *credp)
2210a23fd118Syl {
2211a23fd118Syl 	char *buf;
2212a23fd118Syl 
2213a23fd118Syl 	buf = kmem_alloc(XGELL_IOCTL_BUFSIZE, KM_SLEEP);
2214a23fd118Syl 	if (buf == NULL) {
2215a23fd118Syl 		return (ENOSPC);
2216a23fd118Syl 	}
2217a23fd118Syl 	(void) mi_mpprintf(mp, "debug_module_mask 0x%08x",
2218a23fd118Syl 	    xge_hal_driver_debug_module_mask());
2219a23fd118Syl 	kmem_free(buf, XGELL_IOCTL_BUFSIZE);
2220a23fd118Syl 
2221a23fd118Syl 	return (0);
2222a23fd118Syl }
2223a23fd118Syl 
2224a23fd118Syl static int
2225a23fd118Syl xgell_debug_module_mask_set(queue_t *q, mblk_t *mp, char *value, caddr_t cp,
2226a23fd118Syl 			    cred_t *credp)
2227a23fd118Syl {
2228a23fd118Syl 	u32 mask;
2229a23fd118Syl 	char *end;
2230a23fd118Syl 
2231a23fd118Syl 	if (value && *value == '0' &&
2232a23fd118Syl 	    (*(value + 1) == 'x' || *(value + 1) == 'X')) {
2233a23fd118Syl 		value += 2;
2234a23fd118Syl 	}
2235a23fd118Syl 
2236a23fd118Syl 	mask = mi_strtol(value, &end, 16);
2237a23fd118Syl 	if (end == value) {
2238a23fd118Syl 		return (EINVAL);
2239a23fd118Syl 	}
2240a23fd118Syl 
2241a23fd118Syl 	xge_hal_driver_debug_module_mask_set(mask);
2242a23fd118Syl 
2243a23fd118Syl 	return (0);
2244a23fd118Syl }
2245a23fd118Syl 
2246a23fd118Syl static int
2247a23fd118Syl xgell_devconfig_get(queue_t *q, mblk_t *mp, caddr_t cp, cred_t *credp)
2248a23fd118Syl {
2249a23fd118Syl 	xgelldev_t *lldev = (xgelldev_t *)(void *)cp;
2250a23fd118Syl 	xge_hal_status_e status;
2251a23fd118Syl 	int retsize;
2252a23fd118Syl 	char *buf;
2253a23fd118Syl 
2254a23fd118Syl 	buf = kmem_alloc(XGELL_DEVCONF_BUFSIZE, KM_SLEEP);
2255a23fd118Syl 	if (buf == NULL) {
2256a23fd118Syl 		return (ENOSPC);
2257a23fd118Syl 	}
2258a23fd118Syl 	status = xge_hal_aux_device_config_read(lldev->devh,
2259a23fd118Syl 						XGELL_DEVCONF_BUFSIZE,
2260a23fd118Syl 						buf, &retsize);
2261a23fd118Syl 	if (status != XGE_HAL_OK) {
2262a23fd118Syl 		kmem_free(buf, XGELL_DEVCONF_BUFSIZE);
2263a23fd118Syl 		xge_debug_ll(XGE_ERR, "device_config_read(): status %d",
2264a23fd118Syl 		    status);
2265a23fd118Syl 		return (EINVAL);
2266a23fd118Syl 	}
2267a23fd118Syl 	*(buf + retsize - 1) = '\0'; /* remove last '\n' */
2268a23fd118Syl 	(void) mi_mpprintf(mp, "%s", buf);
2269a23fd118Syl 	kmem_free(buf, XGELL_DEVCONF_BUFSIZE);
2270a23fd118Syl 
2271a23fd118Syl 	return (0);
2272a23fd118Syl }
2273a23fd118Syl 
2274a23fd118Syl /*
2275a23fd118Syl  * xgell_device_register
2276a23fd118Syl  * @lldev: pointer to the LL device to register
2277a23fd118Syl  * @config: pointer to this network device's configuration
2279a23fd118Syl  *
2280a23fd118Syl  * This function registers the network device with the MAC layer.
2281a23fd118Syl  */
2282a23fd118Syl int
2283a23fd118Syl xgell_device_register(xgelldev_t *lldev, xgell_config_t *config)
2284a23fd118Syl {
2285*8347601bSyl 	mac_register_t *macp = NULL;
2286a23fd118Syl 	xge_hal_device_t *hldev = (xge_hal_device_t *)lldev->devh;
2287a23fd118Syl 
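	/*
	 * Export the ndd(1M) parameters: statistics, configuration and
	 * register dumps plus the debug tunables.
	 */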
2288a23fd118Syl 	if (nd_load(&lldev->ndp, "pciconf", xgell_pciconf_get, NULL,
2289ba2e4443Sseb 	    (caddr_t)lldev) == B_FALSE)
2290ba2e4443Sseb 		goto xgell_ndd_fail;
2291a23fd118Syl 
2292a23fd118Syl 	if (nd_load(&lldev->ndp, "about", xgell_about_get, NULL,
2293ba2e4443Sseb 	    (caddr_t)lldev) == B_FALSE)
2294ba2e4443Sseb 		goto xgell_ndd_fail;
2295a23fd118Syl 
2296a23fd118Syl 	if (nd_load(&lldev->ndp, "stats", xgell_stats_get, NULL,
2297ba2e4443Sseb 	    (caddr_t)lldev) == B_FALSE)
2298ba2e4443Sseb 		goto xgell_ndd_fail;
2299a23fd118Syl 
2300a23fd118Syl 	if (nd_load(&lldev->ndp, "bar0", xgell_bar0_get, xgell_bar0_set,
2301ba2e4443Sseb 	    (caddr_t)lldev) == B_FALSE)
2302ba2e4443Sseb 		goto xgell_ndd_fail;
2303a23fd118Syl 
2304a23fd118Syl 	if (nd_load(&lldev->ndp, "debug_level", xgell_debug_level_get,
2305ba2e4443Sseb 	    xgell_debug_level_set, (caddr_t)lldev) == B_FALSE)
2306ba2e4443Sseb 		goto xgell_ndd_fail;
2307a23fd118Syl 
2308a23fd118Syl 	if (nd_load(&lldev->ndp, "debug_module_mask",
2309a23fd118Syl 	    xgell_debug_module_mask_get, xgell_debug_module_mask_set,
2310ba2e4443Sseb 	    (caddr_t)lldev) == B_FALSE)
2311ba2e4443Sseb 		goto xgell_ndd_fail;
2312a23fd118Syl 
2313a23fd118Syl 	if (nd_load(&lldev->ndp, "devconfig", xgell_devconfig_get, NULL,
2314ba2e4443Sseb 	    (caddr_t)lldev) == B_FALSE)
2315ba2e4443Sseb 		goto xgell_ndd_fail;
2316a23fd118Syl 
2317a23fd118Syl 	bcopy(config, &lldev->config, sizeof (xgell_config_t));
2318a23fd118Syl 
2319a23fd118Syl 	if (xgell_rx_create_buffer_pool(lldev) != DDI_SUCCESS) {
2320a23fd118Syl 		nd_free(&lldev->ndp);
2321a23fd118Syl 		xge_debug_ll(XGE_ERR, "unable to create RX buffer pool");
2322a23fd118Syl 		return (DDI_FAILURE);
2323a23fd118Syl 	}
2324a23fd118Syl 
2325a23fd118Syl 	mutex_init(&lldev->genlock, NULL, MUTEX_DRIVER, hldev->irqh);
2326a23fd118Syl 
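	/* Fill in the GLDv3 MAC registration structure. */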
2327ba2e4443Sseb 	if ((macp = mac_alloc(MAC_VERSION)) == NULL)
2328ba2e4443Sseb 		goto xgell_register_fail;
2329ba2e4443Sseb 	macp->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
2330*8347601bSyl 	macp->m_driver = lldev;
2331ba2e4443Sseb 	macp->m_dip = lldev->dev_info;
2332ba2e4443Sseb 	macp->m_src_addr = hldev->macaddr[0];
2333ba2e4443Sseb 	macp->m_callbacks = &xgell_m_callbacks;
2334ba2e4443Sseb 	macp->m_min_sdu = 0;
2335ba2e4443Sseb 	macp->m_max_sdu = hldev->config.mtu;
2336a23fd118Syl 	/*
2337a23fd118Syl 	 * Finally, we're ready to register ourselves with the Nemo
2338a23fd118Syl 	 * interface; if this succeeds, we're all ready to start()
2339a23fd118Syl 	 */
2340*8347601bSyl 
2341*8347601bSyl 	if (mac_register(macp, &lldev->mh) != 0)
2342ba2e4443Sseb 		goto xgell_register_fail;
2343a23fd118Syl 
2344*8347601bSyl 	/* Calculate tx_copied_max here ??? */
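	/*
	 * tx_copied_max presumably bounds how much of a packet may be
	 * bcopy'ed into the pre-allocated, aligned Tx buffers instead of
	 * being DMA-mapped directly.
	 */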
2345*8347601bSyl 	lldev->tx_copied_max = hldev->config.fifo.max_frags *
2346*8347601bSyl 		hldev->config.fifo.alignment_size *
2347*8347601bSyl 		hldev->config.fifo.max_aligned_frags;
2348*8347601bSyl 
2349a23fd118Syl 	xge_debug_ll(XGE_TRACE, "ethernet device %s%d registered",
2350a23fd118Syl 	    XGELL_IFNAME, lldev->instance);
2351a23fd118Syl 
2352a23fd118Syl 	return (DDI_SUCCESS);
2353ba2e4443Sseb 
2354ba2e4443Sseb xgell_ndd_fail:
2355ba2e4443Sseb 	nd_free(&lldev->ndp);
2356ba2e4443Sseb 	xge_debug_ll(XGE_ERR, "%s", "unable to load ndd parameter");
2357ba2e4443Sseb 	return (DDI_FAILURE);
2358ba2e4443Sseb 
2359ba2e4443Sseb xgell_register_fail:
2360*8347601bSyl 	if (macp != NULL)
2361*8347601bSyl 		mac_free(macp);
2362ba2e4443Sseb 	nd_free(&lldev->ndp);
2363ba2e4443Sseb 	mutex_destroy(&lldev->genlock);
2364ba2e4443Sseb 	/* Ignore return value, since RX not start */
2365ba2e4443Sseb 	(void) xgell_rx_destroy_buffer_pool(lldev);
2366ba2e4443Sseb 	xge_debug_ll(XGE_ERR, "%s", "unable to register networking device");
2367ba2e4443Sseb 	return (DDI_FAILURE);
2368a23fd118Syl }
2369a23fd118Syl 
2370a23fd118Syl /*
2371a23fd118Syl  * xgell_device_unregister
2373a23fd118Syl  * @lldev: pointer to valid LL device.
2374a23fd118Syl  *
2375a23fd118Syl  * This function will unregister the network device and release its resources.
2376a23fd118Syl  */
2377a23fd118Syl int
2378a23fd118Syl xgell_device_unregister(xgelldev_t *lldev)
2379a23fd118Syl {
2380a23fd118Syl 	/*
2381a23fd118Syl 	 * Destroy RX buffer pool.
2382a23fd118Syl 	 */
2383a23fd118Syl 	if (xgell_rx_destroy_buffer_pool(lldev) != DDI_SUCCESS) {
2384a23fd118Syl 		return (DDI_FAILURE);
2385a23fd118Syl 	}
2386a23fd118Syl 
2387ba2e4443Sseb 	if (mac_unregister(lldev->mh) != 0) {
2388a23fd118Syl 		xge_debug_ll(XGE_ERR, "unable to unregister device %s%d",
2389a23fd118Syl 		    XGELL_IFNAME, lldev->instance);
2390a23fd118Syl 		return (DDI_FAILURE);
2391a23fd118Syl 	}
2392a23fd118Syl 
2393a23fd118Syl 	mutex_destroy(&lldev->genlock);
2394a23fd118Syl 
2395a23fd118Syl 	nd_free(&lldev->ndp);
2396a23fd118Syl 
2397a23fd118Syl 	xge_debug_ll(XGE_TRACE, "ethernet device %s%d unregistered",
2398a23fd118Syl 	    XGELL_IFNAME, lldev->instance);
2399a23fd118Syl 
2400a23fd118Syl 	return (DDI_SUCCESS);
2401a23fd118Syl }
2402