1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 #pragma ident	"%Z%%M%	%I%	%E% SMI"
28 
29 /*
30  *  Copyright (c) 2002-2005 Neterion, Inc.
 *  All rights reserved.
32  *
33  *  FileName :    xgell.c
34  *
35  *  Description:  Xge Link Layer data path implementation
36  *
37  */
38 
39 #include "xgell.h"
40 
41 #include <netinet/ip.h>
42 #include <netinet/tcp.h>
43 #include <netinet/udp.h>
44 
45 #define	XGELL_MAX_FRAME_SIZE(hldev)	((hldev)->config.mtu +	\
46     sizeof (struct ether_vlan_header))
47 
48 #define	HEADROOM		2	/* for DIX-only packets */
49 
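/*
 * No-op free routine: mblks that loan out driver-owned header memory via
 * desballoc()/esballoc() still require a valid free_func, so an empty one
 * is supplied here (the memory itself is presumably managed elsewhere).
 */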
50 void header_free_func(void *arg) { }
51 frtn_t header_frtn = {header_free_func, NULL};
52 
53 /* DMA attributes used for Tx side */
54 static struct ddi_dma_attr tx_dma_attr = {
55 	DMA_ATTR_V0,			/* dma_attr_version */
56 	0x0ULL,				/* dma_attr_addr_lo */
57 	0xFFFFFFFFFFFFFFFFULL,		/* dma_attr_addr_hi */
58 	0xFFFFFFFFFFFFFFFFULL,		/* dma_attr_count_max */
59 #if defined(__sparc)
60 	0x2000,				/* dma_attr_align */
61 #else
62 	0x1000,				/* dma_attr_align */
63 #endif
64 	0xFC00FC,			/* dma_attr_burstsizes */
65 	0x1,				/* dma_attr_minxfer */
66 	0xFFFFFFFFFFFFFFFFULL,		/* dma_attr_maxxfer */
67 	0xFFFFFFFFFFFFFFFFULL,		/* dma_attr_seg */
68 	18,				/* dma_attr_sgllen */
69 	(unsigned int)1,		/* dma_attr_granular */
70 	0				/* dma_attr_flags */
71 };
72 
73 /*
74  * DMA attributes used when using ddi_dma_mem_alloc to
 * allocate HAL descriptors and Rx buffers during replenish
76  */
77 static struct ddi_dma_attr hal_dma_attr = {
78 	DMA_ATTR_V0,			/* dma_attr_version */
79 	0x0ULL,				/* dma_attr_addr_lo */
80 	0xFFFFFFFFFFFFFFFFULL,		/* dma_attr_addr_hi */
81 	0xFFFFFFFFFFFFFFFFULL,		/* dma_attr_count_max */
82 #if defined(__sparc)
83 	0x2000,				/* dma_attr_align */
84 #else
85 	0x1000,				/* dma_attr_align */
86 #endif
87 	0xFC00FC,			/* dma_attr_burstsizes */
88 	0x1,				/* dma_attr_minxfer */
89 	0xFFFFFFFFFFFFFFFFULL,		/* dma_attr_maxxfer */
90 	0xFFFFFFFFFFFFFFFFULL,		/* dma_attr_seg */
91 	1,				/* dma_attr_sgllen */
	(unsigned int)1,		/* dma_attr_granular */
93 	DDI_DMA_RELAXED_ORDERING	/* dma_attr_flags */
94 };
95 
96 struct ddi_dma_attr *p_hal_dma_attr = &hal_dma_attr;
97 
98 static int		xgell_m_stat(void *, uint_t, uint64_t *);
99 static int		xgell_m_start(void *);
100 static void		xgell_m_stop(void *);
101 static int		xgell_m_promisc(void *, boolean_t);
102 static int		xgell_m_multicst(void *, boolean_t, const uint8_t *);
103 static int		xgell_m_unicst(void *, const uint8_t *);
104 static void		xgell_m_ioctl(void *, queue_t *, mblk_t *);
105 static mblk_t 		*xgell_m_tx(void *, mblk_t *);
106 static boolean_t	xgell_m_getcapab(void *, mac_capab_t, void *);
107 
108 #define	XGELL_M_CALLBACK_FLAGS	(MC_IOCTL | MC_GETCAPAB)
109 
110 static mac_callbacks_t xgell_m_callbacks = {
	XGELL_M_CALLBACK_FLAGS,		/* mc_callbacks */
	xgell_m_stat,			/* mc_getstat */
	xgell_m_start,			/* mc_start */
	xgell_m_stop,			/* mc_stop */
	xgell_m_promisc,		/* mc_setpromisc */
	xgell_m_multicst,		/* mc_multicst */
	xgell_m_unicst,			/* mc_unicst */
	xgell_m_tx,			/* mc_tx */
	NULL,				/* mc_resources */
	xgell_m_ioctl,			/* mc_ioctl */
	xgell_m_getcapab		/* mc_getcapab */
122 };
123 
124 /*
125  * xge_device_poll
126  *
 * Called via timeout() every second. xge_callback_event_queued() also calls
 * this when a HAL event has been rescheduled.
129  */
130 /*ARGSUSED*/
131 void
132 xge_device_poll(void *data)
133 {
134 	xgelldev_t *lldev = xge_hal_device_private(data);
135 
136 	mutex_enter(&lldev->genlock);
137 	if (lldev->is_initialized) {
138 		xge_hal_device_poll(data);
139 		lldev->timeout_id = timeout(xge_device_poll, data,
140 		    XGE_DEV_POLL_TICKS);
141 	} else if (lldev->in_reset == 1) {
142 		lldev->timeout_id = timeout(xge_device_poll, data,
143 		    XGE_DEV_POLL_TICKS);
144 	} else {
145 		lldev->timeout_id = 0;
146 	}
147 	mutex_exit(&lldev->genlock);
148 }
149 
150 /*
151  * xge_device_poll_now
152  *
 * Polls the device immediately via xge_hal_device_poll(), without
 * rescheduling the poll timer.
154  */
155 void
156 xge_device_poll_now(void *data)
157 {
158 	xgelldev_t *lldev = xge_hal_device_private(data);
159 
160 	mutex_enter(&lldev->genlock);
161 	if (lldev->is_initialized) {
162 		xge_hal_device_poll(data);
163 	}
164 	mutex_exit(&lldev->genlock);
165 }
166 
167 /*
168  * xgell_callback_link_up
169  *
 * This function is called by HAL to notify of a HW link-up state change.
171  */
172 void
173 xgell_callback_link_up(void *userdata)
174 {
175 	xgelldev_t *lldev = (xgelldev_t *)userdata;
176 
177 	mac_link_update(lldev->mh, LINK_STATE_UP);
178 }
179 
180 /*
181  * xgell_callback_link_down
182  *
 * This function is called by HAL to notify of a HW link-down state change.
184  */
185 void
186 xgell_callback_link_down(void *userdata)
187 {
188 	xgelldev_t *lldev = (xgelldev_t *)userdata;
189 
190 	mac_link_update(lldev->mh, LINK_STATE_DOWN);
191 }
192 
193 /*
194  * xgell_rx_buffer_replenish_all
195  *
196  * To replenish all freed dtr(s) with buffers in free pool. It's called by
197  * xgell_rx_buffer_recycle() or xgell_rx_1b_compl().
198  * Must be called with pool_lock held.
199  */
200 static void
201 xgell_rx_buffer_replenish_all(xgell_ring_t *ring)
202 {
203 	xge_hal_dtr_h dtr;
204 	xgell_rx_buffer_t *rx_buffer;
205 	xgell_rxd_priv_t *rxd_priv;
206 
207 	xge_assert(mutex_owned(&ring->bf_pool.pool_lock));
208 
209 	while ((ring->bf_pool.free > 0) &&
210 	    (xge_hal_ring_dtr_reserve(ring->channelh, &dtr) ==
211 	    XGE_HAL_OK)) {
212 		rx_buffer = ring->bf_pool.head;
213 		ring->bf_pool.head = rx_buffer->next;
214 		ring->bf_pool.free--;
215 
216 		xge_assert(rx_buffer);
217 		xge_assert(rx_buffer->dma_addr);
218 
219 		rxd_priv = (xgell_rxd_priv_t *)
220 		    xge_hal_ring_dtr_private(ring->channelh, dtr);
221 		xge_hal_ring_dtr_1b_set(dtr, rx_buffer->dma_addr,
222 		    ring->bf_pool.size);
223 
224 		rxd_priv->rx_buffer = rx_buffer;
225 		xge_hal_ring_dtr_post(ring->channelh, dtr);
226 	}
227 }
228 
229 /*
230  * xgell_rx_buffer_release
231  *
 * The only thing done here is to put the buffer back into the pool.
 * Callers must hold bf_pool.pool_lock.
234  */
235 static void
236 xgell_rx_buffer_release(xgell_rx_buffer_t *rx_buffer)
237 {
238 	xgell_ring_t *ring = rx_buffer->ring;
239 
240 	xge_assert(mutex_owned(&ring->bf_pool.pool_lock));
241 
242 	/* Put the buffer back to pool */
243 	rx_buffer->next = ring->bf_pool.head;
244 	ring->bf_pool.head = rx_buffer;
245 
246 	ring->bf_pool.free++;
247 }
248 
249 /*
250  * xgell_rx_buffer_recycle
251  *
 * Free routine registered via desballoc(); called when a loaned-up
 * buffer is freed. We will try to replenish all descriptors.
254  */
255 
256 /*
 * Previously there was heavy lock contention between xgell_rx_1b_compl() and
 * xgell_rx_buffer_recycle(), which consumed a lot of CPU resources and had a
 * bad effect on rx performance. A separate recycle list is introduced to
 * overcome this. The recycle list records rx buffers as they are recycled,
 * and these buffers are returned to the free list in bulk instead of
 * one-by-one.
263  */
264 
265 static void
266 xgell_rx_buffer_recycle(char *arg)
267 {
268 	xgell_rx_buffer_t *rx_buffer = (xgell_rx_buffer_t *)arg;
269 	xgell_ring_t *ring = rx_buffer->ring;
270 	xgelldev_t *lldev = ring->lldev;
271 	xgell_rx_buffer_pool_t *bf_pool = &ring->bf_pool;
272 
273 	mutex_enter(&bf_pool->recycle_lock);
274 
275 	rx_buffer->next = bf_pool->recycle_head;
276 	bf_pool->recycle_head = rx_buffer;
277 	if (bf_pool->recycle_tail == NULL)
278 		bf_pool->recycle_tail = rx_buffer;
279 	bf_pool->recycle++;
280 
281 	/*
	 * Until a good way to set this hiwat is found, just always call
	 * replenish_all. *TODO*
284 	 */
285 	if ((lldev->is_initialized != 0) &&
286 	    (bf_pool->recycle >= XGELL_RX_BUFFER_RECYCLE_CACHE)) {
287 		if (mutex_tryenter(&bf_pool->pool_lock)) {
288 			bf_pool->recycle_tail->next = bf_pool->head;
289 			bf_pool->head = bf_pool->recycle_head;
290 			bf_pool->recycle_head = bf_pool->recycle_tail = NULL;
291 			bf_pool->post -= bf_pool->recycle;
292 			bf_pool->free += bf_pool->recycle;
293 			bf_pool->recycle = 0;
294 			xgell_rx_buffer_replenish_all(ring);
295 			mutex_exit(&bf_pool->pool_lock);
296 		}
297 	}
298 
299 	mutex_exit(&bf_pool->recycle_lock);
300 }
301 
302 /*
303  * xgell_rx_buffer_alloc
304  *
 * Allocate one rx buffer and return a pointer to it.
 * Return NULL on failure.
307  */
308 static xgell_rx_buffer_t *
309 xgell_rx_buffer_alloc(xgell_ring_t *ring)
310 {
311 	xge_hal_device_t *hldev;
312 	void *vaddr;
313 	ddi_dma_handle_t dma_handle;
314 	ddi_acc_handle_t dma_acch;
315 	dma_addr_t dma_addr;
316 	uint_t ncookies;
317 	ddi_dma_cookie_t dma_cookie;
318 	size_t real_size;
319 	extern ddi_device_acc_attr_t *p_xge_dev_attr;
320 	xgell_rx_buffer_t *rx_buffer;
321 	xgelldev_t *lldev = ring->lldev;
322 
323 	hldev = (xge_hal_device_t *)lldev->devh;
324 
325 	if (ddi_dma_alloc_handle(hldev->pdev, p_hal_dma_attr, DDI_DMA_SLEEP,
326 	    0, &dma_handle) != DDI_SUCCESS) {
327 		xge_debug_ll(XGE_ERR, "%s%d: can not allocate DMA handle",
328 		    XGELL_IFNAME, lldev->instance);
329 		goto handle_failed;
330 	}
331 
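	/*
	 * The buffer is carved out of a single DMA allocation:
	 *
	 *   +----------+-----------------------------+--------------------+
	 *   | HEADROOM | packet data (bf_pool.size)  | xgell_rx_buffer_t  |
	 *   +----------+-----------------------------+--------------------+
	 *
	 * Only the packet data area is bound for DMA below; the trailing
	 * xgell_rx_buffer_t keeps the per-buffer bookkeeping (handles,
	 * addresses, free routine) co-located with the buffer itself.
	 */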
332 	/* reserve some space at the end of the buffer for recycling */
333 	if (ddi_dma_mem_alloc(dma_handle, HEADROOM + ring->bf_pool.size +
334 	    sizeof (xgell_rx_buffer_t), p_xge_dev_attr, DDI_DMA_STREAMING,
335 	    DDI_DMA_SLEEP, 0, (caddr_t *)&vaddr, &real_size, &dma_acch) !=
336 	    DDI_SUCCESS) {
337 		xge_debug_ll(XGE_ERR, "%s%d: can not allocate DMA-able memory",
338 		    XGELL_IFNAME, lldev->instance);
339 		goto mem_failed;
340 	}
341 
342 	if (HEADROOM + ring->bf_pool.size + sizeof (xgell_rx_buffer_t) >
343 	    real_size) {
		xge_debug_ll(XGE_ERR, "%s%d: allocated DMA-able memory too small",
345 		    XGELL_IFNAME, lldev->instance);
346 		goto bind_failed;
347 	}
348 
349 	if (ddi_dma_addr_bind_handle(dma_handle, NULL, (char *)vaddr + HEADROOM,
350 	    ring->bf_pool.size, DDI_DMA_READ | DDI_DMA_STREAMING,
351 	    DDI_DMA_SLEEP, 0, &dma_cookie, &ncookies) != DDI_SUCCESS) {
352 		xge_debug_ll(XGE_ERR, "%s%d: out of mapping for mblk",
353 		    XGELL_IFNAME, lldev->instance);
354 		goto bind_failed;
355 	}
356 
357 	if (ncookies != 1 || dma_cookie.dmac_size < ring->bf_pool.size) {
358 		xge_debug_ll(XGE_ERR, "%s%d: can not handle partial DMA",
359 		    XGELL_IFNAME, lldev->instance);
360 		goto check_failed;
361 	}
362 
363 	dma_addr = dma_cookie.dmac_laddress;
364 
365 	rx_buffer = (xgell_rx_buffer_t *)((char *)vaddr + real_size -
366 	    sizeof (xgell_rx_buffer_t));
367 	rx_buffer->next = NULL;
368 	rx_buffer->vaddr = vaddr;
369 	rx_buffer->dma_addr = dma_addr;
370 	rx_buffer->dma_handle = dma_handle;
371 	rx_buffer->dma_acch = dma_acch;
372 	rx_buffer->ring = ring;
373 	rx_buffer->frtn.free_func = xgell_rx_buffer_recycle;
374 	rx_buffer->frtn.free_arg = (void *)rx_buffer;
375 
376 	return (rx_buffer);
377 
378 check_failed:
379 	(void) ddi_dma_unbind_handle(dma_handle);
380 bind_failed:
381 	XGE_OS_MEMORY_CHECK_FREE(vaddr, 0);
382 	ddi_dma_mem_free(&dma_acch);
383 mem_failed:
384 	ddi_dma_free_handle(&dma_handle);
385 handle_failed:
386 
387 	return (NULL);
388 }
389 
390 /*
391  * xgell_rx_destroy_buffer_pool
392  *
 * Destroy the buffer pool. If any buffers are still held by the upper layer,
 * as recorded by bf_pool.post, return DDI_FAILURE to refuse the unload.
395  */
396 static int
397 xgell_rx_destroy_buffer_pool(xgell_ring_t *ring)
398 {
399 	xgell_rx_buffer_t *rx_buffer;
400 	ddi_dma_handle_t  dma_handle;
401 	ddi_acc_handle_t  dma_acch;
402 	xgelldev_t *lldev = ring->lldev;
403 	int i;
404 
405 	if (ring->bf_pool.recycle > 0) {
406 		ring->bf_pool.recycle_tail->next = ring->bf_pool.head;
407 		ring->bf_pool.head = ring->bf_pool.recycle_head;
408 		ring->bf_pool.recycle_tail =
409 		    ring->bf_pool.recycle_head = NULL;
410 		ring->bf_pool.post -= ring->bf_pool.recycle;
411 		ring->bf_pool.free += ring->bf_pool.recycle;
412 		ring->bf_pool.recycle = 0;
413 	}
414 
415 	/*
	 * If there is any posted buffer, the driver must refuse to be
	 * detached; the upper layer needs to release those buffers first.
418 	 */
419 	if (ring->bf_pool.post != 0) {
420 		xge_debug_ll(XGE_ERR,
	    "%s%d has some buffers not yet recycled, try later!",
422 		    XGELL_IFNAME, lldev->instance);
423 		return (DDI_FAILURE);
424 	}
425 
426 	/*
	 * Release buffers one by one.
428 	 */
429 	for (i = ring->bf_pool.total; i > 0; i--) {
430 		rx_buffer = ring->bf_pool.head;
431 		xge_assert(rx_buffer != NULL);
432 
433 		ring->bf_pool.head = rx_buffer->next;
434 
435 		dma_handle = rx_buffer->dma_handle;
436 		dma_acch = rx_buffer->dma_acch;
437 
438 		if (ddi_dma_unbind_handle(dma_handle) != DDI_SUCCESS) {
439 			xge_debug_ll(XGE_ERR, "%s",
440 			    "failed to unbind DMA handle!");
441 			ring->bf_pool.head = rx_buffer;
442 			return (DDI_FAILURE);
443 		}
444 		ddi_dma_mem_free(&dma_acch);
445 		ddi_dma_free_handle(&dma_handle);
446 
447 		ring->bf_pool.total--;
448 		ring->bf_pool.free--;
449 	}
450 
451 	mutex_destroy(&ring->bf_pool.recycle_lock);
452 	mutex_destroy(&ring->bf_pool.pool_lock);
453 	return (DDI_SUCCESS);
454 }
455 
456 /*
457  * xgell_rx_create_buffer_pool
458  *
 * Initialize the RX buffer pool for an RX ring. Refer to xgell_rx_buffer_pool_t.
460  */
461 static int
462 xgell_rx_create_buffer_pool(xgell_ring_t *ring)
463 {
464 	xge_hal_device_t *hldev;
465 	xgell_rx_buffer_t *rx_buffer;
466 	xgelldev_t *lldev = ring->lldev;
467 	int i;
468 
469 	hldev = (xge_hal_device_t *)lldev->devh;
470 
471 	ring->bf_pool.total = 0;
472 	ring->bf_pool.size = XGELL_MAX_FRAME_SIZE(hldev);
473 	ring->bf_pool.head = NULL;
474 	ring->bf_pool.free = 0;
475 	ring->bf_pool.post = 0;
476 	ring->bf_pool.post_hiwat = lldev->config.rx_buffer_post_hiwat;
477 	ring->bf_pool.recycle = 0;
478 	ring->bf_pool.recycle_head = NULL;
479 	ring->bf_pool.recycle_tail = NULL;
480 
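	/*
	 * Pool accounting: `total' counts every buffer allocated, `free'
	 * those on the free list, `post' those loaned up to the stack in
	 * desballoc()'ed mblks, and `recycle' those parked on the recycle
	 * list awaiting a bulk return to the free list.
	 */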
481 	mutex_init(&ring->bf_pool.pool_lock, NULL, MUTEX_DRIVER,
482 	    DDI_INTR_PRI(hldev->irqh));
483 	mutex_init(&ring->bf_pool.recycle_lock, NULL, MUTEX_DRIVER,
484 	    DDI_INTR_PRI(hldev->irqh));
485 
486 	/*
	 * Allocate buffers one by one. If one fails, destroy the whole
	 * pool by calling xgell_rx_destroy_buffer_pool().
489 	 */
490 
491 	for (i = 0; i < lldev->config.rx_buffer_total; i++) {
492 		if ((rx_buffer = xgell_rx_buffer_alloc(ring)) == NULL) {
493 			(void) xgell_rx_destroy_buffer_pool(ring);
494 			return (DDI_FAILURE);
495 		}
496 
497 		rx_buffer->next = ring->bf_pool.head;
498 		ring->bf_pool.head = rx_buffer;
499 
500 		ring->bf_pool.total++;
501 		ring->bf_pool.free++;
502 	}
503 
504 	return (DDI_SUCCESS);
505 }
506 
507 /*
508  * xgell_rx_dtr_replenish
509  *
 * Replenish a descriptor with an rx_buffer from the RX buffer pool.
 * The dtr will be posted right away.
512  */
513 xge_hal_status_e
514 xgell_rx_dtr_replenish(xge_hal_channel_h channelh, xge_hal_dtr_h dtr, int index,
515     void *userdata, xge_hal_channel_reopen_e reopen)
516 {
517 	xgell_ring_t *ring = userdata;
518 	xgell_rx_buffer_t *rx_buffer;
519 	xgell_rxd_priv_t *rxd_priv;
520 
521 	if (ring->bf_pool.head == NULL) {
522 		xge_debug_ll(XGE_ERR, "%s", "no more available rx DMA buffer!");
523 		return (XGE_HAL_FAIL);
524 	}
525 	rx_buffer = ring->bf_pool.head;
526 	ring->bf_pool.head = rx_buffer->next;
527 	ring->bf_pool.free--;
528 
529 	xge_assert(rx_buffer);
530 	xge_assert(rx_buffer->dma_addr);
531 
532 	rxd_priv = (xgell_rxd_priv_t *)xge_hal_ring_dtr_private(channelh, dtr);
533 	xge_hal_ring_dtr_1b_set(dtr, rx_buffer->dma_addr, ring->bf_pool.size);
534 
535 	rxd_priv->rx_buffer = rx_buffer;
536 
537 	return (XGE_HAL_OK);
538 }
539 
540 /*
541  * xgell_get_ip_offset
542  *
543  * Calculate the offset to IP header.
544  */
545 static inline int
546 xgell_get_ip_offset(xge_hal_dtr_info_t *ext_info)
547 {
548 	int ip_off;
549 
550 	/* get IP-header offset */
551 	switch (ext_info->frame) {
552 	case XGE_HAL_FRAME_TYPE_DIX:
553 		ip_off = XGE_HAL_HEADER_ETHERNET_II_802_3_SIZE;
554 		break;
555 	case XGE_HAL_FRAME_TYPE_IPX:
556 		ip_off = (XGE_HAL_HEADER_ETHERNET_II_802_3_SIZE +
557 		    XGE_HAL_HEADER_802_2_SIZE +
558 		    XGE_HAL_HEADER_SNAP_SIZE);
559 		break;
560 	case XGE_HAL_FRAME_TYPE_LLC:
561 		ip_off = (XGE_HAL_HEADER_ETHERNET_II_802_3_SIZE +
562 		    XGE_HAL_HEADER_802_2_SIZE);
563 		break;
564 	case XGE_HAL_FRAME_TYPE_SNAP:
565 		ip_off = (XGE_HAL_HEADER_ETHERNET_II_802_3_SIZE +
566 		    XGE_HAL_HEADER_SNAP_SIZE);
567 		break;
568 	default:
569 		ip_off = 0;
570 		break;
571 	}
572 
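	/*
	 * A VLAN tag sits between the MAC header and the IP header, so a
	 * tagged IP frame carries its IP header an extra
	 * XGE_HAL_HEADER_VLAN_SIZE bytes in (e.g. 14 + 4 = 18 bytes for a
	 * tagged DIX frame, assuming the usual header sizes).
	 */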
573 	if ((ext_info->proto & XGE_HAL_FRAME_PROTO_IPV4 ||
574 	    ext_info->proto & XGE_HAL_FRAME_PROTO_IPV6) &&
575 	    (ext_info->proto & XGE_HAL_FRAME_PROTO_VLAN_TAGGED)) {
576 		ip_off += XGE_HAL_HEADER_VLAN_SIZE;
577 	}
578 
579 	return (ip_off);
580 }
581 
582 /*
583  * xgell_rx_hcksum_assoc
584  *
 * Determine the packet type and then call hcksum_assoc() to associate the
 * h/w checksum information.
587  */
588 static inline void
589 xgell_rx_hcksum_assoc(mblk_t *mp, char *vaddr, int pkt_length,
590     xge_hal_dtr_info_t *ext_info)
591 {
592 	int cksum_flags = 0;
593 
594 	if (!(ext_info->proto & XGE_HAL_FRAME_PROTO_IP_FRAGMENTED)) {
595 		if (ext_info->proto & XGE_HAL_FRAME_PROTO_TCP_OR_UDP) {
596 			if (ext_info->l3_cksum == XGE_HAL_L3_CKSUM_OK) {
597 				cksum_flags |= HCK_IPV4_HDRCKSUM;
598 			}
599 			if (ext_info->l4_cksum == XGE_HAL_L4_CKSUM_OK) {
600 				cksum_flags |= HCK_FULLCKSUM_OK;
601 			}
602 			if (cksum_flags) {
603 				cksum_flags |= HCK_FULLCKSUM;
604 				(void) hcksum_assoc(mp, NULL, NULL, 0,
605 				    0, 0, 0, cksum_flags, 0);
606 			}
607 		}
608 	} else if (ext_info->proto &
609 	    (XGE_HAL_FRAME_PROTO_IPV4 | XGE_HAL_FRAME_PROTO_IPV6)) {
610 		/*
611 		 * Just pass the partial cksum up to IP.
612 		 */
613 		int ip_off = xgell_get_ip_offset(ext_info);
614 		int start, end = pkt_length - ip_off;
615 
616 		if (ext_info->proto & XGE_HAL_FRAME_PROTO_IPV4) {
617 			struct ip *ip =
618 			    (struct ip *)(vaddr + ip_off);
619 			start = ip->ip_hl * 4 + ip_off;
620 		} else {
621 			start = ip_off + 40;
			start = ip_off + 40;	/* IPv6: fixed 40-byte header */
623 		cksum_flags |= HCK_PARTIALCKSUM;
624 		(void) hcksum_assoc(mp, NULL, NULL, start, 0,
625 		    end, ntohs(ext_info->l4_cksum), cksum_flags,
626 		    0);
627 	}
628 }
629 
630 /*
631  * xgell_rx_1b_msg_alloc
632  *
 * Allocate a message header for the data buffer, and decide whether to copy
 * the packet into a new data buffer so the big rx_buffer can be released to
 * save memory.
 *
 * If pkt_length <= XGELL_RX_DMA_LOWAT, call allocb() to allocate a new
 * message and copy the payload in.
638  */
639 static mblk_t *
640 xgell_rx_1b_msg_alloc(xgelldev_t *lldev, xgell_rx_buffer_t *rx_buffer,
641     int pkt_length, xge_hal_dtr_info_t *ext_info, boolean_t *copyit)
642 {
643 	mblk_t *mp;
644 	char *vaddr;
645 
646 	vaddr = (char *)rx_buffer->vaddr + HEADROOM;
647 	/*
648 	 * Copy packet into new allocated message buffer, if pkt_length
649 	 * is less than XGELL_RX_DMA_LOWAT
650 	 */
651 	if (*copyit || pkt_length <= lldev->config.rx_dma_lowat) {
652 		if ((mp = allocb(pkt_length + HEADROOM, 0)) == NULL) {
653 			return (NULL);
654 		}
655 		mp->b_rptr += HEADROOM;
656 		bcopy(vaddr, mp->b_rptr, pkt_length);
657 		mp->b_wptr = mp->b_rptr + pkt_length;
658 		*copyit = B_TRUE;
659 		return (mp);
660 	}
661 
662 	/*
663 	 * Just allocate mblk for current data buffer
664 	 */
665 	if ((mp = (mblk_t *)desballoc((unsigned char *)vaddr, pkt_length, 0,
666 	    &rx_buffer->frtn)) == NULL) {
667 		/* Drop it */
668 		return (NULL);
669 	}
670 	/*
671 	 * Adjust the b_rptr/b_wptr in the mblk_t structure.
672 	 */
673 	mp->b_wptr += pkt_length;
674 
675 	return (mp);
676 }
677 
678 /*
679  * xgell_rx_1b_compl
680  *
 * Called when an interrupt signals a received frame, or when the receive
 * ring contains fresh, as yet unprocessed frames.
683  */
684 static xge_hal_status_e
685 xgell_rx_1b_compl(xge_hal_channel_h channelh, xge_hal_dtr_h dtr, u8 t_code,
686     void *userdata)
687 {
688 	xgell_ring_t *ring = (xgell_ring_t *)userdata;
689 	xgelldev_t *lldev = ring->lldev;
690 	xgell_rx_buffer_t *rx_buffer;
691 	mblk_t *mp_head = NULL;
692 	mblk_t *mp_end  = NULL;
693 	int pkt_burst = 0;
694 
695 	mutex_enter(&ring->bf_pool.pool_lock);
696 
697 	do {
698 		int pkt_length;
699 		dma_addr_t dma_data;
700 		mblk_t *mp;
701 		boolean_t copyit = B_FALSE;
702 
703 		xgell_rxd_priv_t *rxd_priv = ((xgell_rxd_priv_t *)
704 		    xge_hal_ring_dtr_private(channelh, dtr));
705 		xge_hal_dtr_info_t ext_info;
706 
707 		rx_buffer = rxd_priv->rx_buffer;
708 
709 		xge_hal_ring_dtr_1b_get(channelh, dtr, &dma_data, &pkt_length);
710 		xge_hal_ring_dtr_info_get(channelh, dtr, &ext_info);
711 
712 		xge_assert(dma_data == rx_buffer->dma_addr);
713 
714 		if (t_code != 0) {
715 			xge_debug_ll(XGE_ERR, "%s%d: rx: dtr 0x%"PRIx64
716 			    " completed due to error t_code %01x", XGELL_IFNAME,
717 			    lldev->instance, (uint64_t)(uintptr_t)dtr, t_code);
718 
719 			(void) xge_hal_device_handle_tcode(channelh, dtr,
720 			    t_code);
721 			xge_hal_ring_dtr_free(channelh, dtr); /* drop it */
722 			xgell_rx_buffer_release(rx_buffer);
723 			continue;
724 		}
725 
726 		/*
727 		 * Sync the DMA memory
728 		 */
729 		if (ddi_dma_sync(rx_buffer->dma_handle, 0, pkt_length,
730 		    DDI_DMA_SYNC_FORKERNEL) != DDI_SUCCESS) {
731 			xge_debug_ll(XGE_ERR, "%s%d: rx: can not do DMA sync",
732 			    XGELL_IFNAME, lldev->instance);
733 			xge_hal_ring_dtr_free(channelh, dtr); /* drop it */
734 			xgell_rx_buffer_release(rx_buffer);
735 			continue;
736 		}
737 
738 		/*
739 		 * Allocate message for the packet.
740 		 */
		copyit = (ring->bf_pool.post > ring->bf_pool.post_hiwat);
746 
747 		mp = xgell_rx_1b_msg_alloc(lldev, rx_buffer, pkt_length,
748 		    &ext_info, &copyit);
749 
750 		xge_hal_ring_dtr_free(channelh, dtr);
751 
752 		/*
753 		 * Release the buffer and recycle it later
754 		 */
755 		if ((mp == NULL) || copyit) {
756 			xgell_rx_buffer_release(rx_buffer);
757 		} else {
758 			/*
			 * Count it, since the buffer is loaned up to the stack.
760 			 */
761 			ring->bf_pool.post++;
762 		}
763 		if (mp == NULL) {
764 			xge_debug_ll(XGE_ERR,
765 			    "%s%d: rx: can not allocate mp mblk",
766 			    XGELL_IFNAME, lldev->instance);
767 			continue;
768 		}
769 
770 		/*
771 		 * Associate cksum_flags per packet type and h/w
772 		 * cksum flags.
773 		 */
774 		xgell_rx_hcksum_assoc(mp, (char *)rx_buffer->vaddr +
775 		    HEADROOM, pkt_length, &ext_info);
776 
777 		if (mp_head == NULL) {
778 			mp_head = mp;
779 			mp_end = mp;
780 		} else {
781 			mp_end->b_next = mp;
782 			mp_end = mp;
783 		}
784 
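		/*
		 * Hand the chain to the MAC layer in bursts of
		 * rx_pkt_burst packets, dropping the pool lock across
		 * the mac_rx() upcall so buffer recycling can proceed
		 * while the stack processes the packets.
		 */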
785 		if (++pkt_burst < lldev->config.rx_pkt_burst)
786 			continue;
787 
788 		if (ring->bf_pool.post > ring->bf_pool.post_hiwat) {
789 			/* Replenish rx buffers */
790 			xgell_rx_buffer_replenish_all(ring);
791 		}
792 		mutex_exit(&ring->bf_pool.pool_lock);
793 		if (mp_head != NULL) {
794 			mac_rx(lldev->mh, ((xgell_ring_t *)userdata)->handle,
795 			    mp_head);
796 		}
797 		mp_head = mp_end  = NULL;
798 		pkt_burst = 0;
799 		mutex_enter(&ring->bf_pool.pool_lock);
800 
801 	} while (xge_hal_ring_dtr_next_completed(channelh, &dtr, &t_code) ==
802 	    XGE_HAL_OK);
803 
804 	/*
805 	 * Always call replenish_all to recycle rx_buffers.
806 	 */
807 	xgell_rx_buffer_replenish_all(ring);
808 	mutex_exit(&ring->bf_pool.pool_lock);
809 
810 	if (mp_head != NULL) {
811 		mac_rx(lldev->mh, ((xgell_ring_t *)userdata)->handle, mp_head);
812 	}
813 
814 	return (XGE_HAL_OK);
815 }
816 
817 /*
818  * xgell_xmit_compl
819  *
 * Called when an interrupt is raised to indicate DMA completion of a Tx
 * packet. It identifies the last TxD whose buffer was freed and frees all
 * mblks whose data have already been DMA'ed into the NIC's internal memory.
824  */
825 static xge_hal_status_e
826 xgell_xmit_compl(xge_hal_channel_h channelh, xge_hal_dtr_h dtr, u8 t_code,
827     void *userdata)
828 {
829 	xgell_fifo_t *fifo = (xgell_fifo_t *)userdata;
830 	xgelldev_t *lldev = fifo->lldev;
831 
832 	do {
833 		xgell_txd_priv_t *txd_priv = ((xgell_txd_priv_t *)
834 		    xge_hal_fifo_dtr_private(dtr));
835 		int i;
836 
837 		if (t_code) {
838 			xge_debug_ll(XGE_TRACE, "%s%d: tx: dtr 0x%"PRIx64
839 			    " completed due to error t_code %01x", XGELL_IFNAME,
840 			    lldev->instance, (uint64_t)(uintptr_t)dtr, t_code);
841 
842 			(void) xge_hal_device_handle_tcode(channelh, dtr,
843 			    t_code);
844 		}
845 
846 		for (i = 0; i < txd_priv->handle_cnt; i++) {
847 			if (txd_priv->dma_handles[i] != NULL) {
848 				xge_assert(txd_priv->dma_handles[i]);
849 				(void) ddi_dma_unbind_handle(
850 				    txd_priv->dma_handles[i]);
851 				ddi_dma_free_handle(&txd_priv->dma_handles[i]);
852 				txd_priv->dma_handles[i] = 0;
853 			}
854 		}
855 		txd_priv->handle_cnt = 0;
856 
857 		xge_hal_fifo_dtr_free(channelh, dtr);
858 
859 		if (txd_priv->mblk != NULL) {
860 			freemsg(txd_priv->mblk);
861 			txd_priv->mblk = NULL;
862 		}
863 
864 		lldev->resched_avail++;
865 
866 	} while (xge_hal_fifo_dtr_next_completed(channelh, &dtr, &t_code) ==
867 	    XGE_HAL_OK);
868 
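	/*
	 * If an earlier send failed for lack of descriptors (resched_retry
	 * is set), descriptors have just been freed, so produce a RESCHED
	 * event to tell the MAC layer it may resume transmission.
	 */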
869 	if (lldev->resched_retry &&
870 	    xge_queue_produce_context(xge_hal_device_queue(lldev->devh),
871 	    XGELL_EVENT_RESCHED_NEEDED, fifo) == XGE_QUEUE_OK) {
872 		xge_debug_ll(XGE_TRACE, "%s%d: IRQ produced event for queue %d",
873 		    XGELL_IFNAME, lldev->instance,
874 		    ((xge_hal_channel_t *)channelh)->post_qid);
875 		lldev->resched_send = lldev->resched_avail;
876 		lldev->resched_retry = 0;
877 	}
878 
879 	return (XGE_HAL_OK);
880 }
881 
882 /*
883  * xgell_send
 * @lldev: pointer to xgelldev_t structure
 * @mp: pointer to network buffer, i.e. mblk_t structure
 *
 * Called by xgell_m_tx() to transmit the packet to the XFRAME firmware.
888  * A pointer to an M_DATA message that contains the packet is passed to
889  * this routine.
890  */
891 static boolean_t
892 xgell_send(xgelldev_t *lldev, mblk_t *mp)
893 {
894 	mblk_t *bp;
895 	boolean_t retry;
896 	xge_hal_device_t *hldev = lldev->devh;
897 	xge_hal_status_e status;
898 	xge_hal_dtr_h dtr;
899 	xgell_txd_priv_t *txd_priv;
900 	uint32_t hckflags;
901 	uint32_t mss;
902 	int handle_cnt, frag_cnt, ret, i, copied;
903 	boolean_t used_copy;
904 	xgell_fifo_t *fifo;
905 	xge_hal_channel_h fifo_channel;
906 
907 _begin:
908 	retry = B_FALSE;
909 	handle_cnt = frag_cnt = 0;
910 
911 	if (!lldev->is_initialized || lldev->in_reset)
912 		return (B_FALSE);
913 
914 	fifo = &lldev->fifos[0];
915 	fifo_channel = fifo->channelh;
916 
917 	/*
	 * If the free Tx dtr count reaches the lower threshold, tell the
	 * MAC layer to stop sending more packets until the free dtr
	 * count exceeds the higher threshold. The driver informs the
	 * MAC layer by scheduling a resched event when the free dtr
	 * count exceeds the higher threshold.
923 	 */
924 	if (xge_hal_channel_dtr_count(fifo_channel)
925 	    <= XGELL_TX_LEVEL_LOW) {
926 		if (++fifo->level_low > XGELL_TX_LEVEL_CHECK) {
			xge_debug_ll(XGE_TRACE, "%s%d: queue %d: err on xmit, "
928 			    "free descriptors count at low threshold %d",
929 			    XGELL_IFNAME, lldev->instance,
930 			    ((xge_hal_channel_t *)fifo_channel)->post_qid,
931 			    XGELL_TX_LEVEL_LOW);
932 			fifo->level_low = 0;
933 			retry = B_TRUE;
934 			goto _exit;
935 		}
936 	} else {
937 		fifo->level_low = 0;
938 	}
939 
940 	status = xge_hal_fifo_dtr_reserve(fifo_channel, &dtr);
941 	if (status != XGE_HAL_OK) {
942 		switch (status) {
943 		case XGE_HAL_INF_CHANNEL_IS_NOT_READY:
944 			xge_debug_ll(XGE_ERR,
945 			    "%s%d: channel %d is not ready.", XGELL_IFNAME,
946 			    lldev->instance,
947 			    ((xge_hal_channel_t *)
948 			    fifo_channel)->post_qid);
949 			retry = B_TRUE;
950 			goto _exit;
951 		case XGE_HAL_INF_OUT_OF_DESCRIPTORS:
952 			xge_debug_ll(XGE_TRACE, "%s%d: queue %d: error in xmit,"
953 			    " out of descriptors.", XGELL_IFNAME,
954 			    lldev->instance,
955 			    ((xge_hal_channel_t *)
956 			    fifo_channel)->post_qid);
957 			retry = B_TRUE;
958 			goto _exit;
959 		default:
960 			return (B_FALSE);
961 		}
962 	}
963 
964 	txd_priv = xge_hal_fifo_dtr_private(dtr);
965 	txd_priv->mblk = mp;
966 
967 	/*
968 	 * VLAN tag should be passed down along with MAC header, so h/w needn't
969 	 * do insertion.
970 	 *
971 	 * For NIC driver that has to strip and re-insert VLAN tag, the example
	 * is the other implementation for xge. The driver can simply bcopy()
	 * the ether_vlan_header to overwrite the VLAN tag and let h/w insert
	 * the tag automatically, since it's impossible that GLD sends down
	 * mp(s) with a split ether_vlan_header.
976 	 *
977 	 * struct ether_vlan_header *evhp;
978 	 * uint16_t tci;
979 	 *
980 	 * evhp = (struct ether_vlan_header *)mp->b_rptr;
981 	 * if (evhp->ether_tpid == htons(VLAN_TPID)) {
982 	 *	tci = ntohs(evhp->ether_tci);
983 	 *	(void) bcopy(mp->b_rptr, mp->b_rptr + VLAN_TAGSZ,
984 	 *	    2 * ETHERADDRL);
985 	 *	mp->b_rptr += VLAN_TAGSZ;
986 	 *
987 	 *	xge_hal_fifo_dtr_vlan_set(dtr, tci);
988 	 * }
989 	 */
990 
991 	copied = 0;
992 	used_copy = B_FALSE;
993 	for (bp = mp; bp != NULL; bp = bp->b_cont) {
994 		int mblen;
995 		uint_t ncookies;
996 		ddi_dma_cookie_t dma_cookie;
997 		ddi_dma_handle_t dma_handle;
998 
999 		/* skip zero-length message blocks */
1000 		mblen = MBLKL(bp);
1001 		if (mblen == 0) {
1002 			continue;
1003 		}
1004 
1005 		/*
		 * Check the message length to decide whether to DMA-map or
		 * bcopy() the data into the tx descriptor(s).
1008 		 */
1009 		if (mblen < lldev->config.tx_dma_lowat &&
1010 		    (copied + mblen) < lldev->tx_copied_max) {
1011 			xge_hal_status_e rc;
1012 			rc = xge_hal_fifo_dtr_buffer_append(fifo_channel,
1013 			    dtr, bp->b_rptr, mblen);
1014 			if (rc == XGE_HAL_OK) {
1015 				used_copy = B_TRUE;
1016 				copied += mblen;
1017 				continue;
1018 			} else if (used_copy) {
1019 				xge_hal_fifo_dtr_buffer_finalize(
1020 				    fifo_channel, dtr, frag_cnt++);
1021 				used_copy = B_FALSE;
1022 			}
1023 		} else if (used_copy) {
1024 			xge_hal_fifo_dtr_buffer_finalize(fifo_channel,
1025 			    dtr, frag_cnt++);
1026 			used_copy = B_FALSE;
1027 		}
1028 
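		/*
		 * DMA-mapped path: any bytes bcopy()'d so far were
		 * finalized above into their own fragment, so copied and
		 * mapped data never share one.
		 */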
1029 		ret = ddi_dma_alloc_handle(lldev->dev_info, &tx_dma_attr,
1030 		    DDI_DMA_DONTWAIT, 0, &dma_handle);
1031 		if (ret != DDI_SUCCESS) {
1032 			xge_debug_ll(XGE_ERR,
1033 			    "%s%d: can not allocate dma handle", XGELL_IFNAME,
1034 			    lldev->instance);
1035 			goto _exit_cleanup;
1036 		}
1037 
1038 		ret = ddi_dma_addr_bind_handle(dma_handle, NULL,
1039 		    (caddr_t)bp->b_rptr, mblen,
1040 		    DDI_DMA_WRITE | DDI_DMA_STREAMING, DDI_DMA_DONTWAIT, 0,
1041 		    &dma_cookie, &ncookies);
1042 
1043 		switch (ret) {
1044 		case DDI_DMA_MAPPED:
1045 			/* everything's fine */
1046 			break;
1047 
1048 		case DDI_DMA_NORESOURCES:
1049 			xge_debug_ll(XGE_ERR,
1050 			    "%s%d: can not bind dma address",
1051 			    XGELL_IFNAME, lldev->instance);
1052 			ddi_dma_free_handle(&dma_handle);
1053 			goto _exit_cleanup;
1054 
1055 		case DDI_DMA_NOMAPPING:
1056 		case DDI_DMA_INUSE:
1057 		case DDI_DMA_TOOBIG:
1058 		default:
1059 			/* drop packet, don't retry */
1060 			xge_debug_ll(XGE_ERR,
1061 			    "%s%d: can not map message buffer",
1062 			    XGELL_IFNAME, lldev->instance);
1063 			ddi_dma_free_handle(&dma_handle);
1064 			goto _exit_cleanup;
1065 		}
1066 
1067 		if (ncookies + frag_cnt > hldev->config.fifo.max_frags) {
1068 			xge_debug_ll(XGE_ERR, "%s%d: too many fragments, "
1069 			    "requested c:%d+f:%d", XGELL_IFNAME,
1070 			    lldev->instance, ncookies, frag_cnt);
1071 			(void) ddi_dma_unbind_handle(dma_handle);
1072 			ddi_dma_free_handle(&dma_handle);
1073 			goto _exit_cleanup;
1074 		}
1075 
1076 		/* setup the descriptors for this data buffer */
1077 		while (ncookies) {
1078 			xge_hal_fifo_dtr_buffer_set(fifo_channel, dtr,
1079 			    frag_cnt++, dma_cookie.dmac_laddress,
1080 			    dma_cookie.dmac_size);
1081 			if (--ncookies) {
1082 				ddi_dma_nextcookie(dma_handle, &dma_cookie);
1083 			}
1084 
1085 		}
1086 
1087 		txd_priv->dma_handles[handle_cnt++] = dma_handle;
1088 
1089 		if (bp->b_cont &&
1090 		    (frag_cnt + XGE_HAL_DEFAULT_FIFO_FRAGS_THRESHOLD >=
1091 		    hldev->config.fifo.max_frags)) {
1092 			mblk_t *nmp;
1093 
1094 			xge_debug_ll(XGE_TRACE,
			    "too many FRAGs [%d], pulling them up", frag_cnt);
1096 
1097 			if ((nmp = msgpullup(bp->b_cont, -1)) == NULL) {
1098 				/* Drop packet, don't retry */
1099 				xge_debug_ll(XGE_ERR,
1100 				    "%s%d: can not pullup message buffer",
1101 				    XGELL_IFNAME, lldev->instance);
1102 				goto _exit_cleanup;
1103 			}
1104 			freemsg(bp->b_cont);
1105 			bp->b_cont = nmp;
1106 		}
1107 	}
1108 
1109 	/* finalize unfinished copies */
1110 	if (used_copy) {
1111 		xge_hal_fifo_dtr_buffer_finalize(fifo_channel, dtr,
1112 		    frag_cnt++);
1113 	}
1114 
1115 	txd_priv->handle_cnt = handle_cnt;
1116 
1117 	/*
1118 	 * If LSO is required, just call xge_hal_fifo_dtr_mss_set(dtr, mss) to
1119 	 * do all necessary work.
1120 	 */
1121 	hcksum_retrieve(mp, NULL, NULL, NULL, NULL, NULL, &mss, &hckflags);
1122 	if ((hckflags & HW_LSO) && (mss != 0)) {
1123 		xge_hal_fifo_dtr_mss_set(dtr, mss);
1124 	}
1125 
1126 	if (hckflags & HCK_IPV4_HDRCKSUM) {
1127 		xge_hal_fifo_dtr_cksum_set_bits(dtr,
1128 		    XGE_HAL_TXD_TX_CKO_IPV4_EN);
1129 	}
1130 	if (hckflags & HCK_FULLCKSUM) {
1131 		xge_hal_fifo_dtr_cksum_set_bits(dtr, XGE_HAL_TXD_TX_CKO_TCP_EN |
1132 		    XGE_HAL_TXD_TX_CKO_UDP_EN);
1133 	}
1134 
1135 	xge_hal_fifo_dtr_post(fifo_channel, dtr);
1136 
1137 	return (B_TRUE);
1138 
1139 _exit_cleanup:
1140 
1141 	for (i = 0; i < handle_cnt; i++) {
1142 		(void) ddi_dma_unbind_handle(txd_priv->dma_handles[i]);
1143 		ddi_dma_free_handle(&txd_priv->dma_handles[i]);
1144 		txd_priv->dma_handles[i] = 0;
1145 	}
1146 
1147 	xge_hal_fifo_dtr_free(fifo_channel, dtr);
1148 
1149 _exit:
1150 	if (retry) {
1151 		if (lldev->resched_avail != lldev->resched_send &&
1152 		    xge_queue_produce_context(xge_hal_device_queue(lldev->devh),
1153 		    XGELL_EVENT_RESCHED_NEEDED, fifo) == XGE_QUEUE_OK) {
1154 			lldev->resched_send = lldev->resched_avail;
1155 			return (B_FALSE);
1156 		} else {
1157 			lldev->resched_retry = 1;
1158 		}
1159 	}
1160 
1161 	if (mp)
1162 		freemsg(mp);
1163 	return (B_TRUE);
1164 }
1165 
1166 /*
 * xgell_m_tx
 * @arg: pointer to the xgelldev_t structure
 * @mp: pointer to the message buffer
1171  *
1172  * Called by MAC Layer to send a chain of packets
1173  */
1174 static mblk_t *
1175 xgell_m_tx(void *arg, mblk_t *mp)
1176 {
1177 	xgelldev_t *lldev = arg;
1178 	mblk_t *next;
1179 
1180 	while (mp != NULL) {
1181 		next = mp->b_next;
1182 		mp->b_next = NULL;
1183 
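		/*
		 * xgell_send() returns B_FALSE when the packet was not
		 * consumed (e.g. out of Tx descriptors or device not
		 * ready); re-chain the remainder and hand it back to the
		 * MAC layer for a later retry.
		 */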
1184 		if (!xgell_send(lldev, mp)) {
1185 			mp->b_next = next;
1186 			break;
1187 		}
1188 		mp = next;
1189 	}
1190 
1191 	return (mp);
1192 }
1193 
1194 /*
1195  * xgell_rx_dtr_term
1196  *
 * Called by HAL to terminate all DTRs for
 * Ring-type channels.
1199  */
1200 static void
1201 xgell_rx_dtr_term(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh,
1202     xge_hal_dtr_state_e state, void *userdata, xge_hal_channel_reopen_e reopen)
1203 {
1204 	xgell_ring_t *ring = (xgell_ring_t *)userdata;
1205 	xgell_rxd_priv_t *rxd_priv =
1206 	    ((xgell_rxd_priv_t *)xge_hal_ring_dtr_private(channelh, dtrh));
1207 	xgell_rx_buffer_t *rx_buffer = rxd_priv->rx_buffer;
1208 
1209 	if (state == XGE_HAL_DTR_STATE_POSTED) {
1210 		mutex_enter(&ring->bf_pool.pool_lock);
1211 		xge_hal_ring_dtr_free(channelh, dtrh);
1212 		xgell_rx_buffer_release(rx_buffer);
1213 		mutex_exit(&ring->bf_pool.pool_lock);
1214 	}
1215 }
1216 
1217 /*
1218  * xgell_tx_term
1219  *
 * Called by HAL to terminate all DTRs for
 * Fifo-type channels.
1222  */
1223 static void
1224 xgell_tx_term(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh,
1225     xge_hal_dtr_state_e state, void *userdata, xge_hal_channel_reopen_e reopen)
1226 {
1227 	xgell_txd_priv_t *txd_priv =
1228 	    ((xgell_txd_priv_t *)xge_hal_fifo_dtr_private(dtrh));
1229 	mblk_t *mp = txd_priv->mblk;
1230 	int i;
1231 
1232 	/*
1233 	 * for Tx we must clean up the DTR *only* if it has been
1234 	 * posted!
1235 	 */
1236 	if (state != XGE_HAL_DTR_STATE_POSTED) {
1237 		return;
1238 	}
1239 
1240 	for (i = 0; i < txd_priv->handle_cnt; i++) {
1241 		xge_assert(txd_priv->dma_handles[i]);
1242 		(void) ddi_dma_unbind_handle(txd_priv->dma_handles[i]);
1243 		ddi_dma_free_handle(&txd_priv->dma_handles[i]);
1244 		txd_priv->dma_handles[i] = 0;
1245 	}
1246 
1247 	xge_hal_fifo_dtr_free(channelh, dtrh);
1248 
1249 	if (mp) {
1250 		txd_priv->mblk = NULL;
1251 		freemsg(mp);
1252 	}
1253 }
1254 
1255 /*
1256  * xgell_tx_close
1257  * @lldev: the link layer object
1258  *
1259  * Close all Tx channels
1260  */
1261 static void
1262 xgell_tx_close(xgelldev_t *lldev)
1263 {
1264 	xge_list_t *item, *list;
1265 	xge_hal_device_t *hldev = (xge_hal_device_t *)lldev->devh;
1266 
1267 	list = &hldev->fifo_channels;
1268 	while (!xge_list_is_empty(list)) {
1269 		item = xge_list_first_get(list);
1270 		xge_hal_channel_t *channel = xge_container_of(item,
1271 		    xge_hal_channel_t, item);
1272 
1273 		xge_hal_channel_close(channel, XGE_HAL_CHANNEL_OC_NORMAL);
1274 	}
1275 }
1276 
1277 /*
1278  * xgell_tx_open
1279  * @lldev: the link layer object
1280  *
 * Initialize and open all Tx channels.
1282  */
1283 static boolean_t
1284 xgell_tx_open(xgelldev_t *lldev)
1285 {
1286 	xge_hal_status_e status;
1287 	u64 adapter_status;
1288 	xge_hal_channel_attr_t attr;
1289 	xge_list_t *item;
1290 	xge_hal_device_t *hldev = (xge_hal_device_t *)lldev->devh;
1291 
1292 	attr.post_qid		= 0;
1293 	attr.compl_qid		= 0;
1294 	attr.callback		= xgell_xmit_compl;
1295 	attr.per_dtr_space	= sizeof (xgell_txd_priv_t);
1296 	attr.flags		= 0;
1297 	attr.type		= XGE_HAL_CHANNEL_TYPE_FIFO;
1298 	attr.userdata		= lldev;
1299 	attr.dtr_init		= NULL;
1300 	attr.dtr_term		= xgell_tx_term;
1301 
1302 	if (xge_hal_device_status(lldev->devh, &adapter_status)) {
1303 		xge_debug_ll(XGE_ERR, "%s%d: device is not ready "
		    "adapter status reads 0x%"PRIx64, XGELL_IFNAME,
1305 		    lldev->instance, (uint64_t)adapter_status);
1306 		return (B_FALSE);
1307 	}
1308 
1309 	/*
1310 	 * Open only configured channels. HAL structures are static,
	 * so, no worries here.
1312 	 */
1313 _next_channel:
1314 	xge_list_for_each(item, &hldev->free_channels) {
1315 		xge_hal_channel_t *channel = xge_container_of(item,
1316 		    xge_hal_channel_t, item);
1317 		xgell_fifo_t *fifo;
1318 
1319 		/* filter on FIFO channels */
1320 		if (channel->type != XGE_HAL_CHANNEL_TYPE_FIFO)
1321 			continue;
1322 
1323 		fifo = &lldev->fifos[attr.post_qid];
1324 		fifo->lldev = lldev;
1325 		attr.userdata = fifo;
1326 
1327 		status = xge_hal_channel_open(lldev->devh, &attr,
1328 		    &fifo->channelh, XGE_HAL_CHANNEL_OC_NORMAL);
1329 		if (status != XGE_HAL_OK) {
1330 			xge_debug_ll(XGE_ERR, "%s%d: cannot open Tx channel "
			    "got status code %d", XGELL_IFNAME,
1332 			    lldev->instance, status);
1333 			/* unwind */
1334 			xgell_tx_close(lldev);
1335 			return (B_FALSE);
1336 		}
1337 
1338 		attr.post_qid++;
1339 
1340 		/*
		 * because xge_hal_channel_open() moves the xge_list entry
		 * to fifo_channels
1343 		 */
1344 		goto _next_channel;
1345 	}
1346 
1347 	return (B_TRUE);
1348 }
1349 
1350 /*
1351  * xgell_rx_close
1352  * @lldev: the link layer object
1353  *
1354  * Close all Rx channels
1355  */
1356 static void
1357 xgell_rx_close(xgelldev_t *lldev)
1358 {
1359 	xge_list_t *item, *list;
1360 	xge_hal_device_t *hldev = (xge_hal_device_t *)lldev->devh;
1361 
1362 	list = &hldev->ring_channels;
1363 	while (!xge_list_is_empty(list)) {
1364 		item = xge_list_first_get(list);
1365 		xge_hal_channel_t *channel = xge_container_of(item,
1366 		    xge_hal_channel_t, item);
1367 		xgell_ring_t *ring = xge_hal_channel_userdata(channel);
1368 
1369 		xge_hal_channel_close(channel, XGE_HAL_CHANNEL_OC_NORMAL);
1370 
1371 		/*
1372 		 * destroy Ring's buffer pool
1373 		 */
1374 		if (xgell_rx_destroy_buffer_pool(ring) != DDI_SUCCESS) {
1375 			xge_debug_ll(XGE_ERR, "unable to destroy Ring%d "
1376 			    "buffer pool", channel->post_qid);
1377 		}
1378 		list = &hldev->ring_channels;
1379 	}
1380 }
1381 
1382 /*
1383  * xgell_rx_open
1384  * @lldev: the link layer object
1385  *
 * Initialize and open all Rx channels.
1387  */
1388 static boolean_t
1389 xgell_rx_open(xgelldev_t *lldev)
1390 {
1391 	xge_hal_status_e status;
1392 	u64 adapter_status;
1393 	xge_hal_channel_attr_t attr;
1394 	xge_list_t *item;
1395 	xge_hal_device_t *hldev = (xge_hal_device_t *)lldev->devh;
1396 
1397 	attr.post_qid		= 0;
1398 	attr.compl_qid		= 0;
1399 	attr.callback		= xgell_rx_1b_compl;
1400 	attr.per_dtr_space	= sizeof (xgell_rxd_priv_t);
1401 	attr.flags		= 0;
1402 	attr.type		= XGE_HAL_CHANNEL_TYPE_RING;
1403 	attr.dtr_init		= xgell_rx_dtr_replenish;
1404 	attr.dtr_term		= xgell_rx_dtr_term;
1405 
1406 	if (xge_hal_device_status(lldev->devh, &adapter_status)) {
1407 		xge_debug_ll(XGE_ERR,
		    "%s%d: device is not ready adapter status reads 0x%"PRIx64,
1409 		    XGELL_IFNAME, lldev->instance,
1410 		    (uint64_t)adapter_status);
1411 		return (B_FALSE);
1412 	}
1413 
1414 	/*
1415 	 * Open only configured channels. HAL structures are static,
	 * so, no worries here.
1417 	 */
1418 _next_channel:
1419 	xge_list_for_each(item, &hldev->free_channels) {
1420 		xge_hal_channel_t *channel = xge_container_of(item,
1421 		    xge_hal_channel_t, item);
1422 		xgell_ring_t *ring;
1423 
1424 		/* filter on RING channels */
1425 		if (channel->type != XGE_HAL_CHANNEL_TYPE_RING)
1426 			continue;
1427 
1428 		ring = &lldev->rings[attr.post_qid];
1429 		ring->lldev = lldev;
1430 		attr.userdata = ring;
1431 
1432 		if (xgell_rx_create_buffer_pool(ring) != DDI_SUCCESS) {
1433 			xge_debug_ll(XGE_ERR, "unable to create Ring%d "
1434 			    "buffer pool", attr.post_qid);
1435 			/* unwind */
1436 			xgell_rx_close(lldev);
1437 			return (B_FALSE);
1438 		}
1439 
1440 		status = xge_hal_channel_open(lldev->devh, &attr,
1441 		    &ring->channelh, XGE_HAL_CHANNEL_OC_NORMAL);
1442 		if (status != XGE_HAL_OK) {
1443 			xge_debug_ll(XGE_ERR, "%s%d: cannot open Rx channel "
			    "got status code %d", XGELL_IFNAME,
1445 			    lldev->instance, status);
1446 			/* unwind */
1447 			(void) xgell_rx_destroy_buffer_pool(ring);
1448 			xgell_rx_close(lldev);
1449 			return (B_FALSE);
1450 		}
1451 
1452 		attr.post_qid++;
1453 
1454 		/*
		 * because xge_hal_channel_open() moves the xge_list entry
		 * to ring_channels
1457 		 */
1458 		goto _next_channel;
1459 	}
1460 
1461 	return (B_TRUE);
1462 }
1463 
1464 static int
1465 xgell_initiate_start(xgelldev_t *lldev)
1466 {
1467 	xge_hal_status_e status;
1468 	xge_hal_device_t *hldev = lldev->devh;
1469 	int maxpkt = hldev->config.mtu;
1470 
1471 	/* check initial mtu before enabling the device */
1472 	status = xge_hal_device_mtu_check(lldev->devh, maxpkt);
1473 	if (status != XGE_HAL_OK) {
1474 		xge_debug_ll(XGE_ERR, "%s%d: MTU size %d is invalid",
1475 		    XGELL_IFNAME, lldev->instance, maxpkt);
1476 		return (EINVAL);
1477 	}
1478 
1479 	/* set initial mtu before enabling the device */
1480 	status = xge_hal_device_mtu_set(lldev->devh, maxpkt);
1481 	if (status != XGE_HAL_OK) {
1482 		xge_debug_ll(XGE_ERR, "%s%d: can not set new MTU %d",
1483 		    XGELL_IFNAME, lldev->instance, maxpkt);
1484 		return (EIO);
1485 	}
1486 
	/* tune jumbo/normal frame UFC (rx interrupt coalescing) counters */
1488 	hldev->config.ring.queue[XGELL_RING_MAIN_QID].rti.ufc_b = \
1489 	    maxpkt > XGE_HAL_DEFAULT_MTU ?
1490 	    XGE_HAL_DEFAULT_RX_UFC_B_J :
1491 	    XGE_HAL_DEFAULT_RX_UFC_B_N;
1492 
1493 	hldev->config.ring.queue[XGELL_RING_MAIN_QID].rti.ufc_c = \
1494 	    maxpkt > XGE_HAL_DEFAULT_MTU ?
1495 	    XGE_HAL_DEFAULT_RX_UFC_C_J :
1496 	    XGE_HAL_DEFAULT_RX_UFC_C_N;
1497 
1498 	/* now, enable the device */
1499 	status = xge_hal_device_enable(lldev->devh);
1500 	if (status != XGE_HAL_OK) {
1501 		xge_debug_ll(XGE_ERR, "%s%d: can not enable the device",
1502 		    XGELL_IFNAME, lldev->instance);
1503 		return (EIO);
1504 	}
1505 
1506 	if (!xgell_rx_open(lldev)) {
1507 		status = xge_hal_device_disable(lldev->devh);
1508 		if (status != XGE_HAL_OK) {
1509 			u64 adapter_status;
1510 			(void) xge_hal_device_status(lldev->devh,
1511 			    &adapter_status);
1512 			xge_debug_ll(XGE_ERR, "%s%d: can not safely disable "
			    "the device. adapter status 0x%"PRIx64
1514 			    " returned status %d",
1515 			    XGELL_IFNAME, lldev->instance,
1516 			    (uint64_t)adapter_status, status);
1517 		}
1518 		xge_os_mdelay(1500);
1519 		return (ENOMEM);
1520 	}
1521 
1522 	if (!xgell_tx_open(lldev)) {
1523 		status = xge_hal_device_disable(lldev->devh);
1524 		if (status != XGE_HAL_OK) {
1525 			u64 adapter_status;
1526 			(void) xge_hal_device_status(lldev->devh,
1527 			    &adapter_status);
1528 			xge_debug_ll(XGE_ERR, "%s%d: can not safely disable "
			    "the device. adapter status 0x%"PRIx64
1530 			    " returned status %d",
1531 			    XGELL_IFNAME, lldev->instance,
1532 			    (uint64_t)adapter_status, status);
1533 		}
1534 		xge_os_mdelay(1500);
1535 		xgell_rx_close(lldev);
1536 
1537 		return (ENOMEM);
1538 	}
1539 
1540 	/* time to enable interrupts */
1541 	(void) xge_enable_intrs(lldev);
1542 	xge_hal_device_intr_enable(lldev->devh);
1543 
1544 	lldev->is_initialized = 1;
1545 
1546 	return (0);
1547 }
1548 
1549 static void
1550 xgell_initiate_stop(xgelldev_t *lldev)
1551 {
1552 	xge_hal_status_e status;
1553 
1554 	lldev->is_initialized = 0;
1555 
1556 	status = xge_hal_device_disable(lldev->devh);
1557 	if (status != XGE_HAL_OK) {
1558 		u64 adapter_status;
1559 		(void) xge_hal_device_status(lldev->devh, &adapter_status);
1560 		xge_debug_ll(XGE_ERR, "%s%d: can not safely disable "
		    "the device. adapter status 0x%"PRIx64" returned status %d",
1562 		    XGELL_IFNAME, lldev->instance,
1563 		    (uint64_t)adapter_status, status);
1564 	}
1565 	xge_hal_device_intr_disable(lldev->devh);
1566 	/* disable OS ISR's */
1567 	xge_disable_intrs(lldev);
1568 
1569 	xge_debug_ll(XGE_TRACE, "%s",
1570 	    "waiting for device irq to become quiescent...");
1571 	xge_os_mdelay(1500);
1572 
1573 	xge_queue_flush(xge_hal_device_queue(lldev->devh));
1574 
1575 	xgell_rx_close(lldev);
1576 	xgell_tx_close(lldev);
1577 }
1578 
1579 /*
1580  * xgell_m_start
 * @arg: pointer to device private structure (lldev)
1582  *
1583  * This function is called by MAC Layer to enable the XFRAME
1584  * firmware to generate interrupts and also prepare the
1585  * driver to call mac_rx for delivering receive packets
1586  * to MAC Layer.
1587  */
1588 static int
1589 xgell_m_start(void *arg)
1590 {
1591 	xgelldev_t *lldev = arg;
1592 	xge_hal_device_t *hldev = lldev->devh;
1593 	int ret;
1594 
1595 	xge_debug_ll(XGE_TRACE, "%s%d: M_START", XGELL_IFNAME,
1596 	    lldev->instance);
1597 
1598 	mutex_enter(&lldev->genlock);
1599 
1600 	if (lldev->is_initialized) {
1601 		xge_debug_ll(XGE_ERR, "%s%d: device is already initialized",
1602 		    XGELL_IFNAME, lldev->instance);
1603 		mutex_exit(&lldev->genlock);
1604 		return (EINVAL);
1605 	}
1606 
1607 	hldev->terminating = 0;
1608 	if (ret = xgell_initiate_start(lldev)) {
1609 		mutex_exit(&lldev->genlock);
1610 		return (ret);
1611 	}
1612 
1613 	lldev->timeout_id = timeout(xge_device_poll, hldev, XGE_DEV_POLL_TICKS);
1614 
1615 	mutex_exit(&lldev->genlock);
1616 
1617 	return (0);
1618 }
1619 
1620 /*
1621  * xgell_m_stop
 * @arg: pointer to device private data (lldev)
1623  *
1624  * This function is called by the MAC Layer to disable
1625  * the XFRAME firmware for generating any interrupts and
1626  * also stop the driver from calling mac_rx() for
1627  * delivering data packets to the MAC Layer.
1628  */
1629 static void
1630 xgell_m_stop(void *arg)
1631 {
1632 	xgelldev_t *lldev = arg;
1633 	xge_hal_device_t *hldev = lldev->devh;
1634 
1635 	xge_debug_ll(XGE_TRACE, "%s", "MAC_STOP");
1636 
1637 	mutex_enter(&lldev->genlock);
1638 	if (!lldev->is_initialized) {
1639 		xge_debug_ll(XGE_ERR, "%s", "device is not initialized...");
1640 		mutex_exit(&lldev->genlock);
1641 		return;
1642 	}
1643 
1644 	xge_hal_device_terminating(hldev);
1645 	xgell_initiate_stop(lldev);
1646 
1647 	/* reset device */
1648 	(void) xge_hal_device_reset(lldev->devh);
1649 
1650 	mutex_exit(&lldev->genlock);
1651 
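	/*
	 * Cancel the poll timeout outside genlock: xge_device_poll()
	 * itself takes genlock, and untimeout() waits for a running
	 * callback, so calling it under the lock could deadlock.
	 */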
1652 	if (lldev->timeout_id != 0) {
1653 		(void) untimeout(lldev->timeout_id);
1654 	}
1655 
1656 	xge_debug_ll(XGE_TRACE, "%s", "returning back to MAC Layer...");
1657 }
1658 
1659 /*
1660  * xgell_onerr_reset
1661  * @lldev: pointer to xgelldev_t structure
1662  *
 * This function is called by the HAL event framework to reset the HW.
 * It must be called with genlock taken.
1665  */
1666 int
1667 xgell_onerr_reset(xgelldev_t *lldev)
1668 {
1669 	int rc = 0;
1670 
1671 	if (!lldev->is_initialized) {
1672 		xge_debug_ll(XGE_ERR, "%s%d: can not reset",
1673 		    XGELL_IFNAME, lldev->instance);
1674 		return (rc);
1675 	}
1676 
1677 	lldev->in_reset = 1;
1678 	xgell_initiate_stop(lldev);
1679 
1680 	/* reset device */
1681 	(void) xge_hal_device_reset(lldev->devh);
1682 
1683 	rc = xgell_initiate_start(lldev);
1684 	lldev->in_reset = 0;
1685 
1686 	return (rc);
1687 }
1688 
1689 
1690 /*
1691  * xgell_m_unicst
 * @arg: pointer to device private structure (lldev)
 * @macaddr: MAC address to be set
1694  *
1695  * This function is called by MAC Layer to set the physical address
1696  * of the XFRAME firmware.
1697  */
1698 static int
1699 xgell_m_unicst(void *arg, const uint8_t *macaddr)
1700 {
1701 	xge_hal_status_e status;
1702 	xgelldev_t *lldev = (xgelldev_t *)arg;
1703 	xge_hal_device_t *hldev = lldev->devh;

	xge_debug_ll(XGE_TRACE, "%s", "M_UNICAST");
1707 
1708 	mutex_enter(&lldev->genlock);
1709 
1710 	xge_debug_ll(XGE_TRACE,
1711 	    "setting macaddr: 0x%02x-%02x-%02x-%02x-%02x-%02x",
1712 	    macaddr[0], macaddr[1], macaddr[2],
1713 	    macaddr[3], macaddr[4], macaddr[5]);
1714 
1715 	status = xge_hal_device_macaddr_set(hldev, 0, (uchar_t *)macaddr);
1716 	if (status != XGE_HAL_OK) {
1717 		xge_debug_ll(XGE_ERR, "%s%d: can not set mac address",
1718 		    XGELL_IFNAME, lldev->instance);
1719 		mutex_exit(&lldev->genlock);
1720 		return (EIO);
1721 	}
1722 
1723 	mutex_exit(&lldev->genlock);
1724 
1725 	return (0);
1726 }
1727 
1728 
1729 /*
1730  * xgell_m_multicst
 * @arg: pointer to device private structure (lldev)
 * @add: B_TRUE to enable reception of the address, B_FALSE to disable it
 * @mc_addr: multicast address to enable or disable
1734  *
1735  * This function is called by MAC Layer to enable or
1736  * disable device-level reception of specific multicast addresses.
1737  */
1738 static int
1739 xgell_m_multicst(void *arg, boolean_t add, const uint8_t *mc_addr)
1740 {
1741 	xge_hal_status_e status;
1742 	xgelldev_t *lldev = (xgelldev_t *)arg;
1743 	xge_hal_device_t *hldev = lldev->devh;
1744 
1745 	xge_debug_ll(XGE_TRACE, "M_MULTICAST add %d", add);
1746 
1747 	mutex_enter(&lldev->genlock);
1748 
1749 	if (!lldev->is_initialized) {
1750 		xge_debug_ll(XGE_ERR, "%s%d: can not set multicast",
1751 		    XGELL_IFNAME, lldev->instance);
1752 		mutex_exit(&lldev->genlock);
1753 		return (EIO);
1754 	}
1755 
1756 	/* FIXME: missing HAL functionality: enable_one() */
1757 
1758 	status = (add) ?
1759 	    xge_hal_device_mcast_enable(hldev) :
1760 	    xge_hal_device_mcast_disable(hldev);
1761 
1762 	if (status != XGE_HAL_OK) {
1763 		xge_debug_ll(XGE_ERR, "failed to %s multicast, status %d",
1764 		    add ? "enable" : "disable", status);
1765 		mutex_exit(&lldev->genlock);
1766 		return (EIO);
1767 	}
1768 
1769 	mutex_exit(&lldev->genlock);
1770 
1771 	return (0);
1772 }
1773 
1774 
1775 /*
1776  * xgell_m_promisc
 * @arg: pointer to device private structure (lldev)
 * @on: B_TRUE to enable promiscuous mode, B_FALSE to disable it
1779  *
1780  * This function is called by MAC Layer to enable or
1781  * disable the reception of all the packets on the medium
1782  */
1783 static int
1784 xgell_m_promisc(void *arg, boolean_t on)
1785 {
1786 	xgelldev_t *lldev = (xgelldev_t *)arg;
1787 	xge_hal_device_t *hldev = lldev->devh;
1788 
1789 	mutex_enter(&lldev->genlock);
1790 
1791 	xge_debug_ll(XGE_TRACE, "%s", "MAC_PROMISC_SET");
1792 
1793 	if (!lldev->is_initialized) {
1794 		xge_debug_ll(XGE_ERR, "%s%d: can not set promiscuous",
1795 		    XGELL_IFNAME, lldev->instance);
1796 		mutex_exit(&lldev->genlock);
1797 		return (EIO);
1798 	}
1799 
1800 	if (on) {
1801 		xge_hal_device_promisc_enable(hldev);
1802 	} else {
1803 		xge_hal_device_promisc_disable(hldev);
1804 	}
1805 
1806 	mutex_exit(&lldev->genlock);
1807 
1808 	return (0);
1809 }
1810 
1811 /*
1812  * xgell_m_stat
 * @arg: pointer to device private structure (lldev)
1814  *
1815  * This function is called by MAC Layer to get network statistics
1816  * from the driver.
1817  */
1818 static int
1819 xgell_m_stat(void *arg, uint_t stat, uint64_t *val)
1820 {
1821 	xge_hal_stats_hw_info_t *hw_info;
1822 	xgelldev_t *lldev = (xgelldev_t *)arg;
1823 	xge_hal_device_t *hldev = lldev->devh;
1824 
1825 	xge_debug_ll(XGE_TRACE, "%s", "MAC_STATS_GET");
1826 
1827 	mutex_enter(&lldev->genlock);
1828 
1829 	if (!lldev->is_initialized) {
1830 		mutex_exit(&lldev->genlock);
1831 		return (EAGAIN);
1832 	}
1833 
1834 	if (xge_hal_stats_hw(hldev, &hw_info) != XGE_HAL_OK) {
1835 		mutex_exit(&lldev->genlock);
1836 		return (EAGAIN);
1837 	}
1838 
1839 	switch (stat) {
1840 	case MAC_STAT_IFSPEED:
1841 		*val = 10000000000ull; /* 10G */
1842 		break;
1843 
1844 	case MAC_STAT_MULTIRCV:
1845 		*val = ((u64) hw_info->rmac_vld_mcst_frms_oflow << 32) |
1846 		    hw_info->rmac_vld_mcst_frms;
1847 		break;
1848 
1849 	case MAC_STAT_BRDCSTRCV:
1850 		*val = ((u64) hw_info->rmac_vld_bcst_frms_oflow << 32) |
1851 		    hw_info->rmac_vld_bcst_frms;
1852 		break;
1853 
1854 	case MAC_STAT_MULTIXMT:
1855 		*val = ((u64) hw_info->tmac_mcst_frms_oflow << 32) |
1856 		    hw_info->tmac_mcst_frms;
1857 		break;
1858 
1859 	case MAC_STAT_BRDCSTXMT:
1860 		*val = ((u64) hw_info->tmac_bcst_frms_oflow << 32) |
1861 		    hw_info->tmac_bcst_frms;
1862 		break;
1863 
1864 	case MAC_STAT_RBYTES:
1865 		*val = ((u64) hw_info->rmac_ttl_octets_oflow << 32) |
1866 		    hw_info->rmac_ttl_octets;
1867 		break;
1868 
1869 	case MAC_STAT_NORCVBUF:
1870 		*val = hw_info->rmac_drop_frms;
1871 		break;
1872 
1873 	case MAC_STAT_IERRORS:
1874 		*val = ((u64) hw_info->rmac_discarded_frms_oflow << 32) |
1875 		    hw_info->rmac_discarded_frms;
1876 		break;
1877 
1878 	case MAC_STAT_OBYTES:
1879 		*val = ((u64) hw_info->tmac_ttl_octets_oflow << 32) |
1880 		    hw_info->tmac_ttl_octets;
1881 		break;
1882 
1883 	case MAC_STAT_NOXMTBUF:
1884 		*val = hw_info->tmac_drop_frms;
1885 		break;
1886 
1887 	case MAC_STAT_OERRORS:
1888 		*val = ((u64) hw_info->tmac_any_err_frms_oflow << 32) |
1889 		    hw_info->tmac_any_err_frms;
1890 		break;
1891 
1892 	case MAC_STAT_IPACKETS:
1893 		*val = ((u64) hw_info->rmac_vld_frms_oflow << 32) |
1894 		    hw_info->rmac_vld_frms;
1895 		break;
1896 
1897 	case MAC_STAT_OPACKETS:
1898 		*val = ((u64) hw_info->tmac_frms_oflow << 32) |
1899 		    hw_info->tmac_frms;
1900 		break;
1901 
1902 	case ETHER_STAT_FCS_ERRORS:
1903 		*val = hw_info->rmac_fcs_err_frms;
1904 		break;
1905 
1906 	case ETHER_STAT_TOOLONG_ERRORS:
1907 		*val = hw_info->rmac_long_frms;
1908 		break;
1909 
1910 	case ETHER_STAT_LINK_DUPLEX:
1911 		*val = LINK_DUPLEX_FULL;
1912 		break;
1913 
1914 	default:
1915 		mutex_exit(&lldev->genlock);
1916 		return (ENOTSUP);
1917 	}
1918 
1919 	mutex_exit(&lldev->genlock);
1920 
1921 	return (0);
1922 }
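
/*
 * Each of the frame/octet counters above is kept by the hardware as a
 * 32-bit base register plus a 32-bit overflow register; the two halves
 * are recombined here into one 64-bit value.  A hypothetical helper
 * macro (a sketch only, not part of this driver) makes the composition
 * explicit:
 *
 *	#define	XGELL_STAT64(oflow, lo)	(((u64)(oflow) << 32) | (u64)(lo))
 *
 * with which, for example, the MAC_STAT_IPACKETS case would read:
 *
 *	*val = XGELL_STAT64(hw_info->rmac_vld_frms_oflow,
 *	    hw_info->rmac_vld_frms);
 */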
1923 
1924 /*
1925  * xgell_device_alloc - Allocate new LL device
1926  */
1927 int
1928 xgell_device_alloc(xge_hal_device_h devh,
1929     dev_info_t *dev_info, xgelldev_t **lldev_out)
1930 {
1931 	xgelldev_t *lldev;
1932 	xge_hal_device_t *hldev = (xge_hal_device_t *)devh;
1933 	int instance = ddi_get_instance(dev_info);
1934 
1935 	*lldev_out = NULL;
1936 
1937 	xge_debug_ll(XGE_TRACE, "trying to register ethernet device %s%d...",
1938 	    XGELL_IFNAME, instance);
1939 
1940 	lldev = kmem_zalloc(sizeof (xgelldev_t), KM_SLEEP);
1941 
1942 	lldev->devh = hldev;
1943 	lldev->instance = instance;
1944 	lldev->dev_info = dev_info;
1945 
1946 	*lldev_out = lldev;
1947 
1948 	ddi_set_driver_private(dev_info, (caddr_t)hldev);
1949 
1950 	return (DDI_SUCCESS);
1951 }
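
/*
 * A sketch of the expected life cycle of an LL device, assuming these
 * routines are driven from the driver's attach(9E) and detach(9E)
 * paths (error handling omitted):
 *
 *	xgelldev_t *lldev;
 *
 *	(void) xgell_device_alloc(devh, dev_info, &lldev);
 *	(void) xgell_device_register(lldev, &config);
 *	...
 *	(void) xgell_device_unregister(lldev);
 *	xgell_device_free(lldev);
 */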
1952 
1953 /*
1954  * xgell_device_free
1955  */
1956 void
1957 xgell_device_free(xgelldev_t *lldev)
1958 {
1959 	xge_debug_ll(XGE_TRACE, "freeing device %s%d",
1960 	    XGELL_IFNAME, lldev->instance);
1961 
1962 	kmem_free(lldev, sizeof (xgelldev_t));
1963 }
1964 
1965 /*
1966  * xgell_ioctl
1967  * xgell_m_ioctl
1968 static void
1969 xgell_m_ioctl(void *arg, queue_t *wq, mblk_t *mp)
1970 {
1971 	xgelldev_t *lldev = arg;
1972 	struct iocblk *iocp;
1973 	int err = 0;
1974 	int cmd;
1975 	int need_privilege = 1;
1976 	int ret = 0;
1977 
1979 	iocp = (struct iocblk *)mp->b_rptr;
1980 	iocp->ioc_error = 0;
1981 	cmd = iocp->ioc_cmd;
1982 	xge_debug_ll(XGE_TRACE, "MAC_IOCTL cmd 0x%x", cmd);
1983 	switch (cmd) {
1984 	case ND_GET:
1985 		need_privilege = 0;
1986 		/* FALLTHRU */
1987 	case ND_SET:
1988 		break;
1989 	default:
1990 		xge_debug_ll(XGE_TRACE, "unknown cmd 0x%x", cmd);
1991 		miocnak(wq, mp, 0, EINVAL);
1992 		return;
1993 	}
1994 
1995 	if (need_privilege) {
1996 		err = secpolicy_net_config(iocp->ioc_cr, B_FALSE);
1997 		if (err != 0) {
1998 			xge_debug_ll(XGE_ERR,
1999 			    "secpolicy_net_config(): rejected cmd 0x%x, err %d",
2000 			    cmd, err);
2001 			miocnak(wq, mp, 0, err);
2002 			return;
2003 		}
2004 	}
2005 
2006 	switch (cmd) {
2007 	case ND_GET:
2008 		/*
2009 		 * If nd_getset() returns B_FALSE, the command was
2010 		 * not valid (e.g. unknown name), so we just tell the
2011 		 * top-level ioctl code to send a NAK (with code EINVAL).
2012 		 *
2013 		 * Otherwise, nd_getset() will have built the reply to
2014 		 * be sent (but not actually sent it), so we tell the
2015 		 * caller to send the prepared reply.
2016 		 */
2017 		ret = nd_getset(wq, lldev->ndp, mp);
2018 		xge_debug_ll(XGE_TRACE, "%s", "got ndd get ioctl");
2019 		break;
2020 
2021 	case ND_SET:
2022 		ret = nd_getset(wq, lldev->ndp, mp);
2023 		xge_debug_ll(XGE_TRACE, "%s", "got ndd set ioctl");
2024 		break;
2025 
2026 	default:
2027 		break;
2028 	}
2029 
2030 	if (ret == B_FALSE) {
2031 		xge_debug_ll(XGE_ERR,
2032 		    "nd_getset(): rejected cmd 0x%x", cmd);
2034 		miocnak(wq, mp, 0, EINVAL);
2035 	} else {
2036 		mp->b_datap->db_type = iocp->ioc_error == 0 ?
2037 		    M_IOCACK : M_IOCNAK;
2038 		qreply(wq, mp);
2039 	}
2040 }
2041 
2042 /* ARGSUSED */
2043 static boolean_t
2044 xgell_m_getcapab(void *arg, mac_capab_t cap, void *cap_data)
2045 {
2046 	xgelldev_t *lldev = arg;
2047 
2048 	switch (cap) {
2049 	case MAC_CAPAB_HCKSUM: {
2050 		uint32_t *hcksum_txflags = cap_data;
2051 		*hcksum_txflags = HCKSUM_INET_FULL_V4 | HCKSUM_INET_FULL_V6 |
2052 		    HCKSUM_IPHDRCKSUM;
2053 		break;
2054 	}
2055 	case MAC_CAPAB_LSO: {
2056 		mac_capab_lso_t *cap_lso = cap_data;
2057 
2058 		if (lldev->config.lso_enable) {
2059 			cap_lso->lso_flags = LSO_TX_BASIC_TCP_IPV4;
2060 			cap_lso->lso_basic_tcp_ipv4.lso_max = XGELL_LSO_MAXLEN;
2061 			break;
2062 		} else {
2063 			return (B_FALSE);
2064 		}
2065 	}
2066 	default:
2067 		return (B_FALSE);
2068 	}
2069 	return (B_TRUE);
2070 }
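
/*
 * The HCKSUM flags advertised above tell the stack which checksum work
 * it may defer to the hardware.  A minimal sketch of how the tx path
 * would honor the per-packet request: hcksum_retrieve(9F) and the
 * HCK_* names come from <sys/pattr.h>, while the XGE_HAL_TXD_TX_CKO_*
 * bits and the dtr descriptor handle are assumed from the HAL:
 *
 *	uint32_t start, stuff, end, value, flags;
 *
 *	hcksum_retrieve(mp, NULL, NULL, &start, &stuff, &end,
 *	    &value, &flags);
 *	if (flags & HCK_IPV4_HDRCKSUM)
 *		xge_hal_fifo_dtr_cksum_set_bits(dtr,
 *		    XGE_HAL_TXD_TX_CKO_IPV4_EN);
 *	if (flags & HCK_FULLCKSUM)
 *		xge_hal_fifo_dtr_cksum_set_bits(dtr,
 *		    XGE_HAL_TXD_TX_CKO_TCP_EN | XGE_HAL_TXD_TX_CKO_UDP_EN);
 */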
2071 
2072 static int
2073 xgell_stats_get(queue_t *q, mblk_t *mp, caddr_t cp, cred_t *credp)
2074 {
2075 	xgelldev_t *lldev = (xgelldev_t *)cp;
2076 	xge_hal_status_e status;
2077 	int count = 0, retsize;
2078 	char *buf;
2079 
2080 	buf = kmem_alloc(XGELL_STATS_BUFSIZE, KM_SLEEP);
2081 	if (buf == NULL) {
2082 		return (ENOSPC);
2083 	}
2084 
2085 	status = xge_hal_aux_stats_tmac_read(lldev->devh, XGELL_STATS_BUFSIZE,
2086 	    buf, &retsize);
2087 	if (status != XGE_HAL_OK) {
2088 		kmem_free(buf, XGELL_STATS_BUFSIZE);
2089 		xge_debug_ll(XGE_ERR, "tmac_read(): status %d", status);
2090 		return (EINVAL);
2091 	}
2092 	count += retsize;
2093 
2094 	status = xge_hal_aux_stats_rmac_read(lldev->devh,
2095 	    XGELL_STATS_BUFSIZE - count, buf + count, &retsize);
2097 	if (status != XGE_HAL_OK) {
2098 		kmem_free(buf, XGELL_STATS_BUFSIZE);
2099 		xge_debug_ll(XGE_ERR, "rmac_read(): status %d", status);
2100 		return (EINVAL);
2101 	}
2102 	count += retsize;
2103 
2104 	status = xge_hal_aux_stats_pci_read(lldev->devh,
2105 	    XGELL_STATS_BUFSIZE - count, buf + count, &retsize);
2106 	if (status != XGE_HAL_OK) {
2107 		kmem_free(buf, XGELL_STATS_BUFSIZE);
2108 		xge_debug_ll(XGE_ERR, "pci_read(): status %d", status);
2109 		return (EINVAL);
2110 	}
2111 	count += retsize;
2112 
2113 	status = xge_hal_aux_stats_sw_dev_read(lldev->devh,
2114 	    XGELL_STATS_BUFSIZE - count, buf + count, &retsize);
2115 	if (status != XGE_HAL_OK) {
2116 		kmem_free(buf, XGELL_STATS_BUFSIZE);
2117 		xge_debug_ll(XGE_ERR, "sw_dev_read(): status %d", status);
2118 		return (EINVAL);
2119 	}
2120 	count += retsize;
2121 
2122 	status = xge_hal_aux_stats_hal_read(lldev->devh,
2123 	    XGELL_STATS_BUFSIZE - count, buf + count, &retsize);
2124 	if (status != XGE_HAL_OK) {
2125 		kmem_free(buf, XGELL_STATS_BUFSIZE);
2126 		xge_debug_ll(XGE_ERR, "hal_read(): status %d", status);
2127 		return (EINVAL);
2128 	}
2129 	count += retsize;
2130 
2131 	*(buf + count - 1) = '\0'; /* remove last '\n' */
2132 	(void) mi_mpprintf(mp, "%s", buf);
2133 	kmem_free(buf, XGELL_STATS_BUFSIZE);
2134 
2135 	return (0);
2136 }
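
/*
 * The read-and-append sequence above repeats one pattern five times; a
 * hypothetical helper (a sketch only, not part of this driver) could
 * factor it out:
 *
 *	typedef xge_hal_status_e (*xgell_aux_read_t)(xge_hal_device_h,
 *	    int, char *, int *);
 *
 *	static int
 *	xgell_aux_append(xgell_aux_read_t rd, xge_hal_device_h devh,
 *	    char *buf, int bufsize, int *count)
 *	{
 *		int retsize;
 *
 *		if (rd(devh, bufsize - *count, buf + *count,
 *		    &retsize) != XGE_HAL_OK)
 *			return (EINVAL);
 *		*count += retsize;
 *		return (0);
 *	}
 *
 * so that each block above would collapse to, e.g.:
 *
 *	if (xgell_aux_append(xge_hal_aux_stats_tmac_read, lldev->devh,
 *	    buf, XGELL_STATS_BUFSIZE, &count) != 0)
 *		goto fail;
 */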
2137 
2138 static int
2139 xgell_pciconf_get(queue_t *q, mblk_t *mp, caddr_t cp, cred_t *credp)
2140 {
2141 	xgelldev_t *lldev = (xgelldev_t *)cp;
2142 	xge_hal_status_e status;
2143 	int retsize;
2144 	char *buf;
2145 
2146 	buf = kmem_alloc(XGELL_PCICONF_BUFSIZE, KM_SLEEP);
2147 	if (buf == NULL) {
2148 		return (ENOSPC);
2149 	}
2150 	status = xge_hal_aux_pci_config_read(lldev->devh, XGELL_PCICONF_BUFSIZE,
2151 	    buf, &retsize);
2152 	if (status != XGE_HAL_OK) {
2153 		kmem_free(buf, XGELL_PCICONF_BUFSIZE);
2154 		xge_debug_ll(XGE_ERR, "pci_config_read(): status %d", status);
2155 		return (EINVAL);
2156 	}
2157 	*(buf + retsize - 1) = '\0'; /* remove last '\n' */
2158 	(void) mi_mpprintf(mp, "%s", buf);
2159 	kmem_free(buf, XGELL_PCICONF_BUFSIZE);
2160 
2161 	return (0);
2162 }
2163 
2164 static int
2165 xgell_about_get(queue_t *q, mblk_t *mp, caddr_t cp, cred_t *credp)
2166 {
2167 	xgelldev_t *lldev = (xgelldev_t *)cp;
2168 	xge_hal_status_e status;
2169 	int retsize;
2170 	char *buf;
2171 
2172 	buf = kmem_alloc(XGELL_ABOUT_BUFSIZE, KM_SLEEP);
2173 	if (buf == NULL) {
2174 		return (ENOSPC);
2175 	}
2176 	status = xge_hal_aux_about_read(lldev->devh, XGELL_ABOUT_BUFSIZE,
2177 	    buf, &retsize);
2178 	if (status != XGE_HAL_OK) {
2179 		kmem_free(buf, XGELL_ABOUT_BUFSIZE);
2180 		xge_debug_ll(XGE_ERR, "about_read(): status %d", status);
2181 		return (EINVAL);
2182 	}
2183 	*(buf + retsize - 1) = '\0'; /* remove last '\n' */
2184 	(void) mi_mpprintf(mp, "%s", buf);
2185 	kmem_free(buf, XGELL_ABOUT_BUFSIZE);
2186 
2187 	return (0);
2188 }
2189 
2190 static unsigned long bar0_offset = 0x110; /* adapter_control */
2191 
2192 static int
2193 xgell_bar0_get(queue_t *q, mblk_t *mp, caddr_t cp, cred_t *credp)
2194 {
2195 	xgelldev_t *lldev = (xgelldev_t *)cp;
2196 	xge_hal_status_e status;
2197 	int retsize;
2198 	char *buf;
2199 
2200 	buf = kmem_alloc(XGELL_IOCTL_BUFSIZE, KM_SLEEP);
2201 	if (buf == NULL) {
2202 		return (ENOSPC);
2203 	}
2204 	status = xge_hal_aux_bar0_read(lldev->devh, bar0_offset,
2205 	    XGELL_IOCTL_BUFSIZE, buf, &retsize);
2206 	if (status != XGE_HAL_OK) {
2207 		kmem_free(buf, XGELL_IOCTL_BUFSIZE);
2208 		xge_debug_ll(XGE_ERR, "bar0_read(): status %d", status);
2209 		return (EINVAL);
2210 	}
2211 	*(buf + retsize - 1) = '\0'; /* remove last '\n' */
2212 	(void) mi_mpprintf(mp, "%s", buf);
2213 	kmem_free(buf, XGELL_IOCTL_BUFSIZE);
2214 
2215 	return (0);
2216 }
2217 
2218 static int
2219 xgell_bar0_set(queue_t *q, mblk_t *mp, char *value, caddr_t cp, cred_t *credp)
2220 {
2221 	unsigned long old_offset = bar0_offset;
2222 	char *end;
2223 
2224 	if (value && *value == '0' &&
2225 	    (*(value + 1) == 'x' || *(value + 1) == 'X')) {
2226 		value += 2;
2227 	}
2228 
2229 	bar0_offset = mi_strtol(value, &end, 16);
2230 	if (end == value) {
2231 		bar0_offset = old_offset;
2232 		return (EINVAL);
2233 	}
2234 
2235 	xge_debug_ll(XGE_TRACE, "bar0: new value %s:%lX", value, bar0_offset);
2236 
2237 	return (0);
2238 }
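
/*
 * Together, xgell_bar0_get() and xgell_bar0_set() turn the "bar0" ndd
 * parameter into a movable window over the BAR0 register space: a set
 * moves the offset, a get dumps the register found there.  Assuming
 * the usual ndd(1M) conventions and a /dev/xge device node, usage
 * would look like:
 *
 *	# ndd -set /dev/xge bar0 0x110
 *	# ndd /dev/xge bar0
 */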
2239 
2240 static int
2241 xgell_debug_level_get(queue_t *q, mblk_t *mp, caddr_t cp, cred_t *credp)
2242 {
2249 	(void) mi_mpprintf(mp, "debug_level %d", xge_hal_driver_debug_level());
2251 
2252 	return (0);
2253 }
2254 
2255 static int
2256 xgell_debug_level_set(queue_t *q, mblk_t *mp, char *value, caddr_t cp,
2257     cred_t *credp)
2258 {
2259 	int level;
2260 	char *end;
2261 
2262 	level = mi_strtol(value, &end, 10);
2263 	if (level < XGE_NONE || level > XGE_ERR || end == value) {
2264 		return (EINVAL);
2265 	}
2266 
2267 	xge_hal_driver_debug_level_set(level);
2268 
2269 	return (0);
2270 }
2271 
2272 static int
2273 xgell_debug_module_mask_get(queue_t *q, mblk_t *mp, caddr_t cp, cred_t *credp)
2274 {
2281 	(void) mi_mpprintf(mp, "debug_module_mask 0x%08x",
2282 	    xge_hal_driver_debug_module_mask());
2284 
2285 	return (0);
2286 }
2287 
2288 static int
2289 xgell_debug_module_mask_set(queue_t *q, mblk_t *mp, char *value, caddr_t cp,
2290     cred_t *credp)
2291 {
2292 	u32 mask;
2293 	char *end;
2294 
2295 	if (value && *value == '0' &&
2296 	    (*(value + 1) == 'x' || *(value + 1) == 'X')) {
2297 		value += 2;
2298 	}
2299 
2300 	mask = mi_strtol(value, &end, 16);
2301 	if (end == value) {
2302 		return (EINVAL);
2303 	}
2304 
2305 	xge_hal_driver_debug_module_mask_set(mask);
2306 
2307 	return (0);
2308 }
2309 
2310 static int
2311 xgell_devconfig_get(queue_t *q, mblk_t *mp, caddr_t cp, cred_t *credp)
2312 {
2313 	xgelldev_t *lldev = (xgelldev_t *)(void *)cp;
2314 	xge_hal_status_e status;
2315 	int retsize;
2316 	char *buf;
2317 
2318 	buf = kmem_alloc(XGELL_DEVCONF_BUFSIZE, KM_SLEEP);
2319 	if (buf == NULL) {
2320 		return (ENOSPC);
2321 	}
2322 	status = xge_hal_aux_device_config_read(lldev->devh,
2323 	    XGELL_DEVCONF_BUFSIZE, buf, &retsize);
2325 	if (status != XGE_HAL_OK) {
2326 		kmem_free(buf, XGELL_DEVCONF_BUFSIZE);
2327 		xge_debug_ll(XGE_ERR, "device_config_read(): status %d",
2328 		    status);
2329 		return (EINVAL);
2330 	}
2331 	*(buf + retsize - 1) = '\0'; /* remove last '\n' */
2332 	(void) mi_mpprintf(mp, "%s", buf);
2333 	kmem_free(buf, XGELL_DEVCONF_BUFSIZE);
2334 
2335 	return (0);
2336 }
2337 
2338 /*
2339  * xgell_device_register
2340  * @lldev: pointer to valid LL device
2341  * @config: pointer to this network device configuration
2342  *
2343  * This function loads the ndd parameters and registers the network
2344  * device with the MAC layer
2345  */
2346 int
2347 xgell_device_register(xgelldev_t *lldev, xgell_config_t *config)
2348 {
2349 	mac_register_t *macp = NULL;
2350 	xge_hal_device_t *hldev = (xge_hal_device_t *)lldev->devh;
2351 
2352 	if (nd_load(&lldev->ndp, "pciconf", xgell_pciconf_get, NULL,
2353 	    (caddr_t)lldev) == B_FALSE)
2354 		goto xgell_ndd_fail;
2355 
2356 	if (nd_load(&lldev->ndp, "about", xgell_about_get, NULL,
2357 	    (caddr_t)lldev) == B_FALSE)
2358 		goto xgell_ndd_fail;
2359 
2360 	if (nd_load(&lldev->ndp, "stats", xgell_stats_get, NULL,
2361 	    (caddr_t)lldev) == B_FALSE)
2362 		goto xgell_ndd_fail;
2363 
2364 	if (nd_load(&lldev->ndp, "bar0", xgell_bar0_get, xgell_bar0_set,
2365 	    (caddr_t)lldev) == B_FALSE)
2366 		goto xgell_ndd_fail;
2367 
2368 	if (nd_load(&lldev->ndp, "debug_level", xgell_debug_level_get,
2369 	    xgell_debug_level_set, (caddr_t)lldev) == B_FALSE)
2370 		goto xgell_ndd_fail;
2371 
2372 	if (nd_load(&lldev->ndp, "debug_module_mask",
2373 	    xgell_debug_module_mask_get, xgell_debug_module_mask_set,
2374 	    (caddr_t)lldev) == B_FALSE)
2375 		goto xgell_ndd_fail;
2376 
2377 	if (nd_load(&lldev->ndp, "devconfig", xgell_devconfig_get, NULL,
2378 	    (caddr_t)lldev) == B_FALSE)
2379 		goto xgell_ndd_fail;
2380 
2381 	bcopy(config, &lldev->config, sizeof (xgell_config_t));
2382 
2383 	mutex_init(&lldev->genlock, NULL, MUTEX_DRIVER,
2384 	    DDI_INTR_PRI(hldev->irqh));
2385 
2386 	if ((macp = mac_alloc(MAC_VERSION)) == NULL)
2387 		goto xgell_register_fail;
2388 	macp->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
2389 	macp->m_driver = lldev;
2390 	macp->m_dip = lldev->dev_info;
2391 	macp->m_src_addr = hldev->macaddr[0];
2392 	macp->m_callbacks = &xgell_m_callbacks;
2393 	macp->m_min_sdu = 0;
2394 	macp->m_max_sdu = hldev->config.mtu;
2395 	macp->m_margin = VLAN_TAGSZ;
2396 	/*
2397 	 * Finally, we're ready to register ourselves with the Nemo
2398 	 * interface; if this succeeds, we're all ready to start()
2399 	 */
2400 
2401 	if (mac_register(macp, &lldev->mh) != 0)
2402 		goto xgell_register_fail;
2403 
2404 	/* macp cannot be NULL here; free it now that registration is done */
2405 	mac_free(macp);
2407 
2408 	/* Max packet size the tx path will bcopy into aligned buffers */
2409 	lldev->tx_copied_max = hldev->config.fifo.max_frags *
2410 	    hldev->config.fifo.alignment_size *
2411 	    hldev->config.fifo.max_aligned_frags;
2412 
2413 	xge_debug_ll(XGE_TRACE, "ethernet device %s%d registered",
2414 	    XGELL_IFNAME, lldev->instance);
2415 
2416 	return (DDI_SUCCESS);
2417 
2418 xgell_ndd_fail:
2419 	nd_free(&lldev->ndp);
2420 	xge_debug_ll(XGE_ERR, "%s", "unable to load ndd parameter");
2421 	return (DDI_FAILURE);
2422 
2423 xgell_register_fail:
2424 	if (macp != NULL)
2425 		mac_free(macp);
2426 	nd_free(&lldev->ndp);
2427 	mutex_destroy(&lldev->genlock);
2428 	xge_debug_ll(XGE_ERR, "%s", "unable to register networking device");
2429 	return (DDI_FAILURE);
2430 }
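
/*
 * For reference, the m_callbacks vector installed above maps the MAC
 * entry points onto the xgell_m_*() routines in this file.  A sketch
 * of what xgell_m_callbacks plausibly contains, assuming the
 * mac_callbacks_t layout of this vintage (the actual initializer lives
 * earlier in this file; xgell_m_tx naming the transmit routine is an
 * assumption here, and the NULL slot stands for the unused
 * mc_resources callback):
 *
 *	static mac_callbacks_t xgell_m_callbacks = {
 *		MC_IOCTL | MC_GETCAPAB,
 *		xgell_m_stat,
 *		xgell_m_start,
 *		xgell_m_stop,
 *		xgell_m_promisc,
 *		xgell_m_multicst,
 *		xgell_m_unicst,
 *		xgell_m_tx,
 *		NULL,
 *		xgell_m_ioctl,
 *		xgell_m_getcapab
 *	};
 */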
2431 
2432 /*
2433  * xgell_device_unregister
2434  * @lldev: pointer to valid LL device.
2435  *
2436  * This function unregisters the network device from the MAC layer and
2437  * releases its ndd parameters and lock
2438  */
2439 int
2440 xgell_device_unregister(xgelldev_t *lldev)
2441 {
2442 	if (mac_unregister(lldev->mh) != 0) {
2443 		xge_debug_ll(XGE_ERR, "unable to unregister device %s%d",
2444 		    XGELL_IFNAME, lldev->instance);
2445 		return (DDI_FAILURE);
2446 	}
2447 
2448 	mutex_destroy(&lldev->genlock);
2449 
2450 	nd_free(&lldev->ndp);
2451 
2452 	xge_debug_ll(XGE_TRACE, "ethernet device %s%d unregistered",
2453 	    XGELL_IFNAME, lldev->instance);
2454 
2455 	return (DDI_SUCCESS);
2456 }
2457