xref: /illumos-gate/usr/src/uts/common/io/hxge/hxge_main.c (revision fe930412)
13dec9fcdSqs /*
23dec9fcdSqs  * CDDL HEADER START
33dec9fcdSqs  *
43dec9fcdSqs  * The contents of this file are subject to the terms of the
53dec9fcdSqs  * Common Development and Distribution License (the "License").
63dec9fcdSqs  * You may not use this file except in compliance with the License.
73dec9fcdSqs  *
83dec9fcdSqs  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
93dec9fcdSqs  * or http://www.opensolaris.org/os/licensing.
103dec9fcdSqs  * See the License for the specific language governing permissions
113dec9fcdSqs  * and limitations under the License.
123dec9fcdSqs  *
133dec9fcdSqs  * When distributing Covered Code, include this CDDL HEADER in each
143dec9fcdSqs  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
153dec9fcdSqs  * If applicable, add the following below this CDDL HEADER, with the
163dec9fcdSqs  * fields enclosed by brackets "[]" replaced with your own identifying
173dec9fcdSqs  * information: Portions Copyright [yyyy] [name of copyright owner]
183dec9fcdSqs  *
193dec9fcdSqs  * CDDL HEADER END
203dec9fcdSqs  */
213dec9fcdSqs /*
223dec9fcdSqs  * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
233dec9fcdSqs  * Use is subject to license terms.
243dec9fcdSqs  */
253dec9fcdSqs 
263dec9fcdSqs #pragma ident	"%Z%%M%	%I%	%E% SMI"
273dec9fcdSqs 
283dec9fcdSqs /*
293dec9fcdSqs  * SunOs MT STREAMS Hydra 10Gb Ethernet Device Driver.
303dec9fcdSqs  */
313dec9fcdSqs #include <hxge_impl.h>
323dec9fcdSqs #include <hxge_pfc.h>
333dec9fcdSqs 
343dec9fcdSqs /*
353dec9fcdSqs  * PSARC/2007/453 MSI-X interrupt limit override
363dec9fcdSqs  * (This PSARC case is limited to MSI-X vectors
373dec9fcdSqs  *  and SPARC platforms only).
383dec9fcdSqs  */
#if defined(_BIG_ENDIAN)
/* 2: allow the PSARC/2007/453 MSI-X limit override (SPARC platforms). */
uint32_t hxge_msi_enable = 2;
#else
uint32_t hxge_msi_enable = 1;
#endif

/*
 * Globals: tunable parameters (/etc/system or adb)
 *
 */
uint32_t hxge_rbr_size = HXGE_RBR_RBB_DEFAULT;
uint32_t hxge_rbr_spare_size = 0;
uint32_t hxge_rcr_size = HXGE_RCR_DEFAULT;
uint32_t hxge_tx_ring_size = HXGE_TX_RING_DEFAULT;
uint32_t hxge_bcopy_thresh = TX_BCOPY_MAX;
uint32_t hxge_dvma_thresh = TX_FASTDVMA_MIN;
uint32_t hxge_dma_stream_thresh = TX_STREAM_MIN;
uint32_t hxge_jumbo_mtu = TX_JUMBO_MTU;
boolean_t hxge_jumbo_enable = B_FALSE;	/* jumbo frames off by default */

/* Lock and init flag for the driver-wide debug facility. */
static hxge_os_mutex_t hxgedebuglock;
static int hxge_debug_init = 0;

/*
 * Debugging flags:
 *		hxge_no_tx_lb : transmit load balancing
 *		hxge_tx_lb_policy: 0 - TCP/UDP port (default)
 *				   1 - From the Stack
 *				   2 - Destination IP Address
 */
uint32_t hxge_no_tx_lb = 0;
uint32_t hxge_tx_lb_policy = HXGE_TX_LB_TCPUDP;

/*
 * Add tunable to reduce the amount of time spent in the
 * ISR doing Rx Processing.
 */
uint32_t hxge_max_rx_pkts = 1024;

/*
 * Tunables to manage the receive buffer blocks.
 *
 * hxge_rx_threshold_hi: copy all buffers.
 * hxge_rx_bcopy_size_type: receive buffer block size type.
 * hxge_rx_threshold_lo: copy only up to tunable block size type.
 */
hxge_rxbuf_threshold_t hxge_rx_threshold_hi = HXGE_RX_COPY_6;
hxge_rxbuf_type_t hxge_rx_buf_size_type = RCR_PKTBUFSZ_0;
hxge_rxbuf_threshold_t hxge_rx_threshold_lo = HXGE_RX_COPY_3;

/* Register-access trace buffer; initialized via hpi_rtrace_buf_init(). */
rtrace_t hpi_rtracebuf;
903dec9fcdSqs 
/*
 * Function Prototypes
 */
/* DDI attach(9E)/detach(9E) entry points and full instance teardown. */
static int hxge_attach(dev_info_t *, ddi_attach_cmd_t);
static int hxge_detach(dev_info_t *, ddi_detach_cmd_t);
static void hxge_unattach(p_hxge_t);

static hxge_status_t hxge_setup_system_dma_pages(p_hxge_t);

/* Per-instance mutex setup/teardown. */
static hxge_status_t hxge_setup_mutexes(p_hxge_t);
static void hxge_destroy_mutexes(p_hxge_t);

/* Device register mapping/unmapping. */
static hxge_status_t hxge_map_regs(p_hxge_t hxgep);
static void hxge_unmap_regs(p_hxge_t hxgep);

/* Hardware and soft interrupt add/remove/enable/disable. */
hxge_status_t hxge_add_intrs(p_hxge_t hxgep);
static hxge_status_t hxge_add_soft_intrs(p_hxge_t hxgep);
static void hxge_remove_intrs(p_hxge_t hxgep);
static void hxge_remove_soft_intrs(p_hxge_t hxgep);
static hxge_status_t hxge_add_intrs_adv(p_hxge_t hxgep);
static hxge_status_t hxge_add_intrs_adv_type(p_hxge_t, uint32_t);
static hxge_status_t hxge_add_intrs_adv_type_fix(p_hxge_t, uint32_t);
void hxge_intrs_enable(p_hxge_t hxgep);
static void hxge_intrs_disable(p_hxge_t hxgep);

/* Suspend/resume and device-level setup/teardown. */
static void hxge_suspend(p_hxge_t);
static hxge_status_t hxge_resume(p_hxge_t);
hxge_status_t hxge_setup_dev(p_hxge_t);
static void hxge_destroy_dev(p_hxge_t);

/* RX/TX DMA buffer and control memory pool management. */
hxge_status_t hxge_alloc_mem_pool(p_hxge_t);
static void hxge_free_mem_pool(p_hxge_t);
static hxge_status_t hxge_alloc_rx_mem_pool(p_hxge_t);
static void hxge_free_rx_mem_pool(p_hxge_t);
static hxge_status_t hxge_alloc_tx_mem_pool(p_hxge_t);
static void hxge_free_tx_mem_pool(p_hxge_t);
static hxge_status_t hxge_dma_mem_alloc(p_hxge_t, dma_method_t,
    struct ddi_dma_attr *, size_t, ddi_device_acc_attr_t *, uint_t,
    p_hxge_dma_common_t);
static void hxge_dma_mem_free(p_hxge_dma_common_t);
static hxge_status_t hxge_alloc_rx_buf_dma(p_hxge_t, uint16_t,
    p_hxge_dma_common_t *, size_t, size_t, uint32_t *);
static void hxge_free_rx_buf_dma(p_hxge_t, p_hxge_dma_common_t, uint32_t);
static hxge_status_t hxge_alloc_rx_cntl_dma(p_hxge_t, uint16_t,
    p_hxge_dma_common_t *, size_t);
static void hxge_free_rx_cntl_dma(p_hxge_t, p_hxge_dma_common_t);
static hxge_status_t hxge_alloc_tx_buf_dma(p_hxge_t, uint16_t,
    p_hxge_dma_common_t *, size_t, size_t, uint32_t *);
static void hxge_free_tx_buf_dma(p_hxge_t, p_hxge_dma_common_t, uint32_t);
static hxge_status_t hxge_alloc_tx_cntl_dma(p_hxge_t, uint16_t,
    p_hxge_dma_common_t *, size_t);
static void hxge_free_tx_cntl_dma(p_hxge_t, p_hxge_dma_common_t);
static int hxge_init_common_dev(p_hxge_t);
static void hxge_uninit_common_dev(p_hxge_t);

/*
 * The next declarations are for the GLDv3 interface.
 */
static int hxge_m_start(void *);
static void hxge_m_stop(void *);
static int hxge_m_unicst(void *, const uint8_t *);
static int hxge_m_multicst(void *, boolean_t, const uint8_t *);
static int hxge_m_promisc(void *, boolean_t);
static void hxge_m_ioctl(void *, queue_t *, mblk_t *);
static void hxge_m_resources(void *);
static hxge_status_t hxge_mac_register(p_hxge_t hxgep);

/* Alternate (multiple) MAC address slot management. */
static int hxge_m_mmac_add(void *arg, mac_multi_addr_t *maddr);
static int hxge_m_mmac_remove(void *arg, mac_addr_slot_t slot);
static int hxge_m_mmac_modify(void *arg, mac_multi_addr_t *maddr);
static int hxge_m_mmac_get(void *arg, mac_multi_addr_t *maddr);
static boolean_t hxge_m_getcapab(void *, mac_capab_t, void *);

/* 0x4E584745 is ASCII "NXGE"; NOTE(review): looks inherited from nxge. */
#define	HXGE_MAGIC	0x4E584745UL
#define	MAX_DUMP_SZ 256

#define	HXGE_M_CALLBACK_FLAGS	(MC_RESOURCES | MC_IOCTL | MC_GETCAPAB)

extern mblk_t *hxge_m_tx(void *arg, mblk_t *mp);
extern hxge_status_t hxge_pfc_set_default_mac_addr(p_hxge_t hxgep);

/*
 * GLDv3 callback vector; member order must match mac_callbacks_t.
 */
static mac_callbacks_t hxge_m_callbacks = {
	HXGE_M_CALLBACK_FLAGS,
	hxge_m_stat,
	hxge_m_start,
	hxge_m_stop,
	hxge_m_promisc,
	hxge_m_multicst,
	hxge_m_unicst,
	hxge_m_tx,
	hxge_m_resources,
	hxge_m_ioctl,
	hxge_m_getcapab
};

/* Enable debug messages as necessary. */
uint64_t hxge_debug_level = 0x0;

/*
 * This list contains the instance structures for the Hydra
 * devices present in the system. The lock exists to guarantee
 * mutually exclusive access to the list.
 */
void *hxge_list = NULL;
void *hxge_hw_list = NULL;
hxge_os_mutex_t hxge_common_lock;

extern uint64_t hpi_debug_level;

extern hxge_status_t hxge_ldgv_init();
extern hxge_status_t hxge_ldgv_uninit();
extern hxge_status_t hxge_intr_ldgv_init();
extern void hxge_fm_init(p_hxge_t hxgep, ddi_device_acc_attr_t *reg_attr,
    ddi_device_acc_attr_t *desc_attr, ddi_dma_attr_t *dma_attr);
extern void hxge_fm_fini(p_hxge_t hxgep);

/*
 * Count used to maintain the number of buffers being used
 * by Hydra instances and loaned up to the upper layers.
 */
uint32_t hxge_mblks_pending = 0;
2103dec9fcdSqs 
/*
 * Device register access attributes for PIO.
 */
static ddi_device_acc_attr_t hxge_dev_reg_acc_attr = {
	DDI_DEVICE_ATTR_V0,
	DDI_STRUCTURE_LE_ACC,
	DDI_STRICTORDER_ACC,
};

/*
 * Device descriptor access attributes for DMA.
 */
static ddi_device_acc_attr_t hxge_dev_desc_dma_acc_attr = {
	DDI_DEVICE_ATTR_V0,
	DDI_STRUCTURE_LE_ACC,
	DDI_STRICTORDER_ACC
};

/*
 * Device buffer access attributes for DMA.
 * Buffers are big-endian, unlike the little-endian registers/descriptors.
 */
static ddi_device_acc_attr_t hxge_dev_buf_dma_acc_attr = {
	DDI_DEVICE_ATTR_V0,
	DDI_STRUCTURE_BE_ACC,
	DDI_STRICTORDER_ACC
};

/* DMA attributes for descriptor rings: one segment, 1MB (0x100000) aligned. */
ddi_dma_attr_t hxge_desc_dma_attr = {
	DMA_ATTR_V0,		/* version number. */
	0,			/* low address */
	0xffffffffffffffff,	/* high address */
	0xffffffffffffffff,	/* address counter max */
	0x100000,		/* alignment */
	0xfc00fc,		/* dlim_burstsizes */
	0x1,			/* minimum transfer size */
	0xffffffffffffffff,	/* maximum transfer size */
	0xffffffffffffffff,	/* maximum segment size */
	1,			/* scatter/gather list length */
	(unsigned int)1,	/* granularity */
	0			/* attribute flags */
};

/* DMA attributes for TX buffers; alignment differs per endianness/platform. */
ddi_dma_attr_t hxge_tx_dma_attr = {
	DMA_ATTR_V0,		/* version number. */
	0,			/* low address */
	0xffffffffffffffff,	/* high address */
	0xffffffffffffffff,	/* address counter max */
#if defined(_BIG_ENDIAN)
	0x2000,			/* alignment */
#else
	0x1000,			/* alignment */
#endif
	0xfc00fc,		/* dlim_burstsizes */
	0x1,			/* minimum transfer size */
	0xffffffffffffffff,	/* maximum transfer size */
	0xffffffffffffffff,	/* maximum segment size */
	5,			/* scatter/gather list length */
	(unsigned int)1,	/* granularity */
	0			/* attribute flags */
};

/* DMA attributes for RX buffers; relaxed ordering enabled for throughput. */
ddi_dma_attr_t hxge_rx_dma_attr = {
	DMA_ATTR_V0,		/* version number. */
	0,			/* low address */
	0xffffffffffffffff,	/* high address */
	0xffffffffffffffff,	/* address counter max */
	0x10000,		/* alignment */
	0xfc00fc,		/* dlim_burstsizes */
	0x1,			/* minimum transfer size */
	0xffffffffffffffff,	/* maximum transfer size */
	0xffffffffffffffff,	/* maximum segment size */
	1,			/* scatter/gather list length */
	(unsigned int)1,	/* granularity */
	DDI_DMA_RELAXED_ORDERING /* attribute flags */
};

ddi_dma_lim_t hxge_dma_limits = {
	(uint_t)0,		/* dlim_addr_lo */
	(uint_t)0xffffffff,	/* dlim_addr_hi */
	(uint_t)0xffffffff,	/* dlim_cntr_max */
	(uint_t)0xfc00fc,	/* dlim_burstsizes for 32 and 64 bit xfers */
	0x1,			/* dlim_minxfer */
	1024			/* dlim_speed */
};

/* Default DMA binding method; may be overridden for debug/testing. */
dma_method_t hxge_force_dma = DVMA;

/*
 * dma chunk sizes.
 *
 * Try to allocate the largest possible size
 * so that fewer number of dma chunks would be managed
 * (sizes are powers of two, ascending from 4KB to 16MB).
 */
size_t alloc_sizes[] = {
    0x1000, 0x2000, 0x4000, 0x8000,
    0x10000, 0x20000, 0x40000, 0x80000,
    0x100000, 0x200000, 0x400000, 0x800000, 0x1000000
};
3093dec9fcdSqs 
/*
 * hxge_attach:  Device attach(9E) entry point for a Hydra instance.
 */
static int
hxge_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
	p_hxge_t	hxgep = NULL;
	int		instance;
	int		status = DDI_SUCCESS;

	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_attach"));

	/*
	 * Get the device instance since we'll need to setup or retrieve a soft
	 * state for this instance.
	 */
	instance = ddi_get_instance(dip);

	switch (cmd) {
	case DDI_ATTACH:
		/* Fresh attach: fall through to the full bring-up below. */
		HXGE_DEBUG_MSG((hxgep, DDI_CTL, "doing DDI_ATTACH"));
		break;

	case DDI_RESUME:
		/* Resume after a system suspend; soft state must exist. */
		HXGE_DEBUG_MSG((hxgep, DDI_CTL, "doing DDI_RESUME"));
		hxgep = (p_hxge_t)ddi_get_soft_state(hxge_list, instance);
		if (hxgep == NULL) {
			status = DDI_FAILURE;
			break;
		}
		if (hxgep->dip != dip) {
			status = DDI_FAILURE;
			break;
		}
		if (hxgep->suspended == DDI_PM_SUSPEND) {
			/* PM-suspended: ask the framework to power us up. */
			status = ddi_dev_is_needed(hxgep->dip, 0, 1);
		} else {
			(void) hxge_resume(hxgep);
		}
		goto hxge_attach_exit;

	case DDI_PM_RESUME:
		/* Power-management resume of an existing instance. */
		HXGE_DEBUG_MSG((hxgep, DDI_CTL, "doing DDI_PM_RESUME"));
		hxgep = (p_hxge_t)ddi_get_soft_state(hxge_list, instance);
		if (hxgep == NULL) {
			status = DDI_FAILURE;
			break;
		}
		if (hxgep->dip != dip) {
			status = DDI_FAILURE;
			break;
		}
		(void) hxge_resume(hxgep);
		goto hxge_attach_exit;

	default:
		HXGE_DEBUG_MSG((hxgep, DDI_CTL, "doing unknown"));
		status = DDI_FAILURE;
		goto hxge_attach_exit;
	}

	/*
	 * NOTE(review): the RESUME failure cases above 'break' out of the
	 * switch with status = DDI_FAILURE and then fall into the fresh
	 * DDI_ATTACH path below instead of returning — confirm intended.
	 */
	if (ddi_soft_state_zalloc(hxge_list, instance) == DDI_FAILURE) {
		status = DDI_FAILURE;
		HXGE_ERROR_MSG((hxgep, DDI_CTL,
		    "ddi_soft_state_zalloc failed"));
		goto hxge_attach_exit;
	}

	hxgep = ddi_get_soft_state(hxge_list, instance);
	if (hxgep == NULL) {
		status = HXGE_ERROR;
		HXGE_ERROR_MSG((hxgep, DDI_CTL,
		    "ddi_get_soft_state failed"));
		goto hxge_attach_fail2;
	}

	/* Record instance identity and propagate the debug level to HPI. */
	hxgep->drv_state = 0;
	hxgep->dip = dip;
	hxgep->instance = instance;
	hxgep->p_dip = ddi_get_parent(dip);
	hxgep->hxge_debug_level = hxge_debug_level;
	hpi_debug_level = hxge_debug_level;

	/* Initialize FMA support before any register access. */
	hxge_fm_init(hxgep, &hxge_dev_reg_acc_attr, &hxge_dev_desc_dma_acc_attr,
	    &hxge_rx_dma_attr);

	status = hxge_map_regs(hxgep);
	if (status != HXGE_OK) {
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "hxge_map_regs failed"));
		goto hxge_attach_fail3;
	}

	status = hxge_init_common_dev(hxgep);
	if (status != HXGE_OK) {
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
		    "hxge_init_common_dev failed"));
		goto hxge_attach_fail4;
	}

	/*
	 * Setup the Ndd parameters for this instance.
	 */
	hxge_init_param(hxgep);

	/*
	 * Setup Register Tracing Buffer.
	 */
	hpi_rtrace_buf_init((rtrace_t *)&hpi_rtracebuf);

	/* init stats ptr */
	hxge_init_statsp(hxgep);

	status = hxge_get_config_properties(hxgep);
	if (status != HXGE_OK) {
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "get_hw create failed"));
		goto hxge_attach_fail;
	}

	/*
	 * Setup the Kstats for the driver.
	 */
	hxge_setup_kstats(hxgep);
	hxge_setup_param(hxgep);

	status = hxge_setup_system_dma_pages(hxgep);
	if (status != HXGE_OK) {
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "set dma page failed"));
		goto hxge_attach_fail;
	}

	/* Hardware identification and NIU common block initialization. */
	hxge_hw_id_init(hxgep);
	hxge_hw_init_niu_common(hxgep);

	status = hxge_setup_mutexes(hxgep);
	if (status != HXGE_OK) {
		HXGE_DEBUG_MSG((hxgep, DDI_CTL, "set mutex failed"));
		goto hxge_attach_fail;
	}

	status = hxge_setup_dev(hxgep);
	if (status != DDI_SUCCESS) {
		HXGE_DEBUG_MSG((hxgep, DDI_CTL, "set dev failed"));
		goto hxge_attach_fail;
	}

	status = hxge_add_intrs(hxgep);
	if (status != DDI_SUCCESS) {
		HXGE_DEBUG_MSG((hxgep, DDI_CTL, "add_intr failed"));
		goto hxge_attach_fail;
	}

	status = hxge_add_soft_intrs(hxgep);
	if (status != DDI_SUCCESS) {
		HXGE_DEBUG_MSG((hxgep, HXGE_ERR_CTL, "add_soft_intr failed"));
		goto hxge_attach_fail;
	}

	/*
	 * Enable interrupts.
	 */
	hxge_intrs_enable(hxgep);

	/* Register with the GLDv3 MAC layer; last step of attach. */
	if ((status = hxge_mac_register(hxgep)) != HXGE_OK) {
		HXGE_DEBUG_MSG((hxgep, DDI_CTL,
		    "unable to register to mac layer (%d)", status));
		goto hxge_attach_fail;
	}
	mac_link_update(hxgep->mach, LINK_STATE_UP);

	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "registered to mac (instance %d)",
	    instance));

	goto hxge_attach_exit;

hxge_attach_fail:
	/* Generic failure after mutex/dev setup: full teardown. */
	hxge_unattach(hxgep);
	goto hxge_attach_fail1;

hxge_attach_fail5:
	/* NOTE(review): this label is not referenced by any goto. */
	/*
	 * Tear down the ndd parameters setup.
	 */
	hxge_destroy_param(hxgep);

	/*
	 * Tear down the kstat setup.
	 */
	hxge_destroy_kstats(hxgep);

hxge_attach_fail4:
	if (hxgep->hxge_hw_p) {
		hxge_uninit_common_dev(hxgep);
		hxgep->hxge_hw_p = NULL;
	}
hxge_attach_fail3:
	/*
	 * Unmap the register setup.
	 */
	hxge_unmap_regs(hxgep);

	hxge_fm_fini(hxgep);

hxge_attach_fail2:
	ddi_soft_state_free(hxge_list, hxgep->instance);

hxge_attach_fail1:
	/* Normalize any failure into an HXGE error code for the caller. */
	if (status != HXGE_OK)
		status = (HXGE_ERROR | HXGE_DDI_FAILED);
	hxgep = NULL;

hxge_attach_exit:
	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_attach status = 0x%08x",
	    status));

	return (status);
}
5263dec9fcdSqs 
5273dec9fcdSqs static int
5283dec9fcdSqs hxge_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
5293dec9fcdSqs {
5303dec9fcdSqs 	int		status = DDI_SUCCESS;
5313dec9fcdSqs 	int		instance;
5323dec9fcdSqs 	p_hxge_t	hxgep = NULL;
5333dec9fcdSqs 
5343dec9fcdSqs 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_detach"));
5353dec9fcdSqs 	instance = ddi_get_instance(dip);
5363dec9fcdSqs 	hxgep = ddi_get_soft_state(hxge_list, instance);
5373dec9fcdSqs 	if (hxgep == NULL) {
5383dec9fcdSqs 		status = DDI_FAILURE;
5393dec9fcdSqs 		goto hxge_detach_exit;
5403dec9fcdSqs 	}
5413dec9fcdSqs 
5423dec9fcdSqs 	switch (cmd) {
5433dec9fcdSqs 	case DDI_DETACH:
5443dec9fcdSqs 		HXGE_DEBUG_MSG((hxgep, DDI_CTL, "doing DDI_DETACH"));
5453dec9fcdSqs 		break;
5463dec9fcdSqs 
5473dec9fcdSqs 	case DDI_PM_SUSPEND:
5483dec9fcdSqs 		HXGE_DEBUG_MSG((hxgep, DDI_CTL, "doing DDI_PM_SUSPEND"));
5493dec9fcdSqs 		hxgep->suspended = DDI_PM_SUSPEND;
5503dec9fcdSqs 		hxge_suspend(hxgep);
5513dec9fcdSqs 		break;
5523dec9fcdSqs 
5533dec9fcdSqs 	case DDI_SUSPEND:
5543dec9fcdSqs 		HXGE_DEBUG_MSG((hxgep, DDI_CTL, "doing DDI_SUSPEND"));
5553dec9fcdSqs 		if (hxgep->suspended != DDI_PM_SUSPEND) {
5563dec9fcdSqs 			hxgep->suspended = DDI_SUSPEND;
5573dec9fcdSqs 			hxge_suspend(hxgep);
5583dec9fcdSqs 		}
5593dec9fcdSqs 		break;
5603dec9fcdSqs 
5613dec9fcdSqs 	default:
5623dec9fcdSqs 		status = DDI_FAILURE;
5633dec9fcdSqs 		break;
5643dec9fcdSqs 	}
5653dec9fcdSqs 
5663dec9fcdSqs 	if (cmd != DDI_DETACH)
5673dec9fcdSqs 		goto hxge_detach_exit;
5683dec9fcdSqs 
5693dec9fcdSqs 	/*
5703dec9fcdSqs 	 * Stop the xcvr polling.
5713dec9fcdSqs 	 */
5723dec9fcdSqs 	hxgep->suspended = cmd;
5733dec9fcdSqs 
5743dec9fcdSqs 	if (hxgep->mach && (status = mac_unregister(hxgep->mach)) != 0) {
5753dec9fcdSqs 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
5763dec9fcdSqs 		    "<== hxge_detach status = 0x%08X", status));
5773dec9fcdSqs 		return (DDI_FAILURE);
5783dec9fcdSqs 	}
5793dec9fcdSqs 	HXGE_DEBUG_MSG((hxgep, DDI_CTL,
5803dec9fcdSqs 	    "<== hxge_detach (mac_unregister) status = 0x%08X", status));
5813dec9fcdSqs 
5823dec9fcdSqs 	hxge_unattach(hxgep);
5833dec9fcdSqs 	hxgep = NULL;
5843dec9fcdSqs 
5853dec9fcdSqs hxge_detach_exit:
5863dec9fcdSqs 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_detach status = 0x%08X",
5873dec9fcdSqs 	    status));
5883dec9fcdSqs 
5893dec9fcdSqs 	return (status);
5903dec9fcdSqs }
5913dec9fcdSqs 
5923dec9fcdSqs static void
5933dec9fcdSqs hxge_unattach(p_hxge_t hxgep)
5943dec9fcdSqs {
5953dec9fcdSqs 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_unattach"));
5963dec9fcdSqs 
5973dec9fcdSqs 	if (hxgep == NULL || hxgep->dev_regs == NULL) {
5983dec9fcdSqs 		return;
5993dec9fcdSqs 	}
6003dec9fcdSqs 
6013dec9fcdSqs 	if (hxgep->hxge_hw_p) {
6023dec9fcdSqs 		hxge_uninit_common_dev(hxgep);
6033dec9fcdSqs 		hxgep->hxge_hw_p = NULL;
6043dec9fcdSqs 	}
6053dec9fcdSqs 
6063dec9fcdSqs 	if (hxgep->hxge_timerid) {
6073dec9fcdSqs 		hxge_stop_timer(hxgep, hxgep->hxge_timerid);
6083dec9fcdSqs 		hxgep->hxge_timerid = 0;
6093dec9fcdSqs 	}
6103dec9fcdSqs 
6113dec9fcdSqs 	/* Stop any further interrupts. */
6123dec9fcdSqs 	hxge_remove_intrs(hxgep);
6133dec9fcdSqs 
6143dec9fcdSqs 	/* Remove soft interrups */
6153dec9fcdSqs 	hxge_remove_soft_intrs(hxgep);
6163dec9fcdSqs 
6173dec9fcdSqs 	/* Stop the device and free resources. */
6183dec9fcdSqs 	hxge_destroy_dev(hxgep);
6193dec9fcdSqs 
6203dec9fcdSqs 	/* Tear down the ndd parameters setup. */
6213dec9fcdSqs 	hxge_destroy_param(hxgep);
6223dec9fcdSqs 
6233dec9fcdSqs 	/* Tear down the kstat setup. */
6243dec9fcdSqs 	hxge_destroy_kstats(hxgep);
6253dec9fcdSqs 
6263dec9fcdSqs 	/*
6273dec9fcdSqs 	 * Remove the list of ndd parameters which were setup during attach.
6283dec9fcdSqs 	 */
6293dec9fcdSqs 	if (hxgep->dip) {
6303dec9fcdSqs 		HXGE_DEBUG_MSG((hxgep, OBP_CTL,
6313dec9fcdSqs 		    " hxge_unattach: remove all properties"));
6323dec9fcdSqs 		(void) ddi_prop_remove_all(hxgep->dip);
6333dec9fcdSqs 	}
6343dec9fcdSqs 
635*fe930412Sqs 	/*
636*fe930412Sqs 	 * Reset RDC, TDC, PFC, and VMAC blocks from PEU to clear any
637*fe930412Sqs 	 * previous state before unmapping the registers.
638*fe930412Sqs 	 */
639*fe930412Sqs 	HXGE_REG_WR32(hxgep->hpi_handle, BLOCK_RESET, 0x0000001E);
640*fe930412Sqs 	HXGE_DELAY(1000);
641*fe930412Sqs 
6423dec9fcdSqs 	/*
6433dec9fcdSqs 	 * Unmap the register setup.
6443dec9fcdSqs 	 */
6453dec9fcdSqs 	hxge_unmap_regs(hxgep);
6463dec9fcdSqs 
6473dec9fcdSqs 	hxge_fm_fini(hxgep);
6483dec9fcdSqs 
6493dec9fcdSqs 	/*
6503dec9fcdSqs 	 * Free the soft state data structures allocated with this instance.
6513dec9fcdSqs 	 */
6523dec9fcdSqs 	ddi_soft_state_free(hxge_list, hxgep->instance);
6533dec9fcdSqs 
654*fe930412Sqs 	/* Destroy all mutexes.  */
655*fe930412Sqs 	hxge_destroy_mutexes(hxgep);
656*fe930412Sqs 
6573dec9fcdSqs 	HXGE_DEBUG_MSG((NULL, DDI_CTL, "<== hxge_unattach"));
6583dec9fcdSqs }
6593dec9fcdSqs 
/*
 * hxge_map_regs:  Map the instance's three register sets.
 *
 * Register set 0 is PCI config space, set 1 the device (PIO)
 * registers, set 2 the MSI/MSI-X table.  On success all HPI access
 * handles are installed and hxgep->dev_regs points at the mapped
 * handles.  On any failure the goto chain below unwinds whatever
 * was mapped so far and frees dev_regs.
 *
 * Returns HXGE_OK, or (HXGE_ERROR | HXGE_DDI_FAILED) on failure.
 */
static hxge_status_t
hxge_map_regs(p_hxge_t hxgep)
{
	int		ddi_status = DDI_SUCCESS;
	p_dev_regs_t	dev_regs;

#ifdef	HXGE_DEBUG
	char		*sysname;
#endif

	off_t		regsize;
	hxge_status_t	status = HXGE_OK;
	int		nregs;

	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_map_regs"));

	if (ddi_dev_nregs(hxgep->dip, &nregs) != DDI_SUCCESS)
		return (HXGE_ERROR);

	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "hxge_map_regs: nregs: %d", nregs));

	hxgep->dev_regs = NULL;
	dev_regs = KMEM_ZALLOC(sizeof (dev_regs_t), KM_SLEEP);
	dev_regs->hxge_regh = NULL;
	dev_regs->hxge_pciregh = NULL;
	dev_regs->hxge_msix_regh = NULL;

	/*
	 * NOTE(review): regsize is off_t but is printed with %x in the
	 * debug messages below — confirm the format is intentional.
	 */
	(void) ddi_dev_regsize(hxgep->dip, 0, &regsize);
	HXGE_DEBUG_MSG((hxgep, DDI_CTL,
	    "hxge_map_regs: pci config size 0x%x", regsize));

	/* Register set 0: PCI bus configuration space. */
	ddi_status = ddi_regs_map_setup(hxgep->dip, 0,
	    (caddr_t *)&(dev_regs->hxge_pciregp), 0, 0,
	    &hxge_dev_reg_acc_attr, &dev_regs->hxge_pciregh);
	if (ddi_status != DDI_SUCCESS) {
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
		    "ddi_map_regs, hxge bus config regs failed"));
		goto hxge_map_regs_fail0;
	}

	HXGE_DEBUG_MSG((hxgep, DDI_CTL,
	    "hxge_map_reg: PCI config addr 0x%0llx handle 0x%0llx",
	    dev_regs->hxge_pciregp,
	    dev_regs->hxge_pciregh));

	(void) ddi_dev_regsize(hxgep->dip, 1, &regsize);
	HXGE_DEBUG_MSG((hxgep, DDI_CTL,
	    "hxge_map_regs: pio size 0x%x", regsize));

	/* set up the device mapped register */
	ddi_status = ddi_regs_map_setup(hxgep->dip, 1,
	    (caddr_t *)&(dev_regs->hxge_regp), 0, 0,
	    &hxge_dev_reg_acc_attr, &dev_regs->hxge_regh);

	if (ddi_status != DDI_SUCCESS) {
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
		    "ddi_map_regs for Hydra global reg failed"));
		goto hxge_map_regs_fail1;
	}

	/* set up the msi/msi-x mapped register */
	(void) ddi_dev_regsize(hxgep->dip, 2, &regsize);
	HXGE_DEBUG_MSG((hxgep, DDI_CTL,
	    "hxge_map_regs: msix size 0x%x", regsize));

	ddi_status = ddi_regs_map_setup(hxgep->dip, 2,
	    (caddr_t *)&(dev_regs->hxge_msix_regp), 0, 0,
	    &hxge_dev_reg_acc_attr, &dev_regs->hxge_msix_regh);

	if (ddi_status != DDI_SUCCESS) {
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
		    "ddi_map_regs for msi reg failed"));
		goto hxge_map_regs_fail2;
	}

	/* All three sets mapped: publish handles to the HPI layer. */
	hxgep->dev_regs = dev_regs;

	HPI_PCI_ACC_HANDLE_SET(hxgep, dev_regs->hxge_pciregh);
	HPI_PCI_ADD_HANDLE_SET(hxgep, (hpi_reg_ptr_t)dev_regs->hxge_pciregp);
	HPI_MSI_ACC_HANDLE_SET(hxgep, dev_regs->hxge_msix_regh);
	HPI_MSI_ADD_HANDLE_SET(hxgep, (hpi_reg_ptr_t)dev_regs->hxge_msix_regp);

	HPI_ACC_HANDLE_SET(hxgep, dev_regs->hxge_regh);
	HPI_ADD_HANDLE_SET(hxgep, (hpi_reg_ptr_t)dev_regs->hxge_regp);

	HPI_REG_ACC_HANDLE_SET(hxgep, dev_regs->hxge_regh);
	HPI_REG_ADD_HANDLE_SET(hxgep, (hpi_reg_ptr_t)dev_regs->hxge_regp);

	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "hxge_map_reg: hardware addr 0x%0llx "
	    " handle 0x%0llx", dev_regs->hxge_regp, dev_regs->hxge_regh));

	goto hxge_map_regs_exit;

	/*
	 * Failure unwind: each label frees one mapping and falls through
	 * to the next, ending with the dev_regs structure itself.
	 */
hxge_map_regs_fail3:
	/* NOTE(review): this label is not referenced by any goto. */
	if (dev_regs->hxge_msix_regh) {
		ddi_regs_map_free(&dev_regs->hxge_msix_regh);
	}

hxge_map_regs_fail2:
	if (dev_regs->hxge_regh) {
		ddi_regs_map_free(&dev_regs->hxge_regh);
	}

hxge_map_regs_fail1:
	if (dev_regs->hxge_pciregh) {
		ddi_regs_map_free(&dev_regs->hxge_pciregh);
	}

hxge_map_regs_fail0:
	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "Freeing register set memory"));
	kmem_free(dev_regs, sizeof (dev_regs_t));

hxge_map_regs_exit:
	if (ddi_status != DDI_SUCCESS)
		status |= (HXGE_ERROR | HXGE_DDI_FAILED);
	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_map_regs"));
	return (status);
}
7783dec9fcdSqs 
7793dec9fcdSqs static void
7803dec9fcdSqs hxge_unmap_regs(p_hxge_t hxgep)
7813dec9fcdSqs {
7823dec9fcdSqs 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_unmap_regs"));
7833dec9fcdSqs 	if (hxgep->dev_regs) {
7843dec9fcdSqs 		if (hxgep->dev_regs->hxge_pciregh) {
7853dec9fcdSqs 			HXGE_DEBUG_MSG((hxgep, DDI_CTL,
7863dec9fcdSqs 			    "==> hxge_unmap_regs: bus"));
7873dec9fcdSqs 			ddi_regs_map_free(&hxgep->dev_regs->hxge_pciregh);
7883dec9fcdSqs 			hxgep->dev_regs->hxge_pciregh = NULL;
7893dec9fcdSqs 		}
7903dec9fcdSqs 
7913dec9fcdSqs 		if (hxgep->dev_regs->hxge_regh) {
7923dec9fcdSqs 			HXGE_DEBUG_MSG((hxgep, DDI_CTL,
7933dec9fcdSqs 			    "==> hxge_unmap_regs: device registers"));
7943dec9fcdSqs 			ddi_regs_map_free(&hxgep->dev_regs->hxge_regh);
7953dec9fcdSqs 			hxgep->dev_regs->hxge_regh = NULL;
7963dec9fcdSqs 		}
7973dec9fcdSqs 
7983dec9fcdSqs 		if (hxgep->dev_regs->hxge_msix_regh) {
7993dec9fcdSqs 			HXGE_DEBUG_MSG((hxgep, DDI_CTL,
8003dec9fcdSqs 			    "==> hxge_unmap_regs: device interrupts"));
8013dec9fcdSqs 			ddi_regs_map_free(&hxgep->dev_regs->hxge_msix_regh);
8023dec9fcdSqs 			hxgep->dev_regs->hxge_msix_regh = NULL;
8033dec9fcdSqs 		}
8043dec9fcdSqs 		kmem_free(hxgep->dev_regs, sizeof (dev_regs_t));
8053dec9fcdSqs 		hxgep->dev_regs = NULL;
8063dec9fcdSqs 	}
8073dec9fcdSqs 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_unmap_regs"));
8083dec9fcdSqs }
8093dec9fcdSqs 
8103dec9fcdSqs static hxge_status_t
8113dec9fcdSqs hxge_setup_mutexes(p_hxge_t hxgep)
8123dec9fcdSqs {
8133dec9fcdSqs 	int		ddi_status = DDI_SUCCESS;
8143dec9fcdSqs 	hxge_status_t	status = HXGE_OK;
8153dec9fcdSqs 
8163dec9fcdSqs 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_setup_mutexes"));
8173dec9fcdSqs 
8183dec9fcdSqs 	/*
8193dec9fcdSqs 	 * Get the interrupt cookie so the mutexes can be Initialised.
8203dec9fcdSqs 	 */
8213dec9fcdSqs 	ddi_status = ddi_get_iblock_cookie(hxgep->dip, 0,
8223dec9fcdSqs 	    &hxgep->interrupt_cookie);
8233dec9fcdSqs 
8243dec9fcdSqs 	if (ddi_status != DDI_SUCCESS) {
8253dec9fcdSqs 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
8263dec9fcdSqs 		    "<== hxge_setup_mutexes: failed 0x%x", ddi_status));
8273dec9fcdSqs 		goto hxge_setup_mutexes_exit;
8283dec9fcdSqs 	}
8293dec9fcdSqs 
8303dec9fcdSqs 	/*
8313dec9fcdSqs 	 * Initialize mutex's for this device.
8323dec9fcdSqs 	 */
8333dec9fcdSqs 	MUTEX_INIT(hxgep->genlock, NULL,
8343dec9fcdSqs 	    MUTEX_DRIVER, (void *) hxgep->interrupt_cookie);
8353dec9fcdSqs 	MUTEX_INIT(&hxgep->ouraddr_lock, NULL,
8363dec9fcdSqs 	    MUTEX_DRIVER, (void *) hxgep->interrupt_cookie);
8373dec9fcdSqs 	RW_INIT(&hxgep->filter_lock, NULL,
8383dec9fcdSqs 	    RW_DRIVER, (void *) hxgep->interrupt_cookie);
839*fe930412Sqs 	MUTEX_INIT(&hxgep->pio_lock, NULL,
840*fe930412Sqs 	    MUTEX_DRIVER, (void *) hxgep->interrupt_cookie);
8413dec9fcdSqs 
8423dec9fcdSqs hxge_setup_mutexes_exit:
8433dec9fcdSqs 	HXGE_DEBUG_MSG((hxgep, DDI_CTL,
8443dec9fcdSqs 	    "<== hxge_setup_mutexes status = %x", status));
8453dec9fcdSqs 
8463dec9fcdSqs 	if (ddi_status != DDI_SUCCESS)
8473dec9fcdSqs 		status |= (HXGE_ERROR | HXGE_DDI_FAILED);
8483dec9fcdSqs 
8493dec9fcdSqs 	return (status);
8503dec9fcdSqs }
8513dec9fcdSqs 
/*
 * hxge_destroy_mutexes - tear down the locks created by
 * hxge_setup_mutexes(), plus the global debug lock if it was ever
 * lazily created by hxge_debug_msg().
 */
static void
hxge_destroy_mutexes(p_hxge_t hxgep)
{
	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_destroy_mutexes"));
	RW_DESTROY(&hxgep->filter_lock);
	MUTEX_DESTROY(&hxgep->ouraddr_lock);
	MUTEX_DESTROY(hxgep->genlock);
	MUTEX_DESTROY(&hxgep->pio_lock);

	/*
	 * hxgedebuglock is shared by all instances and created on first
	 * log message.  NOTE(review): destroying it here assumes no other
	 * instance can be logging concurrently during detach -- confirm
	 * detach serialization before relying on this.
	 */
	if (hxge_debug_init == 1) {
		MUTEX_DESTROY(&hxgedebuglock);
		hxge_debug_init = 0;
	}

	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_destroy_mutexes"));
}
8683dec9fcdSqs 
/*
 * hxge_init - bring the hardware to a fully initialized state.
 *
 * In order: allocate the rx/tx DMA memory pools, initialize the TXDMA
 * and RXDMA channels, program the TCAM classifier and the VMAC, set the
 * default MAC address, then enable interrupts.  Idempotent: returns
 * immediately with HXGE_OK if STATE_HW_INITIALIZED is already set.
 *
 * On failure, the steps completed so far are unwound in reverse order
 * through the goto ladder below, and the failing status is returned.
 */
hxge_status_t
hxge_init(p_hxge_t hxgep)
{
	hxge_status_t status = HXGE_OK;

	HXGE_DEBUG_MSG((hxgep, STR_CTL, "==> hxge_init"));

	/* Already initialized -- nothing to do. */
	if (hxgep->drv_state & STATE_HW_INITIALIZED) {
		return (status);
	}

	/*
	 * Allocate system memory for the receive/transmit buffer blocks and
	 * receive/transmit descriptor rings.
	 */
	status = hxge_alloc_mem_pool(hxgep);
	if (status != HXGE_OK) {
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "alloc mem failed\n"));
		goto hxge_init_fail1;
	}

	/*
	 * Initialize and enable TXDMA channels.
	 */
	status = hxge_init_txdma_channels(hxgep);
	if (status != HXGE_OK) {
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "init txdma failed\n"));
		goto hxge_init_fail3;
	}

	/*
	 * Initialize and enable RXDMA channels.
	 */
	status = hxge_init_rxdma_channels(hxgep);
	if (status != HXGE_OK) {
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "init rxdma failed\n"));
		goto hxge_init_fail4;
	}

	/*
	 * Initialize TCAM
	 */
	status = hxge_classify_init(hxgep);
	if (status != HXGE_OK) {
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "init classify failed\n"));
		goto hxge_init_fail5;
	}

	/*
	 * Initialize the VMAC block.
	 */
	status = hxge_vmac_init(hxgep);
	if (status != HXGE_OK) {
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "init MAC failed\n"));
		goto hxge_init_fail5;
	}

	/* Bringup - this may be unnecessary when PXE and FCODE available */
	status = hxge_pfc_set_default_mac_addr(hxgep);
	if (status != HXGE_OK) {
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
		    "Default Address Failure\n"));
		goto hxge_init_fail5;
	}

	hxge_intrs_enable(hxgep);

	/*
	 * Enable hardware interrupts.
	 */
	hxge_intr_hw_enable(hxgep);
	hxgep->drv_state |= STATE_HW_INITIALIZED;

	goto hxge_init_exit;

	/* Unwind in reverse order of initialization. */
hxge_init_fail5:
	hxge_uninit_rxdma_channels(hxgep);
hxge_init_fail4:
	hxge_uninit_txdma_channels(hxgep);
hxge_init_fail3:
	hxge_free_mem_pool(hxgep);
hxge_init_fail1:
	HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
	    "<== hxge_init status (failed) = 0x%08x", status));
	return (status);

hxge_init_exit:

	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_init status = 0x%08x",
	    status));

	return (status);
}
9623dec9fcdSqs 
9633dec9fcdSqs timeout_id_t
9643dec9fcdSqs hxge_start_timer(p_hxge_t hxgep, fptrv_t func, int msec)
9653dec9fcdSqs {
9663dec9fcdSqs 	if ((hxgep->suspended == 0) || (hxgep->suspended == DDI_RESUME)) {
9673dec9fcdSqs 		return (timeout(func, (caddr_t)hxgep,
9683dec9fcdSqs 		    drv_usectohz(1000 * msec)));
9693dec9fcdSqs 	}
9703dec9fcdSqs 	return (NULL);
9713dec9fcdSqs }
9723dec9fcdSqs 
9733dec9fcdSqs /*ARGSUSED*/
9743dec9fcdSqs void
9753dec9fcdSqs hxge_stop_timer(p_hxge_t hxgep, timeout_id_t timerid)
9763dec9fcdSqs {
9773dec9fcdSqs 	if (timerid) {
9783dec9fcdSqs 		(void) untimeout(timerid);
9793dec9fcdSqs 	}
9803dec9fcdSqs }
9813dec9fcdSqs 
/*
 * hxge_uninit - reverse hxge_init(): quiesce the hardware and release
 * the DMA memory pools.
 *
 * Teardown order: stop the watchdog timer, mask hardware interrupts,
 * disable the receive VMAC (stop inbound traffic first), free the
 * classifier, stop and uninitialize the tx/rx DMA channels, disable the
 * transmit VMAC, then free the memory pools.  No-op unless
 * STATE_HW_INITIALIZED is set; clears that flag on completion.
 */
void
hxge_uninit(p_hxge_t hxgep)
{
	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_uninit"));

	if (!(hxgep->drv_state & STATE_HW_INITIALIZED)) {
		HXGE_DEBUG_MSG((hxgep, DDI_CTL,
		    "==> hxge_uninit: not initialized"));
		HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_uninit"));
		return;
	}

	/* Stop timer */
	if (hxgep->hxge_timerid) {
		hxge_stop_timer(hxgep, hxgep->hxge_timerid);
		hxgep->hxge_timerid = 0;
	}

	(void) hxge_intr_hw_disable(hxgep);

	/* Reset the receive VMAC side.  */
	(void) hxge_rx_vmac_disable(hxgep);

	/* Free classification resources */
	(void) hxge_classify_uninit(hxgep);

	/* Reset the transmit/receive DMA side.  */
	(void) hxge_txdma_hw_mode(hxgep, HXGE_DMA_STOP);
	(void) hxge_rxdma_hw_mode(hxgep, HXGE_DMA_STOP);

	hxge_uninit_txdma_channels(hxgep);
	hxge_uninit_rxdma_channels(hxgep);

	/* Reset the transmit VMAC side.  */
	(void) hxge_tx_vmac_disable(hxgep);

	hxge_free_mem_pool(hxgep);

	hxgep->drv_state &= ~STATE_HW_INITIALIZED;

	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_uninit"));
}
10243dec9fcdSqs 
10253dec9fcdSqs void
10263dec9fcdSqs hxge_get64(p_hxge_t hxgep, p_mblk_t mp)
10273dec9fcdSqs {
10283dec9fcdSqs #if defined(__i386)
10293dec9fcdSqs 	size_t		reg;
10303dec9fcdSqs #else
10313dec9fcdSqs 	uint64_t	reg;
10323dec9fcdSqs #endif
10333dec9fcdSqs 	uint64_t	regdata;
10343dec9fcdSqs 	int		i, retry;
10353dec9fcdSqs 
10363dec9fcdSqs 	bcopy((char *)mp->b_rptr, (char *)&reg, sizeof (uint64_t));
10373dec9fcdSqs 	regdata = 0;
10383dec9fcdSqs 	retry = 1;
10393dec9fcdSqs 
10403dec9fcdSqs 	for (i = 0; i < retry; i++) {
10413dec9fcdSqs 		HXGE_REG_RD64(hxgep->hpi_handle, reg, &regdata);
10423dec9fcdSqs 	}
10433dec9fcdSqs 	bcopy((char *)&regdata, (char *)mp->b_rptr, sizeof (uint64_t));
10443dec9fcdSqs }
10453dec9fcdSqs 
10463dec9fcdSqs void
10473dec9fcdSqs hxge_put64(p_hxge_t hxgep, p_mblk_t mp)
10483dec9fcdSqs {
10493dec9fcdSqs #if defined(__i386)
10503dec9fcdSqs 	size_t		reg;
10513dec9fcdSqs #else
10523dec9fcdSqs 	uint64_t	reg;
10533dec9fcdSqs #endif
10543dec9fcdSqs 	uint64_t	buf[2];
10553dec9fcdSqs 
10563dec9fcdSqs 	bcopy((char *)mp->b_rptr, (char *)&buf[0], 2 * sizeof (uint64_t));
10573dec9fcdSqs #if defined(__i386)
10583dec9fcdSqs 	reg = (size_t)buf[0];
10593dec9fcdSqs #else
10603dec9fcdSqs 	reg = buf[0];
10613dec9fcdSqs #endif
10623dec9fcdSqs 
10633dec9fcdSqs 	HXGE_HPI_PIO_WRITE64(hxgep->hpi_handle, reg, buf[1]);
10643dec9fcdSqs }
10653dec9fcdSqs 
10663dec9fcdSqs /*ARGSUSED*/
10673dec9fcdSqs /*VARARGS*/
10683dec9fcdSqs void
10693dec9fcdSqs hxge_debug_msg(p_hxge_t hxgep, uint64_t level, char *fmt, ...)
10703dec9fcdSqs {
10713dec9fcdSqs 	char		msg_buffer[1048];
10723dec9fcdSqs 	char		prefix_buffer[32];
10733dec9fcdSqs 	int		instance;
10743dec9fcdSqs 	uint64_t	debug_level;
10753dec9fcdSqs 	int		cmn_level = CE_CONT;
10763dec9fcdSqs 	va_list		ap;
10773dec9fcdSqs 
10783dec9fcdSqs 	debug_level = (hxgep == NULL) ? hxge_debug_level :
10793dec9fcdSqs 	    hxgep->hxge_debug_level;
10803dec9fcdSqs 
10813dec9fcdSqs 	if ((level & debug_level) || (level == HXGE_NOTE) ||
10823dec9fcdSqs 	    (level == HXGE_ERR_CTL)) {
10833dec9fcdSqs 		/* do the msg processing */
10843dec9fcdSqs 		if (hxge_debug_init == 0) {
10853dec9fcdSqs 			MUTEX_INIT(&hxgedebuglock, NULL, MUTEX_DRIVER, NULL);
10863dec9fcdSqs 			hxge_debug_init = 1;
10873dec9fcdSqs 		}
10883dec9fcdSqs 
10893dec9fcdSqs 		MUTEX_ENTER(&hxgedebuglock);
10903dec9fcdSqs 
10913dec9fcdSqs 		if ((level & HXGE_NOTE)) {
10923dec9fcdSqs 			cmn_level = CE_NOTE;
10933dec9fcdSqs 		}
10943dec9fcdSqs 
10953dec9fcdSqs 		if (level & HXGE_ERR_CTL) {
10963dec9fcdSqs 			cmn_level = CE_WARN;
10973dec9fcdSqs 		}
10983dec9fcdSqs 
10993dec9fcdSqs 		va_start(ap, fmt);
11003dec9fcdSqs 		(void) vsprintf(msg_buffer, fmt, ap);
11013dec9fcdSqs 		va_end(ap);
11023dec9fcdSqs 
11033dec9fcdSqs 		if (hxgep == NULL) {
11043dec9fcdSqs 			instance = -1;
11053dec9fcdSqs 			(void) sprintf(prefix_buffer, "%s :", "hxge");
11063dec9fcdSqs 		} else {
11073dec9fcdSqs 			instance = hxgep->instance;
11083dec9fcdSqs 			(void) sprintf(prefix_buffer,
11093dec9fcdSqs 			    "%s%d :", "hxge", instance);
11103dec9fcdSqs 		}
11113dec9fcdSqs 
11123dec9fcdSqs 		MUTEX_EXIT(&hxgedebuglock);
11133dec9fcdSqs 		cmn_err(cmn_level, "%s %s\n", prefix_buffer, msg_buffer);
11143dec9fcdSqs 	}
11153dec9fcdSqs }
11163dec9fcdSqs 
11173dec9fcdSqs char *
11183dec9fcdSqs hxge_dump_packet(char *addr, int size)
11193dec9fcdSqs {
11203dec9fcdSqs 	uchar_t		*ap = (uchar_t *)addr;
11213dec9fcdSqs 	int		i;
11223dec9fcdSqs 	static char	etherbuf[1024];
11233dec9fcdSqs 	char		*cp = etherbuf;
11243dec9fcdSqs 	char		digits[] = "0123456789abcdef";
11253dec9fcdSqs 
11263dec9fcdSqs 	if (!size)
11273dec9fcdSqs 		size = 60;
11283dec9fcdSqs 
11293dec9fcdSqs 	if (size > MAX_DUMP_SZ) {
11303dec9fcdSqs 		/* Dump the leading bytes */
11313dec9fcdSqs 		for (i = 0; i < MAX_DUMP_SZ / 2; i++) {
11323dec9fcdSqs 			if (*ap > 0x0f)
11333dec9fcdSqs 				*cp++ = digits[*ap >> 4];
11343dec9fcdSqs 			*cp++ = digits[*ap++ & 0xf];
11353dec9fcdSqs 			*cp++ = ':';
11363dec9fcdSqs 		}
11373dec9fcdSqs 		for (i = 0; i < 20; i++)
11383dec9fcdSqs 			*cp++ = '.';
11393dec9fcdSqs 		/* Dump the last MAX_DUMP_SZ/2 bytes */
11403dec9fcdSqs 		ap = (uchar_t *)(addr + (size - MAX_DUMP_SZ / 2));
11413dec9fcdSqs 		for (i = 0; i < MAX_DUMP_SZ / 2; i++) {
11423dec9fcdSqs 			if (*ap > 0x0f)
11433dec9fcdSqs 				*cp++ = digits[*ap >> 4];
11443dec9fcdSqs 			*cp++ = digits[*ap++ & 0xf];
11453dec9fcdSqs 			*cp++ = ':';
11463dec9fcdSqs 		}
11473dec9fcdSqs 	} else {
11483dec9fcdSqs 		for (i = 0; i < size; i++) {
11493dec9fcdSqs 			if (*ap > 0x0f)
11503dec9fcdSqs 				*cp++ = digits[*ap >> 4];
11513dec9fcdSqs 			*cp++ = digits[*ap++ & 0xf];
11523dec9fcdSqs 			*cp++ = ':';
11533dec9fcdSqs 		}
11543dec9fcdSqs 	}
11553dec9fcdSqs 	*--cp = 0;
11563dec9fcdSqs 	return (etherbuf);
11573dec9fcdSqs }
11583dec9fcdSqs 
/*
 * hxge_suspend - quiesce the device for DDI_SUSPEND: mask interrupts
 * and stop the hardware.  hxge_resume() reverses this.
 */
static void
hxge_suspend(p_hxge_t hxgep)
{
	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_suspend"));

	hxge_intrs_disable(hxgep);
	hxge_destroy_dev(hxgep);

	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_suspend"));
}
11693dec9fcdSqs 
/*
 * hxge_resume - restart the device after DDI_RESUME.
 *
 * Restarts the rx/tx DMA channels, re-enables the VMAC rx/tx paths and
 * interrupts, in that order.  'suspended' is set to DDI_RESUME for the
 * duration so that hxge_start_timer() remains allowed to arm timers,
 * then cleared.  Always returns HXGE_OK (the sub-calls' statuses are
 * discarded).
 */
static hxge_status_t
hxge_resume(p_hxge_t hxgep)
{
	hxge_status_t status = HXGE_OK;

	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_resume"));
	hxgep->suspended = DDI_RESUME;

	(void) hxge_rxdma_hw_mode(hxgep, HXGE_DMA_START);
	(void) hxge_txdma_hw_mode(hxgep, HXGE_DMA_START);

	(void) hxge_rx_vmac_enable(hxgep);
	(void) hxge_tx_vmac_enable(hxgep);

	hxge_intrs_enable(hxgep);

	hxgep->suspended = 0;

	HXGE_DEBUG_MSG((hxgep, DDI_CTL,
	    "<== hxge_resume status = 0x%x", status));

	return (status);
}
11933dec9fcdSqs 
11943dec9fcdSqs hxge_status_t
11953dec9fcdSqs hxge_setup_dev(p_hxge_t hxgep)
11963dec9fcdSqs {
11973dec9fcdSqs 	hxge_status_t status = HXGE_OK;
11983dec9fcdSqs 
11993dec9fcdSqs 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_setup_dev"));
12003dec9fcdSqs 
12013dec9fcdSqs 	status = hxge_link_init(hxgep);
12023dec9fcdSqs 	if (fm_check_acc_handle(hxgep->dev_regs->hxge_regh) != DDI_FM_OK) {
12033dec9fcdSqs 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
12043dec9fcdSqs 		    "Bad register acc handle"));
12053dec9fcdSqs 		status = HXGE_ERROR;
12063dec9fcdSqs 	}
12073dec9fcdSqs 
12083dec9fcdSqs 	if (status != HXGE_OK) {
12093dec9fcdSqs 		HXGE_DEBUG_MSG((hxgep, MAC_CTL,
12103dec9fcdSqs 		    " hxge_setup_dev status (link init 0x%08x)", status));
12113dec9fcdSqs 		goto hxge_setup_dev_exit;
12123dec9fcdSqs 	}
12133dec9fcdSqs 
12143dec9fcdSqs hxge_setup_dev_exit:
12153dec9fcdSqs 	HXGE_DEBUG_MSG((hxgep, DDI_CTL,
12163dec9fcdSqs 	    "<== hxge_setup_dev status = 0x%08x", status));
12173dec9fcdSqs 
12183dec9fcdSqs 	return (status);
12193dec9fcdSqs }
12203dec9fcdSqs 
/*
 * hxge_destroy_dev - stop the hardware (thin wrapper around
 * hxge_hw_stop(), used by the suspend and detach paths).
 */
static void
hxge_destroy_dev(p_hxge_t hxgep)
{
	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_destroy_dev"));

	(void) hxge_hw_stop(hxgep);

	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_destroy_dev"));
}
12303dec9fcdSqs 
/*
 * hxge_setup_system_dma_pages - discover system/IOMMU page size and DMA
 * burst capabilities, and derive the driver's rx block size.
 *
 * Clamps the working page size to what the hardware supports (8K max),
 * picks the rx default block size and RBR block-size code accordingly,
 * propagates the page alignment into the rx/tx/descriptor DMA
 * attributes, and probes the system burst size by binding a throwaway
 * DMA handle.
 *
 * Returns HXGE_OK, or HXGE_ERROR | HXGE_DDI_FAILED if the probe handle
 * could not be allocated or bound.
 */
static hxge_status_t
hxge_setup_system_dma_pages(p_hxge_t hxgep)
{
	int			ddi_status = DDI_SUCCESS;
	uint_t			count;
	ddi_dma_cookie_t	cookie;
	uint_t			iommu_pagesize;
	hxge_status_t		status = HXGE_OK;

	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_setup_system_dma_pages"));

	hxgep->sys_page_sz = ddi_ptob(hxgep->dip, (ulong_t)1);
	iommu_pagesize = dvma_pagesize(hxgep->dip);

	HXGE_DEBUG_MSG((hxgep, DDI_CTL,
	    " hxge_setup_system_dma_pages: page %d (ddi_ptob %d) "
	    " default_block_size %d iommu_pagesize %d",
	    hxgep->sys_page_sz, ddi_ptob(hxgep->dip, (ulong_t)1),
	    hxgep->rx_default_block_size, iommu_pagesize));

	/*
	 * Reconcile the system page size with the IOMMU page size, then
	 * clamp to the 8K maximum the Hydra hardware supports.
	 */
	if (iommu_pagesize != 0) {
		if (hxgep->sys_page_sz == iommu_pagesize) {
			/* Hydra support up to 8K pages */
			if (iommu_pagesize > 0x2000)
				hxgep->sys_page_sz = 0x2000;
		} else {
			if (hxgep->sys_page_sz > iommu_pagesize)
				hxgep->sys_page_sz = iommu_pagesize;
		}
	}

	hxgep->sys_page_mask = ~(hxgep->sys_page_sz - 1);

	HXGE_DEBUG_MSG((hxgep, DDI_CTL,
	    "==> hxge_setup_system_dma_pages: page %d (ddi_ptob %d) "
	    "default_block_size %d page mask %d",
	    hxgep->sys_page_sz, ddi_ptob(hxgep->dip, (ulong_t)1),
	    hxgep->rx_default_block_size, hxgep->sys_page_mask));

	/*
	 * Map the page size to an rx block size and RBR code; anything
	 * unrecognized falls back to 4K.
	 */
	switch (hxgep->sys_page_sz) {
	default:
		hxgep->sys_page_sz = 0x1000;
		hxgep->sys_page_mask = ~(hxgep->sys_page_sz - 1);
		hxgep->rx_default_block_size = 0x1000;
		hxgep->rx_bksize_code = RBR_BKSIZE_4K;
		break;
	case 0x1000:
		hxgep->rx_default_block_size = 0x1000;
		hxgep->rx_bksize_code = RBR_BKSIZE_4K;
		break;
	case 0x2000:
		hxgep->rx_default_block_size = 0x2000;
		hxgep->rx_bksize_code = RBR_BKSIZE_8K;
		break;
	}

	hxge_rx_dma_attr.dma_attr_align = hxgep->sys_page_sz;
	hxge_tx_dma_attr.dma_attr_align = hxgep->sys_page_sz;
	hxge_desc_dma_attr.dma_attr_align = hxgep->sys_page_sz;

	/*
	 * Get the system DMA burst size.
	 */
	ddi_status = ddi_dma_alloc_handle(hxgep->dip, &hxge_tx_dma_attr,
	    DDI_DMA_DONTWAIT, 0, &hxgep->dmasparehandle);
	if (ddi_status != DDI_SUCCESS) {
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
		    "ddi_dma_alloc_handle: failed status 0x%x", ddi_status));
		goto hxge_get_soft_properties_exit;
	}

	/*
	 * Bind any address (the spare handle itself serves as a dummy
	 * buffer) purely so ddi_dma_burstsizes() can be queried.
	 */
	ddi_status = ddi_dma_addr_bind_handle(hxgep->dmasparehandle, NULL,
	    (caddr_t)hxgep->dmasparehandle, sizeof (hxgep->dmasparehandle),
	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT, DDI_DMA_DONTWAIT, 0,
	    &cookie, &count);
	if (ddi_status != DDI_DMA_MAPPED) {
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
		    "Binding spare handle to find system burstsize failed."));
		ddi_status = DDI_FAILURE;
		goto hxge_get_soft_properties_fail1;
	}

	hxgep->sys_burst_sz = ddi_dma_burstsizes(hxgep->dmasparehandle);
	(void) ddi_dma_unbind_handle(hxgep->dmasparehandle);

hxge_get_soft_properties_fail1:
	ddi_dma_free_handle(&hxgep->dmasparehandle);

hxge_get_soft_properties_exit:

	if (ddi_status != DDI_SUCCESS)
		status |= (HXGE_ERROR | HXGE_DDI_FAILED);

	HXGE_DEBUG_MSG((hxgep, DDI_CTL,
	    "<== hxge_setup_system_dma_pages status = 0x%08x", status));

	return (status);
}
13293dec9fcdSqs 
13303dec9fcdSqs hxge_status_t
13313dec9fcdSqs hxge_alloc_mem_pool(p_hxge_t hxgep)
13323dec9fcdSqs {
13333dec9fcdSqs 	hxge_status_t status = HXGE_OK;
13343dec9fcdSqs 
13353dec9fcdSqs 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_alloc_mem_pool"));
13363dec9fcdSqs 
13373dec9fcdSqs 	status = hxge_alloc_rx_mem_pool(hxgep);
13383dec9fcdSqs 	if (status != HXGE_OK) {
13393dec9fcdSqs 		return (HXGE_ERROR);
13403dec9fcdSqs 	}
13413dec9fcdSqs 
13423dec9fcdSqs 	status = hxge_alloc_tx_mem_pool(hxgep);
13433dec9fcdSqs 	if (status != HXGE_OK) {
13443dec9fcdSqs 		hxge_free_rx_mem_pool(hxgep);
13453dec9fcdSqs 		return (HXGE_ERROR);
13463dec9fcdSqs 	}
13473dec9fcdSqs 
13483dec9fcdSqs 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_alloc_mem_pool"));
13493dec9fcdSqs 	return (HXGE_OK);
13503dec9fcdSqs }
13513dec9fcdSqs 
/*
 * hxge_free_mem_pool - release the receive and transmit DMA memory
 * pools allocated by hxge_alloc_mem_pool().
 */
static void
hxge_free_mem_pool(p_hxge_t hxgep)
{
	HXGE_DEBUG_MSG((hxgep, MEM_CTL, "==> hxge_free_mem_pool"));

	hxge_free_rx_mem_pool(hxgep);
	hxge_free_tx_mem_pool(hxgep);

	HXGE_DEBUG_MSG((hxgep, MEM_CTL, "<== hxge_free_mem_pool"));
}
13623dec9fcdSqs 
/*
 * hxge_alloc_rx_mem_pool - allocate receive-side DMA memory for every
 * RDMA channel: data buffer chunks, plus a control area holding the
 * receive block ring (RBR), receive completion ring (RCR) and mailbox.
 *
 * Ring sizes come from the partition configuration (with defaults) and
 * are rounded up to a multiple of HXGE_RXDMA_POST_BATCH.  On success,
 * ownership of the allocated pool structures transfers to
 * hxgep->rx_buf_pool_p / hxgep->rx_cntl_pool_p (freed later by
 * hxge_free_rx_mem_pool()).  On failure, everything allocated so far
 * is freed and the failing status returned.
 */
static hxge_status_t
hxge_alloc_rx_mem_pool(p_hxge_t hxgep)
{
	int			i, j;
	uint32_t		ndmas, st_rdc;
	p_hxge_dma_pt_cfg_t	p_all_cfgp;
	p_hxge_hw_pt_cfg_t	p_cfgp;
	p_hxge_dma_pool_t	dma_poolp;
	p_hxge_dma_common_t	*dma_buf_p;
	p_hxge_dma_pool_t	dma_cntl_poolp;
	p_hxge_dma_common_t	*dma_cntl_p;
	size_t			rx_buf_alloc_size;
	size_t			rx_cntl_alloc_size;
	uint32_t		*num_chunks;	/* per dma */
	hxge_status_t		status = HXGE_OK;

	uint32_t		hxge_port_rbr_size;
	uint32_t		hxge_port_rbr_spare_size;
	uint32_t		hxge_port_rcr_size;

	HXGE_DEBUG_MSG((hxgep, DMA_CTL, "==> hxge_alloc_rx_mem_pool"));

	p_all_cfgp = (p_hxge_dma_pt_cfg_t)&hxgep->pt_config;
	p_cfgp = (p_hxge_hw_pt_cfg_t)&p_all_cfgp->hw_config;
	st_rdc = p_cfgp->start_rdc;
	ndmas = p_cfgp->max_rdcs;

	HXGE_DEBUG_MSG((hxgep, DMA_CTL,
	    " hxge_alloc_rx_mem_pool st_rdc %d ndmas %d", st_rdc, ndmas));

	/*
	 * Allocate memory for each receive DMA channel.
	 */
	dma_poolp = (p_hxge_dma_pool_t)KMEM_ZALLOC(sizeof (hxge_dma_pool_t),
	    KM_SLEEP);
	dma_buf_p = (p_hxge_dma_common_t *)KMEM_ZALLOC(
	    sizeof (p_hxge_dma_common_t) * ndmas, KM_SLEEP);

	dma_cntl_poolp = (p_hxge_dma_pool_t)
	    KMEM_ZALLOC(sizeof (hxge_dma_pool_t), KM_SLEEP);
	dma_cntl_p = (p_hxge_dma_common_t *)KMEM_ZALLOC(
	    sizeof (p_hxge_dma_common_t) * ndmas, KM_SLEEP);

	num_chunks = (uint32_t *)KMEM_ZALLOC(sizeof (uint32_t) * ndmas,
	    KM_SLEEP);

	/*
	 * Assume that each DMA channel will be configured with default block
	 * size. rbr block counts are mod of batch count (16).
	 */
	hxge_port_rbr_size = p_all_cfgp->rbr_size;
	hxge_port_rcr_size = p_all_cfgp->rcr_size;

	if (!hxge_port_rbr_size) {
		hxge_port_rbr_size = HXGE_RBR_RBB_DEFAULT;
	}

	/* Round the RBR size up to a whole number of post batches. */
	if (hxge_port_rbr_size % HXGE_RXDMA_POST_BATCH) {
		hxge_port_rbr_size = (HXGE_RXDMA_POST_BATCH *
		    (hxge_port_rbr_size / HXGE_RXDMA_POST_BATCH + 1));
	}

	p_all_cfgp->rbr_size = hxge_port_rbr_size;
	hxge_port_rbr_spare_size = hxge_rbr_spare_size;

	/* Same batch-rounding for the spare area. */
	if (hxge_port_rbr_spare_size % HXGE_RXDMA_POST_BATCH) {
		hxge_port_rbr_spare_size = (HXGE_RXDMA_POST_BATCH *
		    (hxge_port_rbr_spare_size / HXGE_RXDMA_POST_BATCH + 1));
	}

	rx_buf_alloc_size = (hxgep->rx_default_block_size *
	    (hxge_port_rbr_size + hxge_port_rbr_spare_size));

	/*
	 * Addresses of receive block ring, receive completion ring and the
	 * mailbox must be all cache-aligned (64 bytes).
	 */
	rx_cntl_alloc_size = hxge_port_rbr_size + hxge_port_rbr_spare_size;
	rx_cntl_alloc_size *= (sizeof (rx_desc_t));
	rx_cntl_alloc_size += (sizeof (rcr_entry_t) * hxge_port_rcr_size);
	rx_cntl_alloc_size += sizeof (rxdma_mailbox_t);

	HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "==> hxge_alloc_rx_mem_pool: "
	    "hxge_port_rbr_size = %d hxge_port_rbr_spare_size = %d "
	    "hxge_port_rcr_size = %d rx_cntl_alloc_size = %d",
	    hxge_port_rbr_size, hxge_port_rbr_spare_size,
	    hxge_port_rcr_size, rx_cntl_alloc_size));

	hxgep->hxge_port_rbr_size = hxge_port_rbr_size;
	hxgep->hxge_port_rcr_size = hxge_port_rcr_size;

	/*
	 * Allocate memory for receive buffers and descriptor rings. Replace
	 * allocation functions with interface functions provided by the
	 * partition manager when it is available.
	 */
	/*
	 * Allocate memory for the receive buffer blocks.
	 */
	for (i = 0; i < ndmas; i++) {
		HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
		    " hxge_alloc_rx_mem_pool to alloc mem: "
		    " dma %d dma_buf_p %llx &dma_buf_p %llx",
		    i, dma_buf_p[i], &dma_buf_p[i]));

		num_chunks[i] = 0;

		status = hxge_alloc_rx_buf_dma(hxgep, st_rdc, &dma_buf_p[i],
		    rx_buf_alloc_size, hxgep->rx_default_block_size,
		    &num_chunks[i]);
		if (status != HXGE_OK) {
			break;
		}

		st_rdc++;
		HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
		    " hxge_alloc_rx_mem_pool DONE  alloc mem: "
		    "dma %d dma_buf_p %llx &dma_buf_p %llx", i,
		    dma_buf_p[i], &dma_buf_p[i]));
	}

	/* A break above leaves i < ndmas: free channels [0, i-1]. */
	if (i < ndmas) {
		goto hxge_alloc_rx_mem_fail1;
	}

	/*
	 * Allocate memory for descriptor rings and mailbox.
	 */
	st_rdc = p_cfgp->start_rdc;
	for (j = 0; j < ndmas; j++) {
		status = hxge_alloc_rx_cntl_dma(hxgep, st_rdc, &dma_cntl_p[j],
		    rx_cntl_alloc_size);
		if (status != HXGE_OK) {
			break;
		}
		st_rdc++;
	}

	if (j < ndmas) {
		goto hxge_alloc_rx_mem_fail2;
	}

	/* Success: hand the pools over to the hxge soft state. */
	dma_poolp->ndmas = ndmas;
	dma_poolp->num_chunks = num_chunks;
	dma_poolp->buf_allocated = B_TRUE;
	hxgep->rx_buf_pool_p = dma_poolp;
	dma_poolp->dma_buf_pool_p = dma_buf_p;

	dma_cntl_poolp->ndmas = ndmas;
	dma_cntl_poolp->buf_allocated = B_TRUE;
	hxgep->rx_cntl_pool_p = dma_cntl_poolp;
	dma_cntl_poolp->dma_buf_pool_p = dma_cntl_p;

	goto hxge_alloc_rx_mem_pool_exit;

hxge_alloc_rx_mem_fail2:
	/* Free control buffers */
	j--;
	HXGE_DEBUG_MSG((hxgep, DMA_CTL,
	    "==> hxge_alloc_rx_mem_pool: freeing control bufs (%d)", j));
	for (; j >= 0; j--) {
		hxge_free_rx_cntl_dma(hxgep,
		    (p_hxge_dma_common_t)dma_cntl_p[j]);
		HXGE_DEBUG_MSG((hxgep, DMA_CTL,
		    "==> hxge_alloc_rx_mem_pool: control bufs freed (%d)", j));
	}
	HXGE_DEBUG_MSG((hxgep, DMA_CTL,
	    "==> hxge_alloc_rx_mem_pool: control bufs freed (%d)", j));

	/* FALLTHROUGH: data buffers (all ndmas of them) also need freeing. */
hxge_alloc_rx_mem_fail1:
	/* Free data buffers */
	i--;
	HXGE_DEBUG_MSG((hxgep, DMA_CTL,
	    "==> hxge_alloc_rx_mem_pool: freeing data bufs (%d)", i));
	for (; i >= 0; i--) {
		hxge_free_rx_buf_dma(hxgep, (p_hxge_dma_common_t)dma_buf_p[i],
		    num_chunks[i]);
	}
	HXGE_DEBUG_MSG((hxgep, DMA_CTL,
	    "==> hxge_alloc_rx_mem_pool: data bufs freed (%d)", i));

	KMEM_FREE(num_chunks, sizeof (uint32_t) * ndmas);
	KMEM_FREE(dma_poolp, sizeof (hxge_dma_pool_t));
	KMEM_FREE(dma_buf_p, ndmas * sizeof (p_hxge_dma_common_t));
	KMEM_FREE(dma_cntl_poolp, sizeof (hxge_dma_pool_t));
	KMEM_FREE(dma_cntl_p, ndmas * sizeof (p_hxge_dma_common_t));

hxge_alloc_rx_mem_pool_exit:
	HXGE_DEBUG_MSG((hxgep, DMA_CTL,
	    "<== hxge_alloc_rx_mem_pool:status 0x%08x", status));

	return (status);
}
15563dec9fcdSqs 
/*
 * hxge_free_rx_mem_pool() -- tear down the receive DMA memory pools.
 *
 * Releases, in order: the DMA memory backing each channel's data
 * buffer chunks, the DMA memory backing each channel's control area,
 * the per-channel hxge_dma_common_t bookkeeping structures, and
 * finally the pointer arrays and pool containers.  Returns early
 * unless both pools were fully allocated (buf_allocated set).
 */
static void
hxge_free_rx_mem_pool(p_hxge_t hxgep)
{
	uint32_t		i, ndmas;
	p_hxge_dma_pool_t	dma_poolp;
	p_hxge_dma_common_t	*dma_buf_p;
	p_hxge_dma_pool_t	dma_cntl_poolp;
	p_hxge_dma_common_t	*dma_cntl_p;
	uint32_t		*num_chunks;

	HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "==> hxge_free_rx_mem_pool"));

	/* Nothing to do unless the data buffer pool was fully set up. */
	dma_poolp = hxgep->rx_buf_pool_p;
	if (dma_poolp == NULL || (!dma_poolp->buf_allocated)) {
		HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "<== hxge_free_rx_mem_pool "
		    "(null rx buf pool or buf not allocated"));
		return;
	}

	/* Likewise for the control (descriptor/mailbox) pool. */
	dma_cntl_poolp = hxgep->rx_cntl_pool_p;
	if (dma_cntl_poolp == NULL || (!dma_cntl_poolp->buf_allocated)) {
		HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
		    "<== hxge_free_rx_mem_pool "
		    "(null rx cntl buf pool or cntl buf not allocated"));
		return;
	}

	dma_buf_p = dma_poolp->dma_buf_pool_p;
	num_chunks = dma_poolp->num_chunks;

	dma_cntl_p = dma_cntl_poolp->dma_buf_pool_p;
	ndmas = dma_cntl_poolp->ndmas;

	/* Release the DMA memory behind every data buffer chunk. */
	for (i = 0; i < ndmas; i++) {
		hxge_free_rx_buf_dma(hxgep, dma_buf_p[i], num_chunks[i]);
	}

	/* Release the DMA memory behind each channel's control area. */
	for (i = 0; i < ndmas; i++) {
		hxge_free_rx_cntl_dma(hxgep, dma_cntl_p[i]);
	}

	/* Free the per-channel hxge_dma_common_t bookkeeping structures. */
	for (i = 0; i < ndmas; i++) {
		KMEM_FREE(dma_buf_p[i],
		    sizeof (hxge_dma_common_t) * HXGE_DMA_BLOCK);
		KMEM_FREE(dma_cntl_p[i], sizeof (hxge_dma_common_t));
	}

	/* Free the pointer arrays and the pool containers themselves. */
	KMEM_FREE(num_chunks, sizeof (uint32_t) * ndmas);
	KMEM_FREE(dma_cntl_p, ndmas * sizeof (p_hxge_dma_common_t));
	KMEM_FREE(dma_cntl_poolp, sizeof (hxge_dma_pool_t));
	KMEM_FREE(dma_buf_p, ndmas * sizeof (p_hxge_dma_common_t));
	KMEM_FREE(dma_poolp, sizeof (hxge_dma_pool_t));

	hxgep->rx_buf_pool_p = NULL;
	hxgep->rx_cntl_pool_p = NULL;

	HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "<== hxge_free_rx_mem_pool"));
}
16153dec9fcdSqs 
16163dec9fcdSqs static hxge_status_t
16173dec9fcdSqs hxge_alloc_rx_buf_dma(p_hxge_t hxgep, uint16_t dma_channel,
16183dec9fcdSqs     p_hxge_dma_common_t *dmap,
16193dec9fcdSqs     size_t alloc_size, size_t block_size, uint32_t *num_chunks)
16203dec9fcdSqs {
16213dec9fcdSqs 	p_hxge_dma_common_t	rx_dmap;
16223dec9fcdSqs 	hxge_status_t		status = HXGE_OK;
16233dec9fcdSqs 	size_t			total_alloc_size;
16243dec9fcdSqs 	size_t			allocated = 0;
16253dec9fcdSqs 	int			i, size_index, array_size;
16263dec9fcdSqs 
16273dec9fcdSqs 	HXGE_DEBUG_MSG((hxgep, DMA_CTL, "==> hxge_alloc_rx_buf_dma"));
16283dec9fcdSqs 
16293dec9fcdSqs 	rx_dmap = (p_hxge_dma_common_t)
16303dec9fcdSqs 	    KMEM_ZALLOC(sizeof (hxge_dma_common_t) * HXGE_DMA_BLOCK, KM_SLEEP);
16313dec9fcdSqs 
16323dec9fcdSqs 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
16333dec9fcdSqs 	    " alloc_rx_buf_dma rdc %d asize %x bsize %x bbuf %llx ",
16343dec9fcdSqs 	    dma_channel, alloc_size, block_size, dmap));
16353dec9fcdSqs 
16363dec9fcdSqs 	total_alloc_size = alloc_size;
16373dec9fcdSqs 
16383dec9fcdSqs 	i = 0;
16393dec9fcdSqs 	size_index = 0;
16403dec9fcdSqs 	array_size = sizeof (alloc_sizes) / sizeof (size_t);
16413dec9fcdSqs 	while ((alloc_sizes[size_index] < alloc_size) &&
16423dec9fcdSqs 	    (size_index < array_size))
16433dec9fcdSqs 		size_index++;
16443dec9fcdSqs 	if (size_index >= array_size) {
16453dec9fcdSqs 		size_index = array_size - 1;
16463dec9fcdSqs 	}
16473dec9fcdSqs 
16483dec9fcdSqs 	while ((allocated < total_alloc_size) &&
16493dec9fcdSqs 	    (size_index >= 0) && (i < HXGE_DMA_BLOCK)) {
16503dec9fcdSqs 		rx_dmap[i].dma_chunk_index = i;
16513dec9fcdSqs 		rx_dmap[i].block_size = block_size;
16523dec9fcdSqs 		rx_dmap[i].alength = alloc_sizes[size_index];
16533dec9fcdSqs 		rx_dmap[i].orig_alength = rx_dmap[i].alength;
16543dec9fcdSqs 		rx_dmap[i].nblocks = alloc_sizes[size_index] / block_size;
16553dec9fcdSqs 		rx_dmap[i].dma_channel = dma_channel;
16563dec9fcdSqs 		rx_dmap[i].contig_alloc_type = B_FALSE;
16573dec9fcdSqs 
16583dec9fcdSqs 		HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
16593dec9fcdSqs 		    "alloc_rx_buf_dma rdc %d chunk %d bufp %llx size %x "
16603dec9fcdSqs 		    "i %d nblocks %d alength %d",
16613dec9fcdSqs 		    dma_channel, i, &rx_dmap[i], block_size,
16623dec9fcdSqs 		    i, rx_dmap[i].nblocks, rx_dmap[i].alength));
16633dec9fcdSqs 		status = hxge_dma_mem_alloc(hxgep, hxge_force_dma,
16643dec9fcdSqs 		    &hxge_rx_dma_attr, rx_dmap[i].alength,
16653dec9fcdSqs 		    &hxge_dev_buf_dma_acc_attr,
16663dec9fcdSqs 		    DDI_DMA_READ | DDI_DMA_STREAMING,
16673dec9fcdSqs 		    (p_hxge_dma_common_t)(&rx_dmap[i]));
16683dec9fcdSqs 		if (status != HXGE_OK) {
16693dec9fcdSqs 			HXGE_DEBUG_MSG((hxgep, DMA_CTL,
16703dec9fcdSqs 			    " hxge_alloc_rx_buf_dma: Alloc Failed: "
16713dec9fcdSqs 			    " for size: %d", alloc_sizes[size_index]));
16723dec9fcdSqs 			size_index--;
16733dec9fcdSqs 		} else {
16743dec9fcdSqs 			HXGE_DEBUG_MSG((hxgep, DMA_CTL,
16753dec9fcdSqs 			    " alloc_rx_buf_dma allocated rdc %d "
16763dec9fcdSqs 			    "chunk %d size %x dvma %x bufp %llx ",
16773dec9fcdSqs 			    dma_channel, i, rx_dmap[i].alength,
16783dec9fcdSqs 			    rx_dmap[i].ioaddr_pp, &rx_dmap[i]));
16793dec9fcdSqs 			i++;
16803dec9fcdSqs 			allocated += alloc_sizes[size_index];
16813dec9fcdSqs 		}
16823dec9fcdSqs 	}
16833dec9fcdSqs 
16843dec9fcdSqs 	if (allocated < total_alloc_size) {
16853dec9fcdSqs 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
16863dec9fcdSqs 		    " hxge_alloc_rx_buf_dma failed due to"
16873dec9fcdSqs 		    " allocated(%d) < required(%d)",
16883dec9fcdSqs 		    allocated, total_alloc_size));
16893dec9fcdSqs 		goto hxge_alloc_rx_mem_fail1;
16903dec9fcdSqs 	}
16913dec9fcdSqs 
16923dec9fcdSqs 	HXGE_DEBUG_MSG((hxgep, DMA_CTL,
16933dec9fcdSqs 	    " alloc_rx_buf_dma rdc %d allocated %d chunks", dma_channel, i));
16943dec9fcdSqs 
16953dec9fcdSqs 	*num_chunks = i;
16963dec9fcdSqs 	*dmap = rx_dmap;
16973dec9fcdSqs 
16983dec9fcdSqs 	goto hxge_alloc_rx_mem_exit;
16993dec9fcdSqs 
17003dec9fcdSqs hxge_alloc_rx_mem_fail1:
17013dec9fcdSqs 	KMEM_FREE(rx_dmap, sizeof (hxge_dma_common_t) * HXGE_DMA_BLOCK);
17023dec9fcdSqs 
17033dec9fcdSqs hxge_alloc_rx_mem_exit:
17043dec9fcdSqs 	HXGE_DEBUG_MSG((hxgep, DMA_CTL,
17053dec9fcdSqs 	    "<== hxge_alloc_rx_buf_dma status 0x%08x", status));
17063dec9fcdSqs 
17073dec9fcdSqs 	return (status);
17083dec9fcdSqs }
17093dec9fcdSqs 
17103dec9fcdSqs /*ARGSUSED*/
17113dec9fcdSqs static void
17123dec9fcdSqs hxge_free_rx_buf_dma(p_hxge_t hxgep, p_hxge_dma_common_t dmap,
17133dec9fcdSqs     uint32_t num_chunks)
17143dec9fcdSqs {
17153dec9fcdSqs 	int i;
17163dec9fcdSqs 
17173dec9fcdSqs 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
17183dec9fcdSqs 	    "==> hxge_free_rx_buf_dma: # of chunks %d", num_chunks));
17193dec9fcdSqs 
17203dec9fcdSqs 	for (i = 0; i < num_chunks; i++) {
17213dec9fcdSqs 		HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
17223dec9fcdSqs 		    "==> hxge_free_rx_buf_dma: chunk %d dmap 0x%llx", i, dmap));
17233dec9fcdSqs 		hxge_dma_mem_free(dmap++);
17243dec9fcdSqs 	}
17253dec9fcdSqs 
17263dec9fcdSqs 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "<== hxge_free_rx_buf_dma"));
17273dec9fcdSqs }
17283dec9fcdSqs 
17293dec9fcdSqs /*ARGSUSED*/
17303dec9fcdSqs static hxge_status_t
17313dec9fcdSqs hxge_alloc_rx_cntl_dma(p_hxge_t hxgep, uint16_t dma_channel,
17323dec9fcdSqs     p_hxge_dma_common_t *dmap, size_t size)
17333dec9fcdSqs {
17343dec9fcdSqs 	p_hxge_dma_common_t	rx_dmap;
17353dec9fcdSqs 	hxge_status_t		status = HXGE_OK;
17363dec9fcdSqs 
17373dec9fcdSqs 	HXGE_DEBUG_MSG((hxgep, DMA_CTL, "==> hxge_alloc_rx_cntl_dma"));
17383dec9fcdSqs 
17393dec9fcdSqs 	rx_dmap = (p_hxge_dma_common_t)
17403dec9fcdSqs 	    KMEM_ZALLOC(sizeof (hxge_dma_common_t), KM_SLEEP);
17413dec9fcdSqs 
17423dec9fcdSqs 	rx_dmap->contig_alloc_type = B_FALSE;
17433dec9fcdSqs 
17443dec9fcdSqs 	status = hxge_dma_mem_alloc(hxgep, hxge_force_dma,
17453dec9fcdSqs 	    &hxge_desc_dma_attr, size, &hxge_dev_desc_dma_acc_attr,
17463dec9fcdSqs 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT, rx_dmap);
17473dec9fcdSqs 	if (status != HXGE_OK) {
17483dec9fcdSqs 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
17493dec9fcdSqs 		    " hxge_alloc_rx_cntl_dma: Alloc Failed: "
17503dec9fcdSqs 		    " for size: %d", size));
17513dec9fcdSqs 		goto hxge_alloc_rx_cntl_dma_fail1;
17523dec9fcdSqs 	}
17533dec9fcdSqs 
17543dec9fcdSqs 	*dmap = rx_dmap;
17553dec9fcdSqs 
17563dec9fcdSqs 	goto hxge_alloc_rx_cntl_dma_exit;
17573dec9fcdSqs 
17583dec9fcdSqs hxge_alloc_rx_cntl_dma_fail1:
17593dec9fcdSqs 	KMEM_FREE(rx_dmap, sizeof (hxge_dma_common_t));
17603dec9fcdSqs 
17613dec9fcdSqs hxge_alloc_rx_cntl_dma_exit:
17623dec9fcdSqs 	HXGE_DEBUG_MSG((hxgep, DMA_CTL,
17633dec9fcdSqs 	    "<== hxge_alloc_rx_cntl_dma status 0x%08x", status));
17643dec9fcdSqs 
17653dec9fcdSqs 	return (status);
17663dec9fcdSqs }
17673dec9fcdSqs 
/*ARGSUSED*/
/*
 * hxge_free_rx_cntl_dma() -- release the DDI DMA resources held by a
 * receive channel's control area.  The hxge_dma_common_t structure
 * itself is freed by the caller (see hxge_free_rx_mem_pool()).
 */
static void
hxge_free_rx_cntl_dma(p_hxge_t hxgep, p_hxge_dma_common_t dmap)
{
	HXGE_DEBUG_MSG((hxgep, DMA_CTL, "==> hxge_free_rx_cntl_dma"));

	hxge_dma_mem_free(dmap);

	HXGE_DEBUG_MSG((hxgep, DMA_CTL, "<== hxge_free_rx_cntl_dma"));
}
17783dec9fcdSqs 
17793dec9fcdSqs static hxge_status_t
17803dec9fcdSqs hxge_alloc_tx_mem_pool(p_hxge_t hxgep)
17813dec9fcdSqs {
17823dec9fcdSqs 	hxge_status_t		status = HXGE_OK;
17833dec9fcdSqs 	int			i, j;
17843dec9fcdSqs 	uint32_t		ndmas, st_tdc;
17853dec9fcdSqs 	p_hxge_dma_pt_cfg_t	p_all_cfgp;
17863dec9fcdSqs 	p_hxge_hw_pt_cfg_t	p_cfgp;
17873dec9fcdSqs 	p_hxge_dma_pool_t	dma_poolp;
17883dec9fcdSqs 	p_hxge_dma_common_t	*dma_buf_p;
17893dec9fcdSqs 	p_hxge_dma_pool_t	dma_cntl_poolp;
17903dec9fcdSqs 	p_hxge_dma_common_t	*dma_cntl_p;
17913dec9fcdSqs 	size_t			tx_buf_alloc_size;
17923dec9fcdSqs 	size_t			tx_cntl_alloc_size;
17933dec9fcdSqs 	uint32_t		*num_chunks;	/* per dma */
17943dec9fcdSqs 
17953dec9fcdSqs 	HXGE_DEBUG_MSG((hxgep, MEM_CTL, "==> hxge_alloc_tx_mem_pool"));
17963dec9fcdSqs 
17973dec9fcdSqs 	p_all_cfgp = (p_hxge_dma_pt_cfg_t)&hxgep->pt_config;
17983dec9fcdSqs 	p_cfgp = (p_hxge_hw_pt_cfg_t)&p_all_cfgp->hw_config;
17993dec9fcdSqs 	st_tdc = p_cfgp->start_tdc;
18003dec9fcdSqs 	ndmas = p_cfgp->max_tdcs;
18013dec9fcdSqs 
18023dec9fcdSqs 	HXGE_DEBUG_MSG((hxgep, MEM_CTL, "==> hxge_alloc_tx_mem_pool: "
18033dec9fcdSqs 	    "p_cfgp 0x%016llx start_tdc %d ndmas %d hxgep->max_tdcs %d",
18043dec9fcdSqs 	    p_cfgp, p_cfgp->start_tdc, p_cfgp->max_tdcs, hxgep->max_tdcs));
18053dec9fcdSqs 	/*
18063dec9fcdSqs 	 * Allocate memory for each transmit DMA channel.
18073dec9fcdSqs 	 */
18083dec9fcdSqs 	dma_poolp = (p_hxge_dma_pool_t)KMEM_ZALLOC(sizeof (hxge_dma_pool_t),
18093dec9fcdSqs 	    KM_SLEEP);
18103dec9fcdSqs 	dma_buf_p = (p_hxge_dma_common_t *)KMEM_ZALLOC(
18113dec9fcdSqs 	    sizeof (p_hxge_dma_common_t) * ndmas, KM_SLEEP);
18123dec9fcdSqs 
18133dec9fcdSqs 	dma_cntl_poolp = (p_hxge_dma_pool_t)
18143dec9fcdSqs 	    KMEM_ZALLOC(sizeof (hxge_dma_pool_t), KM_SLEEP);
18153dec9fcdSqs 	dma_cntl_p = (p_hxge_dma_common_t *)KMEM_ZALLOC(
18163dec9fcdSqs 	    sizeof (p_hxge_dma_common_t) * ndmas, KM_SLEEP);
18173dec9fcdSqs 
18183dec9fcdSqs 	hxgep->hxge_port_tx_ring_size = hxge_tx_ring_size;
18193dec9fcdSqs 
18203dec9fcdSqs 	/*
18213dec9fcdSqs 	 * Assume that each DMA channel will be configured with default
18223dec9fcdSqs 	 * transmit bufer size for copying transmit data. (For packet payload
18233dec9fcdSqs 	 * over this limit, packets will not be copied.)
18243dec9fcdSqs 	 */
18253dec9fcdSqs 	tx_buf_alloc_size = (hxge_bcopy_thresh * hxge_tx_ring_size);
18263dec9fcdSqs 
18273dec9fcdSqs 	/*
18283dec9fcdSqs 	 * Addresses of transmit descriptor ring and the mailbox must be all
18293dec9fcdSqs 	 * cache-aligned (64 bytes).
18303dec9fcdSqs 	 */
18313dec9fcdSqs 	tx_cntl_alloc_size = hxge_tx_ring_size;
18323dec9fcdSqs 	tx_cntl_alloc_size *= (sizeof (tx_desc_t));
18333dec9fcdSqs 	tx_cntl_alloc_size += sizeof (txdma_mailbox_t);
18343dec9fcdSqs 
18353dec9fcdSqs 	num_chunks = (uint32_t *)KMEM_ZALLOC(sizeof (uint32_t) * ndmas,
18363dec9fcdSqs 	    KM_SLEEP);
18373dec9fcdSqs 
18383dec9fcdSqs 	/*
18393dec9fcdSqs 	 * Allocate memory for transmit buffers and descriptor rings. Replace
18403dec9fcdSqs 	 * allocation functions with interface functions provided by the
18413dec9fcdSqs 	 * partition manager when it is available.
18423dec9fcdSqs 	 *
18433dec9fcdSqs 	 * Allocate memory for the transmit buffer pool.
18443dec9fcdSqs 	 */
18453dec9fcdSqs 	for (i = 0; i < ndmas; i++) {
18463dec9fcdSqs 		num_chunks[i] = 0;
18473dec9fcdSqs 		status = hxge_alloc_tx_buf_dma(hxgep, st_tdc, &dma_buf_p[i],
18483dec9fcdSqs 		    tx_buf_alloc_size, hxge_bcopy_thresh, &num_chunks[i]);
18493dec9fcdSqs 		if (status != HXGE_OK) {
18503dec9fcdSqs 			break;
18513dec9fcdSqs 		}
18523dec9fcdSqs 		st_tdc++;
18533dec9fcdSqs 	}
18543dec9fcdSqs 
18553dec9fcdSqs 	if (i < ndmas) {
18563dec9fcdSqs 		goto hxge_alloc_tx_mem_pool_fail1;
18573dec9fcdSqs 	}
18583dec9fcdSqs 
18593dec9fcdSqs 	st_tdc = p_cfgp->start_tdc;
18603dec9fcdSqs 
18613dec9fcdSqs 	/*
18623dec9fcdSqs 	 * Allocate memory for descriptor rings and mailbox.
18633dec9fcdSqs 	 */
18643dec9fcdSqs 	for (j = 0; j < ndmas; j++) {
18653dec9fcdSqs 		status = hxge_alloc_tx_cntl_dma(hxgep, st_tdc, &dma_cntl_p[j],
18663dec9fcdSqs 		    tx_cntl_alloc_size);
18673dec9fcdSqs 		if (status != HXGE_OK) {
18683dec9fcdSqs 			break;
18693dec9fcdSqs 		}
18703dec9fcdSqs 		st_tdc++;
18713dec9fcdSqs 	}
18723dec9fcdSqs 
18733dec9fcdSqs 	if (j < ndmas) {
18743dec9fcdSqs 		goto hxge_alloc_tx_mem_pool_fail2;
18753dec9fcdSqs 	}
18763dec9fcdSqs 
18773dec9fcdSqs 	dma_poolp->ndmas = ndmas;
18783dec9fcdSqs 	dma_poolp->num_chunks = num_chunks;
18793dec9fcdSqs 	dma_poolp->buf_allocated = B_TRUE;
18803dec9fcdSqs 	dma_poolp->dma_buf_pool_p = dma_buf_p;
18813dec9fcdSqs 	hxgep->tx_buf_pool_p = dma_poolp;
18823dec9fcdSqs 
18833dec9fcdSqs 	dma_cntl_poolp->ndmas = ndmas;
18843dec9fcdSqs 	dma_cntl_poolp->buf_allocated = B_TRUE;
18853dec9fcdSqs 	dma_cntl_poolp->dma_buf_pool_p = dma_cntl_p;
18863dec9fcdSqs 	hxgep->tx_cntl_pool_p = dma_cntl_poolp;
18873dec9fcdSqs 
18883dec9fcdSqs 	HXGE_DEBUG_MSG((hxgep, MEM_CTL,
18893dec9fcdSqs 	    "==> hxge_alloc_tx_mem_pool: start_tdc %d "
18903dec9fcdSqs 	    "ndmas %d poolp->ndmas %d", st_tdc, ndmas, dma_poolp->ndmas));
18913dec9fcdSqs 
18923dec9fcdSqs 	goto hxge_alloc_tx_mem_pool_exit;
18933dec9fcdSqs 
18943dec9fcdSqs hxge_alloc_tx_mem_pool_fail2:
18953dec9fcdSqs 	/* Free control buffers */
18963dec9fcdSqs 	j--;
18973dec9fcdSqs 	for (; j >= 0; j--) {
18983dec9fcdSqs 		hxge_free_tx_cntl_dma(hxgep,
18993dec9fcdSqs 		    (p_hxge_dma_common_t)dma_cntl_p[j]);
19003dec9fcdSqs 	}
19013dec9fcdSqs 
19023dec9fcdSqs hxge_alloc_tx_mem_pool_fail1:
19033dec9fcdSqs 	/* Free data buffers */
19043dec9fcdSqs 	i--;
19053dec9fcdSqs 	for (; i >= 0; i--) {
19063dec9fcdSqs 		hxge_free_tx_buf_dma(hxgep, (p_hxge_dma_common_t)dma_buf_p[i],
19073dec9fcdSqs 		    num_chunks[i]);
19083dec9fcdSqs 	}
19093dec9fcdSqs 
19103dec9fcdSqs 	KMEM_FREE(dma_poolp, sizeof (hxge_dma_pool_t));
19113dec9fcdSqs 	KMEM_FREE(dma_buf_p, ndmas * sizeof (p_hxge_dma_common_t));
19123dec9fcdSqs 	KMEM_FREE(dma_cntl_poolp, sizeof (hxge_dma_pool_t));
19133dec9fcdSqs 	KMEM_FREE(dma_cntl_p, ndmas * sizeof (p_hxge_dma_common_t));
19143dec9fcdSqs 	KMEM_FREE(num_chunks, sizeof (uint32_t) * ndmas);
19153dec9fcdSqs 
19163dec9fcdSqs hxge_alloc_tx_mem_pool_exit:
19173dec9fcdSqs 	HXGE_DEBUG_MSG((hxgep, MEM_CTL,
19183dec9fcdSqs 	    "<== hxge_alloc_tx_mem_pool:status 0x%08x", status));
19193dec9fcdSqs 
19203dec9fcdSqs 	return (status);
19213dec9fcdSqs }
19223dec9fcdSqs 
19233dec9fcdSqs static hxge_status_t
19243dec9fcdSqs hxge_alloc_tx_buf_dma(p_hxge_t hxgep, uint16_t dma_channel,
19253dec9fcdSqs     p_hxge_dma_common_t *dmap, size_t alloc_size,
19263dec9fcdSqs     size_t block_size, uint32_t *num_chunks)
19273dec9fcdSqs {
19283dec9fcdSqs 	p_hxge_dma_common_t	tx_dmap;
19293dec9fcdSqs 	hxge_status_t		status = HXGE_OK;
19303dec9fcdSqs 	size_t			total_alloc_size;
19313dec9fcdSqs 	size_t			allocated = 0;
19323dec9fcdSqs 	int			i, size_index, array_size;
19333dec9fcdSqs 
19343dec9fcdSqs 	HXGE_DEBUG_MSG((hxgep, DMA_CTL, "==> hxge_alloc_tx_buf_dma"));
19353dec9fcdSqs 
19363dec9fcdSqs 	tx_dmap = (p_hxge_dma_common_t)
19373dec9fcdSqs 	    KMEM_ZALLOC(sizeof (hxge_dma_common_t) * HXGE_DMA_BLOCK, KM_SLEEP);
19383dec9fcdSqs 
19393dec9fcdSqs 	total_alloc_size = alloc_size;
19403dec9fcdSqs 	i = 0;
19413dec9fcdSqs 	size_index = 0;
19423dec9fcdSqs 	array_size = sizeof (alloc_sizes) / sizeof (size_t);
19433dec9fcdSqs 	while ((alloc_sizes[size_index] < alloc_size) &&
19443dec9fcdSqs 	    (size_index < array_size))
19453dec9fcdSqs 		size_index++;
19463dec9fcdSqs 	if (size_index >= array_size) {
19473dec9fcdSqs 		size_index = array_size - 1;
19483dec9fcdSqs 	}
19493dec9fcdSqs 
19503dec9fcdSqs 	while ((allocated < total_alloc_size) &&
19513dec9fcdSqs 	    (size_index >= 0) && (i < HXGE_DMA_BLOCK)) {
19523dec9fcdSqs 		tx_dmap[i].dma_chunk_index = i;
19533dec9fcdSqs 		tx_dmap[i].block_size = block_size;
19543dec9fcdSqs 		tx_dmap[i].alength = alloc_sizes[size_index];
19553dec9fcdSqs 		tx_dmap[i].orig_alength = tx_dmap[i].alength;
19563dec9fcdSqs 		tx_dmap[i].nblocks = alloc_sizes[size_index] / block_size;
19573dec9fcdSqs 		tx_dmap[i].dma_channel = dma_channel;
19583dec9fcdSqs 		tx_dmap[i].contig_alloc_type = B_FALSE;
19593dec9fcdSqs 
19603dec9fcdSqs 		status = hxge_dma_mem_alloc(hxgep, hxge_force_dma,
19613dec9fcdSqs 		    &hxge_tx_dma_attr, tx_dmap[i].alength,
19623dec9fcdSqs 		    &hxge_dev_buf_dma_acc_attr,
19633dec9fcdSqs 		    DDI_DMA_WRITE | DDI_DMA_STREAMING,
19643dec9fcdSqs 		    (p_hxge_dma_common_t)(&tx_dmap[i]));
19653dec9fcdSqs 		if (status != HXGE_OK) {
19663dec9fcdSqs 			HXGE_DEBUG_MSG((hxgep, DMA_CTL,
19673dec9fcdSqs 			    " hxge_alloc_tx_buf_dma: Alloc Failed: "
19683dec9fcdSqs 			    " for size: %d", alloc_sizes[size_index]));
19693dec9fcdSqs 			size_index--;
19703dec9fcdSqs 		} else {
19713dec9fcdSqs 			i++;
19723dec9fcdSqs 			allocated += alloc_sizes[size_index];
19733dec9fcdSqs 		}
19743dec9fcdSqs 	}
19753dec9fcdSqs 
19763dec9fcdSqs 	if (allocated < total_alloc_size) {
19773dec9fcdSqs 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
19783dec9fcdSqs 		    " hxge_alloc_tx_buf_dma: failed due to"
19793dec9fcdSqs 		    " allocated(%d) < required(%d)",
19803dec9fcdSqs 		    allocated, total_alloc_size));
19813dec9fcdSqs 		goto hxge_alloc_tx_mem_fail1;
19823dec9fcdSqs 	}
19833dec9fcdSqs 
19843dec9fcdSqs 	*num_chunks = i;
19853dec9fcdSqs 	*dmap = tx_dmap;
19863dec9fcdSqs 	HXGE_DEBUG_MSG((hxgep, DMA_CTL,
19873dec9fcdSqs 	    "==> hxge_alloc_tx_buf_dma dmap 0x%016llx num chunks %d",
19883dec9fcdSqs 	    *dmap, i));
19893dec9fcdSqs 	goto hxge_alloc_tx_mem_exit;
19903dec9fcdSqs 
19913dec9fcdSqs hxge_alloc_tx_mem_fail1:
19923dec9fcdSqs 	KMEM_FREE(tx_dmap, sizeof (hxge_dma_common_t) * HXGE_DMA_BLOCK);
19933dec9fcdSqs 
19943dec9fcdSqs hxge_alloc_tx_mem_exit:
19953dec9fcdSqs 	HXGE_DEBUG_MSG((hxgep, DMA_CTL,
19963dec9fcdSqs 	    "<== hxge_alloc_tx_buf_dma status 0x%08x", status));
19973dec9fcdSqs 
19983dec9fcdSqs 	return (status);
19993dec9fcdSqs }
20003dec9fcdSqs 
20013dec9fcdSqs /*ARGSUSED*/
20023dec9fcdSqs static void
20033dec9fcdSqs hxge_free_tx_buf_dma(p_hxge_t hxgep, p_hxge_dma_common_t dmap,
20043dec9fcdSqs     uint32_t num_chunks)
20053dec9fcdSqs {
20063dec9fcdSqs 	int i;
20073dec9fcdSqs 
20083dec9fcdSqs 	HXGE_DEBUG_MSG((hxgep, MEM_CTL, "==> hxge_free_tx_buf_dma"));
20093dec9fcdSqs 
20103dec9fcdSqs 	for (i = 0; i < num_chunks; i++) {
20113dec9fcdSqs 		hxge_dma_mem_free(dmap++);
20123dec9fcdSqs 	}
20133dec9fcdSqs 
20143dec9fcdSqs 	HXGE_DEBUG_MSG((hxgep, MEM_CTL, "<== hxge_free_tx_buf_dma"));
20153dec9fcdSqs }
20163dec9fcdSqs 
20173dec9fcdSqs /*ARGSUSED*/
20183dec9fcdSqs static hxge_status_t
20193dec9fcdSqs hxge_alloc_tx_cntl_dma(p_hxge_t hxgep, uint16_t dma_channel,
20203dec9fcdSqs     p_hxge_dma_common_t *dmap, size_t size)
20213dec9fcdSqs {
20223dec9fcdSqs 	p_hxge_dma_common_t	tx_dmap;
20233dec9fcdSqs 	hxge_status_t		status = HXGE_OK;
20243dec9fcdSqs 
20253dec9fcdSqs 	HXGE_DEBUG_MSG((hxgep, DMA_CTL, "==> hxge_alloc_tx_cntl_dma"));
20263dec9fcdSqs 
20273dec9fcdSqs 	tx_dmap = (p_hxge_dma_common_t)KMEM_ZALLOC(sizeof (hxge_dma_common_t),
20283dec9fcdSqs 	    KM_SLEEP);
20293dec9fcdSqs 
20303dec9fcdSqs 	tx_dmap->contig_alloc_type = B_FALSE;
20313dec9fcdSqs 
20323dec9fcdSqs 	status = hxge_dma_mem_alloc(hxgep, hxge_force_dma,
20333dec9fcdSqs 	    &hxge_desc_dma_attr, size, &hxge_dev_desc_dma_acc_attr,
20343dec9fcdSqs 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT, tx_dmap);
20353dec9fcdSqs 	if (status != HXGE_OK) {
20363dec9fcdSqs 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
20373dec9fcdSqs 		    " hxge_alloc_tx_cntl_dma: Alloc Failed: "
20383dec9fcdSqs 		    " for size: %d", size));
20393dec9fcdSqs 		goto hxge_alloc_tx_cntl_dma_fail1;
20403dec9fcdSqs 	}
20413dec9fcdSqs 
20423dec9fcdSqs 	*dmap = tx_dmap;
20433dec9fcdSqs 
20443dec9fcdSqs 	goto hxge_alloc_tx_cntl_dma_exit;
20453dec9fcdSqs 
20463dec9fcdSqs hxge_alloc_tx_cntl_dma_fail1:
20473dec9fcdSqs 	KMEM_FREE(tx_dmap, sizeof (hxge_dma_common_t));
20483dec9fcdSqs 
20493dec9fcdSqs hxge_alloc_tx_cntl_dma_exit:
20503dec9fcdSqs 	HXGE_DEBUG_MSG((hxgep, DMA_CTL,
20513dec9fcdSqs 	    "<== hxge_alloc_tx_cntl_dma status 0x%08x", status));
20523dec9fcdSqs 
20533dec9fcdSqs 	return (status);
20543dec9fcdSqs }
20553dec9fcdSqs 
/*ARGSUSED*/
/*
 * hxge_free_tx_cntl_dma() -- release the DDI DMA resources held by a
 * transmit channel's control area.  The hxge_dma_common_t structure
 * itself is freed by the caller (see hxge_free_tx_mem_pool()).
 */
static void
hxge_free_tx_cntl_dma(p_hxge_t hxgep, p_hxge_dma_common_t dmap)
{
	HXGE_DEBUG_MSG((hxgep, DMA_CTL, "==> hxge_free_tx_cntl_dma"));

	hxge_dma_mem_free(dmap);

	HXGE_DEBUG_MSG((hxgep, DMA_CTL, "<== hxge_free_tx_cntl_dma"));
}
20663dec9fcdSqs 
/*
 * hxge_free_tx_mem_pool() -- tear down the transmit DMA memory pools.
 *
 * Releases, in order: the DMA memory backing each channel's staging
 * buffer chunks, the DMA memory backing each channel's control area
 * (descriptor ring + mailbox), the per-channel hxge_dma_common_t
 * bookkeeping structures, and finally the pointer arrays and pool
 * containers.  Returns early unless both pools were fully allocated.
 */
static void
hxge_free_tx_mem_pool(p_hxge_t hxgep)
{
	uint32_t		i, ndmas;
	p_hxge_dma_pool_t	dma_poolp;
	p_hxge_dma_common_t	*dma_buf_p;
	p_hxge_dma_pool_t	dma_cntl_poolp;
	p_hxge_dma_common_t	*dma_cntl_p;
	uint32_t		*num_chunks;

	HXGE_DEBUG_MSG((hxgep, MEM3_CTL, "==> hxge_free_tx_mem_pool"));

	/* Nothing to do unless the data buffer pool was fully set up. */
	dma_poolp = hxgep->tx_buf_pool_p;
	if (dma_poolp == NULL || (!dma_poolp->buf_allocated)) {
		HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
		    "<== hxge_free_tx_mem_pool "
		    "(null rx buf pool or buf not allocated"));
		return;
	}

	/* Likewise for the control (descriptor ring/mailbox) pool. */
	dma_cntl_poolp = hxgep->tx_cntl_pool_p;
	if (dma_cntl_poolp == NULL || (!dma_cntl_poolp->buf_allocated)) {
		HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
		    "<== hxge_free_tx_mem_pool "
		    "(null tx cntl buf pool or cntl buf not allocated"));
		return;
	}

	dma_buf_p = dma_poolp->dma_buf_pool_p;
	num_chunks = dma_poolp->num_chunks;

	dma_cntl_p = dma_cntl_poolp->dma_buf_pool_p;
	ndmas = dma_cntl_poolp->ndmas;

	/* Release the DMA memory behind every staging buffer chunk. */
	for (i = 0; i < ndmas; i++) {
		hxge_free_tx_buf_dma(hxgep, dma_buf_p[i], num_chunks[i]);
	}

	/* Release the DMA memory behind each channel's control area. */
	for (i = 0; i < ndmas; i++) {
		hxge_free_tx_cntl_dma(hxgep, dma_cntl_p[i]);
	}

	/* Free the per-channel hxge_dma_common_t bookkeeping structures. */
	for (i = 0; i < ndmas; i++) {
		KMEM_FREE(dma_buf_p[i],
		    sizeof (hxge_dma_common_t) * HXGE_DMA_BLOCK);
		KMEM_FREE(dma_cntl_p[i], sizeof (hxge_dma_common_t));
	}

	/* Free the pointer arrays and the pool containers themselves. */
	KMEM_FREE(num_chunks, sizeof (uint32_t) * ndmas);
	KMEM_FREE(dma_cntl_p, ndmas * sizeof (p_hxge_dma_common_t));
	KMEM_FREE(dma_cntl_poolp, sizeof (hxge_dma_pool_t));
	KMEM_FREE(dma_buf_p, ndmas * sizeof (p_hxge_dma_common_t));
	KMEM_FREE(dma_poolp, sizeof (hxge_dma_pool_t));

	hxgep->tx_buf_pool_p = NULL;
	hxgep->tx_cntl_pool_p = NULL;

	HXGE_DEBUG_MSG((hxgep, MEM3_CTL, "<== hxge_free_tx_mem_pool"));
}
21263dec9fcdSqs 
21273dec9fcdSqs /*ARGSUSED*/
21283dec9fcdSqs static hxge_status_t
21293dec9fcdSqs hxge_dma_mem_alloc(p_hxge_t hxgep, dma_method_t method,
21303dec9fcdSqs     struct ddi_dma_attr *dma_attrp,
21313dec9fcdSqs     size_t length, ddi_device_acc_attr_t *acc_attr_p, uint_t xfer_flags,
21323dec9fcdSqs     p_hxge_dma_common_t dma_p)
21333dec9fcdSqs {
21343dec9fcdSqs 	caddr_t		kaddrp;
21353dec9fcdSqs 	int		ddi_status = DDI_SUCCESS;
21363dec9fcdSqs 
21373dec9fcdSqs 	dma_p->dma_handle = NULL;
21383dec9fcdSqs 	dma_p->acc_handle = NULL;
21393dec9fcdSqs 	dma_p->kaddrp = NULL;
21403dec9fcdSqs 
21413dec9fcdSqs 	ddi_status = ddi_dma_alloc_handle(hxgep->dip, dma_attrp,
21423dec9fcdSqs 	    DDI_DMA_DONTWAIT, NULL, &dma_p->dma_handle);
21433dec9fcdSqs 	if (ddi_status != DDI_SUCCESS) {
21443dec9fcdSqs 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
21453dec9fcdSqs 		    "hxge_dma_mem_alloc:ddi_dma_alloc_handle failed."));
21463dec9fcdSqs 		return (HXGE_ERROR | HXGE_DDI_FAILED);
21473dec9fcdSqs 	}
21483dec9fcdSqs 
21493dec9fcdSqs 	ddi_status = ddi_dma_mem_alloc(dma_p->dma_handle, length, acc_attr_p,
21503dec9fcdSqs 	    xfer_flags, DDI_DMA_DONTWAIT, 0, &kaddrp, &dma_p->alength,
21513dec9fcdSqs 	    &dma_p->acc_handle);
21523dec9fcdSqs 	if (ddi_status != DDI_SUCCESS) {
21533dec9fcdSqs 		/* The caller will decide whether it is fatal */
21543dec9fcdSqs 		HXGE_DEBUG_MSG((hxgep, DMA_CTL,
21553dec9fcdSqs 		    "hxge_dma_mem_alloc:ddi_dma_mem_alloc failed"));
21563dec9fcdSqs 		ddi_dma_free_handle(&dma_p->dma_handle);
21573dec9fcdSqs 		dma_p->dma_handle = NULL;
21583dec9fcdSqs 		return (HXGE_ERROR | HXGE_DDI_FAILED);
21593dec9fcdSqs 	}
21603dec9fcdSqs 
21613dec9fcdSqs 	if (dma_p->alength < length) {
21623dec9fcdSqs 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
21633dec9fcdSqs 		    "hxge_dma_mem_alloc:ddi_dma_mem_alloc < length."));
21643dec9fcdSqs 		ddi_dma_mem_free(&dma_p->acc_handle);
21653dec9fcdSqs 		ddi_dma_free_handle(&dma_p->dma_handle);
21663dec9fcdSqs 		dma_p->acc_handle = NULL;
21673dec9fcdSqs 		dma_p->dma_handle = NULL;
21683dec9fcdSqs 		return (HXGE_ERROR);
21693dec9fcdSqs 	}
21703dec9fcdSqs 
21713dec9fcdSqs 	ddi_status = ddi_dma_addr_bind_handle(dma_p->dma_handle, NULL,
21723dec9fcdSqs 	    kaddrp, dma_p->alength, xfer_flags, DDI_DMA_DONTWAIT, 0,
21733dec9fcdSqs 	    &dma_p->dma_cookie, &dma_p->ncookies);
21743dec9fcdSqs 	if (ddi_status != DDI_DMA_MAPPED) {
21753dec9fcdSqs 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
21763dec9fcdSqs 		    "hxge_dma_mem_alloc:di_dma_addr_bind failed "
21773dec9fcdSqs 		    "(staus 0x%x ncookies %d.)", ddi_status, dma_p->ncookies));
21783dec9fcdSqs 		if (dma_p->acc_handle) {
21793dec9fcdSqs 			ddi_dma_mem_free(&dma_p->acc_handle);
21803dec9fcdSqs 			dma_p->acc_handle = NULL;
21813dec9fcdSqs 		}
21823dec9fcdSqs 		ddi_dma_free_handle(&dma_p->dma_handle);
21833dec9fcdSqs 		dma_p->dma_handle = NULL;
21843dec9fcdSqs 		return (HXGE_ERROR | HXGE_DDI_FAILED);
21853dec9fcdSqs 	}
21863dec9fcdSqs 
21873dec9fcdSqs 	if (dma_p->ncookies != 1) {
21883dec9fcdSqs 		HXGE_DEBUG_MSG((hxgep, DMA_CTL,
21893dec9fcdSqs 		    "hxge_dma_mem_alloc:ddi_dma_addr_bind > 1 cookie"
21903dec9fcdSqs 		    "(staus 0x%x ncookies %d.)", ddi_status, dma_p->ncookies));
21913dec9fcdSqs 		if (dma_p->acc_handle) {
21923dec9fcdSqs 			ddi_dma_mem_free(&dma_p->acc_handle);
21933dec9fcdSqs 			dma_p->acc_handle = NULL;
21943dec9fcdSqs 		}
21953dec9fcdSqs 		(void) ddi_dma_unbind_handle(dma_p->dma_handle);
21963dec9fcdSqs 		ddi_dma_free_handle(&dma_p->dma_handle);
21973dec9fcdSqs 		dma_p->dma_handle = NULL;
21983dec9fcdSqs 		return (HXGE_ERROR);
21993dec9fcdSqs 	}
22003dec9fcdSqs 
22013dec9fcdSqs 	dma_p->kaddrp = kaddrp;
22023dec9fcdSqs #if defined(__i386)
22033dec9fcdSqs 	dma_p->ioaddr_pp =
22043dec9fcdSqs 	    (unsigned char *)(uint32_t)dma_p->dma_cookie.dmac_laddress;
22053dec9fcdSqs #else
22063dec9fcdSqs 	dma_p->ioaddr_pp = (unsigned char *) dma_p->dma_cookie.dmac_laddress;
22073dec9fcdSqs #endif
22083dec9fcdSqs 
22093dec9fcdSqs 	HPI_DMA_ACC_HANDLE_SET(dma_p, dma_p->acc_handle);
22103dec9fcdSqs 
22113dec9fcdSqs 	HXGE_DEBUG_MSG((hxgep, DMA_CTL, "<== hxge_dma_mem_alloc: "
22123dec9fcdSqs 	    "dma buffer allocated: dma_p $%p "
22133dec9fcdSqs 	    "return dmac_ladress from cookie $%p dmac_size %d "
22143dec9fcdSqs 	    "dma_p->ioaddr_p $%p "
22153dec9fcdSqs 	    "dma_p->orig_ioaddr_p $%p "
22163dec9fcdSqs 	    "orig_vatopa $%p "
22173dec9fcdSqs 	    "alength %d (0x%x) "
22183dec9fcdSqs 	    "kaddrp $%p "
22193dec9fcdSqs 	    "length %d (0x%x)",
22203dec9fcdSqs 	    dma_p,
22213dec9fcdSqs 	    dma_p->dma_cookie.dmac_laddress,
22223dec9fcdSqs 	    dma_p->dma_cookie.dmac_size,
22233dec9fcdSqs 	    dma_p->ioaddr_pp,
22243dec9fcdSqs 	    dma_p->orig_ioaddr_pp,
22253dec9fcdSqs 	    dma_p->orig_vatopa,
22263dec9fcdSqs 	    dma_p->alength, dma_p->alength,
22273dec9fcdSqs 	    kaddrp,
22283dec9fcdSqs 	    length, length));
22293dec9fcdSqs 
22303dec9fcdSqs 	return (HXGE_OK);
22313dec9fcdSqs }
22323dec9fcdSqs 
22333dec9fcdSqs static void
22343dec9fcdSqs hxge_dma_mem_free(p_hxge_dma_common_t dma_p)
22353dec9fcdSqs {
22363dec9fcdSqs 	if (dma_p->dma_handle != NULL) {
22373dec9fcdSqs 		if (dma_p->ncookies) {
22383dec9fcdSqs 			(void) ddi_dma_unbind_handle(dma_p->dma_handle);
22393dec9fcdSqs 			dma_p->ncookies = 0;
22403dec9fcdSqs 		}
22413dec9fcdSqs 		ddi_dma_free_handle(&dma_p->dma_handle);
22423dec9fcdSqs 		dma_p->dma_handle = NULL;
22433dec9fcdSqs 	}
22443dec9fcdSqs 	if (dma_p->acc_handle != NULL) {
22453dec9fcdSqs 		ddi_dma_mem_free(&dma_p->acc_handle);
22463dec9fcdSqs 		dma_p->acc_handle = NULL;
22473dec9fcdSqs 		HPI_DMA_ACC_HANDLE_SET(dma_p, NULL);
22483dec9fcdSqs 	}
22493dec9fcdSqs 	dma_p->kaddrp = NULL;
22503dec9fcdSqs 	dma_p->alength = NULL;
22513dec9fcdSqs }
22523dec9fcdSqs 
22533dec9fcdSqs /*
22543dec9fcdSqs  *	hxge_m_start() -- start transmitting and receiving.
22553dec9fcdSqs  *
22563dec9fcdSqs  *	This function is called by the MAC layer when the first
22573dec9fcdSqs  *	stream is open to prepare the hardware ready for sending
22583dec9fcdSqs  *	and transmitting packets.
22593dec9fcdSqs  */
22603dec9fcdSqs static int
22613dec9fcdSqs hxge_m_start(void *arg)
22623dec9fcdSqs {
22633dec9fcdSqs 	p_hxge_t hxgep = (p_hxge_t)arg;
22643dec9fcdSqs 
22653dec9fcdSqs 	HXGE_DEBUG_MSG((hxgep, NEMO_CTL, "==> hxge_m_start"));
22663dec9fcdSqs 
22673dec9fcdSqs 	MUTEX_ENTER(hxgep->genlock);
22683dec9fcdSqs 
22693dec9fcdSqs 	if (hxge_init(hxgep) != DDI_SUCCESS) {
22703dec9fcdSqs 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
22713dec9fcdSqs 		    "<== hxge_m_start: initialization failed"));
22723dec9fcdSqs 		MUTEX_EXIT(hxgep->genlock);
22733dec9fcdSqs 		return (EIO);
22743dec9fcdSqs 	}
22753dec9fcdSqs 
22763dec9fcdSqs 	if (hxgep->hxge_mac_state != HXGE_MAC_STARTED) {
22773dec9fcdSqs 		/*
22783dec9fcdSqs 		 * Start timer to check the system error and tx hangs
22793dec9fcdSqs 		 */
22803dec9fcdSqs 		hxgep->hxge_timerid = hxge_start_timer(hxgep,
22813dec9fcdSqs 		    hxge_check_hw_state, HXGE_CHECK_TIMER);
22823dec9fcdSqs 
22833dec9fcdSqs 		hxgep->hxge_mac_state = HXGE_MAC_STARTED;
22843dec9fcdSqs 	}
22853dec9fcdSqs 
22863dec9fcdSqs 	MUTEX_EXIT(hxgep->genlock);
22873dec9fcdSqs 
22883dec9fcdSqs 	HXGE_DEBUG_MSG((hxgep, NEMO_CTL, "<== hxge_m_start"));
22893dec9fcdSqs 
22903dec9fcdSqs 	return (0);
22913dec9fcdSqs }
22923dec9fcdSqs 
22933dec9fcdSqs /*
22943dec9fcdSqs  * hxge_m_stop(): stop transmitting and receiving.
22953dec9fcdSqs  */
22963dec9fcdSqs static void
22973dec9fcdSqs hxge_m_stop(void *arg)
22983dec9fcdSqs {
22993dec9fcdSqs 	p_hxge_t hxgep = (p_hxge_t)arg;
23003dec9fcdSqs 
23013dec9fcdSqs 	HXGE_DEBUG_MSG((hxgep, NEMO_CTL, "==> hxge_m_stop"));
23023dec9fcdSqs 
23033dec9fcdSqs 	if (hxgep->hxge_timerid) {
23043dec9fcdSqs 		hxge_stop_timer(hxgep, hxgep->hxge_timerid);
23053dec9fcdSqs 		hxgep->hxge_timerid = 0;
23063dec9fcdSqs 	}
23073dec9fcdSqs 
23083dec9fcdSqs 	MUTEX_ENTER(hxgep->genlock);
23093dec9fcdSqs 
23103dec9fcdSqs 	hxge_uninit(hxgep);
23113dec9fcdSqs 
23123dec9fcdSqs 	hxgep->hxge_mac_state = HXGE_MAC_STOPPED;
23133dec9fcdSqs 
23143dec9fcdSqs 	MUTEX_EXIT(hxgep->genlock);
23153dec9fcdSqs 
23163dec9fcdSqs 	HXGE_DEBUG_MSG((hxgep, NEMO_CTL, "<== hxge_m_stop"));
23173dec9fcdSqs }
23183dec9fcdSqs 
23193dec9fcdSqs static int
23203dec9fcdSqs hxge_m_unicst(void *arg, const uint8_t *macaddr)
23213dec9fcdSqs {
23223dec9fcdSqs 	p_hxge_t		hxgep = (p_hxge_t)arg;
23233dec9fcdSqs 	struct ether_addr	addrp;
23243dec9fcdSqs 	hxge_status_t		status;
23253dec9fcdSqs 
23263dec9fcdSqs 	HXGE_DEBUG_MSG((hxgep, MAC_CTL, "==> hxge_m_unicst"));
23273dec9fcdSqs 
23283dec9fcdSqs 	bcopy(macaddr, (uint8_t *)&addrp, ETHERADDRL);
23293dec9fcdSqs 
23303dec9fcdSqs 	status = hxge_set_mac_addr(hxgep, &addrp);
23313dec9fcdSqs 	if (status != HXGE_OK) {
23323dec9fcdSqs 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
23333dec9fcdSqs 		    "<== hxge_m_unicst: set unitcast failed"));
23343dec9fcdSqs 		return (EINVAL);
23353dec9fcdSqs 	}
23363dec9fcdSqs 
23373dec9fcdSqs 	HXGE_DEBUG_MSG((hxgep, MAC_CTL, "<== hxge_m_unicst"));
23383dec9fcdSqs 
23393dec9fcdSqs 	return (0);
23403dec9fcdSqs }
23413dec9fcdSqs 
23423dec9fcdSqs static int
23433dec9fcdSqs hxge_m_multicst(void *arg, boolean_t add, const uint8_t *mca)
23443dec9fcdSqs {
23453dec9fcdSqs 	p_hxge_t		hxgep = (p_hxge_t)arg;
23463dec9fcdSqs 	struct ether_addr	addrp;
23473dec9fcdSqs 
23483dec9fcdSqs 	HXGE_DEBUG_MSG((hxgep, MAC_CTL, "==> hxge_m_multicst: add %d", add));
23493dec9fcdSqs 
23503dec9fcdSqs 	bcopy(mca, (uint8_t *)&addrp, ETHERADDRL);
23513dec9fcdSqs 
23523dec9fcdSqs 	if (add) {
23533dec9fcdSqs 		if (hxge_add_mcast_addr(hxgep, &addrp)) {
23543dec9fcdSqs 			HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
23553dec9fcdSqs 			    "<== hxge_m_multicst: add multicast failed"));
23563dec9fcdSqs 			return (EINVAL);
23573dec9fcdSqs 		}
23583dec9fcdSqs 	} else {
23593dec9fcdSqs 		if (hxge_del_mcast_addr(hxgep, &addrp)) {
23603dec9fcdSqs 			HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
23613dec9fcdSqs 			    "<== hxge_m_multicst: del multicast failed"));
23623dec9fcdSqs 			return (EINVAL);
23633dec9fcdSqs 		}
23643dec9fcdSqs 	}
23653dec9fcdSqs 
23663dec9fcdSqs 	HXGE_DEBUG_MSG((hxgep, MAC_CTL, "<== hxge_m_multicst"));
23673dec9fcdSqs 
23683dec9fcdSqs 	return (0);
23693dec9fcdSqs }
23703dec9fcdSqs 
23713dec9fcdSqs static int
23723dec9fcdSqs hxge_m_promisc(void *arg, boolean_t on)
23733dec9fcdSqs {
23743dec9fcdSqs 	p_hxge_t hxgep = (p_hxge_t)arg;
23753dec9fcdSqs 
23763dec9fcdSqs 	HXGE_DEBUG_MSG((hxgep, MAC_CTL, "==> hxge_m_promisc: on %d", on));
23773dec9fcdSqs 
23783dec9fcdSqs 	if (hxge_set_promisc(hxgep, on)) {
23793dec9fcdSqs 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
23803dec9fcdSqs 		    "<== hxge_m_promisc: set promisc failed"));
23813dec9fcdSqs 		return (EINVAL);
23823dec9fcdSqs 	}
23833dec9fcdSqs 
23843dec9fcdSqs 	HXGE_DEBUG_MSG((hxgep, MAC_CTL, "<== hxge_m_promisc: on %d", on));
23853dec9fcdSqs 
23863dec9fcdSqs 	return (0);
23873dec9fcdSqs }
23883dec9fcdSqs 
23893dec9fcdSqs static void
23903dec9fcdSqs hxge_m_ioctl(void *arg, queue_t *wq, mblk_t *mp)
23913dec9fcdSqs {
23923dec9fcdSqs 	p_hxge_t	hxgep = (p_hxge_t)arg;
23933dec9fcdSqs 	struct iocblk	*iocp = (struct iocblk *)mp->b_rptr;
23943dec9fcdSqs 	boolean_t	need_privilege;
23953dec9fcdSqs 	int		err;
23963dec9fcdSqs 	int		cmd;
23973dec9fcdSqs 
23983dec9fcdSqs 	HXGE_DEBUG_MSG((hxgep, NEMO_CTL, "==> hxge_m_ioctl"));
23993dec9fcdSqs 
24003dec9fcdSqs 	iocp = (struct iocblk *)mp->b_rptr;
24013dec9fcdSqs 	iocp->ioc_error = 0;
24023dec9fcdSqs 	need_privilege = B_TRUE;
24033dec9fcdSqs 	cmd = iocp->ioc_cmd;
24043dec9fcdSqs 
24053dec9fcdSqs 	HXGE_DEBUG_MSG((hxgep, NEMO_CTL, "==> hxge_m_ioctl: cmd 0x%08x", cmd));
24063dec9fcdSqs 	switch (cmd) {
24073dec9fcdSqs 	default:
24083dec9fcdSqs 		miocnak(wq, mp, 0, EINVAL);
24093dec9fcdSqs 		HXGE_DEBUG_MSG((hxgep, NEMO_CTL, "<== hxge_m_ioctl: invalid"));
24103dec9fcdSqs 		return;
24113dec9fcdSqs 
24123dec9fcdSqs 	case LB_GET_INFO_SIZE:
24133dec9fcdSqs 	case LB_GET_INFO:
24143dec9fcdSqs 	case LB_GET_MODE:
24153dec9fcdSqs 		need_privilege = B_FALSE;
24163dec9fcdSqs 		break;
24173dec9fcdSqs 
24183dec9fcdSqs 	case LB_SET_MODE:
24193dec9fcdSqs 		break;
24203dec9fcdSqs 
24213dec9fcdSqs 	case ND_GET:
24223dec9fcdSqs 		need_privilege = B_FALSE;
24233dec9fcdSqs 		break;
24243dec9fcdSqs 	case ND_SET:
24253dec9fcdSqs 		break;
24263dec9fcdSqs 
24273dec9fcdSqs 	case HXGE_GET64:
24283dec9fcdSqs 	case HXGE_PUT64:
24293dec9fcdSqs 	case HXGE_GET_TX_RING_SZ:
24303dec9fcdSqs 	case HXGE_GET_TX_DESC:
24313dec9fcdSqs 	case HXGE_TX_SIDE_RESET:
24323dec9fcdSqs 	case HXGE_RX_SIDE_RESET:
24333dec9fcdSqs 	case HXGE_GLOBAL_RESET:
24343dec9fcdSqs 	case HXGE_RESET_MAC:
24353dec9fcdSqs 	case HXGE_PUT_TCAM:
24363dec9fcdSqs 	case HXGE_GET_TCAM:
24373dec9fcdSqs 	case HXGE_RTRACE:
24383dec9fcdSqs 
24393dec9fcdSqs 		need_privilege = B_FALSE;
24403dec9fcdSqs 		break;
24413dec9fcdSqs 	}
24423dec9fcdSqs 
24433dec9fcdSqs 	if (need_privilege) {
24443dec9fcdSqs 		err = secpolicy_net_config(iocp->ioc_cr, B_FALSE);
24453dec9fcdSqs 		if (err != 0) {
24463dec9fcdSqs 			miocnak(wq, mp, 0, err);
24473dec9fcdSqs 			HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
24483dec9fcdSqs 			    "<== hxge_m_ioctl: no priv"));
24493dec9fcdSqs 			return;
24503dec9fcdSqs 		}
24513dec9fcdSqs 	}
24523dec9fcdSqs 
24533dec9fcdSqs 	switch (cmd) {
24543dec9fcdSqs 	case ND_GET:
24553dec9fcdSqs 		HXGE_DEBUG_MSG((hxgep, NEMO_CTL, "ND_GET command"));
24563dec9fcdSqs 	case ND_SET:
24573dec9fcdSqs 		HXGE_DEBUG_MSG((hxgep, NEMO_CTL, "ND_SET command"));
24583dec9fcdSqs 		hxge_param_ioctl(hxgep, wq, mp, iocp);
24593dec9fcdSqs 		break;
24603dec9fcdSqs 
24613dec9fcdSqs 	case LB_GET_MODE:
24623dec9fcdSqs 	case LB_SET_MODE:
24633dec9fcdSqs 	case LB_GET_INFO_SIZE:
24643dec9fcdSqs 	case LB_GET_INFO:
24653dec9fcdSqs 		hxge_loopback_ioctl(hxgep, wq, mp, iocp);
24663dec9fcdSqs 		break;
24673dec9fcdSqs 
24683dec9fcdSqs 	case HXGE_PUT_TCAM:
24693dec9fcdSqs 	case HXGE_GET_TCAM:
24703dec9fcdSqs 	case HXGE_GET64:
24713dec9fcdSqs 	case HXGE_PUT64:
24723dec9fcdSqs 	case HXGE_GET_TX_RING_SZ:
24733dec9fcdSqs 	case HXGE_GET_TX_DESC:
24743dec9fcdSqs 	case HXGE_TX_SIDE_RESET:
24753dec9fcdSqs 	case HXGE_RX_SIDE_RESET:
24763dec9fcdSqs 	case HXGE_GLOBAL_RESET:
24773dec9fcdSqs 	case HXGE_RESET_MAC:
24783dec9fcdSqs 		HXGE_DEBUG_MSG((hxgep, NEMO_CTL,
24793dec9fcdSqs 		    "==> hxge_m_ioctl: cmd 0x%x", cmd));
24803dec9fcdSqs 		hxge_hw_ioctl(hxgep, wq, mp, iocp);
24813dec9fcdSqs 		break;
24823dec9fcdSqs 	}
24833dec9fcdSqs 
24843dec9fcdSqs 	HXGE_DEBUG_MSG((hxgep, NEMO_CTL, "<== hxge_m_ioctl"));
24853dec9fcdSqs }
24863dec9fcdSqs 
24873dec9fcdSqs extern void hxge_rx_hw_blank(void *arg, time_t ticks, uint_t count);
24883dec9fcdSqs 
/*
 * hxge_m_resources() -- export the receive DMA rings to the MAC
 * layer as MAC_RX_FIFO resources.
 *
 * Ensures the hardware is initialized (in case _start() has not run
 * yet), fills in one mac_rx_fifo_t describing the interrupt-blanking
 * callback and default timer/packet-count parameters, then registers
 * a resource for each RCR ring via mac_resource_add().
 */
static void
hxge_m_resources(void *arg)
{
	p_hxge_t hxgep = arg;
	mac_rx_fifo_t mrf;
	p_rx_rcr_rings_t rcr_rings;
	p_rx_rcr_ring_t *rcr_p;
	p_rx_rcr_ring_t rcrp;
	uint32_t i, ndmas;
	int status;

	HXGE_DEBUG_MSG((hxgep, RX_CTL, "==> hxge_m_resources"));

	MUTEX_ENTER(hxgep->genlock);

	/* Bring the hardware up first if it is not initialized yet. */
	if (!(hxgep->drv_state & STATE_HW_INITIALIZED)) {
		status = hxge_init(hxgep);
		if (status != HXGE_OK) {
			HXGE_DEBUG_MSG((hxgep, RX_CTL, "==> hxge_m_resources: "
			    "hxge_init failed"));
			MUTEX_EXIT(hxgep->genlock);
			return;
		}
	}

	/* Describe our RX FIFO: blanking callback plus default timers. */
	mrf.mrf_type = MAC_RX_FIFO;
	mrf.mrf_blank = hxge_rx_hw_blank;
	mrf.mrf_arg = (void *)hxgep;

	mrf.mrf_normal_blank_time = RXDMA_RCR_TO_DEFAULT;
	mrf.mrf_normal_pkt_count = RXDMA_RCR_PTHRES_DEFAULT;

	rcr_rings = hxgep->rx_rcr_rings;
	rcr_p = rcr_rings->rcr_rings;
	ndmas = rcr_rings->ndmas;

	/*
	 * Export our receive resources to the MAC layer.
	 * NOTE(review): the same local mrf is handed to
	 * mac_resource_add() for every ring -- assumes the framework
	 * copies it; confirm against the MAC layer implementation.
	 */
	for (i = 0; i < ndmas; i++) {
		rcrp = (void *)(p_rx_rcr_ring_t)rcr_p[i];
		rcrp->rcr_mac_handle =
		    mac_resource_add(hxgep->mach, (mac_resource_t *)&mrf);

		HXGE_DEBUG_MSG((hxgep, RX_CTL,
		    "==> hxge_m_resources: vdma %d dma %d "
		    "rcrptr 0x%016llx mac_handle 0x%016llx",
		    i, rcrp->rdc, rcr_p[i], rcrp->rcr_mac_handle));
	}

	MUTEX_EXIT(hxgep->genlock);

	HXGE_DEBUG_MSG((hxgep, RX_CTL, "<== hxge_m_resources"));
}
25433dec9fcdSqs 
25443dec9fcdSqs /*
25453dec9fcdSqs  * Set an alternate MAC address
25463dec9fcdSqs  */
25473dec9fcdSqs static int
25483dec9fcdSqs hxge_altmac_set(p_hxge_t hxgep, uint8_t *maddr, mac_addr_slot_t slot)
25493dec9fcdSqs {
25503dec9fcdSqs 	uint64_t	address;
25513dec9fcdSqs 	uint64_t	tmp;
25523dec9fcdSqs 	hpi_status_t	status;
25533dec9fcdSqs 	uint8_t		addrn;
25543dec9fcdSqs 	int		i;
25553dec9fcdSqs 
25563dec9fcdSqs 	/*
25573dec9fcdSqs 	 * Convert a byte array to a 48 bit value.
25583dec9fcdSqs 	 * Need to check endianess if in doubt
25593dec9fcdSqs 	 */
25603dec9fcdSqs 	address = 0;
25613dec9fcdSqs 	for (i = 0; i < ETHERADDRL; i++) {
25623dec9fcdSqs 		tmp = maddr[i];
25633dec9fcdSqs 		address <<= 8;
25643dec9fcdSqs 		address |= tmp;
25653dec9fcdSqs 	}
25663dec9fcdSqs 
25673dec9fcdSqs 	addrn = (uint8_t)slot;
25683dec9fcdSqs 	status = hpi_pfc_set_mac_address(hxgep->hpi_handle, addrn, address);
25693dec9fcdSqs 	if (status != HPI_SUCCESS)
25703dec9fcdSqs 		return (EIO);
25713dec9fcdSqs 
25723dec9fcdSqs 	return (0);
25733dec9fcdSqs }
25743dec9fcdSqs 
25753dec9fcdSqs static void
25763dec9fcdSqs hxge_mmac_kstat_update(p_hxge_t hxgep, mac_addr_slot_t slot)
25773dec9fcdSqs {
25783dec9fcdSqs 	p_hxge_mmac_stats_t	mmac_stats;
25793dec9fcdSqs 	int			i;
25803dec9fcdSqs 	hxge_mmac_t		*mmac_info;
25813dec9fcdSqs 
25823dec9fcdSqs 	mmac_info = &hxgep->hxge_mmac_info;
25833dec9fcdSqs 	mmac_stats = &hxgep->statsp->mmac_stats;
25843dec9fcdSqs 	mmac_stats->mmac_max_cnt = mmac_info->num_mmac;
25853dec9fcdSqs 	mmac_stats->mmac_avail_cnt = mmac_info->naddrfree;
25863dec9fcdSqs 
25873dec9fcdSqs 	for (i = 0; i < ETHERADDRL; i++) {
25883dec9fcdSqs 		mmac_stats->mmac_avail_pool[slot].ether_addr_octet[i] =
25893dec9fcdSqs 		    mmac_info->mac_pool[slot].addr[(ETHERADDRL - 1) - i];
25903dec9fcdSqs 	}
25913dec9fcdSqs }
25923dec9fcdSqs 
25933dec9fcdSqs /*
25943dec9fcdSqs  * Find an unused address slot, set the address value to the one specified,
25953dec9fcdSqs  * enable the port to start filtering on the new MAC address.
25963dec9fcdSqs  * Returns: 0 on success.
25973dec9fcdSqs  */
/*
 * Find an unused address slot, set the address value to the one specified,
 * enable the port to start filtering on the new MAC address.
 * Returns: 0 on success; ENXIO if init fails, ENOSPC if no slot is
 * free, EINVAL for a bad address, or the hxge_altmac_set() error.
 */
int
hxge_m_mmac_add(void *arg, mac_multi_addr_t *maddr)
{
	p_hxge_t	hxgep = arg;
	mac_addr_slot_t	slot;
	hxge_mmac_t	*mmac_info;
	int		err;
	hxge_status_t	status;

	mutex_enter(hxgep->genlock);

	/*
	 * Make sure that hxge is initialized, if _start() has
	 * not been called.
	 */
	if (!(hxgep->drv_state & STATE_HW_INITIALIZED)) {
		status = hxge_init(hxgep);
		if (status != HXGE_OK) {
			mutex_exit(hxgep->genlock);
			return (ENXIO);
		}
	}

	mmac_info = &hxgep->hxge_mmac_info;
	/* All alternate slots are already taken. */
	if (mmac_info->naddrfree == 0) {
		mutex_exit(hxgep->genlock);
		return (ENOSPC);
	}

	/* Reject addresses the MAC layer considers invalid unicast. */
	if (!mac_unicst_verify(hxgep->mach, maddr->mma_addr,
	    maddr->mma_addrlen)) {
		mutex_exit(hxgep->genlock);
		return (EINVAL);
	}

	/*
	 * Search for the first available slot. Because naddrfree
	 * is not zero, we are guaranteed to find one.
	 * Slot 0 is for unique (primary) MAC.  The first alternate
	 * MAC slot is slot 1.
	 */
	for (slot = 1; slot < mmac_info->num_mmac; slot++) {
		if (!(mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED))
			break;
	}

	ASSERT(slot < mmac_info->num_mmac);
	/* Program the hardware first; only update bookkeeping on success. */
	if ((err = hxge_altmac_set(hxgep, maddr->mma_addr, slot)) != 0) {
		mutex_exit(hxgep->genlock);
		return (err);
	}
	bcopy(maddr->mma_addr, mmac_info->mac_pool[slot].addr, ETHERADDRL);
	mmac_info->mac_pool[slot].flags |= MMAC_SLOT_USED;
	mmac_info->naddrfree--;
	hxge_mmac_kstat_update(hxgep, slot);

	/* Report back which slot was chosen. */
	maddr->mma_slot = slot;

	mutex_exit(hxgep->genlock);
	return (0);
}
26593dec9fcdSqs 
26603dec9fcdSqs /*
26613dec9fcdSqs  * Remove the specified mac address and update
26623dec9fcdSqs  * the h/w not to filter the mac address anymore.
26633dec9fcdSqs  * Returns: 0, on success.
26643dec9fcdSqs  */
/*
 * Remove the specified mac address and update
 * the h/w not to filter the mac address anymore.
 * Returns: 0, on success; EINVAL for a bad/unused slot, ENXIO if
 * init fails, EIO if the hardware disable fails.
 */
int
hxge_m_mmac_remove(void *arg, mac_addr_slot_t slot)
{
	p_hxge_t	hxgep = arg;
	hxge_mmac_t	*mmac_info;
	int		err = 0;
	hxge_status_t	status;

	mutex_enter(hxgep->genlock);

	/*
	 * Make sure that hxge is initialized, if _start() has
	 * not been called.
	 */
	if (!(hxgep->drv_state & STATE_HW_INITIALIZED)) {
		status = hxge_init(hxgep);
		if (status != HXGE_OK) {
			mutex_exit(hxgep->genlock);
			return (ENXIO);
		}
	}

	/* Slot 0 is the primary address and may not be removed here. */
	mmac_info = &hxgep->hxge_mmac_info;
	if (slot <= 0 || slot >= mmac_info->num_mmac) {
		mutex_exit(hxgep->genlock);
		return (EINVAL);
	}

	if (mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED) {
		if (hpi_pfc_mac_addr_disable(hxgep->hpi_handle, slot) ==
		    HPI_SUCCESS) {
			mmac_info->mac_pool[slot].flags &= ~MMAC_SLOT_USED;
			mmac_info->naddrfree++;
			/*
			 * Clear mac_pool[slot].addr so that kstat shows 0
			 * alternate MAC address if the slot is not used.
			 */
			bzero(mmac_info->mac_pool[slot].addr, ETHERADDRL);
			hxge_mmac_kstat_update(hxgep, slot);
		} else {
			err = EIO;
		}
	} else {
		err = EINVAL;
	}

	mutex_exit(hxgep->genlock);
	return (err);
}
27143dec9fcdSqs 
27153dec9fcdSqs /*
 * Modify a MAC address added by hxge_m_mmac_add().
27173dec9fcdSqs  * Returns: 0, on success.
27183dec9fcdSqs  */
/*
 * Replace the address stored in an in-use alternate-MAC slot.
 * Returns: 0, on success; EINVAL for a bad address or bad/unused
 * slot, ENXIO if init fails, or the hxge_altmac_set() error.
 */
int
hxge_m_mmac_modify(void *arg, mac_multi_addr_t *maddr)
{
	p_hxge_t	hxgep = arg;
	mac_addr_slot_t	slot;
	hxge_mmac_t	*mmac_info;
	int		err = 0;
	hxge_status_t	status;

	/* Validate the new address before taking any locks. */
	if (!mac_unicst_verify(hxgep->mach, maddr->mma_addr,
	    maddr->mma_addrlen))
		return (EINVAL);

	slot = maddr->mma_slot;

	mutex_enter(hxgep->genlock);

	/*
	 * Make sure that hxge is initialized, if _start() has
	 * not been called.
	 */
	if (!(hxgep->drv_state & STATE_HW_INITIALIZED)) {
		status = hxge_init(hxgep);
		if (status != HXGE_OK) {
			mutex_exit(hxgep->genlock);
			return (ENXIO);
		}
	}

	/* Slot 0 is the primary address; only alternate slots apply. */
	mmac_info = &hxgep->hxge_mmac_info;
	if (slot <= 0 || slot >= mmac_info->num_mmac) {
		mutex_exit(hxgep->genlock);
		return (EINVAL);
	}

	/* Only an in-use slot may be modified. */
	if (mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED) {
		if ((err = hxge_altmac_set(hxgep, maddr->mma_addr,
		    slot)) == 0) {
			bcopy(maddr->mma_addr, mmac_info->mac_pool[slot].addr,
			    ETHERADDRL);
			hxge_mmac_kstat_update(hxgep, slot);
		}
	} else {
		err = EINVAL;
	}

	mutex_exit(hxgep->genlock);
	return (err);
}
27683dec9fcdSqs 
27693dec9fcdSqs /*
27703dec9fcdSqs  * static int
27713dec9fcdSqs  * hxge_m_mmac_get() - Get the MAC address and other information
27723dec9fcdSqs  *	related to the slot.  mma_flags should be set to 0 in the call.
27733dec9fcdSqs  *	Note: although kstat shows MAC address as zero when a slot is
27743dec9fcdSqs  *	not used, Crossbow expects hxge_m_mmac_get to copy factory MAC
27753dec9fcdSqs  *	to the caller as long as the slot is not using a user MAC address.
27763dec9fcdSqs  *	The following table shows the rules,
27773dec9fcdSqs  *
27783dec9fcdSqs  *     					USED    VENDOR    mma_addr
27793dec9fcdSqs  *	------------------------------------------------------------
27803dec9fcdSqs  *	(1) Slot uses a user MAC:	yes      no     user MAC
27813dec9fcdSqs  *	(2) Slot uses a factory MAC:    yes      yes    factory MAC
27823dec9fcdSqs  *	(3) Slot is not used but is
27833dec9fcdSqs  *	     factory MAC capable:	no       yes    factory MAC
27843dec9fcdSqs  *	(4) Slot is not used and is
27853dec9fcdSqs  *	     not factory MAC capable:   no       no	0
27863dec9fcdSqs  *	------------------------------------------------------------
27873dec9fcdSqs  */
/*
 * hxge_m_mmac_get() -- report the address and flags for one slot.
 *
 * Fills in maddr->mma_flags/mma_addr/mma_addrlen for the requested
 * slot.  Since this driver has no factory MACs (maddr_flag == 0 in
 * hxge_m_getcapab()), only the MMAC_SLOT_USED case copies an
 * address; unused slots are reported with mma_flags == 0 and the
 * address left untouched.
 * Returns: 0 on success; EINVAL for a bad slot, ENXIO if init fails.
 */
int
hxge_m_mmac_get(void *arg, mac_multi_addr_t *maddr)
{
	hxge_t		*hxgep = arg;
	mac_addr_slot_t	slot;
	hxge_mmac_t	*mmac_info;
	hxge_status_t	status;

	slot = maddr->mma_slot;

	mutex_enter(hxgep->genlock);

	/*
	 * Make sure that hxge is initialized, if _start() has
	 * not been called.
	 */
	if (!(hxgep->drv_state & STATE_HW_INITIALIZED)) {
		status = hxge_init(hxgep);
		if (status != HXGE_OK) {
			mutex_exit(hxgep->genlock);
			return (ENXIO);
		}
	}

	/* Slot 0 (the primary address) is not reachable through here. */
	mmac_info = &hxgep->hxge_mmac_info;
	if (slot <= 0 || slot >= mmac_info->num_mmac) {
		mutex_exit(hxgep->genlock);
		return (EINVAL);
	}

	maddr->mma_flags = 0;
	if (mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED) {
		maddr->mma_flags |= MMAC_SLOT_USED;
		bcopy(mmac_info->mac_pool[slot].addr,
		    maddr->mma_addr, ETHERADDRL);
		maddr->mma_addrlen = ETHERADDRL;
	}

	mutex_exit(hxgep->genlock);
	return (0);
}
28293dec9fcdSqs 
/*ARGSUSED*/
/*
 * hxge_m_getcapab() -- report driver capabilities to the MAC layer.
 *
 * cap_data is an out-parameter whose interpretation depends on cap;
 * the two typed views below alias the same pointer.  Returns B_TRUE
 * if the capability is supported (and cap_data filled in where
 * applicable), B_FALSE otherwise.
 */
boolean_t
hxge_m_getcapab(void *arg, mac_capab_t cap, void *cap_data)
{
	p_hxge_t		hxgep = (p_hxge_t)arg;
	uint32_t		*txflags = cap_data;
	multiaddress_capab_t	*mmacp = cap_data;

	switch (cap) {
	case MAC_CAPAB_HCKSUM:
		/* Partial (one's-complement) inet checksum offload. */
		*txflags = HCKSUM_INET_PARTIAL;
		break;

	case MAC_CAPAB_POLL:
		/*
		 * There's nothing for us to fill in, simply returning B_TRUE
		 * stating that we support polling is sufficient.
		 */
		break;

	case MAC_CAPAB_MULTIADDRESS:
		/*
		 * The number of MAC addresses made available by
		 * this capability is one less than the total as
		 * the primary address in slot 0 is counted in
		 * the total.
		 */
		mmacp->maddr_naddr = PFC_N_MAC_ADDRESSES - 1;
		mmacp->maddr_naddrfree = hxgep->hxge_mmac_info.naddrfree;
		mmacp->maddr_flag = 0;	/* No multiple factory macs */
		mmacp->maddr_handle = hxgep;
		mmacp->maddr_add = hxge_m_mmac_add;
		mmacp->maddr_remove = hxge_m_mmac_remove;
		mmacp->maddr_modify = hxge_m_mmac_modify;
		mmacp->maddr_get = hxge_m_mmac_get;
		mmacp->maddr_reserve = NULL;	/* No multiple factory macs */
		break;
	default:
		return (B_FALSE);
	}
	return (B_TRUE);
}
28723dec9fcdSqs 
28733dec9fcdSqs /*
28743dec9fcdSqs  * Module loading and removing entry points.
28753dec9fcdSqs  */
/*
 * DDI device operations.  identify/probe are no-ops (nulldev),
 * reset is unsupported (nodev); D_MP marks the driver as MT-safe.
 * mac_init_ops() in _init() fills in the MAC-specific pieces.
 */
DDI_DEFINE_STREAM_OPS(hxge_dev_ops, nulldev, nulldev, hxge_attach, hxge_detach,
    nodev, NULL, D_MP, NULL);

extern struct mod_ops mod_driverops;

/* Description string reported by modinfo(1M). */
#define	HXGE_DESC_VER	"HXGE 10Gb Ethernet Driver"

/*
 * Module linkage information for the kernel.
 */
static struct modldrv hxge_modldrv = {
	&mod_driverops,
	HXGE_DESC_VER,
	&hxge_dev_ops
};

static struct modlinkage modlinkage = {
	MODREV_1, (void *) &hxge_modldrv, NULL
};
28953dec9fcdSqs 
28963dec9fcdSqs int
28973dec9fcdSqs _init(void)
28983dec9fcdSqs {
28993dec9fcdSqs 	int status;
29003dec9fcdSqs 
29013dec9fcdSqs 	HXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _init"));
29023dec9fcdSqs 	mac_init_ops(&hxge_dev_ops, "hxge");
29033dec9fcdSqs 	status = ddi_soft_state_init(&hxge_list, sizeof (hxge_t), 0);
29043dec9fcdSqs 	if (status != 0) {
29053dec9fcdSqs 		HXGE_ERROR_MSG((NULL, HXGE_ERR_CTL,
29063dec9fcdSqs 		    "failed to init device soft state"));
29073dec9fcdSqs 		mac_fini_ops(&hxge_dev_ops);
29083dec9fcdSqs 		goto _init_exit;
29093dec9fcdSqs 	}
29103dec9fcdSqs 
29113dec9fcdSqs 	status = mod_install(&modlinkage);
29123dec9fcdSqs 	if (status != 0) {
29133dec9fcdSqs 		ddi_soft_state_fini(&hxge_list);
29143dec9fcdSqs 		HXGE_ERROR_MSG((NULL, HXGE_ERR_CTL, "Mod install failed"));
29153dec9fcdSqs 		goto _init_exit;
29163dec9fcdSqs 	}
29173dec9fcdSqs 
29183dec9fcdSqs 	MUTEX_INIT(&hxge_common_lock, NULL, MUTEX_DRIVER, NULL);
29193dec9fcdSqs 
29203dec9fcdSqs _init_exit:
29213dec9fcdSqs 	HXGE_DEBUG_MSG((NULL, MOD_CTL, "_init status = 0x%X", status));
29223dec9fcdSqs 
29233dec9fcdSqs 	return (status);
29243dec9fcdSqs }
29253dec9fcdSqs 
/*
 * _fini() -- module unload entry point.
 *
 * Refuses to unload (EBUSY) while any driver-owned mblks are still
 * loaned upstream, then removes the module and tears down the MAC
 * ops, soft-state list, and common lock.
 */
int
_fini(void)
{
	int status;

	HXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _fini"));

	HXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _fini: mod_remove"));

	/* Outstanding loaned-up packets still reference our buffers. */
	if (hxge_mblks_pending)
		return (EBUSY);

	status = mod_remove(&modlinkage);
	if (status != DDI_SUCCESS) {
		HXGE_DEBUG_MSG((NULL, MOD_CTL,
		    "Module removal failed 0x%08x", status));
		goto _fini_exit;
	}

	mac_fini_ops(&hxge_dev_ops);

	ddi_soft_state_fini(&hxge_list);

	MUTEX_DESTROY(&hxge_common_lock);

_fini_exit:
	HXGE_DEBUG_MSG((NULL, MOD_CTL, "_fini status = 0x%08x", status));

	return (status);
}
29563dec9fcdSqs 
29573dec9fcdSqs int
29583dec9fcdSqs _info(struct modinfo *modinfop)
29593dec9fcdSqs {
29603dec9fcdSqs 	int status;
29613dec9fcdSqs 
29623dec9fcdSqs 	HXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _info"));
29633dec9fcdSqs 	status = mod_info(&modlinkage, modinfop);
29643dec9fcdSqs 	HXGE_DEBUG_MSG((NULL, MOD_CTL, " _info status = 0x%X", status));
29653dec9fcdSqs 
29663dec9fcdSqs 	return (status);
29673dec9fcdSqs }
29683dec9fcdSqs 
/*ARGSUSED*/
/*
 * hxge_add_intrs() -- select and register the device's interrupts.
 *
 * Resets the interrupt bookkeeping, queries DDI for the supported
 * interrupt types, picks one based on the hxge_msi_enable tunable
 * (1 = prefer MSI, 2 = prefer MSI-X, anything else = fixed/INTx),
 * and registers via hxge_add_intrs_adv().
 * Returns HXGE_OK on success or HXGE_ERROR | HXGE_DDI_FAILED.
 */
hxge_status_t
hxge_add_intrs(p_hxge_t hxgep)
{
	int		intr_types;
	int		type = 0;
	int		ddi_status = DDI_SUCCESS;
	hxge_status_t	status = HXGE_OK;

	HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_add_intrs"));

	/* Start from a clean interrupt-state record. */
	hxgep->hxge_intr_type.intr_registered = B_FALSE;
	hxgep->hxge_intr_type.intr_enabled = B_FALSE;
	hxgep->hxge_intr_type.msi_intx_cnt = 0;
	hxgep->hxge_intr_type.intr_added = 0;
	hxgep->hxge_intr_type.niu_msi_enable = B_FALSE;
	hxgep->hxge_intr_type.intr_type = 0;

	if (hxge_msi_enable) {
		hxgep->hxge_intr_type.niu_msi_enable = B_TRUE;
	}

	/* Get the supported interrupt types */
	if ((ddi_status = ddi_intr_get_supported_types(hxgep->dip, &intr_types))
	    != DDI_SUCCESS) {
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "<== hxge_add_intrs: "
		    "ddi_intr_get_supported_types failed: status 0x%08x",
		    ddi_status));
		return (HXGE_ERROR | HXGE_DDI_FAILED);
	}

	hxgep->hxge_intr_type.intr_types = intr_types;

	HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_add_intrs: "
	    "ddi_intr_get_supported_types: 0x%08x", intr_types));

	/*
	 * Pick the interrupt type to use MSIX, MSI, INTX hxge_msi_enable:
	 *	(1): 1 - MSI
	 *	(2): 2 - MSI-X
	 *	others - FIXED
	 */
	switch (hxge_msi_enable) {
	default:
		type = DDI_INTR_TYPE_FIXED;
		HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_add_intrs: "
		    "use fixed (intx emulation) type %08x", type));
		break;

	case 2:
		/* Preference order: MSI-X, then MSI, then fixed. */
		HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_add_intrs: "
		    "ddi_intr_get_supported_types: 0x%08x", intr_types));
		if (intr_types & DDI_INTR_TYPE_MSIX) {
			type = DDI_INTR_TYPE_MSIX;
			HXGE_DEBUG_MSG((hxgep, INT_CTL,
			    "==> hxge_add_intrs: "
			    "ddi_intr_get_supported_types: MSIX 0x%08x", type));
		} else if (intr_types & DDI_INTR_TYPE_MSI) {
			type = DDI_INTR_TYPE_MSI;
			HXGE_DEBUG_MSG((hxgep, INT_CTL,
			    "==> hxge_add_intrs: "
			    "ddi_intr_get_supported_types: MSI 0x%08x", type));
		} else if (intr_types & DDI_INTR_TYPE_FIXED) {
			type = DDI_INTR_TYPE_FIXED;
			HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_add_intrs: "
			    "ddi_intr_get_supported_types: MSXED0x%08x", type));
		}
		break;

	case 1:
		/* Preference order: MSI, then MSI-X, then fixed. */
		if (intr_types & DDI_INTR_TYPE_MSI) {
			type = DDI_INTR_TYPE_MSI;
			HXGE_DEBUG_MSG((hxgep, INT_CTL,
			    "==> hxge_add_intrs: "
			    "ddi_intr_get_supported_types: MSI 0x%08x", type));
		} else if (intr_types & DDI_INTR_TYPE_MSIX) {
			type = DDI_INTR_TYPE_MSIX;
			HXGE_DEBUG_MSG((hxgep, INT_CTL,
			    "==> hxge_add_intrs: "
			    "ddi_intr_get_supported_types: MSIX 0x%08x", type));
		} else if (intr_types & DDI_INTR_TYPE_FIXED) {
			type = DDI_INTR_TYPE_FIXED;
			HXGE_DEBUG_MSG((hxgep, INT_CTL,
			    "==> hxge_add_intrs: "
			    "ddi_intr_get_supported_types: MSXED0x%08x", type));
		}
	}

	hxgep->hxge_intr_type.intr_type = type;
	if ((type == DDI_INTR_TYPE_MSIX || type == DDI_INTR_TYPE_MSI ||
	    type == DDI_INTR_TYPE_FIXED) &&
	    hxgep->hxge_intr_type.niu_msi_enable) {
		if ((status = hxge_add_intrs_adv(hxgep)) != DDI_SUCCESS) {
			HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
			    " hxge_add_intrs: "
			    " hxge_add_intrs_adv failed: status 0x%08x",
			    status));
			return (status);
		} else {
			HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_add_intrs: "
			    "interrupts registered : type %d", type));
			hxgep->hxge_intr_type.intr_registered = B_TRUE;

			HXGE_DEBUG_MSG((hxgep, DDI_CTL,
			    "\nAdded advanced hxge add_intr_adv "
			    "intr type 0x%x\n", type));

			return (status);
		}
	}

	/*
	 * Reached only when niu_msi_enable is false (hxge_msi_enable == 0),
	 * in which case nothing was registered above.
	 */
	if (!hxgep->hxge_intr_type.intr_registered) {
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
		    "==> hxge_add_intrs: failed to register interrupts"));
		return (HXGE_ERROR | HXGE_DDI_FAILED);
	}

	HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_add_intrs"));

	return (status);
}
30903dec9fcdSqs 
30913dec9fcdSqs /*ARGSUSED*/
30923dec9fcdSqs static hxge_status_t
30933dec9fcdSqs hxge_add_soft_intrs(p_hxge_t hxgep)
30943dec9fcdSqs {
30953dec9fcdSqs 	int		ddi_status = DDI_SUCCESS;
30963dec9fcdSqs 	hxge_status_t	status = HXGE_OK;
30973dec9fcdSqs 
30983dec9fcdSqs 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_add_soft_intrs"));
30993dec9fcdSqs 
31003dec9fcdSqs 	hxgep->resched_id = NULL;
31013dec9fcdSqs 	hxgep->resched_running = B_FALSE;
31023dec9fcdSqs 	ddi_status = ddi_add_softintr(hxgep->dip, DDI_SOFTINT_LOW,
31033dec9fcdSqs 	    &hxgep->resched_id, NULL, NULL, hxge_reschedule, (caddr_t)hxgep);
31043dec9fcdSqs 	if (ddi_status != DDI_SUCCESS) {
31053dec9fcdSqs 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "<== hxge_add_soft_intrs: "
31063dec9fcdSqs 		    "ddi_add_softintrs failed: status 0x%08x", ddi_status));
31073dec9fcdSqs 		return (HXGE_ERROR | HXGE_DDI_FAILED);
31083dec9fcdSqs 	}
31093dec9fcdSqs 
31103dec9fcdSqs 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_ddi_add_soft_intrs"));
31113dec9fcdSqs 
31123dec9fcdSqs 	return (status);
31133dec9fcdSqs }
31143dec9fcdSqs 
31153dec9fcdSqs /*ARGSUSED*/
31163dec9fcdSqs static hxge_status_t
31173dec9fcdSqs hxge_add_intrs_adv(p_hxge_t hxgep)
31183dec9fcdSqs {
31193dec9fcdSqs 	int		intr_type;
31203dec9fcdSqs 	p_hxge_intr_t	intrp;
31213dec9fcdSqs 	hxge_status_t	status;
31223dec9fcdSqs 
31233dec9fcdSqs 	HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_add_intrs_adv"));
31243dec9fcdSqs 
31253dec9fcdSqs 	intrp = (p_hxge_intr_t)&hxgep->hxge_intr_type;
31263dec9fcdSqs 	intr_type = intrp->intr_type;
31273dec9fcdSqs 
31283dec9fcdSqs 	HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_add_intrs_adv: type 0x%x",
31293dec9fcdSqs 	    intr_type));
31303dec9fcdSqs 
31313dec9fcdSqs 	switch (intr_type) {
31323dec9fcdSqs 	case DDI_INTR_TYPE_MSI:		/* 0x2 */
31333dec9fcdSqs 	case DDI_INTR_TYPE_MSIX:	/* 0x4 */
31343dec9fcdSqs 		status = hxge_add_intrs_adv_type(hxgep, intr_type);
31353dec9fcdSqs 		break;
31363dec9fcdSqs 
31373dec9fcdSqs 	case DDI_INTR_TYPE_FIXED:	/* 0x1 */
31383dec9fcdSqs 		status = hxge_add_intrs_adv_type_fix(hxgep, intr_type);
31393dec9fcdSqs 		break;
31403dec9fcdSqs 
31413dec9fcdSqs 	default:
31423dec9fcdSqs 		status = HXGE_ERROR;
31433dec9fcdSqs 		break;
31443dec9fcdSqs 	}
31453dec9fcdSqs 
31463dec9fcdSqs 	HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_add_intrs_adv"));
31473dec9fcdSqs 
31483dec9fcdSqs 	return (status);
31493dec9fcdSqs }
31503dec9fcdSqs 
31513dec9fcdSqs /*ARGSUSED*/
31523dec9fcdSqs static hxge_status_t
31533dec9fcdSqs hxge_add_intrs_adv_type(p_hxge_t hxgep, uint32_t int_type)
31543dec9fcdSqs {
31553dec9fcdSqs 	dev_info_t	*dip = hxgep->dip;
31563dec9fcdSqs 	p_hxge_ldg_t	ldgp;
31573dec9fcdSqs 	p_hxge_intr_t	intrp;
31583dec9fcdSqs 	uint_t		*inthandler;
31593dec9fcdSqs 	void		*arg1, *arg2;
31603dec9fcdSqs 	int		behavior;
31613dec9fcdSqs 	int		nintrs, navail;
31623dec9fcdSqs 	int		nactual, nrequired;
31633dec9fcdSqs 	int		inum = 0;
31643dec9fcdSqs 	int		loop = 0;
31653dec9fcdSqs 	int		x, y;
31663dec9fcdSqs 	int		ddi_status = DDI_SUCCESS;
31673dec9fcdSqs 	hxge_status_t	status = HXGE_OK;
31683dec9fcdSqs 
31693dec9fcdSqs 	HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_add_intrs_adv_type"));
31703dec9fcdSqs 
31713dec9fcdSqs 	intrp = (p_hxge_intr_t)&hxgep->hxge_intr_type;
31723dec9fcdSqs 
31733dec9fcdSqs 	ddi_status = ddi_intr_get_nintrs(dip, int_type, &nintrs);
31743dec9fcdSqs 	if ((ddi_status != DDI_SUCCESS) || (nintrs == 0)) {
31753dec9fcdSqs 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
31763dec9fcdSqs 		    "ddi_intr_get_nintrs() failed, status: 0x%x%, "
31773dec9fcdSqs 		    "nintrs: %d", ddi_status, nintrs));
31783dec9fcdSqs 		return (HXGE_ERROR | HXGE_DDI_FAILED);
31793dec9fcdSqs 	}
31803dec9fcdSqs 
31813dec9fcdSqs 	ddi_status = ddi_intr_get_navail(dip, int_type, &navail);
31823dec9fcdSqs 	if ((ddi_status != DDI_SUCCESS) || (navail == 0)) {
31833dec9fcdSqs 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
31843dec9fcdSqs 		    "ddi_intr_get_navail() failed, status: 0x%x%, "
31853dec9fcdSqs 		    "nintrs: %d", ddi_status, navail));
31863dec9fcdSqs 		return (HXGE_ERROR | HXGE_DDI_FAILED);
31873dec9fcdSqs 	}
31883dec9fcdSqs 
31893dec9fcdSqs 	HXGE_DEBUG_MSG((hxgep, INT_CTL,
31903dec9fcdSqs 	    "ddi_intr_get_navail() returned: intr type %d nintrs %d, navail %d",
31913dec9fcdSqs 	    int_type, nintrs, navail));
31923dec9fcdSqs 
31933dec9fcdSqs 	if (int_type == DDI_INTR_TYPE_MSI && !ISP2(navail)) {
31943dec9fcdSqs 		/* MSI must be power of 2 */
31953dec9fcdSqs 		if ((navail & 16) == 16) {
31963dec9fcdSqs 			navail = 16;
31973dec9fcdSqs 		} else if ((navail & 8) == 8) {
31983dec9fcdSqs 			navail = 8;
31993dec9fcdSqs 		} else if ((navail & 4) == 4) {
32003dec9fcdSqs 			navail = 4;
32013dec9fcdSqs 		} else if ((navail & 2) == 2) {
32023dec9fcdSqs 			navail = 2;
32033dec9fcdSqs 		} else {
32043dec9fcdSqs 			navail = 1;
32053dec9fcdSqs 		}
32063dec9fcdSqs 		HXGE_DEBUG_MSG((hxgep, INT_CTL,
32073dec9fcdSqs 		    "ddi_intr_get_navail(): (msi power of 2) nintrs %d, "
32083dec9fcdSqs 		    "navail %d", nintrs, navail));
32093dec9fcdSqs 	}
32103dec9fcdSqs 
32113dec9fcdSqs 	HXGE_DEBUG_MSG((hxgep, INT_CTL,
32123dec9fcdSqs 	    "requesting: intr type %d nintrs %d, navail %d",
32133dec9fcdSqs 	    int_type, nintrs, navail));
32143dec9fcdSqs 
32153dec9fcdSqs 	behavior = ((int_type == DDI_INTR_TYPE_FIXED) ? DDI_INTR_ALLOC_STRICT :
32163dec9fcdSqs 	    DDI_INTR_ALLOC_NORMAL);
32173dec9fcdSqs 	intrp->intr_size = navail * sizeof (ddi_intr_handle_t);
32183dec9fcdSqs 	intrp->htable = kmem_zalloc(intrp->intr_size, KM_SLEEP);
32193dec9fcdSqs 
32203dec9fcdSqs 	ddi_status = ddi_intr_alloc(dip, intrp->htable, int_type, inum,
32213dec9fcdSqs 	    navail, &nactual, behavior);
32223dec9fcdSqs 	if (ddi_status != DDI_SUCCESS || nactual == 0) {
32233dec9fcdSqs 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
32243dec9fcdSqs 		    " ddi_intr_alloc() failed: %d", ddi_status));
32253dec9fcdSqs 		kmem_free(intrp->htable, intrp->intr_size);
32263dec9fcdSqs 		return (HXGE_ERROR | HXGE_DDI_FAILED);
32273dec9fcdSqs 	}
32283dec9fcdSqs 
32293dec9fcdSqs 	HXGE_DEBUG_MSG((hxgep, INT_CTL,
32303dec9fcdSqs 	    "ddi_intr_alloc() returned: navail %d nactual %d",
32313dec9fcdSqs 	    navail, nactual));
32323dec9fcdSqs 
32333dec9fcdSqs 	if ((ddi_status = ddi_intr_get_pri(intrp->htable[0],
32343dec9fcdSqs 	    (uint_t *)&intrp->pri)) != DDI_SUCCESS) {
32353dec9fcdSqs 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
32363dec9fcdSqs 		    " ddi_intr_get_pri() failed: %d", ddi_status));
32373dec9fcdSqs 		/* Free already allocated interrupts */
32383dec9fcdSqs 		for (y = 0; y < nactual; y++) {
32393dec9fcdSqs 			(void) ddi_intr_free(intrp->htable[y]);
32403dec9fcdSqs 		}
32413dec9fcdSqs 
32423dec9fcdSqs 		kmem_free(intrp->htable, intrp->intr_size);
32433dec9fcdSqs 		return (HXGE_ERROR | HXGE_DDI_FAILED);
32443dec9fcdSqs 	}
32453dec9fcdSqs 
32463dec9fcdSqs 	nrequired = 0;
32473dec9fcdSqs 	status = hxge_ldgv_init(hxgep, &nactual, &nrequired);
32483dec9fcdSqs 	if (status != HXGE_OK) {
32493dec9fcdSqs 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
32503dec9fcdSqs 		    "hxge_add_intrs_adv_typ:hxge_ldgv_init "
32513dec9fcdSqs 		    "failed: 0x%x", status));
32523dec9fcdSqs 		/* Free already allocated interrupts */
32533dec9fcdSqs 		for (y = 0; y < nactual; y++) {
32543dec9fcdSqs 			(void) ddi_intr_free(intrp->htable[y]);
32553dec9fcdSqs 		}
32563dec9fcdSqs 
32573dec9fcdSqs 		kmem_free(intrp->htable, intrp->intr_size);
32583dec9fcdSqs 		return (status);
32593dec9fcdSqs 	}
32603dec9fcdSqs 
32613dec9fcdSqs 	ldgp = hxgep->ldgvp->ldgp;
32623dec9fcdSqs 	HXGE_DEBUG_MSG((hxgep, INT_CTL,
32633dec9fcdSqs 	    "After hxge_ldgv_init(): nreq %d nactual %d", nrequired, nactual));
32643dec9fcdSqs 
32653dec9fcdSqs 	if (nactual < nrequired)
32663dec9fcdSqs 		loop = nactual;
32673dec9fcdSqs 	else
32683dec9fcdSqs 		loop = nrequired;
32693dec9fcdSqs 
32703dec9fcdSqs 	for (x = 0; x < loop; x++, ldgp++) {
32713dec9fcdSqs 		ldgp->vector = (uint8_t)x;
32723dec9fcdSqs 		arg1 = ldgp->ldvp;
32733dec9fcdSqs 		arg2 = hxgep;
32743dec9fcdSqs 		if (ldgp->nldvs == 1) {
32753dec9fcdSqs 			inthandler = (uint_t *)ldgp->ldvp->ldv_intr_handler;
32763dec9fcdSqs 			HXGE_DEBUG_MSG((hxgep, INT_CTL,
32773dec9fcdSqs 			    "hxge_add_intrs_adv_type: arg1 0x%x arg2 0x%x: "
32783dec9fcdSqs 			    "1-1 int handler (entry %d)\n",
32793dec9fcdSqs 			    arg1, arg2, x));
32803dec9fcdSqs 		} else if (ldgp->nldvs > 1) {
32813dec9fcdSqs 			inthandler = (uint_t *)ldgp->sys_intr_handler;
32823dec9fcdSqs 			HXGE_DEBUG_MSG((hxgep, INT_CTL,
32833dec9fcdSqs 			    "hxge_add_intrs_adv_type: arg1 0x%x arg2 0x%x: "
32843dec9fcdSqs 			    "nldevs %d int handler (entry %d)\n",
32853dec9fcdSqs 			    arg1, arg2, ldgp->nldvs, x));
32863dec9fcdSqs 		}
32873dec9fcdSqs 		HXGE_DEBUG_MSG((hxgep, INT_CTL,
32883dec9fcdSqs 		    "==> hxge_add_intrs_adv_type: ddi_add_intr(inum) #%d "
32893dec9fcdSqs 		    "htable 0x%llx", x, intrp->htable[x]));
32903dec9fcdSqs 
32913dec9fcdSqs 		if ((ddi_status = ddi_intr_add_handler(intrp->htable[x],
32923dec9fcdSqs 		    (ddi_intr_handler_t *)inthandler, arg1, arg2)) !=
32933dec9fcdSqs 		    DDI_SUCCESS) {
32943dec9fcdSqs 			HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
32953dec9fcdSqs 			    "==> hxge_add_intrs_adv_type: failed #%d "
32963dec9fcdSqs 			    "status 0x%x", x, ddi_status));
32973dec9fcdSqs 			for (y = 0; y < intrp->intr_added; y++) {
32983dec9fcdSqs 				(void) ddi_intr_remove_handler(
32993dec9fcdSqs 				    intrp->htable[y]);
33003dec9fcdSqs 			}
33013dec9fcdSqs 
33023dec9fcdSqs 			/* Free already allocated intr */
33033dec9fcdSqs 			for (y = 0; y < nactual; y++) {
33043dec9fcdSqs 				(void) ddi_intr_free(intrp->htable[y]);
33053dec9fcdSqs 			}
33063dec9fcdSqs 			kmem_free(intrp->htable, intrp->intr_size);
33073dec9fcdSqs 
33083dec9fcdSqs 			(void) hxge_ldgv_uninit(hxgep);
33093dec9fcdSqs 
33103dec9fcdSqs 			return (HXGE_ERROR | HXGE_DDI_FAILED);
33113dec9fcdSqs 		}
33123dec9fcdSqs 
33133dec9fcdSqs 		intrp->intr_added++;
33143dec9fcdSqs 	}
33153dec9fcdSqs 	intrp->msi_intx_cnt = nactual;
33163dec9fcdSqs 
33173dec9fcdSqs 	HXGE_DEBUG_MSG((hxgep, INT_CTL,
33183dec9fcdSqs 	    "Requested: %d, Allowed: %d msi_intx_cnt %d intr_added %d",
33193dec9fcdSqs 	    navail, nactual, intrp->msi_intx_cnt, intrp->intr_added));
33203dec9fcdSqs 
33213dec9fcdSqs 	(void) ddi_intr_get_cap(intrp->htable[0], &intrp->intr_cap);
33223dec9fcdSqs 	(void) hxge_intr_ldgv_init(hxgep);
33233dec9fcdSqs 
33243dec9fcdSqs 	HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_add_intrs_adv_type"));
33253dec9fcdSqs 
33263dec9fcdSqs 	return (status);
33273dec9fcdSqs }
33283dec9fcdSqs 
33293dec9fcdSqs /*ARGSUSED*/
33303dec9fcdSqs static hxge_status_t
33313dec9fcdSqs hxge_add_intrs_adv_type_fix(p_hxge_t hxgep, uint32_t int_type)
33323dec9fcdSqs {
33333dec9fcdSqs 	dev_info_t	*dip = hxgep->dip;
33343dec9fcdSqs 	p_hxge_ldg_t	ldgp;
33353dec9fcdSqs 	p_hxge_intr_t	intrp;
33363dec9fcdSqs 	uint_t		*inthandler;
33373dec9fcdSqs 	void		*arg1, *arg2;
33383dec9fcdSqs 	int		behavior;
33393dec9fcdSqs 	int		nintrs, navail;
33403dec9fcdSqs 	int		nactual, nrequired;
33413dec9fcdSqs 	int		inum = 0;
33423dec9fcdSqs 	int		x, y;
33433dec9fcdSqs 	int		ddi_status = DDI_SUCCESS;
33443dec9fcdSqs 	hxge_status_t	status = HXGE_OK;
33453dec9fcdSqs 
33463dec9fcdSqs 	HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_add_intrs_adv_type_fix"));
33473dec9fcdSqs 	intrp = (p_hxge_intr_t)&hxgep->hxge_intr_type;
33483dec9fcdSqs 
33493dec9fcdSqs 	ddi_status = ddi_intr_get_nintrs(dip, int_type, &nintrs);
33503dec9fcdSqs 	if ((ddi_status != DDI_SUCCESS) || (nintrs == 0)) {
33513dec9fcdSqs 		HXGE_DEBUG_MSG((hxgep, INT_CTL,
33523dec9fcdSqs 		    "ddi_intr_get_nintrs() failed, status: 0x%x%, "
33533dec9fcdSqs 		    "nintrs: %d", status, nintrs));
33543dec9fcdSqs 		return (HXGE_ERROR | HXGE_DDI_FAILED);
33553dec9fcdSqs 	}
33563dec9fcdSqs 
33573dec9fcdSqs 	ddi_status = ddi_intr_get_navail(dip, int_type, &navail);
33583dec9fcdSqs 	if ((ddi_status != DDI_SUCCESS) || (navail == 0)) {
33593dec9fcdSqs 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
33603dec9fcdSqs 		    "ddi_intr_get_navail() failed, status: 0x%x%, "
33613dec9fcdSqs 		    "nintrs: %d", ddi_status, navail));
33623dec9fcdSqs 		return (HXGE_ERROR | HXGE_DDI_FAILED);
33633dec9fcdSqs 	}
33643dec9fcdSqs 
33653dec9fcdSqs 	HXGE_DEBUG_MSG((hxgep, INT_CTL,
33663dec9fcdSqs 	    "ddi_intr_get_navail() returned: nintrs %d, naavail %d",
33673dec9fcdSqs 	    nintrs, navail));
33683dec9fcdSqs 
33693dec9fcdSqs 	behavior = ((int_type == DDI_INTR_TYPE_FIXED) ? DDI_INTR_ALLOC_STRICT :
33703dec9fcdSqs 	    DDI_INTR_ALLOC_NORMAL);
33713dec9fcdSqs 	intrp->intr_size = navail * sizeof (ddi_intr_handle_t);
33723dec9fcdSqs 	intrp->htable = kmem_alloc(intrp->intr_size, KM_SLEEP);
33733dec9fcdSqs 	ddi_status = ddi_intr_alloc(dip, intrp->htable, int_type, inum,
33743dec9fcdSqs 	    navail, &nactual, behavior);
33753dec9fcdSqs 	if (ddi_status != DDI_SUCCESS || nactual == 0) {
33763dec9fcdSqs 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
33773dec9fcdSqs 		    " ddi_intr_alloc() failed: %d", ddi_status));
33783dec9fcdSqs 		kmem_free(intrp->htable, intrp->intr_size);
33793dec9fcdSqs 		return (HXGE_ERROR | HXGE_DDI_FAILED);
33803dec9fcdSqs 	}
33813dec9fcdSqs 
33823dec9fcdSqs 	if ((ddi_status = ddi_intr_get_pri(intrp->htable[0],
33833dec9fcdSqs 	    (uint_t *)&intrp->pri)) != DDI_SUCCESS) {
33843dec9fcdSqs 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
33853dec9fcdSqs 		    " ddi_intr_get_pri() failed: %d", ddi_status));
33863dec9fcdSqs 		/* Free already allocated interrupts */
33873dec9fcdSqs 		for (y = 0; y < nactual; y++) {
33883dec9fcdSqs 			(void) ddi_intr_free(intrp->htable[y]);
33893dec9fcdSqs 		}
33903dec9fcdSqs 
33913dec9fcdSqs 		kmem_free(intrp->htable, intrp->intr_size);
33923dec9fcdSqs 		return (HXGE_ERROR | HXGE_DDI_FAILED);
33933dec9fcdSqs 	}
33943dec9fcdSqs 
33953dec9fcdSqs 	nrequired = 0;
33963dec9fcdSqs 	status = hxge_ldgv_init(hxgep, &nactual, &nrequired);
33973dec9fcdSqs 	if (status != HXGE_OK) {
33983dec9fcdSqs 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
33993dec9fcdSqs 		    "hxge_add_intrs_adv_type_fix:hxge_ldgv_init "
34003dec9fcdSqs 		    "failed: 0x%x", status));
34013dec9fcdSqs 		/* Free already allocated interrupts */
34023dec9fcdSqs 		for (y = 0; y < nactual; y++) {
34033dec9fcdSqs 			(void) ddi_intr_free(intrp->htable[y]);
34043dec9fcdSqs 		}
34053dec9fcdSqs 
34063dec9fcdSqs 		kmem_free(intrp->htable, intrp->intr_size);
34073dec9fcdSqs 		return (status);
34083dec9fcdSqs 	}
34093dec9fcdSqs 
34103dec9fcdSqs 	ldgp = hxgep->ldgvp->ldgp;
34113dec9fcdSqs 	for (x = 0; x < nrequired; x++, ldgp++) {
34123dec9fcdSqs 		ldgp->vector = (uint8_t)x;
34133dec9fcdSqs 		arg1 = ldgp->ldvp;
34143dec9fcdSqs 		arg2 = hxgep;
34153dec9fcdSqs 		if (ldgp->nldvs == 1) {
34163dec9fcdSqs 			inthandler = (uint_t *)ldgp->ldvp->ldv_intr_handler;
34173dec9fcdSqs 			HXGE_DEBUG_MSG((hxgep, INT_CTL,
34183dec9fcdSqs 			    "hxge_add_intrs_adv_type_fix: "
34193dec9fcdSqs 			    "1-1 int handler(%d) ldg %d ldv %d "
34203dec9fcdSqs 			    "arg1 $%p arg2 $%p\n",
34213dec9fcdSqs 			    x, ldgp->ldg, ldgp->ldvp->ldv, arg1, arg2));
34223dec9fcdSqs 		} else if (ldgp->nldvs > 1) {
34233dec9fcdSqs 			inthandler = (uint_t *)ldgp->sys_intr_handler;
34243dec9fcdSqs 			HXGE_DEBUG_MSG((hxgep, INT_CTL,
34253dec9fcdSqs 			    "hxge_add_intrs_adv_type_fix: "
34263dec9fcdSqs 			    "shared ldv %d int handler(%d) ldv %d ldg %d"
34273dec9fcdSqs 			    "arg1 0x%016llx arg2 0x%016llx\n",
34283dec9fcdSqs 			    x, ldgp->nldvs, ldgp->ldg, ldgp->ldvp->ldv,
34293dec9fcdSqs 			    arg1, arg2));
34303dec9fcdSqs 		}
34313dec9fcdSqs 
34323dec9fcdSqs 		if ((ddi_status = ddi_intr_add_handler(intrp->htable[x],
34333dec9fcdSqs 		    (ddi_intr_handler_t *)inthandler, arg1, arg2)) !=
34343dec9fcdSqs 		    DDI_SUCCESS) {
34353dec9fcdSqs 			HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
34363dec9fcdSqs 			    "==> hxge_add_intrs_adv_type_fix: failed #%d "
34373dec9fcdSqs 			    "status 0x%x", x, ddi_status));
34383dec9fcdSqs 			for (y = 0; y < intrp->intr_added; y++) {
34393dec9fcdSqs 				(void) ddi_intr_remove_handler(
34403dec9fcdSqs 				    intrp->htable[y]);
34413dec9fcdSqs 			}
34423dec9fcdSqs 			for (y = 0; y < nactual; y++) {
34433dec9fcdSqs 				(void) ddi_intr_free(intrp->htable[y]);
34443dec9fcdSqs 			}
34453dec9fcdSqs 			/* Free already allocated intr */
34463dec9fcdSqs 			kmem_free(intrp->htable, intrp->intr_size);
34473dec9fcdSqs 
34483dec9fcdSqs 			(void) hxge_ldgv_uninit(hxgep);
34493dec9fcdSqs 
34503dec9fcdSqs 			return (HXGE_ERROR | HXGE_DDI_FAILED);
34513dec9fcdSqs 		}
34523dec9fcdSqs 		intrp->intr_added++;
34533dec9fcdSqs 	}
34543dec9fcdSqs 
34553dec9fcdSqs 	intrp->msi_intx_cnt = nactual;
34563dec9fcdSqs 
34573dec9fcdSqs 	(void) ddi_intr_get_cap(intrp->htable[0], &intrp->intr_cap);
34583dec9fcdSqs 
34593dec9fcdSqs 	status = hxge_intr_ldgv_init(hxgep);
34603dec9fcdSqs 
34613dec9fcdSqs 	HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_add_intrs_adv_type_fix"));
34623dec9fcdSqs 
34633dec9fcdSqs 	return (status);
34643dec9fcdSqs }
34653dec9fcdSqs 
34663dec9fcdSqs /*ARGSUSED*/
34673dec9fcdSqs static void
34683dec9fcdSqs hxge_remove_intrs(p_hxge_t hxgep)
34693dec9fcdSqs {
34703dec9fcdSqs 	int		i, inum;
34713dec9fcdSqs 	p_hxge_intr_t	intrp;
34723dec9fcdSqs 
34733dec9fcdSqs 	HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_remove_intrs"));
34743dec9fcdSqs 	intrp = (p_hxge_intr_t)&hxgep->hxge_intr_type;
34753dec9fcdSqs 	if (!intrp->intr_registered) {
34763dec9fcdSqs 		HXGE_DEBUG_MSG((hxgep, INT_CTL,
34773dec9fcdSqs 		    "<== hxge_remove_intrs: interrupts not registered"));
34783dec9fcdSqs 		return;
34793dec9fcdSqs 	}
34803dec9fcdSqs 
34813dec9fcdSqs 	HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_remove_intrs:advanced"));
34823dec9fcdSqs 
34833dec9fcdSqs 	if (intrp->intr_cap & DDI_INTR_FLAG_BLOCK) {
34843dec9fcdSqs 		(void) ddi_intr_block_disable(intrp->htable,
34853dec9fcdSqs 		    intrp->intr_added);
34863dec9fcdSqs 	} else {
34873dec9fcdSqs 		for (i = 0; i < intrp->intr_added; i++) {
34883dec9fcdSqs 			(void) ddi_intr_disable(intrp->htable[i]);
34893dec9fcdSqs 		}
34903dec9fcdSqs 	}
34913dec9fcdSqs 
34923dec9fcdSqs 	for (inum = 0; inum < intrp->intr_added; inum++) {
34933dec9fcdSqs 		if (intrp->htable[inum]) {
34943dec9fcdSqs 			(void) ddi_intr_remove_handler(intrp->htable[inum]);
34953dec9fcdSqs 		}
34963dec9fcdSqs 	}
34973dec9fcdSqs 
34983dec9fcdSqs 	for (inum = 0; inum < intrp->msi_intx_cnt; inum++) {
34993dec9fcdSqs 		if (intrp->htable[inum]) {
35003dec9fcdSqs 			HXGE_DEBUG_MSG((hxgep, DDI_CTL,
35013dec9fcdSqs 			    "hxge_remove_intrs: ddi_intr_free inum %d "
35023dec9fcdSqs 			    "msi_intx_cnt %d intr_added %d",
35033dec9fcdSqs 			    inum, intrp->msi_intx_cnt, intrp->intr_added));
35043dec9fcdSqs 
35053dec9fcdSqs 			(void) ddi_intr_free(intrp->htable[inum]);
35063dec9fcdSqs 		}
35073dec9fcdSqs 	}
35083dec9fcdSqs 
35093dec9fcdSqs 	kmem_free(intrp->htable, intrp->intr_size);
35103dec9fcdSqs 	intrp->intr_registered = B_FALSE;
35113dec9fcdSqs 	intrp->intr_enabled = B_FALSE;
35123dec9fcdSqs 	intrp->msi_intx_cnt = 0;
35133dec9fcdSqs 	intrp->intr_added = 0;
35143dec9fcdSqs 
35153dec9fcdSqs 	(void) hxge_ldgv_uninit(hxgep);
35163dec9fcdSqs 
35173dec9fcdSqs 	HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_remove_intrs"));
35183dec9fcdSqs }
35193dec9fcdSqs 
35203dec9fcdSqs /*ARGSUSED*/
35213dec9fcdSqs static void
35223dec9fcdSqs hxge_remove_soft_intrs(p_hxge_t hxgep)
35233dec9fcdSqs {
35243dec9fcdSqs 	HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_remove_soft_intrs"));
35253dec9fcdSqs 
35263dec9fcdSqs 	if (hxgep->resched_id) {
35273dec9fcdSqs 		ddi_remove_softintr(hxgep->resched_id);
35283dec9fcdSqs 		HXGE_DEBUG_MSG((hxgep, INT_CTL,
35293dec9fcdSqs 		    "==> hxge_remove_soft_intrs: removed"));
35303dec9fcdSqs 		hxgep->resched_id = NULL;
35313dec9fcdSqs 	}
35323dec9fcdSqs 
35333dec9fcdSqs 	HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_remove_soft_intrs"));
35343dec9fcdSqs }
35353dec9fcdSqs 
35363dec9fcdSqs /*ARGSUSED*/
35373dec9fcdSqs void
35383dec9fcdSqs hxge_intrs_enable(p_hxge_t hxgep)
35393dec9fcdSqs {
35403dec9fcdSqs 	p_hxge_intr_t	intrp;
35413dec9fcdSqs 	int		i;
35423dec9fcdSqs 	int		status;
35433dec9fcdSqs 
35443dec9fcdSqs 	HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_intrs_enable"));
35453dec9fcdSqs 
35463dec9fcdSqs 	intrp = (p_hxge_intr_t)&hxgep->hxge_intr_type;
35473dec9fcdSqs 
35483dec9fcdSqs 	if (!intrp->intr_registered) {
35493dec9fcdSqs 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "<== hxge_intrs_enable: "
35503dec9fcdSqs 		    "interrupts are not registered"));
35513dec9fcdSqs 		return;
35523dec9fcdSqs 	}
35533dec9fcdSqs 
35543dec9fcdSqs 	if (intrp->intr_enabled) {
35553dec9fcdSqs 		HXGE_DEBUG_MSG((hxgep, INT_CTL,
35563dec9fcdSqs 		    "<== hxge_intrs_enable: already enabled"));
35573dec9fcdSqs 		return;
35583dec9fcdSqs 	}
35593dec9fcdSqs 
35603dec9fcdSqs 	if (intrp->intr_cap & DDI_INTR_FLAG_BLOCK) {
35613dec9fcdSqs 		status = ddi_intr_block_enable(intrp->htable,
35623dec9fcdSqs 		    intrp->intr_added);
35633dec9fcdSqs 		HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_intrs_enable "
35643dec9fcdSqs 		    "block enable - status 0x%x total inums #%d\n",
35653dec9fcdSqs 		    status, intrp->intr_added));
35663dec9fcdSqs 	} else {
35673dec9fcdSqs 		for (i = 0; i < intrp->intr_added; i++) {
35683dec9fcdSqs 			status = ddi_intr_enable(intrp->htable[i]);
35693dec9fcdSqs 			HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_intrs_enable "
35703dec9fcdSqs 			    "ddi_intr_enable:enable - status 0x%x "
35713dec9fcdSqs 			    "total inums %d enable inum #%d\n",
35723dec9fcdSqs 			    status, intrp->intr_added, i));
35733dec9fcdSqs 			if (status == DDI_SUCCESS) {
35743dec9fcdSqs 				intrp->intr_enabled = B_TRUE;
35753dec9fcdSqs 			}
35763dec9fcdSqs 		}
35773dec9fcdSqs 	}
35783dec9fcdSqs 
35793dec9fcdSqs 	HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_intrs_enable"));
35803dec9fcdSqs }
35813dec9fcdSqs 
35823dec9fcdSqs /*ARGSUSED*/
35833dec9fcdSqs static void
35843dec9fcdSqs hxge_intrs_disable(p_hxge_t hxgep)
35853dec9fcdSqs {
35863dec9fcdSqs 	p_hxge_intr_t	intrp;
35873dec9fcdSqs 	int		i;
35883dec9fcdSqs 
35893dec9fcdSqs 	HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_intrs_disable"));
35903dec9fcdSqs 
35913dec9fcdSqs 	intrp = (p_hxge_intr_t)&hxgep->hxge_intr_type;
35923dec9fcdSqs 
35933dec9fcdSqs 	if (!intrp->intr_registered) {
35943dec9fcdSqs 		HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_intrs_disable: "
35953dec9fcdSqs 		    "interrupts are not registered"));
35963dec9fcdSqs 		return;
35973dec9fcdSqs 	}
35983dec9fcdSqs 
35993dec9fcdSqs 	if (intrp->intr_cap & DDI_INTR_FLAG_BLOCK) {
36003dec9fcdSqs 		(void) ddi_intr_block_disable(intrp->htable,
36013dec9fcdSqs 		    intrp->intr_added);
36023dec9fcdSqs 	} else {
36033dec9fcdSqs 		for (i = 0; i < intrp->intr_added; i++) {
36043dec9fcdSqs 			(void) ddi_intr_disable(intrp->htable[i]);
36053dec9fcdSqs 		}
36063dec9fcdSqs 	}
36073dec9fcdSqs 
36083dec9fcdSqs 	intrp->intr_enabled = B_FALSE;
36093dec9fcdSqs 	HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_intrs_disable"));
36103dec9fcdSqs }
36113dec9fcdSqs 
36123dec9fcdSqs static hxge_status_t
36133dec9fcdSqs hxge_mac_register(p_hxge_t hxgep)
36143dec9fcdSqs {
36153dec9fcdSqs 	mac_register_t	*macp;
36163dec9fcdSqs 	int		status;
36173dec9fcdSqs 
36183dec9fcdSqs 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_mac_register"));
36193dec9fcdSqs 
36203dec9fcdSqs 	if ((macp = mac_alloc(MAC_VERSION)) == NULL)
36213dec9fcdSqs 		return (HXGE_ERROR);
36223dec9fcdSqs 
36233dec9fcdSqs 	macp->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
36243dec9fcdSqs 	macp->m_driver = hxgep;
36253dec9fcdSqs 	macp->m_dip = hxgep->dip;
36263dec9fcdSqs 	macp->m_src_addr = hxgep->ouraddr.ether_addr_octet;
36273dec9fcdSqs 
36283dec9fcdSqs 	HXGE_DEBUG_MSG((hxgep, DDI_CTL,
36293dec9fcdSqs 	    "hxge_mac_register: ether addr is %x:%x:%x:%x:%x:%x",
36303dec9fcdSqs 	    macp->m_src_addr[0],
36313dec9fcdSqs 	    macp->m_src_addr[1],
36323dec9fcdSqs 	    macp->m_src_addr[2],
36333dec9fcdSqs 	    macp->m_src_addr[3],
36343dec9fcdSqs 	    macp->m_src_addr[4],
36353dec9fcdSqs 	    macp->m_src_addr[5]));
36363dec9fcdSqs 
36373dec9fcdSqs 	macp->m_callbacks = &hxge_m_callbacks;
36383dec9fcdSqs 	macp->m_min_sdu = 0;
36393dec9fcdSqs 	macp->m_max_sdu = hxgep->vmac.maxframesize -
36403dec9fcdSqs 	    sizeof (struct ether_header) - ETHERFCSL - 4 - TX_PKT_HEADER_SIZE;
36413dec9fcdSqs 
36423dec9fcdSqs 	status = mac_register(macp, &hxgep->mach);
36433dec9fcdSqs 	mac_free(macp);
36443dec9fcdSqs 
36453dec9fcdSqs 	if (status != 0) {
36463dec9fcdSqs 		cmn_err(CE_WARN,
36473dec9fcdSqs 		    "hxge_mac_register failed (status %d instance %d)",
36483dec9fcdSqs 		    status, hxgep->instance);
36493dec9fcdSqs 		return (HXGE_ERROR);
36503dec9fcdSqs 	}
36513dec9fcdSqs 
36523dec9fcdSqs 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_mac_register success "
36533dec9fcdSqs 	    "(instance %d)", hxgep->instance));
36543dec9fcdSqs 
36553dec9fcdSqs 	return (HXGE_OK);
36563dec9fcdSqs }
36573dec9fcdSqs 
36583dec9fcdSqs static int
36593dec9fcdSqs hxge_init_common_dev(p_hxge_t hxgep)
36603dec9fcdSqs {
36613dec9fcdSqs 	p_hxge_hw_list_t	hw_p;
36623dec9fcdSqs 	dev_info_t		*p_dip;
36633dec9fcdSqs 
36643dec9fcdSqs 	HXGE_DEBUG_MSG((hxgep, MOD_CTL, "==> hxge_init_common_dev"));
36653dec9fcdSqs 
36663dec9fcdSqs 	p_dip = hxgep->p_dip;
36673dec9fcdSqs 	MUTEX_ENTER(&hxge_common_lock);
36683dec9fcdSqs 
36693dec9fcdSqs 	/*
36703dec9fcdSqs 	 * Loop through existing per Hydra hardware list.
36713dec9fcdSqs 	 */
36723dec9fcdSqs 	for (hw_p = hxge_hw_list; hw_p; hw_p = hw_p->next) {
36733dec9fcdSqs 		HXGE_DEBUG_MSG((hxgep, MOD_CTL,
36743dec9fcdSqs 		    "==> hxge_init_common_dev: hw_p $%p parent dip $%p",
36753dec9fcdSqs 		    hw_p, p_dip));
36763dec9fcdSqs 		if (hw_p->parent_devp == p_dip) {
36773dec9fcdSqs 			hxgep->hxge_hw_p = hw_p;
36783dec9fcdSqs 			hw_p->ndevs++;
36793dec9fcdSqs 			hw_p->hxge_p = hxgep;
36803dec9fcdSqs 			HXGE_DEBUG_MSG((hxgep, MOD_CTL,
36813dec9fcdSqs 			    "==> hxge_init_common_device: "
36823dec9fcdSqs 			    "hw_p $%p parent dip $%p ndevs %d (found)",
36833dec9fcdSqs 			    hw_p, p_dip, hw_p->ndevs));
36843dec9fcdSqs 			break;
36853dec9fcdSqs 		}
36863dec9fcdSqs 	}
36873dec9fcdSqs 
36883dec9fcdSqs 	if (hw_p == NULL) {
36893dec9fcdSqs 		HXGE_DEBUG_MSG((hxgep, MOD_CTL,
36903dec9fcdSqs 		    "==> hxge_init_common_dev: parent dip $%p (new)", p_dip));
36913dec9fcdSqs 		hw_p = kmem_zalloc(sizeof (hxge_hw_list_t), KM_SLEEP);
36923dec9fcdSqs 		hw_p->parent_devp = p_dip;
36933dec9fcdSqs 		hw_p->magic = HXGE_MAGIC;
36943dec9fcdSqs 		hxgep->hxge_hw_p = hw_p;
36953dec9fcdSqs 		hw_p->ndevs++;
36963dec9fcdSqs 		hw_p->hxge_p = hxgep;
36973dec9fcdSqs 		hw_p->next = hxge_hw_list;
36983dec9fcdSqs 
36993dec9fcdSqs 		MUTEX_INIT(&hw_p->hxge_cfg_lock, NULL, MUTEX_DRIVER, NULL);
37003dec9fcdSqs 		MUTEX_INIT(&hw_p->hxge_tcam_lock, NULL, MUTEX_DRIVER, NULL);
37013dec9fcdSqs 		MUTEX_INIT(&hw_p->hxge_vlan_lock, NULL, MUTEX_DRIVER, NULL);
37023dec9fcdSqs 
37033dec9fcdSqs 		hxge_hw_list = hw_p;
37043dec9fcdSqs 	}
37053dec9fcdSqs 	MUTEX_EXIT(&hxge_common_lock);
37063dec9fcdSqs 	HXGE_DEBUG_MSG((hxgep, MOD_CTL,
37073dec9fcdSqs 	    "==> hxge_init_common_dev (hxge_hw_list) $%p", hxge_hw_list));
37083dec9fcdSqs 	HXGE_DEBUG_MSG((hxgep, MOD_CTL, "<== hxge_init_common_dev"));
37093dec9fcdSqs 
37103dec9fcdSqs 	return (HXGE_OK);
37113dec9fcdSqs }
37123dec9fcdSqs 
/*
 * hxge_uninit_common_dev:
 *	Drop this instance's reference on the shared per-parent hardware
 *	entry.  When the last reference is released, destroy the entry's
 *	locks, unlink it from the global hxge_hw_list and free it.
 */
static void
hxge_uninit_common_dev(p_hxge_t hxgep)
{
	/* h_hw_p trails hw_p by one node so the match can be unlinked. */
	p_hxge_hw_list_t	hw_p, h_hw_p;
	dev_info_t		*p_dip;

	HXGE_DEBUG_MSG((hxgep, MOD_CTL, "==> hxge_uninit_common_dev"));
	if (hxgep->hxge_hw_p == NULL) {
		HXGE_DEBUG_MSG((hxgep, MOD_CTL,
		    "<== hxge_uninit_common_dev (no common)"));
		return;
	}

	MUTEX_ENTER(&hxge_common_lock);
	h_hw_p = hxge_hw_list;
	for (hw_p = hxge_hw_list; hw_p; hw_p = hw_p->next) {
		p_dip = hw_p->parent_devp;
		/* Match on pointer identity plus the magic sanity tag. */
		if (hxgep->hxge_hw_p == hw_p && p_dip == hxgep->p_dip &&
		    hxgep->hxge_hw_p->magic == HXGE_MAGIC &&
		    hw_p->magic == HXGE_MAGIC) {
			HXGE_DEBUG_MSG((hxgep, MOD_CTL,
			    "==> hxge_uninit_common_dev: "
			    "hw_p $%p parent dip $%p ndevs %d (found)",
			    hw_p, p_dip, hw_p->ndevs));

			/* Detach this instance from the shared entry. */
			hxgep->hxge_hw_p = NULL;
			if (hw_p->ndevs) {
				hw_p->ndevs--;
			}
			hw_p->hxge_p = NULL;
			/* Last reference gone: destroy locks and unlink. */
			if (!hw_p->ndevs) {
				MUTEX_DESTROY(&hw_p->hxge_vlan_lock);
				MUTEX_DESTROY(&hw_p->hxge_tcam_lock);
				MUTEX_DESTROY(&hw_p->hxge_cfg_lock);
				HXGE_DEBUG_MSG((hxgep, MOD_CTL,
				    "==> hxge_uninit_common_dev: "
				    "hw_p $%p parent dip $%p ndevs %d (last)",
				    hw_p, p_dip, hw_p->ndevs));

				if (hw_p == hxge_hw_list) {
					/* Matched node is the list head. */
					HXGE_DEBUG_MSG((hxgep, MOD_CTL,
					    "==> hxge_uninit_common_dev:"
					    "remove head "
					    "hw_p $%p parent dip $%p "
					    "ndevs %d (head)",
					    hw_p, p_dip, hw_p->ndevs));
					hxge_hw_list = hw_p->next;
				} else {
					/* Splice out via the trailing ptr. */
					HXGE_DEBUG_MSG((hxgep, MOD_CTL,
					    "==> hxge_uninit_common_dev:"
					    "remove middle "
					    "hw_p $%p parent dip $%p "
					    "ndevs %d (middle)",
					    hw_p, p_dip, hw_p->ndevs));
					h_hw_p->next = hw_p->next;
				}

				KMEM_FREE(hw_p, sizeof (hxge_hw_list_t));
			}
			break;
		} else {
			/* Not the node: remember it as the trailing prev. */
			h_hw_p = hw_p;
		}
	}

	MUTEX_EXIT(&hxge_common_lock);
	HXGE_DEBUG_MSG((hxgep, MOD_CTL,
	    "==> hxge_uninit_common_dev (hxge_hw_list) $%p", hxge_hw_list));

	HXGE_DEBUG_MSG((hxgep, MOD_CTL, "<= hxge_uninit_common_dev"));
}
3784