xref: /illumos-gate/usr/src/uts/common/io/hxge/hxge_main.c (revision 3dec9fcd)
1*3dec9fcdSqs /*
2*3dec9fcdSqs  * CDDL HEADER START
3*3dec9fcdSqs  *
4*3dec9fcdSqs  * The contents of this file are subject to the terms of the
5*3dec9fcdSqs  * Common Development and Distribution License (the "License").
6*3dec9fcdSqs  * You may not use this file except in compliance with the License.
7*3dec9fcdSqs  *
8*3dec9fcdSqs  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9*3dec9fcdSqs  * or http://www.opensolaris.org/os/licensing.
10*3dec9fcdSqs  * See the License for the specific language governing permissions
11*3dec9fcdSqs  * and limitations under the License.
12*3dec9fcdSqs  *
13*3dec9fcdSqs  * When distributing Covered Code, include this CDDL HEADER in each
14*3dec9fcdSqs  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15*3dec9fcdSqs  * If applicable, add the following below this CDDL HEADER, with the
16*3dec9fcdSqs  * fields enclosed by brackets "[]" replaced with your own identifying
17*3dec9fcdSqs  * information: Portions Copyright [yyyy] [name of copyright owner]
18*3dec9fcdSqs  *
19*3dec9fcdSqs  * CDDL HEADER END
20*3dec9fcdSqs  */
21*3dec9fcdSqs /*
22*3dec9fcdSqs  * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
23*3dec9fcdSqs  * Use is subject to license terms.
24*3dec9fcdSqs  */
25*3dec9fcdSqs 
26*3dec9fcdSqs #pragma ident	"%Z%%M%	%I%	%E% SMI"
27*3dec9fcdSqs 
28*3dec9fcdSqs /*
29*3dec9fcdSqs  * SunOs MT STREAMS Hydra 10Gb Ethernet Device Driver.
30*3dec9fcdSqs  */
31*3dec9fcdSqs #include <hxge_impl.h>
32*3dec9fcdSqs #include <hxge_pfc.h>
33*3dec9fcdSqs 
34*3dec9fcdSqs /*
35*3dec9fcdSqs  * PSARC/2007/453 MSI-X interrupt limit override
36*3dec9fcdSqs  * (This PSARC case is limited to MSI-X vectors
37*3dec9fcdSqs  *  and SPARC platforms only).
38*3dec9fcdSqs  */
39*3dec9fcdSqs #if defined(_BIG_ENDIAN)
40*3dec9fcdSqs uint32_t hxge_msi_enable = 2;
41*3dec9fcdSqs #else
42*3dec9fcdSqs uint32_t hxge_msi_enable = 1;
43*3dec9fcdSqs #endif
44*3dec9fcdSqs 
45*3dec9fcdSqs /*
46*3dec9fcdSqs  * Globals: tunable parameters (/etc/system or adb)
47*3dec9fcdSqs  *
48*3dec9fcdSqs  */
49*3dec9fcdSqs uint32_t hxge_rbr_size = HXGE_RBR_RBB_DEFAULT;
50*3dec9fcdSqs uint32_t hxge_rbr_spare_size = 0;
51*3dec9fcdSqs uint32_t hxge_rcr_size = HXGE_RCR_DEFAULT;
52*3dec9fcdSqs uint32_t hxge_tx_ring_size = HXGE_TX_RING_DEFAULT;
53*3dec9fcdSqs uint32_t hxge_bcopy_thresh = TX_BCOPY_MAX;
54*3dec9fcdSqs uint32_t hxge_dvma_thresh = TX_FASTDVMA_MIN;
55*3dec9fcdSqs uint32_t hxge_dma_stream_thresh = TX_STREAM_MIN;
56*3dec9fcdSqs uint32_t hxge_jumbo_mtu = TX_JUMBO_MTU;
57*3dec9fcdSqs boolean_t hxge_jumbo_enable = B_FALSE;
58*3dec9fcdSqs 
59*3dec9fcdSqs static hxge_os_mutex_t hxgedebuglock;
60*3dec9fcdSqs static int hxge_debug_init = 0;
61*3dec9fcdSqs 
62*3dec9fcdSqs /*
63*3dec9fcdSqs  * Debugging flags:
64*3dec9fcdSqs  *		hxge_no_tx_lb : transmit load balancing
65*3dec9fcdSqs  *		hxge_tx_lb_policy: 0 - TCP/UDP port (default)
66*3dec9fcdSqs  *				   1 - From the Stack
67*3dec9fcdSqs  *				   2 - Destination IP Address
68*3dec9fcdSqs  */
69*3dec9fcdSqs uint32_t hxge_no_tx_lb = 0;
70*3dec9fcdSqs uint32_t hxge_tx_lb_policy = HXGE_TX_LB_TCPUDP;
71*3dec9fcdSqs 
72*3dec9fcdSqs /*
73*3dec9fcdSqs  * Add tunable to reduce the amount of time spent in the
74*3dec9fcdSqs  * ISR doing Rx Processing.
75*3dec9fcdSqs  */
76*3dec9fcdSqs uint32_t hxge_max_rx_pkts = 1024;
77*3dec9fcdSqs 
78*3dec9fcdSqs /*
79*3dec9fcdSqs  * Tunables to manage the receive buffer blocks.
80*3dec9fcdSqs  *
81*3dec9fcdSqs  * hxge_rx_threshold_hi: copy all buffers.
82*3dec9fcdSqs  * hxge_rx_bcopy_size_type: receive buffer block size type.
83*3dec9fcdSqs  * hxge_rx_threshold_lo: copy only up to tunable block size type.
84*3dec9fcdSqs  */
85*3dec9fcdSqs hxge_rxbuf_threshold_t hxge_rx_threshold_hi = HXGE_RX_COPY_6;
86*3dec9fcdSqs hxge_rxbuf_type_t hxge_rx_buf_size_type = RCR_PKTBUFSZ_0;
87*3dec9fcdSqs hxge_rxbuf_threshold_t hxge_rx_threshold_lo = HXGE_RX_COPY_3;
88*3dec9fcdSqs 
89*3dec9fcdSqs rtrace_t hpi_rtracebuf;
90*3dec9fcdSqs 
91*3dec9fcdSqs /*
92*3dec9fcdSqs  * Function Prototypes
93*3dec9fcdSqs  */
94*3dec9fcdSqs static int hxge_attach(dev_info_t *, ddi_attach_cmd_t);
95*3dec9fcdSqs static int hxge_detach(dev_info_t *, ddi_detach_cmd_t);
96*3dec9fcdSqs static void hxge_unattach(p_hxge_t);
97*3dec9fcdSqs 
98*3dec9fcdSqs static hxge_status_t hxge_setup_system_dma_pages(p_hxge_t);
99*3dec9fcdSqs 
100*3dec9fcdSqs static hxge_status_t hxge_setup_mutexes(p_hxge_t);
101*3dec9fcdSqs static void hxge_destroy_mutexes(p_hxge_t);
102*3dec9fcdSqs 
103*3dec9fcdSqs static hxge_status_t hxge_map_regs(p_hxge_t hxgep);
104*3dec9fcdSqs static void hxge_unmap_regs(p_hxge_t hxgep);
105*3dec9fcdSqs 
106*3dec9fcdSqs hxge_status_t hxge_add_intrs(p_hxge_t hxgep);
107*3dec9fcdSqs static hxge_status_t hxge_add_soft_intrs(p_hxge_t hxgep);
108*3dec9fcdSqs static void hxge_remove_intrs(p_hxge_t hxgep);
109*3dec9fcdSqs static void hxge_remove_soft_intrs(p_hxge_t hxgep);
110*3dec9fcdSqs static hxge_status_t hxge_add_intrs_adv(p_hxge_t hxgep);
111*3dec9fcdSqs static hxge_status_t hxge_add_intrs_adv_type(p_hxge_t, uint32_t);
112*3dec9fcdSqs static hxge_status_t hxge_add_intrs_adv_type_fix(p_hxge_t, uint32_t);
113*3dec9fcdSqs void hxge_intrs_enable(p_hxge_t hxgep);
114*3dec9fcdSqs static void hxge_intrs_disable(p_hxge_t hxgep);
115*3dec9fcdSqs static void hxge_suspend(p_hxge_t);
116*3dec9fcdSqs static hxge_status_t hxge_resume(p_hxge_t);
117*3dec9fcdSqs hxge_status_t hxge_setup_dev(p_hxge_t);
118*3dec9fcdSqs static void hxge_destroy_dev(p_hxge_t);
119*3dec9fcdSqs hxge_status_t hxge_alloc_mem_pool(p_hxge_t);
120*3dec9fcdSqs static void hxge_free_mem_pool(p_hxge_t);
121*3dec9fcdSqs static hxge_status_t hxge_alloc_rx_mem_pool(p_hxge_t);
122*3dec9fcdSqs static void hxge_free_rx_mem_pool(p_hxge_t);
123*3dec9fcdSqs static hxge_status_t hxge_alloc_tx_mem_pool(p_hxge_t);
124*3dec9fcdSqs static void hxge_free_tx_mem_pool(p_hxge_t);
125*3dec9fcdSqs static hxge_status_t hxge_dma_mem_alloc(p_hxge_t, dma_method_t,
126*3dec9fcdSqs     struct ddi_dma_attr *, size_t, ddi_device_acc_attr_t *, uint_t,
127*3dec9fcdSqs     p_hxge_dma_common_t);
128*3dec9fcdSqs static void hxge_dma_mem_free(p_hxge_dma_common_t);
129*3dec9fcdSqs static hxge_status_t hxge_alloc_rx_buf_dma(p_hxge_t, uint16_t,
130*3dec9fcdSqs     p_hxge_dma_common_t *, size_t, size_t, uint32_t *);
131*3dec9fcdSqs static void hxge_free_rx_buf_dma(p_hxge_t, p_hxge_dma_common_t, uint32_t);
132*3dec9fcdSqs static hxge_status_t hxge_alloc_rx_cntl_dma(p_hxge_t, uint16_t,
133*3dec9fcdSqs     p_hxge_dma_common_t *, size_t);
134*3dec9fcdSqs static void hxge_free_rx_cntl_dma(p_hxge_t, p_hxge_dma_common_t);
135*3dec9fcdSqs static hxge_status_t hxge_alloc_tx_buf_dma(p_hxge_t, uint16_t,
136*3dec9fcdSqs     p_hxge_dma_common_t *, size_t, size_t, uint32_t *);
137*3dec9fcdSqs static void hxge_free_tx_buf_dma(p_hxge_t, p_hxge_dma_common_t, uint32_t);
138*3dec9fcdSqs static hxge_status_t hxge_alloc_tx_cntl_dma(p_hxge_t, uint16_t,
139*3dec9fcdSqs     p_hxge_dma_common_t *, size_t);
140*3dec9fcdSqs static void hxge_free_tx_cntl_dma(p_hxge_t, p_hxge_dma_common_t);
141*3dec9fcdSqs static int hxge_init_common_dev(p_hxge_t);
142*3dec9fcdSqs static void hxge_uninit_common_dev(p_hxge_t);
143*3dec9fcdSqs 
144*3dec9fcdSqs /*
145*3dec9fcdSqs  * The next declarations are for the GLDv3 interface.
146*3dec9fcdSqs  */
147*3dec9fcdSqs static int hxge_m_start(void *);
148*3dec9fcdSqs static void hxge_m_stop(void *);
149*3dec9fcdSqs static int hxge_m_unicst(void *, const uint8_t *);
150*3dec9fcdSqs static int hxge_m_multicst(void *, boolean_t, const uint8_t *);
151*3dec9fcdSqs static int hxge_m_promisc(void *, boolean_t);
152*3dec9fcdSqs static void hxge_m_ioctl(void *, queue_t *, mblk_t *);
153*3dec9fcdSqs static void hxge_m_resources(void *);
154*3dec9fcdSqs static hxge_status_t hxge_mac_register(p_hxge_t hxgep);
155*3dec9fcdSqs 
156*3dec9fcdSqs static int hxge_m_mmac_add(void *arg, mac_multi_addr_t *maddr);
157*3dec9fcdSqs static int hxge_m_mmac_remove(void *arg, mac_addr_slot_t slot);
158*3dec9fcdSqs static int hxge_m_mmac_modify(void *arg, mac_multi_addr_t *maddr);
159*3dec9fcdSqs static int hxge_m_mmac_get(void *arg, mac_multi_addr_t *maddr);
160*3dec9fcdSqs static boolean_t hxge_m_getcapab(void *, mac_capab_t, void *);
161*3dec9fcdSqs 
162*3dec9fcdSqs #define	HXGE_MAGIC	0x4E584745UL
163*3dec9fcdSqs #define	MAX_DUMP_SZ 256
164*3dec9fcdSqs 
165*3dec9fcdSqs #define	HXGE_M_CALLBACK_FLAGS	(MC_RESOURCES | MC_IOCTL | MC_GETCAPAB)
166*3dec9fcdSqs 
167*3dec9fcdSqs extern mblk_t *hxge_m_tx(void *arg, mblk_t *mp);
168*3dec9fcdSqs extern hxge_status_t hxge_pfc_set_default_mac_addr(p_hxge_t hxgep);
169*3dec9fcdSqs 
/*
 * GLDv3 (mac(9E)) callback vector registered with the MAC layer by
 * hxge_mac_register().  HXGE_M_CALLBACK_FLAGS advertises which of the
 * optional entry points (mc_resources, mc_ioctl, mc_getcapab) are valid.
 */
static mac_callbacks_t hxge_m_callbacks = {
	HXGE_M_CALLBACK_FLAGS,	/* mc_callbacks: optional-entry-point mask */
	hxge_m_stat,		/* mc_getstat: per-driver statistics */
	hxge_m_start,		/* mc_start: bring the device up */
	hxge_m_stop,		/* mc_stop: quiesce the device */
	hxge_m_promisc,		/* mc_setpromisc: toggle promiscuous mode */
	hxge_m_multicst,	/* mc_multicst: add/remove multicast addr */
	hxge_m_unicst,		/* mc_unicst: set primary unicast addr */
	hxge_m_tx,		/* mc_tx: transmit an mblk chain */
	hxge_m_resources,	/* mc_resources: export rx ring resources */
	hxge_m_ioctl,		/* mc_ioctl: driver-private ioctls */
	hxge_m_getcapab		/* mc_getcapab: capability negotiation */
};
183*3dec9fcdSqs 
184*3dec9fcdSqs /* Enable debug messages as necessary. */
185*3dec9fcdSqs uint64_t hxge_debug_level = 0x0;
186*3dec9fcdSqs 
187*3dec9fcdSqs /*
188*3dec9fcdSqs  * This list contains the instance structures for the Hydra
189*3dec9fcdSqs  * devices present in the system. The lock exists to guarantee
190*3dec9fcdSqs  * mutually exclusive access to the list.
191*3dec9fcdSqs  */
192*3dec9fcdSqs void *hxge_list = NULL;
193*3dec9fcdSqs void *hxge_hw_list = NULL;
194*3dec9fcdSqs hxge_os_mutex_t hxge_common_lock;
195*3dec9fcdSqs 
196*3dec9fcdSqs extern uint64_t hpi_debug_level;
197*3dec9fcdSqs 
198*3dec9fcdSqs extern hxge_status_t hxge_ldgv_init();
199*3dec9fcdSqs extern hxge_status_t hxge_ldgv_uninit();
200*3dec9fcdSqs extern hxge_status_t hxge_intr_ldgv_init();
201*3dec9fcdSqs extern void hxge_fm_init(p_hxge_t hxgep, ddi_device_acc_attr_t *reg_attr,
202*3dec9fcdSqs     ddi_device_acc_attr_t *desc_attr, ddi_dma_attr_t *dma_attr);
203*3dec9fcdSqs extern void hxge_fm_fini(p_hxge_t hxgep);
204*3dec9fcdSqs 
205*3dec9fcdSqs /*
206*3dec9fcdSqs  * Count used to maintain the number of buffers being used
207*3dec9fcdSqs  * by Hydra instances and loaned up to the upper layers.
208*3dec9fcdSqs  */
209*3dec9fcdSqs uint32_t hxge_mblks_pending = 0;
210*3dec9fcdSqs 
211*3dec9fcdSqs /*
212*3dec9fcdSqs  * Device register access attributes for PIO.
213*3dec9fcdSqs  */
214*3dec9fcdSqs static ddi_device_acc_attr_t hxge_dev_reg_acc_attr = {
215*3dec9fcdSqs 	DDI_DEVICE_ATTR_V0,
216*3dec9fcdSqs 	DDI_STRUCTURE_LE_ACC,
217*3dec9fcdSqs 	DDI_STRICTORDER_ACC,
218*3dec9fcdSqs };
219*3dec9fcdSqs 
220*3dec9fcdSqs /*
221*3dec9fcdSqs  * Device descriptor access attributes for DMA.
222*3dec9fcdSqs  */
223*3dec9fcdSqs static ddi_device_acc_attr_t hxge_dev_desc_dma_acc_attr = {
224*3dec9fcdSqs 	DDI_DEVICE_ATTR_V0,
225*3dec9fcdSqs 	DDI_STRUCTURE_LE_ACC,
226*3dec9fcdSqs 	DDI_STRICTORDER_ACC
227*3dec9fcdSqs };
228*3dec9fcdSqs 
229*3dec9fcdSqs /*
230*3dec9fcdSqs  * Device buffer access attributes for DMA.
231*3dec9fcdSqs  */
232*3dec9fcdSqs static ddi_device_acc_attr_t hxge_dev_buf_dma_acc_attr = {
233*3dec9fcdSqs 	DDI_DEVICE_ATTR_V0,
234*3dec9fcdSqs 	DDI_STRUCTURE_BE_ACC,
235*3dec9fcdSqs 	DDI_STRICTORDER_ACC
236*3dec9fcdSqs };
237*3dec9fcdSqs 
238*3dec9fcdSqs ddi_dma_attr_t hxge_desc_dma_attr = {
239*3dec9fcdSqs 	DMA_ATTR_V0,		/* version number. */
240*3dec9fcdSqs 	0,			/* low address */
241*3dec9fcdSqs 	0xffffffffffffffff,	/* high address */
242*3dec9fcdSqs 	0xffffffffffffffff,	/* address counter max */
243*3dec9fcdSqs 	0x100000,		/* alignment */
244*3dec9fcdSqs 	0xfc00fc,		/* dlim_burstsizes */
245*3dec9fcdSqs 	0x1,			/* minimum transfer size */
246*3dec9fcdSqs 	0xffffffffffffffff,	/* maximum transfer size */
247*3dec9fcdSqs 	0xffffffffffffffff,	/* maximum segment size */
248*3dec9fcdSqs 	1,			/* scatter/gather list length */
249*3dec9fcdSqs 	(unsigned int)1,	/* granularity */
250*3dec9fcdSqs 	0			/* attribute flags */
251*3dec9fcdSqs };
252*3dec9fcdSqs 
253*3dec9fcdSqs ddi_dma_attr_t hxge_tx_dma_attr = {
254*3dec9fcdSqs 	DMA_ATTR_V0,		/* version number. */
255*3dec9fcdSqs 	0,			/* low address */
256*3dec9fcdSqs 	0xffffffffffffffff,	/* high address */
257*3dec9fcdSqs 	0xffffffffffffffff,	/* address counter max */
258*3dec9fcdSqs #if defined(_BIG_ENDIAN)
259*3dec9fcdSqs 	0x2000,			/* alignment */
260*3dec9fcdSqs #else
261*3dec9fcdSqs 	0x1000,			/* alignment */
262*3dec9fcdSqs #endif
263*3dec9fcdSqs 	0xfc00fc,		/* dlim_burstsizes */
264*3dec9fcdSqs 	0x1,			/* minimum transfer size */
265*3dec9fcdSqs 	0xffffffffffffffff,	/* maximum transfer size */
266*3dec9fcdSqs 	0xffffffffffffffff,	/* maximum segment size */
267*3dec9fcdSqs 	5,			/* scatter/gather list length */
268*3dec9fcdSqs 	(unsigned int)1,	/* granularity */
269*3dec9fcdSqs 	0			/* attribute flags */
270*3dec9fcdSqs };
271*3dec9fcdSqs 
272*3dec9fcdSqs ddi_dma_attr_t hxge_rx_dma_attr = {
273*3dec9fcdSqs 	DMA_ATTR_V0,		/* version number. */
274*3dec9fcdSqs 	0,			/* low address */
275*3dec9fcdSqs 	0xffffffffffffffff,	/* high address */
276*3dec9fcdSqs 	0xffffffffffffffff,	/* address counter max */
277*3dec9fcdSqs 	0x10000,		/* alignment */
278*3dec9fcdSqs 	0xfc00fc,		/* dlim_burstsizes */
279*3dec9fcdSqs 	0x1,			/* minimum transfer size */
280*3dec9fcdSqs 	0xffffffffffffffff,	/* maximum transfer size */
281*3dec9fcdSqs 	0xffffffffffffffff,	/* maximum segment size */
282*3dec9fcdSqs 	1,			/* scatter/gather list length */
283*3dec9fcdSqs 	(unsigned int)1,	/* granularity */
284*3dec9fcdSqs 	DDI_DMA_RELAXED_ORDERING /* attribute flags */
285*3dec9fcdSqs };
286*3dec9fcdSqs 
287*3dec9fcdSqs ddi_dma_lim_t hxge_dma_limits = {
288*3dec9fcdSqs 	(uint_t)0,		/* dlim_addr_lo */
289*3dec9fcdSqs 	(uint_t)0xffffffff,	/* dlim_addr_hi */
290*3dec9fcdSqs 	(uint_t)0xffffffff,	/* dlim_cntr_max */
291*3dec9fcdSqs 	(uint_t)0xfc00fc,	/* dlim_burstsizes for 32 and 64 bit xfers */
292*3dec9fcdSqs 	0x1,			/* dlim_minxfer */
293*3dec9fcdSqs 	1024			/* dlim_speed */
294*3dec9fcdSqs };
295*3dec9fcdSqs 
296*3dec9fcdSqs dma_method_t hxge_force_dma = DVMA;
297*3dec9fcdSqs 
298*3dec9fcdSqs /*
299*3dec9fcdSqs  * dma chunk sizes.
300*3dec9fcdSqs  *
301*3dec9fcdSqs  * Try to allocate the largest possible size
302*3dec9fcdSqs  * so that fewer number of dma chunks would be managed
303*3dec9fcdSqs  */
304*3dec9fcdSqs size_t alloc_sizes[] = {
305*3dec9fcdSqs     0x1000, 0x2000, 0x4000, 0x8000,
306*3dec9fcdSqs     0x10000, 0x20000, 0x40000, 0x80000,
307*3dec9fcdSqs     0x100000, 0x200000, 0x400000, 0x800000, 0x1000000
308*3dec9fcdSqs };
309*3dec9fcdSqs 
310*3dec9fcdSqs /*
311*3dec9fcdSqs  * Translate "dev_t" to a pointer to the associated "dev_info_t".
312*3dec9fcdSqs  */
313*3dec9fcdSqs static int
314*3dec9fcdSqs hxge_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
315*3dec9fcdSqs {
316*3dec9fcdSqs 	p_hxge_t	hxgep = NULL;
317*3dec9fcdSqs 	int		instance;
318*3dec9fcdSqs 	int		status = DDI_SUCCESS;
319*3dec9fcdSqs 
320*3dec9fcdSqs 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_attach"));
321*3dec9fcdSqs 
322*3dec9fcdSqs 	/*
323*3dec9fcdSqs 	 * Get the device instance since we'll need to setup or retrieve a soft
324*3dec9fcdSqs 	 * state for this instance.
325*3dec9fcdSqs 	 */
326*3dec9fcdSqs 	instance = ddi_get_instance(dip);
327*3dec9fcdSqs 
328*3dec9fcdSqs 	switch (cmd) {
329*3dec9fcdSqs 	case DDI_ATTACH:
330*3dec9fcdSqs 		HXGE_DEBUG_MSG((hxgep, DDI_CTL, "doing DDI_ATTACH"));
331*3dec9fcdSqs 		break;
332*3dec9fcdSqs 
333*3dec9fcdSqs 	case DDI_RESUME:
334*3dec9fcdSqs 		HXGE_DEBUG_MSG((hxgep, DDI_CTL, "doing DDI_RESUME"));
335*3dec9fcdSqs 		hxgep = (p_hxge_t)ddi_get_soft_state(hxge_list, instance);
336*3dec9fcdSqs 		if (hxgep == NULL) {
337*3dec9fcdSqs 			status = DDI_FAILURE;
338*3dec9fcdSqs 			break;
339*3dec9fcdSqs 		}
340*3dec9fcdSqs 		if (hxgep->dip != dip) {
341*3dec9fcdSqs 			status = DDI_FAILURE;
342*3dec9fcdSqs 			break;
343*3dec9fcdSqs 		}
344*3dec9fcdSqs 		if (hxgep->suspended == DDI_PM_SUSPEND) {
345*3dec9fcdSqs 			status = ddi_dev_is_needed(hxgep->dip, 0, 1);
346*3dec9fcdSqs 		} else {
347*3dec9fcdSqs 			(void) hxge_resume(hxgep);
348*3dec9fcdSqs 		}
349*3dec9fcdSqs 		goto hxge_attach_exit;
350*3dec9fcdSqs 
351*3dec9fcdSqs 	case DDI_PM_RESUME:
352*3dec9fcdSqs 		HXGE_DEBUG_MSG((hxgep, DDI_CTL, "doing DDI_PM_RESUME"));
353*3dec9fcdSqs 		hxgep = (p_hxge_t)ddi_get_soft_state(hxge_list, instance);
354*3dec9fcdSqs 		if (hxgep == NULL) {
355*3dec9fcdSqs 			status = DDI_FAILURE;
356*3dec9fcdSqs 			break;
357*3dec9fcdSqs 		}
358*3dec9fcdSqs 		if (hxgep->dip != dip) {
359*3dec9fcdSqs 			status = DDI_FAILURE;
360*3dec9fcdSqs 			break;
361*3dec9fcdSqs 		}
362*3dec9fcdSqs 		(void) hxge_resume(hxgep);
363*3dec9fcdSqs 		goto hxge_attach_exit;
364*3dec9fcdSqs 
365*3dec9fcdSqs 	default:
366*3dec9fcdSqs 		HXGE_DEBUG_MSG((hxgep, DDI_CTL, "doing unknown"));
367*3dec9fcdSqs 		status = DDI_FAILURE;
368*3dec9fcdSqs 		goto hxge_attach_exit;
369*3dec9fcdSqs 	}
370*3dec9fcdSqs 
371*3dec9fcdSqs 	if (ddi_soft_state_zalloc(hxge_list, instance) == DDI_FAILURE) {
372*3dec9fcdSqs 		status = DDI_FAILURE;
373*3dec9fcdSqs 		HXGE_ERROR_MSG((hxgep, DDI_CTL,
374*3dec9fcdSqs 		    "ddi_soft_state_zalloc failed"));
375*3dec9fcdSqs 		goto hxge_attach_exit;
376*3dec9fcdSqs 	}
377*3dec9fcdSqs 
378*3dec9fcdSqs 	hxgep = ddi_get_soft_state(hxge_list, instance);
379*3dec9fcdSqs 	if (hxgep == NULL) {
380*3dec9fcdSqs 		status = HXGE_ERROR;
381*3dec9fcdSqs 		HXGE_ERROR_MSG((hxgep, DDI_CTL,
382*3dec9fcdSqs 		    "ddi_get_soft_state failed"));
383*3dec9fcdSqs 		goto hxge_attach_fail2;
384*3dec9fcdSqs 	}
385*3dec9fcdSqs 
386*3dec9fcdSqs 	hxgep->drv_state = 0;
387*3dec9fcdSqs 	hxgep->dip = dip;
388*3dec9fcdSqs 	hxgep->instance = instance;
389*3dec9fcdSqs 	hxgep->p_dip = ddi_get_parent(dip);
390*3dec9fcdSqs 	hxgep->hxge_debug_level = hxge_debug_level;
391*3dec9fcdSqs 	hpi_debug_level = hxge_debug_level;
392*3dec9fcdSqs 
393*3dec9fcdSqs 	hxge_fm_init(hxgep, &hxge_dev_reg_acc_attr, &hxge_dev_desc_dma_acc_attr,
394*3dec9fcdSqs 	    &hxge_rx_dma_attr);
395*3dec9fcdSqs 
396*3dec9fcdSqs 	status = hxge_map_regs(hxgep);
397*3dec9fcdSqs 	if (status != HXGE_OK) {
398*3dec9fcdSqs 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "hxge_map_regs failed"));
399*3dec9fcdSqs 		goto hxge_attach_fail3;
400*3dec9fcdSqs 	}
401*3dec9fcdSqs 
402*3dec9fcdSqs 	status = hxge_init_common_dev(hxgep);
403*3dec9fcdSqs 	if (status != HXGE_OK) {
404*3dec9fcdSqs 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
405*3dec9fcdSqs 		    "hxge_init_common_dev failed"));
406*3dec9fcdSqs 		goto hxge_attach_fail4;
407*3dec9fcdSqs 	}
408*3dec9fcdSqs 
409*3dec9fcdSqs 	/*
410*3dec9fcdSqs 	 * Setup the Ndd parameters for this instance.
411*3dec9fcdSqs 	 */
412*3dec9fcdSqs 	hxge_init_param(hxgep);
413*3dec9fcdSqs 
414*3dec9fcdSqs 	/*
415*3dec9fcdSqs 	 * Setup Register Tracing Buffer.
416*3dec9fcdSqs 	 */
417*3dec9fcdSqs 	hpi_rtrace_buf_init((rtrace_t *)&hpi_rtracebuf);
418*3dec9fcdSqs 
419*3dec9fcdSqs 	/* init stats ptr */
420*3dec9fcdSqs 	hxge_init_statsp(hxgep);
421*3dec9fcdSqs 
422*3dec9fcdSqs 	status = hxge_get_config_properties(hxgep);
423*3dec9fcdSqs 	if (status != HXGE_OK) {
424*3dec9fcdSqs 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "get_hw create failed"));
425*3dec9fcdSqs 		goto hxge_attach_fail;
426*3dec9fcdSqs 	}
427*3dec9fcdSqs 
428*3dec9fcdSqs 	/*
429*3dec9fcdSqs 	 * Setup the Kstats for the driver.
430*3dec9fcdSqs 	 */
431*3dec9fcdSqs 	hxge_setup_kstats(hxgep);
432*3dec9fcdSqs 	hxge_setup_param(hxgep);
433*3dec9fcdSqs 
434*3dec9fcdSqs 	status = hxge_setup_system_dma_pages(hxgep);
435*3dec9fcdSqs 	if (status != HXGE_OK) {
436*3dec9fcdSqs 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "set dma page failed"));
437*3dec9fcdSqs 		goto hxge_attach_fail;
438*3dec9fcdSqs 	}
439*3dec9fcdSqs 
440*3dec9fcdSqs 	hxge_hw_id_init(hxgep);
441*3dec9fcdSqs 	hxge_hw_init_niu_common(hxgep);
442*3dec9fcdSqs 
443*3dec9fcdSqs 	status = hxge_setup_mutexes(hxgep);
444*3dec9fcdSqs 	if (status != HXGE_OK) {
445*3dec9fcdSqs 		HXGE_DEBUG_MSG((hxgep, DDI_CTL, "set mutex failed"));
446*3dec9fcdSqs 		goto hxge_attach_fail;
447*3dec9fcdSqs 	}
448*3dec9fcdSqs 
449*3dec9fcdSqs 	status = hxge_setup_dev(hxgep);
450*3dec9fcdSqs 	if (status != DDI_SUCCESS) {
451*3dec9fcdSqs 		HXGE_DEBUG_MSG((hxgep, DDI_CTL, "set dev failed"));
452*3dec9fcdSqs 		goto hxge_attach_fail;
453*3dec9fcdSqs 	}
454*3dec9fcdSqs 
455*3dec9fcdSqs 	status = hxge_add_intrs(hxgep);
456*3dec9fcdSqs 	if (status != DDI_SUCCESS) {
457*3dec9fcdSqs 		HXGE_DEBUG_MSG((hxgep, DDI_CTL, "add_intr failed"));
458*3dec9fcdSqs 		goto hxge_attach_fail;
459*3dec9fcdSqs 	}
460*3dec9fcdSqs 
461*3dec9fcdSqs 	status = hxge_add_soft_intrs(hxgep);
462*3dec9fcdSqs 	if (status != DDI_SUCCESS) {
463*3dec9fcdSqs 		HXGE_DEBUG_MSG((hxgep, HXGE_ERR_CTL, "add_soft_intr failed"));
464*3dec9fcdSqs 		goto hxge_attach_fail;
465*3dec9fcdSqs 	}
466*3dec9fcdSqs 
467*3dec9fcdSqs 	/*
468*3dec9fcdSqs 	 * Enable interrupts.
469*3dec9fcdSqs 	 */
470*3dec9fcdSqs 	hxge_intrs_enable(hxgep);
471*3dec9fcdSqs 
472*3dec9fcdSqs 	if ((status = hxge_mac_register(hxgep)) != HXGE_OK) {
473*3dec9fcdSqs 		HXGE_DEBUG_MSG((hxgep, DDI_CTL,
474*3dec9fcdSqs 		    "unable to register to mac layer (%d)", status));
475*3dec9fcdSqs 		goto hxge_attach_fail;
476*3dec9fcdSqs 	}
477*3dec9fcdSqs 	mac_link_update(hxgep->mach, LINK_STATE_UP);
478*3dec9fcdSqs 
479*3dec9fcdSqs 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "registered to mac (instance %d)",
480*3dec9fcdSqs 	    instance));
481*3dec9fcdSqs 
482*3dec9fcdSqs 	goto hxge_attach_exit;
483*3dec9fcdSqs 
484*3dec9fcdSqs hxge_attach_fail:
485*3dec9fcdSqs 	hxge_unattach(hxgep);
486*3dec9fcdSqs 	goto hxge_attach_fail1;
487*3dec9fcdSqs 
488*3dec9fcdSqs hxge_attach_fail5:
489*3dec9fcdSqs 	/*
490*3dec9fcdSqs 	 * Tear down the ndd parameters setup.
491*3dec9fcdSqs 	 */
492*3dec9fcdSqs 	hxge_destroy_param(hxgep);
493*3dec9fcdSqs 
494*3dec9fcdSqs 	/*
495*3dec9fcdSqs 	 * Tear down the kstat setup.
496*3dec9fcdSqs 	 */
497*3dec9fcdSqs 	hxge_destroy_kstats(hxgep);
498*3dec9fcdSqs 
499*3dec9fcdSqs hxge_attach_fail4:
500*3dec9fcdSqs 	if (hxgep->hxge_hw_p) {
501*3dec9fcdSqs 		hxge_uninit_common_dev(hxgep);
502*3dec9fcdSqs 		hxgep->hxge_hw_p = NULL;
503*3dec9fcdSqs 	}
504*3dec9fcdSqs hxge_attach_fail3:
505*3dec9fcdSqs 	/*
506*3dec9fcdSqs 	 * Unmap the register setup.
507*3dec9fcdSqs 	 */
508*3dec9fcdSqs 	hxge_unmap_regs(hxgep);
509*3dec9fcdSqs 
510*3dec9fcdSqs 	hxge_fm_fini(hxgep);
511*3dec9fcdSqs 
512*3dec9fcdSqs hxge_attach_fail2:
513*3dec9fcdSqs 	ddi_soft_state_free(hxge_list, hxgep->instance);
514*3dec9fcdSqs 
515*3dec9fcdSqs hxge_attach_fail1:
516*3dec9fcdSqs 	if (status != HXGE_OK)
517*3dec9fcdSqs 		status = (HXGE_ERROR | HXGE_DDI_FAILED);
518*3dec9fcdSqs 	hxgep = NULL;
519*3dec9fcdSqs 
520*3dec9fcdSqs hxge_attach_exit:
521*3dec9fcdSqs 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_attach status = 0x%08x",
522*3dec9fcdSqs 	    status));
523*3dec9fcdSqs 
524*3dec9fcdSqs 	return (status);
525*3dec9fcdSqs }
526*3dec9fcdSqs 
/*
 * hxge_detach -- detach(9E) entry point.
 *
 * DDI_DETACH: unregister from the MAC layer and free all instance
 * resources via hxge_unattach().
 * DDI_PM_SUSPEND / DDI_SUSPEND: record the suspend reason and quiesce
 * the device via hxge_suspend(); DDI_SUSPEND is skipped if the
 * instance is already PM-suspended.
 *
 * Returns DDI_SUCCESS or DDI_FAILURE.
 */
static int
hxge_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
	int		status = DDI_SUCCESS;
	int		instance;
	p_hxge_t	hxgep = NULL;

	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_detach"));
	instance = ddi_get_instance(dip);
	hxgep = ddi_get_soft_state(hxge_list, instance);
	if (hxgep == NULL) {
		status = DDI_FAILURE;
		goto hxge_detach_exit;
	}

	switch (cmd) {
	case DDI_DETACH:
		HXGE_DEBUG_MSG((hxgep, DDI_CTL, "doing DDI_DETACH"));
		break;

	case DDI_PM_SUSPEND:
		HXGE_DEBUG_MSG((hxgep, DDI_CTL, "doing DDI_PM_SUSPEND"));
		hxgep->suspended = DDI_PM_SUSPEND;
		hxge_suspend(hxgep);
		break;

	case DDI_SUSPEND:
		HXGE_DEBUG_MSG((hxgep, DDI_CTL, "doing DDI_SUSPEND"));
		/* Don't double-suspend if already PM-suspended. */
		if (hxgep->suspended != DDI_PM_SUSPEND) {
			hxgep->suspended = DDI_SUSPEND;
			hxge_suspend(hxgep);
		}
		break;

	default:
		status = DDI_FAILURE;
		break;
	}

	/* Suspend cases (and failures) are done; only DDI_DETACH continues. */
	if (cmd != DDI_DETACH)
		goto hxge_detach_exit;

	/*
	 * Stop the xcvr polling.
	 */
	hxgep->suspended = cmd;

	/*
	 * mac_unregister() can fail (e.g. the MAC is still held open);
	 * in that case abort the detach so the instance stays usable.
	 */
	if (hxgep->mach && (status = mac_unregister(hxgep->mach)) != 0) {
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
		    "<== hxge_detach status = 0x%08X", status));
		return (DDI_FAILURE);
	}
	HXGE_DEBUG_MSG((hxgep, DDI_CTL,
	    "<== hxge_detach (mac_unregister) status = 0x%08X", status));

	/* Free all per-instance resources, including the soft state. */
	hxge_unattach(hxgep);
	hxgep = NULL;

hxge_detach_exit:
	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_detach status = 0x%08X",
	    status));

	return (status);
}
591*3dec9fcdSqs 
/*
 * hxge_unattach -- release everything acquired by a successful (or
 * partially successful) hxge_attach(): interrupts, device resources,
 * ndd parameters, kstats, mutexes, properties, register mappings, FMA
 * state, and finally the soft state structure itself.
 *
 * Teardown order mirrors the reverse of attach; interrupts are removed
 * first so no handler runs while resources are being freed.
 */
static void
hxge_unattach(p_hxge_t hxgep)
{
	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_unattach"));

	/*
	 * Nothing to do before registers were mapped.
	 * NOTE(review): when hxgep != NULL but dev_regs == NULL this early
	 * return also skips ddi_soft_state_free() below — confirm callers
	 * can never reach here in that state.
	 */
	if (hxgep == NULL || hxgep->dev_regs == NULL) {
		return;
	}

	if (hxgep->hxge_hw_p) {
		hxge_uninit_common_dev(hxgep);
		hxgep->hxge_hw_p = NULL;
	}

	/* Cancel any pending driver timeout. */
	if (hxgep->hxge_timerid) {
		hxge_stop_timer(hxgep, hxgep->hxge_timerid);
		hxgep->hxge_timerid = 0;
	}

	/* Stop any further interrupts. */
	hxge_remove_intrs(hxgep);

	/* Remove soft interrups */
	hxge_remove_soft_intrs(hxgep);

	/* Stop the device and free resources. */
	hxge_destroy_dev(hxgep);

	/* Tear down the ndd parameters setup. */
	hxge_destroy_param(hxgep);

	/* Tear down the kstat setup. */
	hxge_destroy_kstats(hxgep);

	/* Destroy all mutexes.  */
	hxge_destroy_mutexes(hxgep);

	/*
	 * Remove the list of ndd parameters which were setup during attach.
	 */
	if (hxgep->dip) {
		HXGE_DEBUG_MSG((hxgep, OBP_CTL,
		    " hxge_unattach: remove all properties"));
		(void) ddi_prop_remove_all(hxgep->dip);
	}

	/*
	 * Unmap the register setup.
	 */
	hxge_unmap_regs(hxgep);

	hxge_fm_fini(hxgep);

	/*
	 * Free the soft state data structures allocated with this instance.
	 */
	ddi_soft_state_free(hxge_list, hxgep->instance);

	HXGE_DEBUG_MSG((NULL, DDI_CTL, "<== hxge_unattach"));
}
652*3dec9fcdSqs 
/*
 * hxge_map_regs -- map the three Hydra register sets and record the
 * handles in a freshly allocated dev_regs_t hung off hxgep->dev_regs:
 *
 *	rnumber 0: PCI configuration space
 *	rnumber 1: device PIO registers
 *	rnumber 2: MSI/MSI-X registers
 *
 * On success the HPI handle macros are primed with the new mappings and
 * HXGE_OK is returned.  On failure the goto ladder unwinds only the
 * mappings established so far, frees dev_regs, and the function returns
 * (HXGE_ERROR | HXGE_DDI_FAILED); hxgep->dev_regs stays NULL.
 */
static hxge_status_t
hxge_map_regs(p_hxge_t hxgep)
{
	int		ddi_status = DDI_SUCCESS;
	p_dev_regs_t	dev_regs;

#ifdef	HXGE_DEBUG
	char		*sysname;
#endif

	off_t		regsize;
	hxge_status_t	status = HXGE_OK;
	int		nregs;

	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_map_regs"));

	if (ddi_dev_nregs(hxgep->dip, &nregs) != DDI_SUCCESS)
		return (HXGE_ERROR);

	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "hxge_map_regs: nregs: %d", nregs));

	/* dev_regs stays detached from hxgep until every mapping succeeds. */
	hxgep->dev_regs = NULL;
	dev_regs = KMEM_ZALLOC(sizeof (dev_regs_t), KM_SLEEP);
	dev_regs->hxge_regh = NULL;
	dev_regs->hxge_pciregh = NULL;
	dev_regs->hxge_msix_regh = NULL;

	/* NOTE(review): regsize is off_t but printed with %x below. */
	(void) ddi_dev_regsize(hxgep->dip, 0, &regsize);
	HXGE_DEBUG_MSG((hxgep, DDI_CTL,
	    "hxge_map_regs: pci config size 0x%x", regsize));

	/* rnumber 0: PCI configuration space. */
	ddi_status = ddi_regs_map_setup(hxgep->dip, 0,
	    (caddr_t *)&(dev_regs->hxge_pciregp), 0, 0,
	    &hxge_dev_reg_acc_attr, &dev_regs->hxge_pciregh);
	if (ddi_status != DDI_SUCCESS) {
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
		    "ddi_map_regs, hxge bus config regs failed"));
		goto hxge_map_regs_fail0;
	}

	HXGE_DEBUG_MSG((hxgep, DDI_CTL,
	    "hxge_map_reg: PCI config addr 0x%0llx handle 0x%0llx",
	    dev_regs->hxge_pciregp,
	    dev_regs->hxge_pciregh));

	(void) ddi_dev_regsize(hxgep->dip, 1, &regsize);
	HXGE_DEBUG_MSG((hxgep, DDI_CTL,
	    "hxge_map_regs: pio size 0x%x", regsize));

	/* set up the device mapped register */
	ddi_status = ddi_regs_map_setup(hxgep->dip, 1,
	    (caddr_t *)&(dev_regs->hxge_regp), 0, 0,
	    &hxge_dev_reg_acc_attr, &dev_regs->hxge_regh);

	if (ddi_status != DDI_SUCCESS) {
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
		    "ddi_map_regs for Hydra global reg failed"));
		goto hxge_map_regs_fail1;
	}

	/* set up the msi/msi-x mapped register */
	(void) ddi_dev_regsize(hxgep->dip, 2, &regsize);
	HXGE_DEBUG_MSG((hxgep, DDI_CTL,
	    "hxge_map_regs: msix size 0x%x", regsize));

	ddi_status = ddi_regs_map_setup(hxgep->dip, 2,
	    (caddr_t *)&(dev_regs->hxge_msix_regp), 0, 0,
	    &hxge_dev_reg_acc_attr, &dev_regs->hxge_msix_regh);

	if (ddi_status != DDI_SUCCESS) {
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
		    "ddi_map_regs for msi reg failed"));
		goto hxge_map_regs_fail2;
	}

	/* All three mappings succeeded; publish them. */
	hxgep->dev_regs = dev_regs;

	/* Prime the HPI access macros with the new handles/pointers. */
	HPI_PCI_ACC_HANDLE_SET(hxgep, dev_regs->hxge_pciregh);
	HPI_PCI_ADD_HANDLE_SET(hxgep, (hpi_reg_ptr_t)dev_regs->hxge_pciregp);
	HPI_MSI_ACC_HANDLE_SET(hxgep, dev_regs->hxge_msix_regh);
	HPI_MSI_ADD_HANDLE_SET(hxgep, (hpi_reg_ptr_t)dev_regs->hxge_msix_regp);

	HPI_ACC_HANDLE_SET(hxgep, dev_regs->hxge_regh);
	HPI_ADD_HANDLE_SET(hxgep, (hpi_reg_ptr_t)dev_regs->hxge_regp);

	HPI_REG_ACC_HANDLE_SET(hxgep, dev_regs->hxge_regh);
	HPI_REG_ADD_HANDLE_SET(hxgep, (hpi_reg_ptr_t)dev_regs->hxge_regp);

	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "hxge_map_reg: hardware addr 0x%0llx "
	    " handle 0x%0llx", dev_regs->hxge_regp, dev_regs->hxge_regh));

	goto hxge_map_regs_exit;

	/*
	 * Unwind ladder: each label frees the mapping established just
	 * before the failing step, then falls through to free the rest.
	 * NOTE(review): no goto targets hxge_map_regs_fail3 — that rung is
	 * currently unreachable.
	 */
hxge_map_regs_fail3:
	if (dev_regs->hxge_msix_regh) {
		ddi_regs_map_free(&dev_regs->hxge_msix_regh);
	}

hxge_map_regs_fail2:
	if (dev_regs->hxge_regh) {
		ddi_regs_map_free(&dev_regs->hxge_regh);
	}

hxge_map_regs_fail1:
	if (dev_regs->hxge_pciregh) {
		ddi_regs_map_free(&dev_regs->hxge_pciregh);
	}

hxge_map_regs_fail0:
	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "Freeing register set memory"));
	kmem_free(dev_regs, sizeof (dev_regs_t));

hxge_map_regs_exit:
	if (ddi_status != DDI_SUCCESS)
		status |= (HXGE_ERROR | HXGE_DDI_FAILED);
	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_map_regs"));
	return (status);
}
771*3dec9fcdSqs 
772*3dec9fcdSqs static void
773*3dec9fcdSqs hxge_unmap_regs(p_hxge_t hxgep)
774*3dec9fcdSqs {
775*3dec9fcdSqs 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_unmap_regs"));
776*3dec9fcdSqs 	if (hxgep->dev_regs) {
777*3dec9fcdSqs 		if (hxgep->dev_regs->hxge_pciregh) {
778*3dec9fcdSqs 			HXGE_DEBUG_MSG((hxgep, DDI_CTL,
779*3dec9fcdSqs 			    "==> hxge_unmap_regs: bus"));
780*3dec9fcdSqs 			ddi_regs_map_free(&hxgep->dev_regs->hxge_pciregh);
781*3dec9fcdSqs 			hxgep->dev_regs->hxge_pciregh = NULL;
782*3dec9fcdSqs 		}
783*3dec9fcdSqs 
784*3dec9fcdSqs 		if (hxgep->dev_regs->hxge_regh) {
785*3dec9fcdSqs 			HXGE_DEBUG_MSG((hxgep, DDI_CTL,
786*3dec9fcdSqs 			    "==> hxge_unmap_regs: device registers"));
787*3dec9fcdSqs 			ddi_regs_map_free(&hxgep->dev_regs->hxge_regh);
788*3dec9fcdSqs 			hxgep->dev_regs->hxge_regh = NULL;
789*3dec9fcdSqs 		}
790*3dec9fcdSqs 
791*3dec9fcdSqs 		if (hxgep->dev_regs->hxge_msix_regh) {
792*3dec9fcdSqs 			HXGE_DEBUG_MSG((hxgep, DDI_CTL,
793*3dec9fcdSqs 			    "==> hxge_unmap_regs: device interrupts"));
794*3dec9fcdSqs 			ddi_regs_map_free(&hxgep->dev_regs->hxge_msix_regh);
795*3dec9fcdSqs 			hxgep->dev_regs->hxge_msix_regh = NULL;
796*3dec9fcdSqs 		}
797*3dec9fcdSqs 		kmem_free(hxgep->dev_regs, sizeof (dev_regs_t));
798*3dec9fcdSqs 		hxgep->dev_regs = NULL;
799*3dec9fcdSqs 	}
800*3dec9fcdSqs 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_unmap_regs"));
801*3dec9fcdSqs }
802*3dec9fcdSqs 
803*3dec9fcdSqs static hxge_status_t
804*3dec9fcdSqs hxge_setup_mutexes(p_hxge_t hxgep)
805*3dec9fcdSqs {
806*3dec9fcdSqs 	int		ddi_status = DDI_SUCCESS;
807*3dec9fcdSqs 	hxge_status_t	status = HXGE_OK;
808*3dec9fcdSqs 
809*3dec9fcdSqs 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_setup_mutexes"));
810*3dec9fcdSqs 
811*3dec9fcdSqs 	/*
812*3dec9fcdSqs 	 * Get the interrupt cookie so the mutexes can be Initialised.
813*3dec9fcdSqs 	 */
814*3dec9fcdSqs 	ddi_status = ddi_get_iblock_cookie(hxgep->dip, 0,
815*3dec9fcdSqs 	    &hxgep->interrupt_cookie);
816*3dec9fcdSqs 
817*3dec9fcdSqs 	if (ddi_status != DDI_SUCCESS) {
818*3dec9fcdSqs 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
819*3dec9fcdSqs 		    "<== hxge_setup_mutexes: failed 0x%x", ddi_status));
820*3dec9fcdSqs 		goto hxge_setup_mutexes_exit;
821*3dec9fcdSqs 	}
822*3dec9fcdSqs 
823*3dec9fcdSqs 	/*
824*3dec9fcdSqs 	 * Initialize mutex's for this device.
825*3dec9fcdSqs 	 */
826*3dec9fcdSqs 	MUTEX_INIT(hxgep->genlock, NULL,
827*3dec9fcdSqs 	    MUTEX_DRIVER, (void *) hxgep->interrupt_cookie);
828*3dec9fcdSqs 	MUTEX_INIT(&hxgep->ouraddr_lock, NULL,
829*3dec9fcdSqs 	    MUTEX_DRIVER, (void *) hxgep->interrupt_cookie);
830*3dec9fcdSqs 	RW_INIT(&hxgep->filter_lock, NULL,
831*3dec9fcdSqs 	    RW_DRIVER, (void *) hxgep->interrupt_cookie);
832*3dec9fcdSqs 
833*3dec9fcdSqs hxge_setup_mutexes_exit:
834*3dec9fcdSqs 	HXGE_DEBUG_MSG((hxgep, DDI_CTL,
835*3dec9fcdSqs 	    "<== hxge_setup_mutexes status = %x", status));
836*3dec9fcdSqs 
837*3dec9fcdSqs 	if (ddi_status != DDI_SUCCESS)
838*3dec9fcdSqs 		status |= (HXGE_ERROR | HXGE_DDI_FAILED);
839*3dec9fcdSqs 
840*3dec9fcdSqs 	return (status);
841*3dec9fcdSqs }
842*3dec9fcdSqs 
843*3dec9fcdSqs static void
844*3dec9fcdSqs hxge_destroy_mutexes(p_hxge_t hxgep)
845*3dec9fcdSqs {
846*3dec9fcdSqs 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_destroy_mutexes"));
847*3dec9fcdSqs 	RW_DESTROY(&hxgep->filter_lock);
848*3dec9fcdSqs 	MUTEX_DESTROY(&hxgep->ouraddr_lock);
849*3dec9fcdSqs 	MUTEX_DESTROY(hxgep->genlock);
850*3dec9fcdSqs 
851*3dec9fcdSqs 	if (hxge_debug_init == 1) {
852*3dec9fcdSqs 		MUTEX_DESTROY(&hxgedebuglock);
853*3dec9fcdSqs 		hxge_debug_init = 0;
854*3dec9fcdSqs 	}
855*3dec9fcdSqs 
856*3dec9fcdSqs 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_destroy_mutexes"));
857*3dec9fcdSqs }
858*3dec9fcdSqs 
/*
 * hxge_init() -- bring the hardware to the initialized state.
 *
 * Idempotent: returns immediately when STATE_HW_INITIALIZED is already
 * set.  Initialization order is: memory pools, TX DMA, RX DMA, TCAM
 * classification, VMAC, default MAC address, then interrupts.  On
 * failure, the goto labels unwind only the stages completed before the
 * failing one.  Returns HXGE_OK or the failing stage's status.
 */
hxge_status_t
hxge_init(p_hxge_t hxgep)
{
	hxge_status_t status = HXGE_OK;

	HXGE_DEBUG_MSG((hxgep, STR_CTL, "==> hxge_init"));

	/* Already initialized -- nothing to do. */
	if (hxgep->drv_state & STATE_HW_INITIALIZED) {
		return (status);
	}

	/*
	 * Allocate system memory for the receive/transmit buffer blocks and
	 * receive/transmit descriptor rings.
	 */
	status = hxge_alloc_mem_pool(hxgep);
	if (status != HXGE_OK) {
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "alloc mem failed\n"));
		goto hxge_init_fail1;
	}

	/*
	 * Initialize and enable TXDMA channels.
	 */
	status = hxge_init_txdma_channels(hxgep);
	if (status != HXGE_OK) {
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "init txdma failed\n"));
		goto hxge_init_fail3;
	}

	/*
	 * Initialize and enable RXDMA channels.
	 */
	status = hxge_init_rxdma_channels(hxgep);
	if (status != HXGE_OK) {
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "init rxdma failed\n"));
		goto hxge_init_fail4;
	}

	/*
	 * Initialize TCAM
	 */
	status = hxge_classify_init(hxgep);
	if (status != HXGE_OK) {
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "init classify failed\n"));
		goto hxge_init_fail5;
	}

	/*
	 * Initialize the VMAC block.
	 */
	status = hxge_vmac_init(hxgep);
	if (status != HXGE_OK) {
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "init MAC failed\n"));
		goto hxge_init_fail5;
	}

	/* Bringup - this may be unnecessary when PXE and FCODE available */
	status = hxge_pfc_set_default_mac_addr(hxgep);
	if (status != HXGE_OK) {
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
		    "Default Address Failure\n"));
		goto hxge_init_fail5;
	}

	hxge_intrs_enable(hxgep);

	/*
	 * Enable hardware interrupts.
	 */
	hxge_intr_hw_enable(hxgep);
	hxgep->drv_state |= STATE_HW_INITIALIZED;

	goto hxge_init_exit;

hxge_init_fail5:
	/*
	 * NOTE(review): failures after hxge_classify_init() succeeded
	 * (VMAC init, default MAC address) also land here, yet
	 * hxge_classify_uninit() is never called on this path -- confirm
	 * whether TCAM/classification state should be torn down as well.
	 */
	hxge_uninit_rxdma_channels(hxgep);
hxge_init_fail4:
	hxge_uninit_txdma_channels(hxgep);
hxge_init_fail3:
	hxge_free_mem_pool(hxgep);
hxge_init_fail1:
	HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
	    "<== hxge_init status (failed) = 0x%08x", status));
	return (status);

hxge_init_exit:

	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_init status = 0x%08x",
	    status));

	return (status);
}
952*3dec9fcdSqs 
953*3dec9fcdSqs timeout_id_t
954*3dec9fcdSqs hxge_start_timer(p_hxge_t hxgep, fptrv_t func, int msec)
955*3dec9fcdSqs {
956*3dec9fcdSqs 	if ((hxgep->suspended == 0) || (hxgep->suspended == DDI_RESUME)) {
957*3dec9fcdSqs 		return (timeout(func, (caddr_t)hxgep,
958*3dec9fcdSqs 		    drv_usectohz(1000 * msec)));
959*3dec9fcdSqs 	}
960*3dec9fcdSqs 	return (NULL);
961*3dec9fcdSqs }
962*3dec9fcdSqs 
963*3dec9fcdSqs /*ARGSUSED*/
964*3dec9fcdSqs void
965*3dec9fcdSqs hxge_stop_timer(p_hxge_t hxgep, timeout_id_t timerid)
966*3dec9fcdSqs {
967*3dec9fcdSqs 	if (timerid) {
968*3dec9fcdSqs 		(void) untimeout(timerid);
969*3dec9fcdSqs 	}
970*3dec9fcdSqs }
971*3dec9fcdSqs 
/*
 * hxge_uninit() -- tear the hardware back down from the initialized
 * state (inverse of hxge_init()).  No-op unless STATE_HW_INITIALIZED
 * is set.  Teardown order: timer, interrupts, RX VMAC, classification,
 * TX/RX DMA stop and uninit, TX VMAC, then the memory pools.
 */
void
hxge_uninit(p_hxge_t hxgep)
{
	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_uninit"));

	if (!(hxgep->drv_state & STATE_HW_INITIALIZED)) {
		HXGE_DEBUG_MSG((hxgep, DDI_CTL,
		    "==> hxge_uninit: not initialized"));
		HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_uninit"));
		return;
	}

	/* Stop timer */
	if (hxgep->hxge_timerid) {
		hxge_stop_timer(hxgep, hxgep->hxge_timerid);
		hxgep->hxge_timerid = 0;
	}

	/* Silence the device before dismantling anything. */
	(void) hxge_intr_hw_disable(hxgep);

	/* Reset the receive VMAC side.  */
	(void) hxge_rx_vmac_disable(hxgep);

	/* Free classification resources */
	(void) hxge_classify_uninit(hxgep);

	/* Reset the transmit/receive DMA side.  */
	(void) hxge_txdma_hw_mode(hxgep, HXGE_DMA_STOP);
	(void) hxge_rxdma_hw_mode(hxgep, HXGE_DMA_STOP);

	hxge_uninit_txdma_channels(hxgep);
	hxge_uninit_rxdma_channels(hxgep);

	/* Reset the transmit VMAC side.  */
	(void) hxge_tx_vmac_disable(hxgep);

	/* Buffers/rings are only safe to free once DMA is shut down. */
	hxge_free_mem_pool(hxgep);

	/* Mark the instance as requiring a fresh hxge_init(). */
	hxgep->drv_state &= ~STATE_HW_INITIALIZED;

	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_uninit"));
}
1014*3dec9fcdSqs 
1015*3dec9fcdSqs void
1016*3dec9fcdSqs hxge_get64(p_hxge_t hxgep, p_mblk_t mp)
1017*3dec9fcdSqs {
1018*3dec9fcdSqs #if defined(__i386)
1019*3dec9fcdSqs 	size_t		reg;
1020*3dec9fcdSqs #else
1021*3dec9fcdSqs 	uint64_t	reg;
1022*3dec9fcdSqs #endif
1023*3dec9fcdSqs 	uint64_t	regdata;
1024*3dec9fcdSqs 	int		i, retry;
1025*3dec9fcdSqs 
1026*3dec9fcdSqs 	bcopy((char *)mp->b_rptr, (char *)&reg, sizeof (uint64_t));
1027*3dec9fcdSqs 	regdata = 0;
1028*3dec9fcdSqs 	retry = 1;
1029*3dec9fcdSqs 
1030*3dec9fcdSqs 	for (i = 0; i < retry; i++) {
1031*3dec9fcdSqs 		HXGE_REG_RD64(hxgep->hpi_handle, reg, &regdata);
1032*3dec9fcdSqs 	}
1033*3dec9fcdSqs 	bcopy((char *)&regdata, (char *)mp->b_rptr, sizeof (uint64_t));
1034*3dec9fcdSqs }
1035*3dec9fcdSqs 
1036*3dec9fcdSqs void
1037*3dec9fcdSqs hxge_put64(p_hxge_t hxgep, p_mblk_t mp)
1038*3dec9fcdSqs {
1039*3dec9fcdSqs #if defined(__i386)
1040*3dec9fcdSqs 	size_t		reg;
1041*3dec9fcdSqs #else
1042*3dec9fcdSqs 	uint64_t	reg;
1043*3dec9fcdSqs #endif
1044*3dec9fcdSqs 	uint64_t	buf[2];
1045*3dec9fcdSqs 
1046*3dec9fcdSqs 	bcopy((char *)mp->b_rptr, (char *)&buf[0], 2 * sizeof (uint64_t));
1047*3dec9fcdSqs #if defined(__i386)
1048*3dec9fcdSqs 	reg = (size_t)buf[0];
1049*3dec9fcdSqs #else
1050*3dec9fcdSqs 	reg = buf[0];
1051*3dec9fcdSqs #endif
1052*3dec9fcdSqs 
1053*3dec9fcdSqs 	HXGE_HPI_PIO_WRITE64(hxgep->hpi_handle, reg, buf[1]);
1054*3dec9fcdSqs }
1055*3dec9fcdSqs 
1056*3dec9fcdSqs /*ARGSUSED*/
1057*3dec9fcdSqs /*VARARGS*/
1058*3dec9fcdSqs void
1059*3dec9fcdSqs hxge_debug_msg(p_hxge_t hxgep, uint64_t level, char *fmt, ...)
1060*3dec9fcdSqs {
1061*3dec9fcdSqs 	char		msg_buffer[1048];
1062*3dec9fcdSqs 	char		prefix_buffer[32];
1063*3dec9fcdSqs 	int		instance;
1064*3dec9fcdSqs 	uint64_t	debug_level;
1065*3dec9fcdSqs 	int		cmn_level = CE_CONT;
1066*3dec9fcdSqs 	va_list		ap;
1067*3dec9fcdSqs 
1068*3dec9fcdSqs 	debug_level = (hxgep == NULL) ? hxge_debug_level :
1069*3dec9fcdSqs 	    hxgep->hxge_debug_level;
1070*3dec9fcdSqs 
1071*3dec9fcdSqs 	if ((level & debug_level) || (level == HXGE_NOTE) ||
1072*3dec9fcdSqs 	    (level == HXGE_ERR_CTL)) {
1073*3dec9fcdSqs 		/* do the msg processing */
1074*3dec9fcdSqs 		if (hxge_debug_init == 0) {
1075*3dec9fcdSqs 			MUTEX_INIT(&hxgedebuglock, NULL, MUTEX_DRIVER, NULL);
1076*3dec9fcdSqs 			hxge_debug_init = 1;
1077*3dec9fcdSqs 		}
1078*3dec9fcdSqs 
1079*3dec9fcdSqs 		MUTEX_ENTER(&hxgedebuglock);
1080*3dec9fcdSqs 
1081*3dec9fcdSqs 		if ((level & HXGE_NOTE)) {
1082*3dec9fcdSqs 			cmn_level = CE_NOTE;
1083*3dec9fcdSqs 		}
1084*3dec9fcdSqs 
1085*3dec9fcdSqs 		if (level & HXGE_ERR_CTL) {
1086*3dec9fcdSqs 			cmn_level = CE_WARN;
1087*3dec9fcdSqs 		}
1088*3dec9fcdSqs 
1089*3dec9fcdSqs 		va_start(ap, fmt);
1090*3dec9fcdSqs 		(void) vsprintf(msg_buffer, fmt, ap);
1091*3dec9fcdSqs 		va_end(ap);
1092*3dec9fcdSqs 
1093*3dec9fcdSqs 		if (hxgep == NULL) {
1094*3dec9fcdSqs 			instance = -1;
1095*3dec9fcdSqs 			(void) sprintf(prefix_buffer, "%s :", "hxge");
1096*3dec9fcdSqs 		} else {
1097*3dec9fcdSqs 			instance = hxgep->instance;
1098*3dec9fcdSqs 			(void) sprintf(prefix_buffer,
1099*3dec9fcdSqs 			    "%s%d :", "hxge", instance);
1100*3dec9fcdSqs 		}
1101*3dec9fcdSqs 
1102*3dec9fcdSqs 		MUTEX_EXIT(&hxgedebuglock);
1103*3dec9fcdSqs 		cmn_err(cmn_level, "%s %s\n", prefix_buffer, msg_buffer);
1104*3dec9fcdSqs 	}
1105*3dec9fcdSqs }
1106*3dec9fcdSqs 
1107*3dec9fcdSqs char *
1108*3dec9fcdSqs hxge_dump_packet(char *addr, int size)
1109*3dec9fcdSqs {
1110*3dec9fcdSqs 	uchar_t		*ap = (uchar_t *)addr;
1111*3dec9fcdSqs 	int		i;
1112*3dec9fcdSqs 	static char	etherbuf[1024];
1113*3dec9fcdSqs 	char		*cp = etherbuf;
1114*3dec9fcdSqs 	char		digits[] = "0123456789abcdef";
1115*3dec9fcdSqs 
1116*3dec9fcdSqs 	if (!size)
1117*3dec9fcdSqs 		size = 60;
1118*3dec9fcdSqs 
1119*3dec9fcdSqs 	if (size > MAX_DUMP_SZ) {
1120*3dec9fcdSqs 		/* Dump the leading bytes */
1121*3dec9fcdSqs 		for (i = 0; i < MAX_DUMP_SZ / 2; i++) {
1122*3dec9fcdSqs 			if (*ap > 0x0f)
1123*3dec9fcdSqs 				*cp++ = digits[*ap >> 4];
1124*3dec9fcdSqs 			*cp++ = digits[*ap++ & 0xf];
1125*3dec9fcdSqs 			*cp++ = ':';
1126*3dec9fcdSqs 		}
1127*3dec9fcdSqs 		for (i = 0; i < 20; i++)
1128*3dec9fcdSqs 			*cp++ = '.';
1129*3dec9fcdSqs 		/* Dump the last MAX_DUMP_SZ/2 bytes */
1130*3dec9fcdSqs 		ap = (uchar_t *)(addr + (size - MAX_DUMP_SZ / 2));
1131*3dec9fcdSqs 		for (i = 0; i < MAX_DUMP_SZ / 2; i++) {
1132*3dec9fcdSqs 			if (*ap > 0x0f)
1133*3dec9fcdSqs 				*cp++ = digits[*ap >> 4];
1134*3dec9fcdSqs 			*cp++ = digits[*ap++ & 0xf];
1135*3dec9fcdSqs 			*cp++ = ':';
1136*3dec9fcdSqs 		}
1137*3dec9fcdSqs 	} else {
1138*3dec9fcdSqs 		for (i = 0; i < size; i++) {
1139*3dec9fcdSqs 			if (*ap > 0x0f)
1140*3dec9fcdSqs 				*cp++ = digits[*ap >> 4];
1141*3dec9fcdSqs 			*cp++ = digits[*ap++ & 0xf];
1142*3dec9fcdSqs 			*cp++ = ':';
1143*3dec9fcdSqs 		}
1144*3dec9fcdSqs 	}
1145*3dec9fcdSqs 	*--cp = 0;
1146*3dec9fcdSqs 	return (etherbuf);
1147*3dec9fcdSqs }
1148*3dec9fcdSqs 
/*
 * hxge_suspend() -- quiesce the instance for DDI_SUSPEND: disable
 * interrupts first, then stop the hardware (hxge_destroy_dev() calls
 * hxge_hw_stop()).
 */
static void
hxge_suspend(p_hxge_t hxgep)
{
	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_suspend"));

	hxge_intrs_disable(hxgep);
	hxge_destroy_dev(hxgep);

	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_suspend"));
}
1159*3dec9fcdSqs 
/*
 * hxge_resume() -- restart the instance after DDI_RESUME: restart the
 * RX/TX DMA engines, re-enable the VMAC datapath, then re-enable
 * interrupts.  `suspended' is set to DDI_RESUME for the duration so
 * hxge_start_timer() will arm timers again, and cleared on completion.
 * Always returns HXGE_OK (status is never modified).
 */
static hxge_status_t
hxge_resume(p_hxge_t hxgep)
{
	hxge_status_t status = HXGE_OK;

	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_resume"));
	hxgep->suspended = DDI_RESUME;

	(void) hxge_rxdma_hw_mode(hxgep, HXGE_DMA_START);
	(void) hxge_txdma_hw_mode(hxgep, HXGE_DMA_START);

	(void) hxge_rx_vmac_enable(hxgep);
	(void) hxge_tx_vmac_enable(hxgep);

	hxge_intrs_enable(hxgep);

	hxgep->suspended = 0;

	HXGE_DEBUG_MSG((hxgep, DDI_CTL,
	    "<== hxge_resume status = 0x%x", status));

	return (status);
}
1183*3dec9fcdSqs 
1184*3dec9fcdSqs hxge_status_t
1185*3dec9fcdSqs hxge_setup_dev(p_hxge_t hxgep)
1186*3dec9fcdSqs {
1187*3dec9fcdSqs 	hxge_status_t status = HXGE_OK;
1188*3dec9fcdSqs 
1189*3dec9fcdSqs 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_setup_dev"));
1190*3dec9fcdSqs 
1191*3dec9fcdSqs 	status = hxge_link_init(hxgep);
1192*3dec9fcdSqs 	if (fm_check_acc_handle(hxgep->dev_regs->hxge_regh) != DDI_FM_OK) {
1193*3dec9fcdSqs 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
1194*3dec9fcdSqs 		    "Bad register acc handle"));
1195*3dec9fcdSqs 		status = HXGE_ERROR;
1196*3dec9fcdSqs 	}
1197*3dec9fcdSqs 
1198*3dec9fcdSqs 	if (status != HXGE_OK) {
1199*3dec9fcdSqs 		HXGE_DEBUG_MSG((hxgep, MAC_CTL,
1200*3dec9fcdSqs 		    " hxge_setup_dev status (link init 0x%08x)", status));
1201*3dec9fcdSqs 		goto hxge_setup_dev_exit;
1202*3dec9fcdSqs 	}
1203*3dec9fcdSqs 
1204*3dec9fcdSqs hxge_setup_dev_exit:
1205*3dec9fcdSqs 	HXGE_DEBUG_MSG((hxgep, DDI_CTL,
1206*3dec9fcdSqs 	    "<== hxge_setup_dev status = 0x%08x", status));
1207*3dec9fcdSqs 
1208*3dec9fcdSqs 	return (status);
1209*3dec9fcdSqs }
1210*3dec9fcdSqs 
/*
 * hxge_destroy_dev() -- stop the hardware (thin wrapper around
 * hxge_hw_stop()); inverse of hxge_setup_dev().
 */
static void
hxge_destroy_dev(p_hxge_t hxgep)
{
	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_destroy_dev"));

	(void) hxge_hw_stop(hxgep);

	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_destroy_dev"));
}
1220*3dec9fcdSqs 
/*
 * hxge_setup_system_dma_pages() -- derive the DMA page geometry for
 * this instance.
 *
 * Reconciles the system MMU page size with the IOMMU page size (Hydra
 * supports at most 8K pages), selects the RX default block size and
 * RBR block-size code accordingly, aligns the global RX/TX/descriptor
 * DMA attributes to the chosen page size, and probes the system DMA
 * burst sizes via a throwaway handle.  Returns HXGE_OK, or
 * (HXGE_ERROR | HXGE_DDI_FAILED) when a DDI call fails.
 */
static hxge_status_t
hxge_setup_system_dma_pages(p_hxge_t hxgep)
{
	int			ddi_status = DDI_SUCCESS;
	uint_t			count;
	ddi_dma_cookie_t	cookie;
	uint_t			iommu_pagesize;
	hxge_status_t		status = HXGE_OK;

	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_setup_system_dma_pages"));

	/* One system page, in bytes, and the IOMMU page size (0 if none). */
	hxgep->sys_page_sz = ddi_ptob(hxgep->dip, (ulong_t)1);
	iommu_pagesize = dvma_pagesize(hxgep->dip);

	HXGE_DEBUG_MSG((hxgep, DDI_CTL,
	    " hxge_setup_system_dma_pages: page %d (ddi_ptob %d) "
	    " default_block_size %d iommu_pagesize %d",
	    hxgep->sys_page_sz, ddi_ptob(hxgep->dip, (ulong_t)1),
	    hxgep->rx_default_block_size, iommu_pagesize));

	/*
	 * Clamp to the smaller of the system and IOMMU page sizes, and
	 * never exceed 8K (the largest page Hydra supports).
	 */
	if (iommu_pagesize != 0) {
		if (hxgep->sys_page_sz == iommu_pagesize) {
			/* Hydra support up to 8K pages */
			if (iommu_pagesize > 0x2000)
				hxgep->sys_page_sz = 0x2000;
		} else {
			if (hxgep->sys_page_sz > iommu_pagesize)
				hxgep->sys_page_sz = iommu_pagesize;
		}
	}

	hxgep->sys_page_mask = ~(hxgep->sys_page_sz - 1);

	HXGE_DEBUG_MSG((hxgep, DDI_CTL,
	    "==> hxge_setup_system_dma_pages: page %d (ddi_ptob %d) "
	    "default_block_size %d page mask %d",
	    hxgep->sys_page_sz, ddi_ptob(hxgep->dip, (ulong_t)1),
	    hxgep->rx_default_block_size, hxgep->sys_page_mask));

	/*
	 * Map the page size to an RX block size and RBR block-size
	 * code; anything other than 4K/8K falls back to 4K.
	 */
	switch (hxgep->sys_page_sz) {
	default:
		hxgep->sys_page_sz = 0x1000;
		hxgep->sys_page_mask = ~(hxgep->sys_page_sz - 1);
		hxgep->rx_default_block_size = 0x1000;
		hxgep->rx_bksize_code = RBR_BKSIZE_4K;
		break;
	case 0x1000:
		hxgep->rx_default_block_size = 0x1000;
		hxgep->rx_bksize_code = RBR_BKSIZE_4K;
		break;
	case 0x2000:
		hxgep->rx_default_block_size = 0x2000;
		hxgep->rx_bksize_code = RBR_BKSIZE_8K;
		break;
	}

	/* Align the (file-global) DMA attributes to the chosen page size. */
	hxge_rx_dma_attr.dma_attr_align = hxgep->sys_page_sz;
	hxge_tx_dma_attr.dma_attr_align = hxgep->sys_page_sz;
	hxge_desc_dma_attr.dma_attr_align = hxgep->sys_page_sz;

	/*
	 * Get the system DMA burst size.
	 */
	ddi_status = ddi_dma_alloc_handle(hxgep->dip, &hxge_tx_dma_attr,
	    DDI_DMA_DONTWAIT, 0, &hxgep->dmasparehandle);
	if (ddi_status != DDI_SUCCESS) {
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
		    "ddi_dma_alloc_handle: failed status 0x%x", ddi_status));
		goto hxge_get_soft_properties_exit;
	}

	/*
	 * Bind the spare handle to an arbitrary kernel address (the
	 * handle variable's own storage) purely so that
	 * ddi_dma_burstsizes() can report the allowed burst sizes.
	 */
	ddi_status = ddi_dma_addr_bind_handle(hxgep->dmasparehandle, NULL,
	    (caddr_t)hxgep->dmasparehandle, sizeof (hxgep->dmasparehandle),
	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT, DDI_DMA_DONTWAIT, 0,
	    &cookie, &count);
	if (ddi_status != DDI_DMA_MAPPED) {
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
		    "Binding spare handle to find system burstsize failed."));
		ddi_status = DDI_FAILURE;
		goto hxge_get_soft_properties_fail1;
	}

	hxgep->sys_burst_sz = ddi_dma_burstsizes(hxgep->dmasparehandle);
	(void) ddi_dma_unbind_handle(hxgep->dmasparehandle);

hxge_get_soft_properties_fail1:
	/* The spare handle is only needed for the probe; free it. */
	ddi_dma_free_handle(&hxgep->dmasparehandle);

hxge_get_soft_properties_exit:

	if (ddi_status != DDI_SUCCESS)
		status |= (HXGE_ERROR | HXGE_DDI_FAILED);

	HXGE_DEBUG_MSG((hxgep, DDI_CTL,
	    "<== hxge_setup_system_dma_pages status = 0x%08x", status));

	return (status);
}
1319*3dec9fcdSqs 
1320*3dec9fcdSqs hxge_status_t
1321*3dec9fcdSqs hxge_alloc_mem_pool(p_hxge_t hxgep)
1322*3dec9fcdSqs {
1323*3dec9fcdSqs 	hxge_status_t status = HXGE_OK;
1324*3dec9fcdSqs 
1325*3dec9fcdSqs 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_alloc_mem_pool"));
1326*3dec9fcdSqs 
1327*3dec9fcdSqs 	status = hxge_alloc_rx_mem_pool(hxgep);
1328*3dec9fcdSqs 	if (status != HXGE_OK) {
1329*3dec9fcdSqs 		return (HXGE_ERROR);
1330*3dec9fcdSqs 	}
1331*3dec9fcdSqs 
1332*3dec9fcdSqs 	status = hxge_alloc_tx_mem_pool(hxgep);
1333*3dec9fcdSqs 	if (status != HXGE_OK) {
1334*3dec9fcdSqs 		hxge_free_rx_mem_pool(hxgep);
1335*3dec9fcdSqs 		return (HXGE_ERROR);
1336*3dec9fcdSqs 	}
1337*3dec9fcdSqs 
1338*3dec9fcdSqs 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_alloc_mem_pool"));
1339*3dec9fcdSqs 	return (HXGE_OK);
1340*3dec9fcdSqs }
1341*3dec9fcdSqs 
/*
 * hxge_free_mem_pool() -- release both DMA memory pools allocated by
 * hxge_alloc_mem_pool().
 */
static void
hxge_free_mem_pool(p_hxge_t hxgep)
{
	HXGE_DEBUG_MSG((hxgep, MEM_CTL, "==> hxge_free_mem_pool"));

	hxge_free_rx_mem_pool(hxgep);
	hxge_free_tx_mem_pool(hxgep);

	HXGE_DEBUG_MSG((hxgep, MEM_CTL, "<== hxge_free_mem_pool"));
}
1352*3dec9fcdSqs 
/*
 * hxge_alloc_rx_mem_pool() -- allocate the receive-side DMA memory.
 *
 * For each of the instance's RDCs this allocates (1) the receive data
 * buffers (possibly in several chunks, counted per channel in
 * num_chunks[]) and (2) a control region holding the RBR descriptors,
 * RCR entries, and mailbox.  The ring sizes are taken from the
 * partition config, rounded up to a multiple of HXGE_RXDMA_POST_BATCH.
 * On success the pool descriptors are hung off hxgep->rx_buf_pool_p
 * and hxgep->rx_cntl_pool_p; on failure everything allocated so far
 * is unwound and freed.  Returns HXGE_OK or the failing allocation's
 * status.
 */
static hxge_status_t
hxge_alloc_rx_mem_pool(p_hxge_t hxgep)
{
	int			i, j;
	uint32_t		ndmas, st_rdc;
	p_hxge_dma_pt_cfg_t	p_all_cfgp;
	p_hxge_hw_pt_cfg_t	p_cfgp;
	p_hxge_dma_pool_t	dma_poolp;
	p_hxge_dma_common_t	*dma_buf_p;
	p_hxge_dma_pool_t	dma_cntl_poolp;
	p_hxge_dma_common_t	*dma_cntl_p;
	size_t			rx_buf_alloc_size;
	size_t			rx_cntl_alloc_size;
	uint32_t		*num_chunks;	/* per dma */
	hxge_status_t		status = HXGE_OK;

	uint32_t		hxge_port_rbr_size;
	uint32_t		hxge_port_rbr_spare_size;
	uint32_t		hxge_port_rcr_size;

	HXGE_DEBUG_MSG((hxgep, DMA_CTL, "==> hxge_alloc_rx_mem_pool"));

	/* Channel range comes from the partition's hardware config. */
	p_all_cfgp = (p_hxge_dma_pt_cfg_t)&hxgep->pt_config;
	p_cfgp = (p_hxge_hw_pt_cfg_t)&p_all_cfgp->hw_config;
	st_rdc = p_cfgp->start_rdc;
	ndmas = p_cfgp->max_rdcs;

	HXGE_DEBUG_MSG((hxgep, DMA_CTL,
	    " hxge_alloc_rx_mem_pool st_rdc %d ndmas %d", st_rdc, ndmas));

	/*
	 * Allocate memory for each receive DMA channel.
	 */
	dma_poolp = (p_hxge_dma_pool_t)KMEM_ZALLOC(sizeof (hxge_dma_pool_t),
	    KM_SLEEP);
	dma_buf_p = (p_hxge_dma_common_t *)KMEM_ZALLOC(
	    sizeof (p_hxge_dma_common_t) * ndmas, KM_SLEEP);

	dma_cntl_poolp = (p_hxge_dma_pool_t)
	    KMEM_ZALLOC(sizeof (hxge_dma_pool_t), KM_SLEEP);
	dma_cntl_p = (p_hxge_dma_common_t *)KMEM_ZALLOC(
	    sizeof (p_hxge_dma_common_t) * ndmas, KM_SLEEP);

	num_chunks = (uint32_t *)KMEM_ZALLOC(sizeof (uint32_t) * ndmas,
	    KM_SLEEP);

	/*
	 * Assume that each DMA channel will be configured with default block
	 * size. rbr block counts are mod of batch count (16).
	 */
	hxge_port_rbr_size = p_all_cfgp->rbr_size;
	hxge_port_rcr_size = p_all_cfgp->rcr_size;

	if (!hxge_port_rbr_size) {
		hxge_port_rbr_size = HXGE_RBR_RBB_DEFAULT;
	}

	/* Round the RBR size up to a whole number of post batches. */
	if (hxge_port_rbr_size % HXGE_RXDMA_POST_BATCH) {
		hxge_port_rbr_size = (HXGE_RXDMA_POST_BATCH *
		    (hxge_port_rbr_size / HXGE_RXDMA_POST_BATCH + 1));
	}

	p_all_cfgp->rbr_size = hxge_port_rbr_size;
	hxge_port_rbr_spare_size = hxge_rbr_spare_size;

	/* The spare size is rounded up the same way. */
	if (hxge_port_rbr_spare_size % HXGE_RXDMA_POST_BATCH) {
		hxge_port_rbr_spare_size = (HXGE_RXDMA_POST_BATCH *
		    (hxge_port_rbr_spare_size / HXGE_RXDMA_POST_BATCH + 1));
	}

	rx_buf_alloc_size = (hxgep->rx_default_block_size *
	    (hxge_port_rbr_size + hxge_port_rbr_spare_size));

	/*
	 * Addresses of receive block ring, receive completion ring and the
	 * mailbox must be all cache-aligned (64 bytes).
	 */
	rx_cntl_alloc_size = hxge_port_rbr_size + hxge_port_rbr_spare_size;
	rx_cntl_alloc_size *= (sizeof (rx_desc_t));
	rx_cntl_alloc_size += (sizeof (rcr_entry_t) * hxge_port_rcr_size);
	rx_cntl_alloc_size += sizeof (rxdma_mailbox_t);

	HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "==> hxge_alloc_rx_mem_pool: "
	    "hxge_port_rbr_size = %d hxge_port_rbr_spare_size = %d "
	    "hxge_port_rcr_size = %d rx_cntl_alloc_size = %d",
	    hxge_port_rbr_size, hxge_port_rbr_spare_size,
	    hxge_port_rcr_size, rx_cntl_alloc_size));

	hxgep->hxge_port_rbr_size = hxge_port_rbr_size;
	hxgep->hxge_port_rcr_size = hxge_port_rcr_size;

	/*
	 * Allocate memory for receive buffers and descriptor rings. Replace
	 * allocation functions with interface functions provided by the
	 * partition manager when it is available.
	 */
	/*
	 * Allocate memory for the receive buffer blocks.
	 */
	for (i = 0; i < ndmas; i++) {
		HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
		    " hxge_alloc_rx_mem_pool to alloc mem: "
		    " dma %d dma_buf_p %llx &dma_buf_p %llx",
		    i, dma_buf_p[i], &dma_buf_p[i]));

		num_chunks[i] = 0;

		status = hxge_alloc_rx_buf_dma(hxgep, st_rdc, &dma_buf_p[i],
		    rx_buf_alloc_size, hxgep->rx_default_block_size,
		    &num_chunks[i]);
		if (status != HXGE_OK) {
			break;
		}

		st_rdc++;
		HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
		    " hxge_alloc_rx_mem_pool DONE  alloc mem: "
		    "dma %d dma_buf_p %llx &dma_buf_p %llx", i,
		    dma_buf_p[i], &dma_buf_p[i]));
	}

	/* An early break above means channel i's buffers failed. */
	if (i < ndmas) {
		goto hxge_alloc_rx_mem_fail1;
	}

	/*
	 * Allocate memory for descriptor rings and mailbox.
	 */
	st_rdc = p_cfgp->start_rdc;
	for (j = 0; j < ndmas; j++) {
		status = hxge_alloc_rx_cntl_dma(hxgep, st_rdc, &dma_cntl_p[j],
		    rx_cntl_alloc_size);
		if (status != HXGE_OK) {
			break;
		}
		st_rdc++;
	}

	if (j < ndmas) {
		goto hxge_alloc_rx_mem_fail2;
	}

	/* Success: publish both pools on the instance. */
	dma_poolp->ndmas = ndmas;
	dma_poolp->num_chunks = num_chunks;
	dma_poolp->buf_allocated = B_TRUE;
	hxgep->rx_buf_pool_p = dma_poolp;
	dma_poolp->dma_buf_pool_p = dma_buf_p;

	dma_cntl_poolp->ndmas = ndmas;
	dma_cntl_poolp->buf_allocated = B_TRUE;
	hxgep->rx_cntl_pool_p = dma_cntl_poolp;
	dma_cntl_poolp->dma_buf_pool_p = dma_cntl_p;

	goto hxge_alloc_rx_mem_pool_exit;

hxge_alloc_rx_mem_fail2:
	/* Free control buffers */
	j--;
	HXGE_DEBUG_MSG((hxgep, DMA_CTL,
	    "==> hxge_alloc_rx_mem_pool: freeing control bufs (%d)", j));
	for (; j >= 0; j--) {
		hxge_free_rx_cntl_dma(hxgep,
		    (p_hxge_dma_common_t)dma_cntl_p[j]);
		HXGE_DEBUG_MSG((hxgep, DMA_CTL,
		    "==> hxge_alloc_rx_mem_pool: control bufs freed (%d)", j));
	}
	HXGE_DEBUG_MSG((hxgep, DMA_CTL,
	    "==> hxge_alloc_rx_mem_pool: control bufs freed (%d)", j));

hxge_alloc_rx_mem_fail1:
	/* Free data buffers */
	i--;
	HXGE_DEBUG_MSG((hxgep, DMA_CTL,
	    "==> hxge_alloc_rx_mem_pool: freeing data bufs (%d)", i));
	for (; i >= 0; i--) {
		hxge_free_rx_buf_dma(hxgep, (p_hxge_dma_common_t)dma_buf_p[i],
		    num_chunks[i]);
	}
	HXGE_DEBUG_MSG((hxgep, DMA_CTL,
	    "==> hxge_alloc_rx_mem_pool: data bufs freed (%d)", i));

	/* Finally release the pool bookkeeping itself. */
	KMEM_FREE(num_chunks, sizeof (uint32_t) * ndmas);
	KMEM_FREE(dma_poolp, sizeof (hxge_dma_pool_t));
	KMEM_FREE(dma_buf_p, ndmas * sizeof (p_hxge_dma_common_t));
	KMEM_FREE(dma_cntl_poolp, sizeof (hxge_dma_pool_t));
	KMEM_FREE(dma_cntl_p, ndmas * sizeof (p_hxge_dma_common_t));

hxge_alloc_rx_mem_pool_exit:
	HXGE_DEBUG_MSG((hxgep, DMA_CTL,
	    "<== hxge_alloc_rx_mem_pool:status 0x%08x", status));

	return (status);
}
1546*3dec9fcdSqs 
1547*3dec9fcdSqs static void
1548*3dec9fcdSqs hxge_free_rx_mem_pool(p_hxge_t hxgep)
1549*3dec9fcdSqs {
1550*3dec9fcdSqs 	uint32_t		i, ndmas;
1551*3dec9fcdSqs 	p_hxge_dma_pool_t	dma_poolp;
1552*3dec9fcdSqs 	p_hxge_dma_common_t	*dma_buf_p;
1553*3dec9fcdSqs 	p_hxge_dma_pool_t	dma_cntl_poolp;
1554*3dec9fcdSqs 	p_hxge_dma_common_t	*dma_cntl_p;
1555*3dec9fcdSqs 	uint32_t		*num_chunks;
1556*3dec9fcdSqs 
1557*3dec9fcdSqs 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "==> hxge_free_rx_mem_pool"));
1558*3dec9fcdSqs 
1559*3dec9fcdSqs 	dma_poolp = hxgep->rx_buf_pool_p;
1560*3dec9fcdSqs 	if (dma_poolp == NULL || (!dma_poolp->buf_allocated)) {
1561*3dec9fcdSqs 		HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "<== hxge_free_rx_mem_pool "
1562*3dec9fcdSqs 		    "(null rx buf pool or buf not allocated"));
1563*3dec9fcdSqs 		return;
1564*3dec9fcdSqs 	}
1565*3dec9fcdSqs 
1566*3dec9fcdSqs 	dma_cntl_poolp = hxgep->rx_cntl_pool_p;
1567*3dec9fcdSqs 	if (dma_cntl_poolp == NULL || (!dma_cntl_poolp->buf_allocated)) {
1568*3dec9fcdSqs 		HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
1569*3dec9fcdSqs 		    "<== hxge_free_rx_mem_pool "
1570*3dec9fcdSqs 		    "(null rx cntl buf pool or cntl buf not allocated"));
1571*3dec9fcdSqs 		return;
1572*3dec9fcdSqs 	}
1573*3dec9fcdSqs 
1574*3dec9fcdSqs 	dma_buf_p = dma_poolp->dma_buf_pool_p;
1575*3dec9fcdSqs 	num_chunks = dma_poolp->num_chunks;
1576*3dec9fcdSqs 
1577*3dec9fcdSqs 	dma_cntl_p = dma_cntl_poolp->dma_buf_pool_p;
1578*3dec9fcdSqs 	ndmas = dma_cntl_poolp->ndmas;
1579*3dec9fcdSqs 
1580*3dec9fcdSqs 	for (i = 0; i < ndmas; i++) {
1581*3dec9fcdSqs 		hxge_free_rx_buf_dma(hxgep, dma_buf_p[i], num_chunks[i]);
1582*3dec9fcdSqs 	}
1583*3dec9fcdSqs 
1584*3dec9fcdSqs 	for (i = 0; i < ndmas; i++) {
1585*3dec9fcdSqs 		hxge_free_rx_cntl_dma(hxgep, dma_cntl_p[i]);
1586*3dec9fcdSqs 	}
1587*3dec9fcdSqs 
1588*3dec9fcdSqs 	for (i = 0; i < ndmas; i++) {
1589*3dec9fcdSqs 		KMEM_FREE(dma_buf_p[i],
1590*3dec9fcdSqs 		    sizeof (hxge_dma_common_t) * HXGE_DMA_BLOCK);
1591*3dec9fcdSqs 		KMEM_FREE(dma_cntl_p[i], sizeof (hxge_dma_common_t));
1592*3dec9fcdSqs 	}
1593*3dec9fcdSqs 
1594*3dec9fcdSqs 	KMEM_FREE(num_chunks, sizeof (uint32_t) * ndmas);
1595*3dec9fcdSqs 	KMEM_FREE(dma_cntl_p, ndmas * sizeof (p_hxge_dma_common_t));
1596*3dec9fcdSqs 	KMEM_FREE(dma_cntl_poolp, sizeof (hxge_dma_pool_t));
1597*3dec9fcdSqs 	KMEM_FREE(dma_buf_p, ndmas * sizeof (p_hxge_dma_common_t));
1598*3dec9fcdSqs 	KMEM_FREE(dma_poolp, sizeof (hxge_dma_pool_t));
1599*3dec9fcdSqs 
1600*3dec9fcdSqs 	hxgep->rx_buf_pool_p = NULL;
1601*3dec9fcdSqs 	hxgep->rx_cntl_pool_p = NULL;
1602*3dec9fcdSqs 
1603*3dec9fcdSqs 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "<== hxge_free_rx_mem_pool"));
1604*3dec9fcdSqs }
1605*3dec9fcdSqs 
1606*3dec9fcdSqs static hxge_status_t
1607*3dec9fcdSqs hxge_alloc_rx_buf_dma(p_hxge_t hxgep, uint16_t dma_channel,
1608*3dec9fcdSqs     p_hxge_dma_common_t *dmap,
1609*3dec9fcdSqs     size_t alloc_size, size_t block_size, uint32_t *num_chunks)
1610*3dec9fcdSqs {
1611*3dec9fcdSqs 	p_hxge_dma_common_t	rx_dmap;
1612*3dec9fcdSqs 	hxge_status_t		status = HXGE_OK;
1613*3dec9fcdSqs 	size_t			total_alloc_size;
1614*3dec9fcdSqs 	size_t			allocated = 0;
1615*3dec9fcdSqs 	int			i, size_index, array_size;
1616*3dec9fcdSqs 
1617*3dec9fcdSqs 	HXGE_DEBUG_MSG((hxgep, DMA_CTL, "==> hxge_alloc_rx_buf_dma"));
1618*3dec9fcdSqs 
1619*3dec9fcdSqs 	rx_dmap = (p_hxge_dma_common_t)
1620*3dec9fcdSqs 	    KMEM_ZALLOC(sizeof (hxge_dma_common_t) * HXGE_DMA_BLOCK, KM_SLEEP);
1621*3dec9fcdSqs 
1622*3dec9fcdSqs 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
1623*3dec9fcdSqs 	    " alloc_rx_buf_dma rdc %d asize %x bsize %x bbuf %llx ",
1624*3dec9fcdSqs 	    dma_channel, alloc_size, block_size, dmap));
1625*3dec9fcdSqs 
1626*3dec9fcdSqs 	total_alloc_size = alloc_size;
1627*3dec9fcdSqs 
1628*3dec9fcdSqs 	i = 0;
1629*3dec9fcdSqs 	size_index = 0;
1630*3dec9fcdSqs 	array_size = sizeof (alloc_sizes) / sizeof (size_t);
1631*3dec9fcdSqs 	while ((alloc_sizes[size_index] < alloc_size) &&
1632*3dec9fcdSqs 	    (size_index < array_size))
1633*3dec9fcdSqs 		size_index++;
1634*3dec9fcdSqs 	if (size_index >= array_size) {
1635*3dec9fcdSqs 		size_index = array_size - 1;
1636*3dec9fcdSqs 	}
1637*3dec9fcdSqs 
1638*3dec9fcdSqs 	while ((allocated < total_alloc_size) &&
1639*3dec9fcdSqs 	    (size_index >= 0) && (i < HXGE_DMA_BLOCK)) {
1640*3dec9fcdSqs 		rx_dmap[i].dma_chunk_index = i;
1641*3dec9fcdSqs 		rx_dmap[i].block_size = block_size;
1642*3dec9fcdSqs 		rx_dmap[i].alength = alloc_sizes[size_index];
1643*3dec9fcdSqs 		rx_dmap[i].orig_alength = rx_dmap[i].alength;
1644*3dec9fcdSqs 		rx_dmap[i].nblocks = alloc_sizes[size_index] / block_size;
1645*3dec9fcdSqs 		rx_dmap[i].dma_channel = dma_channel;
1646*3dec9fcdSqs 		rx_dmap[i].contig_alloc_type = B_FALSE;
1647*3dec9fcdSqs 
1648*3dec9fcdSqs 		HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
1649*3dec9fcdSqs 		    "alloc_rx_buf_dma rdc %d chunk %d bufp %llx size %x "
1650*3dec9fcdSqs 		    "i %d nblocks %d alength %d",
1651*3dec9fcdSqs 		    dma_channel, i, &rx_dmap[i], block_size,
1652*3dec9fcdSqs 		    i, rx_dmap[i].nblocks, rx_dmap[i].alength));
1653*3dec9fcdSqs 		status = hxge_dma_mem_alloc(hxgep, hxge_force_dma,
1654*3dec9fcdSqs 		    &hxge_rx_dma_attr, rx_dmap[i].alength,
1655*3dec9fcdSqs 		    &hxge_dev_buf_dma_acc_attr,
1656*3dec9fcdSqs 		    DDI_DMA_READ | DDI_DMA_STREAMING,
1657*3dec9fcdSqs 		    (p_hxge_dma_common_t)(&rx_dmap[i]));
1658*3dec9fcdSqs 		if (status != HXGE_OK) {
1659*3dec9fcdSqs 			HXGE_DEBUG_MSG((hxgep, DMA_CTL,
1660*3dec9fcdSqs 			    " hxge_alloc_rx_buf_dma: Alloc Failed: "
1661*3dec9fcdSqs 			    " for size: %d", alloc_sizes[size_index]));
1662*3dec9fcdSqs 			size_index--;
1663*3dec9fcdSqs 		} else {
1664*3dec9fcdSqs 			HXGE_DEBUG_MSG((hxgep, DMA_CTL,
1665*3dec9fcdSqs 			    " alloc_rx_buf_dma allocated rdc %d "
1666*3dec9fcdSqs 			    "chunk %d size %x dvma %x bufp %llx ",
1667*3dec9fcdSqs 			    dma_channel, i, rx_dmap[i].alength,
1668*3dec9fcdSqs 			    rx_dmap[i].ioaddr_pp, &rx_dmap[i]));
1669*3dec9fcdSqs 			i++;
1670*3dec9fcdSqs 			allocated += alloc_sizes[size_index];
1671*3dec9fcdSqs 		}
1672*3dec9fcdSqs 	}
1673*3dec9fcdSqs 
1674*3dec9fcdSqs 	if (allocated < total_alloc_size) {
1675*3dec9fcdSqs 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
1676*3dec9fcdSqs 		    " hxge_alloc_rx_buf_dma failed due to"
1677*3dec9fcdSqs 		    " allocated(%d) < required(%d)",
1678*3dec9fcdSqs 		    allocated, total_alloc_size));
1679*3dec9fcdSqs 		goto hxge_alloc_rx_mem_fail1;
1680*3dec9fcdSqs 	}
1681*3dec9fcdSqs 
1682*3dec9fcdSqs 	HXGE_DEBUG_MSG((hxgep, DMA_CTL,
1683*3dec9fcdSqs 	    " alloc_rx_buf_dma rdc %d allocated %d chunks", dma_channel, i));
1684*3dec9fcdSqs 
1685*3dec9fcdSqs 	*num_chunks = i;
1686*3dec9fcdSqs 	*dmap = rx_dmap;
1687*3dec9fcdSqs 
1688*3dec9fcdSqs 	goto hxge_alloc_rx_mem_exit;
1689*3dec9fcdSqs 
1690*3dec9fcdSqs hxge_alloc_rx_mem_fail1:
1691*3dec9fcdSqs 	KMEM_FREE(rx_dmap, sizeof (hxge_dma_common_t) * HXGE_DMA_BLOCK);
1692*3dec9fcdSqs 
1693*3dec9fcdSqs hxge_alloc_rx_mem_exit:
1694*3dec9fcdSqs 	HXGE_DEBUG_MSG((hxgep, DMA_CTL,
1695*3dec9fcdSqs 	    "<== hxge_alloc_rx_buf_dma status 0x%08x", status));
1696*3dec9fcdSqs 
1697*3dec9fcdSqs 	return (status);
1698*3dec9fcdSqs }
1699*3dec9fcdSqs 
1700*3dec9fcdSqs /*ARGSUSED*/
1701*3dec9fcdSqs static void
1702*3dec9fcdSqs hxge_free_rx_buf_dma(p_hxge_t hxgep, p_hxge_dma_common_t dmap,
1703*3dec9fcdSqs     uint32_t num_chunks)
1704*3dec9fcdSqs {
1705*3dec9fcdSqs 	int i;
1706*3dec9fcdSqs 
1707*3dec9fcdSqs 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
1708*3dec9fcdSqs 	    "==> hxge_free_rx_buf_dma: # of chunks %d", num_chunks));
1709*3dec9fcdSqs 
1710*3dec9fcdSqs 	for (i = 0; i < num_chunks; i++) {
1711*3dec9fcdSqs 		HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
1712*3dec9fcdSqs 		    "==> hxge_free_rx_buf_dma: chunk %d dmap 0x%llx", i, dmap));
1713*3dec9fcdSqs 		hxge_dma_mem_free(dmap++);
1714*3dec9fcdSqs 	}
1715*3dec9fcdSqs 
1716*3dec9fcdSqs 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "<== hxge_free_rx_buf_dma"));
1717*3dec9fcdSqs }
1718*3dec9fcdSqs 
1719*3dec9fcdSqs /*ARGSUSED*/
1720*3dec9fcdSqs static hxge_status_t
1721*3dec9fcdSqs hxge_alloc_rx_cntl_dma(p_hxge_t hxgep, uint16_t dma_channel,
1722*3dec9fcdSqs     p_hxge_dma_common_t *dmap, size_t size)
1723*3dec9fcdSqs {
1724*3dec9fcdSqs 	p_hxge_dma_common_t	rx_dmap;
1725*3dec9fcdSqs 	hxge_status_t		status = HXGE_OK;
1726*3dec9fcdSqs 
1727*3dec9fcdSqs 	HXGE_DEBUG_MSG((hxgep, DMA_CTL, "==> hxge_alloc_rx_cntl_dma"));
1728*3dec9fcdSqs 
1729*3dec9fcdSqs 	rx_dmap = (p_hxge_dma_common_t)
1730*3dec9fcdSqs 	    KMEM_ZALLOC(sizeof (hxge_dma_common_t), KM_SLEEP);
1731*3dec9fcdSqs 
1732*3dec9fcdSqs 	rx_dmap->contig_alloc_type = B_FALSE;
1733*3dec9fcdSqs 
1734*3dec9fcdSqs 	status = hxge_dma_mem_alloc(hxgep, hxge_force_dma,
1735*3dec9fcdSqs 	    &hxge_desc_dma_attr, size, &hxge_dev_desc_dma_acc_attr,
1736*3dec9fcdSqs 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT, rx_dmap);
1737*3dec9fcdSqs 	if (status != HXGE_OK) {
1738*3dec9fcdSqs 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
1739*3dec9fcdSqs 		    " hxge_alloc_rx_cntl_dma: Alloc Failed: "
1740*3dec9fcdSqs 		    " for size: %d", size));
1741*3dec9fcdSqs 		goto hxge_alloc_rx_cntl_dma_fail1;
1742*3dec9fcdSqs 	}
1743*3dec9fcdSqs 
1744*3dec9fcdSqs 	*dmap = rx_dmap;
1745*3dec9fcdSqs 
1746*3dec9fcdSqs 	goto hxge_alloc_rx_cntl_dma_exit;
1747*3dec9fcdSqs 
1748*3dec9fcdSqs hxge_alloc_rx_cntl_dma_fail1:
1749*3dec9fcdSqs 	KMEM_FREE(rx_dmap, sizeof (hxge_dma_common_t));
1750*3dec9fcdSqs 
1751*3dec9fcdSqs hxge_alloc_rx_cntl_dma_exit:
1752*3dec9fcdSqs 	HXGE_DEBUG_MSG((hxgep, DMA_CTL,
1753*3dec9fcdSqs 	    "<== hxge_alloc_rx_cntl_dma status 0x%08x", status));
1754*3dec9fcdSqs 
1755*3dec9fcdSqs 	return (status);
1756*3dec9fcdSqs }
1757*3dec9fcdSqs 
/*
 * hxge_free_rx_cntl_dma():  Release the DMA memory backing one receive
 * channel's descriptor ring and mailbox.  The hxge_dma_common_t itself
 * is freed by the caller.
 */
/*ARGSUSED*/
static void
hxge_free_rx_cntl_dma(p_hxge_t hxgep, p_hxge_dma_common_t dmap)
{
	HXGE_DEBUG_MSG((hxgep, DMA_CTL, "==> hxge_free_rx_cntl_dma"));

	hxge_dma_mem_free(dmap);

	HXGE_DEBUG_MSG((hxgep, DMA_CTL, "<== hxge_free_rx_cntl_dma"));
}
1768*3dec9fcdSqs 
/*
 * hxge_alloc_tx_mem_pool():  Allocate all transmit DMA memory for this
 * device instance.
 *
 * For each of the p_cfgp->max_tdcs transmit channels this allocates:
 *  - a pool of data buffers (hxge_bcopy_thresh bytes per ring entry)
 *    used for copying small transmit packets, and
 *  - one cache-consistent area holding the transmit descriptor ring
 *    plus its mailbox.
 * On success the pool structures are hung off hxgep->tx_buf_pool_p and
 * hxgep->tx_cntl_pool_p.  On any per-channel failure everything
 * allocated so far is unwound via the fail labels below and the last
 * failure status is returned.
 */
static hxge_status_t
hxge_alloc_tx_mem_pool(p_hxge_t hxgep)
{
	hxge_status_t		status = HXGE_OK;
	int			i, j;
	uint32_t		ndmas, st_tdc;
	p_hxge_dma_pt_cfg_t	p_all_cfgp;
	p_hxge_hw_pt_cfg_t	p_cfgp;
	p_hxge_dma_pool_t	dma_poolp;
	p_hxge_dma_common_t	*dma_buf_p;
	p_hxge_dma_pool_t	dma_cntl_poolp;
	p_hxge_dma_common_t	*dma_cntl_p;
	size_t			tx_buf_alloc_size;
	size_t			tx_cntl_alloc_size;
	uint32_t		*num_chunks;	/* per dma */

	HXGE_DEBUG_MSG((hxgep, MEM_CTL, "==> hxge_alloc_tx_mem_pool"));

	p_all_cfgp = (p_hxge_dma_pt_cfg_t)&hxgep->pt_config;
	p_cfgp = (p_hxge_hw_pt_cfg_t)&p_all_cfgp->hw_config;
	st_tdc = p_cfgp->start_tdc;
	ndmas = p_cfgp->max_tdcs;

	HXGE_DEBUG_MSG((hxgep, MEM_CTL, "==> hxge_alloc_tx_mem_pool: "
	    "p_cfgp 0x%016llx start_tdc %d ndmas %d hxgep->max_tdcs %d",
	    p_cfgp, p_cfgp->start_tdc, p_cfgp->max_tdcs, hxgep->max_tdcs));
	/*
	 * Allocate memory for each transmit DMA channel.
	 */
	dma_poolp = (p_hxge_dma_pool_t)KMEM_ZALLOC(sizeof (hxge_dma_pool_t),
	    KM_SLEEP);
	dma_buf_p = (p_hxge_dma_common_t *)KMEM_ZALLOC(
	    sizeof (p_hxge_dma_common_t) * ndmas, KM_SLEEP);

	dma_cntl_poolp = (p_hxge_dma_pool_t)
	    KMEM_ZALLOC(sizeof (hxge_dma_pool_t), KM_SLEEP);
	dma_cntl_p = (p_hxge_dma_common_t *)KMEM_ZALLOC(
	    sizeof (p_hxge_dma_common_t) * ndmas, KM_SLEEP);

	/* Remember the configured ring size for this port. */
	hxgep->hxge_port_tx_ring_size = hxge_tx_ring_size;

	/*
	 * Assume that each DMA channel will be configured with default
	 * transmit bufer size for copying transmit data. (For packet payload
	 * over this limit, packets will not be copied.)
	 */
	tx_buf_alloc_size = (hxge_bcopy_thresh * hxge_tx_ring_size);

	/*
	 * Addresses of transmit descriptor ring and the mailbox must be all
	 * cache-aligned (64 bytes).
	 */
	tx_cntl_alloc_size = hxge_tx_ring_size;
	tx_cntl_alloc_size *= (sizeof (tx_desc_t));
	tx_cntl_alloc_size += sizeof (txdma_mailbox_t);

	num_chunks = (uint32_t *)KMEM_ZALLOC(sizeof (uint32_t) * ndmas,
	    KM_SLEEP);

	/*
	 * Allocate memory for transmit buffers and descriptor rings. Replace
	 * allocation functions with interface functions provided by the
	 * partition manager when it is available.
	 *
	 * Allocate memory for the transmit buffer pool.
	 */
	for (i = 0; i < ndmas; i++) {
		num_chunks[i] = 0;
		status = hxge_alloc_tx_buf_dma(hxgep, st_tdc, &dma_buf_p[i],
		    tx_buf_alloc_size, hxge_bcopy_thresh, &num_chunks[i]);
		if (status != HXGE_OK) {
			break;
		}
		st_tdc++;
	}

	/* A data buffer allocation failed for channel i; unwind. */
	if (i < ndmas) {
		goto hxge_alloc_tx_mem_pool_fail1;
	}

	st_tdc = p_cfgp->start_tdc;

	/*
	 * Allocate memory for descriptor rings and mailbox.
	 */
	for (j = 0; j < ndmas; j++) {
		status = hxge_alloc_tx_cntl_dma(hxgep, st_tdc, &dma_cntl_p[j],
		    tx_cntl_alloc_size);
		if (status != HXGE_OK) {
			break;
		}
		st_tdc++;
	}

	/* A control area allocation failed for channel j; unwind. */
	if (j < ndmas) {
		goto hxge_alloc_tx_mem_pool_fail2;
	}

	/* Publish the fully-built pools on the per-instance state. */
	dma_poolp->ndmas = ndmas;
	dma_poolp->num_chunks = num_chunks;
	dma_poolp->buf_allocated = B_TRUE;
	dma_poolp->dma_buf_pool_p = dma_buf_p;
	hxgep->tx_buf_pool_p = dma_poolp;

	dma_cntl_poolp->ndmas = ndmas;
	dma_cntl_poolp->buf_allocated = B_TRUE;
	dma_cntl_poolp->dma_buf_pool_p = dma_cntl_p;
	hxgep->tx_cntl_pool_p = dma_cntl_poolp;

	HXGE_DEBUG_MSG((hxgep, MEM_CTL,
	    "==> hxge_alloc_tx_mem_pool: start_tdc %d "
	    "ndmas %d poolp->ndmas %d", st_tdc, ndmas, dma_poolp->ndmas));

	goto hxge_alloc_tx_mem_pool_exit;

hxge_alloc_tx_mem_pool_fail2:
	/* Free control buffers */
	j--;
	for (; j >= 0; j--) {
		hxge_free_tx_cntl_dma(hxgep,
		    (p_hxge_dma_common_t)dma_cntl_p[j]);
	}

hxge_alloc_tx_mem_pool_fail1:
	/* Free data buffers */
	i--;
	for (; i >= 0; i--) {
		hxge_free_tx_buf_dma(hxgep, (p_hxge_dma_common_t)dma_buf_p[i],
		    num_chunks[i]);
	}

	KMEM_FREE(dma_poolp, sizeof (hxge_dma_pool_t));
	KMEM_FREE(dma_buf_p, ndmas * sizeof (p_hxge_dma_common_t));
	KMEM_FREE(dma_cntl_poolp, sizeof (hxge_dma_pool_t));
	KMEM_FREE(dma_cntl_p, ndmas * sizeof (p_hxge_dma_common_t));
	KMEM_FREE(num_chunks, sizeof (uint32_t) * ndmas);

hxge_alloc_tx_mem_pool_exit:
	HXGE_DEBUG_MSG((hxgep, MEM_CTL,
	    "<== hxge_alloc_tx_mem_pool:status 0x%08x", status));

	return (status);
}
1912*3dec9fcdSqs 
1913*3dec9fcdSqs static hxge_status_t
1914*3dec9fcdSqs hxge_alloc_tx_buf_dma(p_hxge_t hxgep, uint16_t dma_channel,
1915*3dec9fcdSqs     p_hxge_dma_common_t *dmap, size_t alloc_size,
1916*3dec9fcdSqs     size_t block_size, uint32_t *num_chunks)
1917*3dec9fcdSqs {
1918*3dec9fcdSqs 	p_hxge_dma_common_t	tx_dmap;
1919*3dec9fcdSqs 	hxge_status_t		status = HXGE_OK;
1920*3dec9fcdSqs 	size_t			total_alloc_size;
1921*3dec9fcdSqs 	size_t			allocated = 0;
1922*3dec9fcdSqs 	int			i, size_index, array_size;
1923*3dec9fcdSqs 
1924*3dec9fcdSqs 	HXGE_DEBUG_MSG((hxgep, DMA_CTL, "==> hxge_alloc_tx_buf_dma"));
1925*3dec9fcdSqs 
1926*3dec9fcdSqs 	tx_dmap = (p_hxge_dma_common_t)
1927*3dec9fcdSqs 	    KMEM_ZALLOC(sizeof (hxge_dma_common_t) * HXGE_DMA_BLOCK, KM_SLEEP);
1928*3dec9fcdSqs 
1929*3dec9fcdSqs 	total_alloc_size = alloc_size;
1930*3dec9fcdSqs 	i = 0;
1931*3dec9fcdSqs 	size_index = 0;
1932*3dec9fcdSqs 	array_size = sizeof (alloc_sizes) / sizeof (size_t);
1933*3dec9fcdSqs 	while ((alloc_sizes[size_index] < alloc_size) &&
1934*3dec9fcdSqs 	    (size_index < array_size))
1935*3dec9fcdSqs 		size_index++;
1936*3dec9fcdSqs 	if (size_index >= array_size) {
1937*3dec9fcdSqs 		size_index = array_size - 1;
1938*3dec9fcdSqs 	}
1939*3dec9fcdSqs 
1940*3dec9fcdSqs 	while ((allocated < total_alloc_size) &&
1941*3dec9fcdSqs 	    (size_index >= 0) && (i < HXGE_DMA_BLOCK)) {
1942*3dec9fcdSqs 		tx_dmap[i].dma_chunk_index = i;
1943*3dec9fcdSqs 		tx_dmap[i].block_size = block_size;
1944*3dec9fcdSqs 		tx_dmap[i].alength = alloc_sizes[size_index];
1945*3dec9fcdSqs 		tx_dmap[i].orig_alength = tx_dmap[i].alength;
1946*3dec9fcdSqs 		tx_dmap[i].nblocks = alloc_sizes[size_index] / block_size;
1947*3dec9fcdSqs 		tx_dmap[i].dma_channel = dma_channel;
1948*3dec9fcdSqs 		tx_dmap[i].contig_alloc_type = B_FALSE;
1949*3dec9fcdSqs 
1950*3dec9fcdSqs 		status = hxge_dma_mem_alloc(hxgep, hxge_force_dma,
1951*3dec9fcdSqs 		    &hxge_tx_dma_attr, tx_dmap[i].alength,
1952*3dec9fcdSqs 		    &hxge_dev_buf_dma_acc_attr,
1953*3dec9fcdSqs 		    DDI_DMA_WRITE | DDI_DMA_STREAMING,
1954*3dec9fcdSqs 		    (p_hxge_dma_common_t)(&tx_dmap[i]));
1955*3dec9fcdSqs 		if (status != HXGE_OK) {
1956*3dec9fcdSqs 			HXGE_DEBUG_MSG((hxgep, DMA_CTL,
1957*3dec9fcdSqs 			    " hxge_alloc_tx_buf_dma: Alloc Failed: "
1958*3dec9fcdSqs 			    " for size: %d", alloc_sizes[size_index]));
1959*3dec9fcdSqs 			size_index--;
1960*3dec9fcdSqs 		} else {
1961*3dec9fcdSqs 			i++;
1962*3dec9fcdSqs 			allocated += alloc_sizes[size_index];
1963*3dec9fcdSqs 		}
1964*3dec9fcdSqs 	}
1965*3dec9fcdSqs 
1966*3dec9fcdSqs 	if (allocated < total_alloc_size) {
1967*3dec9fcdSqs 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
1968*3dec9fcdSqs 		    " hxge_alloc_tx_buf_dma: failed due to"
1969*3dec9fcdSqs 		    " allocated(%d) < required(%d)",
1970*3dec9fcdSqs 		    allocated, total_alloc_size));
1971*3dec9fcdSqs 		goto hxge_alloc_tx_mem_fail1;
1972*3dec9fcdSqs 	}
1973*3dec9fcdSqs 
1974*3dec9fcdSqs 	*num_chunks = i;
1975*3dec9fcdSqs 	*dmap = tx_dmap;
1976*3dec9fcdSqs 	HXGE_DEBUG_MSG((hxgep, DMA_CTL,
1977*3dec9fcdSqs 	    "==> hxge_alloc_tx_buf_dma dmap 0x%016llx num chunks %d",
1978*3dec9fcdSqs 	    *dmap, i));
1979*3dec9fcdSqs 	goto hxge_alloc_tx_mem_exit;
1980*3dec9fcdSqs 
1981*3dec9fcdSqs hxge_alloc_tx_mem_fail1:
1982*3dec9fcdSqs 	KMEM_FREE(tx_dmap, sizeof (hxge_dma_common_t) * HXGE_DMA_BLOCK);
1983*3dec9fcdSqs 
1984*3dec9fcdSqs hxge_alloc_tx_mem_exit:
1985*3dec9fcdSqs 	HXGE_DEBUG_MSG((hxgep, DMA_CTL,
1986*3dec9fcdSqs 	    "<== hxge_alloc_tx_buf_dma status 0x%08x", status));
1987*3dec9fcdSqs 
1988*3dec9fcdSqs 	return (status);
1989*3dec9fcdSqs }
1990*3dec9fcdSqs 
1991*3dec9fcdSqs /*ARGSUSED*/
1992*3dec9fcdSqs static void
1993*3dec9fcdSqs hxge_free_tx_buf_dma(p_hxge_t hxgep, p_hxge_dma_common_t dmap,
1994*3dec9fcdSqs     uint32_t num_chunks)
1995*3dec9fcdSqs {
1996*3dec9fcdSqs 	int i;
1997*3dec9fcdSqs 
1998*3dec9fcdSqs 	HXGE_DEBUG_MSG((hxgep, MEM_CTL, "==> hxge_free_tx_buf_dma"));
1999*3dec9fcdSqs 
2000*3dec9fcdSqs 	for (i = 0; i < num_chunks; i++) {
2001*3dec9fcdSqs 		hxge_dma_mem_free(dmap++);
2002*3dec9fcdSqs 	}
2003*3dec9fcdSqs 
2004*3dec9fcdSqs 	HXGE_DEBUG_MSG((hxgep, MEM_CTL, "<== hxge_free_tx_buf_dma"));
2005*3dec9fcdSqs }
2006*3dec9fcdSqs 
2007*3dec9fcdSqs /*ARGSUSED*/
2008*3dec9fcdSqs static hxge_status_t
2009*3dec9fcdSqs hxge_alloc_tx_cntl_dma(p_hxge_t hxgep, uint16_t dma_channel,
2010*3dec9fcdSqs     p_hxge_dma_common_t *dmap, size_t size)
2011*3dec9fcdSqs {
2012*3dec9fcdSqs 	p_hxge_dma_common_t	tx_dmap;
2013*3dec9fcdSqs 	hxge_status_t		status = HXGE_OK;
2014*3dec9fcdSqs 
2015*3dec9fcdSqs 	HXGE_DEBUG_MSG((hxgep, DMA_CTL, "==> hxge_alloc_tx_cntl_dma"));
2016*3dec9fcdSqs 
2017*3dec9fcdSqs 	tx_dmap = (p_hxge_dma_common_t)KMEM_ZALLOC(sizeof (hxge_dma_common_t),
2018*3dec9fcdSqs 	    KM_SLEEP);
2019*3dec9fcdSqs 
2020*3dec9fcdSqs 	tx_dmap->contig_alloc_type = B_FALSE;
2021*3dec9fcdSqs 
2022*3dec9fcdSqs 	status = hxge_dma_mem_alloc(hxgep, hxge_force_dma,
2023*3dec9fcdSqs 	    &hxge_desc_dma_attr, size, &hxge_dev_desc_dma_acc_attr,
2024*3dec9fcdSqs 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT, tx_dmap);
2025*3dec9fcdSqs 	if (status != HXGE_OK) {
2026*3dec9fcdSqs 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2027*3dec9fcdSqs 		    " hxge_alloc_tx_cntl_dma: Alloc Failed: "
2028*3dec9fcdSqs 		    " for size: %d", size));
2029*3dec9fcdSqs 		goto hxge_alloc_tx_cntl_dma_fail1;
2030*3dec9fcdSqs 	}
2031*3dec9fcdSqs 
2032*3dec9fcdSqs 	*dmap = tx_dmap;
2033*3dec9fcdSqs 
2034*3dec9fcdSqs 	goto hxge_alloc_tx_cntl_dma_exit;
2035*3dec9fcdSqs 
2036*3dec9fcdSqs hxge_alloc_tx_cntl_dma_fail1:
2037*3dec9fcdSqs 	KMEM_FREE(tx_dmap, sizeof (hxge_dma_common_t));
2038*3dec9fcdSqs 
2039*3dec9fcdSqs hxge_alloc_tx_cntl_dma_exit:
2040*3dec9fcdSqs 	HXGE_DEBUG_MSG((hxgep, DMA_CTL,
2041*3dec9fcdSqs 	    "<== hxge_alloc_tx_cntl_dma status 0x%08x", status));
2042*3dec9fcdSqs 
2043*3dec9fcdSqs 	return (status);
2044*3dec9fcdSqs }
2045*3dec9fcdSqs 
/*
 * hxge_free_tx_cntl_dma():  Release the DMA memory backing one transmit
 * channel's descriptor ring and mailbox.  The hxge_dma_common_t itself
 * is freed by the caller.
 */
/*ARGSUSED*/
static void
hxge_free_tx_cntl_dma(p_hxge_t hxgep, p_hxge_dma_common_t dmap)
{
	HXGE_DEBUG_MSG((hxgep, DMA_CTL, "==> hxge_free_tx_cntl_dma"));

	hxge_dma_mem_free(dmap);

	HXGE_DEBUG_MSG((hxgep, DMA_CTL, "<== hxge_free_tx_cntl_dma"));
}
2056*3dec9fcdSqs 
2057*3dec9fcdSqs static void
2058*3dec9fcdSqs hxge_free_tx_mem_pool(p_hxge_t hxgep)
2059*3dec9fcdSqs {
2060*3dec9fcdSqs 	uint32_t		i, ndmas;
2061*3dec9fcdSqs 	p_hxge_dma_pool_t	dma_poolp;
2062*3dec9fcdSqs 	p_hxge_dma_common_t	*dma_buf_p;
2063*3dec9fcdSqs 	p_hxge_dma_pool_t	dma_cntl_poolp;
2064*3dec9fcdSqs 	p_hxge_dma_common_t	*dma_cntl_p;
2065*3dec9fcdSqs 	uint32_t		*num_chunks;
2066*3dec9fcdSqs 
2067*3dec9fcdSqs 	HXGE_DEBUG_MSG((hxgep, MEM3_CTL, "==> hxge_free_tx_mem_pool"));
2068*3dec9fcdSqs 
2069*3dec9fcdSqs 	dma_poolp = hxgep->tx_buf_pool_p;
2070*3dec9fcdSqs 	if (dma_poolp == NULL || (!dma_poolp->buf_allocated)) {
2071*3dec9fcdSqs 		HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
2072*3dec9fcdSqs 		    "<== hxge_free_tx_mem_pool "
2073*3dec9fcdSqs 		    "(null rx buf pool or buf not allocated"));
2074*3dec9fcdSqs 		return;
2075*3dec9fcdSqs 	}
2076*3dec9fcdSqs 
2077*3dec9fcdSqs 	dma_cntl_poolp = hxgep->tx_cntl_pool_p;
2078*3dec9fcdSqs 	if (dma_cntl_poolp == NULL || (!dma_cntl_poolp->buf_allocated)) {
2079*3dec9fcdSqs 		HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
2080*3dec9fcdSqs 		    "<== hxge_free_tx_mem_pool "
2081*3dec9fcdSqs 		    "(null tx cntl buf pool or cntl buf not allocated"));
2082*3dec9fcdSqs 		return;
2083*3dec9fcdSqs 	}
2084*3dec9fcdSqs 
2085*3dec9fcdSqs 	dma_buf_p = dma_poolp->dma_buf_pool_p;
2086*3dec9fcdSqs 	num_chunks = dma_poolp->num_chunks;
2087*3dec9fcdSqs 
2088*3dec9fcdSqs 	dma_cntl_p = dma_cntl_poolp->dma_buf_pool_p;
2089*3dec9fcdSqs 	ndmas = dma_cntl_poolp->ndmas;
2090*3dec9fcdSqs 
2091*3dec9fcdSqs 	for (i = 0; i < ndmas; i++) {
2092*3dec9fcdSqs 		hxge_free_tx_buf_dma(hxgep, dma_buf_p[i], num_chunks[i]);
2093*3dec9fcdSqs 	}
2094*3dec9fcdSqs 
2095*3dec9fcdSqs 	for (i = 0; i < ndmas; i++) {
2096*3dec9fcdSqs 		hxge_free_tx_cntl_dma(hxgep, dma_cntl_p[i]);
2097*3dec9fcdSqs 	}
2098*3dec9fcdSqs 
2099*3dec9fcdSqs 	for (i = 0; i < ndmas; i++) {
2100*3dec9fcdSqs 		KMEM_FREE(dma_buf_p[i],
2101*3dec9fcdSqs 		    sizeof (hxge_dma_common_t) * HXGE_DMA_BLOCK);
2102*3dec9fcdSqs 		KMEM_FREE(dma_cntl_p[i], sizeof (hxge_dma_common_t));
2103*3dec9fcdSqs 	}
2104*3dec9fcdSqs 
2105*3dec9fcdSqs 	KMEM_FREE(num_chunks, sizeof (uint32_t) * ndmas);
2106*3dec9fcdSqs 	KMEM_FREE(dma_cntl_p, ndmas * sizeof (p_hxge_dma_common_t));
2107*3dec9fcdSqs 	KMEM_FREE(dma_cntl_poolp, sizeof (hxge_dma_pool_t));
2108*3dec9fcdSqs 	KMEM_FREE(dma_buf_p, ndmas * sizeof (p_hxge_dma_common_t));
2109*3dec9fcdSqs 	KMEM_FREE(dma_poolp, sizeof (hxge_dma_pool_t));
2110*3dec9fcdSqs 
2111*3dec9fcdSqs 	hxgep->tx_buf_pool_p = NULL;
2112*3dec9fcdSqs 	hxgep->tx_cntl_pool_p = NULL;
2113*3dec9fcdSqs 
2114*3dec9fcdSqs 	HXGE_DEBUG_MSG((hxgep, MEM3_CTL, "<== hxge_free_tx_mem_pool"));
2115*3dec9fcdSqs }
2116*3dec9fcdSqs 
/*
 * hxge_dma_mem_alloc():  Allocate and bind one DMA-able memory area.
 *
 * Performs the standard three-step DDI sequence: allocate a DMA handle,
 * allocate memory against it, then bind the memory to the handle.  The
 * binding must resolve to exactly one cookie; a multi-cookie mapping is
 * treated as a failure and fully unwound.  On success the kernel
 * virtual address (kaddrp), device address (ioaddr_pp), actual length
 * (alength), and both handles are recorded in *dma_p.
 *
 * The 'method' argument is currently unused (hence ARGSUSED).
 * Returns HXGE_OK, or HXGE_ERROR (possibly OR'ed with HXGE_DDI_FAILED)
 * with dma_p's handles cleared so hxge_dma_mem_free() is safe to call.
 */
/*ARGSUSED*/
static hxge_status_t
hxge_dma_mem_alloc(p_hxge_t hxgep, dma_method_t method,
    struct ddi_dma_attr *dma_attrp,
    size_t length, ddi_device_acc_attr_t *acc_attr_p, uint_t xfer_flags,
    p_hxge_dma_common_t dma_p)
{
	caddr_t		kaddrp;
	int		ddi_status = DDI_SUCCESS;

	/* Start from a clean slate so failure paths can free safely. */
	dma_p->dma_handle = NULL;
	dma_p->acc_handle = NULL;
	dma_p->kaddrp = NULL;

	ddi_status = ddi_dma_alloc_handle(hxgep->dip, dma_attrp,
	    DDI_DMA_DONTWAIT, NULL, &dma_p->dma_handle);
	if (ddi_status != DDI_SUCCESS) {
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
		    "hxge_dma_mem_alloc:ddi_dma_alloc_handle failed."));
		return (HXGE_ERROR | HXGE_DDI_FAILED);
	}

	ddi_status = ddi_dma_mem_alloc(dma_p->dma_handle, length, acc_attr_p,
	    xfer_flags, DDI_DMA_DONTWAIT, 0, &kaddrp, &dma_p->alength,
	    &dma_p->acc_handle);
	if (ddi_status != DDI_SUCCESS) {
		/* The caller will decide whether it is fatal */
		HXGE_DEBUG_MSG((hxgep, DMA_CTL,
		    "hxge_dma_mem_alloc:ddi_dma_mem_alloc failed"));
		ddi_dma_free_handle(&dma_p->dma_handle);
		dma_p->dma_handle = NULL;
		return (HXGE_ERROR | HXGE_DDI_FAILED);
	}

	/* Reject short allocations: the caller asked for 'length' bytes. */
	if (dma_p->alength < length) {
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
		    "hxge_dma_mem_alloc:ddi_dma_mem_alloc < length."));
		ddi_dma_mem_free(&dma_p->acc_handle);
		ddi_dma_free_handle(&dma_p->dma_handle);
		dma_p->acc_handle = NULL;
		dma_p->dma_handle = NULL;
		return (HXGE_ERROR);
	}

	ddi_status = ddi_dma_addr_bind_handle(dma_p->dma_handle, NULL,
	    kaddrp, dma_p->alength, xfer_flags, DDI_DMA_DONTWAIT, 0,
	    &dma_p->dma_cookie, &dma_p->ncookies);
	if (ddi_status != DDI_DMA_MAPPED) {
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
		    "hxge_dma_mem_alloc:di_dma_addr_bind failed "
		    "(staus 0x%x ncookies %d.)", ddi_status, dma_p->ncookies));
		if (dma_p->acc_handle) {
			ddi_dma_mem_free(&dma_p->acc_handle);
			dma_p->acc_handle = NULL;
		}
		ddi_dma_free_handle(&dma_p->dma_handle);
		dma_p->dma_handle = NULL;
		return (HXGE_ERROR | HXGE_DDI_FAILED);
	}

	/* Only a single-cookie (one contiguous segment) mapping is usable. */
	if (dma_p->ncookies != 1) {
		HXGE_DEBUG_MSG((hxgep, DMA_CTL,
		    "hxge_dma_mem_alloc:ddi_dma_addr_bind > 1 cookie"
		    "(staus 0x%x ncookies %d.)", ddi_status, dma_p->ncookies));
		if (dma_p->acc_handle) {
			ddi_dma_mem_free(&dma_p->acc_handle);
			dma_p->acc_handle = NULL;
		}
		(void) ddi_dma_unbind_handle(dma_p->dma_handle);
		ddi_dma_free_handle(&dma_p->dma_handle);
		dma_p->dma_handle = NULL;
		return (HXGE_ERROR);
	}

	dma_p->kaddrp = kaddrp;
#if defined(__i386)
	/* 32-bit x86: the 64-bit cookie address is truncated to 32 bits. */
	dma_p->ioaddr_pp =
	    (unsigned char *)(uint32_t)dma_p->dma_cookie.dmac_laddress;
#else
	dma_p->ioaddr_pp = (unsigned char *) dma_p->dma_cookie.dmac_laddress;
#endif

	HPI_DMA_ACC_HANDLE_SET(dma_p, dma_p->acc_handle);

	HXGE_DEBUG_MSG((hxgep, DMA_CTL, "<== hxge_dma_mem_alloc: "
	    "dma buffer allocated: dma_p $%p "
	    "return dmac_ladress from cookie $%p dmac_size %d "
	    "dma_p->ioaddr_p $%p "
	    "dma_p->orig_ioaddr_p $%p "
	    "orig_vatopa $%p "
	    "alength %d (0x%x) "
	    "kaddrp $%p "
	    "length %d (0x%x)",
	    dma_p,
	    dma_p->dma_cookie.dmac_laddress,
	    dma_p->dma_cookie.dmac_size,
	    dma_p->ioaddr_pp,
	    dma_p->orig_ioaddr_pp,
	    dma_p->orig_vatopa,
	    dma_p->alength, dma_p->alength,
	    kaddrp,
	    length, length));

	return (HXGE_OK);
}
2222*3dec9fcdSqs 
2223*3dec9fcdSqs static void
2224*3dec9fcdSqs hxge_dma_mem_free(p_hxge_dma_common_t dma_p)
2225*3dec9fcdSqs {
2226*3dec9fcdSqs 	if (dma_p->dma_handle != NULL) {
2227*3dec9fcdSqs 		if (dma_p->ncookies) {
2228*3dec9fcdSqs 			(void) ddi_dma_unbind_handle(dma_p->dma_handle);
2229*3dec9fcdSqs 			dma_p->ncookies = 0;
2230*3dec9fcdSqs 		}
2231*3dec9fcdSqs 		ddi_dma_free_handle(&dma_p->dma_handle);
2232*3dec9fcdSqs 		dma_p->dma_handle = NULL;
2233*3dec9fcdSqs 	}
2234*3dec9fcdSqs 	if (dma_p->acc_handle != NULL) {
2235*3dec9fcdSqs 		ddi_dma_mem_free(&dma_p->acc_handle);
2236*3dec9fcdSqs 		dma_p->acc_handle = NULL;
2237*3dec9fcdSqs 		HPI_DMA_ACC_HANDLE_SET(dma_p, NULL);
2238*3dec9fcdSqs 	}
2239*3dec9fcdSqs 	dma_p->kaddrp = NULL;
2240*3dec9fcdSqs 	dma_p->alength = NULL;
2241*3dec9fcdSqs }
2242*3dec9fcdSqs 
2243*3dec9fcdSqs /*
2244*3dec9fcdSqs  *	hxge_m_start() -- start transmitting and receiving.
2245*3dec9fcdSqs  *
2246*3dec9fcdSqs  *	This function is called by the MAC layer when the first
2247*3dec9fcdSqs  *	stream is open to prepare the hardware ready for sending
2248*3dec9fcdSqs  *	and transmitting packets.
2249*3dec9fcdSqs  */
2250*3dec9fcdSqs static int
2251*3dec9fcdSqs hxge_m_start(void *arg)
2252*3dec9fcdSqs {
2253*3dec9fcdSqs 	p_hxge_t hxgep = (p_hxge_t)arg;
2254*3dec9fcdSqs 
2255*3dec9fcdSqs 	HXGE_DEBUG_MSG((hxgep, NEMO_CTL, "==> hxge_m_start"));
2256*3dec9fcdSqs 
2257*3dec9fcdSqs 	MUTEX_ENTER(hxgep->genlock);
2258*3dec9fcdSqs 
2259*3dec9fcdSqs 	if (hxge_init(hxgep) != DDI_SUCCESS) {
2260*3dec9fcdSqs 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2261*3dec9fcdSqs 		    "<== hxge_m_start: initialization failed"));
2262*3dec9fcdSqs 		MUTEX_EXIT(hxgep->genlock);
2263*3dec9fcdSqs 		return (EIO);
2264*3dec9fcdSqs 	}
2265*3dec9fcdSqs 
2266*3dec9fcdSqs 	if (hxgep->hxge_mac_state != HXGE_MAC_STARTED) {
2267*3dec9fcdSqs 		/*
2268*3dec9fcdSqs 		 * Start timer to check the system error and tx hangs
2269*3dec9fcdSqs 		 */
2270*3dec9fcdSqs 		hxgep->hxge_timerid = hxge_start_timer(hxgep,
2271*3dec9fcdSqs 		    hxge_check_hw_state, HXGE_CHECK_TIMER);
2272*3dec9fcdSqs 
2273*3dec9fcdSqs 		hxgep->hxge_mac_state = HXGE_MAC_STARTED;
2274*3dec9fcdSqs 	}
2275*3dec9fcdSqs 
2276*3dec9fcdSqs 	MUTEX_EXIT(hxgep->genlock);
2277*3dec9fcdSqs 
2278*3dec9fcdSqs 	HXGE_DEBUG_MSG((hxgep, NEMO_CTL, "<== hxge_m_start"));
2279*3dec9fcdSqs 
2280*3dec9fcdSqs 	return (0);
2281*3dec9fcdSqs }
2282*3dec9fcdSqs 
2283*3dec9fcdSqs /*
2284*3dec9fcdSqs  * hxge_m_stop(): stop transmitting and receiving.
2285*3dec9fcdSqs  */
2286*3dec9fcdSqs static void
2287*3dec9fcdSqs hxge_m_stop(void *arg)
2288*3dec9fcdSqs {
2289*3dec9fcdSqs 	p_hxge_t hxgep = (p_hxge_t)arg;
2290*3dec9fcdSqs 
2291*3dec9fcdSqs 	HXGE_DEBUG_MSG((hxgep, NEMO_CTL, "==> hxge_m_stop"));
2292*3dec9fcdSqs 
2293*3dec9fcdSqs 	if (hxgep->hxge_timerid) {
2294*3dec9fcdSqs 		hxge_stop_timer(hxgep, hxgep->hxge_timerid);
2295*3dec9fcdSqs 		hxgep->hxge_timerid = 0;
2296*3dec9fcdSqs 	}
2297*3dec9fcdSqs 
2298*3dec9fcdSqs 	MUTEX_ENTER(hxgep->genlock);
2299*3dec9fcdSqs 
2300*3dec9fcdSqs 	hxge_uninit(hxgep);
2301*3dec9fcdSqs 
2302*3dec9fcdSqs 	hxgep->hxge_mac_state = HXGE_MAC_STOPPED;
2303*3dec9fcdSqs 
2304*3dec9fcdSqs 	MUTEX_EXIT(hxgep->genlock);
2305*3dec9fcdSqs 
2306*3dec9fcdSqs 	HXGE_DEBUG_MSG((hxgep, NEMO_CTL, "<== hxge_m_stop"));
2307*3dec9fcdSqs }
2308*3dec9fcdSqs 
2309*3dec9fcdSqs static int
2310*3dec9fcdSqs hxge_m_unicst(void *arg, const uint8_t *macaddr)
2311*3dec9fcdSqs {
2312*3dec9fcdSqs 	p_hxge_t		hxgep = (p_hxge_t)arg;
2313*3dec9fcdSqs 	struct ether_addr	addrp;
2314*3dec9fcdSqs 	hxge_status_t		status;
2315*3dec9fcdSqs 
2316*3dec9fcdSqs 	HXGE_DEBUG_MSG((hxgep, MAC_CTL, "==> hxge_m_unicst"));
2317*3dec9fcdSqs 
2318*3dec9fcdSqs 	bcopy(macaddr, (uint8_t *)&addrp, ETHERADDRL);
2319*3dec9fcdSqs 
2320*3dec9fcdSqs 	status = hxge_set_mac_addr(hxgep, &addrp);
2321*3dec9fcdSqs 	if (status != HXGE_OK) {
2322*3dec9fcdSqs 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2323*3dec9fcdSqs 		    "<== hxge_m_unicst: set unitcast failed"));
2324*3dec9fcdSqs 		return (EINVAL);
2325*3dec9fcdSqs 	}
2326*3dec9fcdSqs 
2327*3dec9fcdSqs 	HXGE_DEBUG_MSG((hxgep, MAC_CTL, "<== hxge_m_unicst"));
2328*3dec9fcdSqs 
2329*3dec9fcdSqs 	return (0);
2330*3dec9fcdSqs }
2331*3dec9fcdSqs 
2332*3dec9fcdSqs static int
2333*3dec9fcdSqs hxge_m_multicst(void *arg, boolean_t add, const uint8_t *mca)
2334*3dec9fcdSqs {
2335*3dec9fcdSqs 	p_hxge_t		hxgep = (p_hxge_t)arg;
2336*3dec9fcdSqs 	struct ether_addr	addrp;
2337*3dec9fcdSqs 
2338*3dec9fcdSqs 	HXGE_DEBUG_MSG((hxgep, MAC_CTL, "==> hxge_m_multicst: add %d", add));
2339*3dec9fcdSqs 
2340*3dec9fcdSqs 	bcopy(mca, (uint8_t *)&addrp, ETHERADDRL);
2341*3dec9fcdSqs 
2342*3dec9fcdSqs 	if (add) {
2343*3dec9fcdSqs 		if (hxge_add_mcast_addr(hxgep, &addrp)) {
2344*3dec9fcdSqs 			HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2345*3dec9fcdSqs 			    "<== hxge_m_multicst: add multicast failed"));
2346*3dec9fcdSqs 			return (EINVAL);
2347*3dec9fcdSqs 		}
2348*3dec9fcdSqs 	} else {
2349*3dec9fcdSqs 		if (hxge_del_mcast_addr(hxgep, &addrp)) {
2350*3dec9fcdSqs 			HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2351*3dec9fcdSqs 			    "<== hxge_m_multicst: del multicast failed"));
2352*3dec9fcdSqs 			return (EINVAL);
2353*3dec9fcdSqs 		}
2354*3dec9fcdSqs 	}
2355*3dec9fcdSqs 
2356*3dec9fcdSqs 	HXGE_DEBUG_MSG((hxgep, MAC_CTL, "<== hxge_m_multicst"));
2357*3dec9fcdSqs 
2358*3dec9fcdSqs 	return (0);
2359*3dec9fcdSqs }
2360*3dec9fcdSqs 
2361*3dec9fcdSqs static int
2362*3dec9fcdSqs hxge_m_promisc(void *arg, boolean_t on)
2363*3dec9fcdSqs {
2364*3dec9fcdSqs 	p_hxge_t hxgep = (p_hxge_t)arg;
2365*3dec9fcdSqs 
2366*3dec9fcdSqs 	HXGE_DEBUG_MSG((hxgep, MAC_CTL, "==> hxge_m_promisc: on %d", on));
2367*3dec9fcdSqs 
2368*3dec9fcdSqs 	if (hxge_set_promisc(hxgep, on)) {
2369*3dec9fcdSqs 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2370*3dec9fcdSqs 		    "<== hxge_m_promisc: set promisc failed"));
2371*3dec9fcdSqs 		return (EINVAL);
2372*3dec9fcdSqs 	}
2373*3dec9fcdSqs 
2374*3dec9fcdSqs 	HXGE_DEBUG_MSG((hxgep, MAC_CTL, "<== hxge_m_promisc: on %d", on));
2375*3dec9fcdSqs 
2376*3dec9fcdSqs 	return (0);
2377*3dec9fcdSqs }
2378*3dec9fcdSqs 
2379*3dec9fcdSqs static void
2380*3dec9fcdSqs hxge_m_ioctl(void *arg, queue_t *wq, mblk_t *mp)
2381*3dec9fcdSqs {
2382*3dec9fcdSqs 	p_hxge_t	hxgep = (p_hxge_t)arg;
2383*3dec9fcdSqs 	struct iocblk	*iocp = (struct iocblk *)mp->b_rptr;
2384*3dec9fcdSqs 	boolean_t	need_privilege;
2385*3dec9fcdSqs 	int		err;
2386*3dec9fcdSqs 	int		cmd;
2387*3dec9fcdSqs 
2388*3dec9fcdSqs 	HXGE_DEBUG_MSG((hxgep, NEMO_CTL, "==> hxge_m_ioctl"));
2389*3dec9fcdSqs 
2390*3dec9fcdSqs 	iocp = (struct iocblk *)mp->b_rptr;
2391*3dec9fcdSqs 	iocp->ioc_error = 0;
2392*3dec9fcdSqs 	need_privilege = B_TRUE;
2393*3dec9fcdSqs 	cmd = iocp->ioc_cmd;
2394*3dec9fcdSqs 
2395*3dec9fcdSqs 	HXGE_DEBUG_MSG((hxgep, NEMO_CTL, "==> hxge_m_ioctl: cmd 0x%08x", cmd));
2396*3dec9fcdSqs 	switch (cmd) {
2397*3dec9fcdSqs 	default:
2398*3dec9fcdSqs 		miocnak(wq, mp, 0, EINVAL);
2399*3dec9fcdSqs 		HXGE_DEBUG_MSG((hxgep, NEMO_CTL, "<== hxge_m_ioctl: invalid"));
2400*3dec9fcdSqs 		return;
2401*3dec9fcdSqs 
2402*3dec9fcdSqs 	case LB_GET_INFO_SIZE:
2403*3dec9fcdSqs 	case LB_GET_INFO:
2404*3dec9fcdSqs 	case LB_GET_MODE:
2405*3dec9fcdSqs 		need_privilege = B_FALSE;
2406*3dec9fcdSqs 		break;
2407*3dec9fcdSqs 
2408*3dec9fcdSqs 	case LB_SET_MODE:
2409*3dec9fcdSqs 		break;
2410*3dec9fcdSqs 
2411*3dec9fcdSqs 	case ND_GET:
2412*3dec9fcdSqs 		need_privilege = B_FALSE;
2413*3dec9fcdSqs 		break;
2414*3dec9fcdSqs 	case ND_SET:
2415*3dec9fcdSqs 		break;
2416*3dec9fcdSqs 
2417*3dec9fcdSqs 	case HXGE_GET64:
2418*3dec9fcdSqs 	case HXGE_PUT64:
2419*3dec9fcdSqs 	case HXGE_GET_TX_RING_SZ:
2420*3dec9fcdSqs 	case HXGE_GET_TX_DESC:
2421*3dec9fcdSqs 	case HXGE_TX_SIDE_RESET:
2422*3dec9fcdSqs 	case HXGE_RX_SIDE_RESET:
2423*3dec9fcdSqs 	case HXGE_GLOBAL_RESET:
2424*3dec9fcdSqs 	case HXGE_RESET_MAC:
2425*3dec9fcdSqs 	case HXGE_PUT_TCAM:
2426*3dec9fcdSqs 	case HXGE_GET_TCAM:
2427*3dec9fcdSqs 	case HXGE_RTRACE:
2428*3dec9fcdSqs 
2429*3dec9fcdSqs 		need_privilege = B_FALSE;
2430*3dec9fcdSqs 		break;
2431*3dec9fcdSqs 	}
2432*3dec9fcdSqs 
2433*3dec9fcdSqs 	if (need_privilege) {
2434*3dec9fcdSqs 		err = secpolicy_net_config(iocp->ioc_cr, B_FALSE);
2435*3dec9fcdSqs 		if (err != 0) {
2436*3dec9fcdSqs 			miocnak(wq, mp, 0, err);
2437*3dec9fcdSqs 			HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2438*3dec9fcdSqs 			    "<== hxge_m_ioctl: no priv"));
2439*3dec9fcdSqs 			return;
2440*3dec9fcdSqs 		}
2441*3dec9fcdSqs 	}
2442*3dec9fcdSqs 
2443*3dec9fcdSqs 	switch (cmd) {
2444*3dec9fcdSqs 	case ND_GET:
2445*3dec9fcdSqs 		HXGE_DEBUG_MSG((hxgep, NEMO_CTL, "ND_GET command"));
2446*3dec9fcdSqs 	case ND_SET:
2447*3dec9fcdSqs 		HXGE_DEBUG_MSG((hxgep, NEMO_CTL, "ND_SET command"));
2448*3dec9fcdSqs 		hxge_param_ioctl(hxgep, wq, mp, iocp);
2449*3dec9fcdSqs 		break;
2450*3dec9fcdSqs 
2451*3dec9fcdSqs 	case LB_GET_MODE:
2452*3dec9fcdSqs 	case LB_SET_MODE:
2453*3dec9fcdSqs 	case LB_GET_INFO_SIZE:
2454*3dec9fcdSqs 	case LB_GET_INFO:
2455*3dec9fcdSqs 		hxge_loopback_ioctl(hxgep, wq, mp, iocp);
2456*3dec9fcdSqs 		break;
2457*3dec9fcdSqs 
2458*3dec9fcdSqs 	case HXGE_PUT_TCAM:
2459*3dec9fcdSqs 	case HXGE_GET_TCAM:
2460*3dec9fcdSqs 	case HXGE_GET64:
2461*3dec9fcdSqs 	case HXGE_PUT64:
2462*3dec9fcdSqs 	case HXGE_GET_TX_RING_SZ:
2463*3dec9fcdSqs 	case HXGE_GET_TX_DESC:
2464*3dec9fcdSqs 	case HXGE_TX_SIDE_RESET:
2465*3dec9fcdSqs 	case HXGE_RX_SIDE_RESET:
2466*3dec9fcdSqs 	case HXGE_GLOBAL_RESET:
2467*3dec9fcdSqs 	case HXGE_RESET_MAC:
2468*3dec9fcdSqs 		HXGE_DEBUG_MSG((hxgep, NEMO_CTL,
2469*3dec9fcdSqs 		    "==> hxge_m_ioctl: cmd 0x%x", cmd));
2470*3dec9fcdSqs 		hxge_hw_ioctl(hxgep, wq, mp, iocp);
2471*3dec9fcdSqs 		break;
2472*3dec9fcdSqs 	}
2473*3dec9fcdSqs 
2474*3dec9fcdSqs 	HXGE_DEBUG_MSG((hxgep, NEMO_CTL, "<== hxge_m_ioctl"));
2475*3dec9fcdSqs }
2476*3dec9fcdSqs 
2477*3dec9fcdSqs extern void hxge_rx_hw_blank(void *arg, time_t ticks, uint_t count);
2478*3dec9fcdSqs 
2479*3dec9fcdSqs static void
2480*3dec9fcdSqs hxge_m_resources(void *arg)
2481*3dec9fcdSqs {
2482*3dec9fcdSqs 	p_hxge_t hxgep = arg;
2483*3dec9fcdSqs 	mac_rx_fifo_t mrf;
2484*3dec9fcdSqs 	p_rx_rcr_rings_t rcr_rings;
2485*3dec9fcdSqs 	p_rx_rcr_ring_t *rcr_p;
2486*3dec9fcdSqs 	p_rx_rcr_ring_t rcrp;
2487*3dec9fcdSqs 	uint32_t i, ndmas;
2488*3dec9fcdSqs 	int status;
2489*3dec9fcdSqs 
2490*3dec9fcdSqs 	HXGE_DEBUG_MSG((hxgep, RX_CTL, "==> hxge_m_resources"));
2491*3dec9fcdSqs 
2492*3dec9fcdSqs 	MUTEX_ENTER(hxgep->genlock);
2493*3dec9fcdSqs 
2494*3dec9fcdSqs 	if (!(hxgep->drv_state & STATE_HW_INITIALIZED)) {
2495*3dec9fcdSqs 		status = hxge_init(hxgep);
2496*3dec9fcdSqs 		if (status != HXGE_OK) {
2497*3dec9fcdSqs 			HXGE_DEBUG_MSG((hxgep, RX_CTL, "==> hxge_m_resources: "
2498*3dec9fcdSqs 			    "hxge_init failed"));
2499*3dec9fcdSqs 			MUTEX_EXIT(hxgep->genlock);
2500*3dec9fcdSqs 			return;
2501*3dec9fcdSqs 		}
2502*3dec9fcdSqs 	}
2503*3dec9fcdSqs 
2504*3dec9fcdSqs 	mrf.mrf_type = MAC_RX_FIFO;
2505*3dec9fcdSqs 	mrf.mrf_blank = hxge_rx_hw_blank;
2506*3dec9fcdSqs 
2507*3dec9fcdSqs 	mrf.mrf_normal_blank_time = RXDMA_RCR_PTHRES_DEFAULT;
2508*3dec9fcdSqs 	mrf.mrf_normal_pkt_count = RXDMA_RCR_TO_DEFAULT;
2509*3dec9fcdSqs 
2510*3dec9fcdSqs 	rcr_rings = hxgep->rx_rcr_rings;
2511*3dec9fcdSqs 	rcr_p = rcr_rings->rcr_rings;
2512*3dec9fcdSqs 	ndmas = rcr_rings->ndmas;
2513*3dec9fcdSqs 
2514*3dec9fcdSqs 	/*
2515*3dec9fcdSqs 	 * Export our receive resources to the MAC layer.
2516*3dec9fcdSqs 	 */
2517*3dec9fcdSqs 	for (i = 0; i < ndmas; i++) {
2518*3dec9fcdSqs 		rcrp = (void *)(p_rx_rcr_ring_t)rcr_p[i];
2519*3dec9fcdSqs 		mrf.mrf_arg = rcrp;
2520*3dec9fcdSqs 		rcrp->rcr_mac_handle =
2521*3dec9fcdSqs 		    mac_resource_add(hxgep->mach, (mac_resource_t *)&mrf);
2522*3dec9fcdSqs 
2523*3dec9fcdSqs 		HXGE_DEBUG_MSG((hxgep, RX_CTL,
2524*3dec9fcdSqs 		    "==> hxge_m_resources: vdma %d dma %d "
2525*3dec9fcdSqs 		    "rcrptr 0x%016llx mac_handle 0x%016llx",
2526*3dec9fcdSqs 		    i, rcrp->rdc, rcr_p[i], rcrp->rcr_mac_handle));
2527*3dec9fcdSqs 	}
2528*3dec9fcdSqs 
2529*3dec9fcdSqs 	MUTEX_EXIT(hxgep->genlock);
2530*3dec9fcdSqs 
2531*3dec9fcdSqs 	HXGE_DEBUG_MSG((hxgep, RX_CTL, "<== hxge_m_resources"));
2532*3dec9fcdSqs }
2533*3dec9fcdSqs 
2534*3dec9fcdSqs /*
2535*3dec9fcdSqs  * Set an alternate MAC address
2536*3dec9fcdSqs  */
2537*3dec9fcdSqs static int
2538*3dec9fcdSqs hxge_altmac_set(p_hxge_t hxgep, uint8_t *maddr, mac_addr_slot_t slot)
2539*3dec9fcdSqs {
2540*3dec9fcdSqs 	uint64_t	address;
2541*3dec9fcdSqs 	uint64_t	tmp;
2542*3dec9fcdSqs 	hpi_status_t	status;
2543*3dec9fcdSqs 	uint8_t		addrn;
2544*3dec9fcdSqs 	int		i;
2545*3dec9fcdSqs 
2546*3dec9fcdSqs 	/*
2547*3dec9fcdSqs 	 * Convert a byte array to a 48 bit value.
2548*3dec9fcdSqs 	 * Need to check endianess if in doubt
2549*3dec9fcdSqs 	 */
2550*3dec9fcdSqs 	address = 0;
2551*3dec9fcdSqs 	for (i = 0; i < ETHERADDRL; i++) {
2552*3dec9fcdSqs 		tmp = maddr[i];
2553*3dec9fcdSqs 		address <<= 8;
2554*3dec9fcdSqs 		address |= tmp;
2555*3dec9fcdSqs 	}
2556*3dec9fcdSqs 
2557*3dec9fcdSqs 	addrn = (uint8_t)slot;
2558*3dec9fcdSqs 	status = hpi_pfc_set_mac_address(hxgep->hpi_handle, addrn, address);
2559*3dec9fcdSqs 	if (status != HPI_SUCCESS)
2560*3dec9fcdSqs 		return (EIO);
2561*3dec9fcdSqs 
2562*3dec9fcdSqs 	return (0);
2563*3dec9fcdSqs }
2564*3dec9fcdSqs 
2565*3dec9fcdSqs static void
2566*3dec9fcdSqs hxge_mmac_kstat_update(p_hxge_t hxgep, mac_addr_slot_t slot)
2567*3dec9fcdSqs {
2568*3dec9fcdSqs 	p_hxge_mmac_stats_t	mmac_stats;
2569*3dec9fcdSqs 	int			i;
2570*3dec9fcdSqs 	hxge_mmac_t		*mmac_info;
2571*3dec9fcdSqs 
2572*3dec9fcdSqs 	mmac_info = &hxgep->hxge_mmac_info;
2573*3dec9fcdSqs 	mmac_stats = &hxgep->statsp->mmac_stats;
2574*3dec9fcdSqs 	mmac_stats->mmac_max_cnt = mmac_info->num_mmac;
2575*3dec9fcdSqs 	mmac_stats->mmac_avail_cnt = mmac_info->naddrfree;
2576*3dec9fcdSqs 
2577*3dec9fcdSqs 	for (i = 0; i < ETHERADDRL; i++) {
2578*3dec9fcdSqs 		mmac_stats->mmac_avail_pool[slot].ether_addr_octet[i] =
2579*3dec9fcdSqs 		    mmac_info->mac_pool[slot].addr[(ETHERADDRL - 1) - i];
2580*3dec9fcdSqs 	}
2581*3dec9fcdSqs }
2582*3dec9fcdSqs 
2583*3dec9fcdSqs /*
2584*3dec9fcdSqs  * Find an unused address slot, set the address value to the one specified,
2585*3dec9fcdSqs  * enable the port to start filtering on the new MAC address.
2586*3dec9fcdSqs  * Returns: 0 on success.
2587*3dec9fcdSqs  */
2588*3dec9fcdSqs int
2589*3dec9fcdSqs hxge_m_mmac_add(void *arg, mac_multi_addr_t *maddr)
2590*3dec9fcdSqs {
2591*3dec9fcdSqs 	p_hxge_t	hxgep = arg;
2592*3dec9fcdSqs 	mac_addr_slot_t	slot;
2593*3dec9fcdSqs 	hxge_mmac_t	*mmac_info;
2594*3dec9fcdSqs 	int		err;
2595*3dec9fcdSqs 	hxge_status_t	status;
2596*3dec9fcdSqs 
2597*3dec9fcdSqs 	mutex_enter(hxgep->genlock);
2598*3dec9fcdSqs 
2599*3dec9fcdSqs 	/*
2600*3dec9fcdSqs 	 * Make sure that hxge is initialized, if _start() has
2601*3dec9fcdSqs 	 * not been called.
2602*3dec9fcdSqs 	 */
2603*3dec9fcdSqs 	if (!(hxgep->drv_state & STATE_HW_INITIALIZED)) {
2604*3dec9fcdSqs 		status = hxge_init(hxgep);
2605*3dec9fcdSqs 		if (status != HXGE_OK) {
2606*3dec9fcdSqs 			mutex_exit(hxgep->genlock);
2607*3dec9fcdSqs 			return (ENXIO);
2608*3dec9fcdSqs 		}
2609*3dec9fcdSqs 	}
2610*3dec9fcdSqs 
2611*3dec9fcdSqs 	mmac_info = &hxgep->hxge_mmac_info;
2612*3dec9fcdSqs 	if (mmac_info->naddrfree == 0) {
2613*3dec9fcdSqs 		mutex_exit(hxgep->genlock);
2614*3dec9fcdSqs 		return (ENOSPC);
2615*3dec9fcdSqs 	}
2616*3dec9fcdSqs 
2617*3dec9fcdSqs 	if (!mac_unicst_verify(hxgep->mach, maddr->mma_addr,
2618*3dec9fcdSqs 	    maddr->mma_addrlen)) {
2619*3dec9fcdSqs 		mutex_exit(hxgep->genlock);
2620*3dec9fcdSqs 		return (EINVAL);
2621*3dec9fcdSqs 	}
2622*3dec9fcdSqs 
2623*3dec9fcdSqs 	/*
2624*3dec9fcdSqs 	 * Search for the first available slot. Because naddrfree
2625*3dec9fcdSqs 	 * is not zero, we are guaranteed to find one.
2626*3dec9fcdSqs 	 * Slot 0 is for unique (primary) MAC.  The first alternate
2627*3dec9fcdSqs 	 * MAC slot is slot 1.
2628*3dec9fcdSqs 	 */
2629*3dec9fcdSqs 	for (slot = 1; slot < mmac_info->num_mmac; slot++) {
2630*3dec9fcdSqs 		if (!(mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED))
2631*3dec9fcdSqs 			break;
2632*3dec9fcdSqs 	}
2633*3dec9fcdSqs 
2634*3dec9fcdSqs 	ASSERT(slot < mmac_info->num_mmac);
2635*3dec9fcdSqs 	if ((err = hxge_altmac_set(hxgep, maddr->mma_addr, slot)) != 0) {
2636*3dec9fcdSqs 		mutex_exit(hxgep->genlock);
2637*3dec9fcdSqs 		return (err);
2638*3dec9fcdSqs 	}
2639*3dec9fcdSqs 	bcopy(maddr->mma_addr, mmac_info->mac_pool[slot].addr, ETHERADDRL);
2640*3dec9fcdSqs 	mmac_info->mac_pool[slot].flags |= MMAC_SLOT_USED;
2641*3dec9fcdSqs 	mmac_info->naddrfree--;
2642*3dec9fcdSqs 	hxge_mmac_kstat_update(hxgep, slot);
2643*3dec9fcdSqs 
2644*3dec9fcdSqs 	maddr->mma_slot = slot;
2645*3dec9fcdSqs 
2646*3dec9fcdSqs 	mutex_exit(hxgep->genlock);
2647*3dec9fcdSqs 	return (0);
2648*3dec9fcdSqs }
2649*3dec9fcdSqs 
2650*3dec9fcdSqs /*
2651*3dec9fcdSqs  * Remove the specified mac address and update
2652*3dec9fcdSqs  * the h/w not to filter the mac address anymore.
2653*3dec9fcdSqs  * Returns: 0, on success.
2654*3dec9fcdSqs  */
2655*3dec9fcdSqs int
2656*3dec9fcdSqs hxge_m_mmac_remove(void *arg, mac_addr_slot_t slot)
2657*3dec9fcdSqs {
2658*3dec9fcdSqs 	p_hxge_t	hxgep = arg;
2659*3dec9fcdSqs 	hxge_mmac_t	*mmac_info;
2660*3dec9fcdSqs 	int		err = 0;
2661*3dec9fcdSqs 	hxge_status_t	status;
2662*3dec9fcdSqs 
2663*3dec9fcdSqs 	mutex_enter(hxgep->genlock);
2664*3dec9fcdSqs 
2665*3dec9fcdSqs 	/*
2666*3dec9fcdSqs 	 * Make sure that hxge is initialized, if _start() has
2667*3dec9fcdSqs 	 * not been called.
2668*3dec9fcdSqs 	 */
2669*3dec9fcdSqs 	if (!(hxgep->drv_state & STATE_HW_INITIALIZED)) {
2670*3dec9fcdSqs 		status = hxge_init(hxgep);
2671*3dec9fcdSqs 		if (status != HXGE_OK) {
2672*3dec9fcdSqs 			mutex_exit(hxgep->genlock);
2673*3dec9fcdSqs 			return (ENXIO);
2674*3dec9fcdSqs 		}
2675*3dec9fcdSqs 	}
2676*3dec9fcdSqs 
2677*3dec9fcdSqs 	mmac_info = &hxgep->hxge_mmac_info;
2678*3dec9fcdSqs 	if (slot <= 0 || slot >= mmac_info->num_mmac) {
2679*3dec9fcdSqs 		mutex_exit(hxgep->genlock);
2680*3dec9fcdSqs 		return (EINVAL);
2681*3dec9fcdSqs 	}
2682*3dec9fcdSqs 
2683*3dec9fcdSqs 	if (mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED) {
2684*3dec9fcdSqs 		if (hpi_pfc_mac_addr_disable(hxgep->hpi_handle, slot) ==
2685*3dec9fcdSqs 		    HPI_SUCCESS) {
2686*3dec9fcdSqs 			mmac_info->mac_pool[slot].flags &= ~MMAC_SLOT_USED;
2687*3dec9fcdSqs 			mmac_info->naddrfree++;
2688*3dec9fcdSqs 			/*
2689*3dec9fcdSqs 			 * Clear mac_pool[slot].addr so that kstat shows 0
2690*3dec9fcdSqs 			 * alternate MAC address if the slot is not used.
2691*3dec9fcdSqs 			 */
2692*3dec9fcdSqs 			bzero(mmac_info->mac_pool[slot].addr, ETHERADDRL);
2693*3dec9fcdSqs 			hxge_mmac_kstat_update(hxgep, slot);
2694*3dec9fcdSqs 		} else {
2695*3dec9fcdSqs 			err = EIO;
2696*3dec9fcdSqs 		}
2697*3dec9fcdSqs 	} else {
2698*3dec9fcdSqs 		err = EINVAL;
2699*3dec9fcdSqs 	}
2700*3dec9fcdSqs 
2701*3dec9fcdSqs 	mutex_exit(hxgep->genlock);
2702*3dec9fcdSqs 	return (err);
2703*3dec9fcdSqs }
2704*3dec9fcdSqs 
2705*3dec9fcdSqs /*
2706*3dec9fcdSqs  * Modify a mac address added by hxge_mmac_add().
2707*3dec9fcdSqs  * Returns: 0, on success.
2708*3dec9fcdSqs  */
2709*3dec9fcdSqs int
2710*3dec9fcdSqs hxge_m_mmac_modify(void *arg, mac_multi_addr_t *maddr)
2711*3dec9fcdSqs {
2712*3dec9fcdSqs 	p_hxge_t	hxgep = arg;
2713*3dec9fcdSqs 	mac_addr_slot_t	slot;
2714*3dec9fcdSqs 	hxge_mmac_t	*mmac_info;
2715*3dec9fcdSqs 	int		err = 0;
2716*3dec9fcdSqs 	hxge_status_t	status;
2717*3dec9fcdSqs 
2718*3dec9fcdSqs 	if (!mac_unicst_verify(hxgep->mach, maddr->mma_addr,
2719*3dec9fcdSqs 	    maddr->mma_addrlen))
2720*3dec9fcdSqs 		return (EINVAL);
2721*3dec9fcdSqs 
2722*3dec9fcdSqs 	slot = maddr->mma_slot;
2723*3dec9fcdSqs 
2724*3dec9fcdSqs 	mutex_enter(hxgep->genlock);
2725*3dec9fcdSqs 
2726*3dec9fcdSqs 	/*
2727*3dec9fcdSqs 	 * Make sure that hxge is initialized, if _start() has
2728*3dec9fcdSqs 	 * not been called.
2729*3dec9fcdSqs 	 */
2730*3dec9fcdSqs 	if (!(hxgep->drv_state & STATE_HW_INITIALIZED)) {
2731*3dec9fcdSqs 		status = hxge_init(hxgep);
2732*3dec9fcdSqs 		if (status != HXGE_OK) {
2733*3dec9fcdSqs 			mutex_exit(hxgep->genlock);
2734*3dec9fcdSqs 			return (ENXIO);
2735*3dec9fcdSqs 		}
2736*3dec9fcdSqs 	}
2737*3dec9fcdSqs 
2738*3dec9fcdSqs 	mmac_info = &hxgep->hxge_mmac_info;
2739*3dec9fcdSqs 	if (slot <= 0 || slot >= mmac_info->num_mmac) {
2740*3dec9fcdSqs 		mutex_exit(hxgep->genlock);
2741*3dec9fcdSqs 		return (EINVAL);
2742*3dec9fcdSqs 	}
2743*3dec9fcdSqs 
2744*3dec9fcdSqs 	if (mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED) {
2745*3dec9fcdSqs 		if ((err = hxge_altmac_set(hxgep, maddr->mma_addr,
2746*3dec9fcdSqs 		    slot)) == 0) {
2747*3dec9fcdSqs 			bcopy(maddr->mma_addr, mmac_info->mac_pool[slot].addr,
2748*3dec9fcdSqs 			    ETHERADDRL);
2749*3dec9fcdSqs 			hxge_mmac_kstat_update(hxgep, slot);
2750*3dec9fcdSqs 		}
2751*3dec9fcdSqs 	} else {
2752*3dec9fcdSqs 		err = EINVAL;
2753*3dec9fcdSqs 	}
2754*3dec9fcdSqs 
2755*3dec9fcdSqs 	mutex_exit(hxgep->genlock);
2756*3dec9fcdSqs 	return (err);
2757*3dec9fcdSqs }
2758*3dec9fcdSqs 
2759*3dec9fcdSqs /*
2760*3dec9fcdSqs  * static int
2761*3dec9fcdSqs  * hxge_m_mmac_get() - Get the MAC address and other information
2762*3dec9fcdSqs  *	related to the slot.  mma_flags should be set to 0 in the call.
2763*3dec9fcdSqs  *	Note: although kstat shows MAC address as zero when a slot is
2764*3dec9fcdSqs  *	not used, Crossbow expects hxge_m_mmac_get to copy factory MAC
2765*3dec9fcdSqs  *	to the caller as long as the slot is not using a user MAC address.
2766*3dec9fcdSqs  *	The following table shows the rules,
2767*3dec9fcdSqs  *
2768*3dec9fcdSqs  *     					USED    VENDOR    mma_addr
2769*3dec9fcdSqs  *	------------------------------------------------------------
2770*3dec9fcdSqs  *	(1) Slot uses a user MAC:	yes      no     user MAC
2771*3dec9fcdSqs  *	(2) Slot uses a factory MAC:    yes      yes    factory MAC
2772*3dec9fcdSqs  *	(3) Slot is not used but is
2773*3dec9fcdSqs  *	     factory MAC capable:	no       yes    factory MAC
2774*3dec9fcdSqs  *	(4) Slot is not used and is
2775*3dec9fcdSqs  *	     not factory MAC capable:   no       no	0
2776*3dec9fcdSqs  *	------------------------------------------------------------
2777*3dec9fcdSqs  */
2778*3dec9fcdSqs int
2779*3dec9fcdSqs hxge_m_mmac_get(void *arg, mac_multi_addr_t *maddr)
2780*3dec9fcdSqs {
2781*3dec9fcdSqs 	hxge_t		*hxgep = arg;
2782*3dec9fcdSqs 	mac_addr_slot_t	slot;
2783*3dec9fcdSqs 	hxge_mmac_t	*mmac_info;
2784*3dec9fcdSqs 	hxge_status_t	status;
2785*3dec9fcdSqs 
2786*3dec9fcdSqs 	slot = maddr->mma_slot;
2787*3dec9fcdSqs 
2788*3dec9fcdSqs 	mutex_enter(hxgep->genlock);
2789*3dec9fcdSqs 
2790*3dec9fcdSqs 	/*
2791*3dec9fcdSqs 	 * Make sure that hxge is initialized, if _start() has
2792*3dec9fcdSqs 	 * not been called.
2793*3dec9fcdSqs 	 */
2794*3dec9fcdSqs 	if (!(hxgep->drv_state & STATE_HW_INITIALIZED)) {
2795*3dec9fcdSqs 		status = hxge_init(hxgep);
2796*3dec9fcdSqs 		if (status != HXGE_OK) {
2797*3dec9fcdSqs 			mutex_exit(hxgep->genlock);
2798*3dec9fcdSqs 			return (ENXIO);
2799*3dec9fcdSqs 		}
2800*3dec9fcdSqs 	}
2801*3dec9fcdSqs 
2802*3dec9fcdSqs 	mmac_info = &hxgep->hxge_mmac_info;
2803*3dec9fcdSqs 	if (slot <= 0 || slot >= mmac_info->num_mmac) {
2804*3dec9fcdSqs 		mutex_exit(hxgep->genlock);
2805*3dec9fcdSqs 		return (EINVAL);
2806*3dec9fcdSqs 	}
2807*3dec9fcdSqs 
2808*3dec9fcdSqs 	maddr->mma_flags = 0;
2809*3dec9fcdSqs 	if (mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED) {
2810*3dec9fcdSqs 		maddr->mma_flags |= MMAC_SLOT_USED;
2811*3dec9fcdSqs 		bcopy(mmac_info->mac_pool[slot].addr,
2812*3dec9fcdSqs 		    maddr->mma_addr, ETHERADDRL);
2813*3dec9fcdSqs 		maddr->mma_addrlen = ETHERADDRL;
2814*3dec9fcdSqs 	}
2815*3dec9fcdSqs 
2816*3dec9fcdSqs 	mutex_exit(hxgep->genlock);
2817*3dec9fcdSqs 	return (0);
2818*3dec9fcdSqs }
2819*3dec9fcdSqs 
2820*3dec9fcdSqs /*ARGSUSED*/
2821*3dec9fcdSqs boolean_t
2822*3dec9fcdSqs hxge_m_getcapab(void *arg, mac_capab_t cap, void *cap_data)
2823*3dec9fcdSqs {
2824*3dec9fcdSqs 	p_hxge_t		hxgep = (p_hxge_t)arg;
2825*3dec9fcdSqs 	uint32_t		*txflags = cap_data;
2826*3dec9fcdSqs 	multiaddress_capab_t	*mmacp = cap_data;
2827*3dec9fcdSqs 
2828*3dec9fcdSqs 	switch (cap) {
2829*3dec9fcdSqs 	case MAC_CAPAB_HCKSUM:
2830*3dec9fcdSqs 		*txflags = HCKSUM_INET_PARTIAL;
2831*3dec9fcdSqs 		break;
2832*3dec9fcdSqs 
2833*3dec9fcdSqs 	case MAC_CAPAB_POLL:
2834*3dec9fcdSqs 		/*
2835*3dec9fcdSqs 		 * There's nothing for us to fill in, simply returning B_TRUE
2836*3dec9fcdSqs 		 * stating that we support polling is sufficient.
2837*3dec9fcdSqs 		 */
2838*3dec9fcdSqs 		break;
2839*3dec9fcdSqs 
2840*3dec9fcdSqs 	case MAC_CAPAB_MULTIADDRESS:
2841*3dec9fcdSqs 		/*
2842*3dec9fcdSqs 		 * The number of MAC addresses made available by
2843*3dec9fcdSqs 		 * this capability is one less than the total as
2844*3dec9fcdSqs 		 * the primary address in slot 0 is counted in
2845*3dec9fcdSqs 		 * the total.
2846*3dec9fcdSqs 		 */
2847*3dec9fcdSqs 		mmacp->maddr_naddr = PFC_N_MAC_ADDRESSES - 1;
2848*3dec9fcdSqs 		mmacp->maddr_naddrfree = hxgep->hxge_mmac_info.naddrfree;
2849*3dec9fcdSqs 		mmacp->maddr_flag = 0;	/* No multiple factory macs */
2850*3dec9fcdSqs 		mmacp->maddr_handle = hxgep;
2851*3dec9fcdSqs 		mmacp->maddr_add = hxge_m_mmac_add;
2852*3dec9fcdSqs 		mmacp->maddr_remove = hxge_m_mmac_remove;
2853*3dec9fcdSqs 		mmacp->maddr_modify = hxge_m_mmac_modify;
2854*3dec9fcdSqs 		mmacp->maddr_get = hxge_m_mmac_get;
2855*3dec9fcdSqs 		mmacp->maddr_reserve = NULL;	/* No multiple factory macs */
2856*3dec9fcdSqs 		break;
2857*3dec9fcdSqs 	default:
2858*3dec9fcdSqs 		return (B_FALSE);
2859*3dec9fcdSqs 	}
2860*3dec9fcdSqs 	return (B_TRUE);
2861*3dec9fcdSqs }
2862*3dec9fcdSqs 
2863*3dec9fcdSqs /*
2864*3dec9fcdSqs  * Module loading and removing entry points.
2865*3dec9fcdSqs  */
2866*3dec9fcdSqs DDI_DEFINE_STREAM_OPS(hxge_dev_ops, nulldev, nulldev, hxge_attach, hxge_detach,
2867*3dec9fcdSqs     nodev, NULL, D_MP, NULL);
2868*3dec9fcdSqs 
2869*3dec9fcdSqs extern struct mod_ops mod_driverops;
2870*3dec9fcdSqs 
2871*3dec9fcdSqs #define	HXGE_DESC_VER	"HXGE 10Gb Ethernet Driver"
2872*3dec9fcdSqs 
2873*3dec9fcdSqs /*
2874*3dec9fcdSqs  * Module linkage information for the kernel.
2875*3dec9fcdSqs  */
2876*3dec9fcdSqs static struct modldrv hxge_modldrv = {
2877*3dec9fcdSqs 	&mod_driverops,
2878*3dec9fcdSqs 	HXGE_DESC_VER,
2879*3dec9fcdSqs 	&hxge_dev_ops
2880*3dec9fcdSqs };
2881*3dec9fcdSqs 
2882*3dec9fcdSqs static struct modlinkage modlinkage = {
2883*3dec9fcdSqs 	MODREV_1, (void *) &hxge_modldrv, NULL
2884*3dec9fcdSqs };
2885*3dec9fcdSqs 
2886*3dec9fcdSqs int
2887*3dec9fcdSqs _init(void)
2888*3dec9fcdSqs {
2889*3dec9fcdSqs 	int status;
2890*3dec9fcdSqs 
2891*3dec9fcdSqs 	HXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _init"));
2892*3dec9fcdSqs 	mac_init_ops(&hxge_dev_ops, "hxge");
2893*3dec9fcdSqs 	status = ddi_soft_state_init(&hxge_list, sizeof (hxge_t), 0);
2894*3dec9fcdSqs 	if (status != 0) {
2895*3dec9fcdSqs 		HXGE_ERROR_MSG((NULL, HXGE_ERR_CTL,
2896*3dec9fcdSqs 		    "failed to init device soft state"));
2897*3dec9fcdSqs 		mac_fini_ops(&hxge_dev_ops);
2898*3dec9fcdSqs 		goto _init_exit;
2899*3dec9fcdSqs 	}
2900*3dec9fcdSqs 
2901*3dec9fcdSqs 	status = mod_install(&modlinkage);
2902*3dec9fcdSqs 	if (status != 0) {
2903*3dec9fcdSqs 		ddi_soft_state_fini(&hxge_list);
2904*3dec9fcdSqs 		HXGE_ERROR_MSG((NULL, HXGE_ERR_CTL, "Mod install failed"));
2905*3dec9fcdSqs 		goto _init_exit;
2906*3dec9fcdSqs 	}
2907*3dec9fcdSqs 
2908*3dec9fcdSqs 	MUTEX_INIT(&hxge_common_lock, NULL, MUTEX_DRIVER, NULL);
2909*3dec9fcdSqs 
2910*3dec9fcdSqs _init_exit:
2911*3dec9fcdSqs 	HXGE_DEBUG_MSG((NULL, MOD_CTL, "_init status = 0x%X", status));
2912*3dec9fcdSqs 
2913*3dec9fcdSqs 	return (status);
2914*3dec9fcdSqs }
2915*3dec9fcdSqs 
2916*3dec9fcdSqs int
2917*3dec9fcdSqs _fini(void)
2918*3dec9fcdSqs {
2919*3dec9fcdSqs 	int status;
2920*3dec9fcdSqs 
2921*3dec9fcdSqs 	HXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _fini"));
2922*3dec9fcdSqs 
2923*3dec9fcdSqs 	HXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _fini: mod_remove"));
2924*3dec9fcdSqs 
2925*3dec9fcdSqs 	if (hxge_mblks_pending)
2926*3dec9fcdSqs 		return (EBUSY);
2927*3dec9fcdSqs 
2928*3dec9fcdSqs 	status = mod_remove(&modlinkage);
2929*3dec9fcdSqs 	if (status != DDI_SUCCESS) {
2930*3dec9fcdSqs 		HXGE_DEBUG_MSG((NULL, MOD_CTL,
2931*3dec9fcdSqs 		    "Module removal failed 0x%08x", status));
2932*3dec9fcdSqs 		goto _fini_exit;
2933*3dec9fcdSqs 	}
2934*3dec9fcdSqs 
2935*3dec9fcdSqs 	mac_fini_ops(&hxge_dev_ops);
2936*3dec9fcdSqs 
2937*3dec9fcdSqs 	ddi_soft_state_fini(&hxge_list);
2938*3dec9fcdSqs 
2939*3dec9fcdSqs 	MUTEX_DESTROY(&hxge_common_lock);
2940*3dec9fcdSqs 
2941*3dec9fcdSqs _fini_exit:
2942*3dec9fcdSqs 	HXGE_DEBUG_MSG((NULL, MOD_CTL, "_fini status = 0x%08x", status));
2943*3dec9fcdSqs 
2944*3dec9fcdSqs 	return (status);
2945*3dec9fcdSqs }
2946*3dec9fcdSqs 
2947*3dec9fcdSqs int
2948*3dec9fcdSqs _info(struct modinfo *modinfop)
2949*3dec9fcdSqs {
2950*3dec9fcdSqs 	int status;
2951*3dec9fcdSqs 
2952*3dec9fcdSqs 	HXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _info"));
2953*3dec9fcdSqs 	status = mod_info(&modlinkage, modinfop);
2954*3dec9fcdSqs 	HXGE_DEBUG_MSG((NULL, MOD_CTL, " _info status = 0x%X", status));
2955*3dec9fcdSqs 
2956*3dec9fcdSqs 	return (status);
2957*3dec9fcdSqs }
2958*3dec9fcdSqs 
2959*3dec9fcdSqs /*ARGSUSED*/
2960*3dec9fcdSqs hxge_status_t
2961*3dec9fcdSqs hxge_add_intrs(p_hxge_t hxgep)
2962*3dec9fcdSqs {
2963*3dec9fcdSqs 	int		intr_types;
2964*3dec9fcdSqs 	int		type = 0;
2965*3dec9fcdSqs 	int		ddi_status = DDI_SUCCESS;
2966*3dec9fcdSqs 	hxge_status_t	status = HXGE_OK;
2967*3dec9fcdSqs 
2968*3dec9fcdSqs 	HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_add_intrs"));
2969*3dec9fcdSqs 
2970*3dec9fcdSqs 	hxgep->hxge_intr_type.intr_registered = B_FALSE;
2971*3dec9fcdSqs 	hxgep->hxge_intr_type.intr_enabled = B_FALSE;
2972*3dec9fcdSqs 	hxgep->hxge_intr_type.msi_intx_cnt = 0;
2973*3dec9fcdSqs 	hxgep->hxge_intr_type.intr_added = 0;
2974*3dec9fcdSqs 	hxgep->hxge_intr_type.niu_msi_enable = B_FALSE;
2975*3dec9fcdSqs 	hxgep->hxge_intr_type.intr_type = 0;
2976*3dec9fcdSqs 
2977*3dec9fcdSqs 	if (hxge_msi_enable) {
2978*3dec9fcdSqs 		hxgep->hxge_intr_type.niu_msi_enable = B_TRUE;
2979*3dec9fcdSqs 	}
2980*3dec9fcdSqs 
2981*3dec9fcdSqs 	/* Get the supported interrupt types */
2982*3dec9fcdSqs 	if ((ddi_status = ddi_intr_get_supported_types(hxgep->dip, &intr_types))
2983*3dec9fcdSqs 	    != DDI_SUCCESS) {
2984*3dec9fcdSqs 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "<== hxge_add_intrs: "
2985*3dec9fcdSqs 		    "ddi_intr_get_supported_types failed: status 0x%08x",
2986*3dec9fcdSqs 		    ddi_status));
2987*3dec9fcdSqs 		return (HXGE_ERROR | HXGE_DDI_FAILED);
2988*3dec9fcdSqs 	}
2989*3dec9fcdSqs 
2990*3dec9fcdSqs 	hxgep->hxge_intr_type.intr_types = intr_types;
2991*3dec9fcdSqs 
2992*3dec9fcdSqs 	HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_add_intrs: "
2993*3dec9fcdSqs 	    "ddi_intr_get_supported_types: 0x%08x", intr_types));
2994*3dec9fcdSqs 
2995*3dec9fcdSqs 	/*
2996*3dec9fcdSqs 	 * Pick the interrupt type to use MSIX, MSI, INTX hxge_msi_enable:
2997*3dec9fcdSqs 	 *	(1): 1 - MSI
2998*3dec9fcdSqs 	 *	(2): 2 - MSI-X
2999*3dec9fcdSqs 	 *	others - FIXED
3000*3dec9fcdSqs 	 */
3001*3dec9fcdSqs 	switch (hxge_msi_enable) {
3002*3dec9fcdSqs 	default:
3003*3dec9fcdSqs 		type = DDI_INTR_TYPE_FIXED;
3004*3dec9fcdSqs 		HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_add_intrs: "
3005*3dec9fcdSqs 		    "use fixed (intx emulation) type %08x", type));
3006*3dec9fcdSqs 		break;
3007*3dec9fcdSqs 
3008*3dec9fcdSqs 	case 2:
3009*3dec9fcdSqs 		HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_add_intrs: "
3010*3dec9fcdSqs 		    "ddi_intr_get_supported_types: 0x%08x", intr_types));
3011*3dec9fcdSqs 		if (intr_types & DDI_INTR_TYPE_MSIX) {
3012*3dec9fcdSqs 			type = DDI_INTR_TYPE_MSIX;
3013*3dec9fcdSqs 			HXGE_DEBUG_MSG((hxgep, INT_CTL,
3014*3dec9fcdSqs 			    "==> hxge_add_intrs: "
3015*3dec9fcdSqs 			    "ddi_intr_get_supported_types: MSIX 0x%08x", type));
3016*3dec9fcdSqs 		} else if (intr_types & DDI_INTR_TYPE_MSI) {
3017*3dec9fcdSqs 			type = DDI_INTR_TYPE_MSI;
3018*3dec9fcdSqs 			HXGE_DEBUG_MSG((hxgep, INT_CTL,
3019*3dec9fcdSqs 			    "==> hxge_add_intrs: "
3020*3dec9fcdSqs 			    "ddi_intr_get_supported_types: MSI 0x%08x", type));
3021*3dec9fcdSqs 		} else if (intr_types & DDI_INTR_TYPE_FIXED) {
3022*3dec9fcdSqs 			type = DDI_INTR_TYPE_FIXED;
3023*3dec9fcdSqs 			HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_add_intrs: "
3024*3dec9fcdSqs 			    "ddi_intr_get_supported_types: MSXED0x%08x", type));
3025*3dec9fcdSqs 		}
3026*3dec9fcdSqs 		break;
3027*3dec9fcdSqs 
3028*3dec9fcdSqs 	case 1:
3029*3dec9fcdSqs 		if (intr_types & DDI_INTR_TYPE_MSI) {
3030*3dec9fcdSqs 			type = DDI_INTR_TYPE_MSI;
3031*3dec9fcdSqs 			HXGE_DEBUG_MSG((hxgep, INT_CTL,
3032*3dec9fcdSqs 			    "==> hxge_add_intrs: "
3033*3dec9fcdSqs 			    "ddi_intr_get_supported_types: MSI 0x%08x", type));
3034*3dec9fcdSqs 		} else if (intr_types & DDI_INTR_TYPE_MSIX) {
3035*3dec9fcdSqs 			type = DDI_INTR_TYPE_MSIX;
3036*3dec9fcdSqs 			HXGE_DEBUG_MSG((hxgep, INT_CTL,
3037*3dec9fcdSqs 			    "==> hxge_add_intrs: "
3038*3dec9fcdSqs 			    "ddi_intr_get_supported_types: MSIX 0x%08x", type));
3039*3dec9fcdSqs 		} else if (intr_types & DDI_INTR_TYPE_FIXED) {
3040*3dec9fcdSqs 			type = DDI_INTR_TYPE_FIXED;
3041*3dec9fcdSqs 			HXGE_DEBUG_MSG((hxgep, INT_CTL,
3042*3dec9fcdSqs 			    "==> hxge_add_intrs: "
3043*3dec9fcdSqs 			    "ddi_intr_get_supported_types: MSXED0x%08x", type));
3044*3dec9fcdSqs 		}
3045*3dec9fcdSqs 	}
3046*3dec9fcdSqs 
3047*3dec9fcdSqs 	hxgep->hxge_intr_type.intr_type = type;
3048*3dec9fcdSqs 	if ((type == DDI_INTR_TYPE_MSIX || type == DDI_INTR_TYPE_MSI ||
3049*3dec9fcdSqs 	    type == DDI_INTR_TYPE_FIXED) &&
3050*3dec9fcdSqs 	    hxgep->hxge_intr_type.niu_msi_enable) {
3051*3dec9fcdSqs 		if ((status = hxge_add_intrs_adv(hxgep)) != DDI_SUCCESS) {
3052*3dec9fcdSqs 			HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3053*3dec9fcdSqs 			    " hxge_add_intrs: "
3054*3dec9fcdSqs 			    " hxge_add_intrs_adv failed: status 0x%08x",
3055*3dec9fcdSqs 			    status));
3056*3dec9fcdSqs 			return (status);
3057*3dec9fcdSqs 		} else {
3058*3dec9fcdSqs 			HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_add_intrs: "
3059*3dec9fcdSqs 			    "interrupts registered : type %d", type));
3060*3dec9fcdSqs 			hxgep->hxge_intr_type.intr_registered = B_TRUE;
3061*3dec9fcdSqs 
3062*3dec9fcdSqs 			HXGE_DEBUG_MSG((hxgep, DDI_CTL,
3063*3dec9fcdSqs 			    "\nAdded advanced hxge add_intr_adv "
3064*3dec9fcdSqs 			    "intr type 0x%x\n", type));
3065*3dec9fcdSqs 
3066*3dec9fcdSqs 			return (status);
3067*3dec9fcdSqs 		}
3068*3dec9fcdSqs 	}
3069*3dec9fcdSqs 
3070*3dec9fcdSqs 	if (!hxgep->hxge_intr_type.intr_registered) {
3071*3dec9fcdSqs 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3072*3dec9fcdSqs 		    "==> hxge_add_intrs: failed to register interrupts"));
3073*3dec9fcdSqs 		return (HXGE_ERROR | HXGE_DDI_FAILED);
3074*3dec9fcdSqs 	}
3075*3dec9fcdSqs 
3076*3dec9fcdSqs 	HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_add_intrs"));
3077*3dec9fcdSqs 
3078*3dec9fcdSqs 	return (status);
3079*3dec9fcdSqs }
3080*3dec9fcdSqs 
3081*3dec9fcdSqs /*ARGSUSED*/
3082*3dec9fcdSqs static hxge_status_t
3083*3dec9fcdSqs hxge_add_soft_intrs(p_hxge_t hxgep)
3084*3dec9fcdSqs {
3085*3dec9fcdSqs 	int		ddi_status = DDI_SUCCESS;
3086*3dec9fcdSqs 	hxge_status_t	status = HXGE_OK;
3087*3dec9fcdSqs 
3088*3dec9fcdSqs 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_add_soft_intrs"));
3089*3dec9fcdSqs 
3090*3dec9fcdSqs 	hxgep->resched_id = NULL;
3091*3dec9fcdSqs 	hxgep->resched_running = B_FALSE;
3092*3dec9fcdSqs 	ddi_status = ddi_add_softintr(hxgep->dip, DDI_SOFTINT_LOW,
3093*3dec9fcdSqs 	    &hxgep->resched_id, NULL, NULL, hxge_reschedule, (caddr_t)hxgep);
3094*3dec9fcdSqs 	if (ddi_status != DDI_SUCCESS) {
3095*3dec9fcdSqs 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "<== hxge_add_soft_intrs: "
3096*3dec9fcdSqs 		    "ddi_add_softintrs failed: status 0x%08x", ddi_status));
3097*3dec9fcdSqs 		return (HXGE_ERROR | HXGE_DDI_FAILED);
3098*3dec9fcdSqs 	}
3099*3dec9fcdSqs 
3100*3dec9fcdSqs 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_ddi_add_soft_intrs"));
3101*3dec9fcdSqs 
3102*3dec9fcdSqs 	return (status);
3103*3dec9fcdSqs }
3104*3dec9fcdSqs 
3105*3dec9fcdSqs /*ARGSUSED*/
3106*3dec9fcdSqs static hxge_status_t
3107*3dec9fcdSqs hxge_add_intrs_adv(p_hxge_t hxgep)
3108*3dec9fcdSqs {
3109*3dec9fcdSqs 	int		intr_type;
3110*3dec9fcdSqs 	p_hxge_intr_t	intrp;
3111*3dec9fcdSqs 	hxge_status_t	status;
3112*3dec9fcdSqs 
3113*3dec9fcdSqs 	HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_add_intrs_adv"));
3114*3dec9fcdSqs 
3115*3dec9fcdSqs 	intrp = (p_hxge_intr_t)&hxgep->hxge_intr_type;
3116*3dec9fcdSqs 	intr_type = intrp->intr_type;
3117*3dec9fcdSqs 
3118*3dec9fcdSqs 	HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_add_intrs_adv: type 0x%x",
3119*3dec9fcdSqs 	    intr_type));
3120*3dec9fcdSqs 
3121*3dec9fcdSqs 	switch (intr_type) {
3122*3dec9fcdSqs 	case DDI_INTR_TYPE_MSI:		/* 0x2 */
3123*3dec9fcdSqs 	case DDI_INTR_TYPE_MSIX:	/* 0x4 */
3124*3dec9fcdSqs 		status = hxge_add_intrs_adv_type(hxgep, intr_type);
3125*3dec9fcdSqs 		break;
3126*3dec9fcdSqs 
3127*3dec9fcdSqs 	case DDI_INTR_TYPE_FIXED:	/* 0x1 */
3128*3dec9fcdSqs 		status = hxge_add_intrs_adv_type_fix(hxgep, intr_type);
3129*3dec9fcdSqs 		break;
3130*3dec9fcdSqs 
3131*3dec9fcdSqs 	default:
3132*3dec9fcdSqs 		status = HXGE_ERROR;
3133*3dec9fcdSqs 		break;
3134*3dec9fcdSqs 	}
3135*3dec9fcdSqs 
3136*3dec9fcdSqs 	HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_add_intrs_adv"));
3137*3dec9fcdSqs 
3138*3dec9fcdSqs 	return (status);
3139*3dec9fcdSqs }
3140*3dec9fcdSqs 
3141*3dec9fcdSqs /*ARGSUSED*/
3142*3dec9fcdSqs static hxge_status_t
3143*3dec9fcdSqs hxge_add_intrs_adv_type(p_hxge_t hxgep, uint32_t int_type)
3144*3dec9fcdSqs {
3145*3dec9fcdSqs 	dev_info_t	*dip = hxgep->dip;
3146*3dec9fcdSqs 	p_hxge_ldg_t	ldgp;
3147*3dec9fcdSqs 	p_hxge_intr_t	intrp;
3148*3dec9fcdSqs 	uint_t		*inthandler;
3149*3dec9fcdSqs 	void		*arg1, *arg2;
3150*3dec9fcdSqs 	int		behavior;
3151*3dec9fcdSqs 	int		nintrs, navail;
3152*3dec9fcdSqs 	int		nactual, nrequired;
3153*3dec9fcdSqs 	int		inum = 0;
3154*3dec9fcdSqs 	int		loop = 0;
3155*3dec9fcdSqs 	int		x, y;
3156*3dec9fcdSqs 	int		ddi_status = DDI_SUCCESS;
3157*3dec9fcdSqs 	hxge_status_t	status = HXGE_OK;
3158*3dec9fcdSqs 
3159*3dec9fcdSqs 	HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_add_intrs_adv_type"));
3160*3dec9fcdSqs 
3161*3dec9fcdSqs 	intrp = (p_hxge_intr_t)&hxgep->hxge_intr_type;
3162*3dec9fcdSqs 
3163*3dec9fcdSqs 	ddi_status = ddi_intr_get_nintrs(dip, int_type, &nintrs);
3164*3dec9fcdSqs 	if ((ddi_status != DDI_SUCCESS) || (nintrs == 0)) {
3165*3dec9fcdSqs 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3166*3dec9fcdSqs 		    "ddi_intr_get_nintrs() failed, status: 0x%x%, "
3167*3dec9fcdSqs 		    "nintrs: %d", ddi_status, nintrs));
3168*3dec9fcdSqs 		return (HXGE_ERROR | HXGE_DDI_FAILED);
3169*3dec9fcdSqs 	}
3170*3dec9fcdSqs 
3171*3dec9fcdSqs 	ddi_status = ddi_intr_get_navail(dip, int_type, &navail);
3172*3dec9fcdSqs 	if ((ddi_status != DDI_SUCCESS) || (navail == 0)) {
3173*3dec9fcdSqs 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3174*3dec9fcdSqs 		    "ddi_intr_get_navail() failed, status: 0x%x%, "
3175*3dec9fcdSqs 		    "nintrs: %d", ddi_status, navail));
3176*3dec9fcdSqs 		return (HXGE_ERROR | HXGE_DDI_FAILED);
3177*3dec9fcdSqs 	}
3178*3dec9fcdSqs 
3179*3dec9fcdSqs 	HXGE_DEBUG_MSG((hxgep, INT_CTL,
3180*3dec9fcdSqs 	    "ddi_intr_get_navail() returned: intr type %d nintrs %d, navail %d",
3181*3dec9fcdSqs 	    int_type, nintrs, navail));
3182*3dec9fcdSqs 
3183*3dec9fcdSqs 	if (int_type == DDI_INTR_TYPE_MSI && !ISP2(navail)) {
3184*3dec9fcdSqs 		/* MSI must be power of 2 */
3185*3dec9fcdSqs 		if ((navail & 16) == 16) {
3186*3dec9fcdSqs 			navail = 16;
3187*3dec9fcdSqs 		} else if ((navail & 8) == 8) {
3188*3dec9fcdSqs 			navail = 8;
3189*3dec9fcdSqs 		} else if ((navail & 4) == 4) {
3190*3dec9fcdSqs 			navail = 4;
3191*3dec9fcdSqs 		} else if ((navail & 2) == 2) {
3192*3dec9fcdSqs 			navail = 2;
3193*3dec9fcdSqs 		} else {
3194*3dec9fcdSqs 			navail = 1;
3195*3dec9fcdSqs 		}
3196*3dec9fcdSqs 		HXGE_DEBUG_MSG((hxgep, INT_CTL,
3197*3dec9fcdSqs 		    "ddi_intr_get_navail(): (msi power of 2) nintrs %d, "
3198*3dec9fcdSqs 		    "navail %d", nintrs, navail));
3199*3dec9fcdSqs 	}
3200*3dec9fcdSqs 
3201*3dec9fcdSqs 	HXGE_DEBUG_MSG((hxgep, INT_CTL,
3202*3dec9fcdSqs 	    "requesting: intr type %d nintrs %d, navail %d",
3203*3dec9fcdSqs 	    int_type, nintrs, navail));
3204*3dec9fcdSqs 
3205*3dec9fcdSqs 	behavior = ((int_type == DDI_INTR_TYPE_FIXED) ? DDI_INTR_ALLOC_STRICT :
3206*3dec9fcdSqs 	    DDI_INTR_ALLOC_NORMAL);
3207*3dec9fcdSqs 	intrp->intr_size = navail * sizeof (ddi_intr_handle_t);
3208*3dec9fcdSqs 	intrp->htable = kmem_zalloc(intrp->intr_size, KM_SLEEP);
3209*3dec9fcdSqs 
3210*3dec9fcdSqs 	ddi_status = ddi_intr_alloc(dip, intrp->htable, int_type, inum,
3211*3dec9fcdSqs 	    navail, &nactual, behavior);
3212*3dec9fcdSqs 	if (ddi_status != DDI_SUCCESS || nactual == 0) {
3213*3dec9fcdSqs 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3214*3dec9fcdSqs 		    " ddi_intr_alloc() failed: %d", ddi_status));
3215*3dec9fcdSqs 		kmem_free(intrp->htable, intrp->intr_size);
3216*3dec9fcdSqs 		return (HXGE_ERROR | HXGE_DDI_FAILED);
3217*3dec9fcdSqs 	}
3218*3dec9fcdSqs 
3219*3dec9fcdSqs 	HXGE_DEBUG_MSG((hxgep, INT_CTL,
3220*3dec9fcdSqs 	    "ddi_intr_alloc() returned: navail %d nactual %d",
3221*3dec9fcdSqs 	    navail, nactual));
3222*3dec9fcdSqs 
3223*3dec9fcdSqs 	if ((ddi_status = ddi_intr_get_pri(intrp->htable[0],
3224*3dec9fcdSqs 	    (uint_t *)&intrp->pri)) != DDI_SUCCESS) {
3225*3dec9fcdSqs 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3226*3dec9fcdSqs 		    " ddi_intr_get_pri() failed: %d", ddi_status));
3227*3dec9fcdSqs 		/* Free already allocated interrupts */
3228*3dec9fcdSqs 		for (y = 0; y < nactual; y++) {
3229*3dec9fcdSqs 			(void) ddi_intr_free(intrp->htable[y]);
3230*3dec9fcdSqs 		}
3231*3dec9fcdSqs 
3232*3dec9fcdSqs 		kmem_free(intrp->htable, intrp->intr_size);
3233*3dec9fcdSqs 		return (HXGE_ERROR | HXGE_DDI_FAILED);
3234*3dec9fcdSqs 	}
3235*3dec9fcdSqs 
3236*3dec9fcdSqs 	nrequired = 0;
3237*3dec9fcdSqs 	status = hxge_ldgv_init(hxgep, &nactual, &nrequired);
3238*3dec9fcdSqs 	if (status != HXGE_OK) {
3239*3dec9fcdSqs 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3240*3dec9fcdSqs 		    "hxge_add_intrs_adv_typ:hxge_ldgv_init "
3241*3dec9fcdSqs 		    "failed: 0x%x", status));
3242*3dec9fcdSqs 		/* Free already allocated interrupts */
3243*3dec9fcdSqs 		for (y = 0; y < nactual; y++) {
3244*3dec9fcdSqs 			(void) ddi_intr_free(intrp->htable[y]);
3245*3dec9fcdSqs 		}
3246*3dec9fcdSqs 
3247*3dec9fcdSqs 		kmem_free(intrp->htable, intrp->intr_size);
3248*3dec9fcdSqs 		return (status);
3249*3dec9fcdSqs 	}
3250*3dec9fcdSqs 
3251*3dec9fcdSqs 	ldgp = hxgep->ldgvp->ldgp;
3252*3dec9fcdSqs 	HXGE_DEBUG_MSG((hxgep, INT_CTL,
3253*3dec9fcdSqs 	    "After hxge_ldgv_init(): nreq %d nactual %d", nrequired, nactual));
3254*3dec9fcdSqs 
3255*3dec9fcdSqs 	if (nactual < nrequired)
3256*3dec9fcdSqs 		loop = nactual;
3257*3dec9fcdSqs 	else
3258*3dec9fcdSqs 		loop = nrequired;
3259*3dec9fcdSqs 
3260*3dec9fcdSqs 	for (x = 0; x < loop; x++, ldgp++) {
3261*3dec9fcdSqs 		ldgp->vector = (uint8_t)x;
3262*3dec9fcdSqs 		arg1 = ldgp->ldvp;
3263*3dec9fcdSqs 		arg2 = hxgep;
3264*3dec9fcdSqs 		if (ldgp->nldvs == 1) {
3265*3dec9fcdSqs 			inthandler = (uint_t *)ldgp->ldvp->ldv_intr_handler;
3266*3dec9fcdSqs 			HXGE_DEBUG_MSG((hxgep, INT_CTL,
3267*3dec9fcdSqs 			    "hxge_add_intrs_adv_type: arg1 0x%x arg2 0x%x: "
3268*3dec9fcdSqs 			    "1-1 int handler (entry %d)\n",
3269*3dec9fcdSqs 			    arg1, arg2, x));
3270*3dec9fcdSqs 		} else if (ldgp->nldvs > 1) {
3271*3dec9fcdSqs 			inthandler = (uint_t *)ldgp->sys_intr_handler;
3272*3dec9fcdSqs 			HXGE_DEBUG_MSG((hxgep, INT_CTL,
3273*3dec9fcdSqs 			    "hxge_add_intrs_adv_type: arg1 0x%x arg2 0x%x: "
3274*3dec9fcdSqs 			    "nldevs %d int handler (entry %d)\n",
3275*3dec9fcdSqs 			    arg1, arg2, ldgp->nldvs, x));
3276*3dec9fcdSqs 		}
3277*3dec9fcdSqs 		HXGE_DEBUG_MSG((hxgep, INT_CTL,
3278*3dec9fcdSqs 		    "==> hxge_add_intrs_adv_type: ddi_add_intr(inum) #%d "
3279*3dec9fcdSqs 		    "htable 0x%llx", x, intrp->htable[x]));
3280*3dec9fcdSqs 
3281*3dec9fcdSqs 		if ((ddi_status = ddi_intr_add_handler(intrp->htable[x],
3282*3dec9fcdSqs 		    (ddi_intr_handler_t *)inthandler, arg1, arg2)) !=
3283*3dec9fcdSqs 		    DDI_SUCCESS) {
3284*3dec9fcdSqs 			HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3285*3dec9fcdSqs 			    "==> hxge_add_intrs_adv_type: failed #%d "
3286*3dec9fcdSqs 			    "status 0x%x", x, ddi_status));
3287*3dec9fcdSqs 			for (y = 0; y < intrp->intr_added; y++) {
3288*3dec9fcdSqs 				(void) ddi_intr_remove_handler(
3289*3dec9fcdSqs 				    intrp->htable[y]);
3290*3dec9fcdSqs 			}
3291*3dec9fcdSqs 
3292*3dec9fcdSqs 			/* Free already allocated intr */
3293*3dec9fcdSqs 			for (y = 0; y < nactual; y++) {
3294*3dec9fcdSqs 				(void) ddi_intr_free(intrp->htable[y]);
3295*3dec9fcdSqs 			}
3296*3dec9fcdSqs 			kmem_free(intrp->htable, intrp->intr_size);
3297*3dec9fcdSqs 
3298*3dec9fcdSqs 			(void) hxge_ldgv_uninit(hxgep);
3299*3dec9fcdSqs 
3300*3dec9fcdSqs 			return (HXGE_ERROR | HXGE_DDI_FAILED);
3301*3dec9fcdSqs 		}
3302*3dec9fcdSqs 
3303*3dec9fcdSqs 		intrp->intr_added++;
3304*3dec9fcdSqs 	}
3305*3dec9fcdSqs 	intrp->msi_intx_cnt = nactual;
3306*3dec9fcdSqs 
3307*3dec9fcdSqs 	HXGE_DEBUG_MSG((hxgep, INT_CTL,
3308*3dec9fcdSqs 	    "Requested: %d, Allowed: %d msi_intx_cnt %d intr_added %d",
3309*3dec9fcdSqs 	    navail, nactual, intrp->msi_intx_cnt, intrp->intr_added));
3310*3dec9fcdSqs 
3311*3dec9fcdSqs 	(void) ddi_intr_get_cap(intrp->htable[0], &intrp->intr_cap);
3312*3dec9fcdSqs 	(void) hxge_intr_ldgv_init(hxgep);
3313*3dec9fcdSqs 
3314*3dec9fcdSqs 	HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_add_intrs_adv_type"));
3315*3dec9fcdSqs 
3316*3dec9fcdSqs 	return (status);
3317*3dec9fcdSqs }
3318*3dec9fcdSqs 
3319*3dec9fcdSqs /*ARGSUSED*/
3320*3dec9fcdSqs static hxge_status_t
3321*3dec9fcdSqs hxge_add_intrs_adv_type_fix(p_hxge_t hxgep, uint32_t int_type)
3322*3dec9fcdSqs {
3323*3dec9fcdSqs 	dev_info_t	*dip = hxgep->dip;
3324*3dec9fcdSqs 	p_hxge_ldg_t	ldgp;
3325*3dec9fcdSqs 	p_hxge_intr_t	intrp;
3326*3dec9fcdSqs 	uint_t		*inthandler;
3327*3dec9fcdSqs 	void		*arg1, *arg2;
3328*3dec9fcdSqs 	int		behavior;
3329*3dec9fcdSqs 	int		nintrs, navail;
3330*3dec9fcdSqs 	int		nactual, nrequired;
3331*3dec9fcdSqs 	int		inum = 0;
3332*3dec9fcdSqs 	int		x, y;
3333*3dec9fcdSqs 	int		ddi_status = DDI_SUCCESS;
3334*3dec9fcdSqs 	hxge_status_t	status = HXGE_OK;
3335*3dec9fcdSqs 
3336*3dec9fcdSqs 	HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_add_intrs_adv_type_fix"));
3337*3dec9fcdSqs 	intrp = (p_hxge_intr_t)&hxgep->hxge_intr_type;
3338*3dec9fcdSqs 
3339*3dec9fcdSqs 	ddi_status = ddi_intr_get_nintrs(dip, int_type, &nintrs);
3340*3dec9fcdSqs 	if ((ddi_status != DDI_SUCCESS) || (nintrs == 0)) {
3341*3dec9fcdSqs 		HXGE_DEBUG_MSG((hxgep, INT_CTL,
3342*3dec9fcdSqs 		    "ddi_intr_get_nintrs() failed, status: 0x%x%, "
3343*3dec9fcdSqs 		    "nintrs: %d", status, nintrs));
3344*3dec9fcdSqs 		return (HXGE_ERROR | HXGE_DDI_FAILED);
3345*3dec9fcdSqs 	}
3346*3dec9fcdSqs 
3347*3dec9fcdSqs 	ddi_status = ddi_intr_get_navail(dip, int_type, &navail);
3348*3dec9fcdSqs 	if ((ddi_status != DDI_SUCCESS) || (navail == 0)) {
3349*3dec9fcdSqs 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3350*3dec9fcdSqs 		    "ddi_intr_get_navail() failed, status: 0x%x%, "
3351*3dec9fcdSqs 		    "nintrs: %d", ddi_status, navail));
3352*3dec9fcdSqs 		return (HXGE_ERROR | HXGE_DDI_FAILED);
3353*3dec9fcdSqs 	}
3354*3dec9fcdSqs 
3355*3dec9fcdSqs 	HXGE_DEBUG_MSG((hxgep, INT_CTL,
3356*3dec9fcdSqs 	    "ddi_intr_get_navail() returned: nintrs %d, naavail %d",
3357*3dec9fcdSqs 	    nintrs, navail));
3358*3dec9fcdSqs 
3359*3dec9fcdSqs 	behavior = ((int_type == DDI_INTR_TYPE_FIXED) ? DDI_INTR_ALLOC_STRICT :
3360*3dec9fcdSqs 	    DDI_INTR_ALLOC_NORMAL);
3361*3dec9fcdSqs 	intrp->intr_size = navail * sizeof (ddi_intr_handle_t);
3362*3dec9fcdSqs 	intrp->htable = kmem_alloc(intrp->intr_size, KM_SLEEP);
3363*3dec9fcdSqs 	ddi_status = ddi_intr_alloc(dip, intrp->htable, int_type, inum,
3364*3dec9fcdSqs 	    navail, &nactual, behavior);
3365*3dec9fcdSqs 	if (ddi_status != DDI_SUCCESS || nactual == 0) {
3366*3dec9fcdSqs 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3367*3dec9fcdSqs 		    " ddi_intr_alloc() failed: %d", ddi_status));
3368*3dec9fcdSqs 		kmem_free(intrp->htable, intrp->intr_size);
3369*3dec9fcdSqs 		return (HXGE_ERROR | HXGE_DDI_FAILED);
3370*3dec9fcdSqs 	}
3371*3dec9fcdSqs 
3372*3dec9fcdSqs 	if ((ddi_status = ddi_intr_get_pri(intrp->htable[0],
3373*3dec9fcdSqs 	    (uint_t *)&intrp->pri)) != DDI_SUCCESS) {
3374*3dec9fcdSqs 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3375*3dec9fcdSqs 		    " ddi_intr_get_pri() failed: %d", ddi_status));
3376*3dec9fcdSqs 		/* Free already allocated interrupts */
3377*3dec9fcdSqs 		for (y = 0; y < nactual; y++) {
3378*3dec9fcdSqs 			(void) ddi_intr_free(intrp->htable[y]);
3379*3dec9fcdSqs 		}
3380*3dec9fcdSqs 
3381*3dec9fcdSqs 		kmem_free(intrp->htable, intrp->intr_size);
3382*3dec9fcdSqs 		return (HXGE_ERROR | HXGE_DDI_FAILED);
3383*3dec9fcdSqs 	}
3384*3dec9fcdSqs 
3385*3dec9fcdSqs 	nrequired = 0;
3386*3dec9fcdSqs 	status = hxge_ldgv_init(hxgep, &nactual, &nrequired);
3387*3dec9fcdSqs 	if (status != HXGE_OK) {
3388*3dec9fcdSqs 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3389*3dec9fcdSqs 		    "hxge_add_intrs_adv_type_fix:hxge_ldgv_init "
3390*3dec9fcdSqs 		    "failed: 0x%x", status));
3391*3dec9fcdSqs 		/* Free already allocated interrupts */
3392*3dec9fcdSqs 		for (y = 0; y < nactual; y++) {
3393*3dec9fcdSqs 			(void) ddi_intr_free(intrp->htable[y]);
3394*3dec9fcdSqs 		}
3395*3dec9fcdSqs 
3396*3dec9fcdSqs 		kmem_free(intrp->htable, intrp->intr_size);
3397*3dec9fcdSqs 		return (status);
3398*3dec9fcdSqs 	}
3399*3dec9fcdSqs 
3400*3dec9fcdSqs 	ldgp = hxgep->ldgvp->ldgp;
3401*3dec9fcdSqs 	for (x = 0; x < nrequired; x++, ldgp++) {
3402*3dec9fcdSqs 		ldgp->vector = (uint8_t)x;
3403*3dec9fcdSqs 		arg1 = ldgp->ldvp;
3404*3dec9fcdSqs 		arg2 = hxgep;
3405*3dec9fcdSqs 		if (ldgp->nldvs == 1) {
3406*3dec9fcdSqs 			inthandler = (uint_t *)ldgp->ldvp->ldv_intr_handler;
3407*3dec9fcdSqs 			HXGE_DEBUG_MSG((hxgep, INT_CTL,
3408*3dec9fcdSqs 			    "hxge_add_intrs_adv_type_fix: "
3409*3dec9fcdSqs 			    "1-1 int handler(%d) ldg %d ldv %d "
3410*3dec9fcdSqs 			    "arg1 $%p arg2 $%p\n",
3411*3dec9fcdSqs 			    x, ldgp->ldg, ldgp->ldvp->ldv, arg1, arg2));
3412*3dec9fcdSqs 		} else if (ldgp->nldvs > 1) {
3413*3dec9fcdSqs 			inthandler = (uint_t *)ldgp->sys_intr_handler;
3414*3dec9fcdSqs 			HXGE_DEBUG_MSG((hxgep, INT_CTL,
3415*3dec9fcdSqs 			    "hxge_add_intrs_adv_type_fix: "
3416*3dec9fcdSqs 			    "shared ldv %d int handler(%d) ldv %d ldg %d"
3417*3dec9fcdSqs 			    "arg1 0x%016llx arg2 0x%016llx\n",
3418*3dec9fcdSqs 			    x, ldgp->nldvs, ldgp->ldg, ldgp->ldvp->ldv,
3419*3dec9fcdSqs 			    arg1, arg2));
3420*3dec9fcdSqs 		}
3421*3dec9fcdSqs 
3422*3dec9fcdSqs 		if ((ddi_status = ddi_intr_add_handler(intrp->htable[x],
3423*3dec9fcdSqs 		    (ddi_intr_handler_t *)inthandler, arg1, arg2)) !=
3424*3dec9fcdSqs 		    DDI_SUCCESS) {
3425*3dec9fcdSqs 			HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3426*3dec9fcdSqs 			    "==> hxge_add_intrs_adv_type_fix: failed #%d "
3427*3dec9fcdSqs 			    "status 0x%x", x, ddi_status));
3428*3dec9fcdSqs 			for (y = 0; y < intrp->intr_added; y++) {
3429*3dec9fcdSqs 				(void) ddi_intr_remove_handler(
3430*3dec9fcdSqs 				    intrp->htable[y]);
3431*3dec9fcdSqs 			}
3432*3dec9fcdSqs 			for (y = 0; y < nactual; y++) {
3433*3dec9fcdSqs 				(void) ddi_intr_free(intrp->htable[y]);
3434*3dec9fcdSqs 			}
3435*3dec9fcdSqs 			/* Free already allocated intr */
3436*3dec9fcdSqs 			kmem_free(intrp->htable, intrp->intr_size);
3437*3dec9fcdSqs 
3438*3dec9fcdSqs 			(void) hxge_ldgv_uninit(hxgep);
3439*3dec9fcdSqs 
3440*3dec9fcdSqs 			return (HXGE_ERROR | HXGE_DDI_FAILED);
3441*3dec9fcdSqs 		}
3442*3dec9fcdSqs 		intrp->intr_added++;
3443*3dec9fcdSqs 	}
3444*3dec9fcdSqs 
3445*3dec9fcdSqs 	intrp->msi_intx_cnt = nactual;
3446*3dec9fcdSqs 
3447*3dec9fcdSqs 	(void) ddi_intr_get_cap(intrp->htable[0], &intrp->intr_cap);
3448*3dec9fcdSqs 
3449*3dec9fcdSqs 	status = hxge_intr_ldgv_init(hxgep);
3450*3dec9fcdSqs 
3451*3dec9fcdSqs 	HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_add_intrs_adv_type_fix"));
3452*3dec9fcdSqs 
3453*3dec9fcdSqs 	return (status);
3454*3dec9fcdSqs }
3455*3dec9fcdSqs 
3456*3dec9fcdSqs /*ARGSUSED*/
3457*3dec9fcdSqs static void
3458*3dec9fcdSqs hxge_remove_intrs(p_hxge_t hxgep)
3459*3dec9fcdSqs {
3460*3dec9fcdSqs 	int		i, inum;
3461*3dec9fcdSqs 	p_hxge_intr_t	intrp;
3462*3dec9fcdSqs 
3463*3dec9fcdSqs 	HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_remove_intrs"));
3464*3dec9fcdSqs 	intrp = (p_hxge_intr_t)&hxgep->hxge_intr_type;
3465*3dec9fcdSqs 	if (!intrp->intr_registered) {
3466*3dec9fcdSqs 		HXGE_DEBUG_MSG((hxgep, INT_CTL,
3467*3dec9fcdSqs 		    "<== hxge_remove_intrs: interrupts not registered"));
3468*3dec9fcdSqs 		return;
3469*3dec9fcdSqs 	}
3470*3dec9fcdSqs 
3471*3dec9fcdSqs 	HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_remove_intrs:advanced"));
3472*3dec9fcdSqs 
3473*3dec9fcdSqs 	if (intrp->intr_cap & DDI_INTR_FLAG_BLOCK) {
3474*3dec9fcdSqs 		(void) ddi_intr_block_disable(intrp->htable,
3475*3dec9fcdSqs 		    intrp->intr_added);
3476*3dec9fcdSqs 	} else {
3477*3dec9fcdSqs 		for (i = 0; i < intrp->intr_added; i++) {
3478*3dec9fcdSqs 			(void) ddi_intr_disable(intrp->htable[i]);
3479*3dec9fcdSqs 		}
3480*3dec9fcdSqs 	}
3481*3dec9fcdSqs 
3482*3dec9fcdSqs 	for (inum = 0; inum < intrp->intr_added; inum++) {
3483*3dec9fcdSqs 		if (intrp->htable[inum]) {
3484*3dec9fcdSqs 			(void) ddi_intr_remove_handler(intrp->htable[inum]);
3485*3dec9fcdSqs 		}
3486*3dec9fcdSqs 	}
3487*3dec9fcdSqs 
3488*3dec9fcdSqs 	for (inum = 0; inum < intrp->msi_intx_cnt; inum++) {
3489*3dec9fcdSqs 		if (intrp->htable[inum]) {
3490*3dec9fcdSqs 			HXGE_DEBUG_MSG((hxgep, DDI_CTL,
3491*3dec9fcdSqs 			    "hxge_remove_intrs: ddi_intr_free inum %d "
3492*3dec9fcdSqs 			    "msi_intx_cnt %d intr_added %d",
3493*3dec9fcdSqs 			    inum, intrp->msi_intx_cnt, intrp->intr_added));
3494*3dec9fcdSqs 
3495*3dec9fcdSqs 			(void) ddi_intr_free(intrp->htable[inum]);
3496*3dec9fcdSqs 		}
3497*3dec9fcdSqs 	}
3498*3dec9fcdSqs 
3499*3dec9fcdSqs 	kmem_free(intrp->htable, intrp->intr_size);
3500*3dec9fcdSqs 	intrp->intr_registered = B_FALSE;
3501*3dec9fcdSqs 	intrp->intr_enabled = B_FALSE;
3502*3dec9fcdSqs 	intrp->msi_intx_cnt = 0;
3503*3dec9fcdSqs 	intrp->intr_added = 0;
3504*3dec9fcdSqs 
3505*3dec9fcdSqs 	(void) hxge_ldgv_uninit(hxgep);
3506*3dec9fcdSqs 
3507*3dec9fcdSqs 	HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_remove_intrs"));
3508*3dec9fcdSqs }
3509*3dec9fcdSqs 
3510*3dec9fcdSqs /*ARGSUSED*/
3511*3dec9fcdSqs static void
3512*3dec9fcdSqs hxge_remove_soft_intrs(p_hxge_t hxgep)
3513*3dec9fcdSqs {
3514*3dec9fcdSqs 	HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_remove_soft_intrs"));
3515*3dec9fcdSqs 
3516*3dec9fcdSqs 	if (hxgep->resched_id) {
3517*3dec9fcdSqs 		ddi_remove_softintr(hxgep->resched_id);
3518*3dec9fcdSqs 		HXGE_DEBUG_MSG((hxgep, INT_CTL,
3519*3dec9fcdSqs 		    "==> hxge_remove_soft_intrs: removed"));
3520*3dec9fcdSqs 		hxgep->resched_id = NULL;
3521*3dec9fcdSqs 	}
3522*3dec9fcdSqs 
3523*3dec9fcdSqs 	HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_remove_soft_intrs"));
3524*3dec9fcdSqs }
3525*3dec9fcdSqs 
3526*3dec9fcdSqs /*ARGSUSED*/
3527*3dec9fcdSqs void
3528*3dec9fcdSqs hxge_intrs_enable(p_hxge_t hxgep)
3529*3dec9fcdSqs {
3530*3dec9fcdSqs 	p_hxge_intr_t	intrp;
3531*3dec9fcdSqs 	int		i;
3532*3dec9fcdSqs 	int		status;
3533*3dec9fcdSqs 
3534*3dec9fcdSqs 	HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_intrs_enable"));
3535*3dec9fcdSqs 
3536*3dec9fcdSqs 	intrp = (p_hxge_intr_t)&hxgep->hxge_intr_type;
3537*3dec9fcdSqs 
3538*3dec9fcdSqs 	if (!intrp->intr_registered) {
3539*3dec9fcdSqs 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "<== hxge_intrs_enable: "
3540*3dec9fcdSqs 		    "interrupts are not registered"));
3541*3dec9fcdSqs 		return;
3542*3dec9fcdSqs 	}
3543*3dec9fcdSqs 
3544*3dec9fcdSqs 	if (intrp->intr_enabled) {
3545*3dec9fcdSqs 		HXGE_DEBUG_MSG((hxgep, INT_CTL,
3546*3dec9fcdSqs 		    "<== hxge_intrs_enable: already enabled"));
3547*3dec9fcdSqs 		return;
3548*3dec9fcdSqs 	}
3549*3dec9fcdSqs 
3550*3dec9fcdSqs 	if (intrp->intr_cap & DDI_INTR_FLAG_BLOCK) {
3551*3dec9fcdSqs 		status = ddi_intr_block_enable(intrp->htable,
3552*3dec9fcdSqs 		    intrp->intr_added);
3553*3dec9fcdSqs 		HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_intrs_enable "
3554*3dec9fcdSqs 		    "block enable - status 0x%x total inums #%d\n",
3555*3dec9fcdSqs 		    status, intrp->intr_added));
3556*3dec9fcdSqs 	} else {
3557*3dec9fcdSqs 		for (i = 0; i < intrp->intr_added; i++) {
3558*3dec9fcdSqs 			status = ddi_intr_enable(intrp->htable[i]);
3559*3dec9fcdSqs 			HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_intrs_enable "
3560*3dec9fcdSqs 			    "ddi_intr_enable:enable - status 0x%x "
3561*3dec9fcdSqs 			    "total inums %d enable inum #%d\n",
3562*3dec9fcdSqs 			    status, intrp->intr_added, i));
3563*3dec9fcdSqs 			if (status == DDI_SUCCESS) {
3564*3dec9fcdSqs 				intrp->intr_enabled = B_TRUE;
3565*3dec9fcdSqs 			}
3566*3dec9fcdSqs 		}
3567*3dec9fcdSqs 	}
3568*3dec9fcdSqs 
3569*3dec9fcdSqs 	HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_intrs_enable"));
3570*3dec9fcdSqs }
3571*3dec9fcdSqs 
3572*3dec9fcdSqs /*ARGSUSED*/
3573*3dec9fcdSqs static void
3574*3dec9fcdSqs hxge_intrs_disable(p_hxge_t hxgep)
3575*3dec9fcdSqs {
3576*3dec9fcdSqs 	p_hxge_intr_t	intrp;
3577*3dec9fcdSqs 	int		i;
3578*3dec9fcdSqs 
3579*3dec9fcdSqs 	HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_intrs_disable"));
3580*3dec9fcdSqs 
3581*3dec9fcdSqs 	intrp = (p_hxge_intr_t)&hxgep->hxge_intr_type;
3582*3dec9fcdSqs 
3583*3dec9fcdSqs 	if (!intrp->intr_registered) {
3584*3dec9fcdSqs 		HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_intrs_disable: "
3585*3dec9fcdSqs 		    "interrupts are not registered"));
3586*3dec9fcdSqs 		return;
3587*3dec9fcdSqs 	}
3588*3dec9fcdSqs 
3589*3dec9fcdSqs 	if (intrp->intr_cap & DDI_INTR_FLAG_BLOCK) {
3590*3dec9fcdSqs 		(void) ddi_intr_block_disable(intrp->htable,
3591*3dec9fcdSqs 		    intrp->intr_added);
3592*3dec9fcdSqs 	} else {
3593*3dec9fcdSqs 		for (i = 0; i < intrp->intr_added; i++) {
3594*3dec9fcdSqs 			(void) ddi_intr_disable(intrp->htable[i]);
3595*3dec9fcdSqs 		}
3596*3dec9fcdSqs 	}
3597*3dec9fcdSqs 
3598*3dec9fcdSqs 	intrp->intr_enabled = B_FALSE;
3599*3dec9fcdSqs 	HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_intrs_disable"));
3600*3dec9fcdSqs }
3601*3dec9fcdSqs 
3602*3dec9fcdSqs static hxge_status_t
3603*3dec9fcdSqs hxge_mac_register(p_hxge_t hxgep)
3604*3dec9fcdSqs {
3605*3dec9fcdSqs 	mac_register_t	*macp;
3606*3dec9fcdSqs 	int		status;
3607*3dec9fcdSqs 
3608*3dec9fcdSqs 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_mac_register"));
3609*3dec9fcdSqs 
3610*3dec9fcdSqs 	if ((macp = mac_alloc(MAC_VERSION)) == NULL)
3611*3dec9fcdSqs 		return (HXGE_ERROR);
3612*3dec9fcdSqs 
3613*3dec9fcdSqs 	macp->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
3614*3dec9fcdSqs 	macp->m_driver = hxgep;
3615*3dec9fcdSqs 	macp->m_dip = hxgep->dip;
3616*3dec9fcdSqs 	macp->m_src_addr = hxgep->ouraddr.ether_addr_octet;
3617*3dec9fcdSqs 
3618*3dec9fcdSqs 	HXGE_DEBUG_MSG((hxgep, DDI_CTL,
3619*3dec9fcdSqs 	    "hxge_mac_register: ether addr is %x:%x:%x:%x:%x:%x",
3620*3dec9fcdSqs 	    macp->m_src_addr[0],
3621*3dec9fcdSqs 	    macp->m_src_addr[1],
3622*3dec9fcdSqs 	    macp->m_src_addr[2],
3623*3dec9fcdSqs 	    macp->m_src_addr[3],
3624*3dec9fcdSqs 	    macp->m_src_addr[4],
3625*3dec9fcdSqs 	    macp->m_src_addr[5]));
3626*3dec9fcdSqs 
3627*3dec9fcdSqs 	macp->m_callbacks = &hxge_m_callbacks;
3628*3dec9fcdSqs 	macp->m_min_sdu = 0;
3629*3dec9fcdSqs 	macp->m_max_sdu = hxgep->vmac.maxframesize -
3630*3dec9fcdSqs 	    sizeof (struct ether_header) - ETHERFCSL - 4 - TX_PKT_HEADER_SIZE;
3631*3dec9fcdSqs 
3632*3dec9fcdSqs 	status = mac_register(macp, &hxgep->mach);
3633*3dec9fcdSqs 	mac_free(macp);
3634*3dec9fcdSqs 
3635*3dec9fcdSqs 	if (status != 0) {
3636*3dec9fcdSqs 		cmn_err(CE_WARN,
3637*3dec9fcdSqs 		    "hxge_mac_register failed (status %d instance %d)",
3638*3dec9fcdSqs 		    status, hxgep->instance);
3639*3dec9fcdSqs 		return (HXGE_ERROR);
3640*3dec9fcdSqs 	}
3641*3dec9fcdSqs 
3642*3dec9fcdSqs 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_mac_register success "
3643*3dec9fcdSqs 	    "(instance %d)", hxgep->instance));
3644*3dec9fcdSqs 
3645*3dec9fcdSqs 	return (HXGE_OK);
3646*3dec9fcdSqs }
3647*3dec9fcdSqs 
3648*3dec9fcdSqs static int
3649*3dec9fcdSqs hxge_init_common_dev(p_hxge_t hxgep)
3650*3dec9fcdSqs {
3651*3dec9fcdSqs 	p_hxge_hw_list_t	hw_p;
3652*3dec9fcdSqs 	dev_info_t		*p_dip;
3653*3dec9fcdSqs 
3654*3dec9fcdSqs 	HXGE_DEBUG_MSG((hxgep, MOD_CTL, "==> hxge_init_common_dev"));
3655*3dec9fcdSqs 
3656*3dec9fcdSqs 	p_dip = hxgep->p_dip;
3657*3dec9fcdSqs 	MUTEX_ENTER(&hxge_common_lock);
3658*3dec9fcdSqs 
3659*3dec9fcdSqs 	/*
3660*3dec9fcdSqs 	 * Loop through existing per Hydra hardware list.
3661*3dec9fcdSqs 	 */
3662*3dec9fcdSqs 	for (hw_p = hxge_hw_list; hw_p; hw_p = hw_p->next) {
3663*3dec9fcdSqs 		HXGE_DEBUG_MSG((hxgep, MOD_CTL,
3664*3dec9fcdSqs 		    "==> hxge_init_common_dev: hw_p $%p parent dip $%p",
3665*3dec9fcdSqs 		    hw_p, p_dip));
3666*3dec9fcdSqs 		if (hw_p->parent_devp == p_dip) {
3667*3dec9fcdSqs 			hxgep->hxge_hw_p = hw_p;
3668*3dec9fcdSqs 			hw_p->ndevs++;
3669*3dec9fcdSqs 			hw_p->hxge_p = hxgep;
3670*3dec9fcdSqs 			HXGE_DEBUG_MSG((hxgep, MOD_CTL,
3671*3dec9fcdSqs 			    "==> hxge_init_common_device: "
3672*3dec9fcdSqs 			    "hw_p $%p parent dip $%p ndevs %d (found)",
3673*3dec9fcdSqs 			    hw_p, p_dip, hw_p->ndevs));
3674*3dec9fcdSqs 			break;
3675*3dec9fcdSqs 		}
3676*3dec9fcdSqs 	}
3677*3dec9fcdSqs 
3678*3dec9fcdSqs 	if (hw_p == NULL) {
3679*3dec9fcdSqs 		HXGE_DEBUG_MSG((hxgep, MOD_CTL,
3680*3dec9fcdSqs 		    "==> hxge_init_common_dev: parent dip $%p (new)", p_dip));
3681*3dec9fcdSqs 		hw_p = kmem_zalloc(sizeof (hxge_hw_list_t), KM_SLEEP);
3682*3dec9fcdSqs 		hw_p->parent_devp = p_dip;
3683*3dec9fcdSqs 		hw_p->magic = HXGE_MAGIC;
3684*3dec9fcdSqs 		hxgep->hxge_hw_p = hw_p;
3685*3dec9fcdSqs 		hw_p->ndevs++;
3686*3dec9fcdSqs 		hw_p->hxge_p = hxgep;
3687*3dec9fcdSqs 		hw_p->next = hxge_hw_list;
3688*3dec9fcdSqs 
3689*3dec9fcdSqs 		MUTEX_INIT(&hw_p->hxge_cfg_lock, NULL, MUTEX_DRIVER, NULL);
3690*3dec9fcdSqs 		MUTEX_INIT(&hw_p->hxge_tcam_lock, NULL, MUTEX_DRIVER, NULL);
3691*3dec9fcdSqs 		MUTEX_INIT(&hw_p->hxge_vlan_lock, NULL, MUTEX_DRIVER, NULL);
3692*3dec9fcdSqs 
3693*3dec9fcdSqs 		hxge_hw_list = hw_p;
3694*3dec9fcdSqs 	}
3695*3dec9fcdSqs 	MUTEX_EXIT(&hxge_common_lock);
3696*3dec9fcdSqs 	HXGE_DEBUG_MSG((hxgep, MOD_CTL,
3697*3dec9fcdSqs 	    "==> hxge_init_common_dev (hxge_hw_list) $%p", hxge_hw_list));
3698*3dec9fcdSqs 	HXGE_DEBUG_MSG((hxgep, MOD_CTL, "<== hxge_init_common_dev"));
3699*3dec9fcdSqs 
3700*3dec9fcdSqs 	return (HXGE_OK);
3701*3dec9fcdSqs }
3702*3dec9fcdSqs 
3703*3dec9fcdSqs static void
3704*3dec9fcdSqs hxge_uninit_common_dev(p_hxge_t hxgep)
3705*3dec9fcdSqs {
3706*3dec9fcdSqs 	p_hxge_hw_list_t	hw_p, h_hw_p;
3707*3dec9fcdSqs 	dev_info_t		*p_dip;
3708*3dec9fcdSqs 
3709*3dec9fcdSqs 	HXGE_DEBUG_MSG((hxgep, MOD_CTL, "==> hxge_uninit_common_dev"));
3710*3dec9fcdSqs 	if (hxgep->hxge_hw_p == NULL) {
3711*3dec9fcdSqs 		HXGE_DEBUG_MSG((hxgep, MOD_CTL,
3712*3dec9fcdSqs 		    "<== hxge_uninit_common_dev (no common)"));
3713*3dec9fcdSqs 		return;
3714*3dec9fcdSqs 	}
3715*3dec9fcdSqs 
3716*3dec9fcdSqs 	MUTEX_ENTER(&hxge_common_lock);
3717*3dec9fcdSqs 	h_hw_p = hxge_hw_list;
3718*3dec9fcdSqs 	for (hw_p = hxge_hw_list; hw_p; hw_p = hw_p->next) {
3719*3dec9fcdSqs 		p_dip = hw_p->parent_devp;
3720*3dec9fcdSqs 		if (hxgep->hxge_hw_p == hw_p && p_dip == hxgep->p_dip &&
3721*3dec9fcdSqs 		    hxgep->hxge_hw_p->magic == HXGE_MAGIC &&
3722*3dec9fcdSqs 		    hw_p->magic == HXGE_MAGIC) {
3723*3dec9fcdSqs 			HXGE_DEBUG_MSG((hxgep, MOD_CTL,
3724*3dec9fcdSqs 			    "==> hxge_uninit_common_dev: "
3725*3dec9fcdSqs 			    "hw_p $%p parent dip $%p ndevs %d (found)",
3726*3dec9fcdSqs 			    hw_p, p_dip, hw_p->ndevs));
3727*3dec9fcdSqs 
3728*3dec9fcdSqs 			hxgep->hxge_hw_p = NULL;
3729*3dec9fcdSqs 			if (hw_p->ndevs) {
3730*3dec9fcdSqs 				hw_p->ndevs--;
3731*3dec9fcdSqs 			}
3732*3dec9fcdSqs 			hw_p->hxge_p = NULL;
3733*3dec9fcdSqs 			if (!hw_p->ndevs) {
3734*3dec9fcdSqs 				MUTEX_DESTROY(&hw_p->hxge_vlan_lock);
3735*3dec9fcdSqs 				MUTEX_DESTROY(&hw_p->hxge_tcam_lock);
3736*3dec9fcdSqs 				MUTEX_DESTROY(&hw_p->hxge_cfg_lock);
3737*3dec9fcdSqs 				HXGE_DEBUG_MSG((hxgep, MOD_CTL,
3738*3dec9fcdSqs 				    "==> hxge_uninit_common_dev: "
3739*3dec9fcdSqs 				    "hw_p $%p parent dip $%p ndevs %d (last)",
3740*3dec9fcdSqs 				    hw_p, p_dip, hw_p->ndevs));
3741*3dec9fcdSqs 
3742*3dec9fcdSqs 				if (hw_p == hxge_hw_list) {
3743*3dec9fcdSqs 					HXGE_DEBUG_MSG((hxgep, MOD_CTL,
3744*3dec9fcdSqs 					    "==> hxge_uninit_common_dev:"
3745*3dec9fcdSqs 					    "remove head "
3746*3dec9fcdSqs 					    "hw_p $%p parent dip $%p "
3747*3dec9fcdSqs 					    "ndevs %d (head)",
3748*3dec9fcdSqs 					    hw_p, p_dip, hw_p->ndevs));
3749*3dec9fcdSqs 					hxge_hw_list = hw_p->next;
3750*3dec9fcdSqs 				} else {
3751*3dec9fcdSqs 					HXGE_DEBUG_MSG((hxgep, MOD_CTL,
3752*3dec9fcdSqs 					    "==> hxge_uninit_common_dev:"
3753*3dec9fcdSqs 					    "remove middle "
3754*3dec9fcdSqs 					    "hw_p $%p parent dip $%p "
3755*3dec9fcdSqs 					    "ndevs %d (middle)",
3756*3dec9fcdSqs 					    hw_p, p_dip, hw_p->ndevs));
3757*3dec9fcdSqs 					h_hw_p->next = hw_p->next;
3758*3dec9fcdSqs 				}
3759*3dec9fcdSqs 
3760*3dec9fcdSqs 				KMEM_FREE(hw_p, sizeof (hxge_hw_list_t));
3761*3dec9fcdSqs 			}
3762*3dec9fcdSqs 			break;
3763*3dec9fcdSqs 		} else {
3764*3dec9fcdSqs 			h_hw_p = hw_p;
3765*3dec9fcdSqs 		}
3766*3dec9fcdSqs 	}
3767*3dec9fcdSqs 
3768*3dec9fcdSqs 	MUTEX_EXIT(&hxge_common_lock);
3769*3dec9fcdSqs 	HXGE_DEBUG_MSG((hxgep, MOD_CTL,
3770*3dec9fcdSqs 	    "==> hxge_uninit_common_dev (hxge_hw_list) $%p", hxge_hw_list));
3771*3dec9fcdSqs 
3772*3dec9fcdSqs 	HXGE_DEBUG_MSG((hxgep, MOD_CTL, "<= hxge_uninit_common_dev"));
3773*3dec9fcdSqs }
3774