xref: /illumos-gate/usr/src/uts/common/io/hxge/hxge_main.c (revision a512c5d1)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  */
25 
26 /*
27  * SunOS MT STREAMS Hydra 10Gb Ethernet Device Driver.
28  */
29 #include <hxge_impl.h>
30 #include <hxge_pfc.h>
31 
32 /*
33  * PSARC/2007/453 MSI-X interrupt limit override
34  * (This PSARC case is limited to MSI-X vectors
35  *  and SPARC platforms only).
36  */
37 #if defined(_BIG_ENDIAN)
38 uint32_t hxge_msi_enable = 2;
39 #else
40 uint32_t hxge_msi_enable = 1;
41 #endif
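
/*
 * Illustrative sketch only (assumed semantics, not the authoritative
 * logic in hxge_add_intrs_adv()): a nonzero hxge_msi_enable lets the
 * driver prefer MSI/MSI-X over fixed interrupts when the platform
 * advertises them.
 */
#if 0
	int itypes = 0;

	(void) ddi_intr_get_supported_types(hxgep->dip, &itypes);
	if (hxge_msi_enable && (itypes & DDI_INTR_TYPE_MSIX))
		/* try ddi_intr_alloc() with DDI_INTR_TYPE_MSIX first */;
#endif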
42 
43 /*
44  * Globals: tunable parameters (/etc/system or adb)
45  *
46  */
47 uint32_t hxge_rbr_size = HXGE_RBR_RBB_DEFAULT;
48 uint32_t hxge_rbr_spare_size = 0;
49 uint32_t hxge_rcr_size = HXGE_RCR_DEFAULT;
50 uint32_t hxge_tx_ring_size = HXGE_TX_RING_DEFAULT;
51 uint32_t hxge_bcopy_thresh = TX_BCOPY_MAX;
52 uint32_t hxge_dvma_thresh = TX_FASTDVMA_MIN;
53 uint32_t hxge_dma_stream_thresh = TX_STREAM_MIN;
54 uint32_t hxge_jumbo_frame_size = MAX_FRAME_SIZE;
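
/*
 * For illustration, these tunables can be set from /etc/system using
 * the standard "set module:variable" syntax (values below are
 * hypothetical, not recommendations):
 *
 *	set hxge:hxge_rbr_size = 2048
 *	set hxge:hxge_jumbo_frame_size = 9216
 */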
55 
56 static hxge_os_mutex_t hxgedebuglock;
57 static int hxge_debug_init = 0;
58 
59 /*
60  * Debugging flags:
61  *		hxge_no_tx_lb: set nonzero to disable transmit load balancing
62  *		hxge_tx_lb_policy: 0 - TCP/UDP port (default)
63  *				   1 - From the Stack
64  *				   2 - Destination IP Address
65  */
66 uint32_t hxge_no_tx_lb = 0;
67 uint32_t hxge_tx_lb_policy = HXGE_TX_LB_TCPUDP;
68 
69 /*
70  * Add tunable to reduce the amount of time spent in the
71  * ISR doing Rx Processing.
72  */
73 uint32_t hxge_max_rx_pkts = 1024;
74 
75 /*
76  * Tunables to manage the receive buffer blocks.
77  *
78  * hxge_rx_threshold_hi: copy all buffers.
79  * hxge_rx_buf_size_type: receive buffer block size type.
80  * hxge_rx_threshold_lo: copy only up to tunable block size type.
81  */
82 hxge_rxbuf_threshold_t hxge_rx_threshold_hi = HXGE_RX_COPY_6;
83 hxge_rxbuf_type_t hxge_rx_buf_size_type = RCR_PKTBUFSZ_0;
84 hxge_rxbuf_threshold_t hxge_rx_threshold_lo = HXGE_RX_COPY_3;
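
/*
 * A minimal sketch of how such thresholds are typically consulted on
 * the receive path (assumed decision logic; the authoritative code
 * lives in the rx data path, not in this file):
 */
#if 0
static boolean_t
hxge_rx_should_bcopy(uint32_t bufs_in_use, size_t pkt_len, size_t bufsz)
{
	if (bufs_in_use >= hxge_rx_threshold_hi)
		return (B_TRUE);		/* copy every packet */
	if (bufs_in_use >= hxge_rx_threshold_lo)
		return (pkt_len <= bufsz);	/* copy small packets only */
	return (B_FALSE);			/* loan the buffer upstream */
}
#endif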
85 
86 rtrace_t hpi_rtracebuf;
87 
88 /*
89  * Function Prototypes
90  */
91 static int hxge_attach(dev_info_t *, ddi_attach_cmd_t);
92 static int hxge_detach(dev_info_t *, ddi_detach_cmd_t);
93 static void hxge_unattach(p_hxge_t);
94 
95 static hxge_status_t hxge_setup_system_dma_pages(p_hxge_t);
96 
97 static hxge_status_t hxge_setup_mutexes(p_hxge_t);
98 static void hxge_destroy_mutexes(p_hxge_t);
99 
100 static hxge_status_t hxge_map_regs(p_hxge_t hxgep);
101 static void hxge_unmap_regs(p_hxge_t hxgep);
102 
103 hxge_status_t hxge_add_intrs(p_hxge_t hxgep);
104 static hxge_status_t hxge_add_soft_intrs(p_hxge_t hxgep);
105 static void hxge_remove_intrs(p_hxge_t hxgep);
106 static void hxge_remove_soft_intrs(p_hxge_t hxgep);
107 static hxge_status_t hxge_add_intrs_adv(p_hxge_t hxgep);
108 static hxge_status_t hxge_add_intrs_adv_type(p_hxge_t, uint32_t);
109 static hxge_status_t hxge_add_intrs_adv_type_fix(p_hxge_t, uint32_t);
110 void hxge_intrs_enable(p_hxge_t hxgep);
111 static void hxge_intrs_disable(p_hxge_t hxgep);
112 static void hxge_suspend(p_hxge_t);
113 static hxge_status_t hxge_resume(p_hxge_t);
114 hxge_status_t hxge_setup_dev(p_hxge_t);
115 static void hxge_destroy_dev(p_hxge_t);
116 hxge_status_t hxge_alloc_mem_pool(p_hxge_t);
117 static void hxge_free_mem_pool(p_hxge_t);
118 static hxge_status_t hxge_alloc_rx_mem_pool(p_hxge_t);
119 static void hxge_free_rx_mem_pool(p_hxge_t);
120 static hxge_status_t hxge_alloc_tx_mem_pool(p_hxge_t);
121 static void hxge_free_tx_mem_pool(p_hxge_t);
122 static hxge_status_t hxge_dma_mem_alloc(p_hxge_t, dma_method_t,
123     struct ddi_dma_attr *, size_t, ddi_device_acc_attr_t *, uint_t,
124     p_hxge_dma_common_t);
125 static void hxge_dma_mem_free(p_hxge_dma_common_t);
126 static hxge_status_t hxge_alloc_rx_buf_dma(p_hxge_t, uint16_t,
127     p_hxge_dma_common_t *, size_t, size_t, uint32_t *);
128 static void hxge_free_rx_buf_dma(p_hxge_t, p_hxge_dma_common_t, uint32_t);
129 static hxge_status_t hxge_alloc_rx_cntl_dma(p_hxge_t, uint16_t,
130     p_hxge_dma_common_t *, size_t);
131 static void hxge_free_rx_cntl_dma(p_hxge_t, p_hxge_dma_common_t);
132 static hxge_status_t hxge_alloc_tx_buf_dma(p_hxge_t, uint16_t,
133     p_hxge_dma_common_t *, size_t, size_t, uint32_t *);
134 static void hxge_free_tx_buf_dma(p_hxge_t, p_hxge_dma_common_t, uint32_t);
135 static hxge_status_t hxge_alloc_tx_cntl_dma(p_hxge_t, uint16_t,
136     p_hxge_dma_common_t *, size_t);
137 static void hxge_free_tx_cntl_dma(p_hxge_t, p_hxge_dma_common_t);
138 static int hxge_init_common_dev(p_hxge_t);
139 static void hxge_uninit_common_dev(p_hxge_t);
140 
141 /*
142  * The next declarations are for the GLDv3 interface.
143  */
144 static int hxge_m_start(void *);
145 static void hxge_m_stop(void *);
146 static int hxge_m_unicst(void *, const uint8_t *);
147 static int hxge_m_multicst(void *, boolean_t, const uint8_t *);
148 static int hxge_m_promisc(void *, boolean_t);
149 static void hxge_m_ioctl(void *, queue_t *, mblk_t *);
150 static void hxge_m_resources(void *);
151 static hxge_status_t hxge_mac_register(p_hxge_t hxgep);
152 
153 static int hxge_m_mmac_add(void *arg, mac_multi_addr_t *maddr);
154 static int hxge_m_mmac_remove(void *arg, mac_addr_slot_t slot);
155 static int hxge_m_mmac_modify(void *arg, mac_multi_addr_t *maddr);
156 static int hxge_m_mmac_get(void *arg, mac_multi_addr_t *maddr);
157 static boolean_t hxge_m_getcapab(void *, mac_capab_t, void *);
158 static boolean_t hxge_param_locked(mac_prop_id_t pr_num);
159 static int hxge_m_setprop(void *barg, const char *pr_name, mac_prop_id_t pr_num,
160     uint_t pr_valsize, const void *pr_val);
161 static int hxge_m_getprop(void *barg, const char *pr_name, mac_prop_id_t pr_num,
162     uint_t pr_flags, uint_t pr_valsize, void *pr_val);
163 static int hxge_get_def_val(hxge_t *hxgep, mac_prop_id_t pr_num,
164     uint_t pr_valsize, void *pr_val);
165 static int hxge_set_priv_prop(p_hxge_t hxgep, const char *pr_name,
166     uint_t pr_valsize, const void *pr_val);
167 static int hxge_get_priv_prop(p_hxge_t hxgep, const char *pr_name,
168     uint_t pr_flags, uint_t pr_valsize, void *pr_val);
169 static void hxge_link_poll(void *arg);
170 
171 mac_priv_prop_t hxge_priv_props[] = {
172 	{"_rxdma_intr_time", MAC_PROP_PERM_RW},
173 	{"_rxdma_intr_pkts", MAC_PROP_PERM_RW},
174 	{"_class_opt_ipv4_tcp", MAC_PROP_PERM_RW},
175 	{"_class_opt_ipv4_udp", MAC_PROP_PERM_RW},
176 	{"_class_opt_ipv4_ah", MAC_PROP_PERM_RW},
177 	{"_class_opt_ipv4_sctp", MAC_PROP_PERM_RW},
178 	{"_class_opt_ipv6_tcp", MAC_PROP_PERM_RW},
179 	{"_class_opt_ipv6_udp", MAC_PROP_PERM_RW},
180 	{"_class_opt_ipv6_ah", MAC_PROP_PERM_RW},
181 	{"_class_opt_ipv6_sctp", MAC_PROP_PERM_RW}
182 };
183 
184 #define	HXGE_MAX_PRIV_PROPS	\
185 	(sizeof (hxge_priv_props)/sizeof (mac_priv_prop_t))
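
/*
 * For illustration, private properties like these are reachable
 * through dladm, e.g. (hypothetical link name and value):
 *
 *	dladm show-linkprop -p _rxdma_intr_pkts hxge0
 *	dladm set-linkprop -p _rxdma_intr_pkts=128 hxge0
 */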
186 
187 #define	HXGE_MAGIC	0x4E584745UL
188 #define	MAX_DUMP_SZ 256
189 
190 #define	HXGE_M_CALLBACK_FLAGS	\
191 	(MC_RESOURCES | MC_IOCTL | MC_GETCAPAB | MC_SETPROP | MC_GETPROP)
192 
193 extern mblk_t *hxge_m_tx(void *arg, mblk_t *mp);
194 extern hxge_status_t hxge_pfc_set_default_mac_addr(p_hxge_t hxgep);
195 
196 static mac_callbacks_t hxge_m_callbacks = {
197 	HXGE_M_CALLBACK_FLAGS,
198 	hxge_m_stat,
199 	hxge_m_start,
200 	hxge_m_stop,
201 	hxge_m_promisc,
202 	hxge_m_multicst,
203 	hxge_m_unicst,
204 	hxge_m_tx,
205 	hxge_m_resources,
206 	hxge_m_ioctl,
207 	hxge_m_getcapab,
208 	NULL,
209 	NULL,
210 	hxge_m_setprop,
211 	hxge_m_getprop
212 };
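
/*
 * A minimal sketch of the registration these callbacks feed into
 * (assumed shape; hxge_mac_register() below is the authoritative
 * version):
 */
#if 0
	mac_register_t *macp = mac_alloc(MAC_VERSION);

	macp->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
	macp->m_driver = hxgep;
	macp->m_dip = hxgep->dip;
	macp->m_callbacks = &hxge_m_callbacks;
	/* m_src_addr, m_min_sdu and m_max_sdu come from the instance */
	status = mac_register(macp, &hxgep->mach);
	mac_free(macp);
#endif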
213 
214 /* Enable debug messages as necessary. */
215 uint64_t hxge_debug_level = 0;
216 
217 /*
218  * This list contains the instance structures for the Hydra
219  * devices present in the system. The lock exists to guarantee
220  * mutually exclusive access to the list.
221  */
222 void *hxge_list = NULL;
223 void *hxge_hw_list = NULL;
224 hxge_os_mutex_t hxge_common_lock;
225 
226 extern uint64_t hpi_debug_level;
227 
228 extern hxge_status_t hxge_ldgv_init();
229 extern hxge_status_t hxge_ldgv_uninit();
230 extern hxge_status_t hxge_intr_ldgv_init();
231 extern void hxge_fm_init(p_hxge_t hxgep, ddi_device_acc_attr_t *reg_attr,
232     ddi_device_acc_attr_t *desc_attr, ddi_dma_attr_t *dma_attr);
233 extern void hxge_fm_fini(p_hxge_t hxgep);
234 
235 /*
236  * Count used to maintain the number of buffers being used
237  * by Hydra instances and loaned up to the upper layers.
238  */
239 uint32_t hxge_mblks_pending = 0;
240 
241 /*
242  * Device register access attributes for PIO.
243  */
244 static ddi_device_acc_attr_t hxge_dev_reg_acc_attr = {
245 	DDI_DEVICE_ATTR_V0,
246 	DDI_STRUCTURE_LE_ACC,
247 	DDI_STRICTORDER_ACC,
248 };
249 
250 /*
251  * Device descriptor access attributes for DMA.
252  */
253 static ddi_device_acc_attr_t hxge_dev_desc_dma_acc_attr = {
254 	DDI_DEVICE_ATTR_V0,
255 	DDI_STRUCTURE_LE_ACC,
256 	DDI_STRICTORDER_ACC
257 };
258 
259 /*
260  * Device buffer access attributes for DMA.
261  */
262 static ddi_device_acc_attr_t hxge_dev_buf_dma_acc_attr = {
263 	DDI_DEVICE_ATTR_V0,
264 	DDI_STRUCTURE_BE_ACC,
265 	DDI_STRICTORDER_ACC
266 };
267 
268 ddi_dma_attr_t hxge_desc_dma_attr = {
269 	DMA_ATTR_V0,		/* version number. */
270 	0,			/* low address */
271 	0xffffffffffffffff,	/* high address */
272 	0xffffffffffffffff,	/* address counter max */
273 	0x100000,		/* alignment */
274 	0xfc00fc,		/* dlim_burstsizes */
275 	0x1,			/* minimum transfer size */
276 	0xffffffffffffffff,	/* maximum transfer size */
277 	0xffffffffffffffff,	/* maximum segment size */
278 	1,			/* scatter/gather list length */
279 	(unsigned int)1,	/* granularity */
280 	0			/* attribute flags */
281 };
282 
283 ddi_dma_attr_t hxge_tx_dma_attr = {
284 	DMA_ATTR_V0,		/* version number. */
285 	0,			/* low address */
286 	0xffffffffffffffff,	/* high address */
287 	0xffffffffffffffff,	/* address counter max */
288 #if defined(_BIG_ENDIAN)
289 	0x2000,			/* alignment */
290 #else
291 	0x1000,			/* alignment */
292 #endif
293 	0xfc00fc,		/* dlim_burstsizes */
294 	0x1,			/* minimum transfer size */
295 	0xffffffffffffffff,	/* maximum transfer size */
296 	0xffffffffffffffff,	/* maximum segment size */
297 	5,			/* scatter/gather list length */
298 	(unsigned int)1,	/* granularity */
299 	0			/* attribute flags */
300 };
301 
302 ddi_dma_attr_t hxge_rx_dma_attr = {
303 	DMA_ATTR_V0,		/* version number. */
304 	0,			/* low address */
305 	0xffffffffffffffff,	/* high address */
306 	0xffffffffffffffff,	/* address counter max */
307 	0x10000,		/* alignment */
308 	0xfc00fc,		/* dlim_burstsizes */
309 	0x1,			/* minimum transfer size */
310 	0xffffffffffffffff,	/* maximum transfer size */
311 	0xffffffffffffffff,	/* maximum segment size */
312 	1,			/* scatter/gather list length */
313 	(unsigned int)1,	/* granularity */
314 	DDI_DMA_RELAXED_ORDERING /* attribute flags */
315 };
316 
317 ddi_dma_lim_t hxge_dma_limits = {
318 	(uint_t)0,		/* dlim_addr_lo */
319 	(uint_t)0xffffffff,	/* dlim_addr_hi */
320 	(uint_t)0xffffffff,	/* dlim_cntr_max */
321 	(uint_t)0xfc00fc,	/* dlim_burstsizes for 32 and 64 bit xfers */
322 	0x1,			/* dlim_minxfer */
323 	1024			/* dlim_speed */
324 };
325 
326 dma_method_t hxge_force_dma = DVMA;
327 
328 /*
329  * dma chunk sizes.
330  *
331  * Try to allocate the largest possible size
332  * so that fewer DMA chunks need to be managed.
333  */
334 size_t alloc_sizes[] = {
335     0x1000, 0x2000, 0x4000, 0x8000,
336     0x10000, 0x20000, 0x40000, 0x80000,
337     0x100000, 0x200000, 0x400000, 0x800000, 0x1000000
338 };
339 
340 /*
341  * Translate "dev_t" to a pointer to the associated "dev_info_t".
342  */
343 static int
344 hxge_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
345 {
346 	p_hxge_t	hxgep = NULL;
347 	int		instance;
348 	int		status = DDI_SUCCESS;
349 
350 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_attach"));
351 
352 	/*
353 	 * Get the device instance since we'll need to set up or retrieve a soft
354 	 * state for this instance.
355 	 */
356 	instance = ddi_get_instance(dip);
357 
358 	switch (cmd) {
359 	case DDI_ATTACH:
360 		HXGE_DEBUG_MSG((hxgep, DDI_CTL, "doing DDI_ATTACH"));
361 		break;
362 
363 	case DDI_RESUME:
364 		HXGE_DEBUG_MSG((hxgep, DDI_CTL, "doing DDI_RESUME"));
365 		hxgep = (p_hxge_t)ddi_get_soft_state(hxge_list, instance);
366 		if (hxgep == NULL) {
367 			status = DDI_FAILURE;
368 			break;
369 		}
370 		if (hxgep->dip != dip) {
371 			status = DDI_FAILURE;
372 			break;
373 		}
374 		if (hxgep->suspended == DDI_PM_SUSPEND) {
375 			status = ddi_dev_is_needed(hxgep->dip, 0, 1);
376 		} else {
377 			(void) hxge_resume(hxgep);
378 		}
379 		goto hxge_attach_exit;
380 
381 	case DDI_PM_RESUME:
382 		HXGE_DEBUG_MSG((hxgep, DDI_CTL, "doing DDI_PM_RESUME"));
383 		hxgep = (p_hxge_t)ddi_get_soft_state(hxge_list, instance);
384 		if (hxgep == NULL) {
385 			status = DDI_FAILURE;
386 			break;
387 		}
388 		if (hxgep->dip != dip) {
389 			status = DDI_FAILURE;
390 			break;
391 		}
392 		(void) hxge_resume(hxgep);
393 		goto hxge_attach_exit;
394 
395 	default:
396 		HXGE_DEBUG_MSG((hxgep, DDI_CTL, "doing unknown"));
397 		status = DDI_FAILURE;
398 		goto hxge_attach_exit;
399 	}
400 
401 	if (ddi_soft_state_zalloc(hxge_list, instance) == DDI_FAILURE) {
402 		status = DDI_FAILURE;
403 		HXGE_ERROR_MSG((hxgep, DDI_CTL,
404 		    "ddi_soft_state_zalloc failed"));
405 		goto hxge_attach_exit;
406 	}
407 
408 	hxgep = ddi_get_soft_state(hxge_list, instance);
409 	if (hxgep == NULL) {
410 		status = HXGE_ERROR;
411 		HXGE_ERROR_MSG((hxgep, DDI_CTL,
412 		    "ddi_get_soft_state failed"));
413 		goto hxge_attach_fail2;
414 	}
415 
416 	hxgep->drv_state = 0;
417 	hxgep->dip = dip;
418 	hxgep->instance = instance;
419 	hxgep->p_dip = ddi_get_parent(dip);
420 	hxgep->hxge_debug_level = hxge_debug_level;
421 	hpi_debug_level = hxge_debug_level;
422 
423 	hxge_fm_init(hxgep, &hxge_dev_reg_acc_attr, &hxge_dev_desc_dma_acc_attr,
424 	    &hxge_rx_dma_attr);
425 
426 	status = hxge_map_regs(hxgep);
427 	if (status != HXGE_OK) {
428 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "hxge_map_regs failed"));
429 		goto hxge_attach_fail3;
430 	}
431 
432 	status = hxge_init_common_dev(hxgep);
433 	if (status != HXGE_OK) {
434 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
435 		    "hxge_init_common_dev failed"));
436 		goto hxge_attach_fail4;
437 	}
438 
439 	/*
440 	 * Setup the Ndd parameters for this instance.
441 	 */
442 	hxge_init_param(hxgep);
443 
444 	/*
445 	 * Setup Register Tracing Buffer.
446 	 */
447 	hpi_rtrace_buf_init((rtrace_t *)&hpi_rtracebuf);
448 
449 	/* init stats ptr */
450 	hxge_init_statsp(hxgep);
451 
452 	status = hxge_setup_mutexes(hxgep);
453 	if (status != HXGE_OK) {
454 		HXGE_DEBUG_MSG((hxgep, DDI_CTL, "set mutex failed"));
455 		goto hxge_attach_fail;
456 	}
457 
458 	status = hxge_get_config_properties(hxgep);
459 	if (status != HXGE_OK) {
460 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "get_hw create failed"));
461 		goto hxge_attach_fail;
462 	}
463 
464 	/*
465 	 * Setup the Kstats for the driver.
466 	 */
467 	hxge_setup_kstats(hxgep);
468 	hxge_setup_param(hxgep);
469 
470 	status = hxge_setup_system_dma_pages(hxgep);
471 	if (status != HXGE_OK) {
472 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "set dma page failed"));
473 		goto hxge_attach_fail;
474 	}
475 
476 	hxge_hw_id_init(hxgep);
477 	hxge_hw_init_niu_common(hxgep);
478 
479 	status = hxge_setup_dev(hxgep);
480 	if (status != DDI_SUCCESS) {
481 		HXGE_DEBUG_MSG((hxgep, DDI_CTL, "set dev failed"));
482 		goto hxge_attach_fail;
483 	}
484 
485 	status = hxge_add_intrs(hxgep);
486 	if (status != DDI_SUCCESS) {
487 		HXGE_DEBUG_MSG((hxgep, DDI_CTL, "add_intr failed"));
488 		goto hxge_attach_fail;
489 	}
490 
491 	status = hxge_add_soft_intrs(hxgep);
492 	if (status != DDI_SUCCESS) {
493 		HXGE_DEBUG_MSG((hxgep, HXGE_ERR_CTL, "add_soft_intr failed"));
494 		goto hxge_attach_fail;
495 	}
496 
497 	/*
498 	 * Enable interrupts.
499 	 */
500 	hxge_intrs_enable(hxgep);
501 
502 	if ((status = hxge_mac_register(hxgep)) != HXGE_OK) {
503 		HXGE_DEBUG_MSG((hxgep, DDI_CTL,
504 		    "unable to register to mac layer (%d)", status));
505 		goto hxge_attach_fail;
506 	}
507 	mac_link_update(hxgep->mach, LINK_STATE_UNKNOWN);
508 	hxgep->timeout.link_status = 0;
509 	hxgep->timeout.ticks = drv_usectohz(2 * 1000000);
510 
511 	/* Start the link status timer to check the link status */
512 	MUTEX_ENTER(&hxgep->timeout.lock);
513 	hxgep->timeout.id = timeout(hxge_link_poll, (void *)hxgep,
514 	    hxgep->timeout.ticks);
515 	MUTEX_EXIT(&hxgep->timeout.lock);
516 
517 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "registered to mac (instance %d)",
518 	    instance));
519 
520 	goto hxge_attach_exit;
521 
522 hxge_attach_fail:
523 	hxge_unattach(hxgep);
524 	goto hxge_attach_fail1;
525 
526 hxge_attach_fail5:
527 	/*
528 	 * Tear down the ndd parameters setup.
529 	 */
530 	hxge_destroy_param(hxgep);
531 
532 	/*
533 	 * Tear down the kstat setup.
534 	 */
535 	hxge_destroy_kstats(hxgep);
536 
537 hxge_attach_fail4:
538 	if (hxgep->hxge_hw_p) {
539 		hxge_uninit_common_dev(hxgep);
540 		hxgep->hxge_hw_p = NULL;
541 	}
542 hxge_attach_fail3:
543 	/*
544 	 * Unmap the register setup.
545 	 */
546 	hxge_unmap_regs(hxgep);
547 
548 	hxge_fm_fini(hxgep);
549 
550 hxge_attach_fail2:
551 	ddi_soft_state_free(hxge_list, hxgep->instance);
552 
553 hxge_attach_fail1:
554 	if (status != HXGE_OK)
555 		status = (HXGE_ERROR | HXGE_DDI_FAILED);
556 	hxgep = NULL;
557 
558 hxge_attach_exit:
559 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_attach status = 0x%08x",
560 	    status));
561 
562 	return (status);
563 }
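
/*
 * For reference, the 2 second link poll armed in hxge_attach() follows
 * the standard timeout(9F) pattern: drv_usectohz(2 * 1000000) converts
 * 2,000,000 microseconds into clock ticks (200 ticks at the default
 * 100Hz), and untimeout(9F) cancels the callout on detach.
 */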
564 
565 static int
566 hxge_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
567 {
568 	int		status = DDI_SUCCESS;
569 	int		instance;
570 	p_hxge_t	hxgep = NULL;
571 
572 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_detach"));
573 	instance = ddi_get_instance(dip);
574 	hxgep = ddi_get_soft_state(hxge_list, instance);
575 	if (hxgep == NULL) {
576 		status = DDI_FAILURE;
577 		goto hxge_detach_exit;
578 	}
579 
580 	switch (cmd) {
581 	case DDI_DETACH:
582 		HXGE_DEBUG_MSG((hxgep, DDI_CTL, "doing DDI_DETACH"));
583 		break;
584 
585 	case DDI_PM_SUSPEND:
586 		HXGE_DEBUG_MSG((hxgep, DDI_CTL, "doing DDI_PM_SUSPEND"));
587 		hxgep->suspended = DDI_PM_SUSPEND;
588 		hxge_suspend(hxgep);
589 		break;
590 
591 	case DDI_SUSPEND:
592 		HXGE_DEBUG_MSG((hxgep, DDI_CTL, "doing DDI_SUSPEND"));
593 		if (hxgep->suspended != DDI_PM_SUSPEND) {
594 			hxgep->suspended = DDI_SUSPEND;
595 			hxge_suspend(hxgep);
596 		}
597 		break;
598 
599 	default:
600 		status = DDI_FAILURE;
601 		break;
602 	}
603 
604 	if (cmd != DDI_DETACH)
605 		goto hxge_detach_exit;
606 
607 	/*
608 	 * Stop the xcvr polling.
609 	 */
610 	hxgep->suspended = cmd;
611 
612 	/* Stop the link status timer before unregistering */
613 	MUTEX_ENTER(&hxgep->timeout.lock);
614 	if (hxgep->timeout.id)
615 		(void) untimeout(hxgep->timeout.id);
616 	MUTEX_EXIT(&hxgep->timeout.lock);
617 
618 	if (hxgep->mach && (status = mac_unregister(hxgep->mach)) != 0) {
619 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
620 		    "<== hxge_detach status = 0x%08X", status));
621 		return (DDI_FAILURE);
622 	}
623 	HXGE_DEBUG_MSG((hxgep, DDI_CTL,
624 	    "<== hxge_detach (mac_unregister) status = 0x%08X", status));
625 
626 	hxge_unattach(hxgep);
627 	hxgep = NULL;
628 
629 hxge_detach_exit:
630 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_detach status = 0x%08X",
631 	    status));
632 
633 	return (status);
634 }
635 
636 static void
637 hxge_unattach(p_hxge_t hxgep)
638 {
639 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_unattach"));
640 
641 	if (hxgep == NULL || hxgep->dev_regs == NULL) {
642 		return;
643 	}
644 
645 	if (hxgep->hxge_hw_p) {
646 		hxge_uninit_common_dev(hxgep);
647 		hxgep->hxge_hw_p = NULL;
648 	}
649 
650 	if (hxgep->hxge_timerid) {
651 		hxge_stop_timer(hxgep, hxgep->hxge_timerid);
652 		hxgep->hxge_timerid = 0;
653 	}
654 
655 	/* Stop any further interrupts. */
656 	hxge_remove_intrs(hxgep);
657 
658 	/* Remove soft interrupts */
659 	hxge_remove_soft_intrs(hxgep);
660 
661 	/* Stop the device and free resources. */
662 	hxge_destroy_dev(hxgep);
663 
664 	/* Tear down the ndd parameters setup. */
665 	hxge_destroy_param(hxgep);
666 
667 	/* Tear down the kstat setup. */
668 	hxge_destroy_kstats(hxgep);
669 
670 	/*
671 	 * Remove the list of ndd parameters which were setup during attach.
672 	 */
673 	if (hxgep->dip) {
674 		HXGE_DEBUG_MSG((hxgep, OBP_CTL,
675 		    " hxge_unattach: remove all properties"));
676 		(void) ddi_prop_remove_all(hxgep->dip);
677 	}
678 
679 	/*
680 	 * Reset RDC, TDC, PFC, and VMAC blocks from PEU to clear any
681 	 * previous state before unmapping the registers.
682 	 */
683 	HXGE_REG_WR32(hxgep->hpi_handle, BLOCK_RESET, 0x0000001E);
684 	HXGE_DELAY(1000);
685 
686 	/*
687 	 * Unmap the register setup.
688 	 */
689 	hxge_unmap_regs(hxgep);
690 
691 	hxge_fm_fini(hxgep);
692 
693 	/* Destroy all mutexes.  */
694 	hxge_destroy_mutexes(hxgep);
695 
696 	/*
697 	 * Free the soft state data structures allocated with this instance.
698 	 */
699 	ddi_soft_state_free(hxge_list, hxgep->instance);
700 
701 	HXGE_DEBUG_MSG((NULL, DDI_CTL, "<== hxge_unattach"));
702 }
703 
704 static hxge_status_t
705 hxge_map_regs(p_hxge_t hxgep)
706 {
707 	int		ddi_status = DDI_SUCCESS;
708 	p_dev_regs_t	dev_regs;
709 
710 #ifdef	HXGE_DEBUG
711 	char		*sysname;
712 #endif
713 
714 	off_t		regsize;
715 	hxge_status_t	status = HXGE_OK;
716 	int		nregs;
717 
718 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_map_regs"));
719 
720 	if (ddi_dev_nregs(hxgep->dip, &nregs) != DDI_SUCCESS)
721 		return (HXGE_ERROR);
722 
723 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "hxge_map_regs: nregs: %d", nregs));
724 
725 	hxgep->dev_regs = NULL;
726 	dev_regs = KMEM_ZALLOC(sizeof (dev_regs_t), KM_SLEEP);
727 	dev_regs->hxge_regh = NULL;
728 	dev_regs->hxge_pciregh = NULL;
729 	dev_regs->hxge_msix_regh = NULL;
730 
731 	(void) ddi_dev_regsize(hxgep->dip, 0, &regsize);
732 	HXGE_DEBUG_MSG((hxgep, DDI_CTL,
733 	    "hxge_map_regs: pci config size 0x%x", regsize));
734 
735 	ddi_status = ddi_regs_map_setup(hxgep->dip, 0,
736 	    (caddr_t *)&(dev_regs->hxge_pciregp), 0, 0,
737 	    &hxge_dev_reg_acc_attr, &dev_regs->hxge_pciregh);
738 	if (ddi_status != DDI_SUCCESS) {
739 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
740 		    "ddi_map_regs, hxge bus config regs failed"));
741 		goto hxge_map_regs_fail0;
742 	}
743 
744 	HXGE_DEBUG_MSG((hxgep, DDI_CTL,
745 	    "hxge_map_reg: PCI config addr 0x%0llx handle 0x%0llx",
746 	    dev_regs->hxge_pciregp,
747 	    dev_regs->hxge_pciregh));
748 
749 	(void) ddi_dev_regsize(hxgep->dip, 1, &regsize);
750 	HXGE_DEBUG_MSG((hxgep, DDI_CTL,
751 	    "hxge_map_regs: pio size 0x%x", regsize));
752 
753 	/* set up the device mapped register */
754 	ddi_status = ddi_regs_map_setup(hxgep->dip, 1,
755 	    (caddr_t *)&(dev_regs->hxge_regp), 0, 0,
756 	    &hxge_dev_reg_acc_attr, &dev_regs->hxge_regh);
757 
758 	if (ddi_status != DDI_SUCCESS) {
759 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
760 		    "ddi_map_regs for Hydra global reg failed"));
761 		goto hxge_map_regs_fail1;
762 	}
763 
764 	/* set up the msi/msi-x mapped register */
765 	(void) ddi_dev_regsize(hxgep->dip, 2, &regsize);
766 	HXGE_DEBUG_MSG((hxgep, DDI_CTL,
767 	    "hxge_map_regs: msix size 0x%x", regsize));
768 
769 	ddi_status = ddi_regs_map_setup(hxgep->dip, 2,
770 	    (caddr_t *)&(dev_regs->hxge_msix_regp), 0, 0,
771 	    &hxge_dev_reg_acc_attr, &dev_regs->hxge_msix_regh);
772 
773 	if (ddi_status != DDI_SUCCESS) {
774 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
775 		    "ddi_map_regs for msi reg failed"));
776 		goto hxge_map_regs_fail2;
777 	}
778 
779 	hxgep->dev_regs = dev_regs;
780 
781 	HPI_PCI_ACC_HANDLE_SET(hxgep, dev_regs->hxge_pciregh);
782 	HPI_PCI_ADD_HANDLE_SET(hxgep, (hpi_reg_ptr_t)dev_regs->hxge_pciregp);
783 	HPI_MSI_ACC_HANDLE_SET(hxgep, dev_regs->hxge_msix_regh);
784 	HPI_MSI_ADD_HANDLE_SET(hxgep, (hpi_reg_ptr_t)dev_regs->hxge_msix_regp);
785 
786 	HPI_ACC_HANDLE_SET(hxgep, dev_regs->hxge_regh);
787 	HPI_ADD_HANDLE_SET(hxgep, (hpi_reg_ptr_t)dev_regs->hxge_regp);
788 
789 	HPI_REG_ACC_HANDLE_SET(hxgep, dev_regs->hxge_regh);
790 	HPI_REG_ADD_HANDLE_SET(hxgep, (hpi_reg_ptr_t)dev_regs->hxge_regp);
791 
792 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "hxge_map_reg: hardware addr 0x%0llx "
793 	    " handle 0x%0llx", dev_regs->hxge_regp, dev_regs->hxge_regh));
794 
795 	goto hxge_map_regs_exit;
796 
797 hxge_map_regs_fail3:
798 	if (dev_regs->hxge_msix_regh) {
799 		ddi_regs_map_free(&dev_regs->hxge_msix_regh);
800 	}
801 
802 hxge_map_regs_fail2:
803 	if (dev_regs->hxge_regh) {
804 		ddi_regs_map_free(&dev_regs->hxge_regh);
805 	}
806 
807 hxge_map_regs_fail1:
808 	if (dev_regs->hxge_pciregh) {
809 		ddi_regs_map_free(&dev_regs->hxge_pciregh);
810 	}
811 
812 hxge_map_regs_fail0:
813 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "Freeing register set memory"));
814 	kmem_free(dev_regs, sizeof (dev_regs_t));
815 
816 hxge_map_regs_exit:
817 	if (ddi_status != DDI_SUCCESS)
818 		status |= (HXGE_ERROR | HXGE_DDI_FAILED);
819 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_map_regs"));
820 	return (status);
821 }
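
/*
 * For reference: the three register sets mapped above come from the
 * device's "reg" property; per the debug messages in hxge_map_regs(),
 * set 0 is PCI config space, set 1 the device PIO registers, and
 * set 2 the MSI-X table.
 */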
822 
823 static void
824 hxge_unmap_regs(p_hxge_t hxgep)
825 {
826 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_unmap_regs"));
827 	if (hxgep->dev_regs) {
828 		if (hxgep->dev_regs->hxge_pciregh) {
829 			HXGE_DEBUG_MSG((hxgep, DDI_CTL,
830 			    "==> hxge_unmap_regs: bus"));
831 			ddi_regs_map_free(&hxgep->dev_regs->hxge_pciregh);
832 			hxgep->dev_regs->hxge_pciregh = NULL;
833 		}
834 
835 		if (hxgep->dev_regs->hxge_regh) {
836 			HXGE_DEBUG_MSG((hxgep, DDI_CTL,
837 			    "==> hxge_unmap_regs: device registers"));
838 			ddi_regs_map_free(&hxgep->dev_regs->hxge_regh);
839 			hxgep->dev_regs->hxge_regh = NULL;
840 		}
841 
842 		if (hxgep->dev_regs->hxge_msix_regh) {
843 			HXGE_DEBUG_MSG((hxgep, DDI_CTL,
844 			    "==> hxge_unmap_regs: device interrupts"));
845 			ddi_regs_map_free(&hxgep->dev_regs->hxge_msix_regh);
846 			hxgep->dev_regs->hxge_msix_regh = NULL;
847 		}
848 		kmem_free(hxgep->dev_regs, sizeof (dev_regs_t));
849 		hxgep->dev_regs = NULL;
850 	}
851 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_unmap_regs"));
852 }
853 
854 static hxge_status_t
855 hxge_setup_mutexes(p_hxge_t hxgep)
856 {
857 	int		ddi_status = DDI_SUCCESS;
858 	hxge_status_t	status = HXGE_OK;
859 
860 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_setup_mutexes"));
861 
862 	/*
863 	 * Get the interrupt cookie so the mutexes can be initialized.
864 	 */
865 	ddi_status = ddi_get_iblock_cookie(hxgep->dip, 0,
866 	    &hxgep->interrupt_cookie);
867 
868 	if (ddi_status != DDI_SUCCESS) {
869 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
870 		    "<== hxge_setup_mutexes: failed 0x%x", ddi_status));
871 		goto hxge_setup_mutexes_exit;
872 	}
873 
874 	/*
875 	 * Initialize mutexes for this device.
876 	 */
877 	MUTEX_INIT(hxgep->genlock, NULL,
878 	    MUTEX_DRIVER, (void *) hxgep->interrupt_cookie);
879 	MUTEX_INIT(&hxgep->ouraddr_lock, NULL,
880 	    MUTEX_DRIVER, (void *) hxgep->interrupt_cookie);
881 	RW_INIT(&hxgep->filter_lock, NULL,
882 	    RW_DRIVER, (void *) hxgep->interrupt_cookie);
883 	MUTEX_INIT(&hxgep->pio_lock, NULL,
884 	    MUTEX_DRIVER, (void *) hxgep->interrupt_cookie);
885 	MUTEX_INIT(&hxgep->timeout.lock, NULL,
886 	    MUTEX_DRIVER, (void *) hxgep->interrupt_cookie);
887 
888 hxge_setup_mutexes_exit:
889 	HXGE_DEBUG_MSG((hxgep, DDI_CTL,
890 	    "<== hxge_setup_mutexes status = %x", status));
891 
892 	if (ddi_status != DDI_SUCCESS)
893 		status |= (HXGE_ERROR | HXGE_DDI_FAILED);
894 
895 	return (status);
896 }
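
/*
 * A minimal sketch of what the MUTEX_INIT wrapper above presumably
 * expands to (mutex_init(9F)): passing the interrupt block cookie
 * makes each lock safe to acquire from this device's interrupt
 * handler.
 */
#if 0
	mutex_init(&hxgep->ouraddr_lock, NULL, MUTEX_DRIVER,
	    (void *)hxgep->interrupt_cookie);
#endif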
897 
898 static void
899 hxge_destroy_mutexes(p_hxge_t hxgep)
900 {
901 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_destroy_mutexes"));
902 	RW_DESTROY(&hxgep->filter_lock);
903 	MUTEX_DESTROY(&hxgep->ouraddr_lock);
904 	MUTEX_DESTROY(hxgep->genlock);
905 	MUTEX_DESTROY(&hxgep->pio_lock);
906 	MUTEX_DESTROY(&hxgep->timeout.lock);
907 
908 	if (hxge_debug_init == 1) {
909 		MUTEX_DESTROY(&hxgedebuglock);
910 		hxge_debug_init = 0;
911 	}
912 
913 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_destroy_mutexes"));
914 }
915 
916 hxge_status_t
917 hxge_init(p_hxge_t hxgep)
918 {
919 	hxge_status_t status = HXGE_OK;
920 
921 	HXGE_DEBUG_MSG((hxgep, STR_CTL, "==> hxge_init"));
922 
923 	if (hxgep->drv_state & STATE_HW_INITIALIZED) {
924 		return (status);
925 	}
926 
927 	/*
928 	 * Allocate system memory for the receive/transmit buffer blocks and
929 	 * receive/transmit descriptor rings.
930 	 */
931 	status = hxge_alloc_mem_pool(hxgep);
932 	if (status != HXGE_OK) {
933 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "alloc mem failed\n"));
934 		goto hxge_init_fail1;
935 	}
936 
937 	/*
938 	 * Initialize and enable TXDMA channels.
939 	 */
940 	status = hxge_init_txdma_channels(hxgep);
941 	if (status != HXGE_OK) {
942 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "init txdma failed\n"));
943 		goto hxge_init_fail3;
944 	}
945 
946 	/*
947 	 * Initialize and enable RXDMA channels.
948 	 */
949 	status = hxge_init_rxdma_channels(hxgep);
950 	if (status != HXGE_OK) {
951 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "init rxdma failed\n"));
952 		goto hxge_init_fail4;
953 	}
954 
955 	/*
956 	 * Initialize TCAM
957 	 */
958 	status = hxge_classify_init(hxgep);
959 	if (status != HXGE_OK) {
960 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "init classify failed\n"));
961 		goto hxge_init_fail5;
962 	}
963 
964 	/*
965 	 * Initialize the VMAC block.
966 	 */
967 	status = hxge_vmac_init(hxgep);
968 	if (status != HXGE_OK) {
969 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "init MAC failed\n"));
970 		goto hxge_init_fail5;
971 	}
972 
973 	/* Bringup - this may be unnecessary when PXE and FCODE available */
974 	status = hxge_pfc_set_default_mac_addr(hxgep);
975 	if (status != HXGE_OK) {
976 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
977 		    "Default Address Failure\n"));
978 		goto hxge_init_fail5;
979 	}
980 
981 	hxge_intrs_enable(hxgep);
982 
983 	/*
984 	 * Enable hardware interrupts.
985 	 */
986 	hxge_intr_hw_enable(hxgep);
987 	hxgep->drv_state |= STATE_HW_INITIALIZED;
988 
989 	goto hxge_init_exit;
990 
991 hxge_init_fail5:
992 	hxge_uninit_rxdma_channels(hxgep);
993 hxge_init_fail4:
994 	hxge_uninit_txdma_channels(hxgep);
995 hxge_init_fail3:
996 	hxge_free_mem_pool(hxgep);
997 hxge_init_fail1:
998 	HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
999 	    "<== hxge_init status (failed) = 0x%08x", status));
1000 	return (status);
1001 
1002 hxge_init_exit:
1003 
1004 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_init status = 0x%08x",
1005 	    status));
1006 
1007 	return (status);
1008 }
1009 
1010 timeout_id_t
1011 hxge_start_timer(p_hxge_t hxgep, fptrv_t func, int msec)
1012 {
1013 	if ((hxgep->suspended == 0) || (hxgep->suspended == DDI_RESUME)) {
1014 		return (timeout(func, (caddr_t)hxgep,
1015 		    drv_usectohz(1000 * msec)));
1016 	}
1017 	return (NULL);
1018 }
1019 
1020 /*ARGSUSED*/
1021 void
1022 hxge_stop_timer(p_hxge_t hxgep, timeout_id_t timerid)
1023 {
1024 	if (timerid) {
1025 		(void) untimeout(timerid);
1026 	}
1027 }
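
/*
 * Usage sketch for the pair above (my_poll_func is hypothetical):
 */
#if 0
	timeout_id_t tid = hxge_start_timer(hxgep, my_poll_func, 1000);
	/* ... later ... */
	hxge_stop_timer(hxgep, tid);
#endif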
1028 
1029 void
1030 hxge_uninit(p_hxge_t hxgep)
1031 {
1032 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_uninit"));
1033 
1034 	if (!(hxgep->drv_state & STATE_HW_INITIALIZED)) {
1035 		HXGE_DEBUG_MSG((hxgep, DDI_CTL,
1036 		    "==> hxge_uninit: not initialized"));
1037 		HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_uninit"));
1038 		return;
1039 	}
1040 
1041 	/* Stop timer */
1042 	if (hxgep->hxge_timerid) {
1043 		hxge_stop_timer(hxgep, hxgep->hxge_timerid);
1044 		hxgep->hxge_timerid = 0;
1045 	}
1046 
1047 	(void) hxge_intr_hw_disable(hxgep);
1048 
1049 	/* Reset the receive VMAC side.  */
1050 	(void) hxge_rx_vmac_disable(hxgep);
1051 
1052 	/* Free classification resources */
1053 	(void) hxge_classify_uninit(hxgep);
1054 
1055 	/* Reset the transmit/receive DMA side.  */
1056 	(void) hxge_txdma_hw_mode(hxgep, HXGE_DMA_STOP);
1057 	(void) hxge_rxdma_hw_mode(hxgep, HXGE_DMA_STOP);
1058 
1059 	hxge_uninit_txdma_channels(hxgep);
1060 	hxge_uninit_rxdma_channels(hxgep);
1061 
1062 	/* Reset the transmit VMAC side.  */
1063 	(void) hxge_tx_vmac_disable(hxgep);
1064 
1065 	hxge_free_mem_pool(hxgep);
1066 
1067 	hxgep->drv_state &= ~STATE_HW_INITIALIZED;
1068 
1069 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_uninit"));
1070 }
1071 
1072 void
1073 hxge_get64(p_hxge_t hxgep, p_mblk_t mp)
1074 {
1075 #if defined(__i386)
1076 	size_t		reg;
1077 #else
1078 	uint64_t	reg;
1079 #endif
1080 	uint64_t	regdata;
1081 	int		i, retry;
1082 
1083 	bcopy((char *)mp->b_rptr, (char *)&reg, sizeof (uint64_t));
1084 	regdata = 0;
1085 	retry = 1;
1086 
1087 	for (i = 0; i < retry; i++) {
1088 		HXGE_REG_RD64(hxgep->hpi_handle, reg, &regdata);
1089 	}
1090 	bcopy((char *)&regdata, (char *)mp->b_rptr, sizeof (uint64_t));
1091 }
1092 
1093 void
1094 hxge_put64(p_hxge_t hxgep, p_mblk_t mp)
1095 {
1096 #if defined(__i386)
1097 	size_t		reg;
1098 #else
1099 	uint64_t	reg;
1100 #endif
1101 	uint64_t	buf[2];
1102 
1103 	bcopy((char *)mp->b_rptr, (char *)&buf[0], 2 * sizeof (uint64_t));
1104 #if defined(__i386)
1105 	reg = (size_t)buf[0];
1106 #else
1107 	reg = buf[0];
1108 #endif
1109 
1110 	HXGE_HPI_PIO_WRITE64(hxgep->hpi_handle, reg, buf[1]);
1111 }
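
/*
 * The two helpers above implement a simple peek/poke protocol over an
 * mblk: hxge_get64() reads an 8 byte register offset from b_rptr and
 * overwrites it in place with the register value; hxge_put64() expects
 * { offset, value } back to back. A caller-side sketch built with
 * allocb(9F) (assumed usage; reg_offset and reg_value are
 * hypothetical):
 */
#if 0
	uint64_t req[2] = { reg_offset, reg_value };
	mblk_t *mp = allocb(sizeof (req), BPRI_MED);

	bcopy(req, mp->b_rptr, sizeof (req));
	mp->b_wptr += sizeof (req);
	hxge_put64(hxgep, mp);
#endif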
1112 
1113 /*ARGSUSED*/
1114 /*VARARGS*/
1115 void
1116 hxge_debug_msg(p_hxge_t hxgep, uint64_t level, char *fmt, ...)
1117 {
1118 	char		msg_buffer[1048];
1119 	char		prefix_buffer[32];
1120 	int		instance;
1121 	uint64_t	debug_level;
1122 	int		cmn_level = CE_CONT;
1123 	va_list		ap;
1124 
1125 	debug_level = (hxgep == NULL) ? hxge_debug_level :
1126 	    hxgep->hxge_debug_level;
1127 
1128 	if ((level & debug_level) || (level == HXGE_NOTE) ||
1129 	    (level == HXGE_ERR_CTL)) {
1130 		/* do the msg processing */
1131 		if (hxge_debug_init == 0) {
1132 			MUTEX_INIT(&hxgedebuglock, NULL, MUTEX_DRIVER, NULL);
1133 			hxge_debug_init = 1;
1134 		}
1135 
1136 		MUTEX_ENTER(&hxgedebuglock);
1137 
1138 		if ((level & HXGE_NOTE)) {
1139 			cmn_level = CE_NOTE;
1140 		}
1141 
1142 		if (level & HXGE_ERR_CTL) {
1143 			cmn_level = CE_WARN;
1144 		}
1145 
1146 		va_start(ap, fmt);
1147 		(void) vsprintf(msg_buffer, fmt, ap);
1148 		va_end(ap);
1149 
1150 		if (hxgep == NULL) {
1151 			instance = -1;
1152 			(void) sprintf(prefix_buffer, "%s :", "hxge");
1153 		} else {
1154 			instance = hxgep->instance;
1155 			(void) sprintf(prefix_buffer,
1156 			    "%s%d :", "hxge", instance);
1157 		}
1158 
1159 		MUTEX_EXIT(&hxgedebuglock);
1160 		cmn_err(cmn_level, "%s %s\n", prefix_buffer, msg_buffer);
1161 	}
1162 }
1163 
1164 char *
1165 hxge_dump_packet(char *addr, int size)
1166 {
1167 	uchar_t		*ap = (uchar_t *)addr;
1168 	int		i;
1169 	static char	etherbuf[1024];
1170 	char		*cp = etherbuf;
1171 	char		digits[] = "0123456789abcdef";
1172 
1173 	if (!size)
1174 		size = 60;
1175 
1176 	if (size > MAX_DUMP_SZ) {
1177 		/* Dump the leading bytes */
1178 		for (i = 0; i < MAX_DUMP_SZ / 2; i++) {
1179 			if (*ap > 0x0f)
1180 				*cp++ = digits[*ap >> 4];
1181 			*cp++ = digits[*ap++ & 0xf];
1182 			*cp++ = ':';
1183 		}
1184 		for (i = 0; i < 20; i++)
1185 			*cp++ = '.';
1186 		/* Dump the last MAX_DUMP_SZ/2 bytes */
1187 		ap = (uchar_t *)(addr + (size - MAX_DUMP_SZ / 2));
1188 		for (i = 0; i < MAX_DUMP_SZ / 2; i++) {
1189 			if (*ap > 0x0f)
1190 				*cp++ = digits[*ap >> 4];
1191 			*cp++ = digits[*ap++ & 0xf];
1192 			*cp++ = ':';
1193 		}
1194 	} else {
1195 		for (i = 0; i < size; i++) {
1196 			if (*ap > 0x0f)
1197 				*cp++ = digits[*ap >> 4];
1198 			*cp++ = digits[*ap++ & 0xf];
1199 			*cp++ = ':';
1200 		}
1201 	}
1202 	*--cp = 0;
1203 	return (etherbuf);
1204 }
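
/*
 * Note that hxge_dump_packet() returns a pointer to a static buffer,
 * so the result must be consumed before the next call and the routine
 * is not MT-safe. Usage sketch (MBLKL(9F) gives the mblk data length):
 */
#if 0
	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "pkt: %s",
	    hxge_dump_packet((char *)mp->b_rptr, MBLKL(mp))));
#endif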
1205 
1206 static void
1207 hxge_suspend(p_hxge_t hxgep)
1208 {
1209 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_suspend"));
1210 
1211 	hxge_intrs_disable(hxgep);
1212 	hxge_destroy_dev(hxgep);
1213 
1214 	/* Stop the link status timer */
1215 	MUTEX_ENTER(&hxgep->timeout.lock);
1216 	if (hxgep->timeout.id)
1217 		(void) untimeout(hxgep->timeout.id);
1218 	MUTEX_EXIT(&hxgep->timeout.lock);
1219 
1220 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_suspend"));
1221 }
1222 
1223 static hxge_status_t
1224 hxge_resume(p_hxge_t hxgep)
1225 {
1226 	hxge_status_t status = HXGE_OK;
1227 
1228 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_resume"));
1229 	hxgep->suspended = DDI_RESUME;
1230 
1231 	(void) hxge_rxdma_hw_mode(hxgep, HXGE_DMA_START);
1232 	(void) hxge_txdma_hw_mode(hxgep, HXGE_DMA_START);
1233 
1234 	(void) hxge_rx_vmac_enable(hxgep);
1235 	(void) hxge_tx_vmac_enable(hxgep);
1236 
1237 	hxge_intrs_enable(hxgep);
1238 
1239 	hxgep->suspended = 0;
1240 
1241 	/* Resume the link status timer */
1242 	MUTEX_ENTER(&hxgep->timeout.lock);
1243 	hxgep->timeout.id = timeout(hxge_link_poll, (void *)hxgep,
1244 	    hxgep->timeout.ticks);
1245 	MUTEX_EXIT(&hxgep->timeout.lock);
1246 
1247 	HXGE_DEBUG_MSG((hxgep, DDI_CTL,
1248 	    "<== hxge_resume status = 0x%x", status));
1249 
1250 	return (status);
1251 }
1252 
1253 hxge_status_t
1254 hxge_setup_dev(p_hxge_t hxgep)
1255 {
1256 	hxge_status_t status = HXGE_OK;
1257 
1258 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_setup_dev"));
1259 
1260 	status = hxge_link_init(hxgep);
1261 	if (fm_check_acc_handle(hxgep->dev_regs->hxge_regh) != DDI_FM_OK) {
1262 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
1263 		    "Bad register acc handle"));
1264 		status = HXGE_ERROR;
1265 	}
1266 
1267 	if (status != HXGE_OK) {
1268 		HXGE_DEBUG_MSG((hxgep, MAC_CTL,
1269 		    " hxge_setup_dev status (link init 0x%08x)", status));
1270 		goto hxge_setup_dev_exit;
1271 	}
1272 
1273 hxge_setup_dev_exit:
1274 	HXGE_DEBUG_MSG((hxgep, DDI_CTL,
1275 	    "<== hxge_setup_dev status = 0x%08x", status));
1276 
1277 	return (status);
1278 }
1279 
1280 static void
1281 hxge_destroy_dev(p_hxge_t hxgep)
1282 {
1283 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_destroy_dev"));
1284 
1285 	(void) hxge_hw_stop(hxgep);
1286 
1287 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_destroy_dev"));
1288 }
1289 
1290 static hxge_status_t
1291 hxge_setup_system_dma_pages(p_hxge_t hxgep)
1292 {
1293 	int			ddi_status = DDI_SUCCESS;
1294 	uint_t			count;
1295 	ddi_dma_cookie_t	cookie;
1296 	uint_t			iommu_pagesize;
1297 	hxge_status_t		status = HXGE_OK;
1298 
1299 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_setup_system_dma_pages"));
1300 
1301 	hxgep->sys_page_sz = ddi_ptob(hxgep->dip, (ulong_t)1);
1302 	iommu_pagesize = dvma_pagesize(hxgep->dip);
1303 
1304 	HXGE_DEBUG_MSG((hxgep, DDI_CTL,
1305 	    " hxge_setup_system_dma_pages: page %d (ddi_ptob %d) "
1306 	    " default_block_size %d iommu_pagesize %d",
1307 	    hxgep->sys_page_sz, ddi_ptob(hxgep->dip, (ulong_t)1),
1308 	    hxgep->rx_default_block_size, iommu_pagesize));
1309 
1310 	if (iommu_pagesize != 0) {
1311 		if (hxgep->sys_page_sz == iommu_pagesize) {
1312 			/* Hydra supports up to 8K pages */
1313 			if (iommu_pagesize > 0x2000)
1314 				hxgep->sys_page_sz = 0x2000;
1315 		} else {
1316 			if (hxgep->sys_page_sz > iommu_pagesize)
1317 				hxgep->sys_page_sz = iommu_pagesize;
1318 		}
1319 	}
1320 
1321 	hxgep->sys_page_mask = ~(hxgep->sys_page_sz - 1);
1322 
1323 	HXGE_DEBUG_MSG((hxgep, DDI_CTL,
1324 	    "==> hxge_setup_system_dma_pages: page %d (ddi_ptob %d) "
1325 	    "default_block_size %d page mask %d",
1326 	    hxgep->sys_page_sz, ddi_ptob(hxgep->dip, (ulong_t)1),
1327 	    hxgep->rx_default_block_size, hxgep->sys_page_mask));
1328 
1329 	switch (hxgep->sys_page_sz) {
1330 	default:
1331 		hxgep->sys_page_sz = 0x1000;
1332 		hxgep->sys_page_mask = ~(hxgep->sys_page_sz - 1);
1333 		hxgep->rx_default_block_size = 0x1000;
1334 		hxgep->rx_bksize_code = RBR_BKSIZE_4K;
1335 		break;
1336 	case 0x1000:
1337 		hxgep->rx_default_block_size = 0x1000;
1338 		hxgep->rx_bksize_code = RBR_BKSIZE_4K;
1339 		break;
1340 	case 0x2000:
1341 		hxgep->rx_default_block_size = 0x2000;
1342 		hxgep->rx_bksize_code = RBR_BKSIZE_8K;
1343 		break;
1344 	}
1345 
1346 	hxge_rx_dma_attr.dma_attr_align = hxgep->sys_page_sz;
1347 	hxge_tx_dma_attr.dma_attr_align = hxgep->sys_page_sz;
1348 	hxge_desc_dma_attr.dma_attr_align = hxgep->sys_page_sz;
1349 
1350 	/*
1351 	 * Get the system DMA burst size.
1352 	 */
1353 	ddi_status = ddi_dma_alloc_handle(hxgep->dip, &hxge_tx_dma_attr,
1354 	    DDI_DMA_DONTWAIT, 0, &hxgep->dmasparehandle);
1355 	if (ddi_status != DDI_SUCCESS) {
1356 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
1357 		    "ddi_dma_alloc_handle: failed status 0x%x", ddi_status));
1358 		goto hxge_get_soft_properties_exit;
1359 	}
1360 
1361 	ddi_status = ddi_dma_addr_bind_handle(hxgep->dmasparehandle, NULL,
1362 	    (caddr_t)hxgep->dmasparehandle, sizeof (hxgep->dmasparehandle),
1363 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT, DDI_DMA_DONTWAIT, 0,
1364 	    &cookie, &count);
1365 	if (ddi_status != DDI_DMA_MAPPED) {
1366 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
1367 		    "Binding spare handle to find system burstsize failed."));
1368 		ddi_status = DDI_FAILURE;
1369 		goto hxge_get_soft_properties_fail1;
1370 	}
1371 
1372 	hxgep->sys_burst_sz = ddi_dma_burstsizes(hxgep->dmasparehandle);
1373 	(void) ddi_dma_unbind_handle(hxgep->dmasparehandle);
1374 
1375 hxge_get_soft_properties_fail1:
1376 	ddi_dma_free_handle(&hxgep->dmasparehandle);
1377 
1378 hxge_get_soft_properties_exit:
1379 
1380 	if (ddi_status != DDI_SUCCESS)
1381 		status |= (HXGE_ERROR | HXGE_DDI_FAILED);
1382 
1383 	HXGE_DEBUG_MSG((hxgep, DDI_CTL,
1384 	    "<== hxge_setup_system_dma_pages status = 0x%08x", status));
1385 
1386 	return (status);
1387 }
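
/*
 * Worked example of the mask math above: with sys_page_sz = 0x2000 the
 * mask is ~(0x2000 - 1) = 0xffffffffffffe000, so "addr & sys_page_mask"
 * rounds an address down to its 8K page boundary.
 */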
1388 
1389 hxge_status_t
1390 hxge_alloc_mem_pool(p_hxge_t hxgep)
1391 {
1392 	hxge_status_t status = HXGE_OK;
1393 
1394 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_alloc_mem_pool"));
1395 
1396 	status = hxge_alloc_rx_mem_pool(hxgep);
1397 	if (status != HXGE_OK) {
1398 		return (HXGE_ERROR);
1399 	}
1400 
1401 	status = hxge_alloc_tx_mem_pool(hxgep);
1402 	if (status != HXGE_OK) {
1403 		hxge_free_rx_mem_pool(hxgep);
1404 		return (HXGE_ERROR);
1405 	}
1406 
1407 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_alloc_mem_pool"));
1408 	return (HXGE_OK);
1409 }
1410 
1411 static void
1412 hxge_free_mem_pool(p_hxge_t hxgep)
1413 {
1414 	HXGE_DEBUG_MSG((hxgep, MEM_CTL, "==> hxge_free_mem_pool"));
1415 
1416 	hxge_free_rx_mem_pool(hxgep);
1417 	hxge_free_tx_mem_pool(hxgep);
1418 
1419 	HXGE_DEBUG_MSG((hxgep, MEM_CTL, "<== hxge_free_mem_pool"));
1420 }
1421 
1422 static hxge_status_t
1423 hxge_alloc_rx_mem_pool(p_hxge_t hxgep)
1424 {
1425 	int			i, j;
1426 	uint32_t		ndmas, st_rdc;
1427 	p_hxge_dma_pt_cfg_t	p_all_cfgp;
1428 	p_hxge_hw_pt_cfg_t	p_cfgp;
1429 	p_hxge_dma_pool_t	dma_poolp;
1430 	p_hxge_dma_common_t	*dma_buf_p;
1431 	p_hxge_dma_pool_t	dma_cntl_poolp;
1432 	p_hxge_dma_common_t	*dma_cntl_p;
1433 	size_t			rx_buf_alloc_size;
1434 	size_t			rx_cntl_alloc_size;
1435 	uint32_t		*num_chunks;	/* per dma */
1436 	hxge_status_t		status = HXGE_OK;
1437 
1438 	uint32_t		hxge_port_rbr_size;
1439 	uint32_t		hxge_port_rbr_spare_size;
1440 	uint32_t		hxge_port_rcr_size;
1441 
1442 	HXGE_DEBUG_MSG((hxgep, DMA_CTL, "==> hxge_alloc_rx_mem_pool"));
1443 
1444 	p_all_cfgp = (p_hxge_dma_pt_cfg_t)&hxgep->pt_config;
1445 	p_cfgp = (p_hxge_hw_pt_cfg_t)&p_all_cfgp->hw_config;
1446 	st_rdc = p_cfgp->start_rdc;
1447 	ndmas = p_cfgp->max_rdcs;
1448 
1449 	HXGE_DEBUG_MSG((hxgep, DMA_CTL,
1450 	    " hxge_alloc_rx_mem_pool st_rdc %d ndmas %d", st_rdc, ndmas));
1451 
1452 	/*
1453 	 * Allocate memory for each receive DMA channel.
1454 	 */
1455 	dma_poolp = (p_hxge_dma_pool_t)KMEM_ZALLOC(sizeof (hxge_dma_pool_t),
1456 	    KM_SLEEP);
1457 	dma_buf_p = (p_hxge_dma_common_t *)KMEM_ZALLOC(
1458 	    sizeof (p_hxge_dma_common_t) * ndmas, KM_SLEEP);
1459 
1460 	dma_cntl_poolp = (p_hxge_dma_pool_t)
1461 	    KMEM_ZALLOC(sizeof (hxge_dma_pool_t), KM_SLEEP);
1462 	dma_cntl_p = (p_hxge_dma_common_t *)KMEM_ZALLOC(
1463 	    sizeof (p_hxge_dma_common_t) * ndmas, KM_SLEEP);
1464 
1465 	num_chunks = (uint32_t *)KMEM_ZALLOC(sizeof (uint32_t) * ndmas,
1466 	    KM_SLEEP);
1467 
1468 	/*
1469 	 * Assume that each DMA channel will be configured with the default block
1470 	 * size.  RBR block counts are rounded up to a multiple of the batch count (16).
1471 	 */
1472 	hxge_port_rbr_size = p_all_cfgp->rbr_size;
1473 	hxge_port_rcr_size = p_all_cfgp->rcr_size;
1474 
1475 	if (!hxge_port_rbr_size) {
1476 		hxge_port_rbr_size = HXGE_RBR_RBB_DEFAULT;
1477 	}
1478 
1479 	if (hxge_port_rbr_size % HXGE_RXDMA_POST_BATCH) {
1480 		hxge_port_rbr_size = (HXGE_RXDMA_POST_BATCH *
1481 		    (hxge_port_rbr_size / HXGE_RXDMA_POST_BATCH + 1));
1482 	}
1483 
1484 	p_all_cfgp->rbr_size = hxge_port_rbr_size;
1485 	hxge_port_rbr_spare_size = hxge_rbr_spare_size;
1486 
1487 	if (hxge_port_rbr_spare_size % HXGE_RXDMA_POST_BATCH) {
1488 		hxge_port_rbr_spare_size = (HXGE_RXDMA_POST_BATCH *
1489 		    (hxge_port_rbr_spare_size / HXGE_RXDMA_POST_BATCH + 1));
1490 	}
1491 
1492 	rx_buf_alloc_size = (hxgep->rx_default_block_size *
1493 	    (hxge_port_rbr_size + hxge_port_rbr_spare_size));
1494 
1495 	/*
1496 	 * Addresses of receive block ring, receive completion ring and the
1497 	 * mailbox must all be cache-aligned (64 bytes).
1498 	 */
1499 	rx_cntl_alloc_size = hxge_port_rbr_size + hxge_port_rbr_spare_size;
1500 	rx_cntl_alloc_size *= (sizeof (rx_desc_t));
1501 	rx_cntl_alloc_size += (sizeof (rcr_entry_t) * hxge_port_rcr_size);
1502 	rx_cntl_alloc_size += sizeof (rxdma_mailbox_t);
1503 
1504 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "==> hxge_alloc_rx_mem_pool: "
1505 	    "hxge_port_rbr_size = %d hxge_port_rbr_spare_size = %d "
1506 	    "hxge_port_rcr_size = %d rx_cntl_alloc_size = %d",
1507 	    hxge_port_rbr_size, hxge_port_rbr_spare_size,
1508 	    hxge_port_rcr_size, rx_cntl_alloc_size));
1509 
1510 	hxgep->hxge_port_rbr_size = hxge_port_rbr_size;
1511 	hxgep->hxge_port_rcr_size = hxge_port_rcr_size;
1512 
1513 	/*
1514 	 * Allocate memory for receive buffers and descriptor rings. Replace
1515 	 * allocation functions with interface functions provided by the
1516 	 * partition manager when it is available.
1517 	 */
1518 	/*
1519 	 * Allocate memory for the receive buffer blocks.
1520 	 */
1521 	for (i = 0; i < ndmas; i++) {
1522 		HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
1523 		    " hxge_alloc_rx_mem_pool to alloc mem: "
1524 		    " dma %d dma_buf_p %llx &dma_buf_p %llx",
1525 		    i, dma_buf_p[i], &dma_buf_p[i]));
1526 
1527 		num_chunks[i] = 0;
1528 
1529 		status = hxge_alloc_rx_buf_dma(hxgep, st_rdc, &dma_buf_p[i],
1530 		    rx_buf_alloc_size, hxgep->rx_default_block_size,
1531 		    &num_chunks[i]);
1532 		if (status != HXGE_OK) {
1533 			break;
1534 		}
1535 
1536 		st_rdc++;
1537 		HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
1538 		    " hxge_alloc_rx_mem_pool DONE  alloc mem: "
1539 		    "dma %d dma_buf_p %llx &dma_buf_p %llx", i,
1540 		    dma_buf_p[i], &dma_buf_p[i]));
1541 	}
1542 
1543 	if (i < ndmas) {
1544 		goto hxge_alloc_rx_mem_fail1;
1545 	}
1546 
1547 	/*
1548 	 * Allocate memory for descriptor rings and mailbox.
1549 	 */
1550 	st_rdc = p_cfgp->start_rdc;
1551 	for (j = 0; j < ndmas; j++) {
1552 		status = hxge_alloc_rx_cntl_dma(hxgep, st_rdc, &dma_cntl_p[j],
1553 		    rx_cntl_alloc_size);
1554 		if (status != HXGE_OK) {
1555 			break;
1556 		}
1557 		st_rdc++;
1558 	}
1559 
1560 	if (j < ndmas) {
1561 		goto hxge_alloc_rx_mem_fail2;
1562 	}
1563 
1564 	dma_poolp->ndmas = ndmas;
1565 	dma_poolp->num_chunks = num_chunks;
1566 	dma_poolp->buf_allocated = B_TRUE;
1567 	hxgep->rx_buf_pool_p = dma_poolp;
1568 	dma_poolp->dma_buf_pool_p = dma_buf_p;
1569 
1570 	dma_cntl_poolp->ndmas = ndmas;
1571 	dma_cntl_poolp->buf_allocated = B_TRUE;
1572 	hxgep->rx_cntl_pool_p = dma_cntl_poolp;
1573 	dma_cntl_poolp->dma_buf_pool_p = dma_cntl_p;
1574 
1575 	goto hxge_alloc_rx_mem_pool_exit;
1576 
1577 hxge_alloc_rx_mem_fail2:
1578 	/* Free control buffers */
1579 	j--;
1580 	HXGE_DEBUG_MSG((hxgep, DMA_CTL,
1581 	    "==> hxge_alloc_rx_mem_pool: freeing control bufs (%d)", j));
1582 	for (; j >= 0; j--) {
1583 		hxge_free_rx_cntl_dma(hxgep,
1584 		    (p_hxge_dma_common_t)dma_cntl_p[j]);
1585 		HXGE_DEBUG_MSG((hxgep, DMA_CTL,
1586 		    "==> hxge_alloc_rx_mem_pool: control bufs freed (%d)", j));
1587 	}
1588 	HXGE_DEBUG_MSG((hxgep, DMA_CTL,
1589 	    "==> hxge_alloc_rx_mem_pool: control bufs freed (%d)", j));
1590 
1591 hxge_alloc_rx_mem_fail1:
1592 	/* Free data buffers */
1593 	i--;
1594 	HXGE_DEBUG_MSG((hxgep, DMA_CTL,
1595 	    "==> hxge_alloc_rx_mem_pool: freeing data bufs (%d)", i));
1596 	for (; i >= 0; i--) {
1597 		hxge_free_rx_buf_dma(hxgep, (p_hxge_dma_common_t)dma_buf_p[i],
1598 		    num_chunks[i]);
1599 	}
1600 	HXGE_DEBUG_MSG((hxgep, DMA_CTL,
1601 	    "==> hxge_alloc_rx_mem_pool: data bufs freed (%d)", i));
1602 
1603 	KMEM_FREE(num_chunks, sizeof (uint32_t) * ndmas);
1604 	KMEM_FREE(dma_poolp, sizeof (hxge_dma_pool_t));
1605 	KMEM_FREE(dma_buf_p, ndmas * sizeof (p_hxge_dma_common_t));
1606 	KMEM_FREE(dma_cntl_poolp, sizeof (hxge_dma_pool_t));
1607 	KMEM_FREE(dma_cntl_p, ndmas * sizeof (p_hxge_dma_common_t));
1608 
1609 hxge_alloc_rx_mem_pool_exit:
1610 	HXGE_DEBUG_MSG((hxgep, DMA_CTL,
1611 	    "<== hxge_alloc_rx_mem_pool:status 0x%08x", status));
1612 
1613 	return (status);
1614 }
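
/*
 * Worked example of the HXGE_RXDMA_POST_BATCH rounding in
 * hxge_alloc_rx_mem_pool() above (batch count 16): a requested RBR
 * size of 1000 blocks becomes 16 * (1000 / 16 + 1) = 16 * 63 = 1008.
 */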
1615 
1616 static void
1617 hxge_free_rx_mem_pool(p_hxge_t hxgep)
1618 {
1619 	uint32_t		i, ndmas;
1620 	p_hxge_dma_pool_t	dma_poolp;
1621 	p_hxge_dma_common_t	*dma_buf_p;
1622 	p_hxge_dma_pool_t	dma_cntl_poolp;
1623 	p_hxge_dma_common_t	*dma_cntl_p;
1624 	uint32_t		*num_chunks;
1625 
1626 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "==> hxge_free_rx_mem_pool"));
1627 
1628 	dma_poolp = hxgep->rx_buf_pool_p;
1629 	if (dma_poolp == NULL || (!dma_poolp->buf_allocated)) {
1630 		HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "<== hxge_free_rx_mem_pool "
1631 		    "(null rx buf pool or buf not allocated)"));
1632 		return;
1633 	}
1634 
1635 	dma_cntl_poolp = hxgep->rx_cntl_pool_p;
1636 	if (dma_cntl_poolp == NULL || (!dma_cntl_poolp->buf_allocated)) {
1637 		HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
1638 		    "<== hxge_free_rx_mem_pool "
1639 		    "(null rx cntl buf pool or cntl buf not allocated)"));
1640 		return;
1641 	}
1642 
1643 	dma_buf_p = dma_poolp->dma_buf_pool_p;
1644 	num_chunks = dma_poolp->num_chunks;
1645 
1646 	dma_cntl_p = dma_cntl_poolp->dma_buf_pool_p;
1647 	ndmas = dma_cntl_poolp->ndmas;
1648 
1649 	for (i = 0; i < ndmas; i++) {
1650 		hxge_free_rx_buf_dma(hxgep, dma_buf_p[i], num_chunks[i]);
1651 	}
1652 
1653 	for (i = 0; i < ndmas; i++) {
1654 		hxge_free_rx_cntl_dma(hxgep, dma_cntl_p[i]);
1655 	}
1656 
1657 	for (i = 0; i < ndmas; i++) {
1658 		KMEM_FREE(dma_buf_p[i],
1659 		    sizeof (hxge_dma_common_t) * HXGE_DMA_BLOCK);
1660 		KMEM_FREE(dma_cntl_p[i], sizeof (hxge_dma_common_t));
1661 	}
1662 
1663 	KMEM_FREE(num_chunks, sizeof (uint32_t) * ndmas);
1664 	KMEM_FREE(dma_cntl_p, ndmas * sizeof (p_hxge_dma_common_t));
1665 	KMEM_FREE(dma_cntl_poolp, sizeof (hxge_dma_pool_t));
1666 	KMEM_FREE(dma_buf_p, ndmas * sizeof (p_hxge_dma_common_t));
1667 	KMEM_FREE(dma_poolp, sizeof (hxge_dma_pool_t));
1668 
1669 	hxgep->rx_buf_pool_p = NULL;
1670 	hxgep->rx_cntl_pool_p = NULL;
1671 
1672 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "<== hxge_free_rx_mem_pool"));
1673 }
1674 
1675 static hxge_status_t
1676 hxge_alloc_rx_buf_dma(p_hxge_t hxgep, uint16_t dma_channel,
1677     p_hxge_dma_common_t *dmap,
1678     size_t alloc_size, size_t block_size, uint32_t *num_chunks)
1679 {
1680 	p_hxge_dma_common_t	rx_dmap;
1681 	hxge_status_t		status = HXGE_OK;
1682 	size_t			total_alloc_size;
1683 	size_t			allocated = 0;
1684 	int			i, size_index, array_size;
1685 
1686 	HXGE_DEBUG_MSG((hxgep, DMA_CTL, "==> hxge_alloc_rx_buf_dma"));
1687 
1688 	rx_dmap = (p_hxge_dma_common_t)
1689 	    KMEM_ZALLOC(sizeof (hxge_dma_common_t) * HXGE_DMA_BLOCK, KM_SLEEP);
1690 
1691 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
1692 	    " alloc_rx_buf_dma rdc %d asize %x bsize %x bbuf %llx ",
1693 	    dma_channel, alloc_size, block_size, dmap));
1694 
1695 	total_alloc_size = alloc_size;
1696 
1697 	i = 0;
1698 	size_index = 0;
1699 	array_size = sizeof (alloc_sizes) / sizeof (size_t);
1700 	while ((size_index < array_size) &&
1701 	    (alloc_sizes[size_index] < alloc_size))
1702 		size_index++;
1703 	if (size_index >= array_size) {
1704 		size_index = array_size - 1;
1705 	}
1706 
1707 	while ((allocated < total_alloc_size) &&
1708 	    (size_index >= 0) && (i < HXGE_DMA_BLOCK)) {
1709 		rx_dmap[i].dma_chunk_index = i;
1710 		rx_dmap[i].block_size = block_size;
1711 		rx_dmap[i].alength = alloc_sizes[size_index];
1712 		rx_dmap[i].orig_alength = rx_dmap[i].alength;
1713 		rx_dmap[i].nblocks = alloc_sizes[size_index] / block_size;
1714 		rx_dmap[i].dma_channel = dma_channel;
1715 		rx_dmap[i].contig_alloc_type = B_FALSE;
1716 
1717 		HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
1718 		    "alloc_rx_buf_dma rdc %d chunk %d bufp %llx size %x "
1719 		    "i %d nblocks %d alength %d",
1720 		    dma_channel, i, &rx_dmap[i], block_size,
1721 		    i, rx_dmap[i].nblocks, rx_dmap[i].alength));
1722 		status = hxge_dma_mem_alloc(hxgep, hxge_force_dma,
1723 		    &hxge_rx_dma_attr, rx_dmap[i].alength,
1724 		    &hxge_dev_buf_dma_acc_attr,
1725 		    DDI_DMA_READ | DDI_DMA_STREAMING,
1726 		    (p_hxge_dma_common_t)(&rx_dmap[i]));
1727 		if (status != HXGE_OK) {
1728 			HXGE_DEBUG_MSG((hxgep, DMA_CTL,
1729 			    " hxge_alloc_rx_buf_dma: Alloc Failed: "
1730 			    " for size: %d", alloc_sizes[size_index]));
1731 			size_index--;
1732 		} else {
1733 			HXGE_DEBUG_MSG((hxgep, DMA_CTL,
1734 			    " alloc_rx_buf_dma allocated rdc %d "
1735 			    "chunk %d size %x dvma %x bufp %llx ",
1736 			    dma_channel, i, rx_dmap[i].alength,
1737 			    rx_dmap[i].ioaddr_pp, &rx_dmap[i]));
1738 			i++;
1739 			allocated += alloc_sizes[size_index];
1740 		}
1741 	}
1742 
1743 	if (allocated < total_alloc_size) {
1744 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
1745 		    " hxge_alloc_rx_buf_dma failed due to"
1746 		    " allocated(%d) < required(%d)",
1747 		    allocated, total_alloc_size));
1748 		goto hxge_alloc_rx_mem_fail1;
1749 	}
1750 
1751 	HXGE_DEBUG_MSG((hxgep, DMA_CTL,
1752 	    " alloc_rx_buf_dma rdc %d allocated %d chunks", dma_channel, i));
1753 
1754 	*num_chunks = i;
1755 	*dmap = rx_dmap;
1756 
1757 	goto hxge_alloc_rx_mem_exit;
1758 
1759 hxge_alloc_rx_mem_fail1:
1760 	KMEM_FREE(rx_dmap, sizeof (hxge_dma_common_t) * HXGE_DMA_BLOCK);
1761 
1762 hxge_alloc_rx_mem_exit:
1763 	HXGE_DEBUG_MSG((hxgep, DMA_CTL,
1764 	    "<== hxge_alloc_rx_buf_dma status 0x%08x", status));
1765 
1766 	return (status);
1767 }
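
/*
 * Worked example of the chunk sizing in hxge_alloc_rx_buf_dma() above:
 * for a request of 0x180000 bytes the size_index loop settles on the
 * first alloc_sizes[] entry >= the request (0x200000), so one chunk
 * suffices; if that allocation fails, size_index drops to 0x100000 and
 * two chunks are needed to cover the request.
 */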
1768 
1769 /*ARGSUSED*/
1770 static void
1771 hxge_free_rx_buf_dma(p_hxge_t hxgep, p_hxge_dma_common_t dmap,
1772     uint32_t num_chunks)
1773 {
1774 	int i;
1775 
1776 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
1777 	    "==> hxge_free_rx_buf_dma: # of chunks %d", num_chunks));
1778 
1779 	for (i = 0; i < num_chunks; i++) {
1780 		HXGE_DEBUG_MSG((hxgep, MEM2_CTL,
1781 		    "==> hxge_free_rx_buf_dma: chunk %d dmap 0x%llx", i, dmap));
1782 		hxge_dma_mem_free(dmap++);
1783 	}
1784 
1785 	HXGE_DEBUG_MSG((hxgep, MEM2_CTL, "<== hxge_free_rx_buf_dma"));
1786 }
1787 
1788 /*ARGSUSED*/
1789 static hxge_status_t
1790 hxge_alloc_rx_cntl_dma(p_hxge_t hxgep, uint16_t dma_channel,
1791     p_hxge_dma_common_t *dmap, size_t size)
1792 {
1793 	p_hxge_dma_common_t	rx_dmap;
1794 	hxge_status_t		status = HXGE_OK;
1795 
1796 	HXGE_DEBUG_MSG((hxgep, DMA_CTL, "==> hxge_alloc_rx_cntl_dma"));
1797 
1798 	rx_dmap = (p_hxge_dma_common_t)
1799 	    KMEM_ZALLOC(sizeof (hxge_dma_common_t), KM_SLEEP);
1800 
1801 	rx_dmap->contig_alloc_type = B_FALSE;
1802 
1803 	status = hxge_dma_mem_alloc(hxgep, hxge_force_dma,
1804 	    &hxge_desc_dma_attr, size, &hxge_dev_desc_dma_acc_attr,
1805 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT, rx_dmap);
1806 	if (status != HXGE_OK) {
1807 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
1808 		    " hxge_alloc_rx_cntl_dma: Alloc Failed: "
1809 		    " for size: %d", size));
1810 		goto hxge_alloc_rx_cntl_dma_fail1;
1811 	}
1812 
1813 	*dmap = rx_dmap;
1814 
1815 	goto hxge_alloc_rx_cntl_dma_exit;
1816 
1817 hxge_alloc_rx_cntl_dma_fail1:
1818 	KMEM_FREE(rx_dmap, sizeof (hxge_dma_common_t));
1819 
1820 hxge_alloc_rx_cntl_dma_exit:
1821 	HXGE_DEBUG_MSG((hxgep, DMA_CTL,
1822 	    "<== hxge_alloc_rx_cntl_dma status 0x%08x", status));
1823 
1824 	return (status);
1825 }
1826 
1827 /*ARGSUSED*/
1828 static void
1829 hxge_free_rx_cntl_dma(p_hxge_t hxgep, p_hxge_dma_common_t dmap)
1830 {
1831 	HXGE_DEBUG_MSG((hxgep, DMA_CTL, "==> hxge_free_rx_cntl_dma"));
1832 
1833 	hxge_dma_mem_free(dmap);
1834 
1835 	HXGE_DEBUG_MSG((hxgep, DMA_CTL, "<== hxge_free_rx_cntl_dma"));
1836 }
1837 
1838 static hxge_status_t
1839 hxge_alloc_tx_mem_pool(p_hxge_t hxgep)
1840 {
1841 	hxge_status_t		status = HXGE_OK;
1842 	int			i, j;
1843 	uint32_t		ndmas, st_tdc;
1844 	p_hxge_dma_pt_cfg_t	p_all_cfgp;
1845 	p_hxge_hw_pt_cfg_t	p_cfgp;
1846 	p_hxge_dma_pool_t	dma_poolp;
1847 	p_hxge_dma_common_t	*dma_buf_p;
1848 	p_hxge_dma_pool_t	dma_cntl_poolp;
1849 	p_hxge_dma_common_t	*dma_cntl_p;
1850 	size_t			tx_buf_alloc_size;
1851 	size_t			tx_cntl_alloc_size;
1852 	uint32_t		*num_chunks;	/* per dma */
1853 
1854 	HXGE_DEBUG_MSG((hxgep, MEM_CTL, "==> hxge_alloc_tx_mem_pool"));
1855 
1856 	p_all_cfgp = (p_hxge_dma_pt_cfg_t)&hxgep->pt_config;
1857 	p_cfgp = (p_hxge_hw_pt_cfg_t)&p_all_cfgp->hw_config;
1858 	st_tdc = p_cfgp->start_tdc;
1859 	ndmas = p_cfgp->max_tdcs;
1860 
1861 	HXGE_DEBUG_MSG((hxgep, MEM_CTL, "==> hxge_alloc_tx_mem_pool: "
1862 	    "p_cfgp 0x%016llx start_tdc %d ndmas %d hxgep->max_tdcs %d",
1863 	    p_cfgp, p_cfgp->start_tdc, p_cfgp->max_tdcs, hxgep->max_tdcs));
1864 	/*
1865 	 * Allocate memory for each transmit DMA channel.
1866 	 */
1867 	dma_poolp = (p_hxge_dma_pool_t)KMEM_ZALLOC(sizeof (hxge_dma_pool_t),
1868 	    KM_SLEEP);
1869 	dma_buf_p = (p_hxge_dma_common_t *)KMEM_ZALLOC(
1870 	    sizeof (p_hxge_dma_common_t) * ndmas, KM_SLEEP);
1871 
1872 	dma_cntl_poolp = (p_hxge_dma_pool_t)
1873 	    KMEM_ZALLOC(sizeof (hxge_dma_pool_t), KM_SLEEP);
1874 	dma_cntl_p = (p_hxge_dma_common_t *)KMEM_ZALLOC(
1875 	    sizeof (p_hxge_dma_common_t) * ndmas, KM_SLEEP);
1876 
1877 	hxgep->hxge_port_tx_ring_size = hxge_tx_ring_size;
1878 
1879 	/*
1880 	 * Assume that each DMA channel will be configured with default
1881 	 * transmit buffer size for copying transmit data. (For packet payload
1882 	 * over this limit, packets will not be copied.)
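	 * For example, a 2 KB copy threshold with a 1024-entry ring
	 * would request 2 MB of buffer space per channel.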
1883 	 */
1884 	tx_buf_alloc_size = (hxge_bcopy_thresh * hxge_tx_ring_size);
1885 
1886 	/*
1887 	 * Addresses of transmit descriptor ring and the mailbox must be all
1888 	 * cache-aligned (64 bytes).
1889 	 */
1890 	tx_cntl_alloc_size = hxge_tx_ring_size;
1891 	tx_cntl_alloc_size *= (sizeof (tx_desc_t));
1892 	tx_cntl_alloc_size += sizeof (txdma_mailbox_t);
1893 
1894 	num_chunks = (uint32_t *)KMEM_ZALLOC(sizeof (uint32_t) * ndmas,
1895 	    KM_SLEEP);
1896 
1897 	/*
1898 	 * Allocate memory for transmit buffers and descriptor rings. Replace
1899 	 * allocation functions with interface functions provided by the
1900 	 * partition manager when it is available.
1901 	 *
1902 	 * Allocate memory for the transmit buffer pool.
1903 	 */
1904 	for (i = 0; i < ndmas; i++) {
1905 		num_chunks[i] = 0;
1906 		status = hxge_alloc_tx_buf_dma(hxgep, st_tdc, &dma_buf_p[i],
1907 		    tx_buf_alloc_size, hxge_bcopy_thresh, &num_chunks[i]);
1908 		if (status != HXGE_OK) {
1909 			break;
1910 		}
1911 		st_tdc++;
1912 	}
1913 
1914 	if (i < ndmas) {
1915 		goto hxge_alloc_tx_mem_pool_fail1;
1916 	}
1917 
1918 	st_tdc = p_cfgp->start_tdc;
1919 
1920 	/*
1921 	 * Allocate memory for descriptor rings and mailbox.
1922 	 */
1923 	for (j = 0; j < ndmas; j++) {
1924 		status = hxge_alloc_tx_cntl_dma(hxgep, st_tdc, &dma_cntl_p[j],
1925 		    tx_cntl_alloc_size);
1926 		if (status != HXGE_OK) {
1927 			break;
1928 		}
1929 		st_tdc++;
1930 	}
1931 
1932 	if (j < ndmas) {
1933 		goto hxge_alloc_tx_mem_pool_fail2;
1934 	}
1935 
1936 	dma_poolp->ndmas = ndmas;
1937 	dma_poolp->num_chunks = num_chunks;
1938 	dma_poolp->buf_allocated = B_TRUE;
1939 	dma_poolp->dma_buf_pool_p = dma_buf_p;
1940 	hxgep->tx_buf_pool_p = dma_poolp;
1941 
1942 	dma_cntl_poolp->ndmas = ndmas;
1943 	dma_cntl_poolp->buf_allocated = B_TRUE;
1944 	dma_cntl_poolp->dma_buf_pool_p = dma_cntl_p;
1945 	hxgep->tx_cntl_pool_p = dma_cntl_poolp;
1946 
1947 	HXGE_DEBUG_MSG((hxgep, MEM_CTL,
1948 	    "==> hxge_alloc_tx_mem_pool: start_tdc %d "
1949 	    "ndmas %d poolp->ndmas %d", st_tdc, ndmas, dma_poolp->ndmas));
1950 
1951 	goto hxge_alloc_tx_mem_pool_exit;
1952 
1953 hxge_alloc_tx_mem_pool_fail2:
1954 	/* Free control buffers */
1955 	j--;
1956 	for (; j >= 0; j--) {
1957 		hxge_free_tx_cntl_dma(hxgep,
1958 		    (p_hxge_dma_common_t)dma_cntl_p[j]);
1959 	}
1960 
1961 hxge_alloc_tx_mem_pool_fail1:
1962 	/* Free data buffers */
1963 	i--;
1964 	for (; i >= 0; i--) {
1965 		hxge_free_tx_buf_dma(hxgep, (p_hxge_dma_common_t)dma_buf_p[i],
1966 		    num_chunks[i]);
1967 	}
1968 
1969 	KMEM_FREE(dma_poolp, sizeof (hxge_dma_pool_t));
1970 	KMEM_FREE(dma_buf_p, ndmas * sizeof (p_hxge_dma_common_t));
1971 	KMEM_FREE(dma_cntl_poolp, sizeof (hxge_dma_pool_t));
1972 	KMEM_FREE(dma_cntl_p, ndmas * sizeof (p_hxge_dma_common_t));
1973 	KMEM_FREE(num_chunks, sizeof (uint32_t) * ndmas);
1974 
1975 hxge_alloc_tx_mem_pool_exit:
1976 	HXGE_DEBUG_MSG((hxgep, MEM_CTL,
1977 	    "<== hxge_alloc_tx_mem_pool:status 0x%08x", status));
1978 
1979 	return (status);
1980 }
1981 
1982 static hxge_status_t
1983 hxge_alloc_tx_buf_dma(p_hxge_t hxgep, uint16_t dma_channel,
1984     p_hxge_dma_common_t *dmap, size_t alloc_size,
1985     size_t block_size, uint32_t *num_chunks)
1986 {
1987 	p_hxge_dma_common_t	tx_dmap;
1988 	hxge_status_t		status = HXGE_OK;
1989 	size_t			total_alloc_size;
1990 	size_t			allocated = 0;
1991 	int			i, size_index, array_size;
1992 
1993 	HXGE_DEBUG_MSG((hxgep, DMA_CTL, "==> hxge_alloc_tx_buf_dma"));
1994 
1995 	tx_dmap = (p_hxge_dma_common_t)
1996 	    KMEM_ZALLOC(sizeof (hxge_dma_common_t) * HXGE_DMA_BLOCK, KM_SLEEP);
1997 
1998 	total_alloc_size = alloc_size;
1999 	i = 0;
2000 	size_index = 0;
2001 	array_size = sizeof (alloc_sizes) / sizeof (size_t);
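	/*
	 * Pick the smallest pre-defined chunk size that covers the
	 * request (or the largest available if none does).
	 */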
2002 	while ((size_index < array_size) &&
2003 	    (alloc_sizes[size_index] < alloc_size))
2004 		size_index++;
2005 	if (size_index >= array_size) {
2006 		size_index = array_size - 1;
2007 	}
2008 
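	/* Allocate chunk by chunk, dropping to a smaller chunk size on failure. */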
2009 	while ((allocated < total_alloc_size) &&
2010 	    (size_index >= 0) && (i < HXGE_DMA_BLOCK)) {
2011 		tx_dmap[i].dma_chunk_index = i;
2012 		tx_dmap[i].block_size = block_size;
2013 		tx_dmap[i].alength = alloc_sizes[size_index];
2014 		tx_dmap[i].orig_alength = tx_dmap[i].alength;
2015 		tx_dmap[i].nblocks = alloc_sizes[size_index] / block_size;
2016 		tx_dmap[i].dma_channel = dma_channel;
2017 		tx_dmap[i].contig_alloc_type = B_FALSE;
2018 
2019 		status = hxge_dma_mem_alloc(hxgep, hxge_force_dma,
2020 		    &hxge_tx_dma_attr, tx_dmap[i].alength,
2021 		    &hxge_dev_buf_dma_acc_attr,
2022 		    DDI_DMA_WRITE | DDI_DMA_STREAMING,
2023 		    (p_hxge_dma_common_t)(&tx_dmap[i]));
2024 		if (status != HXGE_OK) {
2025 			HXGE_DEBUG_MSG((hxgep, DMA_CTL,
2026 			    " hxge_alloc_tx_buf_dma: Alloc Failed: "
2027 			    " for size: %d", alloc_sizes[size_index]));
2028 			size_index--;
2029 		} else {
2030 			i++;
2031 			allocated += alloc_sizes[size_index];
2032 		}
2033 	}
2034 
2035 	if (allocated < total_alloc_size) {
2036 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2037 		    " hxge_alloc_tx_buf_dma: failed due to"
2038 		    " allocated(%d) < required(%d)",
2039 		    allocated, total_alloc_size));
2040 		goto hxge_alloc_tx_mem_fail1;
2041 	}
2042 
2043 	*num_chunks = i;
2044 	*dmap = tx_dmap;
2045 	HXGE_DEBUG_MSG((hxgep, DMA_CTL,
2046 	    "==> hxge_alloc_tx_buf_dma dmap 0x%016llx num chunks %d",
2047 	    *dmap, i));
2048 	goto hxge_alloc_tx_mem_exit;
2049 
2050 hxge_alloc_tx_mem_fail1:
2051 	KMEM_FREE(tx_dmap, sizeof (hxge_dma_common_t) * HXGE_DMA_BLOCK);
2052 
2053 hxge_alloc_tx_mem_exit:
2054 	HXGE_DEBUG_MSG((hxgep, DMA_CTL,
2055 	    "<== hxge_alloc_tx_buf_dma status 0x%08x", status));
2056 
2057 	return (status);
2058 }
2059 
2060 /*ARGSUSED*/
2061 static void
2062 hxge_free_tx_buf_dma(p_hxge_t hxgep, p_hxge_dma_common_t dmap,
2063     uint32_t num_chunks)
2064 {
2065 	int i;
2066 
2067 	HXGE_DEBUG_MSG((hxgep, MEM_CTL, "==> hxge_free_tx_buf_dma"));
2068 
2069 	for (i = 0; i < num_chunks; i++) {
2070 		hxge_dma_mem_free(dmap++);
2071 	}
2072 
2073 	HXGE_DEBUG_MSG((hxgep, MEM_CTL, "<== hxge_free_tx_buf_dma"));
2074 }
2075 
2076 /*ARGSUSED*/
2077 static hxge_status_t
2078 hxge_alloc_tx_cntl_dma(p_hxge_t hxgep, uint16_t dma_channel,
2079     p_hxge_dma_common_t *dmap, size_t size)
2080 {
2081 	p_hxge_dma_common_t	tx_dmap;
2082 	hxge_status_t		status = HXGE_OK;
2083 
2084 	HXGE_DEBUG_MSG((hxgep, DMA_CTL, "==> hxge_alloc_tx_cntl_dma"));
2085 
2086 	tx_dmap = (p_hxge_dma_common_t)KMEM_ZALLOC(sizeof (hxge_dma_common_t),
2087 	    KM_SLEEP);
2088 
2089 	tx_dmap->contig_alloc_type = B_FALSE;
2090 
2091 	status = hxge_dma_mem_alloc(hxgep, hxge_force_dma,
2092 	    &hxge_desc_dma_attr, size, &hxge_dev_desc_dma_acc_attr,
2093 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT, tx_dmap);
2094 	if (status != HXGE_OK) {
2095 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2096 		    " hxge_alloc_tx_cntl_dma: Alloc Failed: "
2097 		    " for size: %d", size));
2098 		goto hxge_alloc_tx_cntl_dma_fail1;
2099 	}
2100 
2101 	*dmap = tx_dmap;
2102 
2103 	goto hxge_alloc_tx_cntl_dma_exit;
2104 
2105 hxge_alloc_tx_cntl_dma_fail1:
2106 	KMEM_FREE(tx_dmap, sizeof (hxge_dma_common_t));
2107 
2108 hxge_alloc_tx_cntl_dma_exit:
2109 	HXGE_DEBUG_MSG((hxgep, DMA_CTL,
2110 	    "<== hxge_alloc_tx_cntl_dma status 0x%08x", status));
2111 
2112 	return (status);
2113 }
2114 
2115 /*ARGSUSED*/
2116 static void
2117 hxge_free_tx_cntl_dma(p_hxge_t hxgep, p_hxge_dma_common_t dmap)
2118 {
2119 	HXGE_DEBUG_MSG((hxgep, DMA_CTL, "==> hxge_free_tx_cntl_dma"));
2120 
2121 	hxge_dma_mem_free(dmap);
2122 
2123 	HXGE_DEBUG_MSG((hxgep, DMA_CTL, "<== hxge_free_tx_cntl_dma"));
2124 }
2125 
2126 static void
2127 hxge_free_tx_mem_pool(p_hxge_t hxgep)
2128 {
2129 	uint32_t		i, ndmas;
2130 	p_hxge_dma_pool_t	dma_poolp;
2131 	p_hxge_dma_common_t	*dma_buf_p;
2132 	p_hxge_dma_pool_t	dma_cntl_poolp;
2133 	p_hxge_dma_common_t	*dma_cntl_p;
2134 	uint32_t		*num_chunks;
2135 
2136 	HXGE_DEBUG_MSG((hxgep, MEM3_CTL, "==> hxge_free_tx_mem_pool"));
2137 
2138 	dma_poolp = hxgep->tx_buf_pool_p;
2139 	if (dma_poolp == NULL || (!dma_poolp->buf_allocated)) {
2140 		HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
2141 		    "<== hxge_free_tx_mem_pool "
2142 		    "(null tx buf pool or buf not allocated)"));
2143 		return;
2144 	}
2145 
2146 	dma_cntl_poolp = hxgep->tx_cntl_pool_p;
2147 	if (dma_cntl_poolp == NULL || (!dma_cntl_poolp->buf_allocated)) {
2148 		HXGE_DEBUG_MSG((hxgep, MEM3_CTL,
2149 		    "<== hxge_free_tx_mem_pool "
2150 		    "(null tx cntl buf pool or cntl buf not allocated)"));
2151 		return;
2152 	}
2153 
2154 	dma_buf_p = dma_poolp->dma_buf_pool_p;
2155 	num_chunks = dma_poolp->num_chunks;
2156 
2157 	dma_cntl_p = dma_cntl_poolp->dma_buf_pool_p;
2158 	ndmas = dma_cntl_poolp->ndmas;
2159 
2160 	for (i = 0; i < ndmas; i++) {
2161 		hxge_free_tx_buf_dma(hxgep, dma_buf_p[i], num_chunks[i]);
2162 	}
2163 
2164 	for (i = 0; i < ndmas; i++) {
2165 		hxge_free_tx_cntl_dma(hxgep, dma_cntl_p[i]);
2166 	}
2167 
2168 	for (i = 0; i < ndmas; i++) {
2169 		KMEM_FREE(dma_buf_p[i],
2170 		    sizeof (hxge_dma_common_t) * HXGE_DMA_BLOCK);
2171 		KMEM_FREE(dma_cntl_p[i], sizeof (hxge_dma_common_t));
2172 	}
2173 
2174 	KMEM_FREE(num_chunks, sizeof (uint32_t) * ndmas);
2175 	KMEM_FREE(dma_cntl_p, ndmas * sizeof (p_hxge_dma_common_t));
2176 	KMEM_FREE(dma_cntl_poolp, sizeof (hxge_dma_pool_t));
2177 	KMEM_FREE(dma_buf_p, ndmas * sizeof (p_hxge_dma_common_t));
2178 	KMEM_FREE(dma_poolp, sizeof (hxge_dma_pool_t));
2179 
2180 	hxgep->tx_buf_pool_p = NULL;
2181 	hxgep->tx_cntl_pool_p = NULL;
2182 
2183 	HXGE_DEBUG_MSG((hxgep, MEM3_CTL, "<== hxge_free_tx_mem_pool"));
2184 }
2185 
2186 /*ARGSUSED*/
2187 static hxge_status_t
2188 hxge_dma_mem_alloc(p_hxge_t hxgep, dma_method_t method,
2189     struct ddi_dma_attr *dma_attrp,
2190     size_t length, ddi_device_acc_attr_t *acc_attr_p, uint_t xfer_flags,
2191     p_hxge_dma_common_t dma_p)
2192 {
2193 	caddr_t		kaddrp;
2194 	int		ddi_status = DDI_SUCCESS;
2195 
2196 	dma_p->dma_handle = NULL;
2197 	dma_p->acc_handle = NULL;
2198 	dma_p->kaddrp = NULL;
2199 
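	/*
	 * Standard DDI DMA sequence: allocate a handle, allocate memory
	 * for it, then bind that memory to obtain the device-visible address.
	 */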
2200 	ddi_status = ddi_dma_alloc_handle(hxgep->dip, dma_attrp,
2201 	    DDI_DMA_DONTWAIT, NULL, &dma_p->dma_handle);
2202 	if (ddi_status != DDI_SUCCESS) {
2203 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2204 		    "hxge_dma_mem_alloc:ddi_dma_alloc_handle failed."));
2205 		return (HXGE_ERROR | HXGE_DDI_FAILED);
2206 	}
2207 
2208 	ddi_status = ddi_dma_mem_alloc(dma_p->dma_handle, length, acc_attr_p,
2209 	    xfer_flags, DDI_DMA_DONTWAIT, 0, &kaddrp, &dma_p->alength,
2210 	    &dma_p->acc_handle);
2211 	if (ddi_status != DDI_SUCCESS) {
2212 		/* The caller will decide whether it is fatal */
2213 		HXGE_DEBUG_MSG((hxgep, DMA_CTL,
2214 		    "hxge_dma_mem_alloc:ddi_dma_mem_alloc failed"));
2215 		ddi_dma_free_handle(&dma_p->dma_handle);
2216 		dma_p->dma_handle = NULL;
2217 		return (HXGE_ERROR | HXGE_DDI_FAILED);
2218 	}
2219 
2220 	if (dma_p->alength < length) {
2221 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2222 		    "hxge_dma_mem_alloc:ddi_dma_mem_alloc < length."));
2223 		ddi_dma_mem_free(&dma_p->acc_handle);
2224 		ddi_dma_free_handle(&dma_p->dma_handle);
2225 		dma_p->acc_handle = NULL;
2226 		dma_p->dma_handle = NULL;
2227 		return (HXGE_ERROR);
2228 	}
2229 
2230 	ddi_status = ddi_dma_addr_bind_handle(dma_p->dma_handle, NULL,
2231 	    kaddrp, dma_p->alength, xfer_flags, DDI_DMA_DONTWAIT, 0,
2232 	    &dma_p->dma_cookie, &dma_p->ncookies);
2233 	if (ddi_status != DDI_DMA_MAPPED) {
2234 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2235 		    "hxge_dma_mem_alloc:ddi_dma_addr_bind failed "
2236 		    "(status 0x%x ncookies %d.)", ddi_status, dma_p->ncookies));
2237 		if (dma_p->acc_handle) {
2238 			ddi_dma_mem_free(&dma_p->acc_handle);
2239 			dma_p->acc_handle = NULL;
2240 		}
2241 		ddi_dma_free_handle(&dma_p->dma_handle);
2242 		dma_p->dma_handle = NULL;
2243 		return (HXGE_ERROR | HXGE_DDI_FAILED);
2244 	}
2245 
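	/* The buffer must map to a single DMA cookie; reject anything else. */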
2246 	if (dma_p->ncookies != 1) {
2247 		HXGE_DEBUG_MSG((hxgep, DMA_CTL,
2248 		    "hxge_dma_mem_alloc:ddi_dma_addr_bind > 1 cookie "
2249 		    "(status 0x%x ncookies %d.)", ddi_status, dma_p->ncookies));
2250 		if (dma_p->acc_handle) {
2251 			ddi_dma_mem_free(&dma_p->acc_handle);
2252 			dma_p->acc_handle = NULL;
2253 		}
2254 		(void) ddi_dma_unbind_handle(dma_p->dma_handle);
2255 		ddi_dma_free_handle(&dma_p->dma_handle);
2256 		dma_p->dma_handle = NULL;
2257 		return (HXGE_ERROR);
2258 	}
2259 
2260 	dma_p->kaddrp = kaddrp;
2261 #if defined(__i386)
2262 	dma_p->ioaddr_pp =
2263 	    (unsigned char *)(uint32_t)dma_p->dma_cookie.dmac_laddress;
2264 #else
2265 	dma_p->ioaddr_pp = (unsigned char *) dma_p->dma_cookie.dmac_laddress;
2266 #endif
2267 
2268 	HPI_DMA_ACC_HANDLE_SET(dma_p, dma_p->acc_handle);
2269 
2270 	HXGE_DEBUG_MSG((hxgep, DMA_CTL, "<== hxge_dma_mem_alloc: "
2271 	    "dma buffer allocated: dma_p $%p "
2272 	    "return dmac_laddress from cookie $%p dmac_size %d "
2273 	    "dma_p->ioaddr_p $%p "
2274 	    "dma_p->orig_ioaddr_p $%p "
2275 	    "orig_vatopa $%p "
2276 	    "alength %d (0x%x) "
2277 	    "kaddrp $%p "
2278 	    "length %d (0x%x)",
2279 	    dma_p,
2280 	    dma_p->dma_cookie.dmac_laddress,
2281 	    dma_p->dma_cookie.dmac_size,
2282 	    dma_p->ioaddr_pp,
2283 	    dma_p->orig_ioaddr_pp,
2284 	    dma_p->orig_vatopa,
2285 	    dma_p->alength, dma_p->alength,
2286 	    kaddrp,
2287 	    length, length));
2288 
2289 	return (HXGE_OK);
2290 }
2291 
2292 static void
2293 hxge_dma_mem_free(p_hxge_dma_common_t dma_p)
2294 {
2295 	if (dma_p->dma_handle != NULL) {
2296 		if (dma_p->ncookies) {
2297 			(void) ddi_dma_unbind_handle(dma_p->dma_handle);
2298 			dma_p->ncookies = 0;
2299 		}
2300 		ddi_dma_free_handle(&dma_p->dma_handle);
2301 		dma_p->dma_handle = NULL;
2302 	}
2303 	if (dma_p->acc_handle != NULL) {
2304 		ddi_dma_mem_free(&dma_p->acc_handle);
2305 		dma_p->acc_handle = NULL;
2306 		HPI_DMA_ACC_HANDLE_SET(dma_p, NULL);
2307 	}
2308 	dma_p->kaddrp = NULL;
2309 	dma_p->alength = 0;
2310 }
2311 
2312 /*
2313  *	hxge_m_start() -- start transmitting and receiving.
2314  *
2315  *	This function is called by the MAC layer when the first
2316  *	stream is opened to prepare the hardware for sending
2317  *	and receiving packets.
2318  */
2319 static int
2320 hxge_m_start(void *arg)
2321 {
2322 	p_hxge_t hxgep = (p_hxge_t)arg;
2323 
2324 	HXGE_DEBUG_MSG((hxgep, NEMO_CTL, "==> hxge_m_start"));
2325 
2326 	MUTEX_ENTER(hxgep->genlock);
2327 
2328 	if (hxge_init(hxgep) != DDI_SUCCESS) {
2329 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2330 		    "<== hxge_m_start: initialization failed"));
2331 		MUTEX_EXIT(hxgep->genlock);
2332 		return (EIO);
2333 	}
2334 
2335 	if (hxgep->hxge_mac_state != HXGE_MAC_STARTED) {
2336 		/*
2337 		 * Start timer to check the system error and tx hangs
2338 		 */
2339 		hxgep->hxge_timerid = hxge_start_timer(hxgep,
2340 		    hxge_check_hw_state, HXGE_CHECK_TIMER);
2341 
2342 		hxgep->hxge_mac_state = HXGE_MAC_STARTED;
2343 	}
2344 
2345 	MUTEX_EXIT(hxgep->genlock);
2346 
2347 	HXGE_DEBUG_MSG((hxgep, NEMO_CTL, "<== hxge_m_start"));
2348 
2349 	return (0);
2350 }
2351 
2352 /*
2353  * hxge_m_stop(): stop transmitting and receiving.
2354  */
2355 static void
2356 hxge_m_stop(void *arg)
2357 {
2358 	p_hxge_t hxgep = (p_hxge_t)arg;
2359 
2360 	HXGE_DEBUG_MSG((hxgep, NEMO_CTL, "==> hxge_m_stop"));
2361 
2362 	if (hxgep->hxge_timerid) {
2363 		hxge_stop_timer(hxgep, hxgep->hxge_timerid);
2364 		hxgep->hxge_timerid = 0;
2365 	}
2366 
2367 	MUTEX_ENTER(hxgep->genlock);
2368 
2369 	hxge_uninit(hxgep);
2370 
2371 	hxgep->hxge_mac_state = HXGE_MAC_STOPPED;
2372 
2373 	MUTEX_EXIT(hxgep->genlock);
2374 
2375 	HXGE_DEBUG_MSG((hxgep, NEMO_CTL, "<== hxge_m_stop"));
2376 }
2377 
2378 static int
2379 hxge_m_unicst(void *arg, const uint8_t *macaddr)
2380 {
2381 	p_hxge_t		hxgep = (p_hxge_t)arg;
2382 	struct ether_addr	addrp;
2383 	hxge_status_t		status;
2384 
2385 	HXGE_DEBUG_MSG((hxgep, MAC_CTL, "==> hxge_m_unicst"));
2386 
2387 	bcopy(macaddr, (uint8_t *)&addrp, ETHERADDRL);
2388 
2389 	status = hxge_set_mac_addr(hxgep, &addrp);
2390 	if (status != HXGE_OK) {
2391 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2392 		    "<== hxge_m_unicst: set unicast failed"));
2393 		return (EINVAL);
2394 	}
2395 
2396 	HXGE_DEBUG_MSG((hxgep, MAC_CTL, "<== hxge_m_unicst"));
2397 
2398 	return (0);
2399 }
2400 
2401 static int
2402 hxge_m_multicst(void *arg, boolean_t add, const uint8_t *mca)
2403 {
2404 	p_hxge_t		hxgep = (p_hxge_t)arg;
2405 	struct ether_addr	addrp;
2406 
2407 	HXGE_DEBUG_MSG((hxgep, MAC_CTL, "==> hxge_m_multicst: add %d", add));
2408 
2409 	bcopy(mca, (uint8_t *)&addrp, ETHERADDRL);
2410 
2411 	if (add) {
2412 		if (hxge_add_mcast_addr(hxgep, &addrp)) {
2413 			HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2414 			    "<== hxge_m_multicst: add multicast failed"));
2415 			return (EINVAL);
2416 		}
2417 	} else {
2418 		if (hxge_del_mcast_addr(hxgep, &addrp)) {
2419 			HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2420 			    "<== hxge_m_multicst: del multicast failed"));
2421 			return (EINVAL);
2422 		}
2423 	}
2424 
2425 	HXGE_DEBUG_MSG((hxgep, MAC_CTL, "<== hxge_m_multicst"));
2426 
2427 	return (0);
2428 }
2429 
2430 static int
2431 hxge_m_promisc(void *arg, boolean_t on)
2432 {
2433 	p_hxge_t hxgep = (p_hxge_t)arg;
2434 
2435 	HXGE_DEBUG_MSG((hxgep, MAC_CTL, "==> hxge_m_promisc: on %d", on));
2436 
2437 	if (hxge_set_promisc(hxgep, on)) {
2438 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2439 		    "<== hxge_m_promisc: set promisc failed"));
2440 		return (EINVAL);
2441 	}
2442 
2443 	HXGE_DEBUG_MSG((hxgep, MAC_CTL, "<== hxge_m_promisc: on %d", on));
2444 
2445 	return (0);
2446 }
2447 
2448 static void
2449 hxge_m_ioctl(void *arg, queue_t *wq, mblk_t *mp)
2450 {
2451 	p_hxge_t	hxgep = (p_hxge_t)arg;
2452 	struct iocblk	*iocp;
2453 	boolean_t	need_privilege;
2454 	int		err;
2455 	int		cmd;
2456 
2457 	HXGE_DEBUG_MSG((hxgep, NEMO_CTL, "==> hxge_m_ioctl"));
2458 
2459 	iocp = (struct iocblk *)mp->b_rptr;
2460 	iocp->ioc_error = 0;
2461 	need_privilege = B_TRUE;
2462 	cmd = iocp->ioc_cmd;
2463 
2464 	HXGE_DEBUG_MSG((hxgep, NEMO_CTL, "==> hxge_m_ioctl: cmd 0x%08x", cmd));
2465 	switch (cmd) {
2466 	default:
2467 		miocnak(wq, mp, 0, EINVAL);
2468 		HXGE_DEBUG_MSG((hxgep, NEMO_CTL, "<== hxge_m_ioctl: invalid"));
2469 		return;
2470 
2471 	case LB_GET_INFO_SIZE:
2472 	case LB_GET_INFO:
2473 	case LB_GET_MODE:
2474 		need_privilege = B_FALSE;
2475 		break;
2476 
2477 	case LB_SET_MODE:
2478 		break;
2479 
2480 	case ND_GET:
2481 		need_privilege = B_FALSE;
2482 		break;
2483 	case ND_SET:
2484 		break;
2485 
2486 	case HXGE_GET64:
2487 	case HXGE_PUT64:
2488 	case HXGE_GET_TX_RING_SZ:
2489 	case HXGE_GET_TX_DESC:
2490 	case HXGE_TX_SIDE_RESET:
2491 	case HXGE_RX_SIDE_RESET:
2492 	case HXGE_GLOBAL_RESET:
2493 	case HXGE_RESET_MAC:
2494 	case HXGE_PUT_TCAM:
2495 	case HXGE_GET_TCAM:
2496 	case HXGE_RTRACE:
2497 
2498 		need_privilege = B_FALSE;
2499 		break;
2500 	}
2501 
2502 	if (need_privilege) {
2503 		err = secpolicy_net_config(iocp->ioc_cr, B_FALSE);
2504 		if (err != 0) {
2505 			miocnak(wq, mp, 0, err);
2506 			HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
2507 			    "<== hxge_m_ioctl: no priv"));
2508 			return;
2509 		}
2510 	}
2511 
2512 	switch (cmd) {
2513 	case ND_GET:
2514 		HXGE_DEBUG_MSG((hxgep, NEMO_CTL, "ND_GET command"));
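		/* FALLTHROUGH */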
2515 	case ND_SET:
2516 		HXGE_DEBUG_MSG((hxgep, NEMO_CTL, "ND_SET command"));
2517 		hxge_param_ioctl(hxgep, wq, mp, iocp);
2518 		break;
2519 
2520 	case LB_GET_MODE:
2521 	case LB_SET_MODE:
2522 	case LB_GET_INFO_SIZE:
2523 	case LB_GET_INFO:
2524 		hxge_loopback_ioctl(hxgep, wq, mp, iocp);
2525 		break;
2526 
2527 	case HXGE_PUT_TCAM:
2528 	case HXGE_GET_TCAM:
2529 	case HXGE_GET64:
2530 	case HXGE_PUT64:
2531 	case HXGE_GET_TX_RING_SZ:
2532 	case HXGE_GET_TX_DESC:
2533 	case HXGE_TX_SIDE_RESET:
2534 	case HXGE_RX_SIDE_RESET:
2535 	case HXGE_GLOBAL_RESET:
2536 	case HXGE_RESET_MAC:
2537 		HXGE_DEBUG_MSG((hxgep, NEMO_CTL,
2538 		    "==> hxge_m_ioctl: cmd 0x%x", cmd));
2539 		hxge_hw_ioctl(hxgep, wq, mp, iocp);
2540 		break;
2541 	}
2542 
2543 	HXGE_DEBUG_MSG((hxgep, NEMO_CTL, "<== hxge_m_ioctl"));
2544 }
2545 
2546 extern void hxge_rx_hw_blank(void *arg, time_t ticks, uint_t count);
2547 
2548 static void
2549 hxge_m_resources(void *arg)
2550 {
2551 	p_hxge_t hxgep = arg;
2552 	mac_rx_fifo_t mrf;
2553 	p_rx_rcr_rings_t rcr_rings;
2554 	p_rx_rcr_ring_t *rcr_p;
2555 	p_rx_rcr_ring_t rcrp;
2556 	uint32_t i, ndmas;
2557 	int status;
2558 
2559 	HXGE_DEBUG_MSG((hxgep, RX_CTL, "==> hxge_m_resources"));
2560 
2561 	MUTEX_ENTER(hxgep->genlock);
2562 
2563 	if (!(hxgep->drv_state & STATE_HW_INITIALIZED)) {
2564 		status = hxge_init(hxgep);
2565 		if (status != HXGE_OK) {
2566 			HXGE_DEBUG_MSG((hxgep, RX_CTL, "==> hxge_m_resources: "
2567 			    "hxge_init failed"));
2568 			MUTEX_EXIT(hxgep->genlock);
2569 			return;
2570 		}
2571 	}
2572 
2573 	mrf.mrf_type = MAC_RX_FIFO;
2574 	mrf.mrf_blank = hxge_rx_hw_blank;
2575 	mrf.mrf_arg = (void *)hxgep;
2576 
2577 	mrf.mrf_normal_blank_time = RXDMA_RCR_TO_DEFAULT;
2578 	mrf.mrf_normal_pkt_count = RXDMA_RCR_PTHRES_DEFAULT;
2579 
2580 	rcr_rings = hxgep->rx_rcr_rings;
2581 	rcr_p = rcr_rings->rcr_rings;
2582 	ndmas = rcr_rings->ndmas;
2583 
2584 	/*
2585 	 * Export our receive resources to the MAC layer.
2586 	 */
2587 	for (i = 0; i < ndmas; i++) {
2588 		rcrp = rcr_p[i];
2589 		rcrp->rcr_mac_handle =
2590 		    mac_resource_add(hxgep->mach, (mac_resource_t *)&mrf);
2591 
2592 		HXGE_DEBUG_MSG((hxgep, RX_CTL,
2593 		    "==> hxge_m_resources: vdma %d dma %d "
2594 		    "rcrptr 0x%016llx mac_handle 0x%016llx",
2595 		    i, rcrp->rdc, rcr_p[i], rcrp->rcr_mac_handle));
2596 	}
2597 
2598 	MUTEX_EXIT(hxgep->genlock);
2599 
2600 	HXGE_DEBUG_MSG((hxgep, RX_CTL, "<== hxge_m_resources"));
2601 }
2602 
2603 /*
2604  * Set an alternate MAC address
2605  */
2606 static int
2607 hxge_altmac_set(p_hxge_t hxgep, uint8_t *maddr, mac_addr_slot_t slot)
2608 {
2609 	uint64_t	address;
2610 	uint64_t	tmp;
2611 	hpi_status_t	status;
2612 	uint8_t		addrn;
2613 	int		i;
2614 
2615 	/*
2616 	 * Convert a byte array to a 48-bit value.
2617 	 * Need to check endianness if in doubt.
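	 * For example, {0x00, 0x14, 0x4F, 0x01, 0x02, 0x03} becomes
	 * 0x00144F010203.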
2618 	 */
2619 	address = 0;
2620 	for (i = 0; i < ETHERADDRL; i++) {
2621 		tmp = maddr[i];
2622 		address <<= 8;
2623 		address |= tmp;
2624 	}
2625 
2626 	addrn = (uint8_t)slot;
2627 	status = hpi_pfc_set_mac_address(hxgep->hpi_handle, addrn, address);
2628 	if (status != HPI_SUCCESS)
2629 		return (EIO);
2630 
2631 	return (0);
2632 }
2633 
2634 static void
2635 hxge_mmac_kstat_update(p_hxge_t hxgep, mac_addr_slot_t slot)
2636 {
2637 	p_hxge_mmac_stats_t	mmac_stats;
2638 	int			i;
2639 	hxge_mmac_t		*mmac_info;
2640 
2641 	mmac_info = &hxgep->hxge_mmac_info;
2642 	mmac_stats = &hxgep->statsp->mmac_stats;
2643 	mmac_stats->mmac_max_cnt = mmac_info->num_mmac;
2644 	mmac_stats->mmac_avail_cnt = mmac_info->naddrfree;
2645 
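	/* Copy the slot's address into the kstat with its octet order reversed. */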
2646 	for (i = 0; i < ETHERADDRL; i++) {
2647 		mmac_stats->mmac_avail_pool[slot].ether_addr_octet[i] =
2648 		    mmac_info->mac_pool[slot].addr[(ETHERADDRL - 1) - i];
2649 	}
2650 }
2651 
2652 /*
2653  * Find an unused address slot, set the address value to the one specified,
2654  * enable the port to start filtering on the new MAC address.
2655  * Returns: 0 on success.
2656  */
2657 int
2658 hxge_m_mmac_add(void *arg, mac_multi_addr_t *maddr)
2659 {
2660 	p_hxge_t	hxgep = arg;
2661 	mac_addr_slot_t	slot;
2662 	hxge_mmac_t	*mmac_info;
2663 	int		err;
2664 	hxge_status_t	status;
2665 
2666 	mutex_enter(hxgep->genlock);
2667 
2668 	/*
2669 	 * Make sure that hxge is initialized in case _start() has
2670 	 * not been called.
2671 	 */
2672 	if (!(hxgep->drv_state & STATE_HW_INITIALIZED)) {
2673 		status = hxge_init(hxgep);
2674 		if (status != HXGE_OK) {
2675 			mutex_exit(hxgep->genlock);
2676 			return (ENXIO);
2677 		}
2678 	}
2679 
2680 	mmac_info = &hxgep->hxge_mmac_info;
2681 	if (mmac_info->naddrfree == 0) {
2682 		mutex_exit(hxgep->genlock);
2683 		return (ENOSPC);
2684 	}
2685 
2686 	if (!mac_unicst_verify(hxgep->mach, maddr->mma_addr,
2687 	    maddr->mma_addrlen)) {
2688 		mutex_exit(hxgep->genlock);
2689 		return (EINVAL);
2690 	}
2691 
2692 	/*
2693 	 * Search for the first available slot. Because naddrfree
2694 	 * is not zero, we are guaranteed to find one.
2695 	 * Slot 0 is for unique (primary) MAC.  The first alternate
2696 	 * MAC slot is slot 1.
2697 	 */
2698 	for (slot = 1; slot < mmac_info->num_mmac; slot++) {
2699 		if (!(mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED))
2700 			break;
2701 	}
2702 
2703 	ASSERT(slot < mmac_info->num_mmac);
2704 	if ((err = hxge_altmac_set(hxgep, maddr->mma_addr, slot)) != 0) {
2705 		mutex_exit(hxgep->genlock);
2706 		return (err);
2707 	}
2708 	bcopy(maddr->mma_addr, mmac_info->mac_pool[slot].addr, ETHERADDRL);
2709 	mmac_info->mac_pool[slot].flags |= MMAC_SLOT_USED;
2710 	mmac_info->naddrfree--;
2711 	hxge_mmac_kstat_update(hxgep, slot);
2712 
2713 	maddr->mma_slot = slot;
2714 
2715 	mutex_exit(hxgep->genlock);
2716 	return (0);
2717 }
2718 
2719 /*
2720  * Remove the specified mac address and update
2721  * the h/w not to filter the mac address anymore.
2722  * Returns: 0, on success.
2723  */
2724 int
2725 hxge_m_mmac_remove(void *arg, mac_addr_slot_t slot)
2726 {
2727 	p_hxge_t	hxgep = arg;
2728 	hxge_mmac_t	*mmac_info;
2729 	int		err = 0;
2730 	hxge_status_t	status;
2731 
2732 	mutex_enter(hxgep->genlock);
2733 
2734 	/*
2735 	 * Make sure that hxge is initialized in case _start() has
2736 	 * not been called.
2737 	 */
2738 	if (!(hxgep->drv_state & STATE_HW_INITIALIZED)) {
2739 		status = hxge_init(hxgep);
2740 		if (status != HXGE_OK) {
2741 			mutex_exit(hxgep->genlock);
2742 			return (ENXIO);
2743 		}
2744 	}
2745 
2746 	mmac_info = &hxgep->hxge_mmac_info;
2747 	if (slot <= 0 || slot >= mmac_info->num_mmac) {
2748 		mutex_exit(hxgep->genlock);
2749 		return (EINVAL);
2750 	}
2751 
2752 	if (mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED) {
2753 		if (hpi_pfc_mac_addr_disable(hxgep->hpi_handle, slot) ==
2754 		    HPI_SUCCESS) {
2755 			mmac_info->mac_pool[slot].flags &= ~MMAC_SLOT_USED;
2756 			mmac_info->naddrfree++;
2757 			/*
2758 			 * Clear mac_pool[slot].addr so that kstat shows 0
2759 			 * alternate MAC address if the slot is not used.
2760 			 */
2761 			bzero(mmac_info->mac_pool[slot].addr, ETHERADDRL);
2762 			hxge_mmac_kstat_update(hxgep, slot);
2763 		} else {
2764 			err = EIO;
2765 		}
2766 	} else {
2767 		err = EINVAL;
2768 	}
2769 
2770 	mutex_exit(hxgep->genlock);
2771 	return (err);
2772 }
2773 
2774 /*
2775  * Modify a mac address added by hxge_mmac_add().
2776  * Returns: 0, on success.
2777  */
2778 int
2779 hxge_m_mmac_modify(void *arg, mac_multi_addr_t *maddr)
2780 {
2781 	p_hxge_t	hxgep = arg;
2782 	mac_addr_slot_t	slot;
2783 	hxge_mmac_t	*mmac_info;
2784 	int		err = 0;
2785 	hxge_status_t	status;
2786 
2787 	if (!mac_unicst_verify(hxgep->mach, maddr->mma_addr,
2788 	    maddr->mma_addrlen))
2789 		return (EINVAL);
2790 
2791 	slot = maddr->mma_slot;
2792 
2793 	mutex_enter(hxgep->genlock);
2794 
2795 	/*
2796 	 * Make sure that hxge is initialized in case _start() has
2797 	 * not been called.
2798 	 */
2799 	if (!(hxgep->drv_state & STATE_HW_INITIALIZED)) {
2800 		status = hxge_init(hxgep);
2801 		if (status != HXGE_OK) {
2802 			mutex_exit(hxgep->genlock);
2803 			return (ENXIO);
2804 		}
2805 	}
2806 
2807 	mmac_info = &hxgep->hxge_mmac_info;
2808 	if (slot <= 0 || slot >= mmac_info->num_mmac) {
2809 		mutex_exit(hxgep->genlock);
2810 		return (EINVAL);
2811 	}
2812 
2813 	if (mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED) {
2814 		if ((err = hxge_altmac_set(hxgep, maddr->mma_addr,
2815 		    slot)) == 0) {
2816 			bcopy(maddr->mma_addr, mmac_info->mac_pool[slot].addr,
2817 			    ETHERADDRL);
2818 			hxge_mmac_kstat_update(hxgep, slot);
2819 		}
2820 	} else {
2821 		err = EINVAL;
2822 	}
2823 
2824 	mutex_exit(hxgep->genlock);
2825 	return (err);
2826 }
2827 
2828 /*
2829  * hxge_m_mmac_get() - Get the MAC address and other information
2831  *	related to the slot.  mma_flags should be set to 0 in the call.
2832  *	Note: although kstat shows MAC address as zero when a slot is
2833  *	not used, Crossbow expects hxge_m_mmac_get to copy factory MAC
2834  *	to the caller as long as the slot is not using a user MAC address.
2835  *	The following table shows the rules:
2836  *
2837  *     					USED    VENDOR    mma_addr
2838  *	------------------------------------------------------------
2839  *	(1) Slot uses a user MAC:	yes      no     user MAC
2840  *	(2) Slot uses a factory MAC:    yes      yes    factory MAC
2841  *	(3) Slot is not used but is
2842  *	     factory MAC capable:	no       yes    factory MAC
2843  *	(4) Slot is not used and is
2844  *	     not factory MAC capable:   no       no	0
2845  *	------------------------------------------------------------
2846  */
2847 int
2848 hxge_m_mmac_get(void *arg, mac_multi_addr_t *maddr)
2849 {
2850 	hxge_t		*hxgep = arg;
2851 	mac_addr_slot_t	slot;
2852 	hxge_mmac_t	*mmac_info;
2853 	hxge_status_t	status;
2854 
2855 	slot = maddr->mma_slot;
2856 
2857 	mutex_enter(hxgep->genlock);
2858 
2859 	/*
2860 	 * Make sure that hxge is initialized in case _start() has
2861 	 * not been called.
2862 	 */
2863 	if (!(hxgep->drv_state & STATE_HW_INITIALIZED)) {
2864 		status = hxge_init(hxgep);
2865 		if (status != HXGE_OK) {
2866 			mutex_exit(hxgep->genlock);
2867 			return (ENXIO);
2868 		}
2869 	}
2870 
2871 	mmac_info = &hxgep->hxge_mmac_info;
2872 	if (slot <= 0 || slot >= mmac_info->num_mmac) {
2873 		mutex_exit(hxgep->genlock);
2874 		return (EINVAL);
2875 	}
2876 
2877 	maddr->mma_flags = 0;
2878 	if (mmac_info->mac_pool[slot].flags & MMAC_SLOT_USED) {
2879 		maddr->mma_flags |= MMAC_SLOT_USED;
2880 		bcopy(mmac_info->mac_pool[slot].addr,
2881 		    maddr->mma_addr, ETHERADDRL);
2882 		maddr->mma_addrlen = ETHERADDRL;
2883 	}
2884 
2885 	mutex_exit(hxgep->genlock);
2886 	return (0);
2887 }
2888 
2889 /*ARGSUSED*/
2890 boolean_t
2891 hxge_m_getcapab(void *arg, mac_capab_t cap, void *cap_data)
2892 {
2893 	p_hxge_t		hxgep = (p_hxge_t)arg;
2894 	uint32_t		*txflags = cap_data;
2895 	multiaddress_capab_t	*mmacp = cap_data;
2896 
2897 	switch (cap) {
2898 	case MAC_CAPAB_HCKSUM:
2899 		*txflags = HCKSUM_INET_PARTIAL;
2900 		break;
2901 
2902 	case MAC_CAPAB_POLL:
2903 		/*
2904 		 * There's nothing for us to fill in; simply returning B_TRUE
2905 		 * to state that we support polling is sufficient.
2906 		 */
2907 		break;
2908 
2909 	case MAC_CAPAB_MULTIADDRESS:
2910 		/*
2911 		 * The number of MAC addresses made available by
2912 		 * this capability is one less than the total as
2913 		 * the primary address in slot 0 is counted in
2914 		 * the total.
2915 		 */
2916 		mmacp->maddr_naddr = PFC_N_MAC_ADDRESSES - 1;
2917 		mmacp->maddr_naddrfree = hxgep->hxge_mmac_info.naddrfree;
2918 		mmacp->maddr_flag = 0;	/* No multiple factory macs */
2919 		mmacp->maddr_handle = hxgep;
2920 		mmacp->maddr_add = hxge_m_mmac_add;
2921 		mmacp->maddr_remove = hxge_m_mmac_remove;
2922 		mmacp->maddr_modify = hxge_m_mmac_modify;
2923 		mmacp->maddr_get = hxge_m_mmac_get;
2924 		mmacp->maddr_reserve = NULL;	/* No multiple factory macs */
2925 		break;
2926 	default:
2927 		return (B_FALSE);
2928 	}
2929 	return (B_TRUE);
2930 }
2931 
2932 static boolean_t
2933 hxge_param_locked(mac_prop_id_t pr_num)
2934 {
2935 	/*
2936 	 * All adv_* parameters are locked (read-only) while
2937 	 * the device is in any sort of loopback mode ...
2938 	 */
2939 	switch (pr_num) {
2940 		case MAC_PROP_ADV_1000FDX_CAP:
2941 		case MAC_PROP_EN_1000FDX_CAP:
2942 		case MAC_PROP_ADV_1000HDX_CAP:
2943 		case MAC_PROP_EN_1000HDX_CAP:
2944 		case MAC_PROP_ADV_100FDX_CAP:
2945 		case MAC_PROP_EN_100FDX_CAP:
2946 		case MAC_PROP_ADV_100HDX_CAP:
2947 		case MAC_PROP_EN_100HDX_CAP:
2948 		case MAC_PROP_ADV_10FDX_CAP:
2949 		case MAC_PROP_EN_10FDX_CAP:
2950 		case MAC_PROP_ADV_10HDX_CAP:
2951 		case MAC_PROP_EN_10HDX_CAP:
2952 		case MAC_PROP_AUTONEG:
2953 		case MAC_PROP_FLOWCTRL:
2954 			return (B_TRUE);
2955 	}
2956 	return (B_FALSE);
2957 }
2958 
2959 /*
2960  * callback functions for set/get of properties
2961  */
2962 static int
2963 hxge_m_setprop(void *barg, const char *pr_name, mac_prop_id_t pr_num,
2964     uint_t pr_valsize, const void *pr_val)
2965 {
2966 	hxge_t		*hxgep = barg;
2967 	p_hxge_stats_t	statsp;
2968 	int		err = 0;
2969 	uint32_t	new_mtu, old_framesize, new_framesize;
2970 
2971 	HXGE_DEBUG_MSG((hxgep, DLADM_CTL, "==> hxge_m_setprop"));
2972 
2973 	statsp = hxgep->statsp;
2974 	mutex_enter(hxgep->genlock);
2975 	if (statsp->port_stats.lb_mode != hxge_lb_normal &&
2976 	    hxge_param_locked(pr_num)) {
2977 		/*
2978 		 * All adv_* parameters are locked (read-only)
2979 		 * while the device is in any sort of loopback mode.
2980 		 */
2981 		HXGE_DEBUG_MSG((hxgep, DLADM_CTL,
2982 		    "==> hxge_m_setprop: loopback mode: read only"));
2983 		mutex_exit(hxgep->genlock);
2984 		return (EBUSY);
2985 	}
2986 
2987 	switch (pr_num) {
2988 		/*
2989 		 * These properties either do not exist or are read-only
2990 		 */
2991 		case MAC_PROP_EN_1000FDX_CAP:
2992 		case MAC_PROP_EN_100FDX_CAP:
2993 		case MAC_PROP_EN_10FDX_CAP:
2994 		case MAC_PROP_EN_1000HDX_CAP:
2995 		case MAC_PROP_EN_100HDX_CAP:
2996 		case MAC_PROP_EN_10HDX_CAP:
2997 		case MAC_PROP_ADV_1000FDX_CAP:
2998 		case MAC_PROP_ADV_1000HDX_CAP:
2999 		case MAC_PROP_ADV_100FDX_CAP:
3000 		case MAC_PROP_ADV_100HDX_CAP:
3001 		case MAC_PROP_ADV_10FDX_CAP:
3002 		case MAC_PROP_ADV_10HDX_CAP:
3003 		case MAC_PROP_STATUS:
3004 		case MAC_PROP_SPEED:
3005 		case MAC_PROP_DUPLEX:
3006 		case MAC_PROP_AUTONEG:
3007 		/*
3008 		 * Flow control is handled in the shared domain and
3009 		 * it is read-only here.
3010 		 */
3011 		case MAC_PROP_FLOWCTRL:
3012 			err = EINVAL;
3013 			HXGE_DEBUG_MSG((hxgep, DLADM_CTL,
3014 			    "==> hxge_m_setprop:  read only property %d",
3015 			    pr_num));
3016 			break;
3017 
3018 		case MAC_PROP_MTU:
3019 			if (hxgep->hxge_mac_state == HXGE_MAC_STARTED) {
3020 				err = EBUSY;
3021 				break;
3022 			}
3023 
3024 			bcopy(pr_val, &new_mtu, sizeof (new_mtu));
3025 			HXGE_DEBUG_MSG((hxgep, DLADM_CTL,
3026 			    "==> hxge_m_setprop: set MTU: %d", new_mtu));
3027 
3028 			new_framesize = new_mtu + MTU_TO_FRAME_SIZE;
3029 			if (new_framesize == hxgep->vmac.maxframesize) {
3030 				err = 0;
3031 				break;
3032 			}
3033 
3034 			if (new_framesize < MIN_FRAME_SIZE ||
3035 			    new_framesize > MAX_FRAME_SIZE) {
3036 				err = EINVAL;
3037 				break;
3038 			}
3039 
3040 			old_framesize = hxgep->vmac.maxframesize;
3041 			hxgep->vmac.maxframesize = (uint16_t)new_framesize;
3042 
3043 			if (hxge_vmac_set_framesize(hxgep)) {
3044 				hxgep->vmac.maxframesize =
3045 				    (uint16_t)old_framesize;
3046 				err = EINVAL;
3047 				break;
3048 			}
3049 
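			/*
			 * Tell the MAC layer about the new SDU; if that
			 * fails, roll the hardware back to the old frame
			 * size.
			 */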
3050 			err = mac_maxsdu_update(hxgep->mach, new_mtu);
3051 			if (err) {
3052 				hxgep->vmac.maxframesize =
3053 				    (uint16_t)old_framesize;
3054 				(void) hxge_vmac_set_framesize(hxgep);
3055 			}
3056 
3057 			HXGE_DEBUG_MSG((hxgep, DLADM_CTL,
3058 			    "==> hxge_m_setprop: set MTU: %d maxframe %d",
3059 			    new_mtu, hxgep->vmac.maxframesize));
3060 			break;
3061 
3062 		case MAC_PROP_PRIVATE:
3063 			HXGE_DEBUG_MSG((hxgep, DLADM_CTL,
3064 			    "==> hxge_m_setprop: private property"));
3065 			err = hxge_set_priv_prop(hxgep, pr_name, pr_valsize,
3066 			    pr_val);
3067 			break;
3068 
3069 		default:
3070 			err = ENOTSUP;
3071 			break;
3072 	}
3073 
3074 	mutex_exit(hxgep->genlock);
3075 
3076 	HXGE_DEBUG_MSG((hxgep, DLADM_CTL,
3077 	    "<== hxge_m_setprop (return %d)", err));
3078 
3079 	return (err);
3080 }
3081 
3082 /* ARGSUSED */
3083 static int
3084 hxge_get_def_val(hxge_t *hxgep, mac_prop_id_t pr_num, uint_t pr_valsize,
3085     void *pr_val)
3086 {
3087 	int		err = 0;
3088 	link_flowctrl_t	fl;
3089 
3090 	switch (pr_num) {
3091 	case MAC_PROP_DUPLEX:
3092 		*(uint8_t *)pr_val = 2;
3093 		break;
3094 	case MAC_PROP_AUTONEG:
3095 		*(uint8_t *)pr_val = 0;
3096 		break;
3097 	case MAC_PROP_FLOWCTRL:
3098 		if (pr_valsize < sizeof (link_flowctrl_t))
3099 			return (EINVAL);
3100 		fl = LINK_FLOWCTRL_TX;
3101 		bcopy(&fl, pr_val, sizeof (fl));
3102 		break;
3103 	default:
3104 		err = ENOTSUP;
3105 		break;
3106 	}
3107 	return (err);
3108 }
3109 
3110 static int
3111 hxge_m_getprop(void *barg, const char *pr_name, mac_prop_id_t pr_num,
3112     uint_t pr_flags, uint_t pr_valsize, void *pr_val)
3113 {
3114 	hxge_t 		*hxgep = barg;
3115 	p_hxge_stats_t	statsp = hxgep->statsp;
3116 	int		err = 0;
3117 	link_flowctrl_t fl;
3118 	uint64_t	tmp = 0;
3119 	link_state_t	ls;
3120 
3121 	HXGE_DEBUG_MSG((hxgep, DLADM_CTL,
3122 	    "==> hxge_m_getprop: pr_num %d", pr_num));
3123 
3124 	if (pr_valsize == 0)
3125 		return (EINVAL);
3126 
3127 	if ((pr_flags & MAC_PROP_DEFAULT) && (pr_num != MAC_PROP_PRIVATE)) {
3128 		err = hxge_get_def_val(hxgep, pr_num, pr_valsize, pr_val);
3129 		return (err);
3130 	}
3131 
3132 	bzero(pr_val, pr_valsize);
3133 	switch (pr_num) {
3134 		case MAC_PROP_DUPLEX:
3135 			*(uint8_t *)pr_val = statsp->mac_stats.link_duplex;
3136 			HXGE_DEBUG_MSG((hxgep, DLADM_CTL,
3137 			    "==> hxge_m_getprop: duplex mode %d",
3138 			    *(uint8_t *)pr_val));
3139 			break;
3140 
3141 		case MAC_PROP_SPEED:
3142 			if (pr_valsize < sizeof (uint64_t))
3143 				return (EINVAL);
3144 			tmp = statsp->mac_stats.link_speed * 1000000ull;
3145 			bcopy(&tmp, pr_val, sizeof (tmp));
3146 			break;
3147 
3148 		case MAC_PROP_STATUS:
3149 			if (pr_valsize < sizeof (link_state_t))
3150 				return (EINVAL);
3151 			if (!statsp->mac_stats.link_up)
3152 				ls = LINK_STATE_DOWN;
3153 			else
3154 				ls = LINK_STATE_UP;
3155 			bcopy(&ls, pr_val, sizeof (ls));
3156 			break;
3157 
3158 		case MAC_PROP_FLOWCTRL:
3159 			/*
3160 			 * Flow control is supported by the shared domain and
3161 			 * it is currently transmit-only.
3162 			 */
3163 			if (pr_valsize < sizeof (link_flowctrl_t))
3164 				return (EINVAL);
3165 			fl = LINK_FLOWCTRL_TX;
3166 			bcopy(&fl, pr_val, sizeof (fl));
3167 			break;
3168 		case MAC_PROP_AUTONEG:
3169 			/* 10G link only and it is not negotiable */
3170 			*(uint8_t *)pr_val = 0;
3171 			break;
3172 		case MAC_PROP_ADV_1000FDX_CAP:
3173 		case MAC_PROP_ADV_100FDX_CAP:
3174 		case MAC_PROP_ADV_10FDX_CAP:
3175 		case MAC_PROP_ADV_1000HDX_CAP:
3176 		case MAC_PROP_ADV_100HDX_CAP:
3177 		case MAC_PROP_ADV_10HDX_CAP:
3178 		case MAC_PROP_EN_1000FDX_CAP:
3179 		case MAC_PROP_EN_100FDX_CAP:
3180 		case MAC_PROP_EN_10FDX_CAP:
3181 		case MAC_PROP_EN_1000HDX_CAP:
3182 		case MAC_PROP_EN_100HDX_CAP:
3183 		case MAC_PROP_EN_10HDX_CAP:
3184 			err = ENOTSUP;
3185 			break;
3186 
3187 		case MAC_PROP_PRIVATE:
3188 			err = hxge_get_priv_prop(hxgep, pr_name, pr_flags,
3189 			    pr_valsize, pr_val);
3190 			break;
3191 		default:
3192 			err = EINVAL;
3193 			break;
3194 	}
3195 
3196 	HXGE_DEBUG_MSG((hxgep, DLADM_CTL, "<== hxge_m_getprop"));
3197 
3198 	return (err);
3199 }
3200 
3201 /* ARGSUSED */
3202 static int
3203 hxge_set_priv_prop(p_hxge_t hxgep, const char *pr_name, uint_t pr_valsize,
3204     const void *pr_val)
3205 {
3206 	p_hxge_param_t	param_arr = hxgep->param_arr;
3207 	int		err = 0;
3208 
3209 	HXGE_DEBUG_MSG((hxgep, DLADM_CTL,
3210 	    "==> hxge_set_priv_prop: name %s (value %s)", pr_name, pr_val));
3211 
3212 	if (pr_val == NULL) {
3213 		return (EINVAL);
3214 	}
3215 
3216 	/* Blanking */
3217 	if (strcmp(pr_name, "_rxdma_intr_time") == 0) {
3218 		err = hxge_param_rx_intr_time(hxgep, NULL, NULL,
3219 		    (char *)pr_val, (caddr_t)&param_arr[param_rxdma_intr_time]);
3220 	} else if (strcmp(pr_name, "_rxdma_intr_pkts") == 0) {
3221 		err = hxge_param_rx_intr_pkts(hxgep, NULL, NULL,
3222 		    (char *)pr_val, (caddr_t)&param_arr[param_rxdma_intr_pkts]);
3223 
3224 	/* Classification */
3225 	} else if (strcmp(pr_name, "_class_opt_ipv4_tcp") == 0) {
3226 		err = hxge_param_set_ip_opt(hxgep, NULL, NULL, (char *)pr_val,
3227 		    (caddr_t)&param_arr[param_class_opt_ipv4_tcp]);
3228 	} else if (strcmp(pr_name, "_class_opt_ipv4_udp") == 0) {
3229 		err = hxge_param_set_ip_opt(hxgep, NULL, NULL, (char *)pr_val,
3230 		    (caddr_t)&param_arr[param_class_opt_ipv4_udp]);
3231 	} else if (strcmp(pr_name, "_class_opt_ipv4_ah") == 0) {
3232 		err = hxge_param_set_ip_opt(hxgep, NULL, NULL, (char *)pr_val,
3233 		    (caddr_t)&param_arr[param_class_opt_ipv4_ah]);
3234 	} else if (strcmp(pr_name, "_class_opt_ipv4_sctp") == 0) {
3235 		err = hxge_param_set_ip_opt(hxgep, NULL, NULL, (char *)pr_val,
3236 		    (caddr_t)&param_arr[param_class_opt_ipv4_sctp]);
3237 	} else if (strcmp(pr_name, "_class_opt_ipv6_tcp") == 0) {
3238 		err = hxge_param_set_ip_opt(hxgep, NULL, NULL, (char *)pr_val,
3239 		    (caddr_t)&param_arr[param_class_opt_ipv6_tcp]);
3240 	} else if (strcmp(pr_name, "_class_opt_ipv6_udp") == 0) {
3241 		err = hxge_param_set_ip_opt(hxgep, NULL, NULL, (char *)pr_val,
3242 		    (caddr_t)&param_arr[param_class_opt_ipv6_udp]);
3243 	} else if (strcmp(pr_name, "_class_opt_ipv6_ah") == 0) {
3244 		err = hxge_param_set_ip_opt(hxgep, NULL, NULL, (char *)pr_val,
3245 		    (caddr_t)&param_arr[param_class_opt_ipv6_ah]);
3246 	} else if (strcmp(pr_name, "_class_opt_ipv6_sctp") == 0) {
3247 		err = hxge_param_set_ip_opt(hxgep, NULL, NULL, (char *)pr_val,
3248 		    (caddr_t)&param_arr[param_class_opt_ipv6_sctp]);
3249 	} else {
3250 		err = EINVAL;
3251 	}
3252 
3253 	HXGE_DEBUG_MSG((hxgep, DLADM_CTL,
3254 	    "<== hxge_set_priv_prop: err %d", err));
3255 
3256 	return (err);
3257 }
3258 
3259 static int
3260 hxge_get_priv_prop(p_hxge_t hxgep, const char *pr_name, uint_t pr_flags,
3261     uint_t pr_valsize, void *pr_val)
3262 {
3263 	p_hxge_param_t	param_arr = hxgep->param_arr;
3264 	char		valstr[MAXNAMELEN];
3265 	int		err = 0;
3266 	uint_t		strsize;
3267 	int		value = 0;
3268 
3269 	HXGE_DEBUG_MSG((hxgep, DLADM_CTL,
3270 	    "==> hxge_get_priv_prop: property %s", pr_name));
3271 
3272 	if (pr_flags & MAC_PROP_DEFAULT) {
3273 		/* Receive Interrupt Blanking Parameters */
3274 		if (strcmp(pr_name, "_rxdma_intr_time") == 0) {
3275 			value = RXDMA_RCR_TO_DEFAULT;
3276 		} else if (strcmp(pr_name, "_rxdma_intr_pkts") == 0) {
3277 			value = RXDMA_RCR_PTHRES_DEFAULT;
3278 
3279 		/* Classification and Load Distribution Configuration */
3280 		} else if (strcmp(pr_name, "_class_opt_ipv4_tcp") == 0 ||
3281 		    strcmp(pr_name, "_class_opt_ipv4_udp") == 0 ||
3282 		    strcmp(pr_name, "_class_opt_ipv4_ah") == 0 ||
3283 		    strcmp(pr_name, "_class_opt_ipv4_sctp") == 0 ||
3284 		    strcmp(pr_name, "_class_opt_ipv6_tcp") == 0 ||
3285 		    strcmp(pr_name, "_class_opt_ipv6_udp") == 0 ||
3286 		    strcmp(pr_name, "_class_opt_ipv6_ah") == 0 ||
3287 		    strcmp(pr_name, "_class_opt_ipv6_sctp") == 0) {
3288 			value = HXGE_CLASS_TCAM_LOOKUP;
3289 		} else {
3290 			err = EINVAL;
3291 		}
3292 	} else {
3293 		/* Receive Interrupt Blanking Parameters */
3294 		if (strcmp(pr_name, "_rxdma_intr_time") == 0) {
3295 			value = hxgep->intr_timeout;
3296 		} else if (strcmp(pr_name, "_rxdma_intr_pkts") == 0) {
3297 			value = hxgep->intr_threshold;
3298 
3299 		/* Classification and Load Distribution Configuration */
3300 		} else if (strcmp(pr_name, "_class_opt_ipv4_tcp") == 0) {
3301 			err = hxge_param_get_ip_opt(hxgep, NULL, NULL,
3302 			    (caddr_t)&param_arr[param_class_opt_ipv4_tcp]);
3303 
3304 			value = (int)param_arr[param_class_opt_ipv4_tcp].value;
3305 		} else if (strcmp(pr_name, "_class_opt_ipv4_udp") == 0) {
3306 			err = hxge_param_get_ip_opt(hxgep, NULL, NULL,
3307 			    (caddr_t)&param_arr[param_class_opt_ipv4_udp]);
3308 
3309 			value = (int)param_arr[param_class_opt_ipv4_udp].value;
3310 		} else if (strcmp(pr_name, "_class_opt_ipv4_ah") == 0) {
3311 			err = hxge_param_get_ip_opt(hxgep, NULL, NULL,
3312 			    (caddr_t)&param_arr[param_class_opt_ipv4_ah]);
3313 
3314 			value = (int)param_arr[param_class_opt_ipv4_ah].value;
3315 		} else if (strcmp(pr_name, "_class_opt_ipv4_sctp") == 0) {
3316 			err = hxge_param_get_ip_opt(hxgep, NULL, NULL,
3317 			    (caddr_t)&param_arr[param_class_opt_ipv4_sctp]);
3318 
3319 			value = (int)param_arr[param_class_opt_ipv4_sctp].value;
3320 		} else if (strcmp(pr_name, "_class_opt_ipv6_tcp") == 0) {
3321 			err = hxge_param_get_ip_opt(hxgep, NULL, NULL,
3322 			    (caddr_t)&param_arr[param_class_opt_ipv6_tcp]);
3323 
3324 			value = (int)param_arr[param_class_opt_ipv6_tcp].value;
3325 		} else if (strcmp(pr_name, "_class_opt_ipv6_udp") == 0) {
3326 			err = hxge_param_get_ip_opt(hxgep, NULL, NULL,
3327 			    (caddr_t)&param_arr[param_class_opt_ipv6_udp]);
3328 
3329 			value = (int)param_arr[param_class_opt_ipv6_udp].value;
3330 		} else if (strcmp(pr_name, "_class_opt_ipv6_ah") == 0) {
3331 			err = hxge_param_get_ip_opt(hxgep, NULL, NULL,
3332 			    (caddr_t)&param_arr[param_class_opt_ipv6_ah]);
3333 
3334 			value = (int)param_arr[param_class_opt_ipv6_ah].value;
3335 		} else if (strcmp(pr_name, "_class_opt_ipv6_sctp") == 0) {
3336 			err = hxge_param_get_ip_opt(hxgep, NULL, NULL,
3337 			    (caddr_t)&param_arr[param_class_opt_ipv6_sctp]);
3338 
3339 			value = (int)param_arr[param_class_opt_ipv6_sctp].value;
3340 		} else {
3341 			err = EINVAL;
3342 		}
3343 	}
3344 
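	/* Private properties are exchanged as strings; render the value in hex. */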
3345 	if (err == 0) {
3346 		(void) snprintf(valstr, sizeof (valstr), "0x%x", value);
3347 
3348 		strsize = (uint_t)strlen(valstr);
3349 		if (pr_valsize < strsize) {
3350 			err = ENOBUFS;
3351 		} else {
3352 			(void) strlcpy(pr_val, valstr, pr_valsize);
3353 		}
3354 	}
3355 
3356 	HXGE_DEBUG_MSG((hxgep, DLADM_CTL,
3357 	    "<== hxge_get_priv_prop: return %d", err));
3358 
3359 	return (err);
3360 }
3361 /*
3362  * Module loading and removing entry points.
3363  */
3364 DDI_DEFINE_STREAM_OPS(hxge_dev_ops, nulldev, nulldev, hxge_attach, hxge_detach,
3365     nodev, NULL, D_MP, NULL);
3366 
3367 extern struct mod_ops mod_driverops;
3368 
3369 #define	HXGE_DESC_VER	"HXGE 10Gb Ethernet Driver"
3370 
3371 /*
3372  * Module linkage information for the kernel.
3373  */
3374 static struct modldrv hxge_modldrv = {
3375 	&mod_driverops,
3376 	HXGE_DESC_VER,
3377 	&hxge_dev_ops
3378 };
3379 
3380 static struct modlinkage modlinkage = {
3381 	MODREV_1, (void *) &hxge_modldrv, NULL
3382 };
3383 
3384 int
3385 _init(void)
3386 {
3387 	int status;
3388 
3389 	HXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _init"));
3390 	mac_init_ops(&hxge_dev_ops, "hxge");
3391 	status = ddi_soft_state_init(&hxge_list, sizeof (hxge_t), 0);
3392 	if (status != 0) {
3393 		HXGE_ERROR_MSG((NULL, HXGE_ERR_CTL,
3394 		    "failed to init device soft state"));
3395 		mac_fini_ops(&hxge_dev_ops);
3396 		goto _init_exit;
3397 	}
3398 
3399 	status = mod_install(&modlinkage);
3400 	if (status != 0) {
3401 		ddi_soft_state_fini(&hxge_list);
3402 		HXGE_ERROR_MSG((NULL, HXGE_ERR_CTL, "Mod install failed"));
3403 		goto _init_exit;
3404 	}
3405 
3406 	MUTEX_INIT(&hxge_common_lock, NULL, MUTEX_DRIVER, NULL);
3407 
3408 _init_exit:
3409 	HXGE_DEBUG_MSG((NULL, MOD_CTL, "_init status = 0x%X", status));
3410 
3411 	return (status);
3412 }
3413 
3414 int
3415 _fini(void)
3416 {
3417 	int status;
3418 
3419 	HXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _fini"));
3420 
3421 	HXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _fini: mod_remove"));
3422 
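	/* Refuse to unload while any driver-owned mblks are still outstanding. */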
3423 	if (hxge_mblks_pending)
3424 		return (EBUSY);
3425 
3426 	status = mod_remove(&modlinkage);
3427 	if (status != DDI_SUCCESS) {
3428 		HXGE_DEBUG_MSG((NULL, MOD_CTL,
3429 		    "Module removal failed 0x%08x", status));
3430 		goto _fini_exit;
3431 	}
3432 
3433 	mac_fini_ops(&hxge_dev_ops);
3434 
3435 	ddi_soft_state_fini(&hxge_list);
3436 
3437 	MUTEX_DESTROY(&hxge_common_lock);
3438 
3439 _fini_exit:
3440 	HXGE_DEBUG_MSG((NULL, MOD_CTL, "_fini status = 0x%08x", status));
3441 
3442 	return (status);
3443 }
3444 
3445 int
3446 _info(struct modinfo *modinfop)
3447 {
3448 	int status;
3449 
3450 	HXGE_DEBUG_MSG((NULL, MOD_CTL, "==> _info"));
3451 	status = mod_info(&modlinkage, modinfop);
3452 	HXGE_DEBUG_MSG((NULL, MOD_CTL, " _info status = 0x%X", status));
3453 
3454 	return (status);
3455 }
3456 
3457 /*ARGSUSED*/
3458 hxge_status_t
3459 hxge_add_intrs(p_hxge_t hxgep)
3460 {
3461 	int		intr_types;
3462 	int		type = 0;
3463 	int		ddi_status = DDI_SUCCESS;
3464 	hxge_status_t	status = HXGE_OK;
3465 
3466 	HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_add_intrs"));
3467 
3468 	hxgep->hxge_intr_type.intr_registered = B_FALSE;
3469 	hxgep->hxge_intr_type.intr_enabled = B_FALSE;
3470 	hxgep->hxge_intr_type.msi_intx_cnt = 0;
3471 	hxgep->hxge_intr_type.intr_added = 0;
3472 	hxgep->hxge_intr_type.niu_msi_enable = B_FALSE;
3473 	hxgep->hxge_intr_type.intr_type = 0;
3474 
3475 	if (hxge_msi_enable) {
3476 		hxgep->hxge_intr_type.niu_msi_enable = B_TRUE;
3477 	}
3478 
3479 	/* Get the supported interrupt types */
3480 	if ((ddi_status = ddi_intr_get_supported_types(hxgep->dip, &intr_types))
3481 	    != DDI_SUCCESS) {
3482 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "<== hxge_add_intrs: "
3483 		    "ddi_intr_get_supported_types failed: status 0x%08x",
3484 		    ddi_status));
3485 		return (HXGE_ERROR | HXGE_DDI_FAILED);
3486 	}
3487 
3488 	hxgep->hxge_intr_type.intr_types = intr_types;
3489 
3490 	HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_add_intrs: "
3491 	    "ddi_intr_get_supported_types: 0x%08x", intr_types));
3492 
3493 	/*
3494 	 * Pick the interrupt type to use, based on hxge_msi_enable:
3495 	 *	(1): 1 - MSI
3496 	 *	(2): 2 - MSI-X
3497 	 *	others - FIXED
3498 	 */
3499 	switch (hxge_msi_enable) {
3500 	default:
3501 		type = DDI_INTR_TYPE_FIXED;
3502 		HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_add_intrs: "
3503 		    "use fixed (intx emulation) type %08x", type));
3504 		break;
3505 
3506 	case 2:
3507 		HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_add_intrs: "
3508 		    "ddi_intr_get_supported_types: 0x%08x", intr_types));
3509 		if (intr_types & DDI_INTR_TYPE_MSIX) {
3510 			type = DDI_INTR_TYPE_MSIX;
3511 			HXGE_DEBUG_MSG((hxgep, INT_CTL,
3512 			    "==> hxge_add_intrs: "
3513 			    "ddi_intr_get_supported_types: MSIX 0x%08x", type));
3514 		} else if (intr_types & DDI_INTR_TYPE_MSI) {
3515 			type = DDI_INTR_TYPE_MSI;
3516 			HXGE_DEBUG_MSG((hxgep, INT_CTL,
3517 			    "==> hxge_add_intrs: "
3518 			    "ddi_intr_get_supported_types: MSI 0x%08x", type));
3519 		} else if (intr_types & DDI_INTR_TYPE_FIXED) {
3520 			type = DDI_INTR_TYPE_FIXED;
3521 			HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_add_intrs: "
3522 			    "ddi_intr_get_supported_types: FIXED 0x%08x", type));
3523 		}
3524 		break;
3525 
3526 	case 1:
3527 		if (intr_types & DDI_INTR_TYPE_MSI) {
3528 			type = DDI_INTR_TYPE_MSI;
3529 			HXGE_DEBUG_MSG((hxgep, INT_CTL,
3530 			    "==> hxge_add_intrs: "
3531 			    "ddi_intr_get_supported_types: MSI 0x%08x", type));
3532 		} else if (intr_types & DDI_INTR_TYPE_MSIX) {
3533 			type = DDI_INTR_TYPE_MSIX;
3534 			HXGE_DEBUG_MSG((hxgep, INT_CTL,
3535 			    "==> hxge_add_intrs: "
3536 			    "ddi_intr_get_supported_types: MSIX 0x%08x", type));
3537 		} else if (intr_types & DDI_INTR_TYPE_FIXED) {
3538 			type = DDI_INTR_TYPE_FIXED;
3539 			HXGE_DEBUG_MSG((hxgep, INT_CTL,
3540 			    "==> hxge_add_intrs: "
3541 			    "ddi_intr_get_supported_types: FIXED 0x%08x", type));
3542 		}
3543 	}
3544 
3545 	hxgep->hxge_intr_type.intr_type = type;
3546 	if ((type == DDI_INTR_TYPE_MSIX || type == DDI_INTR_TYPE_MSI ||
3547 	    type == DDI_INTR_TYPE_FIXED) &&
3548 	    hxgep->hxge_intr_type.niu_msi_enable) {
3549 		if ((status = hxge_add_intrs_adv(hxgep)) != DDI_SUCCESS) {
3550 			HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3551 			    " hxge_add_intrs: "
3552 			    " hxge_add_intrs_adv failed: status 0x%08x",
3553 			    status));
3554 			return (status);
3555 		} else {
3556 			HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_add_intrs: "
3557 			    "interrupts registered : type %d", type));
3558 			hxgep->hxge_intr_type.intr_registered = B_TRUE;
3559 
3560 			HXGE_DEBUG_MSG((hxgep, DDI_CTL,
3561 			    "\nAdded advanced hxge add_intr_adv "
3562 			    "intr type 0x%x\n", type));
3563 
3564 			return (status);
3565 		}
3566 	}
3567 
3568 	if (!hxgep->hxge_intr_type.intr_registered) {
3569 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3570 		    "==> hxge_add_intrs: failed to register interrupts"));
3571 		return (HXGE_ERROR | HXGE_DDI_FAILED);
3572 	}
3573 
3574 	HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_add_intrs"));
3575 
3576 	return (status);
3577 }
3578 
3579 /*ARGSUSED*/
3580 static hxge_status_t
3581 hxge_add_soft_intrs(p_hxge_t hxgep)
3582 {
3583 	int		ddi_status = DDI_SUCCESS;
3584 	hxge_status_t	status = HXGE_OK;
3585 
3586 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_add_soft_intrs"));
3587 
3588 	hxgep->resched_id = NULL;
3589 	hxgep->resched_running = B_FALSE;
3590 	ddi_status = ddi_add_softintr(hxgep->dip, DDI_SOFTINT_LOW,
3591 	    &hxgep->resched_id, NULL, NULL, hxge_reschedule, (caddr_t)hxgep);
3592 	if (ddi_status != DDI_SUCCESS) {
3593 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "<== hxge_add_soft_intrs: "
3594 		    "ddi_add_softintr failed: status 0x%08x", ddi_status));
3595 		return (HXGE_ERROR | HXGE_DDI_FAILED);
3596 	}
3597 
3598 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_add_soft_intrs"));
3599 
3600 	return (status);
3601 }
3602 
3603 /*ARGSUSED*/
3604 static hxge_status_t
3605 hxge_add_intrs_adv(p_hxge_t hxgep)
3606 {
3607 	int		intr_type;
3608 	p_hxge_intr_t	intrp;
3609 	hxge_status_t	status;
3610 
3611 	HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_add_intrs_adv"));
3612 
3613 	intrp = (p_hxge_intr_t)&hxgep->hxge_intr_type;
3614 	intr_type = intrp->intr_type;
3615 
3616 	HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_add_intrs_adv: type 0x%x",
3617 	    intr_type));
3618 
3619 	switch (intr_type) {
3620 	case DDI_INTR_TYPE_MSI:		/* 0x2 */
3621 	case DDI_INTR_TYPE_MSIX:	/* 0x4 */
3622 		status = hxge_add_intrs_adv_type(hxgep, intr_type);
3623 		break;
3624 
3625 	case DDI_INTR_TYPE_FIXED:	/* 0x1 */
3626 		status = hxge_add_intrs_adv_type_fix(hxgep, intr_type);
3627 		break;
3628 
3629 	default:
3630 		status = HXGE_ERROR;
3631 		break;
3632 	}
3633 
3634 	HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_add_intrs_adv"));
3635 
3636 	return (status);
3637 }
3638 
3639 /*ARGSUSED*/
3640 static hxge_status_t
3641 hxge_add_intrs_adv_type(p_hxge_t hxgep, uint32_t int_type)
3642 {
3643 	dev_info_t	*dip = hxgep->dip;
3644 	p_hxge_ldg_t	ldgp;
3645 	p_hxge_intr_t	intrp;
3646 	uint_t		*inthandler;
3647 	void		*arg1, *arg2;
3648 	int		behavior;
3649 	int		nintrs, navail;
3650 	int		nactual, nrequired;
3651 	int		inum = 0;
3652 	int		loop = 0;
3653 	int		x, y;
3654 	int		ddi_status = DDI_SUCCESS;
3655 	hxge_status_t	status = HXGE_OK;
3656 
3657 	HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_add_intrs_adv_type"));
3658 
3659 	intrp = (p_hxge_intr_t)&hxgep->hxge_intr_type;
3660 
3661 	ddi_status = ddi_intr_get_nintrs(dip, int_type, &nintrs);
3662 	if ((ddi_status != DDI_SUCCESS) || (nintrs == 0)) {
3663 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3664 		    "ddi_intr_get_nintrs() failed, status: 0x%x%, "
3665 		    "nintrs: %d", ddi_status, nintrs));
3666 		return (HXGE_ERROR | HXGE_DDI_FAILED);
3667 	}
3668 
3669 	ddi_status = ddi_intr_get_navail(dip, int_type, &navail);
3670 	if ((ddi_status != DDI_SUCCESS) || (navail == 0)) {
3671 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3672 		    "ddi_intr_get_navail() failed, status: 0x%x%, "
3673 		    "nintrs: %d", ddi_status, navail));
3674 		return (HXGE_ERROR | HXGE_DDI_FAILED);
3675 	}
3676 
3677 	HXGE_DEBUG_MSG((hxgep, INT_CTL,
3678 	    "ddi_intr_get_navail() returned: intr type %d nintrs %d, navail %d",
3679 	    int_type, nintrs, navail));
3680 
3681 	if (int_type == DDI_INTR_TYPE_MSI && !ISP2(navail)) {
3682 		/* MSI must be power of 2 */
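		/*
		 * The bit tests below round navail down to the
		 * largest of 16, 8, 4 or 2 whose bit is set in navail
		 * (for navail < 32 that is the largest power of two
		 * not exceeding it, e.g. 12 -> 8); anything else
		 * collapses to 1.
		 */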
3683 		if ((navail & 16) == 16) {
3684 			navail = 16;
3685 		} else if ((navail & 8) == 8) {
3686 			navail = 8;
3687 		} else if ((navail & 4) == 4) {
3688 			navail = 4;
3689 		} else if ((navail & 2) == 2) {
3690 			navail = 2;
3691 		} else {
3692 			navail = 1;
3693 		}
3694 		HXGE_DEBUG_MSG((hxgep, INT_CTL,
3695 		    "ddi_intr_get_navail(): (msi power of 2) nintrs %d, "
3696 		    "navail %d", nintrs, navail));
3697 	}
3698 
3699 	HXGE_DEBUG_MSG((hxgep, INT_CTL,
3700 	    "requesting: intr type %d nintrs %d, navail %d",
3701 	    int_type, nintrs, navail));
3702 
3703 	behavior = ((int_type == DDI_INTR_TYPE_FIXED) ? DDI_INTR_ALLOC_STRICT :
3704 	    DDI_INTR_ALLOC_NORMAL);
3705 	intrp->intr_size = navail * sizeof (ddi_intr_handle_t);
3706 	intrp->htable = kmem_zalloc(intrp->intr_size, KM_SLEEP);
3707 
3708 	ddi_status = ddi_intr_alloc(dip, intrp->htable, int_type, inum,
3709 	    navail, &nactual, behavior);
3710 	if (ddi_status != DDI_SUCCESS || nactual == 0) {
3711 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3712 		    " ddi_intr_alloc() failed: %d", ddi_status));
3713 		kmem_free(intrp->htable, intrp->intr_size);
3714 		return (HXGE_ERROR | HXGE_DDI_FAILED);
3715 	}
3716 
3717 	HXGE_DEBUG_MSG((hxgep, INT_CTL,
3718 	    "ddi_intr_alloc() returned: navail %d nactual %d",
3719 	    navail, nactual));
3720 
3721 	if ((ddi_status = ddi_intr_get_pri(intrp->htable[0],
3722 	    (uint_t *)&intrp->pri)) != DDI_SUCCESS) {
3723 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3724 		    " ddi_intr_get_pri() failed: %d", ddi_status));
3725 		/* Free already allocated interrupts */
3726 		for (y = 0; y < nactual; y++) {
3727 			(void) ddi_intr_free(intrp->htable[y]);
3728 		}
3729 
3730 		kmem_free(intrp->htable, intrp->intr_size);
3731 		return (HXGE_ERROR | HXGE_DDI_FAILED);
3732 	}
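
	/*
	 * intrp->pri is the priority the handlers will run at.  Locks
	 * shared with those handlers are expected to be initialized
	 * against it, following the usual DDI pattern (a sketch, not
	 * necessarily this driver's exact code):
	 *
	 *	MUTEX_INIT(&lock, NULL, MUTEX_DRIVER,
	 *	    DDI_INTR_PRI(intrp->pri));
	 */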
3733 
3734 	nrequired = 0;
3735 	status = hxge_ldgv_init(hxgep, &nactual, &nrequired);
3736 	if (status != HXGE_OK) {
3737 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3738 		    "hxge_add_intrs_adv_typ:hxge_ldgv_init "
3739 		    "failed: 0x%x", status));
3740 		/* Free already allocated interrupts */
3741 		for (y = 0; y < nactual; y++) {
3742 			(void) ddi_intr_free(intrp->htable[y]);
3743 		}
3744 
3745 		kmem_free(intrp->htable, intrp->intr_size);
3746 		return (status);
3747 	}
3748 
3749 	ldgp = hxgep->ldgvp->ldgp;
3750 	HXGE_DEBUG_MSG((hxgep, INT_CTL,
3751 	    "After hxge_ldgv_init(): nreq %d nactual %d", nrequired, nactual));
3752 
3753 	if (nactual < nrequired)
3754 		loop = nactual;
3755 	else
3756 		loop = nrequired;
3757 
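	/*
	 * Attach one handler per logical device group: a group with a
	 * single logical device gets that device's own handler, while
	 * a group with several devices sharing the vector gets the
	 * group-level sys_intr_handler.
	 */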
3758 	for (x = 0; x < loop; x++, ldgp++) {
3759 		ldgp->vector = (uint8_t)x;
3760 		arg1 = ldgp->ldvp;
3761 		arg2 = hxgep;
3762 		if (ldgp->nldvs == 1) {
3763 			inthandler = (uint_t *)ldgp->ldvp->ldv_intr_handler;
3764 			HXGE_DEBUG_MSG((hxgep, INT_CTL,
3765 			    "hxge_add_intrs_adv_type: arg1 0x%x arg2 0x%x: "
3766 			    "1-1 int handler (entry %d)\n",
3767 			    arg1, arg2, x));
3768 		} else if (ldgp->nldvs > 1) {
3769 			inthandler = (uint_t *)ldgp->sys_intr_handler;
3770 			HXGE_DEBUG_MSG((hxgep, INT_CTL,
3771 			    "hxge_add_intrs_adv_type: arg1 0x%x arg2 0x%x: "
3772 			    "nldevs %d int handler (entry %d)\n",
3773 			    arg1, arg2, ldgp->nldvs, x));
3774 		}
3775 		HXGE_DEBUG_MSG((hxgep, INT_CTL,
3776 		    "==> hxge_add_intrs_adv_type: ddi_add_intr(inum) #%d "
3777 		    "htable 0x%llx", x, intrp->htable[x]));
3778 
3779 		if ((ddi_status = ddi_intr_add_handler(intrp->htable[x],
3780 		    (ddi_intr_handler_t *)inthandler, arg1, arg2)) !=
3781 		    DDI_SUCCESS) {
3782 			HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3783 			    "==> hxge_add_intrs_adv_type: failed #%d "
3784 			    "status 0x%x", x, ddi_status));
3785 			for (y = 0; y < intrp->intr_added; y++) {
3786 				(void) ddi_intr_remove_handler(
3787 				    intrp->htable[y]);
3788 			}
3789 
3790 			/* Free already allocated intr */
3791 			for (y = 0; y < nactual; y++) {
3792 				(void) ddi_intr_free(intrp->htable[y]);
3793 			}
3794 			kmem_free(intrp->htable, intrp->intr_size);
3795 
3796 			(void) hxge_ldgv_uninit(hxgep);
3797 
3798 			return (HXGE_ERROR | HXGE_DDI_FAILED);
3799 		}
3800 
3801 		intrp->intr_added++;
3802 	}
3803 	intrp->msi_intx_cnt = nactual;
3804 
3805 	HXGE_DEBUG_MSG((hxgep, INT_CTL,
3806 	    "Requested: %d, Allowed: %d msi_intx_cnt %d intr_added %d",
3807 	    navail, nactual, intrp->msi_intx_cnt, intrp->intr_added));
3808 
3809 	(void) ddi_intr_get_cap(intrp->htable[0], &intrp->intr_cap);
3810 	(void) hxge_intr_ldgv_init(hxgep);
3811 
3812 	HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_add_intrs_adv_type"));
3813 
3814 	return (status);
3815 }
3816 
3817 /*ARGSUSED*/
3818 static hxge_status_t
3819 hxge_add_intrs_adv_type_fix(p_hxge_t hxgep, uint32_t int_type)
3820 {
3821 	dev_info_t	*dip = hxgep->dip;
3822 	p_hxge_ldg_t	ldgp;
3823 	p_hxge_intr_t	intrp;
3824 	uint_t		*inthandler;
3825 	void		*arg1, *arg2;
3826 	int		behavior;
3827 	int		nintrs, navail;
3828 	int		nactual, nrequired;
3829 	int		inum = 0;
3830 	int		x, y;
3831 	int		ddi_status = DDI_SUCCESS;
3832 	hxge_status_t	status = HXGE_OK;
3833 
3834 	HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_add_intrs_adv_type_fix"));
3835 	intrp = (p_hxge_intr_t)&hxgep->hxge_intr_type;
3836 
3837 	ddi_status = ddi_intr_get_nintrs(dip, int_type, &nintrs);
3838 	if ((ddi_status != DDI_SUCCESS) || (nintrs == 0)) {
3839 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3840 		    "ddi_intr_get_nintrs() failed, status: 0x%x, "
3841 		    "nintrs: %d", ddi_status, nintrs));
3842 		return (HXGE_ERROR | HXGE_DDI_FAILED);
3843 	}
3844 
3845 	ddi_status = ddi_intr_get_navail(dip, int_type, &navail);
3846 	if ((ddi_status != DDI_SUCCESS) || (navail == 0)) {
3847 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3848 		    "ddi_intr_get_navail() failed, status: 0x%x%, "
3849 		    "nintrs: %d", ddi_status, navail));
3850 		return (HXGE_ERROR | HXGE_DDI_FAILED);
3851 	}
3852 
3853 	HXGE_DEBUG_MSG((hxgep, INT_CTL,
3854 	    "ddi_intr_get_navail() returned: nintrs %d, naavail %d",
3855 	    nintrs, navail));
3856 
3857 	behavior = ((int_type == DDI_INTR_TYPE_FIXED) ? DDI_INTR_ALLOC_STRICT :
3858 	    DDI_INTR_ALLOC_NORMAL);
3859 	intrp->intr_size = navail * sizeof (ddi_intr_handle_t);
3860 	intrp->htable = kmem_zalloc(intrp->intr_size, KM_SLEEP);
3861 	ddi_status = ddi_intr_alloc(dip, intrp->htable, int_type, inum,
3862 	    navail, &nactual, behavior);
3863 	if (ddi_status != DDI_SUCCESS || nactual == 0) {
3864 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3865 		    " ddi_intr_alloc() failed: %d", ddi_status));
3866 		kmem_free(intrp->htable, intrp->intr_size);
3867 		return (HXGE_ERROR | HXGE_DDI_FAILED);
3868 	}
3869 
3870 	if ((ddi_status = ddi_intr_get_pri(intrp->htable[0],
3871 	    (uint_t *)&intrp->pri)) != DDI_SUCCESS) {
3872 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3873 		    " ddi_intr_get_pri() failed: %d", ddi_status));
3874 		/* Free already allocated interrupts */
3875 		for (y = 0; y < nactual; y++) {
3876 			(void) ddi_intr_free(intrp->htable[y]);
3877 		}
3878 
3879 		kmem_free(intrp->htable, intrp->intr_size);
3880 		return (HXGE_ERROR | HXGE_DDI_FAILED);
3881 	}
3882 
3883 	nrequired = 0;
3884 	status = hxge_ldgv_init(hxgep, &nactual, &nrequired);
3885 	if (status != HXGE_OK) {
3886 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3887 		    "hxge_add_intrs_adv_type_fix:hxge_ldgv_init "
3888 		    "failed: 0x%x", status));
3889 		/* Free already allocated interrupts */
3890 		for (y = 0; y < nactual; y++) {
3891 			(void) ddi_intr_free(intrp->htable[y]);
3892 		}
3893 
3894 		kmem_free(intrp->htable, intrp->intr_size);
3895 		return (status);
3896 	}
3897 
3898 	ldgp = hxgep->ldgvp->ldgp;
3899 	for (x = 0; x < nrequired; x++, ldgp++) {
3900 		ldgp->vector = (uint8_t)x;
3901 		arg1 = ldgp->ldvp;
3902 		arg2 = hxgep;
3903 		if (ldgp->nldvs == 1) {
3904 			inthandler = (uint_t *)ldgp->ldvp->ldv_intr_handler;
3905 			HXGE_DEBUG_MSG((hxgep, INT_CTL,
3906 			    "hxge_add_intrs_adv_type_fix: "
3907 			    "1-1 int handler(%d) ldg %d ldv %d "
3908 			    "arg1 $%p arg2 $%p\n",
3909 			    x, ldgp->ldg, ldgp->ldvp->ldv, arg1, arg2));
3910 		} else if (ldgp->nldvs > 1) {
3911 			inthandler = (uint_t *)ldgp->sys_intr_handler;
3912 			HXGE_DEBUG_MSG((hxgep, INT_CTL,
3913 			    "hxge_add_intrs_adv_type_fix: "
3914 			    "shared ldv %d int handler(%d) ldv %d ldg %d"
3915 			    "arg1 0x%016llx arg2 0x%016llx\n",
3916 			    x, ldgp->nldvs, ldgp->ldg, ldgp->ldvp->ldv,
3917 			    arg1, arg2));
3918 		}
3919 
3920 		if ((ddi_status = ddi_intr_add_handler(intrp->htable[x],
3921 		    (ddi_intr_handler_t *)inthandler, arg1, arg2)) !=
3922 		    DDI_SUCCESS) {
3923 			HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
3924 			    "==> hxge_add_intrs_adv_type_fix: failed #%d "
3925 			    "status 0x%x", x, ddi_status));
3926 			for (y = 0; y < intrp->intr_added; y++) {
3927 				(void) ddi_intr_remove_handler(
3928 				    intrp->htable[y]);
3929 			}
3930 			for (y = 0; y < nactual; y++) {
3931 				(void) ddi_intr_free(intrp->htable[y]);
3932 			}
3933 			/* Free already allocated intr */
3934 			kmem_free(intrp->htable, intrp->intr_size);
3935 
3936 			(void) hxge_ldgv_uninit(hxgep);
3937 
3938 			return (HXGE_ERROR | HXGE_DDI_FAILED);
3939 		}
3940 		intrp->intr_added++;
3941 	}
3942 
3943 	intrp->msi_intx_cnt = nactual;
3944 
3945 	(void) ddi_intr_get_cap(intrp->htable[0], &intrp->intr_cap);
3946 
3947 	status = hxge_intr_ldgv_init(hxgep);
3948 
3949 	HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_add_intrs_adv_type_fix"));
3950 
3951 	return (status);
3952 }
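
/*
 * Teardown in hxge_remove_intrs() below mirrors the setup above in
 * reverse: disable the vectors (block-disable when DDI_INTR_FLAG_BLOCK
 * is set), remove each handler, free each allocated vector, release
 * the handle table, and finally undo the logical device group state
 * with hxge_ldgv_uninit().
 */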
3953 
3954 /*ARGSUSED*/
3955 static void
3956 hxge_remove_intrs(p_hxge_t hxgep)
3957 {
3958 	int		i, inum;
3959 	p_hxge_intr_t	intrp;
3960 
3961 	HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_remove_intrs"));
3962 	intrp = (p_hxge_intr_t)&hxgep->hxge_intr_type;
3963 	if (!intrp->intr_registered) {
3964 		HXGE_DEBUG_MSG((hxgep, INT_CTL,
3965 		    "<== hxge_remove_intrs: interrupts not registered"));
3966 		return;
3967 	}
3968 
3969 	HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_remove_intrs:advanced"));
3970 
3971 	if (intrp->intr_cap & DDI_INTR_FLAG_BLOCK) {
3972 		(void) ddi_intr_block_disable(intrp->htable,
3973 		    intrp->intr_added);
3974 	} else {
3975 		for (i = 0; i < intrp->intr_added; i++) {
3976 			(void) ddi_intr_disable(intrp->htable[i]);
3977 		}
3978 	}
3979 
3980 	for (inum = 0; inum < intrp->intr_added; inum++) {
3981 		if (intrp->htable[inum]) {
3982 			(void) ddi_intr_remove_handler(intrp->htable[inum]);
3983 		}
3984 	}
3985 
3986 	for (inum = 0; inum < intrp->msi_intx_cnt; inum++) {
3987 		if (intrp->htable[inum]) {
3988 			HXGE_DEBUG_MSG((hxgep, DDI_CTL,
3989 			    "hxge_remove_intrs: ddi_intr_free inum %d "
3990 			    "msi_intx_cnt %d intr_added %d",
3991 			    inum, intrp->msi_intx_cnt, intrp->intr_added));
3992 
3993 			(void) ddi_intr_free(intrp->htable[inum]);
3994 		}
3995 	}
3996 
3997 	kmem_free(intrp->htable, intrp->intr_size);
3998 	intrp->intr_registered = B_FALSE;
3999 	intrp->intr_enabled = B_FALSE;
4000 	intrp->msi_intx_cnt = 0;
4001 	intrp->intr_added = 0;
4002 
4003 	(void) hxge_ldgv_uninit(hxgep);
4004 
4005 	HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_remove_intrs"));
4006 }
4007 
4008 /*ARGSUSED*/
4009 static void
4010 hxge_remove_soft_intrs(p_hxge_t hxgep)
4011 {
4012 	HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_remove_soft_intrs"));
4013 
4014 	if (hxgep->resched_id) {
4015 		ddi_remove_softintr(hxgep->resched_id);
4016 		HXGE_DEBUG_MSG((hxgep, INT_CTL,
4017 		    "==> hxge_remove_soft_intrs: removed"));
4018 		hxgep->resched_id = NULL;
4019 	}
4020 
4021 	HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_remove_soft_intrs"));
4022 }
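
/*
 * DDI_INTR_FLAG_BLOCK in intr_cap indicates the allocated vectors can
 * only be enabled and disabled as a group, so the routines below use
 * ddi_intr_block_enable()/ddi_intr_block_disable() in that case and
 * fall back to per-vector ddi_intr_enable()/ddi_intr_disable()
 * otherwise.
 */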
4023 
4024 /*ARGSUSED*/
4025 void
4026 hxge_intrs_enable(p_hxge_t hxgep)
4027 {
4028 	p_hxge_intr_t	intrp;
4029 	int		i;
4030 	int		status;
4031 
4032 	HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_intrs_enable"));
4033 
4034 	intrp = (p_hxge_intr_t)&hxgep->hxge_intr_type;
4035 
4036 	if (!intrp->intr_registered) {
4037 		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, "<== hxge_intrs_enable: "
4038 		    "interrupts are not registered"));
4039 		return;
4040 	}
4041 
4042 	if (intrp->intr_enabled) {
4043 		HXGE_DEBUG_MSG((hxgep, INT_CTL,
4044 		    "<== hxge_intrs_enable: already enabled"));
4045 		return;
4046 	}
4047 
4048 	if (intrp->intr_cap & DDI_INTR_FLAG_BLOCK) {
4049 		status = ddi_intr_block_enable(intrp->htable,
4050 		    intrp->intr_added);
4051 		HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_intrs_enable "
4052 		    "block enable - status 0x%x total inums #%d\n",
4053 		    status, intrp->intr_added));
4054 	} else {
4055 		for (i = 0; i < intrp->intr_added; i++) {
4056 			status = ddi_intr_enable(intrp->htable[i]);
4057 			HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_intrs_enable "
4058 			    "ddi_intr_enable:enable - status 0x%x "
4059 			    "total inums %d enable inum #%d\n",
4060 			    status, intrp->intr_added, i));
4061 			if (status == DDI_SUCCESS) {
4062 				intrp->intr_enabled = B_TRUE;
4063 			}
4064 		}
4065 	}
4066 
4067 	HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_intrs_enable"));
4068 }
4069 
4070 /*ARGSUSED*/
4071 static void
4072 hxge_intrs_disable(p_hxge_t hxgep)
4073 {
4074 	p_hxge_intr_t	intrp;
4075 	int		i;
4076 
4077 	HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_intrs_disable"));
4078 
4079 	intrp = (p_hxge_intr_t)&hxgep->hxge_intr_type;
4080 
4081 	if (!intrp->intr_registered) {
4082 		HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_intrs_disable: "
4083 		    "interrupts are not registered"));
4084 		return;
4085 	}
4086 
4087 	if (intrp->intr_cap & DDI_INTR_FLAG_BLOCK) {
4088 		(void) ddi_intr_block_disable(intrp->htable,
4089 		    intrp->intr_added);
4090 	} else {
4091 		for (i = 0; i < intrp->intr_added; i++) {
4092 			(void) ddi_intr_disable(intrp->htable[i]);
4093 		}
4094 	}
4095 
4096 	intrp->intr_enabled = B_FALSE;
4097 	HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_intrs_disable"));
4098 }
4099 
4100 static hxge_status_t
4101 hxge_mac_register(p_hxge_t hxgep)
4102 {
4103 	mac_register_t	*macp;
4104 	int		status;
4105 
4106 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_mac_register"));
4107 
4108 	if ((macp = mac_alloc(MAC_VERSION)) == NULL)
4109 		return (HXGE_ERROR);
4110 
4111 	macp->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
4112 	macp->m_driver = hxgep;
4113 	macp->m_dip = hxgep->dip;
4114 	macp->m_src_addr = hxgep->ouraddr.ether_addr_octet;
4115 
4116 	HXGE_DEBUG_MSG((hxgep, DDI_CTL,
4117 	    "hxge_mac_register: ether addr is %x:%x:%x:%x:%x:%x",
4118 	    macp->m_src_addr[0],
4119 	    macp->m_src_addr[1],
4120 	    macp->m_src_addr[2],
4121 	    macp->m_src_addr[3],
4122 	    macp->m_src_addr[4],
4123 	    macp->m_src_addr[5]));
4124 
4125 	macp->m_callbacks = &hxge_m_callbacks;
4126 	macp->m_min_sdu = 0;
4127 	macp->m_max_sdu = hxgep->vmac.maxframesize - MTU_TO_FRAME_SIZE;
4128 	macp->m_margin = VLAN_TAGSZ;
4129 	macp->m_priv_props = hxge_priv_props;
4130 	macp->m_priv_prop_count = HXGE_MAX_PRIV_PROPS;
4131 
4132 	status = mac_register(macp, &hxgep->mach);
4133 	mac_free(macp);
4134 
4135 	if (status != 0) {
4136 		cmn_err(CE_WARN,
4137 		    "hxge_mac_register failed (status %d instance %d)",
4138 		    status, hxgep->instance);
4139 		return (HXGE_ERROR);
4140 	}
4141 
4142 	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_mac_register success "
4143 	    "(instance %d)", hxgep->instance));
4144 
4145 	return (HXGE_OK);
4146 }
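
/*
 * hxge_m_callbacks, wired in above, is the driver's GLDv3
 * mac_callbacks_t: the table of entry points (mc_getstat, mc_start,
 * mc_stop, mc_unicst, mc_multicst, mc_tx and friends) that the mac
 * layer invokes once mac_register() succeeds.  The registration
 * lifecycle, in outline (the detach-side call is assumed to live in
 * hxge_unattach()):
 *
 *	macp = mac_alloc(MAC_VERSION);
 *	... fill in m_driver, m_dip, m_src_addr, m_callbacks ...
 *	status = mac_register(macp, &hxgep->mach);
 *	mac_free(macp);
 *	...
 *	(void) mac_unregister(hxgep->mach);	(on detach)
 */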
4147 
4148 static int
4149 hxge_init_common_dev(p_hxge_t hxgep)
4150 {
4151 	p_hxge_hw_list_t	hw_p;
4152 	dev_info_t		*p_dip;
4153 
4154 	HXGE_DEBUG_MSG((hxgep, MOD_CTL, "==> hxge_init_common_dev"));
4155 
4156 	p_dip = hxgep->p_dip;
4157 	MUTEX_ENTER(&hxge_common_lock);
4158 
4159 	/*
4160 	 * Loop through existing per Hydra hardware list.
4161 	 */
4162 	for (hw_p = hxge_hw_list; hw_p; hw_p = hw_p->next) {
4163 		HXGE_DEBUG_MSG((hxgep, MOD_CTL,
4164 		    "==> hxge_init_common_dev: hw_p $%p parent dip $%p",
4165 		    hw_p, p_dip));
4166 		if (hw_p->parent_devp == p_dip) {
4167 			hxgep->hxge_hw_p = hw_p;
4168 			hw_p->ndevs++;
4169 			hw_p->hxge_p = hxgep;
4170 			HXGE_DEBUG_MSG((hxgep, MOD_CTL,
4171 			    "==> hxge_init_common_device: "
4172 			    "hw_p $%p parent dip $%p ndevs %d (found)",
4173 			    hw_p, p_dip, hw_p->ndevs));
4174 			break;
4175 		}
4176 	}
4177 
4178 	if (hw_p == NULL) {
4179 		HXGE_DEBUG_MSG((hxgep, MOD_CTL,
4180 		    "==> hxge_init_common_dev: parent dip $%p (new)", p_dip));
4181 		hw_p = kmem_zalloc(sizeof (hxge_hw_list_t), KM_SLEEP);
4182 		hw_p->parent_devp = p_dip;
4183 		hw_p->magic = HXGE_MAGIC;
4184 		hxgep->hxge_hw_p = hw_p;
4185 		hw_p->ndevs++;
4186 		hw_p->hxge_p = hxgep;
4187 		hw_p->next = hxge_hw_list;
4188 
4189 		MUTEX_INIT(&hw_p->hxge_cfg_lock, NULL, MUTEX_DRIVER, NULL);
4190 		MUTEX_INIT(&hw_p->hxge_tcam_lock, NULL, MUTEX_DRIVER, NULL);
4191 		MUTEX_INIT(&hw_p->hxge_vlan_lock, NULL, MUTEX_DRIVER, NULL);
4192 
4193 		hxge_hw_list = hw_p;
4194 	}
4195 	MUTEX_EXIT(&hxge_common_lock);
4196 	HXGE_DEBUG_MSG((hxgep, MOD_CTL,
4197 	    "==> hxge_init_common_dev (hxge_hw_list) $%p", hxge_hw_list));
4198 	HXGE_DEBUG_MSG((hxgep, MOD_CTL, "<== hxge_init_common_dev"));
4199 
4200 	return (HXGE_OK);
4201 }
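
/*
 * The resulting global state is a head-inserted singly linked list
 * with one hxge_hw_list_t per parent dip, ndevs counting the hxge
 * instances that share it:
 *
 *	hxge_hw_list --> { parent_devp, ndevs, cfg/tcam/vlan locks }
 *	             --> ... --> NULL
 *
 * hxge_uninit_common_dev() below drops a reference and unlinks and
 * frees a node once its ndevs count reaches zero.
 */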
4202 
4203 static void
4204 hxge_uninit_common_dev(p_hxge_t hxgep)
4205 {
4206 	p_hxge_hw_list_t	hw_p, h_hw_p;
4207 	dev_info_t		*p_dip;
4208 
4209 	HXGE_DEBUG_MSG((hxgep, MOD_CTL, "==> hxge_uninit_common_dev"));
4210 	if (hxgep->hxge_hw_p == NULL) {
4211 		HXGE_DEBUG_MSG((hxgep, MOD_CTL,
4212 		    "<== hxge_uninit_common_dev (no common)"));
4213 		return;
4214 	}
4215 
4216 	MUTEX_ENTER(&hxge_common_lock);
4217 	h_hw_p = hxge_hw_list;
4218 	for (hw_p = hxge_hw_list; hw_p; hw_p = hw_p->next) {
4219 		p_dip = hw_p->parent_devp;
4220 		if (hxgep->hxge_hw_p == hw_p && p_dip == hxgep->p_dip &&
4221 		    hxgep->hxge_hw_p->magic == HXGE_MAGIC &&
4222 		    hw_p->magic == HXGE_MAGIC) {
4223 			HXGE_DEBUG_MSG((hxgep, MOD_CTL,
4224 			    "==> hxge_uninit_common_dev: "
4225 			    "hw_p $%p parent dip $%p ndevs %d (found)",
4226 			    hw_p, p_dip, hw_p->ndevs));
4227 
4228 			hxgep->hxge_hw_p = NULL;
4229 			if (hw_p->ndevs) {
4230 				hw_p->ndevs--;
4231 			}
4232 			hw_p->hxge_p = NULL;
4233 			if (!hw_p->ndevs) {
4234 				MUTEX_DESTROY(&hw_p->hxge_vlan_lock);
4235 				MUTEX_DESTROY(&hw_p->hxge_tcam_lock);
4236 				MUTEX_DESTROY(&hw_p->hxge_cfg_lock);
4237 				HXGE_DEBUG_MSG((hxgep, MOD_CTL,
4238 				    "==> hxge_uninit_common_dev: "
4239 				    "hw_p $%p parent dip $%p ndevs %d (last)",
4240 				    hw_p, p_dip, hw_p->ndevs));
4241 
4242 				if (hw_p == hxge_hw_list) {
4243 					HXGE_DEBUG_MSG((hxgep, MOD_CTL,
4244 					    "==> hxge_uninit_common_dev:"
4245 					    "remove head "
4246 					    "hw_p $%p parent dip $%p "
4247 					    "ndevs %d (head)",
4248 					    hw_p, p_dip, hw_p->ndevs));
4249 					hxge_hw_list = hw_p->next;
4250 				} else {
4251 					HXGE_DEBUG_MSG((hxgep, MOD_CTL,
4252 					    "==> hxge_uninit_common_dev:"
4253 					    "remove middle "
4254 					    "hw_p $%p parent dip $%p "
4255 					    "ndevs %d (middle)",
4256 					    hw_p, p_dip, hw_p->ndevs));
4257 					h_hw_p->next = hw_p->next;
4258 				}
4259 
4260 				KMEM_FREE(hw_p, sizeof (hxge_hw_list_t));
4261 			}
4262 			break;
4263 		} else {
4264 			h_hw_p = hw_p;
4265 		}
4266 	}
4267 
4268 	MUTEX_EXIT(&hxge_common_lock);
4269 	HXGE_DEBUG_MSG((hxgep, MOD_CTL,
4270 	    "==> hxge_uninit_common_dev (hxge_hw_list) $%p", hxge_hw_list));
4271 
4272 	HXGE_DEBUG_MSG((hxgep, MOD_CTL, "<= hxge_uninit_common_dev"));
4273 }
4274 
4275 static void
4276 hxge_link_poll(void *arg)
4277 {
4278 	p_hxge_t		hxgep = (p_hxge_t)arg;
4279 	hpi_handle_t		handle;
4280 	p_hxge_stats_t		statsp;
4281 	cip_link_stat_t		link_stat;
4282 	hxge_timeout		*to = &hxgep->timeout;
4283 
4284 	handle = HXGE_DEV_HPI_HANDLE(hxgep);
4285 	statsp = (p_hxge_stats_t)hxgep->statsp;
4286 	HXGE_REG_RD32(handle, CIP_LINK_STAT, &link_stat.value);
4287 
4288 	if (to->link_status != link_stat.bits.xpcs0_link_up) {
4289 		to->link_status = link_stat.bits.xpcs0_link_up;
4290 
4291 		if (link_stat.bits.xpcs0_link_up) {
4292 			mac_link_update(hxgep->mach, LINK_STATE_UP);
4293 			statsp->mac_stats.link_speed = 10000;
4294 			statsp->mac_stats.link_duplex = 2;
4295 			statsp->mac_stats.link_up = 1;
4296 		} else {
4297 			mac_link_update(hxgep->mach, LINK_STATE_DOWN);
4298 			statsp->mac_stats.link_speed = 0;
4299 			statsp->mac_stats.link_duplex = 0;
4300 			statsp->mac_stats.link_up = 0;
4301 		}
4302 	}
4303 
4304 	/* Restart the link status timer to check the link status */
4305 	MUTEX_ENTER(&to->lock);
4306 	to->id = timeout(hxge_link_poll, arg, to->ticks);
4307 	MUTEX_EXIT(&to->lock);
4308 }
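
/*
 * The initial arming of this poll happens where hxgep->timeout is set
 * up, elsewhere in the driver.  A minimal sketch of such arming,
 * assuming a hypothetical LINK_POLL_USEC interval:
 *
 *	to->ticks = drv_usectohz(LINK_POLL_USEC);
 *	MUTEX_ENTER(&to->lock);
 *	to->id = timeout(hxge_link_poll, (void *)hxgep, to->ticks);
 *	MUTEX_EXIT(&to->lock);
 *
 * and detach would cancel the pending callout with
 * (void) untimeout(to->id).
 */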
4309