xref: /illumos-gate/usr/src/uts/common/io/igb/igb_gld.c (revision 85f496fabdffd32673f6be280a3caa103f7d58a5)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright(c) 2007-2010 Intel Corporation. All rights reserved.
24  */
25 
26 /*
27  * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
28  * Copyright 2013, Nexenta Systems, Inc. All rights reserved.
29  * Copyright 2014 Pluribus Networks Inc.
30  * Copyright 2016 OmniTI Computer Consulting, Inc. All rights reserved.
31  * Copyright (c) 2017, Joyent, Inc.
32  */
33 
34 #include "igb_sw.h"
35 
36 int
37 igb_m_stat(void *arg, uint_t stat, uint64_t *val)
38 {
39 	igb_t *igb = (igb_t *)arg;
40 	struct e1000_hw *hw = &igb->hw;
41 	igb_stat_t *igb_ks;
42 	uint32_t low_val, high_val;
43 
44 	igb_ks = (igb_stat_t *)igb->igb_ks->ks_data;
45 
46 	mutex_enter(&igb->gen_lock);
47 
48 	if (igb->igb_state & IGB_SUSPENDED) {
49 		mutex_exit(&igb->gen_lock);
50 		return (ECANCELED);
51 	}
52 
53 	switch (stat) {
54 	case MAC_STAT_IFSPEED:
55 		*val = igb->link_speed * 1000000ull;
56 		break;
57 
58 	case MAC_STAT_MULTIRCV:
59 		igb->stat_mprc += E1000_READ_REG(hw, E1000_MPRC);
60 		*val = igb->stat_mprc;
61 		break;
62 
63 	case MAC_STAT_BRDCSTRCV:
64 		igb->stat_bprc += E1000_READ_REG(hw, E1000_BPRC);
65 		*val = igb->stat_bprc;
66 		break;
67 
68 	case MAC_STAT_MULTIXMT:
69 		igb->stat_mptc += E1000_READ_REG(hw, E1000_MPTC);
70 		*val = igb->stat_mptc;
71 		break;
72 
73 	case MAC_STAT_BRDCSTXMT:
74 		igb->stat_bptc += E1000_READ_REG(hw, E1000_BPTC);
75 		*val = igb->stat_bptc;
76 		break;
77 
78 	case MAC_STAT_NORCVBUF:
79 		igb->stat_rnbc += E1000_READ_REG(hw, E1000_RNBC);
80 		*val = igb->stat_rnbc;
81 		break;
82 
83 	case MAC_STAT_IERRORS:
84 		igb->stat_rxerrc += E1000_READ_REG(hw, E1000_RXERRC);
85 		igb->stat_algnerrc += E1000_READ_REG(hw, E1000_ALGNERRC);
86 		igb_ks->rlec.value.ui64 +=
87 		    E1000_READ_REG(hw, E1000_RLEC);
88 		igb->stat_crcerrs += E1000_READ_REG(hw, E1000_CRCERRS);
89 		igb->stat_cexterr += E1000_READ_REG(hw, E1000_CEXTERR);
90 		*val = igb->stat_rxerrc +
91 		    igb->stat_algnerrc +
92 		    igb_ks->rlec.value.ui64 +
93 		    igb->stat_crcerrs +
94 		    igb->stat_cexterr;
95 		break;
96 
97 	case MAC_STAT_NOXMTBUF:
98 		*val = 0;
99 		break;
100 
101 	case MAC_STAT_OERRORS:
102 		igb->stat_ecol += E1000_READ_REG(hw, E1000_ECOL);
103 		*val = igb->stat_ecol;
104 		break;
105 
106 	case MAC_STAT_COLLISIONS:
107 		igb->stat_colc += E1000_READ_REG(hw, E1000_COLC);
108 		*val = igb->stat_colc;
109 		break;
110 
111 	case MAC_STAT_RBYTES:
112 		/*
113 		 * The 64-bit register will reset whenever the upper
114 		 * 32 bits are read. So we need to read the lower
115 		 * 32 bits first, then read the upper 32 bits.
116 		 */
117 		low_val = E1000_READ_REG(hw, E1000_TORL);
118 		high_val = E1000_READ_REG(hw, E1000_TORH);
119 		igb->stat_tor += (uint64_t)high_val << 32 | (uint64_t)low_val;
120 		*val = igb->stat_tor;
121 		break;
122 
123 	case MAC_STAT_IPACKETS:
124 		igb->stat_tpr += E1000_READ_REG(hw, E1000_TPR);
125 		*val = igb->stat_tpr;
126 		break;
127 
128 	case MAC_STAT_OBYTES:
129 		/*
130 		 * The 64-bit register will reset whenever the upper
131 		 * 32 bits are read. So we need to read the lower
132 		 * 32 bits first, then read the upper 32 bits.
133 		 */
134 		low_val = E1000_READ_REG(hw, E1000_TOTL);
135 		high_val = E1000_READ_REG(hw, E1000_TOTH);
136 		igb->stat_tot += (uint64_t)high_val << 32 | (uint64_t)low_val;
137 		*val = igb->stat_tot;
138 		break;
139 
140 	case MAC_STAT_OPACKETS:
141 		igb->stat_tpt += E1000_READ_REG(hw, E1000_TPT);
142 		*val = igb->stat_tpt;
143 		break;
144 
145 	/* RFC 1643 stats */
146 	case ETHER_STAT_ALIGN_ERRORS:
147 		igb->stat_algnerrc += E1000_READ_REG(hw, E1000_ALGNERRC);
148 		*val = igb->stat_algnerrc;
149 		break;
150 
151 	case ETHER_STAT_FCS_ERRORS:
152 		igb->stat_crcerrs += E1000_READ_REG(hw, E1000_CRCERRS);
153 		*val = igb->stat_crcerrs;
154 		break;
155 
156 	case ETHER_STAT_FIRST_COLLISIONS:
157 		igb->stat_scc += E1000_READ_REG(hw, E1000_SCC);
158 		*val = igb->stat_scc;
159 		break;
160 
161 	case ETHER_STAT_MULTI_COLLISIONS:
162 		igb->stat_mcc += E1000_READ_REG(hw, E1000_MCC);
163 		*val = igb->stat_mcc;
164 		break;
165 
166 	case ETHER_STAT_SQE_ERRORS:
167 		igb->stat_sec += E1000_READ_REG(hw, E1000_SEC);
168 		*val = igb->stat_sec;
169 		break;
170 
171 	case ETHER_STAT_DEFER_XMTS:
172 		igb->stat_dc += E1000_READ_REG(hw, E1000_DC);
173 		*val = igb->stat_dc;
174 		break;
175 
176 	case ETHER_STAT_TX_LATE_COLLISIONS:
177 		igb->stat_latecol += E1000_READ_REG(hw, E1000_LATECOL);
178 		*val = igb->stat_latecol;
179 		break;
180 
181 	case ETHER_STAT_EX_COLLISIONS:
182 		igb->stat_ecol += E1000_READ_REG(hw, E1000_ECOL);
183 		*val = igb->stat_ecol;
184 		break;
185 
186 	case ETHER_STAT_MACXMT_ERRORS:
187 		igb->stat_ecol += E1000_READ_REG(hw, E1000_ECOL);
188 		*val = igb->stat_ecol;
189 		break;
190 
191 	case ETHER_STAT_CARRIER_ERRORS:
192 		igb->stat_cexterr += E1000_READ_REG(hw, E1000_CEXTERR);
193 		*val = igb->stat_cexterr;
194 		break;
195 
196 	case ETHER_STAT_TOOLONG_ERRORS:
197 		igb->stat_roc += E1000_READ_REG(hw, E1000_ROC);
198 		*val = igb->stat_roc;
199 		break;
200 
201 	case ETHER_STAT_MACRCV_ERRORS:
202 		igb->stat_rxerrc += E1000_READ_REG(hw, E1000_RXERRC);
203 		*val = igb->stat_rxerrc;
204 		break;
205 
206 	/* MII/GMII stats */
207 	case ETHER_STAT_XCVR_ADDR:
208 		/* The Internal PHY's MDI address for each MAC is 1 */
209 		*val = 1;
210 		break;
211 
212 	case ETHER_STAT_XCVR_ID:
213 		*val = hw->phy.id | hw->phy.revision;
214 		break;
215 
216 	case ETHER_STAT_XCVR_INUSE:
217 		switch (igb->link_speed) {
218 		case SPEED_1000:
219 			*val =
220 			    (hw->phy.media_type == e1000_media_type_copper) ?
221 			    XCVR_1000T : XCVR_1000X;
222 			break;
223 		case SPEED_100:
224 			*val =
225 			    (hw->phy.media_type == e1000_media_type_copper) ?
226 			    (igb->param_100t4_cap == 1) ?
227 			    XCVR_100T4 : XCVR_100T2 : XCVR_100X;
228 			break;
229 		case SPEED_10:
230 			*val = XCVR_10;
231 			break;
232 		default:
233 			*val = XCVR_NONE;
234 			break;
235 		}
236 		break;
237 
238 	case ETHER_STAT_CAP_1000FDX:
239 		*val = igb->param_1000fdx_cap;
240 		break;
241 
242 	case ETHER_STAT_CAP_1000HDX:
243 		*val = igb->param_1000hdx_cap;
244 		break;
245 
246 	case ETHER_STAT_CAP_100FDX:
247 		*val = igb->param_100fdx_cap;
248 		break;
249 
250 	case ETHER_STAT_CAP_100HDX:
251 		*val = igb->param_100hdx_cap;
252 		break;
253 
254 	case ETHER_STAT_CAP_10FDX:
255 		*val = igb->param_10fdx_cap;
256 		break;
257 
258 	case ETHER_STAT_CAP_10HDX:
259 		*val = igb->param_10hdx_cap;
260 		break;
261 
262 	case ETHER_STAT_CAP_ASMPAUSE:
263 		*val = igb->param_asym_pause_cap;
264 		break;
265 
266 	case ETHER_STAT_CAP_PAUSE:
267 		*val = igb->param_pause_cap;
268 		break;
269 
270 	case ETHER_STAT_CAP_AUTONEG:
271 		*val = igb->param_autoneg_cap;
272 		break;
273 
274 	case ETHER_STAT_ADV_CAP_1000FDX:
275 		*val = igb->param_adv_1000fdx_cap;
276 		break;
277 
278 	case ETHER_STAT_ADV_CAP_1000HDX:
279 		*val = igb->param_adv_1000hdx_cap;
280 		break;
281 
282 	case ETHER_STAT_ADV_CAP_100FDX:
283 		*val = igb->param_adv_100fdx_cap;
284 		break;
285 
286 	case ETHER_STAT_ADV_CAP_100HDX:
287 		*val = igb->param_adv_100hdx_cap;
288 		break;
289 
290 	case ETHER_STAT_ADV_CAP_10FDX:
291 		*val = igb->param_adv_10fdx_cap;
292 		break;
293 
294 	case ETHER_STAT_ADV_CAP_10HDX:
295 		*val = igb->param_adv_10hdx_cap;
296 		break;
297 
298 	case ETHER_STAT_ADV_CAP_ASMPAUSE:
299 		*val = igb->param_adv_asym_pause_cap;
300 		break;
301 
302 	case ETHER_STAT_ADV_CAP_PAUSE:
303 		*val = igb->param_adv_pause_cap;
304 		break;
305 
306 	case ETHER_STAT_ADV_CAP_AUTONEG:
307 		*val = hw->mac.autoneg;
308 		break;
309 
310 	case ETHER_STAT_LP_CAP_1000FDX:
311 		*val = igb->param_lp_1000fdx_cap;
312 		break;
313 
314 	case ETHER_STAT_LP_CAP_1000HDX:
315 		*val = igb->param_lp_1000hdx_cap;
316 		break;
317 
318 	case ETHER_STAT_LP_CAP_100FDX:
319 		*val = igb->param_lp_100fdx_cap;
320 		break;
321 
322 	case ETHER_STAT_LP_CAP_100HDX:
323 		*val = igb->param_lp_100hdx_cap;
324 		break;
325 
326 	case ETHER_STAT_LP_CAP_10FDX:
327 		*val = igb->param_lp_10fdx_cap;
328 		break;
329 
330 	case ETHER_STAT_LP_CAP_10HDX:
331 		*val = igb->param_lp_10hdx_cap;
332 		break;
333 
334 	case ETHER_STAT_LP_CAP_ASMPAUSE:
335 		*val = igb->param_lp_asym_pause_cap;
336 		break;
337 
338 	case ETHER_STAT_LP_CAP_PAUSE:
339 		*val = igb->param_lp_pause_cap;
340 		break;
341 
342 	case ETHER_STAT_LP_CAP_AUTONEG:
343 		*val = igb->param_lp_autoneg_cap;
344 		break;
345 
346 	case ETHER_STAT_LINK_ASMPAUSE:
347 		*val = igb->param_asym_pause_cap;
348 		break;
349 
350 	case ETHER_STAT_LINK_PAUSE:
351 		*val = igb->param_pause_cap;
352 		break;
353 
354 	case ETHER_STAT_LINK_AUTONEG:
355 		*val = hw->mac.autoneg;
356 		break;
357 
358 	case ETHER_STAT_LINK_DUPLEX:
359 		*val = (igb->link_duplex == FULL_DUPLEX) ?
360 		    LINK_DUPLEX_FULL : LINK_DUPLEX_HALF;
361 		break;
362 
363 	case ETHER_STAT_TOOSHORT_ERRORS:
364 		igb->stat_ruc += E1000_READ_REG(hw, E1000_RUC);
365 		*val = igb->stat_ruc;
366 		break;
367 
368 	case ETHER_STAT_CAP_REMFAULT:
369 		*val = igb->param_rem_fault;
370 		break;
371 
372 	case ETHER_STAT_ADV_REMFAULT:
373 		*val = igb->param_adv_rem_fault;
374 		break;
375 
376 	case ETHER_STAT_LP_REMFAULT:
377 		*val = igb->param_lp_rem_fault;
378 		break;
379 
380 	case ETHER_STAT_JABBER_ERRORS:
381 		igb->stat_rjc += E1000_READ_REG(hw, E1000_RJC);
382 		*val = igb->stat_rjc;
383 		break;
384 
385 	case ETHER_STAT_CAP_100T4:
386 		*val = igb->param_100t4_cap;
387 		break;
388 
389 	case ETHER_STAT_ADV_CAP_100T4:
390 		*val = igb->param_adv_100t4_cap;
391 		break;
392 
393 	case ETHER_STAT_LP_CAP_100T4:
394 		*val = igb->param_lp_100t4_cap;
395 		break;
396 
397 	default:
398 		mutex_exit(&igb->gen_lock);
399 		return (ENOTSUP);
400 	}
401 
402 	mutex_exit(&igb->gen_lock);
403 
404 	if (igb_check_acc_handle(igb->osdep.reg_handle) != DDI_FM_OK) {
405 		ddi_fm_service_impact(igb->dip, DDI_SERVICE_DEGRADED);
406 		return (EIO);
407 	}
408 
409 	return (0);
410 }
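
/*
 * A minimal sketch (hypothetical helper, not part of this driver) of the
 * read-low-then-high pattern used by the MAC_STAT_RBYTES and
 * MAC_STAT_OBYTES cases above.  The low 32 bits must be read first,
 * because reading the upper 32 bits resets the 64-bit counter:
 *
 *	static uint64_t
 *	igb_read_stat64(struct e1000_hw *hw, uint32_t lo_reg,
 *	    uint32_t hi_reg)
 *	{
 *		uint64_t lo = E1000_READ_REG(hw, lo_reg);
 *		uint64_t hi = E1000_READ_REG(hw, hi_reg);
 *
 *		return ((hi << 32) | lo);
 *	}
 *
 * The MAC_STAT_RBYTES case would then reduce to:
 *	igb->stat_tor += igb_read_stat64(hw, E1000_TORL, E1000_TORH);
 */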
411 
412 /*
413  * Bring the device out of the reset/quiesced state that it
414  * was in when the interface was registered.
415  */
416 int
417 igb_m_start(void *arg)
418 {
419 	igb_t *igb = (igb_t *)arg;
420 
421 	mutex_enter(&igb->gen_lock);
422 
423 	if (igb->igb_state & IGB_SUSPENDED) {
424 		mutex_exit(&igb->gen_lock);
425 		return (ECANCELED);
426 	}
427 
428 	if (igb_start(igb, B_TRUE) != IGB_SUCCESS) {
429 		mutex_exit(&igb->gen_lock);
430 		return (EIO);
431 	}
432 
433 	atomic_or_32(&igb->igb_state, IGB_STARTED);
434 
435 	mutex_exit(&igb->gen_lock);
436 
437 	/*
438 	 * Enable and start the watchdog timer
439 	 */
440 	igb_enable_watchdog_timer(igb);
441 
442 	return (0);
443 }
444 
445 /*
446  * Stop the device and put it in a reset/quiesced state such
447  * that the interface can be unregistered.
448  */
449 void
450 igb_m_stop(void *arg)
451 {
452 	igb_t *igb = (igb_t *)arg;
453 
454 	mutex_enter(&igb->gen_lock);
455 
456 	if (igb->igb_state & IGB_SUSPENDED) {
457 		mutex_exit(&igb->gen_lock);
458 		return;
459 	}
460 
461 	atomic_and_32(&igb->igb_state, ~IGB_STARTED);
462 
463 	igb_stop(igb, B_TRUE);
464 
465 	mutex_exit(&igb->gen_lock);
466 
467 	/*
468 	 * Disable and stop the watchdog timer
469 	 */
470 	igb_disable_watchdog_timer(igb);
471 }
472 
473 /*
474  * Set the promiscuity of the device.
475  */
476 int
477 igb_m_promisc(void *arg, boolean_t on)
478 {
479 	igb_t *igb = (igb_t *)arg;
480 	uint32_t reg_val;
481 
482 	mutex_enter(&igb->gen_lock);
483 
484 	if (igb->igb_state & IGB_SUSPENDED) {
485 		mutex_exit(&igb->gen_lock);
486 		return (ECANCELED);
487 	}
488 
489 	reg_val = E1000_READ_REG(&igb->hw, E1000_RCTL);
490 
491 	if (on)
492 		reg_val |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
493 	else
494 		reg_val &= (~(E1000_RCTL_UPE | E1000_RCTL_MPE));
495 
496 	E1000_WRITE_REG(&igb->hw, E1000_RCTL, reg_val);
497 
498 	mutex_exit(&igb->gen_lock);
499 
500 	if (igb_check_acc_handle(igb->osdep.reg_handle) != DDI_FM_OK) {
501 		ddi_fm_service_impact(igb->dip, DDI_SERVICE_DEGRADED);
502 		return (EIO);
503 	}
504 
505 	return (0);
506 }
507 
508 /*
509  * Add/remove the addresses to/from the set of multicast
510  * addresses for which the device will receive packets.
511  */
512 int
513 igb_m_multicst(void *arg, boolean_t add, const uint8_t *mcst_addr)
514 {
515 	igb_t *igb = (igb_t *)arg;
516 	int result;
517 
518 	mutex_enter(&igb->gen_lock);
519 
520 	if (igb->igb_state & IGB_SUSPENDED) {
521 		mutex_exit(&igb->gen_lock);
522 		return (ECANCELED);
523 	}
524 
525 	result = (add) ? igb_multicst_add(igb, mcst_addr)
526 	    : igb_multicst_remove(igb, mcst_addr);
527 
528 	mutex_exit(&igb->gen_lock);
529 
530 	return (result);
531 }
532 
533 /*
534  * Process M_IOCTL messages passed down from the DLD, and support
535  * private IOCTLs for debugging and ndd.
536  */
537 void
538 igb_m_ioctl(void *arg, queue_t *q, mblk_t *mp)
539 {
540 	igb_t *igb = (igb_t *)arg;
541 	struct iocblk *iocp;
542 	enum ioc_reply status;
543 
544 	iocp = (struct iocblk *)(uintptr_t)mp->b_rptr;
545 	iocp->ioc_error = 0;
546 
547 	mutex_enter(&igb->gen_lock);
548 	if (igb->igb_state & IGB_SUSPENDED) {
549 		mutex_exit(&igb->gen_lock);
550 		miocnak(q, mp, 0, EINVAL);
551 		return;
552 	}
553 	mutex_exit(&igb->gen_lock);
554 
555 	switch (iocp->ioc_cmd) {
556 	case LB_GET_INFO_SIZE:
557 	case LB_GET_INFO:
558 	case LB_GET_MODE:
559 	case LB_SET_MODE:
560 		status = igb_loopback_ioctl(igb, iocp, mp);
561 		break;
562 
563 	default:
564 		status = IOC_INVAL;
565 		break;
566 	}
567 
568 	/*
569 	 * Decide how to reply
570 	 */
571 	switch (status) {
572 	default:
573 	case IOC_INVAL:
574 		/*
575 		 * Error, reply with a NAK and EINVAL or the specified error
576 		 */
577 		miocnak(q, mp, 0, iocp->ioc_error == 0 ?
578 		    EINVAL : iocp->ioc_error);
579 		break;
580 
581 	case IOC_DONE:
582 		/*
583 		 * OK, reply already sent
584 		 */
585 		break;
586 
587 	case IOC_ACK:
588 		/*
589 		 * OK, reply with an ACK
590 		 */
591 		miocack(q, mp, 0, 0);
592 		break;
593 
594 	case IOC_REPLY:
595 		/*
596 		 * OK, send prepared reply as ACK or NAK
597 		 */
598 		mp->b_datap->db_type = iocp->ioc_error == 0 ?
599 		    M_IOCACK : M_IOCNAK;
600 		qreply(q, mp);
601 		break;
602 	}
603 }
604 
605 /*
606  * Add a MAC address to the target RX group.
607  */
608 static int
609 igb_addmac(void *arg, const uint8_t *mac_addr)
610 {
611 	igb_rx_group_t *rx_group = (igb_rx_group_t *)arg;
612 	igb_t *igb = rx_group->igb;
613 	struct e1000_hw *hw = &igb->hw;
614 	int i, slot;
615 
616 	mutex_enter(&igb->gen_lock);
617 
618 	if (igb->igb_state & IGB_SUSPENDED) {
619 		mutex_exit(&igb->gen_lock);
620 		return (ECANCELED);
621 	}
622 
623 	if (igb->unicst_avail == 0) {
624 		/* no slots available */
625 		mutex_exit(&igb->gen_lock);
626 		return (ENOSPC);
627 	}
628 
629 	/*
630 	 * Slots 0 through igb->num_rx_groups - 1 are reserved slots, mapped
631 	 * 1:1 to the group index.  The remaining slots are shared among all
632 	 * of the groups.  When adding a MAC address, the reserved slot for
633 	 * the group is tried first, then the shared slots.
634 	 */
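	/*
	 * Illustrative example (numbers hypothetical, not from this file):
	 * with num_rx_groups = 2 and unicst_total = 8, slots 0 and 1 are
	 * the per-group reserved slots and slots 2 through 7 form the
	 * shared pool, so a second address added to group 0 lands in the
	 * first free shared slot.
	 */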
635 	slot = -1;
636 	if (igb->unicst_addr[rx_group->index].mac.set == 1) {
637 		/*
638 		 * The reserved slot for the current group is already in use;
639 		 * find a free slot among the shared slots.
640 		 */
641 		for (i = igb->num_rx_groups; i < igb->unicst_total; i++) {
642 			if (igb->unicst_addr[i].mac.set == 0) {
643 				slot = i;
644 				break;
645 			}
646 		}
647 	} else
648 		slot = rx_group->index;
649 
650 	if (slot == -1) {
651 		/* no slots available in the shared slots */
652 		mutex_exit(&igb->gen_lock);
653 		return (ENOSPC);
654 	}
655 
656 	/* Set VMDq according to the mode supported by hardware. */
657 	e1000_rar_set_vmdq(hw, mac_addr, slot, igb->vmdq_mode, rx_group->index);
658 
659 	bcopy(mac_addr, igb->unicst_addr[slot].mac.addr, ETHERADDRL);
660 	igb->unicst_addr[slot].mac.group_index = rx_group->index;
661 	igb->unicst_addr[slot].mac.set = 1;
662 	igb->unicst_avail--;
663 
664 	mutex_exit(&igb->gen_lock);
665 
666 	return (0);
667 }
668 
669 /*
670  * Remove a MAC address from the specified RX group.
671  */
672 static int
673 igb_remmac(void *arg, const uint8_t *mac_addr)
674 {
675 	igb_rx_group_t *rx_group = (igb_rx_group_t *)arg;
676 	igb_t *igb = rx_group->igb;
677 	struct e1000_hw *hw = &igb->hw;
678 	int slot;
679 
680 	mutex_enter(&igb->gen_lock);
681 
682 	if (igb->igb_state & IGB_SUSPENDED) {
683 		mutex_exit(&igb->gen_lock);
684 		return (ECANCELED);
685 	}
686 
687 	slot = igb_unicst_find(igb, mac_addr);
688 	if (slot == -1) {
689 		mutex_exit(&igb->gen_lock);
690 		return (EINVAL);
691 	}
692 
693 	if (igb->unicst_addr[slot].mac.set == 0) {
694 		mutex_exit(&igb->gen_lock);
695 		return (EINVAL);
696 	}
697 
698 	/* Clear the MAC address in the slot */
699 	e1000_rar_clear(hw, slot);
700 	igb->unicst_addr[slot].mac.set = 0;
701 	igb->unicst_avail++;
702 
703 	mutex_exit(&igb->gen_lock);
704 
705 	return (0);
706 }
707 
708 /*
709  * Enable interrupt on the specified RX ring.
710  */
711 int
712 igb_rx_ring_intr_enable(mac_intr_handle_t intrh)
713 {
714 	igb_rx_ring_t *rx_ring = (igb_rx_ring_t *)intrh;
715 	igb_t *igb = rx_ring->igb;
716 	struct e1000_hw *hw = &igb->hw;
717 	uint32_t index = rx_ring->index;
718 
719 	if (igb->intr_type == DDI_INTR_TYPE_MSIX) {
720 		/* Interrupt enabling for MSI-X */
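		/*
		 * EIMS unmasks this queue's interrupt cause; setting the
		 * same bit in EIAC lets the hardware clear the cause
		 * automatically when the MSI-X vector is delivered.
		 */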
721 		igb->eims_mask |= (E1000_EICR_RX_QUEUE0 << index);
722 		E1000_WRITE_REG(hw, E1000_EIMS, igb->eims_mask);
723 		E1000_WRITE_REG(hw, E1000_EIAC, igb->eims_mask);
724 	} else {
725 		ASSERT(index == 0);
726 		/* Interrupt enabling for MSI and legacy */
727 		igb->ims_mask |= E1000_IMS_RXT0;
728 		E1000_WRITE_REG(hw, E1000_IMS, igb->ims_mask);
729 	}
730 
731 	E1000_WRITE_FLUSH(hw);
732 
733 	return (0);
734 }
735 
736 /*
737  * Disable interrupt on the specified RX ring.
738  */
739 int
740 igb_rx_ring_intr_disable(mac_intr_handle_t intrh)
741 {
742 	igb_rx_ring_t *rx_ring = (igb_rx_ring_t *)intrh;
743 	igb_t *igb = rx_ring->igb;
744 	struct e1000_hw *hw = &igb->hw;
745 	uint32_t index = rx_ring->index;
746 
747 	if (igb->intr_type == DDI_INTR_TYPE_MSIX) {
748 		/* Interrupt disabling for MSI-X */
749 		igb->eims_mask &= ~(E1000_EICR_RX_QUEUE0 << index);
750 		E1000_WRITE_REG(hw, E1000_EIMC,
751 		    (E1000_EICR_RX_QUEUE0 << index));
752 		E1000_WRITE_REG(hw, E1000_EIAC, igb->eims_mask);
753 	} else {
754 		ASSERT(index == 0);
755 		/* Interrupt disabling for MSI and legacy */
756 		igb->ims_mask &= ~E1000_IMS_RXT0;
757 		E1000_WRITE_REG(hw, E1000_IMC, E1000_IMS_RXT0);
758 	}
759 
760 	E1000_WRITE_FLUSH(hw);
761 
762 	return (0);
763 }
764 
765 /*
766  * Get the global ring index by a ring index within a group.
767  */
768 int
769 igb_get_rx_ring_index(igb_t *igb, int gindex, int rindex)
770 {
771 	igb_rx_ring_t *rx_ring;
772 	int i;
773 
774 	for (i = 0; i < igb->num_rx_rings; i++) {
775 		rx_ring = &igb->rx_rings[i];
776 		if (rx_ring->group_index == gindex)
777 			rindex--;
778 		if (rindex < 0)
779 			return (i);
780 	}
781 
782 	return (-1);
783 }
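
/*
 * Worked example (ring-to-group layout hypothetical): if rings 0-3 belong
 * to group 0 and rings 4-7 belong to group 1, then
 * igb_get_rx_ring_index(igb, 1, 2) returns 6, the global index of the
 * third ring in group 1.
 */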
784 
785 static int
786 igb_ring_start(mac_ring_driver_t rh, uint64_t mr_gen_num)
787 {
788 	igb_rx_ring_t *rx_ring = (igb_rx_ring_t *)rh;
789 
790 	mutex_enter(&rx_ring->rx_lock);
791 	rx_ring->ring_gen_num = mr_gen_num;
792 	mutex_exit(&rx_ring->rx_lock);
793 	return (0);
794 }
795 
796 /*
797  * Callback function for the MAC layer to register all rings.
798  */
799 /* ARGSUSED */
800 void
801 igb_fill_ring(void *arg, mac_ring_type_t rtype, const int rg_index,
802     const int index, mac_ring_info_t *infop, mac_ring_handle_t rh)
803 {
804 	igb_t *igb = (igb_t *)arg;
805 	mac_intr_t *mintr = &infop->mri_intr;
806 
807 	switch (rtype) {
808 	case MAC_RING_TYPE_RX: {
809 		igb_rx_ring_t *rx_ring;
810 		int global_index;
811 
812 		/*
813 		 * 'index' is the ring index within the group.  We need to
814 		 * map it to the global ring index by searching the group.
815 		 */
816 		global_index = igb_get_rx_ring_index(igb, rg_index, index);
817 
818 		ASSERT(global_index >= 0);
819 
820 		rx_ring = &igb->rx_rings[global_index];
821 		rx_ring->ring_handle = rh;
822 
823 		infop->mri_driver = (mac_ring_driver_t)rx_ring;
824 		infop->mri_start = igb_ring_start;
825 		infop->mri_stop = NULL;
826 		infop->mri_poll = (mac_ring_poll_t)igb_rx_ring_poll;
827 		infop->mri_stat = igb_rx_ring_stat;
828 
829 		mintr->mi_handle = (mac_intr_handle_t)rx_ring;
830 		mintr->mi_enable = igb_rx_ring_intr_enable;
831 		mintr->mi_disable = igb_rx_ring_intr_disable;
832 		if (igb->intr_type & (DDI_INTR_TYPE_MSIX | DDI_INTR_TYPE_MSI)) {
833 			mintr->mi_ddi_handle =
834 			    igb->htable[rx_ring->intr_vector];
835 		}
836 		break;
837 	}
838 	case MAC_RING_TYPE_TX: {
839 		ASSERT(index < igb->num_tx_rings);
840 
841 		igb_tx_ring_t *tx_ring = &igb->tx_rings[index];
842 		tx_ring->ring_handle = rh;
843 
844 		infop->mri_driver = (mac_ring_driver_t)tx_ring;
845 		infop->mri_start = NULL;
846 		infop->mri_stop = NULL;
847 		infop->mri_tx = igb_tx_ring_send;
848 		infop->mri_stat = igb_tx_ring_stat;
849 		if (igb->intr_type & (DDI_INTR_TYPE_MSIX | DDI_INTR_TYPE_MSI)) {
850 			mintr->mi_ddi_handle =
851 			    igb->htable[tx_ring->intr_vector];
852 		}
853 		break;
854 	}
855 	default:
856 		break;
857 	}
858 }
859 
860 void
861 igb_fill_group(void *arg, mac_ring_type_t rtype, const int index,
862     mac_group_info_t *infop, mac_group_handle_t gh)
863 {
864 	igb_t *igb = (igb_t *)arg;
865 
866 	switch (rtype) {
867 	case MAC_RING_TYPE_RX: {
868 		igb_rx_group_t *rx_group;
869 
870 		ASSERT((index >= 0) && (index < igb->num_rx_groups));
871 
872 		rx_group = &igb->rx_groups[index];
873 		rx_group->group_handle = gh;
874 
875 		infop->mgi_driver = (mac_group_driver_t)rx_group;
876 		infop->mgi_start = NULL;
877 		infop->mgi_stop = NULL;
878 		infop->mgi_addmac = igb_addmac;
879 		infop->mgi_remmac = igb_remmac;
880 		infop->mgi_count = (igb->num_rx_rings / igb->num_rx_groups);
881 
882 		break;
883 	}
884 	case MAC_RING_TYPE_TX:
885 		break;
886 	default:
887 		break;
888 	}
889 }
890 
891 static int
892 igb_led_set(void *arg, mac_led_mode_t mode, uint_t flags)
893 {
894 	igb_t *igb = arg;
895 
896 	if (flags != 0)
897 		return (EINVAL);
898 
899 	if (mode != MAC_LED_DEFAULT &&
900 	    mode != MAC_LED_IDENT &&
901 	    mode != MAC_LED_OFF &&
902 	    mode != MAC_LED_ON)
903 		return (ENOTSUP);
904 
905 	if (mode != MAC_LED_DEFAULT && !igb->igb_led_setup) {
906 		if (e1000_setup_led(&igb->hw) != E1000_SUCCESS)
907 			return (EIO);
908 
909 		igb->igb_led_setup = B_TRUE;
910 	}
911 
912 	switch (mode) {
913 	case MAC_LED_DEFAULT:
914 		if (igb->igb_led_setup) {
915 			if (e1000_cleanup_led(&igb->hw) != E1000_SUCCESS)
916 				return (EIO);
917 			igb->igb_led_setup = B_FALSE;
918 		}
919 		break;
920 	case MAC_LED_IDENT:
921 		if (e1000_blink_led(&igb->hw) != E1000_SUCCESS)
922 			return (EIO);
923 		break;
924 	case MAC_LED_OFF:
925 		if (e1000_led_off(&igb->hw) != E1000_SUCCESS)
926 			return (EIO);
927 		break;
928 	case MAC_LED_ON:
929 		if (e1000_led_on(&igb->hw) != E1000_SUCCESS)
930 			return (EIO);
931 		break;
932 	default:
933 		return (ENOTSUP);
934 	}
935 
936 	return (0);
937 }
938 
939 /*
940  * Obtain the MAC's capabilities and associated data from
941  * the driver.
942  */
943 boolean_t
944 igb_m_getcapab(void *arg, mac_capab_t cap, void *cap_data)
945 {
946 	igb_t *igb = (igb_t *)arg;
947 
948 	switch (cap) {
949 	case MAC_CAPAB_HCKSUM: {
950 		uint32_t *tx_hcksum_flags = cap_data;
951 
952 		/*
953 		 * We advertise our capabilities only if tx hcksum offload is
954 		 * enabled.  On receive, the stack will accept checksummed
955 		 * packets anyway, even if we haven't said we can deliver
956 		 * them.
957 		 */
958 		if (!igb->tx_hcksum_enable)
959 			return (B_FALSE);
960 
961 		*tx_hcksum_flags = HCKSUM_INET_PARTIAL | HCKSUM_IPHDRCKSUM;
962 		break;
963 	}
964 	case MAC_CAPAB_LSO: {
965 		mac_capab_lso_t *cap_lso = cap_data;
966 
967 		if (igb->lso_enable) {
968 			cap_lso->lso_flags = LSO_TX_BASIC_TCP_IPV4 |
969 			    LSO_TX_BASIC_TCP_IPV6;
970 			cap_lso->lso_basic_tcp_ipv4.lso_max = IGB_LSO_MAXLEN;
971 			cap_lso->lso_basic_tcp_ipv6.lso_max = IGB_LSO_MAXLEN;
972 			break;
973 		} else {
974 			return (B_FALSE);
975 		}
976 	}
977 	case MAC_CAPAB_RINGS: {
978 		mac_capab_rings_t *cap_rings = cap_data;
979 
980 		switch (cap_rings->mr_type) {
981 		case MAC_RING_TYPE_RX:
982 			cap_rings->mr_group_type = MAC_GROUP_TYPE_STATIC;
983 			cap_rings->mr_rnum = igb->num_rx_rings;
984 			cap_rings->mr_gnum = igb->num_rx_groups;
985 			cap_rings->mr_rget = igb_fill_ring;
986 			cap_rings->mr_gget = igb_fill_group;
987 			cap_rings->mr_gaddring = NULL;
988 			cap_rings->mr_gremring = NULL;
989 
990 			break;
991 		case MAC_RING_TYPE_TX:
992 			cap_rings->mr_group_type = MAC_GROUP_TYPE_STATIC;
993 			cap_rings->mr_rnum = igb->num_tx_rings;
994 			cap_rings->mr_gnum = 0;
995 			cap_rings->mr_rget = igb_fill_ring;
996 			cap_rings->mr_gget = NULL;
997 
998 			break;
999 		default:
1000 			break;
1001 		}
1002 		break;
1003 	}
1004 
1005 	case MAC_CAPAB_LED: {
1006 		mac_capab_led_t *cap_led = cap_data;
1007 
1008 		cap_led->mcl_flags = 0;
1009 		cap_led->mcl_modes = MAC_LED_DEFAULT;
1010 		if (igb->hw.mac.ops.blink_led != NULL &&
1011 		    igb->hw.mac.ops.blink_led != e1000_null_ops_generic) {
1012 			cap_led->mcl_modes |= MAC_LED_IDENT;
1013 		}
1014 		if (igb->hw.mac.ops.led_off != NULL &&
1015 		    igb->hw.mac.ops.led_off != e1000_null_ops_generic) {
1016 			cap_led->mcl_modes |= MAC_LED_OFF;
1017 		}
1018 		if (igb->hw.mac.ops.led_on != NULL &&
1019 		    igb->hw.mac.ops.led_on != e1000_null_ops_generic) {
1020 			cap_led->mcl_modes |= MAC_LED_ON;
1021 		}
1022 		cap_led->mcl_set = igb_led_set;
1023 		break;
1024 	}
1025 
1026 	default:
1027 		return (B_FALSE);
1028 	}
1029 	return (B_TRUE);
1030 }
1031 
1032 int
1033 igb_m_setprop(void *arg, const char *pr_name, mac_prop_id_t pr_num,
1034     uint_t pr_valsize, const void *pr_val)
1035 {
1036 	igb_t *igb = (igb_t *)arg;
1037 	struct e1000_hw *hw = &igb->hw;
1038 	int err = 0;
1039 	uint32_t flow_control;
1040 	uint32_t cur_mtu, new_mtu;
1041 	uint32_t rx_size;
1042 	uint32_t tx_size;
1043 
1044 	mutex_enter(&igb->gen_lock);
1045 	if (igb->igb_state & IGB_SUSPENDED) {
1046 		mutex_exit(&igb->gen_lock);
1047 		return (ECANCELED);
1048 	}
1049 
1050 	if (igb->loopback_mode != IGB_LB_NONE && igb_param_locked(pr_num)) {
1051 		/*
1052 		 * All en_* parameters are locked (read-only)
1053 		 * while the device is in any sort of loopback mode.
1054 		 */
1055 		mutex_exit(&igb->gen_lock);
1056 		return (EBUSY);
1057 	}
1058 
1059 	switch (pr_num) {
1060 	case MAC_PROP_EN_1000FDX_CAP:
1061 		/* read/write on copper, read-only on serdes */
1062 		if (hw->phy.media_type != e1000_media_type_copper) {
1063 			err = ENOTSUP;
1064 			break;
1065 		}
1066 		igb->param_en_1000fdx_cap = *(uint8_t *)pr_val;
1067 		igb->param_adv_1000fdx_cap = *(uint8_t *)pr_val;
1068 		goto setup_link;
1069 	case MAC_PROP_EN_100FDX_CAP:
1070 		if (hw->phy.media_type != e1000_media_type_copper) {
1071 			err = ENOTSUP;
1072 			break;
1073 		}
1074 		igb->param_en_100fdx_cap = *(uint8_t *)pr_val;
1075 		igb->param_adv_100fdx_cap = *(uint8_t *)pr_val;
1076 		goto setup_link;
1077 	case MAC_PROP_EN_100HDX_CAP:
1078 		if (hw->phy.media_type != e1000_media_type_copper) {
1079 			err = ENOTSUP;
1080 			break;
1081 		}
1082 		igb->param_en_100hdx_cap = *(uint8_t *)pr_val;
1083 		igb->param_adv_100hdx_cap = *(uint8_t *)pr_val;
1084 		goto setup_link;
1085 	case MAC_PROP_EN_10FDX_CAP:
1086 		if (hw->phy.media_type != e1000_media_type_copper) {
1087 			err = ENOTSUP;
1088 			break;
1089 		}
1090 		igb->param_en_10fdx_cap = *(uint8_t *)pr_val;
1091 		igb->param_adv_10fdx_cap = *(uint8_t *)pr_val;
1092 		goto setup_link;
1093 	case MAC_PROP_EN_10HDX_CAP:
1094 		if (hw->phy.media_type != e1000_media_type_copper) {
1095 			err = ENOTSUP;
1096 			break;
1097 		}
1098 		igb->param_en_10hdx_cap = *(uint8_t *)pr_val;
1099 		igb->param_adv_10hdx_cap = *(uint8_t *)pr_val;
1100 		goto setup_link;
1101 	case MAC_PROP_AUTONEG:
1102 		if (hw->phy.media_type != e1000_media_type_copper) {
1103 			err = ENOTSUP;
1104 			break;
1105 		}
1106 		igb->param_adv_autoneg_cap = *(uint8_t *)pr_val;
1107 		goto setup_link;
1108 	case MAC_PROP_FLOWCTRL:
1109 		bcopy(pr_val, &flow_control, sizeof (flow_control));
1110 
1111 		switch (flow_control) {
1112 		default:
1113 			err = EINVAL;
1114 			break;
1115 		case LINK_FLOWCTRL_NONE:
1116 			hw->fc.requested_mode = e1000_fc_none;
1117 			break;
1118 		case LINK_FLOWCTRL_RX:
1119 			hw->fc.requested_mode = e1000_fc_rx_pause;
1120 			break;
1121 		case LINK_FLOWCTRL_TX:
1122 			hw->fc.requested_mode = e1000_fc_tx_pause;
1123 			break;
1124 		case LINK_FLOWCTRL_BI:
1125 			hw->fc.requested_mode = e1000_fc_full;
1126 			break;
1127 		}
1128 setup_link:
1129 		if (err == 0) {
1130 			if (igb_setup_link(igb, B_TRUE) != IGB_SUCCESS)
1131 				err = EINVAL;
1132 		}
1133 		break;
1134 	case MAC_PROP_ADV_1000FDX_CAP:
1135 	case MAC_PROP_ADV_1000HDX_CAP:
1136 	case MAC_PROP_ADV_100T4_CAP:
1137 	case MAC_PROP_ADV_100FDX_CAP:
1138 	case MAC_PROP_ADV_100HDX_CAP:
1139 	case MAC_PROP_ADV_10FDX_CAP:
1140 	case MAC_PROP_ADV_10HDX_CAP:
1141 	case MAC_PROP_EN_1000HDX_CAP:
1142 	case MAC_PROP_EN_100T4_CAP:
1143 	case MAC_PROP_STATUS:
1144 	case MAC_PROP_SPEED:
1145 	case MAC_PROP_DUPLEX:
1146 		err = ENOTSUP; /* read-only prop. Can't set this. */
1147 		break;
1148 	case MAC_PROP_MTU:
1149 		/* adapter must be stopped for an MTU change */
1150 		if (igb->igb_state & IGB_STARTED) {
1151 			err = EBUSY;
1152 			break;
1153 		}
1154 
1155 		cur_mtu = igb->default_mtu;
1156 		bcopy(pr_val, &new_mtu, sizeof (new_mtu));
1157 		if (new_mtu == cur_mtu) {
1158 			err = 0;
1159 			break;
1160 		}
1161 
1162 		if (new_mtu < MIN_MTU || new_mtu > MAX_MTU) {
1163 			err = EINVAL;
1164 			break;
1165 		}
1166 
1167 		err = mac_maxsdu_update(igb->mac_hdl, new_mtu);
1168 		if (err == 0) {
1169 			igb->default_mtu = new_mtu;
1170 			igb->max_frame_size = igb->default_mtu +
1171 			    sizeof (struct ether_vlan_header) + ETHERFCSL;
1172 
1173 			/*
1174 			 * Set rx buffer size
1175 			 */
1176 			rx_size = igb->max_frame_size + IPHDR_ALIGN_ROOM;
1177 			igb->rx_buf_size = ((rx_size >> 10) + ((rx_size &
1178 			    (((uint32_t)1 << 10) - 1)) > 0 ? 1 : 0)) << 10;
1179 
1180 			/*
1181 			 * Set tx buffer size
1182 			 */
1183 			tx_size = igb->max_frame_size;
1184 			igb->tx_buf_size = ((tx_size >> 10) + ((tx_size &
1185 			    (((uint32_t)1 << 10) - 1)) > 0 ? 1 : 0)) << 10;
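			/*
			 * Both expressions above round the respective sizes
			 * up to the next 1 KB (1024-byte) boundary.
			 */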
1186 		}
1187 		break;
1188 	case MAC_PROP_PRIVATE:
1189 		err = igb_set_priv_prop(igb, pr_name, pr_valsize, pr_val);
1190 		break;
1191 	default:
1192 		err = ENOTSUP;
1193 		break;
1194 	}
1195 
1196 	mutex_exit(&igb->gen_lock);
1197 
1198 	if (igb_check_acc_handle(igb->osdep.reg_handle) != DDI_FM_OK) {
1199 		ddi_fm_service_impact(igb->dip, DDI_SERVICE_DEGRADED);
1200 		return (EIO);
1201 	}
1202 
1203 	return (err);
1204 }
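
/*
 * Usage sketch (illustrative; property values are hypothetical): the link
 * properties handled above are normally administered with dladm(1M), e.g.
 *
 *	# dladm set-linkprop -p mtu=9000 igb0
 *	# dladm set-linkprop -p flowctrl=bi igb0
 *
 * An MTU change is rejected with EBUSY while the interface is started
 * (see the MAC_PROP_MTU case above).
 */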
1205 
1206 int
1207 igb_m_getprop(void *arg, const char *pr_name, mac_prop_id_t pr_num,
1208     uint_t pr_valsize, void *pr_val)
1209 {
1210 	igb_t *igb = (igb_t *)arg;
1211 	struct e1000_hw *hw = &igb->hw;
1212 	int err = 0;
1213 	uint32_t flow_control;
1214 	uint64_t tmp = 0;
1215 
1216 	switch (pr_num) {
1217 	case MAC_PROP_DUPLEX:
1218 		ASSERT(pr_valsize >= sizeof (link_duplex_t));
1219 		bcopy(&igb->link_duplex, pr_val, sizeof (link_duplex_t));
1220 		break;
1221 	case MAC_PROP_SPEED:
1222 		ASSERT(pr_valsize >= sizeof (uint64_t));
1223 		tmp = igb->link_speed * 1000000ull;
1224 		bcopy(&tmp, pr_val, sizeof (tmp));
1225 		break;
1226 	case MAC_PROP_AUTONEG:
1227 		ASSERT(pr_valsize >= sizeof (uint8_t));
1228 		*(uint8_t *)pr_val = igb->param_adv_autoneg_cap;
1229 		break;
1230 	case MAC_PROP_FLOWCTRL:
1231 		ASSERT(pr_valsize >= sizeof (uint32_t));
1232 		switch (hw->fc.requested_mode) {
1233 			case e1000_fc_none:
1234 				flow_control = LINK_FLOWCTRL_NONE;
1235 				break;
1236 			case e1000_fc_rx_pause:
1237 				flow_control = LINK_FLOWCTRL_RX;
1238 				break;
1239 			case e1000_fc_tx_pause:
1240 				flow_control = LINK_FLOWCTRL_TX;
1241 				break;
1242 			case e1000_fc_full:
1243 				flow_control = LINK_FLOWCTRL_BI;
1244 				break;
1245 		}
1246 		bcopy(&flow_control, pr_val, sizeof (flow_control));
1247 		break;
1248 	case MAC_PROP_ADV_1000FDX_CAP:
1249 		*(uint8_t *)pr_val = igb->param_adv_1000fdx_cap;
1250 		break;
1251 	case MAC_PROP_EN_1000FDX_CAP:
1252 		*(uint8_t *)pr_val = igb->param_en_1000fdx_cap;
1253 		break;
1254 	case MAC_PROP_ADV_1000HDX_CAP:
1255 		*(uint8_t *)pr_val = igb->param_adv_1000hdx_cap;
1256 		break;
1257 	case MAC_PROP_EN_1000HDX_CAP:
1258 		*(uint8_t *)pr_val = igb->param_en_1000hdx_cap;
1259 		break;
1260 	case MAC_PROP_ADV_100T4_CAP:
1261 		*(uint8_t *)pr_val = igb->param_adv_100t4_cap;
1262 		break;
1263 	case MAC_PROP_EN_100T4_CAP:
1264 		*(uint8_t *)pr_val = igb->param_en_100t4_cap;
1265 		break;
1266 	case MAC_PROP_ADV_100FDX_CAP:
1267 		*(uint8_t *)pr_val = igb->param_adv_100fdx_cap;
1268 		break;
1269 	case MAC_PROP_EN_100FDX_CAP:
1270 		*(uint8_t *)pr_val = igb->param_en_100fdx_cap;
1271 		break;
1272 	case MAC_PROP_ADV_100HDX_CAP:
1273 		*(uint8_t *)pr_val = igb->param_adv_100hdx_cap;
1274 		break;
1275 	case MAC_PROP_EN_100HDX_CAP:
1276 		*(uint8_t *)pr_val = igb->param_en_100hdx_cap;
1277 		break;
1278 	case MAC_PROP_ADV_10FDX_CAP:
1279 		*(uint8_t *)pr_val = igb->param_adv_10fdx_cap;
1280 		break;
1281 	case MAC_PROP_EN_10FDX_CAP:
1282 		*(uint8_t *)pr_val = igb->param_en_10fdx_cap;
1283 		break;
1284 	case MAC_PROP_ADV_10HDX_CAP:
1285 		*(uint8_t *)pr_val = igb->param_adv_10hdx_cap;
1286 		break;
1287 	case MAC_PROP_EN_10HDX_CAP:
1288 		*(uint8_t *)pr_val = igb->param_en_10hdx_cap;
1289 		break;
1290 	case MAC_PROP_PRIVATE:
1291 		err = igb_get_priv_prop(igb, pr_name, pr_valsize, pr_val);
1292 		break;
1293 	default:
1294 		err = ENOTSUP;
1295 		break;
1296 	}
1297 	return (err);
1298 }
1299 
1300 void
1301 igb_m_propinfo(void *arg, const char *pr_name, mac_prop_id_t pr_num,
1302     mac_prop_info_handle_t prh)
1303 {
1304 	igb_t *igb = (igb_t *)arg;
1305 	struct e1000_hw *hw = &igb->hw;
1306 	uint16_t phy_status, phy_ext_status;
1307 
1308 	switch (pr_num) {
1309 	case MAC_PROP_DUPLEX:
1310 	case MAC_PROP_SPEED:
1311 	case MAC_PROP_ADV_1000FDX_CAP:
1312 	case MAC_PROP_ADV_1000HDX_CAP:
1313 	case MAC_PROP_EN_1000HDX_CAP:
1314 	case MAC_PROP_ADV_100T4_CAP:
1315 	case MAC_PROP_EN_100T4_CAP:
1316 		mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ);
1317 		break;
1318 
1319 	case MAC_PROP_EN_1000FDX_CAP:
1320 		if (hw->phy.media_type != e1000_media_type_copper) {
1321 			mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ);
1322 		} else {
1323 			(void) e1000_read_phy_reg(hw, PHY_EXT_STATUS,
1324 			    &phy_ext_status);
1325 			mac_prop_info_set_default_uint8(prh,
1326 			    ((phy_ext_status & IEEE_ESR_1000T_FD_CAPS) ||
1327 			    (phy_ext_status & IEEE_ESR_1000X_FD_CAPS)) ? 1 : 0);
1328 		}
1329 		break;
1330 
1331 	case MAC_PROP_ADV_100FDX_CAP:
1332 	case MAC_PROP_EN_100FDX_CAP:
1333 		if (hw->phy.media_type != e1000_media_type_copper) {
1334 			mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ);
1335 		} else {
1336 			(void) e1000_read_phy_reg(hw, PHY_STATUS, &phy_status);
1337 			mac_prop_info_set_default_uint8(prh,
1338 			    ((phy_status & MII_SR_100X_FD_CAPS) ||
1339 			    (phy_status & MII_SR_100T2_FD_CAPS)) ? 1 : 0);
1340 		}
1341 		break;
1342 
1343 	case MAC_PROP_ADV_100HDX_CAP:
1344 	case MAC_PROP_EN_100HDX_CAP:
1345 		if (hw->phy.media_type != e1000_media_type_copper) {
1346 			mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ);
1347 		} else {
1348 			(void) e1000_read_phy_reg(hw, PHY_STATUS, &phy_status);
1349 			mac_prop_info_set_default_uint8(prh,
1350 			    ((phy_status & MII_SR_100X_HD_CAPS) ||
1351 			    (phy_status & MII_SR_100T2_HD_CAPS)) ? 1 : 0);
1352 		}
1353 		break;
1354 
1355 	case MAC_PROP_ADV_10FDX_CAP:
1356 	case MAC_PROP_EN_10FDX_CAP:
1357 		if (hw->phy.media_type != e1000_media_type_copper) {
1358 			mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ);
1359 		} else {
1360 			(void) e1000_read_phy_reg(hw, PHY_STATUS, &phy_status);
1361 			mac_prop_info_set_default_uint8(prh,
1362 			    (phy_status & MII_SR_10T_FD_CAPS) ? 1 : 0);
1363 		}
1364 		break;
1365 
1366 	case MAC_PROP_ADV_10HDX_CAP:
1367 	case MAC_PROP_EN_10HDX_CAP:
1368 		if (hw->phy.media_type != e1000_media_type_copper) {
1369 			mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ);
1370 		} else {
1371 			(void) e1000_read_phy_reg(hw, PHY_STATUS, &phy_status);
1372 			mac_prop_info_set_default_uint8(prh,
1373 			    (phy_status & MII_SR_10T_HD_CAPS) ? 1 : 0);
1374 		}
1375 		break;
1376 
1377 	case MAC_PROP_AUTONEG:
1378 		if (hw->phy.media_type != e1000_media_type_copper) {
1379 			mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ);
1380 		} else {
1381 			(void) e1000_read_phy_reg(hw, PHY_STATUS, &phy_status);
1382 			mac_prop_info_set_default_uint8(prh,
1383 			    (phy_status & MII_SR_AUTONEG_CAPS) ? 1 : 0);
1384 		}
1385 		break;
1386 
1387 	case MAC_PROP_FLOWCTRL:
1388 		mac_prop_info_set_default_link_flowctrl(prh, LINK_FLOWCTRL_BI);
1389 		break;
1390 
1391 	case MAC_PROP_MTU:
1392 		mac_prop_info_set_range_uint32(prh, MIN_MTU, MAX_MTU);
1393 		break;
1394 
1395 	case MAC_PROP_PRIVATE:
1396 		igb_priv_prop_info(igb, pr_name, prh);
1397 		break;
1398 	}
1399 
1400 }
1401 
1402 boolean_t
1403 igb_param_locked(mac_prop_id_t pr_num)
1404 {
1405 	/*
1406 	 * All en_* parameters are locked (read-only) while
1407 	 * the device is in any sort of loopback mode ...
1408 	 */
1409 	switch (pr_num) {
1410 		case MAC_PROP_EN_1000FDX_CAP:
1411 		case MAC_PROP_EN_1000HDX_CAP:
1412 		case MAC_PROP_EN_100T4_CAP:
1413 		case MAC_PROP_EN_100FDX_CAP:
1414 		case MAC_PROP_EN_100HDX_CAP:
1415 		case MAC_PROP_EN_10FDX_CAP:
1416 		case MAC_PROP_EN_10HDX_CAP:
1417 		case MAC_PROP_AUTONEG:
1418 		case MAC_PROP_FLOWCTRL:
1419 			return (B_TRUE);
1420 	}
1421 	return (B_FALSE);
1422 }
1423 
1424 /* ARGSUSED */
1425 int
1426 igb_set_priv_prop(igb_t *igb, const char *pr_name,
1427     uint_t pr_valsize, const void *pr_val)
1428 {
1429 	int err = 0;
1430 	long result;
1431 	struct e1000_hw *hw = &igb->hw;
1432 	int i;
1433 
1434 	if (strcmp(pr_name, "_eee_support") == 0) {
1435 		if (pr_val == NULL)
1436 			return (EINVAL);
1437 		(void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
1438 		switch (result) {
1439 		case 0:
1440 		case 1:
1441 			/*
1442 			 * For now, only supported on I350/I354.
1443 			 * Add new mac.type values (or use < instead)
1444 			 * as new cards offer up EEE.
1445 			 */
1446 			switch (hw->mac.type) {
1447 			case e1000_i350:
1448 				/* Must set this prior to the set call. */
1449 				hw->dev_spec._82575.eee_disable = !result;
1450 				if (e1000_set_eee_i350(hw, result,
1451 				    result) != E1000_SUCCESS)
1452 					err = EIO;
1453 				break;
1454 			case e1000_i354:
1455 				/* Must set this prior to the set call. */
1456 				hw->dev_spec._82575.eee_disable = !result;
1457 				if (e1000_set_eee_i354(hw, result,
1458 				    result) != E1000_SUCCESS)
1459 					err = EIO;
1460 				break;
1461 			default:
1462 				return (ENXIO);
1463 			}
1464 			break;
1465 		default:
1466 			err = EINVAL;
1467 			/* FALLTHRU */
1468 		}
1469 		return (err);
1470 	}
1471 	if (strcmp(pr_name, "_tx_copy_thresh") == 0) {
1472 		if (pr_val == NULL) {
1473 			err = EINVAL;
1474 			return (err);
1475 		}
1476 		(void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
1477 		if (result < MIN_TX_COPY_THRESHOLD ||
1478 		    result > MAX_TX_COPY_THRESHOLD)
1479 			err = EINVAL;
1480 		else {
1481 			igb->tx_copy_thresh = (uint32_t)result;
1482 		}
1483 		return (err);
1484 	}
1485 	if (strcmp(pr_name, "_tx_recycle_thresh") == 0) {
1486 		if (pr_val == NULL) {
1487 			err = EINVAL;
1488 			return (err);
1489 		}
1490 		(void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
1491 		if (result < MIN_TX_RECYCLE_THRESHOLD ||
1492 		    result > MAX_TX_RECYCLE_THRESHOLD)
1493 			err = EINVAL;
1494 		else {
1495 			igb->tx_recycle_thresh = (uint32_t)result;
1496 		}
1497 		return (err);
1498 	}
1499 	if (strcmp(pr_name, "_tx_overload_thresh") == 0) {
1500 		if (pr_val == NULL) {
1501 			err = EINVAL;
1502 			return (err);
1503 		}
1504 		(void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
1505 		if (result < MIN_TX_OVERLOAD_THRESHOLD ||
1506 		    result > MAX_TX_OVERLOAD_THRESHOLD)
1507 			err = EINVAL;
1508 		else {
1509 			igb->tx_overload_thresh = (uint32_t)result;
1510 		}
1511 		return (err);
1512 	}
1513 	if (strcmp(pr_name, "_tx_resched_thresh") == 0) {
1514 		if (pr_val == NULL) {
1515 			err = EINVAL;
1516 			return (err);
1517 		}
1518 		(void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
1519 		if (result < MIN_TX_RESCHED_THRESHOLD ||
1520 		    result > MAX_TX_RESCHED_THRESHOLD ||
1521 		    result > igb->tx_ring_size)
1522 			err = EINVAL;
1523 		else {
1524 			igb->tx_resched_thresh = (uint32_t)result;
1525 		}
1526 		return (err);
1527 	}
1528 	if (strcmp(pr_name, "_rx_copy_thresh") == 0) {
1529 		if (pr_val == NULL) {
1530 			err = EINVAL;
1531 			return (err);
1532 		}
1533 		(void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
1534 		if (result < MIN_RX_COPY_THRESHOLD ||
1535 		    result > MAX_RX_COPY_THRESHOLD)
1536 			err = EINVAL;
1537 		else {
1538 			igb->rx_copy_thresh = (uint32_t)result;
1539 		}
1540 		return (err);
1541 	}
1542 	if (strcmp(pr_name, "_rx_limit_per_intr") == 0) {
1543 		if (pr_val == NULL) {
1544 			err = EINVAL;
1545 			return (err);
1546 		}
1547 		(void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
1548 		if (result < MIN_RX_LIMIT_PER_INTR ||
1549 		    result > MAX_RX_LIMIT_PER_INTR)
1550 			err = EINVAL;
1551 		else {
1552 			igb->rx_limit_per_intr = (uint32_t)result;
1553 		}
1554 		return (err);
1555 	}
1556 	if (strcmp(pr_name, "_intr_throttling") == 0) {
1557 		if (pr_val == NULL) {
1558 			err = EINVAL;
1559 			return (err);
1560 		}
1561 		(void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
1562 
1563 		if (result < igb->capab->min_intr_throttle ||
1564 		    result > igb->capab->max_intr_throttle)
1565 			err = EINVAL;
1566 		else {
1567 			igb->intr_throttling[0] = (uint32_t)result;
1568 
1569 			for (i = 0; i < MAX_NUM_EITR; i++)
1570 				igb->intr_throttling[i] =
1571 				    igb->intr_throttling[0];
1572 
1573 			/* Set interrupt throttling rate */
1574 			for (i = 0; i < igb->intr_cnt; i++)
1575 				E1000_WRITE_REG(hw, E1000_EITR(i),
1576 				    igb->intr_throttling[i]);
1577 		}
1578 		return (err);
1579 	}
1580 	return (ENOTSUP);
1581 }
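
/*
 * Usage sketch (illustrative): private properties carry a leading
 * underscore and are administered the same way as public ones, e.g.
 *
 *	# dladm show-linkprop -p _eee_support igb0
 *	# dladm set-linkprop -p _eee_support=1 igb0	(I350/I354 only)
 */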
1582 
1583 int
1584 igb_get_priv_prop(igb_t *igb, const char *pr_name, uint_t pr_valsize,
1585     void *pr_val)
1586 {
1587 	int value;
1588 
1589 	if (strcmp(pr_name, "_adv_pause_cap") == 0) {
1590 		value = igb->param_adv_pause_cap;
1591 	} else if (strcmp(pr_name, "_adv_asym_pause_cap") == 0) {
1592 		value = igb->param_adv_asym_pause_cap;
1593 	} else if (strcmp(pr_name, "_eee_support") == 0) {
1594 		/*
1595 		 * For now, only supported on I350/I354.  Add new mac.type values
1596 		 * (or use < instead) as new cards offer up EEE.
1597 		 */
1598 		switch (igb->hw.mac.type) {
1599 		case e1000_i350:
1600 		case e1000_i354:
1601 			value = !(igb->hw.dev_spec._82575.eee_disable);
1602 			break;
1603 		default:
1604 			value = 0;
1605 		}
1606 	} else if (strcmp(pr_name, "_tx_copy_thresh") == 0) {
1607 		value = igb->tx_copy_thresh;
1608 	} else if (strcmp(pr_name, "_tx_recycle_thresh") == 0) {
1609 		value = igb->tx_recycle_thresh;
1610 	} else if (strcmp(pr_name, "_tx_overload_thresh") == 0) {
1611 		value = igb->tx_overload_thresh;
1612 	} else if (strcmp(pr_name, "_tx_resched_thresh") == 0) {
1613 		value = igb->tx_resched_thresh;
1614 	} else if (strcmp(pr_name, "_rx_copy_thresh") == 0) {
1615 		value = igb->rx_copy_thresh;
1616 	} else if (strcmp(pr_name, "_rx_limit_per_intr") == 0) {
1617 		value = igb->rx_limit_per_intr;
1618 	} else if (strcmp(pr_name, "_intr_throttling") == 0) {
1619 		value = igb->intr_throttling[0];
1620 	} else {
1621 		return (ENOTSUP);
1622 	}
1623 
1624 	(void) snprintf(pr_val, pr_valsize, "%d", value);
1625 	return (0);
1626 }
1627 
1628 void
1629 igb_priv_prop_info(igb_t *igb, const char *pr_name, mac_prop_info_handle_t prh)
1630 {
1631 	char valstr[64];
1632 	int value;
1633 
1634 	if (strcmp(pr_name, "_adv_pause_cap") == 0 ||
1635 	    strcmp(pr_name, "_adv_asym_pause_cap") == 0) {
1636 		mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ);
1637 		return;
1638 	} else if (strcmp(pr_name, "_tx_copy_thresh") == 0) {
1639 		value = DEFAULT_TX_COPY_THRESHOLD;
1640 	} else if (strcmp(pr_name, "_tx_recycle_thresh") == 0) {
1641 		value = DEFAULT_TX_RECYCLE_THRESHOLD;
1642 	} else if (strcmp(pr_name, "_tx_overload_thresh") == 0) {
1643 		value = DEFAULT_TX_OVERLOAD_THRESHOLD;
1644 	} else if (strcmp(pr_name, "_tx_resched_thresh") == 0) {
1645 		value = DEFAULT_TX_RESCHED_THRESHOLD;
1646 	} else if (strcmp(pr_name, "_rx_copy_thresh") == 0) {
1647 		value = DEFAULT_RX_COPY_THRESHOLD;
1648 	} else if (strcmp(pr_name, "_rx_limit_per_intr") == 0) {
1649 		value = DEFAULT_RX_LIMIT_PER_INTR;
1650 	} else if (strcmp(pr_name, "_intr_throttling") == 0) {
1651 		value = igb->capab->def_intr_throttle;
1652 	} else {
1653 		return;
1654 	}
1655 
1656 	(void) snprintf(valstr, sizeof (valstr), "%d", value);
1657 	mac_prop_info_set_default_str(prh, valstr);
1658 }
1659