xref: /illumos-gate/usr/src/uts/common/io/vr/vr.c (revision cfe080a1)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2010 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 /*
28  * Copyright (c) 2018, Joyent, Inc.
29  */
30 
31 #include <sys/types.h>
32 #include <sys/stream.h>
33 #include <sys/strsun.h>
34 #include <sys/stat.h>
35 #include <sys/pci.h>
36 #include <sys/modctl.h>
37 #include <sys/kstat.h>
38 #include <sys/ethernet.h>
39 #include <sys/devops.h>
40 #include <sys/debug.h>
41 #include <sys/conf.h>
42 #include <sys/mac.h>
43 #include <sys/mac_provider.h>
44 #include <sys/mac_ether.h>
45 #include <sys/sysmacros.h>
46 #include <sys/dditypes.h>
47 #include <sys/ddi.h>
48 #include <sys/sunddi.h>
49 #include <sys/miiregs.h>
50 #include <sys/byteorder.h>
51 #include <sys/note.h>
52 #include <sys/vlan.h>
53 
54 #include "vr.h"
55 #include "vr_impl.h"
56 
57 /*
58  * VR in a nutshell
59  * The card uses two rings of data structures to communicate with the host.
60  * These are referred to as "descriptor rings" and there is one for transmit
61  * (TX) and one for receive (RX).
62  *
63  * The driver uses a "DMA buffer" data type for mapping to those descriptor
64  * rings. This is a structure with handles and a DMA'able buffer attached to it.
65  *
66  * Receive
67  * The receive ring is filled with DMA buffers. Received packets are copied into
68  * newly allocated mblks and passed upstream.
69  *
70  * Transmit
71  * Each transmit descriptor has a DMA buffer attached to it. The data of TX
72  * packets is copied into the DMA buffer which is then enqueued for
73  * transmission.
74  *
75  * Reclaim of transmitted packets is done as a result of a transmit completion
76  * interrupt which is generated 3 times per ring at minimum.
77  */
78 
79 #if defined(DEBUG)
80 uint32_t	vrdebug = 1;
81 #define	VR_DEBUG(args)	do {				\
82 		if (vrdebug > 0)			\
83 			(*vr_debug()) args;		\
84 			_NOTE(CONSTANTCONDITION)	\
85 		} while (0)
86 static	void	vr_prt(const char *fmt, ...);
87 	void	(*vr_debug())(const char *fmt, ...);
88 #else
89 #define	VR_DEBUG(args)	do ; _NOTE(CONSTANTCONDITION) while (0)
90 #endif
91 
92 static char vr_ident[] = "VIA Rhine Ethernet";
93 
94 /*
95  * Attributes for accessing registers and memory descriptors for this device.
96  */
97 static ddi_device_acc_attr_t vr_dev_dma_accattr = {
98 	DDI_DEVICE_ATTR_V0,
99 	DDI_STRUCTURE_LE_ACC,
100 	DDI_STRICTORDER_ACC
101 };
102 
103 /*
104  * Attributes for accessing data.
105  */
106 static ddi_device_acc_attr_t vr_data_dma_accattr = {
107 	DDI_DEVICE_ATTR_V0,
108 	DDI_NEVERSWAP_ACC,
109 	DDI_STRICTORDER_ACC
110 };
111 
112 /*
113  * DMA attributes for descriptors for communication with the device
114  * This driver assumes that all descriptors of one ring fit in one consecutive
115  * memory area of max 4K (256 descriptors) that does not cross a page boundary.
116  * Therefore, we request 4K alignment.
117  */
118 static ddi_dma_attr_t vr_dev_dma_attr = {
119 	DMA_ATTR_V0,			/* version number */
120 	0,				/* low DMA address range */
121 	0xFFFFFFFF,			/* high DMA address range */
122 	0x7FFFFFFF,			/* DMA counter register */
123 	0x1000,				/* DMA address alignment */
124 	0x7F,				/* DMA burstsizes */
125 	1,				/* min effective DMA size */
126 	0xFFFFFFFF,			/* max DMA xfer size */
127 	0xFFFFFFFF,			/* segment boundary */
128 	1,				/* s/g list length */
129 	1,				/* granularity of device */
130 	0				/* DMA transfer flags */
131 };
132 
133 /*
134  * DMA attributes for the data moved to/from the device
135  * Note that the alignment is set to 2K so that a 1500 byte packet never
136  * crosses a page boundary and thus a DMA transfer is not split up into
137  * multiple cookies with a 4K/8K pagesize.
138  */
139 static ddi_dma_attr_t vr_data_dma_attr = {
140 	DMA_ATTR_V0,			/* version number */
141 	0,				/* low DMA address range */
142 	0xFFFFFFFF,			/* high DMA address range */
143 	0x7FFFFFFF,			/* DMA counter register */
144 	0x800,				/* DMA address alignment */
145 	0xfff,				/* DMA burstsizes */
146 	1,				/* min effective DMA size */
147 	0xFFFFFFFF,			/* max DMA xfer size */
148 	0xFFFFFFFF,			/* segment boundary */
149 	1,				/* s/g list length */
150 	1,				/* granularity of device */
151 	0				/* DMA transfer flags */
152 };
153 
154 static mac_callbacks_t vr_mac_callbacks = {
155 	MC_SETPROP|MC_GETPROP|MC_PROPINFO, /* Which callbacks are set */
156 	vr_mac_getstat,		/* Get the value of a statistic */
157 	vr_mac_start,		/* Start the device */
158 	vr_mac_stop,		/* Stop the device */
159 	vr_mac_set_promisc,	/* Enable or disable promiscuous mode */
160 	vr_mac_set_multicast,	/* Enable or disable a multicast addr */
161 	vr_mac_set_ether_addr,	/* Set the unicast MAC address */
162 	vr_mac_tx_enqueue_list,	/* Transmit a packet */
163 	NULL,			/* Reserved */
164 	NULL,			/* Process an unknown ioctl */
165 	NULL,			/* Get capability information */
166 	NULL,			/* Open the device */
167 	NULL,			/* Close the device */
168 	vr_mac_setprop,		/* Set properties of the device */
169 	vr_mac_getprop,		/* Get properties of the device */
170 	vr_mac_propinfo		/* Get property attributes */
171 };
172 
173 /*
174  * Table with bugs and features for each incarnation of the card.
175  */
176 static const chip_info_t vr_chip_info [] = {
177 	{
178 		0x0, 0x0,
179 		"VIA Rhine Fast Ethernet",
180 		(VR_BUG_NO_MEMIO),
181 		(VR_FEATURE_NONE)
182 	},
183 	{
184 		0x04, 0x21,
185 		"VIA VT86C100A Fast Ethernet",
186 		(VR_BUG_NEEDMODE2PCEROPT | VR_BUG_NO_TXQUEUEING |
187 		    VR_BUG_NEEDMODE10T | VR_BUG_TXALIGN | VR_BUG_NO_MEMIO |
188 		    VR_BUG_MIIPOLLSTOP),
189 		(VR_FEATURE_NONE)
190 	},
191 	{
192 		0x40, 0x41,
193 		"VIA VT6102-A Rhine II Fast Ethernet",
194 		(VR_BUG_NEEDMODE2PCEROPT),
195 		(VR_FEATURE_RX_PAUSE_CAP)
196 	},
197 	{
198 		0x42, 0x7f,
199 		"VIA VT6102-C Rhine II Fast Ethernet",
200 		(VR_BUG_NEEDMODE2PCEROPT),
201 		(VR_FEATURE_RX_PAUSE_CAP)
202 	},
203 	{
204 		0x80, 0x82,
205 		"VIA VT6105-A Rhine III Fast Ethernet",
206 		(VR_BUG_NONE),
207 		(VR_FEATURE_RX_PAUSE_CAP | VR_FEATURE_TX_PAUSE_CAP)
208 	},
209 	{
210 		0x83, 0x89,
211 		"VIA VT6105-B Rhine III Fast Ethernet",
212 		(VR_BUG_NONE),
213 		(VR_FEATURE_RX_PAUSE_CAP | VR_FEATURE_TX_PAUSE_CAP)
214 	},
215 	{
216 		0x8a, 0x8b,
217 		"VIA VT6105-LOM Rhine III Fast Ethernet",
218 		(VR_BUG_NONE),
219 		(VR_FEATURE_RX_PAUSE_CAP | VR_FEATURE_TX_PAUSE_CAP)
220 	},
221 	{
222 		0x8c, 0x8c,
223 		"VIA VT6107-A0 Rhine III Fast Ethernet",
224 		(VR_BUG_NONE),
225 		(VR_FEATURE_RX_PAUSE_CAP | VR_FEATURE_TX_PAUSE_CAP)
226 	},
227 	{
228 		0x8d, 0x8f,
229 		"VIA VT6107-A1 Rhine III Fast Ethernet",
230 		(VR_BUG_NONE),
231 		(VR_FEATURE_RX_PAUSE_CAP | VR_FEATURE_TX_PAUSE_CAP |
232 		    VR_FEATURE_MRDLNMULTIPLE)
233 	},
234 	{
235 		0x90, 0x93,
236 		"VIA VT6105M-A0 Rhine III Fast Ethernet Management Adapter",
237 		(VR_BUG_NONE),
238 		(VR_FEATURE_RX_PAUSE_CAP | VR_FEATURE_TX_PAUSE_CAP |
239 		    VR_FEATURE_TXCHKSUM | VR_FEATURE_RXCHKSUM |
240 		    VR_FEATURE_CAMSUPPORT | VR_FEATURE_VLANTAGGING |
241 		    VR_FEATURE_MIBCOUNTER)
242 	},
243 	{
244 		0x94, 0xff,
245 		"VIA VT6105M-B1 Rhine III Fast Ethernet Management Adapter",
246 		(VR_BUG_NONE),
247 		(VR_FEATURE_RX_PAUSE_CAP | VR_FEATURE_TX_PAUSE_CAP |
248 		    VR_FEATURE_TXCHKSUM | VR_FEATURE_RXCHKSUM |
249 		    VR_FEATURE_CAMSUPPORT | VR_FEATURE_VLANTAGGING |
250 		    VR_FEATURE_MIBCOUNTER)
251 	}
252 };
253 
254 /*
255  * Function prototypes
256  */
257 static	vr_result_t	vr_add_intr(vr_t *vrp);
258 static	void		vr_remove_intr(vr_t *vrp);
259 static	int32_t		vr_cam_index(vr_t *vrp, const uint8_t *maddr);
260 static	uint32_t	ether_crc_be(const uint8_t *address);
261 static	void		vr_tx_enqueue_msg(vr_t *vrp, mblk_t *mp);
262 static	void		vr_log(vr_t *vrp, int level, const char *fmt, ...);
263 static	int		vr_resume(dev_info_t *devinfo);
264 static	int		vr_suspend(dev_info_t *devinfo);
265 static	vr_result_t	vr_bus_config(vr_t *vrp);
266 static	void		vr_bus_unconfig(vr_t *vrp);
267 static	void		vr_reset(vr_t *vrp);
268 static	int		vr_start(vr_t *vrp);
269 static	int		vr_stop(vr_t *vrp);
270 static	vr_result_t	vr_rings_init(vr_t *vrp);
271 static	void		vr_rings_fini(vr_t *vrp);
272 static	vr_result_t	vr_alloc_ring(vr_t *vrp, vr_ring_t *r, size_t n);
273 static	void		vr_free_ring(vr_ring_t *r, size_t n);
274 static	vr_result_t	vr_rxring_init(vr_t *vrp);
275 static	void		vr_rxring_fini(vr_t *vrp);
276 static	vr_result_t	vr_txring_init(vr_t *vrp);
277 static	void		vr_txring_fini(vr_t *vrp);
278 static	vr_result_t	vr_alloc_dmabuf(vr_t *vrp, vr_data_dma_t *dmap,
279 			    uint_t flags);
280 static	void		vr_free_dmabuf(vr_data_dma_t *dmap);
281 static	void		vr_param_init(vr_t *vrp);
282 static	mblk_t		*vr_receive(vr_t *vrp);
283 static	void		vr_tx_reclaim(vr_t *vrp);
284 static	void		vr_periodic(void *p);
285 static	void		vr_error(vr_t *vrp);
286 static	void		vr_phy_read(vr_t *vrp, int offset, uint16_t *value);
287 static	void		vr_phy_write(vr_t *vrp, int offset, uint16_t value);
288 static	void		vr_phy_autopoll_disable(vr_t *vrp);
289 static	void		vr_phy_autopoll_enable(vr_t *vrp);
290 static	void		vr_link_init(vr_t *vrp);
291 static	void		vr_link_state(vr_t *vrp);
292 static	void		vr_kstats_init(vr_t *vrp);
293 static	int		vr_update_kstats(kstat_t *ksp, int access);
294 static	void		vr_remove_kstats(vr_t *vrp);
295 
296 static int
297 vr_attach(dev_info_t *devinfo, ddi_attach_cmd_t cmd)
298 {
299 	vr_t		*vrp;
300 	mac_register_t	*macreg;
301 
302 	if (cmd == DDI_RESUME)
303 		return (vr_resume(devinfo));
304 	else if (cmd != DDI_ATTACH)
305 		return (DDI_FAILURE);
306 
307 	/*
308 	 * Attach.
309 	 */
310 	vrp = kmem_zalloc(sizeof (vr_t), KM_SLEEP);
311 	ddi_set_driver_private(devinfo, vrp);
312 	vrp->devinfo = devinfo;
313 
314 	/*
315 	 * Store the name+instance of the module.
316 	 */
317 	(void) snprintf(vrp->ifname, sizeof (vrp->ifname), "%s%d",
318 	    MODULENAME, ddi_get_instance(devinfo));
319 
320 	/*
321 	 * Bus initialization.
322 	 */
323 	if (vr_bus_config(vrp) != VR_SUCCESS) {
324 		vr_log(vrp, CE_WARN, "vr_bus_config failed");
325 		goto fail0;
326 	}
327 
328 	/*
329 	 * Initialize default parameters.
330 	 */
331 	vr_param_init(vrp);
332 
333 	/*
334 	 * Setup the descriptor rings.
335 	 */
336 	if (vr_rings_init(vrp) != VR_SUCCESS) {
337 		vr_log(vrp, CE_WARN, "vr_rings_init failed");
338 		goto fail1;
339 	}
340 
341 	/*
342 	 * Initialize kstats.
343 	 */
344 	vr_kstats_init(vrp);
345 
346 	/*
347 	 * Add interrupt to the OS.
348 	 */
349 	if (vr_add_intr(vrp) != VR_SUCCESS) {
350 		vr_log(vrp, CE_WARN, "vr_add_intr failed in attach");
351 		goto fail3;
352 	}
353 
354 	/*
355 	 * Add mutexes.
356 	 */
357 	mutex_init(&vrp->intrlock, NULL, MUTEX_DRIVER,
358 	    DDI_INTR_PRI(vrp->intr_pri));
359 	mutex_init(&vrp->oplock, NULL, MUTEX_DRIVER, NULL);
360 	mutex_init(&vrp->tx.lock, NULL, MUTEX_DRIVER, NULL);
361 
362 	/*
363 	 * Enable interrupt.
364 	 */
365 	if (ddi_intr_enable(vrp->intr_hdl) != DDI_SUCCESS) {
366 		vr_log(vrp, CE_NOTE, "ddi_intr_enable failed");
367 		goto fail5;
368 	}
369 
370 	/*
371 	 * Register with parent, mac.
372 	 */
373 	if ((macreg = mac_alloc(MAC_VERSION)) == NULL) {
374 		vr_log(vrp, CE_WARN, "mac_alloc failed in attach");
375 		goto fail6;
376 	}
377 
378 	macreg->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
379 	macreg->m_driver = vrp;
380 	macreg->m_dip = devinfo;
381 	macreg->m_src_addr = vrp->vendor_ether_addr;
382 	macreg->m_callbacks = &vr_mac_callbacks;
383 	macreg->m_min_sdu = 0;
384 	macreg->m_max_sdu = ETHERMTU;
385 	macreg->m_margin = VLAN_TAGSZ;
386 
387 	if (mac_register(macreg, &vrp->machdl) != 0) {
388 		vr_log(vrp, CE_WARN, "mac_register failed in attach");
389 		goto fail7;
390 	}
391 	mac_free(macreg);
392 	return (DDI_SUCCESS);
393 
394 fail7:
395 	mac_free(macreg);
396 fail6:
397 	(void) ddi_intr_disable(vrp->intr_hdl);
398 fail5:
399 	mutex_destroy(&vrp->tx.lock);
400 	mutex_destroy(&vrp->oplock);
401 	mutex_destroy(&vrp->intrlock);
402 	vr_remove_intr(vrp);
403 fail3:
404 	vr_remove_kstats(vrp);
405 fail2:
406 	vr_rings_fini(vrp);
407 fail1:
408 	vr_bus_unconfig(vrp);
409 fail0:
410 	kmem_free(vrp, sizeof (vr_t));
411 	return (DDI_FAILURE);
412 }
413 
414 static int
415 vr_detach(dev_info_t *devinfo, ddi_detach_cmd_t cmd)
416 {
417 	vr_t		*vrp;
418 
419 	vrp = ddi_get_driver_private(devinfo);
420 
421 	if (cmd == DDI_SUSPEND)
422 		return (vr_suspend(devinfo));
423 	else if (cmd != DDI_DETACH)
424 		return (DDI_FAILURE);
425 
426 	if (vrp->chip.state == CHIPSTATE_RUNNING)
427 		return (DDI_FAILURE);
428 
429 	/*
430 	 * Try to un-register from the MAC layer.
431 	 */
432 	if (mac_unregister(vrp->machdl) != 0)
433 		return (DDI_FAILURE);
434 
435 	(void) ddi_intr_disable(vrp->intr_hdl);
436 	vr_remove_intr(vrp);
437 	mutex_destroy(&vrp->tx.lock);
438 	mutex_destroy(&vrp->oplock);
439 	mutex_destroy(&vrp->intrlock);
440 	vr_remove_kstats(vrp);
441 	vr_rings_fini(vrp);
442 	vr_bus_unconfig(vrp);
443 	kmem_free(vrp, sizeof (vr_t));
444 	return (DDI_SUCCESS);
445 }
446 
447 /*
448  * quiesce the card for fast reboot.
449  */
450 int
451 vr_quiesce(dev_info_t *dev_info)
452 {
453 	vr_t	*vrp;
454 
455 	vrp = (vr_t *)ddi_get_driver_private(dev_info);
456 
457 	/*
458 	 * Stop interrupts.
459 	 */
460 	VR_PUT16(vrp->acc_reg, VR_ICR0, 0);
461 	VR_PUT8(vrp->acc_reg, VR_ICR1, 0);
462 
463 	/*
464 	 * Stop DMA.
465 	 */
466 	VR_PUT8(vrp->acc_reg, VR_CTRL0, VR_CTRL0_DMA_STOP);
467 	return (DDI_SUCCESS);
468 }
469 
470 /*
471  * Add an interrupt for our device to the OS.
472  */
473 static vr_result_t
474 vr_add_intr(vr_t *vrp)
475 {
476 	int	nintrs;
477 	int	rc;
478 
479 	rc = ddi_intr_alloc(vrp->devinfo, &vrp->intr_hdl,
480 	    DDI_INTR_TYPE_FIXED,	/* type */
481 	    0,			/* number */
482 	    1,			/* count */
483 	    &nintrs,		/* actualp */
484 	    DDI_INTR_ALLOC_STRICT);
485 
486 	if (rc != DDI_SUCCESS) {
487 		vr_log(vrp, CE_NOTE, "ddi_intr_alloc failed: %d", rc);
488 		return (VR_FAILURE);
489 	}
490 
491 	rc = ddi_intr_add_handler(vrp->intr_hdl, vr_intr, vrp, NULL);
492 	if (rc != DDI_SUCCESS) {
493 		vr_log(vrp, CE_NOTE, "ddi_intr_add_handler failed");
494 		if (ddi_intr_free(vrp->intr_hdl) != DDI_SUCCESS)
495 			vr_log(vrp, CE_NOTE, "ddi_intr_free failed");
496 		return (VR_FAILURE);
497 	}
498 
499 	rc = ddi_intr_get_pri(vrp->intr_hdl, &vrp->intr_pri);
500 	if (rc != DDI_SUCCESS) {
501 		vr_log(vrp, CE_NOTE, "ddi_intr_get_pri failed");
502 		if (ddi_intr_remove_handler(vrp->intr_hdl) != DDI_SUCCESS)
503 			vr_log(vrp, CE_NOTE, "ddi_intr_remove_handler failed");
504 
505 		if (ddi_intr_free(vrp->intr_hdl) != DDI_SUCCESS)
506 			vr_log(vrp, CE_NOTE, "ddi_intr_free failed");
507 
508 		return (VR_FAILURE);
509 	}
510 	return (VR_SUCCESS);
511 }
512 
513 /*
514  * Remove our interrupt from the OS.
515  */
516 static void
517 vr_remove_intr(vr_t *vrp)
518 {
519 	if (ddi_intr_remove_handler(vrp->intr_hdl) != DDI_SUCCESS)
520 		vr_log(vrp, CE_NOTE, "ddi_intr_remove_handler failed");
521 
522 	if (ddi_intr_free(vrp->intr_hdl) != DDI_SUCCESS)
523 		vr_log(vrp, CE_NOTE, "ddi_intr_free failed");
524 }
525 
526 /*
527  * Resume operation after suspend.
528  */
529 static int
530 vr_resume(dev_info_t *devinfo)
531 {
532 	vr_t *vrp;
533 
534 	vrp = (vr_t *)ddi_get_driver_private(devinfo);
535 	mutex_enter(&vrp->oplock);
536 	if (vrp->chip.state == CHIPSTATE_SUSPENDED_RUNNING)
537 		(void) vr_start(vrp);
538 	mutex_exit(&vrp->oplock);
539 	return (DDI_SUCCESS);
540 }
541 
542 /*
543  * Suspend operation.
544  */
545 static int
546 vr_suspend(dev_info_t *devinfo)
547 {
548 	vr_t *vrp;
549 
550 	vrp = (vr_t *)ddi_get_driver_private(devinfo);
551 	mutex_enter(&vrp->oplock);
552 	if (vrp->chip.state == CHIPSTATE_RUNNING) {
553 		(void) vr_stop(vrp);
554 		vrp->chip.state = CHIPSTATE_SUSPENDED_RUNNING;
555 	}
556 	mutex_exit(&vrp->oplock);
557 	return (DDI_SUCCESS);
558 }
559 
560 /*
561  * Initial bus- and device configuration during attach(9E).
562  */
563 static vr_result_t
564 vr_bus_config(vr_t *vrp)
565 {
566 	uint32_t		addr;
567 	int			n, nsets, rc;
568 	uint_t			elem;
569 	pci_regspec_t		*regs;
570 
571 	/*
572 	 * Get the reg property which describes the various access methods.
573 	 */
574 	if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, vrp->devinfo,
575 	    0, "reg", (int **)&regs, &elem) != DDI_PROP_SUCCESS) {
576 		vr_log(vrp, CE_WARN, "Can't get reg property");
577 		return (VR_FAILURE);
578 	}
579 	nsets = (elem * sizeof (uint_t)) / sizeof (pci_regspec_t);
580 
581 	/*
582 	 * Setup access to all available sets.
583 	 */
584 	vrp->nsets = nsets;
585 	vrp->regset = kmem_zalloc(nsets * sizeof (vr_acc_t), KM_SLEEP);
586 	for (n = 0; n < nsets; n++) {
587 		rc = ddi_regs_map_setup(vrp->devinfo, n,
588 		    &vrp->regset[n].addr, 0, 0,
589 		    &vr_dev_dma_accattr,
590 		    &vrp->regset[n].hdl);
591 		if (rc != DDI_SUCCESS) {
592 			vr_log(vrp, CE_NOTE,
593 			    "Setup of register set %d failed", n);
594 			while (--n >= 0)
595 				ddi_regs_map_free(&vrp->regset[n].hdl);
596 			kmem_free(vrp->regset, nsets * sizeof (vr_acc_t));
597 			ddi_prop_free(regs);
598 			return (VR_FAILURE);
599 		}
600 		bcopy(&regs[n], &vrp->regset[n].reg, sizeof (pci_regspec_t));
601 	}
602 	ddi_prop_free(regs);
603 
604 	/*
605 	 * Assign type-named pointers to the register sets.
606 	 */
607 	for (n = 0; n < nsets; n++) {
608 		addr = vrp->regset[n].reg.pci_phys_hi & PCI_REG_ADDR_M;
609 		if (addr == PCI_ADDR_CONFIG && vrp->acc_cfg == NULL)
610 			vrp->acc_cfg = &vrp->regset[n];
611 		else if (addr == PCI_ADDR_IO && vrp->acc_io == NULL)
612 			vrp->acc_io = &vrp->regset[n];
613 		else if (addr == PCI_ADDR_MEM32 && vrp->acc_mem == NULL)
614 			vrp->acc_mem = &vrp->regset[n];
615 	}
616 
617 	/*
618 	 * Assure there is one of each type.
619 	 */
620 	if (vrp->acc_cfg == NULL ||
621 	    vrp->acc_io == NULL ||
622 	    vrp->acc_mem == NULL) {
623 		for (n = 0; n < nsets; n++)
624 			ddi_regs_map_free(&vrp->regset[n].hdl);
625 		kmem_free(vrp->regset, nsets * sizeof (vr_acc_t));
626 		vr_log(vrp, CE_WARN,
627 		    "Config-, I/O- and memory sets not available");
628 		return (VR_FAILURE);
629 	}
630 
631 	/*
632 	 * Store vendor/device/revision.
633 	 */
634 	vrp->chip.vendor = VR_GET16(vrp->acc_cfg, PCI_CONF_VENID);
635 	vrp->chip.device = VR_GET16(vrp->acc_cfg, PCI_CONF_DEVID);
636 	vrp->chip.revision = VR_GET16(vrp->acc_cfg, PCI_CONF_REVID);
637 
638 	/*
639 	 * Copy the matching chip_info_t structure.
640 	 */
641 	elem = sizeof (vr_chip_info) / sizeof (chip_info_t);
642 	for (n = 0; n < elem; n++) {
643 		if (vrp->chip.revision >= vr_chip_info[n].revmin &&
644 		    vrp->chip.revision <= vr_chip_info[n].revmax) {
645 			bcopy((void*)&vr_chip_info[n],
646 			    (void*)&vrp->chip.info,
647 			    sizeof (chip_info_t));
648 			break;
649 		}
650 	}
651 
652 	/*
653 	 * If we didn't find a chip_info_t for this card, copy the first
654  * entry of the info structures. This is a generic Rhine with no
655 	 * bugs and no features.
656 	 */
657 	if (vrp->chip.info.name[0] == '\0') {
658 		bcopy((void*)&vr_chip_info[0],
659 		    (void*) &vrp->chip.info,
660 		    sizeof (chip_info_t));
661 	}
662 
663 	/*
664 	 * Tell what is found.
665 	 */
666 	vr_log(vrp, CE_NOTE, "pci%d,%d,%d: %s, revision 0x%0x",
667 	    PCI_REG_BUS_G(vrp->acc_cfg->reg.pci_phys_hi),
668 	    PCI_REG_DEV_G(vrp->acc_cfg->reg.pci_phys_hi),
669 	    PCI_REG_FUNC_G(vrp->acc_cfg->reg.pci_phys_hi),
670 	    vrp->chip.info.name,
671 	    vrp->chip.revision);
672 
673 	/*
674 	 * Assure that the device is prepared for memory space accesses.
675 	 * This should be the default as the device advertises memory
676 	 * access in its BARs. However, my VT6102 on an EPIA CL board doesn't
677 	 * and thus we explicitly enable it.
678 	 */
679 	VR_SETBIT8(vrp->acc_io, VR_CFGD, VR_CFGD_MMIOEN);
680 
681 	/*
682 	 * Setup a handle for regular usage, prefer memory space accesses.
683 	 */
684 	if (vrp->acc_mem != NULL &&
685 	    (vrp->chip.info.bugs & VR_BUG_NO_MEMIO) == 0)
686 		vrp->acc_reg = vrp->acc_mem;
687 	else
688 		vrp->acc_reg = vrp->acc_io;
689 
690 	/*
691 	 * Store the vendor's MAC address.
692 	 */
693 	for (n = 0; n < ETHERADDRL; n++) {
694 		vrp->vendor_ether_addr[n] = VR_GET8(vrp->acc_reg,
695 		    VR_ETHERADDR + n);
696 	}
697 	return (VR_SUCCESS);
698 }
699 
700 static void
701 vr_bus_unconfig(vr_t *vrp)
702 {
703 	uint_t	n;
704 
705 	/*
706 	 * Free the register access handles.
707 	 */
708 	for (n = 0; n < vrp->nsets; n++)
709 		ddi_regs_map_free(&vrp->regset[n].hdl);
710 	kmem_free(vrp->regset, vrp->nsets * sizeof (vr_acc_t));
711 }
712 
713 /*
714  * Initialize parameter structures.
715  */
716 static void
717 vr_param_init(vr_t *vrp)
718 {
719 	/*
720 	 * Initialize default link configuration parameters.
721 	 */
722 	vrp->param.an_en = VR_LINK_AUTONEG_ON;
723 	vrp->param.anadv_en = 1; /* Select 802.3 autonegotiation */
724 	vrp->param.anadv_en |= MII_ABILITY_100BASE_T4;
725 	vrp->param.anadv_en |= MII_ABILITY_100BASE_TX_FD;
726 	vrp->param.anadv_en |= MII_ABILITY_100BASE_TX;
727 	vrp->param.anadv_en |= MII_ABILITY_10BASE_T_FD;
728 	vrp->param.anadv_en |= MII_ABILITY_10BASE_T;
729 	/* Not a PHY ability, but advertised on behalf of MAC */
730 	vrp->param.anadv_en |= MII_ABILITY_PAUSE;
731 	vrp->param.mtu = ETHERMTU;
732 
733 	/*
734 	 * Store the PHY identity.
735 	 */
736 	vr_phy_read(vrp, MII_PHYIDH, &vrp->chip.mii.identh);
737 	vr_phy_read(vrp, MII_PHYIDL, &vrp->chip.mii.identl);
738 
739 	/*
740 	 * Clear incapabilities imposed by PHY in phymask.
741 	 */
742 	vrp->param.an_phymask = vrp->param.anadv_en;
743 	vr_phy_read(vrp, MII_STATUS, &vrp->chip.mii.status);
744 	if ((vrp->chip.mii.status & MII_STATUS_10) == 0)
745 		vrp->param.an_phymask &= ~MII_ABILITY_10BASE_T;
746 
747 	if ((vrp->chip.mii.status & MII_STATUS_10_FD) == 0)
748 		vrp->param.an_phymask &= ~MII_ABILITY_10BASE_T_FD;
749 
750 	if ((vrp->chip.mii.status & MII_STATUS_100_BASEX) == 0)
751 		vrp->param.an_phymask &= ~MII_ABILITY_100BASE_TX;
752 
753 	if ((vrp->chip.mii.status & MII_STATUS_100_BASEX_FD) == 0)
754 		vrp->param.an_phymask &= ~MII_ABILITY_100BASE_TX_FD;
755 
756 	if ((vrp->chip.mii.status & MII_STATUS_100_BASE_T4) == 0)
757 		vrp->param.an_phymask &= ~MII_ABILITY_100BASE_T4;
758 
759 	/*
760 	 * Clear incapabilities imposed by MAC in macmask
761 	 * Note that flowcontrol (FCS?) is never masked. All of our adapters
762 	 * have the ability to honor incoming pause frames. Only the newer can
763 	 * transmit pause frames. Since there's no asym flowcontrol in 100Mbit
764 	 * Ethernet, we always advertise (symmetric) pause.
765 	 */
766 	vrp->param.an_macmask = vrp->param.anadv_en;
767 
768 	/*
769 	 * Advertised capabilities is enabled minus incapable.
770 	 */
771 	vrp->chip.mii.anadv = vrp->param.anadv_en &
772 	    (vrp->param.an_phymask & vrp->param.an_macmask);
773 
774 	/*
775 	 * Ensure that autoneg of the PHY matches our default.
776 	 */
777 	if (vrp->param.an_en == VR_LINK_AUTONEG_ON)
778 		vrp->chip.mii.control = MII_CONTROL_ANE;
779 	else
780 		vrp->chip.mii.control =
781 		    (MII_CONTROL_100MB | MII_CONTROL_FDUPLEX);
782 }
783 
784 /*
785  * Setup the descriptor rings.
786  */
787 static vr_result_t
788 vr_rings_init(vr_t *vrp)
789 {
790 
791 	vrp->rx.ndesc = VR_RX_N_DESC;
792 	vrp->tx.ndesc = VR_TX_N_DESC;
793 
794 	/*
795 	 * Create a ring for receive.
796 	 */
797 	if (vr_alloc_ring(vrp, &vrp->rxring, vrp->rx.ndesc) != VR_SUCCESS)
798 		return (VR_FAILURE);
799 
800 	/*
801 	 * Create a ring for transmit.
802 	 */
803 	if (vr_alloc_ring(vrp, &vrp->txring, vrp->tx.ndesc) != VR_SUCCESS) {
804 		vr_free_ring(&vrp->rxring, vrp->rx.ndesc);
805 		return (VR_FAILURE);
806 	}
807 
808 	vrp->rx.ring = vrp->rxring.desc;
809 	vrp->tx.ring = vrp->txring.desc;
810 	return (VR_SUCCESS);
811 }
812 
813 static void
814 vr_rings_fini(vr_t *vrp)
815 {
816 	vr_free_ring(&vrp->rxring, vrp->rx.ndesc);
817 	vr_free_ring(&vrp->txring, vrp->tx.ndesc);
818 }
819 
820 /*
821  * Allocate a descriptor ring
822  * The number of descriptor entries must fit in a single page so that the
823  * whole ring fits in one consecutive space.
824  *  i386:  4K page / 16 byte descriptor = 256 entries
825  *  sparc: 8K page / 16 byte descriptor = 512 entries
826  */
827 static vr_result_t
828 vr_alloc_ring(vr_t *vrp, vr_ring_t *ring, size_t n)
829 {
830 	ddi_dma_cookie_t	desc_dma_cookie;
831 	uint_t			desc_cookiecnt;
832 	int			i, rc;
833 	size_t			rbytes;
834 
835 	/*
836 	 * Allocate a DMA handle for the chip descriptors.
837 	 */
838 	rc = ddi_dma_alloc_handle(vrp->devinfo,
839 	    &vr_dev_dma_attr,
840 	    DDI_DMA_SLEEP,
841 	    NULL,
842 	    &ring->handle);
843 
844 	if (rc != DDI_SUCCESS) {
845 		vr_log(vrp, CE_WARN,
846 		    "ddi_dma_alloc_handle in vr_alloc_ring failed.");
847 		return (VR_FAILURE);
848 	}
849 
850 	/*
851 	 * Allocate memory for the chip descriptors.
852 	 */
853 	rc = ddi_dma_mem_alloc(ring->handle,
854 	    n * sizeof (vr_chip_desc_t),
855 	    &vr_dev_dma_accattr,
856 	    DDI_DMA_CONSISTENT,
857 	    DDI_DMA_SLEEP,
858 	    NULL,
859 	    (caddr_t *)&ring->cdesc,
860 	    &rbytes,
861 	    &ring->acchdl);
862 
863 	if (rc != DDI_SUCCESS) {
864 		vr_log(vrp, CE_WARN,
865 		    "ddi_dma_mem_alloc in vr_alloc_ring failed.");
866 		ddi_dma_free_handle(&ring->handle);
867 		return (VR_FAILURE);
868 	}
869 
870 	/*
871 	 * Map the descriptor memory.
872 	 */
873 	rc = ddi_dma_addr_bind_handle(ring->handle,
874 	    NULL,
875 	    (caddr_t)ring->cdesc,
876 	    rbytes,
877 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
878 	    DDI_DMA_SLEEP,
879 	    NULL,
880 	    &desc_dma_cookie,
881 	    &desc_cookiecnt);
882 
883 	if (rc != DDI_DMA_MAPPED || desc_cookiecnt > 1) {
884 		vr_log(vrp, CE_WARN,
885 		    "ddi_dma_addr_bind_handle in vr_alloc_ring failed: "
886 		    "rc = %d, cookiecnt = %d", rc, desc_cookiecnt);
887 		ddi_dma_mem_free(&ring->acchdl);
888 		ddi_dma_free_handle(&ring->handle);
889 		return (VR_FAILURE);
890 	}
891 	ring->cdesc_paddr = desc_dma_cookie.dmac_address;
892 
893 	/*
894 	 * Allocate memory for the host descriptor ring.
895 	 */
896 	ring->desc =
897 	    (vr_desc_t *)kmem_zalloc(n * sizeof (vr_desc_t), KM_SLEEP);
898 
899 	/*
900 	 * Interlink the descriptors and connect host- to chip descriptors.
901 	 */
902 	for (i = 0; i < n; i++) {
903 		/*
904 		 * Connect the host descriptor to a chip descriptor.
905 		 */
906 		ring->desc[i].cdesc = &ring->cdesc[i];
907 
908 		/*
909 		 * Store the DMA address and offset in the descriptor
910 		 * Offset is for ddi_dma_sync() and paddr is for ddi_get/-put().
911 		 */
912 		ring->desc[i].offset = i * sizeof (vr_chip_desc_t);
913 		ring->desc[i].paddr = ring->cdesc_paddr + ring->desc[i].offset;
914 
915 		/*
916 		 * Link the previous descriptor to this one.
917 		 */
918 		if (i > 0) {
919 			/* Host */
920 			ring->desc[i-1].next = &ring->desc[i];
921 
922 			/* Chip */
923 			ddi_put32(ring->acchdl,
924 			    &ring->cdesc[i-1].next,
925 			    ring->desc[i].paddr);
926 		}
927 	}
928 
929 	/*
930 	 * Make rings out of this list by pointing last to first.
931 	 */
932 	i = n - 1;
933 	ring->desc[i].next = &ring->desc[0];
934 	ddi_put32(ring->acchdl, &ring->cdesc[i].next, ring->desc[0].paddr);
935 	return (VR_SUCCESS);
936 }
937 
938 /*
939  * Free the memory allocated for a ring.
940  */
941 static void
942 vr_free_ring(vr_ring_t *r, size_t n)
943 {
944 	/*
945 	 * Unmap and free the chip descriptors.
946 	 */
947 	(void) ddi_dma_unbind_handle(r->handle);
948 	ddi_dma_mem_free(&r->acchdl);
949 	ddi_dma_free_handle(&r->handle);
950 
951 	/*
952 	 * Free the memory for storing host descriptors
953 	 */
954 	kmem_free(r->desc, n * sizeof (vr_desc_t));
955 }
956 
957 /*
958  * Initialize the receive ring.
959  */
960 static vr_result_t
961 vr_rxring_init(vr_t *vrp)
962 {
963 	int		i, rc;
964 	vr_desc_t	*rp;
965 
966 	/*
967 	 * Set the read pointer at the start of the ring.
968 	 */
969 	vrp->rx.rp = &vrp->rx.ring[0];
970 
971 	/*
972 	 * Assign a DMA buffer to each receive descriptor.
973 	 */
974 	for (i = 0; i < vrp->rx.ndesc; i++) {
975 		rp = &vrp->rx.ring[i];
976 		rc = vr_alloc_dmabuf(vrp,
977 		    &vrp->rx.ring[i].dmabuf,
978 		    DDI_DMA_STREAMING | DDI_DMA_READ);
979 
980 		if (rc != VR_SUCCESS) {
981 			while (--i >= 0)
982 				vr_free_dmabuf(&vrp->rx.ring[i].dmabuf);
983 			return (VR_FAILURE);
984 		}
985 
986 		/*
987 		 * Store the address of the dma buffer in the chip descriptor
988 		 */
989 		ddi_put32(vrp->rxring.acchdl,
990 		    &rp->cdesc->data,
991 		    rp->dmabuf.paddr);
992 
993 		/*
994 		 * Put the buffer length in the chip descriptor. Ensure that
995 		 * length fits in the 11 bits of stat1 (2047/0x7FF)
996 		 */
997 		ddi_put32(vrp->rxring.acchdl, &rp->cdesc->stat1,
998 		    MIN(VR_MAX_PKTSZ, rp->dmabuf.bufsz));
999 
1000 		/*
1001 		 * Set descriptor ownership to the card
1002 		 */
1003 		ddi_put32(vrp->rxring.acchdl, &rp->cdesc->stat0, VR_RDES0_OWN);
1004 
1005 		/*
1006 		 * Sync the descriptor with main memory
1007 		 */
1008 		(void) ddi_dma_sync(vrp->rxring.handle, rp->offset,
1009 		    sizeof (vr_chip_desc_t), DDI_DMA_SYNC_FORDEV);
1010 	}
1011 	return (VR_SUCCESS);
1012 }
1013 
1014 /*
1015  * Free the DMA buffers assigned to the receive ring.
1016  */
1017 static void
1018 vr_rxring_fini(vr_t *vrp)
1019 {
1020 	int		i;
1021 
1022 	for (i = 0; i < vrp->rx.ndesc; i++)
1023 		vr_free_dmabuf(&vrp->rx.ring[i].dmabuf);
1024 }
1025 
1026 static vr_result_t
1027 vr_txring_init(vr_t *vrp)
1028 {
1029 	vr_desc_t		*wp;
1030 	int			i, rc;
1031 
1032 	/*
1033 	 * Set the write- and claim pointer.
1034 	 */
1035 	vrp->tx.wp = &vrp->tx.ring[0];
1036 	vrp->tx.cp = &vrp->tx.ring[0];
1037 
1038 	/*
1039 	 * (Re)set the TX bookkeeping.
1040 	 */
1041 	vrp->tx.stallticks = 0;
1042 	vrp->tx.resched = 0;
1043 
1044 	/*
1045 	 * Every transmit decreases nfree. Every reclaim increases nfree.
1046 	 */
1047 	vrp->tx.nfree = vrp->tx.ndesc;
1048 
1049 	/*
1050 	 * Attach a DMA buffer to each transmit descriptor.
1051 	 */
1052 	for (i = 0; i < vrp->tx.ndesc; i++) {
1053 		rc = vr_alloc_dmabuf(vrp,
1054 		    &vrp->tx.ring[i].dmabuf,
1055 		    DDI_DMA_STREAMING | DDI_DMA_WRITE);
1056 
1057 		if (rc != VR_SUCCESS) {
1058 			while (--i >= 0)
1059 				vr_free_dmabuf(&vrp->tx.ring[i].dmabuf);
1060 			return (VR_FAILURE);
1061 		}
1062 	}
1063 
1064 	/*
1065 	 * Init & sync the TX descriptors so the device sees a valid ring.
1066 	 */
1067 	for (i = 0; i < vrp->tx.ndesc; i++) {
1068 		wp = &vrp->tx.ring[i];
1069 		ddi_put32(vrp->txring.acchdl, &wp->cdesc->stat0, 0);
1070 		ddi_put32(vrp->txring.acchdl, &wp->cdesc->stat1, 0);
1071 		ddi_put32(vrp->txring.acchdl, &wp->cdesc->data,
1072 		    wp->dmabuf.paddr);
1073 		(void) ddi_dma_sync(vrp->txring.handle, wp->offset,
1074 		    sizeof (vr_chip_desc_t),
1075 		    DDI_DMA_SYNC_FORDEV);
1076 	}
1077 	return (VR_SUCCESS);
1078 }
1079 
1080 /*
1081  * Free the DMA buffers attached to the TX ring.
1082  */
1083 static void
1084 vr_txring_fini(vr_t *vrp)
1085 {
1086 	int		i;
1087 
1088 	/*
1089 	 * Free the DMA buffers attached to the TX ring
1090 	 */
1091 	for (i = 0; i < vrp->tx.ndesc; i++)
1092 		vr_free_dmabuf(&vrp->tx.ring[i].dmabuf);
1093 }
1094 
1095 /*
1096  * Allocate a DMA buffer.
1097  */
1098 static vr_result_t
1099 vr_alloc_dmabuf(vr_t *vrp, vr_data_dma_t *dmap, uint_t dmaflags)
1100 {
1101 	ddi_dma_cookie_t	dma_cookie;
1102 	uint_t			cookiecnt;
1103 	int			rc;
1104 
1105 	/*
1106 	 * Allocate a DMA handle for the buffer
1107 	 */
1108 	rc = ddi_dma_alloc_handle(vrp->devinfo,
1109 	    &vr_data_dma_attr,
1110 	    DDI_DMA_DONTWAIT, NULL,
1111 	    &dmap->handle);
1112 
1113 	if (rc != DDI_SUCCESS) {
1114 		vr_log(vrp, CE_WARN,
1115 		    "ddi_dma_alloc_handle failed in vr_alloc_dmabuf");
1116 		return (VR_FAILURE);
1117 	}
1118 
1119 	/*
1120 	 * Allocate the buffer
1121 	 * The allocated buffer is aligned on a 2K boundary. This ensures that
1122 	 * a 1500 byte frame never crosses a page boundary and thus that the DMA
1123 	 * mapping can be established in 1 fragment.
1124 	 */
1125 	rc = ddi_dma_mem_alloc(dmap->handle,
1126 	    VR_DMABUFSZ,
1127 	    &vr_data_dma_accattr,
1128 	    DDI_DMA_RDWR | DDI_DMA_STREAMING,
1129 	    DDI_DMA_DONTWAIT, NULL,
1130 	    &dmap->buf,
1131 	    &dmap->bufsz,
1132 	    &dmap->acchdl);
1133 
1134 	if (rc != DDI_SUCCESS) {
1135 		vr_log(vrp, CE_WARN,
1136 		    "ddi_dma_mem_alloc failed in vr_alloc_dmabuf");
1137 		ddi_dma_free_handle(&dmap->handle);
1138 		return (VR_FAILURE);
1139 	}
1140 
1141 	/*
1142 	 * Map the memory
1143 	 */
1144 	rc = ddi_dma_addr_bind_handle(dmap->handle,
1145 	    NULL,
1146 	    (caddr_t)dmap->buf,
1147 	    dmap->bufsz,
1148 	    dmaflags,
1149 	    DDI_DMA_DONTWAIT,
1150 	    NULL,
1151 	    &dma_cookie,
1152 	    &cookiecnt);
1153 
1154 	/*
1155 	 * The cookie count should never be > 1 because we requested 2K alignment
1156 	 */
1157 	if (rc != DDI_DMA_MAPPED || cookiecnt > 1) {
1158 		vr_log(vrp, CE_WARN,
1159 		    "dma_addr_bind_handle failed in vr_alloc_dmabuf: "
1160 		    "rc = %d, cookiecnt = %d", rc, cookiecnt);
1161 		ddi_dma_mem_free(&dmap->acchdl);
1162 		ddi_dma_free_handle(&dmap->handle);
1163 		return (VR_FAILURE);
1164 	}
1165 	dmap->paddr = dma_cookie.dmac_address;
1166 	return (VR_SUCCESS);
1167 }
1168 
1169 /*
1170  * Destroy a DMA buffer.
1171  */
1172 static void
1173 vr_free_dmabuf(vr_data_dma_t *dmap)
1174 {
1175 	(void) ddi_dma_unbind_handle(dmap->handle);
1176 	ddi_dma_mem_free(&dmap->acchdl);
1177 	ddi_dma_free_handle(&dmap->handle);
1178 }
1179 
1180 /*
1181  * Interrupt service routine
1182  * When our vector is shared with another device, av_dispatch_autovect calls
1183  * all service routines for the vector until *none* of them return claimed.
1184  * That means that, when sharing vectors, this routine is called at least
1185  * twice for each interrupt.
1186  */
1187 uint_t
1188 vr_intr(caddr_t arg1, caddr_t arg2)
1189 {
1190 	vr_t		*vrp;
1191 	uint16_t	status;
1192 	mblk_t		*lp = NULL;
1193 	uint32_t	tx_resched;
1194 	uint32_t	link_change;
1195 
1196 	tx_resched = 0;
1197 	link_change = 0;
1198 	vrp = (void *)arg1;
1199 	_NOTE(ARGUNUSED(arg2))
1200 
1201 	mutex_enter(&vrp->intrlock);
1202 	/*
1203 	 * If the driver is not in running state it is not our interrupt.
1204 	 * Shared interrupts can end up here without us being started.
1205 	 */
1206 	if (vrp->chip.state != CHIPSTATE_RUNNING) {
1207 		mutex_exit(&vrp->intrlock);
1208 		return (DDI_INTR_UNCLAIMED);
1209 	}
1210 
1211 	/*
1212 	 * Read the status register to see if the interrupt is from our device
1213 	 * This read also ensures that posted writes are brought to main memory.
1214 	 */
1215 	status = VR_GET16(vrp->acc_reg, VR_ISR0) & VR_ICR0_CFG;
1216 	if (status == 0) {
1217 		/*
1218 		 * Status contains no configured interrupts
1219 		 * The interrupt was not generated by our device.
1220 		 */
1221 		vrp->stats.intr_unclaimed++;
1222 		mutex_exit(&vrp->intrlock);
1223 		return (DDI_INTR_UNCLAIMED);
1224 	}
1225 	vrp->stats.intr_claimed++;
1226 
1227 	/*
1228 	 * Acknowledge the event(s) that caused interruption.
1229 	 */
1230 	VR_PUT16(vrp->acc_reg, VR_ISR0, status);
1231 
1232 	/*
1233 	 * Receive completion.
1234 	 */
1235 	if ((status & (VR_ISR0_RX_DONE | VR_ISR_RX_ERR_BITS)) != 0) {
1236 		/*
1237 		 * Received some packets.
1238 		 */
1239 		lp = vr_receive(vrp);
1240 
1241 		/*
1242 		 * DMA stops after a conflict in the FIFO.
1243 		 */
1244 		if ((status & VR_ISR_RX_ERR_BITS) != 0)
1245 			VR_PUT8(vrp->acc_reg, VR_CTRL0, VR_CTRL0_DMA_GO);
1246 		status &= ~(VR_ISR0_RX_DONE | VR_ISR_RX_ERR_BITS);
1247 	}
1248 
1249 	/*
1250 	 * Transmit completion.
1251 	 */
1252 	if ((status & (VR_ISR0_TX_DONE | VR_ISR_TX_ERR_BITS)) != 0) {
1253 		/*
1254 		 * Card done with transmitting some packets
1255 		 * TX_DONE is generated 3 times per ring but it appears
1256 		 * more often because it is also set when an RX_DONE
1257 		 * interrupt is generated.
1258 		 */
1259 		mutex_enter(&vrp->tx.lock);
1260 		vr_tx_reclaim(vrp);
1261 		tx_resched = vrp->tx.resched;
1262 		vrp->tx.resched = 0;
1263 		mutex_exit(&vrp->tx.lock);
1264 		status &= ~(VR_ISR0_TX_DONE | VR_ISR_TX_ERR_BITS);
1265 	}
1266 
1267 	/*
1268 	 * Link status change.
1269 	 */
1270 	if ((status & VR_ICR0_LINKSTATUS) != 0) {
1271 		/*
1272 		 * Get new link state and inform the mac layer.
1273 		 */
1274 		mutex_enter(&vrp->oplock);
1275 		mutex_enter(&vrp->tx.lock);
1276 		vr_link_state(vrp);
1277 		mutex_exit(&vrp->tx.lock);
1278 		mutex_exit(&vrp->oplock);
1279 		status &= ~VR_ICR0_LINKSTATUS;
1280 		vrp->stats.linkchanges++;
1281 		link_change = 1;
1282 	}
1283 
1284 	/*
1285 	 * Bus error.
1286 	 */
1287 	if ((status & VR_ISR0_BUSERR) != 0) {
1288 		vr_log(vrp, CE_WARN, "bus error occurred");
1289 		vrp->reset = 1;
1290 		status &= ~VR_ISR0_BUSERR;
1291 	}
1292 
1293 	/*
1294 	 * We must have handled all things here.
1295 	 */
1296 	ASSERT(status == 0);
1297 	mutex_exit(&vrp->intrlock);
1298 
1299 	/*
1300 	 * Reset the device if requested
1301 	 * The request can come from the periodic tx check or from the interrupt
1302 	 * status.
1303 	 */
1304 	if (vrp->reset != 0) {
1305 		vr_error(vrp);
1306 		vrp->reset = 0;
1307 	}
1308 
1309 	/*
1310 	 * Pass up the list with received packets.
1311 	 */
1312 	if (lp != NULL)
1313 		mac_rx(vrp->machdl, 0, lp);
1314 
1315 	/*
1316 	 * Inform the upper layer on the linkstatus if there was a change.
1317 	 */
1318 	if (link_change != 0)
1319 		mac_link_update(vrp->machdl,
1320 		    (link_state_t)vrp->chip.link.state);
1321 	/*
1322 	 * Restart transmissions if we were waiting for tx descriptors.
1323 	 */
1324 	if (tx_resched == 1)
1325 		mac_tx_update(vrp->machdl);
1326 
1327 	/*
1328 	 * Read something from the card to ensure that all of our configuration
1329 	 * writes are delivered to the device before the interrupt is ended.
1330 	 */
1331 	(void) VR_GET8(vrp->acc_reg, VR_ETHERADDR);
1332 	return (DDI_INTR_CLAIMED);
1333 }
1334 
1335 /*
1336  * Respond to an unforeseen situation by resetting the card and our bookkeeping.
1337  */
1338 static void
1339 vr_error(vr_t *vrp)
1340 {
1341 	vr_log(vrp, CE_WARN, "resetting MAC.");
1342 	mutex_enter(&vrp->intrlock);
1343 	mutex_enter(&vrp->oplock);
1344 	mutex_enter(&vrp->tx.lock);
1345 	(void) vr_stop(vrp);
1346 	vr_reset(vrp);
1347 	(void) vr_start(vrp);
1348 	mutex_exit(&vrp->tx.lock);
1349 	mutex_exit(&vrp->oplock);
1350 	mutex_exit(&vrp->intrlock);
1351 	vrp->stats.resets++;
1352 }
1353 
1354 /*
1355  * Collect received packets in a list.
1356  */
1357 static mblk_t *
1358 vr_receive(vr_t *vrp)
1359 {
1360 	mblk_t			*lp, *mp, *np;
1361 	vr_desc_t		*rxp;
1362 	vr_data_dma_t		*dmap;
1363 	uint32_t		pklen;
1364 	uint32_t		rxstat0;
1365 	uint32_t		n;
1366 
1367 	lp = NULL;
1368 	n = 0;
1369 	for (rxp = vrp->rx.rp; ; rxp = rxp->next, n++) {
1370 		/*
1371 		 * Sync the descriptor before looking at it.
1372 		 */
1373 		(void) ddi_dma_sync(vrp->rxring.handle, rxp->offset,
1374 		    sizeof (vr_chip_desc_t), DDI_DMA_SYNC_FORKERNEL);
1375 
1376 		/*
1377 		 * Get the status from the descriptor.
1378 		 */
1379 		rxstat0 = ddi_get32(vrp->rxring.acchdl, &rxp->cdesc->stat0);
1380 
1381 		/*
1382 		 * We're done if the descriptor is owned by the card.
1383 		 */
1384 		if ((rxstat0 & VR_RDES0_OWN) != 0)
1385 			break;
1386 		else if ((rxstat0 & VR_RDES0_RXOK) != 0) {
1387 			/*
1388 			 * Received a good packet
1389 			 */
1390 			dmap = &rxp->dmabuf;
1391 			pklen = (rxstat0 >> 16) - ETHERFCSL;
1392 
1393 			/*
1394 			 * Sync the data.
1395 			 */
1396 			(void) ddi_dma_sync(dmap->handle, 0,
1397 			    pklen, DDI_DMA_SYNC_FORKERNEL);
1398 
1399 			/*
1400 			 * Send a new copied message upstream.
1401 			 */
1402 			np = allocb(pklen, 0);
1403 			if (np != NULL) {
1404 				bcopy(dmap->buf, np->b_rptr, pklen);
1405 				np->b_wptr = np->b_rptr + pklen;
1406 
1407 				vrp->stats.mac_stat_ipackets++;
1408 				vrp->stats.mac_stat_rbytes += pklen;
1409 
1410 				if ((rxstat0 & VR_RDES0_BAR) != 0)
1411 					vrp->stats.mac_stat_brdcstrcv++;
1412 				else if ((rxstat0 & VR_RDES0_MAR) != 0)
1413 					vrp->stats.mac_stat_multircv++;
1414 
1415 				/*
1416 				 * Link this packet in the list.
1417 				 */
1418 				np->b_next = NULL;
1419 				if (lp == NULL)
1420 					lp = mp = np;
1421 				else {
1422 					mp->b_next = np;
1423 					mp = np;
1424 				}
1425 			} else {
1426 				vrp->stats.allocbfail++;
1427 				vrp->stats.mac_stat_norcvbuf++;
1428 			}
1429 
1430 		} else {
1431 			/*
1432 			 * Received with errors.
1433 			 */
1434 			vrp->stats.mac_stat_ierrors++;
1435 			if ((rxstat0 & VR_RDES0_FAE) != 0)
1436 				vrp->stats.ether_stat_align_errors++;
1437 			if ((rxstat0 & VR_RDES0_CRCERR) != 0)
1438 				vrp->stats.ether_stat_fcs_errors++;
1439 			if ((rxstat0 & VR_RDES0_LONG) != 0)
1440 				vrp->stats.ether_stat_toolong_errors++;
1441 			if ((rxstat0 & VR_RDES0_RUNT) != 0)
1442 				vrp->stats.ether_stat_tooshort_errors++;
1443 			if ((rxstat0 & VR_RDES0_FOV) != 0)
1444 				vrp->stats.mac_stat_overflows++;
1445 		}
1446 
1447 		/*
1448 		 * Reset descriptor ownership to the MAC.
1449 		 */
1450 		ddi_put32(vrp->rxring.acchdl,
1451 		    &rxp->cdesc->stat0,
1452 		    VR_RDES0_OWN);
1453 		(void) ddi_dma_sync(vrp->rxring.handle,
1454 		    rxp->offset,
1455 		    sizeof (vr_chip_desc_t),
1456 		    DDI_DMA_SYNC_FORDEV);
1457 	}
1458 	vrp->rx.rp = rxp;
1459 
1460 	/*
1461 	 * If we do flowcontrol and if the card can transmit pause frames,
1462 	 * increment the "available receive descriptors" register.
1463 	 */
1464 	if (n > 0 && vrp->chip.link.flowctrl == VR_PAUSE_BIDIRECTIONAL) {
1465 		/*
1466 		 * Whenever the card moves a fragment to host memory it
1467 		 * decrements the RXBUFCOUNT register. If the value in the
1468 		 * register reaches a low watermark, the card transmits a pause
1469 		 * frame. If the value in this register reaches a high
1470 		 * watermark, the card sends a "cancel pause" frame
1471 		 *
1472 		 * Non-zero values written to this byte register are added
1473 		 * by the chip to the register's contents, so we must write
1474 		 * the number of descriptors freed.
1475 		 */
1476 		VR_PUT8(vrp->acc_reg, VR_FCR0_RXBUFCOUNT, MIN(n, 0xFF));
1477 	}
1478 	return (lp);
1479 }
1480 
1481 /*
1482  * Enqueue a list of packets for transmission
1483  * Return the packets not transmitted.
1484  */
1485 mblk_t *
1486 vr_mac_tx_enqueue_list(void *p, mblk_t *mp)
1487 {
1488 	vr_t		*vrp;
1489 	mblk_t		*nextp;
1490 
1491 	vrp = (vr_t *)p;
1492 	mutex_enter(&vrp->tx.lock);
1493 	do {
1494 		if (vrp->tx.nfree == 0) {
1495 			vrp->stats.ether_stat_defer_xmts++;
1496 			vrp->tx.resched = 1;
1497 			break;
1498 		}
1499 		nextp = mp->b_next;
1500 		mp->b_next = mp->b_prev = NULL;
1501 		vr_tx_enqueue_msg(vrp, mp);
1502 		mp = nextp;
1503 		vrp->tx.nfree--;
1504 	} while (mp != NULL);
1505 	mutex_exit(&vrp->tx.lock);
1506 
1507 	/*
1508 	 * Tell the chip to poll the TX ring.
1509 	 */
1510 	VR_PUT8(vrp->acc_reg, VR_CTRL0, VR_CTRL0_DMA_GO);
1511 	return (mp);
1512 }
1513 
1514 /*
1515  * Enqueue a message for transmission.
1516  */
1517 static void
1518 vr_tx_enqueue_msg(vr_t *vrp, mblk_t *mp)
1519 {
1520 	vr_desc_t		*wp;
1521 	vr_data_dma_t		*dmap;
1522 	uint32_t		pklen;
1523 	uint32_t		nextp;
1524 	int			padlen;
1525 
1526 	if ((uchar_t)mp->b_rptr[0] == 0xff &&
1527 	    (uchar_t)mp->b_rptr[1] == 0xff &&
1528 	    (uchar_t)mp->b_rptr[2] == 0xff &&
1529 	    (uchar_t)mp->b_rptr[3] == 0xff &&
1530 	    (uchar_t)mp->b_rptr[4] == 0xff &&
1531 	    (uchar_t)mp->b_rptr[5] == 0xff)
1532 		vrp->stats.mac_stat_brdcstxmt++;
1533 	else if ((mp->b_rptr[0] & 0x01) != 0)
1534 		vrp->stats.mac_stat_multixmt++;
1535 
1536 	pklen = msgsize(mp);
1537 	wp = vrp->tx.wp;
1538 	dmap = &wp->dmabuf;
1539 
1540 	/*
1541 	 * Copy the message into the pre-mapped buffer and free mp
1542 	 */
1543 	mcopymsg(mp, dmap->buf);
1544 
1545 	/*
1546 	 * Zero-pad short packets up to the minimum Ethernet frame length.
1547 	 */
1548 	padlen = ETHERMIN - pklen;
1549 	if (padlen > 0) {
1550 		bzero(dmap->buf + pklen, padlen);
1551 		pklen += padlen;
1552 	}
1553 
1554 	/*
1555 	 * Most of the statistics are updated on reclaim, after the actual
1556 	 * transmit. obytes is maintained here because the length is cleared
1557 	 * after transmission
1558 	 */
1559 	vrp->stats.mac_stat_obytes += pklen;
1560 
1561 	/*
1562 	 * Sync the data so the device sees the new content too.
1563 	 */
1564 	(void) ddi_dma_sync(dmap->handle, 0, pklen, DDI_DMA_SYNC_FORDEV);
1565 
1566 	/*
1567 	 * If we have reached the TX interrupt distance, enable a TX interrupt
1568 	 * for this packet. The Interrupt Control (IC) bit in the transmit
1569 	 * descriptor doesn't have any effect on the interrupt generation
1570 	 * despite the vague statements in the datasheet. Thus, we use the
1571 	 * more obscure interrupt suppress bit which is probably part of the
1572 	 * MAC's bookkeeping for TX interrupts and fragmented packets.
1573 	 */
1574 	vrp->tx.intr_distance++;
1575 	nextp = ddi_get32(vrp->txring.acchdl, &wp->cdesc->next);
1576 	if (vrp->tx.intr_distance >= VR_TX_MAX_INTR_DISTANCE) {
1577 		/*
1578 		 * Don't suppress the interrupt for this packet.
1579 		 */
1580 		vrp->tx.intr_distance = 0;
1581 		nextp &= (~VR_TDES3_SUPPRESS_INTR);
1582 	} else {
1583 		/*
1584 		 * Suppress the interrupt for this packet.
1585 		 */
1586 		nextp |= VR_TDES3_SUPPRESS_INTR;
1587 	}
1588 
1589 	/*
1590 	 * Write and sync the chip's descriptor
1591 	 */
1592 	ddi_put32(vrp->txring.acchdl, &wp->cdesc->stat1,
1593 	    pklen | (VR_TDES1_STP | VR_TDES1_EDP | VR_TDES1_CHN));
1594 	ddi_put32(vrp->txring.acchdl, &wp->cdesc->next, nextp);
1595 	ddi_put32(vrp->txring.acchdl, &wp->cdesc->stat0, VR_TDES0_OWN);
1596 	(void) ddi_dma_sync(vrp->txring.handle, wp->offset,
1597 	    sizeof (vr_chip_desc_t), DDI_DMA_SYNC_FORDEV);
1598 
1599 	/*
1600 	 * The ticks counter is cleared by reclaim when it reclaims some
1601 	 * descriptors and incremented by the periodic TX stall check.
1602 	 */
1603 	vrp->tx.stallticks = 1;
1604 	vrp->tx.wp = wp->next;
1605 }
1606 
1607 /*
1608  * Free transmitted descriptors.
1609  */
1610 static void
1611 vr_tx_reclaim(vr_t *vrp)
1612 {
1613 	vr_desc_t		*cp;
1614 	uint32_t		stat0, stat1, freed, dirty;
1615 
1616 	ASSERT(mutex_owned(&vrp->tx.lock));
1617 
1618 	freed = 0;
1619 	dirty = vrp->tx.ndesc - vrp->tx.nfree;
1620 	for (cp = vrp->tx.cp; dirty > 0; cp = cp->next) {
1621 		/*
1622 		 * Sync & get descriptor status.
1623 		 */
1624 		(void) ddi_dma_sync(vrp->txring.handle, cp->offset,
1625 		    sizeof (vr_chip_desc_t),
1626 		    DDI_DMA_SYNC_FORKERNEL);
1627 		stat0 = ddi_get32(vrp->txring.acchdl, &cp->cdesc->stat0);
1628 
1629 		if ((stat0 & VR_TDES0_OWN) != 0)
1630 			break;
1631 
1632 		/*
1633 		 * Do stats for the first descriptor in a chain.
1634 		 */
1635 		stat1 = ddi_get32(vrp->txring.acchdl, &cp->cdesc->stat1);
1636 		if ((stat1 & VR_TDES1_STP) != 0) {
1637 			if ((stat0 & VR_TDES0_TERR) != 0) {
1638 				vrp->stats.ether_stat_macxmt_errors++;
1639 				if ((stat0 & VR_TDES0_UDF) != 0)
1640 					vrp->stats.mac_stat_underflows++;
1641 				if ((stat0 & VR_TDES0_ABT) != 0)
1642 					vrp->stats.ether_stat_ex_collisions++;
1643 				/*
1644 				 * Abort and FIFO underflow stop the MAC.
1645 				 * Packet queueing must be disabled with HD
1646 				 * links because otherwise the MAC is also lost
1647 				 * after a few of these events.
1648 				 */
1649 				VR_PUT8(vrp->acc_reg, VR_CTRL0,
1650 				    VR_CTRL0_DMA_GO);
1651 			} else
1652 				vrp->stats.mac_stat_opackets++;
1653 
1654 			if ((stat0 & VR_TDES0_COL) != 0) {
1655 				if ((stat0 & VR_TDES0_NCR) == 1) {
1656 					vrp->stats.
1657 					    ether_stat_first_collisions++;
1658 				} else {
1659 					vrp->stats.
1660 					    ether_stat_multi_collisions++;
1661 				}
1662 				vrp->stats.mac_stat_collisions +=
1663 				    (stat0 & VR_TDES0_NCR);
1664 			}
1665 
1666 			if ((stat0 & VR_TDES0_CRS) != 0)
1667 				vrp->stats.ether_stat_carrier_errors++;
1668 
1669 			if ((stat0 & VR_TDES0_OWC) != 0)
1670 				vrp->stats.ether_stat_tx_late_collisions++;
1671 		}
1672 		freed += 1;
1673 		dirty -= 1;
1674 	}
1675 	vrp->tx.cp = cp;
1676 
1677 	if (freed > 0) {
1678 		vrp->tx.nfree += freed;
1679 		vrp->tx.stallticks = 0;
1680 		vrp->stats.txreclaims += 1;
1681 	} else
1682 		vrp->stats.txreclaim0 += 1;
1683 }
1684 
1685 /*
1686  * Check TX health every 2 seconds.
1687  */
1688 static void
1689 vr_periodic(void *p)
1690 {
1691 	vr_t		*vrp;
1692 
1693 	vrp = (vr_t *)p;
1694 	if (vrp->chip.state == CHIPSTATE_RUNNING &&
1695 	    vrp->chip.link.state == VR_LINK_STATE_UP && vrp->reset == 0) {
1696 		if (mutex_tryenter(&vrp->intrlock) != 0) {
1697 			mutex_enter(&vrp->tx.lock);
1698 			if (vrp->tx.resched == 1) {
1699 				if (vrp->tx.stallticks >= VR_MAXTXCHECKS) {
1700 					/*
1701 					 * No successful reclaim in the last n
1702 					 * intervals. Reset the MAC.
1703 					 */
1704 					vrp->reset = 1;
1705 					vr_log(vrp, CE_WARN,
1706 					    "TX stalled, resetting MAC");
1707 					vrp->stats.txstalls++;
1708 				} else {
1709 					/*
1710 					 * Increase until we find that we've
1711 					 * waited long enough.
1712 					 */
1713 					vrp->tx.stallticks += 1;
1714 				}
1715 			}
1716 			mutex_exit(&vrp->tx.lock);
1717 			mutex_exit(&vrp->intrlock);
1718 			vrp->stats.txchecks++;
1719 		}
1720 	}
1721 	vrp->stats.cyclics++;
1722 }
1723 
1724 /*
1725  * Bring the device to our desired initial state.
1726  */
1727 static void
1728 vr_reset(vr_t *vrp)
1729 {
1730 	uint32_t	time;
1731 
1732 	/*
1733 	 * Reset the MAC
1734 	 * If we don't wait long enough for the forced reset to complete,
1735 	 * the MAC loses sync with the PHY. The result: link up, no link change
1736 	 * interrupt and no data transfer.
1737 	 */
1738 	time = 0;
1739 	VR_PUT8(vrp->acc_io, VR_CTRL1, VR_CTRL1_RESET);
1740 	do {
1741 		drv_usecwait(100);
1742 		time += 100;
1743 		if (time >= 100000) {
1744 			VR_PUT8(vrp->acc_io, VR_MISC1, VR_MISC1_RESET);
1745 			delay(drv_usectohz(200000));
1746 		}
1747 	} while ((VR_GET8(vrp->acc_io, VR_CTRL1) & VR_CTRL1_RESET) != 0);
1748 	delay(drv_usectohz(10000));
1749 
1750 	/*
1751 	 * Load the PROM contents into the MAC again.
1752 	 */
1753 	VR_SETBIT8(vrp->acc_io, VR_PROMCTL, VR_PROMCTL_RELOAD);
1754 	delay(drv_usectohz(100000));
1755 
1756 	/*
1757 	 * Tell the MAC via IO space that we like to use memory space for
1758 	 * Tell the MAC via IO space that we'd like to use memory space for
1759 	 */
1760 	VR_SETBIT8(vrp->acc_io, VR_CFGD, VR_CFGD_MMIOEN);
1761 }
1762 
1763 /*
1764  * Prepare and enable the card (MAC + PHY + PCI).
1765  */
1766 static int
1767 vr_start(vr_t *vrp)
1768 {
1769 	uint8_t		pci_latency, pci_mode;
1770 
1771 	ASSERT(mutex_owned(&vrp->oplock));
1772 
1773 	/*
1774 	 * Allocate DMA buffers for RX.
1775 	 */
1776 	if (vr_rxring_init(vrp) != VR_SUCCESS) {
1777 		vr_log(vrp, CE_NOTE, "vr_rxring_init() failed");
1778 		return (ENOMEM);
1779 	}
1780 
1781 	/*
1782 	 * Allocate DMA buffers for TX.
1783 	 */
1784 	if (vr_txring_init(vrp) != VR_SUCCESS) {
1785 		vr_log(vrp, CE_NOTE, "vr_txring_init() failed");
1786 		vr_rxring_fini(vrp);
1787 		return (ENOMEM);
1788 	}
1789 
1790 	/*
1791 	 * Changes to the chip-specific registers, as done in VIA's fet driver.
1792 	 * These bits are not in the datasheet and are controlled by vr_chip_info.
1793 	 */
1794 	pci_mode = VR_GET8(vrp->acc_reg, VR_MODE2);
1795 	if ((vrp->chip.info.bugs & VR_BUG_NEEDMODE10T) != 0)
1796 		pci_mode |= VR_MODE2_MODE10T;
1797 
1798 	if ((vrp->chip.info.bugs & VR_BUG_NEEDMODE2PCEROPT) != 0)
1799 		pci_mode |= VR_MODE2_PCEROPT;
1800 
1801 	if ((vrp->chip.info.features & VR_FEATURE_MRDLNMULTIPLE) != 0)
1802 		pci_mode |= VR_MODE2_MRDPL;
1803 	VR_PUT8(vrp->acc_reg, VR_MODE2, pci_mode);
1804 
1805 	pci_mode = VR_GET8(vrp->acc_reg, VR_MODE3);
1806 	if ((vrp->chip.info.bugs & VR_BUG_NEEDMIION) != 0)
1807 		pci_mode |= VR_MODE3_MIION;
1808 	VR_PUT8(vrp->acc_reg, VR_MODE3, pci_mode);
1809 
1810 	/*
1811 	 * RX: Accept broadcast packets.
1812 	 */
1813 	VR_SETBIT8(vrp->acc_reg, VR_RXCFG, VR_RXCFG_ACCEPTBROAD);
1814 
1815 	/*
1816 	 * RX: Start DMA when there are 256 bytes in the FIFO.
1817 	 */
1818 	VR_SETBITS8(vrp->acc_reg, VR_RXCFG, VR_RXCFG_FIFO_THRESHOLD_BITS,
1819 	    VR_RXCFG_FIFO_THRESHOLD_256);
1820 	VR_SETBITS8(vrp->acc_reg, VR_BCR0, VR_BCR0_RX_FIFO_THRESHOLD_BITS,
1821 	    VR_BCR0_RX_FIFO_THRESHOLD_256);
1822 
1823 	/*
1824 	 * TX: Start transmit when there are 256 bytes in the FIFO.
1825 	 */
1826 	VR_SETBITS8(vrp->acc_reg, VR_TXCFG, VR_TXCFG_FIFO_THRESHOLD_BITS,
1827 	    VR_TXCFG_FIFO_THRESHOLD_256);
1828 	VR_SETBITS8(vrp->acc_reg, VR_BCR1, VR_BCR1_TX_FIFO_THRESHOLD_BITS,
1829 	    VR_BCR1_TX_FIFO_THRESHOLD_256);
1830 
1831 	/*
1832 	 * Burst transfers up to 256 bytes.
1833 	 */
1834 	VR_SETBITS8(vrp->acc_reg, VR_BCR0, VR_BCR0_DMABITS, VR_BCR0_DMA256);
1835 
1836 	/*
1837 	 * Disable TX autopolling as it is bad for RX performance.
1838 	 * I assume this is because the RX process often finds the bus occupied
1839 	 * by the polling process.
1840 	 */
1841 	VR_SETBIT8(vrp->acc_reg, VR_CTRL1, VR_CTRL1_NOAUTOPOLL);
1842 
1843 	/*
1844 	 * Honor the PCI latency timer if it is reasonable.
1845 	 */
1846 	pci_latency = VR_GET8(vrp->acc_cfg, PCI_CONF_LATENCY_TIMER);
1847 	if (pci_latency != 0 && pci_latency != 0xFF)
1848 		VR_SETBIT8(vrp->acc_reg, VR_CFGB, VR_CFGB_LATENCYTIMER);
1849 	else
1850 		VR_CLRBIT8(vrp->acc_reg, VR_CFGB, VR_CFGB_LATENCYTIMER);
1851 
1852 	/*
1853 	 * Ensure that VLAN filtering is off, because this strips the tag.
1854 	 */
1855 	if ((vrp->chip.info.features & VR_FEATURE_VLANTAGGING) != 0) {
1856 		VR_CLRBIT8(vrp->acc_reg, VR_BCR1, VR_BCR1_VLANFILTER);
1857 		VR_CLRBIT8(vrp->acc_reg, VR_TXCFG, VR_TXCFG_8021PQ_EN);
1858 	}
1859 
1860 	/*
1861 	 * Clear the CAM filter.
1862 	 */
1863 	if ((vrp->chip.info.features & VR_FEATURE_CAMSUPPORT) != 0) {
1864 		VR_PUT8(vrp->acc_reg, VR_CAM_CTRL, VR_CAM_CTRL_ENABLE);
1865 		VR_PUT32(vrp->acc_reg, VR_CAM_MASK, 0);
1866 		VR_PUT8(vrp->acc_reg, VR_CAM_CTRL, VR_CAM_CTRL_DONE);
1867 
1868 		VR_PUT8(vrp->acc_reg, VR_CAM_CTRL,
1869 		    VR_CAM_CTRL_ENABLE|VR_CAM_CTRL_SELECT_VLAN);
1870 		VR_PUT8(vrp->acc_reg, VR_VCAM0, 0);
1871 		VR_PUT8(vrp->acc_reg, VR_VCAM1, 0);
1872 		VR_PUT8(vrp->acc_reg, VR_CAM_CTRL, VR_CAM_CTRL_WRITE);
1873 		VR_PUT32(vrp->acc_reg, VR_CAM_MASK, 1);
1874 		drv_usecwait(2);
1875 		VR_PUT8(vrp->acc_reg, VR_CAM_CTRL, VR_CAM_CTRL_DONE);
1876 	}
1877 
1878 	/*
1879 	 * Give the start addresses of the descriptor rings to the DMA
1880 	 * controller on the MAC.
1881 	 */
1882 	VR_PUT32(vrp->acc_reg, VR_RXADDR, vrp->rx.rp->paddr);
1883 	VR_PUT32(vrp->acc_reg, VR_TXADDR, vrp->tx.wp->paddr);
1884 
1885 	/*
1886 	 * We don't use the additionally invented interrupt ICR1 register,
1887 	 * so make sure these are disabled.
1888 	 */
1889 	VR_PUT8(vrp->acc_reg, VR_ISR1, 0xFF);
1890 	VR_PUT8(vrp->acc_reg, VR_ICR1, 0);
1891 
1892 	/*
1893 	 * Enable interrupts.
1894 	 */
1895 	VR_PUT16(vrp->acc_reg, VR_ISR0, 0xFFFF);
1896 	VR_PUT16(vrp->acc_reg, VR_ICR0, VR_ICR0_CFG);
1897 
1898 	/*
1899 	 * Enable the DMA controller.
1900 	 */
1901 	VR_PUT8(vrp->acc_reg, VR_CTRL0, VR_CTRL0_DMA_GO);
1902 
1903 	/*
1904 	 * Configure the link. Rely on the link change interrupt for getting
1905 	 * the link state into the driver.
1906 	 */
1907 	vr_link_init(vrp);
1908 
1909 	/*
1910 	 * Set the software view on the state to 'running'.
1911 	 */
1912 	vrp->chip.state = CHIPSTATE_RUNNING;
1913 	return (0);
1914 }
1915 
1916 /*
1917  * Stop DMA and interrupts.
1918  */
1919 static int
1920 vr_stop(vr_t *vrp)
1921 {
1922 	ASSERT(mutex_owned(&vrp->oplock));
1923 
1924 	/*
1925 	 * Stop interrupts.
1926 	 */
1927 	VR_PUT16(vrp->acc_reg, VR_ICR0, 0);
1928 	VR_PUT8(vrp->acc_reg, VR_ICR1, 0);
1929 
1930 	/*
1931 	 * Stop DMA.
1932 	 */
1933 	VR_PUT8(vrp->acc_reg, VR_CTRL0, VR_CTRL0_DMA_STOP);
1934 
1935 	/*
1936 	 * Set the software view on the state to stopped.
1937 	 */
1938 	vrp->chip.state = CHIPSTATE_STOPPED;
1939 
1940 	/*
1941 	 * Remove DMA buffers from the rings.
1942 	 */
1943 	vr_rxring_fini(vrp);
1944 	vr_txring_fini(vrp);
1945 	return (0);
1946 }
1947 
1948 int
1949 vr_mac_start(void *p)
1950 {
1951 	vr_t	*vrp;
1952 	int	rc;
1953 
1954 	vrp = (vr_t *)p;
1955 	mutex_enter(&vrp->oplock);
1956 
1957 	/*
1958 	 * Reset the card.
1959 	 */
1960 	vr_reset(vrp);
1961 
1962 	/*
1963 	 * Prepare and enable the card.
1964 	 */
1965 	rc = vr_start(vrp);
1966 
1967 	/*
1968 	 * Configure a cyclic function to keep the card & driver from diverging.
1969 	 */
1970 	vrp->periodic_id =
1971 	    ddi_periodic_add(vr_periodic, vrp, VR_CHECK_INTERVAL, DDI_IPL_0);
1972 
1973 	mutex_exit(&vrp->oplock);
1974 	return (rc);
1975 }
1976 
1977 void
1978 vr_mac_stop(void *p)
1979 {
1980 	vr_t	*vrp = p;
1981 
1982 	mutex_enter(&vrp->oplock);
1983 	mutex_enter(&vrp->tx.lock);
1984 
1985 	/*
1986 	 * Stop the device.
1987 	 */
1988 	(void) vr_stop(vrp);
1989 	mutex_exit(&vrp->tx.lock);
1990 
1991 	/*
1992 	 * Remove the cyclic from the system.
1993 	 */
1994 	ddi_periodic_delete(vrp->periodic_id);
1995 	mutex_exit(&vrp->oplock);
1996 }
1997 
1998 /*
1999  * Add or remove a multicast address to/from the filter
2000  *
2001  * From the 21143 manual:
2002  *  The 21143 can store 512 bits serving as hash bucket heads, and one physical
2003  *  48-bit Ethernet address. Incoming frames with multicast destination
2004  *  addresses are subjected to imperfect filtering. Frames with physical
2005  *  destination  addresses are checked against the single physical address.
2006  *  For any incoming frame with a multicast destination address, the 21143
2007  *  applies the standard Ethernet cyclic redundancy check (CRC) function to the
2008  *  first 6 bytes containing the destination address, then it uses the most
2009  *  significant 9 bits of the result as a bit index into the table. If the
2010  *  indexed bit is set, the frame is accepted. If the bit is cleared, the frame
2011  *  is rejected. This filtering mode is called imperfect because multicast
2012  *  frames not addressed to this station may slip through, but it still
2013  *  decreases the number of frames that the host has to process.
2014  * I assume the above is also the way the VIA chips work. There's not a single
2015  * word about the multicast filter in the datasheet.
2016  *
2017  * Another word on the CAM filter on VT6105M controllers:
2018  *  The VT6105M has content addressable memory which can be used for perfect
2019  *  filtering of 32 multicast addresses and a few VLAN id's
2020  *  filtering of 32 multicast addresses and a few VLAN IDs.
2021  *  I think it works like this: When the controller receives a multicast
2022  *  address, it looks up the address using CAM. When it is found, it takes the
2023  *  matching cell address (index) and compares this to the bit position in the
2024  *  cam mask. If the bit is set, the packet is passed up. If CAM lookup does not
2025  *  CAM mask. If the bit is set, the packet is passed up. If the CAM lookup
2026  *  does not result in a match, the packet is filtered using the hash-based
2027  *  filter; if that matches, the packet is passed up, otherwise it is dropped.
2028  * to work ...
2029  */
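
/*
 * Illustrative sketch only (not part of the committed driver): how an address
 * ends up in the 64-bit hash filter used below. The top 6 bits of the
 * MSB-first CRC select one of 64 bucket bits, kept in the mhash0/mhash1
 * shadow copies and written to the MAR0/MAR1 registers. vr_hash_sketch() is
 * a hypothetical helper; ether_crc_be() is the real function defined further
 * down in this file.
 */
#if 0
static void
vr_hash_sketch(vr_t *vrp, const uint8_t *mca)
{
	/* 32-bit CRC, MSB first, of the 6-byte destination address */
	uint32_t index = ether_crc_be(mca) >> (32 - 6);	/* 0 .. 63 */

	if (index < 32)
		vrp->mhash0 |= (1U << index);		/* bucket bit in MAR0 */
	else
		vrp->mhash1 |= (1U << (index - 32));	/* bucket bit in MAR1 */
}
#endif
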
2030 int
2031 vr_mac_set_multicast(void *p, boolean_t add, const uint8_t *mca)
2032 {
2033 	vr_t		*vrp;
2034 	uint32_t	crc_index;
2035 	int32_t		cam_index;
2036 	uint32_t	cam_mask;
2037 	boolean_t	use_hash_filter;
2038 	ether_addr_t	taddr;
2039 	uint32_t	a;
2040 
2041 	vrp = (vr_t *)p;
2042 	mutex_enter(&vrp->oplock);
2043 	mutex_enter(&vrp->intrlock);
2044 	use_hash_filter = B_FALSE;
2045 
2046 	if ((vrp->chip.info.features & VR_FEATURE_CAMSUPPORT) != 0) {
2047 		/*
2048 		 * Program the perfect filter.
2049 		 */
2050 		cam_mask = VR_GET32(vrp->acc_reg, VR_CAM_MASK);
2051 		if (add == B_TRUE) {
2052 			/*
2053 			 * Get index of first empty slot.
2054 			 */
2055 			bzero(&taddr, sizeof (taddr));
2056 			cam_index = vr_cam_index(vrp, taddr);
2057 			if (cam_index != -1) {
2058 				/*
2059 				 * Add address at cam_index.
2060 				 */
2061 				cam_mask |= (1 << cam_index);
2062 				VR_PUT8(vrp->acc_reg, VR_CAM_CTRL,
2063 				    VR_CAM_CTRL_ENABLE);
2064 				VR_PUT8(vrp->acc_reg, VR_CAM_ADDR, cam_index);
2065 				VR_PUT32(vrp->acc_reg, VR_CAM_MASK, cam_mask);
2066 				for (a = 0; a < ETHERADDRL; a++) {
2067 					VR_PUT8(vrp->acc_reg,
2068 					    VR_MCAM0 + a, mca[a]);
2069 				}
2070 				VR_PUT8(vrp->acc_reg, VR_CAM_CTRL,
2071 				    VR_CAM_CTRL_WRITE);
2072 				drv_usecwait(2);
2073 				VR_PUT8(vrp->acc_reg, VR_CAM_CTRL,
2074 				    VR_CAM_CTRL_DONE);
2075 			} else {
2076 				/*
2077 				 * No free CAM slots available
2078 				 * Add mca to the imperfect filter.
2079 				 */
2080 				use_hash_filter = B_TRUE;
2081 			}
2082 		} else {
2083 			/*
2084 			 * Find the index of the entry to remove
2085 			 * If the entry was not found (-1), the addition was
2086 			 * probably done when the table was full.
2087 			 */
2088 			cam_index = vr_cam_index(vrp, mca);
2089 			if (cam_index != -1) {
2090 				/*
2091 				 * Disable the corresponding mask bit.
2092 				 */
2093 				cam_mask &= ~(1 << cam_index);
2094 				VR_PUT8(vrp->acc_reg, VR_CAM_CTRL,
2095 				    VR_CAM_CTRL_ENABLE);
2096 				VR_PUT32(vrp->acc_reg, VR_CAM_MASK, cam_mask);
2097 				VR_PUT8(vrp->acc_reg, VR_CAM_CTRL,
2098 				    VR_CAM_CTRL_DONE);
2099 			} else {
2100 				/*
2101 				 * The entry to be removed was not found
2102 				 * The likely cause is that the CAM was full
2103 				 * during addition. The entry is added to the
2104 				 * hash filter in that case and needs to be
2105 				 * removed there too.
2106 				 */
2107 				use_hash_filter = B_TRUE;
2108 			}
2109 		}
2110 	} else {
2111 		/*
2112 		 * No CAM in the MAC, thus we need the hash filter.
2113 		 */
2114 		use_hash_filter = B_TRUE;
2115 	}
2116 
2117 	if (use_hash_filter == B_TRUE) {
2118 		/*
2119 		 * Get the CRC-32 of the multicast address
2120 		 * The card uses the "MSB first" direction when calculating
2121 		 * the CRC. This is odd because Ethernet is "LSB first".
2122 		 * We have to use that "big endian" approach as well.
2123 		 */
2124 		crc_index = ether_crc_be(mca) >> (32 - 6);
2125 		if (add == B_TRUE) {
2126 			/*
2127 			 * Turn bit[crc_index] on.
2128 			 */
2129 			if (crc_index < 32)
2130 				vrp->mhash0 |= (1 << crc_index);
2131 			else
2132 				vrp->mhash1 |= (1 << (crc_index - 32));
2133 		} else {
2134 			/*
2135 			 * Turn bit[crc_index] off.
2136 			 */
2137 			if (crc_index < 32)
2138 				vrp->mhash0 &= ~(1 << crc_index);
2139 			else
2140 				vrp->mhash1 &= ~(1 << (crc_index - 32));
2141 		}
2142 
2143 		/*
2144 	 * When not promiscuous, write the filter now. When promiscuous,
2145 		 * the filter is open and will be written when promiscuous ends.
2146 		 */
2147 		if (vrp->promisc == B_FALSE) {
2148 			VR_PUT32(vrp->acc_reg, VR_MAR0, vrp->mhash0);
2149 			VR_PUT32(vrp->acc_reg, VR_MAR1, vrp->mhash1);
2150 		}
2151 	}
2152 
2153 	/*
2154 	 * Enable/disable multicast reception based on mcount.
2155 	 */
2156 	if (add == B_TRUE)
2157 		vrp->mcount++;
2158 	else if (vrp->mcount != 0)
2159 		vrp->mcount--;
2160 	if (vrp->mcount != 0)
2161 		VR_SETBIT8(vrp->acc_reg, VR_RXCFG, VR_RXCFG_ACCEPTMULTI);
2162 	else
2163 		VR_CLRBIT8(vrp->acc_reg, VR_RXCFG, VR_RXCFG_ACCEPTMULTI);
2164 
2165 	mutex_exit(&vrp->intrlock);
2166 	mutex_exit(&vrp->oplock);
2167 	return (0);
2168 }
2169 
2170 /*
2171  * Calculate the CRC32 for 6 bytes of multicast address in MSB(it) first order.
2172  * The MSB-first order is a bit odd because the Ethernet standard is LSB first.
2173  */
2174 static uint32_t
2175 ether_crc_be(const uint8_t *data)
2176 {
2177 	uint32_t	crc = (uint32_t)0xFFFFFFFFU;
2178 	uint32_t	carry;
2179 	uint32_t	bit;
2180 	uint32_t	length;
2181 	uint8_t		c;
2182 
2183 	for (length = 0; length < ETHERADDRL; length++) {
2184 		c = data[length];
2185 		for (bit = 0; bit < 8; bit++) {
2186 			carry = ((crc & 0x80000000U) ? 1 : 0) ^ (c & 0x01);
2187 			crc <<= 1;
2188 			c >>= 1;
2189 			if (carry)
2190 				crc = (crc ^ 0x04C11DB6) | carry;
2191 		}
2192 	}
2193 	return (crc);
2194 }
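
/*
 * Note on the loop above: carry is 1 whenever the XOR branch is taken, and
 * bit 0 of crc is 0 right after the shift, so (crc ^ 0x04C11DB6) | carry is
 * equivalent to XOR-ing the standard Ethernet CRC-32 polynomial 0x04C11DB7.
 * A more conventional way to write the inner loop (sketch only):
 *
 *	carry = ((crc >> 31) ^ c) & 1;
 *	crc <<= 1;
 *	c >>= 1;
 *	if (carry)
 *		crc ^= 0x04C11DB7;
 */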
2195 
2196 
2197 /*
2198  * Return the CAM index (base 0) of maddr, or -1 if maddr is not found.
2199  * If maddr is 0, return the index of an empty slot in the CAM, or -1 when no
2200  * free slots are available.
2201  */
2202 static int32_t
2203 vr_cam_index(vr_t *vrp, const uint8_t *maddr)
2204 {
2205 	ether_addr_t	taddr;
2206 	int32_t		index;
2207 	uint32_t	mask;
2208 	uint32_t	a;
2209 
2210 	bzero(&taddr, sizeof (taddr));
2211 
2212 	/*
2213 	 * Read the CAM mask from the controller.
2214 	 */
2215 	mask = VR_GET32(vrp->acc_reg, VR_CAM_MASK);
2216 
2217 	/*
2218 	 * If maddr is 0, return the first unused slot or -1 for no unused.
2219 	 */
2220 	if (bcmp(maddr, taddr, ETHERADDRL) == 0) {
2221 		/*
2222 		 * Look for the first unused position in mask.
2223 		 */
2224 		for (index = 0; index < VR_CAM_SZ; index++) {
2225 			if (((mask >> index) & 1) == 0)
2226 				return (index);
2227 		}
2228 		return (-1);
2229 	} else {
2230 		/*
2231 		 * Look for maddr in CAM.
2232 		 */
2233 		for (index = 0; index < VR_CAM_SZ; index++) {
2234 			/* Look at enabled entries only */
2235 			if (((mask >> index) & 1) == 0)
2236 				continue;
2237 
2238 			VR_PUT8(vrp->acc_reg, VR_CAM_CTRL, VR_CAM_CTRL_ENABLE);
2239 			VR_PUT8(vrp->acc_reg, VR_CAM_ADDR, index);
2240 			VR_PUT8(vrp->acc_reg, VR_CAM_CTRL, VR_CAM_CTRL_READ);
2241 			drv_usecwait(2);
2242 			for (a = 0; a < ETHERADDRL; a++)
2243 				taddr[a] = VR_GET8(vrp->acc_reg, VR_MCAM0 + a);
2244 			VR_PUT8(vrp->acc_reg, VR_CAM_CTRL, VR_CAM_CTRL_DONE);
2245 			if (bcmp(maddr, taddr, ETHERADDRL) == 0)
2246 				return (index);
2247 		}
2248 	}
2249 	return (-1);
2250 }
2251 
2252 /*
2253  * Set promiscuous mode on or off.
2254  */
2255 int
2256 vr_mac_set_promisc(void *p, boolean_t promiscflag)
2257 {
2258 	vr_t		*vrp;
2259 	uint8_t		rxcfg;
2260 
2261 	vrp = (vr_t *)p;
2262 
2263 	mutex_enter(&vrp->intrlock);
2264 	mutex_enter(&vrp->oplock);
2265 	mutex_enter(&vrp->tx.lock);
2266 
2267 	/*
2268 	 * Get current receive configuration.
2269 	 */
2270 	rxcfg = VR_GET8(vrp->acc_reg, VR_RXCFG);
2271 	vrp->promisc = promiscflag;
2272 
2273 	if (promiscflag == B_TRUE) {
2274 		/*
2275 		 * Enable promiscuous mode and open the multicast filter.
2276 		 */
2277 		rxcfg |= (VR_RXCFG_PROMISC | VR_RXCFG_ACCEPTMULTI);
2278 		VR_PUT32(vrp->acc_reg, VR_MAR0, 0xffffffff);
2279 		VR_PUT32(vrp->acc_reg, VR_MAR1, 0xffffffff);
2280 	} else {
2281 		/*
2282 		 * Restore the multicast filter and disable promiscuous mode.
2283 		 */
2284 		VR_PUT32(vrp->acc_reg, VR_MAR0, vrp->mhash0);
2285 		VR_PUT32(vrp->acc_reg, VR_MAR1, vrp->mhash1);
2286 		rxcfg &= ~VR_RXCFG_PROMISC;
2287 		if (vrp->mcount != 0)
2288 			rxcfg |= VR_RXCFG_ACCEPTMULTI;
2289 	}
2290 	VR_PUT8(vrp->acc_reg, VR_RXCFG, rxcfg);
2291 	mutex_exit(&vrp->tx.lock);
2292 	mutex_exit(&vrp->oplock);
2293 	mutex_exit(&vrp->intrlock);
2294 	return (0);
2295 }
2296 
2297 int
2298 vr_mac_getstat(void *arg, uint_t stat, uint64_t *val)
2299 {
2300 	vr_t		*vrp;
2301 	uint64_t	v;
2302 
2303 	vrp = (void *) arg;
2304 
2305 	switch (stat) {
2306 	default:
2307 		return (ENOTSUP);
2308 
2309 	case ETHER_STAT_ADV_CAP_100T4:
2310 		v = (vrp->chip.mii.anadv & MII_ABILITY_100BASE_T4) != 0;
2311 		break;
2312 
2313 	case ETHER_STAT_ADV_CAP_100FDX:
2314 		v = (vrp->chip.mii.anadv & MII_ABILITY_100BASE_TX_FD) != 0;
2315 		break;
2316 
2317 	case ETHER_STAT_ADV_CAP_100HDX:
2318 		v = (vrp->chip.mii.anadv & MII_ABILITY_100BASE_TX) != 0;
2319 		break;
2320 
2321 	case ETHER_STAT_ADV_CAP_10FDX:
2322 		v = (vrp->chip.mii.anadv & MII_ABILITY_10BASE_T_FD) != 0;
2323 		break;
2324 
2325 	case ETHER_STAT_ADV_CAP_10HDX:
2326 		v = (vrp->chip.mii.anadv & MII_ABILITY_10BASE_T) != 0;
2327 		break;
2328 
2329 	case ETHER_STAT_ADV_CAP_ASMPAUSE:
2330 		v = 0;
2331 		break;
2332 
2333 	case ETHER_STAT_ADV_CAP_AUTONEG:
2334 		v = (vrp->chip.mii.control & MII_CONTROL_ANE) != 0;
2335 		break;
2336 
2337 	case ETHER_STAT_ADV_CAP_PAUSE:
2338 		v = (vrp->chip.mii.anadv & MII_ABILITY_PAUSE) != 0;
2339 		break;
2340 
2341 	case ETHER_STAT_ADV_REMFAULT:
2342 		v = (vrp->chip.mii.anadv & MII_AN_ADVERT_REMFAULT) != 0;
2343 		break;
2344 
2345 	case ETHER_STAT_ALIGN_ERRORS:
2346 		v = vrp->stats.ether_stat_align_errors;
2347 		break;
2348 
2349 	case ETHER_STAT_CAP_100T4:
2350 		v = (vrp->chip.mii.status & MII_STATUS_100_BASE_T4) != 0;
2351 		break;
2352 
2353 	case ETHER_STAT_CAP_100FDX:
2354 		v = (vrp->chip.mii.status & MII_STATUS_100_BASEX_FD) != 0;
2355 		break;
2356 
2357 	case ETHER_STAT_CAP_100HDX:
2358 		v = (vrp->chip.mii.status & MII_STATUS_100_BASEX) != 0;
2359 		break;
2360 
2361 	case ETHER_STAT_CAP_10FDX:
2362 		v = (vrp->chip.mii.status & MII_STATUS_10_FD) != 0;
2363 		break;
2364 
2365 	case ETHER_STAT_CAP_10HDX:
2366 		v = (vrp->chip.mii.status & MII_STATUS_10) != 0;
2367 		break;
2368 
2369 	case ETHER_STAT_CAP_ASMPAUSE:
2370 		v = 0;
2371 		break;
2372 
2373 	case ETHER_STAT_CAP_AUTONEG:
2374 		v = (vrp->chip.mii.status & MII_STATUS_CANAUTONEG) != 0;
2375 		break;
2376 
2377 	case ETHER_STAT_CAP_PAUSE:
2378 		v = 1;
2379 		break;
2380 
2381 	case ETHER_STAT_CAP_REMFAULT:
2382 		v = (vrp->chip.mii.status & MII_STATUS_REMFAULT) != 0;
2383 		break;
2384 
2385 	case ETHER_STAT_CARRIER_ERRORS:
2386 		/*
2387 		 * Number of times carrier was lost or never detected on a
2388 		 * transmission attempt.
2389 		 */
2390 		v = vrp->stats.ether_stat_carrier_errors;
2391 		break;
2392 
2393 	case ETHER_STAT_JABBER_ERRORS:
2394 		return (ENOTSUP);
2395 
2396 	case ETHER_STAT_DEFER_XMTS:
2397 		/*
2398 		 * Packets without collisions where first transmit attempt was
2399 		 * delayed because the medium was busy.
2400 		 */
2401 		v = vrp->stats.ether_stat_defer_xmts;
2402 		break;
2403 
2404 	case ETHER_STAT_EX_COLLISIONS:
2405 		/*
2406 		 * Frames where excess collisions occurred on transmit, causing
2407 		 * transmit failure.
2408 		 */
2409 		v = vrp->stats.ether_stat_ex_collisions;
2410 		break;
2411 
2412 	case ETHER_STAT_FCS_ERRORS:
2413 		/*
2414 		 * Packets received with CRC errors.
2415 		 */
2416 		v = vrp->stats.ether_stat_fcs_errors;
2417 		break;
2418 
2419 	case ETHER_STAT_FIRST_COLLISIONS:
2420 		/*
2421 		 * Packets successfully transmitted with exactly one collision.
2422 		 */
2423 		v = vrp->stats.ether_stat_first_collisions;
2424 		break;
2425 
2426 	case ETHER_STAT_LINK_ASMPAUSE:
2427 		v = 0;
2428 		break;
2429 
2430 	case ETHER_STAT_LINK_AUTONEG:
2431 		v = (vrp->chip.mii.control & MII_CONTROL_ANE) != 0 &&
2432 		    (vrp->chip.mii.status & MII_STATUS_ANDONE) != 0;
2433 		break;
2434 
2435 	case ETHER_STAT_LINK_DUPLEX:
2436 		v = vrp->chip.link.duplex;
2437 		break;
2438 
2439 	case ETHER_STAT_LINK_PAUSE:
2440 		v = vrp->chip.link.flowctrl;
2441 		break;
2442 
2443 	case ETHER_STAT_LP_CAP_100T4:
2444 		v = (vrp->chip.mii.lpable & MII_ABILITY_100BASE_T4) != 0;
2445 		break;
2446 
2447 	case ETHER_STAT_LP_CAP_1000FDX:
2448 		v = 0;
2449 		break;
2450 
2451 	case ETHER_STAT_LP_CAP_1000HDX:
2452 		v = 0;
2453 		break;
2454 
2455 	case ETHER_STAT_LP_CAP_100FDX:
2456 		v = (vrp->chip.mii.lpable & MII_ABILITY_100BASE_TX_FD) != 0;
2457 		break;
2458 
2459 	case ETHER_STAT_LP_CAP_100HDX:
2460 		v = (vrp->chip.mii.lpable & MII_ABILITY_100BASE_TX) != 0;
2461 		break;
2462 
2463 	case ETHER_STAT_LP_CAP_10FDX:
2464 		v = (vrp->chip.mii.lpable & MII_ABILITY_10BASE_T_FD) != 0;
2465 		break;
2466 
2467 	case ETHER_STAT_LP_CAP_10HDX:
2468 		v = (vrp->chip.mii.lpable & MII_ABILITY_10BASE_T) != 0;
2469 		break;
2470 
2471 	case ETHER_STAT_LP_CAP_ASMPAUSE:
2472 		v = 0;
2473 		break;
2474 
2475 	case ETHER_STAT_LP_CAP_AUTONEG:
2476 		v = (vrp->chip.mii.anexp & MII_AN_EXP_LPCANAN) != 0;
2477 		break;
2478 
2479 	case ETHER_STAT_LP_CAP_PAUSE:
2480 		v = (vrp->chip.mii.lpable & MII_ABILITY_PAUSE) != 0;
2481 		break;
2482 
2483 	case ETHER_STAT_LP_REMFAULT:
2484 		v = (vrp->chip.mii.status & MII_STATUS_REMFAULT) != 0;
2485 		break;
2486 
2487 	case ETHER_STAT_MACRCV_ERRORS:
2488 		/*
2489 		 * Packets received with MAC errors, except align_errors,
2490 		 * fcs_errors, and toolong_errors.
2491 		 */
2492 		v = vrp->stats.ether_stat_macrcv_errors;
2493 		break;
2494 
2495 	case ETHER_STAT_MACXMT_ERRORS:
2496 		/*
2497 		 * Packets encountering transmit MAC failures, except carrier
2498 		 * and collision failures.
2499 		 */
2500 		v = vrp->stats.ether_stat_macxmt_errors;
2501 		break;
2502 
2503 	case ETHER_STAT_MULTI_COLLISIONS:
2504 		/*
2505 		 * Packets successfully transmitted with multiple collisions.
2506 		 */
2507 		v = vrp->stats.ether_stat_multi_collisions;
2508 		break;
2509 
2510 	case ETHER_STAT_SQE_ERRORS:
2511 		/*
2512 		 * Number of times signal quality error was reported
2513 		 * This one is reported by the PHY.
2514 		 */
2515 		return (ENOTSUP);
2516 
2517 	case ETHER_STAT_TOOLONG_ERRORS:
2518 		/*
2519 		 * Packets received larger than the maximum permitted length.
2520 		 */
2521 		v = vrp->stats.ether_stat_toolong_errors;
2522 		break;
2523 
2524 	case ETHER_STAT_TOOSHORT_ERRORS:
2525 		v = vrp->stats.ether_stat_tooshort_errors;
2526 		break;
2527 
2528 	case ETHER_STAT_TX_LATE_COLLISIONS:
2529 		/*
2530 		 * Number of times a transmit collision occurred late
2531 		 * (after 512 bit times).
2532 		 */
2533 		v = vrp->stats.ether_stat_tx_late_collisions;
2534 		break;
2535 
2536 	case ETHER_STAT_XCVR_ADDR:
2537 		/*
2538 		 * MII address in the 0 to 31 range of the physical layer
2539 		 * device in use for a given Ethernet device.
2540 		 */
2541 		v = vrp->chip.phyaddr;
2542 		break;
2543 
2544 	case ETHER_STAT_XCVR_ID:
2545 		/*
2546 		 * MII transceiver manufacturer and device ID.
2547 		 */
2548 		v = (vrp->chip.mii.identh << 16) | vrp->chip.mii.identl;
2549 		break;
2550 
2551 	case ETHER_STAT_XCVR_INUSE:
2552 		v = vrp->chip.link.mau;
2553 		break;
2554 
2555 	case MAC_STAT_BRDCSTRCV:
2556 		v = vrp->stats.mac_stat_brdcstrcv;
2557 		break;
2558 
2559 	case MAC_STAT_BRDCSTXMT:
2560 		v = vrp->stats.mac_stat_brdcstxmt;
2561 		break;
2562 
2563 	case MAC_STAT_MULTIXMT:
2564 		v = vrp->stats.mac_stat_multixmt;
2565 		break;
2566 
2567 	case MAC_STAT_COLLISIONS:
2568 		v = vrp->stats.mac_stat_collisions;
2569 		break;
2570 
2571 	case MAC_STAT_IERRORS:
2572 		v = vrp->stats.mac_stat_ierrors;
2573 		break;
2574 
2575 	case MAC_STAT_IFSPEED:
2576 		if (vrp->chip.link.speed == VR_LINK_SPEED_100MBS)
2577 			v = 100 * 1000 * 1000;
2578 		else if (vrp->chip.link.speed == VR_LINK_SPEED_10MBS)
2579 			v = 10 * 1000 * 1000;
2580 		else
2581 			v = 0;
2582 		break;
2583 
2584 	case MAC_STAT_IPACKETS:
2585 		v = vrp->stats.mac_stat_ipackets;
2586 		break;
2587 
2588 	case MAC_STAT_MULTIRCV:
2589 		v = vrp->stats.mac_stat_multircv;
2590 		break;
2591 
2592 	case MAC_STAT_NORCVBUF:
2593 		vrp->stats.mac_stat_norcvbuf +=
2594 		    VR_GET16(vrp->acc_reg, VR_TALLY_MPA);
2595 		VR_PUT16(vrp->acc_reg, VR_TALLY_MPA, 0);
2596 		v = vrp->stats.mac_stat_norcvbuf;
2597 		break;
2598 
2599 	case MAC_STAT_NOXMTBUF:
2600 		v = vrp->stats.mac_stat_noxmtbuf;
2601 		break;
2602 
2603 	case MAC_STAT_OBYTES:
2604 		v = vrp->stats.mac_stat_obytes;
2605 		break;
2606 
2607 	case MAC_STAT_OERRORS:
2608 		v = vrp->stats.ether_stat_macxmt_errors +
2609 		    vrp->stats.mac_stat_underflows +
2610 		    vrp->stats.ether_stat_align_errors +
2611 		    vrp->stats.ether_stat_carrier_errors +
2612 		    vrp->stats.ether_stat_fcs_errors;
2613 		break;
2614 
2615 	case MAC_STAT_OPACKETS:
2616 		v = vrp->stats.mac_stat_opackets;
2617 		break;
2618 
2619 	case MAC_STAT_RBYTES:
2620 		v = vrp->stats.mac_stat_rbytes;
2621 		break;
2622 
2623 	case MAC_STAT_UNKNOWNS:
2624 		/*
2625 		 * Isn't this something for the MAC layer to maintain?
2626 		 */
2627 		return (ENOTSUP);
2628 
2629 	case MAC_STAT_UNDERFLOWS:
2630 		v = vrp->stats.mac_stat_underflows;
2631 		break;
2632 
2633 	case MAC_STAT_OVERFLOWS:
2634 		v = vrp->stats.mac_stat_overflows;
2635 		break;
2636 	}
2637 	*val = v;
2638 	return (0);
2639 }
2640 
2641 int
2642 vr_mac_set_ether_addr(void *p, const uint8_t *ea)
2643 {
2644 	vr_t	*vrp;
2645 	int	i;
2646 
2647 	vrp = (vr_t *)p;
2648 	mutex_enter(&vrp->oplock);
2649 	mutex_enter(&vrp->intrlock);
2650 
2651 	/*
2652 	 * Set a new station address.
2653 	 */
2654 	for (i = 0; i < ETHERADDRL; i++)
2655 		VR_PUT8(vrp->acc_reg, VR_ETHERADDR + i, ea[i]);
2656 
2657 	mutex_exit(&vrp->intrlock);
2658 	mutex_exit(&vrp->oplock);
2659 	return (0);
2660 }
2661 
2662 /*
2663  * Configure the ethernet link according to param and chip.mii.
2664  */
2665 static void
2666 vr_link_init(vr_t *vrp)
2667 {
2668 	ASSERT(mutex_owned(&vrp->oplock));
2669 	if ((vrp->chip.mii.control & MII_CONTROL_ANE) != 0) {
2670 		/*
2671 		 * If we do autoneg, ensure restart autoneg is ON.
2672 		 */
2673 		vrp->chip.mii.control |= MII_CONTROL_RSAN;
2674 
2675 		/*
2676 		 * The advertisements are prepared by param_init.
2677 		 */
2678 		vr_phy_write(vrp, MII_AN_ADVERT, vrp->chip.mii.anadv);
2679 	} else {
2680 		/*
2681 		 * If we don't autoneg, we need speed, duplex and flowcontrol
2682 		 * to configure the link. However, dladm doesn't allow changes
2683 		 * to speed and duplex (readonly). The way this is solved
2684 		 * (ahem) is to select the highest enabled combination.
2685 		 * Speed and duplex should be r/w when autoneg is off.
2686 		 */
2687 		if ((vrp->param.anadv_en &
2688 		    MII_ABILITY_100BASE_TX_FD) != 0) {
2689 			vrp->chip.mii.control |= MII_CONTROL_100MB;
2690 			vrp->chip.mii.control |= MII_CONTROL_FDUPLEX;
2691 		} else if ((vrp->param.anadv_en &
2692 		    MII_ABILITY_100BASE_TX) != 0) {
2693 			vrp->chip.mii.control |= MII_CONTROL_100MB;
2694 			vrp->chip.mii.control &= ~MII_CONTROL_FDUPLEX;
2695 		} else if ((vrp->param.anadv_en &
2696 		    MII_ABILITY_10BASE_T_FD) != 0) {
2697 			vrp->chip.mii.control |= MII_CONTROL_FDUPLEX;
2698 			vrp->chip.mii.control &= ~MII_CONTROL_100MB;
2699 		} else {
2700 			vrp->chip.mii.control &= ~MII_CONTROL_100MB;
2701 			vrp->chip.mii.control &= ~MII_CONTROL_FDUPLEX;
2702 		}
2703 	}
2704 	/*
2705 	 * Write the control register.
2706 	 */
2707 	vr_phy_write(vrp, MII_CONTROL, vrp->chip.mii.control);
2708 
2709 	/*
2710 	 * With autoneg off we cannot rely on the link_change interrupt
2711 	 * for getting the status into the driver.
2712 	 */
2713 	if ((vrp->chip.mii.control & MII_CONTROL_ANE) == 0) {
2714 		vr_link_state(vrp);
2715 		mac_link_update(vrp->machdl,
2716 		    (link_state_t)vrp->chip.link.state);
2717 	}
2718 }
2719 
2720 /*
2721  * Get link state in the driver and configure the MAC accordingly.
2722  */
2723 static void
2724 vr_link_state(vr_t *vrp)
2725 {
2726 	uint16_t		mask;
2727 
2728 	ASSERT(mutex_owned(&vrp->oplock));
2729 
2730 	vr_phy_read(vrp, MII_STATUS, &vrp->chip.mii.status);
2731 	vr_phy_read(vrp, MII_CONTROL, &vrp->chip.mii.control);
2732 	vr_phy_read(vrp, MII_AN_ADVERT, &vrp->chip.mii.anadv);
2733 	vr_phy_read(vrp, MII_AN_LPABLE, &vrp->chip.mii.lpable);
2734 	vr_phy_read(vrp, MII_AN_EXPANSION, &vrp->chip.mii.anexp);
2735 
2736 	/*
2737 	 * If we did autoneg, deduce the link type/speed by selecting the
2738 	 * highest common denominator.
2739 	 */
2740 	if ((vrp->chip.mii.control & MII_CONTROL_ANE) != 0) {
2741 		mask = vrp->chip.mii.anadv & vrp->chip.mii.lpable;
2742 		if ((mask & MII_ABILITY_100BASE_TX_FD) != 0) {
2743 			vrp->chip.link.speed = VR_LINK_SPEED_100MBS;
2744 			vrp->chip.link.duplex = VR_LINK_DUPLEX_FULL;
2745 			vrp->chip.link.mau = VR_MAU_100X;
2746 		} else if ((mask & MII_ABILITY_100BASE_T4) != 0) {
2747 			vrp->chip.link.speed = VR_LINK_SPEED_100MBS;
2748 			vrp->chip.link.duplex = VR_LINK_DUPLEX_HALF;
2749 			vrp->chip.link.mau = VR_MAU_100T4;
2750 		} else if ((mask & MII_ABILITY_100BASE_TX) != 0) {
2751 			vrp->chip.link.speed = VR_LINK_SPEED_100MBS;
2752 			vrp->chip.link.duplex = VR_LINK_DUPLEX_HALF;
2753 			vrp->chip.link.mau = VR_MAU_100X;
2754 		} else if ((mask & MII_ABILITY_10BASE_T_FD) != 0) {
2755 			vrp->chip.link.speed = VR_LINK_SPEED_10MBS;
2756 			vrp->chip.link.duplex = VR_LINK_DUPLEX_FULL;
2757 			vrp->chip.link.mau = VR_MAU_10;
2758 		} else if ((mask & MII_ABILITY_10BASE_T) != 0) {
2759 			vrp->chip.link.speed = VR_LINK_SPEED_10MBS;
2760 			vrp->chip.link.duplex = VR_LINK_DUPLEX_HALF;
2761 			vrp->chip.link.mau = VR_MAU_10;
2762 		} else {
2763 			vrp->chip.link.speed = VR_LINK_SPEED_UNKNOWN;
2764 			vrp->chip.link.duplex = VR_LINK_DUPLEX_UNKNOWN;
2765 			vrp->chip.link.mau = VR_MAU_UNKNOWN;
2766 		}
2767 
2768 		/*
2769 		 * Did we negotiate pause?
2770 		 */
2771 		if ((mask & MII_ABILITY_PAUSE) != 0 &&
2772 		    vrp->chip.link.duplex == VR_LINK_DUPLEX_FULL)
2773 			vrp->chip.link.flowctrl = VR_PAUSE_BIDIRECTIONAL;
2774 		else
2775 			vrp->chip.link.flowctrl = VR_PAUSE_NONE;
2776 
2777 		/*
2778 		 * Did either one detect an AN fault?
2779 		 */
2780 		if ((vrp->chip.mii.status & MII_STATUS_REMFAULT) != 0)
2781 			vr_log(vrp, CE_WARN,
2782 			    "AN remote fault reported by LP.");
2783 
2784 		if ((vrp->chip.mii.lpable & MII_AN_ADVERT_REMFAULT) != 0)
2785 			vr_log(vrp, CE_WARN, "AN remote fault caused for LP.");
2786 	} else {
2787 		/*
2788 		 * We didn't autoneg
2789 		 * The link type is defined by the control register.
2790 		 */
2791 		if ((vrp->chip.mii.control & MII_CONTROL_100MB) != 0) {
2792 			vrp->chip.link.speed = VR_LINK_SPEED_100MBS;
2793 			vrp->chip.link.mau = VR_MAU_100X;
2794 		} else {
2795 			vrp->chip.link.speed = VR_LINK_SPEED_10MBS;
2796 			vrp->chip.link.mau = VR_MAU_10;
2797 		}
2798 
2799 		if ((vrp->chip.mii.control & MII_CONTROL_FDUPLEX) != 0)
2800 			vrp->chip.link.duplex = VR_LINK_DUPLEX_FULL;
2801 		else {
2802 			vrp->chip.link.duplex = VR_LINK_DUPLEX_HALF;
2803 			/*
2804 			 * No pause on HDX links.
2805 			 */
2806 			vrp->chip.link.flowctrl = VR_PAUSE_NONE;
2807 		}
2808 	}
2809 
2810 	/*
2811 	 * Set the duplex mode on the MAC according to that of the PHY.
2812 	 */
2813 	if (vrp->chip.link.duplex == VR_LINK_DUPLEX_FULL) {
2814 		VR_SETBIT8(vrp->acc_reg, VR_CTRL1, VR_CTRL1_MACFULLDUPLEX);
2815 		/*
2816 		 * Enable packet queueing on FDX links.
2817 		 */
2818 		if ((vrp->chip.info.bugs & VR_BUG_NO_TXQUEUEING) == 0)
2819 			VR_CLRBIT8(vrp->acc_reg, VR_CFGB, VR_CFGB_QPKTDIS);
2820 	} else {
2821 		VR_CLRBIT8(vrp->acc_reg, VR_CTRL1, VR_CTRL1_MACFULLDUPLEX);
2822 		/*
2823 		 * Disable packet queueing on HDX links. With queueing enabled,
2824 		 * this MAC gets lost after a TX abort (too many collisions).
2825 		 */
2826 		VR_SETBIT8(vrp->acc_reg, VR_CFGB, VR_CFGB_QPKTDIS);
2827 	}
2828 
2829 	/*
2830 	 * Set pause options on the MAC.
2831 	 */
2832 	if (vrp->chip.link.flowctrl == VR_PAUSE_BIDIRECTIONAL) {
2833 		/*
2834 		 * All of our MAC's can receive pause frames.
2835 		 */
2836 		VR_SETBIT8(vrp->acc_reg, VR_MISC0, VR_MISC0_FDXRFEN);
2837 
2838 		/*
2839 		 * VT6105 and above can transmit pause frames.
2840 		 */
2841 		if ((vrp->chip.info.features & VR_FEATURE_TX_PAUSE_CAP) != 0) {
2842 			/*
2843 			 * Set the number of available receive descriptors
2844 			 * Non-zero values written to this register are added
2845 			 * to the register's contents. Careful: Writing zero
2846 			 * clears the register and thus causes a (long) pause
2847 			 * request.
2848 			 */
2849 			VR_PUT8(vrp->acc_reg, VR_FCR0_RXBUFCOUNT,
2850 			    MIN(vrp->rx.ndesc, 0xFF) -
2851 			    VR_GET8(vrp->acc_reg,
2852 			    VR_FCR0_RXBUFCOUNT));
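
			/*
			 * Since writes to FCR0 add to its current contents,
			 * writing (desired - current) leaves the register at
			 * the desired value: current + (desired - current)
			 * == desired, i.e. MIN(vrp->rx.ndesc, 0xFF) here.
			 */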
2853 
2854 			/*
2855 			 * Request pause when we have 4 descs left.
2856 			 */
2857 			VR_SETBITS8(vrp->acc_reg, VR_FCR1,
2858 			    VR_FCR1_PAUSEONBITS, VR_FCR1_PAUSEON_04);
2859 
2860 			/*
2861 			 * Cancel the pause when there are 24 descriptors again.
2862 			 */
2863 			VR_SETBITS8(vrp->acc_reg, VR_FCR1,
2864 			    VR_FCR1_PAUSEOFFBITS, VR_FCR1_PAUSEOFF_24);
2865 
2866 			/*
2867 			 * Request a pause of FFFF bit-times. This long pause
2868 			 * is cancelled when the high watermark is reached.
2869 			 */
2870 			VR_PUT16(vrp->acc_reg, VR_FCR2_PAUSE, 0xFFFF);
2871 
2872 			/*
2873 			 * Enable flow control on the MAC.
2874 			 */
2875 			VR_SETBIT8(vrp->acc_reg, VR_MISC0, VR_MISC0_FDXTFEN);
2876 			VR_SETBIT8(vrp->acc_reg, VR_FCR1, VR_FCR1_FD_RX_EN |
2877 			    VR_FCR1_FD_TX_EN | VR_FCR1_XONXOFF_EN);
2878 		}
2879 	} else {
2880 		/*
2881 		 * Turn flow control OFF.
2882 		 */
2883 		VR_CLRBIT8(vrp->acc_reg,
2884 		    VR_MISC0, VR_MISC0_FDXRFEN | VR_MISC0_FDXTFEN);
2885 		if ((vrp->chip.info.features & VR_FEATURE_TX_PAUSE_CAP) != 0) {
2886 			VR_CLRBIT8(vrp->acc_reg, VR_FCR1,
2887 			    VR_FCR1_FD_RX_EN | VR_FCR1_FD_TX_EN |
2888 			    VR_FCR1_XONXOFF_EN);
2889 		}
2890 	}
2891 
2892 	/*
2893 	 * Set link state.
2894 	 */
2895 	if ((vrp->chip.mii.status & MII_STATUS_LINKUP) != 0)
2896 		vrp->chip.link.state = VR_LINK_STATE_UP;
2897 	else
2898 		vrp->chip.link.state = VR_LINK_STATE_DOWN;
2899 }
2900 
2901 /*
2902  * The PHY is automatically polled by the MAC once per 1024 MD clock cycles.
2903  * MD is clocked once per 960ns, so a poll completes about every 1M ns
2904  * (1024 * 960ns ~= 0.98ms), i.e. roughly 1000 times per second.
2905  * This polling process is required for the functionality of the link change
2906  * interrupt. The polling process must be disabled in order to access PHY
2907  * registers using MDIO.
2908  *
2909  * Turn off PHY polling so that the PHY registers can be accessed.
2910  */
2911 static void
2912 vr_phy_autopoll_disable(vr_t *vrp)
2913 {
2914 	uint32_t	time;
2915 	uint8_t		miicmd, miiaddr;
2916 
2917 	/*
2918 	 * Special procedure to stop the autopolling.
2919 	 */
2920 	if ((vrp->chip.info.bugs & VR_BUG_MIIPOLLSTOP) != 0) {
2921 		/*
2922 		 * If polling is enabled.
2923 		 */
2924 		miicmd = VR_GET8(vrp->acc_reg, VR_MIICMD);
2925 		if ((miicmd & VR_MIICMD_MD_AUTO) != 0) {
2926 			/*
2927 			 * Wait for the end of a cycle (mdone set).
2928 			 */
2929 			time = 0;
2930 			do {
2931 				drv_usecwait(10);
2932 				if (time >= VR_MMI_WAITMAX) {
2933 					vr_log(vrp, CE_WARN,
2934 					    "Timeout in "
2935 					    "disable MII polling");
2936 					break;
2937 				}
2938 				time += VR_MMI_WAITINCR;
2939 				miiaddr = VR_GET8(vrp->acc_reg, VR_MIIADDR);
2940 			} while ((miiaddr & VR_MIIADDR_MDONE) == 0);
2941 		}
2942 		/*
2943 		 * Once paused, we can disable autopolling.
2944 		 */
2945 		VR_PUT8(vrp->acc_reg, VR_MIICMD, 0);
2946 	} else {
2947 		/*
2948 		 * Turn off MII polling.
2949 		 */
2950 		VR_PUT8(vrp->acc_reg, VR_MIICMD, 0);
2951 
2952 		/*
2953 		 * Wait for MIDLE in MII address register.
2954 		 */
2955 		time = 0;
2956 		do {
2957 			drv_usecwait(VR_MMI_WAITINCR);
2958 			if (time >= VR_MMI_WAITMAX) {
2959 				vr_log(vrp, CE_WARN,
2960 				    "Timeout in disable MII polling");
2961 				break;
2962 			}
2963 			time += VR_MMI_WAITINCR;
2964 			miiaddr = VR_GET8(vrp->acc_reg, VR_MIIADDR);
2965 		} while ((miiaddr & VR_MIIADDR_MIDLE) == 0);
2966 	}
2967 }
2968 
2969 /*
2970  * Turn on PHY polling. The PHY's registers cannot be accessed while polling.
2971  */
2972 static void
2973 vr_phy_autopoll_enable(vr_t *vrp)
2974 {
2975 	uint32_t	time;
2976 
2977 	VR_PUT8(vrp->acc_reg, VR_MIICMD, 0);
2978 	VR_PUT8(vrp->acc_reg, VR_MIIADDR, MII_STATUS|VR_MIIADDR_MAUTO);
2979 	VR_PUT8(vrp->acc_reg, VR_MIICMD, VR_MIICMD_MD_AUTO);
2980 
2981 	/*
2982 	 * Wait for the polling process to finish.
2983 	 */
2984 	time = 0;
2985 	do {
2986 		drv_usecwait(VR_MMI_WAITINCR);
2987 		if (time >= VR_MMI_WAITMAX) {
2988 			vr_log(vrp, CE_NOTE, "Timeout in enable MII polling");
2989 			break;
2990 		}
2991 		time += VR_MMI_WAITINCR;
2992 	} while ((VR_GET8(vrp->acc_reg, VR_MIIADDR) & VR_MIIADDR_MDONE) == 0);
2993 
2994 	/*
2995 	 * Initiate polling.
2996 	 */
2997 	VR_SETBIT8(vrp->acc_reg, VR_MIIADDR, VR_MIIADDR_MAUTO);
2998 }
2999 
3000 /*
3001  * Read a register from the PHY using MDIO.
3002  */
3003 static void
3004 vr_phy_read(vr_t *vrp, int offset, uint16_t *value)
3005 {
3006 	uint32_t	time;
3007 
3008 	vr_phy_autopoll_disable(vrp);
3009 
3010 	/*
3011 	 * Write the register number to the lower 5 bits of the MII address
3012 	 * register.
3013 	 */
3014 	VR_SETBITS8(vrp->acc_reg, VR_MIIADDR, VR_MIIADDR_BITS, offset);
3015 
3016 	/*
3017 	 * Write a READ command to the MII control register
3018 	 * This bit will be cleared when the read is finished.
3019 	 */
3020 	VR_SETBIT8(vrp->acc_reg, VR_MIICMD, VR_MIICMD_MD_READ);
3021 
3022 	/*
3023 	 * Wait until the read is done.
3024 	 */
3025 	time = 0;
3026 	do {
3027 		drv_usecwait(VR_MMI_WAITINCR);
3028 		if (time >= VR_MMI_WAITMAX) {
3029 			vr_log(vrp, CE_NOTE, "Timeout in MII read command");
3030 			break;
3031 		}
3032 		time += VR_MMI_WAITINCR;
3033 	} while ((VR_GET8(vrp->acc_reg, VR_MIICMD) & VR_MIICMD_MD_READ) != 0);
3034 
3035 	*value = VR_GET16(vrp->acc_reg, VR_MIIDATA);
3036 	vr_phy_autopoll_enable(vrp);
3037 }
3038 
3039 /*
3040  * Write to a PHY's register.
3041  */
3042 static void
3043 vr_phy_write(vr_t *vrp, int offset, uint16_t value)
3044 {
3045 	uint32_t	time;
3046 
3047 	vr_phy_autopoll_disable(vrp);
3048 
3049 	/*
3050 	 * Write the register number to the MII address register.
3051 	 */
3052 	VR_SETBITS8(vrp->acc_reg, VR_MIIADDR, VR_MIIADDR_BITS, offset);
3053 
3054 	/*
3055 	 * Write the value to the data register.
3056 	 */
3057 	VR_PUT16(vrp->acc_reg, VR_MIIDATA, value);
3058 
3059 	/*
3060 	 * Issue the WRITE command to the command register.
3061 	 * This bit will be cleared when the write is finished.
3062 	 */
3063 	VR_SETBIT8(vrp->acc_reg, VR_MIICMD, VR_MIICMD_MD_WRITE);
3064 
3065 	time = 0;
3066 	do {
3067 		drv_usecwait(VR_MMI_WAITINCR);
3068 		if (time >= VR_MMI_WAITMAX) {
3069 			vr_log(vrp, CE_NOTE, "Timeout in MII write command");
3070 			break;
3071 		}
3072 		time += VR_MMI_WAITINCR;
3073 	} while ((VR_GET8(vrp->acc_reg, VR_MIICMD) & VR_MIICMD_MD_WRITE) != 0);
3074 	vr_phy_autopoll_enable(vrp);
3075 }
3076 
3077 /*
3078  * Initialize and install some private kstats.
3079  */
3080 typedef struct {
3081 	char		*name;
3082 	uchar_t		type;
3083 } vr_kstat_t;
3084 
3085 static const vr_kstat_t vr_driver_stats [] = {
3086 	{"allocbfail",		KSTAT_DATA_INT32},
3087 	{"intr_claimed",	KSTAT_DATA_INT64},
3088 	{"intr_unclaimed",	KSTAT_DATA_INT64},
3089 	{"linkchanges",		KSTAT_DATA_INT64},
3090 	{"txnfree",		KSTAT_DATA_INT32},
3091 	{"txstalls",		KSTAT_DATA_INT32},
3092 	{"resets",		KSTAT_DATA_INT32},
3093 	{"txreclaims",		KSTAT_DATA_INT64},
3094 	{"txreclaim0",		KSTAT_DATA_INT64},
3095 	{"cyclics",		KSTAT_DATA_INT64},
3096 	{"txchecks",		KSTAT_DATA_INT64},
3097 };
3098 
3099 static void
3100 vr_kstats_init(vr_t *vrp)
3101 {
3102 	kstat_t			*ksp;
3103 	struct	kstat_named	*knp;
3104 	int			i;
3105 	int			nstats;
3106 
3107 	nstats = sizeof (vr_driver_stats) / sizeof (vr_kstat_t);
3108 
3109 	ksp = kstat_create(MODULENAME, ddi_get_instance(vrp->devinfo),
3110 	    "driver", "net", KSTAT_TYPE_NAMED, nstats, 0);
3111 
3112 	if (ksp == NULL) {
3113 		vr_log(vrp, CE_WARN, "kstat_create failed");
3114 		return;
3115 	}
3115 	ksp->ks_update = vr_update_kstats;
3116 	ksp->ks_private = (void*) vrp;
3117 	knp = ksp->ks_data;
3118 
3119 	for (i = 0; i < nstats; i++, knp++) {
3120 		kstat_named_init(knp, vr_driver_stats[i].name,
3121 		    vr_driver_stats[i].type);
3122 	}
3123 	kstat_install(ksp);
3124 	vrp->ksp = ksp;
3125 }
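
/*
 * Usage note (illustrative, not from the source): assuming MODULENAME
 * expands to "vr", the named kstats installed above can be read from
 * userland with kstat(1M), e.g.
 *
 *	# kstat -m vr -n driver
 *
 * which lists allocbfail, intr_claimed, txstalls and the other counters
 * from vr_driver_stats[].
 */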
3126 
3127 static int
3128 vr_update_kstats(kstat_t *ksp, int access)
3129 {
3130 	vr_t			*vrp;
3131 	struct kstat_named	*knp;
3132 
3133 	vrp = (vr_t *)ksp->ks_private;
3134 	knp = ksp->ks_data;
3135 
3136 	if (access != KSTAT_READ)
3137 		return (EACCES);
3138 
3139 	(knp++)->value.ui32 = vrp->stats.allocbfail;
3140 	(knp++)->value.ui64 = vrp->stats.intr_claimed;
3141 	(knp++)->value.ui64 = vrp->stats.intr_unclaimed;
3142 	(knp++)->value.ui64 = vrp->stats.linkchanges;
3143 	(knp++)->value.ui32 = vrp->tx.nfree;
3144 	(knp++)->value.ui32 = vrp->stats.txstalls;
3145 	(knp++)->value.ui32 = vrp->stats.resets;
3146 	(knp++)->value.ui64 = vrp->stats.txreclaims;
3147 	(knp++)->value.ui64 = vrp->stats.txreclaim0;
3148 	(knp++)->value.ui64 = vrp->stats.cyclics;
3149 	(knp++)->value.ui64 = vrp->stats.txchecks;
3150 	return (0);
3151 }
3152 
3153 /*
3154  * Remove 'private' kstats.
3155  */
3156 static void
3157 vr_remove_kstats(vr_t *vrp)
3158 {
3159 	if (vrp->ksp != NULL)
3160 		kstat_delete(vrp->ksp);
3161 }
3162 
3163 /*
3164  * Get a property of the device/driver
3165  * Remarks:
3166  * - pr_val is always an integer of size pr_valsize
3167  * - ENABLED (EN) is what is configured via dladm
3168  * - ADVERTISED (ADV) is ENABLED minus constraints, like PHY/MAC capabilities
3169  * - DEFAULT are driver- and hardware defaults (DEFAULT is implemented as a
3170  *   flag in pr_flags instead of MAC_PROP_DEFAULT_)
3171  * - perm is the permission printed on ndd -get /.. \?
3172  */
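
/*
 * Usage example (illustrative; the interface name vr0 and the property
 * names shown are assumptions about the dladm mapping, not taken from this
 * source):
 *
 *	# dladm show-linkprop -p en_100fdx_cap,adv_100fdx_cap vr0
 *	# dladm set-linkprop -p en_100fdx_cap=0 vr0
 *
 * The en_* values are what set-linkprop changes; the adv_* values then
 * reflect en_* masked by the PHY/MAC capabilities, as described above.
 */
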
3173 int
3174 vr_mac_getprop(void *arg, const char *pr_name, mac_prop_id_t pr_num,
3175     uint_t pr_valsize, void *pr_val)
3176 {
3177 	vr_t		*vrp;
3178 	uint32_t	err;
3179 	uint64_t	val;
3180 
3181 	/* Since we have no private properties */
3182 	_NOTE(ARGUNUSED(pr_name))
3183 
3184 	err = 0;
3185 	vrp = (vr_t *)arg;
3186 	switch (pr_num) {
3187 		case MAC_PROP_ADV_1000FDX_CAP:
3188 		case MAC_PROP_ADV_1000HDX_CAP:
3189 		case MAC_PROP_EN_1000FDX_CAP:
3190 		case MAC_PROP_EN_1000HDX_CAP:
3191 			val = 0;
3192 			break;
3193 
3194 		case MAC_PROP_ADV_100FDX_CAP:
3195 			val = (vrp->chip.mii.anadv &
3196 			    MII_ABILITY_100BASE_TX_FD) != 0;
3197 			break;
3198 
3199 		case MAC_PROP_ADV_100HDX_CAP:
3200 			val = (vrp->chip.mii.anadv &
3201 			    MII_ABILITY_100BASE_TX) != 0;
3202 			break;
3203 
3204 		case MAC_PROP_ADV_100T4_CAP:
3205 			val = (vrp->chip.mii.anadv &
3206 			    MII_ABILITY_100BASE_T4) != 0;
3207 			break;
3208 
3209 		case MAC_PROP_ADV_10FDX_CAP:
3210 			val = (vrp->chip.mii.anadv &
3211 			    MII_ABILITY_10BASE_T_FD) != 0;
3212 			break;
3213 
3214 		case MAC_PROP_ADV_10HDX_CAP:
3215 			val = (vrp->chip.mii.anadv &
3216 			    MII_ABILITY_10BASE_T) != 0;
3217 			break;
3218 
3219 		case MAC_PROP_AUTONEG:
3220 			val = (vrp->chip.mii.control &
3221 			    MII_CONTROL_ANE) != 0;
3222 			break;
3223 
3224 		case MAC_PROP_DUPLEX:
3225 			val = vrp->chip.link.duplex;
3226 			break;
3227 
3228 		case MAC_PROP_EN_100FDX_CAP:
3229 			val = (vrp->param.anadv_en &
3230 			    MII_ABILITY_100BASE_TX_FD) != 0;
3231 			break;
3232 
3233 		case MAC_PROP_EN_100HDX_CAP:
3234 			val = (vrp->param.anadv_en &
3235 			    MII_ABILITY_100BASE_TX) != 0;
3236 			break;
3237 
3238 		case MAC_PROP_EN_100T4_CAP:
3239 			val = (vrp->param.anadv_en &
3240 			    MII_ABILITY_100BASE_T4) != 0;
3241 			break;
3242 
3243 		case MAC_PROP_EN_10FDX_CAP:
3244 			val = (vrp->param.anadv_en &
3245 			    MII_ABILITY_10BASE_T_FD) != 0;
3246 			break;
3247 
3248 		case MAC_PROP_EN_10HDX_CAP:
3249 			val = (vrp->param.anadv_en &
3250 			    MII_ABILITY_10BASE_T) != 0;
3251 			break;
3252 
3253 		case MAC_PROP_EN_AUTONEG:
3254 			val = vrp->param.an_en == VR_LINK_AUTONEG_ON;
3255 			break;
3256 
3257 		case MAC_PROP_FLOWCTRL:
3258 			val = vrp->chip.link.flowctrl;
3259 			break;
3260 
3261 		case MAC_PROP_MTU:
3262 			val = vrp->param.mtu;
3263 			break;
3264 
3265 		case MAC_PROP_SPEED:
3266 			if (vrp->chip.link.speed ==
3267 			    VR_LINK_SPEED_100MBS)
3268 				val = 100 * 1000 * 1000;
3269 			else if (vrp->chip.link.speed ==
3270 			    VR_LINK_SPEED_10MBS)
3271 				val = 10 * 1000 * 1000;
3272 			else
3273 				val = 0;
3274 			break;
3275 
3276 		case MAC_PROP_STATUS:
3277 			val = vrp->chip.link.state;
3278 			break;
3279 
3280 		default:
3281 			err = ENOTSUP;
3282 			break;
3283 	}
3284 
3285 	if (err == 0 && pr_num != MAC_PROP_PRIVATE) {
3286 		if (pr_valsize == sizeof (uint64_t))
3287 			*(uint64_t *)pr_val = val;
3288 		else if (pr_valsize == sizeof (uint32_t))
3289 			*(uint32_t *)pr_val = val;
3290 		else if (pr_valsize == sizeof (uint16_t))
3291 			*(uint16_t *)pr_val = val;
3292 		else if (pr_valsize == sizeof (uint8_t))
3293 			*(uint8_t *)pr_val = val;
3294 		else
3295 			err = EINVAL;
3296 	}
3297 	return (err);
3298 }
3299 
3300 void
3301 vr_mac_propinfo(void *arg, const char *pr_name, mac_prop_id_t pr_num,
3302     mac_prop_info_handle_t prh)
3303 {
3304 	vr_t		*vrp = (vr_t *)arg;
3305 	uint8_t		val, perm;
3306 
3307 	/* Since we have no private properties */
3308 	_NOTE(ARGUNUSED(pr_name))
3309 
3310 	switch (pr_num) {
3311 		case MAC_PROP_ADV_1000FDX_CAP:
3312 		case MAC_PROP_ADV_1000HDX_CAP:
3313 		case MAC_PROP_EN_1000FDX_CAP:
3314 		case MAC_PROP_EN_1000HDX_CAP:
3315 		case MAC_PROP_ADV_100FDX_CAP:
3316 		case MAC_PROP_ADV_100HDX_CAP:
3317 		case MAC_PROP_ADV_100T4_CAP:
3318 		case MAC_PROP_ADV_10FDX_CAP:
3319 		case MAC_PROP_ADV_10HDX_CAP:
3320 			mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ);
3321 			return;
3322 
3323 		case MAC_PROP_EN_100FDX_CAP:
3324 			val = (vrp->chip.mii.status &
3325 			    MII_STATUS_100_BASEX_FD) != 0;
3326 			break;
3327 
3328 		case MAC_PROP_EN_100HDX_CAP:
3329 			val = (vrp->chip.mii.status &
3330 			    MII_STATUS_100_BASEX) != 0;
3331 			break;
3332 
3333 		case MAC_PROP_EN_100T4_CAP:
3334 			val = (vrp->chip.mii.status &
3335 			    MII_STATUS_100_BASE_T4) != 0;
3336 			break;
3337 
3338 		case MAC_PROP_EN_10FDX_CAP:
3339 			val = (vrp->chip.mii.status &
3340 			    MII_STATUS_10_FD) != 0;
3341 			break;
3342 
3343 		case MAC_PROP_EN_10HDX_CAP:
3344 			val = (vrp->chip.mii.status &
3345 			    MII_STATUS_10) != 0;
3346 			break;
3347 
3348 		case MAC_PROP_AUTONEG:
3349 		case MAC_PROP_EN_AUTONEG:
3350 			val = (vrp->chip.mii.status &
3351 			    MII_STATUS_CANAUTONEG) != 0;
3352 			break;
3353 
3354 		case MAC_PROP_FLOWCTRL:
3355 			mac_prop_info_set_default_link_flowctrl(prh,
3356 			    LINK_FLOWCTRL_BI);
3357 			return;
3358 
3359 		case MAC_PROP_MTU:
3360 			mac_prop_info_set_range_uint32(prh,
3361 			    ETHERMTU, ETHERMTU);
3362 			return;
3363 
3364 		case MAC_PROP_DUPLEX:
3365 			/*
3366 			 * Writability depends on autoneg.
3367 			 */
3368 			perm = ((vrp->chip.mii.control &
3369 			    MII_CONTROL_ANE) == 0) ? MAC_PROP_PERM_RW :
3370 			    MAC_PROP_PERM_READ;
3371 			mac_prop_info_set_perm(prh, perm);
3372 
3373 			if (perm == MAC_PROP_PERM_RW) {
3374 				mac_prop_info_set_default_uint8(prh,
3375 				    VR_LINK_DUPLEX_FULL);
3376 			}
3377 			return;
3378 
3379 		case MAC_PROP_SPEED:
3380 			perm = ((vrp->chip.mii.control &
3381 			    MII_CONTROL_ANE) == 0) ?
3382 			    MAC_PROP_PERM_RW : MAC_PROP_PERM_READ;
3383 			mac_prop_info_set_perm(prh, perm);
3384 
3385 			if (perm == MAC_PROP_PERM_RW) {
3386 				mac_prop_info_set_default_uint64(prh,
3387 				    100 * 1000 * 1000);
3388 			}
3389 			return;
3390 
3391 		case MAC_PROP_STATUS:
3392 			mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ);
3393 			return;
3394 
3395 		default:
3396 			return;
3397 	}
3398 
3399 	mac_prop_info_set_default_uint8(prh, val);
3400 }
3401 
3402 /*
3403  * Set a property of the device.
3404  */
3405 int
3406 vr_mac_setprop(void *arg, const char *pr_name, mac_prop_id_t pr_num,
3407     uint_t pr_valsize, const void *pr_val)
3408 {
3409 	vr_t		*vrp;
3410 	uint32_t	err;
3411 	uint64_t	val;
3412 
3413 	/* Since we have no private properties */
3414 	_NOTE(ARGUNUSED(pr_name))
3415 
3416 	err = 0;
3417 	vrp = (vr_t *)arg;
3418 	mutex_enter(&vrp->oplock);
3419 
3420 	/*
3421 	 * The current set of public property values are passed as integers
3422 	 * Public property values are currently passed as integers.
3423 	 * Private properties are passed as strings of length pr_valsize in pr_val.
3424 	if (pr_num != MAC_PROP_PRIVATE) {
3425 		if (pr_valsize == sizeof (uint64_t))
3426 			val = *(uint64_t *)pr_val;
3427 		else if (pr_valsize == sizeof (uint32_t))
3428 			val = *(uint32_t *)pr_val;
3429 		else if (pr_valsize == sizeof (uint16_t))
3430 			val = *(uint16_t *)pr_val;
3431 		else if (pr_valsize == sizeof (uint8_t))
3432 			val = *(uint8_t *)pr_val;
3433 		else {
3434 			mutex_exit(&vrp->oplock);
3435 			return (EINVAL);
3436 		}
3437 	}
3438 
3439 	switch (pr_num) {
3440 		case MAC_PROP_DUPLEX:
3441 			if ((vrp->chip.mii.control & MII_CONTROL_ANE) == 0) {
3442 				if (val == LINK_DUPLEX_FULL)
3443 					vrp->chip.mii.control |=
3444 					    MII_CONTROL_FDUPLEX;
3445 				else if (val == LINK_DUPLEX_HALF)
3446 					vrp->chip.mii.control &=
3447 					    ~MII_CONTROL_FDUPLEX;
3448 				else
3449 					err = EINVAL;
3450 			} else
3451 				err = EINVAL;
3452 			break;
3453 
3454 		case MAC_PROP_EN_100FDX_CAP:
3455 			if (val == 0)
3456 				vrp->param.anadv_en &=
3457 				    ~MII_ABILITY_100BASE_TX_FD;
3458 			else
3459 				vrp->param.anadv_en |=
3460 				    MII_ABILITY_100BASE_TX_FD;
3461 			break;
3462 
3463 		case MAC_PROP_EN_100HDX_CAP:
3464 			if (val == 0)
3465 				vrp->param.anadv_en &=
3466 				    ~MII_ABILITY_100BASE_TX;
3467 			else
3468 				vrp->param.anadv_en |=
3469 				    MII_ABILITY_100BASE_TX;
3470 			break;
3471 
3472 		case MAC_PROP_EN_100T4_CAP:
3473 			if (val == 0)
3474 				vrp->param.anadv_en &=
3475 				    ~MII_ABILITY_100BASE_T4;
3476 			else
3477 				vrp->param.anadv_en |=
3478 				    MII_ABILITY_100BASE_T4;
3479 			break;
3480 
3481 		case MAC_PROP_EN_10FDX_CAP:
3482 			if (val == 0)
3483 				vrp->param.anadv_en &=
3484 				    ~MII_ABILITY_10BASE_T_FD;
3485 			else
3486 				vrp->param.anadv_en |=
3487 				    MII_ABILITY_10BASE_T_FD;
3488 			break;
3489 
3490 		case MAC_PROP_EN_10HDX_CAP:
3491 			if (val == 0)
3492 				vrp->param.anadv_en &=
3493 				    ~MII_ABILITY_10BASE_T;
3494 			else
3495 				vrp->param.anadv_en |=
3496 				    MII_ABILITY_10BASE_T;
3497 			break;
3498 
3499 		case MAC_PROP_AUTONEG:
3500 		case MAC_PROP_EN_AUTONEG:
3501 			if (val == 0) {
3502 				vrp->param.an_en = VR_LINK_AUTONEG_OFF;
3503 				vrp->chip.mii.control &= ~MII_CONTROL_ANE;
3504 			} else {
3505 				vrp->param.an_en = VR_LINK_AUTONEG_ON;
3506 				if ((vrp->chip.mii.status &
3507 				    MII_STATUS_CANAUTONEG) != 0)
3508 					vrp->chip.mii.control |=
3509 					    MII_CONTROL_ANE;
3510 				else
3511 					err = EINVAL;
3512 			}
3513 			break;
3514 
3515 		case MAC_PROP_FLOWCTRL:
3516 			if (val == LINK_FLOWCTRL_NONE)
3517 				vrp->param.anadv_en &= ~MII_ABILITY_PAUSE;
3518 			else if (val == LINK_FLOWCTRL_BI)
3519 				vrp->param.anadv_en |= MII_ABILITY_PAUSE;
3520 			else
3521 				err = EINVAL;
3522 			break;
3523 
3524 		case MAC_PROP_MTU:
3525 			if (val >= ETHERMIN && val <= ETHERMTU)
3526 				vrp->param.mtu = (uint32_t)val;
3527 			else
3528 				err = EINVAL;
3529 			break;
3530 
3531 		case MAC_PROP_SPEED:
3532 			if (val == 10 * 1000 * 1000)
3533 				vrp->chip.link.speed =
3534 				    VR_LINK_SPEED_10MBS;
3535 			else if (val == 100 * 1000 * 1000)
3536 				vrp->chip.link.speed =
3537 				    VR_LINK_SPEED_100MBS;
3538 			else
3539 				err = EINVAL;
3540 			break;
3541 
3542 		default:
3543 			err = ENOTSUP;
3544 			break;
3545 	}
3546 	if (err == 0 && pr_num != MAC_PROP_PRIVATE) {
3547 		vrp->chip.mii.anadv = vrp->param.anadv_en &
3548 		    (vrp->param.an_phymask & vrp->param.an_macmask);
3549 		vr_link_init(vrp);
3550 	}
3551 	mutex_exit(&vrp->oplock);
3552 	return (err);
3553 }
3554 
3555 
3556 /*
3557  * Logging and debug functions.
3558  */
3559 static struct {
3560 	kmutex_t mutex[1];
3561 	const char *ifname;
3562 	const char *fmt;
3563 	int level;
3564 } prtdata;
3565 
3566 static void
3567 vr_vprt(const char *fmt, va_list args)
3568 {
3569 	char buf[512];
3570 
3571 	ASSERT(mutex_owned(prtdata.mutex));
3572 	(void) vsnprintf(buf, sizeof (buf), fmt, args);
3573 	cmn_err(prtdata.level, prtdata.fmt, prtdata.ifname, buf);
3574 }
3575 
3576 static void
3577 vr_log(vr_t *vrp, int level, const char *fmt, ...)
3578 {
3579 	va_list args;
3580 
3581 	mutex_enter(prtdata.mutex);
3582 	prtdata.ifname = vrp->ifname;
3583 	prtdata.fmt = "!%s: %s";
3584 	prtdata.level = level;
3585 
3586 	va_start(args, fmt);
3587 	vr_vprt(fmt, args);
3588 	va_end(args);
3589 
3590 	mutex_exit(prtdata.mutex);
3591 }
3592 
3593 #if defined(DEBUG)
3594 static void
3595 vr_prt(const char *fmt, ...)
3596 {
3597 	va_list args;
3598 
3599 	ASSERT(mutex_owned(prtdata.mutex));
3600 
3601 	va_start(args, fmt);
3602 	vr_vprt(fmt, args);
3603 	va_end(args);
3604 
3605 	mutex_exit(prtdata.mutex);
3606 }
3607 
3608 void
3609 (*vr_debug())(const char *fmt, ...)
3610 {
3611 	mutex_enter(prtdata.mutex);
3612 	prtdata.ifname = MODULENAME;
3613 	prtdata.fmt = "^%s: %s\n";
3614 	prtdata.level = CE_CONT;
3615 
3616 	return (vr_prt);
3617 }
3618 #endif	/* DEBUG */
3619 
3620 DDI_DEFINE_STREAM_OPS(vr_dev_ops, nulldev, nulldev, vr_attach, vr_detach,
3621     nodev, NULL, D_MP, NULL, vr_quiesce);
3622 
3623 static struct modldrv vr_modldrv = {
3624 	&mod_driverops,		/* Type of module. This one is a driver */
3625 	vr_ident,		/* short description */
3626 	&vr_dev_ops		/* driver specific ops */
3627 };
3628 
3629 static struct modlinkage modlinkage = {
3630 	MODREV_1, (void *)&vr_modldrv, NULL
3631 };
3632 
3633 int
3634 _info(struct modinfo *modinfop)
3635 {
3636 	return (mod_info(&modlinkage, modinfop));
3637 }
3638 
3639 int
3640 _init(void)
3641 {
3642 	int	status;
3643 
3644 	mac_init_ops(&vr_dev_ops, MODULENAME);
3645 	status = mod_install(&modlinkage);
3646 	if (status == DDI_SUCCESS)
3647 		mutex_init(prtdata.mutex, NULL, MUTEX_DRIVER, NULL);
3648 	else
3649 		mac_fini_ops(&vr_dev_ops);
3650 	return (status);
3651 }
3652 
3653 int
3654 _fini(void)
3655 {
3656 	int status;
3657 
3658 	status = mod_remove(&modlinkage);
3659 	if (status == 0) {
3660 		mac_fini_ops(&vr_dev_ops);
3661 		mutex_destroy(prtdata.mutex);
3662 	}
3663 	return (status);
3664 }
3665