xref: /illumos-gate/usr/src/uts/sun/io/eri/eri.c (revision 7e996435)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright 2010 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  */
25 
26 /*
27  * SunOS MT STREAMS ERI(PCI) 10/100 Mb Ethernet Device Driver
28  */
29 
30 #include	<sys/types.h>
31 #include	<sys/debug.h>
32 #include	<sys/stropts.h>
33 #include	<sys/stream.h>
34 #include	<sys/strsubr.h>
35 #include	<sys/kmem.h>
36 #include	<sys/crc32.h>
37 #include	<sys/ddi.h>
38 #include	<sys/sunddi.h>
39 #include	<sys/strsun.h>
40 #include	<sys/stat.h>
41 #include	<sys/cpu.h>
42 #include	<sys/kstat.h>
43 #include	<inet/common.h>
44 #include	<sys/pattr.h>
45 #include	<inet/mi.h>
46 #include	<inet/nd.h>
47 #include	<sys/ethernet.h>
48 #include	<sys/vlan.h>
49 #include	<sys/policy.h>
50 #include	<sys/mac_provider.h>
51 #include	<sys/mac_ether.h>
52 #include	<sys/dlpi.h>
53 
54 #include	<sys/pci.h>
55 
56 #include	"eri_phy.h"
57 #include	"eri_mac.h"
58 #include	"eri.h"
59 #include	"eri_common.h"
60 
61 #include	"eri_msg.h"
62 
63 /*
64  *  **** Function Prototypes *****
65  */
66 /*
67  * Entry points (man9e)
68  */
69 static	int	eri_attach(dev_info_t *, ddi_attach_cmd_t);
70 static	int	eri_detach(dev_info_t *, ddi_detach_cmd_t);
71 static	uint_t	eri_intr(caddr_t);
72 
73 /*
74  * I/O (Input/Output) Functions
75  */
76 static	boolean_t	eri_send_msg(struct eri *, mblk_t *);
77 static  mblk_t		*eri_read_dma(struct eri *, volatile struct rmd *,
78 			    volatile int, uint64_t flags);
79 
80 /*
81  * Initialization Functions
82  */
83 static  boolean_t	eri_init(struct eri *);
84 static	int	eri_allocthings(struct eri *);
85 static  int	eri_init_xfer_params(struct eri *);
86 static  void	eri_statinit(struct eri *);
87 static	int	eri_burstsize(struct eri *);
88 
89 static	void	eri_setup_mac_address(struct eri *, dev_info_t *);
90 
91 static	uint32_t eri_init_rx_channel(struct eri *);
92 static	void	eri_init_rx(struct eri *);
93 static	void	eri_init_txmac(struct eri *);
94 
95 /*
96  * Un-init Functions
97  */
98 static	uint32_t eri_txmac_disable(struct eri *);
99 static	uint32_t eri_rxmac_disable(struct eri *);
100 static	int	eri_stop(struct eri *);
101 static	void	eri_uninit(struct eri *erip);
102 static	int	eri_freebufs(struct eri *);
103 static	boolean_t	eri_reclaim(struct eri *, uint32_t);
104 
105 /*
106  * Transceiver (xcvr) Functions
107  */
108 static	int	eri_new_xcvr(struct eri *); /* Initializes & detects xcvrs */
109 static	int	eri_reset_xcvr(struct eri *);
110 
111 #ifdef	ERI_10_10_FORCE_SPEED_WORKAROUND
112 static	void	eri_xcvr_force_mode(struct eri *, uint32_t *);
113 #endif
114 
115 static	void	eri_mif_poll(struct eri *, soft_mif_enable_t);
116 static	void	eri_check_link(struct eri *);
117 static	uint32_t eri_check_link_noind(struct eri *);
118 static	link_state_t eri_mif_check(struct eri *, uint16_t, uint16_t);
119 static	void    eri_mii_write(struct eri *, uint8_t, uint16_t);
120 static	uint32_t eri_mii_read(struct eri *, uint8_t, uint16_t *);
121 
122 /*
123  * Reset Functions
124  */
125 static	uint32_t eri_etx_reset(struct eri *);
126 static	uint32_t eri_erx_reset(struct eri *);
127 
128 /*
129  * Error Functions
130  */
131 static	void eri_fatal_err(struct eri *, uint32_t);
132 static	void eri_nonfatal_err(struct eri *, uint32_t);
133 
134 #ifdef	ERI_TX_HUNG
135 static	int eri_check_txhung(struct eri *);
136 #endif
137 
138 /*
139  * Hardening Functions
140  */
141 static void eri_fault_msg(struct eri *, uint_t, msg_t, const char *, ...);
142 
143 /*
144  * Misc Functions
145  */
146 static void	eri_savecntrs(struct eri *);
147 
148 static	void	eri_stop_timer(struct eri *erip);
149 static	void	eri_start_timer(struct eri *erip, fptrv_t func, clock_t msec);
150 
151 static	void eri_bb_force_idle(struct eri *);
152 
153 /*
154  * Utility Functions
155  */
156 static	mblk_t *eri_allocb(size_t size);
157 static	mblk_t *eri_allocb_sp(size_t size);
158 static	int	eri_param_get(queue_t *q, mblk_t *mp, caddr_t cp);
159 static	int	eri_param_set(queue_t *, mblk_t *, char *, caddr_t);
160 
161 /*
162  * Functions to support ndd
163  */
164 static	void	eri_nd_free(caddr_t *nd_pparam);
165 
166 static	boolean_t	eri_nd_load(caddr_t *nd_pparam, char *name,
167 				pfi_t get_pfi, pfi_t set_pfi, caddr_t data);
168 
169 static	int	eri_nd_getset(queue_t *q, caddr_t nd_param, MBLKP mp);
170 static	void	eri_param_cleanup(struct eri *);
171 static	int	eri_param_register(struct eri *, param_t *, int);
172 static	void	eri_process_ndd_ioctl(struct eri *, queue_t *, mblk_t *, int);
173 static	int	eri_mk_mblk_tail_space(mblk_t *, mblk_t **, size_t);
174 
175 
176 static	void eri_loopback(struct eri *, queue_t *, mblk_t *);
177 
178 static uint32_t	eri_ladrf_bit(const uint8_t *);
179 
180 
181 /*
182  * Nemo (GLDv3) Functions.
183  */
184 static	int		eri_m_stat(void *, uint_t, uint64_t *);
185 static	int		eri_m_start(void *);
186 static	void		eri_m_stop(void *);
187 static	int		eri_m_promisc(void *, boolean_t);
188 static	int		eri_m_multicst(void *, boolean_t, const uint8_t *);
189 static	int		eri_m_unicst(void *, const uint8_t *);
190 static	void		eri_m_ioctl(void *, queue_t *, mblk_t *);
191 static	boolean_t	eri_m_getcapab(void *, mac_capab_t, void *);
192 static	mblk_t		*eri_m_tx(void *, mblk_t *);
193 
194 static mac_callbacks_t eri_m_callbacks = {
195 	MC_IOCTL | MC_GETCAPAB,
196 	eri_m_stat,
197 	eri_m_start,
198 	eri_m_stop,
199 	eri_m_promisc,
200 	eri_m_multicst,
201 	eri_m_unicst,
202 	eri_m_tx,
203 	NULL,
204 	eri_m_ioctl,
205 	eri_m_getcapab
206 };
207 
208 /*
209  * Define PHY Vendors: Matches to IEEE
210  * Organizationally Unique Identifier (OUI)
211  */
212 /*
213  * The first two are supported as Internal XCVRs
214  */
215 #define	PHY_VENDOR_LUCENT	0x601d
216 
217 #define	PHY_LINK_NONE		0	/* Not attempted yet or retry */
218 #define	PHY_LINK_DOWN		1	/* Not being used	*/
219 #define	PHY_LINK_UP		2	/* Not being used	*/
220 
221 #define	AUTO_SPEED		0
222 #define	FORCE_SPEED		1
223 
224 /*
225  * MIB II broadcast/multicast packets
226  */
227 
228 #define	IS_BROADCAST(pkt) (bcmp(pkt, &etherbroadcastaddr, ETHERADDRL) == 0)
229 #define	IS_MULTICAST(pkt) ((pkt[0] & 01) == 1)
230 
231 #define	BUMP_InNUcast(erip, pkt) \
232 		if (IS_BROADCAST(pkt)) { \
233 			HSTAT(erip, brdcstrcv); \
234 		} else if (IS_MULTICAST(pkt)) { \
235 			HSTAT(erip, multircv); \
236 		}
237 
238 #define	BUMP_OutNUcast(erip, pkt) \
239 		if (IS_BROADCAST(pkt)) { \
240 			HSTAT(erip, brdcstxmt); \
241 		} else if (IS_MULTICAST(pkt)) { \
242 			HSTAT(erip, multixmt); \
243 		}
244 
245 #define	NEXTTMDP(tbasep, tmdlimp, tmdp)	(((tmdp) + 1) == tmdlimp	\
246 	? tbasep : ((tmdp) + 1))
247 
248 #define	ETHERHEADER_SIZE (sizeof (struct ether_header))
249 
250 #ifdef	ERI_RCV_CKSUM
251 #define	ERI_PROCESS_READ(erip, bp, sum)				\
252 {								\
253 	t_uscalar_t	type;					\
254 	uint_t	start_offset, end_offset;			\
255 								\
256 	*(bp->b_wptr) = 0;	/* pad byte */			\
257 								\
258 	/*							\
259 	 * update MIB II statistics				\
260 	 */							\
261 	HSTAT(erip, ipackets64);				\
262 	HSTATN(erip, rbytes64, len);				\
263 	BUMP_InNUcast(erip, bp->b_rptr);			\
264 	type = get_ether_type(bp->b_rptr);			\
265 	if (type == ETHERTYPE_IP || type == ETHERTYPE_IPV6) {	\
266 		start_offset = 0;				\
267 		end_offset = MBLKL(bp) - ETHERHEADER_SIZE;	\
268 		mac_hcksum_set(bp,				\
269 			start_offset, 0, end_offset, sum,	\
270 			HCK_PARTIALCKSUM);			\
271 	} else {						\
272 		/*						\
273 		 * Strip the PADS for 802.3			\
274 		 */						\
275 		if (type <= ETHERMTU)				\
276 			bp->b_wptr = bp->b_rptr +		\
277 				ETHERHEADER_SIZE + type;	\
278 	}							\
279 }
280 #else
281 
282 #define	ERI_PROCESS_READ(erip, bp)				\
283 {								\
284 	t_uscalar_t	type;					\
285 	type = get_ether_type(bp->b_rptr);			\
286 								\
287 	/*							\
288 	 * update MIB II statistics				\
289 	 */							\
290 	HSTAT(erip, ipackets64);				\
291 	HSTATN(erip, rbytes64, len);				\
292 	BUMP_InNUcast(erip, bp->b_rptr);			\
293 	/*							\
294 	 * Strip the PADS for 802.3				\
295 	 */							\
296 	if (type <= ETHERMTU)					\
297 		bp->b_wptr = bp->b_rptr + ETHERHEADER_SIZE +	\
298 		    type;					\
299 }
300 #endif  /* ERI_RCV_CKSUM */
301 
302 /*
303  * TX Interrupt Rate
304  */
305 static	int	tx_interrupt_rate = 16;
306 
307 /*
308  * Ethernet broadcast address definition.
309  */
310 static uint8_t	etherbroadcastaddr[] = {
311 	0xff, 0xff, 0xff, 0xff, 0xff, 0xff
312 };
313 
314 /*
315  * The following variables are used for configuring various features
316  */
317 #define	ERI_DESC_HANDLE_ALLOC	0x0001
318 #define	ERI_DESC_MEM_ALLOC	0x0002
319 #define	ERI_DESC_MEM_MAP	0x0004
320 #define	ERI_RCV_HANDLE_ALLOC	0x0020
321 #define	ERI_RCV_HANDLE_BIND	0x0040
322 #define	ERI_XMIT_DVMA_ALLOC	0x0100
323 #define	ERI_RCV_DVMA_ALLOC	0x0200
324 #define	ERI_XBUFS_HANDLE_ALLOC  0x0400
325 #define	ERI_XBUFS_KMEM_ALLOC    0x0800
326 #define	ERI_XBUFS_KMEM_DMABIND  0x1000
327 
328 
329 #define	ERI_DONT_STRIP_CRC
330 /*
331  * Translate a kernel virtual address to i/o address.
332  */
333 #define	ERI_IOPBIOADDR(erip, a) \
334 	((erip)->iopbiobase + ((uintptr_t)a - (erip)->iopbkbase))
335 
336 /*
337  * ERI Configuration Register Value
338  * Used to configure parameters that define DMA burst
339  * and internal arbitration behavior.
340  * for equal TX and RX bursts, set the following in global
341  * configuration register.
342  * static	int	global_config = 0x42;
343  */
344 
345 /*
346  * ERI ERX Interrupt Blanking Time
347  * Each count is about 16 us (2048 clocks) for 66 MHz PCI.
348  */
349 static	int	intr_blank_time = 6;	/* for about 96 us */
350 static	int	intr_blank_packets = 8;	/*  */
351 
352 /*
353  * ERX PAUSE Threshold Register value
354  * The following value is for an OFF Threshold of about 15.5 Kbytes
355  * and an ON Threshold of 4K bytes.
356  */
357 static	int rx_pause_threshold = 0xf8 | (0x40 << 12);
358 static	int eri_reinit_fatal = 0;
359 #ifdef	DEBUG
360 static	int noteri = 0;
361 #endif
362 
363 #ifdef	ERI_TX_HUNG
364 static	int eri_reinit_txhung = 0;
365 #endif
366 
367 #ifdef ERI_HDX_BUG_WORKAROUND
368 /*
369  * By default enable padding in hdx mode to 97 bytes.
370  * To disabled, in /etc/system:
371  * set eri:eri_hdx_pad_enable=0
372  */
373 static	uchar_t eri_hdx_pad_enable = 1;
374 #endif
375 
376 /*
377  * Default values to initialize the cache line size and latency timer
378  * registers in the PCI configuration space.
379  * ERI_G_CACHE_LINE_SIZE_16 is defined as 16 since RIO expects in units
380  * of 4 bytes.
381  */
382 #ifdef ERI_PM_WORKAROUND_PCI
383 static int eri_pci_cache_line = ERI_G_CACHE_LINE_SIZE_32; /* 128 bytes */
384 static int eri_pci_latency_timer = 0xff;		/* 255 PCI cycles */
385 #else
386 static int eri_pci_cache_line = ERI_G_CACHE_LINE_SIZE_16; /* 64 bytes */
387 static int eri_pci_latency_timer = 0x40;		/* 64 PCI cycles */
388 #endif
389 #define	ERI_CACHE_LINE_SIZE	(eri_pci_cache_line << ERI_G_CACHE_BIT)
390 
391 /*
392  * Claim the device is ultra-capable of burst in the beginning.  Use
393  * the value returned by ddi_dma_burstsizes() to actually set the ERI
394  * global configuration register later.
395  *
396  * PCI_ERI supports Infinite burst or 64-byte-multiple bursts.
397  */
398 #define	ERI_LIMADDRLO	((uint64_t)0x00000000)
399 #define	ERI_LIMADDRHI	((uint64_t)0xffffffff)
400 
401 static ddi_dma_attr_t dma_attr = {
402 	DMA_ATTR_V0,		/* version number. */
403 	(uint64_t)ERI_LIMADDRLO, /* low address */
404 	(uint64_t)ERI_LIMADDRHI, /* high address */
405 	(uint64_t)0x00ffffff,	/* address counter max */
406 	(uint64_t)1,		/* alignment */
407 	(uint_t)0xe000e0,	/* dlim_burstsizes for 32 4 bit xfers */
408 	(uint32_t)0x1,		/* minimum transfer size */
409 	(uint64_t)0x7fffffff,	/* maximum transfer size */
410 	(uint64_t)0x00ffffff,	/* maximum segment size */
411 	1,			/* scatter/gather list length */
412 	(uint32_t)1,		/* granularity */
413 	(uint_t)0		/* attribute flags */
414 };
415 
416 static ddi_dma_attr_t desc_dma_attr = {
417 	DMA_ATTR_V0,		/* version number. */
418 	(uint64_t)ERI_LIMADDRLO, /* low address */
419 	(uint64_t)ERI_LIMADDRHI, /* high address */
420 	(uint64_t)0x00ffffff,	/* address counter max */
421 	(uint64_t)8,		/* alignment */
422 	(uint_t)0xe000e0,	/* dlim_burstsizes for 32 4 bit xfers */
423 	(uint32_t)0x1,		/* minimum transfer size */
424 	(uint64_t)0x7fffffff,	/* maximum transfer size */
425 	(uint64_t)0x00ffffff,	/* maximum segment size */
426 	1,			/* scatter/gather list length */
427 	16,			/* granularity */
428 	0			/* attribute flags */
429 };
430 
431 static ddi_device_acc_attr_t buf_attr = {
432 	DDI_DEVICE_ATTR_V0,	/* devacc_attr_version */
433 	DDI_NEVERSWAP_ACC,	/* devacc_attr_endian_flags */
434 	DDI_STRICTORDER_ACC,	/* devacc_attr_dataorder */
435 	DDI_DEFAULT_ACC,	/* devacc_attr_access */
436 };
437 
438 ddi_dma_lim_t eri_dma_limits = {
439 	(uint64_t)ERI_LIMADDRLO, /* dlim_addr_lo */
440 	(uint64_t)ERI_LIMADDRHI, /* dlim_addr_hi */
441 	(uint64_t)ERI_LIMADDRHI, /* dlim_cntr_max */
442 	(uint_t)0x00e000e0,	/* dlim_burstsizes for 32 and 64 bit xfers */
443 	(uint32_t)0x1,		/* dlim_minxfer */
444 	1024			/* dlim_speed */
445 };
446 
447 /*
448  * Link Configuration variables
449  *
450  * On Motherboard implementations, 10/100 Mbps speeds may be supported
451  * by using both the Serial Link and the MII on Non-serial-link interface.
452  * When both links are present, the driver automatically tries to bring up
453  * both. If both are up, the Gigabit Serial Link is selected for use, by
454  * default. The following configuration variable is used to force the selection
455  * of one of the links when both are up.
456  * To change the default selection to the MII link when both the Serial
457  * Link and the MII link are up, change eri_default_link to 1.
458  *
459  * Once a link is in use, the driver will continue to use that link till it
460  * goes down. When it goes down, the driver will look at the status of both the
461  * links again for link selection.
462  *
463  * Currently the standard is not stable w.r.t. gigabit link configuration
464  * using auto-negotiation procedures. Meanwhile, the link may be configured
465  * in "forced" mode using the "autonegotiation enable" bit (bit-12) in the
466  * PCS MII Command Register. In this mode the PCS sends "idles" until sees
467  * "idles" as initialization instead of the Link Configuration protocol
468  * where a Config register is exchanged. In this mode, the ERI is programmed
469  * for full-duplex operation with both pauseTX and pauseRX (for flow control)
470  * enabled.
471  */
472 
473 static	int	select_link = 0; /* automatic selection */
474 static	int	default_link = 0; /* Select Serial link if both are up */
475 
476 /*
477  * The following variables are used for configuring link-operation
478  * for all the "eri" interfaces in the system.
479  * Later these parameters may be changed per interface using "ndd" command
480  * These parameters may also be specified as properties using the .conf
481  * file mechanism for each interface.
482  */
483 
484 /*
485  * The following variable value will be overridden by "link-pulse-disabled"
486  * property which may be created by OBP or eri.conf file. This property is
487  * applicable only for 10 Mbps links.
488  */
489 static	int	link_pulse_disabled = 0;	/* link pulse disabled */
490 
491 /* For MII-based FastEthernet links */
492 static	int	adv_autoneg_cap = 1;
493 static	int	adv_100T4_cap = 0;
494 static	int	adv_100fdx_cap = 1;
495 static	int	adv_100hdx_cap = 1;
496 static	int	adv_10fdx_cap = 1;
497 static	int	adv_10hdx_cap = 1;
498 static	int	adv_pauseTX_cap =  0;
499 static	int	adv_pauseRX_cap =  0;
500 
501 /*
502  * The following gap parameters are in terms of byte times.
503  */
504 static	int	ipg0 = 8;
505 static	int	ipg1 = 8;
506 static	int	ipg2 = 4;
507 
508 static	int	lance_mode = 1;		/* to enable LANCE mode */
509 static	int	mifpoll_enable = 0;	/* to enable mif poll */
510 static	int	ngu_enable = 0;		/* to enable Never Give Up mode */
511 
512 static	int	eri_force_mlf = 0;	/* to enable mif poll */
513 static	int	eri_phy_mintrans = 1;	/* Lu3X31T mintrans algorithm */
514 /*
515  * For the MII interface, the External Transceiver is selected when present.
516  * The following variable is used to select the Internal Transceiver even
517  * when the External Transceiver is present.
518  */
519 static	int	use_int_xcvr = 0;
520 static	int	pace_size = 0;	/* Do not use pacing for now */
521 
522 static	int	eri_use_dvma_rx = 0;	/* =1:use dvma */
523 static	int	eri_rx_bcopy_max = RX_BCOPY_MAX;	/* =1:use bcopy() */
524 static	int	eri_overflow_reset = 1;	/* global reset if rx_fifo_overflow */
525 static	int	eri_tx_ring_size = 2048; /* number of entries in tx ring */
526 static	int	eri_rx_ring_size = 1024; /* number of entries in rx ring */
527 /*
528  * The following parameters may be configured by the user. If they are not
529  * configured by the user, the values will be based on the capabilities of
530  * the transceiver.
531  * The value "ERI_NOTUSR" is ORed with the parameter value to indicate values
532  * which are NOT configured by the user.
533  */
534 
535 #define	ERI_NOTUSR	0x0f000000
536 #define	ERI_MASK_1BIT	0x1
537 #define	ERI_MASK_2BIT	0x3
538 #define	ERI_MASK_8BIT	0xff
539 
540 
541 /*
542  * Note:
543  * ERI has all of the above capabilities.
544  * Only when an External Transceiver is selected for MII-based FastEthernet
545  * link operation, the capabilities depend upon the capabilities of the
546  * External Transceiver.
547  */
548 
549 /* ------------------------------------------------------------------------- */
550 
551 static  param_t	param_arr[] = {
552 	/* min		max		value	r/w/hidden+name */
553 	{  0,		2,		2,	"-transceiver_inuse"},
554 	{  0,		1,		0,	"-link_status"},
555 	{  0,		1,		0,	"-link_speed"},
556 	{  0,		1,		0,	"-link_mode"},
557 	{  0,		255,		8,	"+ipg1"},
558 	{  0,		255,		4,	"+ipg2"},
559 	{  0,		1,		0,	"+use_int_xcvr"},
560 	{  0,		255,		0,	"+pace_size"},
561 	{  0,		1,		1,	"+adv_autoneg_cap"},
562 	{  0,		1,		1,	"+adv_100T4_cap"},
563 	{  0,		1,		1,	"+adv_100fdx_cap"},
564 	{  0,		1,		1,	"+adv_100hdx_cap"},
565 	{  0,		1,		1,	"+adv_10fdx_cap"},
566 	{  0,		1,		1,	"+adv_10hdx_cap"},
567 	{  0,		1,		1,	"-autoneg_cap"},
568 	{  0,		1,		1,	"-100T4_cap"},
569 	{  0,		1,		1,	"-100fdx_cap"},
570 	{  0,		1,		1,	"-100hdx_cap"},
571 	{  0,		1,		1,	"-10fdx_cap"},
572 	{  0,		1,		1,	"-10hdx_cap"},
573 	{  0,		1,		0,	"-lp_autoneg_cap"},
574 	{  0,		1,		0,	"-lp_100T4_cap"},
575 	{  0,		1,		0,	"-lp_100fdx_cap"},
576 	{  0,		1,		0,	"-lp_100hdx_cap"},
577 	{  0,		1,		0,	"-lp_10fdx_cap"},
578 	{  0,		1,		0,	"-lp_10hdx_cap"},
579 	{  0,		1,		1,	"+lance_mode"},
580 	{  0,		31,		8,	"+ipg0"},
581 	{  0,		127,		6,	"+intr_blank_time"},
582 	{  0,		255,		8,	"+intr_blank_packets"},
583 	{  0,		1,		1,	"!serial-link"},
584 	{  0,		2,		1,	"!non-serial-link"},
585 	{  0,		1,		0,	"%select-link"},
586 	{  0,		1,		0,	"%default-link"},
587 	{  0,		2,		0,	"!link-in-use"},
588 	{  0,		1,		1,	"%adv_asm_dir_cap"},
589 	{  0,		1,		1,	"%adv_pause_cap"},
590 	{  0,		1,		0,	"!asm_dir_cap"},
591 	{  0,		1,		0,	"!pause_cap"},
592 	{  0,		1,		0,	"!lp_asm_dir_cap"},
593 	{  0,		1,		0,	"!lp_pause_cap"},
594 };
595 
596 DDI_DEFINE_STREAM_OPS(eri_dev_ops, nulldev, nulldev, eri_attach, eri_detach,
597     nodev, NULL, D_MP, NULL, ddi_quiesce_not_supported);
598 
599 /*
600  * This is the loadable module wrapper.
601  */
602 #include <sys/modctl.h>
603 
604 /*
605  * Module linkage information for the kernel.
606  */
607 static struct modldrv modldrv = {
608 	&mod_driverops,	/* Type of module.  This one is a driver */
609 	"Sun RIO 10/100 Mb Ethernet",
610 	&eri_dev_ops,	/* driver ops */
611 };
612 
613 static struct modlinkage modlinkage = {
614 	MODREV_1, &modldrv, NULL
615 };
616 
617 /*
618  * Hardware Independent Functions
619  * New Section
620  */
621 
622 int
_init(void)623 _init(void)
624 {
625 	int	status;
626 
627 	mac_init_ops(&eri_dev_ops, "eri");
628 	if ((status = mod_install(&modlinkage)) != 0) {
629 		mac_fini_ops(&eri_dev_ops);
630 	}
631 	return (status);
632 }
633 
634 int
_fini(void)635 _fini(void)
636 {
637 	int status;
638 
639 	status = mod_remove(&modlinkage);
640 	if (status == 0) {
641 		mac_fini_ops(&eri_dev_ops);
642 	}
643 	return (status);
644 }
645 
646 int
_info(struct modinfo * modinfop)647 _info(struct modinfo *modinfop)
648 {
649 	return (mod_info(&modlinkage, modinfop));
650 }
651 
652 
653 /*
654  * Interface exists: make available by filling in network interface
655  * record.  System will initialize the interface when it is ready
656  * to accept packets.
657  */
658 static int
eri_attach(dev_info_t * dip,ddi_attach_cmd_t cmd)659 eri_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
660 {
661 	struct eri *erip = NULL;
662 	mac_register_t *macp = NULL;
663 	int	regno;
664 	boolean_t	doinit;
665 	boolean_t	mutex_inited = B_FALSE;
666 	boolean_t	intr_add = B_FALSE;
667 
668 	switch (cmd) {
669 	case DDI_ATTACH:
670 		break;
671 
672 	case DDI_RESUME:
673 		if ((erip = ddi_get_driver_private(dip)) == NULL)
674 			return (DDI_FAILURE);
675 
676 		mutex_enter(&erip->intrlock);
677 		erip->flags &= ~ERI_SUSPENDED;
678 		erip->init_macregs = 1;
679 		param_linkup = 0;
680 		erip->stats.link_up = LINK_STATE_DOWN;
681 		erip->linkcheck = 0;
682 
683 		doinit =  (erip->flags & ERI_STARTED) ? B_TRUE : B_FALSE;
684 		mutex_exit(&erip->intrlock);
685 
686 		if (doinit && !eri_init(erip)) {
687 			return (DDI_FAILURE);
688 		}
689 		return (DDI_SUCCESS);
690 
691 	default:
692 		return (DDI_FAILURE);
693 	}
694 
695 	/*
696 	 * Allocate soft device data structure
697 	 */
698 	erip = kmem_zalloc(sizeof (struct eri), KM_SLEEP);
699 
700 	/*
701 	 * Initialize as many elements as possible.
702 	 */
703 	ddi_set_driver_private(dip, erip);
704 	erip->dip = dip;			/* dip	*/
705 	erip->instance = ddi_get_instance(dip);	/* instance */
706 	erip->flags = 0;
707 	erip->multi_refcnt = 0;
708 	erip->promisc = B_FALSE;
709 
710 	if ((macp = mac_alloc(MAC_VERSION)) == NULL) {
711 		ERI_FAULT_MSG1(erip, SEVERITY_HIGH, ERI_VERB_MSG,
712 		    "mac_alloc failed");
713 		goto attach_fail;
714 	}
715 	macp->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
716 	macp->m_driver = erip;
717 	macp->m_dip = dip;
718 	macp->m_src_addr = erip->ouraddr;
719 	macp->m_callbacks = &eri_m_callbacks;
720 	macp->m_min_sdu = 0;
721 	macp->m_max_sdu = ETHERMTU;
722 	macp->m_margin = VLAN_TAGSZ;
723 
724 	/*
725 	 * Map in the device registers.
726 	 * Separate pointers will be set up for the following
727 	 * register groups within the GEM Register Space:
728 	 *	Global register set
729 	 *	ETX register set
730 	 *	ERX register set
731 	 *	BigMAC register set.
732 	 *	MIF register set
733 	 */
734 
735 	if (ddi_dev_nregs(dip, &regno) != (DDI_SUCCESS)) {
736 		ERI_FAULT_MSG2(erip, SEVERITY_HIGH, ERI_VERB_MSG,
737 		    "ddi_dev_nregs failed, returned %d", regno);
738 		goto attach_fail;
739 	}
740 
741 	/*
742 	 * Map the PCI config space
743 	 */
744 	if (pci_config_setup(dip, &erip->pci_config_handle) != DDI_SUCCESS) {
745 		ERI_FAULT_MSG2(erip, SEVERITY_HIGH, ERI_VERB_MSG,
746 		    "%s pci_config_setup()", config_space_fatal_msg);
747 		goto attach_fail;
748 	}
749 
750 	/*
751 	 * Initialize device attributes structure
752 	 */
753 	erip->dev_attr.devacc_attr_version =	DDI_DEVICE_ATTR_V0;
754 	erip->dev_attr.devacc_attr_dataorder =	DDI_STRICTORDER_ACC;
755 	erip->dev_attr.devacc_attr_endian_flags =	DDI_STRUCTURE_LE_ACC;
756 
757 	if (ddi_regs_map_setup(dip, 1, (caddr_t *)&(erip->globregp), 0, 0,
758 	    &erip->dev_attr, &erip->globregh)) {
759 		goto attach_fail;
760 	}
761 	erip->etxregh =		erip->globregh;
762 	erip->erxregh =		erip->globregh;
763 	erip->bmacregh =	erip->globregh;
764 	erip->mifregh =		erip->globregh;
765 
766 	erip->etxregp =  (void *)(((caddr_t)erip->globregp) + 0x2000);
767 	erip->erxregp =  (void *)(((caddr_t)erip->globregp) + 0x4000);
768 	erip->bmacregp = (void *)(((caddr_t)erip->globregp) + 0x6000);
769 	erip->mifregp =  (void *)(((caddr_t)erip->globregp) + 0x6200);
770 
771 	/*
772 	 * Map the software reset register.
773 	 */
774 	if (ddi_regs_map_setup(dip, 1, (caddr_t *)&(erip->sw_reset_reg),
775 	    0x1010, 4, &erip->dev_attr, &erip->sw_reset_regh)) {
776 		ERI_FAULT_MSG1(erip, SEVERITY_MID, ERI_VERB_MSG,
777 		    mregs_4soft_reset_fail_msg);
778 		goto attach_fail;
779 	}
780 
781 	/*
782 	 * Try and stop the device.
783 	 * This is done until we want to handle interrupts.
784 	 */
785 	if (eri_stop(erip))
786 		goto attach_fail;
787 
788 	/*
789 	 * set PCI latency timer register.
790 	 */
791 	pci_config_put8(erip->pci_config_handle, PCI_CONF_LATENCY_TIMER,
792 	    (uchar_t)eri_pci_latency_timer);
793 
794 	if (ddi_intr_hilevel(dip, 0)) {
795 		ERI_FAULT_MSG1(erip, SEVERITY_NONE, ERI_VERB_MSG,
796 		    " high-level interrupts are not supported");
797 		goto attach_fail;
798 	}
799 
800 	/*
801 	 * Get the interrupt cookie so the mutexes can be
802 	 * Initialized.
803 	 */
804 	if (ddi_get_iblock_cookie(dip, 0, &erip->cookie) != DDI_SUCCESS)
805 		goto attach_fail;
806 
807 	/*
808 	 * Initialize mutex's for this device.
809 	 */
810 	mutex_init(&erip->xmitlock, NULL, MUTEX_DRIVER, (void *)erip->cookie);
811 	mutex_init(&erip->intrlock, NULL, MUTEX_DRIVER, (void *)erip->cookie);
812 	mutex_init(&erip->linklock, NULL, MUTEX_DRIVER, (void *)erip->cookie);
813 	mutex_init(&erip->xcvrlock, NULL, MUTEX_DRIVER, (void *)erip->cookie);
814 
815 	mutex_inited = B_TRUE;
816 
817 	/*
818 	 * Add interrupt to system
819 	 */
820 	if (ddi_add_intr(dip, 0, &erip->cookie, 0, eri_intr, (caddr_t)erip) ==
821 	    DDI_SUCCESS)
822 		intr_add = B_TRUE;
823 	else {
824 		goto attach_fail;
825 	}
826 
827 	/*
828 	 * Set up the ethernet mac address.
829 	 */
830 	(void) eri_setup_mac_address(erip, dip);
831 
832 	if (eri_init_xfer_params(erip))
833 		goto attach_fail;
834 
835 	if (eri_burstsize(erip) == DDI_FAILURE) {
836 		goto attach_fail;
837 	}
838 
839 	/*
840 	 * Setup fewer receive bufers.
841 	 */
842 	ERI_RPENDING = eri_rx_ring_size;
843 	ERI_TPENDING = eri_tx_ring_size;
844 
845 	erip->rpending_mask = ERI_RPENDING - 1;
846 	erip->rmdmax_mask = ERI_RPENDING - 1;
847 	erip->mif_config = (ERI_PHY_BMSR << ERI_MIF_CFGPR_SHIFT);
848 
849 	erip->stats.pmcap = ERI_PMCAP_NONE;
850 	if (pci_report_pmcap(dip, PCI_PM_IDLESPEED, (void *)4000) ==
851 	    DDI_SUCCESS)
852 		erip->stats.pmcap = ERI_PMCAP_4MHZ;
853 
854 	if (mac_register(macp, &erip->mh) != 0)
855 		goto attach_fail;
856 
857 	mac_free(macp);
858 
859 	return (DDI_SUCCESS);
860 
861 attach_fail:
862 	if (erip->pci_config_handle)
863 		(void) pci_config_teardown(&erip->pci_config_handle);
864 
865 	if (mutex_inited) {
866 		mutex_destroy(&erip->xmitlock);
867 		mutex_destroy(&erip->intrlock);
868 		mutex_destroy(&erip->linklock);
869 		mutex_destroy(&erip->xcvrlock);
870 	}
871 
872 	ERI_FAULT_MSG1(erip, SEVERITY_NONE, ERI_VERB_MSG, attach_fail_msg);
873 
874 	if (intr_add)
875 		ddi_remove_intr(dip, 0, erip->cookie);
876 
877 	if (erip->globregh)
878 		ddi_regs_map_free(&erip->globregh);
879 
880 	if (macp != NULL)
881 		mac_free(macp);
882 	if (erip != NULL)
883 		kmem_free(erip, sizeof (*erip));
884 
885 	return (DDI_FAILURE);
886 }
887 
888 static int
eri_detach(dev_info_t * dip,ddi_detach_cmd_t cmd)889 eri_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
890 {
891 	struct eri	*erip;
892 	int i;
893 
894 	if ((erip = ddi_get_driver_private(dip)) == NULL) {
895 		/*
896 		 * No resources allocated.
897 		 */
898 		return (DDI_FAILURE);
899 	}
900 
901 	switch (cmd) {
902 	case DDI_DETACH:
903 		break;
904 
905 	case DDI_SUSPEND:
906 		erip->flags |= ERI_SUSPENDED;
907 		eri_uninit(erip);
908 		return (DDI_SUCCESS);
909 
910 	default:
911 		return (DDI_FAILURE);
912 	}
913 
914 	if (erip->flags & (ERI_RUNNING | ERI_SUSPENDED)) {
915 		ERI_FAULT_MSG1(erip, SEVERITY_NONE, ERI_VERB_MSG, busy_msg);
916 		return (DDI_FAILURE);
917 	}
918 
919 	if (mac_unregister(erip->mh) != 0) {
920 		return (DDI_FAILURE);
921 	}
922 
923 	/*
924 	 * Make the device quiescent
925 	 */
926 	(void) eri_stop(erip);
927 
928 	/*
929 	 * Remove instance of the intr
930 	 */
931 	ddi_remove_intr(dip, 0, erip->cookie);
932 
933 	if (erip->pci_config_handle)
934 		(void) pci_config_teardown(&erip->pci_config_handle);
935 
936 	/*
937 	 * Destroy all mutexes and data structures allocated during
938 	 * attach time.
939 	 */
940 
941 	if (erip->globregh)
942 		ddi_regs_map_free(&erip->globregh);
943 
944 	erip->etxregh =		NULL;
945 	erip->erxregh =		NULL;
946 	erip->bmacregh =	NULL;
947 	erip->mifregh =		NULL;
948 	erip->globregh =	NULL;
949 
950 	if (erip->sw_reset_regh)
951 		ddi_regs_map_free(&erip->sw_reset_regh);
952 
953 	if (erip->ksp)
954 		kstat_delete(erip->ksp);
955 
956 	eri_stop_timer(erip); /* acquire linklock */
957 	eri_start_timer(erip, eri_check_link, 0);
958 	mutex_destroy(&erip->xmitlock);
959 	mutex_destroy(&erip->intrlock);
960 	mutex_destroy(&erip->linklock);
961 	mutex_destroy(&erip->xcvrlock);
962 
963 	if (erip->md_h) {
964 		if (ddi_dma_unbind_handle(erip->md_h) ==
965 		    DDI_FAILURE)
966 			return (DDI_FAILURE);
967 		ddi_dma_mem_free(&erip->mdm_h);
968 		ddi_dma_free_handle(&erip->md_h);
969 	}
970 
971 	if (eri_freebufs(erip))
972 		return (DDI_FAILURE);
973 
974 	/* dvma handle case */
975 
976 	if (erip->eri_dvmarh) {
977 		(void) dvma_release(erip->eri_dvmarh);
978 		erip->eri_dvmarh = NULL;
979 	}
980 /*
981  *	xmit_dma_mode, erip->ndmaxh[i]=NULL for dvma
982  */
983 	else {
984 		for (i = 0; i < ERI_RPENDING; i++)
985 			if (erip->ndmarh[i])
986 				ddi_dma_free_handle(&erip->ndmarh[i]);
987 	}
988 /*
989  *	Release TX buffer
990  */
991 	if (erip->tbuf_ioaddr != 0) {
992 		(void) ddi_dma_unbind_handle(erip->tbuf_handle);
993 		erip->tbuf_ioaddr = 0;
994 	}
995 	if (erip->tbuf_kaddr != NULL) {
996 		ddi_dma_mem_free(&erip->tbuf_acch);
997 		erip->tbuf_kaddr = NULL;
998 	}
999 	if (erip->tbuf_handle != NULL) {
1000 		ddi_dma_free_handle(&erip->tbuf_handle);
1001 		erip->tbuf_handle = NULL;
1002 	}
1003 
1004 	eri_param_cleanup(erip);
1005 
1006 	ddi_set_driver_private(dip, NULL);
1007 	kmem_free((caddr_t)erip, sizeof (struct eri));
1008 
1009 	return (DDI_SUCCESS);
1010 }
1011 
1012 /*
1013  * To set up the mac address for the network interface:
1014  * The adapter card may support a local mac address which is published
1015  * in a device node property "local-mac-address". This mac address is
1016  * treated as the factory-installed mac address for DLPI interface.
1017  * If the adapter firmware has used the device for diskless boot
1018  * operation it publishes a property called "mac-address" for use by
1019  * inetboot and the device driver.
1020  * If "mac-address" is not found, the system options property
1021  * "local-mac-address" is used to select the mac-address. If this option
1022  * is set to "true", and "local-mac-address" has been found, then
1023  * local-mac-address is used; otherwise the system mac address is used
1024  * by calling the "localetheraddr()" function.
1025  */
1026 
1027 static void
eri_setup_mac_address(struct eri * erip,dev_info_t * dip)1028 eri_setup_mac_address(struct eri *erip, dev_info_t *dip)
1029 {
1030 	uchar_t			*prop;
1031 	char			*uselocal;
1032 	unsigned		prop_len;
1033 	uint32_t		addrflags = 0;
1034 	struct ether_addr	factaddr;
1035 
1036 	/*
1037 	 * Check if it is an adapter with its own local mac address
1038 	 * If it is present, save it as the "factory-address"
1039 	 * for this adapter.
1040 	 */
1041 	if (ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
1042 	    "local-mac-address", &prop, &prop_len) == DDI_PROP_SUCCESS) {
1043 		if (prop_len == ETHERADDRL) {
1044 			addrflags = ERI_FACTADDR_PRESENT;
1045 			bcopy(prop, &factaddr, ETHERADDRL);
1046 			ERI_FAULT_MSG2(erip, SEVERITY_NONE, ERI_VERB_MSG,
1047 			    lether_addr_msg, ether_sprintf(&factaddr));
1048 		}
1049 		ddi_prop_free(prop);
1050 	}
1051 	/*
1052 	 * Check if the adapter has published "mac-address" property.
1053 	 * If it is present, use it as the mac address for this device.
1054 	 */
1055 	if (ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
1056 	    "mac-address", &prop, &prop_len) == DDI_PROP_SUCCESS) {
1057 		if (prop_len >= ETHERADDRL) {
1058 			bcopy(prop, erip->ouraddr, ETHERADDRL);
1059 			ddi_prop_free(prop);
1060 			return;
1061 		}
1062 		ddi_prop_free(prop);
1063 	}
1064 
1065 	if (ddi_prop_lookup_string(DDI_DEV_T_ANY, dip, 0, "local-mac-address?",
1066 	    &uselocal) == DDI_PROP_SUCCESS) {
1067 		if ((strcmp("true", uselocal) == 0) &&
1068 		    (addrflags & ERI_FACTADDR_PRESENT)) {
1069 			addrflags |= ERI_FACTADDR_USE;
1070 			bcopy(&factaddr, erip->ouraddr, ETHERADDRL);
1071 			ddi_prop_free(uselocal);
1072 			ERI_FAULT_MSG1(erip, SEVERITY_NONE, ERI_VERB_MSG,
1073 			    lmac_addr_msg);
1074 			return;
1075 		}
1076 		ddi_prop_free(uselocal);
1077 	}
1078 
1079 	/*
1080 	 * Get the system ethernet address.
1081 	 */
1082 	(void) localetheraddr(NULL, &factaddr);
1083 	bcopy(&factaddr, erip->ouraddr, ETHERADDRL);
1084 }
1085 
1086 
1087 /*
1088  * Calculate the bit in the multicast address filter that selects the given
1089  * address.
1090  * Note: For ERI, the last 8-bits are used.
1091  */
1092 
1093 static uint32_t
eri_ladrf_bit(const uint8_t * addr)1094 eri_ladrf_bit(const uint8_t *addr)
1095 {
1096 	uint32_t crc;
1097 
1098 	CRC32(crc, addr, ETHERADDRL, -1U, crc32_table);
1099 
1100 	/*
1101 	 * Just want the 8 most significant bits.
1102 	 */
1103 	return ((~crc) >> 24);
1104 }
1105 
1106 static void
eri_m_ioctl(void * arg,queue_t * wq,mblk_t * mp)1107 eri_m_ioctl(void *arg, queue_t *wq, mblk_t *mp)
1108 {
1109 	struct	eri	*erip = arg;
1110 	struct	iocblk	*iocp = (void *)mp->b_rptr;
1111 	int	err;
1112 
1113 	ASSERT(erip != NULL);
1114 
1115 	/*
1116 	 * Privilege checks.
1117 	 */
1118 	switch (iocp->ioc_cmd) {
1119 	case ERI_SET_LOOP_MODE:
1120 	case ERI_ND_SET:
1121 		err = secpolicy_net_config(iocp->ioc_cr, B_FALSE);
1122 		if (err != 0) {
1123 			miocnak(wq, mp, 0, err);
1124 			return;
1125 		}
1126 		break;
1127 	default:
1128 		break;
1129 	}
1130 
1131 	switch (iocp->ioc_cmd) {
1132 	case ERI_ND_GET:
1133 	case ERI_ND_SET:
1134 		eri_process_ndd_ioctl(erip, wq, mp, iocp->ioc_cmd);
1135 		break;
1136 
1137 	case ERI_SET_LOOP_MODE:
1138 	case ERI_GET_LOOP_MODE:
1139 		/*
1140 		 * XXX: Consider updating this to the new netlb ioctls.
1141 		 */
1142 		eri_loopback(erip, wq, mp);
1143 		break;
1144 
1145 	default:
1146 		miocnak(wq, mp, 0, EINVAL);
1147 		break;
1148 	}
1149 
1150 	ASSERT(!MUTEX_HELD(&erip->linklock));
1151 }
1152 
1153 static void
eri_loopback(struct eri * erip,queue_t * wq,mblk_t * mp)1154 eri_loopback(struct eri *erip, queue_t *wq, mblk_t *mp)
1155 {
1156 	struct	iocblk	*iocp = (void *)mp->b_rptr;
1157 	loopback_t	*al;
1158 
1159 	if (mp->b_cont == NULL || MBLKL(mp->b_cont) < sizeof (loopback_t)) {
1160 		miocnak(wq, mp, 0, EINVAL);
1161 		return;
1162 	}
1163 
1164 	al = (void *)mp->b_cont->b_rptr;
1165 
1166 	switch (iocp->ioc_cmd) {
1167 	case ERI_SET_LOOP_MODE:
1168 		switch (al->loopback) {
1169 		case ERI_LOOPBACK_OFF:
1170 			erip->flags &= (~ERI_MACLOOPBACK & ~ERI_SERLOOPBACK);
1171 			/* force link status to go down */
1172 			param_linkup = 0;
1173 			erip->stats.link_up = LINK_STATE_DOWN;
1174 			erip->stats.link_duplex = LINK_DUPLEX_UNKNOWN;
1175 			(void) eri_init(erip);
1176 			break;
1177 
1178 		case ERI_MAC_LOOPBACK_ON:
1179 			erip->flags |= ERI_MACLOOPBACK;
1180 			erip->flags &= ~ERI_SERLOOPBACK;
1181 			param_linkup = 0;
1182 			erip->stats.link_up = LINK_STATE_DOWN;
1183 			erip->stats.link_duplex = LINK_DUPLEX_UNKNOWN;
1184 			(void) eri_init(erip);
1185 			break;
1186 
1187 		case ERI_PCS_LOOPBACK_ON:
1188 			break;
1189 
1190 		case ERI_SER_LOOPBACK_ON:
1191 			erip->flags |= ERI_SERLOOPBACK;
1192 			erip->flags &= ~ERI_MACLOOPBACK;
1193 			/* force link status to go down */
1194 			param_linkup = 0;
1195 			erip->stats.link_up = LINK_STATE_DOWN;
1196 			erip->stats.link_duplex = LINK_DUPLEX_UNKNOWN;
1197 			(void) eri_init(erip);
1198 			break;
1199 
1200 		default:
1201 			ERI_FAULT_MSG1(erip, SEVERITY_NONE, ERI_VERB_MSG,
1202 			    loopback_val_default);
1203 			miocnak(wq, mp, 0, EINVAL);
1204 			return;
1205 		}
1206 		miocnak(wq, mp, 0, 0);
1207 		break;
1208 
1209 	case ERI_GET_LOOP_MODE:
1210 		al->loopback =	ERI_MAC_LOOPBACK_ON | ERI_PCS_LOOPBACK_ON |
1211 		    ERI_SER_LOOPBACK_ON;
1212 		miocack(wq, mp, sizeof (loopback_t), 0);
1213 		break;
1214 
1215 	default:
1216 		ERI_FAULT_MSG1(erip, SEVERITY_LOW, ERI_VERB_MSG,
1217 		    loopback_cmd_default);
1218 	}
1219 }
1220 
1221 static int
eri_m_promisc(void * arg,boolean_t on)1222 eri_m_promisc(void *arg, boolean_t on)
1223 {
1224 	struct	eri	*erip = arg;
1225 
1226 	mutex_enter(&erip->intrlock);
1227 	erip->promisc = on;
1228 	eri_init_rx(erip);
1229 	mutex_exit(&erip->intrlock);
1230 	return (0);
1231 }
1232 
1233 /*
1234  * This is to support unlimited number of members
1235  * in Multicast.
1236  */
1237 static int
eri_m_multicst(void * arg,boolean_t add,const uint8_t * mca)1238 eri_m_multicst(void *arg, boolean_t add, const uint8_t *mca)
1239 {
1240 	struct eri		*erip = arg;
1241 	uint32_t		ladrf_bit;
1242 
1243 	/*
1244 	 * If this address's bit was not already set in the local address
1245 	 * filter, add it and re-initialize the Hardware.
1246 	 */
1247 	ladrf_bit = eri_ladrf_bit(mca);
1248 
1249 	mutex_enter(&erip->intrlock);
1250 	if (add) {
1251 		erip->ladrf_refcnt[ladrf_bit]++;
1252 		if (erip->ladrf_refcnt[ladrf_bit] == 1) {
1253 			LADRF_SET(erip, ladrf_bit);
1254 			erip->multi_refcnt++;
1255 			eri_init_rx(erip);
1256 		}
1257 	} else {
1258 		erip->ladrf_refcnt[ladrf_bit]--;
1259 		if (erip->ladrf_refcnt[ladrf_bit] == 0) {
1260 			LADRF_CLR(erip, ladrf_bit);
1261 			erip->multi_refcnt--;
1262 			eri_init_rx(erip);
1263 		}
1264 	}
1265 	mutex_exit(&erip->intrlock);
1266 	return (0);
1267 }
1268 
1269 static int
eri_m_unicst(void * arg,const uint8_t * macaddr)1270 eri_m_unicst(void *arg, const uint8_t *macaddr)
1271 {
1272 	struct	eri	*erip = arg;
1273 
1274 	/*
1275 	 * Set new interface local address and re-init device.
1276 	 * This is destructive to any other streams attached
1277 	 * to this device.
1278 	 */
1279 	mutex_enter(&erip->intrlock);
1280 	bcopy(macaddr, &erip->ouraddr, ETHERADDRL);
1281 	eri_init_rx(erip);
1282 	mutex_exit(&erip->intrlock);
1283 	return (0);
1284 }
1285 
1286 /*ARGSUSED*/
1287 static boolean_t
eri_m_getcapab(void * arg,mac_capab_t cap,void * cap_data)1288 eri_m_getcapab(void *arg, mac_capab_t cap, void *cap_data)
1289 {
1290 	switch (cap) {
1291 	case MAC_CAPAB_HCKSUM: {
1292 		uint32_t *hcksum_txflags = cap_data;
1293 		*hcksum_txflags = HCKSUM_INET_PARTIAL;
1294 		return (B_TRUE);
1295 	}
1296 	default:
1297 		return (B_FALSE);
1298 	}
1299 }
1300 
1301 static int
eri_m_start(void * arg)1302 eri_m_start(void *arg)
1303 {
1304 	struct eri	*erip = arg;
1305 
1306 	mutex_enter(&erip->intrlock);
1307 	erip->flags |= ERI_STARTED;
1308 	mutex_exit(&erip->intrlock);
1309 
1310 	if (!eri_init(erip)) {
1311 		mutex_enter(&erip->intrlock);
1312 		erip->flags &= ~ERI_STARTED;
1313 		mutex_exit(&erip->intrlock);
1314 		return (EIO);
1315 	}
1316 	return (0);
1317 }
1318 
1319 static void
eri_m_stop(void * arg)1320 eri_m_stop(void *arg)
1321 {
1322 	struct eri	*erip = arg;
1323 
1324 	mutex_enter(&erip->intrlock);
1325 	erip->flags &= ~ERI_STARTED;
1326 	mutex_exit(&erip->intrlock);
1327 	eri_uninit(erip);
1328 }
1329 
1330 static int
eri_m_stat(void * arg,uint_t stat,uint64_t * val)1331 eri_m_stat(void *arg, uint_t stat, uint64_t *val)
1332 {
1333 	struct eri	*erip = arg;
1334 	struct stats	*esp;
1335 	boolean_t	macupdate = B_FALSE;
1336 
1337 	esp = &erip->stats;
1338 
1339 	mutex_enter(&erip->xmitlock);
1340 	if ((erip->flags & ERI_RUNNING) && (erip->flags & ERI_TXINIT)) {
1341 		erip->tx_completion =
1342 		    GET_ETXREG(tx_completion) & ETX_COMPLETION_MASK;
1343 		macupdate |= eri_reclaim(erip, erip->tx_completion);
1344 	}
1345 	mutex_exit(&erip->xmitlock);
1346 	if (macupdate)
1347 		mac_tx_update(erip->mh);
1348 
1349 	eri_savecntrs(erip);
1350 
1351 	switch (stat) {
1352 	case MAC_STAT_IFSPEED:
1353 		*val = esp->ifspeed * 1000000ULL;
1354 		break;
1355 	case MAC_STAT_MULTIRCV:
1356 		*val = esp->multircv;
1357 		break;
1358 	case MAC_STAT_BRDCSTRCV:
1359 		*val = esp->brdcstrcv;
1360 		break;
1361 	case MAC_STAT_IPACKETS:
1362 		*val = esp->ipackets64;
1363 		break;
1364 	case MAC_STAT_RBYTES:
1365 		*val = esp->rbytes64;
1366 		break;
1367 	case MAC_STAT_OBYTES:
1368 		*val = esp->obytes64;
1369 		break;
1370 	case MAC_STAT_OPACKETS:
1371 		*val = esp->opackets64;
1372 		break;
1373 	case MAC_STAT_IERRORS:
1374 		*val = esp->ierrors;
1375 		break;
1376 	case MAC_STAT_OERRORS:
1377 		*val = esp->oerrors;
1378 		break;
1379 	case MAC_STAT_MULTIXMT:
1380 		*val = esp->multixmt;
1381 		break;
1382 	case MAC_STAT_BRDCSTXMT:
1383 		*val = esp->brdcstxmt;
1384 		break;
1385 	case MAC_STAT_NORCVBUF:
1386 		*val = esp->norcvbuf;
1387 		break;
1388 	case MAC_STAT_NOXMTBUF:
1389 		*val = esp->noxmtbuf;
1390 		break;
1391 	case MAC_STAT_UNDERFLOWS:
1392 		*val = esp->txmac_urun;
1393 		break;
1394 	case MAC_STAT_OVERFLOWS:
1395 		*val = esp->rx_overflow;
1396 		break;
1397 	case MAC_STAT_COLLISIONS:
1398 		*val = esp->collisions;
1399 		break;
1400 	case ETHER_STAT_ALIGN_ERRORS:
1401 		*val = esp->rx_align_err;
1402 		break;
1403 	case ETHER_STAT_FCS_ERRORS:
1404 		*val = esp->rx_crc_err;
1405 		break;
1406 	case ETHER_STAT_EX_COLLISIONS:
1407 		*val = esp->excessive_coll;
1408 		break;
1409 	case ETHER_STAT_TX_LATE_COLLISIONS:
1410 		*val = esp->late_coll;
1411 		break;
1412 	case ETHER_STAT_FIRST_COLLISIONS:
1413 		*val = esp->first_coll;
1414 		break;
1415 	case ETHER_STAT_LINK_DUPLEX:
1416 		*val = esp->link_duplex;
1417 		break;
1418 	case ETHER_STAT_TOOLONG_ERRORS:
1419 		*val = esp->rx_toolong_pkts;
1420 		break;
1421 	case ETHER_STAT_TOOSHORT_ERRORS:
1422 		*val = esp->rx_runt;
1423 		break;
1424 
1425 	case ETHER_STAT_XCVR_ADDR:
1426 		*val = erip->phyad;
1427 		break;
1428 
1429 	case ETHER_STAT_XCVR_INUSE:
1430 		*val = XCVR_100X;	/* should always be 100X for now */
1431 		break;
1432 
1433 	case ETHER_STAT_CAP_100FDX:
1434 		*val = param_bmsr_100fdx;
1435 		break;
1436 	case ETHER_STAT_CAP_100HDX:
1437 		*val = param_bmsr_100hdx;
1438 		break;
1439 	case ETHER_STAT_CAP_10FDX:
1440 		*val = param_bmsr_10fdx;
1441 		break;
1442 	case ETHER_STAT_CAP_10HDX:
1443 		*val = param_bmsr_10hdx;
1444 		break;
1445 	case ETHER_STAT_CAP_AUTONEG:
1446 		*val = param_bmsr_ancap;
1447 		break;
1448 	case ETHER_STAT_CAP_ASMPAUSE:
1449 		*val = param_bmsr_asm_dir;
1450 		break;
1451 	case ETHER_STAT_CAP_PAUSE:
1452 		*val = param_bmsr_pause;
1453 		break;
1454 	case ETHER_STAT_ADV_CAP_100FDX:
1455 		*val = param_anar_100fdx;
1456 		break;
1457 	case ETHER_STAT_ADV_CAP_100HDX:
1458 		*val = param_anar_100hdx;
1459 		break;
1460 	case ETHER_STAT_ADV_CAP_10FDX:
1461 		*val = param_anar_10fdx;
1462 		break;
1463 	case ETHER_STAT_ADV_CAP_10HDX:
1464 		*val = param_anar_10hdx;
1465 		break;
1466 	case ETHER_STAT_ADV_CAP_AUTONEG:
1467 		*val = param_autoneg;
1468 		break;
1469 	case ETHER_STAT_ADV_CAP_ASMPAUSE:
1470 		*val = param_anar_asm_dir;
1471 		break;
1472 	case ETHER_STAT_ADV_CAP_PAUSE:
1473 		*val = param_anar_pause;
1474 		break;
1475 	case ETHER_STAT_LP_CAP_100FDX:
1476 		*val = param_anlpar_100fdx;
1477 		break;
1478 	case ETHER_STAT_LP_CAP_100HDX:
1479 		*val = param_anlpar_100hdx;
1480 		break;
1481 	case ETHER_STAT_LP_CAP_10FDX:
1482 		*val = param_anlpar_10fdx;
1483 		break;
1484 	case ETHER_STAT_LP_CAP_10HDX:
1485 		*val = param_anlpar_10hdx;
1486 		break;
1487 	case ETHER_STAT_LP_CAP_AUTONEG:
1488 		*val = param_aner_lpancap;
1489 		break;
1490 	case ETHER_STAT_LP_CAP_ASMPAUSE:
1491 		*val = param_anlpar_pauseTX;
1492 		break;
1493 	case ETHER_STAT_LP_CAP_PAUSE:
1494 		*val = param_anlpar_pauseRX;
1495 		break;
1496 	case ETHER_STAT_LINK_PAUSE:
1497 		*val = esp->pausing;
1498 		break;
1499 	case ETHER_STAT_LINK_ASMPAUSE:
1500 		*val = param_anar_asm_dir &&
1501 		    param_anlpar_pauseTX &&
1502 		    (param_anar_pause != param_anlpar_pauseRX);
1503 		break;
1504 	case ETHER_STAT_LINK_AUTONEG:
1505 		*val = param_autoneg && param_aner_lpancap;
1506 		break;
1507 	}
1508 	return (0);
1509 }
1510 
1511 /*
1512  * Hardware Functions
1513  * New Section
1514  */
1515 
1516 /*
1517  * Initialize the MAC registers. Some of of the MAC  registers are initialized
1518  * just once since  Global Reset or MAC reset doesn't clear them. Others (like
1519  * Host MAC Address Registers) are cleared on every reset and have to be
1520  * reinitialized.
1521  */
1522 static void
eri_init_macregs_generic(struct eri * erip)1523 eri_init_macregs_generic(struct eri *erip)
1524 {
1525 	/*
1526 	 * set up the MAC parameter registers once
1527 	 * after power cycle. SUSPEND/RESUME also requires
1528 	 * setting these registers.
1529 	 */
1530 	if ((erip->stats.inits == 1) || (erip->init_macregs)) {
1531 		erip->init_macregs = 0;
1532 		PUT_MACREG(ipg0, param_ipg0);
1533 		PUT_MACREG(ipg1, param_ipg1);
1534 		PUT_MACREG(ipg2, param_ipg2);
1535 		PUT_MACREG(macmin, BMAC_MIN_FRAME_SIZE);
1536 #ifdef	ERI_RX_TAG_ERROR_WORKAROUND
1537 		PUT_MACREG(macmax, BMAC_MAX_FRAME_SIZE_TAG | BMAC_MAX_BURST);
1538 #else
1539 		PUT_MACREG(macmax, BMAC_MAX_FRAME_SIZE | BMAC_MAX_BURST);
1540 #endif
1541 		PUT_MACREG(palen, BMAC_PREAMBLE_SIZE);
1542 		PUT_MACREG(jam, BMAC_JAM_SIZE);
1543 		PUT_MACREG(alimit, BMAC_ATTEMPT_LIMIT);
1544 		PUT_MACREG(macctl_type, BMAC_CONTROL_TYPE);
1545 		PUT_MACREG(rseed,
1546 		    ((erip->ouraddr[0] & 0x3) << 8) | erip->ouraddr[1]);
1547 
1548 		PUT_MACREG(madd3, BMAC_ADDRESS_3);
1549 		PUT_MACREG(madd4, BMAC_ADDRESS_4);
1550 		PUT_MACREG(madd5, BMAC_ADDRESS_5);
1551 
1552 		/* Program MAC Control address */
1553 		PUT_MACREG(madd6, BMAC_ADDRESS_6);
1554 		PUT_MACREG(madd7, BMAC_ADDRESS_7);
1555 		PUT_MACREG(madd8, BMAC_ADDRESS_8);
1556 
1557 		PUT_MACREG(afr0, BMAC_AF_0);
1558 		PUT_MACREG(afr1, BMAC_AF_1);
1559 		PUT_MACREG(afr2, BMAC_AF_2);
1560 		PUT_MACREG(afmr1_2, BMAC_AF21_MASK);
1561 		PUT_MACREG(afmr0, BMAC_AF0_MASK);
1562 	}
1563 
1564 	/* The counters need to be zeroed */
1565 	PUT_MACREG(nccnt, 0);
1566 	PUT_MACREG(fccnt, 0);
1567 	PUT_MACREG(excnt, 0);
1568 	PUT_MACREG(ltcnt, 0);
1569 	PUT_MACREG(dcnt,  0);
1570 	PUT_MACREG(frcnt, 0);
1571 	PUT_MACREG(lecnt, 0);
1572 	PUT_MACREG(aecnt, 0);
1573 	PUT_MACREG(fecnt, 0);
1574 	PUT_MACREG(rxcv,  0);
1575 
1576 	if (erip->pauseTX)
1577 		PUT_MACREG(spcmd, BMAC_SEND_PAUSE_CMD);
1578 	else
1579 		PUT_MACREG(spcmd, 0);
1580 
1581 	/*
1582 	 * Program BigMAC with local individual ethernet address.
1583 	 */
1584 
1585 	PUT_MACREG(madd0, (erip->ouraddr[4] << 8) | erip->ouraddr[5]);
1586 	PUT_MACREG(madd1, (erip->ouraddr[2] << 8) | erip->ouraddr[3]);
1587 	PUT_MACREG(madd2, (erip->ouraddr[0] << 8) | erip->ouraddr[1]);
1588 
1589 	/*
1590 	 * Install multicast address filter.
1591 	 */
1592 
1593 	PUT_MACREG(hash0, erip->ladrf[0]);
1594 	PUT_MACREG(hash1, erip->ladrf[1]);
1595 	PUT_MACREG(hash2, erip->ladrf[2]);
1596 	PUT_MACREG(hash3, erip->ladrf[3]);
1597 	PUT_MACREG(hash4, erip->ladrf[4]);
1598 	PUT_MACREG(hash5, erip->ladrf[5]);
1599 	PUT_MACREG(hash6, erip->ladrf[6]);
1600 	PUT_MACREG(hash7, erip->ladrf[7]);
1601 	PUT_MACREG(hash8, erip->ladrf[8]);
1602 	PUT_MACREG(hash9, erip->ladrf[9]);
1603 	PUT_MACREG(hash10, erip->ladrf[10]);
1604 	PUT_MACREG(hash11, erip->ladrf[11]);
1605 	PUT_MACREG(hash12, erip->ladrf[12]);
1606 	PUT_MACREG(hash13, erip->ladrf[13]);
1607 	PUT_MACREG(hash14, erip->ladrf[14]);
1608 }
1609 
1610 static int
eri_flush_rxbufs(struct eri * erip)1611 eri_flush_rxbufs(struct eri *erip)
1612 {
1613 	uint_t	i;
1614 	int	status = 0;
1615 	/*
1616 	 * Free and dvma_unload pending recv buffers.
1617 	 * Maintaining the 1-to-1 ordered sequence of
1618 	 * dvma_load() followed by dvma_unload() is critical.
1619 	 * Always unload anything before loading it again.
1620 	 * Never unload anything twice.  Always unload
1621 	 * before freeing the buffer.  We satisfy these
1622 	 * requirements by unloading only those descriptors
1623 	 * which currently have an mblk associated with them.
1624 	 */
1625 	for (i = 0; i < ERI_RPENDING; i++) {
1626 		if (erip->rmblkp[i]) {
1627 			if (erip->eri_dvmarh)
1628 				dvma_unload(erip->eri_dvmarh, 2 * i,
1629 				    DDI_DMA_SYNC_FORCPU);
1630 			else if ((ddi_dma_unbind_handle(erip->ndmarh[i]) ==
1631 			    DDI_FAILURE))
1632 				status = -1;
1633 			freeb(erip->rmblkp[i]);
1634 			erip->rmblkp[i] = NULL;
1635 		}
1636 	}
1637 	return (status);
1638 }
1639 
1640 static void
eri_init_txbufs(struct eri * erip)1641 eri_init_txbufs(struct eri *erip)
1642 {
1643 	/*
1644 	 * Clear TX descriptors.
1645 	 */
1646 	bzero((caddr_t)erip->eri_tmdp, ERI_TPENDING * sizeof (struct eri_tmd));
1647 
1648 	/*
1649 	 * sync TXDMA descriptors.
1650 	 */
1651 	ERI_SYNCIOPB(erip, erip->eri_tmdp,
1652 	    (ERI_TPENDING * sizeof (struct eri_tmd)), DDI_DMA_SYNC_FORDEV);
1653 	/*
1654 	 * Reset TMD 'walking' pointers.
1655 	 */
1656 	erip->tcurp = erip->eri_tmdp;
1657 	erip->tnextp = erip->eri_tmdp;
1658 	erip->tx_cur_cnt = 0;
1659 	erip->tx_kick = 0;
1660 	erip->tx_completion = 0;
1661 }
1662 
1663 static int
eri_init_rxbufs(struct eri * erip)1664 eri_init_rxbufs(struct eri *erip)
1665 {
1666 
1667 	ddi_dma_cookie_t	dma_cookie;
1668 	mblk_t			*bp;
1669 	int			i, status = 0;
1670 	uint32_t		ccnt;
1671 
1672 	/*
1673 	 * clear rcv descriptors
1674 	 */
1675 	bzero((caddr_t)erip->rmdp, ERI_RPENDING * sizeof (struct rmd));
1676 
1677 	for (i = 0; i < ERI_RPENDING; i++) {
1678 		if ((bp = eri_allocb(ERI_BUFSIZE)) == NULL) {
1679 			status = -1;
1680 			continue;
1681 		}
1682 		/* Load data buffer to DVMA space */
1683 		if (erip->eri_dvmarh)
1684 			dvma_kaddr_load(erip->eri_dvmarh,
1685 			    (caddr_t)bp->b_rptr, ERI_BUFSIZE,
1686 			    2 * i, &dma_cookie);
1687 /*
1688  *		Bind data buffer to DMA handle
1689  */
1690 		else if (ddi_dma_addr_bind_handle(erip->ndmarh[i], NULL,
1691 		    (caddr_t)bp->b_rptr, ERI_BUFSIZE,
1692 		    DDI_DMA_READ | DDI_DMA_CONSISTENT, DDI_DMA_DONTWAIT, 0,
1693 		    &dma_cookie, &ccnt) != DDI_DMA_MAPPED)
1694 			status = -1;
1695 
1696 		PUT_RMD((&erip->rmdp[i]), dma_cookie);
1697 		erip->rmblkp[i] = bp;	/* save for later use */
1698 	}
1699 
1700 	/*
1701 	 * sync RXDMA descriptors.
1702 	 */
1703 	ERI_SYNCIOPB(erip, erip->rmdp, (ERI_RPENDING * sizeof (struct rmd)),
1704 	    DDI_DMA_SYNC_FORDEV);
1705 	/*
1706 	 * Reset RMD 'walking' pointers.
1707 	 */
1708 	erip->rnextp = erip->rmdp;
1709 	erip->rx_completion = 0;
1710 	erip->rx_kick = ERI_RPENDING - 4;
1711 	return (status);
1712 }
1713 
1714 static uint32_t
eri_txmac_disable(struct eri * erip)1715 eri_txmac_disable(struct eri *erip)
1716 {
1717 	int	n;
1718 
1719 	PUT_MACREG(txcfg, GET_MACREG(txcfg) & ~BMAC_TXCFG_ENAB);
1720 	n = (BMACTXRSTDELAY * 10) / ERI_WAITPERIOD;
1721 
1722 	while (--n > 0) {
1723 		drv_usecwait(ERI_WAITPERIOD);
1724 		if ((GET_MACREG(txcfg) & 1) == 0)
1725 			return (0);
1726 	}
1727 	return (1);
1728 }
1729 
1730 static uint32_t
eri_rxmac_disable(struct eri * erip)1731 eri_rxmac_disable(struct eri *erip)
1732 {
1733 	int	n;
1734 	PUT_MACREG(rxcfg, GET_MACREG(rxcfg) & ~BMAC_RXCFG_ENAB);
1735 	n = BMACRXRSTDELAY / ERI_WAITPERIOD;
1736 
1737 	while (--n > 0) {
1738 		drv_usecwait(ERI_WAITPERIOD);
1739 		if ((GET_MACREG(rxcfg) & 1) == 0)
1740 			return (0);
1741 	}
1742 	return (1);
1743 }
1744 
1745 /*
1746  * Return 0 upon success, 1 on failure.
1747  */
1748 static int
eri_stop(struct eri * erip)1749 eri_stop(struct eri *erip)
1750 {
1751 	(void) eri_erx_reset(erip);
1752 	(void) eri_etx_reset(erip);
1753 
1754 	/*
1755 	 * set up cache line to 16 for 64 bytes of pci burst size
1756 	 */
1757 	PUT_SWRSTREG(reset, ERI_G_RESET_GLOBAL | ERI_CACHE_LINE_SIZE);
1758 
1759 	if (erip->linkcheck) {
1760 		erip->linkcheck = 0;
1761 		erip->global_reset_issued = 2;
1762 	} else {
1763 		param_linkup = 0;
1764 		erip->stats.link_up = LINK_STATE_DOWN;
1765 		erip->stats.link_duplex = LINK_DUPLEX_UNKNOWN;
1766 		erip->global_reset_issued = -1;
1767 	}
1768 
1769 	ERI_DELAY((GET_SWRSTREG(reset) == ERI_CACHE_LINE_SIZE),
1770 	    ERI_MAX_RST_DELAY);
1771 	erip->rx_reset_issued = -1;
1772 	erip->tx_reset_issued = -1;
1773 
1774 	/*
1775 	 * workaround for RIO not resetting the interrupt mask
1776 	 * register to default value 0xffffffff.
1777 	 */
1778 	PUT_GLOBREG(intmask, ERI_G_MASK_ALL);
1779 
1780 	if (GET_SWRSTREG(reset) == ERI_CACHE_LINE_SIZE) {
1781 		return (0);
1782 	} else {
1783 		return (1);
1784 	}
1785 }
1786 
1787 /*
1788  * Reset Just the RX Portion
1789  * Return 0 upon success, 1 on failure.
1790  *
1791  * Resetting the rxdma while there is a rx dma transaction going on the
1792  * bus, will cause bus hang or parity errors. To avoid this, we would first
1793  * disable the rxdma by clearing the ENABLE bit (bit 0). To make sure it is
1794  * disabled, we will poll it until it realy clears. Furthermore, to verify
1795  * any RX DMA activity is subsided, we delay for 5 msec.
1796  */
1797 static uint32_t
eri_erx_reset(struct eri * erip)1798 eri_erx_reset(struct eri *erip)
1799 {
1800 	(void) eri_rxmac_disable(erip); /* Disable the RX MAC */
1801 
1802 	/* Disable the RX DMA */
1803 	PUT_ERXREG(config, GET_ERXREG(config) & ~GET_CONFIG_RXDMA_EN);
1804 	ERI_DELAY(((GET_ERXREG(config) &  1) == 0), ERI_MAX_RST_DELAY);
1805 	if ((GET_ERXREG(config) & 1) != 0)
1806 		ERI_FAULT_MSG1(erip, SEVERITY_LOW, ERI_VERB_MSG,
1807 		    disable_erx_msg);
1808 
1809 	drv_usecwait(5000); /* Delay to insure no RX DMA activity */
1810 
1811 	PUT_SWRSTREG(reset, ERI_G_RESET_ERX | ERI_CACHE_LINE_SIZE);
1812 	/*
1813 	 * Wait until the reset is completed which is indicated by
1814 	 * the reset bit cleared or time out..
1815 	 */
1816 	ERI_DELAY(((GET_SWRSTREG(reset) & (ERI_G_RESET_ERX)) ==
1817 	    ERI_CACHE_LINE_SIZE), ERI_MAX_RST_DELAY);
1818 	erip->rx_reset_issued = -1;
1819 
1820 	return ((GET_SWRSTREG(reset) & (ERI_G_RESET_ERX)) ? 1 : 0);
1821 }
1822 
1823 /*
1824  * Reset Just the TX Portion
1825  * Return 0 upon success, 1 on failure.
1826  * Resetting the txdma while there is a tx dma transaction on the bus, may cause
1827  * bus hang or parity errors. To avoid this we would first disable the txdma by
1828  * clearing the ENABLE bit (bit 0). To make sure it is disabled, we will poll
1829  * it until it realy clears. Furthermore, to any TX DMA activity is subsided,
1830  * we delay for 1 msec.
1831  */
1832 static uint32_t
eri_etx_reset(struct eri * erip)1833 eri_etx_reset(struct eri *erip)
1834 {
1835 	(void) eri_txmac_disable(erip);
1836 
1837 	/* Disable the TX DMA */
1838 	PUT_ETXREG(config, GET_ETXREG(config) & ~GET_CONFIG_TXDMA_EN);
1839 #ifdef ORIG
1840 	ERI_DELAY(((GET_ETXREG(config) &  1) == 0), ERI_MAX_RST_DELAY);
1841 	if ((GET_ETXREG(config) &  1) != 0)
1842 		ERI_FAULT_MSG1(erip, SEVERITY_LOW, ERI_VERB_MSG,
1843 		    disable_etx_msg);
1844 	drv_usecwait(5000); /* Delay  to ensure DMA completed (if any). */
1845 #endif
1846 	drv_usecwait(5000); /* Delay  to ensure DMA completed (if any). */
1847 	ERI_DELAY(((GET_ETXREG(config) &  1) == 0), ERI_MAX_RST_DELAY);
1848 	if ((GET_ETXREG(config) &  1) != 0)
1849 		ERI_FAULT_MSG1(erip, SEVERITY_LOW, ERI_VERB_MSG,
1850 		    disable_etx_msg);
1851 
1852 	PUT_SWRSTREG(reset, ERI_G_RESET_ETX | ERI_CACHE_LINE_SIZE);
1853 
1854 	/*
1855 	 * Wait until the reset is completed which is indicated by the reset bit
1856 	 * cleared or time out..
1857 	 */
1858 	ERI_DELAY(((GET_SWRSTREG(reset) & (ERI_G_RESET_ETX)) ==
1859 	    ERI_CACHE_LINE_SIZE), ERI_MAX_RST_DELAY);
1860 	erip->tx_reset_issued = -1;
1861 
1862 	if (GET_SWRSTREG(reset) &  (ERI_G_RESET_ETX)) {
1863 		return (1);
1864 	} else
1865 		return (0);
1866 }
1867 
1868 
1869 /*
1870  * Initialize the TX DMA registers and Enable the TX DMA.
1871  */
1872 static uint32_t
eri_init_txregs(struct eri * erip)1873 eri_init_txregs(struct eri *erip)
1874 {
1875 
1876 	uint32_t	i;
1877 	uint64_t	tx_ring;
1878 
1879 	/*
1880 	 * Initialize ETX Registers:
1881 	 * config, txring_lo, txring_hi
1882 	 */
1883 	tx_ring = ERI_IOPBIOADDR(erip, erip->eri_tmdp);
1884 	PUT_ETXREG(txring_lo, (uint32_t)(tx_ring));
1885 	PUT_ETXREG(txring_hi, (uint32_t)(tx_ring >> 32));
1886 
1887 	/*
1888 	 * Get TX Ring Size Masks.
1889 	 * The ring size ERI_TPENDING is defined in eri_mac.h.
1890 	 */
1891 	switch (ERI_TPENDING) {
1892 	case 32: i = ETX_RINGSZ_32;
1893 		break;
1894 	case 64: i = ETX_RINGSZ_64;
1895 		break;
1896 	case 128: i = ETX_RINGSZ_128;
1897 		break;
1898 	case 256: i = ETX_RINGSZ_256;
1899 		break;
1900 	case 512: i = ETX_RINGSZ_512;
1901 		break;
1902 	case 1024: i = ETX_RINGSZ_1024;
1903 		break;
1904 	case 2048: i = ETX_RINGSZ_2048;
1905 		break;
1906 	case 4096: i = ETX_RINGSZ_4096;
1907 		break;
1908 	default:
1909 		ERI_FAULT_MSG2(erip, SEVERITY_HIGH, ERI_VERB_MSG,
1910 		    unk_tx_descr_sze_msg, ERI_TPENDING);
1911 		return (1);
1912 	}
1913 
1914 	i <<= ERI_TX_RINGSZ_SHIFT;
1915 	PUT_ETXREG(config, ETX_CONFIG_THRESHOLD | i);
1916 	ENABLE_TXDMA(erip);
1917 	ENABLE_MAC(erip);
1918 	return (0);
1919 }
1920 
1921 
1922 /*
1923  * Initialize the RX DMA registers and Enable the RX DMA.
1924  */
1925 static uint32_t
eri_init_rxregs(struct eri * erip)1926 eri_init_rxregs(struct eri *erip)
1927 {
1928 	int i;
1929 	uint64_t	rx_ring;
1930 
1931 	/*
1932 	 * Initialize ERX Registers:
1933 	 * rxring_lo, rxring_hi, config, rx_blanking, rx_pause_threshold.
1934 	 * Also, rx_kick
1935 	 * Read and save rxfifo_size.
1936 	 * XXX: Use this to properly configure PAUSE threshold values.
1937 	 */
1938 	rx_ring = ERI_IOPBIOADDR(erip, erip->rmdp);
1939 	PUT_ERXREG(rxring_lo, (uint32_t)(rx_ring));
1940 	PUT_ERXREG(rxring_hi, (uint32_t)(rx_ring >> 32));
1941 	PUT_ERXREG(rx_kick, erip->rx_kick);
1942 
1943 	/*
1944 	 * The Max ring size, ERI_RMDMAX is defined in eri_mac.h.
1945 	 * More ERI_RPENDING will provide better performance but requires more
1946 	 * system DVMA memory.
1947 	 * eri_rx_ring_size can be used to tune this value from /etc/system
1948 	 * eri_rx_ring_size cannot be NDD'able due to non-recoverable errors
1949 	 * which cannot be detected from NDD operations
1950 	 */
1951 
1952 	/*
1953 	 * get the rxring size bits
1954 	 */
1955 	switch (ERI_RPENDING) {
1956 	case 32: i = ERX_RINGSZ_32;
1957 		break;
1958 	case 64: i = ERX_RINGSZ_64;
1959 		break;
1960 	case 128: i = ERX_RINGSZ_128;
1961 		break;
1962 	case 256: i = ERX_RINGSZ_256;
1963 		break;
1964 	case 512: i = ERX_RINGSZ_512;
1965 		break;
1966 	case 1024: i = ERX_RINGSZ_1024;
1967 		break;
1968 	case 2048: i = ERX_RINGSZ_2048;
1969 		break;
1970 	case 4096: i = ERX_RINGSZ_4096;
1971 		break;
1972 	default:
1973 		ERI_FAULT_MSG2(erip, SEVERITY_HIGH, ERI_VERB_MSG,
1974 		    unk_rx_descr_sze_msg, ERI_RPENDING);
1975 		return (1);
1976 	}
1977 
1978 	i <<= ERI_RX_RINGSZ_SHIFT;
1979 	i |=  (ERI_FSTBYTE_OFFSET << ERI_RX_CONFIG_FBO_SHIFT) |
1980 	    (ETHERHEADER_SIZE << ERI_RX_CONFIG_RX_CSSTART_SHIFT) |
1981 	    (ERI_RX_FIFOTH_1024 << ERI_RX_CONFIG_RXFIFOTH_SHIFT);
1982 
1983 	PUT_ERXREG(config, i);
1984 	PUT_ERXREG(rx_blanking,
1985 	    (param_intr_blank_time << ERI_RX_BLNK_INTR_TIME_SHIFT) |
1986 	    param_intr_blank_packets);
1987 
1988 	PUT_ERXREG(rx_pause_threshold, rx_pause_threshold);
1989 	erip->rxfifo_size = GET_ERXREG(rxfifo_size);
1990 	ENABLE_RXDMA(erip);
1991 	return (0);
1992 }
1993 
1994 static int
eri_freebufs(struct eri * erip)1995 eri_freebufs(struct eri *erip)
1996 {
1997 	int status = 0;
1998 
1999 	status = eri_flush_rxbufs(erip);
2000 	return (status);
2001 }
2002 
2003 static void
eri_update_rxbufs(struct eri * erip)2004 eri_update_rxbufs(struct eri *erip)
2005 {
2006 	int		i;
2007 	volatile struct rmd  *rmdp, *rmdpbase;
2008 
2009 	/*
2010 	 * Hang out receive buffers.
2011 	 */
2012 	rmdpbase = erip->rmdp;
2013 	for (i = 0; i < ERI_RPENDING; i++) {
2014 		rmdp = rmdpbase + i;
2015 		UPDATE_RMD(rmdp);
2016 	}
2017 
2018 	/*
2019 	 * sync RXDMA descriptors.
2020 	 */
2021 	ERI_SYNCIOPB(erip, erip->rmdp, (ERI_RPENDING * sizeof (struct rmd)),
2022 	    DDI_DMA_SYNC_FORDEV);
2023 	/*
2024 	 * Reset RMD 'walking' pointers.
2025 	 */
2026 	erip->rnextp =	erip->rmdp;
2027 	erip->rx_completion = 0;
2028 	erip->rx_kick =	ERI_RPENDING - 4;
2029 }
2030 
2031 /*
2032  * This routine is used to reset the RX DMA only. In the case of RX
2033  * failures such as RX Tag Error, RX hang etc... we don't want to
2034  * do global reset which takes down the link and clears the FIFO's
2035  * By doing RX only reset, we leave the TX and the link intact.
2036  */
2037 static uint32_t
eri_init_rx_channel(struct eri * erip)2038 eri_init_rx_channel(struct eri *erip)
2039 {
2040 	erip->flags &= ~ERI_RXINIT;
2041 	(void) eri_erx_reset(erip);
2042 	eri_update_rxbufs(erip);
2043 	if (eri_init_rxregs(erip))
2044 		return (1);
2045 	PUT_MACREG(rxmask, BMAC_RXINTR_MASK);
2046 	PUT_MACREG(rxcfg, GET_MACREG(rxcfg) | BMAC_RXCFG_ENAB);
2047 	erip->rx_reset_issued = 0;
2048 	HSTAT(erip, rx_inits);
2049 	erip->flags |= ERI_RXINIT;
2050 	return (0);
2051 }
2052 
2053 static void
eri_init_rx(struct eri * erip)2054 eri_init_rx(struct eri *erip)
2055 {
2056 	uint16_t	*ladrf;
2057 
2058 	/*
2059 	 * First of all make sure the Receive MAC is stop.
2060 	 */
2061 	(void) eri_rxmac_disable(erip); /* Disable the RX MAC */
2062 
2063 	/*
2064 	 * Program BigMAC with local individual ethernet address.
2065 	 */
2066 
2067 	PUT_MACREG(madd0, (erip->ouraddr[4] << 8) | erip->ouraddr[5]);
2068 	PUT_MACREG(madd1, (erip->ouraddr[2] << 8) | erip->ouraddr[3]);
2069 	PUT_MACREG(madd2, (erip->ouraddr[0] << 8) | erip->ouraddr[1]);
2070 
2071 	/*
2072 	 * Set up multicast address filter by passing all multicast
2073 	 * addresses through a crc generator, and then using the
2074 	 * low order 8 bits as a index into the 256 bit logical
2075 	 * address filter. The high order four bits select the word,
2076 	 * while the rest of the bits select the bit within the word.
2077 	 */
2078 
2079 	ladrf = erip->ladrf;
2080 
2081 	PUT_MACREG(hash0, ladrf[0]);
2082 	PUT_MACREG(hash1, ladrf[1]);
2083 	PUT_MACREG(hash2, ladrf[2]);
2084 	PUT_MACREG(hash3, ladrf[3]);
2085 	PUT_MACREG(hash4, ladrf[4]);
2086 	PUT_MACREG(hash5, ladrf[5]);
2087 	PUT_MACREG(hash6, ladrf[6]);
2088 	PUT_MACREG(hash7, ladrf[7]);
2089 	PUT_MACREG(hash8, ladrf[8]);
2090 	PUT_MACREG(hash9, ladrf[9]);
2091 	PUT_MACREG(hash10, ladrf[10]);
2092 	PUT_MACREG(hash11, ladrf[11]);
2093 	PUT_MACREG(hash12, ladrf[12]);
2094 	PUT_MACREG(hash13, ladrf[13]);
2095 	PUT_MACREG(hash14, ladrf[14]);
2096 	PUT_MACREG(hash15, ladrf[15]);
2097 
2098 #ifdef ERI_DONT_STRIP_CRC
2099 	PUT_MACREG(rxcfg,
2100 	    ((erip->promisc ? BMAC_RXCFG_PROMIS : 0) |
2101 	    (erip->multi_refcnt ? BMAC_RXCFG_HASH : 0) |
2102 	    BMAC_RXCFG_ENAB));
2103 #else
2104 	PUT_MACREG(rxcfg,
2105 	    ((erip->promisc ? BMAC_RXCFG_PROMIS : 0) |
2106 	    (erip->multi_refcnt ? BMAC_RXCFG_HASH : 0) |
2107 	    BMAC_RXCFG_ENAB | BMAC_RXCFG_STRIP_CRC));
2108 #endif
2109 	/* wait after setting Hash Enable bit */
2110 	/* drv_usecwait(10); */
2111 
2112 	HSTAT(erip, rx_inits);
2113 }
2114 
2115 /*
2116  * This routine is used to init the TX MAC only.
2117  *	&erip->xmitlock is held before calling this routine.
2118  */
2119 void
eri_init_txmac(struct eri * erip)2120 eri_init_txmac(struct eri *erip)
2121 {
2122 	uint32_t carrier_ext = 0;
2123 
2124 	erip->flags &= ~ERI_TXINIT;
2125 	/*
2126 	 * Stop the Transmit MAC.
2127 	 */
2128 	(void) eri_txmac_disable(erip);
2129 
2130 	/*
2131 	 * Must be Internal Transceiver
2132 	 */
2133 	if (param_mode)
2134 		PUT_MACREG(xifc, ((param_transceiver == EXTERNAL_XCVR ?
2135 		    BMAC_XIFC_MIIBUF_OE : 0) | BMAC_XIFC_TX_MII_OE));
2136 	else
2137 		PUT_MACREG(xifc, ((param_transceiver == EXTERNAL_XCVR ?
2138 		    BMAC_XIFC_MIIBUF_OE : 0) | BMAC_XIFC_TX_MII_OE |
2139 		    BMAC_XIFC_DIS_ECHO));
2140 
2141 	/*
2142 	 * Initialize the interpacket gap registers
2143 	 */
2144 	PUT_MACREG(ipg1, param_ipg1);
2145 	PUT_MACREG(ipg2, param_ipg2);
2146 
2147 	if (erip->ngu_enable)
2148 		PUT_MACREG(txcfg, ((param_mode ? BMAC_TXCFG_FDX: 0) |
2149 		    ((param_lance_mode && (erip->lance_mode_enable)) ?
2150 		    BMAC_TXCFG_ENIPG0 : 0) |
2151 		    (carrier_ext ? BMAC_TXCFG_CARR_EXT : 0) |
2152 		    BMAC_TXCFG_NGU));
2153 	else
2154 		PUT_MACREG(txcfg, ((param_mode ? BMAC_TXCFG_FDX: 0) |
2155 		    ((param_lance_mode && (erip->lance_mode_enable)) ?
2156 		    BMAC_TXCFG_ENIPG0 : 0) |
2157 		    (carrier_ext ? BMAC_TXCFG_CARR_EXT : 0)));
2158 
2159 	ENABLE_TXDMA(erip);
2160 	ENABLE_TXMAC(erip);
2161 
2162 	HSTAT(erip, tx_inits);
2163 	erip->flags |= ERI_TXINIT;
2164 }
2165 
2166 static void
eri_unallocthings(struct eri * erip)2167 eri_unallocthings(struct eri *erip)
2168 {
2169 	uint32_t	flag;
2170 	uint32_t	i;
2171 
2172 	flag = erip->alloc_flag;
2173 
2174 	if (flag & ERI_DESC_MEM_MAP)
2175 		(void) ddi_dma_unbind_handle(erip->md_h);
2176 
2177 	if (flag & ERI_DESC_MEM_ALLOC) {
2178 		ddi_dma_mem_free(&erip->mdm_h);
2179 		erip->rmdp = NULL;
2180 		erip->eri_tmdp = NULL;
2181 	}
2182 
2183 	if (flag & ERI_DESC_HANDLE_ALLOC)
2184 		ddi_dma_free_handle(&erip->md_h);
2185 
2186 	(void) eri_freebufs(erip);
2187 
2188 	if (flag & ERI_RCV_HANDLE_ALLOC)
2189 		for (i = 0; i < erip->rcv_handle_cnt; i++)
2190 			ddi_dma_free_handle(&erip->ndmarh[i]);
2191 
2192 	if (flag & ERI_RCV_DVMA_ALLOC) {
2193 		(void) dvma_release(erip->eri_dvmarh);
2194 		erip->eri_dvmarh = NULL;
2195 	}
2196 
2197 	if (flag & ERI_XBUFS_KMEM_DMABIND) {
2198 		(void) ddi_dma_unbind_handle(erip->tbuf_handle);
2199 		erip->tbuf_ioaddr = 0;
2200 	}
2201 
2202 	if (flag & ERI_XBUFS_KMEM_ALLOC) {
2203 		ddi_dma_mem_free(&erip->tbuf_acch);
2204 		erip->tbuf_kaddr = NULL;
2205 	}
2206 
2207 	if (flag & ERI_XBUFS_HANDLE_ALLOC) {
2208 		ddi_dma_free_handle(&erip->tbuf_handle);
2209 		erip->tbuf_handle = NULL;
2210 	}
2211 
2212 }
2213 
2214 /*
2215  * Initialize channel.
2216  * Return true on success, false on error.
2217  *
2218  * The recommended sequence for initialization is:
2219  * 1. Issue a Global Reset command to the Ethernet Channel.
2220  * 2. Poll the Global_Reset bits until the execution of the reset has been
2221  *    completed.
2222  * 2(a). Use the MIF Frame/Output register to reset the transceiver.
2223  *	 Poll Register 0 to till the Resetbit is 0.
2224  * 2(b). Use the MIF Frame/Output register to set the PHY in in Normal-Op,
2225  *	 100Mbps and Non-Isolated mode. The main point here is to bring the
2226  *	 PHY out of Isolate mode so that it can generate the rx_clk and tx_clk
2227  *	 to the MII interface so that the Bigmac core can correctly reset
2228  *	 upon a software reset.
2229  * 2(c).  Issue another Global Reset command to the Ethernet Channel and poll
2230  *	  the Global_Reset bits till completion.
2231  * 3. Set up all the data structures in the host memory.
2232  * 4. Program the TX_MAC registers/counters (excluding the TX_MAC Configuration
2233  *    Register).
2234  * 5. Program the RX_MAC registers/counters (excluding the RX_MAC Configuration
2235  *    Register).
2236  * 6. Program the Transmit Descriptor Ring Base Address in the ETX.
2237  * 7. Program the Receive Descriptor Ring Base Address in the ERX.
2238  * 8. Program the Global Configuration and the Global Interrupt Mask Registers.
2239  * 9. Program the ETX Configuration register (enable the Transmit DMA channel).
2240  * 10. Program the ERX Configuration register (enable the Receive DMA channel).
2241  * 11. Program the XIF Configuration Register (enable the XIF).
2242  * 12. Program the RX_MAC Configuration Register (Enable the RX_MAC).
2243  * 13. Program the TX_MAC Configuration Register (Enable the TX_MAC).
2244  */
2245 /*
2246  * lock order:
2247  *	intrlock->linklock->xmitlock->xcvrlock
2248  */
2249 static boolean_t
eri_init(struct eri * erip)2250 eri_init(struct eri *erip)
2251 {
2252 	uint32_t	init_stat = 0;
2253 	uint32_t	partial_init = 0;
2254 	uint32_t	carrier_ext = 0;
2255 	uint32_t	mac_ctl = 0;
2256 	boolean_t	ret;
2257 	uint32_t	link_timeout = ERI_LINKCHECK_TIMER;
2258 	link_state_t	linkupdate = LINK_STATE_UNKNOWN;
2259 
2260 	/*
2261 	 * Just return successfully if device is suspended.
2262 	 * eri_init() will be called again from resume.
2263 	 */
2264 	ASSERT(erip != NULL);
2265 
2266 	if (erip->flags & ERI_SUSPENDED) {
2267 		ret = B_TRUE;
2268 		goto init_exit;
2269 	}
2270 
2271 	mutex_enter(&erip->intrlock);
2272 	eri_stop_timer(erip);	/* acquire linklock */
2273 	mutex_enter(&erip->xmitlock);
2274 	erip->flags &= (ERI_DLPI_LINKUP | ERI_STARTED);
2275 	erip->wantw = B_FALSE;
2276 	HSTAT(erip, inits);
2277 	erip->txhung = 0;
2278 
2279 	if ((erip->stats.inits > 1) && (erip->init_macregs == 0))
2280 		eri_savecntrs(erip);
2281 
2282 	mutex_enter(&erip->xcvrlock);
2283 	if (!param_linkup || erip->linkcheck) {
2284 		if (!erip->linkcheck)
2285 			linkupdate = LINK_STATE_DOWN;
2286 		(void) eri_stop(erip);
2287 	}
2288 	if (!(erip->flags & ERI_DLPI_LINKUP) || !param_linkup) {
2289 		erip->flags |= ERI_DLPI_LINKUP;
2290 		eri_mif_poll(erip, MIF_POLL_STOP);
2291 		(void) eri_new_xcvr(erip);
2292 		ERI_DEBUG_MSG1(erip, XCVR_MSG, "New transceiver detected.");
2293 		if (param_transceiver != NO_XCVR) {
2294 			/*
2295 			 * Reset the new PHY and bring up the
2296 			 * link
2297 			 */
2298 			if (eri_reset_xcvr(erip)) {
2299 				ERI_FAULT_MSG1(erip, SEVERITY_NONE,
2300 				    ERI_VERB_MSG, "In Init after reset");
2301 				mutex_exit(&erip->xcvrlock);
2302 				link_timeout = 0;
2303 				goto done;
2304 			}
2305 			if (erip->stats.link_up == LINK_STATE_UP)
2306 				linkupdate = LINK_STATE_UP;
2307 		} else {
2308 			erip->flags |= (ERI_RUNNING | ERI_INITIALIZED);
2309 			param_linkup = 0;
2310 			erip->stats.link_up = LINK_STATE_DOWN;
2311 			erip->stats.link_duplex = LINK_DUPLEX_UNKNOWN;
2312 			linkupdate = LINK_STATE_DOWN;
2313 			/*
2314 			 * Still go on and complete the MAC initialization as
2315 			 * xcvr might show up later.
2316 			 * you must return to their mutex ordering.
2317 			 */
2318 		}
2319 		eri_mif_poll(erip, MIF_POLL_START);
2320 	}
2321 
2322 	mutex_exit(&erip->xcvrlock);
2323 
2324 	/*
2325 	 * Allocate data structures.
2326 	 */
2327 	if (erip->global_reset_issued) {
2328 		if (erip->global_reset_issued == 2) { /* fast path */
2329 
2330 			/*
2331 			 * Hang out/Initialize descriptors and buffers.
2332 			 */
2333 			eri_init_txbufs(erip);
2334 
2335 			eri_update_rxbufs(erip);
2336 		} else {
2337 			init_stat = eri_allocthings(erip);
2338 			if (init_stat)
2339 				goto done;
2340 
2341 			if (eri_freebufs(erip))
2342 				goto done;
2343 			/*
2344 			 * Hang out/Initialize descriptors and buffers.
2345 			 */
2346 			eri_init_txbufs(erip);
2347 			if (eri_init_rxbufs(erip))
2348 				goto done;
2349 		}
2350 	}
2351 
2352 	/*
2353 	 * BigMAC requires that we confirm that tx, rx and hash are in
2354 	 * quiescent state.
2355 	 * MAC will not reset successfully if the transceiver is not reset and
2356 	 * brought out of Isolate mode correctly. TXMAC reset may fail if the
2357 	 * ext. transceiver is just disconnected. If it fails, try again by
2358 	 * checking the transceiver.
2359 	 */
2360 	if (eri_txmac_disable(erip)) {
2361 		ERI_FAULT_MSG1(erip, SEVERITY_LOW, ERI_VERB_MSG,
2362 		    disable_txmac_msg);
2363 		param_linkup = 0;	/* force init again */
2364 		erip->stats.link_up = LINK_STATE_DOWN;
2365 		erip->stats.link_duplex = LINK_DUPLEX_UNKNOWN;
2366 		linkupdate = LINK_STATE_DOWN;
2367 		goto done;
2368 	}
2369 
2370 	if (eri_rxmac_disable(erip)) {
2371 		ERI_FAULT_MSG1(erip, SEVERITY_LOW, ERI_VERB_MSG,
2372 		    disable_rxmac_msg);
2373 		param_linkup = 0;	/* force init again */
2374 		erip->stats.link_up = LINK_STATE_DOWN;
2375 		erip->stats.link_duplex = LINK_DUPLEX_UNKNOWN;
2376 		linkupdate = LINK_STATE_DOWN;
2377 		goto done;
2378 	}
2379 
2380 	eri_init_macregs_generic(erip);
2381 
2382 	/*
2383 	 * Initialize ERI Global registers :
2384 	 * config
2385 	 * For PCI :  err_mask, bif_cfg
2386 	 *
2387 	 * Use user-configurable parameter for enabling 64-bit transfers.
2388 	 * Note:For PCI, burst sizes are in multiples of 64-bytes.
2389 	 */
2390 
2391 	/*
2392 	 * Significant performance improvements can be achieved by
2393 	 * disabling transmit interrupt. Thus TMD's are reclaimed
2394 	 * only very infrequently.
2395 	 * The PCS Interrupt is masked here. It is enabled only when
2396 	 * a PCS link is brought up because there is no second level
2397 	 * mask for this interrupt..
2398 	 * Init GLOBAL, TXMAC, RXMAC and MACCTL interrupt masks here.
2399 	 */
2400 	if (! partial_init) {
2401 		PUT_GLOBREG(intmask, ERI_G_MASK_INTR);
2402 		erip->tx_int_me = 0;
2403 		PUT_MACREG(txmask, BMAC_TXINTR_MASK);
2404 		PUT_MACREG(rxmask, BMAC_RXINTR_MASK);
2405 		PUT_MACREG(macctl_mask, ERI_MACCTL_INTR_MASK);
2406 	}
2407 
2408 	if (erip->global_reset_issued) {
2409 		/*
2410 		 * Initialize ETX Registers:
2411 		 * config, txring_lo, txring_hi
2412 		 */
2413 		if (eri_init_txregs(erip))
2414 			goto done;
2415 		/*
2416 		 * Initialize ERX Registers:
2417 		 * rxring_lo, rxring_hi, config, rx_blanking,
2418 		 * rx_pause_threshold.  Also, rx_kick
2419 		 * Read and save rxfifo_size.
2420 		 */
2421 		if (eri_init_rxregs(erip))
2422 			goto done;
2423 	}
2424 
2425 	PUT_MACREG(macctl_mask, ERI_MACCTL_INTR_MASK);
2426 
2427 	/*
2428 	 * Set up the slottime,and  rxconfig, txconfig without enabling
2429 	 * the latter two at this time
2430 	 */
2431 	PUT_MACREG(slot, BMAC_SLOT_TIME);
2432 	carrier_ext = 0;
2433 
2434 #ifdef ERI_DONT_STRIP_CRC
2435 	PUT_MACREG(rxcfg,
2436 	    ((erip->promisc ? BMAC_RXCFG_PROMIS : 0) |
2437 	    (erip->multi_refcnt ? BMAC_RXCFG_HASH : 0) |
2438 	    (carrier_ext ? BMAC_RXCFG_CARR_EXT : 0)));
2439 #else
2440 	PUT_MACREG(rxcfg,
2441 	    ((erip->promisc ? BMAC_RXCFG_PROMIS : 0) |
2442 	    (erip->multi_refcnt ? BMAC_RXCFG_HASH : 0) |
2443 	    BMAC_RXCFG_STRIP_CRC |
2444 	    (carrier_ext ? BMAC_RXCFG_CARR_EXT : 0)));
2445 #endif
2446 	drv_usecwait(10);	/* wait after setting Hash Enable bit */
2447 
2448 	if (erip->ngu_enable)
2449 		PUT_MACREG(txcfg,
2450 		    ((param_mode ? BMAC_TXCFG_FDX: 0) |
2451 		    ((param_lance_mode && (erip->lance_mode_enable)) ?
2452 		    BMAC_TXCFG_ENIPG0 : 0) |
2453 		    (carrier_ext ? BMAC_TXCFG_CARR_EXT : 0) |
2454 		    BMAC_TXCFG_NGU));
2455 	else
2456 		PUT_MACREG(txcfg,
2457 		    ((param_mode ? BMAC_TXCFG_FDX: 0) |
2458 		    ((param_lance_mode && (erip->lance_mode_enable)) ?
2459 		    BMAC_TXCFG_ENIPG0 : 0) |
2460 		    (carrier_ext ? BMAC_TXCFG_CARR_EXT : 0)));
2461 
2462 	if (erip->pauseRX)
2463 		mac_ctl = ERI_MCTLCFG_RXPAUSE;
2464 	if (erip->pauseTX)
2465 		mac_ctl |= ERI_MCTLCFG_TXPAUSE;
2466 
2467 	PUT_MACREG(macctl_cfg, mac_ctl);
2468 
2469 	/*
2470 	 * Must be Internal Transceiver
2471 	 */
2472 	if (param_mode)
2473 		PUT_MACREG(xifc, ((param_transceiver == EXTERNAL_XCVR ?
2474 		    BMAC_XIFC_MIIBUF_OE : 0) | BMAC_XIFC_TX_MII_OE));
2475 	else {
2476 		PUT_MACREG(xifc, ((param_transceiver == EXTERNAL_XCVR ?
2477 		    BMAC_XIFC_MIIBUF_OE : 0) | BMAC_XIFC_TX_MII_OE |
2478 		    BMAC_XIFC_DIS_ECHO));
2479 
2480 		link_timeout = ERI_CHECK_HANG_TIMER;
2481 	}
2482 
2483 	/*
2484 	 * if MAC int loopback flag is set, put xifc reg in mii loopback
2485 	 * mode {DIAG}
2486 	 */
2487 	if (erip->flags & ERI_MACLOOPBACK) {
2488 		PUT_MACREG(xifc, GET_MACREG(xifc) | BMAC_XIFC_MIILPBK);
2489 	}
2490 
2491 	/*
2492 	 * Enable TX and RX MACs.
2493 	 */
2494 	ENABLE_MAC(erip);
2495 	erip->flags |= (ERI_RUNNING | ERI_INITIALIZED |
2496 	    ERI_TXINIT | ERI_RXINIT);
2497 	mac_tx_update(erip->mh);
2498 	erip->global_reset_issued = 0;
2499 
2500 #ifdef	ERI_10_10_FORCE_SPEED_WORKAROUND
2501 	eri_xcvr_force_mode(erip, &link_timeout);
2502 #endif
2503 
2504 done:
2505 	if (init_stat)
2506 		eri_unallocthings(erip);
2507 
2508 	mutex_exit(&erip->xmitlock);
2509 	eri_start_timer(erip, eri_check_link, link_timeout);
2510 	mutex_exit(&erip->intrlock);
2511 
2512 	if (linkupdate != LINK_STATE_UNKNOWN)
2513 		mac_link_update(erip->mh, linkupdate);
2514 
2515 	ret = (erip->flags & ERI_RUNNING) ? B_TRUE : B_FALSE;
2516 	if (!ret) {
2517 		ERI_FAULT_MSG1(erip, SEVERITY_NONE, ERI_VERB_MSG,
2518 		    "eri_init failed");
2519 	}
2520 
2521 init_exit:
2522 	ASSERT(!MUTEX_HELD(&erip->linklock));
2523 	return (ret);
2524 }
2525 
2526 /*
2527  * 0 as burstsize upon failure as it signifies no burst size.
2528  */
2529 static int
eri_burstsize(struct eri * erip)2530 eri_burstsize(struct eri *erip)
2531 {
2532 	ddi_dma_handle_t handle;
2533 
2534 	if (ddi_dma_alloc_handle(erip->dip, &dma_attr, DDI_DMA_DONTWAIT,
2535 	    NULL, &handle))
2536 		return (DDI_FAILURE);
2537 
2538 	erip->burstsizes = ddi_dma_burstsizes(handle);
2539 	ddi_dma_free_handle(&handle);
2540 
2541 	if (erip->burstsizes)
2542 		return (DDI_SUCCESS);
2543 
2544 	return (DDI_FAILURE);
2545 }
2546 
2547 /*
2548  * Un-initialize (STOP) ERI channel.
2549  */
2550 static void
eri_uninit(struct eri * erip)2551 eri_uninit(struct eri *erip)
2552 {
2553 	boolean_t needind;
2554 
2555 	/*
2556 	 * Allow up to 'ERI_DRAINTIME' for pending xmit's to complete.
2557 	 */
2558 	ERI_DELAY((erip->tcurp == erip->tnextp), ERI_DRAINTIME);
2559 
2560 	mutex_enter(&erip->intrlock);
2561 	eri_stop_timer(erip);   /* acquire linklock */
2562 	mutex_enter(&erip->xmitlock);
2563 	mutex_enter(&erip->xcvrlock);
2564 	eri_mif_poll(erip, MIF_POLL_STOP);
2565 	erip->flags &= ~ERI_DLPI_LINKUP;
2566 	mutex_exit(&erip->xcvrlock);
2567 
2568 	needind = !erip->linkcheck;
2569 	(void) eri_stop(erip);
2570 	erip->flags &= ~ERI_RUNNING;
2571 
2572 	mutex_exit(&erip->xmitlock);
2573 	eri_start_timer(erip, eri_check_link, 0);
2574 	mutex_exit(&erip->intrlock);
2575 
2576 	if (needind)
2577 		mac_link_update(erip->mh, LINK_STATE_DOWN);
2578 }
2579 
2580 /*
2581  * Allocate CONSISTENT memory for rmds and tmds with appropriate alignment and
2582  * map it in IO space.
2583  *
2584  * The driver allocates STREAMS buffers which will be mapped in DVMA
2585  * space using DDI DMA resources.
2586  *
2587  */
2588 static int
eri_allocthings(struct eri * erip)2589 eri_allocthings(struct eri *erip)
2590 {
2591 
2592 	uintptr_t	a;
2593 	int		size;
2594 	uint32_t	rval;
2595 	int		i;
2596 	size_t		real_len;
2597 	uint32_t	cookiec;
2598 	int		alloc_stat = 0;
2599 	ddi_dma_cookie_t dma_cookie;
2600 
2601 	/*
2602 	 * Return if resources are already allocated.
2603 	 */
2604 	if (erip->rmdp)
2605 		return (alloc_stat);
2606 
2607 	erip->alloc_flag = 0;
2608 
2609 	/*
2610 	 * Allocate the TMD and RMD descriptors and extra for alignments.
2611 	 */
2612 	size = (ERI_RPENDING * sizeof (struct rmd) +
2613 	    ERI_TPENDING * sizeof (struct eri_tmd)) + ERI_GMDALIGN;
2614 
2615 	rval = ddi_dma_alloc_handle(erip->dip, &desc_dma_attr,
2616 	    DDI_DMA_DONTWAIT, 0, &erip->md_h);
2617 	if (rval != DDI_SUCCESS) {
2618 		return (++alloc_stat);
2619 	}
2620 	erip->alloc_flag |= ERI_DESC_HANDLE_ALLOC;
2621 
2622 	rval = ddi_dma_mem_alloc(erip->md_h, size, &erip->dev_attr,
2623 	    DDI_DMA_CONSISTENT, DDI_DMA_DONTWAIT, 0,
2624 	    (caddr_t *)&erip->iopbkbase, &real_len, &erip->mdm_h);
2625 	if (rval != DDI_SUCCESS) {
2626 		return (++alloc_stat);
2627 	}
2628 	erip->alloc_flag |= ERI_DESC_MEM_ALLOC;
2629 
2630 	rval = ddi_dma_addr_bind_handle(erip->md_h, NULL,
2631 	    (caddr_t)erip->iopbkbase, size, DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
2632 	    DDI_DMA_DONTWAIT, 0, &erip->md_c, &cookiec);
2633 
2634 	if (rval != DDI_DMA_MAPPED)
2635 		return (++alloc_stat);
2636 
2637 	erip->alloc_flag |= ERI_DESC_MEM_MAP;
2638 
2639 	if (cookiec != 1)
2640 		return (++alloc_stat);
2641 
2642 	erip->iopbiobase = erip->md_c.dmac_address;
2643 
2644 	a = erip->iopbkbase;
2645 	a = ROUNDUP(a, ERI_GMDALIGN);
2646 	erip->rmdp = (struct rmd *)a;
2647 	a += ERI_RPENDING * sizeof (struct rmd);
2648 	erip->eri_tmdp = (struct eri_tmd *)a;
2649 	/*
2650 	 * Specifically we reserve n (ERI_TPENDING + ERI_RPENDING)
2651 	 * pagetable entries. Therefore we have 2 ptes for each
2652 	 * descriptor. Since the ethernet buffers are 1518 bytes
2653 	 * so they can at most use 2 ptes.
2654 	 * Will do a ddi_dma_addr_setup for each bufer
2655 	 */
2656 	/*
2657 	 * In the current implementation, we use the ddi compliant
2658 	 * dma interface. We allocate ERI_RPENDING dma handles for receive
2659 	 * activity. The actual dma mapping is done in the io function
2660 	 * eri_read_dma(), by calling the ddi_dma_addr_bind_handle.
2661 	 * Dma resources are deallocated by calling ddi_dma_unbind_handle
2662 	 * in eri_reclaim() for transmit and eri_read_dma(), for receive io.
2663 	 */
2664 
2665 	if (eri_use_dvma_rx &&
2666 	    (dvma_reserve(erip->dip, &eri_dma_limits, (ERI_RPENDING * 2),
2667 	    &erip->eri_dvmarh)) == DDI_SUCCESS) {
2668 		erip->alloc_flag |= ERI_RCV_DVMA_ALLOC;
2669 	} else {
2670 		erip->eri_dvmarh = NULL;
2671 
2672 		for (i = 0; i < ERI_RPENDING; i++) {
2673 			rval = ddi_dma_alloc_handle(erip->dip,
2674 			    &dma_attr, DDI_DMA_DONTWAIT,
2675 			    0, &erip->ndmarh[i]);
2676 
2677 			if (rval != DDI_SUCCESS) {
2678 				ERI_FAULT_MSG1(erip, SEVERITY_HIGH,
2679 				    ERI_VERB_MSG, alloc_rx_dmah_msg);
2680 				alloc_stat++;
2681 				break;
2682 			}
2683 		}
2684 
2685 		erip->rcv_handle_cnt = i;
2686 
2687 		if (i)
2688 			erip->alloc_flag |= ERI_RCV_HANDLE_ALLOC;
2689 
2690 		if (alloc_stat)
2691 			return (alloc_stat);
2692 
2693 	}
2694 
2695 /*
2696  *	Allocate TX buffer
2697  *	Note: buffers must always be allocated in the native
2698  *	ordering of the CPU (always big-endian for Sparc).
2699  *	ddi_dma_mem_alloc returns memory in the native ordering
2700  *	of the bus (big endian for SBus, little endian for PCI).
2701  *	So we cannot use ddi_dma_mem_alloc(, &erip->ge_dev_attr)
2702  *	because we'll get little endian memory on PCI.
2703  */
2704 	if (ddi_dma_alloc_handle(erip->dip, &desc_dma_attr, DDI_DMA_DONTWAIT,
2705 	    0, &erip->tbuf_handle) != DDI_SUCCESS) {
2706 		ERI_FAULT_MSG1(erip, SEVERITY_HIGH, ERI_VERB_MSG,
2707 		    alloc_tx_dmah_msg);
2708 		return (++alloc_stat);
2709 	}
2710 	erip->alloc_flag |= ERI_XBUFS_HANDLE_ALLOC;
2711 	size = ERI_TPENDING * ERI_BUFSIZE;
2712 	if (ddi_dma_mem_alloc(erip->tbuf_handle, size, &buf_attr,
2713 	    DDI_DMA_CONSISTENT, DDI_DMA_DONTWAIT, NULL, &erip->tbuf_kaddr,
2714 	    &real_len, &erip->tbuf_acch) != DDI_SUCCESS) {
2715 		ERI_FAULT_MSG1(erip, SEVERITY_HIGH, ERI_VERB_MSG,
2716 		    alloc_tx_dmah_msg);
2717 		return (++alloc_stat);
2718 	}
2719 	erip->alloc_flag |= ERI_XBUFS_KMEM_ALLOC;
2720 	if (ddi_dma_addr_bind_handle(erip->tbuf_handle, NULL,
2721 	    erip->tbuf_kaddr, size, DDI_DMA_WRITE | DDI_DMA_CONSISTENT,
2722 	    DDI_DMA_DONTWAIT, 0, &dma_cookie, &cookiec) != DDI_DMA_MAPPED) {
2723 			return (++alloc_stat);
2724 	}
2725 	erip->tbuf_ioaddr = dma_cookie.dmac_address;
2726 	erip->alloc_flag |= ERI_XBUFS_KMEM_DMABIND;
2727 	if (cookiec != 1)
2728 		return (++alloc_stat);
2729 
2730 	/*
2731 	 * Keep handy limit values for RMD, TMD, and Buffers.
2732 	 */
2733 	erip->rmdlimp = &((erip->rmdp)[ERI_RPENDING]);
2734 	erip->eri_tmdlimp = &((erip->eri_tmdp)[ERI_TPENDING]);
2735 
2736 	/*
2737 	 * Zero out RCV holders.
2738 	 */
2739 	bzero((caddr_t)erip->rmblkp, sizeof (erip->rmblkp));
2740 	return (alloc_stat);
2741 }
2742 
2743 /* <<<<<<<<<<<<<<<<<	INTERRUPT HANDLING FUNCTION	>>>>>>>>>>>>>>>>>>>> */
2744 /*
2745  *	First check to see if it is our device interrupting.
2746  */
2747 static uint_t
eri_intr(caddr_t arg)2748 eri_intr(caddr_t arg)
2749 {
2750 	struct eri *erip = (void *)arg;
2751 	uint32_t erisbits;
2752 	uint32_t mif_status;
2753 	uint32_t serviced = DDI_INTR_UNCLAIMED;
2754 	link_state_t linkupdate = LINK_STATE_UNKNOWN;
2755 	boolean_t macupdate = B_FALSE;
2756 	mblk_t *mp;
2757 	mblk_t *head;
2758 	mblk_t **tail;
2759 
2760 	head = NULL;
2761 	tail = &head;
2762 
2763 	mutex_enter(&erip->intrlock);
2764 
2765 	erisbits = GET_GLOBREG(status);
2766 
2767 	/*
2768 	 * Check if it is only the RX_DONE interrupt, which is
2769 	 * the most frequent one.
2770 	 */
2771 	if (((erisbits & ERI_G_STATUS_RX_INT) == ERI_G_STATUS_RX_DONE) &&
2772 	    (erip->flags & ERI_RUNNING)) {
2773 		serviced = DDI_INTR_CLAIMED;
2774 		goto rx_done_int;
2775 	}
2776 
2777 	/* Claim the first interrupt after initialization */
2778 	if (erip->flags & ERI_INITIALIZED) {
2779 		erip->flags &= ~ERI_INITIALIZED;
2780 		serviced = DDI_INTR_CLAIMED;
2781 	}
2782 
2783 	/* Check for interesting events */
2784 	if ((erisbits & ERI_G_STATUS_INTR) == 0) {
2785 #ifdef	ESTAR_WORKAROUND
2786 		uint32_t linkupdate;
2787 #endif
2788 
2789 		ERI_DEBUG_MSG2(erip, DIAG_MSG,
2790 		    "eri_intr: Interrupt Not Claimed gsbits  %X", erisbits);
2791 #ifdef	DEBUG
2792 		noteri++;
2793 #endif
2794 		ERI_DEBUG_MSG2(erip, DIAG_MSG, "eri_intr:MIF Config = 0x%X",
2795 		    GET_MIFREG(mif_cfg));
2796 		ERI_DEBUG_MSG2(erip, DIAG_MSG, "eri_intr:MIF imask = 0x%X",
2797 		    GET_MIFREG(mif_imask));
2798 		ERI_DEBUG_MSG2(erip, DIAG_MSG, "eri_intr:INT imask = 0x%X",
2799 		    GET_GLOBREG(intmask));
2800 		ERI_DEBUG_MSG2(erip, DIAG_MSG, "eri_intr:alias %X",
2801 		    GET_GLOBREG(status_alias));
2802 #ifdef	ESTAR_WORKAROUND
2803 		linkupdate = eri_check_link_noind(erip);
2804 #endif
2805 		mutex_exit(&erip->intrlock);
2806 #ifdef	ESTAR_WORKAROUND
2807 		if (linkupdate != LINK_STATE_UNKNOWN)
2808 			mac_link_update(erip->mh, linkupdate);
2809 #endif
2810 		return (serviced);
2811 	}
2812 	serviced = DDI_INTR_CLAIMED;
2813 
2814 	if (!(erip->flags & ERI_RUNNING)) {
2815 		mutex_exit(&erip->intrlock);
2816 		eri_uninit(erip);
2817 		return (serviced);
2818 	}
2819 
2820 	if (erisbits & ERI_G_STATUS_FATAL_ERR) {
2821 		ERI_DEBUG_MSG2(erip, INTR_MSG,
2822 		    "eri_intr: fatal error: erisbits = %X", erisbits);
2823 		(void) eri_fatal_err(erip, erisbits);
2824 		eri_reinit_fatal++;
2825 
2826 		if (erip->rx_reset_issued) {
2827 			erip->rx_reset_issued = 0;
2828 			(void) eri_init_rx_channel(erip);
2829 			mutex_exit(&erip->intrlock);
2830 		} else {
2831 			param_linkup = 0;
2832 			erip->stats.link_up = LINK_STATE_DOWN;
2833 			erip->stats.link_duplex = LINK_DUPLEX_UNKNOWN;
2834 			DISABLE_MAC(erip);
2835 			mutex_exit(&erip->intrlock);
2836 			(void) eri_init(erip);
2837 		}
2838 		return (serviced);
2839 	}
2840 
2841 	if (erisbits & ERI_G_STATUS_NONFATAL_ERR) {
2842 		ERI_DEBUG_MSG2(erip, INTR_MSG,
2843 		    "eri_intr: non-fatal error: erisbits = %X", erisbits);
2844 		(void) eri_nonfatal_err(erip, erisbits);
2845 		if (erip->linkcheck) {
2846 			mutex_exit(&erip->intrlock);
2847 			(void) eri_init(erip);
2848 			return (serviced);
2849 		}
2850 	}
2851 
2852 	if (erisbits & ERI_G_STATUS_MIF_INT) {
2853 		uint16_t stat;
2854 		ERI_DEBUG_MSG2(erip, XCVR_MSG,
2855 		    "eri_intr:MIF Interrupt:mii_status %X", erip->mii_status);
2856 		eri_stop_timer(erip);   /* acquire linklock */
2857 
2858 		mutex_enter(&erip->xmitlock);
2859 		mutex_enter(&erip->xcvrlock);
2860 #ifdef	ERI_MIF_POLL_STATUS_WORKAROUND
2861 		mif_status = GET_MIFREG(mif_bsts);
2862 		eri_mif_poll(erip, MIF_POLL_STOP);
2863 		ERI_DEBUG_MSG3(erip, XCVR_MSG,
2864 		    "eri_intr: new MIF interrupt status %X XCVR status %X",
2865 		    mif_status, erip->mii_status);
2866 		(void) eri_mii_read(erip, ERI_PHY_BMSR, &stat);
2867 		linkupdate = eri_mif_check(erip, stat, stat);
2868 
2869 #else
2870 		mif_status = GET_MIFREG(mif_bsts);
2871 		eri_mif_poll(erip, MIF_POLL_STOP);
2872 		linkupdate = eri_mif_check(erip, (uint16_t)mif_status,
2873 		    (uint16_t)(mif_status >> 16));
2874 #endif
2875 		eri_mif_poll(erip, MIF_POLL_START);
2876 		mutex_exit(&erip->xcvrlock);
2877 		mutex_exit(&erip->xmitlock);
2878 
2879 		if (!erip->openloop_autoneg)
2880 			eri_start_timer(erip, eri_check_link,
2881 			    ERI_LINKCHECK_TIMER);
2882 		else
2883 			eri_start_timer(erip, eri_check_link,
2884 			    ERI_P_FAULT_TIMER);
2885 	}
2886 
2887 	ERI_DEBUG_MSG2(erip, INTR_MSG,
2888 	    "eri_intr:May have Read Interrupt status:status %X", erisbits);
2889 
2890 rx_done_int:
2891 	if ((erisbits & (ERI_G_STATUS_TX_INT_ME)) ||
2892 	    (erip->tx_cur_cnt >= tx_interrupt_rate)) {
2893 		mutex_enter(&erip->xmitlock);
2894 		erip->tx_completion = (uint32_t)(GET_ETXREG(tx_completion) &
2895 		    ETX_COMPLETION_MASK);
2896 
2897 		macupdate |= eri_reclaim(erip, erip->tx_completion);
2898 		if (macupdate)
2899 			erip->wantw = B_FALSE;
2900 
2901 		mutex_exit(&erip->xmitlock);
2902 	}
2903 
2904 	if (erisbits & ERI_G_STATUS_RX_DONE) {
2905 		volatile struct	rmd	*rmdp, *rmdpbase;
2906 		volatile uint32_t rmdi;
2907 		uint8_t loop_limit = 0x20;
2908 		uint64_t flags;
2909 		uint32_t rmdmax_mask = erip->rmdmax_mask;
2910 
2911 		rmdpbase = erip->rmdp;
2912 		rmdi = erip->rx_completion;
2913 		rmdp = rmdpbase + rmdi;
2914 
2915 		/*
2916 		 * Sync RMD before looking at it.
2917 		 */
2918 		ERI_SYNCIOPB(erip, rmdp, sizeof (struct rmd),
2919 		    DDI_DMA_SYNC_FORCPU);
2920 		/*
2921 		 * Loop through each RMD.
2922 		 */
2923 
2924 		flags = GET_RMD_FLAGS(rmdp);
2925 		while (((flags & ERI_RMD_OWN) == 0) && (loop_limit)) {
2926 			/* process one packet */
2927 			mp = eri_read_dma(erip, rmdp, rmdi, flags);
2928 			rmdi =  (rmdi + 1) & rmdmax_mask;
2929 			rmdp = rmdpbase + rmdi;
2930 
2931 			if (mp != NULL) {
2932 				*tail = mp;
2933 				tail = &mp->b_next;
2934 			}
2935 
2936 			/*
2937 			 * ERI RCV DMA fetches or updates four descriptors
2938 			 * a time. Also we don't want to update the desc.
2939 			 * batch we just received packet on. So we update
2940 			 * descriptors for every 4 packets and we update
2941 			 * the group of 4 after the current batch.
2942 			 */
2943 
2944 			if (!(rmdi % 4)) {
2945 				if (eri_overflow_reset &&
2946 				    (GET_GLOBREG(status_alias) &
2947 				    ERI_G_STATUS_NONFATAL_ERR)) {
2948 					loop_limit = 1;
2949 				} else {
2950 					erip->rx_kick =
2951 					    (rmdi + ERI_RPENDING - 4) &
2952 					    rmdmax_mask;
2953 					PUT_ERXREG(rx_kick, erip->rx_kick);
2954 				}
2955 			}
2956 
2957 			/*
2958 			 * Sync the next RMD before looking at it.
2959 			 */
2960 			ERI_SYNCIOPB(erip, rmdp, sizeof (struct rmd),
2961 			    DDI_DMA_SYNC_FORCPU);
2962 			flags = GET_RMD_FLAGS(rmdp);
2963 			loop_limit--;
2964 		}
2965 		erip->rx_completion = rmdi;
2966 	}
2967 
2968 	mutex_exit(&erip->intrlock);
2969 
2970 	if (head)
2971 		mac_rx(erip->mh, NULL, head);
2972 
2973 	if (macupdate)
2974 		mac_tx_update(erip->mh);
2975 
2976 	if (linkupdate != LINK_STATE_UNKNOWN)
2977 		mac_link_update(erip->mh, linkupdate);
2978 
2979 	return (serviced);
2980 }
2981 
2982 /*
2983  * Handle interrupts for fatal errors
2984  * Need reinitialization.
2985  */
2986 #define	PCI_DATA_PARITY_REP	(1 << 8)
2987 #define	PCI_SING_TARGET_ABORT	(1 << 11)
2988 #define	PCI_RCV_TARGET_ABORT	(1 << 12)
2989 #define	PCI_RCV_MASTER_ABORT	(1 << 13)
2990 #define	PCI_SING_SYSTEM_ERR	(1 << 14)
2991 #define	PCI_DATA_PARITY_ERR	(1 << 15)
2992 
2993 /* called with intrlock held */
2994 static void
eri_fatal_err(struct eri * erip,uint32_t erisbits)2995 eri_fatal_err(struct eri *erip, uint32_t erisbits)
2996 {
2997 	uint16_t	pci_status;
2998 	uint32_t	pci_error_int = 0;
2999 
3000 	if (erisbits & ERI_G_STATUS_RX_TAG_ERR) {
3001 		erip->rx_reset_issued = 1;
3002 		HSTAT(erip, rxtag_err);
3003 	} else {
3004 		erip->global_reset_issued = 1;
3005 		if (erisbits & ERI_G_STATUS_BUS_ERR_INT) {
3006 			pci_error_int = 1;
3007 			HSTAT(erip, pci_error_int);
3008 		} else if (erisbits & ERI_G_STATUS_PERR_INT) {
3009 			HSTAT(erip, parity_error);
3010 		} else {
3011 			HSTAT(erip, unknown_fatal);
3012 		}
3013 	}
3014 
3015 	/*
3016 	 * PCI bus error
3017 	 */
3018 	if (pci_error_int && erip->pci_config_handle) {
3019 		pci_status = pci_config_get16(erip->pci_config_handle,
3020 		    PCI_CONF_STAT);
3021 		ERI_DEBUG_MSG2(erip, FATAL_ERR_MSG, "Bus Error Status %x",
3022 		    pci_status);
3023 		if (pci_status & PCI_DATA_PARITY_REP)
3024 			HSTAT(erip, pci_data_parity_err);
3025 		if (pci_status & PCI_SING_TARGET_ABORT)
3026 			HSTAT(erip, pci_signal_target_abort);
3027 		if (pci_status & PCI_RCV_TARGET_ABORT)
3028 			HSTAT(erip, pci_rcvd_target_abort);
3029 		if (pci_status & PCI_RCV_MASTER_ABORT)
3030 			HSTAT(erip, pci_rcvd_master_abort);
3031 		if (pci_status & PCI_SING_SYSTEM_ERR)
3032 			HSTAT(erip, pci_signal_system_err);
3033 		if (pci_status & PCI_DATA_PARITY_ERR)
3034 			HSTAT(erip, pci_signal_system_err);
3035 		/*
3036 		 * clear it by writing the value that was read back.
3037 		 */
3038 		pci_config_put16(erip->pci_config_handle, PCI_CONF_STAT,
3039 		    pci_status);
3040 	}
3041 }
3042 
3043 /*
3044  * Handle interrupts regarding non-fatal events.
3045  * TXMAC, RXMAC and MACCTL events
3046  */
3047 static void
eri_nonfatal_err(struct eri * erip,uint32_t erisbits)3048 eri_nonfatal_err(struct eri *erip, uint32_t erisbits)
3049 {
3050 
3051 	uint32_t	txmac_sts, rxmac_sts, macctl_sts, pause_time;
3052 
3053 #ifdef ERI_PM_WORKAROUND
3054 	if (pci_report_pmcap(erip->dip, PCI_PM_IDLESPEED,
3055 	    PCI_PM_IDLESPEED_NONE) == DDI_SUCCESS)
3056 		erip->stats.pmcap = ERI_PMCAP_NONE;
3057 #endif
3058 
3059 	if (erisbits & ERI_G_STATUS_TX_MAC_INT) {
3060 		txmac_sts = GET_MACREG(txsts);
3061 		if (txmac_sts & BMAC_TXSTS_TX_URUN) {
3062 			erip->linkcheck = 1;
3063 			HSTAT(erip, txmac_urun);
3064 			HSTAT(erip, oerrors);
3065 		}
3066 
3067 		if (txmac_sts & BMAC_TXSTS_MAXPKT_ERR) {
3068 			erip->linkcheck = 1;
3069 			HSTAT(erip, txmac_maxpkt_err);
3070 			HSTAT(erip, oerrors);
3071 		}
3072 		if (txmac_sts & BMAC_TXSTS_NCC_EXP) {
3073 			erip->stats.collisions += 0x10000;
3074 		}
3075 
3076 		if (txmac_sts & BMAC_TXSTS_ECC_EXP) {
3077 			erip->stats.excessive_coll += 0x10000;
3078 		}
3079 
3080 		if (txmac_sts & BMAC_TXSTS_LCC_EXP) {
3081 			erip->stats.late_coll += 0x10000;
3082 		}
3083 
3084 		if (txmac_sts & BMAC_TXSTS_FCC_EXP) {
3085 			erip->stats.first_coll += 0x10000;
3086 		}
3087 
3088 		if (txmac_sts & BMAC_TXSTS_DEFER_EXP) {
3089 			HSTAT(erip, defer_timer_exp);
3090 		}
3091 
3092 		if (txmac_sts & BMAC_TXSTS_PEAK_EXP) {
3093 			erip->stats.peak_attempt_cnt += 0x100;
3094 		}
3095 	}
3096 
3097 	if (erisbits & ERI_G_STATUS_RX_NO_BUF) {
3098 		ERI_DEBUG_MSG1(erip, NONFATAL_MSG, "rx dropped/no free desc");
3099 
3100 		if (eri_overflow_reset)
3101 			erip->linkcheck = 1;
3102 
3103 		HSTAT(erip, no_free_rx_desc);
3104 		HSTAT(erip, ierrors);
3105 	}
3106 	if (erisbits & ERI_G_STATUS_RX_MAC_INT) {
3107 		rxmac_sts = GET_MACREG(rxsts);
3108 		if (rxmac_sts & BMAC_RXSTS_RX_OVF) {
3109 #ifndef ERI_RMAC_HANG_WORKAROUND
3110 			eri_stop_timer(erip);   /* acquire linklock */
3111 			erip->check_rmac_hang ++;
3112 			erip->check2_rmac_hang = 0;
3113 			erip->rxfifo_wr_ptr = GET_ERXREG(rxfifo_wr_ptr);
3114 			erip->rxfifo_rd_ptr = GET_ERXREG(rxfifo_rd_ptr);
3115 
3116 			ERI_DEBUG_MSG5(erip, NONFATAL_MSG,
3117 			    "overflow intr %d: %8x wr:%2x rd:%2x",
3118 			    erip->check_rmac_hang,
3119 			    GET_MACREG(macsm),
3120 			    GET_ERXREG(rxfifo_wr_ptr),
3121 			    GET_ERXREG(rxfifo_rd_ptr));
3122 
3123 			eri_start_timer(erip, eri_check_link,
3124 			    ERI_CHECK_HANG_TIMER);
3125 #endif
3126 			if (eri_overflow_reset)
3127 				erip->linkcheck = 1;
3128 
3129 			HSTAT(erip, rx_overflow);
3130 			HSTAT(erip, ierrors);
3131 		}
3132 
3133 		if (rxmac_sts & BMAC_RXSTS_ALE_EXP) {
3134 			erip->stats.rx_align_err += 0x10000;
3135 			erip->stats.ierrors += 0x10000;
3136 		}
3137 
3138 		if (rxmac_sts & BMAC_RXSTS_CRC_EXP) {
3139 			erip->stats.rx_crc_err += 0x10000;
3140 			erip->stats.ierrors += 0x10000;
3141 		}
3142 
3143 		if (rxmac_sts & BMAC_RXSTS_LEN_EXP) {
3144 			erip->stats.rx_length_err += 0x10000;
3145 			erip->stats.ierrors += 0x10000;
3146 		}
3147 
3148 		if (rxmac_sts & BMAC_RXSTS_CVI_EXP) {
3149 			erip->stats.rx_code_viol_err += 0x10000;
3150 			erip->stats.ierrors += 0x10000;
3151 		}
3152 	}
3153 
3154 	if (erisbits & ERI_G_STATUS_MAC_CTRL_INT) {
3155 
3156 		macctl_sts = GET_MACREG(macctl_sts);
3157 		if (macctl_sts & ERI_MCTLSTS_PAUSE_RCVD) {
3158 			pause_time = ((macctl_sts &
3159 			    ERI_MCTLSTS_PAUSE_TIME) >> 16);
3160 			ERI_DEBUG_MSG2(erip, NONFATAL_MSG,
3161 			    "PAUSE Received. pause time = %X slot_times",
3162 			    pause_time);
3163 			HSTAT(erip, pause_rxcount);
3164 			erip->stats.pause_time_count += pause_time;
3165 		}
3166 
3167 		if (macctl_sts & ERI_MCTLSTS_PAUSE_STATE) {
3168 			HSTAT(erip, pause_oncount);
3169 			erip->stats.pausing = 1;
3170 		}
3171 
3172 		if (macctl_sts & ERI_MCTLSTS_NONPAUSE) {
3173 			HSTAT(erip, pause_offcount);
3174 			erip->stats.pausing = 0;
3175 		}
3176 	}
3177 
3178 }
3179 
3180 /*
3181  * if this is the first init do not bother to save the
3182  * counters.
3183  */
3184 static void
eri_savecntrs(struct eri * erip)3185 eri_savecntrs(struct eri *erip)
3186 {
3187 	uint32_t	fecnt, aecnt, lecnt, rxcv;
3188 	uint32_t	ltcnt, excnt, fccnt;
3189 
3190 	/* XXX What all gets added in ierrors and oerrors? */
3191 	fecnt = GET_MACREG(fecnt);
3192 	HSTATN(erip, rx_crc_err, fecnt);
3193 	PUT_MACREG(fecnt, 0);
3194 
3195 	aecnt = GET_MACREG(aecnt);
3196 	HSTATN(erip, rx_align_err, aecnt);
3197 	PUT_MACREG(aecnt, 0);
3198 
3199 	lecnt = GET_MACREG(lecnt);
3200 	HSTATN(erip, rx_length_err, lecnt);
3201 	PUT_MACREG(lecnt, 0);
3202 
3203 	rxcv = GET_MACREG(rxcv);
3204 	HSTATN(erip, rx_code_viol_err, rxcv);
3205 	PUT_MACREG(rxcv, 0);
3206 
3207 	ltcnt = GET_MACREG(ltcnt);
3208 	HSTATN(erip, late_coll, ltcnt);
3209 	PUT_MACREG(ltcnt, 0);
3210 
3211 	erip->stats.collisions += (GET_MACREG(nccnt) + ltcnt);
3212 	PUT_MACREG(nccnt, 0);
3213 
3214 	excnt = GET_MACREG(excnt);
3215 	HSTATN(erip, excessive_coll, excnt);
3216 	PUT_MACREG(excnt, 0);
3217 
3218 	fccnt = GET_MACREG(fccnt);
3219 	HSTATN(erip, first_coll, fccnt);
3220 	PUT_MACREG(fccnt, 0);
3221 
3222 	/*
3223 	 * Do not add code violations to input errors.
3224 	 * They are already counted in CRC errors
3225 	 */
3226 	HSTATN(erip, ierrors, (fecnt + aecnt + lecnt));
3227 	HSTATN(erip, oerrors, (ltcnt + excnt));
3228 }
3229 
3230 mblk_t *
eri_allocb_sp(size_t size)3231 eri_allocb_sp(size_t size)
3232 {
3233 	mblk_t  *mp;
3234 
3235 	size += 128;
3236 	if ((mp = allocb(size + 3 * ERI_BURSTSIZE, BPRI_HI)) == NULL) {
3237 		return (NULL);
3238 	}
3239 	mp->b_wptr += 128;
3240 	mp->b_wptr = (uint8_t *)ROUNDUP2(mp->b_wptr, ERI_BURSTSIZE);
3241 	mp->b_rptr = mp->b_wptr;
3242 
3243 	return (mp);
3244 }
3245 
3246 mblk_t *
eri_allocb(size_t size)3247 eri_allocb(size_t size)
3248 {
3249 	mblk_t  *mp;
3250 
3251 	if ((mp = allocb(size + 3 * ERI_BURSTSIZE, BPRI_HI)) == NULL) {
3252 		return (NULL);
3253 	}
3254 	mp->b_wptr = (uint8_t *)ROUNDUP2(mp->b_wptr, ERI_BURSTSIZE);
3255 	mp->b_rptr = mp->b_wptr;
3256 
3257 	return (mp);
3258 }
3259 
3260 /*
3261  * Hardware Dependent Functions
3262  * New Section.
3263  */
3264 
3265 /* <<<<<<<<<<<<<<<< Fast Ethernet PHY Bit Bang Operations >>>>>>>>>>>>>>>>>> */
3266 
3267 static void
send_bit(struct eri * erip,uint32_t x)3268 send_bit(struct eri *erip, uint32_t x)
3269 {
3270 	PUT_MIFREG(mif_bbdata, x);
3271 	PUT_MIFREG(mif_bbclk, ERI_BBCLK_LOW);
3272 	PUT_MIFREG(mif_bbclk, ERI_BBCLK_HIGH);
3273 }
3274 
3275 /*
3276  * To read the MII register bits according to the IEEE Standard
3277  */
3278 static uint32_t
get_bit_std(struct eri * erip)3279 get_bit_std(struct eri *erip)
3280 {
3281 	uint32_t	x;
3282 
3283 	PUT_MIFREG(mif_bbclk, ERI_BBCLK_LOW);
3284 	drv_usecwait(1);	/* wait for  >330 ns for stable data */
3285 	if (param_transceiver == INTERNAL_XCVR)
3286 		x = (GET_MIFREG(mif_cfg) & ERI_MIF_CFGM0) ? 1 : 0;
3287 	else
3288 		x = (GET_MIFREG(mif_cfg) & ERI_MIF_CFGM1) ? 1 : 0;
3289 	PUT_MIFREG(mif_bbclk, ERI_BBCLK_HIGH);
3290 	return (x);
3291 }
3292 
3293 #define	SEND_BIT(x)		send_bit(erip, x)
3294 #define	GET_BIT_STD(x)		x = get_bit_std(erip)
3295 
3296 
3297 static void
eri_bb_mii_write(struct eri * erip,uint8_t regad,uint16_t data)3298 eri_bb_mii_write(struct eri *erip, uint8_t regad, uint16_t data)
3299 {
3300 	uint8_t	phyad;
3301 	int		i;
3302 
3303 	PUT_MIFREG(mif_bbopenb, 1);	/* Enable the MII driver */
3304 	phyad = erip->phyad;
3305 	(void) eri_bb_force_idle(erip);
3306 	SEND_BIT(0); SEND_BIT(1);	/* <ST> */
3307 	SEND_BIT(0); SEND_BIT(1);	/* <OP> */
3308 	for (i = 4; i >= 0; i--) {		/* <AAAAA> */
3309 		SEND_BIT((phyad >> i) & 1);
3310 	}
3311 	for (i = 4; i >= 0; i--) {		/* <RRRRR> */
3312 		SEND_BIT((regad >> i) & 1);
3313 	}
3314 	SEND_BIT(1); SEND_BIT(0);	/* <TA> */
3315 	for (i = 0xf; i >= 0; i--) {	/* <DDDDDDDDDDDDDDDD> */
3316 		SEND_BIT((data >> i) & 1);
3317 	}
3318 	PUT_MIFREG(mif_bbopenb, 0);	/* Disable the MII driver */
3319 }
3320 
3321 /* Return 0 if OK, 1 if error (Transceiver does not talk management) */
3322 static uint32_t
eri_bb_mii_read(struct eri * erip,uint8_t regad,uint16_t * datap)3323 eri_bb_mii_read(struct eri *erip, uint8_t regad, uint16_t *datap)
3324 {
3325 	uint8_t	phyad;
3326 	int	i;
3327 	uint32_t	x;
3328 	uint32_t	y;
3329 
3330 	*datap = 0;
3331 
3332 	PUT_MIFREG(mif_bbopenb, 1);	/* Enable the MII driver */
3333 	phyad = erip->phyad;
3334 	(void) eri_bb_force_idle(erip);
3335 	SEND_BIT(0); SEND_BIT(1);	/* <ST> */
3336 	SEND_BIT(1); SEND_BIT(0);	/* <OP> */
3337 	for (i = 4; i >= 0; i--) {		/* <AAAAA> */
3338 		SEND_BIT((phyad >> i) & 1);
3339 	}
3340 	for (i = 4; i >= 0; i--) {		/* <RRRRR> */
3341 		SEND_BIT((regad >> i) & 1);
3342 	}
3343 
3344 	PUT_MIFREG(mif_bbopenb, 0);	/* Disable the MII driver */
3345 
3346 	GET_BIT_STD(x);
3347 	GET_BIT_STD(y);		/* <TA> */
3348 	for (i = 0xf; i >= 0; i--) {	/* <DDDDDDDDDDDDDDDD> */
3349 		GET_BIT_STD(x);
3350 		*datap += (x << i);
3351 	}
3352 	/* Kludge to get the Transceiver out of hung mode */
3353 	/* XXX: Test if this is still needed */
3354 	GET_BIT_STD(x);
3355 	GET_BIT_STD(x);
3356 	GET_BIT_STD(x);
3357 
3358 	return (y);
3359 }
3360 
3361 static void
eri_bb_force_idle(struct eri * erip)3362 eri_bb_force_idle(struct eri *erip)
3363 {
3364 	int		i;
3365 
3366 	for (i = 0; i < 33; i++) {
3367 		SEND_BIT(1);
3368 	}
3369 }
3370 
3371 /* <<<<<<<<<<<<<<<<<<<<End of Bit Bang Operations >>>>>>>>>>>>>>>>>>>>>>>> */
3372 
3373 
3374 /* <<<<<<<<<<<<< Frame Register used for MII operations >>>>>>>>>>>>>>>>>>>> */
3375 
3376 #ifdef ERI_FRM_DEBUG
3377 int frame_flag = 0;
3378 #endif
3379 
3380 /* Return 0 if OK, 1 if error (Transceiver does not talk management) */
3381 static uint32_t
eri_mii_read(struct eri * erip,uint8_t regad,uint16_t * datap)3382 eri_mii_read(struct eri	*erip, uint8_t regad, uint16_t *datap)
3383 {
3384 	uint32_t frame;
3385 	uint8_t phyad;
3386 
3387 	if (param_transceiver == NO_XCVR)
3388 		return (1);	/* No xcvr present */
3389 
3390 	if (!erip->frame_enable)
3391 		return (eri_bb_mii_read(erip, regad, datap));
3392 
3393 	phyad = erip->phyad;
3394 #ifdef ERI_FRM_DEBUG
3395 	if (!frame_flag) {
3396 		eri_errror(erip->dip, "Frame Register used for MII");
3397 		frame_flag = 1;
3398 	}
3399 #endif
3400 	ERI_DEBUG_MSG3(erip, FRM_MSG,
3401 	    "Frame Reg :mii_read: phyad = %X reg = %X ", phyad, regad);
3402 
3403 	PUT_MIFREG(mif_frame, ERI_MIF_FRREAD |
3404 	    (phyad << ERI_MIF_FRPHYAD_SHIFT) |
3405 	    (regad << ERI_MIF_FRREGAD_SHIFT));
3406 	MIF_ERIDELAY(300,  phyad, regad);
3407 	frame = GET_MIFREG(mif_frame);
3408 	if ((frame & ERI_MIF_FRTA0) == 0) {
3409 		return (1);
3410 	} else {
3411 		*datap = (uint16_t)(frame & ERI_MIF_FRDATA);
3412 		return (0);
3413 	}
3414 
3415 }
3416 
3417 static void
eri_mii_write(struct eri * erip,uint8_t regad,uint16_t data)3418 eri_mii_write(struct eri *erip, uint8_t regad, uint16_t data)
3419 {
3420 	uint8_t	phyad;
3421 
3422 	if (!erip->frame_enable) {
3423 		eri_bb_mii_write(erip, regad, data);
3424 		return;
3425 	}
3426 
3427 	phyad = erip->phyad;
3428 
3429 	PUT_MIFREG(mif_frame, (ERI_MIF_FRWRITE |
3430 	    (phyad << ERI_MIF_FRPHYAD_SHIFT) |
3431 	    (regad << ERI_MIF_FRREGAD_SHIFT) | data));
3432 	MIF_ERIDELAY(300,  phyad, regad);
3433 	(void) GET_MIFREG(mif_frame);
3434 }
3435 
3436 
3437 /* <<<<<<<<<<<<<<<<<	PACKET TRANSMIT FUNCTIONS	>>>>>>>>>>>>>>>>>>>> */
3438 
3439 #define	ERI_CROSS_PAGE_BOUNDRY(i, size, pagesize) \
3440 	((i & pagesize) != ((i + size) & pagesize))
3441 
3442 /*
3443  * Send a single mblk.  Returns B_TRUE if the packet is sent, or disposed of
3444  * by freemsg.  Returns B_FALSE if the packet was not sent or queued, and
3445  * should be retried later (due to tx resource exhaustion.)
3446  */
3447 static boolean_t
eri_send_msg(struct eri * erip,mblk_t * mp)3448 eri_send_msg(struct eri *erip, mblk_t *mp)
3449 {
3450 	volatile struct	eri_tmd	*tmdp = NULL;
3451 	volatile struct	eri_tmd	*tbasep = NULL;
3452 	uint32_t	len_msg = 0;
3453 	uint32_t	i;
3454 	uint64_t	int_me = 0;
3455 	uint_t		tmdcsum = 0;
3456 	uint_t		start_offset = 0;
3457 	uint_t		stuff_offset = 0;
3458 	uint_t		flags = 0;
3459 
3460 	caddr_t	ptr;
3461 	uint32_t	offset;
3462 	uint64_t	ctrl;
3463 	ddi_dma_cookie_t	c;
3464 
3465 	if (!param_linkup) {
3466 		freemsg(mp);
3467 		HSTAT(erip, tnocar);
3468 		HSTAT(erip, oerrors);
3469 		return (B_TRUE);
3470 	}
3471 
3472 #ifdef ERI_HWCSUM
3473 	mac_hcksum_get(mp, &start_offset, &stuff_offset, NULL, NULL, &flags);
3474 
3475 	if (flags & HCK_PARTIALCKSUM) {
3476 		if (get_ether_type(mp->b_rptr) == ETHERTYPE_VLAN) {
3477 			start_offset += ETHERHEADER_SIZE + 4;
3478 			stuff_offset += ETHERHEADER_SIZE + 4;
3479 		} else {
3480 			start_offset += ETHERHEADER_SIZE;
3481 			stuff_offset += ETHERHEADER_SIZE;
3482 		}
3483 		tmdcsum = ERI_TMD_CSENABL;
3484 	}
3485 #endif /* ERI_HWCSUM */
3486 
3487 	if ((len_msg = msgsize(mp)) > ERI_BUFSIZE) {
3488 		/*
3489 		 * This sholdn't ever occur, as GLD should not send us
3490 		 * packets that are too big.
3491 		 */
3492 		HSTAT(erip, oerrors);
3493 		freemsg(mp);
3494 		return (B_TRUE);
3495 	}
3496 
3497 	/*
3498 	 * update MIB II statistics
3499 	 */
3500 	BUMP_OutNUcast(erip, mp->b_rptr);
3501 
3502 	mutex_enter(&erip->xmitlock);
3503 
3504 	tbasep = erip->eri_tmdp;
3505 
3506 	/* Check if there are enough descriptors for this packet */
3507 	tmdp = erip->tnextp;
3508 
3509 	if (tmdp >=  erip->tcurp) /* check notmds */
3510 		i = tmdp - erip->tcurp;
3511 	else
3512 		i = tmdp + ERI_TPENDING - erip->tcurp;
3513 
3514 	if (i > (ERI_TPENDING - 4))
3515 		goto notmds;
3516 
3517 	if (i >= (ERI_TPENDING >> 1) && !(erip->starts & 0x7)) {
3518 		int_me = ERI_TMD_INTME;
3519 
3520 		if (!erip->tx_int_me) {
3521 			PUT_GLOBREG(intmask, GET_GLOBREG(intmask) &
3522 			    ~(ERI_G_MASK_TX_INT_ME));
3523 			erip->tx_int_me = 1;
3524 		}
3525 	}
3526 
3527 	i = tmdp - tbasep; /* index */
3528 
3529 	offset = (i * ERI_BUFSIZE);
3530 	ptr = erip->tbuf_kaddr + offset;
3531 
3532 	mcopymsg(mp, ptr);
3533 
3534 #ifdef	ERI_HDX_BUG_WORKAROUND
3535 	if ((param_mode) || (eri_hdx_pad_enable == 0)) {
3536 		if (len_msg < ETHERMIN) {
3537 			bzero((ptr + len_msg), (ETHERMIN - len_msg));
3538 			len_msg = ETHERMIN;
3539 		}
3540 	} else {
3541 		if (len_msg < 97) {
3542 			bzero((ptr + len_msg), (97 - len_msg));
3543 			len_msg = 97;
3544 		}
3545 	}
3546 #endif
3547 	c.dmac_address = erip->tbuf_ioaddr + offset;
3548 	(void) ddi_dma_sync(erip->tbuf_handle,
3549 	    (off_t)offset, len_msg, DDI_DMA_SYNC_FORDEV);
3550 
3551 		/* first and last (and only!) descr of packet */
3552 	ctrl = ERI_TMD_SOP | ERI_TMD_EOP | int_me | tmdcsum |
3553 	    (start_offset << ERI_TMD_CSSTART_SHIFT) |
3554 	    (stuff_offset << ERI_TMD_CSSTUFF_SHIFT);
3555 
3556 	PUT_TMD(tmdp, c, len_msg, ctrl);
3557 	ERI_SYNCIOPB(erip, tmdp, sizeof (struct eri_tmd),
3558 	    DDI_DMA_SYNC_FORDEV);
3559 
3560 	tmdp = NEXTTMD(erip, tmdp);
3561 	erip->tx_cur_cnt++;
3562 
3563 	erip->tx_kick = tmdp - tbasep;
3564 	PUT_ETXREG(tx_kick, erip->tx_kick);
3565 	erip->tnextp = tmdp;
3566 
3567 	erip->starts++;
3568 
3569 	if (erip->tx_cur_cnt >= tx_interrupt_rate) {
3570 		erip->tx_completion = (uint32_t)(GET_ETXREG(tx_completion) &
3571 		    ETX_COMPLETION_MASK);
3572 		(void) eri_reclaim(erip, erip->tx_completion);
3573 	}
3574 	mutex_exit(&erip->xmitlock);
3575 
3576 	return (B_TRUE);
3577 
3578 notmds:
3579 	HSTAT(erip, notmds);
3580 	erip->wantw = B_TRUE;
3581 
3582 	mutex_exit(&erip->xmitlock);
3583 
3584 	return (B_FALSE);
3585 }
3586 
3587 static mblk_t *
eri_m_tx(void * arg,mblk_t * mp)3588 eri_m_tx(void *arg, mblk_t *mp)
3589 {
3590 	struct eri *erip = arg;
3591 	mblk_t *next;
3592 
3593 	while (mp != NULL) {
3594 		next = mp->b_next;
3595 		mp->b_next = NULL;
3596 		if (!eri_send_msg(erip, mp)) {
3597 			mp->b_next = next;
3598 			break;
3599 		}
3600 		mp = next;
3601 	}
3602 
3603 	return (mp);
3604 }
3605 
3606 /*
3607  * Transmit completion reclaiming.
3608  */
3609 static boolean_t
eri_reclaim(struct eri * erip,uint32_t tx_completion)3610 eri_reclaim(struct eri *erip, uint32_t tx_completion)
3611 {
3612 	volatile struct	eri_tmd	*tmdp;
3613 	struct	eri_tmd	*tcomp;
3614 	struct	eri_tmd	*tbasep;
3615 	struct	eri_tmd	*tlimp;
3616 	uint64_t	flags;
3617 	uint_t reclaimed = 0;
3618 
3619 	tbasep = erip->eri_tmdp;
3620 	tlimp = erip->eri_tmdlimp;
3621 
3622 	tmdp = erip->tcurp;
3623 	tcomp = tbasep + tx_completion; /* pointer to completion tmd */
3624 
3625 	/*
3626 	 * Loop through each TMD starting from tcurp and upto tcomp.
3627 	 */
3628 	while (tmdp != tcomp) {
3629 		flags = GET_TMD_FLAGS(tmdp);
3630 		if (flags & (ERI_TMD_SOP))
3631 			HSTAT(erip, opackets64);
3632 
3633 		HSTATN(erip, obytes64, (flags & ERI_TMD_BUFSIZE));
3634 
3635 		tmdp = NEXTTMDP(tbasep, tlimp, tmdp);
3636 		reclaimed++;
3637 	}
3638 
3639 	erip->tcurp = tmdp;
3640 	erip->tx_cur_cnt -= reclaimed;
3641 
3642 	return (erip->wantw && reclaimed ? B_TRUE : B_FALSE);
3643 }
3644 
3645 
3646 /* <<<<<<<<<<<<<<<<<<<	PACKET RECEIVE FUNCTIONS	>>>>>>>>>>>>>>>>>>> */
3647 static mblk_t *
eri_read_dma(struct eri * erip,volatile struct rmd * rmdp,int rmdi,uint64_t flags)3648 eri_read_dma(struct eri *erip, volatile struct rmd *rmdp,
3649     int rmdi, uint64_t flags)
3650 {
3651 	mblk_t	*bp, *nbp;
3652 	int	len;
3653 	uint_t ccnt;
3654 	ddi_dma_cookie_t	c;
3655 #ifdef ERI_RCV_CKSUM
3656 	ushort_t sum;
3657 #endif /* ERI_RCV_CKSUM */
3658 	mblk_t *retmp = NULL;
3659 
3660 	bp = erip->rmblkp[rmdi];
3661 	len = (flags & ERI_RMD_BUFSIZE) >> ERI_RMD_BUFSIZE_SHIFT;
3662 #ifdef	ERI_DONT_STRIP_CRC
3663 	len -= 4;
3664 #endif
3665 	/*
3666 	 * In the event of RX FIFO overflow error, ERI REV 1.0 ASIC can
3667 	 * corrupt packets following the descriptor corresponding the
3668 	 * overflow. To detect the corrupted packets, we disable the
3669 	 * dropping of the "bad" packets at the MAC. The descriptor
3670 	 * then would have the "BAD" bit set. We drop the overflowing
3671 	 * packet and the packet following it. We could have done some sort
3672 	 * of checking to determine if the second packet was indeed bad
3673 	 * (using CRC or checksum) but it would be expensive in this
3674 	 * routine, since it is run in interrupt context.
3675 	 */
3676 	if ((flags & ERI_RMD_BAD) || (len  < ETHERMIN) || (len > ETHERMAX+4)) {
3677 
3678 		HSTAT(erip, rx_bad_pkts);
3679 		if ((flags & ERI_RMD_BAD) == 0)
3680 			HSTAT(erip, ierrors);
3681 		if (len < ETHERMIN) {
3682 			HSTAT(erip, rx_runt);
3683 		} else if (len > ETHERMAX+4) {
3684 			HSTAT(erip, rx_toolong_pkts);
3685 		}
3686 		HSTAT(erip, drop);
3687 		UPDATE_RMD(rmdp);
3688 
3689 		ERI_SYNCIOPB(erip, rmdp, sizeof (struct rmd),
3690 		    DDI_DMA_SYNC_FORDEV);
3691 		return (NULL);
3692 	}
3693 #ifdef  ERI_DONT_STRIP_CRC
3694 	{
3695 		uint32_t hw_fcs, tail_fcs;
3696 		/*
3697 		 * since we don't let the hardware strip the CRC in hdx
3698 		 * then the driver needs to do it.
3699 		 * this is to workaround a hardware bug
3700 		 */
3701 		bp->b_wptr = bp->b_rptr + ERI_FSTBYTE_OFFSET + len;
3702 		/*
3703 		 * Get the Checksum calculated by the hardware.
3704 		 */
3705 		hw_fcs = flags & ERI_RMD_CKSUM;
3706 		/*
3707 		 * Catch the case when the CRC starts on an odd
3708 		 * boundary.
3709 		 */
3710 		tail_fcs = bp->b_wptr[0] << 8 | bp->b_wptr[1];
3711 		tail_fcs += bp->b_wptr[2] << 8 | bp->b_wptr[3];
3712 		tail_fcs = (tail_fcs & 0xffff) + (tail_fcs >> 16);
3713 		if ((uintptr_t)(bp->b_wptr) & 1) {
3714 			tail_fcs = (tail_fcs << 8) & 0xffff  | (tail_fcs >> 8);
3715 		}
3716 		hw_fcs += tail_fcs;
3717 		hw_fcs = (hw_fcs & 0xffff) + (hw_fcs >> 16);
3718 		hw_fcs &= 0xffff;
3719 		/*
3720 		 * Now we can replace what the hardware wrote, make believe
3721 		 * it got it right in the first place.
3722 		 */
3723 		flags = (flags & ~(uint64_t)ERI_RMD_CKSUM) | hw_fcs;
3724 	}
3725 #endif
3726 	/*
3727 	 * Packet Processing
3728 	 * Once we get a packet bp, we try allocate a new mblk, nbp
3729 	 * to replace this one. If we succeed, we map it to the current
3730 	 * dma handle and update the descriptor with the new cookie. We
3731 	 * then put bp in our read service queue erip->ipq, if it exists
3732 	 * or we just bp to the streams expecting it.
3733 	 * If allocation of the new mblk fails, we implicitly drop the
3734 	 * current packet, i.e do not pass up the mblk and re-use it.
3735 	 * Re-mapping is not required.
3736 	 */
3737 
3738 	if (len < eri_rx_bcopy_max) {
3739 		if ((nbp = eri_allocb_sp(len + ERI_FSTBYTE_OFFSET))) {
3740 			(void) ddi_dma_sync(erip->ndmarh[rmdi], 0,
3741 			    len + ERI_FSTBYTE_OFFSET, DDI_DMA_SYNC_FORCPU);
3742 			DB_TYPE(nbp) = M_DATA;
3743 			bcopy(bp->b_rptr, nbp->b_rptr,
3744 			    len + ERI_FSTBYTE_OFFSET);
3745 			UPDATE_RMD(rmdp);
3746 			ERI_SYNCIOPB(erip, rmdp, sizeof (struct rmd),
3747 			    DDI_DMA_SYNC_FORDEV);
3748 
3749 			/* Add the First Byte offset to the b_rptr */
3750 			nbp->b_rptr += ERI_FSTBYTE_OFFSET;
3751 			nbp->b_wptr = nbp->b_rptr + len;
3752 
3753 #ifdef ERI_RCV_CKSUM
3754 			sum = ~(uint16_t)(flags & ERI_RMD_CKSUM);
3755 			ERI_PROCESS_READ(erip, nbp, sum);
3756 #else
3757 			ERI_PROCESS_READ(erip, nbp);
3758 #endif
3759 			retmp = nbp;
3760 		} else {
3761 
3762 			/*
3763 			 * mblk allocation has failed. Re-use the old mblk for
3764 			 * the next packet. Re-mapping is not required since
3765 			 * the same mblk and dma cookie is to be used again.
3766 			 */
3767 			HSTAT(erip, ierrors);
3768 			HSTAT(erip, allocbfail);
3769 			HSTAT(erip, norcvbuf);
3770 
3771 			UPDATE_RMD(rmdp);
3772 			ERI_SYNCIOPB(erip, rmdp, sizeof (struct rmd),
3773 			    DDI_DMA_SYNC_FORDEV);
3774 			ERI_DEBUG_MSG1(erip, RESOURCE_MSG, "allocb fail");
3775 		}
3776 	} else {
3777 		/* Use dma unmap/map */
3778 		if ((nbp = eri_allocb_sp(ERI_BUFSIZE))) {
3779 			/*
3780 			 * How do we harden this, specially if unbind
3781 			 * succeeds and then bind fails?
3782 			 *  If Unbind fails, we can leave without updating
3783 			 * the descriptor but would it continue to work on
3784 			 * next round?
3785 			 */
3786 			(void) ddi_dma_unbind_handle(erip->ndmarh[rmdi]);
3787 			(void) ddi_dma_addr_bind_handle(erip->ndmarh[rmdi],
3788 			    NULL, (caddr_t)nbp->b_rptr, ERI_BUFSIZE,
3789 			    DDI_DMA_READ | DDI_DMA_CONSISTENT,
3790 			    DDI_DMA_DONTWAIT, 0, &c, &ccnt);
3791 
3792 			erip->rmblkp[rmdi] = nbp;
3793 			PUT_RMD(rmdp, c);
3794 			ERI_SYNCIOPB(erip, rmdp, sizeof (struct rmd),
3795 			    DDI_DMA_SYNC_FORDEV);
3796 
3797 			/* Add the First Byte offset to the b_rptr */
3798 
3799 			bp->b_rptr += ERI_FSTBYTE_OFFSET;
3800 			bp->b_wptr = bp->b_rptr + len;
3801 
3802 #ifdef ERI_RCV_CKSUM
3803 			sum = ~(uint16_t)(flags & ERI_RMD_CKSUM);
3804 			ERI_PROCESS_READ(erip, bp, sum);
3805 #else
3806 			ERI_PROCESS_READ(erip, bp);
3807 #endif
3808 			retmp = bp;
3809 		} else {
3810 
3811 			/*
3812 			 * mblk allocation has failed. Re-use the old mblk for
3813 			 * the next packet. Re-mapping is not required since
3814 			 * the same mblk and dma cookie is to be used again.
3815 			 */
3816 			HSTAT(erip, ierrors);
3817 			HSTAT(erip, allocbfail);
3818 			HSTAT(erip, norcvbuf);
3819 
3820 			UPDATE_RMD(rmdp);
3821 			ERI_SYNCIOPB(erip, rmdp, sizeof (struct rmd),
3822 			    DDI_DMA_SYNC_FORDEV);
3823 			ERI_DEBUG_MSG1(erip, RESOURCE_MSG, "allocb fail");
3824 		}
3825 	}
3826 
3827 	return (retmp);
3828 }
3829 
3830 #define	LINK_STAT_DISPLAY_TIME	20
3831 
3832 static int
eri_init_xfer_params(struct eri * erip)3833 eri_init_xfer_params(struct eri *erip)
3834 {
3835 	int	i;
3836 	dev_info_t *dip;
3837 
3838 	dip = erip->dip;
3839 
3840 	for (i = 0; i < A_CNT(param_arr); i++)
3841 		erip->param_arr[i] = param_arr[i];
3842 
3843 	erip->xmit_dma_mode = 0;
3844 	erip->rcv_dma_mode = 0;
3845 	erip->mifpoll_enable = mifpoll_enable;
3846 	erip->lance_mode_enable = lance_mode;
3847 	erip->frame_enable = 1;
3848 	erip->ngu_enable = ngu_enable;
3849 
3850 	if (!erip->g_nd && !eri_param_register(erip,
3851 	    erip->param_arr, A_CNT(param_arr))) {
3852 		ERI_FAULT_MSG1(erip, SEVERITY_LOW, ERI_VERB_MSG,
3853 		    param_reg_fail_msg);
3854 			return (-1);
3855 		}
3856 
3857 	/*
3858 	 * Set up the start-up values for user-configurable parameters
3859 	 * Get the values from the global variables first.
3860 	 * Use the MASK to limit the value to allowed maximum.
3861 	 */
3862 
3863 	param_transceiver = NO_XCVR;
3864 
3865 /*
3866  * The link speed may be forced to either 10 Mbps or 100 Mbps using the
3867  * property "transfer-speed". This may be done in OBP by using the command
3868  * "apply transfer-speed=<speed> <device>". The speed may be either 10 or 100.
3869  */
3870 	i = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0, "transfer-speed", 0);
3871 	if (i != 0) {
3872 		param_autoneg = 0;	/* force speed */
3873 		param_anar_100T4 = 0;
3874 		param_anar_10fdx = 0;
3875 		param_anar_10hdx = 0;
3876 		param_anar_100fdx = 0;
3877 		param_anar_100hdx = 0;
3878 		param_anar_asm_dir = 0;
3879 		param_anar_pause = 0;
3880 
3881 		if (i == 10)
3882 			param_anar_10hdx = 1;
3883 		else if (i == 100)
3884 			param_anar_100hdx = 1;
3885 	}
3886 
3887 	/*
3888 	 * Get the parameter values configured in .conf file.
3889 	 */
3890 	param_ipg1 = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0, "ipg1", ipg1) &
3891 	    ERI_MASK_8BIT;
3892 
3893 	param_ipg2 = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0, "ipg2", ipg2) &
3894 	    ERI_MASK_8BIT;
3895 
3896 	param_use_intphy = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0,
3897 	    "use_int_xcvr", use_int_xcvr) & ERI_MASK_1BIT;
3898 
3899 	param_use_intphy = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0,
3900 	    "pace_size", pace_size) & ERI_MASK_8BIT;
3901 
3902 	param_autoneg = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0,
3903 	    "adv_autoneg_cap", adv_autoneg_cap) & ERI_MASK_1BIT;
3904 
3905 	param_autoneg = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0,
3906 	    "adv_autoneg_cap", adv_autoneg_cap) & ERI_MASK_1BIT;
3907 
3908 	param_anar_100T4 = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0,
3909 	    "adv_100T4_cap", adv_100T4_cap) & ERI_MASK_1BIT;
3910 
3911 	param_anar_100fdx = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0,
3912 	    "adv_100fdx_cap", adv_100fdx_cap) & ERI_MASK_1BIT;
3913 
3914 	param_anar_100hdx = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0,
3915 	    "adv_100hdx_cap", adv_100hdx_cap) & ERI_MASK_1BIT;
3916 
3917 	param_anar_10fdx = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0,
3918 	    "adv_10fdx_cap", adv_10fdx_cap) & ERI_MASK_1BIT;
3919 
3920 	param_anar_10hdx = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0,
3921 	    "adv_10hdx_cap", adv_10hdx_cap) & ERI_MASK_1BIT;
3922 
3923 	param_ipg0 = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0, "ipg0", ipg0) &
3924 	    ERI_MASK_8BIT;
3925 
3926 	param_intr_blank_time = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0,
3927 	    "intr_blank_time", intr_blank_time) & ERI_MASK_8BIT;
3928 
3929 	param_intr_blank_packets = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0,
3930 	    "intr_blank_packets", intr_blank_packets) & ERI_MASK_8BIT;
3931 
3932 	param_lance_mode = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0,
3933 	    "lance_mode", lance_mode) & ERI_MASK_1BIT;
3934 
3935 	param_select_link = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0,
3936 	    "select_link", select_link) & ERI_MASK_1BIT;
3937 
3938 	param_default_link = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0,
3939 	    "default_link", default_link) & ERI_MASK_1BIT;
3940 
3941 	param_anar_asm_dir = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0,
3942 	    "adv_asm_dir_cap", adv_pauseTX_cap) & ERI_MASK_1BIT;
3943 
3944 	param_anar_pause = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0,
3945 	    "adv_pause_cap", adv_pauseRX_cap) & ERI_MASK_1BIT;
3946 
3947 	if (link_pulse_disabled)
3948 		erip->link_pulse_disabled = 1;
3949 	if (ddi_prop_exists(DDI_DEV_T_ANY, dip, 0, "link-pulse-disabled"))
3950 		erip->link_pulse_disabled = 1;
3951 
3952 	eri_statinit(erip);
3953 	return (0);
3954 
3955 }
3956 
3957 static void
eri_process_ndd_ioctl(struct eri * erip,queue_t * wq,mblk_t * mp,int cmd)3958 eri_process_ndd_ioctl(struct eri *erip, queue_t *wq, mblk_t *mp, int cmd)
3959 {
3960 
3961 	uint32_t old_ipg1, old_ipg2, old_use_int_xcvr, old_autoneg;
3962 	uint32_t old_100T4;
3963 	uint32_t old_100fdx, old_100hdx, old_10fdx, old_10hdx;
3964 	uint32_t old_ipg0, old_lance_mode;
3965 	uint32_t old_intr_blank_time, old_intr_blank_packets;
3966 	uint32_t old_asm_dir, old_pause;
3967 	uint32_t old_select_link, old_default_link;
3968 
3969 	switch (cmd) {
3970 	case ERI_ND_GET:
3971 
3972 		old_autoneg =	param_autoneg;
3973 		old_100T4 =	param_anar_100T4;
3974 		old_100fdx =	param_anar_100fdx;
3975 		old_100hdx =	param_anar_100hdx;
3976 		old_10fdx =	param_anar_10fdx;
3977 		old_10hdx =	param_anar_10hdx;
3978 		old_asm_dir =	param_anar_asm_dir;
3979 		old_pause =	param_anar_pause;
3980 
3981 		param_autoneg = old_autoneg & ~ERI_NOTUSR;
3982 		param_anar_100T4 = old_100T4 & ~ERI_NOTUSR;
3983 		param_anar_100fdx = old_100fdx & ~ERI_NOTUSR;
3984 		param_anar_100hdx = old_100hdx & ~ERI_NOTUSR;
3985 		param_anar_10fdx = old_10fdx & ~ERI_NOTUSR;
3986 		param_anar_10hdx = old_10hdx & ~ERI_NOTUSR;
3987 		param_anar_asm_dir = old_asm_dir & ~ERI_NOTUSR;
3988 		param_anar_pause = old_pause & ~ERI_NOTUSR;
3989 
3990 		if (!eri_nd_getset(wq, erip->g_nd, mp)) {
3991 			param_autoneg = old_autoneg;
3992 			param_anar_100T4 = old_100T4;
3993 			param_anar_100fdx = old_100fdx;
3994 			param_anar_100hdx = old_100hdx;
3995 			param_anar_10fdx = old_10fdx;
3996 			param_anar_10hdx = old_10hdx;
3997 			param_anar_asm_dir = old_asm_dir;
3998 			param_anar_pause = old_pause;
3999 			miocnak(wq, mp, 0, EINVAL);
4000 			return;
4001 		}
4002 		param_autoneg = old_autoneg;
4003 		param_anar_100T4 = old_100T4;
4004 		param_anar_100fdx = old_100fdx;
4005 		param_anar_100hdx = old_100hdx;
4006 		param_anar_10fdx = old_10fdx;
4007 		param_anar_10hdx = old_10hdx;
4008 		param_anar_asm_dir = old_asm_dir;
4009 		param_anar_pause = old_pause;
4010 
4011 		qreply(wq, mp);
4012 		break;
4013 
4014 	case ERI_ND_SET:
4015 		old_ipg0 = param_ipg0;
4016 		old_intr_blank_time = param_intr_blank_time;
4017 		old_intr_blank_packets = param_intr_blank_packets;
4018 		old_lance_mode = param_lance_mode;
4019 		old_ipg1 = param_ipg1;
4020 		old_ipg2 = param_ipg2;
4021 		old_use_int_xcvr = param_use_intphy;
4022 		old_autoneg = param_autoneg;
4023 		old_100T4 =	param_anar_100T4;
4024 		old_100fdx =	param_anar_100fdx;
4025 		old_100hdx =	param_anar_100hdx;
4026 		old_10fdx =	param_anar_10fdx;
4027 		old_10hdx =	param_anar_10hdx;
4028 		param_autoneg = 0xff;
4029 		old_asm_dir = param_anar_asm_dir;
4030 		param_anar_asm_dir = 0xff;
4031 		old_pause = param_anar_pause;
4032 		param_anar_pause = 0xff;
4033 		old_select_link = param_select_link;
4034 		old_default_link = param_default_link;
4035 
4036 		if (!eri_nd_getset(wq, erip->g_nd, mp)) {
4037 			param_autoneg = old_autoneg;
4038 			miocnak(wq, mp, 0, EINVAL);
4039 			return;
4040 		}
4041 
4042 		qreply(wq, mp);
4043 
4044 		if (param_autoneg != 0xff) {
4045 			ERI_DEBUG_MSG2(erip, NDD_MSG,
4046 			    "ndd_ioctl: new param_autoneg %d", param_autoneg);
4047 			param_linkup = 0;
4048 			erip->stats.link_up = LINK_STATE_DOWN;
4049 			erip->stats.link_duplex = LINK_DUPLEX_UNKNOWN;
4050 			(void) eri_init(erip);
4051 		} else {
4052 			param_autoneg = old_autoneg;
4053 			if ((old_use_int_xcvr != param_use_intphy) ||
4054 			    (old_default_link != param_default_link) ||
4055 			    (old_select_link != param_select_link)) {
4056 				param_linkup = 0;
4057 				erip->stats.link_up = LINK_STATE_DOWN;
4058 				erip->stats.link_duplex = LINK_DUPLEX_UNKNOWN;
4059 				(void) eri_init(erip);
4060 			} else if ((old_ipg1 != param_ipg1) ||
4061 			    (old_ipg2 != param_ipg2) ||
4062 			    (old_ipg0 != param_ipg0) ||
4063 			    (old_intr_blank_time != param_intr_blank_time) ||
4064 			    (old_intr_blank_packets !=
4065 			    param_intr_blank_packets) ||
4066 			    (old_lance_mode != param_lance_mode)) {
4067 				param_linkup = 0;
4068 				erip->stats.link_up = LINK_STATE_DOWN;
4069 				erip->stats.link_duplex = LINK_DUPLEX_UNKNOWN;
4070 				(void) eri_init(erip);
4071 			}
4072 		}
4073 		break;
4074 	}
4075 }
4076 
4077 
4078 static int
eri_stat_kstat_update(kstat_t * ksp,int rw)4079 eri_stat_kstat_update(kstat_t *ksp, int rw)
4080 {
4081 	struct eri *erip;
4082 	struct erikstat *erikp;
4083 	struct stats *esp;
4084 	boolean_t macupdate = B_FALSE;
4085 
4086 	erip = (struct eri *)ksp->ks_private;
4087 	erikp = (struct erikstat *)ksp->ks_data;
4088 
4089 	if (rw != KSTAT_READ)
4090 		return (EACCES);
4091 	/*
4092 	 * Update all the stats by reading all the counter registers.
4093 	 * Counter register stats are not updated till they overflow
4094 	 * and interrupt.
4095 	 */
4096 
4097 	mutex_enter(&erip->xmitlock);
4098 	if ((erip->flags & ERI_RUNNING) && (erip->flags & ERI_TXINIT)) {
4099 		erip->tx_completion =
4100 		    GET_ETXREG(tx_completion) & ETX_COMPLETION_MASK;
4101 		macupdate |= eri_reclaim(erip, erip->tx_completion);
4102 	}
4103 	mutex_exit(&erip->xmitlock);
4104 	if (macupdate)
4105 		mac_tx_update(erip->mh);
4106 
4107 	eri_savecntrs(erip);
4108 
4109 	esp = &erip->stats;
4110 
4111 	erikp->erik_txmac_maxpkt_err.value.ul = esp->txmac_maxpkt_err;
4112 	erikp->erik_defer_timer_exp.value.ul = esp->defer_timer_exp;
4113 	erikp->erik_peak_attempt_cnt.value.ul = esp->peak_attempt_cnt;
4114 	erikp->erik_tx_hang.value.ul	= esp->tx_hang;
4115 
4116 	erikp->erik_no_free_rx_desc.value.ul	= esp->no_free_rx_desc;
4117 
4118 	erikp->erik_rx_hang.value.ul		= esp->rx_hang;
4119 	erikp->erik_rx_length_err.value.ul	= esp->rx_length_err;
4120 	erikp->erik_rx_code_viol_err.value.ul	= esp->rx_code_viol_err;
4121 	erikp->erik_pause_rxcount.value.ul	= esp->pause_rxcount;
4122 	erikp->erik_pause_oncount.value.ul	= esp->pause_oncount;
4123 	erikp->erik_pause_offcount.value.ul	= esp->pause_offcount;
4124 	erikp->erik_pause_time_count.value.ul	= esp->pause_time_count;
4125 
4126 	erikp->erik_inits.value.ul		= esp->inits;
4127 	erikp->erik_jab.value.ul		= esp->jab;
4128 	erikp->erik_notmds.value.ul		= esp->notmds;
4129 	erikp->erik_allocbfail.value.ul		= esp->allocbfail;
4130 	erikp->erik_drop.value.ul		= esp->drop;
4131 	erikp->erik_rx_bad_pkts.value.ul	= esp->rx_bad_pkts;
4132 	erikp->erik_rx_inits.value.ul		= esp->rx_inits;
4133 	erikp->erik_tx_inits.value.ul		= esp->tx_inits;
4134 	erikp->erik_rxtag_err.value.ul		= esp->rxtag_err;
4135 	erikp->erik_parity_error.value.ul	= esp->parity_error;
4136 	erikp->erik_pci_error_int.value.ul	= esp->pci_error_int;
4137 	erikp->erik_unknown_fatal.value.ul	= esp->unknown_fatal;
4138 	erikp->erik_pci_data_parity_err.value.ul = esp->pci_data_parity_err;
4139 	erikp->erik_pci_signal_target_abort.value.ul =
4140 	    esp->pci_signal_target_abort;
4141 	erikp->erik_pci_rcvd_target_abort.value.ul =
4142 	    esp->pci_rcvd_target_abort;
4143 	erikp->erik_pci_rcvd_master_abort.value.ul =
4144 	    esp->pci_rcvd_master_abort;
4145 	erikp->erik_pci_signal_system_err.value.ul =
4146 	    esp->pci_signal_system_err;
4147 	erikp->erik_pci_det_parity_err.value.ul = esp->pci_det_parity_err;
4148 
4149 	erikp->erik_pmcap.value.ul = esp->pmcap;
4150 
4151 	return (0);
4152 }
4153 
4154 static void
eri_statinit(struct eri * erip)4155 eri_statinit(struct eri *erip)
4156 {
4157 	struct	kstat	*ksp;
4158 	struct	erikstat	*erikp;
4159 
4160 	if ((ksp = kstat_create("eri", erip->instance, "driver_info", "net",
4161 	    KSTAT_TYPE_NAMED,
4162 	    sizeof (struct erikstat) / sizeof (kstat_named_t), 0)) == NULL) {
4163 		ERI_FAULT_MSG1(erip, SEVERITY_LOW, ERI_VERB_MSG,
4164 		    kstat_create_fail_msg);
4165 		return;
4166 	}
4167 
4168 	erip->ksp = ksp;
4169 	erikp = (struct erikstat *)(ksp->ks_data);
4170 	/*
4171 	 * MIB II kstat variables
4172 	 */
4173 
4174 	kstat_named_init(&erikp->erik_inits, "inits", KSTAT_DATA_ULONG);
4175 
4176 	kstat_named_init(&erikp->erik_txmac_maxpkt_err,	"txmac_maxpkt_err",
4177 	    KSTAT_DATA_ULONG);
4178 	kstat_named_init(&erikp->erik_defer_timer_exp, "defer_timer_exp",
4179 	    KSTAT_DATA_ULONG);
4180 	kstat_named_init(&erikp->erik_peak_attempt_cnt,	"peak_attempt_cnt",
4181 	    KSTAT_DATA_ULONG);
4182 	kstat_named_init(&erikp->erik_tx_hang, "tx_hang", KSTAT_DATA_ULONG);
4183 
4184 	kstat_named_init(&erikp->erik_no_free_rx_desc, "no_free_rx_desc",
4185 	    KSTAT_DATA_ULONG);
4186 	kstat_named_init(&erikp->erik_rx_hang, "rx_hang", KSTAT_DATA_ULONG);
4187 	kstat_named_init(&erikp->erik_rx_length_err, "rx_length_err",
4188 	    KSTAT_DATA_ULONG);
4189 	kstat_named_init(&erikp->erik_rx_code_viol_err,	"rx_code_viol_err",
4190 	    KSTAT_DATA_ULONG);
4191 
4192 	kstat_named_init(&erikp->erik_pause_rxcount, "pause_rcv_cnt",
4193 	    KSTAT_DATA_ULONG);
4194 
4195 	kstat_named_init(&erikp->erik_pause_oncount, "pause_on_cnt",
4196 	    KSTAT_DATA_ULONG);
4197 
4198 	kstat_named_init(&erikp->erik_pause_offcount, "pause_off_cnt",
4199 	    KSTAT_DATA_ULONG);
4200 	kstat_named_init(&erikp->erik_pause_time_count,	"pause_time_cnt",
4201 	    KSTAT_DATA_ULONG);
4202 
4203 	kstat_named_init(&erikp->erik_jab, "jabber", KSTAT_DATA_ULONG);
4204 	kstat_named_init(&erikp->erik_notmds, "no_tmds", KSTAT_DATA_ULONG);
4205 	kstat_named_init(&erikp->erik_allocbfail, "allocbfail",
4206 	    KSTAT_DATA_ULONG);
4207 
4208 	kstat_named_init(&erikp->erik_drop, "drop", KSTAT_DATA_ULONG);
4209 
4210 	kstat_named_init(&erikp->erik_rx_bad_pkts, "bad_pkts",
4211 	    KSTAT_DATA_ULONG);
4212 
4213 	kstat_named_init(&erikp->erik_rx_inits, "rx_inits", KSTAT_DATA_ULONG);
4214 
4215 	kstat_named_init(&erikp->erik_tx_inits, "tx_inits", KSTAT_DATA_ULONG);
4216 
4217 	kstat_named_init(&erikp->erik_rxtag_err, "rxtag_error",
4218 	    KSTAT_DATA_ULONG);
4219 
4220 	kstat_named_init(&erikp->erik_parity_error, "parity_error",
4221 	    KSTAT_DATA_ULONG);
4222 
4223 	kstat_named_init(&erikp->erik_pci_error_int, "pci_error_interrupt",
4224 	    KSTAT_DATA_ULONG);
4225 	kstat_named_init(&erikp->erik_unknown_fatal, "unknown_fatal",
4226 	    KSTAT_DATA_ULONG);
4227 	kstat_named_init(&erikp->erik_pci_data_parity_err,
4228 	    "pci_data_parity_err", KSTAT_DATA_ULONG);
4229 	kstat_named_init(&erikp->erik_pci_signal_target_abort,
4230 	    "pci_signal_target_abort", KSTAT_DATA_ULONG);
4231 	kstat_named_init(&erikp->erik_pci_rcvd_target_abort,
4232 	    "pci_rcvd_target_abort", KSTAT_DATA_ULONG);
4233 	kstat_named_init(&erikp->erik_pci_rcvd_master_abort,
4234 	    "pci_rcvd_master_abort", KSTAT_DATA_ULONG);
4235 	kstat_named_init(&erikp->erik_pci_signal_system_err,
4236 	    "pci_signal_system_err", KSTAT_DATA_ULONG);
4237 	kstat_named_init(&erikp->erik_pci_det_parity_err,
4238 	    "pci_det_parity_err", KSTAT_DATA_ULONG);
4239 
4240 	kstat_named_init(&erikp->erik_pmcap, "pmcap", KSTAT_DATA_ULONG);
4241 
4242 
4243 	ksp->ks_update = eri_stat_kstat_update;
4244 	ksp->ks_private = (void *) erip;
4245 	kstat_install(ksp);
4246 }
4247 
4248 
4249 /* <<<<<<<<<<<<<<<<<<<<<<< NDD SUPPORT FUNCTIONS	>>>>>>>>>>>>>>>>>>> */
4250 /*
4251  * ndd support functions to get/set parameters
4252  */
4253 /* Free the Named Dispatch Table by calling eri_nd_free */
4254 static void
eri_param_cleanup(struct eri * erip)4255 eri_param_cleanup(struct eri *erip)
4256 {
4257 	if (erip->g_nd)
4258 		(void) eri_nd_free(&erip->g_nd);
4259 }
4260 
4261 /*
4262  * Extracts the value from the eri parameter array and prints the
4263  * parameter value. cp points to the required parameter.
4264  */
4265 /* ARGSUSED */
4266 static int
eri_param_get(queue_t * q,mblk_t * mp,caddr_t cp)4267 eri_param_get(queue_t *q, mblk_t *mp, caddr_t cp)
4268 {
4269 	param_t		*eripa = (void *)cp;
4270 	int		param_len = 1;
4271 	uint32_t	param_val;
4272 	mblk_t		*nmp;
4273 	int		ok;
4274 
4275 	param_val = eripa->param_val;
4276 	/*
4277 	 * Calculate space required in mblk.
4278 	 * Remember to include NULL terminator.
4279 	 */
4280 	do {
4281 		param_len++;
4282 		param_val /= 10;
4283 	} while (param_val);
4284 
4285 	ok = eri_mk_mblk_tail_space(mp, &nmp, param_len);
4286 	if (ok == 0) {
4287 		(void) sprintf((char *)nmp->b_wptr, "%d", eripa->param_val);
4288 		nmp->b_wptr += param_len;
4289 	}
4290 
4291 	return (ok);
4292 }
4293 
4294 /*
4295  * Check if there is space for p_val at the end if mblk.
4296  * If not, allocate new 1k mblk.
4297  */
4298 static int
eri_mk_mblk_tail_space(mblk_t * mp,mblk_t ** nmp,size_t sz)4299 eri_mk_mblk_tail_space(mblk_t *mp, mblk_t **nmp, size_t sz)
4300 {
4301 	mblk_t *tmp = mp;
4302 
4303 	while (tmp->b_cont)
4304 		tmp = tmp->b_cont;
4305 
4306 	if (MBLKTAIL(tmp) < sz) {
4307 		if ((tmp->b_cont = allocb(1024, BPRI_HI)) == NULL)
4308 			return (ENOMEM);
4309 		tmp = tmp->b_cont;
4310 	}
4311 	*nmp = tmp;
4312 	return (0);
4313 }
4314 
4315 /*
4316  * Register each element of the parameter array with the
4317  * named dispatch handler. Each element is loaded using
4318  * eri_nd_load()
4319  */
4320 static int
eri_param_register(struct eri * erip,param_t * eripa,int cnt)4321 eri_param_register(struct eri *erip, param_t *eripa, int cnt)
4322 {
4323 	/* cnt gives the count of the number of */
4324 	/* elements present in the parameter array */
4325 
4326 	int i;
4327 
4328 	for (i = 0; i < cnt; i++, eripa++) {
4329 		pfi_t	setter = (pfi_t)eri_param_set;
4330 
4331 		switch (eripa->param_name[0]) {
4332 		case '+':	/* read-write */
4333 			setter = (pfi_t)eri_param_set;
4334 			break;
4335 
4336 		case '-':	/* read-only */
4337 			setter = NULL;
4338 			break;
4339 
4340 		case '!':	/* read-only, not displayed */
4341 		case '%':	/* read-write, not displayed */
4342 			continue;
4343 		}
4344 
4345 		if (!eri_nd_load(&erip->g_nd, eripa->param_name + 1,
4346 		    (pfi_t)eri_param_get, setter, (caddr_t)eripa)) {
4347 			(void) eri_nd_free(&erip->g_nd);
4348 			return (B_FALSE);
4349 		}
4350 	}
4351 
4352 	return (B_TRUE);
4353 }
4354 
4355 /*
4356  * Sets the eri parameter to the value in the param_register using
4357  * eri_nd_load().
4358  */
4359 /* ARGSUSED */
4360 static int
eri_param_set(queue_t * q,mblk_t * mp,char * value,caddr_t cp)4361 eri_param_set(queue_t *q, mblk_t *mp, char *value, caddr_t cp)
4362 {
4363 	char *end;
4364 	long new_value;
4365 	param_t	*eripa = (void *)cp;
4366 
4367 	if (ddi_strtol(value, &end, 10, &new_value) != 0)
4368 		return (EINVAL);
4369 	if (end == value || new_value < eripa->param_min ||
4370 	    new_value > eripa->param_max) {
4371 			return (EINVAL);
4372 	}
4373 	eripa->param_val = (uint32_t)new_value;
4374 	return (0);
4375 
4376 }
4377 
4378 /* Free the table pointed to by 'ndp' */
4379 static void
eri_nd_free(caddr_t * nd_pparam)4380 eri_nd_free(caddr_t *nd_pparam)
4381 {
4382 	ND	*nd;
4383 
4384 	if ((nd = (void *)(*nd_pparam)) != NULL) {
4385 		if (nd->nd_tbl)
4386 			kmem_free(nd->nd_tbl, nd->nd_size);
4387 		kmem_free(nd, sizeof (ND));
4388 		*nd_pparam = NULL;
4389 	}
4390 }
4391 
4392 static int
eri_nd_getset(queue_t * q,caddr_t nd_param,MBLKP mp)4393 eri_nd_getset(queue_t *q, caddr_t nd_param, MBLKP mp)
4394 {
4395 	int	err;
4396 	IOCP	iocp;
4397 	MBLKP	mp1;
4398 	ND	*nd;
4399 	NDE	*nde;
4400 	char	*valp;
4401 	size_t	avail;
4402 	mblk_t	*nmp;
4403 
4404 	if (!nd_param)
4405 		return (B_FALSE);
4406 
4407 	nd = (void *)nd_param;
4408 	iocp = (void *)mp->b_rptr;
4409 	if ((iocp->ioc_count == 0) || !(mp1 = mp->b_cont)) {
4410 		mp->b_datap->db_type = M_IOCACK;
4411 		iocp->ioc_count = 0;
4412 		iocp->ioc_error = EINVAL;
4413 		return (B_TRUE);
4414 	}
4415 	/*
4416 	 * NOTE - logic throughout nd_xxx assumes single data block for ioctl.
4417 	 *	However, existing code sends in some big buffers.
4418 	 */
4419 	avail = iocp->ioc_count;
4420 	if (mp1->b_cont) {
4421 		freemsg(mp1->b_cont);
4422 		mp1->b_cont = NULL;
4423 	}
4424 
4425 	mp1->b_datap->db_lim[-1] = '\0';	/* Force null termination */
4426 	valp = (char *)mp1->b_rptr;
4427 
4428 	for (nde = nd->nd_tbl; /* */; nde++) {
4429 		if (!nde->nde_name)
4430 			return (B_FALSE);
4431 		if (strcmp(nde->nde_name, valp) == 0)
4432 			break;
4433 	}
4434 	err = EINVAL;
4435 
4436 	while (*valp++)
4437 		;
4438 
4439 	if (!*valp || valp >= (char *)mp1->b_wptr)
4440 		valp = NULL;
4441 
4442 	switch (iocp->ioc_cmd) {
4443 	case ND_GET:
4444 	/*
4445 	 * (XXX) hack: "*valp" is size of user buffer for copyout. If result
4446 	 * of action routine is too big, free excess and return ioc_rval as buf
4447 	 * size needed.  Return as many mblocks as will fit, free the rest.  For
4448 	 * backward compatibility, assume size of orig ioctl buffer if "*valp"
4449 	 * bad or not given.
4450 	 */
4451 		if (valp)
4452 			(void) ddi_strtol(valp, NULL, 10, (long *)&avail);
4453 		/* We overwrite the name/value with the reply data */
4454 		{
4455 			mblk_t *mp2 = mp1;
4456 
4457 			while (mp2) {
4458 				mp2->b_wptr = mp2->b_rptr;
4459 				mp2 = mp2->b_cont;
4460 			}
4461 		}
4462 		err = (*nde->nde_get_pfi)(q, mp1, nde->nde_data, iocp->ioc_cr);
4463 		if (!err) {
4464 			size_t	size_out;
4465 			ssize_t	excess;
4466 
4467 			iocp->ioc_rval = 0;
4468 
4469 			/* Tack on the null */
4470 			err = eri_mk_mblk_tail_space(mp1, &nmp, 1);
4471 			if (!err) {
4472 				*nmp->b_wptr++ = '\0';
4473 				size_out = msgdsize(mp1);
4474 				excess = size_out - avail;
4475 				if (excess > 0) {
4476 					iocp->ioc_rval = (unsigned)size_out;
4477 					size_out -= excess;
4478 					(void) adjmsg(mp1, -(excess + 1));
4479 					err = eri_mk_mblk_tail_space(mp1,
4480 					    &nmp, 1);
4481 					if (!err)
4482 						*nmp->b_wptr++ = '\0';
4483 					else
4484 						size_out = 0;
4485 				}
4486 
4487 			} else
4488 				size_out = 0;
4489 
4490 			iocp->ioc_count = size_out;
4491 		}
4492 		break;
4493 
4494 	case ND_SET:
4495 		if (valp) {
4496 			err = (*nde->nde_set_pfi)(q, mp1, valp,
4497 			    nde->nde_data, iocp->ioc_cr);
4498 			iocp->ioc_count = 0;
4499 			freemsg(mp1);
4500 			mp->b_cont = NULL;
4501 		}
4502 		break;
4503 	}
4504 
4505 	iocp->ioc_error = err;
4506 	mp->b_datap->db_type = M_IOCACK;
4507 	return (B_TRUE);
4508 }
4509 
4510 /*
4511  * Load 'name' into the named dispatch table pointed to by 'ndp'.
4512  * 'ndp' should be the address of a char pointer cell.  If the table
4513  * does not exist (*ndp == 0), a new table is allocated and 'ndp'
4514  * is stuffed.  If there is not enough space in the table for a new
4515  * entry, more space is allocated.
4516  */
4517 static boolean_t
eri_nd_load(caddr_t * nd_pparam,char * name,pfi_t get_pfi,pfi_t set_pfi,caddr_t data)4518 eri_nd_load(caddr_t *nd_pparam, char *name, pfi_t get_pfi,
4519     pfi_t set_pfi, caddr_t data)
4520 {
4521 	ND	*nd;
4522 	NDE	*nde;
4523 
4524 	if (!nd_pparam)
4525 		return (B_FALSE);
4526 
4527 	if ((nd = (void *)(*nd_pparam)) == NULL) {
4528 		if ((nd = (ND *)kmem_zalloc(sizeof (ND), KM_NOSLEEP))
4529 		    == NULL)
4530 			return (B_FALSE);
4531 		*nd_pparam = (caddr_t)nd;
4532 	}
4533 	if (nd->nd_tbl) {
4534 		for (nde = nd->nd_tbl; nde->nde_name; nde++) {
4535 			if (strcmp(name, nde->nde_name) == 0)
4536 				goto fill_it;
4537 		}
4538 	}
4539 	if (nd->nd_free_count <= 1) {
4540 		if ((nde = (NDE *)kmem_zalloc(nd->nd_size +
4541 		    NDE_ALLOC_SIZE, KM_NOSLEEP)) == NULL)
4542 			return (B_FALSE);
4543 
4544 		nd->nd_free_count += NDE_ALLOC_COUNT;
4545 		if (nd->nd_tbl) {
4546 			bcopy((char *)nd->nd_tbl, (char *)nde, nd->nd_size);
4547 			kmem_free((char *)nd->nd_tbl, nd->nd_size);
4548 		} else {
4549 			nd->nd_free_count--;
4550 			nde->nde_name = "?";
4551 			nde->nde_get_pfi = nd_get_names;
4552 			nde->nde_set_pfi = nd_set_default;
4553 		}
4554 		nde->nde_data = (caddr_t)nd;
4555 		nd->nd_tbl = nde;
4556 		nd->nd_size += NDE_ALLOC_SIZE;
4557 	}
4558 	for (nde = nd->nd_tbl; nde->nde_name; nde++)
4559 		;
4560 	nd->nd_free_count--;
4561 fill_it:
4562 	nde->nde_name = name;
4563 	nde->nde_get_pfi = get_pfi ? get_pfi : nd_get_default;
4564 	nde->nde_set_pfi = set_pfi ? set_pfi : nd_set_default;
4565 	nde->nde_data = data;
4566 	return (B_TRUE);
4567 }
4568 
4569 /*
4570  * Hardening Functions
4571  * New Section
4572  */
4573 #ifdef  DEBUG
4574 /*PRINTFLIKE5*/
4575 static void
eri_debug_msg(const char * file,int line,struct eri * erip,debug_msg_t type,const char * fmt,...)4576 eri_debug_msg(const char *file, int line, struct eri *erip,
4577     debug_msg_t type, const char *fmt, ...)
4578 {
4579 	char	msg_buffer[255];
4580 	va_list ap;
4581 
4582 	va_start(ap, fmt);
4583 	(void) vsprintf(msg_buffer, fmt, ap);
4584 	va_end(ap);
4585 
4586 	if (eri_msg_out & ERI_CON_MSG) {
4587 		if (((type <= eri_debug_level) && eri_debug_all) ||
4588 		    ((type == eri_debug_level) && !eri_debug_all)) {
4589 			if (erip)
4590 				cmn_err(CE_CONT, "D: %s %s%d:(%s%d) %s\n",
4591 				    debug_msg_string[type], file, line,
4592 				    ddi_driver_name(erip->dip), erip->instance,
4593 				    msg_buffer);
4594 			else
4595 				cmn_err(CE_CONT, "D: %s %s(%d): %s\n",
4596 				    debug_msg_string[type], file,
4597 				    line, msg_buffer);
4598 		}
4599 	}
4600 }
4601 #endif
4602 
4603 
4604 /*PRINTFLIKE4*/
4605 static void
eri_fault_msg(struct eri * erip,uint_t severity,msg_t type,const char * fmt,...)4606 eri_fault_msg(struct eri *erip, uint_t severity, msg_t type,
4607     const char *fmt, ...)
4608 {
4609 	char	msg_buffer[255];
4610 	va_list	ap;
4611 
4612 	va_start(ap, fmt);
4613 	(void) vsprintf(msg_buffer, fmt, ap);
4614 	va_end(ap);
4615 
4616 	if (erip == NULL) {
4617 		cmn_err(CE_NOTE, "eri : %s", msg_buffer);
4618 		return;
4619 	}
4620 
4621 	if (severity == SEVERITY_HIGH) {
4622 		cmn_err(CE_WARN, "%s%d : %s", ddi_driver_name(erip->dip),
4623 		    erip->instance, msg_buffer);
4624 	} else switch (type) {
4625 	case ERI_VERB_MSG:
4626 		cmn_err(CE_CONT, "?%s%d : %s", ddi_driver_name(erip->dip),
4627 		    erip->instance, msg_buffer);
4628 		break;
4629 	case ERI_LOG_MSG:
4630 		cmn_err(CE_NOTE, "^%s%d : %s", ddi_driver_name(erip->dip),
4631 		    erip->instance, msg_buffer);
4632 		break;
4633 	case ERI_BUF_MSG:
4634 		cmn_err(CE_NOTE, "!%s%d : %s", ddi_driver_name(erip->dip),
4635 		    erip->instance, msg_buffer);
4636 		break;
4637 	case ERI_CON_MSG:
4638 		cmn_err(CE_CONT, "%s%d : %s", ddi_driver_name(erip->dip),
4639 		    erip->instance, msg_buffer);
4640 	default:
4641 		break;
4642 	}
4643 }
4644 
4645 /*
4646  * Transceiver (xcvr) Functions
4647  * New Section
4648  */
4649 /*
4650  * eri_stop_timer function is used by a function before doing link-related
4651  * processing. It locks the "linklock" to protect the link-related data
4652  * structures. This lock will be subsequently released in eri_start_timer().
4653  */
4654 static void
eri_stop_timer(struct eri * erip)4655 eri_stop_timer(struct eri *erip)
4656 {
4657 	timeout_id_t id;
4658 	mutex_enter(&erip->linklock);
4659 	if (erip->timerid) {
4660 		erip->flags |= ERI_NOTIMEOUTS; /* prevent multiple timeout */
4661 		id = erip->timerid;
4662 		erip->timerid = 0; /* prevent other thread do untimeout */
4663 		mutex_exit(&erip->linklock); /* no mutex across untimeout() */
4664 
4665 		(void) untimeout(id);
4666 		mutex_enter(&erip->linklock); /* acquire mutex again */
4667 		erip->flags &= ~ERI_NOTIMEOUTS;
4668 	}
4669 }
4670 
4671 /*
4672  * If msec parameter is zero, just release "linklock".
4673  */
4674 static void
eri_start_timer(struct eri * erip,fptrv_t func,clock_t msec)4675 eri_start_timer(struct eri *erip, fptrv_t func, clock_t msec)
4676 {
4677 	if (msec) {
4678 		if (!(erip->flags & ERI_NOTIMEOUTS) &&
4679 		    (erip->flags & ERI_RUNNING)) {
4680 			erip->timerid = timeout(func, (caddr_t)erip,
4681 			    drv_usectohz(1000*msec));
4682 		}
4683 	}
4684 
4685 	mutex_exit(&erip->linklock);
4686 }
4687 
4688 static int
eri_new_xcvr(struct eri * erip)4689 eri_new_xcvr(struct eri *erip)
4690 {
4691 	int		status;
4692 	uint32_t	cfg;
4693 	int		old_transceiver;
4694 
4695 	if (pci_report_pmcap(erip->dip, PCI_PM_IDLESPEED,
4696 	    PCI_PM_IDLESPEED_NONE) == DDI_SUCCESS)
4697 		erip->stats.pmcap = ERI_PMCAP_NONE;
4698 
4699 	status = B_FALSE;			/* no change */
4700 	cfg = GET_MIFREG(mif_cfg);
4701 	ERI_DEBUG_MSG2(erip, MIF_MSG, "cfg value = %X", cfg);
4702 	old_transceiver = param_transceiver;
4703 
4704 	if ((cfg & ERI_MIF_CFGM1) && !use_int_xcvr) {
4705 		ERI_DEBUG_MSG1(erip, PHY_MSG, "Found External XCVR");
4706 		/*
4707 		 * An External Transceiver was found and it takes priority
4708 		 * over an internal, given the use_int_xcvr flag
4709 		 * is false.
4710 		 */
4711 		if (old_transceiver != EXTERNAL_XCVR) {
4712 			/*
4713 			 * External transceiver has just been plugged
4714 			 * in. Isolate the internal Transceiver.
4715 			 */
4716 			if (old_transceiver == INTERNAL_XCVR) {
4717 				eri_mii_write(erip, ERI_PHY_BMCR,
4718 				    (PHY_BMCR_ISOLATE | PHY_BMCR_PWRDN |
4719 				    PHY_BMCR_LPBK));
4720 			}
4721 			status = B_TRUE;
4722 		}
4723 		/*
4724 		 * Select the external Transceiver.
4725 		 */
4726 		erip->phyad = ERI_EXTERNAL_PHYAD;
4727 		param_transceiver = EXTERNAL_XCVR;
4728 		erip->mif_config &= ~ERI_MIF_CFGPD;
4729 		erip->mif_config |= (erip->phyad << ERI_MIF_CFGPD_SHIFT);
4730 		erip->mif_config |= ERI_MIF_CFGPS;
4731 		PUT_MIFREG(mif_cfg, erip->mif_config);
4732 
4733 		PUT_MACREG(xifc, GET_MACREG(xifc) | BMAC_XIFC_MIIBUF_OE);
4734 		drv_usecwait(ERI_MIF_POLL_DELAY);
4735 	} else if (cfg & ERI_MIF_CFGM0) {
4736 		ERI_DEBUG_MSG1(erip, PHY_MSG, "Found Internal XCVR");
4737 		/*
4738 		 * An Internal Transceiver was found or the
4739 		 * use_int_xcvr flag is true.
4740 		 */
4741 		if (old_transceiver != INTERNAL_XCVR) {
4742 			/*
4743 			 * The external transceiver has just been
4744 			 * disconnected or we're moving from a no
4745 			 * transceiver state.
4746 			 */
4747 			if ((old_transceiver == EXTERNAL_XCVR) &&
4748 			    (cfg & ERI_MIF_CFGM0)) {
4749 				eri_mii_write(erip, ERI_PHY_BMCR,
4750 				    (PHY_BMCR_ISOLATE | PHY_BMCR_PWRDN |
4751 				    PHY_BMCR_LPBK));
4752 			}
4753 			status = B_TRUE;
4754 		}
4755 		/*
4756 		 * Select the internal transceiver.
4757 		 */
4758 		erip->phyad = ERI_INTERNAL_PHYAD;
4759 		param_transceiver = INTERNAL_XCVR;
4760 		erip->mif_config &= ~ERI_MIF_CFGPD;
4761 		erip->mif_config |= (erip->phyad << ERI_MIF_CFGPD_SHIFT);
4762 		erip->mif_config &= ~ERI_MIF_CFGPS;
4763 		PUT_MIFREG(mif_cfg, erip->mif_config);
4764 
4765 		PUT_MACREG(xifc, GET_MACREG(xifc) & ~ BMAC_XIFC_MIIBUF_OE);
4766 		drv_usecwait(ERI_MIF_POLL_DELAY);
4767 	} else {
4768 		/*
4769 		 * Did not find a valid xcvr.
4770 		 */
4771 		ERI_FAULT_MSG1(erip, SEVERITY_NONE, ERI_VERB_MSG,
4772 		    "Eri_new_xcvr : Select None");
4773 		param_transceiver = NO_XCVR;
4774 		erip->xcvr_status = PHY_LINK_DOWN;
4775 	}
4776 
4777 	if (erip->stats.pmcap == ERI_PMCAP_NONE) {
4778 		if (pci_report_pmcap(erip->dip, PCI_PM_IDLESPEED,
4779 		    (void *)4000) == DDI_SUCCESS)
4780 			erip->stats.pmcap = ERI_PMCAP_4MHZ;
4781 	}
4782 
4783 	return (status);
4784 }
4785 
4786 /*
4787  * This function is used for timers.  No locks are held on timer expiry.
4788  */
4789 static void
eri_check_link(struct eri * erip)4790 eri_check_link(struct eri *erip)
4791 {
4792 	link_state_t	linkupdate = eri_check_link_noind(erip);
4793 
4794 	if (linkupdate != LINK_STATE_UNKNOWN)
4795 		mac_link_update(erip->mh, linkupdate);
4796 }
4797 
4798 /*
4799  * Compare our xcvr in our structure to the xcvr that we get from
4800  * eri_check_mii_xcvr(). If they are different then mark the
4801  * link down, reset xcvr, and return.
4802  *
4803  * Note without the MII connector, conditions can not change that
4804  * will then use a external phy, thus this code has been cleaned
4805  * to not even call the function or to possibly change the xcvr.
4806  */
4807 static uint32_t
eri_check_link_noind(struct eri * erip)4808 eri_check_link_noind(struct eri *erip)
4809 {
4810 	uint16_t stat, control, mif_ints;
4811 	uint32_t link_timeout	= ERI_LINKCHECK_TIMER;
4812 	uint32_t linkupdate = 0;
4813 
4814 	eri_stop_timer(erip);	/* acquire linklock */
4815 
4816 	mutex_enter(&erip->xmitlock);
4817 	mutex_enter(&erip->xcvrlock);
4818 	eri_mif_poll(erip, MIF_POLL_STOP);
4819 
4820 	(void) eri_mii_read(erip, ERI_PHY_BMSR, &stat);
4821 	mif_ints = erip->mii_status ^ stat;
4822 
4823 	if (erip->openloop_autoneg) {
4824 		(void) eri_mii_read(erip, ERI_PHY_BMSR, &stat);
4825 		ERI_DEBUG_MSG3(erip, XCVR_MSG,
4826 		    "eri_check_link:openloop stat %X mii_status %X",
4827 		    stat, erip->mii_status);
4828 		(void) eri_mii_read(erip, ERI_PHY_BMCR, &control);
4829 		if (!(stat & PHY_BMSR_LNKSTS) &&
4830 		    (erip->openloop_autoneg < 2)) {
4831 			if (param_speed) {
4832 				control &= ~PHY_BMCR_100M;
4833 				param_anlpar_100hdx = 0;
4834 				param_anlpar_10hdx = 1;
4835 				param_speed = 0;
4836 				erip->stats.ifspeed = 10;
4837 
4838 			} else {
4839 				control |= PHY_BMCR_100M;
4840 				param_anlpar_100hdx = 1;
4841 				param_anlpar_10hdx = 0;
4842 				param_speed = 1;
4843 				erip->stats.ifspeed = 100;
4844 			}
4845 			ERI_DEBUG_MSG3(erip, XCVR_MSG,
4846 			    "eri_check_link: trying speed %X stat %X",
4847 			    param_speed, stat);
4848 
4849 			erip->openloop_autoneg ++;
4850 			eri_mii_write(erip, ERI_PHY_BMCR, control);
4851 			link_timeout = ERI_P_FAULT_TIMER;
4852 		} else {
4853 			erip->openloop_autoneg = 0;
4854 			linkupdate = eri_mif_check(erip, stat, stat);
4855 			if (erip->openloop_autoneg)
4856 				link_timeout = ERI_P_FAULT_TIMER;
4857 		}
4858 		eri_mif_poll(erip, MIF_POLL_START);
4859 		mutex_exit(&erip->xcvrlock);
4860 		mutex_exit(&erip->xmitlock);
4861 
4862 		eri_start_timer(erip, eri_check_link, link_timeout);
4863 		return (linkupdate);
4864 	}
4865 
4866 	linkupdate = eri_mif_check(erip, mif_ints, stat);
4867 	eri_mif_poll(erip, MIF_POLL_START);
4868 	mutex_exit(&erip->xcvrlock);
4869 	mutex_exit(&erip->xmitlock);
4870 
4871 #ifdef ERI_RMAC_HANG_WORKAROUND
4872 	/*
4873 	 * Check if rx hung.
4874 	 */
4875 	if ((erip->flags & ERI_RUNNING) && param_linkup) {
4876 		if (erip->check_rmac_hang) {
4877 			ERI_DEBUG_MSG5(erip,
4878 			    NONFATAL_MSG,
4879 			    "check1 %d: macsm:%8x wr:%2x rd:%2x",
4880 			    erip->check_rmac_hang,
4881 			    GET_MACREG(macsm),
4882 			    GET_ERXREG(rxfifo_wr_ptr),
4883 			    GET_ERXREG(rxfifo_rd_ptr));
4884 
4885 			erip->check_rmac_hang = 0;
4886 			erip->check2_rmac_hang ++;
4887 
4888 			erip->rxfifo_wr_ptr_c = GET_ERXREG(rxfifo_wr_ptr);
4889 			erip->rxfifo_rd_ptr_c = GET_ERXREG(rxfifo_rd_ptr);
4890 
4891 			eri_start_timer(erip, eri_check_link,
4892 			    ERI_CHECK_HANG_TIMER);
4893 			return (linkupdate);
4894 		}
4895 
4896 		if (erip->check2_rmac_hang) {
4897 			ERI_DEBUG_MSG5(erip,
4898 			    NONFATAL_MSG,
4899 			    "check2 %d: macsm:%8x wr:%2x rd:%2x",
4900 			    erip->check2_rmac_hang,
4901 			    GET_MACREG(macsm),
4902 			    GET_ERXREG(rxfifo_wr_ptr),
4903 			    GET_ERXREG(rxfifo_rd_ptr));
4904 
4905 			erip->check2_rmac_hang = 0;
4906 
4907 			erip->rxfifo_wr_ptr = GET_ERXREG(rxfifo_wr_ptr);
4908 			erip->rxfifo_rd_ptr = GET_ERXREG(rxfifo_rd_ptr);
4909 
4910 			if (((GET_MACREG(macsm) & BMAC_OVERFLOW_STATE) ==
4911 			    BMAC_OVERFLOW_STATE) &&
4912 			    ((erip->rxfifo_wr_ptr_c == erip->rxfifo_rd_ptr_c) ||
4913 			    ((erip->rxfifo_rd_ptr == erip->rxfifo_rd_ptr_c) &&
4914 			    (erip->rxfifo_wr_ptr == erip->rxfifo_wr_ptr_c)))) {
4915 				ERI_DEBUG_MSG1(erip,
4916 				    NONFATAL_MSG,
4917 				    "RX hang: Reset mac");
4918 
4919 				HSTAT(erip, rx_hang);
4920 				erip->linkcheck = 1;
4921 
4922 				eri_start_timer(erip, eri_check_link,
4923 				    ERI_LINKCHECK_TIMER);
4924 				(void) eri_init(erip);
4925 				return (linkupdate);
4926 			}
4927 		}
4928 	}
4929 #endif
4930 
4931 	/*
4932 	 * Check if tx hung.
4933 	 */
4934 #ifdef	ERI_TX_HUNG
4935 	if ((erip->flags & ERI_RUNNING) && param_linkup &&
4936 	    (eri_check_txhung(erip))) {
4937 		HSTAT(erip, tx_hang);
4938 		eri_reinit_txhung++;
4939 		erip->linkcheck = 1;
4940 		eri_start_timer(erip, eri_check_link, ERI_CHECK_HANG_TIMER);
4941 		(void) eri_init(erip);
4942 		return (linkupdate);
4943 	}
4944 #endif
4945 
4946 #ifdef ERI_PM_WORKAROUND
4947 	if (erip->stats.pmcap == ERI_PMCAP_NONE) {
4948 		if (pci_report_pmcap(erip->dip, PCI_PM_IDLESPEED,
4949 		    (void *)4000) == DDI_SUCCESS)
4950 			erip->stats.pmcap = ERI_PMCAP_4MHZ;
4951 
4952 		ERI_DEBUG_MSG2(erip, NONFATAL_MSG,
4953 		    "eri_check_link: PMCAP %d", erip->stats.pmcap);
4954 	}
4955 #endif
4956 	if ((!param_mode) && (param_transceiver != NO_XCVR))
4957 		eri_start_timer(erip, eri_check_link, ERI_CHECK_HANG_TIMER);
4958 	else
4959 		eri_start_timer(erip, eri_check_link, ERI_LINKCHECK_TIMER);
4960 	return (linkupdate);
4961 }
4962 
4963 static link_state_t
eri_mif_check(struct eri * erip,uint16_t mif_ints,uint16_t mif_data)4964 eri_mif_check(struct eri *erip, uint16_t mif_ints, uint16_t mif_data)
4965 {
4966 	uint16_t control, aner, anlpar, anar, an_common;
4967 	uint16_t old_mintrans;
4968 	int restart_autoneg = 0;
4969 	link_state_t retv;
4970 
4971 	ERI_DEBUG_MSG4(erip, XCVR_MSG, "eri_mif_check: mif_mask: %X, %X, %X",
4972 	    erip->mif_mask, mif_ints, mif_data);
4973 
4974 	mif_ints &= ~erip->mif_mask;
4975 	erip->mii_status = mif_data;
4976 	/*
4977 	 * Now check if someone has pulled the xcvr or
4978 	 * a new xcvr has shown up
4979 	 * If so try to find out what the new xcvr setup is.
4980 	 */
4981 	if (((mif_ints & PHY_BMSR_RES1) && (mif_data == 0xFFFF)) ||
4982 	    (param_transceiver == NO_XCVR)) {
4983 		ERI_FAULT_MSG1(erip, SEVERITY_NONE, ERI_VERB_MSG,
4984 		    "No status transceiver gone");
4985 		if (eri_new_xcvr(erip)) {
4986 			if (param_transceiver != NO_XCVR) {
4987 				/*
4988 				 * Reset the new PHY and bring up the link
4989 				 */
4990 				(void) eri_reset_xcvr(erip);
4991 			}
4992 		}
4993 		return (LINK_STATE_UNKNOWN);
4994 	}
4995 
4996 	if (param_autoneg && (mif_ints & PHY_BMSR_LNKSTS) &&
4997 	    (mif_data & PHY_BMSR_LNKSTS) && (mif_data & PHY_BMSR_ANC)) {
4998 		mif_ints |= PHY_BMSR_ANC;
4999 		ERI_DEBUG_MSG3(erip, PHY_MSG,
5000 		    "eri_mif_check: Set ANC bit mif_data %X mig_ints %X",
5001 		    mif_data, mif_ints);
5002 	}
5003 
5004 	if ((mif_ints & PHY_BMSR_ANC) && (mif_data & PHY_BMSR_ANC)) {
5005 		ERI_DEBUG_MSG1(erip, PHY_MSG, "Auto-negotiation interrupt.");
5006 
5007 		/*
5008 		 * Switch off Auto-negotiation interrupts and switch on
5009 		 * Link ststus interrupts.
5010 		 */
5011 		erip->mif_mask |= PHY_BMSR_ANC;
5012 		erip->mif_mask &= ~PHY_BMSR_LNKSTS;
5013 		(void) eri_mii_read(erip, ERI_PHY_ANER, &aner);
5014 		param_aner_lpancap = 1 && (aner & PHY_ANER_LPNW);
5015 		if ((aner & PHY_ANER_MLF) || (eri_force_mlf)) {
5016 			ERI_DEBUG_MSG1(erip, XCVR_MSG,
5017 			    "parallel detection fault");
5018 			/*
5019 			 * Consider doing open loop auto-negotiation.
5020 			 */
5021 			ERI_DEBUG_MSG1(erip, XCVR_MSG,
5022 			    "Going into Open loop Auto-neg");
5023 			(void) eri_mii_read(erip, ERI_PHY_BMCR, &control);
5024 
5025 			control &= ~(PHY_BMCR_ANE | PHY_BMCR_RAN |
5026 			    PHY_BMCR_FDX);
5027 			if (param_anar_100fdx || param_anar_100hdx) {
5028 				control |= PHY_BMCR_100M;
5029 				param_anlpar_100hdx = 1;
5030 				param_anlpar_10hdx = 0;
5031 				param_speed = 1;
5032 				erip->stats.ifspeed = 100;
5033 
5034 			} else if (param_anar_10fdx || param_anar_10hdx) {
5035 				control &= ~PHY_BMCR_100M;
5036 				param_anlpar_100hdx = 0;
5037 				param_anlpar_10hdx = 1;
5038 				param_speed = 0;
5039 				erip->stats.ifspeed = 10;
5040 			} else {
5041 				ERI_FAULT_MSG1(erip, SEVERITY_NONE,
5042 				    ERI_VERB_MSG,
5043 				    "Transceiver speed set incorrectly.");
5044 				return (0);
5045 			}
5046 
5047 			(void) eri_mii_write(erip, ERI_PHY_BMCR, control);
5048 			param_anlpar_100fdx = 0;
5049 			param_anlpar_10fdx = 0;
5050 			param_mode = 0;
5051 			erip->openloop_autoneg = 1;
5052 			return (0);
5053 		}
5054 		(void) eri_mii_read(erip, ERI_PHY_ANLPAR, &anlpar);
5055 		(void) eri_mii_read(erip, ERI_PHY_ANAR, &anar);
5056 		an_common = anar & anlpar;
5057 
5058 		ERI_DEBUG_MSG2(erip, XCVR_MSG, "an_common = 0x%X", an_common);
5059 
5060 		if (an_common & (PHY_ANLPAR_TXFDX | PHY_ANLPAR_TX)) {
5061 			param_speed = 1;
5062 			erip->stats.ifspeed = 100;
5063 			param_mode = 1 && (an_common & PHY_ANLPAR_TXFDX);
5064 
5065 		} else if (an_common & (PHY_ANLPAR_10FDX | PHY_ANLPAR_10)) {
5066 			param_speed = 0;
5067 			erip->stats.ifspeed = 10;
5068 			param_mode = 1 && (an_common & PHY_ANLPAR_10FDX);
5069 
5070 		} else an_common = 0x0;
5071 
5072 		if (!an_common) {
5073 			ERI_FAULT_MSG1(erip, SEVERITY_MID, ERI_VERB_MSG,
5074 			    "Transceiver: anar not set with speed selection");
5075 		}
5076 		param_anlpar_100T4 = 1 && (anlpar & PHY_ANLPAR_T4);
5077 		param_anlpar_100fdx = 1 && (anlpar & PHY_ANLPAR_TXFDX);
5078 		param_anlpar_100hdx = 1 && (anlpar & PHY_ANLPAR_TX);
5079 		param_anlpar_10fdx = 1 && (anlpar & PHY_ANLPAR_10FDX);
5080 		param_anlpar_10hdx = 1 && (anlpar & PHY_ANLPAR_10);
5081 
5082 		ERI_DEBUG_MSG2(erip, PHY_MSG,
5083 		    "Link duplex = 0x%X", param_mode);
5084 		ERI_DEBUG_MSG2(erip, PHY_MSG,
5085 		    "Link speed = 0x%X", param_speed);
5086 	/*	mif_ints |= PHY_BMSR_LNKSTS; prevent double msg */
5087 	/*	mif_data |= PHY_BMSR_LNKSTS; prevent double msg */
5088 	}
5089 	retv = LINK_STATE_UNKNOWN;
5090 	if (mif_ints & PHY_BMSR_LNKSTS) {
5091 		if (mif_data & PHY_BMSR_LNKSTS) {
5092 			ERI_DEBUG_MSG1(erip, PHY_MSG, "Link Up");
5093 			/*
5094 			 * Program Lu3X31T for mininum transition
5095 			 */
5096 			if (eri_phy_mintrans) {
5097 				eri_mii_write(erip, 31, 0x8000);
5098 				(void) eri_mii_read(erip, 0, &old_mintrans);
5099 				eri_mii_write(erip, 0, 0x00F1);
5100 				eri_mii_write(erip, 31, 0x0000);
5101 			}
5102 			/*
5103 			 * The link is up.
5104 			 */
5105 			eri_init_txmac(erip);
5106 			param_linkup = 1;
5107 			erip->stats.link_up = LINK_STATE_UP;
5108 			if (param_mode)
5109 				erip->stats.link_duplex = LINK_DUPLEX_FULL;
5110 			else
5111 				erip->stats.link_duplex = LINK_DUPLEX_HALF;
5112 
5113 			retv = LINK_STATE_UP;
5114 		} else {
5115 			ERI_DEBUG_MSG1(erip, PHY_MSG, "Link down.");
5116 			param_linkup = 0;
5117 			erip->stats.link_up = LINK_STATE_DOWN;
5118 			erip->stats.link_duplex = LINK_DUPLEX_UNKNOWN;
5119 			retv = LINK_STATE_DOWN;
5120 			if (param_autoneg) {
5121 				restart_autoneg = 1;
5122 			}
5123 		}
5124 	} else {
5125 		if (mif_data & PHY_BMSR_LNKSTS) {
5126 			if (!param_linkup) {
5127 				ERI_DEBUG_MSG1(erip, PHY_MSG,
5128 				    "eri_mif_check: MIF data link up");
5129 				/*
5130 				 * Program Lu3X31T for minimum transition
5131 				 */
5132 				if (eri_phy_mintrans) {
5133 					eri_mii_write(erip, 31, 0x8000);
5134 					(void) eri_mii_read(erip, 0,
5135 					    &old_mintrans);
5136 					eri_mii_write(erip, 0, 0x00F1);
5137 					eri_mii_write(erip, 31, 0x0000);
5138 				}
5139 				/*
5140 				 * The link is up.
5141 				 */
5142 				eri_init_txmac(erip);
5143 
5144 				param_linkup = 1;
5145 				erip->stats.link_up = LINK_STATE_UP;
5146 				if (param_mode)
5147 					erip->stats.link_duplex =
5148 					    LINK_DUPLEX_FULL;
5149 				else
5150 					erip->stats.link_duplex =
5151 					    LINK_DUPLEX_HALF;
5152 
5153 				retv = LINK_STATE_UP;
5154 			}
5155 		} else if (param_linkup) {
5156 			/*
5157 			 * The link is down now.
5158 			 */
5159 			ERI_DEBUG_MSG1(erip, PHY_MSG,
5160 			    "eri_mif_check:Link was up and went down");
5161 			param_linkup = 0;
5162 			erip->stats.link_up = LINK_STATE_DOWN;
5163 			erip->stats.link_duplex = LINK_DUPLEX_UNKNOWN;
5164 			retv = LINK_STATE_DOWN;
5165 			if (param_autoneg)
5166 				restart_autoneg = 1;
5167 		}
5168 	}
5169 	if (restart_autoneg) {
5170 		/*
5171 		 * Restart normal auto-negotiation.
5172 		 */
5173 		ERI_DEBUG_MSG1(erip, PHY_MSG,
5174 		    "eri_mif_check:Restart AUto Negotiation");
5175 		erip->openloop_autoneg = 0;
5176 		param_mode = 0;
5177 		param_speed = 0;
5178 		param_anlpar_100T4 = 0;
5179 		param_anlpar_100fdx = 0;
5180 		param_anlpar_100hdx = 0;
5181 		param_anlpar_10fdx = 0;
5182 		param_anlpar_10hdx = 0;
5183 		param_aner_lpancap = 0;
5184 		(void) eri_mii_read(erip, ERI_PHY_BMCR, &control);
5185 		control |= (PHY_BMCR_ANE | PHY_BMCR_RAN);
5186 		eri_mii_write(erip, ERI_PHY_BMCR, control);
5187 	}
5188 	if (mif_ints & PHY_BMSR_JABDET) {
5189 		if (mif_data & PHY_BMSR_JABDET) {
5190 			ERI_DEBUG_MSG1(erip, PHY_MSG, "Jabber detected.");
5191 			HSTAT(erip, jab);
5192 			/*
5193 			 * Reset the new PHY and bring up the link
5194 			 * (Check for failure?)
5195 			 */
5196 			(void) eri_reset_xcvr(erip);
5197 		}
5198 	}
5199 	return (retv);
5200 }
5201 
5202 #define	PHYRST_PERIOD 500
5203 static int
eri_reset_xcvr(struct eri * erip)5204 eri_reset_xcvr(struct eri *erip)
5205 {
5206 	uint16_t	stat;
5207 	uint16_t	anar;
5208 	uint16_t	control;
5209 	uint16_t	idr1;
5210 	uint16_t	idr2;
5211 	uint16_t	nicr;
5212 	uint32_t	speed_100;
5213 	uint32_t	speed_10;
5214 	int n;
5215 
5216 #ifdef	ERI_10_10_FORCE_SPEED_WORKAROUND
5217 	erip->ifspeed_old = erip->stats.ifspeed;
5218 #endif
5219 	/*
5220 	 * Reset Open loop auto-negotiation this means you can try
5221 	 * Normal auto-negotiation, until you get a Multiple Link fault
5222 	 * at which point you try 100M half duplex then 10M half duplex
5223 	 * until you get a Link up.
5224 	 */
5225 	erip->openloop_autoneg = 0;
5226 
5227 	/*
5228 	 * Reset the xcvr.
5229 	 */
5230 	eri_mii_write(erip, ERI_PHY_BMCR, PHY_BMCR_RESET);
5231 
5232 	/* Check for transceiver reset completion */
5233 
5234 	n = 1000;
5235 	while (--n > 0) {
5236 		drv_usecwait((clock_t)PHYRST_PERIOD);
5237 		if (eri_mii_read(erip, ERI_PHY_BMCR, &control) == 1) {
5238 			/* Transceiver does not talk MII */
5239 			ERI_FAULT_MSG1(erip, SEVERITY_NONE, ERI_VERB_MSG,
5240 			    "eri_reset_xcvr: no mii");
5241 		}
5242 		if ((control & PHY_BMCR_RESET) == 0)
5243 			goto reset_done;
5244 	}
5245 	ERI_FAULT_MSG2(erip, SEVERITY_NONE, ERI_VERB_MSG,
5246 	    "eri_reset_xcvr:reset_failed n == 0, control %x", control);
5247 	goto eri_reset_xcvr_failed;
5248 
5249 reset_done:
5250 
5251 	ERI_DEBUG_MSG2(erip, AUTOCONFIG_MSG,
5252 	    "eri_reset_xcvr: reset complete in %d us",
5253 	    (1000 - n) * PHYRST_PERIOD);
5254 
5255 	(void) eri_mii_read(erip, ERI_PHY_BMSR, &stat);
5256 	(void) eri_mii_read(erip, ERI_PHY_ANAR, &anar);
5257 	(void) eri_mii_read(erip, ERI_PHY_IDR1, &idr1);
5258 	(void) eri_mii_read(erip, ERI_PHY_IDR2, &idr2);
5259 
5260 	ERI_DEBUG_MSG4(erip, XCVR_MSG,
5261 	    "eri_reset_xcvr: control %x stat %x anar %x", control, stat, anar);
5262 
5263 	/*
5264 	 * Initialize the read only transceiver ndd information
5265 	 * the values are either 0 or 1.
5266 	 */
5267 	param_bmsr_ancap = 1 && (stat & PHY_BMSR_ACFG);
5268 	param_bmsr_100T4 = 1 && (stat & PHY_BMSR_100T4);
5269 	param_bmsr_100fdx = 1 && (stat & PHY_BMSR_100FDX);
5270 	param_bmsr_100hdx = 1 && (stat & PHY_BMSR_100HDX);
5271 	param_bmsr_10fdx = 1 && (stat & PHY_BMSR_10FDX);
5272 	param_bmsr_10hdx = 1 && (stat & PHY_BMSR_10HDX);
5273 
5274 	/*
5275 	 * Match up the ndd capabilities with the transceiver.
5276 	 */
5277 	param_autoneg &= param_bmsr_ancap;
5278 	param_anar_100fdx &= param_bmsr_100fdx;
5279 	param_anar_100hdx &= param_bmsr_100hdx;
5280 	param_anar_10fdx &= param_bmsr_10fdx;
5281 	param_anar_10hdx &= param_bmsr_10hdx;
5282 
5283 	/*
5284 	 * Select the operation mode of the transceiver.
5285 	 */
5286 	if (param_autoneg) {
5287 		/*
5288 		 * Initialize our auto-negotiation capabilities.
5289 		 */
5290 		anar = PHY_SELECTOR;
5291 		if (param_anar_100T4)
5292 			anar |= PHY_ANAR_T4;
5293 		if (param_anar_100fdx)
5294 			anar |= PHY_ANAR_TXFDX;
5295 		if (param_anar_100hdx)
5296 			anar |= PHY_ANAR_TX;
5297 		if (param_anar_10fdx)
5298 			anar |= PHY_ANAR_10FDX;
5299 		if (param_anar_10hdx)
5300 			anar |= PHY_ANAR_10;
5301 		ERI_DEBUG_MSG2(erip, XCVR_MSG, "anar = %x", anar);
5302 		eri_mii_write(erip, ERI_PHY_ANAR, anar);
5303 	}
5304 
5305 	/* Place the Transceiver in normal operation mode */
5306 	if ((control & PHY_BMCR_ISOLATE) || (control & PHY_BMCR_LPBK)) {
5307 		control &= ~(PHY_BMCR_ISOLATE | PHY_BMCR_LPBK);
5308 		eri_mii_write(erip, ERI_PHY_BMCR,
5309 		    (control & ~PHY_BMCR_ISOLATE));
5310 	}
5311 
5312 	/*
5313 	 * If Lu3X31T then allow nonzero eri_phy_mintrans
5314 	 */
5315 	if (eri_phy_mintrans &&
5316 	    (idr1 != 0x43 || (idr2 & 0xFFF0) != 0x7420)) {
5317 		eri_phy_mintrans = 0;
5318 	}
5319 	/*
5320 	 * Initialize the mif interrupt mask.
5321 	 */
5322 	erip->mif_mask = (uint16_t)(~PHY_BMSR_RES1);
5323 
5324 	/*
5325 	 * Establish link speeds and do necessary special stuff based
5326 	 * in the speed.
5327 	 */
5328 	speed_100 = param_anar_100fdx | param_anar_100hdx;
5329 	speed_10 = param_anar_10fdx | param_anar_10hdx;
5330 
5331 	ERI_DEBUG_MSG5(erip, XCVR_MSG, "eri_reset_xcvr: %d %d %d %d",
5332 	    param_anar_100fdx, param_anar_100hdx, param_anar_10fdx,
5333 	    param_anar_10hdx);
5334 
5335 	ERI_DEBUG_MSG3(erip, XCVR_MSG,
5336 	    "eri_reset_xcvr: speed_100 %d speed_10 %d", speed_100, speed_10);
5337 
5338 	if ((!speed_100) && (speed_10)) {
5339 		erip->mif_mask &= ~PHY_BMSR_JABDET;
5340 		if (!(param_anar_10fdx) &&
5341 		    (param_anar_10hdx) &&
5342 		    (erip->link_pulse_disabled)) {
5343 			param_speed = 0;
5344 			param_mode = 0;
5345 			(void) eri_mii_read(erip, ERI_PHY_NICR, &nicr);
5346 			nicr &= ~PHY_NICR_LD;
5347 			eri_mii_write(erip, ERI_PHY_NICR, nicr);
5348 			param_linkup = 1;
5349 			erip->stats.link_up = LINK_STATE_UP;
5350 			if (param_mode)
5351 				erip->stats.link_duplex = LINK_DUPLEX_FULL;
5352 			else
5353 				erip->stats.link_duplex = LINK_DUPLEX_HALF;
5354 		}
5355 	}
5356 
5357 	/*
5358 	 * Clear the autonegotitation before re-starting
5359 	 */
5360 	control = PHY_BMCR_100M | PHY_BMCR_FDX;
5361 /*	eri_mii_write(erip, ERI_PHY_BMCR, control); */
5362 	if (param_autoneg) {
5363 		/*
5364 		 * Setup the transceiver for autonegotiation.
5365 		 */
5366 		erip->mif_mask &= ~PHY_BMSR_ANC;
5367 
5368 		/*
5369 		 * Clear the Auto-negotiation before re-starting
5370 		 */
5371 		eri_mii_write(erip, ERI_PHY_BMCR, control & ~PHY_BMCR_ANE);
5372 
5373 		/*
5374 		 * Switch on auto-negotiation.
5375 		 */
5376 		control |= (PHY_BMCR_ANE | PHY_BMCR_RAN);
5377 
5378 		eri_mii_write(erip, ERI_PHY_BMCR, control);
5379 	} else {
5380 		/*
5381 		 * Force the transceiver.
5382 		 */
5383 		erip->mif_mask &= ~PHY_BMSR_LNKSTS;
5384 
5385 		/*
5386 		 * Switch off auto-negotiation.
5387 		 */
5388 		control &= ~(PHY_BMCR_FDX | PHY_BMCR_ANE | PHY_BMCR_RAN);
5389 
5390 		if (speed_100) {
5391 			control |= PHY_BMCR_100M;
5392 			param_aner_lpancap = 0; /* Clear LP nway */
5393 			param_anlpar_10fdx = 0;
5394 			param_anlpar_10hdx = 0;
5395 			param_anlpar_100T4 = param_anar_100T4;
5396 			param_anlpar_100fdx = param_anar_100fdx;
5397 			param_anlpar_100hdx = param_anar_100hdx;
5398 			param_speed = 1;
5399 			erip->stats.ifspeed = 100;
5400 			param_mode = param_anar_100fdx;
5401 			if (param_mode) {
5402 				param_anlpar_100hdx = 0;
5403 				erip->stats.link_duplex = LINK_DUPLEX_FULL;
5404 			} else {
5405 				erip->stats.link_duplex = LINK_DUPLEX_HALF;
5406 			}
5407 		} else if (speed_10) {
5408 			control &= ~PHY_BMCR_100M;
5409 			param_aner_lpancap = 0; /* Clear LP nway */
5410 			param_anlpar_100fdx = 0;
5411 			param_anlpar_100hdx = 0;
5412 			param_anlpar_100T4 = 0;
5413 			param_anlpar_10fdx = param_anar_10fdx;
5414 			param_anlpar_10hdx = param_anar_10hdx;
5415 			param_speed = 0;
5416 			erip->stats.ifspeed = 10;
5417 			param_mode = param_anar_10fdx;
5418 			if (param_mode) {
5419 				param_anlpar_10hdx = 0;
5420 				erip->stats.link_duplex = LINK_DUPLEX_FULL;
5421 			} else {
5422 				erip->stats.link_duplex = LINK_DUPLEX_HALF;
5423 			}
5424 		} else {
5425 			ERI_FAULT_MSG1(erip, SEVERITY_NONE, ERI_VERB_MSG,
5426 			    "Transceiver speed set incorrectly.");
5427 		}
5428 
5429 		if (param_mode) {
5430 			control |= PHY_BMCR_FDX;
5431 		}
5432 
5433 		ERI_DEBUG_MSG4(erip, PHY_MSG,
5434 		    "control = %x status = %x param_mode %d",
5435 		    control, stat, param_mode);
5436 
5437 		eri_mii_write(erip, ERI_PHY_BMCR, control);
5438 /*
5439  *		if (param_mode) {
5440  *			control |= PHY_BMCR_FDX;
5441  *		}
5442  *		control &= ~(PHY_BMCR_FDX | PHY_BMCR_ANE | PHY_BMCR_RAN);
5443  *		eri_mii_write(erip, ERI_PHY_BMCR, control);
5444  */
5445 	}
5446 
5447 #ifdef DEBUG
5448 	(void) eri_mii_read(erip, ERI_PHY_BMCR, &control);
5449 	(void) eri_mii_read(erip, ERI_PHY_BMSR, &stat);
5450 	(void) eri_mii_read(erip, ERI_PHY_ANAR, &anar);
5451 #endif
5452 	ERI_DEBUG_MSG4(erip, PHY_MSG,
5453 	    "control %X status %X anar %X", control, stat, anar);
5454 
5455 eri_reset_xcvr_exit:
5456 	return (0);
5457 
5458 eri_reset_xcvr_failed:
5459 	return (1);
5460 }
5461 
5462 #ifdef	ERI_10_10_FORCE_SPEED_WORKAROUND
5463 
5464 static void
eri_xcvr_force_mode(struct eri * erip,uint32_t * link_timeout)5465 eri_xcvr_force_mode(struct eri *erip, uint32_t *link_timeout)
5466 {
5467 
5468 	if (!param_autoneg && !param_linkup && (erip->stats.ifspeed == 10) &&
5469 	    (param_anar_10fdx | param_anar_10hdx)) {
5470 		*link_timeout = SECOND(1);
5471 		return;
5472 	}
5473 
5474 	if (!param_autoneg && !param_linkup && (erip->ifspeed_old == 10) &&
5475 	    (param_anar_100fdx | param_anar_100hdx)) {
5476 		/*
5477 		 * May have to set link partner's speed and mode.
5478 		 */
5479 		ERI_FAULT_MSG1(erip, SEVERITY_NONE, ERI_LOG_MSG,
5480 		"May have to set link partner's speed and duplex mode.");
5481 	}
5482 }
5483 #endif
5484 
5485 static void
eri_mif_poll(struct eri * erip,soft_mif_enable_t enable)5486 eri_mif_poll(struct eri *erip, soft_mif_enable_t enable)
5487 {
5488 	if (enable == MIF_POLL_START) {
5489 		if (erip->mifpoll_enable && !erip->openloop_autoneg) {
5490 			erip->mif_config |= ERI_MIF_CFGPE;
5491 			PUT_MIFREG(mif_cfg, erip->mif_config);
5492 			drv_usecwait(ERI_MIF_POLL_DELAY);
5493 			PUT_GLOBREG(intmask, GET_GLOBREG(intmask) &
5494 			    ~ERI_G_MASK_MIF_INT);
5495 			PUT_MIFREG(mif_imask, erip->mif_mask);
5496 		}
5497 	} else if (enable == MIF_POLL_STOP) {
5498 			erip->mif_config &= ~ERI_MIF_CFGPE;
5499 			PUT_MIFREG(mif_cfg, erip->mif_config);
5500 			drv_usecwait(ERI_MIF_POLL_DELAY);
5501 			PUT_GLOBREG(intmask, GET_GLOBREG(intmask) |
5502 			    ERI_G_MASK_MIF_INT);
5503 			PUT_MIFREG(mif_imask, ERI_MIF_INTMASK);
5504 	}
5505 	ERI_DEBUG_MSG2(erip, XCVR_MSG, "MIF Config = 0x%X",
5506 	    GET_MIFREG(mif_cfg));
5507 	ERI_DEBUG_MSG2(erip, XCVR_MSG, "MIF imask = 0x%X",
5508 	    GET_MIFREG(mif_imask));
5509 	ERI_DEBUG_MSG2(erip, XCVR_MSG, "INT imask = 0x%X",
5510 	    GET_GLOBREG(intmask));
5511 	ERI_DEBUG_MSG1(erip, XCVR_MSG, "<== mif_poll");
5512 }
5513 
5514 /* Decide if transmitter went dead and reinitialize everything */
5515 #ifdef	ERI_TX_HUNG
5516 static int eri_txhung_limit = 2;
5517 static int
eri_check_txhung(struct eri * erip)5518 eri_check_txhung(struct eri *erip)
5519 {
5520 	boolean_t	macupdate = B_FALSE;
5521 
5522 	mutex_enter(&erip->xmitlock);
5523 	if (erip->flags & ERI_RUNNING) {
5524 		erip->tx_completion = (uint32_t)(GET_ETXREG(tx_completion) &
5525 		    ETX_COMPLETION_MASK);
5526 		macupdate |= eri_reclaim(erip, erip->tx_completion);
5527 	}
5528 
5529 	/* Something needs to be sent out but it is not going out */
5530 	if ((erip->tcurp != erip->tnextp) &&
5531 	    (erip->stats.opackets64 == erip->erisave.reclaim_opackets) &&
5532 	    (erip->stats.collisions == erip->erisave.starts))
5533 		erip->txhung++;
5534 	else
5535 		erip->txhung = 0;
5536 
5537 	erip->erisave.reclaim_opackets = erip->stats.opackets64;
5538 	erip->erisave.starts = erip->stats.collisions;
5539 	mutex_exit(&erip->xmitlock);
5540 
5541 	if (macupdate)
5542 		mac_tx_update(erip->mh);
5543 
5544 	return (erip->txhung >= eri_txhung_limit);
5545 }
5546 #endif
5547