xref: /illumos-gate/usr/src/uts/common/io/hme/hme.c (revision 6a634c9d)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
23  */
24 
25 
26 /*
27  * SunOS MT STREAMS FEPS(SBus)/Cheerio(PCI) 10/100Mb Ethernet Device Driver
28  */
29 
30 #include	<sys/types.h>
31 #include	<sys/debug.h>
32 #include	<sys/stream.h>
33 #include	<sys/cmn_err.h>
34 #include	<sys/kmem.h>
35 #include	<sys/crc32.h>
36 #include	<sys/modctl.h>
37 #include	<sys/conf.h>
38 #include	<sys/strsun.h>
39 #include	<sys/kstat.h>
40 #include	<sys/pattr.h>
41 #include	<sys/dlpi.h>
42 #include	<sys/strsubr.h>
43 #include	<sys/mac_provider.h>
44 #include	<sys/mac_ether.h>
45 #include	<sys/mii.h>
46 #include	<sys/ethernet.h>
47 #include	<sys/vlan.h>
48 #include	<sys/pci.h>
49 #include	<sys/policy.h>
50 #include	<sys/ddi.h>
51 #include	<sys/sunddi.h>
52 #include	<sys/byteorder.h>
53 #include	"hme_phy.h"
54 #include	"hme_mac.h"
55 #include	"hme.h"
56 
57 typedef void	(*fptrv_t)();
58 
59 typedef enum {
60 	NO_MSG		= 0,
61 	AUTOCONFIG_MSG,
62 	DISPLAY_MSG,
63 	INIT_MSG,
64 	UNINIT_MSG,
65 	CONFIG_MSG,
66 	MII_MSG,
67 	FATAL_ERR_MSG,
68 	NFATAL_ERR_MSG,
69 	XCVR_MSG,
70 	NOXCVR_MSG,
71 	ERX_MSG,
72 	DDI_MSG,
73 } msg_t;
74 
75 msg_t	hme_debug_level =	NO_MSG;
76 
77 static char	*msg_string[] = {
78 	"NONE       ",
79 	"AUTOCONFIG ",
80 	"DISPLAY    ",
81 	"INIT       ",
82 	"UNINIT		",
83 	"CONFIG	",
84 	"MII	",
85 	"FATAL_ERR	",
86 	"NFATAL_ERR	",
87 	"XCVR	",
88 	"NOXCVR	",
89 	"ERX	",
90 	"DDI	",
91 };
92 
93 #define	SEVERITY_NONE	0
94 #define	SEVERITY_LOW	0
95 #define	SEVERITY_MID	1
96 #define	SEVERITY_HIGH	2
97 #define	SEVERITY_UNKNOWN 99
98 
99 #define	FEPS_URUN_BUG
100 #define	HME_CODEVIOL_BUG
101 
102 #define	KIOIP	KSTAT_INTR_PTR(hmep->hme_intrstats)
103 
104 /*
105  * The following variables are used for checking fixes in Sbus/FEPS 2.0
106  */
107 static	int	hme_urun_fix = 0;	/* Bug fixed in Sbus/FEPS 2.0 */
108 
109 /*
110  * The following variables are used for configuring various features
111  */
112 static	int	hme_64bit_enable =	1;	/* Use 64-bit sbus transfers */
113 static	int	hme_reject_own =	1;	/* Reject packets with own SA */
114 static	int	hme_ngu_enable =	0;	/* Never Give Up mode */
115 
116 char *hme_priv_prop[] = {
117 	"_ipg0",
118 	"_ipg1",
119 	"_ipg2",
120 	"_lance_mode",
121 	NULL
122 };
123 
124 static	int	hme_lance_mode =	1;	/* to enable lance mode */
125 static	int	hme_ipg0 =		16;
126 static	int	hme_ipg1 =		8;
127 static	int	hme_ipg2 =		4;
128 
129 /*
130  * The following parameters may be configured by the user. If they are not
131  * configured by the user, the values will be based on the capabilities of
132  * the transceiver.
133  * The value "HME_NOTUSR" is ORed with the parameter value to indicate values
134  * which are NOT configured by the user.
135  */
136 
137 #define	HME_NOTUSR	0x0f000000
138 #define	HME_MASK_1BIT	0x1
139 #define	HME_MASK_5BIT	0x1f
140 #define	HME_MASK_8BIT	0xff
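
/*
 * Editorial sketch of the convention described above (the surrounding
 * names here are hypothetical, not part of this driver):
 *
 *	param = HME_NOTUSR | 16;		default, not user-set
 *	if ((param & HME_NOTUSR) == 0)
 *		value = param & HME_MASK_8BIT;	user-configured value
 */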
141 
142 /*
143  * All strings used by hme messaging functions
144  */
145 
146 static	char *no_xcvr_msg =
147 	"No transceiver found.";
148 
149 static	char *burst_size_msg =
150 	"Could not identify the burst size";
151 
152 static	char *unk_rx_ringsz_msg =
153 	"Unknown receive RINGSZ";
154 
155 static  char *add_intr_fail_msg =
156 	"ddi_add_intr(9F) failed";
157 
158 static  char *mregs_4global_reg_fail_msg =
159 	"ddi_regs_map_setup(9F) for global reg failed";
160 
161 static	char *mregs_4etx_reg_fail_msg =
162 	"ddi_map_regs for etx reg failed";
163 
164 static	char *mregs_4erx_reg_fail_msg =
165 	"ddi_map_regs for erx reg failed";
166 
167 static	char *mregs_4bmac_reg_fail_msg =
168 	"ddi_map_regs for bmac reg failed";
169 
170 static	char *mregs_4mif_reg_fail_msg =
171 	"ddi_map_regs for mif reg failed";
172 
173 static	char *init_fail_gen_msg =
174 	"Failed to initialize hardware/driver";
175 
176 static	char *ddi_nregs_fail_msg =
177 	"ddi_dev_nregs(9F) failed, returned %d";
178 
179 static	char *bad_num_regs_msg =
180 	"Invalid number of registers.";
181 
182 
183 /* FATAL ERR msgs */
184 /*
185  * Function prototypes.
186  */
187 /* these two are global so that qfe can use them */
188 int hmeattach(dev_info_t *, ddi_attach_cmd_t);
189 int hmedetach(dev_info_t *, ddi_detach_cmd_t);
190 int hmequiesce(dev_info_t *);
191 static	boolean_t hmeinit_xfer_params(struct hme *);
192 static	uint_t hmestop(struct hme *);
193 static	void hmestatinit(struct hme *);
194 static	int hmeallocthings(struct hme *);
195 static	void hmefreethings(struct hme *);
196 static	int hmeallocbuf(struct hme *, hmebuf_t *, int);
197 static	int hmeallocbufs(struct hme *);
198 static	void hmefreebufs(struct hme *);
199 static	void hmeget_hm_rev_property(struct hme *);
200 static	boolean_t hmestart(struct hme *, mblk_t *);
201 static	uint_t hmeintr(caddr_t);
202 static	void hmereclaim(struct hme *);
203 static	int hmeinit(struct hme *);
204 static	void hmeuninit(struct hme *hmep);
205 static 	mblk_t *hmeread(struct hme *, hmebuf_t *, uint32_t);
206 static	void hmesavecntrs(struct hme *);
207 static	void hme_fatal_err(struct hme *, uint_t);
208 static	void hme_nonfatal_err(struct hme *, uint_t);
209 static	int hmeburstsizes(struct hme *);
210 static	void send_bit(struct hme *, uint16_t);
211 static	uint16_t get_bit_std(uint8_t, struct hme *);
212 static	uint16_t hme_bb_mii_read(struct hme *, uint8_t, uint8_t);
213 static	void hme_bb_mii_write(struct hme *, uint8_t, uint8_t, uint16_t);
214 static	void hme_bb_force_idle(struct hme *);
215 static	uint16_t hme_mii_read(void *, uint8_t, uint8_t);
216 static	void hme_mii_write(void *, uint8_t, uint8_t, uint16_t);
217 static	void hme_setup_mac_address(struct hme *, dev_info_t *);
218 static	void hme_mii_notify(void *, link_state_t);
219 
220 static void hme_fault_msg(struct hme *, uint_t, msg_t, char *, ...);
221 
222 static void hme_check_acc_handle(char *, uint_t, struct hme *,
223     ddi_acc_handle_t);
224 
225 /*
226  * Nemo (GLDv3) Functions.
227  */
228 static int	hme_m_stat(void *, uint_t, uint64_t *);
229 static int	hme_m_start(void *);
230 static void	hme_m_stop(void *);
231 static int	hme_m_promisc(void *, boolean_t);
232 static int	hme_m_multicst(void *, boolean_t, const uint8_t *);
233 static int	hme_m_unicst(void *, const uint8_t *);
234 static mblk_t	*hme_m_tx(void *, mblk_t *);
235 static boolean_t	hme_m_getcapab(void *, mac_capab_t, void *);
236 static int hme_m_getprop(void *, const char *, mac_prop_id_t, uint_t, void *);
237 static void hme_m_propinfo(void *, const char *, mac_prop_id_t,
238     mac_prop_info_handle_t);
239 static int hme_m_setprop(void *, const char *, mac_prop_id_t, uint_t,
240     const void *);
241 
242 static mii_ops_t hme_mii_ops = {
243 	MII_OPS_VERSION,
244 	hme_mii_read,
245 	hme_mii_write,
246 	hme_mii_notify,
247 	NULL
248 };
249 
250 static mac_callbacks_t hme_m_callbacks = {
251 	MC_GETCAPAB | MC_SETPROP | MC_GETPROP | MC_PROPINFO,
252 	hme_m_stat,
253 	hme_m_start,
254 	hme_m_stop,
255 	hme_m_promisc,
256 	hme_m_multicst,
257 	hme_m_unicst,
258 	hme_m_tx,
259 	NULL,
260 	NULL,
261 	hme_m_getcapab,
262 	NULL,
263 	NULL,
264 	hme_m_setprop,
265 	hme_m_getprop,
266 	hme_m_propinfo
267 };
268 
269 DDI_DEFINE_STREAM_OPS(hme_dev_ops, nulldev, nulldev, hmeattach, hmedetach,
270     nodev, NULL, D_MP, NULL, hmequiesce);
271 
272 #define	HME_FAULT_MSG1(p, s, t, f) \
273     hme_fault_msg((p), (s), (t), (f));
274 
275 #define	HME_FAULT_MSG2(p, s, t, f, a) \
276     hme_fault_msg((p), (s), (t), (f), (a));
277 
278 #define	HME_FAULT_MSG3(p, s, t, f, a, b) \
279     hme_fault_msg((p), (s), (t), (f), (a), (b));
280 
281 #define	HME_FAULT_MSG4(p, s, t, f, a, b, c) \
282     hme_fault_msg((p), (s), (t), (f), (a), (b), (c));
283 
284 #define	CHECK_MIFREG() \
285 	hme_check_acc_handle(__FILE__, __LINE__, hmep, hmep->hme_mifregh)
286 #define	CHECK_ETXREG() \
287 	hme_check_acc_handle(__FILE__, __LINE__, hmep, hmep->hme_etxregh)
288 #define	CHECK_ERXREG() \
289 	hme_check_acc_handle(__FILE__, __LINE__, hmep, hmep->hme_erxregh)
290 #define	CHECK_MACREG() \
291 	hme_check_acc_handle(__FILE__, __LINE__, hmep, hmep->hme_bmacregh)
292 #define	CHECK_GLOBREG() \
293 	hme_check_acc_handle(__FILE__, __LINE__, hmep, hmep->hme_globregh)
294 
295 /*
296  * Claim the device is ultra-capable of burst in the beginning.  Use
297  * the value returned by ddi_dma_burstsizes() to actually set the HME
298  * global configuration register later.
299  *
300  * Sbus/FEPS supports burst sizes of 16, 32 and 64 bytes. Also, it supports
301  * 32-bit and 64-bit Sbus transfers. Hence the dlim_burstsizes field contains
302  * the burstsizes in both the lo and hi words.
303  */
304 #define	HMELIMADDRLO	((uint64_t)0x00000000)
305 #define	HMELIMADDRHI	((uint64_t)0xffffffff)
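
/*
 * A note on the burst-size encoding used in hme_dma_attr below: in a
 * DDI burstsize word, bit N set means 2^N-byte bursts are supported,
 * so 0x70 (bits 4-6) advertises 16-, 32- and 64-byte bursts, and
 * 0x00700070 repeats that mask in the upper half-word for 64-bit
 * Sbus transfers.
 */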
306 
307 /*
308  * Note that rx and tx data buffers can be arbitrarily aligned, but
309  * that the descriptor rings need to be aligned on 2K boundaries, per
310  * the spec.
311  */
312 static ddi_dma_attr_t hme_dma_attr = {
313 	DMA_ATTR_V0,		/* version number. */
314 	(uint64_t)HMELIMADDRLO,	/* low address */
315 	(uint64_t)HMELIMADDRHI,	/* high address */
316 	(uint64_t)0x00ffffff,	/* address counter max */
317 	(uint64_t)HME_HMDALIGN,	/* alignment */
318 	(uint_t)0x00700070,	/* dlim_burstsizes for 32 and 64 bit xfers */
319 	(uint32_t)0x1,		/* minimum transfer size */
320 	(uint64_t)0x7fffffff,	/* maximum transfer size */
321 	(uint64_t)0x00ffffff,	/* maximum segment size */
322 	1,			/* scatter/gather list length */
323 	512,			/* granularity */
324 	0			/* attribute flags */
325 };
326 
327 static ddi_device_acc_attr_t hme_buf_attr = {
328 	DDI_DEVICE_ATTR_V0,
329 	DDI_NEVERSWAP_ACC,
330 	DDI_STRICTORDER_ACC,	/* probably could allow merging & caching */
331 	DDI_DEFAULT_ACC,
332 };
333 
334 static uchar_t pci_latency_timer = 0;
335 
336 /*
337  * Module linkage information for the kernel.
338  */
339 static struct modldrv modldrv = {
340 	&mod_driverops,	/* Type of module.  This one is a driver */
341 	"Sun HME 10/100 Mb Ethernet",
342 	&hme_dev_ops,	/* driver ops */
343 };
344 
345 static struct modlinkage modlinkage = {
346 	MODREV_1, &modldrv, NULL
347 };
348 
349 /* <<<<<<<<<<<<<<<<<<<<<<  Register operations >>>>>>>>>>>>>>>>>>>>> */
350 
351 #define	GET_MIFREG(reg) \
352 	ddi_get32(hmep->hme_mifregh, (uint32_t *)&hmep->hme_mifregp->reg)
353 #define	PUT_MIFREG(reg, value) \
354 	ddi_put32(hmep->hme_mifregh, (uint32_t *)&hmep->hme_mifregp->reg, value)
355 
356 #define	GET_ETXREG(reg) \
357 	ddi_get32(hmep->hme_etxregh, (uint32_t *)&hmep->hme_etxregp->reg)
358 #define	PUT_ETXREG(reg, value) \
359 	ddi_put32(hmep->hme_etxregh, (uint32_t *)&hmep->hme_etxregp->reg, value)
360 #define	GET_ERXREG(reg) \
361 	ddi_get32(hmep->hme_erxregh, (uint32_t *)&hmep->hme_erxregp->reg)
362 #define	PUT_ERXREG(reg, value) \
363 	ddi_put32(hmep->hme_erxregh, (uint32_t *)&hmep->hme_erxregp->reg, value)
364 #define	GET_MACREG(reg) \
365 	ddi_get32(hmep->hme_bmacregh, (uint32_t *)&hmep->hme_bmacregp->reg)
366 #define	PUT_MACREG(reg, value) \
367 	ddi_put32(hmep->hme_bmacregh, \
368 		(uint32_t *)&hmep->hme_bmacregp->reg, value)
369 #define	GET_GLOBREG(reg) \
370 	ddi_get32(hmep->hme_globregh, (uint32_t *)&hmep->hme_globregp->reg)
371 #define	PUT_GLOBREG(reg, value) \
372 	ddi_put32(hmep->hme_globregh, \
373 		(uint32_t *)&hmep->hme_globregp->reg, value)
374 #define	PUT_TMD(ptr, paddr, len, flags)					\
375 	ddi_put32(hmep->hme_tmd_acch, &hmep->hme_tmdp[ptr].tmd_addr, paddr); \
376 	ddi_put32(hmep->hme_tmd_acch, &hmep->hme_tmdp[ptr].tmd_flags,	\
377 	    len | flags)
378 #define	GET_TMD_FLAGS(ptr)					\
379 	ddi_get32(hmep->hme_tmd_acch, &hmep->hme_tmdp[ptr].tmd_flags)
380 #define	PUT_RMD(ptr, paddr) \
381 	ddi_put32(hmep->hme_rmd_acch, &hmep->hme_rmdp[ptr].rmd_addr, paddr); \
382 	ddi_put32(hmep->hme_rmd_acch, &hmep->hme_rmdp[ptr].rmd_flags,	\
383 	    (uint32_t)(HMEBUFSIZE << HMERMD_BUFSIZE_SHIFT) | HMERMD_OWN)
384 #define	GET_RMD_FLAGS(ptr)					\
385 	ddi_get32(hmep->hme_rmd_acch, &hmep->hme_rmdp[ptr].rmd_flags)
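
/*
 * Note the write ordering in PUT_TMD/PUT_RMD above: the buffer address
 * is stored before the flags word, and for receive descriptors the
 * HMERMD_OWN bit is part of that final flags store, so the device can
 * only claim a descriptor whose address is already valid.
 */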
386 
387 #define	GET_ROM8(offset) \
388 	ddi_get8((hmep->hme_romh), (offset))
389 
390 /*
391  * Ether_copy is not endian-correct. Define an endian-correct version.
392  */
393 #define	ether_bcopy(a, b) (bcopy(a, b, 6))
394 
395 /*
396  * Ether-type is specifically big-endian, but data region is unknown endian
397  */
398 #define	get_ether_type(ptr) \
399 	(((((uint8_t *)ptr)[12] << 8) | (((uint8_t *)ptr)[13])))
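
/*
 * For example, an untagged IPv4 frame carries 0x08 0x00 at offsets
 * 12-13, so get_ether_type() yields 0x0800 regardless of host
 * endianness.
 */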
400 
401 /* <<<<<<<<<<<<<<<<<<<<<<  Configuration Parameters >>>>>>>>>>>>>>>>>>>>> */
402 
403 #define	BMAC_DEFAULT_JAMSIZE	(0x04)		/* jamsize equals 4 */
404 #define	BMAC_LONG_JAMSIZE	(0x10)		/* jamsize equals 0x10 */
405 static	int 	jamsize = BMAC_DEFAULT_JAMSIZE;
406 
407 
408 /*
409  * Calculate the bit in the multicast address filter that selects the given
410  * address.
411  */
412 
413 static uint32_t
414 hmeladrf_bit(const uint8_t *addr)
415 {
416 	uint32_t crc;
417 
418 	CRC32(crc, addr, ETHERADDRL, -1U, crc32_table);
419 
420 	/*
421 	 * Just want the 6 most significant bits.
422 	 */
423 	return (crc >> 26);
424 }
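
/*
 * Illustrative use (a sketch; the multicast code is outside this
 * section, and `ladrf' stands in for the driver's four 16-bit
 * logical-address filter words):
 *
 *	uint32_t bit = hmeladrf_bit(mca);
 *	ladrf[bit >> 4] |= (1 << (bit & 0xf));
 */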
425 
426 /* <<<<<<<<<<<<<<<<<<<<<<<<  Bit Bang Operations >>>>>>>>>>>>>>>>>>>>>>>> */
427 
428 static void
429 send_bit(struct hme *hmep, uint16_t x)
430 {
431 	PUT_MIFREG(mif_bbdata, x);
432 	PUT_MIFREG(mif_bbclk, HME_BBCLK_LOW);
433 	PUT_MIFREG(mif_bbclk, HME_BBCLK_HIGH);
434 }
435 
436 
437 /*
438  * To read the MII register bits according to the IEEE Standard
439  */
440 static uint16_t
441 get_bit_std(uint8_t phyad, struct hme *hmep)
442 {
443 	uint16_t	x;
444 
445 	PUT_MIFREG(mif_bbclk, HME_BBCLK_LOW);
446 	drv_usecwait(1);	/* wait for >330 ns for stable data */
447 	if (phyad == HME_INTERNAL_PHYAD)
448 		x = (GET_MIFREG(mif_cfg) & HME_MIF_CFGM0) ? 1 : 0;
449 	else
450 		x = (GET_MIFREG(mif_cfg) & HME_MIF_CFGM1) ? 1 : 0;
451 	PUT_MIFREG(mif_bbclk, HME_BBCLK_HIGH);
452 	return (x);
453 }
454 
455 #define	SEND_BIT(x)		send_bit(hmep, x)
456 #define	GET_BIT_STD(phyad, x)	x = get_bit_std(phyad, hmep)
457 
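/*
 * The bit-bang routines below clock out a standard IEEE 802.3
 * clause-22 management frame, MSB first:
 *
 *	<ST=01> <OP> <AAAAA> <RRRRR> <TA> <DDDDDDDDDDDDDDDD>
 *
 * where OP is 01 for a write and 10 for a read; on a read, the
 * turnaround (TA) and data bits are driven by the PHY instead.
 */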
458 
459 static void
460 hme_bb_mii_write(struct hme *hmep, uint8_t phyad, uint8_t regad, uint16_t data)
461 {
462 	int	i;
463 
464 	PUT_MIFREG(mif_bbopenb, 1);	/* Enable the MII driver */
465 	(void) hme_bb_force_idle(hmep);
466 	SEND_BIT(0); SEND_BIT(1);	/* <ST> */
467 	SEND_BIT(0); SEND_BIT(1);	/* <OP> */
468 
469 	for (i = 4; i >= 0; i--) {		/* <AAAAA> */
470 		SEND_BIT((phyad >> i) & 1);
471 	}
472 
473 	for (i = 4; i >= 0; i--) {		/* <RRRRR> */
474 		SEND_BIT((regad >> i) & 1);
475 	}
476 
477 	SEND_BIT(1); SEND_BIT(0);	/* <TA> */
478 
479 	for (i = 0xf; i >= 0; i--) {	/* <DDDDDDDDDDDDDDDD> */
480 		SEND_BIT((data >> i) & 1);
481 	}
482 
483 	PUT_MIFREG(mif_bbopenb, 0);	/* Disable the MII driver */
484 	CHECK_MIFREG();
485 }
486 
487 /* Return 0 if OK, 1 if error (Transceiver does not talk management) */
488 static uint16_t
489 hme_bb_mii_read(struct hme *hmep, uint8_t phyad, uint8_t regad)
490 {
491 	int		i;
492 	uint32_t	x;
493 	uint16_t	data = 0;
494 
495 	PUT_MIFREG(mif_bbopenb, 1);	/* Enable the MII driver */
496 	(void) hme_bb_force_idle(hmep);
497 	SEND_BIT(0); SEND_BIT(1);	/* <ST> */
498 	SEND_BIT(1); SEND_BIT(0);	/* <OP> */
499 	for (i = 4; i >= 0; i--) {		/* <AAAAA> */
500 		SEND_BIT((phyad >> i) & 1);
501 	}
502 	for (i = 4; i >= 0; i--) {		/* <RRRRR> */
503 		SEND_BIT((regad >> i) & 1);
504 	}
505 
506 	PUT_MIFREG(mif_bbopenb, 0);	/* Disable the MII driver */
507 
508 	GET_BIT_STD(phyad, x);
509 	GET_BIT_STD(phyad, x);		/* <TA> */
510 	for (i = 0xf; i >= 0; i--) {	/* <DDDDDDDDDDDDDDDD> */
511 		GET_BIT_STD(phyad, x);
512 		data += (x << i);
513 	}
514 	/*
515 	 * Kludge to get the Transceiver out of hung mode
516 	 */
517 	GET_BIT_STD(phyad, x);
518 	GET_BIT_STD(phyad, x);
519 	GET_BIT_STD(phyad, x);
520 	CHECK_MIFREG();
521 	return (data);
522 }
523 
524 
525 static void
526 hme_bb_force_idle(struct hme *hmep)
527 {
528 	int	i;
529 
530 	for (i = 0; i < 33; i++) {
531 		SEND_BIT(1);
532 	}
533 }
534 
535 /* <<<<<<<<<<<<<<<<<<<<End of Bit Bang Operations >>>>>>>>>>>>>>>>>>>>>>>> */
536 
537 
538 /* <<<<<<<<<<<<< Frame Register used for MII operations >>>>>>>>>>>>>>>>>>>> */
539 
540 /* Return 0 if OK, 1 if error (Transceiver does not talk management) */
541 static uint16_t
542 hme_mii_read(void *arg, uint8_t phyad, uint8_t regad)
543 {
544 	struct hme	*hmep = arg;
545 	uint32_t	frame;
546 	uint32_t	tmp_mif;
547 	uint32_t	tmp_xif;
548 
549 	tmp_mif = GET_MIFREG(mif_cfg);
550 	tmp_xif = GET_MACREG(xifc);
551 
552 	switch (phyad) {
553 	case HME_EXTERNAL_PHYAD:
554 		PUT_MIFREG(mif_cfg, tmp_mif | HME_MIF_CFGPS);
555 		PUT_MACREG(xifc, tmp_xif | BMAC_XIFC_MIIBUFDIS);
556 		break;
557 	case HME_INTERNAL_PHYAD:
558 		PUT_MIFREG(mif_cfg, tmp_mif & ~(HME_MIF_CFGPS));
559 		PUT_MACREG(xifc, tmp_xif & ~(BMAC_XIFC_MIIBUFDIS));
560 		break;
561 	default:
562 		return (0xffff);
563 	}
564 
565 	if (!hmep->hme_frame_enable) {
566 		frame = (hme_bb_mii_read(hmep, phyad, regad));
567 		PUT_MACREG(xifc, tmp_xif);
568 		PUT_MIFREG(mif_cfg, tmp_mif);
569 		return (frame & 0xffff);
570 	}
571 
572 	PUT_MIFREG(mif_frame,
573 	    HME_MIF_FRREAD | (phyad << HME_MIF_FRPHYAD_SHIFT) |
574 	    (regad << HME_MIF_FRREGAD_SHIFT));
575 /*
576  *	HMEDELAY((*framerp & HME_MIF_FRTA0), HMEMAXRSTDELAY);
577  */
578 	HMEDELAY((GET_MIFREG(mif_frame) & HME_MIF_FRTA0), 300);
579 	frame = GET_MIFREG(mif_frame);
580 	CHECK_MIFREG();
581 
582 	PUT_MACREG(xifc, tmp_xif);
583 	PUT_MIFREG(mif_cfg, tmp_mif);
584 
585 	if ((frame & HME_MIF_FRTA0) == 0) {
588 		HME_FAULT_MSG1(hmep, SEVERITY_UNKNOWN, MII_MSG,
589 		    "MIF Read failure");
590 		return (0xffff);
591 	}
592 	return ((uint16_t)(frame & HME_MIF_FRDATA));
593 }
594 
595 static void
hme_mii_write(void * arg,uint8_t phyad,uint8_t regad,uint16_t data)596 hme_mii_write(void *arg, uint8_t phyad, uint8_t regad, uint16_t data)
597 {
598 	struct hme *hmep = arg;
599 	uint32_t frame;
600 	uint32_t tmp_mif;
601 	uint32_t tmp_xif;
602 
603 	tmp_mif = GET_MIFREG(mif_cfg);
604 	tmp_xif = GET_MACREG(xifc);
605 
606 	switch (phyad) {
607 	case HME_EXTERNAL_PHYAD:
608 		PUT_MIFREG(mif_cfg, tmp_mif | HME_MIF_CFGPS);
609 		PUT_MACREG(xifc, tmp_xif | BMAC_XIFC_MIIBUFDIS);
610 		break;
611 	case HME_INTERNAL_PHYAD:
612 		PUT_MIFREG(mif_cfg, tmp_mif & ~(HME_MIF_CFGPS));
613 		PUT_MACREG(xifc, tmp_xif & ~(BMAC_XIFC_MIIBUFDIS));
614 		break;
615 	default:
616 		return;
617 	}
618 
619 	if (!hmep->hme_frame_enable) {
620 		hme_bb_mii_write(hmep, phyad, regad, data);
621 		PUT_MACREG(xifc, tmp_xif);
622 		PUT_MIFREG(mif_cfg, tmp_mif);
623 		return;
624 	}
625 
626 	PUT_MIFREG(mif_frame,
627 	    HME_MIF_FRWRITE | (phyad << HME_MIF_FRPHYAD_SHIFT) |
628 	    (regad << HME_MIF_FRREGAD_SHIFT) | data);
629 /*
630  *	HMEDELAY((*framerp & HME_MIF_FRTA0), HMEMAXRSTDELAY);
631  */
632 	HMEDELAY((GET_MIFREG(mif_frame) & HME_MIF_FRTA0), 300);
633 	frame = GET_MIFREG(mif_frame);
634 	PUT_MACREG(xifc, tmp_xif);
635 	PUT_MIFREG(mif_cfg, tmp_mif);
636 	CHECK_MIFREG();
637 	if ((frame & HME_MIF_FRTA0) == 0) {
638 		HME_FAULT_MSG1(hmep, SEVERITY_MID, MII_MSG,
639 		    "MIF Write failure");
640 	}
641 }
642 
643 static void
644 hme_mii_notify(void *arg, link_state_t link)
645 {
646 	struct hme *hmep = arg;
647 
648 	if (link == LINK_STATE_UP) {
649 		(void) hmeinit(hmep);
650 	}
651 	mac_link_update(hmep->hme_mh, link);
652 }
653 
654 /* <<<<<<<<<<<<<<<<<<<<<<<<<<<  LOADABLE ENTRIES  >>>>>>>>>>>>>>>>>>>>>>> */
655 
656 int
657 _init(void)
658 {
659 	int	status;
660 
661 	mac_init_ops(&hme_dev_ops, "hme");
662 	if ((status = mod_install(&modlinkage)) != 0) {
663 		mac_fini_ops(&hme_dev_ops);
664 	}
665 	return (status);
666 }
667 
668 int
669 _fini(void)
670 {
671 	int	status;
672 
673 	if ((status = mod_remove(&modlinkage)) == 0) {
674 		mac_fini_ops(&hme_dev_ops);
675 	}
676 	return (status);
677 }
678 
679 int
680 _info(struct modinfo *modinfop)
681 {
682 	return (mod_info(&modlinkage, modinfop));
683 }
684 
685 /*
686  * ddi_dma_sync() a TMD or RMD descriptor.
687  */
688 #define	HMESYNCRMD(num, who)				\
689 	(void) ddi_dma_sync(hmep->hme_rmd_dmah,		\
690 	    (num * sizeof (struct hme_rmd)),		\
691 	    sizeof (struct hme_rmd),			\
692 	    who)
693 
694 #define	HMESYNCTMD(num, who)				\
695 	(void) ddi_dma_sync(hmep->hme_tmd_dmah,		\
696 	    (num * sizeof (struct hme_tmd)),		\
697 	    sizeof (struct hme_tmd),			\
698 	    who)
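
/*
 * Typical usage (a sketch; the actual rx/tx paths are outside this
 * section): sync a single descriptor before the CPU reads what the
 * device wrote, e.g.
 *
 *	HMESYNCRMD(rmdi, DDI_DMA_SYNC_FORKERNEL);
 *	flags = GET_RMD_FLAGS(rmdi);
 *
 * and use DDI_DMA_SYNC_FORDEV after the CPU updates a descriptor.
 */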
699 
700 /*
701  * Ethernet broadcast address definition.
702  */
703 static	struct ether_addr	etherbroadcastaddr = {
704 	0xff, 0xff, 0xff, 0xff, 0xff, 0xff
705 };
706 
707 /*
708  * MIB II broadcast/multicast packets
709  */
710 #define	IS_BROADCAST(pkt) (bcmp(pkt, &etherbroadcastaddr, ETHERADDRL) == 0)
711 #define	IS_MULTICAST(pkt) ((pkt[0] & 01) == 1)
712 #define	BUMP_InNUcast(hmep, pkt) \
713 	if (IS_MULTICAST(pkt)) {			       \
714 		if (IS_BROADCAST(pkt)) {		       \
715 			hmep->hme_brdcstrcv++;		       \
716 		} else {				       \
717 			hmep->hme_multircv++;		       \
718 		}					       \
719 	}
720 #define	BUMP_OutNUcast(hmep, pkt) \
721 	if (IS_MULTICAST(pkt)) {			       \
722 		if (IS_BROADCAST(pkt)) {		       \
723 			hmep->hme_brdcstxmt++;		       \
724 		} else {				       \
725 			hmep->hme_multixmt++;		       \
726 		}					       \
727 	}
728 
729 static int
730 hme_create_prop_from_kw(dev_info_t *dip, char *vpdname, char *vpdstr)
731 {
732 	char propstr[80];
733 	int i, needprop = 0;
734 	struct ether_addr local_mac;
735 
736 	if (strcmp(vpdname, "NA") == 0) {
737 		(void) strcpy(propstr, "local-mac-address");
738 		needprop = 1;
739 	} else if (strcmp(vpdname, "Z0") == 0) {
740 		(void) strcpy(propstr, "model");
741 		needprop = 1;
742 	} else if (strcmp(vpdname, "Z1") == 0) {
743 		(void) strcpy(propstr, "board-model");
744 		needprop = 1;
745 	}
746 
747 	if (needprop == 1) {
748 
749 		if (strcmp(propstr, "local-mac-address") == 0) {
750 			for (i = 0; i < ETHERADDRL; i++)
751 				local_mac.ether_addr_octet[i] =
752 				    (uchar_t)vpdstr[i];
753 			if (ddi_prop_create(DDI_DEV_T_NONE, dip,
754 			    DDI_PROP_CANSLEEP, propstr,
755 			    (char *)local_mac.ether_addr_octet, ETHERADDRL)
756 			    != DDI_SUCCESS) {
757 				return (DDI_FAILURE);
758 			}
759 		} else {
760 			if (ddi_prop_create(DDI_DEV_T_NONE, dip,
761 			    DDI_PROP_CANSLEEP, propstr, vpdstr,
762 			    strlen(vpdstr)+1) != DDI_SUCCESS) {
763 				return (DDI_FAILURE);
764 			}
765 		}
766 	}
767 	return (0);
768 }
769 
770 /*
771  * Get properties from old VPD
772  * for PCI cards
773  */
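
/*
 * The parser below assumes the classic PCI VPD-R layout: a 0x90 tag
 * byte and a length, followed by keywords that each consist of a
 * two-character name, a one-byte length and the data, e.g. "NA"
 * (MAC address) or "Z0"/"Z1" (model strings).
 */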
774 static int
775 hme_get_oldvpd_props(dev_info_t *dip, int vpd_base)
776 {
777 	struct hme *hmep;
778 	int vpd_start, vpd_len, kw_start, kw_len, kw_ptr;
779 	char kw_namestr[3];
780 	char kw_fieldstr[256];
781 	int i;
782 
783 	hmep = ddi_get_driver_private(dip);
784 
785 	vpd_start = vpd_base;
786 
787 	if ((GET_ROM8(&hmep->hme_romp[vpd_start]) & 0xff) != 0x90) {
788 		return (1); /* error */
789 	} else {
790 		vpd_len = 9;
791 	}
792 
793 	/* Get local-mac-address */
794 	kw_start = vpd_start + 3; /* Location of 1st keyword */
795 	kw_ptr = kw_start;
796 	while ((kw_ptr - kw_start) < vpd_len) { /* Get all keywords */
797 		kw_namestr[0] = GET_ROM8(&hmep->hme_romp[kw_ptr]);
798 		kw_namestr[1] = GET_ROM8(&hmep->hme_romp[kw_ptr+1]);
799 		kw_namestr[2] = '\0';
800 		kw_len = (int)(GET_ROM8(&hmep->hme_romp[kw_ptr+2]) & 0xff);
801 		for (i = 0, kw_ptr += 3; i < kw_len; i++)
802 			kw_fieldstr[i] = GET_ROM8(&hmep->hme_romp[kw_ptr+i]);
803 		kw_fieldstr[i] = '\0';
804 		if (hme_create_prop_from_kw(dip, kw_namestr, kw_fieldstr)) {
805 			return (DDI_FAILURE);
806 		}
807 		kw_ptr += kw_len;
808 	} /* next keyword */
809 
810 	if (ddi_prop_create(DDI_DEV_T_NONE, dip, DDI_PROP_CANSLEEP, "model",
811 	    "SUNW,cheerio", strlen("SUNW,cheerio")+1) != DDI_SUCCESS) {
812 		return (DDI_FAILURE);
813 	}
814 	return (0);
815 }
816 
817 
818 /*
819  * Get properties from new VPD
820  * for CompactPCI cards
821  */
822 static int
823 hme_get_newvpd_props(dev_info_t *dip, int vpd_base)
824 {
825 	struct hme *hmep;
826 	int vpd_start, vpd_len, kw_start, kw_len, kw_ptr;
827 	char kw_namestr[3];
828 	char kw_fieldstr[256];
829 	int maxvpdsize, i;
830 
831 	hmep = ddi_get_driver_private(dip);
832 
833 	maxvpdsize = 1024; /* Real size not known until after it is read */
834 
835 	vpd_start = (int)((GET_ROM8(&(hmep->hme_romp[vpd_base+1])) & 0xff) |
836 	    ((GET_ROM8(&hmep->hme_romp[vpd_base+2]) & 0xff) << 8)) +3;
837 	vpd_start = vpd_base + vpd_start;
838 	while (vpd_start < (vpd_base + maxvpdsize)) { /* Get all VPDs */
839 		if ((GET_ROM8(&hmep->hme_romp[vpd_start]) & 0xff) != 0x90) {
840 			break; /* no VPD found */
841 		} else {
842 			vpd_len = (int)((GET_ROM8(&hmep->hme_romp[vpd_start
843 			    + 1]) & 0xff) | (GET_ROM8(&hmep->hme_romp[vpd_start
844 			    + 2]) & 0xff) << 8);
845 		}
846 		/* Get all keywords in this VPD */
847 		kw_start = vpd_start + 3; /* Location of 1st keyword */
848 		kw_ptr = kw_start;
849 		while ((kw_ptr - kw_start) < vpd_len) { /* Get all keywords */
850 			kw_namestr[0] = GET_ROM8(&hmep->hme_romp[kw_ptr]);
851 			kw_namestr[1] = GET_ROM8(&hmep->hme_romp[kw_ptr+1]);
852 			kw_namestr[2] = '\0';
853 			kw_len =
854 			    (int)(GET_ROM8(&hmep->hme_romp[kw_ptr+2]) & 0xff);
855 			for (i = 0, kw_ptr += 3; i < kw_len; i++)
856 				kw_fieldstr[i] =
857 				    GET_ROM8(&hmep->hme_romp[kw_ptr+i]);
858 			kw_fieldstr[i] = '\0';
859 			if (hme_create_prop_from_kw(dip, kw_namestr,
860 			    kw_fieldstr)) {
861 				return (DDI_FAILURE);
862 			}
863 			kw_ptr += kw_len;
864 		} /* next keyword */
865 		vpd_start += (vpd_len + 3);
866 	} /* next VPD */
867 	return (0);
868 }
869 
870 
871 /*
872  * Get properties from VPD
873  */
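
/*
 * Layout assumed below: a PCI expansion ROM begins with the 0x55AA
 * signature, and the "PCIR" data structure found by scanning holds a
 * little-endian pointer to the VPD at bytes 8-9.
 */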
874 static int
875 hme_get_vpd_props(dev_info_t *dip)
876 {
877 	struct hme *hmep;
878 	int v0, v1, vpd_base = 0;	/* 0 = "PCIR" pointer not found */
879 	int i, epromsrchlimit;
880 
881 
882 	hmep = ddi_get_driver_private(dip);
883 
884 	v0 = (int)(GET_ROM8(&(hmep->hme_romp[0])));
885 	v1 = (int)(GET_ROM8(&(hmep->hme_romp[1])));
886 	v0 = ((v0 & 0xff) << 8 | v1);
887 
888 	if ((v0 & 0xffff) != 0x55aa) {
889 		cmn_err(CE_NOTE, "Valid PCI PROM not found\n");
890 		return (1);
891 	}
892 
893 	epromsrchlimit = 4096;
894 	for (i = 2; i < epromsrchlimit; i++) {
895 		/* "PCIR" */
896 		if (((GET_ROM8(&(hmep->hme_romp[i])) & 0xff) == 'P') &&
897 		    ((GET_ROM8(&(hmep->hme_romp[i+1])) & 0xff) == 'C') &&
898 		    ((GET_ROM8(&(hmep->hme_romp[i+2])) & 0xff) == 'I') &&
899 		    ((GET_ROM8(&(hmep->hme_romp[i+3])) & 0xff) == 'R')) {
900 			vpd_base =
901 			    (int)((GET_ROM8(&(hmep->hme_romp[i+8])) & 0xff) |
902 			    (GET_ROM8(&(hmep->hme_romp[i+9])) & 0xff) << 8);
903 			break; /* VPD pointer found */
904 		}
905 	}
906 
907 	/* No VPD found */
908 	if (vpd_base == 0) {
909 		cmn_err(CE_NOTE, "Vital Product Data pointer not found\n");
910 		return (1);
911 	}
912 
913 	v0 = (int)(GET_ROM8(&(hmep->hme_romp[vpd_base])));
914 	if (v0 == 0x82) {
915 		if (hme_get_newvpd_props(dip, vpd_base))
916 			return (1);
917 		return (0);
918 	} else if (v0 == 0x90) {
919 		/* If this is a SUNW,qfe card, look for the Nth "NA" descriptor */
920 		if ((GET_ROM8(&hmep->hme_romp[vpd_base + 12])  != 0x79) &&
921 		    GET_ROM8(&hmep->hme_romp[vpd_base + 4 * 12]) == 0x79) {
922 			vpd_base += hmep->hme_devno * 12;
923 		}
924 		if (hme_get_oldvpd_props(dip, vpd_base))
925 			return (1);
926 		return (0);
927 	} else
928 		return (1);	/* unknown start byte in VPD */
929 }
930 
931 /*
932  * For x86, the BIOS doesn't map the PCI Rom register for the qfe
933  * cards, so we have to extract it from the ebus bridge that is
934  * function zero of the same device.  This is a bit of an ugly hack.
935  * (The ebus bridge leaves the entire ROM mapped at base address
936  * register 0x10.)
937  */
938 
939 typedef struct {
940 	struct hme 		*hmep;
941 	dev_info_t		*parent;
942 	uint8_t			bus, dev;
943 	ddi_acc_handle_t	acch;
944 	caddr_t			romp;
945 } ebus_rom_t;
946 
947 static int
948 hme_mapebusrom(dev_info_t *dip, void *arg)
949 {
950 	int		*regs;
951 	unsigned	nregs;
952 	int		reg;
953 	ebus_rom_t	*rom = arg;
954 	struct hme	*hmep = rom->hmep;
955 
956 	/*
957 	 * We only want to look at our peers.  Skip our parent.
958 	 */
959 	if (dip == rom->parent) {
960 		return (DDI_WALK_PRUNESIB);
961 	}
962 
963 	if (ddi_get_parent(dip) != rom->parent)
964 		return (DDI_WALK_CONTINUE);
965 
966 	if ((ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dip, 0,
967 	    "reg", &regs, &nregs)) != DDI_PROP_SUCCESS) {
968 		return (DDI_WALK_PRUNECHILD);
969 	}
970 
971 	if (nregs < 1) {
972 		ddi_prop_free(regs);
973 		return (DDI_WALK_PRUNECHILD);
974 	}
975 	reg = regs[0];
976 	ddi_prop_free(regs);
977 
978 	/*
979 	 * Look for function 0 on our bus and device.  If the device doesn't
980 	 * match, it might be an alternate peer, in which case we don't want
981 	 * to examine any of its children.
982 	 */
983 	if ((PCI_REG_BUS_G(reg) != rom->bus) ||
984 	    (PCI_REG_DEV_G(reg) != rom->dev) ||
985 	    (PCI_REG_FUNC_G(reg) != 0)) {
986 		return (DDI_WALK_PRUNECHILD);
987 	}
988 
989 	(void) ddi_regs_map_setup(dip, 1, &rom->romp, 0, 0, &hmep->hme_dev_attr,
990 	    &rom->acch);
991 	/*
992 	 * If we can't map the registers, the caller will notice that
993 	 * the acch is NULL.
994 	 */
995 	return (DDI_WALK_TERMINATE);
996 }
997 
998 static int
999 hmeget_promebus(dev_info_t *dip)
1000 {
1001 	ebus_rom_t	rom;
1002 	int		*regs;
1003 	unsigned	nregs;
1004 	struct hme	*hmep;
1005 
1006 	hmep = ddi_get_driver_private(dip);
1007 
1008 	bzero(&rom, sizeof (rom));
1009 
1010 	/*
1011 	 * For x86, the BIOS doesn't map the PCI Rom register for the qfe
1012 	 * cards, so we have to extract it from the eBus bridge that is
1013 	 * function zero.  This is a bit of an ugly hack.
1014 	 */
1015 	if ((ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dip, 0,
1016 	    "reg", &regs, &nregs)) != DDI_PROP_SUCCESS) {
1017 		return (DDI_FAILURE);
1018 	}
1019 
1020 	if (nregs < 5) {
1021 		ddi_prop_free(regs);
1022 		return (DDI_FAILURE);
1023 	}
1024 	rom.hmep = hmep;
1025 	rom.bus = PCI_REG_BUS_G(regs[0]);
1026 	rom.dev = PCI_REG_DEV_G(regs[0]);
1027 	hmep->hme_devno = rom.dev;
1028 	rom.parent = ddi_get_parent(dip);
1029 
1030 	/*
1031 	 * The implementation of ddi_walk_devs says that we must not
1032 	 * be called during autoconfiguration.  However, it turns out
1033 	 * that it is safe to call this during our attach routine,
1034 	 * because we are not a nexus device.
1035 	 *
1036 	 * Previously we rooted our search at our immediate parent,
1037 	 * but this triggered an assertion panic in debug kernels.
1038 	 */
1039 	ddi_walk_devs(ddi_root_node(), hme_mapebusrom, &rom);
1040 
1041 	if (rom.acch) {
1042 		hmep->hme_romh = rom.acch;
1043 		hmep->hme_romp = (unsigned char *)rom.romp;
1044 		return (DDI_SUCCESS);
1045 	}
1046 	return (DDI_FAILURE);
1047 }
1048 
1049 static int
1050 hmeget_promprops(dev_info_t *dip)
1051 {
1052 	struct hme *hmep;
1053 	int rom_bar;
1054 	ddi_acc_handle_t cfg_handle;
1055 	struct {
1056 		uint16_t vendorid;
1057 		uint16_t devid;
1058 		uint16_t command;
1059 		uint16_t status;
1060 		uint32_t junk1;
1061 		uint8_t cache_line;
1062 		uint8_t latency;
1063 		uint8_t header;
1064 		uint8_t bist;
1065 		uint32_t base;
1066 		uint32_t base14;
1067 		uint32_t base18;
1068 		uint32_t base1c;
1069 		uint32_t base20;
1070 		uint32_t base24;
1071 		uint32_t base28;
1072 		uint32_t base2c;
1073 		uint32_t base30;
1074 	} *cfg_ptr;
1075 
1076 	hmep = ddi_get_driver_private(dip);
1077 
1078 
1079 	/*
1080 	 * map configuration space
1081 	 */
1082 	if (ddi_regs_map_setup(hmep->dip, 0, (caddr_t *)&cfg_ptr,
1083 	    0, 0, &hmep->hme_dev_attr, &cfg_handle)) {
1084 		return (DDI_FAILURE);
1085 	}
1086 
1087 	/*
1088 	 * Enable bus-master and memory accesses
1089 	 */
1090 	ddi_put16(cfg_handle, &cfg_ptr->command,
1091 	    PCI_COMM_SERR_ENABLE | PCI_COMM_PARITY_DETECT |
1092 	    PCI_COMM_MAE | PCI_COMM_ME);
1093 
1094 	/*
1095 	 * Enable rom accesses
1096 	 */
1097 	rom_bar = ddi_get32(cfg_handle, &cfg_ptr->base30);
1098 	ddi_put32(cfg_handle, &cfg_ptr->base30, rom_bar | 1);
1099 
1100 
1101 	if ((ddi_regs_map_setup(dip, 2, (caddr_t *)&(hmep->hme_romp), 0, 0,
1102 	    &hmep->hme_dev_attr, &hmep->hme_romh) != DDI_SUCCESS) &&
1103 	    (hmeget_promebus(dip) != DDI_SUCCESS)) {
1104 
1105 		if (cfg_ptr)
1106 			ddi_regs_map_free(&cfg_handle);
1107 		return (DDI_FAILURE);
1108 	} else {
1109 		if (hme_get_vpd_props(dip)) {
			/* unmap before failing so the handles don't leak */
			if (hmep->hme_romp)
				ddi_regs_map_free(&hmep->hme_romh);
			if (cfg_ptr)
				ddi_regs_map_free(&cfg_handle);
1110 			return (DDI_FAILURE);
		}
1111 	}
1112 	if (hmep->hme_romp)
1113 		ddi_regs_map_free(&hmep->hme_romh);
1114 	if (cfg_ptr)
1115 		ddi_regs_map_free(&cfg_handle);
1116 	return (DDI_SUCCESS);
1117 
1118 }
1119 
1120 static void
1121 hmeget_hm_rev_property(struct hme *hmep)
1122 {
1123 	int	hm_rev;
1124 
1125 
1126 	hm_rev = hmep->asic_rev;
1127 	switch (hm_rev) {
1128 	case HME_2P1_REVID:
1129 	case HME_2P1_REVID_OBP:
1130 		HME_FAULT_MSG2(hmep, SEVERITY_NONE, DISPLAY_MSG,
1131 		    "SBus 2.1 Found (Rev Id = %x)", hm_rev);
1132 		hmep->hme_frame_enable = 1;
1133 		break;
1134 
1135 	case HME_2P0_REVID:
1136 		HME_FAULT_MSG2(hmep, SEVERITY_NONE, DISPLAY_MSG,
1137 		    "SBus 2.0 Found (Rev Id = %x)", hm_rev);
1138 		break;
1139 
1140 	case HME_1C0_REVID:
1141 		HME_FAULT_MSG2(hmep, SEVERITY_NONE, DISPLAY_MSG,
1142 		    "PCI IO 1.0 Found (Rev Id = %x)", hm_rev);
1143 		break;
1144 
1145 	default:
1146 		HME_FAULT_MSG3(hmep, SEVERITY_NONE, DISPLAY_MSG,
1147 		    "%s (Rev Id = %x) Found",
1148 		    (hm_rev == HME_2C0_REVID) ? "PCI IO 2.0" : "Sbus", hm_rev);
1149 		hmep->hme_frame_enable = 1;
1150 		hmep->hme_lance_mode_enable = 1;
1151 		hmep->hme_rxcv_enable = 1;
1152 		break;
1153 	}
1154 }
1155 
1156 /*
1157  * Interface exists: make available by filling in network interface
1158  * record.  System will initialize the interface when it is ready
1159  * to accept packets.
1160  */
1161 int
1162 hmeattach(dev_info_t *dip, ddi_attach_cmd_t cmd)
1163 {
1164 	struct hme *hmep;
1165 	mac_register_t *macp = NULL;
1166 	int 	regno;
1167 	int hm_rev = 0;
1168 	int prop_len = sizeof (int);
1169 	ddi_acc_handle_t cfg_handle;
1170 	struct {
1171 		uint16_t vendorid;
1172 		uint16_t devid;
1173 		uint16_t command;
1174 		uint16_t status;
1175 		uint8_t revid;
1176 		uint8_t j1;
1177 		uint16_t j2;
1178 	} *cfg_ptr;
1179 
1180 	switch (cmd) {
1181 	case DDI_ATTACH:
1182 		break;
1183 
1184 	case DDI_RESUME:
1185 		if ((hmep = ddi_get_driver_private(dip)) == NULL)
1186 			return (DDI_FAILURE);
1187 
1188 		hmep->hme_flags &= ~HMESUSPENDED;
1189 
1190 		mii_resume(hmep->hme_mii);
1191 
1192 		if (hmep->hme_started)
1193 			(void) hmeinit(hmep);
1194 		return (DDI_SUCCESS);
1195 
1196 	default:
1197 		return (DDI_FAILURE);
1198 	}
1199 
1200 	/*
1201 	 * Allocate soft device data structure
1202 	 */
1203 	hmep = kmem_zalloc(sizeof (*hmep), KM_SLEEP);
1204 
1205 	/*
1206 	 * Might as well set up elements of data structure
1207 	 */
1208 	hmep->dip =		dip;
1209 	hmep->instance = 	ddi_get_instance(dip);
1210 	hmep->pagesize =	ddi_ptob(dip, (ulong_t)1); /* IOMMU PSize */
1211 
1212 	/*
1213 	 * Might as well set up the driver private
1214 	 * structure as part of the dip.
1215 	 */
1216 	ddi_set_driver_private(dip, hmep);
1217 
1218 	/*
1219 	 * Reject this device if it's in a slave-only slot.
1220 	 */
1221 	if (ddi_slaveonly(dip) == DDI_SUCCESS) {
1222 		HME_FAULT_MSG1(hmep, SEVERITY_UNKNOWN, CONFIG_MSG,
1223 		    "Dev not used - dev in slave only slot");
1224 		goto error_state;
1225 	}
1226 
1227 	/*
1228 	 * Map in the device registers.
1229 	 *
1230 	 * Reg # 0 is the Global register set
1231 	 * Reg # 1 is the ETX register set
1232 	 * Reg # 2 is the ERX register set
1233 	 * Reg # 3 is the BigMAC register set.
1234 	 * Reg # 4 is the MIF register set
1235 	 */
1236 	if (ddi_dev_nregs(dip, &regno) != (DDI_SUCCESS)) {
1237 		HME_FAULT_MSG2(hmep, SEVERITY_HIGH, INIT_MSG,
1238 		    ddi_nregs_fail_msg, regno);
1239 		goto error_state;
1240 	}
1241 
1242 	switch (regno) {
1243 	case 5:
1244 		hmep->hme_cheerio_mode = 0;
1245 		break;
1246 	case 2:
1247 	case 3: /* for hot swap/plug, there will be 3 entries in "reg" prop */
1248 		hmep->hme_cheerio_mode = 1;
1249 		break;
1250 	default:
1251 		HME_FAULT_MSG1(hmep, SEVERITY_HIGH, INIT_MSG,
1252 		    bad_num_regs_msg);
1253 		goto error_state;
1254 	}
1255 
1256 	/* Initialize device attributes structure */
1257 	hmep->hme_dev_attr.devacc_attr_version = DDI_DEVICE_ATTR_V0;
1258 
1259 	if (hmep->hme_cheerio_mode)
1260 		hmep->hme_dev_attr.devacc_attr_endian_flags =
1261 		    DDI_STRUCTURE_LE_ACC;
1262 	else
1263 		hmep->hme_dev_attr.devacc_attr_endian_flags =
1264 		    DDI_STRUCTURE_BE_ACC;
1265 
1266 	hmep->hme_dev_attr.devacc_attr_dataorder = DDI_STRICTORDER_ACC;
1267 
1268 	if (hmep->hme_cheerio_mode) {
1269 		uint8_t		oldLT;
1270 		uint8_t		newLT = 0;
1271 		dev_info_t	*pdip;
1272 		const char	*pdrvname;
1273 
1274 		/*
1275 		 * Map the PCI config space
1276 		 */
1277 		if (pci_config_setup(dip, &hmep->pci_config_handle) !=
1278 		    DDI_SUCCESS) {
1279 			HME_FAULT_MSG1(hmep, SEVERITY_UNKNOWN, CONFIG_MSG,
1280 			    "pci_config_setup() failed..");
1281 			goto error_state;
1282 		}
1283 
1284 		if (ddi_regs_map_setup(dip, 1,
1285 		    (caddr_t *)&(hmep->hme_globregp), 0, 0,
1286 		    &hmep->hme_dev_attr, &hmep->hme_globregh)) {
1287 			HME_FAULT_MSG1(hmep, SEVERITY_UNKNOWN, CONFIG_MSG,
1288 			    mregs_4global_reg_fail_msg);
1289 			goto error_unmap;
1290 		}
1291 		hmep->hme_etxregh = hmep->hme_erxregh = hmep->hme_bmacregh =
1292 		    hmep->hme_mifregh = hmep->hme_globregh;
1293 
1294 		hmep->hme_etxregp =
1295 		    (void *)(((caddr_t)hmep->hme_globregp) + 0x2000);
1296 		hmep->hme_erxregp =
1297 		    (void *)(((caddr_t)hmep->hme_globregp) + 0x4000);
1298 		hmep->hme_bmacregp =
1299 		    (void *)(((caddr_t)hmep->hme_globregp) + 0x6000);
1300 		hmep->hme_mifregp =
1301 		    (void *)(((caddr_t)hmep->hme_globregp) + 0x7000);
1302 
1303 		/*
1304 		 * Get parent pci bridge info.
1305 		 */
1306 		pdip = ddi_get_parent(dip);
1307 		pdrvname = ddi_driver_name(pdip);
1308 
1309 		oldLT = pci_config_get8(hmep->pci_config_handle,
1310 		    PCI_CONF_LATENCY_TIMER);
1311 		/*
1312 		 * Honor value set in /etc/system
1313 		 * "set hme:pci_latency_timer=0xYY"
1314 		 */
1315 		if (pci_latency_timer)
1316 			newLT = pci_latency_timer;
1317 		/*
1318 		 * Modify LT for simba
1319 		 */
1320 		else if (strcmp("simba", pdrvname) == 0)
1321 			newLT = 0xf0;
1322 		/*
1323 		 * Ensure minimum cheerio latency timer of 0x50
1324 		 * Usually OBP or pci bridge should set this value
1325 		 * based on cheerio
1326 		 * min_grant * 8 (at 33 MHz): 0xa * 0x8 = 0x50
1327 		 * Some systems set the cheerio LT to 0x40
1328 		 */
1329 		else if (oldLT < 0x40)
1330 			newLT = 0x50;
1331 
1332 		/*
1333 		 * Now program cheerio's pci latency timer with newLT
1334 		 */
1335 		if (newLT)
1336 			pci_config_put8(hmep->pci_config_handle,
1337 			    PCI_CONF_LATENCY_TIMER, (uchar_t)newLT);
1338 	} else { /* Map register sets */
1339 		if (ddi_regs_map_setup(dip, 0,
1340 		    (caddr_t *)&(hmep->hme_globregp), 0, 0,
1341 		    &hmep->hme_dev_attr, &hmep->hme_globregh)) {
1342 			HME_FAULT_MSG1(hmep, SEVERITY_UNKNOWN, CONFIG_MSG,
1343 			    mregs_4global_reg_fail_msg);
1344 			goto error_state;
1345 		}
1346 		if (ddi_regs_map_setup(dip, 1,
1347 		    (caddr_t *)&(hmep->hme_etxregp), 0, 0,
1348 		    &hmep->hme_dev_attr, &hmep->hme_etxregh)) {
1349 			HME_FAULT_MSG1(hmep, SEVERITY_UNKNOWN, CONFIG_MSG,
1350 			    mregs_4etx_reg_fail_msg);
1351 			goto error_unmap;
1352 		}
1353 		if (ddi_regs_map_setup(dip, 2,
1354 		    (caddr_t *)&(hmep->hme_erxregp), 0, 0,
1355 		    &hmep->hme_dev_attr, &hmep->hme_erxregh)) {
1356 			HME_FAULT_MSG1(hmep, SEVERITY_UNKNOWN, CONFIG_MSG,
1357 			    mregs_4erx_reg_fail_msg);
1358 			goto error_unmap;
1359 		}
1360 		if (ddi_regs_map_setup(dip, 3,
1361 		    (caddr_t *)&(hmep->hme_bmacregp), 0, 0,
1362 		    &hmep->hme_dev_attr, &hmep->hme_bmacregh)) {
1363 			HME_FAULT_MSG1(hmep, SEVERITY_UNKNOWN, CONFIG_MSG,
1364 			    mregs_4bmac_reg_fail_msg);
1365 			goto error_unmap;
1366 		}
1367 
1368 		if (ddi_regs_map_setup(dip, 4,
1369 		    (caddr_t *)&(hmep->hme_mifregp), 0, 0,
1370 		    &hmep->hme_dev_attr, &hmep->hme_mifregh)) {
1371 			HME_FAULT_MSG1(hmep, SEVERITY_UNKNOWN, CONFIG_MSG,
1372 			    mregs_4mif_reg_fail_msg);
1373 			goto error_unmap;
1374 		}
1375 	} /* Endif cheerio_mode */
1376 
1377 	/*
1378 	 * Based on the hm-rev, set some capabilities
1379 	 * Set up default capabilities for HM 2.0
1380 	 */
1381 	hmep->hme_frame_enable = 0;
1382 	hmep->hme_lance_mode_enable = 0;
1383 	hmep->hme_rxcv_enable = 0;
1384 
1385 	/* NEW routine to get the properties */
1386 
1387 	if (ddi_getlongprop_buf(DDI_DEV_T_ANY, hmep->dip, 0, "hm-rev",
1388 	    (caddr_t)&hm_rev, &prop_len) == DDI_PROP_SUCCESS) {
1389 
1390 		hmep->asic_rev = hm_rev;
1391 		hmeget_hm_rev_property(hmep);
1392 	} else {
1393 		/*
1394 		 * The hm-rev property was not found, so this is a case of
1395 		 * hot insertion of a card without interpreting the FCode.
1396 		 * Get it from revid in config space after mapping it.
1397 		 */
1398 		if (ddi_regs_map_setup(hmep->dip, 0, (caddr_t *)&cfg_ptr,
1399 		    0, 0, &hmep->hme_dev_attr, &cfg_handle)) {
1400 			return (DDI_FAILURE);
1401 		}
1402 		/*
1403 		 * Since this is a cheerio-based PCI card, we write 0xC in the
1404 		 * top 4 bits (4-7) of hm-rev and retain the bottom (0-3) bits
1405 		 * for the Cheerio version (1.0 or 2.0 = 0xC0 or 0xC1)
1406 		 */
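		/*
		 * For example, assuming HME_1C0_REVID is 0xc0, a
		 * config-space revid of 0x01 yields a synthesized
		 * hm-rev of 0xc1 (Cheerio 2.0).
		 */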
1407 		hm_rev = ddi_get8(cfg_handle, &cfg_ptr->revid);
1408 		hm_rev = HME_1C0_REVID | (hm_rev & HME_REV_VERS_MASK);
1409 		hmep->asic_rev = hm_rev;
1410 		if (ddi_prop_create(DDI_DEV_T_NONE, dip, DDI_PROP_CANSLEEP,
1411 		    "hm-rev", (caddr_t)&hm_rev, sizeof (hm_rev)) !=
1412 		    DDI_SUCCESS) {
1413 			HME_FAULT_MSG1(hmep, SEVERITY_UNKNOWN, AUTOCONFIG_MSG,
1414 			    "ddi_prop_create error for hm_rev");
1415 		}
1416 		ddi_regs_map_free(&cfg_handle);
1417 
1418 		hmeget_hm_rev_property(hmep);
1419 
1420 		/* get info via VPD */
1421 		if (hmeget_promprops(dip) != DDI_SUCCESS) {
1422 			HME_FAULT_MSG1(hmep, SEVERITY_UNKNOWN, AUTOCONFIG_MSG,
1423 			    "no promprops");
1424 		}
1425 	}
1426 
1427 	if (ddi_intr_hilevel(dip, 0)) {
1428 		HME_FAULT_MSG1(hmep, SEVERITY_HIGH, NFATAL_ERR_MSG,
1429 		    " high-level interrupts are not supported");
1430 		goto error_unmap;
1431 	}
1432 
1433 	/*
1434 	 * Get intr. block cookie so that mutex locks can be initialized.
1435 	 */
1436 	if (ddi_get_iblock_cookie(dip, 0, &hmep->hme_cookie) != DDI_SUCCESS)
1437 		goto error_unmap;
1438 
1439 	/*
1440 	 * Initialize mutex's for this device.
1441 	 */
1442 	mutex_init(&hmep->hme_xmitlock, NULL, MUTEX_DRIVER, hmep->hme_cookie);
1443 	mutex_init(&hmep->hme_intrlock, NULL, MUTEX_DRIVER, hmep->hme_cookie);
1444 
1445 	/*
1446 	 * Quiesce the hardware.
1447 	 */
1448 	(void) hmestop(hmep);
1449 
1450 	/*
1451 	 * Add interrupt to system
1452 	 */
1453 	if (ddi_add_intr(dip, 0, (ddi_iblock_cookie_t *)NULL,
1454 	    (ddi_idevice_cookie_t *)NULL, hmeintr, (caddr_t)hmep)) {
1455 		HME_FAULT_MSG1(hmep, SEVERITY_UNKNOWN, CONFIG_MSG,
1456 		    add_intr_fail_msg);
1457 		goto error_mutex;
1458 	}
1459 
1460 	/*
1461 	 * Set up the ethernet mac address.
1462 	 */
1463 	hme_setup_mac_address(hmep, dip);
1464 
1465 	if (!hmeinit_xfer_params(hmep))
1466 		goto error_intr;
1467 
1468 	if (hmeburstsizes(hmep) == DDI_FAILURE) {
1469 		HME_FAULT_MSG1(hmep, SEVERITY_HIGH, INIT_MSG, burst_size_msg);
1470 		goto error_intr;
1471 	}
1472 
1473 	if (hmeallocthings(hmep) != DDI_SUCCESS) {
1474 		HME_FAULT_MSG1(hmep, SEVERITY_HIGH, CONFIG_MSG,
1475 		    "resource allocation failed");
1476 		goto error_intr;
1477 	}
1478 
1479 	if (hmeallocbufs(hmep) != DDI_SUCCESS) {
1480 		HME_FAULT_MSG1(hmep, SEVERITY_HIGH, CONFIG_MSG,
1481 		    "buffer allocation failed");
1482 		goto error_intr;
1483 	}
1484 
1485 	hmestatinit(hmep);
1486 
1487 	/* our external (preferred) PHY is at address 0 */
1488 	(void) ddi_prop_update_int(DDI_DEV_T_NONE, dip, "first-phy", 0);
1489 
1490 	hmep->hme_mii = mii_alloc(hmep, dip, &hme_mii_ops);
1491 	if (hmep->hme_mii == NULL) {
1492 		HME_FAULT_MSG1(hmep, SEVERITY_HIGH, CONFIG_MSG,
1493 		    "mii_alloc failed");
1494 		goto error_intr;
1495 	}
1496 	/* force a probe for the PHY */
1497 	mii_probe(hmep->hme_mii);
1498 
1499 	if ((macp = mac_alloc(MAC_VERSION)) == NULL) {
1500 		HME_FAULT_MSG1(hmep, SEVERITY_HIGH, CONFIG_MSG,
1501 		    "mac_alloc failed");
1502 		goto error_intr;
1503 	}
1504 	macp->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
1505 	macp->m_driver = hmep;
1506 	macp->m_dip = dip;
1507 	macp->m_src_addr = hmep->hme_ouraddr.ether_addr_octet;
1508 	macp->m_callbacks = &hme_m_callbacks;
1509 	macp->m_min_sdu = 0;
1510 	macp->m_max_sdu = ETHERMTU;
1511 	macp->m_margin = VLAN_TAGSZ;
1512 	macp->m_priv_props = hme_priv_prop;
1513 	if (mac_register(macp, &hmep->hme_mh) != 0) {
1514 		mac_free(macp);
1515 		goto error_intr;
1516 	}
1517 
1518 	mac_free(macp);
1519 
1520 	ddi_report_dev(dip);
1521 	return (DDI_SUCCESS);
1522 
1523 	/*
1524 	 * Failure Exit
1525 	 */
1526 
1527 error_intr:
1528 	if (hmep->hme_cookie)
1529 		ddi_remove_intr(dip, 0, (ddi_iblock_cookie_t)0);
1530 
1531 	if (hmep->hme_mii)
1532 		mii_free(hmep->hme_mii);
1533 
1534 error_mutex:
1535 	mutex_destroy(&hmep->hme_xmitlock);
1536 	mutex_destroy(&hmep->hme_intrlock);
1537 
1538 error_unmap:
1539 	if (hmep->hme_globregh)
1540 		ddi_regs_map_free(&hmep->hme_globregh);
1541 	if (hmep->hme_cheerio_mode == 0) {
1542 		if (hmep->hme_etxregh)
1543 			ddi_regs_map_free(&hmep->hme_etxregh);
1544 		if (hmep->hme_erxregh)
1545 			ddi_regs_map_free(&hmep->hme_erxregh);
1546 		if (hmep->hme_bmacregh)
1547 			ddi_regs_map_free(&hmep->hme_bmacregh);
1548 		if (hmep->hme_mifregh)
1549 			ddi_regs_map_free(&hmep->hme_mifregh);
1550 	} else {
1551 		if (hmep->pci_config_handle)
1552 			(void) pci_config_teardown(&hmep->pci_config_handle);
1553 		hmep->hme_etxregh = hmep->hme_erxregh = hmep->hme_bmacregh =
1554 		    hmep->hme_mifregh = hmep->hme_globregh = NULL;
1555 	}
1556 
1557 error_state:
1558 	hmefreethings(hmep);
1559 	hmefreebufs(hmep);
1560 
1561 	if (hmep) {
1562 		kmem_free((caddr_t)hmep, sizeof (*hmep));
1563 		ddi_set_driver_private(dip, NULL);
1564 	}
1565 
1566 	return (DDI_FAILURE);
1567 }
1568 
1569 int
1570 hmedetach(dev_info_t *dip, ddi_detach_cmd_t cmd)
1571 {
1572 	struct hme *hmep;
1573 
1574 	if ((hmep = ddi_get_driver_private(dip)) == NULL)
1575 		return (DDI_FAILURE);
1576 
1577 	switch (cmd) {
1578 	case DDI_DETACH:
1579 		break;
1580 
1581 	case DDI_SUSPEND:
1582 		mii_suspend(hmep->hme_mii);
1583 		hmep->hme_flags |= HMESUSPENDED;
1584 		hmeuninit(hmep);
1585 		return (DDI_SUCCESS);
1586 
1587 	default:
1588 		return (DDI_FAILURE);
1589 	}
1590 
1591 
1592 	if (mac_unregister(hmep->hme_mh) != 0) {
1593 		return (DDI_FAILURE);
1594 	}
1595 
1596 	/*
1597 	 * Make the driver quiescent; we don't want to prevent the
1598 	 * detach on failure.  Note that this should be redundant,
1599 	 * since mac_stop should already have called hmeuninit().
1600 	 */
1601 	if (!(hmep->hme_flags & HMESUSPENDED)) {
1602 		(void) hmestop(hmep);
1603 	}
1604 
1605 	if (hmep->hme_mii)
1606 		mii_free(hmep->hme_mii);
1607 
1608 	/*
1609 	 * Remove instance of the intr
1610 	 */
1611 	ddi_remove_intr(dip, 0, (ddi_iblock_cookie_t)0);
1612 
1613 	/*
1614 	 * Unregister kstats.
1615 	 */
1616 	if (hmep->hme_ksp != NULL)
1617 		kstat_delete(hmep->hme_ksp);
1618 	if (hmep->hme_intrstats != NULL)
1619 		kstat_delete(hmep->hme_intrstats);
1620 
1621 	hmep->hme_ksp = NULL;
1622 	hmep->hme_intrstats = NULL;
1623 
1624 	/*
1625 	 * Destroy all mutexes and data structures allocated during
1626 	 * attach time.
1627 	 *
1628 	 * Note: at this time we should be the only thread accessing
1629 	 * the structures for this instance.
1630 	 */
1631 
1632 	if (hmep->hme_globregh)
1633 		ddi_regs_map_free(&hmep->hme_globregh);
1634 	if (hmep->hme_cheerio_mode == 0) {
1635 		if (hmep->hme_etxregh)
1636 			ddi_regs_map_free(&hmep->hme_etxregh);
1637 		if (hmep->hme_erxregh)
1638 			ddi_regs_map_free(&hmep->hme_erxregh);
1639 		if (hmep->hme_bmacregh)
1640 			ddi_regs_map_free(&hmep->hme_bmacregh);
1641 		if (hmep->hme_mifregh)
1642 			ddi_regs_map_free(&hmep->hme_mifregh);
1643 	} else {
1644 		if (hmep->pci_config_handle)
1645 			(void) pci_config_teardown(&hmep->pci_config_handle);
1646 		hmep->hme_etxregh = hmep->hme_erxregh = hmep->hme_bmacregh =
1647 		    hmep->hme_mifregh = hmep->hme_globregh = NULL;
1648 	}
1649 
1650 	mutex_destroy(&hmep->hme_xmitlock);
1651 	mutex_destroy(&hmep->hme_intrlock);
1652 
1653 	hmefreethings(hmep);
1654 	hmefreebufs(hmep);
1655 
1656 	ddi_set_driver_private(dip, NULL);
1657 	kmem_free(hmep, sizeof (struct hme));
1658 
1659 	return (DDI_SUCCESS);
1660 }
1661 
1662 int
1663 hmequiesce(dev_info_t *dip)
1664 {
1665 	struct hme *hmep;
1666 
1667 	if ((hmep = ddi_get_driver_private(dip)) == NULL)
1668 		return (DDI_FAILURE);
1669 
1670 	(void) hmestop(hmep);
1671 	return (DDI_SUCCESS);
1672 }
1673 
1674 static boolean_t
1675 hmeinit_xfer_params(struct hme *hmep)
1676 {
1677 	int hme_ipg1_conf, hme_ipg2_conf;
1678 	int hme_ipg0_conf, hme_lance_mode_conf;
1679 	int prop_len = sizeof (int);
1680 	dev_info_t *dip;
1681 
1682 	dip = hmep->dip;
1683 
1684 	/*
1685 	 * Set up the start-up values for user-configurable parameters
1686 	 * Get the values from the global variables first.
1687 	 * Use the MASK to limit the value to allowed maximum.
1688 	 */
1689 	hmep->hme_ipg1 = hme_ipg1 & HME_MASK_8BIT;
1690 	hmep->hme_ipg2 = hme_ipg2 & HME_MASK_8BIT;
1691 	hmep->hme_ipg0 = hme_ipg0 & HME_MASK_5BIT;
1692 
1693 	/*
1694 	 * Get the parameter values configured in .conf file.
1695 	 */
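	/*
	 * e.g. a driver.conf(4) entry could supply (illustrative
	 * values only):
	 *
	 *	ipg1=8; ipg2=4; lance_mode=1;
	 */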
1696 	if (ddi_getlongprop_buf(DDI_DEV_T_ANY, dip, 0, "ipg1",
1697 	    (caddr_t)&hme_ipg1_conf, &prop_len) == DDI_PROP_SUCCESS) {
1698 		hmep->hme_ipg1 = hme_ipg1_conf & HME_MASK_8BIT;
1699 	}
1700 
1701 	if (ddi_getlongprop_buf(DDI_DEV_T_ANY, dip, 0, "ipg2",
1702 	    (caddr_t)&hme_ipg2_conf, &prop_len) == DDI_PROP_SUCCESS) {
1703 		hmep->hme_ipg2 = hme_ipg2_conf & HME_MASK_8BIT;
1704 	}
1705 
1706 	if (ddi_getlongprop_buf(DDI_DEV_T_ANY, dip, 0, "ipg0",
1707 	    (caddr_t)&hme_ipg0_conf, &prop_len) == DDI_PROP_SUCCESS) {
1708 		hmep->hme_ipg0 = hme_ipg0_conf & HME_MASK_5BIT;
1709 	}
1710 
1711 	if (ddi_getlongprop_buf(DDI_DEV_T_ANY, dip, 0, "lance_mode",
1712 	    (caddr_t)&hme_lance_mode_conf, &prop_len) == DDI_PROP_SUCCESS) {
1713 		hmep->hme_lance_mode = hme_lance_mode_conf & HME_MASK_1BIT;
1714 	}
1715 
1716 	return (B_TRUE);
1717 }
1718 
1719 /*
1720  * Return 0 upon success, 1 on failure.
1721  */
1722 static uint_t
1723 hmestop(struct hme *hmep)
1724 {
1725 	/*
1726 	 * Disable the Tx dma engine.
1727 	 */
1728 	PUT_ETXREG(config, (GET_ETXREG(config) & ~HMET_CONFIG_TXDMA_EN));
1729 	HMEDELAY(((GET_ETXREG(state_mach) & 0x1f) == 0x1), HMEMAXRSTDELAY);
1730 
1731 	/*
1732 	 * Disable the Rx dma engine.
1733 	 */
1734 	PUT_ERXREG(config, (GET_ERXREG(config) & ~HMER_CONFIG_RXDMA_EN));
1735 	HMEDELAY(((GET_ERXREG(state_mach) & 0x3f) == 0), HMEMAXRSTDELAY);
1736 
1737 	/*
1738 	 * By this time all things should be quiet, so hit the
1739 	 * chip with a reset.
1740 	 */
1741 	PUT_GLOBREG(reset, HMEG_RESET_GLOBAL);
1742 
1743 	HMEDELAY((GET_GLOBREG(reset) == 0), HMEMAXRSTDELAY);
1744 	if (GET_GLOBREG(reset)) {
1745 		return (1);
1746 	}
1747 
1748 	CHECK_GLOBREG();
1749 	return (0);
1750 }
1751 
1752 static int
1753 hmestat_kstat_update(kstat_t *ksp, int rw)
1754 {
1755 	struct hme *hmep;
1756 	struct hmekstat *hkp;
1757 
1758 	hmep = (struct hme *)ksp->ks_private;
1759 	hkp = (struct hmekstat *)ksp->ks_data;
1760 
1761 	if (rw != KSTAT_READ)
1762 		return (EACCES);
1763 
1764 	/*
1765 	 * Update all the stats by reading all the counter registers.
1766 	 * Counter register stats are not updated till they overflow
1767 	 * and interrupt.
1768 	 */
1769 
1770 	mutex_enter(&hmep->hme_xmitlock);
1771 	if (hmep->hme_flags & HMERUNNING) {
1772 		hmereclaim(hmep);
1773 		hmesavecntrs(hmep);
1774 	}
1775 	mutex_exit(&hmep->hme_xmitlock);
1776 
1777 	hkp->hk_cvc.value.ul		= hmep->hme_cvc;
1778 	hkp->hk_lenerr.value.ul		= hmep->hme_lenerr;
1779 	hkp->hk_buff.value.ul		= hmep->hme_buff;
1780 	hkp->hk_missed.value.ul		= hmep->hme_missed;
1781 	hkp->hk_allocbfail.value.ul	= hmep->hme_allocbfail;
1782 	hkp->hk_babl.value.ul		= hmep->hme_babl;
1783 	hkp->hk_tmder.value.ul		= hmep->hme_tmder;
1784 	hkp->hk_txlaterr.value.ul	= hmep->hme_txlaterr;
1785 	hkp->hk_rxlaterr.value.ul	= hmep->hme_rxlaterr;
1786 	hkp->hk_slvparerr.value.ul	= hmep->hme_slvparerr;
1787 	hkp->hk_txparerr.value.ul	= hmep->hme_txparerr;
1788 	hkp->hk_rxparerr.value.ul	= hmep->hme_rxparerr;
1789 	hkp->hk_slverrack.value.ul	= hmep->hme_slverrack;
1790 	hkp->hk_txerrack.value.ul	= hmep->hme_txerrack;
1791 	hkp->hk_rxerrack.value.ul	= hmep->hme_rxerrack;
1792 	hkp->hk_txtagerr.value.ul	= hmep->hme_txtagerr;
1793 	hkp->hk_rxtagerr.value.ul	= hmep->hme_rxtagerr;
1794 	hkp->hk_eoperr.value.ul		= hmep->hme_eoperr;
1795 	hkp->hk_notmds.value.ul		= hmep->hme_notmds;
1796 	hkp->hk_notbufs.value.ul	= hmep->hme_notbufs;
1797 	hkp->hk_norbufs.value.ul	= hmep->hme_norbufs;
1798 
1799 	/*
1800 	 * Debug kstats
1801 	 */
1802 	hkp->hk_inits.value.ul		= hmep->inits;
1803 	hkp->hk_phyfail.value.ul	= hmep->phyfail;
1804 
1805 	/*
1806 	 * xcvr kstats
1807 	 */
1808 	hkp->hk_asic_rev.value.ul	= hmep->asic_rev;
1809 
1810 	return (0);
1811 }
1812 
1813 static void
1814 hmestatinit(struct hme *hmep)
1815 {
1816 	struct	kstat	*ksp;
1817 	struct	hmekstat	*hkp;
1818 	const char *driver;
1819 	int	instance;
1820 	char	buf[16];
1821 
1822 	instance = hmep->instance;
1823 	driver = ddi_driver_name(hmep->dip);
1824 
1825 	if ((ksp = kstat_create(driver, instance,
1826 	    "driver_info", "net", KSTAT_TYPE_NAMED,
1827 	    sizeof (struct hmekstat) / sizeof (kstat_named_t), 0)) == NULL) {
1828 		HME_FAULT_MSG1(hmep, SEVERITY_UNKNOWN, INIT_MSG,
1829 		    "kstat_create failed");
1830 		return;
1831 	}
1832 
1833 	(void) snprintf(buf, sizeof (buf), "%sc%d", driver, instance);
1834 	hmep->hme_intrstats = kstat_create(driver, instance, buf, "controller",
1835 	    KSTAT_TYPE_INTR, 1, KSTAT_FLAG_PERSISTENT);
1836 	if (hmep->hme_intrstats)
1837 		kstat_install(hmep->hme_intrstats);
1838 
1839 	hmep->hme_ksp = ksp;
1840 	hkp = (struct hmekstat *)ksp->ks_data;
1841 	kstat_named_init(&hkp->hk_cvc,			"code_violations",
1842 	    KSTAT_DATA_ULONG);
1843 	kstat_named_init(&hkp->hk_lenerr,		"len_errors",
1844 	    KSTAT_DATA_ULONG);
1845 	kstat_named_init(&hkp->hk_buff,			"buff",
1846 	    KSTAT_DATA_ULONG);
1847 	kstat_named_init(&hkp->hk_missed,		"missed",
1848 	    KSTAT_DATA_ULONG);
1849 	kstat_named_init(&hkp->hk_nocanput,		"nocanput",
1850 	    KSTAT_DATA_ULONG);
1851 	kstat_named_init(&hkp->hk_allocbfail,		"allocbfail",
1852 	    KSTAT_DATA_ULONG);
1853 	kstat_named_init(&hkp->hk_babl,			"babble",
1854 	    KSTAT_DATA_ULONG);
1855 	kstat_named_init(&hkp->hk_tmder,		"tmd_error",
1856 	    KSTAT_DATA_ULONG);
1857 	kstat_named_init(&hkp->hk_txlaterr,		"tx_late_error",
1858 	    KSTAT_DATA_ULONG);
1859 	kstat_named_init(&hkp->hk_rxlaterr,		"rx_late_error",
1860 	    KSTAT_DATA_ULONG);
1861 	kstat_named_init(&hkp->hk_slvparerr,		"slv_parity_error",
1862 	    KSTAT_DATA_ULONG);
1863 	kstat_named_init(&hkp->hk_txparerr,		"tx_parity_error",
1864 	    KSTAT_DATA_ULONG);
1865 	kstat_named_init(&hkp->hk_rxparerr,		"rx_parity_error",
1866 	    KSTAT_DATA_ULONG);
1867 	kstat_named_init(&hkp->hk_slverrack,		"slv_error_ack",
1868 	    KSTAT_DATA_ULONG);
1869 	kstat_named_init(&hkp->hk_txerrack,		"tx_error_ack",
1870 	    KSTAT_DATA_ULONG);
1871 	kstat_named_init(&hkp->hk_rxerrack,		"rx_error_ack",
1872 	    KSTAT_DATA_ULONG);
1873 	kstat_named_init(&hkp->hk_txtagerr,		"tx_tag_error",
1874 	    KSTAT_DATA_ULONG);
1875 	kstat_named_init(&hkp->hk_rxtagerr,		"rx_tag_error",
1876 	    KSTAT_DATA_ULONG);
1877 	kstat_named_init(&hkp->hk_eoperr,		"eop_error",
1878 	    KSTAT_DATA_ULONG);
1879 	kstat_named_init(&hkp->hk_notmds,		"no_tmds",
1880 	    KSTAT_DATA_ULONG);
1881 	kstat_named_init(&hkp->hk_notbufs,		"no_tbufs",
1882 	    KSTAT_DATA_ULONG);
1883 	kstat_named_init(&hkp->hk_norbufs,		"no_rbufs",
1884 	    KSTAT_DATA_ULONG);
1885 
1886 	/*
1887 	 * Debugging kstats
1888 	 */
1889 	kstat_named_init(&hkp->hk_inits,		"inits",
1890 	    KSTAT_DATA_ULONG);
1891 	kstat_named_init(&hkp->hk_phyfail,		"phy_failures",
1892 	    KSTAT_DATA_ULONG);
1893 
1894 	/*
1895 	 * xcvr kstats
1896 	 */
1897 	kstat_named_init(&hkp->hk_asic_rev,		"asic_rev",
1898 	    KSTAT_DATA_ULONG);
1899 
1900 	ksp->ks_update = hmestat_kstat_update;
1901 	ksp->ks_private = (void *) hmep;
1902 	kstat_install(ksp);
1903 }
1904 
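/*
 * Illustrative sketch only, not part of the driver: a userland
 * consumer could read the named kstats installed above through
 * libkstat(3LIB).  The module name "hme" and instance 0 are
 * assumptions for the example; error handling is elided.
 *
 *	kstat_ctl_t	*kc = kstat_open();
 *	kstat_t		*ksp = kstat_lookup(kc, "hme", 0, "driver_info");
 *	kstat_named_t	*kn;
 *
 *	(void) kstat_read(kc, ksp, NULL);
 *	kn = kstat_data_lookup(ksp, "code_violations");
 *	(void) printf("code_violations = %lu\n", kn->value.ul);
 *	(void) kstat_close(kc);
 */
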
1905 int
1906 hme_m_getprop(void *arg, const char *name, mac_prop_id_t num, uint_t sz,
1907     void *val)
1908 {
1909 	struct hme *hmep = arg;
1910 	int value;
1911 	int rv;
1912 
1913 	rv = mii_m_getprop(hmep->hme_mii, name, num, sz, val);
1914 	if (rv != ENOTSUP)
1915 		return (rv);
1916 
1917 	switch (num) {
1918 	case MAC_PROP_PRIVATE:
1919 		break;
1920 	default:
1921 		return (ENOTSUP);
1922 	}
1923 
1924 	if (strcmp(name, "_ipg0") == 0) {
1925 		value = hmep->hme_ipg0;
1926 	} else if (strcmp(name, "_ipg1") == 0) {
1927 		value = hmep->hme_ipg1;
1928 	} else if (strcmp(name, "_ipg2") == 0) {
1929 		value = hmep->hme_ipg2;
1930 	} else if (strcmp(name, "_lance_mode") == 0) {
1931 		value = hmep->hme_lance_mode;
1932 	} else {
1933 		return (ENOTSUP);
1934 	}
1935 	(void) snprintf(val, sz, "%d", value);
1936 	return (0);
1937 }
1938 
1939 static void
1940 hme_m_propinfo(void *arg, const char *name, mac_prop_id_t num,
1941     mac_prop_info_handle_t mph)
1942 {
1943 	struct hme *hmep = arg;
1944 
1945 	mii_m_propinfo(hmep->hme_mii, name, num, mph);
1946 
1947 	switch (num) {
1948 	case MAC_PROP_PRIVATE: {
1949 		char valstr[64];
1950 		int default_val;
1951 
1952 		if (strcmp(name, "_ipg0") == 0) {
1953 			default_val = hme_ipg0;
1954 		} else if (strcmp(name, "_ipg1") == 0) {
1955 			default_val = hme_ipg1;
1956 		} else if (strcmp(name, "_ipg2") == 0) {
1957 			default_val = hme_ipg2;
1958 		} else if (strcmp(name, "_lance_mode") == 0) {
1959 			default_val = hme_lance_mode;
1960 		} else {
1961 			return;
1962 		}
1963 
1964 		(void) snprintf(valstr, sizeof (valstr), "%d", default_val);
1965 		mac_prop_info_set_default_str(mph, valstr);
1966 		break;
1967 	}
1968 	}
1969 }
1970 
1971 int
1972 hme_m_setprop(void *arg, const char *name, mac_prop_id_t num, uint_t sz,
1973     const void *val)
1974 {
1975 	struct hme *hmep = arg;
1976 	int rv;
1977 	long lval;
1978 	boolean_t init = B_FALSE;
1979 
1980 	rv = mii_m_setprop(hmep->hme_mii, name, num, sz, val);
1981 	if (rv != ENOTSUP)
1982 		return (rv);
1983 	rv = 0;
1984 
1985 	switch (num) {
1986 	case MAC_PROP_PRIVATE:
1987 		break;
1988 	default:
1989 		return (ENOTSUP);
1990 	}
1991 
1992 	(void) ddi_strtol(val, NULL, 0, &lval);
1993 
1994 	if (strcmp(name, "_ipg1") == 0) {
1995 		if ((lval >= 0) && (lval <= 255)) {
1996 			hmep->hme_ipg1 = lval & 0xff;
1997 			init = B_TRUE;
1998 		} else {
1999 			return (EINVAL);
2000 		}
2001 
2002 	} else if (strcmp(name, "_ipg2") == 0) {
2003 		if ((lval >= 0) && (lval <= 255)) {
2004 			hmep->hme_ipg2 = lval & 0xff;
2005 			init = B_TRUE;
2006 		} else {
2007 			return (EINVAL);
2008 		}
2009 
2010 	} else if (strcmp(name, "_ipg0") == 0) {
2011 		if ((lval >= 0) && (lval <= 31)) {
2012 			hmep->hme_ipg0 = lval & 0xff;
2013 			init = B_TRUE;
2014 		} else {
2015 			return (EINVAL);
2016 		}
2017 	} else if (strcmp(name, "_lance_mode") == 0) {
2018 		if ((lval >= 0) && (lval <= 1)) {
2019 			hmep->hme_lance_mode = lval & 0xff;
2020 			init = B_TRUE;
2021 		} else {
2022 			return (EINVAL);
2023 		}
2024 
2025 	} else {
2026 		rv = ENOTSUP;
2027 	}
2028 
2029 	if (init) {
2030 		(void) hmeinit(hmep);
2031 	}
2032 	return (rv);
2033 }
2034 
2035 
2036 /*ARGSUSED*/
2037 static boolean_t
2038 hme_m_getcapab(void *arg, mac_capab_t cap, void *cap_data)
2039 {
2040 	switch (cap) {
2041 	case MAC_CAPAB_HCKSUM:
2042 		*(uint32_t *)cap_data = HCKSUM_INET_PARTIAL;
2043 		return (B_TRUE);
2044 	default:
2045 		return (B_FALSE);
2046 	}
2047 }
2048 
2049 static int
2050 hme_m_promisc(void *arg, boolean_t on)
2051 {
2052 	struct hme *hmep = arg;
2053 
2054 	hmep->hme_promisc = on;
2055 	(void) hmeinit(hmep);
2056 	return (0);
2057 }
2058 
2059 static int
2060 hme_m_unicst(void *arg, const uint8_t *macaddr)
2061 {
2062 	struct hme *hmep = arg;
2063 
2064 	/*
2065 	 * Set new interface local address and re-init device.
2066 	 * This is destructive to any other streams attached
2067 	 * to this device.
2068 	 */
2069 	mutex_enter(&hmep->hme_intrlock);
2070 	bcopy(macaddr, &hmep->hme_ouraddr, ETHERADDRL);
2071 	mutex_exit(&hmep->hme_intrlock);
2072 	(void) hmeinit(hmep);
2073 	return (0);
2074 }
2075 
2076 static int
2077 hme_m_multicst(void *arg, boolean_t add, const uint8_t *macaddr)
2078 {
2079 	struct hme	*hmep = arg;
2080 	uint32_t	ladrf_bit;
2081 	boolean_t	doinit = B_FALSE;
2082 
2083 	/*
2084 	 * Add or remove this address's bit in the local address filter;
2085 	 * if the filter contents change, re-initialize the hardware.
2086 	 */
2087 	ladrf_bit = hmeladrf_bit(macaddr);
2088 
2089 	mutex_enter(&hmep->hme_intrlock);
2090 	if (add) {
2091 		hmep->hme_ladrf_refcnt[ladrf_bit]++;
2092 		if (hmep->hme_ladrf_refcnt[ladrf_bit] == 1) {
2093 			hmep->hme_ladrf[ladrf_bit >> 4] |=
2094 			    1 << (ladrf_bit & 0xf);
2095 			hmep->hme_multi++;
2096 			doinit = B_TRUE;
2097 		}
2098 	} else {
2099 		hmep->hme_ladrf_refcnt[ladrf_bit]--;
2100 		if (hmep->hme_ladrf_refcnt[ladrf_bit] == 0) {
2101 			hmep->hme_ladrf[ladrf_bit >> 4] &=
2102 			    ~(1 << (ladrf_bit & 0xf));
2103 			doinit = B_TRUE;
2104 		}
2105 	}
2106 	mutex_exit(&hmep->hme_intrlock);
2107 
2108 	if (doinit) {
2109 		(void) hmeinit(hmep);
2110 	}
2111 
2112 	return (0);
2113 }
2114 
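/*
 * Editorial sketch, not authoritative: hmeladrf_bit() is defined
 * elsewhere in this file.  Going by the multicast filter description
 * in hmeinit() below, it is assumed to run the 6-byte address through
 * a CRC generator and reduce the result to a 6-bit index into the
 * 64-bit logical address filter, roughly:
 *
 *	uint32_t crc;
 *
 *	CRC32(crc, macaddr, ETHERADDRL, -1U, crc32_table);
 *	return (crc >> 26);		keep the 6 high-order bits
 */
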
2115 static int
2116 hme_m_start(void *arg)
2117 {
2118 	struct hme *hmep = arg;
2119 
2120 	if (hmeinit(hmep) != 0) {
2121 		/* initialization failed -- really want DL_INITFAILED */
2122 		return (EIO);
2123 	} else {
2124 		hmep->hme_started = B_TRUE;
2125 		mii_start(hmep->hme_mii);
2126 		return (0);
2127 	}
2128 }
2129 
2130 static void
2131 hme_m_stop(void *arg)
2132 {
2133 	struct hme *hmep = arg;
2134 
2135 	mii_stop(hmep->hme_mii);
2136 	hmep->hme_started = B_FALSE;
2137 	hmeuninit(hmep);
2138 }
2139 
2140 static int
2141 hme_m_stat(void *arg, uint_t stat, uint64_t *val)
2142 {
2143 	struct hme	*hmep = arg;
2144 
2145 	mutex_enter(&hmep->hme_xmitlock);
2146 	if (hmep->hme_flags & HMERUNNING) {
2147 		hmereclaim(hmep);
2148 		hmesavecntrs(hmep);
2149 	}
2150 	mutex_exit(&hmep->hme_xmitlock);
2151 
2152 
2153 	if (mii_m_getstat(hmep->hme_mii, stat, val) == 0) {
2154 		return (0);
2155 	}
2156 	switch (stat) {
2157 	case MAC_STAT_IPACKETS:
2158 		*val = hmep->hme_ipackets;
2159 		break;
2160 	case MAC_STAT_RBYTES:
2161 		*val = hmep->hme_rbytes;
2162 		break;
2163 	case MAC_STAT_IERRORS:
2164 		*val = hmep->hme_ierrors;
2165 		break;
2166 	case MAC_STAT_OPACKETS:
2167 		*val = hmep->hme_opackets;
2168 		break;
2169 	case MAC_STAT_OBYTES:
2170 		*val = hmep->hme_obytes;
2171 		break;
2172 	case MAC_STAT_OERRORS:
2173 		*val = hmep->hme_oerrors;
2174 		break;
2175 	case MAC_STAT_MULTIRCV:
2176 		*val = hmep->hme_multircv;
2177 		break;
2178 	case MAC_STAT_MULTIXMT:
2179 		*val = hmep->hme_multixmt;
2180 		break;
2181 	case MAC_STAT_BRDCSTRCV:
2182 		*val = hmep->hme_brdcstrcv;
2183 		break;
2184 	case MAC_STAT_BRDCSTXMT:
2185 		*val = hmep->hme_brdcstxmt;
2186 		break;
2187 	case MAC_STAT_UNDERFLOWS:
2188 		*val = hmep->hme_uflo;
2189 		break;
2190 	case MAC_STAT_OVERFLOWS:
2191 		*val = hmep->hme_oflo;
2192 		break;
2193 	case MAC_STAT_COLLISIONS:
2194 		*val = hmep->hme_coll;
2195 		break;
2196 	case MAC_STAT_NORCVBUF:
2197 		*val = hmep->hme_norcvbuf;
2198 		break;
2199 	case MAC_STAT_NOXMTBUF:
2200 		*val = hmep->hme_noxmtbuf;
2201 		break;
2202 	case ETHER_STAT_LINK_DUPLEX:
2203 		*val = hmep->hme_duplex;
2204 		break;
2205 	case ETHER_STAT_ALIGN_ERRORS:
2206 		*val = hmep->hme_align_errors;
2207 		break;
2208 	case ETHER_STAT_FCS_ERRORS:
2209 		*val = hmep->hme_fcs_errors;
2210 		break;
2211 	case ETHER_STAT_EX_COLLISIONS:
2212 		*val = hmep->hme_excol;
2213 		break;
2214 	case ETHER_STAT_DEFER_XMTS:
2215 		*val = hmep->hme_defer_xmts;
2216 		break;
2217 	case ETHER_STAT_SQE_ERRORS:
2218 		*val = hmep->hme_sqe_errors;
2219 		break;
2220 	case ETHER_STAT_FIRST_COLLISIONS:
2221 		*val = hmep->hme_fstcol;
2222 		break;
2223 	case ETHER_STAT_TX_LATE_COLLISIONS:
2224 		*val = hmep->hme_tlcol;
2225 		break;
2226 	case ETHER_STAT_TOOLONG_ERRORS:
2227 		*val = hmep->hme_toolong_errors;
2228 		break;
2229 	case ETHER_STAT_TOOSHORT_ERRORS:
2230 		*val = hmep->hme_runt;
2231 		break;
2232 	case ETHER_STAT_CARRIER_ERRORS:
2233 		*val = hmep->hme_carrier_errors;
2234 		break;
2235 	default:
2236 		return (EINVAL);
2237 	}
2238 	return (0);
2239 }
2240 
2241 static mblk_t *
2242 hme_m_tx(void *arg, mblk_t *mp)
2243 {
2244 	struct hme *hmep = arg;
2245 	mblk_t *next;
2246 
2247 	while (mp != NULL) {
2248 		next = mp->b_next;
2249 		mp->b_next = NULL;
2250 		if (!hmestart(hmep, mp)) {
2251 			mp->b_next = next;
2252 			break;
2253 		}
2254 		mp = next;
2255 	}
2256 	return (mp);
2257 }
2258 
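/*
 * Flow-control note: when hmestart() returns B_FALSE (out of TMDs),
 * the unsent chain is handed back to the MAC layer, which stops
 * calling hme_m_tx() until mac_tx_update() is issued from
 * hmereclaim() once descriptors have been reclaimed.
 */
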
2259 /*
2260  * Software IP checksum, for the edge cases that the
2261  * hardware can't handle.  See hmestart for more info.
2262  */
2263 static uint16_t
2264 hme_cksum(void *data, int len)
2265 {
2266 	uint16_t	*words = data;
2267 	int		i, nwords = len / 2;
2268 	uint32_t	sum = 0;
2269 
2270 	/* just add up the words */
2271 	for (i = 0; i < nwords; i++) {
2272 		sum += *words++;
2273 	}
2274 
2275 	/* pick up residual byte ... assume even half-word allocations */
2276 	if (len % 2) {
2277 		sum += (*words & htons(0xff00));
2278 	}
2279 
2280 	sum = (sum >> 16) + (sum & 0xffff);
2281 	sum = (sum >> 16) + (sum & 0xffff);
2282 
2283 	return (~(sum & 0xffff));
2284 }
2285 
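/*
 * Worked example of the folding above, with values chosen purely for
 * illustration: summing the 16-bit words 0xffff and 0x0002 gives
 * 0x10001; folding the carry yields 0x0001 + 0x0001 = 0x0002, and the
 * final complement returns 0xfffd.
 */
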
2286 static boolean_t
2287 hmestart(struct hme *hmep, mblk_t *mp)
2288 {
2289 	uint32_t	len;
2290 	boolean_t	retval = B_TRUE;
2291 	hmebuf_t	*tbuf;
2292 	uint32_t	txptr;
2293 
2294 	uint32_t	csflags = 0;
2295 	uint32_t	flags;
2296 	uint32_t	start_offset;
2297 	uint32_t	stuff_offset;
2298 
2299 	mac_hcksum_get(mp, &start_offset, &stuff_offset, NULL, NULL, &flags);
2300 
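	/*
	 * The offsets reported by mac_hcksum_get() appear to be relative
	 * to the start of the IP header; the HMETMD_CSSTART/CSSTUFF
	 * fields want frame-relative offsets, hence the Ethernet (and,
	 * for tagged frames, VLAN) header adjustment below.
	 */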
2301 	if (flags & HCK_PARTIALCKSUM) {
2302 		if (get_ether_type(mp->b_rptr) == ETHERTYPE_VLAN) {
2303 			start_offset += sizeof (struct ether_header) + 4;
2304 			stuff_offset += sizeof (struct ether_header) + 4;
2305 		} else {
2306 			start_offset += sizeof (struct ether_header);
2307 			stuff_offset += sizeof (struct ether_header);
2308 		}
2309 		csflags = HMETMD_CSENABL |
2310 		    (start_offset << HMETMD_CSSTART_SHIFT) |
2311 		    (stuff_offset << HMETMD_CSSTUFF_SHIFT);
2312 	}
2313 
2314 	mutex_enter(&hmep->hme_xmitlock);
2315 
2316 	if (hmep->hme_flags & HMESUSPENDED) {
2317 		hmep->hme_carrier_errors++;
2318 		hmep->hme_oerrors++;
2319 		goto bad;
2320 	}
2321 
2322 	if (hmep->hme_txindex != hmep->hme_txreclaim) {
2323 		hmereclaim(hmep);
2324 	}
2325 	if ((hmep->hme_txindex - HME_TMDMAX) == hmep->hme_txreclaim)
2326 		goto notmds;
2327 	txptr = hmep->hme_txindex % HME_TMDMAX;
2328 	tbuf = &hmep->hme_tbuf[txptr];
2329 
2330 	/*
2331 	 * Note that for checksum offload, the hardware cannot
2332 	 * generate correct checksums if the packet is smaller than
2333 	 * 64 bytes.  In such a case, we bcopy the packet and use
2334 	 * a software checksum.
2335 	 */
2336 
2337 	len = msgsize(mp);
2338 	if (len < 64) {
2339 		/* zero fill the padding */
2340 		bzero(tbuf->kaddr, 64);
2341 	}
2342 	mcopymsg(mp, tbuf->kaddr);
2343 
2344 	if ((csflags != 0) && ((len < 64) ||
2345 	    (start_offset > HMETMD_CSSTART_MAX) ||
2346 	    (stuff_offset > HMETMD_CSSTUFF_MAX))) {
2347 		uint16_t sum;
2348 		sum = hme_cksum(tbuf->kaddr + start_offset,
2349 		    len - start_offset);
2350 		bcopy(&sum, tbuf->kaddr + stuff_offset, sizeof (sum));
2351 		csflags = 0;
2352 	}
2353 
2354 	if (ddi_dma_sync(tbuf->dmah, 0, len, DDI_DMA_SYNC_FORDEV) ==
2355 	    DDI_FAILURE) {
2356 		HME_FAULT_MSG1(hmep, SEVERITY_HIGH, DDI_MSG,
2357 		    "ddi_dma_sync failed");
2358 	}
2359 
2360 	/*
2361 	 * update MIB II statistics
2362 	 */
2363 	BUMP_OutNUcast(hmep, tbuf->kaddr);
2364 
2365 	PUT_TMD(txptr, tbuf->paddr, len,
2366 	    HMETMD_OWN | HMETMD_SOP | HMETMD_EOP | csflags);
2367 
2368 	HMESYNCTMD(txptr, DDI_DMA_SYNC_FORDEV);
2369 	hmep->hme_txindex++;
2370 
2371 	PUT_ETXREG(txpend, HMET_TXPEND_TDMD);
2372 	CHECK_ETXREG();
2373 
2374 	mutex_exit(&hmep->hme_xmitlock);
2375 
2376 	hmep->hme_starts++;
2377 	return (B_TRUE);
2378 
2379 bad:
2380 	mutex_exit(&hmep->hme_xmitlock);
2381 	freemsg(mp);
2382 	return (B_TRUE);
2383 
2384 notmds:
2385 	hmep->hme_notmds++;
2386 	hmep->hme_wantw = B_TRUE;
2387 	hmereclaim(hmep);
2388 	retval = B_FALSE;
2389 done:
2390 	mutex_exit(&hmep->hme_xmitlock);
2391 
2392 	return (retval);
2393 }
2394 
2395 /*
2396  * Initialize channel.
2397  * Return 0 on success, nonzero on error.
2398  *
2399  * The recommended sequence for initialization is:
2400  * 1. Issue a Global Reset command to the Ethernet Channel.
2401  * 2. Poll the Global_Reset bits until the execution of the reset has been
2402  *    completed.
2403  * 2(a). Use the MIF Frame/Output register to reset the transceiver.
2404  *	 Poll Register 0 until the Reset bit is 0.
2405  * 2(b). Use the MIF Frame/Output register to set the PHY in Normal-Op,
2406  *	 100Mbps and Non-Isolated mode. The main point here is to bring the
2407  *	 PHY out of Isolate mode so that it can generate the rx_clk and tx_clk
2408  *	 to the MII interface so that the Bigmac core can correctly reset
2409  *	 upon a software reset.
2410  * 2(c).  Issue another Global Reset command to the Ethernet Channel and poll
2411  *	  the Global_Reset bits until completion.
2412  * 3. Set up all the data structures in the host memory.
2413  * 4. Program the TX_MAC registers/counters (excluding the TX_MAC Configuration
2414  *    Register).
2415  * 5. Program the RX_MAC registers/counters (excluding the RX_MAC Configuration
2416  *    Register).
2417  * 6. Program the Transmit Descriptor Ring Base Address in the ETX.
2418  * 7. Program the Receive Descriptor Ring Base Address in the ERX.
2419  * 8. Program the Global Configuration and the Global Interrupt Mask Registers.
2420  * 9. Program the ETX Configuration register (enable the Transmit DMA channel).
2421  * 10. Program the ERX Configuration register (enable the Receive DMA channel).
2422  * 11. Program the XIF Configuration Register (enable the XIF).
2423  * 12. Program the RX_MAC Configuration Register (Enable the RX_MAC).
2424  * 13. Program the TX_MAC Configuration Register (Enable the TX_MAC).
2425  */
2426 
2427 
2428 #ifdef FEPS_URUN_BUG
2429 static int hme_palen = 32;
2430 #endif
2431 
2432 static int
2433 hmeinit(struct hme *hmep)
2434 {
2435 	uint32_t		i;
2436 	int			ret;
2437 	boolean_t		fdx;
2438 	int			phyad;
2439 
2440 	/*
2441 	 * Lock sequence:
2442 	 *	hme_intrlock, hme_xmitlock.
2443 	 */
2444 	mutex_enter(&hmep->hme_intrlock);
2445 
2446 	/*
2447 	 * Don't touch the hardware if we are suspended.  But don't
2448 	 * fail either.  Some time later we may be resumed, and then
2449 	 * we'll be back here to program the device using the settings
2450 	 * in the soft state.
2451 	 */
2452 	if (hmep->hme_flags & HMESUSPENDED) {
2453 		mutex_exit(&hmep->hme_intrlock);
2454 		return (0);
2455 	}
2456 
2457 	/*
2458 	 * Temporarily mask interrupts (all but the MIF interrupt) so
2459 	 * that no pending interrupts are cleared while we reprogram
2460 	 * the device.  We need to update the interrupt mask
2461 	 * later in this function.
2462 	 */
2463 	PUT_GLOBREG(intmask, ~HMEG_MASK_MIF_INTR);
2464 
2465 
2466 	/*
2467 	 * Rearranged the mutex acquisition order to solve the deadlock
2468 	 * situation as described in bug ID 4065896.
2469 	 */
2470 
2471 	mutex_enter(&hmep->hme_xmitlock);
2472 
2473 	hmep->hme_flags = 0;
2474 	hmep->hme_wantw = B_FALSE;
2475 
2476 	if (hmep->inits)
2477 		hmesavecntrs(hmep);
2478 
2479 	/*
2480 	 * Perform Global reset of the Sbus/FEPS ENET channel.
2481 	 */
2482 	(void) hmestop(hmep);
2483 
2484 	/*
2485 	 * Clear all descriptors.
2486 	 */
2487 	bzero(hmep->hme_rmdp, HME_RMDMAX * sizeof (struct hme_rmd));
2488 	bzero(hmep->hme_tmdp, HME_TMDMAX * sizeof (struct hme_tmd));
2489 
2490 	/*
2491 	 * Hang out receive buffers.
2492 	 */
2493 	for (i = 0; i < HME_RMDMAX; i++) {
2494 		PUT_RMD(i, hmep->hme_rbuf[i].paddr);
2495 	}
2496 
2497 	/*
2498 	 * DMA sync descriptors.
2499 	 */
2500 	(void) ddi_dma_sync(hmep->hme_rmd_dmah, 0, 0, DDI_DMA_SYNC_FORDEV);
2501 	(void) ddi_dma_sync(hmep->hme_tmd_dmah, 0, 0, DDI_DMA_SYNC_FORDEV);
2502 
2503 	/*
2504 	 * Reset RMD and TMD 'walking' pointers.
2505 	 */
2506 	hmep->hme_rxindex = 0;
2507 	hmep->hme_txindex = hmep->hme_txreclaim = 0;
2508 
2509 	/*
2510 	 * This is the right place to initialize MIF !!!
2511 	 */
2512 
2513 	PUT_MIFREG(mif_imask, HME_MIF_INTMASK);	/* mask all interrupts */
2514 
2515 	if (!hmep->hme_frame_enable)
2516 		PUT_MIFREG(mif_cfg, GET_MIFREG(mif_cfg) | HME_MIF_CFGBB);
2517 	else
2518 		PUT_MIFREG(mif_cfg, GET_MIFREG(mif_cfg) & ~HME_MIF_CFGBB);
2519 						/* enable frame mode */
2520 
2521 	/*
2522 	 * Depending on the transceiver detected, select the source
2523 	 * of the clocks for the MAC. Without the clocks, TX_MAC does
2524 	 * not reset. When the Global Reset is issued to the Sbus/FEPS
2525 	 * ASIC, it selects Internal by default.
2526 	 */
2527 
2528 	switch ((phyad = mii_get_addr(hmep->hme_mii))) {
2529 	case -1:
2530 		HME_FAULT_MSG1(hmep, SEVERITY_HIGH, XCVR_MSG, no_xcvr_msg);
2531 		goto init_fail;	/* abort initialization */
2532 
2533 	case HME_INTERNAL_PHYAD:
2534 		PUT_MACREG(xifc, 0);
2535 		break;
2536 	case HME_EXTERNAL_PHYAD:
2537 		/* Isolate the Int. xcvr */
2538 		PUT_MACREG(xifc, BMAC_XIFC_MIIBUFDIS);
2539 		break;
2540 	}
2541 
2542 	hmep->inits++;
2543 
2544 	/*
2545 	 * Initialize BigMAC registers.
2546 	 * First set the tx enable bit in tx config reg to 0 and poll on
2547 	 * it till it turns to 0. Same for rx config, hash and address
2548 	 * it until it reads 0. Same for rx config, hash and address
2549 	 * Here is the sequence per the spec.
2550 	 * MADD2 - MAC Address 2
2551 	 * MADD1 - MAC Address 1
2552 	 * MADD0 - MAC Address 0
2553 	 * HASH3, HASH2, HASH1, HASH0 for group address
2554 	 * AFR2, AFR1, AFR0 and AFMR for address filter mask
2555 	 * Program RXMIN and RXMAX for packet length if not 802.3
2556 	 * RXCFG - Rx config for not stripping CRC
2557 	 * XXX Anything else to be configured in RXCFG
2558 	 * IPG1, IPG2, ALIMIT, SLOT, PALEN, PAPAT, TXSFD, JAM, TXMAX, TXMIN
2559 	 * if not 802.3 compliant
2560 	 * XIF register for speed selection
2561 	 * MASK  - Interrupt mask
2562 	 * Set bit 0 of TXCFG
2563 	 * Set bit 0 of RXCFG
2564 	 */
2565 
2566 	/*
2567 	 * Initialize the TX_MAC registers
2568 	 * Initialization of jamsize to work around rx crc bug
2569 	 */
2570 	PUT_MACREG(jam, jamsize);
2571 
2572 #ifdef	FEPS_URUN_BUG
2573 	if (hme_urun_fix)
2574 		PUT_MACREG(palen, hme_palen);
2575 #endif
2576 
2577 	PUT_MACREG(ipg1, hmep->hme_ipg1);
2578 	PUT_MACREG(ipg2, hmep->hme_ipg2);
2579 
2580 	PUT_MACREG(rseed,
2581 	    ((hmep->hme_ouraddr.ether_addr_octet[0] << 8) & 0x3) |
2582 	    hmep->hme_ouraddr.ether_addr_octet[1]);
2583 
2584 	/* Initialize the RX_MAC registers */
2585 
2586 	/*
2587 	 * Program BigMAC with local individual ethernet address.
2588 	 */
2589 	PUT_MACREG(madd2, (hmep->hme_ouraddr.ether_addr_octet[4] << 8) |
2590 	    hmep->hme_ouraddr.ether_addr_octet[5]);
2591 	PUT_MACREG(madd1, (hmep->hme_ouraddr.ether_addr_octet[2] << 8) |
2592 	    hmep->hme_ouraddr.ether_addr_octet[3]);
2593 	PUT_MACREG(madd0, (hmep->hme_ouraddr.ether_addr_octet[0] << 8) |
2594 	    hmep->hme_ouraddr.ether_addr_octet[1]);
2595 
2596 	/*
2597 	 * Set up multicast address filter by passing all multicast
2598 	 * addresses through a crc generator, and then using the
2599 	 * low order 6 bits as an index into the 64 bit logical
2600 	 * address filter. The high-order two bits select the word,
2601 	 * while the rest of the bits select the bit within the word.
2602 	 */
2603 	PUT_MACREG(hash0, hmep->hme_ladrf[0]);
2604 	PUT_MACREG(hash1, hmep->hme_ladrf[1]);
2605 	PUT_MACREG(hash2, hmep->hme_ladrf[2]);
2606 	PUT_MACREG(hash3, hmep->hme_ladrf[3]);
2607 
2608 	/*
2609 	 * Configure parameters to support VLAN.  (VLAN encapsulation adds
2610 	 * four bytes.)
2611 	 */
2612 	PUT_MACREG(txmax, ETHERMAX + ETHERFCSL + 4);
2613 	PUT_MACREG(rxmax, ETHERMAX + ETHERFCSL + 4);
2614 
2615 	/*
2616 	 * Initialize HME Global registers, ETX registers and ERX registers.
2617 	 */
2618 
2619 	PUT_ETXREG(txring, hmep->hme_tmd_paddr);
2620 	PUT_ERXREG(rxring, hmep->hme_rmd_paddr);
2621 
2622 	/*
2623 	 * ERX registers can be written only if they have an even number
2624 	 * of bits set. So, if the value written is not read back, set an
2625 	 * extra low-order bit and write again.
2626 	 * static	int	hme_erx_fix = 1;   : Use the fix for erx bug
2627 	 */
2628 	{
2629 		uint32_t temp;
2630 		temp  = hmep->hme_rmd_paddr;
2631 
2632 		if (GET_ERXREG(rxring) != temp)
2633 			PUT_ERXREG(rxring, (temp | 4));
2634 	}
2635 
2636 	PUT_GLOBREG(config, (hmep->hme_config |
2637 	    (hmep->hme_64bit_xfer << HMEG_CONFIG_64BIT_SHIFT)));
2638 
2639 	/*
2640 	 * Significant performance improvements can be achieved by
2641 	 * disabling transmit interrupt. Thus TMD's are reclaimed only
2642 	 * when we run out of them in hmestart().
2643 	 */
2644 	PUT_GLOBREG(intmask,
2645 	    HMEG_MASK_INTR | HMEG_MASK_TINT | HMEG_MASK_TX_ALL);
2646 
2647 	PUT_ETXREG(txring_size, ((HME_TMDMAX - 1) >> HMET_RINGSZ_SHIFT));
2648 	PUT_ETXREG(config, (GET_ETXREG(config) | HMET_CONFIG_TXDMA_EN
2649 	    | HMET_CONFIG_TXFIFOTH));
2650 	/* get the rxring size bits */
2651 	switch (HME_RMDMAX) {
2652 	case 32:
2653 		i = HMER_CONFIG_RXRINGSZ32;
2654 		break;
2655 	case 64:
2656 		i = HMER_CONFIG_RXRINGSZ64;
2657 		break;
2658 	case 128:
2659 		i = HMER_CONFIG_RXRINGSZ128;
2660 		break;
2661 	case 256:
2662 		i = HMER_CONFIG_RXRINGSZ256;
2663 		break;
2664 	default:
2665 		HME_FAULT_MSG1(hmep, SEVERITY_HIGH, INIT_MSG,
2666 		    unk_rx_ringsz_msg);
2667 		goto init_fail;
2668 	}
2669 	i |= (HME_FSTBYTE_OFFSET << HMER_CONFIG_FBO_SHIFT)
2670 	    | HMER_CONFIG_RXDMA_EN;
2671 
2672 	/* h/w checks start offset in half words */
2673 	i |= ((sizeof (struct ether_header) / 2) << HMER_RX_CSSTART_SHIFT);
2674 
2675 	PUT_ERXREG(config, i);
2676 
2677 	/*
2678 	 * Bug related to the parity handling in ERX: when erx config is
2679 	 * read back, Sbus/FEPS drives the parity bit, and this value is
2680 	 * used when the register is written again.
2681 	 *
2682 	 * This fixes the RECV problem in SS5.
2683 	 * static	int	hme_erx_fix = 1;   : Use the fix for erx bug
2684 	 */
2685 	{
2686 		uint32_t temp;
2687 		temp = GET_ERXREG(config);
2688 		PUT_ERXREG(config, i);
2689 
2690 		if (GET_ERXREG(config) != i)
2691 			HME_FAULT_MSG4(hmep, SEVERITY_UNKNOWN, ERX_MSG,
2692 			    "error:temp = %x erxp->config = %x, should be %x",
2693 			    temp, GET_ERXREG(config), i);
2694 	}
2695 
2696 	/*
2697 	 * Set up the rxconfig, txconfig and seed registers, without
2698 	 * enabling the former two at this time.
2699 	 *
2700 	 * BigMAC strips the CRC bytes by default. Since this is
2701 	 * contrary to other pieces of hardware, this bit needs to be
2702 	 * enabled to tell BigMAC not to strip the CRC bytes.
2703 	 * Do not filter this node's own packets.
2704 	 */
2705 
2706 	if (hme_reject_own) {
2707 		PUT_MACREG(rxcfg,
2708 		    ((hmep->hme_promisc ? BMAC_RXCFG_PROMIS : 0) |
2709 		    BMAC_RXCFG_MYOWN | BMAC_RXCFG_HASH));
2710 	} else {
2711 		PUT_MACREG(rxcfg,
2712 		    ((hmep->hme_promisc ? BMAC_RXCFG_PROMIS : 0) |
2713 		    BMAC_RXCFG_HASH));
2714 	}
2715 
2716 	drv_usecwait(10);	/* wait after setting Hash Enable bit */
2717 
2718 	fdx = (mii_get_duplex(hmep->hme_mii) == LINK_DUPLEX_FULL);
2719 
2720 	if (hme_ngu_enable)
2721 		PUT_MACREG(txcfg, (fdx ? BMAC_TXCFG_FDX : 0) |
2722 		    BMAC_TXCFG_NGU);
2723 	else
2724 		PUT_MACREG(txcfg, (fdx ? BMAC_TXCFG_FDX: 0));
2725 
2726 	i = 0;
2727 	if ((hmep->hme_lance_mode) && (hmep->hme_lance_mode_enable))
2728 		i = ((hmep->hme_ipg0 & HME_MASK_5BIT) << BMAC_XIFC_IPG0_SHIFT)
2729 		    | BMAC_XIFC_LANCE_ENAB;
2730 	if (phyad == HME_INTERNAL_PHYAD)
2731 		PUT_MACREG(xifc, i | (BMAC_XIFC_ENAB));
2732 	else
2733 		PUT_MACREG(xifc, i | (BMAC_XIFC_ENAB | BMAC_XIFC_MIIBUFDIS));
2734 
2735 	PUT_MACREG(rxcfg, GET_MACREG(rxcfg) | BMAC_RXCFG_ENAB);
2736 	PUT_MACREG(txcfg, GET_MACREG(txcfg) | BMAC_TXCFG_ENAB);
2737 
2738 	hmep->hme_flags |= (HMERUNNING | HMEINITIALIZED);
2739 	/*
2740 	 * Update the interrupt mask : this will re-allow interrupts to occur
2741 	 */
2742 	PUT_GLOBREG(intmask, HMEG_MASK_INTR);
2743 	mac_tx_update(hmep->hme_mh);
2744 
2745 init_fail:
2746 	/*
2747 	 * Release the locks in reverse order
2748 	 */
2749 	mutex_exit(&hmep->hme_xmitlock);
2750 	mutex_exit(&hmep->hme_intrlock);
2751 
2752 	ret = !(hmep->hme_flags & HMERUNNING);
2753 	if (ret) {
2754 		HME_FAULT_MSG1(hmep, SEVERITY_HIGH, INIT_MSG,
2755 		    init_fail_gen_msg);
2756 	}
2757 
2758 	/*
2759 	 * Hardware checks.
2760 	 */
2761 	CHECK_GLOBREG();
2762 	CHECK_MIFREG();
2763 	CHECK_MACREG();
2764 	CHECK_ERXREG();
2765 	CHECK_ETXREG();
2766 
2767 init_exit:
2768 	return (ret);
2769 }
2770 
2771 /*
2772  * Calculate the dvma burstsize by setting up a dvma temporarily.  Return
2773  * 0 as burstsize upon failure as it signifies no burst size.
2774  * Requests 64-bit transfer setup, if the platform supports it.
2775  * NOTE: Do not use ddi_dma_alloc_handle(9f) then ddi_dma_burstsize(9f),
2776  * sun4u Ultra-2 incorrectly returns a 32-bit transfer.
2777  */
2778 static int
2779 hmeburstsizes(struct hme *hmep)
2780 {
2781 	int burstsizes;
2782 	ddi_dma_handle_t handle;
2783 
2784 	if (ddi_dma_alloc_handle(hmep->dip, &hme_dma_attr,
2785 	    DDI_DMA_DONTWAIT, NULL, &handle)) {
2786 		return (0);
2787 	}
2788 
2789 	hmep->hme_burstsizes = burstsizes = ddi_dma_burstsizes(handle);
2790 	ddi_dma_free_handle(&handle);
2791 
2792 	/*
2793 	 * Use user-configurable parameter for enabling 64-bit transfers
2794 	 */
2795 	burstsizes = (hmep->hme_burstsizes >> 16);
2796 	if (burstsizes)
2797 		hmep->hme_64bit_xfer = hme_64bit_enable; /* user config value */
2798 	else
2799 		burstsizes = hmep->hme_burstsizes;
2800 
2801 	if (hmep->hme_cheerio_mode)
2802 		hmep->hme_64bit_xfer = 0; /* Disable for cheerio */
2803 
2804 	if (burstsizes & 0x40)
2805 		hmep->hme_config = HMEG_CONFIG_BURST64;
2806 	else if (burstsizes & 0x20)
2807 		hmep->hme_config = HMEG_CONFIG_BURST32;
2808 	else
2809 		hmep->hme_config = HMEG_CONFIG_BURST16;
2810 
2811 	return (DDI_SUCCESS);
2812 }
2813 
2814 static int
2815 hmeallocbuf(struct hme *hmep, hmebuf_t *buf, int dir)
2816 {
2817 	ddi_dma_cookie_t	dmac;
2818 	size_t			len;
2819 	unsigned		ccnt;
2820 
2821 	if (ddi_dma_alloc_handle(hmep->dip, &hme_dma_attr,
2822 	    DDI_DMA_DONTWAIT, NULL, &buf->dmah) != DDI_SUCCESS) {
2823 		HME_FAULT_MSG1(hmep, SEVERITY_HIGH, INIT_MSG,
2824 		    "cannot allocate buf dma handle - failed");
2825 		return (DDI_FAILURE);
2826 	}
2827 
2828 	if (ddi_dma_mem_alloc(buf->dmah, ROUNDUP(HMEBUFSIZE, 512),
2829 	    &hme_buf_attr, DDI_DMA_STREAMING, DDI_DMA_DONTWAIT, NULL,
2830 	    &buf->kaddr, &len, &buf->acch) != DDI_SUCCESS) {
2831 		HME_FAULT_MSG1(hmep, SEVERITY_HIGH, INIT_MSG,
2832 		    "cannot allocate buf memory - failed");
2833 		return (DDI_FAILURE);
2834 	}
2835 
2836 	if (ddi_dma_addr_bind_handle(buf->dmah, NULL, buf->kaddr,
2837 	    len, dir | DDI_DMA_CONSISTENT, DDI_DMA_DONTWAIT, NULL,
2838 	    &dmac, &ccnt) != DDI_DMA_MAPPED) {
2839 		HME_FAULT_MSG1(hmep, SEVERITY_HIGH, INIT_MSG,
2840 		    "cannot map buf for dma - failed");
2841 		return (DDI_FAILURE);
2842 	}
2843 	buf->paddr = dmac.dmac_address;
2844 
2845 	/* apparently they don't handle multiple cookies */
2846 	if (ccnt > 1) {
2847 		HME_FAULT_MSG1(hmep, SEVERITY_HIGH, INIT_MSG,
2848 		    "too many buf dma cookies");
2849 		return (DDI_FAILURE);
2850 	}
2851 	return (DDI_SUCCESS);
2852 }
2853 
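/*
 * Note that hmeallocbuf() does not unwind its own partial allocations
 * on failure; callers are assumed to rely on the idempotent
 * hmefreebufs() below to release whatever was set up.
 */
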
2854 static int
2855 hmeallocbufs(struct hme *hmep)
2856 {
2857 	hmep->hme_tbuf = kmem_zalloc(HME_TMDMAX * sizeof (hmebuf_t), KM_SLEEP);
2858 	hmep->hme_rbuf = kmem_zalloc(HME_RMDMAX * sizeof (hmebuf_t), KM_SLEEP);
2859 
2860 	/* Alloc RX buffers. */
2861 	for (int i = 0; i < HME_RMDMAX; i++) {
2862 		if (hmeallocbuf(hmep, &hmep->hme_rbuf[i], DDI_DMA_READ) !=
2863 		    DDI_SUCCESS) {
2864 			return (DDI_FAILURE);
2865 		}
2866 	}
2867 
2868 	/* Alloc TX buffers. */
2869 	for (int i = 0; i < HME_TMDMAX; i++) {
2870 		if (hmeallocbuf(hmep, &hmep->hme_tbuf[i], DDI_DMA_WRITE) !=
2871 		    DDI_SUCCESS) {
2872 			return (DDI_FAILURE);
2873 		}
2874 	}
2875 	return (DDI_SUCCESS);
2876 }
2877 
2878 static void
2879 hmefreebufs(struct hme *hmep)
2880 {
2881 	int i;
2882 
2883 	if (hmep->hme_rbuf == NULL)
2884 		return;
2885 
2886 	/*
2887 	 * Free and unload pending xmit and recv buffers.
2888 	 * The routine is written to be idempotent, so it is safe to call
2889 	 * even if only some of the buffers were successfully allocated.
2890 	 */
2891 
2892 	for (i = 0; i < HME_TMDMAX; i++) {
2893 		hmebuf_t *tbuf = &hmep->hme_tbuf[i];
2894 		if (tbuf->paddr) {
2895 			(void) ddi_dma_unbind_handle(tbuf->dmah);
2896 		}
2897 		if (tbuf->kaddr) {
2898 			ddi_dma_mem_free(&tbuf->acch);
2899 		}
2900 		if (tbuf->dmah) {
2901 			ddi_dma_free_handle(&tbuf->dmah);
2902 		}
2903 	}
2904 	for (i = 0; i < HME_RMDMAX; i++) {
2905 		hmebuf_t *rbuf = &hmep->hme_rbuf[i];
2906 		if (rbuf->paddr) {
2907 			(void) ddi_dma_unbind_handle(rbuf->dmah);
2908 		}
2909 		if (rbuf->kaddr) {
2910 			ddi_dma_mem_free(&rbuf->acch);
2911 		}
2912 		if (rbuf->dmah) {
2913 			ddi_dma_free_handle(&rbuf->dmah);
2914 		}
2915 	}
2916 	kmem_free(hmep->hme_rbuf, HME_RMDMAX * sizeof (hmebuf_t));
2917 	kmem_free(hmep->hme_tbuf, HME_TMDMAX * sizeof (hmebuf_t));
2918 }
2919 
2920 /*
2921  * Un-initialize (STOP) HME channel.
2922  */
2923 static void
2924 hmeuninit(struct hme *hmep)
2925 {
2926 	/*
2927 	 * Allow up to 'HMEDRAINTIME' for pending xmit's to complete.
2928 	 */
2929 	HMEDELAY((hmep->hme_txindex == hmep->hme_txreclaim), HMEDRAINTIME);
2930 
2931 	mutex_enter(&hmep->hme_intrlock);
2932 	mutex_enter(&hmep->hme_xmitlock);
2933 
2934 	hmep->hme_flags &= ~HMERUNNING;
2935 
2936 	(void) hmestop(hmep);
2937 
2938 	mutex_exit(&hmep->hme_xmitlock);
2939 	mutex_exit(&hmep->hme_intrlock);
2940 }
2941 
2942 /*
2943  * Allocate CONSISTENT memory for rmds and tmds with appropriate alignment and
2944  * map it in IO space. Allocate space for transmit and receive ddi_dma_handle
2945  * structures to use the DMA interface.
2946  */
2947 static int
2948 hmeallocthings(struct hme *hmep)
2949 {
2950 	int			size;
2951 	int			rval;
2952 	size_t			real_len;
2953 	uint_t			cookiec;
2954 	ddi_dma_cookie_t	dmac;
2955 	dev_info_t		*dip = hmep->dip;
2956 
2957 	/*
2958 	 * Allocate the TMD and RMD descriptors and extra for page alignment.
2959 	 */
2960 
2961 	rval = ddi_dma_alloc_handle(dip, &hme_dma_attr, DDI_DMA_DONTWAIT, NULL,
2962 	    &hmep->hme_rmd_dmah);
2963 	if (rval != DDI_SUCCESS) {
2964 		HME_FAULT_MSG1(hmep, SEVERITY_HIGH, INIT_MSG,
2965 		    "cannot allocate rmd handle - failed");
2966 		return (DDI_FAILURE);
2967 	}
2968 	size = HME_RMDMAX * sizeof (struct hme_rmd);
2969 	rval = ddi_dma_mem_alloc(hmep->hme_rmd_dmah, size,
2970 	    &hmep->hme_dev_attr, DDI_DMA_CONSISTENT, DDI_DMA_DONTWAIT, NULL,
2971 	    &hmep->hme_rmd_kaddr, &real_len, &hmep->hme_rmd_acch);
2972 	if (rval != DDI_SUCCESS) {
2973 		HME_FAULT_MSG1(hmep, SEVERITY_HIGH, INIT_MSG,
2974 		    "cannot allocate rmd dma mem - failed");
2975 		return (DDI_FAILURE);
2976 	}
2977 	hmep->hme_rmdp = (void *)(hmep->hme_rmd_kaddr);
2978 	rval = ddi_dma_addr_bind_handle(hmep->hme_rmd_dmah, NULL,
2979 	    hmep->hme_rmd_kaddr, size, DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
2980 	    DDI_DMA_DONTWAIT, NULL, &dmac, &cookiec);
2981 	if (rval != DDI_DMA_MAPPED) {
2982 		HME_FAULT_MSG1(hmep, SEVERITY_HIGH, INIT_MSG,
2983 		    "cannot allocate rmd dma - failed");
2984 		return (DDI_FAILURE);
2985 	}
2986 	hmep->hme_rmd_paddr = dmac.dmac_address;
2987 	if (cookiec != 1) {
2988 		HME_FAULT_MSG1(hmep, SEVERITY_HIGH, INIT_MSG,
2989 		    "too many rmd cookies - failed");
2990 		return (DDI_FAILURE);
2991 	}
2992 
2993 	rval = ddi_dma_alloc_handle(dip, &hme_dma_attr, DDI_DMA_DONTWAIT, NULL,
2994 	    &hmep->hme_tmd_dmah);
2995 	if (rval != DDI_SUCCESS) {
2996 		HME_FAULT_MSG1(hmep, SEVERITY_HIGH, INIT_MSG,
2997 		    "cannot allocate tmd handle - failed");
2998 		return (DDI_FAILURE);
2999 	}
3000 	size = HME_TMDMAX * sizeof (struct hme_tmd);
3001 	rval = ddi_dma_mem_alloc(hmep->hme_tmd_dmah, size,
3002 	    &hmep->hme_dev_attr, DDI_DMA_CONSISTENT, DDI_DMA_DONTWAIT, NULL,
3003 	    &hmep->hme_tmd_kaddr, &real_len, &hmep->hme_tmd_acch);
3004 	if (rval != DDI_SUCCESS) {
3005 		HME_FAULT_MSG1(hmep, SEVERITY_HIGH, INIT_MSG,
3006 		    "cannot allocate tmd dma mem - failed");
3007 		return (DDI_FAILURE);
3008 	}
3009 	hmep->hme_tmdp = (void *)(hmep->hme_tmd_kaddr);
3010 	rval = ddi_dma_addr_bind_handle(hmep->hme_tmd_dmah, NULL,
3011 	    hmep->hme_tmd_kaddr, size, DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
3012 	    DDI_DMA_DONTWAIT, NULL, &dmac, &cookiec);
3013 	if (rval != DDI_DMA_MAPPED) {
3014 		HME_FAULT_MSG1(hmep, SEVERITY_HIGH, INIT_MSG,
3015 		    "cannot allocate tmd dma - failed");
3016 		return (DDI_FAILURE);
3017 	}
3018 	hmep->hme_tmd_paddr = dmac.dmac_address;
3019 	if (cookiec != 1) {
3020 		HME_FAULT_MSG1(hmep, SEVERITY_HIGH, INIT_MSG,
3021 		    "too many tmd cookies - failed");
3022 		return (DDI_FAILURE);
3023 	}
3024 
3025 	return (DDI_SUCCESS);
3026 }
3027 
3028 static void
3029 hmefreethings(struct hme *hmep)
3030 {
3031 	if (hmep->hme_rmd_paddr) {
3032 		(void) ddi_dma_unbind_handle(hmep->hme_rmd_dmah);
3033 		hmep->hme_rmd_paddr = 0;
3034 	}
3035 	if (hmep->hme_rmd_acch)
3036 		ddi_dma_mem_free(&hmep->hme_rmd_acch);
3037 	if (hmep->hme_rmd_dmah)
3038 		ddi_dma_free_handle(&hmep->hme_rmd_dmah);
3039 
3040 	if (hmep->hme_tmd_paddr) {
3041 		(void) ddi_dma_unbind_handle(hmep->hme_tmd_dmah);
3042 		hmep->hme_tmd_paddr = 0;
3043 	}
3044 	if (hmep->hme_tmd_acch)
3045 		ddi_dma_mem_free(&hmep->hme_tmd_acch);
3046 	if (hmep->hme_tmd_dmah)
3047 		ddi_dma_free_handle(&hmep->hme_tmd_dmah);
3048 }
3049 
3050 /*
3051  *	First check to see if it is our device interrupting.
3052  */
3053 static uint_t
3054 hmeintr(caddr_t arg)
3055 {
3056 	struct hme	*hmep = (void *)arg;
3057 	uint32_t	hmesbits;
3058 	uint32_t	serviced = DDI_INTR_UNCLAIMED;
3059 	uint32_t	num_reads = 0;
3060 	uint32_t	rflags;
3061 	mblk_t		*mp, *head, **tail;
3062 
3063 
3064 	head = NULL;
3065 	tail = &head;
3066 
3067 	mutex_enter(&hmep->hme_intrlock);
3068 
3069 	/*
3070 	 * The status register auto-clears on read except for
3071 	 * MIF Interrupt bit
3072 	 */
3073 	hmesbits = GET_GLOBREG(status);
3074 	CHECK_GLOBREG();
3075 
3076 	/*
3077 	 * Note: TINT is sometimes enabled in hmereclaim()
3078 	 */
3079 
3080 	/*
3081 	 * Bugid 1227832 - to handle spurious interrupts on fusion systems.
3082 	 * Claim the first interrupt after initialization
3083 	 */
3084 	if (hmep->hme_flags & HMEINITIALIZED) {
3085 		hmep->hme_flags &= ~HMEINITIALIZED;
3086 		serviced = DDI_INTR_CLAIMED;
3087 	}
3088 
3089 	if ((hmesbits & (HMEG_STATUS_INTR | HMEG_STATUS_TINT)) == 0) {
3090 						/* No interesting interrupt */
3091 		if (hmep->hme_intrstats) {
3092 			if (serviced == DDI_INTR_UNCLAIMED)
3093 				KIOIP->intrs[KSTAT_INTR_SPURIOUS]++;
3094 			else
3095 				KIOIP->intrs[KSTAT_INTR_HARD]++;
3096 		}
3097 		mutex_exit(&hmep->hme_intrlock);
3098 		return (serviced);
3099 	}
3100 
3101 	serviced = DDI_INTR_CLAIMED;
3102 
3103 	if (!(hmep->hme_flags & HMERUNNING)) {
3104 		if (hmep->hme_intrstats)
3105 			KIOIP->intrs[KSTAT_INTR_HARD]++;
3106 		mutex_exit(&hmep->hme_intrlock);
3107 		hmeuninit(hmep);
3108 		return (serviced);
3109 	}
3110 
3111 	if (hmesbits & (HMEG_STATUS_FATAL_ERR | HMEG_STATUS_NONFATAL_ERR)) {
3112 		if (hmesbits & HMEG_STATUS_FATAL_ERR) {
3113 
3114 			if (hmep->hme_intrstats)
3115 				KIOIP->intrs[KSTAT_INTR_HARD]++;
3116 			hme_fatal_err(hmep, hmesbits);
3117 
3118 			mutex_exit(&hmep->hme_intrlock);
3119 			(void) hmeinit(hmep);
3120 			return (serviced);
3121 		}
3122 		hme_nonfatal_err(hmep, hmesbits);
3123 	}
3124 
3125 	if (hmesbits & (HMEG_STATUS_TX_ALL | HMEG_STATUS_TINT)) {
3126 		mutex_enter(&hmep->hme_xmitlock);
3127 
3128 		hmereclaim(hmep);
3129 		mutex_exit(&hmep->hme_xmitlock);
3130 	}
3131 
3132 	if (hmesbits & HMEG_STATUS_RINT) {
3133 
3134 		/*
3135 		 * This dummy PIO is required to flush the SBus
3136 		 * Bridge buffers in QFE.
3137 		 */
3138 		(void) GET_GLOBREG(config);
3139 
3140 		/*
3141 		 * Loop through each RMD no more than once.
3142 		 */
3143 		while (num_reads++ < HME_RMDMAX) {
3144 			hmebuf_t *rbuf;
3145 			int rxptr;
3146 
3147 			rxptr = hmep->hme_rxindex % HME_RMDMAX;
3148 			HMESYNCRMD(rxptr, DDI_DMA_SYNC_FORKERNEL);
3149 
3150 			rflags = GET_RMD_FLAGS(rxptr);
3151 			if (rflags & HMERMD_OWN) {
3152 				/*
3153 				 * Chip still owns it.  We're done.
3154 				 */
3155 				break;
3156 			}
3157 
3158 			/*
3159 			 * Retrieve the packet.
3160 			 */
3161 			rbuf = &hmep->hme_rbuf[rxptr];
3162 			mp = hmeread(hmep, rbuf, rflags);
3163 
3164 			/*
3165 			 * Return ownership of the RMD.
3166 			 */
3167 			PUT_RMD(rxptr, rbuf->paddr);
3168 			HMESYNCRMD(rxptr, DDI_DMA_SYNC_FORDEV);
3169 
3170 			if (mp != NULL) {
3171 				*tail = mp;
3172 				tail = &mp->b_next;
3173 			}
3174 
3175 			/*
3176 			 * Advance to the next RMD.
3177 			 */
3178 			hmep->hme_rxindex++;
3179 		}
3180 	}
3181 
3182 	if (hmep->hme_intrstats)
3183 		KIOIP->intrs[KSTAT_INTR_HARD]++;
3184 
3185 	mutex_exit(&hmep->hme_intrlock);
3186 
3187 	if (head != NULL)
3188 		mac_rx(hmep->hme_mh, NULL, head);
3189 
3190 	return (serviced);
3191 }
3192 
3193 /*
3194  * Transmit completion reclaiming.
3195  */
3196 static void
3197 hmereclaim(struct hme *hmep)
3198 {
3199 	boolean_t	reclaimed = B_FALSE;
3200 
3201 	/*
3202 	 * Loop through each TMD.
3203 	 */
3204 	while (hmep->hme_txindex > hmep->hme_txreclaim) {
3205 
3206 		int		reclaim;
3207 		uint32_t	flags;
3208 
3209 		reclaim = hmep->hme_txreclaim % HME_TMDMAX;
3210 		HMESYNCTMD(reclaim, DDI_DMA_SYNC_FORKERNEL);
3211 
3212 		flags = GET_TMD_FLAGS(reclaim);
3213 		if (flags & HMETMD_OWN) {
3214 			/*
3215 			 * Chip still owns it.  We're done.
3216 			 */
3217 			break;
3218 		}
3219 
3220 		/*
3221 		 * Count a chained packet only once.
3222 		 */
3223 		if (flags & HMETMD_SOP) {
3224 			hmep->hme_opackets++;
3225 		}
3226 
3227 		/*
3228 		 * MIB II
3229 		 */
3230 		hmep->hme_obytes += flags & HMETMD_BUFSIZE;
3231 
3232 		reclaimed = B_TRUE;
3233 		hmep->hme_txreclaim++;
3234 	}
3235 
3236 	if (reclaimed) {
3237 		/*
3238 		 * We reclaimed some TMDs, so turn transmit interrupts back off.
3239 		 */
3240 		if (hmep->hme_wantw) {
3241 			PUT_GLOBREG(intmask,
3242 			    HMEG_MASK_INTR | HMEG_MASK_TINT |
3243 			    HMEG_MASK_TX_ALL);
3244 			hmep->hme_wantw = B_FALSE;
3245 			mac_tx_update(hmep->hme_mh);
3246 		}
3247 	} else {
3248 		/*
3249 		 * enable TINT so that even if there is no further activity,
3250 		 * hmereclaim will get called
3251 		 */
3252 		if (hmep->hme_wantw)
3253 			PUT_GLOBREG(intmask,
3254 			    GET_GLOBREG(intmask) & ~HMEG_MASK_TX_ALL);
3255 	}
3256 	CHECK_GLOBREG();
3257 }
3258 
3259 /*
3260  * Handle interrupts for fatal errors
3261  * Need reinitialization of the ENET channel.
3262  */
3263 static void
3264 hme_fatal_err(struct hme *hmep, uint_t hmesbits)
3265 {
3266 
3267 	if (hmesbits & HMEG_STATUS_SLV_PAR_ERR) {
3268 		hmep->hme_slvparerr++;
3269 	}
3270 
3271 	if (hmesbits & HMEG_STATUS_SLV_ERR_ACK) {
3272 		hmep->hme_slverrack++;
3273 	}
3274 
3275 	if (hmesbits & HMEG_STATUS_TX_TAG_ERR) {
3276 		hmep->hme_txtagerr++;
3277 		hmep->hme_oerrors++;
3278 	}
3279 
3280 	if (hmesbits & HMEG_STATUS_TX_PAR_ERR) {
3281 		hmep->hme_txparerr++;
3282 		hmep->hme_oerrors++;
3283 	}
3284 
3285 	if (hmesbits & HMEG_STATUS_TX_LATE_ERR) {
3286 		hmep->hme_txlaterr++;
3287 		hmep->hme_oerrors++;
3288 	}
3289 
3290 	if (hmesbits & HMEG_STATUS_TX_ERR_ACK) {
3291 		hmep->hme_txerrack++;
3292 		hmep->hme_oerrors++;
3293 	}
3294 
3295 	if (hmesbits & HMEG_STATUS_EOP_ERR) {
3296 		hmep->hme_eoperr++;
3297 	}
3298 
3299 	if (hmesbits & HMEG_STATUS_RX_TAG_ERR) {
3300 		hmep->hme_rxtagerr++;
3301 		hmep->hme_ierrors++;
3302 	}
3303 
3304 	if (hmesbits & HMEG_STATUS_RX_PAR_ERR) {
3305 		hmep->hme_rxparerr++;
3306 		hmep->hme_ierrors++;
3307 	}
3308 
3309 	if (hmesbits & HMEG_STATUS_RX_LATE_ERR) {
3310 		hmep->hme_rxlaterr++;
3311 		hmep->hme_ierrors++;
3312 	}
3313 
3314 	if (hmesbits & HMEG_STATUS_RX_ERR_ACK) {
3315 		hmep->hme_rxerrack++;
3316 		hmep->hme_ierrors++;
3317 	}
3318 }
3319 
3320 /*
3321  * Handle interrupts regarding non-fatal errors.
3322  */
3323 static void
3324 hme_nonfatal_err(struct hme *hmep, uint_t hmesbits)
3325 {
3326 
3327 	if (hmesbits & HMEG_STATUS_RX_DROP) {
3328 		hmep->hme_missed++;
3329 		hmep->hme_ierrors++;
3330 	}
3331 
3332 	if (hmesbits & HMEG_STATUS_DEFTIMR_EXP) {
3333 		hmep->hme_defer_xmts++;
3334 	}
3335 
3336 	if (hmesbits & HMEG_STATUS_FSTCOLC_EXP) {
3337 		hmep->hme_fstcol += 256;
3338 	}
3339 
3340 	if (hmesbits & HMEG_STATUS_LATCOLC_EXP) {
3341 		hmep->hme_tlcol += 256;
3342 		hmep->hme_oerrors += 256;
3343 	}
3344 
3345 	if (hmesbits & HMEG_STATUS_EXCOLC_EXP) {
3346 		hmep->hme_excol += 256;
3347 		hmep->hme_oerrors += 256;
3348 	}
3349 
3350 	if (hmesbits & HMEG_STATUS_NRMCOLC_EXP) {
3351 		hmep->hme_coll += 256;
3352 	}
3353 
3354 	if (hmesbits & HMEG_STATUS_MXPKTSZ_ERR) {
3355 		hmep->hme_babl++;
3356 		hmep->hme_oerrors++;
3357 	}
3358 
3359 	/*
3360 	 * This error is fatal and the board needs to
3361 	 * be reinitialized. Comments?
3362 	 */
3363 	if (hmesbits & HMEG_STATUS_TXFIFO_UNDR) {
3364 		hmep->hme_uflo++;
3365 		hmep->hme_oerrors++;
3366 	}
3367 
3368 	if (hmesbits & HMEG_STATUS_SQE_TST_ERR) {
3369 		hmep->hme_sqe_errors++;
3370 	}
3371 
3372 	if (hmesbits & HMEG_STATUS_RCV_CNT_EXP) {
3373 		if (hmep->hme_rxcv_enable) {
3374 			hmep->hme_cvc += 256;
3375 		}
3376 	}
3377 
3378 	if (hmesbits & HMEG_STATUS_RXFIFO_OVFL) {
3379 		hmep->hme_oflo++;
3380 		hmep->hme_ierrors++;
3381 	}
3382 
3383 	if (hmesbits & HMEG_STATUS_LEN_CNT_EXP) {
3384 		hmep->hme_lenerr += 256;
3385 		hmep->hme_ierrors += 256;
3386 	}
3387 
3388 	if (hmesbits & HMEG_STATUS_ALN_CNT_EXP) {
3389 		hmep->hme_align_errors += 256;
3390 		hmep->hme_ierrors += 256;
3391 	}
3392 
3393 	if (hmesbits & HMEG_STATUS_CRC_CNT_EXP) {
3394 		hmep->hme_fcs_errors += 256;
3395 		hmep->hme_ierrors += 256;
3396 	}
3397 }
3398 
3399 static mblk_t *
3400 hmeread(struct hme *hmep, hmebuf_t *rbuf, uint32_t rflags)
3401 {
3402 	mblk_t		*bp;
3403 	uint32_t	len;
3404 	t_uscalar_t	type;
3405 
3406 	len = (rflags & HMERMD_BUFSIZE) >> HMERMD_BUFSIZE_SHIFT;
3407 
3408 	/*
3409 	 * Check for a short packet, and also for an overflow or
3410 	 * oversized packet. The processing is the same in all cases:
3411 	 * reuse the buffer and update the appropriate error counters,
3412 	 * including the buffer overflow counter.
3413 	 */
3414 	if ((len < ETHERMIN) || (rflags & HMERMD_OVFLOW) ||
3415 	    (len > (ETHERMAX + 4))) {
3416 		if (len < ETHERMIN)
3417 			hmep->hme_runt++;
3418 
3419 		else {
3420 			hmep->hme_buff++;
3421 			hmep->hme_toolong_errors++;
3422 		}
3423 		hmep->hme_ierrors++;
3424 		return (NULL);
3425 	}
3426 
3427 	/*
3428 	 * Sync the received buffer before looking at it.
3429 	 */
3430 
3431 	(void) ddi_dma_sync(rbuf->dmah, 0, 0, DDI_DMA_SYNC_FORKERNEL);
3432 
3433 	/*
3434 	 * copy the packet data and then recycle the descriptor.
3435 	 */
3436 
3437 	if ((bp = allocb(len + HME_FSTBYTE_OFFSET, BPRI_HI)) == NULL) {
3438 
3439 		hmep->hme_allocbfail++;
3440 		hmep->hme_norcvbuf++;
3441 
3442 		return (NULL);
3443 	}
3444 
3445 	bcopy(rbuf->kaddr, bp->b_rptr, len + HME_FSTBYTE_OFFSET);
3446 
3447 	hmep->hme_ipackets++;
3448 
3449 	/* Advance b_rptr past the First Byte offset */
3450 	bp->b_rptr += HME_FSTBYTE_OFFSET;
3451 	bp->b_wptr = bp->b_rptr + len;
3452 
3453 	/*
3454 	 * update MIB II statistics
3455 	 */
3456 	BUMP_InNUcast(hmep, bp->b_rptr);
3457 	hmep->hme_rbytes += len;
3458 
3459 	type = get_ether_type(bp->b_rptr);
3460 
3461 	/*
3462 	 * TCP partial checksum in hardware
3463 	 */
3464 	if (type == ETHERTYPE_IP || type == ETHERTYPE_IPV6) {
3465 		uint16_t cksum = ~rflags & HMERMD_CKSUM;
3466 		uint_t end = len - sizeof (struct ether_header);
3467 		mac_hcksum_set(bp, 0, 0, end, htons(cksum), HCK_PARTIALCKSUM);
3468 	}
3469 
3470 	return (bp);
3471 }
3472 
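/*
 * Note on the receive checksum above: hmeinit() programs the ERX
 * checksum-start offset to the byte following the Ethernet header, so
 * HMERMD_CKSUM presumably holds the hardware's 16-bit ones-complement
 * sum from that point onward; hmeread() inverts it and passes it up
 * with HCK_PARTIALCKSUM for the stack to finish verification.
 */
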
3473 /*VARARGS*/
3474 static void
3475 hme_fault_msg(struct hme *hmep, uint_t severity, msg_t type, char *fmt, ...)
3476 {
3477 	char	msg_buffer[255];
3478 	va_list	ap;
3479 
3480 	va_start(ap, fmt);
3481 	(void) vsnprintf(msg_buffer, sizeof (msg_buffer), fmt, ap);
3482 
3483 	if (hmep == NULL) {
3484 		cmn_err(CE_NOTE, "hme : %s", msg_buffer);
3485 
3486 	} else if (type == DISPLAY_MSG) {
3487 		cmn_err(CE_CONT, "?%s%d : %s\n", ddi_driver_name(hmep->dip),
3488 		    hmep->instance, msg_buffer);
3489 	} else if (severity == SEVERITY_HIGH) {
3490 		cmn_err(CE_WARN, "%s%d : %s, SEVERITY_HIGH, %s\n",
3491 		    ddi_driver_name(hmep->dip), hmep->instance,
3492 		    msg_buffer, msg_string[type]);
3493 	} else {
3494 		cmn_err(CE_CONT, "%s%d : %s\n", ddi_driver_name(hmep->dip),
3495 		    hmep->instance, msg_buffer);
3496 	}
3497 	va_end(ap);
3498 }
3499 
3500 /*
3501  * if this is the first init do not bother to save the
3502  * counters. They should be 0, but do not count on it.
3503  */
3504 static void
3505 hmesavecntrs(struct hme *hmep)
3506 {
3507 	uint32_t fecnt, aecnt, lecnt, rxcv;
3508 	uint32_t ltcnt, excnt;
3509 
3510 	/* XXX What all gets added in ierrors and oerrors? */
3511 	fecnt = GET_MACREG(fecnt);
3512 	PUT_MACREG(fecnt, 0);
3513 
3514 	aecnt = GET_MACREG(aecnt);
3515 	hmep->hme_align_errors += aecnt;
3516 	PUT_MACREG(aecnt, 0);
3517 
3518 	lecnt = GET_MACREG(lecnt);
3519 	hmep->hme_lenerr += lecnt;
3520 	PUT_MACREG(lecnt, 0);
3521 
3522 	rxcv = GET_MACREG(rxcv);
3523 #ifdef HME_CODEVIOL_BUG
3524 	/*
3525 	 * Ignore rxcv errors for Sbus/FEPS 2.1 or earlier
3526 	 */
3527 	if (!hmep->hme_rxcv_enable) {
3528 		rxcv = 0;
3529 	}
3530 #endif
3531 	hmep->hme_cvc += rxcv;
3532 	PUT_MACREG(rxcv, 0);
3533 
3534 	ltcnt = GET_MACREG(ltcnt);
3535 	hmep->hme_tlcol += ltcnt;
3536 	PUT_MACREG(ltcnt, 0);
3537 
3538 	excnt = GET_MACREG(excnt);
3539 	hmep->hme_excol += excnt;
3540 	PUT_MACREG(excnt, 0);
3541 
3542 	hmep->hme_fcs_errors += fecnt;
3543 	hmep->hme_ierrors += (fecnt + aecnt + lecnt);
3544 	hmep->hme_oerrors += (ltcnt + excnt);
3545 	hmep->hme_coll += (GET_MACREG(nccnt) + ltcnt);
3546 
3547 	PUT_MACREG(nccnt, 0);
3548 	CHECK_MACREG();
3549 }
3550 
3551 /*
3552  * To set up the mac address for the network interface:
3553  * The adapter card may support a local mac address which is published
3554  * in a device node property "local-mac-address". This mac address is
3555  * treated as the factory-installed mac address for DLPI interface.
3556  * If the adapter firmware has used the device for diskless boot
3557  * operation it publishes a property called "mac-address" for use by
3558  * inetboot and the device driver.
3559  * If "mac-address" is not found, the system options property
3560  * "local-mac-address" is used to select the mac-address. If this option
3561  * is set to "true", and "local-mac-address" has been found, then
3562  * local-mac-address is used; otherwise the system mac address is used
3563  * by calling the "localetheraddr()" function.
3564  */
3565 static void
3566 hme_setup_mac_address(struct hme *hmep, dev_info_t *dip)
3567 {
3568 	char	*prop;
3569 	int	prop_len = sizeof (int);
3570 
3571 	hmep->hme_addrflags = 0;
3572 
3573 	/*
3574 	 * Check if it is an adapter with its own local mac address
3575 	 * If it is present, save it as the "factory-address"
3576 	 * for this adapter.
3577 	 */
3578 	if (ddi_getlongprop(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
3579 	    "local-mac-address",
3580 	    (caddr_t)&prop, &prop_len) == DDI_PROP_SUCCESS) {
3581 		if (prop_len == ETHERADDRL) {
3582 			hmep->hme_addrflags = HME_FACTADDR_PRESENT;
3583 			ether_bcopy(prop, &hmep->hme_factaddr);
3584 			HME_FAULT_MSG2(hmep, SEVERITY_NONE, DISPLAY_MSG,
3585 			    "Local Ethernet address = %s",
3586 			    ether_sprintf(&hmep->hme_factaddr));
3587 		}
3588 		kmem_free(prop, prop_len);
3589 	}
3590 
3591 	/*
3592 	 * Check if the adapter has published "mac-address" property.
3593 	 * If it is present, use it as the mac address for this device.
3594 	 */
3595 	if (ddi_getlongprop(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
3596 	    "mac-address", (caddr_t)&prop, &prop_len) == DDI_PROP_SUCCESS) {
3597 		if (prop_len >= ETHERADDRL) {
3598 			ether_bcopy(prop, &hmep->hme_ouraddr);
3599 			kmem_free(prop, prop_len);
3600 			return;
3601 		}
3602 		kmem_free(prop, prop_len);
3603 	}
3604 
3605 #ifdef	__sparc
3606 	/*
3607 	 * On sparc, we might be able to use the mac address from the
3608 	 * system.  However, on all other systems, we need to use the
3609 	 * address from the PROM.
3610 	 */
3611 	if (ddi_getlongprop(DDI_DEV_T_ANY, dip, 0, "local-mac-address?",
3612 	    (caddr_t)&prop, &prop_len) == DDI_PROP_SUCCESS) {
3613 		if ((strncmp("true", prop, prop_len) == 0) &&
3614 		    (hmep->hme_addrflags & HME_FACTADDR_PRESENT)) {
3615 			hmep->hme_addrflags |= HME_FACTADDR_USE;
3616 			ether_bcopy(&hmep->hme_factaddr, &hmep->hme_ouraddr);
3617 			kmem_free(prop, prop_len);
3618 			HME_FAULT_MSG1(hmep, SEVERITY_NONE, DISPLAY_MSG,
3619 			    "Using local MAC address");
3620 			return;
3621 		}
3622 		kmem_free(prop, prop_len);
3623 	}
3624 
3625 	/*
3626 	 * Get the system ethernet address.
3627 	 */
3628 	(void) localetheraddr((struct ether_addr *)NULL, &hmep->hme_ouraddr);
3629 #else
3630 	ether_bcopy(&hmep->hme_factaddr, &hmep->hme_ouraddr);
3631 #endif
3632 }
3633 
3634 /* ARGSUSED */
3635 static void
3636 hme_check_acc_handle(char *file, uint_t line, struct hme *hmep,
3637     ddi_acc_handle_t handle)
3638 {
3639 }
3640