xref: /illumos-gate/usr/src/uts/common/io/hme/hme.c (revision d62bc4badc1c1f1549c961cfb8b420e650e1272b)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  */
25 
26 #pragma ident	"%Z%%M%	%I%	%E% SMI"
27 
28 /*
29  * SunOS MT STREAMS FEPS(SBus)/Cheerio(PCI) 10/100Mb Ethernet Device Driver
30  */
31 
32 #include	<sys/types.h>
33 #include	<sys/debug.h>
34 #include	<sys/stream.h>
35 #include	<sys/cmn_err.h>
36 #include	<sys/kmem.h>
37 #include	<sys/crc32.h>
38 #include	<sys/modctl.h>
39 #include	<sys/conf.h>
40 #include	<sys/strsun.h>
41 #include	<sys/kstat.h>
42 #include	<inet/common.h>
43 #include	<inet/mi.h>
44 #include	<inet/nd.h>
45 #include	<sys/pattr.h>
46 #include	<sys/dlpi.h>
47 #include	<sys/strsubr.h>
48 #include	<sys/mac.h>
49 #include	<sys/mac_ether.h>
50 #include	<sys/ethernet.h>
51 #include	<sys/vlan.h>
52 #include	<sys/pci.h>
53 #include	<sys/policy.h>
54 #include	<sys/ddi.h>
55 #include	<sys/sunddi.h>
56 #include	<sys/hme_phy.h>
57 #include	<sys/hme_mac.h>
58 #include	<sys/hme.h>
59 
60 typedef void	(*fptrv_t)(void *);
61 
62 typedef enum {
63 	NO_MSG		= 0,
64 	AUTOCONFIG_MSG	= 1,
65 	STREAMS_MSG	= 2,
66 	IOCTL_MSG	= 3,
67 	PROTO_MSG	= 4,
68 	INIT_MSG	= 5,
69 	TX_MSG		= 6,
70 	RX_MSG		= 7,
71 	INTR_MSG	= 8,
72 	UNINIT_MSG	= 9,
73 	CONFIG_MSG	= 10,
74 	PROP_MSG	= 11,
75 	ENTER_MSG	= 12,
76 	RESUME_MSG	= 13,
77 	AUTONEG_MSG	= 14,
78 	NAUTONEG_MSG	= 15,
79 	FATAL_ERR_MSG	= 16,
80 	NFATAL_ERR_MSG	= 17,
81 	NDD_MSG		= 18,
82 	PHY_MSG		= 19,
83 	XCVR_MSG	= 20,
84 	NOXCVR_MSG	= 21,
85 	NSUPPORT_MSG	= 22,
86 	ERX_MSG		= 23,
87 	FREE_MSG	= 24,
88 	IPG_MSG		= 25,
89 	DDI_MSG		= 26,
90 	DEFAULT_MSG	= 27,
91 	DISPLAY_MSG	= 28,
92 	LATECOLL_MSG	= 29,
93 	MIFPOLL_MSG	= 30,
94 	LINKPULSE_MSG	= 31
95 } msg_t;
96 
97 msg_t	hme_debug_level =	NO_MSG;
98 
99 static char	*msg_string[] = {
100 	"NONE       ",
101 	"AUTOCONFIG ",
102 	"STREAMS    ",
103 	"IOCTL      ",
104 	"PROTO      ",
105 	"INIT       ",
106 	"TX         ",
107 	"RX         ",
108 	"INTR       ",
109 	"UNINIT		",
110 	"CONFIG	",
111 	"PROP	",
112 	"ENTER	",
113 	"RESUME	",
114 	"AUTONEG	",
115 	"NAUTONEG	",
116 	"FATAL_ERR	",
117 	"NFATAL_ERR	",
118 	"NDD	",
119 	"PHY	",
120 	"XCVR	",
121 	"NOXCVR	",
122 	"NSUPPOR	",
123 	"ERX	",
124 	"FREE	",
125 	"IPG	",
126 	"DDI	",
127 	"DEFAULT	",
128 	"DISPLAY	"
129 	"LATECOLL_MSG	",
130 	"MIFPOLL_MSG	",
131 	"LINKPULSE_MSG	"
132 };
133 
134 #define	SEVERITY_NONE	0
135 #define	SEVERITY_LOW	0
136 #define	SEVERITY_MID	1
137 #define	SEVERITY_HIGH	2
138 #define	SEVERITY_UNKNOWN 99
139 
140 #define	FEPS_URUN_BUG
141 #define	HME_CODEVIOL_BUG
142 
143 #define	KIOIP	KSTAT_INTR_PTR(hmep->hme_intrstats)
144 
145 /*
146  * The following variable is used to check for fixes in Sbus/FEPS 2.0
147  */
148 static	int	hme_urun_fix = 0;	/* Bug fixed in Sbus/FEPS 2.0 */
149 
150 /*
151  * The following variables are used for configuring various features
152  */
153 static	int	hme_64bit_enable =	1;	/* Use 64-bit sbus transfers */
154 static	int	hme_reject_own =	1;	/* Reject packets with own SA */
155 static	int	hme_autoneg_enable =	1;	/* Enable auto-negotiation */
156 
157 static	int	hme_ngu_enable =	1; /* to enable Never Give Up mode */
158 static	int	hme_mifpoll_enable =	1; /* to enable mif poll */
159 
160 /*
161  * The following variables are used for performance tuning.
162  */
163 
164 #define	RX_BCOPY_MAX	(sizeof (struct ether_header) + 256)
165 
166 static	int	hme_rx_bcopy_max =	RX_BCOPY_MAX;
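/*
 * A sketch of the intent, assuming the usual copy-versus-loan receive
 * tradeoff (the receive path itself appears later in this file): frames
 * no longer than hme_rx_bcopy_max are bcopy'd into a freshly allocated
 * mblk so that the DMA receive buffer can be recycled immediately, while
 * longer frames are handed upstream in the original buffer. Raising the
 * threshold trades extra copy cost for reduced buffer turnover.
 */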
167 
168 /*
169  * The following variables are used for configuring link-operation.
170  * Later these parameters may be changed per interface using the "ndd" command.
171  * These parameters may also be specified as properties using the .conf
172  * file mechanism for each interface.
173  */
174 
175 static	int	hme_lance_mode =	1;	/* to enable lance mode */
176 static	int	hme_ipg0 =		16;
177 static	int	hme_ipg1 =		8;
178 static	int	hme_ipg2 =		4;
179 static	int	hme_use_int_xcvr =	0;
180 static	int	hme_pace_size =		0;	/* Do not use pacing */
181 
182 /*
183  * The following variable's value will be overridden by the
184  * "link-pulse-disabled" property, which may be created by OBP or hme.conf.
185  */
186 static	int	hme_link_pulse_disabled = 0;	/* link pulse disabled */
187 
188 /*
189  * The following parameters may be configured by the user. If they are not
190  * configured by the user, the values will be based on the capabilities of
191  * the transceiver.
192  * The value "HME_NOTUSR" is ORed with the parameter value to indicate values
193  * which are NOT configured by the user.
194  */
195 
196 #define	HME_NOTUSR	0x0f000000
197 #define	HME_MASK_1BIT	0x1
198 #define	HME_MASK_5BIT	0x1f
199 #define	HME_MASK_8BIT	0xff
200 
201 static	int	hme_adv_autoneg_cap = HME_NOTUSR | 0;
202 static	int	hme_adv_100T4_cap = HME_NOTUSR | 0;
203 static	int	hme_adv_100fdx_cap = HME_NOTUSR | 0;
204 static	int	hme_adv_100hdx_cap = HME_NOTUSR | 0;
205 static	int	hme_adv_10fdx_cap = HME_NOTUSR | 0;
206 static	int	hme_adv_10hdx_cap = HME_NOTUSR | 0;
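/*
 * For example, a parameter left at its default above reads as
 * HME_NOTUSR | 0 (0x0f000000), while a user-configured value (set via
 * ndd or a .conf property) has the HME_NOTUSR bits clear.
 * hme_setup_link_default() below tests for an unconfigured parameter
 * with (hme_param_autoneg & HME_NOTUSR), and hme_setup_link_control()
 * recovers the configured setting with (~HME_NOTUSR & hme_param_autoneg).
 */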
207 
208 /*
209  * PHY_IDR1 and PHY_IDR2 values to identify National Semiconductor's DP83840
210  * Rev C chip which needs some work-arounds.
211  */
212 #define	HME_NSIDR1	0x2000
213 #define	HME_NSIDR2	0x5c00 /* IDR2 value with revision no. 0 */
214 
215 /*
216  * PHY_IDR1 and PHY_IDR2 values to identify Quality Semiconductor's QS6612
217  * chip which needs some work-arounds.
218  * Added by Interface Technologies Group (NPG), 8/28/1997.
219  */
220 #define	HME_QSIDR1	0x0181
221 #define	HME_QSIDR2	0x4400 /* IDR2 value with revision no. 0 */
222 
223 /*
224  * The least significant 4 bits of the IDR2 register represent the revision
225  * no. of the DP83840 chip. For Rev-C of DP83840, the rev. no. is 0.
226  * The next revision of the chip is called DP83840A and the value of
227  * HME_NSIDR2 is 0x5c01 for this new chip. All the workarounds specific
228  * to DP83840 chip are valid for both the revisions of the chip.
229  * Assuming that these workarounds are valid for the future revisions
230  * also, we will apply these workarounds independent of the revision no.
231  * Hence we mask out the last 4 bits of the IDR2 register and compare
232  * with 0x5c00 value.
233  */
234 
235 #define	HME_DP83840	((hmep->hme_idr1 == HME_NSIDR1) && \
236 			((hmep->hme_idr2 & 0xfff0) == HME_NSIDR2))
237 /*
238  * Likewise for the QSI 6612 Fast ethernet phy.
239  * Added by Interface Technologies Group (NPG), 8/28/1997.
240  */
241 #define	HME_QS6612	((hmep->hme_idr1 == HME_QSIDR1) && \
242 			((hmep->hme_idr2 & 0xfff0) == HME_QSIDR2))
243 /*
244  * All strings used by hme messaging functions
245  */
246 
247 static	char *busy_msg =
248 	"Driver is BUSY with upper layer";
249 
250 static	char *par_detect_msg =
251 	"Parallel detection fault.";
252 
253 static	char *xcvr_no_mii_msg =
254 	"Transceiver does not talk MII.";
255 
256 static	char *xcvr_isolate_msg =
257 	"Transceiver isolate failed.";
258 
259 static	char *int_xcvr_msg =
260 	"Internal Transceiver Selected.";
261 
262 static	char *ext_xcvr_msg =
263 	"External Transceiver Selected.";
264 
265 static	char *no_xcvr_msg =
266 	"No transceiver found.";
267 
268 static	char *slave_slot_msg =
269 	"Dev not used - dev in slave only slot";
270 
271 static	char *burst_size_msg =
272 	"Could not identify the burst size";
273 
274 static	char *unk_rx_ringsz_msg =
275 	"Unknown receive RINGSZ";
276 
277 static	char *lmac_addr_msg =
278 	"Using local MAC address";
279 
280 static  char *lether_addr_msg =
281 	"Local Ethernet address = %s";
282 
283 static  char *add_intr_fail_msg =
284 	"ddi_add_intr(9F) failed";
285 
286 static  char *mregs_4global_reg_fail_msg =
287 	"ddi_regs_map_setup(9F) for global reg failed";
288 
289 static	char *mregs_4etx_reg_fail_msg =
290 	"ddi_map_regs for etx reg failed";
291 
292 static	char *mregs_4erx_reg_fail_msg =
293 	"ddi_map_regs for erx reg failed";
294 
295 static	char *mregs_4bmac_reg_fail_msg =
296 	"ddi_map_regs for bmac reg failed";
297 
298 static	char *mregs_4mif_reg_fail_msg =
299 	"ddi_map_regs for mif reg failed";
300 
301 static  char *mif_read_fail_msg =
302 	"MIF Read failure";
303 
304 static  char *mif_write_fail_msg =
305 	"MIF Write failure";
306 
307 static  char *kstat_create_fail_msg =
308 	"kstat_create failed";
309 
310 static  char *param_reg_fail_msg =
311 	"parameter register error";
312 
313 static	char *init_fail_gen_msg =
314 	"Failed to initialize hardware/driver";
315 
316 static	char *ddi_nregs_fail_msg =
317 	"ddi_dev_nregs failed(9F), returned %d";
318 
319 static	char *bad_num_regs_msg =
320 	"Invalid number of registers.";
321 
322 static	char *anar_not_set_msg =
323 	"External Transceiver: anar not set with speed selection";
324 
325 static	char *par_detect_anar_not_set_msg =
326 	"External Transceiver: anar not set with speed selection";
327 
328 
329 #ifdef	HME_DEBUG
330 static  char *mregs_4config_fail_msg =
331 	"ddi_regs_map_setup(9F) for config space failed";
332 
333 static  char *attach_fail_msg =
334 	"Attach entry point failed";
335 
336 static  char *detach_bad_cmd_msg =
337 	"Detach entry point rcv'd a bad command";
338 
339 static  char *phy_msg =
340 	"Phy, Vendor Id: %x";
341 
342 static  char *no_phy_msg =
343 	"No Phy/xcvr found";
344 
345 static  char *unk_rx_descr_sze_msg =
346 	"Unknown Rx descriptor size %x.";
347 
348 static  char *disable_txmac_msg =
349 	"Txmac could not be disabled.";
350 
351 static  char *disable_rxmac_msg =
352 	"Rxmac could not be disabled.";
353 
354 static  char *config_space_fatal_msg =
355 	"Configuration space failed in routine.";
356 
357 static  char *mregs_4soft_reset_fail_msg =
358 	"ddi_regs_map_setup(9F) for soft reset failed";
359 
360 static  char *disable_erx_msg =
361 	"Can not disable Rx.";
362 
363 static  char *disable_etx_msg =
364 	"Can not disable Tx.";
365 
366 static  char *unk_tx_descr_sze_msg =
367 	"Unknown Tx descriptor size %x.";
368 
369 static  char *alloc_tx_dmah_msg =
370 	"Can not allocate Tx dma handle.";
371 
372 static  char *alloc_rx_dmah_msg =
373 	"Can not allocate Rx dma handle.";
374 
375 static  char *phy_speed_bad_msg =
376 	"The current Phy/xcvr speed is not valid";
377 
378 static  char *par_detect_fault_msg =
379 	"Parallel Detection Fault";
380 
381 static  char *autoneg_speed_bad_msg =
382 	"Autonegotiated speed is bad";
383 
384 #endif
385 
386 /*
387  *	"MIF Read failure: data = %X";
388  */
389 
390 /*
391  * SunVTS Loopback messaging support
392  *
393  * static  char *loopback_val_default =
394  *	"Loopback Value: Error In Value.";
395  *
396  * static  char *loopback_cmd_default =
397  *	"Loopback Command: Error In Value.";
398  */
399 
400 /* FATAL ERR msgs */
401 /*
402  * Function prototypes.
403  */
404 /* these two are global so that qfe can use them */
405 int hmeattach(dev_info_t *, ddi_attach_cmd_t);
406 int hmedetach(dev_info_t *, ddi_detach_cmd_t);
407 static	boolean_t hmeinit_xfer_params(struct hme *);
408 static	uint_t hmestop(struct hme *);
409 static	void hmestatinit(struct hme *);
410 static	int hmeallocthings(struct hme *);
411 static	void hmefreebufs(struct hme *);
412 static  void *hmeallocb(size_t, uint_t);
413 static	void hmeget_hm_rev_property(struct hme *);
414 static	boolean_t hmestart(struct hme *, mblk_t *);
415 static	uint_t hmeintr(caddr_t);
416 static	void hmereclaim(struct hme *);
417 static	int hmeinit(struct hme *);
418 static	void hmeuninit(struct hme *hmep);
419 static 	mblk_t *hmeread(struct hme *, volatile struct hme_rmd *, uint32_t);
420 static	void hmesavecntrs(struct hme *);
421 static	void hme_fatal_err(struct hme *, uint_t);
422 static	void hme_nonfatal_err(struct hme *, uint_t);
423 static	int hmeburstsizes(struct hme *);
424 static	void hme_start_mifpoll(struct hme *);
425 static	void hme_stop_mifpoll(struct hme *);
426 static	void hme_param_cleanup(struct hme *);
427 static	int hme_param_get(queue_t *q, mblk_t *mp, caddr_t cp);
428 static	int hme_param_register(struct hme *, hmeparam_t *, int);
429 static	int hme_param_set(queue_t *, mblk_t *, char *, caddr_t);
430 static	void send_bit(struct hme *, uint_t);
431 static	uint_t get_bit(struct hme *);
432 static	uint_t get_bit_std(struct hme *);
433 static	uint_t hme_bb_mii_read(struct hme *, uchar_t, uint16_t *);
434 static	void hme_bb_mii_write(struct hme *, uchar_t, uint16_t);
435 static	void hme_bb_force_idle(struct hme *);
436 static	uint_t hme_mii_read(struct hme *, uchar_t, uint16_t *);
437 static	void hme_mii_write(struct hme *, uchar_t, uint16_t);
438 static	void hme_stop_timer(struct hme *);
439 static	void hme_start_timer(struct hme *, fptrv_t, int);
440 static	int hme_select_speed(struct hme *, int);
441 static	void hme_reset_transceiver(struct hme *);
442 static	void hme_check_transceiver(struct hme *);
443 static	void hme_setup_link_default(struct hme *);
444 static	void hme_setup_link_status(struct hme *);
445 static	void hme_setup_link_control(struct hme *);
446 static	int hme_check_txhung(struct hme *hmep);
447 static	void hme_check_link(void *);
448 
449 static	void hme_init_xcvr_info(struct hme *);
450 static	void hme_disable_link_pulse(struct hme *);
451 static	void hme_force_speed(void *);
452 static	void hme_get_autoinfo(struct hme *);
453 static	int hme_try_auto_negotiation(struct hme *);
454 static	void hme_try_speed(void *);
455 static	void hme_link_now_up(struct hme *);
456 static	void hme_setup_mac_address(struct hme *, dev_info_t *);
457 
458 static	void hme_nd_free(caddr_t *nd_pparam);
459 static	int hme_nd_getset(queue_t *q, caddr_t nd_param, MBLKP mp);
460 static	boolean_t hme_nd_load(caddr_t *nd_pparam, char *name,
461     pfi_t get_pfi, pfi_t set_pfi, caddr_t data);
462 
463 static void hme_fault_msg(char *, uint_t, struct hme *, uint_t,
464     msg_t, char *, ...);
465 
466 static void hme_check_acc_handle(char *, uint_t, struct hme *,
467     ddi_acc_handle_t);
468 
469 static void hme_check_dma_handle(char *, uint_t, struct hme *,
470     ddi_dma_handle_t);
471 
472 /*
473  * Nemo (GLDv3) Functions.
474  */
475 static int	hme_m_stat(void *, uint_t, uint64_t *);
476 static int	hme_m_start(void *);
477 static void	hme_m_stop(void *);
478 static int	hme_m_promisc(void *, boolean_t);
479 static int	hme_m_multicst(void *, boolean_t, const uint8_t *);
480 static int	hme_m_unicst(void *, const uint8_t *);
481 static mblk_t	*hme_m_tx(void *, mblk_t *);
482 static void	hme_m_ioctl(void *, queue_t *, mblk_t *);
483 static boolean_t	hme_m_getcapab(void *, mac_capab_t, void *);
484 
485 static mac_callbacks_t hme_m_callbacks = {
486 	MC_IOCTL | MC_GETCAPAB,
487 	hme_m_stat,
488 	hme_m_start,
489 	hme_m_stop,
490 	hme_m_promisc,
491 	hme_m_multicst,
492 	hme_m_unicst,
493 	hme_m_tx,
494 	NULL,
495 	hme_m_ioctl,
496 	hme_m_getcapab,
497 };
498 
499 DDI_DEFINE_STREAM_OPS(hme_dev_ops, nulldev, nulldev, hmeattach, hmedetach,
500     nodev, NULL, D_MP, NULL);
501 
502 #define	HME_FAULT_MSG1(p, s, t, f) \
503     hme_fault_msg(__FILE__, __LINE__, (p), (s), (t), (f));
504 
505 #define	HME_FAULT_MSG2(p, s, t, f, a) \
506     hme_fault_msg(__FILE__, __LINE__, (p), (s), (t), (f), (a));
507 
508 #define	HME_FAULT_MSG3(p, s, t, f, a, b) \
509     hme_fault_msg(__FILE__, __LINE__, (p), (s), (t), (f), (a), (b));
510 
511 #define	HME_FAULT_MSG4(p, s, t, f, a, b, c) \
512     hme_fault_msg(__FILE__, __LINE__, (p), (s), (t), (f), (a), (b), (c));
513 
514 #ifdef	HME_DEBUG
515 static void	hme_debug_msg(char *, uint_t, struct hme *, uint_t,
516 				msg_t, char *, ...);
517 
518 #define	HME_DEBUG_MSG1(p, s, t, f) \
519     hme_debug_msg(__FILE__, __LINE__, (p), (s), (t), (f))
520 
521 #define	HME_DEBUG_MSG2(p, s, t, f, a) \
522     hme_debug_msg(__FILE__, __LINE__, (p), (s), (t), (f), (a))
523 
524 #define	HME_DEBUG_MSG3(p, s, t, f, a, b) \
525     hme_debug_msg(__FILE__, __LINE__, (p), (s), (t), (f), (a), (b))
526 
527 #define	HME_DEBUG_MSG4(p, s, t, f, a, b, c) \
528     hme_debug_msg(__FILE__, __LINE__, (p), (s), (t), (f), (a), (b), (c))
529 
530 #define	HME_DEBUG_MSG5(p, s, t, f, a, b, c, d) \
531     hme_debug_msg(__FILE__, __LINE__, (p), (s), (t), (f), (a), (b), (c), (d))
532 
533 #define	HME_DEBUG_MSG6(p, s, t, f, a, b, c, d, e) \
534     hme_debug_msg(__FILE__, __LINE__, (p), (s), (t), (f), (a), (b), (c),  \
535 		    (d), (e))
536 
537 #else
538 
539 #define	HME_DEBUG_MSG1(p, s, t, f)
540 #define	HME_DEBUG_MSG2(p, s, t, f, a)
541 #define	HME_DEBUG_MSG3(p, s, t, f, a, b)
542 #define	HME_DEBUG_MSG4(p, s, t, f, a, b, c)
543 #define	HME_DEBUG_MSG5(p, s, t, f, a, b, c, d)
544 #define	HME_DEBUG_MSG6(p, s, t, f, a, b, c, d, e)
545 
546 #endif
547 
548 #define	CHECK_MIFREG() \
549 	hme_check_acc_handle(__FILE__, __LINE__, hmep, hmep->hme_mifregh)
550 #define	CHECK_ETXREG() \
551 	hme_check_acc_handle(__FILE__, __LINE__, hmep, hmep->hme_etxregh)
552 #define	CHECK_ERXREG() \
553 	hme_check_acc_handle(__FILE__, __LINE__, hmep, hmep->hme_erxregh)
554 #define	CHECK_MACREG() \
555 	hme_check_acc_handle(__FILE__, __LINE__, hmep, hmep->hme_bmacregh)
556 #define	CHECK_GLOBREG() \
557 	hme_check_acc_handle(__FILE__, __LINE__, hmep, hmep->hme_globregh)
558 
559 /*
560  * Claim the device is ultra-capable of burst in the beginning.  Use
561  * the value returned by ddi_dma_burstsizes() to actually set the HME
562  * global configuration register later.
563  *
564  * Sbus/FEPS supports burst sizes of 16, 32 and 64 bytes. Also, it supports
565  * 32-bit and 64-bit Sbus transfers. Hence the dlim_burstsizes field contains
566  * the burstsizes in both the lo and hi words.
567  */
568 #define	HMELIMADDRLO	((uint64_t)0x00000000)
569 #define	HMELIMADDRHI	((uint64_t)0xffffffff)
570 
571 static ddi_dma_attr_t hme_dma_attr = {
572 	DMA_ATTR_V0,		/* version number. */
573 	(uint64_t)HMELIMADDRLO,	/* low address */
574 	(uint64_t)HMELIMADDRHI,	/* high address */
575 	(uint64_t)0x00ffffff,	/* address counter max */
576 	(uint64_t)1,		/* alignment */
577 	(uint_t)0x00700070,	/* dlim_burstsizes for 32 and 64 bit xfers */
578 	(uint32_t)0x1,		/* minimum transfer size */
579 	(uint64_t)0x7fffffff,	/* maximum transfer size */
580 	(uint64_t)0x00ffffff,	/* maximum segment size */
581 	1,			/* scatter/gather list length */
582 	512,			/* granularity */
583 	0			/* attribute flags */
584 };
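/*
 * A minimal sketch, not the driver's actual attach code, of how
 * attributes like these are typically consumed:
 *
 *	ddi_dma_handle_t dmah;
 *
 *	if (ddi_dma_alloc_handle(dip, &hme_dma_attr, DDI_DMA_SLEEP,
 *	    NULL, &dmah) != DDI_SUCCESS)
 *		return (DDI_FAILURE);
 *
 * The value returned by ddi_dma_burstsizes(9F) for the bound handle is
 * then used to program the global configuration register, as noted in
 * the comment above.
 */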
585 
586 static ddi_dma_lim_t hme_dma_limits = {
587 	(uint64_t)HMELIMADDRLO,	/* dlim_addr_lo */
588 	(uint64_t)HMELIMADDRHI,	/* dlim_addr_hi */
589 	(uint64_t)HMELIMADDRHI,	/* dlim_cntr_max */
590 	(uint_t)0x00700070,	/* dlim_burstsizes for 32 and 64 bit xfers */
591 	(uint32_t)0x1,		/* dlim_minxfer */
592 	1024			/* dlim_speed */
593 };
594 
595 static uchar_t pci_latency_timer = 0;
596 
597 /*
598  * Module linkage information for the kernel.
599  */
600 static struct modldrv modldrv = {
601 	&mod_driverops,	/* Type of module.  This one is a driver */
602 	"Sun HME 10/100 Mb Ethernet",
603 	&hme_dev_ops,	/* driver ops */
604 };
605 
606 static struct modlinkage modlinkage = {
607 	MODREV_1, &modldrv, NULL
608 };
609 
610 /*
611  * Internal PHY Id:
612  */
613 
614 #define	HME_BB1	0x15	/* Babybac1, Rev 1.5 */
615 #define	HME_BB2 0x20	/* Babybac2, Rev 0 */
616 
617 /* <<<<<<<<<<<<<<<<<<<<<<  Register operations >>>>>>>>>>>>>>>>>>>>> */
618 
619 #define	GET_MIFREG(reg) \
620 	ddi_get32(hmep->hme_mifregh, (uint32_t *)&hmep->hme_mifregp->reg)
621 #define	PUT_MIFREG(reg, value) \
622 	ddi_put32(hmep->hme_mifregh, (uint32_t *)&hmep->hme_mifregp->reg, value)
623 
624 #define	GET_ETXREG(reg) \
625 	ddi_get32(hmep->hme_etxregh, (uint32_t *)&hmep->hme_etxregp->reg)
626 #define	PUT_ETXREG(reg, value) \
627 	ddi_put32(hmep->hme_etxregh, (uint32_t *)&hmep->hme_etxregp->reg, value)
628 #define	GET_ERXREG(reg) \
629 	ddi_get32(hmep->hme_erxregh, (uint32_t *)&hmep->hme_erxregp->reg)
630 #define	PUT_ERXREG(reg, value) \
631 	ddi_put32(hmep->hme_erxregh, (uint32_t *)&hmep->hme_erxregp->reg, value)
632 #define	GET_MACREG(reg) \
633 	ddi_get32(hmep->hme_bmacregh, (uint32_t *)&hmep->hme_bmacregp->reg)
634 #define	PUT_MACREG(reg, value) \
635 	ddi_put32(hmep->hme_bmacregh, \
636 		(uint32_t *)&hmep->hme_bmacregp->reg, value)
637 #define	GET_GLOBREG(reg) \
638 	ddi_get32(hmep->hme_globregh, (uint32_t *)&hmep->hme_globregp->reg)
639 #define	PUT_GLOBREG(reg, value) \
640 	ddi_put32(hmep->hme_globregh, \
641 		(uint32_t *)&hmep->hme_globregp->reg, value)
642 #define	PUT_TMD(ptr, cookie, len, flags) \
643 	ddi_put32(hmep->hme_mdm_h, (uint32_t *)&ptr->tmd_addr, cookie); \
644 	ddi_put32(hmep->hme_mdm_h, (uint32_t *)&ptr->tmd_flags, \
645 	    (uint_t)HMETMD_OWN | len | flags)
646 #define	GET_TMD_FLAGS(ptr) \
647 	ddi_get32(hmep->hme_mdm_h, (uint32_t *)&ptr->tmd_flags)
648 #define	PUT_RMD(ptr, cookie) \
649 	ddi_put32(hmep->hme_mdm_h, (uint32_t *)&ptr->rmd_addr, cookie); \
650 	ddi_put32(hmep->hme_mdm_h, (uint32_t *)&ptr->rmd_flags, \
651 	    (uint_t)(HMEBUFSIZE << HMERMD_BUFSIZE_SHIFT) | HMERMD_OWN)
652 #define	GET_RMD_FLAGS(ptr) \
653 	ddi_get32(hmep->hme_mdm_h, (uint32_t *)&ptr->rmd_flags)
654 
655 #define	CLONE_RMD(old, new) \
656 	new->rmd_addr = old->rmd_addr; /* This is actually safe */\
657 	ddi_put32(hmep->hme_mdm_h, (uint32_t *)&new->rmd_flags, \
658 	    (uint_t)(HMEBUFSIZE << HMERMD_BUFSIZE_SHIFT) | HMERMD_OWN)
659 #define	GET_ROM8(offset) \
660 	ddi_get8((hmep->hme_romh), (offset))
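/*
 * Note that PUT_TMD, PUT_RMD and CLONE_RMD above expand to multiple
 * statements and are not wrapped in do { } while (0); they must not be
 * used as the unbraced body of an if or else.
 */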
661 
662 /*
663  * Ether_copy is not endian-correct. Define an endian-correct version.
664  */
665 #define	ether_bcopy(a, b) (bcopy(a, b, 6))
666 
667 /*
668  * Ether-type is specifically big-endian, but data region is unknown endian
669  */
670 #define	get_ether_type(ptr) \
671 	(((((uint8_t *)ptr)[12] << 8) | (((uint8_t *)ptr)[13])))
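/*
 * For example, an IPv4 frame carries type bytes 0x08 0x00 at offsets
 * 12 and 13, so get_ether_type() yields 0x0800 (ETHERTYPE_IP)
 * regardless of host endianness.
 */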
672 
673 /* <<<<<<<<<<<<<<<<<<<<<<  Configuration Parameters >>>>>>>>>>>>>>>>>>>>> */
674 
675 #define	BMAC_DEFAULT_JAMSIZE	(0x04)		/* jamsize equals 4 */
676 #define	BMAC_LONG_JAMSIZE	(0x10)		/* jamsize equals 0x10 */
677 static	int 	jamsize = BMAC_DEFAULT_JAMSIZE;
678 
679 
680 /*
681  * Calculate the bit in the multicast address filter that selects the given
682  * address.
683  */
684 
685 static uint32_t
686 hmeladrf_bit(const uint8_t *addr)
687 {
688 	uint32_t crc;
689 
690 	CRC32(crc, addr, ETHERADDRL, -1U, crc32_table);
691 
692 	/*
693 	 * Just want the 6 most significant bits.
694 	 */
695 	return (crc >> 26);
696 }
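/*
 * The 6-bit result selects one of the 64 bits in the BigMAC multicast
 * hash filter. A sketch of the typical use, assuming the filter is kept
 * as four 16-bit words (the array name here is illustrative):
 *
 *	uint32_t bit = hmeladrf_bit(mca);
 *
 *	ladrf[bit >> 4] |= 1 << (bit & 0xf);
 */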
697 
698 /* <<<<<<<<<<<<<<<<<<<<<<<<  Bit Bang Operations >>>>>>>>>>>>>>>>>>>>>>>> */
699 
700 static int hme_internal_phy_id = HME_BB2;	/* Internal PHY is Babybac2  */
701 
702 
703 static void
704 send_bit(struct hme *hmep, uint32_t x)
705 {
706 	PUT_MIFREG(mif_bbdata, x);
707 	PUT_MIFREG(mif_bbclk, HME_BBCLK_LOW);
708 	PUT_MIFREG(mif_bbclk, HME_BBCLK_HIGH);
709 }
710 
711 /*
712  * To read the MII register bits from the Babybac1 transceiver
713  */
714 static uint32_t
715 get_bit(struct hme *hmep)
716 {
717 	uint32_t	x;
718 
719 	PUT_MIFREG(mif_bbclk, HME_BBCLK_LOW);
720 	PUT_MIFREG(mif_bbclk, HME_BBCLK_HIGH);
721 	if (hmep->hme_transceiver == HME_INTERNAL_TRANSCEIVER)
722 		x = (GET_MIFREG(mif_cfg) & HME_MIF_CFGM0) ? 1 : 0;
723 	else
724 		x = (GET_MIFREG(mif_cfg) & HME_MIF_CFGM1) ? 1 : 0;
725 	return (x);
726 }
727 
728 
729 /*
730  * To read the MII register bits according to the IEEE Standard
731  */
732 static uint32_t
733 get_bit_std(struct hme *hmep)
734 {
735 	uint32_t	x;
736 
737 	PUT_MIFREG(mif_bbclk, HME_BBCLK_LOW);
738 	drv_usecwait(1);	/* wait >330 ns for stable data */
739 	if (hmep->hme_transceiver == HME_INTERNAL_TRANSCEIVER)
740 		x = (GET_MIFREG(mif_cfg) & HME_MIF_CFGM0) ? 1 : 0;
741 	else
742 		x = (GET_MIFREG(mif_cfg) & HME_MIF_CFGM1) ? 1 : 0;
743 	PUT_MIFREG(mif_bbclk, HME_BBCLK_HIGH);
744 	return (x);
745 }
746 
747 #define	SEND_BIT(x)		send_bit(hmep, x)
748 #define	GET_BIT(x)		x = get_bit(hmep)
749 #define	GET_BIT_STD(x)		x = get_bit_std(hmep)
750 
751 
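/*
 * The bit-banged management frames below follow the IEEE 802.3 clause 22
 * layout, most significant bit first:
 *
 *	<ST=01> <OP> <AAAAA = phy addr> <RRRRR = reg addr> <TA> <16 data bits>
 *
 * where OP is 01 for a write and 10 for a read, matching the SEND_BIT
 * sequences in hme_bb_mii_write() and hme_bb_mii_read().
 */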
752 static void
753 hme_bb_mii_write(struct hme *hmep, uint8_t regad, uint16_t data)
754 {
755 	uint8_t	phyad;
756 	int	i;
757 
758 	PUT_MIFREG(mif_bbopenb, 1);	/* Enable the MII driver */
759 	phyad = hmep->hme_phyad;
760 	(void) hme_bb_force_idle(hmep);
761 	SEND_BIT(0); SEND_BIT(1);	/* <ST> */
762 	SEND_BIT(0); SEND_BIT(1);	/* <OP> */
763 
764 	for (i = 4; i >= 0; i--) {		/* <AAAAA> */
765 		SEND_BIT((phyad >> i) & 1);
766 	}
767 
768 	for (i = 4; i >= 0; i--) {		/* <RRRRR> */
769 		SEND_BIT((regad >> i) & 1);
770 	}
771 
772 	SEND_BIT(1); SEND_BIT(0);	/* <TA> */
773 
774 	for (i = 0xf; i >= 0; i--) {	/* <DDDDDDDDDDDDDDDD> */
775 		SEND_BIT((data >> i) & 1);
776 	}
777 
778 	PUT_MIFREG(mif_bbopenb, 0);	/* Disable the MII driver */
779 	CHECK_MIFREG();
780 }
781 
782 /* Return 0 if OK, 1 if error (Transceiver does not talk management) */
783 static uint_t
784 hme_bb_mii_read(struct hme *hmep, uint8_t regad, uint16_t *datap)
785 {
786 	uint8_t		phyad;
787 	int		i;
788 	uint32_t	x;
789 	uint32_t	y;
790 
791 	*datap = 0;
792 
793 	PUT_MIFREG(mif_bbopenb, 1);	/* Enable the MII driver */
794 	phyad = hmep->hme_phyad;
795 	(void) hme_bb_force_idle(hmep);
796 	SEND_BIT(0); SEND_BIT(1);	/* <ST> */
797 	SEND_BIT(1); SEND_BIT(0);	/* <OP> */
798 	for (i = 4; i >= 0; i--) {		/* <AAAAA> */
799 		SEND_BIT((phyad >> i) & 1);
800 	}
801 	for (i = 4; i >= 0; i--) {		/* <RRRRR> */
802 		SEND_BIT((regad >> i) & 1);
803 	}
804 
805 	PUT_MIFREG(mif_bbopenb, 0);	/* Disable the MII driver */
806 
807 	if ((hme_internal_phy_id == HME_BB2) ||
808 	    (hmep->hme_transceiver == HME_EXTERNAL_TRANSCEIVER)) {
809 		GET_BIT_STD(x);
810 		GET_BIT_STD(y);		/* <TA> */
811 		for (i = 0xf; i >= 0; i--) {	/* <DDDDDDDDDDDDDDDD> */
812 			GET_BIT_STD(x);
813 			*datap += (x << i);
814 		}
815 		/*
816 		 * Kludge to get the Transceiver out of hung mode
817 		 */
818 		GET_BIT_STD(x);
819 		GET_BIT_STD(x);
820 		GET_BIT_STD(x);
821 	} else {
822 		GET_BIT(x);
823 		GET_BIT(y);		/* <TA> */
824 		for (i = 0xf; i >= 0; i--) {	/* <DDDDDDDDDDDDDDDD> */
825 			GET_BIT(x);
826 			*datap += (x << i);
827 		}
828 		/*
829 		 * Kludge to get the Transceiver out of hung mode
830 		 */
831 		GET_BIT(x);
832 		GET_BIT(x);
833 		GET_BIT(x);
834 	}
835 	CHECK_MIFREG();
836 	return (y);
837 }
838 
839 
840 static void
841 hme_bb_force_idle(struct hme *hmep)
842 {
843 	int	i;
844 
845 	for (i = 0; i < 33; i++) {
846 		SEND_BIT(1);
847 	}
848 }
849 
850 /* <<<<<<<<<<<<<<<<<<<<End of Bit Bang Operations >>>>>>>>>>>>>>>>>>>>>>>> */
851 
852 
853 /* <<<<<<<<<<<<< Frame Register used for MII operations >>>>>>>>>>>>>>>>>>>> */
854 
855 #ifdef	HME_FRM_DEBUG
856 int hme_frame_flag = 0;
857 #endif
858 
859 /* Return 0 if OK, 1 if error (Transceiver does not talk management) */
860 static uint_t
861 hme_mii_read(struct hme *hmep, uchar_t regad, uint16_t *datap)
862 {
863 	volatile uint32_t *framerp = &hmep->hme_mifregp->mif_frame;
864 	uint32_t	frame;
865 	uint8_t		phyad;
866 
867 	if (hmep->hme_transceiver == HME_NO_TRANSCEIVER)
868 		return (1);	/* No transceiver present */
869 
870 	if (!hmep->hme_frame_enable)
871 		return (hme_bb_mii_read(hmep, regad, datap));
872 
873 	phyad = hmep->hme_phyad;
874 #ifdef	HME_FRM_DEBUG
875 	if (!hme_frame_flag) {
876 		HME_DEBUG_MSG1(hmep, SEVERITY_UNKNOWN, NAUTONEG_MSG,
877 		    "Frame Register used for MII");
878 		hme_frame_flag = 1;
879 	}
880 		HME_DEBUG_MSG3(hmep, SEVERITY_UNKNOWN, NAUTONEG_MSG,
881 		"Frame Reg :mii_read: phyad = %X reg = %X ", phyad, regad);
882 #endif
883 
884 	*framerp = HME_MIF_FRREAD | (phyad << HME_MIF_FRPHYAD_SHIFT) |
885 	    (regad << HME_MIF_FRREGAD_SHIFT);
886 /*
887  *	HMEDELAY((*framerp & HME_MIF_FRTA0), HMEMAXRSTDELAY);
888  */
889 	HMEDELAY((*framerp & HME_MIF_FRTA0), 300);
890 	frame = *framerp;
891 	CHECK_MIFREG();
892 	if ((frame & HME_MIF_FRTA0) == 0) {
893 
894 
895 		HME_FAULT_MSG1(hmep, SEVERITY_UNKNOWN, NAUTONEG_MSG,
896 		    mif_read_fail_msg);
897 		return (1);
898 	} else {
899 		*datap = (uint16_t)(frame & HME_MIF_FRDATA);
900 		HME_DEBUG_MSG2(hmep, SEVERITY_UNKNOWN, NAUTONEG_MSG,
901 		    "Frame Reg :mii_read: successful:data = %X ", *datap);
902 		return (0);
903 	}
904 
905 }
906 
907 static void
908 hme_mii_write(struct hme *hmep, uint8_t regad, uint16_t data)
909 {
910 	volatile uint32_t *framerp = &hmep->hme_mifregp->mif_frame;
911 	uint32_t frame;
912 	uint8_t	phyad;
913 
914 	if (!hmep->hme_frame_enable) {
915 		hme_bb_mii_write(hmep, regad, data);
916 		return;
917 	}
918 
919 	phyad = hmep->hme_phyad;
920 	HME_DEBUG_MSG4(hmep,  SEVERITY_UNKNOWN, NAUTONEG_MSG,
921 	    "Frame Reg :mii_write: phyad = %X reg = %X data = %X",
922 	    phyad, regad, data);
923 
924 	*framerp = HME_MIF_FRWRITE | (phyad << HME_MIF_FRPHYAD_SHIFT) |
925 	    (regad << HME_MIF_FRREGAD_SHIFT) | data;
926 /*
927  *	HMEDELAY((*framerp & HME_MIF_FRTA0), HMEMAXRSTDELAY);
928  */
929 	HMEDELAY((*framerp & HME_MIF_FRTA0), 300);
930 	frame = *framerp;
931 	CHECK_MIFREG();
932 	if ((frame & HME_MIF_FRTA0) == 0) {
933 		HME_FAULT_MSG1(hmep, SEVERITY_MID, NAUTONEG_MSG,
934 		    mif_write_fail_msg);
935 	}
936 #ifdef	HME_DEBUG
937 	else {
938 		HME_DEBUG_MSG1(hmep, SEVERITY_UNKNOWN, NAUTONEG_MSG,
939 		    "Frame Reg :mii_write: successful");
940 	}
941 #endif
942 }
943 
944 /*
945  * hme_stop_timer function is used by a function before doing link-related
946  * processing. It locks the "hme_linklock" to protect the link-related data
947  * structures. This lock will be subsequently released in hme_start_timer().
948  */
949 static void
950 hme_stop_timer(struct hme *hmep)
951 {
952 	timeout_id_t	tid;
953 
954 	mutex_enter(&hmep->hme_linklock);
955 
956 	if (hmep->hme_timerid) {
957 		tid = hmep->hme_timerid;
958 		hmep->hme_timerid = 0;
959 		mutex_exit(&hmep->hme_linklock);
960 		(void) untimeout(tid);
961 		mutex_enter(&hmep->hme_linklock);
962 	}
963 }
964 
965 static void
966 hme_start_timer(struct hme *hmep, fptrv_t func, int msec)
967 {
968 	hmep->hme_timerid = timeout(func, hmep, drv_usectohz(1000 * msec));
969 
970 	mutex_exit(&hmep->hme_linklock);
971 }
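/*
 * Together these implement the locking convention for the link state
 * machine: a caller first invokes hme_stop_timer() to take hme_linklock
 * and cancel any pending timeout, updates the link state, and then
 * invokes hme_start_timer() to rearm the timeout and drop the lock, as
 * in hme_check_link():
 *
 *	hme_stop_timer(hmep);
 *	... examine or update link state ...
 *	hme_start_timer(hmep, hme_check_link, HME_LINKCHECK_TIMER);
 */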
972 
973 /*
974  * hme_select_speed is required only when auto-negotiation is not supported.
975  * It should be used only for the Internal Transceiver and not the External
976  * transceiver because we wouldn't know how to generate Link Down state on
977  * the wire.
978  * Currently it is required to support Electron 1.1 Build machines. When all
979  * these machines are upgraded to 1.2 or better, remove this function.
980  *
981  * Returns 1 if the link is up, 0 otherwise.
982  */
983 
984 static int
985 hme_select_speed(struct hme *hmep, int speed)
986 {
987 	uint16_t	stat;
988 	uint16_t	fdx;
989 
990 	if (hmep->hme_linkup_cnt)  /* not first time */
991 		goto read_status;
992 
993 	if (hmep->hme_fdx)
994 		fdx = PHY_BMCR_FDX;
995 	else
996 		fdx = 0;
997 
998 	switch (speed) {
999 	case HME_SPEED_100:
1000 
1001 		switch (hmep->hme_transceiver) {
1002 		case HME_INTERNAL_TRANSCEIVER:
1003 			hme_mii_write(hmep, HME_PHY_BMCR, fdx | PHY_BMCR_100M);
1004 			break;
1005 		case HME_EXTERNAL_TRANSCEIVER:
1006 			if (hmep->hme_delay == 0) {
1007 				hme_mii_write(hmep, HME_PHY_BMCR,
1008 				    fdx | PHY_BMCR_100M);
1009 			}
1010 			break;
1011 		default:
1012 			HME_DEBUG_MSG1(hmep, SEVERITY_HIGH, XCVR_MSG,
1013 			    "Default in select speed 100");
1014 			break;
1015 		}
1016 		break;
1017 	case HME_SPEED_10:
1018 		switch (hmep->hme_transceiver) {
1019 		case HME_INTERNAL_TRANSCEIVER:
1020 			hme_mii_write(hmep, HME_PHY_BMCR, fdx);
1021 			break;
1022 		case HME_EXTERNAL_TRANSCEIVER:
1023 			if (hmep->hme_delay == 0) {
1024 				hme_mii_write(hmep, HME_PHY_BMCR, fdx);
1025 			}
1026 			break;
1027 		default:
1028 			HME_DEBUG_MSG1(hmep, SEVERITY_HIGH, XCVR_MSG,
1029 			    "Default in select speed 10");
1030 			break;
1031 		}
1032 		break;
1033 	default:
1034 		HME_DEBUG_MSG1(hmep, SEVERITY_HIGH, XCVR_MSG,
1035 		    "Default in select speed : Neither speed");
1036 		return (0);
1037 	}
1038 
1039 	if (!hmep->hme_linkup_cnt) {  /* first time; select speed */
1040 		(void) hme_mii_read(hmep, HME_PHY_BMSR, &stat);
1041 		hmep->hme_linkup_cnt++;
1042 		return (0);
1043 	}
1044 
1045 read_status:
1046 	hmep->hme_linkup_cnt++;
1047 	(void) hme_mii_read(hmep, HME_PHY_BMSR, &stat);
1048 	if (stat & PHY_BMSR_LNKSTS)
1049 		return (1);
1050 	else
1051 		return (0);
1052 }
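/*
 * Note the two-phase protocol above: the first call (hme_linkup_cnt == 0)
 * writes BMCR to force the speed and reads BMSR once to clear the
 * latched-low link status, and each subsequent call polls BMSR for
 * PHY_BMSR_LNKSTS.
 */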
1053 
1054 
1055 #define	HME_PHYRST_PERIOD 600	/* 600 milliseconds, instead of 500 */
1056 #define	HME_PDOWN_PERIOD 256	/* 256 millisecond power down period to */
1057 				/* ensure a good reset of the QSI PHY */
1058 
1059 static void
1060 hme_reset_transceiver(struct hme *hmep)
1061 {
1062 	uint32_t	cfg;
1063 	uint16_t	stat;
1064 	uint16_t	anar;
1065 	uint16_t	control;
1066 	uint16_t	csc;
1067 	int		n;
1068 
1069 	cfg = GET_MIFREG(mif_cfg);
1070 
1071 	if (hmep->hme_transceiver == HME_EXTERNAL_TRANSCEIVER) {
1072 		/* Isolate the Internal Transceiver */
1073 		PUT_MIFREG(mif_cfg, (cfg & ~HME_MIF_CFGPS));
1074 		hmep->hme_phyad = HME_INTERNAL_PHYAD;
1075 		hmep->hme_transceiver = HME_INTERNAL_TRANSCEIVER;
1076 		hme_mii_write(hmep, HME_PHY_BMCR, (PHY_BMCR_ISOLATE |
1077 		    PHY_BMCR_PWRDN | PHY_BMCR_LPBK));
1078 		if (hme_mii_read(hmep, HME_PHY_BMCR, &control) == 1)
1079 			goto start_again;
1080 
1081 		/* select the External transceiver */
1082 		PUT_MIFREG(mif_cfg, (cfg | HME_MIF_CFGPS));
1083 		hmep->hme_transceiver = HME_EXTERNAL_TRANSCEIVER;
1084 		hmep->hme_phyad = HME_EXTERNAL_PHYAD;
1085 
1086 	} else if (cfg & HME_MIF_CFGM1) {
1087 		/* Isolate the External transceiver, if present */
1088 		PUT_MIFREG(mif_cfg, (cfg | HME_MIF_CFGPS));
1089 		hmep->hme_phyad = HME_EXTERNAL_PHYAD;
1090 		hmep->hme_transceiver = HME_EXTERNAL_TRANSCEIVER;
1091 		hme_mii_write(hmep, HME_PHY_BMCR, (PHY_BMCR_ISOLATE |
1092 		    PHY_BMCR_PWRDN | PHY_BMCR_LPBK));
1093 		if (hme_mii_read(hmep, HME_PHY_BMCR, &control) == 1)
1094 			goto start_again;
1095 
1096 		/* select the Internal transceiver */
1097 		PUT_MIFREG(mif_cfg, (cfg & ~HME_MIF_CFGPS));
1098 		hmep->hme_transceiver = HME_INTERNAL_TRANSCEIVER;
1099 		hmep->hme_phyad = HME_INTERNAL_PHYAD;
1100 	}
1101 
1102 	hme_mii_write(hmep, HME_PHY_BMCR, PHY_BMCR_PWRDN);
1103 	drv_usecwait((clock_t)HME_PDOWN_PERIOD);
1104 
1105 	/*
1106 	 * Now reset the transceiver.
1107 	 */
1108 	hme_mii_write(hmep, HME_PHY_BMCR, PHY_BMCR_RESET);
1109 
1110 	/*
1111 	 * Check for transceiver reset completion.
1112 	 */
1113 	n = HME_PHYRST_PERIOD / HMEWAITPERIOD;
1114 
1115 	while (--n > 0) {
1116 		if (hme_mii_read(hmep, HME_PHY_BMCR, &control) == 1) {
1117 			HME_FAULT_MSG1(hmep, SEVERITY_UNKNOWN, XCVR_MSG,
1118 			    xcvr_no_mii_msg);
1119 			goto start_again;
1120 		}
1121 		if ((control & PHY_BMCR_RESET) == 0)
1122 			goto reset_issued;
1123 		if (hmep->hme_transceiver == HME_INTERNAL_TRANSCEIVER)
1124 			drv_usecwait((clock_t)HMEWAITPERIOD);
1125 		else
1126 			drv_usecwait((clock_t)(500 * HMEWAITPERIOD));
1127 	}
1128 	/*
1129 	 * phy reset failure
1130 	 */
1131 	hmep->phyfail++;
1132 	goto start_again;
1133 
1134 reset_issued:
1135 
1136 	HME_DEBUG_MSG1(hmep, SEVERITY_UNKNOWN, PHY_MSG,
1137 	    "reset_trans: reset complete.");
1138 
1139 	/*
1140 	 * Get the PHY id registers. We need this to implement work-arounds
1141 	 * for bugs in transceivers which use the National DP83840 PHY chip.
1142 	 * National should fix this in the next release.
1143 	 */
1144 
1145 	(void) hme_mii_read(hmep, HME_PHY_BMSR, &stat);
1146 	(void) hme_mii_read(hmep, HME_PHY_IDR1, &hmep->hme_idr1);
1147 	(void) hme_mii_read(hmep, HME_PHY_IDR2, &hmep->hme_idr2);
1148 	(void) hme_mii_read(hmep, HME_PHY_ANAR, &anar);
1149 
1150 	hme_init_xcvr_info(hmep);
1151 	HME_DEBUG_MSG6(hmep, SEVERITY_UNKNOWN, PHY_MSG, "reset_trans: "
1152 	    "control = %x status = %x idr1 = %x idr2 = %x anar = %x",
1153 	    control, stat, hmep->hme_idr1, hmep->hme_idr2, anar);
1154 
1155 	hmep->hme_bmcr = control;
1156 	hmep->hme_anar = anar;
1157 	hmep->hme_bmsr = stat;
1158 
1159 	/*
1160 	 * The strapping of AN0 and AN1 pins on DP83840 cannot select
1161 	 * 10FDX, 100FDX and Auto-negotiation. So select them here for the
1162 	 * Internal Transceiver.
1163 	 */
1164 	if (hmep->hme_transceiver == HME_INTERNAL_TRANSCEIVER) {
1165 		anar = (PHY_ANAR_TXFDX | PHY_ANAR_10FDX |
1166 		    PHY_ANAR_TX | PHY_ANAR_10 | PHY_SELECTOR);
1167 	}
1168 	/*
1169 	 * Modify control and bmsr based on anar for Rev-C of DP83840.
1170 	 */
1171 	if (HME_DP83840) {
1172 		n = 0;
1173 		if (anar & PHY_ANAR_TXFDX) {
1174 			stat |= PHY_BMSR_100FDX;
1175 			n++;
1176 		} else
1177 			stat &= ~PHY_BMSR_100FDX;
1178 
1179 		if (anar & PHY_ANAR_TX) {
1180 			stat |= PHY_BMSR_100HDX;
1181 			n++;
1182 		} else
1183 			stat &= ~PHY_BMSR_100HDX;
1184 
1185 		if (anar & PHY_ANAR_10FDX) {
1186 			stat |= PHY_BMSR_10FDX;
1187 			n++;
1188 		} else
1189 			stat &= ~PHY_BMSR_10FDX;
1190 
1191 		if (anar & PHY_ANAR_10) {
1192 			stat |= PHY_BMSR_10HDX;
1193 			n++;
1194 		} else
1195 			stat &= ~PHY_BMSR_10HDX;
1196 
1197 		if (n == 1) { 	/* only one mode. disable auto-negotiation */
1198 			stat &= ~PHY_BMSR_ACFG;
1199 			control &= ~PHY_BMCR_ANE;
1200 		}
1201 		if (n) {
1202 			hmep->hme_bmsr = stat;
1203 			hmep->hme_bmcr = control;
1204 
1205 			HME_DEBUG_MSG4(hmep, SEVERITY_NONE, PHY_MSG,
1206 			    "DP83840 Rev-C found: Modified bmsr = %x "
1207 			    "control = %X n = %x", stat, control, n);
1208 		}
1209 	}
1210 	hme_setup_link_default(hmep);
1211 	hme_setup_link_status(hmep);
1212 
1213 
1214 	/*
1215 	 * Place the Transceiver in normal operation mode
1216 	 */
1217 	hme_mii_write(hmep, HME_PHY_BMCR, (control & ~PHY_BMCR_ISOLATE));
1218 
1219 	/*
1220 	 * check if the transceiver is not in Isolate mode
1221 	 */
1222 	n = HME_PHYRST_PERIOD / HMEWAITPERIOD;
1223 
1224 	while (--n > 0) {
1225 		if (hme_mii_read(hmep, HME_PHY_BMCR, &control) == 1) {
1226 			HME_FAULT_MSG1(hmep, SEVERITY_UNKNOWN, XCVR_MSG,
1227 			    xcvr_no_mii_msg);
1228 			goto start_again; /* Transceiver does not talk MII */
1229 		}
1230 		if ((control & PHY_BMCR_ISOLATE) == 0)
1231 			goto setconn;
1232 		drv_usecwait(HMEWAITPERIOD);
1233 	}
1234 	HME_FAULT_MSG1(hmep, SEVERITY_UNKNOWN, XCVR_MSG,
1235 	    xcvr_isolate_msg);
1236 	goto start_again;	/* transceiver reset failure */
1237 
1238 setconn:
1239 	HME_DEBUG_MSG1(hmep, SEVERITY_UNKNOWN, PHY_MSG,
1240 	    "reset_trans: isolate complete.");
1241 
1242 	/*
1243 	 * Work-around for the late-collision problem with 100m cables.
1244 	 * National should fix this in the next release!
1245 	 */
1246 	if (HME_DP83840) {
1247 		(void) hme_mii_read(hmep, HME_PHY_CSC, &csc);
1248 
1249 		HME_DEBUG_MSG3(hmep, SEVERITY_NONE, LATECOLL_MSG,
1250 		    "hme_reset_trans: CSC read = %x written = %x",
1251 		    csc, csc | PHY_CSCR_FCONN);
1252 
1253 		hme_mii_write(hmep, HME_PHY_CSC, (csc | PHY_CSCR_FCONN));
1254 	}
1255 
1256 	hmep->hme_linkcheck =		0;
1257 	hmep->hme_linkup =		0;
1258 	hme_setup_link_status(hmep);
1259 	hmep->hme_autoneg =		HME_HWAN_TRY;
1260 	hmep->hme_force_linkdown =	HME_FORCE_LINKDOWN;
1261 	hmep->hme_linkup_cnt =		0;
1262 	hmep->hme_delay =		0;
1263 	hme_setup_link_control(hmep);
1264 	hme_start_timer(hmep, hme_check_link, HME_LINKCHECK_TIMER);
1265 
1266 	if (hmep->hme_mode == HME_FORCE_SPEED)
1267 		hme_force_speed(hmep);
1268 	else {
1269 		hmep->hme_linkup_10 = 	0;
1270 		hmep->hme_tryspeed =	HME_SPEED_100;
1271 		hmep->hme_ntries =	HME_NTRIES_LOW;
1272 		hmep->hme_nlasttries =	HME_NTRIES_LOW;
1273 		hme_try_speed(hmep);
1274 	}
1275 	return;
1276 
1277 start_again:
1278 	hme_start_timer(hmep, hme_check_link, HME_TICKS);
1279 }
1280 
1281 static void
1282 hme_check_transceiver(struct hme *hmep)
1283 {
1284 	uint32_t	cfgsav;
1285 	uint32_t 	cfg;
1286 	uint32_t 	stat;
1287 
1288 	/*
1289 	 * If MIF Polling is ON and the Internal transceiver is in use, just
1290 	 * check for the presence of the External Transceiver.
1291 	 * Otherwise:
1292 	 * First check to see what transceivers are out there.
1293 	 * If an external transceiver is present,
1294 	 * then use it, regardless of whether there is an Internal transceiver.
1295 	 * If an Internal transceiver is present and no external transceiver,
1296 	 * then use the Internal transceiver.
1297 	 * If there is no external transceiver and no Internal transceiver,
1298 	 * then something is wrong so print an error message.
1299 	 */
1300 
1301 	cfgsav = GET_MIFREG(mif_cfg);
1302 
1303 	if (hmep->hme_polling_on) {
1304 		HME_DEBUG_MSG2(hmep, SEVERITY_NONE, XCVR_MSG,
1305 		    "check_trans: polling_on: cfg = %X", cfgsav);
1306 
1307 		if (hmep->hme_transceiver == HME_INTERNAL_TRANSCEIVER) {
1308 			if ((cfgsav & HME_MIF_CFGM1) && !hme_param_use_intphy) {
1309 				hme_stop_mifpoll(hmep);
1310 				hmep->hme_phyad = HME_EXTERNAL_PHYAD;
1311 				hmep->hme_transceiver =
1312 				    HME_EXTERNAL_TRANSCEIVER;
1313 				PUT_MIFREG(mif_cfg, ((cfgsav & ~HME_MIF_CFGPE)
1314 				    | HME_MIF_CFGPS));
1315 			}
1316 		} else if (hmep->hme_transceiver == HME_EXTERNAL_TRANSCEIVER) {
1317 			stat = (GET_MIFREG(mif_bsts) >> 16);
1318 			if ((stat == 0x00) || (hme_param_use_intphy)) {
1319 				HME_DEBUG_MSG1(hmep, SEVERITY_UNKNOWN,
1320 				    XCVR_MSG, "Extern Transcvr Disconnected");
1321 
1322 				hme_stop_mifpoll(hmep);
1323 				hmep->hme_phyad = HME_INTERNAL_PHYAD;
1324 				hmep->hme_transceiver =
1325 				    HME_INTERNAL_TRANSCEIVER;
1326 				PUT_MIFREG(mif_cfg,
1327 				    (GET_MIFREG(mif_cfg) & ~HME_MIF_CFGPS));
1328 			}
1329 		}
1330 		CHECK_MIFREG();
1331 		return;
1332 	}
1333 
1334 	HME_DEBUG_MSG2(hmep, SEVERITY_NONE, XCVR_MSG,
1335 	    "check_trans: polling_off: cfg = %X", cfgsav);
1336 
1337 	cfg = GET_MIFREG(mif_cfg);
1338 	if ((cfg & HME_MIF_CFGM1) && !hme_param_use_intphy) {
1339 		PUT_MIFREG(mif_cfg, (cfgsav | HME_MIF_CFGPS));
1340 		hmep->hme_phyad = HME_EXTERNAL_PHYAD;
1341 		hmep->hme_transceiver = HME_EXTERNAL_TRANSCEIVER;
1342 
1343 	} else if (cfg & HME_MIF_CFGM0) {  /* Internal Transceiver OK */
1344 		PUT_MIFREG(mif_cfg, (cfgsav & ~HME_MIF_CFGPS));
1345 		hmep->hme_phyad = HME_INTERNAL_PHYAD;
1346 		hmep->hme_transceiver = HME_INTERNAL_TRANSCEIVER;
1347 
1348 	} else {
1349 		hmep->hme_transceiver = HME_NO_TRANSCEIVER;
1350 		HME_FAULT_MSG1(hmep, SEVERITY_HIGH, XCVR_MSG, no_xcvr_msg);
1351 	}
1352 	CHECK_MIFREG();
1353 }
1354 
1355 static void
1356 hme_setup_link_default(struct hme *hmep)
1357 {
1358 	uint16_t	bmsr;
1359 
1360 	bmsr = hmep->hme_bmsr;
1361 	if (hme_param_autoneg & HME_NOTUSR)
1362 		hme_param_autoneg = HME_NOTUSR |
1363 		    ((bmsr & PHY_BMSR_ACFG) ? 1 : 0);
1364 	if (hme_param_anar_100T4 & HME_NOTUSR)
1365 		hme_param_anar_100T4 = HME_NOTUSR |
1366 		    ((bmsr & PHY_BMSR_100T4) ? 1 : 0);
1367 	if (hme_param_anar_100fdx & HME_NOTUSR)
1368 		hme_param_anar_100fdx = HME_NOTUSR |
1369 		    ((bmsr & PHY_BMSR_100FDX) ? 1 : 0);
1370 	if (hme_param_anar_100hdx & HME_NOTUSR)
1371 		hme_param_anar_100hdx = HME_NOTUSR |
1372 		    ((bmsr & PHY_BMSR_100HDX) ? 1 : 0);
1373 	if (hme_param_anar_10fdx & HME_NOTUSR)
1374 		hme_param_anar_10fdx = HME_NOTUSR |
1375 		    ((bmsr & PHY_BMSR_10FDX) ? 1 : 0);
1376 	if (hme_param_anar_10hdx & HME_NOTUSR)
1377 		hme_param_anar_10hdx = HME_NOTUSR |
1378 		    ((bmsr & PHY_BMSR_10HDX) ? 1 : 0);
1379 }
1380 
1381 static void
1382 hme_setup_link_status(struct hme *hmep)
1383 {
1384 	uint16_t	tmp;
1385 
1386 	if (hmep->hme_transceiver == HME_EXTERNAL_TRANSCEIVER)
1387 		hme_param_transceiver = 1;
1388 	else
1389 		hme_param_transceiver = 0;
1390 
1391 	tmp = hmep->hme_bmsr;
1392 	if (tmp & PHY_BMSR_ACFG)
1393 		hme_param_bmsr_ancap = 1;
1394 	else
1395 		hme_param_bmsr_ancap = 0;
1396 	if (tmp & PHY_BMSR_100T4)
1397 		hme_param_bmsr_100T4 = 1;
1398 	else
1399 		hme_param_bmsr_100T4 = 0;
1400 	if (tmp & PHY_BMSR_100FDX)
1401 		hme_param_bmsr_100fdx = 1;
1402 	else
1403 		hme_param_bmsr_100fdx = 0;
1404 	if (tmp & PHY_BMSR_100HDX)
1405 		hme_param_bmsr_100hdx = 1;
1406 	else
1407 		hme_param_bmsr_100hdx = 0;
1408 	if (tmp & PHY_BMSR_10FDX)
1409 		hme_param_bmsr_10fdx = 1;
1410 	else
1411 		hme_param_bmsr_10fdx = 0;
1412 	if (tmp & PHY_BMSR_10HDX)
1413 		hme_param_bmsr_10hdx = 1;
1414 	else
1415 		hme_param_bmsr_10hdx = 0;
1416 
1417 	if (hmep->hme_link_pulse_disabled) {
1418 		hme_param_linkup =	1;
1419 		hme_param_speed =	0;
1420 		hme_param_mode =	0;
1421 		hmep->hme_duplex =	LINK_DUPLEX_HALF;
1422 		mac_link_update(hmep->hme_mh, LINK_STATE_UP);
1423 		return;
1424 	}
1425 
1426 	if (!hmep->hme_linkup) {
1427 		hme_param_linkup =	0;
1428 		hmep->hme_duplex = LINK_DUPLEX_UNKNOWN;
1429 		mac_link_update(hmep->hme_mh, LINK_STATE_DOWN);
1430 		return;
1431 	}
1432 
1433 	hme_param_linkup = 1;
1434 
1435 	if (hmep->hme_fdx == HME_FULL_DUPLEX) {
1436 		hme_param_mode = 1;
1437 		hmep->hme_duplex = LINK_DUPLEX_FULL;
1438 	} else {
1439 		hme_param_mode = 0;
1440 		hmep->hme_duplex = LINK_DUPLEX_HALF;
1441 	}
1442 
1443 	mac_link_update(hmep->hme_mh, LINK_STATE_UP);
1444 
1445 	if (hmep->hme_mode == HME_FORCE_SPEED) {
1446 		if (hmep->hme_forcespeed == HME_SPEED_100)
1447 			hme_param_speed = 1;
1448 		else
1449 			hme_param_speed = 0;
1450 		return;
1451 	}
1452 	if (hmep->hme_tryspeed == HME_SPEED_100)
1453 		hme_param_speed = 1;
1454 	else
1455 		hme_param_speed = 0;
1456 
1457 
1458 	if (!(hmep->hme_aner & PHY_ANER_LPNW)) {
1459 		hme_param_aner_lpancap =	0;
1460 		hme_param_anlpar_100T4 =	0;
1461 		hme_param_anlpar_100fdx =	0;
1462 		hme_param_anlpar_100hdx =	0;
1463 		hme_param_anlpar_10fdx =	0;
1464 		hme_param_anlpar_10hdx =	0;
1465 		return;
1466 	}
1467 	hme_param_aner_lpancap = 1;
1468 	tmp = hmep->hme_anlpar;
1469 	if (tmp & PHY_ANLPAR_T4)
1470 		hme_param_anlpar_100T4 = 1;
1471 	else
1472 		hme_param_anlpar_100T4 = 0;
1473 	if (tmp & PHY_ANLPAR_TXFDX)
1474 		hme_param_anlpar_100fdx = 1;
1475 	else
1476 		hme_param_anlpar_100fdx = 0;
1477 	if (tmp & PHY_ANLPAR_TX)
1478 		hme_param_anlpar_100hdx = 1;
1479 	else
1480 		hme_param_anlpar_100hdx = 0;
1481 	if (tmp & PHY_ANLPAR_10FDX)
1482 		hme_param_anlpar_10fdx = 1;
1483 	else
1484 		hme_param_anlpar_10fdx = 0;
1485 	if (tmp & PHY_ANLPAR_10)
1486 		hme_param_anlpar_10hdx = 1;
1487 	else
1488 		hme_param_anlpar_10hdx = 0;
1489 }
1490 
1491 static void
1492 hme_setup_link_control(struct hme *hmep)
1493 {
1494 	uint_t anar = PHY_SELECTOR;
1495 	uint32_t autoneg = ~HME_NOTUSR & hme_param_autoneg;
1496 	uint32_t anar_100T4 = ~HME_NOTUSR & hme_param_anar_100T4;
1497 	uint32_t anar_100fdx = ~HME_NOTUSR & hme_param_anar_100fdx;
1498 	uint32_t anar_100hdx = ~HME_NOTUSR & hme_param_anar_100hdx;
1499 	uint32_t anar_10fdx = ~HME_NOTUSR & hme_param_anar_10fdx;
1500 	uint32_t anar_10hdx = ~HME_NOTUSR & hme_param_anar_10hdx;
1501 
1502 	if (autoneg) {
1503 		hmep->hme_mode = HME_AUTO_SPEED;
1504 		hmep->hme_tryspeed = HME_SPEED_100;
1505 		if (anar_100T4)
1506 			anar |= PHY_ANAR_T4;
1507 		if (anar_100fdx)
1508 			anar |= PHY_ANAR_TXFDX;
1509 		if (anar_100hdx)
1510 			anar |= PHY_ANAR_TX;
1511 		if (anar_10fdx)
1512 			anar |= PHY_ANAR_10FDX;
1513 		if (anar_10hdx)
1514 			anar |= PHY_ANAR_10;
1515 		hmep->hme_anar = anar;
1516 	} else {
1517 		hmep->hme_mode = HME_FORCE_SPEED;
1518 		if (anar_100T4) {
1519 			hmep->hme_forcespeed = HME_SPEED_100;
1520 			hmep->hme_fdx = HME_HALF_DUPLEX;
1521 			HME_DEBUG_MSG1(hmep, SEVERITY_NONE, NAUTONEG_MSG,
1522 			    "hme_link_control: force 100T4 hdx");
1523 
1524 		} else if (anar_100fdx) {
1525 			/* 100fdx needs to be checked first for 100BaseFX */
1526 			hmep->hme_forcespeed = HME_SPEED_100;
1527 			hmep->hme_fdx = HME_FULL_DUPLEX;
1528 
1529 		} else if (anar_100hdx) {
1530 			hmep->hme_forcespeed = HME_SPEED_100;
1531 			hmep->hme_fdx = HME_HALF_DUPLEX;
1532 			HME_DEBUG_MSG1(hmep, SEVERITY_UNKNOWN, NAUTONEG_MSG,
1533 			    "hme_link_control: force 100 hdx");
1534 		} else if (anar_10hdx) {
1535 			/* 10hdx needs to be checked first for MII-AUI */
1536 			/* MII-AUI BugIds 1252776,4032280,4035106,4028558 */
1537 			hmep->hme_forcespeed = HME_SPEED_10;
1538 			hmep->hme_fdx = HME_HALF_DUPLEX;
1539 
1540 		} else if (anar_10fdx) {
1541 			hmep->hme_forcespeed = HME_SPEED_10;
1542 			hmep->hme_fdx = HME_FULL_DUPLEX;
1543 
1544 		} else {
1545 			hmep->hme_forcespeed = HME_SPEED_10;
1546 			hmep->hme_fdx = HME_HALF_DUPLEX;
1547 			HME_DEBUG_MSG1(hmep, SEVERITY_UNKNOWN, NAUTONEG_MSG,
1548 			    "hme_link_control: force 10 hdx");
1549 		}
1550 	}
1551 }
1552 
1553 /* Decide if transmitter went dead and reinitialize everything */
1554 static int hme_txhung_limit = 3;
1555 static int
1556 hme_check_txhung(struct hme *hmep)
1557 {
1558 	boolean_t status;
1559 
1560 	mutex_enter(&hmep->hme_xmitlock);
1561 	if (hmep->hme_flags & HMERUNNING)
1562 		hmereclaim(hmep);
1563 
1564 	/* Something needs to be sent out but it is not going out */
1565 	if ((hmep->hme_tcurp != hmep->hme_tnextp) &&
1566 	    (hmep->hme_opackets == hmep->hmesave.hme_opackets))
1567 		hmep->hme_txhung++;
1568 	else
1569 		hmep->hme_txhung = 0;
1570 
1571 	hmep->hmesave.hme_opackets = hmep->hme_opackets;
1572 
1573 	status = hmep->hme_txhung >= hme_txhung_limit;
1574 	mutex_exit(&hmep->hme_xmitlock);
1575 
1576 	return (status);
1577 }
1578 
1579 /*
1580  * 	hme_check_link ()
1581  * Called as a result of HME_LINKCHECK_TIMER timeout, to poll for Transceiver
1582  * change or when a transceiver change has been detected by the hme_try_speed
1583  * function.
1584  * This function will also be called from the interrupt handler when polled mode
1585  * is used. Before calling this function the interrupt lock should be
1586  * released so that hmeinit() may be called.
1587  * Note that the hmeinit() function calls hme_select_speed() to set the link
1588  * speed and check for link status.
1589  */
1590 
1591 static void
1592 hme_check_link(void *arg)
1593 {
1594 	struct hme *hmep = arg;
1595 	uint16_t	stat;
1596 	uint_t 	temp;
1597 
1598 	hme_stop_timer(hmep);	/* acquire hme_linklock */
1599 
1600 	HME_DEBUG_MSG1(hmep, SEVERITY_UNKNOWN, XCVR_MSG,
1601 	    "link_check entered:");
1602 	/*
1603 	 * This condition was added to work around
1604 	 * a problem with the Synoptics/Bay 28115 switch,
1605 	 * where the link is up but no packets
1606 	 * are being received. This can be checked using
1607 	 * ipackets, which during reception will
1608 	 * continue to increment after 'hmep->hme_iipackets'
1609 	 * has been made equal to it and the 'hme_check_link'
1610 	 * timer has expired. Note this could also trigger
1611 	 * if there's simply no traffic on the net.
1612 	 * 'hmep->hme_ipackets' is incremented in hme_read
1613 	 * for successfully received packets.
1614 	 */
1615 	if ((hmep->hme_flags & HMERUNNING) && (hmep->hme_linkup)) {
1616 		if (hmep->hme_ipackets != hmep->hme_iipackets)
1617 			/*
1618 			 * Receptions are occurring; set 'hmep->hme_iipackets'
1619 			 * to 'hmep->hme_ipackets' to monitor if receptions
1620 			 * occur during the next timeout interval.
1621 			 */
1622 			hmep->hme_iipackets = hmep->hme_ipackets;
1623 		else
1624 			/*
1625 			 * Receptions not occurring could be due to the
1626 			 * Synoptics problem; try switching off data
1627 			 * scrambling. That should bring up the link.
1628 			 */
1629 			hme_link_now_up(hmep);
1630 	}
1631 
1632 	if ((hmep->hme_flags & HMERUNNING) &&
1633 	    (hmep->hme_linkup) && (hme_check_txhung(hmep))) {
1634 
1635 		HME_DEBUG_MSG1(hmep, SEVERITY_LOW, XCVR_MSG,
1636 		    "txhung: re-init MAC");
1637 		hme_start_timer(hmep, hme_check_link, HME_LINKCHECK_TIMER);
1638 		(void) hmeinit(hmep);	/* To reset the transceiver and */
1639 					/* to init the interface */
1640 		return;
1641 	}
1642 
1643 	/*
1644 	 * check if the transceiver is the same.
1645 	 * init to be done if the external transceiver is
1646 	 * connected/disconnected
1647 	 */
1648 	temp = hmep->hme_transceiver; /* save the transceiver type */
1649 	hme_check_transceiver(hmep);
1650 	if ((temp != hmep->hme_transceiver) || (hmep->hme_linkup == 0)) {
1651 		if (temp != hmep->hme_transceiver) {
1652 			if (hmep->hme_transceiver == HME_EXTERNAL_TRANSCEIVER) {
1653 				HME_FAULT_MSG1(hmep, SEVERITY_UNKNOWN,
1654 				    XCVR_MSG, ext_xcvr_msg);
1655 			} else {
1656 				HME_FAULT_MSG1(hmep, SEVERITY_UNKNOWN,
1657 				    XCVR_MSG, int_xcvr_msg);
1658 			}
1659 		}
1660 		hmep->hme_linkcheck = 0;
1661 		hme_start_timer(hmep, hme_check_link, HME_LINKCHECK_TIMER);
1662 		(void) hmeinit(hmep); /* To reset xcvr and init interface */
1663 		return;
1664 	}
1665 
1666 
1667 	if (hmep->hme_mifpoll_enable) {
1668 		stat = (GET_MIFREG(mif_bsts) >> 16);
1669 
1670 		CHECK_MIFREG(); /* Verify */
1671 		HME_DEBUG_MSG4(hmep, SEVERITY_UNKNOWN, MIFPOLL_MSG,
1672 		    "int_flag = %X old_stat = %X stat = %X",
1673 		    hmep->hme_mifpoll_flag, hmep->hme_mifpoll_data, stat);
1674 
1675 		if (!hmep->hme_mifpoll_flag) {
1676 			if (stat & PHY_BMSR_LNKSTS) {
1677 				hme_start_timer(hmep, hme_check_link,
1678 				    HME_LINKCHECK_TIMER);
1679 				return;
1680 			}
1681 			HME_DEBUG_MSG2(hmep, SEVERITY_UNKNOWN, MIFPOLL_MSG,
1682 			    "hme_check_link:DOWN polled data = %X\n", stat);
1683 			hme_stop_mifpoll(hmep);
1684 
1685 			temp = (GET_MIFREG(mif_bsts) >> 16);
1686 			HME_DEBUG_MSG2(hmep, SEVERITY_UNKNOWN, MIFPOLL_MSG,
1687 			    "hme_check_link:after poll-stop: stat = %X", temp);
1688 		} else {
1689 			hmep->hme_mifpoll_flag = 0;
1690 		}
1691 	} else {
1692 		if (hme_mii_read(hmep, HME_PHY_BMSR, &stat) == 1) {
1693 			/* Transceiver does not talk MII */
1694 			hme_start_timer(hmep, hme_check_link,
1695 			    HME_LINKCHECK_TIMER);
1696 			return;
1697 		}
1698 
1699 		if (stat & PHY_BMSR_LNKSTS) {
1700 			hme_start_timer(hmep, hme_check_link,
1701 			    HME_LINKCHECK_TIMER);
1702 			return;
1703 		}
1704 	}
1705 	HME_DEBUG_MSG3(hmep, SEVERITY_UNKNOWN, MIFPOLL_MSG,
1706 	    "mifpoll_flag = %x first stat = %X", hmep->hme_mifpoll_flag, stat);
1707 
1708 	(void) hme_mii_read(hmep, HME_PHY_BMSR, &stat);
1709 	HME_DEBUG_MSG2(hmep, SEVERITY_UNKNOWN, MIFPOLL_MSG,
1710 	    "second stat = %X", stat);
1711 
1712 	/*
1713 	 * The PHY may have automatically renegotiated link speed and mode.
1714 	 * Get the new link speed and mode.
1715 	 */
1716 	if ((stat & PHY_BMSR_LNKSTS) && hme_autoneg_enable) {
1717 		if (hmep->hme_mode == HME_AUTO_SPEED) {
1718 			(void) hme_get_autoinfo(hmep);
1719 			hme_setup_link_status(hmep);
1720 			hme_start_mifpoll(hmep);
1721 			if (hmep->hme_fdx != hmep->hme_macfdx) {
1722 				hme_start_timer(hmep, hme_check_link,
1723 				    HME_LINKCHECK_TIMER);
1724 				(void) hmeinit(hmep);
1725 				return;
1726 			}
1727 		}
1728 		hme_start_mifpoll(hmep);
1729 		hme_start_timer(hmep, hme_check_link, HME_LINKCHECK_TIMER);
1730 		return;
1731 	}
1732 	/* Reset the PHY and bring up the link */
1733 	hme_reset_transceiver(hmep);
1734 }
1735 
1736 static void
1737 hme_init_xcvr_info(struct hme *hmep)
1738 {
1739 	uint16_t phy_id1, phy_id2;
1740 
1741 	(void) hme_mii_read(hmep, HME_PHY_IDR1, &phy_id1);
1742 	(void) hme_mii_read(hmep, HME_PHY_IDR2, &phy_id2);
1743 }
1744 
1745 /*
1746  * Disable link pulses for the Internal Transceiver
1747  */
1748 
1749 static void
1750 hme_disable_link_pulse(struct hme *hmep)
1751 {
1752 	uint16_t	nicr;
1753 
1754 	hme_mii_write(hmep, HME_PHY_BMCR, 0); /* force 10 Mbps */
1755 	(void) hme_mii_read(hmep, HME_PHY_NICR, &nicr);
1756 
1757 	HME_DEBUG_MSG3(hmep, SEVERITY_NONE, LINKPULSE_MSG,
1758 	    "hme_disable_link_pulse: NICR read = %x written = %x",
1759 	    nicr, nicr & ~PHY_NICR_LD);
1760 
1761 	hme_mii_write(hmep, HME_PHY_NICR, (nicr & ~PHY_NICR_LD));
1762 
1763 	hmep->hme_linkup = 1;
1764 	hmep->hme_linkcheck = 1;
1765 	hme_setup_link_status(hmep);
1766 	hme_start_mifpoll(hmep);
1767 	hme_start_timer(hmep, hme_check_link, HME_LINKCHECK_TIMER);
1768 }
1769 
1770 static void
1771 hme_force_speed(void *arg)
1772 {
1773 	struct hme	*hmep = arg;
1774 	int		linkup;
1775 	uint_t		temp;
1776 	uint16_t	csc;
1777 
1778 	hme_stop_timer(hmep);
1779 	if (hmep->hme_fdx != hmep->hme_macfdx) {
1780 		hme_start_timer(hmep, hme_check_link, HME_TICKS*5);
1781 		return;
1782 	}
1783 	temp = hmep->hme_transceiver; /* save the transceiver type */
1784 	hme_check_transceiver(hmep);
1785 	if (temp != hmep->hme_transceiver) {
1786 		if (hmep->hme_transceiver == HME_EXTERNAL_TRANSCEIVER) {
1787 			HME_FAULT_MSG1(hmep, SEVERITY_UNKNOWN, XCVR_MSG,
1788 			    ext_xcvr_msg);
1789 		} else {
1790 			HME_FAULT_MSG1(hmep, SEVERITY_UNKNOWN, XCVR_MSG,
1791 			    int_xcvr_msg);
1792 		}
1793 		hme_start_timer(hmep, hme_check_link, HME_TICKS * 10);
1794 		return;
1795 	}
1796 
1797 	if ((hmep->hme_transceiver == HME_INTERNAL_TRANSCEIVER) &&
1798 	    (hmep->hme_link_pulse_disabled)) {
1799 		hmep->hme_forcespeed = HME_SPEED_10;
1800 		hme_disable_link_pulse(hmep);
1801 		return;
1802 	}
1803 
1804 	/*
1805 	 * To interoperate with systems capable of auto-negotiation,
1806 	 * the link should be brought down for 1 second.
1807 	 * How can this be done using only standard registers?
1808 	 */
1809 	if (HME_DP83840) {
1810 		if (hmep->hme_force_linkdown == HME_FORCE_LINKDOWN) {
1811 			hmep->hme_force_linkdown = HME_LINKDOWN_STARTED;
1812 			hme_mii_write(hmep, HME_PHY_BMCR, PHY_BMCR_100M);
1813 			(void) hme_mii_read(hmep, HME_PHY_CSC, &csc);
1814 			hme_mii_write(hmep, HME_PHY_CSC,
1815 			    (csc | PHY_CSCR_TXOFF));
1816 			hme_start_timer(hmep, hme_force_speed, 10 * HME_TICKS);
1817 			return;
1818 		} else if (hmep->hme_force_linkdown == HME_LINKDOWN_STARTED) {
1819 			(void) hme_mii_read(hmep, HME_PHY_CSC, &csc);
1820 			hme_mii_write(hmep, HME_PHY_CSC,
1821 			    (csc & ~PHY_CSCR_TXOFF));
1822 			hmep->hme_force_linkdown = HME_LINKDOWN_DONE;
1823 		}
1824 	} else {
1825 		if (hmep->hme_force_linkdown == HME_FORCE_LINKDOWN) {
1826 #ifdef	HME_100T4_DEBUG
1827 	{
1828 		uint16_t control, stat, aner, anlpar, anar;
1829 
1830 		(void) hme_mii_read(hmep, HME_PHY_BMCR, &control);
1831 		(void) hme_mii_read(hmep, HME_PHY_BMSR, &stat);
1832 		(void) hme_mii_read(hmep, HME_PHY_ANER, &aner);
1833 		(void) hme_mii_read(hmep, HME_PHY_ANLPAR, &anlpar);
1834 		(void) hme_mii_read(hmep, HME_PHY_ANAR, &anar);
1835 		HME_DEBUG_MSG5(hmep, SEVERITY_NONE, XCVR_MSG,
1836 		    "hme_force_speed: begin:control ="
1837 		    "  %X stat = %X aner = %X anar = %X anlpar = %X",
1838 		    control, stat, aner, anar, anlpar);
1839 	}
1840 #endif
1841 			hmep->hme_force_linkdown = HME_LINKDOWN_STARTED;
1842 			hme_mii_write(hmep, HME_PHY_BMCR, PHY_BMCR_LPBK);
1843 			hme_start_timer(hmep, hme_force_speed, 10 * HME_TICKS);
1844 			return;
1845 		} else if (hmep->hme_force_linkdown == HME_LINKDOWN_STARTED) {
1846 			hmep->hme_force_linkdown = HME_LINKDOWN_DONE;
1847 		}
1848 	}
1849 
1850 
1851 	linkup = hme_select_speed(hmep, hmep->hme_forcespeed);
1852 	if (hmep->hme_linkup_cnt == 1) {
1853 		hme_start_timer(hmep, hme_force_speed, SECOND(4));
1854 		return;
1855 	}
1856 	if (linkup) {
1857 
1858 #ifdef	HME_100T4_DEBUG
1859 	{
1860 		uint16_t control, stat, aner, anlpar, anar;
1861 
1862 		(void) hme_mii_read(hmep, HME_PHY_BMCR, &control);
1863 		(void) hme_mii_read(hmep, HME_PHY_BMSR, &stat);
1864 		(void) hme_mii_read(hmep, HME_PHY_ANER, &aner);
1865 		(void) hme_mii_read(hmep, HME_PHY_ANLPAR, &anlpar);
1866 		(void) hme_mii_read(hmep, HME_PHY_ANAR, &anar);
1867 		HME_DEBUG_MSG5(hmep, SEVERITY_NONE, XCVR_MSG,
1868 		    "hme_force_speed:end: control ="
1869 		    "%X stat = %X aner = %X anar = %X anlpar = %X",
1870 		    control, stat, aner, anar, anlpar);
1871 	}
1872 #endif
1873 		hmep->hme_linkup = 1;
1874 		hmep->hme_linkcheck = 1;
1875 		hmep->hme_ifspeed = hmep->hme_forcespeed;
1876 		hme_link_now_up(hmep);
1877 		hme_setup_link_status(hmep);
1878 		hme_start_mifpoll(hmep);
1879 		hme_start_timer(hmep, hme_check_link, HME_LINKCHECK_TIMER);
1880 	} else {
1881 		hme_start_timer(hmep, hme_force_speed, HME_TICKS);
1882 	}
1883 }
1884 
1885 static void
1886 hme_get_autoinfo(struct hme *hmep)
1887 {
1888 	uint16_t	anar;
1889 	uint16_t	aner;
1890 	uint16_t	anlpar;
1891 	uint16_t	tmp;
1892 	uint16_t	ar;
1893 
1894 	(void) hme_mii_read(hmep, HME_PHY_ANER, &aner);
1895 	(void) hme_mii_read(hmep, HME_PHY_ANLPAR, &anlpar);
1896 	(void) hme_mii_read(hmep, HME_PHY_ANAR, &anar);
1897 
1898 	HME_DEBUG_MSG4(hmep, SEVERITY_NONE, AUTONEG_MSG,
1899 	    "autoinfo: aner = %X anar = %X anlpar = %X", aner, anar, anlpar);
1900 
1901 	hmep->hme_anlpar = anlpar;
1902 	hmep->hme_aner = aner;
1903 
1904 	if (aner & PHY_ANER_LPNW) {
1905 
1906 		HME_DEBUG_MSG1(hmep, SEVERITY_UNKNOWN, AUTONEG_MSG,
1907 		    "hme_try_autoneg: Link Partner AN able");
1908 
1909 		tmp = anar & anlpar;
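		/*
		 * Resolve the highest common ability.  The precedence used
		 * below (100 full, 100 half, 10 full, 10 half) follows the
		 * standard 802.3 auto-negotiation priority resolution order
		 * (100BASE-T4 is not considered here).
		 */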
1910 		if (tmp & PHY_ANAR_TXFDX) {
1911 			hmep->hme_tryspeed = HME_SPEED_100;
1912 			hmep->hme_fdx = HME_FULL_DUPLEX;
1913 		} else if (tmp & PHY_ANAR_TX) {
1914 			hmep->hme_tryspeed = HME_SPEED_100;
1915 			hmep->hme_fdx = HME_HALF_DUPLEX;
1916 		} else if (tmp & PHY_ANLPAR_10FDX) {
1917 			hmep->hme_tryspeed = HME_SPEED_10;
1918 			hmep->hme_fdx = HME_FULL_DUPLEX;
1919 		} else if (tmp & PHY_ANLPAR_10) {
1920 			hmep->hme_tryspeed = HME_SPEED_10;
1921 			hmep->hme_fdx = HME_HALF_DUPLEX;
1922 		} else {
1923 			if (HME_DP83840) {
1924 
1925 				HME_DEBUG_MSG1(hmep, SEVERITY_UNKNOWN,
1926 				    AUTONEG_MSG, "hme_try_autoneg: "
1927 				    "anar not set with speed selection");
1928 
1929 				hmep->hme_fdx = HME_HALF_DUPLEX;
1930 				(void) hme_mii_read(hmep, HME_PHY_AR, &ar);
1931 
1932 				HME_DEBUG_MSG2(hmep, SEVERITY_NONE,
1933 				    AUTONEG_MSG, "ar = %X", ar);
1934 
1935 				if (ar & PHY_AR_SPEED10)
1936 					hmep->hme_tryspeed = HME_SPEED_10;
1937 				else
1938 					hmep->hme_tryspeed = HME_SPEED_100;
1939 			} else
1940 				HME_FAULT_MSG1(hmep, SEVERITY_UNKNOWN,
1941 				    AUTONEG_MSG, anar_not_set_msg);
1942 		}
1943 		HME_DEBUG_MSG2(hmep, SEVERITY_NONE, AUTONEG_MSG,
1944 		    " hme_try_autoneg: fdx = %d", hmep->hme_fdx);
1945 	} else {
1946 		HME_DEBUG_MSG1(hmep, SEVERITY_UNKNOWN, AUTONEG_MSG,
1947 		    " hme_try_autoneg: parallel detection done");
1948 
1949 		hmep->hme_fdx = HME_HALF_DUPLEX;
1950 		if (anlpar & PHY_ANLPAR_TX)
1951 			hmep->hme_tryspeed = HME_SPEED_100;
1952 		else if (anlpar & PHY_ANLPAR_10)
1953 			hmep->hme_tryspeed = HME_SPEED_10;
1954 		else {
1955 			if (HME_DP83840) {
1956 				HME_DEBUG_MSG1(hmep, SEVERITY_UNKNOWN,
1957 				    AUTONEG_MSG, " hme_try_autoneg: "
1958 				    "parallel detection: "
1959 				    "anar not set with speed selection");
1960 
1961 				(void) hme_mii_read(hmep, HME_PHY_AR, &ar);
1962 
1963 				HME_DEBUG_MSG2(hmep, SEVERITY_NONE,
1964 				    AUTONEG_MSG, "ar = %X", ar);
1965 
1966 				if (ar & PHY_AR_SPEED10)
1967 					hmep->hme_tryspeed = HME_SPEED_10;
1968 				else
1969 					hmep->hme_tryspeed = HME_SPEED_100;
1970 			} else
1971 				HME_FAULT_MSG1(hmep, SEVERITY_UNKNOWN,
1972 				    AUTONEG_MSG, par_detect_anar_not_set_msg);
1973 		}
1974 	}
1975 
1976 	hmep->hme_linkup = 1;
1977 	hmep->hme_linkcheck = 1;
1978 	hmep->hme_ifspeed = hmep->hme_tryspeed;
1979 	hme_link_now_up(hmep);
1980 }
1981 
1982 /*
1983  * Attempt hardware auto-negotiation; return the resulting HME_HWAN_* state.
1984  */
1985 
1986 static int
1987 hme_try_auto_negotiation(struct hme *hmep)
1988 {
1989 	uint16_t	stat;
1990 	uint16_t	aner;
1991 #ifdef	HME_AUTONEG_DEBUG
1992 	uint16_t	anar;
1993 	uint16_t	anlpar;
1994 	uint16_t	control;
1995 #endif
1996 
1997 	if (hmep->hme_autoneg == HME_HWAN_TRY) {
1998 		/* auto negotiation not initiated */
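		/*
		 * The BMSR is read twice: several of its status bits
		 * (e.g. link status) are latched, so the first read
		 * returns and clears the stale latched state and the
		 * second read returns current status.
		 */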
1999 		(void) hme_mii_read(hmep, HME_PHY_BMSR, &stat);
2000 		if (hme_mii_read(hmep, HME_PHY_BMSR, &stat) == 1) {
2001 			/*
2002 			 * Transceiver does not talk mii
2003 			 */
2004 			goto hme_anfail;
2005 		}
2006 		if ((stat & PHY_BMSR_ACFG) == 0) { /* auto neg. not supported */
2007 
2008 			HME_DEBUG_MSG2(hmep, SEVERITY_UNKNOWN, NAUTONEG_MSG,
2009 			    " PHY status reg = %X", stat);
2010 			HME_DEBUG_MSG1(hmep, SEVERITY_UNKNOWN, NAUTONEG_MSG,
2011 			    " Auto-negotiation not supported");
2012 
2013 			return (hmep->hme_autoneg = HME_HWAN_FAILED);
2014 		}
2015 
2016 		/*
2017 		 * Read ANER to clear status from previous operations.
2018 		 */
2019 		if (hme_mii_read(hmep, HME_PHY_ANER, &aner) == 1) {
2020 			/*
2021 			 * Transceiver does not talk mii
2022 			 */
2023 			goto hme_anfail;
2024 		}
2025 
2026 		hme_mii_write(hmep, HME_PHY_ANAR, hmep->hme_anar);
2027 		hme_mii_write(hmep, HME_PHY_BMCR, PHY_BMCR_ANE | PHY_BMCR_RAN);
2028 		/*
2029 		 * Auto-negotiation has now been initiated; the timer
2030 		 * will poll for its completion, so report it in progress.
2031 		 */
2032 		hmep->hme_delay = 0;
2033 		hme_start_timer(hmep, hme_try_speed, HME_TICKS);
2034 		return (hmep->hme_autoneg = HME_HWAN_INPROGRESS);
2037 	}
2038 
2039 	/*
2040 	 * Auto-negotiation has been in progress. Wait for at
2041 	 * least 3000 ms.
2042 	 * Changed 8/28/97 to fix bug ID 4070989.
2043 	 */
2044 	if (hmep->hme_delay < 30) {
2045 		hmep->hme_delay++;
2046 		hme_start_timer(hmep, hme_try_speed, HME_TICKS);
2047 		return (hmep->hme_autoneg = HME_HWAN_INPROGRESS);
2048 	}
2049 
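	/* As above, read the BMSR twice to refresh its latched status bits. */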
2050 	(void) hme_mii_read(hmep, HME_PHY_BMSR, &stat);
2051 	if (hme_mii_read(hmep, HME_PHY_BMSR, &stat) == 1) {
2052 		/*
2053 		 * Transceiver does not talk mii
2054 		 */
2055 		goto hme_anfail;
2056 	}
2057 
2058 	if ((stat & PHY_BMSR_ANC) == 0) {
2059 		/*
2060 		 * wait for a maximum of 5 seconds
2061 		 */
2062 		if (hmep->hme_delay < 50) {
2063 			hmep->hme_delay++;
2064 			hme_start_timer(hmep, hme_try_speed, HME_TICKS);
2065 			return (hmep->hme_autoneg = HME_HWAN_INPROGRESS);
2066 		}
2067 #ifdef	HME_AUTONEG_DEBUG
2068 		HME_DEBUG_MSG1(hmep, SEVERITY_UNKNOWN, AUTONEG_MSG,
2069 		    "Auto-negotiation not completed in 5 seconds");
2070 		HME_DEBUG_MSG2(hmep, SEVERITY_UNKNOWN, AUTONEG_MSG,
2071 		    " PHY status reg = %X", stat);
2072 
2073 		hme_mii_read(hmep, HME_PHY_BMCR, &control);
2074 		HME_DEBUG_MSG2(hmep, SEVERITY_UNKNOWN, AUTONEG_MSG,
2075 		    " PHY control reg = %x", control);
2076 
2077 		hme_mii_read(hmep, HME_PHY_ANAR, &anar);
2078 		HME_DEBUG_MSG2(hmep, SEVERITY_UNKNOWN, AUTONEG_MSG,
2079 		    " PHY anar reg = %x", anar);
2080 
2081 		hme_mii_read(hmep, HME_PHY_ANER, &aner);
2082 		HME_DEBUG_MSG2(hmep, SEVERITY_UNKNOWN, AUTONEG_MSG,
2083 		    " PHY aner reg = %x", aner);
2084 
2085 		hme_mii_read(hmep, HME_PHY_ANLPAR, &anlpar);
2086 		HME_DEBUG_MSG2(hmep, SEVERITY_UNKNOWN, AUTONEG_MSG,
2087 		    " PHY anlpar reg = %x", anlpar);
2088 #endif
2089 		if (HME_DP83840) {
2090 			(void) hme_mii_read(hmep, HME_PHY_ANER, &aner);
2091 			if (aner & PHY_ANER_MLF) {
2092 
2093 				HME_DEBUG_MSG1(hmep, SEVERITY_UNKNOWN,
2094 				    AUTONEG_MSG,
2095 				    " hme_try_autoneg: MLF Detected"
2096 				    " after 5 seconds");
2097 
2098 				return (hmep->hme_autoneg = HME_HWAN_FAILED);
2099 			}
2100 		}
2101 
2102 		goto hme_anfail;
2103 	}
2104 
2105 	HME_DEBUG_MSG2(hmep, SEVERITY_UNKNOWN, AUTONEG_MSG,
2106 	    "Auto-negotiate completed within %d 100ms time", hmep->hme_delay);
2107 
2108 	(void) hme_mii_read(hmep, HME_PHY_ANER, &aner);
2109 	if (aner & PHY_ANER_MLF) {
2110 		HME_FAULT_MSG1(hmep, SEVERITY_UNKNOWN, AUTONEG_MSG,
2111 		    par_detect_msg);
2112 		goto hme_anfail;
2113 	}
2114 
2115 	if (!(stat & PHY_BMSR_LNKSTS)) {
2116 		/*
2117 		 * wait for a maximum of 10 seconds
2118 		 */
2119 		if (hmep->hme_delay < 100) {
2120 			hmep->hme_delay++;
2121 			hme_start_timer(hmep, hme_try_speed, HME_TICKS);
2122 			return (hmep->hme_autoneg = HME_HWAN_INPROGRESS);
2123 		}
2124 		HME_DEBUG_MSG2(hmep, SEVERITY_UNKNOWN, AUTONEG_MSG,
2125 		    "Link not Up in 10 seconds: stat = %X", stat);
2126 		goto hme_anfail;
2127 	} else {
2128 		hmep->hme_bmsr |= (PHY_BMSR_LNKSTS);
2129 		hme_get_autoinfo(hmep);
2130 		hmep->hme_force_linkdown = HME_LINKDOWN_DONE;
2131 		hme_setup_link_status(hmep);
2132 		hme_start_mifpoll(hmep);
2133 		hme_start_timer(hmep, hme_check_link, HME_LINKCHECK_TIMER);
2134 		if (hmep->hme_fdx != hmep->hme_macfdx)
2135 			(void) hmeinit(hmep);
2136 		return (hmep->hme_autoneg = HME_HWAN_SUCCESFUL);
2137 	}
2138 
2139 hme_anfail:
2140 	HME_DEBUG_MSG1(hmep, SEVERITY_UNKNOWN, AUTONEG_MSG,
2141 	    "Retry Auto-negotiation.");
2142 	hme_start_timer(hmep, hme_try_speed, HME_TICKS);
2143 	return (hmep->hme_autoneg = HME_HWAN_TRY);
2144 }
2145 
2146 /*
2147  * This function performs automatic speed detection.
2148  * The Internal Transceiver, which is based on the National DP83840
2149  * PHY chip, supports auto-negotiation.
2150  * Some external transceivers may not support auto-negotiation.
2151  * In that case, the software performs the speed detection.
2152  * The software tries to bring down the link for about 2 seconds to
2153  * force the Link Partner to notice the speed change.
2154  * The software speed detection favors 100 Mbps: it selects 100 Mbps
2155  * for a longer duration (5 seconds) than 10 Mbps (2 seconds).
2156  * Also, even after the link has come up at 10 Mbps once, 100 Mbps
2157  * is tried again.  Only if the link does not come up at 100 Mbps
2158  * is the 10 Mbps speed tried again.
2159  */
2160 static void
2161 hme_try_speed(void *arg)
2162 {
2163 	struct hme	*hmep = arg;
2164 	int		linkup;
2165 	uint_t		temp;
2166 
2167 	hme_stop_timer(hmep);
2168 	temp = hmep->hme_transceiver; /* save the transceiver type */
2169 	hme_check_transceiver(hmep);
2170 	if (temp != hmep->hme_transceiver) {
2171 		if (hmep->hme_transceiver == HME_EXTERNAL_TRANSCEIVER) {
2172 			HME_FAULT_MSG1(hmep, SEVERITY_UNKNOWN, XCVR_MSG,
2173 			    ext_xcvr_msg);
2174 		} else {
2175 			HME_FAULT_MSG1(hmep, SEVERITY_UNKNOWN, XCVR_MSG,
2176 			    int_xcvr_msg);
2177 		}
2178 		hme_start_timer(hmep, hme_check_link, 10 * HME_TICKS);
2179 		return;
2180 	}
2181 
2182 	if ((hmep->hme_transceiver == HME_INTERNAL_TRANSCEIVER) &&
2183 	    (hmep->hme_link_pulse_disabled)) {
2184 		hmep->hme_tryspeed = HME_SPEED_10;
2185 		hme_disable_link_pulse(hmep);
2186 		return;
2187 	}
2188 
2189 	if (hme_autoneg_enable && (hmep->hme_autoneg != HME_HWAN_FAILED)) {
2190 		if (hme_try_auto_negotiation(hmep) != HME_HWAN_FAILED)
2191 			return;	/* auto negotiation successful or being tried */
2192 	}
2193 
2194 	linkup = hme_select_speed(hmep, hmep->hme_tryspeed);
2195 	if (hmep->hme_linkup_cnt == 1) {
2196 		hme_start_timer(hmep, hme_try_speed, SECOND(1));
2197 		return;
2198 	}
2199 	if (linkup) {
2200 		switch (hmep->hme_tryspeed) {
2201 		case HME_SPEED_100:
2202 			if (hmep->hme_linkup_cnt == 4) {
2203 				hmep->hme_ntries =	HME_NTRIES_LOW;
2204 				hmep->hme_nlasttries =	HME_NTRIES_LOW;
2205 				hmep->hme_linkup = 1;
2206 				hmep->hme_linkcheck = 1;
2207 				hme_link_now_up(hmep);
2208 				hme_setup_link_status(hmep);
2209 				hme_start_mifpoll(hmep);
2210 				hme_start_timer(hmep, hme_check_link,
2211 				    HME_LINKCHECK_TIMER);
2212 				if (hmep->hme_fdx != hmep->hme_macfdx)
2213 					(void) hmeinit(hmep);
2214 			} else
2215 				hme_start_timer(hmep, hme_try_speed, HME_TICKS);
2216 			break;
2217 		case HME_SPEED_10:
2218 			if (hmep->hme_linkup_cnt == 4) {
2219 				if (hmep->hme_linkup_10) {
2220 					hmep->hme_linkup_10 = 0;
2221 					hmep->hme_ntries = HME_NTRIES_LOW;
2222 					hmep->hme_nlasttries = HME_NTRIES_LOW;
2223 					hmep->hme_linkup = 1;
2224 					hmep->hme_linkcheck = 1;
2225 					hmep->hme_ifspeed = HME_SPEED_10;
2226 					hme_setup_link_status(hmep);
2227 					hme_start_mifpoll(hmep);
2228 					hme_start_timer(hmep, hme_check_link,
2229 					    HME_LINKCHECK_TIMER);
2230 					if (hmep->hme_fdx != hmep->hme_macfdx)
2231 						(void) hmeinit(hmep);
2232 				} else {
2233 					hmep->hme_linkup_10 = 1;
2234 					hmep->hme_tryspeed = HME_SPEED_100;
2235 					hmep->hme_force_linkdown =
2236 					    HME_FORCE_LINKDOWN;
2237 					hmep->hme_linkup_cnt = 0;
2238 					hmep->hme_ntries = HME_NTRIES_LOW;
2239 					hmep->hme_nlasttries = HME_NTRIES_LOW;
2240 					hme_start_timer(hmep,
2241 					    hme_try_speed, HME_TICKS);
2242 				}
2243 
2244 			} else
2245 				hme_start_timer(hmep, hme_try_speed, HME_TICKS);
2246 			break;
2247 		default:
2248 			HME_DEBUG_MSG1(hmep, SEVERITY_HIGH, XCVR_MSG,
2249 			    "Default: Try speed");
2250 			break;
2251 		}
2252 		return;
2253 	}
2254 
2255 	hmep->hme_ntries--;
2256 	hmep->hme_linkup_cnt = 0;
2257 	if (hmep->hme_ntries == 0) {
2258 		hmep->hme_force_linkdown = HME_FORCE_LINKDOWN;
2259 		switch (hmep->hme_tryspeed) {
2260 		case HME_SPEED_100:
2261 			hmep->hme_tryspeed = HME_SPEED_10;
2262 			hmep->hme_ntries = HME_NTRIES_LOW_10;
2263 			break;
2264 		case HME_SPEED_10:
2265 			hmep->hme_ntries = HME_NTRIES_LOW;
2266 			hmep->hme_tryspeed = HME_SPEED_100;
2267 			break;
2268 		default:
2269 			HME_DEBUG_MSG1(hmep, SEVERITY_HIGH, XCVR_MSG,
2270 			    "Default: Try speed");
2271 			break;
2272 		}
2273 	}
2274 	hme_start_timer(hmep, hme_try_speed, HME_TICKS);
2275 }
2276 
2277 static void
2278 hme_link_now_up(struct hme *hmep)
2279 {
2280 	uint16_t	btxpc;
2281 	/*
2282 	 * Work-around for the scramble problem with QSI
2283 	 * chip and Synoptics 28115 switch.
2284 	 * Added by the Interface Technologies Group (NPG) 8/28/1997.
2285 	 */
2286 	if ((HME_QS6612) && ((hmep->hme_tryspeed  == HME_SPEED_100) ||
2287 	    (hmep->hme_forcespeed == HME_SPEED_100))) {
2288 		/*
2289 		 * The check of 'hmep->hme_forcespeed' is necessary when
2290 		 * auto-negotiation has been disabled via the 'hme.conf'
2291 		 * file: in that case hmep->hme_tryspeed is not
2292 		 * initialized, and without this check the workaround
2293 		 * would not be applied.
2294 		 */
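		/*
		 * Pulse the descrambler-disable bit in BTXPC for ~20us and
		 * then restore the original value; this momentary scrambler
		 * reset works around the problem described above.
		 */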
2295 		if (hme_mii_read(hmep, HME_PHY_BTXPC, &btxpc) == 0) {
2296 			hme_mii_write(hmep, HME_PHY_BTXPC,
2297 			    (btxpc | PHY_BTXPC_DSCRAM));
2298 			drv_usecwait(20);
2299 			hme_mii_write(hmep, HME_PHY_BTXPC, btxpc);
2300 		}
2301 	}
2302 }
2303 /* <<<<<<<<<<<<<<<<<<<<<<<<<<<  LOADABLE ENTRIES  >>>>>>>>>>>>>>>>>>>>>>> */
2304 
2305 int
2306 _init(void)
2307 {
2308 	int	status;
2309 
2310 	mac_init_ops(&hme_dev_ops, "hme");
2311 	if ((status = mod_install(&modlinkage)) != 0) {
2312 		mac_fini_ops(&hme_dev_ops);
2313 	}
2314 	return (status);
2315 }
2316 
2317 int
2318 _fini(void)
2319 {
2320 	int	status;
2321 
2322 	if ((status = mod_remove(&modlinkage)) == 0) {
2323 		mac_fini_ops(&hme_dev_ops);
2324 	}
2325 	return (status);
2326 }
2327 
2328 int
2329 _info(struct modinfo *modinfop)
2330 {
2331 	return (mod_info(&modlinkage, modinfop));
2332 }
2333 
2334 
2335 
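/*
 * HMERINDEX maps a monotonically increasing receive-descriptor index into
 * the fixed-size (HMERPENDING) receive ring.
 */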
2336 #define	HMERINDEX(i)		(i % HMERPENDING)
2337 
2338 #define	DONT_FLUSH		-1
2339 
2340 /*
2341  * Allocate and zero-out "number" structures
2342  * each of type "structure" in kernel memory.
2343  */
2344 #define	GETSTRUCT(structure, number)   \
2345 	((structure *)kmem_zalloc(\
2346 		(size_t)(sizeof (structure) * (number)), KM_SLEEP))
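/*
 * For example, the attach routine below allocates its per-instance soft
 * state with GETSTRUCT(struct hme, 1).
 */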
2347 
2348 /*
2349  * Translate a kernel virtual address to i/o address.
2350  */
2351 
2352 #define	HMEIOPBIOADDR(hmep, a) \
2353 	((uint32_t)((hmep)->hme_iopbiobase + \
2354 		((uintptr_t)(a) - (hmep)->hme_iopbkbase)))
2355 
2356 /*
2357  * ddi_dma_sync() a TMD or RMD descriptor.
2358  */
2359 #define	HMESYNCIOPB(hmep, a, size, who) \
2360 	(void) ddi_dma_sync((hmep)->hme_md_h, \
2361 		(off_t)((ulong_t)(a) - (hmep)->hme_iopbkbase), \
2362 		(size_t)(size), \
2363 		(who))
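/*
 * Both macros above assume the TMD/RMD descriptors live in one contiguous
 * IOPB area: hme_iopbkbase is its kernel virtual base and hme_iopbiobase
 * the matching i/o base, so an offset computed against one base is valid
 * for the other (and for ddi_dma_sync on hme_md_h).
 */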
2364 
2365 #define	CHECK_IOPB() \
2366 	hme_check_dma_handle(__FILE__, __LINE__, hmep, hmep->hme_md_h)
2367 #define	CHECK_DMA(handle) \
2368 	hme_check_dma_handle(__FILE__, __LINE__, hmep, (handle))
2369 
2370 /*
2371  * Ethernet broadcast address definition.
2372  */
2373 static	struct ether_addr	etherbroadcastaddr = {
2374 	0xff, 0xff, 0xff, 0xff, 0xff, 0xff
2375 };
2376 
2377 /*
2378  * MIB II broadcast/multicast packets
2379  */
2380 #define	IS_BROADCAST(pkt) (bcmp(pkt, &etherbroadcastaddr, ETHERADDRL) == 0)
2381 #define	IS_MULTICAST(pkt) ((pkt[0] & 01) == 1)
2382 #define	BUMP_InNUcast(hmep, pkt) \
2383 		if (IS_BROADCAST(pkt)) { \
2384 			hmep->hme_brdcstrcv++; \
2385 		} else if (IS_MULTICAST(pkt)) { \
2386 			hmep->hme_multircv++; \
2387 		}
2388 #define	BUMP_OutNUcast(hmep, pkt) \
2389 		if (IS_BROADCAST(pkt)) { \
2390 			hmep->hme_brdcstxmt++; \
2391 		} else if (IS_MULTICAST(pkt)) { \
2392 			hmep->hme_multixmt++; \
2393 		}
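/*
 * The BUMP_* macros above are presumably invoked from the send and receive
 * paths elsewhere in this file to maintain the MIB II non-unicast counters.
 */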
2394 
2395 
2396 static int
2397 hme_create_prop_from_kw(dev_info_t *dip, char *vpdname, char *vpdstr)
2398 {
2399 	char propstr[80];
2400 	int i, needprop = 0;
2401 	struct ether_addr local_mac;
2402 
2403 #ifdef HME_DEBUG
2404 	struct hme *hmep;
2405 	hmep = ddi_get_driver_private(dip);
2406 #endif
2407 
2408 	if (strcmp(vpdname, "NA") == 0) {
2409 		(void) strcpy(propstr, "local-mac-address");
2410 		needprop = 1;
2411 	} else if (strcmp(vpdname, "Z0") == 0) {
2412 		(void) strcpy(propstr, "model");
2413 		needprop = 1;
2414 	} else if (strcmp(vpdname, "Z1") == 0) {
2415 		(void) strcpy(propstr, "board-model");
2416 		needprop = 1;
2417 	}
2418 
2419 	if (needprop == 1) {
2420 		if (strcmp(propstr, "local-mac-address") == 0) {
2421 			for (i = 0; i < ETHERADDRL; i++)
2422 				local_mac.ether_addr_octet[i] =
2423 				    (uchar_t)vpdstr[i];
2424 			if (ddi_prop_create(DDI_DEV_T_NONE, dip,
2425 			    DDI_PROP_CANSLEEP, propstr,
2426 			    (char *)local_mac.ether_addr_octet, ETHERADDRL)
2427 			    != DDI_SUCCESS) {
2428 				HME_DEBUG_MSG1(hmep, SEVERITY_UNKNOWN,
2429 				    AUTOCONFIG_MSG, "hme_create_newvpd_props: "
2430 				    "ddi_prop_create error");
2431 				return (DDI_FAILURE);
2432 			}
2433 		} else {
2434 			if (ddi_prop_create(DDI_DEV_T_NONE, dip,
2435 			    DDI_PROP_CANSLEEP, propstr, vpdstr,
2436 			    strlen(vpdstr)+1) != DDI_SUCCESS) {
2437 				HME_DEBUG_MSG1(hmep, SEVERITY_UNKNOWN,
2438 				    AUTOCONFIG_MSG, "hme_create_newvpd_props: "
2439 				    "ddi_prop_create error");
2440 				return (DDI_FAILURE);
2441 			}
2442 		}
2443 	}
2444 	return (0);
2445 }
2446 
2447 /*
2448  * Get properties from old VPD
2449  * for PCI cards
2450  */
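/*
 * Old-format VPD layout as parsed below (inferred from the code, not from
 * a specification):
 *	byte 0:		0x90 resource tag
 *	bytes 3..n:	keyword records, each consisting of a 2-byte
 *			keyword name, a 1-byte length and that many
 *			data bytes
 */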
2451 static int
2452 hme_get_oldvpd_props(dev_info_t *dip, int vpd_base)
2453 {
2454 	struct hme *hmep;
2455 	int vpd_start, vpd_len, kw_start, kw_len, kw_ptr;
2456 	char kw_namestr[3];
2457 	char kw_fieldstr[256];
2458 	int i;
2459 
2460 	hmep = ddi_get_driver_private(dip);
2461 
2462 	vpd_start = vpd_base;
2463 
2464 	if ((GET_ROM8(&hmep->hme_romp[vpd_start]) & 0xff) != 0x90) {
2465 		return (1); /* error */
2466 	} else {
2467 		vpd_len = 9;
2468 	}
2469 
2470 	/* Get local-mac-address */
2471 	kw_start = vpd_start + 3; /* Location of 1st keyword */
2472 	kw_ptr = kw_start;
2473 	while ((kw_ptr - kw_start) < vpd_len) { /* Get all keywords */
2474 		kw_namestr[0] = GET_ROM8(&hmep->hme_romp[kw_ptr]);
2475 		kw_namestr[1] = GET_ROM8(&hmep->hme_romp[kw_ptr+1]);
2476 		kw_namestr[2] = '\0';
2477 		kw_len = (int)(GET_ROM8(&hmep->hme_romp[kw_ptr+2]) & 0xff);
2478 		for (i = 0, kw_ptr += 3; i < kw_len; i++)
2479 			kw_fieldstr[i] = GET_ROM8(&hmep->hme_romp[kw_ptr+i]);
2480 		kw_fieldstr[i] = '\0';
2481 		if (hme_create_prop_from_kw(dip, kw_namestr, kw_fieldstr)) {
2482 			HME_DEBUG_MSG2(hmep, SEVERITY_NONE, CONFIG_MSG,
2483 			    "cannot create_prop_from_kw %s", kw_namestr);
2484 			return (DDI_FAILURE);
2485 		}
2486 		kw_ptr += kw_len;
2487 	} /* next keyword */
2488 
2489 	if (ddi_prop_create(DDI_DEV_T_NONE, dip, DDI_PROP_CANSLEEP, "model",
2490 	    "SUNW,cheerio", strlen("SUNW,cheerio")+1) != DDI_SUCCESS) {
2491 		HME_DEBUG_MSG1(hmep, SEVERITY_NONE, AUTOCONFIG_MSG,
2492 		    "hme_get_oldvpd model: ddi_prop_create error");
2493 		return (DDI_FAILURE);
2494 	}
2495 	return (0);
2496 }
2497 
2498 
2499 /*
2500  * Get properties from new VPD
2501  * for CompactPCI cards
2502  */
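/*
 * New-format VPD as parsed below: an 0x82 identifier-string tag (16-bit
 * little-endian length at offsets 1-2) is followed by one or more 0x90
 * VPD resources, each carrying keyword records in the same layout as the
 * old format above.
 */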
2503 static int
2504 hme_get_newvpd_props(dev_info_t *dip, int vpd_base)
2505 {
2506 	struct hme *hmep;
2507 	int vpd_start, vpd_len, kw_start, kw_len, kw_ptr;
2508 	char kw_namestr[3];
2509 	char kw_fieldstr[256];
2510 	int maxvpdsize, i;
2511 
2512 	hmep = ddi_get_driver_private(dip);
2513 
2514 	maxvpdsize = 1024; /* Real size not known until after it is read */
2515 
2516 	vpd_start = (int)((GET_ROM8(&(hmep->hme_romp[vpd_base+1])) & 0xff) |
2517 	    ((GET_ROM8(&hmep->hme_romp[vpd_base+2]) & 0xff) << 8)) + 3;
2518 	vpd_start = vpd_base + vpd_start;
2519 	while (vpd_start < (vpd_base + maxvpdsize)) { /* Get all VPDs */
2520 		if ((GET_ROM8(&hmep->hme_romp[vpd_start]) & 0xff) != 0x90) {
2521 			break; /* no VPD found */
2522 		} else {
2523 			vpd_len = (int)((GET_ROM8(&hmep->hme_romp[vpd_start
2524 			    + 1]) & 0xff) | (GET_ROM8(&hmep->hme_romp[vpd_start
2525 			    + 2]) & 0xff) << 8);
2526 		}
2527 		/* Get all keywords in this VPD */
2528 		kw_start = vpd_start + 3; /* Location of 1st keyword */
2529 		kw_ptr = kw_start;
2530 		while ((kw_ptr - kw_start) < vpd_len) { /* Get all keywords */
2531 			kw_namestr[0] = GET_ROM8(&hmep->hme_romp[kw_ptr]);
2532 			kw_namestr[1] = GET_ROM8(&hmep->hme_romp[kw_ptr+1]);
2533 			kw_namestr[2] = '\0';
2534 			kw_len =
2535 			    (int)(GET_ROM8(&hmep->hme_romp[kw_ptr+2]) & 0xff);
2536 			for (i = 0, kw_ptr += 3; i < kw_len; i++)
2537 				kw_fieldstr[i] =
2538 				    GET_ROM8(&hmep->hme_romp[kw_ptr+i]);
2539 			kw_fieldstr[i] = '\0';
2540 			if (hme_create_prop_from_kw(dip, kw_namestr,
2541 			    kw_fieldstr)) {
2542 				HME_DEBUG_MSG2(hmep, SEVERITY_NONE, CONFIG_MSG,
2543 				"cannot create_prop_from_kw %s", kw_namestr);
2544 				return (DDI_FAILURE);
2545 			}
2546 			kw_ptr += kw_len;
2547 		} /* next keyword */
2548 		vpd_start += (vpd_len + 3);
2549 	} /* next VPD */
2550 	return (0);
2551 }
2552 
2553 
2554 /*
2555  * Get properties from VPD
2556  */
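/*
 * The expansion ROM starts with the standard 0x55aa signature bytes; the
 * "PCIR" data structure located by the scan below carries a 16-bit
 * little-endian pointer to the VPD at offsets 8-9.
 */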
2557 static int
2558 hme_get_vpd_props(dev_info_t *dip)
2559 {
2560 	struct hme *hmep;
2561 	int v0, v1, vpd_base = 0;	/* 0 means no VPD pointer found */
2562 	int i, epromsrchlimit;
2563 
2564 
2565 	hmep = ddi_get_driver_private(dip);
2566 
2567 	v0 = (int)(GET_ROM8(&(hmep->hme_romp[0])));
2568 	v1 = (int)(GET_ROM8(&(hmep->hme_romp[1])));
2569 	v0 = ((v0 & 0xff) << 8 | v1);
2570 
2571 	if ((v0 & 0xffff) != 0x55aa) {
2572 		cmn_err(CE_NOTE, " Valid pci prom not found \n");
2573 		return (1);
2574 	}
2575 
2576 	epromsrchlimit = 4096;
2577 	for (i = 2; i < epromsrchlimit; i++) {
2578 		/* "PCIR" */
2579 		if (((GET_ROM8(&(hmep->hme_romp[i])) & 0xff) == 'P') &&
2580 		    ((GET_ROM8(&(hmep->hme_romp[i+1])) & 0xff) == 'C') &&
2581 		    ((GET_ROM8(&(hmep->hme_romp[i+2])) & 0xff) == 'I') &&
2582 		    ((GET_ROM8(&(hmep->hme_romp[i+3])) & 0xff) == 'R')) {
2583 			vpd_base =
2584 			    (int)((GET_ROM8(&(hmep->hme_romp[i+8])) & 0xff) |
2585 			    (GET_ROM8(&(hmep->hme_romp[i+9])) & 0xff) << 8);
2586 			break; /* VPD pointer found */
2587 		}
2588 	}
2589 
2590 	/* No VPD found */
2591 	if (vpd_base == 0) {
2592 		cmn_err(CE_NOTE, " Vital Product Data pointer not found \n");
2593 		return (1);
2594 	}
2595 
2596 	v0 = (int)(GET_ROM8(&(hmep->hme_romp[vpd_base])));
2597 	if (v0 == 0x82) {
2598 		if (hme_get_newvpd_props(dip, vpd_base))
2599 			return (1);
2600 		return (0);
2601 	} else if (v0 == 0x90) {
2602 		if (hme_get_oldvpd_props(dip, vpd_base))
2603 			return (1);
2604 		return (0);
2605 	} else
2606 		return (1);	/* unknown start byte in VPD */
2607 }
2608 
2609 static int
2610 hmeget_promprops(dev_info_t *dip)
2611 {
2612 	struct hme *hmep;
2613 	int rom_bar;
2614 	ddi_acc_handle_t cfg_handle;
2615 	struct {
2616 		uint16_t vendorid;
2617 		uint16_t devid;
2618 		uint16_t command;
2619 		uint16_t status;
2620 		uint32_t junk1;
2621 		uint8_t cache_line;
2622 		uint8_t latency;
2623 		uint8_t header;
2624 		uint8_t bist;
2625 		uint32_t base;
2626 		uint32_t base14;
2627 		uint32_t base18;
2628 		uint32_t base1c;
2629 		uint32_t base20;
2630 		uint32_t base24;
2631 		uint32_t base28;
2632 		uint32_t base2c;
2633 		uint32_t base30;
2634 	} *cfg_ptr;
2635 
2636 	hmep = ddi_get_driver_private(dip);
2637 
2638 
2639 	/*
2640 	 * map configuration space
2641 	 */
2642 	if (ddi_regs_map_setup(hmep->dip, 0, (caddr_t *)&cfg_ptr,
2643 	    0, 0, &hmep->hme_dev_attr, &cfg_handle)) {
2644 		HME_DEBUG_MSG1(hmep, SEVERITY_UNKNOWN, AUTOCONFIG_MSG,
2645 		    "ddi_map_regs for config space failed");
2646 		return (DDI_FAILURE);
2647 	}
2648 
2649 	/*
2650 	 * Enable bus-master and memory accesses
2651 	 */
2652 	ddi_put16(cfg_handle, &cfg_ptr->command,
2653 	    PCI_COMM_SERR_ENABLE | PCI_COMM_PARITY_DETECT |
2654 	    PCI_COMM_MAE | PCI_COMM_ME);
2655 
2656 	/*
2657 	 * Enable rom accesses
2658 	 */
2659 	rom_bar = ddi_get32(cfg_handle, &cfg_ptr->base30);
2660 	ddi_put32(cfg_handle, &cfg_ptr->base30, rom_bar | 1);
2661 
2662 
2663 	if (ddi_regs_map_setup(dip, 2, (caddr_t *)&(hmep->hme_romp), 0, 0,
2664 	    &hmep->hme_dev_attr, &hmep->hme_romh)) {
2665 		HME_DEBUG_MSG1(hmep, SEVERITY_NONE, AUTOCONFIG_MSG,
2666 		    "reg mapping failed: Check reg property ");
2667 		if (cfg_ptr)
2668 			ddi_regs_map_free(&cfg_handle);
2669 		return (DDI_FAILURE);
2670 	} else if (hme_get_vpd_props(dip)) {
2671 		/* free the mappings rather than leaking them on failure */
2672 		ddi_regs_map_free(&hmep->hme_romh);
2673 		ddi_regs_map_free(&cfg_handle);
		return (1);
	}
2674 	if (hmep->hme_romp)
2675 		ddi_regs_map_free(&hmep->hme_romh);
2676 	if (cfg_ptr)
2677 		ddi_regs_map_free(&cfg_handle);
2678 	return (0);	/* SUCCESS */
2679 
2680 }
2681 
2682 static void
2683 hmeget_hm_rev_property(struct hme *hmep)
2684 {
2685 	int	hm_rev;
2686 
2687 
2688 	hm_rev = hmep->asic_rev;
2689 	switch (hm_rev) {
2690 	case HME_2P1_REVID:
2691 	case HME_2P1_REVID_OBP:
2692 		HME_FAULT_MSG2(hmep, SEVERITY_NONE, DISPLAY_MSG,
2693 		    "SBus 2.1 Found (Rev Id = %x)", hm_rev);
2694 		hmep->hme_mifpoll_enable = 1;
2695 		hmep->hme_frame_enable = 1;
2696 		break;
2697 
2698 	case HME_2P0_REVID:
2699 		HME_FAULT_MSG2(hmep, SEVERITY_NONE, DISPLAY_MSG,
2700 		    "SBus 2.0 Found (Rev Id = %x)", hm_rev);
2701 		break;
2702 
2703 	case HME_1C0_REVID:
2704 		HME_FAULT_MSG2(hmep, SEVERITY_NONE, DISPLAY_MSG,
2705 		    "PCI IO 1.0 Found (Rev Id = %x)", hm_rev);
2706 		break;
2707 
2708 	default:
2709 		HME_FAULT_MSG3(hmep, SEVERITY_HIGH, DISPLAY_MSG,
2710 		    "%s (Rev Id = %x) Found",
2711 		    (hm_rev == HME_2C0_REVID) ? "PCI IO 2.0" : "Sbus", hm_rev);
2712 		hmep->hme_mifpoll_enable = 1;
2713 		hmep->hme_frame_enable = 1;
2714 		hmep->hme_lance_mode_enable = 1;
2715 		hmep->hme_rxcv_enable = 1;
2716 		break;
2717 	}
2718 }
2719 
2720 /*
2721  * Interface exists: make it available by filling in the network interface
2722  * record.  The system will initialize the interface when it is ready
2723  * to accept packets.
2724  */
2725 int
2726 hmeattach(dev_info_t *dip, ddi_attach_cmd_t cmd)
2727 {
2728 	struct hme *hmep;
2729 	mac_register_t *macp = NULL;
2730 	int 	regno;
2731 	int hm_rev = 0;
2732 	int prop_len = sizeof (int);
2733 	ddi_acc_handle_t cfg_handle;
2734 	struct {
2735 		uint16_t vendorid;
2736 		uint16_t devid;
2737 		uint16_t command;
2738 		uint16_t status;
2739 		uint8_t revid;
2740 		uint8_t j1;
2741 		uint16_t j2;
2742 	} *cfg_ptr;
2743 
2744 	switch (cmd) {
2745 	case DDI_ATTACH:
2746 		break;
2747 
2748 	case DDI_RESUME:
2749 		if ((hmep = ddi_get_driver_private(dip)) == NULL)
2750 			return (DDI_FAILURE);
2751 
2752 		hmep->hme_flags &= ~HMESUSPENDED;
2753 		hmep->hme_linkcheck = 0;
2754 
2755 		if (hmep->hme_started)
2756 			(void) hmeinit(hmep);
2757 		return (DDI_SUCCESS);
2758 
2759 	default:
2760 		return (DDI_FAILURE);
2761 	}
2762 
2763 	/*
2764 	 * Allocate soft device data structure
2765 	 */
2766 	hmep = GETSTRUCT(struct hme, 1);
2767 
2768 	/*
2769 	 * Might as well set up elements of data structure
2770 	 */
2771 	hmep->dip =		dip;
2772 	hmep->instance = 	ddi_get_instance(dip);
2773 	hmep->pagesize =	ddi_ptob(dip, (ulong_t)1); /* IOMMU PSize */
2774 
2775 	/*
2776 	 *  Might as well setup the driver private
2777 	 * structure as part of the dip.
2778 	 */
2779 	ddi_set_driver_private(dip, hmep);
2780 
2781 	/*
2782 	 * Reject this device if it's in a slave-only slot.
2783 	 */
2784 	if (ddi_slaveonly(dip) == DDI_SUCCESS) {
2785 		HME_FAULT_MSG1(hmep, SEVERITY_UNKNOWN, CONFIG_MSG,
2786 		    slave_slot_msg);
2787 		goto error_state;
2788 	}
2789 
2790 	/*
2791 	 * Map in the device registers.
2792 	 *
2793 	 * Reg # 0 is the Global register set
2794 	 * Reg # 1 is the ETX register set
2795 	 * Reg # 2 is the ERX register set
2796 	 * Reg # 3 is the BigMAC register set.
2797 	 * Reg # 4 is the MIF register set
2798 	 */
2799 	if (ddi_dev_nregs(dip, &regno) != (DDI_SUCCESS)) {
2800 		HME_FAULT_MSG2(hmep, SEVERITY_HIGH, INIT_MSG,
2801 		    ddi_nregs_fail_msg, regno);
2802 		goto error_state;
2803 	}
2804 
2805 	switch (regno) {
2806 	case 5:
2807 		hmep->hme_cheerio_mode = 0;
2808 		break;
2809 	case 2:
2810 	case 3: /* for hot swap/plug, there will be 3 entries in "reg" prop */
2811 		hmep->hme_cheerio_mode = 1;
2812 		break;
2813 	default:
2814 		HME_FAULT_MSG1(hmep, SEVERITY_HIGH, INIT_MSG,
2815 		    bad_num_regs_msg);
2816 		goto error_state;
2817 	}
2818 
2819 	/* Initialize device attributes structure */
2820 	hmep->hme_dev_attr.devacc_attr_version = DDI_DEVICE_ATTR_V0;
2821 
2822 	if (hmep->hme_cheerio_mode)
2823 		hmep->hme_dev_attr.devacc_attr_endian_flags =
2824 		    DDI_STRUCTURE_LE_ACC;
2825 	else
2826 		hmep->hme_dev_attr.devacc_attr_endian_flags =
2827 		    DDI_STRUCTURE_BE_ACC;
2828 
2829 	hmep->hme_dev_attr.devacc_attr_dataorder = DDI_STRICTORDER_ACC;
2830 
2831 	if (hmep->hme_cheerio_mode) {
2832 		uint8_t		oldLT;
2833 		uint8_t		newLT = 0;
2834 		dev_info_t	*pdip;
2835 		const char	*pdrvname;
2836 
2837 		/*
2838 		 * Map the PCI config space
2839 		 */
2840 		if (pci_config_setup(dip, &hmep->pci_config_handle) !=
2841 		    DDI_SUCCESS) {
2842 			HME_FAULT_MSG1(hmep, SEVERITY_UNKNOWN, CONFIG_MSG,
2843 			    "pci_config_setup() failed..");
2844 			goto error_state;
2845 		}
2846 
2847 		if (ddi_regs_map_setup(dip, 1,
2848 		    (caddr_t *)&(hmep->hme_globregp), 0, 0,
2849 		    &hmep->hme_dev_attr, &hmep->hme_globregh)) {
2850 			HME_FAULT_MSG1(hmep, SEVERITY_UNKNOWN, CONFIG_MSG,
2851 			    mregs_4global_reg_fail_msg);
2852 			goto error_unmap;
2853 		}
2854 		hmep->hme_etxregh = hmep->hme_erxregh = hmep->hme_bmacregh =
2855 		    hmep->hme_mifregh = hmep->hme_globregh;
2856 
2857 		hmep->hme_etxregp =
2858 		    (void *)(((caddr_t)hmep->hme_globregp) + 0x2000);
2859 		hmep->hme_erxregp =
2860 		    (void *)(((caddr_t)hmep->hme_globregp) + 0x4000);
2861 		hmep->hme_bmacregp =
2862 		    (void *)(((caddr_t)hmep->hme_globregp) + 0x6000);
2863 		hmep->hme_mifregp =
2864 		    (void *)(((caddr_t)hmep->hme_globregp) + 0x7000);
2865 
2866 		/*
2867 		 * Get parent pci bridge info.
2868 		 */
2869 		pdip = ddi_get_parent(dip);
2870 		pdrvname = ddi_driver_name(pdip);
2871 
2872 		oldLT = pci_config_get8(hmep->pci_config_handle,
2873 		    PCI_CONF_LATENCY_TIMER);
2874 		/*
2875 		 * Honor value set in /etc/system
2876 		 * "set hme:pci_latency_timer=0xYY"
2877 		 */
2878 		if (pci_latency_timer)
2879 			newLT = pci_latency_timer;
2880 		/*
2881 		 * Modify LT for simba
2882 		 */
2883 		else if (strcmp("simba", pdrvname) == 0)
2884 			newLT = 0xf0;
2885 		/*
2886 		 * Ensure a minimum cheerio latency timer of 0x50.
2887 		 * Usually OBP or the pci bridge sets this value based
2888 		 * on cheerio's min_grant:
2889 		 * min_grant * 8 (at 33MHz) = 0xa * 0x8 = 0x50.
2890 		 * Some systems set the cheerio LT to 0x40.
2891 		 */
2892 		else if (oldLT < 0x40)
2893 			newLT = 0x50;
2894 
2895 		/*
2896 		 * Now program cheerio's pci latency timer with newLT
2897 		 */
2898 		if (newLT)
2899 			pci_config_put8(hmep->pci_config_handle,
2900 			    PCI_CONF_LATENCY_TIMER, (uchar_t)newLT);
2901 	} else { /* Map register sets */
2902 		if (ddi_regs_map_setup(dip, 0,
2903 		    (caddr_t *)&(hmep->hme_globregp), 0, 0,
2904 		    &hmep->hme_dev_attr, &hmep->hme_globregh)) {
2905 			HME_FAULT_MSG1(hmep, SEVERITY_UNKNOWN, CONFIG_MSG,
2906 			    mregs_4global_reg_fail_msg);
2907 			goto error_state;
2908 		}
2909 		if (ddi_regs_map_setup(dip, 1,
2910 		    (caddr_t *)&(hmep->hme_etxregp), 0, 0,
2911 		    &hmep->hme_dev_attr, &hmep->hme_etxregh)) {
2912 			HME_FAULT_MSG1(hmep, SEVERITY_UNKNOWN, CONFIG_MSG,
2913 			    mregs_4etx_reg_fail_msg);
2914 			goto error_unmap;
2915 		}
2916 		if (ddi_regs_map_setup(dip, 2,
2917 		    (caddr_t *)&(hmep->hme_erxregp), 0, 0,
2918 		    &hmep->hme_dev_attr, &hmep->hme_erxregh)) {
2919 			HME_FAULT_MSG1(hmep, SEVERITY_UNKNOWN, CONFIG_MSG,
2920 			    mregs_4erx_reg_fail_msg);
2921 			goto error_unmap;
2922 		}
2923 		if (ddi_regs_map_setup(dip, 3,
2924 		    (caddr_t *)&(hmep->hme_bmacregp), 0, 0,
2925 		    &hmep->hme_dev_attr, &hmep->hme_bmacregh)) {
2926 			HME_FAULT_MSG1(hmep, SEVERITY_UNKNOWN, CONFIG_MSG,
2927 			    mregs_4bmac_reg_fail_msg);
2928 			goto error_unmap;
2929 		}
2930 
2931 		if (ddi_regs_map_setup(dip, 4,
2932 		    (caddr_t *)&(hmep->hme_mifregp), 0, 0,
2933 		    &hmep->hme_dev_attr, &hmep->hme_mifregh)) {
2934 			HME_FAULT_MSG1(hmep, SEVERITY_UNKNOWN, CONFIG_MSG,
2935 			    mregs_4mif_reg_fail_msg);
2936 			goto error_unmap;
2937 		}
2938 	} /* Endif cheerio_mode */
2939 
2940 	/*
2941 	 * Based on the hm-rev, set some capabilities
2942 	 * Set up default capabilities for HM 2.0
2943 	 */
2944 	hmep->hme_mifpoll_enable = 0;
2945 	hmep->hme_frame_enable = 0;
2946 	hmep->hme_lance_mode_enable = 0;
2947 	hmep->hme_rxcv_enable = 0;
2948 
2949 	/* NEW routine to get the properties */
2950 
2951 	if (ddi_getlongprop_buf(DDI_DEV_T_ANY, hmep->dip, 0, "hm-rev",
2952 	    (caddr_t)&hm_rev, &prop_len) == DDI_PROP_SUCCESS) {
2953 
2954 		hmep->asic_rev = hm_rev;
2955 		hmeget_hm_rev_property(hmep);
2956 	} else {
2957 		/*
2958 		 * The hm-rev property was not found, so this is a case of
2959 		 * hot insertion of a card without its fcode interpreted.
2960 		 * Get the revision from revid in config space after mapping it.
2961 		 */
2962 		if (ddi_regs_map_setup(hmep->dip, 0, (caddr_t *)&cfg_ptr,
2963 		    0, 0, &hmep->hme_dev_attr, &cfg_handle)) {
2964 			HME_DEBUG_MSG1(hmep, SEVERITY_UNKNOWN, AUTOCONFIG_MSG,
2965 			    "hmeattach: ddi_map_regs for config space failed");
2966 			goto error_unmap;	/* don't leak mapped registers */
2967 		}
2968 		/*
2969 		 * Since this is a cheerio-based PCI card, we write 0xC in the
2970 		 * top 4 bits (4-7) of hm-rev and retain the bottom 4 bits
2971 		 * (0-3) for the Cheerio version (1.0 or 2.0 = 0xC0 or 0xC1).
2972 		 */
2973 		hm_rev = ddi_get8(cfg_handle, &cfg_ptr->revid);
2974 		hm_rev = HME_1C0_REVID | (hm_rev & HME_REV_VERS_MASK);
2975 		hmep->asic_rev = hm_rev;
2976 		if (ddi_prop_create(DDI_DEV_T_NONE, dip, DDI_PROP_CANSLEEP,
2977 		    "hm-rev", (caddr_t)&hm_rev, sizeof (hm_rev)) !=
2978 		    DDI_SUCCESS) {
2979 			HME_FAULT_MSG1(hmep, SEVERITY_UNKNOWN, AUTOCONFIG_MSG,
2980 			    "hmeattach: ddi_prop_create error for hm_rev");
2981 		}
2982 		ddi_regs_map_free(&cfg_handle);
2983 
2984 		hmeget_hm_rev_property(hmep);
2985 
2986 		/* get info via VPD */
2987 		if (hmeget_promprops(dip)) {
2988 			HME_FAULT_MSG1(hmep, SEVERITY_UNKNOWN, AUTOCONFIG_MSG,
2989 			    "hmeattach: no promprops");
2990 		}
2991 	}
2992 
2993 	if (!hme_mifpoll_enable)
2994 		hmep->hme_mifpoll_enable = 0;
2995 
2996 	if (ddi_intr_hilevel(dip, 0)) {
2997 		HME_FAULT_MSG1(hmep, SEVERITY_HIGH, NFATAL_ERR_MSG,
2998 		    " high-level interrupts are not supported");
2999 		goto error_unmap;
3000 	}
3001 
3002 	/*
3003 	 * Get intr. block cookie so that mutex locks can be initialized.
3004 	 */
3005 	if (ddi_get_iblock_cookie(dip, 0, &hmep->hme_cookie) != DDI_SUCCESS)
3006 		goto error_unmap;
3007 
3008 	/*
3009 	 * Initialize mutex's for this device.
3010 	 */
3011 	mutex_init(&hmep->hme_xmitlock, NULL, MUTEX_DRIVER, hmep->hme_cookie);
3012 	mutex_init(&hmep->hme_intrlock, NULL, MUTEX_DRIVER, hmep->hme_cookie);
3013 	mutex_init(&hmep->hme_linklock, NULL, MUTEX_DRIVER, hmep->hme_cookie);
3014 
3015 	/*
3016 	 * Quiesce the hardware.
3017 	 */
3018 	(void) hmestop(hmep);
3019 
3020 	/*
3021 	 * Add interrupt to system
3022 	 */
3023 	if (ddi_add_intr(dip, 0, (ddi_iblock_cookie_t *)NULL,
3024 	    (ddi_idevice_cookie_t *)NULL, hmeintr, (caddr_t)hmep)) {
3025 		HME_FAULT_MSG1(hmep, SEVERITY_UNKNOWN, CONFIG_MSG,
3026 		    add_intr_fail_msg);
3027 		goto error_mutex;
3028 	}
3029 
3030 	/*
3031 	 * Set up the ethernet mac address.
3032 	 */
3033 	hme_setup_mac_address(hmep, dip);
3034 
3035 	if (!hmeinit_xfer_params(hmep))
3036 		goto error_intr;
3037 
3038 	if (hmeburstsizes(hmep) == DDI_FAILURE) {
3039 		HME_FAULT_MSG1(hmep, SEVERITY_HIGH, INIT_MSG, burst_size_msg);
3040 		goto error_intr;
3041 	}
3042 
3043 
3044 	hmestatinit(hmep);
3045 
3046 	if ((macp = mac_alloc(MAC_VERSION)) == NULL) {
3047 		HME_FAULT_MSG1(hmep, SEVERITY_HIGH, CONFIG_MSG,
3048 		    "mac_alloc failed");
3049 		goto error_intr;
3050 	}
3051 	macp->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
3052 	macp->m_driver = hmep;
3053 	macp->m_dip = dip;
3054 	macp->m_src_addr = hmep->hme_ouraddr.ether_addr_octet;
3055 	macp->m_callbacks = &hme_m_callbacks;
3056 	macp->m_min_sdu = 0;
3057 	macp->m_max_sdu = ETHERMTU;
3058 	macp->m_margin = VLAN_TAGSZ;
3059 	if (mac_register(macp, &hmep->hme_mh) != 0) {
3060 		mac_free(macp);
3061 		goto error_intr;
3062 	}
3063 
3064 	mac_free(macp);
3065 
3066 	ddi_report_dev(dip);
3067 	return (DDI_SUCCESS);
3068 
3069 	/*
3070 	 * Failure Exit
3071 	 */
3072 
3073 error_intr:
3074 	if (hmep->hme_cookie)
3075 		ddi_remove_intr(dip, 0, (ddi_iblock_cookie_t)0);
3076 
3077 error_mutex:
3078 	mutex_destroy(&hmep->hme_xmitlock);
3079 	mutex_destroy(&hmep->hme_intrlock);
3080 	mutex_destroy(&hmep->hme_linklock);
3081 
3082 error_unmap:
3083 	if (hmep->hme_globregh)
3084 		ddi_regs_map_free(&hmep->hme_globregh);
3085 	if (hmep->hme_cheerio_mode == 0) {
3086 		if (hmep->hme_etxregh)
3087 			ddi_regs_map_free(&hmep->hme_etxregh);
3088 		if (hmep->hme_erxregh)
3089 			ddi_regs_map_free(&hmep->hme_erxregh);
3090 		if (hmep->hme_bmacregh)
3091 			ddi_regs_map_free(&hmep->hme_bmacregh);
3092 		if (hmep->hme_mifregh)
3093 			ddi_regs_map_free(&hmep->hme_mifregh);
3094 	} else {
3095 		if (hmep->pci_config_handle)
3096 			(void) pci_config_teardown(&hmep->pci_config_handle);
3097 		hmep->hme_etxregh = hmep->hme_erxregh = hmep->hme_bmacregh =
3098 		    hmep->hme_mifregh = hmep->hme_globregh = NULL;
3099 	}
3100 
3101 error_state:
3102 	if (hmep) {
3103 		kmem_free((caddr_t)hmep, sizeof (*hmep));
3104 		ddi_set_driver_private(dip, NULL);
3105 	}
3106 
3107 	return (DDI_FAILURE);
3108 }
3109 
3110 int
3111 hmedetach(dev_info_t *dip, ddi_detach_cmd_t cmd)
3112 {
3113 	struct hme *hmep;
3114 	int32_t	unval;
3115 
3116 	if ((hmep = ddi_get_driver_private(dip)) == NULL)
3117 		/*
3118 		 * No resources allocated
3119 		 */
3120 		return (DDI_FAILURE);
3121 
3122 	switch (cmd) {
3123 	case DDI_DETACH:
3124 		break;
3125 
3126 	case DDI_SUSPEND:
3127 		hmep->hme_flags |= HMESUSPENDED;
3128 		hmeuninit(hmep);
3129 		return (DDI_SUCCESS);
3130 
3131 	default:
3132 		HME_DEBUG_MSG1(hmep, SEVERITY_HIGH, UNINIT_MSG,
3133 		    detach_bad_cmd_msg);
3134 		return (DDI_FAILURE);
3135 	}
3136 
3137 
3138 	if (mac_unregister(hmep->hme_mh) != 0) {
3139 		return (DDI_FAILURE);
3140 	}
3141 
3142 	/*
3143 	 * Bug ID 4013267
3144 	 * The bug manifested itself by allowing detach to proceed
3145 	 * while the driver was still busy; subsequent packets would
3146 	 * then cause the driver to panic.
3147 	 */
3148 	if (hmep->hme_flags & (HMERUNNING | HMESUSPENDED)) {
3149 		HME_FAULT_MSG1(hmep, SEVERITY_LOW, CONFIG_MSG, busy_msg);
3150 		return (DDI_FAILURE);
3151 	}
3152 
3153 	/*
3154 	 * Make the driver quiescent; failure here is ignored, since we
3155 	 * don't want it to prevent the detach.
3156 	 */
3157 	(void) hmestop(hmep);
3158 
3159 	/*
3160 	 * Remove instance of the intr
3161 	 */
3162 	ddi_remove_intr(dip, 0, (ddi_iblock_cookie_t)0);
3163 
3164 	/*
3165 	 * Unregister kstats.
3166 	 */
3167 	if (hmep->hme_ksp != NULL)
3168 		kstat_delete(hmep->hme_ksp);
3169 	if (hmep->hme_intrstats != NULL)
3170 		kstat_delete(hmep->hme_intrstats);
3171 
3172 	hmep->hme_ksp = NULL;
3173 	hmep->hme_intrstats = NULL;
3174 
3175 	/*
3176 	 * Stop asynchronous timer events.
3177 	 */
3178 	hme_stop_timer(hmep);
3179 	mutex_exit(&hmep->hme_linklock);
3180 
3181 	/*
3182 	 * Destroy all mutexes and data structures allocated during
3183 	 * attach time.
3184 	 *
3185 	 * Note: at this time we should be the only thread accessing
3186 	 * the structures for this instance.
3187 	 */
3188 
3189 	if (hmep->hme_globregh)
3190 		ddi_regs_map_free(&hmep->hme_globregh);
3191 	if (hmep->hme_cheerio_mode == 0) {
3192 		if (hmep->hme_etxregh)
3193 			ddi_regs_map_free(&hmep->hme_etxregh);
3194 		if (hmep->hme_erxregh)
3195 			ddi_regs_map_free(&hmep->hme_erxregh);
3196 		if (hmep->hme_bmacregh)
3197 			ddi_regs_map_free(&hmep->hme_bmacregh);
3198 		if (hmep->hme_mifregh)
3199 			ddi_regs_map_free(&hmep->hme_mifregh);
3200 	} else {
3201 		if (hmep->pci_config_handle)
3202 			(void) pci_config_teardown(&hmep->pci_config_handle);
3203 		hmep->hme_etxregh = hmep->hme_erxregh = hmep->hme_bmacregh =
3204 		    hmep->hme_mifregh = hmep->hme_globregh = NULL;
3205 	}
3206 
3207 	mutex_destroy(&hmep->hme_xmitlock);
3208 	mutex_destroy(&hmep->hme_intrlock);
3209 	mutex_destroy(&hmep->hme_linklock);
3210 
3211 	if (hmep->hme_md_h != NULL) {
3212 		unval = ddi_dma_unbind_handle(hmep->hme_md_h);
3213 		if (unval == DDI_FAILURE)
3214 			HME_FAULT_MSG1(hmep, SEVERITY_HIGH, DDI_MSG,
3215 			    "dma_unbind_handle failed");
3216 		ddi_dma_mem_free(&hmep->hme_mdm_h);
3217 		ddi_dma_free_handle(&hmep->hme_md_h);
3218 	}
3219 
3220 	hmefreebufs(hmep);
3221 
3222 	/*
3223 	 * dvma handle case.
3224 	 */
3225 	if (hmep->hme_dvmarh != NULL) {
3226 		dvma_release(hmep->hme_dvmarh);
3227 		dvma_release(hmep->hme_dvmaxh);
3228 		hmep->hme_dvmarh = hmep->hme_dvmaxh = NULL;
3229 	}
3230 
3231 	/*
3232 	 * dma handle case.
3233 	 */
3234 	if (hmep->hme_dmarh != NULL) {
3235 		kmem_free(hmep->hme_dmaxh,
3236 		    (HME_TMDMAX + HMERPENDING) * (sizeof (ddi_dma_handle_t)));
3237 		hmep->hme_dmarh = hmep->hme_dmaxh = NULL;
3238 	}
3239 
3240 	hme_param_cleanup(hmep);
3241 
3242 	ddi_set_driver_private(dip, NULL);
3243 	kmem_free(hmep, sizeof (struct hme));
3244 
3245 	return (DDI_SUCCESS);
3246 }
3247 
3248 static boolean_t
3249 hmeinit_xfer_params(struct hme *hmep)
3250 {
3251 	int i;
3252 	int hme_ipg1_conf, hme_ipg2_conf;
3253 	int hme_use_int_xcvr_conf, hme_pace_count_conf;
3254 	int hme_autoneg_conf;
3255 	int hme_anar_100T4_conf;
3256 	int hme_anar_100fdx_conf, hme_anar_100hdx_conf;
3257 	int hme_anar_10fdx_conf, hme_anar_10hdx_conf;
3258 	int hme_ipg0_conf, hme_lance_mode_conf;
3259 	int prop_len = sizeof (int);
3260 	dev_info_t *dip;
3261 
3262 	dip = hmep->dip;
3263 
3264 	for (i = 0; i < A_CNT(hme_param_arr); i++)
3265 		hmep->hme_param_arr[i] = hme_param_arr[i];
3266 
3267 	if (!hmep->hme_g_nd && !hme_param_register(hmep, hmep->hme_param_arr,
3268 	    A_CNT(hme_param_arr))) {
3269 		HME_FAULT_MSG1(hmep, SEVERITY_UNKNOWN, NDD_MSG,
3270 		    param_reg_fail_msg);
3271 		return (B_FALSE);
3272 	}
3273 
3274 	/*
3275 	 * Set up the start-up values for user-configurable parameters
3276 	 * Get the values from the global variables first.
3277 	 * Use the MASK to limit the value to allowed maximum.
3278 	 */
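	/*
	 * (For instance, assuming HME_MASK_8BIT is 0xff, "hme_ipg1 &
	 * HME_MASK_8BIT" below clamps the global ipg1 value to the 8-bit
	 * range that the hardware register accepts.)
	 */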
3279 	hme_param_ipg1 = hme_ipg1 & HME_MASK_8BIT;
3280 	hme_param_ipg2 = hme_ipg2 & HME_MASK_8BIT;
3281 	hme_param_use_intphy = hme_use_int_xcvr & HME_MASK_1BIT;
3282 	hme_param_pace_count = hme_pace_size & HME_MASK_8BIT;
3283 	hme_param_autoneg = hme_adv_autoneg_cap;
3284 	hme_param_anar_100T4 = hme_adv_100T4_cap;
3285 	hme_param_anar_100fdx = hme_adv_100fdx_cap;
3286 	hme_param_anar_100hdx = hme_adv_100hdx_cap;
3287 	hme_param_anar_10fdx = hme_adv_10fdx_cap;
3288 	hme_param_anar_10hdx = hme_adv_10hdx_cap;
3289 	hme_param_ipg0 = hme_ipg0 & HME_MASK_5BIT;
3290 	hme_param_lance_mode = hme_lance_mode & HME_MASK_1BIT;
3291 
3292 	/*
3293 	 * The link speed may be forced to either 10 Mbps or 100 Mbps using the
3294 	 * property "transfer-speed". This may be done in OBP by using the
3295 	 * command "apply transfer-speed=<speed> <device>". The speed may be
3296 	 * either 10 or 100.
3297 	 */
3298 	if (ddi_getlongprop_buf(DDI_DEV_T_ANY, dip, 0,
3299 	    "transfer-speed", (caddr_t)&i, &prop_len) == DDI_PROP_SUCCESS) {
3300 		HME_DEBUG_MSG2(hmep, SEVERITY_LOW, PROP_MSG,
3301 		    "params:  transfer-speed property = %X", i);
3302 		hme_param_autoneg = 0;	/* force speed */
3303 		hme_param_anar_100T4 = 0;
3304 		hme_param_anar_100fdx = 0;
3305 		hme_param_anar_10fdx = 0;
3306 		if (i == 10) {
3307 			hme_param_anar_10hdx = 1;
3308 			hme_param_anar_100hdx = 0;
3309 		} else {
3310 			hme_param_anar_10hdx = 0;
3311 			hme_param_anar_100hdx = 1;
3312 		}
3313 	}
3314 
3315 	/*
3316 	 * Get the parameter values configured in .conf file.
3317 	 */
3318 	if (ddi_getlongprop_buf(DDI_DEV_T_ANY, dip, 0, "ipg1",
3319 	    (caddr_t)&hme_ipg1_conf, &prop_len) == DDI_PROP_SUCCESS) {
3320 		HME_DEBUG_MSG2(hmep, SEVERITY_LOW, PROP_MSG,
3321 		    "params: hme_ipg1 property = %X", hme_ipg1_conf);
3322 		hme_param_ipg1 = hme_ipg1_conf & HME_MASK_8BIT;
3323 	}
3324 
3325 	if (ddi_getlongprop_buf(DDI_DEV_T_ANY, dip, 0, "ipg2",
3326 	    (caddr_t)&hme_ipg2_conf, &prop_len) == DDI_PROP_SUCCESS) {
3327 		hme_param_ipg2 = hme_ipg2_conf & HME_MASK_8BIT;
3328 	}
3329 
3330 	if (ddi_getlongprop_buf(DDI_DEV_T_ANY, dip, 0, "use_int_xcvr",
3331 	    (caddr_t)&hme_use_int_xcvr_conf, &prop_len) == DDI_PROP_SUCCESS) {
3332 		hme_param_use_intphy = hme_use_int_xcvr_conf & HME_MASK_1BIT;
3333 	}
3334 
3335 	if (ddi_getlongprop_buf(DDI_DEV_T_ANY, dip, 0, "pace_size",
3336 	    (caddr_t)&hme_pace_count_conf, &prop_len) == DDI_PROP_SUCCESS) {
3337 		hme_param_pace_count = hme_pace_count_conf & HME_MASK_8BIT;
3338 	}
3339 
3340 	if (ddi_getlongprop_buf(DDI_DEV_T_ANY, dip, 0, "adv_autoneg_cap",
3341 	    (caddr_t)&hme_autoneg_conf, &prop_len) == DDI_PROP_SUCCESS) {
3342 		hme_param_autoneg = hme_autoneg_conf & HME_MASK_1BIT;
3343 	}
3344 
3345 	if (ddi_getlongprop_buf(DDI_DEV_T_ANY, dip, 0, "adv_100T4_cap",
3346 	    (caddr_t)&hme_anar_100T4_conf, &prop_len) == DDI_PROP_SUCCESS) {
3347 		hme_param_anar_100T4 = hme_anar_100T4_conf & HME_MASK_1BIT;
3348 	}
3349 
3350 	if (ddi_getlongprop_buf(DDI_DEV_T_ANY, dip, 0, "adv_100fdx_cap",
3351 	    (caddr_t)&hme_anar_100fdx_conf, &prop_len) == DDI_PROP_SUCCESS) {
3352 		hme_param_anar_100fdx = hme_anar_100fdx_conf & HME_MASK_1BIT;
3353 	}
3354 
3355 	if (ddi_getlongprop_buf(DDI_DEV_T_ANY, dip, 0, "adv_100hdx_cap",
3356 	    (caddr_t)&hme_anar_100hdx_conf, &prop_len) == DDI_PROP_SUCCESS) {
3357 		hme_param_anar_100hdx = hme_anar_100hdx_conf & HME_MASK_1BIT;
3358 	}
3359 
3360 	if (ddi_getlongprop_buf(DDI_DEV_T_ANY, dip, 0, "adv_10fdx_cap",
3361 	    (caddr_t)&hme_anar_10fdx_conf, &prop_len) == DDI_PROP_SUCCESS) {
3362 		hme_param_anar_10fdx = hme_anar_10fdx_conf & HME_MASK_1BIT;
3363 	}
3364 
3365 	if (ddi_getlongprop_buf(DDI_DEV_T_ANY, dip, 0, "adv_10hdx_cap",
3366 	    (caddr_t)&hme_anar_10hdx_conf, &prop_len) == DDI_PROP_SUCCESS) {
3367 		hme_param_anar_10hdx = hme_anar_10hdx_conf & HME_MASK_1BIT;
3368 	}
3369 
3370 	if (ddi_getlongprop_buf(DDI_DEV_T_ANY, dip, 0, "ipg0",
3371 	    (caddr_t)&hme_ipg0_conf, &prop_len) == DDI_PROP_SUCCESS) {
3372 		hme_param_ipg0 = hme_ipg0_conf & HME_MASK_5BIT;
3373 	}
3374 
3375 	if (ddi_getlongprop_buf(DDI_DEV_T_ANY, dip, 0, "lance_mode",
3376 	    (caddr_t)&hme_lance_mode_conf, &prop_len) == DDI_PROP_SUCCESS) {
3377 		hme_param_lance_mode = hme_lance_mode_conf & HME_MASK_1BIT;
3378 	}
3379 
3380 	if (hme_link_pulse_disabled)
3381 		hmep->hme_link_pulse_disabled = 1;
3382 	else if (ddi_getlongprop_buf(DDI_DEV_T_ANY, dip, 0,
3383 	    "link-pulse-disabled", (caddr_t)&i, &prop_len)
3384 	    == DDI_PROP_SUCCESS) {
3385 		HME_DEBUG_MSG1(hmep, SEVERITY_UNKNOWN, PROP_MSG,
3386 		    "params:  link-pulse-disable property found.");
3387 		hmep->hme_link_pulse_disabled = 1;
3388 	}
3389 	return (B_TRUE);
3390 }
3391 
3392 /*
3393  * Return 0 upon success, 1 on failure.
3394  */
3395 static uint_t
3396 hmestop(struct hme *hmep)
3397 {
3398 	/*
3399 	 * Disable the Tx dma engine.
3400 	 */
3401 	PUT_ETXREG(config, (GET_ETXREG(config) & ~HMET_CONFIG_TXDMA_EN));
3402 	HMEDELAY(((GET_ETXREG(state_mach) & 0x1f) == 0x1), HMEMAXRSTDELAY);
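	/*
	 * HMEDELAY busy-waits, up to HMEMAXRSTDELAY, for the given state
	 * machine condition to become true (0x1 presumably being the TX
	 * state machine's idle state).
	 */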
3403 
3404 	/*
3405 	 * Disable the Rx dma engine.
3406 	 */
3407 	PUT_ERXREG(config, (GET_ERXREG(config) & ~HMER_CONFIG_RXDMA_EN));
3408 	HMEDELAY(((GET_ERXREG(state_mach) & 0x3f) == 0), HMEMAXRSTDELAY);
3409 
3410 	/*
3411 	 * By this time all things should be quiet, so hit the
3412 	 * chip with a reset.
3413 	 */
3414 	PUT_GLOBREG(reset, HMEG_RESET_GLOBAL);
3415 
3416 	HMEDELAY((GET_GLOBREG(reset) == 0), HMEMAXRSTDELAY);
3417 	if (GET_GLOBREG(reset)) {
3418 		HME_DEBUG_MSG1(hmep, SEVERITY_HIGH, UNINIT_MSG,
3419 		    "cannot stop hme - failed to access device");
3420 		return (1);
3421 	}
3422 
3423 	CHECK_GLOBREG();
3424 	return (0);
3425 }
3426 
3427 static int
3428 hmestat_kstat_update(kstat_t *ksp, int rw)
3429 {
3430 	struct hme *hmep;
3431 	struct hmekstat *hkp;
3432 
3433 	hmep = (struct hme *)ksp->ks_private;
3434 	hkp = (struct hmekstat *)ksp->ks_data;
3435 
3436 	if (rw != KSTAT_READ)
3437 		return (EACCES);
3438 
3439 	/*
3440 	 * Update all the stats by reading all the counter registers.
3441 	 * Counter register stats are not updated till they overflow
3442 	 * and interrupt.
3443 	 */
3444 
3445 	mutex_enter(&hmep->hme_xmitlock);
3446 	if (hmep->hme_flags & HMERUNNING)
3447 		hmereclaim(hmep);
3448 	mutex_exit(&hmep->hme_xmitlock);
3449 
3450 	hmesavecntrs(hmep);
3451 
3452 	hkp->hk_cvc.value.ul		= hmep->hme_cvc;
3453 	hkp->hk_lenerr.value.ul		= hmep->hme_lenerr;
3454 	hkp->hk_buff.value.ul		= hmep->hme_buff;
3455 	hkp->hk_missed.value.ul		= hmep->hme_missed;
3456 	hkp->hk_allocbfail.value.ul	= hmep->hme_allocbfail;
3457 	hkp->hk_babl.value.ul		= hmep->hme_babl;
3458 	hkp->hk_tmder.value.ul		= hmep->hme_tmder;
3459 	hkp->hk_txlaterr.value.ul	= hmep->hme_txlaterr;
3460 	hkp->hk_rxlaterr.value.ul	= hmep->hme_rxlaterr;
3461 	hkp->hk_slvparerr.value.ul	= hmep->hme_slvparerr;
3462 	hkp->hk_txparerr.value.ul	= hmep->hme_txparerr;
3463 	hkp->hk_rxparerr.value.ul	= hmep->hme_rxparerr;
3464 	hkp->hk_slverrack.value.ul	= hmep->hme_slverrack;
3465 	hkp->hk_txerrack.value.ul	= hmep->hme_txerrack;
3466 	hkp->hk_rxerrack.value.ul	= hmep->hme_rxerrack;
3467 	hkp->hk_txtagerr.value.ul	= hmep->hme_txtagerr;
3468 	hkp->hk_rxtagerr.value.ul	= hmep->hme_rxtagerr;
3469 	hkp->hk_eoperr.value.ul		= hmep->hme_eoperr;
3470 	hkp->hk_notmds.value.ul		= hmep->hme_notmds;
3471 	hkp->hk_notbufs.value.ul	= hmep->hme_notbufs;
3472 	hkp->hk_norbufs.value.ul	= hmep->hme_norbufs;
3473 	/*
3474 	 * MIB II kstat variables
3475 	 */
3476 	hkp->hk_newfree.value.ul	= hmep->hme_newfree;
3477 
3478 	/*
3479 	 * Debug kstats
3480 	 */
3481 	hkp->hk_inits.value.ul		= hmep->inits;
3482 	hkp->hk_rxinits.value.ul	= hmep->rxinits;
3483 	hkp->hk_txinits.value.ul	= hmep->txinits;
3484 	hkp->hk_dmarh_inits.value.ul	= hmep->dmarh_init;
3485 	hkp->hk_dmaxh_inits.value.ul	= hmep->dmaxh_init;
3486 	hkp->hk_phyfail.value.ul	= hmep->phyfail;
3487 
3488 	/*
3489 	 * xcvr kstats
3490 	 */
3491 	hkp->hk_asic_rev.value.ul	= hmep->asic_rev;
3492 
3493 	return (0);
3494 }
3495 
3496 static void
3497 hmestatinit(struct hme *hmep)
3498 {
3499 	struct	kstat	*ksp;
3500 	struct	hmekstat	*hkp;
3501 	const char *driver;
3502 	int	instance;
3503 	char	buf[16];
3504 
3505 	instance = hmep->instance;
3506 	driver = ddi_driver_name(hmep->dip);
3507 
3508 	if ((ksp = kstat_create(driver, instance,
3509 	    "driver_info", "net", KSTAT_TYPE_NAMED,
3510 	    sizeof (struct hmekstat) / sizeof (kstat_named_t), 0)) == NULL) {
3511 		HME_FAULT_MSG1(hmep, SEVERITY_UNKNOWN, INIT_MSG,
3512 		    kstat_create_fail_msg);
3513 		return;
3514 	}
3515 
3516 	(void) snprintf(buf, sizeof (buf), "%sc%d", driver, instance);
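	/* e.g. instance 0 of "hme" yields an interrupt kstat named "hmec0" */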
3517 	hmep->hme_intrstats = kstat_create(driver, instance, buf, "controller",
3518 	    KSTAT_TYPE_INTR, 1, KSTAT_FLAG_PERSISTENT);
3519 	if (hmep->hme_intrstats)
3520 		kstat_install(hmep->hme_intrstats);
3521 
3522 	hmep->hme_ksp = ksp;
3523 	hkp = (struct hmekstat *)ksp->ks_data;
3524 	kstat_named_init(&hkp->hk_cvc,			"code_violations",
3525 	    KSTAT_DATA_ULONG);
3526 	kstat_named_init(&hkp->hk_lenerr,		"len_errors",
3527 	    KSTAT_DATA_ULONG);
3528 	kstat_named_init(&hkp->hk_buff,			"buff",
3529 	    KSTAT_DATA_ULONG);
3530 	kstat_named_init(&hkp->hk_missed,		"missed",
3531 	    KSTAT_DATA_ULONG);
3532 	kstat_named_init(&hkp->hk_nocanput,		"nocanput",
3533 	    KSTAT_DATA_ULONG);
3534 	kstat_named_init(&hkp->hk_allocbfail,		"allocbfail",
3535 	    KSTAT_DATA_ULONG);
3536 	kstat_named_init(&hkp->hk_babl,			"babble",
3537 	    KSTAT_DATA_ULONG);
3538 	kstat_named_init(&hkp->hk_tmder,		"tmd_error",
3539 	    KSTAT_DATA_ULONG);
3540 	kstat_named_init(&hkp->hk_txlaterr,		"tx_late_error",
3541 	    KSTAT_DATA_ULONG);
3542 	kstat_named_init(&hkp->hk_rxlaterr,		"rx_late_error",
3543 	    KSTAT_DATA_ULONG);
3544 	kstat_named_init(&hkp->hk_slvparerr,		"slv_parity_error",
3545 	    KSTAT_DATA_ULONG);
3546 	kstat_named_init(&hkp->hk_txparerr,		"tx_parity_error",
3547 	    KSTAT_DATA_ULONG);
3548 	kstat_named_init(&hkp->hk_rxparerr,		"rx_parity_error",
3549 	    KSTAT_DATA_ULONG);
3550 	kstat_named_init(&hkp->hk_slverrack,		"slv_error_ack",
3551 	    KSTAT_DATA_ULONG);
3552 	kstat_named_init(&hkp->hk_txerrack,		"tx_error_ack",
3553 	    KSTAT_DATA_ULONG);
3554 	kstat_named_init(&hkp->hk_rxerrack,		"rx_error_ack",
3555 	    KSTAT_DATA_ULONG);
3556 	kstat_named_init(&hkp->hk_txtagerr,		"tx_tag_error",
3557 	    KSTAT_DATA_ULONG);
3558 	kstat_named_init(&hkp->hk_rxtagerr,		"rx_tag_error",
3559 	    KSTAT_DATA_ULONG);
3560 	kstat_named_init(&hkp->hk_eoperr,		"eop_error",
3561 	    KSTAT_DATA_ULONG);
3562 	kstat_named_init(&hkp->hk_notmds,		"no_tmds",
3563 	    KSTAT_DATA_ULONG);
3564 	kstat_named_init(&hkp->hk_notbufs,		"no_tbufs",
3565 	    KSTAT_DATA_ULONG);
3566 	kstat_named_init(&hkp->hk_norbufs,		"no_rbufs",
3567 	    KSTAT_DATA_ULONG);
3568 
3569 	kstat_named_init(&hkp->hk_newfree,		"newfree",
3570 	    KSTAT_DATA_ULONG);
3571 
3572 	/*
3573 	 * Debugging kstats
3574 	 */
3575 	kstat_named_init(&hkp->hk_inits,		"inits",
3576 	    KSTAT_DATA_ULONG);
3577 	kstat_named_init(&hkp->hk_rxinits,		"rxinits",
3578 	    KSTAT_DATA_ULONG);
3579 	kstat_named_init(&hkp->hk_txinits,		"txinits",
3580 	    KSTAT_DATA_ULONG);
3581 	kstat_named_init(&hkp->hk_dmarh_inits,		"dmarh_inits",
3582 	    KSTAT_DATA_ULONG);
3583 	kstat_named_init(&hkp->hk_dmaxh_inits,		"dmaxh_inits",
3584 	    KSTAT_DATA_ULONG);
3585 	kstat_named_init(&hkp->hk_phyfail,		"phy_failures",
3586 	    KSTAT_DATA_ULONG);
3587 
3588 	/*
3589 	 * I/O bus kstats
3590 	 * kstat_named_init(&hkp->hk_pci_speed,		"pci_bus_speed",
3591 	 *		KSTAT_DATA_ULONG);
3592 	 * kstat_named_init(&hkp->hk_pci_size,		"pci_bus_width",
3593 	 *		KSTAT_DATA_ULONG);
3594 	 */
3595 
3596 	/*
3597 	 * xcvr kstats
3598 	 */
3599 	kstat_named_init(&hkp->hk_asic_rev,		"asic_rev",
3600 	    KSTAT_DATA_ULONG);
3601 
3602 	ksp->ks_update = hmestat_kstat_update;
3603 	ksp->ks_private = (void *) hmep;
3604 	kstat_install(ksp);
3605 }
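
/*
 * Usage note (a sketch, not part of the driver): once installed, the
 * named kstats above can be read from userland with kstat(1M), e.g.
 *
 *	kstat -m hme -i 0 -n driver_info
 *
 * assuming instance 0.  The interrupt kstat appears under the
 * "controller" class with a name such as "hmec0".
 */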
3606 
3607 static void
3608 hme_m_ioctl(void *arg, queue_t *wq, mblk_t *mp)
3609 {
3610 	struct	hme	*hmep = arg;
3611 	struct	iocblk	*iocp = (void *)mp->b_rptr;
3612 	uint32_t old_ipg1, old_ipg2, old_use_int_xcvr, old_autoneg;
3613 	uint32_t old_100T4;
3614 	uint32_t old_100fdx, old_100hdx, old_10fdx, old_10hdx;
3615 	uint32_t old_ipg0, old_lance_mode;
3616 
3617 	switch (iocp->ioc_cmd) {
3618 
3619 	case HME_ND_GET:
3620 
3621 		old_autoneg = hme_param_autoneg;
3622 		old_100T4 = hme_param_anar_100T4;
3623 		old_100fdx = hme_param_anar_100fdx;
3624 		old_100hdx = hme_param_anar_100hdx;
3625 		old_10fdx = hme_param_anar_10fdx;
3626 		old_10hdx = hme_param_anar_10hdx;
3627 
3628 		hme_param_autoneg = old_autoneg & ~HME_NOTUSR;
3629 		hme_param_anar_100T4 = old_100T4 & ~HME_NOTUSR;
3630 		hme_param_anar_100fdx = old_100fdx & ~HME_NOTUSR;
3631 		hme_param_anar_100hdx = old_100hdx & ~HME_NOTUSR;
3632 		hme_param_anar_10fdx = old_10fdx & ~HME_NOTUSR;
3633 		hme_param_anar_10hdx = old_10hdx & ~HME_NOTUSR;
3634 
3635 		if (!hme_nd_getset(wq, hmep->hme_g_nd, mp)) {
3636 			hme_param_autoneg = old_autoneg;
3637 			hme_param_anar_100T4 = old_100T4;
3638 			hme_param_anar_100fdx = old_100fdx;
3639 			hme_param_anar_100hdx = old_100hdx;
3640 			hme_param_anar_10fdx = old_10fdx;
3641 			hme_param_anar_10hdx = old_10hdx;
3642 			miocnak(wq, mp, 0, EINVAL);
3643 			return;
3644 		}
3645 		hme_param_autoneg = old_autoneg;
3646 		hme_param_anar_100T4 = old_100T4;
3647 		hme_param_anar_100fdx = old_100fdx;
3648 		hme_param_anar_100hdx = old_100hdx;
3649 		hme_param_anar_10fdx = old_10fdx;
3650 		hme_param_anar_10hdx = old_10hdx;
3651 
3652 		qreply(wq, mp);
3653 		break;
3654 
3655 	case HME_ND_SET:
3656 		old_ipg0 = hme_param_ipg0;
3657 		old_lance_mode = hme_param_lance_mode;
3658 		old_ipg1 = hme_param_ipg1;
3659 		old_ipg2 = hme_param_ipg2;
3660 		old_use_int_xcvr = hme_param_use_intphy;
3661 		old_autoneg = hme_param_autoneg;
3662 		hme_param_autoneg = 0xff;
3663 
3664 		if (!hme_nd_getset(wq, hmep->hme_g_nd, mp)) {
3665 			hme_param_autoneg = old_autoneg;
3666 			miocnak(wq, mp, 0, EINVAL);
3667 			return;
3668 		}
3669 
3670 		qreply(wq, mp);
3671 
3672 		if (hme_param_autoneg != 0xff) {
3673 			hmep->hme_linkcheck = 0;
3674 			(void) hmeinit(hmep);
3675 		} else {
3676 			hme_param_autoneg = old_autoneg;
3677 			if (old_use_int_xcvr != hme_param_use_intphy) {
3678 				hmep->hme_linkcheck = 0;
3679 				(void) hmeinit(hmep);
3680 			} else if ((old_ipg1 != hme_param_ipg1) ||
3681 			    (old_ipg2 != hme_param_ipg2) ||
3682 			    (old_ipg0 != hme_param_ipg0) ||
3683 			    (old_lance_mode != hme_param_lance_mode)) {
3684 				(void) hmeinit(hmep);
3685 			}
3686 		}
3687 		break;
3688 
3689 	default:
3690 		miocnak(wq, mp, 0, EINVAL);
3691 		break;
3692 	}
3693 }
3694 
3695 /*ARGSUSED*/
3696 static boolean_t
3697 hme_m_getcapab(void *arg, mac_capab_t cap, void *cap_data)
3698 {
3699 	switch (cap) {
3700 	case MAC_CAPAB_HCKSUM:
3701 		*(uint32_t *)cap_data = HCKSUM_INET_PARTIAL;
3702 		return (B_TRUE);
3703 	default:
3704 		return (B_FALSE);
3705 	}
3706 }
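
/*
 * Note (informational, based on the csflags handling below): advertising
 * HCKSUM_INET_PARTIAL means the MAC layer may hand us packets with
 * HCK_PARTIALCKSUM set; hmestart()/hmestart_dma() retrieve the start
 * and stuff offsets via hcksum_retrieve() and program them into the
 * transmit descriptor so that the hardware inserts the checksum.
 */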
3707 
3708 static int
3709 hme_m_promisc(void *arg, boolean_t on)
3710 {
3711 	struct hme *hmep = arg;
3712 
3713 	hmep->hme_promisc = on;
3714 	(void) hmeinit(hmep);
3715 	return (0);
3716 }
3717 
3718 static int
3719 hme_m_unicst(void *arg, const uint8_t *macaddr)
3720 {
3721 	struct hme *hmep = arg;
3722 
3723 	/*
3724 	 * Set new interface local address and re-init device.
3725 	 * This is destructive to any other streams attached
3726 	 * to this device.
3727 	 */
3728 	mutex_enter(&hmep->hme_intrlock);
3729 	bcopy(macaddr, &hmep->hme_ouraddr, ETHERADDRL);
3730 	mutex_exit(&hmep->hme_intrlock);
3731 	(void) hmeinit(hmep);
3732 	return (0);
3733 }
3734 
3735 static int
3736 hme_m_multicst(void *arg, boolean_t add, const uint8_t *macaddr)
3737 {
3738 	struct hme	*hmep = arg;
3739 	uint32_t	ladrf_bit;
3740 	boolean_t	doinit = B_FALSE;
3741 
3742 	/*
3743 	 * If this address's bit was not already set in the local address
3744 	 * filter, add it and re-initialize the Hardware.
3745 	 */
3746 	ladrf_bit = hmeladrf_bit(macaddr);
3747 
3748 	mutex_enter(&hmep->hme_intrlock);
3749 	if (add) {
3750 		hmep->hme_ladrf_refcnt[ladrf_bit]++;
3751 		if (hmep->hme_ladrf_refcnt[ladrf_bit] == 1) {
3752 			hmep->hme_ladrf[ladrf_bit >> 4] |=
3753 			    1 << (ladrf_bit & 0xf);
3754 			hmep->hme_multi++;
3755 			doinit = B_TRUE;
3756 		}
3757 	} else {
3758 		hmep->hme_ladrf_refcnt[ladrf_bit]--;
3759 		if (hmep->hme_ladrf_refcnt[ladrf_bit] == 0) {
3760 			hmep->hme_ladrf[ladrf_bit >> 4] &=
3761 			    ~(1 << (ladrf_bit & 0xf));
3762 			doinit = B_TRUE;
3763 		}
3764 	}
3765 	mutex_exit(&hmep->hme_intrlock);
3766 
3767 	if (doinit)
3768 		(void) hmeinit(hmep);
3769 
3770 	return (0);
3771 }
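
/*
 * Worked example of the mapping above: for a CRC-derived ladrf_bit of
 * 37, 37 >> 4 == 2 selects hash word hme_ladrf[2] and 37 & 0xf == 5
 * selects bit 5 within that 16-bit word.
 */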
3772 
3773 static int
3774 hme_m_start(void *arg)
3775 {
3776 	struct hme *hmep = arg;
3777 
3778 	if (hmeinit(hmep) != 0) {
3779 		/* initialization failed -- really want DL_INITFAILED */
3780 		return (EIO);
3781 	} else {
3782 		hmep->hme_started = B_TRUE;
3783 		return (0);
3784 	}
3785 }
3786 
3787 static void
3788 hme_m_stop(void *arg)
3789 {
3790 	struct hme *hmep = arg;
3791 
3792 	hmep->hme_started = B_FALSE;
3793 	hmeuninit(hmep);
3794 }
3795 
3796 static int
3797 hme_m_stat(void *arg, uint_t stat, uint64_t *val)
3798 {
3799 	struct hme	*hmep = arg;
3800 
3801 	mutex_enter(&hmep->hme_xmitlock);
3802 	if (hmep->hme_flags & HMERUNNING)
3803 		hmereclaim(hmep);
3804 	mutex_exit(&hmep->hme_xmitlock);
3805 
3806 	hmesavecntrs(hmep);
3807 
3808 	switch (stat) {
3809 	case MAC_STAT_IFSPEED:
3810 		*val = hmep->hme_ifspeed * 1000000;
3811 		break;
3812 	case MAC_STAT_IPACKETS:
3813 		*val = hmep->hme_ipackets;
3814 		break;
3815 	case MAC_STAT_RBYTES:
3816 		*val = hmep->hme_rbytes;
3817 		break;
3818 	case MAC_STAT_IERRORS:
3819 		*val = hmep->hme_ierrors;
3820 		break;
3821 	case MAC_STAT_OPACKETS:
3822 		*val = hmep->hme_opackets;
3823 		break;
3824 	case MAC_STAT_OBYTES:
3825 		*val = hmep->hme_obytes;
3826 		break;
3827 	case MAC_STAT_OERRORS:
3828 		*val = hmep->hme_oerrors;
3829 		break;
3830 	case MAC_STAT_MULTIRCV:
3831 		*val = hmep->hme_multircv;
3832 		break;
3833 	case MAC_STAT_MULTIXMT:
3834 		*val = hmep->hme_multixmt;
3835 		break;
3836 	case MAC_STAT_BRDCSTRCV:
3837 		*val = hmep->hme_brdcstrcv;
3838 		break;
3839 	case MAC_STAT_BRDCSTXMT:
3840 		*val = hmep->hme_brdcstxmt;
3841 		break;
3842 	case MAC_STAT_UNDERFLOWS:
3843 		*val = hmep->hme_uflo;
3844 		break;
3845 	case MAC_STAT_OVERFLOWS:
3846 		*val = hmep->hme_oflo;
3847 		break;
3848 	case MAC_STAT_COLLISIONS:
3849 		*val = hmep->hme_coll;
3850 		break;
3851 	case MAC_STAT_NORCVBUF:
3852 		*val = hmep->hme_norcvbuf;
3853 		break;
3854 	case MAC_STAT_NOXMTBUF:
3855 		*val = hmep->hme_noxmtbuf;
3856 		break;
3857 	case ETHER_STAT_LINK_DUPLEX:
3858 		*val = hmep->hme_duplex;
3859 		break;
3860 	case ETHER_STAT_ALIGN_ERRORS:
3861 		*val = hmep->hme_align_errors;
3862 		break;
3863 	case ETHER_STAT_FCS_ERRORS:
3864 		*val = hmep->hme_fcs_errors;
3865 		break;
3866 	case ETHER_STAT_EX_COLLISIONS:
3867 		*val = hmep->hme_excol;
3868 		break;
3869 	case ETHER_STAT_DEFER_XMTS:
3870 		*val = hmep->hme_defer_xmts;
3871 		break;
3872 	case ETHER_STAT_SQE_ERRORS:
3873 		*val = hmep->hme_sqe_errors;
3874 		break;
3875 	case ETHER_STAT_FIRST_COLLISIONS:
3876 		*val = hmep->hme_fstcol;
3877 		break;
3878 	case ETHER_STAT_TX_LATE_COLLISIONS:
3879 		*val = hmep->hme_tlcol;
3880 		break;
3881 	case ETHER_STAT_TOOLONG_ERRORS:
3882 		*val = hmep->hme_toolong_errors;
3883 		break;
3884 	case ETHER_STAT_TOOSHORT_ERRORS:
3885 		*val = hmep->hme_runt;
3886 		break;
3887 	case ETHER_STAT_XCVR_ADDR:
3888 		*val = hmep->hme_phyad;
3889 		break;
3890 	case ETHER_STAT_XCVR_ID:
3891 		*val = (hmep->hme_idr1 << 16U) | (hmep->hme_idr2);
3892 		break;
3893 	case ETHER_STAT_XCVR_INUSE:
3894 		switch (hmep->hme_transceiver) {
3895 		case HME_INTERNAL_TRANSCEIVER:
3896 			*val = XCVR_100X;
3897 			break;
3898 		case HME_NO_TRANSCEIVER:
3899 			*val = XCVR_NONE;
3900 			break;
3901 		default:
3902 			*val = XCVR_UNDEFINED;
3903 			break;
3904 		}
3905 		break;
3906 	case ETHER_STAT_CAP_100T4:
3907 		*val = hme_param_bmsr_100T4;
3908 		break;
3909 	case ETHER_STAT_ADV_CAP_100T4:
3910 		*val = hme_param_anar_100T4 & ~HME_NOTUSR;
3911 		break;
3912 	case ETHER_STAT_LP_CAP_100T4:
3913 		*val = hme_param_anlpar_100T4;
3914 		break;
3915 	case ETHER_STAT_CAP_100FDX:
3916 		*val = hme_param_bmsr_100fdx;
3917 		break;
3918 	case ETHER_STAT_ADV_CAP_100FDX:
3919 		*val = hme_param_anar_100fdx & ~HME_NOTUSR;
3920 		break;
3921 	case ETHER_STAT_LP_CAP_100FDX:
3922 		*val = hme_param_anlpar_100fdx;
3923 		break;
3924 	case ETHER_STAT_CAP_100HDX:
3925 		*val = hme_param_bmsr_100hdx;
3926 		break;
3927 	case ETHER_STAT_ADV_CAP_100HDX:
3928 		*val = hme_param_anar_100hdx & ~HME_NOTUSR;
3929 		break;
3930 	case ETHER_STAT_LP_CAP_100HDX:
3931 		*val = hme_param_anlpar_100hdx;
3932 		break;
3933 	case ETHER_STAT_CAP_10FDX:
3934 		*val = hme_param_bmsr_10fdx;
3935 		break;
3936 	case ETHER_STAT_ADV_CAP_10FDX:
3937 		*val = hme_param_anar_10fdx & ~HME_NOTUSR;
3938 		break;
3939 	case ETHER_STAT_LP_CAP_10FDX:
3940 		*val = hme_param_anlpar_10fdx;
3941 		break;
3942 	case ETHER_STAT_CAP_10HDX:
3943 		*val = hme_param_bmsr_10hdx;
3944 		break;
3945 	case ETHER_STAT_ADV_CAP_10HDX:
3946 		*val = hme_param_anar_10hdx & ~HME_NOTUSR;
3947 		break;
3948 	case ETHER_STAT_LP_CAP_10HDX:
3949 		*val = hme_param_anlpar_10hdx;
3950 		break;
3951 	case ETHER_STAT_CAP_AUTONEG:
3952 		*val = hme_param_bmsr_ancap;
3953 		break;
3954 	case ETHER_STAT_ADV_CAP_AUTONEG:
3955 		*val = hme_param_autoneg & ~HME_NOTUSR;
3956 		break;
3957 	case ETHER_STAT_LP_CAP_AUTONEG:
3958 		*val = hme_param_aner_lpancap;
3959 		break;
3960 	default:
3961 		return (EINVAL);
3962 	}
3963 	return (0);
3964 }
3965 
3966 static mblk_t *
3967 hme_m_tx(void *arg, mblk_t *mp)
3968 {
3969 	struct hme *hmep = arg;
3970 	mblk_t *next;
3971 
3972 	while (mp != NULL) {
3973 		next = mp->b_next;
3974 		mp->b_next = NULL;
3975 		if (!hmestart(hmep, mp)) {
3976 			mp->b_next = next;
3977 			break;
3978 		}
3979 		mp = next;
3980 	}
3981 	return (mp);
3982 }
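
/*
 * Note on the return convention: when hmestart() fails (typically no
 * free tmds), the unsent chain is handed back to the MAC layer, which
 * requeues it and holds off further calls until mac_tx_update() is
 * invoked (see hmeinit()).
 */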
3983 
3984 /*
3985  * Software IP checksum, for the edge cases that the
3986  * hardware can't handle.  See hmestart for more info.
3987  */
3988 static uint16_t
3989 hme_cksum(void *data, int len)
3990 {
3991 	uint16_t	*words = data;
3992 	int		i, nwords = len / 2;
3993 	uint32_t	sum = 0;
3994 
3995 	/* just add up the words */
3996 	for (i = 0; i < nwords; i++) {
3997 		sum += *words++;
3998 	}
3999 
4000 	/* pick up residual byte ... assume even half-word allocations */
4001 	if (len % 2) {
4002 		sum += (*words & 0xff00);
4003 	}
4004 
4005 	sum = (sum >> 16) + (sum & 0xffff);
4006 	sum = (sum >> 16) + (sum & 0xffff);
4007 
4008 	return (~(sum & 0xffff));
4009 }
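
/*
 * Worked example of the double fold above: a 32-bit sum of 0x0001ffff
 * folds once to 0x0001 + 0xffff = 0x10000, so a second fold is needed
 * to reach 0x0001; the ones-complement result 0xfffe is returned.
 */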
4010 
4011 static boolean_t
4012 hmestart_dma(struct hme *hmep, mblk_t *mp)
4013 {
4014 	volatile	struct	hme_tmd	*tmdp1 = NULL;
4015 	volatile	struct	hme_tmd	*tmdp2 = NULL;
4016 	volatile	struct	hme_tmd	*ntmdp = NULL;
4017 	mblk_t  *bp;
4018 	uint32_t len1, len2;
4019 	uint32_t temp_addr;
4020 	int32_t	syncval;
4021 	ulong_t i, j;
4022 	ddi_dma_cookie_t c;
4023 	uint_t cnt;
4024 	boolean_t retval = B_TRUE;
4025 
4026 	uint32_t	csflags = 0;
4027 	uint32_t	flags;
4028 	uint32_t	start_offset;
4029 	uint32_t	stuff_offset;
4030 
4031 	hcksum_retrieve(mp, NULL, NULL, &start_offset, &stuff_offset,
4032 	    NULL, NULL, &flags);
4033 
4034 	if (flags & HCK_PARTIALCKSUM) {
4035 		if (get_ether_type(mp->b_rptr) == ETHERTYPE_VLAN) {
4036 			start_offset += sizeof (struct ether_header) + 4;
4037 			stuff_offset += sizeof (struct ether_header) + 4;
4038 		} else {
4039 			start_offset += sizeof (struct ether_header);
4040 			stuff_offset += sizeof (struct ether_header);
4041 		}
4042 		csflags = HMETMD_CSENABL |
4043 		    (start_offset << HMETMD_CSSTART_SHIFT) |
4044 		    (stuff_offset << HMETMD_CSSTUFF_SHIFT);
4045 	}
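	/*
	 * Example (illustrative; actual offsets depend on what the stack
	 * passes down): for an untagged frame with start_offset 20 and
	 * stuff_offset 36 from the stack, the adjusted values become 34
	 * and 50 after adding the 14-byte ether header, and are packed
	 * into the descriptor via the shifts above.
	 */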
4046 
4047 	mutex_enter(&hmep->hme_xmitlock);
4048 
4049 	if (hmep->hme_tnextp > hmep->hme_tcurp) {
4050 		if ((hmep->hme_tnextp - hmep->hme_tcurp) > HMETPENDING)
4051 			hmereclaim(hmep);
4052 	} else {
4053 		i = hmep->hme_tcurp - hmep->hme_tnextp;
4054 		if (i && (i < (HME_TMDMAX - HMETPENDING)))
4055 			hmereclaim(hmep);
4056 	}
4057 	tmdp1 = hmep->hme_tnextp;
4058 	if ((ntmdp = NEXTTMD(hmep, tmdp1)) == hmep->hme_tcurp)
4059 		goto notmds;
4060 
4061 	i = tmdp1 - hmep->hme_tmdp;
4062 
4063 	/*
4064 	 * Here we deal with 3 cases:
4065 	 *	1. the pkt has exactly one mblk
4066 	 *	2. the pkt has exactly two mblks
4067 	 *	3. the pkt has more than 2 mblks. Since this
4068 	 *		rarely happens, we copy all of them
4069 	 *		into a msg with one mblk.
4070 	 * For each mblk in the message, we allocate a tmd and
4071 	 * figure out the tmd index. The index is then used to bind
4072 	 * a DMA handle to the mblk and set up an IO mapping.
4073 	 */
4074 
4075 	/*
4076 	 * Note that for checksum offload, the hardware cannot
4077 	 * generate correct checksums if the packet is smaller than
4078 	 * 64-bytes.  In such a case, we bcopy the packet and use
4079 	 * a software checksum.
4080 	 */
4081 
4082 	ASSERT(mp->b_wptr >= mp->b_rptr);
4083 	len1 = mp->b_wptr - mp->b_rptr;
4084 	bp = mp->b_cont;
4085 
4086 	if (bp == NULL && (len1 >= 64)) {
4087 		len2 = 0;
4088 
4089 		HME_DEBUG_MSG3(hmep, SEVERITY_UNKNOWN, TX_MSG,
4090 		    "hmestart: 1 buf: len = %ld b_rptr = %p",
4091 		    len1, mp->b_rptr);
4092 	} else if ((bp != NULL) && (bp->b_cont == NULL) &&
4093 	    ((len2 = bp->b_wptr - bp->b_rptr) >= 4) &&
4094 	    ((len1 + len2) >= 64)) {
4095 
4096 		ASSERT(bp->b_wptr >= bp->b_rptr);
4097 
4098 		tmdp2 = ntmdp;
4099 		if ((ntmdp = NEXTTMD(hmep, tmdp2)) == hmep->hme_tcurp)
4100 			goto notmds;
4101 		j = tmdp2 - hmep->hme_tmdp;
4102 
4103 		HME_DEBUG_MSG5(hmep, SEVERITY_UNKNOWN, TX_MSG,
4104 		    "hmestart: 2 buf: len = %ld b_rptr = %p, "
4105 		    "len = %ld b_rptr = %p",
4106 		    len1, mp->b_rptr, len2, bp->b_rptr);
4107 	} else {
4108 		len1 = msgsize(mp);
4109 		if ((bp = hmeallocb(len1, BPRI_HI)) == NULL) {
4110 			hmep->hme_allocbfail++;
4111 			goto bad;
4112 		}
4113 
4114 		mcopymsg(mp, bp->b_rptr);
4115 		mp = bp;
4116 
4117 		bp = NULL;
4118 		len2 = 0;
4119 
4120 		if ((csflags != 0) && (len1 < 64)) {
4121 			uint16_t sum;
4122 			sum = hme_cksum(mp->b_rptr + start_offset,
4123 			    len1 - start_offset);
4124 			bcopy(&sum, mp->b_rptr + stuff_offset, sizeof (sum));
4125 			csflags = 0;
4126 		}
4127 
4128 		HME_DEBUG_MSG3(hmep, SEVERITY_NONE, TX_MSG,
4129 		    "hmestart: > 1 buf: len = %ld b_rptr = %p",
4130 		    len1, mp->b_rptr);
4131 	}
4132 
4133 
4134 	if (ddi_dma_alloc_handle(hmep->dip, &hme_dma_attr, DDI_DMA_DONTWAIT,
4135 	    NULL, &hmep->hme_dmaxh[i])) {
4136 		HME_FAULT_MSG1(hmep, SEVERITY_HIGH, TX_MSG,
4137 		    "ddi_dma_alloc_handle failed");
4138 		goto done;
4139 	}
4140 
4141 	if (ddi_dma_addr_bind_handle(hmep->hme_dmaxh[i], NULL,
4142 	    (caddr_t)mp->b_rptr, len1, DDI_DMA_RDWR, DDI_DMA_DONTWAIT,
4143 	    NULL, &c, &cnt) != DDI_DMA_MAPPED) {
4144 		HME_FAULT_MSG1(hmep, SEVERITY_HIGH, TX_MSG,
4145 		    "ddi_dma_addr_bind_handle failed");
4146 		ddi_dma_free_handle(&hmep->hme_dmaxh[i]);
4147 		goto done;
4148 	}
4149 
4150 	/* apparently they don't handle multiple cookies */
4151 	if (cnt > 1) {
4152 		HME_FAULT_MSG1(hmep, SEVERITY_HIGH, INIT_MSG,
4153 		    "dmaxh crossed page boundary - failed");
4154 		(void) ddi_dma_unbind_handle(hmep->hme_dmaxh[i]);
4155 		ddi_dma_free_handle(&hmep->hme_dmaxh[i]);
4156 		goto done;
4157 	}
4158 
4159 	syncval = ddi_dma_sync(hmep->hme_dmaxh[i], (off_t)0, len1,
4160 	    DDI_DMA_SYNC_FORDEV);
4161 	if (syncval == DDI_FAILURE)
4162 		HME_FAULT_MSG1(hmep, SEVERITY_HIGH, DDI_MSG,
4163 		    "ddi_dma_sync failed");
4164 
4165 	if (bp) {
4166 		temp_addr = c.dmac_address;
4167 		if (ddi_dma_alloc_handle(hmep->dip, &hme_dma_attr,
4168 		    DDI_DMA_DONTWAIT, NULL, &hmep->hme_dmaxh[j])) {
4169 			HME_FAULT_MSG1(hmep, SEVERITY_HIGH, TX_MSG,
4170 			    "ddi_dma_alloc_handle failed");
4171 			goto done;
4172 		}
4173 
4174 		if (ddi_dma_addr_bind_handle(hmep->hme_dmaxh[j], NULL,
4175 		    (caddr_t)bp->b_rptr, len2, DDI_DMA_RDWR, DDI_DMA_DONTWAIT,
4176 		    NULL, &c, &cnt) != DDI_DMA_MAPPED) {
4177 			HME_FAULT_MSG1(hmep, SEVERITY_HIGH, TX_MSG,
4178 			    "ddi_dma_addr_bind_handle failed");
4179 			ddi_dma_free_handle(&hmep->hme_dmaxh[j]);
4180 			ddi_dma_free_handle(&hmep->hme_dmaxh[i]);
4181 			goto done;
4182 		}
4183 
4184 		/* apparently they don't handle multiple cookies */
4185 		if (cnt > 1) {
4186 			HME_FAULT_MSG1(hmep, SEVERITY_HIGH, INIT_MSG,
4187 			    "dmaxh crossed page boundary - failed");
4188 			(void) ddi_dma_unbind_handle(hmep->hme_dmaxh[i]);
4189 			ddi_dma_free_handle(&hmep->hme_dmaxh[i]);
4190 			(void) ddi_dma_unbind_handle(hmep->hme_dmaxh[j]);
4191 			ddi_dma_free_handle(&hmep->hme_dmaxh[j]);
4192 			goto done;
4193 		}
4194 
4195 		syncval = ddi_dma_sync(hmep->hme_dmaxh[j], 0, len2,
4196 		    DDI_DMA_SYNC_FORDEV);
4197 		if (syncval == DDI_FAILURE)
4198 			HME_FAULT_MSG1(hmep, SEVERITY_HIGH, DDI_MSG,
4199 			    "ddi_dma_sync failed");
4200 	}
4201 
4202 	if (bp) {
4203 		PUT_TMD(tmdp2, c.dmac_address, len2, HMETMD_EOP);
4204 		HMESYNCIOPB(hmep, tmdp2, sizeof (struct hme_tmd),
4205 		    DDI_DMA_SYNC_FORDEV);
4206 
4207 		PUT_TMD(tmdp1, temp_addr, len1, HMETMD_SOP | csflags);
4208 		HMESYNCIOPB(hmep, tmdp1, sizeof (struct hme_tmd),
4209 		    DDI_DMA_SYNC_FORDEV);
4210 		mp->b_cont = NULL;
4211 		hmep->hme_tmblkp[i] = mp;
4212 		hmep->hme_tmblkp[j] = bp;
4213 	} else {
4214 		PUT_TMD(tmdp1, c.dmac_address, len1,
4215 		    HMETMD_SOP | HMETMD_EOP | csflags);
4216 		HMESYNCIOPB(hmep, tmdp1, sizeof (struct hme_tmd),
4217 		    DDI_DMA_SYNC_FORDEV);
4218 		hmep->hme_tmblkp[i] = mp;
4219 	}
4220 	CHECK_IOPB();
4221 
4222 	hmep->hme_tnextp = ntmdp;
4223 	PUT_ETXREG(txpend, HMET_TXPEND_TDMD);
4224 	CHECK_ETXREG();
4225 
4226 	mutex_exit(&hmep->hme_xmitlock);
4227 
4228 	hmep->hme_starts++;
4229 	return (B_TRUE);
4230 
4231 bad:
4232 	mutex_exit(&hmep->hme_xmitlock);
4233 	freemsg(mp);
4234 	return (B_TRUE);
4235 
4236 notmds:
4237 	hmep->hme_notmds++;
4238 	hmep->hme_wantw = B_TRUE;
4239 	hmep->hme_tnextp = tmdp1;
4240 	hmereclaim(hmep);
4241 	retval = B_FALSE;
4242 done:
4243 	mutex_exit(&hmep->hme_xmitlock);
4244 
4245 	return (retval);
4246 
4247 
4248 }
4249 
4250 /*
4251  * Start transmission.
4252  * Return B_TRUE on success,
4253  * otherwise put msg on wq, set 'want' flag and return B_FALSE.
4254  */
4255 static boolean_t
4256 hmestart(struct hme *hmep, mblk_t *mp)
4257 {
4258 	volatile struct hme_tmd *tmdp1 = NULL;
4259 	volatile struct hme_tmd *tmdp2 = NULL;
4260 	volatile struct hme_tmd *ntmdp = NULL;
4261 	mblk_t  *bp;
4262 	uint32_t len1, len2;
4263 	uint32_t temp_addr;
4264 	uint32_t i, j;
4265 	ddi_dma_cookie_t c;
4266 	boolean_t retval = B_TRUE;
4267 
4268 	uint32_t	csflags = 0;
4269 	uint32_t	flags;
4270 	uint32_t	start_offset;
4271 	uint32_t	stuff_offset;
4272 
4273 	/*
4274 	 * update MIB II statistics
4275 	 */
4276 	BUMP_OutNUcast(hmep, mp->b_rptr);
4277 
4278 	if (hmep->hme_dvmaxh == NULL)
4279 		return (hmestart_dma(hmep, mp));
4280 
4281 	hcksum_retrieve(mp, NULL, NULL, &start_offset, &stuff_offset,
4282 	    NULL, NULL, &flags);
4283 
4284 	if (flags & HCK_PARTIALCKSUM) {
4285 		if (get_ether_type(mp->b_rptr) == ETHERTYPE_VLAN) {
4286 			start_offset += sizeof (struct ether_header) + 4;
4287 			stuff_offset += sizeof (struct ether_header) + 4;
4288 		} else {
4289 			start_offset += sizeof (struct ether_header);
4290 			stuff_offset += sizeof (struct ether_header);
4291 		}
4292 		csflags = HMETMD_CSENABL |
4293 		    (start_offset << HMETMD_CSSTART_SHIFT) |
4294 		    (stuff_offset << HMETMD_CSSTUFF_SHIFT);
4295 	}
4296 
4297 	mutex_enter(&hmep->hme_xmitlock);
4298 
4299 	/*
4300 	 * reclaim if there are more than HMETPENDING descriptors
4301 	 * to be reclaimed.
4302 	 */
4303 	if (hmep->hme_tnextp > hmep->hme_tcurp) {
4304 		if ((hmep->hme_tnextp - hmep->hme_tcurp) > HMETPENDING) {
4305 			hmereclaim(hmep);
4306 		}
4307 	} else {
4308 		i = hmep->hme_tcurp - hmep->hme_tnextp;
4309 		if (i && (i < (HME_TMDMAX - HMETPENDING))) {
4310 			hmereclaim(hmep);
4311 		}
4312 	}
4313 
4314 	tmdp1 = hmep->hme_tnextp;
4315 	if ((ntmdp = NEXTTMD(hmep, tmdp1)) == hmep->hme_tcurp)
4316 		goto notmds;
4317 
4318 	i = tmdp1 - hmep->hme_tmdp;
4319 
4320 	/*
4321 	 * Here we deal with 3 cases:
4322 	 *	1. the pkt has exactly one mblk
4323 	 *	2. the pkt has exactly two mblks
4324 	 *	3. the pkt has more than 2 mblks. Since this
4325 	 *		rarely happens, we copy all of them
4326 	 *		into a msg with one mblk.
4327 	 * For each mblk in the message, we allocate a tmd and
4328 	 * figure out the tmd index. This index is also passed to
4329 	 * dvma_kaddr_load(), which establishes the IO mapping
4330 	 * for the mblk data. It is also used as an index into
4331 	 * the ptes reserved by dvma_reserve.
4332 	 */
4333 
4334 	/*
4335 	 * Note that for checksum offload, the hardware cannot
4336 	 * generate correct checksums if the packet is smaller than
4337 	 * 64-bytes.  In such a case, we bcopy the packet and use
4338 	 * a software checksum.
4339 	 */
4340 
4341 	bp = mp->b_cont;
4342 
4343 	len1 = mp->b_wptr - mp->b_rptr;
4344 	if (bp == NULL && (len1 >= 64)) {
4345 		dvma_kaddr_load(hmep->hme_dvmaxh, (caddr_t)mp->b_rptr,
4346 		    len1, 2 * i, &c);
4347 		dvma_sync(hmep->hme_dvmaxh, 2 * i, DDI_DMA_SYNC_FORDEV);
4348 
4349 		PUT_TMD(tmdp1, c.dmac_address, len1,
4350 		    HMETMD_SOP | HMETMD_EOP | csflags);
4351 
4352 		HMESYNCIOPB(hmep, tmdp1, sizeof (struct hme_tmd),
4353 		    DDI_DMA_SYNC_FORDEV);
4354 		hmep->hme_tmblkp[i] = mp;
4355 
4356 	} else {
4357 
4358 		if ((bp != NULL) && (bp->b_cont == NULL) &&
4359 		    ((len2 = bp->b_wptr - bp->b_rptr) >= 4) &&
4360 		    ((len1 + len2) >= 64)) {
4361 			/*
4362 			 * Check with HW: The minimum len restriction
4363 			 * different for 64-bit burst ?
4364 			 */
4365 			tmdp2 = ntmdp;
4366 			if ((ntmdp = NEXTTMD(hmep, tmdp2)) == hmep->hme_tcurp)
4367 				goto notmds;
4368 			j = tmdp2 - hmep->hme_tmdp;
4369 			mp->b_cont = NULL;
4370 			hmep->hme_tmblkp[i] = mp;
4371 			hmep->hme_tmblkp[j] = bp;
4372 			dvma_kaddr_load(hmep->hme_dvmaxh, (caddr_t)mp->b_rptr,
4373 			    len1, 2 * i, &c);
4374 			dvma_sync(hmep->hme_dvmaxh, 2 * i,
4375 			    DDI_DMA_SYNC_FORDEV);
4376 
4377 			temp_addr = c.dmac_address;
4378 			dvma_kaddr_load(hmep->hme_dvmaxh, (caddr_t)bp->b_rptr,
4379 			    len2, 2 * j, &c);
4380 			dvma_sync(hmep->hme_dvmaxh, 2 * j,
4381 			    DDI_DMA_SYNC_FORDEV);
4382 
4383 			PUT_TMD(tmdp2, c.dmac_address, len2,
4384 			    HMETMD_EOP | csflags);
4385 
4386 			HMESYNCIOPB(hmep, tmdp2, sizeof (struct hme_tmd),
4387 			    DDI_DMA_SYNC_FORDEV);
4388 
4389 			PUT_TMD(tmdp1, temp_addr, len1, HMETMD_SOP | csflags);
4390 
4391 			HMESYNCIOPB(hmep, tmdp1, sizeof (struct hme_tmd),
4392 			    DDI_DMA_SYNC_FORDEV);
4393 
4394 		} else {
4395 			len1 = msgsize(mp);
4396 
4397 			if ((bp = hmeallocb(len1, BPRI_HI)) == NULL) {
4398 				hmep->hme_allocbfail++;
4399 				hmep->hme_noxmtbuf++;
4400 				goto bad;
4401 			}
4402 
4403 			mcopymsg(mp, bp->b_rptr);
4404 			mp = bp;
4405 			hmep->hme_tmblkp[i] = mp;
4406 
4407 			if ((csflags) && (len1 < 64)) {
4408 				uint16_t sum;
4409 				sum = hme_cksum(bp->b_rptr + start_offset,
4410 				    len1 - start_offset);
4411 				bcopy(&sum, bp->b_rptr + stuff_offset,
4412 				    sizeof (sum));
4413 				csflags = 0;
4414 			}
4415 
4416 			dvma_kaddr_load(hmep->hme_dvmaxh,
4417 			    (caddr_t)mp->b_rptr, len1, 2 * i, &c);
4418 			dvma_sync(hmep->hme_dvmaxh, 2 * i,
4419 			    DDI_DMA_SYNC_FORDEV);
4420 			PUT_TMD(tmdp1, c.dmac_address, len1,
4421 			    HMETMD_SOP | HMETMD_EOP | csflags);
4422 			HMESYNCIOPB(hmep, tmdp1, sizeof (struct hme_tmd),
4423 			    DDI_DMA_SYNC_FORDEV);
4424 		}
4425 	}
4426 	CHECK_IOPB();
4427 
4428 	hmep->hme_tnextp = ntmdp;
4429 	PUT_ETXREG(txpend, HMET_TXPEND_TDMD);
4430 	CHECK_ETXREG();
4431 
4432 	HME_DEBUG_MSG1(hmep, SEVERITY_UNKNOWN, TX_MSG,
4433 	    "hmestart:  Transmitted a frame");
4434 
4435 	mutex_exit(&hmep->hme_xmitlock);
4436 
4437 
4438 	hmep->hme_starts++;
4439 	return (B_TRUE);
4440 bad:
4441 	mutex_exit(&hmep->hme_xmitlock);
4442 	freemsg(mp);
4443 	return (B_TRUE);
4444 notmds:
4445 	hmep->hme_notmds++;
4446 	hmep->hme_wantw = B_TRUE;
4447 	hmep->hme_tnextp = tmdp1;
4448 	hmereclaim(hmep);
4449 	retval = B_FALSE;
4450 done:
4451 	mutex_exit(&hmep->hme_xmitlock);
4452 
4453 	return (retval);
4454 }
4455 
4456 /*
4457  * Initialize channel.
4458  * Return 0 on success, nonzero on error.
4459  *
4460  * The recommended sequence for initialization is:
4461  * 1. Issue a Global Reset command to the Ethernet Channel.
4462  * 2. Poll the Global_Reset bits until the execution of the reset has been
4463  *    completed.
4464  * 2(a). Use the MIF Frame/Output register to reset the transceiver.
4465 	 *	 Poll Register 0 until the Reset bit is 0.
4466 	 * 2(b). Use the MIF Frame/Output register to put the PHY in Normal-Op,
4467  *	 100Mbps and Non-Isolated mode. The main point here is to bring the
4468  *	 PHY out of Isolate mode so that it can generate the rx_clk and tx_clk
4469  *	 to the MII interface so that the Bigmac core can correctly reset
4470  *	 upon a software reset.
4471  * 2(c).  Issue another Global Reset command to the Ethernet Channel and poll
4472  *	  the Global_Reset bits till completion.
4473  * 3. Set up all the data structures in the host memory.
4474  * 4. Program the TX_MAC registers/counters (excluding the TX_MAC Configuration
4475  *    Register).
4476  * 5. Program the RX_MAC registers/counters (excluding the RX_MAC Configuration
4477  *    Register).
4478  * 6. Program the Transmit Descriptor Ring Base Address in the ETX.
4479  * 7. Program the Receive Descriptor Ring Base Address in the ERX.
4480  * 8. Program the Global Configuration and the Global Interrupt Mask Registers.
4481  * 9. Program the ETX Configuration register (enable the Transmit DMA channel).
4482  * 10. Program the ERX Configuration register (enable the Receive DMA channel).
4483  * 11. Program the XIF Configuration Register (enable the XIF).
4484  * 12. Program the RX_MAC Configuration Register (Enable the RX_MAC).
4485  * 13. Program the TX_MAC Configuration Register (Enable the TX_MAC).
4486  */
4487 
4488 
4489 #ifdef FEPS_URUN_BUG
4490 static int hme_palen = 32;
4491 #endif
4492 
4493 static int
4494 hmeinit(struct hme *hmep)
4495 {
4496 	mblk_t		*bp;
4497 	uint32_t	i;
4498 	int		ret;
4499 	int		alloc_ret;	/* hmeallocthings() return value   */
4500 	ddi_dma_cookie_t dma_cookie;
4501 	uint_t dmac_cnt;
4502 
4503 	/*
4504 	 * Lock sequence:
4505 	 *	hme_intrlock, hme_xmitlock.
4506 	 */
4507 	mutex_enter(&hmep->hme_intrlock);
4508 
4509 	/*
4510 	 * Don't touch the hardware if we are suspended.  But don't
4511 	 * fail either.  Some time later we may be resumed, and then
4512 	 * we'll be back here to program the device using the settings
4513 	 * in the soft state.
4514 	 */
4515 	if (hmep->hme_flags & HMESUSPENDED) {
4516 		mutex_exit(&hmep->hme_intrlock);
4517 		return (0);
4518 	}
4519 
4520 	/*
4521 	 * This should prevent us from clearing any interrupts that
4522 	 * may occur by temporarily stopping interrupts from occurring
4523 	 * for a short time.  We need to update the interrupt mask
4524 	 * later in this function.
4525 	 */
4526 	PUT_GLOBREG(intmask, ~HMEG_MASK_MIF_INTR);
4527 
4528 
4529 	/*
4530 	 * Rearranged the mutex acquisition order to solve the deadlock
4531 	 * situation as described in bug ID 4065896.
4532 	 */
4533 
4534 	hme_stop_timer(hmep);	/* acquire hme_linklock */
4535 	mutex_enter(&hmep->hme_xmitlock);
4536 
4537 	hmep->hme_flags = 0;
4538 	hmep->hme_wantw = B_FALSE;
4539 	hmep->hme_txhung = 0;
4540 
4541 	/*
4542 	 * Initializing 'hmep->hme_iipackets' to match current
4543 	 * number of received packets.
4544 	 */
4545 	hmep->hme_iipackets = hmep->hme_ipackets;
4546 
4547 	if (hmep->inits)
4548 		hmesavecntrs(hmep);
4549 
4550 	hme_stop_mifpoll(hmep);
4551 
4552 	/*
4553 	 * Perform Global reset of the Sbus/FEPS ENET channel.
4554 	 */
4555 	(void) hmestop(hmep);
4556 
4557 	/*
4558 	 * Allocate data structures.
4559 	 */
4560 	alloc_ret = hmeallocthings(hmep);
4561 	if (alloc_ret) {
4562 		/*
4563 		 * Failed
4564 		 */
4565 		hme_start_timer(hmep, hme_check_link, HME_LINKCHECK_TIMER);
4566 		goto init_fail;
4567 	}
4568 
4569 	hmefreebufs(hmep);
4570 
4571 	/*
4572 	 * Clear all descriptors.
4573 	 */
4574 	bzero(hmep->hme_rmdp, HME_RMDMAX * sizeof (struct hme_rmd));
4575 	bzero(hmep->hme_tmdp, HME_TMDMAX * sizeof (struct hme_tmd));
4576 
4577 	/*
4578 	 * Hang out receive buffers.
4579 	 */
4580 	for (i = 0; i < HMERPENDING; i++) {
4581 		if ((bp = hmeallocb(HMEBUFSIZE, BPRI_LO)) == NULL) {
4582 			HME_DEBUG_MSG1(hmep, SEVERITY_UNKNOWN, INIT_MSG,
4583 			    "allocb failed");
4584 			hme_start_timer(hmep, hme_check_link,
4585 			    HME_LINKCHECK_TIMER);
4586 			goto init_fail;
4587 		}
4588 
4589 		/*
4590 		 * dvma case
4591 		 */
4592 		if (hmep->hme_dvmarh != NULL) {
4593 			dvma_kaddr_load(hmep->hme_dvmarh, (caddr_t)bp->b_rptr,
4594 			    (uint_t)HMEBUFSIZE, 2 * i, &dma_cookie);
4595 		} else {
4596 		/*
4597 		 * dma case
4598 		 */
4599 			if (ddi_dma_alloc_handle(hmep->dip, &hme_dma_attr,
4600 			    DDI_DMA_DONTWAIT, NULL, &hmep->hme_dmarh[i])
4601 			    != DDI_SUCCESS) {
4602 				HME_DEBUG_MSG1(hmep, SEVERITY_HIGH, INIT_MSG,
4603 				"ddi_dma_alloc_handle of bufs failed");
4604 				hme_start_timer(hmep, hme_check_link,
4605 				    HME_LINKCHECK_TIMER);
4606 				goto init_fail;
4607 			}
4608 
4609 			if (ddi_dma_addr_bind_handle(hmep->hme_dmarh[i], NULL,
4610 			    (caddr_t)bp->b_rptr, HMEBUFSIZE, DDI_DMA_RDWR,
4611 			    DDI_DMA_DONTWAIT, NULL, &dma_cookie, &dmac_cnt)
4612 			    != DDI_DMA_MAPPED) {
4613 				HME_DEBUG_MSG1(hmep, SEVERITY_HIGH, INIT_MSG,
4614 				"ddi_dma_addr_bind_handle of bufs failed");
4615 				hme_start_timer(hmep, hme_check_link,
4616 				    HME_LINKCHECK_TIMER);
4617 				goto init_fail;
4618 			}
4619 			/* apparently they don't handle multiple cookies */
4620 			if (dmac_cnt > 1) {
4621 				HME_DEBUG_MSG1(hmep, SEVERITY_HIGH, INIT_MSG,
4622 				    "dmarh crossed page boundary - failed");
4623 				hme_start_timer(hmep, hme_check_link,
4624 				    HME_LINKCHECK_TIMER);
4625 				goto init_fail;
4626 			}
4627 		}
4628 		PUT_RMD((&hmep->hme_rmdp[i]), dma_cookie.dmac_address);
4629 
4630 		hmep->hme_rmblkp[i] = bp;	/* save for later use */
4631 	}
4632 
4633 	/*
4634 	 * DMA sync descriptors.
4635 	 */
4636 	HMESYNCIOPB(hmep, hmep->hme_rmdp, (HME_RMDMAX * sizeof (struct hme_rmd)
4637 	    + HME_TMDMAX * sizeof (struct hme_tmd)), DDI_DMA_SYNC_FORDEV);
4638 	CHECK_IOPB();
4639 
4640 	/*
4641 	 * Reset RMD and TMD 'walking' pointers.
4642 	 */
4643 	hmep->hme_rnextp = hmep->hme_rmdp;
4644 	hmep->hme_rlastp = hmep->hme_rmdp + HMERPENDING - 1;
4645 	hmep->hme_tcurp = hmep->hme_tmdp;
4646 	hmep->hme_tnextp = hmep->hme_tmdp;
4647 
4648 	/*
4649 	 * This is the right place to initialize MIF !!!
4650 	 */
4651 
4652 	PUT_MIFREG(mif_imask, HME_MIF_INTMASK);	/* mask all interrupts */
4653 
4654 	if (!hmep->hme_frame_enable)
4655 		PUT_MIFREG(mif_cfg, GET_MIFREG(mif_cfg) | HME_MIF_CFGBB);
4656 	else
4657 		PUT_MIFREG(mif_cfg, GET_MIFREG(mif_cfg) & ~HME_MIF_CFGBB);
4658 						/* enable frame mode */
4659 
4660 	/*
4661 	 * Depending on the transceiver detected, select the source
4662 	 * of the clocks for the MAC. Without the clocks, TX_MAC does
4663 	 * not reset. When the Global Reset is issued to the Sbus/FEPS
4664 	 * ASIC, it selects Internal by default.
4665 	 */
4666 
4667 	hme_check_transceiver(hmep);
4668 	if (hmep->hme_transceiver == HME_NO_TRANSCEIVER) {
4669 		HME_FAULT_MSG1(hmep, SEVERITY_HIGH, XCVR_MSG, no_xcvr_msg);
4670 		hme_start_timer(hmep, hme_check_link, HME_LINKCHECK_TIMER);
4671 		goto init_fail;	/* abort initialization */
4672 
4673 	} else if (hmep->hme_transceiver == HME_INTERNAL_TRANSCEIVER)
4674 		PUT_MACREG(xifc, 0);
4675 	else
4676 		PUT_MACREG(xifc, BMAC_XIFC_MIIBUFDIS);
4677 				/* Isolate the Int. xcvr */
4678 	/*
4679 	 * Perform transceiver reset and speed selection only if
4680 	 * the link is down.
4681 	 */
4682 	if (!hmep->hme_linkcheck)
4683 		/*
4684 		 * Reset the PHY and bring up the link
4685 		 * If it fails we will then increment a kstat.
4686 		 */
4687 		hme_reset_transceiver(hmep);
4688 	else {
4689 		if (hmep->hme_linkup)
4690 			hme_start_mifpoll(hmep);
4691 		hme_start_timer(hmep, hme_check_link, HME_LINKCHECK_TIMER);
4692 	}
4693 	hmep->inits++;
4694 
4695 	/*
4696 	 * Initialize BigMAC registers.
4697 	 * First set the tx enable bit in tx config reg to 0 and poll on
4698 	 * it till it turns to 0. Same for rx config, hash and address
4699 	 * filter reg.
4700 	 * Here is the sequence per the spec.
4701 	 * MADD2 - MAC Address 2
4702 	 * MADD1 - MAC Address 1
4703 	 * MADD0 - MAC Address 0
4704 	 * HASH3, HASH2, HASH1, HASH0 for group address
4705 	 * AFR2, AFR1, AFR0 and AFMR for address filter mask
4706 	 * Program RXMIN and RXMAX for packet length if not 802.3
4707 	 * RXCFG - Rx config for not stripping CRC
4708 	 * XXX Anything else to hme configured in RXCFG
4709 	 * IPG1, IPG2, ALIMIT, SLOT, PALEN, PAPAT, TXSFD, JAM, TXMAX, TXMIN
4710 	 * if not 802.3 compliant
4711 	 * XIF register for speed selection
4712 	 * MASK  - Interrupt mask
4713 	 * Set bit 0 of TXCFG
4714 	 * Set bit 0 of RXCFG
4715 	 */
4716 
4717 	/*
4718 	 * Initialize the TX_MAC registers
4719 	 * Initialization of jamsize to work around rx crc bug
4720 	 */
4721 	PUT_MACREG(jam, jamsize);
4722 
4723 #ifdef	FEPS_URUN_BUG
4724 	if (hme_urun_fix)
4725 		PUT_MACREG(palen, hme_palen);
4726 #endif
4727 
4728 	PUT_MACREG(ipg1, hme_param_ipg1);
4729 	PUT_MACREG(ipg2, hme_param_ipg2);
4730 
4731 	HME_DEBUG_MSG3(hmep, SEVERITY_UNKNOWN, IPG_MSG,
4732 	    "hmeinit: ipg1 = %d ipg2 = %d", hme_param_ipg1, hme_param_ipg2);
4733 	PUT_MACREG(rseed,
4734 	    ((hmep->hme_ouraddr.ether_addr_octet[0] << 8) & 0x3) |
4735 	    hmep->hme_ouraddr.ether_addr_octet[1]);
4736 
4737 	/* Initialize the RX_MAC registers */
4738 
4739 	/*
4740 	 * Program BigMAC with local individual ethernet address.
4741 	 */
4742 	PUT_MACREG(madd2, (hmep->hme_ouraddr.ether_addr_octet[4] << 8) |
4743 	    hmep->hme_ouraddr.ether_addr_octet[5]);
4744 	PUT_MACREG(madd1, (hmep->hme_ouraddr.ether_addr_octet[2] << 8) |
4745 	    hmep->hme_ouraddr.ether_addr_octet[3]);
4746 	PUT_MACREG(madd0, (hmep->hme_ouraddr.ether_addr_octet[0] << 8) |
4747 	    hmep->hme_ouraddr.ether_addr_octet[1]);
4748 
4749 	/*
4750 	 * Set up multicast address filter by passing all multicast
4751 	 * addresses through a crc generator, and then using the
4752 	 * low order 6 bits as an index into the 64 bit logical
4753 	 * address filter. The high order two bits select the word,
4754 	 * while the low order four bits select the bit within the word.
4755 	 */
4756 	PUT_MACREG(hash0, hmep->hme_ladrf[0]);
4757 	PUT_MACREG(hash1, hmep->hme_ladrf[1]);
4758 	PUT_MACREG(hash2, hmep->hme_ladrf[2]);
4759 	PUT_MACREG(hash3, hmep->hme_ladrf[3]);
4760 
4761 	/*
4762 	 * Configure parameters to support VLAN.  (VLAN encapsulation adds
4763 	 * four bytes.)
4764 	 */
4765 	PUT_MACREG(txmax, ETHERMAX + ETHERFCSL + 4);
4766 	PUT_MACREG(rxmax, ETHERMAX + ETHERFCSL + 4);
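	/*
	 * That is ETHERMAX (1514) + ETHERFCSL (4) + 4 VLAN bytes,
	 * i.e. 1522 bytes, the maximum tagged frame size.
	 */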
4767 
4768 	/*
4769 	 * Initialize HME Global registers, ETX registers and ERX registers.
4770 	 */
4771 
4772 	PUT_ETXREG(txring, (uint32_t)HMEIOPBIOADDR(hmep, hmep->hme_tmdp));
4773 	PUT_ERXREG(rxring, (uint32_t)HMEIOPBIOADDR(hmep, hmep->hme_rmdp));
4774 
4775 	/*
4776 	 * ERX registers can only be written with an even number of bits set.
4777 	 * So, if the value written is not read back, set the lsb and write
4778 	 * again.
4779 	 * static	int	hme_erx_fix = 1;   : Use the fix for erx bug
4780 	 */
4781 	{
4782 		uint32_t temp;
4783 		temp  = ((uint32_t)HMEIOPBIOADDR(hmep, hmep->hme_rmdp));
4784 
4785 		if (GET_ERXREG(rxring) != temp)
4786 			PUT_ERXREG(rxring, (temp | 4));
4787 	}
4788 
4789 	HME_DEBUG_MSG2(hmep, SEVERITY_UNKNOWN, ERX_MSG, "rxring written = %X",
4790 	    ((uint32_t)HMEIOPBIOADDR(hmep, hmep->hme_rmdp)));
4791 	HME_DEBUG_MSG2(hmep, SEVERITY_UNKNOWN, ERX_MSG, "rxring read = %X",
4792 	    GET_ERXREG(rxring));
4793 
4794 	PUT_GLOBREG(config, (hmep->hme_config |
4795 	    (hmep->hme_64bit_xfer << HMEG_CONFIG_64BIT_SHIFT)));
4796 
4797 	/*
4798 	 * Significant performance improvements can be achieved by
4799 	 * disabling transmit interrupt. Thus TMD's are reclaimed only
4800 	 * when we run out of them in hmestart().
4801 	 */
4802 	PUT_GLOBREG(intmask,
4803 	    HMEG_MASK_INTR | HMEG_MASK_TINT | HMEG_MASK_TX_ALL);
4804 
4805 	PUT_ETXREG(txring_size, ((HME_TMDMAX - 1) >> HMET_RINGSZ_SHIFT));
4806 	PUT_ETXREG(config, (GET_ETXREG(config) | HMET_CONFIG_TXDMA_EN
4807 	    | HMET_CONFIG_TXFIFOTH));
4808 	/* get the rxring size bits */
4809 	switch (HME_RMDMAX) {
4810 	case 32:
4811 		i = HMER_CONFIG_RXRINGSZ32;
4812 		break;
4813 	case 64:
4814 		i = HMER_CONFIG_RXRINGSZ64;
4815 		break;
4816 	case 128:
4817 		i = HMER_CONFIG_RXRINGSZ128;
4818 		break;
4819 	case 256:
4820 		i = HMER_CONFIG_RXRINGSZ256;
4821 		break;
4822 	default:
4823 		HME_FAULT_MSG1(hmep, SEVERITY_HIGH, INIT_MSG,
4824 		    unk_rx_ringsz_msg);
4825 		goto init_fail;
4826 	}
4827 	i |= (HME_FSTBYTE_OFFSET << HMER_CONFIG_FBO_SHIFT)
4828 	    | HMER_CONFIG_RXDMA_EN;
4829 
4830 	/* h/w checks start offset in half words */
4831 	i |= ((sizeof (struct ether_header) / 2) << HMER_RX_CSSTART_SHIFT);
4832 
4833 	PUT_ERXREG(config, i);
4834 
4835 	HME_DEBUG_MSG2(hmep, SEVERITY_UNKNOWN, INIT_MSG,
4836 	    "erxp->config = %X", GET_ERXREG(config));
4837 	/*
4838 	 * Bug related to the parity handling in ERX. When erxp-config is
4839 	 * read back.
4840 	 * Sbus/FEPS drives the parity bit. This value is used while
4841 	 * writing again.
4842 	 * This fixes the RECV problem in SS5.
4843 	 * static	int	hme_erx_fix = 1;   : Use the fix for erx bug
4844 	 */
4845 	{
4846 		uint32_t temp;
4847 		temp = GET_ERXREG(config);
4848 		PUT_ERXREG(config, i);
4849 
4850 		if (GET_ERXREG(config) != i)
4851 			HME_FAULT_MSG4(hmep, SEVERITY_UNKNOWN, ERX_MSG,
4852 			    "error:temp = %x erxp->config = %x, should be %x",
4853 			    temp, GET_ERXREG(config), i);
4854 	}
4855 
4856 	/*
4857 	 * Set up the rxconfig, txconfig and seed registers, without
4858 	 * enabling the former two at this time.
4859 	 *
4860 	 * BigMAC strips the CRC bytes by default. Since this is
4861 	 * contrary to other pieces of hardware, this bit needs to be
4862 	 * enabled to tell BigMAC not to strip the CRC bytes.
4863 	 * Do not filter this node's own packets.
4864 	 */
4865 
4866 	if (hme_reject_own) {
4867 		PUT_MACREG(rxcfg,
4868 		    ((hmep->hme_promisc ? BMAC_RXCFG_PROMIS : 0) |
4869 		    BMAC_RXCFG_MYOWN | BMAC_RXCFG_HASH));
4870 	} else {
4871 		PUT_MACREG(rxcfg,
4872 		    ((hmep->hme_promisc ? BMAC_RXCFG_PROMIS : 0) |
4873 		    BMAC_RXCFG_HASH));
4874 	}
4875 
4876 	drv_usecwait(10);	/* wait after setting Hash Enable bit */
4877 
4878 	if (hme_ngu_enable)
4879 		PUT_MACREG(txcfg, (hmep->hme_fdx ? BMAC_TXCFG_FDX: 0) |
4880 		    BMAC_TXCFG_NGU);
4881 	else
4882 		PUT_MACREG(txcfg, (hmep->hme_fdx ? BMAC_TXCFG_FDX: 0));
4883 	hmep->hme_macfdx = hmep->hme_fdx;
4884 
4885 
4886 	i = 0;
4887 	if ((hme_param_lance_mode) && (hmep->hme_lance_mode_enable))
4888 		i = ((hme_param_ipg0 & HME_MASK_5BIT) << BMAC_XIFC_IPG0_SHIFT)
4889 		    | BMAC_XIFC_LANCE_ENAB;
4890 	if (hmep->hme_transceiver == HME_INTERNAL_TRANSCEIVER)
4891 		PUT_MACREG(xifc, i | (BMAC_XIFC_ENAB));
4892 	else
4893 		PUT_MACREG(xifc, i | (BMAC_XIFC_ENAB | BMAC_XIFC_MIIBUFDIS));
4894 
4895 	PUT_MACREG(rxcfg, GET_MACREG(rxcfg) | BMAC_RXCFG_ENAB);
4896 	PUT_MACREG(txcfg, GET_MACREG(txcfg) | BMAC_TXCFG_ENAB);
4897 
4898 	hmep->hme_flags |= (HMERUNNING | HMEINITIALIZED);
4899 	/*
4900 	 * Update the interrupt mask : this will re-allow interrupts to occur
4901 	 */
4902 	PUT_GLOBREG(intmask, HMEG_MASK_INTR);
4903 	mac_tx_update(hmep->hme_mh);
4904 
4905 init_fail:
4906 	/*
4907 	 * Release the locks in reverse order
4908 	 */
4909 	mutex_exit(&hmep->hme_xmitlock);
4910 	mutex_exit(&hmep->hme_intrlock);
4911 
4912 	ret = !(hmep->hme_flags & HMERUNNING);
4913 	if (ret) {
4914 		HME_FAULT_MSG1(hmep, SEVERITY_HIGH, INIT_MSG,
4915 		    init_fail_gen_msg);
4916 	}
4917 
4918 	/*
4919 	 * Hardware checks.
4920 	 */
4921 	CHECK_GLOBREG();
4922 	CHECK_MIFREG();
4923 	CHECK_MACREG();
4924 	CHECK_ERXREG();
4925 	CHECK_ETXREG();
4926 
4927 init_exit:
4928 	return (ret);
4929 }
4930 
4931 /*
4932  * Calculate the dvma burstsize by setting up a dvma temporarily.  Return
4933  * 0 as burstsize upon failure as it signifies no burst size.
4934 	 * Request a 64-bit transfer setup if the platform supports it.
4935  * NOTE: Do not use ddi_dma_alloc_handle(9f) then ddi_dma_burstsize(9f),
4936  * sun4u Ultra-2 incorrectly returns a 32bit transfer.
4937  */
4938 static int
4939 hmeburstsizes(struct hme *hmep)
4940 {
4941 	int burstsizes;
4942 	ddi_dma_handle_t handle;
4943 
4944 	if (ddi_dma_alloc_handle(hmep->dip, &hme_dma_attr,
4945 	    DDI_DMA_DONTWAIT, NULL, &handle)) {
4946 		return (0);
4947 	}
4948 
4949 	hmep->hme_burstsizes = burstsizes = ddi_dma_burstsizes(handle);
4950 	ddi_dma_free_handle(&handle);
4951 
4952 	/*
4953 	 * Use user-configurable parameter for enabling 64-bit transfers
4954 	 */
4955 	burstsizes = (hmep->hme_burstsizes >> 16);
4956 	if (burstsizes)
4957 		hmep->hme_64bit_xfer = hme_64bit_enable; /* user config value */
4958 	else
4959 		burstsizes = hmep->hme_burstsizes;
4960 
4961 	if (hmep->hme_cheerio_mode)
4962 		hmep->hme_64bit_xfer = 0; /* Disable for cheerio */
4963 
4964 	if (burstsizes & 0x40)
4965 		hmep->hme_config = HMEG_CONFIG_BURST64;
4966 	else if (burstsizes & 0x20)
4967 		hmep->hme_config = HMEG_CONFIG_BURST32;
4968 	else
4969 		hmep->hme_config = HMEG_CONFIG_BURST16;
4970 
4971 	HME_DEBUG_MSG2(hmep, SEVERITY_NONE, INIT_MSG,
4972 	    "hme_config = 0x%X", hmep->hme_config);
4973 	return (DDI_SUCCESS);
4974 }
4975 
4976 static void
4977 hmefreebufs(struct hme *hmep)
4978 {
4979 	int i;
4980 	int32_t	freeval;
4981 
4982 	/*
4983 	 * Free and dvma_unload pending xmit and recv buffers.
4984 	 * Maintain the 1-to-1 ordered sequence:
4985 	 * always unload anything before loading it again.
4986 	 * Never unload anything twice.  Always unload
4987 	 * before freeing the buffer.  We satisfy these
4988 	 * requirements by unloading only those descriptors
4989 	 * which currently have an mblk associated with them.
4990 	 */
4991 	/*
4992 	 * Keep the ddi_dma_free() before the freeb()
4993 	 * with the dma handles.
4994 	 * Race condition with snoop.
4995 	 */
4996 	if (hmep->hme_dmarh) {
4997 		/* dma case */
4998 		for (i = 0; i < HME_TMDMAX; i++) {
4999 			if (hmep->hme_dmaxh[i]) {
5000 				freeval = ddi_dma_unbind_handle(
5001 				    hmep->hme_dmaxh[i]);
5002 				if (freeval == DDI_FAILURE)
5003 					HME_FAULT_MSG1(hmep, SEVERITY_HIGH,
5004 					    FREE_MSG, "ddi_dma_unbind_handle"
5005 					    " failed");
5006 				ddi_dma_free_handle(&hmep->hme_dmaxh[i]);
5007 				hmep->hme_dmaxh[i] = NULL;
5008 			}
5009 		}
5010 		for (i = 0; i < HMERPENDING; i++) {
5011 			if (hmep->hme_dmarh[i]) {
5012 				freeval = ddi_dma_unbind_handle(
5013 				    hmep->hme_dmarh[i]);
5014 				if (freeval == DDI_FAILURE)
5015 					HME_FAULT_MSG1(hmep, SEVERITY_HIGH,
5016 					    FREE_MSG, "ddi_dma_unbind_handle"
5017 					    " failure");
5018 				ddi_dma_free_handle(&hmep->hme_dmarh[i]);
5019 				hmep->hme_dmarh[i] = NULL;
5020 			}
5021 		}
5022 	}
5023 	/*
5024 	 * This message would be generated when only a dma handle is
5025 	 * expected: HME_FAULT_MSG1(NULL, SEVERITY_HIGH, FREE_MSG,
5026 	 *		"hme: Expected a dma read handle:failed");
5027 	 */
5028 
5029 	for (i = 0; i < HME_TMDMAX; i++) {
5030 		if (hmep->hme_tmblkp[i]) {
5031 			if (hmep->hme_dvmaxh != NULL)
5032 				dvma_unload(hmep->hme_dvmaxh,
5033 				    2 * i, DONT_FLUSH);
5034 			freeb(hmep->hme_tmblkp[i]);
5035 			hmep->hme_tmblkp[i] = NULL;
5036 		}
5037 	}
5038 
5039 	for (i = 0; i < HME_RMDMAX; i++) {
5040 		if (hmep->hme_rmblkp[i]) {
5041 			if (hmep->hme_dvmarh != NULL)
5042 				dvma_unload(hmep->hme_dvmarh, 2 * HMERINDEX(i),
5043 				    DDI_DMA_SYNC_FORKERNEL);
5044 			freeb(hmep->hme_rmblkp[i]);
5045 			hmep->hme_rmblkp[i] = NULL;
5046 		}
5047 	}
5048 
5049 }
5050 
5051 /*
5052  * hme_start_mifpoll() - Enables the polling of the BMSR register of the PHY.
5053  * After enabling the poll, delay for at least 62us for one poll to be done.
5054  * Then read the MIF status register to auto-clear the MIF status field.
5055  * Then program the MIF interrupt mask register to enable interrupts for the
5056  * LINK_STATUS and JABBER_DETECT bits.
5057  */
5058 
5059 static void
5060 hme_start_mifpoll(struct hme *hmep)
5061 {
5062 	uint32_t cfg;
5063 
5064 	if (!hmep->hme_mifpoll_enable)
5065 		return;
5066 
5067 	cfg = (GET_MIFREG(mif_cfg) & ~(HME_MIF_CFGPD | HME_MIF_CFGPR));
5068 	PUT_MIFREG(mif_cfg,
5069 	    (cfg = (cfg | (hmep->hme_phyad << HME_MIF_CFGPD_SHIFT) |
5070 	    (HME_PHY_BMSR << HME_MIF_CFGPR_SHIFT) | HME_MIF_CFGPE)));
5071 
5072 	drv_usecwait(HME_MIF_POLL_DELAY);
5073 	hmep->hme_polling_on =		1;
5074 	hmep->hme_mifpoll_flag =	0;
5075 	hmep->hme_mifpoll_data =	(GET_MIFREG(mif_bsts) >> 16);
5076 
5077 	/* Do not poll for Jabber Detect for 100 Mbps speed */
5078 	if (((hmep->hme_mode == HME_AUTO_SPEED) &&
5079 	    (hmep->hme_tryspeed == HME_SPEED_100)) ||
5080 	    ((hmep->hme_mode == HME_FORCE_SPEED) &&
5081 	    (hmep->hme_forcespeed == HME_SPEED_100)))
5082 		PUT_MIFREG(mif_imask, ((uint16_t)~(PHY_BMSR_LNKSTS)));
5083 	else
5084 		PUT_MIFREG(mif_imask,
5085 		    (uint16_t)~(PHY_BMSR_LNKSTS | PHY_BMSR_JABDET));
5086 
5087 	CHECK_MIFREG();
5088 	HME_DEBUG_MSG3(hmep, SEVERITY_UNKNOWN, MIFPOLL_MSG,
5089 	    "mifpoll started: mif_cfg = %X mif_bsts = %X",
5090 	    cfg, GET_MIFREG(mif_bsts));
5091 }
5092 
5093 static void
5094 hme_stop_mifpoll(struct hme *hmep)
5095 {
5096 	if ((!hmep->hme_mifpoll_enable) || (!hmep->hme_polling_on))
5097 		return;
5098 
5099 	PUT_MIFREG(mif_imask, 0xffff);	/* mask interrupts */
5100 	PUT_MIFREG(mif_cfg, (GET_MIFREG(mif_cfg) & ~HME_MIF_CFGPE));
5101 
5102 	hmep->hme_polling_on = 0;
5103 	drv_usecwait(HME_MIF_POLL_DELAY);
5104 	CHECK_MIFREG();
5105 }
5106 
5107 /*
5108  * Un-initialize (STOP) HME channel.
5109  */
5110 static void
5111 hmeuninit(struct hme *hmep)
5112 {
5113 	/*
5114 	 * Allow up to 'HMEDRAINTIME' for pending xmit's to complete.
5115 	 */
5116 	HMEDELAY((hmep->hme_tcurp == hmep->hme_tnextp), HMEDRAINTIME);
5117 
5118 	hme_stop_timer(hmep);   /* acquire hme_linklock */
5119 	mutex_exit(&hmep->hme_linklock);
5120 
5121 	mutex_enter(&hmep->hme_intrlock);
5122 	mutex_enter(&hmep->hme_xmitlock);
5123 
5124 	hme_stop_mifpoll(hmep);
5125 
5126 	hmep->hme_flags &= ~HMERUNNING;
5127 
5128 	(void) hmestop(hmep);
5129 
5130 	mutex_exit(&hmep->hme_xmitlock);
5131 	mutex_exit(&hmep->hme_intrlock);
5132 }
5133 
5134 /*
5135  * Allocate CONSISTENT memory for rmds and tmds with appropriate alignment and
5136  * map it in IO space. Allocate space for transmit and receive ddi_dma_handle
5137  * structures to use the DMA interface.
5138  */
5139 static int
5140 hmeallocthings(struct hme *hmep)
5141 {
5142 	uintptr_t a;
5143 	int		size;
5144 	int		rval;
5145 	size_t		real_len;
5146 	uint_t		cookiec;
5147 
5148 	/*
5149 	 * Return if resources are already allocated.
5150 	 */
5151 	if (hmep->hme_rmdp)
5152 		return (0);
5153 
5154 	/*
5155 	 * Allocate the TMD and RMD descriptors and extra for page alignment.
5156 	 */
5157 	size = (HME_RMDMAX * sizeof (struct hme_rmd)
5158 	    + HME_TMDMAX * sizeof (struct hme_tmd));
5159 	size = ROUNDUP(size, hmep->pagesize) + hmep->pagesize;
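	/*
	 * The extra pagesize added above guarantees that, after
	 * hme_iopbkbase is rounded up to a page boundary below, the
	 * whole descriptor area still fits within the allocation.
	 */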
5160 
5161 	rval = ddi_dma_alloc_handle(hmep->dip, &hme_dma_attr,
5162 	    DDI_DMA_DONTWAIT, 0, &hmep->hme_md_h);
5163 	if (rval != DDI_SUCCESS) {
5164 		HME_FAULT_MSG1(hmep, SEVERITY_HIGH, INIT_MSG,
5165 		    "cannot allocate rmd handle - failed");
5166 		return (1);
5167 	}
5168 
5169 	rval = ddi_dma_mem_alloc(hmep->hme_md_h, size, &hmep->hme_dev_attr,
5170 	    DDI_DMA_CONSISTENT, DDI_DMA_DONTWAIT, 0,
5171 	    (caddr_t *)&hmep->hme_iopbkbase, &real_len, &hmep->hme_mdm_h);
5172 	if (rval != DDI_SUCCESS) {
5173 		HME_FAULT_MSG1(hmep, SEVERITY_HIGH, INIT_MSG,
5174 		    "cannot allocate trmd dma mem - failed");
5175 		ddi_dma_free_handle(&hmep->hme_md_h);
5176 		return (1);
5177 	}
5178 
5179 	hmep->hme_iopbkbase = ROUNDUP(hmep->hme_iopbkbase, hmep->pagesize);
5180 	size = (HME_RMDMAX * sizeof (struct hme_rmd)
5181 	    + HME_TMDMAX * sizeof (struct hme_tmd));
5182 
5183 	rval = ddi_dma_addr_bind_handle(hmep->hme_md_h, NULL,
5184 	    (caddr_t)hmep->hme_iopbkbase, size,
5185 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT, DDI_DMA_DONTWAIT, 0,
5186 	    &hmep->hme_md_c, &cookiec);
5187 	if (rval != DDI_DMA_MAPPED) {
5188 		HME_FAULT_MSG1(hmep, SEVERITY_HIGH, INIT_MSG,
5189 		    "cannot allocate trmd dma - failed");
5190 		ddi_dma_mem_free(&hmep->hme_mdm_h);
5191 		ddi_dma_free_handle(&hmep->hme_md_h);
5192 		return (1);
5193 	}
5194 
5195 	if (cookiec != 1) {
5196 		HME_FAULT_MSG1(hmep, SEVERITY_HIGH, INIT_MSG,
5197 		    "trmds crossed page boundary - failed");
5198 		if (ddi_dma_unbind_handle(hmep->hme_md_h) == DDI_FAILURE)
5199 			return (2);
5200 		ddi_dma_mem_free(&hmep->hme_mdm_h);
5201 		ddi_dma_free_handle(&hmep->hme_md_h);
5202 		return (1);
5203 	}
5204 
5205 	hmep->hme_iopbiobase = hmep->hme_md_c.dmac_address;
5206 
5207 	a = hmep->hme_iopbkbase;
5208 	a = ROUNDUP(a, HME_HMDALIGN);
5209 	hmep->hme_rmdp = (struct hme_rmd *)a;
5210 	a += HME_RMDMAX * sizeof (struct hme_rmd);
5211 	hmep->hme_tmdp = (struct hme_tmd *)a;
5212 	/*
5213 	 * dvma_reserve() reserves DVMA space for the private use of
5214 	 * this device driver.
5215 	 */
5216 	if ((dvma_reserve(hmep->dip, &hme_dma_limits, (HME_TMDMAX * 2),
5217 	    &hmep->hme_dvmaxh)) != DDI_SUCCESS) {
5218 		/*
5219 		 * Specifically we reserve two pagetable entries for
5220 		 * each of the (HME_TMDMAX + HME_RMDMAX) descriptors.
5221 		 * Since the ethernet buffers are 1518 bytes, they
5222 		 * can use at most 2 ptes each.
5223 		 * We will do a ddi_dma_addr_setup for each buffer.
5224 		 */
5225 		/*
5226 		 * We will now do regular dma, due to the fact that
5227 		 * dvma_reserve failed.
5228 		 */
5229 		hmep->hme_dmaxh = (ddi_dma_handle_t *)
5230 		    kmem_zalloc(((HME_TMDMAX +  HMERPENDING) *
5231 		    (sizeof (ddi_dma_handle_t))), KM_SLEEP);
5232 		hmep->hme_dmarh = hmep->hme_dmaxh + HME_TMDMAX;
5233 		hmep->hme_dvmaxh = hmep->hme_dvmarh = NULL;
5234 		hmep->dmaxh_init++;
5235 		hmep->dmarh_init++;
5236 
5237 	} else {
5238 		/*
5239 		 * Reserve dvma space for the receive side. If
5240 		 * this call fails, we have to release the resources
5241 		 * and fall back to the dma case.
5242 		 */
5243 		if ((dvma_reserve(hmep->dip, &hme_dma_limits,
5244 		    (HMERPENDING * 2), &hmep->hme_dvmarh)) != DDI_SUCCESS) {
5245 			(void) dvma_release(hmep->hme_dvmaxh);
5246 
5247 			hmep->hme_dmaxh = (ddi_dma_handle_t *)
5248 			    kmem_zalloc(((HME_TMDMAX +  HMERPENDING) *
5249 			    (sizeof (ddi_dma_handle_t))), KM_SLEEP);
5250 			hmep->hme_dmarh = hmep->hme_dmaxh + HME_TMDMAX;
5251 			hmep->hme_dvmaxh = hmep->hme_dvmarh = NULL;
5252 			hmep->dmaxh_init++;
5253 			hmep->dmarh_init++;
5254 		}
5255 	}
5256 
5257 	/*
5258 	 * Keep handy limit values for RMD, TMD, and Buffers.
5259 	 */
5260 	hmep->hme_rmdlimp = &((hmep->hme_rmdp)[HME_RMDMAX]);
5261 	hmep->hme_tmdlimp = &((hmep->hme_tmdp)[HME_TMDMAX]);
5262 
5263 	/*
5264 	 * Zero out xmit and rcv holders.
5265 	 */
5266 	bzero(hmep->hme_tmblkp, sizeof (hmep->hme_tmblkp));
5267 	bzero(hmep->hme_rmblkp, sizeof (hmep->hme_rmblkp));
5268 
5269 	return (0);
5270 }
5271 
5272 
5273 /*
5274  *	First check to see if it is our device interrupting.
5275  */
5276 static uint_t
5277 hmeintr(caddr_t arg)
5278 {
5279 	struct hme	*hmep = (void *)arg;
5280 	uint32_t	hmesbits;
5281 	uint32_t	mif_status;
5282 	uint32_t	dummy_read;
5283 	uint32_t	serviced = DDI_INTR_UNCLAIMED;
5284 	uint32_t	num_reads = 0;
5285 	uint32_t	rflags;
5286 	mblk_t		*mp, *head, **tail;
5287 
5288 
5289 	head = NULL;
5290 	tail = &head;
5291 
5292 	mutex_enter(&hmep->hme_intrlock);
5293 
5294 	/*
5295 	 * The status register auto-clears on read except for
5296 	 * MIF Interrupt bit
5297 	 */
5298 	hmesbits = GET_GLOBREG(status);
5299 	CHECK_GLOBREG();
5300 
5301 	HME_DEBUG_MSG3(hmep, SEVERITY_NONE, INTR_MSG,
5302 	    "hmeintr: start:  hmep %X status = %X", hmep, hmesbits);
5303 	/*
5304 	 * Note: TINT is sometimes enabled in the hmereclaim()
5305 	 */
5306 
5307 	/*
5308 	 * Bugid 1227832 - to handle spurious interrupts on fusion systems.
5309 	 * Claim the first interrupt after initialization
5310 	 */
5311 	if (hmep->hme_flags & HMEINITIALIZED) {
5312 		hmep->hme_flags &= ~HMEINITIALIZED;
5313 		serviced = DDI_INTR_CLAIMED;
5314 	}
5315 
5316 	if ((hmesbits & (HMEG_STATUS_INTR | HMEG_STATUS_TINT)) == 0) {
5317 						/* No interesting interrupt */
5318 		if (hmep->hme_intrstats) {
5319 			if (serviced == DDI_INTR_UNCLAIMED)
5320 				KIOIP->intrs[KSTAT_INTR_SPURIOUS]++;
5321 			else
5322 				KIOIP->intrs[KSTAT_INTR_HARD]++;
5323 		}
5324 		mutex_exit(&hmep->hme_intrlock);
5325 		return (serviced);
5326 	}
5327 
5328 	serviced = DDI_INTR_CLAIMED;
5329 
5330 	if (!(hmep->hme_flags & HMERUNNING)) {
5331 		if (hmep->hme_intrstats)
5332 			KIOIP->intrs[KSTAT_INTR_HARD]++;
5333 		mutex_exit(&hmep->hme_intrlock);
5334 		hmeuninit(hmep);
5335 		HME_DEBUG_MSG1(hmep, SEVERITY_UNKNOWN,  INTR_MSG,
5336 		    "hmeintr: hme not running");
5337 		return (serviced);
5338 	}
5339 
5340 	if (hmesbits & (HMEG_STATUS_FATAL_ERR | HMEG_STATUS_NONFATAL_ERR)) {
5341 		if (hmesbits & HMEG_STATUS_FATAL_ERR) {
5342 
5343 			HME_DEBUG_MSG2(hmep, SEVERITY_MID, INTR_MSG,
5344 			    "hmeintr: fatal error:hmesbits = %X", hmesbits);
5345 			if (hmep->hme_intrstats)
5346 				KIOIP->intrs[KSTAT_INTR_HARD]++;
5347 			hme_fatal_err(hmep, hmesbits);
5348 
5349 			HME_DEBUG_MSG2(hmep, SEVERITY_MID, INTR_MSG,
5350 			    "fatal %x: re-init MAC", hmesbits);
5351 
5352 			mutex_exit(&hmep->hme_intrlock);
5353 			(void) hmeinit(hmep);
5354 			return (serviced);
5355 		}
5356 		HME_DEBUG_MSG2(hmep, SEVERITY_MID, INTR_MSG,
5357 		    "hmeintr: non-fatal error:hmesbits = %X", hmesbits);
5358 		hme_nonfatal_err(hmep, hmesbits);
5359 	}
5360 
5361 	if (hmesbits & HMEG_STATUS_MIF_INTR) {
5362 		mif_status = (GET_MIFREG(mif_bsts) >> 16);
5363 		if (!(mif_status & PHY_BMSR_LNKSTS)) {
5364 
5365 			HME_DEBUG_MSG1(hmep, SEVERITY_UNKNOWN, INTR_MSG,
5366 			    "hmeintr: mif interrupt: Link Down");
5367 
5368 			if (hmep->hme_intrstats)
5369 				KIOIP->intrs[KSTAT_INTR_HARD]++;
5370 
5371 			hme_stop_mifpoll(hmep);
5372 			hmep->hme_mifpoll_flag = 1;
5373 			mutex_exit(&hmep->hme_intrlock);
5374 			hme_stop_timer(hmep);
5375 			hme_start_timer(hmep, hme_check_link, MSECOND(1));
5376 			return (serviced);
5377 		}
5378 		/*
5379 		 *
5380 		 * BugId 1261889 EscId 50699 ftp hangs @ 10 Mbps
5381 		 *
5382 		 * Here could be one cause:
5383 		 * national PHY sees jabber, goes into "Jabber function",
5384 		 * (see section 3.7.6 in PHY specs.), disables transmitter,
5385 		 * and waits for internal transmit enable to be de-asserted
5386 		 * for at least 750ms (the "unjab" time).  Also, the PHY
5387 		 * has asserted COL, the collision detect signal.
5388 		 *
5389 		 * In the meantime, the Sbus/FEPS, in never-give-up mode,
5390 		 * continually retries, backs off 16 times as per spec,
5391 		 * and restarts the transmission, so TX_EN is never
5392 		 * deasserted long enough, in particular TX_EN is turned
5393 		 * on approximately once every 4 microseconds on the
5394 		 * average.  PHY and MAC are deadlocked.
5395 		 *
5396 		 * Here is part of the fix:
5397 		 * On seeing the jabber, treat it like a hme_fatal_err
5398 		 * and reset both the Sbus/FEPS and the PHY.
5399 		 */
5400 
5401 		if (mif_status & (PHY_BMSR_JABDET)) {
5402 
5403 			HME_DEBUG_MSG1(hmep, SEVERITY_LOW, INTR_MSG,
5404 			    "jabber detected");
5405 
5406 			/* national phy only defines this at 10 Mbps */
5407 			if (hme_param_speed == 0) { /* 10 Mbps speed ? */
5408 				hmep->hme_jab++;
5409 
5410 				HME_DEBUG_MSG1(hmep, SEVERITY_UNKNOWN,
5411 				    INTR_MSG, "mif interrupt: Jabber");
5412 
5413 				/* treat jabber like a fatal error */
5414 				hmep->hme_linkcheck = 0; /* force PHY reset */
5415 				mutex_exit(&hmep->hme_intrlock);
5416 				(void) hmeinit(hmep);
5417 
5418 				HME_DEBUG_MSG1(hmep, SEVERITY_LOW, INTR_MSG,
5419 				    "jabber: re-init PHY & MAC");
5420 				return (serviced);
5421 			}
5422 		}
5423 		hme_start_mifpoll(hmep);
5424 	}
5425 
5426 	if (hmesbits & (HMEG_STATUS_TX_ALL | HMEG_STATUS_TINT)) {
5427 		mutex_enter(&hmep->hme_xmitlock);
5428 
5429 		HME_DEBUG_MSG1(hmep, SEVERITY_UNKNOWN, TX_MSG,
5430 		    "hmeintr: packet transmitted");
5431 		hmereclaim(hmep);
5432 		mutex_exit(&hmep->hme_xmitlock);
5433 	}
5434 
5435 	if (hmesbits & HMEG_STATUS_RINT) {
5436 		volatile struct	hme_rmd	*rmdp;
5437 
5438 		/*
5439 		 * This dummy PIO is required to flush the SBus
5440 		 * Bridge buffers in QFE.
5441 		 */
5442 		dummy_read = GET_GLOBREG(config);
5443 #ifdef	lint
5444 		dummy_read = dummy_read;
5445 #endif
5446 
5447 		rmdp = hmep->hme_rnextp;
5448 
5449 		HME_DEBUG_MSG2(hmep, SEVERITY_NONE, INTR_MSG,
5450 		    "hmeintr: packet received: rmdp = %X", rmdp);
5451 
5452 		/*
5453 		 * Sync RMD before looking at it.
5454 		 */
5455 		HMESYNCIOPB(hmep, rmdp, sizeof (struct hme_rmd),
5456 		    DDI_DMA_SYNC_FORKERNEL);
5457 
5458 		/*
5459 		 * Loop through each RMD.
5460 		 */
5461 		while ((((rflags = GET_RMD_FLAGS(rmdp)) & HMERMD_OWN) == 0) &&
5462 		    (num_reads++ < HMERPENDING)) {
5463 
5464 			mp = hmeread(hmep, rmdp, rflags);
5465 
5466 			/*
5467 			 * Increment to next RMD.
5468 			 */
5469 			hmep->hme_rnextp = rmdp = NEXTRMD(hmep, rmdp);
5470 
5471 			if (mp != NULL) {
5472 				*tail = mp;
5473 				tail = &mp->b_next;
5474 			}
5475 
5476 			/*
5477 			 * Sync the next RMD before looking at it.
5478 			 */
5479 			HMESYNCIOPB(hmep, rmdp, sizeof (struct hme_rmd),
5480 			    DDI_DMA_SYNC_FORKERNEL);
5481 		}
5482 		CHECK_IOPB();
5483 	}
5484 
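	/*
	 * Bounding the receive loop above at HMERPENDING descriptors
	 * per interrupt keeps a flooded ring from monopolizing the
	 * handler, with the expectation that a subsequent interrupt
	 * completes the sweep.
	 */
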
5485 	if (hmep->hme_intrstats)
5486 		KIOIP->intrs[KSTAT_INTR_HARD]++;
5487 
5488 	mutex_exit(&hmep->hme_intrlock);
5489 
5490 	if (head != NULL)
5491 		mac_rx(hmep->hme_mh, NULL, head);
5492 
5493 	return (serviced);
5494 }
5495 
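/*
 * For context, a handler like hmeintr() is typically attached during
 * attach(9E) with the legacy DDI interrupt interface.  A minimal
 * sketch, assuming interrupt number 0 and a cookie variable invented
 * for illustration:
 *
 *	ddi_iblock_cookie_t cookie;
 *
 *	if (ddi_add_intr(hmep->dip, 0, &cookie, NULL,
 *	    hmeintr, (caddr_t)hmep) != DDI_SUCCESS)
 *		return (DDI_FAILURE);
 *
 * The KSTAT_INTR_SPURIOUS/KSTAT_INTR_HARD accounting above is what
 * lets kstat-based tools tell claimed from unclaimed interrupts on a
 * shared line.
 */
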
5496 /*
5497  * Transmit completion reclaiming.
5498  */
5499 static void
5500 hmereclaim(struct hme *hmep)
5501 {
5502 	volatile struct	hme_tmd	*tmdp;
5503 	int	i;
5504 	int32_t	freeval;
5505 	int	nbytes;
5506 
5507 	tmdp = hmep->hme_tcurp;
5508 
5509 	/*
5510 	 * Sync TMDs before looking at them.
5511 	 */
5512 	if (hmep->hme_tnextp > hmep->hme_tcurp) {
5513 		nbytes = ((hmep->hme_tnextp - hmep->hme_tcurp)
5514 		    * sizeof (struct hme_tmd));
5515 		HMESYNCIOPB(hmep, tmdp, nbytes, DDI_DMA_SYNC_FORKERNEL);
5516 	} else {
5517 		nbytes = ((hmep->hme_tmdlimp - hmep->hme_tcurp)
5518 		    * sizeof (struct hme_tmd));
5519 		HMESYNCIOPB(hmep, tmdp, nbytes, DDI_DMA_SYNC_FORKERNEL);
5520 		nbytes = ((hmep->hme_tnextp - hmep->hme_tmdp)
5521 		    * sizeof (struct hme_tmd));
5522 		HMESYNCIOPB(hmep, hmep->hme_tmdp, nbytes,
5523 		    DDI_DMA_SYNC_FORKERNEL);
5524 	}
5525 	CHECK_IOPB();
5526 
5527 	/*
5528 	 * Loop through each TMD.
5529 	 */
5530 	while ((GET_TMD_FLAGS(tmdp) & (HMETMD_OWN)) == 0 &&
5531 	    (tmdp != hmep->hme_tnextp)) {
5532 
5533 		/*
5534 		 * count a chained packet only once.
5535 		 */
5536 		if (GET_TMD_FLAGS(tmdp) & (HMETMD_SOP)) {
5537 			hmep->hme_opackets++;
5538 		}
5539 
5540 		/*
5541 		 * MIB II
5542 		 */
5543 		hmep->hme_obytes += GET_TMD_FLAGS(tmdp) & HMETMD_BUFSIZE;
5544 
5545 		i = tmdp - hmep->hme_tmdp;
5546 
5547 		HME_DEBUG_MSG3(hmep, SEVERITY_UNKNOWN, TX_MSG,
5548 		    "reclaim: tmdp = %X index = %d", tmdp, i);
5549 		/*
5550 		 * dvma handle case.
5551 		 */
5552 		if (hmep->hme_dvmaxh != NULL)
5553 			dvma_unload(hmep->hme_dvmaxh, 2 * i,
5554 			    (uint_t)DONT_FLUSH);
5555 		/*
5556 		 * dma handle case.
5557 		 */
5558 		else if (hmep->hme_dmaxh) {
5559 			CHECK_DMA(hmep->hme_dmaxh[i]);
5560 			freeval = ddi_dma_unbind_handle(hmep->hme_dmaxh[i]);
5561 			if (freeval == DDI_FAILURE)
5562 				HME_FAULT_MSG1(hmep, SEVERITY_LOW, TX_MSG,
5563 				    "reclaim:ddi_dma_unbind_handle failure");
5564 			ddi_dma_free_handle(&hmep->hme_dmaxh[i]);
5565 			hmep->hme_dmaxh[i] = NULL;
5566 		} else HME_FAULT_MSG1(hmep, SEVERITY_HIGH, TX_MSG,
5567 		    "reclaim: expected dmaxh");
5568 
5569 		if (hmep->hme_tmblkp[i]) {
5570 			freeb(hmep->hme_tmblkp[i]);
5571 			hmep->hme_tmblkp[i] = NULL;
5572 		}
5573 
5574 		tmdp = NEXTTMD(hmep, tmdp);
5575 	}
5576 
5577 	if (tmdp != hmep->hme_tcurp) {
5578 		/*
5579 		 * We were able to reclaim some TMDs, so mask TX interrupts again.
5580 		 */
5581 		hmep->hme_tcurp = tmdp;
5582 		if (hmep->hme_wantw) {
5583 			PUT_GLOBREG(intmask,
5584 			    HMEG_MASK_INTR | HMEG_MASK_TINT |
5585 			    HMEG_MASK_TX_ALL);
5586 			hmep->hme_wantw = B_FALSE;
5587 			mac_tx_update(hmep->hme_mh);
5588 		}
5589 	} else {
5590 		/*
5591 		 * Enable TINTs so that hmereclaim gets called even if
5592 		 * there is no further transmit activity.
5593 		 */
5594 		if (hmep->hme_wantw)
5595 			PUT_GLOBREG(intmask,
5596 			    GET_GLOBREG(intmask) & ~HMEG_MASK_TX_ALL);
5597 	}
5598 	CHECK_GLOBREG();
5599 }
5600 
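/*
 * The two intmask writes in hmereclaim() implement a lazy-reclaim
 * policy (this is a reading of the code above, not a hardware
 * requirement):
 *
 *	reclaimed something, sender blocked:
 *		mask TINT/TX_ALL again and call mac_tx_update() so the
 *		MAC layer retries the blocked transmit;
 *	reclaimed nothing, sender blocked:
 *		unmask the TX completion interrupts so a later TINT
 *		guarantees hmereclaim() runs again.
 */
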
5601 /*
5602  * Handle interrupts for fatal errors.
5603  * These require reinitialization of the ENET channel.
5604  */
5605 static void
5606 hme_fatal_err(struct hme *hmep, uint_t hmesbits)
5607 {
5608 
5609 	if (hmesbits & HMEG_STATUS_SLV_PAR_ERR) {
5610 		HME_DEBUG_MSG1(hmep, SEVERITY_MID, FATAL_ERR_MSG,
5611 		    "sbus slave parity error");
5612 		hmep->hme_slvparerr++;
5613 	}
5614 
5615 	if (hmesbits & HMEG_STATUS_SLV_ERR_ACK) {
5616 		HME_DEBUG_MSG1(hmep, SEVERITY_MID, FATAL_ERR_MSG,
5617 		    "sbus slave error ack");
5618 		hmep->hme_slverrack++;
5619 	}
5620 
5621 	if (hmesbits & HMEG_STATUS_TX_TAG_ERR) {
5622 		HME_DEBUG_MSG1(hmep, SEVERITY_MID, FATAL_ERR_MSG,
5623 		    "tx tag error");
5624 		hmep->hme_txtagerr++;
5625 		hmep->hme_oerrors++;
5626 	}
5627 
5628 	if (hmesbits & HMEG_STATUS_TX_PAR_ERR) {
5629 		HME_DEBUG_MSG1(hmep, SEVERITY_MID, FATAL_ERR_MSG,
5630 		    "sbus tx parity error");
5631 		hmep->hme_txparerr++;
5632 		hmep->hme_oerrors++;
5633 	}
5634 
5635 	if (hmesbits & HMEG_STATUS_TX_LATE_ERR) {
5636 		HME_DEBUG_MSG1(hmep, SEVERITY_MID, FATAL_ERR_MSG,
5637 		    "sbus tx late error");
5638 		hmep->hme_txlaterr++;
5639 		hmep->hme_oerrors++;
5640 	}
5641 
5642 	if (hmesbits & HMEG_STATUS_TX_ERR_ACK) {
5643 		HME_DEBUG_MSG1(hmep, SEVERITY_MID, FATAL_ERR_MSG,
5644 		    "sbus tx error ack");
5645 		hmep->hme_txerrack++;
5646 		hmep->hme_oerrors++;
5647 	}
5648 
5649 	if (hmesbits & HMEG_STATUS_EOP_ERR) {
5650 		HME_DEBUG_MSG1(hmep, SEVERITY_MID, FATAL_ERR_MSG,
5651 		    "chained packet descriptor error");
5652 		hmep->hme_eoperr++;
5653 	}
5654 
5655 	if (hmesbits & HMEG_STATUS_RX_TAG_ERR) {
5656 		HME_DEBUG_MSG1(hmep, SEVERITY_MID, FATAL_ERR_MSG,
5657 		    "rx tag error");
5658 		hmep->hme_rxtagerr++;
5659 		hmep->hme_ierrors++;
5660 	}
5661 
5662 	if (hmesbits & HMEG_STATUS_RX_PAR_ERR) {
5663 		HME_DEBUG_MSG1(hmep, SEVERITY_MID, FATAL_ERR_MSG,
5664 		    "sbus rx parity error");
5665 		hmep->hme_rxparerr++;
5666 		hmep->hme_ierrors++;
5667 	}
5668 
5669 	if (hmesbits & HMEG_STATUS_RX_LATE_ERR) {
5670 		HME_DEBUG_MSG1(hmep, SEVERITY_MID, FATAL_ERR_MSG,
5671 		    "sbus rx late error");
5672 		hmep->hme_rxlaterr++;
5673 		hmep->hme_ierrors++;
5674 	}
5675 
5676 	if (hmesbits & HMEG_STATUS_RX_ERR_ACK) {
5677 		HME_DEBUG_MSG1(hmep, SEVERITY_MID, FATAL_ERR_MSG,
5678 		    "sbus rx error ack");
5679 		hmep->hme_rxerrack++;
5680 		hmep->hme_ierrors++;
5681 	}
5682 }
5683 
5684 /*
5685  * Handle interrupts regarding non-fatal errors.
5686  */
5687 static void
5688 hme_nonfatal_err(struct hme *hmep, uint_t hmesbits)
5689 {
5690 
5691 	if (hmesbits & HMEG_STATUS_RX_DROP) {
5692 		HME_DEBUG_MSG1(hmep, SEVERITY_MID, NFATAL_ERR_MSG,
5693 		    "rx pkt dropped/no free descriptor error");
5694 		hmep->hme_missed++;
5695 		hmep->hme_ierrors++;
5696 	}
5697 
5698 	if (hmesbits & HMEG_STATUS_DEFTIMR_EXP) {
5699 		HME_DEBUG_MSG1(hmep, SEVERITY_MID, NFATAL_ERR_MSG,
5700 		    "defer timer expired");
5701 		hmep->hme_defer_xmts++;
5702 	}
5703 
5704 	if (hmesbits & HMEG_STATUS_FSTCOLC_EXP) {
5705 		HME_DEBUG_MSG1(hmep, SEVERITY_MID, NFATAL_ERR_MSG,
5706 		    "first collision counter expired");
5707 		hmep->hme_fstcol += 256;
5708 	}
5709 
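	/*
	 * The "+= 256" updates above and below reflect that these MAC
	 * event counters are presumed to be 8 bits wide: a *_EXP
	 * status bit fires when its counter rolls over, so each expiry
	 * accounts for 256 events.
	 */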
5710 	if (hmesbits & HMEG_STATUS_LATCOLC_EXP) {
5711 		HME_DEBUG_MSG1(hmep, SEVERITY_MID, NFATAL_ERR_MSG,
5712 		    "late collision");
5713 		hmep->hme_tlcol += 256;
5714 		hmep->hme_oerrors += 256;
5715 	}
5716 
5717 	if (hmesbits & HMEG_STATUS_EXCOLC_EXP) {
5718 		HME_DEBUG_MSG1(hmep, SEVERITY_MID, NFATAL_ERR_MSG,
5719 		    "retry error");
5720 		hmep->hme_excol += 256;
5721 		hmep->hme_oerrors += 256;
5722 	}
5723 
5724 	if (hmesbits & HMEG_STATUS_NRMCOLC_EXP) {
5725 		HME_DEBUG_MSG1(hmep, SEVERITY_MID, NFATAL_ERR_MSG,
5726 		    "first collision counter expired");
5727 		hmep->hme_coll += 256;
5728 	}
5729 
5730 	if (hmesbits & HMEG_STATUS_MXPKTSZ_ERR) {
5731 		HME_DEBUG_MSG1(hmep, SEVERITY_MID, NFATAL_ERR_MSG, "babble");
5732 		hmep->hme_babl++;
5733 		hmep->hme_oerrors++;
5734 	}
5735 
5736 	/*
5737 	 * This error is fatal and the board needs to
5738 	 * be reinitialized.
5739 	 */
5740 	if (hmesbits & HMEG_STATUS_TXFIFO_UNDR) {
5741 		HME_DEBUG_MSG1(hmep, SEVERITY_MID, NFATAL_ERR_MSG,
5742 		    "tx fifo underflow");
5743 		hmep->hme_uflo++;
5744 		hmep->hme_oerrors++;
5745 	}
5746 
5747 	if (hmesbits & HMEG_STATUS_SQE_TST_ERR) {
5748 		HME_DEBUG_MSG1(hmep, SEVERITY_MID, NFATAL_ERR_MSG,
5749 		    "sqe test error");
5750 		hmep->hme_sqe_errors++;
5751 	}
5752 
5753 	if (hmesbits & HMEG_STATUS_RCV_CNT_EXP) {
5754 		if (hmep->hme_rxcv_enable) {
5755 			HME_DEBUG_MSG1(hmep, SEVERITY_MID, NFATAL_ERR_MSG,
5756 			    "code violation counter expired");
5757 			hmep->hme_cvc += 256;
5758 		}
5759 	}
5760 
5761 	if (hmesbits & HMEG_STATUS_RXFIFO_OVFL) {
5762 		HME_DEBUG_MSG1(hmep, SEVERITY_MID, NFATAL_ERR_MSG,
5763 		    "rx fifo overflow");
5764 		hmep->hme_oflo++;
5765 		hmep->hme_ierrors++;
5766 	}
5767 
5768 	if (hmesbits & HMEG_STATUS_LEN_CNT_EXP) {
5769 		HME_DEBUG_MSG1(hmep, SEVERITY_MID, NFATAL_ERR_MSG,
5770 		    "length error counter expired");
5771 		hmep->hme_lenerr += 256;
5772 		hmep->hme_ierrors += 256;
5773 	}
5774 
5775 	if (hmesbits & HMEG_STATUS_ALN_CNT_EXP) {
5776 		HME_DEBUG_MSG1(hmep, SEVERITY_MID, NFATAL_ERR_MSG,
5777 		    "rx framing/alignment error");
5778 		hmep->hme_align_errors += 256;
5779 		hmep->hme_ierrors += 256;
5780 	}
5781 
5782 	if (hmesbits & HMEG_STATUS_CRC_CNT_EXP) {
5783 		HME_DEBUG_MSG1(hmep, SEVERITY_MID, NFATAL_ERR_MSG,
5784 		    "rx crc error");
5785 		hmep->hme_fcs_errors += 256;
5786 		hmep->hme_ierrors += 256;
5787 	}
5788 }
5789 
5790 static mblk_t *
5791 hmeread_dma(struct hme *hmep, volatile struct hme_rmd *rmdp, uint32_t rflags)
5792 {
5793 	long rmdi;
5794 	ulong_t	dvma_rmdi;
5795 	mblk_t	*bp, *nbp;
5796 	volatile struct	hme_rmd	*nrmdp;
5797 	t_uscalar_t type;
5798 	uint32_t len;
5799 	int32_t	syncval;
5800 	long	nrmdi;
5801 
5802 	rmdi = rmdp - hmep->hme_rmdp;
5803 	bp = hmep->hme_rmblkp[rmdi];
5804 	nrmdp = NEXTRMD(hmep, hmep->hme_rlastp);
5805 	hmep->hme_rlastp = nrmdp;
5806 	nrmdi = nrmdp - hmep->hme_rmdp;
5807 	len = (rflags & HMERMD_BUFSIZE) >> HMERMD_BUFSIZE_SHIFT;
5808 	dvma_rmdi = HMERINDEX(rmdi);
5809 
5810 	/*
5811 	 * Check for short, overflowed, and oversized packets.  The
5812 	 * processing is the same in all cases: reuse the buffer and
5813 	 * update the appropriate error counter.
5815 	 */
5816 	if ((len < ETHERMIN) || (rflags & HMERMD_OVFLOW) ||
5817 	    (len > (ETHERMAX + 4))) {
5818 		if (len < ETHERMIN) {
5819 			hmep->hme_runt++;
5820 		} else {
5822 			hmep->hme_buff++;
5823 			hmep->hme_toolong_errors++;
5824 		}
5825 		hmep->hme_ierrors++;
5826 		CLONE_RMD(rmdp, nrmdp);
5827 		hmep->hme_rmblkp[nrmdi] = bp;
5828 		hmep->hme_rmblkp[rmdi] = NULL;
5829 		HMESYNCIOPB(hmep, nrmdp, sizeof (struct hme_rmd),
5830 		    DDI_DMA_SYNC_FORDEV);
5831 		CHECK_IOPB();
5832 		return (NULL);
5833 	}
5834 
5835 	/*
5836 	 * Sync the received buffer before looking at it.
5837 	 */
5838 
5839 	if (hmep->hme_dmarh[dvma_rmdi] == NULL) {
5840 		HME_FAULT_MSG1(hmep, SEVERITY_HIGH, RX_MSG,
5841 		    "read: null handle!");
5842 		return (NULL);
5843 	}
5844 
5845 	syncval = ddi_dma_sync(hmep->hme_dmarh[dvma_rmdi], 0,
5846 	    len + HME_FSTBYTE_OFFSET, DDI_DMA_SYNC_FORCPU);
5847 	if (syncval == DDI_FAILURE)
5848 		HME_FAULT_MSG1(hmep, SEVERITY_HIGH, RX_MSG,
5849 		    "read: ddi_dma_sync failure");
5850 	CHECK_DMA(hmep->hme_dmarh[dvma_rmdi]);
5851 
5852 	/*
5853 	 * copy the packet data and then recycle the descriptor.
5854 	 */
5855 
5856 	if ((nbp = allocb(len + HME_FSTBYTE_OFFSET, BPRI_HI)) != NULL) {
5857 
5858 		DB_TYPE(nbp) = M_DATA;
5859 		bcopy(bp->b_rptr, nbp->b_rptr, len + HME_FSTBYTE_OFFSET);
5860 
5861 		CLONE_RMD(rmdp, nrmdp);
5862 		hmep->hme_rmblkp[nrmdi] = bp;
5863 		hmep->hme_rmblkp[rmdi] = NULL;
5864 		HMESYNCIOPB(hmep, nrmdp, sizeof (struct hme_rmd),
5865 		    DDI_DMA_SYNC_FORDEV);
5866 		CHECK_IOPB();
5867 
5868 		hmep->hme_ipackets++;
5869 
5870 		bp = nbp;
5871 
5872 		/*  Add the First Byte offset to the b_rptr and copy */
5873 		bp->b_rptr += HME_FSTBYTE_OFFSET;
5874 		bp->b_wptr = bp->b_rptr + len;
5875 
5876 		/*
5877 		 * update MIB II statistics
5878 		 */
5879 		BUMP_InNUcast(hmep, bp->b_rptr);
5880 		hmep->hme_rbytes += len;
5881 
5882 		type = get_ether_type(bp->b_rptr);
5883 
5884 		/*
5885 		 * TCP partial checksum in hardware
5886 		 */
5887 		if (type == ETHERTYPE_IP || type == ETHERTYPE_IPV6) {
5888 			uint16_t cksum = ~rflags & HMERMD_CKSUM;
5889 			uint_t end = len - sizeof (struct ether_header);
5890 			(void) hcksum_assoc(bp, NULL, NULL, 0,
5891 			    0, end, cksum, HCK_PARTIALCKSUM, 0);
5892 		}
5893 
5894 		return (bp);
5895 
5896 	} else {
5897 		CLONE_RMD(rmdp, nrmdp);
5898 		hmep->hme_rmblkp[nrmdi] = bp;
5899 		hmep->hme_rmblkp[rmdi] = NULL;
5900 		HMESYNCIOPB(hmep, nrmdp, sizeof (struct hme_rmd),
5901 		    DDI_DMA_SYNC_FORDEV);
5902 		CHECK_IOPB();
5903 
5904 		hmep->hme_allocbfail++;
5905 		hmep->hme_norcvbuf++;
5906 		HME_DEBUG_MSG1(hmep, SEVERITY_UNKNOWN, RX_MSG,
5907 		    "allocb failure");
5908 
5909 		return (NULL);
5910 	}
5911 }
5912 
5913 static mblk_t *
5914 hmeread(struct hme *hmep, volatile struct hme_rmd *rmdp, uint32_t rflags)
5915 {
5916 	long    rmdi;
5917 	mblk_t  *bp, *nbp;
5918 	uint_t		dvma_rmdi, dvma_nrmdi;
5919 	volatile	struct  hme_rmd *nrmdp;
5920 	t_uscalar_t	type;
5921 	uint32_t len;
5922 	uint16_t cksum;
5923 	long    nrmdi;
5924 	ddi_dma_cookie_t	c;
5925 
5926 	if (hmep->hme_dvmaxh == NULL) {
5927 		return (hmeread_dma(hmep, rmdp, rflags));
5928 	}
5929 
5930 	rmdi = rmdp - hmep->hme_rmdp;
5931 	dvma_rmdi = HMERINDEX(rmdi);
5932 	bp = hmep->hme_rmblkp[rmdi];
5933 	nrmdp = NEXTRMD(hmep, hmep->hme_rlastp);
5934 	hmep->hme_rlastp = nrmdp;
5935 	nrmdi = nrmdp - hmep->hme_rmdp;
5936 	dvma_nrmdi = HMERINDEX(nrmdi);
5937 
5938 	ASSERT(dvma_rmdi == dvma_nrmdi);
5939 
5940 	/*
5941 	 * HMERMD_OWN has been cleared by the Happymeal hardware.
5942 	 */
5943 	len = (rflags & HMERMD_BUFSIZE) >> HMERMD_BUFSIZE_SHIFT;
5944 	cksum = ~rflags & HMERMD_CKSUM;
5945 
5946 	/*
5947 	 * Check for short, overflowed, and oversized packets.  The
5948 	 * processing is the same in all cases: reuse the buffer and
5949 	 * update the appropriate error counter.
5950 	 */
5951 	if ((len < ETHERMIN) || (rflags & HMERMD_OVFLOW) ||
5952 	    (len > (ETHERMAX + 4))) {
5953 		if (len < ETHERMIN) {
5954 			hmep->hme_runt++;
5955 		} else {
5957 			hmep->hme_buff++;
5958 			hmep->hme_toolong_errors++;
5959 		}
5960 
5961 		hmep->hme_ierrors++;
5962 		CLONE_RMD(rmdp, nrmdp);
5963 		HMESYNCIOPB(hmep, nrmdp, sizeof (struct hme_rmd),
5964 		    DDI_DMA_SYNC_FORDEV);
5965 		CHECK_IOPB();
5966 		hmep->hme_rmblkp[nrmdi] = bp;
5967 		hmep->hme_rmblkp[rmdi] = NULL;
5968 		return (NULL);
5969 	}
5970 
5971 	/*
5972 	 * Copy small incoming packets to reduce memory consumption. The
5973 	 * performance loss is compensated by the reduced overhead for
5974 	 * DMA setup. The extra bytes before the actual data are copied
5975 	 * to maintain the alignment of the payload.
5976 	 */
5977 	if ((len <= hme_rx_bcopy_max) &&
5978 	    ((nbp = allocb(len + HME_FSTBYTE_OFFSET, BPRI_LO)) != NULL)) {
5979 		dvma_sync(hmep->hme_dvmarh, 2 * dvma_rmdi,
5980 		    DDI_DMA_SYNC_FORKERNEL);
5981 
5982 		bcopy(bp->b_rptr, nbp->b_wptr, len + HME_FSTBYTE_OFFSET);
5983 		nbp->b_rptr += HME_FSTBYTE_OFFSET;
5984 		nbp->b_wptr = nbp->b_rptr + len;
5985 
5986 		CLONE_RMD(rmdp, nrmdp);
5987 		HMESYNCIOPB(hmep, nrmdp, sizeof (struct hme_rmd),
5988 		    DDI_DMA_SYNC_FORDEV);
5989 		CHECK_IOPB();
5990 		hmep->hme_rmblkp[nrmdi] = bp;
5991 		hmep->hme_rmblkp[rmdi] = NULL;
5992 		hmep->hme_ipackets++;
5993 
5994 		bp = nbp;
5995 	} else {
5996 		dvma_unload(hmep->hme_dvmarh, 2 * dvma_rmdi,
5997 		    DDI_DMA_SYNC_FORKERNEL);
5998 
5999 		if ((nbp = hmeallocb(HMEBUFSIZE, BPRI_LO)) != NULL) {
6000 			dvma_kaddr_load(hmep->hme_dvmarh,
6001 			    (caddr_t)nbp->b_rptr, HMEBUFSIZE, 2 * dvma_nrmdi,
6002 			    &c);
6003 
6004 			PUT_RMD(nrmdp, c.dmac_address);
6005 			HMESYNCIOPB(hmep, nrmdp, sizeof (struct hme_rmd),
6006 			    DDI_DMA_SYNC_FORDEV);
6007 			CHECK_IOPB();
6008 
6009 			hmep->hme_rmblkp[nrmdi] = nbp;
6010 			hmep->hme_rmblkp[rmdi] = NULL;
6011 			hmep->hme_ipackets++;
6012 
6013 			/*
6014 			 * Add the First Byte offset to the b_rptr
6015 			 */
6016 			bp->b_rptr += HME_FSTBYTE_OFFSET;
6017 			bp->b_wptr = bp->b_rptr + len;
6018 		} else {
6019 			dvma_kaddr_load(hmep->hme_dvmarh,
6020 			    (caddr_t)bp->b_rptr, HMEBUFSIZE, 2 * dvma_nrmdi,
6021 			    &c);
6022 			PUT_RMD(nrmdp, c.dmac_address);
6023 			hmep->hme_rmblkp[nrmdi] = bp;
6024 			hmep->hme_rmblkp[rmdi] = NULL;
6025 			HMESYNCIOPB(hmep, nrmdp, sizeof (struct hme_rmd),
6026 			    DDI_DMA_SYNC_FORDEV);
6027 			CHECK_IOPB();
6028 
6029 			hmep->hme_allocbfail++;
6030 			hmep->hme_norcvbuf++;
6031 			HME_DEBUG_MSG1(hmep, SEVERITY_LOW, RX_MSG,
6032 			    "allocb fail");
6033 
6034 			bp = NULL;
6035 		}
6036 	}
6037 
6038 	if (bp != NULL) {
6039 
6040 		/*
6041 		 * update MIB II statistics
6042 		 */
6043 		BUMP_InNUcast(hmep, bp->b_rptr);
6044 		hmep->hme_rbytes += len;
6045 
6046 		type = get_ether_type(bp->b_rptr);
6047 
6048 		/*
6049 		 * TCP partial checksum in hardware
6050 		 */
6051 		if (type == ETHERTYPE_IP || type == ETHERTYPE_IPV6) {
6052 			uint_t end = len - sizeof (struct ether_header);
6053 			(void) hcksum_assoc(bp, NULL, NULL, 0,
6054 			    0, end, cksum, HCK_PARTIALCKSUM, 0);
6055 		}
6056 	}
6057 	return (bp);
6058 }
6059 
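/*
 * The hme_rx_bcopy_max test in hmeread() is the usual copy-versus-
 * remap receive tradeoff.  In outline (names as in the code above):
 *
 *	if (len <= hme_rx_bcopy_max)
 *		copy the frame into a fresh small mblk; the original
 *		DMA buffer stays loaded and is recycled in place;
 *	else
 *		unload the full-size buffer, send it upstream, and
 *		load a replacement buffer into the ring slot.
 *
 * Copying short frames avoids a dvma_unload()/dvma_kaddr_load() pair
 * per packet, which presumably costs more than the bcopy() at small
 * lengths.
 */
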
6060 #ifdef  HME_DEBUG
6061 /*VARARGS*/
6062 static void
6063 hme_debug_msg(char *file, uint_t line, struct hme *hmep, uint_t severity,
6064 		msg_t type, char *fmt, ...)
6065 {
6066 	char	msg_buffer[255];
6067 	va_list	ap;
6068 
6069 #ifdef	HIGH_SEVERITY
6070 	if (severity != SEVERITY_HIGH)
6071 		return;
6072 #endif
6073 	if (hme_debug_level >= type) {
6074 		va_start(ap, fmt);
6075 		(void) vsnprintf(msg_buffer, sizeof (msg_buffer), fmt, ap);
6076 
6077 		cmn_err(CE_CONT, "D: %s (%d): %s\n",
6078 		    msg_string[type], line, msg_buffer);
6079 		va_end(ap);
6080 	}
6081 }
6082 #endif
6083 
6084 /*VARARGS*/
6085 /* ARGSUSED */
6086 static void
6087 hme_fault_msg(char *file, uint_t line, struct hme *hmep, uint_t severity,
6088 		msg_t type, char *fmt, ...)
6089 {
6090 	char	msg_buffer[255];
6091 	va_list	ap;
6092 
6093 	va_start(ap, fmt);
6094 	(void) vsnprintf(msg_buffer, sizeof (msg_buffer), fmt, ap);
6095 
6096 	if (hmep == NULL) {
6097 		cmn_err(CE_NOTE, "hme : %s", msg_buffer);
6098 
6099 	} else if (type == DISPLAY_MSG) {
6100 		cmn_err(CE_CONT, "?%s%d : %s\n", ddi_driver_name(hmep->dip),
6101 		    hmep->instance, msg_buffer);
6102 	} else if (severity == SEVERITY_HIGH) {
6103 		cmn_err(CE_WARN, "%s%d : %s, SEVERITY_HIGH, %s\n",
6104 		    ddi_driver_name(hmep->dip), hmep->instance,
6105 		    msg_buffer, msg_string[type]);
6106 	} else {
6107 		cmn_err(CE_CONT, "%s%d : %s\n", ddi_driver_name(hmep->dip),
6108 		    hmep->instance, msg_buffer);
6109 	}
6110 	va_end(ap);
6111 }
6112 
6113 /*
6114  * If this is the first init, do not bother to save the
6115  * counters.  They should be 0, but do not count on it.
6116  */
6117 static void
6118 hmesavecntrs(struct hme *hmep)
6119 {
6120 	uint32_t fecnt, aecnt, lecnt, rxcv;
6121 	uint32_t ltcnt, excnt;
6122 
6123 	/* XXX What all gets added in ierrors and oerrors? */
6124 	fecnt = GET_MACREG(fecnt);
6125 	PUT_MACREG(fecnt, 0);
6126 
6127 	aecnt = GET_MACREG(aecnt);
6128 	hmep->hme_align_errors += aecnt;
6129 	PUT_MACREG(aecnt, 0);
6130 
6131 	lecnt = GET_MACREG(lecnt);
6132 	hmep->hme_lenerr += lecnt;
6133 	PUT_MACREG(lecnt, 0);
6134 
6135 	rxcv = GET_MACREG(rxcv);
6136 #ifdef HME_CODEVIOL_BUG
6137 	/*
6138 	 * Ignore rxcv errors for Sbus/FEPS 2.1 or earlier
6139 	 */
6140 	if (!hmep->hme_rxcv_enable) {
6141 		rxcv = 0;
6142 	}
6143 #endif
6144 	hmep->hme_cvc += rxcv;
6145 	PUT_MACREG(rxcv, 0);
6146 
6147 	ltcnt = GET_MACREG(ltcnt);
6148 	hmep->hme_tlcol += ltcnt;
6149 	PUT_MACREG(ltcnt, 0);
6150 
6151 	excnt = GET_MACREG(excnt);
6152 	hmep->hme_excol += excnt;
6153 	PUT_MACREG(excnt, 0);
6154 
6155 	hmep->hme_fcs_errors += fecnt;
6156 	hmep->hme_ierrors += (fecnt + aecnt + lecnt);
6157 	hmep->hme_oerrors += (ltcnt + excnt);
6158 	hmep->hme_coll += (GET_MACREG(nccnt) + ltcnt);
6159 
6160 	PUT_MACREG(nccnt, 0);
6161 	CHECK_MACREG();
6162 }
6163 
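/*
 * Note that each counter in hmesavecntrs() is read and then zeroed in
 * two separate accesses, so events landing in that window are lost
 * from the software totals.  The error appears to have been judged
 * acceptable given how narrow the hardware counters are.
 */
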
6164 /*
6165  * ndd support functions to get/set parameters
6166  */
6167 /* Free the Named Dispatch Table by calling hme_nd_free */
6168 static void
6169 hme_param_cleanup(struct hme *hmep)
6170 {
6171 	if (hmep->hme_g_nd)
6172 		(void) hme_nd_free(&hmep->hme_g_nd);
6173 }
6174 
6175 /*
6176  * Extracts the value from the hme parameter array and prints the
6177  * parameter value. cp points to the required parameter.
6178  */
6179 /* ARGSUSED */
6180 static int
6181 hme_param_get(queue_t *q, mblk_t *mp, caddr_t cp)
6182 {
6183 	hmeparam_t *hmepa = (hmeparam_t *)cp;
6184 
6185 	(void) mi_mpprintf(mp, "%d", hmepa->hme_param_val);
6186 	return (0);
6187 }
6188 
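/*
 * These get/set routines back the ndd(1M) interface.  A typical
 * exchange from userland, with parameter names that are registered
 * from the driver's parameter table elsewhere in this file (shown
 * for illustration only):
 *
 *	# ndd /dev/hme \?			list parameters
 *	# ndd /dev/hme link_speed		calls hme_param_get()
 *	# ndd -set /dev/hme adv_autoneg_cap 1	calls hme_param_set()
 */
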
6189 /*
6190  * Register each element of the parameter array with the
6191  * named dispatch handler. Each element is loaded using
6192  * hme_nd_load()
6193  */
6194 /* ARGSUSED */
6195 static int
6196 hme_param_register(struct hme *hmep, hmeparam_t *hmepa, int cnt)
6197 {
6198 	int i;
6199 
6200 	/* First 4 elements are read-only */
6201 	for (i = 0; i < 4; i++, hmepa++)
6202 		if (!hme_nd_load(&hmep->hme_g_nd, hmepa->hme_param_name,
6203 		    (pfi_t)hme_param_get, (pfi_t)0, (caddr_t)hmepa)) {
6204 			(void) hme_nd_free(&hmep->hme_g_nd);
6205 			return (B_FALSE);
6206 		}
6207 	/* Next 10 elements are read and write */
6208 	for (i = 0; i < 10; i++, hmepa++)
6209 		if (hmepa->hme_param_name && hmepa->hme_param_name[0]) {
6210 			if (!hme_nd_load(&hmep->hme_g_nd,
6211 			    hmepa->hme_param_name, (pfi_t)hme_param_get,
6212 			    (pfi_t)hme_param_set, (caddr_t)hmepa)) {
6213 				(void) hme_nd_free(&hmep->hme_g_nd);
6214 				return (B_FALSE);
6215 
6216 			}
6217 		}
6218 	/* Next 12 elements are read-only */
6219 	for (i = 0; i < 12; i++, hmepa++)
6220 		if (!hme_nd_load(&hmep->hme_g_nd, hmepa->hme_param_name,
6221 		    (pfi_t)hme_param_get, (pfi_t)0, (caddr_t)hmepa)) {
6222 			(void) hme_nd_free(&hmep->hme_g_nd);
6223 			return (B_FALSE);
6224 		}
6225 	/* Next 3 elements are read and write */
6226 	for (i = 0; i < 3; i++, hmepa++)
6227 		if (hmepa->hme_param_name && hmepa->hme_param_name[0]) {
6228 			if (!hme_nd_load(&hmep->hme_g_nd,
6229 			    hmepa->hme_param_name, (pfi_t)hme_param_get,
6230 			    (pfi_t)hme_param_set, (caddr_t)hmepa)) {
6231 				(void) hme_nd_free(&hmep->hme_g_nd);
6232 				return (B_FALSE);
6233 			}
6234 		}
6235 
6236 	return (B_TRUE);
6237 }
6238 
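/*
 * Note that the hard-coded group sizes above (4 read-only, 10
 * read-write, 12 read-only, 3 read-write) must track the layout of
 * the hmeparam array this routine is handed; any change to that
 * table has to be mirrored here.
 */
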
6239 /*
6240  * Validates the new value and stores it in the hme parameter
6241  * entry pointed to by cp.
6242  */
6243 /* ARGSUSED */
6244 static int
6245 hme_param_set(queue_t *q, mblk_t *mp, char *value, caddr_t cp)
6246 {
6247 	char *end;
6248 	size_t new_value;
6249 	hmeparam_t *hmepa = (hmeparam_t *)cp;
6250 
6251 	new_value = mi_strtol(value, &end, 10);
6252 	if (end == value || new_value < hmepa->hme_param_min ||
6253 	    new_value > hmepa->hme_param_max) {
6254 		return (EINVAL);
6255 	}
6256 	hmepa->hme_param_val = new_value;
6257 	return (0);
6258 }
6260 
6261 /* Free the table pointed to by 'nd_pparam' */
6262 static void
6263 hme_nd_free(caddr_t *nd_pparam)
6264 {
6265 	ND	*nd;
6266 
6267 	if ((nd = (ND *)(*nd_pparam)) != NULL) {
6268 		if (nd->nd_tbl)
6269 			mi_free((char *)nd->nd_tbl);
6270 		mi_free((char *)nd);
6271 		*nd_pparam = NULL;
6272 	}
6273 }
6274 
6275 static int
6276 hme_nd_getset(queue_t *q, caddr_t nd_param, MBLKP mp)
6277 {
6278 	int	err;
6279 	IOCP	iocp;
6280 	MBLKP	mp1;
6281 	ND	*nd;
6282 	NDE	*nde;
6283 	char	*valp;
6284 	size_t	avail;
6285 
6286 	if (!nd_param)
6287 		return (B_FALSE);
6288 
6289 	nd = (ND *)nd_param;
6290 	iocp = (IOCP)mp->b_rptr;
6291 	if ((iocp->ioc_count == 0) || !(mp1 = mp->b_cont)) {
6292 		mp->b_datap->db_type = M_IOCACK;
6293 		iocp->ioc_count = 0;
6294 		iocp->ioc_error = EINVAL;
6295 		return (B_TRUE);
6296 	}
6297 
6298 	/*
6299 	 * NOTE - logic throughout nd_xxx assumes single data block for ioctl.
6300 	 *	However, existing code sends in some big buffers.
6301 	 */
6302 	avail = iocp->ioc_count;
6303 	if (mp1->b_cont) {
6304 		freemsg(mp1->b_cont);
6305 		mp1->b_cont = NULL;
6306 	}
6307 
6308 	mp1->b_datap->db_lim[-1] = '\0';	/* Force null termination */
6309 	valp = (char *)mp1->b_rptr;
6310 	for (nde = nd->nd_tbl; /* */; nde++) {
6311 		if (!nde->nde_name)
6312 			return (B_FALSE);
6313 		if (mi_strcmp(nde->nde_name, valp) == 0)
6314 			break;
6315 	}
6316 
6317 	err = EINVAL;
6318 	while (*valp++)
6319 		;
6320 	if (!*valp || valp >= (char *)mp1->b_wptr)
6321 		valp = NULL;
6322 	switch (iocp->ioc_cmd) {
6323 	case ND_GET:
6324 /*
6325  * (temporary) hack: "*valp" is size of user buffer for copyout. If result
6326  * of action routine is too big, free excess and return ioc_rval as buffer
6327  * size needed.  Return as many mblocks as will fit, free the rest.  For
6328  * backward compatibility, assume size of original ioctl buffer if "*valp"
6329  * bad or not given.
6330  */
6331 		if (valp)
6332 			avail = mi_strtol(valp, (char **)0, 10);
6333 		/* We overwrite the name/value with the reply data */
6334 		{
6335 			mblk_t *mp2 = mp1;
6336 
6337 			while (mp2) {
6338 				mp2->b_wptr = mp2->b_rptr;
6339 				mp2 = mp2->b_cont;
6340 			}
6341 		}
6342 		err = (*nde->nde_get_pfi)(q, mp1, nde->nde_data, iocp->ioc_cr);
6343 		if (!err) {
6344 			size_t	size_out;
6345 			ssize_t	excess;
6346 
6347 			iocp->ioc_rval = 0;
6348 
6349 			/* Tack on the null */
6350 			(void) mi_mpprintf_putc((char *)mp1, '\0');
6351 			size_out = msgdsize(mp1);
6352 			excess = size_out - avail;
6353 			if (excess > 0) {
6354 				iocp->ioc_rval = (int)size_out;
6355 				size_out -= excess;
6356 				(void) adjmsg(mp1, -(excess + 1));
6357 				(void) mi_mpprintf_putc((char *)mp1, '\0');
6358 			}
6359 			iocp->ioc_count = size_out;
6360 		}
6361 		break;
6362 
6363 	case ND_SET:
6364 		if (valp) {
6365 			if ((iocp->ioc_cr != NULL) &&
6366 			    ((err = secpolicy_net_config(iocp->ioc_cr, B_FALSE))
6367 			    == 0)) {
6368 				err = (*nde->nde_set_pfi)(q, mp1, valp,
6369 				    nde->nde_data, iocp->ioc_cr);
6370 			}
6371 			iocp->ioc_count = 0;
6372 			freemsg(mp1);
6373 			mp->b_cont = NULL;
6374 		}
6375 		break;
6376 
6377 	default:
6378 		break;
6379 	}
6380 
6381 	iocp->ioc_error = err;
6382 	mp->b_datap->db_type = M_IOCACK;
6383 	return (B_TRUE);
6384 }
6385 
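/*
 * A worked example of the ND_GET sizing logic above, with invented
 * numbers: if the user buffer advertises avail = 1024 bytes but the
 * formatted reply plus its terminating NUL comes to size_out = 1500,
 * then excess = 476, ioc_rval is set to 1500 (telling the caller how
 * large a buffer to retry with), adjmsg() trims excess + 1 bytes, and
 * a fresh NUL is appended so exactly 1024 well-formed bytes go back.
 */
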
6386 /*
6387  * Load 'name' into the named dispatch table pointed to by 'ndp'.
6388  * 'ndp' should be the address of a char pointer cell.  If the table
6389  * does not exist (*ndp == 0), a new table is allocated and 'ndp'
6390  * is stuffed.  If there is not enough space in the table for a new
6391  * entry, more space is allocated.
6392  */
6393 static boolean_t
6394 hme_nd_load(caddr_t *nd_pparam, char *name, pfi_t get_pfi,
6395     pfi_t set_pfi, caddr_t data)
6396 {
6397 	ND	*nd;
6398 	NDE	*nde;
6399 
6400 	if (!nd_pparam)
6401 		return (B_FALSE);
6402 
6403 	if ((nd = (ND *)(*nd_pparam)) == NULL) {
6404 		if ((nd = (ND *)mi_alloc(sizeof (ND), BPRI_MED)) == NULL)
6405 			return (B_FALSE);
6406 		bzero(nd, sizeof (ND));
6407 		*nd_pparam = (caddr_t)nd;
6408 	}
6409 
6410 	if (nd->nd_tbl) {
6411 		for (nde = nd->nd_tbl; nde->nde_name; nde++) {
6412 			if (mi_strcmp(name, nde->nde_name) == 0)
6413 				goto fill_it;
6414 		}
6415 	}
6416 
6417 	if (nd->nd_free_count <= 1) {
6418 		if ((nde = (NDE *)mi_alloc(nd->nd_size +
6419 		    NDE_ALLOC_SIZE, BPRI_MED)) == NULL)
6420 			return (B_FALSE);
6421 		bzero(nde, nd->nd_size + NDE_ALLOC_SIZE);
6422 		nd->nd_free_count += NDE_ALLOC_COUNT;
6423 		if (nd->nd_tbl) {
6424 			bcopy(nd->nd_tbl, nde, nd->nd_size);
6425 			mi_free((char *)nd->nd_tbl);
6426 		} else {
6427 			nd->nd_free_count--;
6428 			nde->nde_name = "?";
6429 			nde->nde_get_pfi = nd_get_names;
6430 			nde->nde_set_pfi = nd_set_default;
6431 		}
6432 		nde->nde_data = (caddr_t)nd;
6433 		nd->nd_tbl = nde;
6434 		nd->nd_size += NDE_ALLOC_SIZE;
6435 	}
6436 
6437 	for (nde = nd->nd_tbl; nde->nde_name; nde++)
6438 		;
6439 	nd->nd_free_count--;
6440 fill_it:
6441 	nde->nde_name = name;
6442 	nde->nde_get_pfi = get_pfi ? get_pfi : nd_get_default;
6443 	nde->nde_set_pfi = set_pfi ? set_pfi : nd_set_default;
6444 	nde->nde_data = data;
6445 	return (B_TRUE);
6446 }
6447 
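/*
 * The "?" entry installed on the first allocation above is what makes
 * "ndd /dev/hme \?" work: its get routine, nd_get_names(), walks the
 * table and prints every registered parameter name.
 */
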
6448 /*
6449  * To set up the mac address for the network interface:
6450  * The adapter card may support a local mac address which is published
6451  * in a device node property "local-mac-address". This mac address is
6452  * treated as the factory-installed mac address for the DLPI interface.
6453  * If the adapter firmware has used the device for diskless boot
6454  * operation, it publishes a property called "mac-address" for use by
6455  * inetboot and the device driver.
6456  * If "mac-address" is not found, the system options property
6457  * "local-mac-address" is used to select the mac-address. If this option
6458  * is set to "true", and "local-mac-address" has been found, then
6459  * local-mac-address is used; otherwise the system mac address is used
6460  * by calling the "localetheraddr()" function.
6461  */
6462 static void
6463 hme_setup_mac_address(struct hme *hmep, dev_info_t *dip)
6464 {
6465 	char	*prop;
6466 	int	prop_len = sizeof (int);
6467 
6468 	hmep->hme_addrflags = 0;
6469 
6470 	/*
6471 	 * Check if it is an adapter with its own local mac address
6472 	 * If it is present, save it as the "factory-address"
6473 	 * for this adapter.
6474 	 */
6475 	if (ddi_getlongprop(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
6476 	    "local-mac-address",
6477 	    (caddr_t)&prop, &prop_len) == DDI_PROP_SUCCESS) {
6478 		if (prop_len == ETHERADDRL) {
6479 			hmep->hme_addrflags = HME_FACTADDR_PRESENT;
6480 			ether_bcopy(prop, &hmep->hme_factaddr);
6481 			HME_FAULT_MSG2(hmep, SEVERITY_NONE, DISPLAY_MSG,
6482 			    lether_addr_msg,
6483 			    ether_sprintf(&hmep->hme_factaddr));
6484 		}
6485 		kmem_free(prop, prop_len);
6486 	}
6487 
6488 	/*
6489 	 * Check if the adapter has published "mac-address" property.
6490 	 * If it is present, use it as the mac address for this device.
6491 	 */
6492 	if (ddi_getlongprop(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
6493 	    "mac-address", (caddr_t)&prop, &prop_len) == DDI_PROP_SUCCESS) {
6494 		if (prop_len >= ETHERADDRL) {
6495 			ether_bcopy(prop, &hmep->hme_ouraddr);
6496 			kmem_free(prop, prop_len);
6497 			return;
6498 		}
6499 		kmem_free(prop, prop_len);
6500 	}
6501 
6502 	if (ddi_getlongprop(DDI_DEV_T_ANY, dip, 0, "local-mac-address?",
6503 	    (caddr_t)&prop, &prop_len) == DDI_PROP_SUCCESS) {
6504 		if ((strncmp("true", prop, prop_len) == 0) &&
6505 		    (hmep->hme_addrflags & HME_FACTADDR_PRESENT)) {
6506 			hmep->hme_addrflags |= HME_FACTADDR_USE;
6507 			ether_bcopy(&hmep->hme_factaddr, &hmep->hme_ouraddr);
6508 			kmem_free(prop, prop_len);
6509 			HME_FAULT_MSG1(hmep, SEVERITY_NONE, DISPLAY_MSG,
6510 			    lmac_addr_msg);
6511 			return;
6512 		}
6513 		kmem_free(prop, prop_len);
6514 	}
6515 
6516 	/*
6517 	 * Get the system ethernet address.
6518 	 */
6519 	(void) localetheraddr((struct ether_addr *)NULL, &hmep->hme_ouraddr);
6520 }
6521 
6522 /* ARGSUSED */
6523 static void
6524 hme_check_acc_handle(char *file, uint_t line, struct hme *hmep,
6525     ddi_acc_handle_t handle)
6526 {
6527 }
6528 
6529 /* ARGSUSED */
6530 static void
6531 hme_check_dma_handle(char *file, uint_t line, struct hme *hmep,
6532     ddi_dma_handle_t handle)
6533 {
6534 }
6535 
6536 static void *
6537 hmeallocb(size_t size, uint_t pri)
6538 {
6539 	mblk_t  *mp;
6540 
6541 	if ((mp = allocb(size + 3 * HMEBURSTSIZE, pri)) == NULL) {
6542 		return (NULL);
6543 	}
6544 	mp->b_wptr = (uchar_t *)ROUNDUP2(mp->b_wptr, HMEBURSTSIZE);
6545 	mp->b_rptr = mp->b_wptr;
6546 
6547 	return (mp);
6548 }
6549
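/*
 * hmeallocb() pads the allocation by 3 * HMEBURSTSIZE so that, after
 * ROUNDUP2() aligns the read/write pointers to a burst boundary, at
 * least "size" usable bytes remain.  Sketch assuming a 64-byte
 * HMEBURSTSIZE: a buffer whose data area starts at 0x...c8 is rounded
 * up to 0x...100, wasting at most HMEBURSTSIZE - 1 leading bytes; the
 * remaining padding leaves slack for the DMA engine to burst past the
 * nominal end of the frame.
 */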