xref: /illumos-gate/usr/src/uts/common/io/sfe/sfe_util.c (revision 5c5f1371)
1 /*
2  * sfe_util.c: general ethernet mac driver framework version 2.6
3  *
4  * Copyright (c) 2002-2008 Masayuki Murayama.  All rights reserved.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions are met:
8  *
9  * 1. Redistributions of source code must retain the above copyright notice,
10  *    this list of conditions and the following disclaimer.
11  *
12  * 2. Redistributions in binary form must reproduce the above copyright notice,
13  *    this list of conditions and the following disclaimer in the documentation
14  *    and/or other materials provided with the distribution.
15  *
16  * 3. Neither the name of the author nor the names of its contributors may be
17  *    used to endorse or promote products derived from this software without
18  *    specific prior written permission.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
22  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
23  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
24  * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
25  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
26  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
27  * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
28  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
29  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
30  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
31  * DAMAGE.
32  */
33 
34 /*
35  * Copyright 2010 Sun Microsystems, Inc.  All rights reserved.
36  * Use is subject to license terms.
37  */
38 
39 /*
40  * System Header files.
41  */
42 #include <sys/types.h>
43 #include <sys/conf.h>
44 #include <sys/debug.h>
45 #include <sys/kmem.h>
46 #include <sys/vtrace.h>
47 #include <sys/ethernet.h>
48 #include <sys/modctl.h>
49 #include <sys/errno.h>
50 #include <sys/ddi.h>
51 #include <sys/sunddi.h>
52 #include <sys/stream.h>		/* required for MBLK* */
53 #include <sys/strsun.h>		/* required for mionack() */
54 #include <sys/byteorder.h>
55 #include <sys/sysmacros.h>
56 #include <sys/pci.h>
57 #include <inet/common.h>
58 #include <inet/led.h>
59 #include <inet/mi.h>
60 #include <inet/nd.h>
61 #include <sys/crc32.h>
62 
63 #include <sys/note.h>
64 
65 #include "sfe_mii.h"
66 #include "sfe_util.h"
67 
68 
69 
70 extern char ident[];
71 
72 /* Debugging support */
73 #ifdef GEM_DEBUG_LEVEL
74 static int gem_debug = GEM_DEBUG_LEVEL;
75 #define	DPRINTF(n, args)	if (gem_debug > (n)) cmn_err args
76 #else
77 #define	DPRINTF(n, args)
78 #undef ASSERT
79 #define	ASSERT(x)
80 #endif
81 
82 #define	IOC_LINESIZE	0x40	/* Is it right for amd64? */
83 
84 /*
85  * Useful macros and typedefs
86  */
87 #define	ROUNDUP(x, a)	(((x) + (a) - 1) & ~((a) - 1))
88 
89 #define	GET_NET16(p)	((((uint8_t *)(p))[0] << 8)| ((uint8_t *)(p))[1])
90 #define	GET_ETHERTYPE(p)	GET_NET16(((uint8_t *)(p)) + ETHERADDRL*2)
91 
92 #define	GET_IPTYPEv4(p)	(((uint8_t *)(p))[sizeof (struct ether_header) + 9])
93 #define	GET_IPTYPEv6(p)	(((uint8_t *)(p))[sizeof (struct ether_header) + 6])
94 
95 
96 #ifndef INT32_MAX
97 #define	INT32_MAX	0x7fffffff
98 #endif
99 
100 #define	VTAG_OFF	(ETHERADDRL*2)
101 #ifndef VTAG_SIZE
102 #define	VTAG_SIZE	4
103 #endif
104 #ifndef VTAG_TPID
105 #define	VTAG_TPID	0x8100U
106 #endif
107 
108 #define	GET_TXBUF(dp, sn)	\
109 	&(dp)->tx_buf[SLOT((dp)->tx_slots_base + (sn), (dp)->gc.gc_tx_buf_size)]
110 
111 #define	TXFLAG_VTAG(flag)	\
112 	(((flag) & GEM_TXFLAG_VTAG) >> GEM_TXFLAG_VTAG_SHIFT)
113 
114 #define	MAXPKTBUF(dp)	\
115 	((dp)->mtu + sizeof (struct ether_header) + VTAG_SIZE + ETHERFCSL)
116 
117 #define	WATCH_INTERVAL_FAST	drv_usectohz(100*1000)	/* 100mS */
118 #define	BOOLEAN(x)	((x) != 0)
119 
120 /*
 * Macros to distinguish chip generations.
122  */
123 
124 /*
125  * Private functions
126  */
127 static void gem_mii_start(struct gem_dev *);
128 static void gem_mii_stop(struct gem_dev *);
129 
130 /* local buffer management */
131 static void gem_nd_setup(struct gem_dev *dp);
132 static void gem_nd_cleanup(struct gem_dev *dp);
133 static int gem_alloc_memory(struct gem_dev *);
134 static void gem_free_memory(struct gem_dev *);
135 static void gem_init_rx_ring(struct gem_dev *);
136 static void gem_init_tx_ring(struct gem_dev *);
137 __INLINE__ static void gem_append_rxbuf(struct gem_dev *, struct rxbuf *);
138 
139 static void gem_tx_timeout(struct gem_dev *);
140 static void gem_mii_link_watcher(struct gem_dev *dp);
141 static int gem_mac_init(struct gem_dev *dp);
142 static int gem_mac_start(struct gem_dev *dp);
143 static int gem_mac_stop(struct gem_dev *dp, uint_t flags);
144 static void gem_mac_ioctl(struct gem_dev *dp, queue_t *wq, mblk_t *mp);
145 
/* ethernet broadcast address (ff:ff:ff:ff:ff:ff), used for address compares */
static	struct ether_addr	gem_etherbroadcastaddr = {
	0xff, 0xff, 0xff, 0xff, 0xff, 0xff
};

/* link speed table in Mbps; presumably indexed by the driver's internal speed code — TODO confirm against callers */
int gem_speed_value[] = {10, 100, 1000};
151 
152 /* ============================================================== */
153 /*
154  * Misc runtime routines
155  */
156 /* ============================================================== */
157 /*
158  * Ether CRC calculation according to 21143 data sheet
159  */
160 uint32_t
gem_ether_crc_le(const uint8_t * addr,int len)161 gem_ether_crc_le(const uint8_t *addr, int len)
162 {
163 	uint32_t	crc;
164 
165 	CRC32(crc, addr, ETHERADDRL, 0xffffffffU, crc32_table);
166 	return (crc);
167 }
168 
/*
 * Compute the ethernet CRC (big-endian bit order) over the first
 * len bytes at addr, per the 21143 data sheet.  Each byte is fed
 * into the CRC register LSB first.
 *
 * Returns the raw 32-bit CRC (not inverted, not bit-reflected).
 */
uint32_t
gem_ether_crc_be(const uint8_t *addr, int len)
{
#define	CRC32_POLY_BE	0x04c11db7
	uint32_t	acc = 0xffffffffU;
	int		i;
	int		b;

	for (i = 0; i < len; i++) {
		uint32_t	byte = addr[i];

		for (b = 0; b < 8; b++) {
			/* feedback bit: MSB of the register xor next data bit */
			uint32_t	fb = ((acc >> 31) ^ byte) & 1;

			acc = (acc << 1) ^ (fb ? CRC32_POLY_BE : 0);
			byte >>= 1;
		}
	}
	return (acc);
#undef	CRC32_POLY_BE
}
188 
189 int
gem_prop_get_int(struct gem_dev * dp,char * prop_template,int def_val)190 gem_prop_get_int(struct gem_dev *dp, char *prop_template, int def_val)
191 {
192 	char	propname[32];
193 
194 	(void) sprintf(propname, prop_template, dp->name);
195 
196 	return (ddi_prop_get_int(DDI_DEV_T_ANY, dp->dip,
197 	    DDI_PROP_DONTPASS, propname, def_val));
198 }
199 
/*
 * Return the number of set bits in x (population count).
 */
static int
gem_population(uint32_t x)
{
	int	nbits = 0;

	/* Kernighan's trick: each step clears the lowest set bit */
	while (x != 0) {
		x &= x - 1;
		nbits++;
	}
	return (nbits);
}
214 
215 #ifdef GEM_DEBUG_LEVEL
216 #ifdef GEM_DEBUG_VLAN
/*
 * Debug helper: decode the leading headers of the packet in mp
 * (ethernet, optional VLAN tag, IPv4, TCP/UDP) and emit a one-line
 * summary via cmn_err.  When check_cksum is set, the TCP/UDP
 * checksum is verified with ip_cksum() as well.
 */
static void
gem_dump_packet(struct gem_dev *dp, char *title, mblk_t *mp,
    boolean_t check_cksum)
{
	char	msg[180];
	uint8_t	buf[18+20+20];	/* tagged ether + IPv4 + TCP headers */
	uint8_t	*p;
	size_t	offset;
	uint_t	ethertype;
	uint_t	proto;
	uint_t	ipproto = 0;
	uint_t	iplen;
	uint_t	iphlen;
	uint_t	tcplen;
	uint_t	udplen;
	uint_t	cksum;
	int	rest;
	int	len;
	char	*bp;	/* cursor into msg; always points at the NUL */
	mblk_t	*tp;
	extern uint_t	ip_cksum(mblk_t *, int, uint32_t);

	msg[0] = 0;
	bp = msg;

	/* flatten the first sizeof (buf) bytes of the (possibly chained) mblk */
	rest = sizeof (buf);
	offset = 0;
	for (tp = mp; tp; tp = tp->b_cont) {
		len = tp->b_wptr - tp->b_rptr;
		len = min(rest, len);
		bcopy(tp->b_rptr, &buf[offset], len);
		rest -= len;
		offset += len;
		if (rest == 0) {
			break;
		}
	}

	offset = 0;
	p = &buf[offset];

	/* ethernet address (printed as src -> dst) */
	sprintf(bp,
	    "ether: %02x:%02x:%02x:%02x:%02x:%02x"
	    " -> %02x:%02x:%02x:%02x:%02x:%02x",
	    p[6], p[7], p[8], p[9], p[10], p[11],
	    p[0], p[1], p[2], p[3], p[4], p[5]);
	bp = &msg[strlen(msg)];

	/* vlan tag and ethertype */
	ethertype = GET_ETHERTYPE(p);
	if (ethertype == VTAG_TPID) {
		sprintf(bp, " vtag:0x%04x", GET_NET16(&p[14]));
		bp = &msg[strlen(msg)];

		/* skip over the tag and re-read the real ethertype */
		offset += VTAG_SIZE;
		p = &buf[offset];
		ethertype = GET_ETHERTYPE(p);
	}
	sprintf(bp, " type:%04x", ethertype);
	bp = &msg[strlen(msg)];

	/* ethernet packet length, with per-fragment breakdown if chained */
	sprintf(bp, " mblklen:%d", msgdsize(mp));
	bp = &msg[strlen(msg)];
	if (mp->b_cont) {
		sprintf(bp, "(");
		bp = &msg[strlen(msg)];
		for (tp = mp; tp; tp = tp->b_cont) {
			if (tp == mp) {
				sprintf(bp, "%d", tp->b_wptr - tp->b_rptr);
			} else {
				sprintf(bp, "+%d", tp->b_wptr - tp->b_rptr);
			}
			bp = &msg[strlen(msg)];
		}
		sprintf(bp, ")");
		bp = &msg[strlen(msg)];
	}

	if (ethertype != ETHERTYPE_IP) {
		goto x;
	}

	/* ip address */
	offset += sizeof (struct ether_header);
	p = &buf[offset];
	ipproto = p[9];
	iplen = GET_NET16(&p[2]);
	sprintf(bp, ", ip: %d.%d.%d.%d -> %d.%d.%d.%d proto:%d iplen:%d",
	    p[12], p[13], p[14], p[15],
	    p[16], p[17], p[18], p[19],
	    ipproto, iplen);
	bp = (void *)&msg[strlen(msg)];

	iphlen = (p[0] & 0xf) * 4;

	/* cksum for pseudo header (src/dst addresses and protocol) */
	cksum = *(uint16_t *)&p[12];
	cksum += *(uint16_t *)&p[14];
	cksum += *(uint16_t *)&p[16];
	cksum += *(uint16_t *)&p[18];
	cksum += BE_16(ipproto);

	/* tcp or udp protocol header */
	offset += iphlen;
	p = &buf[offset];
	if (ipproto == IPPROTO_TCP) {
		tcplen = iplen - iphlen;
		sprintf(bp, ", tcp: len:%d cksum:%x",
		    tcplen, GET_NET16(&p[16]));
		bp = (void *)&msg[strlen(msg)];

		if (check_cksum) {
			cksum += BE_16(tcplen);
			cksum = (uint16_t)ip_cksum(mp, offset, cksum);
			sprintf(bp, " (%s)",
			    (cksum == 0 || cksum == 0xffff) ? "ok" : "ng");
			bp = (void *)&msg[strlen(msg)];
		}
	} else if (ipproto == IPPROTO_UDP) {
		udplen = GET_NET16(&p[4]);
		sprintf(bp, ", udp: len:%d cksum:%x",
		    udplen, GET_NET16(&p[6]));
		bp = (void *)&msg[strlen(msg)];

		/* a zero udp checksum means "not computed"; skip verification */
		if (GET_NET16(&p[6]) && check_cksum) {
			cksum += *(uint16_t *)&p[4];
			cksum = (uint16_t)ip_cksum(mp, offset, cksum);
			sprintf(bp, " (%s)",
			    (cksum == 0 || cksum == 0xffff) ? "ok" : "ng");
			bp = (void *)&msg[strlen(msg)];
		}
	}
x:
	cmn_err(CE_CONT, "!%s: %s: %s", dp->name, title, msg);
}
354 #endif /* GEM_DEBUG_VLAN */
355 #endif /* GEM_DEBUG_LEVEL */
356 
357 /* ============================================================== */
358 /*
359  * IO cache flush
360  */
361 /* ============================================================== */
362 __INLINE__ void
gem_rx_desc_dma_sync(struct gem_dev * dp,int head,int nslot,int how)363 gem_rx_desc_dma_sync(struct gem_dev *dp, int head, int nslot, int how)
364 {
365 	int	n;
366 	int	m;
367 	int	rx_desc_unit_shift = dp->gc.gc_rx_desc_unit_shift;
368 
369 	/* sync active descriptors */
370 	if (rx_desc_unit_shift < 0 || nslot == 0) {
371 		/* no rx descriptor ring */
372 		return;
373 	}
374 
375 	n = dp->gc.gc_rx_ring_size - head;
376 	if ((m = nslot - n) > 0) {
377 		(void) ddi_dma_sync(dp->desc_dma_handle,
378 		    (off_t)0,
379 		    (size_t)(m << rx_desc_unit_shift),
380 		    how);
381 		nslot = n;
382 	}
383 
384 	(void) ddi_dma_sync(dp->desc_dma_handle,
385 	    (off_t)(head << rx_desc_unit_shift),
386 	    (size_t)(nslot << rx_desc_unit_shift),
387 	    how);
388 }
389 
390 __INLINE__ void
gem_tx_desc_dma_sync(struct gem_dev * dp,int head,int nslot,int how)391 gem_tx_desc_dma_sync(struct gem_dev *dp, int head, int nslot, int how)
392 {
393 	int	n;
394 	int	m;
395 	int	tx_desc_unit_shift = dp->gc.gc_tx_desc_unit_shift;
396 
397 	/* sync active descriptors */
398 	if (tx_desc_unit_shift < 0 || nslot == 0) {
399 		/* no tx descriptor ring */
400 		return;
401 	}
402 
403 	n = dp->gc.gc_tx_ring_size - head;
404 	if ((m = nslot - n) > 0) {
405 		(void) ddi_dma_sync(dp->desc_dma_handle,
406 		    (off_t)(dp->tx_ring_dma - dp->rx_ring_dma),
407 		    (size_t)(m << tx_desc_unit_shift),
408 		    how);
409 		nslot = n;
410 	}
411 
412 	(void) ddi_dma_sync(dp->desc_dma_handle,
413 	    (off_t)((head << tx_desc_unit_shift)
414 	    + (dp->tx_ring_dma - dp->rx_ring_dma)),
415 	    (size_t)(nslot << tx_desc_unit_shift),
416 	    how);
417 }
418 
419 static void
gem_rx_start_default(struct gem_dev * dp,int head,int nslot)420 gem_rx_start_default(struct gem_dev *dp, int head, int nslot)
421 {
422 	gem_rx_desc_dma_sync(dp,
423 	    SLOT(head, dp->gc.gc_rx_ring_size), nslot,
424 	    DDI_DMA_SYNC_FORDEV);
425 }
426 
427 /* ============================================================== */
428 /*
429  * Buffer management
430  */
431 /* ============================================================== */
/*
 * Debug helper: dump all tx bookkeeping positions at the given
 * cmn_err level.  Each position is printed both as a raw sequence
 * number and as its ring slot ("seq[slot]"), followed by the length
 * of each region ("(+n)").
 */
static void
gem_dump_txbuf(struct gem_dev *dp, int level, const char *title)
{
	cmn_err(level,
	    "!%s: %s: tx_active: %d[%d] %d[%d] (+%d), "
	    "tx_softq: %d[%d] %d[%d] (+%d), "
	    "tx_free: %d[%d] %d[%d] (+%d), "
	    "tx_desc: %d[%d] %d[%d] (+%d), "
	    "intr: %d[%d] (+%d), ",
	    dp->name, title,
	    dp->tx_active_head,
	    SLOT(dp->tx_active_head, dp->gc.gc_tx_buf_size),
	    dp->tx_active_tail,
	    SLOT(dp->tx_active_tail, dp->gc.gc_tx_buf_size),
	    dp->tx_active_tail - dp->tx_active_head,
	    dp->tx_softq_head,
	    SLOT(dp->tx_softq_head, dp->gc.gc_tx_buf_size),
	    dp->tx_softq_tail,
	    SLOT(dp->tx_softq_tail, dp->gc.gc_tx_buf_size),
	    dp->tx_softq_tail - dp->tx_softq_head,
	    dp->tx_free_head,
	    SLOT(dp->tx_free_head, dp->gc.gc_tx_buf_size),
	    dp->tx_free_tail,
	    SLOT(dp->tx_free_tail, dp->gc.gc_tx_buf_size),
	    dp->tx_free_tail - dp->tx_free_head,
	    dp->tx_desc_head,
	    SLOT(dp->tx_desc_head, dp->gc.gc_tx_ring_size),
	    dp->tx_desc_tail,
	    SLOT(dp->tx_desc_tail, dp->gc.gc_tx_ring_size),
	    dp->tx_desc_tail - dp->tx_desc_head,
	    dp->tx_desc_intr,
	    SLOT(dp->tx_desc_intr, dp->gc.gc_tx_ring_size),
	    dp->tx_desc_intr - dp->tx_desc_head);
}
466 
467 static void
gem_free_rxbuf(struct rxbuf * rbp)468 gem_free_rxbuf(struct rxbuf *rbp)
469 {
470 	struct gem_dev	*dp;
471 
472 	dp = rbp->rxb_devp;
473 	ASSERT(mutex_owned(&dp->intrlock));
474 	rbp->rxb_next = dp->rx_buf_freelist;
475 	dp->rx_buf_freelist = rbp;
476 	dp->rx_buf_freecnt++;
477 }
478 
479 /*
 * gem_get_rxbuf: supply a receive buffer which has been mapped into
481  * DMA space.
482  */
483 struct rxbuf *
gem_get_rxbuf(struct gem_dev * dp,int cansleep)484 gem_get_rxbuf(struct gem_dev *dp, int cansleep)
485 {
486 	struct rxbuf		*rbp;
487 	uint_t			count = 0;
488 	int			i;
489 	int			err;
490 
491 	ASSERT(mutex_owned(&dp->intrlock));
492 
493 	DPRINTF(3, (CE_CONT, "!gem_get_rxbuf: called freecnt:%d",
494 	    dp->rx_buf_freecnt));
495 	/*
496 	 * Get rx buffer management structure
497 	 */
498 	rbp = dp->rx_buf_freelist;
499 	if (rbp) {
500 		/* get one from the recycle list */
501 		ASSERT(dp->rx_buf_freecnt > 0);
502 
503 		dp->rx_buf_freelist = rbp->rxb_next;
504 		dp->rx_buf_freecnt--;
505 		rbp->rxb_next = NULL;
506 		return (rbp);
507 	}
508 
509 	/*
510 	 * Allocate a rx buffer management structure
511 	 */
512 	rbp = kmem_zalloc(sizeof (*rbp), cansleep ? KM_SLEEP : KM_NOSLEEP);
513 	if (rbp == NULL) {
514 		/* no memory */
515 		return (NULL);
516 	}
517 
518 	/*
519 	 * Prepare a back pointer to the device structure which will be
520 	 * refered on freeing the buffer later.
521 	 */
522 	rbp->rxb_devp = dp;
523 
524 	/* allocate a dma handle for rx data buffer */
525 	if ((err = ddi_dma_alloc_handle(dp->dip,
526 	    &dp->gc.gc_dma_attr_rxbuf,
527 	    (cansleep ? DDI_DMA_SLEEP : DDI_DMA_DONTWAIT),
528 	    NULL, &rbp->rxb_dh)) != DDI_SUCCESS) {
529 
530 		cmn_err(CE_WARN,
531 		    "!%s: %s: ddi_dma_alloc_handle:1 failed, err=%d",
532 		    dp->name, __func__, err);
533 
534 		kmem_free(rbp, sizeof (struct rxbuf));
535 		return (NULL);
536 	}
537 
538 	/* allocate a bounce buffer for rx */
539 	if ((err = ddi_dma_mem_alloc(rbp->rxb_dh,
540 	    ROUNDUP(dp->rx_buf_len, IOC_LINESIZE),
541 	    &dp->gc.gc_buf_attr,
542 		/*
543 		 * if the nic requires a header at the top of receive buffers,
544 		 * it may access the rx buffer randomly.
545 		 */
546 	    (dp->gc.gc_rx_header_len > 0)
547 	    ? DDI_DMA_CONSISTENT : DDI_DMA_STREAMING,
548 	    cansleep ? DDI_DMA_SLEEP : DDI_DMA_DONTWAIT,
549 	    NULL,
550 	    &rbp->rxb_buf, &rbp->rxb_buf_len,
551 	    &rbp->rxb_bah)) != DDI_SUCCESS) {
552 
553 		cmn_err(CE_WARN,
554 		    "!%s: %s: ddi_dma_mem_alloc: failed, err=%d",
555 		    dp->name, __func__, err);
556 
557 		ddi_dma_free_handle(&rbp->rxb_dh);
558 		kmem_free(rbp, sizeof (struct rxbuf));
559 		return (NULL);
560 	}
561 
562 	/* Mapin the bounce buffer into the DMA space */
563 	if ((err = ddi_dma_addr_bind_handle(rbp->rxb_dh,
564 	    NULL, rbp->rxb_buf, dp->rx_buf_len,
565 	    ((dp->gc.gc_rx_header_len > 0)
566 	    ?(DDI_DMA_RDWR | DDI_DMA_CONSISTENT)
567 	    :(DDI_DMA_READ | DDI_DMA_STREAMING)),
568 	    cansleep ? DDI_DMA_SLEEP : DDI_DMA_DONTWAIT,
569 	    NULL,
570 	    rbp->rxb_dmacookie,
571 	    &count)) != DDI_DMA_MAPPED) {
572 
573 		ASSERT(err != DDI_DMA_INUSE);
574 		DPRINTF(0, (CE_WARN,
575 		    "!%s: ddi_dma_addr_bind_handle: failed, err=%d",
576 		    dp->name, __func__, err));
577 
578 		/*
579 		 * we failed to allocate a dma resource
580 		 * for the rx bounce buffer.
581 		 */
582 		ddi_dma_mem_free(&rbp->rxb_bah);
583 		ddi_dma_free_handle(&rbp->rxb_dh);
584 		kmem_free(rbp, sizeof (struct rxbuf));
585 		return (NULL);
586 	}
587 
588 	/* correct the rest of the DMA mapping */
589 	for (i = 1; i < count; i++) {
590 		ddi_dma_nextcookie(rbp->rxb_dh, &rbp->rxb_dmacookie[i]);
591 	}
592 	rbp->rxb_nfrags = count;
593 
594 	/* Now we successfully prepared an rx buffer */
595 	dp->rx_buf_allocated++;
596 
597 	return (rbp);
598 }
599 
600 /* ============================================================== */
601 /*
602  * memory resource management
603  */
604 /* ============================================================== */
/*
 * Allocate all DMA memory for the device: one contiguous, physically
 * bound area holding the rx descriptor ring, the tx descriptor ring
 * and an optional io area (in that order), plus one bound bounce
 * buffer per tx buffer slot.  Returns 0 on success or ENOMEM after
 * releasing everything allocated so far.
 */
static int
gem_alloc_memory(struct gem_dev *dp)
{
	caddr_t			ring;
	caddr_t			buf;
	size_t			req_size;
	size_t			ring_len;
	size_t			buf_len;
	ddi_dma_cookie_t	ring_cookie;
	ddi_dma_cookie_t	buf_cookie;
	uint_t			count;
	int			i;
	int			err;
	struct txbuf		*tbp;
	int			tx_buf_len;
	ddi_dma_attr_t		dma_attr_txbounce;

	DPRINTF(1, (CE_CONT, "!%s: %s: called", dp->name, __func__));

	dp->desc_dma_handle = NULL;
	req_size = dp->rx_desc_size + dp->tx_desc_size + dp->gc.gc_io_area_size;

	if (req_size > 0) {
		/*
		 * Alloc RX/TX descriptors and a io area.
		 */
		if ((err = ddi_dma_alloc_handle(dp->dip,
		    &dp->gc.gc_dma_attr_desc,
		    DDI_DMA_SLEEP, NULL,
		    &dp->desc_dma_handle)) != DDI_SUCCESS) {
			cmn_err(CE_WARN,
			    "!%s: %s: ddi_dma_alloc_handle failed: %d",
			    dp->name, __func__, err);
			return (ENOMEM);
		}

		if ((err = ddi_dma_mem_alloc(dp->desc_dma_handle,
		    req_size, &dp->gc.gc_desc_attr,
		    DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL,
		    &ring, &ring_len,
		    &dp->desc_acc_handle)) != DDI_SUCCESS) {
			cmn_err(CE_WARN,
			    "!%s: %s: ddi_dma_mem_alloc failed: "
			    "ret %d, request size: %d",
			    dp->name, __func__, err, (int)req_size);
			ddi_dma_free_handle(&dp->desc_dma_handle);
			return (ENOMEM);
		}

		/* the descriptor area must bind to a single cookie */
		if ((err = ddi_dma_addr_bind_handle(dp->desc_dma_handle,
		    NULL, ring, ring_len,
		    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
		    DDI_DMA_SLEEP, NULL,
		    &ring_cookie, &count)) != DDI_SUCCESS) {
			ASSERT(err != DDI_DMA_INUSE);
			cmn_err(CE_WARN,
			    "!%s: %s: ddi_dma_addr_bind_handle failed: %d",
			    dp->name, __func__, err);
			ddi_dma_mem_free(&dp->desc_acc_handle);
			ddi_dma_free_handle(&dp->desc_dma_handle);
			return (ENOMEM);
		}
		ASSERT(count == 1);

		/* set base of rx descriptor ring */
		dp->rx_ring = ring;
		dp->rx_ring_dma = ring_cookie.dmac_laddress;

		/* set base of tx descriptor ring */
		dp->tx_ring = dp->rx_ring + dp->rx_desc_size;
		dp->tx_ring_dma = dp->rx_ring_dma + dp->rx_desc_size;

		/* set base of io area */
		dp->io_area = dp->tx_ring + dp->tx_desc_size;
		dp->io_area_dma = dp->tx_ring_dma + dp->tx_desc_size;
	}

	/*
	 * Prepare DMA resources for tx packets
	 */
	ASSERT(dp->gc.gc_tx_buf_size > 0);

	/* Special dma attribute for tx bounce buffers */
	dma_attr_txbounce = dp->gc.gc_dma_attr_txbuf;
	dma_attr_txbounce.dma_attr_sgllen = 1;
	dma_attr_txbounce.dma_attr_align =
	    max(dma_attr_txbounce.dma_attr_align, IOC_LINESIZE);

	/* Size for tx bounce buffers must be max tx packet size. */
	tx_buf_len = MAXPKTBUF(dp);
	tx_buf_len = ROUNDUP(tx_buf_len, IOC_LINESIZE);

	ASSERT(tx_buf_len >= ETHERMAX+ETHERFCSL);

	for (i = 0, tbp = dp->tx_buf;
	    i < dp->gc.gc_tx_buf_size; i++, tbp++) {

		/* setup bounce buffers for tx packets */
		if ((err = ddi_dma_alloc_handle(dp->dip,
		    &dma_attr_txbounce,
		    DDI_DMA_SLEEP, NULL,
		    &tbp->txb_bdh)) != DDI_SUCCESS) {

			cmn_err(CE_WARN,
		    "!%s: %s ddi_dma_alloc_handle for bounce buffer failed:"
			    " err=%d, i=%d",
			    dp->name, __func__, err, i);
			goto err_alloc_dh;
		}

		if ((err = ddi_dma_mem_alloc(tbp->txb_bdh,
		    tx_buf_len,
		    &dp->gc.gc_buf_attr,
		    DDI_DMA_STREAMING, DDI_DMA_SLEEP, NULL,
		    &buf, &buf_len,
		    &tbp->txb_bah)) != DDI_SUCCESS) {
			cmn_err(CE_WARN,
		    "!%s: %s: ddi_dma_mem_alloc for bounce buffer failed"
			    "ret %d, request size %d",
			    dp->name, __func__, err, tx_buf_len);
			ddi_dma_free_handle(&tbp->txb_bdh);
			goto err_alloc_dh;
		}

		if ((err = ddi_dma_addr_bind_handle(tbp->txb_bdh,
		    NULL, buf, buf_len,
		    DDI_DMA_WRITE | DDI_DMA_STREAMING,
		    DDI_DMA_SLEEP, NULL,
		    &buf_cookie, &count)) != DDI_SUCCESS) {
				ASSERT(err != DDI_DMA_INUSE);
				cmn_err(CE_WARN,
	"!%s: %s: ddi_dma_addr_bind_handle for bounce buffer failed: %d",
				    dp->name, __func__, err);
				ddi_dma_mem_free(&tbp->txb_bah);
				ddi_dma_free_handle(&tbp->txb_bdh);
				goto err_alloc_dh;
		}
		ASSERT(count == 1);
		tbp->txb_buf = buf;
		tbp->txb_buf_dma = buf_cookie.dmac_laddress;
	}

	return (0);

err_alloc_dh:
	/* unwind the i tx bounce buffers bound so far */
	if (dp->gc.gc_tx_buf_size > 0) {
		while (i-- > 0) {
			(void) ddi_dma_unbind_handle(dp->tx_buf[i].txb_bdh);
			ddi_dma_mem_free(&dp->tx_buf[i].txb_bah);
			ddi_dma_free_handle(&dp->tx_buf[i].txb_bdh);
		}
	}

	if (dp->desc_dma_handle) {
		(void) ddi_dma_unbind_handle(dp->desc_dma_handle);
		ddi_dma_mem_free(&dp->desc_acc_handle);
		ddi_dma_free_handle(&dp->desc_dma_handle);
		dp->desc_dma_handle = NULL;
	}

	return (ENOMEM);
}
767 
768 static void
gem_free_memory(struct gem_dev * dp)769 gem_free_memory(struct gem_dev *dp)
770 {
771 	int		i;
772 	struct rxbuf	*rbp;
773 	struct txbuf	*tbp;
774 
775 	DPRINTF(1, (CE_CONT, "!%s: %s: called", dp->name, __func__));
776 
777 	/* Free TX/RX descriptors and tx padding buffer */
778 	if (dp->desc_dma_handle) {
779 		(void) ddi_dma_unbind_handle(dp->desc_dma_handle);
780 		ddi_dma_mem_free(&dp->desc_acc_handle);
781 		ddi_dma_free_handle(&dp->desc_dma_handle);
782 		dp->desc_dma_handle = NULL;
783 	}
784 
785 	/* Free dma handles for Tx */
786 	for (i = dp->gc.gc_tx_buf_size, tbp = dp->tx_buf; i--; tbp++) {
787 		/* Free bounce buffer associated to each txbuf */
788 		(void) ddi_dma_unbind_handle(tbp->txb_bdh);
789 		ddi_dma_mem_free(&tbp->txb_bah);
790 		ddi_dma_free_handle(&tbp->txb_bdh);
791 	}
792 
793 	/* Free rx buffer */
794 	while ((rbp = dp->rx_buf_freelist) != NULL) {
795 
796 		ASSERT(dp->rx_buf_freecnt > 0);
797 
798 		dp->rx_buf_freelist = rbp->rxb_next;
799 		dp->rx_buf_freecnt--;
800 
801 		/* release DMA mapping */
802 		ASSERT(rbp->rxb_dh != NULL);
803 
804 		/* free dma handles for rx bbuf */
805 		/* it has dma mapping always */
806 		ASSERT(rbp->rxb_nfrags > 0);
807 		(void) ddi_dma_unbind_handle(rbp->rxb_dh);
808 
809 		/* free the associated bounce buffer and dma handle */
810 		ASSERT(rbp->rxb_bah != NULL);
811 		ddi_dma_mem_free(&rbp->rxb_bah);
812 		/* free the associated dma handle */
813 		ddi_dma_free_handle(&rbp->rxb_dh);
814 
815 		/* free the base memory of rx buffer management */
816 		kmem_free(rbp, sizeof (struct rxbuf));
817 	}
818 }
819 
820 /* ============================================================== */
821 /*
822  * Rx/Tx descriptor slot management
823  */
824 /* ============================================================== */
825 /*
826  * Initialize an empty rx ring.
827  */
828 static void
gem_init_rx_ring(struct gem_dev * dp)829 gem_init_rx_ring(struct gem_dev *dp)
830 {
831 	int		i;
832 	int		rx_ring_size = dp->gc.gc_rx_ring_size;
833 
834 	DPRINTF(1, (CE_CONT, "!%s: %s ring_size:%d, buf_max:%d",
835 	    dp->name, __func__,
836 	    rx_ring_size, dp->gc.gc_rx_buf_max));
837 
838 	/* make a physical chain of rx descriptors */
839 	for (i = 0; i < rx_ring_size; i++) {
840 		(*dp->gc.gc_rx_desc_init)(dp, i);
841 	}
842 	gem_rx_desc_dma_sync(dp, 0, rx_ring_size, DDI_DMA_SYNC_FORDEV);
843 
844 	dp->rx_active_head = (seqnum_t)0;
845 	dp->rx_active_tail = (seqnum_t)0;
846 
847 	ASSERT(dp->rx_buf_head == (struct rxbuf *)NULL);
848 	ASSERT(dp->rx_buf_tail == (struct rxbuf *)NULL);
849 }
850 
851 /*
852  * Prepare rx buffers and put them into the rx buffer/descriptor ring.
853  */
854 static void
gem_prepare_rx_buf(struct gem_dev * dp)855 gem_prepare_rx_buf(struct gem_dev *dp)
856 {
857 	int		i;
858 	int		nrbuf;
859 	struct rxbuf	*rbp;
860 
861 	ASSERT(mutex_owned(&dp->intrlock));
862 
863 	/* Now we have no active buffers in rx ring */
864 
865 	nrbuf = min(dp->gc.gc_rx_ring_size, dp->gc.gc_rx_buf_max);
866 	for (i = 0; i < nrbuf; i++) {
867 		if ((rbp = gem_get_rxbuf(dp, B_TRUE)) == NULL) {
868 			break;
869 		}
870 		gem_append_rxbuf(dp, rbp);
871 	}
872 
873 	gem_rx_desc_dma_sync(dp,
874 	    0, dp->gc.gc_rx_ring_size, DDI_DMA_SYNC_FORDEV);
875 }
876 
877 /*
878  * Reclaim active rx buffers in rx buffer ring.
879  */
880 static void
gem_clean_rx_buf(struct gem_dev * dp)881 gem_clean_rx_buf(struct gem_dev *dp)
882 {
883 	int		i;
884 	struct rxbuf	*rbp;
885 	int		rx_ring_size = dp->gc.gc_rx_ring_size;
886 #ifdef GEM_DEBUG_LEVEL
887 	int		total;
888 #endif
889 	ASSERT(mutex_owned(&dp->intrlock));
890 
891 	DPRINTF(2, (CE_CONT, "!%s: %s: %d buffers are free",
892 	    dp->name, __func__, dp->rx_buf_freecnt));
893 	/*
894 	 * clean up HW descriptors
895 	 */
896 	for (i = 0; i < rx_ring_size; i++) {
897 		(*dp->gc.gc_rx_desc_clean)(dp, i);
898 	}
899 	gem_rx_desc_dma_sync(dp, 0, rx_ring_size, DDI_DMA_SYNC_FORDEV);
900 
901 #ifdef GEM_DEBUG_LEVEL
902 	total = 0;
903 #endif
904 	/*
905 	 * Reclaim allocated rx buffers
906 	 */
907 	while ((rbp = dp->rx_buf_head) != NULL) {
908 #ifdef GEM_DEBUG_LEVEL
909 		total++;
910 #endif
911 		/* remove the first one from rx buffer list */
912 		dp->rx_buf_head = rbp->rxb_next;
913 
914 		/* recycle the rxbuf */
915 		gem_free_rxbuf(rbp);
916 	}
917 	dp->rx_buf_tail = (struct rxbuf *)NULL;
918 
919 	DPRINTF(2, (CE_CONT,
920 	    "!%s: %s: %d buffers freeed, total: %d free",
921 	    dp->name, __func__, total, dp->rx_buf_freecnt));
922 }
923 
924 /*
925  * Initialize an empty transmit buffer/descriptor ring
926  */
/*
 * Initialize an empty transmit buffer/descriptor ring.
 * Must be called while the mac is stopped (!dp->mac_active).
 */
static void
gem_init_tx_ring(struct gem_dev *dp)
{
	int		i;
	int		tx_buf_size = dp->gc.gc_tx_buf_size;
	int		tx_ring_size = dp->gc.gc_tx_ring_size;

	DPRINTF(2, (CE_CONT, "!%s: %s: ring_size:%d, buf_size:%d",
	    dp->name, __func__,
	    dp->gc.gc_tx_ring_size, dp->gc.gc_tx_buf_size));

	ASSERT(!dp->mac_active);

	/* initialize active list and free list */
	/*
	 * Fold the current softq head into the slot base so that the
	 * sequence numbers can restart near zero while still mapping
	 * to the same physical tx buffer slots.
	 */
	dp->tx_slots_base =
	    SLOT(dp->tx_slots_base + dp->tx_softq_head, tx_buf_size);
	dp->tx_softq_tail -= dp->tx_softq_head;
	dp->tx_softq_head = (seqnum_t)0;

	dp->tx_active_head = dp->tx_softq_head;
	dp->tx_active_tail = dp->tx_softq_head;

	/* free region begins after any entries still queued on the softq */
	dp->tx_free_head   = dp->tx_softq_tail;
	dp->tx_free_tail   = dp->gc.gc_tx_buf_limit;

	dp->tx_desc_head = (seqnum_t)0;
	dp->tx_desc_tail = (seqnum_t)0;
	dp->tx_desc_intr = (seqnum_t)0;

	/* let the hardware layer initialize each tx descriptor */
	for (i = 0; i < tx_ring_size; i++) {
		(*dp->gc.gc_tx_desc_init)(dp, i);
	}
	gem_tx_desc_dma_sync(dp, 0, tx_ring_size, DDI_DMA_SYNC_FORDEV);
}
961 
962 __INLINE__
963 static void
gem_txbuf_free_dma_resources(struct txbuf * tbp)964 gem_txbuf_free_dma_resources(struct txbuf *tbp)
965 {
966 	if (tbp->txb_mp) {
967 		freemsg(tbp->txb_mp);
968 		tbp->txb_mp = NULL;
969 	}
970 	tbp->txb_nfrags = 0;
971 	tbp->txb_flag = 0;
972 }
973 #pragma inline(gem_txbuf_free_dma_resources)
974 
975 /*
976  * reclaim active tx buffers and reset positions in tx rings.
977  */
/*
 * Reclaim all in-flight tx buffers and reset the tx ring positions.
 * Called while the mac is stopped; any packets not yet transmitted
 * are counted as transmit errors and their resources released.
 */
static void
gem_clean_tx_buf(struct gem_dev *dp)
{
	int		i;
	seqnum_t	head;
	seqnum_t	tail;
	seqnum_t	sn;
	struct txbuf	*tbp;
	int		tx_ring_size = dp->gc.gc_tx_ring_size;
#ifdef GEM_DEBUG_LEVEL
	int		err;
#endif

	ASSERT(!dp->mac_active);
	ASSERT(dp->tx_busy == 0);
	ASSERT(dp->tx_softq_tail == dp->tx_free_head);

	/*
	 * clean up all HW descriptors
	 */
	for (i = 0; i < tx_ring_size; i++) {
		(*dp->gc.gc_tx_desc_clean)(dp, i);
	}
	gem_tx_desc_dma_sync(dp, 0, tx_ring_size, DDI_DMA_SYNC_FORDEV);

	/* dequeue all active and loaded buffers */
	head = dp->tx_active_head;
	tail = dp->tx_softq_tail;

	ASSERT(dp->tx_free_head - head >= 0);
	tbp = GET_TXBUF(dp, head);
	for (sn = head; sn != tail; sn++) {
		/* drop the pending packet and clear the buffer state */
		gem_txbuf_free_dma_resources(tbp);
		ASSERT(tbp->txb_mp == NULL);
		/* it never made it onto the wire: count as a tx error */
		dp->stats.errxmt++;
		tbp = tbp->txb_next;
	}

#ifdef GEM_DEBUG_LEVEL
	/* ensure no dma resources for tx are not in use now */
	err = 0;
	while (sn != head + dp->gc.gc_tx_buf_size) {
		if (tbp->txb_mp || tbp->txb_nfrags) {
			DPRINTF(0, (CE_CONT,
			    "%s: %s: sn:%d[%d] mp:%p nfrags:%d",
			    dp->name, __func__,
			    sn, SLOT(sn, dp->gc.gc_tx_buf_size),
			    tbp->txb_mp, tbp->txb_nfrags));
			err = 1;
		}
		sn++;
		tbp = tbp->txb_next;
	}

	if (err) {
		gem_dump_txbuf(dp, CE_WARN,
		    "gem_clean_tx_buf: tbp->txb_mp != NULL");
	}
#endif
	/* recycle buffers, now no active tx buffers in the ring */
	dp->tx_free_tail += tail - head;
	ASSERT(dp->tx_free_tail == dp->tx_free_head + dp->gc.gc_tx_buf_limit);

	/* fix positions in tx buffer rings */
	dp->tx_active_head = dp->tx_free_head;
	dp->tx_active_tail = dp->tx_free_head;
	dp->tx_softq_head  = dp->tx_free_head;
	dp->tx_softq_tail  = dp->tx_free_head;
}
1047 
1048 /*
1049  * Reclaim transmitted buffers from tx buffer/descriptor ring.
1050  */
1051 __INLINE__ int
gem_reclaim_txbuf(struct gem_dev * dp)1052 gem_reclaim_txbuf(struct gem_dev *dp)
1053 {
1054 	struct txbuf	*tbp;
1055 	uint_t		txstat;
1056 	int		err = GEM_SUCCESS;
1057 	seqnum_t	head;
1058 	seqnum_t	tail;
1059 	seqnum_t	sn;
1060 	seqnum_t	desc_head;
1061 	int		tx_ring_size = dp->gc.gc_tx_ring_size;
1062 	uint_t (*tx_desc_stat)(struct gem_dev *dp,
1063 	    int slot, int ndesc) = dp->gc.gc_tx_desc_stat;
1064 	clock_t		now;
1065 
1066 	now = ddi_get_lbolt();
1067 	if (now == (clock_t)0) {
1068 		/* make non-zero timestamp */
1069 		now--;
1070 	}
1071 
1072 	mutex_enter(&dp->xmitlock);
1073 
1074 	head = dp->tx_active_head;
1075 	tail = dp->tx_active_tail;
1076 
1077 #if GEM_DEBUG_LEVEL > 2
1078 	if (head != tail) {
1079 		cmn_err(CE_CONT, "!%s: %s: "
1080 		    "testing active_head:%d[%d], active_tail:%d[%d]",
1081 		    dp->name, __func__,
1082 		    head, SLOT(head, dp->gc.gc_tx_buf_size),
1083 		    tail, SLOT(tail, dp->gc.gc_tx_buf_size));
1084 	}
1085 #endif
1086 #ifdef DEBUG
1087 	if (dp->tx_reclaim_busy == 0) {
1088 		/* check tx buffer management consistency */
1089 		ASSERT(dp->tx_free_tail - dp->tx_active_head
1090 		    == dp->gc.gc_tx_buf_limit);
1091 		/* EMPTY */
1092 	}
1093 #endif
1094 	dp->tx_reclaim_busy++;
1095 
1096 	/* sync all active HW descriptors */
1097 	gem_tx_desc_dma_sync(dp,
1098 	    SLOT(dp->tx_desc_head, tx_ring_size),
1099 	    dp->tx_desc_tail - dp->tx_desc_head,
1100 	    DDI_DMA_SYNC_FORKERNEL);
1101 
1102 	tbp = GET_TXBUF(dp, head);
1103 	desc_head = dp->tx_desc_head;
1104 	for (sn = head; sn != tail;
1105 	    dp->tx_active_head = (++sn), tbp = tbp->txb_next) {
1106 		int	ndescs;
1107 
1108 		ASSERT(tbp->txb_desc == desc_head);
1109 
1110 		ndescs = tbp->txb_ndescs;
1111 		if (ndescs == 0) {
1112 			/* skip errored descriptors */
1113 			continue;
1114 		}
1115 		txstat = (*tx_desc_stat)(dp,
1116 		    SLOT(tbp->txb_desc, tx_ring_size), ndescs);
1117 
1118 		if (txstat == 0) {
1119 			/* not transmitted yet */
1120 			break;
1121 		}
1122 
1123 		if (!dp->tx_blocked && (tbp->txb_flag & GEM_TXFLAG_INTR)) {
1124 			dp->tx_blocked = now;
1125 		}
1126 
1127 		ASSERT(txstat & (GEM_TX_DONE | GEM_TX_ERR));
1128 
1129 		if (txstat & GEM_TX_ERR) {
1130 			err = GEM_FAILURE;
1131 			cmn_err(CE_WARN, "!%s: tx error at desc %d[%d]",
1132 			    dp->name, sn, SLOT(sn, tx_ring_size));
1133 		}
1134 #if GEM_DEBUG_LEVEL > 4
1135 		if (now - tbp->txb_stime >= 50) {
1136 			cmn_err(CE_WARN, "!%s: tx delay while %d mS",
1137 			    dp->name, (now - tbp->txb_stime)*10);
1138 		}
1139 #endif
1140 		/* free transmitted descriptors */
1141 		desc_head += ndescs;
1142 	}
1143 
1144 	if (dp->tx_desc_head != desc_head) {
1145 		/* we have reclaimed one or more tx buffers */
1146 		dp->tx_desc_head = desc_head;
1147 
1148 		/* If we passed the next interrupt position, update it */
1149 		if (desc_head - dp->tx_desc_intr > 0) {
1150 			dp->tx_desc_intr = desc_head;
1151 		}
1152 	}
1153 	mutex_exit(&dp->xmitlock);
1154 
1155 	/* free dma mapping resources associated with transmitted tx buffers */
1156 	tbp = GET_TXBUF(dp, head);
1157 	tail = sn;
1158 #if GEM_DEBUG_LEVEL > 2
1159 	if (head != tail) {
1160 		cmn_err(CE_CONT, "%s: freeing head:%d[%d], tail:%d[%d]",
1161 		    __func__,
1162 		    head, SLOT(head, dp->gc.gc_tx_buf_size),
1163 		    tail, SLOT(tail, dp->gc.gc_tx_buf_size));
1164 	}
1165 #endif
1166 	for (sn = head; sn != tail; sn++, tbp = tbp->txb_next) {
1167 		gem_txbuf_free_dma_resources(tbp);
1168 	}
1169 
1170 	/* recycle the tx buffers */
1171 	mutex_enter(&dp->xmitlock);
1172 	if (--dp->tx_reclaim_busy == 0) {
1173 		/* we are the last thread who can update free tail */
1174 #if GEM_DEBUG_LEVEL > 4
1175 		/* check all resouces have been deallocated */
1176 		sn = dp->tx_free_tail;
1177 		tbp = GET_TXBUF(dp, new_tail);
1178 		while (sn != dp->tx_active_head + dp->gc.gc_tx_buf_limit) {
1179 			if (tbp->txb_nfrags) {
1180 				/* in use */
1181 				break;
1182 			}
1183 			ASSERT(tbp->txb_mp == NULL);
1184 			tbp = tbp->txb_next;
1185 			sn++;
1186 		}
1187 		ASSERT(dp->tx_active_head + dp->gc.gc_tx_buf_limit == sn);
1188 #endif
1189 		dp->tx_free_tail =
1190 		    dp->tx_active_head + dp->gc.gc_tx_buf_limit;
1191 	}
1192 	if (!dp->mac_active) {
1193 		/* someone may be waiting for me. */
1194 		cv_broadcast(&dp->tx_drain_cv);
1195 	}
1196 #if GEM_DEBUG_LEVEL > 2
1197 	cmn_err(CE_CONT, "!%s: %s: called, "
1198 	    "free_head:%d free_tail:%d(+%d) added:%d",
1199 	    dp->name, __func__,
1200 	    dp->tx_free_head, dp->tx_free_tail,
1201 	    dp->tx_free_tail - dp->tx_free_head, tail - head);
1202 #endif
1203 	mutex_exit(&dp->xmitlock);
1204 
1205 	return (err);
1206 }
1207 #pragma inline(gem_reclaim_txbuf)
1208 
1209 
1210 /*
1211  * Make tx descriptors in out-of-order manner
1212  */
1213 static void
gem_tx_load_descs_oo(struct gem_dev * dp,seqnum_t start_slot,seqnum_t end_slot,uint64_t flags)1214 gem_tx_load_descs_oo(struct gem_dev *dp,
1215 	seqnum_t start_slot, seqnum_t end_slot, uint64_t flags)
1216 {
1217 	seqnum_t	sn;
1218 	struct txbuf	*tbp;
1219 	int	tx_ring_size = dp->gc.gc_tx_ring_size;
1220 	int	(*tx_desc_write)
1221 	    (struct gem_dev *dp, int slot,
1222 	    ddi_dma_cookie_t *dmacookie,
1223 	    int frags, uint64_t flag) = dp->gc.gc_tx_desc_write;
1224 	clock_t	now = ddi_get_lbolt();
1225 
1226 	sn = start_slot;
1227 	tbp = GET_TXBUF(dp, sn);
1228 	do {
1229 #if GEM_DEBUG_LEVEL > 1
1230 		if (dp->tx_cnt < 100) {
1231 			dp->tx_cnt++;
1232 			flags |= GEM_TXFLAG_INTR;
1233 		}
1234 #endif
1235 		/* write a tx descriptor */
1236 		tbp->txb_desc = sn;
1237 		tbp->txb_ndescs = (*tx_desc_write)(dp,
1238 		    SLOT(sn, tx_ring_size),
1239 		    tbp->txb_dmacookie,
1240 		    tbp->txb_nfrags, flags | tbp->txb_flag);
1241 		tbp->txb_stime = now;
1242 		ASSERT(tbp->txb_ndescs == 1);
1243 
1244 		flags = 0;
1245 		sn++;
1246 		tbp = tbp->txb_next;
1247 	} while (sn != end_slot);
1248 }
1249 
/*
 * Copy an outbound mblk chain into the tx buffer's pre-allocated bounce
 * buffer and record the resulting dma cookie(s) in the txbuf.
 * Returns the number of bytes staged for transmission.
 * The mblk is kept attached to the txbuf (txb_mp) and freed later when
 * the buffer's dma resources are released.
 */
__INLINE__
static size_t
gem_setup_txbuf_copy(struct gem_dev *dp, mblk_t *mp, struct txbuf *tbp)
{
	size_t			min_pkt;
	caddr_t			bp;
	size_t			off;
	mblk_t			*tp;
	size_t			len;
	uint64_t		flag;

	ASSERT(tbp->txb_mp == NULL);

	/* we use bounce buffer for the packet */
	min_pkt = ETHERMIN;
	bp = tbp->txb_buf;
	off = 0;
	tp = mp;

	flag = tbp->txb_flag;
	if (flag & GEM_TXFLAG_SWVTAG) {
		/*
		 * A software-inserted vlan tag is present, so the minimum
		 * frame size grows by the tag size; hardware tagging
		 * (GEM_TXFLAG_VTAG) must not be set at the same time.
		 */
		min_pkt += VTAG_SIZE;
		ASSERT((flag & GEM_TXFLAG_VTAG) == 0);
	}

	/* copy the whole mblk chain into the bounce buffer */
	for (; tp; tp = tp->b_cont) {
		if ((len = (long)tp->b_wptr - (long)tp->b_rptr) > 0) {
			bcopy(tp->b_rptr, &bp[off], len);
			off += len;
		}
	}

	if (off < min_pkt &&
	    (min_pkt > ETHERMIN || !dp->gc.gc_tx_auto_pad)) {
		/*
		 * Extend the packet to minimum packet size explicitly.
		 * For software vlan packets, we shouldn't use tx autopad
		 * function because nics may not be aware of vlan.
		 * we must keep 46 octet of payload even if we use vlan.
		 */
		bzero(&bp[off], min_pkt - off);
		off = min_pkt;
	}

	/* flush the staged bytes to the device before dma */
	(void) ddi_dma_sync(tbp->txb_bdh, (off_t)0, off, DDI_DMA_SYNC_FORDEV);

	tbp->txb_dmacookie[0].dmac_laddress = tbp->txb_buf_dma;
	tbp->txb_dmacookie[0].dmac_size = off;

	DPRINTF(2, (CE_CONT,
	    "!%s: %s: copy: addr:0x%llx len:0x%x, vtag:0x%04x, min_pkt:%d",
	    dp->name, __func__,
	    tbp->txb_dmacookie[0].dmac_laddress,
	    tbp->txb_dmacookie[0].dmac_size,
	    (flag & GEM_TXFLAG_VTAG) >> GEM_TXFLAG_VTAG_SHIFT,
	    min_pkt));

	/* save misc info */
	tbp->txb_mp = mp;
	tbp->txb_nfrags = 1;
#ifdef DEBUG_MULTIFRAGS
	/*
	 * Debug aid: artificially split the single bounce-buffer cookie
	 * into three fragments to exercise multi-fragment descriptor
	 * handling in chip-specific tx_desc_write routines.
	 */
	if (dp->gc.gc_tx_max_frags >= 3 &&
	    tbp->txb_dmacookie[0].dmac_size > 16*3) {
		tbp->txb_dmacookie[1].dmac_laddress =
		    tbp->txb_dmacookie[0].dmac_laddress + 16;
		tbp->txb_dmacookie[2].dmac_laddress =
		    tbp->txb_dmacookie[1].dmac_laddress + 16;

		tbp->txb_dmacookie[2].dmac_size =
		    tbp->txb_dmacookie[0].dmac_size - 16*2;
		tbp->txb_dmacookie[1].dmac_size = 16;
		tbp->txb_dmacookie[0].dmac_size = 16;
		tbp->txb_nfrags  = 3;
	}
#endif
	return (off);
}
#pragma inline(gem_setup_txbuf_copy)
1330 
1331 __INLINE__
1332 static void
gem_tx_start_unit(struct gem_dev * dp)1333 gem_tx_start_unit(struct gem_dev *dp)
1334 {
1335 	seqnum_t	head;
1336 	seqnum_t	tail;
1337 	struct txbuf	*tbp_head;
1338 	struct txbuf	*tbp_tail;
1339 
1340 	/* update HW descriptors from soft queue */
1341 	ASSERT(mutex_owned(&dp->xmitlock));
1342 	ASSERT(dp->tx_softq_head == dp->tx_active_tail);
1343 
1344 	head = dp->tx_softq_head;
1345 	tail = dp->tx_softq_tail;
1346 
1347 	DPRINTF(1, (CE_CONT,
1348 	    "%s: %s: called, softq %d %d[+%d], desc %d %d[+%d]",
1349 	    dp->name, __func__, head, tail, tail - head,
1350 	    dp->tx_desc_head, dp->tx_desc_tail,
1351 	    dp->tx_desc_tail - dp->tx_desc_head));
1352 
1353 	ASSERT(tail - head > 0);
1354 
1355 	dp->tx_desc_tail = tail;
1356 
1357 	tbp_head = GET_TXBUF(dp, head);
1358 	tbp_tail = GET_TXBUF(dp, tail - 1);
1359 
1360 	ASSERT(tbp_tail->txb_desc + tbp_tail->txb_ndescs == dp->tx_desc_tail);
1361 
1362 	dp->gc.gc_tx_start(dp,
1363 	    SLOT(tbp_head->txb_desc, dp->gc.gc_tx_ring_size),
1364 	    tbp_tail->txb_desc + tbp_tail->txb_ndescs - tbp_head->txb_desc);
1365 
1366 	/* advance softq head and active tail */
1367 	dp->tx_softq_head = dp->tx_active_tail = tail;
1368 }
1369 #pragma inline(gem_tx_start_unit)
1370 
1371 #ifdef GEM_DEBUG_LEVEL
1372 static int gem_send_cnt[10];
1373 #endif
1374 #define	PKT_MIN_SIZE	(sizeof (struct ether_header) + 10 + VTAG_SIZE)
1375 #define	EHLEN	(sizeof (struct ether_header))
1376 /*
1377  * check ether packet type and ip protocol
1378  */
/*
 * Examine the headers of an outbound packet and compute the tx flag
 * bits (vlan tagging mode) for it.
 *   mp - the packet; may be a chain of mblks.
 *   bp - caller-supplied work buffer (at least PKT_MIN_SIZE bytes) used
 *        only when the first mblk is too short to hold the headers.
 * Returns the GEM_TXFLAG_* bits to OR into the txbuf's flag word.
 */
static uint64_t
gem_txbuf_options(struct gem_dev *dp, mblk_t *mp, uint8_t *bp)
{
	mblk_t		*tp;
	ssize_t		len;
	uint_t		vtag;
	int		off;
	uint64_t	flag;

	flag = 0ULL;

	/*
	 * prepare continuous header of the packet for protocol analysis
	 */
	if ((long)mp->b_wptr - (long)mp->b_rptr < PKT_MIN_SIZE) {
		/* we use work buffer to copy mblk */
		for (tp = mp, off = 0;
		    tp && (off < PKT_MIN_SIZE);
		    tp = tp->b_cont, off += len) {
			len = (long)tp->b_wptr - (long)tp->b_rptr;
			len = min(len, PKT_MIN_SIZE - off);
			bcopy(tp->b_rptr, &bp[off], len);
		}
	} else {
		/* we can use mblk without copy */
		bp = mp->b_rptr;
	}

	/* process vlan tag for GLD v3 */
	if (GET_NET16(&bp[VTAG_OFF]) == VTAG_TPID) {
		if (dp->misc_flag & GEM_VLAN_HARD) {
			/* hardware tagging: pass the tci to the chip */
			vtag = GET_NET16(&bp[VTAG_OFF + 2]);
			ASSERT(vtag);
			flag |= vtag << GEM_TXFLAG_VTAG_SHIFT;
		} else {
			/* software tagging: tag stays in the frame body */
			flag |= GEM_TXFLAG_SWVTAG;
		}
	}
	return (flag);
}
1419 #undef EHLEN
1420 #undef PKT_MIN_SIZE
1421 /*
1422  * gem_send_common is an exported function because hw depend routines may
1423  * use it for sending control frames like setup frames for 2114x chipset.
1424  */
mblk_t *
gem_send_common(struct gem_dev *dp, mblk_t *mp_head, uint32_t flags)
{
	int			nmblk;
	int			avail;
	mblk_t			*tp;
	mblk_t			*mp;
	int			i;
	struct txbuf		*tbp;
	seqnum_t		head;
	uint64_t		load_flags;
	uint64_t		len_total = 0;
	uint32_t		bcast = 0;
	uint32_t		mcast = 0;

	ASSERT(mp_head != NULL);

	/* count the packets in the chain */
	mp = mp_head;
	nmblk = 1;
	while ((mp = mp->b_next) != NULL) {
		nmblk++;
	}
#ifdef GEM_DEBUG_LEVEL
	gem_send_cnt[0]++;
	gem_send_cnt[min(nmblk, 9)]++;
#endif
	/*
	 * Acquire resources
	 */
	mutex_enter(&dp->xmitlock);
	if (dp->mac_suspended) {
		/* device is suspended: silently drop the whole chain */
		mutex_exit(&dp->xmitlock);
		mp = mp_head;
		while (mp) {
			tp = mp->b_next;
			freemsg(mp);
			mp = tp;
		}
		return (NULL);
	}

	if (!dp->mac_active && (flags & GEM_SEND_CTRL) == 0) {
		/* don't send data packets while mac isn't active */
		/* XXX - should we discard packets? */
		mutex_exit(&dp->xmitlock);
		return (mp_head);
	}

	/* allocate free slots */
	head = dp->tx_free_head;
	avail = dp->tx_free_tail - head;

	DPRINTF(2, (CE_CONT,
	    "!%s: %s: called, free_head:%d free_tail:%d(+%d) req:%d",
	    dp->name, __func__,
	    dp->tx_free_head, dp->tx_free_tail, avail, nmblk));

	/* tx_max_packets throttles burst size under congestion */
	avail = min(avail, dp->tx_max_packets);

	if (nmblk > avail) {
		if (avail == 0) {
			/* no resources; short cut */
			DPRINTF(2, (CE_CONT, "!%s: no resources", __func__));
			dp->tx_max_packets = max(dp->tx_max_packets - 1, 1);
			goto done;
		}
		/* send as many as fit; the rest are returned to caller */
		nmblk = avail;
	}

	dp->tx_free_head = head + nmblk;
	/*
	 * tx_busy counts senders currently staging buffers outside of
	 * xmitlock; the first one marks its batch with GEM_TXFLAG_HEAD.
	 */
	load_flags = ((dp->tx_busy++) == 0) ? GEM_TXFLAG_HEAD : 0;

	/* update last interrupt position if tx buffers exhaust.  */
	if (nmblk == avail) {
		tbp = GET_TXBUF(dp, head + avail - 1);
		tbp->txb_flag = GEM_TXFLAG_INTR;
		dp->tx_desc_intr = head + avail;
	}
	mutex_exit(&dp->xmitlock);

	tbp = GET_TXBUF(dp, head);

	/* stage each packet into its reserved tx buffer (lock dropped) */
	for (i = nmblk; i > 0; i--, tbp = tbp->txb_next) {
		uint8_t		*bp;
		uint64_t	txflag;

		/* remove one from the mblk list */
		ASSERT(mp_head != NULL);
		mp = mp_head;
		mp_head = mp_head->b_next;
		mp->b_next = NULL;

		/* statistics for non-unicast packets */
		bp = mp->b_rptr;
		if ((bp[0] & 1) && (flags & GEM_SEND_CTRL) == 0) {
			if (bcmp(bp, gem_etherbroadcastaddr.ether_addr_octet,
			    ETHERADDRL) == 0) {
				bcast++;
			} else {
				mcast++;
			}
		}

		/* save misc info */
		txflag = tbp->txb_flag;
		txflag |= (flags & GEM_SEND_CTRL) << GEM_TXFLAG_PRIVATE_SHIFT;
		txflag |= gem_txbuf_options(dp, mp, (uint8_t *)tbp->txb_buf);
		tbp->txb_flag = txflag;

		len_total += gem_setup_txbuf_copy(dp, mp, tbp);
	}

	(void) gem_tx_load_descs_oo(dp, head, head + nmblk, load_flags);

	/* Append the tbp at the tail of the active tx buffer list */
	mutex_enter(&dp->xmitlock);

	if ((--dp->tx_busy) == 0) {
		/* extend the tail of softq, as new packets have been ready. */
		dp->tx_softq_tail = dp->tx_free_head;

		if (!dp->mac_active && (flags & GEM_SEND_CTRL) == 0) {
			/*
			 * The device status has changed while we are
			 * preparing tx buf.
			 * As we are the last one that make tx non-busy.
			 * wake up someone who may wait for us.
			 */
			cv_broadcast(&dp->tx_drain_cv);
		} else {
			ASSERT(dp->tx_softq_tail - dp->tx_softq_head > 0);
			gem_tx_start_unit(dp);
		}
	}
	dp->stats.obytes += len_total;
	dp->stats.opackets += nmblk;
	dp->stats.obcast += bcast;
	dp->stats.omcast += mcast;
done:
	mutex_exit(&dp->xmitlock);

	/* unsent packets (if any) are handed back to the caller */
	return (mp_head);
}
1568 
1569 /* ========================================================== */
1570 /*
1571  * error detection and restart routines
1572  */
1573 /* ========================================================== */
/*
 * Stop, reset, and re-initialize the nic after an error.
 *   flags - GEM_RESTART_* bits (e.g. GEM_RESTART_KEEP_BUF to preserve
 *           pending rx/tx buffers across the restart).
 * Caller must hold intrlock.
 * Returns GEM_SUCCESS or GEM_FAILURE.
 */
int
gem_restart_nic(struct gem_dev *dp, uint_t flags)
{
	ASSERT(mutex_owned(&dp->intrlock));

	DPRINTF(1, (CE_CONT, "!%s: %s: called", dp->name, __func__));
#ifdef GEM_DEBUG_LEVEL
#if GEM_DEBUG_LEVEL > 1
	gem_dump_txbuf(dp, CE_CONT, "gem_restart_nic");
#endif
#endif

	if (dp->mac_suspended) {
		/* should we return GEM_FAILURE ? */
		return (GEM_FAILURE);
	}

	/*
	 * We should avoid calling any routines except xxx_chip_reset
	 * when we are resuming the system.
	 */
	if (dp->mac_active) {
		if (flags & GEM_RESTART_KEEP_BUF) {
			/* stop rx gracefully */
			dp->rxmode &= ~RXMODE_ENABLE;
			(void) (*dp->gc.gc_set_rx_filter)(dp);
		}
		(void) gem_mac_stop(dp, flags);
	}

	/* reset the chip. */
	if ((*dp->gc.gc_reset_chip)(dp) != GEM_SUCCESS) {
		cmn_err(CE_WARN, "%s: %s: failed to reset chip",
		    dp->name, __func__);
		goto err;
	}

	if (gem_mac_init(dp) != GEM_SUCCESS) {
		goto err;
	}

	/* setup media mode if the link have been up */
	if (dp->mii_state == MII_STATE_LINKUP) {
		if ((dp->gc.gc_set_media)(dp) != GEM_SUCCESS) {
			goto err;
		}
	}

	/* setup mac address and enable rx filter */
	dp->rxmode |= RXMODE_ENABLE;
	if ((*dp->gc.gc_set_rx_filter)(dp) != GEM_SUCCESS) {
		goto err;
	}

	/*
	 * XXX - a panic happened because of linkdown.
	 * We must check mii_state here, because the link can be down just
	 * before the restart event happen. If the link is down now,
	 * gem_mac_start() will be called from gem_mii_link_check() when
	 * the link become up later.
	 */
	if (dp->mii_state == MII_STATE_LINKUP) {
		/* restart the nic */
		ASSERT(!dp->mac_active);
		(void) gem_mac_start(dp);
	}
	return (GEM_SUCCESS);
err:
	return (GEM_FAILURE);
}
1644 
1645 
/*
 * Periodic tx watchdog (self-rescheduling via timeout(9F)).
 * Detects two stall conditions:
 *  - tx blocked: no buffer loaded to the nic and the upstream has been
 *    throttled longer than gc_tx_timeout_interval;
 *  - tx timeout: the oldest loaded buffer has not completed within
 *    gc_tx_timeout, in which case the nic is restarted.
 * In either case the mac layer is told to resume transmission.
 */
static void
gem_tx_timeout(struct gem_dev *dp)
{
	clock_t		now;
	boolean_t	tx_sched;
	struct txbuf	*tbp;

	mutex_enter(&dp->intrlock);

	tx_sched = B_FALSE;
	now = ddi_get_lbolt();

	mutex_enter(&dp->xmitlock);
	if (!dp->mac_active || dp->mii_state != MII_STATE_LINKUP) {
		/* nothing to watch while mac is down or link is down */
		mutex_exit(&dp->xmitlock);
		goto schedule_next;
	}
	mutex_exit(&dp->xmitlock);

	/* reclaim transmitted buffers to check the trasmitter hangs or not. */
	if (gem_reclaim_txbuf(dp) != GEM_SUCCESS) {
		/* tx error happened, reset transmitter in the chip */
		(void) gem_restart_nic(dp, 0);
		tx_sched = B_TRUE;
		dp->tx_blocked = (clock_t)0;

		goto schedule_next;
	}

	mutex_enter(&dp->xmitlock);
	/* check if the transmitter thread is stuck */
	if (dp->tx_active_head == dp->tx_active_tail) {
		/* no tx buffer is loaded to the nic */
		if (dp->tx_blocked &&
		    now - dp->tx_blocked > dp->gc.gc_tx_timeout_interval) {
			gem_dump_txbuf(dp, CE_WARN,
			    "gem_tx_timeout: tx blocked");
			tx_sched = B_TRUE;
			dp->tx_blocked = (clock_t)0;
		}
		mutex_exit(&dp->xmitlock);
		goto schedule_next;
	}

	/* check the age of the oldest in-flight buffer */
	tbp = GET_TXBUF(dp, dp->tx_active_head);
	if (now - tbp->txb_stime < dp->gc.gc_tx_timeout) {
		mutex_exit(&dp->xmitlock);
		goto schedule_next;
	}
	mutex_exit(&dp->xmitlock);

	gem_dump_txbuf(dp, CE_WARN, "gem_tx_timeout: tx timeout");

	/* discard untransmitted packet and restart tx.  */
	(void) gem_restart_nic(dp, GEM_RESTART_NOWAIT);
	tx_sched = B_TRUE;
	dp->tx_blocked = (clock_t)0;

schedule_next:
	mutex_exit(&dp->intrlock);

	/* restart the downstream if needed */
	if (tx_sched) {
		mac_tx_update(dp->mh);
	}

	DPRINTF(4, (CE_CONT,
	    "!%s: blocked:%d active_head:%d active_tail:%d desc_intr:%d",
	    dp->name, BOOLEAN(dp->tx_blocked),
	    dp->tx_active_head, dp->tx_active_tail, dp->tx_desc_intr));
	/* re-arm the watchdog */
	dp->timeout_id =
	    timeout((void (*)(void *))gem_tx_timeout,
	    (void *)dp, dp->gc.gc_tx_timeout_interval);
}
1720 
1721 /* ================================================================== */
1722 /*
1723  * Interrupt handler
1724  */
1725 /* ================================================================== */
/*
 * Append a chain of rx buffers to the active rx list and write an rx
 * descriptor for each, advancing rx_active_tail.
 * Caller must hold intrlock; rbp_head must be non-NULL.
 */
__INLINE__
static void
gem_append_rxbuf(struct gem_dev *dp, struct rxbuf *rbp_head)
{
	struct rxbuf	*rbp;
	seqnum_t	tail;
	int		rx_ring_size = dp->gc.gc_rx_ring_size;

	ASSERT(rbp_head != NULL);
	ASSERT(mutex_owned(&dp->intrlock));

	DPRINTF(3, (CE_CONT, "!%s: %s: slot_head:%d, slot_tail:%d",
	    dp->name, __func__, dp->rx_active_head, dp->rx_active_tail));

	/*
	 * Add new buffers into active rx buffer list
	 */
	if (dp->rx_buf_head == NULL) {
		dp->rx_buf_head = rbp_head;
		ASSERT(dp->rx_buf_tail == NULL);
	} else {
		dp->rx_buf_tail->rxb_next = rbp_head;
	}

	tail = dp->rx_active_tail;
	for (rbp = rbp_head; rbp; rbp = rbp->rxb_next) {
		/* need to notify the tail for the lower layer */
		dp->rx_buf_tail = rbp;

		/* hand the buffer's dma cookies to the hardware ring */
		dp->gc.gc_rx_desc_write(dp,
		    SLOT(tail, rx_ring_size),
		    rbp->rxb_dmacookie,
		    rbp->rxb_nfrags);

		dp->rx_active_tail = tail = tail + 1;
	}
}
#pragma inline(gem_append_rxbuf)
1764 
/*
 * Default rx packet delivery routine: copy the received frame out of
 * the rx dma buffer into a freshly allocated mblk.
 * Returns the mblk, or NULL if allocation failed (caller counts the
 * drop).  VTAG_SIZE headroom is reserved so a vlan tag can later be
 * inserted without reallocating.
 */
mblk_t *
gem_get_packet_default(struct gem_dev *dp, struct rxbuf *rbp, size_t len)
{
	int		rx_header_len = dp->gc.gc_rx_header_len;
	uint8_t		*bp;
	mblk_t		*mp;

	/* allocate a new mblk */
	if (mp = allocb(len + VTAG_SIZE, BPRI_MED)) {
		ASSERT(mp->b_next == NULL);
		ASSERT(mp->b_cont == NULL);

		mp->b_rptr += VTAG_SIZE;
		bp = mp->b_rptr;
		mp->b_wptr = bp + len;

		/*
		 * flush the range of the entire buffer to invalidate
		 * all of corresponding dirty entries in iocache.
		 * NOTE(review): per ddi_dma_sync(9F) a length of 0 syncs
		 * to the end of the mapping, which matches the comment;
		 * the offset skips the chip-specific rx header.
		 */
		(void) ddi_dma_sync(rbp->rxb_dh, rx_header_len,
		    0, DDI_DMA_SYNC_FORKERNEL);

		bcopy(rbp->rxb_buf + rx_header_len, bp, len);
	}
	return (mp);
}
1792 
1793 #ifdef GEM_DEBUG_LEVEL
1794 uint_t	gem_rx_pkts[17];
1795 #endif
1796 
1797 
/*
 * Receive path: harvest completed rx descriptors, copy the frames into
 * mblks, send them up through mac_rx(), and refill the rx ring with the
 * recycled buffers.  Caller must hold intrlock (dropped temporarily
 * around mac_rx()).  Returns the number of descriptors processed.
 */
int
gem_receive(struct gem_dev *dp)
{
	uint64_t	len_total = 0;
	struct rxbuf	*rbp;
	mblk_t		*mp;
	int		cnt = 0;
	uint64_t	rxstat;
	struct rxbuf	*newbufs;
	struct rxbuf	**newbufs_tailp;
	mblk_t		*rx_head;
	mblk_t 		**rx_tailp;
	int		rx_ring_size = dp->gc.gc_rx_ring_size;
	seqnum_t	active_head;
	uint64_t	(*rx_desc_stat)(struct gem_dev *dp,
	    int slot, int ndesc);
	int		ethermin = ETHERMIN;
	int		ethermax = dp->mtu + sizeof (struct ether_header);
	int		rx_header_len = dp->gc.gc_rx_header_len;

	ASSERT(mutex_owned(&dp->intrlock));

	DPRINTF(3, (CE_CONT, "!%s: gem_receive: rx_buf_head:%p",
	    dp->name, dp->rx_buf_head));

	rx_desc_stat  = dp->gc.gc_rx_desc_stat;
	newbufs_tailp = &newbufs;
	rx_tailp = &rx_head;
	for (active_head = dp->rx_active_head;
	    (rbp = dp->rx_buf_head) != NULL; active_head++) {
		int		len;
		if (cnt == 0) {
			/*
			 * sync a batch of descriptors at once instead of
			 * one per iteration; cnt counts down the batch.
			 */
			cnt = max(dp->poll_pkt_delay*2, 10);
			cnt = min(cnt,
			    dp->rx_active_tail - active_head);
			gem_rx_desc_dma_sync(dp,
			    SLOT(active_head, rx_ring_size),
			    cnt,
			    DDI_DMA_SYNC_FORKERNEL);
		}

		if (rx_header_len > 0) {
			/* sync the chip-specific rx header area too */
			(void) ddi_dma_sync(rbp->rxb_dh, 0,
			    rx_header_len, DDI_DMA_SYNC_FORKERNEL);
		}

		if (((rxstat = (*rx_desc_stat)(dp,
		    SLOT(active_head, rx_ring_size),
		    rbp->rxb_nfrags))
		    & (GEM_RX_DONE | GEM_RX_ERR)) == 0) {
			/* not received yet */
			break;
		}

		/* Remove the head of the rx buffer list */
		dp->rx_buf_head = rbp->rxb_next;
		cnt--;


		if (rxstat & GEM_RX_ERR) {
			goto next;
		}

		len = rxstat & GEM_RX_LEN;
		DPRINTF(3, (CE_CONT, "!%s: %s: rxstat:0x%llx, len:0x%x",
		    dp->name, __func__, rxstat, len));

		/*
		 * Copy the packet
		 */
		if ((mp = dp->gc.gc_get_packet(dp, rbp, len)) == NULL) {
			/* no memory, discard the packet */
			dp->stats.norcvbuf++;
			goto next;
		}

		/*
		 * Process VLAN tag
		 */
		ethermin = ETHERMIN;
		ethermax = dp->mtu + sizeof (struct ether_header);
		if (GET_NET16(mp->b_rptr + VTAG_OFF) == VTAG_TPID) {
			/* tagged frames may legally be VTAG_SIZE longer */
			ethermax += VTAG_SIZE;
		}

		/* check packet size */
		if (len < ethermin) {
			dp->stats.errrcv++;
			dp->stats.runt++;
			freemsg(mp);
			goto next;
		}

		if (len > ethermax) {
			dp->stats.errrcv++;
			dp->stats.frame_too_long++;
			freemsg(mp);
			goto next;
		}

		len_total += len;

#ifdef GEM_DEBUG_VLAN
		if (GET_ETHERTYPE(mp->b_rptr) == VTAG_TPID) {
			gem_dump_packet(dp, (char *)__func__, mp, B_TRUE);
		}
#endif
		/* append received packet to temporaly rx buffer list */
		*rx_tailp = mp;
		rx_tailp  = &mp->b_next;

		/* count broadcast/multicast frames */
		if (mp->b_rptr[0] & 1) {
			if (bcmp(mp->b_rptr,
			    gem_etherbroadcastaddr.ether_addr_octet,
			    ETHERADDRL) == 0) {
				dp->stats.rbcast++;
			} else {
				dp->stats.rmcast++;
			}
		}
next:
		ASSERT(rbp != NULL);

		/* append new one to temporal new buffer list */
		*newbufs_tailp = rbp;
		newbufs_tailp  = &rbp->rxb_next;
	}

	/* advance rx_active_head */
	if ((cnt = active_head - dp->rx_active_head) > 0) {
		dp->stats.rbytes += len_total;
		dp->stats.rpackets += cnt;
	}
	dp->rx_active_head = active_head;

	/* terminate the working list */
	*newbufs_tailp = NULL;
	*rx_tailp = NULL;

	if (dp->rx_buf_head == NULL) {
		dp->rx_buf_tail = NULL;
	}

	DPRINTF(4, (CE_CONT, "%s: %s: cnt:%d, rx_head:%p",
	    dp->name, __func__, cnt, rx_head));

	if (newbufs) {
		/*
		 * fillfull rx list with new buffers
		 */
		seqnum_t	head;

		/* save current tail */
		head = dp->rx_active_tail;
		gem_append_rxbuf(dp, newbufs);

		/* call hw depend start routine if we have. */
		dp->gc.gc_rx_start(dp,
		    SLOT(head, rx_ring_size), dp->rx_active_tail - head);
	}

	if (rx_head) {
		/*
		 * send up received packets
		 */
		mutex_exit(&dp->intrlock);
		mac_rx(dp->mh, NULL, rx_head);
		mutex_enter(&dp->intrlock);
	}

#ifdef GEM_DEBUG_LEVEL
	gem_rx_pkts[min(cnt, sizeof (gem_rx_pkts)/sizeof (uint_t)-1)]++;
#endif
	return (cnt);
}
1973 
/*
 * Tx-completion service routine called from the interrupt handler.
 * Reclaims finished tx buffers; on tx error the nic is restarted with
 * buffers preserved.  Returns B_TRUE when the mac layer should be told
 * to resume transmission (caller issues mac_tx_update()).
 */
boolean_t
gem_tx_done(struct gem_dev *dp)
{
	boolean_t	tx_sched = B_FALSE;

	if (gem_reclaim_txbuf(dp) != GEM_SUCCESS) {
		/* tx error: reset while keeping queued buffers */
		(void) gem_restart_nic(dp, GEM_RESTART_KEEP_BUF);
		DPRINTF(2, (CE_CONT, "!%s: gem_tx_done: tx_desc: %d %d",
		    dp->name, dp->tx_active_head, dp->tx_active_tail));
		tx_sched = B_TRUE;
		goto x;
	}

	mutex_enter(&dp->xmitlock);

	/* XXX - we must not have any packets in soft queue */
	ASSERT(dp->tx_softq_head == dp->tx_softq_tail);
	/*
	 * If we won't have chance to get more free tx buffers, and blocked,
	 * it is worth to reschedule the downstream i.e. tx side.
	 */
	ASSERT(dp->tx_desc_intr - dp->tx_desc_head >= 0);
	if (dp->tx_blocked && dp->tx_desc_intr == dp->tx_desc_head) {
		/*
		 * As no further tx-done interrupts are scheduled, this
		 * is the last chance to kick tx side, which may be
		 * blocked now, otherwise the tx side never works again.
		 */
		tx_sched = B_TRUE;
		dp->tx_blocked = (clock_t)0;
		/* relax the burst throttle a little */
		dp->tx_max_packets =
		    min(dp->tx_max_packets + 2, dp->gc.gc_tx_buf_limit);
	}

	mutex_exit(&dp->xmitlock);

	DPRINTF(3, (CE_CONT, "!%s: %s: ret: blocked:%d",
	    dp->name, __func__, BOOLEAN(dp->tx_blocked)));
x:
	return (tx_sched);
}
2015 
/*
 * Top-level interrupt handler: delegates to the chip-specific
 * gc_interrupt routine under intrlock and post-processes its result.
 * Returns the DDI_INTR_* claim status; the chip routine may OR in
 * INTR_RESTART_TX to request a mac_tx_update() (stripped here).
 */
static uint_t
gem_intr(struct gem_dev	*dp)
{
	uint_t		ret;

	mutex_enter(&dp->intrlock);
	if (dp->mac_suspended) {
		/* device suspended: this interrupt cannot be ours */
		mutex_exit(&dp->intrlock);
		return (DDI_INTR_UNCLAIMED);
	}
	dp->intr_busy = B_TRUE;

	ret = (*dp->gc.gc_interrupt)(dp);

	if (ret == DDI_INTR_UNCLAIMED) {
		dp->intr_busy = B_FALSE;
		mutex_exit(&dp->intrlock);
		return (ret);
	}

	if (!dp->mac_active) {
		/* wake up threads draining tx while the mac is stopping */
		cv_broadcast(&dp->tx_drain_cv);
	}


	dp->stats.intr++;
	dp->intr_busy = B_FALSE;

	mutex_exit(&dp->intrlock);

	if (ret & INTR_RESTART_TX) {
		/* the chip routine freed tx resources; kick the mac layer */
		DPRINTF(4, (CE_CONT, "!%s: calling mac_tx_update", dp->name));
		mac_tx_update(dp->mh);
		ret &= ~INTR_RESTART_TX;
	}
	return (ret);
}
2053 
2054 static void
gem_intr_watcher(struct gem_dev * dp)2055 gem_intr_watcher(struct gem_dev *dp)
2056 {
2057 	(void) gem_intr(dp);
2058 
2059 	/* schedule next call of tu_intr_watcher */
2060 	dp->intr_watcher_id =
2061 	    timeout((void (*)(void *))gem_intr_watcher, (void *)dp, 1);
2062 }
2063 
2064 /* ======================================================================== */
2065 /*
2066  * MII support routines
2067  */
2068 /* ======================================================================== */
2069 static void
gem_choose_forcedmode(struct gem_dev * dp)2070 gem_choose_forcedmode(struct gem_dev *dp)
2071 {
2072 	/* choose media mode */
2073 	if (dp->anadv_1000fdx || dp->anadv_1000hdx) {
2074 		dp->speed = GEM_SPD_1000;
2075 		dp->full_duplex = dp->anadv_1000fdx;
2076 	} else if (dp->anadv_100fdx || dp->anadv_100t4) {
2077 		dp->speed = GEM_SPD_100;
2078 		dp->full_duplex = B_TRUE;
2079 	} else if (dp->anadv_100hdx) {
2080 		dp->speed = GEM_SPD_100;
2081 		dp->full_duplex = B_FALSE;
2082 	} else {
2083 		dp->speed = GEM_SPD_10;
2084 		dp->full_duplex = dp->anadv_10fdx;
2085 	}
2086 }
2087 
2088 uint16_t
gem_mii_read(struct gem_dev * dp,uint_t reg)2089 gem_mii_read(struct gem_dev *dp, uint_t reg)
2090 {
2091 	if ((dp->mii_status & MII_STATUS_MFPRMBLSUPR) == 0) {
2092 		(*dp->gc.gc_mii_sync)(dp);
2093 	}
2094 	return ((*dp->gc.gc_mii_read)(dp, reg));
2095 }
2096 
2097 void
gem_mii_write(struct gem_dev * dp,uint_t reg,uint16_t val)2098 gem_mii_write(struct gem_dev *dp, uint_t reg, uint16_t val)
2099 {
2100 	if ((dp->mii_status & MII_STATUS_MFPRMBLSUPR) == 0) {
2101 		(*dp->gc.gc_mii_sync)(dp);
2102 	}
2103 	(*dp->gc.gc_mii_write)(dp, reg, val);
2104 }
2105 
2106 #define	fc_cap_decode(x)	\
2107 	((((x) & MII_ABILITY_PAUSE) ? 1 : 0) |	\
2108 	(((x) & MII_ABILITY_ASMPAUSE) ? 2 : 0))
2109 
/*
 * Default MII configuration routine: program the autonegotiation
 * advertisement register (and 1000TC register for GMII PHYs) from the
 * driver's anadv_* settings.  Returns GEM_SUCCESS/GEM_FAILURE.
 */
int
gem_mii_config_default(struct gem_dev *dp)
{
	uint16_t	mii_stat;
	uint16_t	val;
	/* encode anadv_flow_control (0..3) into MII ability bits */
	static uint16_t fc_cap_encode[4] = {
		0, /* none */
		MII_ABILITY_PAUSE, /* symmetric */
		MII_ABILITY_ASMPAUSE, /* tx */
		MII_ABILITY_PAUSE | MII_ABILITY_ASMPAUSE, /* rx-symmetric */
	};

	DPRINTF(1, (CE_CONT, "!%s: %s: called", dp->name, __func__));

	/*
	 * Configure bits in advertisement register
	 */
	mii_stat = dp->mii_status;

	DPRINTF(1, (CE_CONT, "!%s: %s: MII_STATUS reg:%b",
	    dp->name, __func__, mii_stat, MII_STATUS_BITS));

	if ((mii_stat & MII_STATUS_ABILITY_TECH) == 0) {
		/* it's funny */
		cmn_err(CE_WARN, "!%s: wrong ability bits: mii_status:%b",
		    dp->name, mii_stat, MII_STATUS_BITS);
		return (GEM_FAILURE);
	}

	/* Do not change the rest of the ability bits in the advert reg */
	val = gem_mii_read(dp, MII_AN_ADVERT) & ~MII_ABILITY_ALL;

	DPRINTF(0, (CE_CONT,
	    "!%s: %s: 100T4:%d 100F:%d 100H:%d 10F:%d 10H:%d",
	    dp->name, __func__,
	    dp->anadv_100t4, dp->anadv_100fdx, dp->anadv_100hdx,
	    dp->anadv_10fdx, dp->anadv_10hdx));

	if (dp->anadv_100t4) {
		val |= MII_ABILITY_100BASE_T4;
	}
	if (dp->anadv_100fdx) {
		val |= MII_ABILITY_100BASE_TX_FD;
	}
	if (dp->anadv_100hdx) {
		val |= MII_ABILITY_100BASE_TX;
	}
	if (dp->anadv_10fdx) {
		val |= MII_ABILITY_10BASE_T_FD;
	}
	if (dp->anadv_10hdx) {
		val |= MII_ABILITY_10BASE_T;
	}

	/* set flow control capability */
	val |= fc_cap_encode[dp->anadv_flow_control];

	DPRINTF(0, (CE_CONT,
	    "!%s: %s: setting MII_AN_ADVERT reg:%b, mii_mode:%d, fc:%d",
	    dp->name, __func__, val, MII_ABILITY_BITS, dp->gc.gc_mii_mode,
	    dp->anadv_flow_control));

	gem_mii_write(dp, MII_AN_ADVERT, val);

	if (mii_stat & MII_STATUS_XSTATUS) {
		/*
		 * 1000Base-T GMII support
		 */
		if (!dp->anadv_autoneg) {
			/* enable manual configuration */
			val = MII_1000TC_CFG_EN;
		} else {
			val = 0;
			if (dp->anadv_1000fdx) {
				val |= MII_1000TC_ADV_FULL;
			}
			if (dp->anadv_1000hdx) {
				val |= MII_1000TC_ADV_HALF;
			}
		}
		DPRINTF(0, (CE_CONT,
		    "!%s: %s: setting MII_1000TC reg:%b",
		    dp->name, __func__, val, MII_1000TC_BITS));

		gem_mii_write(dp, MII_1000TC, val);
	}

	return (GEM_SUCCESS);
}
2199 
/* notify the mac layer of link state transitions */
#define	GEM_LINKUP(dp)		mac_link_update((dp)->mh, LINK_STATE_UP)
#define	GEM_LINKDOWN(dp)	mac_link_update((dp)->mh, LINK_STATE_DOWN)
2202 
/*
 * Flow control resolution table: indexed by our pause capability and
 * the link partner's, both as encoded by fc_cap_decode() (bit 0:
 * symmetric pause, bit 1: asymmetric pause).  Yields the resulting
 * FLOW_CONTROL_* mode for a full-duplex link.
 */
static uint8_t gem_fc_result[4 /* my cap */ ][4 /* lp cap */] = {
/*	 none	symm	tx	rx/symm */
/* none */
	{FLOW_CONTROL_NONE,
		FLOW_CONTROL_NONE,
			FLOW_CONTROL_NONE,
				FLOW_CONTROL_NONE},
/* sym */
	{FLOW_CONTROL_NONE,
		FLOW_CONTROL_SYMMETRIC,
			FLOW_CONTROL_NONE,
				FLOW_CONTROL_SYMMETRIC},
/* tx */
	{FLOW_CONTROL_NONE,
		FLOW_CONTROL_NONE,
			FLOW_CONTROL_NONE,
				FLOW_CONTROL_TX_PAUSE},
/* rx/symm */
	{FLOW_CONTROL_NONE,
		FLOW_CONTROL_SYMMETRIC,
			FLOW_CONTROL_RX_PAUSE,
				FLOW_CONTROL_SYMMETRIC},
};
2226 
/*
 * Human-readable flow control mode names, indexed by the resolved
 * flow_control value; used in the link-up console message.
 */
static char *gem_fc_type[] = {
	"without",
	"with symmetric",
	"with tx",
	"with rx",
};
2233 
2234 boolean_t
gem_mii_link_check(struct gem_dev * dp)2235 gem_mii_link_check(struct gem_dev *dp)
2236 {
2237 	uint16_t	old_mii_state;
2238 	boolean_t	tx_sched = B_FALSE;
2239 	uint16_t	status;
2240 	uint16_t	advert;
2241 	uint16_t	lpable;
2242 	uint16_t	exp;
2243 	uint16_t	ctl1000;
2244 	uint16_t	stat1000;
2245 	uint16_t	val;
2246 	clock_t		now;
2247 	clock_t		diff;
2248 	int		linkdown_action;
2249 	boolean_t	fix_phy = B_FALSE;
2250 
2251 	now = ddi_get_lbolt();
2252 	old_mii_state = dp->mii_state;
2253 
2254 	DPRINTF(3, (CE_CONT, "!%s: %s: time:%d state:%d",
2255 	    dp->name, __func__, now, dp->mii_state));
2256 
2257 	diff = now - dp->mii_last_check;
2258 	dp->mii_last_check = now;
2259 
2260 	/*
2261 	 * For NWAM, don't show linkdown state right
2262 	 * after the system boots
2263 	 */
2264 	if (dp->linkup_delay > 0) {
2265 		if (dp->linkup_delay > diff) {
2266 			dp->linkup_delay -= diff;
2267 		} else {
2268 			/* link up timeout */
2269 			dp->linkup_delay = -1;
2270 		}
2271 	}
2272 
2273 next_nowait:
2274 	switch (dp->mii_state) {
2275 	case MII_STATE_UNKNOWN:
2276 		/* power-up, DP83840 requires 32 sync bits */
2277 		(*dp->gc.gc_mii_sync)(dp);
2278 		goto reset_phy;
2279 
2280 	case MII_STATE_RESETTING:
2281 		dp->mii_timer -= diff;
2282 		if (dp->mii_timer > 0) {
2283 			/* don't read phy registers in resetting */
2284 			dp->mii_interval = WATCH_INTERVAL_FAST;
2285 			goto next;
2286 		}
2287 
2288 		/* Timer expired, ensure reset bit is not set */
2289 
2290 		if (dp->mii_status & MII_STATUS_MFPRMBLSUPR) {
2291 			/* some phys need sync bits after reset */
2292 			(*dp->gc.gc_mii_sync)(dp);
2293 		}
2294 		val = gem_mii_read(dp, MII_CONTROL);
2295 		if (val & MII_CONTROL_RESET) {
2296 			cmn_err(CE_NOTE,
2297 			    "!%s: time:%ld resetting phy not complete."
2298 			    " mii_control:0x%b",
2299 			    dp->name, ddi_get_lbolt(),
2300 			    val, MII_CONTROL_BITS);
2301 		}
2302 
2303 		/* ensure neither isolated nor pwrdown nor auto-nego mode */
2304 		/* XXX -- this operation is required for NS DP83840A. */
2305 		gem_mii_write(dp, MII_CONTROL, 0);
2306 
2307 		/* As resetting PHY has completed, configure PHY registers */
2308 		if ((*dp->gc.gc_mii_config)(dp) != GEM_SUCCESS) {
2309 			/* we failed to configure PHY. */
2310 			goto reset_phy;
2311 		}
2312 
2313 		/* mii_config may disable autonegatiation */
2314 		gem_choose_forcedmode(dp);
2315 
2316 		dp->mii_lpable = 0;
2317 		dp->mii_advert = 0;
2318 		dp->mii_exp = 0;
2319 		dp->mii_ctl1000 = 0;
2320 		dp->mii_stat1000 = 0;
2321 		dp->flow_control = FLOW_CONTROL_NONE;
2322 
2323 		if (!dp->anadv_autoneg) {
2324 			/* skip auto-negotiation phase */
2325 			dp->mii_state = MII_STATE_MEDIA_SETUP;
2326 			dp->mii_timer = 0;
2327 			dp->mii_interval = 0;
2328 			goto next_nowait;
2329 		}
2330 
2331 		/* Issue auto-negotiation command */
2332 		goto autonego;
2333 
2334 	case MII_STATE_AUTONEGOTIATING:
2335 		/*
2336 		 * Autonegotiation is in progress
2337 		 */
2338 		dp->mii_timer -= diff;
2339 		if (dp->mii_timer -
2340 		    (dp->gc.gc_mii_an_timeout
2341 		    - dp->gc.gc_mii_an_wait) > 0) {
2342 			/*
2343 			 * wait for a while, typically autonegotiation
2344 			 * completes in 2.3 - 2.5 sec.
2345 			 */
2346 			dp->mii_interval = WATCH_INTERVAL_FAST;
2347 			goto next;
2348 		}
2349 
2350 		/* read PHY status */
2351 		status = gem_mii_read(dp, MII_STATUS);
2352 		DPRINTF(4, (CE_CONT,
2353 		    "!%s: %s: called: mii_state:%d MII_STATUS reg:%b",
2354 		    dp->name, __func__, dp->mii_state,
2355 		    status, MII_STATUS_BITS));
2356 
2357 		if (status & MII_STATUS_REMFAULT) {
2358 			/*
2359 			 * The link parnert told me something wrong happend.
2360 			 * What do we do ?
2361 			 */
2362 			cmn_err(CE_CONT,
2363 			    "!%s: auto-negotiation failed: remote fault",
2364 			    dp->name);
2365 			goto autonego;
2366 		}
2367 
2368 		if ((status & MII_STATUS_ANDONE) == 0) {
2369 			if (dp->mii_timer <= 0) {
2370 				/*
2371 				 * Auto-negotiation was timed out,
2372 				 * try again w/o resetting phy.
2373 				 */
2374 				if (!dp->mii_supress_msg) {
2375 					cmn_err(CE_WARN,
2376 				    "!%s: auto-negotiation failed: timeout",
2377 					    dp->name);
2378 					dp->mii_supress_msg = B_TRUE;
2379 				}
2380 				goto autonego;
2381 			}
2382 			/*
2383 			 * Auto-negotiation is in progress. Wait.
2384 			 */
2385 			dp->mii_interval = dp->gc.gc_mii_an_watch_interval;
2386 			goto next;
2387 		}
2388 
2389 		/*
2390 		 * Auto-negotiation have completed.
2391 		 * Assume linkdown and fall through.
2392 		 */
2393 		dp->mii_supress_msg = B_FALSE;
2394 		dp->mii_state = MII_STATE_AN_DONE;
2395 		DPRINTF(0, (CE_CONT,
2396 		    "!%s: auto-negotiation completed, MII_STATUS:%b",
2397 		    dp->name, status, MII_STATUS_BITS));
2398 
2399 		if (dp->gc.gc_mii_an_delay > 0) {
2400 			dp->mii_timer = dp->gc.gc_mii_an_delay;
2401 			dp->mii_interval = drv_usectohz(20*1000);
2402 			goto next;
2403 		}
2404 
2405 		dp->mii_timer = 0;
2406 		diff = 0;
2407 		goto next_nowait;
2408 
2409 	case MII_STATE_AN_DONE:
2410 		/*
2411 		 * Auto-negotiation have done. Now we can set up media.
2412 		 */
2413 		dp->mii_timer -= diff;
2414 		if (dp->mii_timer > 0) {
2415 			/* wait for a while */
2416 			dp->mii_interval = WATCH_INTERVAL_FAST;
2417 			goto next;
2418 		}
2419 
2420 		/*
2421 		 * set up the result of auto negotiation
2422 		 */
2423 
2424 		/*
2425 		 * Read registers required to determin current
2426 		 * duplex mode and media speed.
2427 		 */
2428 		if (dp->gc.gc_mii_an_delay > 0) {
2429 			/*
2430 			 * As the link watcher context has been suspended,
2431 			 * 'status' is invalid. We must status register here
2432 			 */
2433 			status = gem_mii_read(dp, MII_STATUS);
2434 		}
2435 		advert = gem_mii_read(dp, MII_AN_ADVERT);
2436 		lpable = gem_mii_read(dp, MII_AN_LPABLE);
2437 		exp = gem_mii_read(dp, MII_AN_EXPANSION);
2438 		if (exp == 0xffff) {
2439 			/* some phys don't have exp register */
2440 			exp = 0;
2441 		}
2442 		ctl1000  = 0;
2443 		stat1000 = 0;
2444 		if (dp->mii_status & MII_STATUS_XSTATUS) {
2445 			ctl1000  = gem_mii_read(dp, MII_1000TC);
2446 			stat1000 = gem_mii_read(dp, MII_1000TS);
2447 		}
2448 		dp->mii_lpable = lpable;
2449 		dp->mii_advert = advert;
2450 		dp->mii_exp = exp;
2451 		dp->mii_ctl1000  = ctl1000;
2452 		dp->mii_stat1000 = stat1000;
2453 
2454 		cmn_err(CE_CONT,
2455 		"!%s: auto-negotiation done, advert:%b, lpable:%b, exp:%b",
2456 		    dp->name,
2457 		    advert, MII_ABILITY_BITS,
2458 		    lpable, MII_ABILITY_BITS,
2459 		    exp, MII_AN_EXP_BITS);
2460 
2461 		if (dp->mii_status & MII_STATUS_XSTATUS) {
2462 			cmn_err(CE_CONT,
2463 			    "! MII_1000TC:%b, MII_1000TS:%b",
2464 			    ctl1000, MII_1000TC_BITS,
2465 			    stat1000, MII_1000TS_BITS);
2466 		}
2467 
2468 		if (gem_population(lpable) <= 1 &&
2469 		    (exp & MII_AN_EXP_LPCANAN) == 0) {
2470 			if ((advert & MII_ABILITY_TECH) != lpable) {
2471 				cmn_err(CE_WARN,
2472 				    "!%s: but the link partnar doesn't seem"
2473 				    " to have auto-negotiation capability."
2474 				    " please check the link configuration.",
2475 				    dp->name);
2476 			}
2477 			/*
2478 			 * it should be result of parallel detection, which
2479 			 * cannot detect duplex mode.
2480 			 */
2481 			if (lpable & MII_ABILITY_100BASE_TX) {
2482 				/*
2483 				 * we prefer full duplex mode for 100Mbps
2484 				 * connection, if we can.
2485 				 */
2486 				lpable |= advert & MII_ABILITY_100BASE_TX_FD;
2487 			}
2488 
2489 			if ((advert & lpable) == 0 &&
2490 			    lpable & MII_ABILITY_10BASE_T) {
2491 				lpable |= advert & MII_ABILITY_10BASE_T_FD;
2492 			}
2493 			/*
2494 			 * as the link partnar isn't auto-negotiatable, use
2495 			 * fixed mode temporally.
2496 			 */
2497 			fix_phy = B_TRUE;
2498 		} else if (lpable == 0) {
2499 			cmn_err(CE_WARN, "!%s: wrong lpable.", dp->name);
2500 			goto reset_phy;
2501 		}
2502 		/*
2503 		 * configure current link mode according to AN priority.
2504 		 */
2505 		val = advert & lpable;
2506 		if ((ctl1000 & MII_1000TC_ADV_FULL) &&
2507 		    (stat1000 & MII_1000TS_LP_FULL)) {
2508 			/* 1000BaseT & full duplex */
2509 			dp->speed	 = GEM_SPD_1000;
2510 			dp->full_duplex  = B_TRUE;
2511 		} else if ((ctl1000 & MII_1000TC_ADV_HALF) &&
2512 		    (stat1000 & MII_1000TS_LP_HALF)) {
2513 			/* 1000BaseT & half duplex */
2514 			dp->speed = GEM_SPD_1000;
2515 			dp->full_duplex = B_FALSE;
2516 		} else if (val & MII_ABILITY_100BASE_TX_FD) {
2517 			/* 100BaseTx & full duplex */
2518 			dp->speed = GEM_SPD_100;
2519 			dp->full_duplex = B_TRUE;
2520 		} else if (val & MII_ABILITY_100BASE_T4) {
2521 			/* 100BaseT4 & full duplex */
2522 			dp->speed = GEM_SPD_100;
2523 			dp->full_duplex = B_TRUE;
2524 		} else if (val & MII_ABILITY_100BASE_TX) {
2525 			/* 100BaseTx & half duplex */
2526 			dp->speed	 = GEM_SPD_100;
2527 			dp->full_duplex  = B_FALSE;
2528 		} else if (val & MII_ABILITY_10BASE_T_FD) {
2529 			/* 10BaseT & full duplex */
2530 			dp->speed	 = GEM_SPD_10;
2531 			dp->full_duplex  = B_TRUE;
2532 		} else if (val & MII_ABILITY_10BASE_T) {
2533 			/* 10BaseT & half duplex */
2534 			dp->speed	 = GEM_SPD_10;
2535 			dp->full_duplex  = B_FALSE;
2536 		} else {
2537 			/*
2538 			 * It seems that the link partnar doesn't have
2539 			 * auto-negotiation capability and our PHY
2540 			 * could not report the correct current mode.
2541 			 * We guess current mode by mii_control register.
2542 			 */
2543 			val = gem_mii_read(dp, MII_CONTROL);
2544 
2545 			/* select 100m full or 10m half */
2546 			dp->speed = (val & MII_CONTROL_100MB) ?
2547 			    GEM_SPD_100 : GEM_SPD_10;
2548 			dp->full_duplex = dp->speed != GEM_SPD_10;
2549 			fix_phy = B_TRUE;
2550 
2551 			cmn_err(CE_NOTE,
2552 			    "!%s: auto-negotiation done but "
2553 			    "common ability not found.\n"
2554 			    "PHY state: control:%b advert:%b lpable:%b\n"
2555 			    "guessing %d Mbps %s duplex mode",
2556 			    dp->name,
2557 			    val, MII_CONTROL_BITS,
2558 			    advert, MII_ABILITY_BITS,
2559 			    lpable, MII_ABILITY_BITS,
2560 			    gem_speed_value[dp->speed],
2561 			    dp->full_duplex ? "full" : "half");
2562 		}
2563 
2564 		if (dp->full_duplex) {
2565 			dp->flow_control =
2566 			    gem_fc_result[fc_cap_decode(advert)]
2567 			    [fc_cap_decode(lpable)];
2568 		} else {
2569 			dp->flow_control = FLOW_CONTROL_NONE;
2570 		}
2571 		dp->mii_state = MII_STATE_MEDIA_SETUP;
2572 		/* FALLTHROUGH */
2573 
2574 	case MII_STATE_MEDIA_SETUP:
2575 		dp->mii_state = MII_STATE_LINKDOWN;
2576 		dp->mii_timer = dp->gc.gc_mii_linkdown_timeout;
2577 		DPRINTF(2, (CE_CONT, "!%s: setup midia mode done", dp->name));
2578 		dp->mii_supress_msg = B_FALSE;
2579 
2580 		/* use short interval */
2581 		dp->mii_interval = WATCH_INTERVAL_FAST;
2582 
2583 		if ((!dp->anadv_autoneg) ||
2584 		    dp->gc.gc_mii_an_oneshot || fix_phy) {
2585 
2586 			/*
2587 			 * write specified mode to phy.
2588 			 */
2589 			val = gem_mii_read(dp, MII_CONTROL);
2590 			val &= ~(MII_CONTROL_SPEED | MII_CONTROL_FDUPLEX |
2591 			    MII_CONTROL_ANE | MII_CONTROL_RSAN);
2592 
2593 			if (dp->full_duplex) {
2594 				val |= MII_CONTROL_FDUPLEX;
2595 			}
2596 
2597 			switch (dp->speed) {
2598 			case GEM_SPD_1000:
2599 				val |= MII_CONTROL_1000MB;
2600 				break;
2601 
2602 			case GEM_SPD_100:
2603 				val |= MII_CONTROL_100MB;
2604 				break;
2605 
2606 			default:
2607 				cmn_err(CE_WARN, "%s: unknown speed:%d",
2608 				    dp->name, dp->speed);
2609 				/* FALLTHROUGH */
2610 			case GEM_SPD_10:
2611 				/* for GEM_SPD_10, do nothing */
2612 				break;
2613 			}
2614 
2615 			if (dp->mii_status & MII_STATUS_XSTATUS) {
2616 				gem_mii_write(dp,
2617 				    MII_1000TC, MII_1000TC_CFG_EN);
2618 			}
2619 			gem_mii_write(dp, MII_CONTROL, val);
2620 		}
2621 
2622 		if (dp->nic_state >= NIC_STATE_INITIALIZED) {
2623 			/* notify the result of auto-negotiation to mac */
2624 			(*dp->gc.gc_set_media)(dp);
2625 		}
2626 
2627 		if ((void *)dp->gc.gc_mii_tune_phy) {
2628 			/* for built-in sis900 */
2629 			/* XXX - this code should be removed.  */
2630 			(*dp->gc.gc_mii_tune_phy)(dp);
2631 		}
2632 
2633 		goto next_nowait;
2634 
2635 	case MII_STATE_LINKDOWN:
2636 		status = gem_mii_read(dp, MII_STATUS);
2637 		if (status & MII_STATUS_LINKUP) {
2638 			/*
2639 			 * Link going up
2640 			 */
2641 			dp->mii_state = MII_STATE_LINKUP;
2642 			dp->mii_supress_msg = B_FALSE;
2643 
2644 			DPRINTF(0, (CE_CONT,
2645 			    "!%s: link up detected: mii_stat:%b",
2646 			    dp->name, status, MII_STATUS_BITS));
2647 
2648 			/*
2649 			 * MII_CONTROL_100MB and  MII_CONTROL_FDUPLEX are
2650 			 * ignored when MII_CONTROL_ANE is set.
2651 			 */
2652 			cmn_err(CE_CONT,
2653 			    "!%s: Link up: %d Mbps %s duplex %s flow control",
2654 			    dp->name,
2655 			    gem_speed_value[dp->speed],
2656 			    dp->full_duplex ? "full" : "half",
2657 			    gem_fc_type[dp->flow_control]);
2658 
2659 			dp->mii_interval = dp->gc.gc_mii_link_watch_interval;
2660 
2661 			/* XXX - we need other timer to watch statictics */
2662 			if (dp->gc.gc_mii_hw_link_detection &&
2663 			    dp->nic_state == NIC_STATE_ONLINE) {
2664 				dp->mii_interval = 0;
2665 			}
2666 
2667 			if (dp->nic_state == NIC_STATE_ONLINE) {
2668 				if (!dp->mac_active) {
2669 					(void) gem_mac_start(dp);
2670 				}
2671 				tx_sched = B_TRUE;
2672 			}
2673 			goto next;
2674 		}
2675 
2676 		dp->mii_supress_msg = B_TRUE;
2677 		if (dp->anadv_autoneg) {
2678 			dp->mii_timer -= diff;
2679 			if (dp->mii_timer <= 0) {
2680 				/*
2681 				 * link down timer expired.
2682 				 * need to restart auto-negotiation.
2683 				 */
2684 				linkdown_action =
2685 				    dp->gc.gc_mii_linkdown_timeout_action;
2686 				goto restart_autonego;
2687 			}
2688 		}
2689 		/* don't change mii_state */
2690 		break;
2691 
2692 	case MII_STATE_LINKUP:
2693 		status = gem_mii_read(dp, MII_STATUS);
2694 		if ((status & MII_STATUS_LINKUP) == 0) {
2695 			/*
2696 			 * Link going down
2697 			 */
2698 			cmn_err(CE_NOTE,
2699 			    "!%s: link down detected: mii_stat:%b",
2700 			    dp->name, status, MII_STATUS_BITS);
2701 
2702 			if (dp->nic_state == NIC_STATE_ONLINE &&
2703 			    dp->mac_active &&
2704 			    dp->gc.gc_mii_stop_mac_on_linkdown) {
2705 				(void) gem_mac_stop(dp, 0);
2706 
2707 				if (dp->tx_blocked) {
2708 					/* drain tx */
2709 					tx_sched = B_TRUE;
2710 				}
2711 			}
2712 
2713 			if (dp->anadv_autoneg) {
2714 				/* need to restart auto-negotiation */
2715 				linkdown_action = dp->gc.gc_mii_linkdown_action;
2716 				goto restart_autonego;
2717 			}
2718 
2719 			dp->mii_state = MII_STATE_LINKDOWN;
2720 			dp->mii_timer = dp->gc.gc_mii_linkdown_timeout;
2721 
2722 			if ((void *)dp->gc.gc_mii_tune_phy) {
2723 				/* for built-in sis900 */
2724 				(*dp->gc.gc_mii_tune_phy)(dp);
2725 			}
2726 			dp->mii_interval = dp->gc.gc_mii_link_watch_interval;
2727 			goto next;
2728 		}
2729 
2730 		/* don't change mii_state */
2731 		if (dp->gc.gc_mii_hw_link_detection &&
2732 		    dp->nic_state == NIC_STATE_ONLINE) {
2733 			dp->mii_interval = 0;
2734 			goto next;
2735 		}
2736 		break;
2737 	}
2738 	dp->mii_interval = dp->gc.gc_mii_link_watch_interval;
2739 	goto next;
2740 
2741 	/* Actions on the end of state routine */
2742 
2743 restart_autonego:
2744 	switch (linkdown_action) {
2745 	case MII_ACTION_RESET:
2746 		if (!dp->mii_supress_msg) {
2747 			cmn_err(CE_CONT, "!%s: resetting PHY", dp->name);
2748 		}
2749 		dp->mii_supress_msg = B_TRUE;
2750 		goto reset_phy;
2751 
2752 	case MII_ACTION_NONE:
2753 		dp->mii_supress_msg = B_TRUE;
2754 		if (dp->gc.gc_mii_an_oneshot) {
2755 			goto autonego;
2756 		}
2757 		/* PHY will restart autonego automatically */
2758 		dp->mii_state = MII_STATE_AUTONEGOTIATING;
2759 		dp->mii_timer = dp->gc.gc_mii_an_timeout;
2760 		dp->mii_interval = dp->gc.gc_mii_an_watch_interval;
2761 		goto next;
2762 
2763 	case MII_ACTION_RSA:
2764 		if (!dp->mii_supress_msg) {
2765 			cmn_err(CE_CONT, "!%s: restarting auto-negotiation",
2766 			    dp->name);
2767 		}
2768 		dp->mii_supress_msg = B_TRUE;
2769 		goto autonego;
2770 
2771 	default:
2772 		cmn_err(CE_WARN, "!%s: unknowm linkdown action: %d",
2773 		    dp->name, dp->gc.gc_mii_linkdown_action);
2774 		dp->mii_supress_msg = B_TRUE;
2775 	}
2776 	/* NOTREACHED */
2777 
2778 reset_phy:
2779 	if (!dp->mii_supress_msg) {
2780 		cmn_err(CE_CONT, "!%s: resetting PHY", dp->name);
2781 	}
2782 	dp->mii_state = MII_STATE_RESETTING;
2783 	dp->mii_timer = dp->gc.gc_mii_reset_timeout;
2784 	if (!dp->gc.gc_mii_dont_reset) {
2785 		gem_mii_write(dp, MII_CONTROL, MII_CONTROL_RESET);
2786 	}
2787 	dp->mii_interval = WATCH_INTERVAL_FAST;
2788 	goto next;
2789 
2790 autonego:
2791 	if (!dp->mii_supress_msg) {
2792 		cmn_err(CE_CONT, "!%s: auto-negotiation started", dp->name);
2793 	}
2794 	dp->mii_state = MII_STATE_AUTONEGOTIATING;
2795 	dp->mii_timer = dp->gc.gc_mii_an_timeout;
2796 
2797 	/* start/restart auto nego */
2798 	val = gem_mii_read(dp, MII_CONTROL) &
2799 	    ~(MII_CONTROL_ISOLATE | MII_CONTROL_PWRDN | MII_CONTROL_RESET);
2800 
2801 	gem_mii_write(dp, MII_CONTROL,
2802 	    val | MII_CONTROL_RSAN | MII_CONTROL_ANE);
2803 
2804 	dp->mii_interval = dp->gc.gc_mii_an_watch_interval;
2805 
2806 next:
2807 	if (dp->link_watcher_id == 0 && dp->mii_interval) {
2808 		/* we must schedule next mii_watcher */
2809 		dp->link_watcher_id =
2810 		    timeout((void (*)(void *))&gem_mii_link_watcher,
2811 		    (void *)dp, dp->mii_interval);
2812 	}
2813 
2814 	if (old_mii_state != dp->mii_state) {
2815 		/* notify new mii link state */
2816 		if (dp->mii_state == MII_STATE_LINKUP) {
2817 			dp->linkup_delay = 0;
2818 			GEM_LINKUP(dp);
2819 		} else if (dp->linkup_delay <= 0) {
2820 			GEM_LINKDOWN(dp);
2821 		}
2822 	} else if (dp->linkup_delay < 0) {
2823 		/* first linkup timeout */
2824 		dp->linkup_delay = 0;
2825 		GEM_LINKDOWN(dp);
2826 	}
2827 
2828 	return (tx_sched);
2829 }
2830 
2831 static void
gem_mii_link_watcher(struct gem_dev * dp)2832 gem_mii_link_watcher(struct gem_dev *dp)
2833 {
2834 	boolean_t	tx_sched;
2835 
2836 	mutex_enter(&dp->intrlock);
2837 
2838 	dp->link_watcher_id = 0;
2839 	tx_sched = gem_mii_link_check(dp);
2840 #if GEM_DEBUG_LEVEL > 2
2841 	if (dp->link_watcher_id == 0) {
2842 		cmn_err(CE_CONT, "%s: link watcher stopped", dp->name);
2843 	}
2844 #endif
2845 	mutex_exit(&dp->intrlock);
2846 
2847 	if (tx_sched) {
2848 		/* kick potentially stopped downstream */
2849 		mac_tx_update(dp->mh);
2850 	}
2851 }
2852 
/*
 * gem_mii_probe_default: default PHY probe routine.
 *
 * Locates the phy by scanning MII addresses, records its status and id
 * registers, and probes whether the phy can advertise pause abilities
 * (clearing the corresponding gc_flow_control bits when it cannot).
 * Returns GEM_SUCCESS when a phy was found, otherwise GEM_FAILURE.
 */
int
gem_mii_probe_default(struct gem_dev *dp)
{
	int8_t		phy;
	uint16_t	status;
	uint16_t	adv;
	uint16_t	adv_org;

	DPRINTF(3, (CE_CONT, "!%s: %s: called", dp->name, __func__));

	/*
	 * Scan PHY
	 */
	/* ensure to send sync bits */
	dp->mii_status = 0;

	/* Try default phy first */
	/* non-zero mii_phy_addr: a default (or internal) phy was assigned */
	if (dp->mii_phy_addr) {
		status = gem_mii_read(dp, MII_STATUS);
		if (status != 0xffff && status != 0) {
			gem_mii_write(dp, MII_CONTROL, 0);
			goto PHY_found;
		}

		/* negative address means internal/non-MII phy: no scan */
		if (dp->mii_phy_addr < 0) {
			cmn_err(CE_NOTE,
	    "!%s: failed to probe default internal and/or non-MII PHY",
			    dp->name);
			return (GEM_FAILURE);
		}

		cmn_err(CE_NOTE,
		    "!%s: failed to probe default MII PHY at %d",
		    dp->name, dp->mii_phy_addr);
	}

	/* Try all possible address */
	for (phy = dp->gc.gc_mii_addr_min; phy < 32; phy++) {
		dp->mii_phy_addr = phy;
		status = gem_mii_read(dp, MII_STATUS);

		if (status != 0xffff && status != 0) {
			gem_mii_write(dp, MII_CONTROL, 0);
			goto PHY_found;
		}
	}

	/* second pass: write MII_CONTROL first to wake the phy up */
	for (phy = dp->gc.gc_mii_addr_min; phy < 32; phy++) {
		dp->mii_phy_addr = phy;
		gem_mii_write(dp, MII_CONTROL, 0);
		status = gem_mii_read(dp, MII_STATUS);

		if (status != 0xffff && status != 0) {
			goto PHY_found;
		}
	}

	cmn_err(CE_NOTE, "!%s: no MII PHY found", dp->name);
	dp->mii_phy_addr = -1;

	return (GEM_FAILURE);

PHY_found:
	dp->mii_status = status;
	dp->mii_phy_id  = (gem_mii_read(dp, MII_PHYIDH) << 16) |
	    gem_mii_read(dp, MII_PHYIDL);

	if (dp->mii_phy_addr < 0) {
		cmn_err(CE_CONT, "!%s: using internal/non-MII PHY(0x%08x)",
		    dp->name, dp->mii_phy_id);
	} else {
		cmn_err(CE_CONT, "!%s: MII PHY (0x%08x) found at %d",
		    dp->name, dp->mii_phy_id, dp->mii_phy_addr);
	}

	cmn_err(CE_CONT, "!%s: PHY control:%b, status:%b, advert:%b, lpar:%b",
	    dp->name,
	    gem_mii_read(dp, MII_CONTROL), MII_CONTROL_BITS,
	    status, MII_STATUS_BITS,
	    gem_mii_read(dp, MII_AN_ADVERT), MII_ABILITY_BITS,
	    gem_mii_read(dp, MII_AN_LPABLE), MII_ABILITY_BITS);

	dp->mii_xstatus = 0;
	if (status & MII_STATUS_XSTATUS) {
		dp->mii_xstatus = gem_mii_read(dp, MII_XSTATUS);

		cmn_err(CE_CONT, "!%s: xstatus:%b",
		    dp->name, dp->mii_xstatus, MII_XSTATUS_BITS);
	}

	/* check if the phy can advertize pause abilities */
	/* write the pause bits, then read back to see which ones stuck */
	adv_org = gem_mii_read(dp, MII_AN_ADVERT);

	gem_mii_write(dp, MII_AN_ADVERT,
	    MII_ABILITY_PAUSE | MII_ABILITY_ASMPAUSE);

	adv = gem_mii_read(dp, MII_AN_ADVERT);

	if ((adv & MII_ABILITY_PAUSE) == 0) {
		dp->gc.gc_flow_control &= ~1;
	}

	if ((adv & MII_ABILITY_ASMPAUSE) == 0) {
		dp->gc.gc_flow_control &= ~2;
	}

	/* restore the original advertisement register */
	gem_mii_write(dp, MII_AN_ADVERT, adv_org);

	return (GEM_SUCCESS);
}
2963 
2964 static void
gem_mii_start(struct gem_dev * dp)2965 gem_mii_start(struct gem_dev *dp)
2966 {
2967 	DPRINTF(3, (CE_CONT, "!%s: %s: called", dp->name, __func__));
2968 
2969 	/* make a first call of check link */
2970 	dp->mii_state = MII_STATE_UNKNOWN;
2971 	dp->mii_last_check = ddi_get_lbolt();
2972 	dp->linkup_delay = dp->gc.gc_mii_linkdown_timeout;
2973 	(void) gem_mii_link_watcher(dp);
2974 }
2975 
2976 static void
gem_mii_stop(struct gem_dev * dp)2977 gem_mii_stop(struct gem_dev *dp)
2978 {
2979 	DPRINTF(3, (CE_CONT, "!%s: %s: called", dp->name, __func__));
2980 
2981 	/* Ensure timer routine stopped */
2982 	mutex_enter(&dp->intrlock);
2983 	if (dp->link_watcher_id) {
2984 		while (untimeout(dp->link_watcher_id) == -1)
2985 			;
2986 		dp->link_watcher_id = 0;
2987 	}
2988 	mutex_exit(&dp->intrlock);
2989 }
2990 
2991 boolean_t
gem_get_mac_addr_conf(struct gem_dev * dp)2992 gem_get_mac_addr_conf(struct gem_dev *dp)
2993 {
2994 	char		propname[32];
2995 	char		*valstr;
2996 	uint8_t		mac[ETHERADDRL];
2997 	char		*cp;
2998 	int		c;
2999 	int		i;
3000 	int		j;
3001 	uint8_t		v;
3002 	uint8_t		d;
3003 	uint8_t		ored;
3004 
3005 	DPRINTF(3, (CE_CONT, "!%s: %s: called", dp->name, __func__));
3006 	/*
3007 	 * Get ethernet address from .conf file
3008 	 */
3009 	(void) sprintf(propname, "mac-addr");
3010 	if ((ddi_prop_lookup_string(DDI_DEV_T_ANY, dp->dip,
3011 	    DDI_PROP_DONTPASS, propname, &valstr)) !=
3012 	    DDI_PROP_SUCCESS) {
3013 		return (B_FALSE);
3014 	}
3015 
3016 	if (strlen(valstr) != ETHERADDRL*3-1) {
3017 		goto syntax_err;
3018 	}
3019 
3020 	cp = valstr;
3021 	j  = 0;
3022 	ored = 0;
3023 	for (;;) {
3024 		v = 0;
3025 		for (i = 0; i < 2; i++) {
3026 			c = *cp++;
3027 
3028 			if (c >= 'a' && c <= 'f') {
3029 				d = c - 'a' + 10;
3030 			} else if (c >= 'A' && c <= 'F') {
3031 				d = c - 'A' + 10;
3032 			} else if (c >= '0' && c <= '9') {
3033 				d = c - '0';
3034 			} else {
3035 				goto syntax_err;
3036 			}
3037 			v = (v << 4) | d;
3038 		}
3039 
3040 		mac[j++] = v;
3041 		ored |= v;
3042 		if (j == ETHERADDRL) {
3043 			/* done */
3044 			break;
3045 		}
3046 
3047 		c = *cp++;
3048 		if (c != ':') {
3049 			goto syntax_err;
3050 		}
3051 	}
3052 
3053 	if (ored == 0) {
3054 		goto err;
3055 	}
3056 	for (i = 0; i < ETHERADDRL; i++) {
3057 		dp->dev_addr.ether_addr_octet[i] = mac[i];
3058 	}
3059 	ddi_prop_free(valstr);
3060 	return (B_TRUE);
3061 
3062 syntax_err:
3063 	cmn_err(CE_CONT,
3064 	    "!%s: read mac addr: trying .conf: syntax err %s",
3065 	    dp->name, valstr);
3066 err:
3067 	ddi_prop_free(valstr);
3068 
3069 	return (B_FALSE);
3070 }
3071 
3072 
3073 /* ============================================================== */
3074 /*
3075  * internal start/stop interface
3076  */
3077 /* ============================================================== */
static int
gem_mac_set_rx_filter(struct gem_dev *dp)
{
	/* apply the current rx filter settings via the chip-specific hook */
	return ((*dp->gc.gc_set_rx_filter)(dp));
}
3083 
3084 /*
3085  * gem_mac_init: cold start
3086  */
static int
gem_mac_init(struct gem_dev *dp)
{
	/*
	 * Cold-start the mac: reinitialize rings and tx bookkeeping,
	 * initialize the chip, then post rx buffers.  Fails when the
	 * device is suspended or chip initialization fails.
	 */
	DPRINTF(1, (CE_CONT, "!%s: %s: called", dp->name, __func__));

	if (dp->mac_suspended) {
		/* don't touch hardware while suspended */
		return (GEM_FAILURE);
	}

	dp->mac_active = B_FALSE;

	gem_init_rx_ring(dp);
	gem_init_tx_ring(dp);

	/* reset transmitter state */
	dp->tx_blocked = (clock_t)0;
	dp->tx_busy = 0;
	dp->tx_reclaim_busy = 0;
	dp->tx_max_packets = dp->gc.gc_tx_buf_limit;

	if ((*dp->gc.gc_init_chip)(dp) != GEM_SUCCESS) {
		return (GEM_FAILURE);
	}

	gem_prepare_rx_buf(dp);

	return (GEM_SUCCESS);
}
3115 /*
3116  * gem_mac_start: warm start
3117  */
static int
gem_mac_start(struct gem_dev *dp)
{
	/*
	 * Warm-start the mac once the link is up: mark it active, hand
	 * the posted rx buffers to the chip, start the chip, then reload
	 * any tx buffers still queued in software.  Caller must hold
	 * intrlock; xmitlock is taken here as needed.
	 */
	DPRINTF(1, (CE_CONT, "!%s: %s: called", dp->name, __func__));

	ASSERT(mutex_owned(&dp->intrlock));
	ASSERT(dp->nic_state == NIC_STATE_ONLINE);
	ASSERT(dp->mii_state ==  MII_STATE_LINKUP);

	/* enable tx and rx */
	mutex_enter(&dp->xmitlock);
	if (dp->mac_suspended) {
		mutex_exit(&dp->xmitlock);
		return (GEM_FAILURE);
	}
	dp->mac_active = B_TRUE;
	mutex_exit(&dp->xmitlock);

	/* setup rx buffers */
	(*dp->gc.gc_rx_start)(dp,
	    SLOT(dp->rx_active_head, dp->gc.gc_rx_ring_size),
	    dp->rx_active_tail - dp->rx_active_head);

	if ((*dp->gc.gc_start_chip)(dp) != GEM_SUCCESS) {
		cmn_err(CE_WARN, "%s: %s: start_chip: failed",
		    dp->name, __func__);
		return (GEM_FAILURE);
	}

	mutex_enter(&dp->xmitlock);

	/* load untranmitted packets to the nic */
	ASSERT(dp->tx_softq_tail - dp->tx_softq_head >= 0);
	if (dp->tx_softq_tail - dp->tx_softq_head > 0) {
		gem_tx_load_descs_oo(dp,
		    dp->tx_softq_head, dp->tx_softq_tail,
		    GEM_TXFLAG_HEAD);
		/* issue preloaded tx buffers */
		gem_tx_start_unit(dp);
	}

	mutex_exit(&dp->xmitlock);

	return (GEM_SUCCESS);
}
3163 
3164 static int
gem_mac_stop(struct gem_dev * dp,uint_t flags)3165 gem_mac_stop(struct gem_dev *dp, uint_t flags)
3166 {
3167 	int		i;
3168 	int		wait_time; /* in uS */
3169 #ifdef GEM_DEBUG_LEVEL
3170 	clock_t		now;
3171 #endif
3172 	int		ret = GEM_SUCCESS;
3173 
3174 	DPRINTF(1, (CE_CONT, "!%s: %s: called, rx_buf_free:%d",
3175 	    dp->name, __func__, dp->rx_buf_freecnt));
3176 
3177 	ASSERT(mutex_owned(&dp->intrlock));
3178 	ASSERT(!mutex_owned(&dp->xmitlock));
3179 
3180 	/*
3181 	 * Block transmits
3182 	 */
3183 	mutex_enter(&dp->xmitlock);
3184 	if (dp->mac_suspended) {
3185 		mutex_exit(&dp->xmitlock);
3186 		return (GEM_SUCCESS);
3187 	}
3188 	dp->mac_active = B_FALSE;
3189 
3190 	while (dp->tx_busy > 0) {
3191 		cv_wait(&dp->tx_drain_cv, &dp->xmitlock);
3192 	}
3193 	mutex_exit(&dp->xmitlock);
3194 
3195 	if ((flags & GEM_RESTART_NOWAIT) == 0) {
3196 		/*
3197 		 * Wait for all tx buffers sent.
3198 		 */
3199 		wait_time =
3200 		    2 * (8 * MAXPKTBUF(dp) / gem_speed_value[dp->speed]) *
3201 		    (dp->tx_active_tail - dp->tx_active_head);
3202 
3203 		DPRINTF(0, (CE_CONT, "%s: %s: max drain time: %d uS",
3204 		    dp->name, __func__, wait_time));
3205 		i = 0;
3206 #ifdef GEM_DEBUG_LEVEL
3207 		now = ddi_get_lbolt();
3208 #endif
3209 		while (dp->tx_active_tail != dp->tx_active_head) {
3210 			if (i > wait_time) {
3211 				/* timeout */
3212 				cmn_err(CE_NOTE, "%s: %s timeout: tx drain",
3213 				    dp->name, __func__);
3214 				break;
3215 			}
3216 			(void) gem_reclaim_txbuf(dp);
3217 			drv_usecwait(100);
3218 			i += 100;
3219 		}
3220 		DPRINTF(0, (CE_NOTE,
3221 		    "!%s: %s: the nic have drained in %d uS, real %d mS",
3222 		    dp->name, __func__, i,
3223 		    10*((int)(ddi_get_lbolt() - now))));
3224 	}
3225 
3226 	/*
3227 	 * Now we can stop the nic safely.
3228 	 */
3229 	if ((*dp->gc.gc_stop_chip)(dp) != GEM_SUCCESS) {
3230 		cmn_err(CE_NOTE, "%s: %s: resetting the chip to stop it",
3231