xref: /illumos-gate/usr/src/uts/common/io/gld.c (revision 9b664393)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  * Copyright (c) 2016 by Delphix. All rights reserved.
25  * Copyright 2018 Joyent, Inc.
26  * Copyright 2022 Garrett D'Amore
27  */
28 
29 /*
30  * gld - Generic LAN Driver Version 2, PSARC/1997/382
31  *
32  * This is a utility module that provides generic facilities for
33  * LAN drivers.  The DLPI protocol and most STREAMS interfaces
34  * are handled here.
35  *
36  * It no longer provides compatibility with drivers
37  * implemented according to the GLD v0 documentation published
38  * in 1993. (See PSARC 2003/728)
39  */
40 
41 
42 #include <sys/types.h>
43 #include <sys/errno.h>
44 #include <sys/stropts.h>
45 #include <sys/stream.h>
46 #include <sys/kmem.h>
47 #include <sys/stat.h>
48 #include <sys/modctl.h>
49 #include <sys/kstat.h>
50 #include <sys/debug.h>
51 #include <sys/note.h>
52 #include <sys/sysmacros.h>
53 
54 #include <sys/byteorder.h>
55 #include <sys/strsun.h>
56 #include <sys/strsubr.h>
57 #include <sys/dlpi.h>
58 #include <sys/pattr.h>
59 #include <sys/ethernet.h>
60 #include <sys/ib/clients/ibd/ibd.h>
61 #include <sys/policy.h>
62 #include <sys/atomic.h>
63 
64 #include <sys/gld.h>
65 #include <sys/gldpriv.h>
66 
67 #include <sys/ddi.h>
68 #include <sys/sunddi.h>
69 
70 /*
71  * Macros to increment statistics.
72  */
73 
74 /*
75  * Increase kstats. Note this operation is not atomic. It can be used when
76  * GLDM_LOCK_HELD_WRITE(macinfo).
77  */
78 #define	BUMP(stats, vstats, stat, delta)	do {			\
79 	((stats)->stat) += (delta);					\
80 	_NOTE(CONSTANTCONDITION)					\
81 	if ((vstats) != NULL)						\
82 		((struct gld_stats *)(vstats))->stat += (delta);	\
83 	_NOTE(CONSTANTCONDITION)					\
84 } while (0)
85 
86 #define	ATOMIC_BUMP_STAT(stat, delta)	do {			\
87 	_NOTE(CONSTANTCONDITION)				\
88 	if (sizeof ((stat)) == sizeof (uint32_t)) {		\
89 		atomic_add_32((uint32_t *)&(stat), (delta));	\
90 	_NOTE(CONSTANTCONDITION)				\
91 	} else if (sizeof ((stat)) == sizeof (uint64_t)) {	\
92 		atomic_add_64((uint64_t *)&(stat), (delta));	\
93 	}							\
94 	_NOTE(CONSTANTCONDITION)				\
95 } while (0)
96 
97 #define	ATOMIC_BUMP(stats, vstats, stat, delta)	do {			\
98 	ATOMIC_BUMP_STAT((stats)->stat, (delta));			\
99 	_NOTE(CONSTANTCONDITION)					\
100 	if ((vstats) != NULL) {						\
101 		ATOMIC_BUMP_STAT(((struct gld_stats *)(vstats))->stat,	\
102 		    (delta));						\
103 	}								\
104 	_NOTE(CONSTANTCONDITION)					\
105 } while (0)
106 
107 #define	UPDATE_STATS(stats, vstats, pktinfo, delta) {			\
108 	if ((pktinfo).isBroadcast) {					\
109 		ATOMIC_BUMP((stats), (vstats),				\
110 		    glds_brdcstxmt, (delta));				\
111 	} else if ((pktinfo).isMulticast) {				\
112 		ATOMIC_BUMP((stats), (vstats), glds_multixmt, (delta));	\
113 	}								\
114 	ATOMIC_BUMP((stats), (vstats), glds_bytexmt64,			\
115 	    ((pktinfo).pktLen));					\
116 	ATOMIC_BUMP((stats), (vstats), glds_pktxmt64, (delta));		\
117 }
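
/*
 * Usage sketch (illustrative only; the mac_pvt/vlan variable names are
 * borrowed from the routines below): a transmit path holding only a
 * reader lock would update both the real device's counters and the
 * per-VLAN counters atomically, for example
 *
 *	UPDATE_STATS(mac_pvt->statistics, vlan->gldv_stats, pktinfo, 1);
 *
 * while code that already holds GLDM_LOCK_HELD_WRITE(macinfo) may use the
 * cheaper non-atomic form
 *
 *	BUMP(mac_pvt->statistics, vlan->gldv_stats, glds_errxmt, 1);
 *
 * In either case the "vstats" argument may be NULL when no per-VLAN
 * statistics structure applies.
 */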
118 
119 #ifdef GLD_DEBUG
120 int gld_debug = GLDERRS;
121 #endif
122 
123 /* called from gld_register */
124 static int gld_initstats(gld_mac_info_t *);
125 
126 /* called from kstat mechanism, and from wsrv's get_statistics */
127 static int gld_update_kstat(kstat_t *, int);
128 
129 /* statistics for additional vlans */
130 static int gld_init_vlan_stats(gld_vlan_t *);
131 static int gld_update_vlan_kstat(kstat_t *, int);
132 
133 /* called from gld_getinfo */
134 static dev_info_t *gld_finddevinfo(dev_t);
135 
136 /* called from wput, wsrv, unidata, and v0_sched to send a packet */
137 /* also from the source routing stuff for sending RDE protocol packets */
138 static int gld_start(queue_t *, mblk_t *, int, uint32_t);
139 
140 /* called from gld_start to loopback packet(s) in promiscuous mode */
141 static void gld_precv(gld_mac_info_t *, mblk_t *, uint32_t, struct gld_stats *);
142 
143 /* receive group: called from gld_recv and gld_precv* with maclock held */
144 static void gld_sendup(gld_mac_info_t *, pktinfo_t *, mblk_t *,
145     int (*)());
146 static int gld_accept(gld_t *, pktinfo_t *);
147 static int gld_mcmatch(gld_t *, pktinfo_t *);
148 static int gld_multicast(unsigned char *, gld_t *);
149 static int gld_paccept(gld_t *, pktinfo_t *);
150 static void gld_passon(gld_t *, mblk_t *, pktinfo_t *,
151     void (*)(queue_t *, mblk_t *));
152 static mblk_t *gld_addudind(gld_t *, mblk_t *, pktinfo_t *, boolean_t);
153 
154 /* wsrv group: called from wsrv, single threaded per queue */
155 static int gld_ioctl(queue_t *, mblk_t *);
156 static void gld_fastpath(gld_t *, queue_t *, mblk_t *);
157 static int gld_cmds(queue_t *, mblk_t *);
158 static mblk_t *gld_bindack(queue_t *, mblk_t *);
159 static int gld_notify_req(queue_t *, mblk_t *);
160 static int gld_udqos(queue_t *, mblk_t *);
161 static int gld_bind(queue_t *, mblk_t *);
162 static int gld_unbind(queue_t *, mblk_t *);
163 static int gld_inforeq(queue_t *, mblk_t *);
164 static int gld_unitdata(queue_t *, mblk_t *);
165 static int gldattach(queue_t *, mblk_t *);
166 static int gldunattach(queue_t *, mblk_t *);
167 static int gld_enable_multi(queue_t *, mblk_t *);
168 static int gld_disable_multi(queue_t *, mblk_t *);
169 static void gld_send_disable_multi(gld_mac_info_t *, gld_mcast_t *);
170 static int gld_promisc(queue_t *, mblk_t *, t_uscalar_t, boolean_t);
171 static int gld_physaddr(queue_t *, mblk_t *);
172 static int gld_setaddr(queue_t *, mblk_t *);
173 static int gld_get_statistics(queue_t *, mblk_t *);
174 static int gld_cap(queue_t *, mblk_t *);
175 static int gld_cap_ack(queue_t *, mblk_t *);
176 static int gld_cap_enable(queue_t *, mblk_t *);
177 
178 /* misc utilities, some requiring various mutexes held */
179 static int gld_start_mac(gld_mac_info_t *);
180 static void gld_stop_mac(gld_mac_info_t *);
181 static void gld_set_ipq(gld_t *);
182 static void gld_flushqueue(queue_t *);
183 static glddev_t *gld_devlookup(int);
184 static int gld_findminor(glddev_t *);
185 static void gldinsque(void *, void *);
186 static void gldremque(void *);
187 void gld_bitrevcopy(caddr_t, caddr_t, size_t);
188 void gld_bitreverse(uchar_t *, size_t);
189 char *gld_macaddr_sprintf(char *, unsigned char *, int);
190 static gld_vlan_t *gld_add_vlan(gld_mac_info_t *, uint32_t vid);
191 static void gld_rem_vlan(gld_vlan_t *);
192 gld_vlan_t *gld_find_vlan(gld_mac_info_t *, uint32_t);
193 gld_vlan_t *gld_get_vlan(gld_mac_info_t *, uint32_t);
194 
195 #ifdef GLD_DEBUG
196 static void gld_check_assertions(void);
197 extern void gld_sr_dump(gld_mac_info_t *);
198 #endif
199 
200 /*
201  * Allocate and zero-out "number" structures each of type "structure" in
202  * kernel memory.
203  */
204 #define	GLD_GETSTRUCT(structure, number)   \
205 	(kmem_zalloc((uint_t)(sizeof (structure) * (number)), KM_NOSLEEP))
206 
207 #define	abs(a) ((a) < 0 ? -(a) : (a))
208 
209 uint32_t gld_global_options = GLD_OPT_NO_ETHRXSNAP;
210 
211 /*
212  * The device is of DL_ETHER type and is able to support VLAN by itself.
213  */
214 #define	VLAN_CAPABLE(macinfo) \
215 	((macinfo)->gldm_type == DL_ETHER && \
216 	(macinfo)->gldm_send_tagged != NULL)
217 
218 /*
219  * The set of notifications generatable by GLD itself, the additional
220  * set that can be generated if the MAC driver provides the link-state
221  * tracking callback capability, and the set supported by the GLD
222  * notification code below.
223  *
224  * PLEASE keep these in sync with what the code actually does!
225  */
226 static const uint32_t gld_internal_notes =	DL_NOTE_PROMISC_ON_PHYS |
227 						DL_NOTE_PROMISC_OFF_PHYS |
228 						DL_NOTE_PHYS_ADDR;
229 static const uint32_t gld_linkstate_notes =	DL_NOTE_LINK_DOWN |
230 						DL_NOTE_LINK_UP |
231 						DL_NOTE_SPEED;
232 static const uint32_t gld_supported_notes =	DL_NOTE_PROMISC_ON_PHYS |
233 						DL_NOTE_PROMISC_OFF_PHYS |
234 						DL_NOTE_PHYS_ADDR |
235 						DL_NOTE_LINK_DOWN |
236 						DL_NOTE_LINK_UP |
237 						DL_NOTE_SPEED;
238 
239 /* Media must correspond to #defines in gld.h */
240 static char *gld_media[] = {
241 	"unknown",	/* GLDM_UNKNOWN - driver cannot determine media */
242 	"aui",		/* GLDM_AUI */
243 	"bnc",		/* GLDM_BNC */
244 	"twpair",	/* GLDM_TP */
245 	"fiber",	/* GLDM_FIBER */
246 	"100baseT",	/* GLDM_100BT */
247 	"100vgAnyLan",	/* GLDM_VGANYLAN */
248 	"10baseT",	/* GLDM_10BT */
249 	"ring4",	/* GLDM_RING4 */
250 	"ring16",	/* GLDM_RING16 */
251 	"PHY/MII",	/* GLDM_PHYMII */
252 	"100baseTX",	/* GLDM_100BTX */
253 	"100baseT4",	/* GLDM_100BT4 */
254 	"unknown",	/* skip */
255 	"ipib",		/* GLDM_IB */
256 };
257 
258 /* Must correspond to #defines in gld.h */
259 static char *gld_duplex[] = {
260 	"unknown",	/* GLD_DUPLEX_UNKNOWN - not known or not applicable */
261 	"half",		/* GLD_DUPLEX_HALF */
262 	"full"		/* GLD_DUPLEX_FULL */
263 };
264 
265 /*
266  * Interface types currently supported by GLD.
267  * If you add new types, you must check all "XXX" strings in the GLD source
268  * for implementation issues that may affect the support of your new type.
269  * In particular, any type with gldm_addrlen > 6, or gldm_saplen != -2, will
270  * require generalizing this GLD source to handle the new cases.  In other
271  * words there are assumptions built into the code in a few places that must
272  * be fixed.  Be sure to turn on DEBUG/ASSERT code when testing a new type.
273  */
274 static gld_interface_t interfaces[] = {
275 
276 	/* Ethernet Bus */
277 	{
278 		DL_ETHER,
279 		(uint_t)-1,
280 		sizeof (struct ether_header),
281 		gld_interpret_ether,
282 		gld_fastpath_ether,
283 		gld_unitdata_ether,
284 		gld_init_ether,
285 		gld_uninit_ether,
286 		"ether"
287 	},
288 
289 	/* Fiber Distributed Data Interface */
290 	{
291 		DL_FDDI,
292 		4352,
293 		sizeof (struct fddi_mac_frm),
294 		gld_interpret_fddi,
295 		gld_fastpath_fddi,
296 		gld_unitdata_fddi,
297 		gld_init_fddi,
298 		gld_uninit_fddi,
299 		"fddi"
300 	},
301 
302 	/* Token Ring interface */
303 	{
304 		DL_TPR,
305 		17914,
306 		-1,			/* variable header size */
307 		gld_interpret_tr,
308 		gld_fastpath_tr,
309 		gld_unitdata_tr,
310 		gld_init_tr,
311 		gld_uninit_tr,
312 		"tpr"
313 	},
314 
315 	/* Infiniband */
316 	{
317 		DL_IB,
318 		4092,
319 		sizeof (struct ipoib_header),
320 		gld_interpret_ib,
321 		gld_fastpath_ib,
322 		gld_unitdata_ib,
323 		gld_init_ib,
324 		gld_uninit_ib,
325 		"ipib"
326 	},
327 };
328 
329 /*
330  * bit reversal lookup table.
331  */
332 static	uchar_t bit_rev[] = {
333 	0x00, 0x80, 0x40, 0xc0, 0x20, 0xa0, 0x60, 0xe0, 0x10, 0x90, 0x50, 0xd0,
334 	0x30, 0xb0, 0x70, 0xf0, 0x08, 0x88, 0x48, 0xc8, 0x28, 0xa8, 0x68, 0xe8,
335 	0x18, 0x98, 0x58, 0xd8, 0x38, 0xb8, 0x78, 0xf8, 0x04, 0x84, 0x44, 0xc4,
336 	0x24, 0xa4, 0x64, 0xe4, 0x14, 0x94, 0x54, 0xd4, 0x34, 0xb4, 0x74, 0xf4,
337 	0x0c, 0x8c, 0x4c, 0xcc, 0x2c, 0xac, 0x6c, 0xec, 0x1c, 0x9c, 0x5c, 0xdc,
338 	0x3c, 0xbc, 0x7c, 0xfc, 0x02, 0x82, 0x42, 0xc2, 0x22, 0xa2, 0x62, 0xe2,
339 	0x12, 0x92, 0x52, 0xd2, 0x32, 0xb2, 0x72, 0xf2, 0x0a, 0x8a, 0x4a, 0xca,
340 	0x2a, 0xaa, 0x6a, 0xea, 0x1a, 0x9a, 0x5a, 0xda, 0x3a, 0xba, 0x7a, 0xfa,
341 	0x06, 0x86, 0x46, 0xc6, 0x26, 0xa6, 0x66, 0xe6, 0x16, 0x96, 0x56, 0xd6,
342 	0x36, 0xb6, 0x76, 0xf6, 0x0e, 0x8e, 0x4e, 0xce, 0x2e, 0xae, 0x6e, 0xee,
343 	0x1e, 0x9e, 0x5e, 0xde, 0x3e, 0xbe, 0x7e, 0xfe, 0x01, 0x81, 0x41, 0xc1,
344 	0x21, 0xa1, 0x61, 0xe1, 0x11, 0x91, 0x51, 0xd1, 0x31, 0xb1, 0x71, 0xf1,
345 	0x09, 0x89, 0x49, 0xc9, 0x29, 0xa9, 0x69, 0xe9, 0x19, 0x99, 0x59, 0xd9,
346 	0x39, 0xb9, 0x79, 0xf9, 0x05, 0x85, 0x45, 0xc5, 0x25, 0xa5, 0x65, 0xe5,
347 	0x15, 0x95, 0x55, 0xd5, 0x35, 0xb5, 0x75, 0xf5, 0x0d, 0x8d, 0x4d, 0xcd,
348 	0x2d, 0xad, 0x6d, 0xed, 0x1d, 0x9d, 0x5d, 0xdd, 0x3d, 0xbd, 0x7d, 0xfd,
349 	0x03, 0x83, 0x43, 0xc3, 0x23, 0xa3, 0x63, 0xe3, 0x13, 0x93, 0x53, 0xd3,
350 	0x33, 0xb3, 0x73, 0xf3, 0x0b, 0x8b, 0x4b, 0xcb, 0x2b, 0xab, 0x6b, 0xeb,
351 	0x1b, 0x9b, 0x5b, 0xdb, 0x3b, 0xbb, 0x7b, 0xfb, 0x07, 0x87, 0x47, 0xc7,
352 	0x27, 0xa7, 0x67, 0xe7, 0x17, 0x97, 0x57, 0xd7, 0x37, 0xb7, 0x77, 0xf7,
353 	0x0f, 0x8f, 0x4f, 0xcf, 0x2f, 0xaf, 0x6f, 0xef, 0x1f, 0x9f, 0x5f, 0xdf,
354 	0x3f, 0xbf, 0x7f, 0xff,
355 };
356 
357 /*
358  * User priorities, mapped from b_band.
359  */
360 static uint32_t user_priority[] = {
361 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
362 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
363 	1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
364 	1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
365 	2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
366 	2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
367 	3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
368 	3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
369 	4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
370 	4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
371 	5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
372 	5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
373 	6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
374 	6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
375 	7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
376 	7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7
377 };
378 
379 #define	UPRI(gld, band)	(((band) != 0) ? user_priority[(band)] : (gld)->gld_upri)
380 
381 static struct glddevice gld_device_list;  /* Per-system root of GLD tables */
382 
383 /*
384  * Module linkage information for the kernel.
385  */
386 
387 static struct modldrv modlmisc = {
388 	&mod_miscops,		/* Type of module - a utility provider */
389 	"Generic LAN Driver (" GLD_VERSION_STRING ")"
390 #ifdef GLD_DEBUG
391 	" DEBUG"
392 #endif
393 };
394 
395 static struct modlinkage modlinkage = {
396 	MODREV_1, &modlmisc, NULL
397 };
398 
399 int
400 _init(void)
401 {
402 	int e;
403 
404 	/* initialize gld_device_list mutex */
405 	mutex_init(&gld_device_list.gld_devlock, NULL, MUTEX_DRIVER, NULL);
406 
407 	/* initialize device driver (per-major) list */
408 	gld_device_list.gld_next =
409 	    gld_device_list.gld_prev = &gld_device_list;
410 
411 	if ((e = mod_install(&modlinkage)) != 0)
412 		mutex_destroy(&gld_device_list.gld_devlock);
413 
414 	return (e);
415 }
416 
417 int
418 _fini(void)
419 {
420 	int e;
421 
422 	if ((e = mod_remove(&modlinkage)) != 0)
423 		return (e);
424 
425 	ASSERT(gld_device_list.gld_next ==
426 	    (glddev_t *)&gld_device_list.gld_next);
427 	ASSERT(gld_device_list.gld_prev ==
428 	    (glddev_t *)&gld_device_list.gld_next);
429 	mutex_destroy(&gld_device_list.gld_devlock);
430 
431 	return (e);
432 }
433 
434 int
435 _info(struct modinfo *modinfop)
436 {
437 	return (mod_info(&modlinkage, modinfop));
438 }
439 
440 /*
441  * GLD service routines
442  */
443 
444 /* So this gld binary may remain forward compatible with future v2 drivers */
445 #define	GLD_MAC_RESERVED (16 * sizeof (caddr_t))
446 
447 /*ARGSUSED*/
448 gld_mac_info_t *
449 gld_mac_alloc(dev_info_t *devinfo)
450 {
451 	gld_mac_info_t *macinfo;
452 
453 	macinfo = kmem_zalloc(sizeof (gld_mac_info_t) + GLD_MAC_RESERVED,
454 	    KM_SLEEP);
455 
456 	/*
457 	 * The setting of gldm_driver_version will not be documented or allowed
458 	 * until a future release.
459 	 */
460 	macinfo->gldm_driver_version = GLD_VERSION_200;
461 
462 	/*
463 	 * GLD's version.  This also is undocumented for now, but will be
464 	 * available if needed in the future.
465 	 */
466 	macinfo->gldm_GLD_version = GLD_VERSION;
467 
468 	return (macinfo);
469 }
470 
471 /*
472  * gld_mac_free must be called after the driver has removed interrupts
473  * and completely stopped calling gld_recv() and gld_sched().  At that
474  * point the interrupt routine is guaranteed by the system to have been
475  * exited and the maclock is no longer needed.  Of course, it is
476  * expected (required) that (assuming gld_register() succeeded),
477  * gld_unregister() was called before gld_mac_free().
478  */
479 void
480 gld_mac_free(gld_mac_info_t *macinfo)
481 {
482 	ASSERT(macinfo);
483 	ASSERT(macinfo->gldm_GLD_version == GLD_VERSION);
484 
485 	/*
486 	 * Assert that if we made it through gld_register, then we must
487 	 * have unregistered.
488 	 */
489 	ASSERT(!GLDM_LOCK_INITED(macinfo) ||
490 	    (macinfo->gldm_GLD_flags & GLD_UNREGISTERED));
491 
492 	GLDM_LOCK_DESTROY(macinfo);
493 
494 	kmem_free(macinfo, sizeof (gld_mac_info_t) + GLD_MAC_RESERVED);
495 }
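
/*
 * A minimal detach(9E) sketch honoring the ordering above.  This is
 * illustrative only: the "xx" driver name and its interrupt cookie are
 * invented, and error handling is reduced to the bare minimum.
 *
 *	static int
 *	xx_detach(dev_info_t *devinfo, ddi_detach_cmd_t cmd)
 *	{
 *		gld_mac_info_t *macinfo;
 *
 *		if (cmd != DDI_DETACH)
 *			return (DDI_FAILURE);
 *		macinfo = ddi_get_driver_private(devinfo);
 *		if (gld_unregister(macinfo) != DDI_SUCCESS)
 *			return (DDI_FAILURE);
 *		ddi_remove_intr(devinfo, 0, xx_iblock_cookie);
 *		gld_mac_free(macinfo);
 *		return (DDI_SUCCESS);
 *	}
 *
 * gld_unregister() fails while streams are still attached and otherwise
 * disallows further gld_recv()/gld_sched() entries, so by the time
 * gld_mac_free() runs the interrupt handler has been removed and the
 * maclock is no longer needed, as required above.
 */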
496 
497 /*
498  * gld_register -- called once per device instance (PPA)
499  *
500  * During its attach routine, a real device driver will register with GLD
501  * so that later opens and dl_attach_reqs will work.  The arguments are the
502  * devinfo pointer, the device name, and a macinfo structure describing the
503  * physical device instance.
504  */
505 int
506 gld_register(dev_info_t *devinfo, char *devname, gld_mac_info_t *macinfo)
507 {
508 	int mediatype;
509 	int major = ddi_name_to_major(devname), i;
510 	glddev_t *glddev;
511 	gld_mac_pvt_t *mac_pvt;
512 	char minordev[32];
513 	char pbuf[3*GLD_MAX_ADDRLEN];
514 	gld_interface_t *ifp;
515 
516 	ASSERT(devinfo != NULL);
517 	ASSERT(macinfo != NULL);
518 
519 	if (macinfo->gldm_driver_version != GLD_VERSION)
520 		return (DDI_FAILURE);
521 
522 	mediatype = macinfo->gldm_type;
523 
524 	/*
525 	 * Entry points should be ready for us.
526 	 * ioctl is optional.
527 	 * set_multicast and get_stats are optional in v0.
528 	 * intr is only required if you add an interrupt.
529 	 */
530 	ASSERT(macinfo->gldm_reset != NULL);
531 	ASSERT(macinfo->gldm_start != NULL);
532 	ASSERT(macinfo->gldm_stop != NULL);
533 	ASSERT(macinfo->gldm_set_mac_addr != NULL);
534 	ASSERT(macinfo->gldm_set_promiscuous != NULL);
535 	ASSERT(macinfo->gldm_send != NULL);
536 
537 	ASSERT(macinfo->gldm_maxpkt >= macinfo->gldm_minpkt);
538 	ASSERT(macinfo->gldm_GLD_version == GLD_VERSION);
539 	ASSERT(macinfo->gldm_broadcast_addr != NULL);
540 	ASSERT(macinfo->gldm_vendor_addr != NULL);
541 	ASSERT(macinfo->gldm_ident != NULL);
542 
543 	if (macinfo->gldm_addrlen > GLD_MAX_ADDRLEN) {
544 		cmn_err(CE_WARN, "GLD: %s driver gldm_addrlen %d > %d not sup"
545 		    "ported", devname, macinfo->gldm_addrlen, GLD_MAX_ADDRLEN);
546 		return (DDI_FAILURE);
547 	}
548 
549 	/*
550 	 * GLD only functions properly with saplen == -2
551 	 */
552 	if (macinfo->gldm_saplen != -2) {
553 		cmn_err(CE_WARN, "GLD: %s driver gldm_saplen %d != -2 "
554 		    "not supported", devname, macinfo->gldm_saplen);
555 		return (DDI_FAILURE);
556 	}
557 
558 	/* see gld_rsrv() */
559 	if (ddi_getprop(DDI_DEV_T_NONE, devinfo, 0, "fast_recv", 0))
560 		macinfo->gldm_options |= GLDOPT_FAST_RECV;
561 
562 	mutex_enter(&gld_device_list.gld_devlock);
563 	glddev = gld_devlookup(major);
564 
565 	/*
566 	 *  Allocate per-driver (major) data structure if necessary
567 	 */
568 	if (glddev == NULL) {
569 		/* first occurrence of this device name (major number) */
570 		glddev = GLD_GETSTRUCT(glddev_t, 1);
571 		if (glddev == NULL) {
572 			mutex_exit(&gld_device_list.gld_devlock);
573 			return (DDI_FAILURE);
574 		}
575 		(void) strncpy(glddev->gld_name, devname,
576 		    sizeof (glddev->gld_name) - 1);
577 		glddev->gld_major = major;
578 		glddev->gld_nextminor = GLD_MIN_CLONE_MINOR;
579 		glddev->gld_mac_next = glddev->gld_mac_prev =
580 		    (gld_mac_info_t *)&glddev->gld_mac_next;
581 		glddev->gld_str_next = glddev->gld_str_prev =
582 		    (gld_t *)&glddev->gld_str_next;
583 		mutex_init(&glddev->gld_devlock, NULL, MUTEX_DRIVER, NULL);
584 
585 		/* allow increase of number of supported multicast addrs */
586 		glddev->gld_multisize = ddi_getprop(DDI_DEV_T_NONE,
587 		    devinfo, 0, "multisize", GLD_MAX_MULTICAST);
588 
589 		/*
590 		 * Optionally restrict DLPI provider style
591 		 *
592 		 * -1 - don't create style 1 nodes
593 		 * -2 - don't create style 2 nodes
594 		 */
595 		glddev->gld_styles = ddi_getprop(DDI_DEV_T_NONE, devinfo, 0,
596 		    "gld-provider-styles", 0);
597 
598 		/* Stuff that's needed before any PPA gets attached */
599 		glddev->gld_type = macinfo->gldm_type;
600 		glddev->gld_minsdu = macinfo->gldm_minpkt;
601 		glddev->gld_saplen = macinfo->gldm_saplen;
602 		glddev->gld_addrlen = macinfo->gldm_addrlen;
603 		glddev->gld_broadcast = kmem_zalloc(macinfo->gldm_addrlen,
604 		    KM_SLEEP);
605 		bcopy(macinfo->gldm_broadcast_addr,
606 		    glddev->gld_broadcast, macinfo->gldm_addrlen);
607 		glddev->gld_maxsdu = macinfo->gldm_maxpkt;
608 		gldinsque(glddev, gld_device_list.gld_prev);
609 	}
610 	glddev->gld_ndevice++;
611 	/* Now glddev can't go away until we unregister this mac (or fail) */
612 	mutex_exit(&gld_device_list.gld_devlock);
613 
614 	/*
615 	 *  Per-instance initialization
616 	 */
617 
618 	/*
619 	 * Initialize per-mac structure that is private to GLD.
620 	 * Set up interface pointer. These are device class specific pointers
621 	 * used to handle FDDI/TR/ETHER/IPoIB specific packets.
622 	 */
623 	for (i = 0; i < sizeof (interfaces)/sizeof (*interfaces); i++) {
624 		if (mediatype != interfaces[i].mac_type)
625 			continue;
626 
627 		macinfo->gldm_mac_pvt = kmem_zalloc(sizeof (gld_mac_pvt_t),
628 		    KM_SLEEP);
629 		((gld_mac_pvt_t *)macinfo->gldm_mac_pvt)->interfacep = ifp =
630 		    &interfaces[i];
631 		break;
632 	}
633 
634 	if (ifp == NULL) {
635 		cmn_err(CE_WARN, "GLD: this version does not support %s driver "
636 		    "of type %d", devname, mediatype);
637 		goto failure;
638 	}
639 
640 	/*
641 	 * Driver can only register MTU within legal media range.
642 	 */
643 	if (macinfo->gldm_maxpkt > ifp->mtu_size) {
644 		cmn_err(CE_WARN, "GLD: oversize MTU is specified by driver %s",
645 		    devname);
646 		goto failure;
647 	}
648 
649 	/*
650 	 * Correct margin size if it is not set.
651 	 */
652 	if (VLAN_CAPABLE(macinfo) && (macinfo->gldm_margin == 0))
653 		macinfo->gldm_margin = VTAG_SIZE;
654 
655 	mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt;
656 	mac_pvt->major_dev = glddev;
657 
658 	mac_pvt->curr_macaddr = kmem_zalloc(macinfo->gldm_addrlen, KM_SLEEP);
659 	/*
660 	 * XXX Do bit-reversed devices store gldm_vendor in canonical
661 	 * format or in wire format?  Also gldm_broadcast.  For now
662 	 * we are assuming canonical, but I'm not sure that makes the
663 	 * most sense for ease of driver implementation.
664 	 */
665 	bcopy(macinfo->gldm_vendor_addr, mac_pvt->curr_macaddr,
666 	    macinfo->gldm_addrlen);
667 	mac_pvt->statistics = kmem_zalloc(sizeof (struct gld_stats), KM_SLEEP);
668 
669 	/*
670 	 * The available set of notifications is those generatable by GLD
671 	 * itself, plus those corresponding to the capabilities of the MAC
672 	 * driver, intersected with those supported by gld_notify_ind() above.
673 	 */
674 	mac_pvt->notifications = gld_internal_notes;
675 	if (macinfo->gldm_capabilities & GLD_CAP_LINKSTATE)
676 		mac_pvt->notifications |= gld_linkstate_notes;
677 	mac_pvt->notifications &= gld_supported_notes;
678 
679 	GLDM_LOCK_INIT(macinfo);
680 
681 	ddi_set_driver_private(devinfo, macinfo);
682 
683 	/*
684 	 * Now atomically get a PPA and put ourselves on the mac list.
685 	 */
686 	mutex_enter(&glddev->gld_devlock);
687 
688 #ifdef DEBUG
689 	if (macinfo->gldm_ppa != ddi_get_instance(devinfo))
690 		cmn_err(CE_WARN, "%s%d instance != ppa %d",
691 		    ddi_driver_name(devinfo), ddi_get_instance(devinfo),
692 		    macinfo->gldm_ppa);
693 #endif
694 
695 	/*
696 	 * Create style 2 node (gated by gld-provider-styles property).
697 	 *
698 	 * NOTE: When the CLONE_DEV flag is specified to
699 	 *	 ddi_create_minor_node() the minor number argument is
700 	 *	 immaterial. Opens of that node will go via the clone
701 	 *	 driver and gld_open() will always be passed a dev_t with
702 	 *	 minor of zero.
703 	 */
704 	if (glddev->gld_styles != -2) {
705 		if (ddi_create_minor_node(devinfo, glddev->gld_name, S_IFCHR,
706 		    0, DDI_NT_NET, CLONE_DEV) == DDI_FAILURE) {
707 			mutex_exit(&glddev->gld_devlock);
708 			goto late_failure;
709 		}
710 	}
711 
712 	/*
713 	 * Create style 1 node (gated by gld-provider-styles property)
714 	 */
715 	if (glddev->gld_styles != -1) {
716 		(void) sprintf(minordev, "%s%d", glddev->gld_name,
717 		    macinfo->gldm_ppa);
718 		if (ddi_create_minor_node(devinfo, minordev, S_IFCHR,
719 		    GLD_STYLE1_PPA_TO_MINOR(macinfo->gldm_ppa), DDI_NT_NET,
720 		    0) != DDI_SUCCESS) {
721 			mutex_exit(&glddev->gld_devlock);
722 			goto late_failure;
723 		}
724 	}
725 
726 	/* add ourselves to this major device's linked list of instances */
727 	gldinsque(macinfo, glddev->gld_mac_prev);
728 
729 	mutex_exit(&glddev->gld_devlock);
730 
731 	/*
732 	 * Unfortunately we need the ppa before we call gld_initstats();
733 	 * otherwise we would do this just above the mutex_enter above,
734 	 * in which case we could have set MAC_READY inside the
735 	 * mutex and wouldn't have needed to check it in open and
736 	 * DL_ATTACH.  We don't want to do the initstats/kstat_create
737 	 * inside the mutex because it might get taken in our kstat_update
738 	 * routine and cause a deadlock with kstat_chain_lock.
739 	 */
740 
741 	/* gld_initstats() calls (*ifp->init)() */
742 	if (gld_initstats(macinfo) != GLD_SUCCESS) {
743 		mutex_enter(&glddev->gld_devlock);
744 		gldremque(macinfo);
745 		mutex_exit(&glddev->gld_devlock);
746 		goto late_failure;
747 	}
748 
749 	/*
750 	 * Need to indicate we are NOW ready to process interrupts;
751 	 * any interrupt before this is set is for someone else.
752 	 * This flag is also now used to tell open, et al. that this
753 	 * mac is now fully ready and available for use.
754 	 */
755 	GLDM_LOCK(macinfo, RW_WRITER);
756 	macinfo->gldm_GLD_flags |= GLD_MAC_READY;
757 	GLDM_UNLOCK(macinfo);
758 
759 	/* log local ethernet address -- XXX not DDI compliant */
760 	if (macinfo->gldm_addrlen == sizeof (struct ether_addr))
761 		(void) localetheraddr(
762 		    (struct ether_addr *)macinfo->gldm_vendor_addr, NULL);
763 
764 	/* now put announcement into the message buffer */
765 	cmn_err(CE_CONT, "!%s%d: %s: type \"%s\" mac address %s\n",
766 	    glddev->gld_name,
767 	    macinfo->gldm_ppa, macinfo->gldm_ident,
768 	    mac_pvt->interfacep->mac_string,
769 	    gld_macaddr_sprintf(pbuf, macinfo->gldm_vendor_addr,
770 	    macinfo->gldm_addrlen));
771 
772 	ddi_report_dev(devinfo);
773 	return (DDI_SUCCESS);
774 
775 late_failure:
776 	ddi_remove_minor_node(devinfo, NULL);
777 	GLDM_LOCK_DESTROY(macinfo);
778 	if (mac_pvt->curr_macaddr != NULL)
779 		kmem_free(mac_pvt->curr_macaddr, macinfo->gldm_addrlen);
780 	if (mac_pvt->statistics != NULL)
781 		kmem_free(mac_pvt->statistics, sizeof (struct gld_stats));
782 	kmem_free(macinfo->gldm_mac_pvt, sizeof (gld_mac_pvt_t));
783 	macinfo->gldm_mac_pvt = NULL;
784 
785 failure:
786 	mutex_enter(&gld_device_list.gld_devlock);
787 	glddev->gld_ndevice--;
788 	/*
789 	 * Note that just because this goes to zero here does not necessarily
790 	 * mean that we were the one who added the glddev above.  It's
791 	 * possible that the first mac unattached while we were in here
792 	 * failing to attach the second mac.  But we're now the last.
793 	 */
794 	if (glddev->gld_ndevice == 0) {
795 		/* There should be no macinfos left */
796 		ASSERT(glddev->gld_mac_next ==
797 		    (gld_mac_info_t *)&glddev->gld_mac_next);
798 		ASSERT(glddev->gld_mac_prev ==
799 		    (gld_mac_info_t *)&glddev->gld_mac_next);
800 
801 		/*
802 		 * There should be no DL_UNATTACHED streams: the system
803 		 * should not have detached the "first" devinfo which has
804 		 * all the open style 2 streams.
805 		 *
806 		 * XXX This is not clear.  See gld_getinfo and Bug 1165519
807 		 */
808 		ASSERT(glddev->gld_str_next == (gld_t *)&glddev->gld_str_next);
809 		ASSERT(glddev->gld_str_prev == (gld_t *)&glddev->gld_str_next);
810 
811 		gldremque(glddev);
812 		mutex_destroy(&glddev->gld_devlock);
813 		if (glddev->gld_broadcast != NULL)
814 			kmem_free(glddev->gld_broadcast, glddev->gld_addrlen);
815 		kmem_free(glddev, sizeof (glddev_t));
816 	}
817 	mutex_exit(&gld_device_list.gld_devlock);
818 
819 	return (DDI_FAILURE);
820 }
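
/*
 * Attach-time usage sketch.  The xx_* entry points, address buffers and
 * softc below are invented for illustration; the gldm_* fields are the
 * ones checked by the code above (gldm_private is simply carried for the
 * driver's own use).  A hypothetical attach(9E) routine would do roughly:
 *
 *	gld_mac_info_t *macinfo = gld_mac_alloc(devinfo);
 *
 *	macinfo->gldm_reset = xx_reset;
 *	macinfo->gldm_start = xx_start;
 *	macinfo->gldm_stop = xx_stop;
 *	macinfo->gldm_set_mac_addr = xx_set_mac_addr;
 *	macinfo->gldm_set_promiscuous = xx_set_promiscuous;
 *	macinfo->gldm_set_multicast = xx_set_multicast;
 *	macinfo->gldm_send = xx_send;
 *	macinfo->gldm_get_stats = xx_get_stats;
 *
 *	macinfo->gldm_ident = "xx Fast Ethernet";
 *	macinfo->gldm_type = DL_ETHER;
 *	macinfo->gldm_minpkt = 0;
 *	macinfo->gldm_maxpkt = ETHERMTU;
 *	macinfo->gldm_addrlen = ETHERADDRL;
 *	macinfo->gldm_saplen = -2;
 *	macinfo->gldm_broadcast_addr = xx_broadcast_addr;
 *	macinfo->gldm_vendor_addr = xx_factory_addr;
 *	macinfo->gldm_ppa = ddi_get_instance(devinfo);
 *	macinfo->gldm_devinfo = devinfo;
 *	macinfo->gldm_private = (caddr_t)xx_softc;
 *
 *	if (gld_register(devinfo, "xx", macinfo) != DDI_SUCCESS) {
 *		gld_mac_free(macinfo);
 *		return (DDI_FAILURE);
 *	}
 */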
821 
822 /*
823  * gld_unregister (macinfo)
824  * remove the macinfo structure from local structures
825  * this is cleanup for a driver to be unloaded
826  */
827 int
828 gld_unregister(gld_mac_info_t *macinfo)
829 {
830 	gld_mac_pvt_t *mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt;
831 	glddev_t *glddev = mac_pvt->major_dev;
832 	gld_interface_t *ifp;
833 	int multisize = sizeof (gld_mcast_t) * glddev->gld_multisize;
834 
835 	mutex_enter(&glddev->gld_devlock);
836 	GLDM_LOCK(macinfo, RW_WRITER);
837 
838 	if (mac_pvt->nvlan > 0) {
839 		GLDM_UNLOCK(macinfo);
840 		mutex_exit(&glddev->gld_devlock);
841 		return (DDI_FAILURE);
842 	}
843 
844 #ifdef	GLD_DEBUG
845 	{
846 		int i;
847 
848 		for (i = 0; i < VLAN_HASHSZ; i++) {
849 			if ((mac_pvt->vlan_hash[i] != NULL))
850 				cmn_err(CE_PANIC,
851 				    "%s, line %d: "
852 				    "mac_pvt->vlan_hash[%d] != NULL",
853 				    __FILE__, __LINE__, i);
854 		}
855 	}
856 #endif
857 
858 	/* Delete this mac */
859 	gldremque(macinfo);
860 
861 	/* Disallow further entries to gld_recv() and gld_sched() */
862 	macinfo->gldm_GLD_flags |= GLD_UNREGISTERED;
863 
864 	GLDM_UNLOCK(macinfo);
865 	mutex_exit(&glddev->gld_devlock);
866 
867 	ifp = ((gld_mac_pvt_t *)macinfo->gldm_mac_pvt)->interfacep;
868 	(*ifp->uninit)(macinfo);
869 
870 	ASSERT(mac_pvt->kstatp);
871 	kstat_delete(mac_pvt->kstatp);
872 
873 	ASSERT(GLDM_LOCK_INITED(macinfo));
874 	kmem_free(mac_pvt->curr_macaddr, macinfo->gldm_addrlen);
875 	kmem_free(mac_pvt->statistics, sizeof (struct gld_stats));
876 
877 	if (mac_pvt->mcast_table != NULL)
878 		kmem_free(mac_pvt->mcast_table, multisize);
879 	kmem_free(macinfo->gldm_mac_pvt, sizeof (gld_mac_pvt_t));
880 	macinfo->gldm_mac_pvt = (caddr_t)NULL;
881 
882 	/* We now have one fewer instance for this major device */
883 	mutex_enter(&gld_device_list.gld_devlock);
884 	glddev->gld_ndevice--;
885 	if (glddev->gld_ndevice == 0) {
886 		/* There should be no macinfos left */
887 		ASSERT(glddev->gld_mac_next ==
888 		    (gld_mac_info_t *)&glddev->gld_mac_next);
889 		ASSERT(glddev->gld_mac_prev ==
890 		    (gld_mac_info_t *)&glddev->gld_mac_next);
891 
892 		/*
893 		 * There should be no DL_UNATTACHED streams: the system
894 		 * should not have detached the "first" devinfo which has
895 		 * all the open style 2 streams.
896 		 *
897 		 * XXX This is not clear.  See gld_getinfo and Bug 1165519
898 		 */
899 		ASSERT(glddev->gld_str_next == (gld_t *)&glddev->gld_str_next);
900 		ASSERT(glddev->gld_str_prev == (gld_t *)&glddev->gld_str_next);
901 
902 		ddi_remove_minor_node(macinfo->gldm_devinfo, NULL);
903 		gldremque(glddev);
904 		mutex_destroy(&glddev->gld_devlock);
905 		if (glddev->gld_broadcast != NULL)
906 			kmem_free(glddev->gld_broadcast, glddev->gld_addrlen);
907 		kmem_free(glddev, sizeof (glddev_t));
908 	}
909 	mutex_exit(&gld_device_list.gld_devlock);
910 
911 	return (DDI_SUCCESS);
912 }
913 
914 /*
915  * gld_initstats
916  * called from gld_register
917  */
918 static int
919 gld_initstats(gld_mac_info_t *macinfo)
920 {
921 	gld_mac_pvt_t *mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt;
922 	struct gldkstats *sp;
923 	glddev_t *glddev;
924 	kstat_t *ksp;
925 	gld_interface_t *ifp;
926 
927 	glddev = mac_pvt->major_dev;
928 
929 	if ((ksp = kstat_create(glddev->gld_name, macinfo->gldm_ppa,
930 	    NULL, "net", KSTAT_TYPE_NAMED,
931 	    sizeof (struct gldkstats) / sizeof (kstat_named_t), 0)) == NULL) {
932 		cmn_err(CE_WARN,
933 		    "GLD: failed to create kstat structure for %s%d",
934 		    glddev->gld_name, macinfo->gldm_ppa);
935 		return (GLD_FAILURE);
936 	}
937 	mac_pvt->kstatp = ksp;
938 
939 	ksp->ks_update = gld_update_kstat;
940 	ksp->ks_private = (void *)macinfo;
941 
942 	sp = ksp->ks_data;
943 	kstat_named_init(&sp->glds_pktrcv, "ipackets", KSTAT_DATA_UINT32);
944 	kstat_named_init(&sp->glds_pktxmt, "opackets", KSTAT_DATA_UINT32);
945 	kstat_named_init(&sp->glds_errrcv, "ierrors", KSTAT_DATA_ULONG);
946 	kstat_named_init(&sp->glds_errxmt, "oerrors", KSTAT_DATA_ULONG);
947 	kstat_named_init(&sp->glds_bytexmt, "obytes", KSTAT_DATA_UINT32);
948 	kstat_named_init(&sp->glds_bytercv, "rbytes", KSTAT_DATA_UINT32);
949 	kstat_named_init(&sp->glds_multixmt, "multixmt", KSTAT_DATA_ULONG);
950 	kstat_named_init(&sp->glds_multircv, "multircv", KSTAT_DATA_ULONG);
951 	kstat_named_init(&sp->glds_brdcstxmt, "brdcstxmt", KSTAT_DATA_ULONG);
952 	kstat_named_init(&sp->glds_brdcstrcv, "brdcstrcv", KSTAT_DATA_ULONG);
953 	kstat_named_init(&sp->glds_blocked, "blocked", KSTAT_DATA_ULONG);
954 	kstat_named_init(&sp->glds_noxmtbuf, "noxmtbuf", KSTAT_DATA_ULONG);
955 	kstat_named_init(&sp->glds_norcvbuf, "norcvbuf", KSTAT_DATA_ULONG);
956 	kstat_named_init(&sp->glds_xmtretry, "xmtretry", KSTAT_DATA_ULONG);
957 	kstat_named_init(&sp->glds_intr, "intr", KSTAT_DATA_ULONG);
958 	kstat_named_init(&sp->glds_pktrcv64, "ipackets64", KSTAT_DATA_UINT64);
959 	kstat_named_init(&sp->glds_pktxmt64, "opackets64", KSTAT_DATA_UINT64);
960 	kstat_named_init(&sp->glds_bytexmt64, "obytes64", KSTAT_DATA_UINT64);
961 	kstat_named_init(&sp->glds_bytercv64, "rbytes64", KSTAT_DATA_UINT64);
962 	kstat_named_init(&sp->glds_unknowns, "unknowns", KSTAT_DATA_ULONG);
963 	kstat_named_init(&sp->glds_speed, "ifspeed", KSTAT_DATA_UINT64);
964 	kstat_named_init(&sp->glds_media, "media", KSTAT_DATA_CHAR);
965 	kstat_named_init(&sp->glds_prom, "promisc", KSTAT_DATA_CHAR);
966 
967 	kstat_named_init(&sp->glds_overflow, "oflo", KSTAT_DATA_ULONG);
968 	kstat_named_init(&sp->glds_underflow, "uflo", KSTAT_DATA_ULONG);
969 	kstat_named_init(&sp->glds_missed, "missed", KSTAT_DATA_ULONG);
970 
971 	kstat_named_init(&sp->glds_xmtbadinterp, "xmt_badinterp",
972 	    KSTAT_DATA_UINT32);
973 	kstat_named_init(&sp->glds_rcvbadinterp, "rcv_badinterp",
974 	    KSTAT_DATA_UINT32);
975 
976 	ifp = ((gld_mac_pvt_t *)macinfo->gldm_mac_pvt)->interfacep;
977 
978 	(*ifp->init)(macinfo);
979 
980 	kstat_install(ksp);
981 
982 	return (GLD_SUCCESS);
983 }
984 
985 /* called from kstat mechanism, and from wsrv's get_statistics_req */
986 static int
987 gld_update_kstat(kstat_t *ksp, int rw)
988 {
989 	gld_mac_info_t	*macinfo;
990 	gld_mac_pvt_t	*mac_pvt;
991 	struct gldkstats *gsp;
992 	struct gld_stats *stats;
993 
994 	if (rw == KSTAT_WRITE)
995 		return (EACCES);
996 
997 	macinfo = (gld_mac_info_t *)ksp->ks_private;
998 	ASSERT(macinfo != NULL);
999 
1000 	GLDM_LOCK(macinfo, RW_WRITER);
1001 
1002 	if (!(macinfo->gldm_GLD_flags & GLD_MAC_READY)) {
1003 		GLDM_UNLOCK(macinfo);
1004 		return (EIO);	/* this one's not ready yet */
1005 	}
1006 
1007 	if (macinfo->gldm_GLD_flags & GLD_UNREGISTERED) {
1008 		GLDM_UNLOCK(macinfo);
1009 		return (EIO);	/* this one's not ready any more */
1010 	}
1011 
1012 	mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt;
1013 	gsp = mac_pvt->kstatp->ks_data;
1014 	ASSERT(gsp);
1015 	stats = mac_pvt->statistics;
1016 
1017 	if (macinfo->gldm_get_stats)
1018 		(void) (*macinfo->gldm_get_stats)(macinfo, stats);
1019 
1020 	gsp->glds_pktxmt.value.ui32 = stats->glds_pktxmt64 & 0xffffffff;
1021 	gsp->glds_bytexmt.value.ui32 = stats->glds_bytexmt64 & 0xffffffff;
1022 	gsp->glds_multixmt.value.ul = stats->glds_multixmt;
1023 	gsp->glds_brdcstxmt.value.ul = stats->glds_brdcstxmt;
1024 	gsp->glds_noxmtbuf.value.ul = stats->glds_noxmtbuf;	/* 0 for now */
1025 	gsp->glds_xmtretry.value.ul = stats->glds_xmtretry;
1026 
1027 	gsp->glds_pktxmt64.value.ui64 = stats->glds_pktxmt64;
1028 	gsp->glds_bytexmt64.value.ui64 = stats->glds_bytexmt64;
1029 	gsp->glds_xmtbadinterp.value.ui32 = stats->glds_xmtbadinterp;
1030 
1031 	gsp->glds_pktrcv.value.ui32 = stats->glds_pktrcv64 & 0xffffffff;
1032 	gsp->glds_errxmt.value.ul = stats->glds_errxmt;
1033 	gsp->glds_errrcv.value.ul = stats->glds_errrcv;
1034 	gsp->glds_bytercv.value.ui32 = stats->glds_bytercv64 & 0xffffffff;
1035 	gsp->glds_multircv.value.ul = stats->glds_multircv;
1036 	gsp->glds_brdcstrcv.value.ul = stats->glds_brdcstrcv;
1037 	gsp->glds_blocked.value.ul = stats->glds_blocked;
1038 	gsp->glds_overflow.value.ul = stats->glds_overflow;
1039 	gsp->glds_underflow.value.ul = stats->glds_underflow;
1040 	gsp->glds_missed.value.ul = stats->glds_missed;
1041 	gsp->glds_norcvbuf.value.ul = stats->glds_norcvbuf +
1042 	    stats->glds_gldnorcvbuf;
1043 	gsp->glds_intr.value.ul = stats->glds_intr;
1044 
1045 	gsp->glds_speed.value.ui64 = stats->glds_speed;
1046 	gsp->glds_unknowns.value.ul = stats->glds_unknowns;
1047 	gsp->glds_pktrcv64.value.ui64 = stats->glds_pktrcv64;
1048 	gsp->glds_bytercv64.value.ui64 = stats->glds_bytercv64;
1049 	gsp->glds_rcvbadinterp.value.ui32 = stats->glds_rcvbadinterp;
1050 
1051 	if (mac_pvt->nprom)
1052 		(void) strcpy(gsp->glds_prom.value.c, "phys");
1053 	else if (mac_pvt->nprom_multi)
1054 		(void) strcpy(gsp->glds_prom.value.c, "multi");
1055 	else
1056 		(void) strcpy(gsp->glds_prom.value.c, "off");
1057 
1058 	(void) strcpy(gsp->glds_media.value.c, gld_media[
1059 	    stats->glds_media < sizeof (gld_media) / sizeof (gld_media[0])
1060 	    ? stats->glds_media : 0]);
1061 
1062 	switch (macinfo->gldm_type) {
1063 	case DL_ETHER:
1064 		gsp->glds_frame.value.ul = stats->glds_frame;
1065 		gsp->glds_crc.value.ul = stats->glds_crc;
1066 		gsp->glds_collisions.value.ul = stats->glds_collisions;
1067 		gsp->glds_excoll.value.ul = stats->glds_excoll;
1068 		gsp->glds_defer.value.ul = stats->glds_defer;
1069 		gsp->glds_short.value.ul = stats->glds_short;
1070 		gsp->glds_xmtlatecoll.value.ul = stats->glds_xmtlatecoll;
1071 		gsp->glds_nocarrier.value.ul = stats->glds_nocarrier;
1072 		gsp->glds_dot3_first_coll.value.ui32 =
1073 		    stats->glds_dot3_first_coll;
1074 		gsp->glds_dot3_multi_coll.value.ui32 =
1075 		    stats->glds_dot3_multi_coll;
1076 		gsp->glds_dot3_sqe_error.value.ui32 =
1077 		    stats->glds_dot3_sqe_error;
1078 		gsp->glds_dot3_mac_xmt_error.value.ui32 =
1079 		    stats->glds_dot3_mac_xmt_error;
1080 		gsp->glds_dot3_mac_rcv_error.value.ui32 =
1081 		    stats->glds_dot3_mac_rcv_error;
1082 		gsp->glds_dot3_frame_too_long.value.ui32 =
1083 		    stats->glds_dot3_frame_too_long;
1084 		(void) strcpy(gsp->glds_duplex.value.c, gld_duplex[
1085 		    stats->glds_duplex <
1086 		    sizeof (gld_duplex) / sizeof (gld_duplex[0]) ?
1087 		    stats->glds_duplex : 0]);
1088 		break;
1089 	case DL_TPR:
1090 		gsp->glds_dot5_line_error.value.ui32 =
1091 		    stats->glds_dot5_line_error;
1092 		gsp->glds_dot5_burst_error.value.ui32 =
1093 		    stats->glds_dot5_burst_error;
1094 		gsp->glds_dot5_signal_loss.value.ui32 =
1095 		    stats->glds_dot5_signal_loss;
1096 		gsp->glds_dot5_ace_error.value.ui32 =
1097 		    stats->glds_dot5_ace_error;
1098 		gsp->glds_dot5_internal_error.value.ui32 =
1099 		    stats->glds_dot5_internal_error;
1100 		gsp->glds_dot5_lost_frame_error.value.ui32 =
1101 		    stats->glds_dot5_lost_frame_error;
1102 		gsp->glds_dot5_frame_copied_error.value.ui32 =
1103 		    stats->glds_dot5_frame_copied_error;
1104 		gsp->glds_dot5_token_error.value.ui32 =
1105 		    stats->glds_dot5_token_error;
1106 		gsp->glds_dot5_freq_error.value.ui32 =
1107 		    stats->glds_dot5_freq_error;
1108 		break;
1109 	case DL_FDDI:
1110 		gsp->glds_fddi_mac_error.value.ui32 =
1111 		    stats->glds_fddi_mac_error;
1112 		gsp->glds_fddi_mac_lost.value.ui32 =
1113 		    stats->glds_fddi_mac_lost;
1114 		gsp->glds_fddi_mac_token.value.ui32 =
1115 		    stats->glds_fddi_mac_token;
1116 		gsp->glds_fddi_mac_tvx_expired.value.ui32 =
1117 		    stats->glds_fddi_mac_tvx_expired;
1118 		gsp->glds_fddi_mac_late.value.ui32 =
1119 		    stats->glds_fddi_mac_late;
1120 		gsp->glds_fddi_mac_ring_op.value.ui32 =
1121 		    stats->glds_fddi_mac_ring_op;
1122 		break;
1123 	case DL_IB:
1124 		break;
1125 	default:
1126 		break;
1127 	}
1128 
1129 	GLDM_UNLOCK(macinfo);
1130 
1131 #ifdef GLD_DEBUG
1132 	gld_check_assertions();
1133 	if (gld_debug & GLDRDE)
1134 		gld_sr_dump(macinfo);
1135 #endif
1136 
1137 	return (0);
1138 }
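
/*
 * Sketch of the gldm_get_stats() entry point called above (illustrative
 * only; the xx_softc layout and the counter sources are invented).  The
 * driver refreshes the struct gld_stats fields that this module copies
 * into the kstats and returns GLD_SUCCESS:
 *
 *	static int
 *	xx_get_stats(gld_mac_info_t *macinfo, struct gld_stats *sp)
 *	{
 *		struct xx_softc *xxp = (void *)macinfo->gldm_private;
 *
 *		sp->glds_speed = xxp->xx_ifspeed;
 *		sp->glds_media = GLDM_100BT;
 *		sp->glds_duplex = GLD_DUPLEX_FULL;
 *		sp->glds_errxmt = xxp->xx_oerrors;
 *		sp->glds_errrcv = xxp->xx_ierrors;
 *		sp->glds_collisions = xxp->xx_collisions;
 *		return (GLD_SUCCESS);
 *	}
 */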
1139 
1140 static int
1141 gld_init_vlan_stats(gld_vlan_t *vlan)
1142 {
1143 	gld_mac_info_t *mac = vlan->gldv_mac;
1144 	gld_mac_pvt_t *mac_pvt = (gld_mac_pvt_t *)mac->gldm_mac_pvt;
1145 	struct gldkstats *sp;
1146 	glddev_t *glddev;
1147 	kstat_t *ksp;
1148 	char *name;
1149 	int instance;
1150 
1151 	glddev = mac_pvt->major_dev;
1152 	name = glddev->gld_name;
1153 	instance = (vlan->gldv_id * GLD_VLAN_SCALE) + mac->gldm_ppa;
1154 
1155 	if ((ksp = kstat_create(name, instance,
1156 	    NULL, "net", KSTAT_TYPE_NAMED,
1157 	    sizeof (struct gldkstats) / sizeof (kstat_named_t), 0)) == NULL) {
1158 		cmn_err(CE_WARN,
1159 		    "GLD: failed to create kstat structure for %s%d",
1160 		    name, instance);
1161 		return (GLD_FAILURE);
1162 	}
1163 
1164 	vlan->gldv_kstatp = ksp;
1165 
1166 	ksp->ks_update = gld_update_vlan_kstat;
1167 	ksp->ks_private = (void *)vlan;
1168 
1169 	sp = ksp->ks_data;
1170 	kstat_named_init(&sp->glds_pktrcv, "ipackets", KSTAT_DATA_UINT32);
1171 	kstat_named_init(&sp->glds_pktxmt, "opackets", KSTAT_DATA_UINT32);
1172 	kstat_named_init(&sp->glds_errrcv, "ierrors", KSTAT_DATA_ULONG);
1173 	kstat_named_init(&sp->glds_errxmt, "oerrors", KSTAT_DATA_ULONG);
1174 	kstat_named_init(&sp->glds_bytexmt, "obytes", KSTAT_DATA_UINT32);
1175 	kstat_named_init(&sp->glds_bytercv, "rbytes", KSTAT_DATA_UINT32);
1176 	kstat_named_init(&sp->glds_multixmt, "multixmt", KSTAT_DATA_ULONG);
1177 	kstat_named_init(&sp->glds_multircv, "multircv", KSTAT_DATA_ULONG);
1178 	kstat_named_init(&sp->glds_brdcstxmt, "brdcstxmt", KSTAT_DATA_ULONG);
1179 	kstat_named_init(&sp->glds_brdcstrcv, "brdcstrcv", KSTAT_DATA_ULONG);
1180 	kstat_named_init(&sp->glds_blocked, "blocked", KSTAT_DATA_ULONG);
1181 	kstat_named_init(&sp->glds_noxmtbuf, "noxmtbuf", KSTAT_DATA_ULONG);
1182 	kstat_named_init(&sp->glds_norcvbuf, "norcvbuf", KSTAT_DATA_ULONG);
1183 	kstat_named_init(&sp->glds_xmtretry, "xmtretry", KSTAT_DATA_ULONG);
1184 	kstat_named_init(&sp->glds_intr, "intr", KSTAT_DATA_ULONG);
1185 	kstat_named_init(&sp->glds_pktrcv64, "ipackets64", KSTAT_DATA_UINT64);
1186 	kstat_named_init(&sp->glds_pktxmt64, "opackets64", KSTAT_DATA_UINT64);
1187 	kstat_named_init(&sp->glds_bytexmt64, "obytes64", KSTAT_DATA_UINT64);
1188 	kstat_named_init(&sp->glds_bytercv64, "rbytes64", KSTAT_DATA_UINT64);
1189 	kstat_named_init(&sp->glds_unknowns, "unknowns", KSTAT_DATA_ULONG);
1190 	kstat_named_init(&sp->glds_speed, "ifspeed", KSTAT_DATA_UINT64);
1191 	kstat_named_init(&sp->glds_media, "media", KSTAT_DATA_CHAR);
1192 	kstat_named_init(&sp->glds_prom, "promisc", KSTAT_DATA_CHAR);
1193 
1194 	kstat_named_init(&sp->glds_overflow, "oflo", KSTAT_DATA_ULONG);
1195 	kstat_named_init(&sp->glds_underflow, "uflo", KSTAT_DATA_ULONG);
1196 	kstat_named_init(&sp->glds_missed, "missed", KSTAT_DATA_ULONG);
1197 
1198 	kstat_named_init(&sp->glds_xmtbadinterp, "xmt_badinterp",
1199 	    KSTAT_DATA_UINT32);
1200 	kstat_named_init(&sp->glds_rcvbadinterp, "rcv_badinterp",
1201 	    KSTAT_DATA_UINT32);
1202 
1203 	kstat_install(ksp);
1204 	return (GLD_SUCCESS);
1205 }
1206 
1207 static int
1208 gld_update_vlan_kstat(kstat_t *ksp, int rw)
1209 {
1210 	gld_vlan_t	*vlan;
1211 	gld_mac_info_t	*macinfo;
1212 	struct gldkstats *gsp;
1213 	struct gld_stats *stats;
1214 	gld_mac_pvt_t *mac_pvt;
1215 	uint32_t media;
1216 
1217 	if (rw == KSTAT_WRITE)
1218 		return (EACCES);
1219 
1220 	vlan = (gld_vlan_t *)ksp->ks_private;
1221 	ASSERT(vlan != NULL);
1222 
1223 	macinfo = vlan->gldv_mac;
1224 	GLDM_LOCK(macinfo, RW_WRITER);
1225 
1226 	mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt;
1227 
1228 	gsp = vlan->gldv_kstatp->ks_data;
1229 	ASSERT(gsp);
1230 	stats = vlan->gldv_stats;
1231 
1232 	gsp->glds_pktxmt.value.ui32 = stats->glds_pktxmt64 & 0xffffffff;
1233 	gsp->glds_bytexmt.value.ui32 = stats->glds_bytexmt64 & 0xffffffff;
1234 	gsp->glds_errxmt.value.ul = stats->glds_errxmt;
1235 	gsp->glds_multixmt.value.ul = stats->glds_multixmt;
1236 	gsp->glds_brdcstxmt.value.ul = stats->glds_brdcstxmt;
1237 	gsp->glds_noxmtbuf.value.ul = stats->glds_noxmtbuf;
1238 	gsp->glds_xmtretry.value.ul = stats->glds_xmtretry;
1239 	gsp->glds_pktxmt64.value.ui64 = stats->glds_pktxmt64;
1240 	gsp->glds_bytexmt64.value.ui64 = stats->glds_bytexmt64;
1241 
1242 	gsp->glds_pktrcv.value.ui32 = stats->glds_pktrcv64 & 0xffffffff;
1243 	gsp->glds_bytercv.value.ui32 = stats->glds_bytercv64 & 0xffffffff;
1244 	gsp->glds_errrcv.value.ul = stats->glds_errrcv;
1245 	gsp->glds_multircv.value.ul = stats->glds_multircv;
1246 	gsp->glds_brdcstrcv.value.ul = stats->glds_brdcstrcv;
1247 	gsp->glds_blocked.value.ul = stats->glds_blocked;
1248 	gsp->glds_pktrcv64.value.ui64 = stats->glds_pktrcv64;
1249 	gsp->glds_bytercv64.value.ui64 = stats->glds_bytercv64;
1250 	gsp->glds_unknowns.value.ul = stats->glds_unknowns;
1251 	gsp->glds_xmtbadinterp.value.ui32 = stats->glds_xmtbadinterp;
1252 	gsp->glds_rcvbadinterp.value.ui32 = stats->glds_rcvbadinterp;
1253 
1254 	gsp->glds_speed.value.ui64 = mac_pvt->statistics->glds_speed;
1255 	media = mac_pvt->statistics->glds_media;
1256 	(void) strcpy(gsp->glds_media.value.c,
1257 	    gld_media[media < sizeof (gld_media) / sizeof (gld_media[0]) ?
1258 	    media : 0]);
1259 
1260 	GLDM_UNLOCK(macinfo);
1261 	return (0);
1262 }
1263 
1264 /*
1265  * The device dependent driver specifies gld_getinfo as its getinfo routine.
1266  */
1267 /*ARGSUSED*/
1268 int
1269 gld_getinfo(dev_info_t *dip, ddi_info_cmd_t cmd, void *arg, void **resultp)
1270 {
1271 	dev_info_t	*devinfo;
1272 	minor_t		minor = getminor((dev_t)arg);
1273 	int		rc = DDI_FAILURE;
1274 
1275 	switch (cmd) {
1276 	case DDI_INFO_DEVT2DEVINFO:
1277 		if ((devinfo = gld_finddevinfo((dev_t)arg)) != NULL) {
1278 			*(dev_info_t **)resultp = devinfo;
1279 			rc = DDI_SUCCESS;
1280 		}
1281 		break;
1282 	case DDI_INFO_DEVT2INSTANCE:
1283 		/* Need static mapping for deferred attach */
1284 		if (minor == GLD_USE_STYLE2) {
1285 			/*
1286 			 * Style 2:  this minor number does not correspond to
1287 			 * any particular instance number.
1288 			 */
1289 			rc = DDI_FAILURE;
1290 		} else if (minor <= GLD_MAX_STYLE1_MINOR) {
1291 			/* Style 1:  calculate the PPA from the minor */
1292 			*resultp = (void *)(uintptr_t)
1293 			    GLD_STYLE1_MINOR_TO_PPA(minor);
1294 			rc = DDI_SUCCESS;
1295 		} else {
1296 			/* Clone:  look for it.  Not a static mapping */
1297 			if ((devinfo = gld_finddevinfo((dev_t)arg)) != NULL) {
1298 				*resultp = (void *)(uintptr_t)
1299 				    ddi_get_instance(devinfo);
1300 				rc = DDI_SUCCESS;
1301 			}
1302 		}
1303 		break;
1304 	}
1305 
1306 	return (rc);
1307 }
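
/*
 * Wiring sketch (illustrative only; the "xx" names and watermarks are
 * invented): a GLD-based driver points its STREAMS queue initialization at
 * the entry points exported by this module -- gld_rsrv/gld_open/gld_close
 * on the read side, gld_wput/gld_wsrv on the write side -- and installs
 * gld_getinfo (above) as the devo_getinfo member of its dev_ops:
 *
 *	static struct module_info xx_minfo = {
 *		0, "xx", 0, INFPSZ, 65536, 1024
 *	};
 *	static struct qinit xx_rinit = {
 *		NULL, gld_rsrv, gld_open, gld_close, NULL, &xx_minfo, NULL
 *	};
 *	static struct qinit xx_winit = {
 *		gld_wput, gld_wsrv, NULL, NULL, NULL, &xx_minfo, NULL
 *	};
 *	static struct streamtab xx_streamtab = {
 *		&xx_rinit, &xx_winit, NULL, NULL
 *	};
 */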
1308 
1309 /* called from gld_getinfo */
1310 dev_info_t *
1311 gld_finddevinfo(dev_t dev)
1312 {
1313 	minor_t		minor = getminor(dev);
1314 	glddev_t	*device;
1315 	gld_mac_info_t	*mac;
1316 	gld_vlan_t	*vlan;
1317 	gld_t		*str;
1318 	dev_info_t	*devinfo = NULL;
1319 	int		i;
1320 
1321 	if (minor == GLD_USE_STYLE2) {
1322 		/*
1323 		 * Style 2:  this minor number does not correspond to
1324 		 * any particular instance number.
1325 		 *
1326 		 * XXX We don't know what to say.  See Bug 1165519.
1327 		 */
1328 		return (NULL);
1329 	}
1330 
1331 	mutex_enter(&gld_device_list.gld_devlock);	/* hold the device */
1332 
1333 	device = gld_devlookup(getmajor(dev));
1334 	if (device == NULL) {
1335 		/* There are no attached instances of this device */
1336 		mutex_exit(&gld_device_list.gld_devlock);
1337 		return (NULL);
1338 	}
1339 
1340 	/*
1341 	 * Search all attached macs and streams.
1342 	 *
1343 	 * XXX We don't bother checking the DL_UNATTACHED streams since
1344 	 * we don't know what devinfo we should report back even if we
1345 	 * found the minor.  Maybe we should associate streams that are
1346 	 * not currently attached to a PPA with the "first" devinfo node
1347 	 * of the major device to attach -- the one that created the
1348 	 * minor node for the generic device.
1349 	 */
1350 	mutex_enter(&device->gld_devlock);
1351 
1352 	for (mac = device->gld_mac_next;
1353 	    mac != (gld_mac_info_t *)&device->gld_mac_next;
1354 	    mac = mac->gldm_next) {
1355 		gld_mac_pvt_t *pvt = (gld_mac_pvt_t *)mac->gldm_mac_pvt;
1356 
1357 		if (!(mac->gldm_GLD_flags & GLD_MAC_READY))
1358 			continue;	/* this one's not ready yet */
1359 		if (minor <= GLD_MAX_STYLE1_MINOR) {
1360 			/* Style 1 -- look for the corresponding PPA */
1361 			if (minor == GLD_STYLE1_PPA_TO_MINOR(mac->gldm_ppa)) {
1362 				devinfo = mac->gldm_devinfo;
1363 				goto out;	/* found it! */
1364 			} else
1365 				continue;	/* not this PPA */
1366 		}
1367 
1368 		/* We are looking for a clone */
1369 		for (i = 0; i < VLAN_HASHSZ; i++) {
1370 			for (vlan = pvt->vlan_hash[i];
1371 			    vlan != NULL; vlan = vlan->gldv_next) {
1372 				for (str = vlan->gldv_str_next;
1373 				    str != (gld_t *)&vlan->gldv_str_next;
1374 				    str = str->gld_next) {
1375 					ASSERT(str->gld_mac_info == mac);
1376 					if (minor == str->gld_minor) {
1377 						devinfo = mac->gldm_devinfo;
1378 						goto out;
1379 					}
1380 				}
1381 			}
1382 		}
1383 	}
1384 out:
1385 	mutex_exit(&device->gld_devlock);
1386 	mutex_exit(&gld_device_list.gld_devlock);
1387 	return (devinfo);
1388 }
1389 
1390 /*
1391  * STREAMS open routine.  The device dependent driver specifies this as its
1392  * open entry point.
1393  */
1394 /*ARGSUSED2*/
1395 int
1396 gld_open(queue_t *q, dev_t *dev, int flag, int sflag, cred_t *cred)
1397 {
1398 	gld_mac_pvt_t *mac_pvt;
1399 	gld_t *gld;
1400 	glddev_t *glddev;
1401 	gld_mac_info_t *macinfo;
1402 	minor_t minor = getminor(*dev);
1403 	gld_vlan_t *vlan;
1404 	t_uscalar_t ppa;
1405 
1406 	ASSERT(q != NULL);
1407 
1408 	if (minor > GLD_MAX_STYLE1_MINOR)
1409 		return (ENXIO);
1410 
1411 	ASSERT(q->q_ptr == NULL);	/* Clone device gives us a fresh Q */
1412 
1413 	/* Find our per-major glddev_t structure */
1414 	mutex_enter(&gld_device_list.gld_devlock);
1415 	glddev = gld_devlookup(getmajor(*dev));
1416 
1417 	/*
1418 	 * This glddev will hang around since detach (and therefore
1419 	 * gld_unregister) can't run while we're here in the open routine.
1420 	 */
1421 	mutex_exit(&gld_device_list.gld_devlock);
1422 
1423 	if (glddev == NULL)
1424 		return (ENXIO);
1425 
1426 #ifdef GLD_DEBUG
1427 	if (gld_debug & GLDPROT) {
1428 		if (minor == GLD_USE_STYLE2)
1429 			cmn_err(CE_NOTE, "gld_open(%p, Style 2)", (void *)q);
1430 		else
1431 			cmn_err(CE_NOTE, "gld_open(%p, Style 1, minor = %d)",
1432 			    (void *)q, minor);
1433 	}
1434 #endif
1435 
1436 	/*
1437 	 * get a per-stream structure and link things together so we
1438 	 * can easily find them later.
1439 	 */
1440 	gld = kmem_zalloc(sizeof (gld_t), KM_SLEEP);
1441 
1442 	/*
1443 	 * fill in the structure and state info
1444 	 */
1445 	gld->gld_qptr = q;
1446 	gld->gld_device = glddev;
1447 	gld->gld_state = DL_UNATTACHED;
1448 
1449 	/*
1450 	 * we must atomically find a free minor number and add the stream
1451 	 * to a list, because gld_findminor has to traverse the lists to
1452 	 * determine which minor numbers are free.
1453 	 */
1454 	mutex_enter(&glddev->gld_devlock);
1455 
1456 	/* find a free minor device number for the clone */
1457 	gld->gld_minor = gld_findminor(glddev);
1458 	if (gld->gld_minor == 0) {
1459 		mutex_exit(&glddev->gld_devlock);
1460 		kmem_free(gld, sizeof (gld_t));
1461 		return (ENOSR);
1462 	}
1463 
1464 #ifdef GLD_VERBOSE_DEBUG
1465 	if (gld_debug & GLDPROT)
1466 		cmn_err(CE_NOTE, "gld_open() gld ptr: %p minor: %d",
1467 		    (void *)gld, gld->gld_minor);
1468 #endif
1469 
1470 	if (minor == GLD_USE_STYLE2) {
1471 		gld->gld_style = DL_STYLE2;
1472 		*dev = makedevice(getmajor(*dev), gld->gld_minor);
1473 		WR(q)->q_ptr = q->q_ptr = (caddr_t)gld;
1474 		gldinsque(gld, glddev->gld_str_prev);
1475 #ifdef GLD_VERBOSE_DEBUG
1476 		if (gld_debug & GLDPROT)
1477 			cmn_err(CE_NOTE, "GLDstruct added to device list");
1478 #endif
1479 		(void) qassociate(q, -1);
1480 		goto done;
1481 	}
1482 
1483 	gld->gld_style = DL_STYLE1;
1484 
1485 	/* the PPA is actually 1 less than the minordev */
1486 	ppa = GLD_STYLE1_MINOR_TO_PPA(minor);
1487 
1488 	for (macinfo = glddev->gld_mac_next;
1489 	    macinfo != (gld_mac_info_t *)(&glddev->gld_mac_next);
1490 	    macinfo = macinfo->gldm_next) {
1491 		ASSERT(macinfo != NULL);
1492 		if (macinfo->gldm_ppa != ppa)
1493 			continue;
1494 
1495 		if (!(macinfo->gldm_GLD_flags & GLD_MAC_READY))
1496 			continue;	/* this one's not ready yet */
1497 
1498 		/*
1499 		 * we found the correct PPA
1500 		 */
1501 		GLDM_LOCK(macinfo, RW_WRITER);
1502 
1503 		gld->gld_mac_info = macinfo;
1504 
1505 		if (macinfo->gldm_send_tagged != NULL)
1506 			gld->gld_send = macinfo->gldm_send_tagged;
1507 		else
1508 			gld->gld_send = macinfo->gldm_send;
1509 
1510 		/* now ready for action */
1511 		gld->gld_state = DL_UNBOUND;
1512 
1513 		if ((vlan = gld_get_vlan(macinfo, VLAN_VID_NONE)) == NULL) {
1514 			GLDM_UNLOCK(macinfo);
1515 			mutex_exit(&glddev->gld_devlock);
1516 			kmem_free(gld, sizeof (gld_t));
1517 			return (EIO);
1518 		}
1519 
1520 		mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt;
1521 		if (!mac_pvt->started) {
1522 			if (gld_start_mac(macinfo) != GLD_SUCCESS) {
1523 				gld_rem_vlan(vlan);
1524 				GLDM_UNLOCK(macinfo);
1525 				mutex_exit(&glddev->gld_devlock);
1526 				kmem_free(gld, sizeof (gld_t));
1527 				return (EIO);
1528 			}
1529 		}
1530 
1531 		gld->gld_vlan = vlan;
1532 		vlan->gldv_nstreams++;
1533 		gldinsque(gld, vlan->gldv_str_prev);
1534 		*dev = makedevice(getmajor(*dev), gld->gld_minor);
1535 		WR(q)->q_ptr = q->q_ptr = (caddr_t)gld;
1536 
1537 		GLDM_UNLOCK(macinfo);
1538 #ifdef GLD_VERBOSE_DEBUG
1539 		if (gld_debug & GLDPROT)
1540 			cmn_err(CE_NOTE,
1541 			    "GLDstruct added to instance list");
1542 #endif
1543 		break;
1544 	}
1545 
1546 	if (gld->gld_state == DL_UNATTACHED) {
1547 		mutex_exit(&glddev->gld_devlock);
1548 		kmem_free(gld, sizeof (gld_t));
1549 		return (ENXIO);
1550 	}
1551 
1552 done:
1553 	mutex_exit(&glddev->gld_devlock);
1554 	noenable(WR(q));	/* We'll do the qenables manually */
1555 	qprocson(q);		/* start the queues running */
1556 	qenable(WR(q));
1557 	return (0);
1558 }
1559 
1560 /*
1561  * normal stream close call checks current status and cleans up
1562  * data structures that were dynamically allocated
1563  */
1564 /*ARGSUSED1*/
1565 int
1566 gld_close(queue_t *q, int flag, cred_t *cred)
1567 {
1568 	gld_t	*gld = (gld_t *)q->q_ptr;
1569 	glddev_t *glddev = gld->gld_device;
1570 
1571 	ASSERT(q);
1572 	ASSERT(gld);
1573 
1574 #ifdef GLD_DEBUG
1575 	if (gld_debug & GLDPROT) {
1576 		cmn_err(CE_NOTE, "gld_close(%p, Style %d)",
1577 		    (void *)q, (gld->gld_style & 0x1) + 1);
1578 	}
1579 #endif
1580 
1581 	/* Hold all device streams lists still while we check for a macinfo */
1582 	mutex_enter(&glddev->gld_devlock);
1583 
1584 	if (gld->gld_mac_info != NULL) {
1585 		/* If there's a macinfo, block recv while we change state */
1586 		GLDM_LOCK(gld->gld_mac_info, RW_WRITER);
1587 		gld->gld_flags |= GLD_STR_CLOSING; /* no more rcv putnexts */
1588 		GLDM_UNLOCK(gld->gld_mac_info);
1589 	} else {
1590 		/* no mac DL_ATTACHED right now */
1591 		gld->gld_flags |= GLD_STR_CLOSING;
1592 	}
1593 
1594 	mutex_exit(&glddev->gld_devlock);
1595 
1596 	/*
1597 	 * qprocsoff before we call gld_unbind/gldunattach, so that
1598 	 * we know wsrv isn't in there trying to undo what we're doing.
1599 	 */
1600 	qprocsoff(q);
1601 
1602 	ASSERT(gld->gld_wput_count == 0);
1603 	gld->gld_wput_count = 0;	/* just in case */
1604 
1605 	if (gld->gld_state == DL_IDLE) {
1606 		/* Need to unbind */
1607 		ASSERT(gld->gld_mac_info != NULL);
1608 		(void) gld_unbind(WR(q), NULL);
1609 	}
1610 
1611 	if (gld->gld_state == DL_UNBOUND) {
1612 		/*
1613 		 * Need to unattach.
1614 		 * For a Style 2 stream, gldunattach also
1615 		 * associates the queue with a NULL dip.
1616 		 */
1617 		ASSERT(gld->gld_mac_info != NULL);
1618 		(void) gldunattach(WR(q), NULL);
1619 	}
1620 
1621 	/* disassociate the stream from the device */
1622 	q->q_ptr = WR(q)->q_ptr = NULL;
1623 
1624 	/*
1625 	 * Since we unattached above (if necessary), we know that we're
1626 	 * on the per-major list of unattached streams, rather than a
1627 	 * per-PPA list.  So we know we should hold the devlock.
1628 	 */
1629 	mutex_enter(&glddev->gld_devlock);
1630 	gldremque(gld);			/* remove from Style 2 list */
1631 	mutex_exit(&glddev->gld_devlock);
1632 
1633 	kmem_free(gld, sizeof (gld_t));
1634 
1635 	return (0);
1636 }
1637 
1638 /*
1639  * gld_rsrv (q)
1640  *	simple read service procedure
1641  *	purpose is to avoid the time it takes for packets
1642  *	to move through IP so we can get them off the board
1643  *	as fast as possible due to limited PC resources.
1644  *
1645  *	This is not normally used in the current implementation.  It
1646  *	can be selected with the undocumented property "fast_recv".
1647  *	If that property is set, gld_recv will send the packet
1648  *	upstream with a putq() rather than a putnext(), thus causing
1649  *	this routine to be scheduled.
1650  */
1651 int
1652 gld_rsrv(queue_t *q)
1653 {
1654 	mblk_t *mp;
1655 
1656 	while ((mp = getq(q)) != NULL) {
1657 		if (canputnext(q)) {
1658 			putnext(q, mp);
1659 		} else {
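			/* Upstream is flow controlled; drop the packet */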
1660 			freemsg(mp);
1661 		}
1662 	}
1663 	return (0);
1664 }
1665 
1666 /*
1667  * gld_wput (q, mp)
1668  * general gld stream write put routine. Receives fastpath data from upper
1669  * modules and processes it immediately.  ioctl and M_PROTO/M_PCPROTO are
1670  * queued for later processing by the service procedure.
1671  */
1672 
1673 int
1674 gld_wput(queue_t *q, mblk_t *mp)
1675 {
1676 	gld_t  *gld = (gld_t *)(q->q_ptr);
1677 	int	rc;
1678 	uint32_t upri;
1679 
1680 #ifdef GLD_DEBUG
1681 	if (gld_debug & GLDTRACE)
1682 		cmn_err(CE_NOTE, "gld_wput(%p %p): type %x",
1683 		    (void *)q, (void *)mp, DB_TYPE(mp));
1684 #endif
1685 	switch (DB_TYPE(mp)) {
1686 
1687 	case M_DATA:
1688 		/* fast data / raw support */
1689 		/* we must be DL_ATTACHED and DL_BOUND to do this */
1690 		/* Tricky to access memory without taking the mutex */
1691 		if ((gld->gld_flags & (GLD_RAW | GLD_FAST)) == 0 ||
1692 		    gld->gld_state != DL_IDLE) {
1693 			merror(q, mp, EPROTO);
1694 			break;
1695 		}
1696 		/*
1697 		 * Clean up MBLK_VTAG in case it was set by other
1698 		 * modules. MBLK_VTAG is used to save the vtag information.
1699 		 */
1700 		GLD_CLEAR_MBLK_VTAG(mp);
1701 
1702 		/*
1703 		 * This can happen if wsrv has taken off the last mblk but
1704 		 * is still processing it.
1705 		 */
1706 		membar_consumer();
1707 		if (gld->gld_in_wsrv)
1708 			goto use_wsrv;
1709 
1710 		/*
1711 		 * Keep a count of current wput calls into gld_start().
1712 		 * Nonzero count delays any attempted DL_UNBIND.
1713 		 * See comments above gld_start().
1714 		 */
1715 		atomic_inc_32((uint32_t *)&gld->gld_wput_count);
1716 		membar_enter();
1717 
1718 		/* Recheck state now wput_count is set to prevent DL_UNBIND */
1719 		/* If this Q is in process of DL_UNBIND, don't call start */
1720 		if (gld->gld_state != DL_IDLE || gld->gld_in_unbind) {
1721 			/* Extremely unlikely */
1722 			atomic_dec_32((uint32_t *)&gld->gld_wput_count);
1723 			goto use_wsrv;
1724 		}
1725 
1726 		/*
1727 		 * Get the priority value. Note that in raw mode, the
1728 		 * per-packet priority value kept in b_band is ignored.
1729 		 */
1730 		upri = (gld->gld_flags & GLD_RAW) ? gld->gld_upri :
1731 		    UPRI(gld, mp->b_band);
1732 
1733 		rc = gld_start(q, mp, GLD_WPUT, upri);
1734 
1735 		/* Allow DL_UNBIND again */
1736 		membar_exit();
1737 		atomic_dec_32((uint32_t *)&gld->gld_wput_count);
1738 
1739 		if (rc == GLD_NORESOURCES)
1740 			qenable(q);
1741 		break;	/*  Done with this packet */
1742 
1743 use_wsrv:
1744 		/* Q not empty, in DL_DETACH, or start gave NORESOURCES */
1745 		(void) putq(q, mp);
1746 		qenable(q);
1747 		break;
1748 
1749 	case M_IOCTL:
1750 		/* ioctl relies on wsrv single threading per queue */
1751 		(void) putq(q, mp);
1752 		qenable(q);
1753 		break;
1754 
1755 	case M_CTL:
1756 		(void) putq(q, mp);
1757 		qenable(q);
1758 		break;
1759 
1760 	case M_FLUSH:		/* canonical flush handling */
1761 		/* XXX Should these be FLUSHALL? */
1762 		if (*mp->b_rptr & FLUSHW)
1763 			flushq(q, 0);
1764 		if (*mp->b_rptr & FLUSHR) {
1765 			flushq(RD(q), 0);
1766 			*mp->b_rptr &= ~FLUSHW;
1767 			qreply(q, mp);
1768 		} else
1769 			freemsg(mp);
1770 		break;
1771 
1772 	case M_PROTO:
1773 	case M_PCPROTO:
1774 		/* these rely on wsrv single threading per queue */
1775 		(void) putq(q, mp);
1776 		qenable(q);
1777 		break;
1778 
1779 	default:
1780 #ifdef GLD_DEBUG
1781 		if (gld_debug & GLDETRACE)
1782 			cmn_err(CE_WARN,
1783 			    "gld: Unexpected packet type from queue: 0x%x",
1784 			    DB_TYPE(mp));
1785 #endif
1786 		freemsg(mp);
1787 	}
1788 	return (0);
1789 }
1790 
1791 /*
1792  * gld_wsrv - Incoming messages are processed according to the DLPI protocol
1793  * specification.
1794  *
1795  * wsrv is single-threaded per Q.  We make use of this to avoid taking the
1796  * lock for reading data items that are only ever written by us.
1797  */
1798 
1799 int
1800 gld_wsrv(queue_t *q)
1801 {
1802 	mblk_t *mp;
1803 	gld_t *gld = (gld_t *)q->q_ptr;
1804 	gld_mac_info_t *macinfo;
1805 	union DL_primitives *prim;
1806 	int err;
1807 	uint32_t upri;
1808 
1809 #ifdef GLD_DEBUG
1810 	if (gld_debug & GLDTRACE)
1811 		cmn_err(CE_NOTE, "gld_wsrv(%p)", (void *)q);
1812 #endif
1813 
1814 	ASSERT(!gld->gld_in_wsrv);
1815 
1816 	gld->gld_xwait = B_FALSE; /* We are now going to process this Q */
1817 
1818 	if (q->q_first == NULL)
1819 		return (0);
1820 
1821 	macinfo = gld->gld_mac_info;
1822 
1823 	/*
1824 	 * Help wput avoid a call to gld_start if there might be a message
1825 	 * previously queued by that thread being processed here.
1826 	 */
1827 	gld->gld_in_wsrv = B_TRUE;
1828 	membar_enter();
1829 
1830 	while ((mp = getq(q)) != NULL) {
1831 		switch (DB_TYPE(mp)) {
1832 		case M_DATA:
1833 
1834 			/*
1835 			 * This is either a retry of a previously processed
1836 			 * UNITDATA_REQ, or a RAW or FAST message from above.
1837 			 */
1838 			if (macinfo == NULL) {
1839 				/* No longer attached to a PPA, drop packet */
1840 				freemsg(mp);
1841 				break;
1842 			}
1843 
1844 			gld->gld_sched_ran = B_FALSE;
1845 			membar_enter();
1846 
1847 			/*
1848 			 * Get the priority value. Note that in raw mode, the
1849 			 * per-packet priority value kept in b_band is ignored.
1850 			 */
1851 			upri = (gld->gld_flags & GLD_RAW) ? gld->gld_upri :
1852 			    UPRI(gld, mp->b_band);
1853 
1854 			err = gld_start(q, mp, GLD_WSRV, upri);
1855 			if (err == GLD_NORESOURCES) {
1856 				/* gld_sched will qenable us later */
1857 				gld->gld_xwait = B_TRUE; /* want qenable */
1858 				membar_enter();
1859 				/*
1860 				 * v2:  we're not holding the lock; it's
1861 				 * possible that the driver could have already
1862 				 * called gld_sched (following up on its
1863 				 * return of GLD_NORESOURCES), before we got a
1864 				 * chance to do the putbq() and set gld_xwait.
1865 				 * So if we saw a call to gld_sched that
1866 				 * examined this queue, since our call to
1867 				 * gld_start() above, then it's possible we've
1868 				 * already seen the only call to gld_sched()
1869 				 * we're ever going to see.  So we better retry
1870 				 * transmitting this packet right now.
1871 				 */
1872 				if (gld->gld_sched_ran) {
1873 #ifdef GLD_DEBUG
1874 					if (gld_debug & GLDTRACE)
1875 						cmn_err(CE_NOTE, "gld_wsrv: "
1876 						    "sched was called");
1877 #endif
1878 					break;	/* try again right now */
1879 				}
1880 				gld->gld_in_wsrv = B_FALSE;
1881 				return (0);
1882 			}
1883 			break;
1884 
1885 		case M_IOCTL:
1886 			(void) gld_ioctl(q, mp);
1887 			break;
1888 
1889 		case M_CTL:
1890 			if (macinfo == NULL) {
1891 				freemsg(mp);
1892 				break;
1893 			}
1894 
1895 			if (macinfo->gldm_mctl != NULL) {
1896 				GLDM_LOCK(macinfo, RW_WRITER);
1897 				(void) (*macinfo->gldm_mctl) (macinfo, q, mp);
1898 				GLDM_UNLOCK(macinfo);
1899 			} else {
1900 				/* Driver doesn't recognize M_CTL; drop it */
1901 				freemsg(mp);
1902 			}
1903 			break;
1904 
1905 		case M_PROTO:	/* Will be a DLPI message of some type */
1906 		case M_PCPROTO:
1907 			if ((err = gld_cmds(q, mp)) != GLDE_OK) {
1908 				if (err == GLDE_RETRY) {
1909 					gld->gld_in_wsrv = B_FALSE;
1910 					return (0); /* quit while we're ahead */
1911 				}
1912 				prim = (union DL_primitives *)mp->b_rptr;
1913 				dlerrorack(q, mp, prim->dl_primitive, err, 0);
1914 			}
1915 			break;
1916 
1917 		default:
1918 			/* This should never happen */
1919 #ifdef GLD_DEBUG
1920 			if (gld_debug & GLDERRS)
1921 				cmn_err(CE_WARN,
1922 				    "gld_wsrv: db_type(%x) not supported",
1923 				    mp->b_datap->db_type);
1924 #endif
1925 			freemsg(mp);	/* unknown types are discarded */
1926 			break;
1927 		}
1928 	}
1929 
1930 	membar_exit();
1931 	gld->gld_in_wsrv = B_FALSE;
1932 	return (0);
1933 }
1934 
1935 /*
1936  * gld_start() can get called from gld_wput(), gld_wsrv(), or gld_unitdata().
1937  *
1938  * We only come directly from wput() in the GLD_FAST (fastpath) or RAW case.
1939  *
1940  * In particular, we must avoid calling gld_precv*() if we came from wput().
1941  * gld_precv*() is where we, on the transmit side, loop back our outgoing
1942  * packets to the receive side if we are in physical promiscuous mode.
1943  * Since the receive side holds a lock across its call to the upstream
1944  * putnext, and that upstream module could well have looped back to our
1945  * wput() routine on the same thread, we cannot call gld_precv* from here
1946  * for fear of causing a recursive lock entry in our receive code.
1947  *
1948  * There is a problem here when coming from gld_wput().  While wput
1949  * only comes here if the queue is attached to a PPA and bound to a SAP
1950  * and there are no messages on the queue ahead of the M_DATA that could
1951  * change that, it is theoretically possible that another thread could
1952  * now wput a DL_UNBIND and a DL_DETACH message, and the wsrv() routine
1953  * could wake up and process them, before we finish processing this
1954  * send of the M_DATA.  This can only possibly happen on a Style 2 RAW or
1955  * FAST (fastpath) stream:  non RAW/FAST streams always go through wsrv(),
1956  * and Style 1 streams only DL_DETACH in the close routine, where
1957  * qprocsoff() protects us.  If this happens we could end up calling
1958  * gldm_send() after we have detached the stream and possibly called
1959  * gldm_stop().  Worse, once the number of attached streams goes to zero,
1960  * detach/unregister could be called, and the macinfo could go away entirely.
1961  *
1962  * No one has ever seen this happen.
1963  *
1964  * It is some trouble to fix this, and we would rather not add any mutex
1965  * logic into the wput() routine, which is supposed to be a "fast"
1966  * path.
1967  *
1968  * What I've done is use an atomic counter to keep a count of the number
1969  * of threads currently calling gld_start() from wput() on this stream.
1970  * If DL_DETACH sees this as nonzero, it putbqs the request back onto
1971  * the queue and qenables, hoping to have better luck next time.  Since
1972  * people shouldn't be trying to send after they've asked to DL_DETACH,
1973  * hopefully very soon all the wput=>start threads should have returned
1974  * and the DL_DETACH will succeed.  It's hard to test this since the odds
1975  * of the failure even trying to happen are so small.  I probably could
1976  * have ignored the whole issue and never been the worse for it.
1977  *
1978  * Because some GLDv2 Ethernet drivers do not allow the size of transmitted
1979  * packet to be greater than ETHERMAX, we must first strip the VLAN tag
1980  * from a tagged packet before passing it to the driver's gld_send() entry
1981  * point function, and pass the VLAN tag as a separate argument. The
1982  * gld_send() function may fail. In that case, the packet will need to be
1983  * queued in order to be processed again in GLD's service routine. As the
1984  * VTAG has already been stripped at that time, we save the VTAG information
1985  * in (the unused fields of) dblk using GLD_SAVE_MBLK_VTAG(), so that the
1986  * VTAG can also be queued and be able to be got when gld_start() is called
1987  * next time from gld_wsrv().
1988  *
1989  * Some rules to use GLD_{CLEAR|SAVE}_MBLK_VTAG macros:
1990  *
1991  * - GLD_SAVE_MBLK_VTAG() must be called to save the VTAG information each time
1992  *   the message is queued by putbq().
1993  *
1994  * - GLD_CLEAR_MBLK_VTAG() must be called to clear the bogus VTAG information
1995  *   (if any) in dblk before the message is passed to the gld_start() function.
1996  */
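/*
 * Illustrative sketch (not part of the original source) of the handshake
 * just described.  The sender side mirrors the M_DATA case in gld_wput()
 * below; the unbind/detach side paraphrases what the DL_UNBIND/DL_DETACH
 * handling in wsrv's context does when it sees a nonzero count:
 *
 *	sender (gld_wput, M_DATA fast path):
 *		atomic_inc_32((uint32_t *)&gld->gld_wput_count);
 *		membar_enter();
 *		if (gld->gld_state != DL_IDLE || gld->gld_in_unbind)
 *			back out, decrement the count, queue for wsrv;
 *		(void) gld_start(q, mp, GLD_WPUT, upri);
 *		membar_exit();
 *		atomic_dec_32((uint32_t *)&gld->gld_wput_count);
 *
 *	unbind/detach (wsrv context):
 *		if (gld->gld_wput_count != 0) {
 *			(void) putbq(q, mp);
 *			qenable(q);
 *			return (GLDE_RETRY);
 *		}
 */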
1997 static int
1998 gld_start(queue_t *q, mblk_t *mp, int caller, uint32_t upri)
1999 {
2000 	mblk_t *nmp;
2001 	gld_t *gld = (gld_t *)q->q_ptr;
2002 	gld_mac_info_t *macinfo;
2003 	gld_mac_pvt_t *mac_pvt;
2004 	int rc;
2005 	gld_interface_t *ifp;
2006 	pktinfo_t pktinfo;
2007 	uint32_t vtag, vid;
2008 	uint32_t raw_vtag = 0;
2009 	gld_vlan_t *vlan;
2010 	struct gld_stats *stats0, *stats = NULL;
2011 
2012 	ASSERT(DB_TYPE(mp) == M_DATA);
2013 	macinfo = gld->gld_mac_info;
2014 	mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt;
2015 	ifp = mac_pvt->interfacep;
2016 	vlan = (gld_vlan_t *)gld->gld_vlan;
2017 	vid = vlan->gldv_id;
2018 
2019 	/*
2020 	 * If this interface is a VLAN, the kstats of the corresponding
2021 	 * "VLAN 0" should also be updated. Note that the gld_vlan_t
2022 	 * structure for VLAN 0 might not exist if there are no DLPI
2023 	 * consumers attaching on VLAN 0. Fortunately we can directly
2024 	 * access VLAN 0's kstats from macinfo.
2025 	 *
2026 	 * Therefore, stats0 (VLAN 0's kstats) must always be
2027 	 * updated, and stats must be updated if it is not NULL.
2028 	 */
2029 	stats0 = mac_pvt->statistics;
2030 	if (vid != VLAN_VID_NONE)
2031 		stats = vlan->gldv_stats;
2032 
2033 	if ((*ifp->interpreter)(macinfo, mp, &pktinfo, GLD_TX) != 0) {
2034 #ifdef GLD_DEBUG
2035 		if (gld_debug & GLDERRS)
2036 			cmn_err(CE_WARN,
2037 			    "gld_start: failed to interpret outbound packet");
2038 #endif
2039 		goto badarg;
2040 	}
2041 
2042 	vtag = VLAN_VID_NONE;
2043 	raw_vtag = GLD_GET_MBLK_VTAG(mp);
2044 	if (GLD_VTAG_TCI(raw_vtag) != 0) {
2045 		uint16_t raw_pri, raw_vid, evid;
2046 
2047 		/*
2048 		 * Tagged packet.
2049 		 */
2050 		raw_pri = GLD_VTAG_PRI(raw_vtag);
2051 		raw_vid = GLD_VTAG_VID(raw_vtag);
2052 		GLD_CLEAR_MBLK_VTAG(mp);
2053 
2054 		if (gld->gld_flags & GLD_RAW) {
2055 			/*
2056 			 * In raw mode, we only expect untagged packets or
2057 			 * special priority-tagged packets on a VLAN stream.
2058 			 * Drop the packet if its VID is not zero.
2059 			 */
2060 			if (vid != VLAN_VID_NONE && raw_vid != VLAN_VID_NONE)
2061 				goto badarg;
2062 
2063 			/*
2064 			 * If it is raw mode, use the per-stream priority if
2065 			 * the priority is not specified in the packet.
2066 			 * Otherwise, ignore the priority bits in the packet.
2067 			 */
2068 			upri = (raw_pri != 0) ? raw_pri : upri;
2069 		}
2070 
2071 		if (vid == VLAN_VID_NONE && vid != raw_vid) {
2072 			gld_vlan_t *tmp_vlan;
2073 
2074 			/*
2075 			 * This link is a physical link but the packet is
2076 			 * VLAN tagged, so the kstats of the corresponding
2077 			 * VLAN (if any) should also be updated.
2078 			 */
2079 			tmp_vlan = gld_find_vlan(macinfo, raw_vid);
2080 			if (tmp_vlan != NULL)
2081 				stats = tmp_vlan->gldv_stats;
2082 		}
2083 
2084 		evid = (vid == VLAN_VID_NONE) ? raw_vid : vid;
2085 		if (evid != VLAN_VID_NONE || upri != 0)
2086 			vtag = GLD_MAKE_VTAG(upri, VLAN_CFI_ETHER, evid);
2087 	} else {
2088 		/*
2089 		 * Untagged packet:
2090 		 * Get vtag from the attached PPA of this stream.
2091 		 */
2092 		if ((vid != VLAN_VID_NONE) ||
2093 		    ((macinfo->gldm_type == DL_ETHER) && (upri != 0))) {
2094 			vtag = GLD_MAKE_VTAG(upri, VLAN_CFI_ETHER, vid);
2095 		}
2096 	}
2097 
2098 	/*
2099 	 * We're not holding the lock for this check.  If the promiscuous
2100 	 * state is in flux it doesn't matter much if we get this wrong.
2101 	 */
2102 	if (mac_pvt->nprom > 0) {
2103 		/*
2104 		 * We want to loopback to the receive side, but to avoid
2105 		 * recursive lock entry:  if we came from wput(), which
2106 		 * could have looped back via IP from our own receive
2107 		 * interrupt thread, we decline this request.  wput()
2108 		 * will then queue the packet for wsrv().  This means
2109 		 * that when snoop is running we don't get the advantage
2110 		 * of the wput() multithreaded direct entry to the
2111 		 * driver's send routine.
2112 		 */
2113 		if (caller == GLD_WPUT) {
2114 			GLD_SAVE_MBLK_VTAG(mp, raw_vtag);
2115 			(void) putbq(q, mp);
2116 			return (GLD_NORESOURCES);
2117 		}
2118 		if (macinfo->gldm_capabilities & GLD_CAP_ZEROCOPY)
2119 			nmp = dupmsg_noloan(mp);
2120 		else
2121 			nmp = dupmsg(mp);
2122 	} else
2123 		nmp = NULL;		/* we need no loopback */
2124 
2125 	if (ifp->hdr_size > 0 &&
2126 	    pktinfo.pktLen > ifp->hdr_size + (vtag == 0 ? 0 : VTAG_SIZE) +
2127 	    macinfo->gldm_maxpkt) {
2128 		if (nmp)
2129 			freemsg(nmp);	/* free the duped message */
2130 #ifdef GLD_DEBUG
2131 		if (gld_debug & GLDERRS)
2132 			cmn_err(CE_WARN,
2133 			    "gld_start: oversize outbound packet, size %d,"
2134 			    "max %d", pktinfo.pktLen,
2135 			    ifp->hdr_size + (vtag == 0 ? 0 : VTAG_SIZE) +
2136 			    macinfo->gldm_maxpkt);
2137 #endif
2138 		goto badarg;
2139 	}
2140 
2141 	rc = (*gld->gld_send)(macinfo, mp, vtag);
2142 
2143 	if (rc != GLD_SUCCESS) {
2144 		if (rc == GLD_NORESOURCES) {
2145 			ATOMIC_BUMP(stats0, stats, glds_xmtretry, 1);
2146 			GLD_SAVE_MBLK_VTAG(mp, raw_vtag);
2147 			(void) putbq(q, mp);
2148 		} else {
2149 			/* transmit error; drop the packet */
2150 			freemsg(mp);
2151 			/* We're supposed to count failed attempts as well */
2152 			UPDATE_STATS(stats0, stats, pktinfo, 1);
2153 #ifdef GLD_DEBUG
2154 			if (gld_debug & GLDERRS)
2155 				cmn_err(CE_WARN,
2156 				    "gld_start: gldm_send failed %d", rc);
2157 #endif
2158 		}
2159 		if (nmp)
2160 			freemsg(nmp);	/* free the duped message */
2161 		return (rc);
2162 	}
2163 
2164 	UPDATE_STATS(stats0, stats, pktinfo, 1);
2165 
2166 	/*
2167 	 * Loopback case. The message needs to be sent back up on
2168 	 * the read side. This silently fails if the dupmsg above
2169 	 * failed. That is probably OK: if there is no memory to dup
2170 	 * the block, there isn't much we could do anyway.
2171 	 */
2172 	if (nmp) {
2173 		GLDM_LOCK(macinfo, RW_WRITER);
2174 		gld_precv(macinfo, nmp, vtag, stats);
2175 		GLDM_UNLOCK(macinfo);
2176 	}
2177 
2178 	return (GLD_SUCCESS);
2179 badarg:
2180 	freemsg(mp);
2181 
2182 	ATOMIC_BUMP(stats0, stats, glds_xmtbadinterp, 1);
2183 	return (GLD_BADARG);
2184 }
2185 
2186 /*
2187  * gld_intr (macinfo)
2188  */
2189 uint_t
2190 gld_intr(gld_mac_info_t *macinfo)
2191 {
2192 	ASSERT(macinfo != NULL);
2193 
2194 	if (!(macinfo->gldm_GLD_flags & GLD_MAC_READY))
2195 		return (DDI_INTR_UNCLAIMED);
2196 
2197 	return ((*macinfo->gldm_intr)(macinfo));
2198 }
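/*
 * Illustrative sketch (not part of the original source): how a GLDv2
 * driver typically hooks into gld_intr() and gld_sched().  The "xx"
 * names and "devinfo" below are hypothetical driver identifiers, not
 * GLD interfaces.
 *
 * At attach time the driver can register gld_intr() as its interrupt
 * handler with macinfo as the argument, and point gldm_intr at its own
 * service routine:
 *
 *	macinfo->gldm_intr = xx_intr;
 *	(void) ddi_add_intr(devinfo, 0, &macinfo->gldm_cookie, NULL,
 *	    (uint_t (*)(caddr_t))gld_intr, (caddr_t)macinfo);
 *
 * xx_intr() then passes received frames up with gld_recv(macinfo, mp)
 * and, after reclaiming transmit resources, calls gld_sched(macinfo) so
 * that streams waiting after a GLD_NORESOURCES return get qenabled again.
 */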
2199 
2200 /*
2201  * gld_sched (macinfo)
2202  *
2203  * This routine scans the streams that refer to a specific macinfo
2204  * structure and causes the STREAMS scheduler to try to run them if
2205  * they are marked as waiting for the transmit buffer.
2206  */
2207 void
2208 gld_sched(gld_mac_info_t *macinfo)
2209 {
2210 	gld_mac_pvt_t *mac_pvt;
2211 	gld_t *gld;
2212 	gld_vlan_t *vlan;
2213 	int i;
2214 
2215 	ASSERT(macinfo != NULL);
2216 
2217 	GLDM_LOCK(macinfo, RW_WRITER);
2218 
2219 	if (macinfo->gldm_GLD_flags & GLD_UNREGISTERED) {
2220 		/* We're probably being called from a leftover interrupt */
2221 		GLDM_UNLOCK(macinfo);
2222 		return;
2223 	}
2224 
2225 	mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt;
2226 
2227 	for (i = 0; i < VLAN_HASHSZ; i++) {
2228 		for (vlan = mac_pvt->vlan_hash[i];
2229 		    vlan != NULL; vlan = vlan->gldv_next) {
2230 			for (gld = vlan->gldv_str_next;
2231 			    gld != (gld_t *)&vlan->gldv_str_next;
2232 			    gld = gld->gld_next) {
2233 				ASSERT(gld->gld_mac_info == macinfo);
2234 				gld->gld_sched_ran = B_TRUE;
2235 				membar_enter();
2236 				if (gld->gld_xwait) {
2237 					gld->gld_xwait = B_FALSE;
2238 					qenable(WR(gld->gld_qptr));
2239 				}
2240 			}
2241 		}
2242 	}
2243 
2244 	GLDM_UNLOCK(macinfo);
2245 }
2246 
2247 /*
2248  * gld_precv (macinfo, mp, vtag, stats)
2249  * called from gld_start to loopback a packet when in promiscuous mode
2250  *
2251  * VLAN 0's statistics need to be updated. If stats is not NULL,
2252  * it needs to be updated as well.
2253  */
2254 static void
2255 gld_precv(gld_mac_info_t *macinfo, mblk_t *mp, uint32_t vtag,
2256     struct gld_stats *stats)
2257 {
2258 	gld_mac_pvt_t *mac_pvt;
2259 	gld_interface_t *ifp;
2260 	pktinfo_t pktinfo;
2261 
2262 	ASSERT(GLDM_LOCK_HELD_WRITE(macinfo));
2263 
2264 	mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt;
2265 	ifp = mac_pvt->interfacep;
2266 
2267 	/*
2268 	 * call the media specific packet interpreter routine
2269 	 */
2270 	if ((*ifp->interpreter)(macinfo, mp, &pktinfo, GLD_RXLOOP) != 0) {
2271 		freemsg(mp);
2272 		BUMP(mac_pvt->statistics, stats, glds_rcvbadinterp, 1);
2273 #ifdef GLD_DEBUG
2274 		if (gld_debug & GLDERRS)
2275 			cmn_err(CE_WARN,
2276 			    "gld_precv: interpreter failed");
2277 #endif
2278 		return;
2279 	}
2280 
2281 	/*
2282 	 * Update the vtag information.
2283 	 */
2284 	pktinfo.isTagged = (vtag != VLAN_VID_NONE);
2285 	pktinfo.vid = GLD_VTAG_VID(vtag);
2286 	pktinfo.cfi = GLD_VTAG_CFI(vtag);
2287 	pktinfo.user_pri = GLD_VTAG_PRI(vtag);
2288 
2289 	gld_sendup(macinfo, &pktinfo, mp, gld_paccept);
2290 }
2291 
2292 /*
2293  * gld_recv (macinfo, mp)
2294  * called with a mac-level packet in an mblk; take the maclock,
2295  * try the ip4q and ip6q hack, and otherwise call gld_sendup.
2296  *
2297  * V0 drivers already are holding the mutex when they call us.
2298  */
2299 void
2300 gld_recv(gld_mac_info_t *macinfo, mblk_t *mp)
2301 {
2302 	gld_recv_tagged(macinfo, mp, VLAN_VTAG_NONE);
2303 }
2304 
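/*
 * gld_recv_tagged() is the tag-aware variant of gld_recv(): the caller
 * supplies the VLAN tag (a GLD_MAKE_VTAG()-style value) separately,
 * presumably because it has already been stripped from the frame data,
 * e.g. by receive hardware.
 */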
2305 void
2306 gld_recv_tagged(gld_mac_info_t *macinfo, mblk_t *mp, uint32_t vtag)
2307 {
2308 	gld_mac_pvt_t *mac_pvt;
2309 	char pbuf[3*GLD_MAX_ADDRLEN];
2310 	pktinfo_t pktinfo;
2311 	gld_interface_t *ifp;
2312 	queue_t *ipq = NULL;
2313 	gld_vlan_t *vlan = NULL, *vlan0 = NULL, *vlann = NULL;
2314 	struct gld_stats *stats0, *stats = NULL;
2315 	uint32_t vid;
2316 	int err;
2317 
2318 	ASSERT(macinfo != NULL);
2319 	ASSERT(mp->b_datap->db_ref);
2320 
2321 	GLDM_LOCK(macinfo, RW_READER);
2322 
2323 	if (macinfo->gldm_GLD_flags & GLD_UNREGISTERED) {
2324 		/* We're probably being called from a leftover interrupt */
2325 		freemsg(mp);
2326 		goto done;
2327 	}
2328 
2329 	/*
2330 	 * If this packet is a VLAN tagged packet, the kstats of the corresponding
2331 	 * "VLAN 0" should also be updated. We can directly access VLAN 0's
2332 	 * kstats from macinfo.
2333 	 *
2334 	 * Further, the packet needs to be passed to VLAN 0 if there is
2335 	 * any DLPI consumer on VLAN 0 that is interested in tagged packets
2336 	 * (DL_PROMISC_SAP is on, or it is bound to the ETHERTYPE_VLAN SAP).
2337 	 */
2338 	mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt;
2339 	stats0 = mac_pvt->statistics;
2340 
2341 	vid = GLD_VTAG_VID(vtag);
2342 	vlan0 = gld_find_vlan(macinfo, VLAN_VID_NONE);
2343 	if (vid != VLAN_VID_NONE) {
2344 		/*
2345 		 * If there are no physical DLPI consumers interested in the
2346 		 * VLAN packet, clear vlan0.
2347 		 */
2348 		if ((vlan0 != NULL) && (vlan0->gldv_nvlan_sap == 0))
2349 			vlan0 = NULL;
2350 		/*
2351 		 * vlann is the VLAN with the same VID as the VLAN packet.
2352 		 */
2353 		vlann = gld_find_vlan(macinfo, vid);
2354 		if (vlann != NULL)
2355 			stats = vlann->gldv_stats;
2356 	}
2357 
2358 	vlan = (vid == VLAN_VID_NONE) ? vlan0 : vlann;
2359 
2360 	ifp = mac_pvt->interfacep;
2361 	err = (*ifp->interpreter)(macinfo, mp, &pktinfo, GLD_RXQUICK);
2362 
2363 	BUMP(stats0, stats, glds_bytercv64, pktinfo.pktLen);
2364 	BUMP(stats0, stats, glds_pktrcv64, 1);
2365 
2366 	if ((vlann == NULL) && (vlan0 == NULL)) {
2367 		freemsg(mp);
2368 		goto done;
2369 	}
2370 
2371 	/*
2372 	 * Check whether underlying media code supports the IPQ hack:
2373 	 *
2374 	 * - the interpreter could quickly parse the packet
2375 	 * - the device type supports IPQ (ethernet and IPoIB)
2376 	 * - there is one, and only one, IP stream bound (to this VLAN)
2377 	 * - that stream is a "fastpath" stream
2378 	 * - the packet is of type ETHERTYPE_IP or ETHERTYPE_IPV6
2379 	 * - there are no streams in promiscuous mode (on this VLAN)
2380 	 * - if this packet is tagged, there is no need to send this
2381 	 *   packet to physical streams
2382 	 */
2383 	if ((err != 0) && ((vlan != NULL) && (vlan->gldv_nprom == 0)) &&
2384 	    (vlan == vlan0 || vlan0 == NULL)) {
2385 		switch (pktinfo.ethertype) {
2386 		case ETHERTYPE_IP:
2387 			ipq = vlan->gldv_ipq;
2388 			break;
2389 		case ETHERTYPE_IPV6:
2390 			ipq = vlan->gldv_ipv6q;
2391 			break;
2392 		}
2393 	}
2394 
2395 	/*
2396 	 * Special case for IP; we can simply do the putnext here, if:
2397 	 * o The IPQ hack is possible (ipq != NULL).
2398 	 * o the packet is specifically for me, and therefore:
2399 	 * - the packet is not multicast or broadcast (fastpath only
2400 	 *   wants unicast packets).
2401 	 *
2402 	 * o the stream is not asserting flow control.
2403 	 */
2404 	if (ipq != NULL &&
2405 	    pktinfo.isForMe &&
2406 	    canputnext(ipq)) {
2407 		/*
2408 		 * Skip the mac header. We know there is no LLC1/SNAP header
2409 		 * in this packet
2410 		 */
2411 		mp->b_rptr += pktinfo.macLen;
2412 		putnext(ipq, mp);
2413 		goto done;
2414 	}
2415 
2416 	/*
2417 	 * call the media specific packet interpreter routine
2418 	 */
2419 	if ((*ifp->interpreter)(macinfo, mp, &pktinfo, GLD_RX) != 0) {
2420 		BUMP(stats0, stats, glds_rcvbadinterp, 1);
2421 #ifdef GLD_DEBUG
2422 		if (gld_debug & GLDERRS)
2423 			cmn_err(CE_WARN,
2424 			    "gld_recv_tagged: interpreter failed");
2425 #endif
2426 		freemsg(mp);
2427 		goto done;
2428 	}
2429 
2430 	/*
2431 	 * This is safe even if vtag is VLAN_VTAG_NONE
2432 	 */
2433 	pktinfo.vid = vid;
2434 	pktinfo.cfi = GLD_VTAG_CFI(vtag);
2435 #ifdef GLD_DEBUG
2436 	if (pktinfo.cfi != VLAN_CFI_ETHER)
2437 		cmn_err(CE_WARN, "gld_recv_tagged: non-ETHER CFI");
2438 #endif
2439 	pktinfo.user_pri = GLD_VTAG_PRI(vtag);
2440 	pktinfo.isTagged = (vtag != VLAN_VID_NONE);
2441 
2442 #ifdef GLD_DEBUG
2443 	if ((gld_debug & GLDRECV) &&
2444 	    (!(gld_debug & GLDNOBR) ||
2445 	    (!pktinfo.isBroadcast && !pktinfo.isMulticast))) {
2446 		char pbuf2[3*GLD_MAX_ADDRLEN];
2447 
2448 		cmn_err(CE_CONT, "gld_recv_tagged: machdr=<%s -> %s>\n",
2449 		    gld_macaddr_sprintf(pbuf, pktinfo.shost,
2450 		    macinfo->gldm_addrlen), gld_macaddr_sprintf(pbuf2,
2451 		    pktinfo.dhost, macinfo->gldm_addrlen));
2452 		cmn_err(CE_CONT, "gld_recv_tagged: VlanId %d UserPri %d\n",
2453 		    pktinfo.vid,
2454 		    pktinfo.user_pri);
2455 		cmn_err(CE_CONT, "gld_recv_tagged: ethertype: %4x Len: %4d "
2456 		    "Hdr: %d,%d isMulticast: %s\n",
2457 		    pktinfo.ethertype,
2458 		    pktinfo.pktLen,
2459 		    pktinfo.macLen,
2460 		    pktinfo.hdrLen,
2461 		    pktinfo.isMulticast ? "Y" : "N");
2462 	}
2463 #endif
2464 
2465 	gld_sendup(macinfo, &pktinfo, mp, gld_accept);
2466 
2467 done:
2468 	GLDM_UNLOCK(macinfo);
2469 }
2470 
2471 /* =================================================================== */
2472 /* receive group: called from gld_recv and gld_precv* with maclock held */
2473 /* =================================================================== */
2474 
2475 /*
2476  * Search all the streams attached to the specified VLAN looking for
2477  * those eligible to receive the packet.
2478  * Note that in order to avoid an extra dupmsg(), if this is the first
2479  * eligible stream, remember it (in fgldp) so that we can send up the
2480  * message after this function returns.
2481  *
2482  * Returns an errno on failure. Currently the only error is ENOMEM.
2483  */
2484 static int
2485 gld_sendup_vlan(gld_vlan_t *vlan, pktinfo_t *pktinfo, mblk_t *mp,
2486     int (*acceptfunc)(), void (*send)(), int (*cansend)(), gld_t **fgldp)
2487 {
2488 	mblk_t *nmp;
2489 	gld_t *gld;
2490 	int err = 0;
2491 
2492 	ASSERT(vlan != NULL);
2493 	for (gld = vlan->gldv_str_next; gld != (gld_t *)&vlan->gldv_str_next;
2494 	    gld = gld->gld_next) {
2495 #ifdef GLD_VERBOSE_DEBUG
2496 		cmn_err(CE_NOTE, "gld_sendup_vlan: SAP: %4x QPTR: %p "
2497 		    "QSTATE: %s", gld->gld_sap, (void *)gld->gld_qptr,
2498 		    gld->gld_state == DL_IDLE ? "IDLE" : "NOT IDLE");
2499 #endif
2500 		ASSERT(gld->gld_qptr != NULL);
2501 		ASSERT(gld->gld_state == DL_IDLE ||
2502 		    gld->gld_state == DL_UNBOUND);
2503 		ASSERT(gld->gld_vlan == vlan);
2504 
2505 		if (gld->gld_state != DL_IDLE)
2506 			continue;	/* not eligible to receive */
2507 		if (gld->gld_flags & GLD_STR_CLOSING)
2508 			continue;	/* not eligible to receive */
2509 
2510 #ifdef GLD_DEBUG
2511 		if ((gld_debug & GLDRECV) &&
2512 		    (!(gld_debug & GLDNOBR) ||
2513 		    (!pktinfo->isBroadcast && !pktinfo->isMulticast)))
2514 			cmn_err(CE_NOTE,
2515 			    "gld_sendup: queue sap: %4x promis: %s %s %s",
2516 			    gld->gld_sap,
2517 			    gld->gld_flags & GLD_PROM_PHYS ? "phys " : "     ",
2518 			    gld->gld_flags & GLD_PROM_SAP  ? "sap  " : "     ",
2519 			    gld->gld_flags & GLD_PROM_MULT ? "multi" : "     ");
2520 #endif
2521 
2522 		/*
2523 		 * The accept function differs depending on whether this is
2524 		 * a packet that we received from the wire or a loopback.
2525 		 */
2526 		if ((*acceptfunc)(gld, pktinfo)) {
2527 			/* sap matches */
2528 			pktinfo->wasAccepted = 1; /* known protocol */
2529 
2530 			if (!(*cansend)(gld->gld_qptr)) {
2531 				/*
2532 				 * The upper stream is not accepting messages,
2533 				 * i.e. it is flow controlled, so we forgo
2534 				 * sending the message up this stream.
2535 				 */
2536 #ifdef GLD_DEBUG
2537 				if (gld_debug & GLDETRACE)
2538 					cmn_err(CE_WARN,
2539 					    "gld_sendup: canput failed");
2540 #endif
2541 				BUMP(vlan->gldv_stats, NULL, glds_blocked, 1);
2542 				qenable(gld->gld_qptr);
2543 				continue;
2544 			}
2545 
2546 			/*
2547 			 * In order to avoid an extra dupmsg(), remember this
2548 			 * gld if this is the first eligible stream.
2549 			 */
2550 			if (*fgldp == NULL) {
2551 				*fgldp = gld;
2552 				continue;
2553 			}
2554 
2555 			/* duplicate the packet for this stream */
2556 			nmp = dupmsg(mp);
2557 			if (nmp == NULL) {
2558 				BUMP(vlan->gldv_stats, NULL,
2559 				    glds_gldnorcvbuf, 1);
2560 #ifdef GLD_DEBUG
2561 				if (gld_debug & GLDERRS)
2562 					cmn_err(CE_WARN,
2563 					    "gld_sendup: dupmsg failed");
2564 #endif
2565 				/* couldn't get resources; drop it */
2566 				err = ENOMEM;
2567 				break;
2568 			}
2569 			/* pass the message up the stream */
2570 			gld_passon(gld, nmp, pktinfo, send);
2571 		}
2572 	}
2573 	return (err);
2574 }
2575 
2576 /*
2577  * gld_sendup (macinfo, pktinfo, mp, acceptfunc)
2578  * called with an ethernet packet in an mblk; must decide whether
2579  * packet is for us and which streams to queue it to.
2580  */
2581 static void
2582 gld_sendup(gld_mac_info_t *macinfo, pktinfo_t *pktinfo,
2583     mblk_t *mp, int (*acceptfunc)())
2584 {
2585 	gld_t *fgld = NULL;
2586 	void (*send)(queue_t *qp, mblk_t *mp);
2587 	int (*cansend)(queue_t *qp);
2588 	gld_vlan_t *vlan0, *vlann = NULL;
2589 	struct gld_stats *stats0, *stats = NULL;
2590 	int err = 0;
2591 
2592 #ifdef GLD_DEBUG
2593 	if (gld_debug & GLDTRACE)
2594 		cmn_err(CE_NOTE, "gld_sendup(%p, %p)", (void *)mp,
2595 		    (void *)macinfo);
2596 #endif
2597 
2598 	ASSERT(mp != NULL);
2599 	ASSERT(macinfo != NULL);
2600 	ASSERT(pktinfo != NULL);
2601 	ASSERT(GLDM_LOCK_HELD(macinfo));
2602 
2603 	/*
2604 	 * Tagged packets should also be looped back (transmit-side)
2605 	 * or sent up (receive-side) to VLAN 0 if VLAN 0 is set to
2606 	 * DL_PROMISC_SAP or there is any DLPI consumer bound to the
2607 	 * ETHERTYPE_VLAN SAP. The kstats of VLAN 0 need to be updated
2608 	 * as well.
2609 	 */
2610 	stats0 = ((gld_mac_pvt_t *)macinfo->gldm_mac_pvt)->statistics;
2611 	vlan0 = gld_find_vlan(macinfo, VLAN_VID_NONE);
2612 	if (pktinfo->vid != VLAN_VID_NONE) {
2613 		if ((vlan0 != NULL) && (vlan0->gldv_nvlan_sap == 0))
2614 			vlan0 = NULL;
2615 		vlann = gld_find_vlan(macinfo, pktinfo->vid);
2616 		if (vlann != NULL)
2617 			stats = vlann->gldv_stats;
2618 	}
2619 
2620 	ASSERT((vlan0 != NULL) || (vlann != NULL));
2621 
2622 	/*
2623 	 * The "fast" in "GLDOPT_FAST_RECV" refers to the speed at which
2624 	 * gld_recv returns to the caller's interrupt routine.  The total
2625 	 * network throughput would normally be lower when selecting this
2626 	 * option, because we putq the messages and process them later,
2627 	 * instead of sending them with putnext now.  Some time critical
2628 	 * device might need this, so it's here but undocumented.
2629 	 */
2630 	if (macinfo->gldm_options & GLDOPT_FAST_RECV) {
2631 		send = (void (*)(queue_t *, mblk_t *))(uintptr_t)putq;
2632 		cansend = canput;
2633 	} else {
2634 		send = putnext;
2635 		cansend = canputnext;
2636 	}
2637 
2638 	/*
2639 	 * Send the packets for all eligible streams.
2640 	 */
2641 	if (vlan0 != NULL) {
2642 		err = gld_sendup_vlan(vlan0, pktinfo, mp, acceptfunc, send,
2643 		    cansend, &fgld);
2644 	}
2645 	if ((err == 0) && (vlann != NULL)) {
2646 		err = gld_sendup_vlan(vlann, pktinfo, mp, acceptfunc, send,
2647 		    cansend, &fgld);
2648 	}
2649 
2650 	ASSERT(mp);
2651 	/* send the original dup of the packet up the first stream found */
2652 	if (fgld)
2653 		gld_passon(fgld, mp, pktinfo, send);
2654 	else
2655 		freemsg(mp);	/* no streams matched */
2656 
2657 	/* We do not count looped back packets */
2658 	if (acceptfunc == gld_paccept)
2659 		return;		/* transmit loopback case */
2660 
2661 	if (pktinfo->isBroadcast)
2662 		BUMP(stats0, stats, glds_brdcstrcv, 1);
2663 	else if (pktinfo->isMulticast)
2664 		BUMP(stats0, stats, glds_multircv, 1);
2665 
2666 	/* No stream accepted this packet */
2667 	if (!pktinfo->wasAccepted)
2668 		BUMP(stats0, stats, glds_unknowns, 1);
2669 }
2670 
2671 #define	GLD_IS_PHYS(gld)	\
2672 	(((gld_vlan_t *)gld->gld_vlan)->gldv_id == VLAN_VID_NONE)
2673 
2674 /*
2675  * A packet matches a stream if:
2676  *      The stream's VLAN id is the same as the one in the packet.
2677  *  and the stream accepts EtherType encoded packets and the type matches
2678  *  or  the stream accepts LLC packets and the packet is an LLC packet
2679  */
2680 #define	MATCH(stream, pktinfo) \
2681 	((((gld_vlan_t *)stream->gld_vlan)->gldv_id == pktinfo->vid) && \
2682 	((stream->gld_ethertype && stream->gld_sap == pktinfo->ethertype) || \
2683 	(!stream->gld_ethertype && pktinfo->isLLC)))
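/*
 * For example (illustrative only): an IP stream bound to SAP 0x0800 on
 * the physical link has gld_ethertype set, so it matches an untagged
 * IPv4 packet (vid VLAN_VID_NONE, ethertype 0x0800), while a stream
 * bound to an LLC SAP (gld_ethertype clear) matches only packets the
 * interpreter marked as isLLC.
 */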
2684 
2685 /*
2686  * This function validates a packet for sending up a particular
2687  * stream. The message header has been parsed and its characteristics
2688  * are recorded in the pktinfo data structure. The STREAMS stack
2689  * information is presented in the gld data structures.
2690  */
2691 static int
2692 gld_accept(gld_t *gld, pktinfo_t *pktinfo)
2693 {
2694 	/*
2695 	 * if there is no match do not bother checking further.
2696 	 * Note that it is okay to examine gld_vlan because
2697 	 * macinfo->gldm_lock is held.
2698 	 *
2699 	 * Because all tagged packets have SAP value ETHERTYPE_VLAN,
2700 	 * these packets will pass the SAP filter check if the stream
2701 	 * is an ETHERTYPE_VLAN listener.
2702 	 */
2703 	if ((!MATCH(gld, pktinfo) && !(gld->gld_flags & GLD_PROM_SAP) &&
2704 	    !(GLD_IS_PHYS(gld) && gld->gld_sap == ETHERTYPE_VLAN &&
2705 	    pktinfo->isTagged)))
2706 		return (0);
2707 
2708 	/*
2709 	 * We don't accept any packet from the hardware if we originated it.
2710 	 * (Contrast gld_paccept, the send-loopback accept function.)
2711 	 */
2712 	if (pktinfo->isLooped)
2713 		return (0);
2714 
2715 	/*
2716 	 * If the packet is broadcast or sent to us directly we will accept it.
2717 	 * Also we will accept multicast packets requested by the stream.
2718 	 */
2719 	if (pktinfo->isForMe || pktinfo->isBroadcast ||
2720 	    gld_mcmatch(gld, pktinfo))
2721 		return (1);
2722 
2723 	/*
2724 	 * Finally, accept anything else if we're in promiscuous mode
2725 	 */
2726 	if (gld->gld_flags & GLD_PROM_PHYS)
2727 		return (1);
2728 
2729 	return (0);
2730 }
2731 
2732 /*
2733  * Return TRUE if the given multicast address is one
2734  * of those that this particular Stream is interested in.
2735  */
2736 static int
2737 gld_mcmatch(gld_t *gld, pktinfo_t *pktinfo)
2738 {
2739 	/*
2740 	 * Return FALSE if not a multicast address.
2741 	 */
2742 	if (!pktinfo->isMulticast)
2743 		return (0);
2744 
2745 	/*
2746 	 * Check if all multicasts have been enabled for this Stream
2747 	 */
2748 	if (gld->gld_flags & GLD_PROM_MULT)
2749 		return (1);
2750 
2751 	/*
2752 	 * Return FALSE if no multicast addresses enabled for this Stream.
2753 	 */
2754 	if (!gld->gld_mcast)
2755 		return (0);
2756 
2757 	/*
2758 	 * Otherwise, look for it in the table.
2759 	 */
2760 	return (gld_multicast(pktinfo->dhost, gld));
2761 }
2762 
2763 /*
2764  * gld_multicast determines if the address is a multicast address for
2765  * this stream.
2766  */
2767 static int
2768 gld_multicast(unsigned char *macaddr, gld_t *gld)
2769 {
2770 	int i;
2771 
2772 	ASSERT(GLDM_LOCK_HELD(gld->gld_mac_info));
2773 
2774 	if (!gld->gld_mcast)
2775 		return (0);
2776 
2777 	for (i = 0; i < gld->gld_multicnt; i++) {
2778 		if (gld->gld_mcast[i]) {
2779 			ASSERT(gld->gld_mcast[i]->gldm_refcnt);
2780 			if (mac_eq(gld->gld_mcast[i]->gldm_addr, macaddr,
2781 			    gld->gld_mac_info->gldm_addrlen))
2782 				return (1);
2783 		}
2784 	}
2785 
2786 	return (0);
2787 }
2788 
2789 /*
2790  * accept function for looped back packets
2791  */
2792 static int
2793 gld_paccept(gld_t *gld, pktinfo_t *pktinfo)
2794 {
2795 	/*
2796 	 * Note that it is okay to examine gld_vlan because macinfo->gldm_lock
2797 	 * is held.
2798 	 *
2799 	 * If a stream is a ETHERTYPE_VLAN listener, it must
2800 	 * accept all tagged packets as those packets have SAP value
2801 	 * ETHERTYPE_VLAN.
2802 	 */
2803 	return (gld->gld_flags & GLD_PROM_PHYS &&
2804 	    (MATCH(gld, pktinfo) || gld->gld_flags & GLD_PROM_SAP ||
2805 	    (GLD_IS_PHYS(gld) && gld->gld_sap == ETHERTYPE_VLAN &&
2806 	    pktinfo->isTagged)));
2807 
2808 }
2809 
2810 static void
2811 gld_passon(gld_t *gld, mblk_t *mp, pktinfo_t *pktinfo,
2812     void (*send)(queue_t *qp, mblk_t *mp))
2813 {
2814 	boolean_t is_phys = GLD_IS_PHYS(gld);
2815 	int skiplen;
2816 	boolean_t addtag = B_FALSE;
2817 	uint32_t vtag = 0;
2818 
2819 #ifdef GLD_DEBUG
2820 	if (gld_debug & GLDTRACE)
2821 		cmn_err(CE_NOTE, "gld_passon(%p, %p, %p)", (void *)gld,
2822 		    (void *)mp, (void *)pktinfo);
2823 
2824 	if ((gld_debug & GLDRECV) && (!(gld_debug & GLDNOBR) ||
2825 	    (!pktinfo->isBroadcast && !pktinfo->isMulticast)))
2826 		cmn_err(CE_NOTE, "gld_passon: q: %p mblk: %p minor: %d sap: %x",
2827 		    (void *)gld->gld_qptr->q_next, (void *)mp, gld->gld_minor,
2828 		    gld->gld_sap);
2829 #endif
2830 	/*
2831 	 * Figure out how much of the packet header to throw away.
2832 	 *
2833 	 * Normal DLPI (non RAW/FAST) streams also want the
2834 	 * DL_UNITDATA_IND M_PROTO message block prepended to the M_DATA.
2835 	 */
2836 	if (gld->gld_flags & GLD_RAW) {
2837 		/*
2838 		 * The packet will be tagged in the following cases:
2839 		 *   - if priority is not 0
2840 		 *   - a tagged packet sent on a physical link
2841 		 */
2842 		if ((pktinfo->isTagged && is_phys) || (pktinfo->user_pri != 0))
2843 			addtag = B_TRUE;
2844 		skiplen = 0;
2845 	} else {
2846 		/*
2847 		 * The packet will be tagged if it meets all of the following:
2848 		 *   -  this is a physical stream
2849 		 *   -  this packet is a tagged packet
2850 		 *   -  the stream is either a DL_PROMISC_SAP listener or an
2851 		 *	ETHERTYPE_VLAN listener
2852 		 */
2853 		if (is_phys && pktinfo->isTagged &&
2854 		    ((gld->gld_sap == ETHERTYPE_VLAN) ||
2855 		    (gld->gld_flags & GLD_PROM_SAP))) {
2856 			addtag = B_TRUE;
2857 		}
2858 
2859 		skiplen = pktinfo->macLen;		/* skip mac header */
2860 		if (gld->gld_ethertype)
2861 			skiplen += pktinfo->hdrLen;	/* skip any extra */
2862 	}
2863 	if (skiplen >= pktinfo->pktLen) {
2864 		/*
2865 		 * If the interpreter did its job right, then it cannot be
2866 		 * asking us to skip more bytes than are in the packet!
2867 		 * However, there could be zero data bytes left after the
2868 		 * amount to skip.  DLPI specifies that passed M_DATA blocks
2869 		 * should contain at least one byte of data, so if we have
2870 		 * none we just drop it.
2871 		 */
2872 		ASSERT(!(skiplen > pktinfo->pktLen));
2873 		freemsg(mp);
2874 		return;
2875 	}
2876 
2877 	if (addtag) {
2878 		mblk_t *savemp = mp;
2879 
2880 		vtag = GLD_MAKE_VTAG(pktinfo->user_pri, pktinfo->cfi,
2881 		    is_phys ? pktinfo->vid : VLAN_VID_NONE);
2882 		if ((mp = gld_insert_vtag_ether(mp, vtag)) == NULL) {
2883 			freemsg(savemp);
2884 			return;
2885 		}
2886 	}
2887 
2888 	/*
2889 	 * Skip over the header(s), taking care to possibly handle message
2890 	 * fragments shorter than the amount we need to skip.  Hopefully
2891 	 * the driver will put the entire packet, or at least the entire
2892 	 * header, into a single message block.  But we handle it if not.
2893 	 */
2894 	while (skiplen >= MBLKL(mp)) {
2895 		mblk_t *savemp = mp;
2896 		skiplen -= MBLKL(mp);
2897 		mp = mp->b_cont;
2898 		ASSERT(mp != NULL);	/* because skiplen < pktinfo->pktLen */
2899 		freeb(savemp);
2900 	}
2901 	mp->b_rptr += skiplen;
2902 
2903 	/* Add M_PROTO if necessary, and pass upstream */
2904 	if (((gld->gld_flags & GLD_FAST) && !pktinfo->isMulticast &&
2905 	    !pktinfo->isBroadcast) || (gld->gld_flags & GLD_RAW)) {
2906 		/* RAW/FAST: just send up the M_DATA */
2907 		(*send)(gld->gld_qptr, mp);
2908 	} else {
2909 		/* everybody else wants to see a unitdata_ind structure */
2910 		mp = gld_addudind(gld, mp, pktinfo, addtag);
2911 		if (mp)
2912 			(*send)(gld->gld_qptr, mp);
2913 		/* if it failed, gld_addudind already bumped statistic */
2914 	}
2915 }
2916 
2917 /*
2918  * gld_addudind(gld, mp, pktinfo)
2919  * format a DL_UNITDATA_IND message to be sent upstream to the user
2920  */
2921 static mblk_t *
2922 gld_addudind(gld_t *gld, mblk_t *mp, pktinfo_t *pktinfo, boolean_t tagged)
2923 {
2924 	gld_mac_info_t		*macinfo = gld->gld_mac_info;
2925 	gld_vlan_t		*vlan = (gld_vlan_t *)gld->gld_vlan;
2926 	dl_unitdata_ind_t	*dludindp;
2927 	mblk_t			*nmp;
2928 	int			size;
2929 	int			type;
2930 
2931 #ifdef GLD_DEBUG
2932 	if (gld_debug & GLDTRACE)
2933 		cmn_err(CE_NOTE, "gld_addudind(%p, %p, %p)", (void *)gld,
2934 		    (void *)mp, (void *)pktinfo);
2935 #endif
2936 	ASSERT(macinfo != NULL);
2937 
2938 	/*
2939 	 * Allocate the DL_UNITDATA_IND M_PROTO header; if allocation fails
2940 	 * we might as well discard the packet since we can't go further.
2941 	 */
2942 	size = sizeof (dl_unitdata_ind_t) +
2943 	    2 * (macinfo->gldm_addrlen + abs(macinfo->gldm_saplen));
2944 	if ((nmp = allocb(size, BPRI_MED)) == NULL) {
2945 		freemsg(mp);
2946 		BUMP(vlan->gldv_stats, NULL, glds_gldnorcvbuf, 1);
2947 #ifdef GLD_DEBUG
2948 		if (gld_debug & GLDERRS)
2949 			cmn_err(CE_WARN,
2950 			    "gld_addudind: allocb failed");
2951 #endif
2952 		return ((mblk_t *)NULL);
2953 	}
2954 	DB_TYPE(nmp) = M_PROTO;
2955 	nmp->b_rptr = nmp->b_datap->db_lim - size;
2956 
2957 	if (tagged)
2958 		type = ETHERTYPE_VLAN;
2959 	else
2960 		type = (gld->gld_ethertype) ? pktinfo->ethertype : 0;
2961 
2962 
2963 	/*
2964 	 * now setup the DL_UNITDATA_IND header
2965 	 *
2966 	 * XXX This looks broken if the saps aren't two bytes.
2967 	 */
2968 	dludindp = (dl_unitdata_ind_t *)nmp->b_rptr;
2969 	dludindp->dl_primitive = DL_UNITDATA_IND;
2970 	dludindp->dl_src_addr_length =
2971 	    dludindp->dl_dest_addr_length = macinfo->gldm_addrlen +
2972 	    abs(macinfo->gldm_saplen);
2973 	dludindp->dl_dest_addr_offset = sizeof (dl_unitdata_ind_t);
2974 	dludindp->dl_src_addr_offset = dludindp->dl_dest_addr_offset +
2975 	    dludindp->dl_dest_addr_length;
2976 
2977 	dludindp->dl_group_address = (pktinfo->isMulticast ||
2978 	    pktinfo->isBroadcast);
2979 
2980 	nmp->b_wptr = nmp->b_rptr + dludindp->dl_dest_addr_offset;
2981 
2982 	mac_copy(pktinfo->dhost, nmp->b_wptr, macinfo->gldm_addrlen);
2983 	nmp->b_wptr += macinfo->gldm_addrlen;
2984 
2985 	ASSERT(macinfo->gldm_saplen == -2);	/* XXX following code assumes */
2986 	*(ushort_t *)(nmp->b_wptr) = type;
2987 	nmp->b_wptr += abs(macinfo->gldm_saplen);
2988 
2989 	ASSERT(nmp->b_wptr == nmp->b_rptr + dludindp->dl_src_addr_offset);
2990 
2991 	mac_copy(pktinfo->shost, nmp->b_wptr, macinfo->gldm_addrlen);
2992 	nmp->b_wptr += macinfo->gldm_addrlen;
2993 
2994 	*(ushort_t *)(nmp->b_wptr) = type;
2995 	nmp->b_wptr += abs(macinfo->gldm_saplen);
2996 
2997 	if (pktinfo->nosource)
2998 		dludindp->dl_src_addr_offset = dludindp->dl_src_addr_length = 0;
2999 	linkb(nmp, mp);
3000 	return (nmp);
3001 }
3002 
3003 /* ======================================================= */
3004 /* wsrv group: called from wsrv, single threaded per queue */
3005 /* ======================================================= */
3006 
3007 /*
3008  * We go to some trouble to avoid taking the same lock during normal
3009  * transmit processing as we do during normal receive processing.
3010  *
3011  * Elements of the per-instance macinfo and per-stream gld_t structures
3012  * are for the most part protected by the GLDM_LOCK rwlock/mutex.
3013  * (Elements of the gld_mac_pvt_t structure are considered part of the
3014  * macinfo structure for purposes of this discussion).
3015  *
3016  * However, it is more complicated than that:
3017  *
3018  *	Elements of the macinfo structure that are set before the macinfo
3019  *	structure is added to its device list by gld_register(), and never
3020  *	thereafter modified, are accessed without requiring taking the lock.
3021  *	A similar rule applies to those elements of the gld_t structure that
3022  *	are written by gld_open() before the stream is added to any list.
3023  *
3024  *	Most other elements of the macinfo structure may only be read or
3025  *	written while holding the maclock.
3026  *
3027  *	Most writable elements of the gld_t structure are written only
3028  *	within the single-threaded domain of wsrv() and subsidiaries.
3029  *	(This domain includes open/close while qprocs are not on.)
3030  *	The maclock need not be taken while within that domain
3031  *	simply to read those elements.  Writing to them, even within
3032  *	that domain, or reading from it outside that domain, requires
3033  *	holding the maclock.  Exception:  if the stream is not
3034  *	presently attached to a PPA, there is no associated macinfo,
3035  *	and no maclock need be taken.
3036  *
3037  *	The curr_macaddr element of the mac private structure is also
3038  *      protected by the GLDM_LOCK rwlock/mutex, like most other members
3039  *      of that structure. However, there are a few instances in the
3040  *      transmit path where we choose to forgo lock protection when
3041  *      reading this variable. This is to avoid lock contention between
3042  *      threads executing the DL_UNITDATA_REQ case and receive threads.
3043  *      In doing so we will take a small risk of a few corrupted packets
3044  *      during the short and rare times when someone is changing the interface's
3045  *      physical address. We consider the small cost in this rare case to be
3046  *      worth the benefit of reduced lock contention under normal operating
3047  *      conditions. The risk/cost is small because:
3048  *          1. there is no guarantee at this layer of uncorrupted delivery.
3049  *          2. the physaddr doesn't change very often - no performance hit.
3050  *          3. if the physaddr changes, other stuff is going to be screwed
3051  *             up for a while anyway, while other sites refigure ARP, etc.,
3052  *             so losing a couple of packets is the least of our worries.
3053  *
3054  *	The list of streams associated with a macinfo is protected by
3055  *	two locks:  the per-macinfo maclock, and the per-major-device
3056  *	gld_devlock.  Both must be held to modify the list, but either
3057  *	may be held to protect the list during reading/traversing.  This
3058  *	allows independent locking for multiple instances in the receive
3059  *	path (using macinfo), while facilitating routines that must search
3060  *	the entire set of streams associated with a major device, such as
3061  *	gld_findminor(), gld_finddevinfo(), close().  The "nstreams"
3062  *	macinfo	element, and the gld_mac_info gld_t element, are similarly
3063  *	protected, since they change at exactly the same time the macinfo
3064  *	streams list does.
3065  *
3066  *	The list of macinfo structures associated with a major device
3067  *	structure is protected by the gld_devlock, as is the per-major
3068  *	list of Style 2 streams in the DL_UNATTACHED state.
3069  *
3070  *	The list of major devices is kept on a module-global list
3071  *	gld_device_list, which has its own lock to protect the list.
3072  *
3073  *	When it is necessary to hold more than one lock at a time, they
3074  *	are acquired in this "outside in" order:
3075  *		gld_device_list.gld_devlock
3076  *		glddev->gld_devlock
3077  *		GLDM_LOCK(macinfo)
3078  *
3079  *	Finally, there are some "volatile" elements of the gld_t structure
3080  *	used for synchronization between various routines that don't share
3081  *	the same mutexes.  See the routines for details.  These are:
3082  *		gld_xwait	between gld_wsrv() and gld_sched()
3083  *		gld_sched_ran	between gld_wsrv() and gld_sched()
3084  *		gld_in_unbind	between gld_wput() and wsrv's gld_unbind()
3085  *		gld_wput_count	between gld_wput() and wsrv's gld_unbind()
3086  *		gld_in_wsrv	between gld_wput() and gld_wsrv()
3087  *				(used in conjunction with q->q_first)
3088  */
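/*
 * Illustrative sketch (not part of the original source): taking the locks
 * in the documented "outside in" order when more than one is needed, and
 * releasing them in the reverse order:
 *
 *	mutex_enter(&gld_device_list.gld_devlock);
 *	mutex_enter(&glddev->gld_devlock);
 *	GLDM_LOCK(macinfo, RW_READER);
 *	... examine the streams hanging off this macinfo ...
 *	GLDM_UNLOCK(macinfo);
 *	mutex_exit(&glddev->gld_devlock);
 *	mutex_exit(&gld_device_list.gld_devlock);
 */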
3089 
3090 /*
3091  * gld_ioctl (q, mp)
3092  * handles all ioctl requests passed downstream. This routine is
3093  * passed a pointer to the message block with the ioctl request in it, and a
3094  * pointer to the queue so it can respond to the ioctl request with an ack.
3095  */
3096 int
3097 gld_ioctl(queue_t *q, mblk_t *mp)
3098 {
3099 	struct iocblk *iocp;
3100 	gld_t *gld;
3101 	gld_mac_info_t *macinfo;
3102 
3103 #ifdef GLD_DEBUG
3104 	if (gld_debug & GLDTRACE)
3105 		cmn_err(CE_NOTE, "gld_ioctl(%p %p)", (void *)q, (void *)mp);
3106 #endif
3107 	gld = (gld_t *)q->q_ptr;
3108 	iocp = (struct iocblk *)mp->b_rptr;
3109 	switch (iocp->ioc_cmd) {
3110 	case DLIOCRAW:		/* raw M_DATA mode */
3111 		gld->gld_flags |= GLD_RAW;
3112 		DB_TYPE(mp) = M_IOCACK;
3113 		qreply(q, mp);
3114 		break;
3115 
3116 	case DL_IOC_HDR_INFO:	/* fastpath */
3117 		/*
3118 		 * DL_IOC_HDR_INFO should only come from IP. The one
3119 		 * initiated from user-land should not be allowed.
3120 		 */
3121 		if ((gld_global_options & GLD_OPT_NO_FASTPATH) ||
3122 		    (iocp->ioc_cr != kcred)) {
3123 			miocnak(q, mp, 0, EINVAL);
3124 			break;
3125 		}
3126 		gld_fastpath(gld, q, mp);
3127 		break;
3128 
3129 	case DLIOCMARGININFO: {	/* margin size */
3130 		int err;
3131 
3132 		if ((macinfo = gld->gld_mac_info) == NULL) {
3133 			miocnak(q, mp, 0, EINVAL);
3134 			break;
3135 		}
3136 
3137 		if ((err = miocpullup(mp, sizeof (uint32_t))) != 0) {
3138 			miocnak(q, mp, 0, err);
3139 			break;
3140 		}
3141 
3142 		*((uint32_t *)mp->b_cont->b_rptr) = macinfo->gldm_margin;
3143 		miocack(q, mp, sizeof (uint32_t), 0);
3144 		break;
3145 	}
3146 	default:
3147 		macinfo	 = gld->gld_mac_info;
3148 		if (macinfo == NULL || macinfo->gldm_ioctl == NULL) {
3149 			miocnak(q, mp, 0, EINVAL);
3150 			break;
3151 		}
3152 
3153 		GLDM_LOCK(macinfo, RW_WRITER);
3154 		(void) (*macinfo->gldm_ioctl) (macinfo, q, mp);
3155 		GLDM_UNLOCK(macinfo);
3156 		break;
3157 	}
3158 	return (0);
3159 }
3160 
3161 /*
3162  * Since the rules for "fastpath" mode don't seem to be documented
3163  * anywhere, I will describe GLD's rules for fastpath users here:
3164  *
3165  * Once in this mode you remain there until close.
3166  * If you unbind/rebind you should get a new header using DL_IOC_HDR_INFO.
3167  * You must be bound (DL_IDLE) to transmit.
3168  * There are other rules not listed above.
3169  */
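/*
 * Illustrative sketch (not part of the original source) of the message a
 * fastpath consumer sends down, matching the checks in gld_fastpath()
 * below, and of what comes back on success:
 *
 *	M_IOCTL mblk:	struct iocblk, ioc_cmd == DL_IOC_HDR_INFO
 *	b_cont:		dl_unitdata_req_t followed by the DLSAP address
 *			(MAC address plus SAP, dl_dest_addr_length bytes)
 *	on M_IOCACK:	an additional mblk, linked after the request by
 *			gld_fastpath(), holding the prebuilt MAC header to
 *			prepend to outbound M_DATA messages
 */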
3170 static void
3171 gld_fastpath(gld_t *gld, queue_t *q, mblk_t *mp)
3172 {
3173 	gld_interface_t *ifp;
3174 	gld_mac_info_t *macinfo;
3175 	dl_unitdata_req_t *dludp;
3176 	mblk_t *nmp;
3177 	t_scalar_t off, len;
3178 	uint_t maclen;
3179 	int error;
3180 
3181 	if (gld->gld_state != DL_IDLE) {
3182 		miocnak(q, mp, 0, EINVAL);
3183 		return;
3184 	}
3185 
3186 	macinfo = gld->gld_mac_info;
3187 	ASSERT(macinfo != NULL);
3188 	maclen = macinfo->gldm_addrlen + abs(macinfo->gldm_saplen);
3189 
3190 	error = miocpullup(mp, sizeof (dl_unitdata_req_t) + maclen);
3191 	if (error != 0) {
3192 		miocnak(q, mp, 0, error);
3193 		return;
3194 	}
3195 
3196 	dludp = (dl_unitdata_req_t *)mp->b_cont->b_rptr;
3197 	off = dludp->dl_dest_addr_offset;
3198 	len = dludp->dl_dest_addr_length;
3199 	if (dludp->dl_primitive != DL_UNITDATA_REQ ||
3200 	    !MBLKIN(mp->b_cont, off, len) || len != maclen) {
3201 		miocnak(q, mp, 0, EINVAL);
3202 		return;
3203 	}
3204 
3205 	/*
3206 	 * We take the fastpath request as a declaration that they will accept
3207 	 * M_DATA messages from us, whether or not we are willing to accept
3208 	 * M_DATA from them.  This allows us to have fastpath in one direction
3209 	 * (flow upstream) even on media with Source Routing, where we are
3210 	 * unable to provide a fixed MAC header to be prepended to downstream
3211 	 * flowing packets.  So we set GLD_FAST whether or not we decide to
3212 	 * allow them to send M_DATA down to us.
3213 	 */
3214 	GLDM_LOCK(macinfo, RW_WRITER);
3215 	gld->gld_flags |= GLD_FAST;
3216 	GLDM_UNLOCK(macinfo);
3217 
3218 	ifp = ((gld_mac_pvt_t *)macinfo->gldm_mac_pvt)->interfacep;
3219 
3220 	/* This will fail for Source Routing media */
3221 	/* It also fails on Ethernet for 802.2 SAPs */
3222 	if ((nmp = (*ifp->mkfastpath)(gld, mp)) == NULL) {
3223 		miocnak(q, mp, 0, ENOMEM);
3224 		return;
3225 	}
3226 
3227 	/*
3228 	 * Link new mblk in after the "request" mblks.
3229 	 */
3230 	linkb(mp, nmp);
3231 	miocack(q, mp, msgdsize(mp->b_cont), 0);
3232 }
3233 
3234 /*
3235  * gld_cmds (q, mp)
3236  *	process the DL commands as defined in dlpi.h
3237  *	note that the primitives return status which is passed back
3238  *	to the service procedure.  If the value is GLDE_RETRY, then
3239  *	it is assumed that processing must stop and the primitive has
3240  *	been put back onto the queue.  If the value is any other error,
3241  *	then an error ack is generated by the service procedure.
3242  */
3243 static int
3244 gld_cmds(queue_t *q, mblk_t *mp)
3245 {
3246 	union DL_primitives *dlp = (union DL_primitives *)mp->b_rptr;
3247 	gld_t *gld = (gld_t *)(q->q_ptr);
3248 	int result = DL_BADPRIM;
3249 	int mblkl = MBLKL(mp);
3250 	t_uscalar_t dlreq;
3251 
3252 	/* Make sure we have at least dlp->dl_primitive */
3253 	if (mblkl < sizeof (dlp->dl_primitive))
3254 		return (DL_BADPRIM);
3255 
3256 	dlreq = dlp->dl_primitive;
3257 #ifdef	GLD_DEBUG
3258 	if (gld_debug & GLDTRACE)
3259 		cmn_err(CE_NOTE,
3260 		    "gld_cmds(%p, %p):dlp=%p, dlp->dl_primitive=%d",
3261 		    (void *)q, (void *)mp, (void *)dlp, dlreq);
3262 #endif
3263 
3264 	switch (dlreq) {
3265 	case DL_UDQOS_REQ:
3266 		if (mblkl < DL_UDQOS_REQ_SIZE)
3267 			break;
3268 		result = gld_udqos(q, mp);
3269 		break;
3270 
3271 	case DL_BIND_REQ:
3272 		if (mblkl < DL_BIND_REQ_SIZE)
3273 			break;
3274 		result = gld_bind(q, mp);
3275 		break;
3276 
3277 	case DL_UNBIND_REQ:
3278 		if (mblkl < DL_UNBIND_REQ_SIZE)
3279 			break;
3280 		result = gld_unbind(q, mp);
3281 		break;
3282 
3283 	case DL_UNITDATA_REQ:
3284 		if (mblkl < DL_UNITDATA_REQ_SIZE)
3285 			break;
3286 		result = gld_unitdata(q, mp);
3287 		break;
3288 
3289 	case DL_INFO_REQ:
3290 		if (mblkl < DL_INFO_REQ_SIZE)
3291 			break;
3292 		result = gld_inforeq(q, mp);
3293 		break;
3294 
3295 	case DL_ATTACH_REQ:
3296 		if (mblkl < DL_ATTACH_REQ_SIZE)
3297 			break;
3298 		if (gld->gld_style == DL_STYLE2)
3299 			result = gldattach(q, mp);
3300 		else
3301 			result = DL_NOTSUPPORTED;
3302 		break;
3303 
3304 	case DL_DETACH_REQ:
3305 		if (mblkl < DL_DETACH_REQ_SIZE)
3306 			break;
3307 		if (gld->gld_style == DL_STYLE2)
3308 			result = gldunattach(q, mp);
3309 		else
3310 			result = DL_NOTSUPPORTED;
3311 		break;
3312 
3313 	case DL_ENABMULTI_REQ:
3314 		if (mblkl < DL_ENABMULTI_REQ_SIZE)
3315 			break;
3316 		result = gld_enable_multi(q, mp);
3317 		break;
3318 
3319 	case DL_DISABMULTI_REQ:
3320 		if (mblkl < DL_DISABMULTI_REQ_SIZE)
3321 			break;
3322 		result = gld_disable_multi(q, mp);
3323 		break;
3324 
3325 	case DL_PHYS_ADDR_REQ:
3326 		if (mblkl < DL_PHYS_ADDR_REQ_SIZE)
3327 			break;
3328 		result = gld_physaddr(q, mp);
3329 		break;
3330 
3331 	case DL_SET_PHYS_ADDR_REQ:
3332 		if (mblkl < DL_SET_PHYS_ADDR_REQ_SIZE)
3333 			break;
3334 		result = gld_setaddr(q, mp);
3335 		break;
3336 
3337 	case DL_PROMISCON_REQ:
3338 		if (mblkl < DL_PROMISCON_REQ_SIZE)
3339 			break;
3340 		result = gld_promisc(q, mp, dlreq, B_TRUE);
3341 		break;
3342 
3343 	case DL_PROMISCOFF_REQ:
3344 		if (mblkl < DL_PROMISCOFF_REQ_SIZE)
3345 			break;
3346 		result = gld_promisc(q, mp, dlreq, B_FALSE);
3347 		break;
3348 
3349 	case DL_GET_STATISTICS_REQ:
3350 		if (mblkl < DL_GET_STATISTICS_REQ_SIZE)
3351 			break;
3352 		result = gld_get_statistics(q, mp);
3353 		break;
3354 
3355 	case DL_CAPABILITY_REQ:
3356 		if (mblkl < DL_CAPABILITY_REQ_SIZE)
3357 			break;
3358 		result = gld_cap(q, mp);
3359 		break;
3360 
3361 	case DL_NOTIFY_REQ:
3362 		if (mblkl < DL_NOTIFY_REQ_SIZE)
3363 			break;
3364 		result = gld_notify_req(q, mp);
3365 		break;
3366 
3367 	case DL_XID_REQ:
3368 	case DL_XID_RES:
3369 	case DL_TEST_REQ:
3370 	case DL_TEST_RES:
3371 	case DL_CONTROL_REQ:
3372 	case DL_PASSIVE_REQ:
3373 		result = DL_NOTSUPPORTED;
3374 		break;
3375 
3376 	default:
3377 #ifdef	GLD_DEBUG
3378 		if (gld_debug & GLDERRS)
3379 			cmn_err(CE_WARN,
3380 			    "gld_cmds: unknown M_PROTO message: %d",
3381 			    dlreq);
3382 #endif
3383 		result = DL_BADPRIM;
3384 	}
3385 
3386 	return (result);
3387 }
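
/*
 * Illustrative sketch (an assumption about a typical caller, not the actual
 * put/service code elsewhere in this file): the result protocol described
 * in the block comment above gld_cmds() would be consumed roughly like so.
 *
 *	static void
 *	example_wsrv_step(queue_t *q, mblk_t *mp)
 *	{
 *		t_uscalar_t prim =
 *		    ((union DL_primitives *)mp->b_rptr)->dl_primitive;
 *		int result = gld_cmds(q, mp);
 *
 *		if (result == GLDE_RETRY)
 *			return;		// requeued; stop processing for now
 *		else if (result != GLDE_OK)
 *			dlerrorack(q, mp, prim, result, 0);
 *		// on GLDE_OK the primitive handler already sent its reply
 *	}
 */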
3388 
3389 static int
3390 gld_cap(queue_t *q, mblk_t *mp)
3391 {
3392 	gld_t *gld = (gld_t *)q->q_ptr;
3393 	dl_capability_req_t *dlp = (dl_capability_req_t *)mp->b_rptr;
3394 
3395 	if (gld->gld_state == DL_UNATTACHED)
3396 		return (DL_OUTSTATE);
3397 
3398 	if (dlp->dl_sub_length == 0)
3399 		return (gld_cap_ack(q, mp));
3400 
3401 	return (gld_cap_enable(q, mp));
3402 }
3403 
3404 static int
3405 gld_cap_ack(queue_t *q, mblk_t *mp)
3406 {
3407 	gld_t *gld = (gld_t *)q->q_ptr;
3408 	gld_mac_info_t *macinfo = gld->gld_mac_info;
3409 	dl_capability_ack_t *dlap;
3410 	dl_capability_sub_t *dlsp;
3411 	size_t size = sizeof (dl_capability_ack_t);
3412 	size_t subsize = 0;
3413 
3414 	if (macinfo->gldm_capabilities & GLD_CAP_CKSUM_ANY)
3415 		subsize += sizeof (dl_capability_sub_t) +
3416 		    sizeof (dl_capab_hcksum_t);
3417 	if (macinfo->gldm_capabilities & GLD_CAP_ZEROCOPY)
3418 		subsize += sizeof (dl_capability_sub_t) +
3419 		    sizeof (dl_capab_zerocopy_t);
3420 
3421 	if ((mp = mexchange(q, mp, size + subsize, M_PROTO,
3422 	    DL_CAPABILITY_ACK)) == NULL)
3423 		return (GLDE_OK);
3424 
3425 	dlap = (dl_capability_ack_t *)mp->b_rptr;
3426 	dlap->dl_sub_offset = 0;
3427 	if ((dlap->dl_sub_length = subsize) != 0)
3428 		dlap->dl_sub_offset = sizeof (dl_capability_ack_t);
3429 	dlsp = (dl_capability_sub_t *)&dlap[1];
3430 
3431 	if (macinfo->gldm_capabilities & GLD_CAP_CKSUM_ANY) {
3432 		dl_capab_hcksum_t *dlhp = (dl_capab_hcksum_t *)&dlsp[1];
3433 
3434 		dlsp->dl_cap = DL_CAPAB_HCKSUM;
3435 		dlsp->dl_length = sizeof (dl_capab_hcksum_t);
3436 
3437 		dlhp->hcksum_version = HCKSUM_VERSION_1;
3438 
3439 		dlhp->hcksum_txflags = 0;
3440 		if (macinfo->gldm_capabilities & GLD_CAP_CKSUM_PARTIAL)
3441 			dlhp->hcksum_txflags |= HCKSUM_INET_PARTIAL;
3442 		if (macinfo->gldm_capabilities & GLD_CAP_CKSUM_FULL_V4)
3443 			dlhp->hcksum_txflags |= HCKSUM_INET_FULL_V4;
3444 		if (macinfo->gldm_capabilities & GLD_CAP_CKSUM_FULL_V6)
3445 			dlhp->hcksum_txflags |= HCKSUM_INET_FULL_V6;
3446 		if (macinfo->gldm_capabilities & GLD_CAP_CKSUM_IPHDR)
3447 			dlhp->hcksum_txflags |= HCKSUM_IPHDRCKSUM;
3448 
3449 		dlcapabsetqid(&(dlhp->hcksum_mid), RD(q));
3450 		dlsp = (dl_capability_sub_t *)&dlhp[1];
3451 	}
3452 
3453 	if (macinfo->gldm_capabilities & GLD_CAP_ZEROCOPY) {
3454 		dl_capab_zerocopy_t *dlzp = (dl_capab_zerocopy_t *)&dlsp[1];
3455 
3456 		dlsp->dl_cap = DL_CAPAB_ZEROCOPY;
3457 		dlsp->dl_length = sizeof (dl_capab_zerocopy_t);
3458 		dlzp->zerocopy_version = ZEROCOPY_VERSION_1;
3459 		dlzp->zerocopy_flags = DL_CAPAB_VMSAFE_MEM;
3460 
3461 		dlcapabsetqid(&(dlzp->zerocopy_mid), RD(q));
3462 		dlsp = (dl_capability_sub_t *)&dlzp[1];
3463 	}
3464 
3465 	qreply(q, mp);
3466 	return (GLDE_OK);
3467 }
3468 
3469 static int
3470 gld_cap_enable(queue_t *q, mblk_t *mp)
3471 {
3472 	dl_capability_req_t *dlp;
3473 	dl_capability_sub_t *dlsp;
3474 	dl_capab_hcksum_t *dlhp;
3475 	offset_t off;
3476 	size_t len;
3477 	size_t size;
3478 	offset_t end;
3479 
3480 	dlp = (dl_capability_req_t *)mp->b_rptr;
3481 	dlp->dl_primitive = DL_CAPABILITY_ACK;
3482 
3483 	off = dlp->dl_sub_offset;
3484 	len = dlp->dl_sub_length;
3485 
3486 	if (!MBLKIN(mp, off, len))
3487 		return (DL_BADPRIM);
3488 
3489 	end = off + len;
3490 	while (off < end) {
3491 		dlsp = (dl_capability_sub_t *)(mp->b_rptr + off);
3492 		size = sizeof (dl_capability_sub_t) + dlsp->dl_length;
3493 		if (off + size > end)
3494 			return (DL_BADPRIM);
3495 
3496 		switch (dlsp->dl_cap) {
3497 		case DL_CAPAB_HCKSUM:
3498 			dlhp = (dl_capab_hcksum_t *)&dlsp[1];
3499 			/* nothing useful we can do with the contents */
3500 			dlcapabsetqid(&(dlhp->hcksum_mid), RD(q));
3501 			break;
3502 		default:
3503 			break;
3504 		}
3505 
3506 		off += size;
3507 	}
3508 
3509 	qreply(q, mp);
3510 	return (GLDE_OK);
3511 }
3512 
3513 /*
3514  * Send a copy of the DL_NOTIFY_IND message <mp> to each stream that has
3515  * requested the specific <notification> that the message carries AND is
3516  * eligible and ready to receive the notification immediately.
3517  *
3518  * This routine ignores flow control. Notifications will be sent regardless.
3519  *
3520  * In all cases, the original message passed in is freed at the end of
3521  * the routine.
3522  */
3523 static void
3524 gld_notify_qs(gld_mac_info_t *macinfo, mblk_t *mp, uint32_t notification)
3525 {
3526 	gld_mac_pvt_t *mac_pvt;
3527 	gld_vlan_t *vlan;
3528 	gld_t *gld;
3529 	mblk_t *nmp;
3530 	int i;
3531 
3532 	ASSERT(GLDM_LOCK_HELD_WRITE(macinfo));
3533 
3534 	mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt;
3535 
3536 	/*
3537 	 * Search all the streams attached to this macinfo looking
3538 	 * for those eligible to receive the present notification.
3539 	 */
3540 	for (i = 0; i < VLAN_HASHSZ; i++) {
3541 		for (vlan = mac_pvt->vlan_hash[i];
3542 		    vlan != NULL; vlan = vlan->gldv_next) {
3543 			for (gld = vlan->gldv_str_next;
3544 			    gld != (gld_t *)&vlan->gldv_str_next;
3545 			    gld = gld->gld_next) {
3546 				ASSERT(gld->gld_qptr != NULL);
3547 				ASSERT(gld->gld_state == DL_IDLE ||
3548 				    gld->gld_state == DL_UNBOUND);
3549 				ASSERT(gld->gld_mac_info == macinfo);
3550 
3551 				if (gld->gld_flags & GLD_STR_CLOSING)
3552 					continue; /* not eligible - skip */
3553 				if (!(notification & gld->gld_notifications))
3554 					continue; /* not wanted - skip */
3555 				if ((nmp = dupmsg(mp)) == NULL)
3556 					continue; /* can't copy - skip */
3557 
3558 				/*
3559 				 * All OK; send dup'd notification up this
3560 				 * stream
3561 				 */
3562 				qreply(WR(gld->gld_qptr), nmp);
3563 			}
3564 		}
3565 	}
3566 
3567 	/*
3568 	 * Drop the original message block now
3569 	 */
3570 	freemsg(mp);
3571 }
3572 
3573 /*
3574  * For each (understood) bit in the <notifications> argument, construct
3575  * a DL_NOTIFY_IND message and send it to the specified <q>, or to all
3576  * eligible queues if <q> is NULL.
3577  */
3578 static void
3579 gld_notify_ind(gld_mac_info_t *macinfo, uint32_t notifications, queue_t *q)
3580 {
3581 	gld_mac_pvt_t *mac_pvt;
3582 	dl_notify_ind_t *dlnip;
3583 	struct gld_stats *stats;
3584 	mblk_t *mp;
3585 	size_t size;
3586 	uint32_t bit;
3587 
3588 	GLDM_LOCK(macinfo, RW_WRITER);
3589 
3590 	/*
3591 	 * The following cases shouldn't happen, but just in case the
3592 	 * MAC driver calls gld_linkstate() at an inappropriate time, we
3593 	 * check anyway ...
3594 	 */
3595 	if (!(macinfo->gldm_GLD_flags & GLD_MAC_READY)) {
3596 		GLDM_UNLOCK(macinfo);
3597 		return;				/* not ready yet	*/
3598 	}
3599 
3600 	if (macinfo->gldm_GLD_flags & GLD_UNREGISTERED) {
3601 		GLDM_UNLOCK(macinfo);
3602 		return;				/* not ready anymore	*/
3603 	}
3604 
3605 	/*
3606 	 * Make sure the kstats are up to date, 'cos we use some of
3607 	 * the kstat values below, specifically the link speed ...
3608 	 */
3609 	mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt;
3610 	stats = mac_pvt->statistics;
3611 	if (macinfo->gldm_get_stats)
3612 		(void) (*macinfo->gldm_get_stats)(macinfo, stats);
3613 
3614 	for (bit = 1; notifications != 0; bit <<= 1) {
3615 		if ((notifications & bit) == 0)
3616 			continue;
3617 		notifications &= ~bit;
3618 
3619 		size = DL_NOTIFY_IND_SIZE;
3620 		if (bit == DL_NOTE_PHYS_ADDR)
3621 			size += macinfo->gldm_addrlen;
3622 		if ((mp = allocb(size, BPRI_MED)) == NULL)
3623 			continue;
3624 
3625 		mp->b_datap->db_type = M_PROTO;
3626 		mp->b_wptr = mp->b_rptr + size;
3627 		dlnip = (dl_notify_ind_t *)mp->b_rptr;
3628 		dlnip->dl_primitive = DL_NOTIFY_IND;
3629 		dlnip->dl_notification = 0;
3630 		dlnip->dl_data = 0;
3631 		dlnip->dl_addr_length = 0;
3632 		dlnip->dl_addr_offset = 0;
3633 
3634 		switch (bit) {
3635 		case DL_NOTE_PROMISC_ON_PHYS:
3636 		case DL_NOTE_PROMISC_OFF_PHYS:
3637 			if (mac_pvt->nprom != 0)
3638 				dlnip->dl_notification = bit;
3639 			break;
3640 
3641 		case DL_NOTE_LINK_DOWN:
3642 			if (macinfo->gldm_linkstate == GLD_LINKSTATE_DOWN)
3643 				dlnip->dl_notification = bit;
3644 			break;
3645 
3646 		case DL_NOTE_LINK_UP:
3647 			if (macinfo->gldm_linkstate == GLD_LINKSTATE_UP)
3648 				dlnip->dl_notification = bit;
3649 			break;
3650 
3651 		case DL_NOTE_SPEED:
3652 			/*
3653 			 * Conversion required here:
3654 			 *	GLD keeps the speed in bit/s in a uint64
3655 			 *	DLPI wants it in kb/s in a uint32
3656 			 * Fortunately this is still big enough for 10Gb/s!
3657 			 */
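			/*
			 * For example, a 10 Gb/s link is 10,000,000,000 bit/s,
			 * reported here as 10,000,000 kb/s -- well under the
			 * uint32 maximum of about 4.29e9 kb/s (~4.29 Tb/s).
			 */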
3658 			dlnip->dl_notification = bit;
3659 			dlnip->dl_data = stats->glds_speed/1000ULL;
3660 			break;
3661 
3662 		case DL_NOTE_PHYS_ADDR:
3663 			dlnip->dl_notification = bit;
3664 			dlnip->dl_data = DL_CURR_PHYS_ADDR;
3665 			dlnip->dl_addr_offset = sizeof (dl_notify_ind_t);
3666 			dlnip->dl_addr_length = macinfo->gldm_addrlen +
3667 			    abs(macinfo->gldm_saplen);
3668 			mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt;
3669 			mac_copy(mac_pvt->curr_macaddr,
3670 			    mp->b_rptr + sizeof (dl_notify_ind_t),
3671 			    macinfo->gldm_addrlen);
3672 			break;
3673 
3674 		default:
3675 			break;
3676 		}
3677 
3678 		if (dlnip->dl_notification == 0)
3679 			freemsg(mp);
3680 		else if (q != NULL)
3681 			qreply(q, mp);
3682 		else
3683 			gld_notify_qs(macinfo, mp, bit);
3684 	}
3685 
3686 	GLDM_UNLOCK(macinfo);
3687 }
3688 
3689 /*
3690  * gld_notify_req - handle a DL_NOTIFY_REQ message
3691  */
3692 static int
3693 gld_notify_req(queue_t *q, mblk_t *mp)
3694 {
3695 	gld_t *gld = (gld_t *)q->q_ptr;
3696 	gld_mac_info_t *macinfo;
3697 	gld_mac_pvt_t *pvt;
3698 	dl_notify_req_t *dlnrp;
3699 	dl_notify_ack_t *dlnap;
3700 
3701 	ASSERT(gld != NULL);
3702 	ASSERT(gld->gld_qptr == RD(q));
3703 
3704 	dlnrp = (dl_notify_req_t *)mp->b_rptr;
3705 
3706 #ifdef GLD_DEBUG
3707 	if (gld_debug & GLDTRACE)
3708 		cmn_err(CE_NOTE, "gld_notify_req(%p %p)",
3709 		    (void *)q, (void *)mp);
3710 #endif
3711 
3712 	if (gld->gld_state == DL_UNATTACHED) {
3713 #ifdef GLD_DEBUG
3714 		if (gld_debug & GLDERRS)
3715 			cmn_err(CE_NOTE, "gld_notify_req: wrong state (%d)",
3716 			    gld->gld_state);
3717 #endif
3718 		return (DL_OUTSTATE);
3719 	}
3720 
3721 	/*
3722 	 * Remember what notifications are required by this stream
3723 	 */
3724 	macinfo = gld->gld_mac_info;
3725 	pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt;
3726 
3727 	gld->gld_notifications = dlnrp->dl_notifications & pvt->notifications;
3728 
3729 	/*
3730 	 * The return DL_NOTIFY_ACK carries the bitset of notifications
3731 	 * that this driver can provide, independently of which ones have
3732 	 * previously been or are now being requested.
3733 	 */
3734 	if ((mp = mexchange(q, mp, sizeof (dl_notify_ack_t), M_PCPROTO,
3735 	    DL_NOTIFY_ACK)) == NULL)
3736 		return (DL_SYSERR);
3737 
3738 	dlnap = (dl_notify_ack_t *)mp->b_rptr;
3739 	dlnap->dl_notifications = pvt->notifications;
3740 	qreply(q, mp);
3741 
3742 	/*
3743 	 * A side effect of a DL_NOTIFY_REQ is that after the DL_NOTIFY_ACK
3744  * reply, the requestor gets zero or more DL_NOTIFY_IND messages
3745 	 * that provide the current status.
3746 	 */
3747 	gld_notify_ind(macinfo, gld->gld_notifications, q);
3748 
3749 	return (GLDE_OK);
3750 }
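
/*
 * Illustrative only (user-level DLPI client, not GLD API): enabling
 * link-state notifications on a stream, using the DL_NOTIFY_REQ /
 * DL_NOTIFY_ACK / DL_NOTIFY_IND exchange handled above.
 *
 *	#include <sys/dlpi.h>
 *	#include <stropts.h>
 *
 *	static int
 *	enable_link_notifications(int fd)
 *	{
 *		dl_notify_req_t req;
 *		struct strbuf ctl;
 *
 *		req.dl_primitive = DL_NOTIFY_REQ;
 *		req.dl_notifications = DL_NOTE_LINK_UP | DL_NOTE_LINK_DOWN |
 *		    DL_NOTE_SPEED;
 *		req.dl_timelimit = 0;
 *
 *		ctl.len = sizeof (req);
 *		ctl.buf = (char *)&req;
 *
 *		// The DL_NOTIFY_ACK (and any immediate DL_NOTIFY_IND
 *		// messages carrying current status) must then be read
 *		// back with getmsg().
 *		return (putmsg(fd, &ctl, NULL, 0));
 *	}
 */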
3751 
3752 /*
3753  * gld_linkstate()
3754  *	Called by driver to tell GLD the state of the physical link.
3755  *	As a side effect, sends a DL_NOTE_LINK_UP or DL_NOTE_LINK_DOWN
3756  *	notification to each client that has previously requested such
3757  *	notifications
3758  */
3759 void
3760 gld_linkstate(gld_mac_info_t *macinfo, int32_t newstate)
3761 {
3762 	uint32_t notification;
3763 
3764 	switch (newstate) {
3765 	default:
3766 		return;
3767 
3768 	case GLD_LINKSTATE_DOWN:
3769 		notification = DL_NOTE_LINK_DOWN;
3770 		break;
3771 
3772 	case GLD_LINKSTATE_UP:
3773 		notification = DL_NOTE_LINK_UP | DL_NOTE_SPEED;
3774 		break;
3775 
3776 	case GLD_LINKSTATE_UNKNOWN:
3777 		notification = 0;
3778 		break;
3779 	}
3780 
3781 	GLDM_LOCK(macinfo, RW_WRITER);
3782 	if (macinfo->gldm_linkstate == newstate)
3783 		notification = 0;
3784 	else
3785 		macinfo->gldm_linkstate = newstate;
3786 	GLDM_UNLOCK(macinfo);
3787 
3788 	if (notification)
3789 		gld_notify_ind(macinfo, notification, NULL);
3790 }
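
/*
 * Illustrative only (hypothetical GLD driver, not part of this module): a
 * device driver would typically report link transitions to GLD from its
 * interrupt or link-poll path.  xx_link_is_up() and the xx_softc layout
 * are assumptions made up for this example.
 *
 *	static void
 *	xx_link_check(struct xx_softc *sc)
 *	{
 *		if (xx_link_is_up(sc))
 *			gld_linkstate(sc->xx_macinfo, GLD_LINKSTATE_UP);
 *		else
 *			gld_linkstate(sc->xx_macinfo, GLD_LINKSTATE_DOWN);
 *	}
 */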
3791 
3792 /*
3793  * gld_udqos - set the current QoS parameters (priority only at the moment).
3794  */
3795 static int
3796 gld_udqos(queue_t *q, mblk_t *mp)
3797 {
3798 	dl_udqos_req_t *dlp;
3799 	gld_t  *gld = (gld_t *)q->q_ptr;
3800 	int off;
3801 	int len;
3802 	dl_qos_cl_sel1_t *selp;
3803 
3804 	ASSERT(gld);
3805 	ASSERT(gld->gld_qptr == RD(q));
3806 
3807 #ifdef GLD_DEBUG
3808 	if (gld_debug & GLDTRACE)
3809 		cmn_err(CE_NOTE, "gld_udqos(%p %p)", (void *)q, (void *)mp);
3810 #endif
3811 
3812 	if (gld->gld_state != DL_IDLE) {
3813 #ifdef GLD_DEBUG
3814 		if (gld_debug & GLDERRS)
3815 			cmn_err(CE_NOTE, "gld_udqos: wrong state (%d)",
3816 			    gld->gld_state);
3817 #endif
3818 		return (DL_OUTSTATE);
3819 	}
3820 
3821 	dlp = (dl_udqos_req_t *)mp->b_rptr;
3822 	off = dlp->dl_qos_offset;
3823 	len = dlp->dl_qos_length;
3824 
3825 	if (len != sizeof (dl_qos_cl_sel1_t) || !MBLKIN(mp, off, len))
3826 		return (DL_BADQOSTYPE);
3827 
3828 	selp = (dl_qos_cl_sel1_t *)(mp->b_rptr + off);
3829 	if (selp->dl_qos_type != DL_QOS_CL_SEL1)
3830 		return (DL_BADQOSTYPE);
3831 
3832 	if (selp->dl_trans_delay != 0 &&
3833 	    selp->dl_trans_delay != DL_QOS_DONT_CARE)
3834 		return (DL_BADQOSPARAM);
3835 	if (selp->dl_protection != 0 &&
3836 	    selp->dl_protection != DL_QOS_DONT_CARE)
3837 		return (DL_BADQOSPARAM);
3838 	if (selp->dl_residual_error != 0 &&
3839 	    selp->dl_residual_error != DL_QOS_DONT_CARE)
3840 		return (DL_BADQOSPARAM);
3841 	if (selp->dl_priority < 0 || selp->dl_priority > 7)
3842 		return (DL_BADQOSPARAM);
3843 
3844 	gld->gld_upri = selp->dl_priority;
3845 
3846 	dlokack(q, mp, DL_UDQOS_REQ);
3847 	return (GLDE_OK);
3848 }
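
/*
 * Illustrative only (user-level DLPI client, not GLD API): selecting an
 * outbound priority of 6 with DL_UDQOS_REQ, in the dl_qos_cl_sel1_t form
 * accepted by gld_udqos() above.
 *
 *	#include <sys/dlpi.h>
 *	#include <stropts.h>
 *	#include <string.h>
 *
 *	static int
 *	set_priority(int fd)
 *	{
 *		char buf[sizeof (dl_udqos_req_t) + sizeof (dl_qos_cl_sel1_t)];
 *		dl_udqos_req_t *req = (dl_udqos_req_t *)buf;
 *		dl_qos_cl_sel1_t *sel =
 *		    (dl_qos_cl_sel1_t *)(buf + sizeof (dl_udqos_req_t));
 *		struct strbuf ctl;
 *
 *		(void) memset(buf, 0, sizeof (buf));
 *		req->dl_primitive = DL_UDQOS_REQ;
 *		req->dl_qos_offset = sizeof (dl_udqos_req_t);
 *		req->dl_qos_length = sizeof (dl_qos_cl_sel1_t);
 *		sel->dl_qos_type = DL_QOS_CL_SEL1;
 *		sel->dl_priority = 6;	// 0..7 accepted above
 *		// remaining selection fields left 0 (also accepted above)
 *
 *		ctl.len = sizeof (buf);
 *		ctl.buf = buf;
 *		return (putmsg(fd, &ctl, NULL, 0));
 *	}
 */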
3849 
3850 static mblk_t *
3851 gld_bindack(queue_t *q, mblk_t *mp)
3852 {
3853 	gld_t *gld = (gld_t *)q->q_ptr;
3854 	gld_mac_info_t *macinfo = gld->gld_mac_info;
3855 	gld_mac_pvt_t *mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt;
3856 	dl_bind_ack_t *dlp;
3857 	size_t size;
3858 	t_uscalar_t addrlen;
3859 	uchar_t *sapp;
3860 
3861 	addrlen = macinfo->gldm_addrlen + abs(macinfo->gldm_saplen);
3862 	size = sizeof (dl_bind_ack_t) + addrlen;
3863 	if ((mp = mexchange(q, mp, size, M_PCPROTO, DL_BIND_ACK)) == NULL)
3864 		return (NULL);
3865 
3866 	dlp = (dl_bind_ack_t *)mp->b_rptr;
3867 	dlp->dl_sap = gld->gld_sap;
3868 	dlp->dl_addr_length = addrlen;
3869 	dlp->dl_addr_offset = sizeof (dl_bind_ack_t);
3870 	dlp->dl_max_conind = 0;
3871 	dlp->dl_xidtest_flg = 0;
3872 
3873 	mac_copy(mac_pvt->curr_macaddr, (uchar_t *)&dlp[1],
3874 	    macinfo->gldm_addrlen);
3875 	sapp = mp->b_rptr + dlp->dl_addr_offset + macinfo->gldm_addrlen;
3876 	*(ushort_t *)sapp = gld->gld_sap;
3877 
3878 	return (mp);
3879 }
3880 
3881 /*
3882  * gld_bind - determine if a SAP is already allocated and whether it is legal
3883  * to do the bind at this time
3884  */
3885 static int
3886 gld_bind(queue_t *q, mblk_t *mp)
3887 {
3888 	ulong_t	sap;
3889 	dl_bind_req_t *dlp;
3890 	gld_t *gld = (gld_t *)q->q_ptr;
3891 	gld_mac_info_t *macinfo = gld->gld_mac_info;
3892 
3893 	ASSERT(gld);
3894 	ASSERT(gld->gld_qptr == RD(q));
3895 
3896 #ifdef GLD_DEBUG
3897 	if (gld_debug & GLDTRACE)
3898 		cmn_err(CE_NOTE, "gld_bind(%p %p)", (void *)q, (void *)mp);
3899 #endif
3900 
3901 	dlp = (dl_bind_req_t *)mp->b_rptr;
3902 	sap = dlp->dl_sap;
3903 
3904 #ifdef GLD_DEBUG
3905 	if (gld_debug & GLDPROT)
3906 		cmn_err(CE_NOTE, "gld_bind: lsap=%lx", sap);
3907 #endif
3908 
3909 	if (gld->gld_state != DL_UNBOUND) {
3910 #ifdef GLD_DEBUG
3911 		if (gld_debug & GLDERRS)
3912 			cmn_err(CE_NOTE, "gld_bind: bound or not attached (%d)",
3913 			    gld->gld_state);
3914 #endif
3915 		return (DL_OUTSTATE);
3916 	}
3917 	ASSERT(macinfo);
3918 
3919 	if (dlp->dl_service_mode != DL_CLDLS) {
3920 		return (DL_UNSUPPORTED);
3921 	}
3922 	if (dlp->dl_xidtest_flg & (DL_AUTO_XID | DL_AUTO_TEST)) {
3923 		return (DL_NOAUTO);
3924 	}
3925 
3926 	/*
3927 	 * Check sap validity and decide whether this stream accepts
3928 	 * IEEE 802.2 (LLC) packets.
3929 	 */
3930 	if (sap > ETHERTYPE_MAX)
3931 		return (DL_BADSAP);
3932 
3933 	/*
3934 	 * Decide whether the SAP value selects EtherType encoding/decoding.
3935 	 * For compatibility with monolithic ethernet drivers, the range of
3936 	 * SAP values is different for DL_ETHER media.
3937 	 */
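	/*
	 * For example, on DL_ETHER a bind to SAP 0x0800 (IPv4, which is
	 * greater than ETHERMTU) selects EtherType encoding, while a bind
	 * to SAP 0xAA (SNAP, not greater than ETHERMTU) is treated as an
	 * IEEE 802.2 LLC SAP.
	 */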
3938 	switch (macinfo->gldm_type) {
3939 	case DL_ETHER:
3940 		gld->gld_ethertype = (sap > ETHERMTU);
3941 		break;
3942 	default:
3943 		gld->gld_ethertype = (sap > GLD_MAX_802_SAP);
3944 		break;
3945 	}
3946 
3947 	/* if we get to here, then the SAP is legal enough */
3948 	GLDM_LOCK(macinfo, RW_WRITER);
3949 	gld->gld_state = DL_IDLE;	/* bound and ready */
3950 	gld->gld_sap = sap;
3951 	if ((macinfo->gldm_type == DL_ETHER) && (sap == ETHERTYPE_VLAN))
3952 		((gld_vlan_t *)gld->gld_vlan)->gldv_nvlan_sap++;
3953 	gld_set_ipq(gld);
3954 
3955 #ifdef GLD_DEBUG
3956 	if (gld_debug & GLDPROT)
3957 		cmn_err(CE_NOTE, "gld_bind: ok - sap = %d", gld->gld_sap);
3958 #endif
3959 
3960 	/* ACK the BIND */
3961 	mp = gld_bindack(q, mp);
3962 	GLDM_UNLOCK(macinfo);
3963 
3964 	if (mp != NULL) {
3965 		qreply(q, mp);
3966 		return (GLDE_OK);
3967 	}
3968 
3969 	return (DL_SYSERR);
3970 }
3971 
3972 /*
3973  * gld_unbind - perform an unbind of an LSAP or ether type on the stream.
3974  * The stream is still open and can be re-bound.
3975  */
3976 static int
3977 gld_unbind(queue_t *q, mblk_t *mp)
3978 {
3979 	gld_t *gld = (gld_t *)q->q_ptr;
3980 	gld_mac_info_t *macinfo = gld->gld_mac_info;
3981 
3982 	ASSERT(gld);
3983 
3984 #ifdef GLD_DEBUG
3985 	if (gld_debug & GLDTRACE)
3986 		cmn_err(CE_NOTE, "gld_unbind(%p %p)", (void *)q, (void *)mp);
3987 #endif
3988 
3989 	if (gld->gld_state != DL_IDLE) {
3990 #ifdef GLD_DEBUG
3991 		if (gld_debug & GLDERRS)
3992 			cmn_err(CE_NOTE, "gld_unbind: wrong state (%d)",
3993 			    gld->gld_state);
3994 #endif
3995 		return (DL_OUTSTATE);
3996 	}
3997 	ASSERT(macinfo);
3998 
3999 	/*
4000 	 * Avoid unbinding (DL_UNBIND_REQ) while FAST/RAW is inside wput.
4001 	 * See comments above gld_start().
4002 	 */
4003 	gld->gld_in_unbind = B_TRUE;	/* disallow wput=>start */
4004 	membar_enter();
4005 	if (gld->gld_wput_count != 0) {
4006 		gld->gld_in_unbind = B_FALSE;
4007 		ASSERT(mp);		/* we didn't come from close */
4008 #ifdef GLD_DEBUG
4009 		if (gld_debug & GLDETRACE)
4010 			cmn_err(CE_NOTE, "gld_unbind: defer for wput");
4011 #endif
4012 		(void) putbq(q, mp);
4013 		qenable(q);		/* try again soon */
4014 		return (GLDE_RETRY);
4015 	}
4016 
4017 	GLDM_LOCK(macinfo, RW_WRITER);
4018 	if ((macinfo->gldm_type == DL_ETHER) &&
4019 	    (gld->gld_sap == ETHERTYPE_VLAN)) {
4020 		((gld_vlan_t *)gld->gld_vlan)->gldv_nvlan_sap--;
4021 	}
4022 	gld->gld_state = DL_UNBOUND;
4023 	gld->gld_sap = 0;
4024 	gld_set_ipq(gld);
4025 	GLDM_UNLOCK(macinfo);
4026 
4027 	membar_exit();
4028 	gld->gld_in_unbind = B_FALSE;
4029 
4030 	/* mp is NULL if we came from close */
4031 	if (mp) {
4032 		gld_flushqueue(q);	/* flush the queues */
4033 		dlokack(q, mp, DL_UNBIND_REQ);
4034 	}
4035 	return (GLDE_OK);
4036 }
4037 
4038 /*
4039  * gld_inforeq - generate the response to an info request
4040  */
4041 static int
4042 gld_inforeq(queue_t *q, mblk_t *mp)
4043 {
4044 	gld_t		*gld;
4045 	dl_info_ack_t	*dlp;
4046 	int		bufsize;
4047 	glddev_t	*glddev;
4048 	gld_mac_info_t	*macinfo;
4049 	gld_mac_pvt_t	*mac_pvt;
4050 	int		sel_offset = 0;
4051 	int		range_offset = 0;
4052 	int		addr_offset;
4053 	int		addr_length;
4054 	int		sap_length;
4055 	int		brdcst_offset;
4056 	int		brdcst_length;
4057 	uchar_t		*sapp;
4058 
4059 #ifdef GLD_DEBUG
4060 	if (gld_debug & GLDTRACE)
4061 		cmn_err(CE_NOTE, "gld_inforeq(%p %p)", (void *)q, (void *)mp);
4062 #endif
4063 	gld = (gld_t *)q->q_ptr;
4064 	ASSERT(gld);
4065 	glddev = gld->gld_device;
4066 	ASSERT(glddev);
4067 
4068 	if (gld->gld_state == DL_IDLE || gld->gld_state == DL_UNBOUND) {
4069 		macinfo = gld->gld_mac_info;
4070 		ASSERT(macinfo != NULL);
4071 
4072 		mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt;
4073 
4074 		addr_length = macinfo->gldm_addrlen;
4075 		sap_length = macinfo->gldm_saplen;
4076 		brdcst_length = macinfo->gldm_addrlen;
4077 	} else {
4078 		addr_length = glddev->gld_addrlen;
4079 		sap_length = glddev->gld_saplen;
4080 		brdcst_length = glddev->gld_addrlen;
4081 	}
4082 
4083 	bufsize = sizeof (dl_info_ack_t);
4084 
4085 	addr_offset = bufsize;
4086 	bufsize += addr_length;
4087 	bufsize += abs(sap_length);
4088 
4089 	brdcst_offset = bufsize;
4090 	bufsize += brdcst_length;
4091 
4092 	if (((gld_vlan_t *)gld->gld_vlan) != NULL) {
4093 		sel_offset = P2ROUNDUP(bufsize, sizeof (int64_t));
4094 		bufsize = sel_offset + sizeof (dl_qos_cl_sel1_t);
4095 
4096 		range_offset = P2ROUNDUP(bufsize, sizeof (int64_t));
4097 		bufsize = range_offset + sizeof (dl_qos_cl_range1_t);
4098 	}
4099 
4100 	if ((mp = mexchange(q, mp, bufsize, M_PCPROTO, DL_INFO_ACK)) == NULL)
4101 		return (GLDE_OK);	/* nothing more to be done */
4102 
4103 	bzero(mp->b_rptr, bufsize);
4104 
4105 	dlp = (dl_info_ack_t *)mp->b_rptr;
4106 	dlp->dl_primitive = DL_INFO_ACK;
4107 	dlp->dl_version = DL_VERSION_2;
4108 	dlp->dl_service_mode = DL_CLDLS;
4109 	dlp->dl_current_state = gld->gld_state;
4110 	dlp->dl_provider_style = gld->gld_style;
4111 
4112 	if (sel_offset != 0) {
4113 		dl_qos_cl_sel1_t	*selp;
4114 		dl_qos_cl_range1_t	*rangep;
4115 
4116 		ASSERT(range_offset != 0);
4117 
4118 		dlp->dl_qos_offset = sel_offset;
4119 		dlp->dl_qos_length = sizeof (dl_qos_cl_sel1_t);
4120 		dlp->dl_qos_range_offset = range_offset;
4121 		dlp->dl_qos_range_length = sizeof (dl_qos_cl_range1_t);
4122 
4123 		selp = (dl_qos_cl_sel1_t *)(mp->b_rptr + sel_offset);
4124 		selp->dl_qos_type = DL_QOS_CL_SEL1;
4125 		selp->dl_priority = gld->gld_upri;
4126 
4127 		rangep = (dl_qos_cl_range1_t *)(mp->b_rptr + range_offset);
4128 		rangep->dl_qos_type = DL_QOS_CL_RANGE1;
4129 		rangep->dl_priority.dl_min = 0;
4130 		rangep->dl_priority.dl_max = 7;
4131 	}
4132 
4133 	if (gld->gld_state == DL_IDLE || gld->gld_state == DL_UNBOUND) {
4134 		dlp->dl_min_sdu = macinfo->gldm_minpkt;
4135 		dlp->dl_max_sdu = macinfo->gldm_maxpkt;
4136 		dlp->dl_mac_type = macinfo->gldm_type;
4137 		dlp->dl_addr_length = addr_length + abs(sap_length);
4138 		dlp->dl_sap_length = sap_length;
4139 
4140 		if (gld->gld_state == DL_IDLE) {
4141 			/*
4142 			 * If we are bound to a non-LLC SAP on any medium
4143 			 * other than Ethernet, then we need room for a
4144 			 * SNAP header.  So we have to adjust the MTU size
4145 			 * accordingly.  XXX I suppose this should be done
4146 			 * in gldutil.c, but it seems likely that this will
4147 			 * always be true for everything GLD supports but
4148 			 * Ethernet.  Check this if you add another medium.
4149 			 */
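			/*
			 * (An LLC+SNAP header is DSAP + SSAP + control +
			 * OUI + type, nominally 8 bytes.)
			 */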
4150 			if ((macinfo->gldm_type == DL_TPR ||
4151 			    macinfo->gldm_type == DL_FDDI) &&
4152 			    gld->gld_ethertype)
4153 				dlp->dl_max_sdu -= LLC_SNAP_HDR_LEN;
4154 
4155 			/* copy macaddr and sap */
4156 			dlp->dl_addr_offset = addr_offset;
4157 
4158 			mac_copy(mac_pvt->curr_macaddr, mp->b_rptr +
4159 			    addr_offset, macinfo->gldm_addrlen);
4160 			sapp = mp->b_rptr + addr_offset +
4161 			    macinfo->gldm_addrlen;
4162 			*(ushort_t *)sapp = gld->gld_sap;
4163 		} else {
4164 			dlp->dl_addr_offset = 0;
4165 		}
4166 
4167 		/* copy broadcast addr */
4168 		dlp->dl_brdcst_addr_length = macinfo->gldm_addrlen;
4169 		dlp->dl_brdcst_addr_offset = brdcst_offset;
4170 		mac_copy((caddr_t)macinfo->gldm_broadcast_addr,
4171 		    mp->b_rptr + brdcst_offset, brdcst_length);
4172 	} else {
4173 		/*
4174 		 * No PPA is attached.
4175 		 * The best we can do is use the values provided
4176 		 * by the first mac that called gld_register.
4177 		 */
4178 		dlp->dl_min_sdu = glddev->gld_minsdu;
4179 		dlp->dl_max_sdu = glddev->gld_maxsdu;
4180 		dlp->dl_mac_type = glddev->gld_type;
4181 		dlp->dl_addr_length = addr_length + abs(sap_length);
4182 		dlp->dl_sap_length = sap_length;
4183 		dlp->dl_addr_offset = 0;
4184 		dlp->dl_brdcst_addr_offset = brdcst_offset;
4185 		dlp->dl_brdcst_addr_length = brdcst_length;
4186 		mac_copy((caddr_t)glddev->gld_broadcast,
4187 		    mp->b_rptr + brdcst_offset, brdcst_length);
4188 	}
4189 	qreply(q, mp);
4190 	return (GLDE_OK);
4191 }
4192 
4193 /*
4194  * gld_unitdata (q, mp)
4195  * send a datagram.  Destination address/lsap is in M_PROTO
4196  * message (first mblock), data is in remainder of message.
4197  *
4198  */
4199 static int
4200 gld_unitdata(queue_t *q, mblk_t *mp)
4201 {
4202 	gld_t *gld = (gld_t *)q->q_ptr;
4203 	dl_unitdata_req_t *dlp = (dl_unitdata_req_t *)mp->b_rptr;
4204 	gld_mac_info_t *macinfo = gld->gld_mac_info;
4205 	size_t	msglen;
4206 	mblk_t	*nmp;
4207 	gld_interface_t *ifp;
4208 	uint32_t start;
4209 	uint32_t stuff;
4210 	uint32_t end;
4211 	uint32_t value;
4212 	uint32_t flags;
4213 	uint32_t upri;
4214 
4215 #ifdef GLD_DEBUG
4216 	if (gld_debug & GLDTRACE)
4217 		cmn_err(CE_NOTE, "gld_unitdata(%p %p)", (void *)q, (void *)mp);
4218 #endif
4219 
4220 	if (gld->gld_state != DL_IDLE) {
4221 #ifdef GLD_DEBUG
4222 		if (gld_debug & GLDERRS)
4223 			cmn_err(CE_NOTE, "gld_unitdata: wrong state (%d)",
4224 			    gld->gld_state);
4225 #endif
4226 		dluderrorind(q, mp, mp->b_rptr + dlp->dl_dest_addr_offset,
4227 		    dlp->dl_dest_addr_length, DL_OUTSTATE, 0);
4228 		return (GLDE_OK);
4229 	}
4230 	ASSERT(macinfo != NULL);
4231 
4232 	if (!MBLKIN(mp, dlp->dl_dest_addr_offset, dlp->dl_dest_addr_length) ||
4233 	    dlp->dl_dest_addr_length !=
4234 	    macinfo->gldm_addrlen + abs(macinfo->gldm_saplen)) {
4235 		dluderrorind(q, mp, mp->b_rptr + dlp->dl_dest_addr_offset,
4236 		    dlp->dl_dest_addr_length, DL_BADADDR, 0);
4237 		return (GLDE_OK);
4238 	}
4239 
4240 	upri = dlp->dl_priority.dl_max;
4241 
4242 	msglen = msgdsize(mp);
4243 	if (msglen == 0 || msglen > macinfo->gldm_maxpkt) {
4244 #ifdef GLD_DEBUG
4245 		if (gld_debug & GLDERRS)
4246 			cmn_err(CE_NOTE, "gld_unitdata: bad msglen (%d)",
4247 			    (int)msglen);
4248 #endif
4249 		dluderrorind(q, mp, mp->b_rptr + dlp->dl_dest_addr_offset,
4250 		    dlp->dl_dest_addr_length, DL_BADDATA, 0);
4251 		return (GLDE_OK);
4252 	}
4253 
4254 	ASSERT(mp->b_cont != NULL);	/* because msgdsize(mp) is nonzero */
4255 
4256 	ifp = ((gld_mac_pvt_t *)macinfo->gldm_mac_pvt)->interfacep;
4257 
4258 	/* grab any checksum information that may be present */
4259 	mac_hcksum_get(mp->b_cont, &start, &stuff, &end, &value, &flags);
4260 
4261 	/*
4262 	 * Prepend a valid header for transmission
4263 	 */
4264 	if ((nmp = (*ifp->mkunitdata)(gld, mp)) == NULL) {
4265 #ifdef GLD_DEBUG
4266 		if (gld_debug & GLDERRS)
4267 			cmn_err(CE_NOTE, "gld_unitdata: mkunitdata failed.");
4268 #endif
4269 		dluderrorind(q, mp, mp->b_rptr + dlp->dl_dest_addr_offset,
4270 		    dlp->dl_dest_addr_length, DL_SYSERR, ENOSR);
4271 		return (GLDE_OK);
4272 	}
4273 
4274 	/* apply any checksum information to the first block in the chain */
4275 	mac_hcksum_set(nmp, start, stuff, end, value, flags);
4276 
4277 	GLD_CLEAR_MBLK_VTAG(nmp);
4278 	if (gld_start(q, nmp, GLD_WSRV, upri) == GLD_NORESOURCES) {
4279 		qenable(q);
4280 		return (GLDE_RETRY);
4281 	}
4282 
4283 	return (GLDE_OK);
4284 }
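
/*
 * Illustrative only (user-level DLPI client, not GLD API): sending one
 * datagram with DL_UNITDATA_REQ in the form gld_unitdata() expects, again
 * assuming an Ethernet-style destination of a 6-byte MAC address followed
 * by a 2-byte SAP.  The function name and sizes are example assumptions.
 *
 *	#include <sys/types.h>
 *	#include <sys/dlpi.h>
 *	#include <stropts.h>
 *	#include <string.h>
 *
 *	static int
 *	send_unitdata(int fd, const uchar_t *dst, ushort_t sap,
 *	    const void *payload, size_t len)
 *	{
 *		char cbuf[sizeof (dl_unitdata_req_t) + 6 + sizeof (ushort_t)];
 *		dl_unitdata_req_t *dlu = (dl_unitdata_req_t *)cbuf;
 *		struct strbuf ctl, data;
 *
 *		(void) memset(cbuf, 0, sizeof (cbuf));
 *		dlu->dl_primitive = DL_UNITDATA_REQ;
 *		dlu->dl_dest_addr_offset = sizeof (dl_unitdata_req_t);
 *		dlu->dl_dest_addr_length = 6 + sizeof (ushort_t);
 *		(void) memcpy(cbuf + dlu->dl_dest_addr_offset, dst, 6);
 *		(void) memcpy(cbuf + dlu->dl_dest_addr_offset + 6,
 *		    &sap, sizeof (sap));
 *
 *		ctl.len = sizeof (cbuf);
 *		ctl.buf = cbuf;
 *		data.len = (int)len;
 *		data.buf = (char *)payload;
 *		return (putmsg(fd, &ctl, &data, 0));
 *	}
 */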
4285 
4286 /*
4287  * gldattach(q, mp)
4288  * DLPI DL_ATTACH_REQ
4289  * this attaches the stream to a PPA
4290  */
4291 static int
4292 gldattach(queue_t *q, mblk_t *mp)
4293 {
4294 	dl_attach_req_t *at;
4295 	gld_mac_info_t *macinfo;
4296 	gld_t  *gld = (gld_t *)q->q_ptr;
4297 	glddev_t *glddev;
4298 	gld_mac_pvt_t *mac_pvt;
4299 	uint32_t ppa;
4300 	uint32_t vid;
4301 	gld_vlan_t *vlan;
4302 
4303 	at = (dl_attach_req_t *)mp->b_rptr;
4304 
4305 	if (gld->gld_state != DL_UNATTACHED)
4306 		return (DL_OUTSTATE);
4307 
4308 	ASSERT(!gld->gld_mac_info);
4309 
4310 	ppa = at->dl_ppa % GLD_VLAN_SCALE;	/* 0 .. 999	*/
4311 	vid = at->dl_ppa / GLD_VLAN_SCALE;	/* 0 .. 4094	*/
4312 	if (vid > VLAN_VID_MAX)
4313 		return (DL_BADPPA);
4314 
4315 	glddev = gld->gld_device;
4316 	mutex_enter(&glddev->gld_devlock);
4317 	for (macinfo = glddev->gld_mac_next;
4318 	    macinfo != (gld_mac_info_t *)&glddev->gld_mac_next;
4319 	    macinfo = macinfo->gldm_next) {
4320 		int inst;
4321 
4322 		ASSERT(macinfo != NULL);
4323 		if (macinfo->gldm_ppa != ppa)
4324 			continue;
4325 
4326 		if (!(macinfo->gldm_GLD_flags & GLD_MAC_READY))
4327 			continue;	/* this one's not ready yet */
4328 
4329 		/*
4330 		 * VLAN sanity check
4331 		 */
4332 		if (vid != VLAN_VID_NONE && !VLAN_CAPABLE(macinfo)) {
4333 			mutex_exit(&glddev->gld_devlock);
4334 			return (DL_BADPPA);
4335 		}
4336 
4337 		/*
4338 		 * We found the correct PPA, hold the instance
4339 		 */
4340 		inst = ddi_get_instance(macinfo->gldm_devinfo);
4341 		if (inst == -1 || qassociate(q, inst) != 0) {
4342 			mutex_exit(&glddev->gld_devlock);
4343 			return (DL_BADPPA);
4344 		}
4345 
4346 		/* Take the stream off the per-driver-class list */
4347 		gldremque(gld);
4348 
4349 		/*
4350 		 * We must hold the lock to prevent multiple calls
4351 		 * to the reset and start routines.
4352 		 */
4353 		GLDM_LOCK(macinfo, RW_WRITER);
4354 
4355 		gld->gld_mac_info = macinfo;
4356 
4357 		if (macinfo->gldm_send_tagged != NULL)
4358 			gld->gld_send = macinfo->gldm_send_tagged;
4359 		else
4360 			gld->gld_send = macinfo->gldm_send;
4361 
4362 		if ((vlan = gld_get_vlan(macinfo, vid)) == NULL) {
4363 			GLDM_UNLOCK(macinfo);
4364 			gldinsque(gld, glddev->gld_str_prev);
4365 			mutex_exit(&glddev->gld_devlock);
4366 			(void) qassociate(q, -1);
4367 			return (DL_BADPPA);
4368 		}
4369 
4370 		mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt;
4371 		if (!mac_pvt->started) {
4372 			if (gld_start_mac(macinfo) != GLD_SUCCESS) {
4373 				gld_rem_vlan(vlan);
4374 				GLDM_UNLOCK(macinfo);
4375 				gldinsque(gld, glddev->gld_str_prev);
4376 				mutex_exit(&glddev->gld_devlock);
4377 				dlerrorack(q, mp, DL_ATTACH_REQ, DL_SYSERR,
4378 				    EIO);
4379 				(void) qassociate(q, -1);
4380 				return (GLDE_OK);
4381 			}
4382 		}
4383 
4384 		gld->gld_vlan = vlan;
4385 		vlan->gldv_nstreams++;
4386 		gldinsque(gld, vlan->gldv_str_prev);
4387 		gld->gld_state = DL_UNBOUND;
4388 		GLDM_UNLOCK(macinfo);
4389 
4390 #ifdef GLD_DEBUG
4391 		if (gld_debug & GLDPROT) {
4392 			cmn_err(CE_NOTE, "gldattach(%p, %p, PPA = %d)",
4393 			    (void *)q, (void *)mp, macinfo->gldm_ppa);
4394 		}
4395 #endif
4396 		mutex_exit(&glddev->gld_devlock);
4397 		dlokack(q, mp, DL_ATTACH_REQ);
4398 		return (GLDE_OK);
4399 	}
4400 	mutex_exit(&glddev->gld_devlock);
4401 	return (DL_BADPPA);
4402 }
4403 
4404 /*
4405  * gldunattach(q, mp)
4406  * DLPI DL_DETACH_REQ
4407  * detaches the mac layer from the stream
4408  */
4409 int
4410 gldunattach(queue_t *q, mblk_t *mp)
4411 {
4412 	gld_t  *gld = (gld_t *)q->q_ptr;
4413 	glddev_t *glddev = gld->gld_device;
4414 	gld_mac_info_t *macinfo = gld->gld_mac_info;
4415 	int	state = gld->gld_state;
4416 	int	i;
4417 	gld_mac_pvt_t *mac_pvt;
4418 	gld_vlan_t *vlan;
4419 	boolean_t phys_off;
4420 	boolean_t mult_off;
4421 	int op = GLD_MAC_PROMISC_NOOP;
4422 
4423 	if (state != DL_UNBOUND)
4424 		return (DL_OUTSTATE);
4425 
4426 	ASSERT(macinfo != NULL);
4427 	ASSERT(gld->gld_sap == 0);
4428 	mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt;
4429 
4430 #ifdef GLD_DEBUG
4431 	if (gld_debug & GLDPROT) {
4432 		cmn_err(CE_NOTE, "gldunattach(%p, %p, PPA = %d)",
4433 		    (void *)q, (void *)mp, macinfo->gldm_ppa);
4434 	}
4435 #endif
4436 
4437 	GLDM_LOCK(macinfo, RW_WRITER);
4438 
4439 	if (gld->gld_mcast) {
4440 		for (i = 0; i < gld->gld_multicnt; i++) {
4441 			gld_mcast_t *mcast;
4442 
4443 			if ((mcast = gld->gld_mcast[i]) != NULL) {
4444 				ASSERT(mcast->gldm_refcnt);
4445 				gld_send_disable_multi(macinfo, mcast);
4446 			}
4447 		}
4448 		kmem_free(gld->gld_mcast,
4449 		    sizeof (gld_mcast_t *) * gld->gld_multicnt);
4450 		gld->gld_mcast = NULL;
4451 		gld->gld_multicnt = 0;
4452 	}
4453 
4454 	/* decide if we need to turn off any promiscuity */
4455 	phys_off = (gld->gld_flags & GLD_PROM_PHYS &&
4456 	    --mac_pvt->nprom == 0);
4457 	mult_off = (gld->gld_flags & GLD_PROM_MULT &&
4458 	    --mac_pvt->nprom_multi == 0);
4459 
4460 	if (phys_off) {
4461 		op = (mac_pvt->nprom_multi == 0) ? GLD_MAC_PROMISC_NONE :
4462 		    GLD_MAC_PROMISC_MULTI;
4463 	} else if (mult_off) {
4464 		op = (mac_pvt->nprom == 0) ? GLD_MAC_PROMISC_NONE :
4465 		    GLD_MAC_PROMISC_NOOP;	/* phys overrides multi */
4466 	}
4467 
4468 	if (op != GLD_MAC_PROMISC_NOOP)
4469 		(void) (*macinfo->gldm_set_promiscuous)(macinfo, op);
4470 
4471 	vlan = (gld_vlan_t *)gld->gld_vlan;
4472 	if (gld->gld_flags & GLD_PROM_PHYS)
4473 		vlan->gldv_nprom--;
4474 	if (gld->gld_flags & GLD_PROM_MULT)
4475 		vlan->gldv_nprom--;
4476 	if (gld->gld_flags & GLD_PROM_SAP) {
4477 		vlan->gldv_nprom--;
4478 		vlan->gldv_nvlan_sap--;
4479 	}
4480 
4481 	gld->gld_flags &= ~(GLD_PROM_PHYS | GLD_PROM_SAP | GLD_PROM_MULT);
4482 
4483 	GLDM_UNLOCK(macinfo);
4484 
4485 	if (phys_off)
4486 		gld_notify_ind(macinfo, DL_NOTE_PROMISC_OFF_PHYS, NULL);
4487 
4488 	/*
4489 	 * We need to hold both locks when modifying the mac stream list
4490 	 * to protect findminor as well as everyone else.
4491 	 */
4492 	mutex_enter(&glddev->gld_devlock);
4493 	GLDM_LOCK(macinfo, RW_WRITER);
4494 
4495 	/* disassociate this stream with its vlan and underlying mac */
4496 	gldremque(gld);
4497 
4498 	if (--vlan->gldv_nstreams == 0) {
4499 		gld_rem_vlan(vlan);
4500 		gld->gld_vlan = NULL;
4501 	}
4502 
4503 	gld->gld_mac_info = NULL;
4504 	gld->gld_state = DL_UNATTACHED;
4505 
4506 	/* cleanup mac layer if last vlan */
4507 	if (mac_pvt->nvlan == 0) {
4508 		gld_stop_mac(macinfo);
4509 		macinfo->gldm_GLD_flags &= ~GLD_INTR_WAIT;
4510 	}
4511 
4512 	/* make sure no references to this gld for gld_v0_sched */
4513 	if (mac_pvt->last_sched == gld)
4514 		mac_pvt->last_sched = NULL;
4515 
4516 	GLDM_UNLOCK(macinfo);
4517 
4518 	/* put the stream on the unattached Style 2 list */
4519 	gldinsque(gld, glddev->gld_str_prev);
4520 
4521 	mutex_exit(&glddev->gld_devlock);
4522 
4523 	/* There will be no mp if we were called from close */
4524 	if (mp) {
4525 		dlokack(q, mp, DL_DETACH_REQ);
4526 	}
4527 	if (gld->gld_style == DL_STYLE2)
4528 		(void) qassociate(q, -1);
4529 	return (GLDE_OK);
4530 }
4531 
4532 /*
4533  * gld_enable_multi (q, mp)
4534  * Enables multicast address on the stream.  If the mac layer
4535  * isn't enabled for this address, enable at that level as well.
4536  */
4537 static int
4538 gld_enable_multi(queue_t *q, mblk_t *mp)
4539 {
4540 	gld_t  *gld = (gld_t *)q->q_ptr;
4541 	glddev_t *glddev;
4542 	gld_mac_info_t *macinfo = gld->gld_mac_info;
4543 	unsigned char *maddr;
4544 	dl_enabmulti_req_t *multi;
4545 	gld_mcast_t *mcast;
4546 	int	i, rc;
4547 	gld_mac_pvt_t *mac_pvt;
4548 
4549 #ifdef GLD_DEBUG
4550 	if (gld_debug & GLDPROT) {
4551 		cmn_err(CE_NOTE, "gld_enable_multi(%p, %p)", (void *)q,
4552 		    (void *)mp);
4553 	}
4554 #endif
4555 
4556 	if (gld->gld_state == DL_UNATTACHED)
4557 		return (DL_OUTSTATE);
4558 
4559 	ASSERT(macinfo != NULL);
4560 	mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt;
4561 
4562 	if (macinfo->gldm_set_multicast == NULL) {
4563 		return (DL_UNSUPPORTED);
4564 	}
4565 
4566 	multi = (dl_enabmulti_req_t *)mp->b_rptr;
4567 
4568 	if (!MBLKIN(mp, multi->dl_addr_offset, multi->dl_addr_length) ||
4569 	    multi->dl_addr_length != macinfo->gldm_addrlen)
4570 		return (DL_BADADDR);
4571 
4572 	/* request appears to be valid */
4573 
4574 	glddev = mac_pvt->major_dev;
4575 	ASSERT(glddev == gld->gld_device);
4576 
4577 	maddr = mp->b_rptr + multi->dl_addr_offset;
4578 
4579 	/*
4580 	 * The multicast addresses live in a per-device table, along
4581 	 * with a reference count.  Each stream has a table that
4582 	 * points to entries in the device table, with the reference
4583 	 * count reflecting the number of streams pointing at it.  If
4584 	 * this multicast address is already in the per-device table,
4585 	 * all we have to do is point at it.
4586 	 */
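	/*
	 * For example: if two streams each enable 01:00:5e:00:00:01, the
	 * per-device mcast_table ends up with a single entry whose
	 * gldm_refcnt is 2, and each stream's gld_mcast[] array holds a
	 * pointer to that one entry.
	 */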
4587 	GLDM_LOCK(macinfo, RW_WRITER);
4588 
4589 	/* does this address appear in current table? */
4590 	if (gld->gld_mcast == NULL) {
4591 		/* no mcast addresses -- allocate table */
4592 		gld->gld_mcast = GLD_GETSTRUCT(gld_mcast_t *,
4593 		    glddev->gld_multisize);
4594 		if (gld->gld_mcast == NULL) {
4595 			GLDM_UNLOCK(macinfo);
4596 			dlerrorack(q, mp, DL_ENABMULTI_REQ, DL_SYSERR, ENOSR);
4597 			return (GLDE_OK);
4598 		}
4599 		gld->gld_multicnt = glddev->gld_multisize;
4600 	} else {
4601 		for (i = 0; i < gld->gld_multicnt; i++) {
4602 			if (gld->gld_mcast[i] &&
4603 			    mac_eq(gld->gld_mcast[i]->gldm_addr,
4604 			    maddr, macinfo->gldm_addrlen)) {
4605 				/* this is a match -- just succeed */
4606 				ASSERT(gld->gld_mcast[i]->gldm_refcnt);
4607 				GLDM_UNLOCK(macinfo);
4608 				dlokack(q, mp, DL_ENABMULTI_REQ);
4609 				return (GLDE_OK);
4610 			}
4611 		}
4612 	}
4613 
4614 	/*
4615 	 * it wasn't in the stream so check to see if the mac layer has it
4616 	 */
4617 	mcast = NULL;
4618 	if (mac_pvt->mcast_table == NULL) {
4619 		mac_pvt->mcast_table = GLD_GETSTRUCT(gld_mcast_t,
4620 		    glddev->gld_multisize);
4621 		if (mac_pvt->mcast_table == NULL) {
4622 			GLDM_UNLOCK(macinfo);
4623 			dlerrorack(q, mp, DL_ENABMULTI_REQ, DL_SYSERR, ENOSR);
4624 			return (GLDE_OK);
4625 		}
4626 	} else {
4627 		for (i = 0; i < glddev->gld_multisize; i++) {
4628 			if (mac_pvt->mcast_table[i].gldm_refcnt &&
4629 			    mac_eq(mac_pvt->mcast_table[i].gldm_addr,
4630 			    maddr, macinfo->gldm_addrlen)) {
4631 				mcast = &mac_pvt->mcast_table[i];
4632 				break;
4633 			}
4634 		}
4635 	}
4636 	if (mcast == NULL) {
4637 		/* not in mac layer -- find an empty mac slot to fill in */
4638 		for (i = 0; i < glddev->gld_multisize; i++) {
4639 			if (mac_pvt->mcast_table[i].gldm_refcnt == 0) {
4640 				mcast = &mac_pvt->mcast_table[i];
4641 				mac_copy(maddr, mcast->gldm_addr,
4642 				    macinfo->gldm_addrlen);
4643 				break;
4644 			}
4645 		}
4646 	}
4647 	if (mcast == NULL) {
4648 		/* couldn't get a mac layer slot */
4649 		GLDM_UNLOCK(macinfo);
4650 		return (DL_TOOMANY);
4651 	}
4652 
4653 	/* now we have a mac layer slot in mcast -- get a stream slot */
4654 	for (i = 0; i < gld->gld_multicnt; i++) {
4655 		if (gld->gld_mcast[i] != NULL)
4656 			continue;
4657 		/* found an empty slot */
4658 		if (!mcast->gldm_refcnt) {
4659 			/* set mcast in hardware */
4660 			unsigned char cmaddr[GLD_MAX_ADDRLEN];
4661 
4662 			ASSERT(sizeof (cmaddr) >= macinfo->gldm_addrlen);
4663 			cmac_copy(maddr, cmaddr,
4664 			    macinfo->gldm_addrlen, macinfo);
4665 
4666 			rc = (*macinfo->gldm_set_multicast)
4667 			    (macinfo, cmaddr, GLD_MULTI_ENABLE);
4668 			if (rc == GLD_NOTSUPPORTED) {
4669 				GLDM_UNLOCK(macinfo);
4670 				return (DL_NOTSUPPORTED);
4671 			} else if (rc == GLD_NORESOURCES) {
4672 				GLDM_UNLOCK(macinfo);
4673 				return (DL_TOOMANY);
4674 			} else if (rc == GLD_BADARG) {
4675 				GLDM_UNLOCK(macinfo);
4676 				return (DL_BADADDR);
4677 			} else if (rc == GLD_RETRY) {
4678 				/*
4679 				 * The putbq and gld_xwait must be
4680 				 * within the lock to prevent races
4681 				 * with gld_sched.
4682 				 */
4683 				(void) putbq(q, mp);
4684 				gld->gld_xwait = B_TRUE;
4685 				GLDM_UNLOCK(macinfo);
4686 				return (GLDE_RETRY);
4687 			} else if (rc != GLD_SUCCESS) {
4688 				GLDM_UNLOCK(macinfo);
4689 				dlerrorack(q, mp, DL_ENABMULTI_REQ,
4690 				    DL_SYSERR, EIO);
4691 				return (GLDE_OK);
4692 			}
4693 		}
4694 		gld->gld_mcast[i] = mcast;
4695 		mcast->gldm_refcnt++;
4696 		GLDM_UNLOCK(macinfo);
4697 		dlokack(q, mp, DL_ENABMULTI_REQ);
4698 		return (GLDE_OK);
4699 	}
4700 
4701 	/* couldn't get a stream slot */
4702 	GLDM_UNLOCK(macinfo);
4703 	return (DL_TOOMANY);
4704 }
4705 
4706 
4707 /*
4708  * gld_disable_multi (q, mp)
4709  * Disable a multicast address on the stream.  If this is the last
4710  * reference at the mac layer, disable it there as well.
4711  */
4712 static int
4713 gld_disable_multi(queue_t *q, mblk_t *mp)
4714 {
4715 	gld_t  *gld;
4716 	gld_mac_info_t *macinfo;
4717 	unsigned char *maddr;
4718 	dl_disabmulti_req_t *multi;
4719 	int i;
4720 	gld_mcast_t *mcast;
4721 
4722 #ifdef GLD_DEBUG
4723 	if (gld_debug & GLDPROT) {
4724 		cmn_err(CE_NOTE, "gld_disable_multi(%p, %p)", (void *)q,
4725 		    (void *)mp);
4726 	}
4727 #endif
4728 
4729 	gld = (gld_t *)q->q_ptr;
4730 	if (gld->gld_state == DL_UNATTACHED)
4731 		return (DL_OUTSTATE);
4732 
4733 	macinfo = gld->gld_mac_info;
4734 	ASSERT(macinfo != NULL);
4735 	if (macinfo->gldm_set_multicast == NULL) {
4736 		return (DL_UNSUPPORTED);
4737 	}
4738 
4739 	multi = (dl_disabmulti_req_t *)mp->b_rptr;
4740 
4741 	if (!MBLKIN(mp, multi->dl_addr_offset, multi->dl_addr_length) ||
4742 	    multi->dl_addr_length != macinfo->gldm_addrlen)
4743 		return (DL_BADADDR);
4744 
4745 	maddr = mp->b_rptr + multi->dl_addr_offset;
4746 
4747 	/* request appears to be valid */
4748 	/* does this address appear in current table? */
4749 	GLDM_LOCK(macinfo, RW_WRITER);
4750 	if (gld->gld_mcast != NULL) {
4751 		for (i = 0; i < gld->gld_multicnt; i++)
4752 			if (((mcast = gld->gld_mcast[i]) != NULL) &&
4753 			    mac_eq(mcast->gldm_addr,
4754 			    maddr, macinfo->gldm_addrlen)) {
4755 				ASSERT(mcast->gldm_refcnt);
4756 				gld_send_disable_multi(macinfo, mcast);
4757 				gld->gld_mcast[i] = NULL;
4758 				GLDM_UNLOCK(macinfo);
4759 				dlokack(q, mp, DL_DISABMULTI_REQ);
4760 				return (GLDE_OK);
4761 			}
4762 	}
4763 	GLDM_UNLOCK(macinfo);
4764 	return (DL_NOTENAB); /* not an enabled address */
4765 }
4766 
4767 /*
4768  * gld_send_disable_multi(macinfo, mcast)
4769  * this function disables a multicast address when its reference
4770  * count drops to zero.  The disable request is then passed on to the
4771  * MAC driver via its gldm_set_multicast() entry point.
4772  */
4773 static void
4774 gld_send_disable_multi(gld_mac_info_t *macinfo, gld_mcast_t *mcast)
4775 {
4776 	ASSERT(macinfo != NULL);
4777 	ASSERT(GLDM_LOCK_HELD_WRITE(macinfo));
4778 	ASSERT(mcast != NULL);
4779 	ASSERT(mcast->gldm_refcnt);
4780 
4781 	if (!mcast->gldm_refcnt) {
4782 		return;			/* "cannot happen" */
4783 	}
4784 
4785 	if (--mcast->gldm_refcnt > 0) {
4786 		return;
4787 	}
4788 
4789 	/*
4790 	 * This must be converted from canonical form to device form.
4791 	 * The refcnt is now zero so we can trash the data.
4792 	 */
4793 	if (macinfo->gldm_options & GLDOPT_CANONICAL_ADDR)
4794 		gld_bitreverse(mcast->gldm_addr, macinfo->gldm_addrlen);
4795 
4796 	/* XXX Ought to check for GLD_NORESOURCES or GLD_FAILURE */
4797 	(void) (*macinfo->gldm_set_multicast)
4798 	    (macinfo, mcast->gldm_addr, GLD_MULTI_DISABLE);
4799 }
4800 
4801 /*
4802  * gld_promisc (q, mp, req, on)
4803  *	enable or disable the use of promiscuous mode with the hardware
4804  */
4805 static int
4806 gld_promisc(queue_t *q, mblk_t *mp, t_uscalar_t req, boolean_t on)
4807 {
4808 	gld_t *gld;
4809 	gld_mac_info_t *macinfo;
4810 	gld_mac_pvt_t *mac_pvt;
4811 	gld_vlan_t *vlan;
4812 	union DL_primitives *prim;
4813 	int macrc = GLD_SUCCESS;
4814 	int dlerr = GLDE_OK;
4815 	int op = GLD_MAC_PROMISC_NOOP;
4816 
4817 #ifdef GLD_DEBUG
4818 	if (gld_debug & GLDTRACE)
4819 		cmn_err(CE_NOTE, "gld_promisc(%p, %p, %d, %d)",
4820 		    (void *)q, (void *)mp, req, on);
4821 #endif
4822 
4823 	ASSERT(mp != NULL);
4824 	prim = (union DL_primitives *)mp->b_rptr;
4825 
4826 	/* XXX I think spec allows promisc in unattached state */
4827 	gld = (gld_t *)q->q_ptr;
4828 	if (gld->gld_state == DL_UNATTACHED)
4829 		return (DL_OUTSTATE);
4830 
4831 	macinfo = gld->gld_mac_info;
4832 	ASSERT(macinfo != NULL);
4833 	mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt;
4834 
4835 	vlan = (gld_vlan_t *)gld->gld_vlan;
4836 	ASSERT(vlan != NULL);
4837 
4838 	GLDM_LOCK(macinfo, RW_WRITER);
4839 
4840 	/*
4841 	 * Work out what request (if any) has to be made to the MAC layer
4842 	 */
4843 	if (on) {
4844 		switch (prim->promiscon_req.dl_level) {
4845 		default:
4846 			dlerr = DL_UNSUPPORTED;	/* this is an error */
4847 			break;
4848 
4849 		case DL_PROMISC_PHYS:
4850 			if (mac_pvt->nprom == 0)
4851 				op = GLD_MAC_PROMISC_PHYS;
4852 			break;
4853 
4854 		case DL_PROMISC_MULTI:
4855 			if (mac_pvt->nprom_multi == 0)
4856 				if (mac_pvt->nprom == 0)
4857 					op = GLD_MAC_PROMISC_MULTI;
4858 			break;
4859 
4860 		case DL_PROMISC_SAP:
4861 			/* We can do this without reference to the MAC */
4862 			break;
4863 		}
4864 	} else {
4865 		switch (prim->promiscoff_req.dl_level) {
4866 		default:
4867 			dlerr = DL_UNSUPPORTED;	/* this is an error */
4868 			break;
4869 
4870 		case DL_PROMISC_PHYS:
4871 			if (!(gld->gld_flags & GLD_PROM_PHYS))
4872 				dlerr = DL_NOTENAB;
4873 			else if (mac_pvt->nprom == 1)
4874 				if (mac_pvt->nprom_multi)
4875 					op = GLD_MAC_PROMISC_MULTI;
4876 				else
4877 					op = GLD_MAC_PROMISC_NONE;
4878 			break;
4879 
4880 		case DL_PROMISC_MULTI:
4881 			if (!(gld->gld_flags & GLD_PROM_MULT))
4882 				dlerr = DL_NOTENAB;
4883 			else if (mac_pvt->nprom_multi == 1)
4884 				if (mac_pvt->nprom == 0)
4885 					op = GLD_MAC_PROMISC_NONE;
4886 			break;
4887 
4888 		case DL_PROMISC_SAP:
4889 			if (!(gld->gld_flags & GLD_PROM_SAP))
4890 				dlerr = DL_NOTENAB;
4891 
4892 			/* We can do this without reference to the MAC */
4893 			break;
4894 		}
4895 	}
4896 
4897 	/*
4898 	 * The request was invalid in some way so no need to continue.
4899 	 */
4900 	if (dlerr != GLDE_OK) {
4901 		GLDM_UNLOCK(macinfo);
4902 		return (dlerr);
4903 	}
4904 
4905 	/*
4906 	 * Issue the request to the MAC layer, if required
4907 	 */
4908 	if (op != GLD_MAC_PROMISC_NOOP) {
4909 		macrc = (*macinfo->gldm_set_promiscuous)(macinfo, op);
4910 	}
4911 
4912 	/*
4913 	 * On success, update the appropriate flags & refcounts
4914 	 */
4915 	if (macrc == GLD_SUCCESS) {
4916 		if (on) {
4917 			switch (prim->promiscon_req.dl_level) {
4918 			case DL_PROMISC_PHYS:
4919 				mac_pvt->nprom++;
4920 				vlan->gldv_nprom++;
4921 				gld->gld_flags |= GLD_PROM_PHYS;
4922 				break;
4923 
4924 			case DL_PROMISC_MULTI:
4925 				mac_pvt->nprom_multi++;
4926 				vlan->gldv_nprom++;
4927 				gld->gld_flags |= GLD_PROM_MULT;
4928 				break;
4929 
4930 			case DL_PROMISC_SAP:
4931 				gld->gld_flags |= GLD_PROM_SAP;
4932 				vlan->gldv_nprom++;
4933 				vlan->gldv_nvlan_sap++;
4934 				break;
4935 
4936 			default:
4937 				break;
4938 			}
4939 		} else {
4940 			switch (prim->promiscoff_req.dl_level) {
4941 			case DL_PROMISC_PHYS:
4942 				mac_pvt->nprom--;
4943 				vlan->gldv_nprom--;
4944 				gld->gld_flags &= ~GLD_PROM_PHYS;
4945 				break;
4946 
4947 			case DL_PROMISC_MULTI:
4948 				mac_pvt->nprom_multi--;
4949 				vlan->gldv_nprom--;
4950 				gld->gld_flags &= ~GLD_PROM_MULT;
4951 				break;
4952 
4953 			case DL_PROMISC_SAP:
4954 				gld->gld_flags &= ~GLD_PROM_SAP;
4955 				vlan->gldv_nvlan_sap--;
4956 				vlan->gldv_nprom--;
4957 				break;
4958 
4959 			default:
4960 				break;
4961 			}
4962 		}
4963 	} else if (macrc == GLD_RETRY) {
4964 		/*
4965 		 * The putbq and gld_xwait must be within the lock to
4966 		 * prevent races with gld_sched.
4967 		 */
4968 		(void) putbq(q, mp);
4969 		gld->gld_xwait = B_TRUE;
4970 	}
4971 
4972 	GLDM_UNLOCK(macinfo);
4973 
4974 	/*
4975 	 * Finally, decide how to reply.
4976 	 *
4977 	 * If <macrc> is not GLD_SUCCESS, the request was put to the MAC
4978 	 * layer but failed.  In such cases, we can return a DL_* error
4979 	 * code and let the caller send an error-ack reply upstream, or
4980 	 * we can send a reply here and then return GLDE_OK so that the
4981 	 * caller doesn't also respond.
4982 	 *
4983 	 * If physical-promiscuous mode was (successfully) switched on or
4984 	 * off, send a notification (DL_NOTIFY_IND) to anyone interested.
4985 	 */
4986 	switch (macrc) {
4987 	case GLD_NOTSUPPORTED:
4988 		return (DL_NOTSUPPORTED);
4989 
4990 	case GLD_NORESOURCES:
4991 		dlerrorack(q, mp, req, DL_SYSERR, ENOSR);
4992 		return (GLDE_OK);
4993 
4994 	case GLD_RETRY:
4995 		return (GLDE_RETRY);
4996 
4997 	default:
4998 		dlerrorack(q, mp, req, DL_SYSERR, EIO);
4999 		return (GLDE_OK);
5000 
5001 	case GLD_SUCCESS:
5002 		dlokack(q, mp, req);
5003 		break;
5004 	}
5005 
5006 	switch (op) {
5007 	case GLD_MAC_PROMISC_NOOP:
5008 		break;
5009 
5010 	case GLD_MAC_PROMISC_PHYS:
5011 		gld_notify_ind(macinfo, DL_NOTE_PROMISC_ON_PHYS, NULL);
5012 		break;
5013 
5014 	default:
5015 		gld_notify_ind(macinfo, DL_NOTE_PROMISC_OFF_PHYS, NULL);
5016 		break;
5017 	}
5018 
5019 	return (GLDE_OK);
5020 }
5021 
5022 /*
5023  * gld_physaddr()
5024  *	get the current or factory physical address value
5025  */
5026 static int
5027 gld_physaddr(queue_t *q, mblk_t *mp)
5028 {
5029 	gld_t *gld = (gld_t *)q->q_ptr;
5030 	gld_mac_info_t *macinfo;
5031 	union DL_primitives *prim = (union DL_primitives *)mp->b_rptr;
5032 	unsigned char addr[GLD_MAX_ADDRLEN];
5033 
5034 	if (gld->gld_state == DL_UNATTACHED)
5035 		return (DL_OUTSTATE);
5036 
5037 	macinfo = (gld_mac_info_t *)gld->gld_mac_info;
5038 	ASSERT(macinfo != NULL);
5039 	ASSERT(macinfo->gldm_addrlen <= GLD_MAX_ADDRLEN);
5040 
5041 	switch (prim->physaddr_req.dl_addr_type) {
5042 	case DL_FACT_PHYS_ADDR:
5043 		mac_copy((caddr_t)macinfo->gldm_vendor_addr,
5044 		    (caddr_t)addr, macinfo->gldm_addrlen);
5045 		break;
5046 	case DL_CURR_PHYS_ADDR:
5047 		/* make a copy so we don't hold the lock across qreply */
5048 		GLDM_LOCK(macinfo, RW_WRITER);
5049 		mac_copy((caddr_t)
5050 		    ((gld_mac_pvt_t *)macinfo->gldm_mac_pvt)->curr_macaddr,
5051 		    (caddr_t)addr, macinfo->gldm_addrlen);
5052 		GLDM_UNLOCK(macinfo);
5053 		break;
5054 	default:
5055 		return (DL_BADPRIM);
5056 	}
5057 	dlphysaddrack(q, mp, (caddr_t)addr, macinfo->gldm_addrlen);
5058 	return (GLDE_OK);
5059 }
5060 
5061 /*
5062  * gld_setaddr()
5063  *	change the hardware's physical address to a user specified value
5064  */
5065 static int
5066 gld_setaddr(queue_t *q, mblk_t *mp)
5067 {
5068 	gld_t *gld = (gld_t *)q->q_ptr;
5069 	gld_mac_info_t *macinfo;
5070 	gld_mac_pvt_t *mac_pvt;
5071 	union DL_primitives *prim = (union DL_primitives *)mp->b_rptr;
5072 	unsigned char *addr;
5073 	unsigned char cmaddr[GLD_MAX_ADDRLEN];
5074 	int rc;
5075 	gld_vlan_t *vlan;
5076 
5077 	if (gld->gld_state == DL_UNATTACHED)
5078 		return (DL_OUTSTATE);
5079 
5080 	vlan = (gld_vlan_t *)gld->gld_vlan;
5081 	ASSERT(vlan != NULL);
5082 
5083 	if (vlan->gldv_id != VLAN_VID_NONE)
5084 		return (DL_NOTSUPPORTED);
5085 
5086 	macinfo = (gld_mac_info_t *)gld->gld_mac_info;
5087 	ASSERT(macinfo != NULL);
5088 	mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt;
5089 
5090 	if (!MBLKIN(mp, prim->set_physaddr_req.dl_addr_offset,
5091 	    prim->set_physaddr_req.dl_addr_length) ||
5092 	    prim->set_physaddr_req.dl_addr_length != macinfo->gldm_addrlen)
5093 		return (DL_BADADDR);
5094 
5095 	GLDM_LOCK(macinfo, RW_WRITER);
5096 
5097 	/* now do the set at the hardware level */
5098 	addr = mp->b_rptr + prim->set_physaddr_req.dl_addr_offset;
5099 	ASSERT(sizeof (cmaddr) >= macinfo->gldm_addrlen);
5100 	cmac_copy(addr, cmaddr, macinfo->gldm_addrlen, macinfo);
5101 
5102 	rc = (*macinfo->gldm_set_mac_addr)(macinfo, cmaddr);
5103 	if (rc == GLD_SUCCESS)
5104 		mac_copy(addr, mac_pvt->curr_macaddr,
5105 		    macinfo->gldm_addrlen);
5106 
5107 	GLDM_UNLOCK(macinfo);
5108 
5109 	switch (rc) {
5110 	case GLD_SUCCESS:
5111 		break;
5112 	case GLD_NOTSUPPORTED:
5113 		return (DL_NOTSUPPORTED);
5114 	case GLD_BADARG:
5115 		return (DL_BADADDR);
5116 	case GLD_NORESOURCES:
5117 		dlerrorack(q, mp, DL_SET_PHYS_ADDR_REQ, DL_SYSERR, ENOSR);
5118 		return (GLDE_OK);
5119 	default:
5120 		dlerrorack(q, mp, DL_SET_PHYS_ADDR_REQ, DL_SYSERR, EIO);
5121 		return (GLDE_OK);
5122 	}
5123 
5124 	gld_notify_ind(macinfo, DL_NOTE_PHYS_ADDR, NULL);
5125 
5126 	dlokack(q, mp, DL_SET_PHYS_ADDR_REQ);
5127 	return (GLDE_OK);
5128 }
5129 
5130 int
5131 gld_get_statistics(queue_t *q, mblk_t *mp)
5132 {
5133 	dl_get_statistics_ack_t *dlsp;
5134 	gld_t  *gld = (gld_t *)q->q_ptr;
5135 	gld_mac_info_t *macinfo = gld->gld_mac_info;
5136 	gld_mac_pvt_t *mac_pvt;
5137 
5138 	if (gld->gld_state == DL_UNATTACHED)
5139 		return (DL_OUTSTATE);
5140 
5141 	ASSERT(macinfo != NULL);
5142 
5143 	mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt;
5144 	(void) gld_update_kstat(mac_pvt->kstatp, KSTAT_READ);
5145 
5146 	mp = mexchange(q, mp, DL_GET_STATISTICS_ACK_SIZE +
5147 	    sizeof (struct gldkstats), M_PCPROTO, DL_GET_STATISTICS_ACK);
5148 
5149 	if (mp == NULL)
5150 		return (GLDE_OK);	/* mexchange already sent merror */
5151 
5152 	dlsp = (dl_get_statistics_ack_t *)mp->b_rptr;
5153 	dlsp->dl_primitive = DL_GET_STATISTICS_ACK;
5154 	dlsp->dl_stat_length = sizeof (struct gldkstats);
5155 	dlsp->dl_stat_offset = DL_GET_STATISTICS_ACK_SIZE;
5156 
5157 	GLDM_LOCK(macinfo, RW_WRITER);
5158 	bcopy(mac_pvt->kstatp->ks_data,
5159 	    (mp->b_rptr + DL_GET_STATISTICS_ACK_SIZE),
5160 	    sizeof (struct gldkstats));
5161 	GLDM_UNLOCK(macinfo);
5162 
5163 	qreply(q, mp);
5164 	return (GLDE_OK);
5165 }
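
/*
 * Illustrative sketch (hypothetical consumer, not part of this file):
 * the DL_GET_STATISTICS_ACK built above places a struct gldkstats
 * immediately after the fixed-size ack header, so a reader of the
 * M_PCPROTO reply could locate the counters as:
 *
 *	dl_get_statistics_ack_t *ack = (dl_get_statistics_ack_t *)mp->b_rptr;
 *	struct gldkstats *ks =
 *	    (struct gldkstats *)(mp->b_rptr + ack->dl_stat_offset);
 *
 *	ASSERT(ack->dl_stat_length == sizeof (struct gldkstats));
 */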
5166 
5167 /* =================================================== */
5168 /* misc utilities, some requiring various mutexes held */
5169 /* =================================================== */
5170 
5171 /*
5172  * Initialize and start the driver.
5173  */
5174 static int
5175 gld_start_mac(gld_mac_info_t *macinfo)
5176 {
5177 	int	rc;
5178 	unsigned char cmaddr[GLD_MAX_ADDRLEN];
5179 	gld_mac_pvt_t *mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt;
5180 
5181 	ASSERT(GLDM_LOCK_HELD_WRITE(macinfo));
5182 	ASSERT(!mac_pvt->started);
5183 
5184 	rc = (*macinfo->gldm_reset)(macinfo);
5185 	if (rc != GLD_SUCCESS)
5186 		return (GLD_FAILURE);
5187 
5188 	/* set the addr after we reset the device */
5189 	ASSERT(sizeof (cmaddr) >= macinfo->gldm_addrlen);
5190 	cmac_copy(((gld_mac_pvt_t *)macinfo->gldm_mac_pvt)
5191 	    ->curr_macaddr, cmaddr, macinfo->gldm_addrlen, macinfo);
5192 
5193 	rc = (*macinfo->gldm_set_mac_addr)(macinfo, cmaddr);
5194 	ASSERT(rc != GLD_BADARG);  /* this address was good before */
5195 	if (rc != GLD_SUCCESS && rc != GLD_NOTSUPPORTED)
5196 		return (GLD_FAILURE);
5197 
5198 	rc = (*macinfo->gldm_start)(macinfo);
5199 	if (rc != GLD_SUCCESS)
5200 		return (GLD_FAILURE);
5201 
5202 	mac_pvt->started = B_TRUE;
5203 	return (GLD_SUCCESS);
5204 }
5205 
5206 /*
5207  * Stop the driver.
5208  */
5209 static void
5210 gld_stop_mac(gld_mac_info_t *macinfo)
5211 {
5212 	gld_mac_pvt_t *mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt;
5213 
5214 	ASSERT(GLDM_LOCK_HELD_WRITE(macinfo));
5215 	ASSERT(mac_pvt->started);
5216 
5217 	(void) (*macinfo->gldm_stop)(macinfo);
5218 
5219 	mac_pvt->started = B_FALSE;
5220 }
5221 
5222 
5223 /*
5224  * gld_set_ipq will set a pointer to the queue which is bound to the
5225  * IP sap if:
5226  * o the device type is ethernet or IPoIB.
5227  * o there is no stream in SAP promiscuous mode.
5228  * o there is exactly one stream bound to the IP sap.
5229  * o the stream is in "fastpath" mode.
5230  */
5231 static void
5232 gld_set_ipq(gld_t *gld)
5233 {
5234 	gld_vlan_t	*vlan;
5235 	gld_mac_info_t	*macinfo = gld->gld_mac_info;
5236 	gld_t		*ip_gld = NULL;
5237 	uint_t		ipq_candidates = 0;
5238 	gld_t		*ipv6_gld = NULL;
5239 	uint_t		ipv6q_candidates = 0;
5240 
5241 	ASSERT(GLDM_LOCK_HELD_WRITE(macinfo));
5242 
5243 	/* The ipq code in gld_recv() is intimate with ethernet/IPoIB */
5244 	if (((macinfo->gldm_type != DL_ETHER) &&
5245 	    (macinfo->gldm_type != DL_IB)) ||
5246 	    (gld_global_options & GLD_OPT_NO_IPQ))
5247 		return;
5248 
5249 	vlan = (gld_vlan_t *)gld->gld_vlan;
5250 	ASSERT(vlan != NULL);
5251 
5252 	/* clear down any previously defined ipqs */
5253 	vlan->gldv_ipq = NULL;
5254 	vlan->gldv_ipv6q = NULL;
5255 
5256 	/* Try to find a single stream eligible to receive IP packets */
5257 	for (gld = vlan->gldv_str_next;
5258 	    gld != (gld_t *)&vlan->gldv_str_next; gld = gld->gld_next) {
5259 		if (gld->gld_state != DL_IDLE)
5260 			continue;	/* not eligible to receive */
5261 		if (gld->gld_flags & GLD_STR_CLOSING)
5262 			continue;	/* not eligible to receive */
5263 
5264 		if (gld->gld_sap == ETHERTYPE_IP) {
5265 			ip_gld = gld;
5266 			ipq_candidates++;
5267 		}
5268 
5269 		if (gld->gld_sap == ETHERTYPE_IPV6) {
5270 			ipv6_gld = gld;
5271 			ipv6q_candidates++;
5272 		}
5273 	}
5274 
5275 	if (ipq_candidates == 1) {
5276 		ASSERT(ip_gld != NULL);
5277 
5278 		if (ip_gld->gld_flags & GLD_FAST)	/* eligible for ipq */
5279 			vlan->gldv_ipq = ip_gld->gld_qptr;
5280 	}
5281 
5282 	if (ipv6q_candidates == 1) {
5283 		ASSERT(ipv6_gld != NULL);
5284 
5285 		if (ipv6_gld->gld_flags & GLD_FAST)	/* eligible for ipq */
5286 			vlan->gldv_ipv6q = ipv6_gld->gld_qptr;
5287 	}
5288 }
5289 
5290 /*
5291  * gld_flushqueue (q)
5292  *	used by DLPI primitives that require flushing the queues.
5293  *	essentially, this is DL_UNBIND_REQ.
5294  */
5295 static void
5296 gld_flushqueue(queue_t *q)
5297 {
5298 	/* flush all data in both queues */
5299 	/* XXX Should these be FLUSHALL? */
5300 	flushq(q, FLUSHDATA);
5301 	flushq(WR(q), FLUSHDATA);
5302 	/* flush all the queues upstream */
5303 	(void) putctl1(q, M_FLUSH, FLUSHRW);
5304 }
5305 
5306 /*
5307  * gld_devlookup (major)
5308  * search the device table for the device with specified
5309  * major number and return a pointer to it if it exists
5310  */
5311 static glddev_t *
5312 gld_devlookup(int major)
5313 {
5314 	struct glddevice *dev;
5315 
5316 	ASSERT(mutex_owned(&gld_device_list.gld_devlock));
5317 
5318 	for (dev = gld_device_list.gld_next;
5319 	    dev != &gld_device_list;
5320 	    dev = dev->gld_next) {
5321 		ASSERT(dev);
5322 		if (dev->gld_major == major)
5323 			return (dev);
5324 	}
5325 	return (NULL);
5326 }
5327 
5328 /*
5329  * gld_findminor(device)
5330  * Returns a minor number currently unused by any stream in the current
5331  * device class (major) list.
5332  */
5333 static int
5334 gld_findminor(glddev_t *device)
5335 {
5336 	gld_t		*next;
5337 	gld_mac_info_t	*nextmac;
5338 	gld_vlan_t	*nextvlan;
5339 	int		minor;
5340 	int		i;
5341 
5342 	ASSERT(mutex_owned(&device->gld_devlock));
5343 
5344 	/* The fast way */
5345 	if (device->gld_nextminor >= GLD_MIN_CLONE_MINOR &&
5346 	    device->gld_nextminor <= GLD_MAX_CLONE_MINOR)
5347 		return (device->gld_nextminor++);
5348 
5349 	/* The steady way */
5350 	for (minor = GLD_MIN_CLONE_MINOR; minor <= GLD_MAX_CLONE_MINOR;
5351 	    minor++) {
5352 		/* Search all unattached streams */
5353 		for (next = device->gld_str_next;
5354 		    next != (gld_t *)&device->gld_str_next;
5355 		    next = next->gld_next) {
5356 			if (minor == next->gld_minor)
5357 				goto nextminor;
5358 		}
5359 		/* Search all attached streams; we don't need maclock because */
5360 		/* mac stream list is protected by devlock as well as maclock */
5361 		for (nextmac = device->gld_mac_next;
5362 		    nextmac != (gld_mac_info_t *)&device->gld_mac_next;
5363 		    nextmac = nextmac->gldm_next) {
5364 			gld_mac_pvt_t *pvt =
5365 			    (gld_mac_pvt_t *)nextmac->gldm_mac_pvt;
5366 
5367 			if (!(nextmac->gldm_GLD_flags & GLD_MAC_READY))
5368 				continue;	/* this one's not ready yet */
5369 
5370 			for (i = 0; i < VLAN_HASHSZ; i++) {
5371 				for (nextvlan = pvt->vlan_hash[i];
5372 				    nextvlan != NULL;
5373 				    nextvlan = nextvlan->gldv_next) {
5374 					for (next = nextvlan->gldv_str_next;
5375 					    next !=
5376 					    (gld_t *)&nextvlan->gldv_str_next;
5377 					    next = next->gld_next) {
5378 						if (minor == next->gld_minor)
5379 							goto nextminor;
5380 					}
5381 				}
5382 			}
5383 		}
5384 
5385 		return (minor);
5386 nextminor:
5387 		/* don't need to do anything */
5388 		;
5389 	}
5390 	cmn_err(CE_WARN, "GLD ran out of minor numbers for %s",
5391 	    device->gld_name);
5392 	return (0);
5393 }
5394 
5395 /*
5396  * version of insque/remque for use by this driver
5397  */
5398 struct qelem {
5399 	struct qelem *q_forw;
5400 	struct qelem *q_back;
5401 	/* rest of structure */
5402 };
5403 
5404 static void
5405 gldinsque(void *elem, void *pred)
5406 {
5407 	struct qelem *pelem = elem;
5408 	struct qelem *ppred = pred;
5409 	struct qelem *pnext = ppred->q_forw;
5410 
5411 	pelem->q_forw = pnext;
5412 	pelem->q_back = ppred;
5413 	ppred->q_forw = pelem;
5414 	pnext->q_back = pelem;
5415 }
5416 
5417 static void
5418 gldremque(void *arg)
5419 {
5420 	struct qelem *pelem = arg;
5421 	struct qelem *elem = arg;
5422 
5423 	pelem->q_forw->q_back = pelem->q_back;
5424 	pelem->q_back->q_forw = pelem->q_forw;
5425 	elem->q_back = elem->q_forw = NULL;
5426 }
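
/*
 * Illustrative sketch (hypothetical list, not part of this file):
 * gldinsque()/gldremque() maintain a circular doubly-linked list whose
 * head points back at itself when the list is empty:
 *
 *	struct qelem head, node;
 *
 *	head.q_forw = head.q_back = &head;	(empty list)
 *	gldinsque(&node, &head);	(now head.q_forw == &node and
 *					node.q_back == &head)
 *	gldremque(&node);		(head points at itself again)
 */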
5427 
5428 static gld_vlan_t *
5429 gld_add_vlan(gld_mac_info_t *macinfo, uint32_t vid)
5430 {
5431 	gld_mac_pvt_t	*mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt;
5432 	gld_vlan_t	**pp;
5433 	gld_vlan_t	*p;
5434 
5435 	pp = &(mac_pvt->vlan_hash[vid % VLAN_HASHSZ]);
5436 	while ((p = *pp) != NULL) {
5437 		ASSERT(p->gldv_id != vid);
5438 		pp = &(p->gldv_next);
5439 	}
5440 
5441 	if ((p = kmem_zalloc(sizeof (gld_vlan_t), KM_NOSLEEP)) == NULL)
5442 		return (NULL);
5443 
5444 	p->gldv_mac = macinfo;
5445 	p->gldv_id = vid;
5446 
5447 	if (vid == VLAN_VID_NONE) {
5448 		p->gldv_ptag = VLAN_VTAG_NONE;
5449 		p->gldv_stats = mac_pvt->statistics;
5450 		p->gldv_kstatp = NULL;
5451 	} else {
5452 		p->gldv_ptag = GLD_MK_PTAG(VLAN_CFI_ETHER, vid);
5453 		p->gldv_stats = kmem_zalloc(sizeof (struct gld_stats),
5454 		    KM_SLEEP);
5455 
5456 		if (gld_init_vlan_stats(p) != GLD_SUCCESS) {
5457 			kmem_free(p->gldv_stats, sizeof (struct gld_stats));
5458 			kmem_free(p, sizeof (gld_vlan_t));
5459 			return (NULL);
5460 		}
5461 	}
5462 
5463 	p->gldv_str_next = p->gldv_str_prev = (gld_t *)&p->gldv_str_next;
5464 	mac_pvt->nvlan++;
5465 	*pp = p;
5466 
5467 	return (p);
5468 }
5469 
5470 static void
5471 gld_rem_vlan(gld_vlan_t *vlan)
5472 {
5473 	gld_mac_info_t	*macinfo = vlan->gldv_mac;
5474 	gld_mac_pvt_t	*mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt;
5475 	gld_vlan_t	**pp;
5476 	gld_vlan_t	*p;
5477 
5478 	pp = &(mac_pvt->vlan_hash[vlan->gldv_id % VLAN_HASHSZ]);
5479 	while ((p = *pp) != NULL) {
5480 		if (p->gldv_id == vlan->gldv_id)
5481 			break;
5482 		pp = &(p->gldv_next);
5483 	}
5484 	ASSERT(p != NULL);
5485 
5486 	*pp = p->gldv_next;
5487 	mac_pvt->nvlan--;
5488 	if (p->gldv_id != VLAN_VID_NONE) {
5489 		ASSERT(p->gldv_kstatp != NULL);
5490 		kstat_delete(p->gldv_kstatp);
5491 		kmem_free(p->gldv_stats, sizeof (struct gld_stats));
5492 	}
5493 	kmem_free(p, sizeof (gld_vlan_t));
5494 }
5495 
5496 gld_vlan_t *
5497 gld_find_vlan(gld_mac_info_t *macinfo, uint32_t vid)
5498 {
5499 	gld_mac_pvt_t	*mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt;
5500 	gld_vlan_t	*p;
5501 
5502 	p = mac_pvt->vlan_hash[vid % VLAN_HASHSZ];
5503 	while (p != NULL) {
5504 		if (p->gldv_id == vid)
5505 			return (p);
5506 		p = p->gldv_next;
5507 	}
5508 	return (NULL);
5509 }
5510 
5511 gld_vlan_t *
5512 gld_get_vlan(gld_mac_info_t *macinfo, uint32_t vid)
5513 {
5514 	gld_vlan_t	*vlan;
5515 
5516 	if ((vlan = gld_find_vlan(macinfo, vid)) == NULL)
5517 		vlan = gld_add_vlan(macinfo, vid);
5518 
5519 	return (vlan);
5520 }
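
/*
 * Illustrative sketch (the vid value is hypothetical): VLANs hash into
 * mac_pvt->vlan_hash[] by vid % VLAN_HASHSZ and chain through gldv_next,
 * so gld_get_vlan() behaves as a find-or-create lookup:
 *
 *	gld_vlan_t *vlan = gld_get_vlan(macinfo, 100);
 *
 *	(vlan is NULL only if a new entry could not be allocated;
 *	otherwise vlan->gldv_id == 100)
 */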
5521 
5522 /*
5523  * gld_bitrevcopy()
5524  * This is essentially bcopy, with the ability to bit reverse the
5525  * source bytes. The MAC address bytes as transmitted by FDDI
5526  * interfaces are bit reversed.
5527  */
5528 void
5529 gld_bitrevcopy(caddr_t src, caddr_t target, size_t n)
5530 {
5531 	while (n--)
5532 		*target++ = bit_rev[(uchar_t)*src++];
5533 }
5534 
5535 /*
5536  * gld_bitreverse()
5537  * Reverse the bit order of each byte in place, using a
5538  * lookup table.
5539  */
5540 void
5541 gld_bitreverse(uchar_t *rptr, size_t n)
5542 {
5543 	while (n--) {
5544 		*rptr = bit_rev[*rptr];
5545 		rptr++;
5546 	}
5547 }
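
/*
 * Illustrative examples: bit_rev[] maps each byte to its mirror image,
 * exchanging the most- and least-significant bits and so on inward,
 * e.g.:
 *
 *	bit_rev[0x01] == 0x80
 *	bit_rev[0xa0] == 0x05
 *	bit_rev[bit_rev[b]] == b	(for any byte b)
 */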
5548 
5549 char *
5550 gld_macaddr_sprintf(char *etherbuf, unsigned char *ap, int len)
5551 {
5552 	int i;
5553 	char *cp = etherbuf;
5554 	static char digits[] = "0123456789abcdef";
5555 
5556 	for (i = 0; i < len; i++) {
5557 		*cp++ = digits[*ap >> 4];
5558 		*cp++ = digits[*ap++ & 0xf];
5559 		*cp++ = ':';
5560 	}
5561 	*--cp = 0;
5562 	return (etherbuf);
5563 }
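
/*
 * Illustrative usage (hypothetical caller, not part of this file):
 * gld_macaddr_sprintf() writes two hex digits and a ':' per address
 * byte, then overwrites the final ':' with the NUL terminator, so the
 * buffer needs at least 3 * len bytes:
 *
 *	char buf[3 * GLD_MAX_ADDRLEN];
 *
 *	(void) gld_macaddr_sprintf(buf, macinfo->gldm_vendor_addr,
 *	    macinfo->gldm_addrlen);
 *	(buf now holds something like "00:a0:c9:12:34:56")
 */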
5564 
5565 #ifdef GLD_DEBUG
5566 static void
5567 gld_check_assertions()
5568 {
5569 	glddev_t	*dev;
5570 	gld_mac_info_t	*mac;
5571 	gld_t		*str;
5572 	gld_vlan_t	*vlan;
5573 	int		i;
5574 
5575 	mutex_enter(&gld_device_list.gld_devlock);
5576 
5577 	for (dev = gld_device_list.gld_next;
5578 	    dev != (glddev_t *)&gld_device_list.gld_next;
5579 	    dev = dev->gld_next) {
5580 		mutex_enter(&dev->gld_devlock);
5581 		ASSERT(dev->gld_broadcast != NULL);
5582 		for (str = dev->gld_str_next;
5583 		    str != (gld_t *)&dev->gld_str_next;
5584 		    str = str->gld_next) {
5585 			ASSERT(str->gld_device == dev);
5586 			ASSERT(str->gld_mac_info == NULL);
5587 			ASSERT(str->gld_qptr != NULL);
5588 			ASSERT(str->gld_minor >= GLD_MIN_CLONE_MINOR);
5589 			ASSERT(str->gld_multicnt == 0);
5590 			ASSERT(str->gld_mcast == NULL);
5591 			ASSERT(!(str->gld_flags &
5592 			    (GLD_PROM_PHYS|GLD_PROM_MULT|GLD_PROM_SAP)));
5593 			ASSERT(str->gld_sap == 0);
5594 			ASSERT(str->gld_state == DL_UNATTACHED);
5595 		}
5596 		for (mac = dev->gld_mac_next;
5597 		    mac != (gld_mac_info_t *)&dev->gld_mac_next;
5598 		    mac = mac->gldm_next) {
5599 			int nvlan = 0;
5600 			gld_mac_pvt_t *pvt = (gld_mac_pvt_t *)mac->gldm_mac_pvt;
5601 
5602 			if (!(mac->gldm_GLD_flags & GLD_MAC_READY))
5603 				continue;	/* this one's not ready yet */
5604 
5605 			GLDM_LOCK(mac, RW_WRITER);
5606 			ASSERT(mac->gldm_devinfo != NULL);
5607 			ASSERT(mac->gldm_mac_pvt != NULL);
5608 			ASSERT(pvt->interfacep != NULL);
5609 			ASSERT(pvt->kstatp != NULL);
5610 			ASSERT(pvt->statistics != NULL);
5611 			ASSERT(pvt->major_dev == dev);
5612 
5613 			for (i = 0; i < VLAN_HASHSZ; i++) {
5614 				for (vlan = pvt->vlan_hash[i];
5615 				    vlan != NULL; vlan = vlan->gldv_next) {
5616 					int nstr = 0;
5617 
5618 					ASSERT(vlan->gldv_mac == mac);
5619 
5620 					for (str = vlan->gldv_str_next;
5621 					    str !=
5622 					    (gld_t *)&vlan->gldv_str_next;
5623 					    str = str->gld_next) {
5624 						ASSERT(str->gld_device == dev);
5625 						ASSERT(str->gld_mac_info ==
5626 						    mac);
5627 						ASSERT(str->gld_qptr != NULL);
5628 						ASSERT(str->gld_minor >=
5629 						    GLD_MIN_CLONE_MINOR);
5630 						ASSERT(
5631 						    str->gld_multicnt == 0 ||
5632 						    str->gld_mcast);
5633 						nstr++;
5634 					}
5635 					ASSERT(vlan->gldv_nstreams == nstr);
5636 					nvlan++;
5637 				}
5638 			}
5639 			ASSERT(pvt->nvlan == nvlan);
5640 			GLDM_UNLOCK(mac);
5641 		}
5642 		mutex_exit(&dev->gld_devlock);
5643 	}
5644 	mutex_exit(&gld_device_list.gld_devlock);
5645 }
5646 #endif
5647