1*b494511aSVenki Rajagopalan /*
2*b494511aSVenki Rajagopalan * CDDL HEADER START
3*b494511aSVenki Rajagopalan *
4*b494511aSVenki Rajagopalan * The contents of this file are subject to the terms of the
5*b494511aSVenki Rajagopalan * Common Development and Distribution License (the "License").
6*b494511aSVenki Rajagopalan * You may not use this file except in compliance with the License.
7*b494511aSVenki Rajagopalan *
8*b494511aSVenki Rajagopalan * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9*b494511aSVenki Rajagopalan * or http://www.opensolaris.org/os/licensing.
10*b494511aSVenki Rajagopalan * See the License for the specific language governing permissions
11*b494511aSVenki Rajagopalan * and limitations under the License.
12*b494511aSVenki Rajagopalan *
13*b494511aSVenki Rajagopalan * When distributing Covered Code, include this CDDL HEADER in each
14*b494511aSVenki Rajagopalan * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15*b494511aSVenki Rajagopalan * If applicable, add the following below this CDDL HEADER, with the
16*b494511aSVenki Rajagopalan * fields enclosed by brackets "[]" replaced with your own identifying
17*b494511aSVenki Rajagopalan * information: Portions Copyright [yyyy] [name of copyright owner]
18*b494511aSVenki Rajagopalan *
19*b494511aSVenki Rajagopalan * CDDL HEADER END
20*b494511aSVenki Rajagopalan */
21*b494511aSVenki Rajagopalan
22*b494511aSVenki Rajagopalan /*
23*b494511aSVenki Rajagopalan * Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved.
24*b494511aSVenki Rajagopalan */
25*b494511aSVenki Rajagopalan
26*b494511aSVenki Rajagopalan #include <sys/types.h>
27*b494511aSVenki Rajagopalan #include <sys/kmem.h>
28*b494511aSVenki Rajagopalan #include <sys/conf.h>
29*b494511aSVenki Rajagopalan #include <sys/ddi.h>
30*b494511aSVenki Rajagopalan #include <sys/sunddi.h>
31*b494511aSVenki Rajagopalan #include <sys/ksynch.h>
32*b494511aSVenki Rajagopalan #include <sys/callb.h>
33*b494511aSVenki Rajagopalan #include <sys/mac_provider.h>
34*b494511aSVenki Rajagopalan
35*b494511aSVenki Rajagopalan #include <sys/ib/clients/eoib/eib_impl.h>
36*b494511aSVenki Rajagopalan
37*b494511aSVenki Rajagopalan /*
38*b494511aSVenki Rajagopalan * Thread to handle EoIB events asynchronously
39*b494511aSVenki Rajagopalan */
void
eib_events_handler(eib_t *ss)
{
	eib_event_t *evi;
	eib_event_t *nxt;
	kmutex_t ci_lock;
	callb_cpr_t ci;

	/*
	 * Register with the CPR (checkpoint/resume) framework so the
	 * system can suspend while this thread is blocked.  ci_lock
	 * exists solely to satisfy the CALLB_CPR protocol.
	 */
	mutex_init(&ci_lock, NULL, MUTEX_DRIVER, NULL);
	CALLB_CPR_INIT(&ci, &ci_lock, callb_generic_cpr, EIB_EVENTS_HDLR);

wait_for_event:
	/*
	 * Block until an event is queued on ss->ei_event, marking
	 * ourselves CPR-safe for the duration of the wait.
	 */
	mutex_enter(&ss->ei_ev_lock);
	while ((evi = ss->ei_event) == NULL) {
		mutex_enter(&ci_lock);
		CALLB_CPR_SAFE_BEGIN(&ci);
		mutex_exit(&ci_lock);

		cv_wait(&ss->ei_ev_cv, &ss->ei_ev_lock);

		mutex_enter(&ci_lock);
		CALLB_CPR_SAFE_END(&ci, &ci_lock);
		mutex_exit(&ci_lock);
	}

	/*
	 * Are we being asked to die ?  A shutdown notice is always
	 * queued at the head of the list (see eib_svc_enqueue_event),
	 * so checking the first element is sufficient.  Free the
	 * entire remaining work list and exit the CPR protocol
	 * (CALLB_CPR_EXIT drops ci_lock itself).
	 */
	if (evi->ev_code == EIB_EV_SHUTDOWN) {
		while (evi) {
			nxt = evi->ev_next;
			kmem_free(evi, sizeof (eib_event_t));
			evi = nxt;
		}
		ss->ei_event = NULL;
		mutex_exit(&ss->ei_ev_lock);

		mutex_enter(&ci_lock);
		CALLB_CPR_EXIT(&ci);
		mutex_destroy(&ci_lock);

		return;
	}

	/*
	 * Otherwise, pull out the first entry from our work queue
	 */
	ss->ei_event = evi->ev_next;
	evi->ev_next = NULL;

	mutex_exit(&ss->ei_ev_lock);

	/*
	 * Process this event
	 *
	 * Note that we don't want to race with plumb/unplumb in this
	 * handler, since we may have to restart vnics or do stuff that
	 * may get re-initialized or released if we allowed plumb/unplumb
	 * to happen in parallel.
	 */
	eib_mac_set_nic_state(ss, EIB_NIC_RESTARTING);

	switch (evi->ev_code) {
	case EIB_EV_PORT_DOWN:
		EIB_DPRINTF_DEBUG(ss->ei_instance,
		    "eib_events_handler: Begin EIB_EV_PORT_DOWN");

		eib_mac_link_down(ss, B_FALSE);

		EIB_DPRINTF_DEBUG(ss->ei_instance,
		    "eib_events_handler: End EIB_EV_PORT_DOWN");
		break;

	case EIB_EV_PORT_UP:
		EIB_DPRINTF_DEBUG(ss->ei_instance,
		    "eib_events_handler: Begin EIB_EV_PORT_UP");

		eib_ibt_link_mod(ss);

		EIB_DPRINTF_DEBUG(ss->ei_instance,
		    "eib_events_handler: End EIB_EV_PORT_UP");
		break;

	case EIB_EV_PKEY_CHANGE:
		EIB_DPRINTF_DEBUG(ss->ei_instance,
		    "eib_events_handler: Begin EIB_EV_PKEY_CHANGE");

		eib_ibt_link_mod(ss);

		EIB_DPRINTF_DEBUG(ss->ei_instance,
		    "eib_events_handler: End EIB_EV_PKEY_CHANGE");
		break;

	case EIB_EV_SGID_CHANGE:
		EIB_DPRINTF_DEBUG(ss->ei_instance,
		    "eib_events_handler: Begin EIB_EV_SGID_CHANGE");

		eib_ibt_link_mod(ss);

		EIB_DPRINTF_DEBUG(ss->ei_instance,
		    "eib_events_handler: End EIB_EV_SGID_CHANGE");
		break;

	case EIB_EV_CLNT_REREG:
		EIB_DPRINTF_DEBUG(ss->ei_instance,
		    "eib_events_handler: Begin EIB_EV_CLNT_REREG");

		eib_ibt_link_mod(ss);

		EIB_DPRINTF_DEBUG(ss->ei_instance,
		    "eib_events_handler: End EIB_EV_CLNT_REREG");
		break;

	case EIB_EV_GW_UP:
		EIB_DPRINTF_DEBUG(ss->ei_instance,
		    "eib_events_handler: Begin EIB_EV_GW_UP");

		/*
		 * EoIB nexus has notified us that our gateway is now
		 * reachable. Unless we already think it is reachable,
		 * mark it so in our records and try to resurrect dead
		 * vnics.
		 */
		mutex_enter(&ss->ei_vnic_lock);
		if (ss->ei_gw_unreachable == B_FALSE) {
			EIB_DPRINTF_DEBUG(ss->ei_instance,
			    "eib_events_handler: gw reachable");
			mutex_exit(&ss->ei_vnic_lock);

			EIB_DPRINTF_DEBUG(ss->ei_instance,
			    "eib_events_handler: End EIB_EV_GW_UP");
			break;
		}
		ss->ei_gw_unreachable = B_FALSE;
		mutex_exit(&ss->ei_vnic_lock);

		/*
		 * If we've not even started yet, we have nothing to do.
		 */
		if ((ss->ei_node_state->ns_nic_state & EIB_NIC_STARTED) == 0) {
			EIB_DPRINTF_DEBUG(ss->ei_instance,
			    "eib_events_handler: End EIB_EV_GW_UP");
			break;
		}

		if (eib_mac_hca_portstate(ss, NULL, NULL) != EIB_E_SUCCESS) {
			EIB_DPRINTF_DEBUG(ss->ei_instance,
			    "eib_events_handler: "
			    "HCA portstate failed, marking link down");

			eib_mac_link_down(ss, B_FALSE);
		} else {
			uint8_t vn0_mac[ETHERADDRL];

			EIB_DPRINTF_DEBUG(ss->ei_instance,
			    "eib_events_handler: "
			    "HCA portstate ok, resurrecting zombies");

			/*
			 * vn0_mac starts as the all-zero mac;
			 * eib_vnic_resurrect_zombies() may fill it in
			 * with the new unicast address of vnic 0.
			 */
			bcopy(eib_zero_mac, vn0_mac, ETHERADDRL);
			eib_vnic_resurrect_zombies(ss, vn0_mac);

			/*
			 * If we've resurrected the zombies because the gateway
			 * went down and came back, it is possible our unicast
			 * mac address changed from what it was earlier. If
			 * so, we need to update our unicast address with the
			 * mac layer before marking the link up.
			 */
			if (bcmp(vn0_mac, eib_zero_mac, ETHERADDRL) != 0) {
				EIB_DPRINTF_DEBUG(ss->ei_instance,
				    "eib_events_handler: updating unicast "
				    "addr to %x:%x:%x:%x:%x:%x", vn0_mac[0],
				    vn0_mac[1], vn0_mac[2], vn0_mac[3],
				    vn0_mac[4], vn0_mac[5]);

				mac_unicst_update(ss->ei_mac_hdl, vn0_mac);
			}

			EIB_DPRINTF_DEBUG(ss->ei_instance,
			    "eib_events_handler: eib_mac_link_up(B_FALSE)");

			eib_mac_link_up(ss, B_FALSE);
		}

		EIB_DPRINTF_DEBUG(ss->ei_instance,
		    "eib_events_handler: End EIB_EV_GW_UP");
		break;

	case EIB_EV_GW_INFO_UPDATE:
		EIB_DPRINTF_DEBUG(ss->ei_instance,
		    "eib_events_handler: Begin EIB_EV_GW_INFO_UPDATE");

		/*
		 * ev_arg, when present, carries a kmem-allocated
		 * eib_gw_info_t that we own and must free here.
		 */
		if (evi->ev_arg) {
			eib_update_props(ss, (eib_gw_info_t *)(evi->ev_arg));
			kmem_free(evi->ev_arg, sizeof (eib_gw_info_t));
		}

		EIB_DPRINTF_DEBUG(ss->ei_instance,
		    "eib_events_handler: End EIB_EV_GW_INFO_UPDATE");
		break;

	case EIB_EV_MCG_DELETED:
		EIB_DPRINTF_DEBUG(ss->ei_instance,
		    "eib_events_handler: Begin-End EIB_EV_MCG_DELETED");
		break;

	case EIB_EV_MCG_CREATED:
		EIB_DPRINTF_DEBUG(ss->ei_instance,
		    "eib_events_handler: Begin-End EIB_EV_MCG_CREATED");
		break;

	case EIB_EV_GW_EPORT_DOWN:
		EIB_DPRINTF_DEBUG(ss->ei_instance,
		    "eib_events_handler: Begin-End EIB_EV_GW_EPORT_DOWN");
		break;

	case EIB_EV_GW_DOWN:
		EIB_DPRINTF_DEBUG(ss->ei_instance,
		    "eib_events_handler: Begin-End EIB_EV_GW_DOWN");
		break;
	}

	eib_mac_clr_nic_state(ss, EIB_NIC_RESTARTING);

	/* Done with this event; free it and go wait for the next one */
	kmem_free(evi, sizeof (eib_event_t));
	goto wait_for_event;

	/*NOTREACHED*/
}
269*b494511aSVenki Rajagopalan
270*b494511aSVenki Rajagopalan void
eib_svc_enqueue_event(eib_t * ss,eib_event_t * evi)271*b494511aSVenki Rajagopalan eib_svc_enqueue_event(eib_t *ss, eib_event_t *evi)
272*b494511aSVenki Rajagopalan {
273*b494511aSVenki Rajagopalan eib_event_t *elem = NULL;
274*b494511aSVenki Rajagopalan eib_event_t *tail = NULL;
275*b494511aSVenki Rajagopalan
276*b494511aSVenki Rajagopalan mutex_enter(&ss->ei_ev_lock);
277*b494511aSVenki Rajagopalan
278*b494511aSVenki Rajagopalan /*
279*b494511aSVenki Rajagopalan * Notice to shutdown has a higher priority than the
280*b494511aSVenki Rajagopalan * rest and goes to the head of the list. Everything
281*b494511aSVenki Rajagopalan * else goes at the end.
282*b494511aSVenki Rajagopalan */
283*b494511aSVenki Rajagopalan if (evi->ev_code == EIB_EV_SHUTDOWN) {
284*b494511aSVenki Rajagopalan evi->ev_next = ss->ei_event;
285*b494511aSVenki Rajagopalan ss->ei_event = evi;
286*b494511aSVenki Rajagopalan } else {
287*b494511aSVenki Rajagopalan for (elem = ss->ei_event; elem; elem = elem->ev_next)
288*b494511aSVenki Rajagopalan tail = elem;
289*b494511aSVenki Rajagopalan
290*b494511aSVenki Rajagopalan if (tail)
291*b494511aSVenki Rajagopalan tail->ev_next = evi;
292*b494511aSVenki Rajagopalan else
293*b494511aSVenki Rajagopalan ss->ei_event = evi;
294*b494511aSVenki Rajagopalan }
295*b494511aSVenki Rajagopalan
296*b494511aSVenki Rajagopalan cv_signal(&ss->ei_ev_cv);
297*b494511aSVenki Rajagopalan mutex_exit(&ss->ei_ev_lock);
298*b494511aSVenki Rajagopalan }
299*b494511aSVenki Rajagopalan
300*b494511aSVenki Rajagopalan /*
301*b494511aSVenki Rajagopalan * Thread to refill channels with rwqes whenever they get low.
302*b494511aSVenki Rajagopalan */
void
eib_refill_rwqes(eib_t *ss)
{
	eib_chan_t *chan;
	kmutex_t ci_lock;
	callb_cpr_t ci;

	/*
	 * Register with the CPR (checkpoint/resume) framework; ci_lock
	 * exists solely to satisfy the CALLB_CPR protocol.
	 */
	mutex_init(&ci_lock, NULL, MUTEX_DRIVER, NULL);
	CALLB_CPR_INIT(&ci, &ci_lock, callb_generic_cpr, EIB_RWQES_REFILLER);

wait_for_refill_work:
	mutex_enter(&ss->ei_rxpost_lock);

	/*
	 * Block (CPR-safe) until a channel is queued on ei_rxpost or
	 * we're told to die via ei_rxpost_die.
	 */
	while ((ss->ei_rxpost == NULL) && (ss->ei_rxpost_die == 0)) {
		mutex_enter(&ci_lock);
		CALLB_CPR_SAFE_BEGIN(&ci);
		mutex_exit(&ci_lock);

		cv_wait(&ss->ei_rxpost_cv, &ss->ei_rxpost_lock);

		mutex_enter(&ci_lock);
		CALLB_CPR_SAFE_END(&ci, &ci_lock);
		mutex_exit(&ci_lock);
	}

	/*
	 * Discard all requests for refill if we're being asked to die.
	 * CALLB_CPR_EXIT drops ci_lock itself.
	 */
	if (ss->ei_rxpost_die) {
		ss->ei_rxpost = NULL;
		mutex_exit(&ss->ei_rxpost_lock);

		mutex_enter(&ci_lock);
		CALLB_CPR_EXIT(&ci);
		mutex_destroy(&ci_lock);

		return;
	}
	ASSERT(ss->ei_rxpost != NULL);

	/*
	 * Take the first element out of the queue
	 */
	chan = ss->ei_rxpost;
	ss->ei_rxpost = chan->ch_rxpost_next;
	chan->ch_rxpost_next = NULL;

	mutex_exit(&ss->ei_rxpost_lock);

	/*
	 * Try to post a bunch of recv wqes into this channel. If we
	 * fail, it means that we haven't even been able to post a
	 * single recv wqe.  This is alarming, but there's nothing
	 * we can do. We just move on to the next channel needing
	 * our service.
	 */
	if (eib_chan_post_rx(ss, chan, NULL) != EIB_E_SUCCESS) {
		EIB_DPRINTF_ERR(ss->ei_instance,
		    "eib_refill_rwqes: eib_chan_post_rx() failed");
	}

	/*
	 * Mark it to indicate that the refilling is done
	 */
	mutex_enter(&chan->ch_rx_lock);
	chan->ch_rx_refilling = B_FALSE;
	mutex_exit(&chan->ch_rx_lock);

	goto wait_for_refill_work;

	/*NOTREACHED*/
}
375*b494511aSVenki Rajagopalan
376*b494511aSVenki Rajagopalan /*
377*b494511aSVenki Rajagopalan * Thread to create or restart vnics when required
378*b494511aSVenki Rajagopalan */
379*b494511aSVenki Rajagopalan void
eib_vnic_creator(eib_t * ss)380*b494511aSVenki Rajagopalan eib_vnic_creator(eib_t *ss)
381*b494511aSVenki Rajagopalan {
382*b494511aSVenki Rajagopalan eib_vnic_req_t *vrq;
383*b494511aSVenki Rajagopalan eib_vnic_req_t *elem;
384*b494511aSVenki Rajagopalan eib_vnic_req_t *nxt;
385*b494511aSVenki Rajagopalan kmutex_t ci_lock;
386*b494511aSVenki Rajagopalan callb_cpr_t ci;
387*b494511aSVenki Rajagopalan uint_t vr_req;
388*b494511aSVenki Rajagopalan uint8_t *vr_mac;
389*b494511aSVenki Rajagopalan int ret;
390*b494511aSVenki Rajagopalan int err;
391*b494511aSVenki Rajagopalan
392*b494511aSVenki Rajagopalan mutex_init(&ci_lock, NULL, MUTEX_DRIVER, NULL);
393*b494511aSVenki Rajagopalan CALLB_CPR_INIT(&ci, &ci_lock, callb_generic_cpr, EIB_VNIC_CREATOR);
394*b494511aSVenki Rajagopalan
395*b494511aSVenki Rajagopalan wait_for_vnic_req:
396*b494511aSVenki Rajagopalan mutex_enter(&ss->ei_vnic_req_lock);
397*b494511aSVenki Rajagopalan
398*b494511aSVenki Rajagopalan while ((vrq = ss->ei_vnic_req) == NULL) {
399*b494511aSVenki Rajagopalan mutex_enter(&ci_lock);
400*b494511aSVenki Rajagopalan CALLB_CPR_SAFE_BEGIN(&ci);
401*b494511aSVenki Rajagopalan mutex_exit(&ci_lock);
402*b494511aSVenki Rajagopalan
403*b494511aSVenki Rajagopalan cv_wait(&ss->ei_vnic_req_cv, &ss->ei_vnic_req_lock);
404*b494511aSVenki Rajagopalan
405*b494511aSVenki Rajagopalan mutex_enter(&ci_lock);
406*b494511aSVenki Rajagopalan CALLB_CPR_SAFE_END(&ci, &ci_lock);
407*b494511aSVenki Rajagopalan mutex_exit(&ci_lock);
408*b494511aSVenki Rajagopalan }
409*b494511aSVenki Rajagopalan
410*b494511aSVenki Rajagopalan /*
411*b494511aSVenki Rajagopalan * Pull out the first request
412*b494511aSVenki Rajagopalan */
413*b494511aSVenki Rajagopalan ss->ei_vnic_req = vrq->vr_next;
414*b494511aSVenki Rajagopalan vrq->vr_next = NULL;
415*b494511aSVenki Rajagopalan
416*b494511aSVenki Rajagopalan vr_req = vrq->vr_req;
417*b494511aSVenki Rajagopalan vr_mac = vrq->vr_mac;
418*b494511aSVenki Rajagopalan
419*b494511aSVenki Rajagopalan switch (vr_req) {
420*b494511aSVenki Rajagopalan case EIB_CR_REQ_DIE:
421*b494511aSVenki Rajagopalan case EIB_CR_REQ_FLUSH:
422*b494511aSVenki Rajagopalan /*
423*b494511aSVenki Rajagopalan * Cleanup all pending reqs and failed reqs
424*b494511aSVenki Rajagopalan */
425*b494511aSVenki Rajagopalan for (elem = ss->ei_vnic_req; elem; elem = nxt) {
426*b494511aSVenki Rajagopalan nxt = elem->vr_next;
427*b494511aSVenki Rajagopalan kmem_free(elem, sizeof (eib_vnic_req_t));
428*b494511aSVenki Rajagopalan }
429*b494511aSVenki Rajagopalan for (elem = ss->ei_failed_vnic_req; elem; elem = nxt) {
430*b494511aSVenki Rajagopalan nxt = elem->vr_next;
431*b494511aSVenki Rajagopalan kmem_free(elem, sizeof (eib_vnic_req_t));
432*b494511aSVenki Rajagopalan }
433*b494511aSVenki Rajagopalan ss->ei_vnic_req = NULL;
434*b494511aSVenki Rajagopalan ss->ei_failed_vnic_req = NULL;
435*b494511aSVenki Rajagopalan ss->ei_pending_vnic_req = NULL;
436*b494511aSVenki Rajagopalan mutex_exit(&ss->ei_vnic_req_lock);
437*b494511aSVenki Rajagopalan
438*b494511aSVenki Rajagopalan break;
439*b494511aSVenki Rajagopalan
440*b494511aSVenki Rajagopalan case EIB_CR_REQ_NEW_VNIC:
441*b494511aSVenki Rajagopalan ss->ei_pending_vnic_req = vrq;
442*b494511aSVenki Rajagopalan mutex_exit(&ss->ei_vnic_req_lock);
443*b494511aSVenki Rajagopalan
444*b494511aSVenki Rajagopalan EIB_DPRINTF_DEBUG(ss->ei_instance, "eib_vnic_creator: "
445*b494511aSVenki Rajagopalan "new vnic creation request for %x:%x:%x:%x:%x:%x, 0x%x",
446*b494511aSVenki Rajagopalan vr_mac[0], vr_mac[1], vr_mac[2], vr_mac[3], vr_mac[4],
447*b494511aSVenki Rajagopalan vr_mac[5], vrq->vr_vlan);
448*b494511aSVenki Rajagopalan
449*b494511aSVenki Rajagopalan /*
450*b494511aSVenki Rajagopalan * Make sure we don't race with the plumb/unplumb code. If
451*b494511aSVenki Rajagopalan * the eoib instance has been unplumbed already, we ignore any
452*b494511aSVenki Rajagopalan * creation requests that may have been pending.
453*b494511aSVenki Rajagopalan */
454*b494511aSVenki Rajagopalan eib_mac_set_nic_state(ss, EIB_NIC_STARTING);
455*b494511aSVenki Rajagopalan
456*b494511aSVenki Rajagopalan if ((ss->ei_node_state->ns_nic_state & EIB_NIC_STARTED) !=
457*b494511aSVenki Rajagopalan EIB_NIC_STARTED) {
458*b494511aSVenki Rajagopalan mutex_enter(&ss->ei_vnic_req_lock);
459*b494511aSVenki Rajagopalan ss->ei_pending_vnic_req = NULL;
460*b494511aSVenki Rajagopalan mutex_exit(&ss->ei_vnic_req_lock);
461*b494511aSVenki Rajagopalan eib_mac_clr_nic_state(ss, EIB_NIC_STARTING);
462*b494511aSVenki Rajagopalan break;
463*b494511aSVenki Rajagopalan }
464*b494511aSVenki Rajagopalan
465*b494511aSVenki Rajagopalan /*
466*b494511aSVenki Rajagopalan * Try to create a new vnic with the supplied parameters.
467*b494511aSVenki Rajagopalan */
468*b494511aSVenki Rajagopalan err = 0;
469*b494511aSVenki Rajagopalan if ((ret = eib_vnic_create(ss, vrq->vr_mac, vrq->vr_vlan,
470*b494511aSVenki Rajagopalan NULL, &err)) != EIB_E_SUCCESS) {
471*b494511aSVenki Rajagopalan EIB_DPRINTF_WARN(ss->ei_instance, "eib_vnic_creator: "
472*b494511aSVenki Rajagopalan "eib_vnic_create(mac=%x:%x:%x:%x:%x:%x, vlan=0x%x) "
473*b494511aSVenki Rajagopalan "failed, ret=%d", vr_mac[0], vr_mac[1], vr_mac[2],
474*b494511aSVenki Rajagopalan vr_mac[3], vr_mac[4], vr_mac[5], vrq->vr_vlan, err);
475*b494511aSVenki Rajagopalan }
476*b494511aSVenki Rajagopalan
477*b494511aSVenki Rajagopalan /*
478*b494511aSVenki Rajagopalan * If we failed, add this vnic req to our failed list (unless
479*b494511aSVenki Rajagopalan * it already exists there), so we won't try to create this
480*b494511aSVenki Rajagopalan * vnic again. Whether we fail or succeed, we're done with
481*b494511aSVenki Rajagopalan * processing this req, so clear the pending req.
482*b494511aSVenki Rajagopalan */
483*b494511aSVenki Rajagopalan mutex_enter(&ss->ei_vnic_req_lock);
484*b494511aSVenki Rajagopalan if ((ret != EIB_E_SUCCESS) && (err != EEXIST)) {
485*b494511aSVenki Rajagopalan vrq->vr_next = ss->ei_failed_vnic_req;
486*b494511aSVenki Rajagopalan ss->ei_failed_vnic_req = vrq;
487*b494511aSVenki Rajagopalan vrq = NULL;
488*b494511aSVenki Rajagopalan }
489*b494511aSVenki Rajagopalan ss->ei_pending_vnic_req = NULL;
490*b494511aSVenki Rajagopalan mutex_exit(&ss->ei_vnic_req_lock);
491*b494511aSVenki Rajagopalan
492*b494511aSVenki Rajagopalan /*
493*b494511aSVenki Rajagopalan * Notify the mac layer that it should retry its tx again. If we
494*b494511aSVenki Rajagopalan * had created the vnic successfully, we'll be able to send the
495*b494511aSVenki Rajagopalan * packets; if we had not been successful, we'll drop packets on
496*b494511aSVenki Rajagopalan * this vnic.
497*b494511aSVenki Rajagopalan */
498*b494511aSVenki Rajagopalan EIB_DPRINTF_DEBUG(ss->ei_instance,
499*b494511aSVenki Rajagopalan "eib_vnic_creator: calling mac_tx_update()");
500*b494511aSVenki Rajagopalan mac_tx_update(ss->ei_mac_hdl);
501*b494511aSVenki Rajagopalan
502*b494511aSVenki Rajagopalan eib_mac_clr_nic_state(ss, EIB_NIC_STARTING);
503*b494511aSVenki Rajagopalan break;
504*b494511aSVenki Rajagopalan
505*b494511aSVenki Rajagopalan default:
506*b494511aSVenki Rajagopalan EIB_DPRINTF_DEBUG(ss->ei_instance, "eib_vnic_creator: "
507*b494511aSVenki Rajagopalan "unknown request 0x%lx, ignoring", vrq->vr_req);
508*b494511aSVenki Rajagopalan break;
509*b494511aSVenki Rajagopalan }
510*b494511aSVenki Rajagopalan
511*b494511aSVenki Rajagopalan /*
512*b494511aSVenki Rajagopalan * Free the current req and quit if we have to
513*b494511aSVenki Rajagopalan */
514*b494511aSVenki Rajagopalan if (vrq) {
515*b494511aSVenki Rajagopalan kmem_free(vrq, sizeof (eib_vnic_req_t));
516*b494511aSVenki Rajagopalan }
517*b494511aSVenki Rajagopalan
518*b494511aSVenki Rajagopalan if (vr_req == EIB_CR_REQ_DIE) {
519*b494511aSVenki Rajagopalan mutex_enter(&ci_lock);
520*b494511aSVenki Rajagopalan CALLB_CPR_EXIT(&ci);
521*b494511aSVenki Rajagopalan mutex_destroy(&ci_lock);
522*b494511aSVenki Rajagopalan
523*b494511aSVenki Rajagopalan return;
524*b494511aSVenki Rajagopalan }
525*b494511aSVenki Rajagopalan
526*b494511aSVenki Rajagopalan goto wait_for_vnic_req;
527*b494511aSVenki Rajagopalan /*NOTREACHED*/
528*b494511aSVenki Rajagopalan }
529*b494511aSVenki Rajagopalan
530*b494511aSVenki Rajagopalan /*
531*b494511aSVenki Rajagopalan * Thread to monitor tx wqes and update the mac layer when needed.
532*b494511aSVenki Rajagopalan * Note that this thread can only be started after the tx wqe pool
533*b494511aSVenki Rajagopalan * has been allocated and initialized.
534*b494511aSVenki Rajagopalan */
void
eib_monitor_tx_wqes(eib_t *ss)
{
	eib_wqe_pool_t *wp = ss->ei_tx;
	kmutex_t ci_lock;
	callb_cpr_t ci;

	/*
	 * Register with the CPR (checkpoint/resume) framework; ci_lock
	 * exists solely to satisfy the CALLB_CPR protocol.
	 */
	mutex_init(&ci_lock, NULL, MUTEX_DRIVER, NULL);
	CALLB_CPR_INIT(&ci, &ci_lock, callb_generic_cpr, EIB_TXWQES_MONITOR);

	ASSERT(wp != NULL);

monitor_wqe_status:
	mutex_enter(&wp->wp_lock);

	/*
	 * Wait till someone falls short of wqes.  wp_status becomes
	 * non-zero either when the pool runs short (EIB_TXWQE_SHORT)
	 * or when we're asked to exit (EIB_TXWQE_MONITOR_DIE).
	 */
	while (wp->wp_status == 0) {
		mutex_enter(&ci_lock);
		CALLB_CPR_SAFE_BEGIN(&ci);
		mutex_exit(&ci_lock);

		cv_wait(&wp->wp_cv, &wp->wp_lock);

		mutex_enter(&ci_lock);
		CALLB_CPR_SAFE_END(&ci, &ci_lock);
		mutex_exit(&ci_lock);
	}

	/*
	 * Have we been asked to die ?  (CALLB_CPR_EXIT drops ci_lock
	 * itself.)
	 */
	if (wp->wp_status & EIB_TXWQE_MONITOR_DIE) {
		mutex_exit(&wp->wp_lock);

		mutex_enter(&ci_lock);
		CALLB_CPR_EXIT(&ci);
		mutex_destroy(&ci_lock);

		return;
	}

	ASSERT((wp->wp_status & EIB_TXWQE_SHORT) != 0);

	/*
	 * Start monitoring free wqes till they cross min threshold
	 * (the high watermark), unless we're asked to die meanwhile.
	 */
	while ((wp->wp_nfree < EIB_NFREE_SWQES_HWM) &&
	    ((wp->wp_status & EIB_TXWQE_MONITOR_DIE) == 0)) {

		mutex_enter(&ci_lock);
		CALLB_CPR_SAFE_BEGIN(&ci);
		mutex_exit(&ci_lock);

		cv_wait(&wp->wp_cv, &wp->wp_lock);

		mutex_enter(&ci_lock);
		CALLB_CPR_SAFE_END(&ci, &ci_lock);
		mutex_exit(&ci_lock);
	}

	/*
	 * Have we been asked to die ?  (Re-checked because the wait
	 * above can also be terminated by the die flag.)
	 */
	if (wp->wp_status & EIB_TXWQE_MONITOR_DIE) {
		mutex_exit(&wp->wp_lock);

		mutex_enter(&ci_lock);
		CALLB_CPR_EXIT(&ci);
		mutex_destroy(&ci_lock);

		return;
	}

	ASSERT(wp->wp_nfree >= EIB_NFREE_SWQES_HWM);
	wp->wp_status &= (~EIB_TXWQE_SHORT);

	mutex_exit(&wp->wp_lock);

	/*
	 * Inform the mac layer that tx resources are now available
	 * and go back to monitoring
	 */
	if (ss->ei_mac_hdl) {
		mac_tx_update(ss->ei_mac_hdl);
	}
	goto monitor_wqe_status;

	/*NOTREACHED*/
}
626*b494511aSVenki Rajagopalan
627*b494511aSVenki Rajagopalan /*
628*b494511aSVenki Rajagopalan * Thread to monitor lso bufs and update the mac layer as needed.
629*b494511aSVenki Rajagopalan * Note that this thread can only be started after the lso buckets
630*b494511aSVenki Rajagopalan * have been allocated and initialized.
631*b494511aSVenki Rajagopalan */
632*b494511aSVenki Rajagopalan void
eib_monitor_lso_bufs(eib_t * ss)633*b494511aSVenki Rajagopalan eib_monitor_lso_bufs(eib_t *ss)
634*b494511aSVenki Rajagopalan {
635*b494511aSVenki Rajagopalan eib_lsobkt_t *bkt = ss->ei_lso;
636*b494511aSVenki Rajagopalan kmutex_t ci_lock;
637*b494511aSVenki Rajagopalan callb_cpr_t ci;
638*b494511aSVenki Rajagopalan
639*b494511aSVenki Rajagopalan mutex_init(&ci_lock, NULL, MUTEX_DRIVER, NULL);
640*b494511aSVenki Rajagopalan CALLB_CPR_INIT(&ci, &ci_lock, callb_generic_cpr, EIB_LSOBUFS_MONITOR);
641*b494511aSVenki Rajagopalan
642*b494511aSVenki Rajagopalan ASSERT(bkt != NULL);
643*b494511aSVenki Rajagopalan
644*b494511aSVenki Rajagopalan monitor_lso_status:
645*b494511aSVenki Rajagopalan mutex_enter(&bkt->bk_lock);
646*b494511aSVenki Rajagopalan
647*b494511aSVenki Rajagopalan /*
648*b494511aSVenki Rajagopalan * Wait till someone falls short of LSO buffers or we're asked
649*b494511aSVenki Rajagopalan * to die
650*b494511aSVenki Rajagopalan */
651*b494511aSVenki Rajagopalan while (bkt->bk_status == 0) {
652*b494511aSVenki Rajagopalan mutex_enter(&ci_lock);
653*b494511aSVenki Rajagopalan CALLB_CPR_SAFE_BEGIN(&ci);
654*b494511aSVenki Rajagopalan mutex_exit(&ci_lock);
655*b494511aSVenki Rajagopalan
656*b494511aSVenki Rajagopalan cv_wait(&bkt->bk_cv, &bkt->bk_lock);
657*b494511aSVenki Rajagopalan
658*b494511aSVenki Rajagopalan mutex_enter(&ci_lock);
659*b494511aSVenki Rajagopalan CALLB_CPR_SAFE_END(&ci, &ci_lock);
660*b494511aSVenki Rajagopalan mutex_exit(&ci_lock);
661*b494511aSVenki Rajagopalan }
662*b494511aSVenki Rajagopalan
663*b494511aSVenki Rajagopalan if (bkt->bk_status & EIB_LBUF_MONITOR_DIE) {
664*b494511aSVenki Rajagopalan mutex_exit(&bkt->bk_lock);
665*b494511aSVenki Rajagopalan
666*b494511aSVenki Rajagopalan mutex_enter(&ci_lock);
667*b494511aSVenki Rajagopalan CALLB_CPR_EXIT(&ci);
668*b494511aSVenki Rajagopalan mutex_destroy(&ci_lock);
669*b494511aSVenki Rajagopalan
670*b494511aSVenki Rajagopalan return;
671*b494511aSVenki Rajagopalan }
672*b494511aSVenki Rajagopalan
673*b494511aSVenki Rajagopalan ASSERT((bkt->bk_status & EIB_LBUF_SHORT) != 0);
674*b494511aSVenki Rajagopalan
675*b494511aSVenki Rajagopalan /*
676*b494511aSVenki Rajagopalan * Start monitoring free LSO buffers till there are enough
677*b494511aSVenki Rajagopalan * free buffers available
678*b494511aSVenki Rajagopalan */
679*b494511aSVenki Rajagopalan while ((bkt->bk_nfree < EIB_LSO_FREE_BUFS_THRESH) &&
680*b494511aSVenki Rajagopalan ((bkt->bk_status & EIB_LBUF_MONITOR_DIE) == 0)) {
681*b494511aSVenki Rajagopalan
682*b494511aSVenki Rajagopalan mutex_enter(&ci_lock);
683*b494511aSVenki Rajagopalan CALLB_CPR_SAFE_BEGIN(&ci);
684*b494511aSVenki Rajagopalan mutex_exit(&ci_lock);
685*b494511aSVenki Rajagopalan
686*b494511aSVenki Rajagopalan cv_wait(&bkt->bk_cv, &bkt->bk_lock);
687*b494511aSVenki Rajagopalan
688*b494511aSVenki Rajagopalan mutex_enter(&ci_lock);
689*b494511aSVenki Rajagopalan CALLB_CPR_SAFE_END(&ci, &ci_lock);
690*b494511aSVenki Rajagopalan mutex_exit(&ci_lock);
691*b494511aSVenki Rajagopalan }
692*b494511aSVenki Rajagopalan
693*b494511aSVenki Rajagopalan if (bkt->bk_status & EIB_LBUF_MONITOR_DIE) {
694*b494511aSVenki Rajagopalan mutex_exit(&bkt->bk_lock);
695*b494511aSVenki Rajagopalan
696*b494511aSVenki Rajagopalan mutex_enter(&ci_lock);
697*b494511aSVenki Rajagopalan CALLB_CPR_EXIT(&ci);
698*b494511aSVenki Rajagopalan mutex_destroy(&ci_lock);
699*b494511aSVenki Rajagopalan
700*b494511aSVenki Rajagopalan return;
701*b494511aSVenki Rajagopalan }
702*b494511aSVenki Rajagopalan
703*b494511aSVenki Rajagopalan /*
704*b494511aSVenki Rajagopalan * We have enough lso buffers available now
705*b494511aSVenki Rajagopalan */
706*b494511aSVenki Rajagopalan ASSERT(bkt->bk_nfree >= EIB_LSO_FREE_BUFS_THRESH);
707*b494511aSVenki Rajagopalan bkt->bk_status &= (~EIB_LBUF_SHORT);
708*b494511aSVenki Rajagopalan
709*b494511aSVenki Rajagopalan mutex_exit(&bkt->bk_lock);
710*b494511aSVenki Rajagopalan
711*b494511aSVenki Rajagopalan /*
712*b494511aSVenki Rajagopalan * Inform the mac layer that tx lso resources are now available
713*b494511aSVenki Rajagopalan * and go back to monitoring
714*b494511aSVenki Rajagopalan */
715*b494511aSVenki Rajagopalan if (ss->ei_mac_hdl) {
716*b494511aSVenki Rajagopalan mac_tx_update(ss->ei_mac_hdl);
717*b494511aSVenki Rajagopalan }
718*b494511aSVenki Rajagopalan goto monitor_lso_status;
719*b494511aSVenki Rajagopalan
720*b494511aSVenki Rajagopalan /*NOTREACHED*/
721*b494511aSVenki Rajagopalan }
722*b494511aSVenki Rajagopalan
723*b494511aSVenki Rajagopalan /*
724*b494511aSVenki Rajagopalan * Thread to manage the keepalive requirements for vnics and the gateway.
725*b494511aSVenki Rajagopalan */
726*b494511aSVenki Rajagopalan void
eib_manage_keepalives(eib_t * ss)727*b494511aSVenki Rajagopalan eib_manage_keepalives(eib_t *ss)
728*b494511aSVenki Rajagopalan {
729*b494511aSVenki Rajagopalan eib_ka_vnics_t *elem;
730*b494511aSVenki Rajagopalan eib_ka_vnics_t *nxt;
731*b494511aSVenki Rajagopalan clock_t deadline;
732*b494511aSVenki Rajagopalan int64_t lbolt64;
733*b494511aSVenki Rajagopalan int err;
734*b494511aSVenki Rajagopalan kmutex_t ci_lock;
735*b494511aSVenki Rajagopalan callb_cpr_t ci;
736*b494511aSVenki Rajagopalan
737*b494511aSVenki Rajagopalan mutex_init(&ci_lock, NULL, MUTEX_DRIVER, NULL);
738*b494511aSVenki Rajagopalan CALLB_CPR_INIT(&ci, &ci_lock, callb_generic_cpr, EIB_EVENTS_HDLR);
739*b494511aSVenki Rajagopalan
740*b494511aSVenki Rajagopalan mutex_enter(&ss->ei_ka_vnics_lock);
741*b494511aSVenki Rajagopalan
742*b494511aSVenki Rajagopalan periodic_keepalive:
743*b494511aSVenki Rajagopalan deadline = ddi_get_lbolt() + ss->ei_gw_props->pp_vnic_ka_ticks;
744*b494511aSVenki Rajagopalan
745*b494511aSVenki Rajagopalan while ((ss->ei_ka_vnics_event &
746*b494511aSVenki Rajagopalan (EIB_KA_VNICS_DIE | EIB_KA_VNICS_TIMED_OUT)) == 0) {
747*b494511aSVenki Rajagopalan mutex_enter(&ci_lock);
748*b494511aSVenki Rajagopalan CALLB_CPR_SAFE_BEGIN(&ci);
749*b494511aSVenki Rajagopalan mutex_exit(&ci_lock);
750*b494511aSVenki Rajagopalan
751*b494511aSVenki Rajagopalan if (cv_timedwait(&ss->ei_ka_vnics_cv, &ss->ei_ka_vnics_lock,
752*b494511aSVenki Rajagopalan deadline) == -1) {
753*b494511aSVenki Rajagopalan ss->ei_ka_vnics_event |= EIB_KA_VNICS_TIMED_OUT;
754*b494511aSVenki Rajagopalan }
755*b494511aSVenki Rajagopalan
756*b494511aSVenki Rajagopalan mutex_enter(&ci_lock);
757*b494511aSVenki Rajagopalan CALLB_CPR_SAFE_END(&ci, &ci_lock);
758*b494511aSVenki Rajagopalan mutex_exit(&ci_lock);
759*b494511aSVenki Rajagopalan }
760*b494511aSVenki Rajagopalan
761*b494511aSVenki Rajagopalan if (ss->ei_ka_vnics_event & EIB_KA_VNICS_DIE) {
762*b494511aSVenki Rajagopalan for (elem = ss->ei_ka_vnics; elem; elem = nxt) {
763*b494511aSVenki Rajagopalan nxt = elem->ka_next;
764*b494511aSVenki Rajagopalan kmem_free(elem, sizeof (eib_ka_vnics_t));
765*b494511aSVenki Rajagopalan }
766*b494511aSVenki Rajagopalan ss->ei_ka_vnics = NULL;
767*b494511aSVenki Rajagopalan mutex_exit(&ss->ei_ka_vnics_lock);
768*b494511aSVenki Rajagopalan
769*b494511aSVenki Rajagopalan mutex_enter(&ci_lock);
770*b494511aSVenki Rajagopalan CALLB_CPR_EXIT(&ci);
771*b494511aSVenki Rajagopalan mutex_destroy(&ci_lock);
772*b494511aSVenki Rajagopalan
773*b494511aSVenki Rajagopalan return;
774*b494511aSVenki Rajagopalan }
775*b494511aSVenki Rajagopalan
776*b494511aSVenki Rajagopalan /*
777*b494511aSVenki Rajagopalan * Are there any vnics that need keepalive management ?
778*b494511aSVenki Rajagopalan */
779*b494511aSVenki Rajagopalan ss->ei_ka_vnics_event &= ~EIB_KA_VNICS_TIMED_OUT;
780*b494511aSVenki Rajagopalan if (ss->ei_ka_vnics == NULL)
781*b494511aSVenki Rajagopalan goto periodic_keepalive;
782*b494511aSVenki Rajagopalan
783*b494511aSVenki Rajagopalan /*
784*b494511aSVenki Rajagopalan * Ok, we need to send vnic keepalives to our gateway. But first
785*b494511aSVenki Rajagopalan * check if the gateway heartbeat is good as of this moment. Note
786*b494511aSVenki Rajagopalan * that we need do get the lbolt value after acquiring ei_vnic_lock
787*b494511aSVenki Rajagopalan * to ensure that ei_gw_last_heartbeat does not change before the
788*b494511aSVenki Rajagopalan * comparison (to avoid a negative value in the comparison result
789*b494511aSVenki Rajagopalan * causing us to incorrectly assume that the gateway heartbeat has
790*b494511aSVenki Rajagopalan * stopped).
791*b494511aSVenki Rajagopalan */
792*b494511aSVenki Rajagopalan mutex_enter(&ss->ei_vnic_lock);
793*b494511aSVenki Rajagopalan
794*b494511aSVenki Rajagopalan lbolt64 = ddi_get_lbolt64();
795*b494511aSVenki Rajagopalan
796*b494511aSVenki Rajagopalan if (ss->ei_gw_last_heartbeat != 0) {
797*b494511aSVenki Rajagopalan if ((lbolt64 - ss->ei_gw_last_heartbeat) >
798*b494511aSVenki Rajagopalan ss->ei_gw_props->pp_gw_ka_ticks) {
799*b494511aSVenki Rajagopalan
800*b494511aSVenki Rajagopalan EIB_DPRINTF_WARN(ss->ei_instance,
801*b494511aSVenki Rajagopalan "eib_manage_keepalives: no keepalives from gateway "
802*b494511aSVenki Rajagopalan "0x%x for hca_guid=0x%llx, port=0x%x, "
803*b494511aSVenki Rajagopalan "last_gw_ka=0x%llx", ss->ei_gw_props->pp_gw_portid,
804*b494511aSVenki Rajagopalan ss->ei_props->ep_hca_guid,
805*b494511aSVenki Rajagopalan ss->ei_props->ep_port_num,
806*b494511aSVenki Rajagopalan ss->ei_gw_last_heartbeat);
807*b494511aSVenki Rajagopalan
808*b494511aSVenki Rajagopalan for (elem = ss->ei_ka_vnics; elem; elem = nxt) {
809*b494511aSVenki Rajagopalan nxt = elem->ka_next;
810*b494511aSVenki Rajagopalan ss->ei_zombie_vnics |=
811*b494511aSVenki Rajagopalan ((uint64_t)1 << elem->ka_vnic->vn_instance);
812*b494511aSVenki Rajagopalan kmem_free(elem, sizeof (eib_ka_vnics_t));
813*b494511aSVenki Rajagopalan }
814*b494511aSVenki Rajagopalan ss->ei_ka_vnics = NULL;
815*b494511aSVenki Rajagopalan ss->ei_gw_unreachable = B_TRUE;
816*b494511aSVenki Rajagopalan mutex_exit(&ss->ei_vnic_lock);
817*b494511aSVenki Rajagopalan
818*b494511aSVenki Rajagopalan eib_mac_link_down(ss, B_FALSE);
819*b494511aSVenki Rajagopalan
820*b494511aSVenki Rajagopalan goto periodic_keepalive;
821*b494511aSVenki Rajagopalan }
822*b494511aSVenki Rajagopalan }
823*b494511aSVenki Rajagopalan mutex_exit(&ss->ei_vnic_lock);
824*b494511aSVenki Rajagopalan
825*b494511aSVenki Rajagopalan for (elem = ss->ei_ka_vnics; elem; elem = elem->ka_next)
826*b494511aSVenki Rajagopalan (void) eib_fip_heartbeat(ss, elem->ka_vnic, &err);
827*b494511aSVenki Rajagopalan
828*b494511aSVenki Rajagopalan goto periodic_keepalive;
829*b494511aSVenki Rajagopalan /*NOTREACHED*/
830*b494511aSVenki Rajagopalan }
831*b494511aSVenki Rajagopalan
832*b494511aSVenki Rajagopalan void
eib_stop_events_handler(eib_t * ss)833*b494511aSVenki Rajagopalan eib_stop_events_handler(eib_t *ss)
834*b494511aSVenki Rajagopalan {
835*b494511aSVenki Rajagopalan eib_event_t *evi;
836*b494511aSVenki Rajagopalan
837*b494511aSVenki Rajagopalan evi = kmem_zalloc(sizeof (eib_event_t), KM_SLEEP);
838*b494511aSVenki Rajagopalan evi->ev_code = EIB_EV_SHUTDOWN;
839*b494511aSVenki Rajagopalan evi->ev_arg = NULL;
840*b494511aSVenki Rajagopalan
841*b494511aSVenki Rajagopalan eib_svc_enqueue_event(ss, evi);
842*b494511aSVenki Rajagopalan
843*b494511aSVenki Rajagopalan thread_join(ss->ei_events_handler);
844*b494511aSVenki Rajagopalan }
845*b494511aSVenki Rajagopalan
846*b494511aSVenki Rajagopalan void
eib_stop_refill_rwqes(eib_t * ss)847*b494511aSVenki Rajagopalan eib_stop_refill_rwqes(eib_t *ss)
848*b494511aSVenki Rajagopalan {
849*b494511aSVenki Rajagopalan mutex_enter(&ss->ei_rxpost_lock);
850*b494511aSVenki Rajagopalan
851*b494511aSVenki Rajagopalan ss->ei_rxpost_die = 1;
852*b494511aSVenki Rajagopalan
853*b494511aSVenki Rajagopalan cv_signal(&ss->ei_rxpost_cv);
854*b494511aSVenki Rajagopalan mutex_exit(&ss->ei_rxpost_lock);
855*b494511aSVenki Rajagopalan
856*b494511aSVenki Rajagopalan thread_join(ss->ei_rwqes_refiller);
857*b494511aSVenki Rajagopalan }
858*b494511aSVenki Rajagopalan
859*b494511aSVenki Rajagopalan void
eib_stop_vnic_creator(eib_t * ss)860*b494511aSVenki Rajagopalan eib_stop_vnic_creator(eib_t *ss)
861*b494511aSVenki Rajagopalan {
862*b494511aSVenki Rajagopalan eib_vnic_req_t *vrq;
863*b494511aSVenki Rajagopalan
864*b494511aSVenki Rajagopalan vrq = kmem_zalloc(sizeof (eib_vnic_req_t), KM_SLEEP);
865*b494511aSVenki Rajagopalan vrq->vr_req = EIB_CR_REQ_DIE;
866*b494511aSVenki Rajagopalan vrq->vr_next = NULL;
867*b494511aSVenki Rajagopalan
868*b494511aSVenki Rajagopalan eib_vnic_enqueue_req(ss, vrq);
869*b494511aSVenki Rajagopalan
870*b494511aSVenki Rajagopalan thread_join(ss->ei_vnic_creator);
871*b494511aSVenki Rajagopalan }
872*b494511aSVenki Rajagopalan
873*b494511aSVenki Rajagopalan void
eib_stop_monitor_tx_wqes(eib_t * ss)874*b494511aSVenki Rajagopalan eib_stop_monitor_tx_wqes(eib_t *ss)
875*b494511aSVenki Rajagopalan {
876*b494511aSVenki Rajagopalan eib_wqe_pool_t *wp = ss->ei_tx;
877*b494511aSVenki Rajagopalan
878*b494511aSVenki Rajagopalan mutex_enter(&wp->wp_lock);
879*b494511aSVenki Rajagopalan
880*b494511aSVenki Rajagopalan wp->wp_status |= EIB_TXWQE_MONITOR_DIE;
881*b494511aSVenki Rajagopalan
882*b494511aSVenki Rajagopalan cv_signal(&wp->wp_cv);
883*b494511aSVenki Rajagopalan mutex_exit(&wp->wp_lock);
884*b494511aSVenki Rajagopalan
885*b494511aSVenki Rajagopalan thread_join(ss->ei_txwqe_monitor);
886*b494511aSVenki Rajagopalan }
887*b494511aSVenki Rajagopalan
888*b494511aSVenki Rajagopalan int
eib_stop_monitor_lso_bufs(eib_t * ss,boolean_t force)889*b494511aSVenki Rajagopalan eib_stop_monitor_lso_bufs(eib_t *ss, boolean_t force)
890*b494511aSVenki Rajagopalan {
891*b494511aSVenki Rajagopalan eib_lsobkt_t *bkt = ss->ei_lso;
892*b494511aSVenki Rajagopalan
893*b494511aSVenki Rajagopalan mutex_enter(&bkt->bk_lock);
894*b494511aSVenki Rajagopalan
895*b494511aSVenki Rajagopalan /*
896*b494511aSVenki Rajagopalan * If there are some buffers still not reaped and the force
897*b494511aSVenki Rajagopalan * flag is not set, return without doing anything. Otherwise,
898*b494511aSVenki Rajagopalan * stop the lso bufs monitor and wait for it to die.
899*b494511aSVenki Rajagopalan */
900*b494511aSVenki Rajagopalan if ((bkt->bk_nelem != bkt->bk_nfree) && (force == B_FALSE)) {
901*b494511aSVenki Rajagopalan mutex_exit(&bkt->bk_lock);
902*b494511aSVenki Rajagopalan return (EIB_E_FAILURE);
903*b494511aSVenki Rajagopalan }
904*b494511aSVenki Rajagopalan
905*b494511aSVenki Rajagopalan bkt->bk_status |= EIB_LBUF_MONITOR_DIE;
906*b494511aSVenki Rajagopalan
907*b494511aSVenki Rajagopalan cv_signal(&bkt->bk_cv);
908*b494511aSVenki Rajagopalan mutex_exit(&bkt->bk_lock);
909*b494511aSVenki Rajagopalan
910*b494511aSVenki Rajagopalan thread_join(ss->ei_lsobufs_monitor);
911*b494511aSVenki Rajagopalan return (EIB_E_SUCCESS);
912*b494511aSVenki Rajagopalan }
913*b494511aSVenki Rajagopalan
914*b494511aSVenki Rajagopalan void
eib_stop_manage_keepalives(eib_t * ss)915*b494511aSVenki Rajagopalan eib_stop_manage_keepalives(eib_t *ss)
916*b494511aSVenki Rajagopalan {
917*b494511aSVenki Rajagopalan mutex_enter(&ss->ei_ka_vnics_lock);
918*b494511aSVenki Rajagopalan
919*b494511aSVenki Rajagopalan ss->ei_ka_vnics_event |= EIB_KA_VNICS_DIE;
920*b494511aSVenki Rajagopalan
921*b494511aSVenki Rajagopalan cv_signal(&ss->ei_ka_vnics_cv);
922*b494511aSVenki Rajagopalan mutex_exit(&ss->ei_ka_vnics_lock);
923*b494511aSVenki Rajagopalan
924*b494511aSVenki Rajagopalan thread_join(ss->ei_keepalives_manager);
925*b494511aSVenki Rajagopalan }
926*b494511aSVenki Rajagopalan
927*b494511aSVenki Rajagopalan void
eib_flush_vnic_reqs(eib_t * ss)928*b494511aSVenki Rajagopalan eib_flush_vnic_reqs(eib_t *ss)
929*b494511aSVenki Rajagopalan {
930*b494511aSVenki Rajagopalan eib_vnic_req_t *vrq;
931*b494511aSVenki Rajagopalan
932*b494511aSVenki Rajagopalan vrq = kmem_zalloc(sizeof (eib_vnic_req_t), KM_SLEEP);
933*b494511aSVenki Rajagopalan vrq->vr_req = EIB_CR_REQ_FLUSH;
934*b494511aSVenki Rajagopalan vrq->vr_next = NULL;
935*b494511aSVenki Rajagopalan
936*b494511aSVenki Rajagopalan eib_vnic_enqueue_req(ss, vrq);
937*b494511aSVenki Rajagopalan }
938*b494511aSVenki Rajagopalan
939*b494511aSVenki Rajagopalan /*ARGSUSED*/
940*b494511aSVenki Rajagopalan void
eib_gw_alive_cb(dev_info_t * dip,ddi_eventcookie_t cookie,void * arg,void * impl_data)941*b494511aSVenki Rajagopalan eib_gw_alive_cb(dev_info_t *dip, ddi_eventcookie_t cookie, void *arg,
942*b494511aSVenki Rajagopalan void *impl_data)
943*b494511aSVenki Rajagopalan {
944*b494511aSVenki Rajagopalan eib_t *ss = (eib_t *)arg;
945*b494511aSVenki Rajagopalan eib_event_t *evi;
946*b494511aSVenki Rajagopalan
947*b494511aSVenki Rajagopalan evi = kmem_zalloc(sizeof (eib_event_t), KM_NOSLEEP);
948*b494511aSVenki Rajagopalan if (evi == NULL) {
949*b494511aSVenki Rajagopalan EIB_DPRINTF_WARN(ss->ei_instance, "eib_gw_alive_cb: "
950*b494511aSVenki Rajagopalan "no memory, ignoring this gateway alive event");
951*b494511aSVenki Rajagopalan } else {
952*b494511aSVenki Rajagopalan evi->ev_code = EIB_EV_GW_UP;
953*b494511aSVenki Rajagopalan evi->ev_arg = NULL;
954*b494511aSVenki Rajagopalan eib_svc_enqueue_event(ss, evi);
955*b494511aSVenki Rajagopalan }
956*b494511aSVenki Rajagopalan }
957*b494511aSVenki Rajagopalan
958*b494511aSVenki Rajagopalan /*ARGSUSED*/
959*b494511aSVenki Rajagopalan void
eib_login_ack_cb(dev_info_t * dip,ddi_eventcookie_t cookie,void * arg,void * impl_data)960*b494511aSVenki Rajagopalan eib_login_ack_cb(dev_info_t *dip, ddi_eventcookie_t cookie, void *arg,
961*b494511aSVenki Rajagopalan void *impl_data)
962*b494511aSVenki Rajagopalan {
963*b494511aSVenki Rajagopalan eib_t *ss = (eib_t *)arg;
964*b494511aSVenki Rajagopalan uint8_t *pkt = (uint8_t *)impl_data;
965*b494511aSVenki Rajagopalan eib_login_data_t ld;
966*b494511aSVenki Rajagopalan
967*b494511aSVenki Rajagopalan /*
968*b494511aSVenki Rajagopalan * We have received a login ack message from the gateway via the EoIB
969*b494511aSVenki Rajagopalan * nexus (solicitation qpn). The packet is passed to us raw (unparsed)
970*b494511aSVenki Rajagopalan * and we have to figure out if this is a vnic login ack.
971*b494511aSVenki Rajagopalan */
972*b494511aSVenki Rajagopalan if (eib_fip_parse_login_ack(ss, pkt + EIB_GRH_SZ, &ld) == EIB_E_SUCCESS)
973*b494511aSVenki Rajagopalan eib_vnic_login_ack(ss, &ld);
974*b494511aSVenki Rajagopalan }
975*b494511aSVenki Rajagopalan
976*b494511aSVenki Rajagopalan /*ARGSUSED*/
977*b494511aSVenki Rajagopalan void
eib_gw_info_cb(dev_info_t * dip,ddi_eventcookie_t cookie,void * arg,void * impl_data)978*b494511aSVenki Rajagopalan eib_gw_info_cb(dev_info_t *dip, ddi_eventcookie_t cookie, void *arg,
979*b494511aSVenki Rajagopalan void *impl_data)
980*b494511aSVenki Rajagopalan {
981*b494511aSVenki Rajagopalan eib_t *ss = (eib_t *)arg;
982*b494511aSVenki Rajagopalan eib_event_t *evi;
983*b494511aSVenki Rajagopalan
984*b494511aSVenki Rajagopalan evi = kmem_zalloc(sizeof (eib_event_t), KM_NOSLEEP);
985*b494511aSVenki Rajagopalan if (evi == NULL) {
986*b494511aSVenki Rajagopalan EIB_DPRINTF_WARN(ss->ei_instance, "eib_gw_info_cb: "
987*b494511aSVenki Rajagopalan "no memory, ignoring this gateway props update event");
988*b494511aSVenki Rajagopalan return;
989*b494511aSVenki Rajagopalan }
990*b494511aSVenki Rajagopalan evi->ev_arg = kmem_zalloc(sizeof (eib_gw_info_t), KM_NOSLEEP);
991*b494511aSVenki Rajagopalan if (evi->ev_arg == NULL) {
992*b494511aSVenki Rajagopalan EIB_DPRINTF_WARN(ss->ei_instance, "eib_gw_info_cb: "
993*b494511aSVenki Rajagopalan "no memory, ignoring this gateway props update event");
994*b494511aSVenki Rajagopalan kmem_free(evi, sizeof (eib_event_t));
995*b494511aSVenki Rajagopalan return;
996*b494511aSVenki Rajagopalan }
997*b494511aSVenki Rajagopalan bcopy(impl_data, evi->ev_arg, sizeof (eib_gw_info_t));
998*b494511aSVenki Rajagopalan evi->ev_code = EIB_EV_GW_INFO_UPDATE;
999*b494511aSVenki Rajagopalan
1000*b494511aSVenki Rajagopalan eib_svc_enqueue_event(ss, evi);
1001*b494511aSVenki Rajagopalan }
1002